path: root/drivers/net/ethernet/freescale
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit    2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree      848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/net/ethernet/freescale
parent    Initial commit. (diff)
download  linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.tar.xz
          linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.zip

Adding upstream version 6.1.76. (upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/freescale')
-rw-r--r-- drivers/net/ethernet/freescale/Kconfig | 108
-rw-r--r-- drivers/net/ethernet/freescale/Makefile | 27
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/Kconfig | 11
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/Makefile | 13
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 3620
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_eth.h | 187
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c | 140
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h | 115
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c | 582
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/Kconfig | 39
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/Makefile | 17
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-dcb.c | 150
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c | 158
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h | 28
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c | 307
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h | 157
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 4896
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 774
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c | 897
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c | 574
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h | 54
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c | 262
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h | 18
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c | 211
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c | 885
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c | 3531
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h | 280
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpkg.h | 481
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h | 85
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpmac.c | 237
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpmac.h | 213
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h | 686
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpni.c | 2181
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpni.h | 1110
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h | 74
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dprtc.c | 293
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dprtc.h | 68
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h | 556
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpsw.c | 1661
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpsw.h | 791
-rw-r--r-- drivers/net/ethernet/freescale/enetc/Kconfig | 66
-rw-r--r-- drivers/net/ethernet/freescale/enetc/Makefile | 20
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc.c | 2937
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc.h | 554
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_cbdr.c | 250
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_ethtool.c | 928
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_hw.h | 965
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_ierb.c | 153
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_ierb.h | 20
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_mdio.c | 177
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_msg.c | 164
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c | 110
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_pf.c | 1414
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_pf.h | 60
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_ptp.c | 145
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_qos.c | 1625
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_vf.c | 263
-rw-r--r-- drivers/net/ethernet/freescale/fec.h | 675
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 4357
-rw-r--r-- drivers/net/ethernet/freescale/fec_mpc52xx.c | 1082
-rw-r--r-- drivers/net/ethernet/freescale/fec_mpc52xx.h | 294
-rw-r--r-- drivers/net/ethernet/freescale/fec_mpc52xx_phy.c | 154
-rw-r--r-- drivers/net/ethernet/freescale/fec_ptp.c | 639
-rw-r--r-- drivers/net/ethernet/freescale/fman/Kconfig | 39
-rw-r--r-- drivers/net/ethernet/freescale/fman/Makefile | 10
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman.c | 2934
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman.h | 381
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_dtsec.c | 1538
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_dtsec.h | 17
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_keygen.c | 757
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_keygen.h | 19
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_mac.h | 256
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_memac.c | 1251
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_memac.h | 20
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_muram.c | 132
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_muram.h | 26
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_port.c | 1920
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_port.h | 135
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_sp.c | 142
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_sp.h | 77
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_tgec.c | 831
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_tgec.h | 17
-rw-r--r-- drivers/net/ethernet/freescale/fman/mac.c | 507
-rw-r--r-- drivers/net/ethernet/freescale/fman/mac.h | 80
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/Kconfig | 35
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/Makefile | 15
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/fec.h | 42
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 1128
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/fs_enet.h | 241
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/mac-fcc.c | 583
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/mac-fec.c | 486
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/mac-scc.c | 479
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c | 226
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/mii-fec.c | 227
-rw-r--r-- drivers/net/ethernet/freescale/fsl_pq_mdio.c | 539
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c | 3648
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.h | 1369
-rw-r--r-- drivers/net/ethernet/freescale/gianfar_ethtool.c | 1516
-rw-r--r-- drivers/net/ethernet/freescale/ucc_geth.c | 3813
-rw-r--r-- drivers/net/ethernet/freescale/ucc_geth.h | 1233
-rw-r--r-- drivers/net/ethernet/freescale/ucc_geth_ethtool.c | 407
-rw-r--r-- drivers/net/ethernet/freescale/xgmac_mdio.c | 404
102 files changed, 71009 insertions, 0 deletions
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
new file mode 100644
index 000000000..ce866ae3d
--- /dev/null
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -0,0 +1,108 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Freescale device configuration
+#
+
+config NET_VENDOR_FREESCALE
+ bool "Freescale devices"
+ default y
+ depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \
+ M523x || M527x || M5272 || M528x || M520x || M532x || \
+ ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) || \
+ ARCH_LAYERSCAPE || ARCH_S32 || COMPILE_TEST
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Freescale devices. If you say Y, you will be
+ asked for your specific card in the following questions.
+
+if NET_VENDOR_FREESCALE
+
+config FEC
+ tristate "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
+ depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
+ ARCH_MXC || ARCH_S32 || SOC_IMX28 || COMPILE_TEST)
+ default ARCH_MXC || SOC_IMX28 if ARM
+ depends on PTP_1588_CLOCK_OPTIONAL
+ select CRC32
+ select PHYLIB
+ select PAGE_POOL
+ imply NET_SELFTESTS
+ help
+ Say Y here if you want to use the built-in 10/100 Fast ethernet
+ controller on some Motorola ColdFire and Freescale i.MX/S32 processors.
+
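+# Illustrative .config fragment (not part of the upstream file): on an i.MX
+# board the FEC driver would typically be built as a module with
+#   CONFIG_NET_VENDOR_FREESCALE=y
+#   CONFIG_FEC=m
+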
+config FEC_MPC52xx
+ tristate "FEC MPC52xx driver"
+ depends on PPC_MPC52xx && PPC_BESTCOMM
+ select CRC32
+ select PHYLIB
+ select PPC_BESTCOMM_FEC
+ help
+ This option enables support for the MPC5200's on-chip
+ Fast Ethernet Controller.
+ If compiled as module, it will be called fec_mpc52xx.
+
+config FEC_MPC52xx_MDIO
+ bool "FEC MPC52xx MDIO bus driver"
+ depends on FEC_MPC52xx
+ default y
+ help
+ The MPC5200's FEC can connect to the Ethernet either with
+ an external MII PHY chip or 10 Mbps 7-wire interface
+ (Motorola? industry standard).
+ If your board uses an external PHY connected to FEC, enable this.
+ If not sure, enable.
+ If compiled as module, it will be called fec_mpc52xx_phy.
+
+source "drivers/net/ethernet/freescale/fs_enet/Kconfig"
+source "drivers/net/ethernet/freescale/fman/Kconfig"
+
+config FSL_PQ_MDIO
+ tristate "Freescale PQ MDIO"
+ select PHYLIB
+ help
+ This driver supports the MDIO bus used by the gianfar and UCC drivers.
+
+config FSL_XGMAC_MDIO
+ tristate "Freescale XGMAC MDIO"
+ select PHYLIB
+ depends on OF
+ select OF_MDIO
+ help
+ This driver supports the MDIO bus on the Fman 10G Ethernet MACs, and
+ on the FMan mEMAC (which supports both Clauses 22 and 45)
+
+config UCC_GETH
+ tristate "Freescale QE Gigabit Ethernet"
+ depends on QUICC_ENGINE && PPC32
+ select FSL_PQ_MDIO
+ select PHYLIB
+ select FIXED_PHY
+ help
+ This driver supports the Gigabit Ethernet mode of the QUICC Engine,
+ which is available on some Freescale SOCs.
+
+config UGETH_TX_ON_DEMAND
+ bool "Transmit on Demand support"
+ depends on UCC_GETH
+
+config GIANFAR
+ tristate "Gianfar Ethernet"
+ depends on HAS_DMA
+ select FSL_PQ_MDIO
+ select PHYLIB
+ select FIXED_PHY
+ select CRC32
+ help
+ This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
+ and MPC86xx family of chips, the eTSEC on LS1021A and the FEC
+ on the 8540.
+
+source "drivers/net/ethernet/freescale/dpaa/Kconfig"
+source "drivers/net/ethernet/freescale/dpaa2/Kconfig"
+source "drivers/net/ethernet/freescale/enetc/Kconfig"
+
+endif # NET_VENDOR_FREESCALE
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
new file mode 100644
index 000000000..de7b31842
--- /dev/null
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Freescale network device drivers.
+#
+
+obj-$(CONFIG_FEC) += fec.o
+fec-objs := fec_main.o fec_ptp.o
+
+obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
+ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
+ obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
+endif
+obj-$(CONFIG_FS_ENET) += fs_enet/
+obj-$(CONFIG_FSL_PQ_MDIO) += fsl_pq_mdio.o
+obj-$(CONFIG_FSL_XGMAC_MDIO) += xgmac_mdio.o
+obj-$(CONFIG_GIANFAR) += gianfar_driver.o
+gianfar_driver-objs := gianfar.o \
+ gianfar_ethtool.o
+obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
+ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o
+
+obj-$(CONFIG_FSL_FMAN) += fman/
+obj-$(CONFIG_FSL_DPAA_ETH) += dpaa/
+
+obj-$(CONFIG_FSL_DPAA2_ETH) += dpaa2/
+
+obj-y += enetc/
diff --git a/drivers/net/ethernet/freescale/dpaa/Kconfig b/drivers/net/ethernet/freescale/dpaa/Kconfig
new file mode 100644
index 000000000..0e1439fd0
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig FSL_DPAA_ETH
+ tristate "DPAA Ethernet"
+ depends on FSL_DPAA && FSL_FMAN
+ select PHYLIB
+ select FIXED_PHY
+ help
+ Data Path Acceleration Architecture Ethernet driver,
+ supporting the Freescale QorIQ chips.
+ Depends on Freescale Buffer Manager and Queue Manager
+ driver and Frame Manager Driver.
diff --git a/drivers/net/ethernet/freescale/dpaa/Makefile b/drivers/net/ethernet/freescale/dpaa/Makefile
new file mode 100644
index 000000000..4f23e7923
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Freescale DPAA Ethernet controllers
+#
+
+# Include FMan headers
+FMAN = $(srctree)/drivers/net/ethernet/freescale/fman
+ccflags-y += -I$(FMAN)
+
+obj-$(CONFIG_FSL_DPAA_ETH) += fsl_dpa.o
+
+fsl_dpa-objs += dpaa_eth.o dpaa_ethtool.o dpaa_eth_sysfs.o
+CFLAGS_dpaa_eth.o := -I$(src)
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
new file mode 100644
index 000000000..981cc3248
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -0,0 +1,3620 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * Copyright 2008 - 2016 Freescale Semiconductor Inc.
+ * Copyright 2020 NXP
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/io.h>
+#include <linux/if_arp.h>
+#include <linux/if_vlan.h>
+#include <linux/icmp.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/highmem.h>
+#include <linux/percpu.h>
+#include <linux/dma-mapping.h>
+#include <linux/sort.h>
+#include <linux/phy_fixed.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <soc/fsl/bman.h>
+#include <soc/fsl/qman.h>
+#include "fman.h"
+#include "fman_port.h"
+#include "mac.h"
+#include "dpaa_eth.h"
+
+/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa files
+ * using trace events only need to #include <trace/events/sched.h>
+ */
+#define CREATE_TRACE_POINTS
+#include "dpaa_eth_trace.h"
+
+static int debug = -1;
+module_param(debug, int, 0444);
+MODULE_PARM_DESC(debug, "Module/Driver verbosity level (0=none,...,16=all)");
+
+static u16 tx_timeout = 1000;
+module_param(tx_timeout, ushort, 0444);
+MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
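+
+/* Illustrative usage, not taken from the upstream source: with these objects
+ * linked into the fsl_dpa module (see the dpaa Makefile above), both
+ * parameters can be set at load time, e.g.
+ *
+ *   modprobe fsl_dpa debug=4 tx_timeout=2000
+ *
+ * tx_timeout ends up as the netdev watchdog timeout, in milliseconds, in
+ * dpaa_netdev_init() below.
+ */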
+
+#define FM_FD_STAT_RX_ERRORS \
+ (FM_FD_ERR_DMA | FM_FD_ERR_PHYSICAL | \
+ FM_FD_ERR_SIZE | FM_FD_ERR_CLS_DISCARD | \
+ FM_FD_ERR_EXTRACTION | FM_FD_ERR_NO_SCHEME | \
+ FM_FD_ERR_PRS_TIMEOUT | FM_FD_ERR_PRS_ILL_INSTRUCT | \
+ FM_FD_ERR_PRS_HDR_ERR)
+
+#define FM_FD_STAT_TX_ERRORS \
+ (FM_FD_ERR_UNSUPPORTED_FORMAT | \
+ FM_FD_ERR_LENGTH | FM_FD_ERR_DMA)
+
+#define DPAA_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | NETIF_MSG_IFUP | \
+ NETIF_MSG_IFDOWN | NETIF_MSG_HW)
+
+#define DPAA_INGRESS_CS_THRESHOLD 0x10000000
+/* Ingress congestion threshold on FMan ports
+ * The size in bytes of the ingress tail-drop threshold on FMan ports.
+ * Traffic piling up above this value will be rejected by QMan and discarded
+ * by FMan.
+ */
+
+/* Size in bytes of the FQ taildrop threshold */
+#define DPAA_FQ_TD 0x200000
+
+#define DPAA_CS_THRESHOLD_1G 0x06000000
+/* Egress congestion threshold on 1G ports, range 0x1000 .. 0x10000000
+ * The size in bytes of the egress Congestion State notification threshold on
+ * 1G ports. The 1G dTSECs can quite easily be flooded by cores doing Tx in a
+ * tight loop (e.g. by sending UDP datagrams at "while(1) speed"),
+ * and the larger the frame size, the more acute the problem.
+ * So we have to find a balance between these factors:
+ * - avoiding the device staying congested for a prolonged time (risking
+ * that the netdev watchdog fires - see also the tx_timeout module param);
+ * - affecting performance of protocols such as TCP, which otherwise
+ * behave well under the congestion notification mechanism;
+ * - preventing the Tx cores from tightly-looping (as if the congestion
+ * threshold was too low to be effective);
+ * - running out of memory if the CS threshold is set too high.
+ */
+
+#define DPAA_CS_THRESHOLD_10G 0x10000000
+/* The size in bytes of the egress Congestion State notification threshold on
+ * 10G ports, range 0x1000 .. 0x10000000
+ */
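+
+/* Worked example for the thresholds above (illustrative only): the values
+ * are plain byte counts, so DPAA_CS_THRESHOLD_1G = 0x06000000 is 96 MiB,
+ * DPAA_CS_THRESHOLD_10G = DPAA_INGRESS_CS_THRESHOLD = 0x10000000 is 256 MiB,
+ * and the per-FQ taildrop DPAA_FQ_TD = 0x200000 is 2 MiB.
+ */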
+
+/* Largest value that the FQD's OAL field can hold */
+#define FSL_QMAN_MAX_OAL 127
+
+/* Default alignment for start of data in an Rx FD */
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+/* aligning data start to 64 avoids DMA transaction splits, unless the buffer
+ * is crossing a 4k page boundary
+ */
+#define DPAA_FD_DATA_ALIGNMENT (fman_has_errata_a050385() ? 64 : 16)
+/* aligning to 256 avoids DMA transaction splits caused by 4k page boundary
+ * crossings; also, all SG fragments except the last must have a size multiple
+ * of 256 to avoid DMA transaction splits
+ */
+#define DPAA_A050385_ALIGN 256
+#define DPAA_FD_RX_DATA_ALIGNMENT (fman_has_errata_a050385() ? \
+ DPAA_A050385_ALIGN : 16)
+#else
+#define DPAA_FD_DATA_ALIGNMENT 16
+#define DPAA_FD_RX_DATA_ALIGNMENT DPAA_FD_DATA_ALIGNMENT
+#endif
+
+/* The DPAA requires 256 bytes reserved and mapped for the SGT */
+#define DPAA_SGT_SIZE 256
+
+/* Values for the L3R field of the FM Parse Results
+ */
+/* L3 Type field: First IP Present IPv4 */
+#define FM_L3_PARSE_RESULT_IPV4 0x8000
+/* L3 Type field: First IP Present IPv6 */
+#define FM_L3_PARSE_RESULT_IPV6 0x4000
+/* Values for the L4R field of the FM Parse Results */
+/* L4 Type field: UDP */
+#define FM_L4_PARSE_RESULT_UDP 0x40
+/* L4 Type field: TCP */
+#define FM_L4_PARSE_RESULT_TCP 0x20
+
+/* FD status field indicating whether the FM Parser has attempted to validate
+ * the L4 csum of the frame.
+ * Note that having this bit set doesn't necessarily imply that the checksum
+ * is valid. One would have to check the parse results to find that out.
+ */
+#define FM_FD_STAT_L4CV 0x00000004
+
+#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
+#define DPAA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
+
+#define FSL_DPAA_BPID_INV 0xff
+#define FSL_DPAA_ETH_MAX_BUF_COUNT 128
+#define FSL_DPAA_ETH_REFILL_THRESHOLD 80
+
+#define DPAA_TX_PRIV_DATA_SIZE 16
+#define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
+#define DPAA_TIME_STAMP_SIZE 8
+#define DPAA_HASH_RESULTS_SIZE 8
+#define DPAA_HWA_SIZE (DPAA_PARSE_RESULTS_SIZE + DPAA_TIME_STAMP_SIZE \
+ + DPAA_HASH_RESULTS_SIZE)
+#define DPAA_RX_PRIV_DATA_DEFAULT_SIZE (DPAA_TX_PRIV_DATA_SIZE + \
+ XDP_PACKET_HEADROOM - DPAA_HWA_SIZE)
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+#define DPAA_RX_PRIV_DATA_A050385_SIZE (DPAA_A050385_ALIGN - DPAA_HWA_SIZE)
+#define DPAA_RX_PRIV_DATA_SIZE (fman_has_errata_a050385() ? \
+ DPAA_RX_PRIV_DATA_A050385_SIZE : \
+ DPAA_RX_PRIV_DATA_DEFAULT_SIZE)
+#else
+#define DPAA_RX_PRIV_DATA_SIZE DPAA_RX_PRIV_DATA_DEFAULT_SIZE
+#endif
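+
+/* Worked example (illustrative; the exact figures depend on the kernel
+ * configuration): assuming the usual 32-byte struct fman_prs_result and the
+ * generic 256-byte XDP_PACKET_HEADROOM, DPAA_HWA_SIZE is 32 + 8 + 8 = 48
+ * bytes, DPAA_RX_PRIV_DATA_DEFAULT_SIZE is 16 + 256 - 48 = 224 bytes, and
+ * the A050385 variant is 256 - 48 = 208 bytes.
+ */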
+
+#define DPAA_ETH_PCD_RXQ_NUM 128
+
+#define DPAA_ENQUEUE_RETRIES 100000
+
+enum port_type {RX, TX};
+
+struct fm_port_fqs {
+ struct dpaa_fq *tx_defq;
+ struct dpaa_fq *tx_errq;
+ struct dpaa_fq *rx_defq;
+ struct dpaa_fq *rx_errq;
+ struct dpaa_fq *rx_pcdq;
+};
+
+/* All the dpa bps in use at any moment */
+static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];
+
+#define DPAA_BP_RAW_SIZE 4096
+
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+#define dpaa_bp_size(raw_size) (SKB_WITH_OVERHEAD(raw_size) & \
+ ~(DPAA_A050385_ALIGN - 1))
+#else
+#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD(raw_size)
+#endif
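+
+/* Rough sizing note (illustrative, config dependent): on a typical 64-bit
+ * build SKB_WITH_OVERHEAD(4096) leaves about 3776 bytes after the shared
+ * info area; with the A050385 workaround the result is additionally rounded
+ * down to a multiple of 256, i.e. about 3584 bytes per buffer.
+ */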
+
+static int dpaa_max_frm;
+
+static int dpaa_rx_extra_headroom;
+
+#define dpaa_get_max_mtu() \
+ (dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN))
+
+static void dpaa_eth_cgr_set_speed(struct mac_device *mac_dev, int speed);
+
+static int dpaa_netdev_init(struct net_device *net_dev,
+ const struct net_device_ops *dpaa_ops,
+ u16 tx_timeout)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct device *dev = net_dev->dev.parent;
+ struct mac_device *mac_dev = priv->mac_dev;
+ struct dpaa_percpu_priv *percpu_priv;
+ const u8 *mac_addr;
+ int i, err;
+
+ /* Although we access another CPU's private data here
+ * we do it at initialization so it is safe
+ */
+ for_each_possible_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+ percpu_priv->net_dev = net_dev;
+ }
+
+ net_dev->netdev_ops = dpaa_ops;
+ mac_addr = mac_dev->addr;
+
+ net_dev->mem_start = (unsigned long)priv->mac_dev->res->start;
+ net_dev->mem_end = (unsigned long)priv->mac_dev->res->end;
+
+ net_dev->min_mtu = ETH_MIN_MTU;
+ net_dev->max_mtu = dpaa_get_max_mtu();
+
+ net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_LLTX | NETIF_F_RXHASH);
+
+ net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
+ /* The kernel enables GSO automatically, if we declare NETIF_F_SG.
+ * For conformity, we'll still declare GSO explicitly.
+ */
+ net_dev->features |= NETIF_F_GSO;
+ net_dev->features |= NETIF_F_RXCSUM;
+
+ net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ /* we do not want shared skbs on TX */
+ net_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+
+ net_dev->features |= net_dev->hw_features;
+ net_dev->vlan_features = net_dev->features;
+
+ if (is_valid_ether_addr(mac_addr)) {
+ memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
+ eth_hw_addr_set(net_dev, mac_addr);
+ } else {
+ eth_hw_addr_random(net_dev);
+ err = mac_dev->change_addr(mac_dev->fman_mac,
+ (const enet_addr_t *)net_dev->dev_addr);
+ if (err) {
+ dev_err(dev, "Failed to set random MAC address\n");
+ return -EINVAL;
+ }
+ dev_info(dev, "Using random MAC address: %pM\n",
+ net_dev->dev_addr);
+ }
+
+ net_dev->ethtool_ops = &dpaa_ethtool_ops;
+
+ net_dev->needed_headroom = priv->tx_headroom;
+ net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
+
+ mac_dev->net_dev = net_dev;
+ mac_dev->update_speed = dpaa_eth_cgr_set_speed;
+
+ /* start without the RUNNING flag, phylib controls it later */
+ netif_carrier_off(net_dev);
+
+ err = register_netdev(net_dev);
+ if (err < 0) {
+ dev_err(dev, "register_netdev() = %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int dpaa_stop(struct net_device *net_dev)
+{
+ struct mac_device *mac_dev;
+ struct dpaa_priv *priv;
+ int i, error;
+ int err = 0;
+
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+
+ netif_tx_stop_all_queues(net_dev);
+ /* Allow the Fman (Tx) port to process in-flight frames before we
+ * try switching it off.
+ */
+ msleep(200);
+
+ if (mac_dev->phy_dev)
+ phy_stop(mac_dev->phy_dev);
+ mac_dev->disable(mac_dev->fman_mac);
+
+ for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
+ error = fman_port_disable(mac_dev->port[i]);
+ if (error)
+ err = error;
+ }
+
+ if (net_dev->phydev)
+ phy_disconnect(net_dev->phydev);
+ net_dev->phydev = NULL;
+
+ msleep(200);
+
+ return err;
+}
+
+static void dpaa_tx_timeout(struct net_device *net_dev, unsigned int txqueue)
+{
+ struct dpaa_percpu_priv *percpu_priv;
+ const struct dpaa_priv *priv;
+
+ priv = netdev_priv(net_dev);
+ percpu_priv = this_cpu_ptr(priv->percpu_priv);
+
+ netif_crit(priv, timer, net_dev, "Transmit timeout latency: %u ms\n",
+ jiffies_to_msecs(jiffies - dev_trans_start(net_dev)));
+
+ percpu_priv->stats.tx_errors++;
+}
+
+/* Calculates the statistics for the given device by adding the statistics
+ * collected by each CPU.
+ */
+static void dpaa_get_stats64(struct net_device *net_dev,
+ struct rtnl_link_stats64 *s)
+{
+ int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct dpaa_percpu_priv *percpu_priv;
+ u64 *netstats = (u64 *)s;
+ u64 *cpustats;
+ int i, j;
+
+ for_each_possible_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+
+ cpustats = (u64 *)&percpu_priv->stats;
+
+ /* add stats from all CPUs */
+ for (j = 0; j < numstats; j++)
+ netstats[j] += cpustats[j];
+ }
+}
+
+static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct tc_mqprio_qopt *mqprio = type_data;
+ u8 num_tc;
+ int i;
+
+ if (type != TC_SETUP_QDISC_MQPRIO)
+ return -EOPNOTSUPP;
+
+ mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ num_tc = mqprio->num_tc;
+
+ if (num_tc == priv->num_tc)
+ return 0;
+
+ if (!num_tc) {
+ netdev_reset_tc(net_dev);
+ goto out;
+ }
+
+ if (num_tc > DPAA_TC_NUM) {
+ netdev_err(net_dev, "Too many traffic classes: max %d supported.\n",
+ DPAA_TC_NUM);
+ return -EINVAL;
+ }
+
+ netdev_set_num_tc(net_dev, num_tc);
+
+ for (i = 0; i < num_tc; i++)
+ netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM,
+ i * DPAA_TC_TXQ_NUM);
+
+out:
+ priv->num_tc = num_tc ? : 1;
+ netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
+ return 0;
+}
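+
+/* Worked example for dpaa_setup_tc() (illustrative, assuming
+ * DPAA_TC_TXQ_NUM == 4): an mqprio request with num_tc = 3 assigns traffic
+ * classes 0, 1 and 2 the Tx queue ranges 0-3, 4-7 and 8-11, and sets the
+ * real number of Tx queues to 3 * 4 = 12.
+ */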
+
+static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
+{
+ struct dpaa_eth_data *eth_data;
+ struct device *dpaa_dev;
+ struct mac_device *mac_dev;
+
+ dpaa_dev = &pdev->dev;
+ eth_data = dpaa_dev->platform_data;
+ if (!eth_data) {
+ dev_err(dpaa_dev, "eth_data missing\n");
+ return ERR_PTR(-ENODEV);
+ }
+ mac_dev = eth_data->mac_dev;
+ if (!mac_dev) {
+ dev_err(dpaa_dev, "mac_dev missing\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return mac_dev;
+}
+
+static int dpaa_set_mac_address(struct net_device *net_dev, void *addr)
+{
+ const struct dpaa_priv *priv;
+ struct mac_device *mac_dev;
+ struct sockaddr old_addr;
+ int err;
+
+ priv = netdev_priv(net_dev);
+
+ memcpy(old_addr.sa_data, net_dev->dev_addr, ETH_ALEN);
+
+ err = eth_mac_addr(net_dev, addr);
+ if (err < 0) {
+ netif_err(priv, drv, net_dev, "eth_mac_addr() = %d\n", err);
+ return err;
+ }
+
+ mac_dev = priv->mac_dev;
+
+ err = mac_dev->change_addr(mac_dev->fman_mac,
+ (const enet_addr_t *)net_dev->dev_addr);
+ if (err < 0) {
+ netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n",
+ err);
+ /* reverting to previous address */
+ eth_mac_addr(net_dev, &old_addr);
+
+ return err;
+ }
+
+ return 0;
+}
+
+static void dpaa_set_rx_mode(struct net_device *net_dev)
+{
+ const struct dpaa_priv *priv;
+ int err;
+
+ priv = netdev_priv(net_dev);
+
+ if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
+ priv->mac_dev->promisc = !priv->mac_dev->promisc;
+ err = priv->mac_dev->set_promisc(priv->mac_dev->fman_mac,
+ priv->mac_dev->promisc);
+ if (err < 0)
+ netif_err(priv, drv, net_dev,
+ "mac_dev->set_promisc() = %d\n",
+ err);
+ }
+
+ if (!!(net_dev->flags & IFF_ALLMULTI) != priv->mac_dev->allmulti) {
+ priv->mac_dev->allmulti = !priv->mac_dev->allmulti;
+ err = priv->mac_dev->set_allmulti(priv->mac_dev->fman_mac,
+ priv->mac_dev->allmulti);
+ if (err < 0)
+ netif_err(priv, drv, net_dev,
+ "mac_dev->set_allmulti() = %d\n",
+ err);
+ }
+
+ err = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
+ if (err < 0)
+ netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n",
+ err);
+}
+
+static struct dpaa_bp *dpaa_bpid2pool(int bpid)
+{
+ if (WARN_ON(bpid < 0 || bpid >= BM_MAX_NUM_OF_POOLS))
+ return NULL;
+
+ return dpaa_bp_array[bpid];
+}
+
+/* checks if this bpool is already allocated */
+static bool dpaa_bpid2pool_use(int bpid)
+{
+ if (dpaa_bpid2pool(bpid)) {
+ refcount_inc(&dpaa_bp_array[bpid]->refs);
+ return true;
+ }
+
+ return false;
+}
+
+/* called only once per bpid by dpaa_bp_alloc_pool() */
+static void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp)
+{
+ dpaa_bp_array[bpid] = dpaa_bp;
+ refcount_set(&dpaa_bp->refs, 1);
+}
+
+static int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp)
+{
+ int err;
+
+ if (dpaa_bp->size == 0 || dpaa_bp->config_count == 0) {
+ pr_err("%s: Buffer pool is not properly initialized! Missing size or initial number of buffers\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* If the pool is already specified, we only create one per bpid */
+ if (dpaa_bp->bpid != FSL_DPAA_BPID_INV &&
+ dpaa_bpid2pool_use(dpaa_bp->bpid))
+ return 0;
+
+ if (dpaa_bp->bpid == FSL_DPAA_BPID_INV) {
+ dpaa_bp->pool = bman_new_pool();
+ if (!dpaa_bp->pool) {
+ pr_err("%s: bman_new_pool() failed\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ dpaa_bp->bpid = (u8)bman_get_bpid(dpaa_bp->pool);
+ }
+
+ if (dpaa_bp->seed_cb) {
+ err = dpaa_bp->seed_cb(dpaa_bp);
+ if (err)
+ goto pool_seed_failed;
+ }
+
+ dpaa_bpid2pool_map(dpaa_bp->bpid, dpaa_bp);
+
+ return 0;
+
+pool_seed_failed:
+ pr_err("%s: pool seeding failed\n", __func__);
+ bman_free_pool(dpaa_bp->pool);
+
+ return err;
+}
+
+/* remove and free all the buffers from the given buffer pool */
+static void dpaa_bp_drain(struct dpaa_bp *bp)
+{
+ u8 num = 8;
+ int ret;
+
+ do {
+ struct bm_buffer bmb[8];
+ int i;
+
+ ret = bman_acquire(bp->pool, bmb, num);
+ if (ret < 0) {
+ if (num == 8) {
+ /* we have less than 8 buffers left;
+ * drain them one by one
+ */
+ num = 1;
+ ret = 1;
+ continue;
+ } else {
+ /* Pool is fully drained */
+ break;
+ }
+ }
+
+ if (bp->free_buf_cb)
+ for (i = 0; i < num; i++)
+ bp->free_buf_cb(bp, &bmb[i]);
+ } while (ret > 0);
+}
+
+static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)
+{
+ struct dpaa_bp *bp = dpaa_bpid2pool(dpaa_bp->bpid);
+
+ /* the mapping between bpid and dpaa_bp is done very late in the
+ * allocation procedure; if something failed before the mapping, the bp
+ * was not configured, therefore we don't need the below instructions
+ */
+ if (!bp)
+ return;
+
+ if (!refcount_dec_and_test(&bp->refs))
+ return;
+
+ if (bp->free_buf_cb)
+ dpaa_bp_drain(bp);
+
+ dpaa_bp_array[bp->bpid] = NULL;
+ bman_free_pool(bp->pool);
+}
+
+static void dpaa_bps_free(struct dpaa_priv *priv)
+{
+ dpaa_bp_free(priv->dpaa_bp);
+}
+
+/* Use multiple WQs for FQ assignment:
+ * - Tx Confirmation queues go to WQ1.
+ * - Rx Error and Tx Error queues go to WQ5 (giving them a better chance
+ * to be scheduled, in case there are many more FQs in WQ6).
+ * - Rx Default goes to WQ6.
+ * - Tx queues go to different WQs depending on their priority. Equal
+ * chunks of NR_CPUS queues go to WQ6 (lowest priority), WQ2, WQ1 and
+ * WQ0 (highest priority).
+ * This ensures that Tx-confirmed buffers are timely released. In particular,
+ * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
+ * are greatly outnumbered by other FQs in the system, while
+ * dequeue scheduling is round-robin.
+ */
+static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
+{
+ switch (fq->fq_type) {
+ case FQ_TYPE_TX_CONFIRM:
+ case FQ_TYPE_TX_CONF_MQ:
+ fq->wq = 1;
+ break;
+ case FQ_TYPE_RX_ERROR:
+ case FQ_TYPE_TX_ERROR:
+ fq->wq = 5;
+ break;
+ case FQ_TYPE_RX_DEFAULT:
+ case FQ_TYPE_RX_PCD:
+ fq->wq = 6;
+ break;
+ case FQ_TYPE_TX:
+ switch (idx / DPAA_TC_TXQ_NUM) {
+ case 0:
+ /* Low priority (best effort) */
+ fq->wq = 6;
+ break;
+ case 1:
+ /* Medium priority */
+ fq->wq = 2;
+ break;
+ case 2:
+ /* High priority */
+ fq->wq = 1;
+ break;
+ case 3:
+ /* Very high priority */
+ fq->wq = 0;
+ break;
+ default:
+ WARN(1, "Too many TX FQs: more than %d!\n",
+ DPAA_ETH_TXQ_NUM);
+ }
+ break;
+ default:
+ WARN(1, "Invalid FQ type %d for FQID %d!\n",
+ fq->fq_type, fq->fqid);
+ }
+}
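+
+/* Worked example for dpaa_assign_wq() (illustrative, assuming
+ * DPAA_TC_TXQ_NUM == 4): Tx FQ indices 0-3 map to WQ6 (best effort),
+ * 4-7 to WQ2, 8-11 to WQ1 and 12-15 to WQ0 (highest priority), while all
+ * Tx confirmation FQs land on WQ1 and the Rx/Tx error FQs on WQ5.
+ */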
+
+static struct dpaa_fq *dpaa_fq_alloc(struct device *dev,
+ u32 start, u32 count,
+ struct list_head *list,
+ enum dpaa_fq_type fq_type)
+{
+ struct dpaa_fq *dpaa_fq;
+ int i;
+
+ dpaa_fq = devm_kcalloc(dev, count, sizeof(*dpaa_fq),
+ GFP_KERNEL);
+ if (!dpaa_fq)
+ return NULL;
+
+ for (i = 0; i < count; i++) {
+ dpaa_fq[i].fq_type = fq_type;
+ dpaa_fq[i].fqid = start ? start + i : 0;
+ list_add_tail(&dpaa_fq[i].list, list);
+ }
+
+ for (i = 0; i < count; i++)
+ dpaa_assign_wq(dpaa_fq + i, i);
+
+ return dpaa_fq;
+}
+
+static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
+ struct fm_port_fqs *port_fqs)
+{
+ struct dpaa_fq *dpaa_fq;
+ u32 fq_base, fq_base_aligned, i;
+
+ dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_ERROR);
+ if (!dpaa_fq)
+ goto fq_alloc_failed;
+
+ port_fqs->rx_errq = &dpaa_fq[0];
+
+ dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_DEFAULT);
+ if (!dpaa_fq)
+ goto fq_alloc_failed;
+
+ port_fqs->rx_defq = &dpaa_fq[0];
+
+ /* the PCD FQIDs range needs to be aligned for correct operation */
+ if (qman_alloc_fqid_range(&fq_base, 2 * DPAA_ETH_PCD_RXQ_NUM))
+ goto fq_alloc_failed;
+
+ fq_base_aligned = ALIGN(fq_base, DPAA_ETH_PCD_RXQ_NUM);
+
+ for (i = fq_base; i < fq_base_aligned; i++)
+ qman_release_fqid(i);
+
+ for (i = fq_base_aligned + DPAA_ETH_PCD_RXQ_NUM;
+ i < (fq_base + 2 * DPAA_ETH_PCD_RXQ_NUM); i++)
+ qman_release_fqid(i);
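+
+ /* Numeric illustration (made-up values): if qman_alloc_fqid_range()
+ * returned fq_base = 300, then fq_base_aligned = 384; FQIDs 300..383
+ * and 512..555 are released above, keeping the aligned window
+ * 384..511 (DPAA_ETH_PCD_RXQ_NUM = 128 FQIDs) for the PCD queues.
+ */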
+
+ dpaa_fq = dpaa_fq_alloc(dev, fq_base_aligned, DPAA_ETH_PCD_RXQ_NUM,
+ list, FQ_TYPE_RX_PCD);
+ if (!dpaa_fq)
+ goto fq_alloc_failed;
+
+ port_fqs->rx_pcdq = &dpaa_fq[0];
+
+ if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ))
+ goto fq_alloc_failed;
+
+ dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR);
+ if (!dpaa_fq)
+ goto fq_alloc_failed;
+
+ port_fqs->tx_errq = &dpaa_fq[0];
+
+ dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_CONFIRM);
+ if (!dpaa_fq)
+ goto fq_alloc_failed;
+
+ port_fqs->tx_defq = &dpaa_fq[0];
+
+ if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX))
+ goto fq_alloc_failed;
+
+ return 0;
+
+fq_alloc_failed:
+ dev_err(dev, "dpaa_fq_alloc() failed\n");
+ return -ENOMEM;
+}
+
+static u32 rx_pool_channel;
+static DEFINE_SPINLOCK(rx_pool_channel_init);
+
+static int dpaa_get_channel(void)
+{
+ spin_lock(&rx_pool_channel_init);
+ if (!rx_pool_channel) {
+ u32 pool;
+ int ret;
+
+ ret = qman_alloc_pool(&pool);
+
+ if (!ret)
+ rx_pool_channel = pool;
+ }
+ spin_unlock(&rx_pool_channel_init);
+ if (!rx_pool_channel)
+ return -ENOMEM;
+ return rx_pool_channel;
+}
+
+static void dpaa_release_channel(void)
+{
+ qman_release_pool(rx_pool_channel);
+}
+
+static void dpaa_eth_add_channel(u16 channel, struct device *dev)
+{
+ u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
+ const cpumask_t *cpus = qman_affine_cpus();
+ struct qman_portal *portal;
+ int cpu;
+
+ for_each_cpu_and(cpu, cpus, cpu_online_mask) {
+ portal = qman_get_affine_portal(cpu);
+ qman_p_static_dequeue_add(portal, pool);
+ qman_start_using_portal(portal, dev);
+ }
+}
+
+/* Congestion group state change notification callback.
+ * Stops the device's egress queues while they are congested and
+ * wakes them upon exiting congested state.
+ * Also updates some CGR-related stats.
+ */
+static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
+ int congested)
+{
+ struct dpaa_priv *priv = (struct dpaa_priv *)container_of(cgr,
+ struct dpaa_priv, cgr_data.cgr);
+
+ if (congested) {
+ priv->cgr_data.congestion_start_jiffies = jiffies;
+ netif_tx_stop_all_queues(priv->net_dev);
+ priv->cgr_data.cgr_congested_count++;
+ } else {
+ priv->cgr_data.congested_jiffies +=
+ (jiffies - priv->cgr_data.congestion_start_jiffies);
+ netif_tx_wake_all_queues(priv->net_dev);
+ }
+}
+
+static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
+{
+ struct qm_mcc_initcgr initcgr;
+ u32 cs_th;
+ int err;
+
+ err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
+ if (err < 0) {
+ if (netif_msg_drv(priv))
+ pr_err("%s: Error %d allocating CGR ID\n",
+ __func__, err);
+ goto out_error;
+ }
+ priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
+
+ /* Enable Congestion State Change Notifications and CS taildrop */
+ memset(&initcgr, 0, sizeof(initcgr));
+ initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
+ initcgr.cgr.cscn_en = QM_CGR_EN;
+
+ /* Set different thresholds based on the configured MAC speed.
+ * This may turn suboptimal if the MAC is reconfigured at another
+ * speed, so MACs must call dpaa_eth_cgr_set_speed in their adjust_link
+ * callback.
+ */
+ if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
+ cs_th = DPAA_CS_THRESHOLD_10G;
+ else
+ cs_th = DPAA_CS_THRESHOLD_1G;
+ qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
+
+ initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
+ initcgr.cgr.cstd_en = QM_CGR_EN;
+
+ err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
+ &initcgr);
+ if (err < 0) {
+ if (netif_msg_drv(priv))
+ pr_err("%s: Error %d creating CGR with ID %d\n",
+ __func__, err, priv->cgr_data.cgr.cgrid);
+ qman_release_cgrid(priv->cgr_data.cgr.cgrid);
+ goto out_error;
+ }
+ if (netif_msg_drv(priv))
+ pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
+ priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
+ priv->cgr_data.cgr.chan);
+
+out_error:
+ return err;
+}
+
+static void dpaa_eth_cgr_set_speed(struct mac_device *mac_dev, int speed)
+{
+ struct net_device *net_dev = mac_dev->net_dev;
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct qm_mcc_initcgr opts = { };
+ u32 cs_th;
+ int err;
+
+ opts.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
+ switch (speed) {
+ case SPEED_10000:
+ cs_th = DPAA_CS_THRESHOLD_10G;
+ break;
+ case SPEED_1000:
+ default:
+ cs_th = DPAA_CS_THRESHOLD_1G;
+ break;
+ }
+ qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, cs_th, 1);
+
+ err = qman_update_cgr_safe(&priv->cgr_data.cgr, &opts);
+ if (err)
+ netdev_err(net_dev, "could not update speed: %d\n", err);
+}
+
+static inline void dpaa_setup_ingress(const struct dpaa_priv *priv,
+ struct dpaa_fq *fq,
+ const struct qman_fq *template)
+{
+ fq->fq_base = *template;
+ fq->net_dev = priv->net_dev;
+
+ fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
+ fq->channel = priv->channel;
+}
+
+static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
+ struct dpaa_fq *fq,
+ struct fman_port *port,
+ const struct qman_fq *template)
+{
+ fq->fq_base = *template;
+ fq->net_dev = priv->net_dev;
+
+ if (port) {
+ fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
+ fq->channel = (u16)fman_port_get_qman_channel_id(port);
+ } else {
+ fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
+ }
+}
+
+static void dpaa_fq_setup(struct dpaa_priv *priv,
+ const struct dpaa_fq_cbs *fq_cbs,
+ struct fman_port *tx_port)
+{
+ int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu;
+ const cpumask_t *affine_cpus = qman_affine_cpus();
+ u16 channels[NR_CPUS];
+ struct dpaa_fq *fq;
+
+ for_each_cpu_and(cpu, affine_cpus, cpu_online_mask)
+ channels[num_portals++] = qman_affine_channel(cpu);
+
+ if (num_portals == 0)
+ dev_err(priv->net_dev->dev.parent,
+ "No Qman software (affine) channels found\n");
+
+ /* Initialize each FQ in the list */
+ list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
+ switch (fq->fq_type) {
+ case FQ_TYPE_RX_DEFAULT:
+ dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
+ break;
+ case FQ_TYPE_RX_ERROR:
+ dpaa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
+ break;
+ case FQ_TYPE_RX_PCD:
+ if (!num_portals)
+ continue;
+ dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
+ fq->channel = channels[portal_cnt++ % num_portals];
+ break;
+ case FQ_TYPE_TX:
+ dpaa_setup_egress(priv, fq, tx_port,
+ &fq_cbs->egress_ern);
+ /* If we have more Tx queues than the number of cores,
+ * just ignore the extra ones.
+ */
+ if (egress_cnt < DPAA_ETH_TXQ_NUM)
+ priv->egress_fqs[egress_cnt++] = &fq->fq_base;
+ break;
+ case FQ_TYPE_TX_CONF_MQ:
+ priv->conf_fqs[conf_cnt++] = &fq->fq_base;
+ fallthrough;
+ case FQ_TYPE_TX_CONFIRM:
+ dpaa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
+ break;
+ case FQ_TYPE_TX_ERROR:
+ dpaa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
+ break;
+ default:
+ dev_warn(priv->net_dev->dev.parent,
+ "Unknown FQ type detected!\n");
+ break;
+ }
+ }
+
+ /* Make sure all CPUs receive a corresponding Tx queue. */
+ while (egress_cnt < DPAA_ETH_TXQ_NUM) {
+ list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
+ if (fq->fq_type != FQ_TYPE_TX)
+ continue;
+ priv->egress_fqs[egress_cnt++] = &fq->fq_base;
+ if (egress_cnt == DPAA_ETH_TXQ_NUM)
+ break;
+ }
+ }
+}
+
+static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
+ struct qman_fq *tx_fq)
+{
+ int i;
+
+ for (i = 0; i < DPAA_ETH_TXQ_NUM; i++)
+ if (priv->egress_fqs[i] == tx_fq)
+ return i;
+
+ return -EINVAL;
+}
+
+static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
+{
+ const struct dpaa_priv *priv;
+ struct qman_fq *confq = NULL;
+ struct qm_mcc_initfq initfq;
+ struct device *dev;
+ struct qman_fq *fq;
+ int queue_id;
+ int err;
+
+ priv = netdev_priv(dpaa_fq->net_dev);
+ dev = dpaa_fq->net_dev->dev.parent;
+
+ if (dpaa_fq->fqid == 0)
+ dpaa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
+
+ dpaa_fq->init = !(dpaa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);
+
+ err = qman_create_fq(dpaa_fq->fqid, dpaa_fq->flags, &dpaa_fq->fq_base);
+ if (err) {
+ dev_err(dev, "qman_create_fq() failed\n");
+ return err;
+ }
+ fq = &dpaa_fq->fq_base;
+
+ if (dpaa_fq->init) {
+ memset(&initfq, 0, sizeof(initfq));
+
+ initfq.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL);
+ /* Note: we may get to keep an empty FQ in cache */
+ initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_PREFERINCACHE);
+
+ /* Try to reduce the number of portal interrupts for
+ * Tx Confirmation FQs.
+ */
+ if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
+ initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_AVOIDBLOCK);
+
+ /* FQ placement */
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_DESTWQ);
+
+ qm_fqd_set_destwq(&initfq.fqd, dpaa_fq->channel, dpaa_fq->wq);
+
+ /* Put all egress queues in a congestion group of their own.
+ * Sensu stricto, the Tx confirmation queues are Rx FQs,
+ * rather than Tx - but they nonetheless account for the
+ * memory footprint on behalf of egress traffic. We therefore
+ * place them in the netdev's CGR, along with the Tx FQs.
+ */
+ if (dpaa_fq->fq_type == FQ_TYPE_TX ||
+ dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
+ dpaa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
+ initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
+ initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid;
+ /* Set a fixed overhead accounting, in an attempt to
+ * reduce the impact of fixed-size skb shells and the
+ * driver's needed headroom on system memory. This is
+ * especially the case when the egress traffic is
+ * composed of small datagrams.
+ * Unfortunately, QMan's OAL value is capped to an
+ * insufficient value, but even that is better than
+ * no overhead accounting at all.
+ */
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
+ qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
+ qm_fqd_set_oal(&initfq.fqd,
+ min(sizeof(struct sk_buff) +
+ priv->tx_headroom,
+ (size_t)FSL_QMAN_MAX_OAL));
+ }
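+
+ /* Sizing note (illustrative): FSL_QMAN_MAX_OAL is 127, while
+ * sizeof(struct sk_buff) alone is already well above that on
+ * common configurations, so the min() above effectively always
+ * programs the maximum overhead accounting value of 127 bytes.
+ */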
+
+ if (td_enable) {
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_TDTHRESH);
+ qm_fqd_set_taildrop(&initfq.fqd, DPAA_FQ_TD, 1);
+ initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_TDE);
+ }
+
+ if (dpaa_fq->fq_type == FQ_TYPE_TX) {
+ queue_id = dpaa_tx_fq_to_id(priv, &dpaa_fq->fq_base);
+ if (queue_id >= 0)
+ confq = priv->conf_fqs[queue_id];
+ if (confq) {
+ initfq.we_mask |=
+ cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
+ /* ContextA: OVOM=1(use contextA2 bits instead of ICAD)
+ * A2V=1 (contextA A2 field is valid)
+ * A0V=1 (contextA A0 field is valid)
+ * B0V=1 (contextB field is valid)
+ * ContextA A2: EBD=1 (deallocate buffers inside FMan)
+ * ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
+ */
+ qm_fqd_context_a_set64(&initfq.fqd,
+ 0x1e00000080000000ULL);
+ }
+ }
+
+ /* Put all the ingress queues in our "ingress CGR". */
+ if (priv->use_ingress_cgr &&
+ (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
+ dpaa_fq->fq_type == FQ_TYPE_RX_ERROR ||
+ dpaa_fq->fq_type == FQ_TYPE_RX_PCD)) {
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
+ initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
+ initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid;
+ /* Set a fixed overhead accounting, just like for the
+ * egress CGR.
+ */
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
+ qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
+ qm_fqd_set_oal(&initfq.fqd,
+ min(sizeof(struct sk_buff) +
+ priv->tx_headroom,
+ (size_t)FSL_QMAN_MAX_OAL));
+ }
+
+ /* Initialization common to all ingress queues */
+ if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
+ initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE |
+ QM_FQCTRL_CTXASTASHING);
+ initfq.fqd.context_a.stashing.exclusive =
+ QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
+ QM_STASHING_EXCL_ANNOTATION;
+ qm_fqd_set_stashing(&initfq.fqd, 1, 2,
+ DIV_ROUND_UP(sizeof(struct qman_fq),
+ 64));
+ }
+
+ err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
+ if (err < 0) {
+ dev_err(dev, "qman_init_fq(%u) = %d\n",
+ qman_fq_fqid(fq), err);
+ qman_destroy_fq(fq);
+ return err;
+ }
+ }
+
+ dpaa_fq->fqid = qman_fq_fqid(fq);
+
+ if (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
+ dpaa_fq->fq_type == FQ_TYPE_RX_PCD) {
+ err = xdp_rxq_info_reg(&dpaa_fq->xdp_rxq, dpaa_fq->net_dev,
+ dpaa_fq->fqid, 0);
+ if (err) {
+ dev_err(dev, "xdp_rxq_info_reg() = %d\n", err);
+ return err;
+ }
+
+ err = xdp_rxq_info_reg_mem_model(&dpaa_fq->xdp_rxq,
+ MEM_TYPE_PAGE_ORDER0, NULL);
+ if (err) {
+ dev_err(dev, "xdp_rxq_info_reg_mem_model() = %d\n",
+ err);
+ xdp_rxq_info_unreg(&dpaa_fq->xdp_rxq);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int dpaa_fq_free_entry(struct device *dev, struct qman_fq *fq)
+{
+ const struct dpaa_priv *priv;
+ struct dpaa_fq *dpaa_fq;
+ int err, error;
+
+ err = 0;
+
+ dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
+ priv = netdev_priv(dpaa_fq->net_dev);
+
+ if (dpaa_fq->init) {
+ err = qman_retire_fq(fq, NULL);
+ if (err < 0 && netif_msg_drv(priv))
+ dev_err(dev, "qman_retire_fq(%u) = %d\n",
+ qman_fq_fqid(fq), err);
+
+ error = qman_oos_fq(fq);
+ if (error < 0 && netif_msg_drv(priv)) {
+ dev_err(dev, "qman_oos_fq(%u) = %d\n",
+ qman_fq_fqid(fq), error);
+ if (err >= 0)
+ err = error;
+ }
+ }
+
+ if ((dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
+ dpaa_fq->fq_type == FQ_TYPE_RX_PCD) &&
+ xdp_rxq_info_is_reg(&dpaa_fq->xdp_rxq))
+ xdp_rxq_info_unreg(&dpaa_fq->xdp_rxq);
+
+ qman_destroy_fq(fq);
+ list_del(&dpaa_fq->list);
+
+ return err;
+}
+
+static int dpaa_fq_free(struct device *dev, struct list_head *list)
+{
+ struct dpaa_fq *dpaa_fq, *tmp;
+ int err, error;
+
+ err = 0;
+ list_for_each_entry_safe(dpaa_fq, tmp, list, list) {
+ error = dpaa_fq_free_entry(dev, (struct qman_fq *)dpaa_fq);
+ if (error < 0 && err >= 0)
+ err = error;
+ }
+
+ return err;
+}
+
+static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
+ struct dpaa_fq *defq,
+ struct dpaa_buffer_layout *buf_layout)
+{
+ struct fman_buffer_prefix_content buf_prefix_content;
+ struct fman_port_params params;
+ int err;
+
+ memset(&params, 0, sizeof(params));
+ memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
+
+ buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
+ buf_prefix_content.pass_prs_result = true;
+ buf_prefix_content.pass_hash_result = true;
+ buf_prefix_content.pass_time_stamp = true;
+ buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
+
+ params.specific_params.non_rx_params.err_fqid = errq->fqid;
+ params.specific_params.non_rx_params.dflt_fqid = defq->fqid;
+
+ err = fman_port_config(port, &params);
+ if (err) {
+ pr_err("%s: fman_port_config failed\n", __func__);
+ return err;
+ }
+
+ err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
+ if (err) {
+ pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
+ __func__);
+ return err;
+ }
+
+ err = fman_port_init(port);
+ if (err)
+ pr_err("%s: fm_port_init failed\n", __func__);
+
+ return err;
+}
+
+static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp *bp,
+ struct dpaa_fq *errq,
+ struct dpaa_fq *defq, struct dpaa_fq *pcdq,
+ struct dpaa_buffer_layout *buf_layout)
+{
+ struct fman_buffer_prefix_content buf_prefix_content;
+ struct fman_port_rx_params *rx_p;
+ struct fman_port_params params;
+ int err;
+
+ memset(&params, 0, sizeof(params));
+ memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
+
+ buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
+ buf_prefix_content.pass_prs_result = true;
+ buf_prefix_content.pass_hash_result = true;
+ buf_prefix_content.pass_time_stamp = true;
+ buf_prefix_content.data_align = DPAA_FD_RX_DATA_ALIGNMENT;
+
+ rx_p = &params.specific_params.rx_params;
+ rx_p->err_fqid = errq->fqid;
+ rx_p->dflt_fqid = defq->fqid;
+ if (pcdq) {
+ rx_p->pcd_base_fqid = pcdq->fqid;
+ rx_p->pcd_fqs_count = DPAA_ETH_PCD_RXQ_NUM;
+ }
+
+ rx_p->ext_buf_pools.num_of_pools_used = 1;
+ rx_p->ext_buf_pools.ext_buf_pool[0].id = bp->bpid;
+ rx_p->ext_buf_pools.ext_buf_pool[0].size = (u16)bp->size;
+
+ err = fman_port_config(port, &params);
+ if (err) {
+ pr_err("%s: fman_port_config failed\n", __func__);
+ return err;
+ }
+
+ err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
+ if (err) {
+ pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
+ __func__);
+ return err;
+ }
+
+ err = fman_port_init(port);
+ if (err)
+ pr_err("%s: fm_port_init failed\n", __func__);
+
+ return err;
+}
+
+static int dpaa_eth_init_ports(struct mac_device *mac_dev,
+ struct dpaa_bp *bp,
+ struct fm_port_fqs *port_fqs,
+ struct dpaa_buffer_layout *buf_layout,
+ struct device *dev)
+{
+ struct fman_port *rxport = mac_dev->port[RX];
+ struct fman_port *txport = mac_dev->port[TX];
+ int err;
+
+ err = dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
+ port_fqs->tx_defq, &buf_layout[TX]);
+ if (err)
+ return err;
+
+ err = dpaa_eth_init_rx_port(rxport, bp, port_fqs->rx_errq,
+ port_fqs->rx_defq, port_fqs->rx_pcdq,
+ &buf_layout[RX]);
+
+ return err;
+}
+
+static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp,
+ struct bm_buffer *bmb, int cnt)
+{
+ int err;
+
+ err = bman_release(dpaa_bp->pool, bmb, cnt);
+ /* Should never occur, address anyway to avoid leaking the buffers */
+ if (WARN_ON(err) && dpaa_bp->free_buf_cb)
+ while (cnt-- > 0)
+ dpaa_bp->free_buf_cb(dpaa_bp, &bmb[cnt]);
+
+ return cnt;
+}
+
+static void dpaa_release_sgt_members(struct qm_sg_entry *sgt)
+{
+ struct bm_buffer bmb[DPAA_BUFF_RELEASE_MAX];
+ struct dpaa_bp *dpaa_bp;
+ int i = 0, j;
+
+ memset(bmb, 0, sizeof(bmb));
+
+ do {
+ dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
+ if (!dpaa_bp)
+ return;
+
+ j = 0;
+ do {
+ WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
+
+ bm_buffer_set64(&bmb[j], qm_sg_entry_get64(&sgt[i]));
+
+ j++; i++;
+ } while (j < ARRAY_SIZE(bmb) &&
+ !qm_sg_entry_is_final(&sgt[i - 1]) &&
+ sgt[i - 1].bpid == sgt[i].bpid);
+
+ dpaa_bman_release(dpaa_bp, bmb, j);
+ } while (!qm_sg_entry_is_final(&sgt[i - 1]));
+}
+
+static void dpaa_fd_release(const struct net_device *net_dev,
+ const struct qm_fd *fd)
+{
+ struct qm_sg_entry *sgt;
+ struct dpaa_bp *dpaa_bp;
+ struct bm_buffer bmb;
+ dma_addr_t addr;
+ void *vaddr;
+
+ bmb.data = 0;
+ bm_buffer_set64(&bmb, qm_fd_addr(fd));
+
+ dpaa_bp = dpaa_bpid2pool(fd->bpid);
+ if (!dpaa_bp)
+ return;
+
+ if (qm_fd_get_format(fd) == qm_fd_sg) {
+ vaddr = phys_to_virt(qm_fd_addr(fd));
+ sgt = vaddr + qm_fd_get_offset(fd);
+
+ dma_unmap_page(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd),
+ DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
+
+ dpaa_release_sgt_members(sgt);
+
+ addr = dma_map_page(dpaa_bp->priv->rx_dma_dev,
+ virt_to_page(vaddr), 0, DPAA_BP_RAW_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dpaa_bp->priv->rx_dma_dev, addr)) {
+ netdev_err(net_dev, "DMA mapping failed\n");
+ return;
+ }
+ bm_buffer_set64(&bmb, addr);
+ }
+
+ dpaa_bman_release(dpaa_bp, &bmb, 1);
+}
+
+static void count_ern(struct dpaa_percpu_priv *percpu_priv,
+ const union qm_mr_entry *msg)
+{
+ switch (msg->ern.rc & QM_MR_RC_MASK) {
+ case QM_MR_RC_CGR_TAILDROP:
+ percpu_priv->ern_cnt.cg_tdrop++;
+ break;
+ case QM_MR_RC_WRED:
+ percpu_priv->ern_cnt.wred++;
+ break;
+ case QM_MR_RC_ERROR:
+ percpu_priv->ern_cnt.err_cond++;
+ break;
+ case QM_MR_RC_ORPWINDOW_EARLY:
+ percpu_priv->ern_cnt.early_window++;
+ break;
+ case QM_MR_RC_ORPWINDOW_LATE:
+ percpu_priv->ern_cnt.late_window++;
+ break;
+ case QM_MR_RC_FQ_TAILDROP:
+ percpu_priv->ern_cnt.fq_tdrop++;
+ break;
+ case QM_MR_RC_ORPWINDOW_RETIRED:
+ percpu_priv->ern_cnt.fq_retired++;
+ break;
+ case QM_MR_RC_ORP_ZERO:
+ percpu_priv->ern_cnt.orp_zero++;
+ break;
+ }
+}
+
+/* Turn on HW checksum computation for this outgoing frame.
+ * If the current protocol is not something we support in this regard
+ * (or if the stack has already computed the SW checksum), we do nothing.
+ *
+ * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
+ * otherwise.
+ *
+ * Note that this function may modify the fd->cmd field and the skb data buffer
+ * (the Parse Results area).
+ */
+static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
+ struct sk_buff *skb,
+ struct qm_fd *fd,
+ void *parse_results)
+{
+ struct fman_prs_result *parse_result;
+ u16 ethertype = ntohs(skb->protocol);
+ struct ipv6hdr *ipv6h = NULL;
+ struct iphdr *iph;
+ int retval = 0;
+ u8 l4_proto;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ /* Note: L3 csum seems to be already computed in sw, but we can't choose
+ * L4 alone from the FM configuration anyway.
+ */
+
+ /* Fill in some fields of the Parse Results array, so the FMan
+ * can find them as if they came from the FMan Parser.
+ */
+ parse_result = (struct fman_prs_result *)parse_results;
+
+ /* If we're dealing with VLAN, get the real Ethernet type */
+ if (ethertype == ETH_P_8021Q) {
+ /* We can't always assume the MAC header is set correctly
+ * by the stack, so reset to beginning of skb->data
+ */
+ skb_reset_mac_header(skb);
+ ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
+ }
+
+ /* Fill in the relevant L3 parse result fields
+ * and read the L4 protocol type
+ */
+ switch (ethertype) {
+ case ETH_P_IP:
+ parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
+ iph = ip_hdr(skb);
+ WARN_ON(!iph);
+ l4_proto = iph->protocol;
+ break;
+ case ETH_P_IPV6:
+ parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
+ ipv6h = ipv6_hdr(skb);
+ WARN_ON(!ipv6h);
+ l4_proto = ipv6h->nexthdr;
+ break;
+ default:
+ /* We shouldn't even be here */
+ if (net_ratelimit())
+ netif_alert(priv, tx_err, priv->net_dev,
+ "Can't compute HW csum for L3 proto 0x%x\n",
+ ntohs(skb->protocol));
+ retval = -EIO;
+ goto return_error;
+ }
+
+ /* Fill in the relevant L4 parse result fields */
+ switch (l4_proto) {
+ case IPPROTO_UDP:
+ parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
+ break;
+ case IPPROTO_TCP:
+ parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
+ break;
+ default:
+ if (net_ratelimit())
+ netif_alert(priv, tx_err, priv->net_dev,
+ "Can't compute HW csum for L4 proto 0x%x\n",
+ l4_proto);
+ retval = -EIO;
+ goto return_error;
+ }
+
+ /* At index 0 is IPOffset_1 as defined in the Parse Results */
+ parse_result->ip_off[0] = (u8)skb_network_offset(skb);
+ parse_result->l4_off = (u8)skb_transport_offset(skb);
+
+ /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
+ fd->cmd |= cpu_to_be32(FM_FD_CMD_RPD | FM_FD_CMD_DTC);
+
+ /* On P1023 and similar platforms fd->cmd interpretation could
+ * be disabled by setting CONTEXT_A bit ICMD; currently this bit
+ * is not set so we do not need to check; in the future, if/when
+ * using context_a we need to check this bit
+ */
+
+return_error:
+ return retval;
+}
+
+static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
+{
+ struct net_device *net_dev = dpaa_bp->priv->net_dev;
+ struct bm_buffer bmb[8];
+ dma_addr_t addr;
+ struct page *p;
+ u8 i;
+
+ for (i = 0; i < 8; i++) {
+ p = dev_alloc_pages(0);
+ if (unlikely(!p)) {
+ netdev_err(net_dev, "dev_alloc_pages() failed\n");
+ goto release_previous_buffs;
+ }
+
+ addr = dma_map_page(dpaa_bp->priv->rx_dma_dev, p, 0,
+ DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dpaa_bp->priv->rx_dma_dev,
+ addr))) {
+ netdev_err(net_dev, "DMA map failed\n");
+ goto release_previous_buffs;
+ }
+
+ bmb[i].data = 0;
+ bm_buffer_set64(&bmb[i], addr);
+ }
+
+release_bufs:
+ return dpaa_bman_release(dpaa_bp, bmb, i);
+
+release_previous_buffs:
+ WARN_ONCE(1, "dpaa_eth: failed to add buffers on Rx\n");
+
+ bm_buffer_set64(&bmb[i], 0);
+ /* Avoid releasing a completely null buffer; bman_release() requires
+ * at least one buffer.
+ */
+ if (likely(i))
+ goto release_bufs;
+
+ return 0;
+}
+
+static int dpaa_bp_seed(struct dpaa_bp *dpaa_bp)
+{
+ int i;
+
+ /* Give each CPU an allotment of "config_count" buffers */
+ for_each_possible_cpu(i) {
+ int *count_ptr = per_cpu_ptr(dpaa_bp->percpu_count, i);
+ int j;
+
+ /* Although we access another CPU's counters here
+ * we do it at boot time so it is safe
+ */
+ for (j = 0; j < dpaa_bp->config_count; j += 8)
+ *count_ptr += dpaa_bp_add_8_bufs(dpaa_bp);
+ }
+ return 0;
+}
+
+/* Add buffers/(pages) for Rx processing whenever bpool count falls below
+ * REFILL_THRESHOLD.
+ */
+static int dpaa_eth_refill_bpool(struct dpaa_bp *dpaa_bp, int *countptr)
+{
+ int count = *countptr;
+ int new_bufs;
+
+ if (unlikely(count < FSL_DPAA_ETH_REFILL_THRESHOLD)) {
+ do {
+ new_bufs = dpaa_bp_add_8_bufs(dpaa_bp);
+ if (unlikely(!new_bufs)) {
+ /* Avoid looping forever if we've temporarily
+ * run out of memory. We'll try again at the
+ * next NAPI cycle.
+ */
+ break;
+ }
+ count += new_bufs;
+ } while (count < FSL_DPAA_ETH_MAX_BUF_COUNT);
+
+ *countptr = count;
+ if (unlikely(count < FSL_DPAA_ETH_MAX_BUF_COUNT))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
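+
+/* Numeric illustration (made-up count): if the per-CPU buffer count has
+ * dropped to 72, which is below FSL_DPAA_ETH_REFILL_THRESHOLD (80), the
+ * refill loop above adds pages in batches of 8 until the count reaches
+ * FSL_DPAA_ETH_MAX_BUF_COUNT (128) again - seven batches in this case.
+ */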
+
+static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
+{
+ struct dpaa_bp *dpaa_bp;
+ int *countptr;
+
+ dpaa_bp = priv->dpaa_bp;
+ if (!dpaa_bp)
+ return -EINVAL;
+ countptr = this_cpu_ptr(dpaa_bp->percpu_count);
+
+ return dpaa_eth_refill_bpool(dpaa_bp, countptr);
+}
+
+/* Cleanup function for outgoing frame descriptors that were built on the Tx
+ * path, either contiguous frames or scatter/gather ones.
+ * Skb freeing is not handled here.
+ *
+ * This function may be called on error paths in the Tx function, so guard
+ * against cases where not all of the relevant fd fields were filled in. To
+ * avoid reading an invalid transmission timestamp on those error paths,
+ * callers pass ts as false.
+ *
+ * Return the skb backpointer, since for S/G frames the buffer containing it
+ * gets freed here.
+ *
+ * No skb backpointer is set when transmitting XDP frames. Clean up the buffer
+ * and return NULL in this case.
+ */
+static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
+ const struct qm_fd *fd, bool ts)
+{
+ const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
+ struct device *dev = priv->net_dev->dev.parent;
+ struct skb_shared_hwtstamps shhwtstamps;
+ dma_addr_t addr = qm_fd_addr(fd);
+ void *vaddr = phys_to_virt(addr);
+ const struct qm_sg_entry *sgt;
+ struct dpaa_eth_swbp *swbp;
+ struct sk_buff *skb;
+ u64 ns;
+ int i;
+
+ if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
+ dma_unmap_page(priv->tx_dma_dev, addr,
+ qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
+ dma_dir);
+
+ /* The SGT buffer was allocated with dev_alloc_pages() on the Tx
+ * path, so it is from lowmem.
+ */
+ sgt = vaddr + qm_fd_get_offset(fd);
+
+ /* sgt[0] is from lowmem, was dma_map_single()-ed */
+ dma_unmap_single(priv->tx_dma_dev, qm_sg_addr(&sgt[0]),
+ qm_sg_entry_get_len(&sgt[0]), dma_dir);
+
+ /* remaining pages were mapped with skb_frag_dma_map() */
+ for (i = 1; (i < DPAA_SGT_MAX_ENTRIES) &&
+ !qm_sg_entry_is_final(&sgt[i - 1]); i++) {
+ WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
+
+ dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[i]),
+ qm_sg_entry_get_len(&sgt[i]), dma_dir);
+ }
+ } else {
+ dma_unmap_single(priv->tx_dma_dev, addr,
+ qm_fd_get_offset(fd) + qm_fd_get_length(fd),
+ dma_dir);
+ }
+
+ swbp = (struct dpaa_eth_swbp *)vaddr;
+ skb = swbp->skb;
+
+ /* No skb backpointer is set when running XDP. An xdp_frame
+ * backpointer is saved instead.
+ */
+ if (!skb) {
+ xdp_return_frame(swbp->xdpf);
+ return NULL;
+ }
+
+ /* DMA unmapping is required before accessing the HW provided info */
+ if (ts && priv->tx_tstamp &&
+ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+
+ if (!fman_port_get_tstamp(priv->mac_dev->port[TX], vaddr,
+ &ns)) {
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ } else {
+ dev_warn(dev, "fman_port_get_tstamp failed!\n");
+ }
+ }
+
+ if (qm_fd_get_format(fd) == qm_fd_sg)
+ /* Free the page that we allocated on Tx for the SGT */
+ free_pages((unsigned long)vaddr, 0);
+
+ return skb;
+}
+
+static u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd)
+{
+ /* The parser has run and performed L4 checksum validation.
+ * We know there were no parser errors (and implicitly no
+ * L4 csum error), otherwise we wouldn't be here.
+ */
+ if ((priv->net_dev->features & NETIF_F_RXCSUM) &&
+ (be32_to_cpu(fd->status) & FM_FD_STAT_L4CV))
+ return CHECKSUM_UNNECESSARY;
+
+ /* We're here because either the parser didn't run or the L4 checksum
+ * was not verified. This may include the case of a UDP frame with
+ * checksum zero or an L4 proto other than TCP/UDP
+ */
+ return CHECKSUM_NONE;
+}
+
+#define PTR_IS_ALIGNED(x, a) (IS_ALIGNED((unsigned long)(x), (a)))
+
+/* Build a linear skb around the received buffer.
+ * We are guaranteed there is enough room at the end of the data buffer to
+ * accommodate the shared info area of the skb.
+ */
+static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
+ const struct qm_fd *fd)
+{
+ ssize_t fd_off = qm_fd_get_offset(fd);
+ dma_addr_t addr = qm_fd_addr(fd);
+ struct dpaa_bp *dpaa_bp;
+ struct sk_buff *skb;
+ void *vaddr;
+
+ vaddr = phys_to_virt(addr);
+ WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
+
+ dpaa_bp = dpaa_bpid2pool(fd->bpid);
+ if (!dpaa_bp)
+ goto free_buffer;
+
+ skb = build_skb(vaddr, dpaa_bp->size +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+ if (WARN_ONCE(!skb, "Build skb failure on Rx\n"))
+ goto free_buffer;
+ skb_reserve(skb, fd_off);
+ skb_put(skb, qm_fd_get_length(fd));
+
+ skb->ip_summed = rx_csum_offload(priv, fd);
+
+ return skb;
+
+free_buffer:
+ free_pages((unsigned long)vaddr, 0);
+ return NULL;
+}
+
+/* Build an skb with the data of the first S/G entry in the linear portion and
+ * the rest of the frame as skb fragments.
+ *
+ * The page fragment holding the S/G Table is recycled here.
+ */
+static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
+ const struct qm_fd *fd)
+{
+ ssize_t fd_off = qm_fd_get_offset(fd);
+ dma_addr_t addr = qm_fd_addr(fd);
+ const struct qm_sg_entry *sgt;
+ struct page *page, *head_page;
+ struct dpaa_bp *dpaa_bp;
+ void *vaddr, *sg_vaddr;
+ int frag_off, frag_len;
+ struct sk_buff *skb;
+ dma_addr_t sg_addr;
+ int page_offset;
+ unsigned int sz;
+ int *count_ptr;
+ int i, j;
+
+ vaddr = phys_to_virt(addr);
+ WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
+
+ /* Iterate through the SGT entries and add data buffers to the skb */
+ sgt = vaddr + fd_off;
+ skb = NULL;
+ for (i = 0; i < DPAA_SGT_MAX_ENTRIES; i++) {
+ /* Extension bit is not supported */
+ WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
+
+ sg_addr = qm_sg_addr(&sgt[i]);
+ sg_vaddr = phys_to_virt(sg_addr);
+ WARN_ON(!PTR_IS_ALIGNED(sg_vaddr, SMP_CACHE_BYTES));
+
+ dma_unmap_page(priv->rx_dma_dev, sg_addr,
+ DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
+
+ /* We may use multiple Rx pools */
+ dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
+ if (!dpaa_bp)
+ goto free_buffers;
+
+ if (!skb) {
+ sz = dpaa_bp->size +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ skb = build_skb(sg_vaddr, sz);
+ if (WARN_ON(!skb))
+ goto free_buffers;
+
+ skb->ip_summed = rx_csum_offload(priv, fd);
+
+ /* Make sure forwarded skbs will have enough space
+ * on Tx, if extra headers are added.
+ */
+ WARN_ON(fd_off != priv->rx_headroom);
+ skb_reserve(skb, fd_off);
+ skb_put(skb, qm_sg_entry_get_len(&sgt[i]));
+ } else {
+ /* Not the first S/G entry; all data from buffer will
+ * be added in an skb fragment; fragment index is offset
+ * by one since first S/G entry was incorporated in the
+ * linear part of the skb.
+ *
+ * Caution: 'page' may be a tail page.
+ */
+ page = virt_to_page(sg_vaddr);
+ head_page = virt_to_head_page(sg_vaddr);
+
+ /* Compute offset in (possibly tail) page */
+ page_offset = ((unsigned long)sg_vaddr &
+ (PAGE_SIZE - 1)) +
+ (page_address(page) - page_address(head_page));
+ /* page_offset only refers to the beginning of sgt[i],
+ * but the buffer itself may have an internal offset.
+ */
+ frag_off = qm_sg_entry_get_off(&sgt[i]) + page_offset;
+ frag_len = qm_sg_entry_get_len(&sgt[i]);
+ /* skb_add_rx_frag() does no checking on the page; if
+ * we pass it a tail page, we'll end up with
+ * bad page accounting and eventually with segfaults.
+ */
+ skb_add_rx_frag(skb, i - 1, head_page, frag_off,
+ frag_len, dpaa_bp->size);
+ }
+
+ /* Update the pool count for the current {cpu x bpool} */
+ count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
+ (*count_ptr)--;
+
+ if (qm_sg_entry_is_final(&sgt[i]))
+ break;
+ }
+ WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
+
+ /* free the SG table buffer */
+ free_pages((unsigned long)vaddr, 0);
+
+ return skb;
+
+free_buffers:
+ /* free all the SG entries */
+ for (j = 0; j < DPAA_SGT_MAX_ENTRIES ; j++) {
+ sg_addr = qm_sg_addr(&sgt[j]);
+ sg_vaddr = phys_to_virt(sg_addr);
+ /* all pages 0..i were already unmapped */
+ if (j > i)
+ dma_unmap_page(priv->rx_dma_dev, qm_sg_addr(&sgt[j]),
+ DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
+ free_pages((unsigned long)sg_vaddr, 0);
+ /* counters 0..i-1 were decremented */
+ if (j >= i) {
+ dpaa_bp = dpaa_bpid2pool(sgt[j].bpid);
+ if (dpaa_bp) {
+ count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
+ (*count_ptr)--;
+ }
+ }
+
+ if (qm_sg_entry_is_final(&sgt[j]))
+ break;
+ }
+ /* free the SGT fragment */
+ free_pages((unsigned long)vaddr, 0);
+
+ return NULL;
+}
+
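+/* Build a contiguous frame descriptor around a linear skb. The skb
+ * backpointer is stored in the buffer headroom so it can be retrieved on
+ * Tx confirmation, and the whole headroom + data area is DMA-mapped.
+ */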
+static int skb_to_contig_fd(struct dpaa_priv *priv,
+ struct sk_buff *skb, struct qm_fd *fd,
+ int *offset)
+{
+ struct net_device *net_dev = priv->net_dev;
+ enum dma_data_direction dma_dir;
+ struct dpaa_eth_swbp *swbp;
+ unsigned char *buff_start;
+ dma_addr_t addr;
+ int err;
+
+ /* We are guaranteed to have at least tx_headroom bytes
+ * available, so just use that for offset.
+ */
+ fd->bpid = FSL_DPAA_BPID_INV;
+ buff_start = skb->data - priv->tx_headroom;
+ dma_dir = DMA_TO_DEVICE;
+
+ swbp = (struct dpaa_eth_swbp *)buff_start;
+ swbp->skb = skb;
+
+ /* Enable L3/L4 hardware checksum computation.
+ *
+ * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
+ * need to write into the skb.
+ */
+ err = dpaa_enable_tx_csum(priv, skb, fd,
+ buff_start + DPAA_TX_PRIV_DATA_SIZE);
+ if (unlikely(err < 0)) {
+ if (net_ratelimit())
+ netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
+ err);
+ return err;
+ }
+
+ /* Fill in the rest of the FD fields */
+ qm_fd_set_contig(fd, priv->tx_headroom, skb->len);
+ fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
+
+ /* Map the entire buffer size that may be seen by FMan, but no more */
+ addr = dma_map_single(priv->tx_dma_dev, buff_start,
+ priv->tx_headroom + skb->len, dma_dir);
+ if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
+ if (net_ratelimit())
+ netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
+ return -EINVAL;
+ }
+ qm_fd_addr_set64(fd, addr);
+
+ return 0;
+}
+
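+/* Build a scatter/gather frame descriptor. A separate page holds the SGT;
+ * sgt[0] maps the linear part of the skb and the remaining entries map its
+ * page fragments. The skb backpointer is stored at the start of that page.
+ */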
+static int skb_to_sg_fd(struct dpaa_priv *priv,
+ struct sk_buff *skb, struct qm_fd *fd)
+{
+ const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
+ const int nr_frags = skb_shinfo(skb)->nr_frags;
+ struct net_device *net_dev = priv->net_dev;
+ struct dpaa_eth_swbp *swbp;
+ struct qm_sg_entry *sgt;
+ void *buff_start;
+ skb_frag_t *frag;
+ dma_addr_t addr;
+ size_t frag_len;
+ struct page *p;
+ int i, j, err;
+
+ /* get a page to store the SGTable */
+ p = dev_alloc_pages(0);
+ if (unlikely(!p)) {
+ netdev_err(net_dev, "dev_alloc_pages() failed\n");
+ return -ENOMEM;
+ }
+ buff_start = page_address(p);
+
+ /* Enable L3/L4 hardware checksum computation.
+ *
+ * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
+ * need to write into the skb.
+ */
+ err = dpaa_enable_tx_csum(priv, skb, fd,
+ buff_start + DPAA_TX_PRIV_DATA_SIZE);
+ if (unlikely(err < 0)) {
+ if (net_ratelimit())
+ netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
+ err);
+ goto csum_failed;
+ }
+
+ /* SGT[0] is used by the linear part */
+ sgt = (struct qm_sg_entry *)(buff_start + priv->tx_headroom);
+ frag_len = skb_headlen(skb);
+ qm_sg_entry_set_len(&sgt[0], frag_len);
+ sgt[0].bpid = FSL_DPAA_BPID_INV;
+ sgt[0].offset = 0;
+ addr = dma_map_single(priv->tx_dma_dev, skb->data,
+ skb_headlen(skb), dma_dir);
+ if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
+ netdev_err(priv->net_dev, "DMA mapping failed\n");
+ err = -EINVAL;
+ goto sg0_map_failed;
+ }
+ qm_sg_entry_set64(&sgt[0], addr);
+
+ /* populate the rest of SGT entries */
+ for (i = 0; i < nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ frag_len = skb_frag_size(frag);
+ WARN_ON(!skb_frag_page(frag));
+ addr = skb_frag_dma_map(priv->tx_dma_dev, frag, 0,
+ frag_len, dma_dir);
+ if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
+ netdev_err(priv->net_dev, "DMA mapping failed\n");
+ err = -EINVAL;
+ goto sg_map_failed;
+ }
+
+ qm_sg_entry_set_len(&sgt[i + 1], frag_len);
+ sgt[i + 1].bpid = FSL_DPAA_BPID_INV;
+ sgt[i + 1].offset = 0;
+
+ /* keep the offset in the address */
+ qm_sg_entry_set64(&sgt[i + 1], addr);
+ }
+
+ /* Set the final bit in the last used entry of the SGT */
+ qm_sg_entry_set_f(&sgt[nr_frags], frag_len);
+
+ /* set fd offset to priv->tx_headroom */
+ qm_fd_set_sg(fd, priv->tx_headroom, skb->len);
+
+ /* DMA map the SGT page */
+ swbp = (struct dpaa_eth_swbp *)buff_start;
+ swbp->skb = skb;
+
+ addr = dma_map_page(priv->tx_dma_dev, p, 0,
+ priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
+ if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
+ netdev_err(priv->net_dev, "DMA mapping failed\n");
+ err = -EINVAL;
+ goto sgt_map_failed;
+ }
+
+ fd->bpid = FSL_DPAA_BPID_INV;
+ fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
+ qm_fd_addr_set64(fd, addr);
+
+ return 0;
+
+sgt_map_failed:
+sg_map_failed:
+ for (j = 0; j < i; j++)
+ dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[j]),
+ qm_sg_entry_get_len(&sgt[j]), dma_dir);
+sg0_map_failed:
+csum_failed:
+ free_pages((unsigned long)buff_start, 0);
+
+ return err;
+}
+
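+/* Enqueue the frame descriptor on the egress FQ, retrying a bounded number
+ * of times if the QMan portal is busy. Tx statistics are updated on success.
+ */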
+static inline int dpaa_xmit(struct dpaa_priv *priv,
+ struct rtnl_link_stats64 *percpu_stats,
+ int queue,
+ struct qm_fd *fd)
+{
+ struct qman_fq *egress_fq;
+ int err, i;
+
+ egress_fq = priv->egress_fqs[queue];
+ if (fd->bpid == FSL_DPAA_BPID_INV)
+ fd->cmd |= cpu_to_be32(qman_fq_fqid(priv->conf_fqs[queue]));
+
+ /* Trace this Tx fd */
+ trace_dpaa_tx_fd(priv->net_dev, egress_fq, fd);
+
+ for (i = 0; i < DPAA_ENQUEUE_RETRIES; i++) {
+ err = qman_enqueue(egress_fq, fd);
+ if (err != -EBUSY)
+ break;
+ }
+
+ if (unlikely(err < 0)) {
+ percpu_stats->tx_fifo_errors++;
+ return err;
+ }
+
+ percpu_stats->tx_packets++;
+ percpu_stats->tx_bytes += qm_fd_get_length(fd);
+
+ return 0;
+}
+
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+static int dpaa_a050385_wa_skb(struct net_device *net_dev, struct sk_buff **s)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct sk_buff *new_skb, *skb = *s;
+ unsigned char *start, i;
+
+ /* check linear buffer alignment */
+ if (!PTR_IS_ALIGNED(skb->data, DPAA_A050385_ALIGN))
+ goto workaround;
+
+ /* linear buffers just need to have an aligned start */
+ if (!skb_is_nonlinear(skb))
+ return 0;
+
+ /* linear data size for nonlinear skbs needs to be aligned */
+ if (!IS_ALIGNED(skb_headlen(skb), DPAA_A050385_ALIGN))
+ goto workaround;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ /* all fragments need to have aligned start addresses */
+ if (!IS_ALIGNED(skb_frag_off(frag), DPAA_A050385_ALIGN))
+ goto workaround;
+
+ /* all but last fragment need to have aligned sizes */
+ if (!IS_ALIGNED(skb_frag_size(frag), DPAA_A050385_ALIGN) &&
+ (i < skb_shinfo(skb)->nr_frags - 1))
+ goto workaround;
+ }
+
+ return 0;
+
+workaround:
+ /* copy all the skb content into a new linear buffer */
+ new_skb = netdev_alloc_skb(net_dev, skb->len + DPAA_A050385_ALIGN - 1 +
+ priv->tx_headroom);
+ if (!new_skb)
+ return -ENOMEM;
+
+ /* NET_SKB_PAD bytes already reserved, adding up to tx_headroom */
+ skb_reserve(new_skb, priv->tx_headroom - NET_SKB_PAD);
+
+ /* Workaround for DPAA_A050385 requires data start to be aligned */
+ start = PTR_ALIGN(new_skb->data, DPAA_A050385_ALIGN);
+ if (start - new_skb->data)
+ skb_reserve(new_skb, start - new_skb->data);
+
+ skb_put(new_skb, skb->len);
+ skb_copy_bits(skb, 0, new_skb->data, skb->len);
+ skb_copy_header(new_skb, skb);
+ new_skb->dev = skb->dev;
+
+ /* Copy relevant timestamp info from the old skb to the new */
+ if (priv->tx_tstamp) {
+ skb_shinfo(new_skb)->tx_flags = skb_shinfo(skb)->tx_flags;
+ skb_shinfo(new_skb)->hwtstamps = skb_shinfo(skb)->hwtstamps;
+ skb_shinfo(new_skb)->tskey = skb_shinfo(skb)->tskey;
+ if (skb->sk)
+ skb_set_owner_w(new_skb, skb->sk);
+ }
+
+ /* We move the headroom when we align it, so we have to reset the
+ * network and transport header offsets relative to the new data
+ * pointer. The checksum offload relies on these offsets.
+ */
+ skb_set_network_header(new_skb, skb_network_offset(skb));
+ skb_set_transport_header(new_skb, skb_transport_offset(skb));
+
+ dev_kfree_skb(skb);
+ *s = new_skb;
+
+ return 0;
+}
+
+static int dpaa_a050385_wa_xdpf(struct dpaa_priv *priv,
+ struct xdp_frame **init_xdpf)
+{
+ struct xdp_frame *new_xdpf, *xdpf = *init_xdpf;
+ void *new_buff, *aligned_data;
+ struct page *p;
+ u32 data_shift;
+ int headroom;
+
+ /* Check the data alignment and make sure the headroom is large
+ * enough to store the xdpf backpointer. Use an aligned headroom
+ * value.
+ *
+ * Due to alignment constraints, we give XDP access to the full 256
+ * byte frame headroom. If the XDP program uses all of it, copy the
+ * data to a new buffer and make room for storing the backpointer.
+ */
+ if (PTR_IS_ALIGNED(xdpf->data, DPAA_FD_DATA_ALIGNMENT) &&
+ xdpf->headroom >= priv->tx_headroom) {
+ xdpf->headroom = priv->tx_headroom;
+ return 0;
+ }
+
+ /* Try to move the data inside the buffer just enough to align it and
+ * store the xdpf backpointer. If the available headroom isn't large
+ * enough, resort to allocating a new buffer and copying the data.
+ */
+ aligned_data = PTR_ALIGN_DOWN(xdpf->data, DPAA_FD_DATA_ALIGNMENT);
+ data_shift = xdpf->data - aligned_data;
+
+ /* The XDP frame's headroom needs to be large enough to accommodate
+ * shifting the data as well as storing the xdpf backpointer.
+ */
+ if (xdpf->headroom >= data_shift + priv->tx_headroom) {
+ memmove(aligned_data, xdpf->data, xdpf->len);
+ xdpf->data = aligned_data;
+ xdpf->headroom = priv->tx_headroom;
+ return 0;
+ }
+
+ /* The new xdp_frame is stored in the new buffer. Reserve enough space
+ * in the headroom for storing it along with the driver's private
+ * info. The headroom needs to be aligned to DPAA_FD_DATA_ALIGNMENT to
+ * guarantee the data's alignment in the buffer.
+ */
+ headroom = ALIGN(sizeof(*new_xdpf) + priv->tx_headroom,
+ DPAA_FD_DATA_ALIGNMENT);
+
+ /* Ensure the extended headroom and data don't overflow the buffer,
+ * while maintaining the mandatory tailroom.
+ */
+ if (headroom + xdpf->len > DPAA_BP_RAW_SIZE -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+ return -ENOMEM;
+
+ p = dev_alloc_pages(0);
+ if (unlikely(!p))
+ return -ENOMEM;
+
+ /* Copy the data to the new buffer at a properly aligned offset */
+ new_buff = page_address(p);
+ memcpy(new_buff + headroom, xdpf->data, xdpf->len);
+
+ /* Create an XDP frame around the new buffer in a similar fashion
+ * to xdp_convert_buff_to_frame.
+ */
+ new_xdpf = new_buff;
+ new_xdpf->data = new_buff + headroom;
+ new_xdpf->len = xdpf->len;
+ new_xdpf->headroom = priv->tx_headroom;
+ new_xdpf->frame_sz = DPAA_BP_RAW_SIZE;
+ new_xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
+
+ /* Release the initial buffer */
+ xdp_return_frame_rx_napi(xdpf);
+
+ *init_xdpf = new_xdpf;
+ return 0;
+}
+#endif
+
+static netdev_tx_t
+dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
+{
+ const int queue_mapping = skb_get_queue_mapping(skb);
+ bool nonlinear = skb_is_nonlinear(skb);
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa_percpu_priv *percpu_priv;
+ struct netdev_queue *txq;
+ struct dpaa_priv *priv;
+ struct qm_fd fd;
+ int offset = 0;
+ int err = 0;
+
+ priv = netdev_priv(net_dev);
+ percpu_priv = this_cpu_ptr(priv->percpu_priv);
+ percpu_stats = &percpu_priv->stats;
+
+ qm_fd_clear_fd(&fd);
+
+ if (!nonlinear) {
+ /* We're going to store the skb backpointer at the beginning
+ * of the data buffer, so we need a privately owned skb.
+ *
+ * We've made sure the skb is not shared via dev->priv_flags;
+ * we still need to verify that the skb head is not cloned.
+ */
+ if (skb_cow_head(skb, priv->tx_headroom))
+ goto enomem;
+
+ WARN_ON(skb_is_nonlinear(skb));
+ }
+
+ /* MAX_SKB_FRAGS is equal to or larger than DPAA_SGT_MAX_ENTRIES;
+ * make sure we don't feed FMan more fragments than it supports.
+ */
+ if (unlikely(nonlinear &&
+ (skb_shinfo(skb)->nr_frags >= DPAA_SGT_MAX_ENTRIES))) {
+ /* If the egress skb contains more fragments than we support
+ * we have no choice but to linearize it ourselves.
+ */
+ if (__skb_linearize(skb))
+ goto enomem;
+
+ nonlinear = skb_is_nonlinear(skb);
+ }
+
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+ if (unlikely(fman_has_errata_a050385())) {
+ if (dpaa_a050385_wa_skb(net_dev, &skb))
+ goto enomem;
+ nonlinear = skb_is_nonlinear(skb);
+ }
+#endif
+
+ if (nonlinear) {
+ /* Just create a S/G fd based on the skb */
+ err = skb_to_sg_fd(priv, skb, &fd);
+ percpu_priv->tx_frag_skbuffs++;
+ } else {
+ /* Create a contig FD from this skb */
+ err = skb_to_contig_fd(priv, skb, &fd, &offset);
+ }
+ if (unlikely(err < 0))
+ goto skb_to_fd_failed;
+
+ txq = netdev_get_tx_queue(net_dev, queue_mapping);
+
+ /* LLTX requires us to do our own update of trans_start */
+ txq_trans_cond_update(txq);
+
+ if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ }
+
+ if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
+ return NETDEV_TX_OK;
+
+ dpaa_cleanup_tx_fd(priv, &fd, false);
+skb_to_fd_failed:
+enomem:
+ percpu_stats->tx_errors++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static void dpaa_rx_error(struct net_device *net_dev,
+ const struct dpaa_priv *priv,
+ struct dpaa_percpu_priv *percpu_priv,
+ const struct qm_fd *fd,
+ u32 fqid)
+{
+ if (net_ratelimit())
+ netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n",
+ be32_to_cpu(fd->status) & FM_FD_STAT_RX_ERRORS);
+
+ percpu_priv->stats.rx_errors++;
+
+ if (be32_to_cpu(fd->status) & FM_FD_ERR_DMA)
+ percpu_priv->rx_errors.dme++;
+ if (be32_to_cpu(fd->status) & FM_FD_ERR_PHYSICAL)
+ percpu_priv->rx_errors.fpe++;
+ if (be32_to_cpu(fd->status) & FM_FD_ERR_SIZE)
+ percpu_priv->rx_errors.fse++;
+ if (be32_to_cpu(fd->status) & FM_FD_ERR_PRS_HDR_ERR)
+ percpu_priv->rx_errors.phe++;
+
+ dpaa_fd_release(net_dev, fd);
+}
+
+static void dpaa_tx_error(struct net_device *net_dev,
+ const struct dpaa_priv *priv,
+ struct dpaa_percpu_priv *percpu_priv,
+ const struct qm_fd *fd,
+ u32 fqid)
+{
+ struct sk_buff *skb;
+
+ if (net_ratelimit())
+ netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
+ be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS);
+
+ percpu_priv->stats.tx_errors++;
+
+ skb = dpaa_cleanup_tx_fd(priv, fd, false);
+ dev_kfree_skb(skb);
+}
+
+static int dpaa_eth_poll(struct napi_struct *napi, int budget)
+{
+ struct dpaa_napi_portal *np =
+ container_of(napi, struct dpaa_napi_portal, napi);
+ int cleaned;
+
+ np->xdp_act = 0;
+
+ cleaned = qman_p_poll_dqrr(np->p, budget);
+
+ if (np->xdp_act & XDP_REDIRECT)
+ xdp_do_flush();
+
+ if (cleaned < budget) {
+ napi_complete_done(napi, cleaned);
+ qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
+ } else if (np->down) {
+ qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
+ }
+
+ return cleaned;
+}
+
+static void dpaa_tx_conf(struct net_device *net_dev,
+ const struct dpaa_priv *priv,
+ struct dpaa_percpu_priv *percpu_priv,
+ const struct qm_fd *fd,
+ u32 fqid)
+{
+ struct sk_buff *skb;
+
+ if (unlikely(be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS)) {
+ if (net_ratelimit())
+ netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
+ be32_to_cpu(fd->status) &
+ FM_FD_STAT_TX_ERRORS);
+
+ percpu_priv->stats.tx_errors++;
+ }
+
+ percpu_priv->tx_confirm++;
+
+ skb = dpaa_cleanup_tx_fd(priv, fd, true);
+
+ consume_skb(skb);
+}
+
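+/* When called from the dequeue callback with sched_napi set, disable the
+ * QMan dequeue interrupt and hand further processing over to NAPI.
+ * Returns 1 if NAPI was scheduled, 0 otherwise.
+ */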
+static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv,
+ struct qman_portal *portal, bool sched_napi)
+{
+ if (sched_napi) {
+ /* Disable QMan IRQ and invoke NAPI */
+ qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);
+
+ percpu_priv->np.p = portal;
+ napi_schedule(&percpu_priv->np.napi);
+ percpu_priv->in_interrupt++;
+ return 1;
+ }
+ return 0;
+}
+
+static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq,
+ bool sched_napi)
+{
+ struct dpaa_fq *dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
+ struct dpaa_percpu_priv *percpu_priv;
+ struct net_device *net_dev;
+ struct dpaa_bp *dpaa_bp;
+ struct dpaa_priv *priv;
+
+ net_dev = dpaa_fq->net_dev;
+ priv = netdev_priv(net_dev);
+ dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
+ if (!dpaa_bp)
+ return qman_cb_dqrr_consume;
+
+ percpu_priv = this_cpu_ptr(priv->percpu_priv);
+
+ if (dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi))
+ return qman_cb_dqrr_stop;
+
+ dpaa_eth_refill_bpools(priv);
+ dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
+
+ return qman_cb_dqrr_consume;
+}
+
+static int dpaa_xdp_xmit_frame(struct net_device *net_dev,
+ struct xdp_frame *xdpf)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa_percpu_priv *percpu_priv;
+ struct dpaa_eth_swbp *swbp;
+ struct netdev_queue *txq;
+ void *buff_start;
+ struct qm_fd fd;
+ dma_addr_t addr;
+ int err;
+
+ percpu_priv = this_cpu_ptr(priv->percpu_priv);
+ percpu_stats = &percpu_priv->stats;
+
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+ if (unlikely(fman_has_errata_a050385())) {
+ if (dpaa_a050385_wa_xdpf(priv, &xdpf)) {
+ err = -ENOMEM;
+ goto out_error;
+ }
+ }
+#endif
+
+ if (xdpf->headroom < DPAA_TX_PRIV_DATA_SIZE) {
+ err = -EINVAL;
+ goto out_error;
+ }
+
+ buff_start = xdpf->data - xdpf->headroom;
+
+ /* Leave the skb backpointer at the start of the buffer empty.
+ * Save the XDP frame instead, for easy cleanup on confirmation.
+ */
+ swbp = (struct dpaa_eth_swbp *)buff_start;
+ swbp->skb = NULL;
+ swbp->xdpf = xdpf;
+
+ qm_fd_clear_fd(&fd);
+ fd.bpid = FSL_DPAA_BPID_INV;
+ fd.cmd |= cpu_to_be32(FM_FD_CMD_FCO);
+ qm_fd_set_contig(&fd, xdpf->headroom, xdpf->len);
+
+ addr = dma_map_single(priv->tx_dma_dev, buff_start,
+ xdpf->headroom + xdpf->len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
+ err = -EINVAL;
+ goto out_error;
+ }
+
+ qm_fd_addr_set64(&fd, addr);
+
+ /* Bump the trans_start */
+ txq = netdev_get_tx_queue(net_dev, smp_processor_id());
+ txq_trans_cond_update(txq);
+
+ err = dpaa_xmit(priv, percpu_stats, smp_processor_id(), &fd);
+ if (err) {
+ dma_unmap_single(priv->tx_dma_dev, addr,
+ qm_fd_get_offset(&fd) + qm_fd_get_length(&fd),
+ DMA_TO_DEVICE);
+ goto out_error;
+ }
+
+ return 0;
+
+out_error:
+ percpu_stats->tx_errors++;
+ return err;
+}
+
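+/* Run the attached XDP program on a received contiguous frame and act on
+ * its verdict (PASS, TX, REDIRECT, ABORTED or DROP). Returns the XDP action
+ * so the caller can decide whether to build an skb.
+ */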
+static u32 dpaa_run_xdp(struct dpaa_priv *priv, struct qm_fd *fd, void *vaddr,
+ struct dpaa_fq *dpaa_fq, unsigned int *xdp_meta_len)
+{
+ ssize_t fd_off = qm_fd_get_offset(fd);
+ struct bpf_prog *xdp_prog;
+ struct xdp_frame *xdpf;
+ struct xdp_buff xdp;
+ u32 xdp_act;
+ int err;
+
+ xdp_prog = READ_ONCE(priv->xdp_prog);
+ if (!xdp_prog)
+ return XDP_PASS;
+
+ xdp_init_buff(&xdp, DPAA_BP_RAW_SIZE - DPAA_TX_PRIV_DATA_SIZE,
+ &dpaa_fq->xdp_rxq);
+ xdp_prepare_buff(&xdp, vaddr + fd_off - XDP_PACKET_HEADROOM,
+ XDP_PACKET_HEADROOM, qm_fd_get_length(fd), true);
+
+ /* We reserve a fixed headroom of 256 bytes under the erratum and we
+ * offer it all to XDP programs to use. If no room is left for the
+ * xdpf backpointer on TX, we will need to copy the data.
+ * Disable metadata support since data realignments might be required
+ * and the information can be lost.
+ */
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+ if (unlikely(fman_has_errata_a050385())) {
+ xdp_set_data_meta_invalid(&xdp);
+ xdp.data_hard_start = vaddr;
+ xdp.frame_sz = DPAA_BP_RAW_SIZE;
+ }
+#endif
+
+ xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+ /* Update the length and the offset of the FD */
+ qm_fd_set_contig(fd, xdp.data - vaddr, xdp.data_end - xdp.data);
+
+ switch (xdp_act) {
+ case XDP_PASS:
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+ *xdp_meta_len = xdp_data_meta_unsupported(&xdp) ? 0 :
+ xdp.data - xdp.data_meta;
+#else
+ *xdp_meta_len = xdp.data - xdp.data_meta;
+#endif
+ break;
+ case XDP_TX:
+ /* We can access the full headroom when sending the frame
+ * back out
+ */
+ xdp.data_hard_start = vaddr;
+ xdp.frame_sz = DPAA_BP_RAW_SIZE;
+ xdpf = xdp_convert_buff_to_frame(&xdp);
+ if (unlikely(!xdpf)) {
+ free_pages((unsigned long)vaddr, 0);
+ break;
+ }
+
+ if (dpaa_xdp_xmit_frame(priv->net_dev, xdpf))
+ xdp_return_frame_rx_napi(xdpf);
+
+ break;
+ case XDP_REDIRECT:
+ /* Allow redirect to use the full headroom */
+ xdp.data_hard_start = vaddr;
+ xdp.frame_sz = DPAA_BP_RAW_SIZE;
+
+ err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
+ if (err) {
+ trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
+ free_pages((unsigned long)vaddr, 0);
+ }
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
+ fallthrough;
+ case XDP_DROP:
+ /* Free the buffer */
+ free_pages((unsigned long)vaddr, 0);
+ break;
+ }
+
+ return xdp_act;
+}
+
+static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq,
+ bool sched_napi)
+{
+ bool ts_valid = false, hash_valid = false;
+ struct skb_shared_hwtstamps *shhwtstamps;
+ unsigned int skb_len, xdp_meta_len = 0;
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa_percpu_priv *percpu_priv;
+ const struct qm_fd *fd = &dq->fd;
+ dma_addr_t addr = qm_fd_addr(fd);
+ struct dpaa_napi_portal *np;
+ enum qm_fd_format fd_format;
+ struct net_device *net_dev;
+ u32 fd_status, hash_offset;
+ struct qm_sg_entry *sgt;
+ struct dpaa_bp *dpaa_bp;
+ struct dpaa_fq *dpaa_fq;
+ struct dpaa_priv *priv;
+ struct sk_buff *skb;
+ int *count_ptr;
+ u32 xdp_act;
+ void *vaddr;
+ u32 hash;
+ u64 ns;
+
+ dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
+ fd_status = be32_to_cpu(fd->status);
+ fd_format = qm_fd_get_format(fd);
+ net_dev = dpaa_fq->net_dev;
+ priv = netdev_priv(net_dev);
+ dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
+ if (!dpaa_bp)
+ return qman_cb_dqrr_consume;
+
+ /* Trace the Rx fd */
+ trace_dpaa_rx_fd(net_dev, fq, &dq->fd);
+
+ percpu_priv = this_cpu_ptr(priv->percpu_priv);
+ percpu_stats = &percpu_priv->stats;
+ np = &percpu_priv->np;
+
+ if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi)))
+ return qman_cb_dqrr_stop;
+
+ /* Make sure we didn't run out of buffers */
+ if (unlikely(dpaa_eth_refill_bpools(priv))) {
+ /* Unable to refill the buffer pool due to insufficient
+ * system memory. Just release the frame back into the pool,
+ * otherwise we'll soon end up with an empty buffer pool.
+ */
+ dpaa_fd_release(net_dev, &dq->fd);
+ return qman_cb_dqrr_consume;
+ }
+
+ if (unlikely((fd_status & FM_FD_STAT_RX_ERRORS) != 0)) {
+ if (net_ratelimit())
+ netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
+ fd_status & FM_FD_STAT_RX_ERRORS);
+
+ percpu_stats->rx_errors++;
+ dpaa_fd_release(net_dev, fd);
+ return qman_cb_dqrr_consume;
+ }
+
+ dma_unmap_page(dpaa_bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
+ DMA_FROM_DEVICE);
+
+ /* prefetch the first 64 bytes of the frame or the SGT start */
+ vaddr = phys_to_virt(addr);
+ prefetch(vaddr + qm_fd_get_offset(fd));
+
+ /* The only FD types that we may receive are contig and S/G */
+ WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg));
+
+ /* Account for either the contig buffer or the SGT buffer (depending on
+ * which case we were in) having been removed from the pool.
+ */
+ count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
+ (*count_ptr)--;
+
+ /* Extract the timestamp stored in the headroom before running XDP */
+ if (priv->rx_tstamp) {
+ if (!fman_port_get_tstamp(priv->mac_dev->port[RX], vaddr, &ns))
+ ts_valid = true;
+ else
+ WARN_ONCE(1, "fman_port_get_tstamp failed!\n");
+ }
+
+ /* Extract the hash stored in the headroom before running XDP */
+ if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use &&
+ !fman_port_get_hash_result_offset(priv->mac_dev->port[RX],
+ &hash_offset)) {
+ hash = be32_to_cpu(*(u32 *)(vaddr + hash_offset));
+ hash_valid = true;
+ }
+
+ if (likely(fd_format == qm_fd_contig)) {
+ xdp_act = dpaa_run_xdp(priv, (struct qm_fd *)fd, vaddr,
+ dpaa_fq, &xdp_meta_len);
+ np->xdp_act |= xdp_act;
+ if (xdp_act != XDP_PASS) {
+ percpu_stats->rx_packets++;
+ percpu_stats->rx_bytes += qm_fd_get_length(fd);
+ return qman_cb_dqrr_consume;
+ }
+ skb = contig_fd_to_skb(priv, fd);
+ } else {
+ /* XDP doesn't support S/G frames. Return the fragments to the
+ * buffer pool and release the SGT.
+ */
+ if (READ_ONCE(priv->xdp_prog)) {
+ WARN_ONCE(1, "S/G frames not supported under XDP\n");
+ sgt = vaddr + qm_fd_get_offset(fd);
+ dpaa_release_sgt_members(sgt);
+ free_pages((unsigned long)vaddr, 0);
+ return qman_cb_dqrr_consume;
+ }
+ skb = sg_fd_to_skb(priv, fd);
+ }
+ if (!skb)
+ return qman_cb_dqrr_consume;
+
+ if (xdp_meta_len)
+ skb_metadata_set(skb, xdp_meta_len);
+
+ /* Set the previously extracted timestamp */
+ if (ts_valid) {
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ shhwtstamps->hwtstamp = ns_to_ktime(ns);
+ }
+
+ skb->protocol = eth_type_trans(skb, net_dev);
+
+ /* Set the previously extracted hash */
+ if (hash_valid) {
+ enum pkt_hash_types type;
+
+ /* if L4 exists, it was used in the hash generation */
+ type = be32_to_cpu(fd->status) & FM_FD_STAT_L4CV ?
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
+ skb_set_hash(skb, hash, type);
+ }
+
+ skb_len = skb->len;
+
+ if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) {
+ percpu_stats->rx_dropped++;
+ return qman_cb_dqrr_consume;
+ }
+
+ percpu_stats->rx_packets++;
+ percpu_stats->rx_bytes += skb_len;
+
+ return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq,
+ bool sched_napi)
+{
+ struct dpaa_percpu_priv *percpu_priv;
+ struct net_device *net_dev;
+ struct dpaa_priv *priv;
+
+ net_dev = ((struct dpaa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+ percpu_priv = this_cpu_ptr(priv->percpu_priv);
+
+ if (dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi))
+ return qman_cb_dqrr_stop;
+
+ dpaa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
+
+ return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq,
+ bool sched_napi)
+{
+ struct dpaa_percpu_priv *percpu_priv;
+ struct net_device *net_dev;
+ struct dpaa_priv *priv;
+
+ net_dev = ((struct dpaa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+ /* Trace the fd */
+ trace_dpaa_tx_conf_fd(net_dev, fq, &dq->fd);
+
+ percpu_priv = this_cpu_ptr(priv->percpu_priv);
+
+ if (dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi))
+ return qman_cb_dqrr_stop;
+
+ dpaa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
+
+ return qman_cb_dqrr_consume;
+}
+
+static void egress_ern(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const union qm_mr_entry *msg)
+{
+ const struct qm_fd *fd = &msg->ern.fd;
+ struct dpaa_percpu_priv *percpu_priv;
+ const struct dpaa_priv *priv;
+ struct net_device *net_dev;
+ struct sk_buff *skb;
+
+ net_dev = ((struct dpaa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+ percpu_priv = this_cpu_ptr(priv->percpu_priv);
+
+ percpu_priv->stats.tx_dropped++;
+ percpu_priv->stats.tx_fifo_errors++;
+ count_ern(percpu_priv, msg);
+
+ skb = dpaa_cleanup_tx_fd(priv, fd, false);
+ dev_kfree_skb_any(skb);
+}
+
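+/* Dequeue and enqueue-rejection callbacks wired into the default and error
+ * frame queues on both the Rx and Tx (confirmation) paths.
+ */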
+static const struct dpaa_fq_cbs dpaa_fq_cbs = {
+ .rx_defq = { .cb = { .dqrr = rx_default_dqrr } },
+ .tx_defq = { .cb = { .dqrr = conf_dflt_dqrr } },
+ .rx_errq = { .cb = { .dqrr = rx_error_dqrr } },
+ .tx_errq = { .cb = { .dqrr = conf_error_dqrr } },
+ .egress_ern = { .cb = { .ern = egress_ern } }
+};
+
+static void dpaa_eth_napi_enable(struct dpaa_priv *priv)
+{
+ struct dpaa_percpu_priv *percpu_priv;
+ int i;
+
+ for_each_online_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+
+ percpu_priv->np.down = false;
+ napi_enable(&percpu_priv->np.napi);
+ }
+}
+
+static void dpaa_eth_napi_disable(struct dpaa_priv *priv)
+{
+ struct dpaa_percpu_priv *percpu_priv;
+ int i;
+
+ for_each_online_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+
+ percpu_priv->np.down = true;
+ napi_disable(&percpu_priv->np.napi);
+ }
+}
+
+static void dpaa_adjust_link(struct net_device *net_dev)
+{
+ struct mac_device *mac_dev;
+ struct dpaa_priv *priv;
+
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+ mac_dev->adjust_link(mac_dev);
+}
+
+/* The Aquantia PHYs are capable of performing rate adaptation */
+#define PHY_VEND_AQUANTIA 0x03a1b400
+#define PHY_VEND_AQUANTIA2 0x31c31c00
+
+static int dpaa_phy_init(struct net_device *net_dev)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ struct mac_device *mac_dev;
+ struct phy_device *phy_dev;
+ struct dpaa_priv *priv;
+ u32 phy_vendor;
+
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+
+ phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
+ &dpaa_adjust_link, 0,
+ mac_dev->phy_if);
+ if (!phy_dev) {
+ netif_err(priv, ifup, net_dev, "init_phy() failed\n");
+ return -ENODEV;
+ }
+
+ phy_vendor = phy_dev->drv->phy_id & GENMASK(31, 10);
+ /* Unless the PHY is capable of rate adaptation, restrict
+ * its supported modes to those of the controller.
+ */
+ if (mac_dev->phy_if != PHY_INTERFACE_MODE_XGMII ||
+ (phy_vendor != PHY_VEND_AQUANTIA &&
+ phy_vendor != PHY_VEND_AQUANTIA2)) {
+ /* remove any features not supported by the controller */
+ ethtool_convert_legacy_u32_to_link_mode(mask,
+ mac_dev->if_support);
+ linkmode_and(phy_dev->supported, phy_dev->supported, mask);
+ }
+
+ phy_support_asym_pause(phy_dev);
+
+ mac_dev->phy_dev = phy_dev;
+ net_dev->phydev = phy_dev;
+
+ return 0;
+}
+
+static int dpaa_open(struct net_device *net_dev)
+{
+ struct mac_device *mac_dev;
+ struct dpaa_priv *priv;
+ int err, i;
+
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+ dpaa_eth_napi_enable(priv);
+
+ err = dpaa_phy_init(net_dev);
+ if (err)
+ goto phy_init_failed;
+
+ for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
+ err = fman_port_enable(mac_dev->port[i]);
+ if (err)
+ goto mac_start_failed;
+ }
+
+ err = priv->mac_dev->enable(mac_dev->fman_mac);
+ if (err < 0) {
+ netif_err(priv, ifup, net_dev, "mac_dev->enable() = %d\n", err);
+ goto mac_start_failed;
+ }
+ phy_start(priv->mac_dev->phy_dev);
+
+ netif_tx_start_all_queues(net_dev);
+
+ return 0;
+
+mac_start_failed:
+ for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++)
+ fman_port_disable(mac_dev->port[i]);
+
+phy_init_failed:
+ dpaa_eth_napi_disable(priv);
+
+ return err;
+}
+
+static int dpaa_eth_stop(struct net_device *net_dev)
+{
+ struct dpaa_priv *priv;
+ int err;
+
+ err = dpaa_stop(net_dev);
+
+ priv = netdev_priv(net_dev);
+ dpaa_eth_napi_disable(priv);
+
+ return err;
+}
+
+static bool xdp_validate_mtu(struct dpaa_priv *priv, int mtu)
+{
+ int max_contig_data = priv->dpaa_bp->size - priv->rx_headroom;
+
+ /* We do not support S/G fragments when XDP is enabled.
+ * Limit the MTU in relation to the buffer size.
+ */
+ if (mtu + VLAN_ETH_HLEN + ETH_FCS_LEN > max_contig_data) {
+ dev_warn(priv->net_dev->dev.parent,
+ "The maximum MTU for XDP is %d\n",
+ max_contig_data - VLAN_ETH_HLEN - ETH_FCS_LEN);
+ return false;
+ }
+
+ return true;
+}
+
+static int dpaa_change_mtu(struct net_device *net_dev, int new_mtu)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+
+ if (priv->xdp_prog && !xdp_validate_mtu(priv, new_mtu))
+ return -EINVAL;
+
+ net_dev->mtu = new_mtu;
+ return 0;
+}
+
+static int dpaa_setup_xdp(struct net_device *net_dev, struct netdev_bpf *bpf)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct bpf_prog *old_prog;
+ int err;
+ bool up;
+
+ /* S/G fragments are not supported in XDP-mode */
+ if (bpf->prog && !xdp_validate_mtu(priv, net_dev->mtu)) {
+ NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP");
+ return -EINVAL;
+ }
+
+ up = netif_running(net_dev);
+
+ if (up)
+ dpaa_eth_stop(net_dev);
+
+ old_prog = xchg(&priv->xdp_prog, bpf->prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (up) {
+ err = dpaa_open(net_dev);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(bpf->extack, "dpaa_open() failed");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int dpaa_xdp(struct net_device *net_dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return dpaa_setup_xdp(net_dev, xdp);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int dpaa_xdp_xmit(struct net_device *net_dev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct xdp_frame *xdpf;
+ int i, nxmit = 0;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ if (!netif_running(net_dev))
+ return -ENETDOWN;
+
+ for (i = 0; i < n; i++) {
+ xdpf = frames[i];
+ if (dpaa_xdp_xmit_frame(net_dev, xdpf))
+ break;
+ nxmit++;
+ }
+
+ return nxmit;
+}
+
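+/* SIOCSHWTSTAMP handler: enable hardware timestamping when requested and
+ * track the Rx/Tx direction flags in software.
+ */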
+static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct dpaa_priv *priv = netdev_priv(dev);
+ struct hwtstamp_config config;
+
+ if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ /* Rx/Tx timestamping cannot be disabled separately in
+ * hardware; just clear the software flag here.
+ */
+ priv->tx_tstamp = false;
+ break;
+ case HWTSTAMP_TX_ON:
+ priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
+ priv->tx_tstamp = true;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
+ /* Rx/Tx timestamping cannot be disabled separately in
+ * hardware; just clear the software flag here.
+ */
+ priv->rx_tstamp = false;
+ } else {
+ priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
+ priv->rx_tstamp = true;
+ /* TS is set for all frame types, not only those requested */
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ }
+
+ return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
+{
+ int ret = -EINVAL;
+
+ if (cmd == SIOCGMIIREG) {
+ if (net_dev->phydev)
+ return phy_mii_ioctl(net_dev->phydev, rq, cmd);
+ }
+
+ if (cmd == SIOCSHWTSTAMP)
+ return dpaa_ts_ioctl(net_dev, rq, cmd);
+
+ return ret;
+}
+
+static const struct net_device_ops dpaa_ops = {
+ .ndo_open = dpaa_open,
+ .ndo_start_xmit = dpaa_start_xmit,
+ .ndo_stop = dpaa_eth_stop,
+ .ndo_tx_timeout = dpaa_tx_timeout,
+ .ndo_get_stats64 = dpaa_get_stats64,
+ .ndo_change_carrier = fixed_phy_change_carrier,
+ .ndo_set_mac_address = dpaa_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_rx_mode = dpaa_set_rx_mode,
+ .ndo_eth_ioctl = dpaa_ioctl,
+ .ndo_setup_tc = dpaa_setup_tc,
+ .ndo_change_mtu = dpaa_change_mtu,
+ .ndo_bpf = dpaa_xdp,
+ .ndo_xdp_xmit = dpaa_xdp_xmit,
+};
+
+static int dpaa_napi_add(struct net_device *net_dev)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct dpaa_percpu_priv *percpu_priv;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
+
+ netif_napi_add(net_dev, &percpu_priv->np.napi, dpaa_eth_poll);
+ }
+
+ return 0;
+}
+
+static void dpaa_napi_del(struct net_device *net_dev)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct dpaa_percpu_priv *percpu_priv;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
+
+ netif_napi_del(&percpu_priv->np.napi);
+ }
+}
+
+static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
+ struct bm_buffer *bmb)
+{
+ dma_addr_t addr = bm_buf_addr(bmb);
+
+ dma_unmap_page(bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
+ DMA_FROM_DEVICE);
+
+ skb_free_frag(phys_to_virt(addr));
+}
+
+/* Alloc the dpaa_bp struct and configure default values */
+static struct dpaa_bp *dpaa_bp_alloc(struct device *dev)
+{
+ struct dpaa_bp *dpaa_bp;
+
+ dpaa_bp = devm_kzalloc(dev, sizeof(*dpaa_bp), GFP_KERNEL);
+ if (!dpaa_bp)
+ return ERR_PTR(-ENOMEM);
+
+ dpaa_bp->bpid = FSL_DPAA_BPID_INV;
+ dpaa_bp->percpu_count = devm_alloc_percpu(dev, *dpaa_bp->percpu_count);
+ if (!dpaa_bp->percpu_count)
+ return ERR_PTR(-ENOMEM);
+
+ dpaa_bp->config_count = FSL_DPAA_ETH_MAX_BUF_COUNT;
+
+ dpaa_bp->seed_cb = dpaa_bp_seed;
+ dpaa_bp->free_buf_cb = dpaa_bp_free_pf;
+
+ return dpaa_bp;
+}
+
+/* Place all ingress FQs (Rx Default, Rx Error) in a dedicated CGR.
+ * We won't be sending congestion notifications to FMan; for now, we just use
+ * this CGR to generate enqueue rejections to FMan in order to drop the frames
+ * before they reach our ingress queues and eat up memory.
+ */
+static int dpaa_ingress_cgr_init(struct dpaa_priv *priv)
+{
+ struct qm_mcc_initcgr initcgr;
+ u32 cs_th;
+ int err;
+
+ err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid);
+ if (err < 0) {
+ if (netif_msg_drv(priv))
+ pr_err("Error %d allocating CGR ID\n", err);
+ goto out_error;
+ }
+
+ /* Enable CS TD, but disable Congestion State Change Notifications. */
+ memset(&initcgr, 0, sizeof(initcgr));
+ initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
+ initcgr.cgr.cscn_en = QM_CGR_EN;
+ cs_th = DPAA_INGRESS_CS_THRESHOLD;
+ qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
+
+ initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
+ initcgr.cgr.cstd_en = QM_CGR_EN;
+
+ /* This CGR will be associated with the SWP affined to the current CPU.
+ * However, we'll place all our ingress FQs in it.
+ */
+ err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT,
+ &initcgr);
+ if (err < 0) {
+ if (netif_msg_drv(priv))
+ pr_err("Error %d creating ingress CGR with ID %d\n",
+ err, priv->ingress_cgr.cgrid);
+ qman_release_cgrid(priv->ingress_cgr.cgrid);
+ goto out_error;
+ }
+ if (netif_msg_drv(priv))
+ pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n",
+ priv->ingress_cgr.cgrid, priv->mac_dev->addr);
+
+ priv->use_ingress_cgr = true;
+
+out_error:
+ return err;
+}
+
+static u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl,
+ enum port_type port)
+{
+ u16 headroom;
+
+ /* The frame headroom must accommodate:
+ * - the driver private data area
+ * - parse results, hash results, timestamp if selected
+ * If either hash results or the timestamp are selected, both will be
+ * copied to/from the frame headroom, as the TS is located between PR and
+ * HR in the IC, and the IC copy size has a granularity of 16 bytes
+ * (see the description of the FMBM_RICP and FMBM_TICP registers in DPAARM).
+ *
+ * Also make sure the headroom is a multiple of data_align bytes.
+ */
+ headroom = (u16)(bl[port].priv_data_size + DPAA_HWA_SIZE);
+
+ if (port == RX) {
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+ if (unlikely(fman_has_errata_a050385()))
+ headroom = XDP_PACKET_HEADROOM;
+#endif
+
+ return ALIGN(headroom, DPAA_FD_RX_DATA_ALIGNMENT);
+ } else {
+ return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT);
+ }
+}
+
+static int dpaa_eth_probe(struct platform_device *pdev)
+{
+ struct net_device *net_dev = NULL;
+ struct dpaa_bp *dpaa_bp = NULL;
+ struct dpaa_fq *dpaa_fq, *tmp;
+ struct dpaa_priv *priv = NULL;
+ struct fm_port_fqs port_fqs;
+ struct mac_device *mac_dev;
+ int err = 0, channel;
+ struct device *dev;
+
+ dev = &pdev->dev;
+
+ err = bman_is_probed();
+ if (!err)
+ return -EPROBE_DEFER;
+ if (err < 0) {
+ dev_err(dev, "failing probe due to bman probe error\n");
+ return -ENODEV;
+ }
+ err = qman_is_probed();
+ if (!err)
+ return -EPROBE_DEFER;
+ if (err < 0) {
+ dev_err(dev, "failing probe due to qman probe error\n");
+ return -ENODEV;
+ }
+ err = bman_portals_probed();
+ if (!err)
+ return -EPROBE_DEFER;
+ if (err < 0) {
+ dev_err(dev,
+ "failing probe due to bman portals probe error\n");
+ return -ENODEV;
+ }
+ err = qman_portals_probed();
+ if (!err)
+ return -EPROBE_DEFER;
+ if (err < 0) {
+ dev_err(dev,
+ "failing probe due to qman portals probe error\n");
+ return -ENODEV;
+ }
+
+ /* Allocate this early, so we can store relevant information in
+ * the private area
+ */
+ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM);
+ if (!net_dev) {
+ dev_err(dev, "alloc_etherdev_mq() failed\n");
+ return -ENOMEM;
+ }
+
+ /* Do this here, so we can be verbose early */
+ SET_NETDEV_DEV(net_dev, dev->parent);
+ dev_set_drvdata(dev, net_dev);
+
+ priv = netdev_priv(net_dev);
+ priv->net_dev = net_dev;
+
+ priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT);
+
+ mac_dev = dpaa_mac_dev_get(pdev);
+ if (IS_ERR(mac_dev)) {
+ netdev_err(net_dev, "dpaa_mac_dev_get() failed\n");
+ err = PTR_ERR(mac_dev);
+ goto free_netdev;
+ }
+
+ /* Devices used for DMA mapping */
+ priv->rx_dma_dev = fman_port_get_device(mac_dev->port[RX]);
+ priv->tx_dma_dev = fman_port_get_device(mac_dev->port[TX]);
+ err = dma_coerce_mask_and_coherent(priv->rx_dma_dev, DMA_BIT_MASK(40));
+ if (!err)
+ err = dma_coerce_mask_and_coherent(priv->tx_dma_dev,
+ DMA_BIT_MASK(40));
+ if (err) {
+ netdev_err(net_dev, "dma_coerce_mask_and_coherent() failed\n");
+ goto free_netdev;
+ }
+
+ /* If fsl_fm_max_frm is set to a higher value than the common 1500,
+ * we choose conservatively and let the user explicitly set a higher
+ * MTU via ifconfig. Otherwise, the user may end up with different MTUs
+ * in the same LAN.
+ * If, on the other hand, fsl_fm_max_frm has been chosen below 1500,
+ * start with the maximum allowed.
+ */
+ net_dev->mtu = min(dpaa_get_max_mtu(), ETH_DATA_LEN);
+
+ netdev_dbg(net_dev, "Setting initial MTU on net device: %d\n",
+ net_dev->mtu);
+
+ priv->buf_layout[RX].priv_data_size = DPAA_RX_PRIV_DATA_SIZE; /* Rx */
+ priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */
+
+ /* bp init */
+ dpaa_bp = dpaa_bp_alloc(dev);
+ if (IS_ERR(dpaa_bp)) {
+ err = PTR_ERR(dpaa_bp);
+ goto free_dpaa_bps;
+ }
+ /* the raw size of the buffers used for reception */
+ dpaa_bp->raw_size = DPAA_BP_RAW_SIZE;
+ /* avoid runtime computations by keeping the usable size here */
+ dpaa_bp->size = dpaa_bp_size(dpaa_bp->raw_size);
+ dpaa_bp->priv = priv;
+
+ err = dpaa_bp_alloc_pool(dpaa_bp);
+ if (err < 0)
+ goto free_dpaa_bps;
+ priv->dpaa_bp = dpaa_bp;
+
+ INIT_LIST_HEAD(&priv->dpaa_fq_list);
+
+ memset(&port_fqs, 0, sizeof(port_fqs));
+
+ err = dpaa_alloc_all_fqs(dev, &priv->dpaa_fq_list, &port_fqs);
+ if (err < 0) {
+ dev_err(dev, "dpaa_alloc_all_fqs() failed\n");
+ goto free_dpaa_bps;
+ }
+
+ priv->mac_dev = mac_dev;
+
+ channel = dpaa_get_channel();
+ if (channel < 0) {
+ dev_err(dev, "dpaa_get_channel() failed\n");
+ err = channel;
+ goto free_dpaa_bps;
+ }
+
+ priv->channel = (u16)channel;
+
+ /* Walk the CPUs with affine portals
+ * and add this pool channel to each CPU's dequeue mask.
+ */
+ dpaa_eth_add_channel(priv->channel, &pdev->dev);
+
+ dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
+
+ /* Create a congestion group for this netdev, with
+ * dynamically-allocated CGR ID.
+ * Must be executed after probing the MAC, but before
+ * assigning the egress FQs to the CGRs.
+ */
+ err = dpaa_eth_cgr_init(priv);
+ if (err < 0) {
+ dev_err(dev, "Error initializing CGR\n");
+ goto free_dpaa_bps;
+ }
+
+ err = dpaa_ingress_cgr_init(priv);
+ if (err < 0) {
+ dev_err(dev, "Error initializing ingress CGR\n");
+ goto delete_egress_cgr;
+ }
+
+ /* Add the FQs to the interface, and make them active */
+ list_for_each_entry_safe(dpaa_fq, tmp, &priv->dpaa_fq_list, list) {
+ err = dpaa_fq_init(dpaa_fq, false);
+ if (err < 0)
+ goto free_dpaa_fqs;
+ }
+
+ priv->tx_headroom = dpaa_get_headroom(priv->buf_layout, TX);
+ priv->rx_headroom = dpaa_get_headroom(priv->buf_layout, RX);
+
+ /* All real interfaces need their ports initialized */
+ err = dpaa_eth_init_ports(mac_dev, dpaa_bp, &port_fqs,
+ &priv->buf_layout[0], dev);
+ if (err)
+ goto free_dpaa_fqs;
+
+ /* Rx traffic distribution based on keygen hashing defaults to on */
+ priv->keygen_in_use = true;
+
+ priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
+ if (!priv->percpu_priv) {
+ dev_err(dev, "devm_alloc_percpu() failed\n");
+ err = -ENOMEM;
+ goto free_dpaa_fqs;
+ }
+
+ priv->num_tc = 1;
+ netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
+
+ /* Initialize NAPI */
+ err = dpaa_napi_add(net_dev);
+ if (err < 0)
+ goto delete_dpaa_napi;
+
+ err = dpaa_netdev_init(net_dev, &dpaa_ops, tx_timeout);
+ if (err < 0)
+ goto delete_dpaa_napi;
+
+ dpaa_eth_sysfs_init(&net_dev->dev);
+
+ netif_info(priv, probe, net_dev, "Probed interface %s\n",
+ net_dev->name);
+
+ return 0;
+
+delete_dpaa_napi:
+ dpaa_napi_del(net_dev);
+free_dpaa_fqs:
+ dpaa_fq_free(dev, &priv->dpaa_fq_list);
+ qman_delete_cgr_safe(&priv->ingress_cgr);
+ qman_release_cgrid(priv->ingress_cgr.cgrid);
+delete_egress_cgr:
+ qman_delete_cgr_safe(&priv->cgr_data.cgr);
+ qman_release_cgrid(priv->cgr_data.cgr.cgrid);
+free_dpaa_bps:
+ dpaa_bps_free(priv);
+free_netdev:
+ dev_set_drvdata(dev, NULL);
+ free_netdev(net_dev);
+
+ return err;
+}
+
+static int dpaa_remove(struct platform_device *pdev)
+{
+ struct net_device *net_dev;
+ struct dpaa_priv *priv;
+ struct device *dev;
+ int err;
+
+ dev = &pdev->dev;
+ net_dev = dev_get_drvdata(dev);
+
+ priv = netdev_priv(net_dev);
+
+ dpaa_eth_sysfs_remove(dev);
+
+ dev_set_drvdata(dev, NULL);
+ unregister_netdev(net_dev);
+
+ err = dpaa_fq_free(dev, &priv->dpaa_fq_list);
+
+ qman_delete_cgr_safe(&priv->ingress_cgr);
+ qman_release_cgrid(priv->ingress_cgr.cgrid);
+ qman_delete_cgr_safe(&priv->cgr_data.cgr);
+ qman_release_cgrid(priv->cgr_data.cgr.cgrid);
+
+ dpaa_napi_del(net_dev);
+
+ dpaa_bps_free(priv);
+
+ free_netdev(net_dev);
+
+ return err;
+}
+
+static const struct platform_device_id dpaa_devtype[] = {
+ {
+ .name = "dpaa-ethernet",
+ .driver_data = 0,
+ }, {
+ }
+};
+MODULE_DEVICE_TABLE(platform, dpaa_devtype);
+
+static struct platform_driver dpaa_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+ .id_table = dpaa_devtype,
+ .probe = dpaa_eth_probe,
+ .remove = dpaa_remove
+};
+
+static int __init dpaa_load(void)
+{
+ int err;
+
+ pr_debug("FSL DPAA Ethernet driver\n");
+
+ /* initialize dpaa_eth mirror values */
+ dpaa_rx_extra_headroom = fman_get_rx_extra_headroom();
+ dpaa_max_frm = fman_get_max_frm();
+
+ err = platform_driver_register(&dpaa_driver);
+ if (err < 0)
+ pr_err("Error, platform_driver_register() = %d\n", err);
+
+ return err;
+}
+module_init(dpaa_load);
+
+static void __exit dpaa_unload(void)
+{
+ platform_driver_unregister(&dpaa_driver);
+
+ /* Only one channel is used and needs to be released after all
+ * interfaces are removed
+ */
+ dpaa_release_channel();
+}
+module_exit(dpaa_unload);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("FSL DPAA Ethernet driver");
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
new file mode 100644
index 000000000..35b8cea7f
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -0,0 +1,187 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
+/*
+ * Copyright 2008 - 2016 Freescale Semiconductor Inc.
+ */
+
+#ifndef __DPAA_H
+#define __DPAA_H
+
+#include <linux/netdevice.h>
+#include <linux/refcount.h>
+#include <soc/fsl/qman.h>
+#include <soc/fsl/bman.h>
+
+#include "fman.h"
+#include "mac.h"
+#include "dpaa_eth_trace.h"
+
+/* Number of prioritised traffic classes */
+#define DPAA_TC_NUM 4
+/* Number of Tx queues per traffic class */
+#define DPAA_TC_TXQ_NUM NR_CPUS
+/* Total number of Tx queues */
+#define DPAA_ETH_TXQ_NUM (DPAA_TC_NUM * DPAA_TC_TXQ_NUM)
+
+/* More detailed FQ types - used for fine-grained WQ assignments */
+enum dpaa_fq_type {
+ FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
+ FQ_TYPE_RX_ERROR, /* Rx Error FQs */
+ FQ_TYPE_RX_PCD, /* Rx Parse Classify Distribute FQs */
+ FQ_TYPE_TX, /* "Real" Tx FQs */
+ FQ_TYPE_TX_CONFIRM, /* Tx default Conf FQ (actually an Rx FQ) */
+ FQ_TYPE_TX_CONF_MQ, /* Tx conf FQs (one for each Tx FQ) */
+ FQ_TYPE_TX_ERROR, /* Tx Error FQs (these are actually Rx FQs) */
+};
+
+struct dpaa_fq {
+ struct qman_fq fq_base;
+ struct list_head list;
+ struct net_device *net_dev;
+ bool init;
+ u32 fqid;
+ u32 flags;
+ u16 channel;
+ u8 wq;
+ enum dpaa_fq_type fq_type;
+ struct xdp_rxq_info xdp_rxq;
+};
+
+struct dpaa_fq_cbs {
+ struct qman_fq rx_defq;
+ struct qman_fq tx_defq;
+ struct qman_fq rx_errq;
+ struct qman_fq tx_errq;
+ struct qman_fq egress_ern;
+};
+
+struct dpaa_priv;
+
+struct dpaa_bp {
+ /* used in the DMA mapping operations */
+ struct dpaa_priv *priv;
+ /* current number of buffers in the buffer pool allotted to each CPU */
+ int __percpu *percpu_count;
+ /* all buffers allocated for this pool have this raw size */
+ size_t raw_size;
+ /* all buffers in this pool have this same usable size */
+ size_t size;
+ /* the buffer pools are initialized with config_count buffers for each
+ * CPU; at runtime the number of buffers per CPU is constantly brought
+ * back to this level
+ */
+ u16 config_count;
+ u8 bpid;
+ struct bman_pool *pool;
+ /* bpool can be seeded before use by this cb */
+ int (*seed_cb)(struct dpaa_bp *);
+ /* bpool can be emptied before freeing by this cb */
+ void (*free_buf_cb)(const struct dpaa_bp *, struct bm_buffer *);
+ refcount_t refs;
+};
+
+struct dpaa_rx_errors {
+ u64 dme; /* DMA Error */
+ u64 fpe; /* Frame Physical Error */
+ u64 fse; /* Frame Size Error */
+ u64 phe; /* Header Error */
+};
+
+/* Counters for QMan ERN frames - one counter per rejection code */
+struct dpaa_ern_cnt {
+ u64 cg_tdrop; /* Congestion group taildrop */
+ u64 wred; /* WRED congestion */
+ u64 err_cond; /* Error condition */
+ u64 early_window; /* Order restoration, frame too early */
+ u64 late_window; /* Order restoration, frame too late */
+ u64 fq_tdrop; /* FQ taildrop */
+ u64 fq_retired; /* FQ is retired */
+ u64 orp_zero; /* ORP disabled */
+};
+
+struct dpaa_napi_portal {
+ struct napi_struct napi;
+ struct qman_portal *p;
+ bool down;
+ int xdp_act;
+};
+
+struct dpaa_percpu_priv {
+ struct net_device *net_dev;
+ struct dpaa_napi_portal np;
+ u64 in_interrupt;
+ u64 tx_confirm;
+ /* fragmented (non-linear) skbuffs received from the stack */
+ u64 tx_frag_skbuffs;
+ struct rtnl_link_stats64 stats;
+ struct dpaa_rx_errors rx_errors;
+ struct dpaa_ern_cnt ern_cnt;
+};
+
+struct dpaa_buffer_layout {
+ u16 priv_data_size;
+};
+
+/* Information to be used on the Tx confirmation path. Stored just
+ * before the start of the transmit buffer. Maximum size allowed
+ * is DPAA_TX_PRIV_DATA_SIZE bytes.
+ */
+struct dpaa_eth_swbp {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+};
+
+struct dpaa_priv {
+ struct dpaa_percpu_priv __percpu *percpu_priv;
+ struct dpaa_bp *dpaa_bp;
+ /* Store here the needed Tx headroom for convenience and speed
+ * (even though it can be computed based on the fields of buf_layout)
+ */
+ u16 tx_headroom;
+ struct net_device *net_dev;
+ struct mac_device *mac_dev;
+ struct device *rx_dma_dev;
+ struct device *tx_dma_dev;
+ struct qman_fq *egress_fqs[DPAA_ETH_TXQ_NUM];
+ struct qman_fq *conf_fqs[DPAA_ETH_TXQ_NUM];
+
+ u16 channel;
+ struct list_head dpaa_fq_list;
+
+ u8 num_tc;
+ bool keygen_in_use;
+ u32 msg_enable; /* net_device message level */
+
+ struct {
+ /* All egress queues to a given net device belong to one
+ * (and the same) congestion group.
+ */
+ struct qman_cgr cgr;
+ /* If congested, when it began. Used for performance stats. */
+ u32 congestion_start_jiffies;
+ /* Number of jiffies the Tx port was congested. */
+ u32 congested_jiffies;
+ /* Counter for the number of times the CGR
+ * entered congestion state
+ */
+ u32 cgr_congested_count;
+ } cgr_data;
+ /* Use a per-port CGR for ingress traffic. */
+ bool use_ingress_cgr;
+ struct qman_cgr ingress_cgr;
+
+ struct dpaa_buffer_layout buf_layout[2];
+ u16 rx_headroom;
+
+ bool tx_tstamp; /* Tx timestamping enabled */
+ bool rx_tstamp; /* Rx timestamping enabled */
+
+ struct bpf_prog *xdp_prog;
+};
+
+/* from dpaa_ethtool.c */
+extern const struct ethtool_ops dpaa_ethtool_ops;
+
+/* from dpaa_eth_sysfs.c */
+void dpaa_eth_sysfs_remove(struct device *dev);
+void dpaa_eth_sysfs_init(struct device *dev);
+#endif /* __DPAA_H */
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
new file mode 100644
index 000000000..4fee74c02
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * Copyright 2008 - 2016 Freescale Semiconductor Inc.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/of_net.h>
+#include "dpaa_eth.h"
+#include "mac.h"
+
+static ssize_t dpaa_eth_show_addr(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dpaa_priv *priv = netdev_priv(to_net_dev(dev));
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev)
+ return sprintf(buf, "%llx",
+ (unsigned long long)mac_dev->res->start);
+ else
+ return sprintf(buf, "none");
+}
+
+static ssize_t dpaa_eth_show_fqids(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dpaa_priv *priv = netdev_priv(to_net_dev(dev));
+ struct dpaa_fq *prev = NULL;
+ char *prevstr = NULL;
+ struct dpaa_fq *tmp;
+ struct dpaa_fq *fq;
+ u32 first_fqid = 0;
+ u32 last_fqid = 0;
+ ssize_t bytes = 0;
+ char *str;
+ int i = 0;
+
+ list_for_each_entry_safe(fq, tmp, &priv->dpaa_fq_list, list) {
+ switch (fq->fq_type) {
+ case FQ_TYPE_RX_DEFAULT:
+ str = "Rx default";
+ break;
+ case FQ_TYPE_RX_ERROR:
+ str = "Rx error";
+ break;
+ case FQ_TYPE_RX_PCD:
+ str = "Rx PCD";
+ break;
+ case FQ_TYPE_TX_CONFIRM:
+ str = "Tx default confirmation";
+ break;
+ case FQ_TYPE_TX_CONF_MQ:
+ str = "Tx confirmation (mq)";
+ break;
+ case FQ_TYPE_TX_ERROR:
+ str = "Tx error";
+ break;
+ case FQ_TYPE_TX:
+ str = "Tx";
+ break;
+ default:
+ str = "Unknown";
+ }
+
+ if (prev && (abs(fq->fqid - prev->fqid) != 1 ||
+ str != prevstr)) {
+ if (last_fqid == first_fqid)
+ bytes += sprintf(buf + bytes,
+ "%s: %d\n", prevstr, prev->fqid);
+ else
+ bytes += sprintf(buf + bytes,
+ "%s: %d - %d\n", prevstr,
+ first_fqid, last_fqid);
+ }
+
+ if (prev && abs(fq->fqid - prev->fqid) == 1 &&
+ str == prevstr) {
+ last_fqid = fq->fqid;
+ } else {
+ first_fqid = fq->fqid;
+ last_fqid = fq->fqid;
+ }
+
+ prev = fq;
+ prevstr = str;
+ i++;
+ }
+
+ if (prev) {
+ if (last_fqid == first_fqid)
+ bytes += sprintf(buf + bytes, "%s: %d\n", prevstr,
+ prev->fqid);
+ else
+ bytes += sprintf(buf + bytes, "%s: %d - %d\n", prevstr,
+ first_fqid, last_fqid);
+ }
+
+ return bytes;
+}
+
+static ssize_t dpaa_eth_show_bpids(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dpaa_priv *priv = netdev_priv(to_net_dev(dev));
+ ssize_t bytes = 0;
+
+ bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "%u\n",
+ priv->dpaa_bp->bpid);
+
+ return bytes;
+}
+
+static struct device_attribute dpaa_eth_attrs[] = {
+ __ATTR(device_addr, 0444, dpaa_eth_show_addr, NULL),
+ __ATTR(fqids, 0444, dpaa_eth_show_fqids, NULL),
+ __ATTR(bpids, 0444, dpaa_eth_show_bpids, NULL),
+};
+
+void dpaa_eth_sysfs_init(struct device *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dpaa_eth_attrs); i++)
+ if (device_create_file(dev, &dpaa_eth_attrs[i])) {
+ dev_err(dev, "Error creating sysfs file\n");
+ while (i > 0)
+ device_remove_file(dev, &dpaa_eth_attrs[--i]);
+ return;
+ }
+}
+
+void dpaa_eth_sysfs_remove(struct device *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dpaa_eth_attrs); i++)
+ device_remove_file(dev, &dpaa_eth_attrs[i]);
+}
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
new file mode 100644
index 000000000..889f89df9
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
+/*
+ * Copyright 2013-2015 Freescale Semiconductor Inc.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dpaa_eth
+
+#if !defined(_DPAA_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _DPAA_ETH_TRACE_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include "dpaa_eth.h"
+#include <linux/tracepoint.h>
+
+#define fd_format_name(format) { qm_fd_##format, #format }
+#define fd_format_list \
+ fd_format_name(contig), \
+ fd_format_name(sg)
+
+/* This is used to declare a class of events.
+ * individual events of this type will be defined below.
+ */
+
+/* Store details about a frame descriptor and the FQ on which it was
+ * transmitted/received.
+ */
+DECLARE_EVENT_CLASS(dpaa_eth_fd,
+ /* Trace function prototype */
+ TP_PROTO(struct net_device *netdev,
+ struct qman_fq *fq,
+ const struct qm_fd *fd),
+
+ /* Repeat argument list here */
+ TP_ARGS(netdev, fq, fd),
+
+ /* A structure containing the relevant information we want to record.
+ * Declare name and type for each normal element, name, type and size
+ * for arrays. Use __string for variable length strings.
+ */
+ TP_STRUCT__entry(
+ __field(u32, fqid)
+ __field(u64, fd_addr)
+ __field(u8, fd_format)
+ __field(u16, fd_offset)
+ __field(u32, fd_length)
+ __field(u32, fd_status)
+ __string(name, netdev->name)
+ ),
+
+ /* The function that assigns values to the above declared fields */
+ TP_fast_assign(
+ __entry->fqid = fq->fqid;
+ __entry->fd_addr = qm_fd_addr_get64(fd);
+ __entry->fd_format = qm_fd_get_format(fd);
+ __entry->fd_offset = qm_fd_get_offset(fd);
+ __entry->fd_length = qm_fd_get_length(fd);
+ __entry->fd_status = fd->status;
+ __assign_str(name, netdev->name);
+ ),
+
+ /* This is what gets printed when the trace event is triggered */
+ TP_printk("[%s] fqid=%d, fd: addr=0x%llx, format=%s, off=%u, len=%u, status=0x%08x",
+ __get_str(name), __entry->fqid, __entry->fd_addr,
+ __print_symbolic(__entry->fd_format, fd_format_list),
+ __entry->fd_offset, __entry->fd_length, __entry->fd_status)
+);
+
+/* Now declare events of the above type. Format is:
+ * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class
+ */
+
+/* Tx (egress) fd */
+DEFINE_EVENT(dpaa_eth_fd, dpaa_tx_fd,
+
+ TP_PROTO(struct net_device *netdev,
+ struct qman_fq *fq,
+ const struct qm_fd *fd),
+
+ TP_ARGS(netdev, fq, fd)
+);
+
+/* Rx fd */
+DEFINE_EVENT(dpaa_eth_fd, dpaa_rx_fd,
+
+ TP_PROTO(struct net_device *netdev,
+ struct qman_fq *fq,
+ const struct qm_fd *fd),
+
+ TP_ARGS(netdev, fq, fd)
+);
+
+/* Tx confirmation fd */
+DEFINE_EVENT(dpaa_eth_fd, dpaa_tx_conf_fd,
+
+ TP_PROTO(struct net_device *netdev,
+ struct qman_fq *fq,
+ const struct qm_fd *fd),
+
+ TP_ARGS(netdev, fq, fd)
+);
+
+/* If only one event of a certain type needs to be declared, use TRACE_EVENT().
+ * The syntax is the same as for DECLARE_EVENT_CLASS().
+ */
+
+#endif /* _DPAA_ETH_TRACE_H */
+
+/* This must be outside ifdef _DPAA_ETH_TRACE_H */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE dpaa_eth_trace
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
new file mode 100644
index 000000000..769e936a2
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -0,0 +1,582 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * Copyright 2008 - 2016 Freescale Semiconductor Inc.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/string.h>
+#include <linux/of_platform.h>
+#include <linux/net_tstamp.h>
+#include <linux/fsl/ptp_qoriq.h>
+
+#include "dpaa_eth.h"
+#include "mac.h"
+
+static const char dpaa_stats_percpu[][ETH_GSTRING_LEN] = {
+ "interrupts",
+ "rx packets",
+ "tx packets",
+ "tx confirm",
+ "tx S/G",
+ "tx error",
+ "rx error",
+ "rx dropped",
+ "tx dropped",
+};
+
+static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
+ /* dpa rx errors */
+ "rx dma error",
+ "rx frame physical error",
+ "rx frame size error",
+ "rx header error",
+
+ /* demultiplexing errors */
+ "qman cg_tdrop",
+ "qman wred",
+ "qman error cond",
+ "qman early window",
+ "qman late window",
+ "qman fq tdrop",
+ "qman fq retired",
+ "qman orp disabled",
+
+ /* congestion related stats */
+ "congestion time (ms)",
+ "entered congestion",
+ "congested (0/1)"
+};
+
+#define DPAA_STATS_PERCPU_LEN ARRAY_SIZE(dpaa_stats_percpu)
+#define DPAA_STATS_GLOBAL_LEN ARRAY_SIZE(dpaa_stats_global)
+
+static int dpaa_get_link_ksettings(struct net_device *net_dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ if (!net_dev->phydev)
+ return 0;
+
+ phy_ethtool_ksettings_get(net_dev->phydev, cmd);
+
+ return 0;
+}
+
+static int dpaa_set_link_ksettings(struct net_device *net_dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ int err;
+
+ if (!net_dev->phydev)
+ return -ENODEV;
+
+ err = phy_ethtool_ksettings_set(net_dev->phydev, cmd);
+ if (err < 0)
+ netdev_err(net_dev, "phy_ethtool_ksettings_set() = %d\n", err);
+
+ return err;
+}
+
+static void dpaa_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strscpy(drvinfo->driver, KBUILD_MODNAME,
+ sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
+ sizeof(drvinfo->bus_info));
+}
+
+static u32 dpaa_get_msglevel(struct net_device *net_dev)
+{
+ return ((struct dpaa_priv *)netdev_priv(net_dev))->msg_enable;
+}
+
+static void dpaa_set_msglevel(struct net_device *net_dev,
+ u32 msg_enable)
+{
+ ((struct dpaa_priv *)netdev_priv(net_dev))->msg_enable = msg_enable;
+}
+
+static int dpaa_nway_reset(struct net_device *net_dev)
+{
+ int err;
+
+ if (!net_dev->phydev)
+ return -ENODEV;
+
+ err = 0;
+ if (net_dev->phydev->autoneg) {
+ err = phy_start_aneg(net_dev->phydev);
+ if (err < 0)
+ netdev_err(net_dev, "phy_start_aneg() = %d\n",
+ err);
+ }
+
+ return err;
+}
+
+static void dpaa_get_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct mac_device *mac_dev;
+ struct dpaa_priv *priv;
+
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+
+ if (!net_dev->phydev)
+ return;
+
+ epause->autoneg = mac_dev->autoneg_pause;
+ epause->rx_pause = mac_dev->rx_pause_active;
+ epause->tx_pause = mac_dev->tx_pause_active;
+}
+
+static int dpaa_set_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct mac_device *mac_dev;
+ struct phy_device *phydev;
+ bool rx_pause, tx_pause;
+ struct dpaa_priv *priv;
+ int err;
+
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+
+ phydev = net_dev->phydev;
+ if (!phydev) {
+ netdev_err(net_dev, "phy device not initialized\n");
+ return -ENODEV;
+ }
+
+ if (!phy_validate_pause(phydev, epause))
+ return -EINVAL;
+
+ /* The MAC should know how to handle PAUSE frame autonegotiation before
+ * adjust_link is triggered by a forced renegotiation of sym/asym PAUSE
+ * settings.
+ */
+ mac_dev->autoneg_pause = !!epause->autoneg;
+ mac_dev->rx_pause_req = !!epause->rx_pause;
+ mac_dev->tx_pause_req = !!epause->tx_pause;
+
+ /* Determine the sym/asym advertised PAUSE capabilities from the desired
+ * rx/tx pause settings.
+ */
+
+ phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
+
+ fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
+ err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
+ if (err < 0)
+ netdev_err(net_dev, "set_mac_active_pause() = %d\n", err);
+
+ return err;
+}
+
+static int dpaa_get_sset_count(struct net_device *net_dev, int type)
+{
+ unsigned int total_stats, num_stats;
+
+ num_stats = num_online_cpus() + 1;
+ total_stats = num_stats * (DPAA_STATS_PERCPU_LEN + 1) +
+ DPAA_STATS_GLOBAL_LEN;
+
+ switch (type) {
+ case ETH_SS_STATS:
+ return total_stats;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void copy_stats(struct dpaa_percpu_priv *percpu_priv, int num_cpus,
+ int crr_cpu, u64 bp_count, u64 *data)
+{
+ int num_values = num_cpus + 1;
+ int crr = 0;
+
+ /* update current CPU's stats and also add them to the total values */
+ data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt;
+ data[crr++ * num_values + num_cpus] += percpu_priv->in_interrupt;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_packets;
+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_packets;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_packets;
+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_packets;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->tx_confirm;
+ data[crr++ * num_values + num_cpus] += percpu_priv->tx_confirm;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
+ data[crr++ * num_values + num_cpus] += percpu_priv->tx_frag_skbuffs;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_errors;
+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_errors;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_dropped;
+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_dropped;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_dropped;
+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_dropped;
+
+ data[crr * num_values + crr_cpu] = bp_count;
+ data[crr++ * num_values + num_cpus] += bp_count;
+}
+
+static void dpaa_get_ethtool_stats(struct net_device *net_dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct dpaa_percpu_priv *percpu_priv;
+ struct dpaa_rx_errors rx_errors;
+ unsigned int num_cpus, offset;
+ u64 bp_count, cg_time, cg_num;
+ struct dpaa_ern_cnt ern_cnt;
+ struct dpaa_bp *dpaa_bp;
+ struct dpaa_priv *priv;
+ int total_stats, i;
+ bool cg_status;
+
+ total_stats = dpaa_get_sset_count(net_dev, ETH_SS_STATS);
+ priv = netdev_priv(net_dev);
+ num_cpus = num_online_cpus();
+
+ memset(&bp_count, 0, sizeof(bp_count));
+ memset(&rx_errors, 0, sizeof(struct dpaa_rx_errors));
+ memset(&ern_cnt, 0, sizeof(struct dpaa_ern_cnt));
+ memset(data, 0, total_stats * sizeof(u64));
+
+ for_each_online_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+ dpaa_bp = priv->dpaa_bp;
+ if (!dpaa_bp->percpu_count)
+ continue;
+ bp_count = *(per_cpu_ptr(dpaa_bp->percpu_count, i));
+ rx_errors.dme += percpu_priv->rx_errors.dme;
+ rx_errors.fpe += percpu_priv->rx_errors.fpe;
+ rx_errors.fse += percpu_priv->rx_errors.fse;
+ rx_errors.phe += percpu_priv->rx_errors.phe;
+
+ ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
+ ern_cnt.wred += percpu_priv->ern_cnt.wred;
+ ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
+ ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
+ ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
+ ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
+ ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
+ ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;
+
+ copy_stats(percpu_priv, num_cpus, i, bp_count, data);
+ }
+
+ offset = (num_cpus + 1) * (DPAA_STATS_PERCPU_LEN + 1);
+ memcpy(data + offset, &rx_errors, sizeof(struct dpaa_rx_errors));
+
+ offset += sizeof(struct dpaa_rx_errors) / sizeof(u64);
+ memcpy(data + offset, &ern_cnt, sizeof(struct dpaa_ern_cnt));
+
+ /* gather congestion related counters */
+ cg_num = 0;
+ cg_status = false;
+ cg_time = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
+ if (qman_query_cgr_congested(&priv->cgr_data.cgr, &cg_status) == 0) {
+ cg_num = priv->cgr_data.cgr_congested_count;
+
+ /* reset congestion stats (like the QMan API does) */
+ priv->cgr_data.congested_jiffies = 0;
+ priv->cgr_data.cgr_congested_count = 0;
+ }
+
+ offset += sizeof(struct dpaa_ern_cnt) / sizeof(u64);
+ data[offset++] = cg_time;
+ data[offset++] = cg_num;
+ data[offset++] = cg_status;
+}
+
+static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
+ u8 *data)
+{
+ unsigned int i, j, num_cpus, size;
+ char string_cpu[ETH_GSTRING_LEN];
+ u8 *strings;
+
+ memset(string_cpu, 0, sizeof(string_cpu));
+ strings = data;
+ num_cpus = num_online_cpus();
+ size = DPAA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;
+
+ for (i = 0; i < DPAA_STATS_PERCPU_LEN; i++) {
+ for (j = 0; j < num_cpus; j++) {
+ snprintf(string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]",
+ dpaa_stats_percpu[i], j);
+ memcpy(strings, string_cpu, ETH_GSTRING_LEN);
+ strings += ETH_GSTRING_LEN;
+ }
+ snprintf(string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]",
+ dpaa_stats_percpu[i]);
+ memcpy(strings, string_cpu, ETH_GSTRING_LEN);
+ strings += ETH_GSTRING_LEN;
+ }
+ for (j = 0; j < num_cpus; j++) {
+ snprintf(string_cpu, ETH_GSTRING_LEN,
+ "bpool [CPU %d]", j);
+ memcpy(strings, string_cpu, ETH_GSTRING_LEN);
+ strings += ETH_GSTRING_LEN;
+ }
+ snprintf(string_cpu, ETH_GSTRING_LEN, "bpool [TOTAL]");
+ memcpy(strings, string_cpu, ETH_GSTRING_LEN);
+ strings += ETH_GSTRING_LEN;
+
+ memcpy(strings, dpaa_stats_global, size);
+}
+
+static int dpaa_get_hash_opts(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct dpaa_priv *priv = netdev_priv(dev);
+
+ cmd->data = 0;
+
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ if (priv->keygen_in_use)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V4_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V4_FLOW:
+ case ESP_V6_FLOW:
+ if (priv->keygen_in_use)
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ cmd->data = 0;
+ break;
+ }
+
+ return 0;
+}
+
+static int dpaa_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *unused)
+{
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXFH:
+ ret = dpaa_get_hash_opts(dev, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static void dpaa_set_hash(struct net_device *net_dev, bool enable)
+{
+ struct mac_device *mac_dev;
+ struct fman_port *rxport;
+ struct dpaa_priv *priv;
+
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+ rxport = mac_dev->port[0];
+
+ fman_port_use_kg_hash(rxport, enable);
+ priv->keygen_in_use = enable;
+}
+
+static int dpaa_set_hash_opts(struct net_device *dev,
+ struct ethtool_rxnfc *nfc)
+{
+ int ret = -EINVAL;
+
+ /* we support hashing on IPv4/v6 src/dest IP and L4 src/dest port */
+ if (nfc->data &
+ ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V4_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V4_FLOW:
+ case ESP_V6_FLOW:
+ dpaa_set_hash(dev, !!nfc->data);
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = dpaa_set_hash_opts(dev, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int dpaa_get_ts_info(struct net_device *net_dev,
+ struct ethtool_ts_info *info)
+{
+ struct device *dev = net_dev->dev.parent;
+ struct device_node *mac_node = dev->of_node;
+ struct device_node *fman_node = NULL, *ptp_node = NULL;
+ struct platform_device *ptp_dev = NULL;
+ struct ptp_qoriq *ptp = NULL;
+
+ info->phc_index = -1;
+
+ fman_node = of_get_parent(mac_node);
+ if (fman_node) {
+ ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
+ of_node_put(fman_node);
+ }
+
+ if (ptp_node) {
+ ptp_dev = of_find_device_by_node(ptp_node);
+ of_node_put(ptp_node);
+ }
+
+ if (ptp_dev)
+ ptp = platform_get_drvdata(ptp_dev);
+
+ if (ptp)
+ info->phc_index = ptp->phc_index;
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static int dpaa_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *c,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct qman_portal *portal;
+ u32 period;
+ u8 thresh;
+
+ portal = qman_get_affine_portal(smp_processor_id());
+ qman_portal_get_iperiod(portal, &period);
+ qman_dqrr_get_ithresh(portal, &thresh);
+
+ c->rx_coalesce_usecs = period;
+ c->rx_max_coalesced_frames = thresh;
+
+ return 0;
+}
+
+static int dpaa_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *c,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ const cpumask_t *cpus = qman_affine_cpus();
+ bool needs_revert[NR_CPUS] = {false};
+ struct qman_portal *portal;
+ u32 period, prev_period;
+ u8 thresh, prev_thresh;
+ int cpu, res;
+
+ period = c->rx_coalesce_usecs;
+ thresh = c->rx_max_coalesced_frames;
+
+ /* save previous values */
+ portal = qman_get_affine_portal(smp_processor_id());
+ qman_portal_get_iperiod(portal, &prev_period);
+ qman_dqrr_get_ithresh(portal, &prev_thresh);
+
+ /* set new values */
+ for_each_cpu_and(cpu, cpus, cpu_online_mask) {
+ portal = qman_get_affine_portal(cpu);
+ res = qman_portal_set_iperiod(portal, period);
+ if (res)
+ goto revert_values;
+ res = qman_dqrr_set_ithresh(portal, thresh);
+ if (res) {
+ qman_portal_set_iperiod(portal, prev_period);
+ goto revert_values;
+ }
+ needs_revert[cpu] = true;
+ }
+
+ return 0;
+
+revert_values:
+ /* restore previous values */
+ for_each_cpu_and(cpu, cpus, cpu_online_mask) {
+ if (!needs_revert[cpu])
+ continue;
+ portal = qman_get_affine_portal(cpu);
+ /* restoring the previous values will not fail, ignore return value */
+ qman_portal_set_iperiod(portal, prev_period);
+ qman_dqrr_set_ithresh(portal, prev_thresh);
+ }
+
+ return res;
+}
+
+const struct ethtool_ops dpaa_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES,
+ .get_drvinfo = dpaa_get_drvinfo,
+ .get_msglevel = dpaa_get_msglevel,
+ .set_msglevel = dpaa_set_msglevel,
+ .nway_reset = dpaa_nway_reset,
+ .get_pauseparam = dpaa_get_pauseparam,
+ .set_pauseparam = dpaa_set_pauseparam,
+ .get_link = ethtool_op_get_link,
+ .get_sset_count = dpaa_get_sset_count,
+ .get_ethtool_stats = dpaa_get_ethtool_stats,
+ .get_strings = dpaa_get_strings,
+ .get_link_ksettings = dpaa_get_link_ksettings,
+ .set_link_ksettings = dpaa_set_link_ksettings,
+ .get_rxnfc = dpaa_get_rxnfc,
+ .set_rxnfc = dpaa_set_rxnfc,
+ .get_ts_info = dpaa_get_ts_info,
+ .get_coalesce = dpaa_get_coalesce,
+ .set_coalesce = dpaa_set_coalesce,
+};
diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig
new file mode 100644
index 000000000..d029b69c3
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config FSL_DPAA2_ETH
+ tristate "Freescale DPAA2 Ethernet"
+ depends on FSL_MC_BUS && FSL_MC_DPIO
+ select PHYLINK
+ select PCS_LYNX
+ select FSL_XGMAC_MDIO
+ select NET_DEVLINK
+ help
+ This is the DPAA2 Ethernet driver supporting Freescale SoCs
+ with DPAA2 (DataPath Acceleration Architecture v2).
+ The driver manages network objects discovered on the Freescale
+ MC bus.
+
+if FSL_DPAA2_ETH
+config FSL_DPAA2_ETH_DCB
+ bool "Data Center Bridging (DCB) Support"
+ default n
+ depends on DCB
+ help
+ Enable Priority-Based Flow Control (PFC) support for DPAA2 Ethernet
+ devices.
+endif
+
+config FSL_DPAA2_PTP_CLOCK
+ tristate "Freescale DPAA2 PTP Clock"
+ depends on FSL_DPAA2_ETH && PTP_1588_CLOCK_QORIQ
+ default y
+ help
+ This driver adds support for using the DPAA2 1588 timer module
+ as a PTP clock.
+
+config FSL_DPAA2_SWITCH
+ tristate "Freescale DPAA2 Ethernet Switch"
+ depends on BRIDGE || BRIDGE=n
+ depends on NET_SWITCHDEV
+ help
+ Driver for Freescale DPAA2 Ethernet Switch. This driver manages
+ switch objects discovered on the Freescale MC bus.
diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile
new file mode 100644
index 000000000..3d9842af7
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/Makefile
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Freescale DPAA2 Ethernet controller
+#
+
+obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
+obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl-dpaa2-ptp.o
+obj-$(CONFIG_FSL_DPAA2_SWITCH) += fsl-dpaa2-switch.o
+
+fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o dpaa2-eth-devlink.o
+fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DCB} += dpaa2-eth-dcb.o
+fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o
+fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o
+fsl-dpaa2-switch-objs := dpaa2-switch.o dpaa2-switch-ethtool.o dpsw.o dpaa2-switch-flower.o dpaa2-mac.o dpmac.o
+
+# Needed by the tracing framework
+CFLAGS_dpaa2-eth.o := -I$(src)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-dcb.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-dcb.c
new file mode 100644
index 000000000..84de06441
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-dcb.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2020 NXP */
+
+#include "dpaa2-eth.h"
+
+static int dpaa2_eth_dcbnl_ieee_getpfc(struct net_device *net_dev,
+ struct ieee_pfc *pfc)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ if (!(priv->link_state.options & DPNI_LINK_OPT_PFC_PAUSE))
+ return 0;
+
+ memcpy(pfc, &priv->pfc, sizeof(priv->pfc));
+ pfc->pfc_cap = dpaa2_eth_tc_count(priv);
+
+ return 0;
+}
+
+static inline bool dpaa2_eth_is_prio_enabled(u8 pfc_en, u8 tc)
+{
+ return !!(pfc_en & (1 << tc));
+}
+
+static int dpaa2_eth_set_pfc_cn(struct dpaa2_eth_priv *priv, u8 pfc_en)
+{
+ struct dpni_congestion_notification_cfg cfg = {0};
+ int i, err;
+
+ cfg.notification_mode = DPNI_CONG_OPT_FLOW_CONTROL;
+ cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
+ cfg.message_iova = 0ULL;
+ cfg.message_ctx = 0ULL;
+
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ if (dpaa2_eth_is_prio_enabled(pfc_en, i)) {
+ cfg.threshold_entry = DPAA2_ETH_CN_THRESH_ENTRY(priv);
+ cfg.threshold_exit = DPAA2_ETH_CN_THRESH_EXIT(priv);
+ } else {
+ /* For priorities not set in the pfc_en mask, we leave
+ * the congestion thresholds at zero, which effectively
+ * disables generation of PFC frames for them
+ */
+ cfg.threshold_entry = 0;
+ cfg.threshold_exit = 0;
+ }
+
+ err = dpni_set_congestion_notification(priv->mc_io, 0,
+ priv->mc_token,
+ DPNI_QUEUE_RX, i, &cfg);
+ if (err) {
+ netdev_err(priv->net_dev,
+ "dpni_set_congestion_notification failed\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_dcbnl_ieee_setpfc(struct net_device *net_dev,
+ struct ieee_pfc *pfc)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct dpni_link_cfg link_cfg = {0};
+ bool tx_pause;
+ int err;
+
+ if (pfc->mbc || pfc->delay)
+ return -EOPNOTSUPP;
+
+ /* If same PFC enabled mask, nothing to do */
+ if (priv->pfc.pfc_en == pfc->pfc_en)
+ return 0;
+
+ /* We allow PFC configuration even if it won't have any effect until
+ * general pause frames are enabled
+ */
+ tx_pause = dpaa2_eth_tx_pause_enabled(priv->link_state.options);
+ if (!dpaa2_eth_rx_pause_enabled(priv->link_state.options) || !tx_pause)
+ netdev_warn(net_dev, "Pause support must be enabled in order for PFC to work!\n");
+
+ link_cfg.rate = priv->link_state.rate;
+ link_cfg.options = priv->link_state.options;
+ if (pfc->pfc_en)
+ link_cfg.options |= DPNI_LINK_OPT_PFC_PAUSE;
+ else
+ link_cfg.options &= ~DPNI_LINK_OPT_PFC_PAUSE;
+ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
+ if (err) {
+ netdev_err(net_dev, "dpni_set_link_cfg failed\n");
+ return err;
+ }
+
+ /* Configure congestion notifications for the enabled priorities */
+ err = dpaa2_eth_set_pfc_cn(priv, pfc->pfc_en);
+ if (err)
+ return err;
+
+ memcpy(&priv->pfc, pfc, sizeof(priv->pfc));
+ priv->pfc_enabled = !!pfc->pfc_en;
+
+ dpaa2_eth_set_rx_taildrop(priv, tx_pause, priv->pfc_enabled);
+
+ return 0;
+}
+
+static u8 dpaa2_eth_dcbnl_getdcbx(struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ return priv->dcbx_mode;
+}
+
+static u8 dpaa2_eth_dcbnl_setdcbx(struct net_device *net_dev, u8 mode)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ return (mode != (priv->dcbx_mode)) ? 1 : 0;
+}
+
+static u8 dpaa2_eth_dcbnl_getcap(struct net_device *net_dev, int capid, u8 *cap)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ switch (capid) {
+ case DCB_CAP_ATTR_PFC:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_PFC_TCS:
+ *cap = 1 << (dpaa2_eth_tc_count(priv) - 1);
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = priv->dcbx_mode;
+ break;
+ default:
+ *cap = false;
+ break;
+ }
+
+ return 0;
+}
+
+const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops = {
+ .ieee_getpfc = dpaa2_eth_dcbnl_ieee_getpfc,
+ .ieee_setpfc = dpaa2_eth_dcbnl_ieee_setpfc,
+ .getdcbx = dpaa2_eth_dcbnl_getdcbx,
+ .setdcbx = dpaa2_eth_dcbnl_setdcbx,
+ .getcap = dpaa2_eth_dcbnl_getcap,
+};
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
new file mode 100644
index 000000000..8356af463
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2015 Freescale Semiconductor Inc.
+ * Copyright 2018-2019 NXP
+ */
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include "dpaa2-eth.h"
+#include "dpaa2-eth-debugfs.h"
+
+#define DPAA2_ETH_DBG_ROOT "dpaa2-eth"
+
+static struct dentry *dpaa2_dbg_root;
+
+static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset)
+{
+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
+ struct rtnl_link_stats64 *stats;
+ struct dpaa2_eth_drv_stats *extras;
+ int i;
+
+ seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name);
+ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s%16s\n",
+ "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf",
+ "Tx SG", "Tx converted to SG", "Enq busy");
+
+ for_each_online_cpu(i) {
+ stats = per_cpu_ptr(priv->percpu_stats, i);
+ extras = per_cpu_ptr(priv->percpu_extras, i);
+ seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n",
+ i,
+ stats->rx_packets,
+ stats->rx_errors,
+ extras->rx_sg_frames,
+ stats->tx_packets,
+ stats->tx_errors,
+ extras->tx_conf_frames,
+ extras->tx_sg_frames,
+ extras->tx_converted_sg_frames,
+ extras->tx_portal_busy);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(dpaa2_dbg_cpu);
+
+static char *fq_type_to_str(struct dpaa2_eth_fq *fq)
+{
+ switch (fq->type) {
+ case DPAA2_RX_FQ:
+ return "Rx";
+ case DPAA2_TX_CONF_FQ:
+ return "Tx conf";
+ default:
+ return "N/A";
+ }
+}
+
+static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
+{
+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
+ struct dpaa2_eth_fq *fq;
+ u32 fcnt, bcnt;
+ int i, err;
+
+ seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name);
+ seq_printf(file, "%s%16s%16s%16s%16s%16s\n",
+ "VFQID", "CPU", "TC", "Type", "Frames", "Pending frames");
+
+ for (i = 0; i < priv->num_fqs; i++) {
+ fq = &priv->fq[i];
+ err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
+ if (err)
+ fcnt = 0;
+
+ /* Skip FQs with no traffic */
+ if (!fq->stats.frames && !fcnt)
+ continue;
+
+ seq_printf(file, "%5d%16d%16d%16s%16llu%16u\n",
+ fq->fqid,
+ fq->target_cpu,
+ fq->tc,
+ fq_type_to_str(fq),
+ fq->stats.frames,
+ fcnt);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(dpaa2_dbg_fqs);
+
+static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
+{
+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
+ struct dpaa2_eth_channel *ch;
+ int i;
+
+ seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name);
+ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n",
+ "CHID", "CPU", "Deq busy", "Frames", "CDANs",
+ "Avg Frm/CDAN", "Buf count");
+
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
+ seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu%16d\n",
+ ch->ch_id,
+ ch->nctx.desired_cpu,
+ ch->stats.dequeue_portal_busy,
+ ch->stats.frames,
+ ch->stats.cdan,
+ div64_u64(ch->stats.frames, ch->stats.cdan),
+ ch->buf_count);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(dpaa2_dbg_ch);
+
+void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
+{
+ struct fsl_mc_device *dpni_dev;
+ struct dentry *dir;
+ char name[10];
+
+ /* Create a directory for the interface */
+ dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
+ snprintf(name, 10, "dpni.%d", dpni_dev->obj_desc.id);
+ dir = debugfs_create_dir(name, dpaa2_dbg_root);
+ priv->dbg.dir = dir;
+
+ /* per-cpu stats file */
+ debugfs_create_file("cpu_stats", 0444, dir, priv, &dpaa2_dbg_cpu_fops);
+
+ /* per-fq stats file */
+ debugfs_create_file("fq_stats", 0444, dir, priv, &dpaa2_dbg_fqs_fops);
+
+ /* per-channel stats file */
+ debugfs_create_file("ch_stats", 0444, dir, priv, &dpaa2_dbg_ch_fops);
+}
+
+void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
+{
+ debugfs_remove_recursive(priv->dbg.dir);
+}
+
+void dpaa2_eth_dbg_init(void)
+{
+ dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL);
+ pr_debug("DPAA2-ETH: debugfs created\n");
+}
+
+void dpaa2_eth_dbg_exit(void)
+{
+ debugfs_remove(dpaa2_dbg_root);
+}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h
new file mode 100644
index 000000000..15598b28f
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2015 Freescale Semiconductor Inc.
+ * Copyright 2018-2019 NXP
+ */
+#ifndef DPAA2_ETH_DEBUGFS_H
+#define DPAA2_ETH_DEBUGFS_H
+
+#include <linux/dcache.h>
+
+struct dpaa2_eth_priv;
+
+struct dpaa2_debugfs {
+ struct dentry *dir;
+};
+
+#ifdef CONFIG_DEBUG_FS
+void dpaa2_eth_dbg_init(void);
+void dpaa2_eth_dbg_exit(void);
+void dpaa2_dbg_add(struct dpaa2_eth_priv *priv);
+void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv);
+#else
+static inline void dpaa2_eth_dbg_init(void) {}
+static inline void dpaa2_eth_dbg_exit(void) {}
+static inline void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) {}
+static inline void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) {}
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* DPAA2_ETH_DEBUGFS_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
new file mode 100644
index 000000000..7fefe1574
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2020 NXP */
+
+#include "dpaa2-eth.h"
+
+#define DPAA2_ETH_TRAP_DROP(_id, _group_id) \
+ DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, 0)
+
+static const struct devlink_trap_group dpaa2_eth_trap_groups_arr[] = {
+ DEVLINK_TRAP_GROUP_GENERIC(PARSER_ERROR_DROPS, 0),
+};
+
+static const struct devlink_trap dpaa2_eth_traps_arr[] = {
+ DPAA2_ETH_TRAP_DROP(VXLAN_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(LLC_SNAP_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(VLAN_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(PPPOE_PPP_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(MPLS_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(ARP_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(IP_1_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(IP_N_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(GRE_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(UDP_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(TCP_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(IPSEC_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(SCTP_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(DCCP_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(GTP_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(ESP_PARSING, PARSER_ERROR_DROPS),
+};
+
+static int dpaa2_eth_dl_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct dpaa2_eth_devlink_priv *dl_priv = devlink_priv(devlink);
+ struct dpaa2_eth_priv *priv = dl_priv->dpaa2_priv;
+ char buf[10];
+ int err;
+
+ err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
+ if (err)
+ return err;
+
+ scnprintf(buf, 10, "%d.%d", priv->dpni_ver_major, priv->dpni_ver_minor);
+ err = devlink_info_version_running_put(req, "dpni", buf);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static struct dpaa2_eth_trap_item *
+dpaa2_eth_dl_trap_item_lookup(struct dpaa2_eth_priv *priv, u16 trap_id)
+{
+ struct dpaa2_eth_trap_data *dpaa2_eth_trap_data = priv->trap_data;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_traps_arr); i++) {
+ if (dpaa2_eth_traps_arr[i].id == trap_id)
+ return &dpaa2_eth_trap_data->trap_items_arr[i];
+ }
+
+ return NULL;
+}
+
+struct dpaa2_eth_trap_item *dpaa2_eth_dl_get_trap(struct dpaa2_eth_priv *priv,
+ struct dpaa2_fapr *fapr)
+{
+ static const struct dpaa2_faf_error_bit {
+ int position;
+ enum devlink_trap_generic_id trap_id;
+ } faf_bits[] = {
+ { .position = 5, .trap_id = DEVLINK_TRAP_GENERIC_ID_VXLAN_PARSING },
+ { .position = 20, .trap_id = DEVLINK_TRAP_GENERIC_ID_LLC_SNAP_PARSING },
+ { .position = 24, .trap_id = DEVLINK_TRAP_GENERIC_ID_VLAN_PARSING },
+ { .position = 26, .trap_id = DEVLINK_TRAP_GENERIC_ID_PPPOE_PPP_PARSING },
+ { .position = 29, .trap_id = DEVLINK_TRAP_GENERIC_ID_MPLS_PARSING },
+ { .position = 31, .trap_id = DEVLINK_TRAP_GENERIC_ID_ARP_PARSING },
+ { .position = 52, .trap_id = DEVLINK_TRAP_GENERIC_ID_IP_1_PARSING },
+ { .position = 61, .trap_id = DEVLINK_TRAP_GENERIC_ID_IP_N_PARSING },
+ { .position = 67, .trap_id = DEVLINK_TRAP_GENERIC_ID_GRE_PARSING },
+ { .position = 71, .trap_id = DEVLINK_TRAP_GENERIC_ID_UDP_PARSING },
+ { .position = 76, .trap_id = DEVLINK_TRAP_GENERIC_ID_TCP_PARSING },
+ { .position = 80, .trap_id = DEVLINK_TRAP_GENERIC_ID_IPSEC_PARSING },
+ { .position = 82, .trap_id = DEVLINK_TRAP_GENERIC_ID_SCTP_PARSING },
+ { .position = 84, .trap_id = DEVLINK_TRAP_GENERIC_ID_DCCP_PARSING },
+ { .position = 88, .trap_id = DEVLINK_TRAP_GENERIC_ID_GTP_PARSING },
+ { .position = 90, .trap_id = DEVLINK_TRAP_GENERIC_ID_ESP_PARSING },
+ };
+ u64 faf_word;
+ u64 mask;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(faf_bits); i++) {
+ if (faf_bits[i].position < 32) {
+ /* Low part of FAF.
+ * position ranges from 31 to 0, mask from 0 to 31.
+ */
+ mask = 1ull << (31 - faf_bits[i].position);
+ faf_word = __le32_to_cpu(fapr->faf_lo);
+ } else {
+ /* High part of FAF.
+ * position ranges from 95 to 32, mask from 0 to 63.
+ */
+ mask = 1ull << (63 - (faf_bits[i].position - 32));
+ faf_word = __le64_to_cpu(fapr->faf_hi);
+ }
+ if (faf_word & mask)
+ return dpaa2_eth_dl_trap_item_lookup(priv, faf_bits[i].trap_id);
+ }
+ return NULL;
+}
+
+static int dpaa2_eth_dl_trap_init(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ void *trap_ctx)
+{
+ struct dpaa2_eth_devlink_priv *dl_priv = devlink_priv(devlink);
+ struct dpaa2_eth_priv *priv = dl_priv->dpaa2_priv;
+ struct dpaa2_eth_trap_item *dpaa2_eth_trap_item;
+
+ dpaa2_eth_trap_item = dpaa2_eth_dl_trap_item_lookup(priv, trap->id);
+ if (WARN_ON(!dpaa2_eth_trap_item))
+ return -ENOENT;
+
+ dpaa2_eth_trap_item->trap_ctx = trap_ctx;
+
+ return 0;
+}
+
+static int dpaa2_eth_dl_trap_action_set(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack)
+{
+ /* No support for changing the action of an independent packet trap,
+ * only per trap group - parser error drops
+ */
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot change trap action independently of group");
+ return -EOPNOTSUPP;
+}
+
+static int dpaa2_eth_dl_trap_group_action_set(struct devlink *devlink,
+ const struct devlink_trap_group *group,
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack)
+{
+ struct dpaa2_eth_devlink_priv *dl_priv = devlink_priv(devlink);
+ struct dpaa2_eth_priv *priv = dl_priv->dpaa2_priv;
+ struct net_device *net_dev = priv->net_dev;
+ struct device *dev = net_dev->dev.parent;
+ struct dpni_error_cfg err_cfg = {0};
+ int err;
+
+ if (group->id != DEVLINK_TRAP_GROUP_GENERIC_ID_PARSER_ERROR_DROPS)
+ return -EOPNOTSUPP;
+
+ /* Configure handling of frames marked as errors from the parser */
+ err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
+ err_cfg.set_frame_annotation = 1;
+
+ switch (action) {
+ case DEVLINK_TRAP_ACTION_DROP:
+ err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
+ break;
+ case DEVLINK_TRAP_ACTION_TRAP:
+ err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token, &err_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_errors_behavior failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct devlink_ops dpaa2_eth_devlink_ops = {
+ .info_get = dpaa2_eth_dl_info_get,
+ .trap_init = dpaa2_eth_dl_trap_init,
+ .trap_action_set = dpaa2_eth_dl_trap_action_set,
+ .trap_group_action_set = dpaa2_eth_dl_trap_group_action_set,
+};
+
+int dpaa2_eth_dl_alloc(struct dpaa2_eth_priv *priv)
+{
+ struct net_device *net_dev = priv->net_dev;
+ struct device *dev = net_dev->dev.parent;
+ struct dpaa2_eth_devlink_priv *dl_priv;
+
+ priv->devlink =
+ devlink_alloc(&dpaa2_eth_devlink_ops, sizeof(*dl_priv), dev);
+ if (!priv->devlink) {
+ dev_err(dev, "devlink_alloc failed\n");
+ return -ENOMEM;
+ }
+ dl_priv = devlink_priv(priv->devlink);
+ dl_priv->dpaa2_priv = priv;
+ return 0;
+}
+
+void dpaa2_eth_dl_free(struct dpaa2_eth_priv *priv)
+{
+ devlink_free(priv->devlink);
+}
+
+void dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv)
+{
+ devlink_register(priv->devlink);
+}
+
+void dpaa2_eth_dl_unregister(struct dpaa2_eth_priv *priv)
+{
+ devlink_unregister(priv->devlink);
+}
+
+int dpaa2_eth_dl_port_add(struct dpaa2_eth_priv *priv)
+{
+ struct devlink_port *devlink_port = &priv->devlink_port;
+ struct devlink_port_attrs attrs = {};
+ int err;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ devlink_port_attrs_set(devlink_port, &attrs);
+
+ err = devlink_port_register(priv->devlink, devlink_port, 0);
+ if (err)
+ return err;
+
+ devlink_port_type_eth_set(devlink_port, priv->net_dev);
+
+ return 0;
+}
+
+void dpaa2_eth_dl_port_del(struct dpaa2_eth_priv *priv)
+{
+ struct devlink_port *devlink_port = &priv->devlink_port;
+
+ devlink_port_type_clear(devlink_port);
+ devlink_port_unregister(devlink_port);
+}
+
+int dpaa2_eth_dl_traps_register(struct dpaa2_eth_priv *priv)
+{
+ struct dpaa2_eth_trap_data *dpaa2_eth_trap_data;
+ struct net_device *net_dev = priv->net_dev;
+ struct device *dev = net_dev->dev.parent;
+ int err;
+
+ dpaa2_eth_trap_data = kzalloc(sizeof(*dpaa2_eth_trap_data), GFP_KERNEL);
+ if (!dpaa2_eth_trap_data)
+ return -ENOMEM;
+ priv->trap_data = dpaa2_eth_trap_data;
+
+ dpaa2_eth_trap_data->trap_items_arr = kcalloc(ARRAY_SIZE(dpaa2_eth_traps_arr),
+ sizeof(struct dpaa2_eth_trap_item),
+ GFP_KERNEL);
+ if (!dpaa2_eth_trap_data->trap_items_arr) {
+ err = -ENOMEM;
+ goto trap_data_free;
+ }
+
+ err = devlink_trap_groups_register(priv->devlink, dpaa2_eth_trap_groups_arr,
+ ARRAY_SIZE(dpaa2_eth_trap_groups_arr));
+ if (err) {
+ dev_err(dev, "devlink_trap_groups_register() = %d\n", err);
+ goto trap_items_arr_free;
+ }
+
+ err = devlink_traps_register(priv->devlink, dpaa2_eth_traps_arr,
+ ARRAY_SIZE(dpaa2_eth_traps_arr), priv);
+ if (err) {
+ dev_err(dev, "devlink_traps_register() = %d\n", err);
+ goto trap_groups_unregister;
+ }
+
+ return 0;
+
+trap_groups_unregister:
+ devlink_trap_groups_unregister(priv->devlink, dpaa2_eth_trap_groups_arr,
+ ARRAY_SIZE(dpaa2_eth_trap_groups_arr));
+trap_items_arr_free:
+ kfree(dpaa2_eth_trap_data->trap_items_arr);
+trap_data_free:
+ kfree(dpaa2_eth_trap_data);
+ priv->trap_data = NULL;
+
+ return err;
+}
+
+void dpaa2_eth_dl_traps_unregister(struct dpaa2_eth_priv *priv)
+{
+ devlink_traps_unregister(priv->devlink, dpaa2_eth_traps_arr,
+ ARRAY_SIZE(dpaa2_eth_traps_arr));
+ devlink_trap_groups_unregister(priv->devlink, dpaa2_eth_trap_groups_arr,
+ ARRAY_SIZE(dpaa2_eth_trap_groups_arr));
+ kfree(priv->trap_data->trap_items_arr);
+ kfree(priv->trap_data);
+}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
new file mode 100644
index 000000000..5fb5f14e0
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2014-2015 Freescale Semiconductor Inc.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dpaa2_eth
+
+#if !defined(_DPAA2_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _DPAA2_ETH_TRACE_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/tracepoint.h>
+
+#define TR_FMT "[%s] fd: addr=0x%llx, len=%u, off=%u"
+/* trace_printk format for raw buffer event class */
+#define TR_BUF_FMT "[%s] vaddr=%p size=%zu dma_addr=%pad map_size=%zu bpid=%d"
+
+/* This is used to declare a class of events.
+ * Individual events of this type will be defined below.
+ */
+
+/* Store details about a frame descriptor */
+DECLARE_EVENT_CLASS(dpaa2_eth_fd,
+ /* Trace function prototype */
+ TP_PROTO(struct net_device *netdev,
+ const struct dpaa2_fd *fd),
+
+ /* Repeat argument list here */
+ TP_ARGS(netdev, fd),
+
+ /* A structure containing the relevant information we want
+ * to record. Declare name and type for each normal element,
+ * name, type and size for arrays. Use __string for variable
+ * length strings.
+ */
+ TP_STRUCT__entry(
+ __field(u64, fd_addr)
+ __field(u32, fd_len)
+ __field(u16, fd_offset)
+ __string(name, netdev->name)
+ ),
+
+ /* The function that assigns values to the above declared
+ * fields
+ */
+ TP_fast_assign(
+ __entry->fd_addr = dpaa2_fd_get_addr(fd);
+ __entry->fd_len = dpaa2_fd_get_len(fd);
+ __entry->fd_offset = dpaa2_fd_get_offset(fd);
+ __assign_str(name, netdev->name);
+ ),
+
+ /* This is what gets printed when the trace event is
+ * triggered.
+ */
+ TP_printk(TR_FMT,
+ __get_str(name),
+ __entry->fd_addr,
+ __entry->fd_len,
+ __entry->fd_offset)
+);
+
+/* Now declare events of the above type. Format is:
+ * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class
+ */
+
+/* Tx (egress) fd */
+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd,
+ TP_PROTO(struct net_device *netdev,
+ const struct dpaa2_fd *fd),
+
+ TP_ARGS(netdev, fd)
+);
+
+/* Rx fd */
+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd,
+ TP_PROTO(struct net_device *netdev,
+ const struct dpaa2_fd *fd),
+
+ TP_ARGS(netdev, fd)
+);
+
+/* Tx confirmation fd */
+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd,
+ TP_PROTO(struct net_device *netdev,
+ const struct dpaa2_fd *fd),
+
+ TP_ARGS(netdev, fd)
+);
+
+/* Log data about raw buffers. Useful for tracing DPBP content. */
+TRACE_EVENT(dpaa2_eth_buf_seed,
+ /* Trace function prototype */
+ TP_PROTO(struct net_device *netdev,
+ /* virtual address and size */
+ void *vaddr,
+ size_t size,
+ /* dma map address and size */
+ dma_addr_t dma_addr,
+ size_t map_size,
+ /* buffer pool id, if relevant */
+ u16 bpid),
+
+ /* Repeat argument list here */
+ TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid),
+
+ /* A structure containing the relevant information we want
+ * to record. Declare name and type for each normal element,
+ * name, type and size for arrays. Use __string for variable
+ * length strings.
+ */
+ TP_STRUCT__entry(
+ __field(void *, vaddr)
+ __field(size_t, size)
+ __field(dma_addr_t, dma_addr)
+ __field(size_t, map_size)
+ __field(u16, bpid)
+ __string(name, netdev->name)
+ ),
+
+ /* The function that assigns values to the above declared
+ * fields
+ */
+ TP_fast_assign(
+ __entry->vaddr = vaddr;
+ __entry->size = size;
+ __entry->dma_addr = dma_addr;
+ __entry->map_size = map_size;
+ __entry->bpid = bpid;
+ __assign_str(name, netdev->name);
+ ),
+
+ /* This is what gets printed when the trace event is
+ * triggered.
+ */
+ TP_printk(TR_BUF_FMT,
+ __get_str(name),
+ __entry->vaddr,
+ __entry->size,
+ &__entry->dma_addr,
+ __entry->map_size,
+ __entry->bpid)
+);
+
+/* If only one event of a certain type needs to be declared, use TRACE_EVENT().
+ * The syntax is the same as for DECLARE_EVENT_CLASS().
+ */
+
+#endif /* _DPAA2_ETH_TRACE_H */
+
+/* This must be outside ifdef _DPAA2_ETH_TRACE_H */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE dpaa2-eth-trace
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
new file mode 100644
index 000000000..de62eee58
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -0,0 +1,4896 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2020 NXP
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/etherdevice.h>
+#include <linux/of_net.h>
+#include <linux/interrupt.h>
+#include <linux/msi.h>
+#include <linux/kthread.h>
+#include <linux/iommu.h>
+#include <linux/fsl/mc.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <linux/fsl/ptp_qoriq.h>
+#include <linux/ptp_classify.h>
+#include <net/pkt_cls.h>
+#include <net/sock.h>
+#include <net/tso.h>
+
+#include "dpaa2-eth.h"
+
+/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
+ * using trace events only need to #include <trace/events/sched.h>
+ */
+#define CREATE_TRACE_POINTS
+#include "dpaa2-eth-trace.h"
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Freescale Semiconductor, Inc");
+MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
+
+struct ptp_qoriq *dpaa2_ptp;
+EXPORT_SYMBOL(dpaa2_ptp);
+
+static void dpaa2_eth_detect_features(struct dpaa2_eth_priv *priv)
+{
+ priv->features = 0;
+
+ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_PTP_ONESTEP_VER_MAJOR,
+ DPNI_PTP_ONESTEP_VER_MINOR) >= 0)
+ priv->features |= DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT;
+}
+
+static void dpaa2_update_ptp_onestep_indirect(struct dpaa2_eth_priv *priv,
+ u32 offset, u8 udp)
+{
+ struct dpni_single_step_cfg cfg;
+
+ cfg.en = 1;
+ cfg.ch_update = udp;
+ cfg.offset = offset;
+ cfg.peer_delay = 0;
+
+ if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token, &cfg))
+ WARN_ONCE(1, "Failed to set single step register");
+}
+
+static void dpaa2_update_ptp_onestep_direct(struct dpaa2_eth_priv *priv,
+ u32 offset, u8 udp)
+{
+ u32 val = 0;
+
+ val = DPAA2_PTP_SINGLE_STEP_ENABLE |
+ DPAA2_PTP_SINGLE_CORRECTION_OFF(offset);
+
+ if (udp)
+ val |= DPAA2_PTP_SINGLE_STEP_CH;
+
+ if (priv->onestep_reg_base)
+ writel(val, priv->onestep_reg_base);
+}
+
+static void dpaa2_ptp_onestep_reg_update_method(struct dpaa2_eth_priv *priv)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_single_step_cfg ptp_cfg;
+
+ priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_indirect;
+
+ if (!(priv->features & DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT))
+ return;
+
+ if (dpni_get_single_step_cfg(priv->mc_io, 0,
+ priv->mc_token, &ptp_cfg)) {
+ dev_err(dev, "dpni_get_single_step_cfg cannot retrieve onestep reg, falling back to indirect update\n");
+ return;
+ }
+
+ if (!ptp_cfg.ptp_onestep_reg_base) {
+ dev_err(dev, "1588 onestep reg not available, falling back to indirect update\n");
+ return;
+ }
+
+ priv->onestep_reg_base = ioremap(ptp_cfg.ptp_onestep_reg_base,
+ sizeof(u32));
+ if (!priv->onestep_reg_base) {
+ dev_err(dev, "1588 onestep reg cannot be mapped, falling back to indirect update\n");
+ return;
+ }
+
+ priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_direct;
+}
+
+static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
+ dma_addr_t iova_addr)
+{
+ phys_addr_t phys_addr;
+
+ phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
+
+ return phys_to_virt(phys_addr);
+}
+
+static void dpaa2_eth_validate_rx_csum(struct dpaa2_eth_priv *priv,
+ u32 fd_status,
+ struct sk_buff *skb)
+{
+ skb_checksum_none_assert(skb);
+
+ /* HW checksum validation is disabled, nothing to do here */
+ if (!(priv->net_dev->features & NETIF_F_RXCSUM))
+ return;
+
+ /* Read checksum validation bits */
+ if (!((fd_status & DPAA2_FAS_L3CV) &&
+ (fd_status & DPAA2_FAS_L4CV)))
+ return;
+
+ /* Inform the stack there's no need to compute L3/L4 csum anymore */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+/* Free a received FD.
+ * Not to be used for Tx conf FDs or on any other paths.
+ */
+static void dpaa2_eth_free_rx_fd(struct dpaa2_eth_priv *priv,
+ const struct dpaa2_fd *fd,
+ void *vaddr)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
+ u8 fd_format = dpaa2_fd_get_format(fd);
+ struct dpaa2_sg_entry *sgt;
+ void *sg_vaddr;
+ int i;
+
+ /* If single buffer frame, just free the data buffer */
+ if (fd_format == dpaa2_fd_single)
+ goto free_buf;
+ else if (fd_format != dpaa2_fd_sg)
+ /* We don't support any other format */
+ return;
+
+ /* For S/G frames, we first need to free all SG entries
+ * except the first one, which was taken care of already
+ */
+ sgt = vaddr + dpaa2_fd_get_offset(fd);
+ for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
+ addr = dpaa2_sg_get_addr(&sgt[i]);
+ sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
+ dma_unmap_page(dev, addr, priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+
+ free_pages((unsigned long)sg_vaddr, 0);
+ if (dpaa2_sg_is_final(&sgt[i]))
+ break;
+ }
+
+free_buf:
+ free_pages((unsigned long)vaddr, 0);
+}
+
+/* Build a linear skb based on a single-buffer frame descriptor */
+static struct sk_buff *dpaa2_eth_build_linear_skb(struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ void *fd_vaddr)
+{
+ struct sk_buff *skb = NULL;
+ u16 fd_offset = dpaa2_fd_get_offset(fd);
+ u32 fd_length = dpaa2_fd_get_len(fd);
+
+ ch->buf_count--;
+
+ skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_reserve(skb, fd_offset);
+ skb_put(skb, fd_length);
+
+ return skb;
+}
+
+/* Build a non linear (fragmented) skb based on a S/G table */
+static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_sg_entry *sgt)
+{
+ struct sk_buff *skb = NULL;
+ struct device *dev = priv->net_dev->dev.parent;
+ void *sg_vaddr;
+ dma_addr_t sg_addr;
+ u16 sg_offset;
+ u32 sg_length;
+ struct page *page, *head_page;
+ int page_offset;
+ int i;
+
+ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
+ struct dpaa2_sg_entry *sge = &sgt[i];
+
+ /* NOTE: We only support SG entries in dpaa2_sg_single format,
+ * but this is the only format we may receive from HW anyway
+ */
+
+ /* Get the address and length from the S/G entry */
+ sg_addr = dpaa2_sg_get_addr(sge);
+ sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
+ dma_unmap_page(dev, sg_addr, priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+
+ sg_length = dpaa2_sg_get_len(sge);
+
+ if (i == 0) {
+ /* We build the skb around the first data buffer */
+ skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
+ if (unlikely(!skb)) {
+ /* Free the first SG entry now, since we already
+ * unmapped it and obtained the virtual address
+ */
+ free_pages((unsigned long)sg_vaddr, 0);
+
+ /* We still need to subtract the buffers used
+ * by this FD from our software counter
+ */
+ while (!dpaa2_sg_is_final(&sgt[i]) &&
+ i < DPAA2_ETH_MAX_SG_ENTRIES)
+ i++;
+ break;
+ }
+
+ sg_offset = dpaa2_sg_get_offset(sge);
+ skb_reserve(skb, sg_offset);
+ skb_put(skb, sg_length);
+ } else {
+ /* Rest of the data buffers are stored as skb frags */
+ page = virt_to_page(sg_vaddr);
+ head_page = virt_to_head_page(sg_vaddr);
+
+ /* Offset in page (which may be compound).
+ * Data in subsequent SG entries is stored from the
+ * beginning of the buffer, so we don't need to add the
+ * sg_offset.
+ */
+ page_offset = ((unsigned long)sg_vaddr &
+ (PAGE_SIZE - 1)) +
+ (page_address(page) - page_address(head_page));
+
+ skb_add_rx_frag(skb, i - 1, head_page, page_offset,
+ sg_length, priv->rx_buf_size);
+ }
+
+ if (dpaa2_sg_is_final(sge))
+ break;
+ }
+
+ WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
+
+ /* Count all data buffers + SG table buffer */
+ ch->buf_count -= i + 2;
+
+ return skb;
+}
+
+/* Free buffers acquired from the buffer pool or which were meant to
+ * be released into the pool
+ */
+static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array,
+ int count)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ void *vaddr;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
+ dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+ free_pages((unsigned long)vaddr, 0);
+ }
+}
+
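+/* Return an Rx buffer to the hardware buffer pool. Buffers are batched per
+ * channel and released DPAA2_ETH_BUFS_PER_CMD at a time; if the release
+ * command keeps failing, the batch is freed back to the kernel instead.
+ */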
+static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ dma_addr_t addr)
+{
+ int retries = 0;
+ int err;
+
+ ch->recycled_bufs[ch->recycled_bufs_cnt++] = addr;
+ if (ch->recycled_bufs_cnt < DPAA2_ETH_BUFS_PER_CMD)
+ return;
+
+ while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
+ ch->recycled_bufs,
+ ch->recycled_bufs_cnt)) == -EBUSY) {
+ if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
+ break;
+ cpu_relax();
+ }
+
+ if (err) {
+ dpaa2_eth_free_bufs(priv, ch->recycled_bufs, ch->recycled_bufs_cnt);
+ ch->buf_count -= ch->recycled_bufs_cnt;
+ }
+
+ ch->recycled_bufs_cnt = 0;
+}
+
+static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq,
+ struct dpaa2_eth_xdp_fds *xdp_fds)
+{
+ int total_enqueued = 0, retries = 0, enqueued;
+ struct dpaa2_eth_drv_stats *percpu_extras;
+ int num_fds, err, max_retries;
+ struct dpaa2_fd *fds;
+
+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
+
+ /* try to enqueue all the FDs until the max number of retries is hit */
+ fds = xdp_fds->fds;
+ num_fds = xdp_fds->num;
+ max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
+ while (total_enqueued < num_fds && retries < max_retries) {
+ err = priv->enqueue(priv, fq, &fds[total_enqueued],
+ 0, num_fds - total_enqueued, &enqueued);
+ if (err == -EBUSY) {
+ percpu_extras->tx_portal_busy += ++retries;
+ continue;
+ }
+ total_enqueued += enqueued;
+ }
+ xdp_fds->num = 0;
+
+ return total_enqueued;
+}
+
+static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_eth_fq *fq)
+{
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa2_fd *fds;
+ int enqueued, i;
+
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+
+	/* enqueue the array of XDP_TX frames */
+ enqueued = dpaa2_eth_xdp_flush(priv, fq, &fq->xdp_tx_fds);
+
+ /* update statistics */
+ percpu_stats->tx_packets += enqueued;
+ fds = fq->xdp_tx_fds.fds;
+ for (i = 0; i < enqueued; i++) {
+ percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
+ ch->stats.xdp_tx++;
+ }
+ for (i = enqueued; i < fq->xdp_tx_fds.num; i++) {
+ dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
+ percpu_stats->tx_errors++;
+ ch->stats.xdp_tx_err++;
+ }
+ fq->xdp_tx_fds.num = 0;
+}
+
+static void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_fd *fd,
+ void *buf_start, u16 queue_id)
+{
+ struct dpaa2_faead *faead;
+ struct dpaa2_fd *dest_fd;
+ struct dpaa2_eth_fq *fq;
+ u32 ctrl, frc;
+
+ /* Mark the egress frame hardware annotation area as valid */
+ frc = dpaa2_fd_get_frc(fd);
+ dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
+ dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);
+
+ /* Instruct hardware to release the FD buffer directly into
+ * the buffer pool once transmission is completed, instead of
+ * sending a Tx confirmation frame to us
+ */
+ ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
+ faead = dpaa2_get_faead(buf_start, false);
+ faead->ctrl = cpu_to_le32(ctrl);
+ faead->conf_fqid = 0;
+
+ fq = &priv->fq[queue_id];
+ dest_fd = &fq->xdp_tx_fds.fds[fq->xdp_tx_fds.num++];
+ memcpy(dest_fd, fd, sizeof(*dest_fd));
+
+ if (fq->xdp_tx_fds.num < DEV_MAP_BULK_SIZE)
+ return;
+
+ dpaa2_eth_xdp_tx_flush(priv, ch, fq);
+}
+
+static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_eth_fq *rx_fq,
+ struct dpaa2_fd *fd, void *vaddr)
+{
+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
+ struct bpf_prog *xdp_prog;
+ struct xdp_buff xdp;
+ u32 xdp_act = XDP_PASS;
+ int err, offset;
+
+ xdp_prog = READ_ONCE(ch->xdp.prog);
+ if (!xdp_prog)
+ goto out;
+
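+	/* With an XDP program attached, the Rx buffer layout reserves
+	 * XDP_PACKET_HEADROOM bytes in front of the frame data; expose that
+	 * headroom and the rest of the raw buffer to the program.
+	 */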
+ offset = dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM;
+ xdp_init_buff(&xdp, DPAA2_ETH_RX_BUF_RAW_SIZE - offset, &ch->xdp_rxq);
+ xdp_prepare_buff(&xdp, vaddr + offset, XDP_PACKET_HEADROOM,
+ dpaa2_fd_get_len(fd), false);
+
+ xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+ /* xdp.data pointer may have changed */
+ dpaa2_fd_set_offset(fd, xdp.data - vaddr);
+ dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);
+
+ switch (xdp_act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
+ fallthrough;
+ case XDP_DROP:
+ dpaa2_eth_recycle_buf(priv, ch, addr);
+ ch->stats.xdp_drop++;
+ break;
+ case XDP_REDIRECT:
+ dma_unmap_page(priv->net_dev->dev.parent, addr,
+ priv->rx_buf_size, DMA_BIDIRECTIONAL);
+ ch->buf_count--;
+
+ /* Allow redirect use of full headroom */
+ xdp.data_hard_start = vaddr;
+ xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE;
+
+ err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
+ if (unlikely(err)) {
+ addr = dma_map_page(priv->net_dev->dev.parent,
+ virt_to_page(vaddr), 0,
+ priv->rx_buf_size, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(priv->net_dev->dev.parent, addr))) {
+ free_pages((unsigned long)vaddr, 0);
+ } else {
+ ch->buf_count++;
+ dpaa2_eth_recycle_buf(priv, ch, addr);
+ }
+ ch->stats.xdp_drop++;
+ } else {
+ ch->stats.xdp_redirect++;
+ }
+ break;
+ }
+
+ ch->xdp.res |= xdp_act;
+out:
+ return xdp_act;
+}
+
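+/* For frames shorter than the rx_copybreak threshold, copy the data into a
+ * new skb allocated from the NAPI cache; the caller can then recycle the
+ * original Rx buffer back to the pool without unmapping it.
+ */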
+static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ void *fd_vaddr)
+{
+ u16 fd_offset = dpaa2_fd_get_offset(fd);
+ struct dpaa2_eth_priv *priv = ch->priv;
+ u32 fd_length = dpaa2_fd_get_len(fd);
+ struct sk_buff *skb = NULL;
+ unsigned int skb_len;
+
+ if (fd_length > priv->rx_copybreak)
+ return NULL;
+
+ skb_len = fd_length + dpaa2_eth_needed_headroom(NULL);
+
+ skb = napi_alloc_skb(&ch->napi, skb_len);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, dpaa2_eth_needed_headroom(NULL));
+ skb_put(skb, fd_length);
+
+ memcpy(skb->data, fd_vaddr + fd_offset, fd_length);
+
+ return skb;
+}
+
+/* Main Rx frame processing routine */
+static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ struct dpaa2_eth_fq *fq)
+{
+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
+ u8 fd_format = dpaa2_fd_get_format(fd);
+ void *vaddr;
+ struct sk_buff *skb;
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa2_eth_drv_stats *percpu_extras;
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpaa2_fas *fas;
+ bool recycle_rx_buf = false;
+ void *buf_data;
+ u32 status = 0;
+ u32 xdp_act;
+
+ /* Tracing point */
+ trace_dpaa2_rx_fd(priv->net_dev, fd);
+
+ vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
+ dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+
+ fas = dpaa2_get_fas(vaddr, false);
+ prefetch(fas);
+ buf_data = vaddr + dpaa2_fd_get_offset(fd);
+ prefetch(buf_data);
+
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
+
+ if (fd_format == dpaa2_fd_single) {
+ xdp_act = dpaa2_eth_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
+ if (xdp_act != XDP_PASS) {
+ percpu_stats->rx_packets++;
+ percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
+ return;
+ }
+
+ skb = dpaa2_eth_copybreak(ch, fd, vaddr);
+ if (!skb) {
+ dma_unmap_page(dev, addr, priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+ skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
+ } else {
+ recycle_rx_buf = true;
+ }
+ } else if (fd_format == dpaa2_fd_sg) {
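+		/* XDP programs only run on single buffer frames; the MTU is
+		 * limited while a program is attached, so scatter-gather
+		 * frames are not expected here.
+		 */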
+ WARN_ON(priv->xdp_prog);
+
+ dma_unmap_page(dev, addr, priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+ skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
+ free_pages((unsigned long)vaddr, 0);
+ percpu_extras->rx_sg_frames++;
+ percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
+ } else {
+ /* We don't support any other format */
+ goto err_frame_format;
+ }
+
+ if (unlikely(!skb))
+ goto err_build_skb;
+
+ prefetch(skb->data);
+
+ /* Get the timestamp value */
+ if (priv->rx_tstamp) {
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+ __le64 *ts = dpaa2_get_ts(vaddr, false);
+ u64 ns;
+
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+
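+		/* The hardware timestamp is expressed in DPAA2 PTP clock
+		 * cycles; convert it to nanoseconds before passing it to the
+		 * stack.
+		 */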
+ ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
+ shhwtstamps->hwtstamp = ns_to_ktime(ns);
+ }
+
+ /* Check if we need to validate the L4 csum */
+ if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
+ status = le32_to_cpu(fas->status);
+ dpaa2_eth_validate_rx_csum(priv, status, skb);
+ }
+
+ skb->protocol = eth_type_trans(skb, priv->net_dev);
+ skb_record_rx_queue(skb, fq->flowid);
+
+ percpu_stats->rx_packets++;
+ percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
+ ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd);
+
+ list_add_tail(&skb->list, ch->rx_list);
+
+ if (recycle_rx_buf)
+ dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));
+ return;
+
+err_build_skb:
+ dpaa2_eth_free_rx_fd(priv, fd, vaddr);
+err_frame_format:
+ percpu_stats->rx_dropped++;
+}
+
+/* Processing of Rx frames received on the error FQ
+ * We check the error bits, report the frame to devlink traps if applicable, then free it
+ */
+static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ struct dpaa2_eth_fq *fq __always_unused)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
+ u8 fd_format = dpaa2_fd_get_format(fd);
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa2_eth_trap_item *trap_item;
+ struct dpaa2_fapr *fapr;
+ struct sk_buff *skb;
+ void *buf_data;
+ void *vaddr;
+
+ vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
+ dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+
+ buf_data = vaddr + dpaa2_fd_get_offset(fd);
+
+ if (fd_format == dpaa2_fd_single) {
+ dma_unmap_page(dev, addr, priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+ skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
+ } else if (fd_format == dpaa2_fd_sg) {
+ dma_unmap_page(dev, addr, priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+ skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
+ free_pages((unsigned long)vaddr, 0);
+ } else {
+ /* We don't support any other format */
+ dpaa2_eth_free_rx_fd(priv, fd, vaddr);
+ goto err_frame_format;
+ }
+
+ fapr = dpaa2_get_fapr(vaddr, false);
+ trap_item = dpaa2_eth_dl_get_trap(priv, fapr);
+ if (trap_item)
+ devlink_trap_report(priv->devlink, skb, trap_item->trap_ctx,
+ &priv->devlink_port, NULL);
+ consume_skb(skb);
+
+err_frame_format:
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+ percpu_stats->rx_errors++;
+ ch->buf_count--;
+}
+
+/* Consume all frames pull-dequeued into the store. This is the simplest way to
+ * make sure we don't accidentally issue another volatile dequeue which would
+ * overwrite (leak) frames already in the store.
+ *
+ * Observance of NAPI budget is not our concern, leaving that to the caller.
+ */
+static int dpaa2_eth_consume_frames(struct dpaa2_eth_channel *ch,
+ struct dpaa2_eth_fq **src)
+{
+ struct dpaa2_eth_priv *priv = ch->priv;
+ struct dpaa2_eth_fq *fq = NULL;
+ struct dpaa2_dq *dq;
+ const struct dpaa2_fd *fd;
+ int cleaned = 0, retries = 0;
+ int is_last;
+
+ do {
+ dq = dpaa2_io_store_next(ch->store, &is_last);
+ if (unlikely(!dq)) {
+ /* If we're here, we *must* have placed a
+			 * volatile dequeue command, so keep reading through
+ * the store until we get some sort of valid response
+ * token (either a valid frame or an "empty dequeue")
+ */
+ if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) {
+ netdev_err_once(priv->net_dev,
+ "Unable to read a valid dequeue response\n");
+ return -ETIMEDOUT;
+ }
+ continue;
+ }
+
+ fd = dpaa2_dq_fd(dq);
+ fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
+
+ fq->consume(priv, ch, fd, fq);
+ cleaned++;
+ retries = 0;
+ } while (!is_last);
+
+ if (!cleaned)
+ return 0;
+
+ fq->stats.frames += cleaned;
+ ch->stats.frames += cleaned;
+ ch->stats.frames_per_cdan += cleaned;
+
+ /* A dequeue operation only pulls frames from a single queue
+ * into the store. Return the frame queue as an out param.
+ */
+ if (src)
+ *src = fq;
+
+ return cleaned;
+}
+
+static int dpaa2_eth_ptp_parse(struct sk_buff *skb,
+ u8 *msgtype, u8 *twostep, u8 *udp,
+ u16 *correction_offset,
+ u16 *origintimestamp_offset)
+{
+ unsigned int ptp_class;
+ struct ptp_header *hdr;
+ unsigned int type;
+ u8 *base;
+
+ ptp_class = ptp_classify_raw(skb);
+ if (ptp_class == PTP_CLASS_NONE)
+ return -EINVAL;
+
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
+ return -EINVAL;
+
+ *msgtype = ptp_get_msgtype(hdr, ptp_class);
+ *twostep = hdr->flag_field[0] & 0x2;
+
+ type = ptp_class & PTP_CLASS_PMASK;
+ if (type == PTP_CLASS_IPV4 ||
+ type == PTP_CLASS_IPV6)
+ *udp = 1;
+ else
+ *udp = 0;
+
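+	/* Both offsets are reported relative to the MAC header, so the Tx
+	 * path can patch the correctionField and originTimestamp fields in
+	 * place.
+	 */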
+ base = skb_mac_header(skb);
+ *correction_offset = (u8 *)&hdr->correction - base;
+ *origintimestamp_offset = (u8 *)hdr + sizeof(struct ptp_header) - base;
+
+ return 0;
+}
+
+/* Configure the egress frame annotation for timestamp update */
+static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
+ struct dpaa2_fd *fd,
+ void *buf_start,
+ struct sk_buff *skb)
+{
+ struct ptp_tstamp origin_timestamp;
+ u8 msgtype, twostep, udp;
+ struct dpaa2_faead *faead;
+ struct dpaa2_fas *fas;
+ struct timespec64 ts;
+ u16 offset1, offset2;
+ u32 ctrl, frc;
+ __le64 *ns;
+ u8 *data;
+
+ /* Mark the egress frame annotation area as valid */
+ frc = dpaa2_fd_get_frc(fd);
+ dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
+
+ /* Set hardware annotation size */
+ ctrl = dpaa2_fd_get_ctrl(fd);
+ dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);
+
+	/* enable UPD (update prepended data) bit in FAEAD field of
+ * hardware frame annotation area
+ */
+ ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
+ faead = dpaa2_get_faead(buf_start, true);
+ faead->ctrl = cpu_to_le32(ctrl);
+
+ if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
+ if (dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
+ &offset1, &offset2) ||
+ msgtype != PTP_MSGTYPE_SYNC || twostep) {
+ WARN_ONCE(1, "Bad packet for one-step timestamping\n");
+ return;
+ }
+
+ /* Mark the frame annotation status as valid */
+ frc = dpaa2_fd_get_frc(fd);
+ dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FASV);
+
+ /* Mark the PTP flag for one step timestamping */
+ fas = dpaa2_get_fas(buf_start, true);
+ fas->status = cpu_to_le32(DPAA2_FAS_PTP);
+
+ dpaa2_ptp->caps.gettime64(&dpaa2_ptp->caps, &ts);
+ ns = dpaa2_get_ts(buf_start, true);
+ *ns = cpu_to_le64(timespec64_to_ns(&ts) /
+ DPAA2_PTP_CLK_PERIOD_NS);
+
+ /* Update current time to PTP message originTimestamp field */
+ ns_to_ptp_tstamp(&origin_timestamp, le64_to_cpup(ns));
+ data = skb_mac_header(skb);
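+		/* originTimestamp layout: 48-bit seconds (16-bit MSB followed
+		 * by 32-bit LSB), then 32-bit nanoseconds, hence the +2 and +6
+		 * byte offsets below.
+		 */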
+ *(__be16 *)(data + offset2) = htons(origin_timestamp.sec_msb);
+ *(__be32 *)(data + offset2 + 2) =
+ htonl(origin_timestamp.sec_lsb);
+ *(__be32 *)(data + offset2 + 6) = htonl(origin_timestamp.nsec);
+
+ if (priv->ptp_correction_off == offset1)
+ return;
+
+ priv->dpaa2_set_onestep_params_cb(priv, offset1, udp);
+ priv->ptp_correction_off = offset1;
+ }
+}
+
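+/* Get a buffer for a scatter-gather table, either from the per-CPU cache or
+ * freshly allocated; the buffer is zeroed out before being returned.
+ */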
+static void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv)
+{
+ struct dpaa2_eth_sgt_cache *sgt_cache;
+ void *sgt_buf = NULL;
+ int sgt_buf_size;
+
+ sgt_cache = this_cpu_ptr(priv->sgt_cache);
+ sgt_buf_size = priv->tx_data_offset +
+ DPAA2_ETH_SG_ENTRIES_MAX * sizeof(struct dpaa2_sg_entry);
+
+ if (sgt_cache->count == 0)
+ sgt_buf = napi_alloc_frag_align(sgt_buf_size, DPAA2_ETH_TX_BUF_ALIGN);
+ else
+ sgt_buf = sgt_cache->buf[--sgt_cache->count];
+ if (!sgt_buf)
+ return NULL;
+
+ memset(sgt_buf, 0, sgt_buf_size);
+
+ return sgt_buf;
+}
+
+static void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf)
+{
+ struct dpaa2_eth_sgt_cache *sgt_cache;
+
+ sgt_cache = this_cpu_ptr(priv->sgt_cache);
+ if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
+ skb_free_frag(sgt_buf);
+ else
+ sgt_cache->buf[sgt_cache->count++] = sgt_buf;
+}
+
+/* Create a frame descriptor based on a fragmented skb */
+static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
+ struct sk_buff *skb,
+ struct dpaa2_fd *fd,
+ void **swa_addr)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ void *sgt_buf = NULL;
+ dma_addr_t addr;
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ struct dpaa2_sg_entry *sgt;
+ int i, err;
+ int sgt_buf_size;
+ struct scatterlist *scl, *crt_scl;
+ int num_sg;
+ int num_dma_bufs;
+ struct dpaa2_eth_swa *swa;
+
+ /* Create and map scatterlist.
+ * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
+ * to go beyond nr_frags+1.
+ * Note: We don't support chained scatterlists
+ */
+ if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
+ return -EINVAL;
+
+ scl = kmalloc_array(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
+ if (unlikely(!scl))
+ return -ENOMEM;
+
+ sg_init_table(scl, nr_frags + 1);
+ num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
+ if (unlikely(num_sg < 0)) {
+ err = -ENOMEM;
+ goto dma_map_sg_failed;
+ }
+ num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
+ if (unlikely(!num_dma_bufs)) {
+ err = -ENOMEM;
+ goto dma_map_sg_failed;
+ }
+
+ /* Prepare the HW SGT structure */
+ sgt_buf_size = priv->tx_data_offset +
+ sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
+ sgt_buf = dpaa2_eth_sgt_get(priv);
+ if (unlikely(!sgt_buf)) {
+ err = -ENOMEM;
+ goto sgt_buf_alloc_failed;
+ }
+
+ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+
+ /* Fill in the HW SGT structure.
+ *
+ * sgt_buf is zeroed out, so the following fields are implicit
+ * in all sgt entries:
+ * - offset is 0
+ * - format is 'dpaa2_sg_single'
+ */
+ for_each_sg(scl, crt_scl, num_dma_bufs, i) {
+ dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
+ dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
+ }
+ dpaa2_sg_set_final(&sgt[i - 1], true);
+
+ /* Store the skb backpointer in the SGT buffer.
+ * Fit the scatterlist and the number of buffers alongside the
+ * skb backpointer in the software annotation area. We'll need
+ * all of them on Tx Conf.
+ */
+ *swa_addr = (void *)sgt_buf;
+ swa = (struct dpaa2_eth_swa *)sgt_buf;
+ swa->type = DPAA2_ETH_SWA_SG;
+ swa->sg.skb = skb;
+ swa->sg.scl = scl;
+ swa->sg.num_sg = num_sg;
+ swa->sg.sgt_size = sgt_buf_size;
+
+ /* Separately map the SGT buffer */
+ addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, addr))) {
+ err = -ENOMEM;
+ goto dma_map_single_failed;
+ }
+ memset(fd, 0, sizeof(struct dpaa2_fd));
+ dpaa2_fd_set_offset(fd, priv->tx_data_offset);
+ dpaa2_fd_set_format(fd, dpaa2_fd_sg);
+ dpaa2_fd_set_addr(fd, addr);
+ dpaa2_fd_set_len(fd, skb->len);
+ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
+
+ return 0;
+
+dma_map_single_failed:
+ dpaa2_eth_sgt_recycle(priv, sgt_buf);
+sgt_buf_alloc_failed:
+ dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
+dma_map_sg_failed:
+ kfree(scl);
+ return err;
+}
+
+/* Create a SG frame descriptor based on a linear skb.
+ *
+ * This function is used on the Tx path when the skb headroom is not large
+ * enough for the HW requirements, thus instead of realloc-ing the skb we
+ * create a SG frame descriptor with only one entry.
+ */
+static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
+ struct sk_buff *skb,
+ struct dpaa2_fd *fd,
+ void **swa_addr)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpaa2_sg_entry *sgt;
+ struct dpaa2_eth_swa *swa;
+ dma_addr_t addr, sgt_addr;
+ void *sgt_buf = NULL;
+ int sgt_buf_size;
+ int err;
+
+ /* Prepare the HW SGT structure */
+ sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry);
+ sgt_buf = dpaa2_eth_sgt_get(priv);
+ if (unlikely(!sgt_buf))
+ return -ENOMEM;
+ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+
+ addr = dma_map_single(dev, skb->data, skb->len, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, addr))) {
+ err = -ENOMEM;
+ goto data_map_failed;
+ }
+
+ /* Fill in the HW SGT structure */
+ dpaa2_sg_set_addr(sgt, addr);
+ dpaa2_sg_set_len(sgt, skb->len);
+ dpaa2_sg_set_final(sgt, true);
+
+ /* Store the skb backpointer in the SGT buffer */
+ *swa_addr = (void *)sgt_buf;
+ swa = (struct dpaa2_eth_swa *)sgt_buf;
+ swa->type = DPAA2_ETH_SWA_SINGLE;
+ swa->single.skb = skb;
+ swa->single.sgt_size = sgt_buf_size;
+
+ /* Separately map the SGT buffer */
+ sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, sgt_addr))) {
+ err = -ENOMEM;
+ goto sgt_map_failed;
+ }
+
+ memset(fd, 0, sizeof(struct dpaa2_fd));
+ dpaa2_fd_set_offset(fd, priv->tx_data_offset);
+ dpaa2_fd_set_format(fd, dpaa2_fd_sg);
+ dpaa2_fd_set_addr(fd, sgt_addr);
+ dpaa2_fd_set_len(fd, skb->len);
+ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
+
+ return 0;
+
+sgt_map_failed:
+ dma_unmap_single(dev, addr, skb->len, DMA_BIDIRECTIONAL);
+data_map_failed:
+ dpaa2_eth_sgt_recycle(priv, sgt_buf);
+
+ return err;
+}
+
+/* Create a frame descriptor based on a linear skb */
+static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
+ struct sk_buff *skb,
+ struct dpaa2_fd *fd,
+ void **swa_addr)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ u8 *buffer_start, *aligned_start;
+ struct dpaa2_eth_swa *swa;
+ dma_addr_t addr;
+
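+	/* Place the buffer start (hardware annotation area) right before
+	 * skb->data and align it down to DPAA2_ETH_TX_BUF_ALIGN; if the
+	 * aligned start would fall before skb->head, there is not enough
+	 * headroom and we bail out.
+	 */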
+ buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);
+ aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
+ DPAA2_ETH_TX_BUF_ALIGN);
+ if (aligned_start >= skb->head)
+ buffer_start = aligned_start;
+ else
+ return -ENOMEM;
+
+ /* Store a backpointer to the skb at the beginning of the buffer
+ * (in the private data area) such that we can release it
+ * on Tx confirm
+ */
+ *swa_addr = (void *)buffer_start;
+ swa = (struct dpaa2_eth_swa *)buffer_start;
+ swa->type = DPAA2_ETH_SWA_SINGLE;
+ swa->single.skb = skb;
+
+ addr = dma_map_single(dev, buffer_start,
+ skb_tail_pointer(skb) - buffer_start,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, addr)))
+ return -ENOMEM;
+
+ memset(fd, 0, sizeof(struct dpaa2_fd));
+ dpaa2_fd_set_addr(fd, addr);
+ dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
+ dpaa2_fd_set_len(fd, skb->len);
+ dpaa2_fd_set_format(fd, dpaa2_fd_single);
+ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
+
+ return 0;
+}
+
+/* FD freeing routine on the Tx path
+ *
+ * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
+ * back-pointed to is also freed.
+ * This can be called either from dpaa2_eth_tx_conf() or on the error path of
+ * dpaa2_eth_tx().
+ */
+static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq,
+ const struct dpaa2_fd *fd, bool in_napi)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ dma_addr_t fd_addr, sg_addr;
+ struct sk_buff *skb = NULL;
+ unsigned char *buffer_start;
+ struct dpaa2_eth_swa *swa;
+ u8 fd_format = dpaa2_fd_get_format(fd);
+ u32 fd_len = dpaa2_fd_get_len(fd);
+ struct dpaa2_sg_entry *sgt;
+ int should_free_skb = 1;
+ void *tso_hdr;
+ int i;
+
+ fd_addr = dpaa2_fd_get_addr(fd);
+ buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
+ swa = (struct dpaa2_eth_swa *)buffer_start;
+
+ if (fd_format == dpaa2_fd_single) {
+ if (swa->type == DPAA2_ETH_SWA_SINGLE) {
+ skb = swa->single.skb;
+ /* Accessing the skb buffer is safe before dma unmap,
+ * because we didn't map the actual skb shell.
+ */
+ dma_unmap_single(dev, fd_addr,
+ skb_tail_pointer(skb) - buffer_start,
+ DMA_BIDIRECTIONAL);
+ } else {
+ WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type");
+ dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
+ DMA_BIDIRECTIONAL);
+ }
+ } else if (fd_format == dpaa2_fd_sg) {
+ if (swa->type == DPAA2_ETH_SWA_SG) {
+ skb = swa->sg.skb;
+
+ /* Unmap the scatterlist */
+ dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
+ DMA_BIDIRECTIONAL);
+ kfree(swa->sg.scl);
+
+ /* Unmap the SGT buffer */
+ dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
+ DMA_BIDIRECTIONAL);
+ } else if (swa->type == DPAA2_ETH_SWA_SW_TSO) {
+ skb = swa->tso.skb;
+
+ sgt = (struct dpaa2_sg_entry *)(buffer_start +
+ priv->tx_data_offset);
+
+ /* Unmap the SGT buffer */
+ dma_unmap_single(dev, fd_addr, swa->tso.sgt_size,
+ DMA_BIDIRECTIONAL);
+
+ /* Unmap and free the header */
+ tso_hdr = dpaa2_iova_to_virt(priv->iommu_domain, dpaa2_sg_get_addr(sgt));
+ dma_unmap_single(dev, dpaa2_sg_get_addr(sgt), TSO_HEADER_SIZE,
+ DMA_TO_DEVICE);
+ kfree(tso_hdr);
+
+ /* Unmap the other SG entries for the data */
+ for (i = 1; i < swa->tso.num_sg; i++)
+ dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]),
+ dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE);
+
+ if (!swa->tso.is_last_fd)
+ should_free_skb = 0;
+ } else {
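+			/* DPAA2_ETH_SWA_SINGLE: a SG frame descriptor built
+			 * around a linear skb (see
+			 * dpaa2_eth_build_sg_fd_single_buf())
+			 */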
+ skb = swa->single.skb;
+
+ /* Unmap the SGT Buffer */
+ dma_unmap_single(dev, fd_addr, swa->single.sgt_size,
+ DMA_BIDIRECTIONAL);
+
+ sgt = (struct dpaa2_sg_entry *)(buffer_start +
+ priv->tx_data_offset);
+ sg_addr = dpaa2_sg_get_addr(sgt);
+ dma_unmap_single(dev, sg_addr, skb->len, DMA_BIDIRECTIONAL);
+ }
+ } else {
+ netdev_dbg(priv->net_dev, "Invalid FD format\n");
+ return;
+ }
+
+ if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
+ fq->dq_frames++;
+ fq->dq_bytes += fd_len;
+ }
+
+ if (swa->type == DPAA2_ETH_SWA_XDP) {
+ xdp_return_frame(swa->xdp.xdpf);
+ return;
+ }
+
+ /* Get the timestamp value */
+ if (swa->type != DPAA2_ETH_SWA_SW_TSO) {
+ if (skb->cb[0] == TX_TSTAMP) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ __le64 *ts = dpaa2_get_ts(buffer_start, true);
+ u64 ns;
+
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+
+ ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ } else if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
+ mutex_unlock(&priv->onestep_tstamp_lock);
+ }
+ }
+
+ /* Free SGT buffer allocated on tx */
+ if (fd_format != dpaa2_fd_single)
+ dpaa2_eth_sgt_recycle(priv, buffer_start);
+
+ /* Move on with skb release. If we are just confirming multiple FDs
+ * from the same TSO skb then only the last one will need to free the
+ * skb.
+ */
+ if (should_free_skb)
+ napi_consume_skb(skb, in_napi);
+}
+
+static int dpaa2_eth_build_gso_fd(struct dpaa2_eth_priv *priv,
+ struct sk_buff *skb, struct dpaa2_fd *fd,
+ int *num_fds, u32 *total_fds_len)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ int hdr_len, total_len, data_left, fd_len;
+ int num_sge, err, i, sgt_buf_size;
+ struct dpaa2_fd *fd_start = fd;
+ struct dpaa2_sg_entry *sgt;
+ struct dpaa2_eth_swa *swa;
+ dma_addr_t sgt_addr, addr;
+ dma_addr_t tso_hdr_dma;
+ unsigned int index = 0;
+ struct tso_t tso;
+ char *tso_hdr;
+ void *sgt_buf;
+
+ /* Initialize the TSO handler, and prepare the first payload */
+ hdr_len = tso_start(skb, &tso);
+ *total_fds_len = 0;
+
+ total_len = skb->len - hdr_len;
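+	/* Software TSO: split the payload into chunks of at most gso_size
+	 * bytes and build one S/G frame descriptor per chunk, each preceded
+	 * by its own freshly built MAC/IP/TCP header.
+	 */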
+ while (total_len > 0) {
+ /* Prepare the HW SGT structure for this frame */
+ sgt_buf = dpaa2_eth_sgt_get(priv);
+ if (unlikely(!sgt_buf)) {
+ netdev_err(priv->net_dev, "dpaa2_eth_sgt_get() failed\n");
+ err = -ENOMEM;
+ goto err_sgt_get;
+ }
+ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+
+ /* Determine the data length of this frame */
+ data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+ total_len -= data_left;
+ fd_len = data_left + hdr_len;
+
+ /* Prepare packet headers: MAC + IP + TCP */
+ tso_hdr = kmalloc(TSO_HEADER_SIZE, GFP_ATOMIC);
+ if (!tso_hdr) {
+ err = -ENOMEM;
+ goto err_alloc_tso_hdr;
+ }
+
+ tso_build_hdr(skb, tso_hdr, &tso, data_left, total_len == 0);
+ tso_hdr_dma = dma_map_single(dev, tso_hdr, TSO_HEADER_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, tso_hdr_dma)) {
+ netdev_err(priv->net_dev, "dma_map_single(tso_hdr) failed\n");
+ err = -ENOMEM;
+ goto err_map_tso_hdr;
+ }
+
+ /* Setup the SG entry for the header */
+ dpaa2_sg_set_addr(sgt, tso_hdr_dma);
+ dpaa2_sg_set_len(sgt, hdr_len);
+ dpaa2_sg_set_final(sgt, data_left <= 0);
+
+ /* Compose the SG entries for each fragment of data */
+ num_sge = 1;
+ while (data_left > 0) {
+ int size;
+
+ /* Move to the next SG entry */
+ sgt++;
+ size = min_t(int, tso.size, data_left);
+
+ addr = dma_map_single(dev, tso.data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, addr)) {
+ netdev_err(priv->net_dev, "dma_map_single(tso.data) failed\n");
+ err = -ENOMEM;
+ goto err_map_data;
+ }
+ dpaa2_sg_set_addr(sgt, addr);
+ dpaa2_sg_set_len(sgt, size);
+ dpaa2_sg_set_final(sgt, size == data_left);
+
+ num_sge++;
+
+ /* Build the data for the __next__ fragment */
+ data_left -= size;
+ tso_build_data(skb, &tso, size);
+ }
+
+ /* Store the skb backpointer in the SGT buffer */
+ sgt_buf_size = priv->tx_data_offset + num_sge * sizeof(struct dpaa2_sg_entry);
+ swa = (struct dpaa2_eth_swa *)sgt_buf;
+ swa->type = DPAA2_ETH_SWA_SW_TSO;
+ swa->tso.skb = skb;
+ swa->tso.num_sg = num_sge;
+ swa->tso.sgt_size = sgt_buf_size;
+ swa->tso.is_last_fd = total_len == 0 ? 1 : 0;
+
+ /* Separately map the SGT buffer */
+ sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, sgt_addr))) {
+ netdev_err(priv->net_dev, "dma_map_single(sgt_buf) failed\n");
+ err = -ENOMEM;
+ goto err_map_sgt;
+ }
+
+ /* Setup the frame descriptor */
+ memset(fd, 0, sizeof(struct dpaa2_fd));
+ dpaa2_fd_set_offset(fd, priv->tx_data_offset);
+ dpaa2_fd_set_format(fd, dpaa2_fd_sg);
+ dpaa2_fd_set_addr(fd, sgt_addr);
+ dpaa2_fd_set_len(fd, fd_len);
+ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
+
+ *total_fds_len += fd_len;
+ /* Advance to the next frame descriptor */
+ fd++;
+ index++;
+ }
+
+ *num_fds = index;
+
+ return 0;
+
+err_map_sgt:
+err_map_data:
+ /* Unmap all the data S/G entries for the current FD */
+ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+ for (i = 1; i < num_sge; i++)
+ dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]),
+ dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE);
+
+ /* Unmap the header entry */
+ dma_unmap_single(dev, tso_hdr_dma, TSO_HEADER_SIZE, DMA_TO_DEVICE);
+err_map_tso_hdr:
+ kfree(tso_hdr);
+err_alloc_tso_hdr:
+ dpaa2_eth_sgt_recycle(priv, sgt_buf);
+err_sgt_get:
+ /* Free all the other FDs that were already fully created */
+ for (i = 0; i < index; i++)
+ dpaa2_eth_free_tx_fd(priv, NULL, &fd_start[i], false);
+
+ return err;
+}
+
+static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
+ struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int total_enqueued = 0, retries = 0, enqueued;
+ struct dpaa2_eth_drv_stats *percpu_extras;
+ struct rtnl_link_stats64 *percpu_stats;
+ unsigned int needed_headroom;
+ int num_fds = 1, max_retries;
+ struct dpaa2_eth_fq *fq;
+ struct netdev_queue *nq;
+ struct dpaa2_fd *fd;
+ u16 queue_mapping;
+ void *swa = NULL;
+ u8 prio = 0;
+ int err, i;
+ u32 fd_len;
+
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
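+	/* Use the per-CPU array of frame descriptors; more than one FD per
+	 * skb is only produced on the software TSO path.
+	 */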
+ fd = (this_cpu_ptr(priv->fd))->array;
+
+ needed_headroom = dpaa2_eth_needed_headroom(skb);
+
+ /* We'll be holding a back-reference to the skb until Tx Confirmation;
+ * we don't want that overwritten by a concurrent Tx with a cloned skb.
+ */
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (unlikely(!skb)) {
+ /* skb_unshare() has already freed the skb */
+ percpu_stats->tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ /* Setup the FD fields */
+
+ if (skb_is_gso(skb)) {
+ err = dpaa2_eth_build_gso_fd(priv, skb, fd, &num_fds, &fd_len);
+ percpu_extras->tx_sg_frames += num_fds;
+ percpu_extras->tx_sg_bytes += fd_len;
+ percpu_extras->tx_tso_frames += num_fds;
+ percpu_extras->tx_tso_bytes += fd_len;
+ } else if (skb_is_nonlinear(skb)) {
+ err = dpaa2_eth_build_sg_fd(priv, skb, fd, &swa);
+ percpu_extras->tx_sg_frames++;
+ percpu_extras->tx_sg_bytes += skb->len;
+ fd_len = dpaa2_fd_get_len(fd);
+ } else if (skb_headroom(skb) < needed_headroom) {
+ err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, fd, &swa);
+ percpu_extras->tx_sg_frames++;
+ percpu_extras->tx_sg_bytes += skb->len;
+ percpu_extras->tx_converted_sg_frames++;
+ percpu_extras->tx_converted_sg_bytes += skb->len;
+ fd_len = dpaa2_fd_get_len(fd);
+ } else {
+ err = dpaa2_eth_build_single_fd(priv, skb, fd, &swa);
+ fd_len = dpaa2_fd_get_len(fd);
+ }
+
+ if (unlikely(err)) {
+ percpu_stats->tx_dropped++;
+ goto err_build_fd;
+ }
+
+ if (swa && skb->cb[0])
+ dpaa2_eth_enable_tx_tstamp(priv, fd, swa, skb);
+
+ /* Tracing point */
+ for (i = 0; i < num_fds; i++)
+ trace_dpaa2_tx_fd(net_dev, &fd[i]);
+
+ /* TxConf FQ selection relies on queue id from the stack.
+ * In case of a forwarded frame from another DPNI interface, we choose
+ * a queue affined to the same core that processed the Rx frame
+ */
+ queue_mapping = skb_get_queue_mapping(skb);
+
+ if (net_dev->num_tc) {
+ prio = netdev_txq_to_tc(net_dev, queue_mapping);
+ /* Hardware interprets priority level 0 as being the highest,
+ * so we need to do a reverse mapping to the netdev tc index
+ */
+ prio = net_dev->num_tc - prio - 1;
+ /* We have only one FQ array entry for all Tx hardware queues
+ * with the same flow id (but different priority levels)
+ */
+ queue_mapping %= dpaa2_eth_queue_count(priv);
+ }
+ fq = &priv->fq[queue_mapping];
+ nq = netdev_get_tx_queue(net_dev, queue_mapping);
+ netdev_tx_sent_queue(nq, fd_len);
+
+	/* Everything that happens after this enqueue might race with
+ * the Tx confirmation callback for this frame
+ */
+ max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
+ while (total_enqueued < num_fds && retries < max_retries) {
+ err = priv->enqueue(priv, fq, &fd[total_enqueued],
+ prio, num_fds - total_enqueued, &enqueued);
+ if (err == -EBUSY) {
+ retries++;
+ continue;
+ }
+
+ total_enqueued += enqueued;
+ }
+ percpu_extras->tx_portal_busy += retries;
+
+ if (unlikely(err < 0)) {
+ percpu_stats->tx_errors++;
+ /* Clean up everything, including freeing the skb */
+ dpaa2_eth_free_tx_fd(priv, fq, fd, false);
+ netdev_tx_completed_queue(nq, 1, fd_len);
+ } else {
+ percpu_stats->tx_packets += total_enqueued;
+ percpu_stats->tx_bytes += fd_len;
+ }
+
+ return NETDEV_TX_OK;
+
+err_build_fd:
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static void dpaa2_eth_tx_onestep_tstamp(struct work_struct *work)
+{
+ struct dpaa2_eth_priv *priv = container_of(work, struct dpaa2_eth_priv,
+ tx_onestep_tstamp);
+ struct sk_buff *skb;
+
+ while (true) {
+ skb = skb_dequeue(&priv->tx_skbs);
+ if (!skb)
+ return;
+
+		/* Take the lock just before transmitting a one-step
+		 * timestamping packet, and release it in dpaa2_eth_free_tx_fd
+		 * once the packet is confirmed as sent on hardware, or when
+		 * cleaning up after a transmit failure.
+ */
+ mutex_lock(&priv->onestep_tstamp_lock);
+ __dpaa2_eth_tx(skb, priv->net_dev);
+ }
+}
+
+static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ u8 msgtype, twostep, udp;
+ u16 offset1, offset2;
+
+ /* Utilize skb->cb[0] for timestamping request per skb */
+ skb->cb[0] = 0;
+
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && dpaa2_ptp) {
+ if (priv->tx_tstamp_type == HWTSTAMP_TX_ON)
+ skb->cb[0] = TX_TSTAMP;
+ else if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
+ skb->cb[0] = TX_TSTAMP_ONESTEP_SYNC;
+ }
+
+ /* TX for one-step timestamping PTP Sync packet */
+ if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
+ if (!dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
+ &offset1, &offset2))
+ if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0) {
+ skb_queue_tail(&priv->tx_skbs, skb);
+ queue_work(priv->dpaa2_ptp_wq,
+ &priv->tx_onestep_tstamp);
+ return NETDEV_TX_OK;
+ }
+ /* Use two-step timestamping if not one-step timestamping
+ * PTP Sync packet
+ */
+ skb->cb[0] = TX_TSTAMP;
+ }
+
+ /* TX for other packets */
+ return __dpaa2_eth_tx(skb, net_dev);
+}
+
+/* Tx confirmation frame processing routine */
+static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ struct dpaa2_eth_fq *fq)
+{
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa2_eth_drv_stats *percpu_extras;
+ u32 fd_len = dpaa2_fd_get_len(fd);
+ u32 fd_errors;
+
+ /* Tracing point */
+ trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
+
+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
+ percpu_extras->tx_conf_frames++;
+ percpu_extras->tx_conf_bytes += fd_len;
+ ch->stats.bytes_per_cdan += fd_len;
+
+ /* Check frame errors in the FD field */
+ fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
+ dpaa2_eth_free_tx_fd(priv, fq, fd, true);
+
+ if (likely(!fd_errors))
+ return;
+
+ if (net_ratelimit())
+ netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
+ fd_errors);
+
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+ /* Tx-conf logically pertains to the egress path. */
+ percpu_stats->tx_errors++;
+}
+
+static int dpaa2_eth_set_rx_vlan_filtering(struct dpaa2_eth_priv *priv,
+ bool enable)
+{
+ int err;
+
+ err = dpni_enable_vlan_filter(priv->mc_io, 0, priv->mc_token, enable);
+
+ if (err) {
+ netdev_err(priv->net_dev,
+ "dpni_enable_vlan_filter failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
+{
+ int err;
+
+ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
+ DPNI_OFF_RX_L3_CSUM, enable);
+ if (err) {
+ netdev_err(priv->net_dev,
+ "dpni_set_offload(RX_L3_CSUM) failed\n");
+ return err;
+ }
+
+ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
+ DPNI_OFF_RX_L4_CSUM, enable);
+ if (err) {
+ netdev_err(priv->net_dev,
+ "dpni_set_offload(RX_L4_CSUM) failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
+{
+ int err;
+
+ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
+ DPNI_OFF_TX_L3_CSUM, enable);
+ if (err) {
+ netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
+ return err;
+ }
+
+ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
+ DPNI_OFF_TX_L4_CSUM, enable);
+ if (err) {
+ netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+/* Perform a single release command to add buffers
+ * to the specified buffer pool
+ */
+static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch, u16 bpid)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
+ struct page *page;
+ dma_addr_t addr;
+ int retries = 0;
+ int i, err;
+
+ for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
+		/* Allocate one page for each Rx buffer. WRIOP sees
+		 * the entire page except for a tailroom reserved for
+		 * the skb shared info and alignment padding.
+		 */
+ page = dev_alloc_pages(0);
+ if (!page)
+ goto err_alloc;
+
+ addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, addr)))
+ goto err_map;
+
+ buf_array[i] = addr;
+
+ /* tracing point */
+ trace_dpaa2_eth_buf_seed(priv->net_dev, page_address(page),
+ DPAA2_ETH_RX_BUF_RAW_SIZE,
+ addr, priv->rx_buf_size,
+ bpid);
+ }
+
+release_bufs:
+ /* In case the portal is busy, retry until successful */
+ while ((err = dpaa2_io_service_release(ch->dpio, bpid,
+ buf_array, i)) == -EBUSY) {
+ if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
+ break;
+ cpu_relax();
+ }
+
+ /* If release command failed, clean up and bail out;
+ * not much else we can do about it
+ */
+ if (err) {
+ dpaa2_eth_free_bufs(priv, buf_array, i);
+ return 0;
+ }
+
+ return i;
+
+err_map:
+ __free_pages(page, 0);
+err_alloc:
+ /* If we managed to allocate at least some buffers,
+ * release them to hardware
+ */
+ if (i)
+ goto release_bufs;
+
+ return 0;
+}
+
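+/* Seed the hardware buffer pool with DPAA2_ETH_NUM_BUFS buffers per channel,
+ * added in batches of DPAA2_ETH_BUFS_PER_CMD.
+ */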
+static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
+{
+ int i, j;
+ int new_count;
+
+ for (j = 0; j < priv->num_channels; j++) {
+ for (i = 0; i < DPAA2_ETH_NUM_BUFS;
+ i += DPAA2_ETH_BUFS_PER_CMD) {
+ new_count = dpaa2_eth_add_bufs(priv, priv->channel[j], bpid);
+ priv->channel[j]->buf_count += new_count;
+
+ if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
+ return -ENOMEM;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Drain the specified number of buffers from the DPNI's private buffer pool.
+ * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
+ */
+static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count)
+{
+ u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
+ int retries = 0;
+ int ret;
+
+ do {
+ ret = dpaa2_io_service_acquire(NULL, priv->bpid,
+ buf_array, count);
+ if (ret < 0) {
+ if (ret == -EBUSY &&
+ retries++ < DPAA2_ETH_SWP_BUSY_RETRIES)
+ continue;
+ netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
+ return;
+ }
+ dpaa2_eth_free_bufs(priv, buf_array, ret);
+ retries = 0;
+ } while (ret);
+}
+
+static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv)
+{
+ int i;
+
+ dpaa2_eth_drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
+ dpaa2_eth_drain_bufs(priv, 1);
+
+ for (i = 0; i < priv->num_channels; i++)
+ priv->channel[i]->buf_count = 0;
+}
+
+/* Function is called from softirq context only, so we don't need to guard
+ * the access to percpu count
+ */
+static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ u16 bpid)
+{
+ int new_count;
+
+ if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
+ return 0;
+
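+	/* Add buffers in batches of DPAA2_ETH_BUFS_PER_CMD until the
+	 * per-channel target of DPAA2_ETH_NUM_BUFS is reached.
+	 */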
+ do {
+ new_count = dpaa2_eth_add_bufs(priv, ch, bpid);
+ if (unlikely(!new_count)) {
+ /* Out of memory; abort for now, we'll try later on */
+ break;
+ }
+ ch->buf_count += new_count;
+ } while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
+
+ if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv *priv)
+{
+ struct dpaa2_eth_sgt_cache *sgt_cache;
+ u16 count;
+ int k, i;
+
+ for_each_possible_cpu(k) {
+ sgt_cache = per_cpu_ptr(priv->sgt_cache, k);
+ count = sgt_cache->count;
+
+ for (i = 0; i < count; i++)
+ skb_free_frag(sgt_cache->buf[i]);
+ sgt_cache->count = 0;
+ }
+}
+
+static int dpaa2_eth_pull_channel(struct dpaa2_eth_channel *ch)
+{
+ int err;
+ int dequeues = -1;
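+	/* Start the busy counter at -1 so that a pull which succeeds on the
+	 * first attempt is not counted as a portal busy event.
+	 */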
+
+ /* Retry while portal is busy */
+ do {
+ err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
+ ch->store);
+ dequeues++;
+ cpu_relax();
+ } while (err == -EBUSY && dequeues < DPAA2_ETH_SWP_BUSY_RETRIES);
+
+ ch->stats.dequeue_portal_busy += dequeues;
+ if (unlikely(err))
+ ch->stats.pull_err++;
+
+ return err;
+}
+
+/* NAPI poll routine
+ *
+ * Frames are dequeued from the QMan channel associated with this NAPI context.
+ * Rx, Tx confirmation and (if configured) Rx error frames all count
+ * towards the NAPI budget.
+ */
+static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
+{
+ struct dpaa2_eth_channel *ch;
+ struct dpaa2_eth_priv *priv;
+ int rx_cleaned = 0, txconf_cleaned = 0;
+ struct dpaa2_eth_fq *fq, *txc_fq = NULL;
+ struct netdev_queue *nq;
+ int store_cleaned, work_done;
+ struct list_head rx_list;
+ int retries = 0;
+ u16 flowid;
+ int err;
+
+ ch = container_of(napi, struct dpaa2_eth_channel, napi);
+ ch->xdp.res = 0;
+ priv = ch->priv;
+
+ INIT_LIST_HEAD(&rx_list);
+ ch->rx_list = &rx_list;
+
+ do {
+ err = dpaa2_eth_pull_channel(ch);
+ if (unlikely(err))
+ break;
+
+ /* Refill pool if appropriate */
+ dpaa2_eth_refill_pool(priv, ch, priv->bpid);
+
+ store_cleaned = dpaa2_eth_consume_frames(ch, &fq);
+ if (store_cleaned <= 0)
+ break;
+ if (fq->type == DPAA2_RX_FQ) {
+ rx_cleaned += store_cleaned;
+ flowid = fq->flowid;
+ } else {
+ txconf_cleaned += store_cleaned;
+ /* We have a single Tx conf FQ on this channel */
+ txc_fq = fq;
+ }
+
+ /* If we either consumed the whole NAPI budget with Rx frames
+ * or we reached the Tx confirmations threshold, we're done.
+ */
+ if (rx_cleaned >= budget ||
+ txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
+ work_done = budget;
+ if (ch->xdp.res & XDP_REDIRECT)
+ xdp_do_flush();
+ goto out;
+ }
+ } while (store_cleaned);
+
+ if (ch->xdp.res & XDP_REDIRECT)
+ xdp_do_flush();
+
+ /* Update NET DIM with the values for this CDAN */
+ dpaa2_io_update_net_dim(ch->dpio, ch->stats.frames_per_cdan,
+ ch->stats.bytes_per_cdan);
+ ch->stats.frames_per_cdan = 0;
+ ch->stats.bytes_per_cdan = 0;
+
+ /* We didn't consume the entire budget, so finish napi and
+ * re-enable data availability notifications
+ */
+ napi_complete_done(napi, rx_cleaned);
+ do {
+ err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
+ cpu_relax();
+ } while (err == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES);
+ WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
+ ch->nctx.desired_cpu);
+
+ work_done = max(rx_cleaned, 1);
+
+out:
+ netif_receive_skb_list(ch->rx_list);
+
+ if (txc_fq && txc_fq->dq_frames) {
+ nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
+ netdev_tx_completed_queue(nq, txc_fq->dq_frames,
+ txc_fq->dq_bytes);
+ txc_fq->dq_frames = 0;
+ txc_fq->dq_bytes = 0;
+ }
+
+ if (rx_cleaned && ch->xdp.res & XDP_TX)
+ dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[flowid]);
+
+ return work_done;
+}
+
+static void dpaa2_eth_enable_ch_napi(struct dpaa2_eth_priv *priv)
+{
+ struct dpaa2_eth_channel *ch;
+ int i;
+
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
+ napi_enable(&ch->napi);
+ }
+}
+
+static void dpaa2_eth_disable_ch_napi(struct dpaa2_eth_priv *priv)
+{
+ struct dpaa2_eth_channel *ch;
+ int i;
+
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
+ napi_disable(&ch->napi);
+ }
+}
+
+void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
+ bool tx_pause, bool pfc)
+{
+ struct dpni_taildrop td = {0};
+ struct dpaa2_eth_fq *fq;
+ int i, err;
+
+ /* FQ taildrop: threshold is in bytes, per frame queue. Enabled if
+ * flow control is disabled (as it might interfere with either the
+ * buffer pool depletion trigger for pause frames or with the group
+ * congestion trigger for PFC frames)
+ */
+ td.enable = !tx_pause;
+ if (priv->rx_fqtd_enabled == td.enable)
+ goto set_cgtd;
+
+ td.threshold = DPAA2_ETH_FQ_TAILDROP_THRESH;
+ td.units = DPNI_CONGESTION_UNIT_BYTES;
+
+ for (i = 0; i < priv->num_fqs; i++) {
+ fq = &priv->fq[i];
+ if (fq->type != DPAA2_RX_FQ)
+ continue;
+ err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
+ DPNI_CP_QUEUE, DPNI_QUEUE_RX,
+ fq->tc, fq->flowid, &td);
+ if (err) {
+ netdev_err(priv->net_dev,
+ "dpni_set_taildrop(FQ) failed\n");
+ return;
+ }
+ }
+
+ priv->rx_fqtd_enabled = td.enable;
+
+set_cgtd:
+ /* Congestion group taildrop: threshold is in frames, per group
+	 * of FQs belonging to the same traffic class.
+	 * Enabled if general Tx pause is disabled or if PFCs are enabled
+	 * (the congestion group threshold for PFC generation is lower than the
+ * CG taildrop threshold, so it won't interfere with it; we also
+ * want frames in non-PFC enabled traffic classes to be kept in check)
+ */
+ td.enable = !tx_pause || pfc;
+ if (priv->rx_cgtd_enabled == td.enable)
+ return;
+
+ td.threshold = DPAA2_ETH_CG_TAILDROP_THRESH(priv);
+ td.units = DPNI_CONGESTION_UNIT_FRAMES;
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
+ DPNI_CP_GROUP, DPNI_QUEUE_RX,
+ i, 0, &td);
+ if (err) {
+ netdev_err(priv->net_dev,
+ "dpni_set_taildrop(CG) failed\n");
+ return;
+ }
+ }
+
+ priv->rx_cgtd_enabled = td.enable;
+}
+
+static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv)
+{
+ struct dpni_link_state state = {0};
+ bool tx_pause;
+ int err;
+
+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
+ if (unlikely(err)) {
+ netdev_err(priv->net_dev,
+ "dpni_get_link_state() failed\n");
+ return err;
+ }
+
+ /* If Tx pause frame settings have changed, we need to update
+ * Rx FQ taildrop configuration as well. We configure taildrop
+ * only when pause frame generation is disabled.
+ */
+ tx_pause = dpaa2_eth_tx_pause_enabled(state.options);
+ dpaa2_eth_set_rx_taildrop(priv, tx_pause, priv->pfc_enabled);
+
+ /* When we manage the MAC/PHY using phylink there is no need
+ * to manually update the netif_carrier.
+ */
+ if (dpaa2_eth_is_type_phy(priv))
+ goto out;
+
+	/* Check link state; speed / duplex changes are not treated yet */
+ if (priv->link_state.up == state.up)
+ goto out;
+
+ if (state.up) {
+ netif_carrier_on(priv->net_dev);
+ netif_tx_start_all_queues(priv->net_dev);
+ } else {
+ netif_tx_stop_all_queues(priv->net_dev);
+ netif_carrier_off(priv->net_dev);
+ }
+
+ netdev_info(priv->net_dev, "Link Event: state %s\n",
+ state.up ? "up" : "down");
+
+out:
+ priv->link_state = state;
+
+ return 0;
+}
+
+static int dpaa2_eth_open(struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err;
+
+ err = dpaa2_eth_seed_pool(priv, priv->bpid);
+ if (err) {
+ /* Not much to do; the buffer pool, though not filled up,
+ * may still contain some buffers which would enable us
+ * to limp on.
+ */
+ netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
+ priv->dpbp_dev->obj_desc.id, priv->bpid);
+ }
+
+ if (!dpaa2_eth_is_type_phy(priv)) {
+ /* We'll only start the txqs when the link is actually ready;
+ * make sure we don't race against the link up notification,
+ * which may come immediately after dpni_enable();
+ */
+ netif_tx_stop_all_queues(net_dev);
+
+ /* Also, explicitly set carrier off, otherwise
+ * netif_carrier_ok() will return true and cause 'ip link show'
+ * to report the LOWER_UP flag, even though the link
+ * notification wasn't even received.
+ */
+ netif_carrier_off(net_dev);
+ }
+ dpaa2_eth_enable_ch_napi(priv);
+
+ err = dpni_enable(priv->mc_io, 0, priv->mc_token);
+ if (err < 0) {
+ netdev_err(net_dev, "dpni_enable() failed\n");
+ goto enable_err;
+ }
+
+ if (dpaa2_eth_is_type_phy(priv)) {
+ dpaa2_mac_start(priv->mac);
+ phylink_start(priv->mac->phylink);
+ }
+
+ return 0;
+
+enable_err:
+ dpaa2_eth_disable_ch_napi(priv);
+ dpaa2_eth_drain_pool(priv);
+ return err;
+}
+
+/* Total number of in-flight frames on ingress queues */
+static u32 dpaa2_eth_ingress_fq_count(struct dpaa2_eth_priv *priv)
+{
+ struct dpaa2_eth_fq *fq;
+ u32 fcnt = 0, bcnt = 0, total = 0;
+ int i, err;
+
+ for (i = 0; i < priv->num_fqs; i++) {
+ fq = &priv->fq[i];
+ err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
+ if (err) {
+			netdev_warn(priv->net_dev, "query_fq_count failed\n");
+ break;
+ }
+ total += fcnt;
+ }
+
+ return total;
+}
+
+static void dpaa2_eth_wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv)
+{
+ int retries = 10;
+ u32 pending;
+
+ do {
+ pending = dpaa2_eth_ingress_fq_count(priv);
+ if (pending)
+ msleep(100);
+ } while (pending && --retries);
+}
+
+#define DPNI_TX_PENDING_VER_MAJOR 7
+#define DPNI_TX_PENDING_VER_MINOR 13
+static void dpaa2_eth_wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv)
+{
+ union dpni_statistics stats;
+ int retries = 10;
+ int err;
+
+ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_TX_PENDING_VER_MAJOR,
+ DPNI_TX_PENDING_VER_MINOR) < 0)
+ goto out;
+
+ do {
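+		/* Statistics page 6 contains the tx_pending_frames counter,
+		 * i.e. Tx frames accepted but not yet transmitted by hardware.
+		 */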
+ err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, 6,
+ &stats);
+ if (err)
+ goto out;
+ if (stats.page_6.tx_pending_frames == 0)
+ return;
+ } while (--retries);
+
+out:
+ msleep(500);
+}
+
+static int dpaa2_eth_stop(struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int dpni_enabled = 0;
+ int retries = 10;
+
+ if (dpaa2_eth_is_type_phy(priv)) {
+ phylink_stop(priv->mac->phylink);
+ dpaa2_mac_stop(priv->mac);
+ } else {
+ netif_tx_stop_all_queues(net_dev);
+ netif_carrier_off(net_dev);
+ }
+
+ /* On dpni_disable(), the MC firmware will:
+ * - stop MAC Rx and wait for all Rx frames to be enqueued to software
+ * - cut off WRIOP dequeues from egress FQs and wait until transmission
+ * of all in flight Tx frames is finished (and corresponding Tx conf
+ * frames are enqueued back to software)
+ *
+ * Before calling dpni_disable(), we wait for all Tx frames to arrive
+ * on WRIOP. After it finishes, wait until all remaining frames on Rx
+ * and Tx conf queues are consumed on NAPI poll.
+ */
+ dpaa2_eth_wait_for_egress_fq_empty(priv);
+
+ do {
+ dpni_disable(priv->mc_io, 0, priv->mc_token);
+ dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
+ if (dpni_enabled)
+ /* Allow the hardware some slack */
+ msleep(100);
+ } while (dpni_enabled && --retries);
+ if (!retries) {
+ netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
+ /* Must go on and disable NAPI nonetheless, so we don't crash at
+ * the next "ifconfig up"
+ */
+ }
+
+ dpaa2_eth_wait_for_ingress_fq_empty(priv);
+ dpaa2_eth_disable_ch_napi(priv);
+
+ /* Empty the buffer pool */
+ dpaa2_eth_drain_pool(priv);
+
+ /* Empty the Scatter-Gather Buffer cache */
+ dpaa2_eth_sgt_cache_drain(priv);
+
+ return 0;
+}
+
+static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct device *dev = net_dev->dev.parent;
+ int err;
+
+ err = eth_mac_addr(net_dev, addr);
+ if (err < 0) {
+ dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
+ return err;
+ }
+
+ err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
+ net_dev->dev_addr);
+ if (err) {
+ dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+/* Fill in counters maintained by the GPP driver. These may be different from
+ * the hardware counters obtained by ethtool.
+ */
+static void dpaa2_eth_get_stats(struct net_device *net_dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct rtnl_link_stats64 *percpu_stats;
+ u64 *cpustats;
+ u64 *netstats = (u64 *)stats;
+ int i, j;
+ int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
+
+ for_each_possible_cpu(i) {
+ percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
+ cpustats = (u64 *)percpu_stats;
+ for (j = 0; j < num; j++)
+ netstats[j] += cpustats[j];
+ }
+}
+
+/* Copy mac unicast addresses from @net_dev to @priv.
+ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
+ */
+static void dpaa2_eth_add_uc_hw_addr(const struct net_device *net_dev,
+ struct dpaa2_eth_priv *priv)
+{
+ struct netdev_hw_addr *ha;
+ int err;
+
+ netdev_for_each_uc_addr(ha, net_dev) {
+ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
+ ha->addr);
+ if (err)
+ netdev_warn(priv->net_dev,
+ "Could not add ucast MAC %pM to the filtering table (err %d)\n",
+ ha->addr, err);
+ }
+}
+
+/* Copy mac multicast addresses from @net_dev to @priv.
+ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
+ */
+static void dpaa2_eth_add_mc_hw_addr(const struct net_device *net_dev,
+ struct dpaa2_eth_priv *priv)
+{
+ struct netdev_hw_addr *ha;
+ int err;
+
+ netdev_for_each_mc_addr(ha, net_dev) {
+ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
+ ha->addr);
+ if (err)
+ netdev_warn(priv->net_dev,
+ "Could not add mcast MAC %pM to the filtering table (err %d)\n",
+ ha->addr, err);
+ }
+}
+
+static int dpaa2_eth_rx_add_vid(struct net_device *net_dev,
+ __be16 vlan_proto, u16 vid)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err;
+
+ err = dpni_add_vlan_id(priv->mc_io, 0, priv->mc_token,
+ vid, 0, 0, 0);
+
+ if (err) {
+ netdev_warn(priv->net_dev,
+ "Could not add the vlan id %u\n",
+ vid);
+ return err;
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_rx_kill_vid(struct net_device *net_dev,
+ __be16 vlan_proto, u16 vid)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err;
+
+ err = dpni_remove_vlan_id(priv->mc_io, 0, priv->mc_token, vid);
+
+ if (err) {
+ netdev_warn(priv->net_dev,
+ "Could not remove the vlan id %u\n",
+ vid);
+ return err;
+ }
+
+ return 0;
+}
+
+static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int uc_count = netdev_uc_count(net_dev);
+ int mc_count = netdev_mc_count(net_dev);
+ u8 max_mac = priv->dpni_attrs.mac_filter_entries;
+ u32 options = priv->dpni_attrs.options;
+ u16 mc_token = priv->mc_token;
+ struct fsl_mc_io *mc_io = priv->mc_io;
+ int err;
+
+ /* Basic sanity checks; these probably indicate a misconfiguration */
+ if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
+ netdev_info(net_dev,
+ "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
+ max_mac);
+
+ /* Force promiscuous if the uc or mc counts exceed our capabilities. */
+ if (uc_count > max_mac) {
+ netdev_info(net_dev,
+ "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
+ uc_count, max_mac);
+ goto force_promisc;
+ }
+ if (mc_count + uc_count > max_mac) {
+ netdev_info(net_dev,
+ "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
+ uc_count + mc_count, max_mac);
+ goto force_mc_promisc;
+ }
+
+ /* Adjust promisc settings due to flag combinations */
+ if (net_dev->flags & IFF_PROMISC)
+ goto force_promisc;
+ if (net_dev->flags & IFF_ALLMULTI) {
+ /* First, rebuild unicast filtering table. This should be done
+ * in promisc mode, in order to avoid frame loss while we
+ * progressively add entries to the table.
+ * We don't know whether we had been in promisc already, and
+ * making an MC call to find out is expensive; so set uc promisc
+ * nonetheless.
+ */
+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
+ if (err)
+ netdev_warn(net_dev, "Can't set uc promisc\n");
+
+ /* Actual uc table reconstruction. */
+ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
+ if (err)
+ netdev_warn(net_dev, "Can't clear uc filters\n");
+ dpaa2_eth_add_uc_hw_addr(net_dev, priv);
+
+ /* Finally, clear uc promisc and set mc promisc as requested. */
+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
+ if (err)
+ netdev_warn(net_dev, "Can't clear uc promisc\n");
+ goto force_mc_promisc;
+ }
+
+ /* Neither unicast, nor multicast promisc will be on... eventually.
+ * For now, rebuild mac filtering tables while forcing both of them on.
+ */
+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
+ if (err)
+ netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
+ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
+ if (err)
+ netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
+
+ /* Actual mac filtering tables reconstruction */
+ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
+ if (err)
+ netdev_warn(net_dev, "Can't clear mac filters\n");
+ dpaa2_eth_add_mc_hw_addr(net_dev, priv);
+ dpaa2_eth_add_uc_hw_addr(net_dev, priv);
+
+ /* Now we can clear both ucast and mcast promisc, without risking
+ * to drop legitimate frames anymore.
+ */
+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
+ if (err)
+ netdev_warn(net_dev, "Can't clear ucast promisc\n");
+ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
+ if (err)
+ netdev_warn(net_dev, "Can't clear mcast promisc\n");
+
+ return;
+
+force_promisc:
+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
+ if (err)
+ netdev_warn(net_dev, "Can't set ucast promisc\n");
+force_mc_promisc:
+ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
+ if (err)
+ netdev_warn(net_dev, "Can't set mcast promisc\n");
+}
+
+static int dpaa2_eth_set_features(struct net_device *net_dev,
+ netdev_features_t features)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ netdev_features_t changed = features ^ net_dev->features;
+ bool enable;
+ int err;
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
+ err = dpaa2_eth_set_rx_vlan_filtering(priv, enable);
+ if (err)
+ return err;
+ }
+
+ if (changed & NETIF_F_RXCSUM) {
+ enable = !!(features & NETIF_F_RXCSUM);
+ err = dpaa2_eth_set_rx_csum(priv, enable);
+ if (err)
+ return err;
+ }
+
+ if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
+ enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
+ err = dpaa2_eth_set_tx_csum(priv, enable);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ struct hwtstamp_config config;
+
+ if (!dpaa2_ptp)
+ return -EINVAL;
+
+ if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ priv->tx_tstamp_type = config.tx_type;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
+ priv->rx_tstamp = false;
+ } else {
+ priv->rx_tstamp = true;
+ /* TS is set for all frame types, not only those requested */
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ }
+
+ if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
+ dpaa2_ptp_onestep_reg_update_method(priv);
+
+ return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+
+ if (cmd == SIOCSHWTSTAMP)
+ return dpaa2_eth_ts_ioctl(dev, rq, cmd);
+
+ if (dpaa2_eth_is_type_phy(priv))
+ return phylink_mii_ioctl(priv->mac->phylink, rq, cmd);
+
+ return -EOPNOTSUPP;
+}
+
+static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
+{
+ int mfl, linear_mfl;
+
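+	/* With XDP we must avoid scatter/gather frames, so the L2 frame
+	 * length derived from the MTU has to fit in a single Rx buffer,
+	 * minus the hardware annotation area and the reserved headrooms.
+	 */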
+ mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
+ linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE -
+ dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;
+
+ if (mfl > linear_mfl) {
+ netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n",
+ linear_mfl - VLAN_ETH_HLEN);
+ return false;
+ }
+
+ return true;
+}
+
+static int dpaa2_eth_set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
+{
+ int mfl, err;
+
+ /* We enforce a maximum Rx frame length based on MTU only if we have
+ * an XDP program attached (in order to avoid Rx S/G frames).
+ * Otherwise, we accept all incoming frames as long as they are not
+	 * larger than the maximum size supported in hardware.
+ */
+ if (has_xdp)
+ mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
+ else
+ mfl = DPAA2_ETH_MFL;
+
+ err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl);
+ if (err) {
+ netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ int err;
+
+ if (!priv->xdp_prog)
+ goto out;
+
+ if (!xdp_mtu_valid(priv, new_mtu))
+ return -EINVAL;
+
+ err = dpaa2_eth_set_rx_mfl(priv, new_mtu, true);
+ if (err)
+ return err;
+
+out:
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+static int dpaa2_eth_update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
+{
+ struct dpni_buffer_layout buf_layout = {0};
+ int err;
+
+ err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_RX, &buf_layout);
+ if (err) {
+ netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n");
+ return err;
+ }
+
+ /* Reserve extra headroom for XDP header size changes */
+ buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) +
+ (has_xdp ? XDP_PACKET_HEADROOM : 0);
+ buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
+ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_RX, &buf_layout);
+ if (err) {
+ netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ struct dpaa2_eth_channel *ch;
+ struct bpf_prog *old;
+ bool up, need_update;
+ int i, err;
+
+ if (prog && !xdp_mtu_valid(priv, dev->mtu))
+ return -EINVAL;
+
+ if (prog)
+ bpf_prog_add(prog, priv->num_channels);
+
+ up = netif_running(dev);
+ need_update = (!!priv->xdp_prog != !!prog);
+
+ if (up)
+ dpaa2_eth_stop(dev);
+
+ /* While in xdp mode, enforce a maximum Rx frame size based on MTU.
+ * Also, when switching between xdp/non-xdp modes we need to reconfigure
+ * our Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop,
+ * so we are sure no old format buffers will be used from now on.
+ */
+ if (need_update) {
+ err = dpaa2_eth_set_rx_mfl(priv, dev->mtu, !!prog);
+ if (err)
+ goto out_err;
+ err = dpaa2_eth_update_rx_buffer_headroom(priv, !!prog);
+ if (err)
+ goto out_err;
+ }
+
+ old = xchg(&priv->xdp_prog, prog);
+ if (old)
+ bpf_prog_put(old);
+
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
+ old = xchg(&ch->xdp.prog, prog);
+ if (old)
+ bpf_prog_put(old);
+ }
+
+ if (up) {
+ err = dpaa2_eth_open(dev);
+ if (err)
+ return err;
+ }
+
+ return 0;
+
+out_err:
+ if (prog)
+ bpf_prog_sub(prog, priv->num_channels);
+ if (up)
+ dpaa2_eth_open(dev);
+
+ return err;
+}
+
+static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return dpaa2_eth_setup_xdp(dev, xdp->prog);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev,
+ struct xdp_frame *xdpf,
+ struct dpaa2_fd *fd)
+{
+ struct device *dev = net_dev->dev.parent;
+ unsigned int needed_headroom;
+ struct dpaa2_eth_swa *swa;
+ void *buffer_start, *aligned_start;
+ dma_addr_t addr;
+
+ /* We require a minimum headroom to be able to transmit the frame.
+ * Otherwise return an error and let the original net_device handle it
+ */
+ needed_headroom = dpaa2_eth_needed_headroom(NULL);
+ if (xdpf->headroom < needed_headroom)
+ return -EINVAL;
+
+ /* Setup the FD fields */
+ memset(fd, 0, sizeof(*fd));
+
+ /* Align FD address, if possible */
+ buffer_start = xdpf->data - needed_headroom;
+ aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
+ DPAA2_ETH_TX_BUF_ALIGN);
+ if (aligned_start >= xdpf->data - xdpf->headroom)
+ buffer_start = aligned_start;
+
+ swa = (struct dpaa2_eth_swa *)buffer_start;
+	/* Fill in the software annotation used to clean up on Tx confirmation */
+ swa->type = DPAA2_ETH_SWA_XDP;
+ swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start;
+ swa->xdp.xdpf = xdpf;
+
+ addr = dma_map_single(dev, buffer_start,
+ swa->xdp.dma_size,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, addr)))
+ return -ENOMEM;
+
+ dpaa2_fd_set_addr(fd, addr);
+ dpaa2_fd_set_offset(fd, xdpf->data - buffer_start);
+ dpaa2_fd_set_len(fd, xdpf->len);
+ dpaa2_fd_set_format(fd, dpaa2_fd_single);
+ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
+
+ return 0;
+}
+
+static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct dpaa2_eth_xdp_fds *xdp_redirect_fds;
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa2_eth_fq *fq;
+ struct dpaa2_fd *fds;
+ int enqueued, i, err;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ if (!netif_running(net_dev))
+ return -ENETDOWN;
+
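+	/* The first dpaa2_eth_queue_count() entries of priv->fq are the Tx
+	 * (conf) queues; use the one indexed by the current core.
+	 */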
+ fq = &priv->fq[smp_processor_id()];
+ xdp_redirect_fds = &fq->xdp_redirect_fds;
+ fds = xdp_redirect_fds->fds;
+
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+
+ /* create a FD for each xdp_frame in the list received */
+ for (i = 0; i < n; i++) {
+ err = dpaa2_eth_xdp_create_fd(net_dev, frames[i], &fds[i]);
+ if (err)
+ break;
+ }
+ xdp_redirect_fds->num = i;
+
+ /* enqueue all the frame descriptors */
+ enqueued = dpaa2_eth_xdp_flush(priv, fq, xdp_redirect_fds);
+
+ /* update statistics */
+ percpu_stats->tx_packets += enqueued;
+ for (i = 0; i < enqueued; i++)
+ percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
+
+ return enqueued;
+}
+
+static int update_xps(struct dpaa2_eth_priv *priv)
+{
+ struct net_device *net_dev = priv->net_dev;
+ struct cpumask xps_mask;
+ struct dpaa2_eth_fq *fq;
+ int i, num_queues, netdev_queues;
+ int err = 0;
+
+ num_queues = dpaa2_eth_queue_count(priv);
+ netdev_queues = (net_dev->num_tc ? : 1) * num_queues;
+
+ /* The first <num_queues> entries in priv->fq array are Tx/Tx conf
+ * queues, so only process those
+ */
+ for (i = 0; i < netdev_queues; i++) {
+ fq = &priv->fq[i % num_queues];
+
+ cpumask_clear(&xps_mask);
+ cpumask_set_cpu(fq->target_cpu, &xps_mask);
+
+ err = netif_set_xps_queue(net_dev, &xps_mask, i);
+ if (err) {
+ netdev_warn_once(net_dev, "Error setting XPS queue\n");
+ break;
+ }
+ }
+
+ return err;
+}
+
+static int dpaa2_eth_setup_mqprio(struct net_device *net_dev,
+ struct tc_mqprio_qopt *mqprio)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ u8 num_tc, num_queues;
+ int i;
+
+ mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ num_queues = dpaa2_eth_queue_count(priv);
+ num_tc = mqprio->num_tc;
+
+ if (num_tc == net_dev->num_tc)
+ return 0;
+
+ if (num_tc > dpaa2_eth_tc_count(priv)) {
+ netdev_err(net_dev, "Max %d traffic classes supported\n",
+ dpaa2_eth_tc_count(priv));
+ return -EOPNOTSUPP;
+ }
+
+ if (!num_tc) {
+ netdev_reset_tc(net_dev);
+ netif_set_real_num_tx_queues(net_dev, num_queues);
+ goto out;
+ }
+
+ netdev_set_num_tc(net_dev, num_tc);
+ netif_set_real_num_tx_queues(net_dev, num_tc * num_queues);
+
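+	/* Map each traffic class to its own contiguous block of num_queues
+	 * Tx queues.
+	 */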
+ for (i = 0; i < num_tc; i++)
+ netdev_set_tc_queue(net_dev, i, num_queues, i * num_queues);
+
+out:
+ update_xps(priv);
+
+ return 0;
+}
+
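+/* Convert a rate expressed in bytes/s into Mbits/s */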
+#define bps_to_mbits(rate) (div_u64((rate), 1000000) * 8)
+
+static int dpaa2_eth_setup_tbf(struct net_device *net_dev, struct tc_tbf_qopt_offload *p)
+{
+ struct tc_tbf_qopt_offload_replace_params *cfg = &p->replace_params;
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct dpni_tx_shaping_cfg tx_cr_shaper = { 0 };
+ struct dpni_tx_shaping_cfg tx_er_shaper = { 0 };
+ int err;
+
+ if (p->command == TC_TBF_STATS)
+ return -EOPNOTSUPP;
+
+ /* Only per port Tx shaping */
+ if (p->parent != TC_H_ROOT)
+ return -EOPNOTSUPP;
+
+ if (p->command == TC_TBF_REPLACE) {
+ if (cfg->max_size > DPAA2_ETH_MAX_BURST_SIZE) {
+ netdev_err(net_dev, "burst size cannot be greater than %d\n",
+ DPAA2_ETH_MAX_BURST_SIZE);
+ return -EINVAL;
+ }
+
+ tx_cr_shaper.max_burst_size = cfg->max_size;
+ /* The TBF interface is in bytes/s, whereas DPAA2 expects the
+ * rate in Mbits/s
+ */
+ tx_cr_shaper.rate_limit = bps_to_mbits(cfg->rate.rate_bytes_ps);
+ }
+
+ err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &tx_cr_shaper,
+ &tx_er_shaper, 0);
+ if (err) {
+ netdev_err(net_dev, "dpni_set_tx_shaping() = %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_setup_tc(struct net_device *net_dev,
+ enum tc_setup_type type, void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return dpaa2_eth_setup_mqprio(net_dev, type_data);
+ case TC_SETUP_QDISC_TBF:
+ return dpaa2_eth_setup_tbf(net_dev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct net_device_ops dpaa2_eth_ops = {
+ .ndo_open = dpaa2_eth_open,
+ .ndo_start_xmit = dpaa2_eth_tx,
+ .ndo_stop = dpaa2_eth_stop,
+ .ndo_set_mac_address = dpaa2_eth_set_addr,
+ .ndo_get_stats64 = dpaa2_eth_get_stats,
+ .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
+ .ndo_set_features = dpaa2_eth_set_features,
+ .ndo_eth_ioctl = dpaa2_eth_ioctl,
+ .ndo_change_mtu = dpaa2_eth_change_mtu,
+ .ndo_bpf = dpaa2_eth_xdp,
+ .ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
+ .ndo_setup_tc = dpaa2_eth_setup_tc,
+ .ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid
+};
+
+static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
+{
+ struct dpaa2_eth_channel *ch;
+
+ ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
+
+ /* Update NAPI statistics */
+ ch->stats.cdan++;
+
+ napi_schedule(&ch->napi);
+}
+
+/* Allocate and configure a DPCON object */
+static struct fsl_mc_device *dpaa2_eth_setup_dpcon(struct dpaa2_eth_priv *priv)
+{
+ struct fsl_mc_device *dpcon;
+ struct device *dev = priv->net_dev->dev.parent;
+ int err;
+
+ err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
+ FSL_MC_POOL_DPCON, &dpcon);
+ if (err) {
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_info(dev, "Not enough DPCONs, will go on as-is\n");
+ return ERR_PTR(err);
+ }
+
+ err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
+ if (err) {
+ dev_err(dev, "dpcon_open() failed\n");
+ goto free;
+ }
+
+ err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
+ if (err) {
+ dev_err(dev, "dpcon_reset() failed\n");
+ goto close;
+ }
+
+ err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
+ if (err) {
+ dev_err(dev, "dpcon_enable() failed\n");
+ goto close;
+ }
+
+ return dpcon;
+
+close:
+ dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
+free:
+ fsl_mc_object_free(dpcon);
+
+ return ERR_PTR(err);
+}
+
+static void dpaa2_eth_free_dpcon(struct dpaa2_eth_priv *priv,
+ struct fsl_mc_device *dpcon)
+{
+ dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
+ dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
+ fsl_mc_object_free(dpcon);
+}
+
+static struct dpaa2_eth_channel *dpaa2_eth_alloc_channel(struct dpaa2_eth_priv *priv)
+{
+ struct dpaa2_eth_channel *channel;
+ struct dpcon_attr attr;
+ struct device *dev = priv->net_dev->dev.parent;
+ int err;
+
+ channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return NULL;
+
+ channel->dpcon = dpaa2_eth_setup_dpcon(priv);
+ if (IS_ERR(channel->dpcon)) {
+ err = PTR_ERR(channel->dpcon);
+ goto err_setup;
+ }
+
+ err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
+ &attr);
+ if (err) {
+ dev_err(dev, "dpcon_get_attributes() failed\n");
+ goto err_get_attr;
+ }
+
+ channel->dpcon_id = attr.id;
+ channel->ch_id = attr.qbman_ch_id;
+ channel->priv = priv;
+
+ return channel;
+
+err_get_attr:
+ dpaa2_eth_free_dpcon(priv, channel->dpcon);
+err_setup:
+ kfree(channel);
+ return ERR_PTR(err);
+}
+
+static void dpaa2_eth_free_channel(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *channel)
+{
+ dpaa2_eth_free_dpcon(priv, channel->dpcon);
+ kfree(channel);
+}
+
+/* DPIO setup: allocate and configure QBMan channels, setup core affinity
+ * and register data availability notifications
+ */
+static int dpaa2_eth_setup_dpio(struct dpaa2_eth_priv *priv)
+{
+ struct dpaa2_io_notification_ctx *nctx;
+ struct dpaa2_eth_channel *channel;
+ struct dpcon_notification_cfg dpcon_notif_cfg;
+ struct device *dev = priv->net_dev->dev.parent;
+ int i, err;
+
+ /* We want the ability to spread ingress traffic (RX, TX conf) to as
+ * many cores as possible, so we need one channel for each core
+	 * (unless there are fewer queues than cores, in which case the extra
+ * channels would be wasted).
+ * Allocate one channel per core and register it to the core's
+ * affine DPIO. If not enough channels are available for all cores
+ * or if some cores don't have an affine DPIO, there will be no
+ * ingress frame processing on those cores.
+ */
+ cpumask_clear(&priv->dpio_cpumask);
+ for_each_online_cpu(i) {
+ /* Try to allocate a channel */
+ channel = dpaa2_eth_alloc_channel(priv);
+ if (IS_ERR_OR_NULL(channel)) {
+ err = PTR_ERR_OR_ZERO(channel);
+ if (err != -EPROBE_DEFER)
+ dev_info(dev,
+ "No affine channel for cpu %d and above\n", i);
+ goto err_alloc_ch;
+ }
+
+ priv->channel[priv->num_channels] = channel;
+
+ nctx = &channel->nctx;
+ nctx->is_cdan = 1;
+ nctx->cb = dpaa2_eth_cdan_cb;
+ nctx->id = channel->ch_id;
+ nctx->desired_cpu = i;
+
+ /* Register the new context */
+ channel->dpio = dpaa2_io_service_select(i);
+ err = dpaa2_io_service_register(channel->dpio, nctx, dev);
+ if (err) {
+ dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
+ /* If no affine DPIO for this core, there's probably
+ * none available for next cores either. Signal we want
+ * to retry later, in case the DPIO devices weren't
+ * probed yet.
+ */
+ err = -EPROBE_DEFER;
+ goto err_service_reg;
+ }
+
+ /* Register DPCON notification with MC */
+ dpcon_notif_cfg.dpio_id = nctx->dpio_id;
+ dpcon_notif_cfg.priority = 0;
+ dpcon_notif_cfg.user_ctx = nctx->qman64;
+ err = dpcon_set_notification(priv->mc_io, 0,
+ channel->dpcon->mc_handle,
+ &dpcon_notif_cfg);
+ if (err) {
+			dev_err(dev, "dpcon_set_notification() failed\n");
+ goto err_set_cdan;
+ }
+
+ /* If we managed to allocate a channel and also found an affine
+ * DPIO for this core, add it to the final mask
+ */
+ cpumask_set_cpu(i, &priv->dpio_cpumask);
+ priv->num_channels++;
+
+ /* Stop if we already have enough channels to accommodate all
+ * RX and TX conf queues
+ */
+ if (priv->num_channels == priv->dpni_attrs.num_queues)
+ break;
+ }
+
+ return 0;
+
+err_set_cdan:
+ dpaa2_io_service_deregister(channel->dpio, nctx, dev);
+err_service_reg:
+ dpaa2_eth_free_channel(priv, channel);
+err_alloc_ch:
+ if (err == -EPROBE_DEFER) {
+ for (i = 0; i < priv->num_channels; i++) {
+ channel = priv->channel[i];
+ nctx = &channel->nctx;
+ dpaa2_io_service_deregister(channel->dpio, nctx, dev);
+ dpaa2_eth_free_channel(priv, channel);
+ }
+ priv->num_channels = 0;
+ return err;
+ }
+
+ if (cpumask_empty(&priv->dpio_cpumask)) {
+ dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
+ return -ENODEV;
+ }
+
+ dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
+ cpumask_pr_args(&priv->dpio_cpumask));
+
+ return 0;
+}
+
+static void dpaa2_eth_free_dpio(struct dpaa2_eth_priv *priv)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpaa2_eth_channel *ch;
+ int i;
+
+ /* deregister CDAN notifications and free channels */
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
+ dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
+ dpaa2_eth_free_channel(priv, ch);
+ }
+}
+
+static struct dpaa2_eth_channel *dpaa2_eth_get_affine_channel(struct dpaa2_eth_priv *priv,
+ int cpu)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ int i;
+
+ for (i = 0; i < priv->num_channels; i++)
+ if (priv->channel[i]->nctx.desired_cpu == cpu)
+ return priv->channel[i];
+
+ /* We should never get here. Issue a warning and return
+ * the first channel, because it's still better than nothing
+ */
+ dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
+
+ return priv->channel[0];
+}
+
+static void dpaa2_eth_set_fq_affinity(struct dpaa2_eth_priv *priv)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpaa2_eth_fq *fq;
+ int rx_cpu, txc_cpu;
+ int i;
+
+ /* For each FQ, pick one channel/CPU to deliver frames to.
+ * This may well change at runtime, either through irqbalance or
+ * through direct user intervention.
+ */
+ rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);
+
+ for (i = 0; i < priv->num_fqs; i++) {
+ fq = &priv->fq[i];
+ switch (fq->type) {
+ case DPAA2_RX_FQ:
+ case DPAA2_RX_ERR_FQ:
+ fq->target_cpu = rx_cpu;
+ rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
+ if (rx_cpu >= nr_cpu_ids)
+ rx_cpu = cpumask_first(&priv->dpio_cpumask);
+ break;
+ case DPAA2_TX_CONF_FQ:
+ fq->target_cpu = txc_cpu;
+ txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
+ if (txc_cpu >= nr_cpu_ids)
+ txc_cpu = cpumask_first(&priv->dpio_cpumask);
+ break;
+ default:
+ dev_err(dev, "Unknown FQ type: %d\n", fq->type);
+ }
+ fq->channel = dpaa2_eth_get_affine_channel(priv, fq->target_cpu);
+ }
+
+ update_xps(priv);
+}
+
+static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv)
+{
+ int i, j;
+
+ /* We have one TxConf FQ per Tx flow.
+ * The number of Tx and Rx queues is the same.
+ * Tx queues come first in the fq array.
+ */
+ for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
+ priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
+ priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
+ priv->fq[priv->num_fqs++].flowid = (u16)i;
+ }
+
+ for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
+ for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
+ priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
+ priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
+ priv->fq[priv->num_fqs].tc = (u8)j;
+ priv->fq[priv->num_fqs++].flowid = (u16)i;
+ }
+ }
+
+ /* We have exactly one Rx error queue per DPNI */
+ priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
+ priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
+
+ /* For each FQ, decide on which core to process incoming frames */
+ dpaa2_eth_set_fq_affinity(priv);
+}
+
+/* Allocate and configure one buffer pool for each interface */
+static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv)
+{
+ int err;
+ struct fsl_mc_device *dpbp_dev;
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpbp_attr dpbp_attrs;
+
+ err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
+ &dpbp_dev);
+ if (err) {
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_err(dev, "DPBP device allocation failed\n");
+ return err;
+ }
+
+ priv->dpbp_dev = dpbp_dev;
+
+ err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
+ &dpbp_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpbp_open() failed\n");
+ goto err_open;
+ }
+
+ err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpbp_reset() failed\n");
+ goto err_reset;
+ }
+
+ err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpbp_enable() failed\n");
+ goto err_enable;
+ }
+
+ err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
+ &dpbp_attrs);
+ if (err) {
+ dev_err(dev, "dpbp_get_attributes() failed\n");
+ goto err_get_attr;
+ }
+ priv->bpid = dpbp_attrs.bpid;
+
+ return 0;
+
+err_get_attr:
+ dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
+err_enable:
+err_reset:
+ dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
+err_open:
+ fsl_mc_object_free(dpbp_dev);
+
+ return err;
+}
+
+static void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv)
+{
+ dpaa2_eth_drain_pool(priv);
+ dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
+ dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
+ fsl_mc_object_free(priv->dpbp_dev);
+}
+
+static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_buffer_layout buf_layout = {0};
+ u16 rx_buf_align;
+ int err;
+
+	/* We need to check for WRIOP version 1.0.0, but depending on the MC
+	 * version, rev1 hardware may report this as either 0.0.0 or 1.0.0,
+	 * so check for both alternatives.
+	 */
+ if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
+ priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
+ rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
+ else
+ rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
+
+ /* We need to ensure that the buffer size seen by WRIOP is a multiple
+ * of 64 or 256 bytes depending on the WRIOP version.
+ */
+ priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align);
+
+ /* tx buffer */
+ buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
+ buf_layout.pass_timestamp = true;
+ buf_layout.pass_frame_status = true;
+ buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
+ DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
+ DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
+ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_TX, &buf_layout);
+ if (err) {
+ dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
+ return err;
+ }
+
+ /* tx-confirm buffer */
+ buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
+ DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
+ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_TX_CONFIRM, &buf_layout);
+ if (err) {
+ dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
+ return err;
+ }
+
+ /* Now that we've set our tx buffer layout, retrieve the minimum
+ * required tx data offset.
+ */
+ err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
+ &priv->tx_data_offset);
+ if (err) {
+ dev_err(dev, "dpni_get_tx_data_offset() failed\n");
+ return err;
+ }
+
+ if ((priv->tx_data_offset % 64) != 0)
+ dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
+ priv->tx_data_offset);
+
+ /* rx buffer */
+ buf_layout.pass_frame_status = true;
+ buf_layout.pass_parser_result = true;
+ buf_layout.data_align = rx_buf_align;
+ buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
+ buf_layout.private_data_size = 0;
+ buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
+ DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
+ DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
+ DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
+ DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
+ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_RX, &buf_layout);
+ if (err) {
+ dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
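+/* Minimum DPNI version required for enqueueing Tx frames directly by FQID
+ * rather than through the QDID/QDBIN pair
+ */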
+#define DPNI_ENQUEUE_FQID_VER_MAJOR 7
+#define DPNI_ENQUEUE_FQID_VER_MINOR 9
+
+static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq,
+ struct dpaa2_fd *fd, u8 prio,
+ u32 num_frames __always_unused,
+ int *frames_enqueued)
+{
+ int err;
+
+ err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
+ priv->tx_qdid, prio,
+ fq->tx_qdbin, fd);
+ if (!err && frames_enqueued)
+ *frames_enqueued = 1;
+ return err;
+}
+
+static inline int dpaa2_eth_enqueue_fq_multiple(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq,
+ struct dpaa2_fd *fd,
+ u8 prio, u32 num_frames,
+ int *frames_enqueued)
+{
+ int err;
+
+ err = dpaa2_io_service_enqueue_multiple_fq(fq->channel->dpio,
+ fq->tx_fqid[prio],
+ fd, num_frames);
+
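+	/* A non-negative return value is the number of frames enqueued;
+	 * zero enqueued frames means the portal could not accept any,
+	 * so report busy.
+	 */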
+ if (err == 0)
+ return -EBUSY;
+
+ if (frames_enqueued)
+ *frames_enqueued = err;
+ return 0;
+}
+
+static void dpaa2_eth_set_enqueue_mode(struct dpaa2_eth_priv *priv)
+{
+ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
+ DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
+ priv->enqueue = dpaa2_eth_enqueue_qd;
+ else
+ priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
+}
+
+static int dpaa2_eth_set_pause(struct dpaa2_eth_priv *priv)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_link_cfg link_cfg = {0};
+ int err;
+
+ /* Get the default link options so we don't override other flags */
+ err = dpni_get_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
+ if (err) {
+ dev_err(dev, "dpni_get_link_cfg() failed\n");
+ return err;
+ }
+
+ /* By default, enable both Rx and Tx pause frames */
+ link_cfg.options |= DPNI_LINK_OPT_PAUSE;
+ link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
+ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_link_cfg() failed\n");
+ return err;
+ }
+
+ priv->link_state.options = link_cfg.options;
+
+ return 0;
+}
+
+static void dpaa2_eth_update_tx_fqids(struct dpaa2_eth_priv *priv)
+{
+ struct dpni_queue_id qid = {0};
+ struct dpaa2_eth_fq *fq;
+ struct dpni_queue queue;
+ int i, j, err;
+
+ /* We only use Tx FQIDs for FQID-based enqueue, so check
+ * if DPNI version supports it before updating FQIDs
+ */
+ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
+ DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
+ return;
+
+ for (i = 0; i < priv->num_fqs; i++) {
+ fq = &priv->fq[i];
+ if (fq->type != DPAA2_TX_CONF_FQ)
+ continue;
+ for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
+ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_TX, j, fq->flowid,
+ &queue, &qid);
+ if (err)
+ goto out_err;
+
+ fq->tx_fqid[j] = qid.fqid;
+ if (fq->tx_fqid[j] == 0)
+ goto out_err;
+ }
+ }
+
+ priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
+
+ return;
+
+out_err:
+ netdev_info(priv->net_dev,
+		    "Error reading Tx FQID, falling back to QDID-based enqueue\n");
+ priv->enqueue = dpaa2_eth_enqueue_qd;
+}
+
+/* Configure ingress classification based on VLAN PCP */
+static int dpaa2_eth_set_vlan_qos(struct dpaa2_eth_priv *priv)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpkg_profile_cfg kg_cfg = {0};
+ struct dpni_qos_tbl_cfg qos_cfg = {0};
+ struct dpni_rule_cfg key_params;
+ void *dma_mem, *key, *mask;
+ u8 key_size = 2; /* VLAN TCI field */
+ int i, pcp, err;
+
+ /* VLAN-based classification only makes sense if we have multiple
+ * traffic classes.
+ * Also, we need to extract just the 3-bit PCP field from the VLAN
+ * header and we can only do that by using a mask
+ */
+ if (dpaa2_eth_tc_count(priv) == 1 || !dpaa2_eth_fs_mask_enabled(priv)) {
+ dev_dbg(dev, "VLAN-based QoS classification not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
+ if (!dma_mem)
+ return -ENOMEM;
+
+ kg_cfg.num_extracts = 1;
+ kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
+ kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN;
+ kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
+ kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI;
+
+ err = dpni_prepare_key_cfg(&kg_cfg, dma_mem);
+ if (err) {
+ dev_err(dev, "dpni_prepare_key_cfg failed\n");
+ goto out_free_tbl;
+ }
+
+ /* set QoS table */
+ qos_cfg.default_tc = 0;
+ qos_cfg.discard_on_miss = 0;
+ qos_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
+ DPAA2_CLASSIFIER_DMA_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) {
+ dev_err(dev, "QoS table DMA mapping failed\n");
+ err = -ENOMEM;
+ goto out_free_tbl;
+ }
+
+ err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_qos_table failed\n");
+ goto out_unmap_tbl;
+ }
+
+ /* Add QoS table entries */
+ key = kzalloc(key_size * 2, GFP_KERNEL);
+ if (!key) {
+ err = -ENOMEM;
+ goto out_unmap_tbl;
+ }
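+	/* The key and its mask are laid out back to back in the same buffer
+	 * and DMA mapped together; the mask is set only once, while the key
+	 * value changes for each PCP rule added below.
+	 */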
+ mask = key + key_size;
+ *(__be16 *)mask = cpu_to_be16(VLAN_PRIO_MASK);
+
+ key_params.key_iova = dma_map_single(dev, key, key_size * 2,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, key_params.key_iova)) {
+		dev_err(dev, "QoS table entry DMA mapping failed\n");
+ err = -ENOMEM;
+ goto out_free_key;
+ }
+
+ key_params.mask_iova = key_params.key_iova + key_size;
+ key_params.key_size = key_size;
+
+ /* We add rules for PCP-based distribution starting with highest
+ * priority (VLAN PCP = 7). If this DPNI doesn't have enough traffic
+ * classes to accommodate all priority levels, the lowest ones end up
+ * on TC 0 which was configured as default
+ */
+ for (i = dpaa2_eth_tc_count(priv) - 1, pcp = 7; i >= 0; i--, pcp--) {
+ *(__be16 *)key = cpu_to_be16(pcp << VLAN_PRIO_SHIFT);
+ dma_sync_single_for_device(dev, key_params.key_iova,
+ key_size * 2, DMA_TO_DEVICE);
+
+ err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token,
+ &key_params, i, i);
+ if (err) {
+ dev_err(dev, "dpni_add_qos_entry failed\n");
+ dpni_clear_qos_table(priv->mc_io, 0, priv->mc_token);
+ goto out_unmap_key;
+ }
+ }
+
+ priv->vlan_cls_enabled = true;
+
+ /* Table and key memory is not persistent, clean everything up after
+ * configuration is finished
+ */
+out_unmap_key:
+ dma_unmap_single(dev, key_params.key_iova, key_size * 2, DMA_TO_DEVICE);
+out_free_key:
+ kfree(key);
+out_unmap_tbl:
+ dma_unmap_single(dev, qos_cfg.key_cfg_iova, DPAA2_CLASSIFIER_DMA_SIZE,
+ DMA_TO_DEVICE);
+out_free_tbl:
+ kfree(dma_mem);
+
+ return err;
+}
+
+/* Configure the DPNI object this interface is associated with */
+static int dpaa2_eth_setup_dpni(struct fsl_mc_device *ls_dev)
+{
+ struct device *dev = &ls_dev->dev;
+ struct dpaa2_eth_priv *priv;
+ struct net_device *net_dev;
+ int err;
+
+ net_dev = dev_get_drvdata(dev);
+ priv = netdev_priv(net_dev);
+
+ /* get a handle for the DPNI object */
+ err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
+ if (err) {
+ dev_err(dev, "dpni_open() failed\n");
+ return err;
+ }
+
+ /* Check if we can work with this DPNI object */
+ err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
+ &priv->dpni_ver_minor);
+ if (err) {
+ dev_err(dev, "dpni_get_api_version() failed\n");
+ goto close;
+ }
+ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
+ dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
+ priv->dpni_ver_major, priv->dpni_ver_minor,
+ DPNI_VER_MAJOR, DPNI_VER_MINOR);
+ err = -ENOTSUPP;
+ goto close;
+ }
+
+ ls_dev->mc_io = priv->mc_io;
+ ls_dev->mc_handle = priv->mc_token;
+
+ err = dpni_reset(priv->mc_io, 0, priv->mc_token);
+ if (err) {
+ dev_err(dev, "dpni_reset() failed\n");
+ goto close;
+ }
+
+ err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
+ &priv->dpni_attrs);
+ if (err) {
+ dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
+ goto close;
+ }
+
+ err = dpaa2_eth_set_buffer_layout(priv);
+ if (err)
+ goto close;
+
+ dpaa2_eth_set_enqueue_mode(priv);
+
+ /* Enable pause frame support */
+ if (dpaa2_eth_has_pause_support(priv)) {
+ err = dpaa2_eth_set_pause(priv);
+ if (err)
+ goto close;
+ }
+
+ err = dpaa2_eth_set_vlan_qos(priv);
+ if (err && err != -EOPNOTSUPP)
+ goto close;
+
+ priv->cls_rules = devm_kcalloc(dev, dpaa2_eth_fs_count(priv),
+ sizeof(struct dpaa2_eth_cls_rule),
+ GFP_KERNEL);
+ if (!priv->cls_rules) {
+ err = -ENOMEM;
+ goto close;
+ }
+
+ return 0;
+
+close:
+ dpni_close(priv->mc_io, 0, priv->mc_token);
+
+ return err;
+}
+
+static void dpaa2_eth_free_dpni(struct dpaa2_eth_priv *priv)
+{
+ int err;
+
+ err = dpni_reset(priv->mc_io, 0, priv->mc_token);
+ if (err)
+ netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
+ err);
+
+ dpni_close(priv->mc_io, 0, priv->mc_token);
+}
+
+static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_queue queue;
+ struct dpni_queue_id qid;
+ int err;
+
+ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid);
+ if (err) {
+ dev_err(dev, "dpni_get_queue(RX) failed\n");
+ return err;
+ }
+
+ fq->fqid = qid.fqid;
+
+ queue.destination.id = fq->channel->dpcon_id;
+ queue.destination.type = DPNI_DEST_DPCON;
+ queue.destination.priority = 1;
+ queue.user_context = (u64)(uintptr_t)fq;
+ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_RX, fq->tc, fq->flowid,
+ DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
+ &queue);
+ if (err) {
+ dev_err(dev, "dpni_set_queue(RX) failed\n");
+ return err;
+ }
+
+	/* xdp_rxq setup; do this only once for each channel (i.e. for tc 0) */
+ if (fq->tc > 0)
+ return 0;
+
+ err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
+ fq->flowid, 0);
+ if (err) {
+ dev_err(dev, "xdp_rxq_info_reg failed\n");
+ return err;
+ }
+
+ err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq,
+ MEM_TYPE_PAGE_ORDER0, NULL);
+ if (err) {
+ dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_setup_tx_flow(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_queue queue;
+ struct dpni_queue_id qid;
+ int i, err;
+
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_TX, i, fq->flowid,
+ &queue, &qid);
+ if (err) {
+ dev_err(dev, "dpni_get_queue(TX) failed\n");
+ return err;
+ }
+ fq->tx_fqid[i] = qid.fqid;
+ }
+
+ /* All Tx queues belonging to the same flowid have the same qdbin */
+ fq->tx_qdbin = qid.qdbin;
+
+ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
+ &queue, &qid);
+ if (err) {
+ dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
+ return err;
+ }
+
+ fq->fqid = qid.fqid;
+
+ queue.destination.id = fq->channel->dpcon_id;
+ queue.destination.type = DPNI_DEST_DPCON;
+ queue.destination.priority = 0;
+ queue.user_context = (u64)(uintptr_t)fq;
+ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
+ DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
+ &queue);
+ if (err) {
+ dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_queue q = { { 0 } };
+ struct dpni_queue_id qid;
+ u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
+ int err;
+
+ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
+ if (err) {
+ dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
+ return err;
+ }
+
+ fq->fqid = qid.fqid;
+
+ q.destination.id = fq->channel->dpcon_id;
+ q.destination.type = DPNI_DEST_DPCON;
+ q.destination.priority = 1;
+ q.user_context = (u64)(uintptr_t)fq;
+ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
+ if (err) {
+ dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+/* Supported header fields for Rx hash distribution key */
+static const struct dpaa2_eth_dist_fields dist_fields[] = {
+ {
+ /* L2 header */
+ .rxnfc_field = RXH_L2DA,
+ .cls_prot = NET_PROT_ETH,
+ .cls_field = NH_FLD_ETH_DA,
+ .id = DPAA2_ETH_DIST_ETHDST,
+ .size = 6,
+ }, {
+ .cls_prot = NET_PROT_ETH,
+ .cls_field = NH_FLD_ETH_SA,
+ .id = DPAA2_ETH_DIST_ETHSRC,
+ .size = 6,
+ }, {
+ /* This is the last ethertype field parsed:
+ * depending on frame format, it can be the MAC ethertype
+ * or the VLAN etype.
+ */
+ .cls_prot = NET_PROT_ETH,
+ .cls_field = NH_FLD_ETH_TYPE,
+ .id = DPAA2_ETH_DIST_ETHTYPE,
+ .size = 2,
+ }, {
+ /* VLAN header */
+ .rxnfc_field = RXH_VLAN,
+ .cls_prot = NET_PROT_VLAN,
+ .cls_field = NH_FLD_VLAN_TCI,
+ .id = DPAA2_ETH_DIST_VLAN,
+ .size = 2,
+ }, {
+ /* IP header */
+ .rxnfc_field = RXH_IP_SRC,
+ .cls_prot = NET_PROT_IP,
+ .cls_field = NH_FLD_IP_SRC,
+ .id = DPAA2_ETH_DIST_IPSRC,
+ .size = 4,
+ }, {
+ .rxnfc_field = RXH_IP_DST,
+ .cls_prot = NET_PROT_IP,
+ .cls_field = NH_FLD_IP_DST,
+ .id = DPAA2_ETH_DIST_IPDST,
+ .size = 4,
+ }, {
+ .rxnfc_field = RXH_L3_PROTO,
+ .cls_prot = NET_PROT_IP,
+ .cls_field = NH_FLD_IP_PROTO,
+ .id = DPAA2_ETH_DIST_IPPROTO,
+ .size = 1,
+ }, {
+ /* Using UDP ports, this is functionally equivalent to raw
+ * byte pairs from L4 header.
+ */
+ .rxnfc_field = RXH_L4_B_0_1,
+ .cls_prot = NET_PROT_UDP,
+ .cls_field = NH_FLD_UDP_PORT_SRC,
+ .id = DPAA2_ETH_DIST_L4SRC,
+ .size = 2,
+ }, {
+ .rxnfc_field = RXH_L4_B_2_3,
+ .cls_prot = NET_PROT_UDP,
+ .cls_field = NH_FLD_UDP_PORT_DST,
+ .id = DPAA2_ETH_DIST_L4DST,
+ .size = 2,
+ },
+};
+
+/* Configure the Rx hash key using the legacy API */
+static int dpaa2_eth_config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_rx_tc_dist_cfg dist_cfg;
+ int i, err = 0;
+
+ memset(&dist_cfg, 0, sizeof(dist_cfg));
+
+ dist_cfg.key_cfg_iova = key;
+ dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
+ dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
+
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token,
+ i, &dist_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_rx_tc_dist failed\n");
+ break;
+ }
+ }
+
+ return err;
+}
+
+/* Configure the Rx hash key using the new API */
+static int dpaa2_eth_config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_rx_dist_cfg dist_cfg;
+ int i, err = 0;
+
+ memset(&dist_cfg, 0, sizeof(dist_cfg));
+
+ dist_cfg.key_cfg_iova = key;
+ dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
+ dist_cfg.enable = 1;
+
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ dist_cfg.tc = i;
+ err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token,
+ &dist_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_rx_hash_dist failed\n");
+ break;
+ }
+
+ /* If the flow steering / hashing key is shared between all
+ * traffic classes, install it just once
+ */
+ if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
+ break;
+ }
+
+ return err;
+}
+
+/* Configure the Rx flow classification key */
+static int dpaa2_eth_config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_rx_dist_cfg dist_cfg;
+ int i, err = 0;
+
+ memset(&dist_cfg, 0, sizeof(dist_cfg));
+
+ dist_cfg.key_cfg_iova = key;
+ dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
+ dist_cfg.enable = 1;
+
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ dist_cfg.tc = i;
+ err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token,
+ &dist_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_rx_fs_dist failed\n");
+ break;
+ }
+
+ /* If the flow steering / hashing key is shared between all
+ * traffic classes, install it just once
+ */
+ if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
+ break;
+ }
+
+ return err;
+}
+
+/* Size of the Rx flow classification key */
+int dpaa2_eth_cls_key_size(u64 fields)
+{
+ int i, size = 0;
+
+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
+ if (!(fields & dist_fields[i].id))
+ continue;
+ size += dist_fields[i].size;
+ }
+
+ return size;
+}
+
+/* Offset of header field in Rx classification key */
+int dpaa2_eth_cls_fld_off(int prot, int field)
+{
+ int i, off = 0;
+
+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
+ if (dist_fields[i].cls_prot == prot &&
+ dist_fields[i].cls_field == field)
+ return off;
+ off += dist_fields[i].size;
+ }
+
+ WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
+ return 0;
+}
+
+/* Prune unused fields from the classification rule.
+ * Used when masking is not supported
+ */
+void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
+{
+ int off = 0, new_off = 0;
+ int i, size;
+
+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
+ size = dist_fields[i].size;
+ if (dist_fields[i].id & fields) {
+ memcpy(key_mem + new_off, key_mem + off, size);
+ new_off += size;
+ }
+ off += size;
+ }
+}
+
+/* Set Rx distribution (hash or flow classification) key
+ * flags is a combination of RXH_ bits
+ */
+static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
+ enum dpaa2_eth_rx_dist type, u64 flags)
+{
+ struct device *dev = net_dev->dev.parent;
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct dpkg_profile_cfg cls_cfg;
+ u32 rx_hash_fields = 0;
+ dma_addr_t key_iova;
+ u8 *dma_mem;
+ int i;
+ int err = 0;
+
+ memset(&cls_cfg, 0, sizeof(cls_cfg));
+
+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
+ struct dpkg_extract *key =
+ &cls_cfg.extracts[cls_cfg.num_extracts];
+
+ /* For both Rx hashing and classification keys
+ * we set only the selected fields.
+ */
+ if (!(flags & dist_fields[i].id))
+ continue;
+ if (type == DPAA2_ETH_RX_DIST_HASH)
+ rx_hash_fields |= dist_fields[i].rxnfc_field;
+
+ if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
+ dev_err(dev, "error adding key extraction rule, too many rules?\n");
+ return -E2BIG;
+ }
+
+ key->type = DPKG_EXTRACT_FROM_HDR;
+ key->extract.from_hdr.prot = dist_fields[i].cls_prot;
+ key->extract.from_hdr.type = DPKG_FULL_FIELD;
+ key->extract.from_hdr.field = dist_fields[i].cls_field;
+ cls_cfg.num_extracts++;
+ }
+
+ dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
+ if (!dma_mem)
+ return -ENOMEM;
+
+ err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
+ if (err) {
+ dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
+ goto free_key;
+ }
+
+ /* Prepare for setting the rx dist */
+ key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, key_iova)) {
+ dev_err(dev, "DMA mapping failed\n");
+ err = -ENOMEM;
+ goto free_key;
+ }
+
+ if (type == DPAA2_ETH_RX_DIST_HASH) {
+ if (dpaa2_eth_has_legacy_dist(priv))
+ err = dpaa2_eth_config_legacy_hash_key(priv, key_iova);
+ else
+ err = dpaa2_eth_config_hash_key(priv, key_iova);
+ } else {
+ err = dpaa2_eth_config_cls_key(priv, key_iova);
+ }
+
+ dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
+ DMA_TO_DEVICE);
+ if (!err && type == DPAA2_ETH_RX_DIST_HASH)
+ priv->rx_hash_fields = rx_hash_fields;
+
+free_key:
+ kfree(dma_mem);
+ return err;
+}
+
+int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ u64 key = 0;
+ int i;
+
+ if (!dpaa2_eth_hash_enabled(priv))
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
+ if (dist_fields[i].rxnfc_field & flags)
+ key |= dist_fields[i].id;
+
+ return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
+}
+
+int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
+{
+ return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
+}
+
+static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ int err;
+
+ /* Check if we actually support Rx flow classification */
+ if (dpaa2_eth_has_legacy_dist(priv)) {
+ dev_dbg(dev, "Rx cls not supported by current MC version\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!dpaa2_eth_fs_enabled(priv)) {
+ dev_dbg(dev, "Rx cls disabled in DPNI options\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!dpaa2_eth_hash_enabled(priv)) {
+ dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* If there is no support for masking in the classification table,
+ * we don't set a default key, as it will depend on the rules
+ * added by the user at runtime.
+ */
+ if (!dpaa2_eth_fs_mask_enabled(priv))
+ goto out;
+
+ err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
+ if (err)
+ return err;
+
+out:
+ priv->rx_cls_enabled = 1;
+
+ return 0;
+}
+
+/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
+ * frame queues and channels
+ */
+static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
+{
+ struct net_device *net_dev = priv->net_dev;
+ struct device *dev = net_dev->dev.parent;
+ struct dpni_pools_cfg pools_params;
+ struct dpni_error_cfg err_cfg;
+ int err = 0;
+ int i;
+
+ pools_params.num_dpbp = 1;
+ pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
+ pools_params.pools[0].backup_pool = 0;
+ pools_params.pools[0].buffer_size = priv->rx_buf_size;
+ err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
+ if (err) {
+ dev_err(dev, "dpni_set_pools() failed\n");
+ return err;
+ }
+
+ /* have the interface implicitly distribute traffic based on
+ * the default hash key
+ */
+ err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
+ if (err && err != -EOPNOTSUPP)
+ dev_err(dev, "Failed to configure hashing\n");
+
+ /* Configure the flow classification key; it includes all
+ * supported header fields and cannot be modified at runtime
+ */
+ err = dpaa2_eth_set_default_cls(priv);
+ if (err && err != -EOPNOTSUPP)
+ dev_err(dev, "Failed to configure Rx classification key\n");
+
+ /* Configure handling of error frames */
+ err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
+ err_cfg.set_frame_annotation = 1;
+ err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
+ err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
+ &err_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_errors_behavior failed\n");
+ return err;
+ }
+
+ /* Configure Rx and Tx conf queues to generate CDANs */
+ for (i = 0; i < priv->num_fqs; i++) {
+ switch (priv->fq[i].type) {
+ case DPAA2_RX_FQ:
+ err = dpaa2_eth_setup_rx_flow(priv, &priv->fq[i]);
+ break;
+ case DPAA2_TX_CONF_FQ:
+ err = dpaa2_eth_setup_tx_flow(priv, &priv->fq[i]);
+ break;
+ case DPAA2_RX_ERR_FQ:
+ err = setup_rx_err_flow(priv, &priv->fq[i]);
+ break;
+ default:
+ dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
+ return -EINVAL;
+ }
+ if (err)
+ return err;
+ }
+
+ err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_TX, &priv->tx_qdid);
+ if (err) {
+ dev_err(dev, "dpni_get_qdid() failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+/* Allocate rings for storing incoming frame descriptors */
+static int dpaa2_eth_alloc_rings(struct dpaa2_eth_priv *priv)
+{
+ struct net_device *net_dev = priv->net_dev;
+ struct device *dev = net_dev->dev.parent;
+ int i;
+
+ for (i = 0; i < priv->num_channels; i++) {
+ priv->channel[i]->store =
+ dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
+ if (!priv->channel[i]->store) {
+ netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
+ goto err_ring;
+ }
+ }
+
+ return 0;
+
+err_ring:
+ for (i = 0; i < priv->num_channels; i++) {
+ if (!priv->channel[i]->store)
+ break;
+ dpaa2_io_store_destroy(priv->channel[i]->store);
+ }
+
+ return -ENOMEM;
+}
+
+static void dpaa2_eth_free_rings(struct dpaa2_eth_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_channels; i++)
+ dpaa2_io_store_destroy(priv->channel[i]->store);
+}
+
+static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv)
+{
+ struct net_device *net_dev = priv->net_dev;
+ struct device *dev = net_dev->dev.parent;
+ u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
+ int err;
+
+ /* Get firmware address, if any */
+ err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
+ if (err) {
+ dev_err(dev, "dpni_get_port_mac_addr() failed\n");
+ return err;
+ }
+
+ /* Get DPNI attributes address, if any */
+ err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
+ dpni_mac_addr);
+ if (err) {
+ dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
+ return err;
+ }
+
+ /* First check if firmware has any address configured by bootloader */
+ if (!is_zero_ether_addr(mac_addr)) {
+ /* If the DPMAC addr != DPNI addr, update it */
+ if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
+ err = dpni_set_primary_mac_addr(priv->mc_io, 0,
+ priv->mc_token,
+ mac_addr);
+ if (err) {
+ dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
+ return err;
+ }
+ }
+ eth_hw_addr_set(net_dev, mac_addr);
+ } else if (is_zero_ether_addr(dpni_mac_addr)) {
+ /* No MAC address configured, fill in net_dev->dev_addr
+ * with a random one
+ */
+ eth_hw_addr_random(net_dev);
+ dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
+
+ err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
+ net_dev->dev_addr);
+ if (err) {
+ dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
+ return err;
+ }
+
+ /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
+ * practical purposes, this will be our "permanent" mac address,
+		 * at least until the next reboot. This also permits
+		 * register_netdevice() to properly fill in net_dev->perm_addr.
+ */
+ net_dev->addr_assign_type = NET_ADDR_PERM;
+ } else {
+ /* NET_ADDR_PERM is default, all we have to do is
+ * fill in the device addr.
+ */
+ eth_hw_addr_set(net_dev, dpni_mac_addr);
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_netdev_init(struct net_device *net_dev)
+{
+ struct device *dev = net_dev->dev.parent;
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ u32 options = priv->dpni_attrs.options;
+ u64 supported = 0, not_supported = 0;
+ u8 bcast_addr[ETH_ALEN];
+ u8 num_queues;
+ int err;
+
+ net_dev->netdev_ops = &dpaa2_eth_ops;
+ net_dev->ethtool_ops = &dpaa2_ethtool_ops;
+
+ err = dpaa2_eth_set_mac_addr(priv);
+ if (err)
+ return err;
+
+ /* Explicitly add the broadcast address to the MAC filtering table */
+ eth_broadcast_addr(bcast_addr);
+ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
+ if (err) {
+ dev_err(dev, "dpni_add_mac_addr() failed\n");
+ return err;
+ }
+
+ /* Set MTU upper limit; lower limit is 68B (default value) */
+ net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
+ err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
+ DPAA2_ETH_MFL);
+ if (err) {
+ dev_err(dev, "dpni_set_max_frame_length() failed\n");
+ return err;
+ }
+
+ /* Set actual number of queues in the net device */
+ num_queues = dpaa2_eth_queue_count(priv);
+ err = netif_set_real_num_tx_queues(net_dev, num_queues);
+ if (err) {
+ dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
+ return err;
+ }
+ err = netif_set_real_num_rx_queues(net_dev, num_queues);
+ if (err) {
+ dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
+ return err;
+ }
+
+ dpaa2_eth_detect_features(priv);
+
+ /* Capabilities listing */
+ supported |= IFF_LIVE_ADDR_CHANGE;
+
+ if (options & DPNI_OPT_NO_MAC_FILTER)
+ not_supported |= IFF_UNICAST_FLT;
+ else
+ supported |= IFF_UNICAST_FLT;
+
+ net_dev->priv_flags |= supported;
+ net_dev->priv_flags &= ~not_supported;
+
+ /* Features */
+ net_dev->features = NETIF_F_RXCSUM |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_SG | NETIF_F_HIGHDMA |
+ NETIF_F_LLTX | NETIF_F_HW_TC | NETIF_F_TSO;
+ net_dev->gso_max_segs = DPAA2_ETH_ENQUEUE_MAX_FDS;
+ net_dev->hw_features = net_dev->features;
+
+ if (priv->dpni_attrs.vlan_filter_entries)
+ net_dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ return 0;
+}
+
+static int dpaa2_eth_poll_link_state(void *arg)
+{
+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
+ int err;
+
+ while (!kthread_should_stop()) {
+ err = dpaa2_eth_link_state_update(priv);
+ if (unlikely(err))
+ return err;
+
+ msleep(DPAA2_ETH_LINK_STATE_REFRESH);
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
+{
+ struct fsl_mc_device *dpni_dev, *dpmac_dev;
+ struct dpaa2_mac *mac;
+ int err;
+
+ dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
+ dpmac_dev = fsl_mc_get_endpoint(dpni_dev, 0);
+
+ if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
+ return PTR_ERR(dpmac_dev);
+
+ if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
+ return 0;
+
+ mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
+ if (!mac)
+ return -ENOMEM;
+
+ mac->mc_dev = dpmac_dev;
+ mac->mc_io = priv->mc_io;
+ mac->net_dev = priv->net_dev;
+
+ err = dpaa2_mac_open(mac);
+ if (err)
+ goto err_free_mac;
+ priv->mac = mac;
+
+ if (dpaa2_eth_is_type_phy(priv)) {
+ err = dpaa2_mac_connect(mac);
+ if (err && err != -EPROBE_DEFER)
+ netdev_err(priv->net_dev, "Error connecting to the MAC endpoint: %pe",
+ ERR_PTR(err));
+ if (err)
+ goto err_close_mac;
+ }
+
+ return 0;
+
+err_close_mac:
+ dpaa2_mac_close(mac);
+ priv->mac = NULL;
+err_free_mac:
+ kfree(mac);
+ return err;
+}
+
+static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
+{
+ if (dpaa2_eth_is_type_phy(priv))
+ dpaa2_mac_disconnect(priv->mac);
+
+ if (!dpaa2_eth_has_mac(priv))
+ return;
+
+ dpaa2_mac_close(priv->mac);
+ kfree(priv->mac);
+ priv->mac = NULL;
+}
+
+static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
+{
+ u32 status = ~0;
+ struct device *dev = (struct device *)arg;
+ struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
+ struct net_device *net_dev = dev_get_drvdata(dev);
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err;
+
+ err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
+ DPNI_IRQ_INDEX, &status);
+ if (unlikely(err)) {
+ netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
+ return IRQ_HANDLED;
+ }
+
+ if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
+ dpaa2_eth_link_state_update(netdev_priv(net_dev));
+
+ if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) {
+ dpaa2_eth_set_mac_addr(netdev_priv(net_dev));
+ dpaa2_eth_update_tx_fqids(priv);
+
+ rtnl_lock();
+ if (dpaa2_eth_has_mac(priv))
+ dpaa2_eth_disconnect_mac(priv);
+ else
+ dpaa2_eth_connect_mac(priv);
+ rtnl_unlock();
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
+{
+ int err = 0;
+ struct fsl_mc_device_irq *irq;
+
+ err = fsl_mc_allocate_irqs(ls_dev);
+ if (err) {
+ dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
+ return err;
+ }
+
+ irq = ls_dev->irqs[0];
+ err = devm_request_threaded_irq(&ls_dev->dev, irq->virq,
+ NULL, dpni_irq0_handler_thread,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ dev_name(&ls_dev->dev), &ls_dev->dev);
+ if (err < 0) {
+ dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
+ goto free_mc_irq;
+ }
+
+ err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
+ DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED |
+ DPNI_IRQ_EVENT_ENDPOINT_CHANGED);
+ if (err < 0) {
+ dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
+ goto free_irq;
+ }
+
+ err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
+ DPNI_IRQ_INDEX, 1);
+ if (err < 0) {
+ dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
+ goto free_irq;
+ }
+
+ return 0;
+
+free_irq:
+ devm_free_irq(&ls_dev->dev, irq->virq, &ls_dev->dev);
+free_mc_irq:
+ fsl_mc_free_irqs(ls_dev);
+
+ return err;
+}
+
+static void dpaa2_eth_add_ch_napi(struct dpaa2_eth_priv *priv)
+{
+ int i;
+ struct dpaa2_eth_channel *ch;
+
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
+ /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
+ netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll);
+ }
+}
+
+static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv)
+{
+ int i;
+ struct dpaa2_eth_channel *ch;
+
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
+ netif_napi_del(&ch->napi);
+ }
+}
+
+static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
+{
+ struct device *dev;
+ struct net_device *net_dev = NULL;
+ struct dpaa2_eth_priv *priv = NULL;
+ int err = 0;
+
+ dev = &dpni_dev->dev;
+
+ /* Net device */
+ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
+ if (!net_dev) {
+ dev_err(dev, "alloc_etherdev_mq() failed\n");
+ return -ENOMEM;
+ }
+
+ SET_NETDEV_DEV(net_dev, dev);
+ dev_set_drvdata(dev, net_dev);
+
+ priv = netdev_priv(net_dev);
+ priv->net_dev = net_dev;
+
+ priv->iommu_domain = iommu_get_domain_for_dev(dev);
+
+ priv->tx_tstamp_type = HWTSTAMP_TX_OFF;
+ priv->rx_tstamp = false;
+
+ priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", 0, 0);
+ if (!priv->dpaa2_ptp_wq) {
+ err = -ENOMEM;
+ goto err_wq_alloc;
+ }
+
+ INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp);
+ mutex_init(&priv->onestep_tstamp_lock);
+ skb_queue_head_init(&priv->tx_skbs);
+
+ priv->rx_copybreak = DPAA2_ETH_DEFAULT_COPYBREAK;
+
+ /* Obtain a MC portal */
+ err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
+ &priv->mc_io);
+ if (err) {
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_err(dev, "MC portal allocation failed\n");
+ goto err_portal_alloc;
+ }
+
+ /* MC objects initialization and configuration */
+ err = dpaa2_eth_setup_dpni(dpni_dev);
+ if (err)
+ goto err_dpni_setup;
+
+ err = dpaa2_eth_setup_dpio(priv);
+ if (err)
+ goto err_dpio_setup;
+
+ dpaa2_eth_setup_fqs(priv);
+
+ err = dpaa2_eth_setup_dpbp(priv);
+ if (err)
+ goto err_dpbp_setup;
+
+ err = dpaa2_eth_bind_dpni(priv);
+ if (err)
+ goto err_bind;
+
+ /* Add a NAPI context for each channel */
+ dpaa2_eth_add_ch_napi(priv);
+
+ /* Percpu statistics */
+ priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
+ if (!priv->percpu_stats) {
+ dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
+ err = -ENOMEM;
+ goto err_alloc_percpu_stats;
+ }
+ priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
+ if (!priv->percpu_extras) {
+ dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
+ err = -ENOMEM;
+ goto err_alloc_percpu_extras;
+ }
+
+ priv->sgt_cache = alloc_percpu(*priv->sgt_cache);
+ if (!priv->sgt_cache) {
+ dev_err(dev, "alloc_percpu(sgt_cache) failed\n");
+ err = -ENOMEM;
+ goto err_alloc_sgt_cache;
+ }
+
+ priv->fd = alloc_percpu(*priv->fd);
+ if (!priv->fd) {
+ dev_err(dev, "alloc_percpu(fds) failed\n");
+ err = -ENOMEM;
+ goto err_alloc_fds;
+ }
+
+ err = dpaa2_eth_netdev_init(net_dev);
+ if (err)
+ goto err_netdev_init;
+
+ /* Configure checksum offload based on current interface flags */
+ err = dpaa2_eth_set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
+ if (err)
+ goto err_csum;
+
+ err = dpaa2_eth_set_tx_csum(priv,
+ !!(net_dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
+ if (err)
+ goto err_csum;
+
+ err = dpaa2_eth_alloc_rings(priv);
+ if (err)
+ goto err_alloc_rings;
+
+#ifdef CONFIG_FSL_DPAA2_ETH_DCB
+ if (dpaa2_eth_has_pause_support(priv) && priv->vlan_cls_enabled) {
+ priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+ net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops;
+ } else {
+ dev_dbg(dev, "PFC not supported\n");
+ }
+#endif
+
+ err = dpaa2_eth_setup_irqs(dpni_dev);
+ if (err) {
+ netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
+ priv->poll_thread = kthread_run(dpaa2_eth_poll_link_state, priv,
+ "%s_poll_link", net_dev->name);
+ if (IS_ERR(priv->poll_thread)) {
+ dev_err(dev, "Error starting polling thread\n");
+ goto err_poll_thread;
+ }
+ priv->do_link_poll = true;
+ }
+
+ err = dpaa2_eth_connect_mac(priv);
+ if (err)
+ goto err_connect_mac;
+
+ err = dpaa2_eth_dl_alloc(priv);
+ if (err)
+ goto err_dl_register;
+
+ err = dpaa2_eth_dl_traps_register(priv);
+ if (err)
+ goto err_dl_trap_register;
+
+ err = dpaa2_eth_dl_port_add(priv);
+ if (err)
+ goto err_dl_port_add;
+
+ net_dev->needed_headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN;
+
+ err = register_netdev(net_dev);
+ if (err < 0) {
+ dev_err(dev, "register_netdev() failed\n");
+ goto err_netdev_reg;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ dpaa2_dbg_add(priv);
+#endif
+
+ dpaa2_eth_dl_register(priv);
+ dev_info(dev, "Probed interface %s\n", net_dev->name);
+ return 0;
+
+err_netdev_reg:
+ dpaa2_eth_dl_port_del(priv);
+err_dl_port_add:
+ dpaa2_eth_dl_traps_unregister(priv);
+err_dl_trap_register:
+ dpaa2_eth_dl_free(priv);
+err_dl_register:
+ dpaa2_eth_disconnect_mac(priv);
+err_connect_mac:
+ if (priv->do_link_poll)
+ kthread_stop(priv->poll_thread);
+ else
+ fsl_mc_free_irqs(dpni_dev);
+err_poll_thread:
+ dpaa2_eth_free_rings(priv);
+err_alloc_rings:
+err_csum:
+err_netdev_init:
+ free_percpu(priv->fd);
+err_alloc_fds:
+ free_percpu(priv->sgt_cache);
+err_alloc_sgt_cache:
+ free_percpu(priv->percpu_extras);
+err_alloc_percpu_extras:
+ free_percpu(priv->percpu_stats);
+err_alloc_percpu_stats:
+ dpaa2_eth_del_ch_napi(priv);
+err_bind:
+ dpaa2_eth_free_dpbp(priv);
+err_dpbp_setup:
+ dpaa2_eth_free_dpio(priv);
+err_dpio_setup:
+ dpaa2_eth_free_dpni(priv);
+err_dpni_setup:
+ fsl_mc_portal_free(priv->mc_io);
+err_portal_alloc:
+ destroy_workqueue(priv->dpaa2_ptp_wq);
+err_wq_alloc:
+ dev_set_drvdata(dev, NULL);
+ free_netdev(net_dev);
+
+ return err;
+}
+
+static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
+{
+ struct device *dev;
+ struct net_device *net_dev;
+ struct dpaa2_eth_priv *priv;
+
+ dev = &ls_dev->dev;
+ net_dev = dev_get_drvdata(dev);
+ priv = netdev_priv(net_dev);
+
+ dpaa2_eth_dl_unregister(priv);
+
+#ifdef CONFIG_DEBUG_FS
+ dpaa2_dbg_remove(priv);
+#endif
+
+ unregister_netdev(net_dev);
+ rtnl_lock();
+ dpaa2_eth_disconnect_mac(priv);
+ rtnl_unlock();
+
+ dpaa2_eth_dl_port_del(priv);
+ dpaa2_eth_dl_traps_unregister(priv);
+ dpaa2_eth_dl_free(priv);
+
+ if (priv->do_link_poll)
+ kthread_stop(priv->poll_thread);
+ else
+ fsl_mc_free_irqs(ls_dev);
+
+ dpaa2_eth_free_rings(priv);
+ free_percpu(priv->fd);
+ free_percpu(priv->sgt_cache);
+ free_percpu(priv->percpu_stats);
+ free_percpu(priv->percpu_extras);
+
+ dpaa2_eth_del_ch_napi(priv);
+ dpaa2_eth_free_dpbp(priv);
+ dpaa2_eth_free_dpio(priv);
+ dpaa2_eth_free_dpni(priv);
+ if (priv->onestep_reg_base)
+ iounmap(priv->onestep_reg_base);
+
+ fsl_mc_portal_free(priv->mc_io);
+
+ destroy_workqueue(priv->dpaa2_ptp_wq);
+
+ dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
+
+ free_netdev(net_dev);
+
+ return 0;
+}
+
+static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpni",
+ },
+ { .vendor = 0x0 }
+};
+MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
+
+static struct fsl_mc_driver dpaa2_eth_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = dpaa2_eth_probe,
+ .remove = dpaa2_eth_remove,
+ .match_id_table = dpaa2_eth_match_id_table
+};
+
+static int __init dpaa2_eth_driver_init(void)
+{
+ int err;
+
+ dpaa2_eth_dbg_init();
+ err = fsl_mc_driver_register(&dpaa2_eth_driver);
+ if (err) {
+ dpaa2_eth_dbg_exit();
+ return err;
+ }
+
+ return 0;
+}
+
+static void __exit dpaa2_eth_driver_exit(void)
+{
+ dpaa2_eth_dbg_exit();
+ fsl_mc_driver_unregister(&dpaa2_eth_driver);
+}
+
+module_init(dpaa2_eth_driver_init);
+module_exit(dpaa2_eth_driver_exit);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
new file mode 100644
index 000000000..e703846ad
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -0,0 +1,774 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2020 NXP
+ */
+
+#ifndef __DPAA2_ETH_H
+#define __DPAA2_ETH_H
+
+#include <linux/dcbnl.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/fsl/mc.h>
+#include <linux/net_tstamp.h>
+#include <net/devlink.h>
+
+#include <soc/fsl/dpaa2-io.h>
+#include <soc/fsl/dpaa2-fd.h>
+#include "dpni.h"
+#include "dpni-cmd.h"
+
+#include "dpaa2-eth-trace.h"
+#include "dpaa2-eth-debugfs.h"
+#include "dpaa2-mac.h"
+
+#define DPAA2_WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0)
+
+#define DPAA2_ETH_STORE_SIZE 16
+
+/* Maximum number of scatter-gather entries in an ingress frame,
+ * considering the maximum receive frame size is 64K
+ */
+#define DPAA2_ETH_MAX_SG_ENTRIES ((64 * 1024) / DPAA2_ETH_RX_BUF_SIZE)
+
+/* Maximum acceptable MTU value. It is in direct relation with the hardware
+ * enforced Max Frame Length (currently 10k).
+ */
+#define DPAA2_ETH_MFL (10 * 1024)
+#define DPAA2_ETH_MAX_MTU (DPAA2_ETH_MFL - VLAN_ETH_HLEN)
+/* Convert L3 MTU to L2 MFL */
+#define DPAA2_ETH_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN)
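+
+/* For reference (illustrative note, not part of the upstream sources): with
+ * VLAN_ETH_HLEN == 18 (14B Ethernet header + 4B VLAN tag), DPAA2_ETH_MAX_MTU
+ * evaluates to 10240 - 18 = 10222, and DPAA2_ETH_L2_MAX_FRM(1500) to 1518.
+ */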
+
+/* Set the taildrop threshold (in bytes) to allow the enqueue of a large
+ * enough number of jumbo frames in the Rx queues (length of the current
+ * frame is not taken into account when making the taildrop decision)
+ */
+#define DPAA2_ETH_FQ_TAILDROP_THRESH (1024 * 1024)
+
+/* Maximum burst size value for Tx shaping */
+#define DPAA2_ETH_MAX_BURST_SIZE 0xF7FF
+
+/* Maximum number of Tx confirmation frames to be processed
+ * in a single NAPI call
+ */
+#define DPAA2_ETH_TXCONF_PER_NAPI 256
+
+/* Buffer quota per channel. We want to keep in check the number of ingress frames
+ * in flight: for small sized frames, congestion group taildrop may kick in
+ * first; for large sizes, Rx FQ taildrop threshold will ensure only a
+ * reasonable number of frames will be pending at any given time.
+ * Ingress frame drop due to buffer pool depletion should be a corner case only
+ */
+#define DPAA2_ETH_NUM_BUFS 1280
+#define DPAA2_ETH_REFILL_THRESH \
+ (DPAA2_ETH_NUM_BUFS - DPAA2_ETH_BUFS_PER_CMD)
+
+/* Congestion group taildrop threshold: number of frames allowed to accumulate
+ * at any moment in a group of Rx queues belonging to the same traffic class.
+ * Choose value such that we don't risk depleting the buffer pool before the
+ * taildrop kicks in
+ */
+#define DPAA2_ETH_CG_TAILDROP_THRESH(priv) \
+ (1024 * dpaa2_eth_queue_count(priv) / dpaa2_eth_tc_count(priv))
+
+/* Congestion group notification threshold: when this many frames accumulate
+ * on the Rx queues belonging to the same TC, the MAC is instructed to send
+ * PFC frames for that TC.
+ * When number of pending frames drops below exit threshold transmission of
+ * PFC frames is stopped.
+ */
+#define DPAA2_ETH_CN_THRESH_ENTRY(priv) \
+ (DPAA2_ETH_CG_TAILDROP_THRESH(priv) / 2)
+#define DPAA2_ETH_CN_THRESH_EXIT(priv) \
+ (DPAA2_ETH_CN_THRESH_ENTRY(priv) * 3 / 4)
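+
+/* Worked example (illustrative only, assuming 8 channels and a single
+ * traffic class; these values are not taken from this file): the congestion
+ * group taildrop threshold is 1024 * 8 / 1 = 8192 frames, the PFC entry
+ * threshold 4096 frames and the exit threshold 3072 frames.
+ */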
+
+/* Maximum number of buffers that can be acquired/released through a single
+ * QBMan command
+ */
+#define DPAA2_ETH_BUFS_PER_CMD 7
+
+/* Hardware requires alignment for ingress/egress buffer addresses */
+#define DPAA2_ETH_TX_BUF_ALIGN 64
+
+#define DPAA2_ETH_RX_BUF_RAW_SIZE PAGE_SIZE
+#define DPAA2_ETH_RX_BUF_TAILROOM \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+#define DPAA2_ETH_RX_BUF_SIZE \
+ (DPAA2_ETH_RX_BUF_RAW_SIZE - DPAA2_ETH_RX_BUF_TAILROOM)
+
+/* Hardware annotation area in RX/TX buffers */
+#define DPAA2_ETH_RX_HWA_SIZE 64
+#define DPAA2_ETH_TX_HWA_SIZE 128
+
+/* PTP nominal frequency 1GHz */
+#define DPAA2_PTP_CLK_PERIOD_NS 1
+
+/* Due to a limitation in WRIOP 1.0.0, the RX buffer data must be aligned
+ * to 256B. For newer revisions, the requirement is only for 64B alignment
+ */
+#define DPAA2_ETH_RX_BUF_ALIGN_REV1 256
+#define DPAA2_ETH_RX_BUF_ALIGN 64
+
+/* We are accommodating a skb backpointer and some S/G info
+ * in the frame's software annotation. The hardware
+ * options are either 0 or 64, so we choose the latter.
+ */
+#define DPAA2_ETH_SWA_SIZE 64
+
+/* We store different information in the software annotation area of a Tx frame
+ * based on what type of frame it is
+ */
+enum dpaa2_eth_swa_type {
+ DPAA2_ETH_SWA_SINGLE,
+ DPAA2_ETH_SWA_SG,
+ DPAA2_ETH_SWA_XDP,
+ DPAA2_ETH_SWA_SW_TSO,
+};
+
+/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
+struct dpaa2_eth_swa {
+ enum dpaa2_eth_swa_type type;
+ union {
+ struct {
+ struct sk_buff *skb;
+ int sgt_size;
+ } single;
+ struct {
+ struct sk_buff *skb;
+ struct scatterlist *scl;
+ int num_sg;
+ int sgt_size;
+ } sg;
+ struct {
+ int dma_size;
+ struct xdp_frame *xdpf;
+ } xdp;
+ struct {
+ struct sk_buff *skb;
+ int num_sg;
+ int sgt_size;
+ int is_last_fd;
+ } tso;
+ };
+};
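+
+/* Illustrative sketch, not present in the upstream driver: the size
+ * constraint noted above could be enforced at build time with e.g.
+ *   static_assert(sizeof(struct dpaa2_eth_swa) <= DPAA2_ETH_SWA_SIZE);
+ */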
+
+/* Annotation valid bits in FD FRC */
+#define DPAA2_FD_FRC_FASV 0x8000
+#define DPAA2_FD_FRC_FAEADV 0x4000
+#define DPAA2_FD_FRC_FAPRV 0x2000
+#define DPAA2_FD_FRC_FAIADV 0x1000
+#define DPAA2_FD_FRC_FASWOV 0x0800
+#define DPAA2_FD_FRC_FAICFDV 0x0400
+
+/* Error bits in FD CTRL */
+#define DPAA2_FD_RX_ERR_MASK (FD_CTRL_SBE | FD_CTRL_FAERR)
+#define DPAA2_FD_TX_ERR_MASK (FD_CTRL_UFD | \
+ FD_CTRL_SBE | \
+ FD_CTRL_FSE | \
+ FD_CTRL_FAERR)
+
+/* Annotation bits in FD CTRL */
+#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128B */
+
+/* Frame annotation status */
+struct dpaa2_fas {
+ u8 reserved;
+ u8 ppid;
+ __le16 ifpid;
+ __le32 status;
+};
+
+/* Frame annotation status word is located in the first 8 bytes
+ * of the buffer's hardware annotation area
+ */
+#define DPAA2_FAS_OFFSET 0
+#define DPAA2_FAS_SIZE (sizeof(struct dpaa2_fas))
+
+/* Timestamp is located in the next 8 bytes of the buffer's
+ * hardware annotation area
+ */
+#define DPAA2_TS_OFFSET 0x8
+
+/* Frame annotation parse results */
+struct dpaa2_fapr {
+ /* 64-bit word 1 */
+ __le32 faf_lo;
+ __le16 faf_ext;
+ __le16 nxt_hdr;
+ /* 64-bit word 2 */
+ __le64 faf_hi;
+ /* 64-bit word 3 */
+ u8 last_ethertype_offset;
+ u8 vlan_tci_offset_n;
+ u8 vlan_tci_offset_1;
+ u8 llc_snap_offset;
+ u8 eth_offset;
+ u8 ip1_pid_offset;
+ u8 shim_offset_2;
+ u8 shim_offset_1;
+ /* 64-bit word 4 */
+ u8 l5_offset;
+ u8 l4_offset;
+ u8 gre_offset;
+ u8 l3_offset_n;
+ u8 l3_offset_1;
+ u8 mpls_offset_n;
+ u8 mpls_offset_1;
+ u8 pppoe_offset;
+ /* 64-bit word 5 */
+ __le16 running_sum;
+ __le16 gross_running_sum;
+ u8 ipv6_frag_offset;
+ u8 nxt_hdr_offset;
+ u8 routing_hdr_offset_2;
+ u8 routing_hdr_offset_1;
+ /* 64-bit word 6 */
+ u8 reserved[5]; /* Soft-parsing context */
+ u8 ip_proto_offset_n;
+ u8 nxt_hdr_frag_offset;
+ u8 parse_error_code;
+};
+
+#define DPAA2_FAPR_OFFSET 0x10
+#define DPAA2_FAPR_SIZE		sizeof(struct dpaa2_fapr)
+
+/* Frame annotation egress action descriptor */
+#define DPAA2_FAEAD_OFFSET 0x58
+
+struct dpaa2_faead {
+ __le32 conf_fqid;
+ __le32 ctrl;
+};
+
+#define DPAA2_FAEAD_A2V 0x20000000
+#define DPAA2_FAEAD_A4V 0x08000000
+#define DPAA2_FAEAD_UPDV 0x00001000
+#define DPAA2_FAEAD_EBDDV 0x00002000
+#define DPAA2_FAEAD_UPD 0x00000010
+
+struct ptp_tstamp {
+ u16 sec_msb;
+ u32 sec_lsb;
+ u32 nsec;
+};
+
+static inline void ns_to_ptp_tstamp(struct ptp_tstamp *tstamp, u64 ns)
+{
+ u64 sec, nsec;
+
+ sec = ns;
+ nsec = do_div(sec, 1000000000);
+
+ tstamp->sec_lsb = sec & 0xFFFFFFFF;
+ tstamp->sec_msb = (sec >> 32) & 0xFFFF;
+ tstamp->nsec = nsec;
+}
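+
+/* Worked example (illustrative only): for ns = 10000000123, do_div() leaves
+ * sec = 10 and returns nsec = 123, yielding
+ * { .sec_msb = 0, .sec_lsb = 10, .nsec = 123 }.
+ */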
+
+/* Accessors for the hardware annotation fields that we use */
+static inline void *dpaa2_get_hwa(void *buf_addr, bool swa)
+{
+ return buf_addr + (swa ? DPAA2_ETH_SWA_SIZE : 0);
+}
+
+static inline struct dpaa2_fas *dpaa2_get_fas(void *buf_addr, bool swa)
+{
+ return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAS_OFFSET;
+}
+
+static inline __le64 *dpaa2_get_ts(void *buf_addr, bool swa)
+{
+ return dpaa2_get_hwa(buf_addr, swa) + DPAA2_TS_OFFSET;
+}
+
+static inline struct dpaa2_fapr *dpaa2_get_fapr(void *buf_addr, bool swa)
+{
+ return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAPR_OFFSET;
+}
+
+static inline struct dpaa2_faead *dpaa2_get_faead(void *buf_addr, bool swa)
+{
+ return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAEAD_OFFSET;
+}
+
+/* Error and status bits in the frame annotation status word */
+/* Debug frame, otherwise supposed to be discarded */
+#define DPAA2_FAS_DISC 0x80000000
+/* MACSEC frame */
+#define DPAA2_FAS_MS 0x40000000
+#define DPAA2_FAS_PTP 0x08000000
+/* Ethernet multicast frame */
+#define DPAA2_FAS_MC 0x04000000
+/* Ethernet broadcast frame */
+#define DPAA2_FAS_BC 0x02000000
+#define DPAA2_FAS_KSE 0x00040000
+#define DPAA2_FAS_EOFHE 0x00020000
+#define DPAA2_FAS_MNLE 0x00010000
+#define DPAA2_FAS_TIDE 0x00008000
+#define DPAA2_FAS_PIEE 0x00004000
+/* Frame length error */
+#define DPAA2_FAS_FLE 0x00002000
+/* Frame physical error */
+#define DPAA2_FAS_FPE 0x00001000
+#define DPAA2_FAS_PTE 0x00000080
+#define DPAA2_FAS_ISP 0x00000040
+#define DPAA2_FAS_PHE 0x00000020
+#define DPAA2_FAS_BLE 0x00000010
+/* L3 csum validation performed */
+#define DPAA2_FAS_L3CV 0x00000008
+/* L3 csum error */
+#define DPAA2_FAS_L3CE 0x00000004
+/* L4 csum validation performed */
+#define DPAA2_FAS_L4CV 0x00000002
+/* L4 csum error */
+#define DPAA2_FAS_L4CE 0x00000001
+/* Possible errors on the ingress path */
+#define DPAA2_FAS_RX_ERR_MASK (DPAA2_FAS_KSE | \
+ DPAA2_FAS_EOFHE | \
+ DPAA2_FAS_MNLE | \
+ DPAA2_FAS_TIDE | \
+ DPAA2_FAS_PIEE | \
+ DPAA2_FAS_FLE | \
+ DPAA2_FAS_FPE | \
+ DPAA2_FAS_PTE | \
+ DPAA2_FAS_ISP | \
+ DPAA2_FAS_PHE | \
+ DPAA2_FAS_BLE | \
+ DPAA2_FAS_L3CE | \
+ DPAA2_FAS_L4CE)
+
+/* Time in milliseconds between link state updates */
+#define DPAA2_ETH_LINK_STATE_REFRESH 1000
+
+/* Number of times to retry a frame enqueue before giving up.
+ * Value determined empirically, in order to minimize the number
+ * of frames dropped on Tx
+ */
+#define DPAA2_ETH_ENQUEUE_RETRIES 10
+
+/* Number of times to retry DPIO portal operations while waiting
+ * for portal to finish executing current command and become
+ * available. We want to avoid being stuck in a while loop in case
+ * hardware becomes unresponsive, but not give up too easily if
+ * the portal really is busy for valid reasons
+ */
+#define DPAA2_ETH_SWP_BUSY_RETRIES 1000
+
+/* Driver statistics, other than those in struct rtnl_link_stats64.
+ * These are usually collected per-CPU and aggregated by ethtool.
+ */
+struct dpaa2_eth_drv_stats {
+ __u64 tx_conf_frames;
+ __u64 tx_conf_bytes;
+ __u64 tx_sg_frames;
+ __u64 tx_sg_bytes;
+ __u64 tx_tso_frames;
+ __u64 tx_tso_bytes;
+ __u64 rx_sg_frames;
+ __u64 rx_sg_bytes;
+	/* Linear skbs sent as an S/G FD due to insufficient headroom */
+ __u64 tx_converted_sg_frames;
+ __u64 tx_converted_sg_bytes;
+ /* Enqueues retried due to portal busy */
+ __u64 tx_portal_busy;
+};
+
+/* Per-FQ statistics */
+struct dpaa2_eth_fq_stats {
+ /* Number of frames received on this queue */
+ __u64 frames;
+};
+
+/* Per-channel statistics */
+struct dpaa2_eth_ch_stats {
+ /* Volatile dequeues retried due to portal busy */
+ __u64 dequeue_portal_busy;
+ /* Pull errors */
+ __u64 pull_err;
+ /* Number of CDANs; useful to estimate avg NAPI len */
+ __u64 cdan;
+ /* XDP counters */
+ __u64 xdp_drop;
+ __u64 xdp_tx;
+ __u64 xdp_tx_err;
+ __u64 xdp_redirect;
+ /* Must be last, does not show up in ethtool stats */
+ __u64 frames;
+ __u64 frames_per_cdan;
+ __u64 bytes_per_cdan;
+};
+
+#define DPAA2_ETH_CH_STATS 7
+
+/* Maximum number of queues associated with a DPNI */
+#define DPAA2_ETH_MAX_TCS 8
+#define DPAA2_ETH_MAX_RX_QUEUES_PER_TC 16
+#define DPAA2_ETH_MAX_RX_QUEUES \
+ (DPAA2_ETH_MAX_RX_QUEUES_PER_TC * DPAA2_ETH_MAX_TCS)
+#define DPAA2_ETH_MAX_TX_QUEUES 16
+#define DPAA2_ETH_MAX_RX_ERR_QUEUES 1
+#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \
+ DPAA2_ETH_MAX_TX_QUEUES + \
+ DPAA2_ETH_MAX_RX_ERR_QUEUES)
+#define DPAA2_ETH_MAX_NETDEV_QUEUES \
+ (DPAA2_ETH_MAX_TX_QUEUES * DPAA2_ETH_MAX_TCS)
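+
+/* For reference (illustrative note): the macros above expand to 16 * 8 = 128
+ * Rx queues, 128 + 16 + 1 = 145 total queues and 128 netdev queues, the
+ * latter being the value passed to alloc_etherdev_mq() at probe time.
+ */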
+
+#define DPAA2_ETH_MAX_DPCONS 16
+
+enum dpaa2_eth_fq_type {
+ DPAA2_RX_FQ = 0,
+ DPAA2_TX_CONF_FQ,
+ DPAA2_RX_ERR_FQ
+};
+
+struct dpaa2_eth_priv;
+
+struct dpaa2_eth_xdp_fds {
+ struct dpaa2_fd fds[DEV_MAP_BULK_SIZE];
+ ssize_t num;
+};
+
+struct dpaa2_eth_fq {
+ u32 fqid;
+ u32 tx_qdbin;
+ u32 tx_fqid[DPAA2_ETH_MAX_TCS];
+ u16 flowid;
+ u8 tc;
+ int target_cpu;
+ u32 dq_frames;
+ u32 dq_bytes;
+ struct dpaa2_eth_channel *channel;
+ enum dpaa2_eth_fq_type type;
+
+ void (*consume)(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ struct dpaa2_eth_fq *fq);
+ struct dpaa2_eth_fq_stats stats;
+
+ struct dpaa2_eth_xdp_fds xdp_redirect_fds;
+ struct dpaa2_eth_xdp_fds xdp_tx_fds;
+};
+
+struct dpaa2_eth_ch_xdp {
+ struct bpf_prog *prog;
+ unsigned int res;
+};
+
+struct dpaa2_eth_channel {
+ struct dpaa2_io_notification_ctx nctx;
+ struct fsl_mc_device *dpcon;
+ int dpcon_id;
+ int ch_id;
+ struct napi_struct napi;
+ struct dpaa2_io *dpio;
+ struct dpaa2_io_store *store;
+ struct dpaa2_eth_priv *priv;
+ int buf_count;
+ struct dpaa2_eth_ch_stats stats;
+ struct dpaa2_eth_ch_xdp xdp;
+ struct xdp_rxq_info xdp_rxq;
+ struct list_head *rx_list;
+
+ /* Buffers to be recycled back in the buffer pool */
+ u64 recycled_bufs[DPAA2_ETH_BUFS_PER_CMD];
+ int recycled_bufs_cnt;
+};
+
+struct dpaa2_eth_dist_fields {
+ u64 rxnfc_field;
+ enum net_prot cls_prot;
+ int cls_field;
+ int size;
+ u64 id;
+};
+
+struct dpaa2_eth_cls_rule {
+ struct ethtool_rx_flow_spec fs;
+ u8 in_use;
+};
+
+#define DPAA2_ETH_SGT_CACHE_SIZE 256
+struct dpaa2_eth_sgt_cache {
+ void *buf[DPAA2_ETH_SGT_CACHE_SIZE];
+ u16 count;
+};
+
+struct dpaa2_eth_trap_item {
+ void *trap_ctx;
+};
+
+struct dpaa2_eth_trap_data {
+ struct dpaa2_eth_trap_item *trap_items_arr;
+ struct dpaa2_eth_priv *priv;
+};
+
+#define DPAA2_ETH_SG_ENTRIES_MAX (PAGE_SIZE / sizeof(struct scatterlist))
+
+#define DPAA2_ETH_DEFAULT_COPYBREAK 512
+
+#define DPAA2_ETH_ENQUEUE_MAX_FDS 200
+struct dpaa2_eth_fds {
+ struct dpaa2_fd array[DPAA2_ETH_ENQUEUE_MAX_FDS];
+};
+
+/* Driver private data */
+struct dpaa2_eth_priv {
+ struct net_device *net_dev;
+
+ u8 num_fqs;
+ struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
+ int (*enqueue)(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq,
+ struct dpaa2_fd *fd, u8 prio,
+ u32 num_frames,
+ int *frames_enqueued);
+
+ u8 num_channels;
+ struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
+ struct dpaa2_eth_sgt_cache __percpu *sgt_cache;
+ unsigned long features;
+ struct dpni_attr dpni_attrs;
+ u16 dpni_ver_major;
+ u16 dpni_ver_minor;
+ u16 tx_data_offset;
+ void __iomem *onestep_reg_base;
+ u8 ptp_correction_off;
+ void (*dpaa2_set_onestep_params_cb)(struct dpaa2_eth_priv *priv,
+ u32 offset, u8 udp);
+ struct fsl_mc_device *dpbp_dev;
+ u16 rx_buf_size;
+ u16 bpid;
+ struct iommu_domain *iommu_domain;
+
+ enum hwtstamp_tx_types tx_tstamp_type; /* Tx timestamping type */
+ bool rx_tstamp; /* Rx timestamping enabled */
+
+ u16 tx_qdid;
+ struct fsl_mc_io *mc_io;
+ /* Cores which have an affine DPIO/DPCON.
+ * This is the cpu set on which Rx and Tx conf frames are processed
+ */
+ struct cpumask dpio_cpumask;
+
+ /* Standard statistics */
+ struct rtnl_link_stats64 __percpu *percpu_stats;
+ /* Extra stats, in addition to the ones known by the kernel */
+ struct dpaa2_eth_drv_stats __percpu *percpu_extras;
+
+ u16 mc_token;
+ u8 rx_fqtd_enabled;
+ u8 rx_cgtd_enabled;
+
+ struct dpni_link_state link_state;
+ bool do_link_poll;
+ struct task_struct *poll_thread;
+
+ /* enabled ethtool hashing bits */
+ u64 rx_hash_fields;
+ u64 rx_cls_fields;
+ struct dpaa2_eth_cls_rule *cls_rules;
+ u8 rx_cls_enabled;
+ u8 vlan_cls_enabled;
+ u8 pfc_enabled;
+#ifdef CONFIG_FSL_DPAA2_ETH_DCB
+ u8 dcbx_mode;
+ struct ieee_pfc pfc;
+#endif
+ struct bpf_prog *xdp_prog;
+#ifdef CONFIG_DEBUG_FS
+ struct dpaa2_debugfs dbg;
+#endif
+
+ struct dpaa2_mac *mac;
+ struct workqueue_struct *dpaa2_ptp_wq;
+ struct work_struct tx_onestep_tstamp;
+ struct sk_buff_head tx_skbs;
+	/* The one-step timestamping configuration of the hardware
+	 * registers can only be done when no one-step timestamping
+	 * frames are in flight, so a mutex is used to make sure the
+	 * lock is released by the last one-step timestamping packet
+	 * through the Tx confirmation queue before the current packet
+	 * is transmitted.
+	 */
+ struct mutex onestep_tstamp_lock;
+ struct devlink *devlink;
+ struct dpaa2_eth_trap_data *trap_data;
+ struct devlink_port devlink_port;
+
+ u32 rx_copybreak;
+
+ struct dpaa2_eth_fds __percpu *fd;
+};
+
+struct dpaa2_eth_devlink_priv {
+ struct dpaa2_eth_priv *dpaa2_priv;
+};
+
+#define TX_TSTAMP 0x1
+#define TX_TSTAMP_ONESTEP_SYNC 0x2
+
+#define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
+ | RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 \
+ | RXH_L4_B_2_3)
+
+/* default Rx hash options, set during probing */
+#define DPAA2_RXH_DEFAULT (RXH_L3_PROTO | RXH_IP_SRC | RXH_IP_DST | \
+ RXH_L4_B_0_1 | RXH_L4_B_2_3)
+
+#define dpaa2_eth_hash_enabled(priv) \
+ ((priv)->dpni_attrs.num_queues > 1)
+
+/* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */
+#define DPAA2_CLASSIFIER_DMA_SIZE 256
+
+extern const struct ethtool_ops dpaa2_ethtool_ops;
+extern int dpaa2_phc_index;
+extern struct ptp_qoriq *dpaa2_ptp;
+
+static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
+ u16 ver_major, u16 ver_minor)
+{
+ if (priv->dpni_ver_major == ver_major)
+ return priv->dpni_ver_minor - ver_minor;
+ return priv->dpni_ver_major - ver_major;
+}
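+
+/* Worked example (illustrative only): for a DPNI reporting version 7.13,
+ * dpaa2_eth_cmp_dpni_ver(priv, 7, 5) returns 8 while
+ * dpaa2_eth_cmp_dpni_ver(priv, 8, 2) returns -1; the version checks below
+ * only use the sign of this result.
+ */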
+
+/* Minimum firmware version that supports a more flexible API
+ * for configuring the Rx flow hash key
+ */
+#define DPNI_RX_DIST_KEY_VER_MAJOR 7
+#define DPNI_RX_DIST_KEY_VER_MINOR 5
+
+#define dpaa2_eth_has_legacy_dist(priv) \
+ (dpaa2_eth_cmp_dpni_ver((priv), DPNI_RX_DIST_KEY_VER_MAJOR, \
+ DPNI_RX_DIST_KEY_VER_MINOR) < 0)
+
+#define dpaa2_eth_fs_enabled(priv) \
+ (!((priv)->dpni_attrs.options & DPNI_OPT_NO_FS))
+
+#define dpaa2_eth_fs_mask_enabled(priv) \
+ ((priv)->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)
+
+#define dpaa2_eth_fs_count(priv) \
+ ((priv)->dpni_attrs.fs_entries)
+
+#define dpaa2_eth_tc_count(priv) \
+ ((priv)->dpni_attrs.num_tcs)
+
+/* We have exactly one {Rx, Tx conf} queue per channel */
+#define dpaa2_eth_queue_count(priv) \
+ ((priv)->num_channels)
+
+enum dpaa2_eth_rx_dist {
+ DPAA2_ETH_RX_DIST_HASH,
+ DPAA2_ETH_RX_DIST_CLS
+};
+
+/* Unique IDs for the supported Rx classification header fields */
+#define DPAA2_ETH_DIST_ETHDST BIT(0)
+#define DPAA2_ETH_DIST_ETHSRC BIT(1)
+#define DPAA2_ETH_DIST_ETHTYPE BIT(2)
+#define DPAA2_ETH_DIST_VLAN BIT(3)
+#define DPAA2_ETH_DIST_IPSRC BIT(4)
+#define DPAA2_ETH_DIST_IPDST BIT(5)
+#define DPAA2_ETH_DIST_IPPROTO BIT(6)
+#define DPAA2_ETH_DIST_L4SRC BIT(7)
+#define DPAA2_ETH_DIST_L4DST BIT(8)
+#define DPAA2_ETH_DIST_ALL (~0ULL)
+
+#define DPNI_PTP_ONESTEP_VER_MAJOR 8
+#define DPNI_PTP_ONESTEP_VER_MINOR 2
+#define DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT BIT(0)
+#define DPAA2_PTP_SINGLE_STEP_ENABLE BIT(31)
+#define DPAA2_PTP_SINGLE_STEP_CH BIT(7)
+#define DPAA2_PTP_SINGLE_CORRECTION_OFF(v) ((v) << 8)
+
+#define DPNI_PAUSE_VER_MAJOR 7
+#define DPNI_PAUSE_VER_MINOR 13
+#define dpaa2_eth_has_pause_support(priv) \
+ (dpaa2_eth_cmp_dpni_ver((priv), DPNI_PAUSE_VER_MAJOR, \
+ DPNI_PAUSE_VER_MINOR) >= 0)
+
+static inline bool dpaa2_eth_tx_pause_enabled(u64 link_options)
+{
+ return !!(link_options & DPNI_LINK_OPT_PAUSE) ^
+ !!(link_options & DPNI_LINK_OPT_ASYM_PAUSE);
+}
+
+static inline bool dpaa2_eth_rx_pause_enabled(u64 link_options)
+{
+ return !!(link_options & DPNI_LINK_OPT_PAUSE);
+}
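+
+/* Resulting pause configuration (illustrative truth table, mirroring the
+ * encoding used in dpaa2_eth_set_pauseparam()):
+ *   PAUSE=0 ASYM=0: no pause        PAUSE=1 ASYM=0: Rx + Tx pause
+ *   PAUSE=1 ASYM=1: Rx pause only   PAUSE=0 ASYM=1: Tx pause only
+ */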
+
+static inline unsigned int dpaa2_eth_needed_headroom(struct sk_buff *skb)
+{
+ unsigned int headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN;
+
+ /* If we don't have an skb (e.g. XDP buffer), we only need space for
+ * the software annotation area
+ */
+ if (!skb)
+ return headroom;
+
+ /* For non-linear skbs we have no headroom requirement, as we build a
+ * SG frame with a newly allocated SGT buffer
+ */
+ if (skb_is_nonlinear(skb))
+ return 0;
+
+ /* If we have Tx timestamping, need 128B hardware annotation */
+ if (skb->cb[0])
+ headroom += DPAA2_ETH_TX_HWA_SIZE;
+
+ return headroom;
+}
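+
+/* Worked example (illustrative only): with DPAA2_ETH_SWA_SIZE and
+ * DPAA2_ETH_TX_BUF_ALIGN both 64B, a linear skb needs 128B of headroom, or
+ * 256B when Tx timestamping is requested (skb->cb[0] set); non-linear skbs
+ * need none and XDP buffers (skb == NULL) need 128B.
+ */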
+
+/* Extra headroom space requested to hardware, in order to make sure there's
+ * no realloc'ing in forwarding scenarios
+ */
+static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
+{
+ return priv->tx_data_offset - DPAA2_ETH_RX_HWA_SIZE;
+}
+
+static inline bool dpaa2_eth_is_type_phy(struct dpaa2_eth_priv *priv)
+{
+ if (priv->mac &&
+ (priv->mac->attr.link_type == DPMAC_LINK_TYPE_PHY ||
+ priv->mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE))
+ return true;
+
+ return false;
+}
+
+static inline bool dpaa2_eth_has_mac(struct dpaa2_eth_priv *priv)
+{
+ return priv->mac ? true : false;
+}
+
+int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
+int dpaa2_eth_set_cls(struct net_device *net_dev, u64 key);
+int dpaa2_eth_cls_key_size(u64 key);
+int dpaa2_eth_cls_fld_off(int prot, int field);
+void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields);
+
+void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
+ bool tx_pause, bool pfc);
+
+extern const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops;
+
+int dpaa2_eth_dl_alloc(struct dpaa2_eth_priv *priv);
+void dpaa2_eth_dl_free(struct dpaa2_eth_priv *priv);
+
+void dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv);
+void dpaa2_eth_dl_unregister(struct dpaa2_eth_priv *priv);
+
+int dpaa2_eth_dl_port_add(struct dpaa2_eth_priv *priv);
+void dpaa2_eth_dl_port_del(struct dpaa2_eth_priv *priv);
+
+int dpaa2_eth_dl_traps_register(struct dpaa2_eth_priv *priv);
+void dpaa2_eth_dl_traps_unregister(struct dpaa2_eth_priv *priv);
+
+struct dpaa2_eth_trap_item *dpaa2_eth_dl_get_trap(struct dpaa2_eth_priv *priv,
+ struct dpaa2_fapr *fapr);
+#endif	/* __DPAA2_ETH_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
new file mode 100644
index 000000000..598888264
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -0,0 +1,897 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ * Copyright 2020 NXP
+ */
+
+#include <linux/net_tstamp.h>
+#include <linux/nospec.h>
+
+#include "dpni.h" /* DPNI_LINK_OPT_* */
+#include "dpaa2-eth.h"
+
+/* To be kept in sync with DPNI statistics */
+static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = {
+ "[hw] rx frames",
+ "[hw] rx bytes",
+ "[hw] rx mcast frames",
+ "[hw] rx mcast bytes",
+ "[hw] rx bcast frames",
+ "[hw] rx bcast bytes",
+ "[hw] tx frames",
+ "[hw] tx bytes",
+ "[hw] tx mcast frames",
+ "[hw] tx mcast bytes",
+ "[hw] tx bcast frames",
+ "[hw] tx bcast bytes",
+ "[hw] rx filtered frames",
+ "[hw] rx discarded frames",
+ "[hw] rx nobuffer discards",
+ "[hw] tx discarded frames",
+ "[hw] tx confirmed frames",
+ "[hw] tx dequeued bytes",
+ "[hw] tx dequeued frames",
+ "[hw] tx rejected bytes",
+ "[hw] tx rejected frames",
+ "[hw] tx pending frames",
+};
+
+#define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats)
+
+static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
+ /* per-cpu stats */
+ "[drv] tx conf frames",
+ "[drv] tx conf bytes",
+ "[drv] tx sg frames",
+ "[drv] tx sg bytes",
+ "[drv] tx tso frames",
+ "[drv] tx tso bytes",
+ "[drv] rx sg frames",
+ "[drv] rx sg bytes",
+ "[drv] tx converted sg frames",
+ "[drv] tx converted sg bytes",
+ "[drv] enqueue portal busy",
+ /* Channel stats */
+ "[drv] dequeue portal busy",
+ "[drv] channel pull errors",
+ "[drv] cdan",
+ "[drv] xdp drop",
+ "[drv] xdp tx",
+ "[drv] xdp tx errors",
+ "[drv] xdp redirect",
+ /* FQ stats */
+ "[qbman] rx pending frames",
+ "[qbman] rx pending bytes",
+ "[qbman] tx conf pending frames",
+ "[qbman] tx conf pending bytes",
+ "[qbman] buffer count",
+};
+
+#define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras)
+
+static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor);
+
+ strscpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
+ sizeof(drvinfo->bus_info));
+}
+
+static int dpaa2_eth_nway_reset(struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ if (dpaa2_eth_is_type_phy(priv))
+ return phylink_ethtool_nway_reset(priv->mac->phylink);
+
+ return -EOPNOTSUPP;
+}
+
+static int
+dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
+ struct ethtool_link_ksettings *link_settings)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ if (dpaa2_eth_is_type_phy(priv))
+ return phylink_ethtool_ksettings_get(priv->mac->phylink,
+ link_settings);
+
+ link_settings->base.autoneg = AUTONEG_DISABLE;
+ if (!(priv->link_state.options & DPNI_LINK_OPT_HALF_DUPLEX))
+ link_settings->base.duplex = DUPLEX_FULL;
+ link_settings->base.speed = priv->link_state.rate;
+
+ return 0;
+}
+
+static int
+dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
+ const struct ethtool_link_ksettings *link_settings)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ if (!dpaa2_eth_is_type_phy(priv))
+ return -ENOTSUPP;
+
+ return phylink_ethtool_ksettings_set(priv->mac->phylink, link_settings);
+}
+
+static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ u64 link_options = priv->link_state.options;
+
+ if (dpaa2_eth_is_type_phy(priv)) {
+ phylink_ethtool_get_pauseparam(priv->mac->phylink, pause);
+ return;
+ }
+
+ pause->rx_pause = dpaa2_eth_rx_pause_enabled(link_options);
+ pause->tx_pause = dpaa2_eth_tx_pause_enabled(link_options);
+ pause->autoneg = AUTONEG_DISABLE;
+}
+
+static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct dpni_link_cfg cfg = {0};
+ int err;
+
+ if (!dpaa2_eth_has_pause_support(priv)) {
+ netdev_info(net_dev, "No pause frame support for DPNI version < %d.%d\n",
+ DPNI_PAUSE_VER_MAJOR, DPNI_PAUSE_VER_MINOR);
+ return -EOPNOTSUPP;
+ }
+
+ if (dpaa2_eth_is_type_phy(priv))
+ return phylink_ethtool_set_pauseparam(priv->mac->phylink,
+ pause);
+ if (pause->autoneg)
+ return -EOPNOTSUPP;
+
+ cfg.rate = priv->link_state.rate;
+ cfg.options = priv->link_state.options;
+ if (pause->rx_pause)
+ cfg.options |= DPNI_LINK_OPT_PAUSE;
+ else
+ cfg.options &= ~DPNI_LINK_OPT_PAUSE;
+ if (!!pause->rx_pause ^ !!pause->tx_pause)
+ cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
+ else
+ cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
+
+ if (cfg.options == priv->link_state.options)
+ return 0;
+
+ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
+ if (err) {
+ netdev_err(net_dev, "dpni_set_link_state failed\n");
+ return err;
+ }
+
+ priv->link_state.options = cfg.options;
+
+ return 0;
+}
+
+static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(netdev);
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) {
+ strscpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) {
+ strscpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ if (dpaa2_eth_has_mac(priv))
+ dpaa2_mac_get_strings(p);
+ break;
+ }
+}
+
+static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
+{
+ int num_ss_stats = DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ switch (sset) {
+ case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
+ if (dpaa2_eth_has_mac(priv))
+ num_ss_stats += dpaa2_mac_get_sset_count();
+ return num_ss_stats;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* Fill in hardware counters, as returned by MC */
+static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ union dpni_statistics dpni_stats;
+ int dpni_stats_page_size[DPNI_STATISTICS_CNT] = {
+ sizeof(dpni_stats.page_0),
+ sizeof(dpni_stats.page_1),
+ sizeof(dpni_stats.page_2),
+ sizeof(dpni_stats.page_3),
+ sizeof(dpni_stats.page_4),
+ sizeof(dpni_stats.page_5),
+ sizeof(dpni_stats.page_6),
+ };
+ u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
+ u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
+ struct dpaa2_eth_ch_stats *ch_stats;
+ struct dpaa2_eth_drv_stats *extras;
+ int j, k, err, num_cnt, i = 0;
+ u32 fcnt, bcnt;
+ u32 buf_cnt;
+
+ memset(data, 0,
+ sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));
+
+ /* Print standard counters, from DPNI statistics */
+ for (j = 0; j <= 6; j++) {
+ /* We're not interested in pages 4 & 5 for now */
+ if (j == 4 || j == 5)
+ continue;
+ err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
+ j, &dpni_stats);
+ if (err == -EINVAL)
+ /* Older firmware versions don't support all pages */
+ memset(&dpni_stats, 0, sizeof(dpni_stats));
+ else if (err)
+ netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j);
+
+ num_cnt = dpni_stats_page_size[j] / sizeof(u64);
+ for (k = 0; k < num_cnt; k++)
+ *(data + i++) = dpni_stats.raw.counter[k];
+ }
+
+ /* Print per-cpu extra stats */
+ for_each_online_cpu(k) {
+ extras = per_cpu_ptr(priv->percpu_extras, k);
+ for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++)
+ *((__u64 *)data + i + j) += *((__u64 *)extras + j);
+ }
+ i += j;
+
+ /* Per-channel stats */
+ for (k = 0; k < priv->num_channels; k++) {
+ ch_stats = &priv->channel[k]->stats;
+ for (j = 0; j < DPAA2_ETH_CH_STATS; j++)
+ *((__u64 *)data + i + j) += *((__u64 *)ch_stats + j);
+ }
+ i += j;
+
+ for (j = 0; j < priv->num_fqs; j++) {
+ /* Print FQ instantaneous counts */
+ err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
+ &fcnt, &bcnt);
+ if (err) {
+ netdev_warn(net_dev, "FQ query error %d", err);
+ return;
+ }
+
+ if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
+ fcnt_tx_total += fcnt;
+ bcnt_tx_total += bcnt;
+ } else {
+ fcnt_rx_total += fcnt;
+ bcnt_rx_total += bcnt;
+ }
+ }
+
+ *(data + i++) = fcnt_rx_total;
+ *(data + i++) = bcnt_rx_total;
+ *(data + i++) = fcnt_tx_total;
+ *(data + i++) = bcnt_tx_total;
+
+ err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
+ if (err) {
+ netdev_warn(net_dev, "Buffer count query error %d\n", err);
+ return;
+ }
+ *(data + i++) = buf_cnt;
+
+ if (dpaa2_eth_has_mac(priv))
+ dpaa2_mac_get_ethtool_stats(priv->mac, data + i);
+}
+
+static int dpaa2_eth_prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
+ void *key, void *mask, u64 *fields)
+{
+ int off;
+
+ if (eth_mask->h_proto) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
+ *(__be16 *)(key + off) = eth_value->h_proto;
+ *(__be16 *)(mask + off) = eth_mask->h_proto;
+ *fields |= DPAA2_ETH_DIST_ETHTYPE;
+ }
+
+ if (!is_zero_ether_addr(eth_mask->h_source)) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA);
+ ether_addr_copy(key + off, eth_value->h_source);
+ ether_addr_copy(mask + off, eth_mask->h_source);
+ *fields |= DPAA2_ETH_DIST_ETHSRC;
+ }
+
+ if (!is_zero_ether_addr(eth_mask->h_dest)) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
+ ether_addr_copy(key + off, eth_value->h_dest);
+ ether_addr_copy(mask + off, eth_mask->h_dest);
+ *fields |= DPAA2_ETH_DIST_ETHDST;
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
+ struct ethtool_usrip4_spec *uip_mask,
+ void *key, void *mask, u64 *fields)
+{
+ int off;
+ u32 tmp_value, tmp_mask;
+
+ if (uip_mask->tos || uip_mask->ip_ver)
+ return -EOPNOTSUPP;
+
+ if (uip_mask->ip4src) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
+ *(__be32 *)(key + off) = uip_value->ip4src;
+ *(__be32 *)(mask + off) = uip_mask->ip4src;
+ *fields |= DPAA2_ETH_DIST_IPSRC;
+ }
+
+ if (uip_mask->ip4dst) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
+ *(__be32 *)(key + off) = uip_value->ip4dst;
+ *(__be32 *)(mask + off) = uip_mask->ip4dst;
+ *fields |= DPAA2_ETH_DIST_IPDST;
+ }
+
+ if (uip_mask->proto) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
+ *(u8 *)(key + off) = uip_value->proto;
+ *(u8 *)(mask + off) = uip_mask->proto;
+ *fields |= DPAA2_ETH_DIST_IPPROTO;
+ }
+
+ if (uip_mask->l4_4_bytes) {
+ tmp_value = be32_to_cpu(uip_value->l4_4_bytes);
+ tmp_mask = be32_to_cpu(uip_mask->l4_4_bytes);
+
+ off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
+ *(__be16 *)(key + off) = htons(tmp_value >> 16);
+ *(__be16 *)(mask + off) = htons(tmp_mask >> 16);
+ *fields |= DPAA2_ETH_DIST_L4SRC;
+
+ off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
+ *(__be16 *)(key + off) = htons(tmp_value & 0xFFFF);
+ *(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF);
+ *fields |= DPAA2_ETH_DIST_L4DST;
+ }
+
+ /* Only apply the rule for IPv4 frames */
+ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
+ *(__be16 *)(key + off) = htons(ETH_P_IP);
+ *(__be16 *)(mask + off) = htons(0xFFFF);
+ *fields |= DPAA2_ETH_DIST_ETHTYPE;
+
+ return 0;
+}
+
+static int dpaa2_eth_prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
+ struct ethtool_tcpip4_spec *l4_mask,
+ void *key, void *mask, u8 l4_proto, u64 *fields)
+{
+ int off;
+
+ if (l4_mask->tos)
+ return -EOPNOTSUPP;
+
+ if (l4_mask->ip4src) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
+ *(__be32 *)(key + off) = l4_value->ip4src;
+ *(__be32 *)(mask + off) = l4_mask->ip4src;
+ *fields |= DPAA2_ETH_DIST_IPSRC;
+ }
+
+ if (l4_mask->ip4dst) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
+ *(__be32 *)(key + off) = l4_value->ip4dst;
+ *(__be32 *)(mask + off) = l4_mask->ip4dst;
+ *fields |= DPAA2_ETH_DIST_IPDST;
+ }
+
+ if (l4_mask->psrc) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
+ *(__be16 *)(key + off) = l4_value->psrc;
+ *(__be16 *)(mask + off) = l4_mask->psrc;
+ *fields |= DPAA2_ETH_DIST_L4SRC;
+ }
+
+ if (l4_mask->pdst) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
+ *(__be16 *)(key + off) = l4_value->pdst;
+ *(__be16 *)(mask + off) = l4_mask->pdst;
+ *fields |= DPAA2_ETH_DIST_L4DST;
+ }
+
+ /* Only apply the rule for IPv4 frames with the specified L4 proto */
+ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
+ *(__be16 *)(key + off) = htons(ETH_P_IP);
+ *(__be16 *)(mask + off) = htons(0xFFFF);
+ *fields |= DPAA2_ETH_DIST_ETHTYPE;
+
+ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
+ *(u8 *)(key + off) = l4_proto;
+ *(u8 *)(mask + off) = 0xFF;
+ *fields |= DPAA2_ETH_DIST_IPPROTO;
+
+ return 0;
+}
+
+static int dpaa2_eth_prep_ext_rule(struct ethtool_flow_ext *ext_value,
+ struct ethtool_flow_ext *ext_mask,
+ void *key, void *mask, u64 *fields)
+{
+ int off;
+
+ if (ext_mask->vlan_etype)
+ return -EOPNOTSUPP;
+
+ if (ext_mask->vlan_tci) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI);
+ *(__be16 *)(key + off) = ext_value->vlan_tci;
+ *(__be16 *)(mask + off) = ext_mask->vlan_tci;
+ *fields |= DPAA2_ETH_DIST_VLAN;
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
+ struct ethtool_flow_ext *ext_mask,
+ void *key, void *mask, u64 *fields)
+{
+ int off;
+
+ if (!is_zero_ether_addr(ext_mask->h_dest)) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
+ ether_addr_copy(key + off, ext_value->h_dest);
+ ether_addr_copy(mask + off, ext_mask->h_dest);
+ *fields |= DPAA2_ETH_DIST_ETHDST;
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key,
+ void *mask, u64 *fields)
+{
+ int err;
+
+ switch (fs->flow_type & 0xFF) {
+ case ETHER_FLOW:
+ err = dpaa2_eth_prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
+ key, mask, fields);
+ break;
+ case IP_USER_FLOW:
+ err = dpaa2_eth_prep_uip_rule(&fs->h_u.usr_ip4_spec,
+ &fs->m_u.usr_ip4_spec, key, mask, fields);
+ break;
+ case TCP_V4_FLOW:
+ err = dpaa2_eth_prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
+ key, mask, IPPROTO_TCP, fields);
+ break;
+ case UDP_V4_FLOW:
+ err = dpaa2_eth_prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
+ key, mask, IPPROTO_UDP, fields);
+ break;
+ case SCTP_V4_FLOW:
+ err = dpaa2_eth_prep_l4_rule(&fs->h_u.sctp_ip4_spec,
+ &fs->m_u.sctp_ip4_spec, key, mask,
+ IPPROTO_SCTP, fields);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (err)
+ return err;
+
+ if (fs->flow_type & FLOW_EXT) {
+ err = dpaa2_eth_prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields);
+ if (err)
+ return err;
+ }
+
+ if (fs->flow_type & FLOW_MAC_EXT) {
+ err = dpaa2_eth_prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key,
+ mask, fields);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
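+
+/* A user-space command that would exercise the UDP_V4_FLOW path above might
+ * look like (illustrative; "eth0" and the queue index are placeholders):
+ *   ethtool -N eth0 flow-type udp4 dst-port 319 action 2 loc 0
+ * which steers UDP frames with destination port 319 to Rx queue 2.
+ */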
+
+static int dpaa2_eth_do_cls_rule(struct net_device *net_dev,
+ struct ethtool_rx_flow_spec *fs,
+ bool add)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct device *dev = net_dev->dev.parent;
+ struct dpni_rule_cfg rule_cfg = { 0 };
+ struct dpni_fs_action_cfg fs_act = { 0 };
+ dma_addr_t key_iova;
+ u64 fields = 0;
+ void *key_buf;
+ int i, err;
+
+ if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
+ fs->ring_cookie >= dpaa2_eth_queue_count(priv))
+ return -EINVAL;
+
+ rule_cfg.key_size = dpaa2_eth_cls_key_size(DPAA2_ETH_DIST_ALL);
+
+ /* allocate twice the key size, for the actual key and for mask */
+ key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL);
+ if (!key_buf)
+ return -ENOMEM;
+
+ /* Fill the key and mask memory areas */
+ err = dpaa2_eth_prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields);
+ if (err)
+ goto free_mem;
+
+ if (!dpaa2_eth_fs_mask_enabled(priv)) {
+ /* Masking allows us to configure a maximal key during init and
+ * use it for all flow steering rules. Without it, we include
+ * in the key only the fields actually used, so we need to
+ * extract the others from the final key buffer.
+ *
+ * Program the FS key if needed, or return error if previously
+ * set key can't be used for the current rule. User needs to
+ * delete existing rules in this case to allow for the new one.
+ */
+ if (!priv->rx_cls_fields) {
+ err = dpaa2_eth_set_cls(net_dev, fields);
+ if (err)
+ goto free_mem;
+
+ priv->rx_cls_fields = fields;
+ } else if (priv->rx_cls_fields != fields) {
+ netdev_err(net_dev, "No support for multiple FS keys, need to delete existing rules\n");
+ err = -EOPNOTSUPP;
+ goto free_mem;
+ }
+
+ dpaa2_eth_cls_trim_rule(key_buf, fields);
+ rule_cfg.key_size = dpaa2_eth_cls_key_size(fields);
+ }
+
+ key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, key_iova)) {
+ err = -ENOMEM;
+ goto free_mem;
+ }
+
+ rule_cfg.key_iova = key_iova;
+ if (dpaa2_eth_fs_mask_enabled(priv))
+ rule_cfg.mask_iova = key_iova + rule_cfg.key_size;
+
+ if (add) {
+ if (fs->ring_cookie == RX_CLS_FLOW_DISC)
+ fs_act.options |= DPNI_FS_OPT_DISCARD;
+ else
+ fs_act.flow_id = fs->ring_cookie;
+ }
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ if (add)
+ err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
+ i, fs->location, &rule_cfg,
+ &fs_act);
+ else
+ err = dpni_remove_fs_entry(priv->mc_io, 0,
+ priv->mc_token, i,
+ &rule_cfg);
+ if (err || priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
+ break;
+ }
+
+ dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE);
+
+free_mem:
+ kfree(key_buf);
+
+ return err;
+}
+
+static int dpaa2_eth_num_cls_rules(struct dpaa2_eth_priv *priv)
+{
+ int i, rules = 0;
+
+ for (i = 0; i < dpaa2_eth_fs_count(priv); i++)
+ if (priv->cls_rules[i].in_use)
+ rules++;
+
+ return rules;
+}
+
+static int dpaa2_eth_update_cls_rule(struct net_device *net_dev,
+ struct ethtool_rx_flow_spec *new_fs,
+ unsigned int location)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct dpaa2_eth_cls_rule *rule;
+ int err = -EINVAL;
+
+ if (!priv->rx_cls_enabled)
+ return -EOPNOTSUPP;
+
+ if (location >= dpaa2_eth_fs_count(priv))
+ return -EINVAL;
+
+ rule = &priv->cls_rules[location];
+
+ /* If a rule is present at the specified location, delete it. */
+ if (rule->in_use) {
+ err = dpaa2_eth_do_cls_rule(net_dev, &rule->fs, false);
+ if (err)
+ return err;
+
+ rule->in_use = 0;
+
+ if (!dpaa2_eth_fs_mask_enabled(priv) &&
+ !dpaa2_eth_num_cls_rules(priv))
+ priv->rx_cls_fields = 0;
+ }
+
+ /* If no new entry to add, return here */
+ if (!new_fs)
+ return err;
+
+ err = dpaa2_eth_do_cls_rule(net_dev, new_fs, true);
+ if (err)
+ return err;
+
+ rule->in_use = 1;
+ rule->fs = *new_fs;
+
+ return 0;
+}
+
+static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
+ struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int max_rules = dpaa2_eth_fs_count(priv);
+ int i, j = 0;
+
+ switch (rxnfc->cmd) {
+ case ETHTOOL_GRXFH:
+ /* we purposely ignore cmd->flow_type for now, because the
+ * classifier only supports a single set of fields for all
+ * protocols
+ */
+ rxnfc->data = priv->rx_hash_fields;
+ break;
+ case ETHTOOL_GRXRINGS:
+ rxnfc->data = dpaa2_eth_queue_count(priv);
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ rxnfc->rule_cnt = 0;
+ rxnfc->rule_cnt = dpaa2_eth_num_cls_rules(priv);
+ rxnfc->data = max_rules;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ if (rxnfc->fs.location >= max_rules)
+ return -EINVAL;
+ rxnfc->fs.location = array_index_nospec(rxnfc->fs.location,
+ max_rules);
+ if (!priv->cls_rules[rxnfc->fs.location].in_use)
+ return -EINVAL;
+ rxnfc->fs = priv->cls_rules[rxnfc->fs.location].fs;
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ for (i = 0; i < max_rules; i++) {
+ if (!priv->cls_rules[i].in_use)
+ continue;
+ if (j == rxnfc->rule_cnt)
+ return -EMSGSIZE;
+ rule_locs[j++] = i;
+ }
+ rxnfc->rule_cnt = j;
+ rxnfc->data = max_rules;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
+ struct ethtool_rxnfc *rxnfc)
+{
+ int err = 0;
+
+ switch (rxnfc->cmd) {
+ case ETHTOOL_SRXFH:
+ if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data)
+ return -EOPNOTSUPP;
+ err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
+ break;
+ case ETHTOOL_SRXCLSRLINS:
+ err = dpaa2_eth_update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ err = dpaa2_eth_update_cls_rule(net_dev, NULL, rxnfc->fs.location);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+int dpaa2_phc_index = -1;
+EXPORT_SYMBOL(dpaa2_phc_index);
+
+static int dpaa2_eth_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ if (!dpaa2_ptp)
+ return ethtool_op_get_ts_info(dev, info);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = dpaa2_phc_index;
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON) |
+ (1 << HWTSTAMP_TX_ONESTEP_SYNC);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+ return 0;
+}
+
+static int dpaa2_eth_get_tunable(struct net_device *net_dev,
+ const struct ethtool_tunable *tuna,
+ void *data)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = priv->rx_copybreak;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int dpaa2_eth_set_tunable(struct net_device *net_dev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ priv->rx_copybreak = *(u32 *)data;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
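+
+/* The rx_copybreak tunable above can be queried/changed from user space with
+ * e.g. (illustrative; "eth0" is a placeholder):
+ *   ethtool --get-tunable eth0 rx-copybreak
+ *   ethtool --set-tunable eth0 rx-copybreak 256
+ */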
+
+static int dpaa2_eth_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ic,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ struct dpaa2_io *dpio = priv->channel[0]->dpio;
+
+ dpaa2_io_get_irq_coalescing(dpio, &ic->rx_coalesce_usecs);
+ ic->use_adaptive_rx_coalesce = dpaa2_io_get_adaptive_coalescing(dpio);
+
+ return 0;
+}
+
+static int dpaa2_eth_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ic,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ struct dpaa2_io *dpio;
+ int prev_adaptive;
+ u32 prev_rx_usecs;
+ int i, j, err;
+
+ /* Keep track of the previous value, just in case we fail */
+ dpio = priv->channel[0]->dpio;
+ dpaa2_io_get_irq_coalescing(dpio, &prev_rx_usecs);
+ prev_adaptive = dpaa2_io_get_adaptive_coalescing(dpio);
+
+ /* Setup new value for rx coalescing */
+ for (i = 0; i < priv->num_channels; i++) {
+ dpio = priv->channel[i]->dpio;
+
+ dpaa2_io_set_adaptive_coalescing(dpio,
+ ic->use_adaptive_rx_coalesce);
+ err = dpaa2_io_set_irq_coalescing(dpio, ic->rx_coalesce_usecs);
+ if (err)
+ goto restore_rx_usecs;
+ }
+
+ return 0;
+
+restore_rx_usecs:
+ for (j = 0; j < i; j++) {
+ dpio = priv->channel[j]->dpio;
+
+ dpaa2_io_set_irq_coalescing(dpio, prev_rx_usecs);
+ dpaa2_io_set_adaptive_coalescing(dpio, prev_adaptive);
+ }
+
+ return err;
+}
+
+const struct ethtool_ops dpaa2_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
+ .get_drvinfo = dpaa2_eth_get_drvinfo,
+ .nway_reset = dpaa2_eth_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = dpaa2_eth_get_link_ksettings,
+ .set_link_ksettings = dpaa2_eth_set_link_ksettings,
+ .get_pauseparam = dpaa2_eth_get_pauseparam,
+ .set_pauseparam = dpaa2_eth_set_pauseparam,
+ .get_sset_count = dpaa2_eth_get_sset_count,
+ .get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
+ .get_strings = dpaa2_eth_get_strings,
+ .get_rxnfc = dpaa2_eth_get_rxnfc,
+ .set_rxnfc = dpaa2_eth_set_rxnfc,
+ .get_ts_info = dpaa2_eth_get_ts_info,
+ .get_tunable = dpaa2_eth_get_tunable,
+ .set_tunable = dpaa2_eth_set_tunable,
+ .get_coalesce = dpaa2_eth_get_coalesce,
+ .set_coalesce = dpaa2_eth_set_coalesce,
+};
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
new file mode 100644
index 000000000..49ff85633
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -0,0 +1,574 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2019 NXP */
+
+#include <linux/acpi.h>
+#include <linux/pcs-lynx.h>
+#include <linux/phy/phy.h>
+#include <linux/property.h>
+
+#include "dpaa2-eth.h"
+#include "dpaa2-mac.h"
+
+#define phylink_to_dpaa2_mac(config) \
+ container_of((config), struct dpaa2_mac, phylink_config)
+
+#define DPMAC_PROTOCOL_CHANGE_VER_MAJOR 4
+#define DPMAC_PROTOCOL_CHANGE_VER_MINOR 8
+
+#define DPAA2_MAC_FEATURE_PROTOCOL_CHANGE BIT(0)
+
+static int dpaa2_mac_cmp_ver(struct dpaa2_mac *mac,
+ u16 ver_major, u16 ver_minor)
+{
+ if (mac->ver_major == ver_major)
+ return mac->ver_minor - ver_minor;
+ return mac->ver_major - ver_major;
+}
+
+static void dpaa2_mac_detect_features(struct dpaa2_mac *mac)
+{
+ mac->features = 0;
+
+ if (dpaa2_mac_cmp_ver(mac, DPMAC_PROTOCOL_CHANGE_VER_MAJOR,
+ DPMAC_PROTOCOL_CHANGE_VER_MINOR) >= 0)
+ mac->features |= DPAA2_MAC_FEATURE_PROTOCOL_CHANGE;
+}
+
+static int phy_mode(enum dpmac_eth_if eth_if, phy_interface_t *if_mode)
+{
+ *if_mode = PHY_INTERFACE_MODE_NA;
+
+ switch (eth_if) {
+ case DPMAC_ETH_IF_RGMII:
+ *if_mode = PHY_INTERFACE_MODE_RGMII;
+ break;
+ case DPMAC_ETH_IF_USXGMII:
+ *if_mode = PHY_INTERFACE_MODE_USXGMII;
+ break;
+ case DPMAC_ETH_IF_QSGMII:
+ *if_mode = PHY_INTERFACE_MODE_QSGMII;
+ break;
+ case DPMAC_ETH_IF_SGMII:
+ *if_mode = PHY_INTERFACE_MODE_SGMII;
+ break;
+ case DPMAC_ETH_IF_XFI:
+ *if_mode = PHY_INTERFACE_MODE_10GBASER;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static enum dpmac_eth_if dpmac_eth_if_mode(phy_interface_t if_mode)
+{
+ switch (if_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ return DPMAC_ETH_IF_RGMII;
+ case PHY_INTERFACE_MODE_USXGMII:
+ return DPMAC_ETH_IF_USXGMII;
+ case PHY_INTERFACE_MODE_QSGMII:
+ return DPMAC_ETH_IF_QSGMII;
+ case PHY_INTERFACE_MODE_SGMII:
+ return DPMAC_ETH_IF_SGMII;
+ case PHY_INTERFACE_MODE_10GBASER:
+ return DPMAC_ETH_IF_XFI;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ return DPMAC_ETH_IF_1000BASEX;
+ default:
+ return DPMAC_ETH_IF_MII;
+ }
+}
+
+static struct fwnode_handle *dpaa2_mac_get_node(struct device *dev,
+ u16 dpmac_id)
+{
+ struct fwnode_handle *fwnode, *parent = NULL, *child = NULL;
+ struct device_node *dpmacs = NULL;
+ int err;
+ u32 id;
+
+ fwnode = dev_fwnode(dev->parent);
+ if (is_of_node(fwnode)) {
+ dpmacs = of_find_node_by_name(NULL, "dpmacs");
+ if (!dpmacs)
+ return NULL;
+ parent = of_fwnode_handle(dpmacs);
+ } else if (is_acpi_node(fwnode)) {
+ parent = fwnode;
+ } else {
+		/* The root dprc device didn't yet get to finalize its probe,
+ * thus the fwnode field is not yet set. Defer probe if we are
+ * facing this situation.
+ */
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ fwnode_for_each_child_node(parent, child) {
+ err = -EINVAL;
+ if (is_acpi_device_node(child))
+ err = acpi_get_local_address(ACPI_HANDLE_FWNODE(child), &id);
+ else if (is_of_node(child))
+ err = of_property_read_u32(to_of_node(child), "reg", &id);
+ if (err)
+ continue;
+
+ if (id == dpmac_id) {
+ of_node_put(dpmacs);
+ return child;
+ }
+ }
+ of_node_put(dpmacs);
+ return NULL;
+}
+
+static int dpaa2_mac_get_if_mode(struct fwnode_handle *dpmac_node,
+ struct dpmac_attr attr)
+{
+ phy_interface_t if_mode;
+ int err;
+
+ err = fwnode_get_phy_mode(dpmac_node);
+ if (err > 0)
+ return err;
+
+ err = phy_mode(attr.eth_if, &if_mode);
+ if (!err)
+ return if_mode;
+
+ return err;
+}
+
+static struct phylink_pcs *dpaa2_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
+
+ return mac->pcs;
+}
+
+static void dpaa2_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
+ struct dpmac_link_state *dpmac_state = &mac->state;
+ int err;
+
+ if (state->an_enabled)
+ dpmac_state->options |= DPMAC_LINK_OPT_AUTONEG;
+ else
+ dpmac_state->options &= ~DPMAC_LINK_OPT_AUTONEG;
+
+ err = dpmac_set_link_state(mac->mc_io, 0,
+ mac->mc_dev->mc_handle, dpmac_state);
+ if (err)
+ netdev_err(mac->net_dev, "%s: dpmac_set_link_state() = %d\n",
+ __func__, err);
+
+ if (!mac->serdes_phy)
+ return;
+
+	/* This happens only if we support changing the protocol at runtime */
+ err = dpmac_set_protocol(mac->mc_io, 0, mac->mc_dev->mc_handle,
+ dpmac_eth_if_mode(state->interface));
+ if (err)
+ netdev_err(mac->net_dev, "dpmac_set_protocol() = %d\n", err);
+
+ err = phy_set_mode_ext(mac->serdes_phy, PHY_MODE_ETHERNET, state->interface);
+ if (err)
+ netdev_err(mac->net_dev, "phy_set_mode_ext() = %d\n", err);
+}
+
+static void dpaa2_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
+ struct dpmac_link_state *dpmac_state = &mac->state;
+ int err;
+
+ dpmac_state->up = 1;
+
+ dpmac_state->rate = speed;
+
+ if (duplex == DUPLEX_HALF)
+ dpmac_state->options |= DPMAC_LINK_OPT_HALF_DUPLEX;
+ else if (duplex == DUPLEX_FULL)
+ dpmac_state->options &= ~DPMAC_LINK_OPT_HALF_DUPLEX;
+
+ if (rx_pause)
+ dpmac_state->options |= DPMAC_LINK_OPT_PAUSE;
+ else
+ dpmac_state->options &= ~DPMAC_LINK_OPT_PAUSE;
+
+ if (rx_pause ^ tx_pause)
+ dpmac_state->options |= DPMAC_LINK_OPT_ASYM_PAUSE;
+ else
+ dpmac_state->options &= ~DPMAC_LINK_OPT_ASYM_PAUSE;
+
+ err = dpmac_set_link_state(mac->mc_io, 0,
+ mac->mc_dev->mc_handle, dpmac_state);
+ if (err)
+ netdev_err(mac->net_dev, "%s: dpmac_set_link_state() = %d\n",
+ __func__, err);
+}
+
+static void dpaa2_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
+ struct dpmac_link_state *dpmac_state = &mac->state;
+ int err;
+
+ dpmac_state->up = 0;
+ err = dpmac_set_link_state(mac->mc_io, 0,
+ mac->mc_dev->mc_handle, dpmac_state);
+ if (err)
+ netdev_err(mac->net_dev, "dpmac_set_link_state() = %d\n", err);
+}
+
+static const struct phylink_mac_ops dpaa2_mac_phylink_ops = {
+ .validate = phylink_generic_validate,
+ .mac_select_pcs = dpaa2_mac_select_pcs,
+ .mac_config = dpaa2_mac_config,
+ .mac_link_up = dpaa2_mac_link_up,
+ .mac_link_down = dpaa2_mac_link_down,
+};
+
+static int dpaa2_pcs_create(struct dpaa2_mac *mac,
+ struct fwnode_handle *dpmac_node,
+ int id)
+{
+ struct mdio_device *mdiodev;
+ struct fwnode_handle *node;
+
+ node = fwnode_find_reference(dpmac_node, "pcs-handle", 0);
+ if (IS_ERR(node)) {
+ /* do not error out on old DTS files */
+ netdev_warn(mac->net_dev, "pcs-handle node not found\n");
+ return 0;
+ }
+
+ if (!fwnode_device_is_available(node)) {
+ netdev_err(mac->net_dev, "pcs-handle node not available\n");
+ fwnode_handle_put(node);
+ return -ENODEV;
+ }
+
+ mdiodev = fwnode_mdio_find_device(node);
+ fwnode_handle_put(node);
+ if (!mdiodev)
+ return -EPROBE_DEFER;
+
+ mac->pcs = lynx_pcs_create(mdiodev);
+ if (!mac->pcs) {
+ netdev_err(mac->net_dev, "lynx_pcs_create() failed\n");
+ put_device(&mdiodev->dev);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void dpaa2_pcs_destroy(struct dpaa2_mac *mac)
+{
+ struct phylink_pcs *phylink_pcs = mac->pcs;
+
+ if (phylink_pcs) {
+ struct mdio_device *mdio = lynx_get_mdio_device(phylink_pcs);
+ struct device *dev = &mdio->dev;
+
+ lynx_pcs_destroy(phylink_pcs);
+ put_device(dev);
+ mac->pcs = NULL;
+ }
+}
+
+static void dpaa2_mac_set_supported_interfaces(struct dpaa2_mac *mac)
+{
+ int intf, err;
+
+	/* We support the current interface mode and, if we have a PCS,
+ * similar interface modes that do not require the SerDes lane to be
+ * reconfigured.
+ */
+ __set_bit(mac->if_mode, mac->phylink_config.supported_interfaces);
+ if (mac->pcs) {
+ switch (mac->if_mode) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ mac->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ mac->phylink_config.supported_interfaces);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (!mac->serdes_phy)
+ return;
+
+	/* If we have access to the SerDes phy/lane, ask the SerDes driver
+	 * which interfaces are supported based on the current PLL
+ * configuration.
+ */
+ for (intf = 0; intf < PHY_INTERFACE_MODE_MAX; intf++) {
+ if (intf == PHY_INTERFACE_MODE_NA)
+ continue;
+
+ err = phy_validate(mac->serdes_phy, PHY_MODE_ETHERNET, intf, NULL);
+ if (err)
+ continue;
+
+ __set_bit(intf, mac->phylink_config.supported_interfaces);
+ }
+}
+
+void dpaa2_mac_start(struct dpaa2_mac *mac)
+{
+ if (mac->serdes_phy)
+ phy_power_on(mac->serdes_phy);
+}
+
+void dpaa2_mac_stop(struct dpaa2_mac *mac)
+{
+ if (mac->serdes_phy)
+ phy_power_off(mac->serdes_phy);
+}
+
+int dpaa2_mac_connect(struct dpaa2_mac *mac)
+{
+ struct net_device *net_dev = mac->net_dev;
+ struct fwnode_handle *dpmac_node;
+ struct phy *serdes_phy = NULL;
+ struct phylink *phylink;
+ int err;
+
+ mac->if_link_type = mac->attr.link_type;
+
+ dpmac_node = mac->fw_node;
+ if (!dpmac_node) {
+ netdev_err(net_dev, "No dpmac@%d node found.\n", mac->attr.id);
+ return -ENODEV;
+ }
+
+ err = dpaa2_mac_get_if_mode(dpmac_node, mac->attr);
+ if (err < 0)
+ return -EINVAL;
+ mac->if_mode = err;
+
+ if (mac->features & DPAA2_MAC_FEATURE_PROTOCOL_CHANGE &&
+ !phy_interface_mode_is_rgmii(mac->if_mode) &&
+ is_of_node(dpmac_node)) {
+ serdes_phy = of_phy_get(to_of_node(dpmac_node), NULL);
+
+ if (serdes_phy == ERR_PTR(-ENODEV))
+ serdes_phy = NULL;
+ else if (IS_ERR(serdes_phy))
+ return PTR_ERR(serdes_phy);
+ else
+ phy_init(serdes_phy);
+ }
+ mac->serdes_phy = serdes_phy;
+
+	/* The MAC does not have the capability to add RGMII delays, so
+	 * error out if the interface mode requests them and there is no PHY
+	 * to act upon them.
+ */
+ if (of_phy_is_fixed_link(to_of_node(dpmac_node)) &&
+ (mac->if_mode == PHY_INTERFACE_MODE_RGMII_ID ||
+ mac->if_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
+ mac->if_mode == PHY_INTERFACE_MODE_RGMII_TXID)) {
+ netdev_err(net_dev, "RGMII delay not supported\n");
+ return -EINVAL;
+ }
+
+ if ((mac->attr.link_type == DPMAC_LINK_TYPE_PHY &&
+ mac->attr.eth_if != DPMAC_ETH_IF_RGMII) ||
+ mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE) {
+ err = dpaa2_pcs_create(mac, dpmac_node, mac->attr.id);
+ if (err)
+ return err;
+ }
+
+ memset(&mac->phylink_config, 0, sizeof(mac->phylink_config));
+ mac->phylink_config.dev = &net_dev->dev;
+ mac->phylink_config.type = PHYLINK_NETDEV;
+
+ mac->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ MAC_10FD | MAC_100FD | MAC_1000FD | MAC_2500FD | MAC_5000FD |
+ MAC_10000FD;
+
+ dpaa2_mac_set_supported_interfaces(mac);
+
+ phylink = phylink_create(&mac->phylink_config,
+ dpmac_node, mac->if_mode,
+ &dpaa2_mac_phylink_ops);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
+ goto err_pcs_destroy;
+ }
+ mac->phylink = phylink;
+
+ err = phylink_fwnode_phy_connect(mac->phylink, dpmac_node, 0);
+ if (err) {
+ netdev_err(net_dev, "phylink_fwnode_phy_connect() = %d\n", err);
+ goto err_phylink_destroy;
+ }
+
+ return 0;
+
+err_phylink_destroy:
+ phylink_destroy(mac->phylink);
+err_pcs_destroy:
+ dpaa2_pcs_destroy(mac);
+
+ return err;
+}
+
+void dpaa2_mac_disconnect(struct dpaa2_mac *mac)
+{
+ if (!mac->phylink)
+ return;
+
+ phylink_disconnect_phy(mac->phylink);
+ phylink_destroy(mac->phylink);
+ dpaa2_pcs_destroy(mac);
+ of_phy_put(mac->serdes_phy);
+ mac->serdes_phy = NULL;
+}
+
+int dpaa2_mac_open(struct dpaa2_mac *mac)
+{
+ struct fsl_mc_device *dpmac_dev = mac->mc_dev;
+ struct net_device *net_dev = mac->net_dev;
+ struct fwnode_handle *fw_node;
+ int err;
+
+ err = dpmac_open(mac->mc_io, 0, dpmac_dev->obj_desc.id,
+ &dpmac_dev->mc_handle);
+ if (err || !dpmac_dev->mc_handle) {
+ netdev_err(net_dev, "dpmac_open() = %d\n", err);
+ return -ENODEV;
+ }
+
+ err = dpmac_get_attributes(mac->mc_io, 0, dpmac_dev->mc_handle,
+ &mac->attr);
+ if (err) {
+ netdev_err(net_dev, "dpmac_get_attributes() = %d\n", err);
+ goto err_close_dpmac;
+ }
+
+ err = dpmac_get_api_version(mac->mc_io, 0, &mac->ver_major, &mac->ver_minor);
+ if (err) {
+ netdev_err(net_dev, "dpmac_get_api_version() = %d\n", err);
+ goto err_close_dpmac;
+ }
+
+ dpaa2_mac_detect_features(mac);
+
+ /* Find the device node representing the MAC device and link the device
+ * behind the associated netdev to it.
+ */
+ fw_node = dpaa2_mac_get_node(&mac->mc_dev->dev, mac->attr.id);
+ if (IS_ERR(fw_node)) {
+ err = PTR_ERR(fw_node);
+ goto err_close_dpmac;
+ }
+
+ mac->fw_node = fw_node;
+ net_dev->dev.of_node = to_of_node(mac->fw_node);
+
+ return 0;
+
+err_close_dpmac:
+ dpmac_close(mac->mc_io, 0, dpmac_dev->mc_handle);
+ return err;
+}
+
+void dpaa2_mac_close(struct dpaa2_mac *mac)
+{
+ struct fsl_mc_device *dpmac_dev = mac->mc_dev;
+
+ dpmac_close(mac->mc_io, 0, dpmac_dev->mc_handle);
+ if (mac->fw_node)
+ fwnode_handle_put(mac->fw_node);
+}
+
+static char dpaa2_mac_ethtool_stats[][ETH_GSTRING_LEN] = {
+ [DPMAC_CNT_ING_ALL_FRAME] = "[mac] rx all frames",
+ [DPMAC_CNT_ING_GOOD_FRAME] = "[mac] rx frames ok",
+ [DPMAC_CNT_ING_ERR_FRAME] = "[mac] rx frame errors",
+ [DPMAC_CNT_ING_FRAME_DISCARD] = "[mac] rx frame discards",
+ [DPMAC_CNT_ING_UCAST_FRAME] = "[mac] rx u-cast",
+ [DPMAC_CNT_ING_BCAST_FRAME] = "[mac] rx b-cast",
+ [DPMAC_CNT_ING_MCAST_FRAME] = "[mac] rx m-cast",
+ [DPMAC_CNT_ING_FRAME_64] = "[mac] rx 64 bytes",
+ [DPMAC_CNT_ING_FRAME_127] = "[mac] rx 65-127 bytes",
+ [DPMAC_CNT_ING_FRAME_255] = "[mac] rx 128-255 bytes",
+ [DPMAC_CNT_ING_FRAME_511] = "[mac] rx 256-511 bytes",
+ [DPMAC_CNT_ING_FRAME_1023] = "[mac] rx 512-1023 bytes",
+ [DPMAC_CNT_ING_FRAME_1518] = "[mac] rx 1024-1518 bytes",
+ [DPMAC_CNT_ING_FRAME_1519_MAX] = "[mac] rx 1519-max bytes",
+ [DPMAC_CNT_ING_FRAG] = "[mac] rx frags",
+ [DPMAC_CNT_ING_JABBER] = "[mac] rx jabber",
+ [DPMAC_CNT_ING_ALIGN_ERR] = "[mac] rx align errors",
+ [DPMAC_CNT_ING_OVERSIZED] = "[mac] rx oversized",
+ [DPMAC_CNT_ING_VALID_PAUSE_FRAME] = "[mac] rx pause",
+ [DPMAC_CNT_ING_BYTE] = "[mac] rx bytes",
+ [DPMAC_CNT_EGR_GOOD_FRAME] = "[mac] tx frames ok",
+ [DPMAC_CNT_EGR_UCAST_FRAME] = "[mac] tx u-cast",
+ [DPMAC_CNT_EGR_MCAST_FRAME] = "[mac] tx m-cast",
+ [DPMAC_CNT_EGR_BCAST_FRAME] = "[mac] tx b-cast",
+ [DPMAC_CNT_EGR_ERR_FRAME] = "[mac] tx frame errors",
+ [DPMAC_CNT_EGR_UNDERSIZED] = "[mac] tx undersized",
+ [DPMAC_CNT_EGR_VALID_PAUSE_FRAME] = "[mac] tx b-pause",
+ [DPMAC_CNT_EGR_BYTE] = "[mac] tx bytes",
+};
+
+#define DPAA2_MAC_NUM_STATS ARRAY_SIZE(dpaa2_mac_ethtool_stats)
+
+int dpaa2_mac_get_sset_count(void)
+{
+ return DPAA2_MAC_NUM_STATS;
+}
+
+void dpaa2_mac_get_strings(u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ for (i = 0; i < DPAA2_MAC_NUM_STATS; i++) {
+ strscpy(p, dpaa2_mac_ethtool_stats[i], ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+}
+
+void dpaa2_mac_get_ethtool_stats(struct dpaa2_mac *mac, u64 *data)
+{
+ struct fsl_mc_device *dpmac_dev = mac->mc_dev;
+ int i, err;
+ u64 value;
+
+ for (i = 0; i < DPAA2_MAC_NUM_STATS; i++) {
+ err = dpmac_get_counter(mac->mc_io, 0, dpmac_dev->mc_handle,
+ i, &value);
+ if (err) {
+ netdev_err_once(mac->net_dev,
+ "dpmac_get_counter error %d\n", err);
+ *(data + i) = U64_MAX;
+ continue;
+ }
+ *(data + i) = value;
+ }
+}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
new file mode 100644
index 000000000..a58cab188
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2019 NXP */
+#ifndef DPAA2_MAC_H
+#define DPAA2_MAC_H
+
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/phylink.h>
+
+#include "dpmac.h"
+#include "dpmac-cmd.h"
+
+struct dpaa2_mac {
+ struct fsl_mc_device *mc_dev;
+ struct dpmac_link_state state;
+ struct net_device *net_dev;
+ struct fsl_mc_io *mc_io;
+ struct dpmac_attr attr;
+ u16 ver_major, ver_minor;
+ unsigned long features;
+
+ struct phylink_config phylink_config;
+ struct phylink *phylink;
+ phy_interface_t if_mode;
+ enum dpmac_link_type if_link_type;
+ struct phylink_pcs *pcs;
+ struct fwnode_handle *fw_node;
+
+ struct phy *serdes_phy;
+};
+
+bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
+ struct fsl_mc_io *mc_io);
+
+int dpaa2_mac_open(struct dpaa2_mac *mac);
+
+void dpaa2_mac_close(struct dpaa2_mac *mac);
+
+int dpaa2_mac_connect(struct dpaa2_mac *mac);
+
+void dpaa2_mac_disconnect(struct dpaa2_mac *mac);
+
+int dpaa2_mac_get_sset_count(void);
+
+void dpaa2_mac_get_strings(u8 *data);
+
+void dpaa2_mac_get_ethtool_stats(struct dpaa2_mac *mac, u64 *data);
+
+void dpaa2_mac_start(struct dpaa2_mac *mac);
+
+void dpaa2_mac_stop(struct dpaa2_mac *mac);
+
+#endif /* DPAA2_MAC_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
new file mode 100644
index 000000000..c8cb54157
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2018 NXP
+ * Copyright 2020 NXP
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/msi.h>
+#include <linux/fsl/mc.h>
+
+#include "dpaa2-ptp.h"
+
+static int dpaa2_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct ptp_qoriq *ptp_qoriq = container_of(ptp, struct ptp_qoriq, caps);
+ struct fsl_mc_device *mc_dev;
+ struct device *dev;
+ u32 mask = 0;
+ u32 bit;
+ int err;
+
+ dev = ptp_qoriq->dev;
+ mc_dev = to_fsl_mc_device(dev);
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ switch (rq->extts.index) {
+ case 0:
+ bit = DPRTC_EVENT_ETS1;
+ break;
+ case 1:
+ bit = DPRTC_EVENT_ETS2;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (on)
+ extts_clean_up(ptp_qoriq, rq->extts.index, false);
+ break;
+ case PTP_CLK_REQ_PPS:
+ bit = DPRTC_EVENT_PPS;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ err = dprtc_get_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle,
+ DPRTC_IRQ_INDEX, &mask);
+ if (err < 0) {
+ dev_err(dev, "dprtc_get_irq_mask(): %d\n", err);
+ return err;
+ }
+
+ if (on)
+ mask |= bit;
+ else
+ mask &= ~bit;
+
+ err = dprtc_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle,
+ DPRTC_IRQ_INDEX, mask);
+ if (err < 0) {
+ dev_err(dev, "dprtc_set_irq_mask(): %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct ptp_clock_info dpaa2_ptp_caps = {
+ .owner = THIS_MODULE,
+ .name = "DPAA2 PTP Clock",
+ .max_adj = 512000,
+ .n_alarm = 2,
+ .n_ext_ts = 2,
+ .n_per_out = 3,
+ .n_pins = 0,
+ .pps = 1,
+ .adjfine = ptp_qoriq_adjfine,
+ .adjtime = ptp_qoriq_adjtime,
+ .gettime64 = ptp_qoriq_gettime,
+ .settime64 = ptp_qoriq_settime,
+ .enable = dpaa2_ptp_enable,
+};
+
+static irqreturn_t dpaa2_ptp_irq_handler_thread(int irq, void *priv)
+{
+ struct ptp_qoriq *ptp_qoriq = priv;
+ struct ptp_clock_event event;
+ struct fsl_mc_device *mc_dev;
+ struct device *dev;
+ u32 status = 0;
+ int err;
+
+ dev = ptp_qoriq->dev;
+ mc_dev = to_fsl_mc_device(dev);
+
+ err = dprtc_get_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
+ DPRTC_IRQ_INDEX, &status);
+ if (unlikely(err)) {
+ dev_err(dev, "dprtc_get_irq_status err %d\n", err);
+ return IRQ_NONE;
+ }
+
+ if (status & DPRTC_EVENT_PPS) {
+ event.type = PTP_CLOCK_PPS;
+ ptp_clock_event(ptp_qoriq->clock, &event);
+ }
+
+ if (status & DPRTC_EVENT_ETS1)
+ extts_clean_up(ptp_qoriq, 0, true);
+
+ if (status & DPRTC_EVENT_ETS2)
+ extts_clean_up(ptp_qoriq, 1, true);
+
+ err = dprtc_clear_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
+ DPRTC_IRQ_INDEX, status);
+ if (unlikely(err)) {
+ dev_err(dev, "dprtc_clear_irq_status err %d\n", err);
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
+{
+ struct device *dev = &mc_dev->dev;
+ struct ptp_qoriq *ptp_qoriq;
+ struct device_node *node;
+ void __iomem *base;
+ int err;
+
+ ptp_qoriq = devm_kzalloc(dev, sizeof(*ptp_qoriq), GFP_KERNEL);
+ if (!ptp_qoriq)
+ return -ENOMEM;
+
+ err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
+ if (err) {
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
+ goto err_exit;
+ }
+
+ err = dprtc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
+ &mc_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dprtc_open err %d\n", err);
+ goto err_free_mcp;
+ }
+
+ ptp_qoriq->dev = dev;
+
+ node = of_find_compatible_node(NULL, NULL, "fsl,dpaa2-ptp");
+ if (!node) {
+ err = -ENODEV;
+ goto err_close;
+ }
+
+ dev->of_node = node;
+
+ base = of_iomap(node, 0);
+ if (!base) {
+ err = -ENOMEM;
+ goto err_put;
+ }
+
+ err = fsl_mc_allocate_irqs(mc_dev);
+ if (err) {
+ dev_err(dev, "MC irqs allocation failed\n");
+ goto err_unmap;
+ }
+
+ ptp_qoriq->irq = mc_dev->irqs[0]->virq;
+
+ err = request_threaded_irq(ptp_qoriq->irq, NULL,
+ dpaa2_ptp_irq_handler_thread,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ dev_name(dev), ptp_qoriq);
+ if (err < 0) {
+		dev_err(dev, "request_threaded_irq(): %d\n", err);
+ goto err_free_mc_irq;
+ }
+
+ err = dprtc_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle,
+ DPRTC_IRQ_INDEX, 1);
+ if (err < 0) {
+ dev_err(dev, "dprtc_set_irq_enable(): %d\n", err);
+ goto err_free_threaded_irq;
+ }
+
+ err = ptp_qoriq_init(ptp_qoriq, base, &dpaa2_ptp_caps);
+ if (err)
+ goto err_free_threaded_irq;
+
+ dpaa2_phc_index = ptp_qoriq->phc_index;
+ dpaa2_ptp = ptp_qoriq;
+ dev_set_drvdata(dev, ptp_qoriq);
+
+ return 0;
+
+err_free_threaded_irq:
+ free_irq(ptp_qoriq->irq, ptp_qoriq);
+err_free_mc_irq:
+ fsl_mc_free_irqs(mc_dev);
+err_unmap:
+ iounmap(base);
+err_put:
+ of_node_put(node);
+err_close:
+ dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
+err_free_mcp:
+ fsl_mc_portal_free(mc_dev->mc_io);
+err_exit:
+ return err;
+}
+
+static int dpaa2_ptp_remove(struct fsl_mc_device *mc_dev)
+{
+ struct device *dev = &mc_dev->dev;
+ struct ptp_qoriq *ptp_qoriq;
+
+ ptp_qoriq = dev_get_drvdata(dev);
+
+ dpaa2_phc_index = -1;
+ ptp_qoriq_free(ptp_qoriq);
+
+ fsl_mc_free_irqs(mc_dev);
+ dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
+ fsl_mc_portal_free(mc_dev->mc_io);
+
+ return 0;
+}
+
+static const struct fsl_mc_device_id dpaa2_ptp_match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dprtc",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(fslmc, dpaa2_ptp_match_id_table);
+
+static struct fsl_mc_driver dpaa2_ptp_drv = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = dpaa2_ptp_probe,
+ .remove = dpaa2_ptp_remove,
+ .match_id_table = dpaa2_ptp_match_id_table,
+};
+
+module_fsl_mc_driver(dpaa2_ptp_drv);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DPAA2 PTP Clock Driver");
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h
new file mode 100644
index 000000000..e1023538b
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2018 NXP
+ * Copyright 2020 NXP
+ */
+
+#ifndef __RTC_H
+#define __RTC_H
+
+#include <linux/fsl/ptp_qoriq.h>
+
+#include "dprtc.h"
+#include "dprtc-cmd.h"
+
+extern int dpaa2_phc_index;
+extern struct ptp_qoriq *dpaa2_ptp;
+
+#endif
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
new file mode 100644
index 000000000..720c9230c
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DPAA2 Ethernet Switch ethtool support
+ *
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2018 NXP
+ *
+ */
+
+#include <linux/ethtool.h>
+
+#include "dpaa2-switch.h"
+
+static struct {
+ enum dpsw_counter id;
+ char name[ETH_GSTRING_LEN];
+} dpaa2_switch_ethtool_counters[] = {
+ {DPSW_CNT_ING_FRAME, "[hw] rx frames"},
+ {DPSW_CNT_ING_BYTE, "[hw] rx bytes"},
+ {DPSW_CNT_ING_FLTR_FRAME, "[hw] rx filtered frames"},
+ {DPSW_CNT_ING_FRAME_DISCARD, "[hw] rx discarded frames"},
+ {DPSW_CNT_ING_BCAST_FRAME, "[hw] rx bcast frames"},
+ {DPSW_CNT_ING_BCAST_BYTES, "[hw] rx bcast bytes"},
+ {DPSW_CNT_ING_MCAST_FRAME, "[hw] rx mcast frames"},
+ {DPSW_CNT_ING_MCAST_BYTE, "[hw] rx mcast bytes"},
+ {DPSW_CNT_EGR_FRAME, "[hw] tx frames"},
+ {DPSW_CNT_EGR_BYTE, "[hw] tx bytes"},
+ {DPSW_CNT_EGR_FRAME_DISCARD, "[hw] tx discarded frames"},
+ {DPSW_CNT_ING_NO_BUFF_DISCARD, "[hw] rx nobuffer discards"},
+};
+
+#define DPAA2_SWITCH_NUM_COUNTERS ARRAY_SIZE(dpaa2_switch_ethtool_counters)
+
+static void dpaa2_switch_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ u16 version_major, version_minor;
+ int err;
+
+ strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+
+ err = dpsw_get_api_version(port_priv->ethsw_data->mc_io, 0,
+ &version_major,
+ &version_minor);
+ if (err)
+ strscpy(drvinfo->fw_version, "N/A",
+ sizeof(drvinfo->fw_version));
+ else
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%u.%u", version_major, version_minor);
+
+ strscpy(drvinfo->bus_info, dev_name(netdev->dev.parent->parent),
+ sizeof(drvinfo->bus_info));
+}
+
+static int
+dpaa2_switch_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *link_ksettings)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct dpsw_link_state state = {0};
+ int err = 0;
+
+ if (dpaa2_switch_port_is_type_phy(port_priv))
+ return phylink_ethtool_ksettings_get(port_priv->mac->phylink,
+ link_ksettings);
+
+ err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ &state);
+ if (err) {
+ netdev_err(netdev, "ERROR %d getting link state\n", err);
+ goto out;
+ }
+
+ /* At the moment, we have no way of interrogating the DPMAC
+ * from the DPSW side or there may not exist a DPMAC at all.
+	 * Report only autoneg state, duplex and speed.
+ */
+ if (state.options & DPSW_LINK_OPT_AUTONEG)
+ link_ksettings->base.autoneg = AUTONEG_ENABLE;
+ if (!(state.options & DPSW_LINK_OPT_HALF_DUPLEX))
+ link_ksettings->base.duplex = DUPLEX_FULL;
+ link_ksettings->base.speed = state.rate;
+
+out:
+ return err;
+}
+
+static int
+dpaa2_switch_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *link_ksettings)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpsw_link_cfg cfg = {0};
+ bool if_running;
+ int err = 0, ret;
+
+ if (dpaa2_switch_port_is_type_phy(port_priv))
+ return phylink_ethtool_ksettings_set(port_priv->mac->phylink,
+ link_ksettings);
+
+ /* Interface needs to be down to change link settings */
+ if_running = netif_running(netdev);
+ if (if_running) {
+ err = dpsw_if_disable(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port_priv->idx);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_disable err %d\n", err);
+ return err;
+ }
+ }
+
+ cfg.rate = link_ksettings->base.speed;
+ if (link_ksettings->base.autoneg == AUTONEG_ENABLE)
+ cfg.options |= DPSW_LINK_OPT_AUTONEG;
+ else
+ cfg.options &= ~DPSW_LINK_OPT_AUTONEG;
+ if (link_ksettings->base.duplex == DUPLEX_HALF)
+ cfg.options |= DPSW_LINK_OPT_HALF_DUPLEX;
+ else
+ cfg.options &= ~DPSW_LINK_OPT_HALF_DUPLEX;
+
+ err = dpsw_if_set_link_cfg(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ &cfg);
+
+ if (if_running) {
+ ret = dpsw_if_enable(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port_priv->idx);
+ if (ret) {
+ netdev_err(netdev, "dpsw_if_enable err %d\n", ret);
+ return ret;
+ }
+ }
+ return err;
+}
+
+static int
+dpaa2_switch_ethtool_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int num_ss_stats = DPAA2_SWITCH_NUM_COUNTERS;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ if (port_priv->mac)
+ num_ss_stats += dpaa2_mac_get_sset_count();
+ return num_ss_stats;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void dpaa2_switch_ethtool_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < DPAA2_SWITCH_NUM_COUNTERS; i++) {
+ memcpy(p, dpaa2_switch_ethtool_counters[i].name,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ if (port_priv->mac)
+ dpaa2_mac_get_strings(p);
+ break;
+ }
+}
+
+static void dpaa2_switch_ethtool_get_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int i, err;
+
+ for (i = 0; i < DPAA2_SWITCH_NUM_COUNTERS; i++) {
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ dpaa2_switch_ethtool_counters[i].id,
+ &data[i]);
+ if (err)
+ netdev_err(netdev, "dpsw_if_get_counter[%s] err %d\n",
+ dpaa2_switch_ethtool_counters[i].name, err);
+ }
+
+ if (port_priv->mac)
+ dpaa2_mac_get_ethtool_stats(port_priv->mac, data + i);
+}
+
+const struct ethtool_ops dpaa2_switch_port_ethtool_ops = {
+ .get_drvinfo = dpaa2_switch_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = dpaa2_switch_get_link_ksettings,
+ .set_link_ksettings = dpaa2_switch_set_link_ksettings,
+ .get_strings = dpaa2_switch_ethtool_get_strings,
+ .get_ethtool_stats = dpaa2_switch_ethtool_get_stats,
+ .get_sset_count = dpaa2_switch_ethtool_get_sset_count,
+};
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
new file mode 100644
index 000000000..16d3c3610
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
@@ -0,0 +1,885 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DPAA2 Ethernet Switch flower support
+ *
+ * Copyright 2021 NXP
+ *
+ */
+
+#include "dpaa2-switch.h"
+
+static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls,
+ struct dpsw_acl_key *acl_key)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct dpsw_acl_fields *acl_h, *acl_m;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_IP) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported keys used");
+ return -EOPNOTSUPP;
+ }
+
+ acl_h = &acl_key->match;
+ acl_m = &acl_key->mask;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+ acl_h->l3_protocol = match.key->ip_proto;
+ acl_h->l2_ether_type = be16_to_cpu(match.key->n_proto);
+ acl_m->l3_protocol = match.mask->ip_proto;
+ acl_m->l2_ether_type = be16_to_cpu(match.mask->n_proto);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+ ether_addr_copy(acl_h->l2_dest_mac, &match.key->dst[0]);
+ ether_addr_copy(acl_h->l2_source_mac, &match.key->src[0]);
+ ether_addr_copy(acl_m->l2_dest_mac, &match.mask->dst[0]);
+ ether_addr_copy(acl_m->l2_source_mac, &match.mask->src[0]);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+ acl_h->l2_vlan_id = match.key->vlan_id;
+ acl_h->l2_tpid = be16_to_cpu(match.key->vlan_tpid);
+ acl_h->l2_pcp_dei = match.key->vlan_priority << 1 |
+ match.key->vlan_dei;
+
+ acl_m->l2_vlan_id = match.mask->vlan_id;
+ acl_m->l2_tpid = be16_to_cpu(match.mask->vlan_tpid);
+ acl_m->l2_pcp_dei = match.mask->vlan_priority << 1 |
+ match.mask->vlan_dei;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(rule, &match);
+ acl_h->l3_source_ip = be32_to_cpu(match.key->src);
+ acl_h->l3_dest_ip = be32_to_cpu(match.key->dst);
+ acl_m->l3_source_ip = be32_to_cpu(match.mask->src);
+ acl_m->l3_dest_ip = be32_to_cpu(match.mask->dst);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_ports(rule, &match);
+ acl_h->l4_source_port = be16_to_cpu(match.key->src);
+ acl_h->l4_dest_port = be16_to_cpu(match.key->dst);
+ acl_m->l4_source_port = be16_to_cpu(match.mask->src);
+ acl_m->l4_dest_port = be16_to_cpu(match.mask->dst);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_ip(rule, &match);
+ if (match.mask->ttl != 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on TTL not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if ((match.mask->tos & 0x3) != 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on ECN not supported, only DSCP");
+ return -EOPNOTSUPP;
+ }
+
+ acl_h->l3_dscp = match.key->tos >> 2;
+ acl_m->l3_dscp = match.mask->tos >> 2;
+ }
+
+ return 0;
+}
+
+int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block,
+ struct dpaa2_switch_acl_entry *entry)
+{
+ struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
+ struct ethsw_core *ethsw = filter_block->ethsw;
+ struct dpsw_acl_key *acl_key = &entry->key;
+ struct device *dev = ethsw->dev;
+ u8 *cmd_buff;
+ int err;
+
+ cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL);
+ if (!cmd_buff)
+ return -ENOMEM;
+
+ dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff);
+
+ acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff,
+ DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
+ dev_err(dev, "DMA mapping failed\n");
+ kfree(cmd_buff);
+ return -EFAULT;
+ }
+
+ err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ filter_block->acl_id, acl_entry_cfg);
+
+ dma_unmap_single(dev, acl_entry_cfg->key_iova,
+ DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
+ DMA_TO_DEVICE);
+ if (err) {
+ dev_err(dev, "dpsw_acl_add_entry() failed %d\n", err);
+ kfree(cmd_buff);
+ return err;
+ }
+
+ kfree(cmd_buff);
+
+ return 0;
+}
+
+static int
+dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block,
+ struct dpaa2_switch_acl_entry *entry)
+{
+ struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
+ struct dpsw_acl_key *acl_key = &entry->key;
+ struct ethsw_core *ethsw = block->ethsw;
+ struct device *dev = ethsw->dev;
+ u8 *cmd_buff;
+ int err;
+
+ cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL);
+ if (!cmd_buff)
+ return -ENOMEM;
+
+ dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff);
+
+ acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff,
+ DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
+ dev_err(dev, "DMA mapping failed\n");
+ kfree(cmd_buff);
+ return -EFAULT;
+ }
+
+ err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ block->acl_id, acl_entry_cfg);
+
+ dma_unmap_single(dev, acl_entry_cfg->key_iova,
+ DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE);
+ if (err) {
+ dev_err(dev, "dpsw_acl_remove_entry() failed %d\n", err);
+ kfree(cmd_buff);
+ return err;
+ }
+
+ kfree(cmd_buff);
+
+ return 0;
+}
+
+static int
+dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_filter_block *block,
+ struct dpaa2_switch_acl_entry *entry)
+{
+ struct dpaa2_switch_acl_entry *tmp;
+ struct list_head *pos, *n;
+ int index = 0;
+
+ if (list_empty(&block->acl_entries)) {
+ list_add(&entry->list, &block->acl_entries);
+ return index;
+ }
+
+ list_for_each_safe(pos, n, &block->acl_entries) {
+ tmp = list_entry(pos, struct dpaa2_switch_acl_entry, list);
+ if (entry->prio < tmp->prio)
+ break;
+ index++;
+ }
+ list_add(&entry->list, pos->prev);
+ return index;
+}
+
+static struct dpaa2_switch_acl_entry*
+dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_filter_block *block,
+ int index)
+{
+ struct dpaa2_switch_acl_entry *tmp;
+ int i = 0;
+
+ list_for_each_entry(tmp, &block->acl_entries, list) {
+ if (i == index)
+ return tmp;
+ ++i;
+ }
+
+ return NULL;
+}
+
+static int
+dpaa2_switch_acl_entry_set_precedence(struct dpaa2_switch_filter_block *block,
+ struct dpaa2_switch_acl_entry *entry,
+ int precedence)
+{
+ int err;
+
+ err = dpaa2_switch_acl_entry_remove(block, entry);
+ if (err)
+ return err;
+
+ entry->cfg.precedence = precedence;
+ return dpaa2_switch_acl_entry_add(block, entry);
+}
+
+static int
+dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_filter_block *block,
+ struct dpaa2_switch_acl_entry *entry)
+{
+ struct dpaa2_switch_acl_entry *tmp;
+ int index, i, precedence, err;
+
+ /* Add the new ACL entry to the linked list and get its index */
+ index = dpaa2_switch_acl_entry_add_to_list(block, entry);
+
+	/* Move the ACL entries up in priority to make space
+ * for the new filter.
+ */
+ precedence = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES - block->num_acl_rules - 1;
+ for (i = 0; i < index; i++) {
+ tmp = dpaa2_switch_acl_entry_get_by_index(block, i);
+
+ err = dpaa2_switch_acl_entry_set_precedence(block, tmp,
+ precedence);
+ if (err)
+ return err;
+
+ precedence++;
+ }
+
+ /* Add the new entry to hardware */
+ entry->cfg.precedence = precedence;
+ err = dpaa2_switch_acl_entry_add(block, entry);
+ block->num_acl_rules++;
+
+ return err;
+}
+
+static struct dpaa2_switch_acl_entry *
+dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_filter_block *block,
+ unsigned long cookie)
+{
+ struct dpaa2_switch_acl_entry *tmp, *n;
+
+ list_for_each_entry_safe(tmp, n, &block->acl_entries, list) {
+ if (tmp->cookie == cookie)
+ return tmp;
+ }
+ return NULL;
+}
+
+static int
+dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_filter_block *block,
+ struct dpaa2_switch_acl_entry *entry)
+{
+ struct dpaa2_switch_acl_entry *tmp, *n;
+ int index = 0;
+
+ list_for_each_entry_safe(tmp, n, &block->acl_entries, list) {
+ if (tmp->cookie == entry->cookie)
+ return index;
+ index++;
+ }
+ return -ENOENT;
+}
+
+static struct dpaa2_switch_mirror_entry *
+dpaa2_switch_mirror_find_entry_by_cookie(struct dpaa2_switch_filter_block *block,
+ unsigned long cookie)
+{
+ struct dpaa2_switch_mirror_entry *tmp, *n;
+
+ list_for_each_entry_safe(tmp, n, &block->mirror_entries, list) {
+ if (tmp->cookie == cookie)
+ return tmp;
+ }
+ return NULL;
+}
+
+static int
+dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_filter_block *block,
+ struct dpaa2_switch_acl_entry *entry)
+{
+ struct dpaa2_switch_acl_entry *tmp;
+ int index, i, precedence, err;
+
+ index = dpaa2_switch_acl_entry_get_index(block, entry);
+
+	/* Remove the ACL entry from hardware */
+ err = dpaa2_switch_acl_entry_remove(block, entry);
+ if (err)
+ return err;
+
+ block->num_acl_rules--;
+
+ /* Remove it from the list also */
+ list_del(&entry->list);
+
+	/* Move the entries above the deleted one down in priority */
+ precedence = entry->cfg.precedence;
+ for (i = index - 1; i >= 0; i--) {
+ tmp = dpaa2_switch_acl_entry_get_by_index(block, i);
+ err = dpaa2_switch_acl_entry_set_precedence(block, tmp,
+ precedence);
+ if (err)
+ return err;
+
+ precedence--;
+ }
+
+ kfree(entry);
+
+ return 0;
+}
+
+static int dpaa2_switch_tc_parse_action_acl(struct ethsw_core *ethsw,
+ struct flow_action_entry *cls_act,
+ struct dpsw_acl_result *dpsw_act,
+ struct netlink_ext_ack *extack)
+{
+ int err = 0;
+
+ switch (cls_act->id) {
+ case FLOW_ACTION_TRAP:
+ dpsw_act->action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF;
+ break;
+ case FLOW_ACTION_REDIRECT:
+ if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Destination not a DPAA2 switch port");
+ return -EOPNOTSUPP;
+ }
+
+ dpsw_act->if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);
+ dpsw_act->action = DPSW_ACL_ACTION_REDIRECT;
+ break;
+ case FLOW_ACTION_DROP:
+ dpsw_act->action = DPSW_ACL_ACTION_DROP;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Action not supported");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+out:
+ return err;
+}
+
+static int
+dpaa2_switch_block_add_mirror(struct dpaa2_switch_filter_block *block,
+ struct dpaa2_switch_mirror_entry *entry,
+ u16 to, struct netlink_ext_ack *extack)
+{
+ unsigned long block_ports = block->ports;
+ struct ethsw_core *ethsw = block->ethsw;
+ struct ethsw_port_priv *port_priv;
+ unsigned long ports_added = 0;
+ u16 vlan = entry->cfg.vlan_id;
+ bool mirror_port_enabled;
+ int err, port;
+
+ /* Setup the mirroring port */
+ mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
+ if (!mirror_port_enabled) {
+ err = dpsw_set_reflection_if(ethsw->mc_io, 0,
+ ethsw->dpsw_handle, to);
+ if (err)
+ return err;
+ ethsw->mirror_port = to;
+ }
+
+ /* Setup the same egress mirroring configuration on all the switch
+ * ports that share the same filter block.
+ */
+ for_each_set_bit(port, &block_ports, ethsw->sw_attr.num_ifs) {
+ port_priv = ethsw->ports[port];
+
+ /* We cannot add a per VLAN mirroring rule if the VLAN in
+ * question is not installed on the switch port.
+ */
+ if (entry->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_VLAN &&
+ !(port_priv->vlans[vlan] & ETHSW_VLAN_MEMBER)) {
+ NL_SET_ERR_MSG(extack,
+ "VLAN must be installed on the switch port");
+ err = -EINVAL;
+ goto err_remove_filters;
+ }
+
+ err = dpsw_if_add_reflection(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port, &entry->cfg);
+ if (err)
+ goto err_remove_filters;
+
+ ports_added |= BIT(port);
+ }
+
+ list_add(&entry->list, &block->mirror_entries);
+
+ return 0;
+
+err_remove_filters:
+ for_each_set_bit(port, &ports_added, ethsw->sw_attr.num_ifs) {
+ dpsw_if_remove_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port, &entry->cfg);
+ }
+
+ if (!mirror_port_enabled)
+ ethsw->mirror_port = ethsw->sw_attr.num_ifs;
+
+ return err;
+}
+
+static int
+dpaa2_switch_block_remove_mirror(struct dpaa2_switch_filter_block *block,
+ struct dpaa2_switch_mirror_entry *entry)
+{
+ struct dpsw_reflection_cfg *cfg = &entry->cfg;
+ unsigned long block_ports = block->ports;
+ struct ethsw_core *ethsw = block->ethsw;
+ int port;
+
+ /* Remove this mirroring configuration from all the ports belonging to
+ * the filter block.
+ */
+ for_each_set_bit(port, &block_ports, ethsw->sw_attr.num_ifs)
+ dpsw_if_remove_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port, cfg);
+
+ /* Also remove it from the list of mirror filters */
+ list_del(&entry->list);
+ kfree(entry);
+
+ /* If this was the last mirror filter, then unset the mirror port */
+ if (list_empty(&block->mirror_entries))
+ ethsw->mirror_port = ethsw->sw_attr.num_ifs;
+
+ return 0;
+}
+
+static int
+dpaa2_switch_cls_flower_replace_acl(struct dpaa2_switch_filter_block *block,
+ struct flow_cls_offload *cls)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct dpaa2_switch_acl_entry *acl_entry;
+ struct ethsw_core *ethsw = block->ethsw;
+ struct flow_action_entry *act;
+ int err;
+
+ if (dpaa2_switch_acl_tbl_is_full(block)) {
+ NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
+ return -ENOMEM;
+ }
+
+ acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL);
+ if (!acl_entry)
+ return -ENOMEM;
+
+ err = dpaa2_switch_flower_parse_key(cls, &acl_entry->key);
+ if (err)
+ goto free_acl_entry;
+
+ act = &rule->action.entries[0];
+ err = dpaa2_switch_tc_parse_action_acl(ethsw, act,
+ &acl_entry->cfg.result, extack);
+ if (err)
+ goto free_acl_entry;
+
+ acl_entry->prio = cls->common.prio;
+ acl_entry->cookie = cls->cookie;
+
+ err = dpaa2_switch_acl_tbl_add_entry(block, acl_entry);
+ if (err)
+ goto free_acl_entry;
+
+ return 0;
+
+free_acl_entry:
+ kfree(acl_entry);
+
+ return err;
+}
+
+static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
+ u16 *vlan)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
+ struct netlink_ext_ack *extack = cls->common.extack;
+ int ret = -EOPNOTSUPP;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Mirroring is supported only per VLAN");
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+
+ if (match.mask->vlan_priority != 0 ||
+ match.mask->vlan_dei != 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only matching on VLAN ID supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (match.mask->vlan_id != 0xFFF) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Masked matching not supported");
+ return -EOPNOTSUPP;
+ }
+
+ *vlan = (u16)match.key->vlan_id;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int
+dpaa2_switch_cls_flower_replace_mirror(struct dpaa2_switch_filter_block *block,
+ struct flow_cls_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct dpaa2_switch_mirror_entry *mirror_entry;
+ struct ethsw_core *ethsw = block->ethsw;
+ struct dpaa2_switch_mirror_entry *tmp;
+ struct flow_action_entry *cls_act;
+ struct list_head *pos, *n;
+ bool mirror_port_enabled;
+ u16 if_id, vlan;
+ int err;
+
+ mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
+ cls_act = &cls->rule->action.entries[0];
+
+ /* Offload rules only when the destination is a DPAA2 switch port */
+ if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Destination not a DPAA2 switch port");
+ return -EOPNOTSUPP;
+ }
+ if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);
+
+ /* We have a single mirror port but can configure egress mirroring on
+ * all the other switch ports. We need to allow mirroring rules only
+ * when the destination port is the same.
+ */
+ if (mirror_port_enabled && ethsw->mirror_port != if_id) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Multiple mirror ports not supported");
+ return -EBUSY;
+ }
+
+ /* Parse the key */
+ err = dpaa2_switch_flower_parse_mirror_key(cls, &vlan);
+ if (err)
+ return err;
+
+ /* Make sure that we don't already have a mirror rule with the same
+ * configuration.
+ */
+ list_for_each_safe(pos, n, &block->mirror_entries) {
+ tmp = list_entry(pos, struct dpaa2_switch_mirror_entry, list);
+
+ if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_VLAN &&
+ tmp->cfg.vlan_id == vlan) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VLAN mirror filter already installed");
+ return -EBUSY;
+ }
+ }
+
+ mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL);
+ if (!mirror_entry)
+ return -ENOMEM;
+
+ mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_VLAN;
+ mirror_entry->cfg.vlan_id = vlan;
+ mirror_entry->cookie = cls->cookie;
+
+ return dpaa2_switch_block_add_mirror(block, mirror_entry, if_id,
+ extack);
+}
+
+int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_filter_block *block,
+ struct flow_cls_offload *cls)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct flow_action_entry *act;
+
+ if (!flow_offload_has_one_action(&rule->action)) {
+ NL_SET_ERR_MSG(extack, "Only singular actions are supported");
+ return -EOPNOTSUPP;
+ }
+
+ act = &rule->action.entries[0];
+ switch (act->id) {
+ case FLOW_ACTION_REDIRECT:
+ case FLOW_ACTION_TRAP:
+ case FLOW_ACTION_DROP:
+ return dpaa2_switch_cls_flower_replace_acl(block, cls);
+ case FLOW_ACTION_MIRRED:
+ return dpaa2_switch_cls_flower_replace_mirror(block, cls);
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Action not supported");
+ return -EOPNOTSUPP;
+ }
+}
+
+int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_filter_block *block,
+ struct flow_cls_offload *cls)
+{
+ struct dpaa2_switch_mirror_entry *mirror_entry;
+ struct dpaa2_switch_acl_entry *acl_entry;
+
+	/* If this filter is an ACL one, remove it */
+ acl_entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(block,
+ cls->cookie);
+ if (acl_entry)
+ return dpaa2_switch_acl_tbl_remove_entry(block, acl_entry);
+
+ /* If not, then it has to be a mirror */
+ mirror_entry = dpaa2_switch_mirror_find_entry_by_cookie(block,
+ cls->cookie);
+ if (mirror_entry)
+ return dpaa2_switch_block_remove_mirror(block,
+ mirror_entry);
+
+ return 0;
+}
+
+static int
+dpaa2_switch_cls_matchall_replace_acl(struct dpaa2_switch_filter_block *block,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct ethsw_core *ethsw = block->ethsw;
+ struct dpaa2_switch_acl_entry *acl_entry;
+ struct flow_action_entry *act;
+ int err;
+
+ if (dpaa2_switch_acl_tbl_is_full(block)) {
+ NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
+ return -ENOMEM;
+ }
+
+ acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL);
+ if (!acl_entry)
+ return -ENOMEM;
+
+ act = &cls->rule->action.entries[0];
+ err = dpaa2_switch_tc_parse_action_acl(ethsw, act,
+ &acl_entry->cfg.result, extack);
+ if (err)
+ goto free_acl_entry;
+
+ acl_entry->prio = cls->common.prio;
+ acl_entry->cookie = cls->cookie;
+
+ err = dpaa2_switch_acl_tbl_add_entry(block, acl_entry);
+ if (err)
+ goto free_acl_entry;
+
+ return 0;
+
+free_acl_entry:
+ kfree(acl_entry);
+
+ return err;
+}
+
+static int
+dpaa2_switch_cls_matchall_replace_mirror(struct dpaa2_switch_filter_block *block,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct dpaa2_switch_mirror_entry *mirror_entry;
+ struct ethsw_core *ethsw = block->ethsw;
+ struct dpaa2_switch_mirror_entry *tmp;
+ struct flow_action_entry *cls_act;
+ struct list_head *pos, *n;
+ bool mirror_port_enabled;
+ u16 if_id;
+
+ mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
+ cls_act = &cls->rule->action.entries[0];
+
+ /* Offload rules only when the destination is a DPAA2 switch port */
+ if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Destination not a DPAA2 switch port");
+ return -EOPNOTSUPP;
+ }
+ if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);
+
+ /* We have a single mirror port but can configure egress mirroring on
+ * all the other switch ports. We need to allow mirroring rules only
+ * when the destination port is the same.
+ */
+ if (mirror_port_enabled && ethsw->mirror_port != if_id) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Multiple mirror ports not supported");
+ return -EBUSY;
+ }
+
+ /* Make sure that we don't already have a mirror rule with the same
+ * configuration. One matchall rule per block is the maximum.
+ */
+ list_for_each_safe(pos, n, &block->mirror_entries) {
+ tmp = list_entry(pos, struct dpaa2_switch_mirror_entry, list);
+
+ if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_ALL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matchall mirror filter already installed");
+ return -EBUSY;
+ }
+ }
+
+ mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL);
+ if (!mirror_entry)
+ return -ENOMEM;
+
+ mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_ALL;
+ mirror_entry->cookie = cls->cookie;
+
+ return dpaa2_switch_block_add_mirror(block, mirror_entry, if_id,
+ extack);
+}
+
+int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_filter_block *block,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct flow_action_entry *act;
+
+ if (!flow_offload_has_one_action(&cls->rule->action)) {
+ NL_SET_ERR_MSG(extack, "Only singular actions are supported");
+ return -EOPNOTSUPP;
+ }
+
+ act = &cls->rule->action.entries[0];
+ switch (act->id) {
+ case FLOW_ACTION_REDIRECT:
+ case FLOW_ACTION_TRAP:
+ case FLOW_ACTION_DROP:
+ return dpaa2_switch_cls_matchall_replace_acl(block, cls);
+ case FLOW_ACTION_MIRRED:
+ return dpaa2_switch_cls_matchall_replace_mirror(block, cls);
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Action not supported");
+ return -EOPNOTSUPP;
+ }
+}
+
+int dpaa2_switch_block_offload_mirror(struct dpaa2_switch_filter_block *block,
+ struct ethsw_port_priv *port_priv)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpaa2_switch_mirror_entry *tmp;
+ int err;
+
+ list_for_each_entry(tmp, &block->mirror_entries, list) {
+ err = dpsw_if_add_reflection(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port_priv->idx, &tmp->cfg);
+ if (err)
+ goto unwind_add;
+ }
+
+ return 0;
+
+unwind_add:
+ list_for_each_entry(tmp, &block->mirror_entries, list)
+ dpsw_if_remove_reflection(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port_priv->idx, &tmp->cfg);
+
+ return err;
+}
+
+int dpaa2_switch_block_unoffload_mirror(struct dpaa2_switch_filter_block *block,
+ struct ethsw_port_priv *port_priv)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpaa2_switch_mirror_entry *tmp;
+ int err;
+
+ list_for_each_entry(tmp, &block->mirror_entries, list) {
+ err = dpsw_if_remove_reflection(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port_priv->idx, &tmp->cfg);
+ if (err)
+ goto unwind_remove;
+ }
+
+ return 0;
+
+unwind_remove:
+ list_for_each_entry(tmp, &block->mirror_entries, list)
+ dpsw_if_add_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port_priv->idx, &tmp->cfg);
+
+ return err;
+}
+
+int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_filter_block *block,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct dpaa2_switch_mirror_entry *mirror_entry;
+ struct dpaa2_switch_acl_entry *acl_entry;
+
+	/* If this filter is an ACL one, remove it */
+ acl_entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(block,
+ cls->cookie);
+ if (acl_entry)
+ return dpaa2_switch_acl_tbl_remove_entry(block,
+ acl_entry);
+
+ /* If not, then it has to be a mirror */
+ mirror_entry = dpaa2_switch_mirror_find_entry_by_cookie(block,
+ cls->cookie);
+ if (mirror_entry)
+ return dpaa2_switch_block_remove_mirror(block,
+ mirror_entry);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
new file mode 100644
index 000000000..b98ef4ba1
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -0,0 +1,3531 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DPAA2 Ethernet Switch driver
+ *
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2021 NXP
+ *
+ */
+
+#include <linux/module.h>
+
+#include <linux/interrupt.h>
+#include <linux/msi.h>
+#include <linux/kthread.h>
+#include <linux/workqueue.h>
+#include <linux/iommu.h>
+#include <net/pkt_cls.h>
+
+#include <linux/fsl/mc.h>
+
+#include "dpaa2-switch.h"
+
+/* Minimal supported DPSW version */
+#define DPSW_MIN_VER_MAJOR 8
+#define DPSW_MIN_VER_MINOR 9
+
+#define DEFAULT_VLAN_ID 1
+
+static u16 dpaa2_switch_port_get_fdb_id(struct ethsw_port_priv *port_priv)
+{
+ return port_priv->fdb->fdb_id;
+}
+
+static struct dpaa2_switch_fdb *dpaa2_switch_fdb_get_unused(struct ethsw_core *ethsw)
+{
+ int i;
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
+ if (!ethsw->fdbs[i].in_use)
+ return &ethsw->fdbs[i];
+ return NULL;
+}
+
+static struct dpaa2_switch_filter_block *
+dpaa2_switch_filter_block_get_unused(struct ethsw_core *ethsw)
+{
+ int i;
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
+ if (!ethsw->filter_blocks[i].in_use)
+ return &ethsw->filter_blocks[i];
+ return NULL;
+}
+
+static u16 dpaa2_switch_port_set_fdb(struct ethsw_port_priv *port_priv,
+ struct net_device *bridge_dev)
+{
+ struct ethsw_port_priv *other_port_priv = NULL;
+ struct dpaa2_switch_fdb *fdb;
+ struct net_device *other_dev;
+ struct list_head *iter;
+
+ /* If we leave a bridge (bridge_dev is NULL), find an unused
+ * FDB and use that.
+ */
+ if (!bridge_dev) {
+ fdb = dpaa2_switch_fdb_get_unused(port_priv->ethsw_data);
+
+ /* If there is no unused FDB, we must be the last port that
+		 * leaves the last bridge; all the others are standalone. We
+ * can just keep the FDB that we already have.
+ */
+
+ if (!fdb) {
+ port_priv->fdb->bridge_dev = NULL;
+ return 0;
+ }
+
+ port_priv->fdb = fdb;
+ port_priv->fdb->in_use = true;
+ port_priv->fdb->bridge_dev = NULL;
+ return 0;
+ }
+
+	/* The below call to netdev_for_each_lower_dev() requires the RTNL lock
+	 * to be held. Assert on it so that it's easier to catch new code
+ * paths that reach this point without the RTNL lock.
+ */
+ ASSERT_RTNL();
+
+ /* If part of a bridge, use the FDB of the first dpaa2 switch interface
+ * to be present in that bridge
+ */
+ netdev_for_each_lower_dev(bridge_dev, other_dev, iter) {
+ if (!dpaa2_switch_port_dev_check(other_dev))
+ continue;
+
+ if (other_dev == port_priv->netdev)
+ continue;
+
+ other_port_priv = netdev_priv(other_dev);
+ break;
+ }
+
+ /* The current port is about to change its FDB to the one used by the
+ * first port that joined the bridge.
+ */
+ if (other_port_priv) {
+ /* The previous FDB is about to become unused, since the
+ * interface is no longer standalone.
+ */
+ port_priv->fdb->in_use = false;
+ port_priv->fdb->bridge_dev = NULL;
+
+ /* Get a reference to the new FDB */
+ port_priv->fdb = other_port_priv->fdb;
+ }
+
+ /* Keep track of the new upper bridge device */
+ port_priv->fdb->bridge_dev = bridge_dev;
+
+ return 0;
+}
+
+static void dpaa2_switch_fdb_get_flood_cfg(struct ethsw_core *ethsw, u16 fdb_id,
+ enum dpsw_flood_type type,
+ struct dpsw_egress_flood_cfg *cfg)
+{
+ int i = 0, j;
+
+ memset(cfg, 0, sizeof(*cfg));
+
+ /* Add all the DPAA2 switch ports found in the same bridging domain to
+ * the egress flooding domain
+ */
+ for (j = 0; j < ethsw->sw_attr.num_ifs; j++) {
+ if (!ethsw->ports[j])
+ continue;
+ if (ethsw->ports[j]->fdb->fdb_id != fdb_id)
+ continue;
+
+ if (type == DPSW_BROADCAST && ethsw->ports[j]->bcast_flood)
+ cfg->if_id[i++] = ethsw->ports[j]->idx;
+ else if (type == DPSW_FLOODING && ethsw->ports[j]->ucast_flood)
+ cfg->if_id[i++] = ethsw->ports[j]->idx;
+ }
+
+ /* Add the CTRL interface to the egress flooding domain */
+ cfg->if_id[i++] = ethsw->sw_attr.num_ifs;
+
+ cfg->fdb_id = fdb_id;
+ cfg->flood_type = type;
+ cfg->num_ifs = i;
+}
+
+static int dpaa2_switch_fdb_set_egress_flood(struct ethsw_core *ethsw, u16 fdb_id)
+{
+ struct dpsw_egress_flood_cfg flood_cfg;
+ int err;
+
+ /* Setup broadcast flooding domain */
+ dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_BROADCAST, &flood_cfg);
+ err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &flood_cfg);
+ if (err) {
+ dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err);
+ return err;
+ }
+
+ /* Setup unknown flooding domain */
+ dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_FLOODING, &flood_cfg);
+ err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &flood_cfg);
+ if (err) {
+ dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
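+/* Translate a DMA address (IOVA) received from the hardware into a kernel
+ * virtual address. When no IOMMU domain is attached, the IOVA is already a
+ * physical address.
+ */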
+static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
+ dma_addr_t iova_addr)
+{
+ phys_addr_t phys_addr;
+
+ phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
+
+ return phys_to_virt(phys_addr);
+}
+
+static int dpaa2_switch_add_vlan(struct ethsw_port_priv *port_priv, u16 vid)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpsw_vlan_cfg vcfg = {0};
+ int err;
+
+ vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
+ err = dpsw_vlan_add(ethsw->mc_io, 0,
+ ethsw->dpsw_handle, vid, &vcfg);
+ if (err) {
+ dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err);
+ return err;
+ }
+ ethsw->vlans[vid] = ETHSW_VLAN_MEMBER;
+
+ return 0;
+}
+
+static bool dpaa2_switch_port_is_up(struct ethsw_port_priv *port_priv)
+{
+ struct net_device *netdev = port_priv->netdev;
+ struct dpsw_link_state state;
+ int err;
+
+ err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx, &state);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
+ return true;
+ }
+
+ WARN_ONCE(state.up > 1, "Garbage read into link_state");
+
+ return state.up ? true : false;
+}
+
+static int dpaa2_switch_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct net_device *netdev = port_priv->netdev;
+ struct dpsw_tci_cfg tci_cfg = { 0 };
+ bool up;
+ int err, ret;
+
+ err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port_priv->idx, &tci_cfg);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_get_tci err %d\n", err);
+ return err;
+ }
+
+ tci_cfg.vlan_id = pvid;
+
+ /* Interface needs to be down to change PVID */
+ up = dpaa2_switch_port_is_up(port_priv);
+ if (up) {
+ err = dpsw_if_disable(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port_priv->idx);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_disable err %d\n", err);
+ return err;
+ }
+ }
+
+ err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port_priv->idx, &tci_cfg);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
+ goto set_tci_error;
+ }
+
+ /* Delete previous PVID info and mark the new one */
+ port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID;
+ port_priv->vlans[pvid] |= ETHSW_VLAN_PVID;
+ port_priv->pvid = pvid;
+
+set_tci_error:
+ if (up) {
+ ret = dpsw_if_enable(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port_priv->idx);
+ if (ret) {
+ netdev_err(netdev, "dpsw_if_enable err %d\n", ret);
+ return ret;
+ }
+ }
+
+ return err;
+}
+
+static int dpaa2_switch_port_add_vlan(struct ethsw_port_priv *port_priv,
+ u16 vid, u16 flags)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct net_device *netdev = port_priv->netdev;
+ struct dpsw_vlan_if_cfg vcfg = {0};
+ int err;
+
+ if (port_priv->vlans[vid]) {
+ netdev_warn(netdev, "VLAN %d already configured\n", vid);
+ return -EEXIST;
+ }
+
+ /* If hit, this VLAN rule will lead the packet into the FDB table
+ * specified in the vlan configuration below
+ */
+ vcfg.num_ifs = 1;
+ vcfg.if_id[0] = port_priv->idx;
+ vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
+ vcfg.options |= DPSW_VLAN_ADD_IF_OPT_FDB_ID;
+ err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg);
+ if (err) {
+ netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
+ return err;
+ }
+
+ port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;
+
+ if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
+ err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ vid, &vcfg);
+ if (err) {
+ netdev_err(netdev,
+ "dpsw_vlan_add_if_untagged err %d\n", err);
+ return err;
+ }
+ port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
+ }
+
+ if (flags & BRIDGE_VLAN_INFO_PVID) {
+ err = dpaa2_switch_port_set_pvid(port_priv, vid);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static enum dpsw_stp_state br_stp_state_to_dpsw(u8 state)
+{
+ switch (state) {
+ case BR_STATE_DISABLED:
+ return DPSW_STP_STATE_DISABLED;
+ case BR_STATE_LISTENING:
+ return DPSW_STP_STATE_LISTENING;
+ case BR_STATE_LEARNING:
+ return DPSW_STP_STATE_LEARNING;
+ case BR_STATE_FORWARDING:
+ return DPSW_STP_STATE_FORWARDING;
+ case BR_STATE_BLOCKING:
+ return DPSW_STP_STATE_BLOCKING;
+ default:
+ return DPSW_STP_STATE_DISABLED;
+ }
+}
+
+static int dpaa2_switch_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state)
+{
+ struct dpsw_stp_cfg stp_cfg = {0};
+ int err;
+ u16 vid;
+
+ if (!netif_running(port_priv->netdev) || state == port_priv->stp_state)
+ return 0; /* Nothing to do */
+
+ stp_cfg.state = br_stp_state_to_dpsw(state);
+ for (vid = 0; vid <= VLAN_VID_MASK; vid++) {
+ if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
+ stp_cfg.vlan_id = vid;
+ err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx, &stp_cfg);
+ if (err) {
+ netdev_err(port_priv->netdev,
+ "dpsw_if_set_stp err %d\n", err);
+ return err;
+ }
+ }
+ }
+
+ port_priv->stp_state = state;
+
+ return 0;
+}
+
+static int dpaa2_switch_dellink(struct ethsw_core *ethsw, u16 vid)
+{
+ struct ethsw_port_priv *ppriv_local = NULL;
+ int i, err;
+
+ if (!ethsw->vlans[vid])
+ return -ENOENT;
+
+ err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid);
+ if (err) {
+ dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err);
+ return err;
+ }
+ ethsw->vlans[vid] = 0;
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ ppriv_local = ethsw->ports[i];
+ if (ppriv_local)
+ ppriv_local->vlans[vid] = 0;
+ }
+
+ return 0;
+}
+
+static int dpaa2_switch_port_fdb_add_uc(struct ethsw_port_priv *port_priv,
+ const unsigned char *addr)
+{
+ struct dpsw_fdb_unicast_cfg entry = {0};
+ u16 fdb_id;
+ int err;
+
+ entry.if_egress = port_priv->idx;
+ entry.type = DPSW_FDB_ENTRY_STATIC;
+ ether_addr_copy(entry.mac_addr, addr);
+
+ fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
+ err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ fdb_id, &entry);
+ if (err)
+ netdev_err(port_priv->netdev,
+ "dpsw_fdb_add_unicast err %d\n", err);
+ return err;
+}
+
+static int dpaa2_switch_port_fdb_del_uc(struct ethsw_port_priv *port_priv,
+ const unsigned char *addr)
+{
+ struct dpsw_fdb_unicast_cfg entry = {0};
+ u16 fdb_id;
+ int err;
+
+ entry.if_egress = port_priv->idx;
+ entry.type = DPSW_FDB_ENTRY_STATIC;
+ ether_addr_copy(entry.mac_addr, addr);
+
+ fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
+ err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ fdb_id, &entry);
+ /* Silently discard the error when the del command is issued multiple times */
+ if (err && err != -ENXIO)
+ netdev_err(port_priv->netdev,
+ "dpsw_fdb_remove_unicast err %d\n", err);
+ return err;
+}
+
+static int dpaa2_switch_port_fdb_add_mc(struct ethsw_port_priv *port_priv,
+ const unsigned char *addr)
+{
+ struct dpsw_fdb_multicast_cfg entry = {0};
+ u16 fdb_id;
+ int err;
+
+ ether_addr_copy(entry.mac_addr, addr);
+ entry.type = DPSW_FDB_ENTRY_STATIC;
+ entry.num_ifs = 1;
+ entry.if_id[0] = port_priv->idx;
+
+ fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
+ err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ fdb_id, &entry);
+ /* Silently discard the error when the add command is issued multiple times */
+ if (err && err != -ENXIO)
+ netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n",
+ err);
+ return err;
+}
+
+static int dpaa2_switch_port_fdb_del_mc(struct ethsw_port_priv *port_priv,
+ const unsigned char *addr)
+{
+ struct dpsw_fdb_multicast_cfg entry = {0};
+ u16 fdb_id;
+ int err;
+
+ ether_addr_copy(entry.mac_addr, addr);
+ entry.type = DPSW_FDB_ENTRY_STATIC;
+ entry.num_ifs = 1;
+ entry.if_id[0] = port_priv->idx;
+
+ fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
+ err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ fdb_id, &entry);
+ /* Silently discard the error when the del command is issued multiple times */
+ if (err && err != -ENAVAIL)
+ netdev_err(port_priv->netdev,
+ "dpsw_fdb_remove_multicast err %d\n", err);
+ return err;
+}
+
+static void dpaa2_switch_port_get_stats(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ u64 tmp;
+ int err;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_ING_FRAME, &stats->rx_packets);
+ if (err)
+ goto error;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_EGR_FRAME, &stats->tx_packets);
+ if (err)
+ goto error;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_ING_BYTE, &stats->rx_bytes);
+ if (err)
+ goto error;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_EGR_BYTE, &stats->tx_bytes);
+ if (err)
+ goto error;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_ING_FRAME_DISCARD,
+ &stats->rx_dropped);
+ if (err)
+ goto error;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_ING_FLTR_FRAME,
+ &tmp);
+ if (err)
+ goto error;
+ stats->rx_dropped += tmp;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_EGR_FRAME_DISCARD,
+ &stats->tx_dropped);
+ if (err)
+ goto error;
+
+ return;
+
+error:
+ netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
+}
+
+static bool dpaa2_switch_port_has_offload_stats(const struct net_device *netdev,
+ int attr_id)
+{
+ return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT);
+}
+
+static int dpaa2_switch_port_get_offload_stats(int attr_id,
+ const struct net_device *netdev,
+ void *sp)
+{
+ switch (attr_id) {
+ case IFLA_OFFLOAD_XSTATS_CPU_HIT:
+ dpaa2_switch_port_get_stats((struct net_device *)netdev, sp);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int err;
+
+ err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io,
+ 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ (u16)ETHSW_L2_MAX_FRM(mtu));
+ if (err) {
+ netdev_err(netdev,
+ "dpsw_if_set_max_frame_length() err %d\n", err);
+ return err;
+ }
+
+ netdev->mtu = mtu;
+ return 0;
+}
+
+static int dpaa2_switch_port_link_state_update(struct net_device *netdev)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct dpsw_link_state state;
+ int err;
+
+ /* When we manage the MAC/PHY using phylink there is no need
+ * to manually update the netif_carrier.
+ */
+ if (dpaa2_switch_port_is_type_phy(port_priv))
+ return 0;
+
+ /* Interrupts are received even though no one issued an 'ifconfig up'
+ * on the switch interface. Ignore these link state update interrupts
+ */
+ if (!netif_running(netdev))
+ return 0;
+
+ err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx, &state);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
+ return err;
+ }
+
+ WARN_ONCE(state.up > 1, "Garbage read into link_state");
+
+ if (state.up != port_priv->link_state) {
+ if (state.up) {
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+ } else {
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ }
+ port_priv->link_state = state.up;
+ }
+
+ return 0;
+}
+
+/* Manage all NAPI instances for the control interface.
+ *
+ * We only have one RX queue and one Tx Conf queue for all
+ * switch ports. Therefore, we only need to enable the NAPI instance once, the
+ * first time one of the switch ports runs .dev_open().
+ */
+
+static void dpaa2_switch_enable_ctrl_if_napi(struct ethsw_core *ethsw)
+{
+ int i;
+
+ /* Access to the ethsw->napi_users relies on the RTNL lock */
+ ASSERT_RTNL();
+
+ /* a new interface is using the NAPI instance */
+ ethsw->napi_users++;
+
+ /* if there is already a user of the instance, return */
+ if (ethsw->napi_users > 1)
+ return;
+
+ for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
+ napi_enable(&ethsw->fq[i].napi);
+}
+
+static void dpaa2_switch_disable_ctrl_if_napi(struct ethsw_core *ethsw)
+{
+ int i;
+
+ /* Access to the ethsw->napi_users relies on the RTNL lock */
+ ASSERT_RTNL();
+
+ /* If we are not the last interface using the NAPI, return */
+ ethsw->napi_users--;
+ if (ethsw->napi_users)
+ return;
+
+ for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
+ napi_disable(&ethsw->fq[i].napi);
+}
+
+static int dpaa2_switch_port_open(struct net_device *netdev)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ int err;
+
+ if (!dpaa2_switch_port_is_type_phy(port_priv)) {
+ /* Explicitly set carrier off, otherwise
+ * netif_carrier_ok() will return true and cause 'ip link show'
+ * to report the LOWER_UP flag, even though the link
+ * notification wasn't even received.
+ */
+ netif_carrier_off(netdev);
+ }
+
+ err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_enable err %d\n", err);
+ return err;
+ }
+
+ dpaa2_switch_enable_ctrl_if_napi(ethsw);
+
+ if (dpaa2_switch_port_is_type_phy(port_priv)) {
+ dpaa2_mac_start(port_priv->mac);
+ phylink_start(port_priv->mac->phylink);
+ }
+
+ return 0;
+}
+
+static int dpaa2_switch_port_stop(struct net_device *netdev)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ int err;
+
+ if (dpaa2_switch_port_is_type_phy(port_priv)) {
+ phylink_stop(port_priv->mac->phylink);
+ dpaa2_mac_stop(port_priv->mac);
+ } else {
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
+ }
+
+ err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_disable err %d\n", err);
+ return err;
+ }
+
+ dpaa2_switch_disable_ctrl_if_napi(ethsw);
+
+ return 0;
+}
+
+static int dpaa2_switch_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(dev);
+
+ ppid->id_len = 1;
+ ppid->id[0] = port_priv->ethsw_data->dev_id;
+
+ return 0;
+}
+
+static int dpaa2_switch_port_get_phys_name(struct net_device *netdev, char *name,
+ size_t len)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int err;
+
+ err = snprintf(name, len, "p%d", port_priv->idx);
+ if (err >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
+struct ethsw_dump_ctx {
+ struct net_device *dev;
+ struct sk_buff *skb;
+ struct netlink_callback *cb;
+ int idx;
+};
+
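+/* Emit one RTM_NEWNEIGH netlink message for a dumped FDB entry, skipping
+ * entries already reported by a previous iteration of the dump.
+ */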
+static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry,
+ struct ethsw_dump_ctx *dump)
+{
+ int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC;
+ u32 portid = NETLINK_CB(dump->cb->skb).portid;
+ u32 seq = dump->cb->nlh->nlmsg_seq;
+ struct nlmsghdr *nlh;
+ struct ndmsg *ndm;
+
+ if (dump->idx < dump->cb->args[2])
+ goto skip;
+
+ nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
+ sizeof(*ndm), NLM_F_MULTI);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ ndm = nlmsg_data(nlh);
+ ndm->ndm_family = AF_BRIDGE;
+ ndm->ndm_pad1 = 0;
+ ndm->ndm_pad2 = 0;
+ ndm->ndm_flags = NTF_SELF;
+ ndm->ndm_type = 0;
+ ndm->ndm_ifindex = dump->dev->ifindex;
+ ndm->ndm_state = is_dynamic ? NUD_REACHABLE : NUD_NOARP;
+
+ if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac_addr))
+ goto nla_put_failure;
+
+ nlmsg_end(dump->skb, nlh);
+
+skip:
+ dump->idx++;
+ return 0;
+
+nla_put_failure:
+ nlmsg_cancel(dump->skb, nlh);
+ return -EMSGSIZE;
+}
+
+static int dpaa2_switch_port_fdb_valid_entry(struct fdb_dump_entry *entry,
+ struct ethsw_port_priv *port_priv)
+{
+ int idx = port_priv->idx;
+ int valid;
+
+ if (entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
+ valid = entry->if_info == port_priv->idx;
+ else
+ valid = entry->if_mask[idx / 8] & BIT(idx % 8);
+
+ return valid;
+}
+
+static int dpaa2_switch_fdb_iterate(struct ethsw_port_priv *port_priv,
+ dpaa2_switch_fdb_cb_t cb, void *data)
+{
+ struct net_device *net_dev = port_priv->netdev;
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct device *dev = net_dev->dev.parent;
+ struct fdb_dump_entry *fdb_entries;
+ struct fdb_dump_entry fdb_entry;
+ dma_addr_t fdb_dump_iova;
+ u16 num_fdb_entries;
+ u32 fdb_dump_size;
+ int err = 0, i;
+ u8 *dma_mem;
+ u16 fdb_id;
+
+ fdb_dump_size = ethsw->sw_attr.max_fdb_entries * sizeof(fdb_entry);
+ dma_mem = kzalloc(fdb_dump_size, GFP_KERNEL);
+ if (!dma_mem)
+ return -ENOMEM;
+
+ fdb_dump_iova = dma_map_single(dev, dma_mem, fdb_dump_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, fdb_dump_iova)) {
+ netdev_err(net_dev, "dma_map_single() failed\n");
+ err = -ENOMEM;
+ goto err_map;
+ }
+
+ fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
+ err = dpsw_fdb_dump(ethsw->mc_io, 0, ethsw->dpsw_handle, fdb_id,
+ fdb_dump_iova, fdb_dump_size, &num_fdb_entries);
+ if (err) {
+ netdev_err(net_dev, "dpsw_fdb_dump() = %d\n", err);
+ goto err_dump;
+ }
+
+ dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);
+
+ fdb_entries = (struct fdb_dump_entry *)dma_mem;
+ for (i = 0; i < num_fdb_entries; i++) {
+ fdb_entry = fdb_entries[i];
+
+ err = cb(port_priv, &fdb_entry, data);
+ if (err)
+ goto end;
+ }
+
+end:
+ kfree(dma_mem);
+
+ return 0;
+
+err_dump:
+ dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);
+err_map:
+ kfree(dma_mem);
+ return err;
+}
+
+static int dpaa2_switch_fdb_entry_dump(struct ethsw_port_priv *port_priv,
+ struct fdb_dump_entry *fdb_entry,
+ void *data)
+{
+ if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv))
+ return 0;
+
+ return dpaa2_switch_fdb_dump_nl(fdb_entry, data);
+}
+
+static int dpaa2_switch_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ struct net_device *net_dev,
+ struct net_device *filter_dev, int *idx)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
+ struct ethsw_dump_ctx dump = {
+ .dev = net_dev,
+ .skb = skb,
+ .cb = cb,
+ .idx = *idx,
+ };
+ int err;
+
+ err = dpaa2_switch_fdb_iterate(port_priv, dpaa2_switch_fdb_entry_dump, &dump);
+ *idx = dump.idx;
+
+ return err;
+}
+
+static int dpaa2_switch_fdb_entry_fast_age(struct ethsw_port_priv *port_priv,
+ struct fdb_dump_entry *fdb_entry,
+ void *data __always_unused)
+{
+ if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv))
+ return 0;
+
+ if (!(fdb_entry->type & DPSW_FDB_ENTRY_TYPE_DYNAMIC))
+ return 0;
+
+ if (fdb_entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
+ dpaa2_switch_port_fdb_del_uc(port_priv, fdb_entry->mac_addr);
+ else
+ dpaa2_switch_port_fdb_del_mc(port_priv, fdb_entry->mac_addr);
+
+ return 0;
+}
+
+static void dpaa2_switch_port_fast_age(struct ethsw_port_priv *port_priv)
+{
+ dpaa2_switch_fdb_iterate(port_priv,
+ dpaa2_switch_fdb_entry_fast_age, NULL);
+}
+
+static int dpaa2_switch_port_vlan_add(struct net_device *netdev, __be16 proto,
+ u16 vid)
+{
+ struct switchdev_obj_port_vlan vlan = {
+ .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
+ .vid = vid,
+ .obj.orig_dev = netdev,
+ /* This API only allows programming tagged, non-PVID VIDs */
+ .flags = 0,
+ };
+
+ return dpaa2_switch_port_vlans_add(netdev, &vlan);
+}
+
+static int dpaa2_switch_port_vlan_kill(struct net_device *netdev, __be16 proto,
+ u16 vid)
+{
+ struct switchdev_obj_port_vlan vlan = {
+ .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
+ .vid = vid,
+ .obj.orig_dev = netdev,
+ /* This API only allows programming tagged, non-PVID VIDs */
+ .flags = 0,
+ };
+
+ return dpaa2_switch_port_vlans_del(netdev, &vlan);
+}
+
+static int dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv *port_priv)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct net_device *net_dev = port_priv->netdev;
+ struct device *dev = net_dev->dev.parent;
+ u8 mac_addr[ETH_ALEN];
+ int err;
+
+ if (!(ethsw->features & ETHSW_FEATURE_MAC_ADDR))
+ return 0;
+
+ /* Get firmware address, if any */
+ err = dpsw_if_get_port_mac_addr(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port_priv->idx, mac_addr);
+ if (err) {
+ dev_err(dev, "dpsw_if_get_port_mac_addr() failed\n");
+ return err;
+ }
+
+ /* First check if firmware has any address configured by bootloader */
+ if (!is_zero_ether_addr(mac_addr)) {
+ eth_hw_addr_set(net_dev, mac_addr);
+ } else {
+ /* No MAC address configured, fill in net_dev->dev_addr
+ * with a random one
+ */
+ eth_hw_addr_random(net_dev);
+ dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
+
+ /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
+ * practical purposes, this will be our "permanent" mac address,
+ * at least until the next reboot. This move will also permit
+ * register_netdevice() to properly fill up net_dev->perm_addr.
+ */
+ net_dev->addr_assign_type = NET_ADDR_PERM;
+ }
+
+ return 0;
+}
+
+static void dpaa2_switch_free_fd(const struct ethsw_core *ethsw,
+ const struct dpaa2_fd *fd)
+{
+ struct device *dev = ethsw->dev;
+ unsigned char *buffer_start;
+ struct sk_buff **skbh, *skb;
+ dma_addr_t fd_addr;
+
+ fd_addr = dpaa2_fd_get_addr(fd);
+ skbh = dpaa2_iova_to_virt(ethsw->iommu_domain, fd_addr);
+
+ skb = *skbh;
+ buffer_start = (unsigned char *)skbh;
+
+ dma_unmap_single(dev, fd_addr,
+ skb_tail_pointer(skb) - buffer_start,
+ DMA_TO_DEVICE);
+
+ /* Move on with skb release */
+ dev_kfree_skb(skb);
+}
+
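+/* Build a single-buffer Tx frame descriptor around the skb data and store a
+ * backpointer to the skb in the software annotation area so that it can be
+ * released on Tx confirmation.
+ */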
+static int dpaa2_switch_build_single_fd(struct ethsw_core *ethsw,
+ struct sk_buff *skb,
+ struct dpaa2_fd *fd)
+{
+ struct device *dev = ethsw->dev;
+ struct sk_buff **skbh;
+ dma_addr_t addr;
+ u8 *buff_start;
+ void *hwa;
+
+ buff_start = PTR_ALIGN(skb->data - DPAA2_SWITCH_TX_DATA_OFFSET -
+ DPAA2_SWITCH_TX_BUF_ALIGN,
+ DPAA2_SWITCH_TX_BUF_ALIGN);
+
+ /* Clear FAS to have consistent values for TX confirmation. It is
+ * located in the first 8 bytes of the buffer's hardware annotation
+ * area
+ */
+ hwa = buff_start + DPAA2_SWITCH_SWA_SIZE;
+ memset(hwa, 0, 8);
+
+ /* Store a backpointer to the skb at the beginning of the buffer
+ * (in the private data area) such that we can release it
+ * on Tx confirm
+ */
+ skbh = (struct sk_buff **)buff_start;
+ *skbh = skb;
+
+ addr = dma_map_single(dev, buff_start,
+ skb_tail_pointer(skb) - buff_start,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, addr)))
+ return -ENOMEM;
+
+ /* Setup the FD fields */
+ memset(fd, 0, sizeof(*fd));
+
+ dpaa2_fd_set_addr(fd, addr);
+ dpaa2_fd_set_offset(fd, (u16)(skb->data - buff_start));
+ dpaa2_fd_set_len(fd, skb->len);
+ dpaa2_fd_set_format(fd, dpaa2_fd_single);
+
+ return 0;
+}
+
+static netdev_tx_t dpaa2_switch_port_tx(struct sk_buff *skb,
+ struct net_device *net_dev)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ int retries = DPAA2_SWITCH_SWP_BUSY_RETRIES;
+ struct dpaa2_fd fd;
+ int err;
+
+ if (unlikely(skb_headroom(skb) < DPAA2_SWITCH_NEEDED_HEADROOM)) {
+ struct sk_buff *ns;
+
+ ns = skb_realloc_headroom(skb, DPAA2_SWITCH_NEEDED_HEADROOM);
+ if (unlikely(!ns)) {
+ net_err_ratelimited("%s: Error reallocating skb headroom\n", net_dev->name);
+ goto err_free_skb;
+ }
+ dev_consume_skb_any(skb);
+ skb = ns;
+ }
+
+ /* We'll be holding a back-reference to the skb until Tx confirmation */
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (unlikely(!skb)) {
+ /* skb_unshare() has already freed the skb */
+ net_err_ratelimited("%s: Error copying the socket buffer\n", net_dev->name);
+ goto err_exit;
+ }
+
+ /* At this stage, we do not support non-linear skbs so just try to
+ * linearize the skb and if that's not working, just drop the packet.
+ */
+ err = skb_linearize(skb);
+ if (err) {
+ net_err_ratelimited("%s: skb_linearize error (%d)!\n", net_dev->name, err);
+ goto err_free_skb;
+ }
+
+ err = dpaa2_switch_build_single_fd(ethsw, skb, &fd);
+ if (unlikely(err)) {
+ net_err_ratelimited("%s: ethsw_build_*_fd() %d\n", net_dev->name, err);
+ goto err_free_skb;
+ }
+
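+ /* Enqueue the frame descriptor on the port's Tx queuing destination,
+ * retrying a bounded number of times while the software portal is
+ * busy.
+ */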
+ do {
+ err = dpaa2_io_service_enqueue_qd(NULL,
+ port_priv->tx_qdid,
+ 8, 0, &fd);
+ retries--;
+ } while (err == -EBUSY && retries);
+
+ if (unlikely(err < 0)) {
+ dpaa2_switch_free_fd(ethsw, &fd);
+ goto err_exit;
+ }
+
+ return NETDEV_TX_OK;
+
+err_free_skb:
+ dev_kfree_skb(skb);
+err_exit:
+ return NETDEV_TX_OK;
+}
+
+static int
+dpaa2_switch_setup_tc_cls_flower(struct dpaa2_switch_filter_block *filter_block,
+ struct flow_cls_offload *f)
+{
+ switch (f->command) {
+ case FLOW_CLS_REPLACE:
+ return dpaa2_switch_cls_flower_replace(filter_block, f);
+ case FLOW_CLS_DESTROY:
+ return dpaa2_switch_cls_flower_destroy(filter_block, f);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+dpaa2_switch_setup_tc_cls_matchall(struct dpaa2_switch_filter_block *block,
+ struct tc_cls_matchall_offload *f)
+{
+ switch (f->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return dpaa2_switch_cls_matchall_replace(block, f);
+ case TC_CLSMATCHALL_DESTROY:
+ return dpaa2_switch_cls_matchall_destroy(block, f);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
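+/* Ingress tc block callback: dispatch flower and matchall classifier offloads
+ * to the filter block passed in as cb_priv.
+ */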
+static int dpaa2_switch_port_setup_tc_block_cb_ig(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv)
+{
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return dpaa2_switch_setup_tc_cls_flower(cb_priv, type_data);
+ case TC_SETUP_CLSMATCHALL:
+ return dpaa2_switch_setup_tc_cls_matchall(cb_priv, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static LIST_HEAD(dpaa2_switch_block_cb_list);
+
+static int
+dpaa2_switch_port_acl_tbl_bind(struct ethsw_port_priv *port_priv,
+ struct dpaa2_switch_filter_block *block)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct net_device *netdev = port_priv->netdev;
+ struct dpsw_acl_if_cfg acl_if_cfg;
+ int err;
+
+ if (port_priv->filter_block)
+ return -EINVAL;
+
+ acl_if_cfg.if_id[0] = port_priv->idx;
+ acl_if_cfg.num_ifs = 1;
+ err = dpsw_acl_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ block->acl_id, &acl_if_cfg);
+ if (err) {
+ netdev_err(netdev, "dpsw_acl_add_if err %d\n", err);
+ return err;
+ }
+
+ block->ports |= BIT(port_priv->idx);
+ port_priv->filter_block = block;
+
+ return 0;
+}
+
+static int
+dpaa2_switch_port_acl_tbl_unbind(struct ethsw_port_priv *port_priv,
+ struct dpaa2_switch_filter_block *block)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct net_device *netdev = port_priv->netdev;
+ struct dpsw_acl_if_cfg acl_if_cfg;
+ int err;
+
+ if (port_priv->filter_block != block)
+ return -EINVAL;
+
+ acl_if_cfg.if_id[0] = port_priv->idx;
+ acl_if_cfg.num_ifs = 1;
+ err = dpsw_acl_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ block->acl_id, &acl_if_cfg);
+ if (err) {
+ netdev_err(netdev, "dpsw_acl_remove_if err %d\n", err);
+ return err;
+ }
+
+ block->ports &= ~BIT(port_priv->idx);
+ port_priv->filter_block = NULL;
+ return 0;
+}
+
+static int dpaa2_switch_port_block_bind(struct ethsw_port_priv *port_priv,
+ struct dpaa2_switch_filter_block *block)
+{
+ struct dpaa2_switch_filter_block *old_block = port_priv->filter_block;
+ int err;
+
+ /* Offload all the mirror entries found in the block on this new port
+ * joining it.
+ */
+ err = dpaa2_switch_block_offload_mirror(block, port_priv);
+ if (err)
+ return err;
+
+ /* If the port is already bound to this ACL table then do nothing. This
+ * can happen when this port is the first one to join a tc block
+ */
+ if (port_priv->filter_block == block)
+ return 0;
+
+ err = dpaa2_switch_port_acl_tbl_unbind(port_priv, old_block);
+ if (err)
+ return err;
+
+ /* Mark the previous ACL table as being unused if this was the last
+ * port that was using it.
+ */
+ if (old_block->ports == 0)
+ old_block->in_use = false;
+
+ return dpaa2_switch_port_acl_tbl_bind(port_priv, block);
+}
+
+static int
+dpaa2_switch_port_block_unbind(struct ethsw_port_priv *port_priv,
+ struct dpaa2_switch_filter_block *block)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpaa2_switch_filter_block *new_block;
+ int err;
+
+ /* Unoffload all the mirror entries found in the block from the
+ * port leaving it.
+ */
+ err = dpaa2_switch_block_unoffload_mirror(block, port_priv);
+ if (err)
+ return err;
+
+ /* If we are the last port to leave this block (an ACL table),
+ * keep using it.
+ */
+ if (block->ports == BIT(port_priv->idx))
+ return 0;
+
+ err = dpaa2_switch_port_acl_tbl_unbind(port_priv, block);
+ if (err)
+ return err;
+
+ if (block->ports == 0)
+ block->in_use = false;
+
+ new_block = dpaa2_switch_filter_block_get_unused(ethsw);
+ new_block->in_use = true;
+ return dpaa2_switch_port_acl_tbl_bind(port_priv, new_block);
+}
+
+static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev,
+ struct flow_block_offload *f)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpaa2_switch_filter_block *filter_block;
+ struct flow_block_cb *block_cb;
+ bool register_block = false;
+ int err;
+
+ block_cb = flow_block_cb_lookup(f->block,
+ dpaa2_switch_port_setup_tc_block_cb_ig,
+ ethsw);
+
+ if (!block_cb) {
+ /* If the filter block is not already known, then this port
+ * must be the first to join it. In this case, we can just
+ * continue to use our private table
+ */
+ filter_block = port_priv->filter_block;
+
+ block_cb = flow_block_cb_alloc(dpaa2_switch_port_setup_tc_block_cb_ig,
+ ethsw, filter_block, NULL);
+ if (IS_ERR(block_cb))
+ return PTR_ERR(block_cb);
+
+ register_block = true;
+ } else {
+ filter_block = flow_block_cb_priv(block_cb);
+ }
+
+ flow_block_cb_incref(block_cb);
+ err = dpaa2_switch_port_block_bind(port_priv, filter_block);
+ if (err)
+ goto err_block_bind;
+
+ if (register_block) {
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list,
+ &dpaa2_switch_block_cb_list);
+ }
+
+ return 0;
+
+err_block_bind:
+ if (!flow_block_cb_decref(block_cb))
+ flow_block_cb_free(block_cb);
+ return err;
+}
+
+static void dpaa2_switch_setup_tc_block_unbind(struct net_device *netdev,
+ struct flow_block_offload *f)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpaa2_switch_filter_block *filter_block;
+ struct flow_block_cb *block_cb;
+ int err;
+
+ block_cb = flow_block_cb_lookup(f->block,
+ dpaa2_switch_port_setup_tc_block_cb_ig,
+ ethsw);
+ if (!block_cb)
+ return;
+
+ filter_block = flow_block_cb_priv(block_cb);
+ err = dpaa2_switch_port_block_unbind(port_priv, filter_block);
+ if (!err && !flow_block_cb_decref(block_cb)) {
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ }
+}
+
+static int dpaa2_switch_setup_tc_block(struct net_device *netdev,
+ struct flow_block_offload *f)
+{
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ f->driver_block_list = &dpaa2_switch_block_cb_list;
+
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ return dpaa2_switch_setup_tc_block_bind(netdev, f);
+ case FLOW_BLOCK_UNBIND:
+ dpaa2_switch_setup_tc_block_unbind(netdev, f);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int dpaa2_switch_port_setup_tc(struct net_device *netdev,
+ enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_BLOCK: {
+ return dpaa2_switch_setup_tc_block(netdev, type_data);
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct net_device_ops dpaa2_switch_port_ops = {
+ .ndo_open = dpaa2_switch_port_open,
+ .ndo_stop = dpaa2_switch_port_stop,
+
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_get_stats64 = dpaa2_switch_port_get_stats,
+ .ndo_change_mtu = dpaa2_switch_port_change_mtu,
+ .ndo_has_offload_stats = dpaa2_switch_port_has_offload_stats,
+ .ndo_get_offload_stats = dpaa2_switch_port_get_offload_stats,
+ .ndo_fdb_dump = dpaa2_switch_port_fdb_dump,
+ .ndo_vlan_rx_add_vid = dpaa2_switch_port_vlan_add,
+ .ndo_vlan_rx_kill_vid = dpaa2_switch_port_vlan_kill,
+
+ .ndo_start_xmit = dpaa2_switch_port_tx,
+ .ndo_get_port_parent_id = dpaa2_switch_port_parent_id,
+ .ndo_get_phys_port_name = dpaa2_switch_port_get_phys_name,
+ .ndo_setup_tc = dpaa2_switch_port_setup_tc,
+};
+
+bool dpaa2_switch_port_dev_check(const struct net_device *netdev)
+{
+ return netdev->netdev_ops == &dpaa2_switch_port_ops;
+}
+
+static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
+{
+ struct fsl_mc_device *dpsw_port_dev, *dpmac_dev;
+ struct dpaa2_mac *mac;
+ int err;
+
+ dpsw_port_dev = to_fsl_mc_device(port_priv->netdev->dev.parent);
+ dpmac_dev = fsl_mc_get_endpoint(dpsw_port_dev, port_priv->idx);
+
+ if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
+ return PTR_ERR(dpmac_dev);
+
+ if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
+ return 0;
+
+ mac = kzalloc(sizeof(*mac), GFP_KERNEL);
+ if (!mac)
+ return -ENOMEM;
+
+ mac->mc_dev = dpmac_dev;
+ mac->mc_io = port_priv->ethsw_data->mc_io;
+ mac->net_dev = port_priv->netdev;
+
+ err = dpaa2_mac_open(mac);
+ if (err)
+ goto err_free_mac;
+ port_priv->mac = mac;
+
+ if (dpaa2_switch_port_is_type_phy(port_priv)) {
+ err = dpaa2_mac_connect(mac);
+ if (err) {
+ netdev_err(port_priv->netdev,
+ "Error connecting to the MAC endpoint %pe\n",
+ ERR_PTR(err));
+ goto err_close_mac;
+ }
+ }
+
+ return 0;
+
+err_close_mac:
+ dpaa2_mac_close(mac);
+ port_priv->mac = NULL;
+err_free_mac:
+ kfree(mac);
+ return err;
+}
+
+static void dpaa2_switch_port_disconnect_mac(struct ethsw_port_priv *port_priv)
+{
+ if (dpaa2_switch_port_is_type_phy(port_priv))
+ dpaa2_mac_disconnect(port_priv->mac);
+
+ if (!dpaa2_switch_port_has_mac(port_priv))
+ return;
+
+ dpaa2_mac_close(port_priv->mac);
+ kfree(port_priv->mac);
+ port_priv->mac = NULL;
+}
+
+static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
+{
+ struct device *dev = (struct device *)arg;
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
+ struct ethsw_port_priv *port_priv;
+ u32 status = ~0;
+ int err, if_id;
+
+ err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, &status);
+ if (err) {
+ dev_err(dev, "Can't get irq status (err %d)\n", err);
+ goto out;
+ }
+
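+ /* The index of the interface that generated the event is encoded in
+ * the upper 16 bits of the IRQ status.
+ */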
+ if_id = (status & 0xFFFF0000) >> 16;
+ port_priv = ethsw->ports[if_id];
+
+ if (status & DPSW_IRQ_EVENT_LINK_CHANGED) {
+ dpaa2_switch_port_link_state_update(port_priv->netdev);
+ dpaa2_switch_port_set_mac_addr(port_priv);
+ }
+
+ if (status & DPSW_IRQ_EVENT_ENDPOINT_CHANGED) {
+ rtnl_lock();
+ if (dpaa2_switch_port_has_mac(port_priv))
+ dpaa2_switch_port_disconnect_mac(port_priv);
+ else
+ dpaa2_switch_port_connect_mac(port_priv);
+ rtnl_unlock();
+ }
+
+out:
+ err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, status);
+ if (err)
+ dev_err(dev, "Can't clear irq status (err %d)\n", err);
+
+ return IRQ_HANDLED;
+}
+
+static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev)
+{
+ struct device *dev = &sw_dev->dev;
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
+ u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
+ struct fsl_mc_device_irq *irq;
+ int err;
+
+ err = fsl_mc_allocate_irqs(sw_dev);
+ if (err) {
+ dev_err(dev, "MC irqs allocation failed\n");
+ return err;
+ }
+
+ if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) {
+ err = -EINVAL;
+ goto free_irq;
+ }
+
+ err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, 0);
+ if (err) {
+ dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
+ goto free_irq;
+ }
+
+ irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
+
+ err = devm_request_threaded_irq(dev, irq->virq, NULL,
+ dpaa2_switch_irq0_handler_thread,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ dev_name(dev), dev);
+ if (err) {
+ dev_err(dev, "devm_request_threaded_irq(): %d\n", err);
+ goto free_irq;
+ }
+
+ err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, mask);
+ if (err) {
+ dev_err(dev, "dpsw_set_irq_mask(): %d\n", err);
+ goto free_devm_irq;
+ }
+
+ err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, 1);
+ if (err) {
+ dev_err(dev, "dpsw_set_irq_enable(): %d\n", err);
+ goto free_devm_irq;
+ }
+
+ return 0;
+
+free_devm_irq:
+ devm_free_irq(dev, irq->virq, dev);
+free_irq:
+ fsl_mc_free_irqs(sw_dev);
+ return err;
+}
+
+static void dpaa2_switch_teardown_irqs(struct fsl_mc_device *sw_dev)
+{
+ struct device *dev = &sw_dev->dev;
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
+ int err;
+
+ err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, 0);
+ if (err)
+ dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
+
+ fsl_mc_free_irqs(sw_dev);
+}
+
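+/* Toggle hardware address learning on a switch port. When learning is
+ * disabled, also flush the dynamic FDB entries already learned on it.
+ */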
+static int dpaa2_switch_port_set_learning(struct ethsw_port_priv *port_priv, bool enable)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ enum dpsw_learning_mode learn_mode;
+ int err;
+
+ if (enable)
+ learn_mode = DPSW_LEARNING_MODE_HW;
+ else
+ learn_mode = DPSW_LEARNING_MODE_DIS;
+
+ err = dpsw_if_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port_priv->idx, learn_mode);
+ if (err)
+ netdev_err(port_priv->netdev, "dpsw_if_set_learning_mode err %d\n", err);
+
+ if (!enable)
+ dpaa2_switch_port_fast_age(port_priv);
+
+ return err;
+}
+
+static int dpaa2_switch_port_attr_stp_state_set(struct net_device *netdev,
+ u8 state)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int err;
+
+ err = dpaa2_switch_port_set_stp_state(port_priv, state);
+ if (err)
+ return err;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ err = dpaa2_switch_port_set_learning(port_priv, false);
+ break;
+ case BR_STATE_LEARNING:
+ case BR_STATE_FORWARDING:
+ err = dpaa2_switch_port_set_learning(port_priv,
+ port_priv->learn_ena);
+ break;
+ }
+
+ return err;
+}
+
+static int dpaa2_switch_port_flood(struct ethsw_port_priv *port_priv,
+ struct switchdev_brport_flags flags)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+
+ if (flags.mask & BR_BCAST_FLOOD)
+ port_priv->bcast_flood = !!(flags.val & BR_BCAST_FLOOD);
+
+ if (flags.mask & BR_FLOOD)
+ port_priv->ucast_flood = !!(flags.val & BR_FLOOD);
+
+ return dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
+}
+
+static int dpaa2_switch_port_pre_bridge_flags(struct net_device *netdev,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ if (flags.mask & ~(BR_LEARNING | BR_BCAST_FLOOD | BR_FLOOD |
+ BR_MCAST_FLOOD))
+ return -EINVAL;
+
+ if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD)) {
+ bool multicast = !!(flags.val & BR_MCAST_FLOOD);
+ bool unicast = !!(flags.val & BR_FLOOD);
+
+ if (unicast != multicast) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot configure multicast flooding independently of unicast");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int dpaa2_switch_port_bridge_flags(struct net_device *netdev,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int err;
+
+ if (flags.mask & BR_LEARNING) {
+ bool learn_ena = !!(flags.val & BR_LEARNING);
+
+ err = dpaa2_switch_port_set_learning(port_priv, learn_ena);
+ if (err)
+ return err;
+ port_priv->learn_ena = learn_ena;
+ }
+
+ if (flags.mask & (BR_BCAST_FLOOD | BR_FLOOD | BR_MCAST_FLOOD)) {
+ err = dpaa2_switch_port_flood(port_priv, flags);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int dpaa2_switch_port_attr_set(struct net_device *netdev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ int err = 0;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ err = dpaa2_switch_port_attr_stp_state_set(netdev,
+ attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ if (!attr->u.vlan_filtering) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "The DPAA2 switch does not support VLAN-unaware operation");
+ return -EOPNOTSUPP;
+ }
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ err = dpaa2_switch_port_pre_bridge_flags(netdev, attr->u.brport_flags, extack);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ err = dpaa2_switch_port_bridge_flags(netdev, attr->u.brport_flags, extack);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+int dpaa2_switch_port_vlans_add(struct net_device *netdev,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpsw_attr *attr = &ethsw->sw_attr;
+ int err = 0;
+
+ /* Make sure that the VLAN is not already configured
+ * on the switch port
+ */
+ if (port_priv->vlans[vlan->vid] & ETHSW_VLAN_MEMBER)
+ return -EEXIST;
+
+ /* Check if there is space for a new VLAN */
+ err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &ethsw->sw_attr);
+ if (err) {
+ netdev_err(netdev, "dpsw_get_attributes err %d\n", err);
+ return err;
+ }
+ if (attr->max_vlans - attr->num_vlans < 1)
+ return -ENOSPC;
+
+ if (!port_priv->ethsw_data->vlans[vlan->vid]) {
+ /* this is a new VLAN */
+ err = dpaa2_switch_add_vlan(port_priv, vlan->vid);
+ if (err)
+ return err;
+
+ port_priv->ethsw_data->vlans[vlan->vid] |= ETHSW_VLAN_GLOBAL;
+ }
+
+ return dpaa2_switch_port_add_vlan(port_priv, vlan->vid, vlan->flags);
+}
+
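+/* Return 1 if the given address is already present in the netdev's unicast
+ * (is_uc != 0) or multicast address list, 0 otherwise.
+ */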
+static int dpaa2_switch_port_lookup_address(struct net_device *netdev, int is_uc,
+ const unsigned char *addr)
+{
+ struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
+ struct netdev_hw_addr *ha;
+
+ netif_addr_lock_bh(netdev);
+ list_for_each_entry(ha, &list->list, list) {
+ if (ether_addr_equal(ha->addr, addr)) {
+ netif_addr_unlock_bh(netdev);
+ return 1;
+ }
+ }
+ netif_addr_unlock_bh(netdev);
+ return 0;
+}
+
+static int dpaa2_switch_port_mdb_add(struct net_device *netdev,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int err;
+
+ /* Check if address is already set on this port */
+ if (dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
+ return -EEXIST;
+
+ err = dpaa2_switch_port_fdb_add_mc(port_priv, mdb->addr);
+ if (err)
+ return err;
+
+ err = dev_mc_add(netdev, mdb->addr);
+ if (err) {
+ netdev_err(netdev, "dev_mc_add err %d\n", err);
+ dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
+ }
+
+ return err;
+}
+
+static int dpaa2_switch_port_obj_add(struct net_device *netdev,
+ const struct switchdev_obj *obj)
+{
+ int err;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = dpaa2_switch_port_vlans_add(netdev,
+ SWITCHDEV_OBJ_PORT_VLAN(obj));
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ err = dpaa2_switch_port_mdb_add(netdev,
+ SWITCHDEV_OBJ_PORT_MDB(obj));
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int dpaa2_switch_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct net_device *netdev = port_priv->netdev;
+ struct dpsw_vlan_if_cfg vcfg;
+ int i, err;
+
+ if (!port_priv->vlans[vid])
+ return -ENOENT;
+
+ if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) {
+ /* If we are deleting the PVID of a port, use VLAN 4095 instead
+ * as we are sure that neither the bridge nor the 8021q module
+ * will use it
+ */
+ err = dpaa2_switch_port_set_pvid(port_priv, 4095);
+ if (err)
+ return err;
+ }
+
+ vcfg.num_ifs = 1;
+ vcfg.if_id[0] = port_priv->idx;
+ if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) {
+ err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ vid, &vcfg);
+ if (err) {
+ netdev_err(netdev,
+ "dpsw_vlan_remove_if_untagged err %d\n",
+ err);
+ }
+ port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED;
+ }
+
+ if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
+ err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ vid, &vcfg);
+ if (err) {
+ netdev_err(netdev,
+ "dpsw_vlan_remove_if err %d\n", err);
+ return err;
+ }
+ port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER;
+
+ /* Delete VLAN from switch if it is no longer configured on
+ * any port
+ */
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ if (ethsw->ports[i] &&
+ ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
+ return 0; /* Found a port member in VID */
+ }
+
+ ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL;
+
+ err = dpaa2_switch_dellink(ethsw, vid);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int dpaa2_switch_port_vlans_del(struct net_device *netdev,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+
+ if (netif_is_bridge_master(vlan->obj.orig_dev))
+ return -EOPNOTSUPP;
+
+ return dpaa2_switch_port_del_vlan(port_priv, vlan->vid);
+}
+
+static int dpaa2_switch_port_mdb_del(struct net_device *netdev,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int err;
+
+ if (!dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
+ return -ENOENT;
+
+ err = dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
+ if (err)
+ return err;
+
+ err = dev_mc_del(netdev, mdb->addr);
+ if (err) {
+ netdev_err(netdev, "dev_mc_del err %d\n", err);
+ return err;
+ }
+
+ return err;
+}
+
+static int dpaa2_switch_port_obj_del(struct net_device *netdev,
+ const struct switchdev_obj *obj)
+{
+ int err;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = dpaa2_switch_port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj));
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ err = dpaa2_switch_port_mdb_del(netdev, SWITCHDEV_OBJ_PORT_MDB(obj));
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+ return err;
+}
+
+static int dpaa2_switch_port_attr_set_event(struct net_device *netdev,
+ struct switchdev_notifier_port_attr_info *ptr)
+{
+ int err;
+
+ err = switchdev_handle_port_attr_set(netdev, ptr,
+ dpaa2_switch_port_dev_check,
+ dpaa2_switch_port_attr_set);
+ return notifier_from_errno(err);
+}
+
+static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
+ struct net_device *upper_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct ethsw_port_priv *other_port_priv;
+ struct net_device *other_dev;
+ struct list_head *iter;
+ bool learn_ena;
+ int err;
+
+ netdev_for_each_lower_dev(upper_dev, other_dev, iter) {
+ if (!dpaa2_switch_port_dev_check(other_dev))
+ continue;
+
+ other_port_priv = netdev_priv(other_dev);
+ if (other_port_priv->ethsw_data != port_priv->ethsw_data) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Interface from a different DPSW is in the bridge already");
+ return -EINVAL;
+ }
+ }
+
+ /* Delete the previously manually installed VLAN 1 */
+ err = dpaa2_switch_port_del_vlan(port_priv, 1);
+ if (err)
+ return err;
+
+ dpaa2_switch_port_set_fdb(port_priv, upper_dev);
+
+ /* Inherit the initial bridge port learning state */
+ learn_ena = br_port_flag_is_set(netdev, BR_LEARNING);
+ err = dpaa2_switch_port_set_learning(port_priv, learn_ena);
+ port_priv->learn_ena = learn_ena;
+
+ /* Setup the egress flood policy (broadcast, unknown unicast) */
+ err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
+ if (err)
+ goto err_egress_flood;
+
+ err = switchdev_bridge_port_offload(netdev, netdev, NULL,
+ NULL, NULL, false, extack);
+ if (err)
+ goto err_switchdev_offload;
+
+ return 0;
+
+err_switchdev_offload:
+err_egress_flood:
+ dpaa2_switch_port_set_fdb(port_priv, NULL);
+ return err;
+}
+
+static int dpaa2_switch_port_clear_rxvlan(struct net_device *vdev, int vid, void *arg)
+{
+ __be16 vlan_proto = htons(ETH_P_8021Q);
+
+ if (vdev)
+ vlan_proto = vlan_dev_vlan_proto(vdev);
+
+ return dpaa2_switch_port_vlan_kill(arg, vlan_proto, vid);
+}
+
+static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, void *arg)
+{
+ __be16 vlan_proto = htons(ETH_P_8021Q);
+
+ if (vdev)
+ vlan_proto = vlan_dev_vlan_proto(vdev);
+
+ return dpaa2_switch_port_vlan_add(arg, vlan_proto, vid);
+}
+
+static void dpaa2_switch_port_pre_bridge_leave(struct net_device *netdev)
+{
+ switchdev_bridge_port_unoffload(netdev, NULL, NULL, NULL);
+}
+
+static int dpaa2_switch_port_bridge_leave(struct net_device *netdev)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct dpaa2_switch_fdb *old_fdb = port_priv->fdb;
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ int err;
+
+ /* First of all, fast age any learned FDB addresses on this switch port */
+ dpaa2_switch_port_fast_age(port_priv);
+
+ /* Clear all RX VLANs installed through vlan_vid_add() either as VLAN
+ * upper devices or otherwise from the FDB table that we are about to
+ * leave
+ */
+ err = vlan_for_each(netdev, dpaa2_switch_port_clear_rxvlan, netdev);
+ if (err)
+ netdev_err(netdev, "Unable to clear RX VLANs from old FDB table, err (%d)\n", err);
+
+ dpaa2_switch_port_set_fdb(port_priv, NULL);
+
+ /* Restore all RX VLANs into the new FDB table that we just joined */
+ err = vlan_for_each(netdev, dpaa2_switch_port_restore_rxvlan, netdev);
+ if (err)
+ netdev_err(netdev, "Unable to restore RX VLANs to the new FDB, err (%d)\n", err);
+
+ /* Reset the flooding state to denote that this port can send any
+ * packet in standalone mode. With this, we are also ensuring that any
+ * later bridge join will have the flooding flag on.
+ */
+ port_priv->bcast_flood = true;
+ port_priv->ucast_flood = true;
+
+ /* Setup the egress flood policy (broadcast, unknown unicast).
+ * When the port is not under a bridge, only the CTRL interface is part
+ * of the flooding domain besides the actual port
+ */
+ err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
+ if (err)
+ return err;
+
+ /* Recreate the egress flood domain of the FDB that we just left */
+ err = dpaa2_switch_fdb_set_egress_flood(ethsw, old_fdb->fdb_id);
+ if (err)
+ return err;
+
+ /* No HW learning when not under a bridge */
+ err = dpaa2_switch_port_set_learning(port_priv, false);
+ if (err)
+ return err;
+ port_priv->learn_ena = false;
+
+ /* Add VLAN 1 as the PVID when not under a bridge. We need this since
+ * the dpaa2 switch interfaces cannot operate in a VLAN-unaware mode
+ */
+ return dpaa2_switch_port_add_vlan(port_priv, DEFAULT_VLAN_ID,
+ BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID);
+}
+
+static int dpaa2_switch_prevent_bridging_with_8021q_upper(struct net_device *netdev)
+{
+ struct net_device *upper_dev;
+ struct list_head *iter;
+
+ /* The RCU read lock is not necessary because we have write-side
+ * protection (rtnl_mutex); however, a non-RCU iterator does not exist.
+ */
+ netdev_for_each_upper_dev_rcu(netdev, upper_dev, iter)
+ if (is_vlan_dev(upper_dev))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int
+dpaa2_switch_prechangeupper_sanity_checks(struct net_device *netdev,
+ struct net_device *upper_dev,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ if (!br_vlan_enabled(upper_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot join a VLAN-unaware bridge");
+ return -EOPNOTSUPP;
+ }
+
+ err = dpaa2_switch_prevent_bridging_with_8021q_upper(netdev);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot join a bridge while VLAN uppers are present");
+ return err;
+ }
+
+ return 0;
+}
+
+static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct netlink_ext_ack *extack;
+ struct net_device *upper_dev;
+ int err = 0;
+
+ if (!dpaa2_switch_port_dev_check(netdev))
+ return NOTIFY_DONE;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (!netif_is_bridge_master(upper_dev))
+ break;
+
+ err = dpaa2_switch_prechangeupper_sanity_checks(netdev,
+ upper_dev,
+ extack);
+ if (err)
+ goto out;
+
+ if (!info->linking)
+ dpaa2_switch_port_pre_bridge_leave(netdev);
+
+ break;
+ case NETDEV_CHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (netif_is_bridge_master(upper_dev)) {
+ if (info->linking)
+ err = dpaa2_switch_port_bridge_join(netdev,
+ upper_dev,
+ extack);
+ else
+ err = dpaa2_switch_port_bridge_leave(netdev);
+ }
+ break;
+ }
+
+out:
+ return notifier_from_errno(err);
+}
+
+struct ethsw_switchdev_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct net_device *dev;
+ unsigned long event;
+};
+
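+/* Deferred work item which programs switchdev FDB add/del requests into
+ * hardware outside of the atomic notifier context.
+ */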
+static void dpaa2_switch_event_work(struct work_struct *work)
+{
+ struct ethsw_switchdev_event_work *switchdev_work =
+ container_of(work, struct ethsw_switchdev_event_work, work);
+ struct net_device *dev = switchdev_work->dev;
+ struct switchdev_notifier_fdb_info *fdb_info;
+ int err;
+
+ rtnl_lock();
+ fdb_info = &switchdev_work->fdb_info;
+
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ if (!fdb_info->added_by_user || fdb_info->is_local)
+ break;
+ if (is_unicast_ether_addr(fdb_info->addr))
+ err = dpaa2_switch_port_fdb_add_uc(netdev_priv(dev),
+ fdb_info->addr);
+ else
+ err = dpaa2_switch_port_fdb_add_mc(netdev_priv(dev),
+ fdb_info->addr);
+ if (err)
+ break;
+ fdb_info->offloaded = true;
+ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
+ &fdb_info->info, NULL);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ if (!fdb_info->added_by_user || fdb_info->is_local)
+ break;
+ if (is_unicast_ether_addr(fdb_info->addr))
+ dpaa2_switch_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr);
+ else
+ dpaa2_switch_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr);
+ break;
+ }
+
+ rtnl_unlock();
+ kfree(switchdev_work->fdb_info.addr);
+ kfree(switchdev_work);
+ dev_put(dev);
+}
+
+/* Called under rcu_read_lock() */
+static int dpaa2_switch_port_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ struct ethsw_port_priv *port_priv = netdev_priv(dev);
+ struct ethsw_switchdev_event_work *switchdev_work;
+ struct switchdev_notifier_fdb_info *fdb_info = ptr;
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET)
+ return dpaa2_switch_port_attr_set_event(dev, ptr);
+
+ if (!dpaa2_switch_port_dev_check(dev))
+ return NOTIFY_DONE;
+
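+	/* The actual FDB update takes the rtnl lock and issues firmware
+	 * commands, so defer it to the ordered workqueue; allocate atomically
+	 * since this notifier runs under rcu_read_lock().
+	 */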
+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+ if (!switchdev_work)
+ return NOTIFY_BAD;
+
+ INIT_WORK(&switchdev_work->work, dpaa2_switch_event_work);
+ switchdev_work->dev = dev;
+ switchdev_work->event = event;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ memcpy(&switchdev_work->fdb_info, ptr,
+ sizeof(switchdev_work->fdb_info));
+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!switchdev_work->fdb_info.addr)
+ goto err_addr_alloc;
+
+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+ fdb_info->addr);
+
+		/* Take a reference on the device so it is not freed while the
+		 * work item is pending; it is released in
+		 * dpaa2_switch_event_work().
+		 */
+ dev_hold(dev);
+ break;
+ default:
+ kfree(switchdev_work);
+ return NOTIFY_DONE;
+ }
+
+ queue_work(ethsw->workqueue, &switchdev_work->work);
+
+ return NOTIFY_DONE;
+
+err_addr_alloc:
+ kfree(switchdev_work);
+ return NOTIFY_BAD;
+}
+
+static int dpaa2_switch_port_obj_event(unsigned long event,
+ struct net_device *netdev,
+ struct switchdev_notifier_port_obj_info *port_obj_info)
+{
+ int err = -EOPNOTSUPP;
+
+ if (!dpaa2_switch_port_dev_check(netdev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = dpaa2_switch_port_obj_add(netdev, port_obj_info->obj);
+ break;
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = dpaa2_switch_port_obj_del(netdev, port_obj_info->obj);
+ break;
+ }
+
+ port_obj_info->handled = true;
+ return notifier_from_errno(err);
+}
+
+static int dpaa2_switch_port_blocking_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ case SWITCHDEV_PORT_OBJ_DEL:
+ return dpaa2_switch_port_obj_event(event, dev, ptr);
+ case SWITCHDEV_PORT_ATTR_SET:
+ return dpaa2_switch_port_attr_set_event(dev, ptr);
+ }
+
+ return NOTIFY_DONE;
+}
+
+/* Build a linear skb based on a single-buffer frame descriptor */
+static struct sk_buff *dpaa2_switch_build_linear_skb(struct ethsw_core *ethsw,
+ const struct dpaa2_fd *fd)
+{
+ u16 fd_offset = dpaa2_fd_get_offset(fd);
+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
+ u32 fd_length = dpaa2_fd_get_len(fd);
+ struct device *dev = ethsw->dev;
+ struct sk_buff *skb = NULL;
+ void *fd_vaddr;
+
+ fd_vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, addr);
+ dma_unmap_page(dev, addr, DPAA2_SWITCH_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+
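+	/* DPAA2_SWITCH_RX_BUF_SIZE already excludes the tailroom reserved for
+	 * struct skb_shared_info, so the whole buffer can be handed to
+	 * build_skb().
+	 */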
+ skb = build_skb(fd_vaddr, DPAA2_SWITCH_RX_BUF_SIZE +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+ if (unlikely(!skb)) {
+ dev_err(dev, "build_skb() failed\n");
+ return NULL;
+ }
+
+ skb_reserve(skb, fd_offset);
+ skb_put(skb, fd_length);
+
+ ethsw->buf_count--;
+
+ return skb;
+}
+
+static void dpaa2_switch_tx_conf(struct dpaa2_switch_fq *fq,
+ const struct dpaa2_fd *fd)
+{
+ dpaa2_switch_free_fd(fq->ethsw, fd);
+}
+
+static void dpaa2_switch_rx(struct dpaa2_switch_fq *fq,
+ const struct dpaa2_fd *fd)
+{
+ struct ethsw_core *ethsw = fq->ethsw;
+ struct ethsw_port_priv *port_priv;
+ struct net_device *netdev;
+ struct vlan_ethhdr *hdr;
+ struct sk_buff *skb;
+ u16 vlan_tci, vid;
+ int if_id, err;
+
+ /* get switch ingress interface ID */
+ if_id = upper_32_bits(dpaa2_fd_get_flc(fd)) & 0x0000FFFF;
+
+ if (if_id >= ethsw->sw_attr.num_ifs) {
+ dev_err(ethsw->dev, "Frame received from unknown interface!\n");
+ goto err_free_fd;
+ }
+ port_priv = ethsw->ports[if_id];
+ netdev = port_priv->netdev;
+
+ /* build the SKB based on the FD received */
+	if (dpaa2_fd_get_format(fd) != dpaa2_fd_single) {
+		if (net_ratelimit())
+			netdev_err(netdev, "Received invalid frame format\n");
+		goto err_free_fd;
+	}
+
+ skb = dpaa2_switch_build_linear_skb(ethsw, fd);
+ if (unlikely(!skb))
+ goto err_free_fd;
+
+ skb_reset_mac_header(skb);
+
+	/* Remove the VLAN header if the packet that we just received has a vid
+	 * equal to the port's PVID. Since the dpaa2-switch can operate only in
+ * VLAN-aware mode and no alterations are made on the packet when it's
+ * redirected/mirrored to the control interface, we are sure that there
+ * will always be a VLAN header present.
+ */
+ hdr = vlan_eth_hdr(skb);
+ vid = ntohs(hdr->h_vlan_TCI) & VLAN_VID_MASK;
+ if (vid == port_priv->pvid) {
+ err = __skb_vlan_pop(skb, &vlan_tci);
+ if (err) {
+ dev_info(ethsw->dev, "__skb_vlan_pop() returned %d", err);
+ goto err_free_fd;
+ }
+ }
+
+ skb->dev = netdev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ /* Setup the offload_fwd_mark only if the port is under a bridge */
+ skb->offload_fwd_mark = !!(port_priv->fdb->bridge_dev);
+
+ netif_receive_skb(skb);
+
+ return;
+
+err_free_fd:
+ dpaa2_switch_free_fd(ethsw, fd);
+}
+
+static void dpaa2_switch_detect_features(struct ethsw_core *ethsw)
+{
+ ethsw->features = 0;
+
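+	/* ETHSW_FEATURE_MAC_ADDR is only advertised on DPSW API 8.6 or newer */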
+ if (ethsw->major > 8 || (ethsw->major == 8 && ethsw->minor >= 6))
+ ethsw->features |= ETHSW_FEATURE_MAC_ADDR;
+}
+
+static int dpaa2_switch_setup_fqs(struct ethsw_core *ethsw)
+{
+ struct dpsw_ctrl_if_attr ctrl_if_attr;
+ struct device *dev = ethsw->dev;
+ int i = 0;
+ int err;
+
+ err = dpsw_ctrl_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &ctrl_if_attr);
+ if (err) {
+ dev_err(dev, "dpsw_ctrl_if_get_attributes() = %d\n", err);
+ return err;
+ }
+
+ ethsw->fq[i].fqid = ctrl_if_attr.rx_fqid;
+ ethsw->fq[i].ethsw = ethsw;
+ ethsw->fq[i++].type = DPSW_QUEUE_RX;
+
+ ethsw->fq[i].fqid = ctrl_if_attr.tx_err_conf_fqid;
+ ethsw->fq[i].ethsw = ethsw;
+ ethsw->fq[i++].type = DPSW_QUEUE_TX_ERR_CONF;
+
+ return 0;
+}
+
+/* Free buffers acquired from the buffer pool or buffers that were meant
+ * to be released back into the pool
+ */
+static void dpaa2_switch_free_bufs(struct ethsw_core *ethsw, u64 *buf_array, int count)
+{
+ struct device *dev = ethsw->dev;
+ void *vaddr;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, buf_array[i]);
+ dma_unmap_page(dev, buf_array[i], DPAA2_SWITCH_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ free_pages((unsigned long)vaddr, 0);
+ }
+}
+
+/* Perform a single release command to add buffers
+ * to the specified buffer pool
+ */
+static int dpaa2_switch_add_bufs(struct ethsw_core *ethsw, u16 bpid)
+{
+ struct device *dev = ethsw->dev;
+ u64 buf_array[BUFS_PER_CMD];
+ struct page *page;
+ int retries = 0;
+ dma_addr_t addr;
+ int err;
+ int i;
+
+ for (i = 0; i < BUFS_PER_CMD; i++) {
+ /* Allocate one page for each Rx buffer. WRIOP sees
+ * the entire page except for a tailroom reserved for
+ * skb shared info
+ */
+ page = dev_alloc_pages(0);
+ if (!page) {
+ dev_err(dev, "buffer allocation failed\n");
+ goto err_alloc;
+ }
+
+ addr = dma_map_page(dev, page, 0, DPAA2_SWITCH_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, addr)) {
+			dev_err(dev, "dma_map_page() failed\n");
+ goto err_map;
+ }
+ buf_array[i] = addr;
+ }
+
+release_bufs:
+ /* In case the portal is busy, retry until successful or
+ * max retries hit.
+ */
+ while ((err = dpaa2_io_service_release(NULL, bpid,
+ buf_array, i)) == -EBUSY) {
+ if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES)
+ break;
+
+ cpu_relax();
+ }
+
+ /* If release command failed, clean up and bail out. */
+ if (err) {
+ dpaa2_switch_free_bufs(ethsw, buf_array, i);
+ return 0;
+ }
+
+ return i;
+
+err_map:
+ __free_pages(page, 0);
+err_alloc:
+ /* If we managed to allocate at least some buffers,
+ * release them to hardware
+ */
+ if (i)
+ goto release_bufs;
+
+ return 0;
+}
+
+static int dpaa2_switch_refill_bp(struct ethsw_core *ethsw)
+{
+ int *count = &ethsw->buf_count;
+ int new_count;
+ int err = 0;
+
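+	/* Only replenish the pool once the buffer count drops below the
+	 * threshold, so that release commands are not issued on every poll
+	 * iteration.
+	 */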
+ if (unlikely(*count < DPAA2_ETHSW_REFILL_THRESH)) {
+ do {
+ new_count = dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
+ if (unlikely(!new_count)) {
+ /* Out of memory; abort for now, we'll
+ * try later on
+ */
+ break;
+ }
+ *count += new_count;
+ } while (*count < DPAA2_ETHSW_NUM_BUFS);
+
+ if (unlikely(*count < DPAA2_ETHSW_NUM_BUFS))
+ err = -ENOMEM;
+ }
+
+ return err;
+}
+
+static int dpaa2_switch_seed_bp(struct ethsw_core *ethsw)
+{
+ int *count, i;
+
+ for (i = 0; i < DPAA2_ETHSW_NUM_BUFS; i += BUFS_PER_CMD) {
+ count = &ethsw->buf_count;
+ *count += dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
+
+ if (unlikely(*count < BUFS_PER_CMD))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void dpaa2_switch_drain_bp(struct ethsw_core *ethsw)
+{
+ u64 buf_array[BUFS_PER_CMD];
+ int ret;
+
+ do {
+ ret = dpaa2_io_service_acquire(NULL, ethsw->bpid,
+ buf_array, BUFS_PER_CMD);
+ if (ret < 0) {
+ dev_err(ethsw->dev,
+ "dpaa2_io_service_acquire() = %d\n", ret);
+ return;
+ }
+ dpaa2_switch_free_bufs(ethsw, buf_array, ret);
+
+ } while (ret);
+}
+
+static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw)
+{
+ struct dpsw_ctrl_if_pools_cfg dpsw_ctrl_if_pools_cfg = { 0 };
+ struct device *dev = ethsw->dev;
+ struct fsl_mc_device *dpbp_dev;
+ struct dpbp_attr dpbp_attrs;
+ int err;
+
+ err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
+ &dpbp_dev);
+ if (err) {
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_err(dev, "DPBP device allocation failed\n");
+ return err;
+ }
+ ethsw->dpbp_dev = dpbp_dev;
+
+ err = dpbp_open(ethsw->mc_io, 0, dpbp_dev->obj_desc.id,
+ &dpbp_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpbp_open() failed\n");
+ goto err_open;
+ }
+
+ err = dpbp_reset(ethsw->mc_io, 0, dpbp_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpbp_reset() failed\n");
+ goto err_reset;
+ }
+
+ err = dpbp_enable(ethsw->mc_io, 0, dpbp_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpbp_enable() failed\n");
+ goto err_enable;
+ }
+
+ err = dpbp_get_attributes(ethsw->mc_io, 0, dpbp_dev->mc_handle,
+ &dpbp_attrs);
+ if (err) {
+ dev_err(dev, "dpbp_get_attributes() failed\n");
+ goto err_get_attr;
+ }
+
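+	/* Point the control interface Rx path at this single buffer pool */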
+ dpsw_ctrl_if_pools_cfg.num_dpbp = 1;
+ dpsw_ctrl_if_pools_cfg.pools[0].dpbp_id = dpbp_attrs.id;
+ dpsw_ctrl_if_pools_cfg.pools[0].buffer_size = DPAA2_SWITCH_RX_BUF_SIZE;
+ dpsw_ctrl_if_pools_cfg.pools[0].backup_pool = 0;
+
+ err = dpsw_ctrl_if_set_pools(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &dpsw_ctrl_if_pools_cfg);
+ if (err) {
+ dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n");
+ goto err_get_attr;
+ }
+ ethsw->bpid = dpbp_attrs.id;
+
+ return 0;
+
+err_get_attr:
+ dpbp_disable(ethsw->mc_io, 0, dpbp_dev->mc_handle);
+err_enable:
+err_reset:
+ dpbp_close(ethsw->mc_io, 0, dpbp_dev->mc_handle);
+err_open:
+ fsl_mc_object_free(dpbp_dev);
+ return err;
+}
+
+static void dpaa2_switch_free_dpbp(struct ethsw_core *ethsw)
+{
+ dpbp_disable(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle);
+ dpbp_close(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle);
+ fsl_mc_object_free(ethsw->dpbp_dev);
+}
+
+static int dpaa2_switch_alloc_rings(struct ethsw_core *ethsw)
+{
+ int i;
+
+ for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) {
+ ethsw->fq[i].store =
+ dpaa2_io_store_create(DPAA2_SWITCH_STORE_SIZE,
+ ethsw->dev);
+ if (!ethsw->fq[i].store) {
+ dev_err(ethsw->dev, "dpaa2_io_store_create failed\n");
+ while (--i >= 0)
+ dpaa2_io_store_destroy(ethsw->fq[i].store);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static void dpaa2_switch_destroy_rings(struct ethsw_core *ethsw)
+{
+ int i;
+
+ for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
+ dpaa2_io_store_destroy(ethsw->fq[i].store);
+}
+
+static int dpaa2_switch_pull_fq(struct dpaa2_switch_fq *fq)
+{
+ int err, retries = 0;
+
+ /* Try to pull from the FQ while the portal is busy and we didn't hit
+	 * the maximum number of retries
+ */
+ do {
+ err = dpaa2_io_service_pull_fq(NULL, fq->fqid, fq->store);
+ cpu_relax();
+ } while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES);
+
+ if (unlikely(err))
+ dev_err(fq->ethsw->dev, "dpaa2_io_service_pull err %d", err);
+
+ return err;
+}
+
+/* Consume all frames pull-dequeued into the store */
+static int dpaa2_switch_store_consume(struct dpaa2_switch_fq *fq)
+{
+ struct ethsw_core *ethsw = fq->ethsw;
+ int cleaned = 0, is_last;
+ struct dpaa2_dq *dq;
+ int retries = 0;
+
+ do {
+ /* Get the next available FD from the store */
+ dq = dpaa2_io_store_next(fq->store, &is_last);
+ if (unlikely(!dq)) {
+ if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES) {
+ dev_err_once(ethsw->dev,
+ "No valid dequeue response\n");
+ return -ETIMEDOUT;
+ }
+ continue;
+ }
+
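+		/* Dispatch based on the queue type: Rx frames go up the stack,
+		 * Tx confirmations only free the frame descriptor.
+		 */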
+ if (fq->type == DPSW_QUEUE_RX)
+ dpaa2_switch_rx(fq, dpaa2_dq_fd(dq));
+ else
+ dpaa2_switch_tx_conf(fq, dpaa2_dq_fd(dq));
+ cleaned++;
+
+ } while (!is_last);
+
+ return cleaned;
+}
+
+/* NAPI poll routine */
+static int dpaa2_switch_poll(struct napi_struct *napi, int budget)
+{
+ int err, cleaned = 0, store_cleaned, work_done;
+ struct dpaa2_switch_fq *fq;
+ int retries = 0;
+
+ fq = container_of(napi, struct dpaa2_switch_fq, napi);
+
+ do {
+ err = dpaa2_switch_pull_fq(fq);
+ if (unlikely(err))
+ break;
+
+ /* Refill pool if appropriate */
+ dpaa2_switch_refill_bp(fq->ethsw);
+
+ store_cleaned = dpaa2_switch_store_consume(fq);
+ cleaned += store_cleaned;
+
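+		/* Budget exhausted: stay scheduled so NAPI polls us again */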
+ if (cleaned >= budget) {
+ work_done = budget;
+ goto out;
+ }
+
+ } while (store_cleaned);
+
+ /* We didn't consume the entire budget, so finish napi and re-enable
+ * data availability notifications
+ */
+ napi_complete_done(napi, cleaned);
+ do {
+ err = dpaa2_io_service_rearm(NULL, &fq->nctx);
+ cpu_relax();
+ } while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES);
+
+ work_done = max(cleaned, 1);
+out:
+
+ return work_done;
+}
+
+static void dpaa2_switch_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
+{
+ struct dpaa2_switch_fq *fq;
+
+ fq = container_of(nctx, struct dpaa2_switch_fq, nctx);
+
+ napi_schedule(&fq->napi);
+}
+
+static int dpaa2_switch_setup_dpio(struct ethsw_core *ethsw)
+{
+ struct dpsw_ctrl_if_queue_cfg queue_cfg;
+ struct dpaa2_io_notification_ctx *nctx;
+ int err, i, j;
+
+ for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) {
+ nctx = &ethsw->fq[i].nctx;
+
+ /* Register a new software context for the FQID.
+		 * By using NULL as the first parameter, we specify that we do
+		 * not care on which CPU the interrupts for this queue are received
+ */
+ nctx->is_cdan = 0;
+ nctx->id = ethsw->fq[i].fqid;
+ nctx->desired_cpu = DPAA2_IO_ANY_CPU;
+ nctx->cb = dpaa2_switch_fqdan_cb;
+ err = dpaa2_io_service_register(NULL, nctx, ethsw->dev);
+ if (err) {
+ err = -EPROBE_DEFER;
+ goto err_register;
+ }
+
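+		/* Have frames on this FQ generate data availability
+		 * notifications towards the DPIO object registered above.
+		 */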
+ queue_cfg.options = DPSW_CTRL_IF_QUEUE_OPT_DEST |
+ DPSW_CTRL_IF_QUEUE_OPT_USER_CTX;
+ queue_cfg.dest_cfg.dest_type = DPSW_CTRL_IF_DEST_DPIO;
+ queue_cfg.dest_cfg.dest_id = nctx->dpio_id;
+ queue_cfg.dest_cfg.priority = 0;
+ queue_cfg.user_ctx = nctx->qman64;
+
+ err = dpsw_ctrl_if_set_queue(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ ethsw->fq[i].type,
+ &queue_cfg);
+ if (err)
+ goto err_set_queue;
+ }
+
+ return 0;
+
+err_set_queue:
+ dpaa2_io_service_deregister(NULL, nctx, ethsw->dev);
+err_register:
+ for (j = 0; j < i; j++)
+ dpaa2_io_service_deregister(NULL, &ethsw->fq[j].nctx,
+ ethsw->dev);
+
+ return err;
+}
+
+static void dpaa2_switch_free_dpio(struct ethsw_core *ethsw)
+{
+ int i;
+
+ for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
+ dpaa2_io_service_deregister(NULL, &ethsw->fq[i].nctx,
+ ethsw->dev);
+}
+
+static int dpaa2_switch_ctrl_if_setup(struct ethsw_core *ethsw)
+{
+ int err;
+
+ /* setup FQs for Rx and Tx Conf */
+ err = dpaa2_switch_setup_fqs(ethsw);
+ if (err)
+ return err;
+
+ /* setup the buffer pool needed on the Rx path */
+ err = dpaa2_switch_setup_dpbp(ethsw);
+ if (err)
+ return err;
+
+ err = dpaa2_switch_alloc_rings(ethsw);
+ if (err)
+ goto err_free_dpbp;
+
+ err = dpaa2_switch_setup_dpio(ethsw);
+ if (err)
+ goto err_destroy_rings;
+
+ err = dpaa2_switch_seed_bp(ethsw);
+ if (err)
+ goto err_deregister_dpio;
+
+ err = dpsw_ctrl_if_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ if (err) {
+ dev_err(ethsw->dev, "dpsw_ctrl_if_enable err %d\n", err);
+ goto err_drain_dpbp;
+ }
+
+ return 0;
+
+err_drain_dpbp:
+ dpaa2_switch_drain_bp(ethsw);
+err_deregister_dpio:
+ dpaa2_switch_free_dpio(ethsw);
+err_destroy_rings:
+ dpaa2_switch_destroy_rings(ethsw);
+err_free_dpbp:
+ dpaa2_switch_free_dpbp(ethsw);
+
+ return err;
+}
+
+static void dpaa2_switch_remove_port(struct ethsw_core *ethsw,
+ u16 port_idx)
+{
+ struct ethsw_port_priv *port_priv = ethsw->ports[port_idx];
+
+ rtnl_lock();
+ dpaa2_switch_port_disconnect_mac(port_priv);
+ rtnl_unlock();
+ free_netdev(port_priv->netdev);
+ ethsw->ports[port_idx] = NULL;
+}
+
+static int dpaa2_switch_init(struct fsl_mc_device *sw_dev)
+{
+ struct device *dev = &sw_dev->dev;
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
+ struct dpsw_vlan_if_cfg vcfg = {0};
+ struct dpsw_tci_cfg tci_cfg = {0};
+ struct dpsw_stp_cfg stp_cfg;
+ int err;
+ u16 i;
+
+ ethsw->dev_id = sw_dev->obj_desc.id;
+
+ err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, &ethsw->dpsw_handle);
+ if (err) {
+ dev_err(dev, "dpsw_open err %d\n", err);
+ return err;
+ }
+
+ err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &ethsw->sw_attr);
+ if (err) {
+ dev_err(dev, "dpsw_get_attributes err %d\n", err);
+ goto err_close;
+ }
+
+ err = dpsw_get_api_version(ethsw->mc_io, 0,
+ &ethsw->major,
+ &ethsw->minor);
+ if (err) {
+ dev_err(dev, "dpsw_get_api_version err %d\n", err);
+ goto err_close;
+ }
+
+ /* Minimum supported DPSW version check */
+ if (ethsw->major < DPSW_MIN_VER_MAJOR ||
+ (ethsw->major == DPSW_MIN_VER_MAJOR &&
+ ethsw->minor < DPSW_MIN_VER_MINOR)) {
+ dev_err(dev, "DPSW version %d:%d not supported. Use firmware 10.28.0 or greater.\n",
+ ethsw->major, ethsw->minor);
+ err = -EOPNOTSUPP;
+ goto err_close;
+ }
+
+ if (!dpaa2_switch_supports_cpu_traffic(ethsw)) {
+ err = -EOPNOTSUPP;
+ goto err_close;
+ }
+
+ dpaa2_switch_detect_features(ethsw);
+
+ err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ if (err) {
+ dev_err(dev, "dpsw_reset err %d\n", err);
+ goto err_close;
+ }
+
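+	/* Start with every interface administratively disabled and its STP
+	 * state set to forwarding on the default VLAN.
+	 */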
+ stp_cfg.vlan_id = DEFAULT_VLAN_ID;
+ stp_cfg.state = DPSW_STP_STATE_FORWARDING;
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ err = dpsw_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle, i);
+ if (err) {
+ dev_err(dev, "dpsw_if_disable err %d\n", err);
+ goto err_close;
+ }
+
+ err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i,
+ &stp_cfg);
+ if (err) {
+ dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
+ err, i);
+ goto err_close;
+ }
+
+		/* The switch starts with all ports configured to VLAN 1. Remove
+		 * this setting to allow configuration at bridge join
+ */
+ vcfg.num_ifs = 1;
+ vcfg.if_id[0] = i;
+ err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DEFAULT_VLAN_ID, &vcfg);
+ if (err) {
+ dev_err(dev, "dpsw_vlan_remove_if_untagged err %d\n",
+ err);
+ goto err_close;
+ }
+
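+		/* Move the port's default TCI to VLAN 4095, a VID reserved by
+		 * IEEE 802.1Q.
+		 */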
+ tci_cfg.vlan_id = 4095;
+ err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, i, &tci_cfg);
+ if (err) {
+ dev_err(dev, "dpsw_if_set_tci err %d\n", err);
+ goto err_close;
+ }
+
+ err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DEFAULT_VLAN_ID, &vcfg);
+ if (err) {
+ dev_err(dev, "dpsw_vlan_remove_if err %d\n", err);
+ goto err_close;
+ }
+ }
+
+ err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, DEFAULT_VLAN_ID);
+ if (err) {
+ dev_err(dev, "dpsw_vlan_remove err %d\n", err);
+ goto err_close;
+ }
+
+ ethsw->workqueue = alloc_ordered_workqueue("%s_%d_ordered",
+ WQ_MEM_RECLAIM, "ethsw",
+ ethsw->sw_attr.id);
+ if (!ethsw->workqueue) {
+ err = -ENOMEM;
+ goto err_close;
+ }
+
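+	/* Drop FDB table 0, present by default on the DPSW object; each port
+	 * sets up its own FDB table in dpaa2_switch_port_init().
+	 */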
+ err = dpsw_fdb_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, 0);
+ if (err)
+ goto err_destroy_ordered_workqueue;
+
+ err = dpaa2_switch_ctrl_if_setup(ethsw);
+ if (err)
+ goto err_destroy_ordered_workqueue;
+
+ return 0;
+
+err_destroy_ordered_workqueue:
+ destroy_workqueue(ethsw->workqueue);
+
+err_close:
+ dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ return err;
+}
+
+/* Add an ACL to redirect frames with specific destination MAC address to
+ * control interface
+ */
+static int dpaa2_switch_port_trap_mac_addr(struct ethsw_port_priv *port_priv,
+ const char *mac)
+{
+ struct dpaa2_switch_acl_entry acl_entry = {0};
+
+ /* Match on the destination MAC address */
+ ether_addr_copy(acl_entry.key.match.l2_dest_mac, mac);
+ eth_broadcast_addr(acl_entry.key.mask.l2_dest_mac);
+
+ /* Trap to CPU */
+ acl_entry.cfg.precedence = 0;
+ acl_entry.cfg.result.action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF;
+
+ return dpaa2_switch_acl_entry_add(port_priv->filter_block, &acl_entry);
+}
+
+static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
+{
+ const char stpa[ETH_ALEN] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};
+ struct switchdev_obj_port_vlan vlan = {
+ .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
+ .vid = DEFAULT_VLAN_ID,
+ .flags = BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID,
+ };
+ struct net_device *netdev = port_priv->netdev;
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpaa2_switch_filter_block *filter_block;
+ struct dpsw_fdb_cfg fdb_cfg = {0};
+ struct dpsw_if_attr dpsw_if_attr;
+ struct dpaa2_switch_fdb *fdb;
+ struct dpsw_acl_cfg acl_cfg;
+ u16 fdb_id, acl_tbl_id;
+ int err;
+
+ /* Get the Tx queue for this specific port */
+ err = dpsw_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port_priv->idx, &dpsw_if_attr);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_get_attributes err %d\n", err);
+ return err;
+ }
+ port_priv->tx_qdid = dpsw_if_attr.qdid;
+
+ /* Create a FDB table for this particular switch port */
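+	/* Split the FDB entry budget evenly across the per-port FDB tables */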
+ fdb_cfg.num_fdb_entries = ethsw->sw_attr.max_fdb_entries / ethsw->sw_attr.num_ifs;
+ err = dpsw_fdb_add(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &fdb_id, &fdb_cfg);
+ if (err) {
+ netdev_err(netdev, "dpsw_fdb_add err %d\n", err);
+ return err;
+ }
+
+ /* Find an unused dpaa2_switch_fdb structure and use it */
+ fdb = dpaa2_switch_fdb_get_unused(ethsw);
+ fdb->fdb_id = fdb_id;
+ fdb->in_use = true;
+ fdb->bridge_dev = NULL;
+ port_priv->fdb = fdb;
+
+ /* We need to add VLAN 1 as the PVID on this port until it is under a
+ * bridge since the DPAA2 switch is not able to handle the traffic in a
+	 * VLAN-unaware fashion
+ */
+ err = dpaa2_switch_port_vlans_add(netdev, &vlan);
+ if (err)
+ return err;
+
+	/* Setup the egress flooding domains (broadcast, unknown unicast) */
+ err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
+ if (err)
+ return err;
+
+ /* Create an ACL table to be used by this switch port */
+ acl_cfg.max_entries = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES;
+ err = dpsw_acl_add(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &acl_tbl_id, &acl_cfg);
+ if (err) {
+ netdev_err(netdev, "dpsw_acl_add err %d\n", err);
+ return err;
+ }
+
+ filter_block = dpaa2_switch_filter_block_get_unused(ethsw);
+ filter_block->ethsw = ethsw;
+ filter_block->acl_id = acl_tbl_id;
+ filter_block->in_use = true;
+ filter_block->num_acl_rules = 0;
+ INIT_LIST_HEAD(&filter_block->acl_entries);
+ INIT_LIST_HEAD(&filter_block->mirror_entries);
+
+ err = dpaa2_switch_port_acl_tbl_bind(port_priv, filter_block);
+ if (err)
+ return err;
+
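+	/* 01:80:C2:00:00:00 is the IEEE 802.1D Bridge Group Address used as
+	 * the destination of STP BPDUs; trap such frames to the control
+	 * interface.
+	 */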
+ err = dpaa2_switch_port_trap_mac_addr(port_priv, stpa);
+ if (err)
+ return err;
+
+ return err;
+}
+
+static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw)
+{
+ dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ dpaa2_switch_free_dpio(ethsw);
+ dpaa2_switch_destroy_rings(ethsw);
+ dpaa2_switch_drain_bp(ethsw);
+ dpaa2_switch_free_dpbp(ethsw);
+}
+
+static void dpaa2_switch_teardown(struct fsl_mc_device *sw_dev)
+{
+ struct device *dev = &sw_dev->dev;
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
+ int err;
+
+ dpaa2_switch_ctrl_if_teardown(ethsw);
+
+ destroy_workqueue(ethsw->workqueue);
+
+ err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ if (err)
+ dev_warn(dev, "dpsw_close err %d\n", err);
+}
+
+static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
+{
+ struct ethsw_port_priv *port_priv;
+ struct ethsw_core *ethsw;
+ struct device *dev;
+ int i;
+
+ dev = &sw_dev->dev;
+ ethsw = dev_get_drvdata(dev);
+
+ dpaa2_switch_teardown_irqs(sw_dev);
+
+ dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ port_priv = ethsw->ports[i];
+ unregister_netdev(port_priv->netdev);
+ dpaa2_switch_remove_port(ethsw, i);
+ }
+
+ kfree(ethsw->fdbs);
+ kfree(ethsw->filter_blocks);
+ kfree(ethsw->ports);
+
+ dpaa2_switch_teardown(sw_dev);
+
+ fsl_mc_portal_free(ethsw->mc_io);
+
+ kfree(ethsw);
+
+ dev_set_drvdata(dev, NULL);
+
+ return 0;
+}
+
+static int dpaa2_switch_probe_port(struct ethsw_core *ethsw,
+ u16 port_idx)
+{
+ struct ethsw_port_priv *port_priv;
+ struct device *dev = ethsw->dev;
+ struct net_device *port_netdev;
+ int err;
+
+ port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
+ if (!port_netdev) {
+ dev_err(dev, "alloc_etherdev error\n");
+ return -ENOMEM;
+ }
+
+ port_priv = netdev_priv(port_netdev);
+ port_priv->netdev = port_netdev;
+ port_priv->ethsw_data = ethsw;
+
+ port_priv->idx = port_idx;
+ port_priv->stp_state = BR_STATE_FORWARDING;
+
+ SET_NETDEV_DEV(port_netdev, dev);
+ port_netdev->netdev_ops = &dpaa2_switch_port_ops;
+ port_netdev->ethtool_ops = &dpaa2_switch_port_ethtool_ops;
+
+ port_netdev->needed_headroom = DPAA2_SWITCH_NEEDED_HEADROOM;
+
+ port_priv->bcast_flood = true;
+ port_priv->ucast_flood = true;
+
+ /* Set MTU limits */
+ port_netdev->min_mtu = ETH_MIN_MTU;
+ port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH;
+
+ /* Populate the private port structure so that later calls to
+ * dpaa2_switch_port_init() can use it.
+ */
+ ethsw->ports[port_idx] = port_priv;
+
+ /* The DPAA2 switch's ingress path depends on the VLAN table,
+ * thus we are not able to disable VLAN filtering.
+ */
+ port_netdev->features = NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_STAG_FILTER |
+ NETIF_F_HW_TC;
+
+ err = dpaa2_switch_port_init(port_priv, port_idx);
+ if (err)
+ goto err_port_probe;
+
+ err = dpaa2_switch_port_set_mac_addr(port_priv);
+ if (err)
+ goto err_port_probe;
+
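+	/* Address learning stays off while the port is stand-alone */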
+ err = dpaa2_switch_port_set_learning(port_priv, false);
+ if (err)
+ goto err_port_probe;
+ port_priv->learn_ena = false;
+
+ err = dpaa2_switch_port_connect_mac(port_priv);
+ if (err)
+ goto err_port_probe;
+
+ return 0;
+
+err_port_probe:
+ free_netdev(port_netdev);
+ ethsw->ports[port_idx] = NULL;
+
+ return err;
+}
+
+static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
+{
+ struct device *dev = &sw_dev->dev;
+ struct ethsw_core *ethsw;
+ int i, err;
+
+	/* Allocate switch core */
+	ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL);
+	if (!ethsw)
+ return -ENOMEM;
+
+ ethsw->dev = dev;
+ ethsw->iommu_domain = iommu_get_domain_for_dev(dev);
+ dev_set_drvdata(dev, ethsw);
+
+ err = fsl_mc_portal_allocate(sw_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
+ &ethsw->mc_io);
+ if (err) {
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
+ goto err_free_drvdata;
+ }
+
+ err = dpaa2_switch_init(sw_dev);
+ if (err)
+ goto err_free_cmdport;
+
+ ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports),
+ GFP_KERNEL);
+ if (!(ethsw->ports)) {
+ err = -ENOMEM;
+ goto err_teardown;
+ }
+
+ ethsw->fdbs = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->fdbs),
+ GFP_KERNEL);
+ if (!ethsw->fdbs) {
+ err = -ENOMEM;
+ goto err_free_ports;
+ }
+
+ ethsw->filter_blocks = kcalloc(ethsw->sw_attr.num_ifs,
+ sizeof(*ethsw->filter_blocks),
+ GFP_KERNEL);
+ if (!ethsw->filter_blocks) {
+ err = -ENOMEM;
+ goto err_free_fdbs;
+ }
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ err = dpaa2_switch_probe_port(ethsw, i);
+ if (err)
+ goto err_free_netdev;
+ }
+
+ /* Add a NAPI instance for each of the Rx queues. The first port's
+ * net_device will be associated with the instances since we do not have
+	 * different queues for each switch port.
+ */
+ for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
+ netif_napi_add(ethsw->ports[0]->netdev, &ethsw->fq[i].napi,
+ dpaa2_switch_poll);
+
+ /* Setup IRQs */
+ err = dpaa2_switch_setup_irqs(sw_dev);
+ if (err)
+ goto err_stop;
+
+ /* By convention, if the mirror port is equal to the number of switch
+ * interfaces, then mirroring of any kind is disabled.
+ */
+ ethsw->mirror_port = ethsw->sw_attr.num_ifs;
+
+ /* Register the netdev only when the entire setup is done and the
+ * switch port interfaces are ready to receive traffic
+ */
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ err = register_netdev(ethsw->ports[i]->netdev);
+ if (err < 0) {
+ dev_err(dev, "register_netdev error %d\n", err);
+ goto err_unregister_ports;
+ }
+ }
+
+ return 0;
+
+err_unregister_ports:
+ for (i--; i >= 0; i--)
+ unregister_netdev(ethsw->ports[i]->netdev);
+ dpaa2_switch_teardown_irqs(sw_dev);
+err_stop:
+ dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
+err_free_netdev:
+ for (i--; i >= 0; i--)
+ dpaa2_switch_remove_port(ethsw, i);
+ kfree(ethsw->filter_blocks);
+err_free_fdbs:
+ kfree(ethsw->fdbs);
+err_free_ports:
+ kfree(ethsw->ports);
+
+err_teardown:
+ dpaa2_switch_teardown(sw_dev);
+
+err_free_cmdport:
+ fsl_mc_portal_free(ethsw->mc_io);
+
+err_free_drvdata:
+ kfree(ethsw);
+ dev_set_drvdata(dev, NULL);
+
+ return err;
+}
+
+static const struct fsl_mc_device_id dpaa2_switch_match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpsw",
+ },
+ { .vendor = 0x0 }
+};
+MODULE_DEVICE_TABLE(fslmc, dpaa2_switch_match_id_table);
+
+static struct fsl_mc_driver dpaa2_switch_drv = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = dpaa2_switch_probe,
+ .remove = dpaa2_switch_remove,
+ .match_id_table = dpaa2_switch_match_id_table
+};
+
+static struct notifier_block dpaa2_switch_port_nb __read_mostly = {
+ .notifier_call = dpaa2_switch_port_netdevice_event,
+};
+
+static struct notifier_block dpaa2_switch_port_switchdev_nb = {
+ .notifier_call = dpaa2_switch_port_event,
+};
+
+static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb = {
+ .notifier_call = dpaa2_switch_port_blocking_event,
+};
+
+static int dpaa2_switch_register_notifiers(void)
+{
+ int err;
+
+ err = register_netdevice_notifier(&dpaa2_switch_port_nb);
+ if (err) {
+ pr_err("dpaa2-switch: failed to register net_device notifier (%d)\n", err);
+ return err;
+ }
+
+ err = register_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
+ if (err) {
+ pr_err("dpaa2-switch: failed to register switchdev notifier (%d)\n", err);
+ goto err_switchdev_nb;
+ }
+
+ err = register_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb);
+ if (err) {
+ pr_err("dpaa2-switch: failed to register switchdev blocking notifier (%d)\n", err);
+ goto err_switchdev_blocking_nb;
+ }
+
+ return 0;
+
+err_switchdev_blocking_nb:
+ unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
+err_switchdev_nb:
+ unregister_netdevice_notifier(&dpaa2_switch_port_nb);
+
+ return err;
+}
+
+static void dpaa2_switch_unregister_notifiers(void)
+{
+ int err;
+
+ err = unregister_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb);
+ if (err)
+ pr_err("dpaa2-switch: failed to unregister switchdev blocking notifier (%d)\n",
+ err);
+
+ err = unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
+ if (err)
+ pr_err("dpaa2-switch: failed to unregister switchdev notifier (%d)\n", err);
+
+ err = unregister_netdevice_notifier(&dpaa2_switch_port_nb);
+ if (err)
+ pr_err("dpaa2-switch: failed to unregister net_device notifier (%d)\n", err);
+}
+
+static int __init dpaa2_switch_driver_init(void)
+{
+ int err;
+
+ err = fsl_mc_driver_register(&dpaa2_switch_drv);
+ if (err)
+ return err;
+
+ err = dpaa2_switch_register_notifiers();
+ if (err) {
+ fsl_mc_driver_unregister(&dpaa2_switch_drv);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __exit dpaa2_switch_driver_exit(void)
+{
+ dpaa2_switch_unregister_notifiers();
+ fsl_mc_driver_unregister(&dpaa2_switch_drv);
+}
+
+module_init(dpaa2_switch_driver_init);
+module_exit(dpaa2_switch_driver_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver");
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
new file mode 100644
index 000000000..0002dca4d
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
@@ -0,0 +1,280 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DPAA2 Ethernet Switch declarations
+ *
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2021 NXP
+ *
+ */
+
+#ifndef __ETHSW_H
+#define __ETHSW_H
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+#include <uapi/linux/if_bridge.h>
+#include <net/switchdev.h>
+#include <linux/if_bridge.h>
+#include <linux/fsl/mc.h>
+#include <net/pkt_cls.h>
+#include <soc/fsl/dpaa2-io.h>
+
+#include "dpaa2-mac.h"
+#include "dpsw.h"
+
+/* Number of IRQs supported */
+#define DPSW_IRQ_NUM 2
+
+/* Port is member of VLAN */
+#define ETHSW_VLAN_MEMBER 1
+/* VLAN to be treated as untagged on egress */
+#define ETHSW_VLAN_UNTAGGED 2
+/* Untagged frames will be assigned to this VLAN */
+#define ETHSW_VLAN_PVID 4
+/* VLAN configured on the switch */
+#define ETHSW_VLAN_GLOBAL 8
+
+/* Maximum Frame Length supported by HW (currently 10k) */
+#define DPAA2_MFL (10 * 1024)
+#define ETHSW_MAX_FRAME_LENGTH (DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN)
+#define ETHSW_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN)
+
+#define ETHSW_FEATURE_MAC_ADDR BIT(0)
+
+/* Number of control interface frame queues (one Rx and one Tx confirmation) */
+#define DPAA2_SWITCH_RX_NUM_FQS 2
+
+/* Hardware requires alignment for ingress/egress buffer addresses */
+#define DPAA2_SWITCH_RX_BUF_RAW_SIZE PAGE_SIZE
+#define DPAA2_SWITCH_RX_BUF_TAILROOM \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+#define DPAA2_SWITCH_RX_BUF_SIZE \
+ (DPAA2_SWITCH_RX_BUF_RAW_SIZE - DPAA2_SWITCH_RX_BUF_TAILROOM)
+
+#define DPAA2_SWITCH_STORE_SIZE 16
+
+/* Buffer management */
+#define BUFS_PER_CMD 7
+#define DPAA2_ETHSW_NUM_BUFS (1024 * BUFS_PER_CMD)
+#define DPAA2_ETHSW_REFILL_THRESH (DPAA2_ETHSW_NUM_BUFS * 5 / 6)
+
+/* Number of times to retry DPIO portal operations while waiting
+ * for portal to finish executing current command and become
+ * available. We want to avoid being stuck in a while loop in case
+ * hardware becomes unresponsive, but not give up too easily if
+ * the portal really is busy for valid reasons
+ */
+#define DPAA2_SWITCH_SWP_BUSY_RETRIES 1000
+
+/* Hardware annotation buffer size */
+#define DPAA2_SWITCH_HWA_SIZE 64
+/* Software annotation buffer size */
+#define DPAA2_SWITCH_SWA_SIZE 64
+
+#define DPAA2_SWITCH_TX_BUF_ALIGN 64
+
+#define DPAA2_SWITCH_TX_DATA_OFFSET \
+ (DPAA2_SWITCH_HWA_SIZE + DPAA2_SWITCH_SWA_SIZE)
+
+#define DPAA2_SWITCH_NEEDED_HEADROOM \
+ (DPAA2_SWITCH_TX_DATA_OFFSET + DPAA2_SWITCH_TX_BUF_ALIGN)
+
+#define DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES 16
+#define DPAA2_ETHSW_PORT_DEFAULT_TRAPS 1
+
+#define DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE 256
+
+extern const struct ethtool_ops dpaa2_switch_port_ethtool_ops;
+
+struct ethsw_core;
+
+struct dpaa2_switch_fq {
+ struct ethsw_core *ethsw;
+ enum dpsw_queue_type type;
+ struct dpaa2_io_store *store;
+ struct dpaa2_io_notification_ctx nctx;
+ struct napi_struct napi;
+ u32 fqid;
+};
+
+struct dpaa2_switch_fdb {
+ struct net_device *bridge_dev;
+ u16 fdb_id;
+ bool in_use;
+};
+
+struct dpaa2_switch_acl_entry {
+ struct list_head list;
+ u16 prio;
+ unsigned long cookie;
+
+ struct dpsw_acl_entry_cfg cfg;
+ struct dpsw_acl_key key;
+};
+
+struct dpaa2_switch_mirror_entry {
+ struct list_head list;
+ struct dpsw_reflection_cfg cfg;
+ unsigned long cookie;
+ u16 if_id;
+};
+
+struct dpaa2_switch_filter_block {
+ struct ethsw_core *ethsw;
+ u64 ports;
+ bool in_use;
+
+ struct list_head acl_entries;
+ u16 acl_id;
+ u8 num_acl_rules;
+
+ struct list_head mirror_entries;
+};
+
+static inline bool
+dpaa2_switch_acl_tbl_is_full(struct dpaa2_switch_filter_block *filter_block)
+{
+ if ((filter_block->num_acl_rules + DPAA2_ETHSW_PORT_DEFAULT_TRAPS) >=
+ DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES)
+ return true;
+ return false;
+}
+
+/* Per port private data */
+struct ethsw_port_priv {
+ struct net_device *netdev;
+ u16 idx;
+ struct ethsw_core *ethsw_data;
+ u8 link_state;
+ u8 stp_state;
+
+ u8 vlans[VLAN_VID_MASK + 1];
+ u16 pvid;
+ u16 tx_qdid;
+
+ struct dpaa2_switch_fdb *fdb;
+ bool bcast_flood;
+ bool ucast_flood;
+ bool learn_ena;
+
+ struct dpaa2_switch_filter_block *filter_block;
+ struct dpaa2_mac *mac;
+};
+
+/* Switch data */
+struct ethsw_core {
+ struct device *dev;
+ struct fsl_mc_io *mc_io;
+ u16 dpsw_handle;
+ struct dpsw_attr sw_attr;
+ u16 major, minor;
+ unsigned long features;
+ int dev_id;
+ struct ethsw_port_priv **ports;
+ struct iommu_domain *iommu_domain;
+
+ u8 vlans[VLAN_VID_MASK + 1];
+
+ struct workqueue_struct *workqueue;
+
+ struct dpaa2_switch_fq fq[DPAA2_SWITCH_RX_NUM_FQS];
+ struct fsl_mc_device *dpbp_dev;
+ int buf_count;
+ u16 bpid;
+ int napi_users;
+
+ struct dpaa2_switch_fdb *fdbs;
+ struct dpaa2_switch_filter_block *filter_blocks;
+ u16 mirror_port;
+};
+
+static inline int dpaa2_switch_get_index(struct ethsw_core *ethsw,
+ struct net_device *netdev)
+{
+ int i;
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
+ if (ethsw->ports[i]->netdev == netdev)
+ return ethsw->ports[i]->idx;
+
+ return -EINVAL;
+}
+
+static inline bool dpaa2_switch_supports_cpu_traffic(struct ethsw_core *ethsw)
+{
+ if (ethsw->sw_attr.options & DPSW_OPT_CTRL_IF_DIS) {
+ dev_err(ethsw->dev, "Control Interface is disabled, cannot probe\n");
+ return false;
+ }
+
+ if (ethsw->sw_attr.flooding_cfg != DPSW_FLOODING_PER_FDB) {
+ dev_err(ethsw->dev, "Flooding domain is not per FDB, cannot probe\n");
+ return false;
+ }
+
+ if (ethsw->sw_attr.broadcast_cfg != DPSW_BROADCAST_PER_FDB) {
+ dev_err(ethsw->dev, "Broadcast domain is not per FDB, cannot probe\n");
+ return false;
+ }
+
+ if (ethsw->sw_attr.max_fdbs < ethsw->sw_attr.num_ifs) {
+ dev_err(ethsw->dev, "The number of FDBs is lower than the number of ports, cannot probe\n");
+ return false;
+ }
+
+ return true;
+}
+
+static inline bool
+dpaa2_switch_port_is_type_phy(struct ethsw_port_priv *port_priv)
+{
+ if (port_priv->mac &&
+ (port_priv->mac->attr.link_type == DPMAC_LINK_TYPE_PHY ||
+ port_priv->mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE))
+ return true;
+
+ return false;
+}
+
+static inline bool dpaa2_switch_port_has_mac(struct ethsw_port_priv *port_priv)
+{
+ return port_priv->mac ? true : false;
+}
+
+bool dpaa2_switch_port_dev_check(const struct net_device *netdev);
+
+int dpaa2_switch_port_vlans_add(struct net_device *netdev,
+ const struct switchdev_obj_port_vlan *vlan);
+
+int dpaa2_switch_port_vlans_del(struct net_device *netdev,
+ const struct switchdev_obj_port_vlan *vlan);
+
+typedef int dpaa2_switch_fdb_cb_t(struct ethsw_port_priv *port_priv,
+ struct fdb_dump_entry *fdb_entry,
+ void *data);
+
+/* TC offload */
+
+int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_filter_block *block,
+ struct flow_cls_offload *cls);
+
+int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_filter_block *block,
+ struct flow_cls_offload *cls);
+
+int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_filter_block *block,
+ struct tc_cls_matchall_offload *cls);
+
+int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_filter_block *block,
+ struct tc_cls_matchall_offload *cls);
+
+int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *block,
+ struct dpaa2_switch_acl_entry *entry);
+
+int dpaa2_switch_block_offload_mirror(struct dpaa2_switch_filter_block *block,
+ struct ethsw_port_priv *port_priv);
+
+int dpaa2_switch_block_unoffload_mirror(struct dpaa2_switch_filter_block *block,
+ struct ethsw_port_priv *port_priv);
+#endif /* __ETHSW_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpkg.h b/drivers/net/ethernet/freescale/dpaa2/dpkg.h
new file mode 100644
index 000000000..6f596a5fb
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpkg.h
@@ -0,0 +1,481 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2013-2015 Freescale Semiconductor Inc.
+ */
+#ifndef __FSL_DPKG_H_
+#define __FSL_DPKG_H_
+
+#include <linux/types.h>
+
+/* Data Path Key Generator API
+ * Contains initialization APIs and runtime APIs for the Key Generator
+ */
+
+/** Key Generator properties */
+
+/**
+ * DPKG_NUM_OF_MASKS - Number of masks per key extraction
+ */
+#define DPKG_NUM_OF_MASKS 4
+
+/**
+ * DPKG_MAX_NUM_OF_EXTRACTS - Number of extractions per key profile
+ */
+#define DPKG_MAX_NUM_OF_EXTRACTS 10
+
+/**
+ * enum dpkg_extract_from_hdr_type - Selecting extraction by header types
+ * @DPKG_FROM_HDR: Extract selected bytes from header, by offset
+ * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field
+ * @DPKG_FULL_FIELD: Extract a full field
+ */
+enum dpkg_extract_from_hdr_type {
+ DPKG_FROM_HDR = 0,
+ DPKG_FROM_FIELD = 1,
+ DPKG_FULL_FIELD = 2
+};
+
+/**
+ * enum dpkg_extract_type - Enumeration for selecting extraction type
+ * @DPKG_EXTRACT_FROM_HDR: Extract from the header
+ * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header
+ * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result;
+ * e.g. can be used to extract header existence;
+ * please refer to 'Parse Result definition' section in the parser BG
+ */
+enum dpkg_extract_type {
+ DPKG_EXTRACT_FROM_HDR = 0,
+ DPKG_EXTRACT_FROM_DATA = 1,
+ DPKG_EXTRACT_FROM_PARSE = 3
+};
+
+/**
+ * struct dpkg_mask - A structure for defining a single extraction mask
+ * @mask: Byte mask for the extracted content
+ * @offset: Offset within the extracted content
+ */
+struct dpkg_mask {
+ u8 mask;
+ u8 offset;
+};
+
+/* Protocol fields */
+
+/* Ethernet fields */
+#define NH_FLD_ETH_DA BIT(0)
+#define NH_FLD_ETH_SA BIT(1)
+#define NH_FLD_ETH_LENGTH BIT(2)
+#define NH_FLD_ETH_TYPE BIT(3)
+#define NH_FLD_ETH_FINAL_CKSUM BIT(4)
+#define NH_FLD_ETH_PADDING BIT(5)
+#define NH_FLD_ETH_ALL_FIELDS (BIT(6) - 1)
+
+/* VLAN fields */
+#define NH_FLD_VLAN_VPRI BIT(0)
+#define NH_FLD_VLAN_CFI BIT(1)
+#define NH_FLD_VLAN_VID BIT(2)
+#define NH_FLD_VLAN_LENGTH BIT(3)
+#define NH_FLD_VLAN_TYPE BIT(4)
+#define NH_FLD_VLAN_ALL_FIELDS (BIT(5) - 1)
+
+#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \
+ NH_FLD_VLAN_CFI | \
+ NH_FLD_VLAN_VID)
+
+/* IP (generic) fields */
+#define NH_FLD_IP_VER BIT(0)
+#define NH_FLD_IP_DSCP BIT(2)
+#define NH_FLD_IP_ECN BIT(3)
+#define NH_FLD_IP_PROTO BIT(4)
+#define NH_FLD_IP_SRC BIT(5)
+#define NH_FLD_IP_DST BIT(6)
+#define NH_FLD_IP_TOS_TC BIT(7)
+#define NH_FLD_IP_ID BIT(8)
+#define NH_FLD_IP_ALL_FIELDS (BIT(9) - 1)
+
+/* IPV4 fields */
+#define NH_FLD_IPV4_VER BIT(0)
+#define NH_FLD_IPV4_HDR_LEN BIT(1)
+#define NH_FLD_IPV4_TOS BIT(2)
+#define NH_FLD_IPV4_TOTAL_LEN BIT(3)
+#define NH_FLD_IPV4_ID BIT(4)
+#define NH_FLD_IPV4_FLAG_D BIT(5)
+#define NH_FLD_IPV4_FLAG_M BIT(6)
+#define NH_FLD_IPV4_OFFSET BIT(7)
+#define NH_FLD_IPV4_TTL BIT(8)
+#define NH_FLD_IPV4_PROTO BIT(9)
+#define NH_FLD_IPV4_CKSUM BIT(10)
+#define NH_FLD_IPV4_SRC_IP BIT(11)
+#define NH_FLD_IPV4_DST_IP BIT(12)
+#define NH_FLD_IPV4_OPTS BIT(13)
+#define NH_FLD_IPV4_OPTS_COUNT BIT(14)
+#define NH_FLD_IPV4_ALL_FIELDS (BIT(15) - 1)
+
+/* IPV6 fields */
+#define NH_FLD_IPV6_VER BIT(0)
+#define NH_FLD_IPV6_TC BIT(1)
+#define NH_FLD_IPV6_SRC_IP BIT(2)
+#define NH_FLD_IPV6_DST_IP BIT(3)
+#define NH_FLD_IPV6_NEXT_HDR BIT(4)
+#define NH_FLD_IPV6_FL BIT(5)
+#define NH_FLD_IPV6_HOP_LIMIT BIT(6)
+#define NH_FLD_IPV6_ID BIT(7)
+#define NH_FLD_IPV6_ALL_FIELDS (BIT(8) - 1)
+
+/* ICMP fields */
+#define NH_FLD_ICMP_TYPE BIT(0)
+#define NH_FLD_ICMP_CODE BIT(1)
+#define NH_FLD_ICMP_CKSUM BIT(2)
+#define NH_FLD_ICMP_ID BIT(3)
+#define NH_FLD_ICMP_SQ_NUM BIT(4)
+#define NH_FLD_ICMP_ALL_FIELDS (BIT(5) - 1)
+
+/* IGMP fields */
+#define NH_FLD_IGMP_VERSION BIT(0)
+#define NH_FLD_IGMP_TYPE BIT(1)
+#define NH_FLD_IGMP_CKSUM BIT(2)
+#define NH_FLD_IGMP_DATA BIT(3)
+#define NH_FLD_IGMP_ALL_FIELDS (BIT(4) - 1)
+
+/* TCP fields */
+#define NH_FLD_TCP_PORT_SRC BIT(0)
+#define NH_FLD_TCP_PORT_DST BIT(1)
+#define NH_FLD_TCP_SEQ BIT(2)
+#define NH_FLD_TCP_ACK BIT(3)
+#define NH_FLD_TCP_OFFSET BIT(4)
+#define NH_FLD_TCP_FLAGS BIT(5)
+#define NH_FLD_TCP_WINDOW BIT(6)
+#define NH_FLD_TCP_CKSUM BIT(7)
+#define NH_FLD_TCP_URGPTR BIT(8)
+#define NH_FLD_TCP_OPTS BIT(9)
+#define NH_FLD_TCP_OPTS_COUNT BIT(10)
+#define NH_FLD_TCP_ALL_FIELDS (BIT(11) - 1)
+
+/* UDP fields */
+#define NH_FLD_UDP_PORT_SRC BIT(0)
+#define NH_FLD_UDP_PORT_DST BIT(1)
+#define NH_FLD_UDP_LEN BIT(2)
+#define NH_FLD_UDP_CKSUM BIT(3)
+#define NH_FLD_UDP_ALL_FIELDS (BIT(4) - 1)
+
+/* UDP-lite fields */
+#define NH_FLD_UDP_LITE_PORT_SRC BIT(0)
+#define NH_FLD_UDP_LITE_PORT_DST BIT(1)
+#define NH_FLD_UDP_LITE_ALL_FIELDS (BIT(2) - 1)
+
+/* UDP-encap-ESP fields */
+#define NH_FLD_UDP_ENC_ESP_PORT_SRC BIT(0)
+#define NH_FLD_UDP_ENC_ESP_PORT_DST BIT(1)
+#define NH_FLD_UDP_ENC_ESP_LEN BIT(2)
+#define NH_FLD_UDP_ENC_ESP_CKSUM BIT(3)
+#define NH_FLD_UDP_ENC_ESP_SPI BIT(4)
+#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM BIT(5)
+#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS (BIT(6) - 1)
+
+/* SCTP fields */
+#define NH_FLD_SCTP_PORT_SRC BIT(0)
+#define NH_FLD_SCTP_PORT_DST BIT(1)
+#define NH_FLD_SCTP_VER_TAG BIT(2)
+#define NH_FLD_SCTP_CKSUM BIT(3)
+#define NH_FLD_SCTP_ALL_FIELDS (BIT(4) - 1)
+
+/* DCCP fields */
+#define NH_FLD_DCCP_PORT_SRC BIT(0)
+#define NH_FLD_DCCP_PORT_DST BIT(1)
+#define NH_FLD_DCCP_ALL_FIELDS (BIT(2) - 1)
+
+/* IPHC fields */
+#define NH_FLD_IPHC_CID BIT(0)
+#define NH_FLD_IPHC_CID_TYPE BIT(1)
+#define NH_FLD_IPHC_HCINDEX BIT(2)
+#define NH_FLD_IPHC_GEN BIT(3)
+#define NH_FLD_IPHC_D_BIT BIT(4)
+#define NH_FLD_IPHC_ALL_FIELDS (BIT(5) - 1)
+
+/* SCTP fields */
+#define NH_FLD_SCTP_CHUNK_DATA_TYPE BIT(0)
+#define NH_FLD_SCTP_CHUNK_DATA_FLAGS BIT(1)
+#define NH_FLD_SCTP_CHUNK_DATA_LENGTH BIT(2)
+#define NH_FLD_SCTP_CHUNK_DATA_TSN BIT(3)
+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID BIT(4)
+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN BIT(5)
+#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID BIT(6)
+#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED BIT(7)
+#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING BIT(8)
+#define NH_FLD_SCTP_CHUNK_DATA_END BIT(9)
+#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS (BIT(10) - 1)
+
+/* L2TPV2 fields */
+#define NH_FLD_L2TPV2_TYPE_BIT BIT(0)
+#define NH_FLD_L2TPV2_LENGTH_BIT BIT(1)
+#define NH_FLD_L2TPV2_SEQUENCE_BIT BIT(2)
+#define NH_FLD_L2TPV2_OFFSET_BIT BIT(3)
+#define NH_FLD_L2TPV2_PRIORITY_BIT BIT(4)
+#define NH_FLD_L2TPV2_VERSION BIT(5)
+#define NH_FLD_L2TPV2_LEN BIT(6)
+#define NH_FLD_L2TPV2_TUNNEL_ID BIT(7)
+#define NH_FLD_L2TPV2_SESSION_ID BIT(8)
+#define NH_FLD_L2TPV2_NS BIT(9)
+#define NH_FLD_L2TPV2_NR BIT(10)
+#define NH_FLD_L2TPV2_OFFSET_SIZE BIT(11)
+#define NH_FLD_L2TPV2_FIRST_BYTE BIT(12)
+#define NH_FLD_L2TPV2_ALL_FIELDS (BIT(13) - 1)
+
+/* L2TPV3 fields */
+#define NH_FLD_L2TPV3_CTRL_TYPE_BIT BIT(0)
+#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT BIT(1)
+#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT BIT(2)
+#define NH_FLD_L2TPV3_CTRL_VERSION BIT(3)
+#define NH_FLD_L2TPV3_CTRL_LENGTH BIT(4)
+#define NH_FLD_L2TPV3_CTRL_CONTROL BIT(5)
+#define NH_FLD_L2TPV3_CTRL_SENT BIT(6)
+#define NH_FLD_L2TPV3_CTRL_RECV BIT(7)
+#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE BIT(8)
+#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS (BIT(9) - 1)
+
+#define NH_FLD_L2TPV3_SESS_TYPE_BIT BIT(0)
+#define NH_FLD_L2TPV3_SESS_VERSION BIT(1)
+#define NH_FLD_L2TPV3_SESS_ID BIT(2)
+#define NH_FLD_L2TPV3_SESS_COOKIE BIT(3)
+#define NH_FLD_L2TPV3_SESS_ALL_FIELDS (BIT(4) - 1)
+
+/* PPP fields */
+#define NH_FLD_PPP_PID BIT(0)
+#define NH_FLD_PPP_COMPRESSED BIT(1)
+#define NH_FLD_PPP_ALL_FIELDS (BIT(2) - 1)
+
+/* PPPoE fields */
+#define NH_FLD_PPPOE_VER BIT(0)
+#define NH_FLD_PPPOE_TYPE BIT(1)
+#define NH_FLD_PPPOE_CODE BIT(2)
+#define NH_FLD_PPPOE_SID BIT(3)
+#define NH_FLD_PPPOE_LEN BIT(4)
+#define NH_FLD_PPPOE_SESSION BIT(5)
+#define NH_FLD_PPPOE_PID BIT(6)
+#define NH_FLD_PPPOE_ALL_FIELDS (BIT(7) - 1)
+
+/* PPP-Mux fields */
+#define NH_FLD_PPPMUX_PID BIT(0)
+#define NH_FLD_PPPMUX_CKSUM BIT(1)
+#define NH_FLD_PPPMUX_COMPRESSED BIT(2)
+#define NH_FLD_PPPMUX_ALL_FIELDS (BIT(3) - 1)
+
+/* PPP-Mux sub-frame fields */
+#define NH_FLD_PPPMUX_SUBFRM_PFF BIT(0)
+#define NH_FLD_PPPMUX_SUBFRM_LXT BIT(1)
+#define NH_FLD_PPPMUX_SUBFRM_LEN BIT(2)
+#define NH_FLD_PPPMUX_SUBFRM_PID BIT(3)
+#define NH_FLD_PPPMUX_SUBFRM_USE_PID BIT(4)
+#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS (BIT(5) - 1)
+
+/* LLC fields */
+#define NH_FLD_LLC_DSAP BIT(0)
+#define NH_FLD_LLC_SSAP BIT(1)
+#define NH_FLD_LLC_CTRL BIT(2)
+#define NH_FLD_LLC_ALL_FIELDS (BIT(3) - 1)
+
+/* NLPID fields */
+#define NH_FLD_NLPID_NLPID BIT(0)
+#define NH_FLD_NLPID_ALL_FIELDS (BIT(1) - 1)
+
+/* SNAP fields */
+#define NH_FLD_SNAP_OUI BIT(0)
+#define NH_FLD_SNAP_PID BIT(1)
+#define NH_FLD_SNAP_ALL_FIELDS (BIT(2) - 1)
+
+/* LLC SNAP fields */
+#define NH_FLD_LLC_SNAP_TYPE BIT(0)
+#define NH_FLD_LLC_SNAP_ALL_FIELDS (BIT(1) - 1)
+
+/* ARP fields */
+#define NH_FLD_ARP_HTYPE BIT(0)
+#define NH_FLD_ARP_PTYPE BIT(1)
+#define NH_FLD_ARP_HLEN BIT(2)
+#define NH_FLD_ARP_PLEN BIT(3)
+#define NH_FLD_ARP_OPER BIT(4)
+#define NH_FLD_ARP_SHA BIT(5)
+#define NH_FLD_ARP_SPA BIT(6)
+#define NH_FLD_ARP_THA BIT(7)
+#define NH_FLD_ARP_TPA BIT(8)
+#define NH_FLD_ARP_ALL_FIELDS (BIT(9) - 1)
+
+/* RFC2684 fields */
+#define NH_FLD_RFC2684_LLC BIT(0)
+#define NH_FLD_RFC2684_NLPID BIT(1)
+#define NH_FLD_RFC2684_OUI BIT(2)
+#define NH_FLD_RFC2684_PID BIT(3)
+#define NH_FLD_RFC2684_VPN_OUI BIT(4)
+#define NH_FLD_RFC2684_VPN_IDX BIT(5)
+#define NH_FLD_RFC2684_ALL_FIELDS (BIT(6) - 1)
+
+/* User defined fields */
+#define NH_FLD_USER_DEFINED_SRCPORT BIT(0)
+#define NH_FLD_USER_DEFINED_PCDID BIT(1)
+#define NH_FLD_USER_DEFINED_ALL_FIELDS (BIT(2) - 1)
+
+/* Payload fields */
+#define NH_FLD_PAYLOAD_BUFFER BIT(0)
+#define NH_FLD_PAYLOAD_SIZE BIT(1)
+#define NH_FLD_MAX_FRM_SIZE BIT(2)
+#define NH_FLD_MIN_FRM_SIZE BIT(3)
+#define NH_FLD_PAYLOAD_TYPE BIT(4)
+#define NH_FLD_FRAME_SIZE BIT(5)
+#define NH_FLD_PAYLOAD_ALL_FIELDS (BIT(6) - 1)
+
+/* GRE fields */
+#define NH_FLD_GRE_TYPE BIT(0)
+#define NH_FLD_GRE_ALL_FIELDS (BIT(1) - 1)
+
+/* MINENCAP fields */
+#define NH_FLD_MINENCAP_SRC_IP BIT(0)
+#define NH_FLD_MINENCAP_DST_IP BIT(1)
+#define NH_FLD_MINENCAP_TYPE BIT(2)
+#define NH_FLD_MINENCAP_ALL_FIELDS (BIT(3) - 1)
+
+/* IPSEC AH fields */
+#define NH_FLD_IPSEC_AH_SPI BIT(0)
+#define NH_FLD_IPSEC_AH_NH BIT(1)
+#define NH_FLD_IPSEC_AH_ALL_FIELDS (BIT(2) - 1)
+
+/* IPSEC ESP fields */
+#define NH_FLD_IPSEC_ESP_SPI BIT(0)
+#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM BIT(1)
+#define NH_FLD_IPSEC_ESP_ALL_FIELDS (BIT(2) - 1)
+
+/* MPLS fields */
+#define NH_FLD_MPLS_LABEL_STACK BIT(0)
+#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS (BIT(1) - 1)
+
+/* MACSEC fields */
+#define NH_FLD_MACSEC_SECTAG BIT(0)
+#define NH_FLD_MACSEC_ALL_FIELDS (BIT(1) - 1)
+
+/* GTP fields */
+#define NH_FLD_GTP_TEID BIT(0)
+
+/* Supported protocols */
+enum net_prot {
+ NET_PROT_NONE = 0,
+ NET_PROT_PAYLOAD,
+ NET_PROT_ETH,
+ NET_PROT_VLAN,
+ NET_PROT_IPV4,
+ NET_PROT_IPV6,
+ NET_PROT_IP,
+ NET_PROT_TCP,
+ NET_PROT_UDP,
+ NET_PROT_UDP_LITE,
+ NET_PROT_IPHC,
+ NET_PROT_SCTP,
+ NET_PROT_SCTP_CHUNK_DATA,
+ NET_PROT_PPPOE,
+ NET_PROT_PPP,
+ NET_PROT_PPPMUX,
+ NET_PROT_PPPMUX_SUBFRM,
+ NET_PROT_L2TPV2,
+ NET_PROT_L2TPV3_CTRL,
+ NET_PROT_L2TPV3_SESS,
+ NET_PROT_LLC,
+ NET_PROT_LLC_SNAP,
+ NET_PROT_NLPID,
+ NET_PROT_SNAP,
+ NET_PROT_MPLS,
+ NET_PROT_IPSEC_AH,
+ NET_PROT_IPSEC_ESP,
+ NET_PROT_UDP_ENC_ESP, /* RFC 3948 */
+ NET_PROT_MACSEC,
+ NET_PROT_GRE,
+ NET_PROT_MINENCAP,
+ NET_PROT_DCCP,
+ NET_PROT_ICMP,
+ NET_PROT_IGMP,
+ NET_PROT_ARP,
+ NET_PROT_CAPWAP_DATA,
+ NET_PROT_CAPWAP_CTRL,
+ NET_PROT_RFC2684,
+ NET_PROT_ICMPV6,
+ NET_PROT_FCOE,
+ NET_PROT_FIP,
+ NET_PROT_ISCSI,
+ NET_PROT_GTP,
+ NET_PROT_USER_DEFINED_L2,
+ NET_PROT_USER_DEFINED_L3,
+ NET_PROT_USER_DEFINED_L4,
+ NET_PROT_USER_DEFINED_L5,
+ NET_PROT_USER_DEFINED_SHIM1,
+ NET_PROT_USER_DEFINED_SHIM2,
+
+ NET_PROT_DUMMY_LAST
+};
+
+/**
+ * struct dpkg_extract - A structure for defining a single extraction
+ * @type: Determines how the union below is interpreted:
+ * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr';
+ * DPKG_EXTRACT_FROM_DATA: selects 'from_data';
+ * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse'
+ * @extract: Selects extraction method
+ * @extract.from_hdr: Used when 'type = DPKG_EXTRACT_FROM_HDR'
+ * @extract.from_data: Used when 'type = DPKG_EXTRACT_FROM_DATA'
+ * @extract.from_parse: Used when 'type = DPKG_EXTRACT_FROM_PARSE'
+ * @extract.from_hdr.prot: Any of the supported headers
+ * @extract.from_hdr.type: Defines the type of header extraction:
+ * DPKG_FROM_HDR: use size & offset below;
+ * DPKG_FROM_FIELD: use field, size and offset below;
+ * DPKG_FULL_FIELD: use field below
+ * @extract.from_hdr.field: One of the supported fields (NH_FLD_)
+ * @extract.from_hdr.size: Size in bytes
+ * @extract.from_hdr.offset: Byte offset
+ * @extract.from_hdr.hdr_index: Clear for cases not listed below;
+ * Used for protocols that may have more than a single
+ * header, 0 indicates an outer header;
+ * Supported protocols (possible values):
+ * NET_PROT_VLAN (0, HDR_INDEX_LAST);
+ * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST);
+ * NET_PROT_IP(0, HDR_INDEX_LAST);
+ * NET_PROT_IPv4(0, HDR_INDEX_LAST);
+ * NET_PROT_IPv6(0, HDR_INDEX_LAST);
+ * @extract.from_data.size: Size in bytes
+ * @extract.from_data.offset: Byte offset
+ * @extract.from_parse.size: Size in bytes
+ * @extract.from_parse.offset: Byte offset
+ * @num_of_byte_masks: Defines the number of valid entries in the array below;
+ * This is also the number of bytes to be used as masks
+ * @masks: Masks parameters
+ */
+struct dpkg_extract {
+ enum dpkg_extract_type type;
+ union {
+ struct {
+ enum net_prot prot;
+ enum dpkg_extract_from_hdr_type type;
+ u32 field;
+ u8 size;
+ u8 offset;
+ u8 hdr_index;
+ } from_hdr;
+ struct {
+ u8 size;
+ u8 offset;
+ } from_data;
+ struct {
+ u8 size;
+ u8 offset;
+ } from_parse;
+ } extract;
+
+ u8 num_of_byte_masks;
+ struct dpkg_mask masks[DPKG_NUM_OF_MASKS];
+};
+
+/**
+ * struct dpkg_profile_cfg - A structure for defining a full Key Generation
+ * profile (rule)
+ * @num_extracts: Defines the number of valid entries in the array below
+ * @extracts: Array of required extractions
+ */
+struct dpkg_profile_cfg {
+ u8 num_extracts;
+ struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
+};
+
+#endif /* __FSL_DPKG_H_ */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
new file mode 100644
index 000000000..e9ac2ecef
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2019 NXP
+ */
+#ifndef _FSL_DPMAC_CMD_H
+#define _FSL_DPMAC_CMD_H
+
+/* DPMAC Version */
+#define DPMAC_VER_MAJOR 4
+#define DPMAC_VER_MINOR 4
+#define DPMAC_CMD_BASE_VERSION 1
+#define DPMAC_CMD_2ND_VERSION 2
+#define DPMAC_CMD_ID_OFFSET 4
+
+#define DPMAC_CMD(id) (((id) << DPMAC_CMD_ID_OFFSET) | DPMAC_CMD_BASE_VERSION)
+#define DPMAC_CMD_V2(id) (((id) << DPMAC_CMD_ID_OFFSET) | DPMAC_CMD_2ND_VERSION)
+
+/* Command IDs */
+#define DPMAC_CMDID_CLOSE DPMAC_CMD(0x800)
+#define DPMAC_CMDID_OPEN DPMAC_CMD(0x80c)
+
+#define DPMAC_CMDID_GET_API_VERSION DPMAC_CMD(0xa0c)
+
+#define DPMAC_CMDID_GET_ATTR DPMAC_CMD(0x004)
+#define DPMAC_CMDID_SET_LINK_STATE DPMAC_CMD_V2(0x0c3)
+
+#define DPMAC_CMDID_GET_COUNTER DPMAC_CMD(0x0c4)
+
+#define DPMAC_CMDID_SET_PROTOCOL DPMAC_CMD(0x0c7)
+
+/* Macros for accessing command fields smaller than 1byte */
+#define DPMAC_MASK(field) \
+ GENMASK(DPMAC_##field##_SHIFT + DPMAC_##field##_SIZE - 1, \
+ DPMAC_##field##_SHIFT)
+
+#define dpmac_set_field(var, field, val) \
+ ((var) |= (((val) << DPMAC_##field##_SHIFT) & DPMAC_MASK(field)))
+#define dpmac_get_field(var, field) \
+ (((var) & DPMAC_MASK(field)) >> DPMAC_##field##_SHIFT)
+
+struct dpmac_cmd_open {
+ __le32 dpmac_id;
+};
+
+struct dpmac_rsp_get_attributes {
+ u8 eth_if;
+ u8 link_type;
+ __le16 id;
+ __le32 max_rate;
+};
+
+#define DPMAC_STATE_SIZE 1
+#define DPMAC_STATE_SHIFT 0
+#define DPMAC_STATE_VALID_SIZE 1
+#define DPMAC_STATE_VALID_SHIFT 1
+
+struct dpmac_cmd_set_link_state {
+ __le64 options;
+ __le32 rate;
+ __le32 pad0;
+ /* from lsb: up:1, state_valid:1 */
+ u8 state;
+ u8 pad1[7];
+ __le64 supported;
+ __le64 advertising;
+};
+
+struct dpmac_cmd_get_counter {
+ u8 id;
+};
+
+struct dpmac_rsp_get_counter {
+ __le64 pad;
+ __le64 counter;
+};
+
+struct dpmac_rsp_get_api_version {
+ __le16 major;
+ __le16 minor;
+};
+
+struct dpmac_cmd_set_protocol {
+ u8 eth_if;
+};
+#endif /* _FSL_DPMAC_CMD_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac.c b/drivers/net/ethernet/freescale/dpaa2/dpmac.c
new file mode 100644
index 000000000..f440a4c3b
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2019 NXP
+ */
+#include <linux/fsl/mc.h>
+#include "dpmac.h"
+#include "dpmac-cmd.h"
+
+/**
+ * dpmac_open() - Open a control session for the specified object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpmac_id: DPMAC unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpmac_create function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpmac_id,
+ u16 *token)
+{
+ struct dpmac_cmd_open *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dpmac_cmd_open *)cmd.params;
+ cmd_params->dpmac_id = cpu_to_le32(dpmac_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return err;
+}
+
+/**
+ * dpmac_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLOSE, cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpmac_get_attributes() - Retrieve DPMAC attributes.
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpmac_attr *attr)
+{
+ struct dpmac_rsp_get_attributes *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpmac_rsp_get_attributes *)cmd.params;
+ attr->eth_if = rsp_params->eth_if;
+ attr->link_type = rsp_params->link_type;
+ attr->id = le16_to_cpu(rsp_params->id);
+ attr->max_rate = le32_to_cpu(rsp_params->max_rate);
+
+ return 0;
+}
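
A minimal caller sketch of the open/query/close sequence described above (error handling trimmed; 'mc_io', 'dev' and 'dpmac_id' are assumed to come from the probing code):

	struct dpmac_attr attr = { 0 };
	u16 token;
	int err;

	err = dpmac_open(mc_io, 0, dpmac_id, &token);
	if (err)
		return err;

	err = dpmac_get_attributes(mc_io, 0, token, &attr);
	if (!err)
		dev_info(dev, "dpmac.%u: eth_if=%d, max_rate=%u Mbps\n",
			 attr.id, attr.eth_if, attr.max_rate);

	dpmac_close(mc_io, 0, token);
	return err;
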
+
+/**
+ * dpmac_set_link_state() - Set the Ethernet link status
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @link_state: Link state configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_set_link_state(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpmac_link_state *link_state)
+{
+ struct dpmac_cmd_set_link_state *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_LINK_STATE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpmac_cmd_set_link_state *)cmd.params;
+ cmd_params->options = cpu_to_le64(link_state->options);
+ cmd_params->rate = cpu_to_le32(link_state->rate);
+ dpmac_set_field(cmd_params->state, STATE, link_state->up);
+ dpmac_set_field(cmd_params->state, STATE_VALID,
+ link_state->state_valid);
+ cmd_params->supported = cpu_to_le64(link_state->supported);
+ cmd_params->advertising = cpu_to_le64(link_state->advertising);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpmac_get_counter() - Read a specific DPMAC counter
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @id: The requested counter ID
+ * @value: Returned counter value
+ *
+ * Return: '0' on Success; Error code otherwise. The requested counter
+ * value is returned through @value.
+ */
+int dpmac_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ enum dpmac_counter_id id, u64 *value)
+{
+ struct dpmac_cmd_get_counter *dpmac_cmd;
+ struct dpmac_rsp_get_counter *dpmac_rsp;
+ struct fsl_mc_command cmd = { 0 };
+ int err = 0;
+
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_COUNTER,
+ cmd_flags,
+ token);
+ dpmac_cmd = (struct dpmac_cmd_get_counter *)cmd.params;
+ dpmac_cmd->id = id;
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ dpmac_rsp = (struct dpmac_rsp_get_counter *)cmd.params;
+ *value = le64_to_cpu(dpmac_rsp->counter);
+
+ return 0;
+}
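
As a usage sketch, any ID from enum dpmac_counter_id (declared in dpmac.h) can be read this way; the counter below is just one example, with 'mc_io', 'token' and 'dev' assumed from the caller's context:

	u64 rx_good_frames = 0;
	int err;

	err = dpmac_get_counter(mc_io, 0, token, DPMAC_CNT_ING_GOOD_FRAME,
				&rx_good_frames);
	if (err)
		dev_err(dev, "dpmac_get_counter() failed: %d\n", err);
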
+
+/**
+ * dpmac_get_api_version() - Get Data Path MAC version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path mac API
+ * @minor_ver: Minor version of data path mac API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver)
+{
+ struct dpmac_rsp_get_api_version *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_API_VERSION,
+ cmd_flags,
+ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpmac_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
+
+ return 0;
+}
+
+/**
+ * dpmac_set_protocol() - Reconfigure the DPMAC protocol
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @protocol: New protocol the DPMAC should be reconfigured to
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_set_protocol(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ enum dpmac_eth_if protocol)
+{
+ struct dpmac_cmd_set_protocol *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_PROTOCOL,
+ cmd_flags, token);
+ cmd_params = (struct dpmac_cmd_set_protocol *)cmd.params;
+ cmd_params->eth_if = protocol;
+
+ return mc_send_command(mc_io, &cmd);
+}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac.h b/drivers/net/ethernet/freescale/dpaa2/dpmac.h
new file mode 100644
index 000000000..17488819e
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac.h
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2019 NXP
+ */
+#ifndef __FSL_DPMAC_H
+#define __FSL_DPMAC_H
+
+/* Data Path MAC API
+ * Contains initialization APIs and runtime control APIs for DPMAC
+ */
+
+struct fsl_mc_io;
+
+int dpmac_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpmac_id,
+ u16 *token);
+
+int dpmac_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/**
+ * enum dpmac_link_type - DPMAC link type
+ * @DPMAC_LINK_TYPE_NONE: No link
+ * @DPMAC_LINK_TYPE_FIXED: Link is fixed type
+ * @DPMAC_LINK_TYPE_PHY: Link by PHY ID
+ * @DPMAC_LINK_TYPE_BACKPLANE: Backplane link type
+ */
+enum dpmac_link_type {
+ DPMAC_LINK_TYPE_NONE,
+ DPMAC_LINK_TYPE_FIXED,
+ DPMAC_LINK_TYPE_PHY,
+ DPMAC_LINK_TYPE_BACKPLANE
+};
+
+/**
+ * enum dpmac_eth_if - DPMAC Ethernet interface
+ * @DPMAC_ETH_IF_MII: MII interface
+ * @DPMAC_ETH_IF_RMII: RMII interface
+ * @DPMAC_ETH_IF_SMII: SMII interface
+ * @DPMAC_ETH_IF_GMII: GMII interface
+ * @DPMAC_ETH_IF_RGMII: RGMII interface
+ * @DPMAC_ETH_IF_SGMII: SGMII interface
+ * @DPMAC_ETH_IF_QSGMII: QSGMII interface
+ * @DPMAC_ETH_IF_XAUI: XAUI interface
+ * @DPMAC_ETH_IF_XFI: XFI interface
+ * @DPMAC_ETH_IF_CAUI: CAUI interface
+ * @DPMAC_ETH_IF_1000BASEX: 1000BASEX interface
+ * @DPMAC_ETH_IF_USXGMII: USXGMII interface
+ */
+enum dpmac_eth_if {
+ DPMAC_ETH_IF_MII,
+ DPMAC_ETH_IF_RMII,
+ DPMAC_ETH_IF_SMII,
+ DPMAC_ETH_IF_GMII,
+ DPMAC_ETH_IF_RGMII,
+ DPMAC_ETH_IF_SGMII,
+ DPMAC_ETH_IF_QSGMII,
+ DPMAC_ETH_IF_XAUI,
+ DPMAC_ETH_IF_XFI,
+ DPMAC_ETH_IF_CAUI,
+ DPMAC_ETH_IF_1000BASEX,
+ DPMAC_ETH_IF_USXGMII,
+};
+
+/**
+ * struct dpmac_attr - Structure representing DPMAC attributes
+ * @id: DPMAC object ID
+ * @max_rate: Maximum supported rate - in Mbps
+ * @eth_if: Ethernet interface
+ * @link_type: link type
+ */
+struct dpmac_attr {
+ u16 id;
+ u32 max_rate;
+ enum dpmac_eth_if eth_if;
+ enum dpmac_link_type link_type;
+};
+
+int dpmac_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpmac_attr *attr);
+
+/* DPMAC link configuration/state options */
+
+#define DPMAC_LINK_OPT_AUTONEG BIT_ULL(0)
+#define DPMAC_LINK_OPT_HALF_DUPLEX BIT_ULL(1)
+#define DPMAC_LINK_OPT_PAUSE BIT_ULL(2)
+#define DPMAC_LINK_OPT_ASYM_PAUSE BIT_ULL(3)
+
+/* Advertised link speeds */
+#define DPMAC_ADVERTISED_10BASET_FULL BIT_ULL(0)
+#define DPMAC_ADVERTISED_100BASET_FULL BIT_ULL(1)
+#define DPMAC_ADVERTISED_1000BASET_FULL BIT_ULL(2)
+#define DPMAC_ADVERTISED_10000BASET_FULL BIT_ULL(4)
+#define DPMAC_ADVERTISED_2500BASEX_FULL BIT_ULL(5)
+
+/* Advertise auto-negotiation enable */
+#define DPMAC_ADVERTISED_AUTONEG BIT_ULL(3)
+
+/**
+ * struct dpmac_link_state - DPMAC link configuration request
+ * @rate: Rate in Mbps
+ * @options: Enable/Disable DPMAC link cfg features (bitmap)
+ * @up: Link state
+ * @state_valid: Ignore/Update the state of the link
+ * @supported: Speeds capability of the phy (bitmap)
+ * @advertising: Speeds that are advertised for autoneg (bitmap)
+ */
+struct dpmac_link_state {
+ u32 rate;
+ u64 options;
+ int up;
+ int state_valid;
+ u64 supported;
+ u64 advertising;
+};
+
+int dpmac_set_link_state(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpmac_link_state *link_state);
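
An illustrative (not prescriptive) sketch of reporting a 1 Gbps, autonegotiated, pause-capable link, using the option/advertising bitmaps defined above; 'mc_io' and 'token' are assumed from the caller's context:

	struct dpmac_link_state state = {
		.rate = 1000,	/* Mbps */
		.options = DPMAC_LINK_OPT_AUTONEG | DPMAC_LINK_OPT_PAUSE,
		.up = 1,
		.state_valid = 1,
		.supported = DPMAC_ADVERTISED_1000BASET_FULL |
			     DPMAC_ADVERTISED_AUTONEG,
		.advertising = DPMAC_ADVERTISED_1000BASET_FULL |
			       DPMAC_ADVERTISED_AUTONEG,
	};
	int err;

	err = dpmac_set_link_state(mc_io, 0, token, &state);
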
+
+/**
+ * enum dpmac_counter_id - DPMAC counter types
+ *
+ * @DPMAC_CNT_ING_FRAME_64: counts 64-byte frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_127: counts 65- to 127-byte frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_255: counts 128- to 255-byte frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_511: counts 256- to 511-byte frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_1023: counts 512- to 1023-byte frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_1518: counts 1024- to 1518-byte frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_1519_MAX: counts frames of 1519 bytes and larger
+ * (up to the maximum frame length specified),
+ * good or bad.
+ * @DPMAC_CNT_ING_FRAG: counts frames which are shorter than 64 bytes received
+ * with a wrong CRC
+ * @DPMAC_CNT_ING_JABBER: counts frames longer than the maximum frame length
+ * specified, with a bad frame check sequence.
+ * @DPMAC_CNT_ING_FRAME_DISCARD: counts dropped frames due to internal errors.
+ * Occurs when a receive FIFO overflows.
+ * Includes also frames truncated as a result of
+ * the receive FIFO overflow.
+ * @DPMAC_CNT_ING_ALIGN_ERR: counts frames with an alignment error
+ * (optionally used for a wrong SFD).
+ * @DPMAC_CNT_EGR_UNDERSIZED: counts transmitted frames that were less than 64
+ * bytes long with a good CRC.
+ * @DPMAC_CNT_ING_OVERSIZED: counts frames longer than the maximum frame length
+ * specified, with a good frame check sequence.
+ * @DPMAC_CNT_ING_VALID_PAUSE_FRAME: counts valid pause frames (regular and PFC)
+ * @DPMAC_CNT_EGR_VALID_PAUSE_FRAME: counts valid pause frames transmitted
+ * (regular and PFC).
+ * @DPMAC_CNT_ING_BYTE: counts bytes received except preamble for all valid
+ * frames and valid pause frames.
+ * @DPMAC_CNT_ING_MCAST_FRAME: counts received multicast frames.
+ * @DPMAC_CNT_ING_BCAST_FRAME: counts received broadcast frames.
+ * @DPMAC_CNT_ING_ALL_FRAME: counts all good or bad frames received.
+ * @DPMAC_CNT_ING_UCAST_FRAME: counts received unicast frames.
+ * @DPMAC_CNT_ING_ERR_FRAME: counts frames received with an error
+ * (except for undersized/fragment frame).
+ * @DPMAC_CNT_EGR_BYTE: counts bytes transmitted except preamble for all valid
+ * frames and valid pause frames transmitted.
+ * @DPMAC_CNT_EGR_MCAST_FRAME: counts transmitted multicast frames.
+ * @DPMAC_CNT_EGR_BCAST_FRAME: counts transmitted broadcast frames.
+ * @DPMAC_CNT_EGR_UCAST_FRAME: counts transmitted unicast frames.
+ * @DPMAC_CNT_EGR_ERR_FRAME: counts frames transmitted with an error.
+ * @DPMAC_CNT_ING_GOOD_FRAME: counts frames received without error, including
+ * pause frames.
+ * @DPMAC_CNT_EGR_GOOD_FRAME: counts frames transmitted without error, including
+ * pause frames.
+ */
+enum dpmac_counter_id {
+ DPMAC_CNT_ING_FRAME_64,
+ DPMAC_CNT_ING_FRAME_127,
+ DPMAC_CNT_ING_FRAME_255,
+ DPMAC_CNT_ING_FRAME_511,
+ DPMAC_CNT_ING_FRAME_1023,
+ DPMAC_CNT_ING_FRAME_1518,
+ DPMAC_CNT_ING_FRAME_1519_MAX,
+ DPMAC_CNT_ING_FRAG,
+ DPMAC_CNT_ING_JABBER,
+ DPMAC_CNT_ING_FRAME_DISCARD,
+ DPMAC_CNT_ING_ALIGN_ERR,
+ DPMAC_CNT_EGR_UNDERSIZED,
+ DPMAC_CNT_ING_OVERSIZED,
+ DPMAC_CNT_ING_VALID_PAUSE_FRAME,
+ DPMAC_CNT_EGR_VALID_PAUSE_FRAME,
+ DPMAC_CNT_ING_BYTE,
+ DPMAC_CNT_ING_MCAST_FRAME,
+ DPMAC_CNT_ING_BCAST_FRAME,
+ DPMAC_CNT_ING_ALL_FRAME,
+ DPMAC_CNT_ING_UCAST_FRAME,
+ DPMAC_CNT_ING_ERR_FRAME,
+ DPMAC_CNT_EGR_BYTE,
+ DPMAC_CNT_EGR_MCAST_FRAME,
+ DPMAC_CNT_EGR_BCAST_FRAME,
+ DPMAC_CNT_EGR_UCAST_FRAME,
+ DPMAC_CNT_EGR_ERR_FRAME,
+ DPMAC_CNT_ING_GOOD_FRAME,
+ DPMAC_CNT_EGR_GOOD_FRAME
+};
+
+int dpmac_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ enum dpmac_counter_id id, u64 *value);
+
+int dpmac_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver);
+
+int dpmac_set_protocol(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ enum dpmac_eth_if protocol);
+#endif /* __FSL_DPMAC_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
new file mode 100644
index 000000000..828f53809
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
@@ -0,0 +1,686 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ * Copyright 2020 NXP
+ */
+#ifndef _FSL_DPNI_CMD_H
+#define _FSL_DPNI_CMD_H
+
+#include "dpni.h"
+
+/* DPNI Version */
+#define DPNI_VER_MAJOR 7
+#define DPNI_VER_MINOR 0
+#define DPNI_CMD_BASE_VERSION 1
+#define DPNI_CMD_2ND_VERSION 2
+#define DPNI_CMD_ID_OFFSET 4
+
+#define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
+#define DPNI_CMD_V2(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_2ND_VERSION)
+
+#define DPNI_CMDID_OPEN DPNI_CMD(0x801)
+#define DPNI_CMDID_CLOSE DPNI_CMD(0x800)
+#define DPNI_CMDID_CREATE DPNI_CMD(0x901)
+#define DPNI_CMDID_DESTROY DPNI_CMD(0x900)
+#define DPNI_CMDID_GET_API_VERSION DPNI_CMD(0xa01)
+
+#define DPNI_CMDID_ENABLE DPNI_CMD(0x002)
+#define DPNI_CMDID_DISABLE DPNI_CMD(0x003)
+#define DPNI_CMDID_GET_ATTR DPNI_CMD(0x004)
+#define DPNI_CMDID_RESET DPNI_CMD(0x005)
+#define DPNI_CMDID_IS_ENABLED DPNI_CMD(0x006)
+
+#define DPNI_CMDID_SET_IRQ DPNI_CMD(0x010)
+#define DPNI_CMDID_GET_IRQ DPNI_CMD(0x011)
+#define DPNI_CMDID_SET_IRQ_ENABLE DPNI_CMD(0x012)
+#define DPNI_CMDID_GET_IRQ_ENABLE DPNI_CMD(0x013)
+#define DPNI_CMDID_SET_IRQ_MASK DPNI_CMD(0x014)
+#define DPNI_CMDID_GET_IRQ_MASK DPNI_CMD(0x015)
+#define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016)
+#define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017)
+
+#define DPNI_CMDID_SET_POOLS DPNI_CMD(0x200)
+#define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B)
+
+#define DPNI_CMDID_GET_QDID DPNI_CMD(0x210)
+#define DPNI_CMDID_GET_TX_DATA_OFFSET DPNI_CMD(0x212)
+#define DPNI_CMDID_GET_LINK_STATE DPNI_CMD(0x215)
+#define DPNI_CMDID_SET_MAX_FRAME_LENGTH DPNI_CMD(0x216)
+#define DPNI_CMDID_GET_MAX_FRAME_LENGTH DPNI_CMD(0x217)
+#define DPNI_CMDID_SET_LINK_CFG DPNI_CMD(0x21A)
+#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD_V2(0x21B)
+
+#define DPNI_CMDID_SET_MCAST_PROMISC DPNI_CMD(0x220)
+#define DPNI_CMDID_GET_MCAST_PROMISC DPNI_CMD(0x221)
+#define DPNI_CMDID_SET_UNICAST_PROMISC DPNI_CMD(0x222)
+#define DPNI_CMDID_GET_UNICAST_PROMISC DPNI_CMD(0x223)
+#define DPNI_CMDID_SET_PRIM_MAC DPNI_CMD(0x224)
+#define DPNI_CMDID_GET_PRIM_MAC DPNI_CMD(0x225)
+#define DPNI_CMDID_ADD_MAC_ADDR DPNI_CMD(0x226)
+#define DPNI_CMDID_REMOVE_MAC_ADDR DPNI_CMD(0x227)
+#define DPNI_CMDID_CLR_MAC_FILTERS DPNI_CMD(0x228)
+
+#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD(0x235)
+
+#define DPNI_CMDID_ENABLE_VLAN_FILTER DPNI_CMD(0x230)
+#define DPNI_CMDID_ADD_VLAN_ID DPNI_CMD_V2(0x231)
+#define DPNI_CMDID_REMOVE_VLAN_ID DPNI_CMD(0x232)
+
+#define DPNI_CMDID_SET_QOS_TBL DPNI_CMD(0x240)
+#define DPNI_CMDID_ADD_QOS_ENT DPNI_CMD(0x241)
+#define DPNI_CMDID_REMOVE_QOS_ENT DPNI_CMD(0x242)
+#define DPNI_CMDID_CLR_QOS_TBL DPNI_CMD(0x243)
+#define DPNI_CMDID_ADD_FS_ENT DPNI_CMD(0x244)
+#define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245)
+#define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246)
+
+#define DPNI_CMDID_GET_STATISTICS DPNI_CMD(0x25D)
+#define DPNI_CMDID_GET_QUEUE DPNI_CMD(0x25F)
+#define DPNI_CMDID_SET_QUEUE DPNI_CMD(0x260)
+#define DPNI_CMDID_GET_TAILDROP DPNI_CMD(0x261)
+#define DPNI_CMDID_SET_TAILDROP DPNI_CMD(0x262)
+
+#define DPNI_CMDID_GET_PORT_MAC_ADDR DPNI_CMD(0x263)
+
+#define DPNI_CMDID_GET_BUFFER_LAYOUT DPNI_CMD(0x264)
+#define DPNI_CMDID_SET_BUFFER_LAYOUT DPNI_CMD(0x265)
+
+#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE DPNI_CMD(0x266)
+#define DPNI_CMDID_SET_CONGESTION_NOTIFICATION DPNI_CMD(0x267)
+#define DPNI_CMDID_GET_CONGESTION_NOTIFICATION DPNI_CMD(0x268)
+#define DPNI_CMDID_SET_EARLY_DROP DPNI_CMD(0x269)
+#define DPNI_CMDID_GET_EARLY_DROP DPNI_CMD(0x26A)
+#define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B)
+#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C)
+
+#define DPNI_CMDID_SET_RX_FS_DIST DPNI_CMD(0x273)
+#define DPNI_CMDID_SET_RX_HASH_DIST DPNI_CMD(0x274)
+#define DPNI_CMDID_GET_LINK_CFG DPNI_CMD(0x278)
+
+#define DPNI_CMDID_SET_SINGLE_STEP_CFG DPNI_CMD(0x279)
+#define DPNI_CMDID_GET_SINGLE_STEP_CFG DPNI_CMD_V2(0x27a)
+
+/* Macros for accessing command fields smaller than 1 byte */
+#define DPNI_MASK(field) \
+ GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
+ DPNI_##field##_SHIFT)
+
+#define dpni_set_field(var, field, val) \
+ ((var) |= (((val) << DPNI_##field##_SHIFT) & DPNI_MASK(field)))
+#define dpni_get_field(var, field) \
+ (((var) & DPNI_MASK(field)) >> DPNI_##field##_SHIFT)
+
+struct dpni_cmd_open {
+ __le32 dpni_id;
+};
+
+#define DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order))
+struct dpni_cmd_set_pools {
+ /* cmd word 0 */
+ u8 num_dpbp;
+ u8 backup_pool_mask;
+ __le16 pad;
+ /* cmd word 0..4 */
+ __le32 dpbp_id[DPNI_MAX_DPBP];
+ /* cmd word 4..6 */
+ __le16 buffer_size[DPNI_MAX_DPBP];
+};
+
+/* The enable indication is always the least significant bit */
+#define DPNI_ENABLE_SHIFT 0
+#define DPNI_ENABLE_SIZE 1
+
+struct dpni_rsp_is_enabled {
+ u8 enabled;
+};
+
+struct dpni_rsp_get_irq {
+ /* response word 0 */
+ __le32 irq_val;
+ __le32 pad;
+ /* response word 1 */
+ __le64 irq_addr;
+ /* response word 2 */
+ __le32 irq_num;
+ __le32 type;
+};
+
+struct dpni_cmd_set_irq_enable {
+ u8 enable;
+ u8 pad[3];
+ u8 irq_index;
+};
+
+struct dpni_cmd_get_irq_enable {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dpni_rsp_get_irq_enable {
+ u8 enabled;
+};
+
+struct dpni_cmd_set_irq_mask {
+ __le32 mask;
+ u8 irq_index;
+};
+
+struct dpni_cmd_get_irq_mask {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dpni_rsp_get_irq_mask {
+ __le32 mask;
+};
+
+struct dpni_cmd_get_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dpni_rsp_get_irq_status {
+ __le32 status;
+};
+
+struct dpni_cmd_clear_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dpni_rsp_get_attr {
+ /* response word 0 */
+ __le32 options;
+ u8 num_queues;
+ u8 num_tcs;
+ u8 mac_filter_entries;
+ u8 pad0;
+ /* response word 1 */
+ u8 vlan_filter_entries;
+ u8 pad1;
+ u8 qos_entries;
+ u8 pad2;
+ __le16 fs_entries;
+ __le16 pad3;
+ /* response word 2 */
+ u8 qos_key_size;
+ u8 fs_key_size;
+ __le16 wriop_version;
+};
+
+#define DPNI_ERROR_ACTION_SHIFT 0
+#define DPNI_ERROR_ACTION_SIZE 4
+#define DPNI_FRAME_ANN_SHIFT 4
+#define DPNI_FRAME_ANN_SIZE 1
+
+struct dpni_cmd_set_errors_behavior {
+ __le32 errors;
+ /* from least significant bit: error_action:4, set_frame_annotation:1 */
+ u8 flags;
+};
+
+/* There are 3 separate commands for configuring Rx, Tx and Tx confirmation
+ * buffer layouts, but they all share the same parameters.
+ * If one of the functions changes, the structure below needs to be split.
+ */
+
+#define DPNI_PASS_TS_SHIFT 0
+#define DPNI_PASS_TS_SIZE 1
+#define DPNI_PASS_PR_SHIFT 1
+#define DPNI_PASS_PR_SIZE 1
+#define DPNI_PASS_FS_SHIFT 2
+#define DPNI_PASS_FS_SIZE 1
+
+struct dpni_cmd_get_buffer_layout {
+ u8 qtype;
+};
+
+struct dpni_rsp_get_buffer_layout {
+ /* response word 0 */
+ u8 pad0[6];
+ /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */
+ u8 flags;
+ u8 pad1;
+ /* response word 1 */
+ __le16 private_data_size;
+ __le16 data_align;
+ __le16 head_room;
+ __le16 tail_room;
+};
+
+struct dpni_cmd_set_buffer_layout {
+ /* cmd word 0 */
+ u8 qtype;
+ u8 pad0[3];
+ __le16 options;
+ /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */
+ u8 flags;
+ u8 pad1;
+ /* cmd word 1 */
+ __le16 private_data_size;
+ __le16 data_align;
+ __le16 head_room;
+ __le16 tail_room;
+};
+
+struct dpni_cmd_set_offload {
+ u8 pad[3];
+ u8 dpni_offload;
+ __le32 config;
+};
+
+struct dpni_cmd_get_offload {
+ u8 pad[3];
+ u8 dpni_offload;
+};
+
+struct dpni_rsp_get_offload {
+ __le32 pad;
+ __le32 config;
+};
+
+struct dpni_cmd_get_qdid {
+ u8 qtype;
+};
+
+struct dpni_rsp_get_qdid {
+ __le16 qdid;
+};
+
+struct dpni_rsp_get_tx_data_offset {
+ __le16 data_offset;
+};
+
+struct dpni_cmd_get_statistics {
+ u8 page_number;
+};
+
+struct dpni_rsp_get_statistics {
+ __le64 counter[DPNI_STATISTICS_CNT];
+};
+
+struct dpni_cmd_link_cfg {
+ /* cmd word 0 */
+ __le64 pad0;
+ /* cmd word 1 */
+ __le32 rate;
+ __le32 pad1;
+ /* cmd word 2 */
+ __le64 options;
+};
+
+#define DPNI_LINK_STATE_SHIFT 0
+#define DPNI_LINK_STATE_SIZE 1
+
+struct dpni_rsp_get_link_state {
+ /* response word 0 */
+ __le32 pad0;
+ /* from LSB: up:1 */
+ u8 flags;
+ u8 pad1[3];
+ /* response word 1 */
+ __le32 rate;
+ __le32 pad2;
+ /* response word 2 */
+ __le64 options;
+};
+
+struct dpni_cmd_set_max_frame_length {
+ __le16 max_frame_length;
+};
+
+struct dpni_rsp_get_max_frame_length {
+ __le16 max_frame_length;
+};
+
+struct dpni_cmd_set_multicast_promisc {
+ u8 enable;
+};
+
+struct dpni_rsp_get_multicast_promisc {
+ u8 enabled;
+};
+
+struct dpni_cmd_set_unicast_promisc {
+ u8 enable;
+};
+
+struct dpni_rsp_get_unicast_promisc {
+ u8 enabled;
+};
+
+struct dpni_cmd_set_primary_mac_addr {
+ __le16 pad;
+ u8 mac_addr[6];
+};
+
+struct dpni_rsp_get_primary_mac_addr {
+ __le16 pad;
+ u8 mac_addr[6];
+};
+
+struct dpni_rsp_get_port_mac_addr {
+ __le16 pad;
+ u8 mac_addr[6];
+};
+
+struct dpni_cmd_add_mac_addr {
+ __le16 pad;
+ u8 mac_addr[6];
+};
+
+struct dpni_cmd_remove_mac_addr {
+ __le16 pad;
+ u8 mac_addr[6];
+};
+
+#define DPNI_UNICAST_FILTERS_SHIFT 0
+#define DPNI_UNICAST_FILTERS_SIZE 1
+#define DPNI_MULTICAST_FILTERS_SHIFT 1
+#define DPNI_MULTICAST_FILTERS_SIZE 1
+
+struct dpni_cmd_clear_mac_filters {
+ /* from LSB: unicast:1, multicast:1 */
+ u8 flags;
+};
+
+#define DPNI_DIST_MODE_SHIFT 0
+#define DPNI_DIST_MODE_SIZE 4
+#define DPNI_MISS_ACTION_SHIFT 4
+#define DPNI_MISS_ACTION_SIZE 4
+
+struct dpni_cmd_set_rx_tc_dist {
+ /* cmd word 0 */
+ __le16 dist_size;
+ u8 tc_id;
+ /* from LSB: dist_mode:4, miss_action:4 */
+ u8 flags;
+ __le16 pad0;
+ __le16 default_flow_id;
+ /* cmd word 1..5 */
+ __le64 pad1[5];
+ /* cmd word 6 */
+ __le64 key_cfg_iova;
+};
+
+/* dpni_set_rx_tc_dist extension (structure of the DMA-able memory at
+ * key_cfg_iova)
+ */
+struct dpni_mask_cfg {
+ u8 mask;
+ u8 offset;
+};
+
+#define DPNI_EFH_TYPE_SHIFT 0
+#define DPNI_EFH_TYPE_SIZE 4
+#define DPNI_EXTRACT_TYPE_SHIFT 0
+#define DPNI_EXTRACT_TYPE_SIZE 4
+
+struct dpni_dist_extract {
+ /* word 0 */
+ u8 prot;
+ /* EFH type stored in the 4 least significant bits */
+ u8 efh_type;
+ u8 size;
+ u8 offset;
+ __le32 field;
+ /* word 1 */
+ u8 hdr_index;
+ u8 constant;
+ u8 num_of_repeats;
+ u8 num_of_byte_masks;
+ /* Extraction type is stored in the 4 LSBs */
+ u8 extract_type;
+ u8 pad[3];
+ /* word 2 */
+ struct dpni_mask_cfg masks[4];
+};
+
+struct dpni_ext_set_rx_tc_dist {
+ /* extension word 0 */
+ u8 num_extracts;
+ u8 pad[7];
+ /* words 1..25 */
+ struct dpni_dist_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
+};
+
+struct dpni_cmd_get_queue {
+ u8 qtype;
+ u8 tc;
+ u8 index;
+};
+
+#define DPNI_DEST_TYPE_SHIFT 0
+#define DPNI_DEST_TYPE_SIZE 4
+#define DPNI_STASH_CTRL_SHIFT 6
+#define DPNI_STASH_CTRL_SIZE 1
+#define DPNI_HOLD_ACTIVE_SHIFT 7
+#define DPNI_HOLD_ACTIVE_SIZE 1
+
+struct dpni_rsp_get_queue {
+ /* response word 0 */
+ __le64 pad0;
+ /* response word 1 */
+ __le32 dest_id;
+ __le16 pad1;
+ u8 dest_prio;
+ /* From LSB: dest_type:4, pad:2, flc_stash_ctrl:1, hold_active:1 */
+ u8 flags;
+ /* response word 2 */
+ __le64 flc;
+ /* response word 3 */
+ __le64 user_context;
+ /* response word 4 */
+ __le32 fqid;
+ __le16 qdbin;
+};
+
+struct dpni_cmd_set_queue {
+ /* cmd word 0 */
+ u8 qtype;
+ u8 tc;
+ u8 index;
+ u8 options;
+ __le32 pad0;
+ /* cmd word 1 */
+ __le32 dest_id;
+ __le16 pad1;
+ u8 dest_prio;
+ u8 flags;
+ /* cmd word 2 */
+ __le64 flc;
+ /* cmd word 3 */
+ __le64 user_context;
+};
+
+struct dpni_cmd_set_taildrop {
+ /* cmd word 0 */
+ u8 congestion_point;
+ u8 qtype;
+ u8 tc;
+ u8 index;
+ __le32 pad0;
+ /* cmd word 1 */
+ /* Only least significant bit is relevant */
+ u8 enable;
+ u8 pad1;
+ u8 units;
+ u8 pad2;
+ __le32 threshold;
+};
+
+struct dpni_cmd_get_taildrop {
+ u8 congestion_point;
+ u8 qtype;
+ u8 tc;
+ u8 index;
+};
+
+struct dpni_rsp_get_taildrop {
+ /* cmd word 0 */
+ __le64 pad0;
+ /* cmd word 1 */
+ /* only least significant bit is relevant */
+ u8 enable;
+ u8 pad1;
+ u8 units;
+ u8 pad2;
+ __le32 threshold;
+};
+
+struct dpni_rsp_get_api_version {
+ __le16 major;
+ __le16 minor;
+};
+
+#define DPNI_RX_FS_DIST_ENABLE_SHIFT 0
+#define DPNI_RX_FS_DIST_ENABLE_SIZE 1
+struct dpni_cmd_set_rx_fs_dist {
+ __le16 dist_size;
+ u8 enable;
+ u8 tc;
+ __le16 miss_flow_id;
+ __le16 pad;
+ __le64 key_cfg_iova;
+};
+
+#define DPNI_RX_HASH_DIST_ENABLE_SHIFT 0
+#define DPNI_RX_HASH_DIST_ENABLE_SIZE 1
+struct dpni_cmd_set_rx_hash_dist {
+ __le16 dist_size;
+ u8 enable;
+ u8 tc;
+ __le32 pad;
+ __le64 key_cfg_iova;
+};
+
+struct dpni_cmd_add_fs_entry {
+ /* cmd word 0 */
+ __le16 options;
+ u8 tc_id;
+ u8 key_size;
+ __le16 index;
+ __le16 flow_id;
+ /* cmd word 1 */
+ __le64 key_iova;
+ /* cmd word 2 */
+ __le64 mask_iova;
+ /* cmd word 3 */
+ __le64 flc;
+};
+
+struct dpni_cmd_remove_fs_entry {
+ /* cmd word 0 */
+ __le16 pad0;
+ u8 tc_id;
+ u8 key_size;
+ __le32 pad1;
+ /* cmd word 1 */
+ __le64 key_iova;
+ /* cmd word 2 */
+ __le64 mask_iova;
+};
+
+#define DPNI_DISCARD_ON_MISS_SHIFT 0
+#define DPNI_DISCARD_ON_MISS_SIZE 1
+
+struct dpni_cmd_set_qos_table {
+ __le32 pad;
+ u8 default_tc;
+ /* only the LSB */
+ u8 discard_on_miss;
+ __le16 pad1[21];
+ __le64 key_cfg_iova;
+};
+
+struct dpni_cmd_add_qos_entry {
+ __le16 pad;
+ u8 tc_id;
+ u8 key_size;
+ __le16 index;
+ __le16 pad1;
+ __le64 key_iova;
+ __le64 mask_iova;
+};
+
+struct dpni_cmd_remove_qos_entry {
+ u8 pad[3];
+ u8 key_size;
+ __le32 pad1;
+ __le64 key_iova;
+ __le64 mask_iova;
+};
+
+#define DPNI_DEST_TYPE_SHIFT 0
+#define DPNI_DEST_TYPE_SIZE 4
+#define DPNI_CONG_UNITS_SHIFT 4
+#define DPNI_CONG_UNITS_SIZE 2
+
+struct dpni_cmd_set_congestion_notification {
+ /* cmd word 0 */
+ u8 qtype;
+ u8 tc;
+ u8 pad[6];
+ /* cmd word 1 */
+ __le32 dest_id;
+ __le16 notification_mode;
+ u8 dest_priority;
+ /* from LSB: dest_type: 4 units:2 */
+ u8 type_units;
+ /* cmd word 2 */
+ __le64 message_iova;
+ /* cmd word 3 */
+ __le64 message_ctx;
+ /* cmd word 4 */
+ __le32 threshold_entry;
+ __le32 threshold_exit;
+};
+
+#define DPNI_COUPLED_SHIFT 0
+#define DPNI_COUPLED_SIZE 1
+
+struct dpni_cmd_set_tx_shaping {
+ __le16 tx_cr_max_burst_size;
+ __le16 tx_er_max_burst_size;
+ __le32 pad;
+ __le32 tx_cr_rate_limit;
+ __le32 tx_er_rate_limit;
+ /* from LSB: coupled:1 */
+ u8 coupled;
+};
+
+#define DPNI_PTP_ENABLE_SHIFT 0
+#define DPNI_PTP_ENABLE_SIZE 1
+#define DPNI_PTP_CH_UPDATE_SHIFT 1
+#define DPNI_PTP_CH_UPDATE_SIZE 1
+
+struct dpni_cmd_single_step_cfg {
+ __le16 flags;
+ __le16 offset;
+ __le32 peer_delay;
+ __le32 ptp_onestep_reg_base;
+ __le32 pad0;
+};
+
+struct dpni_rsp_single_step_cfg {
+ __le16 flags;
+ __le16 offset;
+ __le32 peer_delay;
+ __le32 ptp_onestep_reg_base;
+ __le32 pad0;
+};
+
+struct dpni_cmd_enable_vlan_filter {
+ /* only the LSB */
+ u8 en;
+};
+
+struct dpni_cmd_vlan_id {
+ u8 flags;
+ u8 tc_id;
+ u8 flow_id;
+ u8 pad;
+ __le16 vlan_id;
+};
+
+#endif /* _FSL_DPNI_CMD_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c
new file mode 100644
index 000000000..6c3b36f20
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c
@@ -0,0 +1,2181 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ * Copyright 2020 NXP
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/fsl/mc.h>
+#include "dpni.h"
+#include "dpni-cmd.h"
+
+/**
+ * dpni_prepare_key_cfg() - prepare extract parameters
+ * @cfg: Full Key Generation profile (rule) definition
+ * @key_cfg_buf: Zeroed 256-byte buffer to be filled here, before it is
+ * mapped for DMA
+ *
+ * This function has to be called before the following functions:
+ * - dpni_set_rx_tc_dist()
+ * - dpni_set_qos_table()
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, u8 *key_cfg_buf)
+{
+ int i, j;
+ struct dpni_ext_set_rx_tc_dist *dpni_ext;
+ struct dpni_dist_extract *extr;
+
+ if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
+ return -EINVAL;
+
+ dpni_ext = (struct dpni_ext_set_rx_tc_dist *)key_cfg_buf;
+ dpni_ext->num_extracts = cfg->num_extracts;
+
+ for (i = 0; i < cfg->num_extracts; i++) {
+ extr = &dpni_ext->extracts[i];
+
+ switch (cfg->extracts[i].type) {
+ case DPKG_EXTRACT_FROM_HDR:
+ extr->prot = cfg->extracts[i].extract.from_hdr.prot;
+ dpni_set_field(extr->efh_type, EFH_TYPE,
+ cfg->extracts[i].extract.from_hdr.type);
+ extr->size = cfg->extracts[i].extract.from_hdr.size;
+ extr->offset = cfg->extracts[i].extract.from_hdr.offset;
+ extr->field = cpu_to_le32(
+ cfg->extracts[i].extract.from_hdr.field);
+ extr->hdr_index =
+ cfg->extracts[i].extract.from_hdr.hdr_index;
+ break;
+ case DPKG_EXTRACT_FROM_DATA:
+ extr->size = cfg->extracts[i].extract.from_data.size;
+ extr->offset =
+ cfg->extracts[i].extract.from_data.offset;
+ break;
+ case DPKG_EXTRACT_FROM_PARSE:
+ extr->size = cfg->extracts[i].extract.from_parse.size;
+ extr->offset =
+ cfg->extracts[i].extract.from_parse.offset;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks;
+ dpni_set_field(extr->extract_type, EXTRACT_TYPE,
+ cfg->extracts[i].type);
+
+ for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
+ extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
+ extr->masks[j].offset =
+ cfg->extracts[i].masks[j].offset;
+ }
+ }
+
+ return 0;
+}
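
A hedged sketch of the call order spelled out above: fill a zeroed 256-byte buffer, then DMA-map it before handing its IOVA to dpni_set_rx_tc_dist() or dpni_set_qos_table(). NET_PROT_IP, DPKG_FULL_FIELD and NH_FLD_IP_SRC are assumed to be the dpkg.h definitions; the helper name itself is hypothetical.

	/* Hypothetical helper, for illustration only; assumes <linux/slab.h>,
	 * <linux/dma-mapping.h>, "dpni.h" and "dpkg.h" are already included.
	 */
	static int example_prepare_ip_src_key(struct device *dev, u8 **key_buf,
					      dma_addr_t *key_iova)
	{
		struct dpkg_profile_cfg cfg = { .num_extracts = 1 };
		int err;

		cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
		cfg.extracts[0].extract.from_hdr.prot = NET_PROT_IP;	  /* assumed */
		cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD; /* assumed */
		cfg.extracts[0].extract.from_hdr.field = NH_FLD_IP_SRC;  /* assumed */

		*key_buf = kzalloc(256, GFP_KERNEL);	/* zeroed, as required above */
		if (!*key_buf)
			return -ENOMEM;

		err = dpni_prepare_key_cfg(&cfg, *key_buf);
		if (err)
			goto out_free;

		*key_iova = dma_map_single(dev, *key_buf, 256, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *key_iova)) {
			err = -ENOMEM;
			goto out_free;
		}

		/* *key_iova can now be passed as key_cfg_iova to
		 * dpni_set_rx_tc_dist() or dpni_set_qos_table(); unmap and
		 * free *key_buf once the MC command has completed.
		 */
		return 0;

	out_free:
		kfree(*key_buf);
		return err;
	}
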
+
+/**
+ * dpni_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpni_id: DPNI unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpni_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpni_id,
+ u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_open *cmd_params;
+
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dpni_cmd_open *)cmd.params;
+ cmd_params->dpni_id = cpu_to_le32(dpni_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpni_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_pools() - Set buffer pools configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Buffer pools configuration
+ *
+ * Mandatory for DPNI operation.
+ * Warning: allowed only when the DPNI is disabled.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_pools(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_pools_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_pools *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
+ cmd_params->num_dpbp = cfg->num_dpbp;
+ for (i = 0; i < DPNI_MAX_DPBP; i++) {
+ cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);
+ cmd_params->buffer_size[i] =
+ cpu_to_le16(cfg->pools[i].buffer_size);
+ cmd_params->backup_pool_mask |=
+ DPNI_BACKUP_POOL(cfg->pools[i].backup_pool, i);
+ }
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
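
By way of a hedged example, one buffer pool with 2 KiB buffers could be configured while the DPNI is still disabled; 'dpbp_id' stands in for the ID of a previously probed DPBP object and is an assumption of this sketch:

	struct dpni_pools_cfg pools_params = {
		.num_dpbp = 1,
		.pools[0] = {
			.dpbp_id = dpbp_id,	/* assumed: ID of a probed DPBP */
			.buffer_size = 2048,
			.backup_pool = 0,
		},
	};
	int err;

	err = dpni_set_pools(mc_io, 0, token, &pools_params);
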
+
+/**
+ * dpni_enable() - Enable the DPNI, allow sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_disable() - Disable the DPNI, stop sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_is_enabled() - Check if the DPNI is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_is_enabled(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *en)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_rsp_is_enabled *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_is_enabled *)cmd.params;
+ *en = dpni_get_field(rsp_params->enabled, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dpni_reset() - Reset the DPNI, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_irq_enable() - Set overall interrupt state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @irq_index: The interrupt index to configure
+ * @en: Interrupt state: - enable = 1, disable = 0
+ *
+ * Allows GPP software to control when interrupts are generated.
+ * Each interrupt can have up to 32 causes. The enable/disable controls the
+ * overall interrupt state: if the interrupt is disabled, none of its causes
+ * can assert it.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_irq_enable *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_ENABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_irq_enable *)cmd.params;
+ dpni_set_field(cmd_params->enable, ENABLE, en);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_irq_enable() - Get overall interrupt state
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @irq_index: The interrupt index to configure
+ * @en: Returned interrupt state - enable = 1, disable = 0
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 *en)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_get_irq_enable *cmd_params;
+ struct dpni_rsp_get_irq_enable *rsp_params;
+
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_irq_enable *)cmd.params;
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_irq_enable *)cmd.params;
+ *en = dpni_get_field(rsp_params->enabled, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dpni_set_irq_mask() - Set interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @irq_index: The interrupt index to configure
+ * @mask: event mask to trigger interrupt;
+ * each bit:
+ * 0 = ignore event
+ * 1 = consider event for asserting IRQ
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_irq_mask *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_MASK,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_irq_mask *)cmd.params;
+ cmd_params->mask = cpu_to_le32(mask);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
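
To tie the two calls together, a hedged bring-up sketch (DPNI_IRQ_INDEX and DPNI_IRQ_EVENT_LINK_CHANGED are assumed to be the dpni.h definitions):

	int err;

	/* unmask only the link-changed cause ... */
	err = dpni_set_irq_mask(mc_io, 0, token, DPNI_IRQ_INDEX,
				DPNI_IRQ_EVENT_LINK_CHANGED);
	if (err)
		return err;

	/* ... then enable the interrupt as a whole */
	return dpni_set_irq_enable(mc_io, 0, token, DPNI_IRQ_INDEX, 1);
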
+
+/**
+ * dpni_get_irq_mask() - Get interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @irq_index: The interrupt index to configure
+ * @mask: Returned event mask to trigger interrupt
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *mask)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_get_irq_mask *cmd_params;
+ struct dpni_rsp_get_irq_mask *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_irq_mask *)cmd.params;
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_irq_mask *)cmd.params;
+ *mask = le32_to_cpu(rsp_params->mask);
+
+ return 0;
+}
+
+/**
+ * dpni_get_irq_status() - Get the current status of any pending interrupts.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @irq_index: The interrupt index to configure
+ * @status: Returned interrupts status - one bit per cause:
+ * 0 = no interrupt pending
+ * 1 = interrupt pending
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_get_irq_status *cmd_params;
+ struct dpni_rsp_get_irq_status *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_STATUS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(*status);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_irq_status *)cmd.params;
+ *status = le32_to_cpu(rsp_params->status);
+
+ return 0;
+}
+
+/**
+ * dpni_clear_irq_status() - Clear a pending interrupt's status
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @irq_index: The interrupt index to configure
+ * @status: bits to clear (W1C) - one bit per cause:
+ * 0 = don't change
+ * 1 = clear status bit
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_clear_irq_status *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLEAR_IRQ_STATUS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_clear_irq_status *)cmd.params;
+ cmd_params->irq_index = irq_index;
+ cmd_params->status = cpu_to_le32(status);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_attributes() - Retrieve DPNI attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @attr: Object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_rsp_get_attr *rsp_params;
+
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_attr *)cmd.params;
+ attr->options = le32_to_cpu(rsp_params->options);
+ attr->num_queues = rsp_params->num_queues;
+ attr->num_tcs = rsp_params->num_tcs;
+ attr->mac_filter_entries = rsp_params->mac_filter_entries;
+ attr->vlan_filter_entries = rsp_params->vlan_filter_entries;
+ attr->qos_entries = rsp_params->qos_entries;
+ attr->fs_entries = le16_to_cpu(rsp_params->fs_entries);
+ attr->qos_key_size = rsp_params->qos_key_size;
+ attr->fs_key_size = rsp_params->fs_key_size;
+ attr->wriop_version = le16_to_cpu(rsp_params->wriop_version);
+
+ return 0;
+}
+
+/**
+ * dpni_set_errors_behavior() - Set errors behavior
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Errors configuration
+ *
+ * This function may be called numerous times with different
+ * error masks.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_error_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_errors_behavior *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_errors_behavior *)cmd.params;
+ cmd_params->errors = cpu_to_le32(cfg->errors);
+ dpni_set_field(cmd_params->flags, ERROR_ACTION, cfg->error_action);
+ dpni_set_field(cmd_params->flags, FRAME_ANN, cfg->set_frame_annotation);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_buffer_layout() - Retrieve buffer layout attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue to retrieve configuration for
+ * @layout: Returns buffer layout attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ struct dpni_buffer_layout *layout)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_get_buffer_layout *cmd_params;
+ struct dpni_rsp_get_buffer_layout *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_BUFFER_LAYOUT,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_buffer_layout *)cmd.params;
+ cmd_params->qtype = qtype;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_buffer_layout *)cmd.params;
+ layout->pass_timestamp = dpni_get_field(rsp_params->flags, PASS_TS);
+ layout->pass_parser_result = dpni_get_field(rsp_params->flags, PASS_PR);
+ layout->pass_frame_status = dpni_get_field(rsp_params->flags, PASS_FS);
+ layout->private_data_size = le16_to_cpu(rsp_params->private_data_size);
+ layout->data_align = le16_to_cpu(rsp_params->data_align);
+ layout->data_head_room = le16_to_cpu(rsp_params->head_room);
+ layout->data_tail_room = le16_to_cpu(rsp_params->tail_room);
+
+ return 0;
+}
+
+/**
+ * dpni_set_buffer_layout() - Set buffer layout configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue this configuration applies to
+ * @layout: Buffer layout configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * @warning Allowed only when DPNI is disabled
+ */
+int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ const struct dpni_buffer_layout *layout)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_buffer_layout *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_BUFFER_LAYOUT,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_buffer_layout *)cmd.params;
+ cmd_params->qtype = qtype;
+ cmd_params->options = cpu_to_le16(layout->options);
+ dpni_set_field(cmd_params->flags, PASS_TS, layout->pass_timestamp);
+ dpni_set_field(cmd_params->flags, PASS_PR, layout->pass_parser_result);
+ dpni_set_field(cmd_params->flags, PASS_FS, layout->pass_frame_status);
+ cmd_params->private_data_size = cpu_to_le16(layout->private_data_size);
+ cmd_params->data_align = cpu_to_le16(layout->data_align);
+ cmd_params->head_room = cpu_to_le16(layout->data_head_room);
+ cmd_params->tail_room = cpu_to_le16(layout->data_tail_room);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
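
A hedged sketch of programming the Rx buffer layout while the DPNI is still disabled; the DPNI_BUF_LAYOUT_OPT_* flags and DPNI_QUEUE_RX are assumed to be the dpni.h definitions, and the values are illustrative only:

	struct dpni_buffer_layout layout = {
		.options = DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |	/* assumed */
			   DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM,	/* assumed */
		.data_align = 64,
		.data_head_room = 128,
	};
	int err;

	err = dpni_set_buffer_layout(mc_io, 0, token, DPNI_QUEUE_RX, &layout);
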
+
+/**
+ * dpni_set_offload() - Set DPNI offload configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @type: Type of DPNI offload
+ * @config: Offload configuration.
+ * For checksum offloads, non-zero value enables the offload
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * @warning Allowed only when DPNI is disabled
+ */
+
+int dpni_set_offload(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_offload type,
+ u32 config)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_offload *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_OFFLOAD,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_offload *)cmd.params;
+ cmd_params->dpni_offload = type;
+ cmd_params->config = cpu_to_le32(config);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_get_offload(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_offload type,
+ u32 *config)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_get_offload *cmd_params;
+ struct dpni_rsp_get_offload *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_OFFLOAD,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_offload *)cmd.params;
+ cmd_params->dpni_offload = type;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_offload *)cmd.params;
+ *config = le32_to_cpu(rsp_params->config);
+
+ return 0;
+}
+
+/**
+ * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used
+ * for enqueue operations
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue to receive QDID for
+ * @qdid: Returned virtual QDID value that should be used as an argument
+ * in all enqueue operations
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_qdid(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ u16 *qdid)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_get_qdid *cmd_params;
+ struct dpni_rsp_get_qdid *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_qdid *)cmd.params;
+ cmd_params->qtype = qtype;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_qdid *)cmd.params;
+ *qdid = le16_to_cpu(rsp_params->qdid);
+
+ return 0;
+}
+
+/**
+ * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @data_offset: Tx data offset (from start of buffer)
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 *data_offset)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_rsp_get_tx_data_offset *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_tx_data_offset *)cmd.params;
+ *data_offset = le16_to_cpu(rsp_params->data_offset);
+
+ return 0;
+}
+
+/**
+ * dpni_set_link_cfg() - set the link configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Link configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_link_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_link_cfg *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_link_cfg *)cmd.params;
+ cmd_params->rate = cpu_to_le32(cfg->rate);
+ cmd_params->options = cpu_to_le64(cfg->options);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_link_cfg() - return the link configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Link configuration from dpni object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_link_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_link_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_link_cfg *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_CFG,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_cmd_link_cfg *)cmd.params;
+ cfg->rate = le32_to_cpu(rsp_params->rate);
+ cfg->options = le64_to_cpu(rsp_params->options);
+
+ return err;
+}
+
+/**
+ * dpni_get_link_state() - Return the link state (either up or down)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @state: Returned link state
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_link_state(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_link_state *state)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_rsp_get_link_state *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_link_state *)cmd.params;
+ state->up = dpni_get_field(rsp_params->flags, LINK_STATE);
+ state->rate = le32_to_cpu(rsp_params->rate);
+ state->options = le64_to_cpu(rsp_params->options);
+
+ return 0;
+}
+
+/**
+ * dpni_set_max_frame_length() - Set the maximum received frame length.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @max_frame_length: Maximum received frame length (in
+ * bytes); frame is discarded if its
+ * length exceeds this value
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 max_frame_length)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_max_frame_length *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_max_frame_length *)cmd.params;
+ cmd_params->max_frame_length = cpu_to_le16(max_frame_length);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_max_frame_length() - Get the maximum received frame length.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @max_frame_length: Maximum received frame length (in
+ * bytes); frame is discarded if its
+ * length exceeds this value
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 *max_frame_length)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_rsp_get_max_frame_length *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_max_frame_length *)cmd.params;
+ *max_frame_length = le16_to_cpu(rsp_params->max_frame_length);
+
+ return 0;
+}
+
+/**
+ * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Set to '1' to enable; '0' to disable
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int en)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_multicast_promisc *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_multicast_promisc *)cmd.params;
+ dpni_set_field(cmd_params->enable, ENABLE, en);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_multicast_promisc() - Get multicast promiscuous mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Returns '1' if enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *en)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_rsp_get_multicast_promisc *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_multicast_promisc *)cmd.params;
+ *en = dpni_get_field(rsp_params->enabled, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Set to '1' to enable; '0' to disable
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int en)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_unicast_promisc *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_unicast_promisc *)cmd.params;
+ dpni_set_field(cmd_params->enable, ENABLE, en);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_unicast_promisc() - Get unicast promiscuous mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Returns '1' if enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *en)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_rsp_get_unicast_promisc *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_unicast_promisc *)cmd.params;
+ *en = dpni_get_field(rsp_params->enabled, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dpni_set_primary_mac_addr() - Set the primary MAC address
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: MAC address to set as primary address
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const u8 mac_addr[6])
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_primary_mac_addr *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_primary_mac_addr *)cmd.params;
+ for (i = 0; i < 6; i++)
+ cmd_params->mac_addr[i] = mac_addr[5 - i];
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_primary_mac_addr() - Get the primary MAC address
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: Returned MAC address
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 mac_addr[6])
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_rsp_get_primary_mac_addr *rsp_params;
+ int i, err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_primary_mac_addr *)cmd.params;
+ for (i = 0; i < 6; i++)
+ mac_addr[5 - i] = rsp_params->mac_addr[i];
+
+ return 0;
+}
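+
+/*
+ * Illustrative usage sketch: read the primary MAC address and, if it
+ * has not been programmed (reads back as all-zeroes), fall back to the
+ * address of the attached physical port. Assumes 'mc_io' and 'token'
+ * from dpni_open(); byte-order swapping is handled by the accessors.
+ */
+static int __maybe_unused dpni_example_get_hw_addr(struct fsl_mc_io *mc_io,
+ u16 token, u8 addr[6])
+{
+ int i, err;
+
+ err = dpni_get_primary_mac_addr(mc_io, 0, token, addr);
+ if (err)
+ return err;
+
+ for (i = 0; i < 6; i++)
+ if (addr[i])
+ return 0;
+
+ /* primary address unset; use the port MAC, if any */
+ return dpni_get_port_mac_addr(mc_io, 0, token, addr);
+}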
+
+/**
+ * dpni_get_port_mac_addr() - Retrieve MAC address associated with the physical
+ * port the DPNI is attached to
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: MAC address of the physical port, if any, otherwise 0
+ *
+ * The primary MAC address is not cleared by this operation.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 mac_addr[6])
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_rsp_get_port_mac_addr *rsp_params;
+ int i, err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PORT_MAC_ADDR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_port_mac_addr *)cmd.params;
+ for (i = 0; i < 6; i++)
+ mac_addr[5 - i] = rsp_params->mac_addr[i];
+
+ return 0;
+}
+
+/**
+ * dpni_enable_vlan_filter() - Enable/disable VLAN filtering mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Set to '1' to enable; '0' to disable
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_enable_vlan_filter(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u32 en)
+{
+ struct dpni_cmd_enable_vlan_filter *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE_VLAN_FILTER,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_enable_vlan_filter *)cmd.params;
+ dpni_set_field(cmd_params->en, ENABLE, en);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_add_vlan_id() - Add VLAN ID filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @vlan_id: VLAN ID to add
+ * @flags: 0 - tc_id and flow_id will be ignored.
+ * Packets with this vlan_id will be passed to the next
+ * classification stages
+ * DPNI_VLAN_SET_QUEUE_ACTION
+ * Packets with this vlan_id will be forwarded directly to
+ * the queue defined by tc_id and flow_id
+ *
+ * @tc_id: Traffic class selection (0-7)
+ * @flow_id: Selects the specific queue out of the set allocated to the
+ * same tc_id. Value must be in range 0 to NUM_QUEUES - 1
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_add_vlan_id(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, u8 flags, u8 tc_id, u8 flow_id)
+{
+ struct dpni_cmd_vlan_id *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_VLAN_ID,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_vlan_id *)cmd.params;
+ cmd_params->flags = flags;
+ cmd_params->tc_id = tc_id;
+ cmd_params->flow_id = flow_id;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_remove_vlan_id() - Remove VLAN ID filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @vlan_id: VLAN ID to remove
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id)
+{
+ struct dpni_cmd_vlan_id *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_VLAN_ID,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_vlan_id *)cmd.params;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_add_mac_addr() - Add MAC address filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: MAC address to add
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const u8 mac_addr[6])
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_add_mac_addr *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_add_mac_addr *)cmd.params;
+ for (i = 0; i < 6; i++)
+ cmd_params->mac_addr[i] = mac_addr[5 - i];
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_remove_mac_addr() - Remove MAC address filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: MAC address to remove
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const u8 mac_addr[6])
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_remove_mac_addr *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_remove_mac_addr *)cmd.params;
+ for (i = 0; i < 6; i++)
+ cmd_params->mac_addr[i] = mac_addr[5 - i];
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @unicast: Set to '1' to clear unicast addresses
+ * @multicast: Set to '1' to clear multicast addresses
+ *
+ * The primary MAC address is not cleared by this operation.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int unicast,
+ int multicast)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_clear_mac_filters *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_clear_mac_filters *)cmd.params;
+ dpni_set_field(cmd_params->flags, UNICAST_FILTERS, unicast);
+ dpni_set_field(cmd_params->flags, MULTICAST_FILTERS, multicast);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
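+
+/*
+ * Illustrative usage sketch: rebuild the unicast MAC filter table from
+ * a caller-supplied list. 'addrs' and 'count' are hypothetical inputs;
+ * 'mc_io' and 'token' from dpni_open() are assumed.
+ */
+static int __maybe_unused dpni_example_set_uc_filters(struct fsl_mc_io *mc_io,
+ u16 token,
+ const u8 (*addrs)[6],
+ unsigned int count)
+{
+ unsigned int i;
+ int err;
+
+ /* drop the current unicast entries, keep the multicast ones */
+ err = dpni_clear_mac_filters(mc_io, 0, token, 1, 0);
+ if (err)
+ return err;
+
+ for (i = 0; i < count; i++) {
+ err = dpni_add_mac_addr(mc_io, 0, token, addrs[i]);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}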
+
+/**
+ * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @tc_id: Traffic class selection (0-7)
+ * @cfg: Traffic class distribution configuration
+ *
+ * warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpni_prepare_key_cfg()
+ * first to prepare the key_cfg_iova parameter
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 tc_id,
+ const struct dpni_rx_tc_dist_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_rx_tc_dist *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_rx_tc_dist *)cmd.params;
+ cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
+ cmd_params->tc_id = tc_id;
+ dpni_set_field(cmd_params->flags, DIST_MODE, cfg->dist_mode);
+ dpni_set_field(cmd_params->flags, MISS_ACTION, cfg->fs_cfg.miss_action);
+ cmd_params->default_flow_id = cpu_to_le16(cfg->fs_cfg.default_flow_id);
+ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_congestion_notification() - Set traffic class congestion
+ * notification configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
+ * @tc_id: Traffic class selection (0-7)
+ * @cfg: Congestion notification configuration
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpni_set_congestion_notification(
+ struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ u8 tc_id,
+ const struct dpni_congestion_notification_cfg *cfg)
+{
+ struct dpni_cmd_set_congestion_notification *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header =
+ mc_encode_cmd_header(DPNI_CMDID_SET_CONGESTION_NOTIFICATION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_congestion_notification *)cmd.params;
+ cmd_params->qtype = qtype;
+ cmd_params->tc = tc_id;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
+ cmd_params->dest_priority = cfg->dest_cfg.priority;
+ dpni_set_field(cmd_params->type_units, DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+ dpni_set_field(cmd_params->type_units, CONG_UNITS, cfg->units);
+ cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
+ cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
+ cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
+ cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_queue() - Set queue parameters
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue - all queue types are supported, although
+ * the command is ignored for Tx
+ * @tc: Traffic class, in range 0 to NUM_TCS - 1
+ * @index: Selects the specific queue out of the set allocated for the
+ * same TC. Value must be in range 0 to NUM_QUEUES - 1
+ * @options: A combination of DPNI_QUEUE_OPT_ values that control what
+ * configuration options are set on the queue
+ * @queue: Queue structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_queue(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ u8 tc,
+ u8 index,
+ u8 options,
+ const struct dpni_queue *queue)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_queue *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_queue *)cmd.params;
+ cmd_params->qtype = qtype;
+ cmd_params->tc = tc;
+ cmd_params->index = index;
+ cmd_params->options = options;
+ cmd_params->dest_id = cpu_to_le32(queue->destination.id);
+ cmd_params->dest_prio = queue->destination.priority;
+ dpni_set_field(cmd_params->flags, DEST_TYPE, queue->destination.type);
+ dpni_set_field(cmd_params->flags, STASH_CTRL, queue->flc.stash_control);
+ dpni_set_field(cmd_params->flags, HOLD_ACTIVE,
+ queue->destination.hold_active);
+ cmd_params->flc = cpu_to_le64(queue->flc.value);
+ cmd_params->user_context = cpu_to_le64(queue->user_context);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_queue() - Get queue parameters
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue - all queue types are supported
+ * @tc: Traffic class, in range 0 to NUM_TCS - 1
+ * @index: Selects the specific queue out of the set allocated for the
+ * same TC. Value must be in range 0 to NUM_QUEUES - 1
+ * @queue: Queue configuration structure
+ * @qid: Queue identification
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_queue(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ u8 tc,
+ u8 index,
+ struct dpni_queue *queue,
+ struct dpni_queue_id *qid)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_get_queue *cmd_params;
+ struct dpni_rsp_get_queue *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_queue *)cmd.params;
+ cmd_params->qtype = qtype;
+ cmd_params->tc = tc;
+ cmd_params->index = index;
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_queue *)cmd.params;
+ queue->destination.id = le32_to_cpu(rsp_params->dest_id);
+ queue->destination.priority = rsp_params->dest_prio;
+ queue->destination.type = dpni_get_field(rsp_params->flags,
+ DEST_TYPE);
+ queue->flc.stash_control = dpni_get_field(rsp_params->flags,
+ STASH_CTRL);
+ queue->destination.hold_active = dpni_get_field(rsp_params->flags,
+ HOLD_ACTIVE);
+ queue->flc.value = le64_to_cpu(rsp_params->flc);
+ queue->user_context = le64_to_cpu(rsp_params->user_context);
+ qid->fqid = le32_to_cpu(rsp_params->fqid);
+ qid->qdbin = le16_to_cpu(rsp_params->qdbin);
+
+ return 0;
+}
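+
+/*
+ * Illustrative usage sketch: look up the frame queue ID (FQID) behind a
+ * given Rx queue, e.g. to feed a dequeue context elsewhere. Assumes
+ * 'mc_io' and 'token' from dpni_open().
+ */
+static int __maybe_unused dpni_example_rx_fqid(struct fsl_mc_io *mc_io,
+ u16 token, u8 tc, u8 index,
+ u32 *fqid)
+{
+ struct dpni_queue queue;
+ struct dpni_queue_id qid;
+ int err;
+
+ err = dpni_get_queue(mc_io, 0, token, DPNI_QUEUE_RX, tc, index,
+ &queue, &qid);
+ if (err)
+ return err;
+
+ *fqid = qid.fqid;
+
+ return 0;
+}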
+
+/**
+ * dpni_get_statistics() - Get DPNI statistics
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @page: Selects the statistics page to retrieve, see
+ * DPNI_GET_STATISTICS output. Pages are numbered 0 to 6.
+ * @stat: Structure containing the statistics
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_statistics(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 page,
+ union dpni_statistics *stat)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_get_statistics *cmd_params;
+ struct dpni_rsp_get_statistics *rsp_params;
+ int i, err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_STATISTICS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_statistics *)cmd.params;
+ cmd_params->page_number = page;
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_statistics *)cmd.params;
+ for (i = 0; i < DPNI_STATISTICS_CNT; i++)
+ stat->raw.counter[i] = le64_to_cpu(rsp_params->counter[i]);
+
+ return 0;
+}
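+
+/*
+ * Illustrative usage sketch: pull page 0 of the hardware counters and
+ * return the total ingress frame count. Assumes 'mc_io' and 'token'
+ * from dpni_open(); see union dpni_statistics in dpni.h for the page
+ * layouts.
+ */
+static int __maybe_unused dpni_example_rx_frames(struct fsl_mc_io *mc_io,
+ u16 token, u64 *frames)
+{
+ union dpni_statistics stats;
+ int err;
+
+ err = dpni_get_statistics(mc_io, 0, token, 0, &stats);
+ if (err)
+ return err;
+
+ *frames = stats.page_0.ingress_all_frames;
+
+ return 0;
+}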
+
+/**
+ * dpni_set_taildrop() - Set taildrop per queue or TC
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cg_point: Congestion point
+ * @qtype: Queue type on which the taildrop is configured.
+ * Only Rx queues are supported for now
+ * @tc: Traffic class to apply this taildrop to
+ * @index: Index of the queue if the DPNI supports multiple queues for
+ * traffic distribution. Ignored if CONGESTION_POINT is not 0.
+ * @taildrop: Taildrop structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_taildrop(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_congestion_point cg_point,
+ enum dpni_queue_type qtype,
+ u8 tc,
+ u8 index,
+ struct dpni_taildrop *taildrop)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_taildrop *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TAILDROP,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_taildrop *)cmd.params;
+ cmd_params->congestion_point = cg_point;
+ cmd_params->qtype = qtype;
+ cmd_params->tc = tc;
+ cmd_params->index = index;
+ dpni_set_field(cmd_params->enable, ENABLE, taildrop->enable);
+ cmd_params->units = taildrop->units;
+ cmd_params->threshold = cpu_to_le32(taildrop->threshold);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_taildrop() - Get taildrop information
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cg_point: Congestion point
+ * @qtype: Queue type on which the taildrop is configured.
+ * Only Rx queues are supported for now
+ * @tc: Traffic class to apply this taildrop to
+ * @index: Index of the queue if the DPNI supports multiple queues for
+ * traffic distribution. Ignored if CONGESTION_POINT is not 0.
+ * @taildrop: Taildrop structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_taildrop(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_congestion_point cg_point,
+ enum dpni_queue_type qtype,
+ u8 tc,
+ u8 index,
+ struct dpni_taildrop *taildrop)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_get_taildrop *cmd_params;
+ struct dpni_rsp_get_taildrop *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TAILDROP,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_taildrop *)cmd.params;
+ cmd_params->congestion_point = cg_point;
+ cmd_params->qtype = qtype;
+ cmd_params->tc = tc;
+ cmd_params->index = index;
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_taildrop *)cmd.params;
+ taildrop->enable = dpni_get_field(rsp_params->enable, ENABLE);
+ taildrop->units = rsp_params->units;
+ taildrop->threshold = le32_to_cpu(rsp_params->threshold);
+
+ return 0;
+}
+
+/**
+ * dpni_get_api_version() - Get Data Path Network Interface API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path network interface API
+ * @minor_ver: Minor version of data path network interface API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver)
+{
+ struct dpni_rsp_get_api_version *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_API_VERSION,
+ cmd_flags, 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpni_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
+
+ return 0;
+}
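+
+/*
+ * Illustrative usage sketch: refuse to run against firmware older than
+ * a required DPNI API version. 'need_major'/'need_minor' are
+ * caller-chosen values, not constants defined by this driver.
+ */
+static int __maybe_unused dpni_example_check_api(struct fsl_mc_io *mc_io,
+ u16 need_major, u16 need_minor)
+{
+ u16 major, minor;
+ int err;
+
+ err = dpni_get_api_version(mc_io, 0, &major, &minor);
+ if (err)
+ return err;
+
+ if (major < need_major ||
+ (major == need_major && minor < need_minor))
+ return -EOPNOTSUPP;
+
+ return 0;
+}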
+
+/**
+ * dpni_set_rx_fs_dist() - Set Rx flow steering distribution
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Distribution configuration
+ *
+ * If FS was already enabled by a previous call, the classification
+ * key is changed but all existing table rules are kept. If the
+ * existing rules do not match the new key, the results are not
+ * predictable; it is the user's responsibility to keep the key
+ * consistent with the rules.
+ * If cfg.enable is set to 1, the command creates a flow steering table
+ * and classifies packets according to it. Packets that miss all the
+ * table rules are classified according to the settings made in
+ * dpni_set_rx_hash_dist().
+ * If cfg.enable is set to 0, the command clears the flow steering table
+ * and packets are classified according to the settings made in
+ * dpni_set_rx_hash_dist().
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rx_dist_cfg *cfg)
+{
+ struct dpni_cmd_set_rx_fs_dist *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FS_DIST,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_rx_fs_dist *)cmd.params;
+ cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
+ dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable);
+ cmd_params->tc = cfg->tc;
+ cmd_params->miss_flow_id = cpu_to_le16(cfg->fs_miss_flow_id);
+ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_rx_hash_dist() - Set Rx hash distribution
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Distribution configuration
+ * If cfg.enable is set to 1, packets are classified using a hash
+ * function based on the key referenced by the cfg.key_cfg_iova parameter.
+ * If cfg.enable is set to 0, packets are sent to the default queue
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rx_dist_cfg *cfg)
+{
+ struct dpni_cmd_set_rx_hash_dist *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_HASH_DIST,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_rx_hash_dist *)cmd.params;
+ cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
+ dpni_set_field(cmd_params->enable, RX_HASH_DIST_ENABLE, cfg->enable);
+ cmd_params->tc = cfg->tc;
+ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
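+
+/*
+ * Illustrative usage sketch: spread the Rx traffic of one traffic class
+ * over 'num_queues' queues by hashing. 'key_iova' is assumed to point
+ * to a DMA-mapped buffer already filled in with dpni_prepare_key_cfg().
+ */
+static int __maybe_unused dpni_example_hash_dist(struct fsl_mc_io *mc_io,
+ u16 token, u8 tc,
+ u16 num_queues, u64 key_iova)
+{
+ struct dpni_rx_dist_cfg cfg = {
+ .dist_size = num_queues,
+ .key_cfg_iova = key_iova,
+ .enable = 1,
+ .tc = tc,
+ };
+
+ return dpni_set_rx_hash_dist(mc_io, 0, token, &cfg);
+}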
+
+/**
+ * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
+ * (to select a flow ID)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @tc_id: Traffic class selection (0-7)
+ * @index: Location in the FS table where to insert the entry.
+ * Only relevant if MASKING is enabled for FS
+ * classification on this DPNI, it is ignored for exact match.
+ * @cfg: Flow steering rule to add
+ * @action: Action to be taken as result of a classification hit
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 tc_id,
+ u16 index,
+ const struct dpni_rule_cfg *cfg,
+ const struct dpni_fs_action_cfg *action)
+{
+ struct dpni_cmd_add_fs_entry *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params;
+ cmd_params->tc_id = tc_id;
+ cmd_params->key_size = cfg->key_size;
+ cmd_params->index = cpu_to_le16(index);
+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+ cmd_params->options = cpu_to_le16(action->options);
+ cmd_params->flow_id = cpu_to_le16(action->flow_id);
+ cmd_params->flc = cpu_to_le64(action->flc);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific
+ * traffic class
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @tc_id: Traffic class selection (0-7)
+ * @cfg: Flow steering rule to remove
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 tc_id,
+ const struct dpni_rule_cfg *cfg)
+{
+ struct dpni_cmd_remove_fs_entry *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params;
+ cmd_params->tc_id = tc_id;
+ cmd_params->key_size = cfg->key_size;
+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_qos_table() - Set QoS mapping table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: QoS table configuration
+ *
+ * This function and all QoS-related functions require that
+ * 'max_tcs > 1' was set at DPNI creation.
+ *
+ * warning: Before calling this function, call dpni_prepare_key_cfg() to
+ * prepare the key_cfg_iova parameter
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_qos_table(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_qos_tbl_cfg *cfg)
+{
+ struct dpni_cmd_set_qos_table *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_qos_table *)cmd.params;
+ cmd_params->default_tc = cfg->default_tc;
+ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+ dpni_set_field(cmd_params->discard_on_miss, DISCARD_ON_MISS,
+ cfg->discard_on_miss);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: QoS rule to add
+ * @tc_id: Traffic class selection (0-7)
+ * @index: Location in the QoS table where to insert the entry.
+ * Only relevant if MASKING is enabled for QoS classification on
+ * this DPNI, it is ignored for exact match.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rule_cfg *cfg,
+ u8 tc_id,
+ u16 index)
+{
+ struct dpni_cmd_add_qos_entry *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_add_qos_entry *)cmd.params;
+ cmd_params->tc_id = tc_id;
+ cmd_params->key_size = cfg->key_size;
+ cmd_params->index = cpu_to_le16(index);
+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_remove_qos_entry() - Remove QoS mapping entry
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: QoS rule to remove
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rule_cfg *cfg)
+{
+ struct dpni_cmd_remove_qos_entry *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_QOS_ENT,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_remove_qos_entry *)cmd.params;
+ cmd_params->key_size = cfg->key_size;
+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_clear_qos_table() - Clear all QoS mapping entries
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Following this function call, all frames are directed to
+ * the default traffic class (0)
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_QOS_TBL,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_tx_shaping() - Set the transmit shaping
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @tx_cr_shaper: TX committed rate shaping configuration
+ * @tx_er_shaper: TX excess rate shaping configuration
+ * @coupled: Committed and excess rate shapers are coupled
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_tx_shaping_cfg *tx_cr_shaper,
+ const struct dpni_tx_shaping_cfg *tx_er_shaper,
+ int coupled)
+{
+ struct dpni_cmd_set_tx_shaping *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_tx_shaping *)cmd.params;
+ cmd_params->tx_cr_max_burst_size = cpu_to_le16(tx_cr_shaper->max_burst_size);
+ cmd_params->tx_er_max_burst_size = cpu_to_le16(tx_er_shaper->max_burst_size);
+ cmd_params->tx_cr_rate_limit = cpu_to_le32(tx_cr_shaper->rate_limit);
+ cmd_params->tx_er_rate_limit = cpu_to_le32(tx_er_shaper->rate_limit);
+ dpni_set_field(cmd_params->coupled, COUPLED, coupled);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_single_step_cfg() - return current configuration for
+ * single step PTP
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @ptp_cfg: ptp single step configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ */
+int dpni_get_single_step_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_single_step_cfg *ptp_cfg)
+{
+ struct dpni_rsp_single_step_cfg *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_SINGLE_STEP_CFG,
+ cmd_flags, token);
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* read command response */
+ rsp_params = (struct dpni_rsp_single_step_cfg *)cmd.params;
+ ptp_cfg->offset = le16_to_cpu(rsp_params->offset);
+ ptp_cfg->en = dpni_get_field(le16_to_cpu(rsp_params->flags),
+ PTP_ENABLE) ? 1 : 0;
+ ptp_cfg->ch_update = dpni_get_field(le16_to_cpu(rsp_params->flags),
+ PTP_CH_UPDATE) ? 1 : 0;
+ ptp_cfg->peer_delay = le32_to_cpu(rsp_params->peer_delay);
+ ptp_cfg->ptp_onestep_reg_base =
+ le32_to_cpu(rsp_params->ptp_onestep_reg_base);
+
+ return err;
+}
+
+/**
+ * dpni_set_single_step_cfg() - enable/disable and configure single step PTP
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @ptp_cfg: ptp single step configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * The function has effect only when the dpni object is connected to a dpmac
+ * object. If the dpni is not connected to a dpmac, the configuration is
+ * stored internally and applied once the connection is made.
+ */
+int dpni_set_single_step_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_single_step_cfg *ptp_cfg)
+{
+ struct dpni_cmd_single_step_cfg *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ u16 flags;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_SINGLE_STEP_CFG,
+ cmd_flags, token);
+ cmd_params = (struct dpni_cmd_single_step_cfg *)cmd.params;
+ cmd_params->offset = cpu_to_le16(ptp_cfg->offset);
+ cmd_params->peer_delay = cpu_to_le32(ptp_cfg->peer_delay);
+
+ flags = le16_to_cpu(cmd_params->flags);
+ dpni_set_field(flags, PTP_ENABLE, !!ptp_cfg->en);
+ dpni_set_field(flags, PTP_CH_UPDATE, !!ptp_cfg->ch_update);
+ cmd_params->flags = cpu_to_le16(flags);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h
new file mode 100644
index 000000000..6fffd519a
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h
@@ -0,0 +1,1110 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ * Copyright 2020 NXP
+ */
+#ifndef __FSL_DPNI_H
+#define __FSL_DPNI_H
+
+#include "dpkg.h"
+
+struct fsl_mc_io;
+
+/* Data Path Network Interface API
+ * Contains initialization APIs and runtime control APIs for DPNI
+ */
+
+/** General DPNI macros */
+
+/**
+ * DPNI_MAX_TC - Maximum number of traffic classes
+ */
+#define DPNI_MAX_TC 8
+/**
+ * DPNI_MAX_DPBP - Maximum number of buffer pools per DPNI
+ */
+#define DPNI_MAX_DPBP 8
+
+/**
+ * DPNI_ALL_TCS - All traffic classes considered; see dpni_set_queue()
+ */
+#define DPNI_ALL_TCS (u8)(-1)
+/**
+ * DPNI_ALL_TC_FLOWS - All flows within traffic class considered; see
+ * dpni_set_queue()
+ */
+#define DPNI_ALL_TC_FLOWS (u16)(-1)
+/**
+ * DPNI_NEW_FLOW_ID - Generate new flow ID; see dpni_set_queue()
+ */
+#define DPNI_NEW_FLOW_ID (u16)(-1)
+
+/**
+ * DPNI_OPT_TX_FRM_RELEASE - Tx traffic is always released to a buffer pool on
+ * transmit; no resources are allocated to have the frames confirmed back
+ * to the source after transmission.
+ */
+#define DPNI_OPT_TX_FRM_RELEASE 0x000001
+/**
+ * DPNI_OPT_NO_MAC_FILTER - Disables support for MAC address filtering for
+ * addresses other than primary MAC address. This affects both unicast and
+ * multicast. Promiscuous mode can still be enabled/disabled for both unicast
+ * and multicast. If promiscuous mode is disabled, only traffic matching the
+ * primary MAC address will be accepted.
+ */
+#define DPNI_OPT_NO_MAC_FILTER 0x000002
+/**
+ * DPNI_OPT_HAS_POLICING - Allocate policers for this DPNI. They can be used to
+ * rate-limit traffic on a per traffic class (TC) basis.
+ */
+#define DPNI_OPT_HAS_POLICING 0x000004
+/**
+ * DPNI_OPT_SHARED_CONGESTION - Congestion can be managed in several ways,
+ * allowing the buffer pool to deplete on ingress, taildrop on each queue or
+ * use congestion groups for sets of queues. If set, it configures a single
+ * congestion group across all TCs. If reset, a congestion group is allocated
+ * for each TC. Only relevant if the DPNI has multiple traffic classes.
+ */
+#define DPNI_OPT_SHARED_CONGESTION 0x000008
+/**
+ * DPNI_OPT_HAS_KEY_MASKING - Enables TCAM for Flow Steering and QoS look-ups.
+ * If not specified, all look-ups are exact match. Note that TCAM is not
+ * available on LS1088 and its variants. Setting this bit on these SoCs will
+ * trigger an error.
+ */
+#define DPNI_OPT_HAS_KEY_MASKING 0x000010
+/**
+ * DPNI_OPT_NO_FS - Disables the flow steering table.
+ */
+#define DPNI_OPT_NO_FS 0x000020
+/**
+ * DPNI_OPT_SHARED_FS - Flow steering table is shared between all traffic
+ * classes
+ */
+#define DPNI_OPT_SHARED_FS 0x001000
+
+int dpni_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpni_id,
+ u16 *token);
+
+int dpni_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/**
+ * struct dpni_pools_cfg - Structure representing buffer pools configuration
+ * @num_dpbp: Number of DPBPs
+ * @pools: Array of buffer pools parameters; The number of valid entries
+ * must match 'num_dpbp' value
+ * @pools.dpbp_id: DPBP object ID
+ * @pools.buffer_size: Buffer size
+ * @pools.backup_pool: Backup pool
+ */
+struct dpni_pools_cfg {
+ u8 num_dpbp;
+ struct {
+ int dpbp_id;
+ u16 buffer_size;
+ int backup_pool;
+ } pools[DPNI_MAX_DPBP];
+};
+
+int dpni_set_pools(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_pools_cfg *cfg);
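+
+/*
+ * Illustrative usage sketch only: attach a single buffer pool to the
+ * DPNI. 'dpbp_id' and 'buf_size' are caller-chosen values; 'mc_io' and
+ * 'token' come from a prior dpni_open().
+ */
+static inline int dpni_example_attach_one_pool(struct fsl_mc_io *mc_io,
+ u16 token, int dpbp_id,
+ u16 buf_size)
+{
+ struct dpni_pools_cfg pools = {
+ .num_dpbp = 1,
+ .pools[0] = {
+ .dpbp_id = dpbp_id,
+ .buffer_size = buf_size,
+ .backup_pool = 0,
+ },
+ };
+
+ return dpni_set_pools(mc_io, 0, token, &pools);
+}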
+
+int dpni_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpni_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpni_is_enabled(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *en);
+
+int dpni_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/* DPNI IRQ Index and Events */
+
+#define DPNI_IRQ_INDEX 0
+
+/* DPNI_IRQ_EVENT_LINK_CHANGED - indicates a change in link state */
+#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001
+
+/* DPNI_IRQ_EVENT_ENDPOINT_CHANGED - indicates a change in endpoint */
+#define DPNI_IRQ_EVENT_ENDPOINT_CHANGED 0x00000002
+
+int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en);
+
+int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 *en);
+
+int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask);
+
+int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *mask);
+
+int dpni_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status);
+
+int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status);
+
+/**
+ * struct dpni_attr - Structure representing DPNI attributes
+ * @options: Any combination of the following options:
+ * DPNI_OPT_TX_FRM_RELEASE
+ * DPNI_OPT_NO_MAC_FILTER
+ * DPNI_OPT_HAS_POLICING
+ * DPNI_OPT_SHARED_CONGESTION
+ * DPNI_OPT_HAS_KEY_MASKING
+ * DPNI_OPT_NO_FS
+ * @num_queues: Number of Tx and Rx queues used for traffic distribution.
+ * @num_tcs: Number of traffic classes (TCs), reserved for the DPNI.
+ * @mac_filter_entries: Number of entries in the MAC address filtering table.
+ * @vlan_filter_entries: Number of entries in the VLAN address filtering table.
+ * @qos_entries: Number of entries in the QoS classification table.
+ * @fs_entries: Number of entries in the flow steering table.
+ * @qos_key_size: Size, in bytes, of the QoS look-up key. Defining a key larger
+ * than this when adding QoS entries will result in an error.
+ * @fs_key_size: Size, in bytes, of the flow steering look-up key. Defining a
+ * key larger than this when composing the hash + FS key will
+ * result in an error.
+ * @wriop_version: Version of WRIOP HW block. The 3 version values are stored
+ * in 6, 5 and 5 bits, respectively.
+ */
+struct dpni_attr {
+ u32 options;
+ u8 num_queues;
+ u8 num_tcs;
+ u8 mac_filter_entries;
+ u8 vlan_filter_entries;
+ u8 qos_entries;
+ u16 fs_entries;
+ u8 qos_key_size;
+ u8 fs_key_size;
+ u16 wriop_version;
+};
+
+int dpni_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_attr *attr);
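+
+/*
+ * Illustrative usage sketch only: open a DPNI by object id, read back
+ * its attributes and close it again. 'dpni_id' would normally come
+ * from the fsl-mc bus device; the close error is deliberately ignored.
+ */
+static inline int dpni_example_read_attr(struct fsl_mc_io *mc_io, int dpni_id,
+ struct dpni_attr *attr)
+{
+ u16 token;
+ int err;
+
+ err = dpni_open(mc_io, 0, dpni_id, &token);
+ if (err)
+ return err;
+
+ err = dpni_get_attributes(mc_io, 0, token, attr);
+
+ dpni_close(mc_io, 0, token);
+
+ return err;
+}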
+
+/* DPNI errors */
+
+/**
+ * DPNI_ERROR_EOFHE - Extract out of frame header error
+ */
+#define DPNI_ERROR_EOFHE 0x00020000
+/**
+ * DPNI_ERROR_FLE - Frame length error
+ */
+#define DPNI_ERROR_FLE 0x00002000
+/**
+ * DPNI_ERROR_FPE - Frame physical error
+ */
+#define DPNI_ERROR_FPE 0x00001000
+/**
+ * DPNI_ERROR_PHE - Parsing header error
+ */
+#define DPNI_ERROR_PHE 0x00000020
+/**
+ * DPNI_ERROR_L3CE - Parser L3 checksum error
+ */
+#define DPNI_ERROR_L3CE 0x00000004
+/**
+ * DPNI_ERROR_L4CE - Parser L4 checksum error
+ */
+#define DPNI_ERROR_L4CE 0x00000001
+
+/**
+ * enum dpni_error_action - Defines DPNI behavior for errors
+ * @DPNI_ERROR_ACTION_DISCARD: Discard the frame
+ * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow
+ * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue
+ */
+enum dpni_error_action {
+ DPNI_ERROR_ACTION_DISCARD = 0,
+ DPNI_ERROR_ACTION_CONTINUE = 1,
+ DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2
+};
+
+/**
+ * struct dpni_error_cfg - Structure representing DPNI errors treatment
+ * @errors: Errors mask; use 'DPNI_ERROR_<X>' values
+ * @error_action: The desired action for the errors mask
+ * @set_frame_annotation: Set to '1' to mark the errors in frame annotation
+ * status (FAS); relevant only for the non-discard action
+ */
+struct dpni_error_cfg {
+ u32 errors;
+ enum dpni_error_action error_action;
+ int set_frame_annotation;
+};
+
+int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_error_cfg *cfg);
+
+/* DPNI buffer layout modification options */
+
+/**
+ * DPNI_BUF_LAYOUT_OPT_TIMESTAMP - Select to modify the time-stamp setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001
+/**
+ * DPNI_BUF_LAYOUT_OPT_PARSER_RESULT - Select to modify the parser-result
+ * setting; not applicable for Tx
+ */
+#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002
+/**
+ * DPNI_BUF_LAYOUT_OPT_FRAME_STATUS - Select to modify the frame-status setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004
+/**
+ * DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE - Select to modify the private-data-size setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008
+/**
+ * DPNI_BUF_LAYOUT_OPT_DATA_ALIGN - Select to modify the data-alignment setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010
+/**
+ * DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM - Select to modify the data-head-room setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020
+/**
+ * DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM - Select to modify the data-tail-room setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040
+
+/**
+ * struct dpni_buffer_layout - Structure representing DPNI buffer layout
+ * @options: Flags representing the suggested modifications to the buffer
+ * layout; Use any combination of 'DPNI_BUF_LAYOUT_OPT_<X>' flags
+ * @pass_timestamp: Pass timestamp value
+ * @pass_parser_result: Pass parser results
+ * @pass_frame_status: Pass frame status
+ * @private_data_size: Size kept for private data (in bytes)
+ * @data_align: Data alignment
+ * @data_head_room: Data head room
+ * @data_tail_room: Data tail room
+ */
+struct dpni_buffer_layout {
+ u32 options;
+ int pass_timestamp;
+ int pass_parser_result;
+ int pass_frame_status;
+ u16 private_data_size;
+ u16 data_align;
+ u16 data_head_room;
+ u16 data_tail_room;
+};
+
+/**
+ * enum dpni_queue_type - Identifies a type of queue targeted by the command
+ * @DPNI_QUEUE_RX: Rx queue
+ * @DPNI_QUEUE_TX: Tx queue
+ * @DPNI_QUEUE_TX_CONFIRM: Tx confirmation queue
+ * @DPNI_QUEUE_RX_ERR: Rx error queue
+ */
+enum dpni_queue_type {
+ DPNI_QUEUE_RX,
+ DPNI_QUEUE_TX,
+ DPNI_QUEUE_TX_CONFIRM,
+ DPNI_QUEUE_RX_ERR,
+};
+
+int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ struct dpni_buffer_layout *layout);
+
+int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ const struct dpni_buffer_layout *layout);
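+
+/*
+ * Illustrative usage sketch only: reserve 'headroom' bytes in front of
+ * every Rx frame while leaving the rest of the layout untouched, by
+ * selecting only the head-room option bit.
+ */
+static inline int dpni_example_set_rx_headroom(struct fsl_mc_io *mc_io,
+ u16 token, u16 headroom)
+{
+ struct dpni_buffer_layout layout = {
+ .options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM,
+ .data_head_room = headroom,
+ };
+
+ return dpni_set_buffer_layout(mc_io, 0, token, DPNI_QUEUE_RX, &layout);
+}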
+
+/**
+ * enum dpni_offload - Identifies a type of offload targeted by the command
+ * @DPNI_OFF_RX_L3_CSUM: Rx L3 checksum validation
+ * @DPNI_OFF_RX_L4_CSUM: Rx L4 checksum validation
+ * @DPNI_OFF_TX_L3_CSUM: Tx L3 checksum generation
+ * @DPNI_OFF_TX_L4_CSUM: Tx L4 checksum generation
+ */
+enum dpni_offload {
+ DPNI_OFF_RX_L3_CSUM,
+ DPNI_OFF_RX_L4_CSUM,
+ DPNI_OFF_TX_L3_CSUM,
+ DPNI_OFF_TX_L4_CSUM,
+};
+
+int dpni_set_offload(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_offload type,
+ u32 config);
+
+int dpni_get_offload(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_offload type,
+ u32 *config);
+
+int dpni_get_qdid(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ u16 *qdid);
+
+int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 *data_offset);
+
+#define DPNI_STATISTICS_CNT 7
+
+/**
+ * union dpni_statistics - Union describing the DPNI statistics
+ * @page_0: Page_0 statistics structure
+ * @page_0.ingress_all_frames: Ingress frame count
+ * @page_0.ingress_all_bytes: Ingress byte count
+ * @page_0.ingress_multicast_frames: Ingress multicast frame count
+ * @page_0.ingress_multicast_bytes: Ingress multicast byte count
+ * @page_0.ingress_broadcast_frames: Ingress broadcast frame count
+ * @page_0.ingress_broadcast_bytes: Ingress broadcast byte count
+ * @page_1: Page_1 statistics structure
+ * @page_1.egress_all_frames: Egress frame count
+ * @page_1.egress_all_bytes: Egress byte count
+ * @page_1.egress_multicast_frames: Egress multicast frame count
+ * @page_1.egress_multicast_bytes: Egress multicast byte count
+ * @page_1.egress_broadcast_frames: Egress broadcast frame count
+ * @page_1.egress_broadcast_bytes: Egress broadcast byte count
+ * @page_2: Page_2 statistics structure
+ * @page_2.ingress_filtered_frames: Ingress filtered frame count
+ * @page_2.ingress_discarded_frames: Ingress discarded frame count
+ * @page_2.ingress_nobuffer_discards: Ingress discarded frame count due to
+ * lack of buffers
+ * @page_2.egress_discarded_frames: Egress discarded frame count
+ * @page_2.egress_confirmed_frames: Egress confirmed frame count
+ * @page_3: Page_3 statistics structure
+ * @page_3.egress_dequeue_bytes: Cumulative count of the number of bytes
+ * dequeued from egress FQs
+ * @page_3.egress_dequeue_frames: Cumulative count of the number of frames
+ * dequeued from egress FQs
+ * @page_3.egress_reject_bytes: Cumulative count of the number of bytes in
+ * egress frames whose enqueue was rejected
+ * @page_3.egress_reject_frames: Cumulative count of the number of egress
+ * frames whose enqueue was rejected
+ * @page_4: Page_4 statistics structure: congestion points
+ * @page_4.cgr_reject_frames: number of rejected frames due to congestion point
+ * @page_4.cgr_reject_bytes: number of rejected bytes due to congestion point
+ * @page_5: Page_5 statistics structure: policer
+ * @page_5.policer_cnt_red: Number of red colored frames
+ * @page_5.policer_cnt_yellow: number of yellow colored frames
+ * @page_5.policer_cnt_green: number of green colored frames
+ * @page_5.policer_cnt_re_red: number of recolored red frames
+ * @page_5.policer_cnt_re_yellow: number of recolored yellow frames
+ * @page_6: Page_6 statistics structure
+ * @page_6.tx_pending_frames: total number of frames pending in egress FQs
+ * @raw: raw statistics structure, used to index counters
+ */
+union dpni_statistics {
+ struct {
+ u64 ingress_all_frames;
+ u64 ingress_all_bytes;
+ u64 ingress_multicast_frames;
+ u64 ingress_multicast_bytes;
+ u64 ingress_broadcast_frames;
+ u64 ingress_broadcast_bytes;
+ } page_0;
+ struct {
+ u64 egress_all_frames;
+ u64 egress_all_bytes;
+ u64 egress_multicast_frames;
+ u64 egress_multicast_bytes;
+ u64 egress_broadcast_frames;
+ u64 egress_broadcast_bytes;
+ } page_1;
+ struct {
+ u64 ingress_filtered_frames;
+ u64 ingress_discarded_frames;
+ u64 ingress_nobuffer_discards;
+ u64 egress_discarded_frames;
+ u64 egress_confirmed_frames;
+ } page_2;
+ struct {
+ u64 egress_dequeue_bytes;
+ u64 egress_dequeue_frames;
+ u64 egress_reject_bytes;
+ u64 egress_reject_frames;
+ } page_3;
+ struct {
+ u64 cgr_reject_frames;
+ u64 cgr_reject_bytes;
+ } page_4;
+ struct {
+ u64 policer_cnt_red;
+ u64 policer_cnt_yellow;
+ u64 policer_cnt_green;
+ u64 policer_cnt_re_red;
+ u64 policer_cnt_re_yellow;
+ } page_5;
+ struct {
+ u64 tx_pending_frames;
+ } page_6;
+ struct {
+ u64 counter[DPNI_STATISTICS_CNT];
+ } raw;
+};
+
+int dpni_get_statistics(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 page,
+ union dpni_statistics *stat);
+
+#define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL
+#define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
+#define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL
+#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
+#define DPNI_LINK_OPT_PFC_PAUSE 0x0000000000000010ULL
+
+/**
+ * struct dpni_link_cfg - Structure representing DPNI link configuration
+ * @rate: Rate
+ * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
+ */
+struct dpni_link_cfg {
+ u32 rate;
+ u64 options;
+};
+
+int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_link_cfg *cfg);
+
+int dpni_get_link_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_link_cfg *cfg);
+
+/**
+ * struct dpni_link_state - Structure representing DPNI link state
+ * @rate: Rate
+ * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
+ * @up: Link state; '0' for down, '1' for up
+ */
+struct dpni_link_state {
+ u32 rate;
+ u64 options;
+ int up;
+};
+
+int dpni_get_link_state(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_link_state *state);
+
+int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 max_frame_length);
+
+int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 *max_frame_length);
+
+int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int en);
+
+int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *en);
+
+int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int en);
+
+int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *en);
+
+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const u8 mac_addr[6]);
+
+int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 mac_addr[6]);
+
+int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 mac_addr[6]);
+
+int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const u8 mac_addr[6]);
+
+int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const u8 mac_addr[6]);
+
+int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int unicast,
+ int multicast);
+
+/**
+ * enum dpni_dist_mode - DPNI distribution mode
+ * @DPNI_DIST_MODE_NONE: No distribution
+ * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if
+ * the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation
+ * @DPNI_DIST_MODE_FS: Use explicit flow steering; only relevant if
+ * the 'DPNI_OPT_DIST_FS' option was set at DPNI creation
+ */
+enum dpni_dist_mode {
+ DPNI_DIST_MODE_NONE = 0,
+ DPNI_DIST_MODE_HASH = 1,
+ DPNI_DIST_MODE_FS = 2
+};
+
+/**
+ * enum dpni_fs_miss_action - DPNI Flow Steering miss action
+ * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame
+ * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use explicit flow-id
+ * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash
+ */
+enum dpni_fs_miss_action {
+ DPNI_FS_MISS_DROP = 0,
+ DPNI_FS_MISS_EXPLICIT_FLOWID = 1,
+ DPNI_FS_MISS_HASH = 2
+};
+
+/**
+ * struct dpni_fs_tbl_cfg - Flow Steering table configuration
+ * @miss_action: Miss action selection
+ * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID'
+ */
+struct dpni_fs_tbl_cfg {
+ enum dpni_fs_miss_action miss_action;
+ u16 default_flow_id;
+};
+
+int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg,
+ u8 *key_cfg_buf);
+
+/**
+ * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration
+ * @dist_size: Set the distribution size;
+ * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
+ * 112,128,192,224,256,384,448,512,768,896,1024
+ * @dist_mode: Distribution mode
+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
+ * the extractions to be used for the distribution key by calling
+ * dpni_prepare_key_cfg(); relevant only when
+ * 'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0'
+ * @fs_cfg: Flow Steering table configuration; only relevant if
+ * 'dist_mode = DPNI_DIST_MODE_FS'
+ */
+struct dpni_rx_tc_dist_cfg {
+ u16 dist_size;
+ enum dpni_dist_mode dist_mode;
+ u64 key_cfg_iova;
+ struct dpni_fs_tbl_cfg fs_cfg;
+};
+
+int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 tc_id,
+ const struct dpni_rx_tc_dist_cfg *cfg);
+
+/**
+ * DPNI_FS_MISS_DROP - When used for fs_miss_flow_id in struct dpni_rx_dist_cfg,
+ * signals the dpni to drop all unclassified frames
+ */
+#define DPNI_FS_MISS_DROP ((uint16_t)-1)
+
+/**
+ * struct dpni_rx_dist_cfg - Rx distribution configuration
+ * @dist_size: distribution size
+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
+ * the extractions to be used for the distribution key by calling
+ * dpni_prepare_key_cfg(); relevant only when enable!=0 otherwise
+ * it can be '0'
+ * @enable: enable/disable the distribution.
+ * @tc: TC id for which distribution is set
+ * @fs_miss_flow_id: when packet misses all rules from flow steering table and
+ * hash is disabled it will be put into this queue id; use
+ * DPNI_FS_MISS_DROP to drop frames. The value of this field is
+ * used only when flow steering distribution is enabled and hash
+ * distribution is disabled
+ */
+struct dpni_rx_dist_cfg {
+ u16 dist_size;
+ u64 key_cfg_iova;
+ u8 enable;
+ u8 tc;
+ u16 fs_miss_flow_id;
+};
+
+int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rx_dist_cfg *cfg);
+
+int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rx_dist_cfg *cfg);
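+
+/*
+ * Illustrative usage sketch, not part of the driver: enabling hash
+ * distribution on traffic class 0. 'key_iova' is assumed to be the DMA
+ * address of a 256-byte buffer already filled by dpni_prepare_key_cfg()
+ * and mapped for the MC to read; 'num_queues' is the number of Rx queues.
+ *
+ *	struct dpni_rx_dist_cfg dist = {
+ *		.dist_size = num_queues,
+ *		.key_cfg_iova = key_iova,
+ *		.enable = 1,
+ *		.tc = 0,
+ *	};
+ *	int err;
+ *
+ *	err = dpni_set_rx_hash_dist(mc_io, 0, token, &dist);
+ */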
+
+/**
+ * struct dpni_qos_tbl_cfg - Structure representing QOS table configuration
+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
+ * key extractions to be used as the QoS criteria by calling
+ * dpni_prepare_key_cfg()
+ * @discard_on_miss: Set to '1' to discard frames in case of no match (miss);
+ * '0' to use the 'default_tc' in such cases
+ * @default_tc: Used in case of no-match and 'discard_on_miss'= 0
+ */
+struct dpni_qos_tbl_cfg {
+ u64 key_cfg_iova;
+ int discard_on_miss;
+ u8 default_tc;
+};
+
+int dpni_set_qos_table(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_qos_tbl_cfg *cfg);
+
+/**
+ * enum dpni_dest - DPNI destination types
+ * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and
+ * does not generate FQDAN notifications; user is expected to
+ * dequeue from the queue based on polling or other user-defined
+ * method
+ * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
+ * notifications to the specified DPIO; user is expected to dequeue
+ * from the queue only after notification is received
+ * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate
+ * FQDAN notifications, but is connected to the specified DPCON
+ * object; user is expected to dequeue from the DPCON channel
+ */
+enum dpni_dest {
+ DPNI_DEST_NONE = 0,
+ DPNI_DEST_DPIO = 1,
+ DPNI_DEST_DPCON = 2
+};
+
+/**
+ * struct dpni_queue - Queue structure
+ * @destination: Destination structure
+ * @destination.id: ID of the destination, only relevant if DEST_TYPE is > 0.
+ * Identifies either a DPIO or a DPCON object.
+ * Not relevant for Tx queues.
+ * @destination.type: May be one of the following:
+ * 0 - No destination, queue can be manually
+ * queried, but will not push traffic or
+ * notifications to a DPIO;
+ * 1 - The destination is a DPIO. When traffic
+ * becomes available in the queue a FQDAN
+ * (FQ data available notification) will be
+ * generated to selected DPIO;
+ * 2 - The destination is a DPCON. The queue is
+ * associated with a DPCON object for the
+ * purpose of scheduling between multiple
+ * queues. The DPCON may be independently
+ * configured to generate notifications.
+ * Not relevant for Tx queues.
+ * @destination.hold_active: Hold active, maintains a queue scheduled for longer
+ * in a DPIO during dequeue to reduce spread of traffic.
+ * Only relevant if queues are not affined to a single DPIO.
+ * @user_context: User data, presented to the user along with any frames
+ * from this queue. Not relevant for Tx queues.
+ * @flc: FD Flow Context structure
+ * @flc.value: Default FLC value for traffic dequeued from
+ * this queue. Please check description of FD
+ * structure for more information.
+ * Note that FLC values set using dpni_add_fs_entry,
+ * if any, take precedence over values per queue.
+ * @flc.stash_control: Boolean, indicates whether the 6 least significant
+ * bits of the FLC value are used for stash control. If set, those bits
+ * are interpreted as follows:
+ * - bits 0-1: indicates the number of 64 byte units of context
+ * that are stashed. FLC value is interpreted as a memory address
+ * in this case, excluding the 6 LS bits.
+ * - bits 2-3: indicates the number of 64 byte units of frame
+ * annotation to be stashed. Annotation is placed at FD[ADDR].
+ * - bits 4-5: indicates the number of 64 byte units of frame
+ * data to be stashed. Frame data is placed at FD[ADDR] +
+ * FD[OFFSET].
+ * For more details check the Frame Descriptor section in the
+ * hardware documentation.
+ */
+struct dpni_queue {
+ struct {
+ u16 id;
+ enum dpni_dest type;
+ char hold_active;
+ u8 priority;
+ } destination;
+ u64 user_context;
+ struct {
+ u64 value;
+ char stash_control;
+ } flc;
+};
+
+/**
+ * struct dpni_queue_id - Queue identification, used for enqueue commands
+ * or queue control
+ * @fqid: FQID used for enqueueing to and/or configuration of this specific FQ
+ * @qdbin: Queueing bin, used to enqueue using QDID, DQBIN, QPRI. Only relevant
+ * for Tx queues.
+ */
+struct dpni_queue_id {
+ u32 fqid;
+ u16 qdbin;
+};
+
+/* Set User Context */
+#define DPNI_QUEUE_OPT_USER_CTX 0x00000001
+#define DPNI_QUEUE_OPT_DEST 0x00000002
+#define DPNI_QUEUE_OPT_FLC 0x00000004
+#define DPNI_QUEUE_OPT_HOLD_ACTIVE 0x00000008
+
+int dpni_set_queue(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ u8 tc,
+ u8 index,
+ u8 options,
+ const struct dpni_queue *queue);
+
+int dpni_get_queue(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ u8 tc,
+ u8 index,
+ struct dpni_queue *queue,
+ struct dpni_queue_id *qid);
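+
+/*
+ * Illustrative usage sketch, not part of the driver: pointing Rx queue 0 of
+ * traffic class 0 at a DPIO so it generates FQDAN notifications. 'dpio_id'
+ * and 'ctx' are hypothetical; DPNI_QUEUE_RX is one of the dpni_queue_type
+ * values declared earlier in this header.
+ *
+ *	struct dpni_queue q = { 0 };
+ *	int err;
+ *
+ *	q.destination.id = dpio_id;
+ *	q.destination.type = DPNI_DEST_DPIO;
+ *	q.user_context = ctx;
+ *	err = dpni_set_queue(mc_io, 0, token, DPNI_QUEUE_RX, 0, 0,
+ *			     DPNI_QUEUE_OPT_DEST | DPNI_QUEUE_OPT_USER_CTX, &q);
+ */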
+
+/**
+ * enum dpni_congestion_unit - DPNI congestion units
+ * @DPNI_CONGESTION_UNIT_BYTES: bytes units
+ * @DPNI_CONGESTION_UNIT_FRAMES: frames units
+ */
+enum dpni_congestion_unit {
+ DPNI_CONGESTION_UNIT_BYTES = 0,
+ DPNI_CONGESTION_UNIT_FRAMES
+};
+
+/**
+ * enum dpni_congestion_point - Structure representing congestion point
+ * @DPNI_CP_QUEUE: Set taildrop per queue, identified by QUEUE_TYPE, TC and
+ * QUEUE_INDEX
+ * @DPNI_CP_GROUP: Set taildrop per queue group. Depending on options used to
+ * define the DPNI this can be either per TC (default) or per
+ * interface (DPNI_OPT_SHARED_CONGESTION set at DPNI create).
+ * QUEUE_INDEX is ignored if this type is used.
+ */
+enum dpni_congestion_point {
+ DPNI_CP_QUEUE,
+ DPNI_CP_GROUP,
+};
+
+/**
+ * struct dpni_dest_cfg - Structure representing DPNI destination parameters
+ * @dest_type: Destination type
+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
+ * @priority: Priority selection within the DPIO or DPCON channel; valid
+ * values are 0-1 or 0-7, depending on the number of priorities
+ * in that channel; not relevant for 'DPNI_DEST_NONE' option
+ */
+struct dpni_dest_cfg {
+ enum dpni_dest dest_type;
+ int dest_id;
+ u8 priority;
+};
+
+/* DPNI congestion options */
+
+/**
+ * DPNI_CONG_OPT_FLOW_CONTROL - This congestion state will trigger flow control or
+ * priority flow control. This will have effect only if flow control is
+ * enabled with dpni_set_link_cfg().
+ */
+#define DPNI_CONG_OPT_FLOW_CONTROL 0x00000040
+
+/**
+ * struct dpni_congestion_notification_cfg - congestion notification
+ * configuration
+ * @units: Units type
+ * @threshold_entry: Above this threshold we enter a congestion state;
+ * set it to '0' to disable it
+ * @threshold_exit: Below this threshold we exit the congestion state.
+ * @message_ctx: The context that will be part of the CSCN message
+ * @message_iova: I/O virtual address (must be in DMA-able memory),
+ * must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>'
+ * is contained in 'options'
+ * @dest_cfg: CSCN can be sent to either a DPIO or DPCON WQ channel
+ * @notification_mode: Mask of available options; use 'DPNI_CONG_OPT_<X>' values
+ */
+struct dpni_congestion_notification_cfg {
+ enum dpni_congestion_unit units;
+ u32 threshold_entry;
+ u32 threshold_exit;
+ u64 message_ctx;
+ u64 message_iova;
+ struct dpni_dest_cfg dest_cfg;
+ u16 notification_mode;
+};
+
+int dpni_set_congestion_notification(
+ struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ u8 tc_id,
+ const struct dpni_congestion_notification_cfg *cfg);
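+
+/*
+ * Illustrative usage sketch, not part of the driver: requesting byte-based
+ * congestion notifications that trigger (priority) flow control on traffic
+ * class 0 Rx queues. The thresholds are hypothetical and DPNI_QUEUE_RX is
+ * declared earlier in this header.
+ *
+ *	struct dpni_congestion_notification_cfg notif = {
+ *		.units = DPNI_CONGESTION_UNIT_BYTES,
+ *		.threshold_entry = 64 * 1024,
+ *		.threshold_exit = 32 * 1024,
+ *		.notification_mode = DPNI_CONG_OPT_FLOW_CONTROL,
+ *	};
+ *	int err;
+ *
+ *	err = dpni_set_congestion_notification(mc_io, 0, token,
+ *					       DPNI_QUEUE_RX, 0, &notif);
+ */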
+
+/**
+ * struct dpni_taildrop - Structure representing the taildrop
+ * @enable: Indicates whether the taildrop is active or not.
+ * @units: Indicates the unit of THRESHOLD. Queue taildrop only supports
+ * byte units; this field is ignored and assumed to be 0 if
+ * CONGESTION_POINT is 0.
+ * @threshold: Threshold value, in units identified by the UNITS field. Value 0
+ * cannot be used as a valid taildrop threshold; THRESHOLD must
+ * be > 0 if the taildrop is enabled.
+ */
+struct dpni_taildrop {
+ char enable;
+ enum dpni_congestion_unit units;
+ u32 threshold;
+};
+
+int dpni_set_taildrop(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_congestion_point cg_point,
+ enum dpni_queue_type q_type,
+ u8 tc,
+ u8 q_index,
+ struct dpni_taildrop *taildrop);
+
+int dpni_get_taildrop(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_congestion_point cg_point,
+ enum dpni_queue_type q_type,
+ u8 tc,
+ u8 q_index,
+ struct dpni_taildrop *taildrop);
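+
+/*
+ * Illustrative usage sketch, not part of the driver: arming a 64 KiB
+ * per-queue taildrop on Rx queue 0 of traffic class 0 (the threshold is
+ * hypothetical; DPNI_QUEUE_RX is declared earlier in this header).
+ *
+ *	struct dpni_taildrop td = {
+ *		.enable = 1,
+ *		.units = DPNI_CONGESTION_UNIT_BYTES,
+ *		.threshold = 64 * 1024,
+ *	};
+ *	int err;
+ *
+ *	err = dpni_set_taildrop(mc_io, 0, token, DPNI_CP_QUEUE,
+ *				DPNI_QUEUE_RX, 0, 0, &td);
+ */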
+
+/**
+ * struct dpni_rule_cfg - Rule configuration for table lookup
+ * @key_iova: I/O virtual address of the key (must be in DMA-able memory)
+ * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory)
+ * @key_size: key and mask size (in bytes)
+ */
+struct dpni_rule_cfg {
+ u64 key_iova;
+ u64 mask_iova;
+ u8 key_size;
+};
+
+/**
+ * DPNI_FS_OPT_DISCARD - Discard matching traffic. If set, this takes
+ * precedence over any other configuration and matching traffic is always
+ * discarded.
+ */
+#define DPNI_FS_OPT_DISCARD 0x1
+
+/**
+ * DPNI_FS_OPT_SET_FLC - Set FLC value. If set, flc member of struct
+ * dpni_fs_action_cfg is used to override the FLC value set per queue.
+ * For more details check the Frame Descriptor section in the hardware
+ * documentation.
+ */
+#define DPNI_FS_OPT_SET_FLC 0x2
+
+/**
+ * DPNI_FS_OPT_SET_STASH_CONTROL - Indicates whether the 6 lowest significant
+ * bits of FLC are used for stash control. If set, the 6 least significant bits
+ * in value are interpreted as follows:
+ * - bits 0-1: indicates the number of 64 byte units of context that are
+ * stashed. FLC value is interpreted as a memory address in this case,
+ * excluding the 6 LS bits.
+ * - bits 2-3: indicates the number of 64 byte units of frame annotation
+ * to be stashed. Annotation is placed at FD[ADDR].
+ * - bits 4-5: indicates the number of 64 byte units of frame data to be
+ * stashed. Frame data is placed at FD[ADDR] + FD[OFFSET].
+ * This flag is ignored if DPNI_FS_OPT_SET_FLC is not specified.
+ */
+#define DPNI_FS_OPT_SET_STASH_CONTROL 0x4
+
+/**
+ * struct dpni_fs_action_cfg - Action configuration for table look-up
+ * @flc: FLC value for traffic matching this rule. Please check the
+ * Frame Descriptor section in the hardware documentation for
+ * more information.
+ * @flow_id: Identifies the Rx queue used for matching traffic. Supported
+ * values are in range 0 to num_queue-1.
+ * @options: Any combination of DPNI_FS_OPT_ values.
+ */
+struct dpni_fs_action_cfg {
+ u64 flc;
+ u16 flow_id;
+ u16 options;
+};
+
+int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 tc_id,
+ u16 index,
+ const struct dpni_rule_cfg *cfg,
+ const struct dpni_fs_action_cfg *action);
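+
+/*
+ * Illustrative usage sketch, not part of the driver: steering traffic that
+ * matches rule 0 of traffic class 0 to Rx queue 2. 'key_iova', 'mask_iova'
+ * and 'key_size' are assumed to describe key/mask buffers laid out to match
+ * the key composition prepared earlier with dpni_prepare_key_cfg().
+ *
+ *	struct dpni_rule_cfg rule = {
+ *		.key_iova = key_iova,
+ *		.mask_iova = mask_iova,
+ *		.key_size = key_size,
+ *	};
+ *	struct dpni_fs_action_cfg action = {
+ *		.flow_id = 2,
+ *	};
+ *	int err;
+ *
+ *	err = dpni_add_fs_entry(mc_io, 0, token, 0, 0, &rule, &action);
+ */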
+
+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 tc_id,
+ const struct dpni_rule_cfg *cfg);
+
+int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rule_cfg *cfg,
+ u8 tc_id,
+ u16 index);
+
+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rule_cfg *cfg);
+
+int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpni_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver);
+
+/**
+ * struct dpni_tx_shaping_cfg - Structure representing DPNI tx shaping configuration
+ * @rate_limit: Rate in Mbps
+ * @max_burst_size: Burst size in bytes (up to 64KB)
+ */
+struct dpni_tx_shaping_cfg {
+ u32 rate_limit;
+ u16 max_burst_size;
+};
+
+int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_tx_shaping_cfg *tx_cr_shaper,
+ const struct dpni_tx_shaping_cfg *tx_er_shaper,
+ int coupled);
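+
+/*
+ * Illustrative usage sketch, not part of the driver: limiting Tx to a
+ * committed rate of 1000 Mbps with an 8 KiB burst and no excess-rate
+ * shaper (values are hypothetical).
+ *
+ *	struct dpni_tx_shaping_cfg cr = {
+ *		.rate_limit = 1000,
+ *		.max_burst_size = 8 * 1024,
+ *	};
+ *	struct dpni_tx_shaping_cfg er = { 0 };
+ *	int err;
+ *
+ *	err = dpni_set_tx_shaping(mc_io, 0, token, &cr, &er, 0);
+ */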
+
+/**
+ * struct dpni_single_step_cfg - configure single step PTP (IEEE 1588)
+ * @en: enable single step PTP. When enabled, the PTPv1 functionality
+ * will not work. If the field is zero, the offset and ch_update
+ * parameters will be ignored
+ * @offset: start offset from the beginning of the frame where the
+ * timestamp field is found. The offset must respect all MAC
+ * headers, VLAN tags and other protocol headers
+ * @ch_update: when set, the UDP checksum will be updated inside the packet
+ * @peer_delay: For peer-to-peer transparent clocks, add this value to the
+ * correction field in addition to the transient time update.
+ * The value is expressed in nanoseconds.
+ * @ptp_onestep_reg_base: 1588 SINGLE_STEP register base address. This address
+ * is used to update the register contents directly.
+ * The user has to create an address mapping for it.
+ */
+struct dpni_single_step_cfg {
+ u8 en;
+ u8 ch_update;
+ u16 offset;
+ u32 peer_delay;
+ u32 ptp_onestep_reg_base;
+};
+
+int dpni_set_single_step_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_single_step_cfg *ptp_cfg);
+
+int dpni_get_single_step_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_single_step_cfg *ptp_cfg);
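+
+/*
+ * Illustrative usage sketch, not part of the driver: enabling one-step PTP
+ * timestamp insertion with hardware UDP checksum update. The offset of the
+ * timestamp field depends on the frame layout and is hypothetical here.
+ *
+ *	struct dpni_single_step_cfg ptp = {
+ *		.en = 1,
+ *		.ch_update = 1,
+ *		.offset = tstamp_offset,
+ *	};
+ *	int err;
+ *
+ *	err = dpni_set_single_step_cfg(mc_io, 0, token, &ptp);
+ */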
+
+int dpni_enable_vlan_filter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u32 en);
+
+int dpni_add_vlan_id(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, u8 flags, u8 tc_id, u8 flow_id);
+
+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id);
+
+#endif /* __FSL_DPNI_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h
new file mode 100644
index 000000000..96ffeb948
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2018 NXP
+ */
+
+#ifndef _FSL_DPRTC_CMD_H
+#define _FSL_DPRTC_CMD_H
+
+/* Command versioning */
+#define DPRTC_CMD_BASE_VERSION 1
+#define DPRTC_CMD_VERSION_2 2
+#define DPRTC_CMD_ID_OFFSET 4
+
+#define DPRTC_CMD(id) (((id) << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_BASE_VERSION)
+#define DPRTC_CMD_V2(id) (((id) << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_VERSION_2)
+
+/* Command IDs */
+#define DPRTC_CMDID_CLOSE DPRTC_CMD(0x800)
+#define DPRTC_CMDID_OPEN DPRTC_CMD(0x810)
+
+#define DPRTC_CMDID_SET_IRQ_ENABLE DPRTC_CMD(0x012)
+#define DPRTC_CMDID_GET_IRQ_ENABLE DPRTC_CMD(0x013)
+#define DPRTC_CMDID_SET_IRQ_MASK DPRTC_CMD_V2(0x014)
+#define DPRTC_CMDID_GET_IRQ_MASK DPRTC_CMD(0x015)
+#define DPRTC_CMDID_GET_IRQ_STATUS DPRTC_CMD(0x016)
+#define DPRTC_CMDID_CLEAR_IRQ_STATUS DPRTC_CMD(0x017)
+
+#pragma pack(push, 1)
+struct dprtc_cmd_open {
+ __le32 dprtc_id;
+};
+
+struct dprtc_cmd_get_irq {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dprtc_cmd_set_irq_enable {
+ u8 en;
+ u8 pad[3];
+ u8 irq_index;
+};
+
+struct dprtc_rsp_get_irq_enable {
+ u8 en;
+};
+
+struct dprtc_cmd_set_irq_mask {
+ __le32 mask;
+ u8 irq_index;
+};
+
+struct dprtc_rsp_get_irq_mask {
+ __le32 mask;
+};
+
+struct dprtc_cmd_get_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dprtc_rsp_get_irq_status {
+ __le32 status;
+};
+
+struct dprtc_cmd_clear_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+#pragma pack(pop)
+
+#endif /* _FSL_DPRTC_CMD_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc.c b/drivers/net/ethernet/freescale/dpaa2/dprtc.c
new file mode 100644
index 000000000..ed52a34fa
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dprtc.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2018 NXP
+ */
+
+#include <linux/fsl/mc.h>
+
+#include "dprtc.h"
+#include "dprtc-cmd.h"
+
+/**
+ * dprtc_open() - Open a control session for the specified object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dprtc_id: DPRTC unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dprtc_create function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dprtc_id,
+ u16 *token)
+{
+ struct dprtc_cmd_open *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dprtc_cmd_open *)cmd.params;
+ cmd_params->dprtc_id = cpu_to_le32(dprtc_id);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dprtc_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLOSE, cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprtc_set_irq_enable() - Set overall interrupt state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @irq_index: The interrupt index to configure
+ * @en: Interrupt state - enable = 1, disable = 0
+ *
+ * Allows GPP software to control when interrupts are generated.
+ * Each interrupt can have up to 32 causes. The enable/disable controls the
+ * overall interrupt state. If the interrupt is disabled, no causes will cause
+ * an interrupt.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en)
+{
+ struct dprtc_cmd_set_irq_enable *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_ENABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprtc_cmd_set_irq_enable *)cmd.params;
+ cmd_params->irq_index = irq_index;
+ cmd_params->en = en;
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprtc_get_irq_enable() - Get overall interrupt state
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @irq_index: The interrupt index to configure
+ * @en: Returned interrupt state - enable = 1, disable = 0
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_get_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 *en)
+{
+ struct dprtc_rsp_get_irq_enable *rsp_params;
+ struct dprtc_cmd_get_irq *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_ENABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprtc_cmd_get_irq *)cmd.params;
+ cmd_params->irq_index = irq_index;
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dprtc_rsp_get_irq_enable *)cmd.params;
+ *en = rsp_params->en;
+
+ return 0;
+}
+
+/**
+ * dprtc_set_irq_mask() - Set interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @irq_index: The interrupt index to configure
+ * @mask: Event mask to trigger interrupt;
+ * each bit:
+ * 0 = ignore event
+ * 1 = consider event for asserting IRQ
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask)
+{
+ struct dprtc_cmd_set_irq_mask *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_MASK,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprtc_cmd_set_irq_mask *)cmd.params;
+ cmd_params->mask = cpu_to_le32(mask);
+ cmd_params->irq_index = irq_index;
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprtc_get_irq_mask() - Get interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @irq_index: The interrupt index to configure
+ * @mask: Returned event mask to trigger interrupt
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_get_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *mask)
+{
+ struct dprtc_rsp_get_irq_mask *rsp_params;
+ struct dprtc_cmd_get_irq *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_MASK,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprtc_cmd_get_irq *)cmd.params;
+ cmd_params->irq_index = irq_index;
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dprtc_rsp_get_irq_mask *)cmd.params;
+ *mask = le32_to_cpu(rsp_params->mask);
+
+ return 0;
+}
+
+/**
+ * dprtc_get_irq_status() - Get the current status of any pending interrupts.
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @irq_index: The interrupt index to configure
+ * @status: Returned interrupts status - one bit per cause:
+ * 0 = no interrupt pending
+ * 1 = interrupt pending
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status)
+{
+ struct dprtc_cmd_get_irq_status *cmd_params;
+ struct dprtc_rsp_get_irq_status *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_STATUS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprtc_cmd_get_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(*status);
+ cmd_params->irq_index = irq_index;
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dprtc_rsp_get_irq_status *)cmd.params;
+ *status = le32_to_cpu(rsp_params->status);
+
+ return 0;
+}
+
+/**
+ * dprtc_clear_irq_status() - Clear a pending interrupt's status
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @irq_index: The interrupt index to configure
+ * @status: Bits to clear (W1C) - one bit per cause:
+ * 0 = don't change
+ * 1 = clear status bit
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status)
+{
+ struct dprtc_cmd_clear_irq_status *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLEAR_IRQ_STATUS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprtc_cmd_clear_irq_status *)cmd.params;
+ cmd_params->irq_index = irq_index;
+ cmd_params->status = cpu_to_le32(status);
+
+ return mc_send_command(mc_io, &cmd);
+}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc.h b/drivers/net/ethernet/freescale/dpaa2/dprtc.h
new file mode 100644
index 000000000..01d77c685
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dprtc.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2018 NXP
+ */
+
+#ifndef __FSL_DPRTC_H
+#define __FSL_DPRTC_H
+
+/* Data Path Real Time Counter API
+ * Contains initialization APIs and runtime control APIs for RTC
+ */
+
+struct fsl_mc_io;
+
+#define DPRTC_MAX_IRQ_NUM 1
+#define DPRTC_IRQ_INDEX 0
+
+#define DPRTC_EVENT_PPS 0x08000000
+#define DPRTC_EVENT_ETS1 0x00800000
+#define DPRTC_EVENT_ETS2 0x00400000
+
+int dprtc_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dprtc_id,
+ u16 *token);
+
+int dprtc_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en);
+
+int dprtc_get_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 *en);
+
+int dprtc_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask);
+
+int dprtc_get_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *mask);
+
+int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status);
+
+int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status);
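+
+/*
+ * Illustrative usage sketch, not part of the driver: arming the PPS event
+ * on an already opened DPRTC ('mc_io' and 'token' assumed valid) and
+ * acknowledging it later from the interrupt handler.
+ *
+ *	err = dprtc_set_irq_mask(mc_io, 0, token, DPRTC_IRQ_INDEX,
+ *				 DPRTC_EVENT_PPS);
+ *	if (!err)
+ *		err = dprtc_set_irq_enable(mc_io, 0, token,
+ *					   DPRTC_IRQ_INDEX, 1);
+ *
+ * and, in the handler:
+ *
+ *	u32 status = 0;
+ *
+ *	dprtc_get_irq_status(mc_io, 0, token, DPRTC_IRQ_INDEX, &status);
+ *	if (status & DPRTC_EVENT_PPS)
+ *		dprtc_clear_irq_status(mc_io, 0, token, DPRTC_IRQ_INDEX,
+ *				       DPRTC_EVENT_PPS);
+ */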
+
+#endif /* __FSL_DPRTC_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h
new file mode 100644
index 000000000..397d55f2b
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h
@@ -0,0 +1,556 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2021 NXP
+ *
+ */
+
+#ifndef __FSL_DPSW_CMD_H
+#define __FSL_DPSW_CMD_H
+
+#include "dpsw.h"
+
+/* DPSW Version */
+#define DPSW_VER_MAJOR 8
+#define DPSW_VER_MINOR 9
+
+#define DPSW_CMD_BASE_VERSION 1
+#define DPSW_CMD_VERSION_2 2
+#define DPSW_CMD_ID_OFFSET 4
+
+#define DPSW_CMD_ID(id) (((id) << DPSW_CMD_ID_OFFSET) | DPSW_CMD_BASE_VERSION)
+#define DPSW_CMD_V2(id) (((id) << DPSW_CMD_ID_OFFSET) | DPSW_CMD_VERSION_2)
+
+/* Command IDs */
+#define DPSW_CMDID_CLOSE DPSW_CMD_ID(0x800)
+#define DPSW_CMDID_OPEN DPSW_CMD_ID(0x802)
+
+#define DPSW_CMDID_GET_API_VERSION DPSW_CMD_ID(0xa02)
+
+#define DPSW_CMDID_ENABLE DPSW_CMD_ID(0x002)
+#define DPSW_CMDID_DISABLE DPSW_CMD_ID(0x003)
+#define DPSW_CMDID_GET_ATTR DPSW_CMD_V2(0x004)
+#define DPSW_CMDID_RESET DPSW_CMD_ID(0x005)
+
+#define DPSW_CMDID_SET_IRQ_ENABLE DPSW_CMD_ID(0x012)
+
+#define DPSW_CMDID_SET_IRQ_MASK DPSW_CMD_ID(0x014)
+
+#define DPSW_CMDID_GET_IRQ_STATUS DPSW_CMD_ID(0x016)
+#define DPSW_CMDID_CLEAR_IRQ_STATUS DPSW_CMD_ID(0x017)
+
+#define DPSW_CMDID_SET_REFLECTION_IF DPSW_CMD_ID(0x022)
+
+#define DPSW_CMDID_IF_SET_TCI DPSW_CMD_ID(0x030)
+#define DPSW_CMDID_IF_SET_STP DPSW_CMD_ID(0x031)
+
+#define DPSW_CMDID_IF_GET_COUNTER DPSW_CMD_V2(0x034)
+
+#define DPSW_CMDID_IF_ADD_REFLECTION DPSW_CMD_ID(0x037)
+#define DPSW_CMDID_IF_REMOVE_REFLECTION DPSW_CMD_ID(0x038)
+
+#define DPSW_CMDID_IF_ENABLE DPSW_CMD_ID(0x03D)
+#define DPSW_CMDID_IF_DISABLE DPSW_CMD_ID(0x03E)
+
+#define DPSW_CMDID_IF_GET_ATTR DPSW_CMD_ID(0x042)
+
+#define DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH DPSW_CMD_ID(0x044)
+
+#define DPSW_CMDID_IF_GET_LINK_STATE DPSW_CMD_ID(0x046)
+
+#define DPSW_CMDID_IF_GET_TCI DPSW_CMD_ID(0x04A)
+
+#define DPSW_CMDID_IF_SET_LINK_CFG DPSW_CMD_ID(0x04C)
+
+#define DPSW_CMDID_VLAN_ADD DPSW_CMD_ID(0x060)
+#define DPSW_CMDID_VLAN_ADD_IF DPSW_CMD_V2(0x061)
+#define DPSW_CMDID_VLAN_ADD_IF_UNTAGGED DPSW_CMD_ID(0x062)
+
+#define DPSW_CMDID_VLAN_REMOVE_IF DPSW_CMD_ID(0x064)
+#define DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED DPSW_CMD_ID(0x065)
+#define DPSW_CMDID_VLAN_REMOVE_IF_FLOODING DPSW_CMD_ID(0x066)
+#define DPSW_CMDID_VLAN_REMOVE DPSW_CMD_ID(0x067)
+
+#define DPSW_CMDID_FDB_ADD DPSW_CMD_ID(0x082)
+#define DPSW_CMDID_FDB_REMOVE DPSW_CMD_ID(0x083)
+#define DPSW_CMDID_FDB_ADD_UNICAST DPSW_CMD_ID(0x084)
+#define DPSW_CMDID_FDB_REMOVE_UNICAST DPSW_CMD_ID(0x085)
+#define DPSW_CMDID_FDB_ADD_MULTICAST DPSW_CMD_ID(0x086)
+#define DPSW_CMDID_FDB_REMOVE_MULTICAST DPSW_CMD_ID(0x087)
+#define DPSW_CMDID_FDB_DUMP DPSW_CMD_ID(0x08A)
+
+#define DPSW_CMDID_ACL_ADD DPSW_CMD_ID(0x090)
+#define DPSW_CMDID_ACL_REMOVE DPSW_CMD_ID(0x091)
+#define DPSW_CMDID_ACL_ADD_ENTRY DPSW_CMD_ID(0x092)
+#define DPSW_CMDID_ACL_REMOVE_ENTRY DPSW_CMD_ID(0x093)
+#define DPSW_CMDID_ACL_ADD_IF DPSW_CMD_ID(0x094)
+#define DPSW_CMDID_ACL_REMOVE_IF DPSW_CMD_ID(0x095)
+
+#define DPSW_CMDID_IF_GET_PORT_MAC_ADDR DPSW_CMD_ID(0x0A7)
+
+#define DPSW_CMDID_CTRL_IF_GET_ATTR DPSW_CMD_ID(0x0A0)
+#define DPSW_CMDID_CTRL_IF_SET_POOLS DPSW_CMD_ID(0x0A1)
+#define DPSW_CMDID_CTRL_IF_ENABLE DPSW_CMD_ID(0x0A2)
+#define DPSW_CMDID_CTRL_IF_DISABLE DPSW_CMD_ID(0x0A3)
+#define DPSW_CMDID_CTRL_IF_SET_QUEUE DPSW_CMD_ID(0x0A6)
+
+#define DPSW_CMDID_SET_EGRESS_FLOOD DPSW_CMD_ID(0x0AC)
+#define DPSW_CMDID_IF_SET_LEARNING_MODE DPSW_CMD_ID(0x0AD)
+
+/* Macros for accessing command fields smaller than 1 byte */
+#define DPSW_MASK(field) \
+ GENMASK(DPSW_##field##_SHIFT + DPSW_##field##_SIZE - 1, \
+ DPSW_##field##_SHIFT)
+#define dpsw_set_field(var, field, val) \
+ ((var) |= (((val) << DPSW_##field##_SHIFT) & DPSW_MASK(field)))
+#define dpsw_get_field(var, field) \
+ (((var) & DPSW_MASK(field)) >> DPSW_##field##_SHIFT)
+#define dpsw_get_bit(var, bit) \
+ (((var) >> (bit)) & GENMASK(0, 0))
+
+#pragma pack(push, 1)
+struct dpsw_cmd_open {
+ __le32 dpsw_id;
+};
+
+#define DPSW_COMPONENT_TYPE_SHIFT 0
+#define DPSW_COMPONENT_TYPE_SIZE 4
+
+struct dpsw_cmd_create {
+ /* cmd word 0 */
+ __le16 num_ifs;
+ u8 max_fdbs;
+ u8 max_meters_per_if;
+ /* from LSB: only the first 4 bits */
+ u8 component_type;
+ u8 pad[3];
+ /* cmd word 1 */
+ __le16 max_vlans;
+ __le16 max_fdb_entries;
+ __le16 fdb_aging_time;
+ __le16 max_fdb_mc_groups;
+ /* cmd word 2 */
+ __le64 options;
+};
+
+struct dpsw_cmd_destroy {
+ __le32 dpsw_id;
+};
+
+#define DPSW_ENABLE_SHIFT 0
+#define DPSW_ENABLE_SIZE 1
+
+struct dpsw_rsp_is_enabled {
+ /* from LSB: enable:1 */
+ u8 enabled;
+};
+
+struct dpsw_cmd_set_irq_enable {
+ u8 enable_state;
+ u8 pad[3];
+ u8 irq_index;
+};
+
+struct dpsw_cmd_get_irq_enable {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dpsw_rsp_get_irq_enable {
+ u8 enable_state;
+};
+
+struct dpsw_cmd_set_irq_mask {
+ __le32 mask;
+ u8 irq_index;
+};
+
+struct dpsw_cmd_get_irq_mask {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dpsw_rsp_get_irq_mask {
+ __le32 mask;
+};
+
+struct dpsw_cmd_get_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dpsw_rsp_get_irq_status {
+ __le32 status;
+};
+
+struct dpsw_cmd_clear_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+#define DPSW_COMPONENT_TYPE_SHIFT 0
+#define DPSW_COMPONENT_TYPE_SIZE 4
+
+#define DPSW_FLOODING_CFG_SHIFT 0
+#define DPSW_FLOODING_CFG_SIZE 4
+
+#define DPSW_BROADCAST_CFG_SHIFT 4
+#define DPSW_BROADCAST_CFG_SIZE 4
+
+struct dpsw_rsp_get_attr {
+ /* cmd word 0 */
+ __le16 num_ifs;
+ u8 max_fdbs;
+ u8 num_fdbs;
+ __le16 max_vlans;
+ __le16 num_vlans;
+ /* cmd word 1 */
+ __le16 max_fdb_entries;
+ __le16 fdb_aging_time;
+ __le32 dpsw_id;
+ /* cmd word 2 */
+ __le16 mem_size;
+ __le16 max_fdb_mc_groups;
+ u8 max_meters_per_if;
+ /* from LSB only the first 4 bits */
+ u8 component_type;
+ /* [0:3] - flooding configuration
+ * [4:7] - broadcast configuration
+ */
+ u8 repl_cfg;
+ u8 pad;
+ /* cmd word 3 */
+ __le64 options;
+};
+
+#define DPSW_VLAN_ID_SHIFT 0
+#define DPSW_VLAN_ID_SIZE 12
+#define DPSW_DEI_SHIFT 12
+#define DPSW_DEI_SIZE 1
+#define DPSW_PCP_SHIFT 13
+#define DPSW_PCP_SIZE 3
+
+struct dpsw_cmd_if_set_tci {
+ __le16 if_id;
+ /* from LSB: VLAN_ID:12 DEI:1 PCP:3 */
+ __le16 conf;
+};
+
+struct dpsw_cmd_if_get_tci {
+ __le16 if_id;
+};
+
+struct dpsw_rsp_if_get_tci {
+ __le16 pad;
+ __le16 vlan_id;
+ u8 dei;
+ u8 pcp;
+};
+
+#define DPSW_STATE_SHIFT 0
+#define DPSW_STATE_SIZE 4
+
+struct dpsw_cmd_if_set_stp {
+ __le16 if_id;
+ __le16 vlan_id;
+ /* only the first LSB 4 bits */
+ u8 state;
+};
+
+#define DPSW_COUNTER_TYPE_SHIFT 0
+#define DPSW_COUNTER_TYPE_SIZE 5
+
+struct dpsw_cmd_if_get_counter {
+ __le16 if_id;
+ /* from LSB: type:5 */
+ u8 type;
+};
+
+struct dpsw_rsp_if_get_counter {
+ __le64 pad;
+ __le64 counter;
+};
+
+struct dpsw_cmd_if {
+ __le16 if_id;
+};
+
+#define DPSW_ADMIT_UNTAGGED_SHIFT 0
+#define DPSW_ADMIT_UNTAGGED_SIZE 4
+#define DPSW_ENABLED_SHIFT 5
+#define DPSW_ENABLED_SIZE 1
+#define DPSW_ACCEPT_ALL_VLAN_SHIFT 6
+#define DPSW_ACCEPT_ALL_VLAN_SIZE 1
+
+struct dpsw_rsp_if_get_attr {
+ /* cmd word 0 */
+ /* from LSB: admit_untagged:4 enabled:1 accept_all_vlan:1 */
+ u8 conf;
+ u8 pad1;
+ u8 num_tcs;
+ u8 pad2;
+ __le16 qdid;
+ /* cmd word 1 */
+ __le32 options;
+ __le32 pad3;
+ /* cmd word 2 */
+ __le32 rate;
+};
+
+struct dpsw_cmd_if_set_max_frame_length {
+ __le16 if_id;
+ __le16 frame_length;
+};
+
+struct dpsw_cmd_if_set_link_cfg {
+ /* cmd word 0 */
+ __le16 if_id;
+ u8 pad[6];
+ /* cmd word 1 */
+ __le32 rate;
+ __le32 pad1;
+ /* cmd word 2 */
+ __le64 options;
+};
+
+struct dpsw_cmd_if_get_link_state {
+ __le16 if_id;
+};
+
+#define DPSW_UP_SHIFT 0
+#define DPSW_UP_SIZE 1
+
+struct dpsw_rsp_if_get_link_state {
+ /* cmd word 0 */
+ __le32 pad0;
+ u8 up;
+ u8 pad1[3];
+ /* cmd word 1 */
+ __le32 rate;
+ __le32 pad2;
+ /* cmd word 2 */
+ __le64 options;
+};
+
+struct dpsw_vlan_add {
+ __le16 fdb_id;
+ __le16 vlan_id;
+};
+
+struct dpsw_cmd_vlan_add_if {
+ /* cmd word 0 */
+ __le16 options;
+ __le16 vlan_id;
+ __le16 fdb_id;
+ __le16 pad0;
+ /* cmd word 1-4 */
+ __le64 if_id;
+};
+
+struct dpsw_cmd_vlan_manage_if {
+ /* cmd word 0 */
+ __le16 pad0;
+ __le16 vlan_id;
+ __le32 pad1;
+ /* cmd word 1-4 */
+ __le64 if_id;
+};
+
+struct dpsw_cmd_vlan_remove {
+ __le16 pad;
+ __le16 vlan_id;
+};
+
+struct dpsw_cmd_fdb_add {
+ __le32 pad;
+ __le16 fdb_ageing_time;
+ __le16 num_fdb_entries;
+};
+
+struct dpsw_rsp_fdb_add {
+ __le16 fdb_id;
+};
+
+struct dpsw_cmd_fdb_remove {
+ __le16 fdb_id;
+};
+
+#define DPSW_ENTRY_TYPE_SHIFT 0
+#define DPSW_ENTRY_TYPE_SIZE 4
+
+struct dpsw_cmd_fdb_unicast_op {
+ /* cmd word 0 */
+ __le16 fdb_id;
+ u8 mac_addr[6];
+ /* cmd word 1 */
+ __le16 if_egress;
+ /* only the first 4 bits from LSB */
+ u8 type;
+};
+
+struct dpsw_cmd_fdb_multicast_op {
+ /* cmd word 0 */
+ __le16 fdb_id;
+ __le16 num_ifs;
+ /* only the first 4 bits from LSB */
+ u8 type;
+ u8 pad[3];
+ /* cmd word 1 */
+ u8 mac_addr[6];
+ __le16 pad2;
+ /* cmd word 2-5 */
+ __le64 if_id;
+};
+
+struct dpsw_cmd_fdb_dump {
+ __le16 fdb_id;
+ __le16 pad0;
+ __le32 pad1;
+ __le64 iova_addr;
+ __le32 iova_size;
+};
+
+struct dpsw_rsp_fdb_dump {
+ __le16 num_entries;
+};
+
+struct dpsw_rsp_ctrl_if_get_attr {
+ __le64 pad;
+ __le32 rx_fqid;
+ __le32 rx_err_fqid;
+ __le32 tx_err_conf_fqid;
+};
+
+#define DPSW_BACKUP_POOL(val, order) (((val) & 0x1) << (order))
+struct dpsw_cmd_ctrl_if_set_pools {
+ u8 num_dpbp;
+ u8 backup_pool_mask;
+ __le16 pad;
+ __le32 dpbp_id[DPSW_MAX_DPBP];
+ __le16 buffer_size[DPSW_MAX_DPBP];
+};
+
+#define DPSW_DEST_TYPE_SHIFT 0
+#define DPSW_DEST_TYPE_SIZE 4
+
+struct dpsw_cmd_ctrl_if_set_queue {
+ __le32 dest_id;
+ u8 dest_priority;
+ u8 pad;
+ /* from LSB: dest_type:4 */
+ u8 dest_type;
+ u8 qtype;
+ __le64 user_ctx;
+ __le32 options;
+};
+
+struct dpsw_rsp_get_api_version {
+ __le16 version_major;
+ __le16 version_minor;
+};
+
+struct dpsw_rsp_if_get_mac_addr {
+ __le16 pad;
+ u8 mac_addr[6];
+};
+
+struct dpsw_cmd_set_egress_flood {
+ __le16 fdb_id;
+ u8 flood_type;
+ u8 pad[5];
+ __le64 if_id;
+};
+
+#define DPSW_LEARNING_MODE_SHIFT 0
+#define DPSW_LEARNING_MODE_SIZE 4
+
+struct dpsw_cmd_if_set_learning_mode {
+ __le16 if_id;
+ /* only the first 4 bits from LSB */
+ u8 mode;
+};
+
+struct dpsw_cmd_acl_add {
+ __le16 pad;
+ __le16 max_entries;
+};
+
+struct dpsw_rsp_acl_add {
+ __le16 acl_id;
+};
+
+struct dpsw_cmd_acl_remove {
+ __le16 acl_id;
+};
+
+struct dpsw_cmd_acl_if {
+ __le16 acl_id;
+ __le16 num_ifs;
+ __le32 pad;
+ __le64 if_id;
+};
+
+struct dpsw_prep_acl_entry {
+ u8 match_l2_dest_mac[6];
+ __le16 match_l2_tpid;
+
+ u8 match_l2_source_mac[6];
+ __le16 match_l2_vlan_id;
+
+ __le32 match_l3_dest_ip;
+ __le32 match_l3_source_ip;
+
+ __le16 match_l4_dest_port;
+ __le16 match_l4_source_port;
+ __le16 match_l2_ether_type;
+ u8 match_l2_pcp_dei;
+ u8 match_l3_dscp;
+
+ u8 mask_l2_dest_mac[6];
+ __le16 mask_l2_tpid;
+
+ u8 mask_l2_source_mac[6];
+ __le16 mask_l2_vlan_id;
+
+ __le32 mask_l3_dest_ip;
+ __le32 mask_l3_source_ip;
+
+ __le16 mask_l4_dest_port;
+ __le16 mask_l4_source_port;
+ __le16 mask_l2_ether_type;
+ u8 mask_l2_pcp_dei;
+ u8 mask_l3_dscp;
+
+ u8 match_l3_protocol;
+ u8 mask_l3_protocol;
+};
+
+#define DPSW_RESULT_ACTION_SHIFT 0
+#define DPSW_RESULT_ACTION_SIZE 4
+
+struct dpsw_cmd_acl_entry {
+ __le16 acl_id;
+ __le16 result_if_id;
+ __le32 precedence;
+ /* from LSB only the first 4 bits */
+ u8 result_action;
+ u8 pad[7];
+ __le64 pad2[4];
+ __le64 key_iova;
+};
+
+struct dpsw_cmd_set_reflection_if {
+ __le16 if_id;
+};
+
+#define DPSW_FILTER_SHIFT 0
+#define DPSW_FILTER_SIZE 2
+
+struct dpsw_cmd_if_reflection {
+ __le16 if_id;
+ __le16 vlan_id;
+ /* only 2 bits from the LSB */
+ u8 filter;
+};
+#pragma pack(pop)
+#endif /* __FSL_DPSW_CMD_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw.c b/drivers/net/ethernet/freescale/dpaa2/dpsw.c
new file mode 100644
index 000000000..ab921d75d
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpsw.c
@@ -0,0 +1,1661 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2021 NXP
+ *
+ */
+
+#include <linux/fsl/mc.h>
+#include "dpsw.h"
+#include "dpsw-cmd.h"
+
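+/*
+ * Helper that packs a list of interface ids into the little-endian bitmap
+ * layout expected by several DPSW commands: for every id below DPSW_MAX_IF,
+ * bit (id % 64) of 64-bit word (id / 64) is set (e.g. if_id 3 sets bit 3 of
+ * bmap[0]); out-of-range ids are silently skipped.
+ */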
+static void build_if_id_bitmap(__le64 *bmap, const u16 *id, const u16 num_ifs)
+{
+ int i;
+
+ for (i = 0; (i < num_ifs) && (i < DPSW_MAX_IF); i++) {
+ if (id[i] < DPSW_MAX_IF)
+ bmap[id[i] / 64] |= cpu_to_le64(BIT_MASK(id[i] % 64));
+ }
+}
+
+/**
+ * dpsw_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpsw_id: DPSW unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpsw_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpsw_id, u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_open *cmd_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dpsw_cmd_open *)cmd.params;
+ cmd_params->dpsw_id = cpu_to_le32(dpsw_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpsw_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_enable() - Enable DPSW functionality
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_disable() - Disable DPSW functionality
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_reset() - Reset the DPSW, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_RESET,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
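+
+/*
+ * Illustrative call sequence, not part of the driver: the usual object life
+ * cycle as seen from a caller that owns an MC portal. 'dpsw_id' would come
+ * from the fsl-mc bus device; error handling is omitted for brevity.
+ *
+ *	u16 token;
+ *
+ *	dpsw_open(mc_io, 0, dpsw_id, &token);
+ *	dpsw_enable(mc_io, 0, token);
+ *	...
+ *	dpsw_disable(mc_io, 0, token);
+ *	dpsw_close(mc_io, 0, token);
+ */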
+
+/**
+ * dpsw_set_irq_enable() - Set overall interrupt state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @irq_index: The interrupt index to configure
+ * @en: Interrupt state - enable = 1, disable = 0
+ *
+ * Allows GPP software to control when interrupts are generated.
+ * Each interrupt can have up to 32 causes. The enable/disable controls the
+ * overall interrupt state. If the interrupt is disabled, no causes will cause
+ * an interrupt.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u8 en)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_set_irq_enable *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_ENABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_set_irq_enable *)cmd.params;
+ dpsw_set_field(cmd_params->enable_state, ENABLE, en);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_set_irq_mask() - Set interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @irq_index: The interrupt index to configure
+ * @mask: Event mask to trigger interrupt;
+ * each bit:
+ * 0 = ignore event
+ * 1 = consider event for asserting IRQ
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 mask)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_set_irq_mask *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_MASK,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_set_irq_mask *)cmd.params;
+ cmd_params->mask = cpu_to_le32(mask);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_get_irq_status() - Get the current status of any pending interrupts
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @irq_index: The interrupt index to configure
+ * @status: Returned interrupts status - one bit per cause:
+ * 0 = no interrupt pending
+ * 1 = interrupt pending
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 *status)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_get_irq_status *cmd_params;
+ struct dpsw_rsp_get_irq_status *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_STATUS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_get_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(*status);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpsw_rsp_get_irq_status *)cmd.params;
+ *status = le32_to_cpu(rsp_params->status);
+
+ return 0;
+}
+
+/**
+ * dpsw_clear_irq_status() - Clear a pending interrupt's status
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @irq_index: The interrupt index to configure
+ * @status: bits to clear (W1C) - one bit per cause:
+ * 0 = don't change
+ * 1 = clear status bit
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 status)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_clear_irq_status *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLEAR_IRQ_STATUS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_clear_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(status);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_get_attributes() - Retrieve DPSW attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @attr: Returned DPSW attributes
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpsw_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_rsp_get_attr *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpsw_rsp_get_attr *)cmd.params;
+ attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
+ attr->max_fdbs = rsp_params->max_fdbs;
+ attr->num_fdbs = rsp_params->num_fdbs;
+ attr->max_vlans = le16_to_cpu(rsp_params->max_vlans);
+ attr->num_vlans = le16_to_cpu(rsp_params->num_vlans);
+ attr->max_fdb_entries = le16_to_cpu(rsp_params->max_fdb_entries);
+ attr->fdb_aging_time = le16_to_cpu(rsp_params->fdb_aging_time);
+ attr->id = le32_to_cpu(rsp_params->dpsw_id);
+ attr->mem_size = le16_to_cpu(rsp_params->mem_size);
+ attr->max_fdb_mc_groups = le16_to_cpu(rsp_params->max_fdb_mc_groups);
+ attr->max_meters_per_if = rsp_params->max_meters_per_if;
+ attr->options = le64_to_cpu(rsp_params->options);
+ attr->component_type = dpsw_get_field(rsp_params->component_type, COMPONENT_TYPE);
+ attr->flooding_cfg = dpsw_get_field(rsp_params->repl_cfg, FLOODING_CFG);
+ attr->broadcast_cfg = dpsw_get_field(rsp_params->repl_cfg, BROADCAST_CFG);
+ return 0;
+}
+
+/**
+ * dpsw_if_set_link_cfg() - Set the link configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface id
+ * @cfg: Link configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id,
+ struct dpsw_link_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if_set_link_cfg *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_LINK_CFG,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_set_link_cfg *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ cmd_params->rate = cpu_to_le32(cfg->rate);
+ cmd_params->options = cpu_to_le64(cfg->options);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_get_link_state() - Return the link state
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface id
+ * @state: Returned link state; 1 - link up, 0 - link down or disconnected
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_if_get_link_state(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, struct dpsw_link_state *state)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if_get_link_state *cmd_params;
+ struct dpsw_rsp_if_get_link_state *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_LINK_STATE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_get_link_state *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpsw_rsp_if_get_link_state *)cmd.params;
+ state->rate = le32_to_cpu(rsp_params->rate);
+ state->options = le64_to_cpu(rsp_params->options);
+ state->up = dpsw_get_field(rsp_params->up, UP);
+
+ return 0;
+}
+
+/**
+ * dpsw_if_set_tci() - Set default VLAN Tag Control Information (TCI)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @cfg: Tag Control Information Configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_set_tci(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id,
+ const struct dpsw_tci_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if_set_tci *cmd_params;
+ u16 tmp_conf = 0;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TCI,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_set_tci *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ dpsw_set_field(tmp_conf, VLAN_ID, cfg->vlan_id);
+ dpsw_set_field(tmp_conf, DEI, cfg->dei);
+ dpsw_set_field(tmp_conf, PCP, cfg->pcp);
+ cmd_params->conf = cpu_to_le16(tmp_conf);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_get_tci() - Get default VLAN Tag Control Information (TCI)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @cfg: Tag Control Information Configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_get_tci(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id,
+ struct dpsw_tci_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if_get_tci *cmd_params;
+ struct dpsw_rsp_if_get_tci *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_TCI,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_get_tci *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpsw_rsp_if_get_tci *)cmd.params;
+ cfg->pcp = rsp_params->pcp;
+ cfg->dei = rsp_params->dei;
+ cfg->vlan_id = le16_to_cpu(rsp_params->vlan_id);
+
+ return 0;
+}
+
+/**
+ * dpsw_if_set_stp() - Set the Spanning Tree Protocol (STP) state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @cfg: STP State configuration parameters
+ *
+ * The following STP states are supported -
+ * blocking, listening, learning, forwarding and disabled.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_set_stp(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id,
+ const struct dpsw_stp_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if_set_stp *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_STP,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_set_stp *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
+ dpsw_set_field(cmd_params->state, STATE, cfg->state);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_get_counter() - Get specific counter of particular interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @type: Counter type
+ * @counter: Returned counter value
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, enum dpsw_counter type, u64 *counter)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if_get_counter *cmd_params;
+ struct dpsw_rsp_if_get_counter *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_COUNTER,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_get_counter *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ dpsw_set_field(cmd_params->type, COUNTER_TYPE, type);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpsw_rsp_if_get_counter *)cmd.params;
+ *counter = le64_to_cpu(rsp_params->counter);
+
+ return 0;
+}
+
+/**
+ * dpsw_if_enable() - Enable Interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ENABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_disable() - Disable Interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_DISABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_get_attributes() - Obtain the attributes of an interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @attr: Returned interface attributes
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, struct dpsw_if_attr *attr)
+{
+ struct dpsw_rsp_if_get_attr *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if *cmd_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_ATTR, cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpsw_rsp_if_get_attr *)cmd.params;
+ attr->num_tcs = rsp_params->num_tcs;
+ attr->rate = le32_to_cpu(rsp_params->rate);
+ attr->options = le32_to_cpu(rsp_params->options);
+ attr->qdid = le16_to_cpu(rsp_params->qdid);
+ attr->enabled = dpsw_get_field(rsp_params->conf, ENABLED);
+ attr->accept_all_vlan = dpsw_get_field(rsp_params->conf,
+ ACCEPT_ALL_VLAN);
+ attr->admit_untagged = dpsw_get_field(rsp_params->conf,
+ ADMIT_UNTAGGED);
+
+ return 0;
+}
+
+/**
+ * dpsw_if_set_max_frame_length() - Set the maximum receive frame length.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @frame_length: Maximum Frame Length
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, u16 frame_length)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if_set_max_frame_length *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_set_max_frame_length *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ cmd_params->frame_length = cpu_to_le16(frame_length);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_vlan_add() - Add a new VLAN to the DPSW.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ * @cfg: VLAN configuration
+ *
+ * Only the VLAN ID and FDB ID are required parameters here.
+ * The 12-bit VLAN ID is defined in IEEE 802.1Q.
+ * Adding a duplicate VLAN ID is not allowed.
+ * An FDB ID can be shared across multiple VLANs. Shared learning
+ * is obtained by calling dpsw_vlan_add() for multiple VLAN IDs
+ * with the same fdb_id.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_vlan_add *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_vlan_add *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(cfg->fdb_id);
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_vlan_add_if() - Add a set of interfaces to an existing VLAN.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ * @cfg: Set of interfaces to add
+ *
+ * Only interfaces that do not yet belong to this VLAN can be added;
+ * otherwise an error is generated and the entire command is
+ * ignored. This function can be called repeatedly, each time
+ * providing only the required delta of interfaces.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_add_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg)
+{
+ struct dpsw_cmd_vlan_add_if *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_vlan_add_if *)cmd.params;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+ cmd_params->options = cpu_to_le16(cfg->options);
+ cmd_params->fdb_id = cpu_to_le16(cfg->fdb_id);
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
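+
+/*
+ * Sketch of the expected call order when creating a VLAN and binding
+ * interfaces to it (hypothetical ids; assumes an FDB id obtained from
+ * dpsw_fdb_add() or the default FDB):
+ *
+ *	struct dpsw_vlan_cfg vcfg = { .fdb_id = fdb_id };
+ *	struct dpsw_vlan_if_cfg ifcfg = {
+ *		.num_ifs = 2,
+ *		.if_id = { 0, 1 },
+ *	};
+ *	int err;
+ *
+ *	err = dpsw_vlan_add(mc_io, 0, token, 100, &vcfg);
+ *	if (!err)
+ *		err = dpsw_vlan_add_if(mc_io, 0, token, 100, &ifcfg);
+ */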
+
+/**
+ * dpsw_vlan_add_if_untagged() - Define a set of interfaces whose frames
+ *		should be transmitted untagged.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ * @cfg: Set of interfaces that should be transmitted as untagged
+ *
+ * These interfaces should already belong to this VLAN.
+ * By default, all interfaces transmit frames as tagged.
+ * Providing a non-existent interface, or an interface that is
+ * already configured as untagged, generates an error and the entire
+ * command is ignored.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_vlan_manage_if *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_UNTAGGED,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_vlan_remove_if() - Remove interfaces from an existing VLAN.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ * @cfg: Set of interfaces that should be removed
+ *
+ * Interfaces must belong to this VLAN, otherwise an error
+ * is returned and the entire command is ignored.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_vlan_manage_if *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_vlan_remove_if_untagged() - Define a set of interfaces that should
+ *		switch from transmitting untagged frames back to tagged frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ * @cfg: Set of interfaces that should be removed
+ *
+ * Interfaces provided to this API have to belong to this VLAN and
+ * be configured as untagged, otherwise an error is returned and the
+ * entire command is ignored.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_vlan_manage_if *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_vlan_remove() - Remove an entire VLAN
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_vlan_remove *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_vlan_remove *)cmd.params;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_fdb_add() - Add an FDB to the switch and return a handle to the
+ *		FDB table for later reference
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Returned Forwarding Database Identifier
+ * @cfg: FDB Configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_fdb_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 *fdb_id,
+ const struct dpsw_fdb_cfg *cfg)
+{
+ struct dpsw_cmd_fdb_add *cmd_params;
+ struct dpsw_rsp_fdb_add *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_fdb_add *)cmd.params;
+ cmd_params->fdb_ageing_time = cpu_to_le16(cfg->fdb_ageing_time);
+ cmd_params->num_fdb_entries = cpu_to_le16(cfg->num_fdb_entries);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpsw_rsp_fdb_add *)cmd.params;
+ *fdb_id = le16_to_cpu(rsp_params->fdb_id);
+
+ return 0;
+}
+
+/**
+ * dpsw_fdb_remove() - Remove FDB from switch
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_fdb_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 fdb_id)
+{
+ struct dpsw_cmd_fdb_remove *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_fdb_remove *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_fdb_add_unicast() - Add a unicast entry to the MAC lookup table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ * @cfg: Unicast entry configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 fdb_id, const struct dpsw_fdb_unicast_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_fdb_unicast_op *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_UNICAST,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_fdb_unicast_op *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
+ cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
+ for (i = 0; i < 6; i++)
+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
+ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
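+
+/*
+ * Sketch of adding a static unicast entry (hypothetical MAC address and
+ * egress interface). The byte-order reversal of the address is handled by
+ * the wrapper above, so callers pass the MAC in its usual order.
+ *
+ *	struct dpsw_fdb_unicast_cfg ucfg = {
+ *		.type = DPSW_FDB_ENTRY_STATIC,
+ *		.mac_addr = { 0x00, 0x04, 0x9f, 0x00, 0x00, 0x01 },
+ *		.if_egress = 3,
+ *	};
+ *
+ *	err = dpsw_fdb_add_unicast(mc_io, 0, token, fdb_id, &ucfg);
+ */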
+
+/**
+ * dpsw_fdb_dump() - Dump the content of FDB table into memory.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ * @iova_addr: Data will be stored here as an array of struct fdb_dump_entry
+ * @iova_size: Memory size allocated at iova_addr
+ * @num_entries: Number of entries written at iova_addr
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ *
+ * The memory allocated at iova_addr must be zero-initialized before
+ * command execution. If the FDB table does not fit into the memory, the MC
+ * stops once the memory is filled up.
+ * The struct fdb_dump_entry array must be parsed until the end of the memory
+ * area or until an entry with mac_addr set to zero is found.
+ */
+int dpsw_fdb_dump(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 fdb_id,
+ u64 iova_addr, u32 iova_size, u16 *num_entries)
+{
+ struct dpsw_cmd_fdb_dump *cmd_params;
+ struct dpsw_rsp_fdb_dump *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_DUMP,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_fdb_dump *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
+ cmd_params->iova_addr = cpu_to_le64(iova_addr);
+ cmd_params->iova_size = cpu_to_le32(iova_size);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpsw_rsp_fdb_dump *)cmd.params;
+ *num_entries = le16_to_cpu(rsp_params->num_entries);
+
+ return 0;
+}
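+
+/*
+ * Sketch of a dump sequence, assuming 'dev' is the fsl-mc device's struct
+ * device and that one zeroed, coherent page is large enough for the table
+ * at hand; entries are walked until num_entries is reached.
+ *
+ *	struct fdb_dump_entry *entries;
+ *	dma_addr_t iova;
+ *	u16 num, i;
+ *	int err;
+ *
+ *	entries = dma_alloc_coherent(dev, PAGE_SIZE, &iova, GFP_KERNEL);
+ *	if (!entries)
+ *		return -ENOMEM;
+ *	err = dpsw_fdb_dump(mc_io, 0, token, fdb_id, iova, PAGE_SIZE, &num);
+ *	if (!err)
+ *		for (i = 0; i < num; i++)
+ *			pr_info("fdb: %pM\n", entries[i].mac_addr);
+ *	dma_free_coherent(dev, PAGE_SIZE, entries, iova);
+ */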
+
+/**
+ * dpsw_fdb_remove_unicast() - Remove a unicast entry from the MAC lookup table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ * @cfg: Unicast entry configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 fdb_id, const struct dpsw_fdb_unicast_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_fdb_unicast_op *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_UNICAST,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_fdb_unicast_op *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
+ for (i = 0; i < 6; i++)
+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
+ cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
+ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_fdb_add_multicast() - Add a set of egress interfaces to a multicast group
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ * @cfg: Multicast entry configuration
+ *
+ * If the group does not exist, it is created.
+ * Only interfaces that do not yet belong to this multicast group
+ * can be added, otherwise an error is generated and the command is
+ * ignored.
+ * This function may be called repeatedly, each time providing only
+ * the required delta of interfaces.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 fdb_id, const struct dpsw_fdb_multicast_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_fdb_multicast_op *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_MULTICAST,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_fdb_multicast_op *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
+ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
+ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+ for (i = 0; i < 6; i++)
+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_fdb_remove_multicast() - Remove interfaces from an existing multicast
+ *				group.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ * @cfg: Multicast entry configuration
+ *
+ * Interfaces provided to this API have to exist in the group,
+ * otherwise an error is returned and the entire command is
+ * ignored. If no interfaces remain in the group,
+ * the entire group is deleted.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 fdb_id, const struct dpsw_fdb_multicast_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_fdb_multicast_op *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_MULTICAST,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_fdb_multicast_op *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
+ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
+ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+ for (i = 0; i < 6; i++)
+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_ctrl_if_get_attributes() - Obtain control interface attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @attr: Returned control interface attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, struct dpsw_ctrl_if_attr *attr)
+{
+ struct dpsw_rsp_ctrl_if_get_attr *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_GET_ATTR,
+ cmd_flags, token);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpsw_rsp_ctrl_if_get_attr *)cmd.params;
+ attr->rx_fqid = le32_to_cpu(rsp_params->rx_fqid);
+ attr->rx_err_fqid = le32_to_cpu(rsp_params->rx_err_fqid);
+ attr->tx_err_conf_fqid = le32_to_cpu(rsp_params->tx_err_conf_fqid);
+
+ return 0;
+}
+
+/**
+ * dpsw_ctrl_if_set_pools() - Set control interface buffer pools
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @cfg: Buffer pools configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ const struct dpsw_ctrl_if_pools_cfg *cfg)
+{
+ struct dpsw_cmd_ctrl_if_set_pools *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ int i;
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_SET_POOLS,
+ cmd_flags, token);
+ cmd_params = (struct dpsw_cmd_ctrl_if_set_pools *)cmd.params;
+ cmd_params->num_dpbp = cfg->num_dpbp;
+ for (i = 0; i < DPSW_MAX_DPBP; i++) {
+ cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);
+ cmd_params->buffer_size[i] =
+ cpu_to_le16(cfg->pools[i].buffer_size);
+ cmd_params->backup_pool_mask |=
+ DPSW_BACKUP_POOL(cfg->pools[i].backup_pool, i);
+ }
+
+ return mc_send_command(mc_io, &cmd);
+}
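+
+/*
+ * Sketch of a single-pool configuration (hypothetical DPBP id and buffer
+ * size); only the first 'num_dpbp' entries of the pools array are consumed.
+ *
+ *	struct dpsw_ctrl_if_pools_cfg pcfg = {
+ *		.num_dpbp = 1,
+ *		.pools[0] = {
+ *			.dpbp_id = dpbp_id,
+ *			.buffer_size = 2048,
+ *			.backup_pool = 0,
+ *		},
+ *	};
+ *
+ *	err = dpsw_ctrl_if_set_pools(mc_io, 0, token, &pcfg);
+ */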
+
+/**
+ * dpsw_ctrl_if_set_queue() - Set Rx queue configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of dpsw object
+ * @qtype: dpsw_queue_type of the targeted queue
+ * @cfg: Rx queue configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_ctrl_if_set_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ enum dpsw_queue_type qtype,
+ const struct dpsw_ctrl_if_queue_cfg *cfg)
+{
+ struct dpsw_cmd_ctrl_if_set_queue *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_SET_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_ctrl_if_set_queue *)cmd.params;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->dest_priority = cfg->dest_cfg.priority;
+ cmd_params->qtype = qtype;
+ cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
+ cmd_params->options = cpu_to_le32(cfg->options);
+ dpsw_set_field(cmd_params->dest_type,
+ DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+
+ return mc_send_command(mc_io, &cmd);
+}
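+
+/*
+ * Sketch of steering the control interface Rx queue to a DPIO (hypothetical
+ * DPIO id and user context); the option bits select which fields are applied.
+ *
+ *	struct dpsw_ctrl_if_queue_cfg qcfg = {
+ *		.options = DPSW_CTRL_IF_QUEUE_OPT_DEST |
+ *			   DPSW_CTRL_IF_QUEUE_OPT_USER_CTX,
+ *		.user_ctx = (u64)(uintptr_t)priv_ctx,
+ *		.dest_cfg = {
+ *			.dest_type = DPSW_CTRL_IF_DEST_DPIO,
+ *			.dest_id = dpio_id,
+ *			.priority = 0,
+ *		},
+ *	};
+ *
+ *	err = dpsw_ctrl_if_set_queue(mc_io, 0, token, DPSW_QUEUE_RX, &qcfg);
+ */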
+
+/**
+ * dpsw_get_api_version() - Get Data Path Switch API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path switch API
+ * @minor_ver: Minor version of data path switch API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_rsp_get_api_version *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_API_VERSION,
+ cmd_flags,
+ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpsw_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->version_major);
+ *minor_ver = le16_to_cpu(rsp_params->version_minor);
+
+ return 0;
+}
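+
+/*
+ * Typical probe-time check (sketch; DPSW_REQ_MAJOR stands in for whatever
+ * minimum API version the caller depends on):
+ *
+ *	u16 major, minor;
+ *	int err;
+ *
+ *	err = dpsw_get_api_version(mc_io, 0, &major, &minor);
+ *	if (err)
+ *		return err;
+ *	if (major < DPSW_REQ_MAJOR)
+ *		return -EOPNOTSUPP;
+ */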
+
+/**
+ * dpsw_if_get_port_mac_addr() - Retrieve MAC address associated to the physical port
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @mac_addr: MAC address of the physical port, if any, otherwise 0
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_get_port_mac_addr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, u8 mac_addr[6])
+{
+ struct dpsw_rsp_if_get_mac_addr *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if *cmd_params;
+ int err, i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_PORT_MAC_ADDR,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpsw_rsp_if_get_mac_addr *)cmd.params;
+ for (i = 0; i < 6; i++)
+ mac_addr[5 - i] = rsp_params->mac_addr[i];
+
+ return 0;
+}
+
+/**
+ * dpsw_ctrl_if_enable() - Enable control interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_ENABLE, cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_ctrl_if_disable() - Disable the control interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_DISABLE,
+ cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_set_egress_flood() - Set egress parameters associated with an FDB ID
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @cfg: Egress flooding configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_set_egress_flood(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ const struct dpsw_egress_flood_cfg *cfg)
+{
+ struct dpsw_cmd_set_egress_flood *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_EGRESS_FLOOD, cmd_flags, token);
+ cmd_params = (struct dpsw_cmd_set_egress_flood *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(cfg->fdb_id);
+ cmd_params->flood_type = cfg->flood_type;
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ return mc_send_command(mc_io, &cmd);
+}
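+
+/*
+ * Sketch of restricting unknown-unicast flooding for one FDB to two
+ * interfaces (hypothetical ids); a similar call with DPSW_BROADCAST covers
+ * broadcast replication.
+ *
+ *	struct dpsw_egress_flood_cfg fcfg = {
+ *		.fdb_id = fdb_id,
+ *		.flood_type = DPSW_FLOODING,
+ *		.num_ifs = 2,
+ *		.if_id = { 0, 1 },
+ *	};
+ *
+ *	err = dpsw_set_egress_flood(mc_io, 0, token, &fcfg);
+ */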
+
+/**
+ * dpsw_if_set_learning_mode() - Configure the learning mode on an interface.
+ * If this API is used, it will take precedence over the FDB configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: InterfaceID
+ * @mode: Learning mode
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_set_learning_mode(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, enum dpsw_learning_mode mode)
+{
+ struct dpsw_cmd_if_set_learning_mode *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_LEARNING_MODE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_set_learning_mode *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ dpsw_set_field(cmd_params->mode, LEARNING_MODE, mode);
+
+ return mc_send_command(mc_io, &cmd);
+}
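+
+/*
+ * Sketch of disabling hardware learning on one interface, for example while
+ * it is used standalone rather than under a bridge (hypothetical usage):
+ *
+ *	err = dpsw_if_set_learning_mode(mc_io, 0, token, if_id,
+ *					DPSW_LEARNING_MODE_DIS);
+ */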
+
+/**
+ * dpsw_acl_add() - Create an ACL table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @acl_id: Returned ACL ID, for future references
+ * @cfg: ACL configuration
+ *
+ * Create an Access Control List table. Multiple ACLs can be created and
+ * coexist in the L2 switch.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_acl_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 *acl_id,
+ const struct dpsw_acl_cfg *cfg)
+{
+ struct dpsw_cmd_acl_add *cmd_params;
+ struct dpsw_rsp_acl_add *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD, cmd_flags, token);
+ cmd_params = (struct dpsw_cmd_acl_add *)cmd.params;
+ cmd_params->max_entries = cpu_to_le16(cfg->max_entries);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpsw_rsp_acl_add *)cmd.params;
+ *acl_id = le16_to_cpu(rsp_params->acl_id);
+
+ return 0;
+}
+
+/**
+ * dpsw_acl_remove() - Remove an ACL table from L2 switch.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @acl_id: ACL ID
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_acl_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id)
+{
+ struct dpsw_cmd_acl_remove *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE, cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_acl_remove *)cmd.params;
+ cmd_params->acl_id = cpu_to_le16(acl_id);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_acl_add_if() - Associate interface/interfaces with an ACL table.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @acl_id: ACL ID
+ * @cfg: Interfaces list
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_acl_add_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id, const struct dpsw_acl_if_cfg *cfg)
+{
+ struct dpsw_cmd_acl_if *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_IF, cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_acl_if *)cmd.params;
+ cmd_params->acl_id = cpu_to_le16(acl_id);
+ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_acl_remove_if() - De-associate interface/interfaces from an ACL table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @acl_id: ACL ID
+ * @cfg: Interfaces list
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_acl_remove_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id, const struct dpsw_acl_if_cfg *cfg)
+{
+ struct dpsw_cmd_acl_if *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_IF, cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_acl_if *)cmd.params;
+ cmd_params->acl_id = cpu_to_le16(acl_id);
+ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_acl_prepare_entry_cfg() - Set up an ACL entry key
+ * @key: Key
+ * @entry_cfg_buf: Zeroed 256-byte buffer that is filled in here, before being
+ *		mapped for DMA
+ *
+ * This function has to be called before adding or removing an ACL entry.
+ *
+ */
+void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
+ u8 *entry_cfg_buf)
+{
+ struct dpsw_prep_acl_entry *ext_params;
+ int i;
+
+ ext_params = (struct dpsw_prep_acl_entry *)entry_cfg_buf;
+
+ for (i = 0; i < 6; i++) {
+ ext_params->match_l2_dest_mac[i] = key->match.l2_dest_mac[5 - i];
+ ext_params->match_l2_source_mac[i] = key->match.l2_source_mac[5 - i];
+ ext_params->mask_l2_dest_mac[i] = key->mask.l2_dest_mac[5 - i];
+ ext_params->mask_l2_source_mac[i] = key->mask.l2_source_mac[5 - i];
+ }
+
+ ext_params->match_l2_tpid = cpu_to_le16(key->match.l2_tpid);
+ ext_params->match_l2_vlan_id = cpu_to_le16(key->match.l2_vlan_id);
+ ext_params->match_l3_dest_ip = cpu_to_le32(key->match.l3_dest_ip);
+ ext_params->match_l3_source_ip = cpu_to_le32(key->match.l3_source_ip);
+ ext_params->match_l4_dest_port = cpu_to_le16(key->match.l4_dest_port);
+ ext_params->match_l4_source_port = cpu_to_le16(key->match.l4_source_port);
+ ext_params->match_l2_ether_type = cpu_to_le16(key->match.l2_ether_type);
+ ext_params->match_l2_pcp_dei = key->match.l2_pcp_dei;
+ ext_params->match_l3_dscp = key->match.l3_dscp;
+
+ ext_params->mask_l2_tpid = cpu_to_le16(key->mask.l2_tpid);
+ ext_params->mask_l2_vlan_id = cpu_to_le16(key->mask.l2_vlan_id);
+ ext_params->mask_l3_dest_ip = cpu_to_le32(key->mask.l3_dest_ip);
+ ext_params->mask_l3_source_ip = cpu_to_le32(key->mask.l3_source_ip);
+ ext_params->mask_l4_dest_port = cpu_to_le16(key->mask.l4_dest_port);
+ ext_params->mask_l4_source_port = cpu_to_le16(key->mask.l4_source_port);
+ ext_params->mask_l2_ether_type = cpu_to_le16(key->mask.l2_ether_type);
+ ext_params->mask_l2_pcp_dei = key->mask.l2_pcp_dei;
+ ext_params->mask_l3_dscp = key->mask.l3_dscp;
+ ext_params->match_l3_protocol = key->match.l3_protocol;
+ ext_params->mask_l3_protocol = key->mask.l3_protocol;
+}
+
+/**
+ * dpsw_acl_add_entry() - Add a rule to the ACL table.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @acl_id: ACL ID
+ * @cfg: Entry configuration
+ *
+ * warning: This function has to be called after dpsw_acl_prepare_entry_cfg()
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_acl_add_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id, const struct dpsw_acl_entry_cfg *cfg)
+{
+ struct dpsw_cmd_acl_entry *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_ENTRY, cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params;
+ cmd_params->acl_id = cpu_to_le16(acl_id);
+ cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id);
+ cmd_params->precedence = cpu_to_le32(cfg->precedence);
+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+ dpsw_set_field(cmd_params->result_action,
+ RESULT_ACTION,
+ cfg->result.action);
+
+ return mc_send_command(mc_io, &cmd);
+}
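+
+/*
+ * Sketch of installing one rule, assuming 'dev' is the fsl-mc device's
+ * struct device, 'key' has already been filled in and 256 bytes of DMA-able
+ * memory are enough for the prepared entry (see
+ * dpsw_acl_prepare_entry_cfg() above):
+ *
+ *	struct dpsw_acl_entry_cfg ecfg = {
+ *		.precedence = 0,
+ *		.result.action = DPSW_ACL_ACTION_DROP,
+ *	};
+ *	u8 *buf;
+ *	int err;
+ *
+ *	buf = kzalloc(256, GFP_KERNEL);
+ *	if (!buf)
+ *		return -ENOMEM;
+ *	dpsw_acl_prepare_entry_cfg(&key, buf);
+ *	ecfg.key_iova = dma_map_single(dev, buf, 256, DMA_TO_DEVICE);
+ *	if (dma_mapping_error(dev, ecfg.key_iova)) {
+ *		kfree(buf);
+ *		return -ENOMEM;
+ *	}
+ *	err = dpsw_acl_add_entry(mc_io, 0, token, acl_id, &ecfg);
+ *	dma_unmap_single(dev, ecfg.key_iova, 256, DMA_TO_DEVICE);
+ *	kfree(buf);
+ */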
+
+/**
+ * dpsw_acl_remove_entry() - Removes an entry from ACL.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @acl_id: ACL ID
+ * @cfg: Entry configuration
+ *
+ * warning: This function has to be called after dpsw_acl_prepare_entry_cfg()
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id, const struct dpsw_acl_entry_cfg *cfg)
+{
+ struct dpsw_cmd_acl_entry *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_ENTRY,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params;
+ cmd_params->acl_id = cpu_to_le16(acl_id);
+ cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id);
+ cmd_params->precedence = cpu_to_le32(cfg->precedence);
+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+ dpsw_set_field(cmd_params->result_action,
+ RESULT_ACTION,
+ cfg->result.action);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_set_reflection_if() - Set the target interface for mirrored traffic
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Id
+ *
+ * Only one mirroring destination is allowed per switch.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_set_reflection_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id)
+{
+ struct dpsw_cmd_set_reflection_if *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_REFLECTION_IF,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_set_reflection_if *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_add_reflection() - Setup mirroring rule
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @cfg: Reflection configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_add_reflection(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, const struct dpsw_reflection_cfg *cfg)
+{
+ struct dpsw_cmd_if_reflection *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ADD_REFLECTION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
+ dpsw_set_field(cmd_params->filter, FILTER, cfg->filter);
+
+ return mc_send_command(mc_io, &cmd);
+}
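+
+/*
+ * Sketch of mirroring all ingress traffic of one interface to another
+ * (hypothetical interface ids): the destination is set first, and only one
+ * mirroring destination per switch is supported.
+ *
+ *	struct dpsw_reflection_cfg rcfg = {
+ *		.filter = DPSW_REFLECTION_FILTER_INGRESS_ALL,
+ *	};
+ *	int err;
+ *
+ *	err = dpsw_set_reflection_if(mc_io, 0, token, mirror_if);
+ *	if (!err)
+ *		err = dpsw_if_add_reflection(mc_io, 0, token, src_if, &rcfg);
+ */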
+
+/**
+ * dpsw_if_remove_reflection() - Remove mirroring rule
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @cfg: Reflection configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, const struct dpsw_reflection_cfg *cfg)
+{
+ struct dpsw_cmd_if_reflection *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_REMOVE_REFLECTION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
+ dpsw_set_field(cmd_params->filter, FILTER, cfg->filter);
+
+ return mc_send_command(mc_io, &cmd);
+}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw.h b/drivers/net/ethernet/freescale/dpaa2/dpsw.h
new file mode 100644
index 000000000..b90bd363f
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpsw.h
@@ -0,0 +1,791 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2021 NXP
+ *
+ */
+
+#ifndef __FSL_DPSW_H
+#define __FSL_DPSW_H
+
+/* Data Path L2-Switch API
+ * Contains API for handling DPSW topology and functionality
+ */
+
+struct fsl_mc_io;
+
+/* DPSW general definitions */
+
+#define DPSW_MAX_PRIORITIES 8
+
+#define DPSW_MAX_IF 64
+
+int dpsw_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpsw_id, u16 *token);
+
+int dpsw_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+/* DPSW options */
+
+/**
+ * DPSW_OPT_FLOODING_DIS - Flooding was disabled at device create
+ */
+#define DPSW_OPT_FLOODING_DIS 0x0000000000000001ULL
+/**
+ * DPSW_OPT_MULTICAST_DIS - Multicast was disabled at device create
+ */
+#define DPSW_OPT_MULTICAST_DIS 0x0000000000000004ULL
+/**
+ * DPSW_OPT_CTRL_IF_DIS - Control interface support is disabled
+ */
+#define DPSW_OPT_CTRL_IF_DIS 0x0000000000000010ULL
+
+/**
+ * enum dpsw_component_type - component type of a bridge
+ * @DPSW_COMPONENT_TYPE_C_VLAN: A C-VLAN component of an
+ * enterprise VLAN bridge or of a Provider Bridge used
+ * to process C-tagged frames
+ * @DPSW_COMPONENT_TYPE_S_VLAN: An S-VLAN component of a
+ * Provider Bridge
+ *
+ */
+enum dpsw_component_type {
+ DPSW_COMPONENT_TYPE_C_VLAN = 0,
+ DPSW_COMPONENT_TYPE_S_VLAN
+};
+
+/**
+ * enum dpsw_flooding_cfg - flooding configuration requested
+ * @DPSW_FLOODING_PER_VLAN: Flooding replicators are allocated per VLAN and
+ * interfaces present in each of them can be configured using
+ * dpsw_vlan_add_if_flooding()/dpsw_vlan_remove_if_flooding().
+ * This is the default configuration.
+ *
+ * @DPSW_FLOODING_PER_FDB: Flooding replicators are allocated per FDB and
+ * interfaces present in each of them can be configured using
+ * dpsw_set_egress_flood().
+ */
+enum dpsw_flooding_cfg {
+ DPSW_FLOODING_PER_VLAN = 0,
+ DPSW_FLOODING_PER_FDB,
+};
+
+/**
+ * enum dpsw_broadcast_cfg - broadcast configuration requested
+ * @DPSW_BROADCAST_PER_OBJECT: There is only one broadcast replicator per DPSW
+ * object. This is the default configuration.
+ * @DPSW_BROADCAST_PER_FDB: Broadcast replicators are allocated per FDB and
+ * interfaces present in each of them can be configured using
+ * dpsw_set_egress_flood().
+ */
+enum dpsw_broadcast_cfg {
+ DPSW_BROADCAST_PER_OBJECT = 0,
+ DPSW_BROADCAST_PER_FDB,
+};
+
+int dpsw_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+int dpsw_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+int dpsw_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+/* DPSW IRQ Index and Events */
+
+#define DPSW_IRQ_INDEX_IF 0x0000
+#define DPSW_IRQ_INDEX_L2SW 0x0001
+
+/**
+ * DPSW_IRQ_EVENT_LINK_CHANGED - Indicates that the link state changed
+ */
+#define DPSW_IRQ_EVENT_LINK_CHANGED 0x0001
+
+/**
+ * DPSW_IRQ_EVENT_ENDPOINT_CHANGED - Indicates a change in endpoint
+ */
+#define DPSW_IRQ_EVENT_ENDPOINT_CHANGED 0x0002
+
+/**
+ * struct dpsw_irq_cfg - IRQ configuration
+ * @addr: Address that must be written to signal a message-based interrupt
+ * @val: Value to write into irq_addr address
+ * @irq_num: A user defined number associated with this IRQ
+ */
+struct dpsw_irq_cfg {
+ u64 addr;
+ u32 val;
+ int irq_num;
+};
+
+int dpsw_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u8 en);
+
+int dpsw_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 mask);
+
+int dpsw_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 *status);
+
+int dpsw_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 status);
+
+/**
+ * struct dpsw_attr - Structure representing DPSW attributes
+ * @id: DPSW object ID
+ * @options: Enable/Disable DPSW features
+ * @max_vlans: Maximum Number of VLANs
+ * @max_meters_per_if: Number of meters per interface
+ * @max_fdbs: Maximum Number of FDBs
+ * @max_fdb_entries: Number of FDB entries for default FDB table;
+ * 0 - indicates default 1024 entries.
+ * @fdb_aging_time: Default FDB aging time for default FDB table;
+ * 0 - indicates default 300 seconds
+ * @max_fdb_mc_groups: Number of multicast groups in each FDB table;
+ * 0 - indicates default 32
+ * @mem_size: DPSW frame storage memory size
+ * @num_ifs: Number of interfaces
+ * @num_vlans: Current number of VLANs
+ * @num_fdbs: Current number of FDBs
+ * @component_type: Component type of this bridge
+ * @flooding_cfg: Flooding configuration (PER_VLAN - default, PER_FDB)
+ * @broadcast_cfg: Broadcast configuration (PER_OBJECT - default, PER_FDB)
+ */
+struct dpsw_attr {
+ int id;
+ u64 options;
+ u16 max_vlans;
+ u8 max_meters_per_if;
+ u8 max_fdbs;
+ u16 max_fdb_entries;
+ u16 fdb_aging_time;
+ u16 max_fdb_mc_groups;
+ u16 num_ifs;
+ u16 mem_size;
+ u16 num_vlans;
+ u8 num_fdbs;
+ enum dpsw_component_type component_type;
+ enum dpsw_flooding_cfg flooding_cfg;
+ enum dpsw_broadcast_cfg broadcast_cfg;
+};
+
+int dpsw_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpsw_attr *attr);
+
+/**
+ * struct dpsw_ctrl_if_attr - Control interface attributes
+ * @rx_fqid: Receive FQID
+ * @rx_err_fqid: Receive error FQID
+ * @tx_err_conf_fqid: Transmit error and confirmation FQID
+ */
+struct dpsw_ctrl_if_attr {
+ u32 rx_fqid;
+ u32 rx_err_fqid;
+ u32 tx_err_conf_fqid;
+};
+
+int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, struct dpsw_ctrl_if_attr *attr);
+
+enum dpsw_queue_type {
+ DPSW_QUEUE_RX,
+ DPSW_QUEUE_TX_ERR_CONF,
+ DPSW_QUEUE_RX_ERR,
+};
+
+#define DPSW_MAX_DPBP 8
+
+/**
+ * struct dpsw_ctrl_if_pools_cfg - Control interface buffer pools configuration
+ * @num_dpbp: Number of DPBPs
+ * @pools: Array of buffer pools parameters; The number of valid entries
+ * must match 'num_dpbp' value
+ * @pools.dpbp_id: DPBP object ID
+ * @pools.buffer_size: Buffer size
+ * @pools.backup_pool: Backup pool
+ */
+struct dpsw_ctrl_if_pools_cfg {
+ u8 num_dpbp;
+ struct {
+ int dpbp_id;
+ u16 buffer_size;
+ int backup_pool;
+ } pools[DPSW_MAX_DPBP];
+};
+
+int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ const struct dpsw_ctrl_if_pools_cfg *cfg);
+
+#define DPSW_CTRL_IF_QUEUE_OPT_USER_CTX 0x00000001
+#define DPSW_CTRL_IF_QUEUE_OPT_DEST 0x00000002
+
+enum dpsw_ctrl_if_dest {
+ DPSW_CTRL_IF_DEST_NONE = 0,
+ DPSW_CTRL_IF_DEST_DPIO = 1,
+};
+
+struct dpsw_ctrl_if_dest_cfg {
+ enum dpsw_ctrl_if_dest dest_type;
+ int dest_id;
+ u8 priority;
+};
+
+struct dpsw_ctrl_if_queue_cfg {
+ u32 options;
+ u64 user_ctx;
+ struct dpsw_ctrl_if_dest_cfg dest_cfg;
+};
+
+int dpsw_ctrl_if_set_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ enum dpsw_queue_type qtype,
+ const struct dpsw_ctrl_if_queue_cfg *cfg);
+
+int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+/**
+ * enum dpsw_action - Action selection for special/control frames
+ * @DPSW_ACTION_DROP: Drop frame
+ * @DPSW_ACTION_REDIRECT: Redirect frame to control port
+ */
+enum dpsw_action {
+ DPSW_ACTION_DROP = 0,
+ DPSW_ACTION_REDIRECT = 1
+};
+
+#define DPSW_LINK_OPT_AUTONEG 0x0000000000000001ULL
+#define DPSW_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
+#define DPSW_LINK_OPT_PAUSE 0x0000000000000004ULL
+#define DPSW_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
+
+/**
+ * struct dpsw_link_cfg - Structure representing DPSW link configuration
+ * @rate: Rate
+ * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values
+ */
+struct dpsw_link_cfg {
+ u32 rate;
+ u64 options;
+};
+
+int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id,
+ struct dpsw_link_cfg *cfg);
+
+/**
+ * struct dpsw_link_state - Structure representing DPSW link state
+ * @rate: Rate
+ * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values
+ * @up: 0 - covers two cases: down and disconnected, 1 - up
+ */
+struct dpsw_link_state {
+ u32 rate;
+ u64 options;
+ u8 up;
+};
+
+int dpsw_if_get_link_state(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, struct dpsw_link_state *state);
+
+/**
+ * struct dpsw_tci_cfg - Tag Control Information (TCI) configuration
+ * @pcp: Priority Code Point (PCP): a 3-bit field which refers
+ * to the IEEE 802.1p priority
+ * @dei: Drop Eligible Indicator (DEI): a 1-bit field. May be used
+ * separately or in conjunction with PCP to indicate frames
+ * eligible to be dropped in the presence of congestion
+ * @vlan_id: VLAN Identifier (VID): a 12-bit field specifying the VLAN
+ * to which the frame belongs. The hexadecimal values
+ * of 0x000 and 0xFFF are reserved;
+ * all other values may be used as VLAN identifiers,
+ * allowing up to 4,094 VLANs
+ */
+struct dpsw_tci_cfg {
+ u8 pcp;
+ u8 dei;
+ u16 vlan_id;
+};
+
+int dpsw_if_set_tci(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id,
+ const struct dpsw_tci_cfg *cfg);
+
+int dpsw_if_get_tci(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id,
+ struct dpsw_tci_cfg *cfg);
+
+/**
+ * enum dpsw_stp_state - Spanning Tree Protocol (STP) states
+ * @DPSW_STP_STATE_DISABLED: Disabled state
+ * @DPSW_STP_STATE_LISTENING: Listening state
+ * @DPSW_STP_STATE_LEARNING: Learning state
+ * @DPSW_STP_STATE_FORWARDING: Forwarding state
+ * @DPSW_STP_STATE_BLOCKING: Blocking state
+ *
+ */
+enum dpsw_stp_state {
+ DPSW_STP_STATE_DISABLED = 0,
+ DPSW_STP_STATE_LISTENING = 1,
+ DPSW_STP_STATE_LEARNING = 2,
+ DPSW_STP_STATE_FORWARDING = 3,
+ DPSW_STP_STATE_BLOCKING = 0
+};
+
+/**
+ * struct dpsw_stp_cfg - Spanning Tree Protocol (STP) Configuration
+ * @vlan_id: VLAN ID to which the STP state applies
+ * @state: STP state
+ */
+struct dpsw_stp_cfg {
+ u16 vlan_id;
+ enum dpsw_stp_state state;
+};
+
+int dpsw_if_set_stp(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id,
+ const struct dpsw_stp_cfg *cfg);
+
+/**
+ * enum dpsw_accepted_frames - Types of frames to accept
+ * @DPSW_ADMIT_ALL: The device accepts VLAN tagged, untagged and
+ * priority tagged frames
+ * @DPSW_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or
+ * Priority-Tagged frames received on this interface.
+ *
+ */
+enum dpsw_accepted_frames {
+ DPSW_ADMIT_ALL = 1,
+ DPSW_ADMIT_ONLY_VLAN_TAGGED = 3
+};
+
+/**
+ * enum dpsw_counter - Counters types
+ * @DPSW_CNT_ING_FRAME: Counts ingress frames
+ * @DPSW_CNT_ING_BYTE: Counts ingress bytes
+ * @DPSW_CNT_ING_FLTR_FRAME: Counts filtered ingress frames
+ * @DPSW_CNT_ING_FRAME_DISCARD: Counts discarded ingress frames
+ * @DPSW_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
+ * @DPSW_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
+ * @DPSW_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
+ * @DPSW_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
+ * @DPSW_CNT_EGR_FRAME: Counts egress frames
+ * @DPSW_CNT_EGR_BYTE: Counts egress bytes
+ * @DPSW_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames
+ * @DPSW_CNT_EGR_STP_FRAME_DISCARD: Counts egress STP discarded frames
+ * @DPSW_CNT_ING_NO_BUFF_DISCARD: Counts ingress no buffer discarded frames
+ */
+enum dpsw_counter {
+ DPSW_CNT_ING_FRAME = 0x0,
+ DPSW_CNT_ING_BYTE = 0x1,
+ DPSW_CNT_ING_FLTR_FRAME = 0x2,
+ DPSW_CNT_ING_FRAME_DISCARD = 0x3,
+ DPSW_CNT_ING_MCAST_FRAME = 0x4,
+ DPSW_CNT_ING_MCAST_BYTE = 0x5,
+ DPSW_CNT_ING_BCAST_FRAME = 0x6,
+ DPSW_CNT_ING_BCAST_BYTES = 0x7,
+ DPSW_CNT_EGR_FRAME = 0x8,
+ DPSW_CNT_EGR_BYTE = 0x9,
+ DPSW_CNT_EGR_FRAME_DISCARD = 0xa,
+ DPSW_CNT_EGR_STP_FRAME_DISCARD = 0xb,
+ DPSW_CNT_ING_NO_BUFF_DISCARD = 0xc,
+};
+
+int dpsw_if_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, enum dpsw_counter type, u64 *counter);
+
+int dpsw_if_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id);
+
+int dpsw_if_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id);
+
+/**
+ * struct dpsw_if_attr - Structure representing DPSW interface attributes
+ * @num_tcs: Number of traffic classes
+ * @rate: Transmit rate in bits per second
+ * @options: Interface configuration options (bitmap)
+ * @enabled: Indicates if interface is enabled
+ * @accept_all_vlan: The device discards/accepts incoming frames
+ * for VLANs that do not include this interface
+ * @admit_untagged: When set to 'DPSW_ADMIT_ONLY_VLAN_TAGGED', the device
+ * discards untagged frames or priority-tagged frames received on
+ * this interface;
+ * When set to 'DPSW_ADMIT_ALL', untagged frames or priority-
+ * tagged frames received on this interface are accepted
+ * @qdid: Transmit QDID used for control frames
+ */
+struct dpsw_if_attr {
+ u8 num_tcs;
+ u32 rate;
+ u32 options;
+ int enabled;
+ int accept_all_vlan;
+ enum dpsw_accepted_frames admit_untagged;
+ u16 qdid;
+};
+
+int dpsw_if_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, struct dpsw_if_attr *attr);
+
+int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, u16 frame_length);
+
+/**
+ * struct dpsw_vlan_cfg - VLAN Configuration
+ * @fdb_id: Forwarding Database (FDB) identifier
+ */
+struct dpsw_vlan_cfg {
+ u16 fdb_id;
+};
+
+int dpsw_vlan_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_cfg *cfg);
+
+#define DPSW_VLAN_ADD_IF_OPT_FDB_ID 0x0001
+
+/**
+ * struct dpsw_vlan_if_cfg - Set of VLAN Interfaces
+ * @num_ifs: The number of interfaces that are assigned to the egress
+ * list for this VLAN
+ * @if_id: The set of interfaces that are
+ * assigned to the egress list for this VLAN
+ * @options: Options map for this command (DPSW_VLAN_ADD_IF_OPT_FDB_ID)
+ * @fdb_id: FDB id to be used by this VLAN on these specific interfaces
+ * (taken into account only if the DPSW_VLAN_ADD_IF_OPT_FDB_ID is
+ * specified in the options field)
+ */
+struct dpsw_vlan_if_cfg {
+ u16 num_ifs;
+ u16 options;
+ u16 if_id[DPSW_MAX_IF];
+ u16 fdb_id;
+};
+
+int dpsw_vlan_add_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg);
+
+int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg);
+
+int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg);
+
+int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg);
+
+int dpsw_vlan_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id);
+
+/**
+ * enum dpsw_fdb_entry_type - FDB Entry type - Static/Dynamic
+ * @DPSW_FDB_ENTRY_STATIC: Static entry
+ * @DPSW_FDB_ENTRY_DINAMIC: Dynamic entry
+ */
+enum dpsw_fdb_entry_type {
+ DPSW_FDB_ENTRY_STATIC = 0,
+ DPSW_FDB_ENTRY_DINAMIC = 1
+};
+
+/**
+ * struct dpsw_fdb_unicast_cfg - Unicast entry configuration
+ * @type: Select static or dynamic entry
+ * @mac_addr: MAC address
+ * @if_egress: Egress interface ID
+ */
+struct dpsw_fdb_unicast_cfg {
+ enum dpsw_fdb_entry_type type;
+ u8 mac_addr[6];
+ u16 if_egress;
+};
+
+int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 fdb_id, const struct dpsw_fdb_unicast_cfg *cfg);
+
+int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 fdb_id, const struct dpsw_fdb_unicast_cfg *cfg);
+
+#define DPSW_FDB_ENTRY_TYPE_DYNAMIC BIT(0)
+#define DPSW_FDB_ENTRY_TYPE_UNICAST BIT(1)
+
+/**
+ * struct fdb_dump_entry - fdb snapshot entry
+ * @mac_addr: MAC address
+ * @type: bit0 - DYNAMIC(1)/STATIC(0), bit1 - UNICAST(1)/MULTICAST(0)
+ * @if_info: unicast - egress interface, multicast - number of egress interfaces
+ * @if_mask: multicast - egress interface mask
+ */
+struct fdb_dump_entry {
+ u8 mac_addr[6];
+ u8 type;
+ u8 if_info;
+ u8 if_mask[8];
+};
+
+int dpsw_fdb_dump(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 fdb_id,
+ u64 iova_addr, u32 iova_size, u16 *num_entries);
+
+/**
+ * struct dpsw_fdb_multicast_cfg - Multi-cast entry configuration
+ * @type: Select static or dynamic entry
+ * @mac_addr: MAC address
+ * @num_ifs: Number of external and internal interfaces
+ * @if_id: Egress interface IDs
+ */
+struct dpsw_fdb_multicast_cfg {
+ enum dpsw_fdb_entry_type type;
+ u8 mac_addr[6];
+ u16 num_ifs;
+ u16 if_id[DPSW_MAX_IF];
+};
+
+int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 fdb_id, const struct dpsw_fdb_multicast_cfg *cfg);
+
+int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 fdb_id, const struct dpsw_fdb_multicast_cfg *cfg);
+
+/**
+ * enum dpsw_learning_mode - Auto-learning modes
+ * @DPSW_LEARNING_MODE_DIS: Disable Auto-learning
+ * @DPSW_LEARNING_MODE_HW: Enable HW auto-Learning
+ * @DPSW_LEARNING_MODE_NON_SECURE: Enable non-secure learning by CPU
+ * @DPSW_LEARNING_MODE_SECURE: Enable secure learning by CPU
+ *
+ * NON-SECURE LEARNING
+ * SMAC found DMAC found CTLU Action
+ * v v Forward frame to
+ * 1. DMAC destination
+ * - v Forward frame to
+ * 1. DMAC destination
+ * 2. Control interface
+ * v - Forward frame to
+ * 1. Flooding list of interfaces
+ * - - Forward frame to
+ * 1. Flooding list of interfaces
+ * 2. Control interface
+ * SECURE LEARNING
+ * SMAC found DMAC found CTLU Action
+ * v v Forward frame to
+ * 1. DMAC destination
+ * - v Forward frame to
+ * 1. Control interface
+ * v - Forward frame to
+ * 1. Flooding list of interfaces
+ * - - Forward frame to
+ * 1. Control interface
+ */
+enum dpsw_learning_mode {
+ DPSW_LEARNING_MODE_DIS = 0,
+ DPSW_LEARNING_MODE_HW = 1,
+ DPSW_LEARNING_MODE_NON_SECURE = 2,
+ DPSW_LEARNING_MODE_SECURE = 3
+};
+
+/**
+ * struct dpsw_fdb_attr - FDB Attributes
+ * @max_fdb_entries: Number of FDB entries
+ * @fdb_ageing_time: Ageing time in seconds
+ * @learning_mode: Learning mode
+ * @num_fdb_mc_groups: Current number of multicast groups
+ * @max_fdb_mc_groups: Maximum number of multicast groups
+ */
+struct dpsw_fdb_attr {
+ u16 max_fdb_entries;
+ u16 fdb_ageing_time;
+ enum dpsw_learning_mode learning_mode;
+ u16 num_fdb_mc_groups;
+ u16 max_fdb_mc_groups;
+};
+
+int dpsw_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver);
+
+int dpsw_if_get_port_mac_addr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, u8 mac_addr[6]);
+
+/**
+ * struct dpsw_fdb_cfg - FDB Configuration
+ * @num_fdb_entries: Number of FDB entries
+ * @fdb_ageing_time: Ageing time in seconds
+ */
+struct dpsw_fdb_cfg {
+ u16 num_fdb_entries;
+ u16 fdb_ageing_time;
+};
+
+int dpsw_fdb_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 *fdb_id,
+ const struct dpsw_fdb_cfg *cfg);
+
+int dpsw_fdb_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 fdb_id);
+
+/**
+ * enum dpsw_flood_type - Define the flood type of a DPSW object
+ * @DPSW_BROADCAST: Broadcast flooding
+ * @DPSW_FLOODING: Unknown flooding
+ */
+enum dpsw_flood_type {
+ DPSW_BROADCAST = 0,
+ DPSW_FLOODING,
+};
+
+struct dpsw_egress_flood_cfg {
+ u16 fdb_id;
+ enum dpsw_flood_type flood_type;
+ u16 num_ifs;
+ u16 if_id[DPSW_MAX_IF];
+};
+
+int dpsw_set_egress_flood(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ const struct dpsw_egress_flood_cfg *cfg);
+
+int dpsw_if_set_learning_mode(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, enum dpsw_learning_mode mode);
+
+/**
+ * struct dpsw_acl_cfg - ACL Configuration
+ * @max_entries: Number of ACL rules
+ */
+struct dpsw_acl_cfg {
+ u16 max_entries;
+};
+
+int dpsw_acl_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 *acl_id,
+ const struct dpsw_acl_cfg *cfg);
+
+int dpsw_acl_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id);
+
+/**
+ * struct dpsw_acl_if_cfg - List of interfaces to associate with an ACL table
+ * @num_ifs: Number of interfaces
+ * @if_id: List of interfaces
+ */
+struct dpsw_acl_if_cfg {
+ u16 num_ifs;
+ u16 if_id[DPSW_MAX_IF];
+};
+
+int dpsw_acl_add_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id, const struct dpsw_acl_if_cfg *cfg);
+
+int dpsw_acl_remove_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id, const struct dpsw_acl_if_cfg *cfg);
+
+/**
+ * struct dpsw_acl_fields - ACL fields.
+ * @l2_dest_mac: Destination MAC address: BPDU, Multicast, Broadcast, Unicast,
+ * slow protocols, MVRP, STP
+ * @l2_source_mac: Source MAC address
+ * @l2_tpid: Layer 2 (Ethernet) protocol type, used to identify the following
+ * protocols: MPLS, PTP, PFC, ARP, Jumbo frames, LLDP, IEEE802.1ae,
+ * Q-in-Q, IPv4, IPv6, PPPoE
+ * @l2_pcp_dei: Layer 2 PCP and DEI fields of the VLAN tag
+ * @l2_vlan_id: layer 2 VLAN ID
+ * @l2_ether_type: layer 2 Ethernet type
+ * @l3_dscp: Layer 3 differentiated services code point
+ * @l3_protocol: Tells the Network layer at the destination host, to which
+ * Protocol this packet belongs to. The following protocol are
+ * supported: ICMP, IGMP, IPv4 (encapsulation), TCP, IPv6
+ * (encapsulation), GRE, PTP
+ * @l3_source_ip: Source IPv4 IP
+ * @l3_dest_ip: Destination IPv4 IP
+ * @l4_source_port: Source TCP/UDP Port
+ * @l4_dest_port: Destination TCP/UDP Port
+ */
+struct dpsw_acl_fields {
+ u8 l2_dest_mac[6];
+ u8 l2_source_mac[6];
+ u16 l2_tpid;
+ u8 l2_pcp_dei;
+ u16 l2_vlan_id;
+ u16 l2_ether_type;
+ u8 l3_dscp;
+ u8 l3_protocol;
+ u32 l3_source_ip;
+ u32 l3_dest_ip;
+ u16 l4_source_port;
+ u16 l4_dest_port;
+};
+
+/**
+ * struct dpsw_acl_key - ACL key
+ * @match: Match fields
+ * @mask: Mask: b'1 - valid, b'0 - don't care
+ */
+struct dpsw_acl_key {
+ struct dpsw_acl_fields match;
+ struct dpsw_acl_fields mask;
+};
+
+/**
+ * enum dpsw_acl_action - action to be run on the ACL rule match
+ * @DPSW_ACL_ACTION_DROP: Drop frame
+ * @DPSW_ACL_ACTION_REDIRECT: Redirect to certain port
+ * @DPSW_ACL_ACTION_ACCEPT: Accept frame
+ * @DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF: Redirect to control interface
+ */
+enum dpsw_acl_action {
+ DPSW_ACL_ACTION_DROP,
+ DPSW_ACL_ACTION_REDIRECT,
+ DPSW_ACL_ACTION_ACCEPT,
+ DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF
+};
+
+/**
+ * struct dpsw_acl_result - ACL action
+ * @action: Action to be taken when the ACL entry is hit
+ * @if_id: Interface ID to redirect the frame to. Valid only if a redirect
+ * action is selected
+ */
+struct dpsw_acl_result {
+ enum dpsw_acl_action action;
+ u16 if_id;
+};
+
+/**
+ * struct dpsw_acl_entry_cfg - ACL entry
+ * @key_iova: I/O virtual address of DMA-able memory filled with key after call
+ * to dpsw_acl_prepare_entry_cfg()
+ * @result: Required action when entry hit occurs
+ * @precedence: Precedence inside the ACL; 0 is the lowest. This priority cannot
+ * change during the lifetime of a policy. It is the user's
+ * responsibility to space the priorities to allow for subsequent
+ * rule additions.
+ */
+struct dpsw_acl_entry_cfg {
+ u64 key_iova;
+ struct dpsw_acl_result result;
+ int precedence;
+};
+
+void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
+ u8 *entry_cfg_buf);
+
+int dpsw_acl_add_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id, const struct dpsw_acl_entry_cfg *cfg);
+
+int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id, const struct dpsw_acl_entry_cfg *cfg);
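+
+/* Hypothetical helper illustrating how an ACL entry is programmed: the key
+ * is serialized into a DMA-able buffer by dpsw_acl_prepare_entry_cfg() and
+ * the buffer's IOVA is passed through @key_iova. The 256-byte buffer size is
+ * an assumption, and the caller is expected to pull in
+ * <linux/dma-mapping.h> and <linux/slab.h>.
+ */
+static inline int dpsw_example_add_acl_entry(struct fsl_mc_io *mc_io,
+					     struct device *dev, u16 token,
+					     u16 acl_id,
+					     const struct dpsw_acl_key *key,
+					     const struct dpsw_acl_result *result)
+{
+	struct dpsw_acl_entry_cfg entry_cfg = {
+		.result = *result,
+		.precedence = 0,
+	};
+	dma_addr_t key_iova;
+	u8 *cmd_buf;
+	int err;
+
+	cmd_buf = kzalloc(256, GFP_KERNEL);
+	if (!cmd_buf)
+		return -ENOMEM;
+
+	dpsw_acl_prepare_entry_cfg(key, cmd_buf);
+
+	key_iova = dma_map_single(dev, cmd_buf, 256, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, key_iova)) {
+		kfree(cmd_buf);
+		return -ENOMEM;
+	}
+
+	entry_cfg.key_iova = key_iova;
+	err = dpsw_acl_add_entry(mc_io, 0, token, acl_id, &entry_cfg);
+
+	dma_unmap_single(dev, key_iova, 256, DMA_TO_DEVICE);
+	kfree(cmd_buf);
+
+	return err;
+}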
+
+/**
+ * enum dpsw_reflection_filter - Filter type for frames to be reflected
+ * @DPSW_REFLECTION_FILTER_INGRESS_ALL: Reflect all frames
+ * @DPSW_REFLECTION_FILTER_INGRESS_VLAN: Reflect only frames that belong to
+ * the particular VLAN defined by the vid parameter
+ *
+ */
+enum dpsw_reflection_filter {
+ DPSW_REFLECTION_FILTER_INGRESS_ALL = 0,
+ DPSW_REFLECTION_FILTER_INGRESS_VLAN = 1
+};
+
+/**
+ * struct dpsw_reflection_cfg - Structure representing the mirroring config
+ * @filter: Filter type for frames to be mirrored
+ * @vlan_id: VLAN ID to mirror; valid only when the filter type is
+ * DPSW_REFLECTION_FILTER_INGRESS_VLAN
+ */
+struct dpsw_reflection_cfg {
+ enum dpsw_reflection_filter filter;
+ u16 vlan_id;
+};
+
+int dpsw_set_reflection_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id);
+
+int dpsw_if_add_reflection(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, const struct dpsw_reflection_cfg *cfg);
+
+int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, const struct dpsw_reflection_cfg *cfg);
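+
+/* Hypothetical helper illustrating the mirroring setup order: select the
+ * destination (analyzer) interface once per switch, then add a reflection
+ * rule on each interface whose ingress traffic should be mirrored.
+ */
+static inline int dpsw_example_mirror_port(struct fsl_mc_io *mc_io, u16 token,
+					   u16 mirrored_if, u16 analyzer_if)
+{
+	struct dpsw_reflection_cfg cfg = {
+		.filter = DPSW_REFLECTION_FILTER_INGRESS_ALL,
+	};
+	int err;
+
+	err = dpsw_set_reflection_if(mc_io, 0, token, analyzer_if);
+	if (err)
+		return err;
+
+	return dpsw_if_add_reflection(mc_io, 0, token, mirrored_if, &cfg);
+}
+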
+#endif /* __FSL_DPSW_H */
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
new file mode 100644
index 000000000..cdc0ff893
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: GPL-2.0
+config FSL_ENETC
+ tristate "ENETC PF driver"
+ depends on PCI && PCI_MSI
+ select FSL_ENETC_IERB
+ select FSL_ENETC_MDIO
+ select PHYLINK
+ select PCS_LYNX
+ select DIMLIB
+ help
+ This driver supports NXP ENETC gigabit ethernet controller PCIe
+ physical function (PF) devices, managing ENETC Ports at a privileged
+ level.
+
+ If compiled as module (M), the module name is fsl-enetc.
+
+config FSL_ENETC_VF
+ tristate "ENETC VF driver"
+ depends on PCI && PCI_MSI
+ select FSL_ENETC_MDIO
+ select PHYLINK
+ select DIMLIB
+ help
+ This driver supports NXP ENETC gigabit ethernet controller PCIe
+ virtual function (VF) devices enabled by the ENETC PF driver.
+
+ If compiled as module (M), the module name is fsl-enetc-vf.
+
+config FSL_ENETC_IERB
+ tristate "ENETC IERB driver"
+ help
+ This driver configures the Integrated Endpoint Register Block on NXP
+ LS1028A.
+
+ If compiled as module (M), the module name is fsl-enetc-ierb.
+
+config FSL_ENETC_MDIO
+ tristate "ENETC MDIO driver"
+ depends on PCI && MDIO_DEVRES && MDIO_BUS
+ help
+ This driver supports NXP ENETC Central MDIO controller as a PCIe
+ physical function (PF) device.
+
+ If compiled as module (M), the module name is fsl-enetc-mdio.
+
+config FSL_ENETC_PTP_CLOCK
+ tristate "ENETC PTP clock driver"
+ depends on PTP_1588_CLOCK_QORIQ && (FSL_ENETC || FSL_ENETC_VF)
+ default y
+ help
+ This driver adds support for using the ENETC 1588 timer
+ as a PTP clock. This clock is only useful if your PTP
+ programs are getting hardware time stamps on the PTP Ethernet
+ packets using the SO_TIMESTAMPING API.
+
+ If compiled as module (M), the module name is fsl-enetc-ptp.
+
+config FSL_ENETC_QOS
+ bool "ENETC hardware Time-sensitive Network support"
+ depends on (FSL_ENETC || FSL_ENETC_VF) && (NET_SCH_TAPRIO || NET_SCH_CBS)
+ help
+ This enables the Time-Sensitive Networking (TSN) capabilities of the
+ ENETC (802.1Qbv, 802.1Qci, 802.1Qbu, etc.). These TSN offloads can be
+ enabled or disabled from user space via the tc (QoS) commands and are
+ handled on the kernel side by the QoS driver. Currently only taprio
+ (802.1Qbv) and the Credit Based Shaper (802.1Qav) are supported.
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
new file mode 100644
index 000000000..e0e8dfd13
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0
+
+common-objs := enetc.o enetc_cbdr.o enetc_ethtool.o
+
+obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o
+fsl-enetc-y := enetc_pf.o $(common-objs)
+fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o
+fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
+
+obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
+fsl-enetc-vf-y := enetc_vf.o $(common-objs)
+
+obj-$(CONFIG_FSL_ENETC_IERB) += fsl-enetc-ierb.o
+fsl-enetc-ierb-y := enetc_ierb.o
+
+obj-$(CONFIG_FSL_ENETC_MDIO) += fsl-enetc-mdio.o
+fsl-enetc-mdio-y := enetc_pci_mdio.o enetc_mdio.o
+
+obj-$(CONFIG_FSL_ENETC_PTP_CLOCK) += fsl-enetc-ptp.o
+fsl-enetc-ptp-y := enetc_ptp.o
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
new file mode 100644
index 000000000..25c303406
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -0,0 +1,2937 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include "enetc.h"
+#include <linux/bpf_trace.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/vmalloc.h>
+#include <linux/ptp_classify.h>
+#include <net/ip6_checksum.h>
+#include <net/pkt_sched.h>
+#include <net/tso.h>
+
+static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv)
+{
+ int num_tx_rings = priv->num_tx_rings;
+ int i;
+
+ for (i = 0; i < priv->num_rx_rings; i++)
+ if (priv->rx_ring[i]->xdp.prog)
+ return num_tx_rings - num_possible_cpus();
+
+ return num_tx_rings;
+}
+
+static struct enetc_bdr *enetc_rx_ring_from_xdp_tx_ring(struct enetc_ndev_priv *priv,
+ struct enetc_bdr *tx_ring)
+{
+ int index = &priv->tx_ring[tx_ring->index] - priv->xdp_tx_ring;
+
+ return priv->rx_ring[index];
+}
+
+static struct sk_buff *enetc_tx_swbd_get_skb(struct enetc_tx_swbd *tx_swbd)
+{
+ if (tx_swbd->is_xdp_tx || tx_swbd->is_xdp_redirect)
+ return NULL;
+
+ return tx_swbd->skb;
+}
+
+static struct xdp_frame *
+enetc_tx_swbd_get_xdp_frame(struct enetc_tx_swbd *tx_swbd)
+{
+ if (tx_swbd->is_xdp_redirect)
+ return tx_swbd->xdp_frame;
+
+ return NULL;
+}
+
+static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
+ struct enetc_tx_swbd *tx_swbd)
+{
+ /* For XDP_TX, pages come from RX, whereas for the other contexts where
+ * we have is_dma_page_set, those come from skb_frag_dma_map. We need
+ * to match the DMA mapping length, so we need to differentiate those.
+ */
+ if (tx_swbd->is_dma_page)
+ dma_unmap_page(tx_ring->dev, tx_swbd->dma,
+ tx_swbd->is_xdp_tx ? PAGE_SIZE : tx_swbd->len,
+ tx_swbd->dir);
+ else
+ dma_unmap_single(tx_ring->dev, tx_swbd->dma,
+ tx_swbd->len, tx_swbd->dir);
+ tx_swbd->dma = 0;
+}
+
+static void enetc_free_tx_frame(struct enetc_bdr *tx_ring,
+ struct enetc_tx_swbd *tx_swbd)
+{
+ struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd);
+ struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd);
+
+ if (tx_swbd->dma)
+ enetc_unmap_tx_buff(tx_ring, tx_swbd);
+
+ if (xdp_frame) {
+ xdp_return_frame(tx_swbd->xdp_frame);
+ tx_swbd->xdp_frame = NULL;
+ } else if (skb) {
+ dev_kfree_skb_any(skb);
+ tx_swbd->skb = NULL;
+ }
+}
+
+/* Let H/W know BD ring has been updated */
+static void enetc_update_tx_ring_tail(struct enetc_bdr *tx_ring)
+{
+ /* includes wmb() */
+ enetc_wr_reg_hot(tx_ring->tpir, tx_ring->next_to_use);
+}
+
+static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp,
+ u8 *msgtype, u8 *twostep,
+ u16 *correction_offset, u16 *body_offset)
+{
+ unsigned int ptp_class;
+ struct ptp_header *hdr;
+ unsigned int type;
+ u8 *base;
+
+ ptp_class = ptp_classify_raw(skb);
+ if (ptp_class == PTP_CLASS_NONE)
+ return -EINVAL;
+
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
+ return -EINVAL;
+
+ type = ptp_class & PTP_CLASS_PMASK;
+ if (type == PTP_CLASS_IPV4 || type == PTP_CLASS_IPV6)
+ *udp = 1;
+ else
+ *udp = 0;
+
+ *msgtype = ptp_get_msgtype(hdr, ptp_class);
+ *twostep = hdr->flag_field[0] & 0x2;
+
+ base = skb_mac_header(skb);
+ *correction_offset = (u8 *)&hdr->correction - base;
+ *body_offset = (u8 *)hdr + sizeof(struct ptp_header) - base;
+
+ return 0;
+}
+
+static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
+{
+ bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false;
+ struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_tx_swbd *tx_swbd;
+ int len = skb_headlen(skb);
+ union enetc_tx_bd temp_bd;
+ u8 msgtype, twostep, udp;
+ union enetc_tx_bd *txbd;
+ u16 offset1, offset2;
+ int i, count = 0;
+ skb_frag_t *frag;
+ unsigned int f;
+ dma_addr_t dma;
+ u8 flags = 0;
+
+ i = tx_ring->next_to_use;
+ txbd = ENETC_TXBD(*tx_ring, i);
+ prefetchw(txbd);
+
+ dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
+ goto dma_err;
+
+ temp_bd.addr = cpu_to_le64(dma);
+ temp_bd.buf_len = cpu_to_le16(len);
+ temp_bd.lstatus = 0;
+
+ tx_swbd = &tx_ring->tx_swbd[i];
+ tx_swbd->dma = dma;
+ tx_swbd->len = len;
+ tx_swbd->is_dma_page = 0;
+ tx_swbd->dir = DMA_TO_DEVICE;
+ count++;
+
+ do_vlan = skb_vlan_tag_present(skb);
+ if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
+ if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep, &offset1,
+ &offset2) ||
+ msgtype != PTP_MSGTYPE_SYNC || twostep)
+ WARN_ONCE(1, "Bad packet for one-step timestamping\n");
+ else
+ do_onestep_tstamp = true;
+ } else if (skb->cb[0] & ENETC_F_TX_TSTAMP) {
+ do_twostep_tstamp = true;
+ }
+
+ tx_swbd->do_twostep_tstamp = do_twostep_tstamp;
+ tx_swbd->qbv_en = !!(priv->active_offloads & ENETC_F_QBV);
+ tx_swbd->check_wb = tx_swbd->do_twostep_tstamp || tx_swbd->qbv_en;
+
+ if (do_vlan || do_onestep_tstamp || do_twostep_tstamp)
+ flags |= ENETC_TXBD_FLAGS_EX;
+
+ if (tx_ring->tsd_enable)
+ flags |= ENETC_TXBD_FLAGS_TSE | ENETC_TXBD_FLAGS_TXSTART;
+
+ /* first BD needs frm_len and offload flags set */
+ temp_bd.frm_len = cpu_to_le16(skb->len);
+ temp_bd.flags = flags;
+
+ if (flags & ENETC_TXBD_FLAGS_TSE)
+ temp_bd.txstart = enetc_txbd_set_tx_start(skb->skb_mstamp_ns,
+ flags);
+
+ if (flags & ENETC_TXBD_FLAGS_EX) {
+ u8 e_flags = 0;
+ *txbd = temp_bd;
+ enetc_clear_tx_bd(&temp_bd);
+
+ /* add extension BD for VLAN and/or timestamping */
+ flags = 0;
+ tx_swbd++;
+ txbd++;
+ i++;
+ if (unlikely(i == tx_ring->bd_count)) {
+ i = 0;
+ tx_swbd = tx_ring->tx_swbd;
+ txbd = ENETC_TXBD(*tx_ring, 0);
+ }
+ prefetchw(txbd);
+
+ if (do_vlan) {
+ temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
+ temp_bd.ext.tpid = 0; /* < C-TAG */
+ e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
+ }
+
+ if (do_onestep_tstamp) {
+ u32 lo, hi, val;
+ u64 sec, nsec;
+ u8 *data;
+
+ lo = enetc_rd_hot(hw, ENETC_SICTR0);
+ hi = enetc_rd_hot(hw, ENETC_SICTR1);
+ sec = (u64)hi << 32 | lo;
+ nsec = do_div(sec, 1000000000);
+
+ /* Configure extension BD */
+ temp_bd.ext.tstamp = cpu_to_le32(lo & 0x3fffffff);
+ e_flags |= ENETC_TXBD_E_FLAGS_ONE_STEP_PTP;
+
+ /* Update originTimestamp field of Sync packet
+ * - 48 bits seconds field
+ * - 32 bits nanoseconds field
+ */
+ data = skb_mac_header(skb);
+ *(__be16 *)(data + offset2) =
+ htons((sec >> 32) & 0xffff);
+ *(__be32 *)(data + offset2 + 2) =
+ htonl(sec & 0xffffffff);
+ *(__be32 *)(data + offset2 + 6) = htonl(nsec);
+
+ /* Configure single-step register */
+ val = ENETC_PM0_SINGLE_STEP_EN;
+ val |= ENETC_SET_SINGLE_STEP_OFFSET(offset1);
+ if (udp)
+ val |= ENETC_PM0_SINGLE_STEP_CH;
+
+ enetc_port_wr(hw, ENETC_PM0_SINGLE_STEP, val);
+ enetc_port_wr(hw, ENETC_PM1_SINGLE_STEP, val);
+ } else if (do_twostep_tstamp) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
+ }
+
+ temp_bd.ext.e_flags = e_flags;
+ count++;
+ }
+
+ frag = &skb_shinfo(skb)->frags[0];
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
+ len = skb_frag_size(frag);
+ dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_err;
+
+ *txbd = temp_bd;
+ enetc_clear_tx_bd(&temp_bd);
+
+ flags = 0;
+ tx_swbd++;
+ txbd++;
+ i++;
+ if (unlikely(i == tx_ring->bd_count)) {
+ i = 0;
+ tx_swbd = tx_ring->tx_swbd;
+ txbd = ENETC_TXBD(*tx_ring, 0);
+ }
+ prefetchw(txbd);
+
+ temp_bd.addr = cpu_to_le64(dma);
+ temp_bd.buf_len = cpu_to_le16(len);
+
+ tx_swbd->dma = dma;
+ tx_swbd->len = len;
+ tx_swbd->is_dma_page = 1;
+ tx_swbd->dir = DMA_TO_DEVICE;
+ count++;
+ }
+
+ /* last BD needs 'F' bit set */
+ flags |= ENETC_TXBD_FLAGS_F;
+ temp_bd.flags = flags;
+ *txbd = temp_bd;
+
+ tx_ring->tx_swbd[i].is_eof = true;
+ tx_ring->tx_swbd[i].skb = skb;
+
+ enetc_bdr_idx_inc(tx_ring, &i);
+ tx_ring->next_to_use = i;
+
+ skb_tx_timestamp(skb);
+
+ enetc_update_tx_ring_tail(tx_ring);
+
+ return count;
+
+dma_err:
+ dev_err(tx_ring->dev, "DMA map error");
+
+ do {
+ tx_swbd = &tx_ring->tx_swbd[i];
+ enetc_free_tx_frame(tx_ring, tx_swbd);
+ if (i == 0)
+ i = tx_ring->bd_count;
+ i--;
+ } while (count--);
+
+ return 0;
+}
+
+static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ struct enetc_tx_swbd *tx_swbd,
+ union enetc_tx_bd *txbd, int *i, int hdr_len,
+ int data_len)
+{
+ union enetc_tx_bd txbd_tmp;
+ u8 flags = 0, e_flags = 0;
+ dma_addr_t addr;
+
+ enetc_clear_tx_bd(&txbd_tmp);
+ addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE;
+
+ if (skb_vlan_tag_present(skb))
+ flags |= ENETC_TXBD_FLAGS_EX;
+
+ txbd_tmp.addr = cpu_to_le64(addr);
+ txbd_tmp.buf_len = cpu_to_le16(hdr_len);
+
+ /* first BD needs frm_len and offload flags set */
+ txbd_tmp.frm_len = cpu_to_le16(hdr_len + data_len);
+ txbd_tmp.flags = flags;
+
+ /* For the TSO header we do not set the dma address since we do not
+ * want it unmapped when we do cleanup. We still set len so that we
+ * count the bytes sent.
+ */
+ tx_swbd->len = hdr_len;
+ tx_swbd->do_twostep_tstamp = false;
+ tx_swbd->check_wb = false;
+
+ /* Actually write the header in the BD */
+ *txbd = txbd_tmp;
+
+ /* Add extension BD for VLAN */
+ if (flags & ENETC_TXBD_FLAGS_EX) {
+ /* Get the next BD */
+ enetc_bdr_idx_inc(tx_ring, i);
+ txbd = ENETC_TXBD(*tx_ring, *i);
+ tx_swbd = &tx_ring->tx_swbd[*i];
+ prefetchw(txbd);
+
+ /* Setup the VLAN fields */
+ enetc_clear_tx_bd(&txbd_tmp);
+ txbd_tmp.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
+ txbd_tmp.ext.tpid = 0; /* < C-TAG */
+ e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
+
+ /* Write the BD */
+ txbd_tmp.ext.e_flags = e_flags;
+ *txbd = txbd_tmp;
+ }
+}
+
+static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ struct enetc_tx_swbd *tx_swbd,
+ union enetc_tx_bd *txbd, char *data,
+ int size, bool last_bd)
+{
+ union enetc_tx_bd txbd_tmp;
+ dma_addr_t addr;
+ u8 flags = 0;
+
+ enetc_clear_tx_bd(&txbd_tmp);
+
+ addr = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_ring->dev, addr))) {
+ netdev_err(tx_ring->ndev, "DMA map error\n");
+ return -ENOMEM;
+ }
+
+ if (last_bd) {
+ flags |= ENETC_TXBD_FLAGS_F;
+ tx_swbd->is_eof = 1;
+ }
+
+ txbd_tmp.addr = cpu_to_le64(addr);
+ txbd_tmp.buf_len = cpu_to_le16(size);
+ txbd_tmp.flags = flags;
+
+ tx_swbd->dma = addr;
+ tx_swbd->len = size;
+ tx_swbd->dir = DMA_TO_DEVICE;
+
+ *txbd = txbd_tmp;
+
+ return 0;
+}
+
+static __wsum enetc_tso_hdr_csum(struct tso_t *tso, struct sk_buff *skb,
+ char *hdr, int hdr_len, int *l4_hdr_len)
+{
+ char *l4_hdr = hdr + skb_transport_offset(skb);
+ int mac_hdr_len = skb_network_offset(skb);
+
+ if (tso->tlen != sizeof(struct udphdr)) {
+ struct tcphdr *tcph = (struct tcphdr *)(l4_hdr);
+
+ tcph->check = 0;
+ } else {
+ struct udphdr *udph = (struct udphdr *)(l4_hdr);
+
+ udph->check = 0;
+ }
+
+ /* Compute the IP checksum. This is necessary since tso_build_hdr()
+ * already incremented the IP ID field.
+ */
+ if (!tso->ipv6) {
+ struct iphdr *iph = (void *)(hdr + mac_hdr_len);
+
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+ }
+
+ /* Compute the checksum over the L4 header. */
+ *l4_hdr_len = hdr_len - skb_transport_offset(skb);
+ return csum_partial(l4_hdr, *l4_hdr_len, 0);
+}
+
+static void enetc_tso_complete_csum(struct enetc_bdr *tx_ring, struct tso_t *tso,
+ struct sk_buff *skb, char *hdr, int len,
+ __wsum sum)
+{
+ char *l4_hdr = hdr + skb_transport_offset(skb);
+ __sum16 csum_final;
+
+ /* Complete the L4 checksum by appending the pseudo-header to the
+ * already computed checksum.
+ */
+ if (!tso->ipv6)
+ csum_final = csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr,
+ len, ip_hdr(skb)->protocol, sum);
+ else
+ csum_final = csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ len, ipv6_hdr(skb)->nexthdr, sum);
+
+ if (tso->tlen != sizeof(struct udphdr)) {
+ struct tcphdr *tcph = (struct tcphdr *)(l4_hdr);
+
+ tcph->check = csum_final;
+ } else {
+ struct udphdr *udph = (struct udphdr *)(l4_hdr);
+
+ udph->check = csum_final;
+ }
+}
+
+static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
+{
+ int hdr_len, total_len, data_len;
+ struct enetc_tx_swbd *tx_swbd;
+ union enetc_tx_bd *txbd;
+ struct tso_t tso;
+ __wsum csum, csum2;
+ int count = 0, pos;
+ int err, i, bd_data_num;
+
+ /* Initialize the TSO handler, and prepare the first payload */
+ hdr_len = tso_start(skb, &tso);
+ total_len = skb->len - hdr_len;
+ i = tx_ring->next_to_use;
+
+ while (total_len > 0) {
+ char *hdr;
+
+ /* Get the BD */
+ txbd = ENETC_TXBD(*tx_ring, i);
+ tx_swbd = &tx_ring->tx_swbd[i];
+ prefetchw(txbd);
+
+ /* Determine the length of this packet */
+ data_len = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+ total_len -= data_len;
+
+ /* prepare packet headers: MAC + IP + TCP */
+ hdr = tx_ring->tso_headers + i * TSO_HEADER_SIZE;
+ tso_build_hdr(skb, hdr, &tso, data_len, total_len == 0);
+
+ /* compute the csum over the L4 header */
+ csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos);
+ enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, &i, hdr_len, data_len);
+ bd_data_num = 0;
+ count++;
+
+ while (data_len > 0) {
+ int size;
+
+ size = min_t(int, tso.size, data_len);
+
+ /* Advance the index in the BDR */
+ enetc_bdr_idx_inc(tx_ring, &i);
+ txbd = ENETC_TXBD(*tx_ring, i);
+ tx_swbd = &tx_ring->tx_swbd[i];
+ prefetchw(txbd);
+
+ /* Compute the checksum over this segment of data and
+ * add it to the csum already computed (over the L4
+ * header and possible other data segments).
+ */
+ csum2 = csum_partial(tso.data, size, 0);
+ csum = csum_block_add(csum, csum2, pos);
+ pos += size;
+
+ err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd,
+ tso.data, size,
+ size == data_len);
+ if (err)
+ goto err_map_data;
+
+ data_len -= size;
+ count++;
+ bd_data_num++;
+ tso_build_data(skb, &tso, size);
+
+ if (unlikely(bd_data_num >= ENETC_MAX_SKB_FRAGS && data_len))
+ goto err_chained_bd;
+ }
+
+ enetc_tso_complete_csum(tx_ring, &tso, skb, hdr, pos, csum);
+
+ if (total_len == 0)
+ tx_swbd->skb = skb;
+
+ /* Go to the next BD */
+ enetc_bdr_idx_inc(tx_ring, &i);
+ }
+
+ tx_ring->next_to_use = i;
+ enetc_update_tx_ring_tail(tx_ring);
+
+ return count;
+
+err_map_data:
+ dev_err(tx_ring->dev, "DMA map error");
+
+err_chained_bd:
+ do {
+ tx_swbd = &tx_ring->tx_swbd[i];
+ enetc_free_tx_frame(tx_ring, tx_swbd);
+ if (i == 0)
+ i = tx_ring->bd_count;
+ i--;
+ } while (count--);
+
+ return 0;
+}
+
+static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_bdr *tx_ring;
+ int count, err;
+
+ /* Queue one-step Sync packet if already locked */
+ if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
+ if (test_and_set_bit_lock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS,
+ &priv->flags)) {
+ skb_queue_tail(&priv->tx_skbs, skb);
+ return NETDEV_TX_OK;
+ }
+ }
+
+ tx_ring = priv->tx_ring[skb->queue_mapping];
+
+ if (skb_is_gso(skb)) {
+ if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) {
+ netif_stop_subqueue(ndev, tx_ring->index);
+ return NETDEV_TX_BUSY;
+ }
+
+ enetc_lock_mdio();
+ count = enetc_map_tx_tso_buffs(tx_ring, skb);
+ enetc_unlock_mdio();
+ } else {
+ if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
+ if (unlikely(skb_linearize(skb)))
+ goto drop_packet_err;
+
+ count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
+ if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
+ netif_stop_subqueue(ndev, tx_ring->index);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ err = skb_checksum_help(skb);
+ if (err)
+ goto drop_packet_err;
+ }
+ enetc_lock_mdio();
+ count = enetc_map_tx_buffs(tx_ring, skb);
+ enetc_unlock_mdio();
+ }
+
+ if (unlikely(!count))
+ goto drop_packet_err;
+
+ if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
+ netif_stop_subqueue(ndev, tx_ring->index);
+
+ return NETDEV_TX_OK;
+
+drop_packet_err:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ u8 udp, msgtype, twostep;
+ u16 offset1, offset2;
+
+ /* Mark tx timestamp type on skb->cb[0] if required */
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ (priv->active_offloads & ENETC_F_TX_TSTAMP_MASK)) {
+ skb->cb[0] = priv->active_offloads & ENETC_F_TX_TSTAMP_MASK;
+ } else {
+ skb->cb[0] = 0;
+ }
+
+ /* Fall back to two-step timestamp if not one-step Sync packet */
+ if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
+ if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep,
+ &offset1, &offset2) ||
+ msgtype != PTP_MSGTYPE_SYNC || twostep != 0)
+ skb->cb[0] = ENETC_F_TX_TSTAMP;
+ }
+
+ return enetc_start_xmit(skb, ndev);
+}
+
+static irqreturn_t enetc_msix(int irq, void *data)
+{
+ struct enetc_int_vector *v = data;
+ int i;
+
+ enetc_lock_mdio();
+
+ /* disable interrupts */
+ enetc_wr_reg_hot(v->rbier, 0);
+ enetc_wr_reg_hot(v->ricr1, v->rx_ictt);
+
+ for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
+ enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), 0);
+
+ enetc_unlock_mdio();
+
+ napi_schedule(&v->napi);
+
+ return IRQ_HANDLED;
+}
+
+static void enetc_rx_dim_work(struct work_struct *w)
+{
+ struct dim *dim = container_of(w, struct dim, work);
+ struct dim_cq_moder moder =
+ net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ struct enetc_int_vector *v =
+ container_of(dim, struct enetc_int_vector, rx_dim);
+
+ v->rx_ictt = enetc_usecs_to_cycles(moder.usec);
+ dim->state = DIM_START_MEASURE;
+}
+
+static void enetc_rx_net_dim(struct enetc_int_vector *v)
+{
+ struct dim_sample dim_sample = {};
+
+ v->comp_cnt++;
+
+ if (!v->rx_napi_work)
+ return;
+
+ dim_update_sample(v->comp_cnt,
+ v->rx_ring.stats.packets,
+ v->rx_ring.stats.bytes,
+ &dim_sample);
+ net_dim(&v->rx_dim, dim_sample);
+}
+
+static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
+{
+ int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
+
+ return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
+}
+
+static bool enetc_page_reusable(struct page *page)
+{
+ return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
+}
+
+static void enetc_reuse_page(struct enetc_bdr *rx_ring,
+ struct enetc_rx_swbd *old)
+{
+ struct enetc_rx_swbd *new;
+
+ new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];
+
+ /* next buf that may reuse a page */
+ enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);
+
+ /* copy page reference */
+ *new = *old;
+}
+
+static void enetc_get_tx_tstamp(struct enetc_hw *hw, union enetc_tx_bd *txbd,
+ u64 *tstamp)
+{
+ u32 lo, hi, tstamp_lo;
+
+ lo = enetc_rd_hot(hw, ENETC_SICTR0);
+ hi = enetc_rd_hot(hw, ENETC_SICTR1);
+ tstamp_lo = le32_to_cpu(txbd->wb.tstamp);
+ if (lo <= tstamp_lo)
+ hi -= 1;
+ *tstamp = (u64)hi << 32 | tstamp_lo;
+}
+
+static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp)
+{
+ struct skb_shared_hwtstamps shhwtstamps;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
+ skb_txtime_consumed(skb);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ }
+}
+
+static void enetc_recycle_xdp_tx_buff(struct enetc_bdr *tx_ring,
+ struct enetc_tx_swbd *tx_swbd)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
+ struct enetc_rx_swbd rx_swbd = {
+ .dma = tx_swbd->dma,
+ .page = tx_swbd->page,
+ .page_offset = tx_swbd->page_offset,
+ .dir = tx_swbd->dir,
+ .len = tx_swbd->len,
+ };
+ struct enetc_bdr *rx_ring;
+
+ rx_ring = enetc_rx_ring_from_xdp_tx_ring(priv, tx_ring);
+
+ if (likely(enetc_swbd_unused(rx_ring))) {
+ enetc_reuse_page(rx_ring, &rx_swbd);
+
+ /* sync for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, rx_swbd.dma,
+ rx_swbd.page_offset,
+ ENETC_RXB_DMA_SIZE_XDP,
+ rx_swbd.dir);
+
+ rx_ring->stats.recycles++;
+ } else {
+ /* RX ring is already full, we need to unmap and free the
+ * page, since there's nothing useful we can do with it.
+ */
+ rx_ring->stats.recycle_failures++;
+
+ dma_unmap_page(rx_ring->dev, rx_swbd.dma, PAGE_SIZE,
+ rx_swbd.dir);
+ __free_page(rx_swbd.page);
+ }
+
+ rx_ring->xdp.xdp_tx_in_flight--;
+}
+
+static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
+{
+ int tx_frm_cnt = 0, tx_byte_cnt = 0, tx_win_drop = 0;
+ struct net_device *ndev = tx_ring->ndev;
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_tx_swbd *tx_swbd;
+ int i, bds_to_clean;
+ bool do_twostep_tstamp;
+ u64 tstamp = 0;
+
+ i = tx_ring->next_to_clean;
+ tx_swbd = &tx_ring->tx_swbd[i];
+
+ bds_to_clean = enetc_bd_ready_count(tx_ring, i);
+
+ do_twostep_tstamp = false;
+
+ while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
+ struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd);
+ struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd);
+ bool is_eof = tx_swbd->is_eof;
+
+ if (unlikely(tx_swbd->check_wb)) {
+ union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);
+
+ if (txbd->flags & ENETC_TXBD_FLAGS_W &&
+ tx_swbd->do_twostep_tstamp) {
+ enetc_get_tx_tstamp(&priv->si->hw, txbd,
+ &tstamp);
+ do_twostep_tstamp = true;
+ }
+
+ if (tx_swbd->qbv_en &&
+ txbd->wb.status & ENETC_TXBD_STATS_WIN)
+ tx_win_drop++;
+ }
+
+ if (tx_swbd->is_xdp_tx)
+ enetc_recycle_xdp_tx_buff(tx_ring, tx_swbd);
+ else if (likely(tx_swbd->dma))
+ enetc_unmap_tx_buff(tx_ring, tx_swbd);
+
+ if (xdp_frame) {
+ xdp_return_frame(xdp_frame);
+ } else if (skb) {
+ if (unlikely(skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) {
+ /* Start work to release the lock for the next
+ * one-step timestamping packet, and send one skb
+ * from the tx_skbs queue, if any.
+ */
+ schedule_work(&priv->tx_onestep_tstamp);
+ } else if (unlikely(do_twostep_tstamp)) {
+ enetc_tstamp_tx(skb, tstamp);
+ do_twostep_tstamp = false;
+ }
+ napi_consume_skb(skb, napi_budget);
+ }
+
+ tx_byte_cnt += tx_swbd->len;
+ /* Scrub the swbd here so we don't have to do that
+ * when we reuse it during xmit
+ */
+ memset(tx_swbd, 0, sizeof(*tx_swbd));
+
+ bds_to_clean--;
+ tx_swbd++;
+ i++;
+ if (unlikely(i == tx_ring->bd_count)) {
+ i = 0;
+ tx_swbd = tx_ring->tx_swbd;
+ }
+
+ /* BD iteration loop end */
+ if (is_eof) {
+ tx_frm_cnt++;
+ /* re-arm interrupt source */
+ enetc_wr_reg_hot(tx_ring->idr, BIT(tx_ring->index) |
+ BIT(16 + tx_ring->index));
+ }
+
+ if (unlikely(!bds_to_clean))
+ bds_to_clean = enetc_bd_ready_count(tx_ring, i);
+ }
+
+ tx_ring->next_to_clean = i;
+ tx_ring->stats.packets += tx_frm_cnt;
+ tx_ring->stats.bytes += tx_byte_cnt;
+ tx_ring->stats.win_drop += tx_win_drop;
+
+ if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
+ __netif_subqueue_stopped(ndev, tx_ring->index) &&
+ (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
+ netif_wake_subqueue(ndev, tx_ring->index);
+ }
+
+ return tx_frm_cnt != ENETC_DEFAULT_TX_WORK;
+}
+
+static bool enetc_new_page(struct enetc_bdr *rx_ring,
+ struct enetc_rx_swbd *rx_swbd)
+{
+ bool xdp = !!(rx_ring->xdp.prog);
+ struct page *page;
+ dma_addr_t addr;
+
+ page = dev_alloc_page();
+ if (unlikely(!page))
+ return false;
+
+ /* For XDP_TX, we forgo dma_unmap -> dma_map */
+ rx_swbd->dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
+
+ addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, rx_swbd->dir);
+ if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
+ __free_page(page);
+
+ return false;
+ }
+
+ rx_swbd->dma = addr;
+ rx_swbd->page = page;
+ rx_swbd->page_offset = rx_ring->buffer_offset;
+
+ return true;
+}
+
+static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
+{
+ struct enetc_rx_swbd *rx_swbd;
+ union enetc_rx_bd *rxbd;
+ int i, j;
+
+ i = rx_ring->next_to_use;
+ rx_swbd = &rx_ring->rx_swbd[i];
+ rxbd = enetc_rxbd(rx_ring, i);
+
+ for (j = 0; j < buff_cnt; j++) {
+ /* try reuse page */
+ if (unlikely(!rx_swbd->page)) {
+ if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) {
+ rx_ring->stats.rx_alloc_errs++;
+ break;
+ }
+ }
+
+ /* update RxBD */
+ rxbd->w.addr = cpu_to_le64(rx_swbd->dma +
+ rx_swbd->page_offset);
+ /* clear 'R' as well */
+ rxbd->r.lstatus = 0;
+
+ enetc_rxbd_next(rx_ring, &rxbd, &i);
+ rx_swbd = &rx_ring->rx_swbd[i];
+ }
+
+ if (likely(j)) {
+ rx_ring->next_to_alloc = i; /* keep track from page reuse */
+ rx_ring->next_to_use = i;
+
+ /* update ENETC's consumer index */
+ enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use);
+ }
+
+ return j;
+}
+
+#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
+static void enetc_get_rx_tstamp(struct net_device *ndev,
+ union enetc_rx_bd *rxbd,
+ struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ u32 lo, hi, tstamp_lo;
+ u64 tstamp;
+
+ if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) {
+ lo = enetc_rd_reg_hot(hw->reg + ENETC_SICTR0);
+ hi = enetc_rd_reg_hot(hw->reg + ENETC_SICTR1);
+ rxbd = enetc_rxbd_ext(rxbd);
+ tstamp_lo = le32_to_cpu(rxbd->ext.tstamp);
+ if (lo <= tstamp_lo)
+ hi -= 1;
+
+ tstamp = (u64)hi << 32 | tstamp_lo;
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ shhwtstamps->hwtstamp = ns_to_ktime(tstamp);
+ }
+}
+#endif
+
+static void enetc_get_offloads(struct enetc_bdr *rx_ring,
+ union enetc_rx_bd *rxbd, struct sk_buff *skb)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
+
+ /* TODO: hashing */
+ if (rx_ring->ndev->features & NETIF_F_RXCSUM) {
+ u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum);
+
+ skb->csum = csum_unfold((__force __sum16)~htons(inet_csum));
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
+
+ if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN) {
+ __be16 tpid = 0;
+
+ switch (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TPID) {
+ case 0:
+ tpid = htons(ETH_P_8021Q);
+ break;
+ case 1:
+ tpid = htons(ETH_P_8021AD);
+ break;
+ case 2:
+ tpid = htons(enetc_port_rd(&priv->si->hw,
+ ENETC_PCVLANR1));
+ break;
+ case 3:
+ tpid = htons(enetc_port_rd(&priv->si->hw,
+ ENETC_PCVLANR2));
+ break;
+ default:
+ break;
+ }
+
+ __vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt));
+ }
+
+#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
+ if (priv->active_offloads & ENETC_F_RX_TSTAMP)
+ enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb);
+#endif
+}
+
+/* This gets called during the non-XDP NAPI poll cycle as well as on XDP_PASS,
+ * so it needs to work with both DMA_FROM_DEVICE as well as DMA_BIDIRECTIONAL
+ * mapped buffers.
+ */
+static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
+ int i, u16 size)
+{
+ struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
+
+ dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma,
+ rx_swbd->page_offset,
+ size, rx_swbd->dir);
+ return rx_swbd;
+}
+
+/* Reuse the current page without performing half-page buffer flipping */
+static void enetc_put_rx_buff(struct enetc_bdr *rx_ring,
+ struct enetc_rx_swbd *rx_swbd)
+{
+ size_t buffer_size = ENETC_RXB_TRUESIZE - rx_ring->buffer_offset;
+
+ enetc_reuse_page(rx_ring, rx_swbd);
+
+ dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
+ rx_swbd->page_offset,
+ buffer_size, rx_swbd->dir);
+
+ rx_swbd->page = NULL;
+}
+
+/* Reuse the current page by performing half-page buffer flipping */
+static void enetc_flip_rx_buff(struct enetc_bdr *rx_ring,
+ struct enetc_rx_swbd *rx_swbd)
+{
+ if (likely(enetc_page_reusable(rx_swbd->page))) {
+ rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
+ page_ref_inc(rx_swbd->page);
+
+ enetc_put_rx_buff(rx_ring, rx_swbd);
+ } else {
+ dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
+ rx_swbd->dir);
+ rx_swbd->page = NULL;
+ }
+}
+
+static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring,
+ int i, u16 size)
+{
+ struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
+ struct sk_buff *skb;
+ void *ba;
+
+ ba = page_address(rx_swbd->page) + rx_swbd->page_offset;
+ skb = build_skb(ba - rx_ring->buffer_offset, ENETC_RXB_TRUESIZE);
+ if (unlikely(!skb)) {
+ rx_ring->stats.rx_alloc_errs++;
+ return NULL;
+ }
+
+ skb_reserve(skb, rx_ring->buffer_offset);
+ __skb_put(skb, size);
+
+ enetc_flip_rx_buff(rx_ring, rx_swbd);
+
+ return skb;
+}
+
+static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i,
+ u16 size, struct sk_buff *skb)
+{
+ struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
+ rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE);
+
+ enetc_flip_rx_buff(rx_ring, rx_swbd);
+}
+
+static bool enetc_check_bd_errors_and_consume(struct enetc_bdr *rx_ring,
+ u32 bd_status,
+ union enetc_rx_bd **rxbd, int *i)
+{
+ if (likely(!(bd_status & ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))))
+ return false;
+
+ enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]);
+ enetc_rxbd_next(rx_ring, rxbd, i);
+
+ while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
+ dma_rmb();
+ bd_status = le32_to_cpu((*rxbd)->r.lstatus);
+
+ enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]);
+ enetc_rxbd_next(rx_ring, rxbd, i);
+ }
+
+ rx_ring->ndev->stats.rx_dropped++;
+ rx_ring->ndev->stats.rx_errors++;
+
+ return true;
+}
+
+static struct sk_buff *enetc_build_skb(struct enetc_bdr *rx_ring,
+ u32 bd_status, union enetc_rx_bd **rxbd,
+ int *i, int *cleaned_cnt, int buffer_size)
+{
+ struct sk_buff *skb;
+ u16 size;
+
+ size = le16_to_cpu((*rxbd)->r.buf_len);
+ skb = enetc_map_rx_buff_to_skb(rx_ring, *i, size);
+ if (!skb)
+ return NULL;
+
+ enetc_get_offloads(rx_ring, *rxbd, skb);
+
+ (*cleaned_cnt)++;
+
+ enetc_rxbd_next(rx_ring, rxbd, i);
+
+ /* not last BD in frame? */
+ while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
+ bd_status = le32_to_cpu((*rxbd)->r.lstatus);
+ size = buffer_size;
+
+ if (bd_status & ENETC_RXBD_LSTATUS_F) {
+ dma_rmb();
+ size = le16_to_cpu((*rxbd)->r.buf_len);
+ }
+
+ enetc_add_rx_buff_to_skb(rx_ring, *i, size, skb);
+
+ (*cleaned_cnt)++;
+
+ enetc_rxbd_next(rx_ring, rxbd, i);
+ }
+
+ skb_record_rx_queue(skb, rx_ring->index);
+ skb->protocol = eth_type_trans(skb, rx_ring->ndev);
+
+ return skb;
+}
+
+#define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */
+
+static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
+ struct napi_struct *napi, int work_limit)
+{
+ int rx_frm_cnt = 0, rx_byte_cnt = 0;
+ int cleaned_cnt, i;
+
+ cleaned_cnt = enetc_bd_unused(rx_ring);
+ /* next descriptor to process */
+ i = rx_ring->next_to_clean;
+
+ while (likely(rx_frm_cnt < work_limit)) {
+ union enetc_rx_bd *rxbd;
+ struct sk_buff *skb;
+ u32 bd_status;
+
+ if (cleaned_cnt >= ENETC_RXBD_BUNDLE)
+ cleaned_cnt -= enetc_refill_rx_ring(rx_ring,
+ cleaned_cnt);
+
+ rxbd = enetc_rxbd(rx_ring, i);
+ bd_status = le32_to_cpu(rxbd->r.lstatus);
+ if (!bd_status)
+ break;
+
+ enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
+ dma_rmb(); /* for reading other rxbd fields */
+
+ if (enetc_check_bd_errors_and_consume(rx_ring, bd_status,
+ &rxbd, &i))
+ break;
+
+ skb = enetc_build_skb(rx_ring, bd_status, &rxbd, &i,
+ &cleaned_cnt, ENETC_RXB_DMA_SIZE);
+ if (!skb)
+ break;
+
+ /* When set, the outer VLAN header is extracted and reported
+ * in the receive buffer descriptor. So rx_byte_cnt should
+ * add the length of the extracted VLAN header.
+ */
+ if (bd_status & ENETC_RXBD_FLAG_VLAN)
+ rx_byte_cnt += VLAN_HLEN;
+ rx_byte_cnt += skb->len + ETH_HLEN;
+ rx_frm_cnt++;
+
+ napi_gro_receive(napi, skb);
+ }
+
+ rx_ring->next_to_clean = i;
+
+ rx_ring->stats.packets += rx_frm_cnt;
+ rx_ring->stats.bytes += rx_byte_cnt;
+
+ return rx_frm_cnt;
+}
+
+static void enetc_xdp_map_tx_buff(struct enetc_bdr *tx_ring, int i,
+ struct enetc_tx_swbd *tx_swbd,
+ int frm_len)
+{
+ union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);
+
+ prefetchw(txbd);
+
+ enetc_clear_tx_bd(txbd);
+ txbd->addr = cpu_to_le64(tx_swbd->dma + tx_swbd->page_offset);
+ txbd->buf_len = cpu_to_le16(tx_swbd->len);
+ txbd->frm_len = cpu_to_le16(frm_len);
+
+ memcpy(&tx_ring->tx_swbd[i], tx_swbd, sizeof(*tx_swbd));
+}
+
+/* Puts in the TX ring one XDP frame, mapped as an array of TX software buffer
+ * descriptors.
+ */
+static bool enetc_xdp_tx(struct enetc_bdr *tx_ring,
+ struct enetc_tx_swbd *xdp_tx_arr, int num_tx_swbd)
+{
+ struct enetc_tx_swbd *tmp_tx_swbd = xdp_tx_arr;
+ int i, k, frm_len = tmp_tx_swbd->len;
+
+ if (unlikely(enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(num_tx_swbd)))
+ return false;
+
+ while (unlikely(!tmp_tx_swbd->is_eof)) {
+ tmp_tx_swbd++;
+ frm_len += tmp_tx_swbd->len;
+ }
+
+ i = tx_ring->next_to_use;
+
+ for (k = 0; k < num_tx_swbd; k++) {
+ struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[k];
+
+ enetc_xdp_map_tx_buff(tx_ring, i, xdp_tx_swbd, frm_len);
+
+ /* last BD needs 'F' bit set */
+ if (xdp_tx_swbd->is_eof) {
+ union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);
+
+ txbd->flags = ENETC_TXBD_FLAGS_F;
+ }
+
+ enetc_bdr_idx_inc(tx_ring, &i);
+ }
+
+ tx_ring->next_to_use = i;
+
+ return true;
+}
+
+static int enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr *tx_ring,
+ struct enetc_tx_swbd *xdp_tx_arr,
+ struct xdp_frame *xdp_frame)
+{
+ struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[0];
+ struct skb_shared_info *shinfo;
+ void *data = xdp_frame->data;
+ int len = xdp_frame->len;
+ skb_frag_t *frag;
+ dma_addr_t dma;
+ unsigned int f;
+ int n = 0;
+
+ dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_ring->dev, dma))) {
+ netdev_err(tx_ring->ndev, "DMA map error\n");
+ return -1;
+ }
+
+ xdp_tx_swbd->dma = dma;
+ xdp_tx_swbd->dir = DMA_TO_DEVICE;
+ xdp_tx_swbd->len = len;
+ xdp_tx_swbd->is_xdp_redirect = true;
+ xdp_tx_swbd->is_eof = false;
+ xdp_tx_swbd->xdp_frame = NULL;
+
+ n++;
+ xdp_tx_swbd = &xdp_tx_arr[n];
+
+ shinfo = xdp_get_shared_info_from_frame(xdp_frame);
+
+ for (f = 0, frag = &shinfo->frags[0]; f < shinfo->nr_frags;
+ f++, frag++) {
+ data = skb_frag_address(frag);
+ len = skb_frag_size(frag);
+
+ dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_ring->dev, dma))) {
+ /* Undo the DMA mapping for all fragments */
+ while (--n >= 0)
+ enetc_unmap_tx_buff(tx_ring, &xdp_tx_arr[n]);
+
+ netdev_err(tx_ring->ndev, "DMA map error\n");
+ return -1;
+ }
+
+ xdp_tx_swbd->dma = dma;
+ xdp_tx_swbd->dir = DMA_TO_DEVICE;
+ xdp_tx_swbd->len = len;
+ xdp_tx_swbd->is_xdp_redirect = true;
+ xdp_tx_swbd->is_eof = false;
+ xdp_tx_swbd->xdp_frame = NULL;
+
+ n++;
+ xdp_tx_swbd = &xdp_tx_arr[n];
+ }
+
+ xdp_tx_arr[n - 1].is_eof = true;
+ xdp_tx_arr[n - 1].xdp_frame = xdp_frame;
+
+ return n;
+}
+
+int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct enetc_tx_swbd xdp_redirect_arr[ENETC_MAX_SKB_FRAGS] = {0};
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_bdr *tx_ring;
+ int xdp_tx_bd_cnt, i, k;
+ int xdp_tx_frm_cnt = 0;
+
+ enetc_lock_mdio();
+
+ tx_ring = priv->xdp_tx_ring[smp_processor_id()];
+
+ prefetchw(ENETC_TXBD(*tx_ring, tx_ring->next_to_use));
+
+ for (k = 0; k < num_frames; k++) {
+ xdp_tx_bd_cnt = enetc_xdp_frame_to_xdp_tx_swbd(tx_ring,
+ xdp_redirect_arr,
+ frames[k]);
+ if (unlikely(xdp_tx_bd_cnt < 0))
+ break;
+
+ if (unlikely(!enetc_xdp_tx(tx_ring, xdp_redirect_arr,
+ xdp_tx_bd_cnt))) {
+ for (i = 0; i < xdp_tx_bd_cnt; i++)
+ enetc_unmap_tx_buff(tx_ring,
+ &xdp_redirect_arr[i]);
+ tx_ring->stats.xdp_tx_drops++;
+ break;
+ }
+
+ xdp_tx_frm_cnt++;
+ }
+
+ if (unlikely((flags & XDP_XMIT_FLUSH) || k != xdp_tx_frm_cnt))
+ enetc_update_tx_ring_tail(tx_ring);
+
+ tx_ring->stats.xdp_tx += xdp_tx_frm_cnt;
+
+ enetc_unlock_mdio();
+
+ return xdp_tx_frm_cnt;
+}
+
+static void enetc_map_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
+ struct xdp_buff *xdp_buff, u16 size)
+{
+ struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
+ void *hard_start = page_address(rx_swbd->page) + rx_swbd->page_offset;
+ struct skb_shared_info *shinfo;
+
+ /* To be used for XDP_TX */
+ rx_swbd->len = size;
+
+ xdp_prepare_buff(xdp_buff, hard_start - rx_ring->buffer_offset,
+ rx_ring->buffer_offset, size, false);
+
+ shinfo = xdp_get_shared_info_from_buff(xdp_buff);
+ shinfo->nr_frags = 0;
+}
+
+static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
+ u16 size, struct xdp_buff *xdp_buff)
+{
+ struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp_buff);
+ struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
+ skb_frag_t *frag = &shinfo->frags[shinfo->nr_frags];
+
+ /* To be used for XDP_TX */
+ rx_swbd->len = size;
+
+ skb_frag_off_set(frag, rx_swbd->page_offset);
+ skb_frag_size_set(frag, size);
+ __skb_frag_set_page(frag, rx_swbd->page);
+
+ shinfo->nr_frags++;
+}
+
+static void enetc_build_xdp_buff(struct enetc_bdr *rx_ring, u32 bd_status,
+ union enetc_rx_bd **rxbd, int *i,
+ int *cleaned_cnt, struct xdp_buff *xdp_buff)
+{
+ u16 size = le16_to_cpu((*rxbd)->r.buf_len);
+
+ xdp_init_buff(xdp_buff, ENETC_RXB_TRUESIZE, &rx_ring->xdp.rxq);
+
+ enetc_map_rx_buff_to_xdp(rx_ring, *i, xdp_buff, size);
+ (*cleaned_cnt)++;
+ enetc_rxbd_next(rx_ring, rxbd, i);
+
+ /* not last BD in frame? */
+ while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
+ bd_status = le32_to_cpu((*rxbd)->r.lstatus);
+ size = ENETC_RXB_DMA_SIZE_XDP;
+
+ if (bd_status & ENETC_RXBD_LSTATUS_F) {
+ dma_rmb();
+ size = le16_to_cpu((*rxbd)->r.buf_len);
+ }
+
+ enetc_add_rx_buff_to_xdp(rx_ring, *i, size, xdp_buff);
+ (*cleaned_cnt)++;
+ enetc_rxbd_next(rx_ring, rxbd, i);
+ }
+}
+
+/* Convert RX buffer descriptors to TX buffer descriptors. These will be
+ * recycled back into the RX ring in enetc_clean_tx_ring.
+ */
+static int enetc_rx_swbd_to_xdp_tx_swbd(struct enetc_tx_swbd *xdp_tx_arr,
+ struct enetc_bdr *rx_ring,
+ int rx_ring_first, int rx_ring_last)
+{
+ int n = 0;
+
+ for (; rx_ring_first != rx_ring_last;
+ n++, enetc_bdr_idx_inc(rx_ring, &rx_ring_first)) {
+ struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first];
+ struct enetc_tx_swbd *tx_swbd = &xdp_tx_arr[n];
+
+ /* No need to dma_map, we already have DMA_BIDIRECTIONAL */
+ tx_swbd->dma = rx_swbd->dma;
+ tx_swbd->dir = rx_swbd->dir;
+ tx_swbd->page = rx_swbd->page;
+ tx_swbd->page_offset = rx_swbd->page_offset;
+ tx_swbd->len = rx_swbd->len;
+ tx_swbd->is_dma_page = true;
+ tx_swbd->is_xdp_tx = true;
+ tx_swbd->is_eof = false;
+ }
+
+ /* We rely on caller providing an rx_ring_last > rx_ring_first */
+ xdp_tx_arr[n - 1].is_eof = true;
+
+ return n;
+}
+
+static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first,
+ int rx_ring_last)
+{
+ while (rx_ring_first != rx_ring_last) {
+ enetc_put_rx_buff(rx_ring,
+ &rx_ring->rx_swbd[rx_ring_first]);
+ enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
+ }
+ rx_ring->stats.xdp_drops++;
+}
+
+static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
+ struct napi_struct *napi, int work_limit,
+ struct bpf_prog *prog)
+{
+ int xdp_tx_bd_cnt, xdp_tx_frm_cnt = 0, xdp_redirect_frm_cnt = 0;
+ struct enetc_tx_swbd xdp_tx_arr[ENETC_MAX_SKB_FRAGS] = {0};
+ struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
+ int rx_frm_cnt = 0, rx_byte_cnt = 0;
+ struct enetc_bdr *tx_ring;
+ int cleaned_cnt, i;
+ u32 xdp_act;
+
+ cleaned_cnt = enetc_bd_unused(rx_ring);
+ /* next descriptor to process */
+ i = rx_ring->next_to_clean;
+
+ while (likely(rx_frm_cnt < work_limit)) {
+ union enetc_rx_bd *rxbd, *orig_rxbd;
+ int orig_i, orig_cleaned_cnt;
+ struct xdp_buff xdp_buff;
+ struct sk_buff *skb;
+ u32 bd_status;
+ int err;
+
+ rxbd = enetc_rxbd(rx_ring, i);
+ bd_status = le32_to_cpu(rxbd->r.lstatus);
+ if (!bd_status)
+ break;
+
+ enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
+ dma_rmb(); /* for reading other rxbd fields */
+
+ if (enetc_check_bd_errors_and_consume(rx_ring, bd_status,
+ &rxbd, &i))
+ break;
+
+ orig_rxbd = rxbd;
+ orig_cleaned_cnt = cleaned_cnt;
+ orig_i = i;
+
+ enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i,
+ &cleaned_cnt, &xdp_buff);
+
+ /* When set, the outer VLAN header is extracted and reported
+ * in the receive buffer descriptor. So rx_byte_cnt should
+ * add the length of the extracted VLAN header.
+ */
+ if (bd_status & ENETC_RXBD_FLAG_VLAN)
+ rx_byte_cnt += VLAN_HLEN;
+ rx_byte_cnt += xdp_get_buff_len(&xdp_buff);
+
+ xdp_act = bpf_prog_run_xdp(prog, &xdp_buff);
+
+ switch (xdp_act) {
+ default:
+ bpf_warn_invalid_xdp_action(rx_ring->ndev, prog, xdp_act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(rx_ring->ndev, prog, xdp_act);
+ fallthrough;
+ case XDP_DROP:
+ enetc_xdp_drop(rx_ring, orig_i, i);
+ break;
+ case XDP_PASS:
+ rxbd = orig_rxbd;
+ cleaned_cnt = orig_cleaned_cnt;
+ i = orig_i;
+
+ skb = enetc_build_skb(rx_ring, bd_status, &rxbd,
+ &i, &cleaned_cnt,
+ ENETC_RXB_DMA_SIZE_XDP);
+ if (unlikely(!skb))
+ goto out;
+
+ napi_gro_receive(napi, skb);
+ break;
+ case XDP_TX:
+ tx_ring = priv->xdp_tx_ring[rx_ring->index];
+ xdp_tx_bd_cnt = enetc_rx_swbd_to_xdp_tx_swbd(xdp_tx_arr,
+ rx_ring,
+ orig_i, i);
+
+ if (!enetc_xdp_tx(tx_ring, xdp_tx_arr, xdp_tx_bd_cnt)) {
+ enetc_xdp_drop(rx_ring, orig_i, i);
+ tx_ring->stats.xdp_tx_drops++;
+ } else {
+ tx_ring->stats.xdp_tx += xdp_tx_bd_cnt;
+ rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt;
+ xdp_tx_frm_cnt++;
+ /* The XDP_TX enqueue was successful, so we
+ * need to scrub the RX software BDs because
+ * the ownership of the buffers no longer
+ * belongs to the RX ring, and we must prevent
+ * enetc_refill_rx_ring() from reusing
+ * rx_swbd->page.
+ */
+ while (orig_i != i) {
+ rx_ring->rx_swbd[orig_i].page = NULL;
+ enetc_bdr_idx_inc(rx_ring, &orig_i);
+ }
+ }
+ break;
+ case XDP_REDIRECT:
+ /* xdp_return_frame does not support S/G in the sense
+ * that it leaks the fragments (__xdp_return should not
+ * call page_frag_free only for the initial buffer).
+ * Until XDP_REDIRECT gains support for S/G let's keep
+ * the code structure in place, but dead. We drop the
+ * S/G frames ourselves to avoid memory leaks which
+ * would otherwise leave the kernel OOM.
+ */
+ if (unlikely(cleaned_cnt - orig_cleaned_cnt != 1)) {
+ enetc_xdp_drop(rx_ring, orig_i, i);
+ rx_ring->stats.xdp_redirect_sg++;
+ break;
+ }
+
+ err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog);
+ if (unlikely(err)) {
+ enetc_xdp_drop(rx_ring, orig_i, i);
+ rx_ring->stats.xdp_redirect_failures++;
+ } else {
+ while (orig_i != i) {
+ enetc_flip_rx_buff(rx_ring,
+ &rx_ring->rx_swbd[orig_i]);
+ enetc_bdr_idx_inc(rx_ring, &orig_i);
+ }
+ xdp_redirect_frm_cnt++;
+ rx_ring->stats.xdp_redirect++;
+ }
+ }
+
+ rx_frm_cnt++;
+ }
+
+out:
+ rx_ring->next_to_clean = i;
+
+ rx_ring->stats.packets += rx_frm_cnt;
+ rx_ring->stats.bytes += rx_byte_cnt;
+
+ if (xdp_redirect_frm_cnt)
+ xdp_do_flush_map();
+
+ if (xdp_tx_frm_cnt)
+ enetc_update_tx_ring_tail(tx_ring);
+
+ if (cleaned_cnt > rx_ring->xdp.xdp_tx_in_flight)
+ enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) -
+ rx_ring->xdp.xdp_tx_in_flight);
+
+ return rx_frm_cnt;
+}
+
+static int enetc_poll(struct napi_struct *napi, int budget)
+{
+ struct enetc_int_vector
+ *v = container_of(napi, struct enetc_int_vector, napi);
+ struct enetc_bdr *rx_ring = &v->rx_ring;
+ struct bpf_prog *prog;
+ bool complete = true;
+ int work_done;
+ int i;
+
+ enetc_lock_mdio();
+
+ for (i = 0; i < v->count_tx_rings; i++)
+ if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
+ complete = false;
+
+ prog = rx_ring->xdp.prog;
+ if (prog)
+ work_done = enetc_clean_rx_ring_xdp(rx_ring, napi, budget, prog);
+ else
+ work_done = enetc_clean_rx_ring(rx_ring, napi, budget);
+ if (work_done == budget)
+ complete = false;
+ if (work_done)
+ v->rx_napi_work = true;
+
+ if (!complete) {
+ enetc_unlock_mdio();
+ return budget;
+ }
+
+ napi_complete_done(napi, work_done);
+
+ if (likely(v->rx_dim_en))
+ enetc_rx_net_dim(v);
+
+ v->rx_napi_work = false;
+
+ /* enable interrupts */
+ enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
+
+ for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
+ enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i),
+ ENETC_TBIER_TXTIE);
+
+ enetc_unlock_mdio();
+
+ return work_done;
+}
+
+/* Probing and Init */
+#define ENETC_MAX_RFS_SIZE 64
+void enetc_get_si_caps(struct enetc_si *si)
+{
+ struct enetc_hw *hw = &si->hw;
+ u32 val;
+
+ /* find out how many of various resources we have to work with */
+ val = enetc_rd(hw, ENETC_SICAPR0);
+ si->num_rx_rings = (val >> 16) & 0xff;
+ si->num_tx_rings = val & 0xff;
+
+ val = enetc_rd(hw, ENETC_SIRFSCAPR);
+ si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val);
+ si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE);
+
+ si->num_rss = 0;
+ val = enetc_rd(hw, ENETC_SIPCAPR0);
+ if (val & ENETC_SIPCAPR0_RSS) {
+ u32 rss;
+
+ rss = enetc_rd(hw, ENETC_SIRSSCAPR);
+ si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss);
+ }
+
+ if (val & ENETC_SIPCAPR0_QBV)
+ si->hw_features |= ENETC_SI_F_QBV;
+
+ if (val & ENETC_SIPCAPR0_PSFP)
+ si->hw_features |= ENETC_SI_F_PSFP;
+}
+
+static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
+{
+ r->bd_base = dma_alloc_coherent(r->dev, r->bd_count * bd_size,
+ &r->bd_dma_base, GFP_KERNEL);
+ if (!r->bd_base)
+ return -ENOMEM;
+
+ /* h/w requires 128B alignment */
+ if (!IS_ALIGNED(r->bd_dma_base, 128)) {
+ dma_free_coherent(r->dev, r->bd_count * bd_size, r->bd_base,
+ r->bd_dma_base);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int enetc_alloc_txbdr(struct enetc_bdr *txr)
+{
+ int err;
+
+ txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd));
+ if (!txr->tx_swbd)
+ return -ENOMEM;
+
+ err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd));
+ if (err)
+ goto err_alloc_bdr;
+
+ txr->tso_headers = dma_alloc_coherent(txr->dev,
+ txr->bd_count * TSO_HEADER_SIZE,
+ &txr->tso_headers_dma,
+ GFP_KERNEL);
+ if (!txr->tso_headers) {
+ err = -ENOMEM;
+ goto err_alloc_tso;
+ }
+
+ txr->next_to_clean = 0;
+ txr->next_to_use = 0;
+
+ return 0;
+
+err_alloc_tso:
+ dma_free_coherent(txr->dev, txr->bd_count * sizeof(union enetc_tx_bd),
+ txr->bd_base, txr->bd_dma_base);
+ txr->bd_base = NULL;
+err_alloc_bdr:
+ vfree(txr->tx_swbd);
+ txr->tx_swbd = NULL;
+
+ return err;
+}
+
+static void enetc_free_txbdr(struct enetc_bdr *txr)
+{
+ int size, i;
+
+ for (i = 0; i < txr->bd_count; i++)
+ enetc_free_tx_frame(txr, &txr->tx_swbd[i]);
+
+ size = txr->bd_count * sizeof(union enetc_tx_bd);
+
+ dma_free_coherent(txr->dev, txr->bd_count * TSO_HEADER_SIZE,
+ txr->tso_headers, txr->tso_headers_dma);
+ txr->tso_headers = NULL;
+
+ dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base);
+ txr->bd_base = NULL;
+
+ vfree(txr->tx_swbd);
+ txr->tx_swbd = NULL;
+}
+
+static int enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
+{
+ int i, err;
+
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ err = enetc_alloc_txbdr(priv->tx_ring[i]);
+
+ if (err)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ while (i-- > 0)
+ enetc_free_txbdr(priv->tx_ring[i]);
+
+ return err;
+}
+
+static void enetc_free_tx_resources(struct enetc_ndev_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_free_txbdr(priv->tx_ring[i]);
+}
+
+static int enetc_alloc_rxbdr(struct enetc_bdr *rxr, bool extended)
+{
+ size_t size = sizeof(union enetc_rx_bd);
+ int err;
+
+ rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd));
+ if (!rxr->rx_swbd)
+ return -ENOMEM;
+
+ if (extended)
+ size *= 2;
+
+ err = enetc_dma_alloc_bdr(rxr, size);
+ if (err) {
+ vfree(rxr->rx_swbd);
+ return err;
+ }
+
+ rxr->next_to_clean = 0;
+ rxr->next_to_use = 0;
+ rxr->next_to_alloc = 0;
+ rxr->ext_en = extended;
+
+ return 0;
+}
+
+static void enetc_free_rxbdr(struct enetc_bdr *rxr)
+{
+ int size;
+
+ size = rxr->bd_count * sizeof(union enetc_rx_bd);
+
+ dma_free_coherent(rxr->dev, size, rxr->bd_base, rxr->bd_dma_base);
+ rxr->bd_base = NULL;
+
+ vfree(rxr->rx_swbd);
+ rxr->rx_swbd = NULL;
+}
+
+static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv)
+{
+ bool extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
+ int i, err;
+
+ for (i = 0; i < priv->num_rx_rings; i++) {
+ err = enetc_alloc_rxbdr(priv->rx_ring[i], extended);
+
+ if (err)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ while (i-- > 0)
+ enetc_free_rxbdr(priv->rx_ring[i]);
+
+ return err;
+}
+
+static void enetc_free_rx_resources(struct enetc_ndev_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_rx_rings; i++)
+ enetc_free_rxbdr(priv->rx_ring[i]);
+}
+
+static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
+{
+ int i;
+
+ if (!tx_ring->tx_swbd)
+ return;
+
+ for (i = 0; i < tx_ring->bd_count; i++) {
+ struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
+
+ enetc_free_tx_frame(tx_ring, tx_swbd);
+ }
+
+ tx_ring->next_to_clean = 0;
+ tx_ring->next_to_use = 0;
+}
+
+static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
+{
+ int i;
+
+ if (!rx_ring->rx_swbd)
+ return;
+
+ for (i = 0; i < rx_ring->bd_count; i++) {
+ struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
+
+ if (!rx_swbd->page)
+ continue;
+
+ dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
+ rx_swbd->dir);
+ __free_page(rx_swbd->page);
+ rx_swbd->page = NULL;
+ }
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+ rx_ring->next_to_alloc = 0;
+}
+
+static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_rx_rings; i++)
+ enetc_free_rx_ring(priv->rx_ring[i]);
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_free_tx_ring(priv->tx_ring[i]);
+}
+
+static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
+{
+ int *rss_table;
+ int i;
+
+ rss_table = kmalloc_array(si->num_rss, sizeof(*rss_table), GFP_KERNEL);
+ if (!rss_table)
+ return -ENOMEM;
+
+ /* Set up RSS table defaults */
+ for (i = 0; i < si->num_rss; i++)
+ rss_table[i] = i % num_groups;
+
+ enetc_set_rss_table(si, rss_table, si->num_rss);
+
+ kfree(rss_table);
+
+ return 0;
+}
+
+int enetc_configure_si(struct enetc_ndev_priv *priv)
+{
+ struct enetc_si *si = priv->si;
+ struct enetc_hw *hw = &si->hw;
+ int err;
+
+ /* set SI cache attributes */
+ enetc_wr(hw, ENETC_SICAR0,
+ ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
+ enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI);
+ /* enable SI */
+ enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);
+
+ if (si->num_rss) {
+ err = enetc_setup_default_rss_table(si, priv->num_rx_rings);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
+{
+ struct enetc_si *si = priv->si;
+ int cpus = num_online_cpus();
+
+ priv->tx_bd_count = ENETC_TX_RING_DEFAULT_SIZE;
+ priv->rx_bd_count = ENETC_RX_RING_DEFAULT_SIZE;
+
+ /* Enable all available TX rings in order to configure as many
+ * priorities as possible, when needed.
+ * TODO: Make # of TX rings run-time configurable
+ */
+ priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings);
+ priv->num_tx_rings = si->num_tx_rings;
+ priv->bdr_int_num = cpus;
+ priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL;
+ priv->tx_ictt = ENETC_TXIC_TIMETHR;
+}
+
+int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
+{
+ struct enetc_si *si = priv->si;
+
+ priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules),
+ GFP_KERNEL);
+ if (!priv->cls_rules)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void enetc_free_si_resources(struct enetc_ndev_priv *priv)
+{
+ kfree(priv->cls_rules);
+}
+
+static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
+{
+ int idx = tx_ring->index;
+ u32 tbmr;
+
+ enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
+ lower_32_bits(tx_ring->bd_dma_base));
+
+ enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
+ upper_32_bits(tx_ring->bd_dma_base));
+
+ WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */
+ enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
+ ENETC_RTBLENR_LEN(tx_ring->bd_count));
+
+ /* clearing PI/CI registers for Tx not supported, adjust sw indexes */
+ tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR);
+ tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR);
+
+ /* enable Tx ints by setting pkt thr to 1 */
+ enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1);
+
+ tbmr = ENETC_TBMR_EN | ENETC_TBMR_SET_PRIO(tx_ring->prio);
+ if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
+ tbmr |= ENETC_TBMR_VIH;
+
+ /* enable ring */
+ enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
+
+ tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR);
+ tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR);
+ tx_ring->idr = hw->reg + ENETC_SITXIDR;
+}
+
+static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
+{
+ int idx = rx_ring->index;
+ u32 rbmr;
+
+ enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
+ lower_32_bits(rx_ring->bd_dma_base));
+
+ enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
+ upper_32_bits(rx_ring->bd_dma_base));
+
+ WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */
+ enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
+ ENETC_RTBLENR_LEN(rx_ring->bd_count));
+
+ if (rx_ring->xdp.prog)
+ enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE_XDP);
+ else
+ enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
+
+ /* Also prepare the consumer index in case page allocation never
+ * succeeds. In that case, hardware will never advance producer index
+ * to match consumer index, and will drop all frames.
+ */
+ enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
+ enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, 1);
+
+ /* enable Rx ints by setting pkt thr to 1 */
+ enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1);
+
+ rbmr = ENETC_RBMR_EN;
+
+ if (rx_ring->ext_en)
+ rbmr |= ENETC_RBMR_BDS;
+
+ if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ rbmr |= ENETC_RBMR_VTE;
+
+ rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
+ rx_ring->idr = hw->reg + ENETC_SIRXIDR;
+
+ enetc_lock_mdio();
+ enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
+ enetc_unlock_mdio();
+
+ /* enable ring */
+ enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
+}
+
+static void enetc_setup_bdrs(struct enetc_ndev_priv *priv)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_setup_txbdr(hw, priv->tx_ring[i]);
+
+ for (i = 0; i < priv->num_rx_rings; i++)
+ enetc_setup_rxbdr(hw, priv->rx_ring[i]);
+}
+
+static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
+{
+ int idx = rx_ring->index;
+
+ /* disable EN bit on ring */
+ enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0);
+}
+
+static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
+{
+ int delay = 8, timeout = 100;
+ int idx = tx_ring->index;
+
+ /* disable EN bit on ring */
+ enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);
+
+ /* wait for busy to clear */
+ while (delay < timeout &&
+ enetc_txbdr_rd(hw, idx, ENETC_TBSR) & ENETC_TBSR_BUSY) {
+ msleep(delay);
+ delay *= 2;
+ }
+
+ if (delay >= timeout)
+ netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n",
+ idx);
+}
+
+static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_clear_txbdr(hw, priv->tx_ring[i]);
+
+ for (i = 0; i < priv->num_rx_rings; i++)
+ enetc_clear_rxbdr(hw, priv->rx_ring[i]);
+
+ udelay(1);
+}
+
+static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
+{
+ struct pci_dev *pdev = priv->si->pdev;
+ struct enetc_hw *hw = &priv->si->hw;
+ int i, j, err;
+
+ for (i = 0; i < priv->bdr_int_num; i++) {
+ int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
+ struct enetc_int_vector *v = priv->int_vector[i];
+ int entry = ENETC_BDR_INT_BASE_IDX + i;
+
+ snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
+ priv->ndev->name, i);
+ err = request_irq(irq, enetc_msix, 0, v->name, v);
+ if (err) {
+ dev_err(priv->dev, "request_irq() failed!\n");
+ goto irq_err;
+ }
+ disable_irq(irq);
+
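+		/* cache the Rx/Tx interrupt enable and Rx coalescing
+		 * timer register addresses for this vector
+		 */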
+ v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
+ v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
+ v->ricr1 = hw->reg + ENETC_BDR(RX, i, ENETC_RBICR1);
+
+ enetc_wr(hw, ENETC_SIMSIRRV(i), entry);
+
+ for (j = 0; j < v->count_tx_rings; j++) {
+ int idx = v->tx_ring[j].index;
+
+ enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
+ }
+ irq_set_affinity_hint(irq, get_cpu_mask(i % num_online_cpus()));
+ }
+
+ return 0;
+
+irq_err:
+ while (i--) {
+ int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
+
+ irq_set_affinity_hint(irq, NULL);
+ free_irq(irq, priv->int_vector[i]);
+ }
+
+ return err;
+}
+
+static void enetc_free_irqs(struct enetc_ndev_priv *priv)
+{
+ struct pci_dev *pdev = priv->si->pdev;
+ int i;
+
+ for (i = 0; i < priv->bdr_int_num; i++) {
+ int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
+
+ irq_set_affinity_hint(irq, NULL);
+ free_irq(irq, priv->int_vector[i]);
+ }
+}
+
+static void enetc_setup_interrupts(struct enetc_ndev_priv *priv)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ u32 icpt, ictt;
+ int i;
+
+ /* enable Tx & Rx event indication */
+ if (priv->ic_mode &
+ (ENETC_IC_RX_MANUAL | ENETC_IC_RX_ADAPTIVE)) {
+ icpt = ENETC_RBICR0_SET_ICPT(ENETC_RXIC_PKTTHR);
+ /* init to non-0 minimum, will be adjusted later */
+ ictt = 0x1;
+ } else {
+ icpt = 0x1; /* enable Rx ints by setting pkt thr to 1 */
+ ictt = 0;
+ }
+
+ for (i = 0; i < priv->num_rx_rings; i++) {
+ enetc_rxbdr_wr(hw, i, ENETC_RBICR1, ictt);
+ enetc_rxbdr_wr(hw, i, ENETC_RBICR0, ENETC_RBICR0_ICEN | icpt);
+ enetc_rxbdr_wr(hw, i, ENETC_RBIER, ENETC_RBIER_RXTIE);
+ }
+
+ if (priv->ic_mode & ENETC_IC_TX_MANUAL)
+ icpt = ENETC_TBICR0_SET_ICPT(ENETC_TXIC_PKTTHR);
+ else
+ icpt = 0x1; /* enable Tx ints by setting pkt thr to 1 */
+
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ enetc_txbdr_wr(hw, i, ENETC_TBICR1, priv->tx_ictt);
+ enetc_txbdr_wr(hw, i, ENETC_TBICR0, ENETC_TBICR0_ICEN | icpt);
+ enetc_txbdr_wr(hw, i, ENETC_TBIER, ENETC_TBIER_TXTIE);
+ }
+}
+
+static void enetc_clear_interrupts(struct enetc_ndev_priv *priv)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_txbdr_wr(hw, i, ENETC_TBIER, 0);
+
+ for (i = 0; i < priv->num_rx_rings; i++)
+ enetc_rxbdr_wr(hw, i, ENETC_RBIER, 0);
+}
+
+static int enetc_phylink_connect(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct ethtool_eee edata;
+ int err;
+
+ if (!priv->phylink)
+ return 0; /* phy-less mode */
+
+ err = phylink_of_phy_connect(priv->phylink, priv->dev->of_node, 0);
+ if (err) {
+ dev_err(&ndev->dev, "could not attach to PHY\n");
+ return err;
+ }
+
+ /* disable EEE autoneg, until ENETC driver supports it */
+ memset(&edata, 0, sizeof(struct ethtool_eee));
+ phylink_ethtool_set_eee(priv->phylink, &edata);
+
+ return 0;
+}
+
+static void enetc_tx_onestep_tstamp(struct work_struct *work)
+{
+ struct enetc_ndev_priv *priv;
+ struct sk_buff *skb;
+
+ priv = container_of(work, struct enetc_ndev_priv, tx_onestep_tstamp);
+
+ netif_tx_lock_bh(priv->ndev);
+
+ clear_bit_unlock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS, &priv->flags);
+ skb = skb_dequeue(&priv->tx_skbs);
+ if (skb)
+ enetc_start_xmit(skb, priv->ndev);
+
+ netif_tx_unlock_bh(priv->ndev);
+}
+
+static void enetc_tx_onestep_tstamp_init(struct enetc_ndev_priv *priv)
+{
+ INIT_WORK(&priv->tx_onestep_tstamp, enetc_tx_onestep_tstamp);
+ skb_queue_head_init(&priv->tx_skbs);
+}
+
+void enetc_start(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int i;
+
+ enetc_setup_interrupts(priv);
+
+ for (i = 0; i < priv->bdr_int_num; i++) {
+ int irq = pci_irq_vector(priv->si->pdev,
+ ENETC_BDR_INT_BASE_IDX + i);
+
+ napi_enable(&priv->int_vector[i]->napi);
+ enable_irq(irq);
+ }
+
+ if (priv->phylink)
+ phylink_start(priv->phylink);
+ else
+ netif_carrier_on(ndev);
+
+ netif_tx_start_all_queues(ndev);
+}
+
+int enetc_open(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int num_stack_tx_queues;
+ int err;
+
+ err = enetc_setup_irqs(priv);
+ if (err)
+ return err;
+
+ err = enetc_phylink_connect(ndev);
+ if (err)
+ goto err_phy_connect;
+
+ err = enetc_alloc_tx_resources(priv);
+ if (err)
+ goto err_alloc_tx;
+
+ err = enetc_alloc_rx_resources(priv);
+ if (err)
+ goto err_alloc_rx;
+
+ num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
+
+ err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
+ if (err)
+ goto err_set_queues;
+
+ err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings);
+ if (err)
+ goto err_set_queues;
+
+ enetc_tx_onestep_tstamp_init(priv);
+ enetc_setup_bdrs(priv);
+ enetc_start(ndev);
+
+ return 0;
+
+err_set_queues:
+ enetc_free_rx_resources(priv);
+err_alloc_rx:
+ enetc_free_tx_resources(priv);
+err_alloc_tx:
+ if (priv->phylink)
+ phylink_disconnect_phy(priv->phylink);
+err_phy_connect:
+ enetc_free_irqs(priv);
+
+ return err;
+}
+
+void enetc_stop(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int i;
+
+ netif_tx_stop_all_queues(ndev);
+
+ for (i = 0; i < priv->bdr_int_num; i++) {
+ int irq = pci_irq_vector(priv->si->pdev,
+ ENETC_BDR_INT_BASE_IDX + i);
+
+ disable_irq(irq);
+ napi_synchronize(&priv->int_vector[i]->napi);
+ napi_disable(&priv->int_vector[i]->napi);
+ }
+
+ if (priv->phylink)
+ phylink_stop(priv->phylink);
+ else
+ netif_carrier_off(ndev);
+
+ enetc_clear_interrupts(priv);
+}
+
+int enetc_close(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+ enetc_stop(ndev);
+ enetc_clear_bdrs(priv);
+
+ if (priv->phylink)
+ phylink_disconnect_phy(priv->phylink);
+ enetc_free_rxtx_rings(priv);
+ enetc_free_rx_resources(priv);
+ enetc_free_tx_resources(priv);
+ enetc_free_irqs(priv);
+
+ return 0;
+}
+
+int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct tc_mqprio_qopt *mqprio = type_data;
+ struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_bdr *tx_ring;
+ int num_stack_tx_queues;
+ u8 num_tc;
+ int i;
+
+ num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
+ mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ num_tc = mqprio->num_tc;
+
+ if (!num_tc) {
+ netdev_reset_tc(ndev);
+ netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
+
+ /* Reset all ring priorities to 0 */
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ tx_ring = priv->tx_ring[i];
+ tx_ring->prio = 0;
+ enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
+ }
+
+ return 0;
+ }
+
+ /* Check if we have enough BD rings available to accommodate all TCs */
+ if (num_tc > num_stack_tx_queues) {
+ netdev_err(ndev, "Max %d traffic classes supported\n",
+ priv->num_tx_rings);
+ return -EINVAL;
+ }
+
+ /* For the moment, we use only one BD ring per TC.
+ *
+ * Configure num_tc BD rings with increasing priorities.
+ */
+ for (i = 0; i < num_tc; i++) {
+ tx_ring = priv->tx_ring[i];
+ tx_ring->prio = i;
+ enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
+ }
+
+ /* Reset the number of netdev queues based on the TC count */
+ netif_set_real_num_tx_queues(ndev, num_tc);
+
+ netdev_set_num_tc(ndev, num_tc);
+
+ /* Each TC is associated with one netdev queue */
+ for (i = 0; i < num_tc; i++)
+ netdev_set_tc_queue(ndev, i, 1, i);
+
+ return 0;
+}
+
+static int enetc_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(dev);
+ struct bpf_prog *old_prog;
+ bool is_up;
+ int i;
+
+ /* The buffer layout is changing, so we need to drain the old
+ * RX buffers and seed new ones.
+ */
+ is_up = netif_running(dev);
+ if (is_up)
+ dev_close(dev);
+
+ old_prog = xchg(&priv->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ for (i = 0; i < priv->num_rx_rings; i++) {
+ struct enetc_bdr *rx_ring = priv->rx_ring[i];
+
+ rx_ring->xdp.prog = prog;
+
+ if (prog)
+ rx_ring->buffer_offset = XDP_PACKET_HEADROOM;
+ else
+ rx_ring->buffer_offset = ENETC_RXB_PAD;
+ }
+
+ if (is_up)
+ return dev_open(dev, extack);
+
+ return 0;
+}
+
+int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return enetc_setup_xdp_prog(dev, xdp->prog, xdp->extack);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct net_device_stats *enetc_get_stats(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ unsigned long packets = 0, bytes = 0;
+ unsigned long tx_dropped = 0;
+ int i;
+
+ for (i = 0; i < priv->num_rx_rings; i++) {
+ packets += priv->rx_ring[i]->stats.packets;
+ bytes += priv->rx_ring[i]->stats.bytes;
+ }
+
+ stats->rx_packets = packets;
+ stats->rx_bytes = bytes;
+ bytes = 0;
+ packets = 0;
+
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ packets += priv->tx_ring[i]->stats.packets;
+ bytes += priv->tx_ring[i]->stats.bytes;
+ tx_dropped += priv->tx_ring[i]->stats.win_drop;
+ }
+
+ stats->tx_packets = packets;
+ stats->tx_bytes = bytes;
+ stats->tx_dropped = tx_dropped;
+
+ return stats;
+}
+
+static int enetc_set_rss(struct net_device *ndev, int en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ u32 reg;
+
+ enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);
+
+ reg = enetc_rd(hw, ENETC_SIMR);
+ reg &= ~ENETC_SIMR_RSSE;
+ reg |= (en) ? ENETC_SIMR_RSSE : 0;
+ enetc_wr(hw, ENETC_SIMR, reg);
+
+ return 0;
+}
+
+static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ int i;
+
+ for (i = 0; i < priv->num_rx_rings; i++)
+ enetc_bdr_enable_rxvlan(hw, i, en);
+}
+
+static void enetc_enable_txvlan(struct net_device *ndev, bool en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_bdr_enable_txvlan(hw, i, en);
+}
+
+void enetc_set_features(struct net_device *ndev, netdev_features_t features)
+{
+ netdev_features_t changed = ndev->features ^ features;
+
+ if (changed & NETIF_F_RXHASH)
+ enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+ enetc_enable_rxvlan(ndev,
+ !!(features & NETIF_F_HW_VLAN_CTAG_RX));
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_TX)
+ enetc_enable_txvlan(ndev,
+ !!(features & NETIF_F_HW_VLAN_CTAG_TX));
+}
+
+#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
+static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct hwtstamp_config config;
+ int ao;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
+ break;
+ case HWTSTAMP_TX_ON:
+ priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
+ priv->active_offloads |= ENETC_F_TX_TSTAMP;
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
+ priv->active_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ ao = priv->active_offloads;
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ priv->active_offloads &= ~ENETC_F_RX_TSTAMP;
+ break;
+ default:
+ priv->active_offloads |= ENETC_F_RX_TSTAMP;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ }
+
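+	/* reopen the interface so the rings are reconfigured for the
+	 * new timestamping offloads
+	 */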
+ if (netif_running(ndev) && ao != priv->active_offloads) {
+ enetc_close(ndev);
+ enetc_open(ndev);
+ }
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct hwtstamp_config config;
+
+ config.flags = 0;
+
+ if (priv->active_offloads & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)
+ config.tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
+ else if (priv->active_offloads & ENETC_F_TX_TSTAMP)
+ config.tx_type = HWTSTAMP_TX_ON;
+ else
+ config.tx_type = HWTSTAMP_TX_OFF;
+
+ config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+#endif
+
+int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
+ if (cmd == SIOCSHWTSTAMP)
+ return enetc_hwtstamp_set(ndev, rq);
+ if (cmd == SIOCGHWTSTAMP)
+ return enetc_hwtstamp_get(ndev, rq);
+#endif
+
+ if (!priv->phylink)
+ return -EOPNOTSUPP;
+
+ return phylink_mii_ioctl(priv->phylink, rq, cmd);
+}
+
+int enetc_alloc_msix(struct enetc_ndev_priv *priv)
+{
+ struct pci_dev *pdev = priv->si->pdev;
+ int first_xdp_tx_ring;
+ int i, n, err, nvec;
+ int v_tx_rings;
+
+ nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
+ /* allocate MSIX for both messaging and Rx/Tx interrupts */
+ n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
+
+ if (n < 0)
+ return n;
+
+ if (n != nvec)
+ return -EPERM;
+
+ /* # of tx rings per int vector */
+ v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;
+
+ for (i = 0; i < priv->bdr_int_num; i++) {
+ struct enetc_int_vector *v;
+ struct enetc_bdr *bdr;
+ int j;
+
+ v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
+ if (!v) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ priv->int_vector[i] = v;
+
+ bdr = &v->rx_ring;
+ bdr->index = i;
+ bdr->ndev = priv->ndev;
+ bdr->dev = priv->dev;
+ bdr->bd_count = priv->rx_bd_count;
+ bdr->buffer_offset = ENETC_RXB_PAD;
+ priv->rx_ring[i] = bdr;
+
+ err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0);
+ if (err) {
+ kfree(v);
+ goto fail;
+ }
+
+ err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq,
+ MEM_TYPE_PAGE_SHARED, NULL);
+ if (err) {
+ xdp_rxq_info_unreg(&bdr->xdp.rxq);
+ kfree(v);
+ goto fail;
+ }
+
+ /* init defaults for adaptive IC */
+ if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
+ v->rx_ictt = 0x1;
+ v->rx_dim_en = true;
+ }
+ INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
+ netif_napi_add(priv->ndev, &v->napi, enetc_poll);
+ v->count_tx_rings = v_tx_rings;
+
+ for (j = 0; j < v_tx_rings; j++) {
+ int idx;
+
+			/* default tx ring mapping policy: interleave rings across
+			 * vectors, so vector i services rings i, i + bdr_int_num, ...
+			 */
+ idx = priv->bdr_int_num * j + i;
+ __set_bit(idx, &v->tx_rings_map);
+ bdr = &v->tx_ring[j];
+ bdr->index = idx;
+ bdr->ndev = priv->ndev;
+ bdr->dev = priv->dev;
+ bdr->bd_count = priv->tx_bd_count;
+ priv->tx_ring[idx] = bdr;
+ }
+ }
+
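+	/* XDP_TX uses the last num_possible_cpus() TX rings, one per CPU */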
+ first_xdp_tx_ring = priv->num_tx_rings - num_possible_cpus();
+ priv->xdp_tx_ring = &priv->tx_ring[first_xdp_tx_ring];
+
+ return 0;
+
+fail:
+ while (i--) {
+ struct enetc_int_vector *v = priv->int_vector[i];
+ struct enetc_bdr *rx_ring = &v->rx_ring;
+
+ xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
+ xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
+ netif_napi_del(&v->napi);
+ cancel_work_sync(&v->rx_dim.work);
+ kfree(v);
+ }
+
+ pci_free_irq_vectors(pdev);
+
+ return err;
+}
+
+void enetc_free_msix(struct enetc_ndev_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->bdr_int_num; i++) {
+ struct enetc_int_vector *v = priv->int_vector[i];
+ struct enetc_bdr *rx_ring = &v->rx_ring;
+
+ xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
+ xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
+ netif_napi_del(&v->napi);
+ cancel_work_sync(&v->rx_dim.work);
+ }
+
+ for (i = 0; i < priv->num_rx_rings; i++)
+ priv->rx_ring[i] = NULL;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ priv->tx_ring[i] = NULL;
+
+ for (i = 0; i < priv->bdr_int_num; i++) {
+ kfree(priv->int_vector[i]);
+ priv->int_vector[i] = NULL;
+ }
+
+ /* disable all MSIX for this device */
+ pci_free_irq_vectors(priv->si->pdev);
+}
+
+static void enetc_kfree_si(struct enetc_si *si)
+{
+ char *p = (char *)si - si->pad;
+
+ kfree(p);
+}
+
+static void enetc_detect_errata(struct enetc_si *si)
+{
+ if (si->pdev->revision == ENETC_REV1)
+ si->errata = ENETC_ERR_VLAN_ISOL | ENETC_ERR_UCMCSWP;
+}
+
+int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
+{
+ struct enetc_si *si, *p;
+ struct enetc_hw *hw;
+ size_t alloc_size;
+ int err, len;
+
+ pcie_flr(pdev);
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "device enable failed\n");
+
+	/* set up 64-bit DMA addressing */
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
+ goto err_dma;
+ }
+
+ err = pci_request_mem_regions(pdev, name);
+ if (err) {
+ dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err);
+ goto err_pci_mem_reg;
+ }
+
+ pci_set_master(pdev);
+
+ alloc_size = sizeof(struct enetc_si);
+ if (sizeof_priv) {
+ /* align priv to 32B */
+ alloc_size = ALIGN(alloc_size, ENETC_SI_ALIGN);
+ alloc_size += sizeof_priv;
+ }
+ /* force 32B alignment for enetc_si */
+ alloc_size += ENETC_SI_ALIGN - 1;
+
+ p = kzalloc(alloc_size, GFP_KERNEL);
+ if (!p) {
+ err = -ENOMEM;
+ goto err_alloc_si;
+ }
+
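+	/* align si within the allocation; si->pad lets enetc_kfree_si()
+	 * recover the original pointer
+	 */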
+ si = PTR_ALIGN(p, ENETC_SI_ALIGN);
+ si->pad = (char *)si - (char *)p;
+
+ pci_set_drvdata(pdev, si);
+ si->pdev = pdev;
+ hw = &si->hw;
+
+ len = pci_resource_len(pdev, ENETC_BAR_REGS);
+ hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len);
+ if (!hw->reg) {
+ err = -ENXIO;
+ dev_err(&pdev->dev, "ioremap() failed\n");
+ goto err_ioremap;
+ }
+ if (len > ENETC_PORT_BASE)
+ hw->port = hw->reg + ENETC_PORT_BASE;
+ if (len > ENETC_GLOBAL_BASE)
+ hw->global = hw->reg + ENETC_GLOBAL_BASE;
+
+ enetc_detect_errata(si);
+
+ return 0;
+
+err_ioremap:
+ enetc_kfree_si(si);
+err_alloc_si:
+ pci_release_mem_regions(pdev);
+err_pci_mem_reg:
+err_dma:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+void enetc_pci_remove(struct pci_dev *pdev)
+{
+ struct enetc_si *si = pci_get_drvdata(pdev);
+ struct enetc_hw *hw = &si->hw;
+
+ iounmap(hw->reg);
+ enetc_kfree_si(si);
+ pci_release_mem_regions(pdev);
+ pci_disable_device(pdev);
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
new file mode 100644
index 000000000..c6d8cc15c
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -0,0 +1,554 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2017-2019 NXP */
+
+#include <linux/timer.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/phylink.h>
+#include <linux/dim.h>
+
+#include "enetc_hw.h"
+
+#define ENETC_MAC_MAXFRM_SIZE 9600
+#define ENETC_MAX_MTU (ENETC_MAC_MAXFRM_SIZE - \
+ (ETH_FCS_LEN + ETH_HLEN + VLAN_HLEN))
+
+#define ENETC_CBD_DATA_MEM_ALIGN 64
+
+struct enetc_tx_swbd {
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdp_frame;
+ };
+ dma_addr_t dma;
+ struct page *page; /* valid only if is_xdp_tx */
+ u16 page_offset; /* valid only if is_xdp_tx */
+ u16 len;
+ enum dma_data_direction dir;
+ u8 is_dma_page:1;
+ u8 check_wb:1;
+ u8 do_twostep_tstamp:1;
+ u8 is_eof:1;
+ u8 is_xdp_tx:1;
+ u8 is_xdp_redirect:1;
+ u8 qbv_en:1;
+};
+
+#define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE
+#define ENETC_RXB_TRUESIZE 2048 /* PAGE_SIZE >> 1 */
+#define ENETC_RXB_PAD NET_SKB_PAD /* add extra space if needed */
+#define ENETC_RXB_DMA_SIZE \
+ (SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD)
+#define ENETC_RXB_DMA_SIZE_XDP \
+ (SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - XDP_PACKET_HEADROOM)
+
+struct enetc_rx_swbd {
+ dma_addr_t dma;
+ struct page *page;
+ u16 page_offset;
+ enum dma_data_direction dir;
+ u16 len;
+};
+
+/* ENETC overhead: optional extension BD + 1 BD gap */
+#define ENETC_TXBDS_NEEDED(val) ((val) + 2)
+/* max # of chained Tx BDs is 15, including head and extension BD */
+#define ENETC_MAX_SKB_FRAGS 13
+#define ENETC_TXBDS_MAX_NEEDED ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
+
+struct enetc_ring_stats {
+ unsigned int packets;
+ unsigned int bytes;
+ unsigned int rx_alloc_errs;
+ unsigned int xdp_drops;
+ unsigned int xdp_tx;
+ unsigned int xdp_tx_drops;
+ unsigned int xdp_redirect;
+ unsigned int xdp_redirect_failures;
+ unsigned int xdp_redirect_sg;
+ unsigned int recycles;
+ unsigned int recycle_failures;
+ unsigned int win_drop;
+};
+
+struct enetc_xdp_data {
+ struct xdp_rxq_info rxq;
+ struct bpf_prog *prog;
+ int xdp_tx_in_flight;
+};
+
+#define ENETC_RX_RING_DEFAULT_SIZE 2048
+#define ENETC_TX_RING_DEFAULT_SIZE 2048
+#define ENETC_DEFAULT_TX_WORK (ENETC_TX_RING_DEFAULT_SIZE / 2)
+
+struct enetc_bdr {
+ struct device *dev; /* for DMA mapping */
+ struct net_device *ndev;
+ void *bd_base; /* points to Rx or Tx BD ring */
+ union {
+ void __iomem *tpir;
+ void __iomem *rcir;
+ };
+ u16 index;
+ u16 prio;
+ int bd_count; /* # of BDs */
+ int next_to_use;
+ int next_to_clean;
+ union {
+ struct enetc_tx_swbd *tx_swbd;
+ struct enetc_rx_swbd *rx_swbd;
+ };
+ union {
+ void __iomem *tcir; /* Tx */
+ int next_to_alloc; /* Rx */
+ };
+ void __iomem *idr; /* Interrupt Detect Register pointer */
+
+ int buffer_offset;
+ struct enetc_xdp_data xdp;
+
+ struct enetc_ring_stats stats;
+
+ dma_addr_t bd_dma_base;
+ u8 tsd_enable; /* Time specific departure */
+ bool ext_en; /* enable h/w descriptor extensions */
+
+ /* DMA buffer for TSO headers */
+ char *tso_headers;
+ dma_addr_t tso_headers_dma;
+} ____cacheline_aligned_in_smp;
+
+static inline void enetc_bdr_idx_inc(struct enetc_bdr *bdr, int *i)
+{
+ if (unlikely(++*i == bdr->bd_count))
+ *i = 0;
+}
+
+static inline int enetc_bd_unused(struct enetc_bdr *bdr)
+{
+ if (bdr->next_to_clean > bdr->next_to_use)
+ return bdr->next_to_clean - bdr->next_to_use - 1;
+
+ return bdr->bd_count + bdr->next_to_clean - bdr->next_to_use - 1;
+}
+
+static inline int enetc_swbd_unused(struct enetc_bdr *bdr)
+{
+ if (bdr->next_to_clean > bdr->next_to_alloc)
+ return bdr->next_to_clean - bdr->next_to_alloc - 1;
+
+ return bdr->bd_count + bdr->next_to_clean - bdr->next_to_alloc - 1;
+}
+
+/* Control BD ring */
+#define ENETC_CBDR_DEFAULT_SIZE 64
+struct enetc_cbdr {
+ void *bd_base; /* points to Rx or Tx BD ring */
+ void __iomem *pir;
+ void __iomem *cir;
+ void __iomem *mr; /* mode register */
+
+ int bd_count; /* # of BDs */
+ int next_to_use;
+ int next_to_clean;
+
+ dma_addr_t bd_dma_base;
+ struct device *dma_dev;
+};
+
+#define ENETC_TXBD(BDR, i) (&(((union enetc_tx_bd *)((BDR).bd_base))[i]))
+
+static inline union enetc_rx_bd *enetc_rxbd(struct enetc_bdr *rx_ring, int i)
+{
+ int hw_idx = i;
+
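+	/* extended BD mode uses two hardware BDs per logical index */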
+#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
+ if (rx_ring->ext_en)
+ hw_idx = 2 * i;
+#endif
+ return &(((union enetc_rx_bd *)rx_ring->bd_base)[hw_idx]);
+}
+
+static inline void enetc_rxbd_next(struct enetc_bdr *rx_ring,
+ union enetc_rx_bd **old_rxbd, int *old_index)
+{
+ union enetc_rx_bd *new_rxbd = *old_rxbd;
+ int new_index = *old_index;
+
+ new_rxbd++;
+
+#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
+ if (rx_ring->ext_en)
+ new_rxbd++;
+#endif
+
+ if (unlikely(++new_index == rx_ring->bd_count)) {
+ new_rxbd = rx_ring->bd_base;
+ new_index = 0;
+ }
+
+ *old_rxbd = new_rxbd;
+ *old_index = new_index;
+}
+
+static inline union enetc_rx_bd *enetc_rxbd_ext(union enetc_rx_bd *rxbd)
+{
+ return ++rxbd;
+}
+
+struct enetc_msg_swbd {
+ void *vaddr;
+ dma_addr_t dma;
+ int size;
+};
+
+#define ENETC_REV1 0x1
+enum enetc_errata {
+ ENETC_ERR_VLAN_ISOL = BIT(0),
+ ENETC_ERR_UCMCSWP = BIT(1),
+};
+
+#define ENETC_SI_F_QBV BIT(0)
+#define ENETC_SI_F_PSFP BIT(1)
+
+/* PCI IEP device data */
+struct enetc_si {
+ struct pci_dev *pdev;
+ struct enetc_hw hw;
+ enum enetc_errata errata;
+
+ struct net_device *ndev; /* back ref. */
+
+ struct enetc_cbdr cbd_ring;
+
+ int num_rx_rings; /* how many rings are available in the SI */
+ int num_tx_rings;
+ int num_fs_entries;
+ int num_rss; /* number of RSS buckets */
+ unsigned short pad;
+ int hw_features;
+};
+
+#define ENETC_SI_ALIGN 32
+
+static inline void *enetc_si_priv(const struct enetc_si *si)
+{
+ return (char *)si + ALIGN(sizeof(struct enetc_si), ENETC_SI_ALIGN);
+}
+
+static inline bool enetc_si_is_pf(struct enetc_si *si)
+{
+ return !!(si->hw.port);
+}
+
+static inline int enetc_pf_to_port(struct pci_dev *pf_pdev)
+{
+ switch (pf_pdev->devfn) {
+ case 0:
+ return 0;
+ case 1:
+ return 1;
+ case 2:
+ return 2;
+ case 6:
+ return 3;
+ default:
+ return -1;
+ }
+}
+
+#define ENETC_MAX_NUM_TXQS 8
+#define ENETC_INT_NAME_MAX (IFNAMSIZ + 8)
+
+struct enetc_int_vector {
+ void __iomem *rbier;
+ void __iomem *tbier_base;
+ void __iomem *ricr1;
+ unsigned long tx_rings_map;
+ int count_tx_rings;
+ u32 rx_ictt;
+ u16 comp_cnt;
+ bool rx_dim_en, rx_napi_work;
+ struct napi_struct napi ____cacheline_aligned_in_smp;
+ struct dim rx_dim ____cacheline_aligned_in_smp;
+ char name[ENETC_INT_NAME_MAX];
+
+ struct enetc_bdr rx_ring;
+ struct enetc_bdr tx_ring[];
+} ____cacheline_aligned_in_smp;
+
+struct enetc_cls_rule {
+ struct ethtool_rx_flow_spec fs;
+ int used;
+};
+
+#define ENETC_MAX_BDR_INT 2 /* fixed to max # of available cpus */
+struct psfp_cap {
+ u32 max_streamid;
+ u32 max_psfp_filter;
+ u32 max_psfp_gate;
+ u32 max_psfp_gatelist;
+ u32 max_psfp_meter;
+};
+
+#define ENETC_F_TX_TSTAMP_MASK 0xff
+/* TODO: more hardware offloads */
+enum enetc_active_offloads {
+ /* 8 bits reserved for TX timestamp types (hwtstamp_tx_types) */
+ ENETC_F_TX_TSTAMP = BIT(0),
+ ENETC_F_TX_ONESTEP_SYNC_TSTAMP = BIT(1),
+
+ ENETC_F_RX_TSTAMP = BIT(8),
+ ENETC_F_QBV = BIT(9),
+ ENETC_F_QCI = BIT(10),
+};
+
+enum enetc_flags_bit {
+ ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS = 0,
+};
+
+/* interrupt coalescing modes */
+enum enetc_ic_mode {
+ /* one interrupt per frame */
+ ENETC_IC_NONE = 0,
+ /* activated when int coalescing time is set to a non-0 value */
+ ENETC_IC_RX_MANUAL = BIT(0),
+ ENETC_IC_TX_MANUAL = BIT(1),
+ /* use dynamic interrupt moderation */
+ ENETC_IC_RX_ADAPTIVE = BIT(2),
+};
+
+#define ENETC_RXIC_PKTTHR min_t(u32, 256, ENETC_RX_RING_DEFAULT_SIZE / 2)
+#define ENETC_TXIC_PKTTHR min_t(u32, 128, ENETC_TX_RING_DEFAULT_SIZE / 2)
+#define ENETC_TXIC_TIMETHR enetc_usecs_to_cycles(600)
+
+struct enetc_ndev_priv {
+ struct net_device *ndev;
+ struct device *dev; /* dma-mapping device */
+ struct enetc_si *si;
+
+ int bdr_int_num; /* number of Rx/Tx ring interrupts */
+ struct enetc_int_vector *int_vector[ENETC_MAX_BDR_INT];
+ u16 num_rx_rings, num_tx_rings;
+ u16 rx_bd_count, tx_bd_count;
+
+ u16 msg_enable;
+ enum enetc_active_offloads active_offloads;
+
+ u32 speed; /* store speed for compare update pspeed */
+
+ struct enetc_bdr **xdp_tx_ring;
+ struct enetc_bdr *tx_ring[16];
+ struct enetc_bdr *rx_ring[16];
+
+ struct enetc_cls_rule *cls_rules;
+
+ struct psfp_cap psfp_cap;
+
+ struct phylink *phylink;
+ int ic_mode;
+ u32 tx_ictt;
+
+ struct bpf_prog *xdp_prog;
+
+ unsigned long flags;
+
+ struct work_struct tx_onestep_tstamp;
+ struct sk_buff_head tx_skbs;
+};
+
+/* Messaging */
+
+/* VF-PF set primary MAC address message format */
+struct enetc_msg_cmd_set_primary_mac {
+ struct enetc_msg_cmd_header header;
+ struct sockaddr mac;
+};
+
+#define ENETC_CBD(R, i) (&(((struct enetc_cbd *)((R).bd_base))[i]))
+
+#define ENETC_CBDR_TIMEOUT 1000 /* usecs */
+
+/* PTP driver exports */
+extern int enetc_phc_index;
+
+/* SI common */
+int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv);
+void enetc_pci_remove(struct pci_dev *pdev);
+int enetc_alloc_msix(struct enetc_ndev_priv *priv);
+void enetc_free_msix(struct enetc_ndev_priv *priv);
+void enetc_get_si_caps(struct enetc_si *si);
+void enetc_init_si_rings_params(struct enetc_ndev_priv *priv);
+int enetc_alloc_si_resources(struct enetc_ndev_priv *priv);
+void enetc_free_si_resources(struct enetc_ndev_priv *priv);
+int enetc_configure_si(struct enetc_ndev_priv *priv);
+
+int enetc_open(struct net_device *ndev);
+int enetc_close(struct net_device *ndev);
+void enetc_start(struct net_device *ndev);
+void enetc_stop(struct net_device *ndev);
+netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev);
+struct net_device_stats *enetc_get_stats(struct net_device *ndev);
+void enetc_set_features(struct net_device *ndev, netdev_features_t features);
+int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);
+int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data);
+int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp);
+int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
+ struct xdp_frame **frames, u32 flags);
+
+/* ethtool */
+void enetc_set_ethtool_ops(struct net_device *ndev);
+
+/* control buffer descriptor ring (CBDR) */
+int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
+ struct enetc_cbdr *cbdr);
+void enetc_teardown_cbdr(struct enetc_cbdr *cbdr);
+int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
+ char *mac_addr, int si_map);
+int enetc_clear_mac_flt_entry(struct enetc_si *si, int index);
+int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
+ int index);
+void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes);
+int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count);
+int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count);
+int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd);
+
+static inline void *enetc_cbd_alloc_data_mem(struct enetc_si *si,
+ struct enetc_cbd *cbd,
+ int size, dma_addr_t *dma,
+ void **data_align)
+{
+ struct enetc_cbdr *ring = &si->cbd_ring;
+ dma_addr_t dma_align;
+ void *data;
+
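+	/* over-allocate by the alignment so both the DMA address and the
+	 * CPU pointer can be rounded up to a 64B boundary
+	 */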
+ data = dma_alloc_coherent(ring->dma_dev,
+ size + ENETC_CBD_DATA_MEM_ALIGN,
+ dma, GFP_KERNEL);
+ if (!data) {
+ dev_err(ring->dma_dev, "CBD alloc data memory failed!\n");
+ return NULL;
+ }
+
+ dma_align = ALIGN(*dma, ENETC_CBD_DATA_MEM_ALIGN);
+ *data_align = PTR_ALIGN(data, ENETC_CBD_DATA_MEM_ALIGN);
+
+ cbd->addr[0] = cpu_to_le32(lower_32_bits(dma_align));
+ cbd->addr[1] = cpu_to_le32(upper_32_bits(dma_align));
+ cbd->length = cpu_to_le16(size);
+
+ return data;
+}
+
+static inline void enetc_cbd_free_data_mem(struct enetc_si *si, int size,
+ void *data, dma_addr_t *dma)
+{
+ struct enetc_cbdr *ring = &si->cbd_ring;
+
+ dma_free_coherent(ring->dma_dev, size + ENETC_CBD_DATA_MEM_ALIGN,
+ data, *dma);
+}
+
+void enetc_reset_ptcmsdur(struct enetc_hw *hw);
+void enetc_set_ptcmsdur(struct enetc_hw *hw, u32 *queue_max_sdu);
+
+#ifdef CONFIG_FSL_ENETC_QOS
+int enetc_qos_query_caps(struct net_device *ndev, void *type_data);
+int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data);
+void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed);
+int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data);
+int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data);
+int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv);
+int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data);
+int enetc_psfp_init(struct enetc_ndev_priv *priv);
+int enetc_psfp_clean(struct enetc_ndev_priv *priv);
+int enetc_set_psfp(struct net_device *ndev, bool en);
+
+static inline void enetc_get_max_cap(struct enetc_ndev_priv *priv)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ u32 reg;
+
+ reg = enetc_port_rd(hw, ENETC_PSIDCAPR);
+ priv->psfp_cap.max_streamid = reg & ENETC_PSIDCAPR_MSK;
+ /* Port stream filter capability */
+ reg = enetc_port_rd(hw, ENETC_PSFCAPR);
+ priv->psfp_cap.max_psfp_filter = reg & ENETC_PSFCAPR_MSK;
+ /* Port stream gate capability */
+ reg = enetc_port_rd(hw, ENETC_PSGCAPR);
+ priv->psfp_cap.max_psfp_gate = (reg & ENETC_PSGCAPR_SGIT_MSK);
+ priv->psfp_cap.max_psfp_gatelist = (reg & ENETC_PSGCAPR_GCL_MSK) >> 16;
+ /* Port flow meter capability */
+ reg = enetc_port_rd(hw, ENETC_PFMCAPR);
+ priv->psfp_cap.max_psfp_meter = reg & ENETC_PFMCAPR_MSK;
+}
+
+static inline int enetc_psfp_enable(struct enetc_ndev_priv *priv)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ int err;
+
+ enetc_get_max_cap(priv);
+
+ err = enetc_psfp_init(priv);
+ if (err)
+ return err;
+
+ enetc_wr(hw, ENETC_PPSFPMR, enetc_rd(hw, ENETC_PPSFPMR) |
+ ENETC_PPSFPMR_PSFPEN | ENETC_PPSFPMR_VS |
+ ENETC_PPSFPMR_PVC | ENETC_PPSFPMR_PVZC);
+
+ return 0;
+}
+
+static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ int err;
+
+ err = enetc_psfp_clean(priv);
+ if (err)
+ return err;
+
+ enetc_wr(hw, ENETC_PPSFPMR, enetc_rd(hw, ENETC_PPSFPMR) &
+ ~ENETC_PPSFPMR_PSFPEN & ~ENETC_PPSFPMR_VS &
+ ~ENETC_PPSFPMR_PVC & ~ENETC_PPSFPMR_PVZC);
+
+ memset(&priv->psfp_cap, 0, sizeof(struct psfp_cap));
+
+ return 0;
+}
+
+#else
+#define enetc_qos_query_caps(ndev, type_data) -EOPNOTSUPP
+#define enetc_setup_tc_taprio(ndev, type_data) -EOPNOTSUPP
+#define enetc_sched_speed_set(priv, speed) (void)0
+#define enetc_setup_tc_cbs(ndev, type_data) -EOPNOTSUPP
+#define enetc_setup_tc_txtime(ndev, type_data) -EOPNOTSUPP
+#define enetc_setup_tc_psfp(ndev, type_data) -EOPNOTSUPP
+#define enetc_setup_tc_block_cb NULL
+
+#define enetc_get_max_cap(p) \
+ memset(&((p)->psfp_cap), 0, sizeof(struct psfp_cap))
+
+static inline int enetc_psfp_enable(struct enetc_ndev_priv *priv)
+{
+ return 0;
+}
+
+static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
+{
+ return 0;
+}
+
+static inline int enetc_set_psfp(struct net_device *ndev, bool en)
+{
+ return 0;
+}
+#endif
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
new file mode 100644
index 000000000..af68dc46a
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include "enetc.h"
+
+int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
+ struct enetc_cbdr *cbdr)
+{
+ int size = bd_count * sizeof(struct enetc_cbd);
+
+ cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base,
+ GFP_KERNEL);
+ if (!cbdr->bd_base)
+ return -ENOMEM;
+
+ /* h/w requires 128B alignment */
+ if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) {
+ dma_free_coherent(dev, size, cbdr->bd_base,
+ cbdr->bd_dma_base);
+ return -EINVAL;
+ }
+
+ cbdr->next_to_clean = 0;
+ cbdr->next_to_use = 0;
+ cbdr->dma_dev = dev;
+ cbdr->bd_count = bd_count;
+
+ cbdr->pir = hw->reg + ENETC_SICBDRPIR;
+ cbdr->cir = hw->reg + ENETC_SICBDRCIR;
+ cbdr->mr = hw->reg + ENETC_SICBDRMR;
+
+ /* set CBDR cache attributes */
+ enetc_wr(hw, ENETC_SICAR2,
+ ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
+
+ enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base));
+ enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base));
+ enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count));
+
+ enetc_wr_reg(cbdr->pir, cbdr->next_to_clean);
+ enetc_wr_reg(cbdr->cir, cbdr->next_to_use);
+ /* enable ring */
+ enetc_wr_reg(cbdr->mr, BIT(31));
+
+ return 0;
+}
+
+void enetc_teardown_cbdr(struct enetc_cbdr *cbdr)
+{
+ int size = cbdr->bd_count * sizeof(struct enetc_cbd);
+
+ /* disable ring */
+ enetc_wr_reg(cbdr->mr, 0);
+
+ dma_free_coherent(cbdr->dma_dev, size, cbdr->bd_base,
+ cbdr->bd_dma_base);
+ cbdr->bd_base = NULL;
+ cbdr->dma_dev = NULL;
+}
+
+static void enetc_clean_cbdr(struct enetc_cbdr *ring)
+{
+ struct enetc_cbd *dest_cbd;
+ int i, status;
+
+ i = ring->next_to_clean;
+
+ while (enetc_rd_reg(ring->cir) != i) {
+ dest_cbd = ENETC_CBD(*ring, i);
+ status = dest_cbd->status_flags & ENETC_CBD_STATUS_MASK;
+ if (status)
+ dev_warn(ring->dma_dev, "CMD err %04x for cmd %04x\n",
+ status, dest_cbd->cmd);
+
+ memset(dest_cbd, 0, sizeof(*dest_cbd));
+
+ i = (i + 1) % ring->bd_count;
+ }
+
+ ring->next_to_clean = i;
+}
+
+static int enetc_cbd_unused(struct enetc_cbdr *r)
+{
+ return (r->next_to_clean - r->next_to_use - 1 + r->bd_count) %
+ r->bd_count;
+}
+
+int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
+{
+ struct enetc_cbdr *ring = &si->cbd_ring;
+ int timeout = ENETC_CBDR_TIMEOUT;
+ struct enetc_cbd *dest_cbd;
+ int i;
+
+ if (unlikely(!ring->bd_base))
+ return -EIO;
+
+ if (unlikely(!enetc_cbd_unused(ring)))
+ enetc_clean_cbdr(ring);
+
+ i = ring->next_to_use;
+ dest_cbd = ENETC_CBD(*ring, i);
+
+ /* copy command to the ring */
+ *dest_cbd = *cbd;
+ i = (i + 1) % ring->bd_count;
+
+ ring->next_to_use = i;
+ /* let H/W know BD ring has been updated */
+ enetc_wr_reg(ring->pir, i);
+
+ do {
+ if (enetc_rd_reg(ring->cir) == i)
+ break;
+ udelay(10); /* cannot sleep, rtnl_lock() */
+ timeout -= 10;
+ } while (timeout);
+
+ if (!timeout)
+ return -EBUSY;
+
+	/* hardware may write data back into the CBD;
+	 * propagate it to the caller
+	 */
+ *cbd = *dest_cbd;
+
+ enetc_clean_cbdr(ring);
+
+ return 0;
+}
+
+int enetc_clear_mac_flt_entry(struct enetc_si *si, int index)
+{
+ struct enetc_cbd cbd;
+
+ memset(&cbd, 0, sizeof(cbd));
+
+ cbd.cls = 1;
+ cbd.status_flags = ENETC_CBD_FLAGS_SF;
+ cbd.index = cpu_to_le16(index);
+
+ return enetc_send_cmd(si, &cbd);
+}
+
+int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
+ char *mac_addr, int si_map)
+{
+ struct enetc_cbd cbd;
+ u32 upper;
+ u16 lower;
+
+ memset(&cbd, 0, sizeof(cbd));
+
+ /* fill up the "set" descriptor */
+ cbd.cls = 1;
+ cbd.status_flags = ENETC_CBD_FLAGS_SF;
+ cbd.index = cpu_to_le16(index);
+ cbd.opt[3] = cpu_to_le32(si_map);
+ /* enable entry */
+ cbd.opt[0] = cpu_to_le32(BIT(31));
+
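+	/* the 6-byte MAC address is programmed as a 32-bit plus a 16-bit word */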
+ upper = *(const u32 *)mac_addr;
+ lower = *(const u16 *)(mac_addr + 4);
+ cbd.addr[0] = cpu_to_le32(upper);
+ cbd.addr[1] = cpu_to_le32(lower);
+
+ return enetc_send_cmd(si, &cbd);
+}
+
+/* Set entry in RFS table */
+int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
+ int index)
+{
+ struct enetc_cbdr *ring = &si->cbd_ring;
+ struct enetc_cbd cbd = {.cmd = 0};
+ void *tmp, *tmp_align;
+ dma_addr_t dma;
+ int err;
+
+ /* fill up the "set" descriptor */
+ cbd.cmd = 0;
+ cbd.cls = 4;
+ cbd.index = cpu_to_le16(index);
+ cbd.opt[3] = cpu_to_le32(0); /* SI */
+
+ tmp = enetc_cbd_alloc_data_mem(si, &cbd, sizeof(*rfse),
+ &dma, &tmp_align);
+ if (!tmp)
+ return -ENOMEM;
+
+ memcpy(tmp_align, rfse, sizeof(*rfse));
+
+ err = enetc_send_cmd(si, &cbd);
+ if (err)
+ dev_err(ring->dma_dev, "FS entry add failed (%d)!", err);
+
+ enetc_cbd_free_data_mem(si, sizeof(*rfse), tmp, &dma);
+
+ return err;
+}
+
+static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
+ bool read)
+{
+ struct enetc_cbdr *ring = &si->cbd_ring;
+ struct enetc_cbd cbd = {.cmd = 0};
+ u8 *tmp, *tmp_align;
+ dma_addr_t dma;
+ int err, i;
+
+ if (count < ENETC_CBD_DATA_MEM_ALIGN)
+ /* HW only takes in a full 64 entry table */
+ return -EINVAL;
+
+ tmp = enetc_cbd_alloc_data_mem(si, &cbd, count,
+ &dma, (void *)&tmp_align);
+ if (!tmp)
+ return -ENOMEM;
+
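+	/* the hardware stores one byte (an RX ring index) per RSS entry */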
+ if (!read)
+ for (i = 0; i < count; i++)
+ tmp_align[i] = (u8)(table[i]);
+
+ /* fill up the descriptor */
+ cbd.cmd = read ? 2 : 1;
+ cbd.cls = 3;
+
+ err = enetc_send_cmd(si, &cbd);
+ if (err)
+ dev_err(ring->dma_dev, "RSS cmd failed (%d)!", err);
+
+ if (read)
+ for (i = 0; i < count; i++)
+ table[i] = tmp_align[i];
+
+ enetc_cbd_free_data_mem(si, count, tmp, &dma);
+
+ return err;
+}
+
+/* Get RSS table */
+int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count)
+{
+ return enetc_cmd_rss_table(si, table, count, true);
+}
+
+/* Set RSS table */
+int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count)
+{
+ return enetc_cmd_rss_table(si, (u32 *)table, count, false);
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
new file mode 100644
index 000000000..c8369e375
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -0,0 +1,928 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include <linux/net_tstamp.h>
+#include <linux/module.h>
+#include "enetc.h"
+
+static const u32 enetc_si_regs[] = {
+ ENETC_SIMR, ENETC_SIPMAR0, ENETC_SIPMAR1, ENETC_SICBDRMR,
+ ENETC_SICBDRSR, ENETC_SICBDRBAR0, ENETC_SICBDRBAR1, ENETC_SICBDRPIR,
+ ENETC_SICBDRCIR, ENETC_SICBDRLENR, ENETC_SICAPR0, ENETC_SICAPR1,
+ ENETC_SIUEFDCR
+};
+
+static const u32 enetc_txbdr_regs[] = {
+ ENETC_TBMR, ENETC_TBSR, ENETC_TBBAR0, ENETC_TBBAR1,
+ ENETC_TBPIR, ENETC_TBCIR, ENETC_TBLENR, ENETC_TBIER, ENETC_TBICR0,
+ ENETC_TBICR1
+};
+
+static const u32 enetc_rxbdr_regs[] = {
+ ENETC_RBMR, ENETC_RBSR, ENETC_RBBSR, ENETC_RBCIR, ENETC_RBBAR0,
+ ENETC_RBBAR1, ENETC_RBPIR, ENETC_RBLENR, ENETC_RBIER, ENETC_RBICR0,
+ ENETC_RBICR1
+};
+
+static const u32 enetc_port_regs[] = {
+ ENETC_PMR, ENETC_PSR, ENETC_PSIPMR, ENETC_PSIPMAR0(0),
+ ENETC_PSIPMAR1(0), ENETC_PTXMBAR, ENETC_PCAPR0, ENETC_PCAPR1,
+ ENETC_PSICFGR0(0), ENETC_PRFSCAPR, ENETC_PTCMSDUR(0),
+ ENETC_PM0_CMD_CFG, ENETC_PM0_MAXFRM, ENETC_PM0_IF_MODE
+};
+
+static int enetc_get_reglen(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ int len;
+
+ len = ARRAY_SIZE(enetc_si_regs);
+ len += ARRAY_SIZE(enetc_txbdr_regs) * priv->num_tx_rings;
+ len += ARRAY_SIZE(enetc_rxbdr_regs) * priv->num_rx_rings;
+
+ if (hw->port)
+ len += ARRAY_SIZE(enetc_port_regs);
+
+ len *= sizeof(u32) * 2; /* store 2 entries per reg: addr and value */
+
+ return len;
+}
+
+static void enetc_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
+ void *regbuf)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ u32 *buf = (u32 *)regbuf;
+ int i, j;
+ u32 addr;
+
+ for (i = 0; i < ARRAY_SIZE(enetc_si_regs); i++) {
+ *buf++ = enetc_si_regs[i];
+ *buf++ = enetc_rd(hw, enetc_si_regs[i]);
+ }
+
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ for (j = 0; j < ARRAY_SIZE(enetc_txbdr_regs); j++) {
+ addr = ENETC_BDR(TX, i, enetc_txbdr_regs[j]);
+
+ *buf++ = addr;
+ *buf++ = enetc_rd(hw, addr);
+ }
+ }
+
+ for (i = 0; i < priv->num_rx_rings; i++) {
+ for (j = 0; j < ARRAY_SIZE(enetc_rxbdr_regs); j++) {
+ addr = ENETC_BDR(RX, i, enetc_rxbdr_regs[j]);
+
+ *buf++ = addr;
+ *buf++ = enetc_rd(hw, addr);
+ }
+ }
+
+ if (!hw->port)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(enetc_port_regs); i++) {
+ addr = ENETC_PORT_BASE + enetc_port_regs[i];
+ *buf++ = addr;
+ *buf++ = enetc_rd(hw, addr);
+ }
+}
+
+static const struct {
+ int reg;
+ char name[ETH_GSTRING_LEN];
+} enetc_si_counters[] = {
+ { ENETC_SIROCT, "SI rx octets" },
+ { ENETC_SIRFRM, "SI rx frames" },
+ { ENETC_SIRUCA, "SI rx u-cast frames" },
+ { ENETC_SIRMCA, "SI rx m-cast frames" },
+ { ENETC_SITOCT, "SI tx octets" },
+ { ENETC_SITFRM, "SI tx frames" },
+ { ENETC_SITUCA, "SI tx u-cast frames" },
+ { ENETC_SITMCA, "SI tx m-cast frames" },
+ { ENETC_RBDCR(0), "Rx ring 0 discarded frames" },
+ { ENETC_RBDCR(1), "Rx ring 1 discarded frames" },
+ { ENETC_RBDCR(2), "Rx ring 2 discarded frames" },
+ { ENETC_RBDCR(3), "Rx ring 3 discarded frames" },
+ { ENETC_RBDCR(4), "Rx ring 4 discarded frames" },
+ { ENETC_RBDCR(5), "Rx ring 5 discarded frames" },
+ { ENETC_RBDCR(6), "Rx ring 6 discarded frames" },
+ { ENETC_RBDCR(7), "Rx ring 7 discarded frames" },
+ { ENETC_RBDCR(8), "Rx ring 8 discarded frames" },
+ { ENETC_RBDCR(9), "Rx ring 9 discarded frames" },
+ { ENETC_RBDCR(10), "Rx ring 10 discarded frames" },
+ { ENETC_RBDCR(11), "Rx ring 11 discarded frames" },
+ { ENETC_RBDCR(12), "Rx ring 12 discarded frames" },
+ { ENETC_RBDCR(13), "Rx ring 13 discarded frames" },
+ { ENETC_RBDCR(14), "Rx ring 14 discarded frames" },
+ { ENETC_RBDCR(15), "Rx ring 15 discarded frames" },
+};
+
+static const struct {
+ int reg;
+ char name[ETH_GSTRING_LEN];
+} enetc_port_counters[] = {
+ { ENETC_PM_REOCT(0), "MAC rx ethernet octets" },
+ { ENETC_PM_RALN(0), "MAC rx alignment errors" },
+ { ENETC_PM_RXPF(0), "MAC rx valid pause frames" },
+ { ENETC_PM_RFRM(0), "MAC rx valid frames" },
+ { ENETC_PM_RFCS(0), "MAC rx fcs errors" },
+ { ENETC_PM_RVLAN(0), "MAC rx VLAN frames" },
+ { ENETC_PM_RERR(0), "MAC rx frame errors" },
+ { ENETC_PM_RUCA(0), "MAC rx unicast frames" },
+ { ENETC_PM_RMCA(0), "MAC rx multicast frames" },
+ { ENETC_PM_RBCA(0), "MAC rx broadcast frames" },
+ { ENETC_PM_RDRP(0), "MAC rx dropped packets" },
+ { ENETC_PM_RPKT(0), "MAC rx packets" },
+ { ENETC_PM_RUND(0), "MAC rx undersized packets" },
+ { ENETC_PM_R64(0), "MAC rx 64 byte packets" },
+ { ENETC_PM_R127(0), "MAC rx 65-127 byte packets" },
+ { ENETC_PM_R255(0), "MAC rx 128-255 byte packets" },
+ { ENETC_PM_R511(0), "MAC rx 256-511 byte packets" },
+ { ENETC_PM_R1023(0), "MAC rx 512-1023 byte packets" },
+ { ENETC_PM_R1522(0), "MAC rx 1024-1522 byte packets" },
+ { ENETC_PM_R1523X(0), "MAC rx 1523 to max-octet packets" },
+ { ENETC_PM_ROVR(0), "MAC rx oversized packets" },
+ { ENETC_PM_RJBR(0), "MAC rx jabber packets" },
+ { ENETC_PM_RFRG(0), "MAC rx fragment packets" },
+ { ENETC_PM_RCNP(0), "MAC rx control packets" },
+ { ENETC_PM_RDRNTP(0), "MAC rx fifo drop" },
+ { ENETC_PM_TEOCT(0), "MAC tx ethernet octets" },
+ { ENETC_PM_TOCT(0), "MAC tx octets" },
+ { ENETC_PM_TCRSE(0), "MAC tx carrier sense errors" },
+ { ENETC_PM_TXPF(0), "MAC tx valid pause frames" },
+ { ENETC_PM_TFRM(0), "MAC tx frames" },
+ { ENETC_PM_TFCS(0), "MAC tx fcs errors" },
+ { ENETC_PM_TVLAN(0), "MAC tx VLAN frames" },
+ { ENETC_PM_TERR(0), "MAC tx frame errors" },
+ { ENETC_PM_TUCA(0), "MAC tx unicast frames" },
+ { ENETC_PM_TMCA(0), "MAC tx multicast frames" },
+ { ENETC_PM_TBCA(0), "MAC tx broadcast frames" },
+ { ENETC_PM_TPKT(0), "MAC tx packets" },
+ { ENETC_PM_TUND(0), "MAC tx undersized packets" },
+ { ENETC_PM_T64(0), "MAC tx 64 byte packets" },
+ { ENETC_PM_T127(0), "MAC tx 65-127 byte packets" },
+ { ENETC_PM_T255(0), "MAC tx 128-255 byte packets" },
+ { ENETC_PM_T511(0), "MAC tx 256-511 byte packets" },
+ { ENETC_PM_T1023(0), "MAC tx 512-1023 byte packets" },
+ { ENETC_PM_T1522(0), "MAC tx 1024-1522 byte packets" },
+ { ENETC_PM_T1523X(0), "MAC tx 1523 to max-octet packets" },
+ { ENETC_PM_TCNP(0), "MAC tx control packets" },
+ { ENETC_PM_TDFR(0), "MAC tx deferred packets" },
+ { ENETC_PM_TMCOL(0), "MAC tx multiple collisions" },
+ { ENETC_PM_TSCOL(0), "MAC tx single collisions" },
+ { ENETC_PM_TLCOL(0), "MAC tx late collisions" },
+ { ENETC_PM_TECOL(0), "MAC tx excessive collisions" },
+ { ENETC_UFDMF, "SI MAC nomatch u-cast discards" },
+ { ENETC_MFDMF, "SI MAC nomatch m-cast discards" },
+ { ENETC_PBFDSIR, "SI MAC nomatch b-cast discards" },
+ { ENETC_PUFDVFR, "SI VLAN nomatch u-cast discards" },
+ { ENETC_PMFDVFR, "SI VLAN nomatch m-cast discards" },
+ { ENETC_PBFDVFR, "SI VLAN nomatch b-cast discards" },
+ { ENETC_PFDMSAPR, "SI pruning discarded frames" },
+ { ENETC_PICDR(0), "ICM DR0 discarded frames" },
+ { ENETC_PICDR(1), "ICM DR1 discarded frames" },
+ { ENETC_PICDR(2), "ICM DR2 discarded frames" },
+ { ENETC_PICDR(3), "ICM DR3 discarded frames" },
+};
+
+static const char rx_ring_stats[][ETH_GSTRING_LEN] = {
+ "Rx ring %2d frames",
+ "Rx ring %2d alloc errors",
+ "Rx ring %2d XDP drops",
+ "Rx ring %2d recycles",
+ "Rx ring %2d recycle failures",
+ "Rx ring %2d redirects",
+ "Rx ring %2d redirect failures",
+ "Rx ring %2d redirect S/G",
+};
+
+static const char tx_ring_stats[][ETH_GSTRING_LEN] = {
+ "Tx ring %2d frames",
+ "Tx ring %2d XDP frames",
+ "Tx ring %2d XDP drops",
+ "Tx window drop %2d frames",
+};
+
+static int enetc_get_sset_count(struct net_device *ndev, int sset)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int len;
+
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ len = ARRAY_SIZE(enetc_si_counters) +
+ ARRAY_SIZE(tx_ring_stats) * priv->num_tx_rings +
+ ARRAY_SIZE(rx_ring_stats) * priv->num_rx_rings;
+
+ if (!enetc_si_is_pf(priv->si))
+ return len;
+
+ len += ARRAY_SIZE(enetc_port_counters);
+
+ return len;
+}
+
+static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ u8 *p = data;
+ int i, j;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++) {
+ strscpy(p, enetc_si_counters[i].name, ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ for (j = 0; j < ARRAY_SIZE(tx_ring_stats); j++) {
+ snprintf(p, ETH_GSTRING_LEN, tx_ring_stats[j],
+ i);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+ for (i = 0; i < priv->num_rx_rings; i++) {
+ for (j = 0; j < ARRAY_SIZE(rx_ring_stats); j++) {
+ snprintf(p, ETH_GSTRING_LEN, rx_ring_stats[j],
+ i);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+
+ if (!enetc_si_is_pf(priv->si))
+ break;
+
+ for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++) {
+ strscpy(p, enetc_port_counters[i].name,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static void enetc_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ int i, o = 0;
+
+ for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++)
+ data[o++] = enetc_rd64(hw, enetc_si_counters[i].reg);
+
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ data[o++] = priv->tx_ring[i]->stats.packets;
+ data[o++] = priv->tx_ring[i]->stats.xdp_tx;
+ data[o++] = priv->tx_ring[i]->stats.xdp_tx_drops;
+ data[o++] = priv->tx_ring[i]->stats.win_drop;
+ }
+
+ for (i = 0; i < priv->num_rx_rings; i++) {
+ data[o++] = priv->rx_ring[i]->stats.packets;
+ data[o++] = priv->rx_ring[i]->stats.rx_alloc_errs;
+ data[o++] = priv->rx_ring[i]->stats.xdp_drops;
+ data[o++] = priv->rx_ring[i]->stats.recycles;
+ data[o++] = priv->rx_ring[i]->stats.recycle_failures;
+ data[o++] = priv->rx_ring[i]->stats.xdp_redirect;
+ data[o++] = priv->rx_ring[i]->stats.xdp_redirect_failures;
+ data[o++] = priv->rx_ring[i]->stats.xdp_redirect_sg;
+ }
+
+ if (!enetc_si_is_pf(priv->si))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++)
+ data[o++] = enetc_port_rd(hw, enetc_port_counters[i].reg);
+}
+
+static void enetc_get_pause_stats(struct net_device *ndev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+
+ pause_stats->tx_pause_frames = enetc_port_rd(hw, ENETC_PM_TXPF(0));
+ pause_stats->rx_pause_frames = enetc_port_rd(hw, ENETC_PM_RXPF(0));
+}
+
+static void enetc_mac_stats(struct enetc_hw *hw, int mac,
+ struct ethtool_eth_mac_stats *s)
+{
+ s->FramesTransmittedOK = enetc_port_rd(hw, ENETC_PM_TFRM(mac));
+ s->SingleCollisionFrames = enetc_port_rd(hw, ENETC_PM_TSCOL(mac));
+ s->MultipleCollisionFrames = enetc_port_rd(hw, ENETC_PM_TMCOL(mac));
+ s->FramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RFRM(mac));
+ s->FrameCheckSequenceErrors = enetc_port_rd(hw, ENETC_PM_RFCS(mac));
+ s->AlignmentErrors = enetc_port_rd(hw, ENETC_PM_RALN(mac));
+ s->OctetsTransmittedOK = enetc_port_rd(hw, ENETC_PM_TEOCT(mac));
+ s->FramesWithDeferredXmissions = enetc_port_rd(hw, ENETC_PM_TDFR(mac));
+ s->LateCollisions = enetc_port_rd(hw, ENETC_PM_TLCOL(mac));
+ s->FramesAbortedDueToXSColls = enetc_port_rd(hw, ENETC_PM_TECOL(mac));
+ s->FramesLostDueToIntMACXmitError = enetc_port_rd(hw, ENETC_PM_TERR(mac));
+ s->CarrierSenseErrors = enetc_port_rd(hw, ENETC_PM_TCRSE(mac));
+ s->OctetsReceivedOK = enetc_port_rd(hw, ENETC_PM_REOCT(mac));
+ s->FramesLostDueToIntMACRcvError = enetc_port_rd(hw, ENETC_PM_RDRNTP(mac));
+ s->MulticastFramesXmittedOK = enetc_port_rd(hw, ENETC_PM_TMCA(mac));
+ s->BroadcastFramesXmittedOK = enetc_port_rd(hw, ENETC_PM_TBCA(mac));
+ s->MulticastFramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RMCA(mac));
+ s->BroadcastFramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RBCA(mac));
+}
+
+static void enetc_ctrl_stats(struct enetc_hw *hw, int mac,
+ struct ethtool_eth_ctrl_stats *s)
+{
+ s->MACControlFramesTransmitted = enetc_port_rd(hw, ENETC_PM_TCNP(mac));
+ s->MACControlFramesReceived = enetc_port_rd(hw, ENETC_PM_RCNP(mac));
+}
+
+static const struct ethtool_rmon_hist_range enetc_rmon_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1522 },
+ { 1523, ENETC_MAC_MAXFRM_SIZE },
+ {},
+};
+
+static void enetc_rmon_stats(struct enetc_hw *hw, int mac,
+ struct ethtool_rmon_stats *s,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ s->undersize_pkts = enetc_port_rd(hw, ENETC_PM_RUND(mac));
+ s->oversize_pkts = enetc_port_rd(hw, ENETC_PM_ROVR(mac));
+ s->fragments = enetc_port_rd(hw, ENETC_PM_RFRG(mac));
+ s->jabbers = enetc_port_rd(hw, ENETC_PM_RJBR(mac));
+
+ s->hist[0] = enetc_port_rd(hw, ENETC_PM_R64(mac));
+ s->hist[1] = enetc_port_rd(hw, ENETC_PM_R127(mac));
+ s->hist[2] = enetc_port_rd(hw, ENETC_PM_R255(mac));
+ s->hist[3] = enetc_port_rd(hw, ENETC_PM_R511(mac));
+ s->hist[4] = enetc_port_rd(hw, ENETC_PM_R1023(mac));
+ s->hist[5] = enetc_port_rd(hw, ENETC_PM_R1522(mac));
+ s->hist[6] = enetc_port_rd(hw, ENETC_PM_R1523X(mac));
+
+ s->hist_tx[0] = enetc_port_rd(hw, ENETC_PM_T64(mac));
+ s->hist_tx[1] = enetc_port_rd(hw, ENETC_PM_T127(mac));
+ s->hist_tx[2] = enetc_port_rd(hw, ENETC_PM_T255(mac));
+ s->hist_tx[3] = enetc_port_rd(hw, ENETC_PM_T511(mac));
+ s->hist_tx[4] = enetc_port_rd(hw, ENETC_PM_T1023(mac));
+ s->hist_tx[5] = enetc_port_rd(hw, ENETC_PM_T1522(mac));
+ s->hist_tx[6] = enetc_port_rd(hw, ENETC_PM_T1523X(mac));
+
+ *ranges = enetc_rmon_ranges;
+}
+
+static void enetc_get_eth_mac_stats(struct net_device *ndev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+
+ enetc_mac_stats(hw, 0, mac_stats);
+}
+
+static void enetc_get_eth_ctrl_stats(struct net_device *ndev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+
+ enetc_ctrl_stats(hw, 0, ctrl_stats);
+}
+
+static void enetc_get_rmon_stats(struct net_device *ndev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+
+ enetc_rmon_stats(hw, 0, rmon_stats, ranges);
+}
+
+#define ENETC_RSSHASH_L3 (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO | RXH_IP_SRC | \
+ RXH_IP_DST)
+#define ENETC_RSSHASH_L4 (ENETC_RSSHASH_L3 | RXH_L4_B_0_1 | RXH_L4_B_2_3)
+static int enetc_get_rsshash(struct ethtool_rxnfc *rxnfc)
+{
+ static const u32 rsshash[] = {
+ [TCP_V4_FLOW] = ENETC_RSSHASH_L4,
+ [UDP_V4_FLOW] = ENETC_RSSHASH_L4,
+ [SCTP_V4_FLOW] = ENETC_RSSHASH_L4,
+ [AH_ESP_V4_FLOW] = ENETC_RSSHASH_L3,
+ [IPV4_FLOW] = ENETC_RSSHASH_L3,
+ [TCP_V6_FLOW] = ENETC_RSSHASH_L4,
+ [UDP_V6_FLOW] = ENETC_RSSHASH_L4,
+ [SCTP_V6_FLOW] = ENETC_RSSHASH_L4,
+ [AH_ESP_V6_FLOW] = ENETC_RSSHASH_L3,
+ [IPV6_FLOW] = ENETC_RSSHASH_L3,
+ [ETHER_FLOW] = 0,
+ };
+
+ if (rxnfc->flow_type >= ARRAY_SIZE(rsshash))
+ return -EINVAL;
+
+ rxnfc->data = rsshash[rxnfc->flow_type];
+
+ return 0;
+}
+
+/* current HW spec does byte reversal on everything including MAC addresses */
+static void ether_addr_copy_swap(u8 *dst, const u8 *src)
+{
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ dst[i] = src[ETH_ALEN - i - 1];
+}
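/* Worked example (illustrative): with the swap above, a flow-spec MAC of
 * 00:11:22:33:44:55 is programmed into the RFSE as 55:44:33:22:11:00.
 */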
+
+static int enetc_set_cls_entry(struct enetc_si *si,
+ struct ethtool_rx_flow_spec *fs, bool en)
+{
+ struct ethtool_tcpip4_spec *l4ip4_h, *l4ip4_m;
+ struct ethtool_usrip4_spec *l3ip4_h, *l3ip4_m;
+ struct ethhdr *eth_h, *eth_m;
+ struct enetc_cmd_rfse rfse = { {0} };
+
+ if (!en)
+ goto done;
+
+ switch (fs->flow_type & 0xff) {
+ case TCP_V4_FLOW:
+ l4ip4_h = &fs->h_u.tcp_ip4_spec;
+ l4ip4_m = &fs->m_u.tcp_ip4_spec;
+ goto l4ip4;
+ case UDP_V4_FLOW:
+ l4ip4_h = &fs->h_u.udp_ip4_spec;
+ l4ip4_m = &fs->m_u.udp_ip4_spec;
+ goto l4ip4;
+ case SCTP_V4_FLOW:
+ l4ip4_h = &fs->h_u.sctp_ip4_spec;
+ l4ip4_m = &fs->m_u.sctp_ip4_spec;
+l4ip4:
+ rfse.sip_h[0] = l4ip4_h->ip4src;
+ rfse.sip_m[0] = l4ip4_m->ip4src;
+ rfse.dip_h[0] = l4ip4_h->ip4dst;
+ rfse.dip_m[0] = l4ip4_m->ip4dst;
+ rfse.sport_h = ntohs(l4ip4_h->psrc);
+ rfse.sport_m = ntohs(l4ip4_m->psrc);
+ rfse.dport_h = ntohs(l4ip4_h->pdst);
+ rfse.dport_m = ntohs(l4ip4_m->pdst);
+ if (l4ip4_m->tos)
+ netdev_warn(si->ndev, "ToS field is not supported and was ignored\n");
+ rfse.ethtype_h = ETH_P_IP; /* IPv4 */
+ rfse.ethtype_m = 0xffff;
+ break;
+ case IP_USER_FLOW:
+ l3ip4_h = &fs->h_u.usr_ip4_spec;
+ l3ip4_m = &fs->m_u.usr_ip4_spec;
+
+ rfse.sip_h[0] = l3ip4_h->ip4src;
+ rfse.sip_m[0] = l3ip4_m->ip4src;
+ rfse.dip_h[0] = l3ip4_h->ip4dst;
+ rfse.dip_m[0] = l3ip4_m->ip4dst;
+ if (l3ip4_m->tos)
+ netdev_warn(si->ndev, "ToS field is not supported and was ignored\n");
+ rfse.ethtype_h = ETH_P_IP; /* IPv4 */
+ rfse.ethtype_m = 0xffff;
+ break;
+ case ETHER_FLOW:
+ eth_h = &fs->h_u.ether_spec;
+ eth_m = &fs->m_u.ether_spec;
+
+ ether_addr_copy_swap(rfse.smac_h, eth_h->h_source);
+ ether_addr_copy_swap(rfse.smac_m, eth_m->h_source);
+ ether_addr_copy_swap(rfse.dmac_h, eth_h->h_dest);
+ ether_addr_copy_swap(rfse.dmac_m, eth_m->h_dest);
+ rfse.ethtype_h = ntohs(eth_h->h_proto);
+ rfse.ethtype_m = ntohs(eth_m->h_proto);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ rfse.mode |= ENETC_RFSE_EN;
+ if (fs->ring_cookie != RX_CLS_FLOW_DISC) {
+ rfse.mode |= ENETC_RFSE_MODE_BD;
+ rfse.result = fs->ring_cookie;
+ }
+done:
+ return enetc_set_fs_entry(si, &rfse, fs->location);
+}
+
+static int enetc_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc,
+ u32 *rule_locs)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int i, j;
+
+ switch (rxnfc->cmd) {
+ case ETHTOOL_GRXRINGS:
+ rxnfc->data = priv->num_rx_rings;
+ break;
+ case ETHTOOL_GRXFH:
+ /* get RSS hash config */
+ return enetc_get_rsshash(rxnfc);
+ case ETHTOOL_GRXCLSRLCNT:
+ /* total number of entries */
+ rxnfc->data = priv->si->num_fs_entries;
+ /* number of entries in use */
+ rxnfc->rule_cnt = 0;
+ for (i = 0; i < priv->si->num_fs_entries; i++)
+ if (priv->cls_rules[i].used)
+ rxnfc->rule_cnt++;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ if (rxnfc->fs.location >= priv->si->num_fs_entries)
+ return -EINVAL;
+
+ /* get entry x */
+ rxnfc->fs = priv->cls_rules[rxnfc->fs.location].fs;
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ /* total number of entries */
+ rxnfc->data = priv->si->num_fs_entries;
+ /* array of indexes of used entries */
+ j = 0;
+ for (i = 0; i < priv->si->num_fs_entries; i++) {
+ if (!priv->cls_rules[i].used)
+ continue;
+ if (j == rxnfc->rule_cnt)
+ return -EMSGSIZE;
+ rule_locs[j++] = i;
+ }
+ /* number of entries in use */
+ rxnfc->rule_cnt = j;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int enetc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int err;
+
+ switch (rxnfc->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ if (rxnfc->fs.location >= priv->si->num_fs_entries)
+ return -EINVAL;
+
+ if (rxnfc->fs.ring_cookie >= priv->num_rx_rings &&
+ rxnfc->fs.ring_cookie != RX_CLS_FLOW_DISC)
+ return -EINVAL;
+
+ err = enetc_set_cls_entry(priv->si, &rxnfc->fs, true);
+ if (err)
+ return err;
+ priv->cls_rules[rxnfc->fs.location].fs = rxnfc->fs;
+ priv->cls_rules[rxnfc->fs.location].used = 1;
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ if (rxnfc->fs.location >= priv->si->num_fs_entries)
+ return -EINVAL;
+
+ err = enetc_set_cls_entry(priv->si, &rxnfc->fs, false);
+ if (err)
+ return err;
+ priv->cls_rules[rxnfc->fs.location].used = 0;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
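/* Example user-space usage (illustrative; "eth0", the ring and the location
 * are placeholders):
 *   ethtool -N eth0 flow-type tcp4 dst-port 80 action 2 loc 1
 * installs rule 1 via ETHTOOL_SRXCLSRLINS, steering the flow to RX ring 2,
 * while
 *   ethtool -N eth0 delete 1
 * removes it via ETHTOOL_SRXCLSRLDEL.
 */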
+
+static u32 enetc_get_rxfh_key_size(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+ /* return the size of the RX flow hash key. PF only */
+ return (priv->si->hw.port) ? ENETC_RSSHASH_KEY_SIZE : 0;
+}
+
+static u32 enetc_get_rxfh_indir_size(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+ /* return the size of the RX flow hash indirection table */
+ return priv->si->num_rss;
+}
+
+static int enetc_get_rxfh(struct net_device *ndev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ int err = 0, i;
+
+ /* return hash function */
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ /* return hash key */
+ if (key && hw->port)
+ for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
+ ((u32 *)key)[i] = enetc_port_rd(hw, ENETC_PRSSK(i));
+
+ /* return RSS table */
+ if (indir)
+ err = enetc_get_rss_table(priv->si, indir, priv->si->num_rss);
+
+ return err;
+}
+
+void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes)
+{
+ int i;
+
+ for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
+ enetc_port_wr(hw, ENETC_PRSSK(i), ((u32 *)bytes)[i]);
+}
+
+static int enetc_set_rxfh(struct net_device *ndev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ int err = 0;
+
+ /* set hash key, if PF */
+ if (key && hw->port)
+ enetc_set_rss_key(hw, key);
+
+ /* set RSS table */
+ if (indir)
+ err = enetc_set_rss_table(priv->si, indir, priv->si->num_rss);
+
+ return err;
+}
+
+static void enetc_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+ ring->rx_pending = priv->rx_bd_count;
+ ring->tx_pending = priv->tx_bd_count;
+
+ /* do some h/w sanity checks for BDR length */
+ if (netif_running(ndev)) {
+ struct enetc_hw *hw = &priv->si->hw;
+ u32 val = enetc_rxbdr_rd(hw, 0, ENETC_RBLENR);
+
+ if (val != priv->rx_bd_count)
+ netif_err(priv, hw, ndev, "RxBDR[RBLENR] = %d!\n", val);
+
+ val = enetc_txbdr_rd(hw, 0, ENETC_TBLENR);
+
+ if (val != priv->tx_bd_count)
+ netif_err(priv, hw, ndev, "TxBDR[TBLENR] = %d!\n", val);
+ }
+}
+
+static int enetc_get_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *ic,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_int_vector *v = priv->int_vector[0];
+
+ ic->tx_coalesce_usecs = enetc_cycles_to_usecs(priv->tx_ictt);
+ ic->rx_coalesce_usecs = enetc_cycles_to_usecs(v->rx_ictt);
+
+ ic->tx_max_coalesced_frames = ENETC_TXIC_PKTTHR;
+ ic->rx_max_coalesced_frames = ENETC_RXIC_PKTTHR;
+
+ ic->use_adaptive_rx_coalesce = priv->ic_mode & ENETC_IC_RX_ADAPTIVE;
+
+ return 0;
+}
+
+static int enetc_set_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *ic,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ u32 rx_ictt, tx_ictt;
+ int i, ic_mode;
+ bool changed;
+
+ tx_ictt = enetc_usecs_to_cycles(ic->tx_coalesce_usecs);
+ rx_ictt = enetc_usecs_to_cycles(ic->rx_coalesce_usecs);
+
+ if (ic->rx_max_coalesced_frames != ENETC_RXIC_PKTTHR)
+ return -EOPNOTSUPP;
+
+ if (ic->tx_max_coalesced_frames != ENETC_TXIC_PKTTHR)
+ return -EOPNOTSUPP;
+
+ ic_mode = ENETC_IC_NONE;
+ if (ic->use_adaptive_rx_coalesce) {
+ ic_mode |= ENETC_IC_RX_ADAPTIVE;
+ rx_ictt = 0x1;
+ } else {
+ ic_mode |= rx_ictt ? ENETC_IC_RX_MANUAL : 0;
+ }
+
+ ic_mode |= tx_ictt ? ENETC_IC_TX_MANUAL : 0;
+
+ /* commit the settings */
+ changed = (ic_mode != priv->ic_mode) || (priv->tx_ictt != tx_ictt);
+
+ priv->ic_mode = ic_mode;
+ priv->tx_ictt = tx_ictt;
+
+ for (i = 0; i < priv->bdr_int_num; i++) {
+ struct enetc_int_vector *v = priv->int_vector[i];
+
+ v->rx_ictt = rx_ictt;
+ v->rx_dim_en = !!(ic_mode & ENETC_IC_RX_ADAPTIVE);
+ }
+
+ if (netif_running(ndev) && changed) {
+		/* reconfigure the operation mode of h/w interrupts;
+		 * traffic needs to be paused in the process
+ */
+ enetc_stop(ndev);
+ enetc_start(ndev);
+ }
+
+ return 0;
+}
+
+static int enetc_get_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+ int *phc_idx;
+
+ phc_idx = symbol_get(enetc_phc_index);
+ if (phc_idx) {
+ info->phc_index = *phc_idx;
+ symbol_put(enetc_phc_index);
+ } else {
+ info->phc_index = -1;
+ }
+
+#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON) |
+ (1 << HWTSTAMP_TX_ONESTEP_SYNC);
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+#else
+ info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+#endif
+ return 0;
+}
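/* Illustrative note: "ethtool -T <iface>" reports the capabilities filled in
 * above, including the PHC index resolved from the PTP clock driver
 * (CONFIG_FSL_ENETC_PTP_CLOCK), or -1 when that driver is not loaded.
 */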
+
+static void enetc_get_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ if (dev->phydev)
+ phy_ethtool_get_wol(dev->phydev, wol);
+}
+
+static int enetc_set_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ int ret;
+
+ if (!dev->phydev)
+ return -EOPNOTSUPP;
+
+ ret = phy_ethtool_set_wol(dev->phydev, wol);
+ if (!ret)
+ device_set_wakeup_enable(&dev->dev, wol->wolopts);
+
+ return ret;
+}
+
+static void enetc_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(dev);
+
+ phylink_ethtool_get_pauseparam(priv->phylink, pause);
+}
+
+static int enetc_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(dev);
+
+ return phylink_ethtool_set_pauseparam(priv->phylink, pause);
+}
+
+static int enetc_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(dev);
+
+ if (!priv->phylink)
+ return -EOPNOTSUPP;
+
+ return phylink_ethtool_ksettings_get(priv->phylink, cmd);
+}
+
+static int enetc_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(dev);
+
+ if (!priv->phylink)
+ return -EOPNOTSUPP;
+
+ return phylink_ethtool_ksettings_set(priv->phylink, cmd);
+}
+
+static const struct ethtool_ops enetc_pf_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
+ .get_regs_len = enetc_get_reglen,
+ .get_regs = enetc_get_regs,
+ .get_sset_count = enetc_get_sset_count,
+ .get_strings = enetc_get_strings,
+ .get_ethtool_stats = enetc_get_ethtool_stats,
+ .get_pause_stats = enetc_get_pause_stats,
+ .get_rmon_stats = enetc_get_rmon_stats,
+ .get_eth_ctrl_stats = enetc_get_eth_ctrl_stats,
+ .get_eth_mac_stats = enetc_get_eth_mac_stats,
+ .get_rxnfc = enetc_get_rxnfc,
+ .set_rxnfc = enetc_set_rxnfc,
+ .get_rxfh_key_size = enetc_get_rxfh_key_size,
+ .get_rxfh_indir_size = enetc_get_rxfh_indir_size,
+ .get_rxfh = enetc_get_rxfh,
+ .set_rxfh = enetc_set_rxfh,
+ .get_ringparam = enetc_get_ringparam,
+ .get_coalesce = enetc_get_coalesce,
+ .set_coalesce = enetc_set_coalesce,
+ .get_link_ksettings = enetc_get_link_ksettings,
+ .set_link_ksettings = enetc_set_link_ksettings,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = enetc_get_ts_info,
+ .get_wol = enetc_get_wol,
+ .set_wol = enetc_set_wol,
+ .get_pauseparam = enetc_get_pauseparam,
+ .set_pauseparam = enetc_set_pauseparam,
+};
+
+static const struct ethtool_ops enetc_vf_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
+ .get_regs_len = enetc_get_reglen,
+ .get_regs = enetc_get_regs,
+ .get_sset_count = enetc_get_sset_count,
+ .get_strings = enetc_get_strings,
+ .get_ethtool_stats = enetc_get_ethtool_stats,
+ .get_rxnfc = enetc_get_rxnfc,
+ .set_rxnfc = enetc_set_rxnfc,
+ .get_rxfh_indir_size = enetc_get_rxfh_indir_size,
+ .get_rxfh = enetc_get_rxfh,
+ .set_rxfh = enetc_set_rxfh,
+ .get_ringparam = enetc_get_ringparam,
+ .get_coalesce = enetc_get_coalesce,
+ .set_coalesce = enetc_set_coalesce,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = enetc_get_ts_info,
+};
+
+void enetc_set_ethtool_ops(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+ if (enetc_si_is_pf(priv->si))
+ ndev->ethtool_ops = &enetc_pf_ethtool_ops;
+ else
+ ndev->ethtool_ops = &enetc_vf_ethtool_ops;
+}
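/* Illustrative mapping (not part of the driver) from common ethtool commands
 * to the ops wired up above; "eth0" is a placeholder interface name:
 *   ethtool -S eth0              -> .get_strings / .get_ethtool_stats
 *   ethtool -C eth0 rx-usecs 64  -> .set_coalesce
 *   ethtool -X eth0 equal 8      -> .set_rxfh (RSS indirection table)
 *   ethtool -g eth0              -> .get_ringparam
 *   ethtool -A eth0 rx on tx on  -> .set_pauseparam (PF ops only)
 */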
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
new file mode 100644
index 000000000..18ca1f42b
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -0,0 +1,965 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2017-2019 NXP */
+
+#include <linux/bitops.h>
+
+/* ENETC device IDs */
+#define ENETC_DEV_ID_PF 0xe100
+#define ENETC_DEV_ID_VF 0xef00
+#define ENETC_DEV_ID_PTP 0xee02
+
+/* ENETC register block BAR */
+#define ENETC_BAR_REGS 0
+
+/** SI regs, offset: 0h */
+#define ENETC_SIMR 0
+#define ENETC_SIMR_EN BIT(31)
+#define ENETC_SIMR_RSSE BIT(0)
+#define ENETC_SICTR0 0x18
+#define ENETC_SICTR1 0x1c
+#define ENETC_SIPCAPR0 0x20
+#define ENETC_SIPCAPR0_QBV BIT(4)
+#define ENETC_SIPCAPR0_PSFP BIT(9)
+#define ENETC_SIPCAPR0_RSS BIT(8)
+#define ENETC_SIPCAPR1 0x24
+#define ENETC_SITGTGR 0x30
+#define ENETC_SIRBGCR 0x38
+/* cache attribute registers for transactions initiated by ENETC */
+#define ENETC_SICAR0 0x40
+#define ENETC_SICAR1 0x44
+#define ENETC_SICAR2 0x48
+/* rd snoop, no alloc
+ * wr snoop, no alloc, partial cache line update for BDs and full cache line
+ * update for data
+ */
+#define ENETC_SICAR_RD_COHERENT 0x2b2b0000
+#define ENETC_SICAR_WR_COHERENT 0x00006727
+#define ENETC_SICAR_MSI 0x00300030 /* rd/wr device, no snoop, no alloc */
+
+#define ENETC_SIPMAR0 0x80
+#define ENETC_SIPMAR1 0x84
+
+/* VF-PF Message passing */
+#define ENETC_DEFAULT_MSG_SIZE 1024 /* and max size */
+/* msg size encoding: default and max msg value of 1024B encoded as 0 */
+static inline u32 enetc_vsi_set_msize(u32 size)
+{
+ return size < ENETC_DEFAULT_MSG_SIZE ? size >> 5 : 0;
+}
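/* Worked example: a 256 B message is encoded as 256 >> 5 = 8, while the
 * default/maximum 1024 B size falls through to the 0 encoding described in
 * the comment above.
 */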
+
+#define ENETC_PSIMSGRR 0x204
+#define ENETC_PSIMSGRR_MR_MASK GENMASK(2, 1)
+#define ENETC_PSIMSGRR_MR(n) BIT((n) + 1) /* n = VSI index */
+#define ENETC_PSIVMSGRCVAR0(n) (0x210 + (n) * 0x8) /* n = VSI index */
+#define ENETC_PSIVMSGRCVAR1(n) (0x214 + (n) * 0x8)
+
+#define ENETC_VSIMSGSR 0x204 /* RO */
+#define ENETC_VSIMSGSR_MB BIT(0)
+#define ENETC_VSIMSGSR_MS BIT(1)
+#define ENETC_VSIMSGSNDAR0 0x210
+#define ENETC_VSIMSGSNDAR1 0x214
+
+#define ENETC_SIMSGSR_SET_MC(val) ((val) << 16)
+#define ENETC_SIMSGSR_GET_MC(val) ((val) >> 16)
+
+/* SI statistics */
+#define ENETC_SIROCT 0x300
+#define ENETC_SIRFRM 0x308
+#define ENETC_SIRUCA 0x310
+#define ENETC_SIRMCA 0x318
+#define ENETC_SITOCT 0x320
+#define ENETC_SITFRM 0x328
+#define ENETC_SITUCA 0x330
+#define ENETC_SITMCA 0x338
+#define ENETC_RBDCR(n) (0x8180 + (n) * 0x200)
+
+/* Control BDR regs */
+#define ENETC_SICBDRMR 0x800
+#define ENETC_SICBDRSR 0x804 /* RO */
+#define ENETC_SICBDRBAR0 0x810
+#define ENETC_SICBDRBAR1 0x814
+#define ENETC_SICBDRPIR 0x818
+#define ENETC_SICBDRCIR 0x81c
+#define ENETC_SICBDRLENR 0x820
+
+#define ENETC_SICAPR0 0x900
+#define ENETC_SICAPR1 0x904
+
+#define ENETC_PSIIER 0xa00
+#define ENETC_PSIIER_MR_MASK GENMASK(2, 1)
+#define ENETC_PSIIDR 0xa08
+#define ENETC_SITXIDR 0xa18
+#define ENETC_SIRXIDR 0xa28
+#define ENETC_SIMSIVR 0xa30
+
+#define ENETC_SIMSITRV(n) (0xB00 + (n) * 0x4)
+#define ENETC_SIMSIRRV(n) (0xB80 + (n) * 0x4)
+
+#define ENETC_SIUEFDCR 0xe28
+
+#define ENETC_SIRFSCAPR 0x1200
+#define ENETC_SIRFSCAPR_GET_NUM_RFS(val) ((val) & 0x7f)
+#define ENETC_SIRSSCAPR 0x1600
+#define ENETC_SIRSSCAPR_GET_NUM_RSS(val) (BIT((val) & 0xf) * 32)
+
+/** SI BDR sub-blocks, n = 0..7 */
+enum enetc_bdr_type {TX, RX};
+#define ENETC_BDR_OFF(i) ((i) * 0x200)
+#define ENETC_BDR(t, i, r) (0x8000 + (t) * 0x100 + ENETC_BDR_OFF(i) + (r))
+/* RX BDR reg offsets */
+#define ENETC_RBMR 0
+#define ENETC_RBMR_BDS BIT(2)
+#define ENETC_RBMR_CM BIT(4)
+#define ENETC_RBMR_VTE BIT(5)
+#define ENETC_RBMR_EN BIT(31)
+#define ENETC_RBSR 0x4
+#define ENETC_RBBSR 0x8
+#define ENETC_RBCIR 0xc
+#define ENETC_RBBAR0 0x10
+#define ENETC_RBBAR1 0x14
+#define ENETC_RBPIR 0x18
+#define ENETC_RBLENR 0x20
+#define ENETC_RBIER 0xa0
+#define ENETC_RBIER_RXTIE BIT(0)
+#define ENETC_RBIDR 0xa4
+#define ENETC_RBICR0 0xa8
+#define ENETC_RBICR0_ICEN BIT(31)
+#define ENETC_RBICR0_ICPT_MASK 0x1ff
+#define ENETC_RBICR0_SET_ICPT(n) ((n) & ENETC_RBICR0_ICPT_MASK)
+#define ENETC_RBICR1 0xac
+
+/* TX BDR reg offsets */
+#define ENETC_TBMR 0
+#define ENETC_TBSR_BUSY BIT(0)
+#define ENETC_TBMR_VIH BIT(9)
+#define ENETC_TBMR_PRIO_MASK GENMASK(2, 0)
+#define ENETC_TBMR_SET_PRIO(val) ((val) & ENETC_TBMR_PRIO_MASK)
+#define ENETC_TBMR_EN BIT(31)
+#define ENETC_TBSR 0x4
+#define ENETC_TBBAR0 0x10
+#define ENETC_TBBAR1 0x14
+#define ENETC_TBPIR 0x18
+#define ENETC_TBCIR 0x1c
+#define ENETC_TBCIR_IDX_MASK 0xffff
+#define ENETC_TBLENR 0x20
+#define ENETC_TBIER 0xa0
+#define ENETC_TBIER_TXTIE BIT(0)
+#define ENETC_TBIDR 0xa4
+#define ENETC_TBICR0 0xa8
+#define ENETC_TBICR0_ICEN BIT(31)
+#define ENETC_TBICR0_ICPT_MASK 0xf
+#define ENETC_TBICR0_SET_ICPT(n) ((ilog2(n) + 1) & ENETC_TBICR0_ICPT_MASK)
+#define ENETC_TBICR1 0xac
+
+#define ENETC_RTBLENR_LEN(n) ((n) & ~0x7)
+
+/* Port regs, offset: 1_0000h */
+#define ENETC_PORT_BASE 0x10000
+#define ENETC_PMR 0x0000
+#define ENETC_PMR_EN GENMASK(18, 16)
+#define ENETC_PMR_PSPEED_MASK GENMASK(11, 8)
+#define ENETC_PMR_PSPEED_10M 0
+#define ENETC_PMR_PSPEED_100M BIT(8)
+#define ENETC_PMR_PSPEED_1000M BIT(9)
+#define ENETC_PMR_PSPEED_2500M BIT(10)
+#define ENETC_PSR 0x0004 /* RO */
+#define ENETC_PSIPMR 0x0018
+#define ENETC_PSIPMR_SET_UP(n) BIT(n) /* n = SI index */
+#define ENETC_PSIPMR_SET_MP(n) BIT((n) + 16)
+#define ENETC_PSIPVMR 0x001c
+#define ENETC_VLAN_PROMISC_MAP_ALL 0x7
+#define ENETC_PSIPVMR_SET_VP(simap) ((simap) & 0x7)
+#define ENETC_PSIPVMR_SET_VUTA(simap) (((simap) & 0x7) << 16)
+#define ENETC_PSIPMAR0(n) (0x0100 + (n) * 0x8) /* n = SI index */
+#define ENETC_PSIPMAR1(n) (0x0104 + (n) * 0x8)
+#define ENETC_PVCLCTR 0x0208
+#define ENETC_PCVLANR1 0x0210
+#define ENETC_PCVLANR2 0x0214
+#define ENETC_VLAN_TYPE_C BIT(0)
+#define ENETC_VLAN_TYPE_S BIT(1)
+#define ENETC_PVCLCTR_OVTPIDL(bmp) ((bmp) & 0xff) /* VLAN_TYPE */
+#define ENETC_PSIVLANR(n) (0x0240 + (n) * 4) /* n = SI index */
+#define ENETC_PSIVLAN_EN BIT(31)
+#define ENETC_PSIVLAN_SET_QOS(val) ((u32)(val) << 12)
+#define ENETC_PPAUONTR 0x0410
+#define ENETC_PPAUOFFTR 0x0414
+#define ENETC_PTXMBAR 0x0608
+#define ENETC_PCAPR0 0x0900
+#define ENETC_PCAPR0_RXBDR(val) ((val) >> 24)
+#define ENETC_PCAPR0_TXBDR(val) (((val) >> 16) & 0xff)
+#define ENETC_PCAPR1 0x0904
+#define ENETC_PSICFGR0(n) (0x0940 + (n) * 0xc) /* n = SI index */
+#define ENETC_PSICFGR0_SET_TXBDR(val) ((val) & 0xff)
+#define ENETC_PSICFGR0_SET_RXBDR(val) (((val) & 0xff) << 16)
+#define ENETC_PSICFGR0_VTE BIT(12)
+#define ENETC_PSICFGR0_SIVIE BIT(14)
+#define ENETC_PSICFGR0_ASE BIT(15)
+#define ENETC_PSICFGR0_SIVC(bmp) (((bmp) & 0xff) << 24) /* VLAN_TYPE */
+
+#define ENETC_PTCCBSR0(n) (0x1110 + (n) * 8) /* n = 0 to 7 */
+#define ENETC_CBSE BIT(31)
+#define ENETC_CBS_BW_MASK GENMASK(6, 0)
+#define ENETC_PTCCBSR1(n) (0x1114 + (n) * 8) /* n = 0 to 7 */
+#define ENETC_RSSHASH_KEY_SIZE 40
+#define ENETC_PRSSCAPR 0x1404
+#define ENETC_PRSSCAPR_GET_NUM_RSS(val) (BIT((val) & 0xf) * 32)
+#define ENETC_PRSSK(n) (0x1410 + (n) * 4) /* n = [0..9] */
+#define ENETC_PSIVLANFMR 0x1700
+#define ENETC_PSIVLANFMR_VS BIT(0)
+#define ENETC_PRFSMR 0x1800
+#define ENETC_PRFSMR_RFSE BIT(31)
+#define ENETC_PRFSCAPR 0x1804
+#define ENETC_PRFSCAPR_GET_NUM_RFS(val) ((((val) & 0xf) + 1) * 16)
+#define ENETC_PSIRFSCFGR(n) (0x1814 + (n) * 4) /* n = SI index */
+#define ENETC_PFPMR 0x1900
+#define ENETC_PFPMR_PMACE BIT(1)
+#define ENETC_PFPMR_MWLM BIT(0)
+#define ENETC_EMDIO_BASE 0x1c00
+#define ENETC_PSIUMHFR0(n, err) (((err) ? 0x1d08 : 0x1d00) + (n) * 0x10)
+#define ENETC_PSIUMHFR1(n) (0x1d04 + (n) * 0x10)
+#define ENETC_PSIMMHFR0(n, err) (((err) ? 0x1d00 : 0x1d08) + (n) * 0x10)
+#define ENETC_PSIMMHFR1(n) (0x1d0c + (n) * 0x10)
+#define ENETC_PSIVHFR0(n) (0x1e00 + (n) * 8) /* n = SI index */
+#define ENETC_PSIVHFR1(n) (0x1e04 + (n) * 8) /* n = SI index */
+#define ENETC_MMCSR 0x1f00
+#define ENETC_MMCSR_ME BIT(16)
+#define ENETC_PTCMSDUR(n) (0x2020 + (n) * 4) /* n = TC index [0..7] */
+
+#define ENETC_PM0_CMD_CFG 0x8008
+#define ENETC_PM1_CMD_CFG 0x9008
+#define ENETC_PM0_TX_EN BIT(0)
+#define ENETC_PM0_RX_EN BIT(1)
+#define ENETC_PM0_PROMISC BIT(4)
+#define ENETC_PM0_PAUSE_IGN BIT(8)
+#define ENETC_PM0_CMD_XGLP BIT(10)
+#define ENETC_PM0_CMD_TXP BIT(11)
+#define ENETC_PM0_CMD_PHY_TX_EN BIT(15)
+#define ENETC_PM0_CMD_SFD BIT(21)
+#define ENETC_PM0_MAXFRM 0x8014
+#define ENETC_SET_TX_MTU(val) ((val) << 16)
+#define ENETC_SET_MAXFRM(val) ((val) & 0xffff)
+#define ENETC_PM0_RX_FIFO 0x801c
+#define ENETC_PM0_RX_FIFO_VAL 1
+
+#define ENETC_PM_IMDIO_BASE 0x8030
+
+#define ENETC_PM0_PAUSE_QUANTA 0x8054
+#define ENETC_PM0_PAUSE_THRESH 0x8064
+#define ENETC_PM1_PAUSE_QUANTA 0x9054
+#define ENETC_PM1_PAUSE_THRESH 0x9064
+
+#define ENETC_PM0_SINGLE_STEP 0x80c0
+#define ENETC_PM1_SINGLE_STEP 0x90c0
+#define ENETC_PM0_SINGLE_STEP_CH BIT(7)
+#define ENETC_PM0_SINGLE_STEP_EN BIT(31)
+#define ENETC_SET_SINGLE_STEP_OFFSET(v) (((v) & 0xff) << 8)
+
+#define ENETC_PM0_IF_MODE 0x8300
+#define ENETC_PM0_IFM_RG BIT(2)
+#define ENETC_PM0_IFM_RLP (BIT(5) | BIT(11))
+#define ENETC_PM0_IFM_EN_AUTO BIT(15)
+#define ENETC_PM0_IFM_SSP_MASK GENMASK(14, 13)
+#define ENETC_PM0_IFM_SSP_1000 (2 << 13)
+#define ENETC_PM0_IFM_SSP_100 (0 << 13)
+#define ENETC_PM0_IFM_SSP_10 (1 << 13)
+#define ENETC_PM0_IFM_FULL_DPX BIT(12)
+#define ENETC_PM0_IFM_IFMODE_MASK GENMASK(1, 0)
+#define ENETC_PM0_IFM_IFMODE_XGMII 0
+#define ENETC_PM0_IFM_IFMODE_GMII 2
+#define ENETC_PSIDCAPR 0x1b08
+#define ENETC_PSIDCAPR_MSK GENMASK(15, 0)
+#define ENETC_PSFCAPR 0x1b18
+#define ENETC_PSFCAPR_MSK GENMASK(15, 0)
+#define ENETC_PSGCAPR 0x1b28
+#define ENETC_PSGCAPR_GCL_MSK GENMASK(18, 16)
+#define ENETC_PSGCAPR_SGIT_MSK GENMASK(15, 0)
+#define ENETC_PFMCAPR 0x1b38
+#define ENETC_PFMCAPR_MSK GENMASK(15, 0)
+
+/* Port MAC counters: Port MAC 0 corresponds to the eMAC and
+ * Port MAC 1 to the pMAC.
+ */
+#define ENETC_PM_REOCT(mac) (0x8100 + 0x1000 * (mac))
+#define ENETC_PM_RALN(mac) (0x8110 + 0x1000 * (mac))
+#define ENETC_PM_RXPF(mac) (0x8118 + 0x1000 * (mac))
+#define ENETC_PM_RFRM(mac) (0x8120 + 0x1000 * (mac))
+#define ENETC_PM_RFCS(mac) (0x8128 + 0x1000 * (mac))
+#define ENETC_PM_RVLAN(mac) (0x8130 + 0x1000 * (mac))
+#define ENETC_PM_RERR(mac) (0x8138 + 0x1000 * (mac))
+#define ENETC_PM_RUCA(mac) (0x8140 + 0x1000 * (mac))
+#define ENETC_PM_RMCA(mac) (0x8148 + 0x1000 * (mac))
+#define ENETC_PM_RBCA(mac) (0x8150 + 0x1000 * (mac))
+#define ENETC_PM_RDRP(mac) (0x8158 + 0x1000 * (mac))
+#define ENETC_PM_RPKT(mac) (0x8160 + 0x1000 * (mac))
+#define ENETC_PM_RUND(mac) (0x8168 + 0x1000 * (mac))
+#define ENETC_PM_R64(mac) (0x8170 + 0x1000 * (mac))
+#define ENETC_PM_R127(mac) (0x8178 + 0x1000 * (mac))
+#define ENETC_PM_R255(mac) (0x8180 + 0x1000 * (mac))
+#define ENETC_PM_R511(mac) (0x8188 + 0x1000 * (mac))
+#define ENETC_PM_R1023(mac) (0x8190 + 0x1000 * (mac))
+#define ENETC_PM_R1522(mac) (0x8198 + 0x1000 * (mac))
+#define ENETC_PM_R1523X(mac) (0x81A0 + 0x1000 * (mac))
+#define ENETC_PM_ROVR(mac) (0x81A8 + 0x1000 * (mac))
+#define ENETC_PM_RJBR(mac) (0x81B0 + 0x1000 * (mac))
+#define ENETC_PM_RFRG(mac) (0x81B8 + 0x1000 * (mac))
+#define ENETC_PM_RCNP(mac) (0x81C0 + 0x1000 * (mac))
+#define ENETC_PM_RDRNTP(mac) (0x81C8 + 0x1000 * (mac))
+#define ENETC_PM_TEOCT(mac) (0x8200 + 0x1000 * (mac))
+#define ENETC_PM_TOCT(mac) (0x8208 + 0x1000 * (mac))
+#define ENETC_PM_TCRSE(mac) (0x8210 + 0x1000 * (mac))
+#define ENETC_PM_TXPF(mac) (0x8218 + 0x1000 * (mac))
+#define ENETC_PM_TFRM(mac) (0x8220 + 0x1000 * (mac))
+#define ENETC_PM_TFCS(mac) (0x8228 + 0x1000 * (mac))
+#define ENETC_PM_TVLAN(mac) (0x8230 + 0x1000 * (mac))
+#define ENETC_PM_TERR(mac) (0x8238 + 0x1000 * (mac))
+#define ENETC_PM_TUCA(mac) (0x8240 + 0x1000 * (mac))
+#define ENETC_PM_TMCA(mac) (0x8248 + 0x1000 * (mac))
+#define ENETC_PM_TBCA(mac) (0x8250 + 0x1000 * (mac))
+#define ENETC_PM_TPKT(mac) (0x8260 + 0x1000 * (mac))
+#define ENETC_PM_TUND(mac) (0x8268 + 0x1000 * (mac))
+#define ENETC_PM_T64(mac) (0x8270 + 0x1000 * (mac))
+#define ENETC_PM_T127(mac) (0x8278 + 0x1000 * (mac))
+#define ENETC_PM_T255(mac) (0x8280 + 0x1000 * (mac))
+#define ENETC_PM_T511(mac) (0x8288 + 0x1000 * (mac))
+#define ENETC_PM_T1023(mac) (0x8290 + 0x1000 * (mac))
+#define ENETC_PM_T1522(mac) (0x8298 + 0x1000 * (mac))
+#define ENETC_PM_T1523X(mac) (0x82A0 + 0x1000 * (mac))
+#define ENETC_PM_TCNP(mac) (0x82C0 + 0x1000 * (mac))
+#define ENETC_PM_TDFR(mac) (0x82D0 + 0x1000 * (mac))
+#define ENETC_PM_TMCOL(mac) (0x82D8 + 0x1000 * (mac))
+#define ENETC_PM_TSCOL(mac) (0x82E0 + 0x1000 * (mac))
+#define ENETC_PM_TLCOL(mac) (0x82E8 + 0x1000 * (mac))
+#define ENETC_PM_TECOL(mac) (0x82F0 + 0x1000 * (mac))
+
+/* Port counters */
+#define ENETC_PICDR(n) (0x0700 + (n) * 8) /* n = [0..3] */
+#define ENETC_PBFDSIR 0x0810
+#define ENETC_PFDMSAPR 0x0814
+#define ENETC_UFDMF 0x1680
+#define ENETC_MFDMF 0x1684
+#define ENETC_PUFDVFR 0x1780
+#define ENETC_PMFDVFR 0x1784
+#define ENETC_PBFDVFR 0x1788
+
+/** Global regs, offset: 2_0000h */
+#define ENETC_GLOBAL_BASE 0x20000
+#define ENETC_G_EIPBRR0 0x0bf8
+#define ENETC_G_EIPBRR1 0x0bfc
+#define ENETC_G_EPFBLPR(n) (0xd00 + 4 * (n))
+#define ENETC_G_EPFBLPR1_XGMII 0x80000000
+
+/* PCI device info */
+struct enetc_hw {
+ /* SI registers, used by all PCI functions */
+ void __iomem *reg;
+ /* Port registers, PF only */
+ void __iomem *port;
+ /* IP global registers, PF only */
+ void __iomem *global;
+};
+
+/* ENETC register accessors */
+
+/* MDIO issue workaround (on LS1028A) -
+ * Due to a hardware issue, an access to MDIO registers
+ * that is concurrent with other ENETC register accesses
+ * may lead to the MDIO access being dropped or corrupted.
+ * To protect the MDIO accesses a readers-writers locking
+ * scheme is used, where the MDIO register accesses are
+ * protected by write locks to ensure exclusivity, while
+ * the remaining ENETC registers are accessed under read
+ * locks since they only compete with MDIO accesses.
+ */
+extern rwlock_t enetc_mdio_lock;
+
+/* use this locking primitive only on the fast datapath to
+ * group together multiple non-MDIO register accesses to
+ * minimize the overhead of the lock
+ */
+static inline void enetc_lock_mdio(void)
+{
+ read_lock(&enetc_mdio_lock);
+}
+
+static inline void enetc_unlock_mdio(void)
+{
+ read_unlock(&enetc_mdio_lock);
+}
+
+/* use these accessors only on the fast datapath under
+ * the enetc_lock_mdio() locking primitive to minimize
+ * the overhead of the lock
+ */
+static inline u32 enetc_rd_reg_hot(void __iomem *reg)
+{
+ lockdep_assert_held(&enetc_mdio_lock);
+
+ return ioread32(reg);
+}
+
+static inline void enetc_wr_reg_hot(void __iomem *reg, u32 val)
+{
+ lockdep_assert_held(&enetc_mdio_lock);
+
+ iowrite32(val, reg);
+}
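/* Minimal usage sketch (illustrative only, not part of the driver): the fast
 * path is expected to take the read lock once and group several hot accesses
 * under it. The function name and the registers chosen here are examples.
 */
static inline void enetc_example_rd_cbdr_indices(struct enetc_hw *hw,
						 u32 *pir, u32 *cir)
{
	enetc_lock_mdio();
	*pir = enetc_rd_reg_hot(hw->reg + ENETC_SICBDRPIR);
	*cir = enetc_rd_reg_hot(hw->reg + ENETC_SICBDRCIR);
	enetc_unlock_mdio();
}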
+
+/* internal helpers for the MDIO w/a */
+static inline u32 _enetc_rd_reg_wa(void __iomem *reg)
+{
+ u32 val;
+
+ enetc_lock_mdio();
+ val = ioread32(reg);
+ enetc_unlock_mdio();
+
+ return val;
+}
+
+static inline void _enetc_wr_reg_wa(void __iomem *reg, u32 val)
+{
+ enetc_lock_mdio();
+ iowrite32(val, reg);
+ enetc_unlock_mdio();
+}
+
+static inline u32 _enetc_rd_mdio_reg_wa(void __iomem *reg)
+{
+ unsigned long flags;
+ u32 val;
+
+ write_lock_irqsave(&enetc_mdio_lock, flags);
+ val = ioread32(reg);
+ write_unlock_irqrestore(&enetc_mdio_lock, flags);
+
+ return val;
+}
+
+static inline void _enetc_wr_mdio_reg_wa(void __iomem *reg, u32 val)
+{
+ unsigned long flags;
+
+ write_lock_irqsave(&enetc_mdio_lock, flags);
+ iowrite32(val, reg);
+ write_unlock_irqrestore(&enetc_mdio_lock, flags);
+}
+
+#ifdef ioread64
+static inline u64 _enetc_rd_reg64(void __iomem *reg)
+{
+ return ioread64(reg);
+}
+#else
+/* fallback used to read the 64-bit stats counters on 32-bit systems; the
+ * high word is re-read to detect a carry between the two 32-bit halves */
+static inline u64 _enetc_rd_reg64(void __iomem *reg)
+{
+ u32 low, high, tmp;
+
+ do {
+ high = ioread32(reg + 4);
+ low = ioread32(reg);
+ tmp = ioread32(reg + 4);
+ } while (high != tmp);
+
+ return le64_to_cpu((__le64)high << 32 | low);
+}
+#endif
+
+static inline u64 _enetc_rd_reg64_wa(void __iomem *reg)
+{
+ u64 val;
+
+ enetc_lock_mdio();
+ val = _enetc_rd_reg64(reg);
+ enetc_unlock_mdio();
+
+ return val;
+}
+
+/* general register accessors */
+#define enetc_rd_reg(reg) _enetc_rd_reg_wa((reg))
+#define enetc_wr_reg(reg, val) _enetc_wr_reg_wa((reg), (val))
+#define enetc_rd(hw, off) enetc_rd_reg((hw)->reg + (off))
+#define enetc_wr(hw, off, val) enetc_wr_reg((hw)->reg + (off), val)
+#define enetc_rd_hot(hw, off) enetc_rd_reg_hot((hw)->reg + (off))
+#define enetc_wr_hot(hw, off, val) enetc_wr_reg_hot((hw)->reg + (off), val)
+#define enetc_rd64(hw, off) _enetc_rd_reg64_wa((hw)->reg + (off))
+/* port register accessors - PF only */
+#define enetc_port_rd(hw, off) enetc_rd_reg((hw)->port + (off))
+#define enetc_port_wr(hw, off, val) enetc_wr_reg((hw)->port + (off), val)
+#define enetc_port_rd_mdio(hw, off) _enetc_rd_mdio_reg_wa((hw)->port + (off))
+#define enetc_port_wr_mdio(hw, off, val) _enetc_wr_mdio_reg_wa(\
+ (hw)->port + (off), val)
+/* global register accessors - PF only */
+#define enetc_global_rd(hw, off) enetc_rd_reg((hw)->global + (off))
+#define enetc_global_wr(hw, off, val) enetc_wr_reg((hw)->global + (off), val)
+/* BDR register accessors, see ENETC_BDR() */
+#define enetc_bdr_rd(hw, t, n, off) \
+ enetc_rd(hw, ENETC_BDR(t, n, off))
+#define enetc_bdr_wr(hw, t, n, off, val) \
+ enetc_wr(hw, ENETC_BDR(t, n, off), val)
+#define enetc_txbdr_rd(hw, n, off) enetc_bdr_rd(hw, TX, n, off)
+#define enetc_rxbdr_rd(hw, n, off) enetc_bdr_rd(hw, RX, n, off)
+#define enetc_txbdr_wr(hw, n, off, val) \
+ enetc_bdr_wr(hw, TX, n, off, val)
+#define enetc_rxbdr_wr(hw, n, off, val) \
+ enetc_bdr_wr(hw, RX, n, off, val)
+
+/* Buffer Descriptors (BD) */
+union enetc_tx_bd {
+ struct {
+ __le64 addr;
+ __le16 buf_len;
+ __le16 frm_len;
+ union {
+ struct {
+ u8 reserved[3];
+ u8 flags;
+ }; /* default layout */
+ __le32 txstart;
+ __le32 lstatus;
+ };
+ };
+ struct {
+ __le32 tstamp;
+ __le16 tpid;
+ __le16 vid;
+ u8 reserved[6];
+ u8 e_flags;
+ u8 flags;
+ } ext; /* Tx BD extension */
+ struct {
+ __le32 tstamp;
+ u8 reserved[10];
+ u8 status;
+ u8 flags;
+ } wb; /* writeback descriptor */
+};
+
+enum enetc_txbd_flags {
+ ENETC_TXBD_FLAGS_RES0 = BIT(0), /* reserved */
+ ENETC_TXBD_FLAGS_TSE = BIT(1),
+ ENETC_TXBD_FLAGS_W = BIT(2),
+ ENETC_TXBD_FLAGS_RES3 = BIT(3), /* reserved */
+ ENETC_TXBD_FLAGS_TXSTART = BIT(4),
+ ENETC_TXBD_FLAGS_EX = BIT(6),
+ ENETC_TXBD_FLAGS_F = BIT(7)
+};
+#define ENETC_TXBD_STATS_WIN BIT(7)
+#define ENETC_TXBD_TXSTART_MASK GENMASK(24, 0)
+#define ENETC_TXBD_FLAGS_OFFSET 24
+
+static inline __le32 enetc_txbd_set_tx_start(u64 tx_start, u8 flags)
+{
+ u32 temp;
+
+ temp = (tx_start >> 5 & ENETC_TXBD_TXSTART_MASK) |
+ (flags << ENETC_TXBD_FLAGS_OFFSET);
+
+ return cpu_to_le32(temp);
+}
+
+static inline void enetc_clear_tx_bd(union enetc_tx_bd *txbd)
+{
+ memset(txbd, 0, sizeof(*txbd));
+}
+
+/* Extension flags */
+#define ENETC_TXBD_E_FLAGS_VLAN_INS BIT(0)
+#define ENETC_TXBD_E_FLAGS_ONE_STEP_PTP BIT(1)
+#define ENETC_TXBD_E_FLAGS_TWO_STEP_PTP BIT(2)
+
+union enetc_rx_bd {
+ struct {
+ __le64 addr;
+ u8 reserved[8];
+ } w;
+ struct {
+ __le16 inet_csum;
+ __le16 parse_summary;
+ __le32 rss_hash;
+ __le16 buf_len;
+ __le16 vlan_opt;
+ union {
+ struct {
+ __le16 flags;
+ __le16 error;
+ };
+ __le32 lstatus;
+ };
+ } r;
+ struct {
+ __le32 tstamp;
+ u8 reserved[12];
+ } ext;
+};
+
+#define ENETC_RXBD_LSTATUS_R BIT(30)
+#define ENETC_RXBD_LSTATUS_F BIT(31)
+#define ENETC_RXBD_ERR_MASK 0xff
+#define ENETC_RXBD_LSTATUS(flags) ((flags) << 16)
+#define ENETC_RXBD_FLAG_VLAN BIT(9)
+#define ENETC_RXBD_FLAG_TSTMP BIT(10)
+#define ENETC_RXBD_FLAG_TPID GENMASK(1, 0)
+
+#define ENETC_MAC_ADDR_FILT_CNT 8 /* # of supported entries per port */
+#define EMETC_MAC_ADDR_FILT_RES 3 /* # of reserved entries at the beginning */
+#define ENETC_MAX_NUM_VFS 2
+
+#define ENETC_CBD_FLAGS_SF BIT(7) /* short format */
+#define ENETC_CBD_STATUS_MASK 0xf
+
+struct enetc_cmd_rfse {
+ u8 smac_h[6];
+ u8 smac_m[6];
+ u8 dmac_h[6];
+ u8 dmac_m[6];
+ __be32 sip_h[4];
+ __be32 sip_m[4];
+ __be32 dip_h[4];
+ __be32 dip_m[4];
+ u16 ethtype_h;
+ u16 ethtype_m;
+ u16 ethtype4_h;
+ u16 ethtype4_m;
+ u16 sport_h;
+ u16 sport_m;
+ u16 dport_h;
+ u16 dport_m;
+ u16 vlan_h;
+ u16 vlan_m;
+ u8 proto_h;
+ u8 proto_m;
+ u16 flags;
+ u16 result;
+ u16 mode;
+};
+
+#define ENETC_RFSE_EN BIT(15)
+#define ENETC_RFSE_MODE_BD 2
+
+static inline void enetc_load_primary_mac_addr(struct enetc_hw *hw,
+ struct net_device *ndev)
+{
+ u8 addr[ETH_ALEN] __aligned(4);
+
+ *(u32 *)addr = __raw_readl(hw->reg + ENETC_SIPMAR0);
+ *(u16 *)(addr + 4) = __raw_readw(hw->reg + ENETC_SIPMAR1);
+ eth_hw_addr_set(ndev, addr);
+}
+
+#define ENETC_SI_INT_IDX 0
+/* base index for Rx/Tx interrupts */
+#define ENETC_BDR_INT_BASE_IDX 1
+
+/* Messaging */
+
+/* Command completion status */
+enum enetc_msg_cmd_status {
+ ENETC_MSG_CMD_STATUS_OK,
+ ENETC_MSG_CMD_STATUS_FAIL
+};
+
+/* VSI-PSI command message types */
+enum enetc_msg_cmd_type {
+ ENETC_MSG_CMD_MNG_MAC = 1, /* manage MAC address */
+ ENETC_MSG_CMD_MNG_RX_MAC_FILTER,/* manage RX MAC table */
+ ENETC_MSG_CMD_MNG_RX_VLAN_FILTER /* manage RX VLAN table */
+};
+
+/* VSI-PSI command action types */
+enum enetc_msg_cmd_action_type {
+ ENETC_MSG_CMD_MNG_ADD = 1,
+ ENETC_MSG_CMD_MNG_REMOVE
+};
+
+/* PSI-VSI command header format */
+struct enetc_msg_cmd_header {
+ u16 type; /* command class type */
+ u16 id; /* denotes the specific required action */
+};
+
+/* Common H/W utility functions */
+
+static inline void enetc_bdr_enable_rxvlan(struct enetc_hw *hw, int idx,
+ bool en)
+{
+ u32 val = enetc_rxbdr_rd(hw, idx, ENETC_RBMR);
+
+ val = (val & ~ENETC_RBMR_VTE) | (en ? ENETC_RBMR_VTE : 0);
+ enetc_rxbdr_wr(hw, idx, ENETC_RBMR, val);
+}
+
+static inline void enetc_bdr_enable_txvlan(struct enetc_hw *hw, int idx,
+ bool en)
+{
+ u32 val = enetc_txbdr_rd(hw, idx, ENETC_TBMR);
+
+ val = (val & ~ENETC_TBMR_VIH) | (en ? ENETC_TBMR_VIH : 0);
+ enetc_txbdr_wr(hw, idx, ENETC_TBMR, val);
+}
+
+static inline void enetc_set_bdr_prio(struct enetc_hw *hw, int bdr_idx,
+ int prio)
+{
+ u32 val = enetc_txbdr_rd(hw, bdr_idx, ENETC_TBMR);
+
+ val &= ~ENETC_TBMR_PRIO_MASK;
+ val |= ENETC_TBMR_SET_PRIO(prio);
+ enetc_txbdr_wr(hw, bdr_idx, ENETC_TBMR, val);
+}
+
+enum bdcr_cmd_class {
+ BDCR_CMD_UNSPEC = 0,
+ BDCR_CMD_MAC_FILTER,
+ BDCR_CMD_VLAN_FILTER,
+ BDCR_CMD_RSS,
+ BDCR_CMD_RFS,
+ BDCR_CMD_PORT_GCL,
+ BDCR_CMD_RECV_CLASSIFIER,
+ BDCR_CMD_STREAM_IDENTIFY,
+ BDCR_CMD_STREAM_FILTER,
+ BDCR_CMD_STREAM_GCL,
+ BDCR_CMD_FLOW_METER,
+ __BDCR_CMD_MAX_LEN,
+ BDCR_CMD_MAX_LEN = __BDCR_CMD_MAX_LEN - 1,
+};
+
+/* class 5, command 0 */
+struct tgs_gcl_conf {
+ u8 atc; /* init gate value */
+ u8 res[7];
+ struct {
+ u8 res1[4];
+ __le16 acl_len;
+ u8 res2[2];
+ };
+};
+
+/* gate control list entry */
+struct gce {
+ __le32 period;
+ u8 gate;
+ u8 res[3];
+};
+
+/* the tgs_gcl_conf address points to this data space */
+struct tgs_gcl_data {
+ __le32 btl;
+ __le32 bth;
+ __le32 ct;
+ __le32 cte;
+ struct gce entry[];
+};
+
+/* class 7, command 0, Stream Identity Entry Configuration */
+struct streamid_conf {
+ __le32 stream_handle; /* init gate value */
+ __le32 iports;
+ u8 id_type;
+ u8 oui[3];
+ u8 res[3];
+ u8 en;
+};
+
+#define ENETC_CBDR_SID_VID_MASK 0xfff
+#define ENETC_CBDR_SID_VIDM BIT(12)
+#define ENETC_CBDR_SID_TG_MASK 0xc000
+/* the streamid_conf address points to this data space */
+struct streamid_data {
+ union {
+ u8 dmac[6];
+ u8 smac[6];
+ };
+ u16 vid_vidm_tg;
+};
+
+#define ENETC_CBDR_SFI_PRI_MASK 0x7
+#define ENETC_CBDR_SFI_PRIM BIT(3)
+#define ENETC_CBDR_SFI_BLOV BIT(4)
+#define ENETC_CBDR_SFI_BLEN BIT(5)
+#define ENETC_CBDR_SFI_MSDUEN BIT(6)
+#define ENETC_CBDR_SFI_FMITEN BIT(7)
+#define ENETC_CBDR_SFI_ENABLE BIT(7)
+/* class 8, command 0, Stream Filter Instance, Short Format */
+struct sfi_conf {
+ __le32 stream_handle;
+ u8 multi;
+ u8 res[2];
+ u8 sthm;
+	/* Max Service Data Unit or Flow Meter Instance Table index.
+	 * Depending on the value of FLT, this is either the Max
+	 * Service Data Unit (max frame size) allowed by the filter
+	 * entry, or an index into the Flow Meter Instance Table
+	 * identifying the policer used to police it.
+	 */
+ __le16 fm_inst_table_index;
+ __le16 msdu;
+ __le16 sg_inst_table_index;
+ u8 res1[2];
+ __le32 input_ports;
+ u8 res2[3];
+ u8 en;
+};
+
+/* class 8, command 2, Stream Filter Instance status query, short format.
+ * The command itself needs no structure definition; the response is the
+ * Stream Filter Instance Query Statistics data below.
+ */
+struct sfi_counter_data {
+ u32 matchl;
+ u32 matchh;
+ u32 msdu_dropl;
+ u32 msdu_droph;
+ u32 stream_gate_dropl;
+ u32 stream_gate_droph;
+ u32 flow_meter_dropl;
+ u32 flow_meter_droph;
+};
+
+#define ENETC_CBDR_SGI_OIPV_MASK 0x7
+#define ENETC_CBDR_SGI_OIPV_EN BIT(3)
+#define ENETC_CBDR_SGI_CGTST BIT(6)
+#define ENETC_CBDR_SGI_OGTST BIT(7)
+#define ENETC_CBDR_SGI_CFG_CHG BIT(1)
+#define ENETC_CBDR_SGI_CFG_PND BIT(2)
+#define ENETC_CBDR_SGI_OEX BIT(4)
+#define ENETC_CBDR_SGI_OEXEN BIT(5)
+#define ENETC_CBDR_SGI_IRX BIT(6)
+#define ENETC_CBDR_SGI_IRXEN BIT(7)
+#define ENETC_CBDR_SGI_ACLLEN_MASK 0x3
+#define ENETC_CBDR_SGI_OCLLEN_MASK 0xc
+#define ENETC_CBDR_SGI_EN BIT(7)
+/* class 9, command 0, Stream Gate Instance Table, Short Format
+ * class 9, command 2, Stream Gate Instance Table entry query write back
+ * Short Format
+ */
+struct sgi_table {
+ u8 res[8];
+ u8 oipv;
+ u8 res0[2];
+ u8 ocgtst;
+ u8 res1[7];
+ u8 gset;
+ u8 oacl_len;
+ u8 res2[2];
+ u8 en;
+};
+
+#define ENETC_CBDR_SGI_AIPV_MASK 0x7
+#define ENETC_CBDR_SGI_AIPV_EN BIT(3)
+#define ENETC_CBDR_SGI_AGTST BIT(7)
+
+/* class 9, command 1, Stream Gate Control List, Long Format */
+struct sgcl_conf {
+ u8 aipv;
+ u8 res[2];
+ u8 agtst;
+ u8 res1[4];
+ union {
+ struct {
+ u8 res2[4];
+ u8 acl_len;
+ u8 res3[3];
+ };
+ u8 cct[8]; /* Config change time */
+ };
+};
+
+#define ENETC_CBDR_SGL_IOMEN BIT(0)
+#define ENETC_CBDR_SGL_IPVEN BIT(3)
+#define ENETC_CBDR_SGL_GTST BIT(4)
+#define ENETC_CBDR_SGL_IPV_MASK 0xe
+/* Stream Gate Control List Entry */
+struct sgce {
+ u32 interval;
+ u8 msdu[3];
+ u8 multi;
+};
+
+/* Stream Gate Control List, class 9, cmd 1 data buffer */
+struct sgcl_data {
+ u32 btl;
+ u32 bth;
+ u32 ct;
+ u32 cte;
+ struct sgce sgcl[];
+};
+
+#define ENETC_CBDR_FMI_MR BIT(0)
+#define ENETC_CBDR_FMI_MREN BIT(1)
+#define ENETC_CBDR_FMI_DOY BIT(2)
+#define ENETC_CBDR_FMI_CM BIT(3)
+#define ENETC_CBDR_FMI_CF BIT(4)
+#define ENETC_CBDR_FMI_NDOR BIT(5)
+#define ENETC_CBDR_FMI_OALEN BIT(6)
+#define ENETC_CBDR_FMI_IRFPP_MASK GENMASK(4, 0)
+
+/* class 10: command 0/1, Flow Meter Instance Set, short Format */
+struct fmi_conf {
+ __le32 cir;
+ __le32 cbs;
+ __le32 eir;
+ __le32 ebs;
+ u8 conf;
+ u8 res1;
+ u8 ir_fpp;
+ u8 res2[4];
+ u8 en;
+};
+
+struct enetc_cbd {
+ union{
+ struct sfi_conf sfi_conf;
+ struct sgi_table sgi_table;
+ struct fmi_conf fmi_conf;
+ struct {
+ __le32 addr[2];
+ union {
+ __le32 opt[4];
+ struct tgs_gcl_conf gcl_conf;
+ struct streamid_conf sid_set;
+ struct sgcl_conf sgcl_conf;
+ };
+ }; /* Long format */
+ __le32 data[6];
+ };
+ __le16 index;
+ __le16 length;
+ u8 cmd;
+ u8 cls;
+ u8 _res;
+ u8 status_flags;
+};
+
+#define ENETC_CLK 400000000ULL
+static inline u32 enetc_cycles_to_usecs(u32 cycles)
+{
+ return (u32)div_u64(cycles * 1000000ULL, ENETC_CLK);
+}
+
+static inline u32 enetc_usecs_to_cycles(u32 usecs)
+{
+ return (u32)div_u64(usecs * ENETC_CLK, 1000000ULL);
+}
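/* Worked example: with ENETC_CLK at 400 MHz, one microsecond is 400 cycles,
 * so enetc_usecs_to_cycles(100) yields 40000 and enetc_cycles_to_usecs(40000)
 * yields 100.
 */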
+
+/* port time gating control register */
+#define ENETC_PTGCR 0x11a00
+#define ENETC_PTGCR_TGE BIT(31)
+#define ENETC_PTGCR_TGPE BIT(30)
+
+/* Port time gating capability register */
+#define ENETC_PTGCAPR 0x11a08
+#define ENETC_PTGCAPR_MAX_GCL_LEN_MASK GENMASK(15, 0)
+
+/* Port time specific departure */
+#define ENETC_PTCTSDR(n) (0x1210 + 4 * (n))
+#define ENETC_TSDE BIT(31)
+
+/* PSFP setting */
+#define ENETC_PPSFPMR 0x11b00
+#define ENETC_PPSFPMR_PSFPEN BIT(0)
+#define ENETC_PPSFPMR_VS BIT(1)
+#define ENETC_PPSFPMR_PVC BIT(2)
+#define ENETC_PPSFPMR_PVZC BIT(3)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ierb.c b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
new file mode 100644
index 000000000..91f02c505
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2021 NXP
+ *
+ * The Integrated Endpoint Register Block (IERB) is configured by pre-boot
+ * software and is supposed to be to ENETC what an NVRAM is to a 'real' PCIe
+ * card. Upon FLR, values from the IERB are transferred to the ENETC PFs, and
+ * are read-only in the PF memory space.
+ *
+ * This driver fixes up the power-on reset values for the ENETC shared FIFO,
+ * such that the TX and RX allocations are sufficient for jumbo frames, and
+ * that intelligent FIFO dropping is enabled before the internal data
+ * structures are corrupted.
+ *
+ * Even though not all ports might be used on a given board, we are not
+ * concerned with partitioning the FIFO, because the default values configure
+ * no strict reservations, so the entire FIFO can be used by the RX of a single
+ * port, or the TX of a single port.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include "enetc.h"
+#include "enetc_ierb.h"
+
+/* IERB registers */
+#define ENETC_IERB_TXMBAR(port) (((port) * 0x100) + 0x8080)
+#define ENETC_IERB_RXMBER(port) (((port) * 0x100) + 0x8090)
+#define ENETC_IERB_RXMBLR(port) (((port) * 0x100) + 0x8094)
+#define ENETC_IERB_RXBCR(port) (((port) * 0x100) + 0x80a0)
+#define ENETC_IERB_TXBCR(port) (((port) * 0x100) + 0x80a8)
+#define ENETC_IERB_FMBDTR 0xa000
+
+#define ENETC_RESERVED_FOR_ICM 1024
+
+struct enetc_ierb {
+ void __iomem *regs;
+};
+
+static void enetc_ierb_write(struct enetc_ierb *ierb, u32 offset, u32 val)
+{
+ iowrite32(val, ierb->regs + offset);
+}
+
+int enetc_ierb_register_pf(struct platform_device *pdev,
+ struct pci_dev *pf_pdev)
+{
+ struct enetc_ierb *ierb = platform_get_drvdata(pdev);
+ int port = enetc_pf_to_port(pf_pdev);
+ u16 tx_credit, rx_credit, tx_alloc;
+
+ if (port < 0)
+ return -ENODEV;
+
+ if (!ierb)
+ return -EPROBE_DEFER;
+
+ /* By default, it is recommended to set the Host Transfer Agent
+ * per port transmit byte credit to "1000 + max_frame_size/2".
+ * The power-on reset value (1800 bytes) is rounded up to the nearest
+ * 100 assuming a maximum frame size of 1536 bytes.
+ */
+ tx_credit = roundup(1000 + ENETC_MAC_MAXFRM_SIZE / 2, 100);
+
+ /* Internal memory allocated for transmit buffering is guaranteed but
+ * not reserved; i.e. if the total transmit allocation is not used,
+	 * then the unused portion is not left idle: it can be used for receive
+	 * buffering, but it will be reclaimed, if required, from receive by
+ * intelligently dropping already stored receive frames in the internal
+ * memory to ensure that the transmit allocation is respected.
+ *
+ * PaTXMBAR must be set to a value larger than
+ * PaTXBCR + 2 * max_frame_size + 32
+ * if frame preemption is not enabled, or to
+ * 2 * PaTXBCR + 2 * p_max_frame_size (pMAC maximum frame size) +
+ * 2 * np_max_frame_size (eMAC maximum frame size) + 64
+ * if frame preemption is enabled.
+ */
+ tx_alloc = roundup(2 * tx_credit + 4 * ENETC_MAC_MAXFRM_SIZE + 64, 16);
+
+ /* Initial credits, in units of 8 bytes, to the Ingress Congestion
+ * Manager for the maximum amount of bytes the port is allocated for
+ * pending traffic.
+ * It is recommended to set the initial credits to 2 times the maximum
+ * frame size (2 frames of maximum size).
+ */
+ rx_credit = DIV_ROUND_UP(ENETC_MAC_MAXFRM_SIZE * 2, 8);
+
+ enetc_ierb_write(ierb, ENETC_IERB_TXBCR(port), tx_credit);
+ enetc_ierb_write(ierb, ENETC_IERB_TXMBAR(port), tx_alloc);
+ enetc_ierb_write(ierb, ENETC_IERB_RXBCR(port), rx_credit);
+
+ return 0;
+}
+EXPORT_SYMBOL(enetc_ierb_register_pf);
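/* Worked example of the credit formula above (illustrative): for the
 * 1536-byte maximum frame size mentioned in the comment, 1000 + 1536 / 2 =
 * 1768, which roundup(..., 100) turns into 1800, matching the documented
 * power-on reset value. The driver itself recomputes the credit with
 * ENETC_MAC_MAXFRM_SIZE to accommodate jumbo frames.
 */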
+
+static int enetc_ierb_probe(struct platform_device *pdev)
+{
+ struct enetc_ierb *ierb;
+ void __iomem *regs;
+
+ ierb = devm_kzalloc(&pdev->dev, sizeof(*ierb), GFP_KERNEL);
+ if (!ierb)
+ return -ENOMEM;
+
+ regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ ierb->regs = regs;
+
+ /* Free buffer depletion threshold in bytes.
+ * This sets the minimum amount of free buffer memory that should be
+ * maintained in the datapath sub system, and when the amount of free
+ * buffer memory falls below this threshold, a depletion indication is
+ * asserted, which may trigger "intelligent drop" frame releases from
+ * the ingress queues in the ICM.
+ * It is recommended to set the free buffer depletion threshold to 1024
+ * bytes, since the ICM needs some FIFO memory for its own use.
+ */
+ enetc_ierb_write(ierb, ENETC_IERB_FMBDTR, ENETC_RESERVED_FOR_ICM);
+
+ platform_set_drvdata(pdev, ierb);
+
+ return 0;
+}
+
+static int enetc_ierb_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static const struct of_device_id enetc_ierb_match[] = {
+ { .compatible = "fsl,ls1028a-enetc-ierb", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, enetc_ierb_match);
+
+static struct platform_driver enetc_ierb_driver = {
+ .driver = {
+ .name = "fsl-enetc-ierb",
+ .of_match_table = enetc_ierb_match,
+ },
+ .probe = enetc_ierb_probe,
+ .remove = enetc_ierb_remove,
+};
+
+module_platform_driver(enetc_ierb_driver);
+
+MODULE_DESCRIPTION("NXP ENETC IERB");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ierb.h b/drivers/net/ethernet/freescale/enetc/enetc_ierb.h
new file mode 100644
index 000000000..c2ce47c4b
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ierb.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2021 NXP */
+
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#if IS_ENABLED(CONFIG_FSL_ENETC_IERB)
+
+int enetc_ierb_register_pf(struct platform_device *pdev,
+ struct pci_dev *pf_pdev);
+
+#else
+
+static inline int enetc_ierb_register_pf(struct platform_device *pdev,
+ struct pci_dev *pf_pdev)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
new file mode 100644
index 000000000..1c8f5cc6d
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2019 NXP */
+
+#include <linux/fsl/enetc_mdio.h>
+#include <linux/mdio.h>
+#include <linux/of_mdio.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+
+#include "enetc_pf.h"
+
+#define ENETC_MDIO_CFG 0x0 /* MDIO configuration and status */
+#define ENETC_MDIO_CTL 0x4 /* MDIO control */
+#define ENETC_MDIO_DATA 0x8 /* MDIO data */
+#define ENETC_MDIO_ADDR 0xc /* MDIO address */
+
+#define MDIO_CFG_CLKDIV(x) ((((x) >> 1) & 0xff) << 8)
+#define MDIO_CFG_BSY BIT(0)
+#define MDIO_CFG_RD_ER BIT(1)
+#define MDIO_CFG_HOLD(x) (((x) << 2) & GENMASK(4, 2))
+#define MDIO_CFG_ENC45 BIT(6)
+ /* external MDIO only - driven on neg MDC edge */
+#define MDIO_CFG_NEG BIT(23)
+
+#define ENETC_EMDIO_CFG \
+ (MDIO_CFG_HOLD(2) | \
+ MDIO_CFG_CLKDIV(258) | \
+ MDIO_CFG_NEG)
+
+#define MDIO_CTL_DEV_ADDR(x) ((x) & 0x1f)
+#define MDIO_CTL_PORT_ADDR(x) (((x) & 0x1f) << 5)
+#define MDIO_CTL_READ BIT(15)
+
+static inline u32 enetc_mdio_rd(struct enetc_mdio_priv *mdio_priv, int off)
+{
+ return enetc_port_rd_mdio(mdio_priv->hw, mdio_priv->mdio_base + off);
+}
+
+static inline void enetc_mdio_wr(struct enetc_mdio_priv *mdio_priv, int off,
+ u32 val)
+{
+ enetc_port_wr_mdio(mdio_priv->hw, mdio_priv->mdio_base + off, val);
+}
+
+static bool enetc_mdio_is_busy(struct enetc_mdio_priv *mdio_priv)
+{
+ return enetc_mdio_rd(mdio_priv, ENETC_MDIO_CFG) & MDIO_CFG_BSY;
+}
+
+static int enetc_mdio_wait_complete(struct enetc_mdio_priv *mdio_priv)
+{
+ bool is_busy;
+
+ return readx_poll_timeout(enetc_mdio_is_busy, mdio_priv,
+ is_busy, !is_busy, 10, 10 * 1000);
+}
+
+int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
+{
+ struct enetc_mdio_priv *mdio_priv = bus->priv;
+ u32 mdio_ctl, mdio_cfg;
+ u16 dev_addr;
+ int ret;
+
+ mdio_cfg = ENETC_EMDIO_CFG;
+ if (regnum & MII_ADDR_C45) {
+ dev_addr = (regnum >> 16) & 0x1f;
+ mdio_cfg |= MDIO_CFG_ENC45;
+ } else {
+		/* clause 22 (i.e. 1G) */
+ dev_addr = regnum & 0x1f;
+ mdio_cfg &= ~MDIO_CFG_ENC45;
+ }
+
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg);
+
+ ret = enetc_mdio_wait_complete(mdio_priv);
+ if (ret)
+ return ret;
+
+ /* set port and dev addr */
+ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl);
+
+ /* set the register address */
+ if (regnum & MII_ADDR_C45) {
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff);
+
+ ret = enetc_mdio_wait_complete(mdio_priv);
+ if (ret)
+ return ret;
+ }
+
+ /* write the value */
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_DATA, value);
+
+ ret = enetc_mdio_wait_complete(mdio_priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_mdio_write);
+
+int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+{
+ struct enetc_mdio_priv *mdio_priv = bus->priv;
+ u32 mdio_ctl, mdio_cfg;
+ u16 dev_addr, value;
+ int ret;
+
+ mdio_cfg = ENETC_EMDIO_CFG;
+ if (regnum & MII_ADDR_C45) {
+ dev_addr = (regnum >> 16) & 0x1f;
+ mdio_cfg |= MDIO_CFG_ENC45;
+ } else {
+ dev_addr = regnum & 0x1f;
+ mdio_cfg &= ~MDIO_CFG_ENC45;
+ }
+
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg);
+
+ ret = enetc_mdio_wait_complete(mdio_priv);
+ if (ret)
+ return ret;
+
+ /* set port and device addr */
+ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl);
+
+ /* set the register address */
+ if (regnum & MII_ADDR_C45) {
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff);
+
+ ret = enetc_mdio_wait_complete(mdio_priv);
+ if (ret)
+ return ret;
+ }
+
+ /* initiate the read */
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl | MDIO_CTL_READ);
+
+ ret = enetc_mdio_wait_complete(mdio_priv);
+ if (ret)
+ return ret;
+
+ /* return all Fs if nothing was there */
+ if (enetc_mdio_rd(mdio_priv, ENETC_MDIO_CFG) & MDIO_CFG_RD_ER) {
+ dev_dbg(&bus->dev,
+ "Error while reading PHY%d reg at %d.%d\n",
+ phy_id, dev_addr, regnum);
+ return 0xffff;
+ }
+
+ value = enetc_mdio_rd(mdio_priv, ENETC_MDIO_DATA) & 0xffff;
+
+ return value;
+}
+EXPORT_SYMBOL_GPL(enetc_mdio_read);
+
+struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs)
+{
+ struct enetc_hw *hw;
+
+ hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return ERR_PTR(-ENOMEM);
+
+ hw->port = port_regs;
+
+ return hw;
+}
+EXPORT_SYMBOL_GPL(enetc_hw_alloc);
+
+/* Lock for MDIO access errata on LS1028A */
+DEFINE_RWLOCK(enetc_mdio_lock);
+EXPORT_SYMBOL_GPL(enetc_mdio_lock);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_msg.c b/drivers/net/ethernet/freescale/enetc/enetc_msg.c
new file mode 100644
index 000000000..40d22ebe9
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_msg.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include "enetc_pf.h"
+
+static void enetc_msg_disable_mr_int(struct enetc_hw *hw)
+{
+ u32 psiier = enetc_rd(hw, ENETC_PSIIER);
+ /* disable MR int source(s) */
+ enetc_wr(hw, ENETC_PSIIER, psiier & ~ENETC_PSIIER_MR_MASK);
+}
+
+static void enetc_msg_enable_mr_int(struct enetc_hw *hw)
+{
+ u32 psiier = enetc_rd(hw, ENETC_PSIIER);
+
+ enetc_wr(hw, ENETC_PSIIER, psiier | ENETC_PSIIER_MR_MASK);
+}
+
+static irqreturn_t enetc_msg_psi_msix(int irq, void *data)
+{
+ struct enetc_si *si = (struct enetc_si *)data;
+ struct enetc_pf *pf = enetc_si_priv(si);
+
+ enetc_msg_disable_mr_int(&si->hw);
+ schedule_work(&pf->msg_task);
+
+ return IRQ_HANDLED;
+}
+
+static void enetc_msg_task(struct work_struct *work)
+{
+ struct enetc_pf *pf = container_of(work, struct enetc_pf, msg_task);
+ struct enetc_hw *hw = &pf->si->hw;
+ unsigned long mr_mask;
+ int i;
+
+ for (;;) {
+ mr_mask = enetc_rd(hw, ENETC_PSIMSGRR) & ENETC_PSIMSGRR_MR_MASK;
+ if (!mr_mask) {
+ /* re-arm MR interrupts, w1c the IDR reg */
+ enetc_wr(hw, ENETC_PSIIDR, ENETC_PSIIER_MR_MASK);
+ enetc_msg_enable_mr_int(hw);
+ return;
+ }
+
+ for (i = 0; i < pf->num_vfs; i++) {
+ u32 psimsgrr;
+ u16 msg_code;
+
+ if (!(ENETC_PSIMSGRR_MR(i) & mr_mask))
+ continue;
+
+ enetc_msg_handle_rxmsg(pf, i, &msg_code);
+
+ psimsgrr = ENETC_SIMSGSR_SET_MC(msg_code);
+ psimsgrr |= ENETC_PSIMSGRR_MR(i); /* w1c */
+ enetc_wr(hw, ENETC_PSIMSGRR, psimsgrr);
+ }
+ }
+}
+
+/* Init */
+static int enetc_msg_alloc_mbx(struct enetc_si *si, int idx)
+{
+ struct enetc_pf *pf = enetc_si_priv(si);
+ struct device *dev = &si->pdev->dev;
+ struct enetc_hw *hw = &si->hw;
+ struct enetc_msg_swbd *msg;
+ u32 val;
+
+ msg = &pf->rxmsg[idx];
+ /* allocate and set receive buffer */
+ msg->size = ENETC_DEFAULT_MSG_SIZE;
+
+ msg->vaddr = dma_alloc_coherent(dev, msg->size, &msg->dma,
+ GFP_KERNEL);
+ if (!msg->vaddr) {
+ dev_err(dev, "msg: fail to alloc dma buffer of size: %d\n",
+ msg->size);
+ return -ENOMEM;
+ }
+
+ /* set multiple of 32 bytes */
+ val = lower_32_bits(msg->dma);
+ enetc_wr(hw, ENETC_PSIVMSGRCVAR0(idx), val);
+ val = upper_32_bits(msg->dma);
+ enetc_wr(hw, ENETC_PSIVMSGRCVAR1(idx), val);
+
+ return 0;
+}
+
+static void enetc_msg_free_mbx(struct enetc_si *si, int idx)
+{
+ struct enetc_pf *pf = enetc_si_priv(si);
+ struct enetc_hw *hw = &si->hw;
+ struct enetc_msg_swbd *msg;
+
+ msg = &pf->rxmsg[idx];
+ dma_free_coherent(&si->pdev->dev, msg->size, msg->vaddr, msg->dma);
+ memset(msg, 0, sizeof(*msg));
+
+ enetc_wr(hw, ENETC_PSIVMSGRCVAR0(idx), 0);
+ enetc_wr(hw, ENETC_PSIVMSGRCVAR1(idx), 0);
+}
+
+int enetc_msg_psi_init(struct enetc_pf *pf)
+{
+ struct enetc_si *si = pf->si;
+ int vector, i, err;
+
+ /* register message passing interrupt handler */
+ snprintf(pf->msg_int_name, sizeof(pf->msg_int_name), "%s-vfmsg",
+ si->ndev->name);
+ vector = pci_irq_vector(si->pdev, ENETC_SI_INT_IDX);
+ err = request_irq(vector, enetc_msg_psi_msix, 0, pf->msg_int_name, si);
+ if (err) {
+ dev_err(&si->pdev->dev,
+ "PSI messaging: request_irq() failed!\n");
+ return err;
+ }
+
+ /* set one IRQ entry for PSI message receive notification (SI int) */
+ enetc_wr(&si->hw, ENETC_SIMSIVR, ENETC_SI_INT_IDX);
+
+ /* initialize PSI mailbox */
+ INIT_WORK(&pf->msg_task, enetc_msg_task);
+
+ for (i = 0; i < pf->num_vfs; i++) {
+ err = enetc_msg_alloc_mbx(si, i);
+ if (err)
+ goto err_init_mbx;
+ }
+
+ /* enable MR interrupts */
+ enetc_msg_enable_mr_int(&si->hw);
+
+ return 0;
+
+err_init_mbx:
+ for (i--; i >= 0; i--)
+ enetc_msg_free_mbx(si, i);
+
+ free_irq(vector, si);
+
+ return err;
+}
+
+void enetc_msg_psi_free(struct enetc_pf *pf)
+{
+ struct enetc_si *si = pf->si;
+ int i;
+
+ cancel_work_sync(&pf->msg_task);
+
+ /* disable MR interrupts */
+ enetc_msg_disable_mr_int(&si->hw);
+
+ for (i = 0; i < pf->num_vfs; i++)
+ enetc_msg_free_mbx(si, i);
+
+ /* de-register message passing interrupt handler */
+ free_irq(pci_irq_vector(si->pdev, ENETC_SI_INT_IDX), si);
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
new file mode 100644
index 000000000..dafb26f81
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2019 NXP */
+#include <linux/fsl/enetc_mdio.h>
+#include <linux/of_mdio.h>
+#include "enetc_pf.h"
+
+#define ENETC_MDIO_DEV_ID 0xee01
+#define ENETC_MDIO_DEV_NAME "FSL PCIe IE Central MDIO"
+#define ENETC_MDIO_BUS_NAME ENETC_MDIO_DEV_NAME " Bus"
+#define ENETC_MDIO_DRV_NAME ENETC_MDIO_DEV_NAME " driver"
+
+static int enetc_pci_mdio_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct enetc_mdio_priv *mdio_priv;
+ struct device *dev = &pdev->dev;
+ void __iomem *port_regs;
+ struct enetc_hw *hw;
+ struct mii_bus *bus;
+ int err;
+
+ port_regs = pci_iomap(pdev, 0, 0);
+ if (!port_regs) {
+ dev_err(dev, "iomap failed\n");
+ err = -ENXIO;
+ goto err_ioremap;
+ }
+
+ hw = enetc_hw_alloc(dev, port_regs);
+ if (IS_ERR(hw)) {
+ err = PTR_ERR(hw);
+ goto err_hw_alloc;
+ }
+
+ bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
+ if (!bus) {
+ err = -ENOMEM;
+ goto err_mdiobus_alloc;
+ }
+
+ bus->name = ENETC_MDIO_BUS_NAME;
+ bus->read = enetc_mdio_read;
+ bus->write = enetc_mdio_write;
+ bus->parent = dev;
+ mdio_priv = bus->priv;
+ mdio_priv->hw = hw;
+ mdio_priv->mdio_base = ENETC_EMDIO_BASE;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+
+ pcie_flr(pdev);
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(dev, "device enable failed\n");
+ goto err_pci_enable;
+ }
+
+ err = pci_request_region(pdev, 0, KBUILD_MODNAME);
+ if (err) {
+ dev_err(dev, "pci_request_region failed\n");
+ goto err_pci_mem_reg;
+ }
+
+ err = of_mdiobus_register(bus, dev->of_node);
+ if (err)
+ goto err_mdiobus_reg;
+
+ pci_set_drvdata(pdev, bus);
+
+ return 0;
+
+err_mdiobus_reg:
+ pci_release_region(pdev, 0);
+err_pci_mem_reg:
+ pci_disable_device(pdev);
+err_pci_enable:
+err_mdiobus_alloc:
+err_hw_alloc:
+ iounmap(port_regs);
+err_ioremap:
+ return err;
+}
+
+static void enetc_pci_mdio_remove(struct pci_dev *pdev)
+{
+ struct mii_bus *bus = pci_get_drvdata(pdev);
+ struct enetc_mdio_priv *mdio_priv;
+
+ mdiobus_unregister(bus);
+ mdio_priv = bus->priv;
+ iounmap(mdio_priv->hw->port);
+ pci_release_region(pdev, 0);
+ pci_disable_device(pdev);
+}
+
+static const struct pci_device_id enetc_pci_mdio_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_MDIO_DEV_ID) },
+ { 0, } /* End of table. */
+};
+MODULE_DEVICE_TABLE(pci, enetc_pci_mdio_id_table);
+
+static struct pci_driver enetc_pci_mdio_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = enetc_pci_mdio_id_table,
+ .probe = enetc_pci_mdio_probe,
+ .remove = enetc_pci_mdio_remove,
+};
+module_pci_driver(enetc_pci_mdio_driver);
+
+MODULE_DESCRIPTION(ENETC_MDIO_DRV_NAME);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
new file mode 100644
index 000000000..bdf94335e
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -0,0 +1,1414 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include <asm/unaligned.h>
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <linux/fsl/enetc_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/pcs-lynx.h>
+#include "enetc_ierb.h"
+#include "enetc_pf.h"
+
+#define ENETC_DRV_NAME_STR "ENETC PF driver"
+
+static void enetc_pf_get_primary_mac_addr(struct enetc_hw *hw, int si, u8 *addr)
+{
+ u32 upper = __raw_readl(hw->port + ENETC_PSIPMAR0(si));
+ u16 lower = __raw_readw(hw->port + ENETC_PSIPMAR1(si));
+
+ put_unaligned_le32(upper, addr);
+ put_unaligned_le16(lower, addr + 4);
+}
+
+static void enetc_pf_set_primary_mac_addr(struct enetc_hw *hw, int si,
+ const u8 *addr)
+{
+ u32 upper = get_unaligned_le32(addr);
+ u16 lower = get_unaligned_le16(addr + 4);
+
+ __raw_writel(upper, hw->port + ENETC_PSIPMAR0(si));
+ __raw_writew(lower, hw->port + ENETC_PSIPMAR1(si));
+}
+
+static int enetc_pf_set_mac_addr(struct net_device *ndev, void *addr)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct sockaddr *saddr = addr;
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ eth_hw_addr_set(ndev, saddr->sa_data);
+ enetc_pf_set_primary_mac_addr(&priv->si->hw, 0, saddr->sa_data);
+
+ return 0;
+}
+
+static void enetc_set_vlan_promisc(struct enetc_hw *hw, char si_map)
+{
+ u32 val = enetc_port_rd(hw, ENETC_PSIPVMR);
+
+ val &= ~ENETC_PSIPVMR_SET_VP(ENETC_VLAN_PROMISC_MAP_ALL);
+ enetc_port_wr(hw, ENETC_PSIPVMR, ENETC_PSIPVMR_SET_VP(si_map) | val);
+}
+
+static void enetc_enable_si_vlan_promisc(struct enetc_pf *pf, int si_idx)
+{
+ pf->vlan_promisc_simap |= BIT(si_idx);
+ enetc_set_vlan_promisc(&pf->si->hw, pf->vlan_promisc_simap);
+}
+
+static void enetc_disable_si_vlan_promisc(struct enetc_pf *pf, int si_idx)
+{
+ pf->vlan_promisc_simap &= ~BIT(si_idx);
+ enetc_set_vlan_promisc(&pf->si->hw, pf->vlan_promisc_simap);
+}
+
+static void enetc_set_isol_vlan(struct enetc_hw *hw, int si, u16 vlan, u8 qos)
+{
+ u32 val = 0;
+
+ if (vlan)
+ val = ENETC_PSIVLAN_EN | ENETC_PSIVLAN_SET_QOS(qos) | vlan;
+
+ enetc_port_wr(hw, ENETC_PSIVLANR(si), val);
+}
+
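+/* Fold the 48-bit MAC address into a 6-bit hash table index: bit i of the
+ * result is the parity (XOR) of every 6th bit of the byte-reversed address,
+ * starting at bit i (e.g. bit 0 is the XOR of bits 0, 6, 12, ..., 42).
+ */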
+static int enetc_mac_addr_hash_idx(const u8 *addr)
+{
+ u64 fold = __swab64(ether_addr_to_u64(addr)) >> 16;
+ u64 mask = 0;
+ int res = 0;
+ int i;
+
+ for (i = 0; i < 8; i++)
+ mask |= BIT_ULL(i * 6);
+
+ for (i = 0; i < 6; i++)
+ res |= (hweight64(fold & (mask << i)) & 0x1) << i;
+
+ return res;
+}
+
+static void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter)
+{
+ filter->mac_addr_cnt = 0;
+
+ bitmap_zero(filter->mac_hash_table,
+ ENETC_MADDR_HASH_TBL_SZ);
+}
+
+static void enetc_add_mac_addr_em_filter(struct enetc_mac_filter *filter,
+ const unsigned char *addr)
+{
+ /* add exact match addr */
+ ether_addr_copy(filter->mac_addr, addr);
+ filter->mac_addr_cnt++;
+}
+
+static void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter,
+ const unsigned char *addr)
+{
+ int idx = enetc_mac_addr_hash_idx(addr);
+
+ /* add hash table entry */
+ __set_bit(idx, filter->mac_hash_table);
+ filter->mac_addr_cnt++;
+}
+
+static void enetc_clear_mac_ht_flt(struct enetc_si *si, int si_idx, int type)
+{
+ bool err = si->errata & ENETC_ERR_UCMCSWP;
+
+ if (type == UC) {
+ enetc_port_wr(&si->hw, ENETC_PSIUMHFR0(si_idx, err), 0);
+ enetc_port_wr(&si->hw, ENETC_PSIUMHFR1(si_idx), 0);
+ } else { /* MC */
+ enetc_port_wr(&si->hw, ENETC_PSIMMHFR0(si_idx, err), 0);
+ enetc_port_wr(&si->hw, ENETC_PSIMMHFR1(si_idx), 0);
+ }
+}
+
+static void enetc_set_mac_ht_flt(struct enetc_si *si, int si_idx, int type,
+ unsigned long hash)
+{
+ bool err = si->errata & ENETC_ERR_UCMCSWP;
+
+ if (type == UC) {
+ enetc_port_wr(&si->hw, ENETC_PSIUMHFR0(si_idx, err),
+ lower_32_bits(hash));
+ enetc_port_wr(&si->hw, ENETC_PSIUMHFR1(si_idx),
+ upper_32_bits(hash));
+ } else { /* MC */
+ enetc_port_wr(&si->hw, ENETC_PSIMMHFR0(si_idx, err),
+ lower_32_bits(hash));
+ enetc_port_wr(&si->hw, ENETC_PSIMMHFR1(si_idx),
+ upper_32_bits(hash));
+ }
+}
+
+static void enetc_sync_mac_filters(struct enetc_pf *pf)
+{
+ struct enetc_mac_filter *f = pf->mac_filter;
+ struct enetc_si *si = pf->si;
+ int i, pos;
+
+ pos = EMETC_MAC_ADDR_FILT_RES;
+
+ for (i = 0; i < MADDR_TYPE; i++, f++) {
+ bool em = (f->mac_addr_cnt == 1) && (i == UC);
+ bool clear = !f->mac_addr_cnt;
+
+ if (clear) {
+ if (i == UC)
+ enetc_clear_mac_flt_entry(si, pos);
+
+ enetc_clear_mac_ht_flt(si, 0, i);
+ continue;
+ }
+
+ /* exact match filter */
+ if (em) {
+ int err;
+
+ enetc_clear_mac_ht_flt(si, 0, UC);
+
+ err = enetc_set_mac_flt_entry(si, pos, f->mac_addr,
+ BIT(0));
+ if (!err)
+ continue;
+
+ /* fallback to HT filtering */
+ dev_warn(&si->pdev->dev, "fallback to HT filt (%d)\n",
+ err);
+ }
+
+ /* hash table filter, clear EM filter for UC entries */
+ if (i == UC)
+ enetc_clear_mac_flt_entry(si, pos);
+
+ enetc_set_mac_ht_flt(si, 0, i, *f->mac_hash_table);
+ }
+}
+
+static void enetc_pf_set_rx_mode(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ struct enetc_hw *hw = &priv->si->hw;
+ bool uprom = false, mprom = false;
+ struct enetc_mac_filter *filter;
+ struct netdev_hw_addr *ha;
+ u32 psipmr = 0;
+ bool em;
+
+ if (ndev->flags & IFF_PROMISC) {
+ /* enable promisc mode for SI0 (PF) */
+ psipmr = ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);
+ uprom = true;
+ mprom = true;
+ } else if (ndev->flags & IFF_ALLMULTI) {
+ /* enable multicast promisc mode for SI0 (PF) */
+ psipmr = ENETC_PSIPMR_SET_MP(0);
+ mprom = true;
+ }
+
+ /* first 2 filter entries belong to PF */
+ if (!uprom) {
+ /* Update unicast filters */
+ filter = &pf->mac_filter[UC];
+ enetc_reset_mac_addr_filter(filter);
+
+ em = (netdev_uc_count(ndev) == 1);
+ netdev_for_each_uc_addr(ha, ndev) {
+ if (em) {
+ enetc_add_mac_addr_em_filter(filter, ha->addr);
+ break;
+ }
+
+ enetc_add_mac_addr_ht_filter(filter, ha->addr);
+ }
+ }
+
+ if (!mprom) {
+ /* Update multicast filters */
+ filter = &pf->mac_filter[MC];
+ enetc_reset_mac_addr_filter(filter);
+
+ netdev_for_each_mc_addr(ha, ndev) {
+ if (!is_multicast_ether_addr(ha->addr))
+ continue;
+
+ enetc_add_mac_addr_ht_filter(filter, ha->addr);
+ }
+ }
+
+ if (!uprom || !mprom)
+ /* update PF entries */
+ enetc_sync_mac_filters(pf);
+
+ psipmr |= enetc_port_rd(hw, ENETC_PSIPMR) &
+ ~(ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0));
+ enetc_port_wr(hw, ENETC_PSIPMR, psipmr);
+}
+
+static void enetc_set_vlan_ht_filter(struct enetc_hw *hw, int si_idx,
+ unsigned long hash)
+{
+ enetc_port_wr(hw, ENETC_PSIVHFR0(si_idx), lower_32_bits(hash));
+ enetc_port_wr(hw, ENETC_PSIVHFR1(si_idx), upper_32_bits(hash));
+}
+
+static int enetc_vid_hash_idx(unsigned int vid)
+{
+ int res = 0;
+ int i;
+
+ for (i = 0; i < 6; i++)
+ res |= (hweight8(vid & (BIT(i) | BIT(i + 6))) & 0x1) << i;
+
+ return res;
+}
+
+static void enetc_sync_vlan_ht_filter(struct enetc_pf *pf, bool rehash)
+{
+ int i;
+
+ if (rehash) {
+ bitmap_zero(pf->vlan_ht_filter, ENETC_VLAN_HT_SIZE);
+
+ for_each_set_bit(i, pf->active_vlans, VLAN_N_VID) {
+ int hidx = enetc_vid_hash_idx(i);
+
+ __set_bit(hidx, pf->vlan_ht_filter);
+ }
+ }
+
+ enetc_set_vlan_ht_filter(&pf->si->hw, 0, *pf->vlan_ht_filter);
+}
+
+static int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ int idx;
+
+ __set_bit(vid, pf->active_vlans);
+
+ idx = enetc_vid_hash_idx(vid);
+ if (!__test_and_set_bit(idx, pf->vlan_ht_filter))
+ enetc_sync_vlan_ht_filter(pf, false);
+
+ return 0;
+}
+
+static int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+
+ __clear_bit(vid, pf->active_vlans);
+ enetc_sync_vlan_ht_filter(pf, true);
+
+ return 0;
+}
+
+static void enetc_set_loopback(struct net_device *ndev, bool en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ u32 reg;
+
+ reg = enetc_port_rd(hw, ENETC_PM0_IF_MODE);
+ if (reg & ENETC_PM0_IFM_RG) {
+ /* RGMII mode */
+ reg = (reg & ~ENETC_PM0_IFM_RLP) |
+ (en ? ENETC_PM0_IFM_RLP : 0);
+ enetc_port_wr(hw, ENETC_PM0_IF_MODE, reg);
+ } else {
+ /* assume SGMII mode */
+ reg = enetc_port_rd(hw, ENETC_PM0_CMD_CFG);
+ reg = (reg & ~ENETC_PM0_CMD_XGLP) |
+ (en ? ENETC_PM0_CMD_XGLP : 0);
+ reg = (reg & ~ENETC_PM0_CMD_PHY_TX_EN) |
+ (en ? ENETC_PM0_CMD_PHY_TX_EN : 0);
+ enetc_port_wr(hw, ENETC_PM0_CMD_CFG, reg);
+ enetc_port_wr(hw, ENETC_PM1_CMD_CFG, reg);
+ }
+}
+
+static int enetc_pf_set_vf_mac(struct net_device *ndev, int vf, u8 *mac)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ struct enetc_vf_state *vf_state;
+
+ if (vf >= pf->total_vfs)
+ return -EINVAL;
+
+ if (!is_valid_ether_addr(mac))
+ return -EADDRNOTAVAIL;
+
+ vf_state = &pf->vf_state[vf];
+ vf_state->flags |= ENETC_VF_FLAG_PF_SET_MAC;
+ enetc_pf_set_primary_mac_addr(&priv->si->hw, vf + 1, mac);
+ return 0;
+}
+
+static int enetc_pf_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan,
+ u8 qos, __be16 proto)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+
+ if (priv->si->errata & ENETC_ERR_VLAN_ISOL)
+ return -EOPNOTSUPP;
+
+ if (vf >= pf->total_vfs)
+ return -EINVAL;
+
+ if (proto != htons(ETH_P_8021Q))
+ /* only C-tags supported for now */
+ return -EPROTONOSUPPORT;
+
+ enetc_set_isol_vlan(&priv->si->hw, vf + 1, vlan, qos);
+ return 0;
+}
+
+static int enetc_pf_set_vf_spoofchk(struct net_device *ndev, int vf, bool en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ u32 cfgr;
+
+ if (vf >= pf->total_vfs)
+ return -EINVAL;
+
+ cfgr = enetc_port_rd(&priv->si->hw, ENETC_PSICFGR0(vf + 1));
+ cfgr = (cfgr & ~ENETC_PSICFGR0_ASE) | (en ? ENETC_PSICFGR0_ASE : 0);
+ enetc_port_wr(&priv->si->hw, ENETC_PSICFGR0(vf + 1), cfgr);
+
+ return 0;
+}
+
+static int enetc_setup_mac_address(struct device_node *np, struct enetc_pf *pf,
+ int si)
+{
+ struct device *dev = &pf->si->pdev->dev;
+ struct enetc_hw *hw = &pf->si->hw;
+ u8 mac_addr[ETH_ALEN] = { 0 };
+ int err;
+
+ /* (1) try to get the MAC address from the device tree */
+ if (np) {
+ err = of_get_mac_address(np, mac_addr);
+ if (err == -EPROBE_DEFER)
+ return err;
+ }
+
+ /* (2) bootloader supplied MAC address */
+ if (is_zero_ether_addr(mac_addr))
+ enetc_pf_get_primary_mac_addr(hw, si, mac_addr);
+
+ /* (3) choose a random one */
+ if (is_zero_ether_addr(mac_addr)) {
+ eth_random_addr(mac_addr);
+ dev_info(dev, "no MAC address specified for SI%d, using %pM\n",
+ si, mac_addr);
+ }
+
+ enetc_pf_set_primary_mac_addr(hw, si, mac_addr);
+
+ return 0;
+}
+
+static int enetc_setup_mac_addresses(struct device_node *np,
+ struct enetc_pf *pf)
+{
+ int err, i;
+
+ /* The PF might take its MAC from the device tree */
+ err = enetc_setup_mac_address(np, pf, 0);
+ if (err)
+ return err;
+
+ for (i = 0; i < pf->total_vfs; i++) {
+ err = enetc_setup_mac_address(NULL, pf, i + 1);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void enetc_port_assign_rfs_entries(struct enetc_si *si)
+{
+ struct enetc_pf *pf = enetc_si_priv(si);
+ struct enetc_hw *hw = &si->hw;
+ int num_entries, vf_entries, i;
+ u32 val;
+
+ /* split RFS entries between functions */
+ val = enetc_port_rd(hw, ENETC_PRFSCAPR);
+ num_entries = ENETC_PRFSCAPR_GET_NUM_RFS(val);
+ vf_entries = num_entries / (pf->total_vfs + 1);
+
+ for (i = 0; i < pf->total_vfs; i++)
+ enetc_port_wr(hw, ENETC_PSIRFSCFGR(i + 1), vf_entries);
+ enetc_port_wr(hw, ENETC_PSIRFSCFGR(0),
+ num_entries - vf_entries * pf->total_vfs);
+
+ /* enable RFS on port */
+ enetc_port_wr(hw, ENETC_PRFSMR, ENETC_PRFSMR_RFSE);
+}
+
+static void enetc_port_si_configure(struct enetc_si *si)
+{
+ struct enetc_pf *pf = enetc_si_priv(si);
+ struct enetc_hw *hw = &si->hw;
+ int num_rings, i;
+ u32 val;
+
+ val = enetc_port_rd(hw, ENETC_PCAPR0);
+ num_rings = min(ENETC_PCAPR0_RXBDR(val), ENETC_PCAPR0_TXBDR(val));
+
+ val = ENETC_PSICFGR0_SET_TXBDR(ENETC_PF_NUM_RINGS);
+ val |= ENETC_PSICFGR0_SET_RXBDR(ENETC_PF_NUM_RINGS);
+
+ if (unlikely(num_rings < ENETC_PF_NUM_RINGS)) {
+ val = ENETC_PSICFGR0_SET_TXBDR(num_rings);
+ val |= ENETC_PSICFGR0_SET_RXBDR(num_rings);
+
+ dev_warn(&si->pdev->dev, "Found %d rings, expected %d!\n",
+ num_rings, ENETC_PF_NUM_RINGS);
+
+ num_rings = 0;
+ }
+
+ /* Add default one-time settings for SI0 (PF) */
+ val |= ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
+
+ enetc_port_wr(hw, ENETC_PSICFGR0(0), val);
+
+ if (num_rings)
+ num_rings -= ENETC_PF_NUM_RINGS;
+
+ /* Configure the SIs for each available VF */
+ val = ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
+ val |= ENETC_PSICFGR0_VTE | ENETC_PSICFGR0_SIVIE;
+
+ if (num_rings) {
+ num_rings /= pf->total_vfs;
+ val |= ENETC_PSICFGR0_SET_TXBDR(num_rings);
+ val |= ENETC_PSICFGR0_SET_RXBDR(num_rings);
+ }
+
+ for (i = 0; i < pf->total_vfs; i++)
+ enetc_port_wr(hw, ENETC_PSICFGR0(i + 1), val);
+
+ /* Port level VLAN settings */
+ val = ENETC_PVCLCTR_OVTPIDL(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
+ enetc_port_wr(hw, ENETC_PVCLCTR, val);
+ /* use outer tag for VLAN filtering */
+ enetc_port_wr(hw, ENETC_PSIVLANFMR, ENETC_PSIVLANFMR_VS);
+}
+
+void enetc_set_ptcmsdur(struct enetc_hw *hw, u32 *max_sdu)
+{
+ int tc;
+
+ for (tc = 0; tc < 8; tc++) {
+ u32 val = ENETC_MAC_MAXFRM_SIZE;
+
+ if (max_sdu[tc])
+ val = max_sdu[tc] + VLAN_ETH_HLEN;
+
+ enetc_port_wr(hw, ENETC_PTCMSDUR(tc), val);
+ }
+}
+
+void enetc_reset_ptcmsdur(struct enetc_hw *hw)
+{
+ int tc;
+
+ for (tc = 0; tc < 8; tc++)
+ enetc_port_wr(hw, ENETC_PTCMSDUR(tc), ENETC_MAC_MAXFRM_SIZE);
+}
+
+static void enetc_configure_port_mac(struct enetc_hw *hw)
+{
+ enetc_port_wr(hw, ENETC_PM0_MAXFRM,
+ ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
+
+ enetc_reset_ptcmsdur(hw);
+
+ enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
+ ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
+
+ enetc_port_wr(hw, ENETC_PM1_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
+ ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
+
+ /* On LS1028A, the MAC RX FIFO defaults to 2, which is too high
+ * and may lead to RX lock-up under traffic. Set it to 1 instead,
+ * as recommended by the hardware team.
+ */
+ enetc_port_wr(hw, ENETC_PM0_RX_FIFO, ENETC_PM0_RX_FIFO_VAL);
+}
+
+static void enetc_mac_config(struct enetc_hw *hw, phy_interface_t phy_mode)
+{
+ u32 val;
+
+ if (phy_interface_mode_is_rgmii(phy_mode)) {
+ val = enetc_port_rd(hw, ENETC_PM0_IF_MODE);
+ val &= ~(ENETC_PM0_IFM_EN_AUTO | ENETC_PM0_IFM_IFMODE_MASK);
+ val |= ENETC_PM0_IFM_IFMODE_GMII | ENETC_PM0_IFM_RG;
+ enetc_port_wr(hw, ENETC_PM0_IF_MODE, val);
+ }
+
+ if (phy_mode == PHY_INTERFACE_MODE_USXGMII) {
+ val = ENETC_PM0_IFM_FULL_DPX | ENETC_PM0_IFM_IFMODE_XGMII;
+ enetc_port_wr(hw, ENETC_PM0_IF_MODE, val);
+ }
+}
+
+static void enetc_mac_enable(struct enetc_hw *hw, bool en)
+{
+ u32 val = enetc_port_rd(hw, ENETC_PM0_CMD_CFG);
+
+ val &= ~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
+ val |= en ? (ENETC_PM0_TX_EN | ENETC_PM0_RX_EN) : 0;
+
+ enetc_port_wr(hw, ENETC_PM0_CMD_CFG, val);
+ enetc_port_wr(hw, ENETC_PM1_CMD_CFG, val);
+}
+
+static void enetc_configure_port_pmac(struct enetc_hw *hw)
+{
+ u32 temp;
+
+ /* Set pMAC step lock */
+ temp = enetc_port_rd(hw, ENETC_PFPMR);
+ enetc_port_wr(hw, ENETC_PFPMR,
+ temp | ENETC_PFPMR_PMACE | ENETC_PFPMR_MWLM);
+
+ temp = enetc_port_rd(hw, ENETC_MMCSR);
+ enetc_port_wr(hw, ENETC_MMCSR, temp | ENETC_MMCSR_ME);
+}
+
+static void enetc_configure_port(struct enetc_pf *pf)
+{
+ u8 hash_key[ENETC_RSSHASH_KEY_SIZE];
+ struct enetc_hw *hw = &pf->si->hw;
+
+ enetc_configure_port_pmac(hw);
+
+ enetc_configure_port_mac(hw);
+
+ enetc_port_si_configure(pf->si);
+
+ /* set up hash key */
+ get_random_bytes(hash_key, ENETC_RSSHASH_KEY_SIZE);
+ enetc_set_rss_key(hw, hash_key);
+
+ /* split up RFS entries */
+ enetc_port_assign_rfs_entries(pf->si);
+
+ /* enforce VLAN promisc mode for all SIs */
+ pf->vlan_promisc_simap = ENETC_VLAN_PROMISC_MAP_ALL;
+ enetc_set_vlan_promisc(hw, pf->vlan_promisc_simap);
+
+ enetc_port_wr(hw, ENETC_PSIPMR, 0);
+
+ /* enable port */
+ enetc_port_wr(hw, ENETC_PMR, ENETC_PMR_EN);
+}
+
+/* Messaging */
+static u16 enetc_msg_pf_set_vf_primary_mac_addr(struct enetc_pf *pf,
+ int vf_id)
+{
+ struct enetc_vf_state *vf_state = &pf->vf_state[vf_id];
+ struct enetc_msg_swbd *msg = &pf->rxmsg[vf_id];
+ struct enetc_msg_cmd_set_primary_mac *cmd;
+ struct device *dev = &pf->si->pdev->dev;
+ u16 cmd_id;
+ char *addr;
+
+ cmd = (struct enetc_msg_cmd_set_primary_mac *)msg->vaddr;
+ cmd_id = cmd->header.id;
+ if (cmd_id != ENETC_MSG_CMD_MNG_ADD)
+ return ENETC_MSG_CMD_STATUS_FAIL;
+
+ addr = cmd->mac.sa_data;
+ if (vf_state->flags & ENETC_VF_FLAG_PF_SET_MAC)
+ dev_warn(dev, "Attempt to override PF set mac addr for VF%d\n",
+ vf_id);
+ else
+ enetc_pf_set_primary_mac_addr(&pf->si->hw, vf_id + 1, addr);
+
+ return ENETC_MSG_CMD_STATUS_OK;
+}
+
+void enetc_msg_handle_rxmsg(struct enetc_pf *pf, int vf_id, u16 *status)
+{
+ struct enetc_msg_swbd *msg = &pf->rxmsg[vf_id];
+ struct device *dev = &pf->si->pdev->dev;
+ struct enetc_msg_cmd_header *cmd_hdr;
+ u16 cmd_type;
+
+ *status = ENETC_MSG_CMD_STATUS_OK;
+ cmd_hdr = (struct enetc_msg_cmd_header *)msg->vaddr;
+ cmd_type = cmd_hdr->type;
+
+ switch (cmd_type) {
+ case ENETC_MSG_CMD_MNG_MAC:
+ *status = enetc_msg_pf_set_vf_primary_mac_addr(pf, vf_id);
+ break;
+ default:
+ dev_err(dev, "command not supported (cmd_type: 0x%x)\n",
+ cmd_type);
+ }
+}
+
+#ifdef CONFIG_PCI_IOV
+static int enetc_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct enetc_si *si = pci_get_drvdata(pdev);
+ struct enetc_pf *pf = enetc_si_priv(si);
+ int err;
+
+ if (!num_vfs) {
+ enetc_msg_psi_free(pf);
+ kfree(pf->vf_state);
+ pf->num_vfs = 0;
+ pci_disable_sriov(pdev);
+ } else {
+ pf->num_vfs = num_vfs;
+
+ pf->vf_state = kcalloc(num_vfs, sizeof(struct enetc_vf_state),
+ GFP_KERNEL);
+ if (!pf->vf_state) {
+ pf->num_vfs = 0;
+ return -ENOMEM;
+ }
+
+ err = enetc_msg_psi_init(pf);
+ if (err) {
+ dev_err(&pdev->dev, "enetc_msg_psi_init (%d)\n", err);
+ goto err_msg_psi;
+ }
+
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_sriov err %d\n", err);
+ goto err_en_sriov;
+ }
+ }
+
+ return num_vfs;
+
+err_en_sriov:
+ enetc_msg_psi_free(pf);
+err_msg_psi:
+ kfree(pf->vf_state);
+ pf->num_vfs = 0;
+
+ return err;
+}
+#else
+#define enetc_sriov_configure(pdev, num_vfs) (void)0
+#endif
+
+static int enetc_pf_set_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = ndev->features ^ features;
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int err;
+
+ if (changed & NETIF_F_HW_TC) {
+ err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
+ if (err)
+ return err;
+ }
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+
+ if (!!(features & NETIF_F_HW_VLAN_CTAG_FILTER))
+ enetc_disable_si_vlan_promisc(pf, 0);
+ else
+ enetc_enable_si_vlan_promisc(pf, 0);
+ }
+
+ if (changed & NETIF_F_LOOPBACK)
+ enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));
+
+ enetc_set_features(ndev, features);
+
+ return 0;
+}
+
+static int enetc_pf_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_QUERY_CAPS:
+ return enetc_qos_query_caps(ndev, type_data);
+ case TC_SETUP_QDISC_MQPRIO:
+ return enetc_setup_tc_mqprio(ndev, type_data);
+ case TC_SETUP_QDISC_TAPRIO:
+ return enetc_setup_tc_taprio(ndev, type_data);
+ case TC_SETUP_QDISC_CBS:
+ return enetc_setup_tc_cbs(ndev, type_data);
+ case TC_SETUP_QDISC_ETF:
+ return enetc_setup_tc_txtime(ndev, type_data);
+ case TC_SETUP_BLOCK:
+ return enetc_setup_tc_psfp(ndev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct net_device_ops enetc_ndev_ops = {
+ .ndo_open = enetc_open,
+ .ndo_stop = enetc_close,
+ .ndo_start_xmit = enetc_xmit,
+ .ndo_get_stats = enetc_get_stats,
+ .ndo_set_mac_address = enetc_pf_set_mac_addr,
+ .ndo_set_rx_mode = enetc_pf_set_rx_mode,
+ .ndo_vlan_rx_add_vid = enetc_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = enetc_vlan_rx_del_vid,
+ .ndo_set_vf_mac = enetc_pf_set_vf_mac,
+ .ndo_set_vf_vlan = enetc_pf_set_vf_vlan,
+ .ndo_set_vf_spoofchk = enetc_pf_set_vf_spoofchk,
+ .ndo_set_features = enetc_pf_set_features,
+ .ndo_eth_ioctl = enetc_ioctl,
+ .ndo_setup_tc = enetc_pf_setup_tc,
+ .ndo_bpf = enetc_setup_bpf,
+ .ndo_xdp_xmit = enetc_xdp_xmit,
+};
+
+static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
+ const struct net_device_ops *ndev_ops)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+ SET_NETDEV_DEV(ndev, &si->pdev->dev);
+ priv->ndev = ndev;
+ priv->si = si;
+ priv->dev = &si->pdev->dev;
+ si->ndev = ndev;
+
+ priv->msg_enable = (NETIF_MSG_WOL << 1) - 1;
+ ndev->netdev_ops = ndev_ops;
+ enetc_set_ethtool_ops(ndev);
+ ndev->watchdog_timeo = 5 * HZ;
+ ndev->max_mtu = ENETC_MAX_MTU;
+
+ ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK |
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6;
+
+ if (si->num_rss)
+ ndev->hw_features |= NETIF_F_RXHASH;
+
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+
+ if (si->hw_features & ENETC_SI_F_PSFP && !enetc_psfp_enable(priv)) {
+ priv->active_offloads |= ENETC_F_QCI;
+ ndev->features |= NETIF_F_HW_TC;
+ ndev->hw_features |= NETIF_F_HW_TC;
+ }
+
+ /* pick up primary MAC address from SI */
+ enetc_load_primary_mac_addr(&si->hw, ndev);
+}
+
+static int enetc_mdio_probe(struct enetc_pf *pf, struct device_node *np)
+{
+ struct device *dev = &pf->si->pdev->dev;
+ struct enetc_mdio_priv *mdio_priv;
+ struct mii_bus *bus;
+ int err;
+
+ bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "Freescale ENETC MDIO Bus";
+ bus->read = enetc_mdio_read;
+ bus->write = enetc_mdio_write;
+ bus->parent = dev;
+ mdio_priv = bus->priv;
+ mdio_priv->hw = &pf->si->hw;
+ mdio_priv->mdio_base = ENETC_EMDIO_BASE;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+
+ err = of_mdiobus_register(bus, np);
+ if (err)
+ return dev_err_probe(dev, err, "cannot register MDIO bus\n");
+
+ pf->mdio = bus;
+
+ return 0;
+}
+
+static void enetc_mdio_remove(struct enetc_pf *pf)
+{
+ if (pf->mdio)
+ mdiobus_unregister(pf->mdio);
+}
+
+static int enetc_imdio_create(struct enetc_pf *pf)
+{
+ struct device *dev = &pf->si->pdev->dev;
+ struct enetc_mdio_priv *mdio_priv;
+ struct phylink_pcs *phylink_pcs;
+ struct mdio_device *mdio_device;
+ struct mii_bus *bus;
+ int err;
+
+ bus = mdiobus_alloc_size(sizeof(*mdio_priv));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "Freescale ENETC internal MDIO Bus";
+ bus->read = enetc_mdio_read;
+ bus->write = enetc_mdio_write;
+ bus->parent = dev;
+ bus->phy_mask = ~0;
+ mdio_priv = bus->priv;
+ mdio_priv->hw = &pf->si->hw;
+ mdio_priv->mdio_base = ENETC_PM_IMDIO_BASE;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-imdio", dev_name(dev));
+
+ err = mdiobus_register(bus);
+ if (err) {
+ dev_err(dev, "cannot register internal MDIO bus (%d)\n", err);
+ goto free_mdio_bus;
+ }
+
+ mdio_device = mdio_device_create(bus, 0);
+ if (IS_ERR(mdio_device)) {
+ err = PTR_ERR(mdio_device);
+ dev_err(dev, "cannot create mdio device (%d)\n", err);
+ goto unregister_mdiobus;
+ }
+
+ phylink_pcs = lynx_pcs_create(mdio_device);
+ if (!phylink_pcs) {
+ mdio_device_free(mdio_device);
+ err = -ENOMEM;
+ dev_err(dev, "cannot create lynx pcs (%d)\n", err);
+ goto unregister_mdiobus;
+ }
+
+ pf->imdio = bus;
+ pf->pcs = phylink_pcs;
+
+ return 0;
+
+unregister_mdiobus:
+ mdiobus_unregister(bus);
+free_mdio_bus:
+ mdiobus_free(bus);
+ return err;
+}
+
+static void enetc_imdio_remove(struct enetc_pf *pf)
+{
+ struct mdio_device *mdio_device;
+
+ if (pf->pcs) {
+ mdio_device = lynx_get_mdio_device(pf->pcs);
+ mdio_device_free(mdio_device);
+ lynx_pcs_destroy(pf->pcs);
+ }
+ if (pf->imdio) {
+ mdiobus_unregister(pf->imdio);
+ mdiobus_free(pf->imdio);
+ }
+}
+
+static bool enetc_port_has_pcs(struct enetc_pf *pf)
+{
+ return (pf->if_mode == PHY_INTERFACE_MODE_SGMII ||
+ pf->if_mode == PHY_INTERFACE_MODE_2500BASEX ||
+ pf->if_mode == PHY_INTERFACE_MODE_USXGMII);
+}
+
+static int enetc_mdiobus_create(struct enetc_pf *pf, struct device_node *node)
+{
+ struct device_node *mdio_np;
+ int err;
+
+ mdio_np = of_get_child_by_name(node, "mdio");
+ if (mdio_np) {
+ err = enetc_mdio_probe(pf, mdio_np);
+
+ of_node_put(mdio_np);
+ if (err)
+ return err;
+ }
+
+ if (enetc_port_has_pcs(pf)) {
+ err = enetc_imdio_create(pf);
+ if (err) {
+ enetc_mdio_remove(pf);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void enetc_mdiobus_destroy(struct enetc_pf *pf)
+{
+ enetc_mdio_remove(pf);
+ enetc_imdio_remove(pf);
+}
+
+static struct phylink_pcs *
+enetc_pl_mac_select_pcs(struct phylink_config *config, phy_interface_t iface)
+{
+ struct enetc_pf *pf = phylink_to_enetc_pf(config);
+
+ return pf->pcs;
+}
+
+static void enetc_pl_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct enetc_pf *pf = phylink_to_enetc_pf(config);
+
+ enetc_mac_config(&pf->si->hw, state->interface);
+}
+
+static void enetc_force_rgmii_mac(struct enetc_hw *hw, int speed, int duplex)
+{
+ u32 old_val, val;
+
+ old_val = val = enetc_port_rd(hw, ENETC_PM0_IF_MODE);
+
+ if (speed == SPEED_1000) {
+ val &= ~ENETC_PM0_IFM_SSP_MASK;
+ val |= ENETC_PM0_IFM_SSP_1000;
+ } else if (speed == SPEED_100) {
+ val &= ~ENETC_PM0_IFM_SSP_MASK;
+ val |= ENETC_PM0_IFM_SSP_100;
+ } else if (speed == SPEED_10) {
+ val &= ~ENETC_PM0_IFM_SSP_MASK;
+ val |= ENETC_PM0_IFM_SSP_10;
+ }
+
+ if (duplex == DUPLEX_FULL)
+ val |= ENETC_PM0_IFM_FULL_DPX;
+ else
+ val &= ~ENETC_PM0_IFM_FULL_DPX;
+
+ if (val == old_val)
+ return;
+
+ enetc_port_wr(hw, ENETC_PM0_IF_MODE, val);
+}
+
+static void enetc_pl_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy, unsigned int mode,
+ phy_interface_t interface, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ struct enetc_pf *pf = phylink_to_enetc_pf(config);
+ u32 pause_off_thresh = 0, pause_on_thresh = 0;
+ u32 init_quanta = 0, refresh_quanta = 0;
+ struct enetc_hw *hw = &pf->si->hw;
+ struct enetc_ndev_priv *priv;
+ u32 rbmr, cmd_cfg;
+ int idx;
+
+ priv = netdev_priv(pf->si->ndev);
+
+ if (pf->si->hw_features & ENETC_SI_F_QBV)
+ enetc_sched_speed_set(priv, speed);
+
+ if (!phylink_autoneg_inband(mode) &&
+ phy_interface_mode_is_rgmii(interface))
+ enetc_force_rgmii_mac(hw, speed, duplex);
+
+ /* Flow control */
+ for (idx = 0; idx < priv->num_rx_rings; idx++) {
+ rbmr = enetc_rxbdr_rd(hw, idx, ENETC_RBMR);
+
+ if (tx_pause)
+ rbmr |= ENETC_RBMR_CM;
+ else
+ rbmr &= ~ENETC_RBMR_CM;
+
+ enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
+ }
+
+ if (tx_pause) {
+ /* When the port first enters congestion, send a PAUSE request
+ * with the maximum number of quanta. When the port exits
+ * congestion, it will automatically send a PAUSE frame with
+ * zero quanta.
+ */
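+ /* For scale: one PAUSE quantum is 512 bit times, so 0xffff quanta
+ * corresponds to roughly 33.5 ms at 1 Gbps (and proportionally less
+ * at higher link speeds).
+ */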
+ init_quanta = 0xffff;
+
+ /* Also, set up the refresh timer to send follow-up PAUSE
+ * frames at half the quanta value, in case the congestion
+ * condition persists.
+ */
+ refresh_quanta = 0xffff / 2;
+
+ /* Start emitting PAUSE frames when 3 large frames (or more
+ * smaller frames) have accumulated in the FIFO waiting to be
+ * DMAed to the RX ring.
+ */
+ pause_on_thresh = 3 * ENETC_MAC_MAXFRM_SIZE;
+ pause_off_thresh = 1 * ENETC_MAC_MAXFRM_SIZE;
+ }
+
+ enetc_port_wr(hw, ENETC_PM0_PAUSE_QUANTA, init_quanta);
+ enetc_port_wr(hw, ENETC_PM1_PAUSE_QUANTA, init_quanta);
+ enetc_port_wr(hw, ENETC_PM0_PAUSE_THRESH, refresh_quanta);
+ enetc_port_wr(hw, ENETC_PM1_PAUSE_THRESH, refresh_quanta);
+ enetc_port_wr(hw, ENETC_PPAUONTR, pause_on_thresh);
+ enetc_port_wr(hw, ENETC_PPAUOFFTR, pause_off_thresh);
+
+ cmd_cfg = enetc_port_rd(hw, ENETC_PM0_CMD_CFG);
+
+ if (rx_pause)
+ cmd_cfg &= ~ENETC_PM0_PAUSE_IGN;
+ else
+ cmd_cfg |= ENETC_PM0_PAUSE_IGN;
+
+ enetc_port_wr(hw, ENETC_PM0_CMD_CFG, cmd_cfg);
+ enetc_port_wr(hw, ENETC_PM1_CMD_CFG, cmd_cfg);
+
+ enetc_mac_enable(hw, true);
+}
+
+static void enetc_pl_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct enetc_pf *pf = phylink_to_enetc_pf(config);
+
+ enetc_mac_enable(&pf->si->hw, false);
+}
+
+static const struct phylink_mac_ops enetc_mac_phylink_ops = {
+ .validate = phylink_generic_validate,
+ .mac_select_pcs = enetc_pl_mac_select_pcs,
+ .mac_config = enetc_pl_mac_config,
+ .mac_link_up = enetc_pl_mac_link_up,
+ .mac_link_down = enetc_pl_mac_link_down,
+};
+
+static int enetc_phylink_create(struct enetc_ndev_priv *priv,
+ struct device_node *node)
+{
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ struct phylink *phylink;
+ int err;
+
+ pf->phylink_config.dev = &priv->ndev->dev;
+ pf->phylink_config.type = PHYLINK_NETDEV;
+ pf->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
+
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ pf->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ pf->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ pf->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_USXGMII,
+ pf->phylink_config.supported_interfaces);
+ phy_interface_set_rgmii(pf->phylink_config.supported_interfaces);
+
+ phylink = phylink_create(&pf->phylink_config, of_fwnode_handle(node),
+ pf->if_mode, &enetc_mac_phylink_ops);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
+ return err;
+ }
+
+ priv->phylink = phylink;
+
+ return 0;
+}
+
+static void enetc_phylink_destroy(struct enetc_ndev_priv *priv)
+{
+ phylink_destroy(priv->phylink);
+}
+
+/* Initialize the entire shared memory for the flow steering entries
+ * of this port (PF + VFs)
+ */
+static int enetc_init_port_rfs_memory(struct enetc_si *si)
+{
+ struct enetc_cmd_rfse rfse = {0};
+ struct enetc_hw *hw = &si->hw;
+ int num_rfs, i, err = 0;
+ u32 val;
+
+ val = enetc_port_rd(hw, ENETC_PRFSCAPR);
+ num_rfs = ENETC_PRFSCAPR_GET_NUM_RFS(val);
+
+ for (i = 0; i < num_rfs; i++) {
+ err = enetc_set_fs_entry(si, &rfse, i);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int enetc_init_port_rss_memory(struct enetc_si *si)
+{
+ struct enetc_hw *hw = &si->hw;
+ int num_rss, err;
+ int *rss_table;
+ u32 val;
+
+ val = enetc_port_rd(hw, ENETC_PRSSCAPR);
+ num_rss = ENETC_PRSSCAPR_GET_NUM_RSS(val);
+ if (!num_rss)
+ return 0;
+
+ rss_table = kcalloc(num_rss, sizeof(*rss_table), GFP_KERNEL);
+ if (!rss_table)
+ return -ENOMEM;
+
+ err = enetc_set_rss_table(si, rss_table, num_rss);
+
+ kfree(rss_table);
+
+ return err;
+}
+
+static int enetc_pf_register_with_ierb(struct pci_dev *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct platform_device *ierb_pdev;
+ struct device_node *ierb_node;
+
+ /* Don't register with the IERB if the PF itself is disabled */
+ if (!node || !of_device_is_available(node))
+ return 0;
+
+ ierb_node = of_find_compatible_node(NULL, NULL,
+ "fsl,ls1028a-enetc-ierb");
+ if (!ierb_node || !of_device_is_available(ierb_node))
+ return -ENODEV;
+
+ ierb_pdev = of_find_device_by_node(ierb_node);
+ of_node_put(ierb_node);
+
+ if (!ierb_pdev)
+ return -EPROBE_DEFER;
+
+ return enetc_ierb_register_pf(ierb_pdev, pdev);
+}
+
+static int enetc_pf_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct enetc_ndev_priv *priv;
+ struct net_device *ndev;
+ struct enetc_si *si;
+ struct enetc_pf *pf;
+ int err;
+
+ err = enetc_pf_register_with_ierb(pdev);
+ if (err == -EPROBE_DEFER)
+ return err;
+ if (err)
+ dev_warn(&pdev->dev,
+ "Could not register with IERB driver: %pe, please update the device tree\n",
+ ERR_PTR(err));
+
+ err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf));
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "PCI probing failed\n");
+
+ si = pci_get_drvdata(pdev);
+ if (!si->hw.port || !si->hw.global) {
+ err = -ENODEV;
+ dev_err(&pdev->dev, "could not map PF space, probing a VF?\n");
+ goto err_map_pf_space;
+ }
+
+ err = enetc_setup_cbdr(&pdev->dev, &si->hw, ENETC_CBDR_DEFAULT_SIZE,
+ &si->cbd_ring);
+ if (err)
+ goto err_setup_cbdr;
+
+ err = enetc_init_port_rfs_memory(si);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize RFS memory\n");
+ goto err_init_port_rfs;
+ }
+
+ err = enetc_init_port_rss_memory(si);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize RSS memory\n");
+ goto err_init_port_rss;
+ }
+
+ if (node && !of_device_is_available(node)) {
+ dev_info(&pdev->dev, "device is disabled, skipping\n");
+ err = -ENODEV;
+ goto err_device_disabled;
+ }
+
+ pf = enetc_si_priv(si);
+ pf->si = si;
+ pf->total_vfs = pci_sriov_get_totalvfs(pdev);
+
+ err = enetc_setup_mac_addresses(node, pf);
+ if (err)
+ goto err_setup_mac_addresses;
+
+ enetc_configure_port(pf);
+
+ enetc_get_si_caps(si);
+
+ ndev = alloc_etherdev_mq(sizeof(*priv), ENETC_MAX_NUM_TXQS);
+ if (!ndev) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "netdev creation failed\n");
+ goto err_alloc_netdev;
+ }
+
+ enetc_pf_netdev_setup(si, ndev, &enetc_ndev_ops);
+
+ priv = netdev_priv(ndev);
+
+ enetc_init_si_rings_params(priv);
+
+ err = enetc_alloc_si_resources(priv);
+ if (err) {
+ dev_err(&pdev->dev, "SI resource alloc failed\n");
+ goto err_alloc_si_res;
+ }
+
+ err = enetc_configure_si(priv);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to configure SI\n");
+ goto err_config_si;
+ }
+
+ err = enetc_alloc_msix(priv);
+ if (err) {
+ dev_err(&pdev->dev, "MSIX alloc failed\n");
+ goto err_alloc_msix;
+ }
+
+ err = of_get_phy_mode(node, &pf->if_mode);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to read PHY mode\n");
+ goto err_phy_mode;
+ }
+
+ err = enetc_mdiobus_create(pf, node);
+ if (err)
+ goto err_mdiobus_create;
+
+ err = enetc_phylink_create(priv, node);
+ if (err)
+ goto err_phylink_create;
+
+ err = register_netdev(ndev);
+ if (err)
+ goto err_reg_netdev;
+
+ return 0;
+
+err_reg_netdev:
+ enetc_phylink_destroy(priv);
+err_phylink_create:
+ enetc_mdiobus_destroy(pf);
+err_mdiobus_create:
+err_phy_mode:
+ enetc_free_msix(priv);
+err_config_si:
+err_alloc_msix:
+ enetc_free_si_resources(priv);
+err_alloc_si_res:
+ si->ndev = NULL;
+ free_netdev(ndev);
+err_alloc_netdev:
+err_init_port_rss:
+err_init_port_rfs:
+err_device_disabled:
+err_setup_mac_addresses:
+ enetc_teardown_cbdr(&si->cbd_ring);
+err_setup_cbdr:
+err_map_pf_space:
+ enetc_pci_remove(pdev);
+
+ return err;
+}
+
+static void enetc_pf_remove(struct pci_dev *pdev)
+{
+ struct enetc_si *si = pci_get_drvdata(pdev);
+ struct enetc_pf *pf = enetc_si_priv(si);
+ struct enetc_ndev_priv *priv;
+
+ priv = netdev_priv(si->ndev);
+
+ if (pf->num_vfs)
+ enetc_sriov_configure(pdev, 0);
+
+ unregister_netdev(si->ndev);
+
+ enetc_phylink_destroy(priv);
+ enetc_mdiobus_destroy(pf);
+
+ enetc_free_msix(priv);
+
+ enetc_free_si_resources(priv);
+ enetc_teardown_cbdr(&si->cbd_ring);
+
+ free_netdev(si->ndev);
+
+ enetc_pci_remove(pdev);
+}
+
+static const struct pci_device_id enetc_pf_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_PF) },
+ { 0, } /* End of table. */
+};
+MODULE_DEVICE_TABLE(pci, enetc_pf_id_table);
+
+static struct pci_driver enetc_pf_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = enetc_pf_id_table,
+ .probe = enetc_pf_probe,
+ .remove = enetc_pf_remove,
+#ifdef CONFIG_PCI_IOV
+ .sriov_configure = enetc_sriov_configure,
+#endif
+};
+module_pci_driver(enetc_pf_driver);
+
+MODULE_DESCRIPTION(ENETC_DRV_NAME_STR);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.h b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
new file mode 100644
index 000000000..c26bd66e4
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2017-2019 NXP */
+
+#include "enetc.h"
+#include <linux/phylink.h>
+
+#define ENETC_PF_NUM_RINGS 8
+
+enum enetc_mac_addr_type {UC, MC, MADDR_TYPE};
+#define ENETC_MAX_NUM_MAC_FLT ((ENETC_MAX_NUM_VFS + 1) * MADDR_TYPE)
+
+#define ENETC_MADDR_HASH_TBL_SZ 64
+struct enetc_mac_filter {
+ union {
+ char mac_addr[ETH_ALEN];
+ DECLARE_BITMAP(mac_hash_table, ENETC_MADDR_HASH_TBL_SZ);
+ };
+ int mac_addr_cnt;
+};
+
+#define ENETC_VLAN_HT_SIZE 64
+
+enum enetc_vf_flags {
+ ENETC_VF_FLAG_PF_SET_MAC = BIT(0),
+};
+
+struct enetc_vf_state {
+ enum enetc_vf_flags flags;
+};
+
+struct enetc_pf {
+ struct enetc_si *si;
+ int num_vfs; /* number of active VFs, after sriov_init */
+ int total_vfs; /* max number of VFs, set for PF at probe */
+ struct enetc_vf_state *vf_state;
+
+ struct enetc_mac_filter mac_filter[ENETC_MAX_NUM_MAC_FLT];
+
+ struct enetc_msg_swbd rxmsg[ENETC_MAX_NUM_VFS];
+ struct work_struct msg_task;
+ char msg_int_name[ENETC_INT_NAME_MAX];
+
+ char vlan_promisc_simap; /* bitmap of SIs in VLAN promisc mode */
+ DECLARE_BITMAP(vlan_ht_filter, ENETC_VLAN_HT_SIZE);
+ DECLARE_BITMAP(active_vlans, VLAN_N_VID);
+
+ struct mii_bus *mdio; /* saved for cleanup */
+ struct mii_bus *imdio;
+ struct phylink_pcs *pcs;
+
+ phy_interface_t if_mode;
+ struct phylink_config phylink_config;
+};
+
+#define phylink_to_enetc_pf(config) \
+ container_of((config), struct enetc_pf, phylink_config)
+
+int enetc_msg_psi_init(struct enetc_pf *pf);
+void enetc_msg_psi_free(struct enetc_pf *pf);
+void enetc_msg_handle_rxmsg(struct enetc_pf *pf, int mbox_id, u16 *status);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
new file mode 100644
index 000000000..5243fc031
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2019 NXP */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/fsl/ptp_qoriq.h>
+
+#include "enetc.h"
+
+int enetc_phc_index = -1;
+EXPORT_SYMBOL_GPL(enetc_phc_index);
+
+static struct ptp_clock_info enetc_ptp_caps = {
+ .owner = THIS_MODULE,
+ .name = "ENETC PTP clock",
+ .max_adj = 512000,
+ .n_alarm = 0,
+ .n_ext_ts = 2,
+ .n_per_out = 0,
+ .n_pins = 0,
+ .pps = 1,
+ .adjfine = ptp_qoriq_adjfine,
+ .adjtime = ptp_qoriq_adjtime,
+ .gettime64 = ptp_qoriq_gettime,
+ .settime64 = ptp_qoriq_settime,
+ .enable = ptp_qoriq_enable,
+};
+
+static int enetc_ptp_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct ptp_qoriq *ptp_qoriq;
+ void __iomem *base;
+ int err, len, n;
+
+ if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) {
+ dev_info(&pdev->dev, "device is disabled, skipping\n");
+ return -ENODEV;
+ }
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "device enable failed\n");
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
+ goto err_dma;
+ }
+
+ err = pci_request_mem_regions(pdev, KBUILD_MODNAME);
+ if (err) {
+ dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err);
+ goto err_pci_mem_reg;
+ }
+
+ pci_set_master(pdev);
+
+ ptp_qoriq = kzalloc(sizeof(*ptp_qoriq), GFP_KERNEL);
+ if (!ptp_qoriq) {
+ err = -ENOMEM;
+ goto err_alloc_ptp;
+ }
+
+ len = pci_resource_len(pdev, ENETC_BAR_REGS);
+
+ base = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len);
+ if (!base) {
+ err = -ENXIO;
+ dev_err(&pdev->dev, "ioremap() failed\n");
+ goto err_ioremap;
+ }
+
+ /* Allocate 1 interrupt */
+ n = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
+ if (n != 1) {
+ err = -EPERM;
+ goto err_irq_vectors;
+ }
+
+ ptp_qoriq->irq = pci_irq_vector(pdev, 0);
+
+ err = request_irq(ptp_qoriq->irq, ptp_qoriq_isr, 0, DRIVER, ptp_qoriq);
+ if (err) {
+ dev_err(&pdev->dev, "request_irq() failed!\n");
+ goto err_irq;
+ }
+
+ ptp_qoriq->dev = &pdev->dev;
+
+ err = ptp_qoriq_init(ptp_qoriq, base, &enetc_ptp_caps);
+ if (err)
+ goto err_no_clock;
+
+ enetc_phc_index = ptp_qoriq->phc_index;
+ pci_set_drvdata(pdev, ptp_qoriq);
+
+ return 0;
+
+err_no_clock:
+ free_irq(ptp_qoriq->irq, ptp_qoriq);
+err_irq:
+ pci_free_irq_vectors(pdev);
+err_irq_vectors:
+ iounmap(base);
+err_ioremap:
+ kfree(ptp_qoriq);
+err_alloc_ptp:
+ pci_release_mem_regions(pdev);
+err_pci_mem_reg:
+err_dma:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void enetc_ptp_remove(struct pci_dev *pdev)
+{
+ struct ptp_qoriq *ptp_qoriq = pci_get_drvdata(pdev);
+
+ enetc_phc_index = -1;
+ ptp_qoriq_free(ptp_qoriq);
+ pci_free_irq_vectors(pdev);
+ kfree(ptp_qoriq);
+
+ pci_release_mem_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static const struct pci_device_id enetc_ptp_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_PTP) },
+ { 0, } /* End of table. */
+};
+MODULE_DEVICE_TABLE(pci, enetc_ptp_id_table);
+
+static struct pci_driver enetc_ptp_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = enetc_ptp_id_table,
+ .probe = enetc_ptp_probe,
+ .remove = enetc_ptp_remove,
+};
+module_pci_driver(enetc_ptp_driver);
+
+MODULE_DESCRIPTION("ENETC PTP clock driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
new file mode 100644
index 000000000..762849959
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -0,0 +1,1625 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2019 NXP */
+
+#include "enetc.h"
+
+#include <net/pkt_sched.h>
+#include <linux/math64.h>
+#include <linux/refcount.h>
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gate.h>
+
+static u16 enetc_get_max_gcl_len(struct enetc_hw *hw)
+{
+ return enetc_rd(hw, ENETC_PTGCAPR) & ENETC_PTGCAPR_MAX_GCL_LEN_MASK;
+}
+
+void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ u32 old_speed = priv->speed;
+ u32 pspeed, tmp;
+
+ if (speed == old_speed)
+ return;
+
+ switch (speed) {
+ case SPEED_1000:
+ pspeed = ENETC_PMR_PSPEED_1000M;
+ break;
+ case SPEED_2500:
+ pspeed = ENETC_PMR_PSPEED_2500M;
+ break;
+ case SPEED_100:
+ pspeed = ENETC_PMR_PSPEED_100M;
+ break;
+ case SPEED_10:
+ default:
+ pspeed = ENETC_PMR_PSPEED_10M;
+ }
+
+ priv->speed = speed;
+ tmp = enetc_port_rd(hw, ENETC_PMR);
+ enetc_port_wr(hw, ENETC_PMR, (tmp & ~ENETC_PMR_PSPEED_MASK) | pspeed);
+}
+
+static int enetc_setup_taprio(struct net_device *ndev,
+ struct tc_taprio_qopt_offload *admin_conf)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_cbd cbd = {.cmd = 0};
+ struct tgs_gcl_conf *gcl_config;
+ struct tgs_gcl_data *gcl_data;
+ dma_addr_t dma;
+ struct gce *gce;
+ u16 data_size;
+ u16 gcl_len;
+ void *tmp;
+ u32 tge;
+ int err;
+ int i;
+
+ if (admin_conf->num_entries > enetc_get_max_gcl_len(hw))
+ return -EINVAL;
+ gcl_len = admin_conf->num_entries;
+
+ tge = enetc_rd(hw, ENETC_PTGCR);
+ if (!admin_conf->enable) {
+ enetc_wr(hw, ENETC_PTGCR, tge & ~ENETC_PTGCR_TGE);
+ enetc_reset_ptcmsdur(hw);
+
+ priv->active_offloads &= ~ENETC_F_QBV;
+
+ return 0;
+ }
+
+ if (admin_conf->cycle_time > U32_MAX ||
+ admin_conf->cycle_time_extension > U32_MAX)
+ return -EINVAL;
+
+ /* Configure the (administrative) gate control list using the
+ * control BD descriptor.
+ */
+ gcl_config = &cbd.gcl_conf;
+
+ data_size = struct_size(gcl_data, entry, gcl_len);
+ tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
+ &dma, (void *)&gcl_data);
+ if (!tmp)
+ return -ENOMEM;
+
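+ /* The gate control entries immediately follow the fixed-size gcl_data
+ * header inside the DMA buffer sized with struct_size() above.
+ */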
+ gce = (struct gce *)(gcl_data + 1);
+
+ /* Set all gates open as default */
+ gcl_config->atc = 0xff;
+ gcl_config->acl_len = cpu_to_le16(gcl_len);
+
+ gcl_data->btl = cpu_to_le32(lower_32_bits(admin_conf->base_time));
+ gcl_data->bth = cpu_to_le32(upper_32_bits(admin_conf->base_time));
+ gcl_data->ct = cpu_to_le32(admin_conf->cycle_time);
+ gcl_data->cte = cpu_to_le32(admin_conf->cycle_time_extension);
+
+ for (i = 0; i < gcl_len; i++) {
+ struct tc_taprio_sched_entry *temp_entry;
+ struct gce *temp_gce = gce + i;
+
+ temp_entry = &admin_conf->entries[i];
+
+ temp_gce->gate = (u8)temp_entry->gate_mask;
+ temp_gce->period = cpu_to_le32(temp_entry->interval);
+ }
+
+ cbd.status_flags = 0;
+
+ cbd.cls = BDCR_CMD_PORT_GCL;
+ cbd.status_flags = 0;
+
+ enetc_wr(hw, ENETC_PTGCR, tge | ENETC_PTGCR_TGE);
+
+ err = enetc_send_cmd(priv->si, &cbd);
+ if (err)
+ enetc_wr(hw, ENETC_PTGCR, tge & ~ENETC_PTGCR_TGE);
+
+ enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
+
+ if (err)
+ return err;
+
+ enetc_set_ptcmsdur(hw, admin_conf->max_sdu);
+ priv->active_offloads |= ENETC_F_QBV;
+
+ return 0;
+}
+
+int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
+{
+ struct tc_taprio_qopt_offload *taprio = type_data;
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_bdr *tx_ring;
+ int err;
+ int i;
+
+ /* TSD and Qbv are mutually exclusive in hardware */
+ for (i = 0; i < priv->num_tx_rings; i++)
+ if (priv->tx_ring[i]->tsd_enable)
+ return -EBUSY;
+
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ tx_ring = priv->tx_ring[i];
+ tx_ring->prio = taprio->enable ? i : 0;
+ enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
+ }
+
+ err = enetc_setup_taprio(ndev, taprio);
+ if (err) {
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ tx_ring = priv->tx_ring[i];
+ tx_ring->prio = taprio->enable ? 0 : i;
+ enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
+ }
+ }
+
+ return err;
+}
+
+static u32 enetc_get_cbs_enable(struct enetc_hw *hw, u8 tc)
+{
+ return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBSE;
+}
+
+static u8 enetc_get_cbs_bw(struct enetc_hw *hw, u8 tc)
+{
+ return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBS_BW_MASK;
+}
+
+int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct tc_cbs_qopt_offload *cbs = type_data;
+ u32 port_transmit_rate = priv->speed;
+ u8 tc_nums = netdev_get_num_tc(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ u32 hi_credit_bit, hi_credit_reg;
+ u32 max_interference_size;
+ u32 port_frame_max_size;
+ u8 tc = cbs->queue;
+ u8 prio_top, prio_next;
+ int bw_sum = 0;
+ u8 bw;
+
+ prio_top = tc_nums - 1;
+ prio_next = tc_nums - 2;
+
+ /* Only the highest and second highest priority TCs support CBS mode */
+ if (tc != prio_top && tc != prio_next)
+ return -EOPNOTSUPP;
+
+ if (!cbs->enable) {
+ /* Make sure the other TCs that are numerically
+ * lower than this TC have been disabled.
+ */
+ if (tc == prio_top &&
+ enetc_get_cbs_enable(hw, prio_next)) {
+ dev_err(&ndev->dev,
+ "Disable TC%d before disable TC%d\n",
+ prio_next, tc);
+ return -EINVAL;
+ }
+
+ enetc_port_wr(hw, ENETC_PTCCBSR1(tc), 0);
+ enetc_port_wr(hw, ENETC_PTCCBSR0(tc), 0);
+
+ return 0;
+ }
+
+ if (cbs->idleslope - cbs->sendslope != port_transmit_rate * 1000L ||
+ cbs->idleslope < 0 || cbs->sendslope > 0)
+ return -EOPNOTSUPP;
+
+ port_frame_max_size = ndev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+
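+ /* idleslope is expressed in kbit/s and port_transmit_rate in Mbit/s
+ * (see the idleslope - sendslope check above), so bw works out to the
+ * class bandwidth as a percentage of the port rate.
+ */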
+ bw = cbs->idleslope / (port_transmit_rate * 10UL);
+
+ /* Make sure the other TCs that are numerically
+ * higher than this TC have been enabled.
+ */
+ if (tc == prio_next) {
+ if (!enetc_get_cbs_enable(hw, prio_top)) {
+ dev_err(&ndev->dev,
+ "Enable TC%d first before enable TC%d\n",
+ prio_top, prio_next);
+ return -EINVAL;
+ }
+ bw_sum += enetc_get_cbs_bw(hw, prio_top);
+ }
+
+ if (bw_sum + bw >= 100) {
+ dev_err(&ndev->dev,
+ "The sum of all CBS Bandwidth can't exceed 100\n");
+ return -EINVAL;
+ }
+
+ enetc_port_rd(hw, ENETC_PTCMSDUR(tc));
+
+ /* For the top prio TC, max_interference_size is the maximum sized frame.
+ *
+ * For the next prio TC, max_interference_size is calculated as below:
+ *
+ * max_interference_size = M0 + Ma + Ra * M0 / (R0 - Ra)
+ *
+ * - RA: idleSlope for AVB Class A
+ * - R0: port transmit rate
+ * - M0: maximum sized frame for the port
+ * - MA: maximum sized frame for AVB Class A
+ */
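+ /* For illustration only (assumed values): on a 1000 Mbps port with a
+ * 1522 byte maximum frame for both the port and Class A (m0 = ma =
+ * 12176 bits) and 20% Class A bandwidth (ra = 0.2 * r0), this gives
+ * 12176 + 12176 + 0.2 * 12176 / 0.8 ~= 27396 bits.
+ */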
+
+ if (tc == prio_top) {
+ max_interference_size = port_frame_max_size * 8;
+ } else {
+ u32 m0, ma, r0, ra;
+
+ m0 = port_frame_max_size * 8;
+ ma = enetc_port_rd(hw, ENETC_PTCMSDUR(prio_top)) * 8;
+ ra = enetc_get_cbs_bw(hw, prio_top) *
+ port_transmit_rate * 10000ULL;
+ r0 = port_transmit_rate * 1000000ULL;
+ max_interference_size = m0 + ma +
+ (u32)div_u64((u64)ra * m0, r0 - ra);
+ }
+
+ /* hiCredit bits are calculated as:
+ *
+ * maxSizedFrame * (idleSlope/portTxRate)
+ */
+ hi_credit_bit = max_interference_size * bw / 100;
+
+ /* hiCredit bits are converted to the hiCredit register value by scaling with:
+ *
+ * (enetClockFrequency / portTransmitRate) * 100
+ */
+ hi_credit_reg = (u32)div_u64((ENETC_CLK * 100ULL) * hi_credit_bit,
+ port_transmit_rate * 1000000ULL);
+
+ enetc_port_wr(hw, ENETC_PTCCBSR1(tc), hi_credit_reg);
+
+ /* Set bw register and enable this traffic class */
+ enetc_port_wr(hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE);
+
+ return 0;
+}
+
+int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct tc_etf_qopt_offload *qopt = type_data;
+ u8 tc_nums = netdev_get_num_tc(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ int tc;
+
+ if (!tc_nums)
+ return -EOPNOTSUPP;
+
+ tc = qopt->queue;
+
+ if (tc < 0 || tc >= priv->num_tx_rings)
+ return -EINVAL;
+
+ /* TSD and Qbv are mutually exclusive in hardware */
+ if (enetc_rd(hw, ENETC_PTGCR) & ENETC_PTGCR_TGE)
+ return -EBUSY;
+
+ priv->tx_ring[tc]->tsd_enable = qopt->enable;
+ enetc_port_wr(hw, ENETC_PTCTSDR(tc), qopt->enable ? ENETC_TSDE : 0);
+
+ return 0;
+}
+
+enum streamid_type {
+ STREAMID_TYPE_RESERVED = 0,
+ STREAMID_TYPE_NULL,
+ STREAMID_TYPE_SMAC,
+};
+
+enum streamid_vlan_tagged {
+ STREAMID_VLAN_RESERVED = 0,
+ STREAMID_VLAN_TAGGED,
+ STREAMID_VLAN_UNTAGGED,
+ STREAMID_VLAN_ALL,
+};
+
+#define ENETC_PSFP_WILDCARD -1
+#define HANDLE_OFFSET 100
+
+enum forward_type {
+ FILTER_ACTION_TYPE_PSFP = BIT(0),
+ FILTER_ACTION_TYPE_ACL = BIT(1),
+ FILTER_ACTION_TYPE_BOTH = GENMASK(1, 0),
+};
+
+/* This is for limit output type for input actions */
+struct actions_fwd {
+ u64 actions;
+ u64 keys; /* include the must needed keys */
+ enum forward_type output;
+};
+
+struct psfp_streamfilter_counters {
+ u64 matching_frames_count;
+ u64 passing_frames_count;
+ u64 not_passing_frames_count;
+ u64 passing_sdu_count;
+ u64 not_passing_sdu_count;
+ u64 red_frames_count;
+};
+
+struct enetc_streamid {
+ u32 index;
+ union {
+ u8 src_mac[6];
+ u8 dst_mac[6];
+ };
+ u8 filtertype;
+ u16 vid;
+ u8 tagged;
+ s32 handle;
+};
+
+struct enetc_psfp_filter {
+ u32 index;
+ s32 handle;
+ s8 prio;
+ u32 maxsdu;
+ u32 gate_id;
+ s32 meter_id;
+ refcount_t refcount;
+ struct hlist_node node;
+};
+
+struct enetc_psfp_gate {
+ u32 index;
+ s8 init_ipv;
+ u64 basetime;
+ u64 cycletime;
+ u64 cycletimext;
+ u32 num_entries;
+ refcount_t refcount;
+ struct hlist_node node;
+ struct action_gate_entry entries[];
+};
+
+/* Only the green color frames are handled for now.
+ * EIR and EBS, color blind mode, the coupling flag, etc. will be added
+ * when the policing action offloads more parameters.
+ */
+struct enetc_psfp_meter {
+ u32 index;
+ u32 cir;
+ u32 cbs;
+ refcount_t refcount;
+ struct hlist_node node;
+};
+
+#define ENETC_PSFP_FLAGS_FMI BIT(0)
+
+struct enetc_stream_filter {
+ struct enetc_streamid sid;
+ u32 sfi_index;
+ u32 sgi_index;
+ u32 flags;
+ u32 fmi_index;
+ struct flow_stats stats;
+ struct hlist_node node;
+};
+
+struct enetc_psfp {
+ unsigned long dev_bitmap;
+ unsigned long *psfp_sfi_bitmap;
+ struct hlist_head stream_list;
+ struct hlist_head psfp_filter_list;
+ struct hlist_head psfp_gate_list;
+ struct hlist_head psfp_meter_list;
+ spinlock_t psfp_lock; /* spinlock for the struct enetc_psfp r/w */
+};
+
+static struct actions_fwd enetc_act_fwd[] = {
+ {
+ BIT(FLOW_ACTION_GATE),
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS),
+ FILTER_ACTION_TYPE_PSFP
+ },
+ {
+ BIT(FLOW_ACTION_POLICE) |
+ BIT(FLOW_ACTION_GATE),
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS),
+ FILTER_ACTION_TYPE_PSFP
+ },
+ /* example for ACL actions */
+ {
+ BIT(FLOW_ACTION_DROP),
+ 0,
+ FILTER_ACTION_TYPE_ACL
+ }
+};
+
+static struct enetc_psfp epsfp = {
+ .dev_bitmap = 0,
+ .psfp_sfi_bitmap = NULL,
+};
+
+static LIST_HEAD(enetc_block_cb_list);
+
+/* Stream Identity Entry Set Descriptor */
+static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
+ struct enetc_streamid *sid,
+ u8 enable)
+{
+ struct enetc_cbd cbd = {.cmd = 0};
+ struct streamid_data *si_data;
+ struct streamid_conf *si_conf;
+ dma_addr_t dma;
+ u16 data_size;
+ void *tmp;
+ int port;
+ int err;
+
+ port = enetc_pf_to_port(priv->si->pdev);
+ if (port < 0)
+ return -EINVAL;
+
+ if (sid->index >= priv->psfp_cap.max_streamid)
+ return -EINVAL;
+
+ if (sid->filtertype != STREAMID_TYPE_NULL &&
+ sid->filtertype != STREAMID_TYPE_SMAC)
+ return -EOPNOTSUPP;
+
+	/* Disable the entry before (re-)enabling it */
+ cbd.index = cpu_to_le16((u16)sid->index);
+ cbd.cls = BDCR_CMD_STREAM_IDENTIFY;
+ cbd.status_flags = 0;
+
+ data_size = sizeof(struct streamid_data);
+ tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
+ &dma, (void *)&si_data);
+ if (!tmp)
+ return -ENOMEM;
+
+ eth_broadcast_addr(si_data->dmac);
+ si_data->vid_vidm_tg = (ENETC_CBDR_SID_VID_MASK
+ + ((0x3 << 14) | ENETC_CBDR_SID_VIDM));
+
+ si_conf = &cbd.sid_set;
+	/* Only one port is supported per entry; set our own port */
+ si_conf->iports = cpu_to_le32(1 << port);
+ si_conf->id_type = 1;
+ si_conf->oui[2] = 0x0;
+ si_conf->oui[1] = 0x80;
+ si_conf->oui[0] = 0xC2;
+
+ err = enetc_send_cmd(priv->si, &cbd);
+ if (err)
+ goto out;
+
+ if (!enable)
+ goto out;
+
+	/* Enable the entry and overwrite it again in case the space was
+	 * flushed by hardware.
+	 */
+ cbd.status_flags = 0;
+
+ si_conf->en = 0x80;
+ si_conf->stream_handle = cpu_to_le32(sid->handle);
+ si_conf->iports = cpu_to_le32(1 << port);
+ si_conf->id_type = sid->filtertype;
+ si_conf->oui[2] = 0x0;
+ si_conf->oui[1] = 0x80;
+ si_conf->oui[0] = 0xC2;
+
+ memset(si_data, 0, data_size);
+
+	/* VIDM defaults to 1 (VID Match): if set (b1), the VID must
+	 * match, otherwise any VID is considered a match. The VIDM
+	 * setting is only used when TG is set to b01.
+	 */
+ if (si_conf->id_type == STREAMID_TYPE_NULL) {
+ ether_addr_copy(si_data->dmac, sid->dst_mac);
+ si_data->vid_vidm_tg = (sid->vid & ENETC_CBDR_SID_VID_MASK) +
+ ((((u16)(sid->tagged) & 0x3) << 14)
+ | ENETC_CBDR_SID_VIDM);
+ } else if (si_conf->id_type == STREAMID_TYPE_SMAC) {
+ ether_addr_copy(si_data->smac, sid->src_mac);
+ si_data->vid_vidm_tg = (sid->vid & ENETC_CBDR_SID_VID_MASK) +
+ ((((u16)(sid->tagged) & 0x3) << 14)
+ | ENETC_CBDR_SID_VIDM);
+ }
+
+ err = enetc_send_cmd(priv->si, &cbd);
+out:
+ enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
+
+ return err;
+}
+
+/* Stream Filter Instance Set Descriptor */
+static int enetc_streamfilter_hw_set(struct enetc_ndev_priv *priv,
+ struct enetc_psfp_filter *sfi,
+ u8 enable)
+{
+ struct enetc_cbd cbd = {.cmd = 0};
+ struct sfi_conf *sfi_config;
+ int port;
+
+ port = enetc_pf_to_port(priv->si->pdev);
+ if (port < 0)
+ return -EINVAL;
+
+ cbd.index = cpu_to_le16(sfi->index);
+ cbd.cls = BDCR_CMD_STREAM_FILTER;
+ cbd.status_flags = 0x80;
+ cbd.length = cpu_to_le16(1);
+
+ sfi_config = &cbd.sfi_conf;
+ if (!enable)
+ goto exit;
+
+ sfi_config->en = 0x80;
+
+ if (sfi->handle >= 0) {
+ sfi_config->stream_handle =
+ cpu_to_le32(sfi->handle);
+ sfi_config->sthm |= 0x80;
+ }
+
+ sfi_config->sg_inst_table_index = cpu_to_le16(sfi->gate_id);
+ sfi_config->input_ports = cpu_to_le32(1 << port);
+
+	/* The priority value that is matched against the frame's
+	 * priority value to determine a match for this entry.
+	 */
+ if (sfi->prio >= 0)
+ sfi_config->multi |= (sfi->prio & 0x7) | 0x8;
+
+ /* Filter Type. Identifies the contents of the MSDU/FM_INST_INDEX
+ * field as being either an MSDU value or an index into the Flow
+ * Meter Instance table.
+ */
+ if (sfi->maxsdu) {
+ sfi_config->msdu =
+ cpu_to_le16(sfi->maxsdu);
+ sfi_config->multi |= 0x40;
+ }
+
+ if (sfi->meter_id >= 0) {
+ sfi_config->fm_inst_table_index = cpu_to_le16(sfi->meter_id);
+ sfi_config->multi |= 0x80;
+ }
+
+exit:
+ return enetc_send_cmd(priv->si, &cbd);
+}
+
+static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv,
+ u32 index,
+ struct psfp_streamfilter_counters *cnt)
+{
+ struct enetc_cbd cbd = { .cmd = 2 };
+ struct sfi_counter_data *data_buf;
+ dma_addr_t dma;
+ u16 data_size;
+ void *tmp;
+ int err;
+
+ cbd.index = cpu_to_le16((u16)index);
+ cbd.cmd = 2;
+ cbd.cls = BDCR_CMD_STREAM_FILTER;
+ cbd.status_flags = 0;
+
+ data_size = sizeof(struct sfi_counter_data);
+
+ tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
+ &dma, (void *)&data_buf);
+ if (!tmp)
+ return -ENOMEM;
+
+ err = enetc_send_cmd(priv->si, &cbd);
+ if (err)
+ goto exit;
+
+ cnt->matching_frames_count = ((u64)data_buf->matchh << 32) +
+ data_buf->matchl;
+
+ cnt->not_passing_sdu_count = ((u64)data_buf->msdu_droph << 32) +
+ data_buf->msdu_dropl;
+
+ cnt->passing_sdu_count = cnt->matching_frames_count
+ - cnt->not_passing_sdu_count;
+
+ cnt->not_passing_frames_count =
+ ((u64)data_buf->stream_gate_droph << 32) +
+ data_buf->stream_gate_dropl;
+
+ cnt->passing_frames_count = cnt->matching_frames_count -
+ cnt->not_passing_sdu_count -
+ cnt->not_passing_frames_count;
+
+ cnt->red_frames_count = ((u64)data_buf->flow_meter_droph << 32) +
+ data_buf->flow_meter_dropl;
+
+exit:
+ enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
+
+ return err;
+}
+
+static u64 get_ptp_now(struct enetc_hw *hw)
+{
+ u64 now_lo, now_hi, now;
+
+ now_lo = enetc_rd(hw, ENETC_SICTR0);
+ now_hi = enetc_rd(hw, ENETC_SICTR1);
+ now = now_lo | now_hi << 32;
+
+ return now;
+}
+
+static int get_start_ns(u64 now, u64 cycle, u64 *start)
+{
+ u64 n;
+
+ if (!cycle)
+ return -EFAULT;
+
+ n = div64_u64(now, cycle);
+
+ *start = (n + 1) * cycle;
+
+ return 0;
+}
+
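+/* Worked example for get_start_ns() (illustrative numbers): with
+ * now = 1000000123 ns and cycle = 500000 ns, n = 1000000123 / 500000
+ * = 2000, so *start = (2000 + 1) * 500000 = 1000500000 ns, i.e. the
+ * first cycle boundary strictly after "now".
+ */
+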
+/* Stream Gate Instance Set Descriptor */
+static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
+ struct enetc_psfp_gate *sgi,
+ u8 enable)
+{
+ struct enetc_cbd cbd = { .cmd = 0 };
+ struct sgi_table *sgi_config;
+ struct sgcl_conf *sgcl_config;
+ struct sgcl_data *sgcl_data;
+ struct sgce *sgce;
+ dma_addr_t dma;
+ u16 data_size;
+ int err, i;
+ void *tmp;
+ u64 now;
+
+ cbd.index = cpu_to_le16(sgi->index);
+ cbd.cmd = 0;
+ cbd.cls = BDCR_CMD_STREAM_GCL;
+ cbd.status_flags = 0x80;
+
+ /* disable */
+ if (!enable)
+ return enetc_send_cmd(priv->si, &cbd);
+
+ if (!sgi->num_entries)
+ return 0;
+
+ if (sgi->num_entries > priv->psfp_cap.max_psfp_gatelist ||
+ !sgi->cycletime)
+ return -EINVAL;
+
+ /* enable */
+ sgi_config = &cbd.sgi_table;
+
+	/* Keep the gate open before the gate list starts */
+ sgi_config->ocgtst = 0x80;
+
+ sgi_config->oipv = (sgi->init_ipv < 0) ?
+ 0x0 : ((sgi->init_ipv & 0x7) | 0x8);
+
+ sgi_config->en = 0x80;
+
+ /* Basic config */
+ err = enetc_send_cmd(priv->si, &cbd);
+ if (err)
+ return -EINVAL;
+
+ memset(&cbd, 0, sizeof(cbd));
+
+ cbd.index = cpu_to_le16(sgi->index);
+ cbd.cmd = 1;
+ cbd.cls = BDCR_CMD_STREAM_GCL;
+ cbd.status_flags = 0;
+
+ sgcl_config = &cbd.sgcl_conf;
+
+ sgcl_config->acl_len = (sgi->num_entries - 1) & 0x3;
+
+ data_size = struct_size(sgcl_data, sgcl, sgi->num_entries);
+ tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
+ &dma, (void *)&sgcl_data);
+ if (!tmp)
+ return -ENOMEM;
+
+ sgce = &sgcl_data->sgcl[0];
+
+ sgcl_config->agtst = 0x80;
+
+ sgcl_data->ct = sgi->cycletime;
+ sgcl_data->cte = sgi->cycletimext;
+
+ if (sgi->init_ipv >= 0)
+ sgcl_config->aipv = (sgi->init_ipv & 0x7) | 0x8;
+
+ for (i = 0; i < sgi->num_entries; i++) {
+ struct action_gate_entry *from = &sgi->entries[i];
+ struct sgce *to = &sgce[i];
+
+ if (from->gate_state)
+ to->multi |= 0x10;
+
+ if (from->ipv >= 0)
+ to->multi |= ((from->ipv & 0x7) << 5) | 0x08;
+
+ if (from->maxoctets >= 0) {
+ to->multi |= 0x01;
+ to->msdu[0] = from->maxoctets & 0xFF;
+ to->msdu[1] = (from->maxoctets >> 8) & 0xFF;
+ to->msdu[2] = (from->maxoctets >> 16) & 0xFF;
+ }
+
+ to->interval = from->interval;
+ }
+
+ /* If basetime is less than now, calculate start time */
+ now = get_ptp_now(&priv->si->hw);
+
+ if (sgi->basetime < now) {
+ u64 start;
+
+ err = get_start_ns(now, sgi->cycletime, &start);
+ if (err)
+ goto exit;
+ sgcl_data->btl = lower_32_bits(start);
+ sgcl_data->bth = upper_32_bits(start);
+ } else {
+ u32 hi, lo;
+
+ hi = upper_32_bits(sgi->basetime);
+ lo = lower_32_bits(sgi->basetime);
+ sgcl_data->bth = hi;
+ sgcl_data->btl = lo;
+ }
+
+ err = enetc_send_cmd(priv->si, &cbd);
+
+exit:
+ enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
+ return err;
+}
+
+static int enetc_flowmeter_hw_set(struct enetc_ndev_priv *priv,
+ struct enetc_psfp_meter *fmi,
+ u8 enable)
+{
+ struct enetc_cbd cbd = { .cmd = 0 };
+ struct fmi_conf *fmi_config;
+ u64 temp = 0;
+
+ cbd.index = cpu_to_le16((u16)fmi->index);
+ cbd.cls = BDCR_CMD_FLOW_METER;
+ cbd.status_flags = 0x80;
+
+ if (!enable)
+ return enetc_send_cmd(priv->si, &cbd);
+
+ fmi_config = &cbd.fmi_conf;
+ fmi_config->en = 0x80;
+
+ if (fmi->cir) {
+ temp = (u64)8000 * fmi->cir;
+ temp = div_u64(temp, 3725);
+ }
+
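+	/* Illustrative arithmetic only (the unit of the CIR field is a
+	 * hardware detail not documented here): for a policer rate of
+	 * 12500000 bytes/s (100 Mbit/s),
+	 * temp = 8000 * 12500000 / 3725 = 26845637.
+	 */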
+ fmi_config->cir = cpu_to_le32((u32)temp);
+ fmi_config->cbs = cpu_to_le32(fmi->cbs);
+
+	/* EIR and EBS are disabled by default */
+ fmi_config->eir = 0;
+ fmi_config->ebs = 0;
+
+	/* Defaults:
+	 * mark red disabled
+	 * drop on yellow disabled
+	 * color mode disabled
+	 * couple flag disabled
+	 */
+ fmi_config->conf = 0;
+
+ return enetc_send_cmd(priv->si, &cbd);
+}
+
+static struct enetc_stream_filter *enetc_get_stream_by_index(u32 index)
+{
+ struct enetc_stream_filter *f;
+
+ hlist_for_each_entry(f, &epsfp.stream_list, node)
+ if (f->sid.index == index)
+ return f;
+
+ return NULL;
+}
+
+static struct enetc_psfp_gate *enetc_get_gate_by_index(u32 index)
+{
+ struct enetc_psfp_gate *g;
+
+ hlist_for_each_entry(g, &epsfp.psfp_gate_list, node)
+ if (g->index == index)
+ return g;
+
+ return NULL;
+}
+
+static struct enetc_psfp_filter *enetc_get_filter_by_index(u32 index)
+{
+ struct enetc_psfp_filter *s;
+
+ hlist_for_each_entry(s, &epsfp.psfp_filter_list, node)
+ if (s->index == index)
+ return s;
+
+ return NULL;
+}
+
+static struct enetc_psfp_meter *enetc_get_meter_by_index(u32 index)
+{
+ struct enetc_psfp_meter *m;
+
+ hlist_for_each_entry(m, &epsfp.psfp_meter_list, node)
+ if (m->index == index)
+ return m;
+
+ return NULL;
+}
+
+static struct enetc_psfp_filter
+ *enetc_psfp_check_sfi(struct enetc_psfp_filter *sfi)
+{
+ struct enetc_psfp_filter *s;
+
+ hlist_for_each_entry(s, &epsfp.psfp_filter_list, node)
+ if (s->gate_id == sfi->gate_id &&
+ s->prio == sfi->prio &&
+ s->maxsdu == sfi->maxsdu &&
+ s->meter_id == sfi->meter_id)
+ return s;
+
+ return NULL;
+}
+
+static int enetc_get_free_index(struct enetc_ndev_priv *priv)
+{
+ u32 max_size = priv->psfp_cap.max_psfp_filter;
+ unsigned long index;
+
+ index = find_first_zero_bit(epsfp.psfp_sfi_bitmap, max_size);
+ if (index == max_size)
+ return -1;
+
+ return index;
+}
+
+static void stream_filter_unref(struct enetc_ndev_priv *priv, u32 index)
+{
+ struct enetc_psfp_filter *sfi;
+ u8 z;
+
+ sfi = enetc_get_filter_by_index(index);
+ WARN_ON(!sfi);
+ z = refcount_dec_and_test(&sfi->refcount);
+
+ if (z) {
+ enetc_streamfilter_hw_set(priv, sfi, false);
+ hlist_del(&sfi->node);
+ kfree(sfi);
+ clear_bit(index, epsfp.psfp_sfi_bitmap);
+ }
+}
+
+static void stream_gate_unref(struct enetc_ndev_priv *priv, u32 index)
+{
+ struct enetc_psfp_gate *sgi;
+ u8 z;
+
+ sgi = enetc_get_gate_by_index(index);
+ WARN_ON(!sgi);
+ z = refcount_dec_and_test(&sgi->refcount);
+ if (z) {
+ enetc_streamgate_hw_set(priv, sgi, false);
+ hlist_del(&sgi->node);
+ kfree(sgi);
+ }
+}
+
+static void flow_meter_unref(struct enetc_ndev_priv *priv, u32 index)
+{
+ struct enetc_psfp_meter *fmi;
+ u8 z;
+
+ fmi = enetc_get_meter_by_index(index);
+ WARN_ON(!fmi);
+ z = refcount_dec_and_test(&fmi->refcount);
+ if (z) {
+ enetc_flowmeter_hw_set(priv, fmi, false);
+ hlist_del(&fmi->node);
+ kfree(fmi);
+ }
+}
+
+static void remove_one_chain(struct enetc_ndev_priv *priv,
+ struct enetc_stream_filter *filter)
+{
+ if (filter->flags & ENETC_PSFP_FLAGS_FMI)
+ flow_meter_unref(priv, filter->fmi_index);
+
+ stream_gate_unref(priv, filter->sgi_index);
+ stream_filter_unref(priv, filter->sfi_index);
+
+ hlist_del(&filter->node);
+ kfree(filter);
+}
+
+static int enetc_psfp_hw_set(struct enetc_ndev_priv *priv,
+ struct enetc_streamid *sid,
+ struct enetc_psfp_filter *sfi,
+ struct enetc_psfp_gate *sgi,
+ struct enetc_psfp_meter *fmi)
+{
+ int err;
+
+ err = enetc_streamid_hw_set(priv, sid, true);
+ if (err)
+ return err;
+
+ if (sfi) {
+ err = enetc_streamfilter_hw_set(priv, sfi, true);
+ if (err)
+ goto revert_sid;
+ }
+
+ err = enetc_streamgate_hw_set(priv, sgi, true);
+ if (err)
+ goto revert_sfi;
+
+ if (fmi) {
+ err = enetc_flowmeter_hw_set(priv, fmi, true);
+ if (err)
+ goto revert_sgi;
+ }
+
+ return 0;
+
+revert_sgi:
+ enetc_streamgate_hw_set(priv, sgi, false);
+revert_sfi:
+ if (sfi)
+ enetc_streamfilter_hw_set(priv, sfi, false);
+revert_sid:
+ enetc_streamid_hw_set(priv, sid, false);
+ return err;
+}
+
+static struct actions_fwd *enetc_check_flow_actions(u64 acts,
+ unsigned int inputkeys)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(enetc_act_fwd); i++)
+ if (acts == enetc_act_fwd[i].actions &&
+ inputkeys & enetc_act_fwd[i].keys)
+ return &enetc_act_fwd[i];
+
+ return NULL;
+}
+
+static int enetc_psfp_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+				   "QoS offload does not support packets-per-second policing");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
+ struct flow_cls_offload *f)
+{
+ struct flow_action_entry *entryg = NULL, *entryp = NULL;
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct enetc_stream_filter *filter, *old_filter;
+ struct enetc_psfp_meter *fmi = NULL, *old_fmi;
+ struct enetc_psfp_filter *sfi, *old_sfi;
+ struct enetc_psfp_gate *sgi, *old_sgi;
+ struct flow_action_entry *entry;
+ struct action_gate_entry *e;
+ u8 sfi_overwrite = 0;
+ int entries_size;
+ int i, err;
+
+ if (f->common.chain_index >= priv->psfp_cap.max_streamid) {
+ NL_SET_ERR_MSG_MOD(extack, "No Stream identify resource!");
+ return -ENOSPC;
+ }
+
+ flow_action_for_each(i, entry, &rule->action)
+ if (entry->id == FLOW_ACTION_GATE)
+ entryg = entry;
+ else if (entry->id == FLOW_ACTION_POLICE)
+ entryp = entry;
+
+	/* Not supported without a gate action */
+ if (!entryg)
+ return -EINVAL;
+
+ filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+ if (!filter)
+ return -ENOMEM;
+
+ filter->sid.index = f->common.chain_index;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+
+ if (!is_zero_ether_addr(match.mask->dst) &&
+ !is_zero_ether_addr(match.mask->src)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot match on both source and destination MAC");
+ err = -EINVAL;
+ goto free_filter;
+ }
+
+ if (!is_zero_ether_addr(match.mask->dst)) {
+ if (!is_broadcast_ether_addr(match.mask->dst)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Masked matching on destination MAC not supported");
+ err = -EINVAL;
+ goto free_filter;
+ }
+ ether_addr_copy(filter->sid.dst_mac, match.key->dst);
+ filter->sid.filtertype = STREAMID_TYPE_NULL;
+ }
+
+ if (!is_zero_ether_addr(match.mask->src)) {
+ if (!is_broadcast_ether_addr(match.mask->src)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Masked matching on source MAC not supported");
+ err = -EINVAL;
+ goto free_filter;
+ }
+ ether_addr_copy(filter->sid.src_mac, match.key->src);
+ filter->sid.filtertype = STREAMID_TYPE_SMAC;
+ }
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported, must include ETH_ADDRS");
+ err = -EINVAL;
+ goto free_filter;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+ if (match.mask->vlan_priority) {
+ if (match.mask->vlan_priority !=
+ (VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)) {
+ NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority");
+ err = -EINVAL;
+ goto free_filter;
+ }
+ }
+
+ if (match.mask->vlan_id) {
+ if (match.mask->vlan_id != VLAN_VID_MASK) {
+ NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN id");
+ err = -EINVAL;
+ goto free_filter;
+ }
+
+ filter->sid.vid = match.key->vlan_id;
+ if (!filter->sid.vid)
+ filter->sid.tagged = STREAMID_VLAN_UNTAGGED;
+ else
+ filter->sid.tagged = STREAMID_VLAN_TAGGED;
+ }
+ } else {
+ filter->sid.tagged = STREAMID_VLAN_ALL;
+ }
+
+ /* parsing gate action */
+ if (entryg->hw_index >= priv->psfp_cap.max_psfp_gate) {
+ NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!");
+ err = -ENOSPC;
+ goto free_filter;
+ }
+
+ if (entryg->gate.num_entries >= priv->psfp_cap.max_psfp_gatelist) {
+ NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!");
+ err = -ENOSPC;
+ goto free_filter;
+ }
+
+ entries_size = struct_size(sgi, entries, entryg->gate.num_entries);
+ sgi = kzalloc(entries_size, GFP_KERNEL);
+ if (!sgi) {
+ err = -ENOMEM;
+ goto free_filter;
+ }
+
+ refcount_set(&sgi->refcount, 1);
+ sgi->index = entryg->hw_index;
+ sgi->init_ipv = entryg->gate.prio;
+ sgi->basetime = entryg->gate.basetime;
+ sgi->cycletime = entryg->gate.cycletime;
+ sgi->num_entries = entryg->gate.num_entries;
+
+ e = sgi->entries;
+ for (i = 0; i < entryg->gate.num_entries; i++) {
+ e[i].gate_state = entryg->gate.entries[i].gate_state;
+ e[i].interval = entryg->gate.entries[i].interval;
+ e[i].ipv = entryg->gate.entries[i].ipv;
+ e[i].maxoctets = entryg->gate.entries[i].maxoctets;
+ }
+
+ filter->sgi_index = sgi->index;
+
+ sfi = kzalloc(sizeof(*sfi), GFP_KERNEL);
+ if (!sfi) {
+ err = -ENOMEM;
+ goto free_gate;
+ }
+
+ refcount_set(&sfi->refcount, 1);
+ sfi->gate_id = sgi->index;
+ sfi->meter_id = ENETC_PSFP_WILDCARD;
+
+ /* Flow meter and max frame size */
+ if (entryp) {
+ err = enetc_psfp_policer_validate(&rule->action, entryp, extack);
+ if (err)
+ goto free_sfi;
+
+ if (entryp->police.burst) {
+ fmi = kzalloc(sizeof(*fmi), GFP_KERNEL);
+ if (!fmi) {
+ err = -ENOMEM;
+ goto free_sfi;
+ }
+ refcount_set(&fmi->refcount, 1);
+ fmi->cir = entryp->police.rate_bytes_ps;
+ fmi->cbs = entryp->police.burst;
+ fmi->index = entryp->hw_index;
+ filter->flags |= ENETC_PSFP_FLAGS_FMI;
+ filter->fmi_index = fmi->index;
+ sfi->meter_id = fmi->index;
+ }
+
+ if (entryp->police.mtu)
+ sfi->maxsdu = entryp->police.mtu;
+ }
+
+	/* The stream filter priority references the tc filter priority */
+ if (f->common.prio && f->common.prio <= BIT(3))
+ sfi->prio = f->common.prio - 1;
+ else
+ sfi->prio = ENETC_PSFP_WILDCARD;
+
+ old_sfi = enetc_psfp_check_sfi(sfi);
+ if (!old_sfi) {
+ int index;
+
+ index = enetc_get_free_index(priv);
+ if (index < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "No Stream Filter resource!");
+ err = -ENOSPC;
+ goto free_fmi;
+ }
+
+ sfi->index = index;
+ sfi->handle = index + HANDLE_OFFSET;
+ /* Update the stream filter handle also */
+ filter->sid.handle = sfi->handle;
+ filter->sfi_index = sfi->index;
+ sfi_overwrite = 0;
+ } else {
+ filter->sfi_index = old_sfi->index;
+ filter->sid.handle = old_sfi->handle;
+ sfi_overwrite = 1;
+ }
+
+ err = enetc_psfp_hw_set(priv, &filter->sid,
+ sfi_overwrite ? NULL : sfi, sgi, fmi);
+ if (err)
+ goto free_fmi;
+
+ spin_lock(&epsfp.psfp_lock);
+ if (filter->flags & ENETC_PSFP_FLAGS_FMI) {
+ old_fmi = enetc_get_meter_by_index(filter->fmi_index);
+ if (old_fmi) {
+ fmi->refcount = old_fmi->refcount;
+ refcount_set(&fmi->refcount,
+ refcount_read(&old_fmi->refcount) + 1);
+ hlist_del(&old_fmi->node);
+ kfree(old_fmi);
+ }
+ hlist_add_head(&fmi->node, &epsfp.psfp_meter_list);
+ }
+
+	/* Remove the old node if it exists and replace it with the new one */
+ old_sgi = enetc_get_gate_by_index(filter->sgi_index);
+ if (old_sgi) {
+ refcount_set(&sgi->refcount,
+ refcount_read(&old_sgi->refcount) + 1);
+ hlist_del(&old_sgi->node);
+ kfree(old_sgi);
+ }
+
+ hlist_add_head(&sgi->node, &epsfp.psfp_gate_list);
+
+ if (!old_sfi) {
+ hlist_add_head(&sfi->node, &epsfp.psfp_filter_list);
+ set_bit(sfi->index, epsfp.psfp_sfi_bitmap);
+ } else {
+ kfree(sfi);
+ refcount_inc(&old_sfi->refcount);
+ }
+
+ old_filter = enetc_get_stream_by_index(filter->sid.index);
+ if (old_filter)
+ remove_one_chain(priv, old_filter);
+
+ filter->stats.lastused = jiffies;
+ hlist_add_head(&filter->node, &epsfp.stream_list);
+
+ spin_unlock(&epsfp.psfp_lock);
+
+ return 0;
+
+free_fmi:
+ kfree(fmi);
+free_sfi:
+ kfree(sfi);
+free_gate:
+ kfree(sgi);
+free_filter:
+ kfree(filter);
+
+ return err;
+}
+
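+/* Usage sketch for the PSFP parsing above. Illustrative only: the
+ * interface name and the exact iproute2 syntax are assumptions.
+ *
+ *   tc qdisc add dev eno0 clsact
+ *   tc filter add dev eno0 ingress chain 1 protocol 802.1Q flower \
+ *	skip_sw dst_mac 00:01:02:03:04:05 vlan_id 100 \
+ *	action gate index 1 clockid CLOCK_TAI base-time 0 \
+ *	sched-entry open 200000 -1 -1 \
+ *	sched-entry close 100000 -1 -1 \
+ *	action police index 1 rate 100mbit burst 10000 \
+ *	conform-exceed drop/pipe
+ *
+ * The chain index selects the stream identity entry, the gate action is
+ * mapped to a stream gate instance and the (optional) police action to
+ * a flow meter.
+ */
+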
+static int enetc_config_clsflower(struct enetc_ndev_priv *priv,
+ struct flow_cls_offload *cls_flower)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
+ struct netlink_ext_ack *extack = cls_flower->common.extack;
+ struct flow_dissector *dissector = rule->match.dissector;
+ struct flow_action *action = &rule->action;
+ struct flow_action_entry *entry;
+ struct actions_fwd *fwd;
+ u64 actions = 0;
+ int i, err;
+
+ if (!flow_action_has_entries(action)) {
+ NL_SET_ERR_MSG_MOD(extack, "At least one action is needed");
+ return -EINVAL;
+ }
+
+ flow_action_for_each(i, entry, action)
+ actions |= BIT(entry->id);
+
+ fwd = enetc_check_flow_actions(actions, dissector->used_keys);
+ if (!fwd) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported filter type!");
+ return -EOPNOTSUPP;
+ }
+
+ if (fwd->output & FILTER_ACTION_TYPE_PSFP) {
+ err = enetc_psfp_parse_clsflower(priv, cls_flower);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid PSFP inputs");
+ return err;
+ }
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported actions");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int enetc_psfp_destroy_clsflower(struct enetc_ndev_priv *priv,
+ struct flow_cls_offload *f)
+{
+ struct enetc_stream_filter *filter;
+ struct netlink_ext_ack *extack = f->common.extack;
+ int err;
+
+ if (f->common.chain_index >= priv->psfp_cap.max_streamid) {
+ NL_SET_ERR_MSG_MOD(extack, "No Stream identify resource!");
+ return -ENOSPC;
+ }
+
+ filter = enetc_get_stream_by_index(f->common.chain_index);
+ if (!filter)
+ return -EINVAL;
+
+ err = enetc_streamid_hw_set(priv, &filter->sid, false);
+ if (err)
+ return err;
+
+ remove_one_chain(priv, filter);
+
+ return 0;
+}
+
+static int enetc_destroy_clsflower(struct enetc_ndev_priv *priv,
+ struct flow_cls_offload *f)
+{
+ return enetc_psfp_destroy_clsflower(priv, f);
+}
+
+static int enetc_psfp_get_stats(struct enetc_ndev_priv *priv,
+ struct flow_cls_offload *f)
+{
+ struct psfp_streamfilter_counters counters = {};
+ struct enetc_stream_filter *filter;
+ struct flow_stats stats = {};
+ int err;
+
+ filter = enetc_get_stream_by_index(f->common.chain_index);
+ if (!filter)
+ return -EINVAL;
+
+ err = enetc_streamcounter_hw_get(priv, filter->sfi_index, &counters);
+ if (err)
+ return -EINVAL;
+
+ spin_lock(&epsfp.psfp_lock);
+ stats.pkts = counters.matching_frames_count +
+ counters.not_passing_sdu_count -
+ filter->stats.pkts;
+ stats.drops = counters.not_passing_frames_count +
+ counters.not_passing_sdu_count +
+ counters.red_frames_count -
+ filter->stats.drops;
+ stats.lastused = filter->stats.lastused;
+ filter->stats.pkts += stats.pkts;
+ filter->stats.drops += stats.drops;
+ spin_unlock(&epsfp.psfp_lock);
+
+ flow_stats_update(&f->stats, 0x0, stats.pkts, stats.drops,
+ stats.lastused, FLOW_ACTION_HW_STATS_DELAYED);
+
+ return 0;
+}
+
+static int enetc_setup_tc_cls_flower(struct enetc_ndev_priv *priv,
+ struct flow_cls_offload *cls_flower)
+{
+ switch (cls_flower->command) {
+ case FLOW_CLS_REPLACE:
+ return enetc_config_clsflower(priv, cls_flower);
+ case FLOW_CLS_DESTROY:
+ return enetc_destroy_clsflower(priv, cls_flower);
+ case FLOW_CLS_STATS:
+ return enetc_psfp_get_stats(priv, cls_flower);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static inline void clean_psfp_sfi_bitmap(void)
+{
+ bitmap_free(epsfp.psfp_sfi_bitmap);
+ epsfp.psfp_sfi_bitmap = NULL;
+}
+
+static void clean_stream_list(void)
+{
+ struct enetc_stream_filter *s;
+ struct hlist_node *tmp;
+
+ hlist_for_each_entry_safe(s, tmp, &epsfp.stream_list, node) {
+ hlist_del(&s->node);
+ kfree(s);
+ }
+}
+
+static void clean_sfi_list(void)
+{
+ struct enetc_psfp_filter *sfi;
+ struct hlist_node *tmp;
+
+ hlist_for_each_entry_safe(sfi, tmp, &epsfp.psfp_filter_list, node) {
+ hlist_del(&sfi->node);
+ kfree(sfi);
+ }
+}
+
+static void clean_sgi_list(void)
+{
+ struct enetc_psfp_gate *sgi;
+ struct hlist_node *tmp;
+
+ hlist_for_each_entry_safe(sgi, tmp, &epsfp.psfp_gate_list, node) {
+ hlist_del(&sgi->node);
+ kfree(sgi);
+ }
+}
+
+static void clean_psfp_all(void)
+{
+ /* Disable all list nodes and free all memory */
+ clean_sfi_list();
+ clean_sgi_list();
+ clean_stream_list();
+ epsfp.dev_bitmap = 0;
+ clean_psfp_sfi_bitmap();
+}
+
+int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct net_device *ndev = cb_priv;
+
+ if (!tc_can_offload(ndev))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return enetc_setup_tc_cls_flower(netdev_priv(ndev), type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int enetc_set_psfp(struct net_device *ndev, bool en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int err;
+
+ if (en) {
+ err = enetc_psfp_enable(priv);
+ if (err)
+ return err;
+
+ priv->active_offloads |= ENETC_F_QCI;
+ return 0;
+ }
+
+ err = enetc_psfp_disable(priv);
+ if (err)
+ return err;
+
+ priv->active_offloads &= ~ENETC_F_QCI;
+
+ return 0;
+}
+
+int enetc_psfp_init(struct enetc_ndev_priv *priv)
+{
+ if (epsfp.psfp_sfi_bitmap)
+ return 0;
+
+ epsfp.psfp_sfi_bitmap = bitmap_zalloc(priv->psfp_cap.max_psfp_filter,
+ GFP_KERNEL);
+ if (!epsfp.psfp_sfi_bitmap)
+ return -ENOMEM;
+
+ spin_lock_init(&epsfp.psfp_lock);
+
+ if (list_empty(&enetc_block_cb_list))
+ epsfp.dev_bitmap = 0;
+
+ return 0;
+}
+
+int enetc_psfp_clean(struct enetc_ndev_priv *priv)
+{
+ if (!list_empty(&enetc_block_cb_list))
+ return -EBUSY;
+
+ clean_psfp_all();
+
+ return 0;
+}
+
+int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct flow_block_offload *f = type_data;
+ int port, err;
+
+ err = flow_block_cb_setup_simple(f, &enetc_block_cb_list,
+ enetc_setup_tc_block_cb,
+ ndev, ndev, true);
+ if (err)
+ return err;
+
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ port = enetc_pf_to_port(priv->si->pdev);
+ if (port < 0)
+ return -EINVAL;
+
+ set_bit(port, &epsfp.dev_bitmap);
+ break;
+ case FLOW_BLOCK_UNBIND:
+ port = enetc_pf_to_port(priv->si->pdev);
+ if (port < 0)
+ return -EINVAL;
+
+ clear_bit(port, &epsfp.dev_bitmap);
+ if (!epsfp.dev_bitmap)
+ clean_psfp_all();
+ break;
+ }
+
+ return 0;
+}
+
+int enetc_qos_query_caps(struct net_device *ndev, void *type_data)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct tc_query_caps_base *base = type_data;
+ struct enetc_si *si = priv->si;
+
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ if (si->hw_features & ENETC_SI_F_QBV)
+ caps->supports_queue_max_sdu = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
new file mode 100644
index 000000000..dfcaac302
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include <linux/module.h>
+#include "enetc.h"
+
+#define ENETC_DRV_NAME_STR "ENETC VF driver"
+
+/* Messaging */
+static void enetc_msg_vsi_write_msg(struct enetc_hw *hw,
+ struct enetc_msg_swbd *msg)
+{
+ u32 val;
+
+ val = enetc_vsi_set_msize(msg->size) | lower_32_bits(msg->dma);
+ enetc_wr(hw, ENETC_VSIMSGSNDAR1, upper_32_bits(msg->dma));
+ enetc_wr(hw, ENETC_VSIMSGSNDAR0, val);
+}
+
+static int enetc_msg_vsi_send(struct enetc_si *si, struct enetc_msg_swbd *msg)
+{
+ int timeout = 100;
+ u32 vsimsgsr;
+
+ enetc_msg_vsi_write_msg(&si->hw, msg);
+
+ do {
+ vsimsgsr = enetc_rd(&si->hw, ENETC_VSIMSGSR);
+ if (!(vsimsgsr & ENETC_VSIMSGSR_MB))
+ break;
+
+ usleep_range(1000, 2000);
+ } while (--timeout);
+
+ if (!timeout)
+ return -ETIMEDOUT;
+
+ /* check for message delivery error */
+ if (vsimsgsr & ENETC_VSIMSGSR_MS) {
+ dev_err(&si->pdev->dev, "VSI command execute error: %d\n",
+ ENETC_SIMSGSR_GET_MC(vsimsgsr));
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int enetc_msg_vsi_set_primary_mac_addr(struct enetc_ndev_priv *priv,
+ struct sockaddr *saddr)
+{
+ struct enetc_msg_cmd_set_primary_mac *cmd;
+ struct enetc_msg_swbd msg;
+ int err;
+
+ msg.size = ALIGN(sizeof(struct enetc_msg_cmd_set_primary_mac), 64);
+ msg.vaddr = dma_alloc_coherent(priv->dev, msg.size, &msg.dma,
+ GFP_KERNEL);
+ if (!msg.vaddr) {
+ dev_err(priv->dev, "Failed to alloc Tx msg (size: %d)\n",
+ msg.size);
+ return -ENOMEM;
+ }
+
+ cmd = (struct enetc_msg_cmd_set_primary_mac *)msg.vaddr;
+ cmd->header.type = ENETC_MSG_CMD_MNG_MAC;
+ cmd->header.id = ENETC_MSG_CMD_MNG_ADD;
+ memcpy(&cmd->mac, saddr, sizeof(struct sockaddr));
+
+ /* send the command and wait */
+ err = enetc_msg_vsi_send(priv->si, &msg);
+
+ dma_free_coherent(priv->dev, msg.size, msg.vaddr, msg.dma);
+
+ return err;
+}
+
+static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct sockaddr *saddr = addr;
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ return enetc_msg_vsi_set_primary_mac_addr(priv, saddr);
+}
+
+static int enetc_vf_set_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ enetc_set_features(ndev, features);
+
+ return 0;
+}
+
+static int enetc_vf_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return enetc_setup_tc_mqprio(ndev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* Probing/ Init */
+static const struct net_device_ops enetc_ndev_ops = {
+ .ndo_open = enetc_open,
+ .ndo_stop = enetc_close,
+ .ndo_start_xmit = enetc_xmit,
+ .ndo_get_stats = enetc_get_stats,
+ .ndo_set_mac_address = enetc_vf_set_mac_addr,
+ .ndo_set_features = enetc_vf_set_features,
+ .ndo_eth_ioctl = enetc_ioctl,
+ .ndo_setup_tc = enetc_vf_setup_tc,
+};
+
+static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
+ const struct net_device_ops *ndev_ops)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+ SET_NETDEV_DEV(ndev, &si->pdev->dev);
+ priv->ndev = ndev;
+ priv->si = si;
+ priv->dev = &si->pdev->dev;
+ si->ndev = ndev;
+
+ priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
+ ndev->netdev_ops = ndev_ops;
+ enetc_set_ethtool_ops(ndev);
+ ndev->watchdog_timeo = 5 * HZ;
+ ndev->max_mtu = ENETC_MAX_MTU;
+
+ ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6;
+
+ if (si->num_rss)
+ ndev->hw_features |= NETIF_F_RXHASH;
+
+ /* pick up primary MAC address from SI */
+ enetc_load_primary_mac_addr(&si->hw, ndev);
+}
+
+static int enetc_vf_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct enetc_ndev_priv *priv;
+ struct net_device *ndev;
+ struct enetc_si *si;
+ int err;
+
+ err = enetc_pci_probe(pdev, KBUILD_MODNAME, 0);
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "PCI probing failed\n");
+
+ si = pci_get_drvdata(pdev);
+
+ enetc_get_si_caps(si);
+
+ ndev = alloc_etherdev_mq(sizeof(*priv), ENETC_MAX_NUM_TXQS);
+ if (!ndev) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "netdev creation failed\n");
+ goto err_alloc_netdev;
+ }
+
+ enetc_vf_netdev_setup(si, ndev, &enetc_ndev_ops);
+
+ priv = netdev_priv(ndev);
+
+ enetc_init_si_rings_params(priv);
+
+ err = enetc_setup_cbdr(priv->dev, &si->hw, ENETC_CBDR_DEFAULT_SIZE,
+ &si->cbd_ring);
+ if (err)
+ goto err_setup_cbdr;
+
+ err = enetc_alloc_si_resources(priv);
+ if (err) {
+ dev_err(&pdev->dev, "SI resource alloc failed\n");
+ goto err_alloc_si_res;
+ }
+
+ err = enetc_configure_si(priv);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to configure SI\n");
+ goto err_config_si;
+ }
+
+ err = enetc_alloc_msix(priv);
+ if (err) {
+ dev_err(&pdev->dev, "MSIX alloc failed\n");
+ goto err_alloc_msix;
+ }
+
+ err = register_netdev(ndev);
+ if (err)
+ goto err_reg_netdev;
+
+ netif_carrier_off(ndev);
+
+ return 0;
+
+err_reg_netdev:
+ enetc_free_msix(priv);
+err_config_si:
+err_alloc_msix:
+ enetc_free_si_resources(priv);
+err_alloc_si_res:
+ enetc_teardown_cbdr(&si->cbd_ring);
+err_setup_cbdr:
+ si->ndev = NULL;
+ free_netdev(ndev);
+err_alloc_netdev:
+ enetc_pci_remove(pdev);
+
+ return err;
+}
+
+static void enetc_vf_remove(struct pci_dev *pdev)
+{
+ struct enetc_si *si = pci_get_drvdata(pdev);
+ struct enetc_ndev_priv *priv;
+
+ priv = netdev_priv(si->ndev);
+ unregister_netdev(si->ndev);
+
+ enetc_free_msix(priv);
+
+ enetc_free_si_resources(priv);
+ enetc_teardown_cbdr(&si->cbd_ring);
+
+ free_netdev(si->ndev);
+
+ enetc_pci_remove(pdev);
+}
+
+static const struct pci_device_id enetc_vf_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) },
+ { 0, } /* End of table. */
+};
+MODULE_DEVICE_TABLE(pci, enetc_vf_id_table);
+
+static struct pci_driver enetc_vf_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = enetc_vf_id_table,
+ .probe = enetc_vf_probe,
+ .remove = enetc_vf_remove,
+};
+module_pci_driver(enetc_vf_driver);
+
+MODULE_DESCRIPTION(ENETC_DRV_NAME_STR);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
new file mode 100644
index 000000000..33f84a30e
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -0,0 +1,675 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/****************************************************************************/
+
+/*
+ * fec.h -- Fast Ethernet Controller for Motorola ColdFire SoC
+ * processors.
+ *
+ * (C) Copyright 2000-2005, Greg Ungerer (gerg@snapgear.com)
+ * (C) Copyright 2000-2001, Lineo (www.lineo.com)
+ */
+
+/****************************************************************************/
+#ifndef FEC_H
+#define FEC_H
+/****************************************************************************/
+
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/pm_qos.h>
+#include <linux/bpf.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include <linux/firmware/imx/sci.h>
+
+#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
+ defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
+/*
+ * Just figures, Motorola would have to change the offsets for
+ * registers in the same peripheral device on different models
+ * of the ColdFire!
+ */
+#define FEC_IEVENT 0x004 /* Interrupt event reg */
+#define FEC_IMASK 0x008 /* Interrupt mask reg */
+#define FEC_R_DES_ACTIVE_0 0x010 /* Receive descriptor reg */
+#define FEC_X_DES_ACTIVE_0 0x014 /* Transmit descriptor reg */
+#define FEC_ECNTRL 0x024 /* Ethernet control reg */
+#define FEC_MII_DATA 0x040 /* MII manage frame reg */
+#define FEC_MII_SPEED 0x044 /* MII speed control reg */
+#define FEC_MIB_CTRLSTAT 0x064 /* MIB control/status reg */
+#define FEC_R_CNTRL 0x084 /* Receive control reg */
+#define FEC_X_CNTRL 0x0c4 /* Transmit Control reg */
+#define FEC_ADDR_LOW 0x0e4 /* Low 32bits MAC address */
+#define FEC_ADDR_HIGH 0x0e8 /* High 16bits MAC address */
+#define FEC_OPD 0x0ec /* Opcode + Pause duration */
+#define FEC_TXIC0 0x0f0 /* Tx Interrupt Coalescing for ring 0 */
+#define FEC_TXIC1 0x0f4 /* Tx Interrupt Coalescing for ring 1 */
+#define FEC_TXIC2 0x0f8 /* Tx Interrupt Coalescing for ring 2 */
+#define FEC_RXIC0 0x100 /* Rx Interrupt Coalescing for ring 0 */
+#define FEC_RXIC1 0x104 /* Rx Interrupt Coalescing for ring 1 */
+#define FEC_RXIC2 0x108 /* Rx Interrupt Coalescing for ring 2 */
+#define FEC_HASH_TABLE_HIGH 0x118 /* High 32bits hash table */
+#define FEC_HASH_TABLE_LOW 0x11c /* Low 32bits hash table */
+#define FEC_GRP_HASH_TABLE_HIGH 0x120 /* High 32bits hash table */
+#define FEC_GRP_HASH_TABLE_LOW 0x124 /* Low 32bits hash table */
+#define FEC_X_WMRK 0x144 /* FIFO transmit water mark */
+#define FEC_R_BOUND 0x14c /* FIFO receive bound reg */
+#define FEC_R_FSTART 0x150 /* FIFO receive start reg */
+#define FEC_R_DES_START_1 0x160 /* Receive descriptor ring 1 */
+#define FEC_X_DES_START_1 0x164 /* Transmit descriptor ring 1 */
+#define FEC_R_BUFF_SIZE_1 0x168 /* Maximum receive buff ring1 size */
+#define FEC_R_DES_START_2 0x16c /* Receive descriptor ring 2 */
+#define FEC_X_DES_START_2 0x170 /* Transmit descriptor ring 2 */
+#define FEC_R_BUFF_SIZE_2 0x174 /* Maximum receive buff ring2 size */
+#define FEC_R_DES_START_0 0x180 /* Receive descriptor ring */
+#define FEC_X_DES_START_0 0x184 /* Transmit descriptor ring */
+#define FEC_R_BUFF_SIZE_0 0x188 /* Maximum receive buff size */
+#define FEC_R_FIFO_RSFL 0x190 /* Receive FIFO section full threshold */
+#define FEC_R_FIFO_RSEM 0x194 /* Receive FIFO section empty threshold */
+#define FEC_R_FIFO_RAEM 0x198 /* Receive FIFO almost empty threshold */
+#define FEC_R_FIFO_RAFL 0x19c /* Receive FIFO almost full threshold */
+#define FEC_FTRL 0x1b0 /* Frame truncation receive length*/
+#define FEC_RACC 0x1c4 /* Receive Accelerator function */
+#define FEC_RCMR_1 0x1c8 /* Receive classification match ring 1 */
+#define FEC_RCMR_2 0x1cc /* Receive classification match ring 2 */
+#define FEC_DMA_CFG_1 0x1d8 /* DMA class configuration for ring 1 */
+#define FEC_DMA_CFG_2 0x1dc /* DMA class Configuration for ring 2 */
+#define FEC_R_DES_ACTIVE_1 0x1e0 /* Rx descriptor active for ring 1 */
+#define FEC_X_DES_ACTIVE_1 0x1e4 /* Tx descriptor active for ring 1 */
+#define FEC_R_DES_ACTIVE_2 0x1e8 /* Rx descriptor active for ring 2 */
+#define FEC_X_DES_ACTIVE_2 0x1ec /* Tx descriptor active for ring 2 */
+#define FEC_QOS_SCHEME 0x1f0 /* Set multi queues Qos scheme */
+#define FEC_LPI_SLEEP 0x1f4 /* Set IEEE802.3az LPI Sleep Ts time */
+#define FEC_LPI_WAKE 0x1f8 /* Set IEEE802.3az LPI Wake Tw time */
+#define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */
+#define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */
+
+#define BM_MIIGSK_CFGR_MII 0x00
+#define BM_MIIGSK_CFGR_RMII 0x01
+#define BM_MIIGSK_CFGR_FRCONT_10M 0x40
+
+#define RMON_T_DROP 0x200 /* Count of frames not cntd correctly */
+#define RMON_T_PACKETS 0x204 /* RMON TX packet count */
+#define RMON_T_BC_PKT 0x208 /* RMON TX broadcast pkts */
+#define RMON_T_MC_PKT 0x20c /* RMON TX multicast pkts */
+#define RMON_T_CRC_ALIGN 0x210 /* RMON TX pkts with CRC align err */
+#define RMON_T_UNDERSIZE 0x214 /* RMON TX pkts < 64 bytes, good CRC */
+#define RMON_T_OVERSIZE 0x218 /* RMON TX pkts > MAX_FL bytes good CRC */
+#define RMON_T_FRAG 0x21c /* RMON TX pkts < 64 bytes, bad CRC */
+#define RMON_T_JAB 0x220 /* RMON TX pkts > MAX_FL bytes, bad CRC */
+#define RMON_T_COL 0x224 /* RMON TX collision count */
+#define RMON_T_P64 0x228 /* RMON TX 64 byte pkts */
+#define RMON_T_P65TO127 0x22c /* RMON TX 65 to 127 byte pkts */
+#define RMON_T_P128TO255 0x230 /* RMON TX 128 to 255 byte pkts */
+#define RMON_T_P256TO511 0x234 /* RMON TX 256 to 511 byte pkts */
+#define RMON_T_P512TO1023 0x238 /* RMON TX 512 to 1023 byte pkts */
+#define RMON_T_P1024TO2047 0x23c /* RMON TX 1024 to 2047 byte pkts */
+#define RMON_T_P_GTE2048 0x240 /* RMON TX pkts > 2048 bytes */
+#define RMON_T_OCTETS 0x244 /* RMON TX octets */
+#define IEEE_T_DROP 0x248 /* Count of frames not counted crtly */
+#define IEEE_T_FRAME_OK 0x24c /* Frames tx'd OK */
+#define IEEE_T_1COL 0x250 /* Frames tx'd with single collision */
+#define IEEE_T_MCOL 0x254 /* Frames tx'd with multiple collision */
+#define IEEE_T_DEF 0x258 /* Frames tx'd after deferral delay */
+#define IEEE_T_LCOL 0x25c /* Frames tx'd with late collision */
+#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excesv collisions */
+#define IEEE_T_MACERR 0x264 /* Frames tx'd with TX FIFO underrun */
+#define IEEE_T_CSERR 0x268 /* Frames tx'd with carrier sense err */
+#define IEEE_T_SQE 0x26c /* Frames tx'd with SQE err */
+#define IEEE_T_FDXFC 0x270 /* Flow control pause frames tx'd */
+#define IEEE_T_OCTETS_OK 0x274 /* Octet count for frames tx'd w/o err */
+#define RMON_R_PACKETS 0x284 /* RMON RX packet count */
+#define RMON_R_BC_PKT 0x288 /* RMON RX broadcast pkts */
+#define RMON_R_MC_PKT 0x28c /* RMON RX multicast pkts */
+#define RMON_R_CRC_ALIGN 0x290 /* RMON RX pkts with CRC alignment err */
+#define RMON_R_UNDERSIZE 0x294 /* RMON RX pkts < 64 bytes, good CRC */
+#define RMON_R_OVERSIZE 0x298 /* RMON RX pkts > MAX_FL bytes good CRC */
+#define RMON_R_FRAG 0x29c /* RMON RX pkts < 64 bytes, bad CRC */
+#define RMON_R_JAB 0x2a0 /* RMON RX pkts > MAX_FL bytes, bad CRC */
+#define RMON_R_RESVD_O 0x2a4 /* Reserved */
+#define RMON_R_P64 0x2a8 /* RMON RX 64 byte pkts */
+#define RMON_R_P65TO127 0x2ac /* RMON RX 65 to 127 byte pkts */
+#define RMON_R_P128TO255 0x2b0 /* RMON RX 128 to 255 byte pkts */
+#define RMON_R_P256TO511 0x2b4 /* RMON RX 256 to 511 byte pkts */
+#define RMON_R_P512TO1023 0x2b8 /* RMON RX 512 to 1023 byte pkts */
+#define RMON_R_P1024TO2047 0x2bc /* RMON RX 1024 to 2047 byte pkts */
+#define RMON_R_P_GTE2048 0x2c0 /* RMON RX pkts > 2048 bytes */
+#define RMON_R_OCTETS 0x2c4 /* RMON RX octets */
+#define IEEE_R_DROP 0x2c8 /* Count frames not counted correctly */
+#define IEEE_R_FRAME_OK 0x2cc /* Frames rx'd OK */
+#define IEEE_R_CRC 0x2d0 /* Frames rx'd with CRC err */
+#define IEEE_R_ALIGN 0x2d4 /* Frames rx'd with alignment err */
+#define IEEE_R_MACERR 0x2d8 /* Receive FIFO overflow count */
+#define IEEE_R_FDXFC 0x2dc /* Flow control pause frames rx'd */
+#define IEEE_R_OCTETS_OK 0x2e0 /* Octet cnt for frames rx'd w/o err */
+
+#else
+
+#define FEC_ECNTRL 0x000 /* Ethernet control reg */
+#define FEC_IEVENT		0x004 /* Interrupt event reg */
+#define FEC_IMASK 0x008 /* Interrupt mask reg */
+#define FEC_IVEC 0x00c /* Interrupt vec status reg */
+#define FEC_R_DES_ACTIVE_0 0x010 /* Receive descriptor reg */
+#define FEC_R_DES_ACTIVE_1 FEC_R_DES_ACTIVE_0
+#define FEC_R_DES_ACTIVE_2 FEC_R_DES_ACTIVE_0
+#define FEC_X_DES_ACTIVE_0 0x014 /* Transmit descriptor reg */
+#define FEC_X_DES_ACTIVE_1 FEC_X_DES_ACTIVE_0
+#define FEC_X_DES_ACTIVE_2 FEC_X_DES_ACTIVE_0
+#define FEC_MII_DATA 0x040 /* MII manage frame reg */
+#define FEC_MII_SPEED 0x044 /* MII speed control reg */
+#define FEC_R_BOUND 0x08c /* FIFO receive bound reg */
+#define FEC_R_FSTART 0x090 /* FIFO receive start reg */
+#define FEC_X_WMRK 0x0a4 /* FIFO transmit water mark */
+#define FEC_X_FSTART 0x0ac /* FIFO transmit start reg */
+#define FEC_R_CNTRL 0x104 /* Receive control reg */
+#define FEC_MAX_FRM_LEN 0x108 /* Maximum frame length reg */
+#define FEC_X_CNTRL 0x144 /* Transmit Control reg */
+#define FEC_ADDR_LOW 0x3c0 /* Low 32bits MAC address */
+#define FEC_ADDR_HIGH 0x3c4 /* High 16bits MAC address */
+#define FEC_GRP_HASH_TABLE_HIGH 0x3c8 /* High 32bits hash table */
+#define FEC_GRP_HASH_TABLE_LOW 0x3cc /* Low 32bits hash table */
+#define FEC_R_DES_START_0 0x3d0 /* Receive descriptor ring */
+#define FEC_R_DES_START_1 FEC_R_DES_START_0
+#define FEC_R_DES_START_2 FEC_R_DES_START_0
+#define FEC_X_DES_START_0 0x3d4 /* Transmit descriptor ring */
+#define FEC_X_DES_START_1 FEC_X_DES_START_0
+#define FEC_X_DES_START_2 FEC_X_DES_START_0
+#define FEC_R_BUFF_SIZE_0 0x3d8 /* Maximum receive buff size */
+#define FEC_R_BUFF_SIZE_1 FEC_R_BUFF_SIZE_0
+#define FEC_R_BUFF_SIZE_2 FEC_R_BUFF_SIZE_0
+#define FEC_FIFO_RAM 0x400 /* FIFO RAM buffer */
+/* These registers do not exist on this chip; they are defined only so
+ * that the driver still builds.
+ */
+#define FEC_RCMR_1 0xfff
+#define FEC_RCMR_2 0xfff
+#define FEC_DMA_CFG_1 0xfff
+#define FEC_DMA_CFG_2 0xfff
+#define FEC_TXIC0 0xfff
+#define FEC_TXIC1 0xfff
+#define FEC_TXIC2 0xfff
+#define FEC_RXIC0 0xfff
+#define FEC_RXIC1 0xfff
+#define FEC_RXIC2 0xfff
+#define FEC_LPI_SLEEP 0xfff
+#define FEC_LPI_WAKE 0xfff
+#endif /* CONFIG_M5272 */
+
+
+/*
+ * Define the buffer descriptor structure.
+ *
+ * Evidently, ARM SoCs have the FEC block generated in a
+ * little endian mode so adjust endianness accordingly.
+ */
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+#define fec32_to_cpu le32_to_cpu
+#define fec16_to_cpu le16_to_cpu
+#define cpu_to_fec32 cpu_to_le32
+#define cpu_to_fec16 cpu_to_le16
+#define __fec32 __le32
+#define __fec16 __le16
+
+struct bufdesc {
+ __fec16 cbd_datlen; /* Data length */
+ __fec16 cbd_sc; /* Control and status info */
+ __fec32 cbd_bufaddr; /* Buffer address */
+};
+#else
+#define fec32_to_cpu be32_to_cpu
+#define fec16_to_cpu be16_to_cpu
+#define cpu_to_fec32 cpu_to_be32
+#define cpu_to_fec16 cpu_to_be16
+#define __fec32 __be32
+#define __fec16 __be16
+
+struct bufdesc {
+ __fec16 cbd_sc; /* Control and status info */
+ __fec16 cbd_datlen; /* Data length */
+ __fec32 cbd_bufaddr; /* Buffer address */
+};
+#endif
+
+struct bufdesc_ex {
+ struct bufdesc desc;
+ __fec32 cbd_esc;
+ __fec32 cbd_prot;
+ __fec32 cbd_bdu;
+ __fec32 ts;
+ __fec16 res0[4];
+};
+
+/*
+ * The following definitions are courtesy of commproc.h, which was
+ * Copyright (c) 1997 Dan Malek (dmalek@jlc.net).
+ */
+#define BD_SC_EMPTY ((ushort)0x8000) /* Receive is empty */
+#define BD_SC_READY ((ushort)0x8000) /* Transmit is ready */
+#define BD_SC_WRAP ((ushort)0x2000) /* Last buffer descriptor */
+#define BD_SC_INTRPT ((ushort)0x1000) /* Interrupt on change */
+#define BD_SC_CM ((ushort)0x0200) /* Continuous mode */
+#define BD_SC_ID ((ushort)0x0100) /* Rec'd too many idles */
+#define BD_SC_P ((ushort)0x0100) /* xmt preamble */
+#define BD_SC_BR ((ushort)0x0020) /* Break received */
+#define BD_SC_FR ((ushort)0x0010) /* Framing error */
+#define BD_SC_PR ((ushort)0x0008) /* Parity error */
+#define BD_SC_OV ((ushort)0x0002) /* Overrun */
+#define BD_SC_CD ((ushort)0x0001) /* ?? */
+
+/* Buffer descriptor control/status used by Ethernet receive.
+ */
+#define BD_ENET_RX_EMPTY ((ushort)0x8000)
+#define BD_ENET_RX_WRAP ((ushort)0x2000)
+#define BD_ENET_RX_INTR ((ushort)0x1000)
+#define BD_ENET_RX_LAST ((ushort)0x0800)
+#define BD_ENET_RX_FIRST ((ushort)0x0400)
+#define BD_ENET_RX_MISS ((ushort)0x0100)
+#define BD_ENET_RX_LG ((ushort)0x0020)
+#define BD_ENET_RX_NO ((ushort)0x0010)
+#define BD_ENET_RX_SH ((ushort)0x0008)
+#define BD_ENET_RX_CR ((ushort)0x0004)
+#define BD_ENET_RX_OV ((ushort)0x0002)
+#define BD_ENET_RX_CL ((ushort)0x0001)
+#define BD_ENET_RX_STATS ((ushort)0x013f) /* All status bits */
+
+/* Enhanced buffer descriptor control/status used by Ethernet receive */
+#define BD_ENET_RX_VLAN 0x00000004
+
+/* Buffer descriptor control/status used by Ethernet transmit.
+ */
+#define BD_ENET_TX_READY ((ushort)0x8000)
+#define BD_ENET_TX_PAD ((ushort)0x4000)
+#define BD_ENET_TX_WRAP ((ushort)0x2000)
+#define BD_ENET_TX_INTR ((ushort)0x1000)
+#define BD_ENET_TX_LAST ((ushort)0x0800)
+#define BD_ENET_TX_TC ((ushort)0x0400)
+#define BD_ENET_TX_DEF ((ushort)0x0200)
+#define BD_ENET_TX_HB ((ushort)0x0100)
+#define BD_ENET_TX_LC ((ushort)0x0080)
+#define BD_ENET_TX_RL ((ushort)0x0040)
+#define BD_ENET_TX_RCMASK ((ushort)0x003c)
+#define BD_ENET_TX_UN ((ushort)0x0002)
+#define BD_ENET_TX_CSL ((ushort)0x0001)
+#define BD_ENET_TX_STATS ((ushort)0x0fff) /* All status bits */
+
+/* enhanced buffer descriptor control/status used by Ethernet transmit */
+#define BD_ENET_TX_INT 0x40000000
+#define BD_ENET_TX_TS 0x20000000
+#define BD_ENET_TX_PINS 0x10000000
+#define BD_ENET_TX_IINS 0x08000000
+
+
+/* This device has up to three irqs on some platforms */
+#define FEC_IRQ_NUM 3
+
+/* Maximum number of queues supported.
+ * ENET with the AVB IP can support up to 3 independent tx queues and
+ * rx queues.
+ * The user may select any queue count less than or equal to 3.
+ */
+#define FEC_ENET_MAX_TX_QS 3
+#define FEC_ENET_MAX_RX_QS 3
+
+#define FEC_R_DES_START(X) (((X) == 1) ? FEC_R_DES_START_1 : \
+ (((X) == 2) ? \
+ FEC_R_DES_START_2 : FEC_R_DES_START_0))
+#define FEC_X_DES_START(X) (((X) == 1) ? FEC_X_DES_START_1 : \
+ (((X) == 2) ? \
+ FEC_X_DES_START_2 : FEC_X_DES_START_0))
+#define FEC_R_BUFF_SIZE(X) (((X) == 1) ? FEC_R_BUFF_SIZE_1 : \
+ (((X) == 2) ? \
+ FEC_R_BUFF_SIZE_2 : FEC_R_BUFF_SIZE_0))
+
+#define FEC_DMA_CFG(X) (((X) == 2) ? FEC_DMA_CFG_2 : FEC_DMA_CFG_1)
+
+#define DMA_CLASS_EN (1 << 16)
+#define FEC_RCMR(X) (((X) == 2) ? FEC_RCMR_2 : FEC_RCMR_1)
+#define IDLE_SLOPE_MASK 0xffff
+#define IDLE_SLOPE_1 0x200 /* BW fraction: 0.5 */
+#define IDLE_SLOPE_2 0x200 /* BW fraction: 0.5 */
+#define IDLE_SLOPE(X) (((X) == 1) ? \
+ (IDLE_SLOPE_1 & IDLE_SLOPE_MASK) : \
+ (IDLE_SLOPE_2 & IDLE_SLOPE_MASK))
+#define RCMR_MATCHEN (0x1 << 16)
+#define RCMR_CMP_CFG(v, n) (((v) & 0x7) << (n << 2))
+#define RCMR_CMP_1 (RCMR_CMP_CFG(0, 0) | RCMR_CMP_CFG(1, 1) | \
+ RCMR_CMP_CFG(2, 2) | RCMR_CMP_CFG(3, 3))
+#define RCMR_CMP_2 (RCMR_CMP_CFG(4, 0) | RCMR_CMP_CFG(5, 1) | \
+ RCMR_CMP_CFG(6, 2) | RCMR_CMP_CFG(7, 3))
+#define RCMR_CMP(X) (((X) == 1) ? RCMR_CMP_1 : RCMR_CMP_2)
+#define FEC_TX_BD_FTYPE(X) (((X) & 0xf) << 20)
+
+/* The number of Tx and Rx buffers. These are allocated from the page
+ * pool. The code may assume these are powers of two, so it is best
+ * to keep them that size.
+ * We don't need to allocate pages for the transmitter. We just use
+ * the skbuff directly.
+ */
+
+#define FEC_ENET_XDP_HEADROOM (XDP_PACKET_HEADROOM)
+
+#define FEC_ENET_RX_PAGES 256
+#define FEC_ENET_RX_FRSIZE (PAGE_SIZE - FEC_ENET_XDP_HEADROOM \
+ - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
+#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
+#define FEC_ENET_TX_FRSIZE 2048
+#define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
+#define TX_RING_SIZE 512 /* Must be power of two */
+#define TX_RING_MOD_MASK 511 /* for this to work */
+
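+/* Rough sizing example, assuming 4 KiB pages and an skb_shared_info of
+ * about 320 bytes after alignment (both are architecture dependent):
+ *   FEC_ENET_RX_FRSIZE ~= 4096 - 256 - 320 = 3520
+ *   FEC_ENET_RX_FRPPG   = 4096 / 3520      = 1
+ *   RX_RING_SIZE        = 1 * 256          = 256
+ */
+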
+#define BD_ENET_RX_INT 0x00800000
+#define BD_ENET_RX_PTP ((ushort)0x0400)
+#define BD_ENET_RX_ICE 0x00000020
+#define BD_ENET_RX_PCR 0x00000010
+#define FLAG_RX_CSUM_ENABLED (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
+#define FLAG_RX_CSUM_ERROR (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
+
+/* Interrupt events/masks. */
+#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */
+#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */
+#define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */
+#define FEC_ENET_GRA ((uint)0x10000000) /* Graceful stop complete */
+#define FEC_ENET_TXF_0 ((uint)0x08000000) /* Full frame transmitted */
+#define FEC_ENET_TXF_1 ((uint)0x00000008) /* Full frame transmitted */
+#define FEC_ENET_TXF_2 ((uint)0x00000080) /* Full frame transmitted */
+#define FEC_ENET_TXB ((uint)0x04000000) /* A buffer was transmitted */
+#define FEC_ENET_RXF_0 ((uint)0x02000000) /* Full frame received */
+#define FEC_ENET_RXF_1 ((uint)0x00000002) /* Full frame received */
+#define FEC_ENET_RXF_2 ((uint)0x00000020) /* Full frame received */
+#define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */
+#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */
+#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */
+#define FEC_ENET_WAKEUP ((uint)0x00020000) /* Wakeup request */
+#define FEC_ENET_TXF (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2)
+#define FEC_ENET_RXF (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2)
+#define FEC_ENET_RXF_GET(X) (((X) == 0) ? FEC_ENET_RXF_0 : \
+ (((X) == 1) ? FEC_ENET_RXF_1 : \
+ FEC_ENET_RXF_2))
+#define FEC_ENET_TS_AVAIL ((uint)0x00010000)
+#define FEC_ENET_TS_TIMER ((uint)0x00008000)
+
+#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF)
+#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
+
+#define FEC_ENET_TXC_DLY ((uint)0x00010000)
+#define FEC_ENET_RXC_DLY ((uint)0x00020000)
+
+/* ENET interrupt coalescing macro define */
+#define FEC_ITR_CLK_SEL (0x1 << 30)
+#define FEC_ITR_EN (0x1 << 31)
+#define FEC_ITR_ICFT(X) (((X) & 0xff) << 20)
+#define FEC_ITR_ICTT(X) ((X) & 0xffff)
+#define FEC_ITR_ICFT_DEFAULT 200 /* Set 200 frame count threshold */
+#define FEC_ITR_ICTT_DEFAULT 1000 /* Set 1000us timer threshold */
+
+#define FEC_VLAN_TAG_LEN 0x04
+#define FEC_ETHTYPE_LEN 0x02
+
+/* Controller is ENET-MAC */
+#define FEC_QUIRK_ENET_MAC (1 << 0)
+/* Controller needs driver to swap frame */
+#define FEC_QUIRK_SWAP_FRAME (1 << 1)
+/* Controller uses gasket */
+#define FEC_QUIRK_USE_GASKET (1 << 2)
+/* Controller has GBIT support */
+#define FEC_QUIRK_HAS_GBIT (1 << 3)
+/* Controller has extended descriptor buffers */
+#define FEC_QUIRK_HAS_BUFDESC_EX (1 << 4)
+/* Controller has hardware checksum support */
+#define FEC_QUIRK_HAS_CSUM (1 << 5)
+/* Controller has hardware vlan support */
+#define FEC_QUIRK_HAS_VLAN (1 << 6)
+/* ENET IP errata ERR006358
+ *
+ * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously
+ * detected as not set during a prior frame transmission, then the
+ * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs
+ * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in
+ * frames not being transmitted until there is a 0-to-1 transition on
+ * ENET_TDAR[TDAR].
+ */
+#define FEC_QUIRK_ERR006358 (1 << 7)
+/* ENET IP hw AVB
+ *
+ * The i.MX6SX ENET IP adds Audio Video Bridging (AVB) feature support:
+ * - Two class indicators on receive with configurable priority
+ * - Two class indicators and a line speed timer on transmit, allowing
+ *   class credit based shapers to be implemented externally
+ * - Additional DMA registers provisioned to allow managing up to 3
+ *   independent rings
+ */
+#define FEC_QUIRK_HAS_AVB (1 << 8)
+/* There is a TDAR race condition for multiQ when the software sets TDAR
+ * and the UDMA clears TDAR simultaneously or in a small window (2-4 cycles).
+ * This will cause the udma_tx and udma_tx_arbiter state machines to hang.
+ * The issue exists in the i.MX6SX ENET IP.
+ */
+#define FEC_QUIRK_ERR007885 (1 << 9)
+/* The ENET Block Guide chapter for the i.MX6SX (PELE) describes one issue:
+ * after setting ENET_ATCR[Capture], some clock cycles are needed before the
+ * counter value is captured in the register clock domain.
+ * The wait time is at least 6 clock cycles of the slower of the register
+ * clock and the 1588 clock. The 1588 ts_clk is fixed at 25 MHz and the
+ * register clock is 66 MHz, so the wait time must be greater than 240 ns
+ * (40 ns * 6).
+ */
+#define FEC_QUIRK_BUG_CAPTURE (1 << 10)
+/* Controller has only one MDIO bus */
+#define FEC_QUIRK_SINGLE_MDIO (1 << 11)
+/* Controller supports RACC register */
+#define FEC_QUIRK_HAS_RACC (1 << 12)
+/* Controller supports interrupt coalescing */
+#define FEC_QUIRK_HAS_COALESCE (1 << 13)
+/* Interrupt doesn't wake CPU from deep idle */
+#define FEC_QUIRK_ERR006687 (1 << 14)
+/* The MIB counters should be cleared and enabled during
+ * initialisation.
+ */
+#define FEC_QUIRK_MIB_CLEAR (1 << 15)
+/* Only the i.MX25/i.MX27/i.MX28 controllers support the FRBR and FRSR
+ * registers; those FIFO receive registers are reserved on other platforms.
+ */
+#define FEC_QUIRK_HAS_FRREG (1 << 16)
+
+/* Some FEC hardware blocks need the MMFR cleared at setup time to avoid
+ * the generation of an MII event. This must be avoided in the older
+ * FEC blocks where it will stop MII events being generated.
+ */
+#define FEC_QUIRK_CLEAR_SETUP_MII (1 << 17)
+
+/* Some link partners do not tolerate the momentary reset of the REF_CLK
+ * frequency when the RNCTL register is cleared by hardware reset.
+ */
+#define FEC_QUIRK_NO_HARD_RESET (1 << 18)
+
+/* The i.MX6SX ENET IP supports multiple queues (3 queues); use this quirk to
+ * represent this ENET IP.
+ */
+#define FEC_QUIRK_HAS_MULTI_QUEUES (1 << 19)
+
+/* The i.MX8MQ ENET IP version adds a new feature to support the IEEE 802.3az
+ * EEE standard. For transmission, the MAC supplies two user registers to set
+ * the Sleep (TS) and Wake (TW) times.
+ */
+#define FEC_QUIRK_HAS_EEE (1 << 20)
+
+/* The i.MX8QM ENET IP version adds a new feature to generate a delayed
+ * TXC/RXC as an alternative option, to make sure it works well with various
+ * PHYs. For the delayed clock implementation, the ENET takes synchronized
+ * 250 MHz clocks to generate the 2 ns delay.
+ */
+#define FEC_QUIRK_DELAYED_CLKS_SUPPORT (1 << 21)
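+/* For reference: a 250 MHz clock has a 4 ns period, so the 2 ns delay above
+ * is half of one clock period.
+ */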
+
+/* The i.MX8MQ SoC integration mixes the wakeup interrupt signal into the
+ * "int2" interrupt line.
+ */
+#define FEC_QUIRK_WAKEUP_FROM_INT2 (1 << 22)
+
+/* i.MX6Q adds pm_qos support */
+#define FEC_QUIRK_HAS_PMQOS BIT(23)
+
+struct bufdesc_prop {
+ int qid;
+ /* Address of Rx and Tx buffers */
+ struct bufdesc *base;
+ struct bufdesc *last;
+ struct bufdesc *cur;
+ void __iomem *reg_desc_active;
+ dma_addr_t dma;
+ unsigned short ring_size;
+ unsigned char dsize;
+ unsigned char dsize_log2;
+};
+
+struct fec_enet_priv_txrx_info {
+ int offset;
+ struct page *page;
+ struct sk_buff *skb;
+};
+
+struct fec_enet_priv_tx_q {
+ struct bufdesc_prop bd;
+ unsigned char *tx_bounce[TX_RING_SIZE];
+ struct sk_buff *tx_skbuff[TX_RING_SIZE];
+
+ unsigned short tx_stop_threshold;
+ unsigned short tx_wake_threshold;
+
+ struct bufdesc *dirty_tx;
+ char *tso_hdrs;
+ dma_addr_t tso_hdrs_dma;
+};
+
+struct fec_enet_priv_rx_q {
+ struct bufdesc_prop bd;
+ struct fec_enet_priv_txrx_info rx_skb_info[RX_RING_SIZE];
+
+ /* page_pool */
+ struct page_pool *page_pool;
+ struct xdp_rxq_info xdp_rxq;
+
+ /* rx queue number, in the range 0-7 */
+ u8 id;
+};
+
+struct fec_stop_mode_gpr {
+ struct regmap *gpr;
+ u8 reg;
+ u8 bit;
+};
+
+/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
+ * tx_bd_base always point to the base of the buffer descriptors. The
+ * cur_rx and cur_tx point to the currently available buffer.
+ * The dirty_tx tracks the current buffer that is being sent by the
+ * controller. The cur_tx and dirty_tx are equal under both completely
+ * empty and completely full conditions. The empty/ready indicator in
+ * the buffer descriptor determines the actual condition.
+ */
+struct fec_enet_private {
+ /* Hardware registers of the FEC device */
+ void __iomem *hwp;
+
+ struct net_device *netdev;
+
+ struct clk *clk_ipg;
+ struct clk *clk_ahb;
+ struct clk *clk_ref;
+ struct clk *clk_enet_out;
+ struct clk *clk_ptp;
+ struct clk *clk_2x_txclk;
+
+ bool ptp_clk_on;
+ struct mutex ptp_clk_mutex;
+ unsigned int num_tx_queues;
+ unsigned int num_rx_queues;
+
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+ struct fec_enet_priv_tx_q *tx_queue[FEC_ENET_MAX_TX_QS];
+ struct fec_enet_priv_rx_q *rx_queue[FEC_ENET_MAX_RX_QS];
+
+ unsigned int total_tx_ring_size;
+ unsigned int total_rx_ring_size;
+
+ struct platform_device *pdev;
+
+ int dev_id;
+
+ /* Phylib and MDIO interface */
+ struct mii_bus *mii_bus;
+ uint phy_speed;
+ phy_interface_t phy_interface;
+ struct device_node *phy_node;
+ bool rgmii_txc_dly;
+ bool rgmii_rxc_dly;
+ bool rpm_active;
+ int link;
+ int full_duplex;
+ int speed;
+ int irq[FEC_IRQ_NUM];
+ bool bufdesc_ex;
+ int pause_flag;
+ int wol_flag;
+ int wake_irq;
+ u32 quirks;
+
+ struct napi_struct napi;
+ int csum_flags;
+
+ struct work_struct tx_timeout_work;
+
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_caps;
+ unsigned long last_overflow_check;
+ spinlock_t tmreg_lock;
+ struct cyclecounter cc;
+ struct timecounter tc;
+ int rx_hwtstamp_filter;
+ u32 base_incval;
+ u32 cycle_speed;
+ int hwts_rx_en;
+ int hwts_tx_en;
+ struct delayed_work time_keep;
+ struct regulator *reg_phy;
+ struct fec_stop_mode_gpr stop_gpr;
+ struct pm_qos_request pm_qos_req;
+
+ unsigned int tx_align;
+ unsigned int rx_align;
+
+ /* hw interrupt coalesce */
+ unsigned int rx_pkts_itr;
+ unsigned int rx_time_itr;
+ unsigned int tx_pkts_itr;
+ unsigned int tx_time_itr;
+ unsigned int itr_clk_rate;
+
+ /* tx lpi eee mode */
+ struct ethtool_eee eee;
+ unsigned int clk_ref_rate;
+
+ u32 rx_copybreak;
+
+	/* ptp clock period in ns */
+ unsigned int ptp_inc;
+
+ /* pps */
+ int pps_channel;
+ unsigned int reload_period;
+ int pps_enable;
+ unsigned int next_counter;
+
+ struct imx_sc_ipc *ipc_handle;
+
+ u64 ethtool_stats[];
+};
+
+void fec_ptp_init(struct platform_device *pdev, int irq_idx);
+void fec_ptp_stop(struct platform_device *pdev);
+void fec_ptp_start_cyclecounter(struct net_device *ndev);
+void fec_ptp_disable_hwts(struct net_device *ndev);
+int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
+int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
+
+/****************************************************************************/
+#endif /* FEC_H */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
new file mode 100644
index 000000000..97d12c7ee
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -0,0 +1,4357 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
+ * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
+ *
+ * Right now, I am very wasteful with the buffers. I allocate memory
+ * pages and then divide them into 2K frame buffers. This way I know I
+ * have buffers large enough to hold one frame within one buffer descriptor.
+ * Once I get this working, I will use 64 or 128 byte CPM buffers, which
+ * will be much more memory efficient and will easily handle lots of
+ * small packets.
+ *
+ * Much better multiple PHY support by Magnus Damm.
+ * Copyright (c) 2000 Ericsson Radio Systems AB.
+ *
+ * Support for FEC controller of ColdFire processors.
+ * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
+ *
+ * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
+ * Copyright (c) 2004-2006 Macq Electronique SA.
+ *
+ * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/pm_runtime.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <net/selftests.h>
+#include <net/tso.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/icmp.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+#include <linux/crc32.h>
+#include <linux/platform_device.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
+#include <linux/fec.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/regulator/consumer.h>
+#include <linux/if_vlan.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/prefetch.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <soc/imx/cpuidle.h>
+#include <linux/filter.h>
+#include <linux/bpf.h>
+
+#include <asm/cacheflush.h>
+
+#include "fec.h"
+
+static void set_multicast_list(struct net_device *ndev);
+static void fec_enet_itr_coal_set(struct net_device *ndev);
+
+#define DRIVER_NAME "fec"
+
+static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2};
+
+/* Pause frame field and FIFO threshold */
+#define FEC_ENET_FCE (1 << 5)
+#define FEC_ENET_RSEM_V 0x84
+#define FEC_ENET_RSFL_V 16
+#define FEC_ENET_RAEM_V 0x8
+#define FEC_ENET_RAFL_V 0x8
+#define FEC_ENET_OPD_V 0xFFF0
+#define FEC_MDIO_PM_TIMEOUT 100 /* ms */
+
+struct fec_devinfo {
+ u32 quirks;
+};
+
+static const struct fec_devinfo fec_imx25_info = {
+ .quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
+ FEC_QUIRK_HAS_FRREG,
+};
+
+static const struct fec_devinfo fec_imx27_info = {
+ .quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
+};
+
+static const struct fec_devinfo fec_imx28_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
+ FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
+ FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII |
+ FEC_QUIRK_NO_HARD_RESET,
+};
+
+static const struct fec_devinfo fec_imx6q_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
+ FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII |
+ FEC_QUIRK_HAS_PMQOS,
+};
+
+static const struct fec_devinfo fec_mvf600_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
+};
+
+static const struct fec_devinfo fec_imx6x_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
+ FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
+ FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
+ FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES,
+};
+
+static const struct fec_devinfo fec_imx6ul_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
+ FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
+ FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII,
+};
+
+static const struct fec_devinfo fec_imx8mq_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
+ FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
+ FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
+ FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
+ FEC_QUIRK_HAS_EEE | FEC_QUIRK_WAKEUP_FROM_INT2,
+};
+
+static const struct fec_devinfo fec_imx8qm_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
+ FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
+ FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
+ FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
+ FEC_QUIRK_DELAYED_CLKS_SUPPORT,
+};
+
+static const struct fec_devinfo fec_s32v234_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
+ FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE,
+};
+
+static struct platform_device_id fec_devtype[] = {
+ {
+ /* keep it for coldfire */
+ .name = DRIVER_NAME,
+ .driver_data = 0,
+ }, {
+ .name = "imx25-fec",
+ .driver_data = (kernel_ulong_t)&fec_imx25_info,
+ }, {
+ .name = "imx27-fec",
+ .driver_data = (kernel_ulong_t)&fec_imx27_info,
+ }, {
+ .name = "imx28-fec",
+ .driver_data = (kernel_ulong_t)&fec_imx28_info,
+ }, {
+ .name = "imx6q-fec",
+ .driver_data = (kernel_ulong_t)&fec_imx6q_info,
+ }, {
+ .name = "mvf600-fec",
+ .driver_data = (kernel_ulong_t)&fec_mvf600_info,
+ }, {
+ .name = "imx6sx-fec",
+ .driver_data = (kernel_ulong_t)&fec_imx6x_info,
+ }, {
+ .name = "imx6ul-fec",
+ .driver_data = (kernel_ulong_t)&fec_imx6ul_info,
+ }, {
+ .name = "imx8mq-fec",
+ .driver_data = (kernel_ulong_t)&fec_imx8mq_info,
+ }, {
+ .name = "imx8qm-fec",
+ .driver_data = (kernel_ulong_t)&fec_imx8qm_info,
+ }, {
+ .name = "s32v234-fec",
+ .driver_data = (kernel_ulong_t)&fec_s32v234_info,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, fec_devtype);
+
+enum imx_fec_type {
+ IMX25_FEC = 1, /* runs on i.mx25/50/53 */
+ IMX27_FEC, /* runs on i.mx27/35/51 */
+ IMX28_FEC,
+ IMX6Q_FEC,
+ MVF600_FEC,
+ IMX6SX_FEC,
+ IMX6UL_FEC,
+ IMX8MQ_FEC,
+ IMX8QM_FEC,
+ S32V234_FEC,
+};
+
+static const struct of_device_id fec_dt_ids[] = {
+ { .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
+ { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
+ { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
+ { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
+ { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
+ { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
+ { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
+ { .compatible = "fsl,imx8mq-fec", .data = &fec_devtype[IMX8MQ_FEC], },
+ { .compatible = "fsl,imx8qm-fec", .data = &fec_devtype[IMX8QM_FEC], },
+ { .compatible = "fsl,s32v234-fec", .data = &fec_devtype[S32V234_FEC], },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fec_dt_ids);
+
+static unsigned char macaddr[ETH_ALEN];
+module_param_array(macaddr, byte, NULL, 0);
+MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
+
+#if defined(CONFIG_M5272)
+/*
+ * Some hardware gets its MAC address out of local flash memory.
+ * If this is non-zero, assume it is the address to get the MAC from.
+ */
+#if defined(CONFIG_NETtel)
+#define FEC_FLASHMAC 0xf0006006
+#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
+#define FEC_FLASHMAC 0xf0006000
+#elif defined(CONFIG_CANCam)
+#define FEC_FLASHMAC 0xf0020000
+#elif defined (CONFIG_M5272C3)
+#define FEC_FLASHMAC (0xffe04000 + 4)
+#elif defined(CONFIG_MOD5272)
+#define FEC_FLASHMAC 0xffc0406b
+#else
+#define FEC_FLASHMAC 0
+#endif
+#endif /* CONFIG_M5272 */
+
+/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
+ *
+ * 2048-byte skbufs are allocated. However, alignment requirements
+ * vary between FEC variants. The worst case is 64, so round down by 64.
+ */
+#define PKT_MAXBUF_SIZE (round_down(2048 - 64, 64))
+#define PKT_MINBUF_SIZE 64
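+/* For reference: round_down(2048 - 64, 64) == 1984, so PKT_MAXBUF_SIZE
+ * evaluates to 1984 bytes.
+ */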
+
+/* FEC receive acceleration */
+#define FEC_RACC_IPDIS (1 << 1)
+#define FEC_RACC_PRODIS (1 << 2)
+#define FEC_RACC_SHIFT16 BIT(7)
+#define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS)
+
+/* MIB Control Register */
+#define FEC_MIB_CTRLSTAT_DISABLE BIT(31)
+
+/*
+ * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
+ * size bits. Other FEC hardware does not, so we need to take that into
+ * account when setting it.
+ */
+#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
+ defined(CONFIG_ARM64)
+#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
+#else
+#define OPT_FRAME_SIZE 0
+#endif
+
+/* FEC MII MMFR bits definition */
+#define FEC_MMFR_ST (1 << 30)
+#define FEC_MMFR_ST_C45 (0)
+#define FEC_MMFR_OP_READ (2 << 28)
+#define FEC_MMFR_OP_READ_C45 (3 << 28)
+#define FEC_MMFR_OP_WRITE (1 << 28)
+#define FEC_MMFR_OP_ADDR_WRITE (0)
+#define FEC_MMFR_PA(v) ((v & 0x1f) << 23)
+#define FEC_MMFR_RA(v) ((v & 0x1f) << 18)
+#define FEC_MMFR_TA (2 << 16)
+#define FEC_MMFR_DATA(v) (v & 0xffff)
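+/* For illustration, a clause-22 read frame is composed roughly as
+ *
+ *	FEC_MMFR_ST | FEC_MMFR_OP_READ | FEC_MMFR_PA(mii_id) |
+ *	FEC_MMFR_RA(regnum) | FEC_MMFR_TA
+ *
+ * written to the MII data register; the PHY's reply is read back from the
+ * same register and masked with FEC_MMFR_DATA().
+ */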
+/* FEC ECR bits definition */
+#define FEC_ECR_MAGICEN (1 << 2)
+#define FEC_ECR_SLEEP (1 << 3)
+
+#define FEC_MII_TIMEOUT 30000 /* us */
+
+/* Transmitter timeout */
+#define TX_TIMEOUT (2 * HZ)
+
+#define FEC_PAUSE_FLAG_AUTONEG 0x1
+#define FEC_PAUSE_FLAG_ENABLE 0x2
+#define FEC_WOL_HAS_MAGIC_PACKET (0x1 << 0)
+#define FEC_WOL_FLAG_ENABLE (0x1 << 1)
+#define FEC_WOL_FLAG_SLEEP_ON (0x1 << 2)
+
+#define COPYBREAK_DEFAULT 256
+
+/* Max number of allowed TCP segments for software TSO */
+#define FEC_MAX_TSO_SEGS 100
+#define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
+
+#define IS_TSO_HEADER(txq, addr) \
+ ((addr >= txq->tso_hdrs_dma) && \
+ (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
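+/* IS_TSO_HEADER() checks whether a descriptor's DMA address points into the
+ * pre-mapped TSO header area (tso_hdrs_dma); such addresses must not be
+ * passed to dma_unmap_single() in the cleanup paths.
+ */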
+
+static int mii_cnt;
+
+static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
+ struct bufdesc_prop *bd)
+{
+ return (bdp >= bd->last) ? bd->base
+ : (struct bufdesc *)(((void *)bdp) + bd->dsize);
+}
+
+static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
+ struct bufdesc_prop *bd)
+{
+ return (bdp <= bd->base) ? bd->last
+ : (struct bufdesc *)(((void *)bdp) - bd->dsize);
+}
+
+static int fec_enet_get_bd_index(struct bufdesc *bdp,
+ struct bufdesc_prop *bd)
+{
+ return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2;
+}
+
+static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)
+{
+ int entries;
+
+ entries = (((const char *)txq->dirty_tx -
+ (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;
+
+ return entries >= 0 ? entries : entries + txq->bd.ring_size;
+}
+
+static void swap_buffer(void *bufaddr, int len)
+{
+ int i;
+ unsigned int *buf = bufaddr;
+
+ for (i = 0; i < len; i += 4, buf++)
+ swab32s(buf);
+}
+
+static void swap_buffer2(void *dst_buf, void *src_buf, int len)
+{
+ int i;
+ unsigned int *src = src_buf;
+ unsigned int *dst = dst_buf;
+
+ for (i = 0; i < len; i += 4, src++, dst++)
+ *dst = swab32p(src);
+}
+
+static void fec_dump(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct bufdesc *bdp;
+ struct fec_enet_priv_tx_q *txq;
+ int index = 0;
+
+ netdev_info(ndev, "TX ring dump\n");
+ pr_info("Nr SC addr len SKB\n");
+
+ txq = fep->tx_queue[0];
+ bdp = txq->bd.base;
+
+ do {
+ pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
+ index,
+ bdp == txq->bd.cur ? 'S' : ' ',
+ bdp == txq->dirty_tx ? 'H' : ' ',
+ fec16_to_cpu(bdp->cbd_sc),
+ fec32_to_cpu(bdp->cbd_bufaddr),
+ fec16_to_cpu(bdp->cbd_datlen),
+ txq->tx_skbuff[index]);
+ bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
+ index++;
+ } while (bdp != txq->bd.base);
+}
+
+static inline bool is_ipv4_pkt(struct sk_buff *skb)
+{
+ return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
+}
+
+static int
+fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
+{
+ /* Only run for packets requiring a checksum. */
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (unlikely(skb_cow_head(skb, 0)))
+ return -1;
+
+ if (is_ipv4_pkt(skb))
+ ip_hdr(skb)->check = 0;
+ *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;
+
+ return 0;
+}
+
+static int
+fec_enet_create_page_pool(struct fec_enet_private *fep,
+ struct fec_enet_priv_rx_q *rxq, int size)
+{
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = size,
+ .nid = dev_to_node(&fep->pdev->dev),
+ .dev = &fep->pdev->dev,
+ .dma_dir = DMA_FROM_DEVICE,
+ .offset = FEC_ENET_XDP_HEADROOM,
+ .max_len = FEC_ENET_RX_FRSIZE,
+ };
+ int err;
+
+ rxq->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rxq->page_pool)) {
+ err = PTR_ERR(rxq->page_pool);
+ rxq->page_pool = NULL;
+ return err;
+ }
+
+ err = xdp_rxq_info_reg(&rxq->xdp_rxq, fep->netdev, rxq->id, 0);
+ if (err < 0)
+ goto err_free_pp;
+
+ err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
+ rxq->page_pool);
+ if (err)
+ goto err_unregister_rxq;
+
+ return 0;
+
+err_unregister_rxq:
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+err_free_pp:
+ page_pool_destroy(rxq->page_pool);
+ rxq->page_pool = NULL;
+ return err;
+}
+
+static struct bufdesc *
+fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
+ struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct bufdesc *bdp = txq->bd.cur;
+ struct bufdesc_ex *ebdp;
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ int frag, frag_len;
+ unsigned short status;
+ unsigned int estatus = 0;
+ skb_frag_t *this_frag;
+ unsigned int index;
+ void *bufaddr;
+ dma_addr_t addr;
+ int i;
+
+ for (frag = 0; frag < nr_frags; frag++) {
+ this_frag = &skb_shinfo(skb)->frags[frag];
+ bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
+ ebdp = (struct bufdesc_ex *)bdp;
+
+ status = fec16_to_cpu(bdp->cbd_sc);
+ status &= ~BD_ENET_TX_STATS;
+ status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
+ frag_len = skb_frag_size(&skb_shinfo(skb)->frags[frag]);
+
+ /* Handle the last BD specially */
+ if (frag == nr_frags - 1) {
+ status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
+ if (fep->bufdesc_ex) {
+ estatus |= BD_ENET_TX_INT;
+ if (unlikely(skb_shinfo(skb)->tx_flags &
+ SKBTX_HW_TSTAMP && fep->hwts_tx_en))
+ estatus |= BD_ENET_TX_TS;
+ }
+ }
+
+ if (fep->bufdesc_ex) {
+ if (fep->quirks & FEC_QUIRK_HAS_AVB)
+ estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
+
+ ebdp->cbd_bdu = 0;
+ ebdp->cbd_esc = cpu_to_fec32(estatus);
+ }
+
+ bufaddr = skb_frag_address(this_frag);
+
+ index = fec_enet_get_bd_index(bdp, &txq->bd);
+ if (((unsigned long) bufaddr) & fep->tx_align ||
+ fep->quirks & FEC_QUIRK_SWAP_FRAME) {
+ memcpy(txq->tx_bounce[index], bufaddr, frag_len);
+ bufaddr = txq->tx_bounce[index];
+
+ if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
+ swap_buffer(bufaddr, frag_len);
+ }
+
+ addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&fep->pdev->dev, addr)) {
+ if (net_ratelimit())
+ netdev_err(ndev, "Tx DMA memory map failed\n");
+ goto dma_mapping_error;
+ }
+
+ bdp->cbd_bufaddr = cpu_to_fec32(addr);
+ bdp->cbd_datlen = cpu_to_fec16(frag_len);
+ /* Make sure the updates to rest of the descriptor are
+ * performed before transferring ownership.
+ */
+ wmb();
+ bdp->cbd_sc = cpu_to_fec16(status);
+ }
+
+ return bdp;
+dma_mapping_error:
+ bdp = txq->bd.cur;
+ for (i = 0; i < frag; i++) {
+ bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
+ dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
+ fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
+ }
+ return ERR_PTR(-ENOMEM);
+}
+
+static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
+ struct sk_buff *skb, struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ struct bufdesc *bdp, *last_bdp;
+ void *bufaddr;
+ dma_addr_t addr;
+ unsigned short status;
+ unsigned short buflen;
+ unsigned int estatus = 0;
+ unsigned int index;
+ int entries_free;
+
+ entries_free = fec_enet_get_free_txdesc_num(txq);
+ if (entries_free < MAX_SKB_FRAGS + 1) {
+ dev_kfree_skb_any(skb);
+ if (net_ratelimit())
+ netdev_err(ndev, "NOT enough BD for SG!\n");
+ return NETDEV_TX_OK;
+ }
+
+ /* Protocol checksum off-load for TCP and UDP. */
+ if (fec_enet_clear_csum(skb, ndev)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* Fill in a Tx ring entry */
+ bdp = txq->bd.cur;
+ last_bdp = bdp;
+ status = fec16_to_cpu(bdp->cbd_sc);
+ status &= ~BD_ENET_TX_STATS;
+
+ /* Set buffer length and buffer pointer */
+ bufaddr = skb->data;
+ buflen = skb_headlen(skb);
+
+ index = fec_enet_get_bd_index(bdp, &txq->bd);
+ if (((unsigned long) bufaddr) & fep->tx_align ||
+ fep->quirks & FEC_QUIRK_SWAP_FRAME) {
+ memcpy(txq->tx_bounce[index], skb->data, buflen);
+ bufaddr = txq->tx_bounce[index];
+
+ if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
+ swap_buffer(bufaddr, buflen);
+ }
+
+ /* Push the data cache so the CPM does not get stale memory data. */
+ addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
+ if (dma_mapping_error(&fep->pdev->dev, addr)) {
+ dev_kfree_skb_any(skb);
+ if (net_ratelimit())
+ netdev_err(ndev, "Tx DMA memory map failed\n");
+ return NETDEV_TX_OK;
+ }
+
+ if (nr_frags) {
+ last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
+ if (IS_ERR(last_bdp)) {
+ dma_unmap_single(&fep->pdev->dev, addr,
+ buflen, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ } else {
+ status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
+ if (fep->bufdesc_ex) {
+ estatus = BD_ENET_TX_INT;
+ if (unlikely(skb_shinfo(skb)->tx_flags &
+ SKBTX_HW_TSTAMP && fep->hwts_tx_en))
+ estatus |= BD_ENET_TX_TS;
+ }
+ }
+ bdp->cbd_bufaddr = cpu_to_fec32(addr);
+ bdp->cbd_datlen = cpu_to_fec16(buflen);
+
+ if (fep->bufdesc_ex) {
+
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ fep->hwts_tx_en))
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ if (fep->quirks & FEC_QUIRK_HAS_AVB)
+ estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
+
+ ebdp->cbd_bdu = 0;
+ ebdp->cbd_esc = cpu_to_fec32(estatus);
+ }
+
+ index = fec_enet_get_bd_index(last_bdp, &txq->bd);
+ /* Save skb pointer */
+ txq->tx_skbuff[index] = skb;
+
+ /* Make sure the updates to rest of the descriptor are performed before
+ * transferring ownership.
+ */
+ wmb();
+
+ /* Send it on its way. Tell FEC it's ready, interrupt when done,
+ * it's the last BD of the frame, and to put the CRC on the end.
+ */
+ status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
+ bdp->cbd_sc = cpu_to_fec16(status);
+
+ /* If this was the last BD in the ring, start at the beginning again. */
+ bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);
+
+ skb_tx_timestamp(skb);
+
+ /* Make sure the update to bdp and tx_skbuff are performed before
+ * txq->bd.cur.
+ */
+ wmb();
+ txq->bd.cur = bdp;
+
+ /* Trigger transmission start */
+ writel(0, txq->bd.reg_desc_active);
+
+ return 0;
+}
+
+static int
+fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
+ struct net_device *ndev,
+ struct bufdesc *bdp, int index, char *data,
+ int size, bool last_tcp, bool is_last)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
+ unsigned short status;
+ unsigned int estatus = 0;
+ dma_addr_t addr;
+
+ status = fec16_to_cpu(bdp->cbd_sc);
+ status &= ~BD_ENET_TX_STATS;
+
+ status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
+
+ if (((unsigned long) data) & fep->tx_align ||
+ fep->quirks & FEC_QUIRK_SWAP_FRAME) {
+ memcpy(txq->tx_bounce[index], data, size);
+ data = txq->tx_bounce[index];
+
+ if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
+ swap_buffer(data, size);
+ }
+
+ addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&fep->pdev->dev, addr)) {
+ dev_kfree_skb_any(skb);
+ if (net_ratelimit())
+ netdev_err(ndev, "Tx DMA memory map failed\n");
+ return NETDEV_TX_OK;
+ }
+
+ bdp->cbd_datlen = cpu_to_fec16(size);
+ bdp->cbd_bufaddr = cpu_to_fec32(addr);
+
+ if (fep->bufdesc_ex) {
+ if (fep->quirks & FEC_QUIRK_HAS_AVB)
+ estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
+ ebdp->cbd_bdu = 0;
+ ebdp->cbd_esc = cpu_to_fec32(estatus);
+ }
+
+ /* Handle the last BD specially */
+ if (last_tcp)
+ status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
+ if (is_last) {
+ status |= BD_ENET_TX_INTR;
+ if (fep->bufdesc_ex)
+ ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
+ }
+
+ bdp->cbd_sc = cpu_to_fec16(status);
+
+ return 0;
+}
+
+static int
+fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
+ struct sk_buff *skb, struct net_device *ndev,
+ struct bufdesc *bdp, int index)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int hdr_len = skb_tcp_all_headers(skb);
+ struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
+ void *bufaddr;
+ unsigned long dmabuf;
+ unsigned short status;
+ unsigned int estatus = 0;
+
+ status = fec16_to_cpu(bdp->cbd_sc);
+ status &= ~BD_ENET_TX_STATS;
+ status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
+
+ bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
+ dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
+ if (((unsigned long)bufaddr) & fep->tx_align ||
+ fep->quirks & FEC_QUIRK_SWAP_FRAME) {
+ memcpy(txq->tx_bounce[index], skb->data, hdr_len);
+ bufaddr = txq->tx_bounce[index];
+
+ if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
+ swap_buffer(bufaddr, hdr_len);
+
+ dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
+ hdr_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
+ dev_kfree_skb_any(skb);
+ if (net_ratelimit())
+ netdev_err(ndev, "Tx DMA memory map failed\n");
+ return NETDEV_TX_OK;
+ }
+ }
+
+ bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
+ bdp->cbd_datlen = cpu_to_fec16(hdr_len);
+
+ if (fep->bufdesc_ex) {
+ if (fep->quirks & FEC_QUIRK_HAS_AVB)
+ estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
+ ebdp->cbd_bdu = 0;
+ ebdp->cbd_esc = cpu_to_fec32(estatus);
+ }
+
+ bdp->cbd_sc = cpu_to_fec16(status);
+
+ return 0;
+}
+
+static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
+ struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int hdr_len, total_len, data_left;
+ struct bufdesc *bdp = txq->bd.cur;
+ struct tso_t tso;
+ unsigned int index = 0;
+ int ret;
+
+ if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) {
+ dev_kfree_skb_any(skb);
+ if (net_ratelimit())
+ netdev_err(ndev, "NOT enough BD for TSO!\n");
+ return NETDEV_TX_OK;
+ }
+
+ /* Protocol checksum off-load for TCP and UDP. */
+ if (fec_enet_clear_csum(skb, ndev)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* Initialize the TSO handler, and prepare the first payload */
+ hdr_len = tso_start(skb, &tso);
+
+ total_len = skb->len - hdr_len;
+ while (total_len > 0) {
+ char *hdr;
+
+ index = fec_enet_get_bd_index(bdp, &txq->bd);
+ data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+ total_len -= data_left;
+
+ /* prepare packet headers: MAC + IP + TCP */
+ hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
+ tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
+ ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
+ if (ret)
+ goto err_release;
+
+ while (data_left > 0) {
+ int size;
+
+ size = min_t(int, tso.size, data_left);
+ bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
+ index = fec_enet_get_bd_index(bdp, &txq->bd);
+ ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
+ bdp, index,
+ tso.data, size,
+ size == data_left,
+ total_len == 0);
+ if (ret)
+ goto err_release;
+
+ data_left -= size;
+ tso_build_data(skb, &tso, size);
+ }
+
+ bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
+ }
+
+ /* Save skb pointer */
+ txq->tx_skbuff[index] = skb;
+
+ skb_tx_timestamp(skb);
+ txq->bd.cur = bdp;
+
+ /* Trigger transmission start */
+ if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
+ !readl(txq->bd.reg_desc_active) ||
+ !readl(txq->bd.reg_desc_active) ||
+ !readl(txq->bd.reg_desc_active) ||
+ !readl(txq->bd.reg_desc_active))
+ writel(0, txq->bd.reg_desc_active);
+
+ return 0;
+
+err_release:
+ /* TODO: Release all used data descriptors for TSO */
+ return ret;
+}
+
+static netdev_tx_t
+fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int entries_free;
+ unsigned short queue;
+ struct fec_enet_priv_tx_q *txq;
+ struct netdev_queue *nq;
+ int ret;
+
+ queue = skb_get_queue_mapping(skb);
+ txq = fep->tx_queue[queue];
+ nq = netdev_get_tx_queue(ndev, queue);
+
+ if (skb_is_gso(skb))
+ ret = fec_enet_txq_submit_tso(txq, skb, ndev);
+ else
+ ret = fec_enet_txq_submit_skb(txq, skb, ndev);
+ if (ret)
+ return ret;
+
+ entries_free = fec_enet_get_free_txdesc_num(txq);
+ if (entries_free <= txq->tx_stop_threshold)
+ netif_tx_stop_queue(nq);
+
+ return NETDEV_TX_OK;
+}
+
+/* Init RX & TX buffer descriptors
+ */
+static void fec_enet_bd_init(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_priv_tx_q *txq;
+ struct fec_enet_priv_rx_q *rxq;
+ struct bufdesc *bdp;
+ unsigned int i;
+ unsigned int q;
+
+ for (q = 0; q < fep->num_rx_queues; q++) {
+ /* Initialize the receive buffer descriptors. */
+ rxq = fep->rx_queue[q];
+ bdp = rxq->bd.base;
+
+ for (i = 0; i < rxq->bd.ring_size; i++) {
+
+ /* Initialize the BD for every fragment in the page. */
+ if (bdp->cbd_bufaddr)
+ bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
+ else
+ bdp->cbd_sc = cpu_to_fec16(0);
+ bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
+ }
+
+ /* Set the last buffer to wrap */
+ bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
+ bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
+
+ rxq->bd.cur = rxq->bd.base;
+ }
+
+ for (q = 0; q < fep->num_tx_queues; q++) {
+ /* ...and the same for transmit */
+ txq = fep->tx_queue[q];
+ bdp = txq->bd.base;
+ txq->bd.cur = bdp;
+
+ for (i = 0; i < txq->bd.ring_size; i++) {
+ /* Initialize the BD for every fragment in the page. */
+ bdp->cbd_sc = cpu_to_fec16(0);
+ if (bdp->cbd_bufaddr &&
+ !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+ dma_unmap_single(&fep->pdev->dev,
+ fec32_to_cpu(bdp->cbd_bufaddr),
+ fec16_to_cpu(bdp->cbd_datlen),
+ DMA_TO_DEVICE);
+ if (txq->tx_skbuff[i]) {
+ dev_kfree_skb_any(txq->tx_skbuff[i]);
+ txq->tx_skbuff[i] = NULL;
+ }
+ bdp->cbd_bufaddr = cpu_to_fec32(0);
+ bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
+ }
+
+ /* Set the last buffer to wrap */
+ bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
+ bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
+ txq->dirty_tx = bdp;
+ }
+}
+
+static void fec_enet_active_rxring(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int i;
+
+ for (i = 0; i < fep->num_rx_queues; i++)
+ writel(0, fep->rx_queue[i]->bd.reg_desc_active);
+}
+
+static void fec_enet_enable_ring(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct fec_enet_priv_tx_q *txq;
+ struct fec_enet_priv_rx_q *rxq;
+ int i;
+
+ for (i = 0; i < fep->num_rx_queues; i++) {
+ rxq = fep->rx_queue[i];
+ writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
+ writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
+
+ /* enable DMA1/2 */
+ if (i)
+ writel(RCMR_MATCHEN | RCMR_CMP(i),
+ fep->hwp + FEC_RCMR(i));
+ }
+
+ for (i = 0; i < fep->num_tx_queues; i++) {
+ txq = fep->tx_queue[i];
+ writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));
+
+ /* enable DMA1/2 */
+ if (i)
+ writel(DMA_CLASS_EN | IDLE_SLOPE(i),
+ fep->hwp + FEC_DMA_CFG(i));
+ }
+}
+
+static void fec_enet_reset_skb(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct fec_enet_priv_tx_q *txq;
+ int i, j;
+
+ for (i = 0; i < fep->num_tx_queues; i++) {
+ txq = fep->tx_queue[i];
+
+ for (j = 0; j < txq->bd.ring_size; j++) {
+ if (txq->tx_skbuff[j]) {
+ dev_kfree_skb_any(txq->tx_skbuff[j]);
+ txq->tx_skbuff[j] = NULL;
+ }
+ }
+ }
+}
+
+/*
+ * This function is called to start or restart the FEC during a link
+ * change, transmit timeout, or to reconfigure the FEC. The network
+ * packet processing for this device must be stopped before this call.
+ */
+static void
+fec_restart(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ u32 temp_mac[2];
+ u32 rcntl = OPT_FRAME_SIZE | 0x04;
+ u32 ecntl = 0x2; /* ETHEREN */
+
+ /* Whack a reset. We should wait for this.
+	 * For the i.MX6SX SoC, the ENET uses the AXI bus, so we disable the
+	 * MAC instead of resetting it.
+ */
+ if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
+ ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
+ writel(0, fep->hwp + FEC_ECNTRL);
+ } else {
+ writel(1, fep->hwp + FEC_ECNTRL);
+ udelay(10);
+ }
+
+ /*
+	 * An enet-mac reset will reset the MAC address registers too,
+	 * so we need to reconfigure them.
+ */
+ memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
+ writel((__force u32)cpu_to_be32(temp_mac[0]),
+ fep->hwp + FEC_ADDR_LOW);
+ writel((__force u32)cpu_to_be32(temp_mac[1]),
+ fep->hwp + FEC_ADDR_HIGH);
+
+ /* Clear any outstanding interrupt, except MDIO. */
+ writel((0xffffffff & ~FEC_ENET_MII), fep->hwp + FEC_IEVENT);
+
+ fec_enet_bd_init(ndev);
+
+ fec_enet_enable_ring(ndev);
+
+ /* Reset tx SKB buffers. */
+ fec_enet_reset_skb(ndev);
+
+ /* Enable MII mode */
+ if (fep->full_duplex == DUPLEX_FULL) {
+ /* FD enable */
+ writel(0x04, fep->hwp + FEC_X_CNTRL);
+ } else {
+ /* No Rcv on Xmit */
+ rcntl |= 0x02;
+ writel(0x0, fep->hwp + FEC_X_CNTRL);
+ }
+
+ /* Set MII speed */
+ writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+
+#if !defined(CONFIG_M5272)
+ if (fep->quirks & FEC_QUIRK_HAS_RACC) {
+ u32 val = readl(fep->hwp + FEC_RACC);
+
+ /* align IP header */
+ val |= FEC_RACC_SHIFT16;
+ if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
+ /* set RX checksum */
+ val |= FEC_RACC_OPTIONS;
+ else
+ val &= ~FEC_RACC_OPTIONS;
+ writel(val, fep->hwp + FEC_RACC);
+ writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
+ }
+#endif
+
+ /*
+	 * The PHY interface and speed need to be configured
+	 * differently on enet-mac.
+ */
+ if (fep->quirks & FEC_QUIRK_ENET_MAC) {
+ /* Enable flow control and length check */
+ rcntl |= 0x40000000 | 0x00000020;
+
+ /* RGMII, RMII or MII */
+ if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
+ fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
+ fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ rcntl |= (1 << 6);
+ else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
+ rcntl |= (1 << 8);
+ else
+ rcntl &= ~(1 << 8);
+
+ /* 1G, 100M or 10M */
+ if (ndev->phydev) {
+ if (ndev->phydev->speed == SPEED_1000)
+ ecntl |= (1 << 5);
+ else if (ndev->phydev->speed == SPEED_100)
+ rcntl &= ~(1 << 9);
+ else
+ rcntl |= (1 << 9);
+ }
+ } else {
+#ifdef FEC_MIIGSK_ENR
+ if (fep->quirks & FEC_QUIRK_USE_GASKET) {
+ u32 cfgr;
+ /* disable the gasket and wait */
+ writel(0, fep->hwp + FEC_MIIGSK_ENR);
+ while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
+ udelay(1);
+
+ /*
+ * configure the gasket:
+ * RMII, 50 MHz, no loopback, no echo
+ * MII, 25 MHz, no loopback, no echo
+ */
+ cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
+ ? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
+ if (ndev->phydev && ndev->phydev->speed == SPEED_10)
+ cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
+ writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);
+
+ /* re-enable the gasket */
+ writel(2, fep->hwp + FEC_MIIGSK_ENR);
+ }
+#endif
+ }
+
+#if !defined(CONFIG_M5272)
+	/* enable pause frame */
+ if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
+ ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
+ ndev->phydev && ndev->phydev->pause)) {
+ rcntl |= FEC_ENET_FCE;
+
+ /* set FIFO threshold parameter to reduce overrun */
+ writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
+ writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
+ writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
+ writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);
+
+ /* OPD */
+ writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
+ } else {
+ rcntl &= ~FEC_ENET_FCE;
+ }
+#endif /* !defined(CONFIG_M5272) */
+
+ writel(rcntl, fep->hwp + FEC_R_CNTRL);
+
+ /* Setup multicast filter. */
+ set_multicast_list(ndev);
+#ifndef CONFIG_M5272
+ writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
+ writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
+#endif
+
+ if (fep->quirks & FEC_QUIRK_ENET_MAC) {
+ /* enable ENET endian swap */
+ ecntl |= (1 << 8);
+ /* enable ENET store and forward mode */
+ writel(1 << 8, fep->hwp + FEC_X_WMRK);
+ }
+
+ if (fep->bufdesc_ex)
+ ecntl |= (1 << 4);
+
+ if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
+ fep->rgmii_txc_dly)
+ ecntl |= FEC_ENET_TXC_DLY;
+ if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
+ fep->rgmii_rxc_dly)
+ ecntl |= FEC_ENET_RXC_DLY;
+
+#ifndef CONFIG_M5272
+ /* Enable the MIB statistic event counters */
+ writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
+#endif
+
+ /* And last, enable the transmit and receive processing */
+ writel(ecntl, fep->hwp + FEC_ECNTRL);
+ fec_enet_active_rxring(ndev);
+
+ if (fep->bufdesc_ex)
+ fec_ptp_start_cyclecounter(ndev);
+
+ /* Enable interrupts we wish to service */
+ if (fep->link)
+ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+ else
+ writel(0, fep->hwp + FEC_IMASK);
+
+ /* Init the interrupt coalescing */
+ if (fep->quirks & FEC_QUIRK_HAS_COALESCE)
+ fec_enet_itr_coal_set(ndev);
+}
+
+static int fec_enet_ipc_handle_init(struct fec_enet_private *fep)
+{
+ if (!(of_machine_is_compatible("fsl,imx8qm") ||
+ of_machine_is_compatible("fsl,imx8qxp") ||
+ of_machine_is_compatible("fsl,imx8dxl")))
+ return 0;
+
+ return imx_scu_get_handle(&fep->ipc_handle);
+}
+
+static void fec_enet_ipg_stop_set(struct fec_enet_private *fep, bool enabled)
+{
+ struct device_node *np = fep->pdev->dev.of_node;
+ u32 rsrc_id, val;
+ int idx;
+
+ if (!np || !fep->ipc_handle)
+ return;
+
+ idx = of_alias_get_id(np, "ethernet");
+ if (idx < 0)
+ idx = 0;
+ rsrc_id = idx ? IMX_SC_R_ENET_1 : IMX_SC_R_ENET_0;
+
+ val = enabled ? 1 : 0;
+ imx_sc_misc_set_control(fep->ipc_handle, rsrc_id, IMX_SC_C_IPG_STOP, val);
+}
+
+static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
+{
+ struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
+ struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr;
+
+ if (stop_gpr->gpr) {
+ if (enabled)
+ regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
+ BIT(stop_gpr->bit),
+ BIT(stop_gpr->bit));
+ else
+ regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
+ BIT(stop_gpr->bit), 0);
+ } else if (pdata && pdata->sleep_mode_enable) {
+ pdata->sleep_mode_enable(enabled);
+ } else {
+ fec_enet_ipg_stop_set(fep, enabled);
+ }
+}
+
+static void fec_irqs_disable(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ writel(0, fep->hwp + FEC_IMASK);
+}
+
+static void fec_irqs_disable_except_wakeup(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ writel(0, fep->hwp + FEC_IMASK);
+ writel(FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
+}
+
+static void
+fec_stop(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
+ u32 val;
+
+ /* We cannot expect a graceful transmit stop without link !!! */
+ if (fep->link) {
+ writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
+ udelay(10);
+ if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
+ netdev_err(ndev, "Graceful transmit stop did not complete!\n");
+ }
+
+ /* Whack a reset. We should wait for this.
+	 * For the i.MX6SX SoC, the ENET uses the AXI bus, so we disable the
+	 * MAC instead of resetting it.
+ */
+ if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
+ if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
+ writel(0, fep->hwp + FEC_ECNTRL);
+ } else {
+ writel(1, fep->hwp + FEC_ECNTRL);
+ udelay(10);
+ }
+ } else {
+ val = readl(fep->hwp + FEC_ECNTRL);
+ val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
+ writel(val, fep->hwp + FEC_ECNTRL);
+ }
+ writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+
+	/* We have to keep ENET enabled to keep the MII interrupt working */
+ if (fep->quirks & FEC_QUIRK_ENET_MAC &&
+ !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
+ writel(2, fep->hwp + FEC_ECNTRL);
+ writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
+ }
+}
+
+
+static void
+fec_timeout(struct net_device *ndev, unsigned int txqueue)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ fec_dump(ndev);
+
+ ndev->stats.tx_errors++;
+
+ schedule_work(&fep->tx_timeout_work);
+}
+
+static void fec_enet_timeout_work(struct work_struct *work)
+{
+ struct fec_enet_private *fep =
+ container_of(work, struct fec_enet_private, tx_timeout_work);
+ struct net_device *ndev = fep->netdev;
+
+ rtnl_lock();
+ if (netif_device_present(ndev) || netif_running(ndev)) {
+ napi_disable(&fep->napi);
+ netif_tx_lock_bh(ndev);
+ fec_restart(ndev);
+ netif_tx_wake_all_queues(ndev);
+ netif_tx_unlock_bh(ndev);
+ napi_enable(&fep->napi);
+ }
+ rtnl_unlock();
+}
+
+static void
+fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
+ struct skb_shared_hwtstamps *hwtstamps)
+{
+ unsigned long flags;
+ u64 ns;
+
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+ ns = timecounter_cyc2time(&fep->tc, ts);
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->hwtstamp = ns_to_ktime(ns);
+}
+
+static void
+fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
+{
+ struct fec_enet_private *fep;
+ struct bufdesc *bdp;
+ unsigned short status;
+ struct sk_buff *skb;
+ struct fec_enet_priv_tx_q *txq;
+ struct netdev_queue *nq;
+ int index = 0;
+ int entries_free;
+
+ fep = netdev_priv(ndev);
+
+ txq = fep->tx_queue[queue_id];
+ nq = netdev_get_tx_queue(ndev, queue_id);
+ bdp = txq->dirty_tx;
+
+ /* get next bdp of dirty_tx */
+ bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
+
+ while (bdp != READ_ONCE(txq->bd.cur)) {
+ /* Order the load of bd.cur and cbd_sc */
+ rmb();
+ status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
+ if (status & BD_ENET_TX_READY)
+ break;
+
+ index = fec_enet_get_bd_index(bdp, &txq->bd);
+
+ skb = txq->tx_skbuff[index];
+ txq->tx_skbuff[index] = NULL;
+ if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+ dma_unmap_single(&fep->pdev->dev,
+ fec32_to_cpu(bdp->cbd_bufaddr),
+ fec16_to_cpu(bdp->cbd_datlen),
+ DMA_TO_DEVICE);
+ bdp->cbd_bufaddr = cpu_to_fec32(0);
+ if (!skb)
+ goto skb_done;
+
+ /* Check for errors. */
+ if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
+ BD_ENET_TX_RL | BD_ENET_TX_UN |
+ BD_ENET_TX_CSL)) {
+ ndev->stats.tx_errors++;
+ if (status & BD_ENET_TX_HB) /* No heartbeat */
+ ndev->stats.tx_heartbeat_errors++;
+ if (status & BD_ENET_TX_LC) /* Late collision */
+ ndev->stats.tx_window_errors++;
+ if (status & BD_ENET_TX_RL) /* Retrans limit */
+ ndev->stats.tx_aborted_errors++;
+ if (status & BD_ENET_TX_UN) /* Underrun */
+ ndev->stats.tx_fifo_errors++;
+ if (status & BD_ENET_TX_CSL) /* Carrier lost */
+ ndev->stats.tx_carrier_errors++;
+ } else {
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+ }
+
+		/* NOTE: SKBTX_IN_PROGRESS being set does not imply that we are
+		 * the ones who should timestamp the packet, so we still need to
+		 * check the timestamping-enabled flag.
+		 */
+ */
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
+ fep->hwts_tx_en) &&
+ fep->bufdesc_ex) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+ fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ }
+
+ /* Deferred means some collisions occurred during transmit,
+ * but we eventually sent the packet OK.
+ */
+ if (status & BD_ENET_TX_DEF)
+ ndev->stats.collisions++;
+
+ /* Free the sk buffer associated with this last transmit */
+ dev_kfree_skb_any(skb);
+skb_done:
+ /* Make sure the update to bdp and tx_skbuff are performed
+ * before dirty_tx
+ */
+ wmb();
+ txq->dirty_tx = bdp;
+
+ /* Update pointer to next buffer descriptor to be transmitted */
+ bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
+
+ /* Since we have freed up a buffer, the ring is no longer full
+ */
+ if (netif_tx_queue_stopped(nq)) {
+ entries_free = fec_enet_get_free_txdesc_num(txq);
+ if (entries_free >= txq->tx_wake_threshold)
+ netif_tx_wake_queue(nq);
+ }
+ }
+
+ /* ERR006358: Keep the transmitter going */
+ if (bdp != txq->bd.cur &&
+ readl(txq->bd.reg_desc_active) == 0)
+ writel(0, txq->bd.reg_desc_active);
+}
+
+static void fec_enet_tx(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int i;
+
+ /* Make sure that AVB queues are processed first. */
+ for (i = fep->num_tx_queues - 1; i >= 0; i--)
+ fec_enet_tx_queue(ndev, i);
+}
+
+static int __maybe_unused
+fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int off;
+
+ off = ((unsigned long)skb->data) & fep->rx_align;
+ if (off)
+ skb_reserve(skb, fep->rx_align + 1 - off);
+
+ bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE));
+ if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
+ if (net_ratelimit())
+ netdev_err(ndev, "Rx DMA memory map failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static bool __maybe_unused
+fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
+ struct bufdesc *bdp, u32 length, bool swap)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct sk_buff *new_skb;
+
+ if (length > fep->rx_copybreak)
+ return false;
+
+ new_skb = netdev_alloc_skb(ndev, length);
+ if (!new_skb)
+ return false;
+
+ dma_sync_single_for_cpu(&fep->pdev->dev,
+ fec32_to_cpu(bdp->cbd_bufaddr),
+ FEC_ENET_RX_FRSIZE - fep->rx_align,
+ DMA_FROM_DEVICE);
+ if (!swap)
+ memcpy(new_skb->data, (*skb)->data, length);
+ else
+ swap_buffer2(new_skb->data, (*skb)->data, length);
+ *skb = new_skb;
+
+ return true;
+}
+
+static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
+ struct bufdesc *bdp, int index)
+{
+ struct page *new_page;
+ dma_addr_t phys_addr;
+
+ new_page = page_pool_dev_alloc_pages(rxq->page_pool);
+ WARN_ON(!new_page);
+ rxq->rx_skb_info[index].page = new_page;
+
+ rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM;
+ phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
+ bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
+}
+
+/* During a receive, the bd_rx.cur points to the current incoming buffer.
+ * When we update through the ring, if the next incoming buffer has
+ * not been given to the system, we just set the empty indicator,
+ * effectively tossing the packet.
+ */
+static int
+fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct fec_enet_priv_rx_q *rxq;
+ struct bufdesc *bdp;
+ unsigned short status;
+ struct sk_buff *skb;
+ ushort pkt_len;
+ __u8 *data;
+ int pkt_received = 0;
+ struct bufdesc_ex *ebdp = NULL;
+ bool vlan_packet_rcvd = false;
+ u16 vlan_tag;
+ int index = 0;
+ bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
+ struct page *page;
+
+#ifdef CONFIG_M532x
+ flush_cache_all();
+#endif
+ rxq = fep->rx_queue[queue_id];
+
+ /* First, grab all of the stats for the incoming packet.
+ * These get messed up if we get called due to a busy condition.
+ */
+ bdp = rxq->bd.cur;
+
+ while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
+
+ if (pkt_received >= budget)
+ break;
+ pkt_received++;
+
+ writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT);
+
+ /* Check for errors. */
+ status ^= BD_ENET_RX_LAST;
+ if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
+ BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST |
+ BD_ENET_RX_CL)) {
+ ndev->stats.rx_errors++;
+ if (status & BD_ENET_RX_OV) {
+ /* FIFO overrun */
+ ndev->stats.rx_fifo_errors++;
+ goto rx_processing_done;
+ }
+ if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH
+ | BD_ENET_RX_LAST)) {
+ /* Frame too long or too short. */
+ ndev->stats.rx_length_errors++;
+ if (status & BD_ENET_RX_LAST)
+ netdev_err(ndev, "rcv is not +last\n");
+ }
+ if (status & BD_ENET_RX_CR) /* CRC Error */
+ ndev->stats.rx_crc_errors++;
+ /* Report late collisions as a frame error. */
+ if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL))
+ ndev->stats.rx_frame_errors++;
+ goto rx_processing_done;
+ }
+
+ /* Process the incoming frame. */
+ ndev->stats.rx_packets++;
+ pkt_len = fec16_to_cpu(bdp->cbd_datlen);
+ ndev->stats.rx_bytes += pkt_len;
+
+ index = fec_enet_get_bd_index(bdp, &rxq->bd);
+ page = rxq->rx_skb_info[index].page;
+ dma_sync_single_for_cpu(&fep->pdev->dev,
+ fec32_to_cpu(bdp->cbd_bufaddr),
+ pkt_len,
+ DMA_FROM_DEVICE);
+ prefetch(page_address(page));
+ fec_enet_update_cbd(rxq, bdp, index);
+
+ /* The packet length includes FCS, but we don't want to
+ * include that when passing upstream as it messes up
+ * bridging applications.
+ */
+ skb = build_skb(page_address(page), PAGE_SIZE);
+ if (unlikely(!skb)) {
+ page_pool_recycle_direct(rxq->page_pool, page);
+ ndev->stats.rx_dropped++;
+
+ netdev_err_once(ndev, "build_skb failed!\n");
+ goto rx_processing_done;
+ }
+
+ skb_reserve(skb, FEC_ENET_XDP_HEADROOM);
+ skb_put(skb, pkt_len - 4);
+ skb_mark_for_recycle(skb);
+ data = skb->data;
+
+ if (need_swap)
+ swap_buffer(data, pkt_len);
+
+#if !defined(CONFIG_M5272)
+ if (fep->quirks & FEC_QUIRK_HAS_RACC)
+ data = skb_pull_inline(skb, 2);
+#endif
+
+ /* Extract the enhanced buffer descriptor */
+ ebdp = NULL;
+ if (fep->bufdesc_ex)
+ ebdp = (struct bufdesc_ex *)bdp;
+
+ /* If this is a VLAN packet remove the VLAN Tag */
+ vlan_packet_rcvd = false;
+ if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ fep->bufdesc_ex &&
+ (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
+ /* Push and remove the vlan tag */
+ struct vlan_hdr *vlan_header =
+ (struct vlan_hdr *) (data + ETH_HLEN);
+ vlan_tag = ntohs(vlan_header->h_vlan_TCI);
+
+ vlan_packet_rcvd = true;
+
+ memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
+ skb_pull(skb, VLAN_HLEN);
+ }
+
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ /* Get receive timestamp from the skb */
+ if (fep->hwts_rx_en && fep->bufdesc_ex)
+ fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
+ skb_hwtstamps(skb));
+
+ if (fep->bufdesc_ex &&
+ (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
+ if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
+ /* don't check it */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ skb_checksum_none_assert(skb);
+ }
+ }
+
+ /* Handle received VLAN packets */
+ if (vlan_packet_rcvd)
+ __vlan_hwaccel_put_tag(skb,
+ htons(ETH_P_8021Q),
+ vlan_tag);
+
+ skb_record_rx_queue(skb, queue_id);
+ napi_gro_receive(&fep->napi, skb);
+
+rx_processing_done:
+ /* Clear the status flags for this buffer */
+ status &= ~BD_ENET_RX_STATS;
+
+ /* Mark the buffer empty */
+ status |= BD_ENET_RX_EMPTY;
+
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+ ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
+ ebdp->cbd_prot = 0;
+ ebdp->cbd_bdu = 0;
+ }
+ /* Make sure the updates to rest of the descriptor are
+ * performed before transferring ownership.
+ */
+ wmb();
+ bdp->cbd_sc = cpu_to_fec16(status);
+
+ /* Update BD pointer to next entry */
+ bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
+
+ /* Doing this here will keep the FEC running while we process
+ * incoming frames. On a heavily loaded network, we should be
+ * able to keep up at the expense of system resources.
+ */
+ writel(0, rxq->bd.reg_desc_active);
+ }
+ rxq->bd.cur = bdp;
+ return pkt_received;
+}
+
+static int fec_enet_rx(struct net_device *ndev, int budget)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int i, done = 0;
+
+ /* Make sure that AVB queues are processed first. */
+ for (i = fep->num_rx_queues - 1; i >= 0; i--)
+ done += fec_enet_rx_queue(ndev, budget - done, i);
+
+ return done;
+}
+
+static bool fec_enet_collect_events(struct fec_enet_private *fep)
+{
+ uint int_events;
+
+ int_events = readl(fep->hwp + FEC_IEVENT);
+
+ /* Don't clear MDIO events, we poll for those */
+ int_events &= ~FEC_ENET_MII;
+
+ writel(int_events, fep->hwp + FEC_IEVENT);
+
+ return int_events != 0;
+}
+
+static irqreturn_t
+fec_enet_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ irqreturn_t ret = IRQ_NONE;
+
+ if (fec_enet_collect_events(fep) && fep->link) {
+ ret = IRQ_HANDLED;
+
+ if (napi_schedule_prep(&fep->napi)) {
+ /* Disable interrupts */
+ writel(0, fep->hwp + FEC_IMASK);
+ __napi_schedule(&fep->napi);
+ }
+ }
+
+ return ret;
+}
+
+static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
+{
+ struct net_device *ndev = napi->dev;
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int done = 0;
+
+ do {
+ done += fec_enet_rx(ndev, budget - done);
+ fec_enet_tx(ndev);
+ } while ((done < budget) && fec_enet_collect_events(fep));
+
+ if (done < budget) {
+ napi_complete_done(napi, done);
+ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+ }
+
+ return done;
+}
+
+/* ------------------------------------------------------------------------- */
+static int fec_get_mac(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ unsigned char *iap, tmpaddr[ETH_ALEN];
+ int ret;
+
+ /*
+	 * try to get the MAC address in the following order:
+ *
+ * 1) module parameter via kernel command line in form
+ * fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
+ */
+ iap = macaddr;
+
+ /*
+ * 2) from device tree data
+ */
+ if (!is_valid_ether_addr(iap)) {
+ struct device_node *np = fep->pdev->dev.of_node;
+ if (np) {
+ ret = of_get_mac_address(np, tmpaddr);
+ if (!ret)
+ iap = tmpaddr;
+ else if (ret == -EPROBE_DEFER)
+ return ret;
+ }
+ }
+
+ /*
+ * 3) from flash or fuse (via platform data)
+ */
+ if (!is_valid_ether_addr(iap)) {
+#ifdef CONFIG_M5272
+ if (FEC_FLASHMAC)
+ iap = (unsigned char *)FEC_FLASHMAC;
+#else
+ struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
+
+ if (pdata)
+ iap = (unsigned char *)&pdata->mac;
+#endif
+ }
+
+ /*
+	 * 4) FEC MAC registers set by the bootloader
+ */
+ if (!is_valid_ether_addr(iap)) {
+ *((__be32 *) &tmpaddr[0]) =
+ cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
+ *((__be16 *) &tmpaddr[4]) =
+ cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
+ iap = &tmpaddr[0];
+ }
+
+ /*
+	 * 5) random MAC address
+ */
+ if (!is_valid_ether_addr(iap)) {
+ /* Report it and use a random ethernet address instead */
+ dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap);
+ eth_hw_addr_random(ndev);
+ dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n",
+ ndev->dev_addr);
+ return 0;
+ }
+
+	/* Adjust the MAC by dev_id if the shared module parameter macaddr is used */
+ eth_hw_addr_gen(ndev, iap, iap == macaddr ? fep->dev_id : 0);
+
+ return 0;
+}
+
+/* ------------------------------------------------------------------------- */
+
+/*
+ * Phy section
+ */
+static void fec_enet_adjust_link(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct phy_device *phy_dev = ndev->phydev;
+ int status_change = 0;
+
+ /*
+ * If the netdev is down, or is going down, we're not interested
+ * in link state events, so just mark our idea of the link as down
+ * and ignore the event.
+ */
+ if (!netif_running(ndev) || !netif_device_present(ndev)) {
+ fep->link = 0;
+ } else if (phy_dev->link) {
+ if (!fep->link) {
+ fep->link = phy_dev->link;
+ status_change = 1;
+ }
+
+ if (fep->full_duplex != phy_dev->duplex) {
+ fep->full_duplex = phy_dev->duplex;
+ status_change = 1;
+ }
+
+ if (phy_dev->speed != fep->speed) {
+ fep->speed = phy_dev->speed;
+ status_change = 1;
+ }
+
+		/* if any of the above changed, restart the FEC */
+ if (status_change) {
+ netif_stop_queue(ndev);
+ napi_disable(&fep->napi);
+ netif_tx_lock_bh(ndev);
+ fec_restart(ndev);
+ netif_tx_wake_all_queues(ndev);
+ netif_tx_unlock_bh(ndev);
+ napi_enable(&fep->napi);
+ }
+ } else {
+ if (fep->link) {
+ netif_stop_queue(ndev);
+ napi_disable(&fep->napi);
+ netif_tx_lock_bh(ndev);
+ fec_stop(ndev);
+ netif_tx_unlock_bh(ndev);
+ napi_enable(&fep->napi);
+ fep->link = phy_dev->link;
+ status_change = 1;
+ }
+ }
+
+ if (status_change)
+ phy_print_status(phy_dev);
+}
+
+static int fec_enet_mdio_wait(struct fec_enet_private *fep)
+{
+ uint ievent;
+ int ret;
+
+ ret = readl_poll_timeout_atomic(fep->hwp + FEC_IEVENT, ievent,
+ ievent & FEC_ENET_MII, 2, 30000);
+
+ if (!ret)
+ writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
+
+ return ret;
+}
+
+static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+ struct fec_enet_private *fep = bus->priv;
+ struct device *dev = &fep->pdev->dev;
+ int ret = 0, frame_start, frame_addr, frame_op;
+ bool is_c45 = !!(regnum & MII_ADDR_C45);
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ if (is_c45) {
+ frame_start = FEC_MMFR_ST_C45;
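+		/* Clause 45 uses an indirect access: the address frame below
+		 * carries the 16-bit register number in its data field and
+		 * the MMD number in the RA field; the actual read frame then
+		 * follows.
+		 */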
+
+ /* write address */
+ frame_addr = (regnum >> 16);
+ writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
+ FEC_MMFR_TA | (regnum & 0xFFFF),
+ fep->hwp + FEC_MII_DATA);
+
+ /* wait for end of transfer */
+ ret = fec_enet_mdio_wait(fep);
+ if (ret) {
+ netdev_err(fep->netdev, "MDIO address write timeout\n");
+ goto out;
+ }
+
+ frame_op = FEC_MMFR_OP_READ_C45;
+
+ } else {
+ /* C22 read */
+ frame_op = FEC_MMFR_OP_READ;
+ frame_start = FEC_MMFR_ST;
+ frame_addr = regnum;
+ }
+
+ /* start a read op */
+ writel(frame_start | frame_op |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
+ FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
+
+ /* wait for end of transfer */
+ ret = fec_enet_mdio_wait(fep);
+ if (ret) {
+ netdev_err(fep->netdev, "MDIO read timeout\n");
+ goto out;
+ }
+
+ ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
+
+out:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
+{
+ struct fec_enet_private *fep = bus->priv;
+ struct device *dev = &fep->pdev->dev;
+ int ret, frame_start, frame_addr;
+ bool is_c45 = !!(regnum & MII_ADDR_C45);
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ if (is_c45) {
+ frame_start = FEC_MMFR_ST_C45;
+
+ /* write address */
+ frame_addr = (regnum >> 16);
+ writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
+ FEC_MMFR_TA | (regnum & 0xFFFF),
+ fep->hwp + FEC_MII_DATA);
+
+ /* wait for end of transfer */
+ ret = fec_enet_mdio_wait(fep);
+ if (ret) {
+ netdev_err(fep->netdev, "MDIO address write timeout\n");
+ goto out;
+ }
+ } else {
+ /* C22 write */
+ frame_start = FEC_MMFR_ST;
+ frame_addr = regnum;
+ }
+
+ /* start a write op */
+ writel(frame_start | FEC_MMFR_OP_WRITE |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
+ FEC_MMFR_TA | FEC_MMFR_DATA(value),
+ fep->hwp + FEC_MII_DATA);
+
+ /* wait for end of transfer */
+ ret = fec_enet_mdio_wait(fep);
+ if (ret)
+ netdev_err(fep->netdev, "MDIO write timeout\n");
+
+out:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct phy_device *phy_dev = ndev->phydev;
+
+ if (phy_dev) {
+ phy_reset_after_clk_enable(phy_dev);
+ } else if (fep->phy_node) {
+		/*
+		 * If the PHY is not yet bound to the MAC, but an OF PHY node
+		 * and a matching PHY device instance already exist, use the
+		 * OF node to obtain the PHY device instance and trigger the
+		 * PHY reset on it.
+		 */
+ phy_dev = of_phy_find_device(fep->phy_node);
+ phy_reset_after_clk_enable(phy_dev);
+ put_device(&phy_dev->mdio.dev);
+ }
+}
+
+static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int ret;
+
+ if (enable) {
+ ret = clk_prepare_enable(fep->clk_enet_out);
+ if (ret)
+ return ret;
+
+ if (fep->clk_ptp) {
+ mutex_lock(&fep->ptp_clk_mutex);
+ ret = clk_prepare_enable(fep->clk_ptp);
+ if (ret) {
+ mutex_unlock(&fep->ptp_clk_mutex);
+ goto failed_clk_ptp;
+ } else {
+ fep->ptp_clk_on = true;
+ }
+ mutex_unlock(&fep->ptp_clk_mutex);
+ }
+
+ ret = clk_prepare_enable(fep->clk_ref);
+ if (ret)
+ goto failed_clk_ref;
+
+ ret = clk_prepare_enable(fep->clk_2x_txclk);
+ if (ret)
+ goto failed_clk_2x_txclk;
+
+ fec_enet_phy_reset_after_clk_enable(ndev);
+ } else {
+ clk_disable_unprepare(fep->clk_enet_out);
+ if (fep->clk_ptp) {
+ mutex_lock(&fep->ptp_clk_mutex);
+ clk_disable_unprepare(fep->clk_ptp);
+ fep->ptp_clk_on = false;
+ mutex_unlock(&fep->ptp_clk_mutex);
+ }
+ clk_disable_unprepare(fep->clk_ref);
+ clk_disable_unprepare(fep->clk_2x_txclk);
+ }
+
+ return 0;
+
+failed_clk_2x_txclk:
+ if (fep->clk_ref)
+ clk_disable_unprepare(fep->clk_ref);
+failed_clk_ref:
+ if (fep->clk_ptp) {
+ mutex_lock(&fep->ptp_clk_mutex);
+ clk_disable_unprepare(fep->clk_ptp);
+ fep->ptp_clk_on = false;
+ mutex_unlock(&fep->ptp_clk_mutex);
+ }
+failed_clk_ptp:
+ clk_disable_unprepare(fep->clk_enet_out);
+
+ return ret;
+}
+
+static int fec_enet_parse_rgmii_delay(struct fec_enet_private *fep,
+ struct device_node *np)
+{
+ u32 rgmii_tx_delay, rgmii_rx_delay;
+
+ /* For rgmii tx internal delay, valid values are 0ps and 2000ps */
+ if (!of_property_read_u32(np, "tx-internal-delay-ps", &rgmii_tx_delay)) {
+ if (rgmii_tx_delay != 0 && rgmii_tx_delay != 2000) {
+ dev_err(&fep->pdev->dev, "The only allowed RGMII TX delay values are: 0ps, 2000ps");
+ return -EINVAL;
+ } else if (rgmii_tx_delay == 2000) {
+ fep->rgmii_txc_dly = true;
+ }
+ }
+
+ /* For rgmii rx internal delay, valid values are 0ps and 2000ps */
+ if (!of_property_read_u32(np, "rx-internal-delay-ps", &rgmii_rx_delay)) {
+ if (rgmii_rx_delay != 0 && rgmii_rx_delay != 2000) {
+ dev_err(&fep->pdev->dev, "The only allowed RGMII RX delay values are: 0ps, 2000ps");
+ return -EINVAL;
+ } else if (rgmii_rx_delay == 2000) {
+ fep->rgmii_rxc_dly = true;
+ }
+ }
+
+ return 0;
+}
+
+static int fec_enet_mii_probe(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct phy_device *phy_dev = NULL;
+ char mdio_bus_id[MII_BUS_ID_SIZE];
+ char phy_name[MII_BUS_ID_SIZE + 3];
+ int phy_id;
+ int dev_id = fep->dev_id;
+
+ if (fep->phy_node) {
+ phy_dev = of_phy_connect(ndev, fep->phy_node,
+ &fec_enet_adjust_link, 0,
+ fep->phy_interface);
+ if (!phy_dev) {
+ netdev_err(ndev, "Unable to connect to phy\n");
+ return -ENODEV;
+ }
+ } else {
+ /* check for attached phy */
+ for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
+ if (!mdiobus_is_registered_device(fep->mii_bus, phy_id))
+ continue;
+ if (dev_id--)
+ continue;
+ strscpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
+ break;
+ }
+
+ if (phy_id >= PHY_MAX_ADDR) {
+ netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
+ strscpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
+ phy_id = 0;
+ }
+
+ snprintf(phy_name, sizeof(phy_name),
+ PHY_ID_FMT, mdio_bus_id, phy_id);
+ phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
+ fep->phy_interface);
+ }
+
+ if (IS_ERR(phy_dev)) {
+ netdev_err(ndev, "could not attach to PHY\n");
+ return PTR_ERR(phy_dev);
+ }
+
+ /* mask with MAC supported features */
+ if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
+ phy_set_max_speed(phy_dev, 1000);
+ phy_remove_link_mode(phy_dev,
+ ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+#if !defined(CONFIG_M5272)
+ phy_support_sym_pause(phy_dev);
+#endif
+	} else {
+		phy_set_max_speed(phy_dev, 100);
+	}
+
+ fep->link = 0;
+ fep->full_duplex = 0;
+
+ phy_dev->mac_managed_pm = 1;
+
+ phy_attached_info(phy_dev);
+
+ return 0;
+}
+
+static int fec_enet_mii_init(struct platform_device *pdev)
+{
+ static struct mii_bus *fec0_mii_bus;
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ bool suppress_preamble = false;
+ struct device_node *node;
+ int err = -ENXIO;
+ u32 mii_speed, holdtime;
+ u32 bus_freq;
+
+	/*
+	 * The i.MX28 dual FEC interfaces are not equal.
+	 * Here are the differences:
+	 *
+	 *  - fec0 supports MII & RMII modes while fec1 only supports RMII
+	 *  - fec0 acts as the 1588 time master while fec1 is slave
+	 *  - external PHYs can only be configured by fec0
+	 *
+	 * That is to say fec1 cannot work independently. It only works
+	 * when fec0 is working. The reason behind this design is that the
+	 * second interface is added primarily for switch mode.
+	 *
+	 * Because of the last point above, both PHYs are attached to the
+	 * fec0 MDIO interface in the board design and need to be configured
+	 * through the fec0 mii_bus.
+	 */
+ if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
+ /* fec1 uses fec0 mii_bus */
+ if (mii_cnt && fec0_mii_bus) {
+ fep->mii_bus = fec0_mii_bus;
+ mii_cnt++;
+ return 0;
+ }
+ return -ENOENT;
+ }
+
+ bus_freq = 2500000; /* 2.5MHz by default */
+ node = of_get_child_by_name(pdev->dev.of_node, "mdio");
+ if (node) {
+ of_property_read_u32(node, "clock-frequency", &bus_freq);
+ suppress_preamble = of_property_read_bool(node,
+ "suppress-preamble");
+ }
+
+	/*
+	 * Set MII speed (= clk_get_rate() / 2 * phy_speed)
+	 *
+	 * The formula for the FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
+	 * for the ENET-MAC it is 'ref_freq / ((MII_SPEED + 1) x 2)'. The
+	 * i.MX28 Reference Manual has an error here, which is corrected in
+	 * the i.MX6Q documentation.
+	 */
+ mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), bus_freq * 2);
+ if (fep->quirks & FEC_QUIRK_ENET_MAC)
+ mii_speed--;
+ if (mii_speed > 63) {
+ dev_err(&pdev->dev,
+ "fec clock (%lu) too fast to get right mii speed\n",
+ clk_get_rate(fep->clk_ipg));
+ err = -EINVAL;
+ goto err_out;
+ }
+
+	/*
+	 * The i.MX28 and i.MX6 types have another field in the MSCR (aka
+	 * MII_SPEED) register that defines the MDIO output hold time. Earlier
+	 * versions are RAZ there, so just ignore the difference and always
+	 * write the register.
+	 * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns.
+	 * HOLDTIME + 1 is the number of clk cycles the FEC holds the output.
+	 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
+	 * Given that ceil(clkrate / 5000000) <= 64, the calculation for
+	 * holdtime cannot result in a value greater than 3.
+	 */
+ holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;
+
+ fep->phy_speed = mii_speed << 1 | holdtime << 8;
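+	/* Worked example (assuming a 66 MHz clk_ipg and the default 2.5 MHz
+	 * MDC on an ENET-MAC): mii_speed = DIV_ROUND_UP(66000000, 5000000) - 1
+	 * = 13 and holdtime = DIV_ROUND_UP(66000000, 100000000) - 1 = 0, so
+	 * phy_speed = 13 << 1 = 0x1a and MDC runs at 66 MHz / 28 ~= 2.36 MHz.
+	 */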
+
+ if (suppress_preamble)
+ fep->phy_speed |= BIT(7);
+
+ if (fep->quirks & FEC_QUIRK_CLEAR_SETUP_MII) {
+		/* Clear MMFR to avoid generating an MII event when writing
+		 * MSCR. An MII event is generated when:
+		 * - writing MSCR:
+		 *	- mmfr[31:0]_not_zero & mscr[7:0]_is_zero &
+		 *	  mscr_reg_data_in[7:0] != 0
+		 * - writing MMFR:
+		 *	- mscr[7:0]_not_zero
+		 */
+ writel(0, fep->hwp + FEC_MII_DATA);
+ }
+
+ writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+
+ /* Clear any pending transaction complete indication */
+ writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
+
+ fep->mii_bus = mdiobus_alloc();
+ if (fep->mii_bus == NULL) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ fep->mii_bus->name = "fec_enet_mii_bus";
+ fep->mii_bus->read = fec_enet_mdio_read;
+ fep->mii_bus->write = fec_enet_mdio_write;
+ snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, fep->dev_id + 1);
+ fep->mii_bus->priv = fep;
+ fep->mii_bus->parent = &pdev->dev;
+
+ err = of_mdiobus_register(fep->mii_bus, node);
+ if (err)
+ goto err_out_free_mdiobus;
+ of_node_put(node);
+
+ mii_cnt++;
+
+ /* save fec0 mii_bus */
+ if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
+ fec0_mii_bus = fep->mii_bus;
+
+ return 0;
+
+err_out_free_mdiobus:
+ mdiobus_free(fep->mii_bus);
+err_out:
+ of_node_put(node);
+ return err;
+}
+
+static void fec_enet_mii_remove(struct fec_enet_private *fep)
+{
+ if (--mii_cnt == 0) {
+ mdiobus_unregister(fep->mii_bus);
+ mdiobus_free(fep->mii_bus);
+ }
+}
+
+static void fec_enet_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ strscpy(info->driver, fep->pdev->dev.driver->name,
+ sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
+}
+
+static int fec_enet_get_regs_len(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct resource *r;
+ int s = 0;
+
+ r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0);
+ if (r)
+ s = resource_size(r);
+
+ return s;
+}
+
+/* List of registers that can safely be read to dump them with ethtool */
+#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
+ defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
+static __u32 fec_enet_register_version = 2;
+static u32 fec_enet_register_offset[] = {
+ FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
+ FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
+ FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1,
+ FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH,
+ FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW,
+ FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1,
+ FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2,
+ FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0,
+ FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
+ FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2,
+ FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1,
+ FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME,
+ RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
+ RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
+ RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
+ RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
+ RMON_T_P_GTE2048, RMON_T_OCTETS,
+ IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
+ IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
+ IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
+ RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
+ RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
+ RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
+ RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
+ RMON_R_P_GTE2048, RMON_R_OCTETS,
+ IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
+ IEEE_R_FDXFC, IEEE_R_OCTETS_OK
+};
+/* for i.MX6ul */
+static u32 fec_enet_register_offset_6ul[] = {
+ FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
+ FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
+ FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_RXIC0,
+ FEC_HASH_TABLE_HIGH, FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH,
+ FEC_GRP_HASH_TABLE_LOW, FEC_X_WMRK, FEC_R_DES_START_0,
+ FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
+ FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC,
+ RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
+ RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
+ RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
+ RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
+ RMON_T_P_GTE2048, RMON_T_OCTETS,
+ IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
+ IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
+ IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
+ RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
+ RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
+ RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
+ RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
+ RMON_R_P_GTE2048, RMON_R_OCTETS,
+ IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
+ IEEE_R_FDXFC, IEEE_R_OCTETS_OK
+};
+#else
+static __u32 fec_enet_register_version = 1;
+static u32 fec_enet_register_offset[] = {
+ FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
+ FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
+ FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED,
+ FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL,
+ FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH,
+ FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0,
+ FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0,
+ FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0,
+ FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2
+};
+#endif
+
+static void fec_enet_get_regs(struct net_device *ndev,
+ struct ethtool_regs *regs, void *regbuf)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
+ struct device *dev = &fep->pdev->dev;
+ u32 *buf = (u32 *)regbuf;
+ u32 i, off;
+ int ret;
+#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
+ defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
+ u32 *reg_list;
+ u32 reg_cnt;
+
+ if (!of_machine_is_compatible("fsl,imx6ul")) {
+ reg_list = fec_enet_register_offset;
+ reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
+ } else {
+ reg_list = fec_enet_register_offset_6ul;
+ reg_cnt = ARRAY_SIZE(fec_enet_register_offset_6ul);
+ }
+#else
+ /* coldfire */
+ static u32 *reg_list = fec_enet_register_offset;
+ static const u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
+#endif
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return;
+
+ regs->version = fec_enet_register_version;
+
+ memset(buf, 0, regs->len);
+
+ for (i = 0; i < reg_cnt; i++) {
+ off = reg_list[i];
+
+ if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&
+ !(fep->quirks & FEC_QUIRK_HAS_FRREG))
+ continue;
+
+ off >>= 2;
+ buf[off] = readl(&theregs[off]);
+ }
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+}
+
+static int fec_enet_get_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ if (fep->bufdesc_ex) {
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ if (fep->ptp_clock)
+ info->phc_index = ptp_clock_index(fep->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+ return 0;
+ } else {
+ return ethtool_op_get_ts_info(ndev, info);
+ }
+}
+
+#if !defined(CONFIG_M5272)
+
+static void fec_enet_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
+ pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
+ pause->rx_pause = pause->tx_pause;
+}
+
+static int fec_enet_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ if (!ndev->phydev)
+ return -ENODEV;
+
+ if (pause->tx_pause != pause->rx_pause) {
+ netdev_info(ndev,
+			    "hardware only supports enabling/disabling both tx and rx pause");
+ return -EINVAL;
+ }
+
+ fep->pause_flag = 0;
+
+	/* tx pause must be the same as rx pause */
+ fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
+ fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
+
+ phy_set_sym_pause(ndev->phydev, pause->rx_pause, pause->tx_pause,
+ pause->autoneg);
+
+ if (pause->autoneg) {
+ if (netif_running(ndev))
+ fec_stop(ndev);
+ phy_start_aneg(ndev->phydev);
+ }
+ if (netif_running(ndev)) {
+ napi_disable(&fep->napi);
+ netif_tx_lock_bh(ndev);
+ fec_restart(ndev);
+ netif_tx_wake_all_queues(ndev);
+ netif_tx_unlock_bh(ndev);
+ napi_enable(&fep->napi);
+ }
+
+ return 0;
+}
+
+static const struct fec_stat {
+ char name[ETH_GSTRING_LEN];
+ u16 offset;
+} fec_stats[] = {
+ /* RMON TX */
+ { "tx_dropped", RMON_T_DROP },
+ { "tx_packets", RMON_T_PACKETS },
+ { "tx_broadcast", RMON_T_BC_PKT },
+ { "tx_multicast", RMON_T_MC_PKT },
+ { "tx_crc_errors", RMON_T_CRC_ALIGN },
+ { "tx_undersize", RMON_T_UNDERSIZE },
+ { "tx_oversize", RMON_T_OVERSIZE },
+ { "tx_fragment", RMON_T_FRAG },
+ { "tx_jabber", RMON_T_JAB },
+ { "tx_collision", RMON_T_COL },
+ { "tx_64byte", RMON_T_P64 },
+ { "tx_65to127byte", RMON_T_P65TO127 },
+ { "tx_128to255byte", RMON_T_P128TO255 },
+ { "tx_256to511byte", RMON_T_P256TO511 },
+ { "tx_512to1023byte", RMON_T_P512TO1023 },
+ { "tx_1024to2047byte", RMON_T_P1024TO2047 },
+ { "tx_GTE2048byte", RMON_T_P_GTE2048 },
+ { "tx_octets", RMON_T_OCTETS },
+
+ /* IEEE TX */
+ { "IEEE_tx_drop", IEEE_T_DROP },
+ { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
+ { "IEEE_tx_1col", IEEE_T_1COL },
+ { "IEEE_tx_mcol", IEEE_T_MCOL },
+ { "IEEE_tx_def", IEEE_T_DEF },
+ { "IEEE_tx_lcol", IEEE_T_LCOL },
+ { "IEEE_tx_excol", IEEE_T_EXCOL },
+ { "IEEE_tx_macerr", IEEE_T_MACERR },
+ { "IEEE_tx_cserr", IEEE_T_CSERR },
+ { "IEEE_tx_sqe", IEEE_T_SQE },
+ { "IEEE_tx_fdxfc", IEEE_T_FDXFC },
+ { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
+
+ /* RMON RX */
+ { "rx_packets", RMON_R_PACKETS },
+ { "rx_broadcast", RMON_R_BC_PKT },
+ { "rx_multicast", RMON_R_MC_PKT },
+ { "rx_crc_errors", RMON_R_CRC_ALIGN },
+ { "rx_undersize", RMON_R_UNDERSIZE },
+ { "rx_oversize", RMON_R_OVERSIZE },
+ { "rx_fragment", RMON_R_FRAG },
+ { "rx_jabber", RMON_R_JAB },
+ { "rx_64byte", RMON_R_P64 },
+ { "rx_65to127byte", RMON_R_P65TO127 },
+ { "rx_128to255byte", RMON_R_P128TO255 },
+ { "rx_256to511byte", RMON_R_P256TO511 },
+ { "rx_512to1023byte", RMON_R_P512TO1023 },
+ { "rx_1024to2047byte", RMON_R_P1024TO2047 },
+ { "rx_GTE2048byte", RMON_R_P_GTE2048 },
+ { "rx_octets", RMON_R_OCTETS },
+
+ /* IEEE RX */
+ { "IEEE_rx_drop", IEEE_R_DROP },
+ { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
+ { "IEEE_rx_crc", IEEE_R_CRC },
+ { "IEEE_rx_align", IEEE_R_ALIGN },
+ { "IEEE_rx_macerr", IEEE_R_MACERR },
+ { "IEEE_rx_fdxfc", IEEE_R_FDXFC },
+ { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
+};
+
+#define FEC_STATS_SIZE (ARRAY_SIZE(fec_stats) * sizeof(u64))
+
+static void fec_enet_update_ethtool_stats(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
+ fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
+}
+
+static void fec_enet_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+
+ if (netif_running(dev))
+ fec_enet_update_ethtool_stats(dev);
+
+ memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
+}
+
+static void fec_enet_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ int i;
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ fec_stats[i].name, ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_TEST:
+ net_selftest_get_strings(data);
+ break;
+ }
+}
+
+static int fec_enet_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(fec_stats);
+ case ETH_SS_TEST:
+ return net_selftest_get_count();
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void fec_enet_clear_ethtool_stats(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ int i;
+
+ /* Disable MIB statistics counters */
+ writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT);
+
+ for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
+ writel(0, fep->hwp + fec_stats[i].offset);
+
+	/* Re-enable the MIB statistics counters */
+ writel(0, fep->hwp + FEC_MIB_CTRLSTAT);
+}
+
+#else /* !defined(CONFIG_M5272) */
+#define FEC_STATS_SIZE 0
+static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
+{
+}
+
+static inline void fec_enet_clear_ethtool_stats(struct net_device *dev)
+{
+}
+#endif /* !defined(CONFIG_M5272) */
+
+/* The ITR clock source is the enet system clock (clk_ahb).
+ * The TCTT unit is cycle_ns * 64 cycles, so the ICTT value is
+ * X us / (cycle_ns * 64).
+ */
+static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
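+	/* e.g. assuming a 66 MHz AHB clock, one ICTT tick is 64 / 66 MHz
+	 * ~= 0.97 us, so 100 us maps to
+	 * 100 * (66000000 / 64000) / 1000 = 103 ticks.
+	 */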
+ return us * (fep->itr_clk_rate / 64000) / 1000;
+}
+
+/* Set threshold for interrupt coalescing */
+static void fec_enet_itr_coal_set(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int rx_itr, tx_itr;
+
+ /* Must be greater than zero to avoid unpredictable behavior */
+ if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
+ !fep->tx_time_itr || !fep->tx_pkts_itr)
+ return;
+
+ /* Select enet system clock as Interrupt Coalescing
+ * timer Clock Source
+ */
+ rx_itr = FEC_ITR_CLK_SEL;
+ tx_itr = FEC_ITR_CLK_SEL;
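+	/* Each ITR register combines EN, CLK_SEL, ICFT (the frame count
+	 * threshold) and ICTT (the timer threshold in 64-cycle units).
+	 */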
+
+ /* set ICFT and ICTT */
+ rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
+ rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
+ tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
+ tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));
+
+ rx_itr |= FEC_ITR_EN;
+ tx_itr |= FEC_ITR_EN;
+
+ writel(tx_itr, fep->hwp + FEC_TXIC0);
+ writel(rx_itr, fep->hwp + FEC_RXIC0);
+ if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
+ writel(tx_itr, fep->hwp + FEC_TXIC1);
+ writel(rx_itr, fep->hwp + FEC_RXIC1);
+ writel(tx_itr, fep->hwp + FEC_TXIC2);
+ writel(rx_itr, fep->hwp + FEC_RXIC2);
+ }
+}
+
+static int fec_enet_get_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
+ return -EOPNOTSUPP;
+
+ ec->rx_coalesce_usecs = fep->rx_time_itr;
+ ec->rx_max_coalesced_frames = fep->rx_pkts_itr;
+
+ ec->tx_coalesce_usecs = fep->tx_time_itr;
+ ec->tx_max_coalesced_frames = fep->tx_pkts_itr;
+
+ return 0;
+}
+
+static int fec_enet_set_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct device *dev = &fep->pdev->dev;
+ unsigned int cycle;
+
+ if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
+ return -EOPNOTSUPP;
+
+ if (ec->rx_max_coalesced_frames > 255) {
+ dev_err(dev, "Rx coalesced frames exceed hardware limitation\n");
+ return -EINVAL;
+ }
+
+ if (ec->tx_max_coalesced_frames > 255) {
+		dev_err(dev, "Tx coalesced frames exceed hardware limitation\n");
+ return -EINVAL;
+ }
+
+ cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
+ if (cycle > 0xFFFF) {
+		dev_err(dev, "Rx coalesced usecs exceed hardware limitation\n");
+ return -EINVAL;
+ }
+
+ cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
+ if (cycle > 0xFFFF) {
+		dev_err(dev, "Tx coalesced usecs exceed hardware limitation\n");
+ return -EINVAL;
+ }
+
+ fep->rx_time_itr = ec->rx_coalesce_usecs;
+ fep->rx_pkts_itr = ec->rx_max_coalesced_frames;
+
+ fep->tx_time_itr = ec->tx_coalesce_usecs;
+ fep->tx_pkts_itr = ec->tx_max_coalesced_frames;
+
+ fec_enet_itr_coal_set(ndev);
+
+ return 0;
+}
+
+static int fec_enet_get_tunable(struct net_device *netdev,
+ const struct ethtool_tunable *tuna,
+ void *data)
+{
+ struct fec_enet_private *fep = netdev_priv(netdev);
+ int ret = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = fep->rx_copybreak;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int fec_enet_set_tunable(struct net_device *netdev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct fec_enet_private *fep = netdev_priv(netdev);
+ int ret = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ fep->rx_copybreak = *(u32 *)data;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* The LPI Sleep Ts count is based on the tx clock (clk_ref).
+ * The LPI sleep count value = X us / cycle_ns.
+ */
+static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
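+	/* e.g. assuming a 125 MHz clk_ref (cycle_ns = 8), a 1000 us LPI
+	 * timer maps to 1000 * (125000000 / 1000) / 1000 = 125000 cycles.
+	 */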
+ return us * (fep->clk_ref_rate / 1000) / 1000;
+}
+
+static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct ethtool_eee *p = &fep->eee;
+ unsigned int sleep_cycle, wake_cycle;
+ int ret = 0;
+
+ if (enable) {
+ ret = phy_init_eee(ndev->phydev, false);
+ if (ret)
+ return ret;
+
+ sleep_cycle = fec_enet_us_to_tx_cycle(ndev, p->tx_lpi_timer);
+ wake_cycle = sleep_cycle;
+ } else {
+ sleep_cycle = 0;
+ wake_cycle = 0;
+ }
+
+ p->tx_lpi_enabled = enable;
+ p->eee_enabled = enable;
+ p->eee_active = enable;
+
+ writel(sleep_cycle, fep->hwp + FEC_LPI_SLEEP);
+ writel(wake_cycle, fep->hwp + FEC_LPI_WAKE);
+
+ return 0;
+}
+
+static int
+fec_enet_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct ethtool_eee *p = &fep->eee;
+
+ if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
+ return -EOPNOTSUPP;
+
+ if (!netif_running(ndev))
+ return -ENETDOWN;
+
+ edata->eee_enabled = p->eee_enabled;
+ edata->eee_active = p->eee_active;
+ edata->tx_lpi_timer = p->tx_lpi_timer;
+ edata->tx_lpi_enabled = p->tx_lpi_enabled;
+
+ return phy_ethtool_get_eee(ndev->phydev, edata);
+}
+
+static int
+fec_enet_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct ethtool_eee *p = &fep->eee;
+ int ret = 0;
+
+ if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
+ return -EOPNOTSUPP;
+
+ if (!netif_running(ndev))
+ return -ENETDOWN;
+
+ p->tx_lpi_timer = edata->tx_lpi_timer;
+
+ if (!edata->eee_enabled || !edata->tx_lpi_enabled ||
+ !edata->tx_lpi_timer)
+ ret = fec_enet_eee_mode_set(ndev, false);
+ else
+ ret = fec_enet_eee_mode_set(ndev, true);
+
+ if (ret)
+ return ret;
+
+ return phy_ethtool_set_eee(ndev->phydev, edata);
+}
+
+static void
+fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) {
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0;
+ } else {
+ wol->supported = wol->wolopts = 0;
+ }
+}
+
+static int
+fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET))
+ return -EINVAL;
+
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EINVAL;
+
+ device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
+ if (device_may_wakeup(&ndev->dev))
+ fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
+ else
+ fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
+
+ return 0;
+}
+
+static const struct ethtool_ops fec_enet_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
+ .get_drvinfo = fec_enet_get_drvinfo,
+ .get_regs_len = fec_enet_get_regs_len,
+ .get_regs = fec_enet_get_regs,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = fec_enet_get_coalesce,
+ .set_coalesce = fec_enet_set_coalesce,
+#ifndef CONFIG_M5272
+ .get_pauseparam = fec_enet_get_pauseparam,
+ .set_pauseparam = fec_enet_set_pauseparam,
+ .get_strings = fec_enet_get_strings,
+ .get_ethtool_stats = fec_enet_get_ethtool_stats,
+ .get_sset_count = fec_enet_get_sset_count,
+#endif
+ .get_ts_info = fec_enet_get_ts_info,
+ .get_tunable = fec_enet_get_tunable,
+ .set_tunable = fec_enet_set_tunable,
+ .get_wol = fec_enet_get_wol,
+ .set_wol = fec_enet_set_wol,
+ .get_eee = fec_enet_get_eee,
+ .set_eee = fec_enet_set_eee,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .self_test = net_selftest,
+};
+
+static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
+
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ if (!phydev)
+ return -ENODEV;
+
+ if (fep->bufdesc_ex) {
+ bool use_fec_hwts = !phy_has_hwtstamp(phydev);
+
+ if (cmd == SIOCSHWTSTAMP) {
+ if (use_fec_hwts)
+ return fec_ptp_set(ndev, rq);
+ fec_ptp_disable_hwts(ndev);
+ } else if (cmd == SIOCGHWTSTAMP) {
+ if (use_fec_hwts)
+ return fec_ptp_get(ndev, rq);
+ }
+ }
+
+ return phy_mii_ioctl(phydev, rq, cmd);
+}
+
+static void fec_enet_free_buffers(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ unsigned int i;
+ struct sk_buff *skb;
+ struct fec_enet_priv_tx_q *txq;
+ struct fec_enet_priv_rx_q *rxq;
+ unsigned int q;
+
+ for (q = 0; q < fep->num_rx_queues; q++) {
+ rxq = fep->rx_queue[q];
+ for (i = 0; i < rxq->bd.ring_size; i++)
+ page_pool_put_full_page(rxq->page_pool, rxq->rx_skb_info[i].page, false);
+
+ if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+ page_pool_destroy(rxq->page_pool);
+ rxq->page_pool = NULL;
+ }
+
+ for (q = 0; q < fep->num_tx_queues; q++) {
+ txq = fep->tx_queue[q];
+ for (i = 0; i < txq->bd.ring_size; i++) {
+ kfree(txq->tx_bounce[i]);
+ txq->tx_bounce[i] = NULL;
+ skb = txq->tx_skbuff[i];
+ txq->tx_skbuff[i] = NULL;
+ dev_kfree_skb(skb);
+ }
+ }
+}
+
+static void fec_enet_free_queue(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int i;
+ struct fec_enet_priv_tx_q *txq;
+
+ for (i = 0; i < fep->num_tx_queues; i++)
+ if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
+ txq = fep->tx_queue[i];
+ dma_free_coherent(&fep->pdev->dev,
+ txq->bd.ring_size * TSO_HEADER_SIZE,
+ txq->tso_hdrs,
+ txq->tso_hdrs_dma);
+ }
+
+ for (i = 0; i < fep->num_rx_queues; i++)
+ kfree(fep->rx_queue[i]);
+ for (i = 0; i < fep->num_tx_queues; i++)
+ kfree(fep->tx_queue[i]);
+}
+
+static int fec_enet_alloc_queue(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int i;
+ int ret = 0;
+ struct fec_enet_priv_tx_q *txq;
+
+ for (i = 0; i < fep->num_tx_queues; i++) {
+ txq = kzalloc(sizeof(*txq), GFP_KERNEL);
+ if (!txq) {
+ ret = -ENOMEM;
+ goto alloc_failed;
+ }
+
+ fep->tx_queue[i] = txq;
+ txq->bd.ring_size = TX_RING_SIZE;
+ fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;
+
+ txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
+ txq->tx_wake_threshold =
+ (txq->bd.ring_size - txq->tx_stop_threshold) / 2;
+
+ txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
+ txq->bd.ring_size * TSO_HEADER_SIZE,
+ &txq->tso_hdrs_dma,
+ GFP_KERNEL);
+ if (!txq->tso_hdrs) {
+ ret = -ENOMEM;
+ goto alloc_failed;
+ }
+ }
+
+ for (i = 0; i < fep->num_rx_queues; i++) {
+ fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]),
+ GFP_KERNEL);
+ if (!fep->rx_queue[i]) {
+ ret = -ENOMEM;
+ goto alloc_failed;
+ }
+
+ fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE;
+ fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size;
+ }
+ return ret;
+
+alloc_failed:
+ fec_enet_free_queue(ndev);
+ return ret;
+}
+
+static int
+fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct fec_enet_priv_rx_q *rxq;
+ dma_addr_t phys_addr;
+ struct bufdesc *bdp;
+ struct page *page;
+ int i, err;
+
+ rxq = fep->rx_queue[queue];
+ bdp = rxq->bd.base;
+
+ err = fec_enet_create_page_pool(fep, rxq, rxq->bd.ring_size);
+ if (err < 0) {
+ netdev_err(ndev, "%s failed queue %d (%d)\n", __func__, queue, err);
+ return err;
+ }
+
+ for (i = 0; i < rxq->bd.ring_size; i++) {
+ page = page_pool_dev_alloc_pages(rxq->page_pool);
+ if (!page)
+ goto err_alloc;
+
+ phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM;
+ bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
+
+ rxq->rx_skb_info[i].page = page;
+ rxq->rx_skb_info[i].offset = FEC_ENET_XDP_HEADROOM;
+ bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
+
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+ ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
+ }
+
+ bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
+ }
+
+ /* Set the last buffer to wrap. */
+ bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
+ bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
+ return 0;
+
+ err_alloc:
+ fec_enet_free_buffers(ndev);
+ return -ENOMEM;
+}
+
+static int
+fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ unsigned int i;
+ struct bufdesc *bdp;
+ struct fec_enet_priv_tx_q *txq;
+
+ txq = fep->tx_queue[queue];
+ bdp = txq->bd.base;
+ for (i = 0; i < txq->bd.ring_size; i++) {
+ txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
+ if (!txq->tx_bounce[i])
+ goto err_alloc;
+
+ bdp->cbd_sc = cpu_to_fec16(0);
+ bdp->cbd_bufaddr = cpu_to_fec32(0);
+
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+ ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
+ }
+
+ bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
+ }
+
+ /* Set the last buffer to wrap. */
+ bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
+ bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
+
+ return 0;
+
+ err_alloc:
+ fec_enet_free_buffers(ndev);
+ return -ENOMEM;
+}
+
+static int fec_enet_alloc_buffers(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ unsigned int i;
+
+ for (i = 0; i < fep->num_rx_queues; i++)
+ if (fec_enet_alloc_rxq_buffers(ndev, i))
+ return -ENOMEM;
+
+ for (i = 0; i < fep->num_tx_queues; i++)
+ if (fec_enet_alloc_txq_buffers(ndev, i))
+ return -ENOMEM;
+ return 0;
+}
+
+static int
+fec_enet_open(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int ret;
+ bool reset_again;
+
+ ret = pm_runtime_resume_and_get(&fep->pdev->dev);
+ if (ret < 0)
+ return ret;
+
+ pinctrl_pm_select_default_state(&fep->pdev->dev);
+ ret = fec_enet_clk_enable(ndev, true);
+ if (ret)
+ goto clk_enable;
+
+	/* During the first fec_enet_open call the PHY has not been probed
+	 * yet, so the phy_reset_after_clk_enable() call within
+	 * fec_enet_clk_enable() fails. As this reset is needed to be sure the
+	 * PHY works correctly, check whether the reset has to be repeated
+	 * later, once the PHY has been probed.
+	 */
+ if (ndev->phydev && ndev->phydev->drv)
+ reset_again = false;
+ else
+ reset_again = true;
+
+ /* I should reset the ring buffers here, but I don't yet know
+ * a simple way to do that.
+ */
+
+ ret = fec_enet_alloc_buffers(ndev);
+ if (ret)
+ goto err_enet_alloc;
+
+ /* Init MAC prior to mii bus probe */
+ fec_restart(ndev);
+
+	/* Call phy_reset_after_clk_enable() again if it failed during
+	 * fec_enet_clk_enable() earlier because the PHY wasn't probed yet.
+	 */
+ if (reset_again)
+ fec_enet_phy_reset_after_clk_enable(ndev);
+
+	/* Probe and connect to the PHY when opening the interface */
+ ret = fec_enet_mii_probe(ndev);
+ if (ret)
+ goto err_enet_mii_probe;
+
+ if (fep->quirks & FEC_QUIRK_ERR006687)
+ imx6q_cpuidle_fec_irqs_used();
+
+ if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
+ cpu_latency_qos_add_request(&fep->pm_qos_req, 0);
+
+ napi_enable(&fep->napi);
+ phy_start(ndev->phydev);
+ netif_tx_start_all_queues(ndev);
+
+ device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
+ FEC_WOL_FLAG_ENABLE);
+
+ return 0;
+
+err_enet_mii_probe:
+ fec_enet_free_buffers(ndev);
+err_enet_alloc:
+ fec_enet_clk_enable(ndev, false);
+clk_enable:
+ pm_runtime_mark_last_busy(&fep->pdev->dev);
+ pm_runtime_put_autosuspend(&fep->pdev->dev);
+ pinctrl_pm_select_sleep_state(&fep->pdev->dev);
+ return ret;
+}
+
+static int
+fec_enet_close(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ phy_stop(ndev->phydev);
+
+ if (netif_device_present(ndev)) {
+ napi_disable(&fep->napi);
+ netif_tx_disable(ndev);
+ fec_stop(ndev);
+ }
+
+ phy_disconnect(ndev->phydev);
+
+ if (fep->quirks & FEC_QUIRK_ERR006687)
+ imx6q_cpuidle_fec_irqs_unused();
+
+ fec_enet_update_ethtool_stats(ndev);
+
+ fec_enet_clk_enable(ndev, false);
+ if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
+ cpu_latency_qos_remove_request(&fep->pm_qos_req);
+
+ pinctrl_pm_select_sleep_state(&fep->pdev->dev);
+ pm_runtime_mark_last_busy(&fep->pdev->dev);
+ pm_runtime_put_autosuspend(&fep->pdev->dev);
+
+ fec_enet_free_buffers(ndev);
+
+ return 0;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ * Skeleton taken from the sunlance driver.
+ * The CPM Ethernet implementation allows multicast as well as individual
+ * MAC address filtering. Some of the drivers check to make sure it is
+ * a group multicast address, and discard those that are not. This driver
+ * does the same for now; remove the test if individual filtering is also
+ * wanted (do the upper net layers want or support this kind of feature?).
+ */
+
+#define FEC_HASH_BITS 6 /* #bits in hash */
+
+static void set_multicast_list(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct netdev_hw_addr *ha;
+ unsigned int crc, tmp;
+ unsigned char hash;
+ unsigned int hash_high = 0, hash_low = 0;
+
+ if (ndev->flags & IFF_PROMISC) {
+ tmp = readl(fep->hwp + FEC_R_CNTRL);
+ tmp |= 0x8;
+ writel(tmp, fep->hwp + FEC_R_CNTRL);
+ return;
+ }
+
+ tmp = readl(fep->hwp + FEC_R_CNTRL);
+ tmp &= ~0x8;
+ writel(tmp, fep->hwp + FEC_R_CNTRL);
+
+ if (ndev->flags & IFF_ALLMULTI) {
+ /* Catch all multicast addresses, so set the
+ * filter to all 1's
+ */
+ writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+ writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+
+ return;
+ }
+
+	/* Add the addresses to the hash registers */
+ netdev_for_each_mc_addr(ha, ndev) {
+ /* calculate crc32 value of mac address */
+ crc = ether_crc_le(ndev->addr_len, ha->addr);
+
+		/* only the upper 6 bits (FEC_HASH_BITS) are used;
+		 * they select a specific bit in the hash registers
+		 */
+ hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
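+		/* e.g. hash 33 sets bit 1 of GRP_HASH_TABLE_HIGH and
+		 * hash 5 sets bit 5 of GRP_HASH_TABLE_LOW
+		 */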
+
+ if (hash > 31)
+ hash_high |= 1 << (hash - 32);
+ else
+ hash_low |= 1 << hash;
+ }
+
+ writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+ writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+}
+
+/* Set a MAC change in hardware. */
+static int
+fec_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct sockaddr *addr = p;
+
+ if (addr) {
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+ eth_hw_addr_set(ndev, addr->sa_data);
+ }
+
+	/* Add a netif status check here to avoid a system hang in the
+	 * following case:
+	 * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx;
+	 * After ethx is down all FEC clocks are gated off; a subsequent
+	 * register access would then hang the system.
+	 */
+ if (!netif_running(ndev))
+ return 0;
+
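+	/* The hardware stores the address big-endian: bytes 0-3 go into
+	 * FEC_ADDR_LOW and bytes 4-5 into the upper half of FEC_ADDR_HIGH.
+	 */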
+ writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
+ (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
+ fep->hwp + FEC_ADDR_LOW);
+ writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
+ fep->hwp + FEC_ADDR_HIGH);
+ return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * fec_poll_controller - FEC Poll controller function
+ * @dev: The FEC network adapter
+ *
+ * Polled functionality used by netconsole and others in non-interrupt mode
+ */
+static void fec_poll_controller(struct net_device *dev)
+{
+ int i;
+ struct fec_enet_private *fep = netdev_priv(dev);
+
+ for (i = 0; i < FEC_IRQ_NUM; i++) {
+ if (fep->irq[i] > 0) {
+ disable_irq(fep->irq[i]);
+ fec_enet_interrupt(fep->irq[i], dev);
+ enable_irq(fep->irq[i]);
+ }
+ }
+}
+#endif
+
+static inline void fec_enet_set_netdev_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct fec_enet_private *fep = netdev_priv(netdev);
+ netdev_features_t changed = features ^ netdev->features;
+
+ netdev->features = features;
+
+ /* Receive checksum has been changed */
+ if (changed & NETIF_F_RXCSUM) {
+ if (features & NETIF_F_RXCSUM)
+ fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
+ else
+ fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
+ }
+}
+
+static int fec_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct fec_enet_private *fep = netdev_priv(netdev);
+ netdev_features_t changed = features ^ netdev->features;
+
+ if (netif_running(netdev) && changed & NETIF_F_RXCSUM) {
+ napi_disable(&fep->napi);
+ netif_tx_lock_bh(netdev);
+ fec_stop(netdev);
+ fec_enet_set_netdev_features(netdev, features);
+ fec_restart(netdev);
+ netif_tx_wake_all_queues(netdev);
+ netif_tx_unlock_bh(netdev);
+ napi_enable(&fep->napi);
+ } else {
+ fec_enet_set_netdev_features(netdev, features);
+ }
+
+ return 0;
+}
+
+static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ u16 vlan_tag = 0;
+
+ if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
+ return netdev_pick_tx(ndev, skb, NULL);
+
+	/* A VLAN tag is present in the payload. */
+ if (eth_type_vlan(skb->protocol)) {
+ struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb);
+
+ vlan_tag = ntohs(vhdr->h_vlan_TCI);
+	/* A VLAN tag is present in the skb but not yet pushed into the payload. */
+ } else if (skb_vlan_tag_present(skb)) {
+ vlan_tag = skb->vlan_tci;
+ } else {
+ return vlan_tag;
+ }
+
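+	/* vlan_tag >> 13 extracts the 3-bit PCP field, which
+	 * fec_enet_vlan_pri_to_queue maps onto the AVB queues.
+	 */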
+ return fec_enet_vlan_pri_to_queue[vlan_tag >> 13];
+}
+
+static const struct net_device_ops fec_netdev_ops = {
+ .ndo_open = fec_enet_open,
+ .ndo_stop = fec_enet_close,
+ .ndo_start_xmit = fec_enet_start_xmit,
+ .ndo_select_queue = fec_enet_select_queue,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_tx_timeout = fec_timeout,
+ .ndo_set_mac_address = fec_set_mac_address,
+ .ndo_eth_ioctl = fec_enet_ioctl,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = fec_poll_controller,
+#endif
+ .ndo_set_features = fec_set_features,
+};
+
+static const unsigned short offset_des_active_rxq[] = {
+ FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
+};
+
+static const unsigned short offset_des_active_txq[] = {
+ FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
+};
+
+/*
+ * XXX: We need to clean up on failure exits here.
+ */
+static int fec_enet_init(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct bufdesc *cbd_base;
+ dma_addr_t bd_dma;
+ int bd_size;
+ unsigned int i;
+ unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
+ sizeof(struct bufdesc);
+ unsigned dsize_log2 = __fls(dsize);
+ int ret;
+
+ WARN_ON(dsize != (1 << dsize_log2));
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ fep->rx_align = 0xf;
+ fep->tx_align = 0xf;
+#else
+ fep->rx_align = 0x3;
+ fep->tx_align = 0x3;
+#endif
+ fep->rx_pkts_itr = FEC_ITR_ICFT_DEFAULT;
+ fep->tx_pkts_itr = FEC_ITR_ICFT_DEFAULT;
+ fep->rx_time_itr = FEC_ITR_ICTT_DEFAULT;
+ fep->tx_time_itr = FEC_ITR_ICTT_DEFAULT;
+
+ /* Check mask of the streaming and coherent API */
+ ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32));
+ if (ret < 0) {
+ dev_warn(&fep->pdev->dev, "No suitable DMA available\n");
+ return ret;
+ }
+
+ ret = fec_enet_alloc_queue(ndev);
+ if (ret)
+ return ret;
+
+ bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
+
+ /* Allocate memory for buffer descriptors. */
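+	/* All RX and TX rings share this single coherent allocation; the
+	 * per-queue bd.base/bd.dma pointers are carved out of it below.
+	 */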
+ cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
+ GFP_KERNEL);
+ if (!cbd_base) {
+ ret = -ENOMEM;
+ goto free_queue_mem;
+ }
+
+ /* Get the Ethernet address */
+ ret = fec_get_mac(ndev);
+ if (ret)
+ goto free_queue_mem;
+
+	/* make sure the MAC we just acquired is programmed into the hw */
+ fec_set_mac_address(ndev, NULL);
+
+ /* Set receive and transmit descriptor base. */
+ for (i = 0; i < fep->num_rx_queues; i++) {
+ struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
+ unsigned size = dsize * rxq->bd.ring_size;
+
+ rxq->bd.qid = i;
+ rxq->bd.base = cbd_base;
+ rxq->bd.cur = cbd_base;
+ rxq->bd.dma = bd_dma;
+ rxq->bd.dsize = dsize;
+ rxq->bd.dsize_log2 = dsize_log2;
+ rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
+ bd_dma += size;
+ cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
+ rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
+ }
+
+ for (i = 0; i < fep->num_tx_queues; i++) {
+ struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
+ unsigned size = dsize * txq->bd.ring_size;
+
+ txq->bd.qid = i;
+ txq->bd.base = cbd_base;
+ txq->bd.cur = cbd_base;
+ txq->bd.dma = bd_dma;
+ txq->bd.dsize = dsize;
+ txq->bd.dsize_log2 = dsize_log2;
+ txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
+ bd_dma += size;
+ cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
+ txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
+ }
+
+ /* The FEC Ethernet specific entries in the device structure */
+ ndev->watchdog_timeo = TX_TIMEOUT;
+ ndev->netdev_ops = &fec_netdev_ops;
+ ndev->ethtool_ops = &fec_enet_ethtool_ops;
+
+ writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
+ netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi);
+
+ if (fep->quirks & FEC_QUIRK_HAS_VLAN)
+ /* enable hw VLAN support */
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+
+ if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
+ netif_set_tso_max_segs(ndev, FEC_MAX_TSO_SEGS);
+
+ /* enable hw accelerator */
+ ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
+ | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
+ fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
+ }
+
+ if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
+ fep->tx_align = 0;
+ fep->rx_align = 0x3f;
+ }
+
+ ndev->hw_features = ndev->features;
+
+ fec_restart(ndev);
+
+ if (fep->quirks & FEC_QUIRK_MIB_CLEAR)
+ fec_enet_clear_ethtool_stats(ndev);
+ else
+ fec_enet_update_ethtool_stats(ndev);
+
+ return 0;
+
+free_queue_mem:
+ fec_enet_free_queue(ndev);
+ return ret;
+}
+
+#ifdef CONFIG_OF
+static int fec_reset_phy(struct platform_device *pdev)
+{
+ int err, phy_reset;
+ bool active_high = false;
+ int msec = 1, phy_post_delay = 0;
+ struct device_node *np = pdev->dev.of_node;
+
+ if (!np)
+ return 0;
+
+ err = of_property_read_u32(np, "phy-reset-duration", &msec);
+ /* A sane reset duration should not be longer than 1s */
+ if (!err && msec > 1000)
+ msec = 1;
+
+ phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
+ if (phy_reset == -EPROBE_DEFER)
+ return phy_reset;
+ else if (!gpio_is_valid(phy_reset))
+ return 0;
+
+ err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay);
+	/* a valid post-reset delay should not be longer than 1s */
+ if (!err && phy_post_delay > 1000)
+ return -EINVAL;
+
+ active_high = of_property_read_bool(np, "phy-reset-active-high");
+
+ err = devm_gpio_request_one(&pdev->dev, phy_reset,
+ active_high ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
+ "phy-reset");
+ if (err) {
+ dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
+ return err;
+ }
+
+ if (msec > 20)
+ msleep(msec);
+ else
+ usleep_range(msec * 1000, msec * 1000 + 1000);
+
+ gpio_set_value_cansleep(phy_reset, !active_high);
+
+ if (!phy_post_delay)
+ return 0;
+
+ if (phy_post_delay > 20)
+ msleep(phy_post_delay);
+ else
+ usleep_range(phy_post_delay * 1000,
+ phy_post_delay * 1000 + 1000);
+
+ return 0;
+}
+#else /* CONFIG_OF */
+static int fec_reset_phy(struct platform_device *pdev)
+{
+	/*
+	 * In the case of a non-DT platform probe, the reset has already
+	 * been done by the board setup code.
+	 */
+ return 0;
+}
+#endif /* CONFIG_OF */
+
+static void
+fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
+{
+ struct device_node *np = pdev->dev.of_node;
+
+ *num_tx = *num_rx = 1;
+
+ if (!np || !of_device_is_available(np))
+ return;
+
+ /* parse the num of tx and rx queues */
+ of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
+
+ of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
+
+ if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
+ dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
+ *num_tx);
+ *num_tx = 1;
+ return;
+ }
+
+ if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) {
+ dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n",
+ *num_rx);
+ *num_rx = 1;
+ return;
+ }
+
+}
+
+static int fec_enet_get_irq_cnt(struct platform_device *pdev)
+{
+ int irq_cnt = platform_irq_count(pdev);
+
+	if (irq_cnt > FEC_IRQ_NUM)
+		irq_cnt = FEC_IRQ_NUM;	/* the last irq is reserved for pps */
+	else if (irq_cnt == 2)
+		irq_cnt = 1;		/* the last irq is reserved for pps */
+	else if (irq_cnt <= 0)
+		irq_cnt = 1;		/* at least one irq is needed */
+ return irq_cnt;
+}
+
+static void fec_enet_get_wakeup_irq(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ if (fep->quirks & FEC_QUIRK_WAKEUP_FROM_INT2)
+ fep->wake_irq = fep->irq[2];
+ else
+ fep->wake_irq = fep->irq[0];
+}
+
+static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
+ struct device_node *np)
+{
+ struct device_node *gpr_np;
+ u32 out_val[3];
+ int ret = 0;
+
+ gpr_np = of_parse_phandle(np, "fsl,stop-mode", 0);
+ if (!gpr_np)
+ return 0;
+
+ ret = of_property_read_u32_array(np, "fsl,stop-mode", out_val,
+ ARRAY_SIZE(out_val));
+ if (ret) {
+ dev_dbg(&fep->pdev->dev, "no stop mode property\n");
+ goto out;
+ }
+
+ fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);
+ if (IS_ERR(fep->stop_gpr.gpr)) {
+ dev_err(&fep->pdev->dev, "could not find gpr regmap\n");
+ ret = PTR_ERR(fep->stop_gpr.gpr);
+ fep->stop_gpr.gpr = NULL;
+ goto out;
+ }
+
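+	/* out_val[0] is the GPR phandle itself; out_val[1] and out_val[2]
+	 * give the register offset and bit used to request stop mode.
+	 */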
+ fep->stop_gpr.reg = out_val[1];
+ fep->stop_gpr.bit = out_val[2];
+
+out:
+ of_node_put(gpr_np);
+
+ return ret;
+}
+
+static int
+fec_probe(struct platform_device *pdev)
+{
+ struct fec_enet_private *fep;
+ struct fec_platform_data *pdata;
+ phy_interface_t interface;
+ struct net_device *ndev;
+ int i, irq, ret = 0;
+ const struct of_device_id *of_id;
+ static int dev_id;
+ struct device_node *np = pdev->dev.of_node, *phy_node;
+ int num_tx_qs;
+ int num_rx_qs;
+ char irq_name[8];
+ int irq_cnt;
+ struct fec_devinfo *dev_info;
+
+ fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
+
+ /* Init network device */
+ ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) +
+ FEC_STATS_SIZE, num_tx_qs, num_rx_qs);
+ if (!ndev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ /* setup board info structure */
+ fep = netdev_priv(ndev);
+
+ of_id = of_match_device(fec_dt_ids, &pdev->dev);
+ if (of_id)
+ pdev->id_entry = of_id->data;
+ dev_info = (struct fec_devinfo *)pdev->id_entry->driver_data;
+ if (dev_info)
+ fep->quirks = dev_info->quirks;
+
+ fep->netdev = ndev;
+ fep->num_rx_queues = num_rx_qs;
+ fep->num_tx_queues = num_tx_qs;
+
+#if !defined(CONFIG_M5272)
+ /* default enable pause frame auto negotiation */
+ if (fep->quirks & FEC_QUIRK_HAS_GBIT)
+ fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
+#endif
+
+ /* Select default pin state */
+ pinctrl_pm_select_default_state(&pdev->dev);
+
+ fep->hwp = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(fep->hwp)) {
+ ret = PTR_ERR(fep->hwp);
+ goto failed_ioremap;
+ }
+
+ fep->pdev = pdev;
+ fep->dev_id = dev_id++;
+
+ platform_set_drvdata(pdev, ndev);
+
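+	/* i.MX6Q/DL erratum ERR006687: apply the software workaround quirk
+	 * unless the board declares that the hardware workaround is present.
+	 */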
+ if ((of_machine_is_compatible("fsl,imx6q") ||
+ of_machine_is_compatible("fsl,imx6dl")) &&
+ !of_property_read_bool(np, "fsl,err006687-workaround-present"))
+ fep->quirks |= FEC_QUIRK_ERR006687;
+
+ ret = fec_enet_ipc_handle_init(fep);
+ if (ret)
+ goto failed_ipc_init;
+
+ if (of_get_property(np, "fsl,magic-packet", NULL))
+ fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
+
+ ret = fec_enet_init_stop_mode(fep, np);
+ if (ret)
+ goto failed_stop_mode;
+
+ phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (!phy_node && of_phy_is_fixed_link(np)) {
+ ret = of_phy_register_fixed_link(np);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "broken fixed-link specification\n");
+ goto failed_phy;
+ }
+ phy_node = of_node_get(np);
+ }
+ fep->phy_node = phy_node;
+
+ ret = of_get_phy_mode(pdev->dev.of_node, &interface);
+ if (ret) {
+ pdata = dev_get_platdata(&pdev->dev);
+ if (pdata)
+ fep->phy_interface = pdata->phy;
+ else
+ fep->phy_interface = PHY_INTERFACE_MODE_MII;
+ } else {
+ fep->phy_interface = interface;
+ }
+
+ ret = fec_enet_parse_rgmii_delay(fep, np);
+ if (ret)
+ goto failed_rgmii_delay;
+
+ fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(fep->clk_ipg)) {
+ ret = PTR_ERR(fep->clk_ipg);
+ goto failed_clk;
+ }
+
+ fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(fep->clk_ahb)) {
+ ret = PTR_ERR(fep->clk_ahb);
+ goto failed_clk;
+ }
+
+ fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);
+
+ /* enet_out is optional, depends on board */
+ fep->clk_enet_out = devm_clk_get_optional(&pdev->dev, "enet_out");
+ if (IS_ERR(fep->clk_enet_out)) {
+ ret = PTR_ERR(fep->clk_enet_out);
+ goto failed_clk;
+ }
+
+ fep->ptp_clk_on = false;
+ mutex_init(&fep->ptp_clk_mutex);
+
+ /* clk_ref is optional, depends on board */
+ fep->clk_ref = devm_clk_get_optional(&pdev->dev, "enet_clk_ref");
+ if (IS_ERR(fep->clk_ref)) {
+ ret = PTR_ERR(fep->clk_ref);
+ goto failed_clk;
+ }
+ fep->clk_ref_rate = clk_get_rate(fep->clk_ref);
+
+ /* clk_2x_txclk is optional, depends on board */
+ if (fep->rgmii_txc_dly || fep->rgmii_rxc_dly) {
+ fep->clk_2x_txclk = devm_clk_get(&pdev->dev, "enet_2x_txclk");
+ if (IS_ERR(fep->clk_2x_txclk))
+ fep->clk_2x_txclk = NULL;
+ }
+
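+	/* Extended buffer descriptors require the PTP clock; if it is
+	 * missing, fall back to legacy descriptors.
+	 */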
+ fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
+ fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
+ if (IS_ERR(fep->clk_ptp)) {
+ fep->clk_ptp = NULL;
+ fep->bufdesc_ex = false;
+ }
+
+ ret = fec_enet_clk_enable(ndev, true);
+ if (ret)
+ goto failed_clk;
+
+ ret = clk_prepare_enable(fep->clk_ipg);
+ if (ret)
+ goto failed_clk_ipg;
+ ret = clk_prepare_enable(fep->clk_ahb);
+ if (ret)
+ goto failed_clk_ahb;
+
+ fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy");
+ if (!IS_ERR(fep->reg_phy)) {
+ ret = regulator_enable(fep->reg_phy);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to enable phy regulator: %d\n", ret);
+ goto failed_regulator;
+ }
+ } else {
+ if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto failed_regulator;
+ }
+ fep->reg_phy = NULL;
+ }
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = fec_reset_phy(pdev);
+ if (ret)
+ goto failed_reset;
+
+ irq_cnt = fec_enet_get_irq_cnt(pdev);
+ if (fep->bufdesc_ex)
+ fec_ptp_init(pdev, irq_cnt);
+
+ ret = fec_enet_init(ndev);
+ if (ret)
+ goto failed_init;
+
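+	/* Interrupts may be listed by name ("int0".."intN") in the device
+	 * tree; fall back to positional lookup for older bindings.
+	 */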
+ for (i = 0; i < irq_cnt; i++) {
+ snprintf(irq_name, sizeof(irq_name), "int%d", i);
+ irq = platform_get_irq_byname_optional(pdev, irq_name);
+ if (irq < 0)
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0) {
+ ret = irq;
+ goto failed_irq;
+ }
+ ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
+ 0, pdev->name, ndev);
+ if (ret)
+ goto failed_irq;
+
+ fep->irq[i] = irq;
+ }
+
+ /* Decide which interrupt line is wakeup capable */
+ fec_enet_get_wakeup_irq(pdev);
+
+ ret = fec_enet_mii_init(pdev);
+ if (ret)
+ goto failed_mii_init;
+
+ /* Carrier starts down, phylib will bring it up */
+ netif_carrier_off(ndev);
+ fec_enet_clk_enable(ndev, false);
+ pinctrl_pm_select_sleep_state(&pdev->dev);
+
+ ndev->max_mtu = PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN;
+
+ ret = register_netdev(ndev);
+ if (ret)
+ goto failed_register;
+
+ device_init_wakeup(&ndev->dev, fep->wol_flag &
+ FEC_WOL_HAS_MAGIC_PACKET);
+
+ if (fep->bufdesc_ex && fep->ptp_clock)
+ netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
+
+ fep->rx_copybreak = COPYBREAK_DEFAULT;
+ INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
+
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
+ return 0;
+
+failed_register:
+ fec_enet_mii_remove(fep);
+failed_mii_init:
+failed_irq:
+failed_init:
+ fec_ptp_stop(pdev);
+failed_reset:
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ if (fep->reg_phy)
+ regulator_disable(fep->reg_phy);
+failed_regulator:
+ clk_disable_unprepare(fep->clk_ahb);
+failed_clk_ahb:
+ clk_disable_unprepare(fep->clk_ipg);
+failed_clk_ipg:
+ fec_enet_clk_enable(ndev, false);
+failed_clk:
+failed_rgmii_delay:
+ if (of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
+ of_node_put(phy_node);
+failed_stop_mode:
+failed_ipc_init:
+failed_phy:
+ dev_id--;
+failed_ioremap:
+ free_netdev(ndev);
+
+ return ret;
+}
+
+static int
+fec_drv_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ dev_err(&pdev->dev,
+ "Failed to resume device in remove callback (%pe)\n",
+ ERR_PTR(ret));
+
+ cancel_work_sync(&fep->tx_timeout_work);
+ fec_ptp_stop(pdev);
+ unregister_netdev(ndev);
+ fec_enet_mii_remove(fep);
+ if (fep->reg_phy)
+ regulator_disable(fep->reg_phy);
+
+ if (of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
+ of_node_put(fep->phy_node);
+
+	/* If pm_runtime_get_sync() failed, the clocks are still off, so skip
+	 * disabling them again.
+	 */
+ if (ret >= 0) {
+ clk_disable_unprepare(fep->clk_ahb);
+ clk_disable_unprepare(fep->clk_ipg);
+ }
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ free_netdev(ndev);
+ return 0;
+}
+
+static int __maybe_unused fec_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int ret;
+
+ rtnl_lock();
+ if (netif_running(ndev)) {
+ if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
+ fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
+ phy_stop(ndev->phydev);
+ napi_disable(&fep->napi);
+ netif_tx_lock_bh(ndev);
+ netif_device_detach(ndev);
+ netif_tx_unlock_bh(ndev);
+ fec_stop(ndev);
+ if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
+ fec_irqs_disable(ndev);
+ pinctrl_pm_select_sleep_state(&fep->pdev->dev);
+ } else {
+ fec_irqs_disable_except_wakeup(ndev);
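+			/* Only the wakeup interrupt line stays relevant: mask
+			 * it with disable_irq() but flag it as a wakeup source
+			 * with enable_irq_wake().
+			 */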
+ if (fep->wake_irq > 0) {
+ disable_irq(fep->wake_irq);
+ enable_irq_wake(fep->wake_irq);
+ }
+ fec_enet_stop_mode(fep, true);
+ }
+ /* It's safe to disable clocks since interrupts are masked */
+ fec_enet_clk_enable(ndev, false);
+
+ fep->rpm_active = !pm_runtime_status_suspended(dev);
+ if (fep->rpm_active) {
+ ret = pm_runtime_force_suspend(dev);
+ if (ret < 0) {
+ rtnl_unlock();
+ return ret;
+ }
+ }
+ }
+ rtnl_unlock();
+
+ if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
+ regulator_disable(fep->reg_phy);
+
+	/* The SoC supplies the clock to the PHY: when that clock is disabled,
+	 * the PHY link goes down. The SoC also controls the PHY regulator:
+	 * when the regulator is disabled, the link goes down as well.
+	 */
+ if (fep->clk_enet_out || fep->reg_phy)
+ fep->link = 0;
+
+ return 0;
+}
+
+static int __maybe_unused fec_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int ret;
+ int val;
+
+ if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
+ ret = regulator_enable(fep->reg_phy);
+ if (ret)
+ return ret;
+ }
+
+ rtnl_lock();
+ if (netif_running(ndev)) {
+ if (fep->rpm_active)
+ pm_runtime_force_resume(dev);
+
+ ret = fec_enet_clk_enable(ndev, true);
+ if (ret) {
+ rtnl_unlock();
+ goto failed_clk;
+ }
+ if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
+ fec_enet_stop_mode(fep, false);
+ if (fep->wake_irq) {
+ disable_irq_wake(fep->wake_irq);
+ enable_irq(fep->wake_irq);
+ }
+
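+			/* Clear the magic-packet and sleep mode bits used
+			 * while in wake-on-LAN suspend.
+			 */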
+ val = readl(fep->hwp + FEC_ECNTRL);
+ val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
+ writel(val, fep->hwp + FEC_ECNTRL);
+ fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON;
+ } else {
+ pinctrl_pm_select_default_state(&fep->pdev->dev);
+ }
+ fec_restart(ndev);
+ netif_tx_lock_bh(ndev);
+ netif_device_attach(ndev);
+ netif_tx_unlock_bh(ndev);
+ napi_enable(&fep->napi);
+ phy_init_hw(ndev->phydev);
+ phy_start(ndev->phydev);
+ }
+ rtnl_unlock();
+
+ return 0;
+
+failed_clk:
+ if (fep->reg_phy)
+ regulator_disable(fep->reg_phy);
+ return ret;
+}
+
+static int __maybe_unused fec_runtime_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ clk_disable_unprepare(fep->clk_ahb);
+ clk_disable_unprepare(fep->clk_ipg);
+
+ return 0;
+}
+
+static int __maybe_unused fec_runtime_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int ret;
+
+ ret = clk_prepare_enable(fep->clk_ahb);
+ if (ret)
+ return ret;
+ ret = clk_prepare_enable(fep->clk_ipg);
+ if (ret)
+ goto failed_clk_ipg;
+
+ return 0;
+
+failed_clk_ipg:
+ clk_disable_unprepare(fep->clk_ahb);
+ return ret;
+}
+
+static const struct dev_pm_ops fec_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
+ SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
+};
+
+static struct platform_driver fec_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .pm = &fec_pm_ops,
+ .of_match_table = fec_dt_ids,
+ .suppress_bind_attrs = true,
+ },
+ .id_table = fec_devtype,
+ .probe = fec_probe,
+ .remove = fec_drv_remove,
+};
+
+module_platform_driver(fec_driver);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
new file mode 100644
index 000000000..a7f4c3c29
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -0,0 +1,1082 @@
+/*
+ * Driver for the MPC5200 Fast Ethernet Controller
+ *
+ * Originally written by Dale Farnsworth <dfarnsworth@mvista.com> and
+ * now maintained by Sylvain Munaut <tnt@246tNt.com>
+ *
+ * Copyright (C) 2007 Domen Puncer, Telargo, Inc.
+ * Copyright (C) 2007 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2003-2004 MontaVista, Software, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/crc32.h>
+#include <linux/hardirq.h>
+#include <linux/delay.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+
+#include <asm/io.h>
+#include <asm/delay.h>
+#include <asm/mpc52xx.h>
+
+#include <linux/fsl/bestcomm/bestcomm.h>
+#include <linux/fsl/bestcomm/fec.h>
+
+#include "fec_mpc52xx.h"
+
+#define DRIVER_NAME "mpc52xx-fec"
+
+/* Private driver data structure */
+struct mpc52xx_fec_priv {
+ struct net_device *ndev;
+ int duplex;
+ int speed;
+ int r_irq;
+ int t_irq;
+ struct mpc52xx_fec __iomem *fec;
+ struct bcom_task *rx_dmatsk;
+ struct bcom_task *tx_dmatsk;
+ spinlock_t lock;
+ int msg_enable;
+
+ /* MDIO link details */
+ unsigned int mdio_speed;
+ struct device_node *phy_node;
+ enum phy_state link;
+ int seven_wire_mode;
+};
+
+
+static irqreturn_t mpc52xx_fec_interrupt(int, void *);
+static irqreturn_t mpc52xx_fec_rx_interrupt(int, void *);
+static irqreturn_t mpc52xx_fec_tx_interrupt(int, void *);
+static void mpc52xx_fec_stop(struct net_device *dev, bool may_sleep);
+static void mpc52xx_fec_start(struct net_device *dev);
+static void mpc52xx_fec_reset(struct net_device *dev);
+
+#define MPC52xx_MESSAGES_DEFAULT ( NETIF_MSG_DRV | NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
+static int debug = -1; /* the above default */
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "debugging messages level");
+
+static void mpc52xx_fec_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ dev_warn(&dev->dev, "transmit timed out\n");
+
+ spin_lock_irqsave(&priv->lock, flags);
+ mpc52xx_fec_reset(dev);
+ dev->stats.tx_errors++;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ netif_wake_queue(dev);
+}
+
+static void mpc52xx_fec_set_paddr(struct net_device *dev, const u8 *mac)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+
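+	/* paddr1 holds the first four MAC bytes; paddr2 carries the last two
+	 * in its upper half, with the type marker in the lower half.
+	 */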
+ out_be32(&fec->paddr1, *(const u32 *)(&mac[0]));
+ out_be32(&fec->paddr2, (*(const u16 *)(&mac[4]) << 16) | FEC_PADDR2_TYPE);
+}
+
+static int mpc52xx_fec_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *sock = addr;
+
+ eth_hw_addr_set(dev, sock->sa_data);
+
+ mpc52xx_fec_set_paddr(dev, sock->sa_data);
+ return 0;
+}
+
+static void mpc52xx_fec_free_rx_buffers(struct net_device *dev, struct bcom_task *s)
+{
+ while (!bcom_queue_empty(s)) {
+ struct bcom_fec_bd *bd;
+ struct sk_buff *skb;
+
+ skb = bcom_retrieve_buffer(s, NULL, (struct bcom_bd **)&bd);
+ dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len,
+ DMA_FROM_DEVICE);
+ kfree_skb(skb);
+ }
+}
+
+static void
+mpc52xx_fec_rx_submit(struct net_device *dev, struct sk_buff *rskb)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct bcom_fec_bd *bd;
+
+ bd = (struct bcom_fec_bd *) bcom_prepare_next_buffer(priv->rx_dmatsk);
+ bd->status = FEC_RX_BUFFER_SIZE;
+ bd->skb_pa = dma_map_single(dev->dev.parent, rskb->data,
+ FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
+ bcom_submit_next_buffer(priv->rx_dmatsk, rskb);
+}
+
+static int mpc52xx_fec_alloc_rx_buffers(struct net_device *dev, struct bcom_task *rxtsk)
+{
+ struct sk_buff *skb;
+
+ while (!bcom_queue_full(rxtsk)) {
+ skb = netdev_alloc_skb(dev, FEC_RX_BUFFER_SIZE);
+ if (!skb)
+ return -EAGAIN;
+
+ /* zero out the initial receive buffers to aid debugging */
+ memset(skb->data, 0, FEC_RX_BUFFER_SIZE);
+ mpc52xx_fec_rx_submit(dev, skb);
+ }
+ return 0;
+}
+
+/* based on generic_adjust_link from fs_enet-main.c */
+static void mpc52xx_fec_adjust_link(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
+ int new_state = 0;
+
+ if (phydev->link != PHY_DOWN) {
+ if (phydev->duplex != priv->duplex) {
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+ u32 rcntrl;
+ u32 tcntrl;
+
+ new_state = 1;
+ priv->duplex = phydev->duplex;
+
+ rcntrl = in_be32(&fec->r_cntrl);
+ tcntrl = in_be32(&fec->x_cntrl);
+
+ rcntrl &= ~FEC_RCNTRL_DRT;
+ tcntrl &= ~FEC_TCNTRL_FDEN;
+ if (phydev->duplex == DUPLEX_FULL)
+ tcntrl |= FEC_TCNTRL_FDEN; /* FD enable */
+ else
+ rcntrl |= FEC_RCNTRL_DRT; /* disable Rx on Tx (HD) */
+
+ out_be32(&fec->r_cntrl, rcntrl);
+ out_be32(&fec->x_cntrl, tcntrl);
+ }
+
+ if (phydev->speed != priv->speed) {
+ new_state = 1;
+ priv->speed = phydev->speed;
+ }
+
+ if (priv->link == PHY_DOWN) {
+ new_state = 1;
+ priv->link = phydev->link;
+ }
+
+ } else if (priv->link) {
+ new_state = 1;
+ priv->link = PHY_DOWN;
+ priv->speed = 0;
+ priv->duplex = -1;
+ }
+
+ if (new_state && netif_msg_link(priv))
+ phy_print_status(phydev);
+}
+
+static int mpc52xx_fec_open(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = NULL;
+ int err = -EBUSY;
+
+ if (priv->phy_node) {
+ phydev = of_phy_connect(priv->ndev, priv->phy_node,
+ mpc52xx_fec_adjust_link, 0, 0);
+ if (!phydev) {
+ dev_err(&dev->dev, "of_phy_connect failed\n");
+ return -ENODEV;
+ }
+ phy_start(phydev);
+ }
+
+ if (request_irq(dev->irq, mpc52xx_fec_interrupt, IRQF_SHARED,
+ DRIVER_NAME "_ctrl", dev)) {
+ dev_err(&dev->dev, "ctrl interrupt request failed\n");
+ goto free_phy;
+ }
+ if (request_irq(priv->r_irq, mpc52xx_fec_rx_interrupt, 0,
+ DRIVER_NAME "_rx", dev)) {
+ dev_err(&dev->dev, "rx interrupt request failed\n");
+ goto free_ctrl_irq;
+ }
+ if (request_irq(priv->t_irq, mpc52xx_fec_tx_interrupt, 0,
+ DRIVER_NAME "_tx", dev)) {
+ dev_err(&dev->dev, "tx interrupt request failed\n");
+ goto free_2irqs;
+ }
+
+ bcom_fec_rx_reset(priv->rx_dmatsk);
+ bcom_fec_tx_reset(priv->tx_dmatsk);
+
+ err = mpc52xx_fec_alloc_rx_buffers(dev, priv->rx_dmatsk);
+ if (err) {
+ dev_err(&dev->dev, "mpc52xx_fec_alloc_rx_buffers failed\n");
+ goto free_irqs;
+ }
+
+ bcom_enable(priv->rx_dmatsk);
+ bcom_enable(priv->tx_dmatsk);
+
+ mpc52xx_fec_start(dev);
+
+ netif_start_queue(dev);
+
+ return 0;
+
+ free_irqs:
+ free_irq(priv->t_irq, dev);
+ free_2irqs:
+ free_irq(priv->r_irq, dev);
+ free_ctrl_irq:
+ free_irq(dev->irq, dev);
+ free_phy:
+ if (phydev) {
+ phy_stop(phydev);
+ phy_disconnect(phydev);
+ }
+
+ return err;
+}
+
+static int mpc52xx_fec_close(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
+
+ netif_stop_queue(dev);
+
+ mpc52xx_fec_stop(dev, true);
+
+ mpc52xx_fec_free_rx_buffers(dev, priv->rx_dmatsk);
+
+ free_irq(dev->irq, dev);
+ free_irq(priv->r_irq, dev);
+ free_irq(priv->t_irq, dev);
+
+ if (phydev) {
+ /* power down phy */
+ phy_stop(phydev);
+ phy_disconnect(phydev);
+ }
+
+ return 0;
+}
+
+/* This will only be invoked if your driver is _not_ in XOFF state.
+ * What this means is that you need not check it, and that this
+ * invariant will hold if you make sure that the netif_*_queue()
+ * calls are done at the proper times.
+ */
+static netdev_tx_t
+mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct bcom_fec_bd *bd;
+ unsigned long flags;
+
+ if (bcom_queue_full(priv->tx_dmatsk)) {
+ if (net_ratelimit())
+ dev_err(&dev->dev, "transmit queue overrun\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ bd = (struct bcom_fec_bd *)
+ bcom_prepare_next_buffer(priv->tx_dmatsk);
+
+ bd->status = skb->len | BCOM_FEC_TX_BD_TFD | BCOM_FEC_TX_BD_TC;
+ bd->skb_pa = dma_map_single(dev->dev.parent, skb->data, skb->len,
+ DMA_TO_DEVICE);
+
+ skb_tx_timestamp(skb);
+ bcom_submit_next_buffer(priv->tx_dmatsk, skb);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (bcom_queue_full(priv->tx_dmatsk)) {
+ netif_stop_queue(dev);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void mpc52xx_fec_poll_controller(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+
+ disable_irq(priv->t_irq);
+ mpc52xx_fec_tx_interrupt(priv->t_irq, dev);
+ enable_irq(priv->t_irq);
+ disable_irq(priv->r_irq);
+ mpc52xx_fec_rx_interrupt(priv->r_irq, dev);
+ enable_irq(priv->r_irq);
+}
+#endif
+
+
+/* This handles BestComm transmit task interrupts
+ */
+static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+
+ spin_lock(&priv->lock);
+ while (bcom_buffer_done(priv->tx_dmatsk)) {
+ struct sk_buff *skb;
+ struct bcom_fec_bd *bd;
+ skb = bcom_retrieve_buffer(priv->tx_dmatsk, NULL,
+ (struct bcom_bd **)&bd);
+ dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len,
+ DMA_TO_DEVICE);
+
+ dev_consume_skb_irq(skb);
+ }
+ spin_unlock(&priv->lock);
+
+ netif_wake_queue(dev);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct sk_buff *rskb; /* received sk_buff */
+ struct sk_buff *skb; /* new sk_buff to enqueue in its place */
+ struct bcom_fec_bd *bd;
+ u32 status, physaddr;
+ int length;
+
+ spin_lock(&priv->lock);
+
+ while (bcom_buffer_done(priv->rx_dmatsk)) {
+
+ rskb = bcom_retrieve_buffer(priv->rx_dmatsk, &status,
+ (struct bcom_bd **)&bd);
+ physaddr = bd->skb_pa;
+
+ /* Test for errors in received frame */
+ if (status & BCOM_FEC_RX_BD_ERRORS) {
+ /* Drop packet and reuse the buffer */
+ mpc52xx_fec_rx_submit(dev, rskb);
+ dev->stats.rx_dropped++;
+ continue;
+ }
+
+ /* skbs are allocated on open, so now we allocate a new one,
+ * and remove the old (with the packet) */
+ skb = netdev_alloc_skb(dev, FEC_RX_BUFFER_SIZE);
+ if (!skb) {
+			/* Can't get a new one: reuse the same buffer & drop the packet */
+ dev_notice(&dev->dev, "Low memory - dropped packet.\n");
+ mpc52xx_fec_rx_submit(dev, rskb);
+ dev->stats.rx_dropped++;
+ continue;
+ }
+
+ /* Enqueue the new sk_buff back on the hardware */
+ mpc52xx_fec_rx_submit(dev, skb);
+
+ /* Process the received skb - Drop the spin lock while
+ * calling into the network stack */
+ spin_unlock(&priv->lock);
+
+ dma_unmap_single(dev->dev.parent, physaddr, rskb->len,
+ DMA_FROM_DEVICE);
+ length = status & BCOM_FEC_RX_BD_LEN_MASK;
+ skb_put(rskb, length - 4); /* length without CRC32 */
+ rskb->protocol = eth_type_trans(rskb, dev);
+ if (!skb_defer_rx_timestamp(rskb))
+ netif_rx(rskb);
+
+ spin_lock(&priv->lock);
+ }
+
+ spin_unlock(&priv->lock);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mpc52xx_fec_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+ u32 ievent;
+
+ ievent = in_be32(&fec->ievent);
+
+ ievent &= ~FEC_IEVENT_MII; /* mii is handled separately */
+ if (!ievent)
+ return IRQ_NONE;
+
+ out_be32(&fec->ievent, ievent); /* clear pending events */
+
+ /* on fifo error, soft-reset fec */
+ if (ievent & (FEC_IEVENT_RFIFO_ERROR | FEC_IEVENT_XFIFO_ERROR)) {
+
+ if (net_ratelimit() && (ievent & FEC_IEVENT_RFIFO_ERROR))
+ dev_warn(&dev->dev, "FEC_IEVENT_RFIFO_ERROR\n");
+ if (net_ratelimit() && (ievent & FEC_IEVENT_XFIFO_ERROR))
+ dev_warn(&dev->dev, "FEC_IEVENT_XFIFO_ERROR\n");
+
+ spin_lock(&priv->lock);
+ mpc52xx_fec_reset(dev);
+ spin_unlock(&priv->lock);
+
+ return IRQ_HANDLED;
+ }
+
+ if (ievent & ~FEC_IEVENT_TFINT)
+ dev_dbg(&dev->dev, "ievent: %08x\n", ievent);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Get the current statistics.
+ * This may be called with the card open or closed.
+ */
+static struct net_device_stats *mpc52xx_fec_get_stats(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+
+ stats->rx_bytes = in_be32(&fec->rmon_r_octets);
+ stats->rx_packets = in_be32(&fec->rmon_r_packets);
+ stats->rx_errors = in_be32(&fec->rmon_r_crc_align) +
+ in_be32(&fec->rmon_r_undersize) +
+ in_be32(&fec->rmon_r_oversize) +
+ in_be32(&fec->rmon_r_frag) +
+ in_be32(&fec->rmon_r_jab);
+
+ stats->tx_bytes = in_be32(&fec->rmon_t_octets);
+ stats->tx_packets = in_be32(&fec->rmon_t_packets);
+ stats->tx_errors = in_be32(&fec->rmon_t_crc_align) +
+ in_be32(&fec->rmon_t_undersize) +
+ in_be32(&fec->rmon_t_oversize) +
+ in_be32(&fec->rmon_t_frag) +
+ in_be32(&fec->rmon_t_jab);
+
+ stats->multicast = in_be32(&fec->rmon_r_mc_pkt);
+ stats->collisions = in_be32(&fec->rmon_t_col);
+
+ /* detailed rx_errors: */
+ stats->rx_length_errors = in_be32(&fec->rmon_r_undersize)
+ + in_be32(&fec->rmon_r_oversize)
+ + in_be32(&fec->rmon_r_frag)
+ + in_be32(&fec->rmon_r_jab);
+ stats->rx_over_errors = in_be32(&fec->r_macerr);
+ stats->rx_crc_errors = in_be32(&fec->ieee_r_crc);
+ stats->rx_frame_errors = in_be32(&fec->ieee_r_align);
+ stats->rx_fifo_errors = in_be32(&fec->rmon_r_drop);
+ stats->rx_missed_errors = in_be32(&fec->rmon_r_drop);
+
+ /* detailed tx_errors: */
+ stats->tx_aborted_errors = 0;
+ stats->tx_carrier_errors = in_be32(&fec->ieee_t_cserr);
+ stats->tx_fifo_errors = in_be32(&fec->rmon_t_drop);
+ stats->tx_heartbeat_errors = in_be32(&fec->ieee_t_sqe);
+ stats->tx_window_errors = in_be32(&fec->ieee_t_lcol);
+
+ return stats;
+}
+
+/*
+ * Read MIB counters in order to reset them,
+ * then zero all the stats fields in memory
+ */
+static void mpc52xx_fec_reset_stats(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+
+ out_be32(&fec->mib_control, FEC_MIB_DISABLE);
+ memset_io(&fec->rmon_t_drop, 0,
+ offsetof(struct mpc52xx_fec, reserved10) -
+ offsetof(struct mpc52xx_fec, rmon_t_drop));
+ out_be32(&fec->mib_control, 0);
+
+ memset(&dev->stats, 0, sizeof(dev->stats));
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+static void mpc52xx_fec_set_multicast_list(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+ u32 rx_control;
+
+ rx_control = in_be32(&fec->r_cntrl);
+
+ if (dev->flags & IFF_PROMISC) {
+ rx_control |= FEC_RCNTRL_PROM;
+ out_be32(&fec->r_cntrl, rx_control);
+ } else {
+ rx_control &= ~FEC_RCNTRL_PROM;
+ out_be32(&fec->r_cntrl, rx_control);
+
+ if (dev->flags & IFF_ALLMULTI) {
+ out_be32(&fec->gaddr1, 0xffffffff);
+ out_be32(&fec->gaddr2, 0xffffffff);
+ } else {
+ u32 crc;
+ struct netdev_hw_addr *ha;
+ u32 gaddr1 = 0x00000000;
+ u32 gaddr2 = 0x00000000;
+
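+			/* The top 6 bits of the little-endian CRC select one
+			 * of the 64 group hash filter bits: values 32..63 land
+			 * in gaddr1, values 0..31 in gaddr2.
+			 */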
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr) >> 26;
+ if (crc >= 32)
+ gaddr1 |= 1 << (crc-32);
+ else
+ gaddr2 |= 1 << crc;
+ }
+ out_be32(&fec->gaddr1, gaddr1);
+ out_be32(&fec->gaddr2, gaddr2);
+ }
+ }
+}
+
+/**
+ * mpc52xx_fec_hw_init
+ * @dev: network device
+ *
+ * Set up various hardware settings; only needed once at start
+ */
+static void mpc52xx_fec_hw_init(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+ int i;
+
+ /* Whack a reset. We should wait for this. */
+ out_be32(&fec->ecntrl, FEC_ECNTRL_RESET);
+ for (i = 0; i < FEC_RESET_DELAY; ++i) {
+ if ((in_be32(&fec->ecntrl) & FEC_ECNTRL_RESET) == 0)
+ break;
+ udelay(1);
+ }
+ if (i == FEC_RESET_DELAY)
+ dev_err(&dev->dev, "FEC Reset timeout!\n");
+
+ /* set pause to 0x20 frames */
+ out_be32(&fec->op_pause, FEC_OP_PAUSE_OPCODE | 0x20);
+
+	/* The high service request is deasserted when fewer than 7 bytes are
+	 * in the FIFO; the low service request when fewer than 4*7 bytes are.
+	 */
+ out_be32(&fec->rfifo_cntrl, FEC_FIFO_CNTRL_FRAME | FEC_FIFO_CNTRL_LTG_7);
+ out_be32(&fec->tfifo_cntrl, FEC_FIFO_CNTRL_FRAME | FEC_FIFO_CNTRL_LTG_7);
+
+ /* alarm when <= x bytes in FIFO */
+ out_be32(&fec->rfifo_alarm, 0x0000030c);
+ out_be32(&fec->tfifo_alarm, 0x00000100);
+
+	/* begin transmission when 256 bytes are in the FIFO (or EOF or FIFO full) */
+ out_be32(&fec->x_wmrk, FEC_FIFO_WMRK_256B);
+
+ /* enable crc generation */
+ out_be32(&fec->xmit_fsm, FEC_XMIT_FSM_APPEND_CRC | FEC_XMIT_FSM_ENABLE_CRC);
+ out_be32(&fec->iaddr1, 0x00000000); /* No individual filter */
+ out_be32(&fec->iaddr2, 0x00000000); /* No individual filter */
+
+	/* Set the PHY (MII) speed.
+	 * This can't be done in the PHY driver, since it needs to happen
+	 * before the rest of the FEC setup (even on resume). */
+ out_be32(&fec->mii_speed, priv->mdio_speed);
+}
+
+/**
+ * mpc52xx_fec_start
+ * @dev: network device
+ *
+ * This function is called to start or restart the FEC during a link
+ * change. This happens on fifo errors or when switching between half
+ * and full duplex.
+ */
+static void mpc52xx_fec_start(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+ u32 rcntrl;
+ u32 tcntrl;
+ u32 tmp;
+
+ /* clear sticky error bits */
+ tmp = FEC_FIFO_STATUS_ERR | FEC_FIFO_STATUS_UF | FEC_FIFO_STATUS_OF;
+ out_be32(&fec->rfifo_status, in_be32(&fec->rfifo_status) & tmp);
+ out_be32(&fec->tfifo_status, in_be32(&fec->tfifo_status) & tmp);
+
+ /* FIFOs will reset on mpc52xx_fec_enable */
+ out_be32(&fec->reset_cntrl, FEC_RESET_CNTRL_ENABLE_IS_RESET);
+
+ /* Set station address. */
+ mpc52xx_fec_set_paddr(dev, dev->dev_addr);
+
+ mpc52xx_fec_set_multicast_list(dev);
+
+ /* set max frame len, enable flow control, select mii mode */
+ rcntrl = FEC_RX_BUFFER_SIZE << 16; /* max frame length */
+ rcntrl |= FEC_RCNTRL_FCE;
+
+ if (!priv->seven_wire_mode)
+ rcntrl |= FEC_RCNTRL_MII_MODE;
+
+ if (priv->duplex == DUPLEX_FULL)
+ tcntrl = FEC_TCNTRL_FDEN; /* FD enable */
+ else {
+ rcntrl |= FEC_RCNTRL_DRT; /* disable Rx on Tx (HD) */
+ tcntrl = 0;
+ }
+ out_be32(&fec->r_cntrl, rcntrl);
+ out_be32(&fec->x_cntrl, tcntrl);
+
+ /* Clear any outstanding interrupt. */
+ out_be32(&fec->ievent, 0xffffffff);
+
+ /* Enable interrupts we wish to service. */
+ out_be32(&fec->imask, FEC_IMASK_ENABLE);
+
+ /* And last, enable the transmit and receive processing. */
+ out_be32(&fec->ecntrl, FEC_ECNTRL_ETHER_EN);
+ out_be32(&fec->r_des_active, 0x01000000);
+}
+
+/**
+ * mpc52xx_fec_stop
+ * @dev: network device
+ * @may_sleep: true when called from process context and sleeping is allowed
+ *
+ * Stop all activity on the FEC and empty the DMA buffers.
+ */
+static void mpc52xx_fec_stop(struct net_device *dev, bool may_sleep)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+ unsigned long timeout;
+
+ /* disable all interrupts */
+ out_be32(&fec->imask, 0);
+
+ /* Disable the rx task. */
+ bcom_disable(priv->rx_dmatsk);
+
+ /* Wait for tx queue to drain, but only if we're in process context */
+ if (may_sleep) {
+ timeout = jiffies + msecs_to_jiffies(2000);
+ while (time_before(jiffies, timeout) &&
+ !bcom_queue_empty(priv->tx_dmatsk))
+ msleep(100);
+
+ if (time_after_eq(jiffies, timeout))
+ dev_err(&dev->dev, "queues didn't drain\n");
+#if 1
+ if (time_after_eq(jiffies, timeout)) {
+ dev_err(&dev->dev, " tx: index: %i, outdex: %i\n",
+ priv->tx_dmatsk->index,
+ priv->tx_dmatsk->outdex);
+ dev_err(&dev->dev, " rx: index: %i, outdex: %i\n",
+ priv->rx_dmatsk->index,
+ priv->rx_dmatsk->outdex);
+ }
+#endif
+ }
+
+ bcom_disable(priv->tx_dmatsk);
+
+ /* Stop FEC */
+ out_be32(&fec->ecntrl, in_be32(&fec->ecntrl) & ~FEC_ECNTRL_ETHER_EN);
+}
+
+/* reset fec and bestcomm tasks */
+static void mpc52xx_fec_reset(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+
+ mpc52xx_fec_stop(dev, false);
+
+ out_be32(&fec->rfifo_status, in_be32(&fec->rfifo_status));
+ out_be32(&fec->reset_cntrl, FEC_RESET_CNTRL_RESET_FIFO);
+
+ mpc52xx_fec_free_rx_buffers(dev, priv->rx_dmatsk);
+
+ mpc52xx_fec_hw_init(dev);
+
+ bcom_fec_rx_reset(priv->rx_dmatsk);
+ bcom_fec_tx_reset(priv->tx_dmatsk);
+
+ mpc52xx_fec_alloc_rx_buffers(dev, priv->rx_dmatsk);
+
+ bcom_enable(priv->rx_dmatsk);
+ bcom_enable(priv->tx_dmatsk);
+
+ mpc52xx_fec_start(dev);
+
+ netif_wake_queue(dev);
+}
+
+
+/* ethtool interface */
+
+static u32 mpc52xx_fec_get_msglevel(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ return priv->msg_enable;
+}
+
+static void mpc52xx_fec_set_msglevel(struct net_device *dev, u32 level)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ priv->msg_enable = level;
+}
+
+static const struct ethtool_ops mpc52xx_fec_ethtool_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = mpc52xx_fec_get_msglevel,
+ .set_msglevel = mpc52xx_fec_set_msglevel,
+ .get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+
+static const struct net_device_ops mpc52xx_fec_netdev_ops = {
+ .ndo_open = mpc52xx_fec_open,
+ .ndo_stop = mpc52xx_fec_close,
+ .ndo_start_xmit = mpc52xx_fec_start_xmit,
+ .ndo_set_rx_mode = mpc52xx_fec_set_multicast_list,
+ .ndo_set_mac_address = mpc52xx_fec_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_eth_ioctl = phy_do_ioctl,
+ .ndo_tx_timeout = mpc52xx_fec_tx_timeout,
+ .ndo_get_stats = mpc52xx_fec_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = mpc52xx_fec_poll_controller,
+#endif
+};
+
+/* ======================================================================== */
+/* OF Driver */
+/* ======================================================================== */
+
+static int mpc52xx_fec_probe(struct platform_device *op)
+{
+ int rv;
+ struct net_device *ndev;
+ struct mpc52xx_fec_priv *priv = NULL;
+ struct resource mem;
+ const u32 *prop;
+ int prop_size;
+ struct device_node *np = op->dev.of_node;
+
+ phys_addr_t rx_fifo;
+ phys_addr_t tx_fifo;
+
+	/* Get the ether ndev & its private zone */
+ ndev = alloc_etherdev(sizeof(struct mpc52xx_fec_priv));
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+ priv->ndev = ndev;
+
+ /* Reserve FEC control zone */
+ rv = of_address_to_resource(np, 0, &mem);
+ if (rv) {
+ pr_err("Error while parsing device node resource\n");
+ goto err_netdev;
+ }
+ if (resource_size(&mem) < sizeof(struct mpc52xx_fec)) {
+ pr_err("invalid resource size (%lx < %x), check mpc52xx_devices.c\n",
+ (unsigned long)resource_size(&mem),
+ sizeof(struct mpc52xx_fec));
+ rv = -EINVAL;
+ goto err_netdev;
+ }
+
+ if (!request_mem_region(mem.start, sizeof(struct mpc52xx_fec),
+ DRIVER_NAME)) {
+ rv = -EBUSY;
+ goto err_netdev;
+ }
+
+ /* Init ether ndev with what we have */
+ ndev->netdev_ops = &mpc52xx_fec_netdev_ops;
+ ndev->ethtool_ops = &mpc52xx_fec_ethtool_ops;
+ ndev->watchdog_timeo = FEC_WATCHDOG_TIMEOUT;
+ ndev->base_addr = mem.start;
+ SET_NETDEV_DEV(ndev, &op->dev);
+
+ spin_lock_init(&priv->lock);
+
+ /* ioremap the zones */
+ priv->fec = ioremap(mem.start, sizeof(struct mpc52xx_fec));
+
+ if (!priv->fec) {
+ rv = -ENOMEM;
+ goto err_mem_region;
+ }
+
+ /* Bestcomm init */
+ rx_fifo = ndev->base_addr + offsetof(struct mpc52xx_fec, rfifo_data);
+ tx_fifo = ndev->base_addr + offsetof(struct mpc52xx_fec, tfifo_data);
+
+ priv->rx_dmatsk = bcom_fec_rx_init(FEC_RX_NUM_BD, rx_fifo, FEC_RX_BUFFER_SIZE);
+ priv->tx_dmatsk = bcom_fec_tx_init(FEC_TX_NUM_BD, tx_fifo);
+
+ if (!priv->rx_dmatsk || !priv->tx_dmatsk) {
+ pr_err("Can not init SDMA tasks\n");
+ rv = -ENOMEM;
+ goto err_rx_tx_dmatsk;
+ }
+
+	/* Get the IRQs we need, one by one */
+ /* Control */
+ ndev->irq = irq_of_parse_and_map(np, 0);
+
+ /* RX */
+ priv->r_irq = bcom_get_task_irq(priv->rx_dmatsk);
+
+ /* TX */
+ priv->t_irq = bcom_get_task_irq(priv->tx_dmatsk);
+
+ /*
+ * MAC address init:
+ *
+ * First try to read MAC address from DT
+ */
+ rv = of_get_ethdev_address(np, ndev);
+ if (rv) {
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+ u8 addr[ETH_ALEN] __aligned(4);
+
+ /*
+		 * If the MAC address is not provided via DT then read
+ * it back from the controller regs
+ */
+ *(u32 *)(&addr[0]) = in_be32(&fec->paddr1);
+ *(u16 *)(&addr[4]) = in_be32(&fec->paddr2) >> 16;
+ eth_hw_addr_set(ndev, addr);
+ }
+
+ /*
+	 * Check if the MAC address is valid; if not, use a random one
+ */
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ eth_hw_addr_random(ndev);
+ dev_warn(&ndev->dev, "using random MAC address %pM\n",
+ ndev->dev_addr);
+ }
+
+ priv->msg_enable = netif_msg_init(debug, MPC52xx_MESSAGES_DEFAULT);
+
+ /*
+ * Link mode configuration
+ */
+
+ /* Start with safe defaults for link connection */
+ priv->speed = 100;
+ priv->duplex = DUPLEX_HALF;
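+	/* Derive the MII_SPEED divider from the IPB bus frequency so that
+	 * the MDIO clock stays within the 2.5 MHz MII management limit.
+	 */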
+ priv->mdio_speed = ((mpc5xxx_get_bus_frequency(&op->dev) >> 20) / 5) << 1;
+
+ /* The current speed preconfigures the speed of the MII link */
+ prop = of_get_property(np, "current-speed", &prop_size);
+ if (prop && (prop_size >= sizeof(u32) * 2)) {
+ priv->speed = prop[0];
+ priv->duplex = prop[1] ? DUPLEX_FULL : DUPLEX_HALF;
+ }
+
+ /* If there is a phy handle, then get the PHY node */
+ priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
+
+ /* the 7-wire property means don't use MII mode */
+ if (of_find_property(np, "fsl,7-wire-mode", NULL)) {
+ priv->seven_wire_mode = 1;
+ dev_info(&ndev->dev, "using 7-wire PHY mode\n");
+ }
+
+ /* Hardware init */
+ mpc52xx_fec_hw_init(ndev);
+ mpc52xx_fec_reset_stats(ndev);
+
+ rv = register_netdev(ndev);
+ if (rv < 0)
+ goto err_node;
+
+	/* We're done! */
+ platform_set_drvdata(op, ndev);
+ netdev_info(ndev, "%pOF MAC %pM\n",
+ op->dev.of_node, ndev->dev_addr);
+
+ return 0;
+
+err_node:
+ of_node_put(priv->phy_node);
+ irq_dispose_mapping(ndev->irq);
+err_rx_tx_dmatsk:
+ if (priv->rx_dmatsk)
+ bcom_fec_rx_release(priv->rx_dmatsk);
+ if (priv->tx_dmatsk)
+ bcom_fec_tx_release(priv->tx_dmatsk);
+ iounmap(priv->fec);
+err_mem_region:
+ release_mem_region(mem.start, sizeof(struct mpc52xx_fec));
+err_netdev:
+ free_netdev(ndev);
+
+ return rv;
+}
+
+static int
+mpc52xx_fec_remove(struct platform_device *op)
+{
+ struct net_device *ndev;
+ struct mpc52xx_fec_priv *priv;
+
+ ndev = platform_get_drvdata(op);
+ priv = netdev_priv(ndev);
+
+ unregister_netdev(ndev);
+
+ of_node_put(priv->phy_node);
+ priv->phy_node = NULL;
+
+ irq_dispose_mapping(ndev->irq);
+
+ bcom_fec_rx_release(priv->rx_dmatsk);
+ bcom_fec_tx_release(priv->tx_dmatsk);
+
+ iounmap(priv->fec);
+
+ release_mem_region(ndev->base_addr, sizeof(struct mpc52xx_fec));
+
+ free_netdev(ndev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int mpc52xx_fec_of_suspend(struct platform_device *op, pm_message_t state)
+{
+ struct net_device *dev = platform_get_drvdata(op);
+
+ if (netif_running(dev))
+ mpc52xx_fec_close(dev);
+
+ return 0;
+}
+
+static int mpc52xx_fec_of_resume(struct platform_device *op)
+{
+ struct net_device *dev = platform_get_drvdata(op);
+
+ mpc52xx_fec_hw_init(dev);
+ mpc52xx_fec_reset_stats(dev);
+
+ if (netif_running(dev))
+ mpc52xx_fec_open(dev);
+
+ return 0;
+}
+#endif
+
+static const struct of_device_id mpc52xx_fec_match[] = {
+ { .compatible = "fsl,mpc5200b-fec", },
+ { .compatible = "fsl,mpc5200-fec", },
+ { .compatible = "mpc5200-fec", },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, mpc52xx_fec_match);
+
+static struct platform_driver mpc52xx_fec_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = mpc52xx_fec_match,
+ },
+ .probe = mpc52xx_fec_probe,
+ .remove = mpc52xx_fec_remove,
+#ifdef CONFIG_PM
+ .suspend = mpc52xx_fec_of_suspend,
+ .resume = mpc52xx_fec_of_resume,
+#endif
+};
+
+
+/* ======================================================================== */
+/* Module */
+/* ======================================================================== */
+
+static struct platform_driver * const drivers[] = {
+#ifdef CONFIG_FEC_MPC52xx_MDIO
+ &mpc52xx_fec_mdio_driver,
+#endif
+ &mpc52xx_fec_driver,
+};
+
+static int __init
+mpc52xx_fec_init(void)
+{
+ return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
+}
+
+static void __exit
+mpc52xx_fec_exit(void)
+{
+ platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
+}
+
+
+module_init(mpc52xx_fec_init);
+module_exit(mpc52xx_fec_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dale Farnsworth");
+MODULE_DESCRIPTION("Ethernet driver for the Freescale MPC52xx FEC");
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.h b/drivers/net/ethernet/freescale/fec_mpc52xx.h
new file mode 100644
index 000000000..10afa54dd
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.h
@@ -0,0 +1,294 @@
+/*
+ * drivers/net/ethernet/freescale/fec_mpc52xx.h
+ *
+ * Driver for the MPC5200 Fast Ethernet Controller
+ *
+ * Author: Dale Farnsworth <dfarnsworth@mvista.com>
+ *
+ * 2003-2004 (c) MontaVista, Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+#ifndef __DRIVERS_NET_MPC52XX_FEC_H__
+#define __DRIVERS_NET_MPC52XX_FEC_H__
+
+#include <linux/phy.h>
+
+/* Tunable constants */
+/* FEC_RX_BUFFER_SIZE includes 4 bytes for CRC32 */
+#define FEC_RX_BUFFER_SIZE 1522 /* max receive packet size */
+#define FEC_RX_NUM_BD 256
+#define FEC_TX_NUM_BD 64
+
+#define FEC_RESET_DELAY 50 /* uS */
+
+#define FEC_WATCHDOG_TIMEOUT ((400*HZ)/1000)
+
+/* ======================================================================== */
+/* Hardware register sets & bits */
+/* ======================================================================== */
+
+struct mpc52xx_fec {
+ u32 fec_id; /* FEC + 0x000 */
+ u32 ievent; /* FEC + 0x004 */
+ u32 imask; /* FEC + 0x008 */
+
+ u32 reserved0[1]; /* FEC + 0x00C */
+ u32 r_des_active; /* FEC + 0x010 */
+ u32 x_des_active; /* FEC + 0x014 */
+ u32 r_des_active_cl; /* FEC + 0x018 */
+ u32 x_des_active_cl; /* FEC + 0x01C */
+ u32 ivent_set; /* FEC + 0x020 */
+ u32 ecntrl; /* FEC + 0x024 */
+
+ u32 reserved1[6]; /* FEC + 0x028-03C */
+ u32 mii_data; /* FEC + 0x040 */
+ u32 mii_speed; /* FEC + 0x044 */
+ u32 mii_status; /* FEC + 0x048 */
+
+ u32 reserved2[5]; /* FEC + 0x04C-05C */
+ u32 mib_data; /* FEC + 0x060 */
+ u32 mib_control; /* FEC + 0x064 */
+
+ u32 reserved3[6]; /* FEC + 0x068-7C */
+ u32 r_activate; /* FEC + 0x080 */
+ u32 r_cntrl; /* FEC + 0x084 */
+ u32 r_hash; /* FEC + 0x088 */
+ u32 r_data; /* FEC + 0x08C */
+ u32 ar_done; /* FEC + 0x090 */
+ u32 r_test; /* FEC + 0x094 */
+ u32 r_mib; /* FEC + 0x098 */
+ u32 r_da_low; /* FEC + 0x09C */
+ u32 r_da_high; /* FEC + 0x0A0 */
+
+ u32 reserved4[7]; /* FEC + 0x0A4-0BC */
+ u32 x_activate; /* FEC + 0x0C0 */
+ u32 x_cntrl; /* FEC + 0x0C4 */
+ u32 backoff; /* FEC + 0x0C8 */
+ u32 x_data; /* FEC + 0x0CC */
+ u32 x_status; /* FEC + 0x0D0 */
+ u32 x_mib; /* FEC + 0x0D4 */
+ u32 x_test; /* FEC + 0x0D8 */
+ u32 fdxfc_da1; /* FEC + 0x0DC */
+ u32 fdxfc_da2; /* FEC + 0x0E0 */
+ u32 paddr1; /* FEC + 0x0E4 */
+ u32 paddr2; /* FEC + 0x0E8 */
+ u32 op_pause; /* FEC + 0x0EC */
+
+ u32 reserved5[4]; /* FEC + 0x0F0-0FC */
+ u32 instr_reg; /* FEC + 0x100 */
+ u32 context_reg; /* FEC + 0x104 */
+ u32 test_cntrl; /* FEC + 0x108 */
+ u32 acc_reg; /* FEC + 0x10C */
+ u32 ones; /* FEC + 0x110 */
+ u32 zeros; /* FEC + 0x114 */
+ u32 iaddr1; /* FEC + 0x118 */
+ u32 iaddr2; /* FEC + 0x11C */
+ u32 gaddr1; /* FEC + 0x120 */
+ u32 gaddr2; /* FEC + 0x124 */
+ u32 random; /* FEC + 0x128 */
+ u32 rand1; /* FEC + 0x12C */
+ u32 tmp; /* FEC + 0x130 */
+
+ u32 reserved6[3]; /* FEC + 0x134-13C */
+ u32 fifo_id; /* FEC + 0x140 */
+ u32 x_wmrk; /* FEC + 0x144 */
+ u32 fcntrl; /* FEC + 0x148 */
+ u32 r_bound; /* FEC + 0x14C */
+ u32 r_fstart; /* FEC + 0x150 */
+ u32 r_count; /* FEC + 0x154 */
+ u32 r_lag; /* FEC + 0x158 */
+ u32 r_read; /* FEC + 0x15C */
+ u32 r_write; /* FEC + 0x160 */
+ u32 x_count; /* FEC + 0x164 */
+ u32 x_lag; /* FEC + 0x168 */
+ u32 x_retry; /* FEC + 0x16C */
+ u32 x_write; /* FEC + 0x170 */
+ u32 x_read; /* FEC + 0x174 */
+
+ u32 reserved7[2]; /* FEC + 0x178-17C */
+ u32 fm_cntrl; /* FEC + 0x180 */
+ u32 rfifo_data; /* FEC + 0x184 */
+ u32 rfifo_status; /* FEC + 0x188 */
+ u32 rfifo_cntrl; /* FEC + 0x18C */
+ u32 rfifo_lrf_ptr; /* FEC + 0x190 */
+ u32 rfifo_lwf_ptr; /* FEC + 0x194 */
+ u32 rfifo_alarm; /* FEC + 0x198 */
+ u32 rfifo_rdptr; /* FEC + 0x19C */
+ u32 rfifo_wrptr; /* FEC + 0x1A0 */
+ u32 tfifo_data; /* FEC + 0x1A4 */
+ u32 tfifo_status; /* FEC + 0x1A8 */
+ u32 tfifo_cntrl; /* FEC + 0x1AC */
+ u32 tfifo_lrf_ptr; /* FEC + 0x1B0 */
+ u32 tfifo_lwf_ptr; /* FEC + 0x1B4 */
+ u32 tfifo_alarm; /* FEC + 0x1B8 */
+ u32 tfifo_rdptr; /* FEC + 0x1BC */
+ u32 tfifo_wrptr; /* FEC + 0x1C0 */
+
+ u32 reset_cntrl; /* FEC + 0x1C4 */
+ u32 xmit_fsm; /* FEC + 0x1C8 */
+
+ u32 reserved8[3]; /* FEC + 0x1CC-1D4 */
+ u32 rdes_data0; /* FEC + 0x1D8 */
+ u32 rdes_data1; /* FEC + 0x1DC */
+ u32 r_length; /* FEC + 0x1E0 */
+ u32 x_length; /* FEC + 0x1E4 */
+ u32 x_addr; /* FEC + 0x1E8 */
+ u32 cdes_data; /* FEC + 0x1EC */
+ u32 status; /* FEC + 0x1F0 */
+ u32 dma_control; /* FEC + 0x1F4 */
+ u32 des_cmnd; /* FEC + 0x1F8 */
+ u32 data; /* FEC + 0x1FC */
+
+ u32 rmon_t_drop; /* FEC + 0x200 */
+ u32 rmon_t_packets; /* FEC + 0x204 */
+ u32 rmon_t_bc_pkt; /* FEC + 0x208 */
+ u32 rmon_t_mc_pkt; /* FEC + 0x20C */
+ u32 rmon_t_crc_align; /* FEC + 0x210 */
+ u32 rmon_t_undersize; /* FEC + 0x214 */
+ u32 rmon_t_oversize; /* FEC + 0x218 */
+ u32 rmon_t_frag; /* FEC + 0x21C */
+ u32 rmon_t_jab; /* FEC + 0x220 */
+ u32 rmon_t_col; /* FEC + 0x224 */
+ u32 rmon_t_p64; /* FEC + 0x228 */
+ u32 rmon_t_p65to127; /* FEC + 0x22C */
+ u32 rmon_t_p128to255; /* FEC + 0x230 */
+ u32 rmon_t_p256to511; /* FEC + 0x234 */
+ u32 rmon_t_p512to1023; /* FEC + 0x238 */
+ u32 rmon_t_p1024to2047; /* FEC + 0x23C */
+ u32 rmon_t_p_gte2048; /* FEC + 0x240 */
+ u32 rmon_t_octets; /* FEC + 0x244 */
+ u32 ieee_t_drop; /* FEC + 0x248 */
+ u32 ieee_t_frame_ok; /* FEC + 0x24C */
+ u32 ieee_t_1col; /* FEC + 0x250 */
+ u32 ieee_t_mcol; /* FEC + 0x254 */
+ u32 ieee_t_def; /* FEC + 0x258 */
+ u32 ieee_t_lcol; /* FEC + 0x25C */
+ u32 ieee_t_excol; /* FEC + 0x260 */
+ u32 ieee_t_macerr; /* FEC + 0x264 */
+ u32 ieee_t_cserr; /* FEC + 0x268 */
+ u32 ieee_t_sqe; /* FEC + 0x26C */
+ u32 t_fdxfc; /* FEC + 0x270 */
+ u32 ieee_t_octets_ok; /* FEC + 0x274 */
+
+ u32 reserved9[2]; /* FEC + 0x278-27C */
+ u32 rmon_r_drop; /* FEC + 0x280 */
+ u32 rmon_r_packets; /* FEC + 0x284 */
+ u32 rmon_r_bc_pkt; /* FEC + 0x288 */
+ u32 rmon_r_mc_pkt; /* FEC + 0x28C */
+ u32 rmon_r_crc_align; /* FEC + 0x290 */
+ u32 rmon_r_undersize; /* FEC + 0x294 */
+ u32 rmon_r_oversize; /* FEC + 0x298 */
+ u32 rmon_r_frag; /* FEC + 0x29C */
+ u32 rmon_r_jab; /* FEC + 0x2A0 */
+
+ u32 rmon_r_resvd_0; /* FEC + 0x2A4 */
+
+ u32 rmon_r_p64; /* FEC + 0x2A8 */
+ u32 rmon_r_p65to127; /* FEC + 0x2AC */
+ u32 rmon_r_p128to255; /* FEC + 0x2B0 */
+ u32 rmon_r_p256to511; /* FEC + 0x2B4 */
+ u32 rmon_r_p512to1023; /* FEC + 0x2B8 */
+ u32 rmon_r_p1024to2047; /* FEC + 0x2BC */
+ u32 rmon_r_p_gte2048; /* FEC + 0x2C0 */
+ u32 rmon_r_octets; /* FEC + 0x2C4 */
+ u32 ieee_r_drop; /* FEC + 0x2C8 */
+ u32 ieee_r_frame_ok; /* FEC + 0x2CC */
+ u32 ieee_r_crc; /* FEC + 0x2D0 */
+ u32 ieee_r_align; /* FEC + 0x2D4 */
+ u32 r_macerr; /* FEC + 0x2D8 */
+ u32 r_fdxfc; /* FEC + 0x2DC */
+ u32 ieee_r_octets_ok; /* FEC + 0x2E0 */
+
+ u32 reserved10[7]; /* FEC + 0x2E4-2FC */
+
+ u32 reserved11[64]; /* FEC + 0x300-3FF */
+};
+
+#define FEC_MIB_DISABLE 0x80000000
+
+#define FEC_IEVENT_HBERR 0x80000000
+#define FEC_IEVENT_BABR 0x40000000
+#define FEC_IEVENT_BABT 0x20000000
+#define FEC_IEVENT_GRA 0x10000000
+#define FEC_IEVENT_TFINT 0x08000000
+#define FEC_IEVENT_MII 0x00800000
+#define FEC_IEVENT_LATE_COL 0x00200000
+#define FEC_IEVENT_COL_RETRY_LIM 0x00100000
+#define FEC_IEVENT_XFIFO_UN 0x00080000
+#define FEC_IEVENT_XFIFO_ERROR 0x00040000
+#define FEC_IEVENT_RFIFO_ERROR 0x00020000
+
+#define FEC_IMASK_HBERR 0x80000000
+#define FEC_IMASK_BABR 0x40000000
+#define FEC_IMASK_BABT 0x20000000
+#define FEC_IMASK_GRA 0x10000000
+#define FEC_IMASK_MII 0x00800000
+#define FEC_IMASK_LATE_COL 0x00200000
+#define FEC_IMASK_COL_RETRY_LIM 0x00100000
+#define FEC_IMASK_XFIFO_UN 0x00080000
+#define FEC_IMASK_XFIFO_ERROR 0x00040000
+#define FEC_IMASK_RFIFO_ERROR 0x00020000
+
+/* all but MII, which is enabled separately */
+#define FEC_IMASK_ENABLE (FEC_IMASK_HBERR | FEC_IMASK_BABR | \
+ FEC_IMASK_BABT | FEC_IMASK_GRA | FEC_IMASK_LATE_COL | \
+ FEC_IMASK_COL_RETRY_LIM | FEC_IMASK_XFIFO_UN | \
+ FEC_IMASK_XFIFO_ERROR | FEC_IMASK_RFIFO_ERROR)
+
+#define FEC_RCNTRL_MAX_FL_SHIFT 16
+#define FEC_RCNTRL_LOOP 0x01
+#define FEC_RCNTRL_DRT 0x02
+#define FEC_RCNTRL_MII_MODE 0x04
+#define FEC_RCNTRL_PROM 0x08
+#define FEC_RCNTRL_BC_REJ 0x10
+#define FEC_RCNTRL_FCE 0x20
+
+#define FEC_TCNTRL_GTS 0x00000001
+#define FEC_TCNTRL_HBC 0x00000002
+#define FEC_TCNTRL_FDEN 0x00000004
+#define FEC_TCNTRL_TFC_PAUSE 0x00000008
+#define FEC_TCNTRL_RFC_PAUSE 0x00000010
+
+#define FEC_ECNTRL_RESET 0x00000001
+#define FEC_ECNTRL_ETHER_EN 0x00000002
+
+#define FEC_MII_DATA_ST 0x40000000 /* Start frame */
+#define FEC_MII_DATA_OP_RD 0x20000000 /* Perform read */
+#define FEC_MII_DATA_OP_WR 0x10000000 /* Perform write */
+#define FEC_MII_DATA_PA_MSK 0x0f800000 /* PHY Address mask */
+#define FEC_MII_DATA_RA_MSK 0x007c0000 /* PHY Register mask */
+#define FEC_MII_DATA_TA 0x00020000 /* Turnaround */
+#define FEC_MII_DATA_DATAMSK 0x0000ffff /* PHY data mask */
+
+#define FEC_MII_READ_FRAME (FEC_MII_DATA_ST | FEC_MII_DATA_OP_RD | FEC_MII_DATA_TA)
+#define FEC_MII_WRITE_FRAME (FEC_MII_DATA_ST | FEC_MII_DATA_OP_WR | FEC_MII_DATA_TA)
+
+#define FEC_MII_DATA_RA_SHIFT 0x12 /* MII reg addr bits */
+#define FEC_MII_DATA_PA_SHIFT 0x17 /* MII PHY addr bits */
+
+#define FEC_PADDR2_TYPE 0x8808
+
+#define FEC_OP_PAUSE_OPCODE 0x00010000
+
+#define FEC_FIFO_WMRK_256B 0x3
+
+#define FEC_FIFO_STATUS_ERR 0x00400000
+#define FEC_FIFO_STATUS_UF 0x00200000
+#define FEC_FIFO_STATUS_OF 0x00100000
+
+#define FEC_FIFO_CNTRL_FRAME 0x08000000
+#define FEC_FIFO_CNTRL_LTG_7 0x07000000
+
+#define FEC_RESET_CNTRL_RESET_FIFO 0x02000000
+#define FEC_RESET_CNTRL_ENABLE_IS_RESET 0x01000000
+
+#define FEC_XMIT_FSM_APPEND_CRC 0x02000000
+#define FEC_XMIT_FSM_ENABLE_CRC 0x01000000
+
+
+extern struct platform_driver mpc52xx_fec_mdio_driver;
+
+#endif /* __DRIVERS_NET_MPC52XX_FEC_H__ */
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
new file mode 100644
index 000000000..95f778cce
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
@@ -0,0 +1,154 @@
+/*
+ * Driver for the MPC5200 Fast Ethernet Controller - MDIO bus driver
+ *
+ * Copyright (C) 2007 Domen Puncer, Telargo, Inc.
+ * Copyright (C) 2008 Wolfram Sang, Pengutronix
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <asm/io.h>
+#include <asm/mpc52xx.h>
+#include "fec_mpc52xx.h"
+
+struct mpc52xx_fec_mdio_priv {
+ struct mpc52xx_fec __iomem *regs;
+};
+
+static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id,
+ int reg, u32 value)
+{
+ struct mpc52xx_fec_mdio_priv *priv = bus->priv;
+ struct mpc52xx_fec __iomem *fec = priv->regs;
+ int tries = 3;
+
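+	/* Merge the PHY and register addresses into the ST/OP/TA frame
+	 * template passed in via 'value', then kick off the transfer.
+	 */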
+ value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
+ value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
+
+ out_be32(&fec->ievent, FEC_IEVENT_MII);
+ out_be32(&fec->mii_data, value);
+
+	/* Wait for the transfer to finish; this takes about 23 us on a lite5200b */
+ while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries)
+ msleep(1);
+
+ if (!tries)
+ return -ETIMEDOUT;
+
+ return value & FEC_MII_DATA_OP_RD ?
+ in_be32(&fec->mii_data) & FEC_MII_DATA_DATAMSK : 0;
+}
+
+static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+ return mpc52xx_fec_mdio_transfer(bus, phy_id, reg, FEC_MII_READ_FRAME);
+}
+
+static int mpc52xx_fec_mdio_write(struct mii_bus *bus, int phy_id, int reg,
+ u16 data)
+{
+ return mpc52xx_fec_mdio_transfer(bus, phy_id, reg,
+ data | FEC_MII_WRITE_FRAME);
+}
+
+static int mpc52xx_fec_mdio_probe(struct platform_device *of)
+{
+ struct device *dev = &of->dev;
+ struct device_node *np = of->dev.of_node;
+ struct mii_bus *bus;
+ struct mpc52xx_fec_mdio_priv *priv;
+ struct resource res;
+ int err;
+
+ bus = mdiobus_alloc();
+ if (bus == NULL)
+ return -ENOMEM;
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (priv == NULL) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ bus->name = "mpc52xx MII bus";
+ bus->read = mpc52xx_fec_mdio_read;
+ bus->write = mpc52xx_fec_mdio_write;
+
+ /* setup registers */
+ err = of_address_to_resource(np, 0, &res);
+ if (err)
+ goto out_free;
+ priv->regs = ioremap(res.start, resource_size(&res));
+ if (priv->regs == NULL) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start);
+ bus->priv = priv;
+
+ bus->parent = dev;
+ dev_set_drvdata(dev, bus);
+
+ /* set MII speed */
+ out_be32(&priv->regs->mii_speed, ((mpc5xxx_get_bus_frequency(dev) >> 20) / 5) << 1);
+
+ err = of_mdiobus_register(bus, np);
+ if (err)
+ goto out_unmap;
+
+ return 0;
+
+ out_unmap:
+ iounmap(priv->regs);
+ out_free:
+ kfree(priv);
+ mdiobus_free(bus);
+
+ return err;
+}
+
+static int mpc52xx_fec_mdio_remove(struct platform_device *of)
+{
+ struct mii_bus *bus = platform_get_drvdata(of);
+ struct mpc52xx_fec_mdio_priv *priv = bus->priv;
+
+ mdiobus_unregister(bus);
+ iounmap(priv->regs);
+ kfree(priv);
+ mdiobus_free(bus);
+
+ return 0;
+}
+
+static const struct of_device_id mpc52xx_fec_mdio_match[] = {
+ { .compatible = "fsl,mpc5200b-mdio", },
+ { .compatible = "fsl,mpc5200-mdio", },
+ { .compatible = "mpc5200b-fec-phy", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mpc52xx_fec_mdio_match);
+
+struct platform_driver mpc52xx_fec_mdio_driver = {
+ .driver = {
+ .name = "mpc5200b-fec-phy",
+ .owner = THIS_MODULE,
+ .of_match_table = mpc52xx_fec_mdio_match,
+ },
+ .probe = mpc52xx_fec_mdio_probe,
+ .remove = mpc52xx_fec_mdio_remove,
+};
+
+/* let fec driver call it, since this has to be registered before it */
+EXPORT_SYMBOL_GPL(mpc52xx_fec_mdio_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
new file mode 100644
index 000000000..cffd9ad49
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -0,0 +1,639 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Fast Ethernet Controller (ENET) PTP driver for MX6x.
+ *
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/fec.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_net.h>
+
+#include "fec.h"
+
+/* FEC 1588 register bits */
+#define FEC_T_CTRL_SLAVE 0x00002000
+#define FEC_T_CTRL_CAPTURE 0x00000800
+#define FEC_T_CTRL_RESTART 0x00000200
+#define FEC_T_CTRL_PERIOD_RST 0x00000030
+#define FEC_T_CTRL_PERIOD_EN 0x00000010
+#define FEC_T_CTRL_ENABLE 0x00000001
+
+#define FEC_T_INC_MASK 0x0000007f
+#define FEC_T_INC_OFFSET 0
+#define FEC_T_INC_CORR_MASK 0x00007f00
+#define FEC_T_INC_CORR_OFFSET 8
+
+#define FEC_T_CTRL_PINPER 0x00000080
+#define FEC_T_TF0_MASK 0x00000001
+#define FEC_T_TF0_OFFSET 0
+#define FEC_T_TF1_MASK 0x00000002
+#define FEC_T_TF1_OFFSET 1
+#define FEC_T_TF2_MASK 0x00000004
+#define FEC_T_TF2_OFFSET 2
+#define FEC_T_TF3_MASK 0x00000008
+#define FEC_T_TF3_OFFSET 3
+#define FEC_T_TDRE_MASK 0x00000001
+#define FEC_T_TDRE_OFFSET 0
+#define FEC_T_TMODE_MASK 0x0000003C
+#define FEC_T_TMODE_OFFSET 2
+#define FEC_T_TIE_MASK 0x00000040
+#define FEC_T_TIE_OFFSET 6
+#define FEC_T_TF_MASK 0x00000080
+#define FEC_T_TF_OFFSET 7
+
+#define FEC_ATIME_CTRL 0x400
+#define FEC_ATIME 0x404
+#define FEC_ATIME_EVT_OFFSET 0x408
+#define FEC_ATIME_EVT_PERIOD 0x40c
+#define FEC_ATIME_CORR 0x410
+#define FEC_ATIME_INC 0x414
+#define FEC_TS_TIMESTAMP 0x418
+
+#define FEC_TGSR 0x604
+#define FEC_TCSR(n) (0x608 + n * 0x08)
+#define FEC_TCCR(n) (0x60C + n * 0x08)
+#define MAX_TIMER_CHANNEL 3
+#define FEC_TMODE_TOGGLE 0x05
+#define FEC_HIGH_PULSE 0x0F
+
+#define FEC_CC_MULT (1 << 31)
+#define FEC_COUNTER_PERIOD (1 << 31)
+#define PPS_OUPUT_RELOAD_PERIOD NSEC_PER_SEC
+#define FEC_CHANNLE_0 0
+#define DEFAULT_PPS_CHANNEL FEC_CHANNLE_0
+
+/**
+ * fec_ptp_enable_pps
+ * @fep: the fec_enet_private structure handle
+ * @enable: enable the channel pps output
+ *
+ * This function enables the PPS output on the timer channel.
+ */
+static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
+{
+ unsigned long flags;
+ u32 val, tempval;
+ struct timespec64 ts;
+ u64 ns;
+
+ if (fep->pps_enable == enable)
+ return 0;
+
+ fep->pps_channel = DEFAULT_PPS_CHANNEL;
+ fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
+
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+
+ if (enable) {
+		/* Clear the capture or output compare interrupt status,
+		 * if any is pending.
+		 */
+ writel(FEC_T_TF_MASK, fep->hwp + FEC_TCSR(fep->pps_channel));
+
+ /* It is recommended to verify that the TMODE field in the
+ * TCSR register is cleared before the first compare counter
+ * is written into the TCCR register, so double check it here.
+ */
+ val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
+ do {
+ val &= ~(FEC_T_TMODE_MASK);
+ writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));
+ val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
+ } while (val & FEC_T_TMODE_MASK);
+
+ /* Dummy read to bring the time counter up to date */
+ timecounter_read(&fep->tc);
+ /* We want the first compare event to fire at the next second
+ * boundary, so we need to know the current ptp time and how many
+ * nanoseconds remain until the next second. The remaining
+ * nanoseconds are NSEC_PER_SEC - ts.tv_nsec; adding them to the
+ * current timer value gives the next second boundary.
+ */
+ tempval = fep->cc.read(&fep->cc);
+ /* Convert the ptp local counter to 1588 timestamp */
+ ns = timecounter_cyc2time(&fep->tc, tempval);
+ ts = ns_to_timespec64(ns);
+
+ /* tempval is less than 3 seconds worth of ns, and so val is
+ * less than 4 seconds; the 32-bit calculation cannot overflow.
+ */
+ val = NSEC_PER_SEC - (u32)ts.tv_nsec + tempval;
+
+ /* We need to consider the case where the current time is very
+ * close to the second boundary, i.e. NSEC_PER_SEC - ts.tv_nsec is
+ * close to zero (for example 20 ns). Since the timer keeps running
+ * while we calculate the first compare event, the remaining
+ * nanoseconds may run out before the compare counter is calculated
+ * and written into the TCCR register. To avoid this, set the
+ * compare event to the second after next. The timer is 31 bits
+ * wide and wraps after just over 2 seconds, so targeting the
+ * second after next is safe.
+ */
+ val += NSEC_PER_SEC;
+
+ /* We add (2 * NSEC_PER_SEC - (u32)ts.tv_nsec) to the current
+ * ptp counter, which may cause a 32-bit wrap. Since this offset
+ * is less than 2 seconds, the wrap cannot cause a problem; an
+ * offset bigger than fep->cc.mask would be an error.
+ */
+ val &= fep->cc.mask;
+ writel(val, fep->hwp + FEC_TCCR(fep->pps_channel));
+
+ /* Calculate the second compare event timestamp */
+ fep->next_counter = (val + fep->reload_period) & fep->cc.mask;
+
+ /* Enable compare event when overflow occurs */
+ val = readl(fep->hwp + FEC_ATIME_CTRL);
+ val |= FEC_T_CTRL_PINPER;
+ writel(val, fep->hwp + FEC_ATIME_CTRL);
+
+ /* Compare channel setting. */
+ val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
+ val |= (1 << FEC_T_TF_OFFSET | 1 << FEC_T_TIE_OFFSET);
+ val &= ~(1 << FEC_T_TDRE_OFFSET);
+ val &= ~(FEC_T_TMODE_MASK);
+ val |= (FEC_HIGH_PULSE << FEC_T_TMODE_OFFSET);
+ writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));
+
+ /* Write the second compare event timestamp and calculate
+ * the third timestamp. Refer to the TCCR register details in the spec.
+ */
+ writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel));
+ fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask;
+ } else {
+ writel(0, fep->hwp + FEC_TCSR(fep->pps_channel));
+ }
+
+ fep->pps_enable = enable;
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+ return 0;
+}
+
+/**
+ * fec_ptp_read - read raw cycle counter (to be used by time counter)
+ * @cc: the cyclecounter structure
+ *
+ * This function reads the cycle counter registers and is called by the
+ * cyclecounter structure used to construct a ns counter from the
+ * arbitrary fixed-point registers.
+ */
+static u64 fec_ptp_read(const struct cyclecounter *cc)
+{
+ struct fec_enet_private *fep =
+ container_of(cc, struct fec_enet_private, cc);
+ u32 tempval;
+
+ tempval = readl(fep->hwp + FEC_ATIME_CTRL);
+ tempval |= FEC_T_CTRL_CAPTURE;
+ writel(tempval, fep->hwp + FEC_ATIME_CTRL);
+
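+ /* On parts with FEC_QUIRK_BUG_CAPTURE, wait briefly for the
+ * capture to latch before reading FEC_ATIME below.
+ */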
+ if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
+ udelay(1);
+
+ return readl(fep->hwp + FEC_ATIME);
+}
+
+/**
+ * fec_ptp_start_cyclecounter - create the cycle counter from hw
+ * @ndev: network device
+ *
+ * This function initializes the timecounter and cyclecounter
+ * structures for use in generating a ns counter from the arbitrary
+ * fixed-point cycle registers in the hardware.
+ */
+void fec_ptp_start_cyclecounter(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ unsigned long flags;
+ int inc;
+
+ inc = 1000000000 / fep->cycle_speed;
+
+ /* grab the ptp lock */
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+
+ /* 1ns counter */
+ writel(inc << FEC_T_INC_OFFSET, fep->hwp + FEC_ATIME_INC);
+
+ /* use 31-bit timer counter */
+ writel(FEC_COUNTER_PERIOD, fep->hwp + FEC_ATIME_EVT_PERIOD);
+
+ writel(FEC_T_CTRL_ENABLE | FEC_T_CTRL_PERIOD_RST,
+ fep->hwp + FEC_ATIME_CTRL);
+
+ memset(&fep->cc, 0, sizeof(fep->cc));
+ fep->cc.read = fec_ptp_read;
+ fep->cc.mask = CLOCKSOURCE_MASK(31);
+ fep->cc.shift = 31;
+ fep->cc.mult = FEC_CC_MULT;
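+
+ /* With the 1 ns increment programmed above, mult = 2^31 and
+ * shift = 31 make the cyclecounter conversion an identity:
+ * (cycles * mult) >> shift == cycles, i.e. one count per ns.
+ */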
+
+ /* reset the ns time counter */
+ timecounter_init(&fep->tc, &fep->cc, 0);
+
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+}
+
+/**
+ * fec_ptp_adjfreq - adjust ptp cycle frequency
+ * @ptp: the ptp clock structure
+ * @ppb: parts per billion adjustment from base
+ *
+ * Adjust the frequency of the ptp cycle counter by the
+ * indicated ppb from the base frequency.
+ *
+ * Because ENET hardware frequency adjustment is complex,
+ * a software method is used instead.
+ */
+static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ unsigned long flags;
+ int neg_adj = 0;
+ u32 i, tmp;
+ u32 corr_inc, corr_period;
+ u32 corr_ns;
+ u64 lhs, rhs;
+
+ struct fec_enet_private *fep =
+ container_of(ptp, struct fec_enet_private, ptp_caps);
+
+ if (ppb == 0)
+ return 0;
+
+ if (ppb < 0) {
+ ppb = -ppb;
+ neg_adj = 1;
+ }
+
+ /* In theory, corr_inc/corr_period = ppb/NSEC_PER_SEC;
+ * Try to find the corr_inc between 1 to fep->ptp_inc to
+ * meet adjustment requirement.
+ */
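+ /* Illustrative example (values assumed, not taken from the code):
+ * with a 125 MHz ptp clock, ptp_inc = 8 ns. For ppb = 100 the loop
+ * below finds corr_inc = 1 and corr_period = NSEC_PER_SEC / 800 =
+ * 1250000, i.e. one extra (or one fewer) ns every 1250000 ticks,
+ * which amounts to 100 ns per second = 100 ppb.
+ */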
+ lhs = NSEC_PER_SEC;
+ rhs = (u64)ppb * (u64)fep->ptp_inc;
+ for (i = 1; i <= fep->ptp_inc; i++) {
+ if (lhs >= rhs) {
+ corr_inc = i;
+ corr_period = div_u64(lhs, rhs);
+ break;
+ }
+ lhs += NSEC_PER_SEC;
+ }
+ /* Not found? Fall back to the maximum correction: a full
+ * increment (double speed) applied on every clock step.
+ */
+ if (i > fep->ptp_inc) {
+ corr_inc = fep->ptp_inc;
+ corr_period = 1;
+ }
+
+ if (neg_adj)
+ corr_ns = fep->ptp_inc - corr_inc;
+ else
+ corr_ns = fep->ptp_inc + corr_inc;
+
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+
+ tmp = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
+ tmp |= corr_ns << FEC_T_INC_CORR_OFFSET;
+ writel(tmp, fep->hwp + FEC_ATIME_INC);
+ corr_period = corr_period > 1 ? corr_period - 1 : corr_period;
+ writel(corr_period, fep->hwp + FEC_ATIME_CORR);
+ /* dummy read to update the timer. */
+ timecounter_read(&fep->tc);
+
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+ return 0;
+}
+
+/**
+ * fec_ptp_adjtime
+ * @ptp: the ptp clock structure
+ * @delta: offset to adjust the cycle counter by
+ *
+ * Adjust the timer by applying the delta to the timecounter.
+ */
+static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct fec_enet_private *fep =
+ container_of(ptp, struct fec_enet_private, ptp_caps);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+ timecounter_adjtime(&fep->tc, delta);
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+ return 0;
+}
+
+/**
+ * fec_ptp_gettime
+ * @ptp: the ptp clock structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * Read the timecounter and return the correct value in ns,
+ * after converting it into a struct timespec64.
+ */
+static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct fec_enet_private *adapter =
+ container_of(ptp, struct fec_enet_private, ptp_caps);
+ u64 ns;
+ unsigned long flags;
+
+ mutex_lock(&adapter->ptp_clk_mutex);
+ /* Check the ptp clock */
+ if (!adapter->ptp_clk_on) {
+ mutex_unlock(&adapter->ptp_clk_mutex);
+ return -EINVAL;
+ }
+ spin_lock_irqsave(&adapter->tmreg_lock, flags);
+ ns = timecounter_read(&adapter->tc);
+ spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+ mutex_unlock(&adapter->ptp_clk_mutex);
+
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+/**
+ * fec_ptp_settime
+ * @ptp: the ptp clock structure
+ * @ts: the timespec containing the new time for the cycle counter
+ *
+ * reset the timecounter to use a new base value instead of the kernel
+ * wall timer value.
+ */
+static int fec_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct fec_enet_private *fep =
+ container_of(ptp, struct fec_enet_private, ptp_caps);
+
+ u64 ns;
+ unsigned long flags;
+ u32 counter;
+
+ mutex_lock(&fep->ptp_clk_mutex);
+ /* Check the ptp clock */
+ if (!fep->ptp_clk_on) {
+ mutex_unlock(&fep->ptp_clk_mutex);
+ return -EINVAL;
+ }
+
+ ns = timespec64_to_ns(ts);
+ /* Get the timer value based on timestamp.
+ * Update the counter with the masked value.
+ */
+ counter = ns & fep->cc.mask;
+
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+ writel(counter, fep->hwp + FEC_ATIME);
+ timecounter_init(&fep->tc, &fep->cc, ns);
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ mutex_unlock(&fep->ptp_clk_mutex);
+ return 0;
+}
+
+/**
+ * fec_ptp_enable
+ * @ptp: the ptp clock structure
+ * @rq: the requested feature to change
+ * @on: whether to enable or disable the feature
+ *
+ */
+static int fec_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct fec_enet_private *fep =
+ container_of(ptp, struct fec_enet_private, ptp_caps);
+ int ret = 0;
+
+ if (rq->type == PTP_CLK_REQ_PPS) {
+ ret = fec_ptp_enable_pps(fep, on);
+
+ return ret;
+ }
+ return -EOPNOTSUPP;
+}
+
+/**
+ * fec_ptp_disable_hwts - disable hardware time stamping
+ * @ndev: pointer to net_device
+ */
+void fec_ptp_disable_hwts(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ fep->hwts_tx_en = 0;
+ fep->hwts_rx_en = 0;
+}
+
+int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ struct hwtstamp_config config;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ fep->hwts_tx_en = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ fep->hwts_tx_en = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ fep->hwts_rx_en = 0;
+ break;
+
+ default:
+ fep->hwts_rx_en = 1;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ }
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct hwtstamp_config config;
+
+ config.flags = 0;
+ config.tx_type = fep->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ config.rx_filter = (fep->hwts_rx_en ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+/*
+ * fec_time_keep - call timecounter_read() every second to avoid timer overrun,
+ * because ENET only supports a 32-bit counter and would overflow in about 4s
+ */
+static void fec_time_keep(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep);
+ unsigned long flags;
+
+ mutex_lock(&fep->ptp_clk_mutex);
+ if (fep->ptp_clk_on) {
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+ timecounter_read(&fep->tc);
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ }
+ mutex_unlock(&fep->ptp_clk_mutex);
+
+ schedule_delayed_work(&fep->time_keep, HZ);
+}
+
+/* This function checks the pps event and reloads the timer compare counter. */
+static irqreturn_t fec_pps_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ u32 val;
+ u8 channel = fep->pps_channel;
+ struct ptp_clock_event event;
+
+ val = readl(fep->hwp + FEC_TCSR(channel));
+ if (val & FEC_T_TF_MASK) {
+ /* Write the next-next compare value (not the next one, per the
+ * spec) to the register.
+ */
+ writel(fep->next_counter, fep->hwp + FEC_TCCR(channel));
+ do {
+ writel(val, fep->hwp + FEC_TCSR(channel));
+ } while (readl(fep->hwp + FEC_TCSR(channel)) & FEC_T_TF_MASK);
+
+ /* Update the counter */
+ fep->next_counter = (fep->next_counter + fep->reload_period) &
+ fep->cc.mask;
+
+ event.type = PTP_CLOCK_PPS;
+ ptp_clock_event(fep->ptp_clock, &event);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+/**
+ * fec_ptp_init
+ * @pdev: The FEC network adapter
+ * @irq_idx: the interrupt index
+ *
+ * This function performs the required steps for enabling ptp
+ * support. If ptp support has already been loaded it simply calls the
+ * cyclecounter init routine and exits.
+ */
+
+void fec_ptp_init(struct platform_device *pdev, int irq_idx)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int irq;
+ int ret;
+
+ fep->ptp_caps.owner = THIS_MODULE;
+ strscpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name));
+
+ fep->ptp_caps.max_adj = 250000000;
+ fep->ptp_caps.n_alarm = 0;
+ fep->ptp_caps.n_ext_ts = 0;
+ fep->ptp_caps.n_per_out = 0;
+ fep->ptp_caps.n_pins = 0;
+ fep->ptp_caps.pps = 1;
+ fep->ptp_caps.adjfreq = fec_ptp_adjfreq;
+ fep->ptp_caps.adjtime = fec_ptp_adjtime;
+ fep->ptp_caps.gettime64 = fec_ptp_gettime;
+ fep->ptp_caps.settime64 = fec_ptp_settime;
+ fep->ptp_caps.enable = fec_ptp_enable;
+
+ fep->cycle_speed = clk_get_rate(fep->clk_ptp);
+ if (!fep->cycle_speed) {
+ fep->cycle_speed = NSEC_PER_SEC;
+ dev_err(&fep->pdev->dev, "clk_ptp clock rate is zero\n");
+ }
+ fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;
+
+ spin_lock_init(&fep->tmreg_lock);
+
+ fec_ptp_start_cyclecounter(ndev);
+
+ INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
+
+ irq = platform_get_irq_byname_optional(pdev, "pps");
+ if (irq < 0)
+ irq = platform_get_irq_optional(pdev, irq_idx);
+ /* Failure to get an irq is not fatal,
+ * only the PTP_CLOCK_PPS clock events should stop
+ */
+ if (irq >= 0) {
+ ret = devm_request_irq(&pdev->dev, irq, fec_pps_interrupt,
+ 0, pdev->name, ndev);
+ if (ret < 0)
+ dev_warn(&pdev->dev, "request for pps irq failed(%d)\n",
+ ret);
+ }
+
+ fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev);
+ if (IS_ERR(fep->ptp_clock)) {
+ fep->ptp_clock = NULL;
+ dev_err(&pdev->dev, "ptp_clock_register failed\n");
+ }
+
+ schedule_delayed_work(&fep->time_keep, HZ);
+}
+
+void fec_ptp_stop(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ cancel_delayed_work_sync(&fep->time_keep);
+ if (fep->ptp_clock)
+ ptp_clock_unregister(fep->ptp_clock);
+}
diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig
new file mode 100644
index 000000000..48bf80887
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/Kconfig
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config FSL_FMAN
+ tristate "FMan support"
+ depends on FSL_SOC || ARCH_LAYERSCAPE || COMPILE_TEST
+ select GENERIC_ALLOCATOR
+ select PHYLIB
+ select CRC32
+ default n
+ help
+ Freescale Data-Path Acceleration Architecture Frame Manager
+ (FMan) support
+
+config DPAA_ERRATUM_A050385
+ bool
+ depends on ARM64 && FSL_DPAA
+ default y
+ help
+ DPAA FMan erratum A050385 software workaround implementation:
+ align buffers, data start, SG fragment length to avoid FMan DMA
+ splits.
+ FMAN DMA reads or writes under heavy traffic load may cause an
+ FMAN internal resource leak, thus stopping further packet processing.
+ The FMAN internal queue can overflow when FMAN splits single
+ read or write transactions into multiple smaller transactions
+ such that more than 17 AXI transactions are in flight from FMAN
+ to interconnect. When the FMAN internal queue overflows, it can
+ stall further packet processing. The issue can occur with any
+ one of the following three conditions:
+ 1. FMAN AXI transaction crosses 4K address boundary (Errata
+ A010022)
+ 2. FMAN DMA address for an AXI transaction is not 16 byte
+ aligned, i.e. the last 4 bits of an address are non-zero
+ 3. Scatter Gather (SG) frames have more than one SG buffer in
+ the SG list and any one of the buffers, except the last
+ buffer in the SG list has data size that is not a multiple
+ of 16 bytes, i.e., other than 16, 32, 48, 64, etc.
+ With any one of the above three conditions present, there is
+ likelihood of stalled FMAN packet processing, especially under
+ stress with multiple ports injecting line-rate traffic.
diff --git a/drivers/net/ethernet/freescale/fman/Makefile b/drivers/net/ethernet/freescale/fman/Makefile
new file mode 100644
index 000000000..b618091db
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+subdir-ccflags-y += -I$(srctree)/drivers/net/ethernet/freescale/fman
+
+obj-$(CONFIG_FSL_FMAN) += fsl_dpaa_fman.o
+obj-$(CONFIG_FSL_FMAN) += fsl_dpaa_fman_port.o
+obj-$(CONFIG_FSL_FMAN) += fsl_dpaa_mac.o
+
+fsl_dpaa_fman-objs := fman_muram.o fman.o fman_sp.o fman_keygen.o
+fsl_dpaa_fman_port-objs := fman_port.o
+fsl_dpaa_mac-objs := mac.o fman_dtsec.o fman_memac.o fman_tgec.o
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
new file mode 100644
index 000000000..9d85fb136
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -0,0 +1,2934 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
+ * Copyright 2020 NXP
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/fsl/guts.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/clk.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/libfdt_env.h>
+
+#include "fman.h"
+#include "fman_muram.h"
+#include "fman_keygen.h"
+
+/* General defines */
+#define FMAN_LIODN_TBL 64 /* size of LIODN table */
+#define MAX_NUM_OF_MACS 10
+#define FM_NUM_OF_FMAN_CTRL_EVENT_REGS 4
+#define BASE_RX_PORTID 0x08
+#define BASE_TX_PORTID 0x28
+
+/* Modules registers offsets */
+#define BMI_OFFSET 0x00080000
+#define QMI_OFFSET 0x00080400
+#define KG_OFFSET 0x000C1000
+#define DMA_OFFSET 0x000C2000
+#define FPM_OFFSET 0x000C3000
+#define IMEM_OFFSET 0x000C4000
+#define HWP_OFFSET 0x000C7000
+#define CGP_OFFSET 0x000DB000
+
+/* Exceptions bit map */
+#define EX_DMA_BUS_ERROR 0x80000000
+#define EX_DMA_READ_ECC 0x40000000
+#define EX_DMA_SYSTEM_WRITE_ECC 0x20000000
+#define EX_DMA_FM_WRITE_ECC 0x10000000
+#define EX_FPM_STALL_ON_TASKS 0x08000000
+#define EX_FPM_SINGLE_ECC 0x04000000
+#define EX_FPM_DOUBLE_ECC 0x02000000
+#define EX_QMI_SINGLE_ECC 0x01000000
+#define EX_QMI_DEQ_FROM_UNKNOWN_PORTID 0x00800000
+#define EX_QMI_DOUBLE_ECC 0x00400000
+#define EX_BMI_LIST_RAM_ECC 0x00200000
+#define EX_BMI_STORAGE_PROFILE_ECC 0x00100000
+#define EX_BMI_STATISTICS_RAM_ECC 0x00080000
+#define EX_IRAM_ECC 0x00040000
+#define EX_MURAM_ECC 0x00020000
+#define EX_BMI_DISPATCH_RAM_ECC 0x00010000
+#define EX_DMA_SINGLE_PORT_ECC 0x00008000
+
+/* DMA defines */
+/* masks */
+#define DMA_MODE_BER 0x00200000
+#define DMA_MODE_ECC 0x00000020
+#define DMA_MODE_SECURE_PROT 0x00000800
+#define DMA_MODE_AXI_DBG_MASK 0x0F000000
+
+#define DMA_TRANSFER_PORTID_MASK 0xFF000000
+#define DMA_TRANSFER_TNUM_MASK 0x00FF0000
+#define DMA_TRANSFER_LIODN_MASK 0x00000FFF
+
+#define DMA_STATUS_BUS_ERR 0x08000000
+#define DMA_STATUS_READ_ECC 0x04000000
+#define DMA_STATUS_SYSTEM_WRITE_ECC 0x02000000
+#define DMA_STATUS_FM_WRITE_ECC 0x01000000
+#define DMA_STATUS_FM_SPDAT_ECC 0x00080000
+
+#define DMA_MODE_CACHE_OR_SHIFT 30
+#define DMA_MODE_AXI_DBG_SHIFT 24
+#define DMA_MODE_CEN_SHIFT 13
+#define DMA_MODE_CEN_MASK 0x00000007
+#define DMA_MODE_DBG_SHIFT 7
+#define DMA_MODE_AID_MODE_SHIFT 4
+
+#define DMA_THRESH_COMMQ_SHIFT 24
+#define DMA_THRESH_READ_INT_BUF_SHIFT 16
+#define DMA_THRESH_READ_INT_BUF_MASK 0x0000003f
+#define DMA_THRESH_WRITE_INT_BUF_MASK 0x0000003f
+
+#define DMA_TRANSFER_PORTID_SHIFT 24
+#define DMA_TRANSFER_TNUM_SHIFT 16
+
+#define DMA_CAM_SIZEOF_ENTRY 0x40
+#define DMA_CAM_UNITS 8
+
+#define DMA_LIODN_SHIFT 16
+#define DMA_LIODN_BASE_MASK 0x00000FFF
+
+/* FPM defines */
+#define FPM_EV_MASK_DOUBLE_ECC 0x80000000
+#define FPM_EV_MASK_STALL 0x40000000
+#define FPM_EV_MASK_SINGLE_ECC 0x20000000
+#define FPM_EV_MASK_RELEASE_FM 0x00010000
+#define FPM_EV_MASK_DOUBLE_ECC_EN 0x00008000
+#define FPM_EV_MASK_STALL_EN 0x00004000
+#define FPM_EV_MASK_SINGLE_ECC_EN 0x00002000
+#define FPM_EV_MASK_EXTERNAL_HALT 0x00000008
+#define FPM_EV_MASK_ECC_ERR_HALT 0x00000004
+
+#define FPM_RAM_MURAM_ECC 0x00008000
+#define FPM_RAM_IRAM_ECC 0x00004000
+#define FPM_IRAM_ECC_ERR_EX_EN 0x00020000
+#define FPM_MURAM_ECC_ERR_EX_EN 0x00040000
+#define FPM_RAM_IRAM_ECC_EN 0x40000000
+#define FPM_RAM_RAMS_ECC_EN 0x80000000
+#define FPM_RAM_RAMS_ECC_EN_SRC_SEL 0x08000000
+
+#define FPM_REV1_MAJOR_MASK 0x0000FF00
+#define FPM_REV1_MINOR_MASK 0x000000FF
+
+#define FPM_DISP_LIMIT_SHIFT 24
+
+#define FPM_PRT_FM_CTL1 0x00000001
+#define FPM_PRT_FM_CTL2 0x00000002
+#define FPM_PORT_FM_CTL_PORTID_SHIFT 24
+#define FPM_PRC_ORA_FM_CTL_SEL_SHIFT 16
+
+#define FPM_THR1_PRS_SHIFT 24
+#define FPM_THR1_KG_SHIFT 16
+#define FPM_THR1_PLCR_SHIFT 8
+#define FPM_THR1_BMI_SHIFT 0
+
+#define FPM_THR2_QMI_ENQ_SHIFT 24
+#define FPM_THR2_QMI_DEQ_SHIFT 0
+#define FPM_THR2_FM_CTL1_SHIFT 16
+#define FPM_THR2_FM_CTL2_SHIFT 8
+
+#define FPM_EV_MASK_CAT_ERR_SHIFT 1
+#define FPM_EV_MASK_DMA_ERR_SHIFT 0
+
+#define FPM_REV1_MAJOR_SHIFT 8
+
+#define FPM_RSTC_FM_RESET 0x80000000
+#define FPM_RSTC_MAC0_RESET 0x40000000
+#define FPM_RSTC_MAC1_RESET 0x20000000
+#define FPM_RSTC_MAC2_RESET 0x10000000
+#define FPM_RSTC_MAC3_RESET 0x08000000
+#define FPM_RSTC_MAC8_RESET 0x04000000
+#define FPM_RSTC_MAC4_RESET 0x02000000
+#define FPM_RSTC_MAC5_RESET 0x01000000
+#define FPM_RSTC_MAC6_RESET 0x00800000
+#define FPM_RSTC_MAC7_RESET 0x00400000
+#define FPM_RSTC_MAC9_RESET 0x00200000
+
+#define FPM_TS_INT_SHIFT 16
+#define FPM_TS_CTL_EN 0x80000000
+
+/* BMI defines */
+#define BMI_INIT_START 0x80000000
+#define BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC 0x80000000
+#define BMI_ERR_INTR_EN_LIST_RAM_ECC 0x40000000
+#define BMI_ERR_INTR_EN_STATISTICS_RAM_ECC 0x20000000
+#define BMI_ERR_INTR_EN_DISPATCH_RAM_ECC 0x10000000
+#define BMI_NUM_OF_TASKS_MASK 0x3F000000
+#define BMI_NUM_OF_EXTRA_TASKS_MASK 0x000F0000
+#define BMI_NUM_OF_DMAS_MASK 0x00000F00
+#define BMI_NUM_OF_EXTRA_DMAS_MASK 0x0000000F
+#define BMI_FIFO_SIZE_MASK 0x000003FF
+#define BMI_EXTRA_FIFO_SIZE_MASK 0x03FF0000
+#define BMI_CFG2_DMAS_MASK 0x0000003F
+#define BMI_CFG2_TASKS_MASK 0x0000003F
+
+#define BMI_CFG2_TASKS_SHIFT 16
+#define BMI_CFG2_DMAS_SHIFT 0
+#define BMI_CFG1_FIFO_SIZE_SHIFT 16
+#define BMI_NUM_OF_TASKS_SHIFT 24
+#define BMI_EXTRA_NUM_OF_TASKS_SHIFT 16
+#define BMI_NUM_OF_DMAS_SHIFT 8
+#define BMI_EXTRA_NUM_OF_DMAS_SHIFT 0
+
+#define BMI_FIFO_ALIGN 0x100
+
+#define BMI_EXTRA_FIFO_SIZE_SHIFT 16
+
+/* QMI defines */
+#define QMI_CFG_ENQ_EN 0x80000000
+#define QMI_CFG_DEQ_EN 0x40000000
+#define QMI_CFG_EN_COUNTERS 0x10000000
+#define QMI_CFG_DEQ_MASK 0x0000003F
+#define QMI_CFG_ENQ_MASK 0x00003F00
+#define QMI_CFG_ENQ_SHIFT 8
+
+#define QMI_ERR_INTR_EN_DOUBLE_ECC 0x80000000
+#define QMI_ERR_INTR_EN_DEQ_FROM_DEF 0x40000000
+#define QMI_INTR_EN_SINGLE_ECC 0x80000000
+
+#define QMI_GS_HALT_NOT_BUSY 0x00000002
+
+/* HWP defines */
+#define HWP_RPIMAC_PEN 0x00000001
+
+/* IRAM defines */
+#define IRAM_IADD_AIE 0x80000000
+#define IRAM_READY 0x80000000
+
+/* Default values */
+#define DEFAULT_CATASTROPHIC_ERR 0
+#define DEFAULT_DMA_ERR 0
+#define DEFAULT_AID_MODE FMAN_DMA_AID_OUT_TNUM
+#define DEFAULT_DMA_COMM_Q_LOW 0x2A
+#define DEFAULT_DMA_COMM_Q_HIGH 0x3F
+#define DEFAULT_CACHE_OVERRIDE 0
+#define DEFAULT_DMA_CAM_NUM_OF_ENTRIES 64
+#define DEFAULT_DMA_DBG_CNT_MODE 0
+#define DEFAULT_DMA_SOS_EMERGENCY 0
+#define DEFAULT_DMA_WATCHDOG 0
+#define DEFAULT_DISP_LIMIT 0
+#define DEFAULT_PRS_DISP_TH 16
+#define DEFAULT_PLCR_DISP_TH 16
+#define DEFAULT_KG_DISP_TH 16
+#define DEFAULT_BMI_DISP_TH 16
+#define DEFAULT_QMI_ENQ_DISP_TH 16
+#define DEFAULT_QMI_DEQ_DISP_TH 16
+#define DEFAULT_FM_CTL1_DISP_TH 16
+#define DEFAULT_FM_CTL2_DISP_TH 16
+
+#define DFLT_AXI_DBG_NUM_OF_BEATS 1
+
+#define DFLT_DMA_READ_INT_BUF_LOW(dma_thresh_max_buf) \
+ ((dma_thresh_max_buf + 1) / 2)
+#define DFLT_DMA_READ_INT_BUF_HIGH(dma_thresh_max_buf) \
+ ((dma_thresh_max_buf + 1) * 3 / 4)
+#define DFLT_DMA_WRITE_INT_BUF_LOW(dma_thresh_max_buf) \
+ ((dma_thresh_max_buf + 1) / 2)
+#define DFLT_DMA_WRITE_INT_BUF_HIGH(dma_thresh_max_buf)\
+ ((dma_thresh_max_buf + 1) * 3 / 4)
+
+#define DMA_COMM_Q_LOW_FMAN_V3 0x2A
+#define DMA_COMM_Q_LOW_FMAN_V2(dma_thresh_max_commq) \
+ ((dma_thresh_max_commq + 1) / 2)
+#define DFLT_DMA_COMM_Q_LOW(major, dma_thresh_max_commq) \
+ ((major == 6) ? DMA_COMM_Q_LOW_FMAN_V3 : \
+ DMA_COMM_Q_LOW_FMAN_V2(dma_thresh_max_commq))
+
+#define DMA_COMM_Q_HIGH_FMAN_V3 0x3f
+#define DMA_COMM_Q_HIGH_FMAN_V2(dma_thresh_max_commq) \
+ ((dma_thresh_max_commq + 1) * 3 / 4)
+#define DFLT_DMA_COMM_Q_HIGH(major, dma_thresh_max_commq) \
+ ((major == 6) ? DMA_COMM_Q_HIGH_FMAN_V3 : \
+ DMA_COMM_Q_HIGH_FMAN_V2(dma_thresh_max_commq))
+
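+/* FMan v3 "L" variants (rev 6.1/6.4) provide fewer tasks than the "H"
+ * variants; see also fill_soc_specific_params() below.
+ */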
+#define TOTAL_NUM_OF_TASKS_FMAN_V3L 59
+#define TOTAL_NUM_OF_TASKS_FMAN_V3H 124
+#define DFLT_TOTAL_NUM_OF_TASKS(major, minor, bmi_max_num_of_tasks) \
+ ((major == 6) ? ((minor == 1 || minor == 4) ? \
+ TOTAL_NUM_OF_TASKS_FMAN_V3L : TOTAL_NUM_OF_TASKS_FMAN_V3H) : \
+ bmi_max_num_of_tasks)
+
+#define DMA_CAM_NUM_OF_ENTRIES_FMAN_V3 64
+#define DMA_CAM_NUM_OF_ENTRIES_FMAN_V2 32
+#define DFLT_DMA_CAM_NUM_OF_ENTRIES(major) \
+ (major == 6 ? DMA_CAM_NUM_OF_ENTRIES_FMAN_V3 : \
+ DMA_CAM_NUM_OF_ENTRIES_FMAN_V2)
+
+#define FM_TIMESTAMP_1_USEC_BIT 8
+
+/* Defines used for enabling/disabling FMan interrupts */
+#define ERR_INTR_EN_DMA 0x00010000
+#define ERR_INTR_EN_FPM 0x80000000
+#define ERR_INTR_EN_BMI 0x00800000
+#define ERR_INTR_EN_QMI 0x00400000
+#define ERR_INTR_EN_MURAM 0x00040000
+#define ERR_INTR_EN_MAC0 0x00004000
+#define ERR_INTR_EN_MAC1 0x00002000
+#define ERR_INTR_EN_MAC2 0x00001000
+#define ERR_INTR_EN_MAC3 0x00000800
+#define ERR_INTR_EN_MAC4 0x00000400
+#define ERR_INTR_EN_MAC5 0x00000200
+#define ERR_INTR_EN_MAC6 0x00000100
+#define ERR_INTR_EN_MAC7 0x00000080
+#define ERR_INTR_EN_MAC8 0x00008000
+#define ERR_INTR_EN_MAC9 0x00000040
+
+#define INTR_EN_QMI 0x40000000
+#define INTR_EN_MAC0 0x00080000
+#define INTR_EN_MAC1 0x00040000
+#define INTR_EN_MAC2 0x00020000
+#define INTR_EN_MAC3 0x00010000
+#define INTR_EN_MAC4 0x00000040
+#define INTR_EN_MAC5 0x00000020
+#define INTR_EN_MAC6 0x00000008
+#define INTR_EN_MAC7 0x00000002
+#define INTR_EN_MAC8 0x00200000
+#define INTR_EN_MAC9 0x00100000
+#define INTR_EN_REV0 0x00008000
+#define INTR_EN_REV1 0x00004000
+#define INTR_EN_REV2 0x00002000
+#define INTR_EN_REV3 0x00001000
+#define INTR_EN_TMR 0x01000000
+
+enum fman_dma_aid_mode {
+ FMAN_DMA_AID_OUT_PORT_ID = 0, /* 4 LSB of PORT_ID */
+ FMAN_DMA_AID_OUT_TNUM /* 4 LSB of TNUM */
+};
+
+struct fman_iram_regs {
+ u32 iadd; /* FM IRAM instruction address register */
+ u32 idata; /* FM IRAM instruction data register */
+ u32 itcfg; /* FM IRAM timing config register */
+ u32 iready; /* FM IRAM ready register */
+};
+
+struct fman_fpm_regs {
+ u32 fmfp_tnc; /* FPM TNUM Control 0x00 */
+ u32 fmfp_prc; /* FPM Port_ID FmCtl Association 0x04 */
+ u32 fmfp_brkc; /* FPM Breakpoint Control 0x08 */
+ u32 fmfp_mxd; /* FPM Flush Control 0x0c */
+ u32 fmfp_dist1; /* FPM Dispatch Thresholds1 0x10 */
+ u32 fmfp_dist2; /* FPM Dispatch Thresholds2 0x14 */
+ u32 fm_epi; /* FM Error Pending Interrupts 0x18 */
+ u32 fm_rie; /* FM Error Interrupt Enable 0x1c */
+ u32 fmfp_fcev[4]; /* FPM FMan-Controller Event 1-4 0x20-0x2f */
+ u32 res0030[4]; /* res 0x30 - 0x3f */
+ u32 fmfp_cee[4]; /* FPM FMan-Controller Event 1-4 0x40-0x4f */
+ u32 res0050[4]; /* res 0x50-0x5f */
+ u32 fmfp_tsc1; /* FPM TimeStamp Control1 0x60 */
+ u32 fmfp_tsc2; /* FPM TimeStamp Control2 0x64 */
+ u32 fmfp_tsp; /* FPM Time Stamp 0x68 */
+ u32 fmfp_tsf; /* FPM Time Stamp Fraction 0x6c */
+ u32 fm_rcr; /* FM Rams Control 0x70 */
+ u32 fmfp_extc; /* FPM External Requests Control 0x74 */
+ u32 fmfp_ext1; /* FPM External Requests Config1 0x78 */
+ u32 fmfp_ext2; /* FPM External Requests Config2 0x7c */
+ u32 fmfp_drd[16]; /* FPM Data_Ram Data 0-15 0x80 - 0xbf */
+ u32 fmfp_dra; /* FPM Data Ram Access 0xc0 */
+ u32 fm_ip_rev_1; /* FM IP Block Revision 1 0xc4 */
+ u32 fm_ip_rev_2; /* FM IP Block Revision 2 0xc8 */
+ u32 fm_rstc; /* FM Reset Command 0xcc */
+ u32 fm_cld; /* FM Classifier Debug 0xd0 */
+ u32 fm_npi; /* FM Normal Pending Interrupts 0xd4 */
+ u32 fmfp_exte; /* FPM External Requests Enable 0xd8 */
+ u32 fmfp_ee; /* FPM Event&Mask 0xdc */
+ u32 fmfp_cev[4]; /* FPM CPU Event 1-4 0xe0-0xef */
+ u32 res00f0[4]; /* res 0xf0-0xff */
+ u32 fmfp_ps[50]; /* FPM Port Status 0x100-0x1c7 */
+ u32 res01c8[14]; /* res 0x1c8-0x1ff */
+ u32 fmfp_clfabc; /* FPM CLFABC 0x200 */
+ u32 fmfp_clfcc; /* FPM CLFCC 0x204 */
+ u32 fmfp_clfaval; /* FPM CLFAVAL 0x208 */
+ u32 fmfp_clfbval; /* FPM CLFBVAL 0x20c */
+ u32 fmfp_clfcval; /* FPM CLFCVAL 0x210 */
+ u32 fmfp_clfamsk; /* FPM CLFAMSK 0x214 */
+ u32 fmfp_clfbmsk; /* FPM CLFBMSK 0x218 */
+ u32 fmfp_clfcmsk; /* FPM CLFCMSK 0x21c */
+ u32 fmfp_clfamc; /* FPM CLFAMC 0x220 */
+ u32 fmfp_clfbmc; /* FPM CLFBMC 0x224 */
+ u32 fmfp_clfcmc; /* FPM CLFCMC 0x228 */
+ u32 fmfp_decceh; /* FPM DECCEH 0x22c */
+ u32 res0230[116]; /* res 0x230 - 0x3ff */
+ u32 fmfp_ts[128]; /* 0x400: FPM Task Status 0x400 - 0x5ff */
+ u32 res0600[0x400 - 384];
+};
+
+struct fman_bmi_regs {
+ u32 fmbm_init; /* BMI Initialization 0x00 */
+ u32 fmbm_cfg1; /* BMI Configuration 1 0x04 */
+ u32 fmbm_cfg2; /* BMI Configuration 2 0x08 */
+ u32 res000c[5]; /* 0x0c - 0x1f */
+ u32 fmbm_ievr; /* Interrupt Event Register 0x20 */
+ u32 fmbm_ier; /* Interrupt Enable Register 0x24 */
+ u32 fmbm_ifr; /* Interrupt Force Register 0x28 */
+ u32 res002c[5]; /* 0x2c - 0x3f */
+ u32 fmbm_arb[8]; /* BMI Arbitration 0x40 - 0x5f */
+ u32 res0060[12]; /* 0x60 - 0x8f */
+ u32 fmbm_dtc[3]; /* Debug Trap Counter 0x90 - 0x9b */
+ u32 res009c; /* 0x9c */
+ u32 fmbm_dcv[3][4]; /* Debug Compare val 0xa0-0xcf */
+ u32 fmbm_dcm[3][4]; /* Debug Compare Mask 0xd0-0xff */
+ u32 fmbm_gde; /* BMI Global Debug Enable 0x100 */
+ u32 fmbm_pp[63]; /* BMI Port Parameters 0x104 - 0x1ff */
+ u32 res0200; /* 0x200 */
+ u32 fmbm_pfs[63]; /* BMI Port FIFO Size 0x204 - 0x2ff */
+ u32 res0300; /* 0x300 */
+ u32 fmbm_spliodn[63]; /* Port Partition ID 0x304 - 0x3ff */
+};
+
+struct fman_qmi_regs {
+ u32 fmqm_gc; /* General Configuration Register 0x00 */
+ u32 res0004; /* 0x04 */
+ u32 fmqm_eie; /* Error Interrupt Event Register 0x08 */
+ u32 fmqm_eien; /* Error Interrupt Enable Register 0x0c */
+ u32 fmqm_eif; /* Error Interrupt Force Register 0x10 */
+ u32 fmqm_ie; /* Interrupt Event Register 0x14 */
+ u32 fmqm_ien; /* Interrupt Enable Register 0x18 */
+ u32 fmqm_if; /* Interrupt Force Register 0x1c */
+ u32 fmqm_gs; /* Global Status Register 0x20 */
+ u32 fmqm_ts; /* Task Status Register 0x24 */
+ u32 fmqm_etfc; /* Enqueue Total Frame Counter 0x28 */
+ u32 fmqm_dtfc; /* Dequeue Total Frame Counter 0x2c */
+ u32 fmqm_dc0; /* Dequeue Counter 0 0x30 */
+ u32 fmqm_dc1; /* Dequeue Counter 1 0x34 */
+ u32 fmqm_dc2; /* Dequeue Counter 2 0x38 */
+ u32 fmqm_dc3; /* Dequeue Counter 3 0x3c */
+ u32 fmqm_dfdc; /* Dequeue FQID from Default Counter 0x40 */
+ u32 fmqm_dfcc; /* Dequeue FQID from Context Counter 0x44 */
+ u32 fmqm_dffc; /* Dequeue FQID from FD Counter 0x48 */
+ u32 fmqm_dcc; /* Dequeue Confirm Counter 0x4c */
+ u32 res0050[7]; /* 0x50 - 0x6b */
+ u32 fmqm_tapc; /* Tnum Aging Period Control 0x6c */
+ u32 fmqm_dmcvc; /* Dequeue MAC Command Valid Counter 0x70 */
+ u32 fmqm_difdcc; /* Dequeue Invalid FD Command Counter 0x74 */
+ u32 fmqm_da1v; /* Dequeue A1 Valid Counter 0x78 */
+ u32 res007c; /* 0x7c */
+ u32 fmqm_dtc; /* Debug Trap Counter 0x80 */
+ u32 fmqm_efddd; /* Enqueue Frame desc Dynamic dbg 0x84 */
+ u32 res0088[2]; /* 0x88 - 0x8f */
+ struct {
+ u32 fmqm_dtcfg1; /* 0x90 dbg trap cfg 1 Register 0x00 */
+ u32 fmqm_dtval1; /* Debug Trap Value 1 Register 0x04 */
+ u32 fmqm_dtm1; /* Debug Trap Mask 1 Register 0x08 */
+ u32 fmqm_dtc1; /* Debug Trap Counter 1 Register 0x0c */
+ u32 fmqm_dtcfg2; /* dbg Trap cfg 2 Register 0x10 */
+ u32 fmqm_dtval2; /* Debug Trap Value 2 Register 0x14 */
+ u32 fmqm_dtm2; /* Debug Trap Mask 2 Register 0x18 */
+ u32 res001c; /* 0x1c */
+ } dbg_traps[3]; /* 0x90 - 0xef */
+ u8 res00f0[0x400 - 0xf0]; /* 0xf0 - 0x3ff */
+};
+
+struct fman_dma_regs {
+ u32 fmdmsr; /* FM DMA status register 0x00 */
+ u32 fmdmmr; /* FM DMA mode register 0x04 */
+ u32 fmdmtr; /* FM DMA bus threshold register 0x08 */
+ u32 fmdmhy; /* FM DMA bus hysteresis register 0x0c */
+ u32 fmdmsetr; /* FM DMA SOS emergency Threshold Register 0x10 */
+ u32 fmdmtah; /* FM DMA transfer bus address high reg 0x14 */
+ u32 fmdmtal; /* FM DMA transfer bus address low reg 0x18 */
+ u32 fmdmtcid; /* FM DMA transfer bus communication ID reg 0x1c */
+ u32 fmdmra; /* FM DMA bus internal ram address register 0x20 */
+ u32 fmdmrd; /* FM DMA bus internal ram data register 0x24 */
+ u32 fmdmwcr; /* FM DMA CAM watchdog counter value 0x28 */
+ u32 fmdmebcr; /* FM DMA CAM base in MURAM register 0x2c */
+ u32 fmdmccqdr; /* FM DMA CAM and CMD Queue Debug reg 0x30 */
+ u32 fmdmccqvr1; /* FM DMA CAM and CMD Queue Value reg #1 0x34 */
+ u32 fmdmccqvr2; /* FM DMA CAM and CMD Queue Value reg #2 0x38 */
+ u32 fmdmcqvr3; /* FM DMA CMD Queue Value register #3 0x3c */
+ u32 fmdmcqvr4; /* FM DMA CMD Queue Value register #4 0x40 */
+ u32 fmdmcqvr5; /* FM DMA CMD Queue Value register #5 0x44 */
+ u32 fmdmsefrc; /* FM DMA Semaphore Entry Full Reject Cntr 0x48 */
+ u32 fmdmsqfrc; /* FM DMA Semaphore Queue Full Reject Cntr 0x4c */
+ u32 fmdmssrc; /* FM DMA Semaphore SYNC Reject Counter 0x50 */
+ u32 fmdmdcr; /* FM DMA Debug Counter 0x54 */
+ u32 fmdmemsr; /* FM DMA Emergency Smoother Register 0x58 */
+ u32 res005c; /* 0x5c */
+ u32 fmdmplr[FMAN_LIODN_TBL / 2]; /* DMA LIODN regs 0x60-0xdf */
+ u32 res00e0[0x400 - 56];
+};
+
+struct fman_hwp_regs {
+ u32 res0000[0x844 / 4]; /* 0x000..0x843 */
+ u32 fmprrpimac; /* FM Parser Internal memory access control */
+ u32 res[(0x1000 - 0x848) / 4]; /* 0x848..0xFFF */
+};
+
+/* Structure that holds current FMan state.
+ * Used for saving run time information.
+ */
+struct fman_state_struct {
+ u8 fm_id;
+ u16 fm_clk_freq;
+ struct fman_rev_info rev_info;
+ bool enabled_time_stamp;
+ u8 count1_micro_bit;
+ u8 total_num_of_tasks;
+ u8 accumulated_num_of_tasks;
+ u32 accumulated_fifo_size;
+ u8 accumulated_num_of_open_dmas;
+ u8 accumulated_num_of_deq_tnums;
+ u32 exceptions;
+ u32 extra_fifo_pool_size;
+ u8 extra_tasks_pool_size;
+ u8 extra_open_dmas_pool_size;
+ u16 port_mfl[MAX_NUM_OF_MACS];
+ u16 mac_mfl[MAX_NUM_OF_MACS];
+
+ /* SOC specific */
+ u32 fm_iram_size;
+ /* DMA */
+ u32 dma_thresh_max_commq;
+ u32 dma_thresh_max_buf;
+ u32 max_num_of_open_dmas;
+ /* QMI */
+ u32 qmi_max_num_of_tnums;
+ u32 qmi_def_tnums_thresh;
+ /* BMI */
+ u32 bmi_max_num_of_tasks;
+ u32 bmi_max_fifo_size;
+ /* General */
+ u32 fm_port_num_of_cg;
+ u32 num_of_rx_ports;
+ u32 total_fifo_size;
+
+ u32 qman_channel_base;
+ u32 num_of_qman_channels;
+
+ struct resource *res;
+};
+
+/* Structure that holds FMan initial configuration */
+struct fman_cfg {
+ u8 disp_limit_tsh;
+ u8 prs_disp_tsh;
+ u8 plcr_disp_tsh;
+ u8 kg_disp_tsh;
+ u8 bmi_disp_tsh;
+ u8 qmi_enq_disp_tsh;
+ u8 qmi_deq_disp_tsh;
+ u8 fm_ctl1_disp_tsh;
+ u8 fm_ctl2_disp_tsh;
+ int dma_cache_override;
+ enum fman_dma_aid_mode dma_aid_mode;
+ u32 dma_axi_dbg_num_of_beats;
+ u32 dma_cam_num_of_entries;
+ u32 dma_watchdog;
+ u8 dma_comm_qtsh_asrt_emer;
+ u32 dma_write_buf_tsh_asrt_emer;
+ u32 dma_read_buf_tsh_asrt_emer;
+ u8 dma_comm_qtsh_clr_emer;
+ u32 dma_write_buf_tsh_clr_emer;
+ u32 dma_read_buf_tsh_clr_emer;
+ u32 dma_sos_emergency;
+ int dma_dbg_cnt_mode;
+ int catastrophic_err;
+ int dma_err;
+ u32 exceptions;
+ u16 clk_freq;
+ u32 cam_base_addr;
+ u32 fifo_base_addr;
+ u32 total_fifo_size;
+ u32 total_num_of_tasks;
+ u32 qmi_def_tnums_thresh;
+};
+
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+static bool fman_has_err_a050385;
+#endif
+
+static irqreturn_t fman_exceptions(struct fman *fman,
+ enum fman_exceptions exception)
+{
+ dev_dbg(fman->dev, "%s: FMan[%d] exception %d\n",
+ __func__, fman->state->fm_id, exception);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fman_bus_error(struct fman *fman, u8 __maybe_unused port_id,
+ u64 __maybe_unused addr,
+ u8 __maybe_unused tnum,
+ u16 __maybe_unused liodn)
+{
+ dev_dbg(fman->dev, "%s: FMan[%d] bus error: port_id[%d]\n",
+ __func__, fman->state->fm_id, port_id);
+
+ return IRQ_HANDLED;
+}
+
+static inline irqreturn_t call_mac_isr(struct fman *fman, u8 id)
+{
+ if (fman->intr_mng[id].isr_cb) {
+ fman->intr_mng[id].isr_cb(fman->intr_mng[id].src_handle);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
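+/* Map a hardware port ID to a zero-based software port index: Rx ports
+ * start at BASE_RX_PORTID (0x08), Tx ports at BASE_TX_PORTID (0x28).
+ */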
+static inline u8 hw_port_id_to_sw_port_id(u8 major, u8 hw_port_id)
+{
+ u8 sw_port_id = 0;
+
+ if (hw_port_id >= BASE_TX_PORTID)
+ sw_port_id = hw_port_id - BASE_TX_PORTID;
+ else if (hw_port_id >= BASE_RX_PORTID)
+ sw_port_id = hw_port_id - BASE_RX_PORTID;
+ else
+ sw_port_id = 0;
+
+ return sw_port_id;
+}
+
+static void set_port_order_restoration(struct fman_fpm_regs __iomem *fpm_rg,
+ u8 port_id)
+{
+ u32 tmp = 0;
+
+ tmp = port_id << FPM_PORT_FM_CTL_PORTID_SHIFT;
+
+ tmp |= FPM_PRT_FM_CTL2 | FPM_PRT_FM_CTL1;
+
+ /* order restoration */
+ if (port_id % 2)
+ tmp |= FPM_PRT_FM_CTL1 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT;
+ else
+ tmp |= FPM_PRT_FM_CTL2 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT;
+
+ iowrite32be(tmp, &fpm_rg->fmfp_prc);
+}
+
+static void set_port_liodn(struct fman *fman, u8 port_id,
+ u32 liodn_base, u32 liodn_ofst)
+{
+ u32 tmp;
+
+ iowrite32be(liodn_ofst, &fman->bmi_regs->fmbm_spliodn[port_id - 1]);
+ if (!IS_ENABLED(CONFIG_FSL_PAMU))
+ return;
+ /* set LIODN base for this port */
+ tmp = ioread32be(&fman->dma_regs->fmdmplr[port_id / 2]);
+ if (port_id % 2) {
+ tmp &= ~DMA_LIODN_BASE_MASK;
+ tmp |= liodn_base;
+ } else {
+ tmp &= ~(DMA_LIODN_BASE_MASK << DMA_LIODN_SHIFT);
+ tmp |= liodn_base << DMA_LIODN_SHIFT;
+ }
+ iowrite32be(tmp, &fman->dma_regs->fmdmplr[port_id / 2]);
+}
+
+static void enable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg)
+{
+ u32 tmp;
+
+ tmp = ioread32be(&fpm_rg->fm_rcr);
+ if (tmp & FPM_RAM_RAMS_ECC_EN_SRC_SEL)
+ iowrite32be(tmp | FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr);
+ else
+ iowrite32be(tmp | FPM_RAM_RAMS_ECC_EN |
+ FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr);
+}
+
+static void disable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg)
+{
+ u32 tmp;
+
+ tmp = ioread32be(&fpm_rg->fm_rcr);
+ if (tmp & FPM_RAM_RAMS_ECC_EN_SRC_SEL)
+ iowrite32be(tmp & ~FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr);
+ else
+ iowrite32be(tmp & ~(FPM_RAM_RAMS_ECC_EN | FPM_RAM_IRAM_ECC_EN),
+ &fpm_rg->fm_rcr);
+}
+
+static void fman_defconfig(struct fman_cfg *cfg)
+{
+ memset(cfg, 0, sizeof(struct fman_cfg));
+
+ cfg->catastrophic_err = DEFAULT_CATASTROPHIC_ERR;
+ cfg->dma_err = DEFAULT_DMA_ERR;
+ cfg->dma_aid_mode = DEFAULT_AID_MODE;
+ cfg->dma_comm_qtsh_clr_emer = DEFAULT_DMA_COMM_Q_LOW;
+ cfg->dma_comm_qtsh_asrt_emer = DEFAULT_DMA_COMM_Q_HIGH;
+ cfg->dma_cache_override = DEFAULT_CACHE_OVERRIDE;
+ cfg->dma_cam_num_of_entries = DEFAULT_DMA_CAM_NUM_OF_ENTRIES;
+ cfg->dma_dbg_cnt_mode = DEFAULT_DMA_DBG_CNT_MODE;
+ cfg->dma_sos_emergency = DEFAULT_DMA_SOS_EMERGENCY;
+ cfg->dma_watchdog = DEFAULT_DMA_WATCHDOG;
+ cfg->disp_limit_tsh = DEFAULT_DISP_LIMIT;
+ cfg->prs_disp_tsh = DEFAULT_PRS_DISP_TH;
+ cfg->plcr_disp_tsh = DEFAULT_PLCR_DISP_TH;
+ cfg->kg_disp_tsh = DEFAULT_KG_DISP_TH;
+ cfg->bmi_disp_tsh = DEFAULT_BMI_DISP_TH;
+ cfg->qmi_enq_disp_tsh = DEFAULT_QMI_ENQ_DISP_TH;
+ cfg->qmi_deq_disp_tsh = DEFAULT_QMI_DEQ_DISP_TH;
+ cfg->fm_ctl1_disp_tsh = DEFAULT_FM_CTL1_DISP_TH;
+ cfg->fm_ctl2_disp_tsh = DEFAULT_FM_CTL2_DISP_TH;
+}
+
+static int dma_init(struct fman *fman)
+{
+ struct fman_dma_regs __iomem *dma_rg = fman->dma_regs;
+ struct fman_cfg *cfg = fman->cfg;
+ u32 tmp_reg;
+
+ /* Init DMA Registers */
+
+ /* clear status reg events */
+ tmp_reg = (DMA_STATUS_BUS_ERR | DMA_STATUS_READ_ECC |
+ DMA_STATUS_SYSTEM_WRITE_ECC | DMA_STATUS_FM_WRITE_ECC);
+ iowrite32be(ioread32be(&dma_rg->fmdmsr) | tmp_reg, &dma_rg->fmdmsr);
+
+ /* configure mode register */
+ tmp_reg = 0;
+ tmp_reg |= cfg->dma_cache_override << DMA_MODE_CACHE_OR_SHIFT;
+ if (cfg->exceptions & EX_DMA_BUS_ERROR)
+ tmp_reg |= DMA_MODE_BER;
+ if ((cfg->exceptions & EX_DMA_SYSTEM_WRITE_ECC) |
+ (cfg->exceptions & EX_DMA_READ_ECC) |
+ (cfg->exceptions & EX_DMA_FM_WRITE_ECC))
+ tmp_reg |= DMA_MODE_ECC;
+ if (cfg->dma_axi_dbg_num_of_beats)
+ tmp_reg |= (DMA_MODE_AXI_DBG_MASK &
+ ((cfg->dma_axi_dbg_num_of_beats - 1)
+ << DMA_MODE_AXI_DBG_SHIFT));
+
+ tmp_reg |= (((cfg->dma_cam_num_of_entries / DMA_CAM_UNITS) - 1) &
+ DMA_MODE_CEN_MASK) << DMA_MODE_CEN_SHIFT;
+ tmp_reg |= DMA_MODE_SECURE_PROT;
+ tmp_reg |= cfg->dma_dbg_cnt_mode << DMA_MODE_DBG_SHIFT;
+ tmp_reg |= cfg->dma_aid_mode << DMA_MODE_AID_MODE_SHIFT;
+
+ iowrite32be(tmp_reg, &dma_rg->fmdmmr);
+
+ /* configure thresholds register */
+ tmp_reg = ((u32)cfg->dma_comm_qtsh_asrt_emer <<
+ DMA_THRESH_COMMQ_SHIFT);
+ tmp_reg |= (cfg->dma_read_buf_tsh_asrt_emer &
+ DMA_THRESH_READ_INT_BUF_MASK) << DMA_THRESH_READ_INT_BUF_SHIFT;
+ tmp_reg |= cfg->dma_write_buf_tsh_asrt_emer &
+ DMA_THRESH_WRITE_INT_BUF_MASK;
+
+ iowrite32be(tmp_reg, &dma_rg->fmdmtr);
+
+ /* configure hysteresis register */
+ tmp_reg = ((u32)cfg->dma_comm_qtsh_clr_emer <<
+ DMA_THRESH_COMMQ_SHIFT);
+ tmp_reg |= (cfg->dma_read_buf_tsh_clr_emer &
+ DMA_THRESH_READ_INT_BUF_MASK) << DMA_THRESH_READ_INT_BUF_SHIFT;
+ tmp_reg |= cfg->dma_write_buf_tsh_clr_emer &
+ DMA_THRESH_WRITE_INT_BUF_MASK;
+
+ iowrite32be(tmp_reg, &dma_rg->fmdmhy);
+
+ /* configure emergency threshold */
+ iowrite32be(cfg->dma_sos_emergency, &dma_rg->fmdmsetr);
+
+ /* configure Watchdog */
+ iowrite32be((cfg->dma_watchdog * cfg->clk_freq), &dma_rg->fmdmwcr);
+
+ iowrite32be(cfg->cam_base_addr, &dma_rg->fmdmebcr);
+
+ /* Allocate MURAM for CAM */
+ fman->cam_size =
+ (u32)(fman->cfg->dma_cam_num_of_entries * DMA_CAM_SIZEOF_ENTRY);
+ fman->cam_offset = fman_muram_alloc(fman->muram, fman->cam_size);
+ if (IS_ERR_VALUE(fman->cam_offset)) {
+ dev_err(fman->dev, "%s: MURAM alloc for DMA CAM failed\n",
+ __func__);
+ return -ENOMEM;
+ }
+
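+ /* FMan v2 uses a different CAM layout: re-allocate 72 bytes per
+ * entry plus 128 bytes, and write a bitmask with the top
+ * dma_cam_num_of_entries bits set at the CAM base.
+ */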
+ if (fman->state->rev_info.major == 2) {
+ u32 __iomem *cam_base_addr;
+
+ fman_muram_free_mem(fman->muram, fman->cam_offset,
+ fman->cam_size);
+
+ fman->cam_size = fman->cfg->dma_cam_num_of_entries * 72 + 128;
+ fman->cam_offset = fman_muram_alloc(fman->muram,
+ fman->cam_size);
+ if (IS_ERR_VALUE(fman->cam_offset)) {
+ dev_err(fman->dev, "%s: MURAM alloc for DMA CAM failed\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ if (fman->cfg->dma_cam_num_of_entries % 8 ||
+ fman->cfg->dma_cam_num_of_entries > 32) {
+ dev_err(fman->dev, "%s: wrong dma_cam_num_of_entries\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ cam_base_addr = (u32 __iomem *)
+ fman_muram_offset_to_vbase(fman->muram,
+ fman->cam_offset);
+ iowrite32be(~((1 <<
+ (32 - fman->cfg->dma_cam_num_of_entries)) - 1),
+ cam_base_addr);
+ }
+
+ fman->cfg->cam_base_addr = fman->cam_offset;
+
+ return 0;
+}
+
+static void fpm_init(struct fman_fpm_regs __iomem *fpm_rg, struct fman_cfg *cfg)
+{
+ u32 tmp_reg;
+ int i;
+
+ /* Init FPM Registers */
+
+ tmp_reg = (u32)(cfg->disp_limit_tsh << FPM_DISP_LIMIT_SHIFT);
+ iowrite32be(tmp_reg, &fpm_rg->fmfp_mxd);
+
+ tmp_reg = (((u32)cfg->prs_disp_tsh << FPM_THR1_PRS_SHIFT) |
+ ((u32)cfg->kg_disp_tsh << FPM_THR1_KG_SHIFT) |
+ ((u32)cfg->plcr_disp_tsh << FPM_THR1_PLCR_SHIFT) |
+ ((u32)cfg->bmi_disp_tsh << FPM_THR1_BMI_SHIFT));
+ iowrite32be(tmp_reg, &fpm_rg->fmfp_dist1);
+
+ tmp_reg =
+ (((u32)cfg->qmi_enq_disp_tsh << FPM_THR2_QMI_ENQ_SHIFT) |
+ ((u32)cfg->qmi_deq_disp_tsh << FPM_THR2_QMI_DEQ_SHIFT) |
+ ((u32)cfg->fm_ctl1_disp_tsh << FPM_THR2_FM_CTL1_SHIFT) |
+ ((u32)cfg->fm_ctl2_disp_tsh << FPM_THR2_FM_CTL2_SHIFT));
+ iowrite32be(tmp_reg, &fpm_rg->fmfp_dist2);
+
+ /* define exceptions and error behavior */
+ tmp_reg = 0;
+ /* Clear events */
+ tmp_reg |= (FPM_EV_MASK_STALL | FPM_EV_MASK_DOUBLE_ECC |
+ FPM_EV_MASK_SINGLE_ECC);
+ /* enable interrupts */
+ if (cfg->exceptions & EX_FPM_STALL_ON_TASKS)
+ tmp_reg |= FPM_EV_MASK_STALL_EN;
+ if (cfg->exceptions & EX_FPM_SINGLE_ECC)
+ tmp_reg |= FPM_EV_MASK_SINGLE_ECC_EN;
+ if (cfg->exceptions & EX_FPM_DOUBLE_ECC)
+ tmp_reg |= FPM_EV_MASK_DOUBLE_ECC_EN;
+ tmp_reg |= (cfg->catastrophic_err << FPM_EV_MASK_CAT_ERR_SHIFT);
+ tmp_reg |= (cfg->dma_err << FPM_EV_MASK_DMA_ERR_SHIFT);
+ /* FMan is not halted upon external halt activation */
+ tmp_reg |= FPM_EV_MASK_EXTERNAL_HALT;
+ /* FMan is not halted upon unrecoverable ECC error */
+ tmp_reg |= FPM_EV_MASK_ECC_ERR_HALT;
+ iowrite32be(tmp_reg, &fpm_rg->fmfp_ee);
+
+ /* clear all fmCtls event registers */
+ for (i = 0; i < FM_NUM_OF_FMAN_CTRL_EVENT_REGS; i++)
+ iowrite32be(0xFFFFFFFF, &fpm_rg->fmfp_cev[i]);
+
+ /* RAM ECC - enable and clear events */
+ /* first we need to clear all parser memory,
+ * as it is uninitialized and may cause ECC errors
+ */
+ /* event bits */
+ tmp_reg = (FPM_RAM_MURAM_ECC | FPM_RAM_IRAM_ECC);
+
+ iowrite32be(tmp_reg, &fpm_rg->fm_rcr);
+
+ tmp_reg = 0;
+ if (cfg->exceptions & EX_IRAM_ECC) {
+ tmp_reg |= FPM_IRAM_ECC_ERR_EX_EN;
+ enable_rams_ecc(fpm_rg);
+ }
+ if (cfg->exceptions & EX_MURAM_ECC) {
+ tmp_reg |= FPM_MURAM_ECC_ERR_EX_EN;
+ enable_rams_ecc(fpm_rg);
+ }
+ iowrite32be(tmp_reg, &fpm_rg->fm_rie);
+}
+
+static void bmi_init(struct fman_bmi_regs __iomem *bmi_rg,
+ struct fman_cfg *cfg)
+{
+ u32 tmp_reg;
+
+ /* Init BMI Registers */
+
+ /* define common resources */
+ tmp_reg = cfg->fifo_base_addr;
+ tmp_reg = tmp_reg / BMI_FIFO_ALIGN;
+
+ tmp_reg |= ((cfg->total_fifo_size / FMAN_BMI_FIFO_UNITS - 1) <<
+ BMI_CFG1_FIFO_SIZE_SHIFT);
+ iowrite32be(tmp_reg, &bmi_rg->fmbm_cfg1);
+
+ tmp_reg = ((cfg->total_num_of_tasks - 1) & BMI_CFG2_TASKS_MASK) <<
+ BMI_CFG2_TASKS_SHIFT;
+ /* num of DMA's will be dynamically updated when each port is set */
+ iowrite32be(tmp_reg, &bmi_rg->fmbm_cfg2);
+
+ /* define unmaskable exceptions, enable and clear events */
+ tmp_reg = 0;
+ iowrite32be(BMI_ERR_INTR_EN_LIST_RAM_ECC |
+ BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC |
+ BMI_ERR_INTR_EN_STATISTICS_RAM_ECC |
+ BMI_ERR_INTR_EN_DISPATCH_RAM_ECC, &bmi_rg->fmbm_ievr);
+
+ if (cfg->exceptions & EX_BMI_LIST_RAM_ECC)
+ tmp_reg |= BMI_ERR_INTR_EN_LIST_RAM_ECC;
+ if (cfg->exceptions & EX_BMI_STORAGE_PROFILE_ECC)
+ tmp_reg |= BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
+ if (cfg->exceptions & EX_BMI_STATISTICS_RAM_ECC)
+ tmp_reg |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
+ if (cfg->exceptions & EX_BMI_DISPATCH_RAM_ECC)
+ tmp_reg |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
+ iowrite32be(tmp_reg, &bmi_rg->fmbm_ier);
+}
+
+static void qmi_init(struct fman_qmi_regs __iomem *qmi_rg,
+ struct fman_cfg *cfg)
+{
+ u32 tmp_reg;
+
+ /* Init QMI Registers */
+
+ /* Clear error interrupt events */
+
+ iowrite32be(QMI_ERR_INTR_EN_DOUBLE_ECC | QMI_ERR_INTR_EN_DEQ_FROM_DEF,
+ &qmi_rg->fmqm_eie);
+ tmp_reg = 0;
+ if (cfg->exceptions & EX_QMI_DEQ_FROM_UNKNOWN_PORTID)
+ tmp_reg |= QMI_ERR_INTR_EN_DEQ_FROM_DEF;
+ if (cfg->exceptions & EX_QMI_DOUBLE_ECC)
+ tmp_reg |= QMI_ERR_INTR_EN_DOUBLE_ECC;
+ /* enable events */
+ iowrite32be(tmp_reg, &qmi_rg->fmqm_eien);
+
+ tmp_reg = 0;
+ /* Clear interrupt events */
+ iowrite32be(QMI_INTR_EN_SINGLE_ECC, &qmi_rg->fmqm_ie);
+ if (cfg->exceptions & EX_QMI_SINGLE_ECC)
+ tmp_reg |= QMI_INTR_EN_SINGLE_ECC;
+ /* enable events */
+ iowrite32be(tmp_reg, &qmi_rg->fmqm_ien);
+}
+
+static void hwp_init(struct fman_hwp_regs __iomem *hwp_rg)
+{
+ /* enable HW Parser */
+ iowrite32be(HWP_RPIMAC_PEN, &hwp_rg->fmprrpimac);
+}
+
+static int enable(struct fman *fman, struct fman_cfg *cfg)
+{
+ u32 cfg_reg = 0;
+
+ /* Enable all modules */
+
+ /* clear&enable global counters - calculate reg and save for later,
+ * because it's the same reg for QMI enable
+ */
+ cfg_reg = QMI_CFG_EN_COUNTERS;
+
+ /* Set enqueue and dequeue thresholds */
+ cfg_reg |= (cfg->qmi_def_tnums_thresh << 8) | cfg->qmi_def_tnums_thresh;
+
+ iowrite32be(BMI_INIT_START, &fman->bmi_regs->fmbm_init);
+ iowrite32be(cfg_reg | QMI_CFG_ENQ_EN | QMI_CFG_DEQ_EN,
+ &fman->qmi_regs->fmqm_gc);
+
+ return 0;
+}
+
+static int set_exception(struct fman *fman,
+ enum fman_exceptions exception, bool enable)
+{
+ u32 tmp;
+
+ switch (exception) {
+ case FMAN_EX_DMA_BUS_ERROR:
+ tmp = ioread32be(&fman->dma_regs->fmdmmr);
+ if (enable)
+ tmp |= DMA_MODE_BER;
+ else
+ tmp &= ~DMA_MODE_BER;
+ /* disable bus error */
+ iowrite32be(tmp, &fman->dma_regs->fmdmmr);
+ break;
+ case FMAN_EX_DMA_READ_ECC:
+ case FMAN_EX_DMA_SYSTEM_WRITE_ECC:
+ case FMAN_EX_DMA_FM_WRITE_ECC:
+ tmp = ioread32be(&fman->dma_regs->fmdmmr);
+ if (enable)
+ tmp |= DMA_MODE_ECC;
+ else
+ tmp &= ~DMA_MODE_ECC;
+ iowrite32be(tmp, &fman->dma_regs->fmdmmr);
+ break;
+ case FMAN_EX_FPM_STALL_ON_TASKS:
+ tmp = ioread32be(&fman->fpm_regs->fmfp_ee);
+ if (enable)
+ tmp |= FPM_EV_MASK_STALL_EN;
+ else
+ tmp &= ~FPM_EV_MASK_STALL_EN;
+ iowrite32be(tmp, &fman->fpm_regs->fmfp_ee);
+ break;
+ case FMAN_EX_FPM_SINGLE_ECC:
+ tmp = ioread32be(&fman->fpm_regs->fmfp_ee);
+ if (enable)
+ tmp |= FPM_EV_MASK_SINGLE_ECC_EN;
+ else
+ tmp &= ~FPM_EV_MASK_SINGLE_ECC_EN;
+ iowrite32be(tmp, &fman->fpm_regs->fmfp_ee);
+ break;
+ case FMAN_EX_FPM_DOUBLE_ECC:
+ tmp = ioread32be(&fman->fpm_regs->fmfp_ee);
+ if (enable)
+ tmp |= FPM_EV_MASK_DOUBLE_ECC_EN;
+ else
+ tmp &= ~FPM_EV_MASK_DOUBLE_ECC_EN;
+ iowrite32be(tmp, &fman->fpm_regs->fmfp_ee);
+ break;
+ case FMAN_EX_QMI_SINGLE_ECC:
+ tmp = ioread32be(&fman->qmi_regs->fmqm_ien);
+ if (enable)
+ tmp |= QMI_INTR_EN_SINGLE_ECC;
+ else
+ tmp &= ~QMI_INTR_EN_SINGLE_ECC;
+ iowrite32be(tmp, &fman->qmi_regs->fmqm_ien);
+ break;
+ case FMAN_EX_QMI_DOUBLE_ECC:
+ tmp = ioread32be(&fman->qmi_regs->fmqm_eien);
+ if (enable)
+ tmp |= QMI_ERR_INTR_EN_DOUBLE_ECC;
+ else
+ tmp &= ~QMI_ERR_INTR_EN_DOUBLE_ECC;
+ iowrite32be(tmp, &fman->qmi_regs->fmqm_eien);
+ break;
+ case FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID:
+ tmp = ioread32be(&fman->qmi_regs->fmqm_eien);
+ if (enable)
+ tmp |= QMI_ERR_INTR_EN_DEQ_FROM_DEF;
+ else
+ tmp &= ~QMI_ERR_INTR_EN_DEQ_FROM_DEF;
+ iowrite32be(tmp, &fman->qmi_regs->fmqm_eien);
+ break;
+ case FMAN_EX_BMI_LIST_RAM_ECC:
+ tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
+ if (enable)
+ tmp |= BMI_ERR_INTR_EN_LIST_RAM_ECC;
+ else
+ tmp &= ~BMI_ERR_INTR_EN_LIST_RAM_ECC;
+ iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
+ break;
+ case FMAN_EX_BMI_STORAGE_PROFILE_ECC:
+ tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
+ if (enable)
+ tmp |= BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
+ else
+ tmp &= ~BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
+ iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
+ break;
+ case FMAN_EX_BMI_STATISTICS_RAM_ECC:
+ tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
+ if (enable)
+ tmp |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
+ else
+ tmp &= ~BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
+ iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
+ break;
+ case FMAN_EX_BMI_DISPATCH_RAM_ECC:
+ tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
+ if (enable)
+ tmp |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
+ else
+ tmp &= ~BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
+ iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
+ break;
+ case FMAN_EX_IRAM_ECC:
+ tmp = ioread32be(&fman->fpm_regs->fm_rie);
+ if (enable) {
+ /* enable ECC if not enabled */
+ enable_rams_ecc(fman->fpm_regs);
+ /* enable ECC interrupts */
+ tmp |= FPM_IRAM_ECC_ERR_EX_EN;
+ } else {
+ /* ECC mechanism may be disabled,
+ * depending on driver status
+ */
+ disable_rams_ecc(fman->fpm_regs);
+ tmp &= ~FPM_IRAM_ECC_ERR_EX_EN;
+ }
+ iowrite32be(tmp, &fman->fpm_regs->fm_rie);
+ break;
+ case FMAN_EX_MURAM_ECC:
+ tmp = ioread32be(&fman->fpm_regs->fm_rie);
+ if (enable) {
+ /* enable ECC if not enabled */
+ enable_rams_ecc(fman->fpm_regs);
+ /* enable ECC interrupts */
+ tmp |= FPM_MURAM_ECC_ERR_EX_EN;
+ } else {
+ /* ECC mechanism may be disabled,
+ * depending on driver status
+ */
+ disable_rams_ecc(fman->fpm_regs);
+ tmp &= ~FPM_MURAM_ECC_ERR_EX_EN;
+ }
+ iowrite32be(tmp, &fman->fpm_regs->fm_rie);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void resume(struct fman_fpm_regs __iomem *fpm_rg)
+{
+ u32 tmp;
+
+ tmp = ioread32be(&fpm_rg->fmfp_ee);
+ /* clear tmp_reg event bits in order not to clear standing events */
+ tmp &= ~(FPM_EV_MASK_DOUBLE_ECC |
+ FPM_EV_MASK_STALL | FPM_EV_MASK_SINGLE_ECC);
+ tmp |= FPM_EV_MASK_RELEASE_FM;
+
+ iowrite32be(tmp, &fpm_rg->fmfp_ee);
+}
+
+static int fill_soc_specific_params(struct fman_state_struct *state)
+{
+ u8 minor = state->rev_info.minor;
+ /* P4080 - Major 2
+ * P2041/P3041/P5020/P5040 - Major 3
+ * Tx/Bx - Major 6
+ */
+ switch (state->rev_info.major) {
+ case 3:
+ state->bmi_max_fifo_size = 160 * 1024;
+ state->fm_iram_size = 64 * 1024;
+ state->dma_thresh_max_commq = 31;
+ state->dma_thresh_max_buf = 127;
+ state->qmi_max_num_of_tnums = 64;
+ state->qmi_def_tnums_thresh = 48;
+ state->bmi_max_num_of_tasks = 128;
+ state->max_num_of_open_dmas = 32;
+ state->fm_port_num_of_cg = 256;
+ state->num_of_rx_ports = 6;
+ state->total_fifo_size = 136 * 1024;
+ break;
+
+ case 2:
+ state->bmi_max_fifo_size = 160 * 1024;
+ state->fm_iram_size = 64 * 1024;
+ state->dma_thresh_max_commq = 31;
+ state->dma_thresh_max_buf = 127;
+ state->qmi_max_num_of_tnums = 64;
+ state->qmi_def_tnums_thresh = 48;
+ state->bmi_max_num_of_tasks = 128;
+ state->max_num_of_open_dmas = 32;
+ state->fm_port_num_of_cg = 256;
+ state->num_of_rx_ports = 5;
+ state->total_fifo_size = 100 * 1024;
+ break;
+
+ case 6:
+ state->dma_thresh_max_commq = 83;
+ state->dma_thresh_max_buf = 127;
+ state->qmi_max_num_of_tnums = 64;
+ state->qmi_def_tnums_thresh = 32;
+ state->fm_port_num_of_cg = 256;
+
+ /* FManV3L */
+ if (minor == 1 || minor == 4) {
+ state->bmi_max_fifo_size = 192 * 1024;
+ state->bmi_max_num_of_tasks = 64;
+ state->max_num_of_open_dmas = 32;
+ state->num_of_rx_ports = 5;
+ if (minor == 1)
+ state->fm_iram_size = 32 * 1024;
+ else
+ state->fm_iram_size = 64 * 1024;
+ state->total_fifo_size = 156 * 1024;
+ }
+ /* FManV3H */
+ else if (minor == 0 || minor == 2 || minor == 3) {
+ state->bmi_max_fifo_size = 384 * 1024;
+ state->fm_iram_size = 64 * 1024;
+ state->bmi_max_num_of_tasks = 128;
+ state->max_num_of_open_dmas = 84;
+ state->num_of_rx_ports = 8;
+ state->total_fifo_size = 295 * 1024;
+ } else {
+ pr_err("Unsupported FManv3 version\n");
+ return -EINVAL;
+ }
+
+ break;
+ default:
+ pr_err("Unsupported FMan version\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static bool is_init_done(struct fman_cfg *cfg)
+{
+ /* Checks if FMan driver parameters were initialized */
+ if (!cfg)
+ return true;
+
+ return false;
+}
+
+static void free_init_resources(struct fman *fman)
+{
+ if (fman->cam_offset)
+ fman_muram_free_mem(fman->muram, fman->cam_offset,
+ fman->cam_size);
+ if (fman->fifo_offset)
+ fman_muram_free_mem(fman->muram, fman->fifo_offset,
+ fman->fifo_size);
+}
+
+static irqreturn_t bmi_err_event(struct fman *fman)
+{
+ u32 event, mask, force;
+ struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
+ irqreturn_t ret = IRQ_NONE;
+
+ event = ioread32be(&bmi_rg->fmbm_ievr);
+ mask = ioread32be(&bmi_rg->fmbm_ier);
+ event &= mask;
+ /* clear the forced events */
+ force = ioread32be(&bmi_rg->fmbm_ifr);
+ if (force & event)
+ iowrite32be(force & ~event, &bmi_rg->fmbm_ifr);
+ /* clear the acknowledged events */
+ iowrite32be(event, &bmi_rg->fmbm_ievr);
+
+ if (event & BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC)
+ ret = fman->exception_cb(fman, FMAN_EX_BMI_STORAGE_PROFILE_ECC);
+ if (event & BMI_ERR_INTR_EN_LIST_RAM_ECC)
+ ret = fman->exception_cb(fman, FMAN_EX_BMI_LIST_RAM_ECC);
+ if (event & BMI_ERR_INTR_EN_STATISTICS_RAM_ECC)
+ ret = fman->exception_cb(fman, FMAN_EX_BMI_STATISTICS_RAM_ECC);
+ if (event & BMI_ERR_INTR_EN_DISPATCH_RAM_ECC)
+ ret = fman->exception_cb(fman, FMAN_EX_BMI_DISPATCH_RAM_ECC);
+
+ return ret;
+}
+
+static irqreturn_t qmi_err_event(struct fman *fman)
+{
+ u32 event, mask, force;
+ struct fman_qmi_regs __iomem *qmi_rg = fman->qmi_regs;
+ irqreturn_t ret = IRQ_NONE;
+
+ event = ioread32be(&qmi_rg->fmqm_eie);
+ mask = ioread32be(&qmi_rg->fmqm_eien);
+ event &= mask;
+
+ /* clear the forced events */
+ force = ioread32be(&qmi_rg->fmqm_eif);
+ if (force & event)
+ iowrite32be(force & ~event, &qmi_rg->fmqm_eif);
+ /* clear the acknowledged events */
+ iowrite32be(event, &qmi_rg->fmqm_eie);
+
+ if (event & QMI_ERR_INTR_EN_DOUBLE_ECC)
+ ret = fman->exception_cb(fman, FMAN_EX_QMI_DOUBLE_ECC);
+ if (event & QMI_ERR_INTR_EN_DEQ_FROM_DEF)
+ ret = fman->exception_cb(fman,
+ FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID);
+
+ return ret;
+}
+
+static irqreturn_t dma_err_event(struct fman *fman)
+{
+ u32 status, mask, com_id;
+ u8 tnum, port_id, relative_port_id;
+ u16 liodn;
+ struct fman_dma_regs __iomem *dma_rg = fman->dma_regs;
+ irqreturn_t ret = IRQ_NONE;
+
+ status = ioread32be(&dma_rg->fmdmsr);
+ mask = ioread32be(&dma_rg->fmdmmr);
+
+ /* clear DMA_STATUS_BUS_ERR if mask has no DMA_MODE_BER */
+ if ((mask & DMA_MODE_BER) != DMA_MODE_BER)
+ status &= ~DMA_STATUS_BUS_ERR;
+
+ /* clear relevant bits if mask has no DMA_MODE_ECC */
+ if ((mask & DMA_MODE_ECC) != DMA_MODE_ECC)
+ status &= ~(DMA_STATUS_FM_SPDAT_ECC |
+ DMA_STATUS_READ_ECC |
+ DMA_STATUS_SYSTEM_WRITE_ECC |
+ DMA_STATUS_FM_WRITE_ECC);
+
+ /* clear set events */
+ iowrite32be(status, &dma_rg->fmdmsr);
+
+ if (status & DMA_STATUS_BUS_ERR) {
+ u64 addr;
+
+ addr = (u64)ioread32be(&dma_rg->fmdmtal);
+ addr |= ((u64)(ioread32be(&dma_rg->fmdmtah)) << 32);
+
+ com_id = ioread32be(&dma_rg->fmdmtcid);
+ port_id = (u8)(((com_id & DMA_TRANSFER_PORTID_MASK) >>
+ DMA_TRANSFER_PORTID_SHIFT));
+ relative_port_id =
+ hw_port_id_to_sw_port_id(fman->state->rev_info.major, port_id);
+ tnum = (u8)((com_id & DMA_TRANSFER_TNUM_MASK) >>
+ DMA_TRANSFER_TNUM_SHIFT);
+ liodn = (u16)(com_id & DMA_TRANSFER_LIODN_MASK);
+ ret = fman->bus_error_cb(fman, relative_port_id, addr, tnum,
+ liodn);
+ }
+ if (status & DMA_STATUS_FM_SPDAT_ECC)
+ ret = fman->exception_cb(fman, FMAN_EX_DMA_SINGLE_PORT_ECC);
+ if (status & DMA_STATUS_READ_ECC)
+ ret = fman->exception_cb(fman, FMAN_EX_DMA_READ_ECC);
+ if (status & DMA_STATUS_SYSTEM_WRITE_ECC)
+ ret = fman->exception_cb(fman, FMAN_EX_DMA_SYSTEM_WRITE_ECC);
+ if (status & DMA_STATUS_FM_WRITE_ECC)
+ ret = fman->exception_cb(fman, FMAN_EX_DMA_FM_WRITE_ECC);
+
+ return ret;
+}
+
+static irqreturn_t fpm_err_event(struct fman *fman)
+{
+ u32 event;
+ struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
+ irqreturn_t ret = IRQ_NONE;
+
+ event = ioread32be(&fpm_rg->fmfp_ee);
+ /* clear all occurred events */
+ iowrite32be(event, &fpm_rg->fmfp_ee);
+
+ if ((event & FPM_EV_MASK_DOUBLE_ECC) &&
+ (event & FPM_EV_MASK_DOUBLE_ECC_EN))
+ ret = fman->exception_cb(fman, FMAN_EX_FPM_DOUBLE_ECC);
+ if ((event & FPM_EV_MASK_STALL) && (event & FPM_EV_MASK_STALL_EN))
+ ret = fman->exception_cb(fman, FMAN_EX_FPM_STALL_ON_TASKS);
+ if ((event & FPM_EV_MASK_SINGLE_ECC) &&
+ (event & FPM_EV_MASK_SINGLE_ECC_EN))
+ ret = fman->exception_cb(fman, FMAN_EX_FPM_SINGLE_ECC);
+
+ return ret;
+}
+
+static irqreturn_t muram_err_intr(struct fman *fman)
+{
+ u32 event, mask;
+ struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
+ irqreturn_t ret = IRQ_NONE;
+
+ event = ioread32be(&fpm_rg->fm_rcr);
+ mask = ioread32be(&fpm_rg->fm_rie);
+
+ /* clear MURAM event bit (do not clear IRAM event) */
+ iowrite32be(event & ~FPM_RAM_IRAM_ECC, &fpm_rg->fm_rcr);
+
+ if ((mask & FPM_MURAM_ECC_ERR_EX_EN) && (event & FPM_RAM_MURAM_ECC))
+ ret = fman->exception_cb(fman, FMAN_EX_MURAM_ECC);
+
+ return ret;
+}
+
+static irqreturn_t qmi_event(struct fman *fman)
+{
+ u32 event, mask, force;
+ struct fman_qmi_regs __iomem *qmi_rg = fman->qmi_regs;
+ irqreturn_t ret = IRQ_NONE;
+
+ event = ioread32be(&qmi_rg->fmqm_ie);
+ mask = ioread32be(&qmi_rg->fmqm_ien);
+ event &= mask;
+ /* clear the forced events */
+ force = ioread32be(&qmi_rg->fmqm_if);
+ if (force & event)
+ iowrite32be(force & ~event, &qmi_rg->fmqm_if);
+ /* clear the acknowledged events */
+ iowrite32be(event, &qmi_rg->fmqm_ie);
+
+ if (event & QMI_INTR_EN_SINGLE_ECC)
+ ret = fman->exception_cb(fman, FMAN_EX_QMI_SINGLE_ECC);
+
+ return ret;
+}
+
+static void enable_time_stamp(struct fman *fman)
+{
+ struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
+ u16 fm_clk_freq = fman->state->fm_clk_freq;
+ u32 tmp, intgr, ts_freq, frac;
+
+ ts_freq = (u32)(1 << fman->state->count1_micro_bit);
+ /* Configure the timestamp so that bit 8 counts 1 microsecond.
+ * Find the effective count rate at the TIMESTAMP least significant bits:
+ * Effective_Count_Rate = 1 MHz x 2^8 = 256 MHz
+ * Then find the frequency ratio between the effective count rate and the
+ * FM clock, e.g. for a 600 MHz clock:
+ * 256 / 600 = 0.4266666...
+ */
+
+ intgr = ts_freq / fm_clk_freq;
+ /* Multiply by 2^16 to keep the fraction of the division; do not
+ * divide back, since the value is written to the register as a
+ * fraction (see the block specification)
+ */
+
+ frac = ((ts_freq << 16) - (intgr << 16) * fm_clk_freq) / fm_clk_freq;
+ /* check the remainder of the division in order to round up if non-zero */
+ if (((ts_freq << 16) - (intgr << 16) * fm_clk_freq) % fm_clk_freq)
+ frac++;
+
+ tmp = (intgr << FPM_TS_INT_SHIFT) | (u16)frac;
+ iowrite32be(tmp, &fpm_rg->fmfp_tsc2);
+
+ /* enable timestamp with original clock */
+ iowrite32be(FPM_TS_CTL_EN, &fpm_rg->fmfp_tsc1);
+ fman->state->enabled_time_stamp = true;
+}
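+
+/* Worked example (illustrative only, not part of the driver): assuming a
+ * hypothetical 600 MHz FMan clock, ts_freq = 256 and FPM_TS_INT_SHIFT = 16,
+ * the code above computes
+ *   intgr = 256 / 600 = 0
+ *   frac  = (256 << 16) / 600 = 27962, remainder 16, rounded up to 27963
+ * so fmfp_tsc2 is written with (0 << 16) | 27963 = 0x6d3b; the fraction
+ * 27963 / 2^16 ~= 0.4267 matches the 256/600 ratio noted in the comment.
+ */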
+
+static int clear_iram(struct fman *fman)
+{
+ struct fman_iram_regs __iomem *iram;
+ int i, count;
+
+ iram = fman->base_addr + IMEM_OFFSET;
+
+ /* Enable the auto-increment */
+ iowrite32be(IRAM_IADD_AIE, &iram->iadd);
+ count = 100;
+ do {
+ udelay(1);
+ } while ((ioread32be(&iram->iadd) != IRAM_IADD_AIE) && --count);
+ if (count == 0)
+ return -EBUSY;
+
+ for (i = 0; i < (fman->state->fm_iram_size / 4); i++)
+ iowrite32be(0xffffffff, &iram->idata);
+
+ iowrite32be(fman->state->fm_iram_size - 4, &iram->iadd);
+ count = 100;
+ do {
+ udelay(1);
+ } while ((ioread32be(&iram->idata) != 0xffffffff) && --count);
+ if (count == 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+static u32 get_exception_flag(enum fman_exceptions exception)
+{
+ u32 bit_mask;
+
+ switch (exception) {
+ case FMAN_EX_DMA_BUS_ERROR:
+ bit_mask = EX_DMA_BUS_ERROR;
+ break;
+ case FMAN_EX_DMA_SINGLE_PORT_ECC:
+ bit_mask = EX_DMA_SINGLE_PORT_ECC;
+ break;
+ case FMAN_EX_DMA_READ_ECC:
+ bit_mask = EX_DMA_READ_ECC;
+ break;
+ case FMAN_EX_DMA_SYSTEM_WRITE_ECC:
+ bit_mask = EX_DMA_SYSTEM_WRITE_ECC;
+ break;
+ case FMAN_EX_DMA_FM_WRITE_ECC:
+ bit_mask = EX_DMA_FM_WRITE_ECC;
+ break;
+ case FMAN_EX_FPM_STALL_ON_TASKS:
+ bit_mask = EX_FPM_STALL_ON_TASKS;
+ break;
+ case FMAN_EX_FPM_SINGLE_ECC:
+ bit_mask = EX_FPM_SINGLE_ECC;
+ break;
+ case FMAN_EX_FPM_DOUBLE_ECC:
+ bit_mask = EX_FPM_DOUBLE_ECC;
+ break;
+ case FMAN_EX_QMI_SINGLE_ECC:
+ bit_mask = EX_QMI_SINGLE_ECC;
+ break;
+ case FMAN_EX_QMI_DOUBLE_ECC:
+ bit_mask = EX_QMI_DOUBLE_ECC;
+ break;
+ case FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID:
+ bit_mask = EX_QMI_DEQ_FROM_UNKNOWN_PORTID;
+ break;
+ case FMAN_EX_BMI_LIST_RAM_ECC:
+ bit_mask = EX_BMI_LIST_RAM_ECC;
+ break;
+ case FMAN_EX_BMI_STORAGE_PROFILE_ECC:
+ bit_mask = EX_BMI_STORAGE_PROFILE_ECC;
+ break;
+ case FMAN_EX_BMI_STATISTICS_RAM_ECC:
+ bit_mask = EX_BMI_STATISTICS_RAM_ECC;
+ break;
+ case FMAN_EX_BMI_DISPATCH_RAM_ECC:
+ bit_mask = EX_BMI_DISPATCH_RAM_ECC;
+ break;
+ case FMAN_EX_MURAM_ECC:
+ bit_mask = EX_MURAM_ECC;
+ break;
+ default:
+ bit_mask = 0;
+ break;
+ }
+
+ return bit_mask;
+}
+
+static int get_module_event(enum fman_event_modules module, u8 mod_id,
+ enum fman_intr_type intr_type)
+{
+ int event;
+
+ switch (module) {
+ case FMAN_MOD_MAC:
+ if (intr_type == FMAN_INTR_TYPE_ERR)
+ event = FMAN_EV_ERR_MAC0 + mod_id;
+ else
+ event = FMAN_EV_MAC0 + mod_id;
+ break;
+ case FMAN_MOD_FMAN_CTRL:
+ if (intr_type == FMAN_INTR_TYPE_ERR)
+ event = FMAN_EV_CNT;
+ else
+ event = (FMAN_EV_FMAN_CTRL_0 + mod_id);
+ break;
+ case FMAN_MOD_DUMMY_LAST:
+ event = FMAN_EV_CNT;
+ break;
+ default:
+ event = FMAN_EV_CNT;
+ break;
+ }
+
+ return event;
+}
+
+static int set_size_of_fifo(struct fman *fman, u8 port_id, u32 *size_of_fifo,
+ u32 *extra_size_of_fifo)
+{
+ struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
+ u32 fifo = *size_of_fifo;
+ u32 extra_fifo = *extra_size_of_fifo;
+ u32 tmp;
+
+ /* if this is the first time a port requires extra_fifo_pool_size,
+ * the total extra_fifo_pool_size must be initialized to 1 buffer per
+ * port
+ */
+ if (extra_fifo && !fman->state->extra_fifo_pool_size)
+ fman->state->extra_fifo_pool_size =
+ fman->state->num_of_rx_ports * FMAN_BMI_FIFO_UNITS;
+
+ fman->state->extra_fifo_pool_size =
+ max(fman->state->extra_fifo_pool_size, extra_fifo);
+
+ /* check that there is enough uncommitted FIFO size */
+ if ((fman->state->accumulated_fifo_size + fifo) >
+ (fman->state->total_fifo_size -
+ fman->state->extra_fifo_pool_size)) {
+ dev_err(fman->dev, "%s: Requested fifo size and extra size exceed total FIFO size.\n",
+ __func__);
+ return -EAGAIN;
+ }
+
+ /* Read, modify and write to HW */
+ tmp = (fifo / FMAN_BMI_FIFO_UNITS - 1) |
+ ((extra_fifo / FMAN_BMI_FIFO_UNITS) <<
+ BMI_EXTRA_FIFO_SIZE_SHIFT);
+ iowrite32be(tmp, &bmi_rg->fmbm_pfs[port_id - 1]);
+
+ /* update accumulated */
+ fman->state->accumulated_fifo_size += fifo;
+
+ return 0;
+}
+
+static int set_num_of_tasks(struct fman *fman, u8 port_id, u8 *num_of_tasks,
+ u8 *num_of_extra_tasks)
+{
+ struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
+ u8 tasks = *num_of_tasks;
+ u8 extra_tasks = *num_of_extra_tasks;
+ u32 tmp;
+
+ if (extra_tasks)
+ fman->state->extra_tasks_pool_size =
+ max(fman->state->extra_tasks_pool_size, extra_tasks);
+
+ /* check that there are enough uncommitted tasks */
+ if ((fman->state->accumulated_num_of_tasks + tasks) >
+ (fman->state->total_num_of_tasks -
+ fman->state->extra_tasks_pool_size)) {
+ dev_err(fman->dev, "%s: Requested num_of_tasks and extra tasks pool for fm%d exceed total num_of_tasks.\n",
+ __func__, fman->state->fm_id);
+ return -EAGAIN;
+ }
+ /* update accumulated */
+ fman->state->accumulated_num_of_tasks += tasks;
+
+ /* Write to HW */
+ tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]) &
+ ~(BMI_NUM_OF_TASKS_MASK | BMI_NUM_OF_EXTRA_TASKS_MASK);
+ tmp |= ((u32)((tasks - 1) << BMI_NUM_OF_TASKS_SHIFT) |
+ (u32)(extra_tasks << BMI_EXTRA_NUM_OF_TASKS_SHIFT));
+ iowrite32be(tmp, &bmi_rg->fmbm_pp[port_id - 1]);
+
+ return 0;
+}
+
+static int set_num_of_open_dmas(struct fman *fman, u8 port_id,
+ u8 *num_of_open_dmas,
+ u8 *num_of_extra_open_dmas)
+{
+ struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
+ u8 open_dmas = *num_of_open_dmas;
+ u8 extra_open_dmas = *num_of_extra_open_dmas;
+ u8 total_num_dmas = 0, current_val = 0, current_extra_val = 0;
+ u32 tmp;
+
+ if (!open_dmas) {
+ /* Configuration according to values in the HW:
+ * read the current number of open DMAs
+ */
+ tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]);
+ current_extra_val = (u8)((tmp & BMI_NUM_OF_EXTRA_DMAS_MASK) >>
+ BMI_EXTRA_NUM_OF_DMAS_SHIFT);
+
+ tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]);
+ current_val = (u8)(((tmp & BMI_NUM_OF_DMAS_MASK) >>
+ BMI_NUM_OF_DMAS_SHIFT) + 1);
+
+ /* This is the first configuration and the user did not
+ * specify a value (!open_dmas), so the reset values will be
+ * used; just save them for resource management
+ */
+ fman->state->extra_open_dmas_pool_size =
+ (u8)max(fman->state->extra_open_dmas_pool_size,
+ current_extra_val);
+ fman->state->accumulated_num_of_open_dmas += current_val;
+ *num_of_open_dmas = current_val;
+ *num_of_extra_open_dmas = current_extra_val;
+ return 0;
+ }
+
+ if (extra_open_dmas > current_extra_val)
+ fman->state->extra_open_dmas_pool_size =
+ (u8)max(fman->state->extra_open_dmas_pool_size,
+ extra_open_dmas);
+
+ if ((fman->state->rev_info.major < 6) &&
+ (fman->state->accumulated_num_of_open_dmas - current_val +
+ open_dmas > fman->state->max_num_of_open_dmas)) {
+ dev_err(fman->dev, "%s: Requested num_of_open_dmas for fm%d exceeds total num_of_open_dmas.\n",
+ __func__, fman->state->fm_id);
+ return -EAGAIN;
+ } else if ((fman->state->rev_info.major >= 6) &&
+ !((fman->state->rev_info.major == 6) &&
+ (fman->state->rev_info.minor == 0)) &&
+ (fman->state->accumulated_num_of_open_dmas -
+ current_val + open_dmas >
+ fman->state->dma_thresh_max_commq + 1)) {
+ dev_err(fman->dev, "%s: Requested num_of_open_dmas for fm%d exceeds DMA Command queue (%d)\n",
+ __func__, fman->state->fm_id,
+ fman->state->dma_thresh_max_commq + 1);
+ return -EAGAIN;
+ }
+
+ WARN_ON(fman->state->accumulated_num_of_open_dmas < current_val);
+ /* update accumulated */
+ fman->state->accumulated_num_of_open_dmas -= current_val;
+ fman->state->accumulated_num_of_open_dmas += open_dmas;
+
+ if (fman->state->rev_info.major < 6)
+ total_num_dmas =
+ (u8)(fman->state->accumulated_num_of_open_dmas +
+ fman->state->extra_open_dmas_pool_size);
+
+ /* calculate reg */
+ tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]) &
+ ~(BMI_NUM_OF_DMAS_MASK | BMI_NUM_OF_EXTRA_DMAS_MASK);
+ tmp |= (u32)(((open_dmas - 1) << BMI_NUM_OF_DMAS_SHIFT) |
+ (extra_open_dmas << BMI_EXTRA_NUM_OF_DMAS_SHIFT));
+ iowrite32be(tmp, &bmi_rg->fmbm_pp[port_id - 1]);
+
+ /* update the total number of DMAs with the committed number of
+ * open DMAs and the max uncommitted pool
+ */
+ if (total_num_dmas) {
+ tmp = ioread32be(&bmi_rg->fmbm_cfg2) & ~BMI_CFG2_DMAS_MASK;
+ tmp |= (u32)(total_num_dmas - 1) << BMI_CFG2_DMAS_SHIFT;
+ iowrite32be(tmp, &bmi_rg->fmbm_cfg2);
+ }
+
+ return 0;
+}
+
+static int fman_config(struct fman *fman)
+{
+ void __iomem *base_addr;
+ int err;
+
+ base_addr = fman->dts_params.base_addr;
+
+ fman->state = kzalloc(sizeof(*fman->state), GFP_KERNEL);
+ if (!fman->state)
+ goto err_fm_state;
+
+ /* Allocate the FM driver's parameters structure */
+ fman->cfg = kzalloc(sizeof(*fman->cfg), GFP_KERNEL);
+ if (!fman->cfg)
+ goto err_fm_drv;
+
+ /* Initialize MURAM block */
+ fman->muram =
+ fman_muram_init(fman->dts_params.muram_res.start,
+ resource_size(&fman->dts_params.muram_res));
+ if (!fman->muram)
+ goto err_fm_soc_specific;
+
+ /* Initialize FM parameters which will be kept by the driver */
+ fman->state->fm_id = fman->dts_params.id;
+ fman->state->fm_clk_freq = fman->dts_params.clk_freq;
+ fman->state->qman_channel_base = fman->dts_params.qman_channel_base;
+ fman->state->num_of_qman_channels =
+ fman->dts_params.num_of_qman_channels;
+ fman->state->res = fman->dts_params.res;
+ fman->exception_cb = fman_exceptions;
+ fman->bus_error_cb = fman_bus_error;
+ fman->fpm_regs = base_addr + FPM_OFFSET;
+ fman->bmi_regs = base_addr + BMI_OFFSET;
+ fman->qmi_regs = base_addr + QMI_OFFSET;
+ fman->dma_regs = base_addr + DMA_OFFSET;
+ fman->hwp_regs = base_addr + HWP_OFFSET;
+ fman->kg_regs = base_addr + KG_OFFSET;
+ fman->base_addr = base_addr;
+
+ spin_lock_init(&fman->spinlock);
+ fman_defconfig(fman->cfg);
+
+ fman->state->extra_fifo_pool_size = 0;
+ fman->state->exceptions = (EX_DMA_BUS_ERROR |
+ EX_DMA_READ_ECC |
+ EX_DMA_SYSTEM_WRITE_ECC |
+ EX_DMA_FM_WRITE_ECC |
+ EX_FPM_STALL_ON_TASKS |
+ EX_FPM_SINGLE_ECC |
+ EX_FPM_DOUBLE_ECC |
+ EX_QMI_DEQ_FROM_UNKNOWN_PORTID |
+ EX_BMI_LIST_RAM_ECC |
+ EX_BMI_STORAGE_PROFILE_ECC |
+ EX_BMI_STATISTICS_RAM_ECC |
+ EX_MURAM_ECC |
+ EX_BMI_DISPATCH_RAM_ECC |
+ EX_QMI_DOUBLE_ECC |
+ EX_QMI_SINGLE_ECC);
+
+ /* Read FMan revision for future use */
+ fman_get_revision(fman, &fman->state->rev_info);
+
+ err = fill_soc_specific_params(fman->state);
+ if (err)
+ goto err_fm_soc_specific;
+
+ /* FM_AID_MODE_NO_TNUM_SW005 Errata workaround */
+ if (fman->state->rev_info.major >= 6)
+ fman->cfg->dma_aid_mode = FMAN_DMA_AID_OUT_PORT_ID;
+
+ fman->cfg->qmi_def_tnums_thresh = fman->state->qmi_def_tnums_thresh;
+
+ fman->state->total_num_of_tasks =
+ (u8)DFLT_TOTAL_NUM_OF_TASKS(fman->state->rev_info.major,
+ fman->state->rev_info.minor,
+ fman->state->bmi_max_num_of_tasks);
+
+ if (fman->state->rev_info.major < 6) {
+ fman->cfg->dma_comm_qtsh_clr_emer =
+ (u8)DFLT_DMA_COMM_Q_LOW(fman->state->rev_info.major,
+ fman->state->dma_thresh_max_commq);
+
+ fman->cfg->dma_comm_qtsh_asrt_emer =
+ (u8)DFLT_DMA_COMM_Q_HIGH(fman->state->rev_info.major,
+ fman->state->dma_thresh_max_commq);
+
+ fman->cfg->dma_cam_num_of_entries =
+ DFLT_DMA_CAM_NUM_OF_ENTRIES(fman->state->rev_info.major);
+
+ fman->cfg->dma_read_buf_tsh_clr_emer =
+ DFLT_DMA_READ_INT_BUF_LOW(fman->state->dma_thresh_max_buf);
+
+ fman->cfg->dma_read_buf_tsh_asrt_emer =
+ DFLT_DMA_READ_INT_BUF_HIGH(fman->state->dma_thresh_max_buf);
+
+ fman->cfg->dma_write_buf_tsh_clr_emer =
+ DFLT_DMA_WRITE_INT_BUF_LOW(fman->state->dma_thresh_max_buf);
+
+ fman->cfg->dma_write_buf_tsh_asrt_emer =
+ DFLT_DMA_WRITE_INT_BUF_HIGH(fman->state->dma_thresh_max_buf);
+
+ fman->cfg->dma_axi_dbg_num_of_beats =
+ DFLT_AXI_DBG_NUM_OF_BEATS;
+ }
+
+ return 0;
+
+err_fm_soc_specific:
+ kfree(fman->cfg);
+err_fm_drv:
+ kfree(fman->state);
+err_fm_state:
+ kfree(fman);
+ return -EINVAL;
+}
+
+static int fman_reset(struct fman *fman)
+{
+ u32 count;
+ int err = 0;
+
+ if (fman->state->rev_info.major < 6) {
+ iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc);
+ /* Wait for reset completion */
+ count = 100;
+ do {
+ udelay(1);
+ } while (((ioread32be(&fman->fpm_regs->fm_rstc)) &
+ FPM_RSTC_FM_RESET) && --count);
+ if (count == 0)
+ err = -EBUSY;
+
+ goto _return;
+ } else {
+#ifdef CONFIG_PPC
+ struct device_node *guts_node;
+ struct ccsr_guts __iomem *guts_regs;
+ u32 devdisr2, reg;
+
+ /* Errata A007273 */
+ guts_node =
+ of_find_compatible_node(NULL, NULL,
+ "fsl,qoriq-device-config-2.0");
+ if (!guts_node) {
+ dev_err(fman->dev, "%s: Couldn't find guts node\n",
+ __func__);
+ goto guts_node;
+ }
+
+ guts_regs = of_iomap(guts_node, 0);
+ if (!guts_regs) {
+ dev_err(fman->dev, "%s: Couldn't map %pOF regs\n",
+ __func__, guts_node);
+ goto guts_regs;
+ }
+#define FMAN1_ALL_MACS_MASK 0xFCC00000
+#define FMAN2_ALL_MACS_MASK 0x000FCC00
+ /* Read current state */
+ devdisr2 = ioread32be(&guts_regs->devdisr2);
+ if (fman->dts_params.id == 0)
+ reg = devdisr2 & ~FMAN1_ALL_MACS_MASK;
+ else
+ reg = devdisr2 & ~FMAN2_ALL_MACS_MASK;
+
+ /* Enable all MACs */
+ iowrite32be(reg, &guts_regs->devdisr2);
+#endif
+
+ /* Perform FMan reset */
+ iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc);
+
+ /* Wait for reset completion */
+ count = 100;
+ do {
+ udelay(1);
+ } while (((ioread32be(&fman->fpm_regs->fm_rstc)) &
+ FPM_RSTC_FM_RESET) && --count);
+ if (count == 0) {
+#ifdef CONFIG_PPC
+ iounmap(guts_regs);
+ of_node_put(guts_node);
+#endif
+ err = -EBUSY;
+ goto _return;
+ }
+#ifdef CONFIG_PPC
+
+ /* Restore devdisr2 value */
+ iowrite32be(devdisr2, &guts_regs->devdisr2);
+
+ iounmap(guts_regs);
+ of_node_put(guts_node);
+#endif
+
+ goto _return;
+
+#ifdef CONFIG_PPC
+guts_regs:
+ of_node_put(guts_node);
+guts_node:
+ dev_dbg(fman->dev, "%s: Didn't perform FManV3 reset due to Errata A007273!\n",
+ __func__);
+#endif
+ }
+_return:
+ return err;
+}
+
+static int fman_init(struct fman *fman)
+{
+ struct fman_cfg *cfg = NULL;
+ int err = 0, i, count;
+
+ if (is_init_done(fman->cfg))
+ return -EINVAL;
+
+ fman->state->count1_micro_bit = FM_TIMESTAMP_1_USEC_BIT;
+
+ cfg = fman->cfg;
+
+ /* clear revision-dependent non-existent exceptions */
+ if (fman->state->rev_info.major < 6)
+ fman->state->exceptions &= ~FMAN_EX_BMI_DISPATCH_RAM_ECC;
+
+ if (fman->state->rev_info.major >= 6)
+ fman->state->exceptions &= ~FMAN_EX_QMI_SINGLE_ECC;
+
+ /* clear CPG */
+ memset_io((void __iomem *)(fman->base_addr + CGP_OFFSET), 0,
+ fman->state->fm_port_num_of_cg);
+
+ /* Save LIODN info before FMan reset
+ * Skipping non-existent port 0 (i = 1)
+ */
+ for (i = 1; i < FMAN_LIODN_TBL; i++) {
+ u32 liodn_base;
+
+ fman->liodn_offset[i] =
+ ioread32be(&fman->bmi_regs->fmbm_spliodn[i - 1]);
+ if (!IS_ENABLED(CONFIG_FSL_PAMU))
+ continue;
+ liodn_base = ioread32be(&fman->dma_regs->fmdmplr[i / 2]);
+ if (i % 2) {
+ /* FMDM_PLR LSB holds LIODN base for odd ports */
+ liodn_base &= DMA_LIODN_BASE_MASK;
+ } else {
+ /* FMDM_PLR MSB holds LIODN base for even ports */
+ liodn_base >>= DMA_LIODN_SHIFT;
+ liodn_base &= DMA_LIODN_BASE_MASK;
+ }
+ fman->liodn_base[i] = liodn_base;
+ }
+
+ err = fman_reset(fman);
+ if (err)
+ return err;
+
+ if (ioread32be(&fman->qmi_regs->fmqm_gs) & QMI_GS_HALT_NOT_BUSY) {
+ resume(fman->fpm_regs);
+ /* Wait until QMI is no longer in the halt-not-busy state */
+ count = 100;
+ do {
+ udelay(1);
+ } while (((ioread32be(&fman->qmi_regs->fmqm_gs)) &
+ QMI_GS_HALT_NOT_BUSY) && --count);
+ if (count == 0)
+ dev_warn(fman->dev, "%s: QMI is in halt not busy state\n",
+ __func__);
+ }
+
+ if (clear_iram(fman) != 0)
+ return -EINVAL;
+
+ cfg->exceptions = fman->state->exceptions;
+
+ /* Init DMA Registers */
+
+ err = dma_init(fman);
+ if (err != 0) {
+ free_init_resources(fman);
+ return err;
+ }
+
+ /* Init FPM Registers */
+ fpm_init(fman->fpm_regs, fman->cfg);
+
+ /* define common resources */
+ /* allocate MURAM for FIFO according to total size */
+ fman->fifo_offset = fman_muram_alloc(fman->muram,
+ fman->state->total_fifo_size);
+ if (IS_ERR_VALUE(fman->fifo_offset)) {
+ free_init_resources(fman);
+ dev_err(fman->dev, "%s: MURAM alloc for BMI FIFO failed\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ cfg->fifo_base_addr = fman->fifo_offset;
+ cfg->total_fifo_size = fman->state->total_fifo_size;
+ cfg->total_num_of_tasks = fman->state->total_num_of_tasks;
+ cfg->clk_freq = fman->state->fm_clk_freq;
+
+ /* Init BMI Registers */
+ bmi_init(fman->bmi_regs, fman->cfg);
+
+ /* Init QMI Registers */
+ qmi_init(fman->qmi_regs, fman->cfg);
+
+ /* Init HW Parser */
+ hwp_init(fman->hwp_regs);
+
+ /* Init KeyGen */
+ fman->keygen = keygen_init(fman->kg_regs);
+ if (!fman->keygen)
+ return -EINVAL;
+
+ err = enable(fman, cfg);
+ if (err != 0)
+ return err;
+
+ enable_time_stamp(fman);
+
+ kfree(fman->cfg);
+ fman->cfg = NULL;
+
+ return 0;
+}
+
+static int fman_set_exception(struct fman *fman,
+ enum fman_exceptions exception, bool enable)
+{
+ u32 bit_mask = 0;
+
+ if (!is_init_done(fman->cfg))
+ return -EINVAL;
+
+ bit_mask = get_exception_flag(exception);
+ if (bit_mask) {
+ if (enable)
+ fman->state->exceptions |= bit_mask;
+ else
+ fman->state->exceptions &= ~bit_mask;
+ } else {
+ dev_err(fman->dev, "%s: Undefined exception (%d)\n",
+ __func__, exception);
+ return -EINVAL;
+ }
+
+ return set_exception(fman, exception, enable);
+}
+
+/**
+ * fman_register_intr
+ * @fman: A Pointer to FMan device
+ * @module: Calling module
+ * @mod_id: Module id (if more than 1 exists, '0' if not)
+ * @intr_type: Interrupt type (error/normal) selection.
+ * @isr_cb: The interrupt service routine.
+ * @src_arg: Argument to be passed to isr_cb.
+ *
+ * Used to register an event handler to be processed by FMan
+ */
+void fman_register_intr(struct fman *fman, enum fman_event_modules module,
+ u8 mod_id, enum fman_intr_type intr_type,
+ void (*isr_cb)(void *src_arg), void *src_arg)
+{
+ int event = 0;
+
+ event = get_module_event(module, mod_id, intr_type);
+ WARN_ON(event >= FMAN_EV_CNT);
+
+ /* register in local FM structure */
+ fman->intr_mng[event].isr_cb = isr_cb;
+ fman->intr_mng[event].src_handle = src_arg;
+}
+EXPORT_SYMBOL(fman_register_intr);
+
+/**
+ * fman_unregister_intr
+ * @fman: A Pointer to FMan device
+ * @module: Calling module
+ * @mod_id: Module id (if more than 1 exists, '0' if not)
+ * @intr_type: Interrupt type (error/normal) selection.
+ *
+ * Used to unregister an event handler to be processed by FMan
+ */
+void fman_unregister_intr(struct fman *fman, enum fman_event_modules module,
+ u8 mod_id, enum fman_intr_type intr_type)
+{
+ int event = 0;
+
+ event = get_module_event(module, mod_id, intr_type);
+ WARN_ON(event >= FMAN_EV_CNT);
+
+ fman->intr_mng[event].isr_cb = NULL;
+ fman->intr_mng[event].src_handle = NULL;
+}
+EXPORT_SYMBOL(fman_unregister_intr);
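+
+/* Usage sketch (illustrative only, not part of this patch): a MAC driver
+ * that obtained its struct fman * via fman_bind() could hook an error ISR
+ * roughly as below; my_mac_err_isr and mac_dev are hypothetical names.
+ *
+ *	fman_register_intr(fman, FMAN_MOD_MAC, mac_id, FMAN_INTR_TYPE_ERR,
+ *			   my_mac_err_isr, mac_dev);
+ *	...
+ *	fman_unregister_intr(fman, FMAN_MOD_MAC, mac_id, FMAN_INTR_TYPE_ERR);
+ */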
+
+/**
+ * fman_set_port_params
+ * @fman: A Pointer to FMan device
+ * @port_params: Port parameters
+ *
+ * Used by FMan Port to pass parameters to the FMan
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_set_port_params(struct fman *fman,
+ struct fman_port_init_params *port_params)
+{
+ int err;
+ unsigned long flags;
+ u8 port_id = port_params->port_id, mac_id;
+
+ spin_lock_irqsave(&fman->spinlock, flags);
+
+ err = set_num_of_tasks(fman, port_params->port_id,
+ &port_params->num_of_tasks,
+ &port_params->num_of_extra_tasks);
+ if (err)
+ goto return_err;
+
+ /* TX Ports */
+ if (port_params->port_type != FMAN_PORT_TYPE_RX) {
+ u32 enq_th, deq_th, reg;
+
+ /* update qmi ENQ/DEQ threshold */
+ fman->state->accumulated_num_of_deq_tnums +=
+ port_params->deq_pipeline_depth;
+ enq_th = (ioread32be(&fman->qmi_regs->fmqm_gc) &
+ QMI_CFG_ENQ_MASK) >> QMI_CFG_ENQ_SHIFT;
+ /* if enq_th is too big, reduce it to the largest value that
+ * still leaves room for the reserved dequeue TNUMs
+ */
+ if (enq_th >= (fman->state->qmi_max_num_of_tnums -
+ fman->state->accumulated_num_of_deq_tnums)) {
+ enq_th =
+ fman->state->qmi_max_num_of_tnums -
+ fman->state->accumulated_num_of_deq_tnums - 1;
+
+ reg = ioread32be(&fman->qmi_regs->fmqm_gc);
+ reg &= ~QMI_CFG_ENQ_MASK;
+ reg |= (enq_th << QMI_CFG_ENQ_SHIFT);
+ iowrite32be(reg, &fman->qmi_regs->fmqm_gc);
+ }
+
+ deq_th = ioread32be(&fman->qmi_regs->fmqm_gc) &
+ QMI_CFG_DEQ_MASK;
+ /* if deq_th is too small, enlarge it to the smallest value
+ * that is still above the accumulated dequeue TNUMs.
+ * deq_th may not be larger than 63
+ * (fman->state->qmi_max_num_of_tnums - 1).
+ */
+ if ((deq_th <= fman->state->accumulated_num_of_deq_tnums) &&
+ (deq_th < fman->state->qmi_max_num_of_tnums - 1)) {
+ deq_th = fman->state->accumulated_num_of_deq_tnums + 1;
+ reg = ioread32be(&fman->qmi_regs->fmqm_gc);
+ reg &= ~QMI_CFG_DEQ_MASK;
+ reg |= deq_th;
+ iowrite32be(reg, &fman->qmi_regs->fmqm_gc);
+ }
+ }
+
+ err = set_size_of_fifo(fman, port_params->port_id,
+ &port_params->size_of_fifo,
+ &port_params->extra_size_of_fifo);
+ if (err)
+ goto return_err;
+
+ err = set_num_of_open_dmas(fman, port_params->port_id,
+ &port_params->num_of_open_dmas,
+ &port_params->num_of_extra_open_dmas);
+ if (err)
+ goto return_err;
+
+ set_port_liodn(fman, port_id, fman->liodn_base[port_id],
+ fman->liodn_offset[port_id]);
+
+ if (fman->state->rev_info.major < 6)
+ set_port_order_restoration(fman->fpm_regs, port_id);
+
+ mac_id = hw_port_id_to_sw_port_id(fman->state->rev_info.major, port_id);
+
+ if (port_params->max_frame_length >= fman->state->mac_mfl[mac_id]) {
+ fman->state->port_mfl[mac_id] = port_params->max_frame_length;
+ } else {
+ dev_warn(fman->dev, "%s: Port (%d) max_frame_length is smaller than MAC (%d) current MTU\n",
+ __func__, port_id, mac_id);
+ err = -EINVAL;
+ goto return_err;
+ }
+
+ spin_unlock_irqrestore(&fman->spinlock, flags);
+
+ return 0;
+
+return_err:
+ spin_unlock_irqrestore(&fman->spinlock, flags);
+ return err;
+}
+EXPORT_SYMBOL(fman_set_port_params);
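+
+/* Usage sketch (illustrative only, not part of this patch): the FMan port
+ * driver fills a struct fman_port_init_params (see fman.h) and hands it to
+ * the FMan; the field values below are made up for illustration.
+ *
+ *	struct fman_port_init_params params = {
+ *		.port_id = 1,
+ *		.port_type = FMAN_PORT_TYPE_RX,
+ *		.num_of_tasks = 4,
+ *		.size_of_fifo = 4 * FMAN_BMI_FIFO_UNITS,
+ *		.max_frame_length = fman_get_max_frm(),
+ *	};
+ *	int err = fman_set_port_params(fman, &params);
+ */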
+
+/**
+ * fman_reset_mac
+ * @fman: A Pointer to FMan device
+ * @mac_id: MAC id to be reset
+ *
+ * Reset a specific MAC
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_reset_mac(struct fman *fman, u8 mac_id)
+{
+ struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
+ u32 msk, timeout = 100;
+
+ if (fman->state->rev_info.major >= 6) {
+ dev_err(fman->dev, "%s: FMan MAC reset not available for FMan V3!\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Get the relevant bit mask */
+ switch (mac_id) {
+ case 0:
+ msk = FPM_RSTC_MAC0_RESET;
+ break;
+ case 1:
+ msk = FPM_RSTC_MAC1_RESET;
+ break;
+ case 2:
+ msk = FPM_RSTC_MAC2_RESET;
+ break;
+ case 3:
+ msk = FPM_RSTC_MAC3_RESET;
+ break;
+ case 4:
+ msk = FPM_RSTC_MAC4_RESET;
+ break;
+ case 5:
+ msk = FPM_RSTC_MAC5_RESET;
+ break;
+ case 6:
+ msk = FPM_RSTC_MAC6_RESET;
+ break;
+ case 7:
+ msk = FPM_RSTC_MAC7_RESET;
+ break;
+ case 8:
+ msk = FPM_RSTC_MAC8_RESET;
+ break;
+ case 9:
+ msk = FPM_RSTC_MAC9_RESET;
+ break;
+ default:
+ dev_warn(fman->dev, "%s: Illegal MAC Id [%d]\n",
+ __func__, mac_id);
+ return -EINVAL;
+ }
+
+ /* reset */
+ iowrite32be(msk, &fpm_rg->fm_rstc);
+ while ((ioread32be(&fpm_rg->fm_rstc) & msk) && --timeout)
+ udelay(10);
+
+ if (!timeout)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL(fman_reset_mac);
+
+/**
+ * fman_set_mac_max_frame
+ * @fman: A Pointer to FMan device
+ * @mac_id: MAC id
+ * @mfl: Maximum frame length
+ *
+ * Set maximum frame length of specific MAC in FMan driver
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl)
+{
+ /* if the port is already initialized, check that max_frame_length is
+ * smaller than or equal to the port's max
+ */
+ if ((!fman->state->port_mfl[mac_id]) ||
+ (mfl <= fman->state->port_mfl[mac_id])) {
+ fman->state->mac_mfl[mac_id] = mfl;
+ } else {
+ dev_warn(fman->dev, "%s: MAC max_frame_length is larger than Port max_frame_length\n",
+ __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(fman_set_mac_max_frame);
+
+/**
+ * fman_get_clock_freq
+ * @fman: A Pointer to FMan device
+ *
+ * Get FMan clock frequency
+ *
+ * Return: FMan clock frequency
+ */
+u16 fman_get_clock_freq(struct fman *fman)
+{
+ return fman->state->fm_clk_freq;
+}
+
+/**
+ * fman_get_bmi_max_fifo_size
+ * @fman: A Pointer to FMan device
+ *
+ * Get FMan maximum FIFO size
+ *
+ * Return: FMan Maximum FIFO size
+ */
+u32 fman_get_bmi_max_fifo_size(struct fman *fman)
+{
+ return fman->state->bmi_max_fifo_size;
+}
+EXPORT_SYMBOL(fman_get_bmi_max_fifo_size);
+
+/**
+ * fman_get_revision
+ * @fman: A Pointer to FMan device
+ * @rev_info: A structure to be filled with revision information
+ *
+ * Reads the FMan hardware revision into @rev_info
+ */
+void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info)
+{
+ u32 tmp;
+
+ tmp = ioread32be(&fman->fpm_regs->fm_ip_rev_1);
+ rev_info->major = (u8)((tmp & FPM_REV1_MAJOR_MASK) >>
+ FPM_REV1_MAJOR_SHIFT);
+ rev_info->minor = tmp & FPM_REV1_MINOR_MASK;
+}
+EXPORT_SYMBOL(fman_get_revision);
+
+/**
+ * fman_get_qman_channel_id
+ * @fman: A Pointer to FMan device
+ * @port_id: Port id
+ *
+ * Get the QMan channel ID associated with the Port id
+ *
+ * Return: QMan channel ID
+ */
+u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id)
+{
+ int i;
+
+ if (fman->state->rev_info.major >= 6) {
+ static const u32 port_ids[] = {
+ 0x30, 0x31, 0x28, 0x29, 0x2a, 0x2b,
+ 0x2c, 0x2d, 0x2, 0x3, 0x4, 0x5, 0x7, 0x7
+ };
+
+ for (i = 0; i < fman->state->num_of_qman_channels; i++) {
+ if (port_ids[i] == port_id)
+ break;
+ }
+ } else {
+ static const u32 port_ids[] = {
+ 0x30, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x1,
+ 0x2, 0x3, 0x4, 0x5, 0x7, 0x7
+ };
+
+ for (i = 0; i < fman->state->num_of_qman_channels; i++) {
+ if (port_ids[i] == port_id)
+ break;
+ }
+ }
+
+ if (i == fman->state->num_of_qman_channels)
+ return 0;
+
+ return fman->state->qman_channel_base + i;
+}
+EXPORT_SYMBOL(fman_get_qman_channel_id);
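+
+/* Illustrative example (not part of this patch): on FManV3 (major >= 6),
+ * hw port id 0x28 sits at index 2 of the table above, so - provided the
+ * device-tree channel range covers that index - the function returns
+ * qman_channel_base + 2; an unknown port id falls through and returns 0.
+ */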
+
+/**
+ * fman_get_mem_region
+ * @fman: A Pointer to FMan device
+ *
+ * Get FMan memory region
+ *
+ * Return: A structure with FMan memory region information
+ */
+struct resource *fman_get_mem_region(struct fman *fman)
+{
+ return fman->state->res;
+}
+EXPORT_SYMBOL(fman_get_mem_region);
+
+/* Bootargs defines */
+/* Extra headroom for RX buffers - Default, min and max */
+#define FSL_FM_RX_EXTRA_HEADROOM 64
+#define FSL_FM_RX_EXTRA_HEADROOM_MIN 16
+#define FSL_FM_RX_EXTRA_HEADROOM_MAX 384
+
+/* Maximum frame length */
+#define FSL_FM_MAX_FRAME_SIZE 1522
+#define FSL_FM_MAX_POSSIBLE_FRAME_SIZE 9600
+#define FSL_FM_MIN_POSSIBLE_FRAME_SIZE 64
+
+/* Extra headroom for Rx buffers.
+ * FMan is instructed to allocate, on the Rx path, this amount of
+ * space at the beginning of a data buffer, beside the DPA private
+ * data area and the IC fields.
+ * Does not impact Tx buffer layout.
+ * Configurable from bootargs. 64 by default; it is needed in
+ * particular forwarding scenarios that add extra headers to the
+ * forwarded frame.
+ */
+static int fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
+module_param(fsl_fm_rx_extra_headroom, int, 0);
+MODULE_PARM_DESC(fsl_fm_rx_extra_headroom, "Extra headroom for Rx buffers");
+
+/* Max frame size, across all interfaces.
+ * Configurable from bootargs, to avoid allocating oversized (socket)
+ * buffers when not using jumbo frames.
+ * Must be large enough to accommodate the network MTU, but small enough
+ * to avoid wasting skb memory.
+ */
+static int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
+module_param(fsl_fm_max_frm, int, 0);
+MODULE_PARM_DESC(fsl_fm_max_frm, "Maximum frame size, across all interfaces");
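+
+/* Illustrative usage (not part of this patch): for jumbo frames this is
+ * typically raised from the kernel command line, e.g. (assuming the driver
+ * is built as, or into, the fsl_fman module)
+ *	fsl_fman.fsl_fm_max_frm=9600 fsl_fman.fsl_fm_rx_extra_headroom=128
+ */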
+
+/**
+ * fman_get_max_frm
+ *
+ * Return: Max frame length configured in the FM driver
+ */
+u16 fman_get_max_frm(void)
+{
+ static bool fm_check_mfl;
+
+ if (!fm_check_mfl) {
+ if (fsl_fm_max_frm > FSL_FM_MAX_POSSIBLE_FRAME_SIZE ||
+ fsl_fm_max_frm < FSL_FM_MIN_POSSIBLE_FRAME_SIZE) {
+ pr_warn("Invalid fsl_fm_max_frm value (%d) in bootargs, valid range is %d-%d. Falling back to the default (%d)\n",
+ fsl_fm_max_frm,
+ FSL_FM_MIN_POSSIBLE_FRAME_SIZE,
+ FSL_FM_MAX_POSSIBLE_FRAME_SIZE,
+ FSL_FM_MAX_FRAME_SIZE);
+ fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
+ }
+ fm_check_mfl = true;
+ }
+
+ return fsl_fm_max_frm;
+}
+EXPORT_SYMBOL(fman_get_max_frm);
+
+/**
+ * fman_get_rx_extra_headroom
+ *
+ * Return: Extra headroom size configured in the FM driver
+ */
+int fman_get_rx_extra_headroom(void)
+{
+ static bool fm_check_rx_extra_headroom;
+
+ if (!fm_check_rx_extra_headroom) {
+ if (fsl_fm_rx_extra_headroom > FSL_FM_RX_EXTRA_HEADROOM_MAX ||
+ fsl_fm_rx_extra_headroom < FSL_FM_RX_EXTRA_HEADROOM_MIN) {
+ pr_warn("Invalid fsl_fm_rx_extra_headroom value (%d) in bootargs, valid range is %d-%d. Falling back to the default (%d)\n",
+ fsl_fm_rx_extra_headroom,
+ FSL_FM_RX_EXTRA_HEADROOM_MIN,
+ FSL_FM_RX_EXTRA_HEADROOM_MAX,
+ FSL_FM_RX_EXTRA_HEADROOM);
+ fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
+ }
+
+ fm_check_rx_extra_headroom = true;
+ fsl_fm_rx_extra_headroom = ALIGN(fsl_fm_rx_extra_headroom, 16);
+ }
+
+ return fsl_fm_rx_extra_headroom;
+}
+EXPORT_SYMBOL(fman_get_rx_extra_headroom);
+
+/**
+ * fman_bind
+ * @fm_dev: FMan OF device pointer
+ *
+ * Bind to a specific FMan device.
+ *
+ * Allowed only after the port was created.
+ *
+ * Return: A pointer to the FMan device
+ */
+struct fman *fman_bind(struct device *fm_dev)
+{
+ return (struct fman *)(dev_get_drvdata(get_device(fm_dev)));
+}
+EXPORT_SYMBOL(fman_bind);
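+
+/* Usage sketch (illustrative only, not part of this patch): a port or MAC
+ * driver holding the platform device of its parent FMan node (fm_pdev here
+ * is a hypothetical name) binds and then queries the FMan, e.g.
+ *
+ *	struct fman *fman = fman_bind(&fm_pdev->dev);
+ *	u16 clk_mhz = fman_get_clock_freq(fman);
+ */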
+
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+bool fman_has_errata_a050385(void)
+{
+ return fman_has_err_a050385;
+}
+EXPORT_SYMBOL(fman_has_errata_a050385);
+#endif
+
+static irqreturn_t fman_err_irq(int irq, void *handle)
+{
+ struct fman *fman = (struct fman *)handle;
+ u32 pending;
+ struct fman_fpm_regs __iomem *fpm_rg;
+ irqreturn_t single_ret, ret = IRQ_NONE;
+
+ if (!is_init_done(fman->cfg))
+ return IRQ_NONE;
+
+ fpm_rg = fman->fpm_regs;
+
+ /* error interrupts */
+ pending = ioread32be(&fpm_rg->fm_epi);
+ if (!pending)
+ return IRQ_NONE;
+
+ if (pending & ERR_INTR_EN_BMI) {
+ single_ret = bmi_err_event(fman);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_QMI) {
+ single_ret = qmi_err_event(fman);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_FPM) {
+ single_ret = fpm_err_event(fman);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_DMA) {
+ single_ret = dma_err_event(fman);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MURAM) {
+ single_ret = muram_err_intr(fman);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+
+ /* MAC error interrupts */
+ if (pending & ERR_INTR_EN_MAC0) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 0);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC1) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 1);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC2) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 2);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC3) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 3);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC4) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 4);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC5) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 5);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC6) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 6);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC7) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 7);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC8) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 8);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & ERR_INTR_EN_MAC9) {
+ single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 9);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static irqreturn_t fman_irq(int irq, void *handle)
+{
+ struct fman *fman = (struct fman *)handle;
+ u32 pending;
+ struct fman_fpm_regs __iomem *fpm_rg;
+ irqreturn_t single_ret, ret = IRQ_NONE;
+
+ if (!is_init_done(fman->cfg))
+ return IRQ_NONE;
+
+ fpm_rg = fman->fpm_regs;
+
+ /* normal interrupts */
+ pending = ioread32be(&fpm_rg->fm_npi);
+ if (!pending)
+ return IRQ_NONE;
+
+ if (pending & INTR_EN_QMI) {
+ single_ret = qmi_event(fman);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+
+ /* MAC interrupts */
+ if (pending & INTR_EN_MAC0) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 0);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC1) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 1);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC2) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 2);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC3) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 3);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC4) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 4);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC5) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 5);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC6) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 6);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC7) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 7);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC8) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 8);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ if (pending & INTR_EN_MAC9) {
+ single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 9);
+ if (single_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static const struct of_device_id fman_muram_match[] = {
+ {
+ .compatible = "fsl,fman-muram"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, fman_muram_match);
+
+static struct fman *read_dts_node(struct platform_device *of_dev)
+{
+ struct fman *fman;
+ struct device_node *fm_node, *muram_node;
+ struct resource *res;
+ u32 val, range[2];
+ int err, irq;
+ struct clk *clk;
+ u32 clk_rate;
+ phys_addr_t phys_base_addr;
+ resource_size_t mem_size;
+
+ fman = kzalloc(sizeof(*fman), GFP_KERNEL);
+ if (!fman)
+ return ERR_PTR(-ENOMEM);
+
+ fm_node = of_node_get(of_dev->dev.of_node);
+
+ err = of_property_read_u32(fm_node, "cell-index", &val);
+ if (err) {
+ dev_err(&of_dev->dev, "%s: failed to read cell-index for %pOF\n",
+ __func__, fm_node);
+ goto fman_node_put;
+ }
+ fman->dts_params.id = (u8)val;
+
+ /* Get the FM interrupt */
+ err = platform_get_irq(of_dev, 0);
+ if (err < 0)
+ goto fman_node_put;
+ irq = err;
+
+ /* Get the FM error interrupt */
+ err = platform_get_irq(of_dev, 1);
+ if (err < 0)
+ goto fman_node_put;
+ fman->dts_params.err_irq = err;
+
+ /* Get the FM address */
+ res = platform_get_resource(of_dev, IORESOURCE_MEM, 0);
+ if (!res) {
+ err = -EINVAL;
+ dev_err(&of_dev->dev, "%s: Can't get FMan memory resource\n",
+ __func__);
+ goto fman_node_put;
+ }
+
+ phys_base_addr = res->start;
+ mem_size = resource_size(res);
+
+ clk = of_clk_get(fm_node, 0);
+ if (IS_ERR(clk)) {
+ err = PTR_ERR(clk);
+ dev_err(&of_dev->dev, "%s: Failed to get FM%d clock structure\n",
+ __func__, fman->dts_params.id);
+ goto fman_node_put;
+ }
+
+ clk_rate = clk_get_rate(clk);
+ if (!clk_rate) {
+ err = -EINVAL;
+ dev_err(&of_dev->dev, "%s: Failed to determine FM%d clock rate\n",
+ __func__, fman->dts_params.id);
+ goto fman_node_put;
+ }
+ /* Rounding to MHz */
+ fman->dts_params.clk_freq = DIV_ROUND_UP(clk_rate, 1000000);
+
+ err = of_property_read_u32_array(fm_node, "fsl,qman-channel-range",
+ &range[0], 2);
+ if (err) {
+ dev_err(&of_dev->dev, "%s: failed to read fsl,qman-channel-range for %pOF\n",
+ __func__, fm_node);
+ goto fman_node_put;
+ }
+ fman->dts_params.qman_channel_base = range[0];
+ fman->dts_params.num_of_qman_channels = range[1];
+
+ /* Get the MURAM base address and size */
+ muram_node = of_find_matching_node(fm_node, fman_muram_match);
+ if (!muram_node) {
+ err = -EINVAL;
+ dev_err(&of_dev->dev, "%s: could not find MURAM node\n",
+ __func__);
+ goto fman_free;
+ }
+
+ err = of_address_to_resource(muram_node, 0,
+ &fman->dts_params.muram_res);
+ if (err) {
+ of_node_put(muram_node);
+ dev_err(&of_dev->dev, "%s: of_address_to_resource() = %d\n",
+ __func__, err);
+ goto fman_free;
+ }
+
+ of_node_put(muram_node);
+
+ err = devm_request_irq(&of_dev->dev, irq, fman_irq, IRQF_SHARED,
+ "fman", fman);
+ if (err < 0) {
+ dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n",
+ __func__, irq, err);
+ goto fman_free;
+ }
+
+ if (fman->dts_params.err_irq != 0) {
+ err = devm_request_irq(&of_dev->dev, fman->dts_params.err_irq,
+ fman_err_irq, IRQF_SHARED,
+ "fman-err", fman);
+ if (err < 0) {
+ dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n",
+ __func__, fman->dts_params.err_irq, err);
+ goto fman_free;
+ }
+ }
+
+ fman->dts_params.res =
+ devm_request_mem_region(&of_dev->dev, phys_base_addr,
+ mem_size, "fman");
+ if (!fman->dts_params.res) {
+ err = -EBUSY;
+ dev_err(&of_dev->dev, "%s: request_mem_region() failed\n",
+ __func__);
+ goto fman_free;
+ }
+
+ fman->dts_params.base_addr =
+ devm_ioremap(&of_dev->dev, phys_base_addr, mem_size);
+ if (!fman->dts_params.base_addr) {
+ err = -ENOMEM;
+ dev_err(&of_dev->dev, "%s: devm_ioremap() failed\n", __func__);
+ goto fman_free;
+ }
+
+ fman->dev = &of_dev->dev;
+
+ err = of_platform_populate(fm_node, NULL, NULL, &of_dev->dev);
+ if (err) {
+ dev_err(&of_dev->dev, "%s: of_platform_populate() failed\n",
+ __func__);
+ goto fman_free;
+ }
+
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+ fman_has_err_a050385 =
+ of_property_read_bool(fm_node, "fsl,erratum-a050385");
+#endif
+
+ return fman;
+
+fman_node_put:
+ of_node_put(fm_node);
+fman_free:
+ kfree(fman);
+ return ERR_PTR(err);
+}
+
+static int fman_probe(struct platform_device *of_dev)
+{
+ struct fman *fman;
+ struct device *dev;
+ int err;
+
+ dev = &of_dev->dev;
+
+ fman = read_dts_node(of_dev);
+ if (IS_ERR(fman))
+ return PTR_ERR(fman);
+
+ err = fman_config(fman);
+ if (err) {
+ dev_err(dev, "%s: FMan config failed\n", __func__);
+ return -EINVAL;
+ }
+
+ if (fman_init(fman) != 0) {
+ dev_err(dev, "%s: FMan init failed\n", __func__);
+ return -EINVAL;
+ }
+
+ if (fman->dts_params.err_irq == 0) {
+ fman_set_exception(fman, FMAN_EX_DMA_BUS_ERROR, false);
+ fman_set_exception(fman, FMAN_EX_DMA_READ_ECC, false);
+ fman_set_exception(fman, FMAN_EX_DMA_SYSTEM_WRITE_ECC, false);
+ fman_set_exception(fman, FMAN_EX_DMA_FM_WRITE_ECC, false);
+ fman_set_exception(fman, FMAN_EX_DMA_SINGLE_PORT_ECC, false);
+ fman_set_exception(fman, FMAN_EX_FPM_STALL_ON_TASKS, false);
+ fman_set_exception(fman, FMAN_EX_FPM_SINGLE_ECC, false);
+ fman_set_exception(fman, FMAN_EX_FPM_DOUBLE_ECC, false);
+ fman_set_exception(fman, FMAN_EX_QMI_SINGLE_ECC, false);
+ fman_set_exception(fman, FMAN_EX_QMI_DOUBLE_ECC, false);
+ fman_set_exception(fman,
+ FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID, false);
+ fman_set_exception(fman, FMAN_EX_BMI_LIST_RAM_ECC, false);
+ fman_set_exception(fman, FMAN_EX_BMI_STORAGE_PROFILE_ECC,
+ false);
+ fman_set_exception(fman, FMAN_EX_BMI_STATISTICS_RAM_ECC, false);
+ fman_set_exception(fman, FMAN_EX_BMI_DISPATCH_RAM_ECC, false);
+ }
+
+ dev_set_drvdata(dev, fman);
+
+ dev_dbg(dev, "FMan%d probed\n", fman->dts_params.id);
+
+ return 0;
+}
+
+static const struct of_device_id fman_match[] = {
+ {
+ .compatible = "fsl,fman"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, fman_match);
+
+static struct platform_driver fman_driver = {
+ .driver = {
+ .name = "fsl-fman",
+ .of_match_table = fman_match,
+ },
+ .probe = fman_probe,
+};
+
+static int __init fman_load(void)
+{
+ int err;
+
+ pr_debug("FSL DPAA FMan driver\n");
+
+ err = platform_driver_register(&fman_driver);
+ if (err < 0)
+ pr_err("Error, platform_driver_register() = %d\n", err);
+
+ return err;
+}
+module_init(fman_load);
+
+static void __exit fman_unload(void)
+{
+ platform_driver_unregister(&fman_driver);
+}
+module_exit(fman_unload);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Freescale DPAA Frame Manager driver");
diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h
new file mode 100644
index 000000000..2ea575a46
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman.h
@@ -0,0 +1,381 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
+ * Copyright 2020 NXP
+ */
+
+#ifndef __FM_H
+#define __FM_H
+
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+
+/* FM Frame descriptor macros */
+/* Frame queue Context Override */
+#define FM_FD_CMD_FCO 0x80000000
+#define FM_FD_CMD_RPD 0x40000000 /* Read Prepended Data */
+#define FM_FD_CMD_UPD 0x20000000 /* Update Prepended Data */
+#define FM_FD_CMD_DTC 0x10000000 /* Do L4 Checksum */
+
+/* TX-Port: Unsupported Format */
+#define FM_FD_ERR_UNSUPPORTED_FORMAT 0x04000000
+/* TX Port: Length Error */
+#define FM_FD_ERR_LENGTH 0x02000000
+#define FM_FD_ERR_DMA 0x01000000 /* DMA Data error */
+
+/* IPR frame (not error) */
+#define FM_FD_IPR 0x00000001
+/* IPR non-consistent-sp */
+#define FM_FD_ERR_IPR_NCSP (0x00100000 | FM_FD_IPR)
+/* IPR error */
+#define FM_FD_ERR_IPR (0x00200000 | FM_FD_IPR)
+/* IPR timeout */
+#define FM_FD_ERR_IPR_TO (0x00300000 | FM_FD_IPR)
+/* IPR error (without the IPR frame indication bit) */
+#define FM_FD_ERR_IPRE (FM_FD_ERR_IPR & ~FM_FD_IPR)
+
+/* Rx FIFO overflow, FCS error, code error, running disparity error
+ * (SGMII and TBI modes), FIFO parity error. PHY Sequence error,
+ * PHY error control character detected.
+ */
+#define FM_FD_ERR_PHYSICAL 0x00080000
+/* Frame too long OR Frame size exceeds max_length_frame */
+#define FM_FD_ERR_SIZE 0x00040000
+/* classification discard */
+#define FM_FD_ERR_CLS_DISCARD 0x00020000
+/* Extract Out of Frame */
+#define FM_FD_ERR_EXTRACTION 0x00008000
+/* No Scheme Selected */
+#define FM_FD_ERR_NO_SCHEME 0x00004000
+/* Keysize Overflow */
+#define FM_FD_ERR_KEYSIZE_OVERFLOW 0x00002000
+/* Frame color is red */
+#define FM_FD_ERR_COLOR_RED 0x00000800
+/* Frame color is yellow */
+#define FM_FD_ERR_COLOR_YELLOW 0x00000400
+/* Parser Time out Exceed */
+#define FM_FD_ERR_PRS_TIMEOUT 0x00000080
+/* Invalid Soft Parser instruction */
+#define FM_FD_ERR_PRS_ILL_INSTRUCT 0x00000040
+/* Header error was identified during parsing */
+#define FM_FD_ERR_PRS_HDR_ERR 0x00000020
+/* Frame parsed beyond the first 256 bytes */
+#define FM_FD_ERR_BLOCK_LIMIT_EXCEEDED 0x00000008
+
+/* non Frame-Manager error */
+#define FM_FD_RX_STATUS_ERR_NON_FM 0x00400000
+
+/* FMan driver defines */
+#define FMAN_BMI_FIFO_UNITS 0x100
+#define OFFSET_UNITS 16
+
+/* BMan defines */
+#define BM_MAX_NUM_OF_POOLS 64 /* Buffers pools */
+#define FMAN_PORT_MAX_EXT_POOLS_NUM 8 /* External BM pools per Rx port */
+
+struct fman; /* FMan data */
+
+/* Enum for defining port types */
+enum fman_port_type {
+ FMAN_PORT_TYPE_TX = 0, /* TX Port */
+ FMAN_PORT_TYPE_RX, /* RX Port */
+};
+
+struct fman_rev_info {
+ u8 major; /* Major revision */
+ u8 minor; /* Minor revision */
+};
+
+enum fman_exceptions {
+ FMAN_EX_DMA_BUS_ERROR = 0, /* DMA bus error. */
+ FMAN_EX_DMA_READ_ECC, /* Read Buffer ECC error */
+ FMAN_EX_DMA_SYSTEM_WRITE_ECC, /* Write Buffer ECC err on sys side */
+ FMAN_EX_DMA_FM_WRITE_ECC, /* Write Buffer ECC error on FM side */
+ FMAN_EX_DMA_SINGLE_PORT_ECC, /* Single Port ECC error on FM side */
+ FMAN_EX_FPM_STALL_ON_TASKS, /* Stall of tasks on FPM */
+ FMAN_EX_FPM_SINGLE_ECC, /* Single ECC on FPM. */
+ FMAN_EX_FPM_DOUBLE_ECC, /* Double ECC error on FPM ram access */
+ FMAN_EX_QMI_SINGLE_ECC, /* Single ECC on QMI. */
+ FMAN_EX_QMI_DOUBLE_ECC, /* Double bit ECC occurred on QMI */
+ FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID,/* DeQ from unknown port id */
+ FMAN_EX_BMI_LIST_RAM_ECC, /* Linked List RAM ECC error */
+ FMAN_EX_BMI_STORAGE_PROFILE_ECC,/* storage profile */
+ FMAN_EX_BMI_STATISTICS_RAM_ECC,/* Statistics RAM ECC Err Enable */
+ FMAN_EX_BMI_DISPATCH_RAM_ECC, /* Dispatch RAM ECC Error Enable */
+ FMAN_EX_IRAM_ECC, /* Double bit ECC occurred on IRAM */
+ FMAN_EX_MURAM_ECC /* Double bit ECC occurred on MURAM */
+};
+
+/* Parse results memory layout */
+struct fman_prs_result {
+ u8 lpid; /* Logical port id */
+ u8 shimr; /* Shim header result */
+ __be16 l2r; /* Layer 2 result */
+ __be16 l3r; /* Layer 3 result */
+ u8 l4r; /* Layer 4 result */
+ u8 cplan; /* Classification plan id */
+ __be16 nxthdr; /* Next Header */
+ __be16 cksum; /* Running-sum */
+ /* Flags&fragment-offset field of the last IP-header */
+ __be16 flags_frag_off;
+ /* Routing type field of an IPv6 routing extension header */
+ u8 route_type;
+ /* Routing Extension Header Present; last bit is IP valid */
+ u8 rhp_ip_valid;
+ u8 shim_off[2]; /* Shim offset */
+ u8 ip_pid_off; /* IP PID (last IP-proto) offset */
+ u8 eth_off; /* ETH offset */
+ u8 llc_snap_off; /* LLC_SNAP offset */
+ u8 vlan_off[2]; /* VLAN offset */
+ u8 etype_off; /* ETYPE offset */
+ u8 pppoe_off; /* PPP offset */
+ u8 mpls_off[2]; /* MPLS offset */
+ u8 ip_off[2]; /* IP offset */
+ u8 gre_off; /* GRE offset */
+ u8 l4_off; /* Layer 4 offset */
+ u8 nxthdr_off; /* Parser end point */
+};
+
+/* A structure for defining buffer prefix area content. */
+struct fman_buffer_prefix_content {
+ /* Number of bytes to be left at the beginning of the external
+ * buffer; Note that the private-area will start from the base
+ * of the buffer address.
+ */
+ u16 priv_data_size;
+ /* true to pass the parse result to/from the FM;
+ * User may use FM_PORT_GetBufferPrsResult() in
+ * order to get the parser-result from a buffer.
+ */
+ bool pass_prs_result;
+ /* true to pass the timestamp to/from the FM */
+ bool pass_time_stamp;
+ /* true to pass the KG hash result to/from the FM. User may
+ * use FM_PORT_GetBufferHashResult() in order to get the
+ * hash-result from a buffer.
+ */
+ bool pass_hash_result;
+ /* Add all other Internal-Context information: AD,
+ * hash-result, key, etc.
+ */
+ u16 data_align;
+};
+
+/* A structure of information about each of the external
+ * buffer pools used by a port or storage-profile.
+ */
+struct fman_ext_pool_params {
+ u8 id; /* External buffer pool id */
+ u16 size; /* External buffer pool buffer size */
+};
+
+/* A structure for informing the driver about the external
+ * buffer pools allocated in the BM and used by a port or a
+ * storage-profile.
+ */
+struct fman_ext_pools {
+ u8 num_of_pools_used; /* Number of pools used by this port */
+ struct fman_ext_pool_params ext_buf_pool[FMAN_PORT_MAX_EXT_POOLS_NUM];
+ /* Parameters for each port */
+};
+
+/* A structure for defining BM pool depletion criteria */
+struct fman_buf_pool_depletion {
+ /* select mode in which pause frames will be sent after a
+ * number of pools (all together!) are depleted
+ */
+ bool pools_grp_mode_enable;
+ /* the number of depleted pools that will invoke pause
+ * frames transmission.
+ */
+ u8 num_of_pools;
+ /* For each pool, true if it should be considered for
+ * depletion (Note - this pool must be used by this port!).
+ */
+ bool pools_to_consider[BM_MAX_NUM_OF_POOLS];
+ /* select mode in which pause frames will be sent
+ * after a single-pool is depleted;
+ */
+ bool single_pool_mode_enable;
+ /* For each pool, true if it should be considered
+ * for depletion (Note - this pool must be used by this port!)
+ */
+ bool pools_to_consider_for_single_mode[BM_MAX_NUM_OF_POOLS];
+};
+
+/* Enum for inter-module interrupts registration */
+enum fman_event_modules {
+ FMAN_MOD_MAC = 0, /* MAC event */
+ FMAN_MOD_FMAN_CTRL, /* FMAN Controller */
+ FMAN_MOD_DUMMY_LAST
+};
+
+/* Enum for interrupts types */
+enum fman_intr_type {
+ FMAN_INTR_TYPE_ERR,
+ FMAN_INTR_TYPE_NORMAL
+};
+
+/* Enum for inter-module interrupts registration */
+enum fman_inter_module_event {
+ FMAN_EV_ERR_MAC0 = 0, /* MAC 0 error event */
+ FMAN_EV_ERR_MAC1, /* MAC 1 error event */
+ FMAN_EV_ERR_MAC2, /* MAC 2 error event */
+ FMAN_EV_ERR_MAC3, /* MAC 3 error event */
+ FMAN_EV_ERR_MAC4, /* MAC 4 error event */
+ FMAN_EV_ERR_MAC5, /* MAC 5 error event */
+ FMAN_EV_ERR_MAC6, /* MAC 6 error event */
+ FMAN_EV_ERR_MAC7, /* MAC 7 error event */
+ FMAN_EV_ERR_MAC8, /* MAC 8 error event */
+ FMAN_EV_ERR_MAC9, /* MAC 9 error event */
+ FMAN_EV_MAC0, /* MAC 0 event (Magic packet detection) */
+ FMAN_EV_MAC1, /* MAC 1 event (Magic packet detection) */
+ FMAN_EV_MAC2, /* MAC 2 (Magic packet detection) */
+ FMAN_EV_MAC3, /* MAC 3 (Magic packet detection) */
+ FMAN_EV_MAC4, /* MAC 4 (Magic packet detection) */
+ FMAN_EV_MAC5, /* MAC 5 (Magic packet detection) */
+ FMAN_EV_MAC6, /* MAC 6 (Magic packet detection) */
+ FMAN_EV_MAC7, /* MAC 7 (Magic packet detection) */
+ FMAN_EV_MAC8, /* MAC 8 event (Magic packet detection) */
+ FMAN_EV_MAC9, /* MAC 9 event (Magic packet detection) */
+ FMAN_EV_FMAN_CTRL_0, /* Fman controller event 0 */
+ FMAN_EV_FMAN_CTRL_1, /* Fman controller event 1 */
+ FMAN_EV_FMAN_CTRL_2, /* Fman controller event 2 */
+ FMAN_EV_FMAN_CTRL_3, /* Fman controller event 3 */
+ FMAN_EV_CNT
+};
+
+struct fman_intr_src {
+ void (*isr_cb)(void *src_arg);
+ void *src_handle;
+};
+
+/** fman_exceptions_cb
+ * fman - Pointer to FMan
+ * exception - The exception.
+ *
+ * Exceptions user callback routine, will be called upon an exception
+ * passing the exception identification.
+ *
+ * Return: irq status
+ */
+typedef irqreturn_t (fman_exceptions_cb)(struct fman *fman,
+ enum fman_exceptions exception);
+/** fman_bus_error_cb
+ * fman - Pointer to FMan
+ * port_id - Port id
+ * addr - Address that caused the error
+ * tnum - Owner of error
+ * liodn - Logical IO device number
+ *
+ * Bus error user callback routine, will be called upon bus error,
+ * passing parameters describing the errors and the owner.
+ *
+ * Return: IRQ status
+ */
+typedef irqreturn_t (fman_bus_error_cb)(struct fman *fman, u8 port_id,
+ u64 addr, u8 tnum, u16 liodn);
+
+/* Structure that holds information received from device tree */
+struct fman_dts_params {
+ void __iomem *base_addr; /* FMan virtual address */
+ struct resource *res; /* FMan memory resource */
+ u8 id; /* FMan ID */
+
+ int err_irq; /* FMan Error IRQ */
+
+ u16 clk_freq; /* FMan clock freq (In Mhz) */
+
+ u32 qman_channel_base; /* QMan channels base */
+ u32 num_of_qman_channels; /* Number of QMan channels */
+
+ struct resource muram_res; /* MURAM resource */
+};
+
+struct fman {
+ struct device *dev;
+ void __iomem *base_addr;
+ struct fman_intr_src intr_mng[FMAN_EV_CNT];
+
+ struct fman_fpm_regs __iomem *fpm_regs;
+ struct fman_bmi_regs __iomem *bmi_regs;
+ struct fman_qmi_regs __iomem *qmi_regs;
+ struct fman_dma_regs __iomem *dma_regs;
+ struct fman_hwp_regs __iomem *hwp_regs;
+ struct fman_kg_regs __iomem *kg_regs;
+ fman_exceptions_cb *exception_cb;
+ fman_bus_error_cb *bus_error_cb;
+ /* Spinlock for FMan use */
+ spinlock_t spinlock;
+ struct fman_state_struct *state;
+
+ struct fman_cfg *cfg;
+ struct muram_info *muram;
+ struct fman_keygen *keygen;
+ /* cam section in muram */
+ unsigned long cam_offset;
+ size_t cam_size;
+ /* Fifo in MURAM */
+ unsigned long fifo_offset;
+ size_t fifo_size;
+
+ u32 liodn_base[64];
+ u32 liodn_offset[64];
+
+ struct fman_dts_params dts_params;
+};
+
+/* Structure for port-FM communication during fman_port_init. */
+struct fman_port_init_params {
+ u8 port_id; /* port Id */
+ enum fman_port_type port_type; /* Port type */
+ u16 port_speed; /* Port speed */
+ u16 liodn_offset; /* Port's requested resource */
+ u8 num_of_tasks; /* Port's requested resource */
+ u8 num_of_extra_tasks; /* Port's requested resource */
+ u8 num_of_open_dmas; /* Port's requested resource */
+ u8 num_of_extra_open_dmas; /* Port's requested resource */
+ u32 size_of_fifo; /* Port's requested resource */
+ u32 extra_size_of_fifo; /* Port's requested resource */
+ u8 deq_pipeline_depth; /* Port's requested resource */
+ u16 max_frame_length; /* Port's max frame length. */
+ u16 liodn_base;
+ /* LIODN base for this port, to be used together with LIODN offset. */
+};
+
+void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info);
+
+void fman_register_intr(struct fman *fman, enum fman_event_modules mod,
+ u8 mod_id, enum fman_intr_type intr_type,
+ void (*f_isr)(void *h_src_arg), void *h_src_arg);
+
+void fman_unregister_intr(struct fman *fman, enum fman_event_modules mod,
+ u8 mod_id, enum fman_intr_type intr_type);
+
+int fman_set_port_params(struct fman *fman,
+ struct fman_port_init_params *port_params);
+
+int fman_reset_mac(struct fman *fman, u8 mac_id);
+
+u16 fman_get_clock_freq(struct fman *fman);
+
+u32 fman_get_bmi_max_fifo_size(struct fman *fman);
+
+int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl);
+
+u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id);
+
+struct resource *fman_get_mem_region(struct fman *fman);
+
+u16 fman_get_max_frm(void);
+
+int fman_get_rx_extra_headroom(void);
+
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+bool fman_has_errata_a050385(void);
+#endif
+
+struct fman *fman_bind(struct device *dev);
+
+#endif /* __FM_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
new file mode 100644
index 000000000..6617932fd
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -0,0 +1,1538 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "fman_dtsec.h"
+#include "fman.h"
+#include "mac.h"
+
+#include <linux/slab.h>
+#include <linux/bitrev.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/phy.h>
+#include <linux/crc32.h>
+#include <linux/of_mdio.h>
+#include <linux/mii.h>
+
+/* TBI register addresses */
+#define MII_TBICON 0x11
+
+/* TBICON register bit fields */
+#define TBICON_SOFT_RESET 0x8000 /* Soft reset */
+#define TBICON_DISABLE_RX_DIS 0x2000 /* Disable receive disparity */
+#define TBICON_DISABLE_TX_DIS 0x1000 /* Disable transmit disparity */
+#define TBICON_AN_SENSE 0x0100 /* Auto-negotiation sense enable */
+#define TBICON_CLK_SELECT 0x0020 /* Clock select */
+#define TBICON_MI_MODE 0x0010 /* GMII mode (TBI if not set) */
+
+#define TBIANA_SGMII 0x4001
+#define TBIANA_1000X 0x01a0
+
+/* Interrupt Mask Register (IMASK) */
+#define DTSEC_IMASK_BREN 0x80000000
+#define DTSEC_IMASK_RXCEN 0x40000000
+#define DTSEC_IMASK_MSROEN 0x04000000
+#define DTSEC_IMASK_GTSCEN 0x02000000
+#define DTSEC_IMASK_BTEN 0x01000000
+#define DTSEC_IMASK_TXCEN 0x00800000
+#define DTSEC_IMASK_TXEEN 0x00400000
+#define DTSEC_IMASK_LCEN 0x00040000
+#define DTSEC_IMASK_CRLEN 0x00020000
+#define DTSEC_IMASK_XFUNEN 0x00010000
+#define DTSEC_IMASK_ABRTEN 0x00008000
+#define DTSEC_IMASK_IFERREN 0x00004000
+#define DTSEC_IMASK_MAGEN 0x00000800
+#define DTSEC_IMASK_MMRDEN 0x00000400
+#define DTSEC_IMASK_MMWREN 0x00000200
+#define DTSEC_IMASK_GRSCEN 0x00000100
+#define DTSEC_IMASK_TDPEEN 0x00000002
+#define DTSEC_IMASK_RDPEEN 0x00000001
+
+#define DTSEC_EVENTS_MASK \
+ ((u32)(DTSEC_IMASK_BREN | \
+ DTSEC_IMASK_RXCEN | \
+ DTSEC_IMASK_BTEN | \
+ DTSEC_IMASK_TXCEN | \
+ DTSEC_IMASK_TXEEN | \
+ DTSEC_IMASK_ABRTEN | \
+ DTSEC_IMASK_LCEN | \
+ DTSEC_IMASK_CRLEN | \
+ DTSEC_IMASK_XFUNEN | \
+ DTSEC_IMASK_IFERREN | \
+ DTSEC_IMASK_MAGEN | \
+ DTSEC_IMASK_TDPEEN | \
+ DTSEC_IMASK_RDPEEN))
+
+/* dtsec timestamp event bits */
+#define TMR_PEMASK_TSREEN 0x00010000
+#define TMR_PEVENT_TSRE 0x00010000
+
+/* Group address bit indication */
+#define MAC_GROUP_ADDRESS 0x0000010000000000ULL
+
+/* Defaults */
+#define DEFAULT_HALFDUP_RETRANSMIT 0xf
+#define DEFAULT_HALFDUP_COLL_WINDOW 0x37
+#define DEFAULT_TX_PAUSE_TIME 0xf000
+#define DEFAULT_RX_PREPEND 0
+#define DEFAULT_PREAMBLE_LEN 7
+#define DEFAULT_TX_PAUSE_TIME_EXTD 0
+#define DEFAULT_NON_BACK_TO_BACK_IPG1 0x40
+#define DEFAULT_NON_BACK_TO_BACK_IPG2 0x60
+#define DEFAULT_MIN_IFG_ENFORCEMENT 0x50
+#define DEFAULT_BACK_TO_BACK_IPG 0x60
+#define DEFAULT_MAXIMUM_FRAME 0x600
+
+/* register related defines (bits, field offsets..) */
+#define DTSEC_ID2_INT_REDUCED_OFF 0x00010000
+
+#define DTSEC_ECNTRL_GMIIM 0x00000040
+#define DTSEC_ECNTRL_TBIM 0x00000020
+#define DTSEC_ECNTRL_SGMIIM 0x00000002
+#define DTSEC_ECNTRL_RPM 0x00000010
+#define DTSEC_ECNTRL_R100M 0x00000008
+#define DTSEC_ECNTRL_QSGMIIM 0x00000001
+
+#define TCTRL_TTSE 0x00000040
+#define TCTRL_GTS 0x00000020
+
+#define RCTRL_PAL_MASK 0x001f0000
+#define RCTRL_PAL_SHIFT 16
+#define RCTRL_GHTX 0x00000400
+#define RCTRL_RTSE 0x00000040
+#define RCTRL_GRS 0x00000020
+#define RCTRL_MPROM 0x00000008
+#define RCTRL_RSF 0x00000004
+#define RCTRL_UPROM 0x00000001
+
+#define MACCFG1_SOFT_RESET 0x80000000
+#define MACCFG1_RX_FLOW 0x00000020
+#define MACCFG1_TX_FLOW 0x00000010
+#define MACCFG1_TX_EN 0x00000001
+#define MACCFG1_RX_EN 0x00000004
+
+#define MACCFG2_NIBBLE_MODE 0x00000100
+#define MACCFG2_BYTE_MODE 0x00000200
+#define MACCFG2_PAD_CRC_EN 0x00000004
+#define MACCFG2_FULL_DUPLEX 0x00000001
+#define MACCFG2_PREAMBLE_LENGTH_MASK 0x0000f000
+#define MACCFG2_PREAMBLE_LENGTH_SHIFT 12
+
+#define IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT 24
+#define IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT 16
+#define IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT 8
+
+#define IPGIFG_NON_BACK_TO_BACK_IPG_1 0x7F000000
+#define IPGIFG_NON_BACK_TO_BACK_IPG_2 0x007F0000
+#define IPGIFG_MIN_IFG_ENFORCEMENT 0x0000FF00
+#define IPGIFG_BACK_TO_BACK_IPG 0x0000007F
+
+#define HAFDUP_EXCESS_DEFER 0x00010000
+#define HAFDUP_COLLISION_WINDOW 0x000003ff
+#define HAFDUP_RETRANSMISSION_MAX_SHIFT 12
+#define HAFDUP_RETRANSMISSION_MAX 0x0000f000
+
+#define NUM_OF_HASH_REGS 8 /* Number of hash table registers */
+
+#define PTV_PTE_MASK 0xffff0000
+#define PTV_PT_MASK 0x0000ffff
+#define PTV_PTE_SHIFT 16
+
+#define MAX_PACKET_ALIGNMENT 31
+#define MAX_INTER_PACKET_GAP 0x7f
+#define MAX_RETRANSMISSION 0x0f
+#define MAX_COLLISION_WINDOW 0x03ff
+
+/* Hash table size (32 bits*8 regs) */
+#define DTSEC_HASH_TABLE_SIZE 256
+/* Extended Hash table size (32 bits*16 regs) */
+#define EXTENDED_HASH_TABLE_SIZE 512
+
+/* dTSEC Memory Map registers */
+struct dtsec_regs {
+ /* dTSEC General Control and Status Registers */
+ u32 tsec_id; /* 0x000 ETSEC_ID register */
+ u32 tsec_id2; /* 0x004 ETSEC_ID2 register */
+ u32 ievent; /* 0x008 Interrupt event register */
+ u32 imask; /* 0x00C Interrupt mask register */
+ u32 reserved0010[1];
+ u32 ecntrl; /* 0x014 E control register */
+ u32 ptv; /* 0x018 Pause time value register */
+ u32 tbipa; /* 0x01C TBI PHY address register */
+ u32 tmr_ctrl; /* 0x020 Time-stamp Control register */
+ u32 tmr_pevent; /* 0x024 Time-stamp event register */
+ u32 tmr_pemask; /* 0x028 Timer event mask register */
+ u32 reserved002c[5];
+ u32 tctrl; /* 0x040 Transmit control register */
+ u32 reserved0044[3];
+ u32 rctrl; /* 0x050 Receive control register */
+ u32 reserved0054[11];
+ u32 igaddr[8]; /* 0x080-0x09C Individual/group address */
+ u32 gaddr[8]; /* 0x0A0-0x0BC Group address registers 0-7 */
+ u32 reserved00c0[16];
+ u32 maccfg1; /* 0x100 MAC configuration #1 */
+ u32 maccfg2; /* 0x104 MAC configuration #2 */
+ u32 ipgifg; /* 0x108 IPG/IFG */
+ u32 hafdup; /* 0x10C Half-duplex */
+ u32 maxfrm; /* 0x110 Maximum frame */
+ u32 reserved0114[10];
+ u32 ifstat; /* 0x13C Interface status */
+ u32 macstnaddr1; /* 0x140 Station Address,part 1 */
+ u32 macstnaddr2; /* 0x144 Station Address,part 2 */
+ struct {
+ u32 exact_match1; /* octets 1-4 */
+ u32 exact_match2; /* octets 5-6 */
+ } macaddr[15]; /* 0x148-0x1BC mac exact match addresses 1-15 */
+ u32 reserved01c0[16];
+ u32 tr64; /* 0x200 Tx and Rx 64 byte frame counter */
+ u32 tr127; /* 0x204 Tx and Rx 65 to 127 byte frame counter */
+ u32 tr255; /* 0x208 Tx and Rx 128 to 255 byte frame counter */
+ u32 tr511; /* 0x20C Tx and Rx 256 to 511 byte frame counter */
+ u32 tr1k; /* 0x210 Tx and Rx 512 to 1023 byte frame counter */
+ u32 trmax; /* 0x214 Tx and Rx 1024 to 1518 byte frame counter */
+ u32 trmgv;
+ /* 0x218 Tx and Rx 1519 to 1522 byte good VLAN frame count */
+ u32 rbyt; /* 0x21C receive byte counter */
+ u32 rpkt; /* 0x220 receive packet counter */
+ u32 rfcs; /* 0x224 receive FCS error counter */
+ u32 rmca; /* 0x228 RMCA Rx multicast packet counter */
+ u32 rbca; /* 0x22C Rx broadcast packet counter */
+ u32 rxcf; /* 0x230 Rx control frame packet counter */
+ u32 rxpf; /* 0x234 Rx pause frame packet counter */
+ u32 rxuo; /* 0x238 Rx unknown OP code counter */
+ u32 raln; /* 0x23C Rx alignment error counter */
+ u32 rflr; /* 0x240 Rx frame length error counter */
+ u32 rcde; /* 0x244 Rx code error counter */
+ u32 rcse; /* 0x248 Rx carrier sense error counter */
+ u32 rund; /* 0x24C Rx undersize packet counter */
+ u32 rovr; /* 0x250 Rx oversize packet counter */
+ u32 rfrg; /* 0x254 Rx fragments counter */
+ u32 rjbr; /* 0x258 Rx jabber counter */
+ u32 rdrp; /* 0x25C Rx drop */
+ u32 tbyt; /* 0x260 Tx byte counter */
+ u32 tpkt; /* 0x264 Tx packet counter */
+ u32 tmca; /* 0x268 Tx multicast packet counter */
+ u32 tbca; /* 0x26C Tx broadcast packet counter */
+ u32 txpf; /* 0x270 Tx pause control frame counter */
+ u32 tdfr; /* 0x274 Tx deferral packet counter */
+ u32 tedf; /* 0x278 Tx excessive deferral packet counter */
+ u32 tscl; /* 0x27C Tx single collision packet counter */
+ u32 tmcl; /* 0x280 Tx multiple collision packet counter */
+ u32 tlcl; /* 0x284 Tx late collision packet counter */
+ u32 txcl; /* 0x288 Tx excessive collision packet counter */
+ u32 tncl; /* 0x28C Tx total collision counter */
+ u32 reserved0290[1];
+ u32 tdrp; /* 0x294 Tx drop frame counter */
+ u32 tjbr; /* 0x298 Tx jabber frame counter */
+ u32 tfcs; /* 0x29C Tx FCS error counter */
+ u32 txcf; /* 0x2A0 Tx control frame counter */
+ u32 tovr; /* 0x2A4 Tx oversize frame counter */
+ u32 tund; /* 0x2A8 Tx undersize frame counter */
+ u32 tfrg; /* 0x2AC Tx fragments frame counter */
+ u32 car1; /* 0x2B0 carry register one */
+ u32 car2; /* 0x2B4 carry register two */
+ u32 cam1; /* 0x2B8 carry register one mask register */
+ u32 cam2; /* 0x2BC carry register two mask register */
+ u32 reserved02c0[848];
+};
+
+/* struct dtsec_cfg - dTSEC configuration
+ * Transmit half-duplex flow control, under software control for 10/100-Mbps
+ * half-duplex media. If set, back pressure is applied to media by raising
+ * carrier.
+ * halfdup_retransmit:
+ * Number of retransmission attempts following a collision.
+ * If this is exceeded dTSEC aborts transmission due to excessive collisions.
+ * The standard specifies the attempt limit to be 15.
+ * halfdup_coll_window:
+ * The number of bytes of the frame during which collisions may occur.
+ * The default value of 55 corresponds to the frame byte at the end of the
+ * standard 512-bit slot time window. If collisions are detected after this
+ * byte, the late collision event is asserted and transmission of current
+ * frame is aborted.
+ * tx_pad_crc:
+ * Pad and append CRC. If set, the MAC pads all transmitted short frames and
+ * appends a CRC to every frame regardless of padding requirement.
+ * tx_pause_time:
+ * Transmit pause time value. This pause value is used as part of the pause
+ * frame to be sent when a transmit pause frame is initiated.
+ * If set to 0 this disables transmission of pause frames.
+ * preamble_len:
+ * Length, in bytes, of the preamble field preceding each Ethernet
+ * start-of-frame delimiter byte. The default value of 0x7 should be used in
+ * order to guarantee reliable operation with IEEE 802.3 compliant hardware.
+ * rx_prepend:
+ * Packet alignment padding length. The specified number of bytes (1-31)
+ * of zero padding are inserted before the start of each received frame.
+ * For Ethernet, where optional preamble extraction is enabled, the padding
+ * appears before the preamble, otherwise the padding precedes the
+ * layer 2 header.
+ *
+ * This structure contains basic dTSEC configuration and must be passed to
+ * init() function. A default set of configuration values can be
+ * obtained by calling set_dflts().
+ */
+struct dtsec_cfg {
+ u16 halfdup_retransmit;
+ u16 halfdup_coll_window;
+ bool tx_pad_crc;
+ u16 tx_pause_time;
+ bool ptp_tsu_en;
+ bool ptp_exception_en;
+ u32 preamble_len;
+ u32 rx_prepend;
+ u16 tx_pause_time_extd;
+ u16 maximum_frame;
+ u32 non_back_to_back_ipg1;
+ u32 non_back_to_back_ipg2;
+ u32 min_ifg_enforcement;
+ u32 back_to_back_ipg;
+};
+
+struct fman_mac {
+ /* pointer to dTSEC memory mapped registers */
+ struct dtsec_regs __iomem *regs;
+ /* MAC address of device */
+ u64 addr;
+ /* Ethernet physical interface */
+ phy_interface_t phy_if;
+ u16 max_speed;
+ struct mac_device *dev_id; /* device cookie used by the exception cbs */
+ fman_mac_exception_cb *exception_cb;
+ fman_mac_exception_cb *event_cb;
+ /* Number of individual addresses in registers for this station */
+ u8 num_of_ind_addr_in_regs;
+ /* pointer to driver's global address hash table */
+ struct eth_hash_t *multicast_addr_hash;
+ /* pointer to driver's individual address hash table */
+ struct eth_hash_t *unicast_addr_hash;
+ u8 mac_id;
+ u32 exceptions;
+ bool ptp_tsu_enabled;
+ bool en_tsu_err_exception;
+ struct dtsec_cfg *dtsec_drv_param;
+ void *fm;
+ struct fman_rev_info fm_rev_info;
+ bool basex_if;
+ struct phy_device *tbiphy;
+};
+
+static void set_dflts(struct dtsec_cfg *cfg)
+{
+ cfg->halfdup_retransmit = DEFAULT_HALFDUP_RETRANSMIT;
+ cfg->halfdup_coll_window = DEFAULT_HALFDUP_COLL_WINDOW;
+ cfg->tx_pad_crc = true;
+ cfg->tx_pause_time = DEFAULT_TX_PAUSE_TIME;
+ /* PHY address 0 is reserved (DPAA RM) */
+ cfg->rx_prepend = DEFAULT_RX_PREPEND;
+ cfg->ptp_tsu_en = true;
+ cfg->ptp_exception_en = true;
+ cfg->preamble_len = DEFAULT_PREAMBLE_LEN;
+ cfg->tx_pause_time_extd = DEFAULT_TX_PAUSE_TIME_EXTD;
+ cfg->non_back_to_back_ipg1 = DEFAULT_NON_BACK_TO_BACK_IPG1;
+ cfg->non_back_to_back_ipg2 = DEFAULT_NON_BACK_TO_BACK_IPG2;
+ cfg->min_ifg_enforcement = DEFAULT_MIN_IFG_ENFORCEMENT;
+ cfg->back_to_back_ipg = DEFAULT_BACK_TO_BACK_IPG;
+ cfg->maximum_frame = DEFAULT_MAXIMUM_FRAME;
+}
+
+static void set_mac_address(struct dtsec_regs __iomem *regs, const u8 *adr)
+{
+ u32 tmp;
+
+ tmp = (u32)((adr[5] << 24) |
+ (adr[4] << 16) | (adr[3] << 8) | adr[2]);
+ iowrite32be(tmp, &regs->macstnaddr1);
+
+ tmp = (u32)((adr[1] << 24) | (adr[0] << 16));
+ iowrite32be(tmp, &regs->macstnaddr2);
+}
+
+static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
+ phy_interface_t iface, u16 iface_speed, u64 addr,
+ u32 exception_mask, u8 tbi_addr)
+{
+ bool is_rgmii, is_sgmii, is_qsgmii;
+ enet_addr_t eth_addr;
+ u32 tmp;
+ int i;
+
+ /* Soft reset */
+ iowrite32be(MACCFG1_SOFT_RESET, &regs->maccfg1);
+ iowrite32be(0, &regs->maccfg1);
+
+ /* dtsec_id2 */
+ tmp = ioread32be(&regs->tsec_id2);
+
+ /* check RGMII support */
+ if (iface == PHY_INTERFACE_MODE_RGMII ||
+ iface == PHY_INTERFACE_MODE_RGMII_ID ||
+ iface == PHY_INTERFACE_MODE_RGMII_RXID ||
+ iface == PHY_INTERFACE_MODE_RGMII_TXID ||
+ iface == PHY_INTERFACE_MODE_RMII)
+ if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
+ return -EINVAL;
+
+ if (iface == PHY_INTERFACE_MODE_SGMII ||
+ iface == PHY_INTERFACE_MODE_MII)
+ if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
+ return -EINVAL;
+
+ is_rgmii = iface == PHY_INTERFACE_MODE_RGMII ||
+ iface == PHY_INTERFACE_MODE_RGMII_ID ||
+ iface == PHY_INTERFACE_MODE_RGMII_RXID ||
+ iface == PHY_INTERFACE_MODE_RGMII_TXID;
+ is_sgmii = iface == PHY_INTERFACE_MODE_SGMII;
+ is_qsgmii = iface == PHY_INTERFACE_MODE_QSGMII;
+
+ tmp = 0;
+ if (is_rgmii || iface == PHY_INTERFACE_MODE_GMII)
+ tmp |= DTSEC_ECNTRL_GMIIM;
+ if (is_sgmii)
+ tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM);
+ if (is_qsgmii)
+ tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM |
+ DTSEC_ECNTRL_QSGMIIM);
+ if (is_rgmii)
+ tmp |= DTSEC_ECNTRL_RPM;
+ if (iface_speed == SPEED_100)
+ tmp |= DTSEC_ECNTRL_R100M;
+
+ iowrite32be(tmp, &regs->ecntrl);
+
+ tmp = 0;
+
+ if (cfg->tx_pause_time)
+ tmp |= cfg->tx_pause_time;
+ if (cfg->tx_pause_time_extd)
+ tmp |= cfg->tx_pause_time_extd << PTV_PTE_SHIFT;
+ iowrite32be(tmp, &regs->ptv);
+
+ tmp = 0;
+ tmp |= (cfg->rx_prepend << RCTRL_PAL_SHIFT) & RCTRL_PAL_MASK;
+ /* Accept short frames */
+ tmp |= RCTRL_RSF;
+
+ iowrite32be(tmp, &regs->rctrl);
+
+ /* Assign a Phy Address to the TBI (TBIPA).
+ * Done also in cases where TBI is not selected to avoid conflict with
+ * the external PHY's Physical address
+ */
+ iowrite32be(tbi_addr, &regs->tbipa);
+
+ iowrite32be(0, &regs->tmr_ctrl);
+
+ if (cfg->ptp_tsu_en) {
+ tmp = 0;
+ tmp |= TMR_PEVENT_TSRE;
+ iowrite32be(tmp, &regs->tmr_pevent);
+
+ if (cfg->ptp_exception_en) {
+ tmp = 0;
+ tmp |= TMR_PEMASK_TSREEN;
+ iowrite32be(tmp, &regs->tmr_pemask);
+ }
+ }
+
+ tmp = 0;
+ tmp |= MACCFG1_RX_FLOW;
+ tmp |= MACCFG1_TX_FLOW;
+ iowrite32be(tmp, &regs->maccfg1);
+
+ tmp = 0;
+
+ if (iface_speed < SPEED_1000)
+ tmp |= MACCFG2_NIBBLE_MODE;
+ else if (iface_speed == SPEED_1000)
+ tmp |= MACCFG2_BYTE_MODE;
+
+ tmp |= (cfg->preamble_len << MACCFG2_PREAMBLE_LENGTH_SHIFT) &
+ MACCFG2_PREAMBLE_LENGTH_MASK;
+ if (cfg->tx_pad_crc)
+ tmp |= MACCFG2_PAD_CRC_EN;
+ /* Full Duplex */
+ tmp |= MACCFG2_FULL_DUPLEX;
+ iowrite32be(tmp, &regs->maccfg2);
+
+ tmp = (((cfg->non_back_to_back_ipg1 <<
+ IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT)
+ & IPGIFG_NON_BACK_TO_BACK_IPG_1)
+ | ((cfg->non_back_to_back_ipg2 <<
+ IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT)
+ & IPGIFG_NON_BACK_TO_BACK_IPG_2)
+ | ((cfg->min_ifg_enforcement << IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT)
+ & IPGIFG_MIN_IFG_ENFORCEMENT)
+ | (cfg->back_to_back_ipg & IPGIFG_BACK_TO_BACK_IPG));
+ iowrite32be(tmp, &regs->ipgifg);
+
+ tmp = 0;
+ tmp |= HAFDUP_EXCESS_DEFER;
+ tmp |= ((cfg->halfdup_retransmit << HAFDUP_RETRANSMISSION_MAX_SHIFT)
+ & HAFDUP_RETRANSMISSION_MAX);
+ tmp |= (cfg->halfdup_coll_window & HAFDUP_COLLISION_WINDOW);
+
+ iowrite32be(tmp, &regs->hafdup);
+
+ /* Initialize Maximum frame length */
+ iowrite32be(cfg->maximum_frame, &regs->maxfrm);
+
+ iowrite32be(0xffffffff, &regs->cam1);
+ iowrite32be(0xffffffff, &regs->cam2);
+
+ iowrite32be(exception_mask, &regs->imask);
+
+ iowrite32be(0xffffffff, &regs->ievent);
+
+ if (addr) {
+ MAKE_ENET_ADDR_FROM_UINT64(addr, eth_addr);
+ set_mac_address(regs, (const u8 *)eth_addr);
+ }
+
+ /* HASH */
+ for (i = 0; i < NUM_OF_HASH_REGS; i++) {
+ /* Initialize IADDRx */
+ iowrite32be(0, &regs->igaddr[i]);
+ /* Initialize GADDRx */
+ iowrite32be(0, &regs->gaddr[i]);
+ }
+
+ return 0;
+}
+
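+/* set_bucket - set or clear one bit of the combined individual/group hash
+ * table: buckets 0-255 select bits in igaddr[0..7], buckets 256-511 select
+ * bits in gaddr[0..7]. Within each 32-bit register, bucket bit index 0
+ * (bucket & 0x1f == 0) maps to the most significant bit.
+ */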
+static void set_bucket(struct dtsec_regs __iomem *regs, int bucket,
+ bool enable)
+{
+ int reg_idx = (bucket >> 5) & 0xf;
+ int bit_idx = bucket & 0x1f;
+ u32 bit_mask = 0x80000000 >> bit_idx;
+ u32 __iomem *reg;
+
+ if (reg_idx > 7)
+ reg = &regs->gaddr[reg_idx - 8];
+ else
+ reg = &regs->igaddr[reg_idx];
+
+ if (enable)
+ iowrite32be(ioread32be(reg) | bit_mask, reg);
+ else
+ iowrite32be(ioread32be(reg) & (~bit_mask), reg);
+}
+
+static int check_init_parameters(struct fman_mac *dtsec)
+{
+ if (dtsec->max_speed >= SPEED_10000) {
+ pr_err("1G MAC driver supports 1G or lower speeds\n");
+ return -EINVAL;
+ }
+ if ((dtsec->dtsec_drv_param)->rx_prepend >
+ MAX_PACKET_ALIGNMENT) {
+ pr_err("packetAlignmentPadding can't be > than %d\n",
+ MAX_PACKET_ALIGNMENT);
+ return -EINVAL;
+ }
+ if (((dtsec->dtsec_drv_param)->non_back_to_back_ipg1 >
+ MAX_INTER_PACKET_GAP) ||
+ ((dtsec->dtsec_drv_param)->non_back_to_back_ipg2 >
+ MAX_INTER_PACKET_GAP) ||
+ ((dtsec->dtsec_drv_param)->back_to_back_ipg >
+ MAX_INTER_PACKET_GAP)) {
+ pr_err("Inter packet gap can't be greater than %d\n",
+ MAX_INTER_PACKET_GAP);
+ return -EINVAL;
+ }
+ if ((dtsec->dtsec_drv_param)->halfdup_retransmit >
+ MAX_RETRANSMISSION) {
+ pr_err("maxRetransmission can't be greater than %d\n",
+ MAX_RETRANSMISSION);
+ return -EINVAL;
+ }
+ if ((dtsec->dtsec_drv_param)->halfdup_coll_window >
+ MAX_COLLISION_WINDOW) {
+ pr_err("collisionWindow can't be greater than %d\n",
+ MAX_COLLISION_WINDOW);
+ return -EINVAL;
+ }
+ /* If the auto-negotiation process is disabled, the PHY needs to be set
+ * up using the MII Management Interface.
+ */
+ if (!dtsec->exception_cb) {
+ pr_err("uninitialized exception_cb\n");
+ return -EINVAL;
+ }
+ if (!dtsec->event_cb) {
+ pr_err("uninitialized event_cb\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int get_exception_flag(enum fman_mac_exceptions exception)
+{
+ u32 bit_mask;
+
+ switch (exception) {
+ case FM_MAC_EX_1G_BAB_RX:
+ bit_mask = DTSEC_IMASK_BREN;
+ break;
+ case FM_MAC_EX_1G_RX_CTL:
+ bit_mask = DTSEC_IMASK_RXCEN;
+ break;
+ case FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET:
+ bit_mask = DTSEC_IMASK_GTSCEN;
+ break;
+ case FM_MAC_EX_1G_BAB_TX:
+ bit_mask = DTSEC_IMASK_BTEN;
+ break;
+ case FM_MAC_EX_1G_TX_CTL:
+ bit_mask = DTSEC_IMASK_TXCEN;
+ break;
+ case FM_MAC_EX_1G_TX_ERR:
+ bit_mask = DTSEC_IMASK_TXEEN;
+ break;
+ case FM_MAC_EX_1G_LATE_COL:
+ bit_mask = DTSEC_IMASK_LCEN;
+ break;
+ case FM_MAC_EX_1G_COL_RET_LMT:
+ bit_mask = DTSEC_IMASK_CRLEN;
+ break;
+ case FM_MAC_EX_1G_TX_FIFO_UNDRN:
+ bit_mask = DTSEC_IMASK_XFUNEN;
+ break;
+ case FM_MAC_EX_1G_MAG_PCKT:
+ bit_mask = DTSEC_IMASK_MAGEN;
+ break;
+ case FM_MAC_EX_1G_MII_MNG_RD_COMPLET:
+ bit_mask = DTSEC_IMASK_MMRDEN;
+ break;
+ case FM_MAC_EX_1G_MII_MNG_WR_COMPLET:
+ bit_mask = DTSEC_IMASK_MMWREN;
+ break;
+ case FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET:
+ bit_mask = DTSEC_IMASK_GRSCEN;
+ break;
+ case FM_MAC_EX_1G_DATA_ERR:
+ bit_mask = DTSEC_IMASK_TDPEEN;
+ break;
+ case FM_MAC_EX_1G_RX_MIB_CNT_OVFL:
+ bit_mask = DTSEC_IMASK_MSROEN;
+ break;
+ default:
+ bit_mask = 0;
+ break;
+ }
+
+ return bit_mask;
+}
+
+static bool is_init_done(struct dtsec_cfg *dtsec_drv_params)
+{
+ /* The driver parameters struct is freed (and set to NULL) at the end of
+ * dtsec_init(), so a NULL pointer means initialization has completed.
+ */
+ if (!dtsec_drv_params)
+ return true;
+
+ return false;
+}
+
+static u16 dtsec_get_max_frame_length(struct fman_mac *dtsec)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+
+ if (is_init_done(dtsec->dtsec_drv_param))
+ return 0;
+
+ return (u16)ioread32be(&regs->maxfrm);
+}
+
+static void dtsec_isr(void *handle)
+{
+ struct fman_mac *dtsec = (struct fman_mac *)handle;
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 event;
+
+ /* do not handle MDIO events */
+ event = ioread32be(&regs->ievent) &
+ (u32)(~(DTSEC_IMASK_MMRDEN | DTSEC_IMASK_MMWREN));
+
+ event &= ioread32be(&regs->imask);
+
+ iowrite32be(event, &regs->ievent);
+
+ if (event & DTSEC_IMASK_BREN)
+ dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_RX);
+ if (event & DTSEC_IMASK_RXCEN)
+ dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_RX_CTL);
+ if (event & DTSEC_IMASK_GTSCEN)
+ dtsec->exception_cb(dtsec->dev_id,
+ FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET);
+ if (event & DTSEC_IMASK_BTEN)
+ dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_TX);
+ if (event & DTSEC_IMASK_TXCEN)
+ dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_CTL);
+ if (event & DTSEC_IMASK_TXEEN)
+ dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_ERR);
+ if (event & DTSEC_IMASK_LCEN)
+ dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_LATE_COL);
+ if (event & DTSEC_IMASK_CRLEN)
+ dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_COL_RET_LMT);
+ if (event & DTSEC_IMASK_XFUNEN) {
+ /* FM_TX_LOCKUP_ERRATA_DTSEC6 Errata workaround */
+ if (dtsec->fm_rev_info.major == 2) {
+ u32 tpkt1, tmp_reg1, tpkt2, tmp_reg2, i;
+ /* a. Write 0x00E0_0C00 to DTSEC_ID
+ * This is a read only register
+ * b. Read and save the value of TPKT
+ */
+ tpkt1 = ioread32be(&regs->tpkt);
+
+ /* c. Read the register at dTSEC address offset 0x32C */
+ tmp_reg1 = ioread32be(&regs->reserved02c0[27]);
+
+ /* d. Compare bits [9:15] to bits [25:31] of the
+ * register at address offset 0x32C.
+ */
+ if ((tmp_reg1 & 0x007F0000) !=
+ (tmp_reg1 & 0x0000007F)) {
+ /* If they are not equal, save the value of
+ * this register and wait for at least
+ * MAXFRM*16 ns
+ */
+ usleep_range((u32)(min
+ (dtsec_get_max_frame_length(dtsec) *
+ 16 / 1000, 1)), (u32)
+ (min(dtsec_get_max_frame_length
+ (dtsec) * 16 / 1000, 1) + 1));
+ }
+
+ /* e. Read and save TPKT again and read the register
+ * at dTSEC address offset 0x32C again
+ */
+ tpkt2 = ioread32be(&regs->tpkt);
+ tmp_reg2 = ioread32be(&regs->reserved02c0[27]);
+
+ /* f. Compare the value of TPKT saved in step b to
+ * value read in step e. Also compare bits [9:15] of
+ * the register at offset 0x32C saved in step d to the
+ * value of bits [9:15] saved in step e. If the two
+ * registers values are unchanged, then the transmit
+ * portion of the dTSEC controller is locked up and
+ * the user should proceed to the recover sequence.
+ */
+ if ((tpkt1 == tpkt2) && ((tmp_reg1 & 0x007F0000) ==
+ (tmp_reg2 & 0x007F0000))) {
+ /* recover sequence */
+
+ /* a.Write a 1 to RCTRL[GRS] */
+
+ iowrite32be(ioread32be(&regs->rctrl) |
+ RCTRL_GRS, &regs->rctrl);
+
+ /* b.Wait until IEVENT[GRSC]=1, or at least
+ * 100 us has elapsed.
+ */
+ for (i = 0; i < 100; i++) {
+ if (ioread32be(&regs->ievent) &
+ DTSEC_IMASK_GRSCEN)
+ break;
+ udelay(1);
+ }
+ if (ioread32be(&regs->ievent) &
+ DTSEC_IMASK_GRSCEN)
+ iowrite32be(DTSEC_IMASK_GRSCEN,
+ &regs->ievent);
+ else
+ pr_debug("Rx lockup due to Tx lockup\n");
+
+ /* c.Write a 1 to bit n of FM_RSTC
+ * (offset 0x0CC of FPM)
+ */
+ fman_reset_mac(dtsec->fm, dtsec->mac_id);
+
+ /* d.Wait 4 Tx clocks (32 ns) */
+ udelay(1);
+
+ /* e.Write a 0 to bit n of FM_RSTC. */
+ /* cleared by FMAN
+ */
+ }
+ }
+
+ dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_FIFO_UNDRN);
+ }
+ if (event & DTSEC_IMASK_MAGEN)
+ dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_MAG_PCKT);
+ if (event & DTSEC_IMASK_GRSCEN)
+ dtsec->exception_cb(dtsec->dev_id,
+ FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET);
+ if (event & DTSEC_IMASK_TDPEEN)
+ dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_DATA_ERR);
+ if (event & DTSEC_IMASK_RDPEEN)
+ dtsec->exception_cb(dtsec->dev_id, FM_MAC_1G_RX_DATA_ERR);
+
+ /* masked interrupts */
+ WARN_ON(event & DTSEC_IMASK_ABRTEN);
+ WARN_ON(event & DTSEC_IMASK_IFERREN);
+}
+
+static void dtsec_1588_isr(void *handle)
+{
+ struct fman_mac *dtsec = (struct fman_mac *)handle;
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 event;
+
+ if (dtsec->ptp_tsu_enabled) {
+ event = ioread32be(&regs->tmr_pevent);
+ event &= ioread32be(&regs->tmr_pemask);
+
+ if (event) {
+ iowrite32be(event, &regs->tmr_pevent);
+ WARN_ON(event & TMR_PEVENT_TSRE);
+ dtsec->exception_cb(dtsec->dev_id,
+ FM_MAC_EX_1G_1588_TS_RX_ERR);
+ }
+ }
+}
+
+static void free_init_resources(struct fman_mac *dtsec)
+{
+ fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
+ FMAN_INTR_TYPE_ERR);
+ fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
+ FMAN_INTR_TYPE_NORMAL);
+
+ /* release the driver's group hash table */
+ free_hash_table(dtsec->multicast_addr_hash);
+ dtsec->multicast_addr_hash = NULL;
+
+ /* release the driver's individual hash table */
+ free_hash_table(dtsec->unicast_addr_hash);
+ dtsec->unicast_addr_hash = NULL;
+}
+
+static void graceful_start(struct fman_mac *dtsec)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+
+ iowrite32be(ioread32be(&regs->tctrl) & ~TCTRL_GTS, &regs->tctrl);
+ iowrite32be(ioread32be(&regs->rctrl) & ~RCTRL_GRS, &regs->rctrl);
+}
+
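+/* graceful_stop - quiesce the MAC before touching its registers: assert the
+ * graceful Rx stop bit, wait per the dTSEC errata workarounds below, then
+ * assert the graceful Tx stop bit unless errata A004 forbids it. Most
+ * callers pair this with graceful_start() around pause, address and link
+ * updates; dtsec_disable() leaves the MAC stopped.
+ */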
+static void graceful_stop(struct fman_mac *dtsec)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 tmp;
+
+ /* Graceful stop - Assert the graceful Rx stop bit */
+ tmp = ioread32be(&regs->rctrl) | RCTRL_GRS;
+ iowrite32be(tmp, &regs->rctrl);
+
+ if (dtsec->fm_rev_info.major == 2) {
+ /* Workaround for dTSEC Errata A002 */
+ usleep_range(100, 200);
+ } else {
+ /* Workaround for dTSEC Errata A004839 */
+ usleep_range(10, 50);
+ }
+
+ /* Graceful stop - Assert the graceful Tx stop bit */
+ if (dtsec->fm_rev_info.major == 2) {
+ /* dTSEC Errata A004: Do not use TCTRL[GTS]=1 */
+ pr_debug("GTS not supported due to DTSEC_A004 Errata.\n");
+ } else {
+ tmp = ioread32be(&regs->tctrl) | TCTRL_GTS;
+ iowrite32be(tmp, &regs->tctrl);
+
+ /* Workaround for dTSEC Errata A0012, A0014 */
+ usleep_range(10, 50);
+ }
+}
+
+static int dtsec_enable(struct fman_mac *dtsec)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 tmp;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ /* Enable */
+ tmp = ioread32be(&regs->maccfg1);
+ tmp |= MACCFG1_RX_EN | MACCFG1_TX_EN;
+ iowrite32be(tmp, &regs->maccfg1);
+
+ /* Graceful start - clear the graceful Rx/Tx stop bit */
+ graceful_start(dtsec);
+
+ return 0;
+}
+
+static void dtsec_disable(struct fman_mac *dtsec)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 tmp;
+
+ WARN_ON_ONCE(!is_init_done(dtsec->dtsec_drv_param));
+
+ /* Graceful stop - Assert the graceful Rx/Tx stop bit */
+ graceful_stop(dtsec);
+
+ tmp = ioread32be(&regs->maccfg1);
+ tmp &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
+ iowrite32be(tmp, &regs->maccfg1);
+}
+
+static int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
+ u8 __maybe_unused priority,
+ u16 pause_time,
+ u16 __maybe_unused thresh_time)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 ptv = 0;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ graceful_stop(dtsec);
+
+ if (pause_time) {
+ /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */
+ if (dtsec->fm_rev_info.major == 2 && pause_time <= 320) {
+ pr_warn("pause-time: %d illegal.Should be > 320\n",
+ pause_time);
+ return -EINVAL;
+ }
+
+ ptv = ioread32be(&regs->ptv);
+ ptv &= PTV_PTE_MASK;
+ ptv |= pause_time & PTV_PT_MASK;
+ iowrite32be(ptv, &regs->ptv);
+
+ /* trigger the transmission of a flow-control pause frame */
+ iowrite32be(ioread32be(&regs->maccfg1) | MACCFG1_TX_FLOW,
+ &regs->maccfg1);
+ } else
+ iowrite32be(ioread32be(&regs->maccfg1) & ~MACCFG1_TX_FLOW,
+ &regs->maccfg1);
+
+ graceful_start(dtsec);
+
+ return 0;
+}
+
+static int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 tmp;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ graceful_stop(dtsec);
+
+ tmp = ioread32be(&regs->maccfg1);
+ if (en)
+ tmp |= MACCFG1_RX_FLOW;
+ else
+ tmp &= ~MACCFG1_RX_FLOW;
+ iowrite32be(tmp, &regs->maccfg1);
+
+ graceful_start(dtsec);
+
+ return 0;
+}
+
+static int dtsec_modify_mac_address(struct fman_mac *dtsec,
+ const enet_addr_t *enet_addr)
+{
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ graceful_stop(dtsec);
+
+ /* Initialize MAC Station Address registers (1 & 2).
+ * The station address has to be swapped (big endian to little endian).
+ */
+ dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr);
+ set_mac_address(dtsec->regs, (const u8 *)(*enet_addr));
+
+ graceful_start(dtsec);
+
+ return 0;
+}
+
+static int dtsec_add_hash_mac_address(struct fman_mac *dtsec,
+ enet_addr_t *eth_addr)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ struct eth_hash_entry *hash_entry;
+ u64 addr;
+ s32 bucket;
+ u32 crc = 0xFFFFFFFF;
+ bool mcast, ghtx;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ addr = ENET_ADDR_TO_UINT64(*eth_addr);
+
+ ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
+ mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false);
+
+ /* Cannot handle unicast mac addr when GHTX is on */
+ if (ghtx && !mcast) {
+ pr_err("Could not compute hash bucket\n");
+ return -EINVAL;
+ }
+ crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
+ crc = bitrev32(crc);
+
+ /* Considering the 9 highest-order bits of the CRC, H[8:0]:
+ * if ghtx = 0, H[8:6] (the highest-order 3 bits) identify the hash
+ * register and H[5:1] (the next 5 bits) identify the hash bit;
+ * if ghtx = 1, H[8:5] (the highest-order 4 bits) identify the hash
+ * register and H[4:0] (the next 5 bits) identify the hash bit.
+ *
+ * In the bucket index output, the low 5 bits identify the hash register
+ * bit, while the higher 4 bits identify the hash register.
+ */
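+ /* For example (illustrative values): with ghtx = 0 and crc >> 24 equal
+ * to 0x12, a unicast address yields bucket 0x12, i.e. a bit in
+ * igaddr[0]; a multicast address has 0x100 added, giving bucket 0x112,
+ * i.e. the corresponding bit in gaddr[0] (see set_bucket()).
+ */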
+
+ if (ghtx) {
+ bucket = (s32)((crc >> 23) & 0x1ff);
+ } else {
+ bucket = (s32)((crc >> 24) & 0xff);
+ /* if !ghtx and mcast, the bit must be set in gaddr instead of
+ * igaddr.
+ */
+ if (mcast)
+ bucket += 0x100;
+ }
+
+ set_bucket(dtsec->regs, bucket, true);
+
+ /* Create element to be added to the driver hash table */
+ hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC);
+ if (!hash_entry)
+ return -ENOMEM;
+ hash_entry->addr = addr;
+ INIT_LIST_HEAD(&hash_entry->node);
+
+ if (addr & MAC_GROUP_ADDRESS)
+ /* Group Address */
+ list_add_tail(&hash_entry->node,
+ &dtsec->multicast_addr_hash->lsts[bucket]);
+ else
+ list_add_tail(&hash_entry->node,
+ &dtsec->unicast_addr_hash->lsts[bucket]);
+
+ return 0;
+}
+
+static int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
+{
+ u32 tmp;
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->rctrl);
+ if (enable)
+ tmp |= RCTRL_MPROM;
+ else
+ tmp &= ~RCTRL_MPROM;
+
+ iowrite32be(tmp, &regs->rctrl);
+
+ return 0;
+}
+
+static int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 rctrl, tctrl;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ rctrl = ioread32be(&regs->rctrl);
+ tctrl = ioread32be(&regs->tctrl);
+
+ if (enable) {
+ rctrl |= RCTRL_RTSE;
+ tctrl |= TCTRL_TTSE;
+ } else {
+ rctrl &= ~RCTRL_RTSE;
+ tctrl &= ~TCTRL_TTSE;
+ }
+
+ iowrite32be(rctrl, &regs->rctrl);
+ iowrite32be(tctrl, &regs->tctrl);
+
+ return 0;
+}
+
+static int dtsec_del_hash_mac_address(struct fman_mac *dtsec,
+ enet_addr_t *eth_addr)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ struct list_head *pos;
+ struct eth_hash_entry *hash_entry = NULL;
+ u64 addr;
+ s32 bucket;
+ u32 crc = 0xFFFFFFFF;
+ bool mcast, ghtx;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ addr = ENET_ADDR_TO_UINT64(*eth_addr);
+
+ ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
+ mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false);
+
+ /* Cannot handle unicast mac addr when GHTX is on */
+ if (ghtx && !mcast) {
+ pr_err("Could not compute hash bucket\n");
+ return -EINVAL;
+ }
+ crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
+ crc = bitrev32(crc);
+
+ if (ghtx) {
+ bucket = (s32)((crc >> 23) & 0x1ff);
+ } else {
+ bucket = (s32)((crc >> 24) & 0xff);
+ /* if !ghtx and mcast the bit must be set
+ * in gaddr instead of igaddr.
+ */
+ if (mcast)
+ bucket += 0x100;
+ }
+
+ if (addr & MAC_GROUP_ADDRESS) {
+ /* Group Address */
+ list_for_each(pos,
+ &dtsec->multicast_addr_hash->lsts[bucket]) {
+ hash_entry = ETH_HASH_ENTRY_OBJ(pos);
+ if (hash_entry && hash_entry->addr == addr) {
+ list_del_init(&hash_entry->node);
+ kfree(hash_entry);
+ break;
+ }
+ }
+ if (list_empty(&dtsec->multicast_addr_hash->lsts[bucket]))
+ set_bucket(dtsec->regs, bucket, false);
+ } else {
+ /* Individual Address */
+ list_for_each(pos,
+ &dtsec->unicast_addr_hash->lsts[bucket]) {
+ hash_entry = ETH_HASH_ENTRY_OBJ(pos);
+ if (hash_entry && hash_entry->addr == addr) {
+ list_del_init(&hash_entry->node);
+ kfree(hash_entry);
+ break;
+ }
+ }
+ if (list_empty(&dtsec->unicast_addr_hash->lsts[bucket]))
+ set_bucket(dtsec->regs, bucket, false);
+ }
+
+ /* address does not exist */
+ WARN_ON(!hash_entry);
+
+ return 0;
+}
+
+static int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 tmp;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ /* Set unicast promiscuous */
+ tmp = ioread32be(&regs->rctrl);
+ if (new_val)
+ tmp |= RCTRL_UPROM;
+ else
+ tmp &= ~RCTRL_UPROM;
+
+ iowrite32be(tmp, &regs->rctrl);
+
+ /* Set multicast promiscuous */
+ tmp = ioread32be(&regs->rctrl);
+ if (new_val)
+ tmp |= RCTRL_MPROM;
+ else
+ tmp &= ~RCTRL_MPROM;
+
+ iowrite32be(tmp, &regs->rctrl);
+
+ return 0;
+}
+
+static int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 tmp;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ graceful_stop(dtsec);
+
+ tmp = ioread32be(&regs->maccfg2);
+
+ /* Full Duplex */
+ tmp |= MACCFG2_FULL_DUPLEX;
+
+ tmp &= ~(MACCFG2_NIBBLE_MODE | MACCFG2_BYTE_MODE);
+ if (speed < SPEED_1000)
+ tmp |= MACCFG2_NIBBLE_MODE;
+ else if (speed == SPEED_1000)
+ tmp |= MACCFG2_BYTE_MODE;
+ iowrite32be(tmp, &regs->maccfg2);
+
+ tmp = ioread32be(&regs->ecntrl);
+ if (speed == SPEED_100)
+ tmp |= DTSEC_ECNTRL_R100M;
+ else
+ tmp &= ~DTSEC_ECNTRL_R100M;
+ iowrite32be(tmp, &regs->ecntrl);
+
+ graceful_start(dtsec);
+
+ return 0;
+}
+
+static int dtsec_restart_autoneg(struct fman_mac *dtsec)
+{
+ u16 tmp_reg16;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ tmp_reg16 = phy_read(dtsec->tbiphy, MII_BMCR);
+
+ tmp_reg16 &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
+ tmp_reg16 |= (BMCR_ANENABLE | BMCR_ANRESTART |
+ BMCR_FULLDPLX | BMCR_SPEED1000);
+
+ phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
+
+ return 0;
+}
+
+static void adjust_link_dtsec(struct mac_device *mac_dev)
+{
+ struct phy_device *phy_dev = mac_dev->phy_dev;
+ struct fman_mac *fman_mac;
+ bool rx_pause, tx_pause;
+ int err;
+
+ fman_mac = mac_dev->fman_mac;
+ if (!phy_dev->link) {
+ dtsec_restart_autoneg(fman_mac);
+
+ return;
+ }
+
+ dtsec_adjust_link(fman_mac, phy_dev->speed);
+ mac_dev->update_speed(mac_dev, phy_dev->speed);
+ fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
+ err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
+ if (err < 0)
+ dev_err(mac_dev->dev, "fman_set_mac_active_pause() = %d\n",
+ err);
+}
+
+static int dtsec_set_exception(struct fman_mac *dtsec,
+ enum fman_mac_exceptions exception, bool enable)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 bit_mask = 0;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ if (exception != FM_MAC_EX_1G_1588_TS_RX_ERR) {
+ bit_mask = get_exception_flag(exception);
+ if (bit_mask) {
+ if (enable)
+ dtsec->exceptions |= bit_mask;
+ else
+ dtsec->exceptions &= ~bit_mask;
+ } else {
+ pr_err("Undefined exception\n");
+ return -EINVAL;
+ }
+ if (enable)
+ iowrite32be(ioread32be(&regs->imask) | bit_mask,
+ &regs->imask);
+ else
+ iowrite32be(ioread32be(&regs->imask) & ~bit_mask,
+ &regs->imask);
+ } else {
+ if (!dtsec->ptp_tsu_enabled) {
+ pr_err("Exception valid for 1588 only\n");
+ return -EINVAL;
+ }
+ switch (exception) {
+ case FM_MAC_EX_1G_1588_TS_RX_ERR:
+ if (enable) {
+ dtsec->en_tsu_err_exception = true;
+ iowrite32be(ioread32be(&regs->tmr_pemask) |
+ TMR_PEMASK_TSREEN,
+ &regs->tmr_pemask);
+ } else {
+ dtsec->en_tsu_err_exception = false;
+ iowrite32be(ioread32be(&regs->tmr_pemask) &
+ ~TMR_PEMASK_TSREEN,
+ &regs->tmr_pemask);
+ }
+ break;
+ default:
+ pr_err("Undefined exception\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int dtsec_init(struct fman_mac *dtsec)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ struct dtsec_cfg *dtsec_drv_param;
+ u16 max_frm_ln;
+ int err;
+
+ if (is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ if (DEFAULT_RESET_ON_INIT &&
+ (fman_reset_mac(dtsec->fm, dtsec->mac_id) != 0)) {
+ pr_err("Can't reset MAC!\n");
+ return -EINVAL;
+ }
+
+ err = check_init_parameters(dtsec);
+ if (err)
+ return err;
+
+ dtsec_drv_param = dtsec->dtsec_drv_param;
+
+ err = init(dtsec->regs, dtsec_drv_param, dtsec->phy_if,
+ dtsec->max_speed, dtsec->addr, dtsec->exceptions,
+ dtsec->tbiphy->mdio.addr);
+ if (err) {
+ free_init_resources(dtsec);
+ pr_err("DTSEC version doesn't support this i/f mode\n");
+ return err;
+ }
+
+ if (dtsec->phy_if == PHY_INTERFACE_MODE_SGMII) {
+ u16 tmp_reg16;
+
+ /* Configure the TBI PHY Control Register */
+ tmp_reg16 = TBICON_CLK_SELECT | TBICON_SOFT_RESET;
+ phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);
+
+ tmp_reg16 = TBICON_CLK_SELECT;
+ phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);
+
+ tmp_reg16 = (BMCR_RESET | BMCR_ANENABLE |
+ BMCR_FULLDPLX | BMCR_SPEED1000);
+ phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
+
+ if (dtsec->basex_if)
+ tmp_reg16 = TBIANA_1000X;
+ else
+ tmp_reg16 = TBIANA_SGMII;
+ phy_write(dtsec->tbiphy, MII_ADVERTISE, tmp_reg16);
+
+ tmp_reg16 = (BMCR_ANENABLE | BMCR_ANRESTART |
+ BMCR_FULLDPLX | BMCR_SPEED1000);
+
+ phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
+ }
+
+ /* Max Frame Length */
+ max_frm_ln = (u16)ioread32be(&regs->maxfrm);
+ err = fman_set_mac_max_frame(dtsec->fm, dtsec->mac_id, max_frm_ln);
+ if (err) {
+ pr_err("Setting max frame length failed\n");
+ free_init_resources(dtsec);
+ return -EINVAL;
+ }
+
+ dtsec->multicast_addr_hash =
+ alloc_hash_table(EXTENDED_HASH_TABLE_SIZE);
+ if (!dtsec->multicast_addr_hash) {
+ free_init_resources(dtsec);
+ pr_err("MC hash table is failed\n");
+ return -ENOMEM;
+ }
+
+ dtsec->unicast_addr_hash = alloc_hash_table(DTSEC_HASH_TABLE_SIZE);
+ if (!dtsec->unicast_addr_hash) {
+ free_init_resources(dtsec);
+ pr_err("UC hash table is failed\n");
+ return -ENOMEM;
+ }
+
+ /* register err intr handler for dtsec to FPM (err) */
+ fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
+ FMAN_INTR_TYPE_ERR, dtsec_isr, dtsec);
+ /* register 1588 intr handler for TMR to FPM (normal) */
+ fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
+ FMAN_INTR_TYPE_NORMAL, dtsec_1588_isr, dtsec);
+
+ kfree(dtsec_drv_param);
+ dtsec->dtsec_drv_param = NULL;
+
+ return 0;
+}
+
+static int dtsec_free(struct fman_mac *dtsec)
+{
+ free_init_resources(dtsec);
+
+ kfree(dtsec->dtsec_drv_param);
+ dtsec->dtsec_drv_param = NULL;
+ kfree(dtsec);
+
+ return 0;
+}
+
+static struct fman_mac *dtsec_config(struct mac_device *mac_dev,
+ struct fman_mac_params *params)
+{
+ struct fman_mac *dtsec;
+ struct dtsec_cfg *dtsec_drv_param;
+
+ /* allocate memory for the dTSEC MAC data structure. */
+ dtsec = kzalloc(sizeof(*dtsec), GFP_KERNEL);
+ if (!dtsec)
+ return NULL;
+
+ /* allocate memory for the dTSEC driver parameters data structure. */
+ dtsec_drv_param = kzalloc(sizeof(*dtsec_drv_param), GFP_KERNEL);
+ if (!dtsec_drv_param)
+ goto err_dtsec;
+
+ /* Plant parameter structure pointer */
+ dtsec->dtsec_drv_param = dtsec_drv_param;
+
+ set_dflts(dtsec_drv_param);
+
+ dtsec->regs = mac_dev->vaddr;
+ dtsec->addr = ENET_ADDR_TO_UINT64(mac_dev->addr);
+ dtsec->max_speed = params->max_speed;
+ dtsec->phy_if = mac_dev->phy_if;
+ dtsec->mac_id = params->mac_id;
+ dtsec->exceptions = (DTSEC_IMASK_BREN |
+ DTSEC_IMASK_RXCEN |
+ DTSEC_IMASK_BTEN |
+ DTSEC_IMASK_TXCEN |
+ DTSEC_IMASK_TXEEN |
+ DTSEC_IMASK_ABRTEN |
+ DTSEC_IMASK_LCEN |
+ DTSEC_IMASK_CRLEN |
+ DTSEC_IMASK_XFUNEN |
+ DTSEC_IMASK_IFERREN |
+ DTSEC_IMASK_MAGEN |
+ DTSEC_IMASK_TDPEEN |
+ DTSEC_IMASK_RDPEEN);
+ dtsec->exception_cb = params->exception_cb;
+ dtsec->event_cb = params->event_cb;
+ dtsec->dev_id = mac_dev;
+ dtsec->ptp_tsu_enabled = dtsec->dtsec_drv_param->ptp_tsu_en;
+ dtsec->en_tsu_err_exception = dtsec->dtsec_drv_param->ptp_exception_en;
+
+ dtsec->fm = params->fm;
+ dtsec->basex_if = params->basex_if;
+
+ /* Save FMan revision */
+ fman_get_revision(dtsec->fm, &dtsec->fm_rev_info);
+
+ return dtsec;
+
+err_dtsec:
+ kfree(dtsec);
+ return NULL;
+}
+
+int dtsec_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params)
+{
+ int err;
+ struct fman_mac *dtsec;
+ struct device_node *phy_node;
+
+ mac_dev->set_promisc = dtsec_set_promiscuous;
+ mac_dev->change_addr = dtsec_modify_mac_address;
+ mac_dev->add_hash_mac_addr = dtsec_add_hash_mac_address;
+ mac_dev->remove_hash_mac_addr = dtsec_del_hash_mac_address;
+ mac_dev->set_tx_pause = dtsec_set_tx_pause_frames;
+ mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames;
+ mac_dev->set_exception = dtsec_set_exception;
+ mac_dev->set_allmulti = dtsec_set_allmulti;
+ mac_dev->set_tstamp = dtsec_set_tstamp;
+ mac_dev->set_multi = fman_set_multi;
+ mac_dev->adjust_link = adjust_link_dtsec;
+ mac_dev->enable = dtsec_enable;
+ mac_dev->disable = dtsec_disable;
+
+ mac_dev->fman_mac = dtsec_config(mac_dev, params);
+ if (!mac_dev->fman_mac) {
+ err = -EINVAL;
+ goto _return;
+ }
+
+ dtsec = mac_dev->fman_mac;
+ dtsec->dtsec_drv_param->maximum_frame = fman_get_max_frm();
+ dtsec->dtsec_drv_param->tx_pad_crc = true;
+
+ phy_node = of_parse_phandle(mac_node, "tbi-handle", 0);
+ if (!phy_node) {
+ pr_err("TBI PHY node is not available\n");
+ err = -EINVAL;
+ goto _return_fm_mac_free;
+ }
+
+ dtsec->tbiphy = of_phy_find_device(phy_node);
+ if (!dtsec->tbiphy) {
+ pr_err("of_phy_find_device (TBI PHY) failed\n");
+ err = -EINVAL;
+ goto _return_fm_mac_free;
+ }
+ put_device(&dtsec->tbiphy->mdio.dev);
+
+ err = dtsec_init(dtsec);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ /* For 1G MAC, disable by default the MIB counters overflow interrupt */
+ err = dtsec_set_exception(dtsec, FM_MAC_EX_1G_RX_MIB_CNT_OVFL, false);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ dev_info(mac_dev->dev, "FMan dTSEC version: 0x%08x\n",
+ ioread32be(&dtsec->regs->tsec_id));
+
+ goto _return;
+
+_return_fm_mac_free:
+ dtsec_free(dtsec);
+
+_return:
+ return err;
+}
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.h b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
new file mode 100644
index 000000000..8c72d280c
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
+ */
+
+#ifndef __DTSEC_H
+#define __DTSEC_H
+
+#include "fman_mac.h"
+
+struct mac_device;
+
+int dtsec_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params);
+
+#endif /* __DTSEC_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_keygen.c b/drivers/net/ethernet/freescale/fman/fman_keygen.c
new file mode 100644
index 000000000..e73f6ef3c
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman_keygen.c
@@ -0,0 +1,757 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * Copyright 2017 NXP
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+
+#include "fman_keygen.h"
+
+/* Maximum number of HW Ports */
+#define FMAN_MAX_NUM_OF_HW_PORTS 64
+
+/* Maximum number of KeyGen Schemes */
+#define FM_KG_MAX_NUM_OF_SCHEMES 32
+
+/* Number of KeyGen Generic Extract Command Registers */
+#define FM_KG_NUM_OF_GENERIC_REGS 8
+
+/* Dummy port ID */
+#define DUMMY_PORT_ID 0
+
+/* Select Scheme Value Register */
+#define KG_SCH_DEF_USE_KGSE_DV_0 2
+#define KG_SCH_DEF_USE_KGSE_DV_1 3
+
+/* Registers Shifting values */
+#define FM_KG_KGAR_NUM_SHIFT 16
+#define KG_SCH_DEF_L4_PORT_SHIFT 8
+#define KG_SCH_DEF_IP_ADDR_SHIFT 18
+#define KG_SCH_HASH_CONFIG_SHIFT_SHIFT 24
+
+/* KeyGen Registers bit field masks: */
+
+/* Enable bit field mask for KeyGen General Configuration Register */
+#define FM_KG_KGGCR_EN 0x80000000
+
+/* KeyGen Global Registers bit field masks */
+#define FM_KG_KGAR_GO 0x80000000
+#define FM_KG_KGAR_READ 0x40000000
+#define FM_KG_KGAR_WRITE 0x00000000
+#define FM_KG_KGAR_SEL_SCHEME_ENTRY 0x00000000
+#define FM_KG_KGAR_SCM_WSEL_UPDATE_CNT 0x00008000
+
+#define FM_KG_KGAR_ERR 0x20000000
+#define FM_KG_KGAR_SEL_CLS_PLAN_ENTRY 0x01000000
+#define FM_KG_KGAR_SEL_PORT_ENTRY 0x02000000
+#define FM_KG_KGAR_SEL_PORT_WSEL_SP 0x00008000
+#define FM_KG_KGAR_SEL_PORT_WSEL_CPP 0x00004000
+
+/* Error events exceptions */
+#define FM_EX_KG_DOUBLE_ECC 0x80000000
+#define FM_EX_KG_KEYSIZE_OVERFLOW 0x40000000
+
+/* Scheme Registers bit field masks */
+#define KG_SCH_MODE_EN 0x80000000
+#define KG_SCH_VSP_NO_KSP_EN 0x80000000
+#define KG_SCH_HASH_CONFIG_SYM 0x40000000
+
+/* Known Protocol field codes */
+#define KG_SCH_KN_PORT_ID 0x80000000
+#define KG_SCH_KN_MACDST 0x40000000
+#define KG_SCH_KN_MACSRC 0x20000000
+#define KG_SCH_KN_TCI1 0x10000000
+#define KG_SCH_KN_TCI2 0x08000000
+#define KG_SCH_KN_ETYPE 0x04000000
+#define KG_SCH_KN_PPPSID 0x02000000
+#define KG_SCH_KN_PPPID 0x01000000
+#define KG_SCH_KN_MPLS1 0x00800000
+#define KG_SCH_KN_MPLS2 0x00400000
+#define KG_SCH_KN_MPLS_LAST 0x00200000
+#define KG_SCH_KN_IPSRC1 0x00100000
+#define KG_SCH_KN_IPDST1 0x00080000
+#define KG_SCH_KN_PTYPE1 0x00040000
+#define KG_SCH_KN_IPTOS_TC1 0x00020000
+#define KG_SCH_KN_IPV6FL1 0x00010000
+#define KG_SCH_KN_IPSRC2 0x00008000
+#define KG_SCH_KN_IPDST2 0x00004000
+#define KG_SCH_KN_PTYPE2 0x00002000
+#define KG_SCH_KN_IPTOS_TC2 0x00001000
+#define KG_SCH_KN_IPV6FL2 0x00000800
+#define KG_SCH_KN_GREPTYPE 0x00000400
+#define KG_SCH_KN_IPSEC_SPI 0x00000200
+#define KG_SCH_KN_IPSEC_NH 0x00000100
+#define KG_SCH_KN_IPPID 0x00000080
+#define KG_SCH_KN_L4PSRC 0x00000004
+#define KG_SCH_KN_L4PDST 0x00000002
+#define KG_SCH_KN_TFLG 0x00000001
+
+/* NIA values */
+#define NIA_ENG_BMI 0x00500000
+#define NIA_BMI_AC_ENQ_FRAME 0x00000002
+#define ENQUEUE_KG_DFLT_NIA (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME)
+
+/* Hard-coded configuration:
+ * These values are used as hard-coded values for KeyGen configuration
+ * and they replace user selections for this hard-coded version
+ */
+
+/* Hash distribution shift */
+#define DEFAULT_HASH_DIST_FQID_SHIFT 0
+
+/* Hash shift */
+#define DEFAULT_HASH_SHIFT 0
+
+/* Symmetric hash usage:
+ * Warning:
+ * - the value for symmetric hash usage must be in accordance with hash
+ * key defined below
+ * - according to the tests performed, spreading does not work if symmetric
+ * hash is set to true
+ * So ultimately, symmetric hash functionality should always be disabled:
+ */
+#define DEFAULT_SYMMETRIC_HASH false
+
+/* Hash Key extraction fields: */
+#define DEFAULT_HASH_KEY_EXTRACT_FIELDS \
+ (KG_SCH_KN_IPSRC1 | KG_SCH_KN_IPDST1 | \
+ KG_SCH_KN_L4PSRC | KG_SCH_KN_L4PDST | \
+ KG_SCH_KN_IPSEC_SPI)
+
+/* Default values to be used as hash key in case IPv4 or L4 (TCP, UDP)
+ * don't exist in the frame
+ */
+/* Default IPv4 address */
+#define DEFAULT_HASH_KEY_IPv4_ADDR 0x0A0A0A0A
+/* Default L4 port */
+#define DEFAULT_HASH_KEY_L4_PORT 0x0B0B0B0B
+
+/* KeyGen Memory Mapped Registers: */
+
+/* Scheme Configuration RAM Registers */
+struct fman_kg_scheme_regs {
+ u32 kgse_mode; /* 0x100: MODE */
+ u32 kgse_ekfc; /* 0x104: Extract Known Fields Command */
+ u32 kgse_ekdv; /* 0x108: Extract Known Default Value */
+ u32 kgse_bmch; /* 0x10C: Bit Mask Command High */
+ u32 kgse_bmcl; /* 0x110: Bit Mask Command Low */
+ u32 kgse_fqb; /* 0x114: Frame Queue Base */
+ u32 kgse_hc; /* 0x118: Hash Command */
+ u32 kgse_ppc; /* 0x11C: Policer Profile Command */
+ u32 kgse_gec[FM_KG_NUM_OF_GENERIC_REGS];
+ /* 0x120: Generic Extract Command */
+ u32 kgse_spc;
+ /* 0x140: KeyGen Scheme Entry Statistic Packet Counter */
+ u32 kgse_dv0; /* 0x144: KeyGen Scheme Entry Default Value 0 */
+ u32 kgse_dv1; /* 0x148: KeyGen Scheme Entry Default Value 1 */
+ u32 kgse_ccbs;
+ /* 0x14C: KeyGen Scheme Entry Coarse Classification Bit */
+ u32 kgse_mv; /* 0x150: KeyGen Scheme Entry Match vector */
+ u32 kgse_om; /* 0x154: KeyGen Scheme Entry Operation Mode bits */
+ u32 kgse_vsp;
+ /* 0x158: KeyGen Scheme Entry Virtual Storage Profile */
+};
+
+/* Port Partition Configuration Registers */
+struct fman_kg_pe_regs {
+ u32 fmkg_pe_sp; /* 0x100: KeyGen Port entry Scheme Partition */
+ u32 fmkg_pe_cpp;
+ /* 0x104: KeyGen Port Entry Classification Plan Partition */
+};
+
+/* General Configuration and Status Registers
+ * Global Statistic Counters
+ * KeyGen Global Registers
+ */
+struct fman_kg_regs {
+ u32 fmkg_gcr; /* 0x000: KeyGen General Configuration Register */
+ u32 res004; /* 0x004: Reserved */
+ u32 res008; /* 0x008: Reserved */
+ u32 fmkg_eer; /* 0x00C: KeyGen Error Event Register */
+ u32 fmkg_eeer; /* 0x010: KeyGen Error Event Enable Register */
+ u32 res014; /* 0x014: Reserved */
+ u32 res018; /* 0x018: Reserved */
+ u32 fmkg_seer; /* 0x01C: KeyGen Scheme Error Event Register */
+ u32 fmkg_seeer; /* 0x020: KeyGen Scheme Error Event Enable Register */
+ u32 fmkg_gsr; /* 0x024: KeyGen Global Status Register */
+ u32 fmkg_tpc; /* 0x028: Total Packet Counter Register */
+ u32 fmkg_serc; /* 0x02C: Soft Error Capture Register */
+ u32 res030[4]; /* 0x030: Reserved */
+ u32 fmkg_fdor; /* 0x034: Frame Data Offset Register */
+ u32 fmkg_gdv0r; /* 0x038: Global Default Value Register 0 */
+ u32 fmkg_gdv1r; /* 0x03C: Global Default Value Register 1 */
+ u32 res04c[6]; /* 0x040: Reserved */
+ u32 fmkg_feer; /* 0x044: Force Error Event Register */
+ u32 res068[38]; /* 0x048: Reserved */
+ union {
+ u32 fmkg_indirect[63]; /* 0x100: Indirect Access Registers */
+ struct fman_kg_scheme_regs fmkg_sch; /* Scheme Registers */
+ struct fman_kg_pe_regs fmkg_pe; /* Port Partition Registers */
+ };
+ u32 fmkg_ar; /* 0x1FC: KeyGen Action Register */
+};
+
+/* KeyGen Scheme data */
+struct keygen_scheme {
+ bool used; /* Specifies if this scheme is used */
+ u8 hw_port_id;
+ /* Hardware port ID
+ * schemes sharing between multiple ports is not
+ * currently supported
+ * so we have only one port id bound to a scheme
+ */
+ u32 base_fqid;
+ /* Base FQID:
+ * Must be between 1 and 2^24-1
+ * If hash is used and an even distribution is
+ * expected according to hash_fqid_count,
+ * base_fqid must be aligned to hash_fqid_count
+ */
+ u32 hash_fqid_count;
+ /* FQ range for hash distribution:
+ * Must be a power of 2
+ * Represents the range of queues for spreading
+ */
+ bool use_hashing; /* Usage of Hashing and spreading over FQ */
+ bool symmetric_hash; /* Symmetric Hash option usage */
+ u8 hashShift;
+ /* Hash result right shift.
+ * Select the 24 bits out of the 64 hash result.
+ * 0 means using the 24 LSB's, otherwise
+ * use the 24 LSB's after shifting right
+ */
+ u32 match_vector; /* Match Vector */
+};
+
+/* KeyGen driver data */
+struct fman_keygen {
+ struct keygen_scheme schemes[FM_KG_MAX_NUM_OF_SCHEMES];
+ /* Array of schemes */
+ struct fman_kg_regs __iomem *keygen_regs; /* KeyGen registers */
+};
+
+/* keygen_write_ar_wait
+ *
+ * Write Action Register with specified value, wait for GO bit field to be
+ * idle and then read the error
+ *
+ * regs: KeyGen registers
+ * fmkg_ar: Action Register value
+ *
+ * Return: Zero for success or error code in case of failure
+ */
+static int keygen_write_ar_wait(struct fman_kg_regs __iomem *regs, u32 fmkg_ar)
+{
+ iowrite32be(fmkg_ar, &regs->fmkg_ar);
+
+ /* Wait for GO bit field to be idle */
+ while (fmkg_ar & FM_KG_KGAR_GO)
+ fmkg_ar = ioread32be(&regs->fmkg_ar);
+
+ if (fmkg_ar & FM_KG_KGAR_ERR)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* build_ar_scheme
+ *
+ * Build Action Register value for scheme settings
+ *
+ * scheme_id: Scheme ID
+ * update_counter: update scheme counter
+ * write: true for action to write the scheme or false for read action
+ *
+ * Return: AR value
+ */
+static u32 build_ar_scheme(u8 scheme_id, bool update_counter, bool write)
+{
+ u32 rw = (u32)(write ? FM_KG_KGAR_WRITE : FM_KG_KGAR_READ);
+
+ return (u32)(FM_KG_KGAR_GO |
+ rw |
+ FM_KG_KGAR_SEL_SCHEME_ENTRY |
+ DUMMY_PORT_ID |
+ ((u32)scheme_id << FM_KG_KGAR_NUM_SHIFT) |
+ (update_counter ? FM_KG_KGAR_SCM_WSEL_UPDATE_CNT : 0));
+}
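+/* For example, writing scheme 3 with a counter update yields
+ * FM_KG_KGAR_GO | FM_KG_KGAR_WRITE | (3 << FM_KG_KGAR_NUM_SHIFT) |
+ * FM_KG_KGAR_SCM_WSEL_UPDATE_CNT, i.e. 0x80038000.
+ */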
+
+/* build_ar_bind_scheme
+ *
+ * Build Action Register value for port binding to schemes
+ *
+ * hwport_id: HW Port ID
+ * write: true for action to write the bind or false for read action
+ *
+ * Return: AR value
+ */
+static u32 build_ar_bind_scheme(u8 hwport_id, bool write)
+{
+ u32 rw = write ? (u32)FM_KG_KGAR_WRITE : (u32)FM_KG_KGAR_READ;
+
+ return (u32)(FM_KG_KGAR_GO |
+ rw |
+ FM_KG_KGAR_SEL_PORT_ENTRY |
+ hwport_id |
+ FM_KG_KGAR_SEL_PORT_WSEL_SP);
+}
+
+/* keygen_write_sp
+ *
+ * Write Scheme Partition Register with specified value
+ *
+ * regs: KeyGen Registers
+ * sp: Scheme Partition register value
+ * add: true to add a scheme partition or false to clear
+ *
+ * Return: none
+ */
+static void keygen_write_sp(struct fman_kg_regs __iomem *regs, u32 sp, bool add)
+{
+ u32 tmp;
+
+ tmp = ioread32be(&regs->fmkg_pe.fmkg_pe_sp);
+
+ if (add)
+ tmp |= sp;
+ else
+ tmp &= ~sp;
+
+ iowrite32be(tmp, &regs->fmkg_pe.fmkg_pe_sp);
+}
+
+/* build_ar_bind_cls_plan
+ *
+ * Build Action Register value for Classification Plan
+ *
+ * hwport_id: HW Port ID
+ * write: true for action to write the CP or false for read action
+ *
+ * Return: AR value
+ */
+static u32 build_ar_bind_cls_plan(u8 hwport_id, bool write)
+{
+ u32 rw = write ? (u32)FM_KG_KGAR_WRITE : (u32)FM_KG_KGAR_READ;
+
+ return (u32)(FM_KG_KGAR_GO |
+ rw |
+ FM_KG_KGAR_SEL_PORT_ENTRY |
+ hwport_id |
+ FM_KG_KGAR_SEL_PORT_WSEL_CPP);
+}
+
+/* keygen_write_cpp
+ *
+ * Write Classification Plan Partition Register with specified value
+ *
+ * regs: KeyGen Registers
+ * cpp: CPP register value
+ *
+ * Return: none
+ */
+static void keygen_write_cpp(struct fman_kg_regs __iomem *regs, u32 cpp)
+{
+ iowrite32be(cpp, &regs->fmkg_pe.fmkg_pe_cpp);
+}
+
+/* keygen_write_scheme
+ *
+ * Write all Schemes Registers with specified values
+ *
+ * regs: KeyGen Registers
+ * scheme_id: Scheme ID
+ * scheme_regs: Scheme registers values desired to be written
+ * update_counter: update scheme counter
+ *
+ * Return: Zero for success or error code in case of failure
+ */
+static int keygen_write_scheme(struct fman_kg_regs __iomem *regs, u8 scheme_id,
+ struct fman_kg_scheme_regs *scheme_regs,
+ bool update_counter)
+{
+ u32 ar_reg;
+ int err, i;
+
+ /* Write indirect scheme registers */
+ iowrite32be(scheme_regs->kgse_mode, &regs->fmkg_sch.kgse_mode);
+ iowrite32be(scheme_regs->kgse_ekfc, &regs->fmkg_sch.kgse_ekfc);
+ iowrite32be(scheme_regs->kgse_ekdv, &regs->fmkg_sch.kgse_ekdv);
+ iowrite32be(scheme_regs->kgse_bmch, &regs->fmkg_sch.kgse_bmch);
+ iowrite32be(scheme_regs->kgse_bmcl, &regs->fmkg_sch.kgse_bmcl);
+ iowrite32be(scheme_regs->kgse_fqb, &regs->fmkg_sch.kgse_fqb);
+ iowrite32be(scheme_regs->kgse_hc, &regs->fmkg_sch.kgse_hc);
+ iowrite32be(scheme_regs->kgse_ppc, &regs->fmkg_sch.kgse_ppc);
+ iowrite32be(scheme_regs->kgse_spc, &regs->fmkg_sch.kgse_spc);
+ iowrite32be(scheme_regs->kgse_dv0, &regs->fmkg_sch.kgse_dv0);
+ iowrite32be(scheme_regs->kgse_dv1, &regs->fmkg_sch.kgse_dv1);
+ iowrite32be(scheme_regs->kgse_ccbs, &regs->fmkg_sch.kgse_ccbs);
+ iowrite32be(scheme_regs->kgse_mv, &regs->fmkg_sch.kgse_mv);
+ iowrite32be(scheme_regs->kgse_om, &regs->fmkg_sch.kgse_om);
+ iowrite32be(scheme_regs->kgse_vsp, &regs->fmkg_sch.kgse_vsp);
+
+ for (i = 0 ; i < FM_KG_NUM_OF_GENERIC_REGS ; i++)
+ iowrite32be(scheme_regs->kgse_gec[i],
+ &regs->fmkg_sch.kgse_gec[i]);
+
+ /* Write AR (Action register) */
+ ar_reg = build_ar_scheme(scheme_id, update_counter, true);
+ err = keygen_write_ar_wait(regs, ar_reg);
+ if (err != 0) {
+ pr_err("Writing Action Register failed\n");
+ return err;
+ }
+
+ return err;
+}
+
+/* get_free_scheme_id
+ *
+ * Find the first free scheme available to be used
+ *
+ * keygen: KeyGen handle
+ * scheme_id: pointer to scheme id
+ *
+ * Return: 0 on success, -EINVAL when there are no available free schemes
+ */
+static int get_free_scheme_id(struct fman_keygen *keygen, u8 *scheme_id)
+{
+ u8 i;
+
+ for (i = 0; i < FM_KG_MAX_NUM_OF_SCHEMES; i++)
+ if (!keygen->schemes[i].used) {
+ *scheme_id = i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/* get_scheme
+ *
+ * Provides the scheme for specified ID
+ *
+ * keygen: KeyGen handle
+ * scheme_id: Scheme ID
+ *
+ * Return: handle to required scheme
+ */
+static struct keygen_scheme *get_scheme(struct fman_keygen *keygen,
+ u8 scheme_id)
+{
+ if (scheme_id >= FM_KG_MAX_NUM_OF_SCHEMES)
+ return NULL;
+ return &keygen->schemes[scheme_id];
+}
+
+/* keygen_bind_port_to_schemes
+ *
+ * Bind the port to schemes
+ *
+ * keygen: KeyGen handle
+ * scheme_id: id of the scheme to bind to
+ * bind: true to bind the port or false to unbind it
+ *
+ * Return: Zero for success or error code in case of failure
+ */
+static int keygen_bind_port_to_schemes(struct fman_keygen *keygen,
+ u8 scheme_id,
+ bool bind)
+{
+ struct fman_kg_regs __iomem *keygen_regs = keygen->keygen_regs;
+ struct keygen_scheme *scheme;
+ u32 ar_reg;
+ u32 schemes_vector = 0;
+ int err;
+
+ scheme = get_scheme(keygen, scheme_id);
+ if (!scheme) {
+ pr_err("Requested Scheme does not exist\n");
+ return -EINVAL;
+ }
+ if (!scheme->used) {
+ pr_err("Cannot bind port to an invalid scheme\n");
+ return -EINVAL;
+ }
+
+ schemes_vector |= 1 << (31 - scheme_id);
+
+ ar_reg = build_ar_bind_scheme(scheme->hw_port_id, false);
+ err = keygen_write_ar_wait(keygen_regs, ar_reg);
+ if (err != 0) {
+ pr_err("Reading Action Register failed\n");
+ return err;
+ }
+
+ keygen_write_sp(keygen_regs, schemes_vector, bind);
+
+ ar_reg = build_ar_bind_scheme(scheme->hw_port_id, true);
+ err = keygen_write_ar_wait(keygen_regs, ar_reg);
+ if (err != 0) {
+ pr_err("Writing Action Register failed\n");
+ return err;
+ }
+
+ return 0;
+}
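The scheme partition vector built a few lines above assigns one bit per scheme, with scheme 0 in the most significant position. The following standalone user-space sketch (not driver code) just reproduces the `1 << (31 - scheme_id)` mapping so the layout is easy to verify.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Scheme 0 -> bit 31 (MSB), scheme 31 -> bit 0 (LSB) */
	for (unsigned int scheme_id = 0; scheme_id < 32; scheme_id += 31)
		printf("scheme %2u -> vector bit 0x%08x\n",
		       scheme_id, 1u << (31 - scheme_id));
	return 0;
}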
+
+/* keygen_scheme_setup
+ *
+ * Setup the scheme according to required configuration
+ *
+ * keygen: KeyGen handle
+ * scheme_id: scheme ID
+ * enable: true to enable scheme or false to disable it
+ *
+ * Return: Zero for success or error code in case of failure
+ */
+static int keygen_scheme_setup(struct fman_keygen *keygen, u8 scheme_id,
+ bool enable)
+{
+ struct fman_kg_regs __iomem *keygen_regs = keygen->keygen_regs;
+ struct fman_kg_scheme_regs scheme_regs;
+ struct keygen_scheme *scheme;
+ u32 tmp_reg;
+ int err;
+
+ scheme = get_scheme(keygen, scheme_id);
+ if (!scheme) {
+ pr_err("Requested Scheme does not exist\n");
+ return -EINVAL;
+ }
+ if (enable && scheme->used) {
+ pr_err("The requested Scheme is already used\n");
+ return -EINVAL;
+ }
+
+ /* Clear scheme registers */
+ memset(&scheme_regs, 0, sizeof(struct fman_kg_scheme_regs));
+
+ /* Setup all scheme registers: */
+ tmp_reg = 0;
+
+ if (enable) {
+ /* Enable Scheme */
+ tmp_reg |= KG_SCH_MODE_EN;
+ /* Enqueue frame NIA */
+ tmp_reg |= ENQUEUE_KG_DFLT_NIA;
+ }
+
+ scheme_regs.kgse_mode = tmp_reg;
+
+ scheme_regs.kgse_mv = scheme->match_vector;
+
+	/* Scheme doesn't override StorageProfile:
+ * valid only for DPAA_VERSION >= 11
+ */
+ scheme_regs.kgse_vsp = KG_SCH_VSP_NO_KSP_EN;
+
+ /* Configure Hard-Coded Rx Hashing: */
+
+ if (scheme->use_hashing) {
+ /* configure kgse_ekfc */
+ scheme_regs.kgse_ekfc = DEFAULT_HASH_KEY_EXTRACT_FIELDS;
+
+ /* configure kgse_ekdv */
+ tmp_reg = 0;
+ tmp_reg |= (KG_SCH_DEF_USE_KGSE_DV_0 <<
+ KG_SCH_DEF_IP_ADDR_SHIFT);
+ tmp_reg |= (KG_SCH_DEF_USE_KGSE_DV_1 <<
+ KG_SCH_DEF_L4_PORT_SHIFT);
+ scheme_regs.kgse_ekdv = tmp_reg;
+
+ /* configure kgse_dv0 */
+ scheme_regs.kgse_dv0 = DEFAULT_HASH_KEY_IPv4_ADDR;
+ /* configure kgse_dv1 */
+ scheme_regs.kgse_dv1 = DEFAULT_HASH_KEY_L4_PORT;
+
+ /* configure kgse_hc */
+ tmp_reg = 0;
+ tmp_reg |= ((scheme->hash_fqid_count - 1) <<
+ DEFAULT_HASH_DIST_FQID_SHIFT);
+ tmp_reg |= scheme->hashShift << KG_SCH_HASH_CONFIG_SHIFT_SHIFT;
+
+ if (scheme->symmetric_hash) {
+			/* Normally the extraction key should be verified to
+			 * comply with symmetric hashing, but because the
+			 * extraction is hard-coded we know the key is
+			 * symmetric
+ */
+ tmp_reg |= KG_SCH_HASH_CONFIG_SYM;
+ }
+ scheme_regs.kgse_hc = tmp_reg;
+ } else {
+ scheme_regs.kgse_ekfc = 0;
+ scheme_regs.kgse_hc = 0;
+ scheme_regs.kgse_ekdv = 0;
+ scheme_regs.kgse_dv0 = 0;
+ scheme_regs.kgse_dv1 = 0;
+ }
+
+ /* configure kgse_fqb: Scheme FQID base */
+ tmp_reg = 0;
+ tmp_reg |= scheme->base_fqid;
+ scheme_regs.kgse_fqb = tmp_reg;
+
+ /* features not used by hard-coded configuration */
+ scheme_regs.kgse_bmch = 0;
+ scheme_regs.kgse_bmcl = 0;
+ scheme_regs.kgse_spc = 0;
+
+ /* Write scheme registers */
+ err = keygen_write_scheme(keygen_regs, scheme_id, &scheme_regs, true);
+ if (err != 0) {
+ pr_err("Writing scheme registers failed\n");
+ return err;
+ }
+
+ /* Update used field for Scheme */
+ scheme->used = enable;
+
+ return 0;
+}
+
+/* keygen_init
+ *
+ * KeyGen initialization:
+ * Initializes and enables KeyGen, allocates driver memory, sets up registers,
+ * clears port bindings and invalidates all schemes
+ *
+ * keygen_regs: KeyGen registers base address
+ *
+ * Return: Handle to KeyGen driver
+ */
+struct fman_keygen *keygen_init(struct fman_kg_regs __iomem *keygen_regs)
+{
+ struct fman_keygen *keygen;
+ u32 ar;
+ int i;
+
+ /* Allocate memory for KeyGen driver */
+ keygen = kzalloc(sizeof(*keygen), GFP_KERNEL);
+ if (!keygen)
+ return NULL;
+
+ keygen->keygen_regs = keygen_regs;
+
+ /* KeyGen initialization (for Master partition):
+ * Setup KeyGen registers
+ */
+ iowrite32be(ENQUEUE_KG_DFLT_NIA, &keygen_regs->fmkg_gcr);
+
+ iowrite32be(FM_EX_KG_DOUBLE_ECC | FM_EX_KG_KEYSIZE_OVERFLOW,
+ &keygen_regs->fmkg_eer);
+
+ iowrite32be(0, &keygen_regs->fmkg_fdor);
+ iowrite32be(0, &keygen_regs->fmkg_gdv0r);
+ iowrite32be(0, &keygen_regs->fmkg_gdv1r);
+
+	/* Clear the binding between ports and schemes/classification plans
+	 * so that no port is bound to any scheme or classification plan
+ */
+ for (i = 0; i < FMAN_MAX_NUM_OF_HW_PORTS; i++) {
+ /* Clear all pe sp schemes registers */
+ keygen_write_sp(keygen_regs, 0xffffffff, false);
+ ar = build_ar_bind_scheme(i, true);
+ keygen_write_ar_wait(keygen_regs, ar);
+
+ /* Clear all pe cpp classification plans registers */
+ keygen_write_cpp(keygen_regs, 0);
+ ar = build_ar_bind_cls_plan(i, true);
+ keygen_write_ar_wait(keygen_regs, ar);
+ }
+
+ /* Enable all scheme interrupts */
+ iowrite32be(0xFFFFFFFF, &keygen_regs->fmkg_seer);
+ iowrite32be(0xFFFFFFFF, &keygen_regs->fmkg_seeer);
+
+	/* Enable KeyGen */
+ iowrite32be(ioread32be(&keygen_regs->fmkg_gcr) | FM_KG_KGGCR_EN,
+ &keygen_regs->fmkg_gcr);
+
+ return keygen;
+}
+EXPORT_SYMBOL(keygen_init);
+
+/* keygen_port_hashing_init
+ *
+ * Initializes a port for Rx Hashing with specified configuration parameters
+ *
+ * keygen: KeyGen handle
+ * hw_port_id: HW Port ID
+ * hash_base_fqid: Hashing Base FQID used for spreading
+ * hash_size: Hashing size
+ *
+ * Return: Zero for success or error code in case of failure
+ */
+int keygen_port_hashing_init(struct fman_keygen *keygen, u8 hw_port_id,
+ u32 hash_base_fqid, u32 hash_size)
+{
+ struct keygen_scheme *scheme;
+ u8 scheme_id;
+ int err;
+
+ /* Validate Scheme configuration parameters */
+ if (hash_base_fqid == 0 || (hash_base_fqid & ~0x00FFFFFF)) {
+ pr_err("Base FQID must be between 1 and 2^24-1\n");
+ return -EINVAL;
+ }
+ if (hash_size == 0 || (hash_size & (hash_size - 1)) != 0) {
+ pr_err("Hash size must be power of two\n");
+ return -EINVAL;
+ }
+
+ /* Find a free scheme */
+ err = get_free_scheme_id(keygen, &scheme_id);
+ if (err) {
+ pr_err("The maximum number of available Schemes has been exceeded\n");
+ return -EINVAL;
+ }
+
+ /* Create and configure Hard-Coded Scheme: */
+
+ scheme = get_scheme(keygen, scheme_id);
+ if (!scheme) {
+ pr_err("Requested Scheme does not exist\n");
+ return -EINVAL;
+ }
+ if (scheme->used) {
+ pr_err("The requested Scheme is already used\n");
+ return -EINVAL;
+ }
+
+ /* Clear all scheme fields because the scheme may have been
+ * previously used
+ */
+ memset(scheme, 0, sizeof(struct keygen_scheme));
+
+ /* Setup scheme: */
+ scheme->hw_port_id = hw_port_id;
+ scheme->use_hashing = true;
+ scheme->base_fqid = hash_base_fqid;
+ scheme->hash_fqid_count = hash_size;
+ scheme->symmetric_hash = DEFAULT_SYMMETRIC_HASH;
+ scheme->hashShift = DEFAULT_HASH_SHIFT;
+
+ /* All Schemes in hard-coded configuration
+ * are Indirect Schemes
+ */
+ scheme->match_vector = 0;
+
+ err = keygen_scheme_setup(keygen, scheme_id, true);
+ if (err != 0) {
+ pr_err("Scheme setup failed\n");
+ return err;
+ }
+
+ /* Bind Rx port to Scheme */
+ err = keygen_bind_port_to_schemes(keygen, scheme_id, true);
+ if (err != 0) {
+ pr_err("Binding port to schemes failed\n");
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(keygen_port_hashing_init);
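The two parameter checks at the top of keygen_port_hashing_init() (a base FQID that fits in 24 bits and a hash size that is a non-zero power of two) are plain bit tests; a minimal user-space sketch of the same tests, for illustration only:

#include <stdint.h>
#include <stdio.h>

static int valid_base_fqid(uint32_t fqid)
{
	/* Must be non-zero and fit in 24 bits, i.e. 1 .. 2^24 - 1 */
	return fqid != 0 && (fqid & ~0x00FFFFFFu) == 0;
}

static int valid_hash_size(uint32_t size)
{
	/* Non-zero power of two: exactly one bit set */
	return size != 0 && (size & (size - 1)) == 0;
}

int main(void)
{
	printf("fqid 0x100, size 128 -> %d\n",
	       valid_base_fqid(0x100) && valid_hash_size(128));	/* 1 */
	printf("fqid 0,     size 96  -> %d\n",
	       valid_base_fqid(0) && valid_hash_size(96));	/* 0 */
	return 0;
}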
diff --git a/drivers/net/ethernet/freescale/fman/fman_keygen.h b/drivers/net/ethernet/freescale/fman/fman_keygen.h
new file mode 100644
index 000000000..2cb0df453
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman_keygen.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
+/*
+ * Copyright 2017 NXP
+ */
+
+#ifndef __KEYGEN_H
+#define __KEYGEN_H
+
+#include <linux/io.h>
+
+struct fman_keygen;
+struct fman_kg_regs;
+
+struct fman_keygen *keygen_init(struct fman_kg_regs __iomem *keygen_regs);
+
+int keygen_port_hashing_init(struct fman_keygen *keygen, u8 hw_port_id,
+ u32 hash_base_fqid, u32 hash_size);
+
+#endif /* __KEYGEN_H */
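For context, a caller of this header (in this patch, the FMan port code) would roughly follow the call order sketched below; the HW port ID, base FQID and hash size are made-up example values, so treat this as a sketch of the intended sequence rather than actual configuration.

#include <linux/errno.h>
#include "fman_keygen.h"

/* Sketch only: the HW port ID and FQID range are made-up example values */
static int example_keygen_setup(struct fman_kg_regs __iomem *kg_regs)
{
	struct fman_keygen *keygen;

	keygen = keygen_init(kg_regs);
	if (!keygen)
		return -ENOMEM;

	/* Spread Rx traffic of HW port 0x08 across 128 FQs starting at 0x400 */
	return keygen_port_hashing_init(keygen, 0x08, 0x400, 128);
}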
diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h
new file mode 100644
index 000000000..65887a316
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman_mac.h
@@ -0,0 +1,256 @@
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* FM MAC ... */
+#ifndef __FM_MAC_H
+#define __FM_MAC_H
+
+#include "fman.h"
+
+#include <linux/slab.h>
+#include <linux/phy.h>
+#include <linux/if_ether.h>
+
+struct fman_mac;
+struct mac_device;
+
+/* Ethernet Address */
+typedef u8 enet_addr_t[ETH_ALEN];
+
+#define ENET_ADDR_TO_UINT64(_enet_addr) \
+ (u64)(((u64)(_enet_addr)[0] << 40) | \
+ ((u64)(_enet_addr)[1] << 32) | \
+ ((u64)(_enet_addr)[2] << 24) | \
+ ((u64)(_enet_addr)[3] << 16) | \
+ ((u64)(_enet_addr)[4] << 8) | \
+ ((u64)(_enet_addr)[5]))
+
+#define MAKE_ENET_ADDR_FROM_UINT64(_addr64, _enet_addr) \
+ do { \
+ int i; \
+ for (i = 0; i < ETH_ALEN; i++) \
+ (_enet_addr)[i] = \
+ (u8)((_addr64) >> ((5 - i) * 8)); \
+ } while (0)
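The two macros above pack a 6-byte Ethernet address into a u64 with octet 0 in the most significant byte and unpack it again. The standalone sketch below mirrors the same byte ordering and also shows that the multicast I/G bit of octet 0 ends up at bit 40, which is what the GROUP_ADDRESS test in fman_memac.c relies on.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAC_LEN 6

static uint64_t pack(const uint8_t *a)
{
	/* Same ordering as ENET_ADDR_TO_UINT64: octet 0 at bits 40..47 */
	return ((uint64_t)a[0] << 40) | ((uint64_t)a[1] << 32) |
	       ((uint64_t)a[2] << 24) | ((uint64_t)a[3] << 16) |
	       ((uint64_t)a[4] << 8)  |  (uint64_t)a[5];
}

static void unpack(uint64_t v, uint8_t *a)
{
	int i;

	/* Same ordering as MAKE_ENET_ADDR_FROM_UINT64 */
	for (i = 0; i < MAC_LEN; i++)
		a[i] = (uint8_t)(v >> ((5 - i) * 8));
}

int main(void)
{
	const uint8_t mac[MAC_LEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint8_t back[MAC_LEN];
	uint64_t v = pack(mac);

	unpack(v, back);
	/* 01:00:5e:... is multicast, so the I/G bit shows up at bit 40 */
	printf("packed=0x%012llx group=%d roundtrip_ok=%d\n",
	       (unsigned long long)v,
	       (v & 0x0000010000000000ULL) != 0,
	       memcmp(mac, back, MAC_LEN) == 0);
	return 0;
}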
+
+/* defaults */
+#define DEFAULT_RESET_ON_INIT false
+
+/* PFC defines */
+#define FSL_FM_PAUSE_TIME_ENABLE 0xf000
+#define FSL_FM_PAUSE_TIME_DISABLE 0
+#define FSL_FM_PAUSE_THRESH_DEFAULT 0
+
+#define FM_MAC_NO_PFC 0xff
+
+/* HASH defines */
+#define ETH_HASH_ENTRY_OBJ(ptr) \
+ hlist_entry_safe(ptr, struct eth_hash_entry, node)
+
+/* FM MAC Exceptions */
+enum fman_mac_exceptions {
+ FM_MAC_EX_10G_MDIO_SCAN_EVENT = 0
+ /* 10GEC MDIO scan event interrupt */
+ , FM_MAC_EX_10G_MDIO_CMD_CMPL
+ /* 10GEC MDIO command completion interrupt */
+ , FM_MAC_EX_10G_REM_FAULT
+ /* 10GEC, mEMAC Remote fault interrupt */
+ , FM_MAC_EX_10G_LOC_FAULT
+ /* 10GEC, mEMAC Local fault interrupt */
+ , FM_MAC_EX_10G_TX_ECC_ER
+ /* 10GEC, mEMAC Transmit frame ECC error interrupt */
+ , FM_MAC_EX_10G_TX_FIFO_UNFL
+ /* 10GEC, mEMAC Transmit FIFO underflow interrupt */
+ , FM_MAC_EX_10G_TX_FIFO_OVFL
+ /* 10GEC, mEMAC Transmit FIFO overflow interrupt */
+ , FM_MAC_EX_10G_TX_ER
+ /* 10GEC Transmit frame error interrupt */
+ , FM_MAC_EX_10G_RX_FIFO_OVFL
+ /* 10GEC, mEMAC Receive FIFO overflow interrupt */
+ , FM_MAC_EX_10G_RX_ECC_ER
+ /* 10GEC, mEMAC Receive frame ECC error interrupt */
+ , FM_MAC_EX_10G_RX_JAB_FRM
+ /* 10GEC Receive jabber frame interrupt */
+ , FM_MAC_EX_10G_RX_OVRSZ_FRM
+ /* 10GEC Receive oversized frame interrupt */
+ , FM_MAC_EX_10G_RX_RUNT_FRM
+ /* 10GEC Receive runt frame interrupt */
+ , FM_MAC_EX_10G_RX_FRAG_FRM
+ /* 10GEC Receive fragment frame interrupt */
+ , FM_MAC_EX_10G_RX_LEN_ER
+ /* 10GEC Receive payload length error interrupt */
+ , FM_MAC_EX_10G_RX_CRC_ER
+ /* 10GEC Receive CRC error interrupt */
+ , FM_MAC_EX_10G_RX_ALIGN_ER
+ /* 10GEC Receive alignment error interrupt */
+ , FM_MAC_EX_1G_BAB_RX
+ /* dTSEC Babbling receive error */
+ , FM_MAC_EX_1G_RX_CTL
+ /* dTSEC Receive control (pause frame) interrupt */
+ , FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET
+ /* dTSEC Graceful transmit stop complete */
+ , FM_MAC_EX_1G_BAB_TX
+ /* dTSEC Babbling transmit error */
+ , FM_MAC_EX_1G_TX_CTL
+ /* dTSEC Transmit control (pause frame) interrupt */
+ , FM_MAC_EX_1G_TX_ERR
+ /* dTSEC Transmit error */
+ , FM_MAC_EX_1G_LATE_COL
+ /* dTSEC Late collision */
+ , FM_MAC_EX_1G_COL_RET_LMT
+ /* dTSEC Collision retry limit */
+ , FM_MAC_EX_1G_TX_FIFO_UNDRN
+ /* dTSEC Transmit FIFO underrun */
+ , FM_MAC_EX_1G_MAG_PCKT
+ /* dTSEC Magic Packet detection */
+ , FM_MAC_EX_1G_MII_MNG_RD_COMPLET
+ /* dTSEC MII management read completion */
+ , FM_MAC_EX_1G_MII_MNG_WR_COMPLET
+ /* dTSEC MII management write completion */
+ , FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET
+ /* dTSEC Graceful receive stop complete */
+ , FM_MAC_EX_1G_DATA_ERR
+ /* dTSEC Internal data error on transmit */
+ , FM_MAC_1G_RX_DATA_ERR
+ /* dTSEC Internal data error on receive */
+ , FM_MAC_EX_1G_1588_TS_RX_ERR
+ /* dTSEC Time-Stamp Receive Error */
+ , FM_MAC_EX_1G_RX_MIB_CNT_OVFL
+ /* dTSEC MIB counter overflow */
+ , FM_MAC_EX_TS_FIFO_ECC_ERR
+ /* mEMAC Time-stamp FIFO ECC error interrupt;
+ * not supported on T4240/B4860 rev1 chips
+ */
+ , FM_MAC_EX_MAGIC_PACKET_INDICATION = FM_MAC_EX_1G_MAG_PCKT
+ /* mEMAC Magic Packet Indication Interrupt */
+};
+
+struct eth_hash_entry {
+ u64 addr; /* Ethernet Address */
+ struct list_head node;
+};
+
+typedef void (fman_mac_exception_cb)(struct mac_device *dev_id,
+ enum fman_mac_exceptions exceptions);
+
+/* FMan MAC config input */
+struct fman_mac_params {
+ /* MAC ID; numbering of dTSEC and 1G-mEMAC:
+ * 0 - FM_MAX_NUM_OF_1G_MACS;
+ * numbering of 10G-MAC (TGEC) and 10G-mEMAC:
+ * 0 - FM_MAX_NUM_OF_10G_MACS
+ */
+ u8 mac_id;
+ /* Note that the speed should indicate the maximum rate that
+ * this MAC should support rather than the actual speed;
+ */
+ u16 max_speed;
+	/* A handle to the FM object this port is related to */
+ void *fm;
+ fman_mac_exception_cb *event_cb; /* MDIO Events Callback Routine */
+ fman_mac_exception_cb *exception_cb;/* Exception Callback Routine */
+	/* SGMII/QSGMII interface with 1000BaseX auto-negotiation between MAC
+	 * and phy or backplane; Note: 1000BaseX auto-negotiation relates only
+	 * to the interface between MAC and phy/backplane, the SGMII phy can
+	 * still synchronize with the far-end phy at 10Mbps, 100Mbps or 1000Mbps
+ */
+ bool basex_if;
+};
+
+struct eth_hash_t {
+ u16 size;
+ struct list_head *lsts;
+};
+
+static inline struct eth_hash_entry
+*dequeue_addr_from_hash_entry(struct list_head *addr_lst)
+{
+ struct eth_hash_entry *hash_entry = NULL;
+
+ if (!list_empty(addr_lst)) {
+ hash_entry = ETH_HASH_ENTRY_OBJ(addr_lst->next);
+ list_del_init(&hash_entry->node);
+ }
+ return hash_entry;
+}
+
+static inline void free_hash_table(struct eth_hash_t *hash)
+{
+ struct eth_hash_entry *hash_entry;
+ int i = 0;
+
+ if (hash) {
+ if (hash->lsts) {
+ for (i = 0; i < hash->size; i++) {
+ hash_entry =
+ dequeue_addr_from_hash_entry(&hash->lsts[i]);
+ while (hash_entry) {
+ kfree(hash_entry);
+ hash_entry =
+ dequeue_addr_from_hash_entry(&hash->
+ lsts[i]);
+ }
+ }
+
+ kfree(hash->lsts);
+ }
+
+ kfree(hash);
+ }
+}
+
+static inline struct eth_hash_t *alloc_hash_table(u16 size)
+{
+ u32 i;
+ struct eth_hash_t *hash;
+
+ /* Allocate address hash table */
+ hash = kmalloc(sizeof(*hash), GFP_KERNEL);
+ if (!hash)
+ return NULL;
+
+ hash->size = size;
+
+ hash->lsts = kmalloc_array(hash->size, sizeof(struct list_head),
+ GFP_KERNEL);
+ if (!hash->lsts) {
+ kfree(hash);
+ return NULL;
+ }
+
+ for (i = 0; i < hash->size; i++)
+ INIT_LIST_HEAD(&hash->lsts[i]);
+
+ return hash;
+}
+
+#endif /* __FM_MAC_H */
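To make the lifetime of these inline hash helpers concrete, here is a hedged sketch of how a driver in this file's style might use them (the 64-entry table size, bucket index and address are arbitrary example values); the real users are the add/del hash-address handlers in fman_memac.c below.

#include <linux/list.h>
#include <linux/slab.h>
#include "fman_mac.h"

/* Sketch only: table size, bucket index and address are arbitrary examples */
static int example_hash_usage(void)
{
	struct eth_hash_t *hash;
	struct eth_hash_entry *entry;

	hash = alloc_hash_table(64);			/* 64 buckets */
	if (!hash)
		return -ENOMEM;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		free_hash_table(hash);
		return -ENOMEM;
	}
	entry->addr = 0x01005e000001ULL;		/* example multicast addr */
	INIT_LIST_HEAD(&entry->node);
	list_add_tail(&entry->node, &hash->lsts[5]);	/* example bucket */

	free_hash_table(hash);				/* frees the entry too */
	return 0;
}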
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
new file mode 100644
index 000000000..32d26cf17
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -0,0 +1,1251 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "fman_memac.h"
+#include "fman.h"
+#include "mac.h"
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/of_mdio.h>
+
+/* PCS registers */
+#define MDIO_SGMII_CR 0x00
+#define MDIO_SGMII_DEV_ABIL_SGMII 0x04
+#define MDIO_SGMII_LINK_TMR_L 0x12
+#define MDIO_SGMII_LINK_TMR_H 0x13
+#define MDIO_SGMII_IF_MODE 0x14
+
+/* SGMII Control defines */
+#define SGMII_CR_AN_EN 0x1000
+#define SGMII_CR_RESTART_AN 0x0200
+#define SGMII_CR_FD 0x0100
+#define SGMII_CR_SPEED_SEL1_1G 0x0040
+#define SGMII_CR_DEF_VAL (SGMII_CR_AN_EN | SGMII_CR_FD | \
+ SGMII_CR_SPEED_SEL1_1G)
+
+/* SGMII Device Ability for SGMII defines */
+#define MDIO_SGMII_DEV_ABIL_SGMII_MODE 0x4001
+#define MDIO_SGMII_DEV_ABIL_BASEX_MODE 0x01A0
+
+/* Link timer define */
+#define LINK_TMR_L 0xa120
+#define LINK_TMR_H 0x0007
+#define LINK_TMR_L_BASEX 0xaf08
+#define LINK_TMR_H_BASEX 0x002f
+
+/* SGMII IF Mode defines */
+#define IF_MODE_USE_SGMII_AN 0x0002
+#define IF_MODE_SGMII_EN 0x0001
+#define IF_MODE_SGMII_SPEED_100M 0x0004
+#define IF_MODE_SGMII_SPEED_1G 0x0008
+#define IF_MODE_SGMII_DUPLEX_HALF 0x0010
+
+/* Num of additional exact match MAC adr regs */
+#define MEMAC_NUM_OF_PADDRS 7
+
+/* Control and Configuration Register (COMMAND_CONFIG) */
+#define CMD_CFG_REG_LOWP_RXETY 0x01000000 /* 07 Rx low power indication */
+#define CMD_CFG_TX_LOWP_ENA 0x00800000 /* 08 Tx Low Power Idle Enable */
+#define CMD_CFG_PFC_MODE 0x00080000 /* 12 Enable PFC */
+#define CMD_CFG_NO_LEN_CHK 0x00020000 /* 14 Payload length check disable */
+#define CMD_CFG_SW_RESET 0x00001000 /* 19 S/W Reset, self clearing bit */
+#define CMD_CFG_TX_PAD_EN 0x00000800 /* 20 Enable Tx padding of frames */
+#define CMD_CFG_PAUSE_IGNORE 0x00000100 /* 23 Ignore Pause frame quanta */
+#define CMD_CFG_CRC_FWD 0x00000040 /* 25 Terminate/frwd CRC of frames */
+#define CMD_CFG_PAD_EN 0x00000020 /* 26 Frame padding removal */
+#define CMD_CFG_PROMIS_EN 0x00000010 /* 27 Promiscuous operation enable */
+#define CMD_CFG_RX_EN 0x00000002 /* 30 MAC receive path enable */
+#define CMD_CFG_TX_EN 0x00000001 /* 31 MAC transmit path enable */
+
+/* Transmit FIFO Sections Register (TX_FIFO_SECTIONS) */
+#define TX_FIFO_SECTIONS_TX_EMPTY_MASK 0xFFFF0000
+#define TX_FIFO_SECTIONS_TX_AVAIL_MASK 0x0000FFFF
+#define TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G 0x00400000
+#define TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G 0x00100000
+#define TX_FIFO_SECTIONS_TX_AVAIL_10G 0x00000019
+#define TX_FIFO_SECTIONS_TX_AVAIL_1G 0x00000020
+#define TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G 0x00000060
+
+#define GET_TX_EMPTY_DEFAULT_VALUE(_val) \
+do { \
+ _val &= ~TX_FIFO_SECTIONS_TX_EMPTY_MASK; \
+ ((_val == TX_FIFO_SECTIONS_TX_AVAIL_10G) ? \
+ (_val |= TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G) :\
+ (_val |= TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G));\
+} while (0)
+
+/* Interface Mode Register (IF_MODE) */
+
+#define IF_MODE_MASK 0x00000003 /* 30-31 Mask on i/f mode bits */
+#define IF_MODE_10G 0x00000000 /* 30-31 10G interface */
+#define IF_MODE_MII 0x00000001 /* 30-31 MII interface */
+#define IF_MODE_GMII 0x00000002 /* 30-31 GMII (1G) interface */
+#define IF_MODE_RGMII 0x00000004
+#define IF_MODE_RGMII_AUTO 0x00008000
+#define IF_MODE_RGMII_1000 0x00004000 /* 10 - 1000Mbps RGMII */
+#define IF_MODE_RGMII_100 0x00000000 /* 00 - 100Mbps RGMII */
+#define IF_MODE_RGMII_10 0x00002000 /* 01 - 10Mbps RGMII */
+#define IF_MODE_RGMII_SP_MASK 0x00006000 /* Setsp mask bits */
+#define IF_MODE_RGMII_FD 0x00001000 /* Full duplex RGMII */
+#define IF_MODE_HD 0x00000040 /* Half duplex operation */
+
+/* Hash table Control Register (HASHTABLE_CTRL) */
+#define HASH_CTRL_MCAST_EN 0x00000100
+/* 26-31 Hash table address code */
+#define HASH_CTRL_ADDR_MASK 0x0000003F
+/* MAC mcast indication */
+#define GROUP_ADDRESS 0x0000010000000000LL
+#define HASH_TABLE_SIZE 64 /* Hash tbl size */
+
+/* Interrupt Mask Register (IMASK) */
+#define MEMAC_IMASK_MGI 0x40000000 /* 1 Magic pkt detect indication */
+#define MEMAC_IMASK_TSECC_ER 0x20000000 /* 2 Timestamp FIFO ECC error evnt */
+#define MEMAC_IMASK_TECC_ER 0x02000000 /* 6 Transmit frame ECC error evnt */
+#define MEMAC_IMASK_RECC_ER 0x01000000 /* 7 Receive frame ECC error evnt */
+
+#define MEMAC_ALL_ERRS_IMASK \
+ ((u32)(MEMAC_IMASK_TSECC_ER | \
+ MEMAC_IMASK_TECC_ER | \
+ MEMAC_IMASK_RECC_ER | \
+ MEMAC_IMASK_MGI))
+
+#define MEMAC_IEVNT_PCS 0x80000000 /* PCS (XG). Link sync (G) */
+#define MEMAC_IEVNT_AN 0x40000000 /* Auto-negotiation */
+#define MEMAC_IEVNT_LT 0x20000000 /* Link Training/New page */
+#define MEMAC_IEVNT_MGI 0x00004000 /* Magic pkt detection */
+#define MEMAC_IEVNT_TS_ECC_ER 0x00002000 /* Timestamp FIFO ECC error*/
+#define MEMAC_IEVNT_RX_FIFO_OVFL 0x00001000 /* Rx FIFO overflow */
+#define MEMAC_IEVNT_TX_FIFO_UNFL 0x00000800 /* Tx FIFO underflow */
+#define MEMAC_IEVNT_TX_FIFO_OVFL 0x00000400 /* Tx FIFO overflow */
+#define MEMAC_IEVNT_TX_ECC_ER 0x00000200 /* Tx frame ECC error */
+#define MEMAC_IEVNT_RX_ECC_ER 0x00000100 /* Rx frame ECC error */
+#define MEMAC_IEVNT_LI_FAULT 0x00000080 /* Link Interruption flt */
+#define MEMAC_IEVNT_RX_EMPTY 0x00000040 /* Rx FIFO empty */
+#define MEMAC_IEVNT_TX_EMPTY 0x00000020 /* Tx FIFO empty */
+#define MEMAC_IEVNT_RX_LOWP 0x00000010 /* Low Power Idle */
+#define MEMAC_IEVNT_PHY_LOS 0x00000004 /* Phy loss of signal */
+#define MEMAC_IEVNT_REM_FAULT 0x00000002 /* Remote fault (XGMII) */
+#define MEMAC_IEVNT_LOC_FAULT 0x00000001 /* Local fault (XGMII) */
+
+#define DEFAULT_PAUSE_QUANTA 0xf000
+#define DEFAULT_FRAME_LENGTH 0x600
+#define DEFAULT_TX_IPG_LENGTH 12
+
+#define CLXY_PAUSE_QUANTA_CLX_PQNT 0x0000FFFF
+#define CLXY_PAUSE_QUANTA_CLY_PQNT 0xFFFF0000
+#define CLXY_PAUSE_THRESH_CLX_QTH 0x0000FFFF
+#define CLXY_PAUSE_THRESH_CLY_QTH 0xFFFF0000
+
+struct mac_addr {
+ /* Lower 32 bits of 48-bit MAC address */
+ u32 mac_addr_l;
+ /* Upper 16 bits of 48-bit MAC address */
+ u32 mac_addr_u;
+};
+
+/* memory map */
+struct memac_regs {
+ u32 res0000[2]; /* General Control and Status */
+ u32 command_config; /* 0x008 Ctrl and cfg */
+ struct mac_addr mac_addr0; /* 0x00C-0x010 MAC_ADDR_0...1 */
+ u32 maxfrm; /* 0x014 Max frame length */
+ u32 res0018[1];
+ u32 rx_fifo_sections; /* Receive FIFO configuration reg */
+ u32 tx_fifo_sections; /* Transmit FIFO configuration reg */
+ u32 res0024[2];
+ u32 hashtable_ctrl; /* 0x02C Hash table control */
+ u32 res0030[4];
+ u32 ievent; /* 0x040 Interrupt event */
+ u32 tx_ipg_length; /* 0x044 Transmitter inter-packet-gap */
+ u32 res0048;
+ u32 imask; /* 0x04C Interrupt mask */
+ u32 res0050;
+ u32 pause_quanta[4]; /* 0x054 Pause quanta */
+ u32 pause_thresh[4]; /* 0x064 Pause quanta threshold */
+ u32 rx_pause_status; /* 0x074 Receive pause status */
+ u32 res0078[2];
+ struct mac_addr mac_addr[MEMAC_NUM_OF_PADDRS];/* 0x80-0x0B4 mac padr */
+ u32 lpwake_timer; /* 0x0B8 Low Power Wakeup Timer */
+ u32 sleep_timer; /* 0x0BC Transmit EEE Low Power Timer */
+ u32 res00c0[8];
+ u32 statn_config; /* 0x0E0 Statistics configuration */
+ u32 res00e4[7];
+ /* Rx Statistics Counter */
+ u32 reoct_l;
+ u32 reoct_u;
+ u32 roct_l;
+ u32 roct_u;
+ u32 raln_l;
+ u32 raln_u;
+ u32 rxpf_l;
+ u32 rxpf_u;
+ u32 rfrm_l;
+ u32 rfrm_u;
+ u32 rfcs_l;
+ u32 rfcs_u;
+ u32 rvlan_l;
+ u32 rvlan_u;
+ u32 rerr_l;
+ u32 rerr_u;
+ u32 ruca_l;
+ u32 ruca_u;
+ u32 rmca_l;
+ u32 rmca_u;
+ u32 rbca_l;
+ u32 rbca_u;
+ u32 rdrp_l;
+ u32 rdrp_u;
+ u32 rpkt_l;
+ u32 rpkt_u;
+ u32 rund_l;
+ u32 rund_u;
+ u32 r64_l;
+ u32 r64_u;
+ u32 r127_l;
+ u32 r127_u;
+ u32 r255_l;
+ u32 r255_u;
+ u32 r511_l;
+ u32 r511_u;
+ u32 r1023_l;
+ u32 r1023_u;
+ u32 r1518_l;
+ u32 r1518_u;
+ u32 r1519x_l;
+ u32 r1519x_u;
+ u32 rovr_l;
+ u32 rovr_u;
+ u32 rjbr_l;
+ u32 rjbr_u;
+ u32 rfrg_l;
+ u32 rfrg_u;
+ u32 rcnp_l;
+ u32 rcnp_u;
+ u32 rdrntp_l;
+ u32 rdrntp_u;
+ u32 res01d0[12];
+ /* Tx Statistics Counter */
+ u32 teoct_l;
+ u32 teoct_u;
+ u32 toct_l;
+ u32 toct_u;
+ u32 res0210[2];
+ u32 txpf_l;
+ u32 txpf_u;
+ u32 tfrm_l;
+ u32 tfrm_u;
+ u32 tfcs_l;
+ u32 tfcs_u;
+ u32 tvlan_l;
+ u32 tvlan_u;
+ u32 terr_l;
+ u32 terr_u;
+ u32 tuca_l;
+ u32 tuca_u;
+ u32 tmca_l;
+ u32 tmca_u;
+ u32 tbca_l;
+ u32 tbca_u;
+ u32 res0258[2];
+ u32 tpkt_l;
+ u32 tpkt_u;
+ u32 tund_l;
+ u32 tund_u;
+ u32 t64_l;
+ u32 t64_u;
+ u32 t127_l;
+ u32 t127_u;
+ u32 t255_l;
+ u32 t255_u;
+ u32 t511_l;
+ u32 t511_u;
+ u32 t1023_l;
+ u32 t1023_u;
+ u32 t1518_l;
+ u32 t1518_u;
+ u32 t1519x_l;
+ u32 t1519x_u;
+ u32 res02a8[6];
+ u32 tcnp_l;
+ u32 tcnp_u;
+ u32 res02c8[14];
+ /* Line Interface Control */
+ u32 if_mode; /* 0x300 Interface Mode Control */
+ u32 if_status; /* 0x304 Interface Status */
+ u32 res0308[14];
+ /* HiGig/2 */
+ u32 hg_config; /* 0x340 Control and cfg */
+ u32 res0344[3];
+ u32 hg_pause_quanta; /* 0x350 Pause quanta */
+ u32 res0354[3];
+ u32 hg_pause_thresh; /* 0x360 Pause quanta threshold */
+ u32 res0364[3];
+ u32 hgrx_pause_status; /* 0x370 Receive pause status */
+ u32 hg_fifos_status; /* 0x374 fifos status */
+ u32 rhm; /* 0x378 rx messages counter */
+ u32 thm; /* 0x37C tx messages counter */
+};
+
+struct memac_cfg {
+ bool reset_on_init;
+ bool pause_ignore;
+ bool promiscuous_mode_enable;
+ struct fixed_phy_status *fixed_link;
+ u16 max_frame_length;
+ u16 pause_quanta;
+ u32 tx_ipg_length;
+};
+
+struct fman_mac {
+ /* Pointer to MAC memory mapped registers */
+ struct memac_regs __iomem *regs;
+ /* MAC address of device */
+ u64 addr;
+ /* Ethernet physical interface */
+ phy_interface_t phy_if;
+ u16 max_speed;
+ struct mac_device *dev_id; /* device cookie used by the exception cbs */
+ fman_mac_exception_cb *exception_cb;
+ fman_mac_exception_cb *event_cb;
+ /* Pointer to driver's global address hash table */
+ struct eth_hash_t *multicast_addr_hash;
+ /* Pointer to driver's individual address hash table */
+ struct eth_hash_t *unicast_addr_hash;
+ u8 mac_id;
+ u32 exceptions;
+ struct memac_cfg *memac_drv_param;
+ void *fm;
+ struct fman_rev_info fm_rev_info;
+ bool basex_if;
+ struct phy_device *pcsphy;
+ bool allmulti_enabled;
+};
+
+static void add_addr_in_paddr(struct memac_regs __iomem *regs, const u8 *adr,
+ u8 paddr_num)
+{
+ u32 tmp0, tmp1;
+
+ tmp0 = (u32)(adr[0] | adr[1] << 8 | adr[2] << 16 | adr[3] << 24);
+ tmp1 = (u32)(adr[4] | adr[5] << 8);
+
+ if (paddr_num == 0) {
+ iowrite32be(tmp0, &regs->mac_addr0.mac_addr_l);
+ iowrite32be(tmp1, &regs->mac_addr0.mac_addr_u);
+ } else {
+ iowrite32be(tmp0, &regs->mac_addr[paddr_num - 1].mac_addr_l);
+ iowrite32be(tmp1, &regs->mac_addr[paddr_num - 1].mac_addr_u);
+ }
+}
+
+static int reset(struct memac_regs __iomem *regs)
+{
+ u32 tmp;
+ int count;
+
+ tmp = ioread32be(&regs->command_config);
+
+ tmp |= CMD_CFG_SW_RESET;
+
+ iowrite32be(tmp, &regs->command_config);
+
+ count = 100;
+ do {
+ udelay(1);
+ } while ((ioread32be(&regs->command_config) & CMD_CFG_SW_RESET) &&
+ --count);
+
+ if (count == 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+static void set_exception(struct memac_regs __iomem *regs, u32 val,
+ bool enable)
+{
+ u32 tmp;
+
+ tmp = ioread32be(&regs->imask);
+ if (enable)
+ tmp |= val;
+ else
+ tmp &= ~val;
+
+ iowrite32be(tmp, &regs->imask);
+}
+
+static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
+ phy_interface_t phy_if, u16 speed, bool slow_10g_if,
+ u32 exceptions)
+{
+ u32 tmp;
+
+ /* Config */
+ tmp = 0;
+ if (cfg->promiscuous_mode_enable)
+ tmp |= CMD_CFG_PROMIS_EN;
+ if (cfg->pause_ignore)
+ tmp |= CMD_CFG_PAUSE_IGNORE;
+
+ /* Payload length check disable */
+ tmp |= CMD_CFG_NO_LEN_CHK;
+ /* Enable padding of frames in transmit direction */
+ tmp |= CMD_CFG_TX_PAD_EN;
+
+ tmp |= CMD_CFG_CRC_FWD;
+
+ iowrite32be(tmp, &regs->command_config);
+
+ /* Max Frame Length */
+ iowrite32be((u32)cfg->max_frame_length, &regs->maxfrm);
+
+ /* Pause Time */
+ iowrite32be((u32)cfg->pause_quanta, &regs->pause_quanta[0]);
+ iowrite32be((u32)0, &regs->pause_thresh[0]);
+
+ /* IF_MODE */
+ tmp = 0;
+ switch (phy_if) {
+ case PHY_INTERFACE_MODE_XGMII:
+ tmp |= IF_MODE_10G;
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ tmp |= IF_MODE_MII;
+ break;
+ default:
+ tmp |= IF_MODE_GMII;
+ if (phy_if == PHY_INTERFACE_MODE_RGMII ||
+ phy_if == PHY_INTERFACE_MODE_RGMII_ID ||
+ phy_if == PHY_INTERFACE_MODE_RGMII_RXID ||
+ phy_if == PHY_INTERFACE_MODE_RGMII_TXID)
+ tmp |= IF_MODE_RGMII | IF_MODE_RGMII_AUTO;
+ }
+ iowrite32be(tmp, &regs->if_mode);
+
+ /* TX_FIFO_SECTIONS */
+ tmp = 0;
+ if (phy_if == PHY_INTERFACE_MODE_XGMII) {
+ if (slow_10g_if) {
+ tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G |
+ TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G);
+ } else {
+ tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_10G |
+ TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G);
+ }
+ } else {
+ tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_1G |
+ TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G);
+ }
+ iowrite32be(tmp, &regs->tx_fifo_sections);
+
+ /* clear all pending events and set-up interrupts */
+ iowrite32be(0xffffffff, &regs->ievent);
+ set_exception(regs, exceptions, true);
+
+ return 0;
+}
+
+static void set_dflts(struct memac_cfg *cfg)
+{
+ cfg->reset_on_init = false;
+ cfg->promiscuous_mode_enable = false;
+ cfg->pause_ignore = false;
+ cfg->tx_ipg_length = DEFAULT_TX_IPG_LENGTH;
+ cfg->max_frame_length = DEFAULT_FRAME_LENGTH;
+ cfg->pause_quanta = DEFAULT_PAUSE_QUANTA;
+}
+
+static u32 get_mac_addr_hash_code(u64 eth_addr)
+{
+ u64 mask1, mask2;
+ u32 xor_val = 0;
+ u8 i, j;
+
+ for (i = 0; i < 6; i++) {
+ mask1 = eth_addr & (u64)0x01;
+ eth_addr >>= 1;
+
+ for (j = 0; j < 7; j++) {
+ mask2 = eth_addr & (u64)0x01;
+ mask1 ^= mask2;
+ eth_addr >>= 1;
+ }
+
+ xor_val |= (mask1 << (5 - i));
+ }
+
+ return xor_val;
+}
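get_mac_addr_hash_code() reduces the 48-bit address to a 6-bit bucket index by taking the parity of each address octet: bit 5 of the result is the parity of the last octet (the low byte of the packed u64) and bit 0 is the parity of the first. The standalone sketch below reimplements the same fold so it can be checked against sample addresses.

#include <stdint.h>
#include <stdio.h>

/* Same fold as get_mac_addr_hash_code(): one parity bit per address octet */
static uint32_t hash_code(uint64_t eth_addr)
{
	uint32_t xor_val = 0;
	int i, j;

	for (i = 0; i < 6; i++) {
		uint64_t parity = eth_addr & 1;

		eth_addr >>= 1;
		for (j = 0; j < 7; j++) {
			parity ^= eth_addr & 1;
			eth_addr >>= 1;
		}
		xor_val |= (uint32_t)(parity << (5 - i));
	}
	return xor_val;
}

int main(void)
{
	/* ff:ff:ff:ff:ff:ff -> every octet has even parity -> hash 0 */
	printf("broadcast         -> %u\n", hash_code(0xffffffffffffULL));
	/* 01:00:5e:00:00:01 -> one parity bit per octet, last octet in bit 5 */
	printf("01:00:5e:00:00:01 -> %u\n", hash_code(0x01005e000001ULL));
	return 0;
}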
+
+static void setup_sgmii_internal_phy(struct fman_mac *memac,
+ struct fixed_phy_status *fixed_link)
+{
+ u16 tmp_reg16;
+
+ if (WARN_ON(!memac->pcsphy))
+ return;
+
+ /* SGMII mode */
+ tmp_reg16 = IF_MODE_SGMII_EN;
+ if (!fixed_link)
+ /* AN enable */
+ tmp_reg16 |= IF_MODE_USE_SGMII_AN;
+ else {
+ switch (fixed_link->speed) {
+ case 10:
+ /* For 10M: IF_MODE[SPEED_10M] = 0 */
+ break;
+ case 100:
+ tmp_reg16 |= IF_MODE_SGMII_SPEED_100M;
+ break;
+ case 1000:
+ default:
+ tmp_reg16 |= IF_MODE_SGMII_SPEED_1G;
+ break;
+ }
+ if (!fixed_link->duplex)
+ tmp_reg16 |= IF_MODE_SGMII_DUPLEX_HALF;
+ }
+ phy_write(memac->pcsphy, MDIO_SGMII_IF_MODE, tmp_reg16);
+
+ /* Device ability according to SGMII specification */
+ tmp_reg16 = MDIO_SGMII_DEV_ABIL_SGMII_MODE;
+ phy_write(memac->pcsphy, MDIO_SGMII_DEV_ABIL_SGMII, tmp_reg16);
+
+ /* Adjust link timer for SGMII -
+ * According to Cisco SGMII specification the timer should be 1.6 ms.
+ * The link_timer register is configured in units of the clock.
+ * - When running as 1G SGMII, Serdes clock is 125 MHz, so
+ * unit = 1 / (125*10^6 Hz) = 8 ns.
+ * 1.6 ms in units of 8 ns = 1.6ms / 8ns = 2*10^5 = 0x30d40
+ * - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so
+ * unit = 1 / (312.5*10^6 Hz) = 3.2 ns.
+ * 1.6 ms in units of 3.2 ns = 1.6ms / 3.2ns = 5*10^5 = 0x7a120.
+	 * Since the 1G SGMII link_timer value would be too short for 2.5G
+	 * SGMII, we always program the 2.5G SGMII value here.
+ */
+ phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_H, LINK_TMR_H);
+ phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_L, LINK_TMR_L);
+
+ if (!fixed_link)
+ /* Restart AN */
+ tmp_reg16 = SGMII_CR_DEF_VAL | SGMII_CR_RESTART_AN;
+ else
+ /* AN disabled */
+ tmp_reg16 = SGMII_CR_DEF_VAL & ~SGMII_CR_AN_EN;
+ phy_write(memac->pcsphy, 0x0, tmp_reg16);
+}
+
+static void setup_sgmii_internal_phy_base_x(struct fman_mac *memac)
+{
+ u16 tmp_reg16;
+
+ /* AN Device capability */
+ tmp_reg16 = MDIO_SGMII_DEV_ABIL_BASEX_MODE;
+ phy_write(memac->pcsphy, MDIO_SGMII_DEV_ABIL_SGMII, tmp_reg16);
+
+ /* Adjust link timer for SGMII -
+ * For Serdes 1000BaseX auto-negotiation the timer should be 10 ms.
+ * The link_timer register is configured in units of the clock.
+ * - When running as 1G SGMII, Serdes clock is 125 MHz, so
+ * unit = 1 / (125*10^6 Hz) = 8 ns.
+ * 10 ms in units of 8 ns = 10ms / 8ns = 1250000 = 0x1312d0
+ * - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so
+ * unit = 1 / (312.5*10^6 Hz) = 3.2 ns.
+ * 10 ms in units of 3.2 ns = 10ms / 3.2ns = 3125000 = 0x2faf08.
+	 * Since the 1G SGMII link_timer value would be too short for 2.5G
+	 * SGMII, we always program the 2.5G SGMII value here.
+ */
+ phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_H, LINK_TMR_H_BASEX);
+ phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_L, LINK_TMR_L_BASEX);
+
+ /* Restart AN */
+ tmp_reg16 = SGMII_CR_DEF_VAL | SGMII_CR_RESTART_AN;
+ phy_write(memac->pcsphy, 0x0, tmp_reg16);
+}
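The link-timer constants programmed above follow directly from the arithmetic in the comments: 1.6 ms (SGMII) and 10 ms (1000BaseX) divided by the 3.2 ns unit of the 2.5G Serdes clock. A standalone arithmetic check, not driver code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t unit_ps = 3200;	/* 2.5G SGMII unit: 3.2 ns in picoseconds */

	/* 1.6 ms and 10 ms expressed in the 3.2 ns timer unit */
	uint32_t sgmii = (uint32_t)(1600000000ULL / unit_ps);	/* 500000  = 0x7a120  */
	uint32_t basex = (uint32_t)(10000000000ULL / unit_ps);	/* 3125000 = 0x2faf08 */

	/* Split into the {TMR_H, TMR_L} register pair as the driver does */
	printf("SGMII : H=0x%04x L=0x%04x\n", sgmii >> 16, sgmii & 0xffff);
	printf("BASE-X: H=0x%04x L=0x%04x\n", basex >> 16, basex & 0xffff);
	return 0;
}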
+
+static int check_init_parameters(struct fman_mac *memac)
+{
+ if (!memac->exception_cb) {
+ pr_err("Uninitialized exception handler\n");
+ return -EINVAL;
+ }
+ if (!memac->event_cb) {
+		pr_warn("Uninitialized event handler\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int get_exception_flag(enum fman_mac_exceptions exception)
+{
+ u32 bit_mask;
+
+ switch (exception) {
+ case FM_MAC_EX_10G_TX_ECC_ER:
+ bit_mask = MEMAC_IMASK_TECC_ER;
+ break;
+ case FM_MAC_EX_10G_RX_ECC_ER:
+ bit_mask = MEMAC_IMASK_RECC_ER;
+ break;
+ case FM_MAC_EX_TS_FIFO_ECC_ERR:
+ bit_mask = MEMAC_IMASK_TSECC_ER;
+ break;
+ case FM_MAC_EX_MAGIC_PACKET_INDICATION:
+ bit_mask = MEMAC_IMASK_MGI;
+ break;
+ default:
+ bit_mask = 0;
+ break;
+ }
+
+ return bit_mask;
+}
+
+static void memac_err_exception(void *handle)
+{
+ struct fman_mac *memac = (struct fman_mac *)handle;
+ struct memac_regs __iomem *regs = memac->regs;
+ u32 event, imask;
+
+ event = ioread32be(&regs->ievent);
+ imask = ioread32be(&regs->imask);
+
+	/* Imask includes both error and notification/event bits.
+	 * Keep only the error bits that are enabled by imask.
+ * The imask error bits are shifted by 16 bits offset from
+ * their corresponding location in the ievent - hence the >> 16
+ */
+ event &= ((imask & MEMAC_ALL_ERRS_IMASK) >> 16);
+
+ iowrite32be(event, &regs->ievent);
+
+ if (event & MEMAC_IEVNT_TS_ECC_ER)
+ memac->exception_cb(memac->dev_id, FM_MAC_EX_TS_FIFO_ECC_ERR);
+ if (event & MEMAC_IEVNT_TX_ECC_ER)
+ memac->exception_cb(memac->dev_id, FM_MAC_EX_10G_TX_ECC_ER);
+ if (event & MEMAC_IEVNT_RX_ECC_ER)
+ memac->exception_cb(memac->dev_id, FM_MAC_EX_10G_RX_ECC_ER);
+}
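The `>> 16` in this handler depends on each error bit sitting exactly 16 positions higher in IMASK than in IEVENT; using the MEMAC_IMASK_* and MEMAC_IEVNT_* values defined at the top of this file, that relationship can be spot-checked in isolation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Values copied from the MEMAC_IMASK_* / MEMAC_IEVNT_* defines above:
	 * MGI, TSECC_ER, TECC_ER, RECC_ER and their IEVENT counterparts
	 */
	const uint32_t imask[]  = { 0x40000000, 0x20000000, 0x02000000, 0x01000000 };
	const uint32_t ievent[] = { 0x00004000, 0x00002000, 0x00000200, 0x00000100 };
	int i, ok = 1;

	for (i = 0; i < 4; i++)
		ok &= ((imask[i] >> 16) == ievent[i]);

	printf("IMASK >> 16 matches IEVENT for MGI/TSECC/TECC/RECC: %d\n", ok);
	return 0;
}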
+
+static void memac_exception(void *handle)
+{
+ struct fman_mac *memac = (struct fman_mac *)handle;
+ struct memac_regs __iomem *regs = memac->regs;
+ u32 event, imask;
+
+ event = ioread32be(&regs->ievent);
+ imask = ioread32be(&regs->imask);
+
+	/* Imask includes both error and notification/event bits.
+	 * Keep only the error bits that are enabled by imask.
+ * The imask error bits are shifted by 16 bits offset from
+ * their corresponding location in the ievent - hence the >> 16
+ */
+ event &= ((imask & MEMAC_ALL_ERRS_IMASK) >> 16);
+
+ iowrite32be(event, &regs->ievent);
+
+ if (event & MEMAC_IEVNT_MGI)
+ memac->exception_cb(memac->dev_id,
+ FM_MAC_EX_MAGIC_PACKET_INDICATION);
+}
+
+static void free_init_resources(struct fman_mac *memac)
+{
+ fman_unregister_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id,
+ FMAN_INTR_TYPE_ERR);
+
+ fman_unregister_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id,
+ FMAN_INTR_TYPE_NORMAL);
+
+ /* release the driver's group hash table */
+ free_hash_table(memac->multicast_addr_hash);
+ memac->multicast_addr_hash = NULL;
+
+ /* release the driver's individual hash table */
+ free_hash_table(memac->unicast_addr_hash);
+ memac->unicast_addr_hash = NULL;
+}
+
+static bool is_init_done(struct memac_cfg *memac_drv_params)
+{
+	/* Init is considered done once the driver parameters were
+	 * consumed and freed by memac_init()
+	 */
+ if (!memac_drv_params)
+ return true;
+
+ return false;
+}
+
+static int memac_enable(struct fman_mac *memac)
+{
+ struct memac_regs __iomem *regs = memac->regs;
+ u32 tmp;
+
+ if (!is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->command_config);
+ tmp |= CMD_CFG_RX_EN | CMD_CFG_TX_EN;
+ iowrite32be(tmp, &regs->command_config);
+
+ return 0;
+}
+
+static void memac_disable(struct fman_mac *memac)
+
+{
+ struct memac_regs __iomem *regs = memac->regs;
+ u32 tmp;
+
+ WARN_ON_ONCE(!is_init_done(memac->memac_drv_param));
+
+ tmp = ioread32be(&regs->command_config);
+ tmp &= ~(CMD_CFG_RX_EN | CMD_CFG_TX_EN);
+ iowrite32be(tmp, &regs->command_config);
+}
+
+static int memac_set_promiscuous(struct fman_mac *memac, bool new_val)
+{
+ struct memac_regs __iomem *regs = memac->regs;
+ u32 tmp;
+
+ if (!is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->command_config);
+ if (new_val)
+ tmp |= CMD_CFG_PROMIS_EN;
+ else
+ tmp &= ~CMD_CFG_PROMIS_EN;
+
+ iowrite32be(tmp, &regs->command_config);
+
+ return 0;
+}
+
+static int memac_adjust_link(struct fman_mac *memac, u16 speed)
+{
+ struct memac_regs __iomem *regs = memac->regs;
+ u32 tmp;
+
+ if (!is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->if_mode);
+
+ /* Set full duplex */
+ tmp &= ~IF_MODE_HD;
+
+ if (phy_interface_mode_is_rgmii(memac->phy_if)) {
+ /* Configure RGMII in manual mode */
+ tmp &= ~IF_MODE_RGMII_AUTO;
+ tmp &= ~IF_MODE_RGMII_SP_MASK;
+ /* Full duplex */
+ tmp |= IF_MODE_RGMII_FD;
+
+ switch (speed) {
+ case SPEED_1000:
+ tmp |= IF_MODE_RGMII_1000;
+ break;
+ case SPEED_100:
+ tmp |= IF_MODE_RGMII_100;
+ break;
+ case SPEED_10:
+ tmp |= IF_MODE_RGMII_10;
+ break;
+ default:
+ break;
+ }
+ }
+
+ iowrite32be(tmp, &regs->if_mode);
+
+ return 0;
+}
+
+static void adjust_link_memac(struct mac_device *mac_dev)
+{
+ struct phy_device *phy_dev = mac_dev->phy_dev;
+ struct fman_mac *fman_mac;
+ bool rx_pause, tx_pause;
+ int err;
+
+ fman_mac = mac_dev->fman_mac;
+ memac_adjust_link(fman_mac, phy_dev->speed);
+ mac_dev->update_speed(mac_dev, phy_dev->speed);
+
+ fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
+ err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
+ if (err < 0)
+ dev_err(mac_dev->dev, "fman_set_mac_active_pause() = %d\n",
+ err);
+}
+
+static int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
+ u16 pause_time, u16 thresh_time)
+{
+ struct memac_regs __iomem *regs = memac->regs;
+ u32 tmp;
+
+ if (!is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->tx_fifo_sections);
+
+ GET_TX_EMPTY_DEFAULT_VALUE(tmp);
+ iowrite32be(tmp, &regs->tx_fifo_sections);
+
+ tmp = ioread32be(&regs->command_config);
+ tmp &= ~CMD_CFG_PFC_MODE;
+
+ iowrite32be(tmp, &regs->command_config);
+
+ tmp = ioread32be(&regs->pause_quanta[priority / 2]);
+ if (priority % 2)
+ tmp &= CLXY_PAUSE_QUANTA_CLX_PQNT;
+ else
+ tmp &= CLXY_PAUSE_QUANTA_CLY_PQNT;
+ tmp |= ((u32)pause_time << (16 * (priority % 2)));
+ iowrite32be(tmp, &regs->pause_quanta[priority / 2]);
+
+ tmp = ioread32be(&regs->pause_thresh[priority / 2]);
+ if (priority % 2)
+ tmp &= CLXY_PAUSE_THRESH_CLX_QTH;
+ else
+ tmp &= CLXY_PAUSE_THRESH_CLY_QTH;
+ tmp |= ((u32)thresh_time << (16 * (priority % 2)));
+ iowrite32be(tmp, &regs->pause_thresh[priority / 2]);
+
+ return 0;
+}
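Each 32-bit pause_quanta register holds two 16-bit class quanta, so priority N lands in register N/2 and in the upper half-word when N is odd; this standalone sketch mirrors the read-modify-write above for two example priorities (0xf000 is just the DEFAULT_PAUSE_QUANTA value shown earlier).

#include <stdint.h>
#include <stdio.h>

/* Mirrors memac_set_tx_pause_frames(): keep the other class, insert ours */
static uint32_t set_quanta(uint32_t reg, unsigned int priority, uint16_t quanta)
{
	if (priority % 2)
		reg &= 0x0000FFFF;	/* odd priority: keep low lane, write high */
	else
		reg &= 0xFFFF0000;	/* even priority: keep high lane, write low */

	return reg | ((uint32_t)quanta << (16 * (priority % 2)));
}

int main(void)
{
	uint32_t reg = 0;

	reg = set_quanta(reg, 0, 0xf000);	/* priority 0 -> low half-word  */
	reg = set_quanta(reg, 1, 0x1234);	/* priority 1 -> high half-word */
	printf("pause_quanta[0] = 0x%08x\n", reg);	/* 0x1234f000 */
	return 0;
}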
+
+static int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en)
+{
+ struct memac_regs __iomem *regs = memac->regs;
+ u32 tmp;
+
+ if (!is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->command_config);
+ if (en)
+ tmp &= ~CMD_CFG_PAUSE_IGNORE;
+ else
+ tmp |= CMD_CFG_PAUSE_IGNORE;
+
+ iowrite32be(tmp, &regs->command_config);
+
+ return 0;
+}
+
+static int memac_modify_mac_address(struct fman_mac *memac,
+ const enet_addr_t *enet_addr)
+{
+ if (!is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ add_addr_in_paddr(memac->regs, (const u8 *)(*enet_addr), 0);
+
+ return 0;
+}
+
+static int memac_add_hash_mac_address(struct fman_mac *memac,
+ enet_addr_t *eth_addr)
+{
+ struct memac_regs __iomem *regs = memac->regs;
+ struct eth_hash_entry *hash_entry;
+ u32 hash;
+ u64 addr;
+
+ if (!is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ addr = ENET_ADDR_TO_UINT64(*eth_addr);
+
+ if (!(addr & GROUP_ADDRESS)) {
+ /* Unicast addresses not supported in hash */
+ pr_err("Unicast Address\n");
+ return -EINVAL;
+ }
+ hash = get_mac_addr_hash_code(addr) & HASH_CTRL_ADDR_MASK;
+
+ /* Create element to be added to the driver hash table */
+ hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC);
+ if (!hash_entry)
+ return -ENOMEM;
+ hash_entry->addr = addr;
+ INIT_LIST_HEAD(&hash_entry->node);
+
+ list_add_tail(&hash_entry->node,
+ &memac->multicast_addr_hash->lsts[hash]);
+ iowrite32be(hash | HASH_CTRL_MCAST_EN, &regs->hashtable_ctrl);
+
+ return 0;
+}
+
+static int memac_set_allmulti(struct fman_mac *memac, bool enable)
+{
+ u32 entry;
+ struct memac_regs __iomem *regs = memac->regs;
+
+ if (!is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ if (enable) {
+ for (entry = 0; entry < HASH_TABLE_SIZE; entry++)
+ iowrite32be(entry | HASH_CTRL_MCAST_EN,
+ &regs->hashtable_ctrl);
+ } else {
+ for (entry = 0; entry < HASH_TABLE_SIZE; entry++)
+ iowrite32be(entry & ~HASH_CTRL_MCAST_EN,
+ &regs->hashtable_ctrl);
+ }
+
+ memac->allmulti_enabled = enable;
+
+ return 0;
+}
+
+static int memac_set_tstamp(struct fman_mac *memac, bool enable)
+{
+ return 0; /* Always enabled. */
+}
+
+static int memac_del_hash_mac_address(struct fman_mac *memac,
+ enet_addr_t *eth_addr)
+{
+ struct memac_regs __iomem *regs = memac->regs;
+ struct eth_hash_entry *hash_entry = NULL;
+ struct list_head *pos;
+ u32 hash;
+ u64 addr;
+
+ if (!is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ addr = ENET_ADDR_TO_UINT64(*eth_addr);
+
+ hash = get_mac_addr_hash_code(addr) & HASH_CTRL_ADDR_MASK;
+
+ list_for_each(pos, &memac->multicast_addr_hash->lsts[hash]) {
+ hash_entry = ETH_HASH_ENTRY_OBJ(pos);
+ if (hash_entry && hash_entry->addr == addr) {
+ list_del_init(&hash_entry->node);
+ kfree(hash_entry);
+ break;
+ }
+ }
+
+ if (!memac->allmulti_enabled) {
+ if (list_empty(&memac->multicast_addr_hash->lsts[hash]))
+ iowrite32be(hash & ~HASH_CTRL_MCAST_EN,
+ &regs->hashtable_ctrl);
+ }
+
+ return 0;
+}
+
+static int memac_set_exception(struct fman_mac *memac,
+ enum fman_mac_exceptions exception, bool enable)
+{
+ u32 bit_mask = 0;
+
+ if (!is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ bit_mask = get_exception_flag(exception);
+ if (bit_mask) {
+ if (enable)
+ memac->exceptions |= bit_mask;
+ else
+ memac->exceptions &= ~bit_mask;
+ } else {
+ pr_err("Undefined exception\n");
+ return -EINVAL;
+ }
+ set_exception(memac->regs, bit_mask, enable);
+
+ return 0;
+}
+
+static int memac_init(struct fman_mac *memac)
+{
+ struct memac_cfg *memac_drv_param;
+ u8 i;
+ enet_addr_t eth_addr;
+ bool slow_10g_if = false;
+ struct fixed_phy_status *fixed_link = NULL;
+ int err;
+ u32 reg32 = 0;
+
+ if (is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ err = check_init_parameters(memac);
+ if (err)
+ return err;
+
+ memac_drv_param = memac->memac_drv_param;
+
+ if (memac->fm_rev_info.major == 6 && memac->fm_rev_info.minor == 4)
+ slow_10g_if = true;
+
+ /* First, reset the MAC if desired. */
+ if (memac_drv_param->reset_on_init) {
+ err = reset(memac->regs);
+ if (err) {
+ pr_err("mEMAC reset failed\n");
+ return err;
+ }
+ }
+
+ /* MAC Address */
+ if (memac->addr != 0) {
+ MAKE_ENET_ADDR_FROM_UINT64(memac->addr, eth_addr);
+ add_addr_in_paddr(memac->regs, (const u8 *)eth_addr, 0);
+ }
+
+ fixed_link = memac_drv_param->fixed_link;
+
+ init(memac->regs, memac->memac_drv_param, memac->phy_if,
+ memac->max_speed, slow_10g_if, memac->exceptions);
+
+ /* FM_RX_FIFO_CORRUPT_ERRATA_10GMAC_A006320 errata workaround
+ * Exists only in FMan 6.0 and 6.3.
+ */
+ if ((memac->fm_rev_info.major == 6) &&
+ ((memac->fm_rev_info.minor == 0) ||
+ (memac->fm_rev_info.minor == 3))) {
+ /* MAC strips CRC from received frames - this workaround
+ * should decrease the likelihood of bug appearance
+ */
+ reg32 = ioread32be(&memac->regs->command_config);
+ reg32 &= ~CMD_CFG_CRC_FWD;
+ iowrite32be(reg32, &memac->regs->command_config);
+ }
+
+ if (memac->phy_if == PHY_INTERFACE_MODE_SGMII) {
+ /* Configure internal SGMII PHY */
+ if (memac->basex_if)
+ setup_sgmii_internal_phy_base_x(memac);
+ else
+ setup_sgmii_internal_phy(memac, fixed_link);
+ } else if (memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
+ /* Configure 4 internal SGMII PHYs */
+ for (i = 0; i < 4; i++) {
+ u8 qsmgii_phy_addr, phy_addr;
+ /* QSGMII PHY address occupies 3 upper bits of 5-bit
+ * phy_address; the lower 2 bits are used to extend
+ * register address space and access each one of 4
+ * ports inside QSGMII.
+ */
+ phy_addr = memac->pcsphy->mdio.addr;
+ qsmgii_phy_addr = (u8)((phy_addr << 2) | i);
+ memac->pcsphy->mdio.addr = qsmgii_phy_addr;
+ if (memac->basex_if)
+ setup_sgmii_internal_phy_base_x(memac);
+ else
+ setup_sgmii_internal_phy(memac, fixed_link);
+
+ memac->pcsphy->mdio.addr = phy_addr;
+ }
+ }
+
+ /* Max Frame Length */
+ err = fman_set_mac_max_frame(memac->fm, memac->mac_id,
+ memac_drv_param->max_frame_length);
+ if (err) {
+		pr_err("setting MAC max frame length failed\n");
+ return err;
+ }
+
+ memac->multicast_addr_hash = alloc_hash_table(HASH_TABLE_SIZE);
+ if (!memac->multicast_addr_hash) {
+ free_init_resources(memac);
+		pr_err("hash table allocation failed\n");
+ return -ENOMEM;
+ }
+
+ memac->unicast_addr_hash = alloc_hash_table(HASH_TABLE_SIZE);
+ if (!memac->unicast_addr_hash) {
+ free_init_resources(memac);
+		pr_err("hash table allocation failed\n");
+ return -ENOMEM;
+ }
+
+ fman_register_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id,
+ FMAN_INTR_TYPE_ERR, memac_err_exception, memac);
+
+ fman_register_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id,
+ FMAN_INTR_TYPE_NORMAL, memac_exception, memac);
+
+ kfree(memac_drv_param);
+ memac->memac_drv_param = NULL;
+
+ return 0;
+}
+
+static int memac_free(struct fman_mac *memac)
+{
+ free_init_resources(memac);
+
+ if (memac->pcsphy)
+ put_device(&memac->pcsphy->mdio.dev);
+
+ kfree(memac->memac_drv_param);
+ kfree(memac);
+
+ return 0;
+}
+
+static struct fman_mac *memac_config(struct mac_device *mac_dev,
+ struct fman_mac_params *params)
+{
+ struct fman_mac *memac;
+ struct memac_cfg *memac_drv_param;
+
+ /* allocate memory for the m_emac data structure */
+ memac = kzalloc(sizeof(*memac), GFP_KERNEL);
+ if (!memac)
+ return NULL;
+
+ /* allocate memory for the m_emac driver parameters data structure */
+ memac_drv_param = kzalloc(sizeof(*memac_drv_param), GFP_KERNEL);
+ if (!memac_drv_param) {
+ memac_free(memac);
+ return NULL;
+ }
+
+ /* Plant parameter structure pointer */
+ memac->memac_drv_param = memac_drv_param;
+
+ set_dflts(memac_drv_param);
+
+ memac->addr = ENET_ADDR_TO_UINT64(mac_dev->addr);
+
+ memac->regs = mac_dev->vaddr;
+ memac->max_speed = params->max_speed;
+ memac->phy_if = mac_dev->phy_if;
+ memac->mac_id = params->mac_id;
+ memac->exceptions = (MEMAC_IMASK_TSECC_ER | MEMAC_IMASK_TECC_ER |
+ MEMAC_IMASK_RECC_ER | MEMAC_IMASK_MGI);
+ memac->exception_cb = params->exception_cb;
+ memac->event_cb = params->event_cb;
+ memac->dev_id = mac_dev;
+ memac->fm = params->fm;
+ memac->basex_if = params->basex_if;
+
+ /* Save FMan revision */
+ fman_get_revision(memac->fm, &memac->fm_rev_info);
+
+ return memac;
+}
+
+int memac_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params)
+{
+ int err;
+ struct device_node *phy_node;
+ struct fixed_phy_status *fixed_link;
+ struct fman_mac *memac;
+
+ mac_dev->set_promisc = memac_set_promiscuous;
+ mac_dev->change_addr = memac_modify_mac_address;
+ mac_dev->add_hash_mac_addr = memac_add_hash_mac_address;
+ mac_dev->remove_hash_mac_addr = memac_del_hash_mac_address;
+ mac_dev->set_tx_pause = memac_set_tx_pause_frames;
+ mac_dev->set_rx_pause = memac_accept_rx_pause_frames;
+ mac_dev->set_exception = memac_set_exception;
+ mac_dev->set_allmulti = memac_set_allmulti;
+ mac_dev->set_tstamp = memac_set_tstamp;
+ mac_dev->set_multi = fman_set_multi;
+ mac_dev->adjust_link = adjust_link_memac;
+ mac_dev->enable = memac_enable;
+ mac_dev->disable = memac_disable;
+
+ if (params->max_speed == SPEED_10000)
+ mac_dev->phy_if = PHY_INTERFACE_MODE_XGMII;
+
+ mac_dev->fman_mac = memac_config(mac_dev, params);
+ if (!mac_dev->fman_mac) {
+ err = -EINVAL;
+ goto _return;
+ }
+
+ memac = mac_dev->fman_mac;
+ memac->memac_drv_param->max_frame_length = fman_get_max_frm();
+ memac->memac_drv_param->reset_on_init = true;
+ if (memac->phy_if == PHY_INTERFACE_MODE_SGMII ||
+ memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
+ phy_node = of_parse_phandle(mac_node, "pcsphy-handle", 0);
+ if (!phy_node) {
+ pr_err("PCS PHY node is not available\n");
+ err = -EINVAL;
+ goto _return_fm_mac_free;
+ }
+
+ memac->pcsphy = of_phy_find_device(phy_node);
+ if (!memac->pcsphy) {
+ pr_err("of_phy_find_device (PCS PHY) failed\n");
+ err = -EINVAL;
+ goto _return_fm_mac_free;
+ }
+ }
+
+ if (!mac_dev->phy_node && of_phy_is_fixed_link(mac_node)) {
+ struct phy_device *phy;
+
+ err = of_phy_register_fixed_link(mac_node);
+ if (err)
+ goto _return_fm_mac_free;
+
+ fixed_link = kzalloc(sizeof(*fixed_link), GFP_KERNEL);
+ if (!fixed_link) {
+ err = -ENOMEM;
+ goto _return_fm_mac_free;
+ }
+
+ mac_dev->phy_node = of_node_get(mac_node);
+ phy = of_phy_find_device(mac_dev->phy_node);
+ if (!phy) {
+ err = -EINVAL;
+ of_node_put(mac_dev->phy_node);
+ goto _return_fixed_link_free;
+ }
+
+ fixed_link->link = phy->link;
+ fixed_link->speed = phy->speed;
+ fixed_link->duplex = phy->duplex;
+ fixed_link->pause = phy->pause;
+ fixed_link->asym_pause = phy->asym_pause;
+
+ put_device(&phy->mdio.dev);
+ memac->memac_drv_param->fixed_link = fixed_link;
+ }
+
+ err = memac_init(mac_dev->fman_mac);
+ if (err < 0)
+ goto _return_fixed_link_free;
+
+ dev_info(mac_dev->dev, "FMan MEMAC\n");
+
+ goto _return;
+
+_return_fixed_link_free:
+ kfree(fixed_link);
+_return_fm_mac_free:
+ memac_free(mac_dev->fman_mac);
+_return:
+ return err;
+}
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.h b/drivers/net/ethernet/freescale/fman/fman_memac.h
new file mode 100644
index 000000000..5a3a14f96
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
+ */
+
+#ifndef __MEMAC_H
+#define __MEMAC_H
+
+#include "fman_mac.h"
+
+#include <linux/netdevice.h>
+#include <linux/phy_fixed.h>
+
+struct mac_device;
+
+int memac_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params);
+
+#endif /* __MEMAC_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.c b/drivers/net/ethernet/freescale/fman/fman_muram.c
new file mode 100644
index 000000000..f557d68e5
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
+ */
+
+#include "fman_muram.h"
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/genalloc.h>
+
+struct muram_info {
+ struct gen_pool *pool;
+ void __iomem *vbase;
+ size_t size;
+ phys_addr_t pbase;
+};
+
+static unsigned long fman_muram_vbase_to_offset(struct muram_info *muram,
+ unsigned long vaddr)
+{
+ return vaddr - (unsigned long)muram->vbase;
+}
+
+/**
+ * fman_muram_init
+ * @base: Pointer to base of memory mapped FM-MURAM.
+ * @size: Size of the FM-MURAM partition.
+ *
+ * Creates partition in the MURAM.
+ * The routine returns a pointer to the MURAM partition.
+ * This pointer must be passed to all other FM-MURAM function calls.
+ * No actual initialization or configuration of FM_MURAM hardware is done by
+ * this routine.
+ *
+ * Return: pointer to FM-MURAM object, or NULL on failure.
+ */
+struct muram_info *fman_muram_init(phys_addr_t base, size_t size)
+{
+ struct muram_info *muram;
+ void __iomem *vaddr;
+ int ret;
+
+ muram = kzalloc(sizeof(*muram), GFP_KERNEL);
+ if (!muram)
+ return NULL;
+
+ muram->pool = gen_pool_create(ilog2(64), -1);
+ if (!muram->pool) {
+ pr_err("%s(): MURAM pool create failed\n", __func__);
+ goto muram_free;
+ }
+
+ vaddr = ioremap(base, size);
+ if (!vaddr) {
+ pr_err("%s(): MURAM ioremap failed\n", __func__);
+ goto pool_destroy;
+ }
+
+ ret = gen_pool_add_virt(muram->pool, (unsigned long)vaddr,
+ base, size, -1);
+ if (ret < 0) {
+ pr_err("%s(): MURAM pool add failed\n", __func__);
+ iounmap(vaddr);
+ goto pool_destroy;
+ }
+
+ memset_io(vaddr, 0, (int)size);
+
+ muram->vbase = vaddr;
+ muram->pbase = base;
+ return muram;
+
+pool_destroy:
+ gen_pool_destroy(muram->pool);
+muram_free:
+ kfree(muram);
+ return NULL;
+}
+
+/**
+ * fman_muram_offset_to_vbase
+ * @muram: FM-MURAM module pointer.
+ * @offset: the offset of the memory block
+ *
+ * Gives the virtual address of the memory region at the specified offset
+ *
+ * Return: The address of the memory block
+ */
+unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
+ unsigned long offset)
+{
+ return offset + (unsigned long)muram->vbase;
+}
+
+/**
+ * fman_muram_alloc
+ * @muram: FM-MURAM module pointer.
+ * @size: Size of the memory to be allocated.
+ *
+ * Allocates some memory from the FM-MURAM partition.
+ *
+ * Return: offset (within MURAM) of the allocated memory; -ENOMEM on failure.
+ */
+unsigned long fman_muram_alloc(struct muram_info *muram, size_t size)
+{
+ unsigned long vaddr;
+
+ vaddr = gen_pool_alloc(muram->pool, size);
+ if (!vaddr)
+ return -ENOMEM;
+
+ memset_io((void __iomem *)vaddr, 0, size);
+
+ return fman_muram_vbase_to_offset(muram, vaddr);
+}
+
+/**
+ * fman_muram_free_mem
+ * @muram: FM-MURAM module pointer.
+ * @offset: offset of the memory region to be freed.
+ * @size: size of the memory to be freed.
+ *
+ * Frees previously allocated memory back to the FM-MURAM partition.
+ */
+void fman_muram_free_mem(struct muram_info *muram, unsigned long offset,
+ size_t size)
+{
+ unsigned long addr = fman_muram_offset_to_vbase(muram, offset);
+
+ gen_pool_free(muram->pool, addr, size);
+}
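
For orientation, a minimal usage sketch of the allocator above, assuming the caller already knows the MURAM physical base and size; the helper name and the 256-byte request are hypothetical. fman_muram_init() creates the gen_pool-backed partition, fman_muram_alloc() hands back an offset into it, fman_muram_offset_to_vbase() maps that offset back to a CPU address, and fman_muram_free_mem() returns the block to the pool.

#include <linux/err.h>
#include <linux/io.h>

#include "fman_muram.h"

/* Illustrative only: 'base' and 'size' would come from the FMan resource
 * describing the MURAM. There is no teardown helper in this file; the
 * partition lives for the lifetime of the FMan.
 */
static int muram_usage_sketch(phys_addr_t base, size_t size)
{
	struct muram_info *muram;
	unsigned long offset;
	void __iomem *block;

	muram = fman_muram_init(base, size);	/* map and zero the partition */
	if (!muram)
		return -ENOMEM;

	offset = fman_muram_alloc(muram, 256);	/* offset into MURAM, or -ENOMEM */
	if (IS_ERR_VALUE(offset))
		return -ENOMEM;

	/* Convert the offset back to a CPU-accessible address for MMIO use */
	block = (void __iomem *)fman_muram_offset_to_vbase(muram, offset);
	iowrite32be(0, block);

	fman_muram_free_mem(muram, offset, 256);	/* return it to the pool */
	return 0;
}
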
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.h b/drivers/net/ethernet/freescale/fman/fman_muram.h
new file mode 100644
index 000000000..3643af61b
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
+ */
+
+#ifndef __FM_MURAM_EXT
+#define __FM_MURAM_EXT
+
+#include <linux/types.h>
+
+#define FM_MURAM_INVALID_ALLOCATION -1
+
+/* Structure for FM MURAM information */
+struct muram_info;
+
+struct muram_info *fman_muram_init(phys_addr_t base, size_t size);
+
+unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
+ unsigned long offset);
+
+unsigned long fman_muram_alloc(struct muram_info *muram, size_t size);
+
+void fman_muram_free_mem(struct muram_info *muram, unsigned long offset,
+ size_t size);
+
+#endif /* __FM_MURAM_EXT */
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
new file mode 100644
index 000000000..ab90fe2be
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -0,0 +1,1920 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+#include <linux/libfdt_env.h>
+
+#include "fman.h"
+#include "fman_port.h"
+#include "fman_sp.h"
+#include "fman_keygen.h"
+
+/* Queue ID */
+#define DFLT_FQ_ID 0x00FFFFFF
+
+/* General defines */
+#define PORT_BMI_FIFO_UNITS 0x100
+
+#define MAX_PORT_FIFO_SIZE(bmi_max_fifo_size) \
+ min((u32)bmi_max_fifo_size, (u32)1024 * FMAN_BMI_FIFO_UNITS)
+
+#define PORT_CG_MAP_NUM 8
+#define PORT_PRS_RESULT_WORDS_NUM 8
+#define PORT_IC_OFFSET_UNITS 0x10
+
+#define MIN_EXT_BUF_SIZE 64
+
+#define BMI_PORT_REGS_OFFSET 0
+#define QMI_PORT_REGS_OFFSET 0x400
+#define HWP_PORT_REGS_OFFSET 0x800
+
+/* Default values */
+#define DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN \
+ DFLT_FM_SP_BUFFER_PREFIX_CONTEXT_DATA_ALIGN
+
+#define DFLT_PORT_CUT_BYTES_FROM_END 4
+
+#define DFLT_PORT_ERRORS_TO_DISCARD FM_PORT_FRM_ERR_CLS_DISCARD
+#define DFLT_PORT_MAX_FRAME_LENGTH 9600
+
+#define DFLT_PORT_RX_FIFO_PRI_ELEVATION_LEV(bmi_max_fifo_size) \
+ MAX_PORT_FIFO_SIZE(bmi_max_fifo_size)
+
+#define DFLT_PORT_RX_FIFO_THRESHOLD(major, bmi_max_fifo_size) \
+ (major == 6 ? \
+ MAX_PORT_FIFO_SIZE(bmi_max_fifo_size) : \
+ (MAX_PORT_FIFO_SIZE(bmi_max_fifo_size) * 3 / 4)) \
+
+#define DFLT_PORT_EXTRA_NUM_OF_FIFO_BUFS 0
+
+/* QMI defines */
+#define QMI_DEQ_CFG_SUBPORTAL_MASK 0x1f
+
+#define QMI_PORT_CFG_EN 0x80000000
+#define QMI_PORT_STATUS_DEQ_FD_BSY 0x20000000
+
+#define QMI_DEQ_CFG_PRI 0x80000000
+#define QMI_DEQ_CFG_TYPE1 0x10000000
+#define QMI_DEQ_CFG_TYPE2 0x20000000
+#define QMI_DEQ_CFG_TYPE3 0x30000000
+#define QMI_DEQ_CFG_PREFETCH_PARTIAL 0x01000000
+#define QMI_DEQ_CFG_PREFETCH_FULL 0x03000000
+#define QMI_DEQ_CFG_SP_MASK 0xf
+#define QMI_DEQ_CFG_SP_SHIFT 20
+
+#define QMI_BYTE_COUNT_LEVEL_CONTROL(_type) \
+ (_type == FMAN_PORT_TYPE_TX ? 0x1400 : 0x400)
+
+/* BMI defines */
+#define BMI_EBD_EN 0x80000000
+
+#define BMI_PORT_CFG_EN 0x80000000
+
+#define BMI_PORT_STATUS_BSY 0x80000000
+
+#define BMI_DMA_ATTR_SWP_SHIFT FMAN_SP_DMA_ATTR_SWP_SHIFT
+#define BMI_DMA_ATTR_WRITE_OPTIMIZE FMAN_SP_DMA_ATTR_WRITE_OPTIMIZE
+
+#define BMI_RX_FIFO_PRI_ELEVATION_SHIFT 16
+#define BMI_RX_FIFO_THRESHOLD_ETHE 0x80000000
+
+#define BMI_FRAME_END_CS_IGNORE_SHIFT 24
+#define BMI_FRAME_END_CS_IGNORE_MASK 0x0000001f
+
+#define BMI_RX_FRAME_END_CUT_SHIFT 16
+#define BMI_RX_FRAME_END_CUT_MASK 0x0000001f
+
+#define BMI_IC_TO_EXT_SHIFT FMAN_SP_IC_TO_EXT_SHIFT
+#define BMI_IC_TO_EXT_MASK 0x0000001f
+#define BMI_IC_FROM_INT_SHIFT FMAN_SP_IC_FROM_INT_SHIFT
+#define BMI_IC_FROM_INT_MASK 0x0000000f
+#define BMI_IC_SIZE_MASK 0x0000001f
+
+#define BMI_INT_BUF_MARG_SHIFT 28
+#define BMI_INT_BUF_MARG_MASK 0x0000000f
+#define BMI_EXT_BUF_MARG_START_SHIFT FMAN_SP_EXT_BUF_MARG_START_SHIFT
+#define BMI_EXT_BUF_MARG_START_MASK 0x000001ff
+#define BMI_EXT_BUF_MARG_END_MASK 0x000001ff
+
+#define BMI_CMD_MR_LEAC 0x00200000
+#define BMI_CMD_MR_SLEAC 0x00100000
+#define BMI_CMD_MR_MA 0x00080000
+#define BMI_CMD_MR_DEAS 0x00040000
+#define BMI_CMD_RX_MR_DEF (BMI_CMD_MR_LEAC | \
+ BMI_CMD_MR_SLEAC | \
+ BMI_CMD_MR_MA | \
+ BMI_CMD_MR_DEAS)
+#define BMI_CMD_TX_MR_DEF 0
+
+#define BMI_CMD_ATTR_ORDER 0x80000000
+#define BMI_CMD_ATTR_SYNC 0x02000000
+#define BMI_CMD_ATTR_COLOR_SHIFT 26
+
+#define BMI_FIFO_PIPELINE_DEPTH_SHIFT 12
+#define BMI_FIFO_PIPELINE_DEPTH_MASK 0x0000000f
+#define BMI_NEXT_ENG_FD_BITS_SHIFT 24
+
+#define BMI_EXT_BUF_POOL_VALID FMAN_SP_EXT_BUF_POOL_VALID
+#define BMI_EXT_BUF_POOL_EN_COUNTER FMAN_SP_EXT_BUF_POOL_EN_COUNTER
+#define BMI_EXT_BUF_POOL_BACKUP FMAN_SP_EXT_BUF_POOL_BACKUP
+#define BMI_EXT_BUF_POOL_ID_SHIFT 16
+#define BMI_EXT_BUF_POOL_ID_MASK 0x003F0000
+#define BMI_POOL_DEP_NUM_OF_POOLS_SHIFT 16
+
+#define BMI_TX_FIFO_MIN_FILL_SHIFT 16
+
+#define BMI_PRIORITY_ELEVATION_LEVEL ((0x3FF + 1) * PORT_BMI_FIFO_UNITS)
+#define BMI_FIFO_THRESHOLD ((0x3FF + 1) * PORT_BMI_FIFO_UNITS)
+
+#define BMI_DEQUEUE_PIPELINE_DEPTH(_type, _speed) \
+ ((_type == FMAN_PORT_TYPE_TX && _speed == 10000) ? 4 : 1)
+
+#define RX_ERRS_TO_ENQ \
+ (FM_PORT_FRM_ERR_DMA | \
+ FM_PORT_FRM_ERR_PHYSICAL | \
+ FM_PORT_FRM_ERR_SIZE | \
+ FM_PORT_FRM_ERR_EXTRACTION | \
+ FM_PORT_FRM_ERR_NO_SCHEME | \
+ FM_PORT_FRM_ERR_PRS_TIMEOUT | \
+ FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT | \
+ FM_PORT_FRM_ERR_BLOCK_LIMIT_EXCEEDED | \
+ FM_PORT_FRM_ERR_PRS_HDR_ERR | \
+ FM_PORT_FRM_ERR_KEYSIZE_OVERFLOW | \
+ FM_PORT_FRM_ERR_IPRE)
+
+/* NIA defines */
+#define NIA_ORDER_RESTOR 0x00800000
+#define NIA_ENG_BMI 0x00500000
+#define NIA_ENG_QMI_ENQ 0x00540000
+#define NIA_ENG_QMI_DEQ 0x00580000
+#define NIA_ENG_HWP 0x00440000
+#define NIA_ENG_HWK 0x00480000
+#define NIA_BMI_AC_ENQ_FRAME 0x00000002
+#define NIA_BMI_AC_TX_RELEASE 0x000002C0
+#define NIA_BMI_AC_RELEASE 0x000000C0
+#define NIA_BMI_AC_TX 0x00000274
+#define NIA_BMI_AC_FETCH_ALL_FRAME 0x0000020c
+
+/* Port IDs */
+#define TX_10G_PORT_BASE 0x30
+#define RX_10G_PORT_BASE 0x10
+
+/* BMI Rx port register map */
+struct fman_port_rx_bmi_regs {
+ u32 fmbm_rcfg; /* Rx Configuration */
+ u32 fmbm_rst; /* Rx Status */
+ u32 fmbm_rda; /* Rx DMA attributes */
+ u32 fmbm_rfp; /* Rx FIFO Parameters */
+ u32 fmbm_rfed; /* Rx Frame End Data */
+ u32 fmbm_ricp; /* Rx Internal Context Parameters */
+ u32 fmbm_rim; /* Rx Internal Buffer Margins */
+ u32 fmbm_rebm; /* Rx External Buffer Margins */
+ u32 fmbm_rfne; /* Rx Frame Next Engine */
+ u32 fmbm_rfca; /* Rx Frame Command Attributes. */
+ u32 fmbm_rfpne; /* Rx Frame Parser Next Engine */
+ u32 fmbm_rpso; /* Rx Parse Start Offset */
+ u32 fmbm_rpp; /* Rx Policer Profile */
+ u32 fmbm_rccb; /* Rx Coarse Classification Base */
+ u32 fmbm_reth; /* Rx Excessive Threshold */
+ u32 reserved003c[1]; /* (0x03C 0x03F) */
+ u32 fmbm_rprai[PORT_PRS_RESULT_WORDS_NUM];
+ /* Rx Parse Results Array Init */
+ u32 fmbm_rfqid; /* Rx Frame Queue ID */
+ u32 fmbm_refqid; /* Rx Error Frame Queue ID */
+ u32 fmbm_rfsdm; /* Rx Frame Status Discard Mask */
+ u32 fmbm_rfsem; /* Rx Frame Status Error Mask */
+ u32 fmbm_rfene; /* Rx Frame Enqueue Next Engine */
+ u32 reserved0074[0x2]; /* (0x074-0x07C) */
+ u32 fmbm_rcmne; /* Rx Frame Continuous Mode Next Engine */
+ u32 reserved0080[0x20]; /* (0x080 0x0FF) */
+ u32 fmbm_ebmpi[FMAN_PORT_MAX_EXT_POOLS_NUM];
+ /* Buffer Manager pool Information- */
+ u32 fmbm_acnt[FMAN_PORT_MAX_EXT_POOLS_NUM]; /* Allocate Counter- */
+ u32 reserved0130[8]; /* 0x130/0x140 - 0x15F reserved - */
+ u32 fmbm_rcgm[PORT_CG_MAP_NUM]; /* Congestion Group Map */
+ u32 fmbm_mpd; /* BM Pool Depletion */
+ u32 reserved0184[0x1F]; /* (0x184 0x1FF) */
+ u32 fmbm_rstc; /* Rx Statistics Counters */
+ u32 fmbm_rfrc; /* Rx Frame Counter */
+ u32 fmbm_rfbc; /* Rx Bad Frames Counter */
+ u32 fmbm_rlfc; /* Rx Large Frames Counter */
+ u32 fmbm_rffc; /* Rx Filter Frames Counter */
+ u32 fmbm_rfdc; /* Rx Frame Discard Counter */
+ u32 fmbm_rfldec; /* Rx Frames List DMA Error Counter */
+ u32 fmbm_rodc; /* Rx Out of Buffers Discard Counter */
+ u32 fmbm_rbdc; /* Rx Buffers Deallocate Counter */
+ u32 fmbm_rpec; /* Rx Prepare to enqueue Counter */
+ u32 reserved0224[0x16]; /* (0x224 0x27F) */
+ u32 fmbm_rpc; /* Rx Performance Counters */
+ u32 fmbm_rpcp; /* Rx Performance Count Parameters */
+ u32 fmbm_rccn; /* Rx Cycle Counter */
+ u32 fmbm_rtuc; /* Rx Tasks Utilization Counter */
+ u32 fmbm_rrquc; /* Rx Receive Queue Utilization cntr */
+ u32 fmbm_rduc; /* Rx DMA Utilization Counter */
+ u32 fmbm_rfuc; /* Rx FIFO Utilization Counter */
+ u32 fmbm_rpac; /* Rx Pause Activation Counter */
+ u32 reserved02a0[0x18]; /* (0x2A0 0x2FF) */
+ u32 fmbm_rdcfg[0x3]; /* Rx Debug Configuration */
+ u32 fmbm_rgpr; /* Rx General Purpose Register */
+ u32 reserved0310[0x3a];
+};
+
+/* BMI Tx port register map */
+struct fman_port_tx_bmi_regs {
+ u32 fmbm_tcfg; /* Tx Configuration */
+ u32 fmbm_tst; /* Tx Status */
+ u32 fmbm_tda; /* Tx DMA attributes */
+ u32 fmbm_tfp; /* Tx FIFO Parameters */
+ u32 fmbm_tfed; /* Tx Frame End Data */
+ u32 fmbm_ticp; /* Tx Internal Context Parameters */
+ u32 fmbm_tfdne; /* Tx Frame Dequeue Next Engine. */
+ u32 fmbm_tfca; /* Tx Frame Command attribute. */
+ u32 fmbm_tcfqid; /* Tx Confirmation Frame Queue ID. */
+ u32 fmbm_tefqid; /* Tx Frame Error Queue ID */
+ u32 fmbm_tfene; /* Tx Frame Enqueue Next Engine */
+ u32 fmbm_trlmts; /* Tx Rate Limiter Scale */
+ u32 fmbm_trlmt; /* Tx Rate Limiter */
+ u32 reserved0034[0x0e]; /* (0x034-0x6c) */
+ u32 fmbm_tccb; /* Tx Coarse Classification base */
+ u32 fmbm_tfne; /* Tx Frame Next Engine */
+ u32 fmbm_tpfcm[0x02];
+ /* Tx Priority based Flow Control (PFC) Mapping */
+ u32 fmbm_tcmne; /* Tx Frame Continuous Mode Next Engine */
+ u32 reserved0080[0x60]; /* (0x080-0x200) */
+ u32 fmbm_tstc; /* Tx Statistics Counters */
+ u32 fmbm_tfrc; /* Tx Frame Counter */
+ u32 fmbm_tfdc; /* Tx Frames Discard Counter */
+ u32 fmbm_tfledc; /* Tx Frame len error discard cntr */
+ u32 fmbm_tfufdc; /* Tx Frame unsprt frmt discard cntr */
+ u32 fmbm_tbdc; /* Tx Buffers Deallocate Counter */
+ u32 reserved0218[0x1A]; /* (0x218-0x280) */
+ u32 fmbm_tpc; /* Tx Performance Counters */
+ u32 fmbm_tpcp; /* Tx Performance Count Parameters */
+ u32 fmbm_tccn; /* Tx Cycle Counter */
+ u32 fmbm_ttuc; /* Tx Tasks Utilization Counter */
+ u32 fmbm_ttcquc; /* Tx Transmit conf Q util Counter */
+ u32 fmbm_tduc; /* Tx DMA Utilization Counter */
+ u32 fmbm_tfuc; /* Tx FIFO Utilization Counter */
+ u32 reserved029c[16]; /* (0x29C-0x2FF) */
+ u32 fmbm_tdcfg[0x3]; /* Tx Debug Configuration */
+ u32 fmbm_tgpr; /* Tx General Purpose Register */
+ u32 reserved0310[0x3a]; /* (0x310-0x3FF) */
+};
+
+/* BMI port register map */
+union fman_port_bmi_regs {
+ struct fman_port_rx_bmi_regs rx;
+ struct fman_port_tx_bmi_regs tx;
+};
+
+/* QMI port register map */
+struct fman_port_qmi_regs {
+ u32 fmqm_pnc; /* PortID n Configuration Register */
+ u32 fmqm_pns; /* PortID n Status Register */
+ u32 fmqm_pnts; /* PortID n Task Status Register */
+ u32 reserved00c[4]; /* 0xn00C - 0xn01B */
+ u32 fmqm_pnen; /* PortID n Enqueue NIA Register */
+ u32 fmqm_pnetfc; /* PortID n Enq Total Frame Counter */
+ u32 reserved024[2]; /* 0xn024 - 0x02B */
+ u32 fmqm_pndn; /* PortID n Dequeue NIA Register */
+ u32 fmqm_pndc; /* PortID n Dequeue Config Register */
+ u32 fmqm_pndtfc; /* PortID n Dequeue tot Frame cntr */
+ u32 fmqm_pndfdc; /* PortID n Dequeue FQID Dflt Cntr */
+ u32 fmqm_pndcc; /* PortID n Dequeue Confirm Counter */
+};
+
+#define HWP_HXS_COUNT 16
+#define HWP_HXS_PHE_REPORT 0x00000800
+#define HWP_HXS_PCAC_PSTAT 0x00000100
+#define HWP_HXS_PCAC_PSTOP 0x00000001
+#define HWP_HXS_TCP_OFFSET 0xA
+#define HWP_HXS_UDP_OFFSET 0xB
+#define HWP_HXS_SH_PAD_REM 0x80000000
+
+struct fman_port_hwp_regs {
+ struct {
+ u32 ssa; /* Soft Sequence Attachment */
+ u32 lcv; /* Line-up Enable Confirmation Mask */
+ } pmda[HWP_HXS_COUNT]; /* Parse Memory Direct Access Registers */
+ u32 reserved080[(0x3f8 - 0x080) / 4]; /* (0x080-0x3f7) */
+ u32 fmpr_pcac; /* Configuration Access Control */
+};
+
+/* QMI dequeue prefetch modes */
+enum fman_port_deq_prefetch {
+ FMAN_PORT_DEQ_NO_PREFETCH, /* No prefetch mode */
+ FMAN_PORT_DEQ_PART_PREFETCH, /* Partial prefetch mode */
+ FMAN_PORT_DEQ_FULL_PREFETCH /* Full prefetch mode */
+};
+
+/* A structure for defining FM port resources */
+struct fman_port_rsrc {
+ u32 num; /* Committed required resource */
+ u32 extra; /* Extra (not committed) required resource */
+};
+
+enum fman_port_dma_swap {
+ FMAN_PORT_DMA_NO_SWAP, /* No swap, transfer data as is */
+ FMAN_PORT_DMA_SWAP_LE,
+ /* The transferred data should be swapped in PPC Little Endian mode */
+ FMAN_PORT_DMA_SWAP_BE
+ /* The transferred data should be swapped in Big Endian mode */
+};
+
+/* Default port color */
+enum fman_port_color {
+ FMAN_PORT_COLOR_GREEN, /* Default port color is green */
+ FMAN_PORT_COLOR_YELLOW, /* Default port color is yellow */
+ FMAN_PORT_COLOR_RED, /* Default port color is red */
+ FMAN_PORT_COLOR_OVERRIDE /* Ignore color */
+};
+
+/* QMI dequeue from the SP channel - types */
+enum fman_port_deq_type {
+ FMAN_PORT_DEQ_BY_PRI,
+ /* Priority precedence and Intra-Class scheduling */
+ FMAN_PORT_DEQ_ACTIVE_FQ,
+ /* Active FQ precedence and Intra-Class scheduling */
+ FMAN_PORT_DEQ_ACTIVE_FQ_NO_ICS
+ /* Active FQ precedence and override Intra-Class scheduling */
+};
+
+/* External buffer pools configuration */
+struct fman_port_bpools {
+ u8 count; /* Num of pools to set up */
+ bool counters_enable; /* Enable allocate counters */
+ u8 grp_bp_depleted_num;
+ /* Number of pools that must be depleted before the BMI signals
+ * the MAC to send a pause frame
+ */
+ struct {
+ u8 bpid; /* BM pool ID */
+ u16 size;
+ /* Pool's size - must be in ascending order */
+ bool is_backup;
+ /* If this is a backup pool */
+ bool grp_bp_depleted;
+ /* Consider this buffer in multiple pools depletion criteria */
+ bool single_bp_depleted;
+ /* Consider this buffer in single pool depletion criteria */
+ } bpool[FMAN_PORT_MAX_EXT_POOLS_NUM];
+};
+
+struct fman_port_cfg {
+ u32 dflt_fqid;
+ u32 err_fqid;
+ u32 pcd_base_fqid;
+ u32 pcd_fqs_count;
+ u8 deq_sp;
+ bool deq_high_priority;
+ enum fman_port_deq_type deq_type;
+ enum fman_port_deq_prefetch deq_prefetch_option;
+ u16 deq_byte_cnt;
+ u8 cheksum_last_bytes_ignore;
+ u8 rx_cut_end_bytes;
+ struct fman_buf_pool_depletion buf_pool_depletion;
+ struct fman_ext_pools ext_buf_pools;
+ u32 tx_fifo_min_level;
+ u32 tx_fifo_low_comf_level;
+ u32 rx_pri_elevation;
+ u32 rx_fifo_thr;
+ struct fman_sp_buf_margins buf_margins;
+ u32 int_buf_start_margin;
+ struct fman_sp_int_context_data_copy int_context;
+ u32 discard_mask;
+ u32 err_mask;
+ struct fman_buffer_prefix_content buffer_prefix_content;
+ bool dont_release_buf;
+
+ u8 rx_fd_bits;
+ u32 tx_fifo_deq_pipeline_depth;
+ bool errata_A006320;
+ bool excessive_threshold_register;
+ bool fmbm_tfne_has_features;
+
+ enum fman_port_dma_swap dma_swap_data;
+ enum fman_port_color color;
+};
+
+struct fman_port_rx_pools_params {
+ u8 num_of_pools;
+ u16 largest_buf_size;
+};
+
+struct fman_port_dts_params {
+ void __iomem *base_addr; /* FMan port virtual memory */
+ enum fman_port_type type; /* Port type */
+ u16 speed; /* Port speed */
+ u8 id; /* HW Port Id */
+ u32 qman_channel_id; /* QMan channel id (non RX only) */
+ struct fman *fman; /* FMan Handle */
+};
+
+struct fman_port {
+ void *fm;
+ struct device *dev;
+ struct fman_rev_info rev_info;
+ u8 port_id;
+ enum fman_port_type port_type;
+ u16 port_speed;
+
+ union fman_port_bmi_regs __iomem *bmi_regs;
+ struct fman_port_qmi_regs __iomem *qmi_regs;
+ struct fman_port_hwp_regs __iomem *hwp_regs;
+
+ struct fman_sp_buffer_offsets buffer_offsets;
+
+ u8 internal_buf_offset;
+ struct fman_ext_pools ext_buf_pools;
+
+ u16 max_frame_length;
+ struct fman_port_rsrc open_dmas;
+ struct fman_port_rsrc tasks;
+ struct fman_port_rsrc fifo_bufs;
+ struct fman_port_rx_pools_params rx_pools_params;
+
+ struct fman_port_cfg *cfg;
+ struct fman_port_dts_params dts_params;
+
+ u8 ext_pools_num;
+ u32 max_port_fifo_size;
+ u32 max_num_of_ext_pools;
+ u32 max_num_of_sub_portals;
+ u32 bm_max_num_of_pools;
+};
+
+static int init_bmi_rx(struct fman_port *port)
+{
+ struct fman_port_rx_bmi_regs __iomem *regs = &port->bmi_regs->rx;
+ struct fman_port_cfg *cfg = port->cfg;
+ u32 tmp;
+
+ /* DMA attributes */
+ tmp = (u32)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT;
+ /* Enable write optimization */
+ tmp |= BMI_DMA_ATTR_WRITE_OPTIMIZE;
+ iowrite32be(tmp, &regs->fmbm_rda);
+
+ /* Rx FIFO parameters */
+ tmp = (cfg->rx_pri_elevation / PORT_BMI_FIFO_UNITS - 1) <<
+ BMI_RX_FIFO_PRI_ELEVATION_SHIFT;
+ tmp |= cfg->rx_fifo_thr / PORT_BMI_FIFO_UNITS - 1;
+ iowrite32be(tmp, &regs->fmbm_rfp);
+
+ if (cfg->excessive_threshold_register)
+ /* always allow access to the extra resources */
+ iowrite32be(BMI_RX_FIFO_THRESHOLD_ETHE, &regs->fmbm_reth);
+
+ /* Frame end data */
+ tmp = (cfg->cheksum_last_bytes_ignore & BMI_FRAME_END_CS_IGNORE_MASK) <<
+ BMI_FRAME_END_CS_IGNORE_SHIFT;
+ tmp |= (cfg->rx_cut_end_bytes & BMI_RX_FRAME_END_CUT_MASK) <<
+ BMI_RX_FRAME_END_CUT_SHIFT;
+ if (cfg->errata_A006320)
+ tmp &= 0xffe0ffff;
+ iowrite32be(tmp, &regs->fmbm_rfed);
+
+ /* Internal context parameters */
+ tmp = ((cfg->int_context.ext_buf_offset / PORT_IC_OFFSET_UNITS) &
+ BMI_IC_TO_EXT_MASK) << BMI_IC_TO_EXT_SHIFT;
+ tmp |= ((cfg->int_context.int_context_offset / PORT_IC_OFFSET_UNITS) &
+ BMI_IC_FROM_INT_MASK) << BMI_IC_FROM_INT_SHIFT;
+ tmp |= (cfg->int_context.size / PORT_IC_OFFSET_UNITS) &
+ BMI_IC_SIZE_MASK;
+ iowrite32be(tmp, &regs->fmbm_ricp);
+
+ /* Internal buffer offset */
+ tmp = ((cfg->int_buf_start_margin / PORT_IC_OFFSET_UNITS) &
+ BMI_INT_BUF_MARG_MASK) << BMI_INT_BUF_MARG_SHIFT;
+ iowrite32be(tmp, &regs->fmbm_rim);
+
+ /* External buffer margins */
+ tmp = (cfg->buf_margins.start_margins & BMI_EXT_BUF_MARG_START_MASK) <<
+ BMI_EXT_BUF_MARG_START_SHIFT;
+ tmp |= cfg->buf_margins.end_margins & BMI_EXT_BUF_MARG_END_MASK;
+ iowrite32be(tmp, &regs->fmbm_rebm);
+
+ /* Frame attributes */
+ tmp = BMI_CMD_RX_MR_DEF;
+ tmp |= BMI_CMD_ATTR_ORDER;
+ tmp |= (u32)cfg->color << BMI_CMD_ATTR_COLOR_SHIFT;
+ /* Synchronization request */
+ tmp |= BMI_CMD_ATTR_SYNC;
+
+ iowrite32be(tmp, &regs->fmbm_rfca);
+
+ /* NIA */
+ tmp = (u32)cfg->rx_fd_bits << BMI_NEXT_ENG_FD_BITS_SHIFT;
+
+ tmp |= NIA_ENG_HWP;
+ iowrite32be(tmp, &regs->fmbm_rfne);
+
+ /* Parser Next Engine NIA */
+ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME, &regs->fmbm_rfpne);
+
+ /* Enqueue NIA */
+ iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, &regs->fmbm_rfene);
+
+ /* Default/error queues */
+ iowrite32be((cfg->dflt_fqid & DFLT_FQ_ID), &regs->fmbm_rfqid);
+ iowrite32be((cfg->err_fqid & DFLT_FQ_ID), &regs->fmbm_refqid);
+
+ /* Discard/error masks */
+ iowrite32be(cfg->discard_mask, &regs->fmbm_rfsdm);
+ iowrite32be(cfg->err_mask, &regs->fmbm_rfsem);
+
+ return 0;
+}
+
+static int init_bmi_tx(struct fman_port *port)
+{
+ struct fman_port_tx_bmi_regs __iomem *regs = &port->bmi_regs->tx;
+ struct fman_port_cfg *cfg = port->cfg;
+ u32 tmp;
+
+ /* Tx Configuration register */
+ tmp = 0;
+ iowrite32be(tmp, &regs->fmbm_tcfg);
+
+ /* DMA attributes */
+ tmp = (u32)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT;
+ iowrite32be(tmp, &regs->fmbm_tda);
+
+ /* Tx FIFO parameters */
+ tmp = (cfg->tx_fifo_min_level / PORT_BMI_FIFO_UNITS) <<
+ BMI_TX_FIFO_MIN_FILL_SHIFT;
+ tmp |= ((cfg->tx_fifo_deq_pipeline_depth - 1) &
+ BMI_FIFO_PIPELINE_DEPTH_MASK) << BMI_FIFO_PIPELINE_DEPTH_SHIFT;
+ tmp |= (cfg->tx_fifo_low_comf_level / PORT_BMI_FIFO_UNITS) - 1;
+ iowrite32be(tmp, &regs->fmbm_tfp);
+
+ /* Frame end data */
+ tmp = (cfg->cheksum_last_bytes_ignore & BMI_FRAME_END_CS_IGNORE_MASK) <<
+ BMI_FRAME_END_CS_IGNORE_SHIFT;
+ iowrite32be(tmp, &regs->fmbm_tfed);
+
+ /* Internal context parameters */
+ tmp = ((cfg->int_context.ext_buf_offset / PORT_IC_OFFSET_UNITS) &
+ BMI_IC_TO_EXT_MASK) << BMI_IC_TO_EXT_SHIFT;
+ tmp |= ((cfg->int_context.int_context_offset / PORT_IC_OFFSET_UNITS) &
+ BMI_IC_FROM_INT_MASK) << BMI_IC_FROM_INT_SHIFT;
+ tmp |= (cfg->int_context.size / PORT_IC_OFFSET_UNITS) &
+ BMI_IC_SIZE_MASK;
+ iowrite32be(tmp, &regs->fmbm_ticp);
+
+ /* Frame attributes */
+ tmp = BMI_CMD_TX_MR_DEF;
+ tmp |= BMI_CMD_ATTR_ORDER;
+ tmp |= (u32)cfg->color << BMI_CMD_ATTR_COLOR_SHIFT;
+ iowrite32be(tmp, &regs->fmbm_tfca);
+
+ /* Dequeue NIA + enqueue NIA */
+ iowrite32be(NIA_ENG_QMI_DEQ, &regs->fmbm_tfdne);
+ iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, &regs->fmbm_tfene);
+ if (cfg->fmbm_tfne_has_features)
+ iowrite32be(!cfg->dflt_fqid ?
+ BMI_EBD_EN | NIA_BMI_AC_FETCH_ALL_FRAME :
+ NIA_BMI_AC_FETCH_ALL_FRAME, &regs->fmbm_tfne);
+ if (!cfg->dflt_fqid && cfg->dont_release_buf) {
+ iowrite32be(DFLT_FQ_ID, &regs->fmbm_tcfqid);
+ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE,
+ &regs->fmbm_tfene);
+ if (cfg->fmbm_tfne_has_features)
+ iowrite32be(ioread32be(&regs->fmbm_tfne) & ~BMI_EBD_EN,
+ &regs->fmbm_tfne);
+ }
+
+ /* Confirmation/error queues */
+ if (cfg->dflt_fqid || !cfg->dont_release_buf)
+ iowrite32be(cfg->dflt_fqid & DFLT_FQ_ID, &regs->fmbm_tcfqid);
+ iowrite32be((cfg->err_fqid & DFLT_FQ_ID), &regs->fmbm_tefqid);
+
+ return 0;
+}
+
+static int init_qmi(struct fman_port *port)
+{
+ struct fman_port_qmi_regs __iomem *regs = port->qmi_regs;
+ struct fman_port_cfg *cfg = port->cfg;
+ u32 tmp;
+
+ /* Rx port configuration */
+ if (port->port_type == FMAN_PORT_TYPE_RX) {
+ /* Enqueue NIA */
+ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_RELEASE, &regs->fmqm_pnen);
+ return 0;
+ }
+
+ /* Continue with Tx port configuration */
+ if (port->port_type == FMAN_PORT_TYPE_TX) {
+ /* Enqueue NIA */
+ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE,
+ &regs->fmqm_pnen);
+ /* Dequeue NIA */
+ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX, &regs->fmqm_pndn);
+ }
+
+ /* Dequeue Configuration register */
+ tmp = 0;
+ if (cfg->deq_high_priority)
+ tmp |= QMI_DEQ_CFG_PRI;
+
+ switch (cfg->deq_type) {
+ case FMAN_PORT_DEQ_BY_PRI:
+ tmp |= QMI_DEQ_CFG_TYPE1;
+ break;
+ case FMAN_PORT_DEQ_ACTIVE_FQ:
+ tmp |= QMI_DEQ_CFG_TYPE2;
+ break;
+ case FMAN_PORT_DEQ_ACTIVE_FQ_NO_ICS:
+ tmp |= QMI_DEQ_CFG_TYPE3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (cfg->deq_prefetch_option) {
+ case FMAN_PORT_DEQ_NO_PREFETCH:
+ break;
+ case FMAN_PORT_DEQ_PART_PREFETCH:
+ tmp |= QMI_DEQ_CFG_PREFETCH_PARTIAL;
+ break;
+ case FMAN_PORT_DEQ_FULL_PREFETCH:
+ tmp |= QMI_DEQ_CFG_PREFETCH_FULL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ tmp |= (cfg->deq_sp & QMI_DEQ_CFG_SP_MASK) << QMI_DEQ_CFG_SP_SHIFT;
+ tmp |= cfg->deq_byte_cnt;
+ iowrite32be(tmp, &regs->fmqm_pndc);
+
+ return 0;
+}
+
+static void stop_port_hwp(struct fman_port *port)
+{
+ struct fman_port_hwp_regs __iomem *regs = port->hwp_regs;
+ int cnt = 100;
+
+ iowrite32be(HWP_HXS_PCAC_PSTOP, &regs->fmpr_pcac);
+
+ while (cnt-- > 0 &&
+ (ioread32be(&regs->fmpr_pcac) & HWP_HXS_PCAC_PSTAT))
+ udelay(10);
+ if (!cnt)
+ pr_err("Timeout stopping HW Parser\n");
+}
+
+static void start_port_hwp(struct fman_port *port)
+{
+ struct fman_port_hwp_regs __iomem *regs = port->hwp_regs;
+ int cnt = 100;
+
+ iowrite32be(0, &regs->fmpr_pcac);
+
+ while (cnt-- > 0 &&
+ !(ioread32be(&regs->fmpr_pcac) & HWP_HXS_PCAC_PSTAT))
+ udelay(10);
+ if (!cnt)
+ pr_err("Timeout starting HW Parser\n");
+}
+
+static void init_hwp(struct fman_port *port)
+{
+ struct fman_port_hwp_regs __iomem *regs = port->hwp_regs;
+ int i;
+
+ stop_port_hwp(port);
+
+ for (i = 0; i < HWP_HXS_COUNT; i++) {
+ /* enable HXS error reporting into FD[STATUS] PHE */
+ iowrite32be(0x00000000, &regs->pmda[i].ssa);
+ iowrite32be(0xffffffff, &regs->pmda[i].lcv);
+ }
+
+ /* Short packet padding removal from checksum calculation */
+ iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_TCP_OFFSET].ssa);
+ iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_UDP_OFFSET].ssa);
+
+ start_port_hwp(port);
+}
+
+static int init(struct fman_port *port)
+{
+ int err;
+
+ /* Init BMI registers */
+ switch (port->port_type) {
+ case FMAN_PORT_TYPE_RX:
+ err = init_bmi_rx(port);
+ if (!err)
+ init_hwp(port);
+ break;
+ case FMAN_PORT_TYPE_TX:
+ err = init_bmi_tx(port);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (err)
+ return err;
+
+ /* Init QMI registers */
+ err = init_qmi(port);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int set_bpools(const struct fman_port *port,
+ const struct fman_port_bpools *bp)
+{
+ u32 __iomem *bp_reg, *bp_depl_reg;
+ u32 tmp;
+ u8 i, max_bp_num;
+ bool grp_depl_used = false, rx_port;
+
+ switch (port->port_type) {
+ case FMAN_PORT_TYPE_RX:
+ max_bp_num = port->ext_pools_num;
+ rx_port = true;
+ bp_reg = port->bmi_regs->rx.fmbm_ebmpi;
+ bp_depl_reg = &port->bmi_regs->rx.fmbm_mpd;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (rx_port) {
+ /* Check buffers are provided in ascending order */
+ for (i = 0; (i < (bp->count - 1) &&
+ (i < FMAN_PORT_MAX_EXT_POOLS_NUM - 1)); i++) {
+ if (bp->bpool[i].size > bp->bpool[i + 1].size)
+ return -EINVAL;
+ }
+ }
+
+ /* Set up external buffers pools */
+ for (i = 0; i < bp->count; i++) {
+ tmp = BMI_EXT_BUF_POOL_VALID;
+ tmp |= ((u32)bp->bpool[i].bpid <<
+ BMI_EXT_BUF_POOL_ID_SHIFT) & BMI_EXT_BUF_POOL_ID_MASK;
+
+ if (rx_port) {
+ if (bp->counters_enable)
+ tmp |= BMI_EXT_BUF_POOL_EN_COUNTER;
+
+ if (bp->bpool[i].is_backup)
+ tmp |= BMI_EXT_BUF_POOL_BACKUP;
+
+ tmp |= (u32)bp->bpool[i].size;
+ }
+
+ iowrite32be(tmp, &bp_reg[i]);
+ }
+
+ /* Clear unused pools */
+ for (i = bp->count; i < max_bp_num; i++)
+ iowrite32be(0, &bp_reg[i]);
+
+ /* Pools depletion */
+ tmp = 0;
+ for (i = 0; i < FMAN_PORT_MAX_EXT_POOLS_NUM; i++) {
+ if (bp->bpool[i].grp_bp_depleted) {
+ grp_depl_used = true;
+ tmp |= 0x80000000 >> i;
+ }
+
+ if (bp->bpool[i].single_bp_depleted)
+ tmp |= 0x80 >> i;
+ }
+
+ if (grp_depl_used)
+ tmp |= ((u32)bp->grp_bp_depleted_num - 1) <<
+ BMI_POOL_DEP_NUM_OF_POOLS_SHIFT;
+
+ iowrite32be(tmp, bp_depl_reg);
+ return 0;
+}
+
+static bool is_init_done(struct fman_port_cfg *cfg)
+{
+ /* port->cfg is freed at the end of fman_port_init(), so a NULL cfg
+ * means the port driver parameters have already been initialized
+ */
+ if (!cfg)
+ return true;
+
+ return false;
+}
+
+static int verify_size_of_fifo(struct fman_port *port)
+{
+ u32 min_fifo_size_required = 0, opt_fifo_size_for_b2b = 0;
+
+ /* TX Ports */
+ if (port->port_type == FMAN_PORT_TYPE_TX) {
+ min_fifo_size_required = (u32)
+ (roundup(port->max_frame_length,
+ FMAN_BMI_FIFO_UNITS) + (3 * FMAN_BMI_FIFO_UNITS));
+
+ min_fifo_size_required +=
+ port->cfg->tx_fifo_deq_pipeline_depth *
+ FMAN_BMI_FIFO_UNITS;
+
+ opt_fifo_size_for_b2b = min_fifo_size_required;
+
+ /* Add some margin for back-to-back capability to improve
+ * performance; this allows the hardware to pipeline a new frame
+ * DMA while the previous frame has not yet been transmitted.
+ */
+ if (port->port_speed == 10000)
+ opt_fifo_size_for_b2b += 3 * FMAN_BMI_FIFO_UNITS;
+ else
+ opt_fifo_size_for_b2b += 2 * FMAN_BMI_FIFO_UNITS;
+ }
+
+ /* RX Ports */
+ else if (port->port_type == FMAN_PORT_TYPE_RX) {
+ if (port->rev_info.major >= 6)
+ min_fifo_size_required = (u32)
+ (roundup(port->max_frame_length,
+ FMAN_BMI_FIFO_UNITS) +
+ (5 * FMAN_BMI_FIFO_UNITS));
+ /* 4 according to spec + 1 for FOF>0 */
+ else
+ min_fifo_size_required = (u32)
+ (roundup(min(port->max_frame_length,
+ port->rx_pools_params.largest_buf_size),
+ FMAN_BMI_FIFO_UNITS) +
+ (7 * FMAN_BMI_FIFO_UNITS));
+
+ opt_fifo_size_for_b2b = min_fifo_size_required;
+
+ /* Add some margin for back-to-back capability to improve
+ * performance; this allows the hardware to pipeline a new frame
+ * DMA while the previous frame has not yet been transmitted.
+ */
+ if (port->port_speed == 10000)
+ opt_fifo_size_for_b2b += 8 * FMAN_BMI_FIFO_UNITS;
+ else
+ opt_fifo_size_for_b2b += 3 * FMAN_BMI_FIFO_UNITS;
+ }
+
+ WARN_ON(min_fifo_size_required <= 0);
+ WARN_ON(opt_fifo_size_for_b2b < min_fifo_size_required);
+
+ /* Verify the size */
+ if (port->fifo_bufs.num < min_fifo_size_required)
+ dev_dbg(port->dev, "%s: FIFO size should be enlarged to %d bytes\n",
+ __func__, min_fifo_size_required);
+ else if (port->fifo_bufs.num < opt_fifo_size_for_b2b)
+ dev_dbg(port->dev, "%s: For b2b processing, FIFO may be enlarged to %d bytes\n",
+ __func__, opt_fifo_size_for_b2b);
+
+ return 0;
+}
+
+static int set_ext_buffer_pools(struct fman_port *port)
+{
+ struct fman_ext_pools *ext_buf_pools = &port->cfg->ext_buf_pools;
+ struct fman_buf_pool_depletion *buf_pool_depletion =
+ &port->cfg->buf_pool_depletion;
+ u8 ordered_array[FMAN_PORT_MAX_EXT_POOLS_NUM];
+ u16 sizes_array[BM_MAX_NUM_OF_POOLS];
+ int i = 0, j = 0, err;
+ struct fman_port_bpools bpools;
+
+ memset(&ordered_array, 0, sizeof(u8) * FMAN_PORT_MAX_EXT_POOLS_NUM);
+ memset(&sizes_array, 0, sizeof(u16) * BM_MAX_NUM_OF_POOLS);
+ memcpy(&port->ext_buf_pools, ext_buf_pools,
+ sizeof(struct fman_ext_pools));
+
+ fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(ext_buf_pools,
+ ordered_array,
+ sizes_array);
+
+ memset(&bpools, 0, sizeof(struct fman_port_bpools));
+ bpools.count = ext_buf_pools->num_of_pools_used;
+ bpools.counters_enable = true;
+ for (i = 0; i < ext_buf_pools->num_of_pools_used; i++) {
+ bpools.bpool[i].bpid = ordered_array[i];
+ bpools.bpool[i].size = sizes_array[ordered_array[i]];
+ }
+
+ /* save pools parameters for later use */
+ port->rx_pools_params.num_of_pools = ext_buf_pools->num_of_pools_used;
+ port->rx_pools_params.largest_buf_size =
+ sizes_array[ordered_array[ext_buf_pools->num_of_pools_used - 1]];
+
+ /* FMBM_RMPD reg. - pool depletion */
+ if (buf_pool_depletion->pools_grp_mode_enable) {
+ bpools.grp_bp_depleted_num = buf_pool_depletion->num_of_pools;
+ for (i = 0; i < port->bm_max_num_of_pools; i++) {
+ if (buf_pool_depletion->pools_to_consider[i]) {
+ for (j = 0; j < ext_buf_pools->
+ num_of_pools_used; j++) {
+ if (i == ordered_array[j]) {
+ bpools.bpool[j].
+ grp_bp_depleted = true;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ if (buf_pool_depletion->single_pool_mode_enable) {
+ for (i = 0; i < port->bm_max_num_of_pools; i++) {
+ if (buf_pool_depletion->
+ pools_to_consider_for_single_mode[i]) {
+ for (j = 0; j < ext_buf_pools->
+ num_of_pools_used; j++) {
+ if (i == ordered_array[j]) {
+ bpools.bpool[j].
+ single_bp_depleted = true;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ err = set_bpools(port, &bpools);
+ if (err != 0) {
+ dev_err(port->dev, "%s: set_bpools() failed\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int init_low_level_driver(struct fman_port *port)
+{
+ struct fman_port_cfg *cfg = port->cfg;
+ u32 tmp_val;
+
+ switch (port->port_type) {
+ case FMAN_PORT_TYPE_RX:
+ cfg->err_mask = (RX_ERRS_TO_ENQ & ~cfg->discard_mask);
+ break;
+ default:
+ break;
+ }
+
+ tmp_val = (u32)((port->internal_buf_offset % OFFSET_UNITS) ?
+ (port->internal_buf_offset / OFFSET_UNITS + 1) :
+ (port->internal_buf_offset / OFFSET_UNITS));
+ port->internal_buf_offset = (u8)(tmp_val * OFFSET_UNITS);
+ port->cfg->int_buf_start_margin = port->internal_buf_offset;
+
+ if (init(port) != 0) {
+ dev_err(port->dev, "%s: fman port initialization failed\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ /* The code below is a trick so the FM will neither release the
+ * buffer to the BM nor try to enqueue the frame to the QM
+ */
+ if (port->port_type == FMAN_PORT_TYPE_TX) {
+ if (!cfg->dflt_fqid && cfg->dont_release_buf) {
+ /* override fmbm_tcfqid 0 with a false non-0 value.
+ * This will force FM to act according to tfene.
+ * Otherwise, if fmbm_tcfqid is 0 the FM will release
+ * buffers to BM regardless of fmbm_tfene
+ */
+ iowrite32be(0xFFFFFF, &port->bmi_regs->tx.fmbm_tcfqid);
+ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE,
+ &port->bmi_regs->tx.fmbm_tfene);
+ }
+ }
+
+ return 0;
+}
+
+static int fill_soc_specific_params(struct fman_port *port)
+{
+ u32 bmi_max_fifo_size;
+
+ bmi_max_fifo_size = fman_get_bmi_max_fifo_size(port->fm);
+ port->max_port_fifo_size = MAX_PORT_FIFO_SIZE(bmi_max_fifo_size);
+ port->bm_max_num_of_pools = 64;
+
+ /* P4080 - Major 2
+ * P2041/P3041/P5020/P5040 - Major 3
+ * Tx/Bx - Major 6
+ */
+ switch (port->rev_info.major) {
+ case 2:
+ case 3:
+ port->max_num_of_ext_pools = 4;
+ port->max_num_of_sub_portals = 12;
+ break;
+
+ case 6:
+ port->max_num_of_ext_pools = 8;
+ port->max_num_of_sub_portals = 16;
+ break;
+
+ default:
+ dev_err(port->dev, "%s: Unsupported FMan version\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int get_dflt_fifo_deq_pipeline_depth(u8 major, enum fman_port_type type,
+ u16 speed)
+{
+ switch (type) {
+ case FMAN_PORT_TYPE_RX:
+ case FMAN_PORT_TYPE_TX:
+ switch (speed) {
+ case 10000:
+ return 4;
+ case 1000:
+ if (major >= 6)
+ return 2;
+ else
+ return 1;
+ default:
+ return 0;
+ }
+ default:
+ return 0;
+ }
+}
+
+static int get_dflt_num_of_tasks(u8 major, enum fman_port_type type,
+ u16 speed)
+{
+ switch (type) {
+ case FMAN_PORT_TYPE_RX:
+ case FMAN_PORT_TYPE_TX:
+ switch (speed) {
+ case 10000:
+ return 16;
+ case 1000:
+ if (major >= 6)
+ return 4;
+ else
+ return 3;
+ default:
+ return 0;
+ }
+ default:
+ return 0;
+ }
+}
+
+static int get_dflt_extra_num_of_tasks(u8 major, enum fman_port_type type,
+ u16 speed)
+{
+ switch (type) {
+ case FMAN_PORT_TYPE_RX:
+ /* FMan V3 */
+ if (major >= 6)
+ return 0;
+
+ /* FMan V2 */
+ if (speed == 10000)
+ return 8;
+ else
+ return 2;
+ case FMAN_PORT_TYPE_TX:
+ default:
+ return 0;
+ }
+}
+
+static int get_dflt_num_of_open_dmas(u8 major, enum fman_port_type type,
+ u16 speed)
+{
+ int val;
+
+ if (major >= 6) {
+ switch (type) {
+ case FMAN_PORT_TYPE_TX:
+ if (speed == 10000)
+ val = 12;
+ else
+ val = 3;
+ break;
+ case FMAN_PORT_TYPE_RX:
+ if (speed == 10000)
+ val = 8;
+ else
+ val = 2;
+ break;
+ default:
+ return 0;
+ }
+ } else {
+ switch (type) {
+ case FMAN_PORT_TYPE_TX:
+ case FMAN_PORT_TYPE_RX:
+ if (speed == 10000)
+ val = 8;
+ else
+ val = 1;
+ break;
+ default:
+ val = 0;
+ }
+ }
+
+ return val;
+}
+
+static int get_dflt_extra_num_of_open_dmas(u8 major, enum fman_port_type type,
+ u16 speed)
+{
+ /* FMan V3 */
+ if (major >= 6)
+ return 0;
+
+ /* FMan V2 */
+ switch (type) {
+ case FMAN_PORT_TYPE_RX:
+ case FMAN_PORT_TYPE_TX:
+ if (speed == 10000)
+ return 8;
+ else
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static int get_dflt_num_of_fifo_bufs(u8 major, enum fman_port_type type,
+ u16 speed)
+{
+ int val;
+
+ if (major >= 6) {
+ switch (type) {
+ case FMAN_PORT_TYPE_TX:
+ if (speed == 10000)
+ val = 64;
+ else
+ val = 50;
+ break;
+ case FMAN_PORT_TYPE_RX:
+ if (speed == 10000)
+ val = 96;
+ else
+ val = 50;
+ break;
+ default:
+ val = 0;
+ }
+ } else {
+ switch (type) {
+ case FMAN_PORT_TYPE_TX:
+ if (speed == 10000)
+ val = 48;
+ else
+ val = 44;
+ break;
+ case FMAN_PORT_TYPE_RX:
+ if (speed == 10000)
+ val = 48;
+ else
+ val = 45;
+ break;
+ default:
+ val = 0;
+ }
+ }
+
+ return val;
+}
+
+static void set_dflt_cfg(struct fman_port *port,
+ struct fman_port_params *port_params)
+{
+ struct fman_port_cfg *cfg = port->cfg;
+
+ cfg->dma_swap_data = FMAN_PORT_DMA_NO_SWAP;
+ cfg->color = FMAN_PORT_COLOR_GREEN;
+ cfg->rx_cut_end_bytes = DFLT_PORT_CUT_BYTES_FROM_END;
+ cfg->rx_pri_elevation = BMI_PRIORITY_ELEVATION_LEVEL;
+ cfg->rx_fifo_thr = BMI_FIFO_THRESHOLD;
+ cfg->tx_fifo_low_comf_level = (5 * 1024);
+ cfg->deq_type = FMAN_PORT_DEQ_BY_PRI;
+ cfg->deq_prefetch_option = FMAN_PORT_DEQ_FULL_PREFETCH;
+ cfg->tx_fifo_deq_pipeline_depth =
+ BMI_DEQUEUE_PIPELINE_DEPTH(port->port_type, port->port_speed);
+ cfg->deq_byte_cnt = QMI_BYTE_COUNT_LEVEL_CONTROL(port->port_type);
+
+ cfg->rx_pri_elevation =
+ DFLT_PORT_RX_FIFO_PRI_ELEVATION_LEV(port->max_port_fifo_size);
+ port->cfg->rx_fifo_thr =
+ DFLT_PORT_RX_FIFO_THRESHOLD(port->rev_info.major,
+ port->max_port_fifo_size);
+
+ if ((port->rev_info.major == 6) &&
+ ((port->rev_info.minor == 0) || (port->rev_info.minor == 3)))
+ cfg->errata_A006320 = true;
+
+ /* Excessive Threshold register - exists for pre-FMv3 chips only */
+ if (port->rev_info.major < 6)
+ cfg->excessive_threshold_register = true;
+ else
+ cfg->fmbm_tfne_has_features = true;
+
+ cfg->buffer_prefix_content.data_align =
+ DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN;
+}
+
+static void set_rx_dflt_cfg(struct fman_port *port,
+ struct fman_port_params *port_params)
+{
+ port->cfg->discard_mask = DFLT_PORT_ERRORS_TO_DISCARD;
+
+ memcpy(&port->cfg->ext_buf_pools,
+ &port_params->specific_params.rx_params.ext_buf_pools,
+ sizeof(struct fman_ext_pools));
+ port->cfg->err_fqid =
+ port_params->specific_params.rx_params.err_fqid;
+ port->cfg->dflt_fqid =
+ port_params->specific_params.rx_params.dflt_fqid;
+ port->cfg->pcd_base_fqid =
+ port_params->specific_params.rx_params.pcd_base_fqid;
+ port->cfg->pcd_fqs_count =
+ port_params->specific_params.rx_params.pcd_fqs_count;
+}
+
+static void set_tx_dflt_cfg(struct fman_port *port,
+ struct fman_port_params *port_params,
+ struct fman_port_dts_params *dts_params)
+{
+ port->cfg->tx_fifo_deq_pipeline_depth =
+ get_dflt_fifo_deq_pipeline_depth(port->rev_info.major,
+ port->port_type,
+ port->port_speed);
+ port->cfg->err_fqid =
+ port_params->specific_params.non_rx_params.err_fqid;
+ port->cfg->deq_sp =
+ (u8)(dts_params->qman_channel_id & QMI_DEQ_CFG_SUBPORTAL_MASK);
+ port->cfg->dflt_fqid =
+ port_params->specific_params.non_rx_params.dflt_fqid;
+ port->cfg->deq_high_priority = true;
+}
+
+/**
+ * fman_port_config
+ * @port: Pointer to the port structure
+ * @params: Pointer to data structure of parameters
+ *
+ * Creates a descriptor for the FM PORT module.
+ * The routine returns a pointer to the FM PORT object.
+ * This descriptor must be passed as first parameter to all other FM PORT
+ * function calls.
+ * No actual initialization or configuration of FM hardware is done by this
+ * routine.
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_port_config(struct fman_port *port, struct fman_port_params *params)
+{
+ void __iomem *base_addr = port->dts_params.base_addr;
+ int err;
+
+ /* Allocate the FM driver's parameters structure */
+ port->cfg = kzalloc(sizeof(*port->cfg), GFP_KERNEL);
+ if (!port->cfg)
+ return -EINVAL;
+
+ /* Initialize FM port parameters which will be kept by the driver */
+ port->port_type = port->dts_params.type;
+ port->port_speed = port->dts_params.speed;
+ port->port_id = port->dts_params.id;
+ port->fm = port->dts_params.fman;
+ port->ext_pools_num = (u8)8;
+
+ /* get FM revision */
+ fman_get_revision(port->fm, &port->rev_info);
+
+ err = fill_soc_specific_params(port);
+ if (err)
+ goto err_port_cfg;
+
+ switch (port->port_type) {
+ case FMAN_PORT_TYPE_RX:
+ set_rx_dflt_cfg(port, params);
+ fallthrough;
+ case FMAN_PORT_TYPE_TX:
+ set_tx_dflt_cfg(port, params, &port->dts_params);
+ fallthrough;
+ default:
+ set_dflt_cfg(port, params);
+ }
+
+ /* Continue with other parameters */
+ /* set memory map pointers */
+ port->bmi_regs = base_addr + BMI_PORT_REGS_OFFSET;
+ port->qmi_regs = base_addr + QMI_PORT_REGS_OFFSET;
+ port->hwp_regs = base_addr + HWP_PORT_REGS_OFFSET;
+
+ port->max_frame_length = DFLT_PORT_MAX_FRAME_LENGTH;
+ /* resource distribution. */
+
+ port->fifo_bufs.num =
+ get_dflt_num_of_fifo_bufs(port->rev_info.major, port->port_type,
+ port->port_speed) * FMAN_BMI_FIFO_UNITS;
+ port->fifo_bufs.extra =
+ DFLT_PORT_EXTRA_NUM_OF_FIFO_BUFS * FMAN_BMI_FIFO_UNITS;
+
+ port->open_dmas.num =
+ get_dflt_num_of_open_dmas(port->rev_info.major,
+ port->port_type, port->port_speed);
+ port->open_dmas.extra =
+ get_dflt_extra_num_of_open_dmas(port->rev_info.major,
+ port->port_type, port->port_speed);
+ port->tasks.num =
+ get_dflt_num_of_tasks(port->rev_info.major,
+ port->port_type, port->port_speed);
+ port->tasks.extra =
+ get_dflt_extra_num_of_tasks(port->rev_info.major,
+ port->port_type, port->port_speed);
+
+ /* FM_HEAVY_TRAFFIC_SEQUENCER_HANG_ERRATA_FMAN_A006981 errata
+ * workaround
+ */
+ if ((port->rev_info.major == 6) && (port->rev_info.minor == 0) &&
+ (((port->port_type == FMAN_PORT_TYPE_TX) &&
+ (port->port_speed == 1000)))) {
+ port->open_dmas.num = 16;
+ port->open_dmas.extra = 0;
+ }
+
+ if (port->rev_info.major >= 6 &&
+ port->port_type == FMAN_PORT_TYPE_TX &&
+ port->port_speed == 1000) {
+ /* FM_WRONG_RESET_VALUES_ERRATA_FMAN_A005127 Errata
+ * workaround
+ */
+ u32 reg;
+
+ reg = 0x00001013;
+ iowrite32be(reg, &port->bmi_regs->tx.fmbm_tfp);
+ }
+
+ return 0;
+
+err_port_cfg:
+ kfree(port->cfg);
+ return -EINVAL;
+}
+EXPORT_SYMBOL(fman_port_config);
+
+/*
+ * fman_port_use_kg_hash
+ * @port: A pointer to a FM Port module.
+ * @enable: enable or disable
+ *
+ * Sets the HW KeyGen or the BMI as HW Parser next engine, enabling
+ * or bypassing the KeyGen hashing of Rx traffic
+ */
+void fman_port_use_kg_hash(struct fman_port *port, bool enable)
+{
+ if (enable)
+ /* After the Parser frames go to KeyGen */
+ iowrite32be(NIA_ENG_HWK, &port->bmi_regs->rx.fmbm_rfpne);
+ else
+ /* After the Parser frames go to BMI */
+ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME,
+ &port->bmi_regs->rx.fmbm_rfpne);
+}
+EXPORT_SYMBOL(fman_port_use_kg_hash);
+
+/**
+ * fman_port_init
+ * @port: A pointer to a FM Port module.
+ *
+ * Initializes the FM PORT module by defining the software structure and
+ * configuring the hardware registers.
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_port_init(struct fman_port *port)
+{
+ struct fman_port_init_params params;
+ struct fman_keygen *keygen;
+ struct fman_port_cfg *cfg;
+ int err;
+
+ if (is_init_done(port->cfg))
+ return -EINVAL;
+
+ err = fman_sp_build_buffer_struct(&port->cfg->int_context,
+ &port->cfg->buffer_prefix_content,
+ &port->cfg->buf_margins,
+ &port->buffer_offsets,
+ &port->internal_buf_offset);
+ if (err)
+ return err;
+
+ cfg = port->cfg;
+
+ if (port->port_type == FMAN_PORT_TYPE_RX) {
+ /* Call the external Buffer routine which also checks fifo
+ * size and updates it if necessary
+ */
+ /* define external buffer pools and pool depletion */
+ err = set_ext_buffer_pools(port);
+ if (err)
+ return err;
+ /* check if the largest external buffer pool is large enough */
+ if (cfg->buf_margins.start_margins + MIN_EXT_BUF_SIZE +
+ cfg->buf_margins.end_margins >
+ port->rx_pools_params.largest_buf_size) {
+ dev_err(port->dev, "%s: buf_margins.start_margins (%d) + minimum buf size (64) + buf_margins.end_margins (%d) is larger than maximum external buffer size (%d)\n",
+ __func__, cfg->buf_margins.start_margins,
+ cfg->buf_margins.end_margins,
+ port->rx_pools_params.largest_buf_size);
+ return -EINVAL;
+ }
+ }
+
+ /* Call FM module routine for communicating parameters */
+ memset(&params, 0, sizeof(params));
+ params.port_id = port->port_id;
+ params.port_type = port->port_type;
+ params.port_speed = port->port_speed;
+ params.num_of_tasks = (u8)port->tasks.num;
+ params.num_of_extra_tasks = (u8)port->tasks.extra;
+ params.num_of_open_dmas = (u8)port->open_dmas.num;
+ params.num_of_extra_open_dmas = (u8)port->open_dmas.extra;
+
+ if (port->fifo_bufs.num) {
+ err = verify_size_of_fifo(port);
+ if (err)
+ return err;
+ }
+ params.size_of_fifo = port->fifo_bufs.num;
+ params.extra_size_of_fifo = port->fifo_bufs.extra;
+ params.deq_pipeline_depth = port->cfg->tx_fifo_deq_pipeline_depth;
+ params.max_frame_length = port->max_frame_length;
+
+ err = fman_set_port_params(port->fm, &params);
+ if (err)
+ return err;
+
+ err = init_low_level_driver(port);
+ if (err)
+ return err;
+
+ if (port->cfg->pcd_fqs_count) {
+ keygen = port->dts_params.fman->keygen;
+ err = keygen_port_hashing_init(keygen, port->port_id,
+ port->cfg->pcd_base_fqid,
+ port->cfg->pcd_fqs_count);
+ if (err)
+ return err;
+
+ fman_port_use_kg_hash(port, true);
+ }
+
+ kfree(port->cfg);
+ port->cfg = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL(fman_port_init);
+
+/**
+ * fman_port_cfg_buf_prefix_content
+ * @port: A pointer to a FM Port module.
+ * @buffer_prefix_content: A structure of parameters describing
+ * the structure of the buffer.
+ * Out parameter:
+ * Start margin - offset of data from
+ * start of external buffer.
+ * Defines the structure, size and content of the application buffer.
+ * The FM reserves the first 'priv_data_size' bytes of the prefix for
+ * application private data; then, depending on 'pass_prs_result' and
+ * 'pass_time_stamp', it places the parse result and the timestamp, followed
+ * by the packet itself (in this order), in the application buffer, and
+ * reports their offsets back to the application. On Tx ports, if
+ * 'pass_prs_result' is set, it is the application that must place the parse
+ * result at the corresponding offset in the prefix.
+ * Calling this routine changes the buffer margins definitions in the internal
+ * driver data base from its default configuration:
+ * Data size: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PRIV_DATA_SIZE]
+ * Pass Parser result: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_PRS_RESULT].
+ * Pass timestamp: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_TIME_STAMP].
+ * May be used for all ports
+ *
+ * Allowed only following fman_port_config() and before fman_port_init().
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_port_cfg_buf_prefix_content(struct fman_port *port,
+ struct fman_buffer_prefix_content *
+ buffer_prefix_content)
+{
+ if (is_init_done(port->cfg))
+ return -EINVAL;
+
+ memcpy(&port->cfg->buffer_prefix_content,
+ buffer_prefix_content,
+ sizeof(struct fman_buffer_prefix_content));
+ /* if data_align was not initialized by user,
+ * we return to driver's default
+ */
+ if (!port->cfg->buffer_prefix_content.data_align)
+ port->cfg->buffer_prefix_content.data_align =
+ DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN;
+
+ return 0;
+}
+EXPORT_SYMBOL(fman_port_cfg_buf_prefix_content);
+
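
A minimal sketch of the configuration window this routine documents, i.e. after fman_port_config() and before fman_port_init(); the helper name and the field values are placeholders, and the fman_buffer_prefix_content fields used are the ones named in the comment above.

#include "fman_port.h"

/* Illustrative only; values are placeholders. Must run after
 * fman_port_config() and before fman_port_init().
 */
static int set_prefix_sketch(struct fman_port *port)
{
	struct fman_buffer_prefix_content prefix = {
		.priv_data_size = 16,		/* placeholder private-data area */
		.pass_prs_result = true,	/* expose the parse result */
		.pass_time_stamp = true,	/* expose the HW timestamp */
		/* .data_align left at 0: the driver falls back to its default */
	};

	return fman_port_cfg_buf_prefix_content(port, &prefix);
}
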
+/**
+ * fman_port_disable
+ * @port: A pointer to a FM Port module.
+ *
+ * Gracefully disables an FM port: the port stops accepting new tasks, and
+ * all tasks already associated with the port are allowed to terminate.
+ *
+ * This is a blocking routine; it returns after the port is gracefully
+ * stopped, i.e. the port will not accept new frames, but it will finish all
+ * frames or tasks that had already begun.
+ * Allowed only following fman_port_init().
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_port_disable(struct fman_port *port)
+{
+ u32 __iomem *bmi_cfg_reg, *bmi_status_reg;
+ u32 tmp;
+ bool rx_port, failure = false;
+ int count;
+
+ if (!is_init_done(port->cfg))
+ return -EINVAL;
+
+ switch (port->port_type) {
+ case FMAN_PORT_TYPE_RX:
+ bmi_cfg_reg = &port->bmi_regs->rx.fmbm_rcfg;
+ bmi_status_reg = &port->bmi_regs->rx.fmbm_rst;
+ rx_port = true;
+ break;
+ case FMAN_PORT_TYPE_TX:
+ bmi_cfg_reg = &port->bmi_regs->tx.fmbm_tcfg;
+ bmi_status_reg = &port->bmi_regs->tx.fmbm_tst;
+ rx_port = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Disable QMI */
+ if (!rx_port) {
+ tmp = ioread32be(&port->qmi_regs->fmqm_pnc) & ~QMI_PORT_CFG_EN;
+ iowrite32be(tmp, &port->qmi_regs->fmqm_pnc);
+
+ /* Wait for QMI to finish FD handling */
+ count = 100;
+ do {
+ udelay(10);
+ tmp = ioread32be(&port->qmi_regs->fmqm_pns);
+ } while ((tmp & QMI_PORT_STATUS_DEQ_FD_BSY) && --count);
+
+ if (count == 0) {
+ /* Timeout */
+ failure = true;
+ }
+ }
+
+ /* Disable BMI */
+ tmp = ioread32be(bmi_cfg_reg) & ~BMI_PORT_CFG_EN;
+ iowrite32be(tmp, bmi_cfg_reg);
+
+ /* Wait for graceful stop end */
+ count = 500;
+ do {
+ udelay(10);
+ tmp = ioread32be(bmi_status_reg);
+ } while ((tmp & BMI_PORT_STATUS_BSY) && --count);
+
+ if (count == 0) {
+ /* Timeout */
+ failure = true;
+ }
+
+ if (failure)
+ dev_dbg(port->dev, "%s: FMan Port[%d]: BMI or QMI is Busy. Port forced down\n",
+ __func__, port->port_id);
+
+ return 0;
+}
+EXPORT_SYMBOL(fman_port_disable);
+
+/**
+ * fman_port_enable
+ * @port: A pointer to a FM Port module.
+ *
+ * A runtime routine provided to enable the port.
+ *
+ * Allowed only following fman_port_init().
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_port_enable(struct fman_port *port)
+{
+ u32 __iomem *bmi_cfg_reg;
+ u32 tmp;
+ bool rx_port;
+
+ if (!is_init_done(port->cfg))
+ return -EINVAL;
+
+ switch (port->port_type) {
+ case FMAN_PORT_TYPE_RX:
+ bmi_cfg_reg = &port->bmi_regs->rx.fmbm_rcfg;
+ rx_port = true;
+ break;
+ case FMAN_PORT_TYPE_TX:
+ bmi_cfg_reg = &port->bmi_regs->tx.fmbm_tcfg;
+ rx_port = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Enable QMI */
+ if (!rx_port) {
+ tmp = ioread32be(&port->qmi_regs->fmqm_pnc) | QMI_PORT_CFG_EN;
+ iowrite32be(tmp, &port->qmi_regs->fmqm_pnc);
+ }
+
+ /* Enable BMI */
+ tmp = ioread32be(bmi_cfg_reg) | BMI_PORT_CFG_EN;
+ iowrite32be(tmp, bmi_cfg_reg);
+
+ return 0;
+}
+EXPORT_SYMBOL(fman_port_enable);
+
+/**
+ * fman_port_bind
+ * @dev: FMan Port OF device pointer
+ *
+ * Bind to a specific FMan Port.
+ *
+ * Allowed only after the port was created.
+ *
+ * Return: A pointer to the FMan port device.
+ */
+struct fman_port *fman_port_bind(struct device *dev)
+{
+ return (struct fman_port *)(dev_get_drvdata(get_device(dev)));
+}
+EXPORT_SYMBOL(fman_port_bind);
+
+/**
+ * fman_port_get_qman_channel_id
+ * @port: Pointer to the FMan port device
+ *
+ * Get the QMan channel ID for the specific port
+ *
+ * Return: QMan channel ID
+ */
+u32 fman_port_get_qman_channel_id(struct fman_port *port)
+{
+ return port->dts_params.qman_channel_id;
+}
+EXPORT_SYMBOL(fman_port_get_qman_channel_id);
+
+/**
+ * fman_port_get_device
+ * @port: Pointer to the FMan port device
+ *
+ * Get the 'struct device' associated to the specified FMan port device
+ *
+ * Return: pointer to associated 'struct device'
+ */
+struct device *fman_port_get_device(struct fman_port *port)
+{
+ return port->dev;
+}
+EXPORT_SYMBOL(fman_port_get_device);
+
+int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset)
+{
+ if (port->buffer_offsets.hash_result_offset == ILLEGAL_BASE)
+ return -EINVAL;
+
+ *offset = port->buffer_offsets.hash_result_offset;
+
+ return 0;
+}
+EXPORT_SYMBOL(fman_port_get_hash_result_offset);
+
+int fman_port_get_tstamp(struct fman_port *port, const void *data, u64 *tstamp)
+{
+ if (port->buffer_offsets.time_stamp_offset == ILLEGAL_BASE)
+ return -EINVAL;
+
+ *tstamp = be64_to_cpu(*(__be64 *)(data +
+ port->buffer_offsets.time_stamp_offset));
+
+ return 0;
+}
+EXPORT_SYMBOL(fman_port_get_tstamp);
+
+static int fman_port_probe(struct platform_device *of_dev)
+{
+ struct fman_port *port;
+ struct fman *fman;
+ struct device_node *fm_node, *port_node;
+ struct platform_device *fm_pdev;
+ struct resource res;
+ struct resource *dev_res;
+ u32 val;
+ int err = 0, lenp;
+ enum fman_port_type port_type;
+ u16 port_speed;
+ u8 port_id;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->dev = &of_dev->dev;
+
+ port_node = of_node_get(of_dev->dev.of_node);
+
+ /* Get the FM node */
+ fm_node = of_get_parent(port_node);
+ if (!fm_node) {
+ dev_err(port->dev, "%s: of_get_parent() failed\n", __func__);
+ err = -ENODEV;
+ goto return_err;
+ }
+
+ fm_pdev = of_find_device_by_node(fm_node);
+ of_node_put(fm_node);
+ if (!fm_pdev) {
+ err = -EINVAL;
+ goto return_err;
+ }
+
+ fman = dev_get_drvdata(&fm_pdev->dev);
+ if (!fman) {
+ err = -EINVAL;
+ goto put_device;
+ }
+
+ err = of_property_read_u32(port_node, "cell-index", &val);
+ if (err) {
+ dev_err(port->dev, "%s: reading cell-index for %pOF failed\n",
+ __func__, port_node);
+ err = -EINVAL;
+ goto put_device;
+ }
+ port_id = (u8)val;
+ port->dts_params.id = port_id;
+
+ if (of_device_is_compatible(port_node, "fsl,fman-v3-port-tx")) {
+ port_type = FMAN_PORT_TYPE_TX;
+ port_speed = 1000;
+ if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
+ port_speed = 10000;
+
+ } else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-tx")) {
+ if (port_id >= TX_10G_PORT_BASE)
+ port_speed = 10000;
+ else
+ port_speed = 1000;
+ port_type = FMAN_PORT_TYPE_TX;
+
+ } else if (of_device_is_compatible(port_node, "fsl,fman-v3-port-rx")) {
+ port_type = FMAN_PORT_TYPE_RX;
+ port_speed = 1000;
+ if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
+ port_speed = 10000;
+
+ } else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-rx")) {
+ if (port_id >= RX_10G_PORT_BASE)
+ port_speed = 10000;
+ else
+ port_speed = 1000;
+ port_type = FMAN_PORT_TYPE_RX;
+
+ } else {
+ dev_err(port->dev, "%s: Illegal port type\n", __func__);
+ err = -EINVAL;
+ goto put_device;
+ }
+
+ port->dts_params.type = port_type;
+ port->dts_params.speed = port_speed;
+
+ if (port_type == FMAN_PORT_TYPE_TX) {
+ u32 qman_channel_id;
+
+ qman_channel_id = fman_get_qman_channel_id(fman, port_id);
+ if (qman_channel_id == 0) {
+ dev_err(port->dev, "%s: incorrect qman-channel-id\n",
+ __func__);
+ err = -EINVAL;
+ goto put_device;
+ }
+ port->dts_params.qman_channel_id = qman_channel_id;
+ }
+
+ err = of_address_to_resource(port_node, 0, &res);
+ if (err < 0) {
+ dev_err(port->dev, "%s: of_address_to_resource() failed\n",
+ __func__);
+ err = -ENOMEM;
+ goto put_device;
+ }
+
+ port->dts_params.fman = fman;
+
+ of_node_put(port_node);
+
+ dev_res = __devm_request_region(port->dev, &res, res.start,
+ resource_size(&res), "fman-port");
+ if (!dev_res) {
+ dev_err(port->dev, "%s: __devm_request_region() failed\n",
+ __func__);
+ err = -EINVAL;
+ goto free_port;
+ }
+
+ port->dts_params.base_addr = devm_ioremap(port->dev, res.start,
+ resource_size(&res));
+ if (!port->dts_params.base_addr)
+ dev_err(port->dev, "%s: devm_ioremap() failed\n", __func__);
+
+ dev_set_drvdata(&of_dev->dev, port);
+
+ return 0;
+
+put_device:
+ put_device(&fm_pdev->dev);
+return_err:
+ of_node_put(port_node);
+free_port:
+ kfree(port);
+ return err;
+}
+
+static const struct of_device_id fman_port_match[] = {
+ {.compatible = "fsl,fman-v3-port-rx"},
+ {.compatible = "fsl,fman-v2-port-rx"},
+ {.compatible = "fsl,fman-v3-port-tx"},
+ {.compatible = "fsl,fman-v2-port-tx"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, fman_port_match);
+
+static struct platform_driver fman_port_driver = {
+ .driver = {
+ .name = "fsl-fman-port",
+ .of_match_table = fman_port_match,
+ },
+ .probe = fman_port_probe,
+};
+
+static int __init fman_port_load(void)
+{
+ int err;
+
+ pr_debug("FSL DPAA FMan driver\n");
+
+ err = platform_driver_register(&fman_port_driver);
+ if (err < 0)
+ pr_err("Error, platform_driver_register() = %d\n", err);
+
+ return err;
+}
+module_init(fman_port_load);
+
+static void __exit fman_port_unload(void)
+{
+ platform_driver_unregister(&fman_port_driver);
+}
+module_exit(fman_port_unload);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Freescale DPAA Frame Manager Port driver");
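
A condensed sketch of the bring-up order a consumer of this file (such as the DPAA Ethernet driver) is expected to follow, assuming the port platform device has already probed; the helper name and the FQ ids are placeholders.

#include <linux/device.h>

#include "fman_port.h"

/* Illustrative only: FQ ids are placeholders and error unwinding is omitted.
 * An Rx port would additionally have to describe its buffer pools in
 * params.specific_params.rx_params.ext_buf_pools before fman_port_init().
 */
static int tx_port_bringup_sketch(struct device *port_dev)
{
	struct fman_port_params params = {};
	struct fman_port *port;
	int err;

	port = fman_port_bind(port_dev);	/* drvdata set by fman_port_probe() */
	if (!port)
		return -ENODEV;

	params.specific_params.non_rx_params.err_fqid = 0x100;	/* placeholder */
	params.specific_params.non_rx_params.dflt_fqid = 0x101;	/* placeholder */

	err = fman_port_config(port, &params);	/* allocate cfg, apply defaults */
	if (err)
		return err;

	err = fman_port_init(port);	/* program BMI/QMI/HWP, then free cfg */
	if (err)
		return err;

	return fman_port_enable(port);	/* set the BMI/QMI enable bits */
}
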
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.h b/drivers/net/ethernet/freescale/fman/fman_port.h
new file mode 100644
index 000000000..4917fe8f0
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman_port.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
+ */
+
+#ifndef __FMAN_PORT_H
+#define __FMAN_PORT_H
+
+#include "fman.h"
+
+/* FM Port API
+ * The FM uses a general module called "port" to represent a Tx port (MAC),
+ * an Rx port (MAC).
+ * The number of ports in an FM varies between SOCs.
+ * The SW driver manages these ports as sub-modules of the FM,i.e. after an
+ * FM is initialized, its ports may be initialized and operated upon.
+ * The port is initialized aware of its type, but other functions on a port
+ * may be indifferent to its type. When necessary, the driver verifies
+ * coherence and returns error if applicable.
+ * On initialization, user specifies the port type and it's index (relative
+ * to the port's type) - always starting at 0.
+ */
+
+/* FM Frame error */
+/* Frame Descriptor errors */
+/* Not for Rx-Port! Unsupported Format */
+#define FM_PORT_FRM_ERR_UNSUPPORTED_FORMAT FM_FD_ERR_UNSUPPORTED_FORMAT
+/* Not for Rx-Port! Length Error */
+#define FM_PORT_FRM_ERR_LENGTH FM_FD_ERR_LENGTH
+/* DMA Data error */
+#define FM_PORT_FRM_ERR_DMA FM_FD_ERR_DMA
+/* non Frame-Manager error; probably comes from a SEC engine chained to the FM */
+#define FM_PORT_FRM_ERR_NON_FM FM_FD_RX_STATUS_ERR_NON_FM
+/* IPR error */
+#define FM_PORT_FRM_ERR_IPRE (FM_FD_ERR_IPR & ~FM_FD_IPR)
+/* IPR non-consistent-sp */
+#define FM_PORT_FRM_ERR_IPR_NCSP (FM_FD_ERR_IPR_NCSP & \
+ ~FM_FD_IPR)
+
+/* Rx FIFO overflow, FCS error, code error, running disparity
+ * error (SGMII and TBI modes), FIFO parity error.
+ * PHY Sequence error, PHY error control character detected.
+ */
+#define FM_PORT_FRM_ERR_PHYSICAL FM_FD_ERR_PHYSICAL
+/* Frame too long OR Frame size exceeds max_length_frame */
+#define FM_PORT_FRM_ERR_SIZE FM_FD_ERR_SIZE
+/* indicates a classifier "drop" operation */
+#define FM_PORT_FRM_ERR_CLS_DISCARD FM_FD_ERR_CLS_DISCARD
+/* Extract Out of Frame */
+#define FM_PORT_FRM_ERR_EXTRACTION FM_FD_ERR_EXTRACTION
+/* No Scheme Selected */
+#define FM_PORT_FRM_ERR_NO_SCHEME FM_FD_ERR_NO_SCHEME
+/* Keysize Overflow */
+#define FM_PORT_FRM_ERR_KEYSIZE_OVERFLOW FM_FD_ERR_KEYSIZE_OVERFLOW
+/* Frame color is red */
+#define FM_PORT_FRM_ERR_COLOR_RED FM_FD_ERR_COLOR_RED
+/* Frame color is yellow */
+#define FM_PORT_FRM_ERR_COLOR_YELLOW FM_FD_ERR_COLOR_YELLOW
+/* Parser Time out Exceed */
+#define FM_PORT_FRM_ERR_PRS_TIMEOUT FM_FD_ERR_PRS_TIMEOUT
+/* Invalid Soft Parser instruction */
+#define FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT FM_FD_ERR_PRS_ILL_INSTRUCT
+/* Header error was identified during parsing */
+#define FM_PORT_FRM_ERR_PRS_HDR_ERR FM_FD_ERR_PRS_HDR_ERR
+/* Frame parsed beyond the first 256 bytes */
+#define FM_PORT_FRM_ERR_BLOCK_LIMIT_EXCEEDED FM_FD_ERR_BLOCK_LIMIT_EXCEEDED
+/* FPM Frame Processing Timeout Exceeded */
+#define FM_PORT_FRM_ERR_PROCESS_TIMEOUT 0x00000001
+
+struct fman_port;
+
+/* A structure for additional Rx port parameters */
+struct fman_port_rx_params {
+ u32 err_fqid; /* Error Queue Id. */
+ u32 dflt_fqid; /* Default Queue Id. */
+ u32 pcd_base_fqid; /* PCD base Queue Id. */
+ u32 pcd_fqs_count; /* Number of PCD FQs. */
+
+ /* Which external buffer pools are used
+ * (up to FMAN_PORT_MAX_EXT_POOLS_NUM), and their sizes.
+ */
+ struct fman_ext_pools ext_buf_pools;
+};
+
+/* A structure for additional non-Rx port parameters */
+struct fman_port_non_rx_params {
+ /* Error Queue Id. */
+ u32 err_fqid;
+ /* For Tx - Default Confirmation queue, 0 means no Tx confirmation
+ * for processed frames. For OP port - default Rx queue.
+ */
+ u32 dflt_fqid;
+};
+
+/* A union for additional parameters depending on port type */
+union fman_port_specific_params {
+ /* Rx port parameters structure */
+ struct fman_port_rx_params rx_params;
+ /* Non-Rx port parameters structure */
+ struct fman_port_non_rx_params non_rx_params;
+};
+
+/* A structure of FM port initialization parameters */
+struct fman_port_params {
+ /* A handle to the FMan this port belongs to */
+ void *fm;
+ /* Additional parameters depending on port type */
+ union fman_port_specific_params specific_params;
+};
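+
+/* Illustrative sketch (not part of the upstream driver): filling the
+ * parameters for an Rx port. The frame-queue IDs and the ext_pools variable
+ * are hypothetical placeholders chosen by the caller.
+ *
+ *   struct fman_port_params params = {
+ *           .fm = fman,
+ *           .specific_params.rx_params = {
+ *                   .err_fqid = 0x100,
+ *                   .dflt_fqid = 0x101,
+ *                   .ext_buf_pools = ext_pools,
+ *           },
+ *   };
+ */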
+
+int fman_port_config(struct fman_port *port, struct fman_port_params *params);
+
+void fman_port_use_kg_hash(struct fman_port *port, bool enable);
+
+int fman_port_init(struct fman_port *port);
+
+int fman_port_cfg_buf_prefix_content(struct fman_port *port,
+ struct fman_buffer_prefix_content
+ *buffer_prefix_content);
+
+int fman_port_disable(struct fman_port *port);
+
+int fman_port_enable(struct fman_port *port);
+
+u32 fman_port_get_qman_channel_id(struct fman_port *port);
+
+int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset);
+
+int fman_port_get_tstamp(struct fman_port *port, const void *data, u64 *tstamp);
+
+struct fman_port *fman_port_bind(struct device *dev);
+
+struct device *fman_port_get_device(struct fman_port *port);
+
+#endif /* __FMAN_PORT_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_sp.c b/drivers/net/ethernet/freescale/fman/fman_sp.c
new file mode 100644
index 000000000..0fac60aa5
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman_sp.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
+ */
+
+#include "fman_sp.h"
+#include "fman.h"
+
+void fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(struct fman_ext_pools
+ *fm_ext_pools,
+ u8 *ordered_array,
+ u16 *sizes_array)
+{
+ u16 buf_size = 0;
+ int i = 0, j = 0, k = 0;
+
+ /* First we copy the external buffer pools information
+ * into the caller-provided arrays, keeping pool ids ordered by size
+ */
+ for (i = 0; i < fm_ext_pools->num_of_pools_used; i++) {
+ /* get pool size */
+ buf_size = fm_ext_pools->ext_buf_pool[i].size;
+
+ /* keep sizes in an array according to poolId
+ * for direct access
+ */
+ sizes_array[fm_ext_pools->ext_buf_pool[i].id] = buf_size;
+
+ /* save poolId in an ordered array according to size */
+ for (j = 0; j <= i; j++) {
+ /* this is the next free place in the array */
+ if (j == i)
+ ordered_array[i] =
+ fm_ext_pools->ext_buf_pool[i].id;
+ else {
+ /* find the right place for this poolId */
+ if (buf_size < sizes_array[ordered_array[j]]) {
+ /* move the pool_ids one place ahead
+ * to make room for this poolId
+ */
+ for (k = i; k > j; k--)
+ ordered_array[k] =
+ ordered_array[k - 1];
+
+ /* now k==j, this is the place for
+ * the new size
+ */
+ ordered_array[k] =
+ fm_ext_pools->ext_buf_pool[i].id;
+ break;
+ }
+ }
+ }
+ }
+}
+EXPORT_SYMBOL(fman_sp_set_buf_pools_in_asc_order_of_buf_sizes);
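+
+/* Worked example (illustrative, not part of the upstream driver): with three
+ * pools {id 2, size 512}, {id 0, size 256} and {id 1, size 1024}, the helper
+ * above produces
+ *
+ *   ordered_array = { 0, 2, 1 }   (pool ids in ascending order of size)
+ *   sizes_array[0] = 256, sizes_array[1] = 1024, sizes_array[2] = 512
+ */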
+
+int fman_sp_build_buffer_struct(struct fman_sp_int_context_data_copy *
+ int_context_data_copy,
+ struct fman_buffer_prefix_content *
+ buffer_prefix_content,
+ struct fman_sp_buf_margins *buf_margins,
+ struct fman_sp_buffer_offsets *buffer_offsets,
+ u8 *internal_buf_offset)
+{
+ u32 tmp;
+
+ /* Align start of internal context data to 16 byte */
+ int_context_data_copy->ext_buf_offset = (u16)
+ ((buffer_prefix_content->priv_data_size & (OFFSET_UNITS - 1)) ?
+ ((buffer_prefix_content->priv_data_size + OFFSET_UNITS) &
+ ~(u16)(OFFSET_UNITS - 1)) :
+ buffer_prefix_content->priv_data_size);
+
+ /* Translate margin and int_context params to FM parameters */
+ /* Initialize with illegal value. Later we'll set legal values. */
+ buffer_offsets->prs_result_offset = (u32)ILLEGAL_BASE;
+ buffer_offsets->time_stamp_offset = (u32)ILLEGAL_BASE;
+ buffer_offsets->hash_result_offset = (u32)ILLEGAL_BASE;
+
+ /* Internally the driver supports 4 options
+ * 1. prsResult/timestamp/hashResult selection (in fact 8 options,
+ *    but for simplicity we relate to them as one).
+ * 2. All IC context (from AD), not including debug.
+ */
+
+ /* This case covers the options under 1 */
+ /* Copy size must be in 16-byte granularity. */
+ int_context_data_copy->size =
+ (u16)((buffer_prefix_content->pass_prs_result ? 32 : 0) +
+ ((buffer_prefix_content->pass_time_stamp ||
+ buffer_prefix_content->pass_hash_result) ? 16 : 0));
+
+ /* Align start of internal context data to 16 byte */
+ int_context_data_copy->int_context_offset =
+ (u8)(buffer_prefix_content->pass_prs_result ? 32 :
+ ((buffer_prefix_content->pass_time_stamp ||
+ buffer_prefix_content->pass_hash_result) ? 64 : 0));
+
+ if (buffer_prefix_content->pass_prs_result)
+ buffer_offsets->prs_result_offset =
+ int_context_data_copy->ext_buf_offset;
+ if (buffer_prefix_content->pass_time_stamp)
+ buffer_offsets->time_stamp_offset =
+ buffer_prefix_content->pass_prs_result ?
+ (int_context_data_copy->ext_buf_offset +
+ sizeof(struct fman_prs_result)) :
+ int_context_data_copy->ext_buf_offset;
+ if (buffer_prefix_content->pass_hash_result)
+ /* If PR is not requested, whether TS is
+ * requested or not, IC will be copied from TS
+ */
+ buffer_offsets->hash_result_offset =
+ buffer_prefix_content->pass_prs_result ?
+ (int_context_data_copy->ext_buf_offset +
+ sizeof(struct fman_prs_result) + 8) :
+ int_context_data_copy->ext_buf_offset + 8;
+
+ if (int_context_data_copy->size)
+ buf_margins->start_margins =
+ (u16)(int_context_data_copy->ext_buf_offset +
+ int_context_data_copy->size);
+ else
+ /* No internal context passing; start_margins is
+ * immediately after the private data
+ */
+ buf_margins->start_margins =
+ buffer_prefix_content->priv_data_size;
+
+ /* align data start */
+ tmp = (u32)(buf_margins->start_margins %
+ buffer_prefix_content->data_align);
+ if (tmp)
+ buf_margins->start_margins +=
+ (buffer_prefix_content->data_align - tmp);
+ buffer_offsets->data_offset = buf_margins->start_margins;
+
+ return 0;
+}
+EXPORT_SYMBOL(fman_sp_build_buffer_struct);
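+
+/* Worked example (illustrative, not part of the upstream driver), assuming
+ * OFFSET_UNITS is 16 and sizeof(struct fman_prs_result) is 32 (consistent
+ * with the 32-byte parse-result copy size used above). For
+ *
+ *   priv_data_size = 16, pass_prs_result = pass_hash_result = true,
+ *   pass_time_stamp = false, data_align = 64
+ *
+ * the function yields
+ *
+ *   ext_buf_offset     = 16   (already 16-byte aligned)
+ *   size               = 32 + 16 = 48
+ *   prs_result_offset  = 16
+ *   hash_result_offset = 16 + 32 + 8 = 56
+ *   start_margins      = 16 + 48 = 64
+ *   data_offset        = 64   (already a multiple of data_align)
+ */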
+
diff --git a/drivers/net/ethernet/freescale/fman/fman_sp.h b/drivers/net/ethernet/freescale/fman/fman_sp.h
new file mode 100644
index 000000000..a62dd21c8
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman_sp.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
+ */
+
+#ifndef __FM_SP_H
+#define __FM_SP_H
+
+#include "fman.h"
+#include <linux/types.h>
+
+#define ILLEGAL_BASE (~0)
+
+/* defaults */
+#define DFLT_FM_SP_BUFFER_PREFIX_CONTEXT_DATA_ALIGN 64
+
+/* Registers bit fields */
+#define FMAN_SP_EXT_BUF_POOL_EN_COUNTER 0x40000000
+#define FMAN_SP_EXT_BUF_POOL_VALID 0x80000000
+#define FMAN_SP_EXT_BUF_POOL_BACKUP 0x20000000
+#define FMAN_SP_DMA_ATTR_WRITE_OPTIMIZE 0x00100000
+#define FMAN_SP_SG_DISABLE 0x80000000
+
+/* shifts */
+#define FMAN_SP_EXT_BUF_MARG_START_SHIFT 16
+#define FMAN_SP_DMA_ATTR_SWP_SHIFT 30
+#define FMAN_SP_IC_TO_EXT_SHIFT 16
+#define FMAN_SP_IC_FROM_INT_SHIFT 8
+
+/* structure for defining internal context copying */
+struct fman_sp_int_context_data_copy {
+ /* Offset in the external buffer to which the internal
+ * context is copied (Rx) or from which it is taken (Tx, Op).
+ */
+ u16 ext_buf_offset;
+ /* Offset within internal context to copy from
+ * (Rx) or to copy to (Tx, Op).
+ */
+ u8 int_context_offset;
+ /* Size of the internal context data to copy */
+ u16 size;
+};
+
+/* struct for defining external buffer margins */
+struct fman_sp_buf_margins {
+ /* Number of bytes to be left at the beginning
+ * of the external buffer (must be divisible by 16)
+ */
+ u16 start_margins;
+ /* Number of bytes to be left at the end
+ * of the external buffer (must be divisible by 16)
+ */
+ u16 end_margins;
+};
+
+struct fman_sp_buffer_offsets {
+ u32 data_offset;
+ u32 prs_result_offset;
+ u32 time_stamp_offset;
+ u32 hash_result_offset;
+};
+
+int fman_sp_build_buffer_struct(struct fman_sp_int_context_data_copy
+ *int_context_data_copy,
+ struct fman_buffer_prefix_content
+ *buffer_prefix_content,
+ struct fman_sp_buf_margins *buf_margins,
+ struct fman_sp_buffer_offsets
+ *buffer_offsets,
+ u8 *internal_buf_offset);
+
+void fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(struct fman_ext_pools
+ *fm_ext_pools,
+ u8 *ordered_array,
+ u16 *sizes_array);
+
+#endif /* __FM_SP_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
new file mode 100644
index 000000000..5a4be54ad
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -0,0 +1,831 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "fman_tgec.h"
+#include "fman.h"
+#include "mac.h"
+
+#include <linux/slab.h>
+#include <linux/bitrev.h>
+#include <linux/io.h>
+#include <linux/crc32.h>
+
+/* Transmit Inter-Packet Gap Length Register (TX_IPG_LENGTH) */
+#define TGEC_TX_IPG_LENGTH_MASK 0x000003ff
+
+/* Command and Configuration Register (COMMAND_CONFIG) */
+#define CMD_CFG_EN_TIMESTAMP 0x00100000
+#define CMD_CFG_NO_LEN_CHK 0x00020000
+#define CMD_CFG_PAUSE_IGNORE 0x00000100
+#define CMF_CFG_CRC_FWD 0x00000040
+#define CMD_CFG_PROMIS_EN 0x00000010
+#define CMD_CFG_RX_EN 0x00000002
+#define CMD_CFG_TX_EN 0x00000001
+
+/* Interrupt Mask Register (IMASK) */
+#define TGEC_IMASK_MDIO_SCAN_EVENT 0x00010000
+#define TGEC_IMASK_MDIO_CMD_CMPL 0x00008000
+#define TGEC_IMASK_REM_FAULT 0x00004000
+#define TGEC_IMASK_LOC_FAULT 0x00002000
+#define TGEC_IMASK_TX_ECC_ER 0x00001000
+#define TGEC_IMASK_TX_FIFO_UNFL 0x00000800
+#define TGEC_IMASK_TX_FIFO_OVFL 0x00000400
+#define TGEC_IMASK_TX_ER 0x00000200
+#define TGEC_IMASK_RX_FIFO_OVFL 0x00000100
+#define TGEC_IMASK_RX_ECC_ER 0x00000080
+#define TGEC_IMASK_RX_JAB_FRM 0x00000040
+#define TGEC_IMASK_RX_OVRSZ_FRM 0x00000020
+#define TGEC_IMASK_RX_RUNT_FRM 0x00000010
+#define TGEC_IMASK_RX_FRAG_FRM 0x00000008
+#define TGEC_IMASK_RX_LEN_ER 0x00000004
+#define TGEC_IMASK_RX_CRC_ER 0x00000002
+#define TGEC_IMASK_RX_ALIGN_ER 0x00000001
+
+/* Hashtable Control Register (HASHTABLE_CTRL) */
+#define TGEC_HASH_MCAST_SHIFT 23
+#define TGEC_HASH_MCAST_EN 0x00000200
+#define TGEC_HASH_ADR_MSK 0x000001ff
+
+#define DEFAULT_TX_IPG_LENGTH 12
+#define DEFAULT_MAX_FRAME_LENGTH 0x600
+#define DEFAULT_PAUSE_QUANT 0xf000
+
+/* number of pattern match registers (entries) */
+#define TGEC_NUM_OF_PADDRS 1
+
+/* Group address bit indication */
+#define GROUP_ADDRESS 0x0000010000000000LL
+
+/* Hash table size (= 32 bits*8 regs) */
+#define TGEC_HASH_TABLE_SIZE 512
+
+/* tGEC memory map */
+struct tgec_regs {
+ u32 tgec_id; /* 0x000 Controller ID */
+ u32 reserved001[1]; /* 0x004 */
+ u32 command_config; /* 0x008 Control and configuration */
+ u32 mac_addr_0; /* 0x00c Lower 32 bits of the MAC adr */
+ u32 mac_addr_1; /* 0x010 Upper 16 bits of the MAC adr */
+ u32 maxfrm; /* 0x014 Maximum frame length */
+ u32 pause_quant; /* 0x018 Pause quanta */
+ u32 rx_fifo_sections; /* 0x01c */
+ u32 tx_fifo_sections; /* 0x020 */
+ u32 rx_fifo_almost_f_e; /* 0x024 */
+ u32 tx_fifo_almost_f_e; /* 0x028 */
+ u32 hashtable_ctrl; /* 0x02c Hash table control */
+ u32 mdio_cfg_status; /* 0x030 */
+ u32 mdio_command; /* 0x034 */
+ u32 mdio_data; /* 0x038 */
+ u32 mdio_regaddr; /* 0x03c */
+ u32 status; /* 0x040 */
+ u32 tx_ipg_len; /* 0x044 Transmitter inter-packet-gap */
+ u32 mac_addr_2; /* 0x048 Lower 32 bits of 2nd MAC adr */
+ u32 mac_addr_3; /* 0x04c Upper 16 bits of 2nd MAC adr */
+ u32 rx_fifo_ptr_rd; /* 0x050 */
+ u32 rx_fifo_ptr_wr; /* 0x054 */
+ u32 tx_fifo_ptr_rd; /* 0x058 */
+ u32 tx_fifo_ptr_wr; /* 0x05c */
+ u32 imask; /* 0x060 Interrupt mask */
+ u32 ievent; /* 0x064 Interrupt event */
+ u32 udp_port; /* 0x068 Defines a UDP Port number */
+ u32 type_1588v2; /* 0x06c Type field for 1588v2 */
+ u32 reserved070[4]; /* 0x070 */
+ /* 10Ge Statistics Counter */
+ u32 tfrm_u; /* 80 aFramesTransmittedOK */
+ u32 tfrm_l; /* 84 aFramesTransmittedOK */
+ u32 rfrm_u; /* 88 aFramesReceivedOK */
+ u32 rfrm_l; /* 8c aFramesReceivedOK */
+ u32 rfcs_u; /* 90 aFrameCheckSequenceErrors */
+ u32 rfcs_l; /* 94 aFrameCheckSequenceErrors */
+ u32 raln_u; /* 98 aAlignmentErrors */
+ u32 raln_l; /* 9c aAlignmentErrors */
+ u32 txpf_u; /* A0 aPAUSEMACCtrlFramesTransmitted */
+ u32 txpf_l; /* A4 aPAUSEMACCtrlFramesTransmitted */
+ u32 rxpf_u; /* A8 aPAUSEMACCtrlFramesReceived */
+ u32 rxpf_l; /* Ac aPAUSEMACCtrlFramesReceived */
+ u32 rlong_u; /* B0 aFrameTooLongErrors */
+ u32 rlong_l; /* B4 aFrameTooLongErrors */
+ u32 rflr_u; /* B8 aInRangeLengthErrors */
+ u32 rflr_l; /* Bc aInRangeLengthErrors */
+ u32 tvlan_u; /* C0 VLANTransmittedOK */
+ u32 tvlan_l; /* C4 VLANTransmittedOK */
+ u32 rvlan_u; /* C8 VLANReceivedOK */
+ u32 rvlan_l; /* Cc VLANReceivedOK */
+ u32 toct_u; /* D0 if_out_octets */
+ u32 toct_l; /* D4 if_out_octets */
+ u32 roct_u; /* D8 if_in_octets */
+ u32 roct_l; /* Dc if_in_octets */
+ u32 ruca_u; /* E0 if_in_ucast_pkts */
+ u32 ruca_l; /* E4 if_in_ucast_pkts */
+ u32 rmca_u; /* E8 ifInMulticastPkts */
+ u32 rmca_l; /* Ec ifInMulticastPkts */
+ u32 rbca_u; /* F0 ifInBroadcastPkts */
+ u32 rbca_l; /* F4 ifInBroadcastPkts */
+ u32 terr_u; /* F8 if_out_errors */
+ u32 terr_l; /* Fc if_out_errors */
+ u32 reserved100[2]; /* 100-108 */
+ u32 tuca_u; /* 108 if_out_ucast_pkts */
+ u32 tuca_l; /* 10c if_out_ucast_pkts */
+ u32 tmca_u; /* 110 ifOutMulticastPkts */
+ u32 tmca_l; /* 114 ifOutMulticastPkts */
+ u32 tbca_u; /* 118 ifOutBroadcastPkts */
+ u32 tbca_l; /* 11c ifOutBroadcastPkts */
+ u32 rdrp_u; /* 120 etherStatsDropEvents */
+ u32 rdrp_l; /* 124 etherStatsDropEvents */
+ u32 reoct_u; /* 128 etherStatsOctets */
+ u32 reoct_l; /* 12c etherStatsOctets */
+ u32 rpkt_u; /* 130 etherStatsPkts */
+ u32 rpkt_l; /* 134 etherStatsPkts */
+ u32 trund_u; /* 138 etherStatsUndersizePkts */
+ u32 trund_l; /* 13c etherStatsUndersizePkts */
+ u32 r64_u; /* 140 etherStatsPkts64Octets */
+ u32 r64_l; /* 144 etherStatsPkts64Octets */
+ u32 r127_u; /* 148 etherStatsPkts65to127Octets */
+ u32 r127_l; /* 14c etherStatsPkts65to127Octets */
+ u32 r255_u; /* 150 etherStatsPkts128to255Octets */
+ u32 r255_l; /* 154 etherStatsPkts128to255Octets */
+ u32 r511_u; /* 158 etherStatsPkts256to511Octets */
+ u32 r511_l; /* 15c etherStatsPkts256to511Octets */
+ u32 r1023_u; /* 160 etherStatsPkts512to1023Octets */
+ u32 r1023_l; /* 164 etherStatsPkts512to1023Octets */
+ u32 r1518_u; /* 168 etherStatsPkts1024to1518Octets */
+ u32 r1518_l; /* 16c etherStatsPkts1024to1518Octets */
+ u32 r1519x_u; /* 170 etherStatsPkts1519toX */
+ u32 r1519x_l; /* 174 etherStatsPkts1519toX */
+ u32 trovr_u; /* 178 etherStatsOversizePkts */
+ u32 trovr_l; /* 17c etherStatsOversizePkts */
+ u32 trjbr_u; /* 180 etherStatsJabbers */
+ u32 trjbr_l; /* 184 etherStatsJabbers */
+ u32 trfrg_u; /* 188 etherStatsFragments */
+ u32 trfrg_l; /* 18C etherStatsFragments */
+ u32 rerr_u; /* 190 if_in_errors */
+ u32 rerr_l; /* 194 if_in_errors */
+};
+
+struct tgec_cfg {
+ bool pause_ignore;
+ bool promiscuous_mode_enable;
+ u16 max_frame_length;
+ u16 pause_quant;
+ u32 tx_ipg_length;
+};
+
+struct fman_mac {
+ /* Pointer to the memory mapped registers. */
+ struct tgec_regs __iomem *regs;
+ /* MAC address of device; */
+ u64 addr;
+ u16 max_speed;
+ struct mac_device *dev_id; /* device cookie used by the exception cbs */
+ fman_mac_exception_cb *exception_cb;
+ fman_mac_exception_cb *event_cb;
+ /* pointer to driver's global address hash table */
+ struct eth_hash_t *multicast_addr_hash;
+ /* pointer to driver's individual address hash table */
+ struct eth_hash_t *unicast_addr_hash;
+ u8 mac_id;
+ u32 exceptions;
+ struct tgec_cfg *cfg;
+ void *fm;
+ struct fman_rev_info fm_rev_info;
+ bool allmulti_enabled;
+};
+
+static void set_mac_address(struct tgec_regs __iomem *regs, const u8 *adr)
+{
+ u32 tmp0, tmp1;
+
+ tmp0 = (u32)(adr[0] | adr[1] << 8 | adr[2] << 16 | adr[3] << 24);
+ tmp1 = (u32)(adr[4] | adr[5] << 8);
+ iowrite32be(tmp0, &regs->mac_addr_0);
+ iowrite32be(tmp1, &regs->mac_addr_1);
+}
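+
+/* Illustrative example (not part of the upstream driver): for the sample
+ * address 00:04:9f:01:02:03, set_mac_address() above writes
+ * mac_addr_0 = 0x019f0400 (adr[0] in the least significant byte) and
+ * mac_addr_1 = 0x00000302 (adr[4] in the least significant byte).
+ */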
+
+static void set_dflts(struct tgec_cfg *cfg)
+{
+ cfg->promiscuous_mode_enable = false;
+ cfg->pause_ignore = false;
+ cfg->tx_ipg_length = DEFAULT_TX_IPG_LENGTH;
+ cfg->max_frame_length = DEFAULT_MAX_FRAME_LENGTH;
+ cfg->pause_quant = DEFAULT_PAUSE_QUANT;
+}
+
+static int init(struct tgec_regs __iomem *regs, struct tgec_cfg *cfg,
+ u32 exception_mask)
+{
+ u32 tmp;
+
+ /* Config */
+ tmp = CMF_CFG_CRC_FWD;
+ if (cfg->promiscuous_mode_enable)
+ tmp |= CMD_CFG_PROMIS_EN;
+ if (cfg->pause_ignore)
+ tmp |= CMD_CFG_PAUSE_IGNORE;
+ /* Payload length check disable */
+ tmp |= CMD_CFG_NO_LEN_CHK;
+ iowrite32be(tmp, &regs->command_config);
+
+ /* Max Frame Length */
+ iowrite32be((u32)cfg->max_frame_length, &regs->maxfrm);
+ /* Pause Time */
+ iowrite32be(cfg->pause_quant, &regs->pause_quant);
+
+ /* clear all pending events and set-up interrupts */
+ iowrite32be(0xffffffff, &regs->ievent);
+ iowrite32be(ioread32be(&regs->imask) | exception_mask, &regs->imask);
+
+ return 0;
+}
+
+static int check_init_parameters(struct fman_mac *tgec)
+{
+ if (tgec->max_speed < SPEED_10000) {
+ pr_err("10G MAC driver supports only 10G speed\n");
+ return -EINVAL;
+ }
+ if (!tgec->exception_cb) {
+ pr_err("uninitialized exception_cb\n");
+ return -EINVAL;
+ }
+ if (!tgec->event_cb) {
+ pr_err("uninitialized event_cb\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int get_exception_flag(enum fman_mac_exceptions exception)
+{
+ u32 bit_mask;
+
+ switch (exception) {
+ case FM_MAC_EX_10G_MDIO_SCAN_EVENT:
+ bit_mask = TGEC_IMASK_MDIO_SCAN_EVENT;
+ break;
+ case FM_MAC_EX_10G_MDIO_CMD_CMPL:
+ bit_mask = TGEC_IMASK_MDIO_CMD_CMPL;
+ break;
+ case FM_MAC_EX_10G_REM_FAULT:
+ bit_mask = TGEC_IMASK_REM_FAULT;
+ break;
+ case FM_MAC_EX_10G_LOC_FAULT:
+ bit_mask = TGEC_IMASK_LOC_FAULT;
+ break;
+ case FM_MAC_EX_10G_TX_ECC_ER:
+ bit_mask = TGEC_IMASK_TX_ECC_ER;
+ break;
+ case FM_MAC_EX_10G_TX_FIFO_UNFL:
+ bit_mask = TGEC_IMASK_TX_FIFO_UNFL;
+ break;
+ case FM_MAC_EX_10G_TX_FIFO_OVFL:
+ bit_mask = TGEC_IMASK_TX_FIFO_OVFL;
+ break;
+ case FM_MAC_EX_10G_TX_ER:
+ bit_mask = TGEC_IMASK_TX_ER;
+ break;
+ case FM_MAC_EX_10G_RX_FIFO_OVFL:
+ bit_mask = TGEC_IMASK_RX_FIFO_OVFL;
+ break;
+ case FM_MAC_EX_10G_RX_ECC_ER:
+ bit_mask = TGEC_IMASK_RX_ECC_ER;
+ break;
+ case FM_MAC_EX_10G_RX_JAB_FRM:
+ bit_mask = TGEC_IMASK_RX_JAB_FRM;
+ break;
+ case FM_MAC_EX_10G_RX_OVRSZ_FRM:
+ bit_mask = TGEC_IMASK_RX_OVRSZ_FRM;
+ break;
+ case FM_MAC_EX_10G_RX_RUNT_FRM:
+ bit_mask = TGEC_IMASK_RX_RUNT_FRM;
+ break;
+ case FM_MAC_EX_10G_RX_FRAG_FRM:
+ bit_mask = TGEC_IMASK_RX_FRAG_FRM;
+ break;
+ case FM_MAC_EX_10G_RX_LEN_ER:
+ bit_mask = TGEC_IMASK_RX_LEN_ER;
+ break;
+ case FM_MAC_EX_10G_RX_CRC_ER:
+ bit_mask = TGEC_IMASK_RX_CRC_ER;
+ break;
+ case FM_MAC_EX_10G_RX_ALIGN_ER:
+ bit_mask = TGEC_IMASK_RX_ALIGN_ER;
+ break;
+ default:
+ bit_mask = 0;
+ break;
+ }
+
+ return bit_mask;
+}
+
+static void tgec_err_exception(void *handle)
+{
+ struct fman_mac *tgec = (struct fman_mac *)handle;
+ struct tgec_regs __iomem *regs = tgec->regs;
+ u32 event;
+
+ /* do not handle MDIO events */
+ event = ioread32be(&regs->ievent) &
+ ~(TGEC_IMASK_MDIO_SCAN_EVENT |
+ TGEC_IMASK_MDIO_CMD_CMPL);
+
+ event &= ioread32be(&regs->imask);
+
+ iowrite32be(event, &regs->ievent);
+
+ if (event & TGEC_IMASK_REM_FAULT)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_REM_FAULT);
+ if (event & TGEC_IMASK_LOC_FAULT)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_LOC_FAULT);
+ if (event & TGEC_IMASK_TX_ECC_ER)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_ECC_ER);
+ if (event & TGEC_IMASK_TX_FIFO_UNFL)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_FIFO_UNFL);
+ if (event & TGEC_IMASK_TX_FIFO_OVFL)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_FIFO_OVFL);
+ if (event & TGEC_IMASK_TX_ER)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_ER);
+ if (event & TGEC_IMASK_RX_FIFO_OVFL)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_FIFO_OVFL);
+ if (event & TGEC_IMASK_RX_ECC_ER)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_ECC_ER);
+ if (event & TGEC_IMASK_RX_JAB_FRM)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_JAB_FRM);
+ if (event & TGEC_IMASK_RX_OVRSZ_FRM)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_OVRSZ_FRM);
+ if (event & TGEC_IMASK_RX_RUNT_FRM)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_RUNT_FRM);
+ if (event & TGEC_IMASK_RX_FRAG_FRM)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_FRAG_FRM);
+ if (event & TGEC_IMASK_RX_LEN_ER)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_LEN_ER);
+ if (event & TGEC_IMASK_RX_CRC_ER)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_CRC_ER);
+ if (event & TGEC_IMASK_RX_ALIGN_ER)
+ tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_ALIGN_ER);
+}
+
+static void free_init_resources(struct fman_mac *tgec)
+{
+ fman_unregister_intr(tgec->fm, FMAN_MOD_MAC, tgec->mac_id,
+ FMAN_INTR_TYPE_ERR);
+
+ /* release the driver's group hash table */
+ free_hash_table(tgec->multicast_addr_hash);
+ tgec->multicast_addr_hash = NULL;
+
+ /* release the driver's individual hash table */
+ free_hash_table(tgec->unicast_addr_hash);
+ tgec->unicast_addr_hash = NULL;
+}
+
+static bool is_init_done(struct tgec_cfg *cfg)
+{
+ /* The cfg structure is freed and set to NULL once tgec_init()
+ * completes, so a NULL cfg means initialization is done.
+ */
+ if (!cfg)
+ return true;
+
+ return false;
+}
+
+static int tgec_enable(struct fman_mac *tgec)
+{
+ struct tgec_regs __iomem *regs = tgec->regs;
+ u32 tmp;
+
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->command_config);
+ tmp |= CMD_CFG_RX_EN | CMD_CFG_TX_EN;
+ iowrite32be(tmp, &regs->command_config);
+
+ return 0;
+}
+
+static void tgec_disable(struct fman_mac *tgec)
+{
+ struct tgec_regs __iomem *regs = tgec->regs;
+ u32 tmp;
+
+ WARN_ON_ONCE(!is_init_done(tgec->cfg));
+
+ tmp = ioread32be(&regs->command_config);
+ tmp &= ~(CMD_CFG_RX_EN | CMD_CFG_TX_EN);
+ iowrite32be(tmp, &regs->command_config);
+}
+
+static int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val)
+{
+ struct tgec_regs __iomem *regs = tgec->regs;
+ u32 tmp;
+
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->command_config);
+ if (new_val)
+ tmp |= CMD_CFG_PROMIS_EN;
+ else
+ tmp &= ~CMD_CFG_PROMIS_EN;
+ iowrite32be(tmp, &regs->command_config);
+
+ return 0;
+}
+
+static int tgec_set_tx_pause_frames(struct fman_mac *tgec,
+ u8 __maybe_unused priority, u16 pause_time,
+ u16 __maybe_unused thresh_time)
+{
+ struct tgec_regs __iomem *regs = tgec->regs;
+
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ iowrite32be((u32)pause_time, &regs->pause_quant);
+
+ return 0;
+}
+
+static int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en)
+{
+ struct tgec_regs __iomem *regs = tgec->regs;
+ u32 tmp;
+
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->command_config);
+ if (!en)
+ tmp |= CMD_CFG_PAUSE_IGNORE;
+ else
+ tmp &= ~CMD_CFG_PAUSE_IGNORE;
+ iowrite32be(tmp, &regs->command_config);
+
+ return 0;
+}
+
+static int tgec_modify_mac_address(struct fman_mac *tgec,
+ const enet_addr_t *p_enet_addr)
+{
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ tgec->addr = ENET_ADDR_TO_UINT64(*p_enet_addr);
+ set_mac_address(tgec->regs, (const u8 *)(*p_enet_addr));
+
+ return 0;
+}
+
+static int tgec_add_hash_mac_address(struct fman_mac *tgec,
+ enet_addr_t *eth_addr)
+{
+ struct tgec_regs __iomem *regs = tgec->regs;
+ struct eth_hash_entry *hash_entry;
+ u32 crc = 0xFFFFFFFF, hash;
+ u64 addr;
+
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ addr = ENET_ADDR_TO_UINT64(*eth_addr);
+
+ if (!(addr & GROUP_ADDRESS)) {
+ /* Unicast addresses not supported in hash */
+ pr_err("Unicast Address\n");
+ return -EINVAL;
+ }
+ /* CRC calculation */
+ crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
+ crc = bitrev32(crc);
+ /* Take 9 MSB bits */
+ hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK;
+
+ /* Create element to be added to the driver hash table */
+ hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC);
+ if (!hash_entry)
+ return -ENOMEM;
+ hash_entry->addr = addr;
+ INIT_LIST_HEAD(&hash_entry->node);
+
+ list_add_tail(&hash_entry->node,
+ &tgec->multicast_addr_hash->lsts[hash]);
+ iowrite32be((hash | TGEC_HASH_MCAST_EN), &regs->hashtable_ctrl);
+
+ return 0;
+}
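+
+/* Illustrative note (not part of the upstream driver): the 9-bit bucket index
+ * is taken from the top bits of the bit-reversed CRC-32. For a hypothetical
+ * CRC that yields hash = 0x0b5, the register write above is
+ * 0x0b5 | TGEC_HASH_MCAST_EN = 0x2b5, enabling bucket 181 while the address
+ * itself is tracked in multicast_addr_hash->lsts[181].
+ */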
+
+static int tgec_set_allmulti(struct fman_mac *tgec, bool enable)
+{
+ u32 entry;
+ struct tgec_regs __iomem *regs = tgec->regs;
+
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ if (enable) {
+ for (entry = 0; entry < TGEC_HASH_TABLE_SIZE; entry++)
+ iowrite32be(entry | TGEC_HASH_MCAST_EN,
+ &regs->hashtable_ctrl);
+ } else {
+ for (entry = 0; entry < TGEC_HASH_TABLE_SIZE; entry++)
+ iowrite32be(entry & ~TGEC_HASH_MCAST_EN,
+ &regs->hashtable_ctrl);
+ }
+
+ tgec->allmulti_enabled = enable;
+
+ return 0;
+}
+
+static int tgec_set_tstamp(struct fman_mac *tgec, bool enable)
+{
+ struct tgec_regs __iomem *regs = tgec->regs;
+ u32 tmp;
+
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->command_config);
+
+ if (enable)
+ tmp |= CMD_CFG_EN_TIMESTAMP;
+ else
+ tmp &= ~CMD_CFG_EN_TIMESTAMP;
+
+ iowrite32be(tmp, &regs->command_config);
+
+ return 0;
+}
+
+static int tgec_del_hash_mac_address(struct fman_mac *tgec,
+ enet_addr_t *eth_addr)
+{
+ struct tgec_regs __iomem *regs = tgec->regs;
+ struct eth_hash_entry *hash_entry = NULL;
+ struct list_head *pos;
+ u32 crc = 0xFFFFFFFF, hash;
+ u64 addr;
+
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ addr = ((*(u64 *)eth_addr) >> 16);
+
+ /* CRC calculation */
+ crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
+ crc = bitrev32(crc);
+ /* Take 9 MSB bits */
+ hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK;
+
+ list_for_each(pos, &tgec->multicast_addr_hash->lsts[hash]) {
+ hash_entry = ETH_HASH_ENTRY_OBJ(pos);
+ if (hash_entry && hash_entry->addr == addr) {
+ list_del_init(&hash_entry->node);
+ kfree(hash_entry);
+ break;
+ }
+ }
+
+ if (!tgec->allmulti_enabled) {
+ if (list_empty(&tgec->multicast_addr_hash->lsts[hash]))
+ iowrite32be((hash & ~TGEC_HASH_MCAST_EN),
+ &regs->hashtable_ctrl);
+ }
+
+ return 0;
+}
+
+static void tgec_adjust_link(struct mac_device *mac_dev)
+{
+ struct phy_device *phy_dev = mac_dev->phy_dev;
+
+ mac_dev->update_speed(mac_dev, phy_dev->speed);
+}
+
+static int tgec_set_exception(struct fman_mac *tgec,
+ enum fman_mac_exceptions exception, bool enable)
+{
+ struct tgec_regs __iomem *regs = tgec->regs;
+ u32 bit_mask = 0;
+
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ bit_mask = get_exception_flag(exception);
+ if (bit_mask) {
+ if (enable)
+ tgec->exceptions |= bit_mask;
+ else
+ tgec->exceptions &= ~bit_mask;
+ } else {
+ pr_err("Undefined exception\n");
+ return -EINVAL;
+ }
+ if (enable)
+ iowrite32be(ioread32be(&regs->imask) | bit_mask, &regs->imask);
+ else
+ iowrite32be(ioread32be(&regs->imask) & ~bit_mask, &regs->imask);
+
+ return 0;
+}
+
+static int tgec_init(struct fman_mac *tgec)
+{
+ struct tgec_cfg *cfg;
+ enet_addr_t eth_addr;
+ int err;
+
+ if (is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ if (DEFAULT_RESET_ON_INIT &&
+ (fman_reset_mac(tgec->fm, tgec->mac_id) != 0)) {
+ pr_err("Can't reset MAC!\n");
+ return -EINVAL;
+ }
+
+ err = check_init_parameters(tgec);
+ if (err)
+ return err;
+
+ cfg = tgec->cfg;
+
+ if (tgec->addr) {
+ MAKE_ENET_ADDR_FROM_UINT64(tgec->addr, eth_addr);
+ set_mac_address(tgec->regs, (const u8 *)eth_addr);
+ }
+
+ /* interrupts */
+ /* FM_10G_REM_N_LCL_FLT_EX_10GMAC_ERRATA_SW005 Errata workaround */
+ if (tgec->fm_rev_info.major <= 2)
+ tgec->exceptions &= ~(TGEC_IMASK_REM_FAULT |
+ TGEC_IMASK_LOC_FAULT);
+
+ err = init(tgec->regs, cfg, tgec->exceptions);
+ if (err) {
+ free_init_resources(tgec);
+ pr_err("TGEC version doesn't support this i/f mode\n");
+ return err;
+ }
+
+ /* Max Frame Length */
+ err = fman_set_mac_max_frame(tgec->fm, tgec->mac_id,
+ cfg->max_frame_length);
+ if (err) {
+ pr_err("Setting max frame length FAILED\n");
+ free_init_resources(tgec);
+ return -EINVAL;
+ }
+
+ /* FM_TX_FIFO_CORRUPTION_ERRATA_10GMAC_A007 Errata workaround */
+ if (tgec->fm_rev_info.major == 2) {
+ struct tgec_regs __iomem *regs = tgec->regs;
+ u32 tmp;
+
+ /* restore the default Tx IPG length */
+ tmp = (ioread32be(&regs->tx_ipg_len) &
+ ~TGEC_TX_IPG_LENGTH_MASK) | DEFAULT_TX_IPG_LENGTH;
+
+ iowrite32be(tmp, &regs->tx_ipg_len);
+ }
+
+ tgec->multicast_addr_hash = alloc_hash_table(TGEC_HASH_TABLE_SIZE);
+ if (!tgec->multicast_addr_hash) {
+ free_init_resources(tgec);
+ pr_err("hash table allocation failed\n");
+ return -ENOMEM;
+ }
+
+ tgec->unicast_addr_hash = alloc_hash_table(TGEC_HASH_TABLE_SIZE);
+ if (!tgec->unicast_addr_hash) {
+ free_init_resources(tgec);
+ pr_err("hash table allocation failed\n");
+ return -ENOMEM;
+ }
+
+ fman_register_intr(tgec->fm, FMAN_MOD_MAC, tgec->mac_id,
+ FMAN_INTR_TYPE_ERR, tgec_err_exception, tgec);
+
+ kfree(cfg);
+ tgec->cfg = NULL;
+
+ return 0;
+}
+
+static int tgec_free(struct fman_mac *tgec)
+{
+ free_init_resources(tgec);
+
+ kfree(tgec->cfg);
+ kfree(tgec);
+
+ return 0;
+}
+
+static struct fman_mac *tgec_config(struct mac_device *mac_dev,
+ struct fman_mac_params *params)
+{
+ struct fman_mac *tgec;
+ struct tgec_cfg *cfg;
+
+ /* allocate memory for the 10G MAC (tGEC) data structure. */
+ tgec = kzalloc(sizeof(*tgec), GFP_KERNEL);
+ if (!tgec)
+ return NULL;
+
+ /* allocate memory for the 10G MAC driver parameters data structure. */
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg) {
+ tgec_free(tgec);
+ return NULL;
+ }
+
+ /* Plant parameter structure pointer */
+ tgec->cfg = cfg;
+
+ set_dflts(cfg);
+
+ tgec->regs = mac_dev->vaddr;
+ tgec->addr = ENET_ADDR_TO_UINT64(mac_dev->addr);
+ tgec->max_speed = params->max_speed;
+ tgec->mac_id = params->mac_id;
+ tgec->exceptions = (TGEC_IMASK_MDIO_SCAN_EVENT |
+ TGEC_IMASK_REM_FAULT |
+ TGEC_IMASK_LOC_FAULT |
+ TGEC_IMASK_TX_ECC_ER |
+ TGEC_IMASK_TX_FIFO_UNFL |
+ TGEC_IMASK_TX_FIFO_OVFL |
+ TGEC_IMASK_TX_ER |
+ TGEC_IMASK_RX_FIFO_OVFL |
+ TGEC_IMASK_RX_ECC_ER |
+ TGEC_IMASK_RX_JAB_FRM |
+ TGEC_IMASK_RX_OVRSZ_FRM |
+ TGEC_IMASK_RX_RUNT_FRM |
+ TGEC_IMASK_RX_FRAG_FRM |
+ TGEC_IMASK_RX_CRC_ER |
+ TGEC_IMASK_RX_ALIGN_ER);
+ tgec->exception_cb = params->exception_cb;
+ tgec->event_cb = params->event_cb;
+ tgec->dev_id = mac_dev;
+ tgec->fm = params->fm;
+
+ /* Save FMan revision */
+ fman_get_revision(tgec->fm, &tgec->fm_rev_info);
+
+ return tgec;
+}
+
+int tgec_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params)
+{
+ int err;
+ struct fman_mac *tgec;
+
+ mac_dev->set_promisc = tgec_set_promiscuous;
+ mac_dev->change_addr = tgec_modify_mac_address;
+ mac_dev->add_hash_mac_addr = tgec_add_hash_mac_address;
+ mac_dev->remove_hash_mac_addr = tgec_del_hash_mac_address;
+ mac_dev->set_tx_pause = tgec_set_tx_pause_frames;
+ mac_dev->set_rx_pause = tgec_accept_rx_pause_frames;
+ mac_dev->set_exception = tgec_set_exception;
+ mac_dev->set_allmulti = tgec_set_allmulti;
+ mac_dev->set_tstamp = tgec_set_tstamp;
+ mac_dev->set_multi = fman_set_multi;
+ mac_dev->adjust_link = tgec_adjust_link;
+ mac_dev->enable = tgec_enable;
+ mac_dev->disable = tgec_disable;
+
+ mac_dev->fman_mac = tgec_config(mac_dev, params);
+ if (!mac_dev->fman_mac) {
+ err = -EINVAL;
+ goto _return;
+ }
+
+ tgec = mac_dev->fman_mac;
+ tgec->cfg->max_frame_length = fman_get_max_frm();
+ err = tgec_init(tgec);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ /* For 10G MAC, disable Tx ECC exception */
+ err = tgec_set_exception(tgec, FM_MAC_EX_10G_TX_ECC_ER, false);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ pr_info("FMan XGEC version: 0x%08x\n",
+ ioread32be(&tgec->regs->tgec_id));
+ goto _return;
+
+_return_fm_mac_free:
+ tgec_free(mac_dev->fman_mac);
+
+_return:
+ return err;
+}
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.h b/drivers/net/ethernet/freescale/fman/fman_tgec.h
new file mode 100644
index 000000000..768b8d165
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
+ */
+
+#ifndef __TGEC_H
+#define __TGEC_H
+
+#include "fman_mac.h"
+
+struct mac_device;
+
+int tgec_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params);
+
+#endif /* __TGEC_H */
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
new file mode 100644
index 000000000..13e67f286
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -0,0 +1,507 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/device.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+#include <linux/phy_fixed.h>
+#include <linux/etherdevice.h>
+#include <linux/libfdt_env.h>
+
+#include "mac.h"
+#include "fman_mac.h"
+#include "fman_dtsec.h"
+#include "fman_tgec.h"
+#include "fman_memac.h"
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("FSL FMan MAC API based driver");
+
+struct mac_priv_s {
+ u8 cell_index;
+ struct fman *fman;
+ /* List of multicast addresses */
+ struct list_head mc_addr_list;
+ struct platform_device *eth_dev;
+ u16 speed;
+};
+
+struct mac_address {
+ u8 addr[ETH_ALEN];
+ struct list_head list;
+};
+
+static void mac_exception(struct mac_device *mac_dev,
+ enum fman_mac_exceptions ex)
+{
+ if (ex == FM_MAC_EX_10G_RX_FIFO_OVFL) {
+ /* don't flag RX FIFO overflow again after the first event */
+ mac_dev->set_exception(mac_dev->fman_mac,
+ FM_MAC_EX_10G_RX_FIFO_OVFL, false);
+ dev_err(mac_dev->dev, "10G MAC got RX FIFO Error = %x\n", ex);
+ }
+
+ dev_dbg(mac_dev->dev, "%s:%s() -> %d\n", KBUILD_BASENAME ".c",
+ __func__, ex);
+}
+
+int fman_set_multi(struct net_device *net_dev, struct mac_device *mac_dev)
+{
+ struct mac_priv_s *priv;
+ struct mac_address *old_addr, *tmp;
+ struct netdev_hw_addr *ha;
+ int err;
+ enet_addr_t *addr;
+
+ priv = mac_dev->priv;
+
+ /* Clear previous address list */
+ list_for_each_entry_safe(old_addr, tmp, &priv->mc_addr_list, list) {
+ addr = (enet_addr_t *)old_addr->addr;
+ err = mac_dev->remove_hash_mac_addr(mac_dev->fman_mac, addr);
+ if (err < 0)
+ return err;
+
+ list_del(&old_addr->list);
+ kfree(old_addr);
+ }
+
+ /* Add all the addresses from the new list */
+ netdev_for_each_mc_addr(ha, net_dev) {
+ addr = (enet_addr_t *)ha->addr;
+ err = mac_dev->add_hash_mac_addr(mac_dev->fman_mac, addr);
+ if (err < 0)
+ return err;
+
+ tmp = kmalloc(sizeof(*tmp), GFP_ATOMIC);
+ if (!tmp)
+ return -ENOMEM;
+
+ ether_addr_copy(tmp->addr, ha->addr);
+ list_add(&tmp->list, &priv->mc_addr_list);
+ }
+ return 0;
+}
+
+/**
+ * fman_set_mac_active_pause
+ * @mac_dev: A pointer to the MAC device
+ * @rx: Pause frame setting for RX
+ * @tx: Pause frame setting for TX
+ *
+ * Set the MAC RX/TX PAUSE frames settings
+ *
+ * Avoids redundant calls to FMD if the MAC driver already holds the desired
+ * active PAUSE settings; otherwise the new active settings are reflected
+ * in FMan.
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx)
+{
+ struct fman_mac *fman_mac = mac_dev->fman_mac;
+ int err = 0;
+
+ if (rx != mac_dev->rx_pause_active) {
+ err = mac_dev->set_rx_pause(fman_mac, rx);
+ if (likely(err == 0))
+ mac_dev->rx_pause_active = rx;
+ }
+
+ if (tx != mac_dev->tx_pause_active) {
+ u16 pause_time = (tx ? FSL_FM_PAUSE_TIME_ENABLE :
+ FSL_FM_PAUSE_TIME_DISABLE);
+
+ err = mac_dev->set_tx_pause(fman_mac, 0, pause_time, 0);
+
+ if (likely(err == 0))
+ mac_dev->tx_pause_active = tx;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(fman_set_mac_active_pause);
+
+/**
+ * fman_get_pause_cfg
+ * @mac_dev: A pointer to the MAC device
+ * @rx_pause: Return value for RX setting
+ * @tx_pause: Return value for TX setting
+ *
+ * Determine the MAC RX/TX PAUSE frames settings based on PHY
+ * autonegotiation or values set by ethtool. The resolved settings are
+ * returned through @rx_pause and @tx_pause.
+ */
+void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
+ bool *tx_pause)
+{
+ struct phy_device *phy_dev = mac_dev->phy_dev;
+ u16 lcl_adv, rmt_adv;
+ u8 flowctrl;
+
+ *rx_pause = *tx_pause = false;
+
+ if (!phy_dev->duplex)
+ return;
+
+ /* If PAUSE autonegotiation is disabled, the TX/RX PAUSE settings
+ * are those set by ethtool.
+ */
+ if (!mac_dev->autoneg_pause) {
+ *rx_pause = mac_dev->rx_pause_req;
+ *tx_pause = mac_dev->tx_pause_req;
+ return;
+ }
+
+ /* Else if PAUSE autonegotiation is enabled, the TX/RX PAUSE
+ * settings depend on the result of the link negotiation.
+ */
+
+ /* get local capabilities */
+ lcl_adv = linkmode_adv_to_lcl_adv_t(phy_dev->advertising);
+
+ /* get link partner capabilities */
+ rmt_adv = 0;
+ if (phy_dev->pause)
+ rmt_adv |= LPA_PAUSE_CAP;
+ if (phy_dev->asym_pause)
+ rmt_adv |= LPA_PAUSE_ASYM;
+
+ /* Calculate TX/RX settings based on local and peer advertised
+ * symmetric/asymmetric PAUSE capabilities.
+ */
+ flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+ if (flowctrl & FLOW_CTRL_RX)
+ *rx_pause = true;
+ if (flowctrl & FLOW_CTRL_TX)
+ *tx_pause = true;
+}
+EXPORT_SYMBOL(fman_get_pause_cfg);
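+
+/* Illustrative sketch (not part of this file): how a caller such as the DPAA
+ * Ethernet driver is expected to combine the two helpers above when the PHY
+ * reports a link change; the function name is hypothetical. When both link
+ * partners advertise symmetric Pause, mii_resolve_flowctrl_fdx() resolves to
+ * rx_pause = tx_pause = true.
+ *
+ *   static void example_apply_pause(struct mac_device *mac_dev)
+ *   {
+ *           bool rx_pause, tx_pause;
+ *           int err;
+ *
+ *           fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
+ *           err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
+ *           if (err < 0)
+ *                   dev_err(mac_dev->dev, "set_mac_active_pause() = %d\n",
+ *                           err);
+ *   }
+ */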
+
+#define DTSEC_SUPPORTED \
+ (SUPPORTED_10baseT_Half \
+ | SUPPORTED_10baseT_Full \
+ | SUPPORTED_100baseT_Half \
+ | SUPPORTED_100baseT_Full \
+ | SUPPORTED_Autoneg \
+ | SUPPORTED_Pause \
+ | SUPPORTED_Asym_Pause \
+ | SUPPORTED_FIBRE \
+ | SUPPORTED_MII)
+
+static DEFINE_MUTEX(eth_lock);
+
+static const u16 phy2speed[] = {
+ [PHY_INTERFACE_MODE_MII] = SPEED_100,
+ [PHY_INTERFACE_MODE_GMII] = SPEED_1000,
+ [PHY_INTERFACE_MODE_SGMII] = SPEED_1000,
+ [PHY_INTERFACE_MODE_TBI] = SPEED_1000,
+ [PHY_INTERFACE_MODE_RMII] = SPEED_100,
+ [PHY_INTERFACE_MODE_RGMII] = SPEED_1000,
+ [PHY_INTERFACE_MODE_RGMII_ID] = SPEED_1000,
+ [PHY_INTERFACE_MODE_RGMII_RXID] = SPEED_1000,
+ [PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000,
+ [PHY_INTERFACE_MODE_RTBI] = SPEED_1000,
+ [PHY_INTERFACE_MODE_QSGMII] = SPEED_1000,
+ [PHY_INTERFACE_MODE_XGMII] = SPEED_10000
+};
+
+static struct platform_device *dpaa_eth_add_device(int fman_id,
+ struct mac_device *mac_dev)
+{
+ struct platform_device *pdev;
+ struct dpaa_eth_data data;
+ struct mac_priv_s *priv;
+ static int dpaa_eth_dev_cnt;
+ int ret;
+
+ priv = mac_dev->priv;
+
+ data.mac_dev = mac_dev;
+ data.mac_hw_id = priv->cell_index;
+ data.fman_hw_id = fman_id;
+
+ mutex_lock(&eth_lock);
+ pdev = platform_device_alloc("dpaa-ethernet", dpaa_eth_dev_cnt);
+ if (!pdev) {
+ ret = -ENOMEM;
+ goto no_mem;
+ }
+
+ pdev->dev.parent = mac_dev->dev;
+
+ ret = platform_device_add_data(pdev, &data, sizeof(data));
+ if (ret)
+ goto err;
+
+ ret = platform_device_add(pdev);
+ if (ret)
+ goto err;
+
+ dpaa_eth_dev_cnt++;
+ mutex_unlock(&eth_lock);
+
+ return pdev;
+
+err:
+ platform_device_put(pdev);
+no_mem:
+ mutex_unlock(&eth_lock);
+
+ return ERR_PTR(ret);
+}
+
+static const struct of_device_id mac_match[] = {
+ { .compatible = "fsl,fman-dtsec", .data = dtsec_initialization },
+ { .compatible = "fsl,fman-xgec", .data = tgec_initialization },
+ { .compatible = "fsl,fman-memac", .data = memac_initialization },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mac_match);
+
+static int mac_probe(struct platform_device *_of_dev)
+{
+ int err, i, nph;
+ int (*init)(struct mac_device *mac_dev, struct device_node *mac_node,
+ struct fman_mac_params *params);
+ struct device *dev;
+ struct device_node *mac_node, *dev_node;
+ struct mac_device *mac_dev;
+ struct platform_device *of_dev;
+ struct mac_priv_s *priv;
+ struct fman_mac_params params;
+ u32 val;
+ u8 fman_id;
+ phy_interface_t phy_if;
+
+ dev = &_of_dev->dev;
+ mac_node = dev->of_node;
+ init = of_device_get_match_data(dev);
+
+ mac_dev = devm_kzalloc(dev, sizeof(*mac_dev), GFP_KERNEL);
+ if (!mac_dev)
+ return -ENOMEM;
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Save private information */
+ mac_dev->priv = priv;
+ mac_dev->dev = dev;
+
+ INIT_LIST_HEAD(&priv->mc_addr_list);
+
+ /* Get the FM node */
+ dev_node = of_get_parent(mac_node);
+ if (!dev_node) {
+ dev_err(dev, "of_get_parent(%pOF) failed\n",
+ mac_node);
+ return -EINVAL;
+ }
+
+ of_dev = of_find_device_by_node(dev_node);
+ if (!of_dev) {
+ dev_err(dev, "of_find_device_by_node(%pOF) failed\n", dev_node);
+ err = -EINVAL;
+ goto _return_of_node_put;
+ }
+
+ /* Get the FMan cell-index */
+ err = of_property_read_u32(dev_node, "cell-index", &val);
+ if (err) {
+ dev_err(dev, "failed to read cell-index for %pOF\n", dev_node);
+ err = -EINVAL;
+ goto _return_of_node_put;
+ }
+ /* cell-index 0 => FMan id 1 */
+ fman_id = (u8)(val + 1);
+
+ priv->fman = fman_bind(&of_dev->dev);
+ if (!priv->fman) {
+ dev_err(dev, "fman_bind(%pOF) failed\n", dev_node);
+ err = -ENODEV;
+ goto _return_of_node_put;
+ }
+
+ of_node_put(dev_node);
+
+ /* Get the address of the memory mapped registers */
+ mac_dev->res = platform_get_mem_or_io(_of_dev, 0);
+ if (!mac_dev->res) {
+ dev_err(dev, "could not get registers\n");
+ return -EINVAL;
+ }
+
+ err = devm_request_resource(dev, fman_get_mem_region(priv->fman),
+ mac_dev->res);
+ if (err) {
+ dev_err_probe(dev, err, "could not request resource\n");
+ return err;
+ }
+
+ mac_dev->vaddr = devm_ioremap(dev, mac_dev->res->start,
+ resource_size(mac_dev->res));
+ if (!mac_dev->vaddr) {
+ dev_err(dev, "devm_ioremap() failed\n");
+ return -EIO;
+ }
+
+ if (!of_device_is_available(mac_node))
+ return -ENODEV;
+
+ /* Get the cell-index */
+ err = of_property_read_u32(mac_node, "cell-index", &val);
+ if (err) {
+ dev_err(dev, "failed to read cell-index for %pOF\n", mac_node);
+ return -EINVAL;
+ }
+ priv->cell_index = (u8)val;
+
+ /* Get the MAC address */
+ err = of_get_mac_address(mac_node, mac_dev->addr);
+ if (err)
+ dev_warn(dev, "of_get_mac_address(%pOF) failed\n", mac_node);
+
+ /* Get the port handles */
+ nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL);
+ if (unlikely(nph < 0)) {
+ dev_err(dev, "of_count_phandle_with_args(%pOF, fsl,fman-ports) failed\n",
+ mac_node);
+ return nph;
+ }
+
+ if (nph != ARRAY_SIZE(mac_dev->port)) {
+ dev_err(dev, "Unsupported number of fsl,fman-ports phandles for MAC node %pOF\n",
+ mac_node);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
+ /* Find the port node */
+ dev_node = of_parse_phandle(mac_node, "fsl,fman-ports", i);
+ if (!dev_node) {
+ dev_err(dev, "of_parse_phandle(%pOF, fsl,fman-ports) failed\n",
+ mac_node);
+ return -EINVAL;
+ }
+
+ of_dev = of_find_device_by_node(dev_node);
+ if (!of_dev) {
+ dev_err(dev, "of_find_device_by_node(%pOF) failed\n",
+ dev_node);
+ err = -EINVAL;
+ goto _return_of_node_put;
+ }
+
+ mac_dev->port[i] = fman_port_bind(&of_dev->dev);
+ if (!mac_dev->port[i]) {
+ dev_err(dev, "dev_get_drvdata(%pOF) failed\n",
+ dev_node);
+ err = -EINVAL;
+ goto _return_of_node_put;
+ }
+ of_node_put(dev_node);
+ }
+
+ /* Get the PHY connection type */
+ err = of_get_phy_mode(mac_node, &phy_if);
+ if (err) {
+ dev_warn(dev,
+ "of_get_phy_mode() for %pOF failed. Defaulting to SGMII\n",
+ mac_node);
+ phy_if = PHY_INTERFACE_MODE_SGMII;
+ }
+ mac_dev->phy_if = phy_if;
+
+ priv->speed = phy2speed[mac_dev->phy_if];
+ params.max_speed = priv->speed;
+ mac_dev->if_support = DTSEC_SUPPORTED;
+ /* We don't support half-duplex in SGMII mode */
+ if (mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII)
+ mac_dev->if_support &= ~(SUPPORTED_10baseT_Half |
+ SUPPORTED_100baseT_Half);
+
+ /* Gigabit support (no half-duplex) */
+ if (params.max_speed == 1000)
+ mac_dev->if_support |= SUPPORTED_1000baseT_Full;
+
+ /* The 10G interface only supports one mode */
+ if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
+ mac_dev->if_support = SUPPORTED_10000baseT_Full;
+
+ /* Get the rest of the PHY information */
+ mac_dev->phy_node = of_parse_phandle(mac_node, "phy-handle", 0);
+
+ params.basex_if = false;
+ params.mac_id = priv->cell_index;
+ params.fm = (void *)priv->fman;
+ params.exception_cb = mac_exception;
+ params.event_cb = mac_exception;
+
+ err = init(mac_dev, mac_node, &params);
+ if (err < 0) {
+ dev_err(dev, "mac_dev->init() = %d\n", err);
+ of_node_put(mac_dev->phy_node);
+ return err;
+ }
+
+ /* pause frame autonegotiation enabled */
+ mac_dev->autoneg_pause = true;
+
+ /* Initializing the active-state values to false forces the first
+ * fman_set_mac_active_pause() call below to enable PAUSE frames
+ * on both RX and TX in FMD.
+ */
+ mac_dev->rx_pause_req = true;
+ mac_dev->tx_pause_req = true;
+ mac_dev->rx_pause_active = false;
+ mac_dev->tx_pause_active = false;
+ err = fman_set_mac_active_pause(mac_dev, true, true);
+ if (err < 0)
+ dev_err(dev, "fman_set_mac_active_pause() = %d\n", err);
+
+ if (!is_zero_ether_addr(mac_dev->addr))
+ dev_info(dev, "FMan MAC address: %pM\n", mac_dev->addr);
+
+ priv->eth_dev = dpaa_eth_add_device(fman_id, mac_dev);
+ if (IS_ERR(priv->eth_dev)) {
+ dev_err(dev, "failed to add Ethernet platform device for MAC %d\n",
+ priv->cell_index);
+ priv->eth_dev = NULL;
+ }
+
+ return err;
+
+_return_of_node_put:
+ of_node_put(dev_node);
+ return err;
+}
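+
+/* Illustrative device-tree fragment (not part of this file) showing the
+ * properties mac_probe() above looks up. Node name, unit address and phandle
+ * labels are hypothetical; the MAC address and PHY mode are read through
+ * of_get_mac_address() and of_get_phy_mode() with their generic property
+ * names.
+ *
+ *   ethernet@e0000 {
+ *           compatible = "fsl,fman-memac";
+ *           cell-index = <0>;
+ *           reg = <0xe0000 0x1000>;
+ *           fsl,fman-ports = <&fman0_rx_0x08 &fman0_tx_0x28>;
+ *           phy-handle = <&phy0>;
+ *           phy-connection-type = "sgmii";
+ *   };
+ */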
+
+static int mac_remove(struct platform_device *pdev)
+{
+ struct mac_device *mac_dev = platform_get_drvdata(pdev);
+
+ platform_device_unregister(mac_dev->priv->eth_dev);
+ return 0;
+}
+
+static struct platform_driver mac_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = mac_match,
+ },
+ .probe = mac_probe,
+ .remove = mac_remove,
+};
+
+builtin_platform_driver(mac_driver);
diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
new file mode 100644
index 000000000..13b69ca5f
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fman/mac.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
+ */
+
+#ifndef __MAC_H
+#define __MAC_H
+
+#include <linux/device.h>
+#include <linux/if_ether.h>
+#include <linux/phy.h>
+#include <linux/list.h>
+
+#include "fman_port.h"
+#include "fman.h"
+#include "fman_mac.h"
+
+struct fman_mac;
+struct mac_priv_s;
+
+struct mac_device {
+ void __iomem *vaddr;
+ struct device *dev;
+ struct resource *res;
+ u8 addr[ETH_ALEN];
+ struct fman_port *port[2];
+ u32 if_support;
+ struct phy_device *phy_dev;
+ phy_interface_t phy_if;
+ struct device_node *phy_node;
+ struct net_device *net_dev;
+
+ bool autoneg_pause;
+ bool rx_pause_req;
+ bool tx_pause_req;
+ bool rx_pause_active;
+ bool tx_pause_active;
+ bool promisc;
+ bool allmulti;
+
+ int (*enable)(struct fman_mac *mac_dev);
+ void (*disable)(struct fman_mac *mac_dev);
+ void (*adjust_link)(struct mac_device *mac_dev);
+ int (*set_promisc)(struct fman_mac *mac_dev, bool enable);
+ int (*change_addr)(struct fman_mac *mac_dev, const enet_addr_t *enet_addr);
+ int (*set_allmulti)(struct fman_mac *mac_dev, bool enable);
+ int (*set_tstamp)(struct fman_mac *mac_dev, bool enable);
+ int (*set_multi)(struct net_device *net_dev,
+ struct mac_device *mac_dev);
+ int (*set_rx_pause)(struct fman_mac *mac_dev, bool en);
+ int (*set_tx_pause)(struct fman_mac *mac_dev, u8 priority,
+ u16 pause_time, u16 thresh_time);
+ int (*set_exception)(struct fman_mac *mac_dev,
+ enum fman_mac_exceptions exception, bool enable);
+ int (*add_hash_mac_addr)(struct fman_mac *mac_dev,
+ enet_addr_t *eth_addr);
+ int (*remove_hash_mac_addr)(struct fman_mac *mac_dev,
+ enet_addr_t *eth_addr);
+
+ void (*update_speed)(struct mac_device *mac_dev, int speed);
+
+ struct fman_mac *fman_mac;
+ struct mac_priv_s *priv;
+};
+
+struct dpaa_eth_data {
+ struct mac_device *mac_dev;
+ int mac_hw_id;
+ int fman_hw_id;
+};
+
+extern const char *mac_driver_description;
+
+int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx);
+
+void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
+ bool *tx_pause);
+int fman_set_multi(struct net_device *net_dev, struct mac_device *mac_dev);
+
+#endif /* __MAC_H */
diff --git a/drivers/net/ethernet/freescale/fs_enet/Kconfig b/drivers/net/ethernet/freescale/fs_enet/Kconfig
new file mode 100644
index 000000000..7f20840fd
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fs_enet/Kconfig
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config FS_ENET
+ tristate "Freescale Ethernet Driver"
+ depends on NET_VENDOR_FREESCALE && (CPM1 || CPM2 || PPC_MPC512x)
+ select MII
+ select PHYLIB
+
+config FS_ENET_MPC5121_FEC
+ def_bool y if (FS_ENET && PPC_MPC512x)
+ select FS_ENET_HAS_FEC
+
+config FS_ENET_HAS_SCC
+ bool "Chip has an SCC usable for ethernet"
+ depends on FS_ENET && (CPM1 || CPM2)
+ default y
+
+config FS_ENET_HAS_FCC
+ bool "Chip has an FCC usable for ethernet"
+ depends on FS_ENET && CPM2
+ default y
+
+config FS_ENET_HAS_FEC
+ bool "Chip has an FEC usable for ethernet"
+ depends on FS_ENET && (CPM1 || FS_ENET_MPC5121_FEC)
+ select FS_ENET_MDIO_FEC
+ default y
+
+config FS_ENET_MDIO_FEC
+ tristate "MDIO driver for FEC"
+ depends on FS_ENET && (CPM1 || FS_ENET_MPC5121_FEC)
+
+config FS_ENET_MDIO_FCC
+ tristate "MDIO driver for FCC"
+ depends on FS_ENET && CPM2
+ select MDIO_BITBANG
diff --git a/drivers/net/ethernet/freescale/fs_enet/Makefile b/drivers/net/ethernet/freescale/fs_enet/Makefile
new file mode 100644
index 000000000..1821f94ef
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fs_enet/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Freescale Ethernet controllers
+#
+
+obj-$(CONFIG_FS_ENET) += fs_enet.o
+
+fs_enet-$(CONFIG_FS_ENET_HAS_SCC) += mac-scc.o
+fs_enet-$(CONFIG_FS_ENET_HAS_FEC) += mac-fec.o
+fs_enet-$(CONFIG_FS_ENET_HAS_FCC) += mac-fcc.o
+
+obj-$(CONFIG_FS_ENET_MDIO_FEC) += mii-fec.o
+obj-$(CONFIG_FS_ENET_MDIO_FCC) += mii-bitbang.o
+
+fs_enet-objs := fs_enet-main.o $(fs_enet-m)
diff --git a/drivers/net/ethernet/freescale/fs_enet/fec.h b/drivers/net/ethernet/freescale/fs_enet/fec.h
new file mode 100644
index 000000000..1dbee5d89
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fs_enet/fec.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef FS_ENET_FEC_H
+#define FS_ENET_FEC_H
+
+#define FEC_MAX_MULTICAST_ADDRS 64
+
+/* Interrupt events/masks. */
+#define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */
+#define FEC_ENET_BABR 0x40000000U /* Babbling receiver */
+#define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */
+#define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */
+#define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */
+#define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */
+#define FEC_ENET_RXF 0x02000000U /* Full frame received */
+#define FEC_ENET_RXB 0x01000000U /* A buffer was received */
+#define FEC_ENET_MII 0x00800000U /* MII interrupt */
+#define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */
+
+#define FEC_ECNTRL_PINMUX 0x00000004
+#define FEC_ECNTRL_ETHER_EN 0x00000002
+#define FEC_ECNTRL_RESET 0x00000001
+
+/* RMII mode enabled only when MII_MODE bit is set too. */
+#define FEC_RCNTRL_RMII_MODE (0x00000100 | \
+ FEC_RCNTRL_MII_MODE | FEC_RCNTRL_FCE)
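+/* i.e. FEC_RCNTRL_RMII_MODE expands to 0x00000124
+ * (0x00000100 | FEC_RCNTRL_MII_MODE 0x00000004 | FEC_RCNTRL_FCE 0x00000020).
+ */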
+#define FEC_RCNTRL_FCE 0x00000020
+#define FEC_RCNTRL_BC_REJ 0x00000010
+#define FEC_RCNTRL_PROM 0x00000008
+#define FEC_RCNTRL_MII_MODE 0x00000004
+#define FEC_RCNTRL_DRT 0x00000002
+#define FEC_RCNTRL_LOOP 0x00000001
+
+#define FEC_TCNTRL_FDEN 0x00000004
+#define FEC_TCNTRL_HBC 0x00000002
+#define FEC_TCNTRL_GTS 0x00000001
+
+/*
+ * Delay to wait for FEC reset command to complete (in us)
+ */
+#define FEC_RESET_DELAY 50
+#endif
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
new file mode 100644
index 000000000..8844a9a04
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -0,0 +1,1128 @@
+/*
+ * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
+ *
+ * Copyright (c) 2003 Intracom S.A.
+ * by Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2005 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
+ * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/of_net.h>
+#include <linux/pgtable.h>
+
+#include <linux/vmalloc.h>
+#include <asm/irq.h>
+#include <linux/uaccess.h>
+
+#include "fs_enet.h"
+
+/*************************************************/
+
+MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
+MODULE_DESCRIPTION("Freescale Ethernet Driver");
+MODULE_LICENSE("GPL");
+
+static int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
+module_param(fs_enet_debug, int, 0);
+MODULE_PARM_DESC(fs_enet_debug,
+ "Freescale bitmapped debugging message enable value");
+
+#define RX_RING_SIZE 32
+#define TX_RING_SIZE 64
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void fs_enet_netpoll(struct net_device *dev);
+#endif
+
+static void fs_set_multicast_list(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ (*fep->ops->set_multicast_list)(dev);
+}
+
+static void skb_align(struct sk_buff *skb, int align)
+{
+ int off = ((unsigned long)skb->data) & (align - 1);
+
+ if (off)
+ skb_reserve(skb, align - off);
+}
+
+/* NAPI function */
+static int fs_enet_napi(struct napi_struct *napi, int budget)
+{
+ struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
+ struct net_device *dev = fep->ndev;
+ const struct fs_platform_info *fpi = fep->fpi;
+ cbd_t __iomem *bdp;
+ struct sk_buff *skb, *skbn;
+ int received = 0;
+ u16 pkt_len, sc;
+ int curidx;
+ int dirtyidx, do_wake, do_restart;
+ int tx_left = TX_RING_SIZE;
+
+ spin_lock(&fep->tx_lock);
+ bdp = fep->dirty_tx;
+
+ /* clear status bits for napi */
+ (*fep->ops->napi_clear_event)(dev);
+
+ do_wake = do_restart = 0;
+ while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0 && tx_left) {
+ dirtyidx = bdp - fep->tx_bd_base;
+
+ if (fep->tx_free == fep->tx_ring)
+ break;
+
+ skb = fep->tx_skbuff[dirtyidx];
+
+ /*
+ * Check for errors.
+ */
+ if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
+ BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {
+
+ if (sc & BD_ENET_TX_HB) /* No heartbeat */
+ dev->stats.tx_heartbeat_errors++;
+ if (sc & BD_ENET_TX_LC) /* Late collision */
+ dev->stats.tx_window_errors++;
+ if (sc & BD_ENET_TX_RL) /* Retrans limit */
+ dev->stats.tx_aborted_errors++;
+ if (sc & BD_ENET_TX_UN) /* Underrun */
+ dev->stats.tx_fifo_errors++;
+ if (sc & BD_ENET_TX_CSL) /* Carrier lost */
+ dev->stats.tx_carrier_errors++;
+
+ if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
+ dev->stats.tx_errors++;
+ do_restart = 1;
+ }
+ } else
+ dev->stats.tx_packets++;
+
+ if (sc & BD_ENET_TX_READY) {
+ dev_warn(fep->dev,
+ "HEY! Enet xmit interrupt and TX_READY.\n");
+ }
+
+ /*
+ * Deferred means some collisions occurred during transmit,
+ * but we eventually sent the packet OK.
+ */
+ if (sc & BD_ENET_TX_DEF)
+ dev->stats.collisions++;
+
+ /* unmap */
+ if (fep->mapped_as_page[dirtyidx])
+ dma_unmap_page(fep->dev, CBDR_BUFADDR(bdp),
+ CBDR_DATLEN(bdp), DMA_TO_DEVICE);
+ else
+ dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
+ CBDR_DATLEN(bdp), DMA_TO_DEVICE);
+
+ /*
+ * Free the sk buffer associated with this last transmit.
+ */
+ if (skb) {
+ dev_kfree_skb(skb);
+ fep->tx_skbuff[dirtyidx] = NULL;
+ }
+
+ /*
+ * Update pointer to next buffer descriptor to be transmitted.
+ */
+ if ((sc & BD_ENET_TX_WRAP) == 0)
+ bdp++;
+ else
+ bdp = fep->tx_bd_base;
+
+ /*
+ * Since we have freed up a buffer, the ring is no longer
+ * full.
+ */
+ if (++fep->tx_free == MAX_SKB_FRAGS)
+ do_wake = 1;
+ tx_left--;
+ }
+
+ fep->dirty_tx = bdp;
+
+ if (do_restart)
+ (*fep->ops->tx_restart)(dev);
+
+ spin_unlock(&fep->tx_lock);
+
+ if (do_wake)
+ netif_wake_queue(dev);
+
+ /*
+ * First, grab all of the stats for the incoming packet.
+ * These get messed up if we get called due to a busy condition.
+ */
+ bdp = fep->cur_rx;
+
+ while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0 &&
+ received < budget) {
+ curidx = bdp - fep->rx_bd_base;
+
+ /*
+ * Since we have allocated space to hold a complete frame,
+ * the last indicator should be set.
+ */
+ if ((sc & BD_ENET_RX_LAST) == 0)
+ dev_warn(fep->dev, "rcv is not +last\n");
+
+ /*
+ * Check for errors.
+ */
+ if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
+ BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
+ dev->stats.rx_errors++;
+ /* Frame too long or too short. */
+ if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
+ dev->stats.rx_length_errors++;
+ /* Frame alignment */
+ if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
+ dev->stats.rx_frame_errors++;
+ /* CRC Error */
+ if (sc & BD_ENET_RX_CR)
+ dev->stats.rx_crc_errors++;
+ /* FIFO overrun */
+ if (sc & BD_ENET_RX_OV)
+ dev->stats.rx_crc_errors++;
+
+ skbn = fep->rx_skbuff[curidx];
+ } else {
+ skb = fep->rx_skbuff[curidx];
+
+ /*
+ * Process the incoming frame.
+ */
+ dev->stats.rx_packets++;
+ pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
+ dev->stats.rx_bytes += pkt_len + 4;
+
+ if (pkt_len <= fpi->rx_copybreak) {
+ /* +2 to make IP header L1 cache aligned */
+ skbn = netdev_alloc_skb(dev, pkt_len + 2);
+ if (skbn != NULL) {
+ skb_reserve(skbn, 2); /* align IP header */
+ skb_copy_from_linear_data(skb,
+ skbn->data, pkt_len);
+ swap(skb, skbn);
+ dma_sync_single_for_cpu(fep->dev,
+ CBDR_BUFADDR(bdp),
+ L1_CACHE_ALIGN(pkt_len),
+ DMA_FROM_DEVICE);
+ }
+ } else {
+ skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
+
+ if (skbn) {
+ dma_addr_t dma;
+
+ skb_align(skbn, ENET_RX_ALIGN);
+
+ dma_unmap_single(fep->dev,
+ CBDR_BUFADDR(bdp),
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE);
+
+ dma = dma_map_single(fep->dev,
+ skbn->data,
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE);
+ CBDW_BUFADDR(bdp, dma);
+ }
+ }
+
+ if (skbn != NULL) {
+ skb_put(skb, pkt_len); /* Make room */
+ skb->protocol = eth_type_trans(skb, dev);
+ received++;
+ netif_receive_skb(skb);
+ } else {
+ dev->stats.rx_dropped++;
+ skbn = skb;
+ }
+ }
+
+ fep->rx_skbuff[curidx] = skbn;
+ CBDW_DATLEN(bdp, 0);
+ CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);
+
+ /*
+ * Update BD pointer to next entry.
+ */
+ if ((sc & BD_ENET_RX_WRAP) == 0)
+ bdp++;
+ else
+ bdp = fep->rx_bd_base;
+
+ (*fep->ops->rx_bd_done)(dev);
+ }
+
+ fep->cur_rx = bdp;
+
+ if (received < budget && tx_left) {
+ /* done */
+ napi_complete_done(napi, received);
+ (*fep->ops->napi_enable)(dev);
+
+ return received;
+ }
+
+ return budget;
+}
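+
+/*
+ * Note on the combined poll above: NAPI is completed and the event sources
+ * re-enabled only when the RX loop stayed under budget and the TX scan was not
+ * cut short (tx_left still non-zero); otherwise the full budget is returned so
+ * the core keeps polling and the remaining TX completions are reaped on the
+ * next pass.
+ */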
+
+/*
+ * The interrupt handler.
+ * This is called from the MPC core interrupt.
+ */
+static irqreturn_t
+fs_enet_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct fs_enet_private *fep;
+ const struct fs_platform_info *fpi;
+ u32 int_events;
+ u32 int_clr_events;
+ int nr, napi_ok;
+ int handled;
+
+ fep = netdev_priv(dev);
+ fpi = fep->fpi;
+
+ nr = 0;
+ while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {
+ nr++;
+
+ int_clr_events = int_events;
+ int_clr_events &= ~fep->ev_napi;
+
+ (*fep->ops->clear_int_events)(dev, int_clr_events);
+
+ if (int_events & fep->ev_err)
+ (*fep->ops->ev_error)(dev, int_events);
+
+ if (int_events & fep->ev) {
+ napi_ok = napi_schedule_prep(&fep->napi);
+
+ (*fep->ops->napi_disable)(dev);
+ (*fep->ops->clear_int_events)(dev, fep->ev_napi);
+
+ /* NOTE: it is possible for FCCs in NAPI mode */
+ /* to submit a spurious interrupt while in poll */
+ if (napi_ok)
+ __napi_schedule(&fep->napi);
+ }
+
+ }
+
+ handled = nr > 0;
+ return IRQ_RETVAL(handled);
+}
+
+void fs_init_bds(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ cbd_t __iomem *bdp;
+ struct sk_buff *skb;
+ int i;
+
+ fs_cleanup_bds(dev);
+
+ fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
+ fep->tx_free = fep->tx_ring;
+ fep->cur_rx = fep->rx_bd_base;
+
+ /*
+ * Initialize the receive buffer descriptors.
+ */
+ for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
+ skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
+ if (skb == NULL)
+ break;
+
+ skb_align(skb, ENET_RX_ALIGN);
+ fep->rx_skbuff[i] = skb;
+ CBDW_BUFADDR(bdp,
+ dma_map_single(fep->dev, skb->data,
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE));
+ CBDW_DATLEN(bdp, 0); /* zero */
+ CBDW_SC(bdp, BD_ENET_RX_EMPTY |
+ ((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
+ }
+ /*
+ * if we failed, fill up the remainder
+ */
+ for (; i < fep->rx_ring; i++, bdp++) {
+ fep->rx_skbuff[i] = NULL;
+ CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
+ }
+
+ /*
+ * ...and the same for transmit.
+ */
+ for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
+ fep->tx_skbuff[i] = NULL;
+ CBDW_BUFADDR(bdp, 0);
+ CBDW_DATLEN(bdp, 0);
+ CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
+ }
+}
+
+void fs_cleanup_bds(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct sk_buff *skb;
+ cbd_t __iomem *bdp;
+ int i;
+
+ /*
+ * Reset SKB transmit buffers.
+ */
+ for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
+ if ((skb = fep->tx_skbuff[i]) == NULL)
+ continue;
+
+ /* unmap */
+ dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
+ skb->len, DMA_TO_DEVICE);
+
+ fep->tx_skbuff[i] = NULL;
+ dev_kfree_skb(skb);
+ }
+
+ /*
+ * Reset SKB receive buffers
+ */
+ for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
+ if ((skb = fep->rx_skbuff[i]) == NULL)
+ continue;
+
+ /* unmap */
+ dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE);
+
+ fep->rx_skbuff[i] = NULL;
+
+ dev_kfree_skb(skb);
+ }
+}
+
+/**********************************************************************************/
+
+#ifdef CONFIG_FS_ENET_MPC5121_FEC
+/*
+ * The MPC5121 FEC requires 4-byte alignment for the TX data buffer!
+ */
+static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ struct sk_buff *new_skb;
+
+ if (skb_linearize(skb))
+ return NULL;
+
+ /* Alloc new skb */
+ new_skb = netdev_alloc_skb(dev, skb->len + 4);
+ if (!new_skb)
+ return NULL;
+
+ /* Make sure new skb is properly aligned */
+ skb_align(new_skb, 4);
+
+ /* Copy data to new skb ... */
+ skb_copy_from_linear_data(skb, new_skb->data, skb->len);
+ skb_put(new_skb, skb->len);
+
+ /* ... and free an old one */
+ dev_kfree_skb_any(skb);
+
+ return new_skb;
+}
+#endif
+
+static netdev_tx_t
+fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ cbd_t __iomem *bdp;
+ int curidx;
+ u16 sc;
+ int nr_frags;
+ skb_frag_t *frag;
+ int len;
+#ifdef CONFIG_FS_ENET_MPC5121_FEC
+ int is_aligned = 1;
+ int i;
+
+ if (!IS_ALIGNED((unsigned long)skb->data, 4)) {
+ is_aligned = 0;
+ } else {
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ frag = skb_shinfo(skb)->frags;
+ for (i = 0; i < nr_frags; i++, frag++) {
+ if (!IS_ALIGNED(skb_frag_off(frag), 4)) {
+ is_aligned = 0;
+ break;
+ }
+ }
+ }
+
+ if (!is_aligned) {
+ skb = tx_skb_align_workaround(dev, skb);
+ if (!skb) {
+ /*
+ * We have lost the packet due to a memory allocation
+ * error in tx_skb_align_workaround(). The original
+ * skb is hopefully still valid, so try to transmit it later.
+ */
+ return NETDEV_TX_BUSY;
+ }
+ }
+#endif
+
+ spin_lock(&fep->tx_lock);
+
+ /*
+ * Fill in a Tx ring entry
+ */
+ bdp = fep->cur_tx;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ if (fep->tx_free <= nr_frags || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
+ netif_stop_queue(dev);
+ spin_unlock(&fep->tx_lock);
+
+ /*
+ * Ooops. All transmit buffers are full. Bail out.
+ * This should not happen, since the tx queue should be stopped.
+ */
+ dev_warn(fep->dev, "tx queue full!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ curidx = bdp - fep->tx_bd_base;
+
+ len = skb->len;
+ dev->stats.tx_bytes += len;
+ if (nr_frags)
+ len -= skb->data_len;
+ fep->tx_free -= nr_frags + 1;
+ /*
+ * Push the data cache so the CPM does not get stale memory data.
+ */
+ CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
+ skb->data, len, DMA_TO_DEVICE));
+ CBDW_DATLEN(bdp, len);
+
+ fep->mapped_as_page[curidx] = 0;
+ frag = skb_shinfo(skb)->frags;
+ while (nr_frags) {
+ CBDC_SC(bdp,
+ BD_ENET_TX_STATS | BD_ENET_TX_INTR | BD_ENET_TX_LAST |
+ BD_ENET_TX_TC);
+ CBDS_SC(bdp, BD_ENET_TX_READY);
+
+ if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) {
+ bdp++;
+ curidx++;
+ } else {
+ bdp = fep->tx_bd_base;
+ curidx = 0;
+ }
+
+ len = skb_frag_size(frag);
+ CBDW_BUFADDR(bdp, skb_frag_dma_map(fep->dev, frag, 0, len,
+ DMA_TO_DEVICE));
+ CBDW_DATLEN(bdp, len);
+
+ fep->tx_skbuff[curidx] = NULL;
+ fep->mapped_as_page[curidx] = 1;
+
+ frag++;
+ nr_frags--;
+ }
+
+ /* Trigger transmission start */
+ sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
+ BD_ENET_TX_LAST | BD_ENET_TX_TC;
+
+ /* note that while FEC does not have this bit
+ * it marks it as available for software use
+ * yay for hw reuse :) */
+ if (skb->len <= 60)
+ sc |= BD_ENET_TX_PAD;
+ CBDC_SC(bdp, BD_ENET_TX_STATS);
+ CBDS_SC(bdp, sc);
+
+ /* Save skb pointer. */
+ fep->tx_skbuff[curidx] = skb;
+
+ /* If this was the last BD in the ring, start at the beginning again. */
+ if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
+ bdp++;
+ else
+ bdp = fep->tx_bd_base;
+ fep->cur_tx = bdp;
+
+ if (fep->tx_free < MAX_SKB_FRAGS)
+ netif_stop_queue(dev);
+
+ skb_tx_timestamp(skb);
+
+ (*fep->ops->tx_kickstart)(dev);
+
+ spin_unlock(&fep->tx_lock);
+
+ return NETDEV_TX_OK;
+}
+
+static void fs_timeout_work(struct work_struct *work)
+{
+ struct fs_enet_private *fep = container_of(work, struct fs_enet_private,
+ timeout_work);
+ struct net_device *dev = fep->ndev;
+ unsigned long flags;
+ int wake = 0;
+
+ dev->stats.tx_errors++;
+
+ spin_lock_irqsave(&fep->lock, flags);
+
+ if (dev->flags & IFF_UP) {
+ phy_stop(dev->phydev);
+ (*fep->ops->stop)(dev);
+ (*fep->ops->restart)(dev);
+ }
+
+ phy_start(dev->phydev);
+ wake = fep->tx_free >= MAX_SKB_FRAGS &&
+ !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
+ spin_unlock_irqrestore(&fep->lock, flags);
+
+ if (wake)
+ netif_wake_queue(dev);
+}
+
+static void fs_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ schedule_work(&fep->timeout_work);
+}
+
+/*-----------------------------------------------------------------------------
+ * generic link-change handler - should be sufficient for most cases
+ *-----------------------------------------------------------------------------*/
+static void generic_adjust_link(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
+ int new_state = 0;
+
+ if (phydev->link) {
+ /* adjust to duplex mode */
+ if (phydev->duplex != fep->oldduplex) {
+ new_state = 1;
+ fep->oldduplex = phydev->duplex;
+ }
+
+ if (phydev->speed != fep->oldspeed) {
+ new_state = 1;
+ fep->oldspeed = phydev->speed;
+ }
+
+ if (!fep->oldlink) {
+ new_state = 1;
+ fep->oldlink = 1;
+ }
+
+ if (new_state)
+ fep->ops->restart(dev);
+ } else if (fep->oldlink) {
+ new_state = 1;
+ fep->oldlink = 0;
+ fep->oldspeed = 0;
+ fep->oldduplex = -1;
+ }
+
+ if (new_state && netif_msg_link(fep))
+ phy_print_status(phydev);
+}
+
+
+static void fs_adjust_link(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fep->lock, flags);
+
+ if (fep->ops->adjust_link)
+ fep->ops->adjust_link(dev);
+ else
+ generic_adjust_link(dev);
+
+ spin_unlock_irqrestore(&fep->lock, flags);
+}
+
+static int fs_init_phy(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct phy_device *phydev;
+ phy_interface_t iface;
+
+ fep->oldlink = 0;
+ fep->oldspeed = 0;
+ fep->oldduplex = -1;
+
+ iface = fep->fpi->use_rmii ?
+ PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII;
+
+ phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0,
+ iface);
+ if (!phydev) {
+ dev_err(&dev->dev, "Could not attach to PHY\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int fs_enet_open(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ int r;
+ int err;
+
+ /* Initialize fep->cur_rx and friends; skipping this
+ * would cause a crash in fs_enet_napi().
+ */
+ fs_init_bds(fep->ndev);
+
+ napi_enable(&fep->napi);
+
+ /* Install our interrupt handler. */
+ r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED,
+ "fs_enet-mac", dev);
+ if (r != 0) {
+ dev_err(fep->dev, "Could not allocate FS_ENET IRQ!");
+ napi_disable(&fep->napi);
+ return -EINVAL;
+ }
+
+ err = fs_init_phy(dev);
+ if (err) {
+ free_irq(fep->interrupt, dev);
+ napi_disable(&fep->napi);
+ return err;
+ }
+ phy_start(dev->phydev);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int fs_enet_close(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ unsigned long flags;
+
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+ napi_disable(&fep->napi);
+ cancel_work_sync(&fep->timeout_work);
+ phy_stop(dev->phydev);
+
+ spin_lock_irqsave(&fep->lock, flags);
+ spin_lock(&fep->tx_lock);
+ (*fep->ops->stop)(dev);
+ spin_unlock(&fep->tx_lock);
+ spin_unlock_irqrestore(&fep->lock, flags);
+
+ /* release any irqs */
+ phy_disconnect(dev->phydev);
+ free_irq(fep->interrupt, dev);
+
+ return 0;
+}
+
+/*************************************************************************/
+
+static void fs_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+}
+
+static int fs_get_regs_len(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ return (*fep->ops->get_regs_len)(dev);
+}
+
+static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *p)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ unsigned long flags;
+ int r, len;
+
+ len = regs->len;
+
+ spin_lock_irqsave(&fep->lock, flags);
+ r = (*fep->ops->get_regs)(dev, p, &len);
+ spin_unlock_irqrestore(&fep->lock, flags);
+
+ if (r == 0)
+ regs->version = 0;
+}
+
+static u32 fs_get_msglevel(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ return fep->msg_enable;
+}
+
+static void fs_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fep->msg_enable = value;
+}
+
+static int fs_get_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna, void *data)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fs_platform_info *fpi = fep->fpi;
+ int ret = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = fpi->rx_copybreak;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int fs_set_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna, const void *data)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fs_platform_info *fpi = fep->fpi;
+ int ret = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ fpi->rx_copybreak = *(u32 *)data;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
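+
+/*
+ * rx_copybreak (defaulting to 240 bytes in fs_enet_probe()) is the cut-off
+ * used by fs_enet_napi(): frames no longer than this are copied into a freshly
+ * allocated skb and the original ring buffer is reused, while larger frames
+ * are passed up directly and the ring slot is refilled with a new buffer. It
+ * is exposed through the ETHTOOL_RX_COPYBREAK tunable above, which the ethtool
+ * utility typically presents as "rx-copybreak".
+ */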
+
+static const struct ethtool_ops fs_ethtool_ops = {
+ .get_drvinfo = fs_get_drvinfo,
+ .get_regs_len = fs_get_regs_len,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = fs_get_msglevel,
+ .set_msglevel = fs_set_msglevel,
+ .get_regs = fs_get_regs,
+ .get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_tunable = fs_get_tunable,
+ .set_tunable = fs_set_tunable,
+};
+
+/**************************************************************************************/
+
+#ifdef CONFIG_FS_ENET_HAS_FEC
+#define IS_FEC(match) ((match)->data == &fs_fec_ops)
+#else
+#define IS_FEC(match) 0
+#endif
+
+static const struct net_device_ops fs_enet_netdev_ops = {
+ .ndo_open = fs_enet_open,
+ .ndo_stop = fs_enet_close,
+ .ndo_start_xmit = fs_enet_start_xmit,
+ .ndo_tx_timeout = fs_timeout,
+ .ndo_set_rx_mode = fs_set_multicast_list,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = fs_enet_netpoll,
+#endif
+};
+
+static const struct of_device_id fs_enet_match[];
+static int fs_enet_probe(struct platform_device *ofdev)
+{
+ const struct of_device_id *match;
+ struct net_device *ndev;
+ struct fs_enet_private *fep;
+ struct fs_platform_info *fpi;
+ const u32 *data;
+ struct clk *clk;
+ int err;
+ const char *phy_connection_type;
+ int privsize, len, ret = -ENODEV;
+
+ match = of_match_device(fs_enet_match, &ofdev->dev);
+ if (!match)
+ return -EINVAL;
+
+ fpi = kzalloc(sizeof(*fpi), GFP_KERNEL);
+ if (!fpi)
+ return -ENOMEM;
+
+ if (!IS_FEC(match)) {
+ data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len);
+ if (!data || len != 4)
+ goto out_free_fpi;
+
+ fpi->cp_command = *data;
+ }
+
+ fpi->rx_ring = RX_RING_SIZE;
+ fpi->tx_ring = TX_RING_SIZE;
+ fpi->rx_copybreak = 240;
+ fpi->napi_weight = 17;
+ fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
+ if (!fpi->phy_node && of_phy_is_fixed_link(ofdev->dev.of_node)) {
+ err = of_phy_register_fixed_link(ofdev->dev.of_node);
+ if (err)
+ goto out_free_fpi;
+
+ /* In the case of a fixed PHY, the DT node associated
+ * to the PHY is the Ethernet MAC DT node.
+ */
+ fpi->phy_node = of_node_get(ofdev->dev.of_node);
+ }
+
+ if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) {
+ phy_connection_type = of_get_property(ofdev->dev.of_node,
+ "phy-connection-type", NULL);
+ if (phy_connection_type && !strcmp("rmii", phy_connection_type))
+ fpi->use_rmii = 1;
+ }
+
+ /* Make the clock lookup non-fatal (the driver is shared among
+ * platforms), but require clk_prepare_enable() to succeed when a
+ * clock was specified/found, and keep a reference to the clock
+ * upon successful acquisition.
+ */
+ clk = devm_clk_get(&ofdev->dev, "per");
+ if (!IS_ERR(clk)) {
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto out_deregister_fixed_link;
+
+ fpi->clk_per = clk;
+ }
+
+ privsize = sizeof(*fep) +
+ sizeof(struct sk_buff **) *
+ (fpi->rx_ring + fpi->tx_ring) +
+ sizeof(char) * fpi->tx_ring;
+
+ ndev = alloc_etherdev(privsize);
+ if (!ndev) {
+ ret = -ENOMEM;
+ goto out_put;
+ }
+
+ SET_NETDEV_DEV(ndev, &ofdev->dev);
+ platform_set_drvdata(ofdev, ndev);
+
+ fep = netdev_priv(ndev);
+ fep->dev = &ofdev->dev;
+ fep->ndev = ndev;
+ fep->fpi = fpi;
+ fep->ops = match->data;
+
+ ret = fep->ops->setup_data(ndev);
+ if (ret)
+ goto out_free_dev;
+
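+ /*
+ * The arrays below are carved out of the single etherdev allocation sized
+ * by privsize above: the fs_enet_private struct is followed by rx_ring
+ * pointers for rx_skbuff, tx_ring pointers for tx_skbuff, and finally
+ * tx_ring bytes for the mapped_as_page flags.
+ */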
+ fep->rx_skbuff = (struct sk_buff **)&fep[1];
+ fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;
+ fep->mapped_as_page = (char *)(fep->rx_skbuff + fpi->rx_ring +
+ fpi->tx_ring);
+
+ spin_lock_init(&fep->lock);
+ spin_lock_init(&fep->tx_lock);
+
+ of_get_ethdev_address(ofdev->dev.of_node, ndev);
+
+ ret = fep->ops->allocate_bd(ndev);
+ if (ret)
+ goto out_cleanup_data;
+
+ fep->rx_bd_base = fep->ring_base;
+ fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;
+
+ fep->tx_ring = fpi->tx_ring;
+ fep->rx_ring = fpi->rx_ring;
+
+ ndev->netdev_ops = &fs_enet_netdev_ops;
+ ndev->watchdog_timeo = 2 * HZ;
+ INIT_WORK(&fep->timeout_work, fs_timeout_work);
+ netif_napi_add_weight(ndev, &fep->napi, fs_enet_napi,
+ fpi->napi_weight);
+
+ ndev->ethtool_ops = &fs_ethtool_ops;
+
+ netif_carrier_off(ndev);
+
+ ndev->features |= NETIF_F_SG;
+
+ ret = register_netdev(ndev);
+ if (ret)
+ goto out_free_bd;
+
+ pr_info("%s: fs_enet: %pM\n", ndev->name, ndev->dev_addr);
+
+ return 0;
+
+out_free_bd:
+ fep->ops->free_bd(ndev);
+out_cleanup_data:
+ fep->ops->cleanup_data(ndev);
+out_free_dev:
+ free_netdev(ndev);
+out_put:
+ clk_disable_unprepare(fpi->clk_per);
+out_deregister_fixed_link:
+ of_node_put(fpi->phy_node);
+ if (of_phy_is_fixed_link(ofdev->dev.of_node))
+ of_phy_deregister_fixed_link(ofdev->dev.of_node);
+out_free_fpi:
+ kfree(fpi);
+ return ret;
+}
+
+static int fs_enet_remove(struct platform_device *ofdev)
+{
+ struct net_device *ndev = platform_get_drvdata(ofdev);
+ struct fs_enet_private *fep = netdev_priv(ndev);
+
+ unregister_netdev(ndev);
+
+ fep->ops->free_bd(ndev);
+ fep->ops->cleanup_data(ndev);
+ dev_set_drvdata(fep->dev, NULL);
+ of_node_put(fep->fpi->phy_node);
+ clk_disable_unprepare(fep->fpi->clk_per);
+ if (of_phy_is_fixed_link(ofdev->dev.of_node))
+ of_phy_deregister_fixed_link(ofdev->dev.of_node);
+ free_netdev(ndev);
+ return 0;
+}
+
+static const struct of_device_id fs_enet_match[] = {
+#ifdef CONFIG_FS_ENET_HAS_SCC
+ {
+ .compatible = "fsl,cpm1-scc-enet",
+ .data = (void *)&fs_scc_ops,
+ },
+ {
+ .compatible = "fsl,cpm2-scc-enet",
+ .data = (void *)&fs_scc_ops,
+ },
+#endif
+#ifdef CONFIG_FS_ENET_HAS_FCC
+ {
+ .compatible = "fsl,cpm2-fcc-enet",
+ .data = (void *)&fs_fcc_ops,
+ },
+#endif
+#ifdef CONFIG_FS_ENET_HAS_FEC
+#ifdef CONFIG_FS_ENET_MPC5121_FEC
+ {
+ .compatible = "fsl,mpc5121-fec",
+ .data = (void *)&fs_fec_ops,
+ },
+ {
+ .compatible = "fsl,mpc5125-fec",
+ .data = (void *)&fs_fec_ops,
+ },
+#else
+ {
+ .compatible = "fsl,pq1-fec-enet",
+ .data = (void *)&fs_fec_ops,
+ },
+#endif
+#endif
+ {}
+};
+MODULE_DEVICE_TABLE(of, fs_enet_match);
+
+static struct platform_driver fs_enet_driver = {
+ .driver = {
+ .name = "fs_enet",
+ .of_match_table = fs_enet_match,
+ },
+ .probe = fs_enet_probe,
+ .remove = fs_enet_remove,
+};
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void fs_enet_netpoll(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ fs_enet_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+module_platform_driver(fs_enet_driver);
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
new file mode 100644
index 000000000..cb419aef8
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef FS_ENET_H
+#define FS_ENET_H
+
+#include <linux/mii.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/phy.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/fs_enet_pd.h>
+#include <asm/fs_pd.h>
+
+#ifdef CONFIG_CPM1
+#include <asm/cpm1.h>
+#endif
+
+#if defined(CONFIG_FS_ENET_HAS_FEC)
+#include <asm/cpm.h>
+
+#if defined(CONFIG_FS_ENET_MPC5121_FEC)
+/* MPC5121 FEC has different register layout */
+struct fec {
+ u32 fec_reserved0;
+ u32 fec_ievent; /* Interrupt event reg */
+ u32 fec_imask; /* Interrupt mask reg */
+ u32 fec_reserved1;
+ u32 fec_r_des_active; /* Receive descriptor reg */
+ u32 fec_x_des_active; /* Transmit descriptor reg */
+ u32 fec_reserved2[3];
+ u32 fec_ecntrl; /* Ethernet control reg */
+ u32 fec_reserved3[6];
+ u32 fec_mii_data; /* MII manage frame reg */
+ u32 fec_mii_speed; /* MII speed control reg */
+ u32 fec_reserved4[7];
+ u32 fec_mib_ctrlstat; /* MIB control/status reg */
+ u32 fec_reserved5[7];
+ u32 fec_r_cntrl; /* Receive control reg */
+ u32 fec_reserved6[15];
+ u32 fec_x_cntrl; /* Transmit Control reg */
+ u32 fec_reserved7[7];
+ u32 fec_addr_low; /* Low 32bits MAC address */
+ u32 fec_addr_high; /* High 16bits MAC address */
+ u32 fec_opd; /* Opcode + Pause duration */
+ u32 fec_reserved8[10];
+ u32 fec_hash_table_high; /* High 32bits hash table */
+ u32 fec_hash_table_low; /* Low 32bits hash table */
+ u32 fec_grp_hash_table_high; /* High 32bits hash table */
+ u32 fec_grp_hash_table_low; /* Low 32bits hash table */
+ u32 fec_reserved9[7];
+ u32 fec_x_wmrk; /* FIFO transmit water mark */
+ u32 fec_reserved10;
+ u32 fec_r_bound; /* FIFO receive bound reg */
+ u32 fec_r_fstart; /* FIFO receive start reg */
+ u32 fec_reserved11[11];
+ u32 fec_r_des_start; /* Receive descriptor ring */
+ u32 fec_x_des_start; /* Transmit descriptor ring */
+ u32 fec_r_buff_size; /* Maximum receive buff size */
+ u32 fec_reserved12[26];
+ u32 fec_dma_control; /* DMA Endian and other ctrl */
+};
+#endif
+
+struct fec_info {
+ struct fec __iomem *fecp;
+ u32 mii_speed;
+};
+#endif
+
+#ifdef CONFIG_CPM2
+#include <asm/cpm2.h>
+#endif
+
+/* hw driver ops */
+struct fs_ops {
+ int (*setup_data)(struct net_device *dev);
+ int (*allocate_bd)(struct net_device *dev);
+ void (*free_bd)(struct net_device *dev);
+ void (*cleanup_data)(struct net_device *dev);
+ void (*set_multicast_list)(struct net_device *dev);
+ void (*adjust_link)(struct net_device *dev);
+ void (*restart)(struct net_device *dev);
+ void (*stop)(struct net_device *dev);
+ void (*napi_clear_event)(struct net_device *dev);
+ void (*napi_enable)(struct net_device *dev);
+ void (*napi_disable)(struct net_device *dev);
+ void (*rx_bd_done)(struct net_device *dev);
+ void (*tx_kickstart)(struct net_device *dev);
+ u32 (*get_int_events)(struct net_device *dev);
+ void (*clear_int_events)(struct net_device *dev, u32 int_events);
+ void (*ev_error)(struct net_device *dev, u32 int_events);
+ int (*get_regs)(struct net_device *dev, void *p, int *sizep);
+ int (*get_regs_len)(struct net_device *dev);
+ void (*tx_restart)(struct net_device *dev);
+};
+
+struct phy_info {
+ unsigned int id;
+ const char *name;
+ void (*startup) (struct net_device * dev);
+ void (*shutdown) (struct net_device * dev);
+ void (*ack_int) (struct net_device * dev);
+};
+
+/* The FEC stores dest/src/type, data, and checksum for receive packets.
+ */
+#define MAX_MTU 1508 /* Allow full-sized PPPoE packets over VLAN */
+#define MIN_MTU 46 /* this is data size */
+#define CRC_LEN 4
+
+#define PKT_MAXBUF_SIZE (MAX_MTU+ETH_HLEN+CRC_LEN)
+#define PKT_MINBUF_SIZE (MIN_MTU+ETH_HLEN+CRC_LEN)
+
+/* Must be a multiple of 32 (to cover both FEC & FCC) */
+#define PKT_MAXBLR_SIZE ((PKT_MAXBUF_SIZE + 31) & ~31)
+/* This is needed so that invalidate_xxx won't invalidate too much */
+#define ENET_RX_ALIGN 16
+#define ENET_RX_FRSIZE L1_CACHE_ALIGN(PKT_MAXBUF_SIZE + ENET_RX_ALIGN - 1)
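+
+/*
+ * Worked out with the values above and ETH_HLEN == 14: PKT_MAXBUF_SIZE is
+ * 1508 + 14 + 4 == 1526 bytes and PKT_MAXBLR_SIZE rounds that up to 1536.
+ * ENET_RX_FRSIZE is L1_CACHE_ALIGN(1526 + 16 - 1 == 1541), i.e. 1552 with a
+ * 16-byte L1 cache line or 1568 with a 32-byte one, depending on the
+ * platform's L1_CACHE_BYTES.
+ */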
+
+struct fs_enet_private {
+ struct napi_struct napi;
+ struct device *dev; /* pointer back to the device (must be initialized first) */
+ struct net_device *ndev;
+ spinlock_t lock; /* during all ops except TX pckt processing */
+ spinlock_t tx_lock; /* during fs_start_xmit and fs_tx */
+ struct fs_platform_info *fpi;
+ struct work_struct timeout_work;
+ const struct fs_ops *ops;
+ int rx_ring, tx_ring;
+ dma_addr_t ring_mem_addr;
+ void __iomem *ring_base;
+ struct sk_buff **rx_skbuff;
+ struct sk_buff **tx_skbuff;
+ char *mapped_as_page;
+ cbd_t __iomem *rx_bd_base; /* Address of Rx and Tx buffers. */
+ cbd_t __iomem *tx_bd_base;
+ cbd_t __iomem *dirty_tx; /* ring entries to be free()ed. */
+ cbd_t __iomem *cur_rx;
+ cbd_t __iomem *cur_tx;
+ int tx_free;
+ const struct phy_info *phy;
+ u32 msg_enable;
+ struct mii_if_info mii_if;
+ unsigned int last_mii_status;
+ int interrupt;
+
+ int oldduplex, oldspeed, oldlink; /* current settings */
+
+ /* event masks */
+ u32 ev_napi; /* mask of NAPI events */
+ u32 ev; /* event mask */
+ u32 ev_err; /* error event mask */
+
+ u16 bd_rx_empty; /* mask of BD rx empty */
+ u16 bd_rx_err; /* mask of BD rx errors */
+
+ union {
+ struct {
+ int idx; /* FEC1 = 0, FEC2 = 1 */
+ void __iomem *fecp; /* hw registers */
+ u32 hthi, htlo; /* state for multicast */
+ } fec;
+
+ struct {
+ int idx; /* FCC1-3 = 0-2 */
+ void __iomem *fccp; /* hw registers */
+ void __iomem *ep; /* parameter ram */
+ void __iomem *fcccp; /* hw registers cont. */
+ void __iomem *mem; /* FCC DPRAM */
+ u32 gaddrh, gaddrl; /* group address */
+ } fcc;
+
+ struct {
+ int idx; /* FEC1 = 0, FEC2 = 1 */
+ void __iomem *sccp; /* hw registers */
+ void __iomem *ep; /* parameter ram */
+ u32 hthi, htlo; /* state for multicast */
+ } scc;
+
+ };
+};
+
+/***************************************************************************/
+
+void fs_init_bds(struct net_device *dev);
+void fs_cleanup_bds(struct net_device *dev);
+
+/***************************************************************************/
+
+#define DRV_MODULE_NAME "fs_enet"
+#define PFX DRV_MODULE_NAME ": "
+
+/***************************************************************************/
+
+int fs_enet_platform_init(void);
+void fs_enet_platform_cleanup(void);
+
+/***************************************************************************/
+/* buffer descriptor access macros */
+
+/* access macros */
+#if defined(CONFIG_CPM1)
+/* for CPM1, the __raw_xxx accessors are sufficient */
+#define __cbd_out32(addr, x) __raw_writel(x, addr)
+#define __cbd_out16(addr, x) __raw_writew(x, addr)
+#define __cbd_in32(addr) __raw_readl(addr)
+#define __cbd_in16(addr) __raw_readw(addr)
+#else
+/* for others play it safe */
+#define __cbd_out32(addr, x) out_be32(addr, x)
+#define __cbd_out16(addr, x) out_be16(addr, x)
+#define __cbd_in32(addr) in_be32(addr)
+#define __cbd_in16(addr) in_be16(addr)
+#endif
+
+/* write */
+#define CBDW_SC(_cbd, _sc) __cbd_out16(&(_cbd)->cbd_sc, (_sc))
+#define CBDW_DATLEN(_cbd, _datlen) __cbd_out16(&(_cbd)->cbd_datlen, (_datlen))
+#define CBDW_BUFADDR(_cbd, _bufaddr) __cbd_out32(&(_cbd)->cbd_bufaddr, (_bufaddr))
+
+/* read */
+#define CBDR_SC(_cbd) __cbd_in16(&(_cbd)->cbd_sc)
+#define CBDR_DATLEN(_cbd) __cbd_in16(&(_cbd)->cbd_datlen)
+#define CBDR_BUFADDR(_cbd) __cbd_in32(&(_cbd)->cbd_bufaddr)
+
+/* set bits */
+#define CBDS_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) | (_sc))
+
+/* clear bits */
+#define CBDC_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) & ~(_sc))
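+
+/*
+ * A typical read-modify-write with these macros, as done in the RX path of
+ * fs_enet-main.c when a buffer descriptor is handed back to the controller:
+ *
+ * u16 sc = CBDR_SC(bdp);
+ * CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);
+ *
+ * The TX path uses CBDS_SC()/CBDC_SC() to set or clear individual status bits
+ * in place.
+ */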
+
+/*******************************************************************/
+
+extern const struct fs_ops fs_fec_ops;
+extern const struct fs_ops fs_fcc_ops;
+extern const struct fs_ops fs_scc_ops;
+
+/*******************************************************************/
+
+#endif
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
new file mode 100644
index 000000000..b47490be8
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
@@ -0,0 +1,583 @@
+/*
+ * FCC driver for Motorola MPC82xx (PQ2).
+ *
+ * Copyright (c) 2003 Intracom S.A.
+ * by Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2005 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/gfp.h>
+#include <linux/pgtable.h>
+
+#include <asm/immap_cpm2.h>
+#include <asm/mpc8260.h>
+#include <asm/cpm2.h>
+
+#include <asm/irq.h>
+#include <linux/uaccess.h>
+
+#include "fs_enet.h"
+
+/*************************************************/
+
+/* FCC access macros */
+
+/* write, read, set bits, clear bits */
+#define W32(_p, _m, _v) out_be32(&(_p)->_m, (_v))
+#define R32(_p, _m) in_be32(&(_p)->_m)
+#define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v))
+#define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v))
+
+#define W16(_p, _m, _v) out_be16(&(_p)->_m, (_v))
+#define R16(_p, _m) in_be16(&(_p)->_m)
+#define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v))
+#define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v))
+
+#define W8(_p, _m, _v) out_8(&(_p)->_m, (_v))
+#define R8(_p, _m) in_8(&(_p)->_m)
+#define S8(_p, _m, _v) W8(_p, _m, R8(_p, _m) | (_v))
+#define C8(_p, _m, _v) W8(_p, _m, R8(_p, _m) & ~(_v))
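+
+/*
+ * These helpers mirror the CBD accessors in fs_enet.h; for example, restart()
+ * below enables the controller with
+ *
+ * S32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
+ *
+ * and stop() clears the same two bits again with C32().
+ */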
+
+/*************************************************/
+
+#define FCC_MAX_MULTICAST_ADDRS 64
+
+#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
+#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
+#define mk_mii_end 0
+
+#define MAX_CR_CMD_LOOPS 10000
+
+static inline int fcc_cr_cmd(struct fs_enet_private *fep, u32 op)
+{
+ const struct fs_platform_info *fpi = fep->fpi;
+
+ return cpm_command(fpi->cp_command, op);
+}
+
+static int do_pd_setup(struct fs_enet_private *fep)
+{
+ struct platform_device *ofdev = to_platform_device(fep->dev);
+ struct fs_platform_info *fpi = fep->fpi;
+ int ret = -EINVAL;
+
+ fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
+ if (!fep->interrupt)
+ goto out;
+
+ fep->fcc.fccp = of_iomap(ofdev->dev.of_node, 0);
+ if (!fep->fcc.fccp)
+ goto out;
+
+ fep->fcc.ep = of_iomap(ofdev->dev.of_node, 1);
+ if (!fep->fcc.ep)
+ goto out_fccp;
+
+ fep->fcc.fcccp = of_iomap(ofdev->dev.of_node, 2);
+ if (!fep->fcc.fcccp)
+ goto out_ep;
+
+ fep->fcc.mem = (void __iomem *)cpm2_immr;
+ fpi->dpram_offset = cpm_dpalloc(128, 32);
+ if (IS_ERR_VALUE(fpi->dpram_offset)) {
+ ret = fpi->dpram_offset;
+ goto out_fcccp;
+ }
+
+ return 0;
+
+out_fcccp:
+ iounmap(fep->fcc.fcccp);
+out_ep:
+ iounmap(fep->fcc.ep);
+out_fccp:
+ iounmap(fep->fcc.fccp);
+out:
+ return ret;
+}
+
+#define FCC_NAPI_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB | FCC_ENET_TXB)
+#define FCC_EVENT (FCC_ENET_RXF | FCC_ENET_TXB)
+#define FCC_ERR_EVENT_MSK (FCC_ENET_TXE)
+
+static int setup_data(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ if (do_pd_setup(fep) != 0)
+ return -EINVAL;
+
+ fep->ev_napi = FCC_NAPI_EVENT_MSK;
+ fep->ev = FCC_EVENT;
+ fep->ev_err = FCC_ERR_EVENT_MSK;
+
+ return 0;
+}
+
+static int allocate_bd(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+
+ fep->ring_base = (void __iomem __force *)dma_alloc_coherent(fep->dev,
+ (fpi->tx_ring + fpi->rx_ring) *
+ sizeof(cbd_t), &fep->ring_mem_addr,
+ GFP_KERNEL);
+ if (fep->ring_base == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void free_bd(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+
+ if (fep->ring_base)
+ dma_free_coherent(fep->dev,
+ (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
+ (void __force *)fep->ring_base, fep->ring_mem_addr);
+}
+
+static void cleanup_data(struct net_device *dev)
+{
+ /* nothing */
+}
+
+static void set_promiscuous_mode(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+
+ S32(fccp, fcc_fpsmr, FCC_PSMR_PRO);
+}
+
+static void set_multicast_start(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_enet_t __iomem *ep = fep->fcc.ep;
+
+ W32(ep, fen_gaddrh, 0);
+ W32(ep, fen_gaddrl, 0);
+}
+
+static void set_multicast_one(struct net_device *dev, const u8 *mac)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_enet_t __iomem *ep = fep->fcc.ep;
+ u16 taddrh, taddrm, taddrl;
+
+ taddrh = ((u16)mac[5] << 8) | mac[4];
+ taddrm = ((u16)mac[3] << 8) | mac[2];
+ taddrl = ((u16)mac[1] << 8) | mac[0];
+
+ W16(ep, fen_taddrh, taddrh);
+ W16(ep, fen_taddrm, taddrm);
+ W16(ep, fen_taddrl, taddrl);
+ fcc_cr_cmd(fep, CPM_CR_SET_GADDR);
+}
+
+static void set_multicast_finish(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+ fcc_enet_t __iomem *ep = fep->fcc.ep;
+
+ /* clear promiscuous always */
+ C32(fccp, fcc_fpsmr, FCC_PSMR_PRO);
+
+ /* if all multi or too many multicasts; just enable all */
+ if ((dev->flags & IFF_ALLMULTI) != 0 ||
+ netdev_mc_count(dev) > FCC_MAX_MULTICAST_ADDRS) {
+
+ W32(ep, fen_gaddrh, 0xffffffff);
+ W32(ep, fen_gaddrl, 0xffffffff);
+ }
+
+ /* read back */
+ fep->fcc.gaddrh = R32(ep, fen_gaddrh);
+ fep->fcc.gaddrl = R32(ep, fen_gaddrl);
+}
+
+static void set_multicast_list(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+
+ if ((dev->flags & IFF_PROMISC) == 0) {
+ set_multicast_start(dev);
+ netdev_for_each_mc_addr(ha, dev)
+ set_multicast_one(dev, ha->addr);
+ set_multicast_finish(dev);
+ } else
+ set_promiscuous_mode(dev);
+}
+
+static void restart(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+ fcc_c_t __iomem *fcccp = fep->fcc.fcccp;
+ fcc_enet_t __iomem *ep = fep->fcc.ep;
+ dma_addr_t rx_bd_base_phys, tx_bd_base_phys;
+ u16 paddrh, paddrm, paddrl;
+ const unsigned char *mac;
+ int i;
+
+ C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
+
+ /* clear everything (slow & steady does it) */
+ for (i = 0; i < sizeof(*ep); i++)
+ out_8((u8 __iomem *)ep + i, 0);
+
+ /* get physical address */
+ rx_bd_base_phys = fep->ring_mem_addr;
+ tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring;
+
+ /* point to bds */
+ W32(ep, fen_genfcc.fcc_rbase, rx_bd_base_phys);
+ W32(ep, fen_genfcc.fcc_tbase, tx_bd_base_phys);
+
+ /* Set maximum bytes per receive buffer.
+ * It must be a multiple of 32.
+ */
+ W16(ep, fen_genfcc.fcc_mrblr, PKT_MAXBLR_SIZE);
+
+ W32(ep, fen_genfcc.fcc_rstate, (CPMFCR_GBL | CPMFCR_EB) << 24);
+ W32(ep, fen_genfcc.fcc_tstate, (CPMFCR_GBL | CPMFCR_EB) << 24);
+
+ /* Allocate space in the reserved FCC area of DPRAM for the
+ * internal buffers. No one uses this space (yet), so we
+ * can do this. Later, we will add resource management for
+ * this area.
+ */
+
+ W16(ep, fen_genfcc.fcc_riptr, fpi->dpram_offset);
+ W16(ep, fen_genfcc.fcc_tiptr, fpi->dpram_offset + 32);
+
+ W16(ep, fen_padptr, fpi->dpram_offset + 64);
+
+ /* fill with special symbol... */
+ memset_io(fep->fcc.mem + fpi->dpram_offset + 64, 0x88, 32);
+
+ W32(ep, fen_genfcc.fcc_rbptr, 0);
+ W32(ep, fen_genfcc.fcc_tbptr, 0);
+ W32(ep, fen_genfcc.fcc_rcrc, 0);
+ W32(ep, fen_genfcc.fcc_tcrc, 0);
+ W16(ep, fen_genfcc.fcc_res1, 0);
+ W32(ep, fen_genfcc.fcc_res2, 0);
+
+ /* no CAM */
+ W32(ep, fen_camptr, 0);
+
+ /* Set CRC preset and mask */
+ W32(ep, fen_cmask, 0xdebb20e3);
+ W32(ep, fen_cpres, 0xffffffff);
+
+ W32(ep, fen_crcec, 0); /* CRC Error counter */
+ W32(ep, fen_alec, 0); /* alignment error counter */
+ W32(ep, fen_disfc, 0); /* discard frame counter */
+ W16(ep, fen_retlim, 15); /* Retry limit threshold */
+ W16(ep, fen_pper, 0); /* Normal persistence */
+
+ /* set group address */
+ W32(ep, fen_gaddrh, fep->fcc.gaddrh);
+ W32(ep, fen_gaddrl, fep->fcc.gaddrl);
+
+ /* Clear hash filter tables */
+ W32(ep, fen_iaddrh, 0);
+ W32(ep, fen_iaddrl, 0);
+
+ /* Clear the Out-of-sequence TxBD */
+ W16(ep, fen_tfcstat, 0);
+ W16(ep, fen_tfclen, 0);
+ W32(ep, fen_tfcptr, 0);
+
+ W16(ep, fen_mflr, PKT_MAXBUF_SIZE); /* maximum frame length register */
+ W16(ep, fen_minflr, PKT_MINBUF_SIZE); /* minimum frame length register */
+
+ /* set address */
+ mac = dev->dev_addr;
+ paddrh = ((u16)mac[5] << 8) | mac[4];
+ paddrm = ((u16)mac[3] << 8) | mac[2];
+ paddrl = ((u16)mac[1] << 8) | mac[0];
+
+ W16(ep, fen_paddrh, paddrh);
+ W16(ep, fen_paddrm, paddrm);
+ W16(ep, fen_paddrl, paddrl);
+
+ W16(ep, fen_taddrh, 0);
+ W16(ep, fen_taddrm, 0);
+ W16(ep, fen_taddrl, 0);
+
+ W16(ep, fen_maxd1, 1520); /* maximum DMA1 length */
+ W16(ep, fen_maxd2, 1520); /* maximum DMA2 length */
+
+ /* Clear stat counters, in case we ever enable RMON */
+ W32(ep, fen_octc, 0);
+ W32(ep, fen_colc, 0);
+ W32(ep, fen_broc, 0);
+ W32(ep, fen_mulc, 0);
+ W32(ep, fen_uspc, 0);
+ W32(ep, fen_frgc, 0);
+ W32(ep, fen_ospc, 0);
+ W32(ep, fen_jbrc, 0);
+ W32(ep, fen_p64c, 0);
+ W32(ep, fen_p65c, 0);
+ W32(ep, fen_p128c, 0);
+ W32(ep, fen_p256c, 0);
+ W32(ep, fen_p512c, 0);
+ W32(ep, fen_p1024c, 0);
+
+ W16(ep, fen_rfthr, 0); /* Suggested by manual */
+ W16(ep, fen_rfcnt, 0);
+ W16(ep, fen_cftype, 0);
+
+ fs_init_bds(dev);
+
+ /* adjust to speed (for RMII mode) */
+ if (fpi->use_rmii) {
+ if (dev->phydev->speed == 100)
+ C8(fcccp, fcc_gfemr, 0x20);
+ else
+ S8(fcccp, fcc_gfemr, 0x20);
+ }
+
+ fcc_cr_cmd(fep, CPM_CR_INIT_TRX);
+
+ /* clear events */
+ W16(fccp, fcc_fcce, 0xffff);
+
+ /* Enable interrupts we wish to service */
+ W16(fccp, fcc_fccm, FCC_ENET_TXE | FCC_ENET_RXF | FCC_ENET_TXB);
+
+ /* Set GFMR to enable Ethernet operating mode */
+ W32(fccp, fcc_gfmr, FCC_GFMR_TCI | FCC_GFMR_MODE_ENET);
+
+ /* set sync/delimiters */
+ W16(fccp, fcc_fdsr, 0xd555);
+
+ W32(fccp, fcc_fpsmr, FCC_PSMR_ENCRC);
+
+ if (fpi->use_rmii)
+ S32(fccp, fcc_fpsmr, FCC_PSMR_RMII);
+
+ /* adjust to duplex mode */
+ if (dev->phydev->duplex)
+ S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
+ else
+ C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
+
+ /* Restore multicast and promiscuous settings */
+ set_multicast_list(dev);
+
+ S32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
+}
+
+static void stop(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+
+ /* stop ethernet */
+ C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
+
+ /* clear events */
+ W16(fccp, fcc_fcce, 0xffff);
+
+ /* clear interrupt mask */
+ W16(fccp, fcc_fccm, 0);
+
+ fs_cleanup_bds(dev);
+}
+
+static void napi_clear_event_fs(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+
+ W16(fccp, fcc_fcce, FCC_NAPI_EVENT_MSK);
+}
+
+static void napi_enable_fs(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+
+ S16(fccp, fcc_fccm, FCC_NAPI_EVENT_MSK);
+}
+
+static void napi_disable_fs(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+
+ C16(fccp, fcc_fccm, FCC_NAPI_EVENT_MSK);
+}
+
+static void rx_bd_done(struct net_device *dev)
+{
+ /* nothing */
+}
+
+static void tx_kickstart(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+
+ S16(fccp, fcc_ftodr, 0x8000);
+}
+
+static u32 get_int_events(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+
+ return (u32)R16(fccp, fcc_fcce);
+}
+
+static void clear_int_events(struct net_device *dev, u32 int_events)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+
+ W16(fccp, fcc_fcce, int_events & 0xffff);
+}
+
+static void ev_error(struct net_device *dev, u32 int_events)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ dev_warn(fep->dev, "FS_ENET ERROR(s) 0x%x\n", int_events);
+}
+
+static int get_regs(struct net_device *dev, void *p, int *sizep)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ if (*sizep < sizeof(fcc_t) + sizeof(fcc_enet_t) + 1)
+ return -EINVAL;
+
+ memcpy_fromio(p, fep->fcc.fccp, sizeof(fcc_t));
+ p = (char *)p + sizeof(fcc_t);
+
+ memcpy_fromio(p, fep->fcc.ep, sizeof(fcc_enet_t));
+ p = (char *)p + sizeof(fcc_enet_t);
+
+ memcpy_fromio(p, fep->fcc.fcccp, 1);
+ return 0;
+}
+
+static int get_regs_len(struct net_device *dev)
+{
+ return sizeof(fcc_t) + sizeof(fcc_enet_t) + 1;
+}
+
+/* Some transmit errors cause the transmitter to shut
+ * down. We now issue a restart transmit.
+ * Also, to work around 8260 device erratum CPM37, we must
+ * disable and then re-enable the transmitter following a
+ * Late Collision, Underrun, or Retry Limit error.
+ * In addition, tbptr may point beyond BDs that are still marked
+ * as ready due to internal pipelining, so we need to look back
+ * through the BDs and adjust tbptr to point to the last BD
+ * marked as ready. This may result in some buffers being
+ * retransmitted.
+ */
+static void tx_restart(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+ const struct fs_platform_info *fpi = fep->fpi;
+ fcc_enet_t __iomem *ep = fep->fcc.ep;
+ cbd_t __iomem *curr_tbptr;
+ cbd_t __iomem *recheck_bd;
+ cbd_t __iomem *prev_bd;
+ cbd_t __iomem *last_tx_bd;
+
+ last_tx_bd = fep->tx_bd_base + (fpi->tx_ring - 1);
+
+ /* get the current bd held in TBPTR and scan back from this point */
+ recheck_bd = curr_tbptr = (cbd_t __iomem *)
+ ((R32(ep, fen_genfcc.fcc_tbptr) - fep->ring_mem_addr) +
+ fep->ring_base);
+
+ prev_bd = (recheck_bd == fep->tx_bd_base) ? last_tx_bd : recheck_bd - 1;
+
+ /* Move through the bds in reverse, look for the earliest buffer
+ * that is not ready. Adjust TBPTR to the following buffer */
+ while ((CBDR_SC(prev_bd) & BD_ENET_TX_READY) != 0) {
+ /* Go back one buffer */
+ recheck_bd = prev_bd;
+
+ /* update the previous buffer */
+ prev_bd = (prev_bd == fep->tx_bd_base) ? last_tx_bd : prev_bd - 1;
+
+ /* We should never see all bds marked as ready, check anyway */
+ if (recheck_bd == curr_tbptr)
+ break;
+ }
+ /* Now update the TBPTR and dirty flag to the current buffer */
+ W32(ep, fen_genfcc.fcc_tbptr,
+ (uint) (((void *)recheck_bd - fep->ring_base) +
+ fep->ring_mem_addr));
+ fep->dirty_tx = recheck_bd;
+
+ C32(fccp, fcc_gfmr, FCC_GFMR_ENT);
+ udelay(10);
+ S32(fccp, fcc_gfmr, FCC_GFMR_ENT);
+
+ fcc_cr_cmd(fep, CPM_CR_RESTART_TX);
+}
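+
+/*
+ * Illustration of the scan above, for a hypothetical ring state: with an
+ * 8-entry TX ring, TBPTR pointing at BD5, and BD3/BD4 still marked ready while
+ * BD2 is not, the loop walks back BD4 -> BD3, stops once it sees BD2 is clear,
+ * and rewrites TBPTR to BD3; BD3 and BD4 are retransmitted after the
+ * transmitter is toggled and CPM_CR_RESTART_TX is issued.
+ */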
+
+/*************************************************************************/
+
+const struct fs_ops fs_fcc_ops = {
+ .setup_data = setup_data,
+ .cleanup_data = cleanup_data,
+ .set_multicast_list = set_multicast_list,
+ .restart = restart,
+ .stop = stop,
+ .napi_clear_event = napi_clear_event_fs,
+ .napi_enable = napi_enable_fs,
+ .napi_disable = napi_disable_fs,
+ .rx_bd_done = rx_bd_done,
+ .tx_kickstart = tx_kickstart,
+ .get_int_events = get_int_events,
+ .clear_int_events = clear_int_events,
+ .ev_error = ev_error,
+ .get_regs = get_regs,
+ .get_regs_len = get_regs_len,
+ .tx_restart = tx_restart,
+ .allocate_bd = allocate_bd,
+ .free_bd = free_bd,
+};
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
new file mode 100644
index 000000000..61f4b6e50
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -0,0 +1,486 @@
+/*
+ * Freescale Ethernet controllers
+ *
+ * Copyright (c) 2005 Intracom S.A.
+ * by Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2005 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/crc32.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/gfp.h>
+
+#include <asm/irq.h>
+#include <linux/uaccess.h>
+
+#include "fs_enet.h"
+#include "fec.h"
+
+/*************************************************/
+
+#if defined(CONFIG_CPM1)
+/* for CPM1, the __raw_xxx accessors are sufficient */
+#define __fs_out32(addr, x) __raw_writel(x, addr)
+#define __fs_out16(addr, x) __raw_writew(x, addr)
+#define __fs_in32(addr) __raw_readl(addr)
+#define __fs_in16(addr) __raw_readw(addr)
+#else
+/* for others play it safe */
+#define __fs_out32(addr, x) out_be32(addr, x)
+#define __fs_out16(addr, x) out_be16(addr, x)
+#define __fs_in32(addr) in_be32(addr)
+#define __fs_in16(addr) in_be16(addr)
+#endif
+
+/* write */
+#define FW(_fecp, _reg, _v) __fs_out32(&(_fecp)->fec_ ## _reg, (_v))
+
+/* read */
+#define FR(_fecp, _reg) __fs_in32(&(_fecp)->fec_ ## _reg)
+
+/* set bits */
+#define FS(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) | (_v))
+
+/* clear bits */
+#define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v))
+
+/*
+ * Delay to wait for FEC reset command to complete (in us)
+ */
+#define FEC_RESET_DELAY 50
+
+static int whack_reset(struct fec __iomem *fecp)
+{
+ int i;
+
+ FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET);
+ for (i = 0; i < FEC_RESET_DELAY; i++) {
+ if ((FR(fecp, ecntrl) & FEC_ECNTRL_RESET) == 0)
+ return 0; /* OK */
+ udelay(1);
+ }
+
+ return -1;
+}
+
+static int do_pd_setup(struct fs_enet_private *fep)
+{
+ struct platform_device *ofdev = to_platform_device(fep->dev);
+
+ fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
+ if (!fep->interrupt)
+ return -EINVAL;
+
+ fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0);
+ if (!fep->fec.fecp)
+ return -EINVAL;
+
+ return 0;
+}
+
+#define FEC_NAPI_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_TXF)
+#define FEC_EVENT (FEC_ENET_RXF | FEC_ENET_TXF)
+#define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \
+ FEC_ENET_BABT | FEC_ENET_EBERR)
+
+static int setup_data(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ if (do_pd_setup(fep) != 0)
+ return -EINVAL;
+
+ fep->fec.hthi = 0;
+ fep->fec.htlo = 0;
+
+ fep->ev_napi = FEC_NAPI_EVENT_MSK;
+ fep->ev = FEC_EVENT;
+ fep->ev_err = FEC_ERR_EVENT_MSK;
+
+ return 0;
+}
+
+static int allocate_bd(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+
+ fep->ring_base = (void __force __iomem *)dma_alloc_coherent(fep->dev,
+ (fpi->tx_ring + fpi->rx_ring) *
+ sizeof(cbd_t), &fep->ring_mem_addr,
+ GFP_KERNEL);
+ if (fep->ring_base == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void free_bd(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+
+ if (fep->ring_base)
+ dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring)
+ * sizeof(cbd_t),
+ (void __force *)fep->ring_base,
+ fep->ring_mem_addr);
+}
+
+static void cleanup_data(struct net_device *dev)
+{
+ /* nothing */
+}
+
+static void set_promiscuous_mode(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+ FS(fecp, r_cntrl, FEC_RCNTRL_PROM);
+}
+
+static void set_multicast_start(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ fep->fec.hthi = 0;
+ fep->fec.htlo = 0;
+}
+
+static void set_multicast_one(struct net_device *dev, const u8 *mac)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ int temp, hash_index;
+ u32 crc, csrVal;
+
+ crc = ether_crc(6, mac);
+
+ temp = (crc & 0x3f) >> 1;
+ hash_index = ((temp & 0x01) << 4) |
+ ((temp & 0x02) << 2) |
+ ((temp & 0x04)) |
+ ((temp & 0x08) >> 2) |
+ ((temp & 0x10) >> 4);
+ csrVal = 1 << hash_index;
+ if (crc & 1)
+ fep->fec.hthi |= csrVal;
+ else
+ fep->fec.htlo |= csrVal;
+}
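+
+/*
+ * Illustration of the hash above: the index is the bit-reversal of CRC bits
+ * 5..1, and CRC bit 0 picks the register. For a hypothetical CRC whose low six
+ * bits are 0b001101, temp is 0b00110, the reversed index is 12, and since bit
+ * 0 of the CRC is set, bit 12 of the high group hash register (hthi) is set.
+ */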
+
+static void set_multicast_finish(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+ /* if all multi or too many multicasts; just enable all */
+ if ((dev->flags & IFF_ALLMULTI) != 0 ||
+ netdev_mc_count(dev) > FEC_MAX_MULTICAST_ADDRS) {
+ fep->fec.hthi = 0xffffffffU;
+ fep->fec.htlo = 0xffffffffU;
+ }
+
+ FC(fecp, r_cntrl, FEC_RCNTRL_PROM);
+ FW(fecp, grp_hash_table_high, fep->fec.hthi);
+ FW(fecp, grp_hash_table_low, fep->fec.htlo);
+}
+
+static void set_multicast_list(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+
+ if ((dev->flags & IFF_PROMISC) == 0) {
+ set_multicast_start(dev);
+ netdev_for_each_mc_addr(ha, dev)
+ set_multicast_one(dev, ha->addr);
+ set_multicast_finish(dev);
+ } else
+ set_promiscuous_mode(dev);
+}
+
+static void restart(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+ const struct fs_platform_info *fpi = fep->fpi;
+ dma_addr_t rx_bd_base_phys, tx_bd_base_phys;
+ int r;
+ u32 addrhi, addrlo;
+
+ struct mii_bus *mii = dev->phydev->mdio.bus;
+ struct fec_info* fec_inf = mii->priv;
+
+ r = whack_reset(fep->fec.fecp);
+ if (r != 0)
+ dev_err(fep->dev, "FEC Reset FAILED!\n");
+ /*
+ * Set station address.
+ */
+ addrhi = ((u32) dev->dev_addr[0] << 24) |
+ ((u32) dev->dev_addr[1] << 16) |
+ ((u32) dev->dev_addr[2] << 8) |
+ (u32) dev->dev_addr[3];
+ addrlo = ((u32) dev->dev_addr[4] << 24) |
+ ((u32) dev->dev_addr[5] << 16);
+ FW(fecp, addr_low, addrhi);
+ FW(fecp, addr_high, addrlo);
+
+ /*
+ * Reset all multicast.
+ */
+ FW(fecp, grp_hash_table_high, fep->fec.hthi);
+ FW(fecp, grp_hash_table_low, fep->fec.htlo);
+
+ /*
+ * Set maximum receive buffer size.
+ */
+ FW(fecp, r_buff_size, PKT_MAXBLR_SIZE);
+#ifdef CONFIG_FS_ENET_MPC5121_FEC
+ FW(fecp, r_cntrl, PKT_MAXBUF_SIZE << 16);
+#else
+ FW(fecp, r_hash, PKT_MAXBUF_SIZE);
+#endif
+
+ /* get physical address */
+ rx_bd_base_phys = fep->ring_mem_addr;
+ tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring;
+
+ /*
+ * Set receive and transmit descriptor base.
+ */
+ FW(fecp, r_des_start, rx_bd_base_phys);
+ FW(fecp, x_des_start, tx_bd_base_phys);
+
+ fs_init_bds(dev);
+
+ /*
+ * Enable big endian and don't care about SDMA FC.
+ */
+#ifdef CONFIG_FS_ENET_MPC5121_FEC
+ FS(fecp, dma_control, 0xC0000000);
+#else
+ FW(fecp, fun_code, 0x78000000);
+#endif
+
+ /*
+ * Set MII speed.
+ */
+ FW(fecp, mii_speed, fec_inf->mii_speed);
+
+ /*
+ * Clear any outstanding interrupt.
+ */
+ FW(fecp, ievent, 0xffc0);
+#ifndef CONFIG_FS_ENET_MPC5121_FEC
+ FW(fecp, ivec, (virq_to_hw(fep->interrupt) / 2) << 29);
+
+ FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
+#else
+ /*
+ * Only set MII/RMII mode - do not touch maximum frame length
+ * configured before.
+ */
+ FS(fecp, r_cntrl, fpi->use_rmii ?
+ FEC_RCNTRL_RMII_MODE : FEC_RCNTRL_MII_MODE);
+#endif
+ /*
+ * adjust to duplex mode
+ */
+ if (dev->phydev->duplex) {
+ FC(fecp, r_cntrl, FEC_RCNTRL_DRT);
+ FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */
+ } else {
+ FS(fecp, r_cntrl, FEC_RCNTRL_DRT);
+ FC(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD disable */
+ }
+
+ /* Restore multicast and promiscuous settings */
+ set_multicast_list(dev);
+
+ /*
+ * Enable interrupts we wish to service.
+ */
+ FW(fecp, imask, FEC_ENET_TXF | FEC_ENET_TXB |
+ FEC_ENET_RXF | FEC_ENET_RXB);
+
+ /*
+ * And last, enable the transmit and receive processing.
+ */
+ FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
+ FW(fecp, r_des_active, 0x01000000);
+}
+
+static void stop(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+ struct fec_info *feci = dev->phydev->mdio.bus->priv;
+
+ int i;
+
+ if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0)
+ return; /* already down */
+
+ FW(fecp, x_cntrl, 0x01); /* Graceful transmit stop */
+ for (i = 0; ((FR(fecp, ievent) & 0x10000000) == 0) &&
+ i < FEC_RESET_DELAY; i++)
+ udelay(1);
+
+ if (i == FEC_RESET_DELAY)
+ dev_warn(fep->dev, "FEC timeout on graceful transmit stop\n");
+ /*
+ * Disable the FEC and mask its interrupts. MII management is
+ * re-enabled below if this FEC also hosts the MDIO bus.
+ */
+ FW(fecp, imask, 0);
+ FC(fecp, ecntrl, FEC_ECNTRL_ETHER_EN);
+
+ fs_cleanup_bds(dev);
+
+ /* keep the FEC that hosts the MII bus (FEC1) alive for PHY management */
+ if (fpi->has_phy) {
+ FS(fecp, r_cntrl, fpi->use_rmii ?
+ FEC_RCNTRL_RMII_MODE :
+ FEC_RCNTRL_MII_MODE); /* MII/RMII enable */
+ FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
+ FW(fecp, ievent, FEC_ENET_MII);
+ FW(fecp, mii_speed, feci->mii_speed);
+ }
+}
+
+static void napi_clear_event_fs(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+ FW(fecp, ievent, FEC_NAPI_EVENT_MSK);
+}
+
+static void napi_enable_fs(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+ FS(fecp, imask, FEC_NAPI_EVENT_MSK);
+}
+
+static void napi_disable_fs(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+ FC(fecp, imask, FEC_NAPI_EVENT_MSK);
+}
+
+static void rx_bd_done(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+ FW(fecp, r_des_active, 0x01000000);
+}
+
+static void tx_kickstart(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+ FW(fecp, x_des_active, 0x01000000);
+}
+
+static u32 get_int_events(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+ return FR(fecp, ievent) & FR(fecp, imask);
+}
+
+static void clear_int_events(struct net_device *dev, u32 int_events)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+ FW(fecp, ievent, int_events);
+}
+
+static void ev_error(struct net_device *dev, u32 int_events)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ dev_warn(fep->dev, "FEC ERROR(s) 0x%x\n", int_events);
+}
+
+static int get_regs(struct net_device *dev, void *p, int *sizep)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ if (*sizep < sizeof(struct fec))
+ return -EINVAL;
+
+ memcpy_fromio(p, fep->fec.fecp, sizeof(struct fec));
+
+ return 0;
+}
+
+static int get_regs_len(struct net_device *dev)
+{
+ return sizeof(struct fec);
+}
+
+static void tx_restart(struct net_device *dev)
+{
+ /* nothing */
+}
+
+/*************************************************************************/
+
+const struct fs_ops fs_fec_ops = {
+ .setup_data = setup_data,
+ .cleanup_data = cleanup_data,
+ .set_multicast_list = set_multicast_list,
+ .restart = restart,
+ .stop = stop,
+ .napi_clear_event = napi_clear_event_fs,
+ .napi_enable = napi_enable_fs,
+ .napi_disable = napi_disable_fs,
+ .rx_bd_done = rx_bd_done,
+ .tx_kickstart = tx_kickstart,
+ .get_int_events = get_int_events,
+ .clear_int_events = clear_int_events,
+ .ev_error = ev_error,
+ .get_regs = get_regs,
+ .get_regs_len = get_regs_len,
+ .tx_restart = tx_restart,
+ .allocate_bd = allocate_bd,
+ .free_bd = free_bd,
+};
+
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
new file mode 100644
index 000000000..64300ac13
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
@@ -0,0 +1,479 @@
+/*
+ * Ethernet on Serial Communications Controller (SCC) driver for Motorola MPC8xx and MPC82xx.
+ *
+ * Copyright (c) 2003 Intracom S.A.
+ * by Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2005 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+#include <asm/irq.h>
+#include <linux/uaccess.h>
+
+#include "fs_enet.h"
+
+/*************************************************/
+#if defined(CONFIG_CPM1)
+/* for a 8xx __raw_xxx's are sufficient */
+#define __fs_out32(addr, x) __raw_writel(x, addr)
+#define __fs_out16(addr, x) __raw_writew(x, addr)
+#define __fs_out8(addr, x) __raw_writeb(x, addr)
+#define __fs_in32(addr) __raw_readl(addr)
+#define __fs_in16(addr) __raw_readw(addr)
+#define __fs_in8(addr) __raw_readb(addr)
+#else
+/* for others play it safe */
+#define __fs_out32(addr, x) out_be32(addr, x)
+#define __fs_out16(addr, x) out_be16(addr, x)
+#define __fs_in32(addr) in_be32(addr)
+#define __fs_in16(addr) in_be16(addr)
+#define __fs_out8(addr, x) out_8(addr, x)
+#define __fs_in8(addr) in_8(addr)
+#endif
+
+/* write, read, set bits, clear bits */
+#define W32(_p, _m, _v) __fs_out32(&(_p)->_m, (_v))
+#define R32(_p, _m) __fs_in32(&(_p)->_m)
+#define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v))
+#define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v))
+
+#define W16(_p, _m, _v) __fs_out16(&(_p)->_m, (_v))
+#define R16(_p, _m) __fs_in16(&(_p)->_m)
+#define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v))
+#define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v))
+
+#define W8(_p, _m, _v) __fs_out8(&(_p)->_m, (_v))
+#define R8(_p, _m) __fs_in8(&(_p)->_m)
+#define S8(_p, _m, _v) W8(_p, _m, R8(_p, _m) | (_v))
+#define C8(_p, _m, _v) W8(_p, _m, R8(_p, _m) & ~(_v))
+
+#define SCC_MAX_MULTICAST_ADDRS 64
+
+/*
+ * Delay to wait for SCC reset command to complete (in us)
+ */
+#define SCC_RESET_DELAY 50
+
+static inline int scc_cr_cmd(struct fs_enet_private *fep, u32 op)
+{
+ const struct fs_platform_info *fpi = fep->fpi;
+
+ return cpm_command(fpi->cp_command, op);
+}
+
+static int do_pd_setup(struct fs_enet_private *fep)
+{
+ struct platform_device *ofdev = to_platform_device(fep->dev);
+
+ fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
+ if (!fep->interrupt)
+ return -EINVAL;
+
+ fep->scc.sccp = of_iomap(ofdev->dev.of_node, 0);
+ if (!fep->scc.sccp)
+ return -EINVAL;
+
+ fep->scc.ep = of_iomap(ofdev->dev.of_node, 1);
+ if (!fep->scc.ep) {
+ iounmap(fep->scc.sccp);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define SCC_NAPI_EVENT_MSK (SCCE_ENET_RXF | SCCE_ENET_RXB | SCCE_ENET_TXB)
+#define SCC_EVENT (SCCE_ENET_RXF | SCCE_ENET_TXB)
+#define SCC_ERR_EVENT_MSK (SCCE_ENET_TXE | SCCE_ENET_BSY)
+
+static int setup_data(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ do_pd_setup(fep);
+
+ fep->scc.hthi = 0;
+ fep->scc.htlo = 0;
+
+ fep->ev_napi = SCC_NAPI_EVENT_MSK;
+ fep->ev = SCC_EVENT | SCCE_ENET_TXE;
+ fep->ev_err = SCC_ERR_EVENT_MSK;
+
+ return 0;
+}
+
+static int allocate_bd(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+
+ fep->ring_mem_addr = cpm_dpalloc((fpi->tx_ring + fpi->rx_ring) *
+ sizeof(cbd_t), 8);
+ if (IS_ERR_VALUE(fep->ring_mem_addr))
+ return -ENOMEM;
+
+ fep->ring_base = (void __iomem __force*)
+ cpm_dpram_addr(fep->ring_mem_addr);
+
+ return 0;
+}
+
+static void free_bd(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ if (fep->ring_base)
+ cpm_dpfree(fep->ring_mem_addr);
+}
+
+static void cleanup_data(struct net_device *dev)
+{
+ /* nothing */
+}
+
+static void set_promiscuous_mode(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t __iomem *sccp = fep->scc.sccp;
+
+ S16(sccp, scc_psmr, SCC_PSMR_PRO);
+}
+
+static void set_multicast_start(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_enet_t __iomem *ep = fep->scc.ep;
+
+ W16(ep, sen_gaddr1, 0);
+ W16(ep, sen_gaddr2, 0);
+ W16(ep, sen_gaddr3, 0);
+ W16(ep, sen_gaddr4, 0);
+}
+
+static void set_multicast_one(struct net_device *dev, const u8 * mac)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_enet_t __iomem *ep = fep->scc.ep;
+ u16 taddrh, taddrm, taddrl;
+
+ taddrh = ((u16) mac[5] << 8) | mac[4];
+ taddrm = ((u16) mac[3] << 8) | mac[2];
+ taddrl = ((u16) mac[1] << 8) | mac[0];
+
+ W16(ep, sen_taddrh, taddrh);
+ W16(ep, sen_taddrm, taddrm);
+ W16(ep, sen_taddrl, taddrl);
+ scc_cr_cmd(fep, CPM_CR_SET_GADDR);
+}
+
+static void set_multicast_finish(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t __iomem *sccp = fep->scc.sccp;
+ scc_enet_t __iomem *ep = fep->scc.ep;
+
+ /* clear promiscuous always */
+ C16(sccp, scc_psmr, SCC_PSMR_PRO);
+
+ /* if allmulti is set or there are too many multicast addresses,
+ * just accept all of them
+ */
+ if ((dev->flags & IFF_ALLMULTI) != 0 ||
+ netdev_mc_count(dev) > SCC_MAX_MULTICAST_ADDRS) {
+
+ W16(ep, sen_gaddr1, 0xffff);
+ W16(ep, sen_gaddr2, 0xffff);
+ W16(ep, sen_gaddr3, 0xffff);
+ W16(ep, sen_gaddr4, 0xffff);
+ }
+}
+
+static void set_multicast_list(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+
+ if ((dev->flags & IFF_PROMISC) == 0) {
+ set_multicast_start(dev);
+ netdev_for_each_mc_addr(ha, dev)
+ set_multicast_one(dev, ha->addr);
+ set_multicast_finish(dev);
+ } else
+ set_promiscuous_mode(dev);
+}
+
+/*
+ * This function is called to start or restart the SCC during a link
+ * change. This only happens when switching between half and full
+ * duplex.
+ */
+static void restart(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t __iomem *sccp = fep->scc.sccp;
+ scc_enet_t __iomem *ep = fep->scc.ep;
+ const struct fs_platform_info *fpi = fep->fpi;
+ u16 paddrh, paddrm, paddrl;
+ const unsigned char *mac;
+ int i;
+
+ C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+
+ /* clear everything (slow & steady does it) */
+ for (i = 0; i < sizeof(*ep); i++)
+ __fs_out8((u8 __iomem *)ep + i, 0);
+
+ /* point to bds */
+ W16(ep, sen_genscc.scc_rbase, fep->ring_mem_addr);
+ W16(ep, sen_genscc.scc_tbase,
+ fep->ring_mem_addr + sizeof(cbd_t) * fpi->rx_ring);
+
+ /* Initialize function code registers for big-endian.
+ */
+#ifndef CONFIG_NOT_COHERENT_CACHE
+ W8(ep, sen_genscc.scc_rfcr, SCC_EB | SCC_GBL);
+ W8(ep, sen_genscc.scc_tfcr, SCC_EB | SCC_GBL);
+#else
+ W8(ep, sen_genscc.scc_rfcr, SCC_EB);
+ W8(ep, sen_genscc.scc_tfcr, SCC_EB);
+#endif
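+ /*
+ * SCC_GBL is understood to mark the SDMA accesses as global
+ * (snoopable), which only helps when the data cache is coherent;
+ * hence it is added only when CONFIG_NOT_COHERENT_CACHE is unset.
+ */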
+
+ /* Set maximum bytes per receive buffer.
+ * This appears to be an Ethernet frame size, not the buffer
+ * fragment size. It must be a multiple of four.
+ */
+ W16(ep, sen_genscc.scc_mrblr, 0x5f0);
+
+ /* Set CRC preset and mask.
+ */
+ W32(ep, sen_cpres, 0xffffffff);
+ W32(ep, sen_cmask, 0xdebb20e3);
+
+ W32(ep, sen_crcec, 0); /* CRC Error counter */
+ W32(ep, sen_alec, 0); /* alignment error counter */
+ W32(ep, sen_disfc, 0); /* discard frame counter */
+
+ W16(ep, sen_pads, 0x8888); /* Tx short frame pad character */
+ W16(ep, sen_retlim, 15); /* Retry limit threshold */
+
+ W16(ep, sen_maxflr, 0x5ee); /* maximum frame length register */
+
+ W16(ep, sen_minflr, PKT_MINBUF_SIZE); /* minimum frame length register */
+
+ W16(ep, sen_maxd1, 0x000005f0); /* maximum DMA1 length */
+ W16(ep, sen_maxd2, 0x000005f0); /* maximum DMA2 length */
+
+ /* Clear hash tables.
+ */
+ W16(ep, sen_gaddr1, 0);
+ W16(ep, sen_gaddr2, 0);
+ W16(ep, sen_gaddr3, 0);
+ W16(ep, sen_gaddr4, 0);
+ W16(ep, sen_iaddr1, 0);
+ W16(ep, sen_iaddr2, 0);
+ W16(ep, sen_iaddr3, 0);
+ W16(ep, sen_iaddr4, 0);
+
+ /* set address
+ */
+ mac = dev->dev_addr;
+ paddrh = ((u16) mac[5] << 8) | mac[4];
+ paddrm = ((u16) mac[3] << 8) | mac[2];
+ paddrl = ((u16) mac[1] << 8) | mac[0];
+
+ W16(ep, sen_paddrh, paddrh);
+ W16(ep, sen_paddrm, paddrm);
+ W16(ep, sen_paddrl, paddrl);
+
+ W16(ep, sen_pper, 0);
+ W16(ep, sen_taddrl, 0);
+ W16(ep, sen_taddrm, 0);
+ W16(ep, sen_taddrh, 0);
+
+ fs_init_bds(dev);
+
+ scc_cr_cmd(fep, CPM_CR_INIT_TRX);
+
+ W16(sccp, scc_scce, 0xffff);
+
+ /* Enable interrupts we wish to service.
+ */
+ W16(sccp, scc_sccm, SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB);
+
+ /* Set GSMR_H to enable all normal operating modes.
+ * Set GSMR_L to enable Ethernet to MC68160.
+ */
+ W32(sccp, scc_gsmrh, 0);
+ W32(sccp, scc_gsmrl,
+ SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 |
+ SCC_GSMRL_MODE_ENET);
+
+ /* Set sync/delimiters.
+ */
+ W16(sccp, scc_dsr, 0xd555);
+
+ /* Set processing mode. Use Ethernet CRC, catch broadcast, and
+ * start frame search 22 bit times after RENA.
+ */
+ W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);
+
+ /* Set full duplex mode if needed */
+ if (dev->phydev->duplex)
+ S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);
+
+ /* Restore multicast and promiscuous settings */
+ set_multicast_list(dev);
+
+ S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+}
+
+static void stop(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t __iomem *sccp = fep->scc.sccp;
+ int i;
+
+ for (i = 0; (R16(sccp, scc_sccm) == 0) && i < SCC_RESET_DELAY; i++)
+ udelay(1);
+
+ if (i == SCC_RESET_DELAY)
+ dev_warn(fep->dev, "SCC timeout on graceful transmit stop\n");
+
+ W16(sccp, scc_sccm, 0);
+ C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+
+ fs_cleanup_bds(dev);
+}
+
+static void napi_clear_event_fs(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t __iomem *sccp = fep->scc.sccp;
+
+ W16(sccp, scc_scce, SCC_NAPI_EVENT_MSK);
+}
+
+static void napi_enable_fs(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t __iomem *sccp = fep->scc.sccp;
+
+ S16(sccp, scc_sccm, SCC_NAPI_EVENT_MSK);
+}
+
+static void napi_disable_fs(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t __iomem *sccp = fep->scc.sccp;
+
+ C16(sccp, scc_sccm, SCC_NAPI_EVENT_MSK);
+}
+
+static void rx_bd_done(struct net_device *dev)
+{
+ /* nothing */
+}
+
+static void tx_kickstart(struct net_device *dev)
+{
+ /* nothing */
+}
+
+static u32 get_int_events(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t __iomem *sccp = fep->scc.sccp;
+
+ return (u32) R16(sccp, scc_scce);
+}
+
+static void clear_int_events(struct net_device *dev, u32 int_events)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t __iomem *sccp = fep->scc.sccp;
+
+ W16(sccp, scc_scce, int_events & 0xffff);
+}
+
+static void ev_error(struct net_device *dev, u32 int_events)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ dev_warn(fep->dev, "SCC ERROR(s) 0x%x\n", int_events);
+}
+
+static int get_regs(struct net_device *dev, void *p, int *sizep)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ if (*sizep < sizeof(scc_t) + sizeof(scc_enet_t __iomem *))
+ return -EINVAL;
+
+ memcpy_fromio(p, fep->scc.sccp, sizeof(scc_t));
+ p = (char *)p + sizeof(scc_t);
+
+ memcpy_fromio(p, fep->scc.ep, sizeof(scc_enet_t __iomem *));
+
+ return 0;
+}
+
+static int get_regs_len(struct net_device *dev)
+{
+ return sizeof(scc_t) + sizeof(scc_enet_t __iomem *);
+}
+
+static void tx_restart(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ scc_cr_cmd(fep, CPM_CR_RESTART_TX);
+}
+
+
+
+/*************************************************************************/
+
+const struct fs_ops fs_scc_ops = {
+ .setup_data = setup_data,
+ .cleanup_data = cleanup_data,
+ .set_multicast_list = set_multicast_list,
+ .restart = restart,
+ .stop = stop,
+ .napi_clear_event = napi_clear_event_fs,
+ .napi_enable = napi_enable_fs,
+ .napi_disable = napi_disable_fs,
+ .rx_bd_done = rx_bd_done,
+ .tx_kickstart = tx_kickstart,
+ .get_int_events = get_int_events,
+ .clear_int_events = clear_int_events,
+ .ev_error = ev_error,
+ .get_regs = get_regs,
+ .get_regs_len = get_regs_len,
+ .tx_restart = tx_restart,
+ .allocate_bd = allocate_bd,
+ .free_bd = free_bd,
+};
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
new file mode 100644
index 000000000..21de56345
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -0,0 +1,226 @@
+/*
+ * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
+ *
+ * Copyright (c) 2003 Intracom S.A.
+ * by Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2005 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/mii.h>
+#include <linux/platform_device.h>
+#include <linux/mdio-bitbang.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+
+#include "fs_enet.h"
+
+struct bb_info {
+ struct mdiobb_ctrl ctrl;
+ __be32 __iomem *dir;
+ __be32 __iomem *dat;
+ u32 mdio_msk;
+ u32 mdc_msk;
+};
+
+/* FIXME: If any other users of GPIO crop up, then these will have to
+ * have some sort of global synchronization to avoid races with other
+ * pins on the same port. The ideal solution would probably be to
+ * bind the ports to a GPIO driver, and have this be a client of it.
+ */
+static inline void bb_set(u32 __iomem *p, u32 m)
+{
+ out_be32(p, in_be32(p) | m);
+}
+
+static inline void bb_clr(u32 __iomem *p, u32 m)
+{
+ out_be32(p, in_be32(p) & ~m);
+}
+
+static inline int bb_read(u32 __iomem *p, u32 m)
+{
+ return (in_be32(p) & m) != 0;
+}
+
+static inline void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
+{
+ struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
+
+ if (dir)
+ bb_set(bitbang->dir, bitbang->mdio_msk);
+ else
+ bb_clr(bitbang->dir, bitbang->mdio_msk);
+
+ /* Read back to flush the write. */
+ in_be32(bitbang->dir);
+}
+
+static inline int mdio_read(struct mdiobb_ctrl *ctrl)
+{
+ struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
+ return bb_read(bitbang->dat, bitbang->mdio_msk);
+}
+
+static inline void mdio(struct mdiobb_ctrl *ctrl, int what)
+{
+ struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
+
+ if (what)
+ bb_set(bitbang->dat, bitbang->mdio_msk);
+ else
+ bb_clr(bitbang->dat, bitbang->mdio_msk);
+
+ /* Read back to flush the write. */
+ in_be32(bitbang->dat);
+}
+
+static inline void mdc(struct mdiobb_ctrl *ctrl, int what)
+{
+ struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
+
+ if (what)
+ bb_set(bitbang->dat, bitbang->mdc_msk);
+ else
+ bb_clr(bitbang->dat, bitbang->mdc_msk);
+
+ /* Read back to flush the write. */
+ in_be32(bitbang->dat);
+}
+
+static const struct mdiobb_ops bb_ops = {
+ .owner = THIS_MODULE,
+ .set_mdc = mdc,
+ .set_mdio_dir = mdio_dir,
+ .set_mdio_data = mdio,
+ .get_mdio_data = mdio_read,
+};
+
+static int fs_mii_bitbang_init(struct mii_bus *bus, struct device_node *np)
+{
+ struct resource res;
+ const u32 *data;
+ int mdio_pin, mdc_pin, len;
+ struct bb_info *bitbang = bus->priv;
+
+ int ret = of_address_to_resource(np, 0, &res);
+ if (ret)
+ return ret;
+
+ if (resource_size(&res) <= 13)
+ return -ENODEV;
+
+ /* This should really encode the pin number as well, but all
+ * we get is an int, and the odds of multiple bitbang mdio buses
+ * are low enough that it's not worth going too crazy.
+ */
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start);
+
+ data = of_get_property(np, "fsl,mdio-pin", &len);
+ if (!data || len != 4)
+ return -ENODEV;
+ mdio_pin = *data;
+
+ data = of_get_property(np, "fsl,mdc-pin", &len);
+ if (!data || len != 4)
+ return -ENODEV;
+ mdc_pin = *data;
+
+ bitbang->dir = ioremap(res.start, resource_size(&res));
+ if (!bitbang->dir)
+ return -ENOMEM;
+
+ bitbang->dat = bitbang->dir + 4;
+ bitbang->mdio_msk = 1 << (31 - mdio_pin);
+ bitbang->mdc_msk = 1 << (31 - mdc_pin);
+
+ return 0;
+}
+
+static int fs_enet_mdio_probe(struct platform_device *ofdev)
+{
+ struct mii_bus *new_bus;
+ struct bb_info *bitbang;
+ int ret = -ENOMEM;
+
+ bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
+ if (!bitbang)
+ goto out;
+
+ bitbang->ctrl.ops = &bb_ops;
+
+ new_bus = alloc_mdio_bitbang(&bitbang->ctrl);
+ if (!new_bus)
+ goto out_free_priv;
+
+ new_bus->name = "CPM2 Bitbanged MII",
+
+ ret = fs_mii_bitbang_init(new_bus, ofdev->dev.of_node);
+ if (ret)
+ goto out_free_bus;
+
+ new_bus->phy_mask = ~0;
+
+ new_bus->parent = &ofdev->dev;
+ platform_set_drvdata(ofdev, new_bus);
+
+ ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
+ if (ret)
+ goto out_unmap_regs;
+
+ return 0;
+
+out_unmap_regs:
+ iounmap(bitbang->dir);
+out_free_bus:
+ free_mdio_bitbang(new_bus);
+out_free_priv:
+ kfree(bitbang);
+out:
+ return ret;
+}
+
+static int fs_enet_mdio_remove(struct platform_device *ofdev)
+{
+ struct mii_bus *bus = platform_get_drvdata(ofdev);
+ struct bb_info *bitbang = bus->priv;
+
+ mdiobus_unregister(bus);
+ free_mdio_bitbang(bus);
+ iounmap(bitbang->dir);
+ kfree(bitbang);
+
+ return 0;
+}
+
+static const struct of_device_id fs_enet_mdio_bb_match[] = {
+ {
+ .compatible = "fsl,cpm2-mdio-bitbang",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, fs_enet_mdio_bb_match);
+
+static struct platform_driver fs_enet_bb_mdio_driver = {
+ .driver = {
+ .name = "fsl-bb-mdio",
+ .of_match_table = fs_enet_mdio_bb_match,
+ },
+ .probe = fs_enet_mdio_probe,
+ .remove = fs_enet_mdio_remove,
+};
+
+module_platform_driver(fs_enet_bb_mdio_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
new file mode 100644
index 000000000..d37d7a19a
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -0,0 +1,227 @@
+/*
+ * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
+ *
+ * Copyright (c) 2003 Intracom S.A.
+ * by Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2005 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/pgtable.h>
+
+#include <asm/irq.h>
+#include <linux/uaccess.h>
+#include <asm/mpc5xxx.h>
+
+#include "fs_enet.h"
+#include "fec.h"
+
+/* Make MII read/write commands for the FEC.
+*/
+#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
+#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
+#define mk_mii_end 0
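+
+/* These command words encode an IEEE 802.3 clause-22 management frame as
+ * laid out in the FEC MII data register: start (01) in bits 31-30, opcode
+ * in bits 29-28 (10 = read, 01 = write), PHY address in bits 27-23 (added
+ * by the callers below), register number in bits 22-18, turnaround (10)
+ * in bits 17-16, and data in bits 15-0.
+ */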
+
+#define FEC_MII_LOOPS 10000
+
+static int fs_enet_fec_mii_read(struct mii_bus *bus, int phy_id, int location)
+{
+ struct fec_info *fec = bus->priv;
+ struct fec __iomem *fecp = fec->fecp;
+ int i, ret = -1;
+
+ BUG_ON((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0);
+
+ /* Add PHY address to register command. */
+ out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_read(location));
+
+ for (i = 0; i < FEC_MII_LOOPS; i++)
+ if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0)
+ break;
+
+ if (i < FEC_MII_LOOPS) {
+ out_be32(&fecp->fec_ievent, FEC_ENET_MII);
+ ret = in_be32(&fecp->fec_mii_data) & 0xffff;
+ }
+
+ return ret;
+}
+
+static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val)
+{
+ struct fec_info *fec = bus->priv;
+ struct fec __iomem *fecp = fec->fecp;
+ int i;
+
+ /* this must never happen */
+ BUG_ON((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0);
+
+ /* Add PHY address to register command. */
+ out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_write(location, val));
+
+ for (i = 0; i < FEC_MII_LOOPS; i++)
+ if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0)
+ break;
+
+ if (i < FEC_MII_LOOPS)
+ out_be32(&fecp->fec_ievent, FEC_ENET_MII);
+
+ return 0;
+
+}
+
+static const struct of_device_id fs_enet_mdio_fec_match[];
+static int fs_enet_mdio_probe(struct platform_device *ofdev)
+{
+ const struct of_device_id *match;
+ struct resource res;
+ struct mii_bus *new_bus;
+ struct fec_info *fec;
+ int (*get_bus_freq)(struct device *);
+ int ret = -ENOMEM, clock, speed;
+
+ match = of_match_device(fs_enet_mdio_fec_match, &ofdev->dev);
+ if (!match)
+ return -EINVAL;
+ get_bus_freq = match->data;
+
+ new_bus = mdiobus_alloc();
+ if (!new_bus)
+ goto out;
+
+ fec = kzalloc(sizeof(struct fec_info), GFP_KERNEL);
+ if (!fec)
+ goto out_mii;
+
+ new_bus->priv = fec;
+ new_bus->name = "FEC MII Bus";
+ new_bus->read = &fs_enet_fec_mii_read;
+ new_bus->write = &fs_enet_fec_mii_write;
+
+ ret = of_address_to_resource(ofdev->dev.of_node, 0, &res);
+ if (ret)
+ goto out_res;
+
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start);
+
+ fec->fecp = ioremap(res.start, resource_size(&res));
+ if (!fec->fecp) {
+ ret = -ENOMEM;
+ goto out_fec;
+ }
+
+ if (get_bus_freq) {
+ clock = get_bus_freq(&ofdev->dev);
+ if (!clock) {
+ /* Use maximum divider if clock is unknown */
+ dev_warn(&ofdev->dev, "could not determine IPS clock\n");
+ clock = 0x3F * 5000000;
+ }
+ } else
+ clock = ppc_proc_freq;
+
+ /*
+ * Scale for a MII clock <= 2.5 MHz
+ * Note that only 6 bits (25:30) are available for MII speed.
+ */
+ speed = (clock + 4999999) / 5000000;
+ if (speed > 0x3F) {
+ speed = 0x3F;
+ dev_err(&ofdev->dev,
+ "MII clock (%d Hz) exceeds max (2.5 MHz)\n",
+ clock / speed);
+ }
+
+ fec->mii_speed = speed << 1;
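+ /*
+ * Example: a 66 MHz bus clock gives speed = 14 (66 MHz / 5 MHz, rounded
+ * up), i.e. an MII clock of about 66 MHz / (2 * 14) ~= 2.36 MHz, assuming
+ * the FEC divides the bus clock by twice this value as the 5 MHz quantum
+ * above implies. The shift left by one places the divider in the 6-bit
+ * field (mask 0x7E) programmed below.
+ */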
+
+ setbits32(&fec->fecp->fec_r_cntrl, FEC_RCNTRL_MII_MODE);
+ setbits32(&fec->fecp->fec_ecntrl, FEC_ECNTRL_PINMUX |
+ FEC_ECNTRL_ETHER_EN);
+ out_be32(&fec->fecp->fec_ievent, FEC_ENET_MII);
+ clrsetbits_be32(&fec->fecp->fec_mii_speed, 0x7E, fec->mii_speed);
+
+ new_bus->phy_mask = ~0;
+
+ new_bus->parent = &ofdev->dev;
+ platform_set_drvdata(ofdev, new_bus);
+
+ ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
+ if (ret)
+ goto out_unmap_regs;
+
+ return 0;
+
+out_unmap_regs:
+ iounmap(fec->fecp);
+out_res:
+out_fec:
+ kfree(fec);
+out_mii:
+ mdiobus_free(new_bus);
+out:
+ return ret;
+}
+
+static int fs_enet_mdio_remove(struct platform_device *ofdev)
+{
+ struct mii_bus *bus = platform_get_drvdata(ofdev);
+ struct fec_info *fec = bus->priv;
+
+ mdiobus_unregister(bus);
+ iounmap(fec->fecp);
+ kfree(fec);
+ mdiobus_free(bus);
+
+ return 0;
+}
+
+static const struct of_device_id fs_enet_mdio_fec_match[] = {
+ {
+ .compatible = "fsl,pq1-fec-mdio",
+ },
+#if defined(CONFIG_PPC_MPC512x)
+ {
+ .compatible = "fsl,mpc5121-fec-mdio",
+ .data = mpc5xxx_get_bus_frequency,
+ },
+#endif
+ {},
+};
+MODULE_DEVICE_TABLE(of, fs_enet_mdio_fec_match);
+
+static struct platform_driver fs_enet_fec_mdio_driver = {
+ .driver = {
+ .name = "fsl-fec-mdio",
+ .of_match_table = fs_enet_mdio_fec_match,
+ },
+ .probe = fs_enet_mdio_probe,
+ .remove = fs_enet_mdio_remove,
+};
+
+module_platform_driver(fs_enet_fec_mdio_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
new file mode 100644
index 000000000..9d58d8334
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -0,0 +1,539 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Freescale PowerQUICC Ethernet Driver -- MIIM bus implementation
+ * Provides Bus interface for MIIM regs
+ *
+ * Author: Andy Fleming <afleming@freescale.com>
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
+ *
+ * Copyright 2002-2004, 2008-2009 Freescale Semiconductor, Inc.
+ *
+ * Based on gianfar_mii.c and ucc_geth_mii.c (Li Yang, Kim Phillips)
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/of_device.h>
+
+#include <asm/io.h>
+#if IS_ENABLED(CONFIG_UCC_GETH)
+#include <soc/fsl/qe/ucc.h>
+#endif
+
+#include "gianfar.h"
+
+#define MIIMIND_BUSY 0x00000001
+#define MIIMIND_NOTVALID 0x00000004
+#define MIIMCFG_INIT_VALUE 0x00000007
+#define MIIMCFG_RESET 0x80000000
+
+#define MII_READ_COMMAND 0x00000001
+
+struct fsl_pq_mii {
+ u32 miimcfg; /* MII management configuration reg */
+ u32 miimcom; /* MII management command reg */
+ u32 miimadd; /* MII management address reg */
+ u32 miimcon; /* MII management control reg */
+ u32 miimstat; /* MII management status reg */
+ u32 miimind; /* MII management indication reg */
+};
+
+struct fsl_pq_mdio {
+ u8 res1[16];
+ u32 ieventm; /* MDIO Interrupt event register (for etsec2)*/
+ u32 imaskm; /* MDIO Interrupt mask register (for etsec2)*/
+ u8 res2[4];
+ u32 emapm; /* MDIO Event mapping register (for etsec2)*/
+ u8 res3[1280];
+ struct fsl_pq_mii mii;
+ u8 res4[28];
+ u32 utbipar; /* TBI phy address reg (only on UCC) */
+ u8 res5[2728];
+} __packed;
+
+/* Number of microseconds to wait for an MII register to respond */
+#define MII_TIMEOUT 1000
+
+struct fsl_pq_mdio_priv {
+ void __iomem *map;
+ struct fsl_pq_mii __iomem *regs;
+};
+
+/*
+ * Per-device-type data. Each type of device tree node that we support gets
+ * one of these.
+ *
+ * @mii_offset: the offset of the MII registers within the memory map of the
+ * node. Some nodes define only the MII registers, and some define the whole
+ * MAC (which includes the MII registers).
+ *
+ * @get_tbipa: determines the address of the TBIPA register
+ *
+ * @ucc_configure: a special function for extra QE configuration
+ */
+struct fsl_pq_mdio_data {
+ unsigned int mii_offset; /* offset of the MII registers */
+ uint32_t __iomem * (*get_tbipa)(void __iomem *p);
+ void (*ucc_configure)(phys_addr_t start, phys_addr_t end);
+};
+
+/*
+ * Write value to the PHY at mii_id at register regnum, on the bus attached
+ * to the local interface, which may be different from the generic mdio bus
+ * (tied to a single interface), waiting until the write is done before
+ * returning. This is helpful in programming interfaces like the TBI which
+ * control interfaces like onchip SERDES and are always tied to the local
+ * mdio pins, which may not be the same as system mdio bus, used for
+ * controlling the external PHYs, for example.
+ */
+static int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
+{
+ struct fsl_pq_mdio_priv *priv = bus->priv;
+ struct fsl_pq_mii __iomem *regs = priv->regs;
+ unsigned int timeout;
+
+ /* Set the PHY address and the register address we want to write */
+ iowrite32be((mii_id << 8) | regnum, &regs->miimadd);
+
+ /* Write out the value we want */
+ iowrite32be(value, &regs->miimcon);
+
+ /* Wait for the transaction to finish */
+ timeout = MII_TIMEOUT;
+ while ((ioread32be(&regs->miimind) & MIIMIND_BUSY) && timeout) {
+ cpu_relax();
+ timeout--;
+ }
+
+ return timeout ? 0 : -ETIMEDOUT;
+}
+
+/*
+ * Read the bus for PHY at addr mii_id, register regnum, and return the value.
+ * Clears miimcom first.
+ *
+ * All PHY operations are done on the bus attached to the local interface, which
+ * may be different from the generic mdio bus. This is helpful in programming
+ * interfaces like the TBI which, in turn, control interfaces like on-chip
+ * SERDES and are always tied to the local mdio pins, which may not be the
+ * same as the system mdio bus used to control the external PHYs, for example.
+ */
+static int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+ struct fsl_pq_mdio_priv *priv = bus->priv;
+ struct fsl_pq_mii __iomem *regs = priv->regs;
+ unsigned int timeout;
+ u16 value;
+
+ /* Set the PHY address and the register address we want to read */
+ iowrite32be((mii_id << 8) | regnum, &regs->miimadd);
+
+ /* Clear miimcom, and then initiate a read */
+ iowrite32be(0, &regs->miimcom);
+ iowrite32be(MII_READ_COMMAND, &regs->miimcom);
+
+ /* Wait for the transaction to finish, normally less than 100us */
+ timeout = MII_TIMEOUT;
+ while ((ioread32be(&regs->miimind) &
+ (MIIMIND_NOTVALID | MIIMIND_BUSY)) && timeout) {
+ cpu_relax();
+ timeout--;
+ }
+
+ if (!timeout)
+ return -ETIMEDOUT;
+
+ /* Grab the value of the register from miimstat */
+ value = ioread32be(&regs->miimstat);
+
+ dev_dbg(&bus->dev, "read %04x from address %x/%x\n", value, mii_id, regnum);
+ return value;
+}
+
+/* Reset the MIIM registers, and wait for the bus to free */
+static int fsl_pq_mdio_reset(struct mii_bus *bus)
+{
+ struct fsl_pq_mdio_priv *priv = bus->priv;
+ struct fsl_pq_mii __iomem *regs = priv->regs;
+ unsigned int timeout;
+
+ mutex_lock(&bus->mdio_lock);
+
+ /* Reset the management interface */
+ iowrite32be(MIIMCFG_RESET, &regs->miimcfg);
+
+ /* Setup the MII Mgmt clock speed */
+ iowrite32be(MIIMCFG_INIT_VALUE, &regs->miimcfg);
+
+ /* Wait until the bus is free */
+ timeout = MII_TIMEOUT;
+ while ((ioread32be(&regs->miimind) & MIIMIND_BUSY) && timeout) {
+ cpu_relax();
+ timeout--;
+ }
+
+ mutex_unlock(&bus->mdio_lock);
+
+ if (!timeout) {
+ dev_err(&bus->dev, "timeout waiting for MII bus\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_GIANFAR)
+/*
+ * Return the TBIPA address, starting from the address
+ * of the mapped GFAR MDIO registers (struct gfar)
+ * This is mildly evil, but so is our hardware for doing this.
+ * Also, we have to cast back to struct gfar because of
+ * definition weirdness done in gianfar.h.
+ */
+static uint32_t __iomem *get_gfar_tbipa_from_mdio(void __iomem *p)
+{
+ struct gfar __iomem *enet_regs = p;
+
+ return &enet_regs->tbipa;
+}
+
+/*
+ * Return the TBIPA address, starting from the address
+ * of the mapped GFAR MII registers (gfar_mii_regs[] within struct gfar)
+ */
+static uint32_t __iomem *get_gfar_tbipa_from_mii(void __iomem *p)
+{
+ return get_gfar_tbipa_from_mdio(container_of(p, struct gfar, gfar_mii_regs));
+}
+
+/*
+ * Return the TBIPAR address for an eTSEC2 node
+ */
+static uint32_t __iomem *get_etsec_tbipa(void __iomem *p)
+{
+ return p;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_UCC_GETH)
+/*
+ * Return the TBIPAR address for a QE MDIO node, starting from the address
+ * of the mapped MII registers (struct fsl_pq_mii)
+ */
+static uint32_t __iomem *get_ucc_tbipa(void __iomem *p)
+{
+ struct fsl_pq_mdio __iomem *mdio = container_of(p, struct fsl_pq_mdio, mii);
+
+ return &mdio->utbipar;
+}
+
+/*
+ * Find the UCC node that controls the given MDIO node
+ *
+ * For some reason, the QE MDIO nodes are not children of the UCC devices
+ * that control them. Therefore, we need to scan all UCC nodes looking for
+ * the one that encompasses the given MDIO node. We do this by comparing
+ * physical addresses. The 'start' and 'end' addresses of the MDIO node are
+ * passed, and the correct UCC node will cover the entire address range.
+ *
+ * This assumes that there is only one QE MDIO node in the entire device tree.
+ */
+static void ucc_configure(phys_addr_t start, phys_addr_t end)
+{
+ static bool found_mii_master;
+ struct device_node *np = NULL;
+
+ if (found_mii_master)
+ return;
+
+ for_each_compatible_node(np, NULL, "ucc_geth") {
+ struct resource res;
+ const uint32_t *iprop;
+ uint32_t id;
+ int ret;
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret < 0) {
+ pr_debug("fsl-pq-mdio: no address range in node %pOF\n",
+ np);
+ continue;
+ }
+
+ /* if our mdio regs fall within this UCC regs range */
+ if ((start < res.start) || (end > res.end))
+ continue;
+
+ iprop = of_get_property(np, "cell-index", NULL);
+ if (!iprop) {
+ iprop = of_get_property(np, "device-id", NULL);
+ if (!iprop) {
+ pr_debug("fsl-pq-mdio: no UCC ID in node %pOF\n",
+ np);
+ continue;
+ }
+ }
+
+ id = be32_to_cpup(iprop);
+
+ /*
+ * cell-index and device-id for QE nodes are
+ * numbered from 1, not 0.
+ */
+ if (ucc_set_qe_mux_mii_mng(id - 1) < 0) {
+ pr_debug("fsl-pq-mdio: invalid UCC ID in node %pOF\n",
+ np);
+ continue;
+ }
+
+ pr_debug("fsl-pq-mdio: setting node UCC%u to MII master\n", id);
+ found_mii_master = true;
+ }
+}
+
+#endif
+
+static const struct of_device_id fsl_pq_mdio_match[] = {
+#if IS_ENABLED(CONFIG_GIANFAR)
+ {
+ .compatible = "fsl,gianfar-tbi",
+ .data = &(struct fsl_pq_mdio_data) {
+ .mii_offset = 0,
+ .get_tbipa = get_gfar_tbipa_from_mii,
+ },
+ },
+ {
+ .compatible = "fsl,gianfar-mdio",
+ .data = &(struct fsl_pq_mdio_data) {
+ .mii_offset = 0,
+ .get_tbipa = get_gfar_tbipa_from_mii,
+ },
+ },
+ {
+ .type = "mdio",
+ .compatible = "gianfar",
+ .data = &(struct fsl_pq_mdio_data) {
+ .mii_offset = offsetof(struct fsl_pq_mdio, mii),
+ .get_tbipa = get_gfar_tbipa_from_mdio,
+ },
+ },
+ {
+ .compatible = "fsl,etsec2-tbi",
+ .data = &(struct fsl_pq_mdio_data) {
+ .mii_offset = offsetof(struct fsl_pq_mdio, mii),
+ .get_tbipa = get_etsec_tbipa,
+ },
+ },
+ {
+ .compatible = "fsl,etsec2-mdio",
+ .data = &(struct fsl_pq_mdio_data) {
+ .mii_offset = offsetof(struct fsl_pq_mdio, mii),
+ .get_tbipa = get_etsec_tbipa,
+ },
+ },
+#endif
+#if IS_ENABLED(CONFIG_UCC_GETH)
+ {
+ .compatible = "fsl,ucc-mdio",
+ .data = &(struct fsl_pq_mdio_data) {
+ .mii_offset = 0,
+ .get_tbipa = get_ucc_tbipa,
+ .ucc_configure = ucc_configure,
+ },
+ },
+ {
+ /* Legacy UCC MDIO node */
+ .type = "mdio",
+ .compatible = "ucc_geth_phy",
+ .data = &(struct fsl_pq_mdio_data) {
+ .mii_offset = 0,
+ .get_tbipa = get_ucc_tbipa,
+ .ucc_configure = ucc_configure,
+ },
+ },
+#endif
+ /* No Kconfig option for Fman support yet */
+ {
+ .compatible = "fsl,fman-mdio",
+ .data = &(struct fsl_pq_mdio_data) {
+ .mii_offset = 0,
+ /* Fman TBI operations are handled elsewhere */
+ },
+ },
+
+ {},
+};
+MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
+
+static void set_tbipa(const u32 tbipa_val, struct platform_device *pdev,
+ uint32_t __iomem * (*get_tbipa)(void __iomem *),
+ void __iomem *reg_map, struct resource *reg_res)
+{
+ struct device_node *np = pdev->dev.of_node;
+ uint32_t __iomem *tbipa;
+ bool tbipa_mapped;
+
+ tbipa = of_iomap(np, 1);
+ if (tbipa) {
+ tbipa_mapped = true;
+ } else {
+ tbipa_mapped = false;
+ tbipa = (*get_tbipa)(reg_map);
+
+ /*
+ * Add consistency check to make sure TBI is contained within
+ * the mapped range (not because we would get a segfault,
+ * rather to catch bugs in computing TBI address). Print error
+ * message but continue anyway.
+ */
+ if ((void *)tbipa > reg_map + resource_size(reg_res) - 4)
+ dev_err(&pdev->dev, "invalid register map (should be at least 0x%04zx to contain TBI address)\n",
+ ((void *)tbipa - reg_map) + 4);
+ }
+
+ iowrite32be(be32_to_cpu(tbipa_val), tbipa);
+
+ if (tbipa_mapped)
+ iounmap(tbipa);
+}
+
+static int fsl_pq_mdio_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *id =
+ of_match_device(fsl_pq_mdio_match, &pdev->dev);
+ const struct fsl_pq_mdio_data *data;
+ struct device_node *np = pdev->dev.of_node;
+ struct resource res;
+ struct device_node *tbi;
+ struct fsl_pq_mdio_priv *priv;
+ struct mii_bus *new_bus;
+ int err;
+
+ if (!id) {
+ dev_err(&pdev->dev, "Failed to match device\n");
+ return -ENODEV;
+ }
+
+ data = id->data;
+
+ dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible);
+
+ new_bus = mdiobus_alloc_size(sizeof(*priv));
+ if (!new_bus)
+ return -ENOMEM;
+
+ priv = new_bus->priv;
+ new_bus->name = "Freescale PowerQUICC MII Bus";
+ new_bus->read = &fsl_pq_mdio_read;
+ new_bus->write = &fsl_pq_mdio_write;
+ new_bus->reset = &fsl_pq_mdio_reset;
+
+ err = of_address_to_resource(np, 0, &res);
+ if (err < 0) {
+ dev_err(&pdev->dev, "could not obtain address information\n");
+ goto error;
+ }
+
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "%pOFn@%llx", np,
+ (unsigned long long)res.start);
+
+ priv->map = of_iomap(np, 0);
+ if (!priv->map) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ /*
+ * Some device tree nodes represent only the MII registers, and
+ * others represent the MAC and MII registers. The 'mii_offset' field
+ * contains the offset of the MII registers inside the mapped register
+ * space.
+ */
+ if (data->mii_offset > resource_size(&res)) {
+ dev_err(&pdev->dev, "invalid register map\n");
+ err = -EINVAL;
+ goto error;
+ }
+ priv->regs = priv->map + data->mii_offset;
+
+ new_bus->parent = &pdev->dev;
+ platform_set_drvdata(pdev, new_bus);
+
+ if (data->get_tbipa) {
+ for_each_child_of_node(np, tbi) {
+ if (of_node_is_type(tbi, "tbi-phy")) {
+ dev_dbg(&pdev->dev, "found TBI PHY node %pOFP\n",
+ tbi);
+ break;
+ }
+ }
+
+ if (tbi) {
+ const u32 *prop = of_get_property(tbi, "reg", NULL);
+ if (!prop) {
+ dev_err(&pdev->dev,
+ "missing 'reg' property in node %pOF\n",
+ tbi);
+ err = -EBUSY;
+ goto error;
+ }
+ set_tbipa(*prop, pdev,
+ data->get_tbipa, priv->map, &res);
+ }
+ }
+
+ if (data->ucc_configure)
+ data->ucc_configure(res.start, res.end);
+
+ err = of_mdiobus_register(new_bus, np);
+ if (err) {
+ dev_err(&pdev->dev, "cannot register %s as MDIO bus\n",
+ new_bus->name);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ if (priv->map)
+ iounmap(priv->map);
+
+ kfree(new_bus);
+
+ return err;
+}
+
+
+static int fsl_pq_mdio_remove(struct platform_device *pdev)
+{
+ struct device *device = &pdev->dev;
+ struct mii_bus *bus = dev_get_drvdata(device);
+ struct fsl_pq_mdio_priv *priv = bus->priv;
+
+ mdiobus_unregister(bus);
+
+ iounmap(priv->map);
+ mdiobus_free(bus);
+
+ return 0;
+}
+
+static struct platform_driver fsl_pq_mdio_driver = {
+ .driver = {
+ .name = "fsl-pq_mdio",
+ .of_match_table = fsl_pq_mdio_match,
+ },
+ .probe = fsl_pq_mdio_probe,
+ .remove = fsl_pq_mdio_remove,
+};
+
+module_platform_driver(fsl_pq_mdio_driver);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
new file mode 100644
index 000000000..b2def2955
--- /dev/null
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -0,0 +1,3648 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* drivers/net/ethernet/freescale/gianfar.c
+ *
+ * Gianfar Ethernet Driver
+ * This driver is designed for the non-CPM ethernet controllers
+ * on the 85xx and 83xx family of integrated processors
+ * Based on 8260_io/fcc_enet.c
+ *
+ * Author: Andy Fleming
+ * Maintainer: Kumar Gala
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
+ *
+ * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
+ * Copyright 2007 MontaVista Software, Inc.
+ *
+ * Gianfar: AKA Lambda Draconis, "Dragon"
+ * RA 11 31 24.2
+ * Dec +69 19 52
+ * V 3.84
+ * B-V +1.62
+ *
+ * Theory of operation
+ *
+ * The driver is initialized through of_device. Configuration information
+ * is therefore conveyed through an OF-style device tree.
+ *
+ * The Gianfar Ethernet Controller uses a ring of buffer
+ * descriptors. The beginning is indicated by a register
+ * pointing to the physical address of the start of the ring.
+ * The end is determined by a "wrap" bit being set in the
+ * last descriptor of the ring.
+ *
+ * When a packet is received, the RXF bit in the
+ * IEVENT register is set, triggering an interrupt when the
+ * corresponding bit in the IMASK register is also set (if
+ * interrupt coalescing is active, then the interrupt may not
+ * happen immediately, but will wait until either a set number
+ * of frames or amount of time have passed). In NAPI, the
+ * interrupt handler will signal there is work to be done, and
+ * exit. This method will start at the last known empty
+ * descriptor, and process every subsequent descriptor until there
+ * are none left with data (NAPI will stop after a set number of
+ * packets to give time to other tasks, but will eventually
+ * process all the packets). The data arrives inside a
+ * pre-allocated skb, and so after the skb is passed up to the
+ * stack, a new skb must be allocated, and the address field in
+ * the buffer descriptor must be updated to indicate this new
+ * skb.
+ *
+ * When the kernel requests that a packet be transmitted, the
+ * driver starts where it left off last time, and points the
+ * descriptor at the buffer which was passed in. The driver
+ * then informs the DMA engine that there are packets ready to
+ * be transmitted. Once the controller is finished transmitting
+ * the packet, an interrupt may be triggered (under the same
+ * conditions as for reception, but depending on the TXF bit).
+ * The driver then cleans up the buffer.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/in.h>
+#include <linux/net_tstamp.h>
+
+#include <asm/io.h>
+#ifdef CONFIG_PPC
+#include <asm/reg.h>
+#include <asm/mpc85xx.h>
+#endif
+#include <asm/irq.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+
+#include "gianfar.h"
+
+#define TX_TIMEOUT (5*HZ)
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc");
+MODULE_DESCRIPTION("Gianfar Ethernet Driver");
+MODULE_LICENSE("GPL");
+
+static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
+ dma_addr_t buf)
+{
+ u32 lstatus;
+
+ bdp->bufPtr = cpu_to_be32(buf);
+
+ lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
+ if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
+ lstatus |= BD_LFLAG(RXBD_WRAP);
+
+ gfar_wmb();
+
+ bdp->lstatus = cpu_to_be32(lstatus);
+}
+
+static void gfar_init_tx_rx_base(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 __iomem *baddr;
+ int i;
+
+ baddr = &regs->tbase0;
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
+ baddr += 2;
+ }
+
+ baddr = &regs->rbase0;
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
+ baddr += 2;
+ }
+}
+
+static void gfar_init_rqprm(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 __iomem *baddr;
+ int i;
+
+ baddr = &regs->rqprm0;
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
+ (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
+ baddr++;
+ }
+}
+
+static void gfar_rx_offload_en(struct gfar_private *priv)
+{
+ /* set this when rx hw offload (TOE) functions are being used */
+ priv->uses_rxfcb = 0;
+
+ if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
+ priv->uses_rxfcb = 1;
+
+ if (priv->hwts_rx_en || priv->rx_filer_enable)
+ priv->uses_rxfcb = 1;
+}
+
+static void gfar_mac_rx_config(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 rctrl = 0;
+
+ if (priv->rx_filer_enable) {
+ rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
+ /* Program the RIR0 reg with the required distribution */
+ gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
+ }
+
+ /* Restore PROMISC mode */
+ if (priv->ndev->flags & IFF_PROMISC)
+ rctrl |= RCTRL_PROM;
+
+ if (priv->ndev->features & NETIF_F_RXCSUM)
+ rctrl |= RCTRL_CHECKSUMMING;
+
+ if (priv->extended_hash)
+ rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;
+
+ if (priv->padding) {
+ rctrl &= ~RCTRL_PAL_MASK;
+ rctrl |= RCTRL_PADDING(priv->padding);
+ }
+
+ /* Enable HW time stamping if requested from user space */
+ if (priv->hwts_rx_en)
+ rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
+
+ if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
+
+ /* Clear the LFC bit */
+ gfar_write(&regs->rctrl, rctrl);
+ /* Init flow control threshold values */
+ gfar_init_rqprm(priv);
+ gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
+ rctrl |= RCTRL_LFC;
+
+ /* Init rctrl based on our settings */
+ gfar_write(&regs->rctrl, rctrl);
+}
+
+static void gfar_mac_tx_config(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tctrl = 0;
+
+ if (priv->ndev->features & NETIF_F_IP_CSUM)
+ tctrl |= TCTRL_INIT_CSUM;
+
+ if (priv->prio_sched_en)
+ tctrl |= TCTRL_TXSCHED_PRIO;
+ else {
+ tctrl |= TCTRL_TXSCHED_WRRS;
+ gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
+ gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
+ }
+
+ if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
+ tctrl |= TCTRL_VLINS;
+
+ gfar_write(&regs->tctrl, tctrl);
+}
+
+static void gfar_configure_coalescing(struct gfar_private *priv,
+ unsigned long tx_mask, unsigned long rx_mask)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 __iomem *baddr;
+
+ if (priv->mode == MQ_MG_MODE) {
+ int i = 0;
+
+ baddr = &regs->txic0;
+ for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
+ gfar_write(baddr + i, 0);
+ if (likely(priv->tx_queue[i]->txcoalescing))
+ gfar_write(baddr + i, priv->tx_queue[i]->txic);
+ }
+
+ baddr = &regs->rxic0;
+ for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
+ gfar_write(baddr + i, 0);
+ if (likely(priv->rx_queue[i]->rxcoalescing))
+ gfar_write(baddr + i, priv->rx_queue[i]->rxic);
+ }
+ } else {
+ /* Backward compatible case -- even if we enable
+ * multiple queues, there's only a single register to program
+ */
+ gfar_write(&regs->txic, 0);
+ if (likely(priv->tx_queue[0]->txcoalescing))
+ gfar_write(&regs->txic, priv->tx_queue[0]->txic);
+
+ gfar_write(&regs->rxic, 0);
+ if (unlikely(priv->rx_queue[0]->rxcoalescing))
+ gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
+ }
+}
+
+static void gfar_configure_coalescing_all(struct gfar_private *priv)
+{
+ gfar_configure_coalescing(priv, 0xFF, 0xFF);
+}
+
+static void gfar_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ stats->rx_packets += priv->rx_queue[i]->stats.rx_packets;
+ stats->rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
+ stats->rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
+ }
+
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ stats->tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
+ stats->tx_packets += priv->tx_queue[i]->stats.tx_packets;
+ }
+
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
+ struct rmon_mib __iomem *rmon = &priv->gfargrp[0].regs->rmon;
+ unsigned long flags;
+ u32 rdrp, car, car_before;
+ u64 rdrp_offset;
+
+ spin_lock_irqsave(&priv->rmon_overflow.lock, flags);
+ car = gfar_read(&rmon->car1) & CAR1_C1RDR;
+ do {
+ car_before = car;
+ rdrp = gfar_read(&rmon->rdrp);
+ car = gfar_read(&rmon->car1) & CAR1_C1RDR;
+ } while (car != car_before);
+ if (car) {
+ priv->rmon_overflow.rdrp++;
+ gfar_write(&rmon->car1, car);
+ }
+ rdrp_offset = priv->rmon_overflow.rdrp;
+ spin_unlock_irqrestore(&priv->rmon_overflow.lock, flags);
+
+ stats->rx_missed_errors = rdrp + (rdrp_offset << 16);
+ }
+}
+
+/* Set the appropriate hash bit for the given addr */
+/* The algorithm works like so:
+ * 1) Take the Destination Address (ie the multicast address), and
+ * do a CRC on it (little endian), and reverse the bits of the
+ * result.
+ * 2) Use the 8 most significant bits as a hash into a 256-entry
+ * table. The table is controlled through 8 32-bit registers:
+ * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
+ * entry 255. This means that the 3 most significant bits of the
+ * hash select which gaddr register to use, and the 5 other bits
+ * indicate which bit (assuming an IBM numbering scheme, which
+ * for PowerPC (tm) is usually the case) in the register holds
+ * the entry.
+ */
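+/* For example, with an 8-bit hash width, a CRC result whose top byte is
+ * 0xa3 (binary 101 00011) selects whichreg = 5 and whichbit = 3, so
+ * bit 1 << (31 - 3) is set in gaddr5.
+ */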
+static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
+{
+ u32 tempval;
+ struct gfar_private *priv = netdev_priv(dev);
+ u32 result = ether_crc(ETH_ALEN, addr);
+ int width = priv->hash_width;
+ u8 whichbit = (result >> (32 - width)) & 0x1f;
+ u8 whichreg = result >> (32 - width + 5);
+ u32 value = (1 << (31-whichbit));
+
+ tempval = gfar_read(priv->hash_regs[whichreg]);
+ tempval |= value;
+ gfar_write(priv->hash_regs[whichreg], tempval);
+}
+
+/* There are multiple MAC Address register pairs on some controllers
+ * This function sets the numth pair to a given address
+ */
+static void gfar_set_mac_for_addr(struct net_device *dev, int num,
+ const u8 *addr)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tempval;
+ u32 __iomem *macptr = &regs->macstnaddr1;
+
+ macptr += num*2;
+
+ /* For a station address of 0x12345678ABCD in transmission
+ * order (BE), MACnADDR1 is set to 0xCDAB7856 and
+ * MACnADDR2 is set to 0x34120000.
+ */
+ tempval = (addr[5] << 24) | (addr[4] << 16) |
+ (addr[3] << 8) | addr[2];
+
+ gfar_write(macptr, tempval);
+
+ tempval = (addr[1] << 24) | (addr[0] << 16);
+
+ gfar_write(macptr+1, tempval);
+}
+
+static int gfar_set_mac_addr(struct net_device *dev, void *p)
+{
+ int ret;
+
+ ret = eth_mac_addr(dev, p);
+ if (ret)
+ return ret;
+
+ gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
+
+ return 0;
+}
+
+static void gfar_ints_disable(struct gfar_private *priv)
+{
+ int i;
+ for (i = 0; i < priv->num_grps; i++) {
+ struct gfar __iomem *regs = priv->gfargrp[i].regs;
+ /* Clear IEVENT */
+ gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
+
+ /* Initialize IMASK */
+ gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+ }
+}
+
+static void gfar_ints_enable(struct gfar_private *priv)
+{
+ int i;
+ for (i = 0; i < priv->num_grps; i++) {
+ struct gfar __iomem *regs = priv->gfargrp[i].regs;
+ /* Unmask the interrupts we look for */
+ gfar_write(&regs->imask,
+ IMASK_DEFAULT | priv->rmon_overflow.imask);
+ }
+}
+
+static int gfar_alloc_tx_queues(struct gfar_private *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
+ GFP_KERNEL);
+ if (!priv->tx_queue[i])
+ return -ENOMEM;
+
+ priv->tx_queue[i]->tx_skbuff = NULL;
+ priv->tx_queue[i]->qindex = i;
+ priv->tx_queue[i]->dev = priv->ndev;
+ spin_lock_init(&(priv->tx_queue[i]->txlock));
+ }
+ return 0;
+}
+
+static int gfar_alloc_rx_queues(struct gfar_private *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
+ GFP_KERNEL);
+ if (!priv->rx_queue[i])
+ return -ENOMEM;
+
+ priv->rx_queue[i]->qindex = i;
+ priv->rx_queue[i]->ndev = priv->ndev;
+ }
+ return 0;
+}
+
+static void gfar_free_tx_queues(struct gfar_private *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_tx_queues; i++)
+ kfree(priv->tx_queue[i]);
+}
+
+static void gfar_free_rx_queues(struct gfar_private *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_rx_queues; i++)
+ kfree(priv->rx_queue[i]);
+}
+
+static void unmap_group_regs(struct gfar_private *priv)
+{
+ int i;
+
+ for (i = 0; i < MAXGROUPS; i++)
+ if (priv->gfargrp[i].regs)
+ iounmap(priv->gfargrp[i].regs);
+}
+
+static void free_gfar_dev(struct gfar_private *priv)
+{
+ int i, j;
+
+ for (i = 0; i < priv->num_grps; i++)
+ for (j = 0; j < GFAR_NUM_IRQS; j++) {
+ kfree(priv->gfargrp[i].irqinfo[j]);
+ priv->gfargrp[i].irqinfo[j] = NULL;
+ }
+
+ free_netdev(priv->ndev);
+}
+
+static void disable_napi(struct gfar_private *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_grps; i++) {
+ napi_disable(&priv->gfargrp[i].napi_rx);
+ napi_disable(&priv->gfargrp[i].napi_tx);
+ }
+}
+
+static void enable_napi(struct gfar_private *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_grps; i++) {
+ napi_enable(&priv->gfargrp[i].napi_rx);
+ napi_enable(&priv->gfargrp[i].napi_tx);
+ }
+}
+
+static int gfar_parse_group(struct device_node *np,
+ struct gfar_private *priv, const char *model)
+{
+ struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
+ int i;
+
+ for (i = 0; i < GFAR_NUM_IRQS; i++) {
+ grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
+ GFP_KERNEL);
+ if (!grp->irqinfo[i])
+ return -ENOMEM;
+ }
+
+ grp->regs = of_iomap(np, 0);
+ if (!grp->regs)
+ return -ENOMEM;
+
+ gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);
+
+ /* If we aren't the FEC we have multiple interrupts */
+ if (model && strcasecmp(model, "FEC")) {
+ gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
+ gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
+ if (!gfar_irq(grp, TX)->irq ||
+ !gfar_irq(grp, RX)->irq ||
+ !gfar_irq(grp, ER)->irq)
+ return -EINVAL;
+ }
+
+ grp->priv = priv;
+ spin_lock_init(&grp->grplock);
+ if (priv->mode == MQ_MG_MODE) {
+ /* One Q per interrupt group: Q0 to G0, Q1 to G1 */
+ grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
+ grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
+ } else {
+ grp->rx_bit_map = 0xFF;
+ grp->tx_bit_map = 0xFF;
+ }
+
+ /* bit_map's MSB is q0 (from q0 to q7), but for_each_set_bit parses
+ * from right to left, so we need to reverse the 8 bits to get the
+ * queue index
+ */
+ grp->rx_bit_map = bitrev8(grp->rx_bit_map);
+ grp->tx_bit_map = bitrev8(grp->tx_bit_map);
+
+ /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
+ * also assign queues to groups
+ */
+ for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
+ if (!grp->rx_queue)
+ grp->rx_queue = priv->rx_queue[i];
+ grp->num_rx_queues++;
+ grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
+ priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
+ priv->rx_queue[i]->grp = grp;
+ }
+
+ for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
+ if (!grp->tx_queue)
+ grp->tx_queue = priv->tx_queue[i];
+ grp->num_tx_queues++;
+ grp->tstat |= (TSTAT_CLEAR_THALT >> i);
+ priv->tqueue |= (TQUEUE_EN0 >> i);
+ priv->tx_queue[i]->grp = grp;
+ }
+
+ priv->num_grps++;
+
+ return 0;
+}
+
+static int gfar_of_group_count(struct device_node *np)
+{
+ struct device_node *child;
+ int num = 0;
+
+ for_each_available_child_of_node(np, child)
+ if (of_node_name_eq(child, "queue-group"))
+ num++;
+
+ return num;
+}
+
+/* Reads the controller's registers to determine what interface
+ * connects it to the PHY.
+ */
+static phy_interface_t gfar_get_interface(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 ecntrl;
+
+ ecntrl = gfar_read(&regs->ecntrl);
+
+ if (ecntrl & ECNTRL_SGMII_MODE)
+ return PHY_INTERFACE_MODE_SGMII;
+
+ if (ecntrl & ECNTRL_TBI_MODE) {
+ if (ecntrl & ECNTRL_REDUCED_MODE)
+ return PHY_INTERFACE_MODE_RTBI;
+ else
+ return PHY_INTERFACE_MODE_TBI;
+ }
+
+ if (ecntrl & ECNTRL_REDUCED_MODE) {
+ if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
+ return PHY_INTERFACE_MODE_RMII;
+ } else {
+ phy_interface_t interface = priv->interface;
+
+ /* This isn't autodetected right now, so it must
+ * be set by the device tree or platform code.
+ */
+ if (interface == PHY_INTERFACE_MODE_RGMII_ID)
+ return PHY_INTERFACE_MODE_RGMII_ID;
+
+ return PHY_INTERFACE_MODE_RGMII;
+ }
+ }
+
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
+ return PHY_INTERFACE_MODE_GMII;
+
+ return PHY_INTERFACE_MODE_MII;
+}
+
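+/* Parse the eTSEC device tree node: allocate the net_device and per-queue
+ * structures, map the interrupt group(s), and read the MAC address,
+ * stashing and PHY properties
+ */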
+static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
+{
+ const char *model;
+ int err = 0, i;
+ phy_interface_t interface;
+ struct net_device *dev = NULL;
+ struct gfar_private *priv = NULL;
+ struct device_node *np = ofdev->dev.of_node;
+ struct device_node *child = NULL;
+ u32 stash_len = 0;
+ u32 stash_idx = 0;
+ unsigned int num_tx_qs, num_rx_qs;
+ unsigned short mode;
+
+ if (!np)
+ return -ENODEV;
+
+ if (of_device_is_compatible(np, "fsl,etsec2"))
+ mode = MQ_MG_MODE;
+ else
+ mode = SQ_SG_MODE;
+
+ if (mode == SQ_SG_MODE) {
+ num_tx_qs = 1;
+ num_rx_qs = 1;
+ } else { /* MQ_MG_MODE */
+ /* get the actual number of supported groups */
+ unsigned int num_grps = gfar_of_group_count(np);
+
+ if (num_grps == 0 || num_grps > MAXGROUPS) {
+ dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
+ num_grps);
+ pr_err("Cannot do alloc_etherdev, aborting\n");
+ return -EINVAL;
+ }
+
+ num_tx_qs = num_grps; /* one txq per int group */
+ num_rx_qs = num_grps; /* one rxq per int group */
+ }
+
+ if (num_tx_qs > MAX_TX_QS) {
+ pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
+ num_tx_qs, MAX_TX_QS);
+ pr_err("Cannot do alloc_etherdev, aborting\n");
+ return -EINVAL;
+ }
+
+ if (num_rx_qs > MAX_RX_QS) {
+ pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
+ num_rx_qs, MAX_RX_QS);
+ pr_err("Cannot do alloc_etherdev, aborting\n");
+ return -EINVAL;
+ }
+
+ *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
+ dev = *pdev;
+ if (!dev)
+ return -ENOMEM;
+
+ priv = netdev_priv(dev);
+ priv->ndev = dev;
+
+ priv->mode = mode;
+
+ priv->num_tx_queues = num_tx_qs;
+ netif_set_real_num_rx_queues(dev, num_rx_qs);
+ priv->num_rx_queues = num_rx_qs;
+
+ err = gfar_alloc_tx_queues(priv);
+ if (err)
+ goto tx_alloc_failed;
+
+ err = gfar_alloc_rx_queues(priv);
+ if (err)
+ goto rx_alloc_failed;
+
+ err = of_property_read_string(np, "model", &model);
+ if (err) {
+ pr_err("Device model property missing, aborting\n");
+ goto rx_alloc_failed;
+ }
+
+ /* Init Rx queue filer rule set linked list */
+ INIT_LIST_HEAD(&priv->rx_list.list);
+ priv->rx_list.count = 0;
+ mutex_init(&priv->rx_queue_access);
+
+ for (i = 0; i < MAXGROUPS; i++)
+ priv->gfargrp[i].regs = NULL;
+
+ /* Parse and initialize group specific information */
+ if (priv->mode == MQ_MG_MODE) {
+ for_each_available_child_of_node(np, child) {
+ if (!of_node_name_eq(child, "queue-group"))
+ continue;
+
+ err = gfar_parse_group(child, priv, model);
+ if (err) {
+ of_node_put(child);
+ goto err_grp_init;
+ }
+ }
+ } else { /* SQ_SG_MODE */
+ err = gfar_parse_group(np, priv, model);
+ if (err)
+ goto err_grp_init;
+ }
+
+ if (of_property_read_bool(np, "bd-stash")) {
+ priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
+ priv->bd_stash_en = 1;
+ }
+
+ err = of_property_read_u32(np, "rx-stash-len", &stash_len);
+
+ if (err == 0)
+ priv->rx_stash_size = stash_len;
+
+ err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);
+
+ if (err == 0)
+ priv->rx_stash_index = stash_idx;
+
+ if (stash_len || stash_idx)
+ priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
+
+ err = of_get_ethdev_address(np, dev);
+ if (err) {
+ eth_hw_addr_random(dev);
+ dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr);
+ }
+
+ if (model && !strcasecmp(model, "TSEC"))
+ priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
+ FSL_GIANFAR_DEV_HAS_COALESCE |
+ FSL_GIANFAR_DEV_HAS_RMON |
+ FSL_GIANFAR_DEV_HAS_MULTI_INTR;
+
+ if (model && !strcasecmp(model, "eTSEC"))
+ priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
+ FSL_GIANFAR_DEV_HAS_COALESCE |
+ FSL_GIANFAR_DEV_HAS_RMON |
+ FSL_GIANFAR_DEV_HAS_MULTI_INTR |
+ FSL_GIANFAR_DEV_HAS_CSUM |
+ FSL_GIANFAR_DEV_HAS_VLAN |
+ FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
+ FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
+ FSL_GIANFAR_DEV_HAS_TIMER |
+ FSL_GIANFAR_DEV_HAS_RX_FILER;
+
+ /* Use PHY connection type from the DT node if one is specified there.
+ * rgmii-id really needs to be specified. Other types can be
+ * detected by hardware
+ */
+ err = of_get_phy_mode(np, &interface);
+ if (!err)
+ priv->interface = interface;
+ else
+ priv->interface = gfar_get_interface(dev);
+
+ if (of_find_property(np, "fsl,magic-packet", NULL))
+ priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
+
+ if (of_get_property(np, "fsl,wake-on-filer", NULL))
+ priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;
+
+ priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
+
+ /* In the case of a fixed PHY, the DT node associated
+ * to the PHY is the Ethernet MAC DT node.
+ */
+ if (!priv->phy_node && of_phy_is_fixed_link(np)) {
+ err = of_phy_register_fixed_link(np);
+ if (err)
+ goto err_grp_init;
+
+ priv->phy_node = of_node_get(np);
+ }
+
+ /* Find the TBI PHY. If it's not there, we don't support SGMII */
+ priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
+
+ return 0;
+
+err_grp_init:
+ unmap_group_regs(priv);
+rx_alloc_failed:
+ gfar_free_rx_queues(priv);
+tx_alloc_failed:
+ gfar_free_tx_queues(priv);
+ free_gfar_dev(priv);
+ return err;
+}
+
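+/* Program a four-entry filer cluster for one traffic class; entries are
+ * written downwards from rqfar and the lowest index used is returned
+ */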
+static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
+ u32 class)
+{
+ u32 rqfpr = FPR_FILER_MASK;
+ u32 rqfcr = 0x0;
+
+ rqfar--;
+ rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
+ priv->ftp_rqfpr[rqfar] = rqfpr;
+ priv->ftp_rqfcr[rqfar] = rqfcr;
+ gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+ rqfar--;
+ rqfcr = RQFCR_CMP_NOMATCH;
+ priv->ftp_rqfpr[rqfar] = rqfpr;
+ priv->ftp_rqfcr[rqfar] = rqfcr;
+ gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+ rqfar--;
+ rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
+ rqfpr = class;
+ priv->ftp_rqfcr[rqfar] = rqfcr;
+ priv->ftp_rqfpr[rqfar] = rqfpr;
+ gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+ rqfar--;
+ rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
+ rqfpr = class;
+ priv->ftp_rqfcr[rqfar] = rqfcr;
+ priv->ftp_rqfpr[rqfar] = rqfpr;
+ gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+ return rqfar;
+}
+
+static void gfar_init_filer_table(struct gfar_private *priv)
+{
+ int i = 0x0;
+ u32 rqfar = MAX_FILER_IDX;
+ u32 rqfcr = 0x0;
+ u32 rqfpr = FPR_FILER_MASK;
+
+ /* Default rule */
+ rqfcr = RQFCR_CMP_MATCH;
+ priv->ftp_rqfcr[rqfar] = rqfcr;
+ priv->ftp_rqfpr[rqfar] = rqfpr;
+ gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+ rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
+ rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
+ rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
+ rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
+ rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
+ rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
+
+ /* cur_filer_idx indicates the first non-masked rule */
+ priv->cur_filer_idx = rqfar;
+
+ /* Rest are masked rules */
+ rqfcr = RQFCR_CMP_NOMATCH;
+ for (i = 0; i < rqfar; i++) {
+ priv->ftp_rqfcr[i] = rqfcr;
+ priv->ftp_rqfpr[i] = rqfpr;
+ gfar_write_filer(priv, i, rqfcr, rqfpr);
+ }
+}
+
+#ifdef CONFIG_PPC
+static void __gfar_detect_errata_83xx(struct gfar_private *priv)
+{
+ unsigned int pvr = mfspr(SPRN_PVR);
+ unsigned int svr = mfspr(SPRN_SVR);
+ unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
+ unsigned int rev = svr & 0xffff;
+
+ /* MPC8313 Rev 2.0 and higher; All MPC837x */
+ if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
+ (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+ priv->errata |= GFAR_ERRATA_74;
+
+ /* MPC8313 and MPC837x all rev */
+ if ((pvr == 0x80850010 && mod == 0x80b0) ||
+ (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+ priv->errata |= GFAR_ERRATA_76;
+
+ /* MPC8313 Rev < 2.0 */
+ if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
+ priv->errata |= GFAR_ERRATA_12;
+}
+
+static void __gfar_detect_errata_85xx(struct gfar_private *priv)
+{
+ unsigned int svr = mfspr(SPRN_SVR);
+
+ if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
+ priv->errata |= GFAR_ERRATA_12;
+ /* P2020/P1010 Rev 1; MPC8548 Rev 2 */
+ if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
+ ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
+ ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
+ priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
+}
+#endif
+
+static void gfar_detect_errata(struct gfar_private *priv)
+{
+ struct device *dev = &priv->ofdev->dev;
+
+ /* no plans to fix */
+ priv->errata |= GFAR_ERRATA_A002;
+
+#ifdef CONFIG_PPC
+ if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
+ __gfar_detect_errata_85xx(priv);
+ else /* non-mpc85xx parts, i.e. e300 core based */
+ __gfar_detect_errata_83xx(priv);
+#endif
+
+ if (priv->errata)
+ dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
+ priv->errata);
+}
+
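+/* Set up the group/individual address hash registers: 16 registers
+ * (igaddr0-7 plus gaddr0-7, 512-bit hash) when extended hashing is
+ * supported, otherwise the 8 gaddr registers (256-bit hash)
+ */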
+static void gfar_init_addr_hash_table(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
+ priv->extended_hash = 1;
+ priv->hash_width = 9;
+
+ priv->hash_regs[0] = &regs->igaddr0;
+ priv->hash_regs[1] = &regs->igaddr1;
+ priv->hash_regs[2] = &regs->igaddr2;
+ priv->hash_regs[3] = &regs->igaddr3;
+ priv->hash_regs[4] = &regs->igaddr4;
+ priv->hash_regs[5] = &regs->igaddr5;
+ priv->hash_regs[6] = &regs->igaddr6;
+ priv->hash_regs[7] = &regs->igaddr7;
+ priv->hash_regs[8] = &regs->gaddr0;
+ priv->hash_regs[9] = &regs->gaddr1;
+ priv->hash_regs[10] = &regs->gaddr2;
+ priv->hash_regs[11] = &regs->gaddr3;
+ priv->hash_regs[12] = &regs->gaddr4;
+ priv->hash_regs[13] = &regs->gaddr5;
+ priv->hash_regs[14] = &regs->gaddr6;
+ priv->hash_regs[15] = &regs->gaddr7;
+
+ } else {
+ priv->extended_hash = 0;
+ priv->hash_width = 8;
+
+ priv->hash_regs[0] = &regs->gaddr0;
+ priv->hash_regs[1] = &regs->gaddr1;
+ priv->hash_regs[2] = &regs->gaddr2;
+ priv->hash_regs[3] = &regs->gaddr3;
+ priv->hash_regs[4] = &regs->gaddr4;
+ priv->hash_regs[5] = &regs->gaddr5;
+ priv->hash_regs[6] = &regs->gaddr6;
+ priv->hash_regs[7] = &regs->gaddr7;
+ }
+}
+
+static int __gfar_is_rx_idle(struct gfar_private *priv)
+{
+ u32 res;
+
+ /* Normally TSEC should not hang on GRS commands, so we should
+ * actually wait for IEVENT_GRSC flag.
+ */
+ if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
+ return 0;
+
+ /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
+ * the same as bits 23-30, the eTSEC Rx is assumed to be idle
+ * and the Rx can be safely reset.
+ */
+ res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
+ res &= 0x7f807f80;
+ if ((res & 0xffff) == (res >> 16))
+ return 1;
+
+ return 0;
+}
+
+/* Halt the receive and transmit queues */
+static void gfar_halt_nodisable(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tempval;
+ unsigned int timeout;
+ int stopped;
+
+ gfar_ints_disable(priv);
+
+ if (gfar_is_dma_stopped(priv))
+ return;
+
+ /* Stop the DMA, and wait for it to stop */
+ tempval = gfar_read(&regs->dmactrl);
+ tempval |= (DMACTRL_GRS | DMACTRL_GTS);
+ gfar_write(&regs->dmactrl, tempval);
+
+retry:
+ timeout = 1000;
+ while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
+ cpu_relax();
+ timeout--;
+ }
+
+ if (!timeout)
+ stopped = gfar_is_dma_stopped(priv);
+
+ if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
+ !__gfar_is_rx_idle(priv))
+ goto retry;
+}
+
+/* Halt the receive and transmit queues */
+static void gfar_halt(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tempval;
+
+ /* Disable the Rx/Tx hw queues */
+ gfar_write(&regs->rqueue, 0);
+ gfar_write(&regs->tqueue, 0);
+
+ mdelay(10);
+
+ gfar_halt_nodisable(priv);
+
+ /* Disable Rx/Tx DMA */
+ tempval = gfar_read(&regs->maccfg1);
+ tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
+ gfar_write(&regs->maccfg1, tempval);
+}
+
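+/* Unmap and free any skbs (and their fragments) still left on the Tx ring,
+ * then free the skbuff ring itself
+ */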
+static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
+{
+ struct txbd8 *txbdp;
+ struct gfar_private *priv = netdev_priv(tx_queue->dev);
+ int i, j;
+
+ txbdp = tx_queue->tx_bd_base;
+
+ for (i = 0; i < tx_queue->tx_ring_size; i++) {
+ if (!tx_queue->tx_skbuff[i])
+ continue;
+
+ dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
+ be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
+ txbdp->lstatus = 0;
+ for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
+ j++) {
+ txbdp++;
+ dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
+ be16_to_cpu(txbdp->length),
+ DMA_TO_DEVICE);
+ }
+ txbdp++;
+ dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
+ tx_queue->tx_skbuff[i] = NULL;
+ }
+ kfree(tx_queue->tx_skbuff);
+ tx_queue->tx_skbuff = NULL;
+}
+
+static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
+{
+ int i;
+
+ struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
+
+ dev_kfree_skb(rx_queue->skb);
+
+ for (i = 0; i < rx_queue->rx_ring_size; i++) {
+ struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
+
+ rxbdp->lstatus = 0;
+ rxbdp->bufPtr = 0;
+ rxbdp++;
+
+ if (!rxb->page)
+ continue;
+
+ dma_unmap_page(rx_queue->dev, rxb->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ __free_page(rxb->page);
+
+ rxb->page = NULL;
+ }
+
+ kfree(rx_queue->rx_buff);
+ rx_queue->rx_buff = NULL;
+}
+
+/* If there are any tx skbs or rx skbs still around, free them.
+ * Then free tx_skbuff and rx_skbuff
+ */
+static void free_skb_resources(struct gfar_private *priv)
+{
+ struct gfar_priv_tx_q *tx_queue = NULL;
+ struct gfar_priv_rx_q *rx_queue = NULL;
+ int i;
+
+ /* Go through all the buffer descriptors and free their data buffers */
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ struct netdev_queue *txq;
+
+ tx_queue = priv->tx_queue[i];
+ txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
+ if (tx_queue->tx_skbuff)
+ free_skb_tx_queue(tx_queue);
+ netdev_tx_reset_queue(txq);
+ }
+
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ rx_queue = priv->rx_queue[i];
+ if (rx_queue->rx_buff)
+ free_skb_rx_queue(rx_queue);
+ }
+
+ dma_free_coherent(priv->dev,
+ sizeof(struct txbd8) * priv->total_tx_ring_size +
+ sizeof(struct rxbd8) * priv->total_rx_ring_size,
+ priv->tx_queue[0]->tx_bd_base,
+ priv->tx_queue[0]->tx_bd_dma_base);
+}
+
+void stop_gfar(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ netif_tx_stop_all_queues(dev);
+
+ smp_mb__before_atomic();
+ set_bit(GFAR_DOWN, &priv->state);
+ smp_mb__after_atomic();
+
+ disable_napi(priv);
+
+ /* disable ints and gracefully shut down Rx/Tx DMA */
+ gfar_halt(priv);
+
+ phy_stop(dev->phydev);
+
+ free_skb_resources(priv);
+}
+
+static void gfar_start(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tempval;
+ int i = 0;
+
+ /* Enable Rx/Tx hw queues */
+ gfar_write(&regs->rqueue, priv->rqueue);
+ gfar_write(&regs->tqueue, priv->tqueue);
+
+ /* Initialize DMACTRL to have WWR and WOP */
+ tempval = gfar_read(&regs->dmactrl);
+ tempval |= DMACTRL_INIT_SETTINGS;
+ gfar_write(&regs->dmactrl, tempval);
+
+ /* Make sure we aren't stopped */
+ tempval = gfar_read(&regs->dmactrl);
+ tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
+ gfar_write(&regs->dmactrl, tempval);
+
+ for (i = 0; i < priv->num_grps; i++) {
+ regs = priv->gfargrp[i].regs;
+ /* Clear THLT/RHLT, so that the DMA starts polling now */
+ gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
+ gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
+ }
+
+ /* Enable Rx/Tx DMA */
+ tempval = gfar_read(&regs->maccfg1);
+ tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
+ gfar_write(&regs->maccfg1, tempval);
+
+ gfar_ints_enable(priv);
+
+ netif_trans_update(priv->ndev); /* prevent tx timeout */
+}
+
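+/* Allocate a fresh page and DMA-map it for Rx use */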
+static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
+{
+ struct page *page;
+ dma_addr_t addr;
+
+ page = dev_alloc_page();
+ if (unlikely(!page))
+ return false;
+
+ addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(rxq->dev, addr))) {
+ __free_page(page);
+
+ return false;
+ }
+
+ rxb->dma = addr;
+ rxb->page = page;
+ rxb->page_offset = 0;
+
+ return true;
+}
+
+static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
+{
+ struct gfar_private *priv = netdev_priv(rx_queue->ndev);
+ struct gfar_extra_stats *estats = &priv->extra_stats;
+
+ netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
+ atomic64_inc(&estats->rx_alloc_err);
+}
+
+static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
+ int alloc_cnt)
+{
+ struct rxbd8 *bdp;
+ struct gfar_rx_buff *rxb;
+ int i;
+
+ i = rx_queue->next_to_use;
+ bdp = &rx_queue->rx_bd_base[i];
+ rxb = &rx_queue->rx_buff[i];
+
+ while (alloc_cnt--) {
+ /* try reuse page */
+ if (unlikely(!rxb->page)) {
+ if (unlikely(!gfar_new_page(rx_queue, rxb))) {
+ gfar_rx_alloc_err(rx_queue);
+ break;
+ }
+ }
+
+ /* Setup the new RxBD */
+ gfar_init_rxbdp(rx_queue, bdp,
+ rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
+
+ /* Update to the next pointer */
+ bdp++;
+ rxb++;
+
+ if (unlikely(++i == rx_queue->rx_ring_size)) {
+ i = 0;
+ bdp = rx_queue->rx_bd_base;
+ rxb = rx_queue->rx_buff;
+ }
+ }
+
+ rx_queue->next_to_use = i;
+ rx_queue->next_to_alloc = i;
+}
+
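+/* Initialize the Tx descriptor rings and refill the Rx rings with freshly
+ * mapped buffers, recording the per-queue last-free-BD pointers
+ */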
+static void gfar_init_bds(struct net_device *ndev)
+{
+ struct gfar_private *priv = netdev_priv(ndev);
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ struct gfar_priv_tx_q *tx_queue = NULL;
+ struct gfar_priv_rx_q *rx_queue = NULL;
+ struct txbd8 *txbdp;
+ u32 __iomem *rfbptr;
+ int i, j;
+
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ tx_queue = priv->tx_queue[i];
+ /* Initialize some variables in our dev structure */
+ tx_queue->num_txbdfree = tx_queue->tx_ring_size;
+ tx_queue->dirty_tx = tx_queue->tx_bd_base;
+ tx_queue->cur_tx = tx_queue->tx_bd_base;
+ tx_queue->skb_curtx = 0;
+ tx_queue->skb_dirtytx = 0;
+
+ /* Initialize Transmit Descriptor Ring */
+ txbdp = tx_queue->tx_bd_base;
+ for (j = 0; j < tx_queue->tx_ring_size; j++) {
+ txbdp->lstatus = 0;
+ txbdp->bufPtr = 0;
+ txbdp++;
+ }
+
+ /* Set the last descriptor in the ring to indicate wrap */
+ txbdp--;
+ txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
+ TXBD_WRAP);
+ }
+
+ rfbptr = &regs->rfbptr0;
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ rx_queue = priv->rx_queue[i];
+
+ rx_queue->next_to_clean = 0;
+ rx_queue->next_to_use = 0;
+ rx_queue->next_to_alloc = 0;
+
+ /* make sure next_to_clean != next_to_use after this
+ * by leaving at least 1 unused descriptor
+ */
+ gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
+
+ rx_queue->rfbptr = rfbptr;
+ rfbptr += 2;
+ }
+}
+
+static int gfar_alloc_skb_resources(struct net_device *ndev)
+{
+ void *vaddr;
+ dma_addr_t addr;
+ int i, j;
+ struct gfar_private *priv = netdev_priv(ndev);
+ struct device *dev = priv->dev;
+ struct gfar_priv_tx_q *tx_queue = NULL;
+ struct gfar_priv_rx_q *rx_queue = NULL;
+
+ priv->total_tx_ring_size = 0;
+ for (i = 0; i < priv->num_tx_queues; i++)
+ priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
+
+ priv->total_rx_ring_size = 0;
+ for (i = 0; i < priv->num_rx_queues; i++)
+ priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
+
+ /* Allocate memory for the buffer descriptors */
+ vaddr = dma_alloc_coherent(dev,
+ (priv->total_tx_ring_size *
+ sizeof(struct txbd8)) +
+ (priv->total_rx_ring_size *
+ sizeof(struct rxbd8)),
+ &addr, GFP_KERNEL);
+ if (!vaddr)
+ return -ENOMEM;
+
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ tx_queue = priv->tx_queue[i];
+ tx_queue->tx_bd_base = vaddr;
+ tx_queue->tx_bd_dma_base = addr;
+ tx_queue->dev = ndev;
+ /* enet DMA only understands physical addresses */
+ addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
+ vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
+ }
+
+ /* Start the rx descriptor ring where the tx ring leaves off */
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ rx_queue = priv->rx_queue[i];
+ rx_queue->rx_bd_base = vaddr;
+ rx_queue->rx_bd_dma_base = addr;
+ rx_queue->ndev = ndev;
+ rx_queue->dev = dev;
+ addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
+ vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
+ }
+
+ /* Setup the skbuff rings */
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ tx_queue = priv->tx_queue[i];
+ tx_queue->tx_skbuff =
+ kmalloc_array(tx_queue->tx_ring_size,
+ sizeof(*tx_queue->tx_skbuff),
+ GFP_KERNEL);
+ if (!tx_queue->tx_skbuff)
+ goto cleanup;
+
+ for (j = 0; j < tx_queue->tx_ring_size; j++)
+ tx_queue->tx_skbuff[j] = NULL;
+ }
+
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ rx_queue = priv->rx_queue[i];
+ rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
+ sizeof(*rx_queue->rx_buff),
+ GFP_KERNEL);
+ if (!rx_queue->rx_buff)
+ goto cleanup;
+ }
+
+ gfar_init_bds(ndev);
+
+ return 0;
+
+cleanup:
+ free_skb_resources(priv);
+ return -ENOMEM;
+}
+
+/* Bring the controller up and running */
+int startup_gfar(struct net_device *ndev)
+{
+ struct gfar_private *priv = netdev_priv(ndev);
+ int err;
+
+ gfar_mac_reset(priv);
+
+ err = gfar_alloc_skb_resources(ndev);
+ if (err)
+ return err;
+
+ gfar_init_tx_rx_base(priv);
+
+ smp_mb__before_atomic();
+ clear_bit(GFAR_DOWN, &priv->state);
+ smp_mb__after_atomic();
+
+ /* Start Rx/Tx DMA and enable the interrupts */
+ gfar_start(priv);
+
+ /* force link state update after mac reset */
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+
+ phy_start(ndev->phydev);
+
+ enable_napi(priv);
+
+ netif_tx_wake_all_queues(ndev);
+
+ return 0;
+}
+
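+/* Work out the MACCFG1 Tx/Rx flow control bits, either from the user's
+ * pause settings or from the autonegotiated link partner capabilities
+ */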
+static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
+{
+ struct net_device *ndev = priv->ndev;
+ struct phy_device *phydev = ndev->phydev;
+ u32 val = 0;
+
+ if (!phydev->duplex)
+ return val;
+
+ if (!priv->pause_aneg_en) {
+ if (priv->tx_pause_en)
+ val |= MACCFG1_TX_FLOW;
+ if (priv->rx_pause_en)
+ val |= MACCFG1_RX_FLOW;
+ } else {
+ u16 lcl_adv, rmt_adv;
+ u8 flowctrl;
+ /* get link partner capabilities */
+ rmt_adv = 0;
+ if (phydev->pause)
+ rmt_adv = LPA_PAUSE_CAP;
+ if (phydev->asym_pause)
+ rmt_adv |= LPA_PAUSE_ASYM;
+
+ lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
+ flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+ if (flowctrl & FLOW_CTRL_TX)
+ val |= MACCFG1_TX_FLOW;
+ if (flowctrl & FLOW_CTRL_RX)
+ val |= MACCFG1_RX_FLOW;
+ }
+
+ return val;
+}
+
+static noinline void gfar_update_link_state(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ struct net_device *ndev = priv->ndev;
+ struct phy_device *phydev = ndev->phydev;
+ struct gfar_priv_rx_q *rx_queue = NULL;
+ int i;
+
+ if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
+ return;
+
+ if (phydev->link) {
+ u32 tempval1 = gfar_read(&regs->maccfg1);
+ u32 tempval = gfar_read(&regs->maccfg2);
+ u32 ecntrl = gfar_read(&regs->ecntrl);
+ u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
+
+ if (phydev->duplex != priv->oldduplex) {
+ if (!(phydev->duplex))
+ tempval &= ~(MACCFG2_FULL_DUPLEX);
+ else
+ tempval |= MACCFG2_FULL_DUPLEX;
+
+ priv->oldduplex = phydev->duplex;
+ }
+
+ if (phydev->speed != priv->oldspeed) {
+ switch (phydev->speed) {
+ case 1000:
+ tempval =
+ ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
+
+ ecntrl &= ~(ECNTRL_R100);
+ break;
+ case 100:
+ case 10:
+ tempval =
+ ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
+
+ /* Reduced mode distinguishes
+ * between 10 and 100
+ */
+ if (phydev->speed == SPEED_100)
+ ecntrl |= ECNTRL_R100;
+ else
+ ecntrl &= ~(ECNTRL_R100);
+ break;
+ default:
+ netif_warn(priv, link, priv->ndev,
+ "Ack! Speed (%d) is not 10/100/1000!\n",
+ phydev->speed);
+ break;
+ }
+
+ priv->oldspeed = phydev->speed;
+ }
+
+ tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+ tempval1 |= gfar_get_flowctrl_cfg(priv);
+
+ /* Turn last free buffer recording on */
+ if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ u32 bdp_dma;
+
+ rx_queue = priv->rx_queue[i];
+ bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
+ gfar_write(rx_queue->rfbptr, bdp_dma);
+ }
+
+ priv->tx_actual_en = 1;
+ }
+
+ if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
+ priv->tx_actual_en = 0;
+
+ gfar_write(&regs->maccfg1, tempval1);
+ gfar_write(&regs->maccfg2, tempval);
+ gfar_write(&regs->ecntrl, ecntrl);
+
+ if (!priv->oldlink)
+ priv->oldlink = 1;
+
+ } else if (priv->oldlink) {
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+ }
+
+ if (netif_msg_link(priv))
+ phy_print_status(phydev);
+}
+
+/* Called every time the controller might need to be made
+ * aware of new link state. The PHY code conveys this
+ * information through variables in the phydev structure, and this
+ * function converts those variables into the appropriate
+ * register values, and can bring down the device if needed.
+ */
+static void adjust_link(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
+
+ if (unlikely(phydev->link != priv->oldlink ||
+ (phydev->link && (phydev->duplex != priv->oldduplex ||
+ phydev->speed != priv->oldspeed))))
+ gfar_update_link_state(priv);
+}
+
+/* Initialize TBI PHY interface for communicating with the
+ * SERDES lynx PHY on the chip. We communicate with this PHY
+ * through the MDIO bus on each controller, treating it as a
+ * "normal" PHY at the address found in the TBIPA register. We assume
+ * that the TBIPA register is valid. Either the MDIO bus code will set
+ * it to a value that doesn't conflict with other PHYs on the bus, or the
+ * value doesn't matter, as there are no other PHYs on the bus.
+ */
+static void gfar_configure_serdes(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct phy_device *tbiphy;
+
+ if (!priv->tbi_node) {
+ dev_warn(&dev->dev, "error: SGMII mode requires that the "
+ "device tree specify a tbi-handle\n");
+ return;
+ }
+
+ tbiphy = of_phy_find_device(priv->tbi_node);
+ if (!tbiphy) {
+ dev_err(&dev->dev, "error: Could not get TBI device\n");
+ return;
+ }
+
+ /* If the link is already up, we must already be ok, and don't need to
+ * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
+ * everything for us? Resetting it takes the link down and requires
+ * several seconds for it to come back.
+ */
+ if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
+ put_device(&tbiphy->mdio.dev);
+ return;
+ }
+
+ /* Single clk mode, mii mode off (for serdes communication) */
+ phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
+
+ phy_write(tbiphy, MII_ADVERTISE,
+ ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
+ ADVERTISE_1000XPSE_ASYM);
+
+ phy_write(tbiphy, MII_BMCR,
+ BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
+ BMCR_SPEED1000);
+
+ put_device(&tbiphy->mdio.dev);
+}
+
+/* Initializes driver's PHY state, and attaches to the PHY.
+ * Returns 0 on success.
+ */
+static int init_phy(struct net_device *dev)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ struct gfar_private *priv = netdev_priv(dev);
+ phy_interface_t interface = priv->interface;
+ struct phy_device *phydev;
+ struct ethtool_eee edata;
+
+ linkmode_set_bit_array(phy_10_100_features_array,
+ ARRAY_SIZE(phy_10_100_features_array),
+ mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
+
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+
+ phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
+ interface);
+ if (!phydev) {
+ dev_err(&dev->dev, "could not attach to PHY\n");
+ return -ENODEV;
+ }
+
+ if (interface == PHY_INTERFACE_MODE_SGMII)
+ gfar_configure_serdes(dev);
+
+ /* Remove any features not supported by the controller */
+ linkmode_and(phydev->supported, phydev->supported, mask);
+ linkmode_copy(phydev->advertising, phydev->supported);
+
+ /* Add support for flow control */
+ phy_support_asym_pause(phydev);
+
+ /* disable EEE autoneg, EEE not supported by eTSEC */
+ memset(&edata, 0, sizeof(struct ethtool_eee));
+ phy_ethtool_set_eee(phydev, &edata);
+
+ return 0;
+}
+
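+/* Prepend the Tx frame control block (FCB) used for checksum, VLAN and
+ * timestamp offload
+ */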
+static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
+{
+ struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN);
+
+ memset(fcb, 0, GMAC_FCB_LEN);
+
+ return fcb;
+}
+
+static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
+ int fcb_length)
+{
+ /* If we're here, it's an IP packet with a TCP or UDP
+ * payload. We set it up for checksum offload, using the
+ * pseudo-header checksum we provide.
+ */
+ u8 flags = TXFCB_DEFAULT;
+
+ /* Tell the controller what the protocol is,
+ * and provide the already-calculated pseudo-header checksum (phcs)
+ */
+ if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
+ flags |= TXFCB_UDP;
+ fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
+ } else
+ fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
+
+ /* l3os is the distance between the start of the
+ * frame (skb->data) and the start of the IP hdr.
+ * l4os is the distance between the start of the
+ * l3 hdr and the l4 hdr
+ */
+ fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
+ fcb->l4os = skb_network_header_len(skb);
+
+ fcb->flags = flags;
+}
+
+static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
+{
+ fcb->flags |= TXFCB_VLN;
+ fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
+}
+
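+/* Advance a Tx BD pointer by 'stride' descriptors, wrapping around the ring */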
+static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
+ struct txbd8 *base, int ring_size)
+{
+ struct txbd8 *new_bd = bdp + stride;
+
+ return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
+}
+
+static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
+ int ring_size)
+{
+ return skip_txbd(bdp, 1, base, ring_size);
+}
+
+/* eTSEC12: csum generation not supported for some fcb offsets */
+static inline bool gfar_csum_errata_12(struct gfar_private *priv,
+ unsigned long fcb_addr)
+{
+ return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
+ (fcb_addr % 0x20) > 0x18);
+}
+
+/* eTSEC76: csum generation for frames larger than 2500 may
+ * cause excess delays before start of transmission
+ */
+static inline bool gfar_csum_errata_76(struct gfar_private *priv,
+ unsigned int len)
+{
+ return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
+ (len > 2500));
+}
+
+/* This is called by the kernel when a frame is ready for transmission.
+ * It is installed as the netdev's ndo_start_xmit handler.
+ */
+static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar_priv_tx_q *tx_queue = NULL;
+ struct netdev_queue *txq;
+ struct gfar __iomem *regs = NULL;
+ struct txfcb *fcb = NULL;
+ struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
+ u32 lstatus;
+ skb_frag_t *frag;
+ int i, rq = 0;
+ int do_tstamp, do_csum, do_vlan;
+ u32 bufaddr;
+ unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
+
+ rq = skb->queue_mapping;
+ tx_queue = priv->tx_queue[rq];
+ txq = netdev_get_tx_queue(dev, rq);
+ base = tx_queue->tx_bd_base;
+ regs = tx_queue->grp->regs;
+
+ do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
+ do_vlan = skb_vlan_tag_present(skb);
+ do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en;
+
+ if (do_csum || do_vlan)
+ fcb_len = GMAC_FCB_LEN;
+
+ /* check if time stamp should be generated */
+ if (unlikely(do_tstamp))
+ fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
+
+ /* make space for additional header when fcb is needed */
+ if (fcb_len) {
+ if (unlikely(skb_cow_head(skb, fcb_len))) {
+ dev->stats.tx_errors++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ }
+
+ /* total number of fragments in the SKB */
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ /* calculate the required number of TxBDs for this skb */
+ if (unlikely(do_tstamp))
+ nr_txbds = nr_frags + 2;
+ else
+ nr_txbds = nr_frags + 1;
+
+ /* check if there is space to queue this packet */
+ if (nr_txbds > tx_queue->num_txbdfree) {
+ /* no space, stop the queue */
+ netif_tx_stop_queue(txq);
+ dev->stats.tx_fifo_errors++;
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Update transmit stats */
+ bytes_sent = skb->len;
+ tx_queue->stats.tx_bytes += bytes_sent;
+ /* keep Tx bytes on wire for BQL accounting */
+ GFAR_CB(skb)->bytes_sent = bytes_sent;
+ tx_queue->stats.tx_packets++;
+
+ txbdp = txbdp_start = tx_queue->cur_tx;
+ lstatus = be32_to_cpu(txbdp->lstatus);
+
+ /* Add TxPAL between FCB and frame if required */
+ if (unlikely(do_tstamp)) {
+ skb_push(skb, GMAC_TXPAL_LEN);
+ memset(skb->data, 0, GMAC_TXPAL_LEN);
+ }
+
+ /* Add TxFCB if required */
+ if (fcb_len) {
+ fcb = gfar_add_fcb(skb);
+ lstatus |= BD_LFLAG(TXBD_TOE);
+ }
+
+ /* Set up checksumming */
+ if (do_csum) {
+ gfar_tx_checksum(skb, fcb, fcb_len);
+
+ if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
+ unlikely(gfar_csum_errata_76(priv, skb->len))) {
+ __skb_pull(skb, GMAC_FCB_LEN);
+ skb_checksum_help(skb);
+ if (do_vlan || do_tstamp) {
+ /* put back a new fcb for vlan/tstamp TOE */
+ fcb = gfar_add_fcb(skb);
+ } else {
+ /* Tx TOE not used */
+ lstatus &= ~(BD_LFLAG(TXBD_TOE));
+ fcb = NULL;
+ }
+ }
+ }
+
+ if (do_vlan)
+ gfar_tx_vlan(skb, fcb);
+
+ bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
+ goto dma_map_err;
+
+ txbdp_start->bufPtr = cpu_to_be32(bufaddr);
+
+ /* Time stamp insertion requires one additional TxBD */
+ if (unlikely(do_tstamp))
+ txbdp_tstamp = txbdp = next_txbd(txbdp, base,
+ tx_queue->tx_ring_size);
+
+ if (likely(!nr_frags)) {
+ if (likely(!do_tstamp))
+ lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+ } else {
+ u32 lstatus_start = lstatus;
+
+ /* Place the fragment addresses and lengths into the TxBDs */
+ frag = &skb_shinfo(skb)->frags[0];
+ for (i = 0; i < nr_frags; i++, frag++) {
+ unsigned int size;
+
+ /* Point at the next BD, wrapping as needed */
+ txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
+
+ size = skb_frag_size(frag);
+
+ lstatus = be32_to_cpu(txbdp->lstatus) | size |
+ BD_LFLAG(TXBD_READY);
+
+ /* Handle the last BD specially */
+ if (i == nr_frags - 1)
+ lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+
+ bufaddr = skb_frag_dma_map(priv->dev, frag, 0,
+ size, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
+ goto dma_map_err;
+
+ /* set the TxBD length and buffer pointer */
+ txbdp->bufPtr = cpu_to_be32(bufaddr);
+ txbdp->lstatus = cpu_to_be32(lstatus);
+ }
+
+ lstatus = lstatus_start;
+ }
+
+ /* If time stamping is requested one additional TxBD must be set up. The
+ * first TxBD points to the FCB and must have a data length of
+ * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
+ * the full frame length.
+ */
+ if (unlikely(do_tstamp)) {
+ u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
+
+ bufaddr = be32_to_cpu(txbdp_start->bufPtr);
+ bufaddr += fcb_len;
+
+ lstatus_ts |= BD_LFLAG(TXBD_READY) |
+ (skb_headlen(skb) - fcb_len);
+ if (!nr_frags)
+ lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+
+ txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
+ txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
+ lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
+
+ /* Setup tx hardware time stamping */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ fcb->ptp = 1;
+ } else {
+ lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
+ }
+
+ skb_tx_timestamp(skb);
+ netdev_tx_sent_queue(txq, bytes_sent);
+
+ gfar_wmb();
+
+ txbdp_start->lstatus = cpu_to_be32(lstatus);
+
+ gfar_wmb(); /* force lstatus write before tx_skbuff */
+
+ tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
+
+ /* Update the current skb pointer to the next entry we will use
+ * (wrapping if necessary)
+ */
+ tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
+ TX_RING_MOD_MASK(tx_queue->tx_ring_size);
+
+ tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
+
+ /* We can work in parallel with gfar_clean_tx_ring(), except
+ * when modifying num_txbdfree. Note that we didn't grab the lock
+ * when we were reading the num_txbdfree and checking for available
+ * space, that's because outside of this function it can only grow.
+ */
+ spin_lock_bh(&tx_queue->txlock);
+ /* reduce TxBD free count */
+ tx_queue->num_txbdfree -= (nr_txbds);
+ spin_unlock_bh(&tx_queue->txlock);
+
+ /* If the next BD still needs to be cleaned up, then the bds
+ * are full. We need to tell the kernel to stop sending us stuff.
+ */
+ if (!tx_queue->num_txbdfree) {
+ netif_tx_stop_queue(txq);
+
+ dev->stats.tx_fifo_errors++;
+ }
+
+ /* Tell the DMA to go go go */
+ gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
+
+ return NETDEV_TX_OK;
+
+dma_map_err:
+ txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
+ if (do_tstamp)
+ txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
+ for (i = 0; i < nr_frags; i++) {
+ lstatus = be32_to_cpu(txbdp->lstatus);
+ if (!(lstatus & BD_LFLAG(TXBD_READY)))
+ break;
+
+ lstatus &= ~BD_LFLAG(TXBD_READY);
+ txbdp->lstatus = cpu_to_be32(lstatus);
+ bufaddr = be32_to_cpu(txbdp->bufPtr);
+ dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
+ DMA_TO_DEVICE);
+ txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
+ }
+ gfar_wmb();
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+/* Changes the mac address if the controller is not running. */
+static int gfar_set_mac_address(struct net_device *dev)
+{
+ gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
+
+ return 0;
+}
+
+static int gfar_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+ cpu_relax();
+
+ if (dev->flags & IFF_UP)
+ stop_gfar(dev);
+
+ dev->mtu = new_mtu;
+
+ if (dev->flags & IFF_UP)
+ startup_gfar(dev);
+
+ clear_bit_unlock(GFAR_RESETTING, &priv->state);
+
+ return 0;
+}
+
+static void reset_gfar(struct net_device *ndev)
+{
+ struct gfar_private *priv = netdev_priv(ndev);
+
+ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+ cpu_relax();
+
+ stop_gfar(ndev);
+ startup_gfar(ndev);
+
+ clear_bit_unlock(GFAR_RESETTING, &priv->state);
+}
+
+/* gfar_reset_task gets scheduled when a packet has not been
+ * transmitted after a set amount of time.
+ * For now, assume that clearing out all the structures, and
+ * starting over will fix the problem.
+ */
+static void gfar_reset_task(struct work_struct *work)
+{
+ struct gfar_private *priv = container_of(work, struct gfar_private,
+ reset_task);
+ reset_gfar(priv->ndev);
+}
+
+static void gfar_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ dev->stats.tx_errors++;
+ schedule_work(&priv->reset_task);
+}
+
+static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ struct gfar_private *priv = netdev_priv(netdev);
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ priv->hwts_tx_en = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
+ return -ERANGE;
+ priv->hwts_tx_en = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ if (priv->hwts_rx_en) {
+ priv->hwts_rx_en = 0;
+ reset_gfar(netdev);
+ }
+ break;
+ default:
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
+ return -ERANGE;
+ if (!priv->hwts_rx_en) {
+ priv->hwts_rx_en = 1;
+ reset_gfar(netdev);
+ }
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ }
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ struct gfar_private *priv = netdev_priv(netdev);
+
+ config.flags = 0;
+ config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ config.rx_filter = (priv->hwts_rx_en ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct phy_device *phydev = dev->phydev;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (cmd == SIOCSHWTSTAMP)
+ return gfar_hwtstamp_set(dev, rq);
+ if (cmd == SIOCGHWTSTAMP)
+ return gfar_hwtstamp_get(dev, rq);
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_mii_ioctl(phydev, rq, cmd);
+}
+
+/* Reclaim completed Tx descriptors and free their skbs (runs from NAPI Tx poll) */
+static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
+{
+ struct net_device *dev = tx_queue->dev;
+ struct netdev_queue *txq;
+ struct gfar_private *priv = netdev_priv(dev);
+ struct txbd8 *bdp, *next = NULL;
+ struct txbd8 *lbdp = NULL;
+ struct txbd8 *base = tx_queue->tx_bd_base;
+ struct sk_buff *skb;
+ int skb_dirtytx;
+ int tx_ring_size = tx_queue->tx_ring_size;
+ int frags = 0, nr_txbds = 0;
+ int i;
+ int howmany = 0;
+ int tqi = tx_queue->qindex;
+ unsigned int bytes_sent = 0;
+ u32 lstatus;
+ size_t buflen;
+
+ txq = netdev_get_tx_queue(dev, tqi);
+ bdp = tx_queue->dirty_tx;
+ skb_dirtytx = tx_queue->skb_dirtytx;
+
+ while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
+ bool do_tstamp;
+
+ do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en;
+
+ frags = skb_shinfo(skb)->nr_frags;
+
+ /* When time stamping, one additional TxBD must be freed.
+ * Also, we need to dma_unmap_single() the TxPAL.
+ */
+ if (unlikely(do_tstamp))
+ nr_txbds = frags + 2;
+ else
+ nr_txbds = frags + 1;
+
+ lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
+
+ lstatus = be32_to_cpu(lbdp->lstatus);
+
+ /* Only clean completed frames */
+ if ((lstatus & BD_LFLAG(TXBD_READY)) &&
+ (lstatus & BD_LENGTH_MASK))
+ break;
+
+ if (unlikely(do_tstamp)) {
+ next = next_txbd(bdp, base, tx_ring_size);
+ buflen = be16_to_cpu(next->length) +
+ GMAC_FCB_LEN + GMAC_TXPAL_LEN;
+ } else
+ buflen = be16_to_cpu(bdp->length);
+
+ dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
+ buflen, DMA_TO_DEVICE);
+
+ if (unlikely(do_tstamp)) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
+ ~0x7UL);
+
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
+ skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ gfar_clear_txbd_status(bdp);
+ bdp = next;
+ }
+
+ gfar_clear_txbd_status(bdp);
+ bdp = next_txbd(bdp, base, tx_ring_size);
+
+ for (i = 0; i < frags; i++) {
+ dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
+ be16_to_cpu(bdp->length),
+ DMA_TO_DEVICE);
+ gfar_clear_txbd_status(bdp);
+ bdp = next_txbd(bdp, base, tx_ring_size);
+ }
+
+ bytes_sent += GFAR_CB(skb)->bytes_sent;
+
+ dev_kfree_skb_any(skb);
+
+ tx_queue->tx_skbuff[skb_dirtytx] = NULL;
+
+ skb_dirtytx = (skb_dirtytx + 1) &
+ TX_RING_MOD_MASK(tx_ring_size);
+
+ howmany++;
+ spin_lock(&tx_queue->txlock);
+ tx_queue->num_txbdfree += nr_txbds;
+ spin_unlock(&tx_queue->txlock);
+ }
+
+ /* If we freed a buffer, we can restart transmission, if necessary */
+ if (tx_queue->num_txbdfree &&
+ netif_tx_queue_stopped(txq) &&
+ !(test_bit(GFAR_DOWN, &priv->state)))
+ netif_wake_subqueue(priv->ndev, tqi);
+
+ /* Update dirty indicators */
+ tx_queue->skb_dirtytx = skb_dirtytx;
+ tx_queue->dirty_tx = bdp;
+
+ netdev_tx_completed_queue(txq, howmany, bytes_sent);
+}
+
+static void count_errors(u32 lstatus, struct net_device *ndev)
+{
+ struct gfar_private *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct gfar_extra_stats *estats = &priv->extra_stats;
+
+ /* If the packet was truncated, none of the other errors matter */
+ if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
+ stats->rx_length_errors++;
+
+ atomic64_inc(&estats->rx_trunc);
+
+ return;
+ }
+ /* Count the errors, if there were any */
+ if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
+ stats->rx_length_errors++;
+
+ if (lstatus & BD_LFLAG(RXBD_LARGE))
+ atomic64_inc(&estats->rx_large);
+ else
+ atomic64_inc(&estats->rx_short);
+ }
+ if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
+ stats->rx_frame_errors++;
+ atomic64_inc(&estats->rx_nonoctet);
+ }
+ if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
+ atomic64_inc(&estats->rx_crcerr);
+ stats->rx_crc_errors++;
+ }
+ if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
+ atomic64_inc(&estats->rx_overrun);
+ stats->rx_over_errors++;
+ }
+}
+
+static irqreturn_t gfar_receive(int irq, void *grp_id)
+{
+ struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
+ unsigned long flags;
+ u32 imask, ievent;
+
+ ievent = gfar_read(&grp->regs->ievent);
+
+ if (unlikely(ievent & IEVENT_FGPI)) {
+ gfar_write(&grp->regs->ievent, IEVENT_FGPI);
+ return IRQ_HANDLED;
+ }
+
+ if (likely(napi_schedule_prep(&grp->napi_rx))) {
+ spin_lock_irqsave(&grp->grplock, flags);
+ imask = gfar_read(&grp->regs->imask);
+ imask &= IMASK_RX_DISABLED | grp->priv->rmon_overflow.imask;
+ gfar_write(&grp->regs->imask, imask);
+ spin_unlock_irqrestore(&grp->grplock, flags);
+ __napi_schedule(&grp->napi_rx);
+ } else {
+ /* Clear IEVENT, so interrupts aren't called again
+ * because of the packets that have already arrived.
+ */
+ gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Interrupt Handler for Transmit complete */
+static irqreturn_t gfar_transmit(int irq, void *grp_id)
+{
+ struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
+ unsigned long flags;
+ u32 imask;
+
+ if (likely(napi_schedule_prep(&grp->napi_tx))) {
+ spin_lock_irqsave(&grp->grplock, flags);
+ imask = gfar_read(&grp->regs->imask);
+ imask &= IMASK_TX_DISABLED | grp->priv->rmon_overflow.imask;
+ gfar_write(&grp->regs->imask, imask);
+ spin_unlock_irqrestore(&grp->grplock, flags);
+ __napi_schedule(&grp->napi_tx);
+ } else {
+ /* Clear IEVENT, so interrupts aren't called again
+ * because of the packets that have already arrived.
+ */
+ gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
+ struct sk_buff *skb, bool first)
+{
+ int size = lstatus & BD_LENGTH_MASK;
+ struct page *page = rxb->page;
+
+ if (likely(first)) {
+ skb_put(skb, size);
+ } else {
+ /* the last fragment's length contains the full frame length */
+ if (lstatus & BD_LFLAG(RXBD_LAST))
+ size -= skb->len;
+
+ WARN(size < 0, "gianfar: rx fragment size underflow");
+ if (size < 0)
+ return false;
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rxb->page_offset + RXBUF_ALIGNMENT,
+ size, GFAR_RXB_TRUESIZE);
+ }
+
+ /* try reuse page */
+ if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
+ return false;
+
+ /* change offset to the other half */
+ rxb->page_offset ^= GFAR_RXB_TRUESIZE;
+
+ page_ref_inc(page);
+
+ return true;
+}
+
+static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
+ struct gfar_rx_buff *old_rxb)
+{
+ struct gfar_rx_buff *new_rxb;
+ u16 nta = rxq->next_to_alloc;
+
+ new_rxb = &rxq->rx_buff[nta];
+
+ /* find next buf that can reuse a page */
+ nta++;
+ rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
+
+ /* copy page reference */
+ *new_rxb = *old_rxb;
+
+ /* sync for use by the device */
+ dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
+ old_rxb->page_offset,
+ GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
+}
+
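+/* Build or extend the frame's skb from the current Rx buffer, then either
+ * recycle the page's free half or unmap the page
+ */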
+static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
+ u32 lstatus, struct sk_buff *skb)
+{
+ struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
+ struct page *page = rxb->page;
+ bool first = false;
+
+ if (likely(!skb)) {
+ void *buff_addr = page_address(page) + rxb->page_offset;
+
+ skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
+ if (unlikely(!skb)) {
+ gfar_rx_alloc_err(rx_queue);
+ return NULL;
+ }
+ skb_reserve(skb, RXBUF_ALIGNMENT);
+ first = true;
+ }
+
+ dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
+ GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
+
+ if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
+ /* reuse the free half of the page */
+ gfar_reuse_rx_page(rx_queue, rxb);
+ } else {
+ /* page cannot be reused, unmap it */
+ dma_unmap_page(rx_queue->dev, rxb->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ }
+
+ /* clear rxb content */
+ rxb->page = NULL;
+
+ return skb;
+}
+
+static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
+{
+ /* If valid headers were found, and valid sums
+ * were verified, then we tell the kernel that no
+ * checksumming is necessary. Otherwise, we leave ip_summed
+ * as CHECKSUM_NONE so the stack verifies the checksum itself.
+ */
+ if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
+ (RXFCB_CIP | RXFCB_CTU))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
+}
+
+/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
+static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
+{
+ struct gfar_private *priv = netdev_priv(ndev);
+ struct rxfcb *fcb = NULL;
+
+ /* the FCB, if present, is at the beginning of the buffer */
+ fcb = (struct rxfcb *)skb->data;
+
+ /* Remove the FCB from the skb
+ * Remove the padded bytes, if there are any
+ */
+ if (priv->uses_rxfcb)
+ skb_pull(skb, GMAC_FCB_LEN);
+
+ /* Get receive timestamp from the skb */
+ if (priv->hwts_rx_en) {
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+ u64 *ns = (u64 *) skb->data;
+
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
+ }
+
+ if (priv->padding)
+ skb_pull(skb, priv->padding);
+
+ /* Trim off the FCS */
+ pskb_trim(skb, skb->len - ETH_FCS_LEN);
+
+ if (ndev->features & NETIF_F_RXCSUM)
+ gfar_rx_checksum(skb, fcb);
+
+ /* There's a need to check for NETIF_F_HW_VLAN_CTAG_RX here.
+ * Even if vlan rx accel is disabled, on some chips
+ * RXFCB_VLN is pseudo-randomly set.
+ */
+ if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
+ be16_to_cpu(fcb->flags) & RXFCB_VLN)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ be16_to_cpu(fcb->vlctl));
+}
+
+/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
+ * until the budget/quota has been reached. Returns the number
+ * of frames handled
+ */
+static int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
+ int rx_work_limit)
+{
+ struct net_device *ndev = rx_queue->ndev;
+ struct gfar_private *priv = netdev_priv(ndev);
+ struct rxbd8 *bdp;
+ int i, howmany = 0;
+ struct sk_buff *skb = rx_queue->skb;
+ int cleaned_cnt = gfar_rxbd_unused(rx_queue);
+ unsigned int total_bytes = 0, total_pkts = 0;
+
+ /* Get the first full descriptor */
+ i = rx_queue->next_to_clean;
+
+ while (rx_work_limit--) {
+ u32 lstatus;
+
+ if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
+ gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
+ cleaned_cnt = 0;
+ }
+
+ bdp = &rx_queue->rx_bd_base[i];
+ lstatus = be32_to_cpu(bdp->lstatus);
+ if (lstatus & BD_LFLAG(RXBD_EMPTY))
+ break;
+
+ /* lost RXBD_LAST descriptor due to overrun */
+ if (skb &&
+ (lstatus & BD_LFLAG(RXBD_FIRST))) {
+ /* discard faulty buffer */
+ dev_kfree_skb(skb);
+ skb = NULL;
+ rx_queue->stats.rx_dropped++;
+
+ /* can continue normally */
+ }
+
+ /* order rx buffer descriptor reads */
+ rmb();
+
+ /* fetch next to clean buffer from the ring */
+ skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
+ if (unlikely(!skb))
+ break;
+
+ cleaned_cnt++;
+ howmany++;
+
+ if (unlikely(++i == rx_queue->rx_ring_size))
+ i = 0;
+
+ rx_queue->next_to_clean = i;
+
+ /* fetch next buffer if not the last in frame */
+ if (!(lstatus & BD_LFLAG(RXBD_LAST)))
+ continue;
+
+ if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
+ count_errors(lstatus, ndev);
+
+ /* discard faulty buffer */
+ dev_kfree_skb(skb);
+ skb = NULL;
+ rx_queue->stats.rx_dropped++;
+ continue;
+ }
+
+ gfar_process_frame(ndev, skb);
+
+ /* Increment the number of packets */
+ total_pkts++;
+ total_bytes += skb->len;
+
+ skb_record_rx_queue(skb, rx_queue->qindex);
+
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ /* Send the packet up the stack */
+ napi_gro_receive(&rx_queue->grp->napi_rx, skb);
+
+ skb = NULL;
+ }
+
+ /* Store incomplete frames for completion */
+ rx_queue->skb = skb;
+
+ rx_queue->stats.rx_packets += total_pkts;
+ rx_queue->stats.rx_bytes += total_bytes;
+
+ if (cleaned_cnt)
+ gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
+
+ /* Update Last Free RxBD pointer for LFC */
+ if (unlikely(priv->tx_actual_en)) {
+ u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
+
+ gfar_write(rx_queue->rfbptr, bdp_dma);
+ }
+
+ return howmany;
+}
+
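+/* NAPI Rx poll for the single-queue-per-group case: clean the Rx ring and
+ * re-enable Rx interrupts if the budget was not exhausted
+ */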
+static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
+{
+ struct gfar_priv_grp *gfargrp =
+ container_of(napi, struct gfar_priv_grp, napi_rx);
+ struct gfar __iomem *regs = gfargrp->regs;
+ struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
+ int work_done = 0;
+
+ /* Clear IEVENT, so interrupts aren't called again
+ * because of the packets that have already arrived
+ */
+ gfar_write(&regs->ievent, IEVENT_RX_MASK);
+
+ work_done = gfar_clean_rx_ring(rx_queue, budget);
+
+ if (work_done < budget) {
+ u32 imask;
+ napi_complete_done(napi, work_done);
+ /* Clear the halt bit in RSTAT */
+ gfar_write(&regs->rstat, gfargrp->rstat);
+
+ spin_lock_irq(&gfargrp->grplock);
+ imask = gfar_read(&regs->imask);
+ imask |= IMASK_RX_DEFAULT;
+ gfar_write(&regs->imask, imask);
+ spin_unlock_irq(&gfargrp->grplock);
+ }
+
+ return work_done;
+}
+
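+/* NAPI Tx poll for the single-queue-per-group case: run Tx cleanup to
+ * completion and re-enable Tx interrupts
+ */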
+static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
+{
+ struct gfar_priv_grp *gfargrp =
+ container_of(napi, struct gfar_priv_grp, napi_tx);
+ struct gfar __iomem *regs = gfargrp->regs;
+ struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
+ u32 imask;
+
+ /* Clear IEVENT, so interrupts aren't called again
+ * because of the packets that have already arrived
+ */
+ gfar_write(&regs->ievent, IEVENT_TX_MASK);
+
+ /* run Tx cleanup to completion */
+ if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
+ gfar_clean_tx_ring(tx_queue);
+
+ napi_complete(napi);
+
+ spin_lock_irq(&gfargrp->grplock);
+ imask = gfar_read(&regs->imask);
+ imask |= IMASK_TX_DEFAULT;
+ gfar_write(&regs->imask, imask);
+ spin_unlock_irq(&gfargrp->grplock);
+
+ return 0;
+}
+
+/* GFAR error interrupt handler */
+static irqreturn_t gfar_error(int irq, void *grp_id)
+{
+ struct gfar_priv_grp *gfargrp = grp_id;
+ struct gfar __iomem *regs = gfargrp->regs;
+ struct gfar_private *priv = gfargrp->priv;
+ struct net_device *dev = priv->ndev;
+
+ /* Save ievent for future reference */
+ u32 events = gfar_read(&regs->ievent);
+
+ /* Clear IEVENT */
+ gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
+
+ /* Magic Packet is not an error. */
+ if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
+ (events & IEVENT_MAG))
+ events &= ~IEVENT_MAG;
+
+ /* Log the error details if Rx/Tx error messages are enabled */
+ if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
+ netdev_dbg(dev,
+ "error interrupt (ievent=0x%08x imask=0x%08x)\n",
+ events, gfar_read(&regs->imask));
+
+ /* Update the error counters */
+ if (events & IEVENT_TXE) {
+ dev->stats.tx_errors++;
+
+ if (events & IEVENT_LC)
+ dev->stats.tx_window_errors++;
+ if (events & IEVENT_CRL)
+ dev->stats.tx_aborted_errors++;
+ if (events & IEVENT_XFUN) {
+ netif_dbg(priv, tx_err, dev,
+ "TX FIFO underrun, packet dropped\n");
+ dev->stats.tx_dropped++;
+ atomic64_inc(&priv->extra_stats.tx_underrun);
+
+ schedule_work(&priv->reset_task);
+ }
+ netif_dbg(priv, tx_err, dev, "Transmit Error\n");
+ }
+ if (events & IEVENT_MSRO) {
+ struct rmon_mib __iomem *rmon = &regs->rmon;
+ u32 car;
+
+ spin_lock(&priv->rmon_overflow.lock);
+ car = gfar_read(&rmon->car1) & CAR1_C1RDR;
+ if (car) {
+ priv->rmon_overflow.rdrp++;
+ gfar_write(&rmon->car1, car);
+ }
+ spin_unlock(&priv->rmon_overflow.lock);
+ }
+ if (events & IEVENT_BSY) {
+ dev->stats.rx_over_errors++;
+ atomic64_inc(&priv->extra_stats.rx_bsy);
+
+ netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
+ gfar_read(&regs->rstat));
+ }
+ if (events & IEVENT_BABR) {
+ dev->stats.rx_errors++;
+ atomic64_inc(&priv->extra_stats.rx_babr);
+
+ netif_dbg(priv, rx_err, dev, "babbling RX error\n");
+ }
+ if (events & IEVENT_EBERR) {
+ atomic64_inc(&priv->extra_stats.eberr);
+ netif_dbg(priv, rx_err, dev, "bus error\n");
+ }
+ if (events & IEVENT_RXC)
+ netif_dbg(priv, rx_status, dev, "control frame\n");
+
+ if (events & IEVENT_BABT) {
+ atomic64_inc(&priv->extra_stats.tx_babt);
+ netif_dbg(priv, tx_err, dev, "babbling TX error\n");
+ }
+ return IRQ_HANDLED;
+}
+
+/* The interrupt handler for devices with one interrupt */
+static irqreturn_t gfar_interrupt(int irq, void *grp_id)
+{
+ struct gfar_priv_grp *gfargrp = grp_id;
+
+ /* Save ievent for future reference */
+ u32 events = gfar_read(&gfargrp->regs->ievent);
+
+ /* Check for reception */
+ if (events & IEVENT_RX_MASK)
+ gfar_receive(irq, grp_id);
+
+ /* Check for transmit completion */
+ if (events & IEVENT_TX_MASK)
+ gfar_transmit(irq, grp_id);
+
+ /* Check for errors */
+ if (events & IEVENT_ERR_MASK)
+ gfar_error(irq, grp_id);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void gfar_netpoll(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ int i;
+
+ /* If the device has multiple interrupts, run tx/rx */
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+ for (i = 0; i < priv->num_grps; i++) {
+ struct gfar_priv_grp *grp = &priv->gfargrp[i];
+
+ disable_irq(gfar_irq(grp, TX)->irq);
+ disable_irq(gfar_irq(grp, RX)->irq);
+ disable_irq(gfar_irq(grp, ER)->irq);
+ gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
+ enable_irq(gfar_irq(grp, ER)->irq);
+ enable_irq(gfar_irq(grp, RX)->irq);
+ enable_irq(gfar_irq(grp, TX)->irq);
+ }
+ } else {
+ for (i = 0; i < priv->num_grps; i++) {
+ struct gfar_priv_grp *grp = &priv->gfargrp[i];
+
+ disable_irq(gfar_irq(grp, TX)->irq);
+ gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
+ enable_irq(gfar_irq(grp, TX)->irq);
+ }
+ }
+}
+#endif
+
+static void free_grp_irqs(struct gfar_priv_grp *grp)
+{
+ free_irq(gfar_irq(grp, TX)->irq, grp);
+ free_irq(gfar_irq(grp, RX)->irq, grp);
+ free_irq(gfar_irq(grp, ER)->irq, grp);
+}
+
+static int register_grp_irqs(struct gfar_priv_grp *grp)
+{
+ struct gfar_private *priv = grp->priv;
+ struct net_device *dev = priv->ndev;
+ int err;
+
+ /* If the device has multiple interrupts, register for
+ * them. Otherwise, register only for the single shared one.
+ */
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+ /* Install our interrupt handlers for Error,
+ * Transmit, and Receive
+ */
+ err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
+ gfar_irq(grp, ER)->name, grp);
+ if (err < 0) {
+ netif_err(priv, intr, dev, "Can't get IRQ %d\n",
+ gfar_irq(grp, ER)->irq);
+
+ goto err_irq_fail;
+ }
+ enable_irq_wake(gfar_irq(grp, ER)->irq);
+
+ err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
+ gfar_irq(grp, TX)->name, grp);
+ if (err < 0) {
+ netif_err(priv, intr, dev, "Can't get IRQ %d\n",
+ gfar_irq(grp, TX)->irq);
+ goto tx_irq_fail;
+ }
+ err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
+ gfar_irq(grp, RX)->name, grp);
+ if (err < 0) {
+ netif_err(priv, intr, dev, "Can't get IRQ %d\n",
+ gfar_irq(grp, RX)->irq);
+ goto rx_irq_fail;
+ }
+ enable_irq_wake(gfar_irq(grp, RX)->irq);
+
+ } else {
+ err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
+ gfar_irq(grp, TX)->name, grp);
+ if (err < 0) {
+ netif_err(priv, intr, dev, "Can't get IRQ %d\n",
+ gfar_irq(grp, TX)->irq);
+ goto err_irq_fail;
+ }
+ enable_irq_wake(gfar_irq(grp, TX)->irq);
+ }
+
+ return 0;
+
+rx_irq_fail:
+ free_irq(gfar_irq(grp, TX)->irq, grp);
+tx_irq_fail:
+ free_irq(gfar_irq(grp, ER)->irq, grp);
+err_irq_fail:
+ return err;
+}
+
+static void gfar_free_irq(struct gfar_private *priv)
+{
+ int i;
+
+ /* Free the IRQs */
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+ for (i = 0; i < priv->num_grps; i++)
+ free_grp_irqs(&priv->gfargrp[i]);
+ } else {
+ for (i = 0; i < priv->num_grps; i++)
+ free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
+ &priv->gfargrp[i]);
+ }
+}
+
+static int gfar_request_irq(struct gfar_private *priv)
+{
+ int err, i, j;
+
+ for (i = 0; i < priv->num_grps; i++) {
+ err = register_grp_irqs(&priv->gfargrp[i]);
+ if (err) {
+ for (j = 0; j < i; j++)
+ free_grp_irqs(&priv->gfargrp[j]);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/* Called when something needs to use the ethernet device
+ * Returns 0 for success.
+ */
+static int gfar_enet_open(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ int err;
+
+ err = init_phy(dev);
+ if (err)
+ return err;
+
+ err = gfar_request_irq(priv);
+ if (err)
+ return err;
+
+ err = startup_gfar(dev);
+ if (err)
+ return err;
+
+ return err;
+}
+
+/* Stops the kernel queue, and halts the controller */
+static int gfar_close(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ cancel_work_sync(&priv->reset_task);
+ stop_gfar(dev);
+
+ /* Disconnect from the PHY */
+ phy_disconnect(dev->phydev);
+
+ gfar_free_irq(priv);
+
+ return 0;
+}
+
+/* Clears each of the exact match registers to zero, so they
+ * don't interfere with normal reception
+ */
+static void gfar_clear_exact_match(struct net_device *dev)
+{
+ int idx;
+ static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
+
+ for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
+ gfar_set_mac_for_addr(dev, idx, zero_arr);
+}
+
+/* Update the hash table based on the current list of multicast
+ * addresses we subscribe to. Also, change the promiscuity of
+ * the device based on the flags (this function is called
+ * whenever dev->flags is changed)
+ */
+static void gfar_set_multi(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tempval;
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Set RCTRL to PROM */
+ tempval = gfar_read(&regs->rctrl);
+ tempval |= RCTRL_PROM;
+ gfar_write(&regs->rctrl, tempval);
+ } else {
+ /* Set RCTRL to not PROM */
+ tempval = gfar_read(&regs->rctrl);
+ tempval &= ~(RCTRL_PROM);
+ gfar_write(&regs->rctrl, tempval);
+ }
+
+ if (dev->flags & IFF_ALLMULTI) {
+ /* Set the hash to rx all multicast frames */
+ gfar_write(&regs->igaddr0, 0xffffffff);
+ gfar_write(&regs->igaddr1, 0xffffffff);
+ gfar_write(&regs->igaddr2, 0xffffffff);
+ gfar_write(&regs->igaddr3, 0xffffffff);
+ gfar_write(&regs->igaddr4, 0xffffffff);
+ gfar_write(&regs->igaddr5, 0xffffffff);
+ gfar_write(&regs->igaddr6, 0xffffffff);
+ gfar_write(&regs->igaddr7, 0xffffffff);
+ gfar_write(&regs->gaddr0, 0xffffffff);
+ gfar_write(&regs->gaddr1, 0xffffffff);
+ gfar_write(&regs->gaddr2, 0xffffffff);
+ gfar_write(&regs->gaddr3, 0xffffffff);
+ gfar_write(&regs->gaddr4, 0xffffffff);
+ gfar_write(&regs->gaddr5, 0xffffffff);
+ gfar_write(&regs->gaddr6, 0xffffffff);
+ gfar_write(&regs->gaddr7, 0xffffffff);
+ } else {
+ int em_num;
+ int idx;
+
+ /* zero out the hash */
+ gfar_write(&regs->igaddr0, 0x0);
+ gfar_write(&regs->igaddr1, 0x0);
+ gfar_write(&regs->igaddr2, 0x0);
+ gfar_write(&regs->igaddr3, 0x0);
+ gfar_write(&regs->igaddr4, 0x0);
+ gfar_write(&regs->igaddr5, 0x0);
+ gfar_write(&regs->igaddr6, 0x0);
+ gfar_write(&regs->igaddr7, 0x0);
+ gfar_write(&regs->gaddr0, 0x0);
+ gfar_write(&regs->gaddr1, 0x0);
+ gfar_write(&regs->gaddr2, 0x0);
+ gfar_write(&regs->gaddr3, 0x0);
+ gfar_write(&regs->gaddr4, 0x0);
+ gfar_write(&regs->gaddr5, 0x0);
+ gfar_write(&regs->gaddr6, 0x0);
+ gfar_write(&regs->gaddr7, 0x0);
+
+ /* If we have extended hash tables, we need to
+ * clear the exact match registers to prepare for
+ * setting them
+ */
+ if (priv->extended_hash) {
+ em_num = GFAR_EM_NUM + 1;
+ gfar_clear_exact_match(dev);
+ idx = 1;
+ } else {
+ idx = 0;
+ em_num = 0;
+ }
+
+ if (netdev_mc_empty(dev))
+ return;
+
+ /* Parse the list, and set the appropriate bits */
+ netdev_for_each_mc_addr(ha, dev) {
+ if (idx < em_num) {
+ gfar_set_mac_for_addr(dev, idx, ha->addr);
+ idx++;
+ } else
+ gfar_set_hash_for_addr(dev, ha->addr);
+ }
+ }
+}
+
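+/* Soft-reset the MAC block and reprogram frame length limits, Rx
+ * offloads, address/hash filtering and interrupt coalescing from the
+ * current driver configuration.
+ */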
+void gfar_mac_reset(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tempval;
+
+ /* Reset MAC layer */
+ gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
+
+ /* We need to delay at least 3 TX clocks */
+ udelay(3);
+
+ /* the soft reset bit is not self-resetting, so we need to
+ * clear it before resuming normal operation
+ */
+ gfar_write(&regs->maccfg1, 0);
+
+ udelay(3);
+
+ gfar_rx_offload_en(priv);
+
+ /* Initialize the max receive frame/buffer lengths */
+ gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
+ gfar_write(&regs->mrblr, GFAR_RXB_SIZE);
+
+ /* Initialize the Minimum Frame Length Register */
+ gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
+
+ /* Initialize MACCFG2. */
+ tempval = MACCFG2_INIT_SETTINGS;
+
+ /* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
+ * are marked as truncated. Avoid this by setting MACCFG2[Huge Frame]=1
+ * and by checking RxBD[LG] and discarding frames larger than MAXFRM.
+ */
+ if (gfar_has_errata(priv, GFAR_ERRATA_74))
+ tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
+
+ gfar_write(&regs->maccfg2, tempval);
+
+ /* Clear mac addr hash registers */
+ gfar_write(&regs->igaddr0, 0);
+ gfar_write(&regs->igaddr1, 0);
+ gfar_write(&regs->igaddr2, 0);
+ gfar_write(&regs->igaddr3, 0);
+ gfar_write(&regs->igaddr4, 0);
+ gfar_write(&regs->igaddr5, 0);
+ gfar_write(&regs->igaddr6, 0);
+ gfar_write(&regs->igaddr7, 0);
+
+ gfar_write(&regs->gaddr0, 0);
+ gfar_write(&regs->gaddr1, 0);
+ gfar_write(&regs->gaddr2, 0);
+ gfar_write(&regs->gaddr3, 0);
+ gfar_write(&regs->gaddr4, 0);
+ gfar_write(&regs->gaddr5, 0);
+ gfar_write(&regs->gaddr6, 0);
+ gfar_write(&regs->gaddr7, 0);
+
+ if (priv->extended_hash)
+ gfar_clear_exact_match(priv->ndev);
+
+ gfar_mac_rx_config(priv);
+
+ gfar_mac_tx_config(priv);
+
+ gfar_set_mac_address(priv->ndev);
+
+ gfar_set_multi(priv->ndev);
+
+ /* clear ievent and imask before configuring coalescing */
+ gfar_ints_disable(priv);
+
+ /* Configure the coalescing support */
+ gfar_configure_coalescing_all(priv);
+}
+
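+/* One-time controller setup at probe time: halt the DMA engine, reset
+ * the MAC, clear the RMON MIB block if present, program the stashing
+ * attributes and FIFO thresholds, and set up interrupt steering on
+ * multi-group devices.
+ */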
+static void gfar_hw_init(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 attrs;
+
+ /* Stop the DMA engine now, in case it was running before
+ * (The firmware could have used it, and left it running).
+ */
+ gfar_halt(priv);
+
+ gfar_mac_reset(priv);
+
+ /* Zero out the RMON MIB registers if the device has them */
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
+ memset_io(&regs->rmon, 0, offsetof(struct rmon_mib, car1));
+
+ /* Mask off the CAM interrupts */
+ gfar_write(&regs->rmon.cam1, 0xffffffff);
+ gfar_write(&regs->rmon.cam2, 0xffffffff);
+ /* Clear the CAR registers (w1c style) */
+ gfar_write(&regs->rmon.car1, 0xffffffff);
+ gfar_write(&regs->rmon.car2, 0xffffffff);
+ }
+
+ /* Initialize ECNTRL */
+ gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
+
+ /* Set the extraction length and index */
+ attrs = ATTRELI_EL(priv->rx_stash_size) |
+ ATTRELI_EI(priv->rx_stash_index);
+
+ gfar_write(&regs->attreli, attrs);
+
+ /* Start with defaults, and add stashing
+ * depending on driver parameters
+ */
+ attrs = ATTR_INIT_SETTINGS;
+
+ if (priv->bd_stash_en)
+ attrs |= ATTR_BDSTASH;
+
+ if (priv->rx_stash_size != 0)
+ attrs |= ATTR_BUFSTASH;
+
+ gfar_write(&regs->attr, attrs);
+
+ /* FIFO configs */
+ gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
+ gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
+ gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
+
+ /* Program the interrupt steering regs, only for MG devices */
+ if (priv->num_grps > 1)
+ gfar_write_isrg(priv);
+}
+
+static const struct net_device_ops gfar_netdev_ops = {
+ .ndo_open = gfar_enet_open,
+ .ndo_start_xmit = gfar_start_xmit,
+ .ndo_stop = gfar_close,
+ .ndo_change_mtu = gfar_change_mtu,
+ .ndo_set_features = gfar_set_features,
+ .ndo_set_rx_mode = gfar_set_multi,
+ .ndo_tx_timeout = gfar_timeout,
+ .ndo_eth_ioctl = gfar_ioctl,
+ .ndo_get_stats64 = gfar_get_stats64,
+ .ndo_change_carrier = fixed_phy_change_carrier,
+ .ndo_set_mac_address = gfar_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = gfar_netpoll,
+#endif
+};
+
+/* Set up the ethernet device structure, private data,
+ * and anything else we need before we start
+ */
+static int gfar_probe(struct platform_device *ofdev)
+{
+ struct device_node *np = ofdev->dev.of_node;
+ struct net_device *dev = NULL;
+ struct gfar_private *priv = NULL;
+ int err = 0, i;
+
+ err = gfar_of_init(ofdev, &dev);
+
+ if (err)
+ return err;
+
+ priv = netdev_priv(dev);
+ priv->ndev = dev;
+ priv->ofdev = ofdev;
+ priv->dev = &ofdev->dev;
+ SET_NETDEV_DEV(dev, &ofdev->dev);
+
+ INIT_WORK(&priv->reset_task, gfar_reset_task);
+
+ platform_set_drvdata(ofdev, priv);
+
+ gfar_detect_errata(priv);
+
+ /* Set the dev->base_addr to the gfar reg region */
+ dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
+
+ /* Fill in the dev structure */
+ dev->watchdog_timeo = TX_TIMEOUT;
+ /* MTU range: 50 - 9586 */
+ dev->mtu = 1500;
+ dev->min_mtu = 50;
+ dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN;
+ dev->netdev_ops = &gfar_netdev_ops;
+ dev->ethtool_ops = &gfar_ethtool_ops;
+
+ /* Register a NAPI context for each interrupt group */
+ for (i = 0; i < priv->num_grps; i++) {
+ netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
+ gfar_poll_rx_sq);
+ netif_napi_add_tx_weight(dev, &priv->gfargrp[i].napi_tx,
+ gfar_poll_tx_sq, 2);
+ }
+
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_RXCSUM;
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
+ }
+
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+ }
+
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
+ gfar_init_addr_hash_table(priv);
+
+ /* Insert receive timestamps into the padding alignment bytes,
+ * plus 2 bytes of padding to ensure CPU alignment.
+ */
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
+ priv->padding = 8 + DEFAULT_PADDING;
+
+ if (dev->features & NETIF_F_IP_CSUM ||
+ priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
+ dev->needed_headroom = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
+
+ /* Initializing some of the rx/tx queue level parameters */
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
+ priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
+ priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
+ priv->tx_queue[i]->txic = DEFAULT_TXIC;
+ }
+
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
+ priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
+ priv->rx_queue[i]->rxic = DEFAULT_RXIC;
+ }
+
+ /* Always enable rx filer if available */
+ priv->rx_filer_enable =
+ (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
+ /* Enable most messages by default */
+ priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
+ /* use priority h/w tx queue scheduling for single queue devices */
+ if (priv->num_tx_queues == 1)
+ priv->prio_sched_en = 1;
+
+ set_bit(GFAR_DOWN, &priv->state);
+
+ gfar_hw_init(priv);
+
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
+ struct rmon_mib __iomem *rmon = &priv->gfargrp[0].regs->rmon;
+
+ spin_lock_init(&priv->rmon_overflow.lock);
+ priv->rmon_overflow.imask = IMASK_MSRO;
+ gfar_write(&rmon->cam1, gfar_read(&rmon->cam1) & ~CAM1_M1RDR);
+ }
+
+ /* Carrier starts down, phylib will bring it up */
+ netif_carrier_off(dev);
+
+ err = register_netdev(dev);
+
+ if (err) {
+ pr_err("%s: Cannot register net device, aborting\n", dev->name);
+ goto register_fail;
+ }
+
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
+ priv->wol_supported |= GFAR_WOL_MAGIC;
+
+ if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
+ priv->rx_filer_enable)
+ priv->wol_supported |= GFAR_WOL_FILER_UCAST;
+
+ device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);
+
+ /* fill out IRQ number and name fields */
+ for (i = 0; i < priv->num_grps; i++) {
+ struct gfar_priv_grp *grp = &priv->gfargrp[i];
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+ sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
+ dev->name, "_g", '0' + i, "_tx");
+ sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
+ dev->name, "_g", '0' + i, "_rx");
+ sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
+ dev->name, "_g", '0' + i, "_er");
+ } else
+ strcpy(gfar_irq(grp, TX)->name, dev->name);
+ }
+
+ /* Initialize the filer table */
+ gfar_init_filer_table(priv);
+
+ /* Print out the device info */
+ netdev_info(dev, "mac: %pM\n", dev->dev_addr);
+
+ /* Even more device info helps when determining which kernel
+ * provided which set of benchmarks.
+ */
+ netdev_info(dev, "Running with NAPI enabled\n");
+ for (i = 0; i < priv->num_rx_queues; i++)
+ netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
+ i, priv->rx_queue[i]->rx_ring_size);
+ for (i = 0; i < priv->num_tx_queues; i++)
+ netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
+ i, priv->tx_queue[i]->tx_ring_size);
+
+ return 0;
+
+register_fail:
+ if (of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
+ unmap_group_regs(priv);
+ gfar_free_rx_queues(priv);
+ gfar_free_tx_queues(priv);
+ of_node_put(priv->phy_node);
+ of_node_put(priv->tbi_node);
+ free_gfar_dev(priv);
+ return err;
+}
+
+static int gfar_remove(struct platform_device *ofdev)
+{
+ struct gfar_private *priv = platform_get_drvdata(ofdev);
+ struct device_node *np = ofdev->dev.of_node;
+
+ of_node_put(priv->phy_node);
+ of_node_put(priv->tbi_node);
+
+ unregister_netdev(priv->ndev);
+
+ if (of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
+
+ unmap_group_regs(priv);
+ gfar_free_rx_queues(priv);
+ gfar_free_tx_queues(priv);
+ free_gfar_dev(priv);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static void __gfar_filer_disable(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 temp;
+
+ temp = gfar_read(&regs->rctrl);
+ temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
+ gfar_write(&regs->rctrl, temp);
+}
+
+static void __gfar_filer_enable(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 temp;
+
+ temp = gfar_read(&regs->rctrl);
+ temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
+ gfar_write(&regs->rctrl, temp);
+}
+
+/* Filer rules implementing wol capabilities */
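+/* The table is first set to reject every frame; for unicast wake-up a
+ * two-entry rule is then added that matches the station MAC address,
+ * comparing the upper three bytes (DAH) AND-chained with the lower
+ * three bytes (DAL), steering matches to the default Rx queue and
+ * flagging them for the Filer General Purpose Interrupt (FGPI) that
+ * serves as the wake event.
+ */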
+static void gfar_filer_config_wol(struct gfar_private *priv)
+{
+ unsigned int i;
+ u32 rqfcr;
+
+ __gfar_filer_disable(priv);
+
+ /* clear the filer table, reject any packet by default */
+ rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
+ for (i = 0; i <= MAX_FILER_IDX; i++)
+ gfar_write_filer(priv, i, rqfcr, 0);
+
+ i = 0;
+ if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
+ /* unicast packet, accept it */
+ struct net_device *ndev = priv->ndev;
+ /* get the default rx queue index */
+ u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
+ u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
+ (ndev->dev_addr[1] << 8) |
+ ndev->dev_addr[2];
+
+ rqfcr = (qindex << 10) | RQFCR_AND |
+ RQFCR_CMP_EXACT | RQFCR_PID_DAH;
+
+ gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
+
+ dest_mac_addr = (ndev->dev_addr[3] << 16) |
+ (ndev->dev_addr[4] << 8) |
+ ndev->dev_addr[5];
+ rqfcr = (qindex << 10) | RQFCR_GPI |
+ RQFCR_CMP_EXACT | RQFCR_PID_DAL;
+ gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
+ }
+
+ __gfar_filer_enable(priv);
+}
+
+static void gfar_filer_restore_table(struct gfar_private *priv)
+{
+ u32 rqfcr, rqfpr;
+ unsigned int i;
+
+ __gfar_filer_disable(priv);
+
+ for (i = 0; i <= MAX_FILER_IDX; i++) {
+ rqfcr = priv->ftp_rqfcr[i];
+ rqfpr = priv->ftp_rqfpr[i];
+ gfar_write_filer(priv, i, rqfcr, rqfpr);
+ }
+
+ __gfar_filer_enable(priv);
+}
+
+/* gfar_start() for Rx only and with the FGPI filer interrupt enabled */
+static void gfar_start_wol_filer(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tempval;
+ int i = 0;
+
+ /* Enable Rx hw queues */
+ gfar_write(&regs->rqueue, priv->rqueue);
+
+ /* Initialize DMACTRL to have WWR and WOP */
+ tempval = gfar_read(&regs->dmactrl);
+ tempval |= DMACTRL_INIT_SETTINGS;
+ gfar_write(&regs->dmactrl, tempval);
+
+ /* Make sure we aren't stopped */
+ tempval = gfar_read(&regs->dmactrl);
+ tempval &= ~DMACTRL_GRS;
+ gfar_write(&regs->dmactrl, tempval);
+
+ for (i = 0; i < priv->num_grps; i++) {
+ regs = priv->gfargrp[i].regs;
+ /* Clear RHLT, so that the DMA starts polling now */
+ gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
+ /* enable the Filer General Purpose Interrupt */
+ gfar_write(&regs->imask, IMASK_FGPI);
+ }
+
+ /* Enable Rx DMA */
+ tempval = gfar_read(&regs->maccfg1);
+ tempval |= MACCFG1_RX_EN;
+ gfar_write(&regs->maccfg1, tempval);
+}
+
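+/* Suspend handler: with Magic Packet wake-up, the Rx block is left
+ * running with only the magic-packet interrupt unmasked; with filer
+ * based wake-up the filer is reprogrammed to match wake frames; with
+ * no wake-up configured the PHY is simply stopped.
+ */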
+static int gfar_suspend(struct device *dev)
+{
+ struct gfar_private *priv = dev_get_drvdata(dev);
+ struct net_device *ndev = priv->ndev;
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tempval;
+ u16 wol = priv->wol_opts;
+
+ if (!netif_running(ndev))
+ return 0;
+
+ disable_napi(priv);
+ netif_tx_lock(ndev);
+ netif_device_detach(ndev);
+ netif_tx_unlock(ndev);
+
+ gfar_halt(priv);
+
+ if (wol & GFAR_WOL_MAGIC) {
+ /* Enable interrupt on Magic Packet */
+ gfar_write(&regs->imask, IMASK_MAG);
+
+ /* Enable Magic Packet mode */
+ tempval = gfar_read(&regs->maccfg2);
+ tempval |= MACCFG2_MPEN;
+ gfar_write(&regs->maccfg2, tempval);
+
+ /* re-enable the Rx block */
+ tempval = gfar_read(&regs->maccfg1);
+ tempval |= MACCFG1_RX_EN;
+ gfar_write(&regs->maccfg1, tempval);
+
+ } else if (wol & GFAR_WOL_FILER_UCAST) {
+ gfar_filer_config_wol(priv);
+ gfar_start_wol_filer(priv);
+
+ } else {
+ phy_stop(ndev->phydev);
+ }
+
+ return 0;
+}
+
+static int gfar_resume(struct device *dev)
+{
+ struct gfar_private *priv = dev_get_drvdata(dev);
+ struct net_device *ndev = priv->ndev;
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tempval;
+ u16 wol = priv->wol_opts;
+
+ if (!netif_running(ndev))
+ return 0;
+
+ if (wol & GFAR_WOL_MAGIC) {
+ /* Disable Magic Packet mode */
+ tempval = gfar_read(&regs->maccfg2);
+ tempval &= ~MACCFG2_MPEN;
+ gfar_write(&regs->maccfg2, tempval);
+
+ } else if (wol & GFAR_WOL_FILER_UCAST) {
+ /* need to stop rx only, tx is already down */
+ gfar_halt(priv);
+ gfar_filer_restore_table(priv);
+
+ } else {
+ phy_start(ndev->phydev);
+ }
+
+ gfar_start(priv);
+
+ netif_device_attach(ndev);
+ enable_napi(priv);
+
+ return 0;
+}
+
+static int gfar_restore(struct device *dev)
+{
+ struct gfar_private *priv = dev_get_drvdata(dev);
+ struct net_device *ndev = priv->ndev;
+
+ if (!netif_running(ndev)) {
+ netif_device_attach(ndev);
+
+ return 0;
+ }
+
+ gfar_init_bds(ndev);
+
+ gfar_mac_reset(priv);
+
+ gfar_init_tx_rx_base(priv);
+
+ gfar_start(priv);
+
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+
+ if (ndev->phydev)
+ phy_start(ndev->phydev);
+
+ netif_device_attach(ndev);
+ enable_napi(priv);
+
+ return 0;
+}
+
+static const struct dev_pm_ops gfar_pm_ops = {
+ .suspend = gfar_suspend,
+ .resume = gfar_resume,
+ .freeze = gfar_suspend,
+ .thaw = gfar_resume,
+ .restore = gfar_restore,
+};
+
+#define GFAR_PM_OPS (&gfar_pm_ops)
+
+#else
+
+#define GFAR_PM_OPS NULL
+
+#endif
+
+static const struct of_device_id gfar_match[] =
+{
+ {
+ .type = "network",
+ .compatible = "gianfar",
+ },
+ {
+ .compatible = "fsl,etsec2",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, gfar_match);
+
+/* Structure for a device driver */
+static struct platform_driver gfar_driver = {
+ .driver = {
+ .name = "fsl-gianfar",
+ .pm = GFAR_PM_OPS,
+ .of_match_table = gfar_match,
+ },
+ .probe = gfar_probe,
+ .remove = gfar_remove,
+};
+
+module_platform_driver(gfar_driver);
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
new file mode 100644
index 000000000..68b59d320
--- /dev/null
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -0,0 +1,1369 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * drivers/net/ethernet/freescale/gianfar.h
+ *
+ * Gianfar Ethernet Driver
+ * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560
+ * Based on 8260_io/fcc_enet.c
+ *
+ * Author: Andy Fleming
+ * Maintainer: Kumar Gala
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
+ *
+ * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
+ *
+ * Still left to do:
+ * -Add support for module parameters
+ * -Add patch for ethtool phys id
+ */
+#ifndef __GIANFAR_H
+#define __GIANFAR_H
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/crc32.h>
+#include <linux/workqueue.h>
+#include <linux/ethtool.h>
+
+struct ethtool_flow_spec_container {
+ struct ethtool_rx_flow_spec fs;
+ struct list_head list;
+};
+
+struct ethtool_rx_list {
+ struct list_head list;
+ unsigned int count;
+};
+
+/* Length for FCB */
+#define GMAC_FCB_LEN 8
+
+/* Length for TxPAL */
+#define GMAC_TXPAL_LEN 16
+
+/* Default padding amount */
+#define DEFAULT_PADDING 2
+
+/* Number of bytes to align the rx bufs to */
+#define RXBUF_ALIGNMENT 64
+
+#define DRV_NAME "gfar-enet"
+
+/* MAXIMUM NUMBER OF QUEUES SUPPORTED */
+#define MAX_TX_QS 0x8
+#define MAX_RX_QS 0x8
+
+/* MAXIMUM NUMBER OF GROUPS SUPPORTED */
+#define MAXGROUPS 0x2
+
+/* These need to be powers of 2 for this driver */
+#define DEFAULT_TX_RING_SIZE 256
+#define DEFAULT_RX_RING_SIZE 256
+
+#define GFAR_RX_BUFF_ALLOC 16
+
+#define GFAR_RX_MAX_RING_SIZE 256
+#define GFAR_TX_MAX_RING_SIZE 256
+
+#define FBTHR_SHIFT 24
+#define DEFAULT_RX_LFC_THR 16
+#define DEFAULT_LFC_PTVVAL 4
+
+#define GFAR_RXB_TRUESIZE 2048
+#define GFAR_SKBFRAG_OVR (RXBUF_ALIGNMENT \
+ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define GFAR_RXB_SIZE rounddown(GFAR_RXB_TRUESIZE - GFAR_SKBFRAG_OVR, 64)
+#define GFAR_SKBFRAG_SIZE (GFAR_RXB_SIZE + GFAR_SKBFRAG_OVR)
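+/* Sizing note: each Rx buffer is a 2 KiB chunk (GFAR_RXB_TRUESIZE);
+ * GFAR_RXB_SIZE is what remains for frame data after reserving the
+ * alignment slack and the (architecture-dependent) skb_shared_info
+ * overhead, rounded down to a 64-byte multiple to match
+ * RXBUF_ALIGNMENT.
+ */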
+
+#define TX_RING_MOD_MASK(size) (size-1)
+#define RX_RING_MOD_MASK(size) (size-1)
+#define GFAR_JUMBO_FRAME_SIZE 9600
+
+#define DEFAULT_FIFO_TX_THR 0x100
+#define DEFAULT_FIFO_TX_STARVE 0x40
+#define DEFAULT_FIFO_TX_STARVE_OFF 0x80
+
+/* The number of Exact Match registers */
+#define GFAR_EM_NUM 15
+
+/* Latency of interface clock in nanoseconds */
+/* Interface clock latency, in this case, means the
+ * time described by a value of 1 in the interrupt
+ * coalescing registers' time fields. Since those fields
+ * refer to the time it takes for 64 clocks to pass, the
+ * latencies are as such:
+ * GBIT = 125MHz => 8ns/clock => 8*64 ns / tick
+ * 100 = 25 MHz => 40ns/clock => 40*64 ns / tick
+ * 10 = 2.5 MHz => 400ns/clock => 400*64 ns / tick
+ */
+#define GFAR_GBIT_TIME 512
+#define GFAR_100_TIME 2560
+#define GFAR_10_TIME 25600
+
+#define DEFAULT_TX_COALESCE 1
+#define DEFAULT_TXCOUNT 16
+#define DEFAULT_TXTIME 21
+
+#define DEFAULT_RXTIME 21
+
+#define DEFAULT_RX_COALESCE 0
+#define DEFAULT_RXCOUNT 0
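+/* With the defaults above, Tx interrupt coalescing triggers on 16
+ * queued frames or a 21-tick timeout; at gigabit speed one tick is
+ * 512 ns, so the timeout is roughly 21 * 512 ns ~= 10.7 us. Rx
+ * coalescing is off by default.
+ */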
+
+/* TBI register addresses */
+#define MII_TBICON 0x11
+
+/* TBICON register bit fields */
+#define TBICON_CLK_SELECT 0x0020
+
+/* MAC register bits */
+#define MACCFG1_SOFT_RESET 0x80000000
+#define MACCFG1_RESET_RX_MC 0x00080000
+#define MACCFG1_RESET_TX_MC 0x00040000
+#define MACCFG1_RESET_RX_FUN 0x00020000
+#define MACCFG1_RESET_TX_FUN 0x00010000
+#define MACCFG1_LOOPBACK 0x00000100
+#define MACCFG1_RX_FLOW 0x00000020
+#define MACCFG1_TX_FLOW 0x00000010
+#define MACCFG1_SYNCD_RX_EN 0x00000008
+#define MACCFG1_RX_EN 0x00000004
+#define MACCFG1_SYNCD_TX_EN 0x00000002
+#define MACCFG1_TX_EN 0x00000001
+
+#define MACCFG2_INIT_SETTINGS 0x00007205
+#define MACCFG2_FULL_DUPLEX 0x00000001
+#define MACCFG2_IF 0x00000300
+#define MACCFG2_MII 0x00000100
+#define MACCFG2_GMII 0x00000200
+#define MACCFG2_HUGEFRAME 0x00000020
+#define MACCFG2_LENGTHCHECK 0x00000010
+#define MACCFG2_MPEN 0x00000008
+
+#define ECNTRL_FIFM 0x00008000
+#define ECNTRL_INIT_SETTINGS 0x00001000
+#define ECNTRL_TBI_MODE 0x00000020
+#define ECNTRL_REDUCED_MODE 0x00000010
+#define ECNTRL_R100 0x00000008
+#define ECNTRL_REDUCED_MII_MODE 0x00000004
+#define ECNTRL_SGMII_MODE 0x00000002
+
+#define MINFLR_INIT_SETTINGS 0x00000040
+
+/* Tqueue control */
+#define TQUEUE_EN0 0x00008000
+#define TQUEUE_EN1 0x00004000
+#define TQUEUE_EN2 0x00002000
+#define TQUEUE_EN3 0x00001000
+#define TQUEUE_EN4 0x00000800
+#define TQUEUE_EN5 0x00000400
+#define TQUEUE_EN6 0x00000200
+#define TQUEUE_EN7 0x00000100
+#define TQUEUE_EN_ALL 0x0000FF00
+
+#define TR03WT_WT0_MASK 0xFF000000
+#define TR03WT_WT1_MASK 0x00FF0000
+#define TR03WT_WT2_MASK 0x0000FF00
+#define TR03WT_WT3_MASK 0x000000FF
+
+#define TR47WT_WT4_MASK 0xFF000000
+#define TR47WT_WT5_MASK 0x00FF0000
+#define TR47WT_WT6_MASK 0x0000FF00
+#define TR47WT_WT7_MASK 0x000000FF
+
+/* Rqueue control */
+#define RQUEUE_EX0 0x00800000
+#define RQUEUE_EX1 0x00400000
+#define RQUEUE_EX2 0x00200000
+#define RQUEUE_EX3 0x00100000
+#define RQUEUE_EX4 0x00080000
+#define RQUEUE_EX5 0x00040000
+#define RQUEUE_EX6 0x00020000
+#define RQUEUE_EX7 0x00010000
+#define RQUEUE_EX_ALL 0x00FF0000
+
+#define RQUEUE_EN0 0x00000080
+#define RQUEUE_EN1 0x00000040
+#define RQUEUE_EN2 0x00000020
+#define RQUEUE_EN3 0x00000010
+#define RQUEUE_EN4 0x00000008
+#define RQUEUE_EN5 0x00000004
+#define RQUEUE_EN6 0x00000002
+#define RQUEUE_EN7 0x00000001
+#define RQUEUE_EN_ALL 0x000000FF
+
+/* Init to do tx snooping for buffers and descriptors */
+#define DMACTRL_INIT_SETTINGS 0x000000c3
+#define DMACTRL_GRS 0x00000010
+#define DMACTRL_GTS 0x00000008
+
+#define TSTAT_CLEAR_THALT_ALL 0xFF000000
+#define TSTAT_CLEAR_THALT 0x80000000
+#define TSTAT_CLEAR_THALT0 0x80000000
+#define TSTAT_CLEAR_THALT1 0x40000000
+#define TSTAT_CLEAR_THALT2 0x20000000
+#define TSTAT_CLEAR_THALT3 0x10000000
+#define TSTAT_CLEAR_THALT4 0x08000000
+#define TSTAT_CLEAR_THALT5 0x04000000
+#define TSTAT_CLEAR_THALT6 0x02000000
+#define TSTAT_CLEAR_THALT7 0x01000000
+
+/* Interrupt coalescing macros */
+#define IC_ICEN 0x80000000
+#define IC_ICFT_MASK 0x1fe00000
+#define IC_ICFT_SHIFT 21
+#define mk_ic_icft(x) \
+ (((unsigned int)x << IC_ICFT_SHIFT)&IC_ICFT_MASK)
+#define IC_ICTT_MASK 0x0000ffff
+#define mk_ic_ictt(x) (x&IC_ICTT_MASK)
+
+#define mk_ic_value(count, time) (IC_ICEN | \
+ mk_ic_icft(count) | \
+ mk_ic_ictt(time))
+#define get_icft_value(ic) (((unsigned long)ic & IC_ICFT_MASK) >> \
+ IC_ICFT_SHIFT)
+#define get_ictt_value(ic) ((unsigned long)ic & IC_ICTT_MASK)
+
+#define DEFAULT_TXIC mk_ic_value(DEFAULT_TXCOUNT, DEFAULT_TXTIME)
+#define DEFAULT_RXIC mk_ic_value(DEFAULT_RXCOUNT, DEFAULT_RXTIME)
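+/* For example, DEFAULT_TXIC = mk_ic_value(16, 21) sets the enable bit
+ * (IC_ICEN), places the frame-count threshold 16 in bits 28:21 and the
+ * timer threshold 21 in the low 16 bits of the coalescing register.
+ */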
+
+#define RCTRL_TS_ENABLE 0x01000000
+#define RCTRL_PAL_MASK 0x001f0000
+#define RCTRL_LFC 0x00004000
+#define RCTRL_VLEX 0x00002000
+#define RCTRL_FILREN 0x00001000
+#define RCTRL_GHTX 0x00000400
+#define RCTRL_IPCSEN 0x00000200
+#define RCTRL_TUCSEN 0x00000100
+#define RCTRL_PRSDEP_MASK 0x000000c0
+#define RCTRL_PRSDEP_INIT 0x000000c0
+#define RCTRL_PRSFM 0x00000020
+#define RCTRL_PROM 0x00000008
+#define RCTRL_EMEN 0x00000002
+#define RCTRL_REQ_PARSER (RCTRL_VLEX | RCTRL_IPCSEN | \
+ RCTRL_TUCSEN | RCTRL_FILREN)
+#define RCTRL_CHECKSUMMING (RCTRL_IPCSEN | RCTRL_TUCSEN | \
+ RCTRL_PRSDEP_INIT)
+#define RCTRL_EXTHASH (RCTRL_GHTX)
+#define RCTRL_VLAN (RCTRL_PRSDEP_INIT)
+#define RCTRL_PADDING(x) ((x << 16) & RCTRL_PAL_MASK)
+
+
+#define RSTAT_CLEAR_RHALT 0x00800000
+#define RSTAT_CLEAR_RXF0 0x00000080
+#define RSTAT_RXF_MASK 0x000000ff
+
+#define TCTRL_IPCSEN 0x00004000
+#define TCTRL_TUCSEN 0x00002000
+#define TCTRL_VLINS 0x00001000
+#define TCTRL_THDF 0x00000800
+#define TCTRL_RFCPAUSE 0x00000010
+#define TCTRL_TFCPAUSE 0x00000008
+#define TCTRL_TXSCHED_MASK 0x00000006
+#define TCTRL_TXSCHED_INIT 0x00000000
+/* priority scheduling */
+#define TCTRL_TXSCHED_PRIO 0x00000002
+/* weighted round-robin scheduling (WRRS) */
+#define TCTRL_TXSCHED_WRRS 0x00000004
+/* default WRRS weight and policy setting,
+ * tailored to the tr03wt and tr47wt registers:
+ * equal weight for all Tx Qs, measured in 64-byte units
+ */
+#define DEFAULT_WRRS_WEIGHT 0x18181818
+
+#define TCTRL_INIT_CSUM (TCTRL_TUCSEN | TCTRL_IPCSEN)
+
+#define IEVENT_INIT_CLEAR 0xffffffff
+#define IEVENT_BABR 0x80000000
+#define IEVENT_RXC 0x40000000
+#define IEVENT_BSY 0x20000000
+#define IEVENT_EBERR 0x10000000
+#define IEVENT_MSRO 0x04000000
+#define IEVENT_GTSC 0x02000000
+#define IEVENT_BABT 0x01000000
+#define IEVENT_TXC 0x00800000
+#define IEVENT_TXE 0x00400000
+#define IEVENT_TXB 0x00200000
+#define IEVENT_TXF 0x00100000
+#define IEVENT_LC 0x00040000
+#define IEVENT_CRL 0x00020000
+#define IEVENT_XFUN 0x00010000
+#define IEVENT_RXB0 0x00008000
+#define IEVENT_MAG 0x00000800
+#define IEVENT_GRSC 0x00000100
+#define IEVENT_RXF0 0x00000080
+#define IEVENT_FGPI 0x00000010
+#define IEVENT_FIR 0x00000008
+#define IEVENT_FIQ 0x00000004
+#define IEVENT_DPE 0x00000002
+#define IEVENT_PERR 0x00000001
+#define IEVENT_RX_MASK (IEVENT_RXB0 | IEVENT_RXF0 | IEVENT_BSY)
+#define IEVENT_TX_MASK (IEVENT_TXB | IEVENT_TXF)
+#define IEVENT_RTX_MASK (IEVENT_RX_MASK | IEVENT_TX_MASK)
+#define IEVENT_ERR_MASK \
+(IEVENT_RXC | IEVENT_BSY | IEVENT_EBERR | IEVENT_MSRO | \
+ IEVENT_BABT | IEVENT_TXC | IEVENT_TXE | IEVENT_LC \
+ | IEVENT_CRL | IEVENT_XFUN | IEVENT_DPE | IEVENT_PERR \
+ | IEVENT_MAG | IEVENT_BABR)
+
+#define IMASK_INIT_CLEAR 0x00000000
+#define IMASK_BABR 0x80000000
+#define IMASK_RXC 0x40000000
+#define IMASK_BSY 0x20000000
+#define IMASK_EBERR 0x10000000
+#define IMASK_MSRO 0x04000000
+#define IMASK_GTSC 0x02000000
+#define IMASK_BABT 0x01000000
+#define IMASK_TXC 0x00800000
+#define IMASK_TXEEN 0x00400000
+#define IMASK_TXBEN 0x00200000
+#define IMASK_TXFEN 0x00100000
+#define IMASK_LC 0x00040000
+#define IMASK_CRL 0x00020000
+#define IMASK_XFUN 0x00010000
+#define IMASK_RXB0 0x00008000
+#define IMASK_MAG 0x00000800
+#define IMASK_GRSC 0x00000100
+#define IMASK_RXFEN0 0x00000080
+#define IMASK_FGPI 0x00000010
+#define IMASK_FIR 0x00000008
+#define IMASK_FIQ 0x00000004
+#define IMASK_DPE 0x00000002
+#define IMASK_PERR 0x00000001
+#define IMASK_DEFAULT (IMASK_TXEEN | IMASK_TXFEN | IMASK_TXBEN | \
+ IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \
+ IMASK_XFUN | IMASK_RXC | IMASK_BABT | IMASK_DPE \
+ | IMASK_PERR)
+#define IMASK_RX_DEFAULT (IMASK_RXFEN0 | IMASK_BSY)
+#define IMASK_TX_DEFAULT (IMASK_TXFEN | IMASK_TXBEN)
+
+#define IMASK_RX_DISABLED ((~(IMASK_RX_DEFAULT)) & IMASK_DEFAULT)
+#define IMASK_TX_DISABLED ((~(IMASK_TX_DEFAULT)) & IMASK_DEFAULT)
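+/* IMASK_DEFAULT with the Rx (resp. Tx) event sources masked off, so
+ * those events stay disabled while the corresponding NAPI poll runs.
+ */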
+
+/* Attribute fields */
+
+/* This enables rx snooping for buffers and descriptors */
+#define ATTR_BDSTASH 0x00000800
+
+#define ATTR_BUFSTASH 0x00004000
+
+#define ATTR_SNOOPING 0x000000c0
+#define ATTR_INIT_SETTINGS ATTR_SNOOPING
+
+#define ATTRELI_INIT_SETTINGS 0x0
+#define ATTRELI_EL_MASK 0x3fff0000
+#define ATTRELI_EL(x) (x << 16)
+#define ATTRELI_EI_MASK 0x00003fff
+#define ATTRELI_EI(x) (x)
+
+#define BD_LFLAG(flags) ((flags) << 16)
+#define BD_LENGTH_MASK 0x0000ffff
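+/* The 32-bit lstatus word of a buffer descriptor holds the 16-bit
+ * status flags in its upper half and the buffer length in its lower
+ * half; BD_LFLAG() shifts a TXBD_x/RXBD_x flag into the status half so
+ * it can be tested against or merged into lstatus directly.
+ */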
+
+#define FPR_FILER_MASK 0xFFFFFFFF
+#define MAX_FILER_IDX 0xFF
+
+/* This default RIR value directly corresponds
+ * to the 3-bit hash value generated */
+#define DEFAULT_8RXQ_RIR0 0x05397700
+/* Map even hash values to Q0, and odd ones to Q1 */
+#define DEFAULT_2RXQ_RIR0 0x04104100
+
+/* RQFCR register bits */
+#define RQFCR_GPI 0x80000000
+#define RQFCR_HASHTBL_Q 0x00000000
+#define RQFCR_HASHTBL_0 0x00020000
+#define RQFCR_HASHTBL_1 0x00040000
+#define RQFCR_HASHTBL_2 0x00060000
+#define RQFCR_HASHTBL_3 0x00080000
+#define RQFCR_HASH 0x00010000
+#define RQFCR_QUEUE 0x0000FC00
+#define RQFCR_CLE 0x00000200
+#define RQFCR_RJE 0x00000100
+#define RQFCR_AND 0x00000080
+#define RQFCR_CMP_EXACT 0x00000000
+#define RQFCR_CMP_MATCH 0x00000020
+#define RQFCR_CMP_NOEXACT 0x00000040
+#define RQFCR_CMP_NOMATCH 0x00000060
+
+/* RQFCR PID values */
+#define RQFCR_PID_MASK 0x00000000
+#define RQFCR_PID_PARSE 0x00000001
+#define RQFCR_PID_ARB 0x00000002
+#define RQFCR_PID_DAH 0x00000003
+#define RQFCR_PID_DAL 0x00000004
+#define RQFCR_PID_SAH 0x00000005
+#define RQFCR_PID_SAL 0x00000006
+#define RQFCR_PID_ETY 0x00000007
+#define RQFCR_PID_VID 0x00000008
+#define RQFCR_PID_PRI 0x00000009
+#define RQFCR_PID_TOS 0x0000000A
+#define RQFCR_PID_L4P 0x0000000B
+#define RQFCR_PID_DIA 0x0000000C
+#define RQFCR_PID_SIA 0x0000000D
+#define RQFCR_PID_DPT 0x0000000E
+#define RQFCR_PID_SPT 0x0000000F
+
+/* RQFPR when PID is 0x0001 */
+#define RQFPR_HDR_GE_512 0x00200000
+#define RQFPR_LERR 0x00100000
+#define RQFPR_RAR 0x00080000
+#define RQFPR_RARQ 0x00040000
+#define RQFPR_AR 0x00020000
+#define RQFPR_ARQ 0x00010000
+#define RQFPR_EBC 0x00008000
+#define RQFPR_VLN 0x00004000
+#define RQFPR_CFI 0x00002000
+#define RQFPR_JUM 0x00001000
+#define RQFPR_IPF 0x00000800
+#define RQFPR_FIF 0x00000400
+#define RQFPR_IPV4 0x00000200
+#define RQFPR_IPV6 0x00000100
+#define RQFPR_ICC 0x00000080
+#define RQFPR_ICV 0x00000040
+#define RQFPR_TCP 0x00000020
+#define RQFPR_UDP 0x00000010
+#define RQFPR_TUC 0x00000008
+#define RQFPR_TUV 0x00000004
+#define RQFPR_PER 0x00000002
+#define RQFPR_EER 0x00000001
+
+/* CAR1 bits */
+#define CAR1_C164 0x80000000
+#define CAR1_C1127 0x40000000
+#define CAR1_C1255 0x20000000
+#define CAR1_C1511 0x10000000
+#define CAR1_C11K 0x08000000
+#define CAR1_C1MAX 0x04000000
+#define CAR1_C1MGV 0x02000000
+#define CAR1_C1REJ 0x00020000
+#define CAR1_C1RBY 0x00010000
+#define CAR1_C1RPK 0x00008000
+#define CAR1_C1RFC 0x00004000
+#define CAR1_C1RMC 0x00002000
+#define CAR1_C1RBC 0x00001000
+#define CAR1_C1RXC 0x00000800
+#define CAR1_C1RXP 0x00000400
+#define CAR1_C1RXU 0x00000200
+#define CAR1_C1RAL 0x00000100
+#define CAR1_C1RFL 0x00000080
+#define CAR1_C1RCD 0x00000040
+#define CAR1_C1RCS 0x00000020
+#define CAR1_C1RUN 0x00000010
+#define CAR1_C1ROV 0x00000008
+#define CAR1_C1RFR 0x00000004
+#define CAR1_C1RJB 0x00000002
+#define CAR1_C1RDR 0x00000001
+
+/* CAM1 bits */
+#define CAM1_M164 0x80000000
+#define CAM1_M1127 0x40000000
+#define CAM1_M1255 0x20000000
+#define CAM1_M1511 0x10000000
+#define CAM1_M11K 0x08000000
+#define CAM1_M1MAX 0x04000000
+#define CAM1_M1MGV 0x02000000
+#define CAM1_M1REJ 0x00020000
+#define CAM1_M1RBY 0x00010000
+#define CAM1_M1RPK 0x00008000
+#define CAM1_M1RFC 0x00004000
+#define CAM1_M1RMC 0x00002000
+#define CAM1_M1RBC 0x00001000
+#define CAM1_M1RXC 0x00000800
+#define CAM1_M1RXP 0x00000400
+#define CAM1_M1RXU 0x00000200
+#define CAM1_M1RAL 0x00000100
+#define CAM1_M1RFL 0x00000080
+#define CAM1_M1RCD 0x00000040
+#define CAM1_M1RCS 0x00000020
+#define CAM1_M1RUN 0x00000010
+#define CAM1_M1ROV 0x00000008
+#define CAM1_M1RFR 0x00000004
+#define CAM1_M1RJB 0x00000002
+#define CAM1_M1RDR 0x00000001
+
+/* TxBD status field bits */
+#define TXBD_READY 0x8000
+#define TXBD_PADCRC 0x4000
+#define TXBD_WRAP 0x2000
+#define TXBD_INTERRUPT 0x1000
+#define TXBD_LAST 0x0800
+#define TXBD_CRC 0x0400
+#define TXBD_DEF 0x0200
+#define TXBD_HUGEFRAME 0x0080
+#define TXBD_LATECOLLISION 0x0080
+#define TXBD_RETRYLIMIT 0x0040
+#define TXBD_RETRYCOUNTMASK 0x003c
+#define TXBD_UNDERRUN 0x0002
+#define TXBD_TOE 0x0002
+
+/* Tx FCB param bits */
+#define TXFCB_VLN 0x80
+#define TXFCB_IP 0x40
+#define TXFCB_IP6 0x20
+#define TXFCB_TUP 0x10
+#define TXFCB_UDP 0x08
+#define TXFCB_CIP 0x04
+#define TXFCB_CTU 0x02
+#define TXFCB_NPH 0x01
+#define TXFCB_DEFAULT (TXFCB_IP|TXFCB_TUP|TXFCB_CTU|TXFCB_NPH)
+
+/* RxBD status field bits */
+#define RXBD_EMPTY 0x8000
+#define RXBD_RO1 0x4000
+#define RXBD_WRAP 0x2000
+#define RXBD_INTERRUPT 0x1000
+#define RXBD_LAST 0x0800
+#define RXBD_FIRST 0x0400
+#define RXBD_MISS 0x0100
+#define RXBD_BROADCAST 0x0080
+#define RXBD_MULTICAST 0x0040
+#define RXBD_LARGE 0x0020
+#define RXBD_NONOCTET 0x0010
+#define RXBD_SHORT 0x0008
+#define RXBD_CRCERR 0x0004
+#define RXBD_OVERRUN 0x0002
+#define RXBD_TRUNCATED 0x0001
+#define RXBD_STATS 0x01ff
+#define RXBD_ERR (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET \
+ | RXBD_CRCERR | RXBD_OVERRUN \
+ | RXBD_TRUNCATED)
+
+/* Rx FCB status field bits */
+#define RXFCB_VLN 0x8000
+#define RXFCB_IP 0x4000
+#define RXFCB_IP6 0x2000
+#define RXFCB_TUP 0x1000
+#define RXFCB_CIP 0x0800
+#define RXFCB_CTU 0x0400
+#define RXFCB_EIP 0x0200
+#define RXFCB_ETU 0x0100
+#define RXFCB_CSUM_MASK 0x0f00
+#define RXFCB_PERR_MASK 0x000c
+#define RXFCB_PERR_BADL3 0x0008
+
+#define GFAR_INT_NAME_MAX (IFNAMSIZ + 6) /* '_g#_xx' */
+
+#define GFAR_WOL_MAGIC 0x00000001
+#define GFAR_WOL_FILER_UCAST 0x00000002
+
+struct txbd8
+{
+ union {
+ struct {
+ __be16 status; /* Status Fields */
+ __be16 length; /* Buffer length */
+ };
+ __be32 lstatus;
+ };
+ __be32 bufPtr; /* Buffer Pointer */
+};
+
+struct txfcb {
+ u8 flags;
+ u8 ptp; /* Flag to enable tx timestamping */
+ u8 l4os; /* Level 4 Header Offset */
+ u8 l3os; /* Level 3 Header Offset */
+ __be16 phcs; /* Pseudo-header Checksum */
+ __be16 vlctl; /* VLAN control word */
+};
+
+struct rxbd8
+{
+ union {
+ struct {
+ __be16 status; /* Status Fields */
+ __be16 length; /* Buffer Length */
+ };
+ __be32 lstatus;
+ };
+ __be32 bufPtr; /* Buffer Pointer */
+};
+
+struct rxfcb {
+ __be16 flags;
+ u8 rq; /* Receive Queue index */
+ u8 pro; /* Layer 4 Protocol */
+ u16 reserved;
+ __be16 vlctl; /* VLAN control word */
+};
+
+struct gianfar_skb_cb {
+ unsigned int bytes_sent; /* bytes-on-wire (i.e. no FCB) */
+};
+
+#define GFAR_CB(skb) ((struct gianfar_skb_cb *)((skb)->cb))
+
+struct rmon_mib
+{
+ u32 tr64; /* 0x.680 - Transmit and Receive 64-byte Frame Counter */
+ u32 tr127; /* 0x.684 - Transmit and Receive 65-127 byte Frame Counter */
+ u32 tr255; /* 0x.688 - Transmit and Receive 128-255 byte Frame Counter */
+ u32 tr511; /* 0x.68c - Transmit and Receive 256-511 byte Frame Counter */
+ u32 tr1k; /* 0x.690 - Transmit and Receive 512-1023 byte Frame Counter */
+ u32 trmax; /* 0x.694 - Transmit and Receive 1024-1518 byte Frame Counter */
+ u32 trmgv; /* 0x.698 - Transmit and Receive 1519-1522 byte Good VLAN Frame */
+ u32 rbyt; /* 0x.69c - Receive Byte Counter */
+ u32 rpkt; /* 0x.6a0 - Receive Packet Counter */
+ u32 rfcs; /* 0x.6a4 - Receive FCS Error Counter */
+ u32 rmca; /* 0x.6a8 - Receive Multicast Packet Counter */
+ u32 rbca; /* 0x.6ac - Receive Broadcast Packet Counter */
+ u32 rxcf; /* 0x.6b0 - Receive Control Frame Packet Counter */
+ u32 rxpf; /* 0x.6b4 - Receive Pause Frame Packet Counter */
+ u32 rxuo; /* 0x.6b8 - Receive Unknown OP Code Counter */
+ u32 raln; /* 0x.6bc - Receive Alignment Error Counter */
+ u32 rflr; /* 0x.6c0 - Receive Frame Length Error Counter */
+ u32 rcde; /* 0x.6c4 - Receive Code Error Counter */
+ u32 rcse; /* 0x.6c8 - Receive Carrier Sense Error Counter */
+ u32 rund; /* 0x.6cc - Receive Undersize Packet Counter */
+ u32 rovr; /* 0x.6d0 - Receive Oversize Packet Counter */
+ u32 rfrg; /* 0x.6d4 - Receive Fragments Counter */
+ u32 rjbr; /* 0x.6d8 - Receive Jabber Counter */
+ u32 rdrp; /* 0x.6dc - Receive Drop Counter */
+ u32 tbyt; /* 0x.6e0 - Transmit Byte Counter */
+ u32 tpkt; /* 0x.6e4 - Transmit Packet Counter */
+ u32 tmca; /* 0x.6e8 - Transmit Multicast Packet Counter */
+ u32 tbca; /* 0x.6ec - Transmit Broadcast Packet Counter */
+ u32 txpf; /* 0x.6f0 - Transmit Pause Control Frame Counter */
+ u32 tdfr; /* 0x.6f4 - Transmit Deferral Packet Counter */
+ u32 tedf; /* 0x.6f8 - Transmit Excessive Deferral Packet Counter */
+ u32 tscl; /* 0x.6fc - Transmit Single Collision Packet Counter */
+ u32 tmcl; /* 0x.700 - Transmit Multiple Collision Packet Counter */
+ u32 tlcl; /* 0x.704 - Transmit Late Collision Packet Counter */
+ u32 txcl; /* 0x.708 - Transmit Excessive Collision Packet Counter */
+ u32 tncl; /* 0x.70c - Transmit Total Collision Counter */
+ u8 res1[4];
+ u32 tdrp; /* 0x.714 - Transmit Drop Frame Counter */
+ u32 tjbr; /* 0x.718 - Transmit Jabber Frame Counter */
+ u32 tfcs; /* 0x.71c - Transmit FCS Error Counter */
+ u32 txcf; /* 0x.720 - Transmit Control Frame Counter */
+ u32 tovr; /* 0x.724 - Transmit Oversize Frame Counter */
+ u32 tund; /* 0x.728 - Transmit Undersize Frame Counter */
+ u32 tfrg; /* 0x.72c - Transmit Fragments Frame Counter */
+ u32 car1; /* 0x.730 - Carry Register One */
+ u32 car2; /* 0x.734 - Carry Register Two */
+ u32 cam1; /* 0x.738 - Carry Mask Register One */
+ u32 cam2; /* 0x.73c - Carry Mask Register Two */
+};
+
+struct rmon_overflow {
+ /* lock for synchronization of the rdrp field of this struct, and
+ * CAR1/CAR2 registers
+ */
+ spinlock_t lock;
+ u32 imask;
+ u64 rdrp;
+};
+
+struct gfar_extra_stats {
+ atomic64_t rx_alloc_err;
+ atomic64_t rx_large;
+ atomic64_t rx_short;
+ atomic64_t rx_nonoctet;
+ atomic64_t rx_crcerr;
+ atomic64_t rx_overrun;
+ atomic64_t rx_bsy;
+ atomic64_t rx_babr;
+ atomic64_t rx_trunc;
+ atomic64_t eberr;
+ atomic64_t tx_babt;
+ atomic64_t tx_underrun;
+ atomic64_t tx_timeout;
+};
+
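+/* Counter count for the RMON MIB block: the 16 bytes subtracted below
+ * are the trailing carry and carry-mask registers (car1/car2 and
+ * cam1/cam2), which are control registers rather than statistics.
+ */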
+#define GFAR_RMON_LEN ((sizeof(struct rmon_mib) - 16)/sizeof(u32))
+#define GFAR_EXTRA_STATS_LEN \
+ (sizeof(struct gfar_extra_stats)/sizeof(atomic64_t))
+
+/* Number of stats exported via ethtool */
+#define GFAR_STATS_LEN (GFAR_RMON_LEN + GFAR_EXTRA_STATS_LEN)
+
+struct gfar {
+ u32 tsec_id; /* 0x.000 - Controller ID register */
+ u32 tsec_id2; /* 0x.004 - Controller ID2 register */
+ u8 res1[8];
+ u32 ievent; /* 0x.010 - Interrupt Event Register */
+ u32 imask; /* 0x.014 - Interrupt Mask Register */
+ u32 edis; /* 0x.018 - Error Disabled Register */
+ u32 emapg; /* 0x.01c - Group Error mapping register */
+ u32 ecntrl; /* 0x.020 - Ethernet Control Register */
+ u32 minflr; /* 0x.024 - Minimum Frame Length Register */
+ u32 ptv; /* 0x.028 - Pause Time Value Register */
+ u32 dmactrl; /* 0x.02c - DMA Control Register */
+ u32 tbipa; /* 0x.030 - TBI PHY Address Register */
+ u8 res2[28];
+ u32 fifo_rx_pause; /* 0x.050 - FIFO receive pause start threshold
+ register */
+ u32 fifo_rx_pause_shutoff; /* 0x.054 - FIFO receive starve shutoff
+ register */
+ u32 fifo_rx_alarm; /* 0x.058 - FIFO receive alarm start threshold
+ register */
+ u32 fifo_rx_alarm_shutoff; /* 0x.05c - FIFO receive alarm starve
+ shutoff register */
+ u8 res3[44];
+ u32 fifo_tx_thr; /* 0x.08c - FIFO transmit threshold register */
+ u8 res4[8];
+ u32 fifo_tx_starve; /* 0x.098 - FIFO transmit starve register */
+ u32 fifo_tx_starve_shutoff; /* 0x.09c - FIFO transmit starve shutoff register */
+ u8 res5[96];
+ u32 tctrl; /* 0x.100 - Transmit Control Register */
+ u32 tstat; /* 0x.104 - Transmit Status Register */
+ u32 dfvlan; /* 0x.108 - Default VLAN Control word */
+ u32 tbdlen; /* 0x.10c - Transmit Buffer Descriptor Data Length Register */
+ u32 txic; /* 0x.110 - Transmit Interrupt Coalescing Configuration Register */
+ u32 tqueue; /* 0x.114 - Transmit queue control register */
+ u8 res7[40];
+ u32 tr03wt; /* 0x.140 - TxBD Rings 0-3 round-robin weightings */
+ u32 tr47wt; /* 0x.144 - TxBD Rings 4-7 round-robin weightings */
+ u8 res8[52];
+ u32 tbdbph; /* 0x.17c - Tx data buffer pointer high */
+ u8 res9a[4];
+ u32 tbptr0; /* 0x.184 - TxBD Pointer for ring 0 */
+ u8 res9b[4];
+ u32 tbptr1; /* 0x.18c - TxBD Pointer for ring 1 */
+ u8 res9c[4];
+ u32 tbptr2; /* 0x.194 - TxBD Pointer for ring 2 */
+ u8 res9d[4];
+ u32 tbptr3; /* 0x.19c - TxBD Pointer for ring 3 */
+ u8 res9e[4];
+ u32 tbptr4; /* 0x.1a4 - TxBD Pointer for ring 4 */
+ u8 res9f[4];
+ u32 tbptr5; /* 0x.1ac - TxBD Pointer for ring 5 */
+ u8 res9g[4];
+ u32 tbptr6; /* 0x.1b4 - TxBD Pointer for ring 6 */
+ u8 res9h[4];
+ u32 tbptr7; /* 0x.1bc - TxBD Pointer for ring 7 */
+ u8 res9[64];
+ u32 tbaseh; /* 0x.200 - TxBD base address high */
+ u32 tbase0; /* 0x.204 - TxBD Base Address of ring 0 */
+ u8 res10a[4];
+ u32 tbase1; /* 0x.20c - TxBD Base Address of ring 1 */
+ u8 res10b[4];
+ u32 tbase2; /* 0x.214 - TxBD Base Address of ring 2 */
+ u8 res10c[4];
+ u32 tbase3; /* 0x.21c - TxBD Base Address of ring 3 */
+ u8 res10d[4];
+ u32 tbase4; /* 0x.224 - TxBD Base Address of ring 4 */
+ u8 res10e[4];
+ u32 tbase5; /* 0x.22c - TxBD Base Address of ring 5 */
+ u8 res10f[4];
+ u32 tbase6; /* 0x.234 - TxBD Base Address of ring 6 */
+ u8 res10g[4];
+ u32 tbase7; /* 0x.23c - TxBD Base Address of ring 7 */
+ u8 res10[192];
+ u32 rctrl; /* 0x.300 - Receive Control Register */
+ u32 rstat; /* 0x.304 - Receive Status Register */
+ u8 res12[8];
+ u32 rxic; /* 0x.310 - Receive Interrupt Coalescing Configuration Register */
+ u32 rqueue; /* 0x.314 - Receive queue control register */
+ u32 rir0; /* 0x.318 - Ring mapping register 0 */
+ u32 rir1; /* 0x.31c - Ring mapping register 1 */
+ u32 rir2; /* 0x.320 - Ring mapping register 2 */
+ u32 rir3; /* 0x.324 - Ring mapping register 3 */
+ u8 res13[8];
+ u32 rbifx; /* 0x.330 - Receive bit field extract control register */
+ u32 rqfar; /* 0x.334 - Receive queue filing table address register */
+ u32 rqfcr; /* 0x.338 - Receive queue filing table control register */
+ u32 rqfpr; /* 0x.33c - Receive queue filing table property register */
+ u32 mrblr; /* 0x.340 - Maximum Receive Buffer Length Register */
+ u8 res14[56];
+ u32 rbdbph; /* 0x.37c - Rx data buffer pointer high */
+ u8 res15a[4];
+ u32 rbptr0; /* 0x.384 - RxBD pointer for ring 0 */
+ u8 res15b[4];
+ u32 rbptr1; /* 0x.38c - RxBD pointer for ring 1 */
+ u8 res15c[4];
+ u32 rbptr2; /* 0x.394 - RxBD pointer for ring 2 */
+ u8 res15d[4];
+ u32 rbptr3; /* 0x.39c - RxBD pointer for ring 3 */
+ u8 res15e[4];
+ u32 rbptr4; /* 0x.3a4 - RxBD pointer for ring 4 */
+ u8 res15f[4];
+ u32 rbptr5; /* 0x.3ac - RxBD pointer for ring 5 */
+ u8 res15g[4];
+ u32 rbptr6; /* 0x.3b4 - RxBD pointer for ring 6 */
+ u8 res15h[4];
+ u32 rbptr7; /* 0x.3bc - RxBD pointer for ring 7 */
+ u8 res16[64];
+ u32 rbaseh; /* 0x.400 - RxBD base address high */
+ u32 rbase0; /* 0x.404 - RxBD base address of ring 0 */
+ u8 res17a[4];
+ u32 rbase1; /* 0x.40c - RxBD base address of ring 1 */
+ u8 res17b[4];
+ u32 rbase2; /* 0x.414 - RxBD base address of ring 2 */
+ u8 res17c[4];
+ u32 rbase3; /* 0x.41c - RxBD base address of ring 3 */
+ u8 res17d[4];
+ u32 rbase4; /* 0x.424 - RxBD base address of ring 4 */
+ u8 res17e[4];
+ u32 rbase5; /* 0x.42c - RxBD base address of ring 5 */
+ u8 res17f[4];
+ u32 rbase6; /* 0x.434 - RxBD base address of ring 6 */
+ u8 res17g[4];
+ u32 rbase7; /* 0x.43c - RxBD base address of ring 7 */
+ u8 res17[192];
+ u32 maccfg1; /* 0x.500 - MAC Configuration 1 Register */
+ u32 maccfg2; /* 0x.504 - MAC Configuration 2 Register */
+ u32 ipgifg; /* 0x.508 - Inter Packet Gap/Inter Frame Gap Register */
+ u32 hafdup; /* 0x.50c - Half Duplex Register */
+ u32 maxfrm; /* 0x.510 - Maximum Frame Length Register */
+ u8 res18[12];
+ u8 gfar_mii_regs[24]; /* See gianfar_phy.h */
+ u32 ifctrl; /* 0x.538 - Interface control register */
+ u32 ifstat; /* 0x.53c - Interface Status Register */
+ u32 macstnaddr1; /* 0x.540 - Station Address Part 1 Register */
+ u32 macstnaddr2; /* 0x.544 - Station Address Part 2 Register */
+ u32 mac01addr1; /* 0x.548 - MAC exact match address 1, part 1 */
+ u32 mac01addr2; /* 0x.54c - MAC exact match address 1, part 2 */
+ u32 mac02addr1; /* 0x.550 - MAC exact match address 2, part 1 */
+ u32 mac02addr2; /* 0x.554 - MAC exact match address 2, part 2 */
+ u32 mac03addr1; /* 0x.558 - MAC exact match address 3, part 1 */
+ u32 mac03addr2; /* 0x.55c - MAC exact match address 3, part 2 */
+ u32 mac04addr1; /* 0x.560 - MAC exact match address 4, part 1 */
+ u32 mac04addr2; /* 0x.564 - MAC exact match address 4, part 2 */
+ u32 mac05addr1; /* 0x.568 - MAC exact match address 5, part 1 */
+ u32 mac05addr2; /* 0x.56c - MAC exact match address 5, part 2 */
+ u32 mac06addr1; /* 0x.570 - MAC exact match address 6, part 1 */
+ u32 mac06addr2; /* 0x.574 - MAC exact match address 6, part 2 */
+ u32 mac07addr1; /* 0x.578 - MAC exact match address 7, part 1 */
+ u32 mac07addr2; /* 0x.57c - MAC exact match address 7, part 2 */
+ u32 mac08addr1; /* 0x.580 - MAC exact match address 8, part 1 */
+ u32 mac08addr2; /* 0x.584 - MAC exact match address 8, part 2 */
+ u32 mac09addr1; /* 0x.588 - MAC exact match address 9, part 1 */
+ u32 mac09addr2; /* 0x.58c - MAC exact match address 9, part 2 */
+ u32 mac10addr1; /* 0x.590 - MAC exact match address 10, part 1*/
+ u32 mac10addr2; /* 0x.594 - MAC exact match address 10, part 2*/
+ u32 mac11addr1; /* 0x.598 - MAC exact match address 11, part 1*/
+ u32 mac11addr2; /* 0x.59c - MAC exact match address 11, part 2*/
+ u32 mac12addr1; /* 0x.5a0 - MAC exact match address 12, part 1*/
+ u32 mac12addr2; /* 0x.5a4 - MAC exact match address 12, part 2*/
+ u32 mac13addr1; /* 0x.5a8 - MAC exact match address 13, part 1*/
+ u32 mac13addr2; /* 0x.5ac - MAC exact match address 13, part 2*/
+ u32 mac14addr1; /* 0x.5b0 - MAC exact match address 14, part 1*/
+ u32 mac14addr2; /* 0x.5b4 - MAC exact match address 14, part 2*/
+ u32 mac15addr1; /* 0x.5b8 - MAC exact match address 15, part 1*/
+ u32 mac15addr2; /* 0x.5bc - MAC exact match address 15, part 2*/
+ u8 res20[192];
+ struct rmon_mib rmon; /* 0x.680-0x.73c */
+ u32 rrej; /* 0x.740 - Receive filer rejected packet counter */
+ u8 res21[188];
+ u32 igaddr0; /* 0x.800 - Individual/Group address register 0*/
+ u32 igaddr1; /* 0x.804 - Individual/Group address register 1*/
+ u32 igaddr2; /* 0x.808 - Individual/Group address register 2*/
+ u32 igaddr3; /* 0x.80c - Individual/Group address register 3*/
+ u32 igaddr4; /* 0x.810 - Individual/Group address register 4*/
+ u32 igaddr5; /* 0x.814 - Individual/Group address register 5*/
+ u32 igaddr6; /* 0x.818 - Individual/Group address register 6*/
+ u32 igaddr7; /* 0x.81c - Individual/Group address register 7*/
+ u8 res22[96];
+ u32 gaddr0; /* 0x.880 - Group address register 0 */
+ u32 gaddr1; /* 0x.884 - Group address register 1 */
+ u32 gaddr2; /* 0x.888 - Group address register 2 */
+ u32 gaddr3; /* 0x.88c - Group address register 3 */
+ u32 gaddr4; /* 0x.890 - Group address register 4 */
+ u32 gaddr5; /* 0x.894 - Group address register 5 */
+ u32 gaddr6; /* 0x.898 - Group address register 6 */
+ u32 gaddr7; /* 0x.89c - Group address register 7 */
+ u8 res23a[352];
+ u32 fifocfg; /* 0x.a00 - FIFO interface config register */
+ u8 res23b[252];
+ u8 res23c[248];
+ u32 attr; /* 0x.bf8 - Attributes Register */
+ u32 attreli; /* 0x.bfc - Attributes Extract Length and Extract Index Register */
+ u32 rqprm0; /* 0x.c00 - Receive queue parameters register 0 */
+ u32 rqprm1; /* 0x.c04 - Receive queue parameters register 1 */
+ u32 rqprm2; /* 0x.c08 - Receive queue parameters register 2 */
+ u32 rqprm3; /* 0x.c0c - Receive queue parameters register 3 */
+ u32 rqprm4; /* 0x.c10 - Receive queue parameters register 4 */
+ u32 rqprm5; /* 0x.c14 - Receive queue parameters register 5 */
+ u32 rqprm6; /* 0x.c18 - Receive queue parameters register 6 */
+ u32 rqprm7; /* 0x.c1c - Receive queue parameters register 7 */
+ u8 res24[36];
+ u32 rfbptr0; /* 0x.c44 - Last free RxBD pointer for ring 0 */
+ u8 res24a[4];
+ u32 rfbptr1; /* 0x.c4c - Last free RxBD pointer for ring 1 */
+ u8 res24b[4];
+ u32 rfbptr2; /* 0x.c54 - Last free RxBD pointer for ring 2 */
+ u8 res24c[4];
+ u32 rfbptr3; /* 0x.c5c - Last free RxBD pointer for ring 3 */
+ u8 res24d[4];
+ u32 rfbptr4; /* 0x.c64 - Last free RxBD pointer for ring 4 */
+ u8 res24e[4];
+ u32 rfbptr5; /* 0x.c6c - Last free RxBD pointer for ring 5 */
+ u8 res24f[4];
+ u32 rfbptr6; /* 0x.c74 - Last free RxBD pointer for ring 6 */
+ u8 res24g[4];
+ u32 rfbptr7; /* 0x.c7c - Last free RxBD pointer for ring 7 */
+ u8 res24h[4];
+ u8 res24x[556];
+ u32 isrg0; /* 0x.eb0 - Interrupt steering group 0 register */
+ u32 isrg1; /* 0x.eb4 - Interrupt steering group 1 register */
+ u32 isrg2; /* 0x.eb8 - Interrupt steering group 2 register */
+ u32 isrg3; /* 0x.ebc - Interrupt steering group 3 register */
+ u8 res25[16];
+ u32 rxic0; /* 0x.ed0 - Ring 0 Rx interrupt coalescing */
+ u32 rxic1; /* 0x.ed4 - Ring 1 Rx interrupt coalescing */
+ u32 rxic2; /* 0x.ed8 - Ring 2 Rx interrupt coalescing */
+ u32 rxic3; /* 0x.edc - Ring 3 Rx interrupt coalescing */
+ u32 rxic4; /* 0x.ee0 - Ring 4 Rx interrupt coalescing */
+ u32 rxic5; /* 0x.ee4 - Ring 5 Rx interrupt coalescing */
+ u32 rxic6; /* 0x.ee8 - Ring 6 Rx interrupt coalescing */
+ u32 rxic7; /* 0x.eec - Ring 7 Rx interrupt coalescing */
+ u8 res26[32];
+ u32 txic0; /* 0x.f10 - Ring 0 Tx interrupt coalescing */
+ u32 txic1; /* 0x.f14 - Ring 1 Tx interrupt coalescing */
+ u32 txic2; /* 0x.f18 - Ring 2 Tx interrupt coalescing */
+ u32 txic3; /* 0x.f1c - Ring 3 Tx interrupt coalescing */
+ u32 txic4; /* 0x.f20 - Ring 4 Tx interrupt coalescing */
+ u32 txic5; /* 0x.f24 - Ring 5 Tx interrupt coalescing */
+ u32 txic6; /* 0x.f28 - Ring 6 Tx interrupt coalescing */
+ u32 txic7; /* 0x.f2c - Ring 7 Tx interrupt coalescing */
+ u8 res27[208];
+};
+
+/* Flags related to gianfar device features */
+#define FSL_GIANFAR_DEV_HAS_GIGABIT 0x00000001
+#define FSL_GIANFAR_DEV_HAS_COALESCE 0x00000002
+#define FSL_GIANFAR_DEV_HAS_RMON 0x00000004
+#define FSL_GIANFAR_DEV_HAS_MULTI_INTR 0x00000008
+#define FSL_GIANFAR_DEV_HAS_CSUM 0x00000010
+#define FSL_GIANFAR_DEV_HAS_VLAN 0x00000020
+#define FSL_GIANFAR_DEV_HAS_EXTENDED_HASH 0x00000040
+#define FSL_GIANFAR_DEV_HAS_MAGIC_PACKET 0x00000100
+#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200
+#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400
+#define FSL_GIANFAR_DEV_HAS_TIMER 0x00000800
+#define FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER 0x00001000
+#define FSL_GIANFAR_DEV_HAS_RX_FILER 0x00002000
+
+#if (MAXGROUPS == 2)
+#define DEFAULT_MAPPING 0xAA
+#else
+#define DEFAULT_MAPPING 0xFF
+#endif
+
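+/* MSB positions of the queue 0 bits in an interrupt steering group register;
+ * gfar_write_isrg() shifts them right by the Rx/Tx queue index.
+ */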
+#define ISRG_RR0 0x80000000
+#define ISRG_TR0 0x00800000
+
+/* The same driver can operate in two modes:
+ * SQ_SG_MODE: Single Queue Single Group mode
+ * (backward compatible mode)
+ * MQ_MG_MODE: Multi Queue Multi Group mode
+ */
+enum {
+ SQ_SG_MODE = 0,
+ MQ_MG_MODE
+};
+
+/*
+ * Per TX queue stats
+ */
+struct tx_q_stats {
+ u64 tx_packets;
+ u64 tx_bytes;
+};
+
+/**
+ * struct gfar_priv_tx_q - per tx queue structure
+ * @txlock: per queue tx spin lock
+ * @tx_skbuff: skb pointers
+ * @skb_curtx: to be used skb pointer
+ * @skb_dirtytx: the last used skb pointer
+ * @stats: bytes/packets stats
+ * @qindex: index of this queue
+ * @dev: back pointer to the dev structure
+ * @grp: back pointer to the group to which this queue belongs
+ * @tx_bd_base: First tx buffer descriptor
+ * @cur_tx: Next free ring entry
+ * @dirty_tx: First buffer in line to be transmitted
+ * @tx_ring_size: Tx ring size
+ * @num_txbdfree: number of free TxBds
+ * @txcoalescing: enable/disable tx coalescing
+ * @txic: transmit interrupt coalescing value
+ * @txcount: coalescing value if based on tx frame count
+ * @txtime: coalescing value if based on time
+ */
+struct gfar_priv_tx_q {
+ /* cacheline 1 */
+ spinlock_t txlock __attribute__ ((aligned (SMP_CACHE_BYTES)));
+ struct txbd8 *tx_bd_base;
+ struct txbd8 *cur_tx;
+ unsigned int num_txbdfree;
+ unsigned short skb_curtx;
+ unsigned short tx_ring_size;
+ struct tx_q_stats stats;
+ struct gfar_priv_grp *grp;
+ /* cacheline 2 */
+ struct net_device *dev;
+ struct sk_buff **tx_skbuff;
+ struct txbd8 *dirty_tx;
+ unsigned short skb_dirtytx;
+ unsigned short qindex;
+ /* Configuration info for the coalescing features */
+ unsigned int txcoalescing;
+ unsigned long txic;
+ dma_addr_t tx_bd_dma_base;
+};
+
+/*
+ * Per RX queue stats
+ */
+struct rx_q_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_dropped;
+};
+
+struct gfar_rx_buff {
+ dma_addr_t dma;
+ struct page *page;
+ unsigned int page_offset;
+};
+
+/**
+ * struct gfar_priv_rx_q - per rx queue structure
+ * @rx_buff: Array of buffer info metadata structs
+ * @rx_bd_base: First rx buffer descriptor
+ * @next_to_use: index of the next buffer to be alloc'd
+ * @next_to_clean: index of the next buffer to be cleaned
+ * @qindex: index of this queue
+ * @ndev: back pointer to net_device
+ * @rx_ring_size: Rx ring size
+ * @rxcoalescing: enable/disable rx-coalescing
+ * @rxic: receive interrupt coalescing value
+ */
+
+struct gfar_priv_rx_q {
+ struct gfar_rx_buff *rx_buff __aligned(SMP_CACHE_BYTES);
+ struct rxbd8 *rx_bd_base;
+ struct net_device *ndev;
+ struct device *dev;
+ u16 rx_ring_size;
+ u16 qindex;
+ struct gfar_priv_grp *grp;
+ u16 next_to_clean;
+ u16 next_to_use;
+ u16 next_to_alloc;
+ struct sk_buff *skb;
+ struct rx_q_stats stats;
+ u32 __iomem *rfbptr;
+ unsigned char rxcoalescing;
+ unsigned long rxic;
+ dma_addr_t rx_bd_dma_base;
+};
+
+enum gfar_irqinfo_id {
+ GFAR_TX = 0,
+ GFAR_RX = 1,
+ GFAR_ER = 2,
+ GFAR_NUM_IRQS = 3
+};
+
+struct gfar_irqinfo {
+ unsigned int irq;
+ char name[GFAR_INT_NAME_MAX];
+};
+
+/**
+ * struct gfar_priv_grp - per group structure
+ * @napi: the napi poll function
+ * @priv: back pointer to the priv structure
+ * @regs: the ioremapped register space for this group
+ * @irqinfo: TX/RX/ER irq data for this group
+ */
+
+struct gfar_priv_grp {
+ spinlock_t grplock __aligned(SMP_CACHE_BYTES);
+ struct napi_struct napi_rx;
+ struct napi_struct napi_tx;
+ struct gfar __iomem *regs;
+ struct gfar_priv_tx_q *tx_queue;
+ struct gfar_priv_rx_q *rx_queue;
+ unsigned int tstat;
+ unsigned int rstat;
+
+ struct gfar_private *priv;
+ unsigned long num_tx_queues;
+ unsigned long tx_bit_map;
+ unsigned long num_rx_queues;
+ unsigned long rx_bit_map;
+
+ struct gfar_irqinfo *irqinfo[GFAR_NUM_IRQS];
+};
+
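+/* Access a group's per-type IRQ info,
+ * e.g. gfar_irq(grp, TX)->irq is the group's Tx interrupt line.
+ */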
+#define gfar_irq(grp, ID) \
+ ((grp)->irqinfo[GFAR_##ID])
+
+enum gfar_errata {
+ GFAR_ERRATA_74 = 0x01,
+ GFAR_ERRATA_76 = 0x02,
+ GFAR_ERRATA_A002 = 0x04,
+ GFAR_ERRATA_12 = 0x08, /* a.k.a errata eTSEC49 */
+};
+
+enum gfar_dev_state {
+ GFAR_DOWN = 1,
+ GFAR_RESETTING
+};
+
+/* Struct stolen almost completely (and shamelessly) from the FCC enet source
+ * (Ok, that's not so true anymore, but there is a family resemblance)
+ * The GFAR buffer descriptors track the ring buffers. The rx_bd_base
+ * and tx_bd_base always point to the currently available buffer.
+ * The dirty_tx tracks the current buffer that is being sent by the
+ * controller. The cur_tx and dirty_tx are equal under both completely
+ * empty and completely full conditions. The empty/ready indicator in
+ * the buffer descriptor determines the actual condition.
+ */
+struct gfar_private {
+ struct device *dev;
+ struct net_device *ndev;
+ enum gfar_errata errata;
+
+ u16 uses_rxfcb;
+ u16 padding;
+ u32 device_flags;
+
+ /* HW time stamping enabled flag */
+ int hwts_rx_en;
+ int hwts_tx_en;
+
+ struct gfar_priv_tx_q *tx_queue[MAX_TX_QS];
+ struct gfar_priv_rx_q *rx_queue[MAX_RX_QS];
+ struct gfar_priv_grp gfargrp[MAXGROUPS];
+
+ unsigned long state;
+
+ unsigned short mode;
+ unsigned int num_tx_queues;
+ unsigned int num_rx_queues;
+ unsigned int num_grps;
+ int tx_actual_en;
+
+ /* Network Statistics */
+ struct gfar_extra_stats extra_stats;
+ struct rmon_overflow rmon_overflow;
+
+ /* PHY stuff */
+ phy_interface_t interface;
+ struct device_node *phy_node;
+ struct device_node *tbi_node;
+ struct mii_bus *mii_bus;
+ int oldspeed;
+ int oldduplex;
+ int oldlink;
+
+ uint32_t msg_enable;
+
+ struct work_struct reset_task;
+
+ struct platform_device *ofdev;
+ unsigned char
+ extended_hash:1,
+ bd_stash_en:1,
+ rx_filer_enable:1,
+ /* Enable priority based Tx scheduling in HW */
+ prio_sched_en:1,
+ /* Flow control flags */
+ pause_aneg_en:1,
+ tx_pause_en:1,
+ rx_pause_en:1;
+
+ /* The total tx and rx ring size for the enabled queues */
+ unsigned int total_tx_ring_size;
+ unsigned int total_rx_ring_size;
+
+ u32 rqueue;
+ u32 tqueue;
+
+ /* RX per device parameters */
+ unsigned int rx_stash_size;
+ unsigned int rx_stash_index;
+
+ u32 cur_filer_idx;
+
+ /* RX queue filer rule set*/
+ struct ethtool_rx_list rx_list;
+ struct mutex rx_queue_access;
+
+ /* Hash registers and their width */
+ u32 __iomem *hash_regs[16];
+ int hash_width;
+
+ /* wake-on-lan settings */
+ u16 wol_opts;
+ u16 wol_supported;
+
+ /*Filer table*/
+ unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
+ unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
+};
+
+
+static inline int gfar_has_errata(struct gfar_private *priv,
+ enum gfar_errata err)
+{
+ return priv->errata & err;
+}
+
+static inline u32 gfar_read(unsigned __iomem *addr)
+{
+ u32 val;
+ val = ioread32be(addr);
+ return val;
+}
+
+static inline void gfar_write(unsigned __iomem *addr, u32 val)
+{
+ iowrite32be(val, addr);
+}
+
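+/* Program one Rx filer table entry: select the entry index via RQFAR,
+ * then write its control (RQFCR) and property (RQFPR) words.
+ */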
+static inline void gfar_write_filer(struct gfar_private *priv,
+ unsigned int far, unsigned int fcr, unsigned int fpr)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+
+ gfar_write(&regs->rqfar, far);
+ gfar_write(&regs->rqfcr, fcr);
+ gfar_write(&regs->rqfpr, fpr);
+}
+
+static inline void gfar_read_filer(struct gfar_private *priv,
+ unsigned int far, unsigned int *fcr, unsigned int *fpr)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+
+ gfar_write(&regs->rqfar, far);
+ *fcr = gfar_read(&regs->rqfcr);
+ *fpr = gfar_read(&regs->rqfpr);
+}
+
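+/* Map each group's Rx/Tx queues into its interrupt steering group register
+ * (one ISRGn register per interrupt group).
+ */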
+static inline void gfar_write_isrg(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 __iomem *baddr = &regs->isrg0;
+ u32 isrg = 0;
+ int grp_idx, i;
+
+ for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
+ struct gfar_priv_grp *grp = &priv->gfargrp[grp_idx];
+
+ for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
+ isrg |= (ISRG_RR0 >> i);
+ }
+
+ for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
+ isrg |= (ISRG_TR0 >> i);
+ }
+
+ gfar_write(baddr, isrg);
+
+ baddr++;
+ isrg = 0;
+ }
+}
+
+static inline int gfar_is_dma_stopped(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+
+ return ((gfar_read(&regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC)) ==
+ (IEVENT_GRSC | IEVENT_GTSC));
+}
+
+static inline int gfar_is_rx_dma_stopped(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+
+ return gfar_read(&regs->ievent) & IEVENT_GRSC;
+}
+
+static inline void gfar_wmb(void)
+{
+#if defined(CONFIG_PPC)
+ /* The powerpc-specific eieio() is used, as wmb() has too strong
+ * semantics (it requires synchronization between cacheable and
+ * uncacheable mappings, which eieio() doesn't provide and which we
+ * don't need), thus requiring a more expensive sync instruction. At
+ * some point, the set of architecture-independent barrier functions
+ * should be expanded to include weaker barriers.
+ */
+ eieio();
+#else
+ wmb(); /* order write accesses for BD (or FCB) fields */
+#endif
+}
+
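+/* Clear a TxBD's length/status word, preserving only the WRAP flag */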
+static inline void gfar_clear_txbd_status(struct txbd8 *bdp)
+{
+ u32 lstatus = be32_to_cpu(bdp->lstatus);
+
+ lstatus &= BD_LFLAG(TXBD_WRAP);
+ bdp->lstatus = cpu_to_be32(lstatus);
+}
+
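+/* Number of unused Rx BDs in the ring, keeping a one-descriptor gap
+ * between next_to_use and next_to_clean.
+ */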
+static inline int gfar_rxbd_unused(struct gfar_priv_rx_q *rxq)
+{
+ if (rxq->next_to_clean > rxq->next_to_use)
+ return rxq->next_to_clean - rxq->next_to_use - 1;
+
+ return rxq->rx_ring_size + rxq->next_to_clean - rxq->next_to_use - 1;
+}
+
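+/* DMA address of the last free RxBD, i.e. the descriptor before next_to_use */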
+static inline u32 gfar_rxbd_dma_lastfree(struct gfar_priv_rx_q *rxq)
+{
+ struct rxbd8 *bdp;
+ u32 bdp_dma;
+ int i;
+
+ i = rxq->next_to_use ? rxq->next_to_use - 1 : rxq->rx_ring_size - 1;
+ bdp = &rxq->rx_bd_base[i];
+ bdp_dma = lower_32_bits(rxq->rx_bd_dma_base);
+ bdp_dma += (uintptr_t)bdp - (uintptr_t)rxq->rx_bd_base;
+
+ return bdp_dma;
+}
+
+int startup_gfar(struct net_device *dev);
+void stop_gfar(struct net_device *dev);
+void gfar_mac_reset(struct gfar_private *priv);
+int gfar_set_features(struct net_device *dev, netdev_features_t features);
+
+extern const struct ethtool_ops gfar_ethtool_ops;
+
+#define MAX_FILER_CACHE_IDX (2*(MAX_FILER_IDX))
+
+#define RQFCR_PID_PRI_MASK 0xFFFFFFF8
+#define RQFCR_PID_L4P_MASK 0xFFFFFF00
+#define RQFCR_PID_VID_MASK 0xFFFFF000
+#define RQFCR_PID_PORT_MASK 0xFFFF0000
+#define RQFCR_PID_MAC_MASK 0xFF000000
+
+/* Represents a receive filer table entry */
+struct gfar_filer_entry {
+ u32 ctrl;
+ u32 prop;
+};
+
+
+/* The 20 additional entries are a shadow for one extra element */
+struct filer_table {
+ u32 index;
+ struct gfar_filer_entry fe[MAX_FILER_CACHE_IDX + 20];
+};
+
+#endif /* __GIANFAR_H */
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
new file mode 100644
index 000000000..b2b0d3c26
--- /dev/null
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -0,0 +1,1516 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * drivers/net/ethernet/freescale/gianfar_ethtool.c
+ *
+ * Gianfar Ethernet Driver
+ * Ethtool support for Gianfar Enet
+ * Based on e1000 ethtool support
+ *
+ * Author: Andy Fleming
+ * Maintainer: Kumar Gala
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
+ *
+ * Copyright 2003-2006, 2008-2009, 2011 Freescale Semiconductor, Inc.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/crc32.h>
+#include <asm/types.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/sort.h>
+#include <linux/if_vlan.h>
+#include <linux/of_platform.h>
+#include <linux/fsl/ptp_qoriq.h>
+
+#include "gianfar.h"
+
+#define GFAR_MAX_COAL_USECS 0xffff
+#define GFAR_MAX_COAL_FRAMES 0xff
+
+static const char stat_gstrings[][ETH_GSTRING_LEN] = {
+ /* extra stats */
+ "rx-allocation-errors",
+ "rx-large-frame-errors",
+ "rx-short-frame-errors",
+ "rx-non-octet-errors",
+ "rx-crc-errors",
+ "rx-overrun-errors",
+ "rx-busy-errors",
+ "rx-babbling-errors",
+ "rx-truncated-frames",
+ "ethernet-bus-error",
+ "tx-babbling-errors",
+ "tx-underrun-errors",
+ "tx-timeout-errors",
+ /* rmon stats */
+ "tx-rx-64-frames",
+ "tx-rx-65-127-frames",
+ "tx-rx-128-255-frames",
+ "tx-rx-256-511-frames",
+ "tx-rx-512-1023-frames",
+ "tx-rx-1024-1518-frames",
+ "tx-rx-1519-1522-good-vlan",
+ "rx-bytes",
+ "rx-packets",
+ "rx-fcs-errors",
+ "receive-multicast-packet",
+ "receive-broadcast-packet",
+ "rx-control-frame-packets",
+ "rx-pause-frame-packets",
+ "rx-unknown-op-code",
+ "rx-alignment-error",
+ "rx-frame-length-error",
+ "rx-code-error",
+ "rx-carrier-sense-error",
+ "rx-undersize-packets",
+ "rx-oversize-packets",
+ "rx-fragmented-frames",
+ "rx-jabber-frames",
+ "rx-dropped-frames",
+ "tx-byte-counter",
+ "tx-packets",
+ "tx-multicast-packets",
+ "tx-broadcast-packets",
+ "tx-pause-control-frames",
+ "tx-deferral-packets",
+ "tx-excessive-deferral-packets",
+ "tx-single-collision-packets",
+ "tx-multiple-collision-packets",
+ "tx-late-collision-packets",
+ "tx-excessive-collision-packets",
+ "tx-total-collision",
+ "reserved",
+ "tx-dropped-frames",
+ "tx-jabber-frames",
+ "tx-fcs-errors",
+ "tx-control-frames",
+ "tx-oversize-frames",
+ "tx-undersize-frames",
+ "tx-fragmented-frames",
+};
+
+/* Fill in a buffer with the strings which correspond to the
+ * stats */
+static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
+ memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
+ else
+ memcpy(buf, stat_gstrings,
+ GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
+}
+
+/* Fill in an array of 64-bit statistics from various sources.
+ * This array will be appended to the end of the ethtool_stats
+ * structure, and returned to user space
+ */
+static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
+ u64 *buf)
+{
+ int i;
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ atomic64_t *extra = (atomic64_t *)&priv->extra_stats;
+
+ for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
+ buf[i] = atomic64_read(&extra[i]);
+
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
+ u32 __iomem *rmon = (u32 __iomem *) &regs->rmon;
+
+ for (; i < GFAR_STATS_LEN; i++, rmon++)
+ buf[i] = (u64) gfar_read(rmon);
+ }
+}
+
+static int gfar_sset_count(struct net_device *dev, int sset)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
+ return GFAR_STATS_LEN;
+ else
+ return GFAR_EXTRA_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* Fills in the drvinfo structure with some basic info */
+static void gfar_gdrvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+}
+
+/* Return the length of the register structure */
+static int gfar_reglen(struct net_device *dev)
+{
+ return sizeof (struct gfar);
+}
+
+/* Return a dump of the GFAR register space */
+static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *regbuf)
+{
+ int i;
+ struct gfar_private *priv = netdev_priv(dev);
+ u32 __iomem *theregs = (u32 __iomem *) priv->gfargrp[0].regs;
+ u32 *buf = (u32 *) regbuf;
+
+ for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++)
+ buf[i] = gfar_read(&theregs[i]);
+}
+
+/* Convert microseconds to ethernet clock ticks, which changes
+ * depending on what speed the controller is running at */
+static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
+ unsigned int usecs)
+{
+ struct net_device *ndev = priv->ndev;
+ struct phy_device *phydev = ndev->phydev;
+ unsigned int count;
+
+ /* The timer is different, depending on the interface speed */
+ switch (phydev->speed) {
+ case SPEED_1000:
+ count = GFAR_GBIT_TIME;
+ break;
+ case SPEED_100:
+ count = GFAR_100_TIME;
+ break;
+ case SPEED_10:
+ default:
+ count = GFAR_10_TIME;
+ break;
+ }
+
+ /* Make sure we return a number greater than 0
+ * if usecs > 0 */
+ return DIV_ROUND_UP(usecs * 1000, count);
+}
+
+/* Convert ethernet clock ticks to microseconds */
+static unsigned int gfar_ticks2usecs(struct gfar_private *priv,
+ unsigned int ticks)
+{
+ struct net_device *ndev = priv->ndev;
+ struct phy_device *phydev = ndev->phydev;
+ unsigned int count;
+
+ /* The timer is different, depending on the interface speed */
+ switch (phydev->speed) {
+ case SPEED_1000:
+ count = GFAR_GBIT_TIME;
+ break;
+ case SPEED_100:
+ count = GFAR_100_TIME;
+ break;
+ case SPEED_10:
+ default:
+ count = GFAR_10_TIME;
+ break;
+ }
+
+ /* Make sure we return a number greater than 0
+ * if ticks > 0
+ */
+ return (ticks * count) / 1000;
+}
+
+/* Get the coalescing parameters, and put them in the cvals
+ * structure. */
+static int gfar_gcoalesce(struct net_device *dev,
+ struct ethtool_coalesce *cvals,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar_priv_rx_q *rx_queue = NULL;
+ struct gfar_priv_tx_q *tx_queue = NULL;
+ unsigned long rxtime;
+ unsigned long rxcount;
+ unsigned long txtime;
+ unsigned long txcount;
+
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
+ return -EOPNOTSUPP;
+
+ if (!dev->phydev)
+ return -ENODEV;
+
+ rx_queue = priv->rx_queue[0];
+ tx_queue = priv->tx_queue[0];
+
+ rxtime = get_ictt_value(rx_queue->rxic);
+ rxcount = get_icft_value(rx_queue->rxic);
+ txtime = get_ictt_value(tx_queue->txic);
+ txcount = get_icft_value(tx_queue->txic);
+ cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
+ cvals->rx_max_coalesced_frames = rxcount;
+
+ cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, txtime);
+ cvals->tx_max_coalesced_frames = txcount;
+
+ return 0;
+}
+
+/* Change the coalescing values.
+ * Both cvals->*_usecs and cvals->*_frames have to be > 0
+ * in order for coalescing to be active
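+ * (e.g. as set from user space via "ethtool -C <iface> rx-usecs N rx-frames M")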
+ */
+static int gfar_scoalesce(struct net_device *dev,
+ struct ethtool_coalesce *cvals,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ int i, err = 0;
+
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
+ return -EOPNOTSUPP;
+
+ if (!dev->phydev)
+ return -ENODEV;
+
+ /* Check the bounds of the values */
+ if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
+ netdev_info(dev, "Coalescing is limited to %d microseconds\n",
+ GFAR_MAX_COAL_USECS);
+ return -EINVAL;
+ }
+
+ if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
+ netdev_info(dev, "Coalescing is limited to %d frames\n",
+ GFAR_MAX_COAL_FRAMES);
+ return -EINVAL;
+ }
+
+ /* Check the bounds of the values */
+ if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
+ netdev_info(dev, "Coalescing is limited to %d microseconds\n",
+ GFAR_MAX_COAL_USECS);
+ return -EINVAL;
+ }
+
+ if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
+ netdev_info(dev, "Coalescing is limited to %d frames\n",
+ GFAR_MAX_COAL_FRAMES);
+ return -EINVAL;
+ }
+
+ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+ cpu_relax();
+
+ /* Set up rx coalescing */
+ if ((cvals->rx_coalesce_usecs == 0) ||
+ (cvals->rx_max_coalesced_frames == 0)) {
+ for (i = 0; i < priv->num_rx_queues; i++)
+ priv->rx_queue[i]->rxcoalescing = 0;
+ } else {
+ for (i = 0; i < priv->num_rx_queues; i++)
+ priv->rx_queue[i]->rxcoalescing = 1;
+ }
+
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ priv->rx_queue[i]->rxic = mk_ic_value(
+ cvals->rx_max_coalesced_frames,
+ gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
+ }
+
+ /* Set up tx coalescing */
+ if ((cvals->tx_coalesce_usecs == 0) ||
+ (cvals->tx_max_coalesced_frames == 0)) {
+ for (i = 0; i < priv->num_tx_queues; i++)
+ priv->tx_queue[i]->txcoalescing = 0;
+ } else {
+ for (i = 0; i < priv->num_tx_queues; i++)
+ priv->tx_queue[i]->txcoalescing = 1;
+ }
+
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ priv->tx_queue[i]->txic = mk_ic_value(
+ cvals->tx_max_coalesced_frames,
+ gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
+ }
+
+ if (dev->flags & IFF_UP) {
+ stop_gfar(dev);
+ err = startup_gfar(dev);
+ } else {
+ gfar_mac_reset(priv);
+ }
+
+ clear_bit_unlock(GFAR_RESETTING, &priv->state);
+
+ return err;
+}
+
+/* Fills in rvals with the current ring parameters. Currently,
+ * rx, rx_mini, and rx_jumbo rings are the same size, as mini and
+ * jumbo are ignored by the driver */
+static void gfar_gringparam(struct net_device *dev,
+ struct ethtool_ringparam *rvals,
+ struct kernel_ethtool_ringparam *kernel_rvals,
+ struct netlink_ext_ack *extack)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar_priv_tx_q *tx_queue = NULL;
+ struct gfar_priv_rx_q *rx_queue = NULL;
+
+ tx_queue = priv->tx_queue[0];
+ rx_queue = priv->rx_queue[0];
+
+ rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
+ rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
+ rvals->rx_jumbo_max_pending = GFAR_RX_MAX_RING_SIZE;
+ rvals->tx_max_pending = GFAR_TX_MAX_RING_SIZE;
+
+ /* Values changeable by the user. The valid values are
+ * in the range 1 to the "*_max_pending" counterpart above.
+ */
+ rvals->rx_pending = rx_queue->rx_ring_size;
+ rvals->rx_mini_pending = rx_queue->rx_ring_size;
+ rvals->rx_jumbo_pending = rx_queue->rx_ring_size;
+ rvals->tx_pending = tx_queue->tx_ring_size;
+}
+
+/* Change the current ring parameters, stopping the controller if
+ * necessary so that we don't mess things up while we're in motion.
+ */
+static int gfar_sringparam(struct net_device *dev,
+ struct ethtool_ringparam *rvals,
+ struct kernel_ethtool_ringparam *kernel_rvals,
+ struct netlink_ext_ack *extack)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ int err = 0, i;
+
+ if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
+ return -EINVAL;
+
+ if (!is_power_of_2(rvals->rx_pending)) {
+ netdev_err(dev, "Ring sizes must be a power of 2\n");
+ return -EINVAL;
+ }
+
+ if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE)
+ return -EINVAL;
+
+ if (!is_power_of_2(rvals->tx_pending)) {
+ netdev_err(dev, "Ring sizes must be a power of 2\n");
+ return -EINVAL;
+ }
+
+ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+ cpu_relax();
+
+ if (dev->flags & IFF_UP)
+ stop_gfar(dev);
+
+ /* Change the sizes */
+ for (i = 0; i < priv->num_rx_queues; i++)
+ priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
+
+ for (i = 0; i < priv->num_tx_queues; i++)
+ priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
+
+ /* Rebuild the rings with the new size */
+ if (dev->flags & IFF_UP)
+ err = startup_gfar(dev);
+
+ clear_bit_unlock(GFAR_RESETTING, &priv->state);
+
+ return err;
+}
+
+static void gfar_gpauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ epause->autoneg = !!priv->pause_aneg_en;
+ epause->rx_pause = !!priv->rx_pause_en;
+ epause->tx_pause = !!priv->tx_pause_en;
+}
+
+static int gfar_spauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+
+ if (!phydev)
+ return -ENODEV;
+
+ if (!phy_validate_pause(phydev, epause))
+ return -EINVAL;
+
+ priv->rx_pause_en = priv->tx_pause_en = 0;
+ phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
+ if (epause->rx_pause) {
+ priv->rx_pause_en = 1;
+
+ if (epause->tx_pause) {
+ priv->tx_pause_en = 1;
+ }
+ } else if (epause->tx_pause) {
+ priv->tx_pause_en = 1;
+ }
+
+ if (epause->autoneg)
+ priv->pause_aneg_en = 1;
+ else
+ priv->pause_aneg_en = 0;
+
+ if (!epause->autoneg) {
+ u32 tempval = gfar_read(&regs->maccfg1);
+
+ tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+
+ priv->tx_actual_en = 0;
+ if (priv->tx_pause_en) {
+ priv->tx_actual_en = 1;
+ tempval |= MACCFG1_TX_FLOW;
+ }
+
+ if (priv->rx_pause_en)
+ tempval |= MACCFG1_RX_FLOW;
+ gfar_write(&regs->maccfg1, tempval);
+ }
+
+ return 0;
+}
+
+int gfar_set_features(struct net_device *dev, netdev_features_t features)
+{
+ netdev_features_t changed = dev->features ^ features;
+ struct gfar_private *priv = netdev_priv(dev);
+ int err = 0;
+
+ if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_RXCSUM)))
+ return 0;
+
+ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+ cpu_relax();
+
+ dev->features = features;
+
+ if (dev->flags & IFF_UP) {
+ /* Now we take down the rings to rebuild them */
+ stop_gfar(dev);
+ err = startup_gfar(dev);
+ } else {
+ gfar_mac_reset(priv);
+ }
+
+ clear_bit_unlock(GFAR_RESETTING, &priv->state);
+
+ return err;
+}
+
+static uint32_t gfar_get_msglevel(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ return priv->msg_enable;
+}
+
+static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ priv->msg_enable = data;
+}
+
+#ifdef CONFIG_PM
+static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ if (priv->wol_supported & GFAR_WOL_MAGIC)
+ wol->supported |= WAKE_MAGIC;
+
+ if (priv->wol_supported & GFAR_WOL_FILER_UCAST)
+ wol->supported |= WAKE_UCAST;
+
+ if (priv->wol_opts & GFAR_WOL_MAGIC)
+ wol->wolopts |= WAKE_MAGIC;
+
+ if (priv->wol_opts & GFAR_WOL_FILER_UCAST)
+ wol->wolopts |= WAKE_UCAST;
+}
+
+static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ u16 wol_opts = 0;
+ int err;
+
+ if (!priv->wol_supported && wol->wolopts)
+ return -EINVAL;
+
+ if (wol->wolopts & ~(WAKE_MAGIC | WAKE_UCAST))
+ return -EINVAL;
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ wol_opts |= GFAR_WOL_MAGIC;
+ } else {
+ if (wol->wolopts & WAKE_UCAST)
+ wol_opts |= GFAR_WOL_FILER_UCAST;
+ }
+
+ wol_opts &= priv->wol_supported;
+ priv->wol_opts = 0;
+
+ err = device_set_wakeup_enable(priv->dev, wol_opts);
+ if (err)
+ return err;
+
+ priv->wol_opts = wol_opts;
+
+ return 0;
+}
+#endif
+
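+/* Append a hash filer rule for every RXH_* field enabled in ethflow,
+ * walking priv->cur_filer_idx downwards.
+ */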
+static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
+{
+ u32 fcr = 0x0, fpr = FPR_FILER_MASK;
+
+ if (ethflow & RXH_L2DA) {
+ fcr = RQFCR_PID_DAH | RQFCR_CMP_NOMATCH |
+ RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+
+ fcr = RQFCR_PID_DAL | RQFCR_CMP_NOMATCH |
+ RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+ }
+
+ if (ethflow & RXH_VLAN) {
+ fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+ RQFCR_AND | RQFCR_HASHTBL_0;
+ gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+ }
+
+ if (ethflow & RXH_IP_SRC) {
+ fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+ RQFCR_AND | RQFCR_HASHTBL_0;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+ }
+
+ if (ethflow & (RXH_IP_DST)) {
+ fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+ RQFCR_AND | RQFCR_HASHTBL_0;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+ }
+
+ if (ethflow & RXH_L3_PROTO) {
+ fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+ RQFCR_AND | RQFCR_HASHTBL_0;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+ }
+
+ if (ethflow & RXH_L4_B_0_1) {
+ fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+ RQFCR_AND | RQFCR_HASHTBL_0;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+ }
+
+ if (ethflow & RXH_L4_B_2_3) {
+ fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+ RQFCR_AND | RQFCR_HASHTBL_0;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+ }
+}
+
+static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
+ u64 class)
+{
+ unsigned int cmp_rqfpr;
+ unsigned int *local_rqfpr;
+ unsigned int *local_rqfcr;
+ int i = 0x0, k = 0x0;
+ int j = MAX_FILER_IDX, l = 0x0;
+ int ret = 1;
+
+ local_rqfpr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
+ GFP_KERNEL);
+ local_rqfcr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
+ GFP_KERNEL);
+ if (!local_rqfpr || !local_rqfcr) {
+ ret = 0;
+ goto err;
+ }
+
+ switch (class) {
+ case TCP_V4_FLOW:
+ cmp_rqfpr = RQFPR_IPV4 |RQFPR_TCP;
+ break;
+ case UDP_V4_FLOW:
+ cmp_rqfpr = RQFPR_IPV4 |RQFPR_UDP;
+ break;
+ case TCP_V6_FLOW:
+ cmp_rqfpr = RQFPR_IPV6 |RQFPR_TCP;
+ break;
+ case UDP_V6_FLOW:
+ cmp_rqfpr = RQFPR_IPV6 |RQFPR_UDP;
+ break;
+ default:
+ netdev_err(priv->ndev,
+ "Right now this class is not supported\n");
+ ret = 0;
+ goto err;
+ }
+
+ for (i = 0; i < MAX_FILER_IDX + 1; i++) {
+ local_rqfpr[j] = priv->ftp_rqfpr[i];
+ local_rqfcr[j] = priv->ftp_rqfcr[i];
+ j--;
+ if ((priv->ftp_rqfcr[i] ==
+ (RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND)) &&
+ (priv->ftp_rqfpr[i] == cmp_rqfpr))
+ break;
+ }
+
+ if (i == MAX_FILER_IDX + 1) {
+ netdev_err(priv->ndev,
+ "No parse rule found, can't create hash rules\n");
+ ret = 0;
+ goto err;
+ }
+
+ /* If a match was found, it marks the start of a cluster rule;
+ * if one was already programmed, we need to overwrite those rules
+ */
+ for (l = i+1; l < MAX_FILER_IDX; l++) {
+ if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
+ !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
+ priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
+ RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
+ priv->ftp_rqfpr[l] = FPR_FILER_MASK;
+ gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
+ priv->ftp_rqfpr[l]);
+ break;
+ }
+
+ if (!(priv->ftp_rqfcr[l] & RQFCR_CLE) &&
+ (priv->ftp_rqfcr[l] & RQFCR_AND))
+ continue;
+ else {
+ local_rqfpr[j] = priv->ftp_rqfpr[l];
+ local_rqfcr[j] = priv->ftp_rqfcr[l];
+ j--;
+ }
+ }
+
+ priv->cur_filer_idx = l - 1;
+
+ /* hash rules */
+ ethflow_to_filer_rules(priv, ethflow);
+
+ /* Write back the popped out rules again */
+ for (k = j+1; k < MAX_FILER_IDX; k++) {
+ priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
+ priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
+ gfar_write_filer(priv, priv->cur_filer_idx,
+ local_rqfcr[k], local_rqfpr[k]);
+ if (!priv->cur_filer_idx)
+ break;
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+ }
+
+err:
+ kfree(local_rqfcr);
+ kfree(local_rqfpr);
+ return ret;
+}
+
+static int gfar_set_hash_opts(struct gfar_private *priv,
+ struct ethtool_rxnfc *cmd)
+{
+ /* write the filer rules here */
+ if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int gfar_check_filer_hardware(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 i;
+
+ /* Check if we are in FIFO mode */
+ i = gfar_read(&regs->ecntrl);
+ i &= ECNTRL_FIFM;
+ if (i == ECNTRL_FIFM) {
+ netdev_notice(priv->ndev, "Interface in FIFO mode\n");
+ i = gfar_read(&regs->rctrl);
+ i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM;
+ if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) {
+ netdev_info(priv->ndev,
+ "Receive Queue Filtering enabled\n");
+ } else {
+ netdev_warn(priv->ndev,
+ "Receive Queue Filtering disabled\n");
+ return -EOPNOTSUPP;
+ }
+ }
+ /* Or in standard mode */
+ else {
+ i = gfar_read(&regs->rctrl);
+ i &= RCTRL_PRSDEP_MASK;
+ if (i == RCTRL_PRSDEP_MASK) {
+ netdev_info(priv->ndev,
+ "Receive Queue Filtering enabled\n");
+ } else {
+ netdev_warn(priv->ndev,
+ "Receive Queue Filtering disabled\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ /* Sets the properties for arbitrary filer rule
+ * to the first 4 Layer 4 Bytes
+ */
+ gfar_write(&regs->rbifx, 0xC0C1C2C3);
+ return 0;
+}
+
+/* Write a mask to filer cache */
+static void gfar_set_mask(u32 mask, struct filer_table *tab)
+{
+ tab->fe[tab->index].ctrl = RQFCR_AND | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
+ tab->fe[tab->index].prop = mask;
+ tab->index++;
+}
+
+/* Sets parse bits (e.g. IP or TCP) */
+static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab)
+{
+ gfar_set_mask(mask, tab);
+ tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE |
+ RQFCR_AND;
+ tab->fe[tab->index].prop = value;
+ tab->index++;
+}
+
+static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
+ struct filer_table *tab)
+{
+ gfar_set_mask(mask, tab);
+ tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag;
+ tab->fe[tab->index].prop = value;
+ tab->index++;
+}
+
+/* For setting a tuple of value and mask of type flag
+ * Example:
+ * IP-Src = 10.0.0.0/255.0.0.0
+ * value: 0x0A000000 mask: FF000000 flag: RQFPR_IPV4
+ *
+ * Ethtool gives us a value=0 and mask=~0 for a don't-care tuple;
+ * for a don't-care mask it gives us a 0.
+ *
+ * The don't-care check and the mask adjustment for mask=0 are done for VLAN
+ * and MAC fields at an upper level (the information needed is missing at
+ * this level). Those entries can be discarded when value=0 and mask=0.
+ *
+ * Furthermore, all masks are one-padded for better hardware efficiency.
+ */
+static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
+ struct filer_table *tab)
+{
+ switch (flag) {
+ /* 3bit */
+ case RQFCR_PID_PRI:
+ if (!(value | mask))
+ return;
+ mask |= RQFCR_PID_PRI_MASK;
+ break;
+ /* 8bit */
+ case RQFCR_PID_L4P:
+ case RQFCR_PID_TOS:
+ if (!~(mask | RQFCR_PID_L4P_MASK))
+ return;
+ if (!mask)
+ mask = ~0;
+ else
+ mask |= RQFCR_PID_L4P_MASK;
+ break;
+ /* 12bit */
+ case RQFCR_PID_VID:
+ if (!(value | mask))
+ return;
+ mask |= RQFCR_PID_VID_MASK;
+ break;
+ /* 16bit */
+ case RQFCR_PID_DPT:
+ case RQFCR_PID_SPT:
+ case RQFCR_PID_ETY:
+ if (!~(mask | RQFCR_PID_PORT_MASK))
+ return;
+ if (!mask)
+ mask = ~0;
+ else
+ mask |= RQFCR_PID_PORT_MASK;
+ break;
+ /* 24bit */
+ case RQFCR_PID_DAH:
+ case RQFCR_PID_DAL:
+ case RQFCR_PID_SAH:
+ case RQFCR_PID_SAL:
+ if (!(value | mask))
+ return;
+ mask |= RQFCR_PID_MAC_MASK;
+ break;
+ /* for all real 32bit masks */
+ default:
+ if (!~mask)
+ return;
+ if (!mask)
+ mask = ~0;
+ break;
+ }
+ gfar_set_general_attribute(value, mask, flag, tab);
+}
+
+/* Translates value and mask for UDP, TCP or SCTP */
+static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
+ struct ethtool_tcpip4_spec *mask,
+ struct filer_table *tab)
+{
+ gfar_set_attribute(be32_to_cpu(value->ip4src),
+ be32_to_cpu(mask->ip4src),
+ RQFCR_PID_SIA, tab);
+ gfar_set_attribute(be32_to_cpu(value->ip4dst),
+ be32_to_cpu(mask->ip4dst),
+ RQFCR_PID_DIA, tab);
+ gfar_set_attribute(be16_to_cpu(value->pdst),
+ be16_to_cpu(mask->pdst),
+ RQFCR_PID_DPT, tab);
+ gfar_set_attribute(be16_to_cpu(value->psrc),
+ be16_to_cpu(mask->psrc),
+ RQFCR_PID_SPT, tab);
+ gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
+}
+
+/* Translates value and mask for RAW-IP4 */
+static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
+ struct ethtool_usrip4_spec *mask,
+ struct filer_table *tab)
+{
+ gfar_set_attribute(be32_to_cpu(value->ip4src),
+ be32_to_cpu(mask->ip4src),
+ RQFCR_PID_SIA, tab);
+ gfar_set_attribute(be32_to_cpu(value->ip4dst),
+ be32_to_cpu(mask->ip4dst),
+ RQFCR_PID_DIA, tab);
+ gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
+ gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
+ gfar_set_attribute(be32_to_cpu(value->l4_4_bytes),
+ be32_to_cpu(mask->l4_4_bytes),
+ RQFCR_PID_ARB, tab);
+
+}
+
+/* Translates value and mask for ETHER spec */
+static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
+ struct filer_table *tab)
+{
+ u32 upper_temp_mask = 0;
+ u32 lower_temp_mask = 0;
+
+ /* Source address */
+ if (!is_broadcast_ether_addr(mask->h_source)) {
+ if (is_zero_ether_addr(mask->h_source)) {
+ upper_temp_mask = 0xFFFFFFFF;
+ lower_temp_mask = 0xFFFFFFFF;
+ } else {
+ upper_temp_mask = mask->h_source[0] << 16 |
+ mask->h_source[1] << 8 |
+ mask->h_source[2];
+ lower_temp_mask = mask->h_source[3] << 16 |
+ mask->h_source[4] << 8 |
+ mask->h_source[5];
+ }
+ /* Upper 24bit */
+ gfar_set_attribute(value->h_source[0] << 16 |
+ value->h_source[1] << 8 |
+ value->h_source[2],
+ upper_temp_mask, RQFCR_PID_SAH, tab);
+ /* And the same for the lower part */
+ gfar_set_attribute(value->h_source[3] << 16 |
+ value->h_source[4] << 8 |
+ value->h_source[5],
+ lower_temp_mask, RQFCR_PID_SAL, tab);
+ }
+ /* Destination address */
+ if (!is_broadcast_ether_addr(mask->h_dest)) {
+ /* Special for destination is limited broadcast */
+ if ((is_broadcast_ether_addr(value->h_dest) &&
+ is_zero_ether_addr(mask->h_dest))) {
+ gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab);
+ } else {
+ if (is_zero_ether_addr(mask->h_dest)) {
+ upper_temp_mask = 0xFFFFFFFF;
+ lower_temp_mask = 0xFFFFFFFF;
+ } else {
+ upper_temp_mask = mask->h_dest[0] << 16 |
+ mask->h_dest[1] << 8 |
+ mask->h_dest[2];
+ lower_temp_mask = mask->h_dest[3] << 16 |
+ mask->h_dest[4] << 8 |
+ mask->h_dest[5];
+ }
+
+ /* Upper 24bit */
+ gfar_set_attribute(value->h_dest[0] << 16 |
+ value->h_dest[1] << 8 |
+ value->h_dest[2],
+ upper_temp_mask, RQFCR_PID_DAH, tab);
+ /* And the same for the lower part */
+ gfar_set_attribute(value->h_dest[3] << 16 |
+ value->h_dest[4] << 8 |
+ value->h_dest[5],
+ lower_temp_mask, RQFCR_PID_DAL, tab);
+ }
+ }
+
+ gfar_set_attribute(be16_to_cpu(value->h_proto),
+ be16_to_cpu(mask->h_proto),
+ RQFCR_PID_ETY, tab);
+}
+
+static inline u32 vlan_tci_vid(struct ethtool_rx_flow_spec *rule)
+{
+ return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_VID_MASK;
+}
+
+static inline u32 vlan_tci_vidm(struct ethtool_rx_flow_spec *rule)
+{
+ return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_VID_MASK;
+}
+
+static inline u32 vlan_tci_cfi(struct ethtool_rx_flow_spec *rule)
+{
+ return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_CFI_MASK;
+}
+
+static inline u32 vlan_tci_cfim(struct ethtool_rx_flow_spec *rule)
+{
+ return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_CFI_MASK;
+}
+
+static inline u32 vlan_tci_prio(struct ethtool_rx_flow_spec *rule)
+{
+ return (be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_PRIO_MASK) >>
+ VLAN_PRIO_SHIFT;
+}
+
+static inline u32 vlan_tci_priom(struct ethtool_rx_flow_spec *rule)
+{
+ return (be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_PRIO_MASK) >>
+ VLAN_PRIO_SHIFT;
+}
+
+/* Convert a rule to binary filter format of gianfar */
+static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
+ struct filer_table *tab)
+{
+ u32 vlan = 0, vlan_mask = 0;
+ u32 id = 0, id_mask = 0;
+ u32 cfi = 0, cfi_mask = 0;
+ u32 prio = 0, prio_mask = 0;
+ u32 old_index = tab->index;
+
+ /* Check if vlan is wanted */
+ if ((rule->flow_type & FLOW_EXT) &&
+ (rule->m_ext.vlan_tci != cpu_to_be16(0xFFFF))) {
+ if (!rule->m_ext.vlan_tci)
+ rule->m_ext.vlan_tci = cpu_to_be16(0xFFFF);
+
+ vlan = RQFPR_VLN;
+ vlan_mask = RQFPR_VLN;
+
+ /* Separate the fields */
+ id = vlan_tci_vid(rule);
+ id_mask = vlan_tci_vidm(rule);
+ cfi = vlan_tci_cfi(rule);
+ cfi_mask = vlan_tci_cfim(rule);
+ prio = vlan_tci_prio(rule);
+ prio_mask = vlan_tci_priom(rule);
+
+ if (cfi_mask) {
+ if (cfi)
+ vlan |= RQFPR_CFI;
+ vlan_mask |= RQFPR_CFI;
+ }
+ }
+
+ switch (rule->flow_type & ~FLOW_EXT) {
+ case TCP_V4_FLOW:
+ gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan,
+ RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
+ gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec,
+ &rule->m_u.tcp_ip4_spec, tab);
+ break;
+ case UDP_V4_FLOW:
+ gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan,
+ RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
+ gfar_set_basic_ip(&rule->h_u.udp_ip4_spec,
+ &rule->m_u.udp_ip4_spec, tab);
+ break;
+ case SCTP_V4_FLOW:
+ gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
+ tab);
+ gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab);
+ gfar_set_basic_ip((struct ethtool_tcpip4_spec *)&rule->h_u,
+ (struct ethtool_tcpip4_spec *)&rule->m_u,
+ tab);
+ break;
+ case IP_USER_FLOW:
+ gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
+ tab);
+ gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u,
+ (struct ethtool_usrip4_spec *) &rule->m_u,
+ tab);
+ break;
+ case ETHER_FLOW:
+ if (vlan)
+ gfar_set_parse_bits(vlan, vlan_mask, tab);
+ gfar_set_ether((struct ethhdr *) &rule->h_u,
+ (struct ethhdr *) &rule->m_u, tab);
+ break;
+ default:
+ return -1;
+ }
+
+ /* Set the vlan attributes in the end */
+ if (vlan) {
+ gfar_set_attribute(id, id_mask, RQFCR_PID_VID, tab);
+ gfar_set_attribute(prio, prio_mask, RQFCR_PID_PRI, tab);
+ }
+
+ /* If there has been nothing written till now, it must be a default */
+ if (tab->index == old_index) {
+ gfar_set_mask(0xFFFFFFFF, tab);
+ tab->fe[tab->index].ctrl = 0x20;
+ tab->fe[tab->index].prop = 0x0;
+ tab->index++;
+ }
+
+ /* Remove last AND */
+ tab->fe[tab->index - 1].ctrl &= (~RQFCR_AND);
+
+ /* Specify which queue to use or to drop */
+ if (rule->ring_cookie == RX_CLS_FLOW_DISC)
+ tab->fe[tab->index - 1].ctrl |= RQFCR_RJE;
+ else
+ tab->fe[tab->index - 1].ctrl |= (rule->ring_cookie << 10);
+
+ /* Only big enough entries can be clustered */
+ if (tab->index > (old_index + 2)) {
+ tab->fe[old_index + 1].ctrl |= RQFCR_CLE;
+ tab->fe[tab->index - 1].ctrl |= RQFCR_CLE;
+ }
+
+ /* In rare cases the cache can be full while there is
+ * free space in hw
+ */
+ if (tab->index > MAX_FILER_CACHE_IDX - 1)
+ return -EBUSY;
+
+ return 0;
+}
+
+/* Write the bit-pattern from software's buffer to hardware registers */
+static int gfar_write_filer_table(struct gfar_private *priv,
+ struct filer_table *tab)
+{
+ u32 i = 0;
+ if (tab->index > MAX_FILER_IDX - 1)
+ return -EBUSY;
+
+ /* Fill regular entries */
+ for (; i < MAX_FILER_IDX && (tab->fe[i].ctrl | tab->fe[i].prop); i++)
+ gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
+ /* Fill the rest with fall-throughs */
+ for (; i < MAX_FILER_IDX; i++)
+ gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
+ /* Last entry must be default accept
+ * because that's what people expect
+ */
+ gfar_write_filer(priv, i, 0x20, 0x0);
+
+ return 0;
+}
+
+static int gfar_check_capability(struct ethtool_rx_flow_spec *flow,
+ struct gfar_private *priv)
+{
+
+ if (flow->flow_type & FLOW_EXT) {
+ if (~flow->m_ext.data[0] || ~flow->m_ext.data[1])
+ netdev_warn(priv->ndev,
+ "User-specific data not supported!\n");
+ if (~flow->m_ext.vlan_etype)
+ netdev_warn(priv->ndev,
+ "VLAN-etype not supported!\n");
+ }
+ if (flow->flow_type == IP_USER_FLOW)
+ if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
+ netdev_warn(priv->ndev,
+ "IP-Version differing from IPv4 not supported!\n");
+
+ return 0;
+}
+
+static int gfar_process_filer_changes(struct gfar_private *priv)
+{
+ struct ethtool_flow_spec_container *j;
+ struct filer_table *tab;
+ s32 ret = 0;
+
+ /* So index is set to zero, too! */
+ tab = kzalloc(sizeof(*tab), GFP_KERNEL);
+ if (tab == NULL)
+ return -ENOMEM;
+
+ /* Now convert the existing filer data from flow_spec into
+ * filer tables binary format
+ */
+ list_for_each_entry(j, &priv->rx_list.list, list) {
+ ret = gfar_convert_to_filer(&j->fs, tab);
+ if (ret == -EBUSY) {
+ netdev_err(priv->ndev,
+ "Rule not added: No free space!\n");
+ goto end;
+ }
+ if (ret == -1) {
+ netdev_err(priv->ndev,
+ "Rule not added: Unsupported Flow-type!\n");
+ goto end;
+ }
+ }
+
+ /* Write everything to hardware */
+ ret = gfar_write_filer_table(priv, tab);
+ if (ret == -EBUSY) {
+ netdev_err(priv->ndev, "Rule not added: No free space!\n");
+ goto end;
+ }
+
+end:
+ kfree(tab);
+ return ret;
+}
+
+static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
+{
+ u32 i = 0;
+
+ for (i = 0; i < sizeof(flow->m_u); i++)
+ flow->m_u.hdata[i] ^= 0xFF;
+
+ flow->m_ext.vlan_etype ^= cpu_to_be16(0xFFFF);
+ flow->m_ext.vlan_tci ^= cpu_to_be16(0xFFFF);
+ flow->m_ext.data[0] ^= cpu_to_be32(~0);
+ flow->m_ext.data[1] ^= cpu_to_be32(~0);
+}
+
+static int gfar_add_cls(struct gfar_private *priv,
+ struct ethtool_rx_flow_spec *flow)
+{
+ struct ethtool_flow_spec_container *temp, *comp;
+ int ret = 0;
+
+ temp = kmalloc(sizeof(*temp), GFP_KERNEL);
+ if (temp == NULL)
+ return -ENOMEM;
+ memcpy(&temp->fs, flow, sizeof(temp->fs));
+
+ gfar_invert_masks(&temp->fs);
+ ret = gfar_check_capability(&temp->fs, priv);
+ if (ret)
+ goto clean_mem;
+ /* Link in the new element at the right @location */
+ if (list_empty(&priv->rx_list.list)) {
+ ret = gfar_check_filer_hardware(priv);
+ if (ret != 0)
+ goto clean_mem;
+ list_add(&temp->list, &priv->rx_list.list);
+ goto process;
+ } else {
+ list_for_each_entry(comp, &priv->rx_list.list, list) {
+ if (comp->fs.location > flow->location) {
+ list_add_tail(&temp->list, &comp->list);
+ goto process;
+ }
+ if (comp->fs.location == flow->location) {
+ netdev_err(priv->ndev,
+ "Rule not added: ID %d not free!\n",
+ flow->location);
+ ret = -EBUSY;
+ goto clean_mem;
+ }
+ }
+ list_add_tail(&temp->list, &priv->rx_list.list);
+ }
+
+process:
+ priv->rx_list.count++;
+ ret = gfar_process_filer_changes(priv);
+ if (ret)
+ goto clean_list;
+ return ret;
+
+clean_list:
+ priv->rx_list.count--;
+ list_del(&temp->list);
+clean_mem:
+ kfree(temp);
+ return ret;
+}
+
+static int gfar_del_cls(struct gfar_private *priv, u32 loc)
+{
+ struct ethtool_flow_spec_container *comp;
+ u32 ret = -EINVAL;
+
+ if (list_empty(&priv->rx_list.list))
+ return ret;
+
+ list_for_each_entry(comp, &priv->rx_list.list, list) {
+ if (comp->fs.location == loc) {
+ list_del(&comp->list);
+ kfree(comp);
+ priv->rx_list.count--;
+ gfar_process_filer_changes(priv);
+ ret = 0;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_flow_spec_container *comp;
+ u32 ret = -EINVAL;
+
+ list_for_each_entry(comp, &priv->rx_list.list, list) {
+ if (comp->fs.location == cmd->fs.location) {
+ memcpy(&cmd->fs, &comp->fs, sizeof(cmd->fs));
+ gfar_invert_masks(&cmd->fs);
+ ret = 0;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int gfar_get_cls_all(struct gfar_private *priv,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ struct ethtool_flow_spec_container *comp;
+ u32 i = 0;
+
+ list_for_each_entry(comp, &priv->rx_list.list, list) {
+ if (i == cmd->rule_cnt)
+ return -EMSGSIZE;
+ rule_locs[i] = comp->fs.location;
+ i++;
+ }
+
+ cmd->data = MAX_FILER_IDX;
+ cmd->rule_cnt = i;
+
+ return 0;
+}
+
+static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ int ret = 0;
+
+ if (test_bit(GFAR_RESETTING, &priv->state))
+ return -EBUSY;
+
+ mutex_lock(&priv->rx_queue_access);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = gfar_set_hash_opts(priv, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLINS:
+ if ((cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
+ cmd->fs.ring_cookie >= priv->num_rx_queues) ||
+ cmd->fs.location >= MAX_FILER_IDX) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = gfar_add_cls(priv, &cmd->fs);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = gfar_del_cls(priv, cmd->fs.location);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&priv->rx_queue_access);
+
+ return ret;
+}
+
+static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ int ret = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = priv->num_rx_queues;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = priv->rx_list.count;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = gfar_get_cls(priv, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = gfar_get_cls_all(priv, cmd, rule_locs);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int gfar_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct platform_device *ptp_dev;
+ struct device_node *ptp_node;
+ struct ptp_qoriq *ptp = NULL;
+
+ info->phc_index = -1;
+
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
+ info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ return 0;
+ }
+
+ ptp_node = of_find_compatible_node(NULL, NULL, "fsl,etsec-ptp");
+ if (ptp_node) {
+ ptp_dev = of_find_device_by_node(ptp_node);
+ of_node_put(ptp_node);
+ if (ptp_dev)
+ ptp = platform_get_drvdata(ptp_dev);
+ }
+
+ if (ptp)
+ info->phc_index = ptp->phc_index;
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+ return 0;
+}
+
+const struct ethtool_ops gfar_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
+ .get_drvinfo = gfar_gdrvinfo,
+ .get_regs_len = gfar_reglen,
+ .get_regs = gfar_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = gfar_gcoalesce,
+ .set_coalesce = gfar_scoalesce,
+ .get_ringparam = gfar_gringparam,
+ .set_ringparam = gfar_sringparam,
+ .get_pauseparam = gfar_gpauseparam,
+ .set_pauseparam = gfar_spauseparam,
+ .get_strings = gfar_gstrings,
+ .get_sset_count = gfar_sset_count,
+ .get_ethtool_stats = gfar_fill_stats,
+ .get_msglevel = gfar_get_msglevel,
+ .set_msglevel = gfar_set_msglevel,
+#ifdef CONFIG_PM
+ .get_wol = gfar_get_wol,
+ .set_wol = gfar_set_wol,
+#endif
+ .set_rxnfc = gfar_set_nfc,
+ .get_rxnfc = gfar_get_nfc,
+ .get_ts_info = gfar_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
new file mode 100644
index 000000000..7a4cb4f07
--- /dev/null
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -0,0 +1,3813 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2006-2009 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Shlomi Gridish <gridish@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ *
+ * Description:
+ * QE UCC Gigabit Ethernet Driver
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/workqueue.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+
+#include <linux/uaccess.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+#include <soc/fsl/qe/ucc.h>
+#include <soc/fsl/qe/ucc_fast.h>
+#include <asm/machdep.h>
+
+#include "ucc_geth.h"
+
+#undef DEBUG
+
+#define ugeth_printk(level, format, arg...) \
+ printk(level format "\n", ## arg)
+
+#define ugeth_dbg(format, arg...) \
+ ugeth_printk(KERN_DEBUG , format , ## arg)
+
+#ifdef UGETH_VERBOSE_DEBUG
+#define ugeth_vdbg ugeth_dbg
+#else
+#define ugeth_vdbg(fmt, args...) do { } while (0)
+#endif /* UGETH_VERBOSE_DEBUG */
+#define UGETH_MSG_DEFAULT (NETIF_MSG_IFUP << 1 ) - 1
+
+
+static DEFINE_SPINLOCK(ugeth_lock);
+
+static struct {
+ u32 msg_enable;
+} debug = { -1 };
+
+module_param_named(debug, debug.msg_enable, int, 0);
+MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 0xffff=all)");
+
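+/* Map a UCC_GETH_NUM_OF_THREADS_* enum value to the actual thread count */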
+static int ucc_geth_thread_count(enum ucc_geth_num_of_threads idx)
+{
+ static const u8 count[] = {
+ [UCC_GETH_NUM_OF_THREADS_1] = 1,
+ [UCC_GETH_NUM_OF_THREADS_2] = 2,
+ [UCC_GETH_NUM_OF_THREADS_4] = 4,
+ [UCC_GETH_NUM_OF_THREADS_6] = 6,
+ [UCC_GETH_NUM_OF_THREADS_8] = 8,
+ };
+ if (idx >= ARRAY_SIZE(count))
+ return 0;
+ return count[idx];
+}
+
+static inline int ucc_geth_tx_queues(const struct ucc_geth_info *info)
+{
+ return 1;
+}
+
+static inline int ucc_geth_rx_queues(const struct ucc_geth_info *info)
+{
+ return 1;
+}
+
+static const struct ucc_geth_info ugeth_primary_info = {
+ .uf_info = {
+ .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES,
+ .max_rx_buf_length = 1536,
+ /* adjusted at startup if max-speed 1000 */
+ .urfs = UCC_GETH_URFS_INIT,
+ .urfet = UCC_GETH_URFET_INIT,
+ .urfset = UCC_GETH_URFSET_INIT,
+ .utfs = UCC_GETH_UTFS_INIT,
+ .utfet = UCC_GETH_UTFET_INIT,
+ .utftt = UCC_GETH_UTFTT_INIT,
+ .ufpt = 256,
+ .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET,
+ .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
+ .tenc = UCC_FAST_TX_ENCODING_NRZ,
+ .renc = UCC_FAST_RX_ENCODING_NRZ,
+ .tcrc = UCC_FAST_16_BIT_CRC,
+ .synl = UCC_FAST_SYNC_LEN_NOT_USED,
+ },
+ .extendedFilteringChainPointer = ((uint32_t) NULL),
+ .typeorlen = 3072 /*1536 */ ,
+ .nonBackToBackIfgPart1 = 0x40,
+ .nonBackToBackIfgPart2 = 0x60,
+ .miminumInterFrameGapEnforcement = 0x50,
+ .backToBackInterFrameGap = 0x60,
+ .mblinterval = 128,
+ .nortsrbytetime = 5,
+ .fracsiz = 1,
+ .strictpriorityq = 0xff,
+ .altBebTruncation = 0xa,
+ .excessDefer = 1,
+ .maxRetransmission = 0xf,
+ .collisionWindow = 0x37,
+ .receiveFlowControl = 1,
+ .transmitFlowControl = 1,
+ .maxGroupAddrInHash = 4,
+ .maxIndAddrInHash = 4,
+ .prel = 7,
+ .maxFrameLength = 1518+16, /* Add extra bytes for VLANs etc. */
+ .minFrameLength = 64,
+ .maxD1Length = 1520+16, /* Add extra bytes for VLANs etc. */
+ .maxD2Length = 1520+16, /* Add extra bytes for VLANs etc. */
+ .vlantype = 0x8100,
+ .ecamptr = ((uint32_t) NULL),
+ .eventRegMask = UCCE_OTHER,
+ .pausePeriod = 0xf000,
+ .interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1},
+ .bdRingLenTx = {
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN},
+
+ .bdRingLenRx = {
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN},
+
+ .numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1,
+ .largestexternallookupkeysize =
+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE,
+ .statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE |
+ UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX |
+ UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX,
+ .vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP,
+ .vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP,
+ .rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT,
+ .aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE,
+ .padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC,
+ .numThreadsTx = UCC_GETH_NUM_OF_THREADS_1,
+ .numThreadsRx = UCC_GETH_NUM_OF_THREADS_1,
+ .riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
+ .riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
+};
+
+#ifdef DEBUG
+static void mem_disp(u8 *addr, int size)
+{
+ u8 *i;
+ int size16Aling = (size >> 4) << 4;
+ int size4Aling = (size >> 2) << 2;
+ int notAlign = 0;
+ if (size % 16)
+ notAlign = 1;
+
+ for (i = addr; (u32) i < (u32) addr + size16Aling; i += 16)
+ printk("0x%08x: %08x %08x %08x %08x\r\n",
+ (u32) i,
+ *((u32 *) (i)),
+ *((u32 *) (i + 4)),
+ *((u32 *) (i + 8)), *((u32 *) (i + 12)));
+ if (notAlign == 1)
+ printk("0x%08x: ", (u32) i);
+ for (; (u32) i < (u32) addr + size4Aling; i += 4)
+ printk("%08x ", *((u32 *) (i)));
+ for (; (u32) i < (u32) addr + size; i++)
+ printk("%02x", *((i)));
+ if (notAlign == 1)
+ printk("\r\n");
+}
+#endif /* DEBUG */
+
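+/* Detach and return the first node of @lh under ugeth_lock, or NULL if empty. */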
+static struct list_head *dequeue(struct list_head *lh)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ugeth_lock, flags);
+ if (!list_empty(lh)) {
+ struct list_head *node = lh->next;
+ list_del(node);
+ spin_unlock_irqrestore(&ugeth_lock, flags);
+ return node;
+ } else {
+ spin_unlock_irqrestore(&ugeth_lock, flags);
+ return NULL;
+ }
+}
+
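+/* Allocate a new Rx skb, align its data buffer, map it for DMA and
+ * attach it to the given buffer descriptor.
+ */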
+static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
+ u8 __iomem *bd)
+{
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb(ugeth->ndev,
+ ugeth->ug_info->uf_info.max_rx_buf_length +
+ UCC_GETH_RX_DATA_BUF_ALIGNMENT);
+ if (!skb)
+ return NULL;
+
+ /* Reserve as many bytes as needed to align the data buffer to
+ * UCC_GETH_RX_DATA_BUF_ALIGNMENT.
+ */
+ skb_reserve(skb,
+ UCC_GETH_RX_DATA_BUF_ALIGNMENT -
+ (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT -
+ 1)));
+
+ out_be32(&((struct qe_bd __iomem *)bd)->buf,
+ dma_map_single(ugeth->dev,
+ skb->data,
+ ugeth->ug_info->uf_info.max_rx_buf_length +
+ UCC_GETH_RX_DATA_BUF_ALIGNMENT,
+ DMA_FROM_DEVICE));
+
+ out_be32((u32 __iomem *)bd,
+ (R_E | R_I | (in_be32((u32 __iomem*)bd) & R_W)));
+
+ return skb;
+}
+
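+/* Attach a freshly allocated skb to every BD in the Rx ring of queue rxQ. */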
+static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ)
+{
+ u8 __iomem *bd;
+ u32 bd_status;
+ struct sk_buff *skb;
+ int i;
+
+ bd = ugeth->p_rx_bd_ring[rxQ];
+ i = 0;
+
+ do {
+ bd_status = in_be32((u32 __iomem *)bd);
+ skb = get_new_skb(ugeth, bd);
+
+ if (!skb) /* If we cannot allocate a data buffer,
+ abort; cleanup happens elsewhere */
+ return -ENOMEM;
+
+ ugeth->rx_skbuff[rxQ][i] = skb;
+
+ /* advance the BD pointer */
+ bd += sizeof(struct qe_bd);
+ i++;
+ } while (!(bd_status & R_W));
+
+ return 0;
+}
+
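+/* Fill the init-enet entry array: each entry gets a SNUM and, unless the
+ * first-entry page is skipped, a MURAM page for its thread parameter RAM.
+ */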
+static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
+ u32 *p_start,
+ u8 num_entries,
+ u32 thread_size,
+ u32 thread_alignment,
+ unsigned int risc,
+ int skip_page_for_first_entry)
+{
+ u32 init_enet_offset;
+ u8 i;
+ int snum;
+
+ for (i = 0; i < num_entries; i++) {
+ if ((snum = qe_get_snum()) < 0) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not get SNUM\n");
+ return snum;
+ }
+ if ((i == 0) && skip_page_for_first_entry)
+ /* First entry of Rx does not have page */
+ init_enet_offset = 0;
+ else {
+ init_enet_offset =
+ qe_muram_alloc(thread_size, thread_alignment);
+ if (IS_ERR_VALUE(init_enet_offset)) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not allocate DPRAM memory\n");
+ qe_put_snum((u8) snum);
+ return -ENOMEM;
+ }
+ }
+ *(p_start++) =
+ ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset
+ | risc;
+ }
+
+ return 0;
+}
+
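+/* Undo fill_init_enet_entries(): release the SNUMs and MURAM pages of
+ * every valid entry.
+ */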
+static int return_init_enet_entries(struct ucc_geth_private *ugeth,
+ u32 *p_start,
+ u8 num_entries,
+ unsigned int risc,
+ int skip_page_for_first_entry)
+{
+ u32 init_enet_offset;
+ u8 i;
+ int snum;
+
+ for (i = 0; i < num_entries; i++) {
+ u32 val = *p_start;
+
+ /* Check that this entry was actually valid --
+ needed in case an earlier allocation failed */
+ if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
+ snum =
+ (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
+ ENET_INIT_PARAM_SNUM_SHIFT;
+ qe_put_snum((u8) snum);
+ if (!((i == 0) && skip_page_for_first_entry)) {
+ /* First entry of Rx does not have page */
+ init_enet_offset =
+ (val & ENET_INIT_PARAM_PTR_MASK);
+ qe_muram_free(init_enet_offset);
+ }
+ *p_start++ = 0;
+ }
+ }
+
+ return 0;
+}
+
+#ifdef DEBUG
+static int dump_init_enet_entries(struct ucc_geth_private *ugeth,
+ u32 __iomem *p_start,
+ u8 num_entries,
+ u32 thread_size,
+ unsigned int risc,
+ int skip_page_for_first_entry)
+{
+ u32 init_enet_offset;
+ u8 i;
+ int snum;
+
+ for (i = 0; i < num_entries; i++) {
+ u32 val = in_be32(p_start);
+
+ /* Check that this entry was actually valid --
+ needed in case an earlier allocation failed */
+ if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
+ snum =
+ (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
+ ENET_INIT_PARAM_SNUM_SHIFT;
+ qe_put_snum((u8) snum);
+ if (!((i == 0) && skip_page_for_first_entry)) {
+ /* First entry of Rx does not have page */
+ init_enet_offset =
+ (in_be32(p_start) &
+ ENET_INIT_PARAM_PTR_MASK);
+ pr_info("Init enet entry %d:\n", i);
+ pr_info("Base address: 0x%08x\n",
+ (u32)qe_muram_addr(init_enet_offset));
+ mem_disp(qe_muram_addr(init_enet_offset),
+ thread_size);
+ }
+ p_start++;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+static void put_enet_addr_container(struct enet_addr_container *enet_addr_cont)
+{
+ kfree(enet_addr_cont);
+}
+
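+/* Write the MAC address into three 16-bit registers, byte-swapped as
+ * the hardware expects.
+ */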
+static void set_mac_addr(__be16 __iomem *reg, u8 *mac)
+{
+ out_be16(&reg[0], ((u16)mac[5] << 8) | mac[4]);
+ out_be16(&reg[1], ((u16)mac[3] << 8) | mac[2]);
+ out_be16(&reg[2], ((u16)mac[1] << 8) | mac[0]);
+}
+
+static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
+{
+ struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
+
+ if (paddr_num >= NUM_OF_PADDRS) {
+ pr_warn("%s: Invalid paddr_num: %u\n", __func__, paddr_num);
+ return -EINVAL;
+ }
+
+ p_82xx_addr_filt =
+ (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
+ addressfiltering;
+
+ /* Writing address ff.ff.ff.ff.ff.ff disables address
+ recognition for this register */
+ out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff);
+ out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff);
+ out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff);
+
+ return 0;
+}
+
+static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
+ u8 *p_enet_addr)
+{
+ struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
+ u32 cecr_subblock;
+
+ p_82xx_addr_filt =
+ (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
+ addressfiltering;
+
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
+
+ /* Ethernet frames are defined in little-endian byte order; to insert
+ the address into the hash (big-endian), we reverse the bytes. */
+
+ set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr);
+
+ qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock,
+ QE_CR_PROTOCOL_ETHERNET, 0);
+}
+
+#ifdef DEBUG
+static void get_statistics(struct ucc_geth_private *ugeth,
+ struct ucc_geth_tx_firmware_statistics *
+ tx_firmware_statistics,
+ struct ucc_geth_rx_firmware_statistics *
+ rx_firmware_statistics,
+ struct ucc_geth_hardware_statistics *hardware_statistics)
+{
+ struct ucc_fast __iomem *uf_regs;
+ struct ucc_geth __iomem *ug_regs;
+ struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram;
+ struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram;
+
+ ug_regs = ugeth->ug_regs;
+ uf_regs = (struct ucc_fast __iomem *) ug_regs;
+ p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram;
+ p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram;
+
+ /* Tx firmware only if user handed pointer and driver actually
+ gathers Tx firmware statistics */
+ if (tx_firmware_statistics && p_tx_fw_statistics_pram) {
+ tx_firmware_statistics->sicoltx =
+ in_be32(&p_tx_fw_statistics_pram->sicoltx);
+ tx_firmware_statistics->mulcoltx =
+ in_be32(&p_tx_fw_statistics_pram->mulcoltx);
+ tx_firmware_statistics->latecoltxfr =
+ in_be32(&p_tx_fw_statistics_pram->latecoltxfr);
+ tx_firmware_statistics->frabortduecol =
+ in_be32(&p_tx_fw_statistics_pram->frabortduecol);
+ tx_firmware_statistics->frlostinmactxer =
+ in_be32(&p_tx_fw_statistics_pram->frlostinmactxer);
+ tx_firmware_statistics->carriersenseertx =
+ in_be32(&p_tx_fw_statistics_pram->carriersenseertx);
+ tx_firmware_statistics->frtxok =
+ in_be32(&p_tx_fw_statistics_pram->frtxok);
+ tx_firmware_statistics->txfrexcessivedefer =
+ in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer);
+ tx_firmware_statistics->txpkts256 =
+ in_be32(&p_tx_fw_statistics_pram->txpkts256);
+ tx_firmware_statistics->txpkts512 =
+ in_be32(&p_tx_fw_statistics_pram->txpkts512);
+ tx_firmware_statistics->txpkts1024 =
+ in_be32(&p_tx_fw_statistics_pram->txpkts1024);
+ tx_firmware_statistics->txpktsjumbo =
+ in_be32(&p_tx_fw_statistics_pram->txpktsjumbo);
+ }
+
+ /* Rx firmware only if user handed pointer and driver actually
+ * gathers Rx firmware statistics */
+ if (rx_firmware_statistics && p_rx_fw_statistics_pram) {
+ int i;
+ rx_firmware_statistics->frrxfcser =
+ in_be32(&p_rx_fw_statistics_pram->frrxfcser);
+ rx_firmware_statistics->fraligner =
+ in_be32(&p_rx_fw_statistics_pram->fraligner);
+ rx_firmware_statistics->inrangelenrxer =
+ in_be32(&p_rx_fw_statistics_pram->inrangelenrxer);
+ rx_firmware_statistics->outrangelenrxer =
+ in_be32(&p_rx_fw_statistics_pram->outrangelenrxer);
+ rx_firmware_statistics->frtoolong =
+ in_be32(&p_rx_fw_statistics_pram->frtoolong);
+ rx_firmware_statistics->runt =
+ in_be32(&p_rx_fw_statistics_pram->runt);
+ rx_firmware_statistics->verylongevent =
+ in_be32(&p_rx_fw_statistics_pram->verylongevent);
+ rx_firmware_statistics->symbolerror =
+ in_be32(&p_rx_fw_statistics_pram->symbolerror);
+ rx_firmware_statistics->dropbsy =
+ in_be32(&p_rx_fw_statistics_pram->dropbsy);
+ for (i = 0; i < 0x8; i++)
+ rx_firmware_statistics->res0[i] =
+ p_rx_fw_statistics_pram->res0[i];
+ rx_firmware_statistics->mismatchdrop =
+ in_be32(&p_rx_fw_statistics_pram->mismatchdrop);
+ rx_firmware_statistics->underpkts =
+ in_be32(&p_rx_fw_statistics_pram->underpkts);
+ rx_firmware_statistics->pkts256 =
+ in_be32(&p_rx_fw_statistics_pram->pkts256);
+ rx_firmware_statistics->pkts512 =
+ in_be32(&p_rx_fw_statistics_pram->pkts512);
+ rx_firmware_statistics->pkts1024 =
+ in_be32(&p_rx_fw_statistics_pram->pkts1024);
+ rx_firmware_statistics->pktsjumbo =
+ in_be32(&p_rx_fw_statistics_pram->pktsjumbo);
+ rx_firmware_statistics->frlossinmacer =
+ in_be32(&p_rx_fw_statistics_pram->frlossinmacer);
+ rx_firmware_statistics->pausefr =
+ in_be32(&p_rx_fw_statistics_pram->pausefr);
+ for (i = 0; i < 0x4; i++)
+ rx_firmware_statistics->res1[i] =
+ p_rx_fw_statistics_pram->res1[i];
+ rx_firmware_statistics->removevlan =
+ in_be32(&p_rx_fw_statistics_pram->removevlan);
+ rx_firmware_statistics->replacevlan =
+ in_be32(&p_rx_fw_statistics_pram->replacevlan);
+ rx_firmware_statistics->insertvlan =
+ in_be32(&p_rx_fw_statistics_pram->insertvlan);
+ }
+
+ /* Hardware only if user handed pointer and driver actually
+ gathers hardware statistics */
+ if (hardware_statistics &&
+ (in_be32(&uf_regs->upsmr) & UCC_GETH_UPSMR_HSE)) {
+ hardware_statistics->tx64 = in_be32(&ug_regs->tx64);
+ hardware_statistics->tx127 = in_be32(&ug_regs->tx127);
+ hardware_statistics->tx255 = in_be32(&ug_regs->tx255);
+ hardware_statistics->rx64 = in_be32(&ug_regs->rx64);
+ hardware_statistics->rx127 = in_be32(&ug_regs->rx127);
+ hardware_statistics->rx255 = in_be32(&ug_regs->rx255);
+ hardware_statistics->txok = in_be32(&ug_regs->txok);
+ hardware_statistics->txcf = in_be16(&ug_regs->txcf);
+ hardware_statistics->tmca = in_be32(&ug_regs->tmca);
+ hardware_statistics->tbca = in_be32(&ug_regs->tbca);
+ hardware_statistics->rxfok = in_be32(&ug_regs->rxfok);
+ hardware_statistics->rxbok = in_be32(&ug_regs->rxbok);
+ hardware_statistics->rbyt = in_be32(&ug_regs->rbyt);
+ hardware_statistics->rmca = in_be32(&ug_regs->rmca);
+ hardware_statistics->rbca = in_be32(&ug_regs->rbca);
+ }
+}
+
+static void dump_bds(struct ucc_geth_private *ugeth)
+{
+ int i;
+ int length;
+
+ for (i = 0; i < ucc_geth_tx_queues(ugeth->ug_info); i++) {
+ if (ugeth->p_tx_bd_ring[i]) {
+ length =
+ (ugeth->ug_info->bdRingLenTx[i] *
+ sizeof(struct qe_bd));
+ pr_info("TX BDs[%d]\n", i);
+ mem_disp(ugeth->p_tx_bd_ring[i], length);
+ }
+ }
+ for (i = 0; i < ucc_geth_rx_queues(ugeth->ug_info); i++) {
+ if (ugeth->p_rx_bd_ring[i]) {
+ length =
+ (ugeth->ug_info->bdRingLenRx[i] *
+ sizeof(struct qe_bd));
+ pr_info("RX BDs[%d]\n", i);
+ mem_disp(ugeth->p_rx_bd_ring[i], length);
+ }
+ }
+}
+
+static void dump_regs(struct ucc_geth_private *ugeth)
+{
+ int i;
+
+ pr_info("UCC%d Geth registers:\n", ugeth->ug_info->uf_info.ucc_num + 1);
+ pr_info("Base address: 0x%08x\n", (u32)ugeth->ug_regs);
+
+ pr_info("maccfg1 : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->maccfg1,
+ in_be32(&ugeth->ug_regs->maccfg1));
+ pr_info("maccfg2 : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->maccfg2,
+ in_be32(&ugeth->ug_regs->maccfg2));
+ pr_info("ipgifg : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->ipgifg,
+ in_be32(&ugeth->ug_regs->ipgifg));
+ pr_info("hafdup : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->hafdup,
+ in_be32(&ugeth->ug_regs->hafdup));
+ pr_info("ifctl : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->ifctl,
+ in_be32(&ugeth->ug_regs->ifctl));
+ pr_info("ifstat : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->ifstat,
+ in_be32(&ugeth->ug_regs->ifstat));
+ pr_info("macstnaddr1: addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->macstnaddr1,
+ in_be32(&ugeth->ug_regs->macstnaddr1));
+ pr_info("macstnaddr2: addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->macstnaddr2,
+ in_be32(&ugeth->ug_regs->macstnaddr2));
+ pr_info("uempr : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->uempr,
+ in_be32(&ugeth->ug_regs->uempr));
+ pr_info("utbipar : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->utbipar,
+ in_be32(&ugeth->ug_regs->utbipar));
+ pr_info("uescr : addr - 0x%08x, val - 0x%04x\n",
+ (u32)&ugeth->ug_regs->uescr,
+ in_be16(&ugeth->ug_regs->uescr));
+ pr_info("tx64 : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->tx64,
+ in_be32(&ugeth->ug_regs->tx64));
+ pr_info("tx127 : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->tx127,
+ in_be32(&ugeth->ug_regs->tx127));
+ pr_info("tx255 : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->tx255,
+ in_be32(&ugeth->ug_regs->tx255));
+ pr_info("rx64 : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->rx64,
+ in_be32(&ugeth->ug_regs->rx64));
+ pr_info("rx127 : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->rx127,
+ in_be32(&ugeth->ug_regs->rx127));
+ pr_info("rx255 : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->rx255,
+ in_be32(&ugeth->ug_regs->rx255));
+ pr_info("txok : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->txok,
+ in_be32(&ugeth->ug_regs->txok));
+ pr_info("txcf : addr - 0x%08x, val - 0x%04x\n",
+ (u32)&ugeth->ug_regs->txcf,
+ in_be16(&ugeth->ug_regs->txcf));
+ pr_info("tmca : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->tmca,
+ in_be32(&ugeth->ug_regs->tmca));
+ pr_info("tbca : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->tbca,
+ in_be32(&ugeth->ug_regs->tbca));
+ pr_info("rxfok : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->rxfok,
+ in_be32(&ugeth->ug_regs->rxfok));
+ pr_info("rxbok : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->rxbok,
+ in_be32(&ugeth->ug_regs->rxbok));
+ pr_info("rbyt : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->rbyt,
+ in_be32(&ugeth->ug_regs->rbyt));
+ pr_info("rmca : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->rmca,
+ in_be32(&ugeth->ug_regs->rmca));
+ pr_info("rbca : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->rbca,
+ in_be32(&ugeth->ug_regs->rbca));
+ pr_info("scar : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->scar,
+ in_be32(&ugeth->ug_regs->scar));
+ pr_info("scam : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->scam,
+ in_be32(&ugeth->ug_regs->scam));
+
+ if (ugeth->p_thread_data_tx) {
+ int count = ucc_geth_thread_count(ugeth->ug_info->numThreadsTx);
+
+ pr_info("Thread data TXs:\n");
+ pr_info("Base address: 0x%08x\n",
+ (u32)ugeth->p_thread_data_tx);
+ for (i = 0; i < count; i++) {
+ pr_info("Thread data TX[%d]:\n", i);
+ pr_info("Base address: 0x%08x\n",
+ (u32)&ugeth->p_thread_data_tx[i]);
+ mem_disp((u8 *) & ugeth->p_thread_data_tx[i],
+ sizeof(struct ucc_geth_thread_data_tx));
+ }
+ }
+ if (ugeth->p_thread_data_rx) {
+ int count = ucc_geth_thread_count(ugeth->ug_info->numThreadsRx);
+
+ pr_info("Thread data RX:\n");
+ pr_info("Base address: 0x%08x\n",
+ (u32)ugeth->p_thread_data_rx);
+ for (i = 0; i < count; i++) {
+ pr_info("Thread data RX[%d]:\n", i);
+ pr_info("Base address: 0x%08x\n",
+ (u32)&ugeth->p_thread_data_rx[i]);
+ mem_disp((u8 *) & ugeth->p_thread_data_rx[i],
+ sizeof(struct ucc_geth_thread_data_rx));
+ }
+ }
+ if (ugeth->p_exf_glbl_param) {
+ pr_info("EXF global param:\n");
+ pr_info("Base address: 0x%08x\n",
+ (u32)ugeth->p_exf_glbl_param);
+ mem_disp((u8 *) ugeth->p_exf_glbl_param,
+ sizeof(*ugeth->p_exf_glbl_param));
+ }
+ if (ugeth->p_tx_glbl_pram) {
+ pr_info("TX global param:\n");
+ pr_info("Base address: 0x%08x\n", (u32)ugeth->p_tx_glbl_pram);
+ pr_info("temoder : addr - 0x%08x, val - 0x%04x\n",
+ (u32)&ugeth->p_tx_glbl_pram->temoder,
+ in_be16(&ugeth->p_tx_glbl_pram->temoder));
+ pr_info("sqptr : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->sqptr,
+ in_be32(&ugeth->p_tx_glbl_pram->sqptr));
+ pr_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->schedulerbasepointer,
+ in_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer));
+ pr_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->txrmonbaseptr,
+ in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr));
+ pr_info("tstate : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->tstate,
+ in_be32(&ugeth->p_tx_glbl_pram->tstate));
+ pr_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x\n",
+ (u32)&ugeth->p_tx_glbl_pram->iphoffset[0],
+ ugeth->p_tx_glbl_pram->iphoffset[0]);
+ pr_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x\n",
+ (u32)&ugeth->p_tx_glbl_pram->iphoffset[1],
+ ugeth->p_tx_glbl_pram->iphoffset[1]);
+ pr_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x\n",
+ (u32)&ugeth->p_tx_glbl_pram->iphoffset[2],
+ ugeth->p_tx_glbl_pram->iphoffset[2]);
+ pr_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x\n",
+ (u32)&ugeth->p_tx_glbl_pram->iphoffset[3],
+ ugeth->p_tx_glbl_pram->iphoffset[3]);
+ pr_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x\n",
+ (u32)&ugeth->p_tx_glbl_pram->iphoffset[4],
+ ugeth->p_tx_glbl_pram->iphoffset[4]);
+ pr_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x\n",
+ (u32)&ugeth->p_tx_glbl_pram->iphoffset[5],
+ ugeth->p_tx_glbl_pram->iphoffset[5]);
+ pr_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x\n",
+ (u32)&ugeth->p_tx_glbl_pram->iphoffset[6],
+ ugeth->p_tx_glbl_pram->iphoffset[6]);
+ pr_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x\n",
+ (u32)&ugeth->p_tx_glbl_pram->iphoffset[7],
+ ugeth->p_tx_glbl_pram->iphoffset[7]);
+ pr_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->vtagtable[0],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0]));
+ pr_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->vtagtable[1],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1]));
+ pr_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->vtagtable[2],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2]));
+ pr_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->vtagtable[3],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3]));
+ pr_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->vtagtable[4],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4]));
+ pr_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->vtagtable[5],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5]));
+ pr_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->vtagtable[6],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6]));
+ pr_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->vtagtable[7],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7]));
+ pr_info("tqptr : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->tqptr,
+ in_be32(&ugeth->p_tx_glbl_pram->tqptr));
+ }
+ if (ugeth->p_rx_glbl_pram) {
+ pr_info("RX global param:\n");
+ pr_info("Base address: 0x%08x\n", (u32)ugeth->p_rx_glbl_pram);
+ pr_info("remoder : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->remoder,
+ in_be32(&ugeth->p_rx_glbl_pram->remoder));
+ pr_info("rqptr : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->rqptr,
+ in_be32(&ugeth->p_rx_glbl_pram->rqptr));
+ pr_info("typeorlen : addr - 0x%08x, val - 0x%04x\n",
+ (u32)&ugeth->p_rx_glbl_pram->typeorlen,
+ in_be16(&ugeth->p_rx_glbl_pram->typeorlen));
+ pr_info("rxgstpack : addr - 0x%08x, val - 0x%02x\n",
+ (u32)&ugeth->p_rx_glbl_pram->rxgstpack,
+ ugeth->p_rx_glbl_pram->rxgstpack);
+ pr_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->rxrmonbaseptr,
+ in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr));
+ pr_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->intcoalescingptr,
+ in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr));
+ pr_info("rstate : addr - 0x%08x, val - 0x%02x\n",
+ (u32)&ugeth->p_rx_glbl_pram->rstate,
+ ugeth->p_rx_glbl_pram->rstate);
+ pr_info("mrblr : addr - 0x%08x, val - 0x%04x\n",
+ (u32)&ugeth->p_rx_glbl_pram->mrblr,
+ in_be16(&ugeth->p_rx_glbl_pram->mrblr));
+ pr_info("rbdqptr : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->rbdqptr,
+ in_be32(&ugeth->p_rx_glbl_pram->rbdqptr));
+ pr_info("mflr : addr - 0x%08x, val - 0x%04x\n",
+ (u32)&ugeth->p_rx_glbl_pram->mflr,
+ in_be16(&ugeth->p_rx_glbl_pram->mflr));
+ pr_info("minflr : addr - 0x%08x, val - 0x%04x\n",
+ (u32)&ugeth->p_rx_glbl_pram->minflr,
+ in_be16(&ugeth->p_rx_glbl_pram->minflr));
+ pr_info("maxd1 : addr - 0x%08x, val - 0x%04x\n",
+ (u32)&ugeth->p_rx_glbl_pram->maxd1,
+ in_be16(&ugeth->p_rx_glbl_pram->maxd1));
+ pr_info("maxd2 : addr - 0x%08x, val - 0x%04x\n",
+ (u32)&ugeth->p_rx_glbl_pram->maxd2,
+ in_be16(&ugeth->p_rx_glbl_pram->maxd2));
+ pr_info("ecamptr : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->ecamptr,
+ in_be32(&ugeth->p_rx_glbl_pram->ecamptr));
+ pr_info("l2qt : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->l2qt,
+ in_be32(&ugeth->p_rx_glbl_pram->l2qt));
+ pr_info("l3qt[0] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->l3qt[0],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[0]));
+ pr_info("l3qt[1] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->l3qt[1],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[1]));
+ pr_info("l3qt[2] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->l3qt[2],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[2]));
+ pr_info("l3qt[3] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->l3qt[3],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[3]));
+ pr_info("l3qt[4] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->l3qt[4],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[4]));
+ pr_info("l3qt[5] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->l3qt[5],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[5]));
+ pr_info("l3qt[6] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->l3qt[6],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[6]));
+ pr_info("l3qt[7] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->l3qt[7],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[7]));
+ pr_info("vlantype : addr - 0x%08x, val - 0x%04x\n",
+ (u32)&ugeth->p_rx_glbl_pram->vlantype,
+ in_be16(&ugeth->p_rx_glbl_pram->vlantype));
+ pr_info("vlantci : addr - 0x%08x, val - 0x%04x\n",
+ (u32)&ugeth->p_rx_glbl_pram->vlantci,
+ in_be16(&ugeth->p_rx_glbl_pram->vlantci));
+ for (i = 0; i < 64; i++)
+ pr_info("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x\n",
+ i,
+ (u32)&ugeth->p_rx_glbl_pram->addressfiltering[i],
+ ugeth->p_rx_glbl_pram->addressfiltering[i]);
+ pr_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->exfGlobalParam,
+ in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam));
+ }
+ if (ugeth->p_send_q_mem_reg) {
+ pr_info("Send Q memory registers:\n");
+ pr_info("Base address: 0x%08x\n", (u32)ugeth->p_send_q_mem_reg);
+ for (i = 0; i < ucc_geth_tx_queues(ugeth->ug_info); i++) {
+ pr_info("SQQD[%d]:\n", i);
+ pr_info("Base address: 0x%08x\n",
+ (u32)&ugeth->p_send_q_mem_reg->sqqd[i]);
+ mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i],
+ sizeof(struct ucc_geth_send_queue_qd));
+ }
+ }
+ if (ugeth->p_scheduler) {
+ pr_info("Scheduler:\n");
+ pr_info("Base address: 0x%08x\n", (u32)ugeth->p_scheduler);
+ mem_disp((u8 *) ugeth->p_scheduler,
+ sizeof(*ugeth->p_scheduler));
+ }
+ if (ugeth->p_tx_fw_statistics_pram) {
+ pr_info("TX FW statistics pram:\n");
+ pr_info("Base address: 0x%08x\n",
+ (u32)ugeth->p_tx_fw_statistics_pram);
+ mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram,
+ sizeof(*ugeth->p_tx_fw_statistics_pram));
+ }
+ if (ugeth->p_rx_fw_statistics_pram) {
+ pr_info("RX FW statistics pram:\n");
+ pr_info("Base address: 0x%08x\n",
+ (u32)ugeth->p_rx_fw_statistics_pram);
+ mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram,
+ sizeof(*ugeth->p_rx_fw_statistics_pram));
+ }
+ if (ugeth->p_rx_irq_coalescing_tbl) {
+ pr_info("RX IRQ coalescing tables:\n");
+ pr_info("Base address: 0x%08x\n",
+ (u32)ugeth->p_rx_irq_coalescing_tbl);
+ for (i = 0; i < ucc_geth_rx_queues(ugeth->ug_info); i++) {
+ pr_info("RX IRQ coalescing table entry[%d]:\n", i);
+ pr_info("Base address: 0x%08x\n",
+ (u32)&ugeth->p_rx_irq_coalescing_tbl->
+ coalescingentry[i]);
+ pr_info("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_irq_coalescing_tbl->
+ coalescingentry[i].interruptcoalescingmaxvalue,
+ in_be32(&ugeth->p_rx_irq_coalescing_tbl->
+ coalescingentry[i].
+ interruptcoalescingmaxvalue));
+ pr_info("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_irq_coalescing_tbl->
+ coalescingentry[i].interruptcoalescingcounter,
+ in_be32(&ugeth->p_rx_irq_coalescing_tbl->
+ coalescingentry[i].
+ interruptcoalescingcounter));
+ }
+ }
+ if (ugeth->p_rx_bd_qs_tbl) {
+ pr_info("RX BD QS tables:\n");
+ pr_info("Base address: 0x%08x\n", (u32)ugeth->p_rx_bd_qs_tbl);
+ for (i = 0; i < ucc_geth_rx_queues(ugeth->ug_info); i++) {
+ pr_info("RX BD QS table[%d]:\n", i);
+ pr_info("Base address: 0x%08x\n",
+ (u32)&ugeth->p_rx_bd_qs_tbl[i]);
+ pr_info("bdbaseptr : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr,
+ in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr));
+ pr_info("bdptr : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_bd_qs_tbl[i].bdptr,
+ in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr));
+ pr_info("externalbdbaseptr: addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
+ in_be32(&ugeth->p_rx_bd_qs_tbl[i].
+ externalbdbaseptr));
+ pr_info("externalbdptr : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_bd_qs_tbl[i].externalbdptr,
+ in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr));
+ pr_info("ucode RX Prefetched BDs:\n");
+ pr_info("Base address: 0x%08x\n",
+ (u32)qe_muram_addr(in_be32
+ (&ugeth->p_rx_bd_qs_tbl[i].
+ bdbaseptr)));
+ mem_disp((u8 *)
+ qe_muram_addr(in_be32
+ (&ugeth->p_rx_bd_qs_tbl[i].
+ bdbaseptr)),
+ sizeof(struct ucc_geth_rx_prefetched_bds));
+ }
+ }
+ if (ugeth->p_init_enet_param_shadow) {
+ int size;
+ pr_info("Init enet param shadow:\n");
+ pr_info("Base address: 0x%08x\n",
+ (u32) ugeth->p_init_enet_param_shadow);
+ mem_disp((u8 *) ugeth->p_init_enet_param_shadow,
+ sizeof(*ugeth->p_init_enet_param_shadow));
+
+ size = sizeof(struct ucc_geth_thread_rx_pram);
+ if (ugeth->ug_info->rxExtendedFiltering) {
+ size +=
+ THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
+ if (ugeth->ug_info->largestexternallookupkeysize ==
+ QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
+ size +=
+ THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
+ if (ugeth->ug_info->largestexternallookupkeysize ==
+ QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
+ size +=
+ THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
+ }
+
+ dump_init_enet_entries(ugeth,
+ &(ugeth->p_init_enet_param_shadow->
+ txthread[0]),
+ ENET_INIT_PARAM_MAX_ENTRIES_TX,
+ sizeof(struct ucc_geth_thread_tx_pram),
+ ugeth->ug_info->riscTx, 0);
+ dump_init_enet_entries(ugeth,
+ &(ugeth->p_init_enet_param_shadow->
+ rxthread[0]),
+ ENET_INIT_PARAM_MAX_ENTRIES_RX, size,
+ ugeth->ug_info->riscRx, 1);
+ }
+}
+#endif /* DEBUG */
+
+static void init_default_reg_vals(u32 __iomem *upsmr_register,
+ u32 __iomem *maccfg1_register,
+ u32 __iomem *maccfg2_register)
+{
+ out_be32(upsmr_register, UCC_GETH_UPSMR_INIT);
+ out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT);
+ out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT);
+}
+
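+/* Program the HAFDUP register from the (range-checked) half-duplex parameters. */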
+static int init_half_duplex_params(int alt_beb,
+ int back_pressure_no_backoff,
+ int no_backoff,
+ int excess_defer,
+ u8 alt_beb_truncation,
+ u8 max_retransmissions,
+ u8 collision_window,
+ u32 __iomem *hafdup_register)
+{
+ u32 value = 0;
+
+ if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) ||
+ (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) ||
+ (collision_window > HALFDUP_COLLISION_WINDOW_MAX))
+ return -EINVAL;
+
+ value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT);
+
+ if (alt_beb)
+ value |= HALFDUP_ALT_BEB;
+ if (back_pressure_no_backoff)
+ value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF;
+ if (no_backoff)
+ value |= HALFDUP_NO_BACKOFF;
+ if (excess_defer)
+ value |= HALFDUP_EXCESSIVE_DEFER;
+
+ value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT);
+
+ value |= collision_window;
+
+ out_be32(hafdup_register, value);
+ return 0;
+}
+
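+/* Program the IPGIFG register from the (range-checked) inter-frame gap values. */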
+static int init_inter_frame_gap_params(u8 non_btb_cs_ipg,
+ u8 non_btb_ipg,
+ u8 min_ifg,
+ u8 btb_ipg,
+ u32 __iomem *ipgifg_register)
+{
+ u32 value = 0;
+
+ /* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back
+ IPG part 2 */
+ if (non_btb_cs_ipg > non_btb_ipg)
+ return -EINVAL;
+
+ if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) ||
+ (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) ||
+ /*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */
+ (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX))
+ return -EINVAL;
+
+ value |=
+ ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) &
+ IPGIFG_NBTB_CS_IPG_MASK);
+ value |=
+ ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) &
+ IPGIFG_NBTB_IPG_MASK);
+ value |=
+ ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) &
+ IPGIFG_MIN_IFG_MASK);
+ value |= (btb_ipg & IPGIFG_BTB_IPG_MASK);
+
+ out_be32(ipgifg_register, value);
+ return 0;
+}
+
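+/* Program pause frame timing (UEMPR), automatic flow control mode (UPSMR)
+ * and the Rx/Tx flow control enable bits (MACCFG1).
+ */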
+int init_flow_control_params(u32 automatic_flow_control_mode,
+ int rx_flow_control_enable,
+ int tx_flow_control_enable,
+ u16 pause_period,
+ u16 extension_field,
+ u32 __iomem *upsmr_register,
+ u32 __iomem *uempr_register,
+ u32 __iomem *maccfg1_register)
+{
+ u32 value = 0;
+
+ /* Set UEMPR register */
+ value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT;
+ value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT;
+ out_be32(uempr_register, value);
+
+ /* Set UPSMR register */
+ setbits32(upsmr_register, automatic_flow_control_mode);
+
+ value = in_be32(maccfg1_register);
+ if (rx_flow_control_enable)
+ value |= MACCFG1_FLOW_RX;
+ if (tx_flow_control_enable)
+ value |= MACCFG1_FLOW_TX;
+ out_be32(maccfg1_register, value);
+
+ return 0;
+}
+
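+/* Enable hardware statistics gathering if requested, clear the counters
+ * and optionally make them auto-zero on read.
+ */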
+static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
+ int auto_zero_hardware_statistics,
+ u32 __iomem *upsmr_register,
+ u16 __iomem *uescr_register)
+{
+ u16 uescr_value = 0;
+
+ /* Enable hardware statistics gathering if requested */
+ if (enable_hardware_statistics)
+ setbits32(upsmr_register, UCC_GETH_UPSMR_HSE);
+
+ /* Clear hardware statistics counters */
+ uescr_value = in_be16(uescr_register);
+ uescr_value |= UESCR_CLRCNT;
+ /* Automatically zero hardware statistics counters on read,
+ if requested */
+ if (auto_zero_hardware_statistics)
+ uescr_value |= UESCR_AUTOZ;
+ out_be16(uescr_register, uescr_value);
+
+ return 0;
+}
+
+static int init_firmware_statistics_gathering_mode(int
+ enable_tx_firmware_statistics,
+ int enable_rx_firmware_statistics,
+ u32 __iomem *tx_rmon_base_ptr,
+ u32 tx_firmware_statistics_structure_address,
+ u32 __iomem *rx_rmon_base_ptr,
+ u32 rx_firmware_statistics_structure_address,
+ u16 __iomem *temoder_register,
+ u32 __iomem *remoder_register)
+{
+ /* Note: this function does not check whether
+ the pointers it receives are NULL */
+
+ if (enable_tx_firmware_statistics) {
+ out_be32(tx_rmon_base_ptr,
+ tx_firmware_statistics_structure_address);
+ setbits16(temoder_register, TEMODER_TX_RMON_STATISTICS_ENABLE);
+ }
+
+ if (enable_rx_firmware_statistics) {
+ out_be32(rx_rmon_base_ptr,
+ rx_firmware_statistics_structure_address);
+ setbits32(remoder_register, REMODER_RX_RMON_STATISTICS_ENABLE);
+ }
+
+ return 0;
+}
+
+static int init_mac_station_addr_regs(u8 address_byte_0,
+ u8 address_byte_1,
+ u8 address_byte_2,
+ u8 address_byte_3,
+ u8 address_byte_4,
+ u8 address_byte_5,
+ u32 __iomem *macstnaddr1_register,
+ u32 __iomem *macstnaddr2_register)
+{
+ u32 value = 0;
+
+ /* Example: for a station address of 0x12345678ABCD, */
+ /* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */
+
+ /* MACSTNADDR1 Register: */
+
+ /* 0 7 8 15 */
+ /* station address byte 5 station address byte 4 */
+ /* 16 23 24 31 */
+ /* station address byte 3 station address byte 2 */
+ value |= (u32) ((address_byte_2 << 0) & 0x000000FF);
+ value |= (u32) ((address_byte_3 << 8) & 0x0000FF00);
+ value |= (u32) ((address_byte_4 << 16) & 0x00FF0000);
+ value |= (u32) ((address_byte_5 << 24) & 0xFF000000);
+
+ out_be32(macstnaddr1_register, value);
+
+ /* MACSTNADDR2 Register: */
+
+ /* 0 7 8 15 */
+ /* station address byte 1 station address byte 0 */
+ /* 16 23 24 31 */
+ /* reserved reserved */
+ value = 0;
+ value |= (u32) ((address_byte_0 << 16) & 0x00FF0000);
+ value |= (u32) ((address_byte_1 << 24) & 0xFF000000);
+
+ out_be32(macstnaddr2_register, value);
+
+ return 0;
+}
+
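+/* Enable or disable frame length checking via the MACCFG2 LC bit. */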
+static int init_check_frame_length_mode(int length_check,
+ u32 __iomem *maccfg2_register)
+{
+ u32 value = 0;
+
+ value = in_be32(maccfg2_register);
+
+ if (length_check)
+ value |= MACCFG2_LC;
+ else
+ value &= ~MACCFG2_LC;
+
+ out_be32(maccfg2_register, value);
+ return 0;
+}
+
+static int init_preamble_length(u8 preamble_length,
+ u32 __iomem *maccfg2_register)
+{
+ if ((preamble_length < 3) || (preamble_length > 7))
+ return -EINVAL;
+
+ clrsetbits_be32(maccfg2_register, MACCFG2_PREL_MASK,
+ preamble_length << MACCFG2_PREL_SHIFT);
+
+ return 0;
+}
+
+static int init_rx_parameters(int reject_broadcast,
+ int receive_short_frames,
+ int promiscuous, u32 __iomem *upsmr_register)
+{
+ u32 value = 0;
+
+ value = in_be32(upsmr_register);
+
+ if (reject_broadcast)
+ value |= UCC_GETH_UPSMR_BRO;
+ else
+ value &= ~UCC_GETH_UPSMR_BRO;
+
+ if (receive_short_frames)
+ value |= UCC_GETH_UPSMR_RSH;
+ else
+ value &= ~UCC_GETH_UPSMR_RSH;
+
+ if (promiscuous)
+ value |= UCC_GETH_UPSMR_PRO;
+ else
+ value &= ~UCC_GETH_UPSMR_PRO;
+
+ out_be32(upsmr_register, value);
+
+ return 0;
+}
+
+static int init_max_rx_buff_len(u16 max_rx_buf_len,
+ u16 __iomem *mrblr_register)
+{
+ /* max_rx_buf_len value must be a multiple of 128 */
+ if ((max_rx_buf_len == 0) ||
+ (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT))
+ return -EINVAL;
+
+ out_be16(mrblr_register, max_rx_buf_len);
+ return 0;
+}
+
+static int init_min_frame_len(u16 min_frame_length,
+ u16 __iomem *minflr_register,
+ u16 __iomem *mrblr_register)
+{
+ u16 mrblr_value = 0;
+
+ mrblr_value = in_be16(mrblr_register);
+ if (min_frame_length >= (mrblr_value - 4))
+ return -EINVAL;
+
+ out_be16(minflr_register, min_frame_length);
+ return 0;
+}
+
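+/* Program MACCFG2 and UPSMR according to the PHY interface mode and the
+ * maximum speed, and turn off TBI autonegotiation in (R)TBI mode.
+ */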
+static int adjust_enet_interface(struct ucc_geth_private *ugeth)
+{
+ struct ucc_geth_info *ug_info;
+ struct ucc_geth __iomem *ug_regs;
+ struct ucc_fast __iomem *uf_regs;
+ int ret_val;
+ u32 upsmr, maccfg2;
+ u16 value;
+
+ ugeth_vdbg("%s: IN", __func__);
+
+ ug_info = ugeth->ug_info;
+ ug_regs = ugeth->ug_regs;
+ uf_regs = ugeth->uccf->uf_regs;
+
+ /* Set MACCFG2 */
+ maccfg2 = in_be32(&ug_regs->maccfg2);
+ maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
+ if ((ugeth->max_speed == SPEED_10) ||
+ (ugeth->max_speed == SPEED_100))
+ maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
+ else if (ugeth->max_speed == SPEED_1000)
+ maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
+ maccfg2 |= ug_info->padAndCrc;
+ out_be32(&ug_regs->maccfg2, maccfg2);
+
+ /* Set UPSMR */
+ upsmr = in_be32(&uf_regs->upsmr);
+ upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M |
+ UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM);
+ if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
+ if (ugeth->phy_interface != PHY_INTERFACE_MODE_RMII)
+ upsmr |= UCC_GETH_UPSMR_RPM;
+ switch (ugeth->max_speed) {
+ case SPEED_10:
+ upsmr |= UCC_GETH_UPSMR_R10M;
+ fallthrough;
+ case SPEED_100:
+ if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI)
+ upsmr |= UCC_GETH_UPSMR_RMM;
+ }
+ }
+ if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
+ upsmr |= UCC_GETH_UPSMR_TBIM;
+ }
+ if (ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII)
+ upsmr |= UCC_GETH_UPSMR_SGMM;
+
+ out_be32(&uf_regs->upsmr, upsmr);
+
+ /* Disable autonegotiation in tbi mode, because by default it
+ comes up in autonegotiation mode. */
+ /* Note that this depends on proper setting in utbipar register. */
+ if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+ struct phy_device *tbiphy;
+
+ if (!ug_info->tbi_node)
+ pr_warn("TBI mode requires that the device tree specify a tbi-handle\n");
+
+ tbiphy = of_phy_find_device(ug_info->tbi_node);
+ if (!tbiphy)
+ pr_warn("Could not get TBI device\n");
+
+ value = phy_read(tbiphy, ENET_TBI_MII_CR);
+ value &= ~0x1000; /* Turn off autonegotiation */
+ phy_write(tbiphy, ENET_TBI_MII_CR, value);
+
+ put_device(&tbiphy->mdio.dev);
+ }
+
+ init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
+
+ ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
+ if (ret_val != 0) {
+ if (netif_msg_probe(ugeth))
+ pr_err("Preamble length must be between 3 and 7 inclusive\n");
+ return ret_val;
+ }
+
+ return 0;
+}
+
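+/* Issue the QE graceful-stop-tx command and poll briefly for its completion. */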
+static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
+{
+ struct ucc_fast_private *uccf;
+ u32 cecr_subblock;
+ u32 temp;
+ int i = 10;
+
+ uccf = ugeth->uccf;
+
+ /* Mask GRACEFUL STOP TX interrupt bit and clear it */
+ clrbits32(uccf->p_uccm, UCC_GETH_UCCE_GRA);
+ out_be32(uccf->p_ucce, UCC_GETH_UCCE_GRA); /* clear by writing 1 */
+
+ /* Issue host command */
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
+ qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
+ QE_CR_PROTOCOL_ETHERNET, 0);
+
+ /* Wait for command to complete */
+ do {
+ msleep(10);
+ temp = in_be32(uccf->p_ucce);
+ } while (!(temp & UCC_GETH_UCCE_GRA) && --i);
+
+ uccf->stopped_tx = 1;
+
+ return 0;
+}
+
+static int ugeth_graceful_stop_rx(struct ucc_geth_private *ugeth)
+{
+ struct ucc_fast_private *uccf;
+ u32 cecr_subblock;
+ u8 temp;
+ int i = 10;
+
+ uccf = ugeth->uccf;
+
+ /* Clear acknowledge bit */
+ temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
+ temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
+ out_8(&ugeth->p_rx_glbl_pram->rxgstpack, temp);
+
+ /* Keep issuing command and checking acknowledge bit until
+ it is asserted, according to spec */
+ do {
+ /* Issue host command */
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.
+ ucc_num);
+ qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
+ QE_CR_PROTOCOL_ETHERNET, 0);
+ msleep(10);
+ temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
+ } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX) && --i);
+
+ uccf->stopped_rx = 1;
+
+ return 0;
+}
+
+static int ugeth_restart_tx(struct ucc_geth_private *ugeth)
+{
+ struct ucc_fast_private *uccf;
+ u32 cecr_subblock;
+
+ uccf = ugeth->uccf;
+
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
+ qe_issue_cmd(QE_RESTART_TX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 0);
+ uccf->stopped_tx = 0;
+
+ return 0;
+}
+
+static int ugeth_restart_rx(struct ucc_geth_private *ugeth)
+{
+ struct ucc_fast_private *uccf;
+ u32 cecr_subblock;
+
+ uccf = ugeth->uccf;
+
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
+ qe_issue_cmd(QE_RESTART_RX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
+ 0);
+ uccf->stopped_rx = 0;
+
+ return 0;
+}
+
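+/* Enable the UCC in the requested direction(s), restarting Tx/Rx first if
+ * they had been gracefully stopped.
+ */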
+static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode)
+{
+ struct ucc_fast_private *uccf;
+ int enabled_tx, enabled_rx;
+
+ uccf = ugeth->uccf;
+
+ /* check if the UCC number is in range. */
+ if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
+ if (netif_msg_probe(ugeth))
+ pr_err("ucc_num out of range\n");
+ return -EINVAL;
+ }
+
+ enabled_tx = uccf->enabled_tx;
+ enabled_rx = uccf->enabled_rx;
+
+ /* Get Tx and Rx going again, in case this channel was actively
+ disabled. */
+ if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx)
+ ugeth_restart_tx(ugeth);
+ if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx)
+ ugeth_restart_rx(ugeth);
+
+ ucc_fast_enable(uccf, mode); /* OK to do even if not disabled */
+
+ return 0;
+
+}
+
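+/* Gracefully stop Tx/Rx as requested, then disable the UCC in that direction. */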
+static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode)
+{
+ struct ucc_fast_private *uccf;
+
+ uccf = ugeth->uccf;
+
+ /* check if the UCC number is in range. */
+ if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
+ if (netif_msg_probe(ugeth))
+ pr_err("ucc_num out of range\n");
+ return -EINVAL;
+ }
+
+ /* Stop any transmissions */
+ if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx)
+ ugeth_graceful_stop_tx(ugeth);
+
+ /* Stop any receptions */
+ if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx)
+ ugeth_graceful_stop_rx(ugeth);
+
+ ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */
+
+ return 0;
+}
+
+static void ugeth_quiesce(struct ucc_geth_private *ugeth)
+{
+ /* Prevent any further xmits */
+ netif_tx_stop_all_queues(ugeth->ndev);
+
+ /* Disable the interrupt to avoid NAPI rescheduling. */
+ disable_irq(ugeth->ug_info->uf_info.irq);
+
+ /* Stop NAPI, and possibly wait for its completion. */
+ napi_disable(&ugeth->napi);
+}
+
+static void ugeth_activate(struct ucc_geth_private *ugeth)
+{
+ napi_enable(&ugeth->napi);
+ enable_irq(ugeth->ug_info->uf_info.irq);
+
+ /* allow to xmit again */
+ netif_tx_wake_all_queues(ugeth->ndev);
+ __netdev_watchdog_up(ugeth->ndev);
+}
+
+/* Called every time the controller might need to be made
+ * aware of new link state. The PHY code conveys this
+ * information through variables in the ugeth structure, and this
+ * function converts those variables into the appropriate
+ * register values, and can bring down the device if needed.
+ */
+
+static void adjust_link(struct net_device *dev)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+ struct ucc_geth __iomem *ug_regs;
+ struct ucc_fast __iomem *uf_regs;
+ struct phy_device *phydev = ugeth->phydev;
+ int new_state = 0;
+
+ ug_regs = ugeth->ug_regs;
+ uf_regs = ugeth->uccf->uf_regs;
+
+ if (phydev->link) {
+ u32 tempval = in_be32(&ug_regs->maccfg2);
+ u32 upsmr = in_be32(&uf_regs->upsmr);
+ /* Now we make sure that we can be in full duplex mode.
+ * If not, we operate in half-duplex mode. */
+ if (phydev->duplex != ugeth->oldduplex) {
+ new_state = 1;
+ if (!(phydev->duplex))
+ tempval &= ~(MACCFG2_FDX);
+ else
+ tempval |= MACCFG2_FDX;
+ ugeth->oldduplex = phydev->duplex;
+ }
+
+ if (phydev->speed != ugeth->oldspeed) {
+ new_state = 1;
+ switch (phydev->speed) {
+ case SPEED_1000:
+ tempval = ((tempval &
+ ~(MACCFG2_INTERFACE_MODE_MASK)) |
+ MACCFG2_INTERFACE_MODE_BYTE);
+ break;
+ case SPEED_100:
+ case SPEED_10:
+ tempval = ((tempval &
+ ~(MACCFG2_INTERFACE_MODE_MASK)) |
+ MACCFG2_INTERFACE_MODE_NIBBLE);
+ /* if reduced mode, re-set UPSMR.R10M */
+ if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
+ if (phydev->speed == SPEED_10)
+ upsmr |= UCC_GETH_UPSMR_R10M;
+ else
+ upsmr &= ~UCC_GETH_UPSMR_R10M;
+ }
+ break;
+ default:
+ if (netif_msg_link(ugeth))
+ pr_warn(
+ "%s: Ack! Speed (%d) is not 10/100/1000!",
+ dev->name, phydev->speed);
+ break;
+ }
+ ugeth->oldspeed = phydev->speed;
+ }
+
+ if (!ugeth->oldlink) {
+ new_state = 1;
+ ugeth->oldlink = 1;
+ }
+
+ if (new_state) {
+ /*
+ * To change the MAC configuration we need to disable
+ * the controller. To do so, we have to either grab
+ * ugeth->lock, which is a bad idea since 'graceful
+ * stop' commands might take quite a while, or we can
+ * quiesce driver's activity.
+ */
+ ugeth_quiesce(ugeth);
+ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
+
+ out_be32(&ug_regs->maccfg2, tempval);
+ out_be32(&uf_regs->upsmr, upsmr);
+
+ ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
+ ugeth_activate(ugeth);
+ }
+ } else if (ugeth->oldlink) {
+ new_state = 1;
+ ugeth->oldlink = 0;
+ ugeth->oldspeed = 0;
+ ugeth->oldduplex = -1;
+ }
+
+ if (new_state && netif_msg_link(ugeth))
+ phy_print_status(phydev);
+}
+
+/* Initialize TBI PHY interface for communicating with the
+ * SERDES lynx PHY on the chip. We communicate with this PHY
+ * through the MDIO bus on each controller, treating it as a
+ * "normal" PHY at the address found in the UTBIPA register. We assume
+ * that the UTBIPA register is valid. Either the MDIO bus code will set
+ * it to a value that doesn't conflict with other PHYs on the bus, or the
+ * value doesn't matter, as there are no other PHYs on the bus.
+ */
+static void uec_configure_serdes(struct net_device *dev)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+ struct phy_device *tbiphy;
+
+ if (!ug_info->tbi_node) {
+ dev_warn(&dev->dev, "SGMII mode requires that the device "
+ "tree specify a tbi-handle\n");
+ return;
+ }
+
+ tbiphy = of_phy_find_device(ug_info->tbi_node);
+ if (!tbiphy) {
+ dev_err(&dev->dev, "error: Could not get TBI device\n");
+ return;
+ }
+
+ /*
+ * If the link is already up, we must already be ok, and don't need to
+ * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
+ * everything for us? Resetting it takes the link down and requires
+ * several seconds for it to come back.
+ */
+ if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) {
+ put_device(&tbiphy->mdio.dev);
+ return;
+ }
+
+ /* Single clock mode, MII mode off (for SerDes communication) */
+ phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS);
+
+ phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);
+
+ phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS);
+
+ put_device(&tbiphy->mdio.dev);
+}
+
+/* Configure the PHY for dev.
+ * Returns 0 on success, a negative errno on failure.
+ */
+static int init_phy(struct net_device *dev)
+{
+ struct ucc_geth_private *priv = netdev_priv(dev);
+ struct ucc_geth_info *ug_info = priv->ug_info;
+ struct phy_device *phydev;
+
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+
+ phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0,
+ priv->phy_interface);
+ if (!phydev) {
+ dev_err(&dev->dev, "Could not attach to PHY\n");
+ return -ENODEV;
+ }
+
+ if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII)
+ uec_configure_serdes(dev);
+
+ phy_set_max_speed(phydev, priv->max_speed);
+
+ priv->phydev = phydev;
+
+ return 0;
+}
+
+static void ugeth_dump_regs(struct ucc_geth_private *ugeth)
+{
+#ifdef DEBUG
+ ucc_fast_dump_regs(ugeth->uccf);
+ dump_regs(ugeth);
+ dump_bds(ugeth);
+#endif
+}
+
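+/* Clear the group or individual hash filter and free all address containers
+ * queued for it, pausing the UCC around the update if it is running.
+ */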
+static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private *
+ ugeth,
+ enum enet_addr_type
+ enet_addr_type)
+{
+ struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
+ struct ucc_fast_private *uccf;
+ enum comm_dir comm_dir;
+ struct list_head *p_lh;
+ u16 i, num;
+ u32 __iomem *addr_h;
+ u32 __iomem *addr_l;
+ u8 *p_counter;
+
+ uccf = ugeth->uccf;
+
+ p_82xx_addr_filt =
+ (struct ucc_geth_82xx_address_filtering_pram __iomem *)
+ ugeth->p_rx_glbl_pram->addressfiltering;
+
+ if (enet_addr_type == ENET_ADDR_TYPE_GROUP) {
+ addr_h = &(p_82xx_addr_filt->gaddr_h);
+ addr_l = &(p_82xx_addr_filt->gaddr_l);
+ p_lh = &ugeth->group_hash_q;
+ p_counter = &(ugeth->numGroupAddrInHash);
+ } else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) {
+ addr_h = &(p_82xx_addr_filt->iaddr_h);
+ addr_l = &(p_82xx_addr_filt->iaddr_l);
+ p_lh = &ugeth->ind_hash_q;
+ p_counter = &(ugeth->numIndAddrInHash);
+ } else
+ return -EINVAL;
+
+ comm_dir = 0;
+ if (uccf->enabled_tx)
+ comm_dir |= COMM_DIR_TX;
+ if (uccf->enabled_rx)
+ comm_dir |= COMM_DIR_RX;
+ if (comm_dir)
+ ugeth_disable(ugeth, comm_dir);
+
+ /* Clear the hash table. */
+ out_be32(addr_h, 0x00000000);
+ out_be32(addr_l, 0x00000000);
+
+ if (!p_lh)
+ return 0;
+
+ num = *p_counter;
+
+ /* Delete all remaining CQ elements */
+ for (i = 0; i < num; i++)
+ put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh)));
+
+ *p_counter = 0;
+
+ if (comm_dir)
+ ugeth_enable(ugeth, comm_dir);
+
+ return 0;
+}
+
+static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *ugeth,
+ u8 paddr_num)
+{
+ ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */
+ return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */
+}
+
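+/* Unmap and free every Rx skb and release the Rx BD rings. */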
+static void ucc_geth_free_rx(struct ucc_geth_private *ugeth)
+{
+ struct ucc_geth_info *ug_info;
+ struct ucc_fast_info *uf_info;
+ u16 i, j;
+ u8 __iomem *bd;
+
+
+ ug_info = ugeth->ug_info;
+ uf_info = &ug_info->uf_info;
+
+ for (i = 0; i < ucc_geth_rx_queues(ugeth->ug_info); i++) {
+ if (ugeth->p_rx_bd_ring[i]) {
+ /* Return existing data buffers in ring */
+ bd = ugeth->p_rx_bd_ring[i];
+ for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
+ if (ugeth->rx_skbuff[i][j]) {
+ dma_unmap_single(ugeth->dev,
+ in_be32(&((struct qe_bd __iomem *)bd)->buf),
+ ugeth->ug_info->
+ uf_info.max_rx_buf_length +
+ UCC_GETH_RX_DATA_BUF_ALIGNMENT,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(
+ ugeth->rx_skbuff[i][j]);
+ ugeth->rx_skbuff[i][j] = NULL;
+ }
+ bd += sizeof(struct qe_bd);
+ }
+
+ kfree(ugeth->rx_skbuff[i]);
+
+ kfree(ugeth->p_rx_bd_ring[i]);
+ ugeth->p_rx_bd_ring[i] = NULL;
+ }
+ }
+
+}
+
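+/* Unmap and free every pending Tx skb and release the Tx BD rings. */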
+static void ucc_geth_free_tx(struct ucc_geth_private *ugeth)
+{
+ struct ucc_geth_info *ug_info;
+ struct ucc_fast_info *uf_info;
+ u16 i, j;
+ u8 __iomem *bd;
+
+ netdev_reset_queue(ugeth->ndev);
+
+ ug_info = ugeth->ug_info;
+ uf_info = &ug_info->uf_info;
+
+ for (i = 0; i < ucc_geth_tx_queues(ugeth->ug_info); i++) {
+ bd = ugeth->p_tx_bd_ring[i];
+ if (!bd)
+ continue;
+ for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
+ if (ugeth->tx_skbuff[i][j]) {
+ dma_unmap_single(ugeth->dev,
+ in_be32(&((struct qe_bd __iomem *)bd)->buf),
+ (in_be32((u32 __iomem *)bd) &
+ BD_LENGTH_MASK),
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
+ ugeth->tx_skbuff[i][j] = NULL;
+ }
+ }
+
+ kfree(ugeth->tx_skbuff[i]);
+
+ kfree(ugeth->p_tx_bd_ring[i]);
+ ugeth->p_tx_bd_ring[i] = NULL;
+ }
+
+}
+
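+/* Free the MURAM areas, BD rings, hash queues and other resources owned by
+ * this instance, and unmap its register block.
+ */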
+static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
+{
+ if (!ugeth)
+ return;
+
+ if (ugeth->uccf) {
+ ucc_fast_free(ugeth->uccf);
+ ugeth->uccf = NULL;
+ }
+
+ qe_muram_free_addr(ugeth->p_thread_data_tx);
+ ugeth->p_thread_data_tx = NULL;
+
+ qe_muram_free_addr(ugeth->p_thread_data_rx);
+ ugeth->p_thread_data_rx = NULL;
+
+ qe_muram_free_addr(ugeth->p_exf_glbl_param);
+ ugeth->p_exf_glbl_param = NULL;
+
+ qe_muram_free_addr(ugeth->p_rx_glbl_pram);
+ ugeth->p_rx_glbl_pram = NULL;
+
+ qe_muram_free_addr(ugeth->p_tx_glbl_pram);
+ ugeth->p_tx_glbl_pram = NULL;
+
+ qe_muram_free_addr(ugeth->p_send_q_mem_reg);
+ ugeth->p_send_q_mem_reg = NULL;
+
+ qe_muram_free_addr(ugeth->p_scheduler);
+ ugeth->p_scheduler = NULL;
+
+ qe_muram_free_addr(ugeth->p_tx_fw_statistics_pram);
+ ugeth->p_tx_fw_statistics_pram = NULL;
+
+ qe_muram_free_addr(ugeth->p_rx_fw_statistics_pram);
+ ugeth->p_rx_fw_statistics_pram = NULL;
+
+ qe_muram_free_addr(ugeth->p_rx_irq_coalescing_tbl);
+ ugeth->p_rx_irq_coalescing_tbl = NULL;
+
+ qe_muram_free_addr(ugeth->p_rx_bd_qs_tbl);
+ ugeth->p_rx_bd_qs_tbl = NULL;
+
+ if (ugeth->p_init_enet_param_shadow) {
+ return_init_enet_entries(ugeth,
+ &(ugeth->p_init_enet_param_shadow->
+ rxthread[0]),
+ ENET_INIT_PARAM_MAX_ENTRIES_RX,
+ ugeth->ug_info->riscRx, 1);
+ return_init_enet_entries(ugeth,
+ &(ugeth->p_init_enet_param_shadow->
+ txthread[0]),
+ ENET_INIT_PARAM_MAX_ENTRIES_TX,
+ ugeth->ug_info->riscTx, 0);
+ kfree(ugeth->p_init_enet_param_shadow);
+ ugeth->p_init_enet_param_shadow = NULL;
+ }
+ ucc_geth_free_tx(ugeth);
+ ucc_geth_free_rx(ugeth);
+ while (!list_empty(&ugeth->group_hash_q))
+ put_enet_addr_container(ENET_ADDR_CONT_ENTRY
+ (dequeue(&ugeth->group_hash_q)));
+ while (!list_empty(&ugeth->ind_hash_q))
+ put_enet_addr_container(ENET_ADDR_CONT_ENTRY
+ (dequeue(&ugeth->ind_hash_q)));
+ if (ugeth->ug_regs) {
+ iounmap(ugeth->ug_regs);
+ ugeth->ug_regs = NULL;
+ }
+}
+
+static void ucc_geth_set_multi(struct net_device *dev)
+{
+ struct ucc_geth_private *ugeth;
+ struct netdev_hw_addr *ha;
+ struct ucc_fast __iomem *uf_regs;
+ struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
+
+ ugeth = netdev_priv(dev);
+
+ uf_regs = ugeth->uccf->uf_regs;
+
+ if (dev->flags & IFF_PROMISC) {
+ setbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO);
+ } else {
+ clrbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO);
+
+ p_82xx_addr_filt =
+ (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
+ p_rx_glbl_pram->addressfiltering;
+
+ if (dev->flags & IFF_ALLMULTI) {
+ /* Catch all multicast addresses, so set the
+ * filter to all 1's.
+ */
+ out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff);
+ out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff);
+ } else {
+ /* Clear filter and add the addresses in the list.
+ */
+ out_be32(&p_82xx_addr_filt->gaddr_h, 0x0);
+ out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);
+
+ netdev_for_each_mc_addr(ha, dev) {
+ /* Ask CPM to run CRC and set bit in
+ * filter mask.
+ */
+ hw_add_addr_in_hash(ugeth, ha->addr);
+ }
+ }
+ }
+}
+
+static void ucc_geth_stop(struct ucc_geth_private *ugeth)
+{
+ struct ucc_geth __iomem *ug_regs = ugeth->ug_regs;
+ struct phy_device *phydev = ugeth->phydev;
+
+ ugeth_vdbg("%s: IN", __func__);
+
+ /*
+ * Tell the kernel the link is down.
+ * Must be done before disabling the controller
+ * or deadlock may happen.
+ */
+ phy_stop(phydev);
+
+ /* Disable the controller */
+ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
+
+ /* Mask all interrupts */
+ out_be32(ugeth->uccf->p_uccm, 0x00000000);
+
+ /* Clear all interrupts */
+ out_be32(ugeth->uccf->p_ucce, 0xffffffff);
+
+ /* Disable Rx and Tx */
+ clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
+
+ ucc_geth_memclean(ugeth);
+}
+
+static int ucc_struct_init(struct ucc_geth_private *ugeth)
+{
+ struct ucc_geth_info *ug_info;
+ struct ucc_fast_info *uf_info;
+ int i;
+
+ ug_info = ugeth->ug_info;
+ uf_info = &ug_info->uf_info;
+
+ /* Rx BD lengths */
+ for (i = 0; i < ucc_geth_rx_queues(ug_info); i++) {
+ if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) ||
+ (ug_info->bdRingLenRx[i] %
+ UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) {
+ if (netif_msg_probe(ugeth))
+ pr_err("Rx BD ring length must be multiple of 4, no smaller than 8\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Tx BD lengths */
+ for (i = 0; i < ucc_geth_tx_queues(ug_info); i++) {
+ if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) {
+ if (netif_msg_probe(ugeth))
+ pr_err("Tx BD ring length must be no smaller than 2\n");
+ return -EINVAL;
+ }
+ }
+
+ /* mrblr */
+ if ((uf_info->max_rx_buf_length == 0) ||
+ (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) {
+ if (netif_msg_probe(ugeth))
+ pr_err("max_rx_buf_length must be non-zero multiple of 128\n");
+ return -EINVAL;
+ }
+
+ /* num Tx queues */
+ if (ucc_geth_tx_queues(ug_info) > NUM_TX_QUEUES) {
+ if (netif_msg_probe(ugeth))
+ pr_err("number of tx queues too large\n");
+ return -EINVAL;
+ }
+
+ /* num Rx queues */
+ if (ucc_geth_rx_queues(ug_info) > NUM_RX_QUEUES) {
+ if (netif_msg_probe(ugeth))
+ pr_err("number of rx queues too large\n");
+ return -EINVAL;
+ }
+
+ /* l2qt */
+ for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) {
+ if (ug_info->l2qt[i] >= ucc_geth_rx_queues(ug_info)) {
+ if (netif_msg_probe(ugeth))
+ pr_err("VLAN priority table entry must not be larger than number of Rx queues\n");
+ return -EINVAL;
+ }
+ }
+
+ /* l3qt */
+ for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) {
+ if (ug_info->l3qt[i] >= ucc_geth_rx_queues(ug_info)) {
+ if (netif_msg_probe(ugeth))
+ pr_err("IP priority table entry must not be larger than number of Rx queues\n");
+ return -EINVAL;
+ }
+ }
+
+ if (ug_info->cam && !ug_info->ecamptr) {
+ if (netif_msg_probe(ugeth))
+ pr_err("If cam mode is chosen, must supply cam ptr\n");
+ return -EINVAL;
+ }
+
+ if ((ug_info->numStationAddresses !=
+ UCC_GETH_NUM_OF_STATION_ADDRESSES_1) &&
+ ug_info->rxExtendedFiltering) {
+ if (netif_msg_probe(ugeth))
+ pr_err("Number of station addresses greater than 1 not allowed in extended parsing mode\n");
+ return -EINVAL;
+ }
+
+ /* Generate uccm_mask for receive */
+ uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */
+ for (i = 0; i < ucc_geth_rx_queues(ug_info); i++)
+ uf_info->uccm_mask |= (UCC_GETH_UCCE_RXF0 << i);
+
+ for (i = 0; i < ucc_geth_tx_queues(ug_info); i++)
+ uf_info->uccm_mask |= (UCC_GETH_UCCE_TXB0 << i);
+ /* Initialize the general fast UCC block. */
+ if (ucc_fast_init(uf_info, &ugeth->uccf)) {
+ if (netif_msg_probe(ugeth))
+ pr_err("Failed to init uccf\n");
+ return -ENOMEM;
+ }
+
+ /* read the number of risc engines, update the riscTx and riscRx
+ * if there are 4 riscs in QE
+ */
+ if (qe_get_num_of_risc() == 4) {
+ ug_info->riscTx = QE_RISC_ALLOCATION_FOUR_RISCS;
+ ug_info->riscRx = QE_RISC_ALLOCATION_FOUR_RISCS;
+ }
+
+ ugeth->ug_regs = ioremap(uf_info->regs, sizeof(*ugeth->ug_regs));
+ if (!ugeth->ug_regs) {
+ if (netif_msg_probe(ugeth))
+ pr_err("Failed to ioremap regs\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int ucc_geth_alloc_tx(struct ucc_geth_private *ugeth)
+{
+ struct ucc_geth_info *ug_info;
+ struct ucc_fast_info *uf_info;
+ int length;
+ u16 i, j;
+ u8 __iomem *bd;
+
+ ug_info = ugeth->ug_info;
+ uf_info = &ug_info->uf_info;
+
+ /* Allocate Tx bds */
+ for (j = 0; j < ucc_geth_tx_queues(ug_info); j++) {
+ u32 align = max(UCC_GETH_TX_BD_RING_ALIGNMENT,
+ UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT);
+ u32 alloc;
+
+ length = ug_info->bdRingLenTx[j] * sizeof(struct qe_bd);
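+		/* kmalloc() returns naturally aligned memory for power-of-two
+		 * sizes, so rounding the allocation up to a power of two also
+		 * satisfies the BD ring alignment requirement.
+		 */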
+ alloc = round_up(length, align);
+ alloc = roundup_pow_of_two(alloc);
+
+ ugeth->p_tx_bd_ring[j] = kmalloc(alloc, GFP_KERNEL);
+
+ if (!ugeth->p_tx_bd_ring[j]) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not allocate memory for Tx bd rings\n");
+ return -ENOMEM;
+ }
+ /* Zero unused end of bd ring, according to spec */
+ memset(ugeth->p_tx_bd_ring[j] + length, 0, alloc - length);
+ }
+
+ /* Init Tx bds */
+ for (j = 0; j < ucc_geth_tx_queues(ug_info); j++) {
+ /* Setup the skbuff rings */
+ ugeth->tx_skbuff[j] =
+ kcalloc(ugeth->ug_info->bdRingLenTx[j],
+ sizeof(struct sk_buff *), GFP_KERNEL);
+
+ if (ugeth->tx_skbuff[j] == NULL) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Could not allocate tx_skbuff\n");
+ return -ENOMEM;
+ }
+
+ ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
+ bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
+ for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
+ /* clear bd buffer */
+ out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
+ /* set bd status and length */
+ out_be32((u32 __iomem *)bd, 0);
+ bd += sizeof(struct qe_bd);
+ }
+ bd -= sizeof(struct qe_bd);
+ /* set bd status and length */
+ out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */
+ }
+
+ return 0;
+}
+
+static int ucc_geth_alloc_rx(struct ucc_geth_private *ugeth)
+{
+ struct ucc_geth_info *ug_info;
+ struct ucc_fast_info *uf_info;
+ int length;
+ u16 i, j;
+ u8 __iomem *bd;
+
+ ug_info = ugeth->ug_info;
+ uf_info = &ug_info->uf_info;
+
+ /* Allocate Rx bds */
+ for (j = 0; j < ucc_geth_rx_queues(ug_info); j++) {
+ u32 align = UCC_GETH_RX_BD_RING_ALIGNMENT;
+ u32 alloc;
+
+ length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd);
+ alloc = round_up(length, align);
+ alloc = roundup_pow_of_two(alloc);
+
+ ugeth->p_rx_bd_ring[j] = kmalloc(alloc, GFP_KERNEL);
+ if (!ugeth->p_rx_bd_ring[j]) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not allocate memory for Rx bd rings\n");
+ return -ENOMEM;
+ }
+ }
+
+ /* Init Rx bds */
+ for (j = 0; j < ucc_geth_rx_queues(ug_info); j++) {
+ /* Setup the skbuff rings */
+ ugeth->rx_skbuff[j] =
+ kcalloc(ugeth->ug_info->bdRingLenRx[j],
+ sizeof(struct sk_buff *), GFP_KERNEL);
+
+ if (ugeth->rx_skbuff[j] == NULL) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Could not allocate rx_skbuff\n");
+ return -ENOMEM;
+ }
+
+ ugeth->skb_currx[j] = 0;
+ bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
+ for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
+ /* set bd status and length */
+ out_be32((u32 __iomem *)bd, R_I);
+ /* clear bd buffer */
+ out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
+ bd += sizeof(struct qe_bd);
+ }
+ bd -= sizeof(struct qe_bd);
+ /* set bd status and length */
+ out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */
+ }
+
+ return 0;
+}
+
+static int ucc_geth_startup(struct ucc_geth_private *ugeth)
+{
+ struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
+ struct ucc_geth_init_pram __iomem *p_init_enet_pram;
+ struct ucc_fast_private *uccf;
+ struct ucc_geth_info *ug_info;
+ struct ucc_fast_info *uf_info;
+ struct ucc_fast __iomem *uf_regs;
+ struct ucc_geth __iomem *ug_regs;
+ int ret_val = -EINVAL;
+ u32 remoder = UCC_GETH_REMODER_INIT;
+ u32 init_enet_pram_offset, cecr_subblock, command;
+ u32 ifstat, i, j, size, l2qt, l3qt;
+ u16 temoder = UCC_GETH_TEMODER_INIT;
+ u8 function_code = 0;
+ u8 __iomem *endOfRing;
+ u8 numThreadsRxNumerical, numThreadsTxNumerical;
+ s32 rx_glbl_pram_offset, tx_glbl_pram_offset;
+
+ ugeth_vdbg("%s: IN", __func__);
+ uccf = ugeth->uccf;
+ ug_info = ugeth->ug_info;
+ uf_info = &ug_info->uf_info;
+ uf_regs = uccf->uf_regs;
+ ug_regs = ugeth->ug_regs;
+
+ numThreadsRxNumerical = ucc_geth_thread_count(ug_info->numThreadsRx);
+ if (!numThreadsRxNumerical) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Bad number of Rx threads value\n");
+ return -EINVAL;
+ }
+
+ numThreadsTxNumerical = ucc_geth_thread_count(ug_info->numThreadsTx);
+ if (!numThreadsTxNumerical) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Bad number of Tx threads value\n");
+ return -EINVAL;
+ }
+
+ /* Calculate rx_extended_features */
+ ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck ||
+ ug_info->ipAddressAlignment ||
+ (ug_info->numStationAddresses !=
+ UCC_GETH_NUM_OF_STATION_ADDRESSES_1);
+
+ ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features ||
+ (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP) ||
+ (ug_info->vlanOperationNonTagged !=
+ UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP);
+
+ init_default_reg_vals(&uf_regs->upsmr,
+ &ug_regs->maccfg1, &ug_regs->maccfg2);
+
+ /* Set UPSMR */
+ /* For more details see the hardware spec. */
+ init_rx_parameters(ug_info->bro,
+ ug_info->rsh, ug_info->pro, &uf_regs->upsmr);
+
+ /* We're going to ignore other registers for now, */
+ /* except as needed to get up and running */
+
+ /* Set MACCFG1 */
+ /* For more details see the hardware spec. */
+ init_flow_control_params(ug_info->aufc,
+ ug_info->receiveFlowControl,
+ ug_info->transmitFlowControl,
+ ug_info->pausePeriod,
+ ug_info->extensionField,
+ &uf_regs->upsmr,
+ &ug_regs->uempr, &ug_regs->maccfg1);
+
+ setbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
+
+ /* Set IPGIFG */
+ /* For more details see the hardware spec. */
+ ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1,
+ ug_info->nonBackToBackIfgPart2,
+ ug_info->
+ miminumInterFrameGapEnforcement,
+ ug_info->backToBackInterFrameGap,
+ &ug_regs->ipgifg);
+ if (ret_val != 0) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("IPGIFG initialization parameter too large\n");
+ return ret_val;
+ }
+
+ /* Set HAFDUP */
+ /* For more details see the hardware spec. */
+ ret_val = init_half_duplex_params(ug_info->altBeb,
+ ug_info->backPressureNoBackoff,
+ ug_info->noBackoff,
+ ug_info->excessDefer,
+ ug_info->altBebTruncation,
+ ug_info->maxRetransmission,
+ ug_info->collisionWindow,
+ &ug_regs->hafdup);
+ if (ret_val != 0) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Half Duplex initialization parameter too large\n");
+ return ret_val;
+ }
+
+ /* Set IFSTAT */
+ /* For more details see the hardware spec. */
+ /* Read only - resets upon read */
+ ifstat = in_be32(&ug_regs->ifstat);
+
+ /* Clear UEMPR */
+ /* For more details see the hardware spec. */
+ out_be32(&ug_regs->uempr, 0);
+
+ /* Set UESCR */
+ /* For more details see the hardware spec. */
+ init_hw_statistics_gathering_mode((ug_info->statisticsMode &
+ UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE),
+ 0, &uf_regs->upsmr, &ug_regs->uescr);
+
+ ret_val = ucc_geth_alloc_tx(ugeth);
+ if (ret_val != 0)
+ return ret_val;
+
+ ret_val = ucc_geth_alloc_rx(ugeth);
+ if (ret_val != 0)
+ return ret_val;
+
+ /*
+ * Global PRAM
+ */
+ /* Tx global PRAM */
+ /* Allocate global tx parameter RAM page */
+ tx_glbl_pram_offset =
+ qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram),
+ UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT);
+ if (tx_glbl_pram_offset < 0) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not allocate DPRAM memory for p_tx_glbl_pram\n");
+ return -ENOMEM;
+ }
+ ugeth->p_tx_glbl_pram = qe_muram_addr(tx_glbl_pram_offset);
+ /* Fill global PRAM */
+
+ /* TQPTR */
+ /* Size varies with number of Tx threads */
+ ugeth->thread_dat_tx_offset =
+ qe_muram_alloc(numThreadsTxNumerical *
+ sizeof(struct ucc_geth_thread_data_tx) +
+ 32 * (numThreadsTxNumerical == 1),
+ UCC_GETH_THREAD_DATA_ALIGNMENT);
+ if (IS_ERR_VALUE(ugeth->thread_dat_tx_offset)) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not allocate DPRAM memory for p_thread_data_tx\n");
+ return -ENOMEM;
+ }
+
+ ugeth->p_thread_data_tx =
+ (struct ucc_geth_thread_data_tx __iomem *) qe_muram_addr(ugeth->
+ thread_dat_tx_offset);
+ out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset);
+
+ /* vtagtable */
+ for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++)
+ out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i],
+ ug_info->vtagtable[i]);
+
+ /* iphoffset */
+ for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++)
+ out_8(&ugeth->p_tx_glbl_pram->iphoffset[i],
+ ug_info->iphoffset[i]);
+
+ /* SQPTR */
+ /* Size varies with number of Tx queues */
+ ugeth->send_q_mem_reg_offset =
+ qe_muram_alloc(ucc_geth_tx_queues(ug_info) *
+ sizeof(struct ucc_geth_send_queue_qd),
+ UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
+ if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not allocate DPRAM memory for p_send_q_mem_reg\n");
+ return -ENOMEM;
+ }
+
+ ugeth->p_send_q_mem_reg =
+ (struct ucc_geth_send_queue_mem_region __iomem *) qe_muram_addr(ugeth->
+ send_q_mem_reg_offset);
+ out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset);
+
+ /* Setup the table */
+ /* Assume BD rings are already established */
+ for (i = 0; i < ucc_geth_tx_queues(ug_info); i++) {
+ endOfRing =
+ ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] -
+ 1) * sizeof(struct qe_bd);
+ out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
+ (u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
+ out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
+ last_bd_completed_address,
+ (u32) virt_to_phys(endOfRing));
+ }
+
+ /* schedulerbasepointer */
+
+ if (ucc_geth_tx_queues(ug_info) > 1) {
+ /* scheduler exists only if more than 1 tx queue */
+ ugeth->scheduler_offset =
+ qe_muram_alloc(sizeof(struct ucc_geth_scheduler),
+ UCC_GETH_SCHEDULER_ALIGNMENT);
+ if (IS_ERR_VALUE(ugeth->scheduler_offset)) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not allocate DPRAM memory for p_scheduler\n");
+ return -ENOMEM;
+ }
+
+ ugeth->p_scheduler =
+ (struct ucc_geth_scheduler __iomem *) qe_muram_addr(ugeth->
+ scheduler_offset);
+ out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer,
+ ugeth->scheduler_offset);
+
+ /* Set values in scheduler */
+ out_be32(&ugeth->p_scheduler->mblinterval,
+ ug_info->mblinterval);
+ out_be16(&ugeth->p_scheduler->nortsrbytetime,
+ ug_info->nortsrbytetime);
+ out_8(&ugeth->p_scheduler->fracsiz, ug_info->fracsiz);
+ out_8(&ugeth->p_scheduler->strictpriorityq,
+ ug_info->strictpriorityq);
+ out_8(&ugeth->p_scheduler->txasap, ug_info->txasap);
+ out_8(&ugeth->p_scheduler->extrabw, ug_info->extrabw);
+ for (i = 0; i < NUM_TX_QUEUES; i++)
+ out_8(&ugeth->p_scheduler->weightfactor[i],
+ ug_info->weightfactor[i]);
+
+ /* Set pointers to cpucount registers in scheduler */
+ ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0);
+ ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1);
+ ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2);
+ ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3);
+ ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4);
+ ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5);
+ ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6);
+ ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7);
+ }
+
+ /* schedulerbasepointer */
+ /* TxRMON_PTR (statistics) */
+ if (ug_info->
+ statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
+ ugeth->tx_fw_statistics_pram_offset =
+ qe_muram_alloc(sizeof
+ (struct ucc_geth_tx_firmware_statistics_pram),
+ UCC_GETH_TX_STATISTICS_ALIGNMENT);
+ if (IS_ERR_VALUE(ugeth->tx_fw_statistics_pram_offset)) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not allocate DPRAM memory for p_tx_fw_statistics_pram\n");
+ return -ENOMEM;
+ }
+ ugeth->p_tx_fw_statistics_pram =
+ (struct ucc_geth_tx_firmware_statistics_pram __iomem *)
+ qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
+ }
+
+ /* temoder */
+ /* Already has speed set */
+
+ if (ucc_geth_tx_queues(ug_info) > 1)
+ temoder |= TEMODER_SCHEDULER_ENABLE;
+ if (ug_info->ipCheckSumGenerate)
+ temoder |= TEMODER_IP_CHECKSUM_GENERATE;
+ temoder |= ((ucc_geth_tx_queues(ug_info) - 1) << TEMODER_NUM_OF_QUEUES_SHIFT);
+ out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder);
+
+ /* Function code register value to be used later */
+ function_code = UCC_BMR_BO_BE | UCC_BMR_GBL;
+ /* Required for QE */
+
+ /* function code register */
+ out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24);
+
+ /* Rx global PRAM */
+ /* Allocate global rx parameter RAM page */
+ rx_glbl_pram_offset =
+ qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram),
+ UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT);
+ if (rx_glbl_pram_offset < 0) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not allocate DPRAM memory for p_rx_glbl_pram\n");
+ return -ENOMEM;
+ }
+ ugeth->p_rx_glbl_pram = qe_muram_addr(rx_glbl_pram_offset);
+ /* Fill global PRAM */
+
+ /* RQPTR */
+ /* Size varies with number of Rx threads */
+ ugeth->thread_dat_rx_offset =
+ qe_muram_alloc(numThreadsRxNumerical *
+ sizeof(struct ucc_geth_thread_data_rx),
+ UCC_GETH_THREAD_DATA_ALIGNMENT);
+ if (IS_ERR_VALUE(ugeth->thread_dat_rx_offset)) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not allocate DPRAM memory for p_thread_data_rx\n");
+ return -ENOMEM;
+ }
+
+ ugeth->p_thread_data_rx =
+ (struct ucc_geth_thread_data_rx __iomem *) qe_muram_addr(ugeth->
+ thread_dat_rx_offset);
+ out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset);
+
+ /* typeorlen */
+ out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen);
+
+ /* rxrmonbaseptr (statistics) */
+ if (ug_info->
+ statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) {
+ ugeth->rx_fw_statistics_pram_offset =
+ qe_muram_alloc(sizeof
+ (struct ucc_geth_rx_firmware_statistics_pram),
+ UCC_GETH_RX_STATISTICS_ALIGNMENT);
+ if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not allocate DPRAM memory for p_rx_fw_statistics_pram\n");
+ return -ENOMEM;
+ }
+ ugeth->p_rx_fw_statistics_pram =
+ (struct ucc_geth_rx_firmware_statistics_pram __iomem *)
+ qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
+ }
+
+ /* intCoalescingPtr */
+
+ /* Size varies with number of Rx queues */
+ ugeth->rx_irq_coalescing_tbl_offset =
+ qe_muram_alloc(ucc_geth_rx_queues(ug_info) *
+ sizeof(struct ucc_geth_rx_interrupt_coalescing_entry)
+ + 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT);
+ if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not allocate DPRAM memory for p_rx_irq_coalescing_tbl\n");
+ return -ENOMEM;
+ }
+
+ ugeth->p_rx_irq_coalescing_tbl =
+ (struct ucc_geth_rx_interrupt_coalescing_table __iomem *)
+ qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset);
+ out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr,
+ ugeth->rx_irq_coalescing_tbl_offset);
+
+ /* Fill interrupt coalescing table */
+ for (i = 0; i < ucc_geth_rx_queues(ug_info); i++) {
+ out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
+ interruptcoalescingmaxvalue,
+ ug_info->interruptcoalescingmaxvalue[i]);
+ out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
+ interruptcoalescingcounter,
+ ug_info->interruptcoalescingmaxvalue[i]);
+ }
+
+ /* MRBLR */
+ init_max_rx_buff_len(uf_info->max_rx_buf_length,
+ &ugeth->p_rx_glbl_pram->mrblr);
+ /* MFLR */
+ out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength);
+ /* MINFLR */
+ init_min_frame_len(ug_info->minFrameLength,
+ &ugeth->p_rx_glbl_pram->minflr,
+ &ugeth->p_rx_glbl_pram->mrblr);
+ /* MAXD1 */
+ out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length);
+ /* MAXD2 */
+ out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length);
+
+ /* l2qt */
+ l2qt = 0;
+ for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++)
+ l2qt |= (ug_info->l2qt[i] << (28 - 4 * i));
+ out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt);
+
+ /* l3qt */
+ for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) {
+ l3qt = 0;
+ for (i = 0; i < 8; i++)
+ l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i));
+ out_be32(&ugeth->p_rx_glbl_pram->l3qt[j/8], l3qt);
+ }
+
+ /* vlantype */
+ out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype);
+
+ /* vlantci */
+ out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci);
+
+ /* ecamptr */
+ out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr);
+
+ /* RBDQPTR */
+ /* Size varies with number of Rx queues */
+ ugeth->rx_bd_qs_tbl_offset =
+ qe_muram_alloc(ucc_geth_rx_queues(ug_info) *
+ (sizeof(struct ucc_geth_rx_bd_queues_entry) +
+ sizeof(struct ucc_geth_rx_prefetched_bds)),
+ UCC_GETH_RX_BD_QUEUES_ALIGNMENT);
+ if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not allocate DPRAM memory for p_rx_bd_qs_tbl\n");
+ return -ENOMEM;
+ }
+
+ ugeth->p_rx_bd_qs_tbl =
+ (struct ucc_geth_rx_bd_queues_entry __iomem *) qe_muram_addr(ugeth->
+ rx_bd_qs_tbl_offset);
+ out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset);
+
+ /* Setup the table */
+ /* Assume BD rings are already established */
+ for (i = 0; i < ucc_geth_rx_queues(ug_info); i++) {
+ out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
+ (u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
+ /* rest of fields handled by QE */
+ }
+
+ /* remoder */
+ /* Already has speed set */
+
+ if (ugeth->rx_extended_features)
+ remoder |= REMODER_RX_EXTENDED_FEATURES;
+ if (ug_info->rxExtendedFiltering)
+ remoder |= REMODER_RX_EXTENDED_FILTERING;
+ if (ug_info->dynamicMaxFrameLength)
+ remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH;
+ if (ug_info->dynamicMinFrameLength)
+ remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH;
+ remoder |=
+ ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT;
+ remoder |=
+ ug_info->
+ vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT;
+ remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT;
+ remoder |= ((ucc_geth_rx_queues(ug_info) - 1) << REMODER_NUM_OF_QUEUES_SHIFT);
+ if (ug_info->ipCheckSumCheck)
+ remoder |= REMODER_IP_CHECKSUM_CHECK;
+ if (ug_info->ipAddressAlignment)
+ remoder |= REMODER_IP_ADDRESS_ALIGNMENT;
+ out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder);
+
+ /* Note that this function must be called */
+ /* ONLY AFTER p_tx_fw_statistics_pram */
+	/* and p_rx_fw_statistics_pram are allocated! */
+ init_firmware_statistics_gathering_mode((ug_info->
+ statisticsMode &
+ UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX),
+ (ug_info->statisticsMode &
+ UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX),
+ &ugeth->p_tx_glbl_pram->txrmonbaseptr,
+ ugeth->tx_fw_statistics_pram_offset,
+ &ugeth->p_rx_glbl_pram->rxrmonbaseptr,
+ ugeth->rx_fw_statistics_pram_offset,
+ &ugeth->p_tx_glbl_pram->temoder,
+ &ugeth->p_rx_glbl_pram->remoder);
+
+ /* function code register */
+ out_8(&ugeth->p_rx_glbl_pram->rstate, function_code);
+
+ /* initialize extended filtering */
+ if (ug_info->rxExtendedFiltering) {
+ if (!ug_info->extendedFilteringChainPointer) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Null Extended Filtering Chain Pointer\n");
+ return -EINVAL;
+ }
+
+ /* Allocate memory for extended filtering Mode Global
+ Parameters */
+ ugeth->exf_glbl_param_offset =
+ qe_muram_alloc(sizeof(struct ucc_geth_exf_global_pram),
+ UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT);
+ if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not allocate DPRAM memory for p_exf_glbl_param\n");
+ return -ENOMEM;
+ }
+
+ ugeth->p_exf_glbl_param =
+ (struct ucc_geth_exf_global_pram __iomem *) qe_muram_addr(ugeth->
+ exf_glbl_param_offset);
+ out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam,
+ ugeth->exf_glbl_param_offset);
+ out_be32(&ugeth->p_exf_glbl_param->l2pcdptr,
+ (u32) ug_info->extendedFilteringChainPointer);
+
+ } else { /* initialize 82xx style address filtering */
+
+ /* Init individual address recognition registers to disabled */
+
+ for (j = 0; j < NUM_OF_PADDRS; j++)
+ ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j);
+
+ p_82xx_addr_filt =
+ (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
+ p_rx_glbl_pram->addressfiltering;
+
+ ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
+ ENET_ADDR_TYPE_GROUP);
+ ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
+ ENET_ADDR_TYPE_INDIVIDUAL);
+ }
+
+ /*
+ * Initialize UCC at QE level
+ */
+
+ command = QE_INIT_TX_RX;
+
+ /* Allocate shadow InitEnet command parameter structure.
+ * This is needed because after the InitEnet command is executed,
+ * the structure in DPRAM is released, because DPRAM is a premium
+ * resource.
+ * This shadow structure keeps a copy of what was done so that the
+ * allocated resources can be released when the channel is freed.
+ */
+ if (!(ugeth->p_init_enet_param_shadow =
+ kzalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not allocate memory for p_UccInitEnetParamShadows\n");
+ return -ENOMEM;
+ }
+
+ /* Fill shadow InitEnet command parameter structure */
+
+ ugeth->p_init_enet_param_shadow->resinit1 =
+ ENET_INIT_PARAM_MAGIC_RES_INIT1;
+ ugeth->p_init_enet_param_shadow->resinit2 =
+ ENET_INIT_PARAM_MAGIC_RES_INIT2;
+ ugeth->p_init_enet_param_shadow->resinit3 =
+ ENET_INIT_PARAM_MAGIC_RES_INIT3;
+ ugeth->p_init_enet_param_shadow->resinit4 =
+ ENET_INIT_PARAM_MAGIC_RES_INIT4;
+ ugeth->p_init_enet_param_shadow->resinit5 =
+ ENET_INIT_PARAM_MAGIC_RES_INIT5;
+ ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
+ ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT;
+ ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
+ ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT;
+
+ ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
+ rx_glbl_pram_offset | ug_info->riscRx;
+ if ((ug_info->largestexternallookupkeysize !=
+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE) &&
+ (ug_info->largestexternallookupkeysize !=
+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES) &&
+ (ug_info->largestexternallookupkeysize !=
+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Invalid largest External Lookup Key Size\n");
+ return -EINVAL;
+ }
+ ugeth->p_init_enet_param_shadow->largestexternallookupkeysize =
+ ug_info->largestexternallookupkeysize;
+ size = sizeof(struct ucc_geth_thread_rx_pram);
+ if (ug_info->rxExtendedFiltering) {
+ size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
+ if (ug_info->largestexternallookupkeysize ==
+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
+ size +=
+ THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
+ if (ug_info->largestexternallookupkeysize ==
+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
+ size +=
+ THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
+ }
+
+ if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth->
+ p_init_enet_param_shadow->rxthread[0]),
+ (u8) (numThreadsRxNumerical + 1)
+ /* Rx needs one extra for terminator */
+ , size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT,
+ ug_info->riscRx, 1)) != 0) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not fill p_init_enet_param_shadow\n");
+ return ret_val;
+ }
+
+ ugeth->p_init_enet_param_shadow->txglobal =
+ tx_glbl_pram_offset | ug_info->riscTx;
+ if ((ret_val =
+ fill_init_enet_entries(ugeth,
+ &(ugeth->p_init_enet_param_shadow->
+ txthread[0]), numThreadsTxNumerical,
+ sizeof(struct ucc_geth_thread_tx_pram),
+ UCC_GETH_THREAD_TX_PRAM_ALIGNMENT,
+ ug_info->riscTx, 0)) != 0) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not fill p_init_enet_param_shadow\n");
+ return ret_val;
+ }
+
+ /* Load Rx bds with buffers */
+ for (i = 0; i < ucc_geth_rx_queues(ug_info); i++) {
+ if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not fill Rx bds with buffers\n");
+ return ret_val;
+ }
+ }
+
+ /* Allocate InitEnet command parameter structure */
+ init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4);
+ if (IS_ERR_VALUE(init_enet_pram_offset)) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not allocate DPRAM memory for p_init_enet_pram\n");
+ return -ENOMEM;
+ }
+ p_init_enet_pram =
+ (struct ucc_geth_init_pram __iomem *) qe_muram_addr(init_enet_pram_offset);
+
+ /* Copy shadow InitEnet command parameter structure into PRAM */
+ out_8(&p_init_enet_pram->resinit1,
+ ugeth->p_init_enet_param_shadow->resinit1);
+ out_8(&p_init_enet_pram->resinit2,
+ ugeth->p_init_enet_param_shadow->resinit2);
+ out_8(&p_init_enet_pram->resinit3,
+ ugeth->p_init_enet_param_shadow->resinit3);
+ out_8(&p_init_enet_pram->resinit4,
+ ugeth->p_init_enet_param_shadow->resinit4);
+ out_be16(&p_init_enet_pram->resinit5,
+ ugeth->p_init_enet_param_shadow->resinit5);
+ out_8(&p_init_enet_pram->largestexternallookupkeysize,
+ ugeth->p_init_enet_param_shadow->largestexternallookupkeysize);
+ out_be32(&p_init_enet_pram->rgftgfrxglobal,
+ ugeth->p_init_enet_param_shadow->rgftgfrxglobal);
+ for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++)
+ out_be32(&p_init_enet_pram->rxthread[i],
+ ugeth->p_init_enet_param_shadow->rxthread[i]);
+ out_be32(&p_init_enet_pram->txglobal,
+ ugeth->p_init_enet_param_shadow->txglobal);
+ for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++)
+ out_be32(&p_init_enet_pram->txthread[i],
+ ugeth->p_init_enet_param_shadow->txthread[i]);
+
+ /* Issue QE command */
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
+ qe_issue_cmd(command, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
+ init_enet_pram_offset);
+
+ /* Free InitEnet command parameter */
+ qe_muram_free(init_enet_pram_offset);
+
+ return 0;
+}
+
+/* This is called by the kernel when a frame is ready for transmission. */
+/* It is pointed to by the netdev_ops->ndo_start_xmit function pointer */
+static netdev_tx_t
+ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+#ifdef CONFIG_UGETH_TX_ON_DEMAND
+ struct ucc_fast_private *uccf;
+#endif
+ u8 __iomem *bd; /* BD pointer */
+ u32 bd_status;
+ u8 txQ = 0;
+ unsigned long flags;
+
+ ugeth_vdbg("%s: IN", __func__);
+
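+	/* Byte Queue Limits accounting; paired with
+	 * netdev_completed_queue() in ucc_geth_tx().
+	 */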
+ netdev_sent_queue(dev, skb->len);
+ spin_lock_irqsave(&ugeth->lock, flags);
+
+ dev->stats.tx_bytes += skb->len;
+
+ /* Start from the next BD that should be filled */
+ bd = ugeth->txBd[txQ];
+ bd_status = in_be32((u32 __iomem *)bd);
+ /* Save the skb pointer so we can free it later */
+ ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb;
+
+ /* Update the current skb pointer (wrapping if this was the last) */
+ ugeth->skb_curtx[txQ] =
+ (ugeth->skb_curtx[txQ] +
+ 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
+
+ /* set up the buffer descriptor */
+ out_be32(&((struct qe_bd __iomem *)bd)->buf,
+ dma_map_single(ugeth->dev, skb->data,
+ skb->len, DMA_TO_DEVICE));
+
+ /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */
+
+ bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len;
+
+ /* set bd status and length */
+ out_be32((u32 __iomem *)bd, bd_status);
+
+ /* Move to next BD in the ring */
+ if (!(bd_status & T_W))
+ bd += sizeof(struct qe_bd);
+ else
+ bd = ugeth->p_tx_bd_ring[txQ];
+
+ /* If the next BD still needs to be cleaned up, then the bds
+ are full. We need to tell the kernel to stop sending us stuff. */
+ if (bd == ugeth->confBd[txQ]) {
+ if (!netif_queue_stopped(dev))
+ netif_stop_queue(dev);
+ }
+
+ ugeth->txBd[txQ] = bd;
+
+ skb_tx_timestamp(skb);
+
+ if (ugeth->p_scheduler) {
+ ugeth->cpucount[txQ]++;
+ /* Indicate to QE that there are more Tx bds ready for
+ transmission */
+ /* This is done by writing a running counter of the bd
+ count to the scheduler PRAM. */
+ out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]);
+ }
+
+#ifdef CONFIG_UGETH_TX_ON_DEMAND
+ uccf = ugeth->uccf;
+ out_be16(uccf->p_utodr, UCC_FAST_TOD);
+#endif
+ spin_unlock_irqrestore(&ugeth->lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit)
+{
+ struct sk_buff *skb;
+ u8 __iomem *bd;
+ u16 length, howmany = 0;
+ u32 bd_status;
+ u8 *bdBuffer;
+ struct net_device *dev;
+
+ ugeth_vdbg("%s: IN", __func__);
+
+ dev = ugeth->ndev;
+
+ /* collect received buffers */
+ bd = ugeth->rxBd[rxQ];
+
+ bd_status = in_be32((u32 __iomem *)bd);
+
+ /* while there are received buffers and BD is full (~R_E) */
+ while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) {
+ bdBuffer = (u8 *) in_be32(&((struct qe_bd __iomem *)bd)->buf);
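+		/* The reported BD length includes the 4-byte FCS; strip it
+		 * before handing the frame up the stack.
+		 */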
+ length = (u16) ((bd_status & BD_LENGTH_MASK) - 4);
+ skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]];
+
+ /* determine whether buffer is first, last, first and last
+ (single buffer frame) or middle (not first and not last) */
+ if (!skb ||
+ (!(bd_status & (R_F | R_L))) ||
+ (bd_status & R_ERRORS_FATAL)) {
+ if (netif_msg_rx_err(ugeth))
+ pr_err("%d: ERROR!!! skb - 0x%08x\n",
+ __LINE__, (u32)skb);
+ dev_kfree_skb(skb);
+
+ ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
+ dev->stats.rx_dropped++;
+ } else {
+ dev->stats.rx_packets++;
+ howmany++;
+
+ /* Prep the skb for the packet */
+ skb_put(skb, length);
+
+ /* Tell the skb what kind of packet this is */
+ skb->protocol = eth_type_trans(skb, ugeth->ndev);
+
+ dev->stats.rx_bytes += length;
+ /* Send the packet up the stack */
+ netif_receive_skb(skb);
+ }
+
+ skb = get_new_skb(ugeth, bd);
+ if (!skb) {
+ if (netif_msg_rx_err(ugeth))
+ pr_warn("No Rx Data Buffer\n");
+ dev->stats.rx_dropped++;
+ break;
+ }
+
+ ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb;
+
+ /* update to point at the next skb */
+ ugeth->skb_currx[rxQ] =
+ (ugeth->skb_currx[rxQ] +
+ 1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]);
+
+ if (bd_status & R_W)
+ bd = ugeth->p_rx_bd_ring[rxQ];
+ else
+ bd += sizeof(struct qe_bd);
+
+ bd_status = in_be32((u32 __iomem *)bd);
+ }
+
+ ugeth->rxBd[rxQ] = bd;
+ return howmany;
+}
+
+static int ucc_geth_tx(struct net_device *dev, u8 txQ)
+{
+ /* Start from the next BD that should be filled */
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+ unsigned int bytes_sent = 0;
+ int howmany = 0;
+ u8 __iomem *bd; /* BD pointer */
+ u32 bd_status;
+
+ bd = ugeth->confBd[txQ];
+ bd_status = in_be32((u32 __iomem *)bd);
+
+ /* Normal processing. */
+ while ((bd_status & T_R) == 0) {
+ struct sk_buff *skb;
+
+ /* BD contains already transmitted buffer. */
+ /* Handle the transmitted buffer and release */
+ /* the BD to be used with the current frame */
+
+ skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
+ if (!skb)
+ break;
+ howmany++;
+ bytes_sent += skb->len;
+ dev->stats.tx_packets++;
+
+ dev_consume_skb_any(skb);
+
+ ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
+ ugeth->skb_dirtytx[txQ] =
+ (ugeth->skb_dirtytx[txQ] +
+ 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
+
+ /* We freed a buffer, so now we can restart transmission */
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+
+ /* Advance the confirmation BD pointer */
+ if (!(bd_status & T_W))
+ bd += sizeof(struct qe_bd);
+ else
+ bd = ugeth->p_tx_bd_ring[txQ];
+ bd_status = in_be32((u32 __iomem *)bd);
+ }
+ ugeth->confBd[txQ] = bd;
+ netdev_completed_queue(dev, howmany, bytes_sent);
+ return 0;
+}
+
+static int ucc_geth_poll(struct napi_struct *napi, int budget)
+{
+ struct ucc_geth_private *ugeth = container_of(napi, struct ucc_geth_private, napi);
+ struct ucc_geth_info *ug_info;
+ int howmany, i;
+
+ ug_info = ugeth->ug_info;
+
+ /* Tx event processing */
+ spin_lock(&ugeth->lock);
+ for (i = 0; i < ucc_geth_tx_queues(ug_info); i++)
+ ucc_geth_tx(ugeth->ndev, i);
+ spin_unlock(&ugeth->lock);
+
+ howmany = 0;
+ for (i = 0; i < ucc_geth_rx_queues(ug_info); i++)
+ howmany += ucc_geth_rx(ugeth, i, budget - howmany);
+
+ if (howmany < budget) {
+ napi_complete_done(napi, howmany);
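+		/* Re-enable the Rx/Tx event interrupts that were masked in
+		 * ucc_geth_irq_handler().
+		 */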
+ setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS | UCCE_TX_EVENTS);
+ }
+
+ return howmany;
+}
+
+static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
+{
+ struct net_device *dev = info;
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+ struct ucc_fast_private *uccf;
+ struct ucc_geth_info *ug_info;
+ register u32 ucce;
+ register u32 uccm;
+
+ ugeth_vdbg("%s: IN", __func__);
+
+ uccf = ugeth->uccf;
+ ug_info = ugeth->ug_info;
+
+ /* read and clear events */
+ ucce = (u32) in_be32(uccf->p_ucce);
+ uccm = (u32) in_be32(uccf->p_uccm);
+ ucce &= uccm;
+ out_be32(uccf->p_ucce, ucce);
+
+ /* check for receive events that require processing */
+ if (ucce & (UCCE_RX_EVENTS | UCCE_TX_EVENTS)) {
+ if (napi_schedule_prep(&ugeth->napi)) {
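+			/* Mask further Rx/Tx events; ucc_geth_poll() unmasks
+			 * them once polling completes.
+			 */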
+ uccm &= ~(UCCE_RX_EVENTS | UCCE_TX_EVENTS);
+ out_be32(uccf->p_uccm, uccm);
+ __napi_schedule(&ugeth->napi);
+ }
+ }
+
+ /* Errors and other events */
+ if (ucce & UCCE_OTHER) {
+ if (ucce & UCC_GETH_UCCE_BSY)
+ dev->stats.rx_errors++;
+ if (ucce & UCC_GETH_UCCE_TXE)
+ dev->stats.tx_errors++;
+ }
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void ucc_netpoll(struct net_device *dev)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+ int irq = ugeth->ug_info->uf_info.irq;
+
+ disable_irq(irq);
+ ucc_geth_irq_handler(irq, dev);
+ enable_irq(irq);
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+static int ucc_geth_set_mac_addr(struct net_device *dev, void *p)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ eth_hw_addr_set(dev, addr->sa_data);
+
+ /*
+ * If device is not running, we will set mac addr register
+ * when opening the device.
+ */
+ if (!netif_running(dev))
+ return 0;
+
+ spin_lock_irq(&ugeth->lock);
+ init_mac_station_addr_regs(dev->dev_addr[0],
+ dev->dev_addr[1],
+ dev->dev_addr[2],
+ dev->dev_addr[3],
+ dev->dev_addr[4],
+ dev->dev_addr[5],
+ &ugeth->ug_regs->macstnaddr1,
+ &ugeth->ug_regs->macstnaddr2);
+ spin_unlock_irq(&ugeth->lock);
+
+ return 0;
+}
+
+static int ucc_geth_init_mac(struct ucc_geth_private *ugeth)
+{
+ struct net_device *dev = ugeth->ndev;
+ int err;
+
+ err = ucc_struct_init(ugeth);
+ if (err) {
+ netif_err(ugeth, ifup, dev, "Cannot configure internal struct, aborting\n");
+ goto err;
+ }
+
+ err = ucc_geth_startup(ugeth);
+ if (err) {
+ netif_err(ugeth, ifup, dev, "Cannot configure net device, aborting\n");
+ goto err;
+ }
+
+ err = adjust_enet_interface(ugeth);
+ if (err) {
+ netif_err(ugeth, ifup, dev, "Cannot configure net device, aborting\n");
+ goto err;
+ }
+
+ /* Set MACSTNADDR1, MACSTNADDR2 */
+ /* For more details see the hardware spec. */
+ init_mac_station_addr_regs(dev->dev_addr[0],
+ dev->dev_addr[1],
+ dev->dev_addr[2],
+ dev->dev_addr[3],
+ dev->dev_addr[4],
+ dev->dev_addr[5],
+ &ugeth->ug_regs->macstnaddr1,
+ &ugeth->ug_regs->macstnaddr2);
+
+ err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
+ if (err) {
+ netif_err(ugeth, ifup, dev, "Cannot enable net device, aborting\n");
+ goto err;
+ }
+
+ return 0;
+err:
+ ucc_geth_stop(ugeth);
+ return err;
+}
+
+/* Called when something needs to use the ethernet device */
+/* Returns 0 for success. */
+static int ucc_geth_open(struct net_device *dev)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+ int err;
+
+ ugeth_vdbg("%s: IN", __func__);
+
+ /* Test station address */
+ if (dev->dev_addr[0] & ENET_GROUP_ADDR) {
+ netif_err(ugeth, ifup, dev,
+ "Multicast address used for station address - is this what you wanted?\n");
+ return -EINVAL;
+ }
+
+ err = init_phy(dev);
+ if (err) {
+ netif_err(ugeth, ifup, dev, "Cannot initialize PHY, aborting\n");
+ return err;
+ }
+
+ err = ucc_geth_init_mac(ugeth);
+ if (err) {
+ netif_err(ugeth, ifup, dev, "Cannot initialize MAC, aborting\n");
+ goto err;
+ }
+
+ err = request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler,
+ 0, "UCC Geth", dev);
+ if (err) {
+ netif_err(ugeth, ifup, dev, "Cannot get IRQ for net device, aborting\n");
+ goto err;
+ }
+
+ phy_start(ugeth->phydev);
+ napi_enable(&ugeth->napi);
+ netdev_reset_queue(dev);
+ netif_start_queue(dev);
+
+ device_set_wakeup_capable(&dev->dev,
+ qe_alive_during_sleep() || ugeth->phydev->irq);
+ device_set_wakeup_enable(&dev->dev, ugeth->wol_en);
+
+ return err;
+
+err:
+ ucc_geth_stop(ugeth);
+ return err;
+}
+
+/* Stops the kernel queue, and halts the controller */
+static int ucc_geth_close(struct net_device *dev)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+
+ ugeth_vdbg("%s: IN", __func__);
+
+ napi_disable(&ugeth->napi);
+
+ cancel_work_sync(&ugeth->timeout_work);
+ ucc_geth_stop(ugeth);
+ phy_disconnect(ugeth->phydev);
+ ugeth->phydev = NULL;
+
+ free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);
+
+ netif_stop_queue(dev);
+ netdev_reset_queue(dev);
+
+ return 0;
+}
+
+/* Reopen device. This will reset the MAC and PHY. */
+static void ucc_geth_timeout_work(struct work_struct *work)
+{
+ struct ucc_geth_private *ugeth;
+ struct net_device *dev;
+
+ ugeth = container_of(work, struct ucc_geth_private, timeout_work);
+ dev = ugeth->ndev;
+
+ ugeth_vdbg("%s: IN", __func__);
+
+ dev->stats.tx_errors++;
+
+ ugeth_dump_regs(ugeth);
+
+ if (dev->flags & IFF_UP) {
+ /*
+ * Must reset MAC *and* PHY. This is done by reopening
+ * the device.
+ */
+ netif_tx_stop_all_queues(dev);
+ ucc_geth_stop(ugeth);
+ ucc_geth_init_mac(ugeth);
+ /* Must start PHY here */
+ phy_start(ugeth->phydev);
+ netif_tx_start_all_queues(dev);
+ }
+
+ netif_tx_schedule_all(dev);
+}
+
+/*
+ * ucc_geth_timeout gets called when a packet has not been
+ * transmitted after a set amount of time.
+ */
+static void ucc_geth_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+
+ schedule_work(&ugeth->timeout_work);
+}
+
+
+#ifdef CONFIG_PM
+
+static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state)
+{
+ struct net_device *ndev = platform_get_drvdata(ofdev);
+ struct ucc_geth_private *ugeth = netdev_priv(ndev);
+
+ if (!netif_running(ndev))
+ return 0;
+
+ netif_device_detach(ndev);
+ napi_disable(&ugeth->napi);
+
+	/*
+	 * Disable the controller, otherwise we'll wake up on any network
+	 * activity.
+	 */
+ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
+
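+	/* For magic-packet wakeup, unmask the magic-packet-detected event,
+	 * enable Magic Packet mode in the MAC and keep the UCC running so
+	 * the wakeup frame can be received.
+	 */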
+ if (ugeth->wol_en & WAKE_MAGIC) {
+ setbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD);
+ setbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE);
+ ucc_fast_enable(ugeth->uccf, COMM_DIR_RX_AND_TX);
+ } else if (!(ugeth->wol_en & WAKE_PHY)) {
+ phy_stop(ugeth->phydev);
+ }
+
+ return 0;
+}
+
+static int ucc_geth_resume(struct platform_device *ofdev)
+{
+ struct net_device *ndev = platform_get_drvdata(ofdev);
+ struct ucc_geth_private *ugeth = netdev_priv(ndev);
+ int err;
+
+ if (!netif_running(ndev))
+ return 0;
+
+ if (qe_alive_during_sleep()) {
+ if (ugeth->wol_en & WAKE_MAGIC) {
+ ucc_fast_disable(ugeth->uccf, COMM_DIR_RX_AND_TX);
+ clrbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE);
+ clrbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD);
+ }
+ ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
+ } else {
+ /*
+ * Full reinitialization is required if QE shuts down
+ * during sleep.
+ */
+ ucc_geth_memclean(ugeth);
+
+ err = ucc_geth_init_mac(ugeth);
+ if (err) {
+ netdev_err(ndev, "Cannot initialize MAC, aborting\n");
+ return err;
+ }
+ }
+
+ ugeth->oldlink = 0;
+ ugeth->oldspeed = 0;
+ ugeth->oldduplex = -1;
+
+ phy_stop(ugeth->phydev);
+ phy_start(ugeth->phydev);
+
+ napi_enable(&ugeth->napi);
+ netif_device_attach(ndev);
+
+ return 0;
+}
+
+#else
+#define ucc_geth_suspend NULL
+#define ucc_geth_resume NULL
+#endif
+
+static phy_interface_t to_phy_interface(const char *phy_connection_type)
+{
+ if (strcasecmp(phy_connection_type, "mii") == 0)
+ return PHY_INTERFACE_MODE_MII;
+ if (strcasecmp(phy_connection_type, "gmii") == 0)
+ return PHY_INTERFACE_MODE_GMII;
+ if (strcasecmp(phy_connection_type, "tbi") == 0)
+ return PHY_INTERFACE_MODE_TBI;
+ if (strcasecmp(phy_connection_type, "rmii") == 0)
+ return PHY_INTERFACE_MODE_RMII;
+ if (strcasecmp(phy_connection_type, "rgmii") == 0)
+ return PHY_INTERFACE_MODE_RGMII;
+ if (strcasecmp(phy_connection_type, "rgmii-id") == 0)
+ return PHY_INTERFACE_MODE_RGMII_ID;
+ if (strcasecmp(phy_connection_type, "rgmii-txid") == 0)
+ return PHY_INTERFACE_MODE_RGMII_TXID;
+ if (strcasecmp(phy_connection_type, "rgmii-rxid") == 0)
+ return PHY_INTERFACE_MODE_RGMII_RXID;
+ if (strcasecmp(phy_connection_type, "rtbi") == 0)
+ return PHY_INTERFACE_MODE_RTBI;
+ if (strcasecmp(phy_connection_type, "sgmii") == 0)
+ return PHY_INTERFACE_MODE_SGMII;
+
+ return PHY_INTERFACE_MODE_MII;
+}
+
+static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (!ugeth->phydev)
+ return -ENODEV;
+
+ return phy_mii_ioctl(ugeth->phydev, rq, cmd);
+}
+
+static const struct net_device_ops ucc_geth_netdev_ops = {
+ .ndo_open = ucc_geth_open,
+ .ndo_stop = ucc_geth_close,
+ .ndo_start_xmit = ucc_geth_start_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_carrier = fixed_phy_change_carrier,
+ .ndo_set_mac_address = ucc_geth_set_mac_addr,
+ .ndo_set_rx_mode = ucc_geth_set_multi,
+ .ndo_tx_timeout = ucc_geth_timeout,
+ .ndo_eth_ioctl = ucc_geth_ioctl,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ucc_netpoll,
+#endif
+};
+
+static int ucc_geth_parse_clock(struct device_node *np, const char *which,
+ enum qe_clock *out)
+{
+ const char *sprop;
+ char buf[24];
+
+ snprintf(buf, sizeof(buf), "%s-clock-name", which);
+ sprop = of_get_property(np, buf, NULL);
+ if (sprop) {
+ *out = qe_clock_source(sprop);
+ } else {
+ u32 val;
+
+ snprintf(buf, sizeof(buf), "%s-clock", which);
+ if (of_property_read_u32(np, buf, &val)) {
+ /* If both *-clock-name and *-clock are missing,
+ * we want to tell people to use *-clock-name.
+ */
+ pr_err("missing %s-clock-name property\n", buf);
+ return -EINVAL;
+ }
+ *out = val;
+ }
+ if (*out < QE_CLK_NONE || *out > QE_CLK24) {
+ pr_err("invalid %s property\n", buf);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int ucc_geth_probe(struct platform_device* ofdev)
+{
+ struct device *device = &ofdev->dev;
+ struct device_node *np = ofdev->dev.of_node;
+ struct net_device *dev = NULL;
+ struct ucc_geth_private *ugeth = NULL;
+ struct ucc_geth_info *ug_info;
+ struct resource res;
+ int err, ucc_num, max_speed = 0;
+ const unsigned int *prop;
+ phy_interface_t phy_interface;
+ static const int enet_to_speed[] = {
+ SPEED_10, SPEED_10, SPEED_10,
+ SPEED_100, SPEED_100, SPEED_100,
+ SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000,
+ };
+ static const phy_interface_t enet_to_phy_interface[] = {
+ PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII,
+ PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII,
+ PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII,
+ PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII,
+ PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI,
+ PHY_INTERFACE_MODE_SGMII,
+ };
+
+ ugeth_vdbg("%s: IN", __func__);
+
+ prop = of_get_property(np, "cell-index", NULL);
+ if (!prop) {
+ prop = of_get_property(np, "device-id", NULL);
+ if (!prop)
+ return -ENODEV;
+ }
+
+ ucc_num = *prop - 1;
+ if ((ucc_num < 0) || (ucc_num > 7))
+ return -ENODEV;
+
+ ug_info = kmemdup(&ugeth_primary_info, sizeof(*ug_info), GFP_KERNEL);
+ if (ug_info == NULL)
+ return -ENOMEM;
+
+ ug_info->uf_info.ucc_num = ucc_num;
+
+ err = ucc_geth_parse_clock(np, "rx", &ug_info->uf_info.rx_clock);
+ if (err)
+ goto err_free_info;
+ err = ucc_geth_parse_clock(np, "tx", &ug_info->uf_info.tx_clock);
+ if (err)
+ goto err_free_info;
+
+ err = of_address_to_resource(np, 0, &res);
+ if (err)
+ goto err_free_info;
+
+ ug_info->uf_info.regs = res.start;
+ ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
+
+ ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (!ug_info->phy_node && of_phy_is_fixed_link(np)) {
+ /*
+ * In the case of a fixed PHY, the DT node associated
+ * to the PHY is the Ethernet MAC DT node.
+ */
+ err = of_phy_register_fixed_link(np);
+ if (err)
+ goto err_free_info;
+ ug_info->phy_node = of_node_get(np);
+ }
+
+ /* Find the TBI PHY node. If it's not there, we don't support SGMII */
+ ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
+
+ /* get the phy interface type, or default to MII */
+ prop = of_get_property(np, "phy-connection-type", NULL);
+ if (!prop) {
+ /* handle interface property present in old trees */
+ prop = of_get_property(ug_info->phy_node, "interface", NULL);
+ if (prop != NULL) {
+ phy_interface = enet_to_phy_interface[*prop];
+ max_speed = enet_to_speed[*prop];
+ } else
+ phy_interface = PHY_INTERFACE_MODE_MII;
+ } else {
+ phy_interface = to_phy_interface((const char *)prop);
+ }
+
+ /* get speed, or derive from PHY interface */
+ if (max_speed == 0)
+ switch (phy_interface) {
+ case PHY_INTERFACE_MODE_GMII:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_TBI:
+ case PHY_INTERFACE_MODE_RTBI:
+ case PHY_INTERFACE_MODE_SGMII:
+ max_speed = SPEED_1000;
+ break;
+ default:
+ max_speed = SPEED_100;
+ break;
+ }
+
+ if (max_speed == SPEED_1000) {
+ unsigned int snums = qe_get_num_of_snums();
+
+ /* configure muram FIFOs for gigabit operation */
+ ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT;
+ ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT;
+ ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT;
+ ug_info->uf_info.utfs = UCC_GETH_UTFS_GIGA_INIT;
+ ug_info->uf_info.utfet = UCC_GETH_UTFET_GIGA_INIT;
+ ug_info->uf_info.utftt = UCC_GETH_UTFTT_GIGA_INIT;
+ ug_info->numThreadsTx = UCC_GETH_NUM_OF_THREADS_4;
+
+		/* If the QE provides 46 or 76 snums, it must support four
+		 * UECs at 1000Base-T simultaneously, so allocate more
+		 * threads to Rx.
+		 */
+ if ((snums == 76) || (snums == 46))
+ ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_6;
+ else
+ ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_4;
+ }
+
+ if (netif_msg_probe(&debug))
+ pr_info("UCC%1d at 0x%8llx (irq = %d)\n",
+ ug_info->uf_info.ucc_num + 1,
+ (u64)ug_info->uf_info.regs,
+ ug_info->uf_info.irq);
+
+ /* Create an ethernet device instance */
+ dev = alloc_etherdev(sizeof(*ugeth));
+
+ if (dev == NULL) {
+ err = -ENOMEM;
+ goto err_deregister_fixed_link;
+ }
+
+ ugeth = netdev_priv(dev);
+ spin_lock_init(&ugeth->lock);
+
+ /* Create CQs for hash tables */
+ INIT_LIST_HEAD(&ugeth->group_hash_q);
+ INIT_LIST_HEAD(&ugeth->ind_hash_q);
+
+ dev_set_drvdata(device, dev);
+
+	/* Set dev->base_addr to the UCC register region */
+ dev->base_addr = (unsigned long)(ug_info->uf_info.regs);
+
+ SET_NETDEV_DEV(dev, device);
+
+ /* Fill in the dev structure */
+ uec_set_ethtool_ops(dev);
+ dev->netdev_ops = &ucc_geth_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+ INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
+ netif_napi_add(dev, &ugeth->napi, ucc_geth_poll);
+ dev->mtu = 1500;
+ dev->max_mtu = 1518;
+
+ ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
+ ugeth->phy_interface = phy_interface;
+ ugeth->max_speed = max_speed;
+
+ /* Carrier starts down, phylib will bring it up */
+ netif_carrier_off(dev);
+
+ err = register_netdev(dev);
+ if (err) {
+ if (netif_msg_probe(ugeth))
+ pr_err("%s: Cannot register net device, aborting\n",
+ dev->name);
+ goto err_free_netdev;
+ }
+
+ of_get_ethdev_address(np, dev);
+
+ ugeth->ug_info = ug_info;
+ ugeth->dev = device;
+ ugeth->ndev = dev;
+ ugeth->node = np;
+
+ return 0;
+
+err_free_netdev:
+ free_netdev(dev);
+err_deregister_fixed_link:
+ if (of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
+ of_node_put(ug_info->tbi_node);
+ of_node_put(ug_info->phy_node);
+err_free_info:
+ kfree(ug_info);
+
+ return err;
+}
+
+static int ucc_geth_remove(struct platform_device* ofdev)
+{
+ struct net_device *dev = platform_get_drvdata(ofdev);
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+ struct device_node *np = ofdev->dev.of_node;
+
+ unregister_netdev(dev);
+ ucc_geth_memclean(ugeth);
+ if (of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
+ of_node_put(ugeth->ug_info->tbi_node);
+ of_node_put(ugeth->ug_info->phy_node);
+ kfree(ugeth->ug_info);
+ free_netdev(dev);
+
+ return 0;
+}
+
+static const struct of_device_id ucc_geth_match[] = {
+ {
+ .type = "network",
+ .compatible = "ucc_geth",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, ucc_geth_match);
+
+static struct platform_driver ucc_geth_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = ucc_geth_match,
+ },
+ .probe = ucc_geth_probe,
+ .remove = ucc_geth_remove,
+ .suspend = ucc_geth_suspend,
+ .resume = ucc_geth_resume,
+};
+
+static int __init ucc_geth_init(void)
+{
+ if (netif_msg_drv(&debug))
+ pr_info(DRV_DESC "\n");
+
+ return platform_driver_register(&ucc_geth_driver);
+}
+
+static void __exit ucc_geth_exit(void)
+{
+ platform_driver_unregister(&ucc_geth_driver);
+}
+
+module_init(ucc_geth_init);
+module_exit(ucc_geth_exit);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc");
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
new file mode 100644
index 000000000..4294ed096
--- /dev/null
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -0,0 +1,1233 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) Freescale Semiconductor, Inc. 2006-2009. All rights reserved.
+ *
+ * Author: Shlomi Gridish <gridish@freescale.com>
+ *
+ * Description:
+ * Internal header file for UCC Gigabit Ethernet unit routines.
+ *
+ * Changelog:
+ * Jun 28, 2006 Li Yang <LeoLi@freescale.com>
+ * - Rearrange code and style fixes
+ */
+#ifndef __UCC_GETH_H__
+#define __UCC_GETH_H__
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/if_ether.h>
+
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+
+#include <soc/fsl/qe/ucc.h>
+#include <soc/fsl/qe/ucc_fast.h>
+
+#define DRV_DESC "QE UCC Gigabit Ethernet Controller"
+#define DRV_NAME "ucc_geth"
+
+#define NUM_TX_QUEUES 8
+#define NUM_RX_QUEUES 8
+#define NUM_BDS_IN_PREFETCHED_BDS 4
+#define TX_IP_OFFSET_ENTRY_MAX 8
+#define NUM_OF_PADDRS 4
+#define ENET_INIT_PARAM_MAX_ENTRIES_RX 9
+#define ENET_INIT_PARAM_MAX_ENTRIES_TX 8
+
+struct ucc_geth {
+ struct ucc_fast uccf;
+ u8 res0[0x100 - sizeof(struct ucc_fast)];
+
+ u32 maccfg1; /* mac configuration reg. 1 */
+ u32 maccfg2; /* mac configuration reg. 2 */
+ u32 ipgifg; /* interframe gap reg. */
+ u32 hafdup; /* half-duplex reg. */
+ u8 res1[0x10];
+ u8 miimng[0x18]; /* MII management structure moved to _mii.h */
+ u32 ifctl; /* interface control reg */
+	u32 ifstat;		/* interface status reg */
+ u32 macstnaddr1; /* mac station address part 1 reg */
+ u32 macstnaddr2; /* mac station address part 2 reg */
+ u8 res2[0x8];
+ u32 uempr; /* UCC Ethernet Mac parameter reg */
+ u32 utbipar; /* UCC tbi address reg */
+ u16 uescr; /* UCC Ethernet statistics control reg */
+ u8 res3[0x180 - 0x15A];
+	u32 tx64;	/* Total number of frames (including bad
+			   frames) transmitted that were exactly of the
+			   minimal length (64 for untagged, 68 for
+			   tagged, or with length exactly equal to the
+			   parameter MINLength) */
+ u32 tx127; /* Total number of frames (including bad
+ frames) transmitted that were between
+ MINLength (Including FCS length==4) and 127
+ octets */
+ u32 tx255; /* Total number of frames (including bad
+ frames) transmitted that were between 128
+ (Including FCS length==4) and 255 octets */
+	u32 rx64;	/* Total number of frames received including
+			   bad frames that were exactly of the minimal
+			   length (64 bytes) */
+ u32 rx127; /* Total number of frames (including bad
+ frames) received that were between MINLength
+ (Including FCS length==4) and 127 octets */
+ u32 rx255; /* Total number of frames (including bad
+ frames) received that were between 128
+ (Including FCS length==4) and 255 octets */
+ u32 txok; /* Total number of octets residing in frames
+			   that were involved in successful
+ transmission */
+ u16 txcf; /* Total number of PAUSE control frames
+ transmitted by this MAC */
+ u8 res4[0x2];
+ u32 tmca; /* Total number of frames that were transmitted
+ successfully with the group address bit set
+ that are not broadcast frames */
+ u32 tbca; /* Total number of frames transmitted
+ successfully that had destination address
+ field equal to the broadcast address */
+ u32 rxfok; /* Total number of frames received OK */
+ u32 rxbok; /* Total number of octets received OK */
+ u32 rbyt; /* Total number of octets received including
+ octets in bad frames. Must be implemented in
+ HW because it includes octets in frames that
+ never even reach the UCC */
+ u32 rmca; /* Total number of frames that were received
+ successfully with the group address bit set
+ that are not broadcast frames */
+ u32 rbca; /* Total number of frames received successfully
+ that had destination address equal to the
+ broadcast address */
+ u32 scar; /* Statistics carry register */
+	u32 scam;	/* Statistics carry mask register */
+ u8 res5[0x200 - 0x1c4];
+} __packed;
+
+/* UCC GETH TEMODER Register */
+#define TEMODER_TX_RMON_STATISTICS_ENABLE 0x0100 /* enable Tx statistics
+ */
+#define TEMODER_SCHEDULER_ENABLE 0x2000 /* enable scheduler */
+#define TEMODER_IP_CHECKSUM_GENERATE 0x0400 /* generate IPv4
+ checksums */
+#define TEMODER_PERFORMANCE_OPTIMIZATION_MODE1 0x0200 /* enable performance
+ optimization
+ enhancement (mode1) */
+#define TEMODER_RMON_STATISTICS 0x0100 /* enable tx statistics
+ */
+#define TEMODER_NUM_OF_QUEUES_SHIFT (15-15) /* Number of queues <<
+ shift */
+
+/* UCC GETH REMODER Register */
+#define REMODER_RX_RMON_STATISTICS_ENABLE 0x00001000 /* enable Rx
+ statistics */
+#define REMODER_RX_EXTENDED_FEATURES 0x80000000 /* enable
+ extended
+ features */
+#define REMODER_VLAN_OPERATION_TAGGED_SHIFT (31-9) /* vlan operation
+ tagged << shift */
+#define REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT (31-10) /* vlan operation non
+ tagged << shift */
+#define REMODER_RX_QOS_MODE_SHIFT (31-15) /* rx QoS mode << shift
+ */
+#define REMODER_RMON_STATISTICS 0x00001000 /* enable rx
+ statistics */
+#define REMODER_RX_EXTENDED_FILTERING 0x00000800 /* extended
+ filtering
+ vs.
+ mpc82xx-like
+ filtering */
+#define REMODER_NUM_OF_QUEUES_SHIFT (31-23) /* Number of queues <<
+ shift */
+#define REMODER_DYNAMIC_MAX_FRAME_LENGTH 0x00000008 /* enable
+ dynamic max
+ frame length
+ */
+#define REMODER_DYNAMIC_MIN_FRAME_LENGTH 0x00000004 /* enable
+ dynamic min
+ frame length
+ */
+#define REMODER_IP_CHECKSUM_CHECK 0x00000002 /* check IPv4
+ checksums */
+#define REMODER_IP_ADDRESS_ALIGNMENT 0x00000001 /* align ip
+ address to
+ 4-byte
+ boundary */
+
+/* UCC GETH Event Register */
+#define UCCE_TXB (UCC_GETH_UCCE_TXB7 | UCC_GETH_UCCE_TXB6 | \
+ UCC_GETH_UCCE_TXB5 | UCC_GETH_UCCE_TXB4 | \
+ UCC_GETH_UCCE_TXB3 | UCC_GETH_UCCE_TXB2 | \
+ UCC_GETH_UCCE_TXB1 | UCC_GETH_UCCE_TXB0)
+
+#define UCCE_RXB (UCC_GETH_UCCE_RXB7 | UCC_GETH_UCCE_RXB6 | \
+ UCC_GETH_UCCE_RXB5 | UCC_GETH_UCCE_RXB4 | \
+ UCC_GETH_UCCE_RXB3 | UCC_GETH_UCCE_RXB2 | \
+ UCC_GETH_UCCE_RXB1 | UCC_GETH_UCCE_RXB0)
+
+#define UCCE_RXF (UCC_GETH_UCCE_RXF7 | UCC_GETH_UCCE_RXF6 | \
+ UCC_GETH_UCCE_RXF5 | UCC_GETH_UCCE_RXF4 | \
+ UCC_GETH_UCCE_RXF3 | UCC_GETH_UCCE_RXF2 | \
+ UCC_GETH_UCCE_RXF1 | UCC_GETH_UCCE_RXF0)
+
+#define UCCE_OTHER (UCC_GETH_UCCE_SCAR | UCC_GETH_UCCE_GRA | \
+ UCC_GETH_UCCE_CBPR | UCC_GETH_UCCE_BSY | \
+ UCC_GETH_UCCE_RXC | UCC_GETH_UCCE_TXC | UCC_GETH_UCCE_TXE)
+
+#define UCCE_RX_EVENTS (UCCE_RXF | UCC_GETH_UCCE_BSY)
+#define UCCE_TX_EVENTS (UCCE_TXB | UCC_GETH_UCCE_TXE)
+
+/* TBI defines */
+#define ENET_TBI_MII_CR 0x00 /* Control */
+#define ENET_TBI_MII_SR 0x01 /* Status */
+#define ENET_TBI_MII_ANA 0x04 /* AN advertisement */
+#define ENET_TBI_MII_ANLPBPA 0x05 /* AN link partner base page ability */
+#define ENET_TBI_MII_ANEX 0x06 /* AN expansion */
+#define ENET_TBI_MII_ANNPT 0x07 /* AN next page transmit */
+#define ENET_TBI_MII_ANLPANP 0x08 /* AN link partner ability next page */
+#define ENET_TBI_MII_EXST 0x0F /* Extended status */
+#define ENET_TBI_MII_JD 0x10 /* Jitter diagnostics */
+#define ENET_TBI_MII_TBICON 0x11 /* TBI control */
+
+/* TBI MDIO register bit fields*/
+#define TBISR_LSTATUS 0x0004
+#define TBICON_CLK_SELECT 0x0020
+#define TBIANA_ASYMMETRIC_PAUSE 0x0100
+#define TBIANA_SYMMETRIC_PAUSE 0x0080
+#define TBIANA_HALF_DUPLEX 0x0040
+#define TBIANA_FULL_DUPLEX 0x0020
+#define TBICR_PHY_RESET 0x8000
+#define TBICR_ANEG_ENABLE 0x1000
+#define TBICR_RESTART_ANEG 0x0200
+#define TBICR_FULL_DUPLEX 0x0100
+#define TBICR_SPEED1_SET 0x0040
+
+#define TBIANA_SETTINGS ( \
+ TBIANA_ASYMMETRIC_PAUSE \
+ | TBIANA_SYMMETRIC_PAUSE \
+ | TBIANA_FULL_DUPLEX \
+ )
+#define TBICR_SETTINGS ( \
+ TBICR_PHY_RESET \
+ | TBICR_ANEG_ENABLE \
+ | TBICR_FULL_DUPLEX \
+ | TBICR_SPEED1_SET \
+ )
+
+/* UCC GETH MACCFG1 (MAC Configuration 1 Register) */
+#define MACCFG1_FLOW_RX 0x00000020 /* Flow Control
+ Rx */
+#define MACCFG1_FLOW_TX 0x00000010 /* Flow Control
+ Tx */
+#define MACCFG1_ENABLE_SYNCHED_RX 0x00000008 /* Rx Enable
+ synchronized
+ to Rx stream
+ */
+#define MACCFG1_ENABLE_RX 0x00000004 /* Enable Rx */
+#define MACCFG1_ENABLE_SYNCHED_TX 0x00000002 /* Tx Enable
+ synchronized
+ to Tx stream
+ */
+#define MACCFG1_ENABLE_TX 0x00000001 /* Enable Tx */
+
+/* UCC GETH MACCFG2 (MAC Configuration 2 Register) */
+#define MACCFG2_PREL_SHIFT (31 - 19) /* Preamble
+ Length <<
+ shift */
+#define MACCFG2_PREL_MASK 0x0000f000 /* Preamble
+ Length mask */
+#define MACCFG2_SRP 0x00000080 /* Soft Receive
+ Preamble */
+#define MACCFG2_STP 0x00000040 /* Soft
+ Transmit
+ Preamble */
+#define MACCFG2_RESERVED_1 0x00000020 /* Reserved -
+ must be set
+ to 1 */
+#define MACCFG2_LC 0x00000010 /* Length Check
+ */
+#define MACCFG2_MPE 0x00000008 /* Magic packet
+ detect */
+#define MACCFG2_FDX 0x00000001 /* Full Duplex */
+#define MACCFG2_FDX_MASK 0x00000001 /* Full Duplex
+ mask */
+#define MACCFG2_PAD_CRC 0x00000004
+#define MACCFG2_CRC_EN 0x00000002
+#define MACCFG2_PAD_AND_CRC_MODE_NONE 0x00000000 /* Neither
+ Padding
+ short frames
+ nor CRC */
+#define MACCFG2_PAD_AND_CRC_MODE_CRC_ONLY 0x00000002 /* Append CRC
+ only */
+#define MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC 0x00000004
+#define MACCFG2_INTERFACE_MODE_NIBBLE 0x00000100 /* nibble mode
+ (MII/RMII/RGMII
+ 10/100bps) */
+#define MACCFG2_INTERFACE_MODE_BYTE 0x00000200 /* byte mode
+ (GMII/TBI/RTB/RGMII
+ 1000bps ) */
+#define MACCFG2_INTERFACE_MODE_MASK 0x00000300 /* mask
+ covering all
+ relevant
+ bits */
+
+/* UCC GETH IPGIFG (Inter-frame Gap / Inter-Frame Gap Register) */
+#define IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT (31 - 7) /* Non
+ back-to-back
+ inter frame
+ gap part 1.
+ << shift */
+#define IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT (31 - 15) /* Non
+ back-to-back
+ inter frame
+ gap part 2.
+ << shift */
+#define IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT (31 - 23) /* Minimum IFG
+ Enforcement
+ << shift */
+#define IPGIFG_BACK_TO_BACK_IFG_SHIFT (31 - 31) /* back-to-back
+ inter frame
+ gap << shift
+ */
+#define IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX 127 /* Non back-to-back
+ inter frame gap part
+ 1. max val */
+#define IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX 127 /* Non back-to-back
+ inter frame gap part
+ 2. max val */
+#define IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX 255 /* Minimum IFG
+ Enforcement max val */
+#define IPGIFG_BACK_TO_BACK_IFG_MAX 127 /* back-to-back inter
+ frame gap max val */
+#define IPGIFG_NBTB_CS_IPG_MASK 0x7F000000
+#define IPGIFG_NBTB_IPG_MASK 0x007F0000
+#define IPGIFG_MIN_IFG_MASK 0x0000FF00
+#define IPGIFG_BTB_IPG_MASK 0x0000007F
+
+/* UCC GETH HAFDUP (Half Duplex Register) */
+#define HALFDUP_ALT_BEB_TRUNCATION_SHIFT (31 - 11) /* Alternate
+ Binary
+ Exponential
+ Backoff
+ Truncation
+ << shift */
+#define HALFDUP_ALT_BEB_TRUNCATION_MAX 0xf /* Alternate Binary
+ Exponential Backoff
+ Truncation max val */
+#define HALFDUP_ALT_BEB 0x00080000 /* Alternate
+ Binary
+ Exponential
+ Backoff */
+#define HALFDUP_BACK_PRESSURE_NO_BACKOFF 0x00040000 /* Back
+ pressure no
+ backoff */
+#define HALFDUP_NO_BACKOFF 0x00020000 /* No Backoff */
+#define HALFDUP_EXCESSIVE_DEFER 0x00010000 /* Excessive
+ Defer */
+#define HALFDUP_MAX_RETRANSMISSION_SHIFT (31 - 19) /* Maximum
+ Retransmission
+ << shift */
+#define HALFDUP_MAX_RETRANSMISSION_MAX 0xf /* Maximum
+ Retransmission max
+ val */
+#define HALFDUP_COLLISION_WINDOW_SHIFT (31 - 31) /* Collision
+ Window <<
+ shift */
+#define HALFDUP_COLLISION_WINDOW_MAX 0x3f /* Collision Window max
+ val */
+#define HALFDUP_ALT_BEB_TR_MASK 0x00F00000
+#define HALFDUP_RETRANS_MASK 0x0000F000
+#define HALFDUP_COL_WINDOW_MASK 0x0000003F
+
+/* UCC GETH UCCS (Ethernet Status Register) */
+#define UCCS_BPR 0x02 /* Back pressure (in
+ half duplex mode) */
+#define UCCS_PAU 0x02 /* Pause state (in full
+ duplex mode) */
+#define UCCS_MPD 0x01 /* Magic Packet
+ Detected */
+
+/* UCC GETH IFSTAT (Interface Status Register) */
+#define IFSTAT_EXCESS_DEFER 0x00000200 /* Excessive
+ transmission
+ defer */
+
+/* UCC GETH MACSTNADDR1 (Station Address Part 1 Register) */
+#define MACSTNADDR1_OCTET_6_SHIFT (31 - 7) /* Station
+ address 6th
+ octet <<
+ shift */
+#define MACSTNADDR1_OCTET_5_SHIFT (31 - 15) /* Station
+ address 5th
+ octet <<
+ shift */
+#define MACSTNADDR1_OCTET_4_SHIFT (31 - 23) /* Station
+ address 4th
+ octet <<
+ shift */
+#define MACSTNADDR1_OCTET_3_SHIFT (31 - 31) /* Station
+ address 3rd
+ octet <<
+ shift */
+
+/* UCC GETH MACSTNADDR2 (Station Address Part 2 Register) */
+#define MACSTNADDR2_OCTET_2_SHIFT (31 - 7) /* Station
+ address 2nd
+ octet <<
+ shift */
+#define MACSTNADDR2_OCTET_1_SHIFT (31 - 15) /* Station
+ address 1st
+ octet <<
+ shift */
+
+/* UCC GETH UEMPR (Ethernet Mac Parameter Register) */
+#define UEMPR_PAUSE_TIME_VALUE_SHIFT (31 - 15) /* Pause time
+ value <<
+ shift */
+#define UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT (31 - 31) /* Extended
+ pause time
+ value <<
+ shift */
+
+/* UCC GETH UTBIPAR (Ten Bit Interface Physical Address Register) */
+#define UTBIPAR_PHY_ADDRESS_SHIFT (31 - 31) /* Phy address
+ << shift */
+#define UTBIPAR_PHY_ADDRESS_MASK 0x0000001f /* Phy address
+ mask */
+
+/* UCC GETH UESCR (Ethernet Statistics Control Register) */
+#define UESCR_AUTOZ 0x8000 /* Automatically zero
+ addressed
+ statistical counter
+ values */
+#define UESCR_CLRCNT 0x4000 /* Clear all statistics
+ counters */
+#define UESCR_MAXCOV_SHIFT (15 - 7) /* Max
+ Coalescing
+ Value <<
+ shift */
+#define UESCR_SCOV_SHIFT (15 - 15) /* Status
+ Coalescing
+ Value <<
+ shift */
+
+/* UCC GETH UDSR (Data Synchronization Register) */
+#define UDSR_MAGIC 0x067E
+
+struct ucc_geth_thread_data_tx {
+ u8 res0[104];
+} __packed;
+
+struct ucc_geth_thread_data_rx {
+ u8 res0[40];
+} __packed;
+
+/* Send Queue Queue-Descriptor */
+struct ucc_geth_send_queue_qd {
+ u32 bd_ring_base; /* pointer to BD ring base address */
+ u8 res0[0x8];
+ u32 last_bd_completed_address;/* initialize to last entry in BD ring */
+ u8 res1[0x30];
+} __packed;
+
+struct ucc_geth_send_queue_mem_region {
+ struct ucc_geth_send_queue_qd sqqd[NUM_TX_QUEUES];
+} __packed;
+
+struct ucc_geth_thread_tx_pram {
+ u8 res0[64];
+} __packed;
+
+struct ucc_geth_thread_rx_pram {
+ u8 res0[128];
+} __packed;
+
+#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING 64
+#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8 64
+#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16 96
+
+struct ucc_geth_scheduler {
+ u16 cpucount0; /* CPU packet counter */
+ u16 cpucount1; /* CPU packet counter */
+ u16 cecount0; /* QE packet counter */
+ u16 cecount1; /* QE packet counter */
+ u16 cpucount2; /* CPU packet counter */
+ u16 cpucount3; /* CPU packet counter */
+ u16 cecount2; /* QE packet counter */
+ u16 cecount3; /* QE packet counter */
+ u16 cpucount4; /* CPU packet counter */
+ u16 cpucount5; /* CPU packet counter */
+ u16 cecount4; /* QE packet counter */
+ u16 cecount5; /* QE packet counter */
+ u16 cpucount6; /* CPU packet counter */
+ u16 cpucount7; /* CPU packet counter */
+ u16 cecount6; /* QE packet counter */
+ u16 cecount7; /* QE packet counter */
+ u32 weightstatus[NUM_TX_QUEUES]; /* accumulated weight factor */
+ u32 rtsrshadow; /* temporary variable handled by QE */
+ u32 time; /* temporary variable handled by QE */
+ u32 ttl; /* temporary variable handled by QE */
+ u32 mblinterval; /* max burst length interval */
+ u16 nortsrbytetime; /* normalized value of byte time in tsr units */
+ u8 fracsiz; /* radix 2 log value of denom. of
+ NorTSRByteTime */
+ u8 res0[1];
+ u8 strictpriorityq; /* Strict Priority Mask register */
+ u8 txasap; /* Transmit ASAP register */
+ u8 extrabw; /* Extra BandWidth register */
+ u8 oldwfqmask; /* temporary variable handled by QE */
+ u8 weightfactor[NUM_TX_QUEUES];
+ /**< weight factor for queues */
+ u32 minw; /* temporary variable handled by QE */
+ u8 res1[0x70 - 0x64];
+} __packed;
+
+struct ucc_geth_tx_firmware_statistics_pram {
+ u32 sicoltx; /* single collision */
+ u32 mulcoltx; /* multiple collision */
+ u32 latecoltxfr; /* late collision */
+ u32 frabortduecol; /* frames aborted due to transmit collision */
+ u32 frlostinmactxer; /* frames lost due to internal MAC error
+ transmission that are not counted on any
+ other counter */
+ u32 carriersenseertx; /* carrier sense error */
+ u32 frtxok; /* frames transmitted OK */
+ u32 txfrexcessivedefer; /* frames with deferral time greater than
+ specified threshold */
+ u32 txpkts256; /* total packets (including bad) between 256
+ and 511 octets */
+ u32 txpkts512; /* total packets (including bad) between 512
+ and 1023 octets */
+ u32 txpkts1024; /* total packets (including bad) between 1024
+ and 1518 octets */
+ u32 txpktsjumbo; /* total packets (including bad) between 1024
+ and MAXLength octets */
+} __packed;
+
+struct ucc_geth_rx_firmware_statistics_pram {
+ u32 frrxfcser; /* frames with crc error */
+ u32 fraligner; /* frames with alignment error */
+ u32 inrangelenrxer; /* in range length error */
+ u32 outrangelenrxer; /* out of range length error */
+ u32 frtoolong; /* frame too long */
+ u32 runt; /* runt */
+ u32 verylongevent; /* very long event */
+ u32 symbolerror; /* symbol error */
+ u32 dropbsy; /* drop because of BD not ready */
+ u8 res0[0x8];
+ u32 mismatchdrop; /* drop because of MAC filtering (e.g. address
+ or type mismatch) */
+ u32 underpkts; /* total frames less than 64 octets */
+ u32 pkts256; /* total frames (including bad) between 256 and
+ 511 octets */
+ u32 pkts512; /* total frames (including bad) between 512 and
+ 1023 octets */
+ u32 pkts1024; /* total frames (including bad) between 1024
+ and 1518 octets */
+ u32 pktsjumbo; /* total frames (including bad) between 1024
+ and MAXLength octets */
+ u32 frlossinmacer; /* frames lost because of internal MAC error
+ that is not counted in any other counter */
+ u32 pausefr; /* pause frames */
+ u8 res1[0x4];
+ u32 removevlan; /* total frames that had their VLAN tag removed
+ */
+ u32 replacevlan; /* total frames that had their VLAN tag
+ replaced */
+ u32 insertvlan; /* total frames that had their VLAN tag
+ inserted */
+} __packed;
+
+struct ucc_geth_rx_interrupt_coalescing_entry {
+ u32 interruptcoalescingmaxvalue; /* interrupt coalescing max
+ value */
+ u32 interruptcoalescingcounter; /* interrupt coalescing counter,
+ initialize to
+ interruptcoalescingmaxvalue */
+} __packed;
+
+struct ucc_geth_rx_interrupt_coalescing_table {
+ struct ucc_geth_rx_interrupt_coalescing_entry coalescingentry[NUM_RX_QUEUES];
+ /**< interrupt coalescing entry */
+} __packed;
+
+struct ucc_geth_rx_prefetched_bds {
+ struct qe_bd bd[NUM_BDS_IN_PREFETCHED_BDS]; /* prefetched bd */
+} __packed;
+
+struct ucc_geth_rx_bd_queues_entry {
+ u32 bdbaseptr; /* BD base pointer */
+ u32 bdptr; /* BD pointer */
+ u32 externalbdbaseptr; /* external BD base pointer */
+ u32 externalbdptr; /* external BD pointer */
+} __packed;
+
+struct ucc_geth_tx_global_pram {
+ u16 temoder;
+ u8 res0[0x38 - 0x02];
+ u32 sqptr; /* a base pointer to send queue memory region */
+ u32 schedulerbasepointer; /* a base pointer to scheduler memory
+ region */
+ u32 txrmonbaseptr; /* base pointer to Tx RMON statistics counter */
+ u32 tstate; /* tx internal state. High byte contains
+ function code */
+ u8 iphoffset[TX_IP_OFFSET_ENTRY_MAX];
+ u32 vtagtable[0x8]; /* 8 4-byte VLAN tags */
+ u32 tqptr; /* a base pointer to the Tx Queues Memory
+ Region */
+ u8 res2[0x78 - 0x74];
+ u64 snums_en;
+ u32 l2l3baseptr; /* top byte consists of a few other bit fields */
+
+ u16 mtu[8];
+ u8 res3[0xa8 - 0x94];
+ u32 wrrtablebase; /* top byte is reserved */
+ u8 res4[0xc0 - 0xac];
+} __packed;
+
+/* structure representing Extended Filtering Global Parameters in PRAM */
+struct ucc_geth_exf_global_pram {
+ u32 l2pcdptr; /* L2 PCD table pointer */
+ u8 res0[0x10 - 0x04];
+} __packed;
+
+struct ucc_geth_rx_global_pram {
+ u32 remoder; /* ethernet mode reg. */
+ u32 rqptr; /* base pointer to the Rx Queues Memory Region*/
+ u32 res0[0x1];
+ u8 res1[0x20 - 0xC];
+ u16 typeorlen; /* cutoff point below which the type/len
+ field is treated as a length */
+ u8 res2[0x1];
+ u8 rxgstpack; /* acknowledgement on GRACEFUL STOP RX command*/
+ u32 rxrmonbaseptr; /* base pointer to Rx RMON statistics counter */
+ u8 res3[0x30 - 0x28];
+ u32 intcoalescingptr; /* Interrupt coalescing table pointer */
+ u8 res4[0x36 - 0x34];
+ u8 rstate; /* rx internal state. High byte contains
+ function code */
+ u8 res5[0x46 - 0x37];
+ u16 mrblr; /* max receive buffer length reg. */
+ u32 rbdqptr; /* base pointer to RxBD parameter table
+ description */
+ u16 mflr; /* max frame length reg. */
+ u16 minflr; /* min frame length reg. */
+ u16 maxd1; /* max dma1 length reg. */
+ u16 maxd2; /* max dma2 length reg. */
+ u32 ecamptr; /* external CAM address */
+ u32 l2qt; /* VLAN priority mapping table. */
+ u32 l3qt[0x8]; /* IP priority mapping table. */
+ u16 vlantype; /* vlan type */
+ u16 vlantci; /* default vlan tci */
+ u8 addressfiltering[64]; /* address filtering data structure */
+ u32 exfGlobalParam; /* base address for extended filtering global
+ parameters */
+ u8 res6[0x100 - 0xC4]; /* Initialize to zero */
+} __packed;
+
+#define GRACEFUL_STOP_ACKNOWLEDGE_RX 0x01
+
+/* structure representing InitEnet command */
+struct ucc_geth_init_pram {
+ u8 resinit1;
+ u8 resinit2;
+ u8 resinit3;
+ u8 resinit4;
+ u16 resinit5;
+ u8 res1[0x1];
+ u8 largestexternallookupkeysize;
+ u32 rgftgfrxglobal;
+ u32 rxthread[ENET_INIT_PARAM_MAX_ENTRIES_RX]; /* rx threads */
+ u8 res2[0x38 - 0x30];
+ u32 txglobal; /* tx global */
+ u32 txthread[ENET_INIT_PARAM_MAX_ENTRIES_TX]; /* tx threads */
+ u8 res3[0x1];
+} __packed;
+
+#define ENET_INIT_PARAM_RGF_SHIFT (32 - 4)
+#define ENET_INIT_PARAM_TGF_SHIFT (32 - 8)
+
+#define ENET_INIT_PARAM_RISC_MASK 0x0000003f
+#define ENET_INIT_PARAM_PTR_MASK 0x00ffffc0
+#define ENET_INIT_PARAM_SNUM_MASK 0xff000000
+#define ENET_INIT_PARAM_SNUM_SHIFT 24
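+/*
+ * Each rxthread[]/txthread[] entry in struct ucc_geth_init_pram is built from
+ * the fields above: the serial number in the SNUM field, the (aligned) thread
+ * parameter RAM offset in the PTR field and the RISC allocation in the RISC
+ * field, roughly (snum << ENET_INIT_PARAM_SNUM_SHIFT) | pram_offset | risc.
+ */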
+
+#define ENET_INIT_PARAM_MAGIC_RES_INIT1 0x06
+#define ENET_INIT_PARAM_MAGIC_RES_INIT2 0x30
+#define ENET_INIT_PARAM_MAGIC_RES_INIT3 0xff
+#define ENET_INIT_PARAM_MAGIC_RES_INIT4 0x00
+#define ENET_INIT_PARAM_MAGIC_RES_INIT5 0x0400
+
+/* structure representing 82xx Address Filtering Enet Address in PRAM */
+struct ucc_geth_82xx_enet_address {
+ u8 res1[0x2];
+ u16 h; /* address (MSB) */
+ u16 m; /* address */
+ u16 l; /* address (LSB) */
+} __packed;
+
+/* structure representing 82xx Address Filtering PRAM */
+struct ucc_geth_82xx_address_filtering_pram {
+ u32 iaddr_h; /* individual address filter, high */
+ u32 iaddr_l; /* individual address filter, low */
+ u32 gaddr_h; /* group address filter, high */
+ u32 gaddr_l; /* group address filter, low */
+ struct ucc_geth_82xx_enet_address __iomem taddr;
+ struct ucc_geth_82xx_enet_address __iomem paddr[NUM_OF_PADDRS];
+ u8 res0[0x40 - 0x38];
+} __packed;
+
+/* GETH Tx firmware statistics structure, used when calling
+ UCC_GETH_GetStatistics. */
+struct ucc_geth_tx_firmware_statistics {
+ u32 sicoltx; /* single collision */
+ u32 mulcoltx; /* multiple collision */
+ u32 latecoltxfr; /* late collision */
+ u32 frabortduecol; /* frames aborted due to transmit collision */
+ u32 frlostinmactxer; /* frames lost due to internal MAC error
+ transmission that are not counted on any
+ other counter */
+ u32 carriersenseertx; /* carrier sense error */
+ u32 frtxok; /* frames transmitted OK */
+ u32 txfrexcessivedefer; /* frames with deferral time greater than
+ specified threshold */
+ u32 txpkts256; /* total packets (including bad) between 256
+ and 511 octets */
+ u32 txpkts512; /* total packets (including bad) between 512
+ and 1023 octets */
+ u32 txpkts1024; /* total packets (including bad) between 1024
+ and 1518 octets */
+ u32 txpktsjumbo; /* total packets (including bad) between 1024
+ and MAXLength octets */
+} __packed;
+
+/* GETH Rx firmware statistics structure, used when calling
+ UCC_GETH_GetStatistics. */
+struct ucc_geth_rx_firmware_statistics {
+ u32 frrxfcser; /* frames with crc error */
+ u32 fraligner; /* frames with alignment error */
+ u32 inrangelenrxer; /* in range length error */
+ u32 outrangelenrxer; /* out of range length error */
+ u32 frtoolong; /* frame too long */
+ u32 runt; /* runt */
+ u32 verylongevent; /* very long event */
+ u32 symbolerror; /* symbol error */
+ u32 dropbsy; /* drop because of BD not ready */
+ u8 res0[0x8];
+ u32 mismatchdrop; /* drop because of MAC filtering (e.g. address
+ or type mismatch) */
+ u32 underpkts; /* total frames less than 64 octets */
+ u32 pkts256; /* total frames (including bad) between 256 and
+ 511 octets */
+ u32 pkts512; /* total frames (including bad) between 512 and
+ 1023 octets */
+ u32 pkts1024; /* total frames (including bad) between 1024
+ and 1518 octets */
+ u32 pktsjumbo; /* total frames (including bad) between 1024
+ and MAXLength octets */
+ u32 frlossinmacer; /* frames lost because of internal MAC error
+ that is not counted in any other counter */
+ u32 pausefr; /* pause frames */
+ u8 res1[0x4];
+ u32 removevlan; /* total frames that had their VLAN tag removed
+ */
+ u32 replacevlan; /* total frames that had their VLAN tag
+ replaced */
+ u32 insertvlan; /* total frames that had their VLAN tag
+ inserted */
+} __packed;
+
+/* GETH hardware statistics structure, used when calling
+ UCC_GETH_GetStatistics. */
+struct ucc_geth_hardware_statistics {
+ u32 tx64; /* Total number of frames (including bad
+ frames) transmitted that were exactly of the
+ minimal length (64 for untagged, 68 for
+ tagged, or with length exactly equal to the
+ parameter MINLength) */
+ u32 tx127; /* Total number of frames (including bad
+ frames) transmitted that were between
+ MINLength (Including FCS length==4) and 127
+ octets */
+ u32 tx255; /* Total number of frames (including bad
+ frames) transmitted that were between 128
+ (Including FCS length==4) and 255 octets */
+ u32 rx64; /* Total number of frames received including
+ bad frames that were exactly of the minimal
+ length (64 bytes) */
+ u32 rx127; /* Total number of frames (including bad
+ frames) received that were between MINLength
+ (Including FCS length==4) and 127 octets */
+ u32 rx255; /* Total number of frames (including bad
+ frames) received that were between 128
+ (Including FCS length==4) and 255 octets */
+ u32 txok; /* Total number of octets residing in frames
+ that were involved in successful
+ transmission */
+ u16 txcf; /* Total number of PAUSE control frames
+ transmitted by this MAC */
+ u32 tmca; /* Total number of frames that were transmitted
+ successfully with the group address bit set
+ that are not broadcast frames */
+ u32 tbca; /* Total number of frames transmitted
+ successfully that had destination address
+ field equal to the broadcast address */
+ u32 rxfok; /* Total number of frames received OK */
+ u32 rxbok; /* Total number of octets received OK */
+ u32 rbyt; /* Total number of octets received including
+ octets in bad frames. Must be implemented in
+ HW because it includes octets in frames that
+ never even reach the UCC */
+ u32 rmca; /* Total number of frames that were received
+ successfully with the group address bit set
+ that are not broadcast frames */
+ u32 rbca; /* Total number of frames received successfully
+ that had destination address equal to the
+ broadcast address */
+} __packed;
+
+/* UCC GETH Tx errors returned via TxConf callback */
+#define TX_ERRORS_DEF 0x0200
+#define TX_ERRORS_EXDEF 0x0100
+#define TX_ERRORS_LC 0x0080
+#define TX_ERRORS_RL 0x0040
+#define TX_ERRORS_RC_MASK 0x003C
+#define TX_ERRORS_RC_SHIFT 2
+#define TX_ERRORS_UN 0x0002
+#define TX_ERRORS_CSL 0x0001
+
+/* UCC GETH Rx errors returned via RxStore callback */
+#define RX_ERRORS_CMR 0x0200
+#define RX_ERRORS_M 0x0100
+#define RX_ERRORS_BC 0x0080
+#define RX_ERRORS_MC 0x0040
+
+/* Transmit BD. These are in addition to values defined in uccf. */
+#define T_VID 0x003c0000 /* insert VLAN id index mask. */
+#define T_DEF (((u32) TX_ERRORS_DEF ) << 16)
+#define T_EXDEF (((u32) TX_ERRORS_EXDEF ) << 16)
+#define T_LC (((u32) TX_ERRORS_LC ) << 16)
+#define T_RL (((u32) TX_ERRORS_RL ) << 16)
+#define T_RC_MASK (((u32) TX_ERRORS_RC_MASK ) << 16)
+#define T_UN (((u32) TX_ERRORS_UN ) << 16)
+#define T_CSL (((u32) TX_ERRORS_CSL ) << 16)
+#define T_ERRORS_REPORT (T_DEF | T_EXDEF | T_LC | T_RL | T_RC_MASK \
+ | T_UN | T_CSL) /* transmit errors to report */
+
+/* Receive BD. These are in addition to values defined in uccf. */
+#define R_LG 0x00200000 /* Frame length violation. */
+#define R_NO 0x00100000 /* Non-octet aligned frame. */
+#define R_SH 0x00080000 /* Short frame. */
+#define R_CR 0x00040000 /* CRC error. */
+#define R_OV 0x00020000 /* Overrun. */
+#define R_IPCH 0x00010000 /* IP checksum check failed. */
+#define R_CMR (((u32) RX_ERRORS_CMR ) << 16)
+#define R_M (((u32) RX_ERRORS_M ) << 16)
+#define R_BC (((u32) RX_ERRORS_BC ) << 16)
+#define R_MC (((u32) RX_ERRORS_MC ) << 16)
+#define R_ERRORS_REPORT (R_CMR | R_M | R_BC | R_MC) /* receive errors to
+ report */
+#define R_ERRORS_FATAL (R_LG | R_NO | R_SH | R_CR | \
+ R_OV | R_IPCH) /* receive errors to discard */
+
+/* Alignments */
+#define UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT 256
+#define UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT 128
+#define UCC_GETH_THREAD_RX_PRAM_ALIGNMENT 128
+#define UCC_GETH_THREAD_TX_PRAM_ALIGNMENT 64
+#define UCC_GETH_THREAD_DATA_ALIGNMENT 256 /* spec gives values
+ based on num of
+ threads, but always
+ using the maximum is
+ easier */
+#define UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT 32
+#define UCC_GETH_SCHEDULER_ALIGNMENT 8 /* This is a guess */
+#define UCC_GETH_TX_STATISTICS_ALIGNMENT 4 /* This is a guess */
+#define UCC_GETH_RX_STATISTICS_ALIGNMENT 4 /* This is a guess */
+#define UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT 64
+#define UCC_GETH_RX_BD_QUEUES_ALIGNMENT 8 /* This is a guess */
+#define UCC_GETH_RX_PREFETCHED_BDS_ALIGNMENT 128 /* This is a guess */
+#define UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT 8 /* This
+ is a
+ guess
+ */
+#define UCC_GETH_RX_BD_RING_ALIGNMENT 32
+#define UCC_GETH_TX_BD_RING_ALIGNMENT 32
+#define UCC_GETH_MRBLR_ALIGNMENT 128
+#define UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT 4
+#define UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT 32
+#define UCC_GETH_RX_DATA_BUF_ALIGNMENT 64
+
+#define UCC_GETH_TAD_EF 0x80
+#define UCC_GETH_TAD_V 0x40
+#define UCC_GETH_TAD_REJ 0x20
+#define UCC_GETH_TAD_VTAG_OP_RIGHT_SHIFT 2
+#define UCC_GETH_TAD_VTAG_OP_SHIFT 6
+#define UCC_GETH_TAD_V_NON_VTAG_OP 0x20
+#define UCC_GETH_TAD_RQOS_SHIFT 0
+#define UCC_GETH_TAD_V_PRIORITY_SHIFT 5
+#define UCC_GETH_TAD_CFI 0x10
+
+#define UCC_GETH_VLAN_PRIORITY_MAX 8
+#define UCC_GETH_IP_PRIORITY_MAX 64
+#define UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX 8
+#define UCC_GETH_RX_BD_RING_SIZE_MIN 8
+#define UCC_GETH_TX_BD_RING_SIZE_MIN 2
+#define UCC_GETH_BD_RING_SIZE_MAX 0xffff
+
+#define UCC_GETH_SIZE_OF_BD QE_SIZEOF_BD
+
+/* Driver definitions */
+#define TX_BD_RING_LEN 0x10
+#define RX_BD_RING_LEN 0x20
+
+#define TX_RING_MOD_MASK(size) (size-1)
+#define RX_RING_MOD_MASK(size) (size-1)
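+/* The ring length is treated as a power of two, so (size - 1) can serve as an
+ * index wrap-around mask when advancing through the BD rings.
+ */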
+
+#define ENET_GROUP_ADDR 0x01 /* Group address mask
+ for ethernet
+ addresses */
+
+#define TX_TIMEOUT (1*HZ)
+#define PHY_INIT_TIMEOUT 100000
+#define PHY_CHANGE_TIME 2
+
+/* Fast Ethernet (10/100 Mbps) */
+#define UCC_GETH_URFS_INIT 512 /* Rx virtual FIFO size
+ */
+#define UCC_GETH_URFET_INIT 256 /* 1/2 urfs */
+#define UCC_GETH_URFSET_INIT 384 /* 3/4 urfs */
+#define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size
+ */
+#define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */
+#define UCC_GETH_UTFTT_INIT 256 /* 1/2 utfs
+ due to errata */
+/* Gigabit Ethernet (1000 Mbps) */
+#define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual
+ FIFO size */
+#define UCC_GETH_URFET_GIGA_INIT 2048/*1024*/ /* 1/2 urfs */
+#define UCC_GETH_URFSET_GIGA_INIT 3072/*1536*/ /* 3/4 urfs */
+#define UCC_GETH_UTFS_GIGA_INIT 4096/*2048*/ /* Tx virtual
+ FIFO size */
+#define UCC_GETH_UTFET_GIGA_INIT 2048/*1024*/ /* 1/2 utfs */
+#define UCC_GETH_UTFTT_GIGA_INIT 4096/*0x40*/ /* Tx transmit
+ threshold */
+
+#define UCC_GETH_REMODER_INIT 0 /* bits that must be
+ set */
+#define UCC_GETH_TEMODER_INIT 0xC000 /* bits that must be set
+ */
+
+/* Initial value for UPSMR */
+#define UCC_GETH_UPSMR_INIT UCC_GETH_UPSMR_RES1
+
+#define UCC_GETH_MACCFG1_INIT 0
+#define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1)
+
+/* Ethernet Address Type. */
+enum enet_addr_type {
+ ENET_ADDR_TYPE_INDIVIDUAL,
+ ENET_ADDR_TYPE_GROUP,
+ ENET_ADDR_TYPE_BROADCAST
+};
+
+/* UCC GETH 82xx Ethernet Address Recognition Location */
+enum ucc_geth_enet_address_recognition_location {
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_STATION_ADDRESS,/* station
+ address */
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR_FIRST, /* additional
+ station
+ address
+ paddr1 */
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR2, /* additional
+ station
+ address
+ paddr2 */
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR3, /* additional
+ station
+ address
+ paddr3 */
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR_LAST, /* additional
+ station
+ address
+ paddr4 */
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH, /* group hash */
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH /* individual
+ hash */
+};
+
+/* UCC GETH vlan operation tagged */
+enum ucc_geth_vlan_operation_tagged {
+ UCC_GETH_VLAN_OPERATION_TAGGED_NOP = 0x0, /* Tagged - nop */
+ UCC_GETH_VLAN_OPERATION_TAGGED_REPLACE_VID_PORTION_OF_Q_TAG
+ = 0x1, /* Tagged - replace vid portion of q tag */
+ UCC_GETH_VLAN_OPERATION_TAGGED_IF_VID0_REPLACE_VID_WITH_DEFAULT_VALUE
+ = 0x2, /* Tagged - if vid0 replace vid with default value */
+ UCC_GETH_VLAN_OPERATION_TAGGED_EXTRACT_Q_TAG_FROM_FRAME
+ = 0x3 /* Tagged - extract q tag from frame */
+};
+
+/* UCC GETH vlan operation non-tagged */
+enum ucc_geth_vlan_operation_non_tagged {
+ UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP = 0x0, /* Non tagged - nop */
+ UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT = 0x1 /* Non tagged -
+ q tag insert
+ */
+};
+
+/* UCC GETH Rx Quality of Service Mode */
+enum ucc_geth_qos_mode {
+ UCC_GETH_QOS_MODE_DEFAULT = 0x0, /* default queue */
+ UCC_GETH_QOS_MODE_QUEUE_NUM_FROM_L2_CRITERIA = 0x1, /* queue
+ determined
+ by L2
+ criteria */
+ UCC_GETH_QOS_MODE_QUEUE_NUM_FROM_L3_CRITERIA = 0x2 /* queue
+ determined
+ by L3
+ criteria */
+};
+
+/* UCC GETH Statistics Gathering Mode - These are bit flags, 'or' them together
+ for combined functionality */
+enum ucc_geth_statistics_gathering_mode {
+ UCC_GETH_STATISTICS_GATHERING_MODE_NONE = 0x00000000, /* No
+ statistics
+ gathering */
+ UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE = 0x00000001,/* Enable
+ hardware
+ statistics
+ gathering
+ */
+ UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX = 0x00000004,/*Enable
+ firmware
+ tx
+ statistics
+ gathering
+ */
+ UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX = 0x00000008/* Enable
+ firmware
+ rx
+ statistics
+ gathering
+ */
+};
+
+/* UCC GETH Pad and CRC Mode - Note, Padding without CRC is not possible */
+enum ucc_geth_maccfg2_pad_and_crc_mode {
+ UCC_GETH_PAD_AND_CRC_MODE_NONE
+ = MACCFG2_PAD_AND_CRC_MODE_NONE, /* Neither Padding
+ short frames
+ nor CRC */
+ UCC_GETH_PAD_AND_CRC_MODE_CRC_ONLY
+ = MACCFG2_PAD_AND_CRC_MODE_CRC_ONLY, /* Append
+ CRC only */
+ UCC_GETH_PAD_AND_CRC_MODE_PAD_AND_CRC =
+ MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC
+};
+
+/* UCC GETH upsmr Flow Control Mode */
+enum ucc_geth_flow_control_mode {
+ UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE = 0x00000000, /* No automatic
+ flow control
+ */
+ UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_PAUSE_WHEN_EMERGENCY
+ = 0x00004000 /* Send pause frame when RxFIFO reaches its
+ emergency threshold */
+};
+
+/* UCC GETH number of threads */
+enum ucc_geth_num_of_threads {
+ UCC_GETH_NUM_OF_THREADS_1 = 0x1, /* 1 */
+ UCC_GETH_NUM_OF_THREADS_2 = 0x2, /* 2 */
+ UCC_GETH_NUM_OF_THREADS_4 = 0x0, /* 4 */
+ UCC_GETH_NUM_OF_THREADS_6 = 0x3, /* 6 */
+ UCC_GETH_NUM_OF_THREADS_8 = 0x4 /* 8 */
+};
+
+/* UCC GETH number of station addresses */
+enum ucc_geth_num_of_station_addresses {
+ UCC_GETH_NUM_OF_STATION_ADDRESSES_1, /* 1 */
+ UCC_GETH_NUM_OF_STATION_ADDRESSES_5 /* 5 */
+};
+
+/* UCC GETH 82xx Ethernet Address Container */
+struct enet_addr_container {
+ u8 address[ETH_ALEN]; /* ethernet address */
+ enum ucc_geth_enet_address_recognition_location location; /* location in
+ 82xx address
+ recognition
+ hardware */
+ struct list_head node;
+};
+
+#define ENET_ADDR_CONT_ENTRY(ptr) list_entry(ptr, struct enet_addr_container, node)
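+/*
+ * Illustrative use only (not taken verbatim from the driver): with 'lh' a
+ * struct list_head * cursor over one of the hash queues,
+ *
+ *	list_for_each(lh, &ugeth->ind_hash_q)
+ *		pr_debug("%pM\n", ENET_ADDR_CONT_ENTRY(lh)->address);
+ */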
+
+/* UCC GETH Termination Action Descriptor (TAD) structure. */
+struct ucc_geth_tad_params {
+ int rx_non_dynamic_extended_features_mode;
+ int reject_frame;
+ enum ucc_geth_vlan_operation_tagged vtag_op;
+ enum ucc_geth_vlan_operation_non_tagged vnontag_op;
+ enum ucc_geth_qos_mode rqos;
+ u8 vpri;
+ u16 vid;
+};
+
+/* GETH protocol initialization structure */
+struct ucc_geth_info {
+ struct ucc_fast_info uf_info;
+ int ipCheckSumCheck;
+ int ipCheckSumGenerate;
+ int rxExtendedFiltering;
+ u32 extendedFilteringChainPointer;
+ u16 typeorlen;
+ int dynamicMaxFrameLength;
+ int dynamicMinFrameLength;
+ u8 nonBackToBackIfgPart1;
+ u8 nonBackToBackIfgPart2;
+ u8 miminumInterFrameGapEnforcement;
+ u8 backToBackInterFrameGap;
+ int ipAddressAlignment;
+ int lengthCheckRx;
+ u32 mblinterval;
+ u16 nortsrbytetime;
+ u8 fracsiz;
+ u8 strictpriorityq;
+ u8 txasap;
+ u8 extrabw;
+ int miiPreambleSupress;
+ u8 altBebTruncation;
+ int altBeb;
+ int backPressureNoBackoff;
+ int noBackoff;
+ int excessDefer;
+ u8 maxRetransmission;
+ u8 collisionWindow;
+ int pro;
+ int cap;
+ int rsh;
+ int rlpb;
+ int cam;
+ int bro;
+ int ecm;
+ int receiveFlowControl;
+ int transmitFlowControl;
+ u8 maxGroupAddrInHash;
+ u8 maxIndAddrInHash;
+ u8 prel;
+ u16 maxFrameLength;
+ u16 minFrameLength;
+ u16 maxD1Length;
+ u16 maxD2Length;
+ u16 vlantype;
+ u16 vlantci;
+ u32 ecamptr;
+ u32 eventRegMask;
+ u16 pausePeriod;
+ u16 extensionField;
+ struct device_node *phy_node;
+ struct device_node *tbi_node;
+ u8 weightfactor[NUM_TX_QUEUES];
+ u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES];
+ u8 l2qt[UCC_GETH_VLAN_PRIORITY_MAX];
+ u8 l3qt[UCC_GETH_IP_PRIORITY_MAX];
+ u32 vtagtable[UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX];
+ u8 iphoffset[TX_IP_OFFSET_ENTRY_MAX];
+ u16 bdRingLenTx[NUM_TX_QUEUES];
+ u16 bdRingLenRx[NUM_RX_QUEUES];
+ enum ucc_geth_num_of_station_addresses numStationAddresses;
+ enum qe_fltr_largest_external_tbl_lookup_key_size
+ largestexternallookupkeysize;
+ enum ucc_geth_statistics_gathering_mode statisticsMode;
+ enum ucc_geth_vlan_operation_tagged vlanOperationTagged;
+ enum ucc_geth_vlan_operation_non_tagged vlanOperationNonTagged;
+ enum ucc_geth_qos_mode rxQoSMode;
+ enum ucc_geth_flow_control_mode aufc;
+ enum ucc_geth_maccfg2_pad_and_crc_mode padAndCrc;
+ enum ucc_geth_num_of_threads numThreadsTx;
+ enum ucc_geth_num_of_threads numThreadsRx;
+ unsigned int riscTx;
+ unsigned int riscRx;
+};
+
+/* structure representing UCC GETH */
+struct ucc_geth_private {
+ struct ucc_geth_info *ug_info;
+ struct ucc_fast_private *uccf;
+ struct device *dev;
+ struct net_device *ndev;
+ struct napi_struct napi;
+ struct work_struct timeout_work;
+ struct ucc_geth __iomem *ug_regs;
+ struct ucc_geth_init_pram *p_init_enet_param_shadow;
+ struct ucc_geth_exf_global_pram __iomem *p_exf_glbl_param;
+ u32 exf_glbl_param_offset;
+ struct ucc_geth_rx_global_pram __iomem *p_rx_glbl_pram;
+ struct ucc_geth_tx_global_pram __iomem *p_tx_glbl_pram;
+ struct ucc_geth_send_queue_mem_region __iomem *p_send_q_mem_reg;
+ u32 send_q_mem_reg_offset;
+ struct ucc_geth_thread_data_tx __iomem *p_thread_data_tx;
+ u32 thread_dat_tx_offset;
+ struct ucc_geth_thread_data_rx __iomem *p_thread_data_rx;
+ u32 thread_dat_rx_offset;
+ struct ucc_geth_scheduler __iomem *p_scheduler;
+ u32 scheduler_offset;
+ struct ucc_geth_tx_firmware_statistics_pram __iomem *p_tx_fw_statistics_pram;
+ u32 tx_fw_statistics_pram_offset;
+ struct ucc_geth_rx_firmware_statistics_pram __iomem *p_rx_fw_statistics_pram;
+ u32 rx_fw_statistics_pram_offset;
+ struct ucc_geth_rx_interrupt_coalescing_table __iomem *p_rx_irq_coalescing_tbl;
+ u32 rx_irq_coalescing_tbl_offset;
+ struct ucc_geth_rx_bd_queues_entry __iomem *p_rx_bd_qs_tbl;
+ u32 rx_bd_qs_tbl_offset;
+ u8 __iomem *p_tx_bd_ring[NUM_TX_QUEUES];
+ u8 __iomem *p_rx_bd_ring[NUM_RX_QUEUES];
+ u8 __iomem *confBd[NUM_TX_QUEUES];
+ u8 __iomem *txBd[NUM_TX_QUEUES];
+ u8 __iomem *rxBd[NUM_RX_QUEUES];
+ int badFrame[NUM_RX_QUEUES];
+ u16 cpucount[NUM_TX_QUEUES];
+ u16 __iomem *p_cpucount[NUM_TX_QUEUES];
+ int indAddrRegUsed[NUM_OF_PADDRS];
+ u8 paddr[NUM_OF_PADDRS][ETH_ALEN]; /* ethernet address */
+ u8 numGroupAddrInHash;
+ u8 numIndAddrInHash;
+ u8 numIndAddrInReg;
+ int rx_extended_features;
+ int rx_non_dynamic_extended_features;
+ struct list_head conf_skbs;
+ struct list_head group_hash_q;
+ struct list_head ind_hash_q;
+ u32 saved_uccm;
+ spinlock_t lock;
+ /* pointers to arrays of skbuffs for tx and rx */
+ struct sk_buff **tx_skbuff[NUM_TX_QUEUES];
+ struct sk_buff **rx_skbuff[NUM_RX_QUEUES];
+ /* indices pointing to the next free skb in skb arrays */
+ u16 skb_curtx[NUM_TX_QUEUES];
+ u16 skb_currx[NUM_RX_QUEUES];
+ /* index of the first skb which hasn't been transmitted yet. */
+ u16 skb_dirtytx[NUM_TX_QUEUES];
+
+ struct ugeth_mii_info *mii_info;
+ struct phy_device *phydev;
+ phy_interface_t phy_interface;
+ int max_speed;
+ uint32_t msg_enable;
+ int oldspeed;
+ int oldduplex;
+ int oldlink;
+ int wol_en;
+
+ struct device_node *node;
+};
+
+void uec_set_ethtool_ops(struct net_device *netdev);
+int init_flow_control_params(u32 automatic_flow_control_mode,
+ int rx_flow_control_enable, int tx_flow_control_enable,
+ u16 pause_period, u16 extension_field,
+ u32 __iomem *upsmr_register, u32 __iomem *uempr_register,
+ u32 __iomem *maccfg1_register);
+
+
+#endif /* __UCC_GETH_H__ */
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
new file mode 100644
index 000000000..601beb93d
--- /dev/null
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -0,0 +1,407 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Description: QE UCC Gigabit Ethernet Ethtool API Set
+ *
+ * Author: Li Yang <leoli@freescale.com>
+ *
+ * Limitation:
+ * Can only get/set settings of the first queue.
+ * Need to re-open the interface manually after changing some parameters.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/stddef.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <linux/uaccess.h>
+#include <asm/types.h>
+
+#include "ucc_geth.h"
+
+static const char hw_stat_gstrings[][ETH_GSTRING_LEN] = {
+ "tx-64-frames",
+ "tx-65-127-frames",
+ "tx-128-255-frames",
+ "rx-64-frames",
+ "rx-65-127-frames",
+ "rx-128-255-frames",
+ "tx-bytes-ok",
+ "tx-pause-frames",
+ "tx-multicast-frames",
+ "tx-broadcast-frames",
+ "rx-frames",
+ "rx-bytes-ok",
+ "rx-bytes-all",
+ "rx-multicast-frames",
+ "rx-broadcast-frames",
+ "stats-counter-carry",
+ "stats-counter-mask",
+ "rx-dropped-frames",
+};
+
+static const char tx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
+ "tx-single-collision",
+ "tx-multiple-collision",
+ "tx-late-collision",
+ "tx-aborted-frames",
+ "tx-lost-frames",
+ "tx-carrier-sense-errors",
+ "tx-frames-ok",
+ "tx-excessive-differ-frames",
+ "tx-256-511-frames",
+ "tx-512-1023-frames",
+ "tx-1024-1518-frames",
+ "tx-jumbo-frames",
+};
+
+static const char rx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
+ "rx-crc-errors",
+ "rx-alignment-errors",
+ "rx-in-range-length-errors",
+ "rx-out-of-range-length-errors",
+ "rx-too-long-frames",
+ "rx-runt",
+ "rx-very-long-event",
+ "rx-symbol-errors",
+ "rx-busy-drop-frames",
+ "reserved",
+ "reserved",
+ "rx-mismatch-drop-frames",
+ "rx-small-than-64",
+ "rx-256-511-frames",
+ "rx-512-1023-frames",
+ "rx-1024-1518-frames",
+ "rx-jumbo-frames",
+ "rx-mac-error-loss",
+ "rx-pause-frames",
+ "reserved",
+ "rx-vlan-removed",
+ "rx-vlan-replaced",
+ "rx-vlan-inserted",
+ "rx-ip-checksum-errors",
+};
+
+#define UEC_HW_STATS_LEN ARRAY_SIZE(hw_stat_gstrings)
+#define UEC_TX_FW_STATS_LEN ARRAY_SIZE(tx_fw_stat_gstrings)
+#define UEC_RX_FW_STATS_LEN ARRAY_SIZE(rx_fw_stat_gstrings)
+
+static int
+uec_get_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ struct phy_device *phydev = ugeth->phydev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ phy_ethtool_ksettings_get(phydev, cmd);
+
+ return 0;
+}
+
+static int
+uec_set_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ struct phy_device *phydev = ugeth->phydev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_ksettings_set(phydev, cmd);
+}
+
+static void
+uec_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+
+ pause->autoneg = ugeth->phydev->autoneg;
+
+ if (ugeth->ug_info->receiveFlowControl)
+ pause->rx_pause = 1;
+ if (ugeth->ug_info->transmitFlowControl)
+ pause->tx_pause = 1;
+}
+
+static int
+uec_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ int ret = 0;
+
+ ugeth->ug_info->receiveFlowControl = pause->rx_pause;
+ ugeth->ug_info->transmitFlowControl = pause->tx_pause;
+
+ if (ugeth->phydev->autoneg) {
+ if (netif_running(netdev)) {
+ /* FIXME: automatically restart */
+ netdev_info(netdev, "Please re-open the interface\n");
+ }
+ } else {
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+
+ ret = init_flow_control_params(ug_info->aufc,
+ ug_info->receiveFlowControl,
+ ug_info->transmitFlowControl,
+ ug_info->pausePeriod,
+ ug_info->extensionField,
+ &ugeth->uccf->uf_regs->upsmr,
+ &ugeth->ug_regs->uempr,
+ &ugeth->ug_regs->maccfg1);
+ }
+
+ return ret;
+}
+
+static uint32_t
+uec_get_msglevel(struct net_device *netdev)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ return ugeth->msg_enable;
+}
+
+static void
+uec_set_msglevel(struct net_device *netdev, uint32_t data)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ ugeth->msg_enable = data;
+}
+
+static int
+uec_get_regs_len(struct net_device *netdev)
+{
+ return sizeof(struct ucc_geth);
+}
+
+static void
+uec_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *p)
+{
+ int i;
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ u32 __iomem *ug_regs = (u32 __iomem *)ugeth->ug_regs;
+ u32 *buff = p;
+
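+ /* Snapshot the whole memory-mapped MAC register block as big-endian
+ * 32-bit words.
+ */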
+ for (i = 0; i < sizeof(struct ucc_geth) / sizeof(u32); i++)
+ buff[i] = in_be32(&ug_regs[i]);
+}
+
+static void
+uec_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+ int queue = 0;
+
+ ring->rx_max_pending = UCC_GETH_BD_RING_SIZE_MAX;
+ ring->rx_mini_max_pending = UCC_GETH_BD_RING_SIZE_MAX;
+ ring->rx_jumbo_max_pending = UCC_GETH_BD_RING_SIZE_MAX;
+ ring->tx_max_pending = UCC_GETH_BD_RING_SIZE_MAX;
+
+ ring->rx_pending = ug_info->bdRingLenRx[queue];
+ ring->rx_mini_pending = ug_info->bdRingLenRx[queue];
+ ring->rx_jumbo_pending = ug_info->bdRingLenRx[queue];
+ ring->tx_pending = ug_info->bdRingLenTx[queue];
+}
+
+static int
+uec_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+ int queue = 0, ret = 0;
+
+ if (ring->rx_pending < UCC_GETH_RX_BD_RING_SIZE_MIN) {
+ netdev_info(netdev, "RxBD ring size must be no smaller than %d\n",
+ UCC_GETH_RX_BD_RING_SIZE_MIN);
+ return -EINVAL;
+ }
+ if (ring->rx_pending % UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT) {
+ netdev_info(netdev, "RxBD ring size must be multiple of %d\n",
+ UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT);
+ return -EINVAL;
+ }
+ if (ring->tx_pending < UCC_GETH_TX_BD_RING_SIZE_MIN) {
+ netdev_info(netdev, "TxBD ring size must be no smaller than %d\n",
+ UCC_GETH_TX_BD_RING_SIZE_MIN);
+ return -EINVAL;
+ }
+
+ if (netif_running(netdev))
+ return -EBUSY;
+
+ ug_info->bdRingLenRx[queue] = ring->rx_pending;
+ ug_info->bdRingLenTx[queue] = ring->tx_pending;
+
+ return ret;
+}
+
+static int uec_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ u32 stats_mode = ugeth->ug_info->statisticsMode;
+ int len = 0;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE)
+ len += UEC_HW_STATS_LEN;
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX)
+ len += UEC_TX_FW_STATS_LEN;
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX)
+ len += UEC_RX_FW_STATS_LEN;
+
+ return len;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void uec_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ u32 stats_mode = ugeth->ug_info->statisticsMode;
+
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) {
+ memcpy(buf, hw_stat_gstrings, UEC_HW_STATS_LEN *
+ ETH_GSTRING_LEN);
+ buf += UEC_HW_STATS_LEN * ETH_GSTRING_LEN;
+ }
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
+ memcpy(buf, tx_fw_stat_gstrings, UEC_TX_FW_STATS_LEN *
+ ETH_GSTRING_LEN);
+ buf += UEC_TX_FW_STATS_LEN * ETH_GSTRING_LEN;
+ }
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX)
+ memcpy(buf, rx_fw_stat_gstrings, UEC_RX_FW_STATS_LEN *
+ ETH_GSTRING_LEN);
+}
+
+static void uec_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, uint64_t *data)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ u32 stats_mode = ugeth->ug_info->statisticsMode;
+ u32 __iomem *base;
+ int i, j = 0;
+
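+ /* For each enabled statistics block, copy its 32-bit counters in the
+ * order of the corresponding string table; report 0 when the backing
+ * registers or PRAM were never set up.
+ */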
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) {
+ if (ugeth->ug_regs)
+ base = (u32 __iomem *)&ugeth->ug_regs->tx64;
+ else
+ base = NULL;
+
+ for (i = 0; i < UEC_HW_STATS_LEN; i++)
+ data[j++] = base ? in_be32(&base[i]) : 0;
+ }
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
+ base = (u32 __iomem *)ugeth->p_tx_fw_statistics_pram;
+ for (i = 0; i < UEC_TX_FW_STATS_LEN; i++)
+ data[j++] = base ? in_be32(&base[i]) : 0;
+ }
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) {
+ base = (u32 __iomem *)ugeth->p_rx_fw_statistics_pram;
+ for (i = 0; i < UEC_RX_FW_STATS_LEN; i++)
+ data[j++] = base ? in_be32(&base[i]) : 0;
+ }
+}
+
+/* Report driver information */
+static void
+uec_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, "QUICC ENGINE", sizeof(drvinfo->bus_info));
+}
+
+#ifdef CONFIG_PM
+
+static void uec_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ struct phy_device *phydev = ugeth->phydev;
+
+ if (phydev && phydev->irq)
+ wol->supported |= WAKE_PHY;
+ if (qe_alive_during_sleep())
+ wol->supported |= WAKE_MAGIC;
+
+ wol->wolopts = ugeth->wol_en;
+}
+
+static int uec_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ struct phy_device *phydev = ugeth->phydev;
+
+ if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+ return -EINVAL;
+ else if (wol->wolopts & WAKE_PHY && (!phydev || !phydev->irq))
+ return -EINVAL;
+ else if (wol->wolopts & WAKE_MAGIC && !qe_alive_during_sleep())
+ return -EINVAL;
+
+ ugeth->wol_en = wol->wolopts;
+ device_set_wakeup_enable(&netdev->dev, ugeth->wol_en);
+
+ return 0;
+}
+
+#else
+#define uec_get_wol NULL
+#define uec_set_wol NULL
+#endif /* CONFIG_PM */
+
+static const struct ethtool_ops uec_ethtool_ops = {
+ .get_drvinfo = uec_get_drvinfo,
+ .get_regs_len = uec_get_regs_len,
+ .get_regs = uec_get_regs,
+ .get_msglevel = uec_get_msglevel,
+ .set_msglevel = uec_set_msglevel,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = uec_get_ringparam,
+ .set_ringparam = uec_set_ringparam,
+ .get_pauseparam = uec_get_pauseparam,
+ .set_pauseparam = uec_set_pauseparam,
+ .get_sset_count = uec_get_sset_count,
+ .get_strings = uec_get_strings,
+ .get_ethtool_stats = uec_get_ethtool_stats,
+ .get_wol = uec_get_wol,
+ .set_wol = uec_set_wol,
+ .get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = uec_get_ksettings,
+ .set_link_ksettings = uec_set_ksettings,
+};
+
+void uec_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &uec_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
new file mode 100644
index 000000000..d7d39a58c
--- /dev/null
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -0,0 +1,404 @@
+/*
+ * QorIQ 10G MDIO Controller
+ *
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2021 NXP
+ *
+ * Authors: Andy Fleming <afleming@freescale.com>
+ * Timur Tabi <timur@freescale.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/acpi.h>
+#include <linux/acpi_mdio.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/slab.h>
+
+/* Maximum number of busy-wait iterations for a register to respond */
+#define TIMEOUT 1000
+
+struct tgec_mdio_controller {
+ __be32 reserved[12];
+ __be32 mdio_stat; /* MDIO configuration and status */
+ __be32 mdio_ctl; /* MDIO control */
+ __be32 mdio_data; /* MDIO data */
+ __be32 mdio_addr; /* MDIO address */
+} __packed;
+
+#define MDIO_STAT_ENC BIT(6)
+#define MDIO_STAT_CLKDIV(x) (((x) & 0x1ff) << 7)
+#define MDIO_STAT_BSY BIT(0)
+#define MDIO_STAT_RD_ER BIT(1)
+#define MDIO_STAT_PRE_DIS BIT(5)
+#define MDIO_CTL_DEV_ADDR(x) (x & 0x1f)
+#define MDIO_CTL_PORT_ADDR(x) ((x & 0x1f) << 5)
+#define MDIO_CTL_PRE_DIS BIT(10)
+#define MDIO_CTL_SCAN_EN BIT(11)
+#define MDIO_CTL_POST_INC BIT(14)
+#define MDIO_CTL_READ BIT(15)
+
+#define MDIO_DATA(x) (x & 0xffff)
+
+struct mdio_fsl_priv {
+ struct tgec_mdio_controller __iomem *mdio_base;
+ struct clk *enet_clk;
+ u32 mdc_freq;
+ bool is_little_endian;
+ bool has_a009885;
+ bool has_a011043;
+};
+
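+/*
+ * MMIO accessors: the same MDIO controller is mapped little-endian on some
+ * SoCs and big-endian on others, selected at probe time from the
+ * "little-endian" device property.
+ */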
+static u32 xgmac_read32(void __iomem *regs,
+ bool is_little_endian)
+{
+ if (is_little_endian)
+ return ioread32(regs);
+ else
+ return ioread32be(regs);
+}
+
+static void xgmac_write32(u32 value,
+ void __iomem *regs,
+ bool is_little_endian)
+{
+ if (is_little_endian)
+ iowrite32(value, regs);
+ else
+ iowrite32be(value, regs);
+}
+
+/*
+ * Wait until the MDIO bus is free
+ */
+static int xgmac_wait_until_free(struct device *dev,
+ struct tgec_mdio_controller __iomem *regs,
+ bool is_little_endian)
+{
+ unsigned int timeout;
+
+ /* Wait till the bus is free */
+ timeout = TIMEOUT;
+ while ((xgmac_read32(&regs->mdio_stat, is_little_endian) &
+ MDIO_STAT_BSY) && timeout) {
+ cpu_relax();
+ timeout--;
+ }
+
+ if (!timeout) {
+ dev_err(dev, "timeout waiting for bus to be free\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * Wait till the MDIO read or write operation is complete
+ */
+static int xgmac_wait_until_done(struct device *dev,
+ struct tgec_mdio_controller __iomem *regs,
+ bool is_little_endian)
+{
+ unsigned int timeout;
+
+ /* Wait till the MDIO write is complete */
+ timeout = TIMEOUT;
+ while ((xgmac_read32(&regs->mdio_stat, is_little_endian) &
+ MDIO_STAT_BSY) && timeout) {
+ cpu_relax();
+ timeout--;
+ }
+
+ if (!timeout) {
+ dev_err(dev, "timeout waiting for operation to complete\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * Write value to the register at regnum of the PHY at phy_id, waiting until
+ * the write is done before returning.
+ */
+static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
+{
+ struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
+ struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
+ uint16_t dev_addr;
+ u32 mdio_ctl, mdio_stat;
+ int ret;
+ bool endian = priv->is_little_endian;
+
+ mdio_stat = xgmac_read32(&regs->mdio_stat, endian);
+ if (regnum & MII_ADDR_C45) {
+ /* Clause 45 (ie 10G) */
+ dev_addr = (regnum >> 16) & 0x1f;
+ mdio_stat |= MDIO_STAT_ENC;
+ } else {
+ /* Clause 22 (ie 1G) */
+ dev_addr = regnum & 0x1f;
+ mdio_stat &= ~MDIO_STAT_ENC;
+ }
+
+ xgmac_write32(mdio_stat, &regs->mdio_stat, endian);
+
+ ret = xgmac_wait_until_free(&bus->dev, regs, endian);
+ if (ret)
+ return ret;
+
+ /* Set the port and dev addr */
+ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+ xgmac_write32(mdio_ctl, &regs->mdio_ctl, endian);
+
+ /* Set the register address */
+ if (regnum & MII_ADDR_C45) {
+ xgmac_write32(regnum & 0xffff, &regs->mdio_addr, endian);
+
+ ret = xgmac_wait_until_free(&bus->dev, regs, endian);
+ if (ret)
+ return ret;
+ }
+
+ /* Write the value to the register */
+ xgmac_write32(MDIO_DATA(value), &regs->mdio_data, endian);
+
+ ret = xgmac_wait_until_done(&bus->dev, regs, endian);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * Reads from register regnum in the PHY for device dev, returning the value.
+ * Clears miimcom first. All PHY configuration has to be done through the
+ * TSEC1 MIIM regs.
+ */
+static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+{
+ struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
+ struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
+ unsigned long flags;
+ uint16_t dev_addr;
+ uint32_t mdio_stat;
+ uint32_t mdio_ctl;
+ int ret;
+ bool endian = priv->is_little_endian;
+
+ mdio_stat = xgmac_read32(&regs->mdio_stat, endian);
+ if (regnum & MII_ADDR_C45) {
+ dev_addr = (regnum >> 16) & 0x1f;
+ mdio_stat |= MDIO_STAT_ENC;
+ } else {
+ dev_addr = regnum & 0x1f;
+ mdio_stat &= ~MDIO_STAT_ENC;
+ }
+
+ xgmac_write32(mdio_stat, &regs->mdio_stat, endian);
+
+ ret = xgmac_wait_until_free(&bus->dev, regs, endian);
+ if (ret)
+ return ret;
+
+ /* Set the Port and Device Addrs */
+ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+ xgmac_write32(mdio_ctl, &regs->mdio_ctl, endian);
+
+ /* Set the register address */
+ if (regnum & MII_ADDR_C45) {
+ xgmac_write32(regnum & 0xffff, &regs->mdio_addr, endian);
+
+ ret = xgmac_wait_until_free(&bus->dev, regs, endian);
+ if (ret)
+ return ret;
+ }
+
+ if (priv->has_a009885)
+ /* Once the operation completes, i.e. MDIO_STAT_BSY clears, we
+ * must read back the data register within 16 MDC cycles.
+ */
+ local_irq_save(flags);
+
+ /* Initiate the read */
+ xgmac_write32(mdio_ctl | MDIO_CTL_READ, &regs->mdio_ctl, endian);
+
+ ret = xgmac_wait_until_done(&bus->dev, regs, endian);
+ if (ret)
+ goto irq_restore;
+
+ /* Return all Fs if nothing was there */
+ if ((xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) &&
+ !priv->has_a011043) {
+ dev_dbg(&bus->dev,
+ "Error while reading PHY%d reg at %d.%d\n",
+ phy_id, dev_addr, regnum);
+ ret = 0xffff;
+ } else {
+ ret = xgmac_read32(&regs->mdio_data, endian) & 0xffff;
+ dev_dbg(&bus->dev, "read %04x\n", ret);
+ }
+
+irq_restore:
+ if (priv->has_a009885)
+ local_irq_restore(flags);
+
+ return ret;
+}
+
+static int xgmac_mdio_set_mdc_freq(struct mii_bus *bus)
+{
+ struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
+ struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
+ struct device *dev = bus->parent;
+ u32 mdio_stat, div;
+
+ if (device_property_read_u32(dev, "clock-frequency", &priv->mdc_freq))
+ return 0;
+
+ priv->enet_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->enet_clk)) {
+ dev_err(dev, "Input clock unknown, not changing MDC frequency");
+ return PTR_ERR(priv->enet_clk);
+ }
+
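+ /* Translate the requested MDC rate into the controller's 9-bit clock
+ * divider and program it.
+ */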
+ div = ((clk_get_rate(priv->enet_clk) / priv->mdc_freq) - 1) / 2;
+ if (div < 5 || div > 0x1ff) {
+ dev_err(dev, "Requested MDC frequency is out of range, ignoring");
+ return -EINVAL;
+ }
+
+ mdio_stat = xgmac_read32(&regs->mdio_stat, priv->is_little_endian);
+ mdio_stat &= ~MDIO_STAT_CLKDIV(0x1ff);
+ mdio_stat |= MDIO_STAT_CLKDIV(div);
+ xgmac_write32(mdio_stat, &regs->mdio_stat, priv->is_little_endian);
+ return 0;
+}
+
+static void xgmac_mdio_set_suppress_preamble(struct mii_bus *bus)
+{
+ struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
+ struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
+ struct device *dev = bus->parent;
+ u32 mdio_stat;
+
+ if (!device_property_read_bool(dev, "suppress-preamble"))
+ return;
+
+ mdio_stat = xgmac_read32(&regs->mdio_stat, priv->is_little_endian);
+ mdio_stat |= MDIO_STAT_PRE_DIS;
+ xgmac_write32(mdio_stat, &regs->mdio_stat, priv->is_little_endian);
+}
+
+static int xgmac_mdio_probe(struct platform_device *pdev)
+{
+ struct fwnode_handle *fwnode;
+ struct mdio_fsl_priv *priv;
+ struct resource *res;
+ struct mii_bus *bus;
+ int ret;
+
+ /* In DPAA-1, MDIO is one of the many FMan sub-devices. The FMan
+ * defines a register space that spans a large area, covering all the
+ * subdevice areas. Therefore, MDIO cannot claim exclusive access to
+ * this register area.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "could not obtain address\n");
+ return -EINVAL;
+ }
+
+ bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(struct mdio_fsl_priv));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "Freescale XGMAC MDIO Bus";
+ bus->read = xgmac_mdio_read;
+ bus->write = xgmac_mdio_write;
+ bus->parent = &pdev->dev;
+ bus->probe_capabilities = MDIOBUS_C22_C45;
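+ /* Derive a unique bus id from the controller's MMIO base address. */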
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%pa", &res->start);
+
+ priv = bus->priv;
+ priv->mdio_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!priv->mdio_base)
+ return -ENOMEM;
+
+ /* For both ACPI and DT cases, endianness of MDIO controller
+ * needs to be specified using "little-endian" property.
+ */
+ priv->is_little_endian = device_property_read_bool(&pdev->dev,
+ "little-endian");
+
+ priv->has_a009885 = device_property_read_bool(&pdev->dev,
+ "fsl,erratum-a009885");
+ priv->has_a011043 = device_property_read_bool(&pdev->dev,
+ "fsl,erratum-a011043");
+
+ xgmac_mdio_set_suppress_preamble(bus);
+
+ ret = xgmac_mdio_set_mdc_freq(bus);
+ if (ret)
+ return ret;
+
+ fwnode = dev_fwnode(&pdev->dev);
+ if (is_of_node(fwnode))
+ ret = of_mdiobus_register(bus, to_of_node(fwnode));
+ else if (is_acpi_node(fwnode))
+ ret = acpi_mdiobus_register(bus, fwnode);
+ else
+ ret = -EINVAL;
+ if (ret) {
+ dev_err(&pdev->dev, "cannot register MDIO bus\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, bus);
+
+ return 0;
+}
+
+static const struct of_device_id xgmac_mdio_match[] = {
+ {
+ .compatible = "fsl,fman-xmdio",
+ },
+ {
+ .compatible = "fsl,fman-memac-mdio",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xgmac_mdio_match);
+
+static const struct acpi_device_id xgmac_acpi_match[] = {
+ { "NXP0006" },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, xgmac_acpi_match);
+
+static struct platform_driver xgmac_mdio_driver = {
+ .driver = {
+ .name = "fsl-fman_xmdio",
+ .of_match_table = xgmac_mdio_match,
+ .acpi_match_table = xgmac_acpi_match,
+ },
+ .probe = xgmac_mdio_probe,
+};
+
+module_platform_driver(xgmac_mdio_driver);
+
+MODULE_DESCRIPTION("Freescale QorIQ 10G MDIO Controller");
+MODULE_LICENSE("GPL v2");