author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-07 18:49:45 +0000
commit    2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree      848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/net/ethernet/freescale/dpaa2
parent    Initial commit. (diff)
Adding upstream version 6.1.76. (tag: upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/freescale/dpaa2')
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/Kconfig                |   39
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/Makefile               |   17
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-dcb.c        |  150
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c    |  158
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h    |   28
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c    |  307
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h      |  157
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c            | 4896
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h            |  774
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c        |  897
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c            |  574
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h            |   54
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c            |  262
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h            |   18
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c |  211
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c  |  885
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c         | 3531
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h         |  280
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpkg.h                 |  481
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h            |   85
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpmac.c                |  237
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpmac.h                |  213
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h             |  686
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpni.c                 | 2181
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpni.h                 | 1110
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h            |   74
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dprtc.c                |  293
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dprtc.h                |   68
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h             |  556
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpsw.c                 | 1661
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpsw.h                 |  791
31 files changed, 21674 insertions, 0 deletions
diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig
new file mode 100644
index 000000000..d029b69c3
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config FSL_DPAA2_ETH
+ tristate "Freescale DPAA2 Ethernet"
+ depends on FSL_MC_BUS && FSL_MC_DPIO
+ select PHYLINK
+ select PCS_LYNX
+ select FSL_XGMAC_MDIO
+ select NET_DEVLINK
+ help
+ This is the DPAA2 Ethernet driver supporting Freescale SoCs
+ with DPAA2 (DataPath Acceleration Architecture v2).
+ The driver manages network objects discovered on the Freescale
+ MC bus.
+
+if FSL_DPAA2_ETH
+config FSL_DPAA2_ETH_DCB
+ bool "Data Center Bridging (DCB) Support"
+ default n
+ depends on DCB
+ help
+ Enable Priority-Based Flow Control (PFC) support for DPAA2 Ethernet
+ devices.
+endif
+
+config FSL_DPAA2_PTP_CLOCK
+ tristate "Freescale DPAA2 PTP Clock"
+ depends on FSL_DPAA2_ETH && PTP_1588_CLOCK_QORIQ
+ default y
+ help
+ This driver adds support for using the DPAA2 1588 timer module
+ as a PTP clock.
+
+config FSL_DPAA2_SWITCH
+ tristate "Freescale DPAA2 Ethernet Switch"
+ depends on BRIDGE || BRIDGE=n
+ depends on NET_SWITCHDEV
+ help
+ Driver for Freescale DPAA2 Ethernet Switch. This driver manages
+ switch objects discovered on the Freescale MC bus.
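
A minimal configuration fragment enabling everything defined above (a
sketch; the FSL_MC_BUS, FSL_MC_DPIO, PTP_1588_CLOCK_QORIQ, DCB and
NET_SWITCHDEV dependencies must already be satisfied for these prompts
to appear):

	CONFIG_FSL_DPAA2_ETH=y
	CONFIG_FSL_DPAA2_ETH_DCB=y
	CONFIG_FSL_DPAA2_PTP_CLOCK=y
	CONFIG_FSL_DPAA2_SWITCH=m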
diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile
new file mode 100644
index 000000000..3d9842af7
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/Makefile
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Freescale DPAA2 Ethernet controller
+#
+
+obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
+obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl-dpaa2-ptp.o
+obj-$(CONFIG_FSL_DPAA2_SWITCH) += fsl-dpaa2-switch.o
+
+fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o dpaa2-eth-devlink.o
+fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DCB} += dpaa2-eth-dcb.o
+fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o
+fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o
+fsl-dpaa2-switch-objs := dpaa2-switch.o dpaa2-switch-ethtool.o dpsw.o dpaa2-switch-flower.o dpaa2-mac.o dpmac.o
+
+# Needed by the tracing framework
+CFLAGS_dpaa2-eth.o := -I$(src)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-dcb.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-dcb.c
new file mode 100644
index 000000000..84de06441
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-dcb.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2020 NXP */
+
+#include "dpaa2-eth.h"
+
+static int dpaa2_eth_dcbnl_ieee_getpfc(struct net_device *net_dev,
+ struct ieee_pfc *pfc)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ if (!(priv->link_state.options & DPNI_LINK_OPT_PFC_PAUSE))
+ return 0;
+
+ memcpy(pfc, &priv->pfc, sizeof(priv->pfc));
+ pfc->pfc_cap = dpaa2_eth_tc_count(priv);
+
+ return 0;
+}
+
+static inline bool dpaa2_eth_is_prio_enabled(u8 pfc_en, u8 tc)
+{
+ return !!(pfc_en & (1 << tc));
+}
+
+static int dpaa2_eth_set_pfc_cn(struct dpaa2_eth_priv *priv, u8 pfc_en)
+{
+ struct dpni_congestion_notification_cfg cfg = {0};
+ int i, err;
+
+ cfg.notification_mode = DPNI_CONG_OPT_FLOW_CONTROL;
+ cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
+ cfg.message_iova = 0ULL;
+ cfg.message_ctx = 0ULL;
+
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ if (dpaa2_eth_is_prio_enabled(pfc_en, i)) {
+ cfg.threshold_entry = DPAA2_ETH_CN_THRESH_ENTRY(priv);
+ cfg.threshold_exit = DPAA2_ETH_CN_THRESH_EXIT(priv);
+ } else {
+ /* For priorities not set in the pfc_en mask, we leave
+ * the congestion thresholds at zero, which effectively
+ * disables generation of PFC frames for them
+ */
+ cfg.threshold_entry = 0;
+ cfg.threshold_exit = 0;
+ }
+
+ err = dpni_set_congestion_notification(priv->mc_io, 0,
+ priv->mc_token,
+ DPNI_QUEUE_RX, i, &cfg);
+ if (err) {
+ netdev_err(priv->net_dev,
+ "dpni_set_congestion_notification failed\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_dcbnl_ieee_setpfc(struct net_device *net_dev,
+ struct ieee_pfc *pfc)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct dpni_link_cfg link_cfg = {0};
+ bool tx_pause;
+ int err;
+
+ if (pfc->mbc || pfc->delay)
+ return -EOPNOTSUPP;
+
+ /* If same PFC enabled mask, nothing to do */
+ if (priv->pfc.pfc_en == pfc->pfc_en)
+ return 0;
+
+ /* We allow PFC configuration even if it won't have any effect until
+ * general pause frames are enabled
+ */
+ tx_pause = dpaa2_eth_tx_pause_enabled(priv->link_state.options);
+ if (!dpaa2_eth_rx_pause_enabled(priv->link_state.options) || !tx_pause)
+ netdev_warn(net_dev, "Pause support must be enabled in order for PFC to work!\n");
+
+ link_cfg.rate = priv->link_state.rate;
+ link_cfg.options = priv->link_state.options;
+ if (pfc->pfc_en)
+ link_cfg.options |= DPNI_LINK_OPT_PFC_PAUSE;
+ else
+ link_cfg.options &= ~DPNI_LINK_OPT_PFC_PAUSE;
+ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
+ if (err) {
+ netdev_err(net_dev, "dpni_set_link_cfg failed\n");
+ return err;
+ }
+
+ /* Configure congestion notifications for the enabled priorities */
+ err = dpaa2_eth_set_pfc_cn(priv, pfc->pfc_en);
+ if (err)
+ return err;
+
+ memcpy(&priv->pfc, pfc, sizeof(priv->pfc));
+ priv->pfc_enabled = !!pfc->pfc_en;
+
+ dpaa2_eth_set_rx_taildrop(priv, tx_pause, priv->pfc_enabled);
+
+ return 0;
+}
+
+static u8 dpaa2_eth_dcbnl_getdcbx(struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ return priv->dcbx_mode;
+}
+
+static u8 dpaa2_eth_dcbnl_setdcbx(struct net_device *net_dev, u8 mode)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ return (mode != (priv->dcbx_mode)) ? 1 : 0;
+}
+
+static u8 dpaa2_eth_dcbnl_getcap(struct net_device *net_dev, int capid, u8 *cap)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ switch (capid) {
+ case DCB_CAP_ATTR_PFC:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_PFC_TCS:
+ *cap = 1 << (dpaa2_eth_tc_count(priv) - 1);
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = priv->dcbx_mode;
+ break;
+ default:
+ *cap = false;
+ break;
+ }
+
+ return 0;
+}
+
+const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops = {
+ .ieee_getpfc = dpaa2_eth_dcbnl_ieee_getpfc,
+ .ieee_setpfc = dpaa2_eth_dcbnl_ieee_setpfc,
+ .getdcbx = dpaa2_eth_dcbnl_getdcbx,
+ .setdcbx = dpaa2_eth_dcbnl_setdcbx,
+ .getcap = dpaa2_eth_dcbnl_getcap,
+};
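
For context, a sketch of the argument dpaa2_eth_dcbnl_ieee_setpfc()
receives when user space enables PFC on priorities 3 and 4 (field names
from struct ieee_pfc in the dcbnl UAPI headers; values illustrative):

	struct ieee_pfc pfc = {
		.pfc_en = BIT(3) | BIT(4),	/* per-priority PFC enable mask */
		.mbc    = 0,	/* MACsec bypass: unsupported, must be 0 */
		.delay  = 0,	/* link delay allowance: unsupported, must be 0 */
	};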
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
new file mode 100644
index 000000000..8356af463
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2015 Freescale Semiconductor Inc.
+ * Copyright 2018-2019 NXP
+ */
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include "dpaa2-eth.h"
+#include "dpaa2-eth-debugfs.h"
+
+#define DPAA2_ETH_DBG_ROOT "dpaa2-eth"
+
+static struct dentry *dpaa2_dbg_root;
+
+static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset)
+{
+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
+ struct rtnl_link_stats64 *stats;
+ struct dpaa2_eth_drv_stats *extras;
+ int i;
+
+ seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name);
+ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s%16s\n",
+ "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf",
+ "Tx SG", "Tx converted to SG", "Enq busy");
+
+ for_each_online_cpu(i) {
+ stats = per_cpu_ptr(priv->percpu_stats, i);
+ extras = per_cpu_ptr(priv->percpu_extras, i);
+ seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n",
+ i,
+ stats->rx_packets,
+ stats->rx_errors,
+ extras->rx_sg_frames,
+ stats->tx_packets,
+ stats->tx_errors,
+ extras->tx_conf_frames,
+ extras->tx_sg_frames,
+ extras->tx_converted_sg_frames,
+ extras->tx_portal_busy);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(dpaa2_dbg_cpu);
+
+static char *fq_type_to_str(struct dpaa2_eth_fq *fq)
+{
+ switch (fq->type) {
+ case DPAA2_RX_FQ:
+ return "Rx";
+ case DPAA2_TX_CONF_FQ:
+ return "Tx conf";
+ default:
+ return "N/A";
+ }
+}
+
+static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
+{
+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
+ struct dpaa2_eth_fq *fq;
+ u32 fcnt, bcnt;
+ int i, err;
+
+ seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name);
+ seq_printf(file, "%s%16s%16s%16s%16s%16s\n",
+ "VFQID", "CPU", "TC", "Type", "Frames", "Pending frames");
+
+ for (i = 0; i < priv->num_fqs; i++) {
+ fq = &priv->fq[i];
+ err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
+ if (err)
+ fcnt = 0;
+
+ /* Skip FQs with no traffic */
+ if (!fq->stats.frames && !fcnt)
+ continue;
+
+ seq_printf(file, "%5d%16d%16d%16s%16llu%16u\n",
+ fq->fqid,
+ fq->target_cpu,
+ fq->tc,
+ fq_type_to_str(fq),
+ fq->stats.frames,
+ fcnt);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(dpaa2_dbg_fqs);
+
+static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
+{
+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
+ struct dpaa2_eth_channel *ch;
+ int i;
+
+ seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name);
+ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n",
+ "CHID", "CPU", "Deq busy", "Frames", "CDANs",
+ "Avg Frm/CDAN", "Buf count");
+
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
+ seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu%16d\n",
+ ch->ch_id,
+ ch->nctx.desired_cpu,
+ ch->stats.dequeue_portal_busy,
+ ch->stats.frames,
+ ch->stats.cdan,
+ div64_u64(ch->stats.frames, ch->stats.cdan),
+ ch->buf_count);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(dpaa2_dbg_ch);
+
+void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
+{
+ struct fsl_mc_device *dpni_dev;
+ struct dentry *dir;
+ char name[10];
+
+ /* Create a directory for the interface */
+ dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
+ snprintf(name, 10, "dpni.%d", dpni_dev->obj_desc.id);
+ dir = debugfs_create_dir(name, dpaa2_dbg_root);
+ priv->dbg.dir = dir;
+
+ /* per-cpu stats file */
+ debugfs_create_file("cpu_stats", 0444, dir, priv, &dpaa2_dbg_cpu_fops);
+
+ /* per-fq stats file */
+ debugfs_create_file("fq_stats", 0444, dir, priv, &dpaa2_dbg_fqs_fops);
+
+ /* per-channel stats file */
+ debugfs_create_file("ch_stats", 0444, dir, priv, &dpaa2_dbg_ch_fops);
+}
+
+void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
+{
+ debugfs_remove_recursive(priv->dbg.dir);
+}
+
+void dpaa2_eth_dbg_init(void)
+{
+ dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL);
+ pr_debug("DPAA2-ETH: debugfs created\n");
+}
+
+void dpaa2_eth_dbg_exit(void)
+{
+ debugfs_remove(dpaa2_dbg_root);
+}
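
With debugfs mounted, the code above yields one directory per interface;
the expected layout (illustrative, with <N> being the dpni object id) is:

	/sys/kernel/debug/dpaa2-eth/dpni.<N>/cpu_stats
	/sys/kernel/debug/dpaa2-eth/dpni.<N>/fq_stats
	/sys/kernel/debug/dpaa2-eth/dpni.<N>/ch_stats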
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h
new file mode 100644
index 000000000..15598b28f
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2015 Freescale Semiconductor Inc.
+ * Copyright 2018-2019 NXP
+ */
+#ifndef DPAA2_ETH_DEBUGFS_H
+#define DPAA2_ETH_DEBUGFS_H
+
+#include <linux/dcache.h>
+
+struct dpaa2_eth_priv;
+
+struct dpaa2_debugfs {
+ struct dentry *dir;
+};
+
+#ifdef CONFIG_DEBUG_FS
+void dpaa2_eth_dbg_init(void);
+void dpaa2_eth_dbg_exit(void);
+void dpaa2_dbg_add(struct dpaa2_eth_priv *priv);
+void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv);
+#else
+static inline void dpaa2_eth_dbg_init(void) {}
+static inline void dpaa2_eth_dbg_exit(void) {}
+static inline void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) {}
+static inline void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) {}
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* DPAA2_ETH_DEBUGFS_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
new file mode 100644
index 000000000..7fefe1574
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2020 NXP
+ */
+#include "dpaa2-eth.h"
+
+#define DPAA2_ETH_TRAP_DROP(_id, _group_id) \
+ DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, 0)
+
+static const struct devlink_trap_group dpaa2_eth_trap_groups_arr[] = {
+ DEVLINK_TRAP_GROUP_GENERIC(PARSER_ERROR_DROPS, 0),
+};
+
+static const struct devlink_trap dpaa2_eth_traps_arr[] = {
+ DPAA2_ETH_TRAP_DROP(VXLAN_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(LLC_SNAP_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(VLAN_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(PPPOE_PPP_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(MPLS_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(ARP_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(IP_1_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(IP_N_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(GRE_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(UDP_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(TCP_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(IPSEC_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(SCTP_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(DCCP_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(GTP_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(ESP_PARSING, PARSER_ERROR_DROPS),
+};
+
+static int dpaa2_eth_dl_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct dpaa2_eth_devlink_priv *dl_priv = devlink_priv(devlink);
+ struct dpaa2_eth_priv *priv = dl_priv->dpaa2_priv;
+ char buf[10];
+ int err;
+
+ err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
+ if (err)
+ return err;
+
+ scnprintf(buf, 10, "%d.%d", priv->dpni_ver_major, priv->dpni_ver_minor);
+ err = devlink_info_version_running_put(req, "dpni", buf);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static struct dpaa2_eth_trap_item *
+dpaa2_eth_dl_trap_item_lookup(struct dpaa2_eth_priv *priv, u16 trap_id)
+{
+ struct dpaa2_eth_trap_data *dpaa2_eth_trap_data = priv->trap_data;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_traps_arr); i++) {
+ if (dpaa2_eth_traps_arr[i].id == trap_id)
+ return &dpaa2_eth_trap_data->trap_items_arr[i];
+ }
+
+ return NULL;
+}
+
+struct dpaa2_eth_trap_item *dpaa2_eth_dl_get_trap(struct dpaa2_eth_priv *priv,
+ struct dpaa2_fapr *fapr)
+{
+ static const struct dpaa2_faf_error_bit {
+ int position;
+ enum devlink_trap_generic_id trap_id;
+ } faf_bits[] = {
+ { .position = 5, .trap_id = DEVLINK_TRAP_GENERIC_ID_VXLAN_PARSING },
+ { .position = 20, .trap_id = DEVLINK_TRAP_GENERIC_ID_LLC_SNAP_PARSING },
+ { .position = 24, .trap_id = DEVLINK_TRAP_GENERIC_ID_VLAN_PARSING },
+ { .position = 26, .trap_id = DEVLINK_TRAP_GENERIC_ID_PPPOE_PPP_PARSING },
+ { .position = 29, .trap_id = DEVLINK_TRAP_GENERIC_ID_MPLS_PARSING },
+ { .position = 31, .trap_id = DEVLINK_TRAP_GENERIC_ID_ARP_PARSING },
+ { .position = 52, .trap_id = DEVLINK_TRAP_GENERIC_ID_IP_1_PARSING },
+ { .position = 61, .trap_id = DEVLINK_TRAP_GENERIC_ID_IP_N_PARSING },
+ { .position = 67, .trap_id = DEVLINK_TRAP_GENERIC_ID_GRE_PARSING },
+ { .position = 71, .trap_id = DEVLINK_TRAP_GENERIC_ID_UDP_PARSING },
+ { .position = 76, .trap_id = DEVLINK_TRAP_GENERIC_ID_TCP_PARSING },
+ { .position = 80, .trap_id = DEVLINK_TRAP_GENERIC_ID_IPSEC_PARSING },
+ { .position = 82, .trap_id = DEVLINK_TRAP_GENERIC_ID_SCTP_PARSING },
+ { .position = 84, .trap_id = DEVLINK_TRAP_GENERIC_ID_DCCP_PARSING },
+ { .position = 88, .trap_id = DEVLINK_TRAP_GENERIC_ID_GTP_PARSING },
+ { .position = 90, .trap_id = DEVLINK_TRAP_GENERIC_ID_ESP_PARSING },
+ };
+ u64 faf_word;
+ u64 mask;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(faf_bits); i++) {
+ if (faf_bits[i].position < 32) {
+ /* Low part of FAF.
+ * position ranges from 31 to 0, mask from 0 to 31.
+ */
+ mask = 1ull << (31 - faf_bits[i].position);
+ faf_word = __le32_to_cpu(fapr->faf_lo);
+ } else {
+ /* High part of FAF.
+ * position ranges from 95 to 32, mask from 0 to 63.
+ */
+ mask = 1ull << (63 - (faf_bits[i].position - 32));
+ faf_word = __le64_to_cpu(fapr->faf_hi);
+ }
+ if (faf_word & mask)
+ return dpaa2_eth_dl_trap_item_lookup(priv, faf_bits[i].trap_id);
+ }
+ return NULL;
+}
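+
+/* Illustrative sketch (not part of the driver): the position-to-mask
+ * conversion used above, as a standalone helper. Bit positions 0-31
+ * live in faf_lo (e.g. position 24, VLAN, maps to 1ull << 7), while
+ * positions 32-95 live in faf_hi (e.g. position 71, UDP, maps to
+ * 1ull << 24).
+ */
+static inline u64 dpaa2_eth_faf_bit_to_mask(int position)
+{
+ if (position < 32)
+ return 1ull << (31 - position);
+ return 1ull << (63 - (position - 32));
+}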
+
+static int dpaa2_eth_dl_trap_init(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ void *trap_ctx)
+{
+ struct dpaa2_eth_devlink_priv *dl_priv = devlink_priv(devlink);
+ struct dpaa2_eth_priv *priv = dl_priv->dpaa2_priv;
+ struct dpaa2_eth_trap_item *dpaa2_eth_trap_item;
+
+ dpaa2_eth_trap_item = dpaa2_eth_dl_trap_item_lookup(priv, trap->id);
+ if (WARN_ON(!dpaa2_eth_trap_item))
+ return -ENOENT;
+
+ dpaa2_eth_trap_item->trap_ctx = trap_ctx;
+
+ return 0;
+}
+
+static int dpaa2_eth_dl_trap_action_set(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack)
+{
+ /* No support for changing the action of an independent packet trap,
+ * only per trap group - parser error drops
+ */
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot change trap action independently of group");
+ return -EOPNOTSUPP;
+}
+
+static int dpaa2_eth_dl_trap_group_action_set(struct devlink *devlink,
+ const struct devlink_trap_group *group,
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack)
+{
+ struct dpaa2_eth_devlink_priv *dl_priv = devlink_priv(devlink);
+ struct dpaa2_eth_priv *priv = dl_priv->dpaa2_priv;
+ struct net_device *net_dev = priv->net_dev;
+ struct device *dev = net_dev->dev.parent;
+ struct dpni_error_cfg err_cfg = {0};
+ int err;
+
+ if (group->id != DEVLINK_TRAP_GROUP_GENERIC_ID_PARSER_ERROR_DROPS)
+ return -EOPNOTSUPP;
+
+ /* Configure handling of frames marked as errors from the parser */
+ err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
+ err_cfg.set_frame_annotation = 1;
+
+ switch (action) {
+ case DEVLINK_TRAP_ACTION_DROP:
+ err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
+ break;
+ case DEVLINK_TRAP_ACTION_TRAP:
+ err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token, &err_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_errors_behavior failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct devlink_ops dpaa2_eth_devlink_ops = {
+ .info_get = dpaa2_eth_dl_info_get,
+ .trap_init = dpaa2_eth_dl_trap_init,
+ .trap_action_set = dpaa2_eth_dl_trap_action_set,
+ .trap_group_action_set = dpaa2_eth_dl_trap_group_action_set,
+};
+
+int dpaa2_eth_dl_alloc(struct dpaa2_eth_priv *priv)
+{
+ struct net_device *net_dev = priv->net_dev;
+ struct device *dev = net_dev->dev.parent;
+ struct dpaa2_eth_devlink_priv *dl_priv;
+
+ priv->devlink =
+ devlink_alloc(&dpaa2_eth_devlink_ops, sizeof(*dl_priv), dev);
+ if (!priv->devlink) {
+ dev_err(dev, "devlink_alloc failed\n");
+ return -ENOMEM;
+ }
+ dl_priv = devlink_priv(priv->devlink);
+ dl_priv->dpaa2_priv = priv;
+ return 0;
+}
+
+void dpaa2_eth_dl_free(struct dpaa2_eth_priv *priv)
+{
+ devlink_free(priv->devlink);
+}
+
+void dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv)
+{
+ devlink_register(priv->devlink);
+}
+
+void dpaa2_eth_dl_unregister(struct dpaa2_eth_priv *priv)
+{
+ devlink_unregister(priv->devlink);
+}
+
+int dpaa2_eth_dl_port_add(struct dpaa2_eth_priv *priv)
+{
+ struct devlink_port *devlink_port = &priv->devlink_port;
+ struct devlink_port_attrs attrs = {};
+ int err;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ devlink_port_attrs_set(devlink_port, &attrs);
+
+ err = devlink_port_register(priv->devlink, devlink_port, 0);
+ if (err)
+ return err;
+
+ devlink_port_type_eth_set(devlink_port, priv->net_dev);
+
+ return 0;
+}
+
+void dpaa2_eth_dl_port_del(struct dpaa2_eth_priv *priv)
+{
+ struct devlink_port *devlink_port = &priv->devlink_port;
+
+ devlink_port_type_clear(devlink_port);
+ devlink_port_unregister(devlink_port);
+}
+
+int dpaa2_eth_dl_traps_register(struct dpaa2_eth_priv *priv)
+{
+ struct dpaa2_eth_trap_data *dpaa2_eth_trap_data;
+ struct net_device *net_dev = priv->net_dev;
+ struct device *dev = net_dev->dev.parent;
+ int err;
+
+ dpaa2_eth_trap_data = kzalloc(sizeof(*dpaa2_eth_trap_data), GFP_KERNEL);
+ if (!dpaa2_eth_trap_data)
+ return -ENOMEM;
+ priv->trap_data = dpaa2_eth_trap_data;
+
+ dpaa2_eth_trap_data->trap_items_arr = kcalloc(ARRAY_SIZE(dpaa2_eth_traps_arr),
+ sizeof(struct dpaa2_eth_trap_item),
+ GFP_KERNEL);
+ if (!dpaa2_eth_trap_data->trap_items_arr) {
+ err = -ENOMEM;
+ goto trap_data_free;
+ }
+
+ err = devlink_trap_groups_register(priv->devlink, dpaa2_eth_trap_groups_arr,
+ ARRAY_SIZE(dpaa2_eth_trap_groups_arr));
+ if (err) {
+ dev_err(dev, "devlink_trap_groups_register() = %d\n", err);
+ goto trap_items_arr_free;
+ }
+
+ err = devlink_traps_register(priv->devlink, dpaa2_eth_traps_arr,
+ ARRAY_SIZE(dpaa2_eth_traps_arr), priv);
+ if (err) {
+ dev_err(dev, "devlink_traps_register() = %d\n", err);
+ goto trap_groups_unregister;
+ }
+
+ return 0;
+
+trap_groups_unregister:
+ devlink_trap_groups_unregister(priv->devlink, dpaa2_eth_trap_groups_arr,
+ ARRAY_SIZE(dpaa2_eth_trap_groups_arr));
+trap_items_arr_free:
+ kfree(dpaa2_eth_trap_data->trap_items_arr);
+trap_data_free:
+ kfree(dpaa2_eth_trap_data);
+ priv->trap_data = NULL;
+
+ return err;
+}
+
+void dpaa2_eth_dl_traps_unregister(struct dpaa2_eth_priv *priv)
+{
+ devlink_traps_unregister(priv->devlink, dpaa2_eth_traps_arr,
+ ARRAY_SIZE(dpaa2_eth_traps_arr));
+ devlink_trap_groups_unregister(priv->devlink, dpaa2_eth_trap_groups_arr,
+ ARRAY_SIZE(dpaa2_eth_trap_groups_arr));
+ kfree(priv->trap_data->trap_items_arr);
+ kfree(priv->trap_data);
+}
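
Because per-trap actions are rejected above, switching between dropping
and trapping parser-error frames happens at group level. With the
iproute2 devlink tool this would look roughly like (device handle
illustrative):

	devlink trap group set <bus>/<device> group parser_error_drops action trap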
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
new file mode 100644
index 000000000..5fb5f14e0
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2014-2015 Freescale Semiconductor Inc.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dpaa2_eth
+
+#if !defined(_DPAA2_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _DPAA2_ETH_TRACE_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/tracepoint.h>
+
+#define TR_FMT "[%s] fd: addr=0x%llx, len=%u, off=%u"
+/* trace_printk format for raw buffer event class */
+#define TR_BUF_FMT "[%s] vaddr=%p size=%zu dma_addr=%pad map_size=%zu bpid=%d"
+
+/* This is used to declare a class of events.
+ * Individual events of this type will be defined below.
+ */
+
+/* Store details about a frame descriptor */
+DECLARE_EVENT_CLASS(dpaa2_eth_fd,
+ /* Trace function prototype */
+ TP_PROTO(struct net_device *netdev,
+ const struct dpaa2_fd *fd),
+
+ /* Repeat argument list here */
+ TP_ARGS(netdev, fd),
+
+ /* A structure containing the relevant information we want
+ * to record. Declare name and type for each normal element,
+ * name, type and size for arrays. Use __string for variable
+ * length strings.
+ */
+ TP_STRUCT__entry(
+ __field(u64, fd_addr)
+ __field(u32, fd_len)
+ __field(u16, fd_offset)
+ __string(name, netdev->name)
+ ),
+
+ /* The function that assigns values to the above declared
+ * fields
+ */
+ TP_fast_assign(
+ __entry->fd_addr = dpaa2_fd_get_addr(fd);
+ __entry->fd_len = dpaa2_fd_get_len(fd);
+ __entry->fd_offset = dpaa2_fd_get_offset(fd);
+ __assign_str(name, netdev->name);
+ ),
+
+ /* This is what gets printed when the trace event is
+ * triggered.
+ */
+ TP_printk(TR_FMT,
+ __get_str(name),
+ __entry->fd_addr,
+ __entry->fd_len,
+ __entry->fd_offset)
+);
+
+/* Now declare events of the above type. Format is:
+ * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class
+ */
+
+/* Tx (egress) fd */
+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd,
+ TP_PROTO(struct net_device *netdev,
+ const struct dpaa2_fd *fd),
+
+ TP_ARGS(netdev, fd)
+);
+
+/* Rx fd */
+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd,
+ TP_PROTO(struct net_device *netdev,
+ const struct dpaa2_fd *fd),
+
+ TP_ARGS(netdev, fd)
+);
+
+/* Tx confirmation fd */
+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd,
+ TP_PROTO(struct net_device *netdev,
+ const struct dpaa2_fd *fd),
+
+ TP_ARGS(netdev, fd)
+);
+
+/* Log data about raw buffers. Useful for tracing DPBP content. */
+TRACE_EVENT(dpaa2_eth_buf_seed,
+ /* Trace function prototype */
+ TP_PROTO(struct net_device *netdev,
+ /* virtual address and size */
+ void *vaddr,
+ size_t size,
+ /* dma map address and size */
+ dma_addr_t dma_addr,
+ size_t map_size,
+ /* buffer pool id, if relevant */
+ u16 bpid),
+
+ /* Repeat argument list here */
+ TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid),
+
+ /* A structure containing the relevant information we want
+ * to record. Declare name and type for each normal element,
+ * name, type and size for arrays. Use __string for variable
+ * length strings.
+ */
+ TP_STRUCT__entry(
+ __field(void *, vaddr)
+ __field(size_t, size)
+ __field(dma_addr_t, dma_addr)
+ __field(size_t, map_size)
+ __field(u16, bpid)
+ __string(name, netdev->name)
+ ),
+
+ /* The function that assigns values to the above declared
+ * fields
+ */
+ TP_fast_assign(
+ __entry->vaddr = vaddr;
+ __entry->size = size;
+ __entry->dma_addr = dma_addr;
+ __entry->map_size = map_size;
+ __entry->bpid = bpid;
+ __assign_str(name, netdev->name);
+ ),
+
+ /* This is what gets printed when the trace event is
+ * triggered.
+ */
+ TP_printk(TR_BUF_FMT,
+ __get_str(name),
+ __entry->vaddr,
+ __entry->size,
+ &__entry->dma_addr,
+ __entry->map_size,
+ __entry->bpid)
+);
+
+/* If only one event of a certain type needs to be declared, use TRACE_EVENT().
+ * The syntax is the same as for DECLARE_EVENT_CLASS().
+ */
+
+#endif /* _DPAA2_ETH_TRACE_H */
+
+/* This must be outside ifdef _DPAA2_ETH_TRACE_H */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE dpaa2-eth-trace
+#include <trace/define_trace.h>
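
Usage sketch for the events defined above, mirroring what dpaa2-eth.c
does: exactly one translation unit defines CREATE_TRACE_POINTS before
including this header, after which the generated trace_*() helpers can
be called on the datapath:

	#define CREATE_TRACE_POINTS
	#include "dpaa2-eth-trace.h"

	/* e.g. on the Tx path, just before enqueueing the frame */
	trace_dpaa2_tx_fd(net_dev, fd);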
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
new file mode 100644
index 000000000..de62eee58
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -0,0 +1,4896 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2020 NXP
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/etherdevice.h>
+#include <linux/of_net.h>
+#include <linux/interrupt.h>
+#include <linux/msi.h>
+#include <linux/kthread.h>
+#include <linux/iommu.h>
+#include <linux/fsl/mc.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <linux/fsl/ptp_qoriq.h>
+#include <linux/ptp_classify.h>
+#include <net/pkt_cls.h>
+#include <net/sock.h>
+#include <net/tso.h>
+
+#include "dpaa2-eth.h"
+
+/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa2 files
+ * using these trace events only need to #include "dpaa2-eth-trace.h"
+ */
+#define CREATE_TRACE_POINTS
+#include "dpaa2-eth-trace.h"
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Freescale Semiconductor, Inc");
+MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
+
+struct ptp_qoriq *dpaa2_ptp;
+EXPORT_SYMBOL(dpaa2_ptp);
+
+static void dpaa2_eth_detect_features(struct dpaa2_eth_priv *priv)
+{
+ priv->features = 0;
+
+ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_PTP_ONESTEP_VER_MAJOR,
+ DPNI_PTP_ONESTEP_VER_MINOR) >= 0)
+ priv->features |= DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT;
+}
+
+static void dpaa2_update_ptp_onestep_indirect(struct dpaa2_eth_priv *priv,
+ u32 offset, u8 udp)
+{
+ struct dpni_single_step_cfg cfg;
+
+ cfg.en = 1;
+ cfg.ch_update = udp;
+ cfg.offset = offset;
+ cfg.peer_delay = 0;
+
+ if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token, &cfg))
+ WARN_ONCE(1, "Failed to set single step register");
+}
+
+static void dpaa2_update_ptp_onestep_direct(struct dpaa2_eth_priv *priv,
+ u32 offset, u8 udp)
+{
+ u32 val = 0;
+
+ val = DPAA2_PTP_SINGLE_STEP_ENABLE |
+ DPAA2_PTP_SINGLE_CORRECTION_OFF(offset);
+
+ if (udp)
+ val |= DPAA2_PTP_SINGLE_STEP_CH;
+
+ if (priv->onestep_reg_base)
+ writel(val, priv->onestep_reg_base);
+}
+
+static void dpaa2_ptp_onestep_reg_update_method(struct dpaa2_eth_priv *priv)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_single_step_cfg ptp_cfg;
+
+ priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_indirect;
+
+ if (!(priv->features & DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT))
+ return;
+
+ if (dpni_get_single_step_cfg(priv->mc_io, 0,
+ priv->mc_token, &ptp_cfg)) {
+ dev_err(dev, "dpni_get_single_step_cfg cannot retrieve onestep reg, falling back to indirect update\n");
+ return;
+ }
+
+ if (!ptp_cfg.ptp_onestep_reg_base) {
+ dev_err(dev, "1588 onestep reg not available, falling back to indirect update\n");
+ return;
+ }
+
+ priv->onestep_reg_base = ioremap(ptp_cfg.ptp_onestep_reg_base,
+ sizeof(u32));
+ if (!priv->onestep_reg_base) {
+ dev_err(dev, "1588 onestep reg cannot be mapped, falling back to indirect update\n");
+ return;
+ }
+
+ priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_direct;
+}
+
+static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
+ dma_addr_t iova_addr)
+{
+ phys_addr_t phys_addr;
+
+ phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
+
+ return phys_to_virt(phys_addr);
+}
+
+static void dpaa2_eth_validate_rx_csum(struct dpaa2_eth_priv *priv,
+ u32 fd_status,
+ struct sk_buff *skb)
+{
+ skb_checksum_none_assert(skb);
+
+ /* HW checksum validation is disabled, nothing to do here */
+ if (!(priv->net_dev->features & NETIF_F_RXCSUM))
+ return;
+
+ /* Read checksum validation bits */
+ if (!((fd_status & DPAA2_FAS_L3CV) &&
+ (fd_status & DPAA2_FAS_L4CV)))
+ return;
+
+ /* Inform the stack there's no need to compute L3/L4 csum anymore */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+/* Free a received FD.
+ * Not to be used for Tx conf FDs or on any other paths.
+ */
+static void dpaa2_eth_free_rx_fd(struct dpaa2_eth_priv *priv,
+ const struct dpaa2_fd *fd,
+ void *vaddr)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
+ u8 fd_format = dpaa2_fd_get_format(fd);
+ struct dpaa2_sg_entry *sgt;
+ void *sg_vaddr;
+ int i;
+
+ /* If single buffer frame, just free the data buffer */
+ if (fd_format == dpaa2_fd_single)
+ goto free_buf;
+ else if (fd_format != dpaa2_fd_sg)
+ /* We don't support any other format */
+ return;
+
+ /* For S/G frames, we first need to free all SG entries
+ * except the first one, which was taken care of already
+ */
+ sgt = vaddr + dpaa2_fd_get_offset(fd);
+ for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
+ addr = dpaa2_sg_get_addr(&sgt[i]);
+ sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
+ dma_unmap_page(dev, addr, priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+
+ free_pages((unsigned long)sg_vaddr, 0);
+ if (dpaa2_sg_is_final(&sgt[i]))
+ break;
+ }
+
+free_buf:
+ free_pages((unsigned long)vaddr, 0);
+}
+
+/* Build a linear skb based on a single-buffer frame descriptor */
+static struct sk_buff *dpaa2_eth_build_linear_skb(struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ void *fd_vaddr)
+{
+ struct sk_buff *skb = NULL;
+ u16 fd_offset = dpaa2_fd_get_offset(fd);
+ u32 fd_length = dpaa2_fd_get_len(fd);
+
+ ch->buf_count--;
+
+ skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_reserve(skb, fd_offset);
+ skb_put(skb, fd_length);
+
+ return skb;
+}
+
+/* Build a non-linear (fragmented) skb based on an S/G table */
+static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_sg_entry *sgt)
+{
+ struct sk_buff *skb = NULL;
+ struct device *dev = priv->net_dev->dev.parent;
+ void *sg_vaddr;
+ dma_addr_t sg_addr;
+ u16 sg_offset;
+ u32 sg_length;
+ struct page *page, *head_page;
+ int page_offset;
+ int i;
+
+ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
+ struct dpaa2_sg_entry *sge = &sgt[i];
+
+ /* NOTE: We only support SG entries in dpaa2_sg_single format,
+ * but this is the only format we may receive from HW anyway
+ */
+
+ /* Get the address and length from the S/G entry */
+ sg_addr = dpaa2_sg_get_addr(sge);
+ sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
+ dma_unmap_page(dev, sg_addr, priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+
+ sg_length = dpaa2_sg_get_len(sge);
+
+ if (i == 0) {
+ /* We build the skb around the first data buffer */
+ skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
+ if (unlikely(!skb)) {
+ /* Free the first SG entry now, since we already
+ * unmapped it and obtained the virtual address
+ */
+ free_pages((unsigned long)sg_vaddr, 0);
+
+ /* We still need to subtract the buffers used
+ * by this FD from our software counter
+ */
+ while (i < DPAA2_ETH_MAX_SG_ENTRIES &&
+ !dpaa2_sg_is_final(&sgt[i]))
+ i++;
+ break;
+ }
+
+ sg_offset = dpaa2_sg_get_offset(sge);
+ skb_reserve(skb, sg_offset);
+ skb_put(skb, sg_length);
+ } else {
+ /* Rest of the data buffers are stored as skb frags */
+ page = virt_to_page(sg_vaddr);
+ head_page = virt_to_head_page(sg_vaddr);
+
+ /* Offset in page (which may be compound).
+ * Data in subsequent SG entries is stored from the
+ * beginning of the buffer, so we don't need to add the
+ * sg_offset.
+ */
+ page_offset = ((unsigned long)sg_vaddr &
+ (PAGE_SIZE - 1)) +
+ (page_address(page) - page_address(head_page));
+
+ skb_add_rx_frag(skb, i - 1, head_page, page_offset,
+ sg_length, priv->rx_buf_size);
+ }
+
+ if (dpaa2_sg_is_final(sge))
+ break;
+ }
+
+ WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
+
+ /* Count all data buffers + SG table buffer */
+ ch->buf_count -= i + 2;
+
+ return skb;
+}
+
+/* Free buffers acquired from the buffer pool or which were meant to
+ * be released in the pool
+ */
+static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array,
+ int count)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ void *vaddr;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
+ dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+ free_pages((unsigned long)vaddr, 0);
+ }
+}
+
+static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ dma_addr_t addr)
+{
+ int retries = 0;
+ int err;
+
+ ch->recycled_bufs[ch->recycled_bufs_cnt++] = addr;
+ if (ch->recycled_bufs_cnt < DPAA2_ETH_BUFS_PER_CMD)
+ return;
+
+ while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
+ ch->recycled_bufs,
+ ch->recycled_bufs_cnt)) == -EBUSY) {
+ if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
+ break;
+ cpu_relax();
+ }
+
+ if (err) {
+ dpaa2_eth_free_bufs(priv, ch->recycled_bufs, ch->recycled_bufs_cnt);
+ ch->buf_count -= ch->recycled_bufs_cnt;
+ }
+
+ ch->recycled_bufs_cnt = 0;
+}
+
+static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq,
+ struct dpaa2_eth_xdp_fds *xdp_fds)
+{
+ int total_enqueued = 0, retries = 0, enqueued;
+ struct dpaa2_eth_drv_stats *percpu_extras;
+ int num_fds, err, max_retries;
+ struct dpaa2_fd *fds;
+
+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
+
+ /* try to enqueue all the FDs until the max number of retries is hit */
+ fds = xdp_fds->fds;
+ num_fds = xdp_fds->num;
+ max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
+ while (total_enqueued < num_fds && retries < max_retries) {
+ err = priv->enqueue(priv, fq, &fds[total_enqueued],
+ 0, num_fds - total_enqueued, &enqueued);
+ if (err == -EBUSY) {
+ percpu_extras->tx_portal_busy += ++retries;
+ continue;
+ }
+ total_enqueued += enqueued;
+ }
+ xdp_fds->num = 0;
+
+ return total_enqueued;
+}
+
+static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_eth_fq *fq)
+{
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa2_fd *fds;
+ int enqueued, i;
+
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+
+ // enqueue the array of XDP_TX frames
+ enqueued = dpaa2_eth_xdp_flush(priv, fq, &fq->xdp_tx_fds);
+
+ /* update statistics */
+ percpu_stats->tx_packets += enqueued;
+ fds = fq->xdp_tx_fds.fds;
+ for (i = 0; i < enqueued; i++) {
+ percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
+ ch->stats.xdp_tx++;
+ }
+ for (i = enqueued; i < fq->xdp_tx_fds.num; i++) {
+ dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
+ percpu_stats->tx_errors++;
+ ch->stats.xdp_tx_err++;
+ }
+ fq->xdp_tx_fds.num = 0;
+}
+
+static void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_fd *fd,
+ void *buf_start, u16 queue_id)
+{
+ struct dpaa2_faead *faead;
+ struct dpaa2_fd *dest_fd;
+ struct dpaa2_eth_fq *fq;
+ u32 ctrl, frc;
+
+ /* Mark the egress frame hardware annotation area as valid */
+ frc = dpaa2_fd_get_frc(fd);
+ dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
+ dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);
+
+ /* Instruct hardware to release the FD buffer directly into
+ * the buffer pool once transmission is completed, instead of
+ * sending a Tx confirmation frame to us
+ */
+ ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
+ faead = dpaa2_get_faead(buf_start, false);
+ faead->ctrl = cpu_to_le32(ctrl);
+ faead->conf_fqid = 0;
+
+ fq = &priv->fq[queue_id];
+ dest_fd = &fq->xdp_tx_fds.fds[fq->xdp_tx_fds.num++];
+ memcpy(dest_fd, fd, sizeof(*dest_fd));
+
+ if (fq->xdp_tx_fds.num < DEV_MAP_BULK_SIZE)
+ return;
+
+ dpaa2_eth_xdp_tx_flush(priv, ch, fq);
+}
+
+static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_eth_fq *rx_fq,
+ struct dpaa2_fd *fd, void *vaddr)
+{
+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
+ struct bpf_prog *xdp_prog;
+ struct xdp_buff xdp;
+ u32 xdp_act = XDP_PASS;
+ int err, offset;
+
+ xdp_prog = READ_ONCE(ch->xdp.prog);
+ if (!xdp_prog)
+ goto out;
+
+ offset = dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM;
+ xdp_init_buff(&xdp, DPAA2_ETH_RX_BUF_RAW_SIZE - offset, &ch->xdp_rxq);
+ xdp_prepare_buff(&xdp, vaddr + offset, XDP_PACKET_HEADROOM,
+ dpaa2_fd_get_len(fd), false);
+
+ xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+ /* xdp.data pointer may have changed */
+ dpaa2_fd_set_offset(fd, xdp.data - vaddr);
+ dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);
+
+ switch (xdp_act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
+ fallthrough;
+ case XDP_DROP:
+ dpaa2_eth_recycle_buf(priv, ch, addr);
+ ch->stats.xdp_drop++;
+ break;
+ case XDP_REDIRECT:
+ dma_unmap_page(priv->net_dev->dev.parent, addr,
+ priv->rx_buf_size, DMA_BIDIRECTIONAL);
+ ch->buf_count--;
+
+ /* Allow redirect use of full headroom */
+ xdp.data_hard_start = vaddr;
+ xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE;
+
+ err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
+ if (unlikely(err)) {
+ addr = dma_map_page(priv->net_dev->dev.parent,
+ virt_to_page(vaddr), 0,
+ priv->rx_buf_size, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(priv->net_dev->dev.parent, addr))) {
+ free_pages((unsigned long)vaddr, 0);
+ } else {
+ ch->buf_count++;
+ dpaa2_eth_recycle_buf(priv, ch, addr);
+ }
+ ch->stats.xdp_drop++;
+ } else {
+ ch->stats.xdp_redirect++;
+ }
+ break;
+ }
+
+ ch->xdp.res |= xdp_act;
+out:
+ return xdp_act;
+}
+
+static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ void *fd_vaddr)
+{
+ u16 fd_offset = dpaa2_fd_get_offset(fd);
+ struct dpaa2_eth_priv *priv = ch->priv;
+ u32 fd_length = dpaa2_fd_get_len(fd);
+ struct sk_buff *skb = NULL;
+ unsigned int skb_len;
+
+ if (fd_length > priv->rx_copybreak)
+ return NULL;
+
+ skb_len = fd_length + dpaa2_eth_needed_headroom(NULL);
+
+ skb = napi_alloc_skb(&ch->napi, skb_len);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, dpaa2_eth_needed_headroom(NULL));
+ skb_put(skb, fd_length);
+
+ memcpy(skb->data, fd_vaddr + fd_offset, fd_length);
+
+ return skb;
+}
+
+/* Main Rx frame processing routine */
+static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ struct dpaa2_eth_fq *fq)
+{
+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
+ u8 fd_format = dpaa2_fd_get_format(fd);
+ void *vaddr;
+ struct sk_buff *skb;
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa2_eth_drv_stats *percpu_extras;
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpaa2_fas *fas;
+ bool recycle_rx_buf = false;
+ void *buf_data;
+ u32 status = 0;
+ u32 xdp_act;
+
+ /* Tracing point */
+ trace_dpaa2_rx_fd(priv->net_dev, fd);
+
+ vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
+ dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+
+ fas = dpaa2_get_fas(vaddr, false);
+ prefetch(fas);
+ buf_data = vaddr + dpaa2_fd_get_offset(fd);
+ prefetch(buf_data);
+
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
+
+ if (fd_format == dpaa2_fd_single) {
+ xdp_act = dpaa2_eth_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
+ if (xdp_act != XDP_PASS) {
+ percpu_stats->rx_packets++;
+ percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
+ return;
+ }
+
+ skb = dpaa2_eth_copybreak(ch, fd, vaddr);
+ if (!skb) {
+ dma_unmap_page(dev, addr, priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+ skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
+ } else {
+ recycle_rx_buf = true;
+ }
+ } else if (fd_format == dpaa2_fd_sg) {
+ WARN_ON(priv->xdp_prog);
+
+ dma_unmap_page(dev, addr, priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+ skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
+ free_pages((unsigned long)vaddr, 0);
+ percpu_extras->rx_sg_frames++;
+ percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
+ } else {
+ /* We don't support any other format */
+ goto err_frame_format;
+ }
+
+ if (unlikely(!skb))
+ goto err_build_skb;
+
+ prefetch(skb->data);
+
+ /* Get the timestamp value */
+ if (priv->rx_tstamp) {
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+ __le64 *ts = dpaa2_get_ts(vaddr, false);
+ u64 ns;
+
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+
+ ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
+ shhwtstamps->hwtstamp = ns_to_ktime(ns);
+ }
+
+ /* Check if we need to validate the L4 csum */
+ if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
+ status = le32_to_cpu(fas->status);
+ dpaa2_eth_validate_rx_csum(priv, status, skb);
+ }
+
+ skb->protocol = eth_type_trans(skb, priv->net_dev);
+ skb_record_rx_queue(skb, fq->flowid);
+
+ percpu_stats->rx_packets++;
+ percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
+ ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd);
+
+ list_add_tail(&skb->list, ch->rx_list);
+
+ if (recycle_rx_buf)
+ dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));
+ return;
+
+err_build_skb:
+ dpaa2_eth_free_rx_fd(priv, fd, vaddr);
+err_frame_format:
+ percpu_stats->rx_dropped++;
+}
+
+/* Processing of Rx frames received on the error FQ
+ * We check and print the error bits and then free the frame
+ */
+static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ struct dpaa2_eth_fq *fq __always_unused)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
+ u8 fd_format = dpaa2_fd_get_format(fd);
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa2_eth_trap_item *trap_item;
+ struct dpaa2_fapr *fapr;
+ struct sk_buff *skb;
+ void *buf_data;
+ void *vaddr;
+
+ vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
+ dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+
+ buf_data = vaddr + dpaa2_fd_get_offset(fd);
+
+ if (fd_format == dpaa2_fd_single) {
+ dma_unmap_page(dev, addr, priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+ skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
+ } else if (fd_format == dpaa2_fd_sg) {
+ dma_unmap_page(dev, addr, priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+ skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
+ free_pages((unsigned long)vaddr, 0);
+ } else {
+ /* We don't support any other format */
+ dpaa2_eth_free_rx_fd(priv, fd, vaddr);
+ goto err_frame_format;
+ }
+
+ fapr = dpaa2_get_fapr(vaddr, false);
+ trap_item = dpaa2_eth_dl_get_trap(priv, fapr);
+ if (trap_item)
+ devlink_trap_report(priv->devlink, skb, trap_item->trap_ctx,
+ &priv->devlink_port, NULL);
+ consume_skb(skb);
+
+err_frame_format:
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+ percpu_stats->rx_errors++;
+ ch->buf_count--;
+}
+
+/* Consume all frames pull-dequeued into the store. This is the simplest way to
+ * make sure we don't accidentally issue another volatile dequeue which would
+ * overwrite (leak) frames already in the store.
+ *
+ * Observance of NAPI budget is not our concern, leaving that to the caller.
+ */
+static int dpaa2_eth_consume_frames(struct dpaa2_eth_channel *ch,
+ struct dpaa2_eth_fq **src)
+{
+ struct dpaa2_eth_priv *priv = ch->priv;
+ struct dpaa2_eth_fq *fq = NULL;
+ struct dpaa2_dq *dq;
+ const struct dpaa2_fd *fd;
+ int cleaned = 0, retries = 0;
+ int is_last;
+
+ do {
+ dq = dpaa2_io_store_next(ch->store, &is_last);
+ if (unlikely(!dq)) {
+ /* If we're here, we *must* have placed a
+ * volatile dequeue command, so keep reading through
+ * the store until we get some sort of valid response
+ * token (either a valid frame or an "empty dequeue")
+ */
+ if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) {
+ netdev_err_once(priv->net_dev,
+ "Unable to read a valid dequeue response\n");
+ return -ETIMEDOUT;
+ }
+ continue;
+ }
+
+ fd = dpaa2_dq_fd(dq);
+ fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
+
+ fq->consume(priv, ch, fd, fq);
+ cleaned++;
+ retries = 0;
+ } while (!is_last);
+
+ if (!cleaned)
+ return 0;
+
+ fq->stats.frames += cleaned;
+ ch->stats.frames += cleaned;
+ ch->stats.frames_per_cdan += cleaned;
+
+ /* A dequeue operation only pulls frames from a single queue
+ * into the store. Return the frame queue as an out param.
+ */
+ if (src)
+ *src = fq;
+
+ return cleaned;
+}
+
+static int dpaa2_eth_ptp_parse(struct sk_buff *skb,
+ u8 *msgtype, u8 *twostep, u8 *udp,
+ u16 *correction_offset,
+ u16 *origintimestamp_offset)
+{
+ unsigned int ptp_class;
+ struct ptp_header *hdr;
+ unsigned int type;
+ u8 *base;
+
+ ptp_class = ptp_classify_raw(skb);
+ if (ptp_class == PTP_CLASS_NONE)
+ return -EINVAL;
+
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
+ return -EINVAL;
+
+ *msgtype = ptp_get_msgtype(hdr, ptp_class);
+ *twostep = hdr->flag_field[0] & 0x2;
+
+ type = ptp_class & PTP_CLASS_PMASK;
+ if (type == PTP_CLASS_IPV4 ||
+ type == PTP_CLASS_IPV6)
+ *udp = 1;
+ else
+ *udp = 0;
+
+ base = skb_mac_header(skb);
+ *correction_offset = (u8 *)&hdr->correction - base;
+ *origintimestamp_offset = (u8 *)hdr + sizeof(struct ptp_header) - base;
+
+ return 0;
+}
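+
+/* Worked example for the offsets computed above (illustrative): in an
+ * untagged L2 PTP frame the PTP header follows the 14-byte Ethernet
+ * header, and the 8-byte correction field sits at offset 8 inside the
+ * 34-byte struct ptp_header, so *correction_offset = 14 + 8 = 22 and
+ * *origintimestamp_offset = 14 + 34 = 48.
+ */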
+
+/* Configure the egress frame annotation for timestamp update */
+static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
+ struct dpaa2_fd *fd,
+ void *buf_start,
+ struct sk_buff *skb)
+{
+ struct ptp_tstamp origin_timestamp;
+ u8 msgtype, twostep, udp;
+ struct dpaa2_faead *faead;
+ struct dpaa2_fas *fas;
+ struct timespec64 ts;
+ u16 offset1, offset2;
+ u32 ctrl, frc;
+ __le64 *ns;
+ u8 *data;
+
+ /* Mark the egress frame annotation area as valid */
+ frc = dpaa2_fd_get_frc(fd);
+ dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
+
+ /* Set hardware annotation size */
+ ctrl = dpaa2_fd_get_ctrl(fd);
+ dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);
+
+ /* enable UPD (update prepended data) bit in FAEAD field of
+ * hardware frame annotation area
+ */
+ ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
+ faead = dpaa2_get_faead(buf_start, true);
+ faead->ctrl = cpu_to_le32(ctrl);
+
+ if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
+ if (dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
+ &offset1, &offset2) ||
+ msgtype != PTP_MSGTYPE_SYNC || twostep) {
+ WARN_ONCE(1, "Bad packet for one-step timestamping\n");
+ return;
+ }
+
+ /* Mark the frame annotation status as valid */
+ frc = dpaa2_fd_get_frc(fd);
+ dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FASV);
+
+ /* Mark the PTP flag for one step timestamping */
+ fas = dpaa2_get_fas(buf_start, true);
+ fas->status = cpu_to_le32(DPAA2_FAS_PTP);
+
+ dpaa2_ptp->caps.gettime64(&dpaa2_ptp->caps, &ts);
+ ns = dpaa2_get_ts(buf_start, true);
+ *ns = cpu_to_le64(timespec64_to_ns(&ts) /
+ DPAA2_PTP_CLK_PERIOD_NS);
+
+ /* Update current time to PTP message originTimestamp field */
+ ns_to_ptp_tstamp(&origin_timestamp, le64_to_cpup(ns));
+ data = skb_mac_header(skb);
+ *(__be16 *)(data + offset2) = htons(origin_timestamp.sec_msb);
+ *(__be32 *)(data + offset2 + 2) =
+ htonl(origin_timestamp.sec_lsb);
+ *(__be32 *)(data + offset2 + 6) = htonl(origin_timestamp.nsec);
+
+ if (priv->ptp_correction_off == offset1)
+ return;
+
+ priv->dpaa2_set_onestep_params_cb(priv, offset1, udp);
+ priv->ptp_correction_off = offset1;
+
+ }
+}
+
+static void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv)
+{
+ struct dpaa2_eth_sgt_cache *sgt_cache;
+ void *sgt_buf = NULL;
+ int sgt_buf_size;
+
+ sgt_cache = this_cpu_ptr(priv->sgt_cache);
+ sgt_buf_size = priv->tx_data_offset +
+ DPAA2_ETH_SG_ENTRIES_MAX * sizeof(struct dpaa2_sg_entry);
+
+ if (sgt_cache->count == 0)
+ sgt_buf = napi_alloc_frag_align(sgt_buf_size, DPAA2_ETH_TX_BUF_ALIGN);
+ else
+ sgt_buf = sgt_cache->buf[--sgt_cache->count];
+ if (!sgt_buf)
+ return NULL;
+
+ memset(sgt_buf, 0, sgt_buf_size);
+
+ return sgt_buf;
+}
+
+static void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf)
+{
+ struct dpaa2_eth_sgt_cache *sgt_cache;
+
+ sgt_cache = this_cpu_ptr(priv->sgt_cache);
+ if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
+ skb_free_frag(sgt_buf);
+ else
+ sgt_cache->buf[sgt_cache->count++] = sgt_buf;
+}
+
+/* Create a frame descriptor based on a fragmented skb */
+static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
+ struct sk_buff *skb,
+ struct dpaa2_fd *fd,
+ void **swa_addr)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ void *sgt_buf = NULL;
+ dma_addr_t addr;
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ struct dpaa2_sg_entry *sgt;
+ int i, err;
+ int sgt_buf_size;
+ struct scatterlist *scl, *crt_scl;
+ int num_sg;
+ int num_dma_bufs;
+ struct dpaa2_eth_swa *swa;
+
+ /* Create and map scatterlist.
+ * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
+ * to go beyond nr_frags+1.
+ * Note: We don't support chained scatterlists
+ */
+ if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
+ return -EINVAL;
+
+ scl = kmalloc_array(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
+ if (unlikely(!scl))
+ return -ENOMEM;
+
+ sg_init_table(scl, nr_frags + 1);
+ num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
+ if (unlikely(num_sg < 0)) {
+ err = -ENOMEM;
+ goto dma_map_sg_failed;
+ }
+ num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
+ if (unlikely(!num_dma_bufs)) {
+ err = -ENOMEM;
+ goto dma_map_sg_failed;
+ }
+
+ /* Prepare the HW SGT structure */
+ sgt_buf_size = priv->tx_data_offset +
+ sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
+ sgt_buf = dpaa2_eth_sgt_get(priv);
+ if (unlikely(!sgt_buf)) {
+ err = -ENOMEM;
+ goto sgt_buf_alloc_failed;
+ }
+
+ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+
+ /* Fill in the HW SGT structure.
+ *
+ * sgt_buf is zeroed out, so the following fields are implicit
+ * in all sgt entries:
+ * - offset is 0
+ * - format is 'dpaa2_sg_single'
+ */
+ for_each_sg(scl, crt_scl, num_dma_bufs, i) {
+ dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
+ dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
+ }
+ dpaa2_sg_set_final(&sgt[i - 1], true);
+
+ /* Store the skb backpointer in the SGT buffer.
+ * Fit the scatterlist and the number of buffers alongside the
+ * skb backpointer in the software annotation area. We'll need
+ * all of them on Tx Conf.
+ */
+ *swa_addr = (void *)sgt_buf;
+ swa = (struct dpaa2_eth_swa *)sgt_buf;
+ swa->type = DPAA2_ETH_SWA_SG;
+ swa->sg.skb = skb;
+ swa->sg.scl = scl;
+ swa->sg.num_sg = num_sg;
+ swa->sg.sgt_size = sgt_buf_size;
+
+ /* Separately map the SGT buffer */
+ addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, addr))) {
+ err = -ENOMEM;
+ goto dma_map_single_failed;
+ }
+ memset(fd, 0, sizeof(struct dpaa2_fd));
+ dpaa2_fd_set_offset(fd, priv->tx_data_offset);
+ dpaa2_fd_set_format(fd, dpaa2_fd_sg);
+ dpaa2_fd_set_addr(fd, addr);
+ dpaa2_fd_set_len(fd, skb->len);
+ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
+
+ return 0;
+
+dma_map_single_failed:
+ dpaa2_eth_sgt_recycle(priv, sgt_buf);
+sgt_buf_alloc_failed:
+ dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
+dma_map_sg_failed:
+ kfree(scl);
+ return err;
+}
+
+/* Create a SG frame descriptor based on a linear skb.
+ *
+ * This function is used on the Tx path when the skb headroom is not large
+ * enough for the HW requirements, thus instead of realloc-ing the skb we
+ * create a SG frame descriptor with only one entry.
+ */
+static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
+ struct sk_buff *skb,
+ struct dpaa2_fd *fd,
+ void **swa_addr)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpaa2_sg_entry *sgt;
+ struct dpaa2_eth_swa *swa;
+ dma_addr_t addr, sgt_addr;
+ void *sgt_buf = NULL;
+ int sgt_buf_size;
+ int err;
+
+ /* Prepare the HW SGT structure */
+ sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry);
+ sgt_buf = dpaa2_eth_sgt_get(priv);
+ if (unlikely(!sgt_buf))
+ return -ENOMEM;
+ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+
+ addr = dma_map_single(dev, skb->data, skb->len, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, addr))) {
+ err = -ENOMEM;
+ goto data_map_failed;
+ }
+
+ /* Fill in the HW SGT structure */
+ dpaa2_sg_set_addr(sgt, addr);
+ dpaa2_sg_set_len(sgt, skb->len);
+ dpaa2_sg_set_final(sgt, true);
+
+ /* Store the skb backpointer in the SGT buffer */
+ *swa_addr = (void *)sgt_buf;
+ swa = (struct dpaa2_eth_swa *)sgt_buf;
+ swa->type = DPAA2_ETH_SWA_SINGLE;
+ swa->single.skb = skb;
+ swa->single.sgt_size = sgt_buf_size;
+
+ /* Separately map the SGT buffer */
+ sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, sgt_addr))) {
+ err = -ENOMEM;
+ goto sgt_map_failed;
+ }
+
+ memset(fd, 0, sizeof(struct dpaa2_fd));
+ dpaa2_fd_set_offset(fd, priv->tx_data_offset);
+ dpaa2_fd_set_format(fd, dpaa2_fd_sg);
+ dpaa2_fd_set_addr(fd, sgt_addr);
+ dpaa2_fd_set_len(fd, skb->len);
+ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
+
+ return 0;
+
+sgt_map_failed:
+ dma_unmap_single(dev, addr, skb->len, DMA_BIDIRECTIONAL);
+data_map_failed:
+ dpaa2_eth_sgt_recycle(priv, sgt_buf);
+
+ return err;
+}
+
+/* Create a frame descriptor based on a linear skb */
+static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
+ struct sk_buff *skb,
+ struct dpaa2_fd *fd,
+ void **swa_addr)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ u8 *buffer_start, *aligned_start;
+ struct dpaa2_eth_swa *swa;
+ dma_addr_t addr;
+
+ buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);
+ aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
+ DPAA2_ETH_TX_BUF_ALIGN);
+ if (aligned_start >= skb->head)
+ buffer_start = aligned_start;
+ else
+ return -ENOMEM;
+
+ /* Store a backpointer to the skb at the beginning of the buffer
+ * (in the private data area) such that we can release it
+ * on Tx confirm
+ */
+ *swa_addr = (void *)buffer_start;
+ swa = (struct dpaa2_eth_swa *)buffer_start;
+ swa->type = DPAA2_ETH_SWA_SINGLE;
+ swa->single.skb = skb;
+
+ addr = dma_map_single(dev, buffer_start,
+ skb_tail_pointer(skb) - buffer_start,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, addr)))
+ return -ENOMEM;
+
+ memset(fd, 0, sizeof(struct dpaa2_fd));
+ dpaa2_fd_set_addr(fd, addr);
+ dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
+ dpaa2_fd_set_len(fd, skb->len);
+ dpaa2_fd_set_format(fd, dpaa2_fd_single);
+ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
+
+ return 0;
+}
+
+/* FD freeing routine on the Tx path
+ *
+ * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
+ * back-pointed to is also freed.
+ * This can be called either from dpaa2_eth_tx_conf() or on the error path of
+ * dpaa2_eth_tx().
+ */
+static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq,
+ const struct dpaa2_fd *fd, bool in_napi)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ dma_addr_t fd_addr, sg_addr;
+ struct sk_buff *skb = NULL;
+ unsigned char *buffer_start;
+ struct dpaa2_eth_swa *swa;
+ u8 fd_format = dpaa2_fd_get_format(fd);
+ u32 fd_len = dpaa2_fd_get_len(fd);
+ struct dpaa2_sg_entry *sgt;
+ int should_free_skb = 1;
+ void *tso_hdr;
+ int i;
+
+ fd_addr = dpaa2_fd_get_addr(fd);
+ buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
+ swa = (struct dpaa2_eth_swa *)buffer_start;
+
+ if (fd_format == dpaa2_fd_single) {
+ if (swa->type == DPAA2_ETH_SWA_SINGLE) {
+ skb = swa->single.skb;
+ /* Accessing the skb buffer is safe before dma unmap,
+ * because we didn't map the actual skb shell.
+ */
+ dma_unmap_single(dev, fd_addr,
+ skb_tail_pointer(skb) - buffer_start,
+ DMA_BIDIRECTIONAL);
+ } else {
+ WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type");
+ dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
+ DMA_BIDIRECTIONAL);
+ }
+ } else if (fd_format == dpaa2_fd_sg) {
+ if (swa->type == DPAA2_ETH_SWA_SG) {
+ skb = swa->sg.skb;
+
+ /* Unmap the scatterlist */
+ dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
+ DMA_BIDIRECTIONAL);
+ kfree(swa->sg.scl);
+
+ /* Unmap the SGT buffer */
+ dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
+ DMA_BIDIRECTIONAL);
+ } else if (swa->type == DPAA2_ETH_SWA_SW_TSO) {
+ skb = swa->tso.skb;
+
+ sgt = (struct dpaa2_sg_entry *)(buffer_start +
+ priv->tx_data_offset);
+
+ /* Unmap the SGT buffer */
+ dma_unmap_single(dev, fd_addr, swa->tso.sgt_size,
+ DMA_BIDIRECTIONAL);
+
+ /* Unmap and free the header */
+ tso_hdr = dpaa2_iova_to_virt(priv->iommu_domain, dpaa2_sg_get_addr(sgt));
+ dma_unmap_single(dev, dpaa2_sg_get_addr(sgt), TSO_HEADER_SIZE,
+ DMA_TO_DEVICE);
+ kfree(tso_hdr);
+
+ /* Unmap the other SG entries for the data */
+ for (i = 1; i < swa->tso.num_sg; i++)
+ dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]),
+ dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE);
+
+ if (!swa->tso.is_last_fd)
+ should_free_skb = 0;
+ } else {
+ skb = swa->single.skb;
+
+ /* Unmap the SGT Buffer */
+ dma_unmap_single(dev, fd_addr, swa->single.sgt_size,
+ DMA_BIDIRECTIONAL);
+
+ sgt = (struct dpaa2_sg_entry *)(buffer_start +
+ priv->tx_data_offset);
+ sg_addr = dpaa2_sg_get_addr(sgt);
+ dma_unmap_single(dev, sg_addr, skb->len, DMA_BIDIRECTIONAL);
+ }
+ } else {
+ netdev_dbg(priv->net_dev, "Invalid FD format\n");
+ return;
+ }
+
+ if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
+ fq->dq_frames++;
+ fq->dq_bytes += fd_len;
+ }
+
+ if (swa->type == DPAA2_ETH_SWA_XDP) {
+ xdp_return_frame(swa->xdp.xdpf);
+ return;
+ }
+
+ /* Get the timestamp value */
+ if (swa->type != DPAA2_ETH_SWA_SW_TSO) {
+ if (skb->cb[0] == TX_TSTAMP) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ __le64 *ts = dpaa2_get_ts(buffer_start, true);
+ u64 ns;
+
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+
+ ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ } else if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
+ mutex_unlock(&priv->onestep_tstamp_lock);
+ }
+ }
+
+ /* Free SGT buffer allocated on tx */
+ if (fd_format != dpaa2_fd_single)
+ dpaa2_eth_sgt_recycle(priv, buffer_start);
+
+ /* Move on with skb release. If we are just confirming multiple FDs
+ * from the same TSO skb then only the last one will need to free the
+ * skb.
+ */
+ if (should_free_skb)
+ napi_consume_skb(skb, in_napi);
+}
+
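+/* Create frame descriptor(s) for a GSO skb (software TSO).
+ *
+ * The payload is split into gso_size-d chunks; for each resulting segment we
+ * build the protocol headers with the kernel's tso_* helpers, then describe
+ * the header and the data fragments through the SG entries of a separate FD.
+ */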
+static int dpaa2_eth_build_gso_fd(struct dpaa2_eth_priv *priv,
+ struct sk_buff *skb, struct dpaa2_fd *fd,
+ int *num_fds, u32 *total_fds_len)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ int hdr_len, total_len, data_left, fd_len;
+ int num_sge, err, i, sgt_buf_size;
+ struct dpaa2_fd *fd_start = fd;
+ struct dpaa2_sg_entry *sgt;
+ struct dpaa2_eth_swa *swa;
+ dma_addr_t sgt_addr, addr;
+ dma_addr_t tso_hdr_dma;
+ unsigned int index = 0;
+ struct tso_t tso;
+ char *tso_hdr;
+ void *sgt_buf;
+
+ /* Initialize the TSO handler, and prepare the first payload */
+ hdr_len = tso_start(skb, &tso);
+ *total_fds_len = 0;
+
+ total_len = skb->len - hdr_len;
+ while (total_len > 0) {
+ /* Prepare the HW SGT structure for this frame */
+ sgt_buf = dpaa2_eth_sgt_get(priv);
+ if (unlikely(!sgt_buf)) {
+ netdev_err(priv->net_dev, "dpaa2_eth_sgt_get() failed\n");
+ err = -ENOMEM;
+ goto err_sgt_get;
+ }
+ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+
+ /* Determine the data length of this frame */
+ data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+ total_len -= data_left;
+ fd_len = data_left + hdr_len;
+
+ /* Prepare packet headers: MAC + IP + TCP */
+ tso_hdr = kmalloc(TSO_HEADER_SIZE, GFP_ATOMIC);
+ if (!tso_hdr) {
+ err = -ENOMEM;
+ goto err_alloc_tso_hdr;
+ }
+
+ tso_build_hdr(skb, tso_hdr, &tso, data_left, total_len == 0);
+ tso_hdr_dma = dma_map_single(dev, tso_hdr, TSO_HEADER_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, tso_hdr_dma)) {
+ netdev_err(priv->net_dev, "dma_map_single(tso_hdr) failed\n");
+ err = -ENOMEM;
+ goto err_map_tso_hdr;
+ }
+
+ /* Setup the SG entry for the header */
+ dpaa2_sg_set_addr(sgt, tso_hdr_dma);
+ dpaa2_sg_set_len(sgt, hdr_len);
+ dpaa2_sg_set_final(sgt, data_left <= 0);
+
+ /* Compose the SG entries for each fragment of data */
+ num_sge = 1;
+ while (data_left > 0) {
+ int size;
+
+ /* Move to the next SG entry */
+ sgt++;
+ size = min_t(int, tso.size, data_left);
+
+ addr = dma_map_single(dev, tso.data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, addr)) {
+ netdev_err(priv->net_dev, "dma_map_single(tso.data) failed\n");
+ err = -ENOMEM;
+ goto err_map_data;
+ }
+ dpaa2_sg_set_addr(sgt, addr);
+ dpaa2_sg_set_len(sgt, size);
+ dpaa2_sg_set_final(sgt, size == data_left);
+
+ num_sge++;
+
+ /* Build the data for the __next__ fragment */
+ data_left -= size;
+ tso_build_data(skb, &tso, size);
+ }
+
+ /* Store the skb backpointer in the SGT buffer */
+ sgt_buf_size = priv->tx_data_offset + num_sge * sizeof(struct dpaa2_sg_entry);
+ swa = (struct dpaa2_eth_swa *)sgt_buf;
+ swa->type = DPAA2_ETH_SWA_SW_TSO;
+ swa->tso.skb = skb;
+ swa->tso.num_sg = num_sge;
+ swa->tso.sgt_size = sgt_buf_size;
+ swa->tso.is_last_fd = total_len == 0 ? 1 : 0;
+
+ /* Separately map the SGT buffer */
+ sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, sgt_addr))) {
+ netdev_err(priv->net_dev, "dma_map_single(sgt_buf) failed\n");
+ err = -ENOMEM;
+ goto err_map_sgt;
+ }
+
+ /* Setup the frame descriptor */
+ memset(fd, 0, sizeof(struct dpaa2_fd));
+ dpaa2_fd_set_offset(fd, priv->tx_data_offset);
+ dpaa2_fd_set_format(fd, dpaa2_fd_sg);
+ dpaa2_fd_set_addr(fd, sgt_addr);
+ dpaa2_fd_set_len(fd, fd_len);
+ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
+
+ *total_fds_len += fd_len;
+ /* Advance to the next frame descriptor */
+ fd++;
+ index++;
+ }
+
+ *num_fds = index;
+
+ return 0;
+
+err_map_sgt:
+err_map_data:
+ /* Unmap all the data S/G entries for the current FD */
+ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+ for (i = 1; i < num_sge; i++)
+ dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]),
+ dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE);
+
+ /* Unmap the header entry */
+ dma_unmap_single(dev, tso_hdr_dma, TSO_HEADER_SIZE, DMA_TO_DEVICE);
+err_map_tso_hdr:
+ kfree(tso_hdr);
+err_alloc_tso_hdr:
+ dpaa2_eth_sgt_recycle(priv, sgt_buf);
+err_sgt_get:
+ /* Free all the other FDs that were already fully created */
+ for (i = 0; i < index; i++)
+ dpaa2_eth_free_tx_fd(priv, NULL, &fd_start[i], false);
+
+ return err;
+}
+
+static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
+ struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int total_enqueued = 0, retries = 0, enqueued;
+ struct dpaa2_eth_drv_stats *percpu_extras;
+ struct rtnl_link_stats64 *percpu_stats;
+ unsigned int needed_headroom;
+ int num_fds = 1, max_retries;
+ struct dpaa2_eth_fq *fq;
+ struct netdev_queue *nq;
+ struct dpaa2_fd *fd;
+ u16 queue_mapping;
+ void *swa = NULL;
+ u8 prio = 0;
+ int err, i;
+ u32 fd_len;
+
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
+ fd = (this_cpu_ptr(priv->fd))->array;
+
+ needed_headroom = dpaa2_eth_needed_headroom(skb);
+
+ /* We'll be holding a back-reference to the skb until Tx Confirmation;
+ * we don't want that overwritten by a concurrent Tx with a cloned skb.
+ */
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (unlikely(!skb)) {
+ /* skb_unshare() has already freed the skb */
+ percpu_stats->tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ /* Setup the FD fields */
+
+ if (skb_is_gso(skb)) {
+ err = dpaa2_eth_build_gso_fd(priv, skb, fd, &num_fds, &fd_len);
+ percpu_extras->tx_sg_frames += num_fds;
+ percpu_extras->tx_sg_bytes += fd_len;
+ percpu_extras->tx_tso_frames += num_fds;
+ percpu_extras->tx_tso_bytes += fd_len;
+ } else if (skb_is_nonlinear(skb)) {
+ err = dpaa2_eth_build_sg_fd(priv, skb, fd, &swa);
+ percpu_extras->tx_sg_frames++;
+ percpu_extras->tx_sg_bytes += skb->len;
+ fd_len = dpaa2_fd_get_len(fd);
+ } else if (skb_headroom(skb) < needed_headroom) {
+ err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, fd, &swa);
+ percpu_extras->tx_sg_frames++;
+ percpu_extras->tx_sg_bytes += skb->len;
+ percpu_extras->tx_converted_sg_frames++;
+ percpu_extras->tx_converted_sg_bytes += skb->len;
+ fd_len = dpaa2_fd_get_len(fd);
+ } else {
+ err = dpaa2_eth_build_single_fd(priv, skb, fd, &swa);
+ fd_len = dpaa2_fd_get_len(fd);
+ }
+
+ if (unlikely(err)) {
+ percpu_stats->tx_dropped++;
+ goto err_build_fd;
+ }
+
+ if (swa && skb->cb[0])
+ dpaa2_eth_enable_tx_tstamp(priv, fd, swa, skb);
+
+ /* Tracing point */
+ for (i = 0; i < num_fds; i++)
+ trace_dpaa2_tx_fd(net_dev, &fd[i]);
+
+ /* TxConf FQ selection relies on queue id from the stack.
+ * In case of a forwarded frame from another DPNI interface, we choose
+ * a queue affined to the same core that processed the Rx frame
+ */
+ queue_mapping = skb_get_queue_mapping(skb);
+
+ if (net_dev->num_tc) {
+ prio = netdev_txq_to_tc(net_dev, queue_mapping);
+ /* Hardware interprets priority level 0 as being the highest,
+ * so we need to do a reverse mapping to the netdev tc index
+ */
+ prio = net_dev->num_tc - prio - 1;
+ /* We have only one FQ array entry for all Tx hardware queues
+ * with the same flow id (but different priority levels)
+ */
+ queue_mapping %= dpaa2_eth_queue_count(priv);
+ }
+ fq = &priv->fq[queue_mapping];
+ nq = netdev_get_tx_queue(net_dev, queue_mapping);
+ netdev_tx_sent_queue(nq, fd_len);
+
+	/* Everything that happens after this enqueue might race with
+ * the Tx confirmation callback for this frame
+ */
+ max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
+ while (total_enqueued < num_fds && retries < max_retries) {
+ err = priv->enqueue(priv, fq, &fd[total_enqueued],
+ prio, num_fds - total_enqueued, &enqueued);
+ if (err == -EBUSY) {
+ retries++;
+ continue;
+ }
+
+ total_enqueued += enqueued;
+ }
+ percpu_extras->tx_portal_busy += retries;
+
+ if (unlikely(err < 0)) {
+ percpu_stats->tx_errors++;
+ /* Clean up everything, including freeing the skb */
+ dpaa2_eth_free_tx_fd(priv, fq, fd, false);
+ netdev_tx_completed_queue(nq, 1, fd_len);
+ } else {
+ percpu_stats->tx_packets += total_enqueued;
+ percpu_stats->tx_bytes += fd_len;
+ }
+
+ return NETDEV_TX_OK;
+
+err_build_fd:
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static void dpaa2_eth_tx_onestep_tstamp(struct work_struct *work)
+{
+ struct dpaa2_eth_priv *priv = container_of(work, struct dpaa2_eth_priv,
+ tx_onestep_tstamp);
+ struct sk_buff *skb;
+
+ while (true) {
+ skb = skb_dequeue(&priv->tx_skbs);
+ if (!skb)
+ return;
+
+		/* Take the lock just before transmitting a one-step
+		 * timestamping packet, and release it in
+		 * dpaa2_eth_free_tx_fd() once the packet is confirmed
+		 * as sent by hardware, or when cleaning up after a
+		 * transmit failure.
+		 */
+ mutex_lock(&priv->onestep_tstamp_lock);
+ __dpaa2_eth_tx(skb, priv->net_dev);
+ }
+}
+
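+/* ndo_start_xmit hook: one-step timestamping PTP Sync packets are deferred
+ * to a workqueue so their transmission can be serialized; all other frames
+ * go directly through __dpaa2_eth_tx().
+ */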
+static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ u8 msgtype, twostep, udp;
+ u16 offset1, offset2;
+
+ /* Utilize skb->cb[0] for timestamping request per skb */
+ skb->cb[0] = 0;
+
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && dpaa2_ptp) {
+ if (priv->tx_tstamp_type == HWTSTAMP_TX_ON)
+ skb->cb[0] = TX_TSTAMP;
+ else if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
+ skb->cb[0] = TX_TSTAMP_ONESTEP_SYNC;
+ }
+
+ /* TX for one-step timestamping PTP Sync packet */
+ if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
+ if (!dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
+ &offset1, &offset2))
+ if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0) {
+ skb_queue_tail(&priv->tx_skbs, skb);
+ queue_work(priv->dpaa2_ptp_wq,
+ &priv->tx_onestep_tstamp);
+ return NETDEV_TX_OK;
+ }
+		/* Fall back to two-step timestamping if this is not a
+		 * one-step timestamping PTP Sync packet
+		 */
+ skb->cb[0] = TX_TSTAMP;
+ }
+
+ /* TX for other packets */
+ return __dpaa2_eth_tx(skb, net_dev);
+}
+
+/* Tx confirmation frame processing routine */
+static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ struct dpaa2_eth_fq *fq)
+{
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa2_eth_drv_stats *percpu_extras;
+ u32 fd_len = dpaa2_fd_get_len(fd);
+ u32 fd_errors;
+
+ /* Tracing point */
+ trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
+
+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
+ percpu_extras->tx_conf_frames++;
+ percpu_extras->tx_conf_bytes += fd_len;
+ ch->stats.bytes_per_cdan += fd_len;
+
+ /* Check frame errors in the FD field */
+ fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
+ dpaa2_eth_free_tx_fd(priv, fq, fd, true);
+
+ if (likely(!fd_errors))
+ return;
+
+ if (net_ratelimit())
+ netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
+ fd_errors);
+
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+ /* Tx-conf logically pertains to the egress path. */
+ percpu_stats->tx_errors++;
+}
+
+static int dpaa2_eth_set_rx_vlan_filtering(struct dpaa2_eth_priv *priv,
+ bool enable)
+{
+ int err;
+
+ err = dpni_enable_vlan_filter(priv->mc_io, 0, priv->mc_token, enable);
+
+ if (err) {
+ netdev_err(priv->net_dev,
+ "dpni_enable_vlan_filter failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
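+/* Toggle Rx L3/L4 checksum validation offload in hardware */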
+static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
+{
+ int err;
+
+ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
+ DPNI_OFF_RX_L3_CSUM, enable);
+ if (err) {
+ netdev_err(priv->net_dev,
+ "dpni_set_offload(RX_L3_CSUM) failed\n");
+ return err;
+ }
+
+ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
+ DPNI_OFF_RX_L4_CSUM, enable);
+ if (err) {
+ netdev_err(priv->net_dev,
+ "dpni_set_offload(RX_L4_CSUM) failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
+{
+ int err;
+
+ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
+ DPNI_OFF_TX_L3_CSUM, enable);
+ if (err) {
+ netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
+ return err;
+ }
+
+ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
+ DPNI_OFF_TX_L4_CSUM, enable);
+ if (err) {
+ netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+/* Perform a single release command to add buffers
+ * to the specified buffer pool
+ */
+static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch, u16 bpid)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
+ struct page *page;
+ dma_addr_t addr;
+ int retries = 0;
+ int i, err;
+
+ for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
+		/* Allocate one page for each Rx buffer. WRIOP sees the
+		 * entire page except for a tailroom reserved for skb
+		 * shared info and alignment padding.
+		 */
+ page = dev_alloc_pages(0);
+ if (!page)
+ goto err_alloc;
+
+ addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, addr)))
+ goto err_map;
+
+ buf_array[i] = addr;
+
+ /* tracing point */
+ trace_dpaa2_eth_buf_seed(priv->net_dev, page_address(page),
+ DPAA2_ETH_RX_BUF_RAW_SIZE,
+ addr, priv->rx_buf_size,
+ bpid);
+ }
+
+release_bufs:
+ /* In case the portal is busy, retry until successful */
+ while ((err = dpaa2_io_service_release(ch->dpio, bpid,
+ buf_array, i)) == -EBUSY) {
+ if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
+ break;
+ cpu_relax();
+ }
+
+ /* If release command failed, clean up and bail out;
+ * not much else we can do about it
+ */
+ if (err) {
+ dpaa2_eth_free_bufs(priv, buf_array, i);
+ return 0;
+ }
+
+ return i;
+
+err_map:
+ __free_pages(page, 0);
+err_alloc:
+ /* If we managed to allocate at least some buffers,
+ * release them to hardware
+ */
+ if (i)
+ goto release_bufs;
+
+ return 0;
+}
+
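+/* Seed the buffer pool: add buffers in batches of DPAA2_ETH_BUFS_PER_CMD
+ * until each channel accounts for DPAA2_ETH_NUM_BUFS buffers.
+ */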
+static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
+{
+ int i, j;
+ int new_count;
+
+ for (j = 0; j < priv->num_channels; j++) {
+ for (i = 0; i < DPAA2_ETH_NUM_BUFS;
+ i += DPAA2_ETH_BUFS_PER_CMD) {
+ new_count = dpaa2_eth_add_bufs(priv, priv->channel[j], bpid);
+ priv->channel[j]->buf_count += new_count;
+
+			if (new_count < DPAA2_ETH_BUFS_PER_CMD)
+				return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+/* Drain the specified number of buffers from the DPNI's private buffer pool.
+ * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
+ */
+static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count)
+{
+ u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
+ int retries = 0;
+ int ret;
+
+ do {
+ ret = dpaa2_io_service_acquire(NULL, priv->bpid,
+ buf_array, count);
+ if (ret < 0) {
+ if (ret == -EBUSY &&
+ retries++ < DPAA2_ETH_SWP_BUSY_RETRIES)
+ continue;
+ netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
+ return;
+ }
+ dpaa2_eth_free_bufs(priv, buf_array, ret);
+ retries = 0;
+ } while (ret);
+}
+
+static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv)
+{
+ int i;
+
+ dpaa2_eth_drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
+ dpaa2_eth_drain_bufs(priv, 1);
+
+ for (i = 0; i < priv->num_channels; i++)
+ priv->channel[i]->buf_count = 0;
+}
+
+/* Function is called from softirq context only, so we don't need to guard
+ * the access to percpu count
+ */
+static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ u16 bpid)
+{
+ int new_count;
+
+ if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
+ return 0;
+
+ do {
+ new_count = dpaa2_eth_add_bufs(priv, ch, bpid);
+ if (unlikely(!new_count)) {
+ /* Out of memory; abort for now, we'll try later on */
+ break;
+ }
+ ch->buf_count += new_count;
+ } while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
+
+ if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
+ return -ENOMEM;
+
+ return 0;
+}
+
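+/* Free all buffers held in the per-CPU SGT caches, e.g. when the interface
+ * is brought down.
+ */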
+static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv *priv)
+{
+ struct dpaa2_eth_sgt_cache *sgt_cache;
+ u16 count;
+ int k, i;
+
+ for_each_possible_cpu(k) {
+ sgt_cache = per_cpu_ptr(priv->sgt_cache, k);
+ count = sgt_cache->count;
+
+ for (i = 0; i < count; i++)
+ skb_free_frag(sgt_cache->buf[i]);
+ sgt_cache->count = 0;
+ }
+}
+
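+/* Dequeue the frames pending on this channel into its frame store,
+ * retrying while the software portal is busy.
+ */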
+static int dpaa2_eth_pull_channel(struct dpaa2_eth_channel *ch)
+{
+ int err;
+ int dequeues = -1;
+
+ /* Retry while portal is busy */
+ do {
+ err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
+ ch->store);
+ dequeues++;
+ cpu_relax();
+ } while (err == -EBUSY && dequeues < DPAA2_ETH_SWP_BUSY_RETRIES);
+
+ ch->stats.dequeue_portal_busy += dequeues;
+ if (unlikely(err))
+ ch->stats.pull_err++;
+
+ return err;
+}
+
+/* NAPI poll routine
+ *
+ * Frames are dequeued from the QMan channel associated with this NAPI context.
+ * Rx, Tx confirmation and (if configured) Rx error frames all count
+ * towards the NAPI budget.
+ */
+static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
+{
+ struct dpaa2_eth_channel *ch;
+ struct dpaa2_eth_priv *priv;
+ int rx_cleaned = 0, txconf_cleaned = 0;
+ struct dpaa2_eth_fq *fq, *txc_fq = NULL;
+ struct netdev_queue *nq;
+ int store_cleaned, work_done;
+ struct list_head rx_list;
+ int retries = 0;
+ u16 flowid;
+ int err;
+
+ ch = container_of(napi, struct dpaa2_eth_channel, napi);
+ ch->xdp.res = 0;
+ priv = ch->priv;
+
+ INIT_LIST_HEAD(&rx_list);
+ ch->rx_list = &rx_list;
+
+ do {
+ err = dpaa2_eth_pull_channel(ch);
+ if (unlikely(err))
+ break;
+
+ /* Refill pool if appropriate */
+ dpaa2_eth_refill_pool(priv, ch, priv->bpid);
+
+ store_cleaned = dpaa2_eth_consume_frames(ch, &fq);
+ if (store_cleaned <= 0)
+ break;
+ if (fq->type == DPAA2_RX_FQ) {
+ rx_cleaned += store_cleaned;
+ flowid = fq->flowid;
+ } else {
+ txconf_cleaned += store_cleaned;
+ /* We have a single Tx conf FQ on this channel */
+ txc_fq = fq;
+ }
+
+ /* If we either consumed the whole NAPI budget with Rx frames
+ * or we reached the Tx confirmations threshold, we're done.
+ */
+ if (rx_cleaned >= budget ||
+ txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
+ work_done = budget;
+ if (ch->xdp.res & XDP_REDIRECT)
+ xdp_do_flush();
+ goto out;
+ }
+ } while (store_cleaned);
+
+ if (ch->xdp.res & XDP_REDIRECT)
+ xdp_do_flush();
+
+ /* Update NET DIM with the values for this CDAN */
+ dpaa2_io_update_net_dim(ch->dpio, ch->stats.frames_per_cdan,
+ ch->stats.bytes_per_cdan);
+ ch->stats.frames_per_cdan = 0;
+ ch->stats.bytes_per_cdan = 0;
+
+ /* We didn't consume the entire budget, so finish napi and
+ * re-enable data availability notifications
+ */
+ napi_complete_done(napi, rx_cleaned);
+ do {
+ err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
+ cpu_relax();
+ } while (err == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES);
+ WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
+ ch->nctx.desired_cpu);
+
+ work_done = max(rx_cleaned, 1);
+
+out:
+ netif_receive_skb_list(ch->rx_list);
+
+ if (txc_fq && txc_fq->dq_frames) {
+ nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
+ netdev_tx_completed_queue(nq, txc_fq->dq_frames,
+ txc_fq->dq_bytes);
+ txc_fq->dq_frames = 0;
+ txc_fq->dq_bytes = 0;
+ }
+
+ if (rx_cleaned && ch->xdp.res & XDP_TX)
+ dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[flowid]);
+
+ return work_done;
+}
+
+static void dpaa2_eth_enable_ch_napi(struct dpaa2_eth_priv *priv)
+{
+ struct dpaa2_eth_channel *ch;
+ int i;
+
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
+ napi_enable(&ch->napi);
+ }
+}
+
+static void dpaa2_eth_disable_ch_napi(struct dpaa2_eth_priv *priv)
+{
+ struct dpaa2_eth_channel *ch;
+ int i;
+
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
+ napi_disable(&ch->napi);
+ }
+}
+
+void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
+ bool tx_pause, bool pfc)
+{
+ struct dpni_taildrop td = {0};
+ struct dpaa2_eth_fq *fq;
+ int i, err;
+
+ /* FQ taildrop: threshold is in bytes, per frame queue. Enabled if
+ * flow control is disabled (as it might interfere with either the
+ * buffer pool depletion trigger for pause frames or with the group
+ * congestion trigger for PFC frames)
+ */
+ td.enable = !tx_pause;
+ if (priv->rx_fqtd_enabled == td.enable)
+ goto set_cgtd;
+
+ td.threshold = DPAA2_ETH_FQ_TAILDROP_THRESH;
+ td.units = DPNI_CONGESTION_UNIT_BYTES;
+
+ for (i = 0; i < priv->num_fqs; i++) {
+ fq = &priv->fq[i];
+ if (fq->type != DPAA2_RX_FQ)
+ continue;
+ err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
+ DPNI_CP_QUEUE, DPNI_QUEUE_RX,
+ fq->tc, fq->flowid, &td);
+ if (err) {
+ netdev_err(priv->net_dev,
+ "dpni_set_taildrop(FQ) failed\n");
+ return;
+ }
+ }
+
+ priv->rx_fqtd_enabled = td.enable;
+
+set_cgtd:
+	/* Congestion group taildrop: threshold is in frames, per group
+	 * of FQs belonging to the same traffic class.
+	 * Enabled if general Tx pause is disabled or if PFCs are enabled
+	 * (the congestion group threshold for PFC generation is lower than
+	 * the CG taildrop threshold, so it won't interfere with it; we also
+	 * want frames in non-PFC enabled traffic classes to be kept in check)
+	 */
+ td.enable = !tx_pause || pfc;
+ if (priv->rx_cgtd_enabled == td.enable)
+ return;
+
+ td.threshold = DPAA2_ETH_CG_TAILDROP_THRESH(priv);
+ td.units = DPNI_CONGESTION_UNIT_FRAMES;
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
+ DPNI_CP_GROUP, DPNI_QUEUE_RX,
+ i, 0, &td);
+ if (err) {
+ netdev_err(priv->net_dev,
+ "dpni_set_taildrop(CG) failed\n");
+ return;
+ }
+ }
+
+ priv->rx_cgtd_enabled = td.enable;
+}
+
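+/* Query the current DPNI link state and sync the Rx taildrop configuration
+ * and, for non-phylink managed interfaces, the netif carrier with it.
+ */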
+static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv)
+{
+ struct dpni_link_state state = {0};
+ bool tx_pause;
+ int err;
+
+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
+ if (unlikely(err)) {
+ netdev_err(priv->net_dev,
+ "dpni_get_link_state() failed\n");
+ return err;
+ }
+
+ /* If Tx pause frame settings have changed, we need to update
+ * Rx FQ taildrop configuration as well. We configure taildrop
+ * only when pause frame generation is disabled.
+ */
+ tx_pause = dpaa2_eth_tx_pause_enabled(state.options);
+ dpaa2_eth_set_rx_taildrop(priv, tx_pause, priv->pfc_enabled);
+
+ /* When we manage the MAC/PHY using phylink there is no need
+ * to manually update the netif_carrier.
+ */
+ if (dpaa2_eth_is_type_phy(priv))
+ goto out;
+
+	/* Check link state; speed / duplex changes are not treated yet */
+ if (priv->link_state.up == state.up)
+ goto out;
+
+ if (state.up) {
+ netif_carrier_on(priv->net_dev);
+ netif_tx_start_all_queues(priv->net_dev);
+ } else {
+ netif_tx_stop_all_queues(priv->net_dev);
+ netif_carrier_off(priv->net_dev);
+ }
+
+ netdev_info(priv->net_dev, "Link Event: state %s\n",
+ state.up ? "up" : "down");
+
+out:
+ priv->link_state = state;
+
+ return 0;
+}
+
+static int dpaa2_eth_open(struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err;
+
+ err = dpaa2_eth_seed_pool(priv, priv->bpid);
+ if (err) {
+ /* Not much to do; the buffer pool, though not filled up,
+ * may still contain some buffers which would enable us
+ * to limp on.
+ */
+ netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
+ priv->dpbp_dev->obj_desc.id, priv->bpid);
+ }
+
+ if (!dpaa2_eth_is_type_phy(priv)) {
+ /* We'll only start the txqs when the link is actually ready;
+ * make sure we don't race against the link up notification,
+ * which may come immediately after dpni_enable();
+ */
+ netif_tx_stop_all_queues(net_dev);
+
+ /* Also, explicitly set carrier off, otherwise
+ * netif_carrier_ok() will return true and cause 'ip link show'
+ * to report the LOWER_UP flag, even though the link
+ * notification wasn't even received.
+ */
+ netif_carrier_off(net_dev);
+ }
+ dpaa2_eth_enable_ch_napi(priv);
+
+ err = dpni_enable(priv->mc_io, 0, priv->mc_token);
+ if (err < 0) {
+ netdev_err(net_dev, "dpni_enable() failed\n");
+ goto enable_err;
+ }
+
+ if (dpaa2_eth_is_type_phy(priv)) {
+ dpaa2_mac_start(priv->mac);
+ phylink_start(priv->mac->phylink);
+ }
+
+ return 0;
+
+enable_err:
+ dpaa2_eth_disable_ch_napi(priv);
+ dpaa2_eth_drain_pool(priv);
+ return err;
+}
+
+/* Total number of in-flight frames on ingress queues */
+static u32 dpaa2_eth_ingress_fq_count(struct dpaa2_eth_priv *priv)
+{
+ struct dpaa2_eth_fq *fq;
+ u32 fcnt = 0, bcnt = 0, total = 0;
+ int i, err;
+
+ for (i = 0; i < priv->num_fqs; i++) {
+ fq = &priv->fq[i];
+ err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
+ if (err) {
+			netdev_warn(priv->net_dev, "query_fq_count failed\n");
+ break;
+ }
+ total += fcnt;
+ }
+
+ return total;
+}
+
+static void dpaa2_eth_wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv)
+{
+ int retries = 10;
+ u32 pending;
+
+ do {
+ pending = dpaa2_eth_ingress_fq_count(priv);
+ if (pending)
+ msleep(100);
+ } while (pending && --retries);
+}
+
+#define DPNI_TX_PENDING_VER_MAJOR 7
+#define DPNI_TX_PENDING_VER_MINOR 13
+static void dpaa2_eth_wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv)
+{
+ union dpni_statistics stats;
+ int retries = 10;
+ int err;
+
+ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_TX_PENDING_VER_MAJOR,
+ DPNI_TX_PENDING_VER_MINOR) < 0)
+ goto out;
+
+ do {
+ err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, 6,
+ &stats);
+ if (err)
+ goto out;
+ if (stats.page_6.tx_pending_frames == 0)
+ return;
+ } while (--retries);
+
+out:
+ msleep(500);
+}
+
+static int dpaa2_eth_stop(struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int dpni_enabled = 0;
+ int retries = 10;
+
+ if (dpaa2_eth_is_type_phy(priv)) {
+ phylink_stop(priv->mac->phylink);
+ dpaa2_mac_stop(priv->mac);
+ } else {
+ netif_tx_stop_all_queues(net_dev);
+ netif_carrier_off(net_dev);
+ }
+
+ /* On dpni_disable(), the MC firmware will:
+ * - stop MAC Rx and wait for all Rx frames to be enqueued to software
+ * - cut off WRIOP dequeues from egress FQs and wait until transmission
+ * of all in flight Tx frames is finished (and corresponding Tx conf
+ * frames are enqueued back to software)
+ *
+ * Before calling dpni_disable(), we wait for all Tx frames to arrive
+ * on WRIOP. After it finishes, wait until all remaining frames on Rx
+ * and Tx conf queues are consumed on NAPI poll.
+ */
+ dpaa2_eth_wait_for_egress_fq_empty(priv);
+
+ do {
+ dpni_disable(priv->mc_io, 0, priv->mc_token);
+ dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
+ if (dpni_enabled)
+ /* Allow the hardware some slack */
+ msleep(100);
+ } while (dpni_enabled && --retries);
+ if (!retries) {
+ netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
+ /* Must go on and disable NAPI nonetheless, so we don't crash at
+ * the next "ifconfig up"
+ */
+ }
+
+ dpaa2_eth_wait_for_ingress_fq_empty(priv);
+ dpaa2_eth_disable_ch_napi(priv);
+
+ /* Empty the buffer pool */
+ dpaa2_eth_drain_pool(priv);
+
+ /* Empty the Scatter-Gather Buffer cache */
+ dpaa2_eth_sgt_cache_drain(priv);
+
+ return 0;
+}
+
+static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct device *dev = net_dev->dev.parent;
+ int err;
+
+ err = eth_mac_addr(net_dev, addr);
+ if (err < 0) {
+ dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
+ return err;
+ }
+
+ err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
+ net_dev->dev_addr);
+ if (err) {
+ dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+/* Fill in counters maintained by the GPP driver. These may be different from
+ * the hardware counters obtained by ethtool.
+ */
+static void dpaa2_eth_get_stats(struct net_device *net_dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct rtnl_link_stats64 *percpu_stats;
+ u64 *cpustats;
+ u64 *netstats = (u64 *)stats;
+ int i, j;
+ int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
+
+ for_each_possible_cpu(i) {
+ percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
+ cpustats = (u64 *)percpu_stats;
+ for (j = 0; j < num; j++)
+ netstats[j] += cpustats[j];
+ }
+}
+
+/* Copy mac unicast addresses from @net_dev to @priv.
+ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
+ */
+static void dpaa2_eth_add_uc_hw_addr(const struct net_device *net_dev,
+ struct dpaa2_eth_priv *priv)
+{
+ struct netdev_hw_addr *ha;
+ int err;
+
+ netdev_for_each_uc_addr(ha, net_dev) {
+ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
+ ha->addr);
+ if (err)
+ netdev_warn(priv->net_dev,
+ "Could not add ucast MAC %pM to the filtering table (err %d)\n",
+ ha->addr, err);
+ }
+}
+
+/* Copy mac multicast addresses from @net_dev to @priv
+ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
+ */
+static void dpaa2_eth_add_mc_hw_addr(const struct net_device *net_dev,
+ struct dpaa2_eth_priv *priv)
+{
+ struct netdev_hw_addr *ha;
+ int err;
+
+ netdev_for_each_mc_addr(ha, net_dev) {
+ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
+ ha->addr);
+ if (err)
+ netdev_warn(priv->net_dev,
+ "Could not add mcast MAC %pM to the filtering table (err %d)\n",
+ ha->addr, err);
+ }
+}
+
+static int dpaa2_eth_rx_add_vid(struct net_device *net_dev,
+ __be16 vlan_proto, u16 vid)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err;
+
+ err = dpni_add_vlan_id(priv->mc_io, 0, priv->mc_token,
+ vid, 0, 0, 0);
+
+ if (err) {
+ netdev_warn(priv->net_dev,
+ "Could not add the vlan id %u\n",
+ vid);
+ return err;
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_rx_kill_vid(struct net_device *net_dev,
+ __be16 vlan_proto, u16 vid)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err;
+
+ err = dpni_remove_vlan_id(priv->mc_io, 0, priv->mc_token, vid);
+
+ if (err) {
+ netdev_warn(priv->net_dev,
+ "Could not remove the vlan id %u\n",
+ vid);
+ return err;
+ }
+
+ return 0;
+}
+
+static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int uc_count = netdev_uc_count(net_dev);
+ int mc_count = netdev_mc_count(net_dev);
+ u8 max_mac = priv->dpni_attrs.mac_filter_entries;
+ u32 options = priv->dpni_attrs.options;
+ u16 mc_token = priv->mc_token;
+ struct fsl_mc_io *mc_io = priv->mc_io;
+ int err;
+
+ /* Basic sanity checks; these probably indicate a misconfiguration */
+ if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
+ netdev_info(net_dev,
+ "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
+ max_mac);
+
+ /* Force promiscuous if the uc or mc counts exceed our capabilities. */
+ if (uc_count > max_mac) {
+ netdev_info(net_dev,
+ "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
+ uc_count, max_mac);
+ goto force_promisc;
+ }
+ if (mc_count + uc_count > max_mac) {
+ netdev_info(net_dev,
+ "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
+ uc_count + mc_count, max_mac);
+ goto force_mc_promisc;
+ }
+
+ /* Adjust promisc settings due to flag combinations */
+ if (net_dev->flags & IFF_PROMISC)
+ goto force_promisc;
+ if (net_dev->flags & IFF_ALLMULTI) {
+ /* First, rebuild unicast filtering table. This should be done
+ * in promisc mode, in order to avoid frame loss while we
+ * progressively add entries to the table.
+ * We don't know whether we had been in promisc already, and
+ * making an MC call to find out is expensive; so set uc promisc
+ * nonetheless.
+ */
+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
+ if (err)
+ netdev_warn(net_dev, "Can't set uc promisc\n");
+
+ /* Actual uc table reconstruction. */
+ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
+ if (err)
+ netdev_warn(net_dev, "Can't clear uc filters\n");
+ dpaa2_eth_add_uc_hw_addr(net_dev, priv);
+
+ /* Finally, clear uc promisc and set mc promisc as requested. */
+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
+ if (err)
+ netdev_warn(net_dev, "Can't clear uc promisc\n");
+ goto force_mc_promisc;
+ }
+
+ /* Neither unicast, nor multicast promisc will be on... eventually.
+ * For now, rebuild mac filtering tables while forcing both of them on.
+ */
+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
+ if (err)
+ netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
+ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
+ if (err)
+ netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
+
+ /* Actual mac filtering tables reconstruction */
+ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
+ if (err)
+ netdev_warn(net_dev, "Can't clear mac filters\n");
+ dpaa2_eth_add_mc_hw_addr(net_dev, priv);
+ dpaa2_eth_add_uc_hw_addr(net_dev, priv);
+
+ /* Now we can clear both ucast and mcast promisc, without risking
+ * to drop legitimate frames anymore.
+ */
+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
+ if (err)
+ netdev_warn(net_dev, "Can't clear ucast promisc\n");
+ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
+ if (err)
+ netdev_warn(net_dev, "Can't clear mcast promisc\n");
+
+ return;
+
+force_promisc:
+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
+ if (err)
+ netdev_warn(net_dev, "Can't set ucast promisc\n");
+force_mc_promisc:
+ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
+ if (err)
+ netdev_warn(net_dev, "Can't set mcast promisc\n");
+}
+
+static int dpaa2_eth_set_features(struct net_device *net_dev,
+ netdev_features_t features)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ netdev_features_t changed = features ^ net_dev->features;
+ bool enable;
+ int err;
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
+ err = dpaa2_eth_set_rx_vlan_filtering(priv, enable);
+ if (err)
+ return err;
+ }
+
+ if (changed & NETIF_F_RXCSUM) {
+ enable = !!(features & NETIF_F_RXCSUM);
+ err = dpaa2_eth_set_rx_csum(priv, enable);
+ if (err)
+ return err;
+ }
+
+ if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
+ enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
+ err = dpaa2_eth_set_tx_csum(priv, enable);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
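+/* SIOCSHWTSTAMP handler: configure hardware Tx/Rx timestamping */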
+static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ struct hwtstamp_config config;
+
+ if (!dpaa2_ptp)
+ return -EINVAL;
+
+ if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ priv->tx_tstamp_type = config.tx_type;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
+ priv->rx_tstamp = false;
+ } else {
+ priv->rx_tstamp = true;
+ /* TS is set for all frame types, not only those requested */
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ }
+
+ if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
+ dpaa2_ptp_onestep_reg_update_method(priv);
+
+ return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+
+ if (cmd == SIOCSHWTSTAMP)
+ return dpaa2_eth_ts_ioctl(dev, rq, cmd);
+
+ if (dpaa2_eth_is_type_phy(priv))
+ return phylink_mii_ioctl(priv->mac->phylink, rq, cmd);
+
+ return -EOPNOTSUPP;
+}
+
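+/* With XDP attached, each frame must fit in a single linear Rx buffer, so
+ * the supported MTU is limited by the buffer size minus reserved headroom.
+ */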
+static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
+{
+ int mfl, linear_mfl;
+
+ mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
+ linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE -
+ dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;
+
+ if (mfl > linear_mfl) {
+ netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n",
+ linear_mfl - VLAN_ETH_HLEN);
+ return false;
+ }
+
+ return true;
+}
+
+static int dpaa2_eth_set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
+{
+ int mfl, err;
+
+ /* We enforce a maximum Rx frame length based on MTU only if we have
+ * an XDP program attached (in order to avoid Rx S/G frames).
+ * Otherwise, we accept all incoming frames as long as they are not
+ * larger than maximum size supported in hardware
+ */
+ if (has_xdp)
+ mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
+ else
+ mfl = DPAA2_ETH_MFL;
+
+ err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl);
+ if (err) {
+ netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ int err;
+
+ if (!priv->xdp_prog)
+ goto out;
+
+ if (!xdp_mtu_valid(priv, new_mtu))
+ return -EINVAL;
+
+ err = dpaa2_eth_set_rx_mfl(priv, new_mtu, true);
+ if (err)
+ return err;
+
+out:
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+static int dpaa2_eth_update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
+{
+ struct dpni_buffer_layout buf_layout = {0};
+ int err;
+
+ err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_RX, &buf_layout);
+ if (err) {
+ netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n");
+ return err;
+ }
+
+ /* Reserve extra headroom for XDP header size changes */
+ buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) +
+ (has_xdp ? XDP_PACKET_HEADROOM : 0);
+ buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
+ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_RX, &buf_layout);
+ if (err) {
+ netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ struct dpaa2_eth_channel *ch;
+ struct bpf_prog *old;
+ bool up, need_update;
+ int i, err;
+
+ if (prog && !xdp_mtu_valid(priv, dev->mtu))
+ return -EINVAL;
+
+ if (prog)
+ bpf_prog_add(prog, priv->num_channels);
+
+ up = netif_running(dev);
+ need_update = (!!priv->xdp_prog != !!prog);
+
+ if (up)
+ dpaa2_eth_stop(dev);
+
+ /* While in xdp mode, enforce a maximum Rx frame size based on MTU.
+ * Also, when switching between xdp/non-xdp modes we need to reconfigure
+ * our Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop,
+ * so we are sure no old format buffers will be used from now on.
+ */
+ if (need_update) {
+ err = dpaa2_eth_set_rx_mfl(priv, dev->mtu, !!prog);
+ if (err)
+ goto out_err;
+ err = dpaa2_eth_update_rx_buffer_headroom(priv, !!prog);
+ if (err)
+ goto out_err;
+ }
+
+ old = xchg(&priv->xdp_prog, prog);
+ if (old)
+ bpf_prog_put(old);
+
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
+ old = xchg(&ch->xdp.prog, prog);
+ if (old)
+ bpf_prog_put(old);
+ }
+
+ if (up) {
+ err = dpaa2_eth_open(dev);
+ if (err)
+ return err;
+ }
+
+ return 0;
+
+out_err:
+ if (prog)
+ bpf_prog_sub(prog, priv->num_channels);
+ if (up)
+ dpaa2_eth_open(dev);
+
+ return err;
+}
+
+static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return dpaa2_eth_setup_xdp(dev, xdp->prog);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev,
+ struct xdp_frame *xdpf,
+ struct dpaa2_fd *fd)
+{
+ struct device *dev = net_dev->dev.parent;
+ unsigned int needed_headroom;
+ struct dpaa2_eth_swa *swa;
+ void *buffer_start, *aligned_start;
+ dma_addr_t addr;
+
+ /* We require a minimum headroom to be able to transmit the frame.
+ * Otherwise return an error and let the original net_device handle it
+ */
+ needed_headroom = dpaa2_eth_needed_headroom(NULL);
+ if (xdpf->headroom < needed_headroom)
+ return -EINVAL;
+
+ /* Setup the FD fields */
+ memset(fd, 0, sizeof(*fd));
+
+ /* Align FD address, if possible */
+ buffer_start = xdpf->data - needed_headroom;
+ aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
+ DPAA2_ETH_TX_BUF_ALIGN);
+ if (aligned_start >= xdpf->data - xdpf->headroom)
+ buffer_start = aligned_start;
+
+ swa = (struct dpaa2_eth_swa *)buffer_start;
+ /* fill in necessary fields here */
+ swa->type = DPAA2_ETH_SWA_XDP;
+ swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start;
+ swa->xdp.xdpf = xdpf;
+
+ addr = dma_map_single(dev, buffer_start,
+ swa->xdp.dma_size,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, addr)))
+ return -ENOMEM;
+
+ dpaa2_fd_set_addr(fd, addr);
+ dpaa2_fd_set_offset(fd, xdpf->data - buffer_start);
+ dpaa2_fd_set_len(fd, xdpf->len);
+ dpaa2_fd_set_format(fd, dpaa2_fd_single);
+ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
+
+ return 0;
+}
+
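+/* ndo_xdp_xmit hook: build a FD for each redirected frame, then enqueue
+ * them in a single batch on the Tx queue affine to the current CPU.
+ */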
+static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct dpaa2_eth_xdp_fds *xdp_redirect_fds;
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa2_eth_fq *fq;
+ struct dpaa2_fd *fds;
+ int enqueued, i, err;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ if (!netif_running(net_dev))
+ return -ENETDOWN;
+
+ fq = &priv->fq[smp_processor_id()];
+ xdp_redirect_fds = &fq->xdp_redirect_fds;
+ fds = xdp_redirect_fds->fds;
+
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+
+ /* create a FD for each xdp_frame in the list received */
+ for (i = 0; i < n; i++) {
+ err = dpaa2_eth_xdp_create_fd(net_dev, frames[i], &fds[i]);
+ if (err)
+ break;
+ }
+ xdp_redirect_fds->num = i;
+
+ /* enqueue all the frame descriptors */
+ enqueued = dpaa2_eth_xdp_flush(priv, fq, xdp_redirect_fds);
+
+ /* update statistics */
+ percpu_stats->tx_packets += enqueued;
+ for (i = 0; i < enqueued; i++)
+ percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
+
+ return enqueued;
+}
+
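+/* Set the XPS mapping so each Tx netdev queue prefers the CPU to which its
+ * Tx confirmation queue is affine.
+ */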
+static int update_xps(struct dpaa2_eth_priv *priv)
+{
+ struct net_device *net_dev = priv->net_dev;
+ struct cpumask xps_mask;
+ struct dpaa2_eth_fq *fq;
+ int i, num_queues, netdev_queues;
+ int err = 0;
+
+ num_queues = dpaa2_eth_queue_count(priv);
+ netdev_queues = (net_dev->num_tc ? : 1) * num_queues;
+
+ /* The first <num_queues> entries in priv->fq array are Tx/Tx conf
+ * queues, so only process those
+ */
+ for (i = 0; i < netdev_queues; i++) {
+ fq = &priv->fq[i % num_queues];
+
+ cpumask_clear(&xps_mask);
+ cpumask_set_cpu(fq->target_cpu, &xps_mask);
+
+ err = netif_set_xps_queue(net_dev, &xps_mask, i);
+ if (err) {
+ netdev_warn_once(net_dev, "Error setting XPS queue\n");
+ break;
+ }
+ }
+
+ return err;
+}
+
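+/* mqprio qdisc offload: expose one hardware traffic class per requested tc,
+ * each mapped to a contiguous range of Tx queues.
+ */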
+static int dpaa2_eth_setup_mqprio(struct net_device *net_dev,
+ struct tc_mqprio_qopt *mqprio)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ u8 num_tc, num_queues;
+ int i;
+
+ mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ num_queues = dpaa2_eth_queue_count(priv);
+ num_tc = mqprio->num_tc;
+
+ if (num_tc == net_dev->num_tc)
+ return 0;
+
+ if (num_tc > dpaa2_eth_tc_count(priv)) {
+ netdev_err(net_dev, "Max %d traffic classes supported\n",
+ dpaa2_eth_tc_count(priv));
+ return -EOPNOTSUPP;
+ }
+
+ if (!num_tc) {
+ netdev_reset_tc(net_dev);
+ netif_set_real_num_tx_queues(net_dev, num_queues);
+ goto out;
+ }
+
+ netdev_set_num_tc(net_dev, num_tc);
+ netif_set_real_num_tx_queues(net_dev, num_tc * num_queues);
+
+ for (i = 0; i < num_tc; i++)
+ netdev_set_tc_queue(net_dev, i, num_queues, i * num_queues);
+
+out:
+ update_xps(priv);
+
+ return 0;
+}
+
+#define bps_to_mbits(rate) (div_u64((rate), 1000000) * 8)
+
+static int dpaa2_eth_setup_tbf(struct net_device *net_dev, struct tc_tbf_qopt_offload *p)
+{
+ struct tc_tbf_qopt_offload_replace_params *cfg = &p->replace_params;
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct dpni_tx_shaping_cfg tx_cr_shaper = { 0 };
+ struct dpni_tx_shaping_cfg tx_er_shaper = { 0 };
+ int err;
+
+ if (p->command == TC_TBF_STATS)
+ return -EOPNOTSUPP;
+
+ /* Only per port Tx shaping */
+ if (p->parent != TC_H_ROOT)
+ return -EOPNOTSUPP;
+
+ if (p->command == TC_TBF_REPLACE) {
+ if (cfg->max_size > DPAA2_ETH_MAX_BURST_SIZE) {
+ netdev_err(net_dev, "burst size cannot be greater than %d\n",
+ DPAA2_ETH_MAX_BURST_SIZE);
+ return -EINVAL;
+ }
+
+ tx_cr_shaper.max_burst_size = cfg->max_size;
+ /* The TBF interface is in bytes/s, whereas DPAA2 expects the
+ * rate in Mbits/s
+ */
+ tx_cr_shaper.rate_limit = bps_to_mbits(cfg->rate.rate_bytes_ps);
+ }
+
+ err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &tx_cr_shaper,
+ &tx_er_shaper, 0);
+ if (err) {
+ netdev_err(net_dev, "dpni_set_tx_shaping() = %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_setup_tc(struct net_device *net_dev,
+ enum tc_setup_type type, void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return dpaa2_eth_setup_mqprio(net_dev, type_data);
+ case TC_SETUP_QDISC_TBF:
+ return dpaa2_eth_setup_tbf(net_dev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct net_device_ops dpaa2_eth_ops = {
+ .ndo_open = dpaa2_eth_open,
+ .ndo_start_xmit = dpaa2_eth_tx,
+ .ndo_stop = dpaa2_eth_stop,
+ .ndo_set_mac_address = dpaa2_eth_set_addr,
+ .ndo_get_stats64 = dpaa2_eth_get_stats,
+ .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
+ .ndo_set_features = dpaa2_eth_set_features,
+ .ndo_eth_ioctl = dpaa2_eth_ioctl,
+ .ndo_change_mtu = dpaa2_eth_change_mtu,
+ .ndo_bpf = dpaa2_eth_xdp,
+ .ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
+ .ndo_setup_tc = dpaa2_eth_setup_tc,
+ .ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid
+};
+
+static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
+{
+ struct dpaa2_eth_channel *ch;
+
+ ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
+
+ /* Update NAPI statistics */
+ ch->stats.cdan++;
+
+ napi_schedule(&ch->napi);
+}
+
+/* Allocate and configure a DPCON object */
+static struct fsl_mc_device *dpaa2_eth_setup_dpcon(struct dpaa2_eth_priv *priv)
+{
+ struct fsl_mc_device *dpcon;
+ struct device *dev = priv->net_dev->dev.parent;
+ int err;
+
+ err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
+ FSL_MC_POOL_DPCON, &dpcon);
+ if (err) {
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_info(dev, "Not enough DPCONs, will go on as-is\n");
+ return ERR_PTR(err);
+ }
+
+ err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
+ if (err) {
+ dev_err(dev, "dpcon_open() failed\n");
+ goto free;
+ }
+
+ err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
+ if (err) {
+ dev_err(dev, "dpcon_reset() failed\n");
+ goto close;
+ }
+
+ err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
+ if (err) {
+ dev_err(dev, "dpcon_enable() failed\n");
+ goto close;
+ }
+
+ return dpcon;
+
+close:
+ dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
+free:
+ fsl_mc_object_free(dpcon);
+
+ return ERR_PTR(err);
+}
+
+static void dpaa2_eth_free_dpcon(struct dpaa2_eth_priv *priv,
+ struct fsl_mc_device *dpcon)
+{
+ dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
+ dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
+ fsl_mc_object_free(dpcon);
+}
+
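+/* Allocate a software channel backed by a DPCON object and cache its
+ * DPCON and QBMan channel ids.
+ */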
+static struct dpaa2_eth_channel *dpaa2_eth_alloc_channel(struct dpaa2_eth_priv *priv)
+{
+ struct dpaa2_eth_channel *channel;
+ struct dpcon_attr attr;
+ struct device *dev = priv->net_dev->dev.parent;
+ int err;
+
+ channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return NULL;
+
+ channel->dpcon = dpaa2_eth_setup_dpcon(priv);
+ if (IS_ERR(channel->dpcon)) {
+ err = PTR_ERR(channel->dpcon);
+ goto err_setup;
+ }
+
+ err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
+ &attr);
+ if (err) {
+ dev_err(dev, "dpcon_get_attributes() failed\n");
+ goto err_get_attr;
+ }
+
+ channel->dpcon_id = attr.id;
+ channel->ch_id = attr.qbman_ch_id;
+ channel->priv = priv;
+
+ return channel;
+
+err_get_attr:
+ dpaa2_eth_free_dpcon(priv, channel->dpcon);
+err_setup:
+ kfree(channel);
+ return ERR_PTR(err);
+}
+
+static void dpaa2_eth_free_channel(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *channel)
+{
+ dpaa2_eth_free_dpcon(priv, channel->dpcon);
+ kfree(channel);
+}
+
+/* DPIO setup: allocate and configure QBMan channels, setup core affinity
+ * and register data availability notifications
+ */
+static int dpaa2_eth_setup_dpio(struct dpaa2_eth_priv *priv)
+{
+ struct dpaa2_io_notification_ctx *nctx;
+ struct dpaa2_eth_channel *channel;
+ struct dpcon_notification_cfg dpcon_notif_cfg;
+ struct device *dev = priv->net_dev->dev.parent;
+ int i, err;
+
+ /* We want the ability to spread ingress traffic (RX, TX conf) to as
+ * many cores as possible, so we need one channel for each core
+ * (unless there's fewer queues than cores, in which case the extra
+ * channels would be wasted).
+ * Allocate one channel per core and register it to the core's
+ * affine DPIO. If not enough channels are available for all cores
+ * or if some cores don't have an affine DPIO, there will be no
+ * ingress frame processing on those cores.
+ */
+ cpumask_clear(&priv->dpio_cpumask);
+ for_each_online_cpu(i) {
+ /* Try to allocate a channel */
+ channel = dpaa2_eth_alloc_channel(priv);
+ if (IS_ERR_OR_NULL(channel)) {
+ err = PTR_ERR_OR_ZERO(channel);
+ if (err != -EPROBE_DEFER)
+ dev_info(dev,
+ "No affine channel for cpu %d and above\n", i);
+ goto err_alloc_ch;
+ }
+
+ priv->channel[priv->num_channels] = channel;
+
+ nctx = &channel->nctx;
+ nctx->is_cdan = 1;
+ nctx->cb = dpaa2_eth_cdan_cb;
+ nctx->id = channel->ch_id;
+ nctx->desired_cpu = i;
+
+ /* Register the new context */
+ channel->dpio = dpaa2_io_service_select(i);
+ err = dpaa2_io_service_register(channel->dpio, nctx, dev);
+ if (err) {
+ dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
+ /* If no affine DPIO for this core, there's probably
+ * none available for next cores either. Signal we want
+ * to retry later, in case the DPIO devices weren't
+ * probed yet.
+ */
+ err = -EPROBE_DEFER;
+ goto err_service_reg;
+ }
+
+ /* Register DPCON notification with MC */
+ dpcon_notif_cfg.dpio_id = nctx->dpio_id;
+ dpcon_notif_cfg.priority = 0;
+ dpcon_notif_cfg.user_ctx = nctx->qman64;
+ err = dpcon_set_notification(priv->mc_io, 0,
+ channel->dpcon->mc_handle,
+ &dpcon_notif_cfg);
+ if (err) {
+			dev_err(dev, "dpcon_set_notification() failed\n");
+ goto err_set_cdan;
+ }
+
+ /* If we managed to allocate a channel and also found an affine
+ * DPIO for this core, add it to the final mask
+ */
+ cpumask_set_cpu(i, &priv->dpio_cpumask);
+ priv->num_channels++;
+
+ /* Stop if we already have enough channels to accommodate all
+ * RX and TX conf queues
+ */
+ if (priv->num_channels == priv->dpni_attrs.num_queues)
+ break;
+ }
+
+ return 0;
+
+err_set_cdan:
+ dpaa2_io_service_deregister(channel->dpio, nctx, dev);
+err_service_reg:
+ dpaa2_eth_free_channel(priv, channel);
+err_alloc_ch:
+ if (err == -EPROBE_DEFER) {
+ for (i = 0; i < priv->num_channels; i++) {
+ channel = priv->channel[i];
+ nctx = &channel->nctx;
+ dpaa2_io_service_deregister(channel->dpio, nctx, dev);
+ dpaa2_eth_free_channel(priv, channel);
+ }
+ priv->num_channels = 0;
+ return err;
+ }
+
+ if (cpumask_empty(&priv->dpio_cpumask)) {
+ dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
+ return -ENODEV;
+ }
+
+ dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
+ cpumask_pr_args(&priv->dpio_cpumask));
+
+ return 0;
+}
+
+static void dpaa2_eth_free_dpio(struct dpaa2_eth_priv *priv)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpaa2_eth_channel *ch;
+ int i;
+
+ /* deregister CDAN notifications and free channels */
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
+ dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
+ dpaa2_eth_free_channel(priv, ch);
+ }
+}
+
+static struct dpaa2_eth_channel *dpaa2_eth_get_affine_channel(struct dpaa2_eth_priv *priv,
+ int cpu)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ int i;
+
+ for (i = 0; i < priv->num_channels; i++)
+ if (priv->channel[i]->nctx.desired_cpu == cpu)
+ return priv->channel[i];
+
+ /* We should never get here. Issue a warning and return
+ * the first channel, because it's still better than nothing
+ */
+ dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
+
+ return priv->channel[0];
+}
+
+static void dpaa2_eth_set_fq_affinity(struct dpaa2_eth_priv *priv)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpaa2_eth_fq *fq;
+ int rx_cpu, txc_cpu;
+ int i;
+
+ /* For each FQ, pick one channel/CPU to deliver frames to.
+ * This may well change at runtime, either through irqbalance or
+ * through direct user intervention.
+ */
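+ /* E.g. with dpio_cpumask = {0,1} and four Rx FQs, frames are spread
+ * round-robin: FQ0->cpu0, FQ1->cpu1, FQ2->cpu0, FQ3->cpu1.
+ */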
+ rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);
+
+ for (i = 0; i < priv->num_fqs; i++) {
+ fq = &priv->fq[i];
+ switch (fq->type) {
+ case DPAA2_RX_FQ:
+ case DPAA2_RX_ERR_FQ:
+ fq->target_cpu = rx_cpu;
+ rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
+ if (rx_cpu >= nr_cpu_ids)
+ rx_cpu = cpumask_first(&priv->dpio_cpumask);
+ break;
+ case DPAA2_TX_CONF_FQ:
+ fq->target_cpu = txc_cpu;
+ txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
+ if (txc_cpu >= nr_cpu_ids)
+ txc_cpu = cpumask_first(&priv->dpio_cpumask);
+ break;
+ default:
+ dev_err(dev, "Unknown FQ type: %d\n", fq->type);
+ }
+ fq->channel = dpaa2_eth_get_affine_channel(priv, fq->target_cpu);
+ }
+
+ update_xps(priv);
+}
+
+static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv)
+{
+ int i, j;
+
+ /* We have one TxConf FQ per Tx flow.
+ * The number of Tx and Rx queues is the same.
+ * Tx queues come first in the fq array.
+ */
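+ /* E.g. a DPNI with 2 queues and 2 traffic classes gets
+ * 2 Tx conf FQs + 2 * 2 Rx FQs + 1 Rx error FQ = 7 FQs in total.
+ */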
+ for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
+ priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
+ priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
+ priv->fq[priv->num_fqs++].flowid = (u16)i;
+ }
+
+ for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
+ for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
+ priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
+ priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
+ priv->fq[priv->num_fqs].tc = (u8)j;
+ priv->fq[priv->num_fqs++].flowid = (u16)i;
+ }
+ }
+
+ /* We have exactly one Rx error queue per DPNI */
+ priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
+ priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
+
+ /* For each FQ, decide on which core to process incoming frames */
+ dpaa2_eth_set_fq_affinity(priv);
+}
+
+/* Allocate and configure one buffer pool for each interface */
+static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv)
+{
+ int err;
+ struct fsl_mc_device *dpbp_dev;
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpbp_attr dpbp_attrs;
+
+ err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
+ &dpbp_dev);
+ if (err) {
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_err(dev, "DPBP device allocation failed\n");
+ return err;
+ }
+
+ priv->dpbp_dev = dpbp_dev;
+
+ err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
+ &dpbp_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpbp_open() failed\n");
+ goto err_open;
+ }
+
+ err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpbp_reset() failed\n");
+ goto err_reset;
+ }
+
+ err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpbp_enable() failed\n");
+ goto err_enable;
+ }
+
+ err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
+ &dpbp_attrs);
+ if (err) {
+ dev_err(dev, "dpbp_get_attributes() failed\n");
+ goto err_get_attr;
+ }
+ priv->bpid = dpbp_attrs.bpid;
+
+ return 0;
+
+err_get_attr:
+ dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
+err_enable:
+err_reset:
+ dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
+err_open:
+ fsl_mc_object_free(dpbp_dev);
+
+ return err;
+}
+
+static void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv)
+{
+ dpaa2_eth_drain_pool(priv);
+ dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
+ dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
+ fsl_mc_object_free(priv->dpbp_dev);
+}
+
+static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_buffer_layout buf_layout = {0};
+ u16 rx_buf_align;
+ int err;
+
+ /* We need to check for WRIOP version 1.0.0, but depending on the MC
+ * version, this number is not always provided correctly on rev1.
+ * We need to check for both alternatives in this situation.
+ */
+ if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
+ priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
+ rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
+ else
+ rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
+
+ /* We need to ensure that the buffer size seen by WRIOP is a multiple
+ * of 64 or 256 bytes depending on the WRIOP version.
+ */
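+ /* E.g. an initial size of 3776B stays 3776B under 64B alignment but is
+ * trimmed down to 3584B under 256B alignment (values illustrative only).
+ */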
+ priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align);
+
+ /* tx buffer */
+ buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
+ buf_layout.pass_timestamp = true;
+ buf_layout.pass_frame_status = true;
+ buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
+ DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
+ DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
+ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_TX, &buf_layout);
+ if (err) {
+ dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
+ return err;
+ }
+
+ /* tx-confirm buffer */
+ buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
+ DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
+ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_TX_CONFIRM, &buf_layout);
+ if (err) {
+ dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
+ return err;
+ }
+
+ /* Now that we've set our tx buffer layout, retrieve the minimum
+ * required tx data offset.
+ */
+ err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
+ &priv->tx_data_offset);
+ if (err) {
+ dev_err(dev, "dpni_get_tx_data_offset() failed\n");
+ return err;
+ }
+
+ if ((priv->tx_data_offset % 64) != 0)
+ dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
+ priv->tx_data_offset);
+
+ /* rx buffer */
+ buf_layout.pass_frame_status = true;
+ buf_layout.pass_parser_result = true;
+ buf_layout.data_align = rx_buf_align;
+ buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
+ buf_layout.private_data_size = 0;
+ buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
+ DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
+ DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
+ DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
+ DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
+ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_RX, &buf_layout);
+ if (err) {
+ dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+#define DPNI_ENQUEUE_FQID_VER_MAJOR 7
+#define DPNI_ENQUEUE_FQID_VER_MINOR 9
+
+static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq,
+ struct dpaa2_fd *fd, u8 prio,
+ u32 num_frames __always_unused,
+ int *frames_enqueued)
+{
+ int err;
+
+ err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
+ priv->tx_qdid, prio,
+ fq->tx_qdbin, fd);
+ if (!err && frames_enqueued)
+ *frames_enqueued = 1;
+ return err;
+}
+
+static inline int dpaa2_eth_enqueue_fq_multiple(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq,
+ struct dpaa2_fd *fd,
+ u8 prio, u32 num_frames,
+ int *frames_enqueued)
+{
+ int err;
+
+ err = dpaa2_io_service_enqueue_multiple_fq(fq->channel->dpio,
+ fq->tx_fqid[prio],
+ fd, num_frames);
+
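+ /* The DPIO service call returns how many frames were actually enqueued,
+ * which may be fewer than requested; zero means the portal had no room
+ * at all, so report -EBUSY and let the caller retry.
+ */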
+ if (err == 0)
+ return -EBUSY;
+
+ if (frames_enqueued)
+ *frames_enqueued = err;
+ return 0;
+}
+
+static void dpaa2_eth_set_enqueue_mode(struct dpaa2_eth_priv *priv)
+{
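+ /* DPNI API versions older than 7.9 only support enqueueing by QDID;
+ * newer ones let us enqueue directly by FQID, which also allows
+ * batching several frames in one portal command.
+ */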
+ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
+ DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
+ priv->enqueue = dpaa2_eth_enqueue_qd;
+ else
+ priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
+}
+
+static int dpaa2_eth_set_pause(struct dpaa2_eth_priv *priv)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_link_cfg link_cfg = {0};
+ int err;
+
+ /* Get the default link options so we don't override other flags */
+ err = dpni_get_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
+ if (err) {
+ dev_err(dev, "dpni_get_link_cfg() failed\n");
+ return err;
+ }
+
+ /* By default, enable both Rx and Tx pause frames */
+ link_cfg.options |= DPNI_LINK_OPT_PAUSE;
+ link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
+ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_link_cfg() failed\n");
+ return err;
+ }
+
+ priv->link_state.options = link_cfg.options;
+
+ return 0;
+}
+
+static void dpaa2_eth_update_tx_fqids(struct dpaa2_eth_priv *priv)
+{
+ struct dpni_queue_id qid = {0};
+ struct dpaa2_eth_fq *fq;
+ struct dpni_queue queue;
+ int i, j, err;
+
+ /* We only use Tx FQIDs for FQID-based enqueue, so check
+ * if DPNI version supports it before updating FQIDs
+ */
+ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
+ DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
+ return;
+
+ for (i = 0; i < priv->num_fqs; i++) {
+ fq = &priv->fq[i];
+ if (fq->type != DPAA2_TX_CONF_FQ)
+ continue;
+ for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
+ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_TX, j, fq->flowid,
+ &queue, &qid);
+ if (err)
+ goto out_err;
+
+ fq->tx_fqid[j] = qid.fqid;
+ if (fq->tx_fqid[j] == 0)
+ goto out_err;
+ }
+ }
+
+ priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
+
+ return;
+
+out_err:
+ netdev_info(priv->net_dev,
+ "Error reading Tx FQID, fallback to QDID-based enqueue\n");
+ priv->enqueue = dpaa2_eth_enqueue_qd;
+}
+
+/* Configure ingress classification based on VLAN PCP */
+static int dpaa2_eth_set_vlan_qos(struct dpaa2_eth_priv *priv)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpkg_profile_cfg kg_cfg = {0};
+ struct dpni_qos_tbl_cfg qos_cfg = {0};
+ struct dpni_rule_cfg key_params;
+ void *dma_mem, *key, *mask;
+ u8 key_size = 2; /* VLAN TCI field */
+ int i, pcp, err;
+
+ /* VLAN-based classification only makes sense if we have multiple
+ * traffic classes.
+ * Also, we need to extract just the 3-bit PCP field from the VLAN
+ * header and we can only do that by using a mask
+ */
+ if (dpaa2_eth_tc_count(priv) == 1 || !dpaa2_eth_fs_mask_enabled(priv)) {
+ dev_dbg(dev, "VLAN-based QoS classification not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
+ if (!dma_mem)
+ return -ENOMEM;
+
+ kg_cfg.num_extracts = 1;
+ kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
+ kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN;
+ kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
+ kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI;
+
+ err = dpni_prepare_key_cfg(&kg_cfg, dma_mem);
+ if (err) {
+ dev_err(dev, "dpni_prepare_key_cfg failed\n");
+ goto out_free_tbl;
+ }
+
+ /* set QoS table */
+ qos_cfg.default_tc = 0;
+ qos_cfg.discard_on_miss = 0;
+ qos_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
+ DPAA2_CLASSIFIER_DMA_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) {
+ dev_err(dev, "QoS table DMA mapping failed\n");
+ err = -ENOMEM;
+ goto out_free_tbl;
+ }
+
+ err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_qos_table failed\n");
+ goto out_unmap_tbl;
+ }
+
+ /* Add QoS table entries */
+ key = kzalloc(key_size * 2, GFP_KERNEL);
+ if (!key) {
+ err = -ENOMEM;
+ goto out_unmap_tbl;
+ }
+ mask = key + key_size;
+ *(__be16 *)mask = cpu_to_be16(VLAN_PRIO_MASK);
+
+ key_params.key_iova = dma_map_single(dev, key, key_size * 2,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, key_params.key_iova)) {
+ dev_err(dev, "Qos table entry DMA mapping failed\n");
+ err = -ENOMEM;
+ goto out_free_key;
+ }
+
+ key_params.mask_iova = key_params.key_iova + key_size;
+ key_params.key_size = key_size;
+
+ /* We add rules for PCP-based distribution starting with highest
+ * priority (VLAN PCP = 7). If this DPNI doesn't have enough traffic
+ * classes to accommodate all priority levels, the lowest ones end up
+ * on TC 0 which was configured as default
+ */
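+ /* E.g. with 4 traffic classes: PCP 7->TC3, 6->TC2, 5->TC1, 4->TC0;
+ * PCP values 3..0 miss the table and land on the default TC 0.
+ */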
+ for (i = dpaa2_eth_tc_count(priv) - 1, pcp = 7; i >= 0; i--, pcp--) {
+ *(__be16 *)key = cpu_to_be16(pcp << VLAN_PRIO_SHIFT);
+ dma_sync_single_for_device(dev, key_params.key_iova,
+ key_size * 2, DMA_TO_DEVICE);
+
+ err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token,
+ &key_params, i, i);
+ if (err) {
+ dev_err(dev, "dpni_add_qos_entry failed\n");
+ dpni_clear_qos_table(priv->mc_io, 0, priv->mc_token);
+ goto out_unmap_key;
+ }
+ }
+
+ priv->vlan_cls_enabled = true;
+
+ /* Table and key memory is not persistent, clean everything up after
+ * configuration is finished
+ */
+out_unmap_key:
+ dma_unmap_single(dev, key_params.key_iova, key_size * 2, DMA_TO_DEVICE);
+out_free_key:
+ kfree(key);
+out_unmap_tbl:
+ dma_unmap_single(dev, qos_cfg.key_cfg_iova, DPAA2_CLASSIFIER_DMA_SIZE,
+ DMA_TO_DEVICE);
+out_free_tbl:
+ kfree(dma_mem);
+
+ return err;
+}
+
+/* Configure the DPNI object this interface is associated with */
+static int dpaa2_eth_setup_dpni(struct fsl_mc_device *ls_dev)
+{
+ struct device *dev = &ls_dev->dev;
+ struct dpaa2_eth_priv *priv;
+ struct net_device *net_dev;
+ int err;
+
+ net_dev = dev_get_drvdata(dev);
+ priv = netdev_priv(net_dev);
+
+ /* get a handle for the DPNI object */
+ err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
+ if (err) {
+ dev_err(dev, "dpni_open() failed\n");
+ return err;
+ }
+
+ /* Check if we can work with this DPNI object */
+ err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
+ &priv->dpni_ver_minor);
+ if (err) {
+ dev_err(dev, "dpni_get_api_version() failed\n");
+ goto close;
+ }
+ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
+ dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
+ priv->dpni_ver_major, priv->dpni_ver_minor,
+ DPNI_VER_MAJOR, DPNI_VER_MINOR);
+ err = -ENOTSUPP;
+ goto close;
+ }
+
+ ls_dev->mc_io = priv->mc_io;
+ ls_dev->mc_handle = priv->mc_token;
+
+ err = dpni_reset(priv->mc_io, 0, priv->mc_token);
+ if (err) {
+ dev_err(dev, "dpni_reset() failed\n");
+ goto close;
+ }
+
+ err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
+ &priv->dpni_attrs);
+ if (err) {
+ dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
+ goto close;
+ }
+
+ err = dpaa2_eth_set_buffer_layout(priv);
+ if (err)
+ goto close;
+
+ dpaa2_eth_set_enqueue_mode(priv);
+
+ /* Enable pause frame support */
+ if (dpaa2_eth_has_pause_support(priv)) {
+ err = dpaa2_eth_set_pause(priv);
+ if (err)
+ goto close;
+ }
+
+ err = dpaa2_eth_set_vlan_qos(priv);
+ if (err && err != -EOPNOTSUPP)
+ goto close;
+
+ priv->cls_rules = devm_kcalloc(dev, dpaa2_eth_fs_count(priv),
+ sizeof(struct dpaa2_eth_cls_rule),
+ GFP_KERNEL);
+ if (!priv->cls_rules) {
+ err = -ENOMEM;
+ goto close;
+ }
+
+ return 0;
+
+close:
+ dpni_close(priv->mc_io, 0, priv->mc_token);
+
+ return err;
+}
+
+static void dpaa2_eth_free_dpni(struct dpaa2_eth_priv *priv)
+{
+ int err;
+
+ err = dpni_reset(priv->mc_io, 0, priv->mc_token);
+ if (err)
+ netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
+ err);
+
+ dpni_close(priv->mc_io, 0, priv->mc_token);
+}
+
+static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_queue queue;
+ struct dpni_queue_id qid;
+ int err;
+
+ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid);
+ if (err) {
+ dev_err(dev, "dpni_get_queue(RX) failed\n");
+ return err;
+ }
+
+ fq->fqid = qid.fqid;
+
+ queue.destination.id = fq->channel->dpcon_id;
+ queue.destination.type = DPNI_DEST_DPCON;
+ queue.destination.priority = 1;
+ queue.user_context = (u64)(uintptr_t)fq;
+ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_RX, fq->tc, fq->flowid,
+ DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
+ &queue);
+ if (err) {
+ dev_err(dev, "dpni_set_queue(RX) failed\n");
+ return err;
+ }
+
+ /* xdp_rxq setup: register only once per channel, on the first TC */
+ if (fq->tc > 0)
+ return 0;
+
+ err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
+ fq->flowid, 0);
+ if (err) {
+ dev_err(dev, "xdp_rxq_info_reg failed\n");
+ return err;
+ }
+
+ err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq,
+ MEM_TYPE_PAGE_ORDER0, NULL);
+ if (err) {
+ dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_setup_tx_flow(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_queue queue;
+ struct dpni_queue_id qid;
+ int i, err;
+
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_TX, i, fq->flowid,
+ &queue, &qid);
+ if (err) {
+ dev_err(dev, "dpni_get_queue(TX) failed\n");
+ return err;
+ }
+ fq->tx_fqid[i] = qid.fqid;
+ }
+
+ /* All Tx queues belonging to the same flowid have the same qdbin */
+ fq->tx_qdbin = qid.qdbin;
+
+ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
+ &queue, &qid);
+ if (err) {
+ dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
+ return err;
+ }
+
+ fq->fqid = qid.fqid;
+
+ queue.destination.id = fq->channel->dpcon_id;
+ queue.destination.type = DPNI_DEST_DPCON;
+ queue.destination.priority = 0;
+ queue.user_context = (u64)(uintptr_t)fq;
+ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
+ DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
+ &queue);
+ if (err) {
+ dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_queue q = { { 0 } };
+ struct dpni_queue_id qid;
+ u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
+ int err;
+
+ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
+ if (err) {
+ dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
+ return err;
+ }
+
+ fq->fqid = qid.fqid;
+
+ q.destination.id = fq->channel->dpcon_id;
+ q.destination.type = DPNI_DEST_DPCON;
+ q.destination.priority = 1;
+ q.user_context = (u64)(uintptr_t)fq;
+ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
+ if (err) {
+ dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+/* Supported header fields for Rx hash distribution key */
+static const struct dpaa2_eth_dist_fields dist_fields[] = {
+ {
+ /* L2 header */
+ .rxnfc_field = RXH_L2DA,
+ .cls_prot = NET_PROT_ETH,
+ .cls_field = NH_FLD_ETH_DA,
+ .id = DPAA2_ETH_DIST_ETHDST,
+ .size = 6,
+ }, {
+ .cls_prot = NET_PROT_ETH,
+ .cls_field = NH_FLD_ETH_SA,
+ .id = DPAA2_ETH_DIST_ETHSRC,
+ .size = 6,
+ }, {
+ /* This is the last ethertype field parsed:
+ * depending on frame format, it can be the MAC ethertype
+ * or the VLAN etype.
+ */
+ .cls_prot = NET_PROT_ETH,
+ .cls_field = NH_FLD_ETH_TYPE,
+ .id = DPAA2_ETH_DIST_ETHTYPE,
+ .size = 2,
+ }, {
+ /* VLAN header */
+ .rxnfc_field = RXH_VLAN,
+ .cls_prot = NET_PROT_VLAN,
+ .cls_field = NH_FLD_VLAN_TCI,
+ .id = DPAA2_ETH_DIST_VLAN,
+ .size = 2,
+ }, {
+ /* IP header */
+ .rxnfc_field = RXH_IP_SRC,
+ .cls_prot = NET_PROT_IP,
+ .cls_field = NH_FLD_IP_SRC,
+ .id = DPAA2_ETH_DIST_IPSRC,
+ .size = 4,
+ }, {
+ .rxnfc_field = RXH_IP_DST,
+ .cls_prot = NET_PROT_IP,
+ .cls_field = NH_FLD_IP_DST,
+ .id = DPAA2_ETH_DIST_IPDST,
+ .size = 4,
+ }, {
+ .rxnfc_field = RXH_L3_PROTO,
+ .cls_prot = NET_PROT_IP,
+ .cls_field = NH_FLD_IP_PROTO,
+ .id = DPAA2_ETH_DIST_IPPROTO,
+ .size = 1,
+ }, {
+ /* Using UDP ports, this is functionally equivalent to raw
+ * byte pairs from L4 header.
+ */
+ .rxnfc_field = RXH_L4_B_0_1,
+ .cls_prot = NET_PROT_UDP,
+ .cls_field = NH_FLD_UDP_PORT_SRC,
+ .id = DPAA2_ETH_DIST_L4SRC,
+ .size = 2,
+ }, {
+ .rxnfc_field = RXH_L4_B_2_3,
+ .cls_prot = NET_PROT_UDP,
+ .cls_field = NH_FLD_UDP_PORT_DST,
+ .id = DPAA2_ETH_DIST_L4DST,
+ .size = 2,
+ },
+};
+
+/* Configure the Rx hash key using the legacy API */
+static int dpaa2_eth_config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_rx_tc_dist_cfg dist_cfg;
+ int i, err = 0;
+
+ memset(&dist_cfg, 0, sizeof(dist_cfg));
+
+ dist_cfg.key_cfg_iova = key;
+ dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
+ dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
+
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token,
+ i, &dist_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_rx_tc_dist failed\n");
+ break;
+ }
+ }
+
+ return err;
+}
+
+/* Configure the Rx hash key using the new API */
+static int dpaa2_eth_config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_rx_dist_cfg dist_cfg;
+ int i, err = 0;
+
+ memset(&dist_cfg, 0, sizeof(dist_cfg));
+
+ dist_cfg.key_cfg_iova = key;
+ dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
+ dist_cfg.enable = 1;
+
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ dist_cfg.tc = i;
+ err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token,
+ &dist_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_rx_hash_dist failed\n");
+ break;
+ }
+
+ /* If the flow steering / hashing key is shared between all
+ * traffic classes, install it just once
+ */
+ if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
+ break;
+ }
+
+ return err;
+}
+
+/* Configure the Rx flow classification key */
+static int dpaa2_eth_config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_rx_dist_cfg dist_cfg;
+ int i, err = 0;
+
+ memset(&dist_cfg, 0, sizeof(dist_cfg));
+
+ dist_cfg.key_cfg_iova = key;
+ dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
+ dist_cfg.enable = 1;
+
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ dist_cfg.tc = i;
+ err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token,
+ &dist_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_rx_fs_dist failed\n");
+ break;
+ }
+
+ /* If the flow steering / hashing key is shared between all
+ * traffic classes, install it just once
+ */
+ if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
+ break;
+ }
+
+ return err;
+}
+
+/* Size of the Rx flow classification key */
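+/* E.g. fields = DPAA2_ETH_DIST_IPSRC | DPAA2_ETH_DIST_IPDST yields
+ * 4 + 4 = 8 bytes, per the dist_fields table above.
+ */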
+int dpaa2_eth_cls_key_size(u64 fields)
+{
+ int i, size = 0;
+
+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
+ if (!(fields & dist_fields[i].id))
+ continue;
+ size += dist_fields[i].size;
+ }
+
+ return size;
+}
+
+/* Offset of header field in Rx classification key */
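+/* E.g. NH_FLD_IP_SRC comes after ETH_DA(6) + ETH_SA(6) + ETH_TYPE(2) +
+ * VLAN_TCI(2), i.e. at offset 16 in the key.
+ */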
+int dpaa2_eth_cls_fld_off(int prot, int field)
+{
+ int i, off = 0;
+
+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
+ if (dist_fields[i].cls_prot == prot &&
+ dist_fields[i].cls_field == field)
+ return off;
+ off += dist_fields[i].size;
+ }
+
+ WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
+ return 0;
+}
+
+/* Prune unused fields from the classification rule.
+ * Used when masking is not supported
+ */
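+/* E.g. if only the ethertype and IP protocol fields are selected, their
+ * bytes are compacted down to offsets 0 and 2 of the key, respectively.
+ */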
+void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
+{
+ int off = 0, new_off = 0;
+ int i, size;
+
+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
+ size = dist_fields[i].size;
+ if (dist_fields[i].id & fields) {
+ memcpy(key_mem + new_off, key_mem + off, size);
+ new_off += size;
+ }
+ off += size;
+ }
+}
+
+/* Set Rx distribution (hash or flow classification) key
+ * flags is a combination of DPAA2_ETH_DIST_* bits
+ */
+static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
+ enum dpaa2_eth_rx_dist type, u64 flags)
+{
+ struct device *dev = net_dev->dev.parent;
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct dpkg_profile_cfg cls_cfg;
+ u32 rx_hash_fields = 0;
+ dma_addr_t key_iova;
+ u8 *dma_mem;
+ int i;
+ int err = 0;
+
+ memset(&cls_cfg, 0, sizeof(cls_cfg));
+
+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
+ struct dpkg_extract *key =
+ &cls_cfg.extracts[cls_cfg.num_extracts];
+
+ /* For both Rx hashing and classification keys
+ * we set only the selected fields.
+ */
+ if (!(flags & dist_fields[i].id))
+ continue;
+ if (type == DPAA2_ETH_RX_DIST_HASH)
+ rx_hash_fields |= dist_fields[i].rxnfc_field;
+
+ if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
+ dev_err(dev, "error adding key extraction rule, too many rules?\n");
+ return -E2BIG;
+ }
+
+ key->type = DPKG_EXTRACT_FROM_HDR;
+ key->extract.from_hdr.prot = dist_fields[i].cls_prot;
+ key->extract.from_hdr.type = DPKG_FULL_FIELD;
+ key->extract.from_hdr.field = dist_fields[i].cls_field;
+ cls_cfg.num_extracts++;
+ }
+
+ dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
+ if (!dma_mem)
+ return -ENOMEM;
+
+ err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
+ if (err) {
+ dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
+ goto free_key;
+ }
+
+ /* Prepare for setting the rx dist */
+ key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, key_iova)) {
+ dev_err(dev, "DMA mapping failed\n");
+ err = -ENOMEM;
+ goto free_key;
+ }
+
+ if (type == DPAA2_ETH_RX_DIST_HASH) {
+ if (dpaa2_eth_has_legacy_dist(priv))
+ err = dpaa2_eth_config_legacy_hash_key(priv, key_iova);
+ else
+ err = dpaa2_eth_config_hash_key(priv, key_iova);
+ } else {
+ err = dpaa2_eth_config_cls_key(priv, key_iova);
+ }
+
+ dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
+ DMA_TO_DEVICE);
+ if (!err && type == DPAA2_ETH_RX_DIST_HASH)
+ priv->rx_hash_fields = rx_hash_fields;
+
+free_key:
+ kfree(dma_mem);
+ return err;
+}
+
+int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ u64 key = 0;
+ int i;
+
+ if (!dpaa2_eth_hash_enabled(priv))
+ return -EOPNOTSUPP;
+
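+ /* Translate ethtool RXH_* flags into our internal DPAA2_ETH_DIST_*
+ * field ids, e.g. RXH_IP_SRC -> DPAA2_ETH_DIST_IPSRC
+ */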
+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
+ if (dist_fields[i].rxnfc_field & flags)
+ key |= dist_fields[i].id;
+
+ return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
+}
+
+int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
+{
+ return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
+}
+
+static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ int err;
+
+ /* Check if we actually support Rx flow classification */
+ if (dpaa2_eth_has_legacy_dist(priv)) {
+ dev_dbg(dev, "Rx cls not supported by current MC version\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!dpaa2_eth_fs_enabled(priv)) {
+ dev_dbg(dev, "Rx cls disabled in DPNI options\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!dpaa2_eth_hash_enabled(priv)) {
+ dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* If there is no support for masking in the classification table,
+ * we don't set a default key, as it will depend on the rules
+ * added by the user at runtime.
+ */
+ if (!dpaa2_eth_fs_mask_enabled(priv))
+ goto out;
+
+ err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
+ if (err)
+ return err;
+
+out:
+ priv->rx_cls_enabled = 1;
+
+ return 0;
+}
+
+/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
+ * frame queues and channels
+ */
+static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
+{
+ struct net_device *net_dev = priv->net_dev;
+ struct device *dev = net_dev->dev.parent;
+ struct dpni_pools_cfg pools_params;
+ struct dpni_error_cfg err_cfg;
+ int err = 0;
+ int i;
+
+ pools_params.num_dpbp = 1;
+ pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
+ pools_params.pools[0].backup_pool = 0;
+ pools_params.pools[0].buffer_size = priv->rx_buf_size;
+ err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
+ if (err) {
+ dev_err(dev, "dpni_set_pools() failed\n");
+ return err;
+ }
+
+ /* have the interface implicitly distribute traffic based on
+ * the default hash key
+ */
+ err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
+ if (err && err != -EOPNOTSUPP)
+ dev_err(dev, "Failed to configure hashing\n");
+
+ /* Configure the flow classification key; it includes all
+ * supported header fields and cannot be modified at runtime
+ */
+ err = dpaa2_eth_set_default_cls(priv);
+ if (err && err != -EOPNOTSUPP)
+ dev_err(dev, "Failed to configure Rx classification key\n");
+
+ /* Configure handling of error frames */
+ err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
+ err_cfg.set_frame_annotation = 1;
+ err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
+ err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
+ &err_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_errors_behavior failed\n");
+ return err;
+ }
+
+ /* Configure Rx and Tx conf queues to generate CDANs */
+ for (i = 0; i < priv->num_fqs; i++) {
+ switch (priv->fq[i].type) {
+ case DPAA2_RX_FQ:
+ err = dpaa2_eth_setup_rx_flow(priv, &priv->fq[i]);
+ break;
+ case DPAA2_TX_CONF_FQ:
+ err = dpaa2_eth_setup_tx_flow(priv, &priv->fq[i]);
+ break;
+ case DPAA2_RX_ERR_FQ:
+ err = setup_rx_err_flow(priv, &priv->fq[i]);
+ break;
+ default:
+ dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
+ return -EINVAL;
+ }
+ if (err)
+ return err;
+ }
+
+ err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_TX, &priv->tx_qdid);
+ if (err) {
+ dev_err(dev, "dpni_get_qdid() failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+/* Allocate rings for storing incoming frame descriptors */
+static int dpaa2_eth_alloc_rings(struct dpaa2_eth_priv *priv)
+{
+ struct net_device *net_dev = priv->net_dev;
+ struct device *dev = net_dev->dev.parent;
+ int i;
+
+ for (i = 0; i < priv->num_channels; i++) {
+ priv->channel[i]->store =
+ dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
+ if (!priv->channel[i]->store) {
+ netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
+ goto err_ring;
+ }
+ }
+
+ return 0;
+
+err_ring:
+ for (i = 0; i < priv->num_channels; i++) {
+ if (!priv->channel[i]->store)
+ break;
+ dpaa2_io_store_destroy(priv->channel[i]->store);
+ }
+
+ return -ENOMEM;
+}
+
+static void dpaa2_eth_free_rings(struct dpaa2_eth_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_channels; i++)
+ dpaa2_io_store_destroy(priv->channel[i]->store);
+}
+
+static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv)
+{
+ struct net_device *net_dev = priv->net_dev;
+ struct device *dev = net_dev->dev.parent;
+ u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
+ int err;
+
+ /* Get firmware address, if any */
+ err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
+ if (err) {
+ dev_err(dev, "dpni_get_port_mac_addr() failed\n");
+ return err;
+ }
+
+ /* Get DPNI attributes address, if any */
+ err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
+ dpni_mac_addr);
+ if (err) {
+ dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
+ return err;
+ }
+
+ /* First check if firmware has any address configured by bootloader */
+ if (!is_zero_ether_addr(mac_addr)) {
+ /* If the DPMAC addr != DPNI addr, update it */
+ if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
+ err = dpni_set_primary_mac_addr(priv->mc_io, 0,
+ priv->mc_token,
+ mac_addr);
+ if (err) {
+ dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
+ return err;
+ }
+ }
+ eth_hw_addr_set(net_dev, mac_addr);
+ } else if (is_zero_ether_addr(dpni_mac_addr)) {
+ /* No MAC address configured, fill in net_dev->dev_addr
+ * with a random one
+ */
+ eth_hw_addr_random(net_dev);
+ dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
+
+ err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
+ net_dev->dev_addr);
+ if (err) {
+ dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
+ return err;
+ }
+
+ /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
+ * practical purposes, this will be our "permanent" mac address,
+ * at least until the next reboot. This move will also permit
+ * register_netdevice() to properly fill up net_dev->perm_addr.
+ */
+ net_dev->addr_assign_type = NET_ADDR_PERM;
+ } else {
+ /* NET_ADDR_PERM is default, all we have to do is
+ * fill in the device addr.
+ */
+ eth_hw_addr_set(net_dev, dpni_mac_addr);
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_netdev_init(struct net_device *net_dev)
+{
+ struct device *dev = net_dev->dev.parent;
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ u32 options = priv->dpni_attrs.options;
+ u64 supported = 0, not_supported = 0;
+ u8 bcast_addr[ETH_ALEN];
+ u8 num_queues;
+ int err;
+
+ net_dev->netdev_ops = &dpaa2_eth_ops;
+ net_dev->ethtool_ops = &dpaa2_ethtool_ops;
+
+ err = dpaa2_eth_set_mac_addr(priv);
+ if (err)
+ return err;
+
+ /* Explicitly add the broadcast address to the MAC filtering table */
+ eth_broadcast_addr(bcast_addr);
+ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
+ if (err) {
+ dev_err(dev, "dpni_add_mac_addr() failed\n");
+ return err;
+ }
+
+ /* Set MTU upper limit; lower limit is 68B (default value) */
+ net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
+ err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
+ DPAA2_ETH_MFL);
+ if (err) {
+ dev_err(dev, "dpni_set_max_frame_length() failed\n");
+ return err;
+ }
+
+ /* Set actual number of queues in the net device */
+ num_queues = dpaa2_eth_queue_count(priv);
+ err = netif_set_real_num_tx_queues(net_dev, num_queues);
+ if (err) {
+ dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
+ return err;
+ }
+ err = netif_set_real_num_rx_queues(net_dev, num_queues);
+ if (err) {
+ dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
+ return err;
+ }
+
+ dpaa2_eth_detect_features(priv);
+
+ /* Capabilities listing */
+ supported |= IFF_LIVE_ADDR_CHANGE;
+
+ if (options & DPNI_OPT_NO_MAC_FILTER)
+ not_supported |= IFF_UNICAST_FLT;
+ else
+ supported |= IFF_UNICAST_FLT;
+
+ net_dev->priv_flags |= supported;
+ net_dev->priv_flags &= ~not_supported;
+
+ /* Features */
+ net_dev->features = NETIF_F_RXCSUM |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_SG | NETIF_F_HIGHDMA |
+ NETIF_F_LLTX | NETIF_F_HW_TC | NETIF_F_TSO;
+ net_dev->gso_max_segs = DPAA2_ETH_ENQUEUE_MAX_FDS;
+ net_dev->hw_features = net_dev->features;
+
+ if (priv->dpni_attrs.vlan_filter_entries)
+ net_dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ return 0;
+}
+
+static int dpaa2_eth_poll_link_state(void *arg)
+{
+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
+ int err;
+
+ while (!kthread_should_stop()) {
+ err = dpaa2_eth_link_state_update(priv);
+ if (unlikely(err))
+ return err;
+
+ msleep(DPAA2_ETH_LINK_STATE_REFRESH);
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
+{
+ struct fsl_mc_device *dpni_dev, *dpmac_dev;
+ struct dpaa2_mac *mac;
+ int err;
+
+ dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
+ dpmac_dev = fsl_mc_get_endpoint(dpni_dev, 0);
+
+ if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
+ return PTR_ERR(dpmac_dev);
+
+ if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
+ return 0;
+
+ mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
+ if (!mac)
+ return -ENOMEM;
+
+ mac->mc_dev = dpmac_dev;
+ mac->mc_io = priv->mc_io;
+ mac->net_dev = priv->net_dev;
+
+ err = dpaa2_mac_open(mac);
+ if (err)
+ goto err_free_mac;
+ priv->mac = mac;
+
+ if (dpaa2_eth_is_type_phy(priv)) {
+ err = dpaa2_mac_connect(mac);
+ if (err && err != -EPROBE_DEFER)
+ netdev_err(priv->net_dev, "Error connecting to the MAC endpoint: %pe",
+ ERR_PTR(err));
+ if (err)
+ goto err_close_mac;
+ }
+
+ return 0;
+
+err_close_mac:
+ dpaa2_mac_close(mac);
+ priv->mac = NULL;
+err_free_mac:
+ kfree(mac);
+ return err;
+}
+
+static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
+{
+ if (dpaa2_eth_is_type_phy(priv))
+ dpaa2_mac_disconnect(priv->mac);
+
+ if (!dpaa2_eth_has_mac(priv))
+ return;
+
+ dpaa2_mac_close(priv->mac);
+ kfree(priv->mac);
+ priv->mac = NULL;
+}
+
+static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
+{
+ u32 status = ~0;
+ struct device *dev = (struct device *)arg;
+ struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
+ struct net_device *net_dev = dev_get_drvdata(dev);
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err;
+
+ err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
+ DPNI_IRQ_INDEX, &status);
+ if (unlikely(err)) {
+ netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
+ return IRQ_HANDLED;
+ }
+
+ if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
+ dpaa2_eth_link_state_update(netdev_priv(net_dev));
+
+ if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) {
+ dpaa2_eth_set_mac_addr(netdev_priv(net_dev));
+ dpaa2_eth_update_tx_fqids(priv);
+
+ rtnl_lock();
+ if (dpaa2_eth_has_mac(priv))
+ dpaa2_eth_disconnect_mac(priv);
+ else
+ dpaa2_eth_connect_mac(priv);
+ rtnl_unlock();
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
+{
+ int err = 0;
+ struct fsl_mc_device_irq *irq;
+
+ err = fsl_mc_allocate_irqs(ls_dev);
+ if (err) {
+ dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
+ return err;
+ }
+
+ irq = ls_dev->irqs[0];
+ err = devm_request_threaded_irq(&ls_dev->dev, irq->virq,
+ NULL, dpni_irq0_handler_thread,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ dev_name(&ls_dev->dev), &ls_dev->dev);
+ if (err < 0) {
+ dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
+ goto free_mc_irq;
+ }
+
+ err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
+ DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED |
+ DPNI_IRQ_EVENT_ENDPOINT_CHANGED);
+ if (err < 0) {
+ dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
+ goto free_irq;
+ }
+
+ err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
+ DPNI_IRQ_INDEX, 1);
+ if (err < 0) {
+ dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
+ goto free_irq;
+ }
+
+ return 0;
+
+free_irq:
+ devm_free_irq(&ls_dev->dev, irq->virq, &ls_dev->dev);
+free_mc_irq:
+ fsl_mc_free_irqs(ls_dev);
+
+ return err;
+}
+
+static void dpaa2_eth_add_ch_napi(struct dpaa2_eth_priv *priv)
+{
+ int i;
+ struct dpaa2_eth_channel *ch;
+
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
+ /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
+ netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll);
+ }
+}
+
+static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv)
+{
+ int i;
+ struct dpaa2_eth_channel *ch;
+
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
+ netif_napi_del(&ch->napi);
+ }
+}
+
+static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
+{
+ struct device *dev;
+ struct net_device *net_dev = NULL;
+ struct dpaa2_eth_priv *priv = NULL;
+ int err = 0;
+
+ dev = &dpni_dev->dev;
+
+ /* Net device */
+ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
+ if (!net_dev) {
+ dev_err(dev, "alloc_etherdev_mq() failed\n");
+ return -ENOMEM;
+ }
+
+ SET_NETDEV_DEV(net_dev, dev);
+ dev_set_drvdata(dev, net_dev);
+
+ priv = netdev_priv(net_dev);
+ priv->net_dev = net_dev;
+
+ priv->iommu_domain = iommu_get_domain_for_dev(dev);
+
+ priv->tx_tstamp_type = HWTSTAMP_TX_OFF;
+ priv->rx_tstamp = false;
+
+ priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", 0, 0);
+ if (!priv->dpaa2_ptp_wq) {
+ err = -ENOMEM;
+ goto err_wq_alloc;
+ }
+
+ INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp);
+ mutex_init(&priv->onestep_tstamp_lock);
+ skb_queue_head_init(&priv->tx_skbs);
+
+ priv->rx_copybreak = DPAA2_ETH_DEFAULT_COPYBREAK;
+
+ /* Obtain a MC portal */
+ err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
+ &priv->mc_io);
+ if (err) {
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_err(dev, "MC portal allocation failed\n");
+ goto err_portal_alloc;
+ }
+
+ /* MC objects initialization and configuration */
+ err = dpaa2_eth_setup_dpni(dpni_dev);
+ if (err)
+ goto err_dpni_setup;
+
+ err = dpaa2_eth_setup_dpio(priv);
+ if (err)
+ goto err_dpio_setup;
+
+ dpaa2_eth_setup_fqs(priv);
+
+ err = dpaa2_eth_setup_dpbp(priv);
+ if (err)
+ goto err_dpbp_setup;
+
+ err = dpaa2_eth_bind_dpni(priv);
+ if (err)
+ goto err_bind;
+
+ /* Add a NAPI context for each channel */
+ dpaa2_eth_add_ch_napi(priv);
+
+ /* Percpu statistics */
+ priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
+ if (!priv->percpu_stats) {
+ dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
+ err = -ENOMEM;
+ goto err_alloc_percpu_stats;
+ }
+ priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
+ if (!priv->percpu_extras) {
+ dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
+ err = -ENOMEM;
+ goto err_alloc_percpu_extras;
+ }
+
+ priv->sgt_cache = alloc_percpu(*priv->sgt_cache);
+ if (!priv->sgt_cache) {
+ dev_err(dev, "alloc_percpu(sgt_cache) failed\n");
+ err = -ENOMEM;
+ goto err_alloc_sgt_cache;
+ }
+
+ priv->fd = alloc_percpu(*priv->fd);
+ if (!priv->fd) {
+ dev_err(dev, "alloc_percpu(fds) failed\n");
+ err = -ENOMEM;
+ goto err_alloc_fds;
+ }
+
+ err = dpaa2_eth_netdev_init(net_dev);
+ if (err)
+ goto err_netdev_init;
+
+ /* Configure checksum offload based on current interface flags */
+ err = dpaa2_eth_set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
+ if (err)
+ goto err_csum;
+
+ err = dpaa2_eth_set_tx_csum(priv,
+ !!(net_dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
+ if (err)
+ goto err_csum;
+
+ err = dpaa2_eth_alloc_rings(priv);
+ if (err)
+ goto err_alloc_rings;
+
+#ifdef CONFIG_FSL_DPAA2_ETH_DCB
+ if (dpaa2_eth_has_pause_support(priv) && priv->vlan_cls_enabled) {
+ priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+ net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops;
+ } else {
+ dev_dbg(dev, "PFC not supported\n");
+ }
+#endif
+
+ err = dpaa2_eth_setup_irqs(dpni_dev);
+ if (err) {
+ netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
+ priv->poll_thread = kthread_run(dpaa2_eth_poll_link_state, priv,
+ "%s_poll_link", net_dev->name);
+ if (IS_ERR(priv->poll_thread)) {
+ dev_err(dev, "Error starting polling thread\n");
+ goto err_poll_thread;
+ }
+ priv->do_link_poll = true;
+ }
+
+ err = dpaa2_eth_connect_mac(priv);
+ if (err)
+ goto err_connect_mac;
+
+ err = dpaa2_eth_dl_alloc(priv);
+ if (err)
+ goto err_dl_register;
+
+ err = dpaa2_eth_dl_traps_register(priv);
+ if (err)
+ goto err_dl_trap_register;
+
+ err = dpaa2_eth_dl_port_add(priv);
+ if (err)
+ goto err_dl_port_add;
+
+ net_dev->needed_headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN;
+
+ err = register_netdev(net_dev);
+ if (err < 0) {
+ dev_err(dev, "register_netdev() failed\n");
+ goto err_netdev_reg;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ dpaa2_dbg_add(priv);
+#endif
+
+ dpaa2_eth_dl_register(priv);
+ dev_info(dev, "Probed interface %s\n", net_dev->name);
+ return 0;
+
+err_netdev_reg:
+ dpaa2_eth_dl_port_del(priv);
+err_dl_port_add:
+ dpaa2_eth_dl_traps_unregister(priv);
+err_dl_trap_register:
+ dpaa2_eth_dl_free(priv);
+err_dl_register:
+ dpaa2_eth_disconnect_mac(priv);
+err_connect_mac:
+ if (priv->do_link_poll)
+ kthread_stop(priv->poll_thread);
+ else
+ fsl_mc_free_irqs(dpni_dev);
+err_poll_thread:
+ dpaa2_eth_free_rings(priv);
+err_alloc_rings:
+err_csum:
+err_netdev_init:
+ free_percpu(priv->fd);
+err_alloc_fds:
+ free_percpu(priv->sgt_cache);
+err_alloc_sgt_cache:
+ free_percpu(priv->percpu_extras);
+err_alloc_percpu_extras:
+ free_percpu(priv->percpu_stats);
+err_alloc_percpu_stats:
+ dpaa2_eth_del_ch_napi(priv);
+err_bind:
+ dpaa2_eth_free_dpbp(priv);
+err_dpbp_setup:
+ dpaa2_eth_free_dpio(priv);
+err_dpio_setup:
+ dpaa2_eth_free_dpni(priv);
+err_dpni_setup:
+ fsl_mc_portal_free(priv->mc_io);
+err_portal_alloc:
+ destroy_workqueue(priv->dpaa2_ptp_wq);
+err_wq_alloc:
+ dev_set_drvdata(dev, NULL);
+ free_netdev(net_dev);
+
+ return err;
+}
+
+static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
+{
+ struct device *dev;
+ struct net_device *net_dev;
+ struct dpaa2_eth_priv *priv;
+
+ dev = &ls_dev->dev;
+ net_dev = dev_get_drvdata(dev);
+ priv = netdev_priv(net_dev);
+
+ dpaa2_eth_dl_unregister(priv);
+
+#ifdef CONFIG_DEBUG_FS
+ dpaa2_dbg_remove(priv);
+#endif
+
+ unregister_netdev(net_dev);
+ rtnl_lock();
+ dpaa2_eth_disconnect_mac(priv);
+ rtnl_unlock();
+
+ dpaa2_eth_dl_port_del(priv);
+ dpaa2_eth_dl_traps_unregister(priv);
+ dpaa2_eth_dl_free(priv);
+
+ if (priv->do_link_poll)
+ kthread_stop(priv->poll_thread);
+ else
+ fsl_mc_free_irqs(ls_dev);
+
+ dpaa2_eth_free_rings(priv);
+ free_percpu(priv->fd);
+ free_percpu(priv->sgt_cache);
+ free_percpu(priv->percpu_stats);
+ free_percpu(priv->percpu_extras);
+
+ dpaa2_eth_del_ch_napi(priv);
+ dpaa2_eth_free_dpbp(priv);
+ dpaa2_eth_free_dpio(priv);
+ dpaa2_eth_free_dpni(priv);
+ if (priv->onestep_reg_base)
+ iounmap(priv->onestep_reg_base);
+
+ fsl_mc_portal_free(priv->mc_io);
+
+ destroy_workqueue(priv->dpaa2_ptp_wq);
+
+ dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
+
+ free_netdev(net_dev);
+
+ return 0;
+}
+
+static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpni",
+ },
+ { .vendor = 0x0 }
+};
+MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
+
+static struct fsl_mc_driver dpaa2_eth_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = dpaa2_eth_probe,
+ .remove = dpaa2_eth_remove,
+ .match_id_table = dpaa2_eth_match_id_table
+};
+
+static int __init dpaa2_eth_driver_init(void)
+{
+ int err;
+
+ dpaa2_eth_dbg_init();
+ err = fsl_mc_driver_register(&dpaa2_eth_driver);
+ if (err) {
+ dpaa2_eth_dbg_exit();
+ return err;
+ }
+
+ return 0;
+}
+
+static void __exit dpaa2_eth_driver_exit(void)
+{
+ dpaa2_eth_dbg_exit();
+ fsl_mc_driver_unregister(&dpaa2_eth_driver);
+}
+
+module_init(dpaa2_eth_driver_init);
+module_exit(dpaa2_eth_driver_exit);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
new file mode 100644
index 000000000..e703846ad
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -0,0 +1,774 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2020 NXP
+ */
+
+#ifndef __DPAA2_ETH_H
+#define __DPAA2_ETH_H
+
+#include <linux/dcbnl.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/fsl/mc.h>
+#include <linux/net_tstamp.h>
+#include <net/devlink.h>
+
+#include <soc/fsl/dpaa2-io.h>
+#include <soc/fsl/dpaa2-fd.h>
+#include "dpni.h"
+#include "dpni-cmd.h"
+
+#include "dpaa2-eth-trace.h"
+#include "dpaa2-eth-debugfs.h"
+#include "dpaa2-mac.h"
+
+#define DPAA2_WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0)
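+/* E.g. DPAA2_WRIOP_VERSION(1, 0, 0) = 1 << 10 = 0x400 */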
+
+#define DPAA2_ETH_STORE_SIZE 16
+
+/* Maximum number of scatter-gather entries in an ingress frame,
+ * considering the maximum receive frame size is 64K
+ */
+#define DPAA2_ETH_MAX_SG_ENTRIES ((64 * 1024) / DPAA2_ETH_RX_BUF_SIZE)
+
+/* Maximum acceptable MTU value. It is in direct relation with the hardware
+ * enforced Max Frame Length (currently 10k).
+ */
+#define DPAA2_ETH_MFL (10 * 1024)
+#define DPAA2_ETH_MAX_MTU (DPAA2_ETH_MFL - VLAN_ETH_HLEN)
+/* Convert L3 MTU to L2 MFL */
+#define DPAA2_ETH_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN)
+
+/* Set the taildrop threshold (in bytes) to allow the enqueue of a large
+ * enough number of jumbo frames in the Rx queues (length of the current
+ * frame is not taken into account when making the taildrop decision)
+ */
+#define DPAA2_ETH_FQ_TAILDROP_THRESH (1024 * 1024)
+
+/* Maximum burst size value for Tx shaping */
+#define DPAA2_ETH_MAX_BURST_SIZE 0xF7FF
+
+/* Maximum number of Tx confirmation frames to be processed
+ * in a single NAPI call
+ */
+#define DPAA2_ETH_TXCONF_PER_NAPI 256
+
+/* Buffer quota per channel. We want to keep the number of in-flight ingress
+ * frames in check: for small frames, congestion group taildrop may kick in
+ * first; for large ones, the Rx FQ taildrop threshold ensures only a
+ * reasonable number of frames are pending at any given time.
+ * Ingress frame drops due to buffer pool depletion should be a corner case only
+ */
+#define DPAA2_ETH_NUM_BUFS 1280
+#define DPAA2_ETH_REFILL_THRESH \
+ (DPAA2_ETH_NUM_BUFS - DPAA2_ETH_BUFS_PER_CMD)
+
+/* Congestion group taildrop threshold: number of frames allowed to accumulate
+ * at any moment in a group of Rx queues belonging to the same traffic class.
+ * Choose value such that we don't risk depleting the buffer pool before the
+ * taildrop kicks in
+ */
+#define DPAA2_ETH_CG_TAILDROP_THRESH(priv) \
+ (1024 * dpaa2_eth_queue_count(priv) / dpaa2_eth_tc_count(priv))
+
+/* Congestion group notification threshold: when this many frames accumulate
+ * on the Rx queues belonging to the same TC, the MAC is instructed to send
+ * PFC frames for that TC.
+ * When number of pending frames drops below exit threshold transmission of
+ * PFC frames is stopped.
+ */
+#define DPAA2_ETH_CN_THRESH_ENTRY(priv) \
+ (DPAA2_ETH_CG_TAILDROP_THRESH(priv) / 2)
+#define DPAA2_ETH_CN_THRESH_EXIT(priv) \
+ (DPAA2_ETH_CN_THRESH_ENTRY(priv) * 3 / 4)
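+/* Numeric example: a DPNI with 8 queues and 2 traffic classes gets a CG
+ * taildrop threshold of 1024 * 8 / 2 = 4096 frames, a PFC entry threshold
+ * of 2048 frames and an exit threshold of 1536 frames.
+ */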
+
+/* Maximum number of buffers that can be acquired/released through a single
+ * QBMan command
+ */
+#define DPAA2_ETH_BUFS_PER_CMD 7
+
+/* Hardware requires alignment for ingress/egress buffer addresses */
+#define DPAA2_ETH_TX_BUF_ALIGN 64
+
+#define DPAA2_ETH_RX_BUF_RAW_SIZE PAGE_SIZE
+#define DPAA2_ETH_RX_BUF_TAILROOM \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+#define DPAA2_ETH_RX_BUF_SIZE \
+ (DPAA2_ETH_RX_BUF_RAW_SIZE - DPAA2_ETH_RX_BUF_TAILROOM)
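+/* E.g. with 4 KiB pages roughly 3.7 KiB of each Rx buffer remains usable
+ * for frame data after reserving the skb_shared_info tailroom (the exact
+ * value depends on kernel configuration).
+ */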
+
+/* Hardware annotation area in RX/TX buffers */
+#define DPAA2_ETH_RX_HWA_SIZE 64
+#define DPAA2_ETH_TX_HWA_SIZE 128
+
+/* PTP nominal frequency 1GHz */
+#define DPAA2_PTP_CLK_PERIOD_NS 1
+
+/* Due to a limitation in WRIOP 1.0.0, the RX buffer data must be aligned
+ * to 256B. For newer revisions, the requirement is only for 64B alignment
+ */
+#define DPAA2_ETH_RX_BUF_ALIGN_REV1 256
+#define DPAA2_ETH_RX_BUF_ALIGN 64
+
+/* We are accommodating an skb backpointer and some S/G info
+ * in the frame's software annotation. The hardware
+ * options are either 0 or 64, so we choose the latter.
+ */
+#define DPAA2_ETH_SWA_SIZE 64
+
+/* We store different information in the software annotation area of a Tx frame
+ * based on what type of frame it is
+ */
+enum dpaa2_eth_swa_type {
+ DPAA2_ETH_SWA_SINGLE,
+ DPAA2_ETH_SWA_SG,
+ DPAA2_ETH_SWA_XDP,
+ DPAA2_ETH_SWA_SW_TSO,
+};
+
+/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
+struct dpaa2_eth_swa {
+ enum dpaa2_eth_swa_type type;
+ union {
+ struct {
+ struct sk_buff *skb;
+ int sgt_size;
+ } single;
+ struct {
+ struct sk_buff *skb;
+ struct scatterlist *scl;
+ int num_sg;
+ int sgt_size;
+ } sg;
+ struct {
+ int dma_size;
+ struct xdp_frame *xdpf;
+ } xdp;
+ struct {
+ struct sk_buff *skb;
+ int num_sg;
+ int sgt_size;
+ int is_last_fd;
+ } tso;
+ };
+};
+
+/* Annotation valid bits in FD FRC */
+#define DPAA2_FD_FRC_FASV 0x8000
+#define DPAA2_FD_FRC_FAEADV 0x4000
+#define DPAA2_FD_FRC_FAPRV 0x2000
+#define DPAA2_FD_FRC_FAIADV 0x1000
+#define DPAA2_FD_FRC_FASWOV 0x0800
+#define DPAA2_FD_FRC_FAICFDV 0x0400
+
+/* Error bits in FD CTRL */
+#define DPAA2_FD_RX_ERR_MASK (FD_CTRL_SBE | FD_CTRL_FAERR)
+#define DPAA2_FD_TX_ERR_MASK (FD_CTRL_UFD | \
+ FD_CTRL_SBE | \
+ FD_CTRL_FSE | \
+ FD_CTRL_FAERR)
+
+/* Annotation bits in FD CTRL */
+#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128B */
+
+/* Frame annotation status */
+struct dpaa2_fas {
+ u8 reserved;
+ u8 ppid;
+ __le16 ifpid;
+ __le32 status;
+};
+
+/* Frame annotation status word is located in the first 8 bytes
+ * of the buffer's hardware annotation area
+ */
+#define DPAA2_FAS_OFFSET 0
+#define DPAA2_FAS_SIZE (sizeof(struct dpaa2_fas))
+
+/* Timestamp is located in the next 8 bytes of the buffer's
+ * hardware annotation area
+ */
+#define DPAA2_TS_OFFSET 0x8
+
+/* Frame annotation parse results */
+struct dpaa2_fapr {
+ /* 64-bit word 1 */
+ __le32 faf_lo;
+ __le16 faf_ext;
+ __le16 nxt_hdr;
+ /* 64-bit word 2 */
+ __le64 faf_hi;
+ /* 64-bit word 3 */
+ u8 last_ethertype_offset;
+ u8 vlan_tci_offset_n;
+ u8 vlan_tci_offset_1;
+ u8 llc_snap_offset;
+ u8 eth_offset;
+ u8 ip1_pid_offset;
+ u8 shim_offset_2;
+ u8 shim_offset_1;
+ /* 64-bit word 4 */
+ u8 l5_offset;
+ u8 l4_offset;
+ u8 gre_offset;
+ u8 l3_offset_n;
+ u8 l3_offset_1;
+ u8 mpls_offset_n;
+ u8 mpls_offset_1;
+ u8 pppoe_offset;
+ /* 64-bit word 5 */
+ __le16 running_sum;
+ __le16 gross_running_sum;
+ u8 ipv6_frag_offset;
+ u8 nxt_hdr_offset;
+ u8 routing_hdr_offset_2;
+ u8 routing_hdr_offset_1;
+ /* 64-bit word 6 */
+ u8 reserved[5]; /* Soft-parsing context */
+ u8 ip_proto_offset_n;
+ u8 nxt_hdr_frag_offset;
+ u8 parse_error_code;
+};
+
+#define DPAA2_FAPR_OFFSET 0x10
+#define DPAA2_FAPR_SIZE sizeof(struct dpaa2_fapr)
+
+/* Frame annotation egress action descriptor */
+#define DPAA2_FAEAD_OFFSET 0x58
+
+struct dpaa2_faead {
+ __le32 conf_fqid;
+ __le32 ctrl;
+};
+
+#define DPAA2_FAEAD_A2V 0x20000000
+#define DPAA2_FAEAD_A4V 0x08000000
+#define DPAA2_FAEAD_UPDV 0x00001000
+#define DPAA2_FAEAD_EBDDV 0x00002000
+#define DPAA2_FAEAD_UPD 0x00000010
+
+struct ptp_tstamp {
+ u16 sec_msb;
+ u32 sec_lsb;
+ u32 nsec;
+};
+
+static inline void ns_to_ptp_tstamp(struct ptp_tstamp *tstamp, u64 ns)
+{
+ u64 sec, nsec;
+
+ sec = ns;
+ nsec = do_div(sec, 1000000000);
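+ /* do_div() divides sec in place and returns the remainder, e.g.
+ * ns = 1500000123 yields sec = 1 and nsec = 500000123
+ */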
+
+ tstamp->sec_lsb = sec & 0xFFFFFFFF;
+ tstamp->sec_msb = (sec >> 32) & 0xFFFF;
+ tstamp->nsec = nsec;
+}
+
+/* Accessors for the hardware annotation fields that we use */
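+/* Tx buffers carry a 64B software annotation before the hardware one, so
+ * e.g. dpaa2_get_fas(buf, true) resolves to buf + DPAA2_ETH_SWA_SIZE +
+ * DPAA2_FAS_OFFSET = buf + 64.
+ */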
+static inline void *dpaa2_get_hwa(void *buf_addr, bool swa)
+{
+ return buf_addr + (swa ? DPAA2_ETH_SWA_SIZE : 0);
+}
+
+static inline struct dpaa2_fas *dpaa2_get_fas(void *buf_addr, bool swa)
+{
+ return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAS_OFFSET;
+}
+
+static inline __le64 *dpaa2_get_ts(void *buf_addr, bool swa)
+{
+ return dpaa2_get_hwa(buf_addr, swa) + DPAA2_TS_OFFSET;
+}
+
+static inline struct dpaa2_fapr *dpaa2_get_fapr(void *buf_addr, bool swa)
+{
+ return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAPR_OFFSET;
+}
+
+static inline struct dpaa2_faead *dpaa2_get_faead(void *buf_addr, bool swa)
+{
+ return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAEAD_OFFSET;
+}
+
+/* Error and status bits in the frame annotation status word */
+/* Debug frame, otherwise supposed to be discarded */
+#define DPAA2_FAS_DISC 0x80000000
+/* MACSEC frame */
+#define DPAA2_FAS_MS 0x40000000
+#define DPAA2_FAS_PTP 0x08000000
+/* Ethernet multicast frame */
+#define DPAA2_FAS_MC 0x04000000
+/* Ethernet broadcast frame */
+#define DPAA2_FAS_BC 0x02000000
+#define DPAA2_FAS_KSE 0x00040000
+#define DPAA2_FAS_EOFHE 0x00020000
+#define DPAA2_FAS_MNLE 0x00010000
+#define DPAA2_FAS_TIDE 0x00008000
+#define DPAA2_FAS_PIEE 0x00004000
+/* Frame length error */
+#define DPAA2_FAS_FLE 0x00002000
+/* Frame physical error */
+#define DPAA2_FAS_FPE 0x00001000
+#define DPAA2_FAS_PTE 0x00000080
+#define DPAA2_FAS_ISP 0x00000040
+#define DPAA2_FAS_PHE 0x00000020
+#define DPAA2_FAS_BLE 0x00000010
+/* L3 csum validation performed */
+#define DPAA2_FAS_L3CV 0x00000008
+/* L3 csum error */
+#define DPAA2_FAS_L3CE 0x00000004
+/* L4 csum validation performed */
+#define DPAA2_FAS_L4CV 0x00000002
+/* L4 csum error */
+#define DPAA2_FAS_L4CE 0x00000001
+/* Possible errors on the ingress path */
+#define DPAA2_FAS_RX_ERR_MASK (DPAA2_FAS_KSE | \
+ DPAA2_FAS_EOFHE | \
+ DPAA2_FAS_MNLE | \
+ DPAA2_FAS_TIDE | \
+ DPAA2_FAS_PIEE | \
+ DPAA2_FAS_FLE | \
+ DPAA2_FAS_FPE | \
+ DPAA2_FAS_PTE | \
+ DPAA2_FAS_ISP | \
+ DPAA2_FAS_PHE | \
+ DPAA2_FAS_BLE | \
+ DPAA2_FAS_L3CE | \
+ DPAA2_FAS_L4CE)
+
+/* Time in milliseconds between link state updates */
+#define DPAA2_ETH_LINK_STATE_REFRESH 1000
+
+/* Number of times to retry a frame enqueue before giving up.
+ * Value determined empirically, in order to minimize the number
+ * of frames dropped on Tx
+ */
+#define DPAA2_ETH_ENQUEUE_RETRIES 10
+
+/* Number of times to retry DPIO portal operations while waiting
+ * for portal to finish executing current command and become
+ * available. We want to avoid being stuck in a while loop in case
+ * hardware becomes unresponsive, but not give up too easily if
+ * the portal really is busy for valid reasons
+ */
+#define DPAA2_ETH_SWP_BUSY_RETRIES 1000
+
+/* Driver statistics, other than those in struct rtnl_link_stats64.
+ * These are usually collected per-CPU and aggregated by ethtool.
+ */
+struct dpaa2_eth_drv_stats {
+ __u64 tx_conf_frames;
+ __u64 tx_conf_bytes;
+ __u64 tx_sg_frames;
+ __u64 tx_sg_bytes;
+ __u64 tx_tso_frames;
+ __u64 tx_tso_bytes;
+ __u64 rx_sg_frames;
+ __u64 rx_sg_bytes;
+ /* Linear skbs sent as an S/G FD due to insufficient headroom */
+ __u64 tx_converted_sg_frames;
+ __u64 tx_converted_sg_bytes;
+ /* Enqueues retried due to portal busy */
+ __u64 tx_portal_busy;
+};
+
+/* Per-FQ statistics */
+struct dpaa2_eth_fq_stats {
+ /* Number of frames received on this queue */
+ __u64 frames;
+};
+
+/* Per-channel statistics */
+struct dpaa2_eth_ch_stats {
+ /* Volatile dequeues retried due to portal busy */
+ __u64 dequeue_portal_busy;
+ /* Pull errors */
+ __u64 pull_err;
+ /* Number of CDANs; useful to estimate avg NAPI len */
+ __u64 cdan;
+ /* XDP counters */
+ __u64 xdp_drop;
+ __u64 xdp_tx;
+ __u64 xdp_tx_err;
+ __u64 xdp_redirect;
+ /* Must be last, does not show up in ethtool stats */
+ __u64 frames;
+ __u64 frames_per_cdan;
+ __u64 bytes_per_cdan;
+};
+
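+/* Number of dpaa2_eth_ch_stats fields exported through ethtool (all fields
+ * up to and including xdp_redirect)
+ */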
+#define DPAA2_ETH_CH_STATS 7
+
+/* Maximum number of queues associated with a DPNI */
+#define DPAA2_ETH_MAX_TCS 8
+#define DPAA2_ETH_MAX_RX_QUEUES_PER_TC 16
+#define DPAA2_ETH_MAX_RX_QUEUES \
+ (DPAA2_ETH_MAX_RX_QUEUES_PER_TC * DPAA2_ETH_MAX_TCS)
+#define DPAA2_ETH_MAX_TX_QUEUES 16
+#define DPAA2_ETH_MAX_RX_ERR_QUEUES 1
+#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \
+ DPAA2_ETH_MAX_TX_QUEUES + \
+ DPAA2_ETH_MAX_RX_ERR_QUEUES)
+#define DPAA2_ETH_MAX_NETDEV_QUEUES \
+ (DPAA2_ETH_MAX_TX_QUEUES * DPAA2_ETH_MAX_TCS)
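+/* Worked numbers for the limits above: 16 Rx queues per traffic class
+ * times 8 TCs gives 128 Rx queues; with 16 Tx conf queues and 1 Rx error
+ * queue, DPAA2_ETH_MAX_QUEUES comes to 145. The netdev itself exposes at
+ * most 16 * 8 = 128 Tx queues.
+ */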
+
+#define DPAA2_ETH_MAX_DPCONS 16
+
+enum dpaa2_eth_fq_type {
+ DPAA2_RX_FQ = 0,
+ DPAA2_TX_CONF_FQ,
+ DPAA2_RX_ERR_FQ
+};
+
+struct dpaa2_eth_priv;
+
+struct dpaa2_eth_xdp_fds {
+ struct dpaa2_fd fds[DEV_MAP_BULK_SIZE];
+ ssize_t num;
+};
+
+struct dpaa2_eth_fq {
+ u32 fqid;
+ u32 tx_qdbin;
+ u32 tx_fqid[DPAA2_ETH_MAX_TCS];
+ u16 flowid;
+ u8 tc;
+ int target_cpu;
+ u32 dq_frames;
+ u32 dq_bytes;
+ struct dpaa2_eth_channel *channel;
+ enum dpaa2_eth_fq_type type;
+
+ void (*consume)(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ struct dpaa2_eth_fq *fq);
+ struct dpaa2_eth_fq_stats stats;
+
+ struct dpaa2_eth_xdp_fds xdp_redirect_fds;
+ struct dpaa2_eth_xdp_fds xdp_tx_fds;
+};
+
+struct dpaa2_eth_ch_xdp {
+ struct bpf_prog *prog;
+ unsigned int res;
+};
+
+struct dpaa2_eth_channel {
+ struct dpaa2_io_notification_ctx nctx;
+ struct fsl_mc_device *dpcon;
+ int dpcon_id;
+ int ch_id;
+ struct napi_struct napi;
+ struct dpaa2_io *dpio;
+ struct dpaa2_io_store *store;
+ struct dpaa2_eth_priv *priv;
+ int buf_count;
+ struct dpaa2_eth_ch_stats stats;
+ struct dpaa2_eth_ch_xdp xdp;
+ struct xdp_rxq_info xdp_rxq;
+ struct list_head *rx_list;
+
+ /* Buffers to be recycled back in the buffer pool */
+ u64 recycled_bufs[DPAA2_ETH_BUFS_PER_CMD];
+ int recycled_bufs_cnt;
+};
+
+struct dpaa2_eth_dist_fields {
+ u64 rxnfc_field;
+ enum net_prot cls_prot;
+ int cls_field;
+ int size;
+ u64 id;
+};
+
+struct dpaa2_eth_cls_rule {
+ struct ethtool_rx_flow_spec fs;
+ u8 in_use;
+};
+
+#define DPAA2_ETH_SGT_CACHE_SIZE 256
+struct dpaa2_eth_sgt_cache {
+ void *buf[DPAA2_ETH_SGT_CACHE_SIZE];
+ u16 count;
+};
+
+struct dpaa2_eth_trap_item {
+ void *trap_ctx;
+};
+
+struct dpaa2_eth_trap_data {
+ struct dpaa2_eth_trap_item *trap_items_arr;
+ struct dpaa2_eth_priv *priv;
+};
+
+#define DPAA2_ETH_SG_ENTRIES_MAX (PAGE_SIZE / sizeof(struct scatterlist))
+
+#define DPAA2_ETH_DEFAULT_COPYBREAK 512
+
+#define DPAA2_ETH_ENQUEUE_MAX_FDS 200
+struct dpaa2_eth_fds {
+ struct dpaa2_fd array[DPAA2_ETH_ENQUEUE_MAX_FDS];
+};
+
+/* Driver private data */
+struct dpaa2_eth_priv {
+ struct net_device *net_dev;
+
+ u8 num_fqs;
+ struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
+ int (*enqueue)(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq,
+ struct dpaa2_fd *fd, u8 prio,
+ u32 num_frames,
+ int *frames_enqueued);
+
+ u8 num_channels;
+ struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
+ struct dpaa2_eth_sgt_cache __percpu *sgt_cache;
+ unsigned long features;
+ struct dpni_attr dpni_attrs;
+ u16 dpni_ver_major;
+ u16 dpni_ver_minor;
+ u16 tx_data_offset;
+ void __iomem *onestep_reg_base;
+ u8 ptp_correction_off;
+ void (*dpaa2_set_onestep_params_cb)(struct dpaa2_eth_priv *priv,
+ u32 offset, u8 udp);
+ struct fsl_mc_device *dpbp_dev;
+ u16 rx_buf_size;
+ u16 bpid;
+ struct iommu_domain *iommu_domain;
+
+ enum hwtstamp_tx_types tx_tstamp_type; /* Tx timestamping type */
+ bool rx_tstamp; /* Rx timestamping enabled */
+
+ u16 tx_qdid;
+ struct fsl_mc_io *mc_io;
+ /* Cores which have an affine DPIO/DPCON.
+ * This is the cpu set on which Rx and Tx conf frames are processed
+ */
+ struct cpumask dpio_cpumask;
+
+ /* Standard statistics */
+ struct rtnl_link_stats64 __percpu *percpu_stats;
+ /* Extra stats, in addition to the ones known by the kernel */
+ struct dpaa2_eth_drv_stats __percpu *percpu_extras;
+
+ u16 mc_token;
+ u8 rx_fqtd_enabled;
+ u8 rx_cgtd_enabled;
+
+ struct dpni_link_state link_state;
+ bool do_link_poll;
+ struct task_struct *poll_thread;
+
+ /* enabled ethtool hashing bits */
+ u64 rx_hash_fields;
+ u64 rx_cls_fields;
+ struct dpaa2_eth_cls_rule *cls_rules;
+ u8 rx_cls_enabled;
+ u8 vlan_cls_enabled;
+ u8 pfc_enabled;
+#ifdef CONFIG_FSL_DPAA2_ETH_DCB
+ u8 dcbx_mode;
+ struct ieee_pfc pfc;
+#endif
+ struct bpf_prog *xdp_prog;
+#ifdef CONFIG_DEBUG_FS
+ struct dpaa2_debugfs dbg;
+#endif
+
+ struct dpaa2_mac *mac;
+ struct workqueue_struct *dpaa2_ptp_wq;
+ struct work_struct tx_onestep_tstamp;
+ struct sk_buff_head tx_skbs;
+ /* The one-step timestamping configuration on hardware
+ * registers can only be changed while no one-step
+ * timestamping frames are in flight. We therefore hold this
+ * mutex until the previous one-step timestamping packet has
+ * passed through the Tx confirmation queue, and only then
+ * transmit the current packet.
+ */
+ struct mutex onestep_tstamp_lock;
+ struct devlink *devlink;
+ struct dpaa2_eth_trap_data *trap_data;
+ struct devlink_port devlink_port;
+
+ u32 rx_copybreak;
+
+ struct dpaa2_eth_fds __percpu *fd;
+};
+
+struct dpaa2_eth_devlink_priv {
+ struct dpaa2_eth_priv *dpaa2_priv;
+};
+
+#define TX_TSTAMP 0x1
+#define TX_TSTAMP_ONESTEP_SYNC 0x2
+
+#define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
+ | RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 \
+ | RXH_L4_B_2_3)
+
+/* default Rx hash options, set during probing */
+#define DPAA2_RXH_DEFAULT (RXH_L3_PROTO | RXH_IP_SRC | RXH_IP_DST | \
+ RXH_L4_B_0_1 | RXH_L4_B_2_3)
+
+#define dpaa2_eth_hash_enabled(priv) \
+ ((priv)->dpni_attrs.num_queues > 1)
+
+/* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */
+#define DPAA2_CLASSIFIER_DMA_SIZE 256
+
+extern const struct ethtool_ops dpaa2_ethtool_ops;
+extern int dpaa2_phc_index;
+extern struct ptp_qoriq *dpaa2_ptp;
+
+static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
+ u16 ver_major, u16 ver_minor)
+{
+ if (priv->dpni_ver_major == ver_major)
+ return priv->dpni_ver_minor - ver_minor;
+ return priv->dpni_ver_major - ver_major;
+}
+
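+/* For example, with the DPNI reporting version 7.8,
+ * dpaa2_eth_cmp_dpni_ver(priv, 7, 5) returns 3 (non-negative, i.e. at
+ * least 7.5), while version 6.9 yields -1. As with strcmp(), only the
+ * sign of the result is meaningful.
+ */
+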
+/* Minimum firmware version that supports a more flexible API
+ * for configuring the Rx flow hash key
+ */
+#define DPNI_RX_DIST_KEY_VER_MAJOR 7
+#define DPNI_RX_DIST_KEY_VER_MINOR 5
+
+#define dpaa2_eth_has_legacy_dist(priv) \
+ (dpaa2_eth_cmp_dpni_ver((priv), DPNI_RX_DIST_KEY_VER_MAJOR, \
+ DPNI_RX_DIST_KEY_VER_MINOR) < 0)
+
+#define dpaa2_eth_fs_enabled(priv) \
+ (!((priv)->dpni_attrs.options & DPNI_OPT_NO_FS))
+
+#define dpaa2_eth_fs_mask_enabled(priv) \
+ ((priv)->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)
+
+#define dpaa2_eth_fs_count(priv) \
+ ((priv)->dpni_attrs.fs_entries)
+
+#define dpaa2_eth_tc_count(priv) \
+ ((priv)->dpni_attrs.num_tcs)
+
+/* We have exactly one {Rx, Tx conf} queue per channel */
+#define dpaa2_eth_queue_count(priv) \
+ ((priv)->num_channels)
+
+enum dpaa2_eth_rx_dist {
+ DPAA2_ETH_RX_DIST_HASH,
+ DPAA2_ETH_RX_DIST_CLS
+};
+
+/* Unique IDs for the supported Rx classification header fields */
+#define DPAA2_ETH_DIST_ETHDST BIT(0)
+#define DPAA2_ETH_DIST_ETHSRC BIT(1)
+#define DPAA2_ETH_DIST_ETHTYPE BIT(2)
+#define DPAA2_ETH_DIST_VLAN BIT(3)
+#define DPAA2_ETH_DIST_IPSRC BIT(4)
+#define DPAA2_ETH_DIST_IPDST BIT(5)
+#define DPAA2_ETH_DIST_IPPROTO BIT(6)
+#define DPAA2_ETH_DIST_L4SRC BIT(7)
+#define DPAA2_ETH_DIST_L4DST BIT(8)
+#define DPAA2_ETH_DIST_ALL (~0ULL)
+
+#define DPNI_PTP_ONESTEP_VER_MAJOR 8
+#define DPNI_PTP_ONESTEP_VER_MINOR 2
+#define DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT BIT(0)
+#define DPAA2_PTP_SINGLE_STEP_ENABLE BIT(31)
+#define DPAA2_PTP_SINGLE_STEP_CH BIT(7)
+#define DPAA2_PTP_SINGLE_CORRECTION_OFF(v) ((v) << 8)
+
+#define DPNI_PAUSE_VER_MAJOR 7
+#define DPNI_PAUSE_VER_MINOR 13
+#define dpaa2_eth_has_pause_support(priv) \
+ (dpaa2_eth_cmp_dpni_ver((priv), DPNI_PAUSE_VER_MAJOR, \
+ DPNI_PAUSE_VER_MINOR) >= 0)
+
+static inline bool dpaa2_eth_tx_pause_enabled(u64 link_options)
+{
+ return !!(link_options & DPNI_LINK_OPT_PAUSE) ^
+ !!(link_options & DPNI_LINK_OPT_ASYM_PAUSE);
+}
+
+static inline bool dpaa2_eth_rx_pause_enabled(u64 link_options)
+{
+ return !!(link_options & DPNI_LINK_OPT_PAUSE);
+}
+
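+/* The two pause helpers above decode the DPNI pause options as follows
+ * (PAUSE / ASYM_PAUSE => Rx pause / Tx pause):
+ *	0 / 0 => off / off
+ *	1 / 0 => on  / on
+ *	1 / 1 => on  / off
+ *	0 / 1 => off / on
+ */
+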
+static inline unsigned int dpaa2_eth_needed_headroom(struct sk_buff *skb)
+{
+ unsigned int headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN;
+
+ /* If we don't have an skb (e.g. XDP buffer), we only need space for
+ * the software annotation area
+ */
+ if (!skb)
+ return headroom;
+
+ /* For non-linear skbs we have no headroom requirement, as we build a
+ * SG frame with a newly allocated SGT buffer
+ */
+ if (skb_is_nonlinear(skb))
+ return 0;
+
+ /* If we have Tx timestamping, need 128B hardware annotation */
+ if (skb->cb[0])
+ headroom += DPAA2_ETH_TX_HWA_SIZE;
+
+ return headroom;
+}
+
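+/* As an example for the helper above, assuming the DPAA2_ETH_SWA_SIZE and
+ * DPAA2_ETH_TX_BUF_ALIGN values of 64 defined earlier in this header: a
+ * linear skb with Tx timestamping requested needs 64 + 64 + 128 = 256 bytes
+ * of headroom, while an XDP frame needs only 64 + 64 = 128.
+ */
+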
+/* Extra headroom space requested to hardware, in order to make sure there's
+ * no realloc'ing in forwarding scenarios
+ */
+static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
+{
+ return priv->tx_data_offset - DPAA2_ETH_RX_HWA_SIZE;
+}
+
+static inline bool dpaa2_eth_is_type_phy(struct dpaa2_eth_priv *priv)
+{
+ if (priv->mac &&
+ (priv->mac->attr.link_type == DPMAC_LINK_TYPE_PHY ||
+ priv->mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE))
+ return true;
+
+ return false;
+}
+
+static inline bool dpaa2_eth_has_mac(struct dpaa2_eth_priv *priv)
+{
+ return !!priv->mac;
+}
+
+int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
+int dpaa2_eth_set_cls(struct net_device *net_dev, u64 key);
+int dpaa2_eth_cls_key_size(u64 key);
+int dpaa2_eth_cls_fld_off(int prot, int field);
+void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields);
+
+void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
+ bool tx_pause, bool pfc);
+
+extern const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops;
+
+int dpaa2_eth_dl_alloc(struct dpaa2_eth_priv *priv);
+void dpaa2_eth_dl_free(struct dpaa2_eth_priv *priv);
+
+void dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv);
+void dpaa2_eth_dl_unregister(struct dpaa2_eth_priv *priv);
+
+int dpaa2_eth_dl_port_add(struct dpaa2_eth_priv *priv);
+void dpaa2_eth_dl_port_del(struct dpaa2_eth_priv *priv);
+
+int dpaa2_eth_dl_traps_register(struct dpaa2_eth_priv *priv);
+void dpaa2_eth_dl_traps_unregister(struct dpaa2_eth_priv *priv);
+
+struct dpaa2_eth_trap_item *dpaa2_eth_dl_get_trap(struct dpaa2_eth_priv *priv,
+ struct dpaa2_fapr *fapr);
+#endif /* __DPAA2_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
new file mode 100644
index 000000000..598888264
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -0,0 +1,897 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ * Copyright 2020 NXP
+ */
+
+#include <linux/net_tstamp.h>
+#include <linux/nospec.h>
+
+#include "dpni.h" /* DPNI_LINK_OPT_* */
+#include "dpaa2-eth.h"
+
+/* To be kept in sync with DPNI statistics */
+static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = {
+ "[hw] rx frames",
+ "[hw] rx bytes",
+ "[hw] rx mcast frames",
+ "[hw] rx mcast bytes",
+ "[hw] rx bcast frames",
+ "[hw] rx bcast bytes",
+ "[hw] tx frames",
+ "[hw] tx bytes",
+ "[hw] tx mcast frames",
+ "[hw] tx mcast bytes",
+ "[hw] tx bcast frames",
+ "[hw] tx bcast bytes",
+ "[hw] rx filtered frames",
+ "[hw] rx discarded frames",
+ "[hw] rx nobuffer discards",
+ "[hw] tx discarded frames",
+ "[hw] tx confirmed frames",
+ "[hw] tx dequeued bytes",
+ "[hw] tx dequeued frames",
+ "[hw] tx rejected bytes",
+ "[hw] tx rejected frames",
+ "[hw] tx pending frames",
+};
+
+#define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats)
+
+static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
+ /* per-cpu stats */
+ "[drv] tx conf frames",
+ "[drv] tx conf bytes",
+ "[drv] tx sg frames",
+ "[drv] tx sg bytes",
+ "[drv] tx tso frames",
+ "[drv] tx tso bytes",
+ "[drv] rx sg frames",
+ "[drv] rx sg bytes",
+ "[drv] tx converted sg frames",
+ "[drv] tx converted sg bytes",
+ "[drv] enqueue portal busy",
+ /* Channel stats */
+ "[drv] dequeue portal busy",
+ "[drv] channel pull errors",
+ "[drv] cdan",
+ "[drv] xdp drop",
+ "[drv] xdp tx",
+ "[drv] xdp tx errors",
+ "[drv] xdp redirect",
+ /* FQ stats */
+ "[qbman] rx pending frames",
+ "[qbman] rx pending bytes",
+ "[qbman] tx conf pending frames",
+ "[qbman] tx conf pending bytes",
+ "[qbman] buffer count",
+};
+
+#define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras)
+
+static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor);
+
+ strscpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
+ sizeof(drvinfo->bus_info));
+}
+
+static int dpaa2_eth_nway_reset(struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ if (dpaa2_eth_is_type_phy(priv))
+ return phylink_ethtool_nway_reset(priv->mac->phylink);
+
+ return -EOPNOTSUPP;
+}
+
+static int
+dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
+ struct ethtool_link_ksettings *link_settings)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ if (dpaa2_eth_is_type_phy(priv))
+ return phylink_ethtool_ksettings_get(priv->mac->phylink,
+ link_settings);
+
+ link_settings->base.autoneg = AUTONEG_DISABLE;
+ if (!(priv->link_state.options & DPNI_LINK_OPT_HALF_DUPLEX))
+ link_settings->base.duplex = DUPLEX_FULL;
+ link_settings->base.speed = priv->link_state.rate;
+
+ return 0;
+}
+
+static int
+dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
+ const struct ethtool_link_ksettings *link_settings)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ if (!dpaa2_eth_is_type_phy(priv))
+ return -EOPNOTSUPP;
+
+ return phylink_ethtool_ksettings_set(priv->mac->phylink, link_settings);
+}
+
+static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ u64 link_options = priv->link_state.options;
+
+ if (dpaa2_eth_is_type_phy(priv)) {
+ phylink_ethtool_get_pauseparam(priv->mac->phylink, pause);
+ return;
+ }
+
+ pause->rx_pause = dpaa2_eth_rx_pause_enabled(link_options);
+ pause->tx_pause = dpaa2_eth_tx_pause_enabled(link_options);
+ pause->autoneg = AUTONEG_DISABLE;
+}
+
+static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct dpni_link_cfg cfg = {0};
+ int err;
+
+ if (!dpaa2_eth_has_pause_support(priv)) {
+ netdev_info(net_dev, "No pause frame support for DPNI version < %d.%d\n",
+ DPNI_PAUSE_VER_MAJOR, DPNI_PAUSE_VER_MINOR);
+ return -EOPNOTSUPP;
+ }
+
+ if (dpaa2_eth_is_type_phy(priv))
+ return phylink_ethtool_set_pauseparam(priv->mac->phylink,
+ pause);
+ if (pause->autoneg)
+ return -EOPNOTSUPP;
+
+ cfg.rate = priv->link_state.rate;
+ cfg.options = priv->link_state.options;
+ if (pause->rx_pause)
+ cfg.options |= DPNI_LINK_OPT_PAUSE;
+ else
+ cfg.options &= ~DPNI_LINK_OPT_PAUSE;
+ if (!!pause->rx_pause ^ !!pause->tx_pause)
+ cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
+ else
+ cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
+
+ if (cfg.options == priv->link_state.options)
+ return 0;
+
+ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
+ if (err) {
+ netdev_err(net_dev, "dpni_set_link_state failed\n");
+ return err;
+ }
+
+ priv->link_state.options = cfg.options;
+
+ return 0;
+}
+
+static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(netdev);
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) {
+ strscpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) {
+ strscpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ if (dpaa2_eth_has_mac(priv))
+ dpaa2_mac_get_strings(p);
+ break;
+ }
+}
+
+static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
+{
+ int num_ss_stats = DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ switch (sset) {
+ case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
+ if (dpaa2_eth_has_mac(priv))
+ num_ss_stats += dpaa2_mac_get_sset_count();
+ return num_ss_stats;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* Fill in hardware counters, as returned by MC */
+static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ union dpni_statistics dpni_stats;
+ int dpni_stats_page_size[DPNI_STATISTICS_CNT] = {
+ sizeof(dpni_stats.page_0),
+ sizeof(dpni_stats.page_1),
+ sizeof(dpni_stats.page_2),
+ sizeof(dpni_stats.page_3),
+ sizeof(dpni_stats.page_4),
+ sizeof(dpni_stats.page_5),
+ sizeof(dpni_stats.page_6),
+ };
+ u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
+ u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
+ struct dpaa2_eth_ch_stats *ch_stats;
+ struct dpaa2_eth_drv_stats *extras;
+ int j, k, err, num_cnt, i = 0;
+ u32 fcnt, bcnt;
+ u32 buf_cnt;
+
+ memset(data, 0,
+ sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));
+
+ /* Fill in standard counters, from DPNI statistics */
+ for (j = 0; j <= 6; j++) {
+ /* We're not interested in pages 4 & 5 for now */
+ if (j == 4 || j == 5)
+ continue;
+ err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
+ j, &dpni_stats);
+ if (err == -EINVAL)
+ /* Older firmware versions don't support all pages */
+ memset(&dpni_stats, 0, sizeof(dpni_stats));
+ else if (err)
+ netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j);
+
+ num_cnt = dpni_stats_page_size[j] / sizeof(u64);
+ for (k = 0; k < num_cnt; k++)
+ *(data + i++) = dpni_stats.raw.counter[k];
+ }
+
+ /* Aggregate the per-cpu extra stats */
+ for_each_online_cpu(k) {
+ extras = per_cpu_ptr(priv->percpu_extras, k);
+ for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++)
+ *((__u64 *)data + i + j) += *((__u64 *)extras + j);
+ }
+ i += j;
+
+ /* Per-channel stats */
+ for (k = 0; k < priv->num_channels; k++) {
+ ch_stats = &priv->channel[k]->stats;
+ for (j = 0; j < DPAA2_ETH_CH_STATS; j++)
+ *((__u64 *)data + i + j) += *((__u64 *)ch_stats + j);
+ }
+ i += j;
+
+ for (j = 0; j < priv->num_fqs; j++) {
+ /* Query FQ instantaneous counts */
+ err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
+ &fcnt, &bcnt);
+ if (err) {
+ netdev_warn(net_dev, "FQ query error %d", err);
+ return;
+ }
+
+ if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
+ fcnt_tx_total += fcnt;
+ bcnt_tx_total += bcnt;
+ } else {
+ fcnt_rx_total += fcnt;
+ bcnt_rx_total += bcnt;
+ }
+ }
+
+ *(data + i++) = fcnt_rx_total;
+ *(data + i++) = bcnt_rx_total;
+ *(data + i++) = fcnt_tx_total;
+ *(data + i++) = bcnt_tx_total;
+
+ err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
+ if (err) {
+ netdev_warn(net_dev, "Buffer count query error %d\n", err);
+ return;
+ }
+ *(data + i++) = buf_cnt;
+
+ if (dpaa2_eth_has_mac(priv))
+ dpaa2_mac_get_ethtool_stats(priv->mac, data + i);
+}
+
+static int dpaa2_eth_prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
+ void *key, void *mask, u64 *fields)
+{
+ int off;
+
+ if (eth_mask->h_proto) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
+ *(__be16 *)(key + off) = eth_value->h_proto;
+ *(__be16 *)(mask + off) = eth_mask->h_proto;
+ *fields |= DPAA2_ETH_DIST_ETHTYPE;
+ }
+
+ if (!is_zero_ether_addr(eth_mask->h_source)) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA);
+ ether_addr_copy(key + off, eth_value->h_source);
+ ether_addr_copy(mask + off, eth_mask->h_source);
+ *fields |= DPAA2_ETH_DIST_ETHSRC;
+ }
+
+ if (!is_zero_ether_addr(eth_mask->h_dest)) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
+ ether_addr_copy(key + off, eth_value->h_dest);
+ ether_addr_copy(mask + off, eth_mask->h_dest);
+ *fields |= DPAA2_ETH_DIST_ETHDST;
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
+ struct ethtool_usrip4_spec *uip_mask,
+ void *key, void *mask, u64 *fields)
+{
+ int off;
+ u32 tmp_value, tmp_mask;
+
+ if (uip_mask->tos || uip_mask->ip_ver)
+ return -EOPNOTSUPP;
+
+ if (uip_mask->ip4src) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
+ *(__be32 *)(key + off) = uip_value->ip4src;
+ *(__be32 *)(mask + off) = uip_mask->ip4src;
+ *fields |= DPAA2_ETH_DIST_IPSRC;
+ }
+
+ if (uip_mask->ip4dst) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
+ *(__be32 *)(key + off) = uip_value->ip4dst;
+ *(__be32 *)(mask + off) = uip_mask->ip4dst;
+ *fields |= DPAA2_ETH_DIST_IPDST;
+ }
+
+ if (uip_mask->proto) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
+ *(u8 *)(key + off) = uip_value->proto;
+ *(u8 *)(mask + off) = uip_mask->proto;
+ *fields |= DPAA2_ETH_DIST_IPPROTO;
+ }
+
+ if (uip_mask->l4_4_bytes) {
+ tmp_value = be32_to_cpu(uip_value->l4_4_bytes);
+ tmp_mask = be32_to_cpu(uip_mask->l4_4_bytes);
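+ /* e.g. l4_4_bytes == htonl(0x1F900050) splits into a
+ * source port of 0x1F90 (8080) and a destination port
+ * of 0x0050 (80); illustrative values only
+ */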
+
+ off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
+ *(__be16 *)(key + off) = htons(tmp_value >> 16);
+ *(__be16 *)(mask + off) = htons(tmp_mask >> 16);
+ *fields |= DPAA2_ETH_DIST_L4SRC;
+
+ off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
+ *(__be16 *)(key + off) = htons(tmp_value & 0xFFFF);
+ *(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF);
+ *fields |= DPAA2_ETH_DIST_L4DST;
+ }
+
+ /* Only apply the rule for IPv4 frames */
+ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
+ *(__be16 *)(key + off) = htons(ETH_P_IP);
+ *(__be16 *)(mask + off) = htons(0xFFFF);
+ *fields |= DPAA2_ETH_DIST_ETHTYPE;
+
+ return 0;
+}
+
+static int dpaa2_eth_prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
+ struct ethtool_tcpip4_spec *l4_mask,
+ void *key, void *mask, u8 l4_proto, u64 *fields)
+{
+ int off;
+
+ if (l4_mask->tos)
+ return -EOPNOTSUPP;
+
+ if (l4_mask->ip4src) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
+ *(__be32 *)(key + off) = l4_value->ip4src;
+ *(__be32 *)(mask + off) = l4_mask->ip4src;
+ *fields |= DPAA2_ETH_DIST_IPSRC;
+ }
+
+ if (l4_mask->ip4dst) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
+ *(__be32 *)(key + off) = l4_value->ip4dst;
+ *(__be32 *)(mask + off) = l4_mask->ip4dst;
+ *fields |= DPAA2_ETH_DIST_IPDST;
+ }
+
+ if (l4_mask->psrc) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
+ *(__be16 *)(key + off) = l4_value->psrc;
+ *(__be16 *)(mask + off) = l4_mask->psrc;
+ *fields |= DPAA2_ETH_DIST_L4SRC;
+ }
+
+ if (l4_mask->pdst) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
+ *(__be16 *)(key + off) = l4_value->pdst;
+ *(__be16 *)(mask + off) = l4_mask->pdst;
+ *fields |= DPAA2_ETH_DIST_L4DST;
+ }
+
+ /* Only apply the rule for IPv4 frames with the specified L4 proto */
+ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
+ *(__be16 *)(key + off) = htons(ETH_P_IP);
+ *(__be16 *)(mask + off) = htons(0xFFFF);
+ *fields |= DPAA2_ETH_DIST_ETHTYPE;
+
+ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
+ *(u8 *)(key + off) = l4_proto;
+ *(u8 *)(mask + off) = 0xFF;
+ *fields |= DPAA2_ETH_DIST_IPPROTO;
+
+ return 0;
+}
+
+static int dpaa2_eth_prep_ext_rule(struct ethtool_flow_ext *ext_value,
+ struct ethtool_flow_ext *ext_mask,
+ void *key, void *mask, u64 *fields)
+{
+ int off;
+
+ if (ext_mask->vlan_etype)
+ return -EOPNOTSUPP;
+
+ if (ext_mask->vlan_tci) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI);
+ *(__be16 *)(key + off) = ext_value->vlan_tci;
+ *(__be16 *)(mask + off) = ext_mask->vlan_tci;
+ *fields |= DPAA2_ETH_DIST_VLAN;
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
+ struct ethtool_flow_ext *ext_mask,
+ void *key, void *mask, u64 *fields)
+{
+ int off;
+
+ if (!is_zero_ether_addr(ext_mask->h_dest)) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
+ ether_addr_copy(key + off, ext_value->h_dest);
+ ether_addr_copy(mask + off, ext_mask->h_dest);
+ *fields |= DPAA2_ETH_DIST_ETHDST;
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key,
+ void *mask, u64 *fields)
+{
+ int err;
+
+ switch (fs->flow_type & 0xFF) {
+ case ETHER_FLOW:
+ err = dpaa2_eth_prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
+ key, mask, fields);
+ break;
+ case IP_USER_FLOW:
+ err = dpaa2_eth_prep_uip_rule(&fs->h_u.usr_ip4_spec,
+ &fs->m_u.usr_ip4_spec, key, mask, fields);
+ break;
+ case TCP_V4_FLOW:
+ err = dpaa2_eth_prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
+ key, mask, IPPROTO_TCP, fields);
+ break;
+ case UDP_V4_FLOW:
+ err = dpaa2_eth_prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
+ key, mask, IPPROTO_UDP, fields);
+ break;
+ case SCTP_V4_FLOW:
+ err = dpaa2_eth_prep_l4_rule(&fs->h_u.sctp_ip4_spec,
+ &fs->m_u.sctp_ip4_spec, key, mask,
+ IPPROTO_SCTP, fields);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (err)
+ return err;
+
+ if (fs->flow_type & FLOW_EXT) {
+ err = dpaa2_eth_prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields);
+ if (err)
+ return err;
+ }
+
+ if (fs->flow_type & FLOW_MAC_EXT) {
+ err = dpaa2_eth_prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key,
+ mask, fields);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_do_cls_rule(struct net_device *net_dev,
+ struct ethtool_rx_flow_spec *fs,
+ bool add)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct device *dev = net_dev->dev.parent;
+ struct dpni_rule_cfg rule_cfg = { 0 };
+ struct dpni_fs_action_cfg fs_act = { 0 };
+ dma_addr_t key_iova;
+ u64 fields = 0;
+ void *key_buf;
+ int i, err;
+
+ if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
+ fs->ring_cookie >= dpaa2_eth_queue_count(priv))
+ return -EINVAL;
+
+ rule_cfg.key_size = dpaa2_eth_cls_key_size(DPAA2_ETH_DIST_ALL);
+
+ /* allocate twice the key size, for the actual key and for mask */
+ key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL);
+ if (!key_buf)
+ return -ENOMEM;
+
+ /* Fill the key and mask memory areas */
+ err = dpaa2_eth_prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields);
+ if (err)
+ goto free_mem;
+
+ if (!dpaa2_eth_fs_mask_enabled(priv)) {
+ /* Masking allows us to configure a maximal key during init and
+ * use it for all flow steering rules. Without it, we include
+ * in the key only the fields actually used, so we need to
+ * extract the others from the final key buffer.
+ *
+ * Program the FS key if needed, or return error if previously
+ * set key can't be used for the current rule. User needs to
+ * delete existing rules in this case to allow for the new one.
+ */
+ if (!priv->rx_cls_fields) {
+ err = dpaa2_eth_set_cls(net_dev, fields);
+ if (err)
+ goto free_mem;
+
+ priv->rx_cls_fields = fields;
+ } else if (priv->rx_cls_fields != fields) {
+ netdev_err(net_dev, "No support for multiple FS keys, need to delete existing rules\n");
+ err = -EOPNOTSUPP;
+ goto free_mem;
+ }
+
+ dpaa2_eth_cls_trim_rule(key_buf, fields);
+ rule_cfg.key_size = dpaa2_eth_cls_key_size(fields);
+ }
+
+ key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, key_iova)) {
+ err = -ENOMEM;
+ goto free_mem;
+ }
+
+ rule_cfg.key_iova = key_iova;
+ if (dpaa2_eth_fs_mask_enabled(priv))
+ rule_cfg.mask_iova = key_iova + rule_cfg.key_size;
+
+ if (add) {
+ if (fs->ring_cookie == RX_CLS_FLOW_DISC)
+ fs_act.options |= DPNI_FS_OPT_DISCARD;
+ else
+ fs_act.flow_id = fs->ring_cookie;
+ }
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ if (add)
+ err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
+ i, fs->location, &rule_cfg,
+ &fs_act);
+ else
+ err = dpni_remove_fs_entry(priv->mc_io, 0,
+ priv->mc_token, i,
+ &rule_cfg);
+ if (err || priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
+ break;
+ }
+
+ dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE);
+
+free_mem:
+ kfree(key_buf);
+
+ return err;
+}
+
+static int dpaa2_eth_num_cls_rules(struct dpaa2_eth_priv *priv)
+{
+ int i, rules = 0;
+
+ for (i = 0; i < dpaa2_eth_fs_count(priv); i++)
+ if (priv->cls_rules[i].in_use)
+ rules++;
+
+ return rules;
+}
+
+static int dpaa2_eth_update_cls_rule(struct net_device *net_dev,
+ struct ethtool_rx_flow_spec *new_fs,
+ unsigned int location)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct dpaa2_eth_cls_rule *rule;
+ int err = -EINVAL;
+
+ if (!priv->rx_cls_enabled)
+ return -EOPNOTSUPP;
+
+ if (location >= dpaa2_eth_fs_count(priv))
+ return -EINVAL;
+
+ rule = &priv->cls_rules[location];
+
+ /* If a rule is present at the specified location, delete it. */
+ if (rule->in_use) {
+ err = dpaa2_eth_do_cls_rule(net_dev, &rule->fs, false);
+ if (err)
+ return err;
+
+ rule->in_use = 0;
+
+ if (!dpaa2_eth_fs_mask_enabled(priv) &&
+ !dpaa2_eth_num_cls_rules(priv))
+ priv->rx_cls_fields = 0;
+ }
+
+ /* If no new entry to add, return here */
+ if (!new_fs)
+ return err;
+
+ err = dpaa2_eth_do_cls_rule(net_dev, new_fs, true);
+ if (err)
+ return err;
+
+ rule->in_use = 1;
+ rule->fs = *new_fs;
+
+ return 0;
+}
+
+static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
+ struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int max_rules = dpaa2_eth_fs_count(priv);
+ int i, j = 0;
+
+ switch (rxnfc->cmd) {
+ case ETHTOOL_GRXFH:
+ /* we purposely ignore cmd->flow_type for now, because the
+ * classifier only supports a single set of fields for all
+ * protocols
+ */
+ rxnfc->data = priv->rx_hash_fields;
+ break;
+ case ETHTOOL_GRXRINGS:
+ rxnfc->data = dpaa2_eth_queue_count(priv);
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ rxnfc->rule_cnt = dpaa2_eth_num_cls_rules(priv);
+ rxnfc->data = max_rules;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ if (rxnfc->fs.location >= max_rules)
+ return -EINVAL;
+ rxnfc->fs.location = array_index_nospec(rxnfc->fs.location,
+ max_rules);
+ if (!priv->cls_rules[rxnfc->fs.location].in_use)
+ return -EINVAL;
+ rxnfc->fs = priv->cls_rules[rxnfc->fs.location].fs;
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ for (i = 0; i < max_rules; i++) {
+ if (!priv->cls_rules[i].in_use)
+ continue;
+ if (j == rxnfc->rule_cnt)
+ return -EMSGSIZE;
+ rule_locs[j++] = i;
+ }
+ rxnfc->rule_cnt = j;
+ rxnfc->data = max_rules;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
+ struct ethtool_rxnfc *rxnfc)
+{
+ int err = 0;
+
+ switch (rxnfc->cmd) {
+ case ETHTOOL_SRXFH:
+ if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data)
+ return -EOPNOTSUPP;
+ err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
+ break;
+ case ETHTOOL_SRXCLSRLINS:
+ err = dpaa2_eth_update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ err = dpaa2_eth_update_cls_rule(net_dev, NULL, rxnfc->fs.location);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+int dpaa2_phc_index = -1;
+EXPORT_SYMBOL(dpaa2_phc_index);
+
+static int dpaa2_eth_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ if (!dpaa2_ptp)
+ return ethtool_op_get_ts_info(dev, info);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = dpaa2_phc_index;
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON) |
+ (1 << HWTSTAMP_TX_ONESTEP_SYNC);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+ return 0;
+}
+
+static int dpaa2_eth_get_tunable(struct net_device *net_dev,
+ const struct ethtool_tunable *tuna,
+ void *data)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = priv->rx_copybreak;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int dpaa2_eth_set_tunable(struct net_device *net_dev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ priv->rx_copybreak = *(u32 *)data;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int dpaa2_eth_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ic,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ struct dpaa2_io *dpio = priv->channel[0]->dpio;
+
+ dpaa2_io_get_irq_coalescing(dpio, &ic->rx_coalesce_usecs);
+ ic->use_adaptive_rx_coalesce = dpaa2_io_get_adaptive_coalescing(dpio);
+
+ return 0;
+}
+
+static int dpaa2_eth_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ic,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ struct dpaa2_io *dpio;
+ int prev_adaptive;
+ u32 prev_rx_usecs;
+ int i, j, err;
+
+ /* Keep track of the previous value, just in case we fail */
+ dpio = priv->channel[0]->dpio;
+ dpaa2_io_get_irq_coalescing(dpio, &prev_rx_usecs);
+ prev_adaptive = dpaa2_io_get_adaptive_coalescing(dpio);
+
+ /* Set up the new value for Rx coalescing */
+ for (i = 0; i < priv->num_channels; i++) {
+ dpio = priv->channel[i]->dpio;
+
+ dpaa2_io_set_adaptive_coalescing(dpio,
+ ic->use_adaptive_rx_coalesce);
+ err = dpaa2_io_set_irq_coalescing(dpio, ic->rx_coalesce_usecs);
+ if (err)
+ goto restore_rx_usecs;
+ }
+
+ return 0;
+
+restore_rx_usecs:
+ for (j = 0; j < i; j++) {
+ dpio = priv->channel[j]->dpio;
+
+ dpaa2_io_set_irq_coalescing(dpio, prev_rx_usecs);
+ dpaa2_io_set_adaptive_coalescing(dpio, prev_adaptive);
+ }
+
+ return err;
+}
+
+const struct ethtool_ops dpaa2_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
+ .get_drvinfo = dpaa2_eth_get_drvinfo,
+ .nway_reset = dpaa2_eth_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = dpaa2_eth_get_link_ksettings,
+ .set_link_ksettings = dpaa2_eth_set_link_ksettings,
+ .get_pauseparam = dpaa2_eth_get_pauseparam,
+ .set_pauseparam = dpaa2_eth_set_pauseparam,
+ .get_sset_count = dpaa2_eth_get_sset_count,
+ .get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
+ .get_strings = dpaa2_eth_get_strings,
+ .get_rxnfc = dpaa2_eth_get_rxnfc,
+ .set_rxnfc = dpaa2_eth_set_rxnfc,
+ .get_ts_info = dpaa2_eth_get_ts_info,
+ .get_tunable = dpaa2_eth_get_tunable,
+ .set_tunable = dpaa2_eth_set_tunable,
+ .get_coalesce = dpaa2_eth_get_coalesce,
+ .set_coalesce = dpaa2_eth_set_coalesce,
+};
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
new file mode 100644
index 000000000..49ff85633
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -0,0 +1,574 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2019 NXP */
+
+#include <linux/acpi.h>
+#include <linux/pcs-lynx.h>
+#include <linux/phy/phy.h>
+#include <linux/property.h>
+
+#include "dpaa2-eth.h"
+#include "dpaa2-mac.h"
+
+#define phylink_to_dpaa2_mac(config) \
+ container_of((config), struct dpaa2_mac, phylink_config)
+
+#define DPMAC_PROTOCOL_CHANGE_VER_MAJOR 4
+#define DPMAC_PROTOCOL_CHANGE_VER_MINOR 8
+
+#define DPAA2_MAC_FEATURE_PROTOCOL_CHANGE BIT(0)
+
+static int dpaa2_mac_cmp_ver(struct dpaa2_mac *mac,
+ u16 ver_major, u16 ver_minor)
+{
+ if (mac->ver_major == ver_major)
+ return mac->ver_minor - ver_minor;
+ return mac->ver_major - ver_major;
+}
+
+static void dpaa2_mac_detect_features(struct dpaa2_mac *mac)
+{
+ mac->features = 0;
+
+ if (dpaa2_mac_cmp_ver(mac, DPMAC_PROTOCOL_CHANGE_VER_MAJOR,
+ DPMAC_PROTOCOL_CHANGE_VER_MINOR) >= 0)
+ mac->features |= DPAA2_MAC_FEATURE_PROTOCOL_CHANGE;
+}
+
+static int phy_mode(enum dpmac_eth_if eth_if, phy_interface_t *if_mode)
+{
+ *if_mode = PHY_INTERFACE_MODE_NA;
+
+ switch (eth_if) {
+ case DPMAC_ETH_IF_RGMII:
+ *if_mode = PHY_INTERFACE_MODE_RGMII;
+ break;
+ case DPMAC_ETH_IF_USXGMII:
+ *if_mode = PHY_INTERFACE_MODE_USXGMII;
+ break;
+ case DPMAC_ETH_IF_QSGMII:
+ *if_mode = PHY_INTERFACE_MODE_QSGMII;
+ break;
+ case DPMAC_ETH_IF_SGMII:
+ *if_mode = PHY_INTERFACE_MODE_SGMII;
+ break;
+ case DPMAC_ETH_IF_XFI:
+ *if_mode = PHY_INTERFACE_MODE_10GBASER;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static enum dpmac_eth_if dpmac_eth_if_mode(phy_interface_t if_mode)
+{
+ switch (if_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ return DPMAC_ETH_IF_RGMII;
+ case PHY_INTERFACE_MODE_USXGMII:
+ return DPMAC_ETH_IF_USXGMII;
+ case PHY_INTERFACE_MODE_QSGMII:
+ return DPMAC_ETH_IF_QSGMII;
+ case PHY_INTERFACE_MODE_SGMII:
+ return DPMAC_ETH_IF_SGMII;
+ case PHY_INTERFACE_MODE_10GBASER:
+ return DPMAC_ETH_IF_XFI;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ return DPMAC_ETH_IF_1000BASEX;
+ default:
+ return DPMAC_ETH_IF_MII;
+ }
+}
+
+static struct fwnode_handle *dpaa2_mac_get_node(struct device *dev,
+ u16 dpmac_id)
+{
+ struct fwnode_handle *fwnode, *parent = NULL, *child = NULL;
+ struct device_node *dpmacs = NULL;
+ int err;
+ u32 id;
+
+ fwnode = dev_fwnode(dev->parent);
+ if (is_of_node(fwnode)) {
+ dpmacs = of_find_node_by_name(NULL, "dpmacs");
+ if (!dpmacs)
+ return NULL;
+ parent = of_fwnode_handle(dpmacs);
+ } else if (is_acpi_node(fwnode)) {
+ parent = fwnode;
+ } else {
+ /* The root dprc device has not yet finalized its probe,
+ * thus the fwnode field is not yet set. Defer probe if we are
+ * facing this situation.
+ */
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ fwnode_for_each_child_node(parent, child) {
+ err = -EINVAL;
+ if (is_acpi_device_node(child))
+ err = acpi_get_local_address(ACPI_HANDLE_FWNODE(child), &id);
+ else if (is_of_node(child))
+ err = of_property_read_u32(to_of_node(child), "reg", &id);
+ if (err)
+ continue;
+
+ if (id == dpmac_id) {
+ of_node_put(dpmacs);
+ return child;
+ }
+ }
+ of_node_put(dpmacs);
+ return NULL;
+}
+
+static int dpaa2_mac_get_if_mode(struct fwnode_handle *dpmac_node,
+ struct dpmac_attr attr)
+{
+ phy_interface_t if_mode;
+ int err;
+
+ err = fwnode_get_phy_mode(dpmac_node);
+ if (err > 0)
+ return err;
+
+ err = phy_mode(attr.eth_if, &if_mode);
+ if (!err)
+ return if_mode;
+
+ return err;
+}
+
+static struct phylink_pcs *dpaa2_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
+
+ return mac->pcs;
+}
+
+static void dpaa2_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
+ struct dpmac_link_state *dpmac_state = &mac->state;
+ int err;
+
+ if (state->an_enabled)
+ dpmac_state->options |= DPMAC_LINK_OPT_AUTONEG;
+ else
+ dpmac_state->options &= ~DPMAC_LINK_OPT_AUTONEG;
+
+ err = dpmac_set_link_state(mac->mc_io, 0,
+ mac->mc_dev->mc_handle, dpmac_state);
+ if (err)
+ netdev_err(mac->net_dev, "%s: dpmac_set_link_state() = %d\n",
+ __func__, err);
+
+ if (!mac->serdes_phy)
+ return;
+
+ /* This happens only if we support changing the protocol at runtime */
+ err = dpmac_set_protocol(mac->mc_io, 0, mac->mc_dev->mc_handle,
+ dpmac_eth_if_mode(state->interface));
+ if (err)
+ netdev_err(mac->net_dev, "dpmac_set_protocol() = %d\n", err);
+
+ err = phy_set_mode_ext(mac->serdes_phy, PHY_MODE_ETHERNET, state->interface);
+ if (err)
+ netdev_err(mac->net_dev, "phy_set_mode_ext() = %d\n", err);
+}
+
+static void dpaa2_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
+ struct dpmac_link_state *dpmac_state = &mac->state;
+ int err;
+
+ dpmac_state->up = 1;
+
+ dpmac_state->rate = speed;
+
+ if (duplex == DUPLEX_HALF)
+ dpmac_state->options |= DPMAC_LINK_OPT_HALF_DUPLEX;
+ else if (duplex == DUPLEX_FULL)
+ dpmac_state->options &= ~DPMAC_LINK_OPT_HALF_DUPLEX;
+
+ if (rx_pause)
+ dpmac_state->options |= DPMAC_LINK_OPT_PAUSE;
+ else
+ dpmac_state->options &= ~DPMAC_LINK_OPT_PAUSE;
+
+ if (rx_pause ^ tx_pause)
+ dpmac_state->options |= DPMAC_LINK_OPT_ASYM_PAUSE;
+ else
+ dpmac_state->options &= ~DPMAC_LINK_OPT_ASYM_PAUSE;
+
+ err = dpmac_set_link_state(mac->mc_io, 0,
+ mac->mc_dev->mc_handle, dpmac_state);
+ if (err)
+ netdev_err(mac->net_dev, "%s: dpmac_set_link_state() = %d\n",
+ __func__, err);
+}
+
+static void dpaa2_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
+ struct dpmac_link_state *dpmac_state = &mac->state;
+ int err;
+
+ dpmac_state->up = 0;
+ err = dpmac_set_link_state(mac->mc_io, 0,
+ mac->mc_dev->mc_handle, dpmac_state);
+ if (err)
+ netdev_err(mac->net_dev, "dpmac_set_link_state() = %d\n", err);
+}
+
+static const struct phylink_mac_ops dpaa2_mac_phylink_ops = {
+ .validate = phylink_generic_validate,
+ .mac_select_pcs = dpaa2_mac_select_pcs,
+ .mac_config = dpaa2_mac_config,
+ .mac_link_up = dpaa2_mac_link_up,
+ .mac_link_down = dpaa2_mac_link_down,
+};
+
+static int dpaa2_pcs_create(struct dpaa2_mac *mac,
+ struct fwnode_handle *dpmac_node,
+ int id)
+{
+ struct mdio_device *mdiodev;
+ struct fwnode_handle *node;
+
+ node = fwnode_find_reference(dpmac_node, "pcs-handle", 0);
+ if (IS_ERR(node)) {
+ /* do not error out on old DTS files */
+ netdev_warn(mac->net_dev, "pcs-handle node not found\n");
+ return 0;
+ }
+
+ if (!fwnode_device_is_available(node)) {
+ netdev_err(mac->net_dev, "pcs-handle node not available\n");
+ fwnode_handle_put(node);
+ return -ENODEV;
+ }
+
+ mdiodev = fwnode_mdio_find_device(node);
+ fwnode_handle_put(node);
+ if (!mdiodev)
+ return -EPROBE_DEFER;
+
+ mac->pcs = lynx_pcs_create(mdiodev);
+ if (!mac->pcs) {
+ netdev_err(mac->net_dev, "lynx_pcs_create() failed\n");
+ put_device(&mdiodev->dev);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void dpaa2_pcs_destroy(struct dpaa2_mac *mac)
+{
+ struct phylink_pcs *phylink_pcs = mac->pcs;
+
+ if (phylink_pcs) {
+ struct mdio_device *mdio = lynx_get_mdio_device(phylink_pcs);
+ struct device *dev = &mdio->dev;
+
+ lynx_pcs_destroy(phylink_pcs);
+ put_device(dev);
+ mac->pcs = NULL;
+ }
+}
+
+static void dpaa2_mac_set_supported_interfaces(struct dpaa2_mac *mac)
+{
+ int intf, err;
+
+ /* We support the current interface mode and, if we have a PCS,
+ * similar interface modes that do not require the SerDes lane to be
+ * reconfigured.
+ */
+ __set_bit(mac->if_mode, mac->phylink_config.supported_interfaces);
+ if (mac->pcs) {
+ switch (mac->if_mode) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ mac->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ mac->phylink_config.supported_interfaces);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (!mac->serdes_phy)
+ return;
+
+ /* If we have access to the SerDes phy/lane, ask the SerDes driver
+ * which interfaces are supported based on the current PLL
+ * configuration.
+ */
+ for (intf = 0; intf < PHY_INTERFACE_MODE_MAX; intf++) {
+ if (intf == PHY_INTERFACE_MODE_NA)
+ continue;
+
+ err = phy_validate(mac->serdes_phy, PHY_MODE_ETHERNET, intf, NULL);
+ if (err)
+ continue;
+
+ __set_bit(intf, mac->phylink_config.supported_interfaces);
+ }
+}
+
+void dpaa2_mac_start(struct dpaa2_mac *mac)
+{
+ if (mac->serdes_phy)
+ phy_power_on(mac->serdes_phy);
+}
+
+void dpaa2_mac_stop(struct dpaa2_mac *mac)
+{
+ if (mac->serdes_phy)
+ phy_power_off(mac->serdes_phy);
+}
+
+int dpaa2_mac_connect(struct dpaa2_mac *mac)
+{
+ struct net_device *net_dev = mac->net_dev;
+ struct fwnode_handle *dpmac_node;
+ struct phy *serdes_phy = NULL;
+ struct phylink *phylink;
+ int err;
+
+ mac->if_link_type = mac->attr.link_type;
+
+ dpmac_node = mac->fw_node;
+ if (!dpmac_node) {
+ netdev_err(net_dev, "No dpmac@%d node found.\n", mac->attr.id);
+ return -ENODEV;
+ }
+
+ err = dpaa2_mac_get_if_mode(dpmac_node, mac->attr);
+ if (err < 0)
+ return -EINVAL;
+ mac->if_mode = err;
+
+ if (mac->features & DPAA2_MAC_FEATURE_PROTOCOL_CHANGE &&
+ !phy_interface_mode_is_rgmii(mac->if_mode) &&
+ is_of_node(dpmac_node)) {
+ serdes_phy = of_phy_get(to_of_node(dpmac_node), NULL);
+
+ if (serdes_phy == ERR_PTR(-ENODEV))
+ serdes_phy = NULL;
+ else if (IS_ERR(serdes_phy))
+ return PTR_ERR(serdes_phy);
+ else
+ phy_init(serdes_phy);
+ }
+ mac->serdes_phy = serdes_phy;
+
+ /* The MAC does not have the capability to add RGMII delays, so
+ * error out if the interface mode requests them and there is no PHY
+ * to act upon them.
+ */
+ if (of_phy_is_fixed_link(to_of_node(dpmac_node)) &&
+ (mac->if_mode == PHY_INTERFACE_MODE_RGMII_ID ||
+ mac->if_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
+ mac->if_mode == PHY_INTERFACE_MODE_RGMII_TXID)) {
+ netdev_err(net_dev, "RGMII delay not supported\n");
+ return -EINVAL;
+ }
+
+ if ((mac->attr.link_type == DPMAC_LINK_TYPE_PHY &&
+ mac->attr.eth_if != DPMAC_ETH_IF_RGMII) ||
+ mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE) {
+ err = dpaa2_pcs_create(mac, dpmac_node, mac->attr.id);
+ if (err)
+ return err;
+ }
+
+ memset(&mac->phylink_config, 0, sizeof(mac->phylink_config));
+ mac->phylink_config.dev = &net_dev->dev;
+ mac->phylink_config.type = PHYLINK_NETDEV;
+
+ mac->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ MAC_10FD | MAC_100FD | MAC_1000FD | MAC_2500FD | MAC_5000FD |
+ MAC_10000FD;
+
+ dpaa2_mac_set_supported_interfaces(mac);
+
+ phylink = phylink_create(&mac->phylink_config,
+ dpmac_node, mac->if_mode,
+ &dpaa2_mac_phylink_ops);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
+ goto err_pcs_destroy;
+ }
+ mac->phylink = phylink;
+
+ err = phylink_fwnode_phy_connect(mac->phylink, dpmac_node, 0);
+ if (err) {
+ netdev_err(net_dev, "phylink_fwnode_phy_connect() = %d\n", err);
+ goto err_phylink_destroy;
+ }
+
+ return 0;
+
+err_phylink_destroy:
+ phylink_destroy(mac->phylink);
+err_pcs_destroy:
+ dpaa2_pcs_destroy(mac);
+
+ return err;
+}
+
+void dpaa2_mac_disconnect(struct dpaa2_mac *mac)
+{
+ if (!mac->phylink)
+ return;
+
+ phylink_disconnect_phy(mac->phylink);
+ phylink_destroy(mac->phylink);
+ dpaa2_pcs_destroy(mac);
+ of_phy_put(mac->serdes_phy);
+ mac->serdes_phy = NULL;
+}
+
+int dpaa2_mac_open(struct dpaa2_mac *mac)
+{
+ struct fsl_mc_device *dpmac_dev = mac->mc_dev;
+ struct net_device *net_dev = mac->net_dev;
+ struct fwnode_handle *fw_node;
+ int err;
+
+ err = dpmac_open(mac->mc_io, 0, dpmac_dev->obj_desc.id,
+ &dpmac_dev->mc_handle);
+ if (err || !dpmac_dev->mc_handle) {
+ netdev_err(net_dev, "dpmac_open() = %d\n", err);
+ return -ENODEV;
+ }
+
+ err = dpmac_get_attributes(mac->mc_io, 0, dpmac_dev->mc_handle,
+ &mac->attr);
+ if (err) {
+ netdev_err(net_dev, "dpmac_get_attributes() = %d\n", err);
+ goto err_close_dpmac;
+ }
+
+ err = dpmac_get_api_version(mac->mc_io, 0, &mac->ver_major, &mac->ver_minor);
+ if (err) {
+ netdev_err(net_dev, "dpmac_get_api_version() = %d\n", err);
+ goto err_close_dpmac;
+ }
+
+ dpaa2_mac_detect_features(mac);
+
+ /* Find the device node representing the MAC device and link the device
+ * behind the associated netdev to it.
+ */
+ fw_node = dpaa2_mac_get_node(&mac->mc_dev->dev, mac->attr.id);
+ if (IS_ERR(fw_node)) {
+ err = PTR_ERR(fw_node);
+ goto err_close_dpmac;
+ }
+
+ mac->fw_node = fw_node;
+ net_dev->dev.of_node = to_of_node(mac->fw_node);
+
+ return 0;
+
+err_close_dpmac:
+ dpmac_close(mac->mc_io, 0, dpmac_dev->mc_handle);
+ return err;
+}
+
+void dpaa2_mac_close(struct dpaa2_mac *mac)
+{
+ struct fsl_mc_device *dpmac_dev = mac->mc_dev;
+
+ dpmac_close(mac->mc_io, 0, dpmac_dev->mc_handle);
+ if (mac->fw_node)
+ fwnode_handle_put(mac->fw_node);
+}
+
+static char dpaa2_mac_ethtool_stats[][ETH_GSTRING_LEN] = {
+ [DPMAC_CNT_ING_ALL_FRAME] = "[mac] rx all frames",
+ [DPMAC_CNT_ING_GOOD_FRAME] = "[mac] rx frames ok",
+ [DPMAC_CNT_ING_ERR_FRAME] = "[mac] rx frame errors",
+ [DPMAC_CNT_ING_FRAME_DISCARD] = "[mac] rx frame discards",
+ [DPMAC_CNT_ING_UCAST_FRAME] = "[mac] rx u-cast",
+ [DPMAC_CNT_ING_BCAST_FRAME] = "[mac] rx b-cast",
+ [DPMAC_CNT_ING_MCAST_FRAME] = "[mac] rx m-cast",
+ [DPMAC_CNT_ING_FRAME_64] = "[mac] rx 64 bytes",
+ [DPMAC_CNT_ING_FRAME_127] = "[mac] rx 65-127 bytes",
+ [DPMAC_CNT_ING_FRAME_255] = "[mac] rx 128-255 bytes",
+ [DPMAC_CNT_ING_FRAME_511] = "[mac] rx 256-511 bytes",
+ [DPMAC_CNT_ING_FRAME_1023] = "[mac] rx 512-1023 bytes",
+ [DPMAC_CNT_ING_FRAME_1518] = "[mac] rx 1024-1518 bytes",
+ [DPMAC_CNT_ING_FRAME_1519_MAX] = "[mac] rx 1519-max bytes",
+ [DPMAC_CNT_ING_FRAG] = "[mac] rx frags",
+ [DPMAC_CNT_ING_JABBER] = "[mac] rx jabber",
+ [DPMAC_CNT_ING_ALIGN_ERR] = "[mac] rx align errors",
+ [DPMAC_CNT_ING_OVERSIZED] = "[mac] rx oversized",
+ [DPMAC_CNT_ING_VALID_PAUSE_FRAME] = "[mac] rx pause",
+ [DPMAC_CNT_ING_BYTE] = "[mac] rx bytes",
+ [DPMAC_CNT_EGR_GOOD_FRAME] = "[mac] tx frames ok",
+ [DPMAC_CNT_EGR_UCAST_FRAME] = "[mac] tx u-cast",
+ [DPMAC_CNT_EGR_MCAST_FRAME] = "[mac] tx m-cast",
+ [DPMAC_CNT_EGR_BCAST_FRAME] = "[mac] tx b-cast",
+ [DPMAC_CNT_EGR_ERR_FRAME] = "[mac] tx frame errors",
+ [DPMAC_CNT_EGR_UNDERSIZED] = "[mac] tx undersized",
+ [DPMAC_CNT_EGR_VALID_PAUSE_FRAME] = "[mac] tx b-pause",
+ [DPMAC_CNT_EGR_BYTE] = "[mac] tx bytes",
+};
+
+#define DPAA2_MAC_NUM_STATS ARRAY_SIZE(dpaa2_mac_ethtool_stats)
+
+int dpaa2_mac_get_sset_count(void)
+{
+ return DPAA2_MAC_NUM_STATS;
+}
+
+void dpaa2_mac_get_strings(u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ for (i = 0; i < DPAA2_MAC_NUM_STATS; i++) {
+ strscpy(p, dpaa2_mac_ethtool_stats[i], ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+}
+
+void dpaa2_mac_get_ethtool_stats(struct dpaa2_mac *mac, u64 *data)
+{
+ struct fsl_mc_device *dpmac_dev = mac->mc_dev;
+ int i, err;
+ u64 value;
+
+ for (i = 0; i < DPAA2_MAC_NUM_STATS; i++) {
+ err = dpmac_get_counter(mac->mc_io, 0, dpmac_dev->mc_handle,
+ i, &value);
+ if (err) {
+ netdev_err_once(mac->net_dev,
+ "dpmac_get_counter error %d\n", err);
+ *(data + i) = U64_MAX;
+ continue;
+ }
+ *(data + i) = value;
+ }
+}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
new file mode 100644
index 000000000..a58cab188
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2019 NXP */
+#ifndef DPAA2_MAC_H
+#define DPAA2_MAC_H
+
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/phylink.h>
+
+#include "dpmac.h"
+#include "dpmac-cmd.h"
+
+struct dpaa2_mac {
+ struct fsl_mc_device *mc_dev;
+ struct dpmac_link_state state;
+ struct net_device *net_dev;
+ struct fsl_mc_io *mc_io;
+ struct dpmac_attr attr;
+ u16 ver_major, ver_minor;
+ unsigned long features;
+
+ struct phylink_config phylink_config;
+ struct phylink *phylink;
+ phy_interface_t if_mode;
+ enum dpmac_link_type if_link_type;
+ struct phylink_pcs *pcs;
+ struct fwnode_handle *fw_node;
+
+ struct phy *serdes_phy;
+};
+
+bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
+ struct fsl_mc_io *mc_io);
+
+int dpaa2_mac_open(struct dpaa2_mac *mac);
+
+void dpaa2_mac_close(struct dpaa2_mac *mac);
+
+int dpaa2_mac_connect(struct dpaa2_mac *mac);
+
+void dpaa2_mac_disconnect(struct dpaa2_mac *mac);
+
+int dpaa2_mac_get_sset_count(void);
+
+void dpaa2_mac_get_strings(u8 *data);
+
+void dpaa2_mac_get_ethtool_stats(struct dpaa2_mac *mac, u64 *data);
+
+void dpaa2_mac_start(struct dpaa2_mac *mac);
+
+void dpaa2_mac_stop(struct dpaa2_mac *mac);
+
+#endif /* DPAA2_MAC_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
new file mode 100644
index 000000000..c8cb54157
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2018 NXP
+ * Copyright 2020 NXP
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/msi.h>
+#include <linux/fsl/mc.h>
+
+#include "dpaa2-ptp.h"
+
+static int dpaa2_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct ptp_qoriq *ptp_qoriq = container_of(ptp, struct ptp_qoriq, caps);
+ struct fsl_mc_device *mc_dev;
+ struct device *dev;
+ u32 mask = 0;
+ u32 bit;
+ int err;
+
+ dev = ptp_qoriq->dev;
+ mc_dev = to_fsl_mc_device(dev);
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ switch (rq->extts.index) {
+ case 0:
+ bit = DPRTC_EVENT_ETS1;
+ break;
+ case 1:
+ bit = DPRTC_EVENT_ETS2;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (on)
+ extts_clean_up(ptp_qoriq, rq->extts.index, false);
+ break;
+ case PTP_CLK_REQ_PPS:
+ bit = DPRTC_EVENT_PPS;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ err = dprtc_get_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle,
+ DPRTC_IRQ_INDEX, &mask);
+ if (err < 0) {
+ dev_err(dev, "dprtc_get_irq_mask(): %d\n", err);
+ return err;
+ }
+
+ if (on)
+ mask |= bit;
+ else
+ mask &= ~bit;
+
+ err = dprtc_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle,
+ DPRTC_IRQ_INDEX, mask);
+ if (err < 0) {
+ dev_err(dev, "dprtc_set_irq_mask(): %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct ptp_clock_info dpaa2_ptp_caps = {
+ .owner = THIS_MODULE,
+ .name = "DPAA2 PTP Clock",
+ .max_adj = 512000,
+ .n_alarm = 2,
+ .n_ext_ts = 2,
+ .n_per_out = 3,
+ .n_pins = 0,
+ .pps = 1,
+ .adjfine = ptp_qoriq_adjfine,
+ .adjtime = ptp_qoriq_adjtime,
+ .gettime64 = ptp_qoriq_gettime,
+ .settime64 = ptp_qoriq_settime,
+ .enable = dpaa2_ptp_enable,
+};
+
+static irqreturn_t dpaa2_ptp_irq_handler_thread(int irq, void *priv)
+{
+ struct ptp_qoriq *ptp_qoriq = priv;
+ struct ptp_clock_event event;
+ struct fsl_mc_device *mc_dev;
+ struct device *dev;
+ u32 status = 0;
+ int err;
+
+ dev = ptp_qoriq->dev;
+ mc_dev = to_fsl_mc_device(dev);
+
+ err = dprtc_get_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
+ DPRTC_IRQ_INDEX, &status);
+ if (unlikely(err)) {
+ dev_err(dev, "dprtc_get_irq_status err %d\n", err);
+ return IRQ_NONE;
+ }
+
+ if (status & DPRTC_EVENT_PPS) {
+ event.type = PTP_CLOCK_PPS;
+ ptp_clock_event(ptp_qoriq->clock, &event);
+ }
+
+ if (status & DPRTC_EVENT_ETS1)
+ extts_clean_up(ptp_qoriq, 0, true);
+
+ if (status & DPRTC_EVENT_ETS2)
+ extts_clean_up(ptp_qoriq, 1, true);
+
+ err = dprtc_clear_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
+ DPRTC_IRQ_INDEX, status);
+ if (unlikely(err)) {
+ dev_err(dev, "dprtc_clear_irq_status err %d\n", err);
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
+{
+ struct device *dev = &mc_dev->dev;
+ struct ptp_qoriq *ptp_qoriq;
+ struct device_node *node;
+ void __iomem *base;
+ int err;
+
+ ptp_qoriq = devm_kzalloc(dev, sizeof(*ptp_qoriq), GFP_KERNEL);
+ if (!ptp_qoriq)
+ return -ENOMEM;
+
+ err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
+ if (err) {
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
+ goto err_exit;
+ }
+
+ err = dprtc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
+ &mc_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dprtc_open err %d\n", err);
+ goto err_free_mcp;
+ }
+
+ ptp_qoriq->dev = dev;
+
+ node = of_find_compatible_node(NULL, NULL, "fsl,dpaa2-ptp");
+ if (!node) {
+ err = -ENODEV;
+ goto err_close;
+ }
+
+ dev->of_node = node;
+
+ base = of_iomap(node, 0);
+ if (!base) {
+ err = -ENOMEM;
+ goto err_put;
+ }
+
+ err = fsl_mc_allocate_irqs(mc_dev);
+ if (err) {
+ dev_err(dev, "MC irqs allocation failed\n");
+ goto err_unmap;
+ }
+
+ ptp_qoriq->irq = mc_dev->irqs[0]->virq;
+
+ err = request_threaded_irq(ptp_qoriq->irq, NULL,
+ dpaa2_ptp_irq_handler_thread,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ dev_name(dev), ptp_qoriq);
+ if (err < 0) {
+ dev_err(dev, "devm_request_threaded_irq(): %d\n", err);
+ goto err_free_mc_irq;
+ }
+
+ err = dprtc_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle,
+ DPRTC_IRQ_INDEX, 1);
+ if (err < 0) {
+ dev_err(dev, "dprtc_set_irq_enable(): %d\n", err);
+ goto err_free_threaded_irq;
+ }
+
+ err = ptp_qoriq_init(ptp_qoriq, base, &dpaa2_ptp_caps);
+ if (err)
+ goto err_free_threaded_irq;
+
+ dpaa2_phc_index = ptp_qoriq->phc_index;
+ dpaa2_ptp = ptp_qoriq;
+ dev_set_drvdata(dev, ptp_qoriq);
+
+ return 0;
+
+err_free_threaded_irq:
+ free_irq(ptp_qoriq->irq, ptp_qoriq);
+err_free_mc_irq:
+ fsl_mc_free_irqs(mc_dev);
+err_unmap:
+ iounmap(base);
+err_put:
+ of_node_put(node);
+err_close:
+ dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
+err_free_mcp:
+ fsl_mc_portal_free(mc_dev->mc_io);
+err_exit:
+ return err;
+}
+
+static int dpaa2_ptp_remove(struct fsl_mc_device *mc_dev)
+{
+ struct device *dev = &mc_dev->dev;
+ struct ptp_qoriq *ptp_qoriq;
+
+ ptp_qoriq = dev_get_drvdata(dev);
+
+ dpaa2_phc_index = -1;
+ ptp_qoriq_free(ptp_qoriq);
+
+ fsl_mc_free_irqs(mc_dev);
+ dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
+ fsl_mc_portal_free(mc_dev->mc_io);
+
+ return 0;
+}
+
+static const struct fsl_mc_device_id dpaa2_ptp_match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dprtc",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(fslmc, dpaa2_ptp_match_id_table);
+
+static struct fsl_mc_driver dpaa2_ptp_drv = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = dpaa2_ptp_probe,
+ .remove = dpaa2_ptp_remove,
+ .match_id_table = dpaa2_ptp_match_id_table,
+};
+
+module_fsl_mc_driver(dpaa2_ptp_drv);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DPAA2 PTP Clock Driver");
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h
new file mode 100644
index 000000000..e1023538b
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2018 NXP
+ * Copyright 2020 NXP
+ */
+
+#ifndef __RTC_H
+#define __RTC_H
+
+#include <linux/fsl/ptp_qoriq.h>
+
+#include "dprtc.h"
+#include "dprtc-cmd.h"
+
+extern int dpaa2_phc_index;
+extern struct ptp_qoriq *dpaa2_ptp;
+
+#endif
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
new file mode 100644
index 000000000..720c9230c
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DPAA2 Ethernet Switch ethtool support
+ *
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2018 NXP
+ *
+ */
+
+#include <linux/ethtool.h>
+
+#include "dpaa2-switch.h"
+
+static struct {
+ enum dpsw_counter id;
+ char name[ETH_GSTRING_LEN];
+} dpaa2_switch_ethtool_counters[] = {
+ {DPSW_CNT_ING_FRAME, "[hw] rx frames"},
+ {DPSW_CNT_ING_BYTE, "[hw] rx bytes"},
+ {DPSW_CNT_ING_FLTR_FRAME, "[hw] rx filtered frames"},
+ {DPSW_CNT_ING_FRAME_DISCARD, "[hw] rx discarded frames"},
+ {DPSW_CNT_ING_BCAST_FRAME, "[hw] rx bcast frames"},
+ {DPSW_CNT_ING_BCAST_BYTES, "[hw] rx bcast bytes"},
+ {DPSW_CNT_ING_MCAST_FRAME, "[hw] rx mcast frames"},
+ {DPSW_CNT_ING_MCAST_BYTE, "[hw] rx mcast bytes"},
+ {DPSW_CNT_EGR_FRAME, "[hw] tx frames"},
+ {DPSW_CNT_EGR_BYTE, "[hw] tx bytes"},
+ {DPSW_CNT_EGR_FRAME_DISCARD, "[hw] tx discarded frames"},
+ {DPSW_CNT_ING_NO_BUFF_DISCARD, "[hw] rx nobuffer discards"},
+};
+
+#define DPAA2_SWITCH_NUM_COUNTERS ARRAY_SIZE(dpaa2_switch_ethtool_counters)
+
+static void dpaa2_switch_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ u16 version_major, version_minor;
+ int err;
+
+ strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+
+ err = dpsw_get_api_version(port_priv->ethsw_data->mc_io, 0,
+ &version_major,
+ &version_minor);
+ if (err)
+ strscpy(drvinfo->fw_version, "N/A",
+ sizeof(drvinfo->fw_version));
+ else
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%u.%u", version_major, version_minor);
+
+ strscpy(drvinfo->bus_info, dev_name(netdev->dev.parent->parent),
+ sizeof(drvinfo->bus_info));
+}
+
+static int
+dpaa2_switch_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *link_ksettings)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct dpsw_link_state state = {0};
+ int err = 0;
+
+ if (dpaa2_switch_port_is_type_phy(port_priv))
+ return phylink_ethtool_ksettings_get(port_priv->mac->phylink,
+ link_ksettings);
+
+ err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ &state);
+ if (err) {
+ netdev_err(netdev, "ERROR %d getting link state\n", err);
+ goto out;
+ }
+
+	/* At the moment we have no way of interrogating the DPMAC from the
+	 * DPSW side, and a DPMAC may not exist at all.
+	 * Report only the autoneg state, duplex and speed.
+	 */
+ if (state.options & DPSW_LINK_OPT_AUTONEG)
+ link_ksettings->base.autoneg = AUTONEG_ENABLE;
+ if (!(state.options & DPSW_LINK_OPT_HALF_DUPLEX))
+ link_ksettings->base.duplex = DUPLEX_FULL;
+ link_ksettings->base.speed = state.rate;
+
+out:
+ return err;
+}
+
+static int
+dpaa2_switch_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *link_ksettings)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpsw_link_cfg cfg = {0};
+ bool if_running;
+ int err = 0, ret;
+
+ if (dpaa2_switch_port_is_type_phy(port_priv))
+ return phylink_ethtool_ksettings_set(port_priv->mac->phylink,
+ link_ksettings);
+
+ /* Interface needs to be down to change link settings */
+ if_running = netif_running(netdev);
+ if (if_running) {
+ err = dpsw_if_disable(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port_priv->idx);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_disable err %d\n", err);
+ return err;
+ }
+ }
+
+ cfg.rate = link_ksettings->base.speed;
+ if (link_ksettings->base.autoneg == AUTONEG_ENABLE)
+ cfg.options |= DPSW_LINK_OPT_AUTONEG;
+ else
+ cfg.options &= ~DPSW_LINK_OPT_AUTONEG;
+ if (link_ksettings->base.duplex == DUPLEX_HALF)
+ cfg.options |= DPSW_LINK_OPT_HALF_DUPLEX;
+ else
+ cfg.options &= ~DPSW_LINK_OPT_HALF_DUPLEX;
+
+ err = dpsw_if_set_link_cfg(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ &cfg);
+
+ if (if_running) {
+ ret = dpsw_if_enable(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port_priv->idx);
+ if (ret) {
+ netdev_err(netdev, "dpsw_if_enable err %d\n", ret);
+ return ret;
+ }
+ }
+ return err;
+}
+
+static int
+dpaa2_switch_ethtool_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int num_ss_stats = DPAA2_SWITCH_NUM_COUNTERS;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ if (port_priv->mac)
+ num_ss_stats += dpaa2_mac_get_sset_count();
+ return num_ss_stats;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void dpaa2_switch_ethtool_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < DPAA2_SWITCH_NUM_COUNTERS; i++) {
+ memcpy(p, dpaa2_switch_ethtool_counters[i].name,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ if (port_priv->mac)
+ dpaa2_mac_get_strings(p);
+ break;
+ }
+}
+
+static void dpaa2_switch_ethtool_get_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int i, err;
+
+ for (i = 0; i < DPAA2_SWITCH_NUM_COUNTERS; i++) {
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ dpaa2_switch_ethtool_counters[i].id,
+ &data[i]);
+ if (err)
+ netdev_err(netdev, "dpsw_if_get_counter[%s] err %d\n",
+ dpaa2_switch_ethtool_counters[i].name, err);
+ }
+
+ if (port_priv->mac)
+ dpaa2_mac_get_ethtool_stats(port_priv->mac, data + i);
+}
+
+const struct ethtool_ops dpaa2_switch_port_ethtool_ops = {
+ .get_drvinfo = dpaa2_switch_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = dpaa2_switch_get_link_ksettings,
+ .set_link_ksettings = dpaa2_switch_set_link_ksettings,
+ .get_strings = dpaa2_switch_ethtool_get_strings,
+ .get_ethtool_stats = dpaa2_switch_ethtool_get_stats,
+ .get_sset_count = dpaa2_switch_ethtool_get_sset_count,
+};
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
new file mode 100644
index 000000000..16d3c3610
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
@@ -0,0 +1,885 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DPAA2 Ethernet Switch flower support
+ *
+ * Copyright 2021 NXP
+ *
+ */
+
+#include "dpaa2-switch.h"
+
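+/* Translate the flow dissector representation of a tc-flower match into the
+ * dpsw_acl_key match/mask pair consumed by the MC firmware. Multi-byte
+ * fields arrive in network byte order and are converted to CPU order here.
+ */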
+static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls,
+ struct dpsw_acl_key *acl_key)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct dpsw_acl_fields *acl_h, *acl_m;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_IP) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported keys used");
+ return -EOPNOTSUPP;
+ }
+
+ acl_h = &acl_key->match;
+ acl_m = &acl_key->mask;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+ acl_h->l3_protocol = match.key->ip_proto;
+ acl_h->l2_ether_type = be16_to_cpu(match.key->n_proto);
+ acl_m->l3_protocol = match.mask->ip_proto;
+ acl_m->l2_ether_type = be16_to_cpu(match.mask->n_proto);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+ ether_addr_copy(acl_h->l2_dest_mac, &match.key->dst[0]);
+ ether_addr_copy(acl_h->l2_source_mac, &match.key->src[0]);
+ ether_addr_copy(acl_m->l2_dest_mac, &match.mask->dst[0]);
+ ether_addr_copy(acl_m->l2_source_mac, &match.mask->src[0]);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+ acl_h->l2_vlan_id = match.key->vlan_id;
+ acl_h->l2_tpid = be16_to_cpu(match.key->vlan_tpid);
+ acl_h->l2_pcp_dei = match.key->vlan_priority << 1 |
+ match.key->vlan_dei;
+
+ acl_m->l2_vlan_id = match.mask->vlan_id;
+ acl_m->l2_tpid = be16_to_cpu(match.mask->vlan_tpid);
+ acl_m->l2_pcp_dei = match.mask->vlan_priority << 1 |
+ match.mask->vlan_dei;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(rule, &match);
+ acl_h->l3_source_ip = be32_to_cpu(match.key->src);
+ acl_h->l3_dest_ip = be32_to_cpu(match.key->dst);
+ acl_m->l3_source_ip = be32_to_cpu(match.mask->src);
+ acl_m->l3_dest_ip = be32_to_cpu(match.mask->dst);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_ports(rule, &match);
+ acl_h->l4_source_port = be16_to_cpu(match.key->src);
+ acl_h->l4_dest_port = be16_to_cpu(match.key->dst);
+ acl_m->l4_source_port = be16_to_cpu(match.mask->src);
+ acl_m->l4_dest_port = be16_to_cpu(match.mask->dst);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_ip(rule, &match);
+ if (match.mask->ttl != 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on TTL not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if ((match.mask->tos & 0x3) != 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on ECN not supported, only DSCP");
+ return -EOPNOTSUPP;
+ }
+
+ acl_h->l3_dscp = match.key->tos >> 2;
+ acl_m->l3_dscp = match.mask->tos >> 2;
+ }
+
+ return 0;
+}
+
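+/* The MC firmware expects the ACL key in a DMA-able command buffer:
+ * serialize the key and mask with dpsw_acl_prepare_entry_cfg(), map the
+ * buffer, issue the add command, then unmap and free it.
+ */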
+int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block,
+ struct dpaa2_switch_acl_entry *entry)
+{
+ struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
+ struct ethsw_core *ethsw = filter_block->ethsw;
+ struct dpsw_acl_key *acl_key = &entry->key;
+ struct device *dev = ethsw->dev;
+ u8 *cmd_buff;
+ int err;
+
+ cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL);
+ if (!cmd_buff)
+ return -ENOMEM;
+
+ dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff);
+
+ acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff,
+ DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
+ dev_err(dev, "DMA mapping failed\n");
+ kfree(cmd_buff);
+ return -EFAULT;
+ }
+
+ err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ filter_block->acl_id, acl_entry_cfg);
+
+ dma_unmap_single(dev, acl_entry_cfg->key_iova,
+ DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
+ DMA_TO_DEVICE);
+ if (err) {
+ dev_err(dev, "dpsw_acl_add_entry() failed %d\n", err);
+ kfree(cmd_buff);
+ return err;
+ }
+
+ kfree(cmd_buff);
+
+ return 0;
+}
+
+static int
+dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block,
+ struct dpaa2_switch_acl_entry *entry)
+{
+ struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
+ struct dpsw_acl_key *acl_key = &entry->key;
+ struct ethsw_core *ethsw = block->ethsw;
+ struct device *dev = ethsw->dev;
+ u8 *cmd_buff;
+ int err;
+
+ cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL);
+ if (!cmd_buff)
+ return -ENOMEM;
+
+ dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff);
+
+ acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff,
+ DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
+ dev_err(dev, "DMA mapping failed\n");
+ kfree(cmd_buff);
+ return -EFAULT;
+ }
+
+ err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ block->acl_id, acl_entry_cfg);
+
+ dma_unmap_single(dev, acl_entry_cfg->key_iova,
+ DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE);
+ if (err) {
+ dev_err(dev, "dpsw_acl_remove_entry() failed %d\n", err);
+ kfree(cmd_buff);
+ return err;
+ }
+
+ kfree(cmd_buff);
+
+ return 0;
+}
+
+static int
+dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_filter_block *block,
+ struct dpaa2_switch_acl_entry *entry)
+{
+ struct dpaa2_switch_acl_entry *tmp;
+ struct list_head *pos, *n;
+ int index = 0;
+
+ if (list_empty(&block->acl_entries)) {
+ list_add(&entry->list, &block->acl_entries);
+ return index;
+ }
+
+ list_for_each_safe(pos, n, &block->acl_entries) {
+ tmp = list_entry(pos, struct dpaa2_switch_acl_entry, list);
+ if (entry->prio < tmp->prio)
+ break;
+ index++;
+ }
+ list_add(&entry->list, pos->prev);
+ return index;
+}
+
+static struct dpaa2_switch_acl_entry*
+dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_filter_block *block,
+ int index)
+{
+ struct dpaa2_switch_acl_entry *tmp;
+ int i = 0;
+
+ list_for_each_entry(tmp, &block->acl_entries, list) {
+ if (i == index)
+ return tmp;
+ ++i;
+ }
+
+ return NULL;
+}
+
+static int
+dpaa2_switch_acl_entry_set_precedence(struct dpaa2_switch_filter_block *block,
+ struct dpaa2_switch_acl_entry *entry,
+ int precedence)
+{
+ int err;
+
+ err = dpaa2_switch_acl_entry_remove(block, entry);
+ if (err)
+ return err;
+
+ entry->cfg.precedence = precedence;
+ return dpaa2_switch_acl_entry_add(block, entry);
+}
+
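+/* The hardware evaluates ACL entries in increasing precedence order, while
+ * the driver keeps its software list sorted by tc priority. Inserting a new
+ * rule therefore re-programs every entry that must stay ahead of it with a
+ * smaller precedence value before adding the rule itself to hardware.
+ */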
+static int
+dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_filter_block *block,
+ struct dpaa2_switch_acl_entry *entry)
+{
+ struct dpaa2_switch_acl_entry *tmp;
+ int index, i, precedence, err;
+
+ /* Add the new ACL entry to the linked list and get its index */
+ index = dpaa2_switch_acl_entry_add_to_list(block, entry);
+
+ /* Move up in priority the ACL entries to make space
+ * for the new filter.
+ */
+ precedence = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES - block->num_acl_rules - 1;
+ for (i = 0; i < index; i++) {
+ tmp = dpaa2_switch_acl_entry_get_by_index(block, i);
+
+ err = dpaa2_switch_acl_entry_set_precedence(block, tmp,
+ precedence);
+ if (err)
+ return err;
+
+ precedence++;
+ }
+
+ /* Add the new entry to hardware */
+ entry->cfg.precedence = precedence;
+ err = dpaa2_switch_acl_entry_add(block, entry);
+ block->num_acl_rules++;
+
+ return err;
+}
+
+static struct dpaa2_switch_acl_entry *
+dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_filter_block *block,
+ unsigned long cookie)
+{
+ struct dpaa2_switch_acl_entry *tmp, *n;
+
+ list_for_each_entry_safe(tmp, n, &block->acl_entries, list) {
+ if (tmp->cookie == cookie)
+ return tmp;
+ }
+ return NULL;
+}
+
+static int
+dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_filter_block *block,
+ struct dpaa2_switch_acl_entry *entry)
+{
+ struct dpaa2_switch_acl_entry *tmp, *n;
+ int index = 0;
+
+ list_for_each_entry_safe(tmp, n, &block->acl_entries, list) {
+ if (tmp->cookie == entry->cookie)
+ return index;
+ index++;
+ }
+ return -ENOENT;
+}
+
+static struct dpaa2_switch_mirror_entry *
+dpaa2_switch_mirror_find_entry_by_cookie(struct dpaa2_switch_filter_block *block,
+ unsigned long cookie)
+{
+ struct dpaa2_switch_mirror_entry *tmp, *n;
+
+ list_for_each_entry_safe(tmp, n, &block->mirror_entries, list) {
+ if (tmp->cookie == cookie)
+ return tmp;
+ }
+ return NULL;
+}
+
+static int
+dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_filter_block *block,
+ struct dpaa2_switch_acl_entry *entry)
+{
+ struct dpaa2_switch_acl_entry *tmp;
+ int index, i, precedence, err;
+
+ index = dpaa2_switch_acl_entry_get_index(block, entry);
+
+ /* Remove from hardware the ACL entry */
+ err = dpaa2_switch_acl_entry_remove(block, entry);
+ if (err)
+ return err;
+
+ block->num_acl_rules--;
+
+ /* Remove it from the list also */
+ list_del(&entry->list);
+
+ /* Move down in priority the entries over the deleted one */
+ precedence = entry->cfg.precedence;
+ for (i = index - 1; i >= 0; i--) {
+ tmp = dpaa2_switch_acl_entry_get_by_index(block, i);
+ err = dpaa2_switch_acl_entry_set_precedence(block, tmp,
+ precedence);
+ if (err)
+ return err;
+
+ precedence--;
+ }
+
+ kfree(entry);
+
+ return 0;
+}
+
+static int dpaa2_switch_tc_parse_action_acl(struct ethsw_core *ethsw,
+ struct flow_action_entry *cls_act,
+ struct dpsw_acl_result *dpsw_act,
+ struct netlink_ext_ack *extack)
+{
+ int err = 0;
+
+ switch (cls_act->id) {
+ case FLOW_ACTION_TRAP:
+ dpsw_act->action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF;
+ break;
+ case FLOW_ACTION_REDIRECT:
+ if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Destination not a DPAA2 switch port");
+ return -EOPNOTSUPP;
+ }
+
+ dpsw_act->if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);
+ dpsw_act->action = DPSW_ACL_ACTION_REDIRECT;
+ break;
+ case FLOW_ACTION_DROP:
+ dpsw_act->action = DPSW_ACL_ACTION_DROP;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Action not supported");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+out:
+ return err;
+}
+
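+/* The switch supports a single mirror (reflection) destination port: the
+ * first mirroring rule selects it and later rules may only reuse it. The
+ * per-rule configuration is replicated on every port sharing the filter
+ * block, with a rollback path in case any port fails.
+ */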
+static int
+dpaa2_switch_block_add_mirror(struct dpaa2_switch_filter_block *block,
+ struct dpaa2_switch_mirror_entry *entry,
+ u16 to, struct netlink_ext_ack *extack)
+{
+ unsigned long block_ports = block->ports;
+ struct ethsw_core *ethsw = block->ethsw;
+ struct ethsw_port_priv *port_priv;
+ unsigned long ports_added = 0;
+ u16 vlan = entry->cfg.vlan_id;
+ bool mirror_port_enabled;
+ int err, port;
+
+	/* Set up the mirroring port */
+ mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
+ if (!mirror_port_enabled) {
+ err = dpsw_set_reflection_if(ethsw->mc_io, 0,
+ ethsw->dpsw_handle, to);
+ if (err)
+ return err;
+ ethsw->mirror_port = to;
+ }
+
+	/* Set up the same egress mirroring configuration on all the switch
+	 * ports that share the same filter block.
+	 */
+ for_each_set_bit(port, &block_ports, ethsw->sw_attr.num_ifs) {
+ port_priv = ethsw->ports[port];
+
+ /* We cannot add a per VLAN mirroring rule if the VLAN in
+ * question is not installed on the switch port.
+ */
+ if (entry->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_VLAN &&
+ !(port_priv->vlans[vlan] & ETHSW_VLAN_MEMBER)) {
+ NL_SET_ERR_MSG(extack,
+ "VLAN must be installed on the switch port");
+ err = -EINVAL;
+ goto err_remove_filters;
+ }
+
+ err = dpsw_if_add_reflection(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port, &entry->cfg);
+ if (err)
+ goto err_remove_filters;
+
+ ports_added |= BIT(port);
+ }
+
+ list_add(&entry->list, &block->mirror_entries);
+
+ return 0;
+
+err_remove_filters:
+ for_each_set_bit(port, &ports_added, ethsw->sw_attr.num_ifs) {
+ dpsw_if_remove_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port, &entry->cfg);
+ }
+
+ if (!mirror_port_enabled)
+ ethsw->mirror_port = ethsw->sw_attr.num_ifs;
+
+ return err;
+}
+
+static int
+dpaa2_switch_block_remove_mirror(struct dpaa2_switch_filter_block *block,
+ struct dpaa2_switch_mirror_entry *entry)
+{
+ struct dpsw_reflection_cfg *cfg = &entry->cfg;
+ unsigned long block_ports = block->ports;
+ struct ethsw_core *ethsw = block->ethsw;
+ int port;
+
+ /* Remove this mirroring configuration from all the ports belonging to
+ * the filter block.
+ */
+ for_each_set_bit(port, &block_ports, ethsw->sw_attr.num_ifs)
+ dpsw_if_remove_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port, cfg);
+
+ /* Also remove it from the list of mirror filters */
+ list_del(&entry->list);
+ kfree(entry);
+
+ /* If this was the last mirror filter, then unset the mirror port */
+ if (list_empty(&block->mirror_entries))
+ ethsw->mirror_port = ethsw->sw_attr.num_ifs;
+
+ return 0;
+}
+
+static int
+dpaa2_switch_cls_flower_replace_acl(struct dpaa2_switch_filter_block *block,
+ struct flow_cls_offload *cls)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct dpaa2_switch_acl_entry *acl_entry;
+ struct ethsw_core *ethsw = block->ethsw;
+ struct flow_action_entry *act;
+ int err;
+
+ if (dpaa2_switch_acl_tbl_is_full(block)) {
+ NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
+ return -ENOMEM;
+ }
+
+ acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL);
+ if (!acl_entry)
+ return -ENOMEM;
+
+ err = dpaa2_switch_flower_parse_key(cls, &acl_entry->key);
+ if (err)
+ goto free_acl_entry;
+
+ act = &rule->action.entries[0];
+ err = dpaa2_switch_tc_parse_action_acl(ethsw, act,
+ &acl_entry->cfg.result, extack);
+ if (err)
+ goto free_acl_entry;
+
+ acl_entry->prio = cls->common.prio;
+ acl_entry->cookie = cls->cookie;
+
+ err = dpaa2_switch_acl_tbl_add_entry(block, acl_entry);
+ if (err)
+ goto free_acl_entry;
+
+ return 0;
+
+free_acl_entry:
+ kfree(acl_entry);
+
+ return err;
+}
+
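+/* Mirroring rules are offloaded only for exact VLAN ID matches; masked VLAN
+ * IDs and matches on the VLAN priority or DEI bits are rejected.
+ */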
+static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
+ u16 *vlan)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
+ struct netlink_ext_ack *extack = cls->common.extack;
+ int ret = -EOPNOTSUPP;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Mirroring is supported only per VLAN");
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+
+ if (match.mask->vlan_priority != 0 ||
+ match.mask->vlan_dei != 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only matching on VLAN ID supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (match.mask->vlan_id != 0xFFF) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Masked matching not supported");
+ return -EOPNOTSUPP;
+ }
+
+ *vlan = (u16)match.key->vlan_id;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int
+dpaa2_switch_cls_flower_replace_mirror(struct dpaa2_switch_filter_block *block,
+ struct flow_cls_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct dpaa2_switch_mirror_entry *mirror_entry;
+ struct ethsw_core *ethsw = block->ethsw;
+ struct dpaa2_switch_mirror_entry *tmp;
+ struct flow_action_entry *cls_act;
+ struct list_head *pos, *n;
+ bool mirror_port_enabled;
+ u16 if_id, vlan;
+ int err;
+
+ mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
+ cls_act = &cls->rule->action.entries[0];
+
+ /* Offload rules only when the destination is a DPAA2 switch port */
+ if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Destination not a DPAA2 switch port");
+ return -EOPNOTSUPP;
+ }
+ if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);
+
+	/* The switch has a single mirror port, even though egress mirroring
+	 * can be requested from any switch port. Allow a new mirroring rule
+	 * only when its destination matches the already configured one.
+	 */
+ if (mirror_port_enabled && ethsw->mirror_port != if_id) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Multiple mirror ports not supported");
+ return -EBUSY;
+ }
+
+ /* Parse the key */
+ err = dpaa2_switch_flower_parse_mirror_key(cls, &vlan);
+ if (err)
+ return err;
+
+ /* Make sure that we don't already have a mirror rule with the same
+ * configuration.
+ */
+ list_for_each_safe(pos, n, &block->mirror_entries) {
+ tmp = list_entry(pos, struct dpaa2_switch_mirror_entry, list);
+
+ if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_VLAN &&
+ tmp->cfg.vlan_id == vlan) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VLAN mirror filter already installed");
+ return -EBUSY;
+ }
+ }
+
+ mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL);
+ if (!mirror_entry)
+ return -ENOMEM;
+
+ mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_VLAN;
+ mirror_entry->cfg.vlan_id = vlan;
+ mirror_entry->cookie = cls->cookie;
+
+ return dpaa2_switch_block_add_mirror(block, mirror_entry, if_id,
+ extack);
+}
+
+int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_filter_block *block,
+ struct flow_cls_offload *cls)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct flow_action_entry *act;
+
+ if (!flow_offload_has_one_action(&rule->action)) {
+ NL_SET_ERR_MSG(extack, "Only singular actions are supported");
+ return -EOPNOTSUPP;
+ }
+
+ act = &rule->action.entries[0];
+ switch (act->id) {
+ case FLOW_ACTION_REDIRECT:
+ case FLOW_ACTION_TRAP:
+ case FLOW_ACTION_DROP:
+ return dpaa2_switch_cls_flower_replace_acl(block, cls);
+ case FLOW_ACTION_MIRRED:
+ return dpaa2_switch_cls_flower_replace_mirror(block, cls);
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Action not supported");
+ return -EOPNOTSUPP;
+ }
+}
+
+int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_filter_block *block,
+ struct flow_cls_offload *cls)
+{
+ struct dpaa2_switch_mirror_entry *mirror_entry;
+ struct dpaa2_switch_acl_entry *acl_entry;
+
+	/* If this filter is an ACL one, remove it */
+ acl_entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(block,
+ cls->cookie);
+ if (acl_entry)
+ return dpaa2_switch_acl_tbl_remove_entry(block, acl_entry);
+
+ /* If not, then it has to be a mirror */
+ mirror_entry = dpaa2_switch_mirror_find_entry_by_cookie(block,
+ cls->cookie);
+ if (mirror_entry)
+ return dpaa2_switch_block_remove_mirror(block,
+ mirror_entry);
+
+ return 0;
+}
+
+static int
+dpaa2_switch_cls_matchall_replace_acl(struct dpaa2_switch_filter_block *block,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct ethsw_core *ethsw = block->ethsw;
+ struct dpaa2_switch_acl_entry *acl_entry;
+ struct flow_action_entry *act;
+ int err;
+
+ if (dpaa2_switch_acl_tbl_is_full(block)) {
+ NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
+ return -ENOMEM;
+ }
+
+ acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL);
+ if (!acl_entry)
+ return -ENOMEM;
+
+ act = &cls->rule->action.entries[0];
+ err = dpaa2_switch_tc_parse_action_acl(ethsw, act,
+ &acl_entry->cfg.result, extack);
+ if (err)
+ goto free_acl_entry;
+
+ acl_entry->prio = cls->common.prio;
+ acl_entry->cookie = cls->cookie;
+
+ err = dpaa2_switch_acl_tbl_add_entry(block, acl_entry);
+ if (err)
+ goto free_acl_entry;
+
+ return 0;
+
+free_acl_entry:
+ kfree(acl_entry);
+
+ return err;
+}
+
+static int
+dpaa2_switch_cls_matchall_replace_mirror(struct dpaa2_switch_filter_block *block,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct dpaa2_switch_mirror_entry *mirror_entry;
+ struct ethsw_core *ethsw = block->ethsw;
+ struct dpaa2_switch_mirror_entry *tmp;
+ struct flow_action_entry *cls_act;
+ struct list_head *pos, *n;
+ bool mirror_port_enabled;
+ u16 if_id;
+
+ mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
+ cls_act = &cls->rule->action.entries[0];
+
+ /* Offload rules only when the destination is a DPAA2 switch port */
+ if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Destination not a DPAA2 switch port");
+ return -EOPNOTSUPP;
+ }
+ if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);
+
+	/* The switch has a single mirror port, even though egress mirroring
+	 * can be requested from any switch port. Allow a new mirroring rule
+	 * only when its destination matches the already configured one.
+	 */
+ if (mirror_port_enabled && ethsw->mirror_port != if_id) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Multiple mirror ports not supported");
+ return -EBUSY;
+ }
+
+ /* Make sure that we don't already have a mirror rule with the same
+ * configuration. One matchall rule per block is the maximum.
+ */
+ list_for_each_safe(pos, n, &block->mirror_entries) {
+ tmp = list_entry(pos, struct dpaa2_switch_mirror_entry, list);
+
+ if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_ALL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matchall mirror filter already installed");
+ return -EBUSY;
+ }
+ }
+
+ mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL);
+ if (!mirror_entry)
+ return -ENOMEM;
+
+ mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_ALL;
+ mirror_entry->cookie = cls->cookie;
+
+ return dpaa2_switch_block_add_mirror(block, mirror_entry, if_id,
+ extack);
+}
+
+int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_filter_block *block,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct flow_action_entry *act;
+
+ if (!flow_offload_has_one_action(&cls->rule->action)) {
+ NL_SET_ERR_MSG(extack, "Only singular actions are supported");
+ return -EOPNOTSUPP;
+ }
+
+ act = &cls->rule->action.entries[0];
+ switch (act->id) {
+ case FLOW_ACTION_REDIRECT:
+ case FLOW_ACTION_TRAP:
+ case FLOW_ACTION_DROP:
+ return dpaa2_switch_cls_matchall_replace_acl(block, cls);
+ case FLOW_ACTION_MIRRED:
+ return dpaa2_switch_cls_matchall_replace_mirror(block, cls);
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Action not supported");
+ return -EOPNOTSUPP;
+ }
+}
+
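+/* Replay all the mirror entries of a filter block on a port that joins it;
+ * unwind the entries already applied if any replay fails.
+ */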
+int dpaa2_switch_block_offload_mirror(struct dpaa2_switch_filter_block *block,
+ struct ethsw_port_priv *port_priv)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpaa2_switch_mirror_entry *tmp;
+ int err;
+
+ list_for_each_entry(tmp, &block->mirror_entries, list) {
+ err = dpsw_if_add_reflection(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port_priv->idx, &tmp->cfg);
+ if (err)
+ goto unwind_add;
+ }
+
+ return 0;
+
+unwind_add:
+ list_for_each_entry(tmp, &block->mirror_entries, list)
+ dpsw_if_remove_reflection(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port_priv->idx, &tmp->cfg);
+
+ return err;
+}
+
+int dpaa2_switch_block_unoffload_mirror(struct dpaa2_switch_filter_block *block,
+ struct ethsw_port_priv *port_priv)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpaa2_switch_mirror_entry *tmp;
+ int err;
+
+ list_for_each_entry(tmp, &block->mirror_entries, list) {
+ err = dpsw_if_remove_reflection(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port_priv->idx, &tmp->cfg);
+ if (err)
+ goto unwind_remove;
+ }
+
+ return 0;
+
+unwind_remove:
+ list_for_each_entry(tmp, &block->mirror_entries, list)
+ dpsw_if_add_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port_priv->idx, &tmp->cfg);
+
+ return err;
+}
+
+int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_filter_block *block,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct dpaa2_switch_mirror_entry *mirror_entry;
+ struct dpaa2_switch_acl_entry *acl_entry;
+
+	/* If this filter is an ACL one, remove it */
+ acl_entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(block,
+ cls->cookie);
+ if (acl_entry)
+ return dpaa2_switch_acl_tbl_remove_entry(block,
+ acl_entry);
+
+ /* If not, then it has to be a mirror */
+ mirror_entry = dpaa2_switch_mirror_find_entry_by_cookie(block,
+ cls->cookie);
+ if (mirror_entry)
+ return dpaa2_switch_block_remove_mirror(block,
+ mirror_entry);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
new file mode 100644
index 000000000..b98ef4ba1
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -0,0 +1,3531 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DPAA2 Ethernet Switch driver
+ *
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2021 NXP
+ *
+ */
+
+#include <linux/module.h>
+
+#include <linux/interrupt.h>
+#include <linux/msi.h>
+#include <linux/kthread.h>
+#include <linux/workqueue.h>
+#include <linux/iommu.h>
+#include <net/pkt_cls.h>
+
+#include <linux/fsl/mc.h>
+
+#include "dpaa2-switch.h"
+
+/* Minimal supported DPSW version */
+#define DPSW_MIN_VER_MAJOR 8
+#define DPSW_MIN_VER_MINOR 9
+
+#define DEFAULT_VLAN_ID 1
+
+static u16 dpaa2_switch_port_get_fdb_id(struct ethsw_port_priv *port_priv)
+{
+ return port_priv->fdb->fdb_id;
+}
+
+static struct dpaa2_switch_fdb *dpaa2_switch_fdb_get_unused(struct ethsw_core *ethsw)
+{
+ int i;
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
+ if (!ethsw->fdbs[i].in_use)
+ return &ethsw->fdbs[i];
+ return NULL;
+}
+
+static struct dpaa2_switch_filter_block *
+dpaa2_switch_filter_block_get_unused(struct ethsw_core *ethsw)
+{
+ int i;
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
+ if (!ethsw->filter_blocks[i].in_use)
+ return &ethsw->filter_blocks[i];
+ return NULL;
+}
+
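+/* Each standalone port owns a private FDB, while ports under the same
+ * bridge share one. Joining a bridge means adopting the FDB of the first
+ * switch port already in it; leaving means claiming an unused FDB again.
+ */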
+static u16 dpaa2_switch_port_set_fdb(struct ethsw_port_priv *port_priv,
+ struct net_device *bridge_dev)
+{
+ struct ethsw_port_priv *other_port_priv = NULL;
+ struct dpaa2_switch_fdb *fdb;
+ struct net_device *other_dev;
+ struct list_head *iter;
+
+ /* If we leave a bridge (bridge_dev is NULL), find an unused
+ * FDB and use that.
+ */
+ if (!bridge_dev) {
+ fdb = dpaa2_switch_fdb_get_unused(port_priv->ethsw_data);
+
+		/* If there is no unused FDB, we must be the last port that
+		 * leaves the last bridge; all the others are standalone. We
+		 * can just keep the FDB that we already have.
+		 */
+
+ if (!fdb) {
+ port_priv->fdb->bridge_dev = NULL;
+ return 0;
+ }
+
+ port_priv->fdb = fdb;
+ port_priv->fdb->in_use = true;
+ port_priv->fdb->bridge_dev = NULL;
+ return 0;
+ }
+
+	/* The call to netdev_for_each_lower_dev() below requires the RTNL
+	 * lock to be held. Assert on it so that it's easier to catch new code
+	 * paths that reach this point without the RTNL lock.
+	 */
+ ASSERT_RTNL();
+
+	/* If part of a bridge, use the FDB of the first DPAA2 switch
+	 * interface present in that bridge.
+	 */
+ netdev_for_each_lower_dev(bridge_dev, other_dev, iter) {
+ if (!dpaa2_switch_port_dev_check(other_dev))
+ continue;
+
+ if (other_dev == port_priv->netdev)
+ continue;
+
+ other_port_priv = netdev_priv(other_dev);
+ break;
+ }
+
+ /* The current port is about to change its FDB to the one used by the
+ * first port that joined the bridge.
+ */
+ if (other_port_priv) {
+ /* The previous FDB is about to become unused, since the
+ * interface is no longer standalone.
+ */
+ port_priv->fdb->in_use = false;
+ port_priv->fdb->bridge_dev = NULL;
+
+ /* Get a reference to the new FDB */
+ port_priv->fdb = other_port_priv->fdb;
+ }
+
+ /* Keep track of the new upper bridge device */
+ port_priv->fdb->bridge_dev = bridge_dev;
+
+ return 0;
+}
+
+static void dpaa2_switch_fdb_get_flood_cfg(struct ethsw_core *ethsw, u16 fdb_id,
+ enum dpsw_flood_type type,
+ struct dpsw_egress_flood_cfg *cfg)
+{
+ int i = 0, j;
+
+ memset(cfg, 0, sizeof(*cfg));
+
+	/* Add all the DPAA2 switch ports found in the same bridging domain to
+	 * the egress flooding domain.
+	 */
+ for (j = 0; j < ethsw->sw_attr.num_ifs; j++) {
+ if (!ethsw->ports[j])
+ continue;
+ if (ethsw->ports[j]->fdb->fdb_id != fdb_id)
+ continue;
+
+ if (type == DPSW_BROADCAST && ethsw->ports[j]->bcast_flood)
+ cfg->if_id[i++] = ethsw->ports[j]->idx;
+ else if (type == DPSW_FLOODING && ethsw->ports[j]->ucast_flood)
+ cfg->if_id[i++] = ethsw->ports[j]->idx;
+ }
+
+ /* Add the CTRL interface to the egress flooding domain */
+ cfg->if_id[i++] = ethsw->sw_attr.num_ifs;
+
+ cfg->fdb_id = fdb_id;
+ cfg->flood_type = type;
+ cfg->num_ifs = i;
+}
+
+static int dpaa2_switch_fdb_set_egress_flood(struct ethsw_core *ethsw, u16 fdb_id)
+{
+ struct dpsw_egress_flood_cfg flood_cfg;
+ int err;
+
+	/* Set up the broadcast flooding domain */
+ dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_BROADCAST, &flood_cfg);
+ err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &flood_cfg);
+ if (err) {
+ dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err);
+ return err;
+ }
+
+	/* Set up the unknown flooding domain */
+ dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_FLOODING, &flood_cfg);
+ err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &flood_cfg);
+ if (err) {
+ dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
+ dma_addr_t iova_addr)
+{
+ phys_addr_t phys_addr;
+
+ phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
+
+ return phys_to_virt(phys_addr);
+}
+
+static int dpaa2_switch_add_vlan(struct ethsw_port_priv *port_priv, u16 vid)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpsw_vlan_cfg vcfg = {0};
+ int err;
+
+ vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
+ err = dpsw_vlan_add(ethsw->mc_io, 0,
+ ethsw->dpsw_handle, vid, &vcfg);
+ if (err) {
+ dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err);
+ return err;
+ }
+ ethsw->vlans[vid] = ETHSW_VLAN_MEMBER;
+
+ return 0;
+}
+
+static bool dpaa2_switch_port_is_up(struct ethsw_port_priv *port_priv)
+{
+ struct net_device *netdev = port_priv->netdev;
+ struct dpsw_link_state state;
+ int err;
+
+ err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx, &state);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
+ return true;
+ }
+
+ WARN_ONCE(state.up > 1, "Garbage read into link_state");
+
+ return state.up ? true : false;
+}
+
+static int dpaa2_switch_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct net_device *netdev = port_priv->netdev;
+ struct dpsw_tci_cfg tci_cfg = { 0 };
+ bool up;
+ int err, ret;
+
+ err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port_priv->idx, &tci_cfg);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_get_tci err %d\n", err);
+ return err;
+ }
+
+ tci_cfg.vlan_id = pvid;
+
+ /* Interface needs to be down to change PVID */
+ up = dpaa2_switch_port_is_up(port_priv);
+ if (up) {
+ err = dpsw_if_disable(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port_priv->idx);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_disable err %d\n", err);
+ return err;
+ }
+ }
+
+ err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port_priv->idx, &tci_cfg);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
+ goto set_tci_error;
+ }
+
+ /* Delete previous PVID info and mark the new one */
+ port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID;
+ port_priv->vlans[pvid] |= ETHSW_VLAN_PVID;
+ port_priv->pvid = pvid;
+
+set_tci_error:
+ if (up) {
+ ret = dpsw_if_enable(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port_priv->idx);
+ if (ret) {
+ netdev_err(netdev, "dpsw_if_enable err %d\n", ret);
+ return ret;
+ }
+ }
+
+ return err;
+}
+
+static int dpaa2_switch_port_add_vlan(struct ethsw_port_priv *port_priv,
+ u16 vid, u16 flags)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct net_device *netdev = port_priv->netdev;
+ struct dpsw_vlan_if_cfg vcfg = {0};
+ int err;
+
+ if (port_priv->vlans[vid]) {
+ netdev_warn(netdev, "VLAN %d already configured\n", vid);
+ return -EEXIST;
+ }
+
+	/* If hit, this VLAN rule will lead the packet into the FDB table
+	 * specified in the VLAN configuration below.
+	 */
+ vcfg.num_ifs = 1;
+ vcfg.if_id[0] = port_priv->idx;
+ vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
+ vcfg.options |= DPSW_VLAN_ADD_IF_OPT_FDB_ID;
+ err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg);
+ if (err) {
+ netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
+ return err;
+ }
+
+ port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;
+
+ if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
+ err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ vid, &vcfg);
+ if (err) {
+ netdev_err(netdev,
+ "dpsw_vlan_add_if_untagged err %d\n", err);
+ return err;
+ }
+ port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
+ }
+
+ if (flags & BRIDGE_VLAN_INFO_PVID) {
+ err = dpaa2_switch_port_set_pvid(port_priv, vid);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static enum dpsw_stp_state br_stp_state_to_dpsw(u8 state)
+{
+ switch (state) {
+ case BR_STATE_DISABLED:
+ return DPSW_STP_STATE_DISABLED;
+ case BR_STATE_LISTENING:
+ return DPSW_STP_STATE_LISTENING;
+ case BR_STATE_LEARNING:
+ return DPSW_STP_STATE_LEARNING;
+ case BR_STATE_FORWARDING:
+ return DPSW_STP_STATE_FORWARDING;
+ case BR_STATE_BLOCKING:
+ return DPSW_STP_STATE_BLOCKING;
+ default:
+ return DPSW_STP_STATE_DISABLED;
+ }
+}
+
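+/* The firmware tracks the STP state per {interface, VLAN} pair, so the
+ * bridge port state must be propagated to every VLAN the port is a member
+ * of.
+ */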
+static int dpaa2_switch_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state)
+{
+ struct dpsw_stp_cfg stp_cfg = {0};
+ int err;
+ u16 vid;
+
+ if (!netif_running(port_priv->netdev) || state == port_priv->stp_state)
+ return 0; /* Nothing to do */
+
+ stp_cfg.state = br_stp_state_to_dpsw(state);
+ for (vid = 0; vid <= VLAN_VID_MASK; vid++) {
+ if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
+ stp_cfg.vlan_id = vid;
+ err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx, &stp_cfg);
+ if (err) {
+ netdev_err(port_priv->netdev,
+ "dpsw_if_set_stp err %d\n", err);
+ return err;
+ }
+ }
+ }
+
+ port_priv->stp_state = state;
+
+ return 0;
+}
+
+static int dpaa2_switch_dellink(struct ethsw_core *ethsw, u16 vid)
+{
+ struct ethsw_port_priv *ppriv_local = NULL;
+ int i, err;
+
+ if (!ethsw->vlans[vid])
+ return -ENOENT;
+
+ err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid);
+ if (err) {
+ dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err);
+ return err;
+ }
+ ethsw->vlans[vid] = 0;
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ ppriv_local = ethsw->ports[i];
+ if (ppriv_local)
+ ppriv_local->vlans[vid] = 0;
+ }
+
+ return 0;
+}
+
+static int dpaa2_switch_port_fdb_add_uc(struct ethsw_port_priv *port_priv,
+ const unsigned char *addr)
+{
+ struct dpsw_fdb_unicast_cfg entry = {0};
+ u16 fdb_id;
+ int err;
+
+ entry.if_egress = port_priv->idx;
+ entry.type = DPSW_FDB_ENTRY_STATIC;
+ ether_addr_copy(entry.mac_addr, addr);
+
+ fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
+ err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ fdb_id, &entry);
+ if (err)
+ netdev_err(port_priv->netdev,
+ "dpsw_fdb_add_unicast err %d\n", err);
+ return err;
+}
+
+static int dpaa2_switch_port_fdb_del_uc(struct ethsw_port_priv *port_priv,
+ const unsigned char *addr)
+{
+ struct dpsw_fdb_unicast_cfg entry = {0};
+ u16 fdb_id;
+ int err;
+
+ entry.if_egress = port_priv->idx;
+ entry.type = DPSW_FDB_ENTRY_STATIC;
+ ether_addr_copy(entry.mac_addr, addr);
+
+ fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
+ err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ fdb_id, &entry);
+	/* Silently discard the error when the del command is called repeatedly */
+ if (err && err != -ENXIO)
+ netdev_err(port_priv->netdev,
+ "dpsw_fdb_remove_unicast err %d\n", err);
+ return err;
+}
+
+static int dpaa2_switch_port_fdb_add_mc(struct ethsw_port_priv *port_priv,
+ const unsigned char *addr)
+{
+ struct dpsw_fdb_multicast_cfg entry = {0};
+ u16 fdb_id;
+ int err;
+
+ ether_addr_copy(entry.mac_addr, addr);
+ entry.type = DPSW_FDB_ENTRY_STATIC;
+ entry.num_ifs = 1;
+ entry.if_id[0] = port_priv->idx;
+
+ fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
+ err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ fdb_id, &entry);
+	/* Silently discard the error when the add command is called repeatedly */
+ if (err && err != -ENXIO)
+ netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n",
+ err);
+ return err;
+}
+
+static int dpaa2_switch_port_fdb_del_mc(struct ethsw_port_priv *port_priv,
+ const unsigned char *addr)
+{
+ struct dpsw_fdb_multicast_cfg entry = {0};
+ u16 fdb_id;
+ int err;
+
+ ether_addr_copy(entry.mac_addr, addr);
+ entry.type = DPSW_FDB_ENTRY_STATIC;
+ entry.num_ifs = 1;
+ entry.if_id[0] = port_priv->idx;
+
+ fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
+ err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ fdb_id, &entry);
+	/* Silently discard the error when the del command is called repeatedly */
+ if (err && err != -ENAVAIL)
+ netdev_err(port_priv->netdev,
+ "dpsw_fdb_remove_multicast err %d\n", err);
+ return err;
+}
+
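+/* Aggregate the per-interface hardware counters into rtnl_link_stats64.
+ * Filtered ingress frames are accounted as rx_dropped on top of the
+ * explicit ingress discard counter.
+ */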
+static void dpaa2_switch_port_get_stats(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ u64 tmp;
+ int err;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_ING_FRAME, &stats->rx_packets);
+ if (err)
+ goto error;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_EGR_FRAME, &stats->tx_packets);
+ if (err)
+ goto error;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_ING_BYTE, &stats->rx_bytes);
+ if (err)
+ goto error;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_EGR_BYTE, &stats->tx_bytes);
+ if (err)
+ goto error;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_ING_FRAME_DISCARD,
+ &stats->rx_dropped);
+ if (err)
+ goto error;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_ING_FLTR_FRAME,
+ &tmp);
+ if (err)
+ goto error;
+ stats->rx_dropped += tmp;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_EGR_FRAME_DISCARD,
+ &stats->tx_dropped);
+ if (err)
+ goto error;
+
+ return;
+
+error:
+ netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
+}
+
+static bool dpaa2_switch_port_has_offload_stats(const struct net_device *netdev,
+ int attr_id)
+{
+ return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT);
+}
+
+static int dpaa2_switch_port_get_offload_stats(int attr_id,
+ const struct net_device *netdev,
+ void *sp)
+{
+ switch (attr_id) {
+ case IFLA_OFFLOAD_XSTATS_CPU_HIT:
+ dpaa2_switch_port_get_stats((struct net_device *)netdev, sp);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int err;
+
+ err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io,
+ 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ (u16)ETHSW_L2_MAX_FRM(mtu));
+ if (err) {
+ netdev_err(netdev,
+ "dpsw_if_set_max_frame_length() err %d\n", err);
+ return err;
+ }
+
+ netdev->mtu = mtu;
+ return 0;
+}
+
+static int dpaa2_switch_port_link_state_update(struct net_device *netdev)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct dpsw_link_state state;
+ int err;
+
+ /* When we manage the MAC/PHY using phylink there is no need
+ * to manually update the netif_carrier.
+ */
+ if (dpaa2_switch_port_is_type_phy(port_priv))
+ return 0;
+
+	/* Interrupts are received even though no one issued an 'ifconfig up'
+	 * on the switch interface. Ignore these link state update interrupts.
+	 */
+ if (!netif_running(netdev))
+ return 0;
+
+ err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx, &state);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
+ return err;
+ }
+
+ WARN_ONCE(state.up > 1, "Garbage read into link_state");
+
+ if (state.up != port_priv->link_state) {
+ if (state.up) {
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+ } else {
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ }
+ port_priv->link_state = state.up;
+ }
+
+ return 0;
+}
+
+/* Manage all NAPI instances for the control interface.
+ *
+ * We only have one RX queue and one Tx Conf queue for all
+ * switch ports. Therefore, we only need to enable the NAPI instance once, the
+ * first time one of the switch ports runs .dev_open().
+ */
+
+static void dpaa2_switch_enable_ctrl_if_napi(struct ethsw_core *ethsw)
+{
+ int i;
+
+ /* Access to the ethsw->napi_users relies on the RTNL lock */
+ ASSERT_RTNL();
+
+ /* a new interface is using the NAPI instance */
+ ethsw->napi_users++;
+
+ /* if there is already a user of the instance, return */
+ if (ethsw->napi_users > 1)
+ return;
+
+ for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
+ napi_enable(&ethsw->fq[i].napi);
+}
+
+static void dpaa2_switch_disable_ctrl_if_napi(struct ethsw_core *ethsw)
+{
+ int i;
+
+ /* Access to the ethsw->napi_users relies on the RTNL lock */
+ ASSERT_RTNL();
+
+ /* If we are not the last interface using the NAPI, return */
+ ethsw->napi_users--;
+ if (ethsw->napi_users)
+ return;
+
+ for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
+ napi_disable(&ethsw->fq[i].napi);
+}
+
+static int dpaa2_switch_port_open(struct net_device *netdev)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ int err;
+
+ if (!dpaa2_switch_port_is_type_phy(port_priv)) {
+ /* Explicitly set carrier off, otherwise
+ * netif_carrier_ok() will return true and cause 'ip link show'
+ * to report the LOWER_UP flag, even though the link
+ * notification wasn't even received.
+ */
+ netif_carrier_off(netdev);
+ }
+
+ err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_enable err %d\n", err);
+ return err;
+ }
+
+ dpaa2_switch_enable_ctrl_if_napi(ethsw);
+
+ if (dpaa2_switch_port_is_type_phy(port_priv)) {
+ dpaa2_mac_start(port_priv->mac);
+ phylink_start(port_priv->mac->phylink);
+ }
+
+ return 0;
+}
+
+static int dpaa2_switch_port_stop(struct net_device *netdev)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ int err;
+
+ if (dpaa2_switch_port_is_type_phy(port_priv)) {
+ phylink_stop(port_priv->mac->phylink);
+ dpaa2_mac_stop(port_priv->mac);
+ } else {
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
+ }
+
+ err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_disable err %d\n", err);
+ return err;
+ }
+
+ dpaa2_switch_disable_ctrl_if_napi(ethsw);
+
+ return 0;
+}
+
+static int dpaa2_switch_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(dev);
+
+ ppid->id_len = 1;
+ ppid->id[0] = port_priv->ethsw_data->dev_id;
+
+ return 0;
+}
+
+static int dpaa2_switch_port_get_phys_name(struct net_device *netdev, char *name,
+ size_t len)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int err;
+
+ err = snprintf(name, len, "p%d", port_priv->idx);
+ if (err >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
+struct ethsw_dump_ctx {
+ struct net_device *dev;
+ struct sk_buff *skb;
+ struct netlink_callback *cb;
+ int idx;
+};
+
+static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry,
+ struct ethsw_dump_ctx *dump)
+{
+ int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC;
+ u32 portid = NETLINK_CB(dump->cb->skb).portid;
+ u32 seq = dump->cb->nlh->nlmsg_seq;
+ struct nlmsghdr *nlh;
+ struct ndmsg *ndm;
+
+ if (dump->idx < dump->cb->args[2])
+ goto skip;
+
+ nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
+ sizeof(*ndm), NLM_F_MULTI);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ ndm = nlmsg_data(nlh);
+ ndm->ndm_family = AF_BRIDGE;
+ ndm->ndm_pad1 = 0;
+ ndm->ndm_pad2 = 0;
+ ndm->ndm_flags = NTF_SELF;
+ ndm->ndm_type = 0;
+ ndm->ndm_ifindex = dump->dev->ifindex;
+ ndm->ndm_state = is_dynamic ? NUD_REACHABLE : NUD_NOARP;
+
+ if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac_addr))
+ goto nla_put_failure;
+
+ nlmsg_end(dump->skb, nlh);
+
+skip:
+ dump->idx++;
+ return 0;
+
+nla_put_failure:
+ nlmsg_cancel(dump->skb, nlh);
+ return -EMSGSIZE;
+}
+
+static int dpaa2_switch_port_fdb_valid_entry(struct fdb_dump_entry *entry,
+ struct ethsw_port_priv *port_priv)
+{
+ int idx = port_priv->idx;
+ int valid;
+
+ if (entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
+ valid = entry->if_info == port_priv->idx;
+ else
+ valid = entry->if_mask[idx / 8] & BIT(idx % 8);
+
+ return valid;
+}
+
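+/* Dump the entire FDB through a DMA-mapped buffer sized for the maximum
+ * number of entries, then invoke the callback on each entry returned by
+ * the firmware.
+ */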
+static int dpaa2_switch_fdb_iterate(struct ethsw_port_priv *port_priv,
+ dpaa2_switch_fdb_cb_t cb, void *data)
+{
+ struct net_device *net_dev = port_priv->netdev;
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct device *dev = net_dev->dev.parent;
+ struct fdb_dump_entry *fdb_entries;
+ struct fdb_dump_entry fdb_entry;
+ dma_addr_t fdb_dump_iova;
+ u16 num_fdb_entries;
+ u32 fdb_dump_size;
+ int err = 0, i;
+ u8 *dma_mem;
+ u16 fdb_id;
+
+ fdb_dump_size = ethsw->sw_attr.max_fdb_entries * sizeof(fdb_entry);
+ dma_mem = kzalloc(fdb_dump_size, GFP_KERNEL);
+ if (!dma_mem)
+ return -ENOMEM;
+
+ fdb_dump_iova = dma_map_single(dev, dma_mem, fdb_dump_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, fdb_dump_iova)) {
+ netdev_err(net_dev, "dma_map_single() failed\n");
+ err = -ENOMEM;
+ goto err_map;
+ }
+
+ fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
+ err = dpsw_fdb_dump(ethsw->mc_io, 0, ethsw->dpsw_handle, fdb_id,
+ fdb_dump_iova, fdb_dump_size, &num_fdb_entries);
+ if (err) {
+ netdev_err(net_dev, "dpsw_fdb_dump() = %d\n", err);
+ goto err_dump;
+ }
+
+ dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);
+
+ fdb_entries = (struct fdb_dump_entry *)dma_mem;
+ for (i = 0; i < num_fdb_entries; i++) {
+ fdb_entry = fdb_entries[i];
+
+ err = cb(port_priv, &fdb_entry, data);
+ if (err)
+ goto end;
+ }
+
+end:
+ kfree(dma_mem);
+
+ return 0;
+
+err_dump:
+ dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_TO_DEVICE);
+err_map:
+ kfree(dma_mem);
+ return err;
+}
+
+static int dpaa2_switch_fdb_entry_dump(struct ethsw_port_priv *port_priv,
+ struct fdb_dump_entry *fdb_entry,
+ void *data)
+{
+ if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv))
+ return 0;
+
+ return dpaa2_switch_fdb_dump_nl(fdb_entry, data);
+}
+
+static int dpaa2_switch_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ struct net_device *net_dev,
+ struct net_device *filter_dev, int *idx)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
+ struct ethsw_dump_ctx dump = {
+ .dev = net_dev,
+ .skb = skb,
+ .cb = cb,
+ .idx = *idx,
+ };
+ int err;
+
+ err = dpaa2_switch_fdb_iterate(port_priv, dpaa2_switch_fdb_entry_dump, &dump);
+ *idx = dump.idx;
+
+ return err;
+}
+
+static int dpaa2_switch_fdb_entry_fast_age(struct ethsw_port_priv *port_priv,
+ struct fdb_dump_entry *fdb_entry,
+ void *data __always_unused)
+{
+ if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv))
+ return 0;
+
+ if (!(fdb_entry->type & DPSW_FDB_ENTRY_TYPE_DYNAMIC))
+ return 0;
+
+ if (fdb_entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
+ dpaa2_switch_port_fdb_del_uc(port_priv, fdb_entry->mac_addr);
+ else
+ dpaa2_switch_port_fdb_del_mc(port_priv, fdb_entry->mac_addr);
+
+ return 0;
+}
+
+static void dpaa2_switch_port_fast_age(struct ethsw_port_priv *port_priv)
+{
+ dpaa2_switch_fdb_iterate(port_priv,
+ dpaa2_switch_fdb_entry_fast_age, NULL);
+}
+
+static int dpaa2_switch_port_vlan_add(struct net_device *netdev, __be16 proto,
+ u16 vid)
+{
+ struct switchdev_obj_port_vlan vlan = {
+ .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
+ .vid = vid,
+ .obj.orig_dev = netdev,
+ /* This API only allows programming tagged, non-PVID VIDs */
+ .flags = 0,
+ };
+
+ return dpaa2_switch_port_vlans_add(netdev, &vlan);
+}
+
+static int dpaa2_switch_port_vlan_kill(struct net_device *netdev, __be16 proto,
+ u16 vid)
+{
+ struct switchdev_obj_port_vlan vlan = {
+ .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
+ .vid = vid,
+ .obj.orig_dev = netdev,
+ /* This API only allows programming tagged, non-PVID VIDs */
+ .flags = 0,
+ };
+
+ return dpaa2_switch_port_vlans_del(netdev, &vlan);
+}
+
+static int dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv *port_priv)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct net_device *net_dev = port_priv->netdev;
+ struct device *dev = net_dev->dev.parent;
+ u8 mac_addr[ETH_ALEN];
+ int err;
+
+ if (!(ethsw->features & ETHSW_FEATURE_MAC_ADDR))
+ return 0;
+
+ /* Get firmware address, if any */
+ err = dpsw_if_get_port_mac_addr(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port_priv->idx, mac_addr);
+ if (err) {
+ dev_err(dev, "dpsw_if_get_port_mac_addr() failed\n");
+ return err;
+ }
+
+ /* First check if firmware has any address configured by bootloader */
+ if (!is_zero_ether_addr(mac_addr)) {
+ eth_hw_addr_set(net_dev, mac_addr);
+ } else {
+ /* No MAC address configured, fill in net_dev->dev_addr
+ * with a random one
+ */
+ eth_hw_addr_random(net_dev);
+ dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
+
+ /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
+ * practical purposes, this will be our "permanent" mac address,
+ * at least until the next reboot. This move will also permit
+ * register_netdevice() to properly fill up net_dev->perm_addr.
+ */
+ net_dev->addr_assign_type = NET_ADDR_PERM;
+ }
+
+ return 0;
+}
+
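+/* Tx confirmation path: recover the skb backpointer stored at the start of
+ * the buffer by dpaa2_switch_build_single_fd(), then unmap and free it.
+ */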
+static void dpaa2_switch_free_fd(const struct ethsw_core *ethsw,
+ const struct dpaa2_fd *fd)
+{
+ struct device *dev = ethsw->dev;
+ unsigned char *buffer_start;
+ struct sk_buff **skbh, *skb;
+ dma_addr_t fd_addr;
+
+ fd_addr = dpaa2_fd_get_addr(fd);
+ skbh = dpaa2_iova_to_virt(ethsw->iommu_domain, fd_addr);
+
+ skb = *skbh;
+ buffer_start = (unsigned char *)skbh;
+
+ dma_unmap_single(dev, fd_addr,
+ skb_tail_pointer(skb) - buffer_start,
+ DMA_TO_DEVICE);
+
+ /* Move on with skb release */
+ dev_kfree_skb(skb);
+}
+
+static int dpaa2_switch_build_single_fd(struct ethsw_core *ethsw,
+ struct sk_buff *skb,
+ struct dpaa2_fd *fd)
+{
+ struct device *dev = ethsw->dev;
+ struct sk_buff **skbh;
+ dma_addr_t addr;
+ u8 *buff_start;
+ void *hwa;
+
+ buff_start = PTR_ALIGN(skb->data - DPAA2_SWITCH_TX_DATA_OFFSET -
+ DPAA2_SWITCH_TX_BUF_ALIGN,
+ DPAA2_SWITCH_TX_BUF_ALIGN);
+
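+	/* Resulting single-buffer layout (sketch):
+	 *
+	 * buff_start      +DPAA2_SWITCH_SWA_SIZE                skb->data
+	 * |               |                                     |
+	 * +---------------+-------------------------------------+-------+--
+	 * | SWA:          | HWA (FAS in its first 8 bytes)      | frame |
+	 * | skb backptr   |                                     | data  |
+	 * +---------------+-------------------------------------+-------+--
+	 */
+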
+ /* Clear FAS to have consistent values for TX confirmation. It is
+ * located in the first 8 bytes of the buffer's hardware annotation
+ * area
+ */
+ hwa = buff_start + DPAA2_SWITCH_SWA_SIZE;
+ memset(hwa, 0, 8);
+
+ /* Store a backpointer to the skb at the beginning of the buffer
+ * (in the private data area) such that we can release it
+ * on Tx confirm
+ */
+ skbh = (struct sk_buff **)buff_start;
+ *skbh = skb;
+
+ addr = dma_map_single(dev, buff_start,
+ skb_tail_pointer(skb) - buff_start,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, addr)))
+ return -ENOMEM;
+
+ /* Setup the FD fields */
+ memset(fd, 0, sizeof(*fd));
+
+ dpaa2_fd_set_addr(fd, addr);
+ dpaa2_fd_set_offset(fd, (u16)(skb->data - buff_start));
+ dpaa2_fd_set_len(fd, skb->len);
+ dpaa2_fd_set_format(fd, dpaa2_fd_single);
+
+ return 0;
+}
+
+static netdev_tx_t dpaa2_switch_port_tx(struct sk_buff *skb,
+ struct net_device *net_dev)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ int retries = DPAA2_SWITCH_SWP_BUSY_RETRIES;
+ struct dpaa2_fd fd;
+ int err;
+
+ if (unlikely(skb_headroom(skb) < DPAA2_SWITCH_NEEDED_HEADROOM)) {
+ struct sk_buff *ns;
+
+ ns = skb_realloc_headroom(skb, DPAA2_SWITCH_NEEDED_HEADROOM);
+ if (unlikely(!ns)) {
+ net_err_ratelimited("%s: Error reallocating skb headroom\n", net_dev->name);
+ goto err_free_skb;
+ }
+ dev_consume_skb_any(skb);
+ skb = ns;
+ }
+
+ /* We'll be holding a back-reference to the skb until Tx confirmation */
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (unlikely(!skb)) {
+ /* skb_unshare() has already freed the skb */
+ net_err_ratelimited("%s: Error copying the socket buffer\n", net_dev->name);
+ goto err_exit;
+ }
+
+ /* At this stage, we do not support non-linear skbs so just try to
+ * linearize the skb and if that's not working, just drop the packet.
+ */
+ err = skb_linearize(skb);
+ if (err) {
+ net_err_ratelimited("%s: skb_linearize error (%d)!\n", net_dev->name, err);
+ goto err_free_skb;
+ }
+
+ err = dpaa2_switch_build_single_fd(ethsw, skb, &fd);
+ if (unlikely(err)) {
+ net_err_ratelimited("%s: ethsw_build_*_fd() %d\n", net_dev->name, err);
+ goto err_free_skb;
+ }
+
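+	/* The QBMan software portal can be transiently busy; retry the
+	 * enqueue a bounded number of times before giving up on the frame.
+	 */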
+ do {
+ err = dpaa2_io_service_enqueue_qd(NULL,
+ port_priv->tx_qdid,
+ 8, 0, &fd);
+ retries--;
+ } while (err == -EBUSY && retries);
+
+ if (unlikely(err < 0)) {
+ dpaa2_switch_free_fd(ethsw, &fd);
+ goto err_exit;
+ }
+
+ return NETDEV_TX_OK;
+
+err_free_skb:
+ dev_kfree_skb(skb);
+err_exit:
+ return NETDEV_TX_OK;
+}
+
+static int
+dpaa2_switch_setup_tc_cls_flower(struct dpaa2_switch_filter_block *filter_block,
+ struct flow_cls_offload *f)
+{
+ switch (f->command) {
+ case FLOW_CLS_REPLACE:
+ return dpaa2_switch_cls_flower_replace(filter_block, f);
+ case FLOW_CLS_DESTROY:
+ return dpaa2_switch_cls_flower_destroy(filter_block, f);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+dpaa2_switch_setup_tc_cls_matchall(struct dpaa2_switch_filter_block *block,
+ struct tc_cls_matchall_offload *f)
+{
+ switch (f->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return dpaa2_switch_cls_matchall_replace(block, f);
+ case TC_CLSMATCHALL_DESTROY:
+ return dpaa2_switch_cls_matchall_destroy(block, f);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int dpaa2_switch_port_setup_tc_block_cb_ig(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv)
+{
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return dpaa2_switch_setup_tc_cls_flower(cb_priv, type_data);
+ case TC_SETUP_CLSMATCHALL:
+ return dpaa2_switch_setup_tc_cls_matchall(cb_priv, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static LIST_HEAD(dpaa2_switch_block_cb_list);
+
+static int
+dpaa2_switch_port_acl_tbl_bind(struct ethsw_port_priv *port_priv,
+ struct dpaa2_switch_filter_block *block)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct net_device *netdev = port_priv->netdev;
+ struct dpsw_acl_if_cfg acl_if_cfg;
+ int err;
+
+ if (port_priv->filter_block)
+ return -EINVAL;
+
+ acl_if_cfg.if_id[0] = port_priv->idx;
+ acl_if_cfg.num_ifs = 1;
+ err = dpsw_acl_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ block->acl_id, &acl_if_cfg);
+ if (err) {
+ netdev_err(netdev, "dpsw_acl_add_if err %d\n", err);
+ return err;
+ }
+
+ block->ports |= BIT(port_priv->idx);
+ port_priv->filter_block = block;
+
+ return 0;
+}
+
+static int
+dpaa2_switch_port_acl_tbl_unbind(struct ethsw_port_priv *port_priv,
+ struct dpaa2_switch_filter_block *block)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct net_device *netdev = port_priv->netdev;
+ struct dpsw_acl_if_cfg acl_if_cfg;
+ int err;
+
+ if (port_priv->filter_block != block)
+ return -EINVAL;
+
+ acl_if_cfg.if_id[0] = port_priv->idx;
+ acl_if_cfg.num_ifs = 1;
+ err = dpsw_acl_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ block->acl_id, &acl_if_cfg);
+ if (err) {
+ netdev_err(netdev, "dpsw_acl_add_if err %d\n", err);
+ return err;
+ }
+
+ block->ports &= ~BIT(port_priv->idx);
+ port_priv->filter_block = NULL;
+ return 0;
+}
+
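+/* Each tc block is backed by a single filter block (ACL table) shared by all
+ * member ports. Binding moves a port from its private table to the shared
+ * one; a table is marked unused once the last port leaves it.
+ */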
+static int dpaa2_switch_port_block_bind(struct ethsw_port_priv *port_priv,
+ struct dpaa2_switch_filter_block *block)
+{
+ struct dpaa2_switch_filter_block *old_block = port_priv->filter_block;
+ int err;
+
+ /* Offload all the mirror entries found in the block on this new port
+ * joining it.
+ */
+ err = dpaa2_switch_block_offload_mirror(block, port_priv);
+ if (err)
+ return err;
+
+ /* If the port is already bound to this ACL table then do nothing. This
+ * can happen when this port is the first one to join a tc block
+ */
+ if (port_priv->filter_block == block)
+ return 0;
+
+ err = dpaa2_switch_port_acl_tbl_unbind(port_priv, old_block);
+ if (err)
+ return err;
+
+ /* Mark the previous ACL table as being unused if this was the last
+ * port that was using it.
+ */
+ if (old_block->ports == 0)
+ old_block->in_use = false;
+
+ return dpaa2_switch_port_acl_tbl_bind(port_priv, block);
+}
+
+static int
+dpaa2_switch_port_block_unbind(struct ethsw_port_priv *port_priv,
+ struct dpaa2_switch_filter_block *block)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpaa2_switch_filter_block *new_block;
+ int err;
+
+ /* Unoffload all the mirror entries found in the block from the
+ * port leaving it.
+ */
+ err = dpaa2_switch_block_unoffload_mirror(block, port_priv);
+ if (err)
+ return err;
+
+	/* If we are the only port still bound to this block (ACL table),
+	 * keep using it instead of unbinding and rebinding.
+	 */
+ if (block->ports == BIT(port_priv->idx))
+ return 0;
+
+ err = dpaa2_switch_port_acl_tbl_unbind(port_priv, block);
+ if (err)
+ return err;
+
+ if (block->ports == 0)
+ block->in_use = false;
+
+ new_block = dpaa2_switch_filter_block_get_unused(ethsw);
+ new_block->in_use = true;
+ return dpaa2_switch_port_acl_tbl_bind(port_priv, new_block);
+}
+
+static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev,
+ struct flow_block_offload *f)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpaa2_switch_filter_block *filter_block;
+ struct flow_block_cb *block_cb;
+ bool register_block = false;
+ int err;
+
+ block_cb = flow_block_cb_lookup(f->block,
+ dpaa2_switch_port_setup_tc_block_cb_ig,
+ ethsw);
+
+ if (!block_cb) {
+ /* If the filter block is not already known, then this port
+ * must be the first to join it. In this case, we can just
+ * continue to use our private table
+ */
+ filter_block = port_priv->filter_block;
+
+ block_cb = flow_block_cb_alloc(dpaa2_switch_port_setup_tc_block_cb_ig,
+ ethsw, filter_block, NULL);
+ if (IS_ERR(block_cb))
+ return PTR_ERR(block_cb);
+
+ register_block = true;
+ } else {
+ filter_block = flow_block_cb_priv(block_cb);
+ }
+
+ flow_block_cb_incref(block_cb);
+ err = dpaa2_switch_port_block_bind(port_priv, filter_block);
+ if (err)
+ goto err_block_bind;
+
+ if (register_block) {
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list,
+ &dpaa2_switch_block_cb_list);
+ }
+
+ return 0;
+
+err_block_bind:
+ if (!flow_block_cb_decref(block_cb))
+ flow_block_cb_free(block_cb);
+ return err;
+}
+
+static void dpaa2_switch_setup_tc_block_unbind(struct net_device *netdev,
+ struct flow_block_offload *f)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpaa2_switch_filter_block *filter_block;
+ struct flow_block_cb *block_cb;
+ int err;
+
+ block_cb = flow_block_cb_lookup(f->block,
+ dpaa2_switch_port_setup_tc_block_cb_ig,
+ ethsw);
+ if (!block_cb)
+ return;
+
+ filter_block = flow_block_cb_priv(block_cb);
+ err = dpaa2_switch_port_block_unbind(port_priv, filter_block);
+ if (!err && !flow_block_cb_decref(block_cb)) {
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ }
+}
+
+static int dpaa2_switch_setup_tc_block(struct net_device *netdev,
+ struct flow_block_offload *f)
+{
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ f->driver_block_list = &dpaa2_switch_block_cb_list;
+
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ return dpaa2_switch_setup_tc_block_bind(netdev, f);
+ case FLOW_BLOCK_UNBIND:
+ dpaa2_switch_setup_tc_block_unbind(netdev, f);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int dpaa2_switch_port_setup_tc(struct net_device *netdev,
+ enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+	case TC_SETUP_BLOCK:
+		return dpaa2_switch_setup_tc_block(netdev, type_data);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static const struct net_device_ops dpaa2_switch_port_ops = {
+ .ndo_open = dpaa2_switch_port_open,
+ .ndo_stop = dpaa2_switch_port_stop,
+
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_get_stats64 = dpaa2_switch_port_get_stats,
+ .ndo_change_mtu = dpaa2_switch_port_change_mtu,
+ .ndo_has_offload_stats = dpaa2_switch_port_has_offload_stats,
+ .ndo_get_offload_stats = dpaa2_switch_port_get_offload_stats,
+ .ndo_fdb_dump = dpaa2_switch_port_fdb_dump,
+ .ndo_vlan_rx_add_vid = dpaa2_switch_port_vlan_add,
+ .ndo_vlan_rx_kill_vid = dpaa2_switch_port_vlan_kill,
+
+ .ndo_start_xmit = dpaa2_switch_port_tx,
+ .ndo_get_port_parent_id = dpaa2_switch_port_parent_id,
+ .ndo_get_phys_port_name = dpaa2_switch_port_get_phys_name,
+ .ndo_setup_tc = dpaa2_switch_port_setup_tc,
+};
+
+bool dpaa2_switch_port_dev_check(const struct net_device *netdev)
+{
+ return netdev->netdev_ops == &dpaa2_switch_port_ops;
+}
+
+static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
+{
+ struct fsl_mc_device *dpsw_port_dev, *dpmac_dev;
+ struct dpaa2_mac *mac;
+ int err;
+
+ dpsw_port_dev = to_fsl_mc_device(port_priv->netdev->dev.parent);
+ dpmac_dev = fsl_mc_get_endpoint(dpsw_port_dev, port_priv->idx);
+
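+	/* The endpoint may not be probed yet (defer), may be missing or may
+	 * not be a DPMAC at all; only a DPMAC endpoint gets a dpaa2_mac
+	 * instance attached to the switch port.
+	 */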
+ if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
+ return PTR_ERR(dpmac_dev);
+
+ if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
+ return 0;
+
+ mac = kzalloc(sizeof(*mac), GFP_KERNEL);
+ if (!mac)
+ return -ENOMEM;
+
+ mac->mc_dev = dpmac_dev;
+ mac->mc_io = port_priv->ethsw_data->mc_io;
+ mac->net_dev = port_priv->netdev;
+
+ err = dpaa2_mac_open(mac);
+ if (err)
+ goto err_free_mac;
+ port_priv->mac = mac;
+
+ if (dpaa2_switch_port_is_type_phy(port_priv)) {
+ err = dpaa2_mac_connect(mac);
+ if (err) {
+ netdev_err(port_priv->netdev,
+ "Error connecting to the MAC endpoint %pe\n",
+ ERR_PTR(err));
+ goto err_close_mac;
+ }
+ }
+
+ return 0;
+
+err_close_mac:
+ dpaa2_mac_close(mac);
+ port_priv->mac = NULL;
+err_free_mac:
+ kfree(mac);
+ return err;
+}
+
+static void dpaa2_switch_port_disconnect_mac(struct ethsw_port_priv *port_priv)
+{
+ if (dpaa2_switch_port_is_type_phy(port_priv))
+ dpaa2_mac_disconnect(port_priv->mac);
+
+ if (!dpaa2_switch_port_has_mac(port_priv))
+ return;
+
+ dpaa2_mac_close(port_priv->mac);
+ kfree(port_priv->mac);
+ port_priv->mac = NULL;
+}
+
+static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
+{
+ struct device *dev = (struct device *)arg;
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
+ struct ethsw_port_priv *port_priv;
+ u32 status = ~0;
+ int err, if_id;
+
+ err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, &status);
+ if (err) {
+ dev_err(dev, "Can't get irq status (err %d)\n", err);
+ goto out;
+ }
+
+ if_id = (status & 0xFFFF0000) >> 16;
+ port_priv = ethsw->ports[if_id];
+
+ if (status & DPSW_IRQ_EVENT_LINK_CHANGED) {
+ dpaa2_switch_port_link_state_update(port_priv->netdev);
+ dpaa2_switch_port_set_mac_addr(port_priv);
+ }
+
+ if (status & DPSW_IRQ_EVENT_ENDPOINT_CHANGED) {
+ rtnl_lock();
+ if (dpaa2_switch_port_has_mac(port_priv))
+ dpaa2_switch_port_disconnect_mac(port_priv);
+ else
+ dpaa2_switch_port_connect_mac(port_priv);
+ rtnl_unlock();
+ }
+
+out:
+ err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, status);
+ if (err)
+ dev_err(dev, "Can't clear irq status (err %d)\n", err);
+
+ return IRQ_HANDLED;
+}
+
+static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev)
+{
+ struct device *dev = &sw_dev->dev;
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
+ u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
+ struct fsl_mc_device_irq *irq;
+ int err;
+
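+	/* Keep the DPSW interrupt source disabled until the threaded handler
+	 * is in place, then program the event mask and only then enable it.
+	 */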
+ err = fsl_mc_allocate_irqs(sw_dev);
+ if (err) {
+ dev_err(dev, "MC irqs allocation failed\n");
+ return err;
+ }
+
+ if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) {
+ err = -EINVAL;
+ goto free_irq;
+ }
+
+ err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, 0);
+ if (err) {
+ dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
+ goto free_irq;
+ }
+
+ irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
+
+ err = devm_request_threaded_irq(dev, irq->virq, NULL,
+ dpaa2_switch_irq0_handler_thread,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ dev_name(dev), dev);
+ if (err) {
+ dev_err(dev, "devm_request_threaded_irq(): %d\n", err);
+ goto free_irq;
+ }
+
+ err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, mask);
+ if (err) {
+ dev_err(dev, "dpsw_set_irq_mask(): %d\n", err);
+ goto free_devm_irq;
+ }
+
+ err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, 1);
+ if (err) {
+ dev_err(dev, "dpsw_set_irq_enable(): %d\n", err);
+ goto free_devm_irq;
+ }
+
+ return 0;
+
+free_devm_irq:
+ devm_free_irq(dev, irq->virq, dev);
+free_irq:
+ fsl_mc_free_irqs(sw_dev);
+ return err;
+}
+
+static void dpaa2_switch_teardown_irqs(struct fsl_mc_device *sw_dev)
+{
+ struct device *dev = &sw_dev->dev;
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
+ int err;
+
+ err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, 0);
+ if (err)
+ dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
+
+ fsl_mc_free_irqs(sw_dev);
+}
+
+static int dpaa2_switch_port_set_learning(struct ethsw_port_priv *port_priv, bool enable)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ enum dpsw_learning_mode learn_mode;
+ int err;
+
+ if (enable)
+ learn_mode = DPSW_LEARNING_MODE_HW;
+ else
+ learn_mode = DPSW_LEARNING_MODE_DIS;
+
+ err = dpsw_if_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port_priv->idx, learn_mode);
+ if (err)
+ netdev_err(port_priv->netdev, "dpsw_if_set_learning_mode err %d\n", err);
+
+ if (!enable)
+ dpaa2_switch_port_fast_age(port_priv);
+
+ return err;
+}
+
+static int dpaa2_switch_port_attr_stp_state_set(struct net_device *netdev,
+ u8 state)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int err;
+
+ err = dpaa2_switch_port_set_stp_state(port_priv, state);
+ if (err)
+ return err;
+
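+	/* Hardware learning must be off in the non-learning STP states and
+	 * restored to the user-configured setting otherwise.
+	 */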
+ switch (state) {
+ case BR_STATE_DISABLED:
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ err = dpaa2_switch_port_set_learning(port_priv, false);
+ break;
+ case BR_STATE_LEARNING:
+ case BR_STATE_FORWARDING:
+ err = dpaa2_switch_port_set_learning(port_priv,
+ port_priv->learn_ena);
+ break;
+ }
+
+ return err;
+}
+
+static int dpaa2_switch_port_flood(struct ethsw_port_priv *port_priv,
+ struct switchdev_brport_flags flags)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+
+ if (flags.mask & BR_BCAST_FLOOD)
+ port_priv->bcast_flood = !!(flags.val & BR_BCAST_FLOOD);
+
+ if (flags.mask & BR_FLOOD)
+ port_priv->ucast_flood = !!(flags.val & BR_FLOOD);
+
+ return dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
+}
+
+static int dpaa2_switch_port_pre_bridge_flags(struct net_device *netdev,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ if (flags.mask & ~(BR_LEARNING | BR_BCAST_FLOOD | BR_FLOOD |
+ BR_MCAST_FLOOD))
+ return -EINVAL;
+
+ if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD)) {
+ bool multicast = !!(flags.val & BR_MCAST_FLOOD);
+ bool unicast = !!(flags.val & BR_FLOOD);
+
+ if (unicast != multicast) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot configure multicast flooding independently of unicast");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int dpaa2_switch_port_bridge_flags(struct net_device *netdev,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int err;
+
+ if (flags.mask & BR_LEARNING) {
+ bool learn_ena = !!(flags.val & BR_LEARNING);
+
+ err = dpaa2_switch_port_set_learning(port_priv, learn_ena);
+ if (err)
+ return err;
+ port_priv->learn_ena = learn_ena;
+ }
+
+ if (flags.mask & (BR_BCAST_FLOOD | BR_FLOOD | BR_MCAST_FLOOD)) {
+ err = dpaa2_switch_port_flood(port_priv, flags);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int dpaa2_switch_port_attr_set(struct net_device *netdev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ int err = 0;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ err = dpaa2_switch_port_attr_stp_state_set(netdev,
+ attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ if (!attr->u.vlan_filtering) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "The DPAA2 switch does not support VLAN-unaware operation");
+ return -EOPNOTSUPP;
+ }
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ err = dpaa2_switch_port_pre_bridge_flags(netdev, attr->u.brport_flags, extack);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ err = dpaa2_switch_port_bridge_flags(netdev, attr->u.brport_flags, extack);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+int dpaa2_switch_port_vlans_add(struct net_device *netdev,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpsw_attr *attr = &ethsw->sw_attr;
+ int err = 0;
+
+ /* Make sure that the VLAN is not already configured
+ * on the switch port
+ */
+ if (port_priv->vlans[vlan->vid] & ETHSW_VLAN_MEMBER)
+ return -EEXIST;
+
+ /* Check if there is space for a new VLAN */
+ err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &ethsw->sw_attr);
+ if (err) {
+ netdev_err(netdev, "dpsw_get_attributes err %d\n", err);
+ return err;
+ }
+ if (attr->max_vlans - attr->num_vlans < 1)
+ return -ENOSPC;
+
+ if (!port_priv->ethsw_data->vlans[vlan->vid]) {
+ /* this is a new VLAN */
+ err = dpaa2_switch_add_vlan(port_priv, vlan->vid);
+ if (err)
+ return err;
+
+ port_priv->ethsw_data->vlans[vlan->vid] |= ETHSW_VLAN_GLOBAL;
+ }
+
+ return dpaa2_switch_port_add_vlan(port_priv, vlan->vid, vlan->flags);
+}
+
+static int dpaa2_switch_port_lookup_address(struct net_device *netdev, int is_uc,
+ const unsigned char *addr)
+{
+ struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
+ struct netdev_hw_addr *ha;
+
+ netif_addr_lock_bh(netdev);
+ list_for_each_entry(ha, &list->list, list) {
+ if (ether_addr_equal(ha->addr, addr)) {
+ netif_addr_unlock_bh(netdev);
+ return 1;
+ }
+ }
+ netif_addr_unlock_bh(netdev);
+ return 0;
+}
+
+static int dpaa2_switch_port_mdb_add(struct net_device *netdev,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int err;
+
+ /* Check if address is already set on this port */
+ if (dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
+ return -EEXIST;
+
+ err = dpaa2_switch_port_fdb_add_mc(port_priv, mdb->addr);
+ if (err)
+ return err;
+
+ err = dev_mc_add(netdev, mdb->addr);
+ if (err) {
+ netdev_err(netdev, "dev_mc_add err %d\n", err);
+ dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
+ }
+
+ return err;
+}
+
+static int dpaa2_switch_port_obj_add(struct net_device *netdev,
+ const struct switchdev_obj *obj)
+{
+ int err;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = dpaa2_switch_port_vlans_add(netdev,
+ SWITCHDEV_OBJ_PORT_VLAN(obj));
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ err = dpaa2_switch_port_mdb_add(netdev,
+ SWITCHDEV_OBJ_PORT_MDB(obj));
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int dpaa2_switch_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct net_device *netdev = port_priv->netdev;
+ struct dpsw_vlan_if_cfg vcfg;
+ int i, err;
+
+ if (!port_priv->vlans[vid])
+ return -ENOENT;
+
+ if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) {
+ /* If we are deleting the PVID of a port, use VLAN 4095 instead
+ * as we are sure that neither the bridge nor the 8021q module
+ * will use it
+ */
+ err = dpaa2_switch_port_set_pvid(port_priv, 4095);
+ if (err)
+ return err;
+ }
+
+ vcfg.num_ifs = 1;
+ vcfg.if_id[0] = port_priv->idx;
+ if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) {
+ err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ vid, &vcfg);
+ if (err) {
+ netdev_err(netdev,
+ "dpsw_vlan_remove_if_untagged err %d\n",
+ err);
+ }
+ port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED;
+ }
+
+ if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
+ err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ vid, &vcfg);
+ if (err) {
+ netdev_err(netdev,
+ "dpsw_vlan_remove_if err %d\n", err);
+ return err;
+ }
+ port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER;
+
+ /* Delete VLAN from switch if it is no longer configured on
+ * any port
+ */
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ if (ethsw->ports[i] &&
+ ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
+ return 0; /* Found a port member in VID */
+ }
+
+ ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL;
+
+ err = dpaa2_switch_dellink(ethsw, vid);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int dpaa2_switch_port_vlans_del(struct net_device *netdev,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+
+ if (netif_is_bridge_master(vlan->obj.orig_dev))
+ return -EOPNOTSUPP;
+
+ return dpaa2_switch_port_del_vlan(port_priv, vlan->vid);
+}
+
+static int dpaa2_switch_port_mdb_del(struct net_device *netdev,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int err;
+
+ if (!dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
+ return -ENOENT;
+
+ err = dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
+ if (err)
+ return err;
+
+ err = dev_mc_del(netdev, mdb->addr);
+ if (err) {
+ netdev_err(netdev, "dev_mc_del err %d\n", err);
+ return err;
+ }
+
+ return err;
+}
+
+static int dpaa2_switch_port_obj_del(struct net_device *netdev,
+ const struct switchdev_obj *obj)
+{
+ int err;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = dpaa2_switch_port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj));
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ err = dpaa2_switch_port_mdb_del(netdev, SWITCHDEV_OBJ_PORT_MDB(obj));
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+ return err;
+}
+
+static int dpaa2_switch_port_attr_set_event(struct net_device *netdev,
+ struct switchdev_notifier_port_attr_info *ptr)
+{
+ int err;
+
+ err = switchdev_handle_port_attr_set(netdev, ptr,
+ dpaa2_switch_port_dev_check,
+ dpaa2_switch_port_attr_set);
+ return notifier_from_errno(err);
+}
+
+static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
+ struct net_device *upper_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct ethsw_port_priv *other_port_priv;
+ struct net_device *other_dev;
+ struct list_head *iter;
+ bool learn_ena;
+ int err;
+
+ netdev_for_each_lower_dev(upper_dev, other_dev, iter) {
+ if (!dpaa2_switch_port_dev_check(other_dev))
+ continue;
+
+ other_port_priv = netdev_priv(other_dev);
+ if (other_port_priv->ethsw_data != port_priv->ethsw_data) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Interface from a different DPSW is in the bridge already");
+ return -EINVAL;
+ }
+ }
+
+ /* Delete the previously manually installed VLAN 1 */
+ err = dpaa2_switch_port_del_vlan(port_priv, 1);
+ if (err)
+ return err;
+
+ dpaa2_switch_port_set_fdb(port_priv, upper_dev);
+
+ /* Inherit the initial bridge port learning state */
+ learn_ena = br_port_flag_is_set(netdev, BR_LEARNING);
+ err = dpaa2_switch_port_set_learning(port_priv, learn_ena);
+ port_priv->learn_ena = learn_ena;
+
+ /* Setup the egress flood policy (broadcast, unknown unicast) */
+ err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
+ if (err)
+ goto err_egress_flood;
+
+ err = switchdev_bridge_port_offload(netdev, netdev, NULL,
+ NULL, NULL, false, extack);
+ if (err)
+ goto err_switchdev_offload;
+
+ return 0;
+
+err_switchdev_offload:
+err_egress_flood:
+ dpaa2_switch_port_set_fdb(port_priv, NULL);
+ return err;
+}
+
+static int dpaa2_switch_port_clear_rxvlan(struct net_device *vdev, int vid, void *arg)
+{
+ __be16 vlan_proto = htons(ETH_P_8021Q);
+
+ if (vdev)
+ vlan_proto = vlan_dev_vlan_proto(vdev);
+
+ return dpaa2_switch_port_vlan_kill(arg, vlan_proto, vid);
+}
+
+static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, void *arg)
+{
+ __be16 vlan_proto = htons(ETH_P_8021Q);
+
+ if (vdev)
+ vlan_proto = vlan_dev_vlan_proto(vdev);
+
+ return dpaa2_switch_port_vlan_add(arg, vlan_proto, vid);
+}
+
+static void dpaa2_switch_port_pre_bridge_leave(struct net_device *netdev)
+{
+ switchdev_bridge_port_unoffload(netdev, NULL, NULL, NULL);
+}
+
+static int dpaa2_switch_port_bridge_leave(struct net_device *netdev)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct dpaa2_switch_fdb *old_fdb = port_priv->fdb;
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ int err;
+
+ /* First of all, fast age any learn FDB addresses on this switch port */
+ dpaa2_switch_port_fast_age(port_priv);
+
+ /* Clear all RX VLANs installed through vlan_vid_add() either as VLAN
+ * upper devices or otherwise from the FDB table that we are about to
+ * leave
+ */
+ err = vlan_for_each(netdev, dpaa2_switch_port_clear_rxvlan, netdev);
+ if (err)
+ netdev_err(netdev, "Unable to clear RX VLANs from old FDB table, err (%d)\n", err);
+
+ dpaa2_switch_port_set_fdb(port_priv, NULL);
+
+ /* Restore all RX VLANs into the new FDB table that we just joined */
+ err = vlan_for_each(netdev, dpaa2_switch_port_restore_rxvlan, netdev);
+ if (err)
+ netdev_err(netdev, "Unable to restore RX VLANs to the new FDB, err (%d)\n", err);
+
+ /* Reset the flooding state to denote that this port can send any
+ * packet in standalone mode. With this, we are also ensuring that any
+ * later bridge join will have the flooding flag on.
+ */
+ port_priv->bcast_flood = true;
+ port_priv->ucast_flood = true;
+
+ /* Setup the egress flood policy (broadcast, unknown unicast).
+ * When the port is not under a bridge, only the CTRL interface is part
+ * of the flooding domain besides the actual port
+ */
+ err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
+ if (err)
+ return err;
+
+ /* Recreate the egress flood domain of the FDB that we just left */
+ err = dpaa2_switch_fdb_set_egress_flood(ethsw, old_fdb->fdb_id);
+ if (err)
+ return err;
+
+ /* No HW learning when not under a bridge */
+ err = dpaa2_switch_port_set_learning(port_priv, false);
+ if (err)
+ return err;
+ port_priv->learn_ena = false;
+
+ /* Add the VLAN 1 as PVID when not under a bridge. We need this since
+	 * the dpaa2 switch interfaces are not capable of being VLAN unaware
+ */
+ return dpaa2_switch_port_add_vlan(port_priv, DEFAULT_VLAN_ID,
+ BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID);
+}
+
+static int dpaa2_switch_prevent_bridging_with_8021q_upper(struct net_device *netdev)
+{
+ struct net_device *upper_dev;
+ struct list_head *iter;
+
+ /* RCU read lock not necessary because we have write-side protection
+ * (rtnl_mutex), however a non-rcu iterator does not exist.
+ */
+ netdev_for_each_upper_dev_rcu(netdev, upper_dev, iter)
+ if (is_vlan_dev(upper_dev))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int
+dpaa2_switch_prechangeupper_sanity_checks(struct net_device *netdev,
+ struct net_device *upper_dev,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ if (!br_vlan_enabled(upper_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot join a VLAN-unaware bridge");
+ return -EOPNOTSUPP;
+ }
+
+ err = dpaa2_switch_prevent_bridging_with_8021q_upper(netdev);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot join a bridge while VLAN uppers are present");
+		return err;
+ }
+
+ return 0;
+}
+
+static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct netlink_ext_ack *extack;
+ struct net_device *upper_dev;
+ int err = 0;
+
+ if (!dpaa2_switch_port_dev_check(netdev))
+ return NOTIFY_DONE;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (!netif_is_bridge_master(upper_dev))
+ break;
+
+ err = dpaa2_switch_prechangeupper_sanity_checks(netdev,
+ upper_dev,
+ extack);
+ if (err)
+ goto out;
+
+ if (!info->linking)
+ dpaa2_switch_port_pre_bridge_leave(netdev);
+
+ break;
+ case NETDEV_CHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (netif_is_bridge_master(upper_dev)) {
+ if (info->linking)
+ err = dpaa2_switch_port_bridge_join(netdev,
+ upper_dev,
+ extack);
+ else
+ err = dpaa2_switch_port_bridge_leave(netdev);
+ }
+ break;
+ }
+
+out:
+ return notifier_from_errno(err);
+}
+
+struct ethsw_switchdev_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct net_device *dev;
+ unsigned long event;
+};
+
+static void dpaa2_switch_event_work(struct work_struct *work)
+{
+ struct ethsw_switchdev_event_work *switchdev_work =
+ container_of(work, struct ethsw_switchdev_event_work, work);
+ struct net_device *dev = switchdev_work->dev;
+ struct switchdev_notifier_fdb_info *fdb_info;
+ int err;
+
+ rtnl_lock();
+ fdb_info = &switchdev_work->fdb_info;
+
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ if (!fdb_info->added_by_user || fdb_info->is_local)
+ break;
+ if (is_unicast_ether_addr(fdb_info->addr))
+ err = dpaa2_switch_port_fdb_add_uc(netdev_priv(dev),
+ fdb_info->addr);
+ else
+ err = dpaa2_switch_port_fdb_add_mc(netdev_priv(dev),
+ fdb_info->addr);
+ if (err)
+ break;
+ fdb_info->offloaded = true;
+ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
+ &fdb_info->info, NULL);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ if (!fdb_info->added_by_user || fdb_info->is_local)
+ break;
+ if (is_unicast_ether_addr(fdb_info->addr))
+ dpaa2_switch_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr);
+ else
+ dpaa2_switch_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr);
+ break;
+ }
+
+ rtnl_unlock();
+ kfree(switchdev_work->fdb_info.addr);
+ kfree(switchdev_work);
+ dev_put(dev);
+}
+
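+/* FDB add/del notifications are raised in atomic context, so the MC firmware
+ * commands they require are deferred to the switch's ordered workqueue.
+ */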
+/* Called under rcu_read_lock() */
+static int dpaa2_switch_port_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ struct ethsw_port_priv *port_priv = netdev_priv(dev);
+ struct ethsw_switchdev_event_work *switchdev_work;
+ struct switchdev_notifier_fdb_info *fdb_info = ptr;
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET)
+ return dpaa2_switch_port_attr_set_event(dev, ptr);
+
+ if (!dpaa2_switch_port_dev_check(dev))
+ return NOTIFY_DONE;
+
+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+ if (!switchdev_work)
+ return NOTIFY_BAD;
+
+ INIT_WORK(&switchdev_work->work, dpaa2_switch_event_work);
+ switchdev_work->dev = dev;
+ switchdev_work->event = event;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ memcpy(&switchdev_work->fdb_info, ptr,
+ sizeof(switchdev_work->fdb_info));
+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!switchdev_work->fdb_info.addr)
+ goto err_addr_alloc;
+
+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+ fdb_info->addr);
+
+		/* Take a reference on the device so it is not freed while
+		 * the work item is still pending.
+		 */
+ dev_hold(dev);
+ break;
+ default:
+ kfree(switchdev_work);
+ return NOTIFY_DONE;
+ }
+
+ queue_work(ethsw->workqueue, &switchdev_work->work);
+
+ return NOTIFY_DONE;
+
+err_addr_alloc:
+ kfree(switchdev_work);
+ return NOTIFY_BAD;
+}
+
+static int dpaa2_switch_port_obj_event(unsigned long event,
+ struct net_device *netdev,
+ struct switchdev_notifier_port_obj_info *port_obj_info)
+{
+ int err = -EOPNOTSUPP;
+
+ if (!dpaa2_switch_port_dev_check(netdev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = dpaa2_switch_port_obj_add(netdev, port_obj_info->obj);
+ break;
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = dpaa2_switch_port_obj_del(netdev, port_obj_info->obj);
+ break;
+ }
+
+ port_obj_info->handled = true;
+ return notifier_from_errno(err);
+}
+
+static int dpaa2_switch_port_blocking_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ case SWITCHDEV_PORT_OBJ_DEL:
+ return dpaa2_switch_port_obj_event(event, dev, ptr);
+ case SWITCHDEV_PORT_ATTR_SET:
+ return dpaa2_switch_port_attr_set_event(dev, ptr);
+ }
+
+ return NOTIFY_DONE;
+}
+
+/* Build a linear skb based on a single-buffer frame descriptor */
+static struct sk_buff *dpaa2_switch_build_linear_skb(struct ethsw_core *ethsw,
+ const struct dpaa2_fd *fd)
+{
+ u16 fd_offset = dpaa2_fd_get_offset(fd);
+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
+ u32 fd_length = dpaa2_fd_get_len(fd);
+ struct device *dev = ethsw->dev;
+ struct sk_buff *skb = NULL;
+ void *fd_vaddr;
+
+ fd_vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, addr);
+ dma_unmap_page(dev, addr, DPAA2_SWITCH_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+
+ skb = build_skb(fd_vaddr, DPAA2_SWITCH_RX_BUF_SIZE +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+ if (unlikely(!skb)) {
+ dev_err(dev, "build_skb() failed\n");
+ return NULL;
+ }
+
+ skb_reserve(skb, fd_offset);
+ skb_put(skb, fd_length);
+
+ ethsw->buf_count--;
+
+ return skb;
+}
+
+static void dpaa2_switch_tx_conf(struct dpaa2_switch_fq *fq,
+ const struct dpaa2_fd *fd)
+{
+ dpaa2_switch_free_fd(fq->ethsw, fd);
+}
+
+static void dpaa2_switch_rx(struct dpaa2_switch_fq *fq,
+ const struct dpaa2_fd *fd)
+{
+ struct ethsw_core *ethsw = fq->ethsw;
+ struct ethsw_port_priv *port_priv;
+ struct net_device *netdev;
+ struct vlan_ethhdr *hdr;
+ struct sk_buff *skb;
+ u16 vlan_tci, vid;
+ int if_id, err;
+
+ /* get switch ingress interface ID */
+ if_id = upper_32_bits(dpaa2_fd_get_flc(fd)) & 0x0000FFFF;
+
+ if (if_id >= ethsw->sw_attr.num_ifs) {
+ dev_err(ethsw->dev, "Frame received from unknown interface!\n");
+ goto err_free_fd;
+ }
+ port_priv = ethsw->ports[if_id];
+ netdev = port_priv->netdev;
+
+ /* build the SKB based on the FD received */
+ if (dpaa2_fd_get_format(fd) != dpaa2_fd_single) {
+		if (net_ratelimit())
+			netdev_err(netdev, "Received invalid frame format\n");
+		goto err_free_fd;
+	}
+
+ skb = dpaa2_switch_build_linear_skb(ethsw, fd);
+ if (unlikely(!skb))
+ goto err_free_fd;
+
+ skb_reset_mac_header(skb);
+
+ /* Remove the VLAN header if the packet that we just received has a vid
+	 * equal to the port PVID. Since the dpaa2-switch can operate only in
+ * VLAN-aware mode and no alterations are made on the packet when it's
+ * redirected/mirrored to the control interface, we are sure that there
+ * will always be a VLAN header present.
+ */
+ hdr = vlan_eth_hdr(skb);
+ vid = ntohs(hdr->h_vlan_TCI) & VLAN_VID_MASK;
+ if (vid == port_priv->pvid) {
+ err = __skb_vlan_pop(skb, &vlan_tci);
+ if (err) {
+ dev_info(ethsw->dev, "__skb_vlan_pop() returned %d", err);
+ goto err_free_fd;
+ }
+ }
+
+ skb->dev = netdev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ /* Setup the offload_fwd_mark only if the port is under a bridge */
+ skb->offload_fwd_mark = !!(port_priv->fdb->bridge_dev);
+
+ netif_receive_skb(skb);
+
+ return;
+
+err_free_fd:
+ dpaa2_switch_free_fd(ethsw, fd);
+}
+
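+/* DPSW API 8.6 and newer can report a per-port MAC address configured by the
+ * bootloader, which we expose through the MAC_ADDR feature bit.
+ */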
+static void dpaa2_switch_detect_features(struct ethsw_core *ethsw)
+{
+ ethsw->features = 0;
+
+ if (ethsw->major > 8 || (ethsw->major == 8 && ethsw->minor >= 6))
+ ethsw->features |= ETHSW_FEATURE_MAC_ADDR;
+}
+
+static int dpaa2_switch_setup_fqs(struct ethsw_core *ethsw)
+{
+ struct dpsw_ctrl_if_attr ctrl_if_attr;
+ struct device *dev = ethsw->dev;
+ int i = 0;
+ int err;
+
+ err = dpsw_ctrl_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &ctrl_if_attr);
+ if (err) {
+ dev_err(dev, "dpsw_ctrl_if_get_attributes() = %d\n", err);
+ return err;
+ }
+
+ ethsw->fq[i].fqid = ctrl_if_attr.rx_fqid;
+ ethsw->fq[i].ethsw = ethsw;
+ ethsw->fq[i++].type = DPSW_QUEUE_RX;
+
+ ethsw->fq[i].fqid = ctrl_if_attr.tx_err_conf_fqid;
+ ethsw->fq[i].ethsw = ethsw;
+ ethsw->fq[i++].type = DPSW_QUEUE_TX_ERR_CONF;
+
+ return 0;
+}
+
+/* Free buffers acquired from the buffer pool or which were meant to
+ * be released in the pool
+ */
+static void dpaa2_switch_free_bufs(struct ethsw_core *ethsw, u64 *buf_array, int count)
+{
+ struct device *dev = ethsw->dev;
+ void *vaddr;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, buf_array[i]);
+ dma_unmap_page(dev, buf_array[i], DPAA2_SWITCH_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ free_pages((unsigned long)vaddr, 0);
+ }
+}
+
+/* Perform a single release command to add buffers
+ * to the specified buffer pool
+ */
+static int dpaa2_switch_add_bufs(struct ethsw_core *ethsw, u16 bpid)
+{
+ struct device *dev = ethsw->dev;
+ u64 buf_array[BUFS_PER_CMD];
+ struct page *page;
+ int retries = 0;
+ dma_addr_t addr;
+ int err;
+ int i;
+
+ for (i = 0; i < BUFS_PER_CMD; i++) {
+ /* Allocate one page for each Rx buffer. WRIOP sees
+ * the entire page except for a tailroom reserved for
+ * skb shared info
+ */
+ page = dev_alloc_pages(0);
+ if (!page) {
+ dev_err(dev, "buffer allocation failed\n");
+ goto err_alloc;
+ }
+
+ addr = dma_map_page(dev, page, 0, DPAA2_SWITCH_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, addr)) {
+ dev_err(dev, "dma_map_single() failed\n");
+ goto err_map;
+ }
+ buf_array[i] = addr;
+ }
+
+release_bufs:
+ /* In case the portal is busy, retry until successful or
+ * max retries hit.
+ */
+ while ((err = dpaa2_io_service_release(NULL, bpid,
+ buf_array, i)) == -EBUSY) {
+ if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES)
+ break;
+
+ cpu_relax();
+ }
+
+ /* If release command failed, clean up and bail out. */
+ if (err) {
+ dpaa2_switch_free_bufs(ethsw, buf_array, i);
+ return 0;
+ }
+
+ return i;
+
+err_map:
+ __free_pages(page, 0);
+err_alloc:
+ /* If we managed to allocate at least some buffers,
+ * release them to hardware
+ */
+ if (i)
+ goto release_bufs;
+
+ return 0;
+}
+
+static int dpaa2_switch_refill_bp(struct ethsw_core *ethsw)
+{
+ int *count = &ethsw->buf_count;
+ int new_count;
+ int err = 0;
+
+ if (unlikely(*count < DPAA2_ETHSW_REFILL_THRESH)) {
+ do {
+ new_count = dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
+ if (unlikely(!new_count)) {
+ /* Out of memory; abort for now, we'll
+ * try later on
+ */
+ break;
+ }
+ *count += new_count;
+ } while (*count < DPAA2_ETHSW_NUM_BUFS);
+
+ if (unlikely(*count < DPAA2_ETHSW_NUM_BUFS))
+ err = -ENOMEM;
+ }
+
+ return err;
+}
+
+static int dpaa2_switch_seed_bp(struct ethsw_core *ethsw)
+{
+ int *count, i;
+
+ for (i = 0; i < DPAA2_ETHSW_NUM_BUFS; i += BUFS_PER_CMD) {
+ count = &ethsw->buf_count;
+ *count += dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
+
+ if (unlikely(*count < BUFS_PER_CMD))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void dpaa2_switch_drain_bp(struct ethsw_core *ethsw)
+{
+ u64 buf_array[BUFS_PER_CMD];
+ int ret;
+
+ do {
+ ret = dpaa2_io_service_acquire(NULL, ethsw->bpid,
+ buf_array, BUFS_PER_CMD);
+ if (ret < 0) {
+ dev_err(ethsw->dev,
+ "dpaa2_io_service_acquire() = %d\n", ret);
+ return;
+ }
+ dpaa2_switch_free_bufs(ethsw, buf_array, ret);
+
+ } while (ret);
+}
+
+static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw)
+{
+ struct dpsw_ctrl_if_pools_cfg dpsw_ctrl_if_pools_cfg = { 0 };
+ struct device *dev = ethsw->dev;
+ struct fsl_mc_device *dpbp_dev;
+ struct dpbp_attr dpbp_attrs;
+ int err;
+
+ err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
+ &dpbp_dev);
+ if (err) {
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_err(dev, "DPBP device allocation failed\n");
+ return err;
+ }
+ ethsw->dpbp_dev = dpbp_dev;
+
+ err = dpbp_open(ethsw->mc_io, 0, dpbp_dev->obj_desc.id,
+ &dpbp_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpbp_open() failed\n");
+ goto err_open;
+ }
+
+ err = dpbp_reset(ethsw->mc_io, 0, dpbp_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpbp_reset() failed\n");
+ goto err_reset;
+ }
+
+ err = dpbp_enable(ethsw->mc_io, 0, dpbp_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpbp_enable() failed\n");
+ goto err_enable;
+ }
+
+ err = dpbp_get_attributes(ethsw->mc_io, 0, dpbp_dev->mc_handle,
+ &dpbp_attrs);
+ if (err) {
+ dev_err(dev, "dpbp_get_attributes() failed\n");
+ goto err_get_attr;
+ }
+
+ dpsw_ctrl_if_pools_cfg.num_dpbp = 1;
+ dpsw_ctrl_if_pools_cfg.pools[0].dpbp_id = dpbp_attrs.id;
+ dpsw_ctrl_if_pools_cfg.pools[0].buffer_size = DPAA2_SWITCH_RX_BUF_SIZE;
+ dpsw_ctrl_if_pools_cfg.pools[0].backup_pool = 0;
+
+ err = dpsw_ctrl_if_set_pools(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &dpsw_ctrl_if_pools_cfg);
+ if (err) {
+ dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n");
+ goto err_get_attr;
+ }
+ ethsw->bpid = dpbp_attrs.id;
+
+ return 0;
+
+err_get_attr:
+ dpbp_disable(ethsw->mc_io, 0, dpbp_dev->mc_handle);
+err_enable:
+err_reset:
+ dpbp_close(ethsw->mc_io, 0, dpbp_dev->mc_handle);
+err_open:
+ fsl_mc_object_free(dpbp_dev);
+ return err;
+}
+
+static void dpaa2_switch_free_dpbp(struct ethsw_core *ethsw)
+{
+ dpbp_disable(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle);
+ dpbp_close(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle);
+ fsl_mc_object_free(ethsw->dpbp_dev);
+}
+
+static int dpaa2_switch_alloc_rings(struct ethsw_core *ethsw)
+{
+ int i;
+
+ for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) {
+ ethsw->fq[i].store =
+ dpaa2_io_store_create(DPAA2_SWITCH_STORE_SIZE,
+ ethsw->dev);
+ if (!ethsw->fq[i].store) {
+ dev_err(ethsw->dev, "dpaa2_io_store_create failed\n");
+ while (--i >= 0)
+ dpaa2_io_store_destroy(ethsw->fq[i].store);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static void dpaa2_switch_destroy_rings(struct ethsw_core *ethsw)
+{
+ int i;
+
+ for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
+ dpaa2_io_store_destroy(ethsw->fq[i].store);
+}
+
+static int dpaa2_switch_pull_fq(struct dpaa2_switch_fq *fq)
+{
+ int err, retries = 0;
+
+ /* Try to pull from the FQ while the portal is busy and we didn't hit
+	 * the maximum number of retries
+ */
+ do {
+ err = dpaa2_io_service_pull_fq(NULL, fq->fqid, fq->store);
+ cpu_relax();
+ } while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES);
+
+ if (unlikely(err))
+ dev_err(fq->ethsw->dev, "dpaa2_io_service_pull err %d", err);
+
+ return err;
+}
+
+/* Consume all frames pull-dequeued into the store */
+static int dpaa2_switch_store_consume(struct dpaa2_switch_fq *fq)
+{
+ struct ethsw_core *ethsw = fq->ethsw;
+ int cleaned = 0, is_last;
+ struct dpaa2_dq *dq;
+ int retries = 0;
+
+ do {
+ /* Get the next available FD from the store */
+ dq = dpaa2_io_store_next(fq->store, &is_last);
+ if (unlikely(!dq)) {
+ if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES) {
+ dev_err_once(ethsw->dev,
+ "No valid dequeue response\n");
+ return -ETIMEDOUT;
+ }
+ continue;
+ }
+
+ if (fq->type == DPSW_QUEUE_RX)
+ dpaa2_switch_rx(fq, dpaa2_dq_fd(dq));
+ else
+ dpaa2_switch_tx_conf(fq, dpaa2_dq_fd(dq));
+ cleaned++;
+
+ } while (!is_last);
+
+ return cleaned;
+}
+
+/* NAPI poll routine */
+static int dpaa2_switch_poll(struct napi_struct *napi, int budget)
+{
+ int err, cleaned = 0, store_cleaned, work_done;
+ struct dpaa2_switch_fq *fq;
+ int retries = 0;
+
+ fq = container_of(napi, struct dpaa2_switch_fq, napi);
+
+ do {
+ err = dpaa2_switch_pull_fq(fq);
+ if (unlikely(err))
+ break;
+
+ /* Refill pool if appropriate */
+ dpaa2_switch_refill_bp(fq->ethsw);
+
+ store_cleaned = dpaa2_switch_store_consume(fq);
+ cleaned += store_cleaned;
+
+ if (cleaned >= budget) {
+ work_done = budget;
+ goto out;
+ }
+
+ } while (store_cleaned);
+
+ /* We didn't consume the entire budget, so finish napi and re-enable
+ * data availability notifications
+ */
+ napi_complete_done(napi, cleaned);
+ do {
+ err = dpaa2_io_service_rearm(NULL, &fq->nctx);
+ cpu_relax();
+ } while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES);
+
+ work_done = max(cleaned, 1);
+out:
+
+ return work_done;
+}
+
+static void dpaa2_switch_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
+{
+ struct dpaa2_switch_fq *fq;
+
+ fq = container_of(nctx, struct dpaa2_switch_fq, nctx);
+
+ napi_schedule(&fq->napi);
+}
+
+static int dpaa2_switch_setup_dpio(struct ethsw_core *ethsw)
+{
+ struct dpsw_ctrl_if_queue_cfg queue_cfg;
+ struct dpaa2_io_notification_ctx *nctx;
+ int err, i, j;
+
+ for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) {
+ nctx = &ethsw->fq[i].nctx;
+
+ /* Register a new software context for the FQID.
+		 * By using NULL as the first parameter, we specify that we do
+		 * not care on which CPU the interrupts for this queue are
+		 * received
+ */
+ nctx->is_cdan = 0;
+ nctx->id = ethsw->fq[i].fqid;
+ nctx->desired_cpu = DPAA2_IO_ANY_CPU;
+ nctx->cb = dpaa2_switch_fqdan_cb;
+ err = dpaa2_io_service_register(NULL, nctx, ethsw->dev);
+ if (err) {
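+			/* Most likely the DPIO objects have not been probed
+			 * yet, so ask the bus to retry us later.
+			 */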
+ err = -EPROBE_DEFER;
+ goto err_register;
+ }
+
+ queue_cfg.options = DPSW_CTRL_IF_QUEUE_OPT_DEST |
+ DPSW_CTRL_IF_QUEUE_OPT_USER_CTX;
+ queue_cfg.dest_cfg.dest_type = DPSW_CTRL_IF_DEST_DPIO;
+ queue_cfg.dest_cfg.dest_id = nctx->dpio_id;
+ queue_cfg.dest_cfg.priority = 0;
+ queue_cfg.user_ctx = nctx->qman64;
+
+ err = dpsw_ctrl_if_set_queue(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ ethsw->fq[i].type,
+ &queue_cfg);
+ if (err)
+ goto err_set_queue;
+ }
+
+ return 0;
+
+err_set_queue:
+ dpaa2_io_service_deregister(NULL, nctx, ethsw->dev);
+err_register:
+ for (j = 0; j < i; j++)
+ dpaa2_io_service_deregister(NULL, &ethsw->fq[j].nctx,
+ ethsw->dev);
+
+ return err;
+}
+
+static void dpaa2_switch_free_dpio(struct ethsw_core *ethsw)
+{
+ int i;
+
+ for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
+ dpaa2_io_service_deregister(NULL, &ethsw->fq[i].nctx,
+ ethsw->dev);
+}
+
+static int dpaa2_switch_ctrl_if_setup(struct ethsw_core *ethsw)
+{
+ int err;
+
+ /* setup FQs for Rx and Tx Conf */
+ err = dpaa2_switch_setup_fqs(ethsw);
+ if (err)
+ return err;
+
+ /* setup the buffer pool needed on the Rx path */
+ err = dpaa2_switch_setup_dpbp(ethsw);
+ if (err)
+ return err;
+
+ err = dpaa2_switch_alloc_rings(ethsw);
+ if (err)
+ goto err_free_dpbp;
+
+ err = dpaa2_switch_setup_dpio(ethsw);
+ if (err)
+ goto err_destroy_rings;
+
+ err = dpaa2_switch_seed_bp(ethsw);
+ if (err)
+ goto err_deregister_dpio;
+
+ err = dpsw_ctrl_if_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ if (err) {
+ dev_err(ethsw->dev, "dpsw_ctrl_if_enable err %d\n", err);
+ goto err_drain_dpbp;
+ }
+
+ return 0;
+
+err_drain_dpbp:
+ dpaa2_switch_drain_bp(ethsw);
+err_deregister_dpio:
+ dpaa2_switch_free_dpio(ethsw);
+err_destroy_rings:
+ dpaa2_switch_destroy_rings(ethsw);
+err_free_dpbp:
+ dpaa2_switch_free_dpbp(ethsw);
+
+ return err;
+}
+
+static void dpaa2_switch_remove_port(struct ethsw_core *ethsw,
+ u16 port_idx)
+{
+ struct ethsw_port_priv *port_priv = ethsw->ports[port_idx];
+
+ rtnl_lock();
+ dpaa2_switch_port_disconnect_mac(port_priv);
+ rtnl_unlock();
+ free_netdev(port_priv->netdev);
+ ethsw->ports[port_idx] = NULL;
+}
+
+static int dpaa2_switch_init(struct fsl_mc_device *sw_dev)
+{
+ struct device *dev = &sw_dev->dev;
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
+ struct dpsw_vlan_if_cfg vcfg = {0};
+ struct dpsw_tci_cfg tci_cfg = {0};
+ struct dpsw_stp_cfg stp_cfg;
+ int err;
+ u16 i;
+
+ ethsw->dev_id = sw_dev->obj_desc.id;
+
+ err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, &ethsw->dpsw_handle);
+ if (err) {
+ dev_err(dev, "dpsw_open err %d\n", err);
+ return err;
+ }
+
+ err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &ethsw->sw_attr);
+ if (err) {
+ dev_err(dev, "dpsw_get_attributes err %d\n", err);
+ goto err_close;
+ }
+
+ err = dpsw_get_api_version(ethsw->mc_io, 0,
+ &ethsw->major,
+ &ethsw->minor);
+ if (err) {
+ dev_err(dev, "dpsw_get_api_version err %d\n", err);
+ goto err_close;
+ }
+
+ /* Minimum supported DPSW version check */
+ if (ethsw->major < DPSW_MIN_VER_MAJOR ||
+ (ethsw->major == DPSW_MIN_VER_MAJOR &&
+ ethsw->minor < DPSW_MIN_VER_MINOR)) {
+ dev_err(dev, "DPSW version %d:%d not supported. Use firmware 10.28.0 or greater.\n",
+ ethsw->major, ethsw->minor);
+ err = -EOPNOTSUPP;
+ goto err_close;
+ }
+
+ if (!dpaa2_switch_supports_cpu_traffic(ethsw)) {
+ err = -EOPNOTSUPP;
+ goto err_close;
+ }
+
+ dpaa2_switch_detect_features(ethsw);
+
+ err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ if (err) {
+ dev_err(dev, "dpsw_reset err %d\n", err);
+ goto err_close;
+ }
+
+ stp_cfg.vlan_id = DEFAULT_VLAN_ID;
+ stp_cfg.state = DPSW_STP_STATE_FORWARDING;
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ err = dpsw_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle, i);
+ if (err) {
+ dev_err(dev, "dpsw_if_disable err %d\n", err);
+ goto err_close;
+ }
+
+ err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i,
+ &stp_cfg);
+ if (err) {
+ dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
+ err, i);
+ goto err_close;
+ }
+
+ /* Switch starts with all ports configured to VLAN 1. Need to
+ * remove this setting to allow configuration at bridge join
+ */
+ vcfg.num_ifs = 1;
+ vcfg.if_id[0] = i;
+ err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DEFAULT_VLAN_ID, &vcfg);
+ if (err) {
+ dev_err(dev, "dpsw_vlan_remove_if_untagged err %d\n",
+ err);
+ goto err_close;
+ }
+
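+		/* Park the port PVID on VLAN 4095, which neither the bridge
+		 * nor the 8021q module will ever use.
+		 */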
+ tci_cfg.vlan_id = 4095;
+ err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, i, &tci_cfg);
+ if (err) {
+ dev_err(dev, "dpsw_if_set_tci err %d\n", err);
+ goto err_close;
+ }
+
+ err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DEFAULT_VLAN_ID, &vcfg);
+ if (err) {
+ dev_err(dev, "dpsw_vlan_remove_if err %d\n", err);
+ goto err_close;
+ }
+ }
+
+ err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, DEFAULT_VLAN_ID);
+ if (err) {
+ dev_err(dev, "dpsw_vlan_remove err %d\n", err);
+ goto err_close;
+ }
+
+ ethsw->workqueue = alloc_ordered_workqueue("%s_%d_ordered",
+ WQ_MEM_RECLAIM, "ethsw",
+ ethsw->sw_attr.id);
+ if (!ethsw->workqueue) {
+ err = -ENOMEM;
+ goto err_close;
+ }
+
+ err = dpsw_fdb_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, 0);
+ if (err)
+ goto err_destroy_ordered_workqueue;
+
+ err = dpaa2_switch_ctrl_if_setup(ethsw);
+ if (err)
+ goto err_destroy_ordered_workqueue;
+
+ return 0;
+
+err_destroy_ordered_workqueue:
+ destroy_workqueue(ethsw->workqueue);
+
+err_close:
+ dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ return err;
+}
+
+/* Add an ACL to redirect frames with specific destination MAC address to
+ * control interface
+ */
+static int dpaa2_switch_port_trap_mac_addr(struct ethsw_port_priv *port_priv,
+ const char *mac)
+{
+ struct dpaa2_switch_acl_entry acl_entry = {0};
+
+ /* Match on the destination MAC address */
+ ether_addr_copy(acl_entry.key.match.l2_dest_mac, mac);
+ eth_broadcast_addr(acl_entry.key.mask.l2_dest_mac);
+
+ /* Trap to CPU */
+ acl_entry.cfg.precedence = 0;
+ acl_entry.cfg.result.action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF;
+
+ return dpaa2_switch_acl_entry_add(port_priv->filter_block, &acl_entry);
+}
+
+static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
+{
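+	/* 01:80:c2:00:00:00 is the IEEE 802.1D STP destination address; the
+	 * ACL entry added below traps frames sent to it to the control
+	 * interface.
+	 */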
+ const char stpa[ETH_ALEN] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};
+ struct switchdev_obj_port_vlan vlan = {
+ .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
+ .vid = DEFAULT_VLAN_ID,
+ .flags = BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID,
+ };
+ struct net_device *netdev = port_priv->netdev;
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpaa2_switch_filter_block *filter_block;
+ struct dpsw_fdb_cfg fdb_cfg = {0};
+ struct dpsw_if_attr dpsw_if_attr;
+ struct dpaa2_switch_fdb *fdb;
+ struct dpsw_acl_cfg acl_cfg;
+ u16 fdb_id, acl_tbl_id;
+ int err;
+
+ /* Get the Tx queue for this specific port */
+ err = dpsw_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port_priv->idx, &dpsw_if_attr);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_get_attributes err %d\n", err);
+ return err;
+ }
+ port_priv->tx_qdid = dpsw_if_attr.qdid;
+
+ /* Create a FDB table for this particular switch port */
+ fdb_cfg.num_fdb_entries = ethsw->sw_attr.max_fdb_entries / ethsw->sw_attr.num_ifs;
+ err = dpsw_fdb_add(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &fdb_id, &fdb_cfg);
+ if (err) {
+ netdev_err(netdev, "dpsw_fdb_add err %d\n", err);
+ return err;
+ }
+
+ /* Find an unused dpaa2_switch_fdb structure and use it */
+ fdb = dpaa2_switch_fdb_get_unused(ethsw);
+ fdb->fdb_id = fdb_id;
+ fdb->in_use = true;
+ fdb->bridge_dev = NULL;
+ port_priv->fdb = fdb;
+
+ /* We need to add VLAN 1 as the PVID on this port until it is under a
+ * bridge since the DPAA2 switch is not able to handle the traffic in a
+ * VLAN unaware fashion
+ */
+ err = dpaa2_switch_port_vlans_add(netdev, &vlan);
+ if (err)
+ return err;
+
+	/* Setup the egress flooding domains (broadcast, unknown unicast) */
+ err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
+ if (err)
+ return err;
+
+ /* Create an ACL table to be used by this switch port */
+ acl_cfg.max_entries = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES;
+ err = dpsw_acl_add(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &acl_tbl_id, &acl_cfg);
+ if (err) {
+ netdev_err(netdev, "dpsw_acl_add err %d\n", err);
+ return err;
+ }
+
+ filter_block = dpaa2_switch_filter_block_get_unused(ethsw);
+ filter_block->ethsw = ethsw;
+ filter_block->acl_id = acl_tbl_id;
+ filter_block->in_use = true;
+ filter_block->num_acl_rules = 0;
+ INIT_LIST_HEAD(&filter_block->acl_entries);
+ INIT_LIST_HEAD(&filter_block->mirror_entries);
+
+ err = dpaa2_switch_port_acl_tbl_bind(port_priv, filter_block);
+ if (err)
+ return err;
+
+ err = dpaa2_switch_port_trap_mac_addr(port_priv, stpa);
+ if (err)
+ return err;
+
+ return err;
+}
+
+static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw)
+{
+ dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ dpaa2_switch_free_dpio(ethsw);
+ dpaa2_switch_destroy_rings(ethsw);
+ dpaa2_switch_drain_bp(ethsw);
+ dpaa2_switch_free_dpbp(ethsw);
+}
+
+static void dpaa2_switch_teardown(struct fsl_mc_device *sw_dev)
+{
+ struct device *dev = &sw_dev->dev;
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
+ int err;
+
+ dpaa2_switch_ctrl_if_teardown(ethsw);
+
+ destroy_workqueue(ethsw->workqueue);
+
+ err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ if (err)
+ dev_warn(dev, "dpsw_close err %d\n", err);
+}
+
+static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
+{
+ struct ethsw_port_priv *port_priv;
+ struct ethsw_core *ethsw;
+ struct device *dev;
+ int i;
+
+ dev = &sw_dev->dev;
+ ethsw = dev_get_drvdata(dev);
+
+ dpaa2_switch_teardown_irqs(sw_dev);
+
+ dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ port_priv = ethsw->ports[i];
+ unregister_netdev(port_priv->netdev);
+ dpaa2_switch_remove_port(ethsw, i);
+ }
+
+ kfree(ethsw->fdbs);
+ kfree(ethsw->filter_blocks);
+ kfree(ethsw->ports);
+
+ dpaa2_switch_teardown(sw_dev);
+
+ fsl_mc_portal_free(ethsw->mc_io);
+
+ kfree(ethsw);
+
+ dev_set_drvdata(dev, NULL);
+
+ return 0;
+}
+
+static int dpaa2_switch_probe_port(struct ethsw_core *ethsw,
+ u16 port_idx)
+{
+ struct ethsw_port_priv *port_priv;
+ struct device *dev = ethsw->dev;
+ struct net_device *port_netdev;
+ int err;
+
+ port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
+ if (!port_netdev) {
+ dev_err(dev, "alloc_etherdev error\n");
+ return -ENOMEM;
+ }
+
+ port_priv = netdev_priv(port_netdev);
+ port_priv->netdev = port_netdev;
+ port_priv->ethsw_data = ethsw;
+
+ port_priv->idx = port_idx;
+ port_priv->stp_state = BR_STATE_FORWARDING;
+
+ SET_NETDEV_DEV(port_netdev, dev);
+ port_netdev->netdev_ops = &dpaa2_switch_port_ops;
+ port_netdev->ethtool_ops = &dpaa2_switch_port_ethtool_ops;
+
+ port_netdev->needed_headroom = DPAA2_SWITCH_NEEDED_HEADROOM;
+
+ port_priv->bcast_flood = true;
+ port_priv->ucast_flood = true;
+
+ /* Set MTU limits */
+ port_netdev->min_mtu = ETH_MIN_MTU;
+ port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH;
+
+ /* Populate the private port structure so that later calls to
+ * dpaa2_switch_port_init() can use it.
+ */
+ ethsw->ports[port_idx] = port_priv;
+
+ /* The DPAA2 switch's ingress path depends on the VLAN table,
+ * thus we are not able to disable VLAN filtering.
+ */
+ port_netdev->features = NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_STAG_FILTER |
+ NETIF_F_HW_TC;
+
+ err = dpaa2_switch_port_init(port_priv, port_idx);
+ if (err)
+ goto err_port_probe;
+
+ err = dpaa2_switch_port_set_mac_addr(port_priv);
+ if (err)
+ goto err_port_probe;
+
+ err = dpaa2_switch_port_set_learning(port_priv, false);
+ if (err)
+ goto err_port_probe;
+ port_priv->learn_ena = false;
+
+ err = dpaa2_switch_port_connect_mac(port_priv);
+ if (err)
+ goto err_port_probe;
+
+ return 0;
+
+err_port_probe:
+ free_netdev(port_netdev);
+ ethsw->ports[port_idx] = NULL;
+
+ return err;
+}
+
+static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
+{
+ struct device *dev = &sw_dev->dev;
+ struct ethsw_core *ethsw;
+ int i, err;
+
+	/* Allocate the switch core */
+	ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL);
+	if (!ethsw)
+		return -ENOMEM;
+ return -ENOMEM;
+
+ ethsw->dev = dev;
+ ethsw->iommu_domain = iommu_get_domain_for_dev(dev);
+ dev_set_drvdata(dev, ethsw);
+
+ err = fsl_mc_portal_allocate(sw_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
+ &ethsw->mc_io);
+ if (err) {
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
+ goto err_free_drvdata;
+ }
+
+ err = dpaa2_switch_init(sw_dev);
+ if (err)
+ goto err_free_cmdport;
+
+ ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports),
+ GFP_KERNEL);
+	if (!ethsw->ports) {
+ err = -ENOMEM;
+ goto err_teardown;
+ }
+
+ ethsw->fdbs = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->fdbs),
+ GFP_KERNEL);
+ if (!ethsw->fdbs) {
+ err = -ENOMEM;
+ goto err_free_ports;
+ }
+
+ ethsw->filter_blocks = kcalloc(ethsw->sw_attr.num_ifs,
+ sizeof(*ethsw->filter_blocks),
+ GFP_KERNEL);
+ if (!ethsw->filter_blocks) {
+ err = -ENOMEM;
+ goto err_free_fdbs;
+ }
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ err = dpaa2_switch_probe_port(ethsw, i);
+ if (err)
+ goto err_free_netdev;
+ }
+
+	/* Add a NAPI instance for each of the Rx queues. The first port's
+	 * net_device will be associated with the instances since we do not
+	 * have separate queues for each switch port.
+	 */
+ for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
+ netif_napi_add(ethsw->ports[0]->netdev, &ethsw->fq[i].napi,
+ dpaa2_switch_poll);
+
+ /* Setup IRQs */
+ err = dpaa2_switch_setup_irqs(sw_dev);
+ if (err)
+ goto err_stop;
+
+ /* By convention, if the mirror port is equal to the number of switch
+ * interfaces, then mirroring of any kind is disabled.
+ */
+ ethsw->mirror_port = ethsw->sw_attr.num_ifs;
+
+ /* Register the netdev only when the entire setup is done and the
+ * switch port interfaces are ready to receive traffic
+ */
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ err = register_netdev(ethsw->ports[i]->netdev);
+ if (err < 0) {
+ dev_err(dev, "register_netdev error %d\n", err);
+ goto err_unregister_ports;
+ }
+ }
+
+ return 0;
+
+err_unregister_ports:
+ for (i--; i >= 0; i--)
+ unregister_netdev(ethsw->ports[i]->netdev);
+ dpaa2_switch_teardown_irqs(sw_dev);
+err_stop:
+ dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
+err_free_netdev:
+ for (i--; i >= 0; i--)
+ dpaa2_switch_remove_port(ethsw, i);
+ kfree(ethsw->filter_blocks);
+err_free_fdbs:
+ kfree(ethsw->fdbs);
+err_free_ports:
+ kfree(ethsw->ports);
+
+err_teardown:
+ dpaa2_switch_teardown(sw_dev);
+
+err_free_cmdport:
+ fsl_mc_portal_free(ethsw->mc_io);
+
+err_free_drvdata:
+ kfree(ethsw);
+ dev_set_drvdata(dev, NULL);
+
+ return err;
+}
+
+static const struct fsl_mc_device_id dpaa2_switch_match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpsw",
+ },
+ { .vendor = 0x0 }
+};
+MODULE_DEVICE_TABLE(fslmc, dpaa2_switch_match_id_table);
+
+static struct fsl_mc_driver dpaa2_switch_drv = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = dpaa2_switch_probe,
+ .remove = dpaa2_switch_remove,
+ .match_id_table = dpaa2_switch_match_id_table
+};
+
+static struct notifier_block dpaa2_switch_port_nb __read_mostly = {
+ .notifier_call = dpaa2_switch_port_netdevice_event,
+};
+
+static struct notifier_block dpaa2_switch_port_switchdev_nb = {
+ .notifier_call = dpaa2_switch_port_event,
+};
+
+static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb = {
+ .notifier_call = dpaa2_switch_port_blocking_event,
+};
+
+static int dpaa2_switch_register_notifiers(void)
+{
+ int err;
+
+ err = register_netdevice_notifier(&dpaa2_switch_port_nb);
+ if (err) {
+ pr_err("dpaa2-switch: failed to register net_device notifier (%d)\n", err);
+ return err;
+ }
+
+ err = register_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
+ if (err) {
+ pr_err("dpaa2-switch: failed to register switchdev notifier (%d)\n", err);
+ goto err_switchdev_nb;
+ }
+
+ err = register_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb);
+ if (err) {
+ pr_err("dpaa2-switch: failed to register switchdev blocking notifier (%d)\n", err);
+ goto err_switchdev_blocking_nb;
+ }
+
+ return 0;
+
+err_switchdev_blocking_nb:
+ unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
+err_switchdev_nb:
+ unregister_netdevice_notifier(&dpaa2_switch_port_nb);
+
+ return err;
+}
+
+static void dpaa2_switch_unregister_notifiers(void)
+{
+ int err;
+
+ err = unregister_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb);
+ if (err)
+ pr_err("dpaa2-switch: failed to unregister switchdev blocking notifier (%d)\n",
+ err);
+
+ err = unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
+ if (err)
+ pr_err("dpaa2-switch: failed to unregister switchdev notifier (%d)\n", err);
+
+ err = unregister_netdevice_notifier(&dpaa2_switch_port_nb);
+ if (err)
+ pr_err("dpaa2-switch: failed to unregister net_device notifier (%d)\n", err);
+}
+
+static int __init dpaa2_switch_driver_init(void)
+{
+ int err;
+
+ err = fsl_mc_driver_register(&dpaa2_switch_drv);
+ if (err)
+ return err;
+
+ err = dpaa2_switch_register_notifiers();
+ if (err) {
+ fsl_mc_driver_unregister(&dpaa2_switch_drv);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __exit dpaa2_switch_driver_exit(void)
+{
+ dpaa2_switch_unregister_notifiers();
+ fsl_mc_driver_unregister(&dpaa2_switch_drv);
+}
+
+module_init(dpaa2_switch_driver_init);
+module_exit(dpaa2_switch_driver_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver");
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
new file mode 100644
index 000000000..0002dca4d
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
@@ -0,0 +1,280 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DPAA2 Ethernet Switch declarations
+ *
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2021 NXP
+ *
+ */
+
+#ifndef __ETHSW_H
+#define __ETHSW_H
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+#include <uapi/linux/if_bridge.h>
+#include <net/switchdev.h>
+#include <linux/if_bridge.h>
+#include <linux/fsl/mc.h>
+#include <net/pkt_cls.h>
+#include <soc/fsl/dpaa2-io.h>
+
+#include "dpaa2-mac.h"
+#include "dpsw.h"
+
+/* Number of IRQs supported */
+#define DPSW_IRQ_NUM 2
+
+/* Port is member of VLAN */
+#define ETHSW_VLAN_MEMBER 1
+/* VLAN to be treated as untagged on egress */
+#define ETHSW_VLAN_UNTAGGED 2
+/* Untagged frames will be assigned to this VLAN */
+#define ETHSW_VLAN_PVID 4
+/* VLAN configured on the switch */
+#define ETHSW_VLAN_GLOBAL 8
+
+/* Maximum Frame Length supported by HW (currently 10k) */
+#define DPAA2_MFL (10 * 1024)
+#define ETHSW_MAX_FRAME_LENGTH (DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN)
+#define ETHSW_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN)
+
+#define ETHSW_FEATURE_MAC_ADDR BIT(0)
+
+/* Number of receive queues (one RX and one TX_CONF) */
+#define DPAA2_SWITCH_RX_NUM_FQS 2
+
+/* Hardware requires alignment for ingress/egress buffer addresses */
+#define DPAA2_SWITCH_RX_BUF_RAW_SIZE PAGE_SIZE
+#define DPAA2_SWITCH_RX_BUF_TAILROOM \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+#define DPAA2_SWITCH_RX_BUF_SIZE \
+ (DPAA2_SWITCH_RX_BUF_RAW_SIZE - DPAA2_SWITCH_RX_BUF_TAILROOM)
+
+#define DPAA2_SWITCH_STORE_SIZE 16
+
+/* Buffer management */
+#define BUFS_PER_CMD 7
+#define DPAA2_ETHSW_NUM_BUFS (1024 * BUFS_PER_CMD)
+#define DPAA2_ETHSW_REFILL_THRESH (DPAA2_ETHSW_NUM_BUFS * 5 / 6)
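+/* For example, with BUFS_PER_CMD == 7 this gives DPAA2_ETHSW_NUM_BUFS ==
+ * 7168 buffers and a refill threshold of 7168 * 5 / 6 == 5973 buffers.
+ */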
+
+/* Number of times to retry DPIO portal operations while waiting
+ * for portal to finish executing current command and become
+ * available. We want to avoid being stuck in a while loop in case
+ * hardware becomes unresponsive, but not give up too easily if
+ * the portal really is busy for valid reasons
+ */
+#define DPAA2_SWITCH_SWP_BUSY_RETRIES 1000
+
+/* Hardware annotation buffer size */
+#define DPAA2_SWITCH_HWA_SIZE 64
+/* Software annotation buffer size */
+#define DPAA2_SWITCH_SWA_SIZE 64
+
+#define DPAA2_SWITCH_TX_BUF_ALIGN 64
+
+#define DPAA2_SWITCH_TX_DATA_OFFSET \
+ (DPAA2_SWITCH_HWA_SIZE + DPAA2_SWITCH_SWA_SIZE)
+
+#define DPAA2_SWITCH_NEEDED_HEADROOM \
+ (DPAA2_SWITCH_TX_DATA_OFFSET + DPAA2_SWITCH_TX_BUF_ALIGN)
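+/* i.e. 64 bytes of hardware annotation plus 64 bytes of software annotation,
+ * plus 64 bytes of alignment slack: 192 bytes of needed headroom in total.
+ */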
+
+#define DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES 16
+#define DPAA2_ETHSW_PORT_DEFAULT_TRAPS 1
+
+#define DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE 256
+
+extern const struct ethtool_ops dpaa2_switch_port_ethtool_ops;
+
+struct ethsw_core;
+
+struct dpaa2_switch_fq {
+ struct ethsw_core *ethsw;
+ enum dpsw_queue_type type;
+ struct dpaa2_io_store *store;
+ struct dpaa2_io_notification_ctx nctx;
+ struct napi_struct napi;
+ u32 fqid;
+};
+
+struct dpaa2_switch_fdb {
+ struct net_device *bridge_dev;
+ u16 fdb_id;
+ bool in_use;
+};
+
+struct dpaa2_switch_acl_entry {
+ struct list_head list;
+ u16 prio;
+ unsigned long cookie;
+
+ struct dpsw_acl_entry_cfg cfg;
+ struct dpsw_acl_key key;
+};
+
+struct dpaa2_switch_mirror_entry {
+ struct list_head list;
+ struct dpsw_reflection_cfg cfg;
+ unsigned long cookie;
+ u16 if_id;
+};
+
+struct dpaa2_switch_filter_block {
+ struct ethsw_core *ethsw;
+ u64 ports;
+ bool in_use;
+
+ struct list_head acl_entries;
+ u16 acl_id;
+ u8 num_acl_rules;
+
+ struct list_head mirror_entries;
+};
+
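+/* With DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES == 16 and one default trap entry
+ * (DPAA2_ETHSW_PORT_DEFAULT_TRAPS), at most 15 user ACL rules fit in a
+ * filter block.
+ */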
+static inline bool
+dpaa2_switch_acl_tbl_is_full(struct dpaa2_switch_filter_block *filter_block)
+{
+	return (filter_block->num_acl_rules + DPAA2_ETHSW_PORT_DEFAULT_TRAPS) >=
+	       DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES;
+}
+
+/* Per port private data */
+struct ethsw_port_priv {
+ struct net_device *netdev;
+ u16 idx;
+ struct ethsw_core *ethsw_data;
+ u8 link_state;
+ u8 stp_state;
+
+ u8 vlans[VLAN_VID_MASK + 1];
+ u16 pvid;
+ u16 tx_qdid;
+
+ struct dpaa2_switch_fdb *fdb;
+ bool bcast_flood;
+ bool ucast_flood;
+ bool learn_ena;
+
+ struct dpaa2_switch_filter_block *filter_block;
+ struct dpaa2_mac *mac;
+};
+
+/* Switch data */
+struct ethsw_core {
+ struct device *dev;
+ struct fsl_mc_io *mc_io;
+ u16 dpsw_handle;
+ struct dpsw_attr sw_attr;
+ u16 major, minor;
+ unsigned long features;
+ int dev_id;
+ struct ethsw_port_priv **ports;
+ struct iommu_domain *iommu_domain;
+
+ u8 vlans[VLAN_VID_MASK + 1];
+
+ struct workqueue_struct *workqueue;
+
+ struct dpaa2_switch_fq fq[DPAA2_SWITCH_RX_NUM_FQS];
+ struct fsl_mc_device *dpbp_dev;
+ int buf_count;
+ u16 bpid;
+ int napi_users;
+
+ struct dpaa2_switch_fdb *fdbs;
+ struct dpaa2_switch_filter_block *filter_blocks;
+ u16 mirror_port;
+};
+
+static inline int dpaa2_switch_get_index(struct ethsw_core *ethsw,
+ struct net_device *netdev)
+{
+ int i;
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
+ if (ethsw->ports[i]->netdev == netdev)
+ return ethsw->ports[i]->idx;
+
+ return -EINVAL;
+}
+
+static inline bool dpaa2_switch_supports_cpu_traffic(struct ethsw_core *ethsw)
+{
+ if (ethsw->sw_attr.options & DPSW_OPT_CTRL_IF_DIS) {
+ dev_err(ethsw->dev, "Control Interface is disabled, cannot probe\n");
+ return false;
+ }
+
+ if (ethsw->sw_attr.flooding_cfg != DPSW_FLOODING_PER_FDB) {
+ dev_err(ethsw->dev, "Flooding domain is not per FDB, cannot probe\n");
+ return false;
+ }
+
+ if (ethsw->sw_attr.broadcast_cfg != DPSW_BROADCAST_PER_FDB) {
+ dev_err(ethsw->dev, "Broadcast domain is not per FDB, cannot probe\n");
+ return false;
+ }
+
+ if (ethsw->sw_attr.max_fdbs < ethsw->sw_attr.num_ifs) {
+ dev_err(ethsw->dev, "The number of FDBs is lower than the number of ports, cannot probe\n");
+ return false;
+ }
+
+ return true;
+}
+
+static inline bool
+dpaa2_switch_port_is_type_phy(struct ethsw_port_priv *port_priv)
+{
+ if (port_priv->mac &&
+ (port_priv->mac->attr.link_type == DPMAC_LINK_TYPE_PHY ||
+ port_priv->mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE))
+ return true;
+
+ return false;
+}
+
+static inline bool dpaa2_switch_port_has_mac(struct ethsw_port_priv *port_priv)
+{
+	return !!port_priv->mac;
+}
+
+bool dpaa2_switch_port_dev_check(const struct net_device *netdev);
+
+int dpaa2_switch_port_vlans_add(struct net_device *netdev,
+ const struct switchdev_obj_port_vlan *vlan);
+
+int dpaa2_switch_port_vlans_del(struct net_device *netdev,
+ const struct switchdev_obj_port_vlan *vlan);
+
+typedef int dpaa2_switch_fdb_cb_t(struct ethsw_port_priv *port_priv,
+ struct fdb_dump_entry *fdb_entry,
+ void *data);
+
+/* TC offload */
+
+int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_filter_block *block,
+ struct flow_cls_offload *cls);
+
+int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_filter_block *block,
+ struct flow_cls_offload *cls);
+
+int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_filter_block *block,
+ struct tc_cls_matchall_offload *cls);
+
+int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_filter_block *block,
+ struct tc_cls_matchall_offload *cls);
+
+int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *block,
+ struct dpaa2_switch_acl_entry *entry);
+
+int dpaa2_switch_block_offload_mirror(struct dpaa2_switch_filter_block *block,
+ struct ethsw_port_priv *port_priv);
+
+int dpaa2_switch_block_unoffload_mirror(struct dpaa2_switch_filter_block *block,
+ struct ethsw_port_priv *port_priv);
+#endif /* __ETHSW_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpkg.h b/drivers/net/ethernet/freescale/dpaa2/dpkg.h
new file mode 100644
index 000000000..6f596a5fb
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpkg.h
@@ -0,0 +1,481 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2013-2015 Freescale Semiconductor Inc.
+ */
+#ifndef __FSL_DPKG_H_
+#define __FSL_DPKG_H_
+
+#include <linux/types.h>
+
+/* Data Path Key Generator API
+ * Contains initialization APIs and runtime APIs for the Key Generator
+ */
+
+/** Key Generator properties */
+
+/**
+ * DPKG_NUM_OF_MASKS - Number of masks per key extraction
+ */
+#define DPKG_NUM_OF_MASKS 4
+
+/**
+ * DPKG_MAX_NUM_OF_EXTRACTS - Number of extractions per key profile
+ */
+#define DPKG_MAX_NUM_OF_EXTRACTS 10
+
+/**
+ * enum dpkg_extract_from_hdr_type - Selecting extraction by header types
+ * @DPKG_FROM_HDR: Extract selected bytes from header, by offset
+ * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field
+ * @DPKG_FULL_FIELD: Extract a full field
+ */
+enum dpkg_extract_from_hdr_type {
+ DPKG_FROM_HDR = 0,
+ DPKG_FROM_FIELD = 1,
+ DPKG_FULL_FIELD = 2
+};
+
+/**
+ * enum dpkg_extract_type - Enumeration for selecting extraction type
+ * @DPKG_EXTRACT_FROM_HDR: Extract from the header
+ * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header
+ * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result;
+ * e.g. can be used to extract header existence;
+ *	please refer to the 'Parse Result definition' section in the parser Block Guide
+ */
+enum dpkg_extract_type {
+ DPKG_EXTRACT_FROM_HDR = 0,
+ DPKG_EXTRACT_FROM_DATA = 1,
+ DPKG_EXTRACT_FROM_PARSE = 3
+};
+
+/**
+ * struct dpkg_mask - A structure for defining a single extraction mask
+ * @mask: Byte mask for the extracted content
+ * @offset: Offset within the extracted content
+ */
+struct dpkg_mask {
+ u8 mask;
+ u8 offset;
+};
+
+/* Protocol fields */
+
+/* Ethernet fields */
+#define NH_FLD_ETH_DA BIT(0)
+#define NH_FLD_ETH_SA BIT(1)
+#define NH_FLD_ETH_LENGTH BIT(2)
+#define NH_FLD_ETH_TYPE BIT(3)
+#define NH_FLD_ETH_FINAL_CKSUM BIT(4)
+#define NH_FLD_ETH_PADDING BIT(5)
+#define NH_FLD_ETH_ALL_FIELDS (BIT(6) - 1)
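+/* The *_ALL_FIELDS masks in this file follow a common pattern: BIT(n) - 1
+ * sets all n field bits of the protocol, e.g. NH_FLD_ETH_ALL_FIELDS == 0x3f.
+ */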
+
+/* VLAN fields */
+#define NH_FLD_VLAN_VPRI BIT(0)
+#define NH_FLD_VLAN_CFI BIT(1)
+#define NH_FLD_VLAN_VID BIT(2)
+#define NH_FLD_VLAN_LENGTH BIT(3)
+#define NH_FLD_VLAN_TYPE BIT(4)
+#define NH_FLD_VLAN_ALL_FIELDS (BIT(5) - 1)
+
+#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \
+ NH_FLD_VLAN_CFI | \
+ NH_FLD_VLAN_VID)
+
+/* IP (generic) fields */
+#define NH_FLD_IP_VER BIT(0)
+#define NH_FLD_IP_DSCP BIT(2)
+#define NH_FLD_IP_ECN BIT(3)
+#define NH_FLD_IP_PROTO BIT(4)
+#define NH_FLD_IP_SRC BIT(5)
+#define NH_FLD_IP_DST BIT(6)
+#define NH_FLD_IP_TOS_TC BIT(7)
+#define NH_FLD_IP_ID BIT(8)
+#define NH_FLD_IP_ALL_FIELDS (BIT(9) - 1)
+
+/* IPV4 fields */
+#define NH_FLD_IPV4_VER BIT(0)
+#define NH_FLD_IPV4_HDR_LEN BIT(1)
+#define NH_FLD_IPV4_TOS BIT(2)
+#define NH_FLD_IPV4_TOTAL_LEN BIT(3)
+#define NH_FLD_IPV4_ID BIT(4)
+#define NH_FLD_IPV4_FLAG_D BIT(5)
+#define NH_FLD_IPV4_FLAG_M BIT(6)
+#define NH_FLD_IPV4_OFFSET BIT(7)
+#define NH_FLD_IPV4_TTL BIT(8)
+#define NH_FLD_IPV4_PROTO BIT(9)
+#define NH_FLD_IPV4_CKSUM BIT(10)
+#define NH_FLD_IPV4_SRC_IP BIT(11)
+#define NH_FLD_IPV4_DST_IP BIT(12)
+#define NH_FLD_IPV4_OPTS BIT(13)
+#define NH_FLD_IPV4_OPTS_COUNT BIT(14)
+#define NH_FLD_IPV4_ALL_FIELDS (BIT(15) - 1)
+
+/* IPV6 fields */
+#define NH_FLD_IPV6_VER BIT(0)
+#define NH_FLD_IPV6_TC BIT(1)
+#define NH_FLD_IPV6_SRC_IP BIT(2)
+#define NH_FLD_IPV6_DST_IP BIT(3)
+#define NH_FLD_IPV6_NEXT_HDR BIT(4)
+#define NH_FLD_IPV6_FL BIT(5)
+#define NH_FLD_IPV6_HOP_LIMIT BIT(6)
+#define NH_FLD_IPV6_ID BIT(7)
+#define NH_FLD_IPV6_ALL_FIELDS (BIT(8) - 1)
+
+/* ICMP fields */
+#define NH_FLD_ICMP_TYPE BIT(0)
+#define NH_FLD_ICMP_CODE BIT(1)
+#define NH_FLD_ICMP_CKSUM BIT(2)
+#define NH_FLD_ICMP_ID BIT(3)
+#define NH_FLD_ICMP_SQ_NUM BIT(4)
+#define NH_FLD_ICMP_ALL_FIELDS (BIT(5) - 1)
+
+/* IGMP fields */
+#define NH_FLD_IGMP_VERSION BIT(0)
+#define NH_FLD_IGMP_TYPE BIT(1)
+#define NH_FLD_IGMP_CKSUM BIT(2)
+#define NH_FLD_IGMP_DATA BIT(3)
+#define NH_FLD_IGMP_ALL_FIELDS (BIT(4) - 1)
+
+/* TCP fields */
+#define NH_FLD_TCP_PORT_SRC BIT(0)
+#define NH_FLD_TCP_PORT_DST BIT(1)
+#define NH_FLD_TCP_SEQ BIT(2)
+#define NH_FLD_TCP_ACK BIT(3)
+#define NH_FLD_TCP_OFFSET BIT(4)
+#define NH_FLD_TCP_FLAGS BIT(5)
+#define NH_FLD_TCP_WINDOW BIT(6)
+#define NH_FLD_TCP_CKSUM BIT(7)
+#define NH_FLD_TCP_URGPTR BIT(8)
+#define NH_FLD_TCP_OPTS BIT(9)
+#define NH_FLD_TCP_OPTS_COUNT BIT(10)
+#define NH_FLD_TCP_ALL_FIELDS (BIT(11) - 1)
+
+/* UDP fields */
+#define NH_FLD_UDP_PORT_SRC BIT(0)
+#define NH_FLD_UDP_PORT_DST BIT(1)
+#define NH_FLD_UDP_LEN BIT(2)
+#define NH_FLD_UDP_CKSUM BIT(3)
+#define NH_FLD_UDP_ALL_FIELDS (BIT(4) - 1)
+
+/* UDP-lite fields */
+#define NH_FLD_UDP_LITE_PORT_SRC BIT(0)
+#define NH_FLD_UDP_LITE_PORT_DST BIT(1)
+#define NH_FLD_UDP_LITE_ALL_FIELDS (BIT(2) - 1)
+
+/* UDP-encap-ESP fields */
+#define NH_FLD_UDP_ENC_ESP_PORT_SRC BIT(0)
+#define NH_FLD_UDP_ENC_ESP_PORT_DST BIT(1)
+#define NH_FLD_UDP_ENC_ESP_LEN BIT(2)
+#define NH_FLD_UDP_ENC_ESP_CKSUM BIT(3)
+#define NH_FLD_UDP_ENC_ESP_SPI BIT(4)
+#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM BIT(5)
+#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS (BIT(6) - 1)
+
+/* SCTP fields */
+#define NH_FLD_SCTP_PORT_SRC BIT(0)
+#define NH_FLD_SCTP_PORT_DST BIT(1)
+#define NH_FLD_SCTP_VER_TAG BIT(2)
+#define NH_FLD_SCTP_CKSUM BIT(3)
+#define NH_FLD_SCTP_ALL_FIELDS (BIT(4) - 1)
+
+/* DCCP fields */
+#define NH_FLD_DCCP_PORT_SRC BIT(0)
+#define NH_FLD_DCCP_PORT_DST BIT(1)
+#define NH_FLD_DCCP_ALL_FIELDS (BIT(2) - 1)
+
+/* IPHC fields */
+#define NH_FLD_IPHC_CID BIT(0)
+#define NH_FLD_IPHC_CID_TYPE BIT(1)
+#define NH_FLD_IPHC_HCINDEX BIT(2)
+#define NH_FLD_IPHC_GEN BIT(3)
+#define NH_FLD_IPHC_D_BIT BIT(4)
+#define NH_FLD_IPHC_ALL_FIELDS (BIT(5) - 1)
+
+/* SCTP chunk data fields */
+#define NH_FLD_SCTP_CHUNK_DATA_TYPE BIT(0)
+#define NH_FLD_SCTP_CHUNK_DATA_FLAGS BIT(1)
+#define NH_FLD_SCTP_CHUNK_DATA_LENGTH BIT(2)
+#define NH_FLD_SCTP_CHUNK_DATA_TSN BIT(3)
+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID BIT(4)
+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN BIT(5)
+#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID BIT(6)
+#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED BIT(7)
+#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING BIT(8)
+#define NH_FLD_SCTP_CHUNK_DATA_END BIT(9)
+#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS (BIT(10) - 1)
+
+/* L2TPV2 fields */
+#define NH_FLD_L2TPV2_TYPE_BIT BIT(0)
+#define NH_FLD_L2TPV2_LENGTH_BIT BIT(1)
+#define NH_FLD_L2TPV2_SEQUENCE_BIT BIT(2)
+#define NH_FLD_L2TPV2_OFFSET_BIT BIT(3)
+#define NH_FLD_L2TPV2_PRIORITY_BIT BIT(4)
+#define NH_FLD_L2TPV2_VERSION BIT(5)
+#define NH_FLD_L2TPV2_LEN BIT(6)
+#define NH_FLD_L2TPV2_TUNNEL_ID BIT(7)
+#define NH_FLD_L2TPV2_SESSION_ID BIT(8)
+#define NH_FLD_L2TPV2_NS BIT(9)
+#define NH_FLD_L2TPV2_NR BIT(10)
+#define NH_FLD_L2TPV2_OFFSET_SIZE BIT(11)
+#define NH_FLD_L2TPV2_FIRST_BYTE BIT(12)
+#define NH_FLD_L2TPV2_ALL_FIELDS (BIT(13) - 1)
+
+/* L2TPV3 fields */
+#define NH_FLD_L2TPV3_CTRL_TYPE_BIT BIT(0)
+#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT BIT(1)
+#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT BIT(2)
+#define NH_FLD_L2TPV3_CTRL_VERSION BIT(3)
+#define NH_FLD_L2TPV3_CTRL_LENGTH BIT(4)
+#define NH_FLD_L2TPV3_CTRL_CONTROL BIT(5)
+#define NH_FLD_L2TPV3_CTRL_SENT BIT(6)
+#define NH_FLD_L2TPV3_CTRL_RECV BIT(7)
+#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE BIT(8)
+#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS (BIT(9) - 1)
+
+#define NH_FLD_L2TPV3_SESS_TYPE_BIT BIT(0)
+#define NH_FLD_L2TPV3_SESS_VERSION BIT(1)
+#define NH_FLD_L2TPV3_SESS_ID BIT(2)
+#define NH_FLD_L2TPV3_SESS_COOKIE BIT(3)
+#define NH_FLD_L2TPV3_SESS_ALL_FIELDS (BIT(4) - 1)
+
+/* PPP fields */
+#define NH_FLD_PPP_PID BIT(0)
+#define NH_FLD_PPP_COMPRESSED BIT(1)
+#define NH_FLD_PPP_ALL_FIELDS (BIT(2) - 1)
+
+/* PPPoE fields */
+#define NH_FLD_PPPOE_VER BIT(0)
+#define NH_FLD_PPPOE_TYPE BIT(1)
+#define NH_FLD_PPPOE_CODE BIT(2)
+#define NH_FLD_PPPOE_SID BIT(3)
+#define NH_FLD_PPPOE_LEN BIT(4)
+#define NH_FLD_PPPOE_SESSION BIT(5)
+#define NH_FLD_PPPOE_PID BIT(6)
+#define NH_FLD_PPPOE_ALL_FIELDS (BIT(7) - 1)
+
+/* PPP-Mux fields */
+#define NH_FLD_PPPMUX_PID BIT(0)
+#define NH_FLD_PPPMUX_CKSUM BIT(1)
+#define NH_FLD_PPPMUX_COMPRESSED BIT(2)
+#define NH_FLD_PPPMUX_ALL_FIELDS (BIT(3) - 1)
+
+/* PPP-Mux sub-frame fields */
+#define NH_FLD_PPPMUX_SUBFRM_PFF BIT(0)
+#define NH_FLD_PPPMUX_SUBFRM_LXT BIT(1)
+#define NH_FLD_PPPMUX_SUBFRM_LEN BIT(2)
+#define NH_FLD_PPPMUX_SUBFRM_PID BIT(3)
+#define NH_FLD_PPPMUX_SUBFRM_USE_PID BIT(4)
+#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS (BIT(5) - 1)
+
+/* LLC fields */
+#define NH_FLD_LLC_DSAP BIT(0)
+#define NH_FLD_LLC_SSAP BIT(1)
+#define NH_FLD_LLC_CTRL BIT(2)
+#define NH_FLD_LLC_ALL_FIELDS (BIT(3) - 1)
+
+/* NLPID fields */
+#define NH_FLD_NLPID_NLPID BIT(0)
+#define NH_FLD_NLPID_ALL_FIELDS (BIT(1) - 1)
+
+/* SNAP fields */
+#define NH_FLD_SNAP_OUI BIT(0)
+#define NH_FLD_SNAP_PID BIT(1)
+#define NH_FLD_SNAP_ALL_FIELDS (BIT(2) - 1)
+
+/* LLC SNAP fields */
+#define NH_FLD_LLC_SNAP_TYPE BIT(0)
+#define NH_FLD_LLC_SNAP_ALL_FIELDS (BIT(1) - 1)
+
+/* ARP fields */
+#define NH_FLD_ARP_HTYPE BIT(0)
+#define NH_FLD_ARP_PTYPE BIT(1)
+#define NH_FLD_ARP_HLEN BIT(2)
+#define NH_FLD_ARP_PLEN BIT(3)
+#define NH_FLD_ARP_OPER BIT(4)
+#define NH_FLD_ARP_SHA BIT(5)
+#define NH_FLD_ARP_SPA BIT(6)
+#define NH_FLD_ARP_THA BIT(7)
+#define NH_FLD_ARP_TPA BIT(8)
+#define NH_FLD_ARP_ALL_FIELDS (BIT(9) - 1)
+
+/* RFC2684 fields */
+#define NH_FLD_RFC2684_LLC BIT(0)
+#define NH_FLD_RFC2684_NLPID BIT(1)
+#define NH_FLD_RFC2684_OUI BIT(2)
+#define NH_FLD_RFC2684_PID BIT(3)
+#define NH_FLD_RFC2684_VPN_OUI BIT(4)
+#define NH_FLD_RFC2684_VPN_IDX BIT(5)
+#define NH_FLD_RFC2684_ALL_FIELDS (BIT(6) - 1)
+
+/* User defined fields */
+#define NH_FLD_USER_DEFINED_SRCPORT BIT(0)
+#define NH_FLD_USER_DEFINED_PCDID BIT(1)
+#define NH_FLD_USER_DEFINED_ALL_FIELDS (BIT(2) - 1)
+
+/* Payload fields */
+#define NH_FLD_PAYLOAD_BUFFER BIT(0)
+#define NH_FLD_PAYLOAD_SIZE BIT(1)
+#define NH_FLD_MAX_FRM_SIZE BIT(2)
+#define NH_FLD_MIN_FRM_SIZE BIT(3)
+#define NH_FLD_PAYLOAD_TYPE BIT(4)
+#define NH_FLD_FRAME_SIZE BIT(5)
+#define NH_FLD_PAYLOAD_ALL_FIELDS (BIT(6) - 1)
+
+/* GRE fields */
+#define NH_FLD_GRE_TYPE BIT(0)
+#define NH_FLD_GRE_ALL_FIELDS (BIT(1) - 1)
+
+/* MINENCAP fields */
+#define NH_FLD_MINENCAP_SRC_IP BIT(0)
+#define NH_FLD_MINENCAP_DST_IP BIT(1)
+#define NH_FLD_MINENCAP_TYPE BIT(2)
+#define NH_FLD_MINENCAP_ALL_FIELDS (BIT(3) - 1)
+
+/* IPSEC AH fields */
+#define NH_FLD_IPSEC_AH_SPI BIT(0)
+#define NH_FLD_IPSEC_AH_NH BIT(1)
+#define NH_FLD_IPSEC_AH_ALL_FIELDS (BIT(2) - 1)
+
+/* IPSEC ESP fields */
+#define NH_FLD_IPSEC_ESP_SPI BIT(0)
+#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM BIT(1)
+#define NH_FLD_IPSEC_ESP_ALL_FIELDS (BIT(2) - 1)
+
+/* MPLS fields */
+#define NH_FLD_MPLS_LABEL_STACK BIT(0)
+#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS (BIT(1) - 1)
+
+/* MACSEC fields */
+#define NH_FLD_MACSEC_SECTAG BIT(0)
+#define NH_FLD_MACSEC_ALL_FIELDS (BIT(1) - 1)
+
+/* GTP fields */
+#define NH_FLD_GTP_TEID BIT(0)
+
+/* Supported protocols */
+enum net_prot {
+ NET_PROT_NONE = 0,
+ NET_PROT_PAYLOAD,
+ NET_PROT_ETH,
+ NET_PROT_VLAN,
+ NET_PROT_IPV4,
+ NET_PROT_IPV6,
+ NET_PROT_IP,
+ NET_PROT_TCP,
+ NET_PROT_UDP,
+ NET_PROT_UDP_LITE,
+ NET_PROT_IPHC,
+ NET_PROT_SCTP,
+ NET_PROT_SCTP_CHUNK_DATA,
+ NET_PROT_PPPOE,
+ NET_PROT_PPP,
+ NET_PROT_PPPMUX,
+ NET_PROT_PPPMUX_SUBFRM,
+ NET_PROT_L2TPV2,
+ NET_PROT_L2TPV3_CTRL,
+ NET_PROT_L2TPV3_SESS,
+ NET_PROT_LLC,
+ NET_PROT_LLC_SNAP,
+ NET_PROT_NLPID,
+ NET_PROT_SNAP,
+ NET_PROT_MPLS,
+ NET_PROT_IPSEC_AH,
+ NET_PROT_IPSEC_ESP,
+ NET_PROT_UDP_ENC_ESP, /* RFC 3948 */
+ NET_PROT_MACSEC,
+ NET_PROT_GRE,
+ NET_PROT_MINENCAP,
+ NET_PROT_DCCP,
+ NET_PROT_ICMP,
+ NET_PROT_IGMP,
+ NET_PROT_ARP,
+ NET_PROT_CAPWAP_DATA,
+ NET_PROT_CAPWAP_CTRL,
+ NET_PROT_RFC2684,
+ NET_PROT_ICMPV6,
+ NET_PROT_FCOE,
+ NET_PROT_FIP,
+ NET_PROT_ISCSI,
+ NET_PROT_GTP,
+ NET_PROT_USER_DEFINED_L2,
+ NET_PROT_USER_DEFINED_L3,
+ NET_PROT_USER_DEFINED_L4,
+ NET_PROT_USER_DEFINED_L5,
+ NET_PROT_USER_DEFINED_SHIM1,
+ NET_PROT_USER_DEFINED_SHIM2,
+
+ NET_PROT_DUMMY_LAST
+};
+
+/**
+ * struct dpkg_extract - A structure for defining a single extraction
+ * @type: Determines how the union below is interpreted:
+ * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr';
+ * DPKG_EXTRACT_FROM_DATA: selects 'from_data';
+ * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse'
+ * @extract: Selects extraction method
+ * @extract.from_hdr: Used when 'type = DPKG_EXTRACT_FROM_HDR'
+ * @extract.from_data: Used when 'type = DPKG_EXTRACT_FROM_DATA'
+ * @extract.from_parse: Used when 'type = DPKG_EXTRACT_FROM_PARSE'
+ * @extract.from_hdr.prot: Any of the supported headers
+ * @extract.from_hdr.type: Defines the type of header extraction:
+ * DPKG_FROM_HDR: use size & offset below;
+ * DPKG_FROM_FIELD: use field, size and offset below;
+ * DPKG_FULL_FIELD: use field below
+ * @extract.from_hdr.field: One of the supported fields (NH_FLD_)
+ * @extract.from_hdr.size: Size in bytes
+ * @extract.from_hdr.offset: Byte offset
+ * @extract.from_hdr.hdr_index: Clear for cases not listed below;
+ * Used for protocols that may have more than a single
+ * header, 0 indicates an outer header;
+ * Supported protocols (possible values):
+ * NET_PROT_VLAN (0, HDR_INDEX_LAST);
+ * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST);
+ *		NET_PROT_IP (0, HDR_INDEX_LAST);
+ *		NET_PROT_IPv4 (0, HDR_INDEX_LAST);
+ *		NET_PROT_IPv6 (0, HDR_INDEX_LAST);
+ * @extract.from_data.size: Size in bytes
+ * @extract.from_data.offset: Byte offset
+ * @extract.from_parse.size: Size in bytes
+ * @extract.from_parse.offset: Byte offset
+ * @num_of_byte_masks: Defines the number of valid entries in the array below;
+ * This is also the number of bytes to be used as masks
+ * @masks: Masks parameters
+ */
+struct dpkg_extract {
+ enum dpkg_extract_type type;
+ union {
+ struct {
+ enum net_prot prot;
+ enum dpkg_extract_from_hdr_type type;
+ u32 field;
+ u8 size;
+ u8 offset;
+ u8 hdr_index;
+ } from_hdr;
+ struct {
+ u8 size;
+ u8 offset;
+ } from_data;
+ struct {
+ u8 size;
+ u8 offset;
+ } from_parse;
+ } extract;
+
+ u8 num_of_byte_masks;
+ struct dpkg_mask masks[DPKG_NUM_OF_MASKS];
+};
+
+/**
+ * struct dpkg_profile_cfg - A structure for defining a full Key Generation
+ * profile (rule)
+ * @num_extracts: Defines the number of valid entries in the array below
+ * @extracts: Array of required extractions
+ */
+struct dpkg_profile_cfg {
+ u8 num_extracts;
+ struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
+};
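+
+/* Illustrative sketch (not part of the API): a profile that extracts the
+ * IPv4 source and destination addresses in full could be filled in roughly
+ * as follows, assuming a caller-owned 'cfg':
+ *
+ *	struct dpkg_profile_cfg cfg = { 0 };
+ *
+ *	cfg.num_extracts = 2;
+ *	cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
+ *	cfg.extracts[0].extract.from_hdr.prot = NET_PROT_IP;
+ *	cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
+ *	cfg.extracts[0].extract.from_hdr.field = NH_FLD_IP_SRC;
+ *	cfg.extracts[1] = cfg.extracts[0];
+ *	cfg.extracts[1].extract.from_hdr.field = NH_FLD_IP_DST;
+ */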
+
+#endif /* __FSL_DPKG_H_ */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
new file mode 100644
index 000000000..e9ac2ecef
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2019 NXP
+ */
+#ifndef _FSL_DPMAC_CMD_H
+#define _FSL_DPMAC_CMD_H
+
+/* DPMAC Version */
+#define DPMAC_VER_MAJOR 4
+#define DPMAC_VER_MINOR 4
+#define DPMAC_CMD_BASE_VERSION 1
+#define DPMAC_CMD_2ND_VERSION 2
+#define DPMAC_CMD_ID_OFFSET 4
+
+#define DPMAC_CMD(id) (((id) << DPMAC_CMD_ID_OFFSET) | DPMAC_CMD_BASE_VERSION)
+#define DPMAC_CMD_V2(id) (((id) << DPMAC_CMD_ID_OFFSET) | DPMAC_CMD_2ND_VERSION)
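+/* e.g. DPMAC_CMD(0x800) == (0x800 << 4) | 1 == 0x8001, encoding the command
+ * ID together with the command format version.
+ */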
+
+/* Command IDs */
+#define DPMAC_CMDID_CLOSE DPMAC_CMD(0x800)
+#define DPMAC_CMDID_OPEN DPMAC_CMD(0x80c)
+
+#define DPMAC_CMDID_GET_API_VERSION DPMAC_CMD(0xa0c)
+
+#define DPMAC_CMDID_GET_ATTR DPMAC_CMD(0x004)
+#define DPMAC_CMDID_SET_LINK_STATE DPMAC_CMD_V2(0x0c3)
+
+#define DPMAC_CMDID_GET_COUNTER DPMAC_CMD(0x0c4)
+
+#define DPMAC_CMDID_SET_PROTOCOL DPMAC_CMD(0x0c7)
+
+/* Macros for accessing command fields smaller than 1 byte */
+#define DPMAC_MASK(field) \
+ GENMASK(DPMAC_##field##_SHIFT + DPMAC_##field##_SIZE - 1, \
+ DPMAC_##field##_SHIFT)
+
+#define dpmac_set_field(var, field, val) \
+ ((var) |= (((val) << DPMAC_##field##_SHIFT) & DPMAC_MASK(field)))
+#define dpmac_get_field(var, field) \
+ (((var) & DPMAC_MASK(field)) >> DPMAC_##field##_SHIFT)
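+/* Example (illustrative), using the STATE field defined below:
+ *	dpmac_set_field(state, STATE, 1) sets bit 0 of 'state', and
+ *	dpmac_get_field(state, STATE) reads it back.
+ */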
+
+struct dpmac_cmd_open {
+ __le32 dpmac_id;
+};
+
+struct dpmac_rsp_get_attributes {
+ u8 eth_if;
+ u8 link_type;
+ __le16 id;
+ __le32 max_rate;
+};
+
+#define DPMAC_STATE_SIZE 1
+#define DPMAC_STATE_SHIFT 0
+#define DPMAC_STATE_VALID_SIZE 1
+#define DPMAC_STATE_VALID_SHIFT 1
+
+struct dpmac_cmd_set_link_state {
+ __le64 options;
+ __le32 rate;
+ __le32 pad0;
+ /* from lsb: up:1, state_valid:1 */
+ u8 state;
+ u8 pad1[7];
+ __le64 supported;
+ __le64 advertising;
+};
+
+struct dpmac_cmd_get_counter {
+ u8 id;
+};
+
+struct dpmac_rsp_get_counter {
+ __le64 pad;
+ __le64 counter;
+};
+
+struct dpmac_rsp_get_api_version {
+ __le16 major;
+ __le16 minor;
+};
+
+struct dpmac_cmd_set_protocol {
+ u8 eth_if;
+};
+#endif /* _FSL_DPMAC_CMD_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac.c b/drivers/net/ethernet/freescale/dpaa2/dpmac.c
new file mode 100644
index 000000000..f440a4c3b
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2019 NXP
+ */
+#include <linux/fsl/mc.h>
+#include "dpmac.h"
+#include "dpmac-cmd.h"
+
+/**
+ * dpmac_open() - Open a control session for the specified object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpmac_id: DPMAC unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpmac_create function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpmac_id,
+ u16 *token)
+{
+ struct dpmac_cmd_open *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dpmac_cmd_open *)cmd.params;
+ cmd_params->dpmac_id = cpu_to_le32(dpmac_id);
+
+	/* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return err;
+}
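+
+/* Illustrative open/use/close sketch (error handling elided):
+ *
+ *	u16 token;
+ *
+ *	err = dpmac_open(mc_io, 0, dpmac_id, &token);
+ *	... issue other dpmac_*() commands using 'token' ...
+ *	dpmac_close(mc_io, 0, token);
+ */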
+
+/**
+ * dpmac_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLOSE, cmd_flags,
+ token);
+
+	/* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpmac_get_attributes() - Retrieve DPMAC attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpmac_attr *attr)
+{
+ struct dpmac_rsp_get_attributes *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+	/* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpmac_rsp_get_attributes *)cmd.params;
+ attr->eth_if = rsp_params->eth_if;
+ attr->link_type = rsp_params->link_type;
+ attr->id = le16_to_cpu(rsp_params->id);
+ attr->max_rate = le32_to_cpu(rsp_params->max_rate);
+
+ return 0;
+}
+
+/**
+ * dpmac_set_link_state() - Set the Ethernet link status
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @link_state: Link state configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_set_link_state(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpmac_link_state *link_state)
+{
+ struct dpmac_cmd_set_link_state *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_LINK_STATE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpmac_cmd_set_link_state *)cmd.params;
+ cmd_params->options = cpu_to_le64(link_state->options);
+ cmd_params->rate = cpu_to_le32(link_state->rate);
+ dpmac_set_field(cmd_params->state, STATE, link_state->up);
+ dpmac_set_field(cmd_params->state, STATE_VALID,
+ link_state->state_valid);
+ cmd_params->supported = cpu_to_le64(link_state->supported);
+ cmd_params->advertising = cpu_to_le64(link_state->advertising);
+
+	/* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
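+
+/* Illustrative caller sketch (values are assumptions, error handling elided):
+ *
+ *	struct dpmac_link_state state = {
+ *		.rate = 1000,
+ *		.options = DPMAC_LINK_OPT_AUTONEG,
+ *		.up = 1,
+ *		.state_valid = 1,
+ *	};
+ *
+ *	err = dpmac_set_link_state(mc_io, 0, token, &state);
+ */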
+
+/**
+ * dpmac_get_counter() - Read a specific DPMAC counter
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @id: The requested counter ID
+ * @value: Returned counter value
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ enum dpmac_counter_id id, u64 *value)
+{
+ struct dpmac_cmd_get_counter *dpmac_cmd;
+ struct dpmac_rsp_get_counter *dpmac_rsp;
+ struct fsl_mc_command cmd = { 0 };
+	int err;
+
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_COUNTER,
+ cmd_flags,
+ token);
+ dpmac_cmd = (struct dpmac_cmd_get_counter *)cmd.params;
+ dpmac_cmd->id = id;
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ dpmac_rsp = (struct dpmac_rsp_get_counter *)cmd.params;
+ *value = le64_to_cpu(dpmac_rsp->counter);
+
+ return 0;
+}
+
+/**
+ * dpmac_get_api_version() - Get Data Path MAC version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path mac API
+ * @minor_ver: Minor version of data path mac API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver)
+{
+ struct dpmac_rsp_get_api_version *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_API_VERSION,
+ cmd_flags,
+ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpmac_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
+
+ return 0;
+}
+
+/**
+ * dpmac_set_protocol() - Reconfigure the DPMAC protocol
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @protocol: New protocol that the DPMAC should be reconfigured to use
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_set_protocol(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ enum dpmac_eth_if protocol)
+{
+ struct dpmac_cmd_set_protocol *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_PROTOCOL,
+ cmd_flags, token);
+ cmd_params = (struct dpmac_cmd_set_protocol *)cmd.params;
+ cmd_params->eth_if = protocol;
+
+ return mc_send_command(mc_io, &cmd);
+}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac.h b/drivers/net/ethernet/freescale/dpaa2/dpmac.h
new file mode 100644
index 000000000..17488819e
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac.h
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2019 NXP
+ */
+#ifndef __FSL_DPMAC_H
+#define __FSL_DPMAC_H
+
+/* Data Path MAC API
+ * Contains initialization APIs and runtime control APIs for DPMAC
+ */
+
+struct fsl_mc_io;
+
+int dpmac_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpmac_id,
+ u16 *token);
+
+int dpmac_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/**
+ * enum dpmac_link_type - DPMAC link type
+ * @DPMAC_LINK_TYPE_NONE: No link
+ * @DPMAC_LINK_TYPE_FIXED: Link is fixed type
+ * @DPMAC_LINK_TYPE_PHY: Link by PHY ID
+ * @DPMAC_LINK_TYPE_BACKPLANE: Backplane link type
+ */
+enum dpmac_link_type {
+ DPMAC_LINK_TYPE_NONE,
+ DPMAC_LINK_TYPE_FIXED,
+ DPMAC_LINK_TYPE_PHY,
+ DPMAC_LINK_TYPE_BACKPLANE
+};
+
+/**
+ * enum dpmac_eth_if - DPMAC Ethernet interface
+ * @DPMAC_ETH_IF_MII: MII interface
+ * @DPMAC_ETH_IF_RMII: RMII interface
+ * @DPMAC_ETH_IF_SMII: SMII interface
+ * @DPMAC_ETH_IF_GMII: GMII interface
+ * @DPMAC_ETH_IF_RGMII: RGMII interface
+ * @DPMAC_ETH_IF_SGMII: SGMII interface
+ * @DPMAC_ETH_IF_QSGMII: QSGMII interface
+ * @DPMAC_ETH_IF_XAUI: XAUI interface
+ * @DPMAC_ETH_IF_XFI: XFI interface
+ * @DPMAC_ETH_IF_CAUI: CAUI interface
+ * @DPMAC_ETH_IF_1000BASEX: 1000BASEX interface
+ * @DPMAC_ETH_IF_USXGMII: USXGMII interface
+ */
+enum dpmac_eth_if {
+ DPMAC_ETH_IF_MII,
+ DPMAC_ETH_IF_RMII,
+ DPMAC_ETH_IF_SMII,
+ DPMAC_ETH_IF_GMII,
+ DPMAC_ETH_IF_RGMII,
+ DPMAC_ETH_IF_SGMII,
+ DPMAC_ETH_IF_QSGMII,
+ DPMAC_ETH_IF_XAUI,
+ DPMAC_ETH_IF_XFI,
+ DPMAC_ETH_IF_CAUI,
+ DPMAC_ETH_IF_1000BASEX,
+ DPMAC_ETH_IF_USXGMII,
+};
+
+/**
+ * struct dpmac_attr - Structure representing DPMAC attributes
+ * @id: DPMAC object ID
+ * @max_rate: Maximum supported rate - in Mbps
+ * @eth_if: Ethernet interface
+ * @link_type: link type
+ */
+struct dpmac_attr {
+ u16 id;
+ u32 max_rate;
+ enum dpmac_eth_if eth_if;
+ enum dpmac_link_type link_type;
+};
+
+int dpmac_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpmac_attr *attr);
+
+/* DPMAC link configuration/state options */
+
+#define DPMAC_LINK_OPT_AUTONEG BIT_ULL(0)
+#define DPMAC_LINK_OPT_HALF_DUPLEX BIT_ULL(1)
+#define DPMAC_LINK_OPT_PAUSE BIT_ULL(2)
+#define DPMAC_LINK_OPT_ASYM_PAUSE BIT_ULL(3)
+
+/* Advertised link speeds */
+#define DPMAC_ADVERTISED_10BASET_FULL BIT_ULL(0)
+#define DPMAC_ADVERTISED_100BASET_FULL BIT_ULL(1)
+#define DPMAC_ADVERTISED_1000BASET_FULL BIT_ULL(2)
+#define DPMAC_ADVERTISED_10000BASET_FULL BIT_ULL(4)
+#define DPMAC_ADVERTISED_2500BASEX_FULL BIT_ULL(5)
+
+/* Advertise auto-negotiation enable */
+#define DPMAC_ADVERTISED_AUTONEG BIT_ULL(3)
+
+/**
+ * struct dpmac_link_state - DPMAC link configuration request
+ * @rate: Rate in Mbps
+ * @options: Enable/Disable DPMAC link cfg features (bitmap)
+ * @up: Link state
+ * @state_valid: Ignore/Update the state of the link
+ * @supported: Speeds capability of the phy (bitmap)
+ * @advertising: Speeds that are advertised for autoneg (bitmap)
+ */
+struct dpmac_link_state {
+ u32 rate;
+ u64 options;
+ int up;
+ int state_valid;
+ u64 supported;
+ u64 advertising;
+};
+
+int dpmac_set_link_state(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpmac_link_state *link_state);
+
+/**
+ * enum dpmac_counter_id - DPMAC counter types
+ *
+ * @DPMAC_CNT_ING_FRAME_64: counts 64-bytes frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_127: counts 65- to 127-bytes frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_255: counts 128- to 255-bytes frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_511: counts 256- to 511-bytes frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_1023: counts 512- to 1023-bytes frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_1518: counts 1024- to 1518-bytes frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_1519_MAX: counts 1519-bytes frames and larger
+ * (up to max frame length specified),
+ * good or bad.
+ * @DPMAC_CNT_ING_FRAG: counts frames which are shorter than 64 bytes received
+ * with a wrong CRC
+ * @DPMAC_CNT_ING_JABBER: counts frames longer than the maximum frame length
+ * specified, with a bad frame check sequence.
+ * @DPMAC_CNT_ING_FRAME_DISCARD: counts dropped frames due to internal errors.
+ * Occurs when a receive FIFO overflows.
+ *	Also includes frames truncated as a result of
+ *	the receive FIFO overflow.
+ * @DPMAC_CNT_ING_ALIGN_ERR: counts frames with an alignment error
+ *	(optionally used for a wrong SFD).
+ * @DPMAC_CNT_EGR_UNDERSIZED: counts transmitted frames that were less than
+ *	64 bytes long with a good CRC.
+ * @DPMAC_CNT_ING_OVERSIZED: counts frames longer than the maximum frame length
+ * specified, with a good frame check sequence.
+ * @DPMAC_CNT_ING_VALID_PAUSE_FRAME: counts valid pause frames (regular and PFC)
+ * @DPMAC_CNT_EGR_VALID_PAUSE_FRAME: counts valid pause frames transmitted
+ * (regular and PFC).
+ * @DPMAC_CNT_ING_BYTE: counts bytes received except preamble for all valid
+ * frames and valid pause frames.
+ * @DPMAC_CNT_ING_MCAST_FRAME: counts received multicast frames.
+ * @DPMAC_CNT_ING_BCAST_FRAME: counts received broadcast frames.
+ * @DPMAC_CNT_ING_ALL_FRAME: counts all frames received, good or bad.
+ * @DPMAC_CNT_ING_UCAST_FRAME: counts received unicast frames.
+ * @DPMAC_CNT_ING_ERR_FRAME: counts frames received with an error
+ * (except for undersized/fragment frame).
+ * @DPMAC_CNT_EGR_BYTE: counts bytes transmitted except preamble for all valid
+ * frames and valid pause frames transmitted.
+ * @DPMAC_CNT_EGR_MCAST_FRAME: counts transmitted multicast frames.
+ * @DPMAC_CNT_EGR_BCAST_FRAME: counts transmitted broadcast frames.
+ * @DPMAC_CNT_EGR_UCAST_FRAME: counts transmitted unicast frames.
+ * @DPMAC_CNT_EGR_ERR_FRAME: counts frames transmitted with an error.
+ * @DPMAC_CNT_ING_GOOD_FRAME: counts frames received without error, including
+ * pause frames.
+ * @DPMAC_CNT_EGR_GOOD_FRAME: counts frames transmitted without error, including
+ * pause frames.
+ */
+enum dpmac_counter_id {
+ DPMAC_CNT_ING_FRAME_64,
+ DPMAC_CNT_ING_FRAME_127,
+ DPMAC_CNT_ING_FRAME_255,
+ DPMAC_CNT_ING_FRAME_511,
+ DPMAC_CNT_ING_FRAME_1023,
+ DPMAC_CNT_ING_FRAME_1518,
+ DPMAC_CNT_ING_FRAME_1519_MAX,
+ DPMAC_CNT_ING_FRAG,
+ DPMAC_CNT_ING_JABBER,
+ DPMAC_CNT_ING_FRAME_DISCARD,
+ DPMAC_CNT_ING_ALIGN_ERR,
+ DPMAC_CNT_EGR_UNDERSIZED,
+ DPMAC_CNT_ING_OVERSIZED,
+ DPMAC_CNT_ING_VALID_PAUSE_FRAME,
+ DPMAC_CNT_EGR_VALID_PAUSE_FRAME,
+ DPMAC_CNT_ING_BYTE,
+ DPMAC_CNT_ING_MCAST_FRAME,
+ DPMAC_CNT_ING_BCAST_FRAME,
+ DPMAC_CNT_ING_ALL_FRAME,
+ DPMAC_CNT_ING_UCAST_FRAME,
+ DPMAC_CNT_ING_ERR_FRAME,
+ DPMAC_CNT_EGR_BYTE,
+ DPMAC_CNT_EGR_MCAST_FRAME,
+ DPMAC_CNT_EGR_BCAST_FRAME,
+ DPMAC_CNT_EGR_UCAST_FRAME,
+ DPMAC_CNT_EGR_ERR_FRAME,
+ DPMAC_CNT_ING_GOOD_FRAME,
+ DPMAC_CNT_EGR_GOOD_FRAME
+};
+
+int dpmac_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ enum dpmac_counter_id id, u64 *value);
+
+int dpmac_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver);
+
+int dpmac_set_protocol(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ enum dpmac_eth_if protocol);
+#endif /* __FSL_DPMAC_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
new file mode 100644
index 000000000..828f53809
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
@@ -0,0 +1,686 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ * Copyright 2020 NXP
+ */
+#ifndef _FSL_DPNI_CMD_H
+#define _FSL_DPNI_CMD_H
+
+#include "dpni.h"
+
+/* DPNI Version */
+#define DPNI_VER_MAJOR 7
+#define DPNI_VER_MINOR 0
+#define DPNI_CMD_BASE_VERSION 1
+#define DPNI_CMD_2ND_VERSION 2
+#define DPNI_CMD_ID_OFFSET 4
+
+#define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
+#define DPNI_CMD_V2(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_2ND_VERSION)
+
+#define DPNI_CMDID_OPEN DPNI_CMD(0x801)
+#define DPNI_CMDID_CLOSE DPNI_CMD(0x800)
+#define DPNI_CMDID_CREATE DPNI_CMD(0x901)
+#define DPNI_CMDID_DESTROY DPNI_CMD(0x900)
+#define DPNI_CMDID_GET_API_VERSION DPNI_CMD(0xa01)
+
+#define DPNI_CMDID_ENABLE DPNI_CMD(0x002)
+#define DPNI_CMDID_DISABLE DPNI_CMD(0x003)
+#define DPNI_CMDID_GET_ATTR DPNI_CMD(0x004)
+#define DPNI_CMDID_RESET DPNI_CMD(0x005)
+#define DPNI_CMDID_IS_ENABLED DPNI_CMD(0x006)
+
+#define DPNI_CMDID_SET_IRQ DPNI_CMD(0x010)
+#define DPNI_CMDID_GET_IRQ DPNI_CMD(0x011)
+#define DPNI_CMDID_SET_IRQ_ENABLE DPNI_CMD(0x012)
+#define DPNI_CMDID_GET_IRQ_ENABLE DPNI_CMD(0x013)
+#define DPNI_CMDID_SET_IRQ_MASK DPNI_CMD(0x014)
+#define DPNI_CMDID_GET_IRQ_MASK DPNI_CMD(0x015)
+#define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016)
+#define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017)
+
+#define DPNI_CMDID_SET_POOLS DPNI_CMD(0x200)
+#define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B)
+
+#define DPNI_CMDID_GET_QDID DPNI_CMD(0x210)
+#define DPNI_CMDID_GET_TX_DATA_OFFSET DPNI_CMD(0x212)
+#define DPNI_CMDID_GET_LINK_STATE DPNI_CMD(0x215)
+#define DPNI_CMDID_SET_MAX_FRAME_LENGTH DPNI_CMD(0x216)
+#define DPNI_CMDID_GET_MAX_FRAME_LENGTH DPNI_CMD(0x217)
+#define DPNI_CMDID_SET_LINK_CFG DPNI_CMD(0x21A)
+#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD_V2(0x21B)
+
+#define DPNI_CMDID_SET_MCAST_PROMISC DPNI_CMD(0x220)
+#define DPNI_CMDID_GET_MCAST_PROMISC DPNI_CMD(0x221)
+#define DPNI_CMDID_SET_UNICAST_PROMISC DPNI_CMD(0x222)
+#define DPNI_CMDID_GET_UNICAST_PROMISC DPNI_CMD(0x223)
+#define DPNI_CMDID_SET_PRIM_MAC DPNI_CMD(0x224)
+#define DPNI_CMDID_GET_PRIM_MAC DPNI_CMD(0x225)
+#define DPNI_CMDID_ADD_MAC_ADDR DPNI_CMD(0x226)
+#define DPNI_CMDID_REMOVE_MAC_ADDR DPNI_CMD(0x227)
+#define DPNI_CMDID_CLR_MAC_FILTERS DPNI_CMD(0x228)
+
+#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD(0x235)
+
+#define DPNI_CMDID_ENABLE_VLAN_FILTER DPNI_CMD(0x230)
+#define DPNI_CMDID_ADD_VLAN_ID DPNI_CMD_V2(0x231)
+#define DPNI_CMDID_REMOVE_VLAN_ID DPNI_CMD(0x232)
+
+#define DPNI_CMDID_SET_QOS_TBL DPNI_CMD(0x240)
+#define DPNI_CMDID_ADD_QOS_ENT DPNI_CMD(0x241)
+#define DPNI_CMDID_REMOVE_QOS_ENT DPNI_CMD(0x242)
+#define DPNI_CMDID_CLR_QOS_TBL DPNI_CMD(0x243)
+#define DPNI_CMDID_ADD_FS_ENT DPNI_CMD(0x244)
+#define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245)
+#define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246)
+
+#define DPNI_CMDID_GET_STATISTICS DPNI_CMD(0x25D)
+#define DPNI_CMDID_GET_QUEUE DPNI_CMD(0x25F)
+#define DPNI_CMDID_SET_QUEUE DPNI_CMD(0x260)
+#define DPNI_CMDID_GET_TAILDROP DPNI_CMD(0x261)
+#define DPNI_CMDID_SET_TAILDROP DPNI_CMD(0x262)
+
+#define DPNI_CMDID_GET_PORT_MAC_ADDR DPNI_CMD(0x263)
+
+#define DPNI_CMDID_GET_BUFFER_LAYOUT DPNI_CMD(0x264)
+#define DPNI_CMDID_SET_BUFFER_LAYOUT DPNI_CMD(0x265)
+
+#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE DPNI_CMD(0x266)
+#define DPNI_CMDID_SET_CONGESTION_NOTIFICATION DPNI_CMD(0x267)
+#define DPNI_CMDID_GET_CONGESTION_NOTIFICATION DPNI_CMD(0x268)
+#define DPNI_CMDID_SET_EARLY_DROP DPNI_CMD(0x269)
+#define DPNI_CMDID_GET_EARLY_DROP DPNI_CMD(0x26A)
+#define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B)
+#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C)
+
+#define DPNI_CMDID_SET_RX_FS_DIST DPNI_CMD(0x273)
+#define DPNI_CMDID_SET_RX_HASH_DIST DPNI_CMD(0x274)
+#define DPNI_CMDID_GET_LINK_CFG DPNI_CMD(0x278)
+
+#define DPNI_CMDID_SET_SINGLE_STEP_CFG DPNI_CMD(0x279)
+#define DPNI_CMDID_GET_SINGLE_STEP_CFG DPNI_CMD_V2(0x27a)
+
+/* Macros for accessing command fields smaller than 1 byte */
+#define DPNI_MASK(field) \
+ GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
+ DPNI_##field##_SHIFT)
+
+#define dpni_set_field(var, field, val) \
+ ((var) |= (((val) << DPNI_##field##_SHIFT) & DPNI_MASK(field)))
+#define dpni_get_field(var, field) \
+ (((var) & DPNI_MASK(field)) >> DPNI_##field##_SHIFT)
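+/* Example (illustrative), using the DPNI_LINK_STATE field defined further
+ * below: dpni_get_field(rsp->flags, LINK_STATE) extracts the 'up' bit from
+ * a get_link_state response.
+ */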
+
+struct dpni_cmd_open {
+ __le32 dpni_id;
+};
+
+#define DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order))
+struct dpni_cmd_set_pools {
+ /* cmd word 0 */
+ u8 num_dpbp;
+ u8 backup_pool_mask;
+ __le16 pad;
+ /* cmd word 0..4 */
+ __le32 dpbp_id[DPNI_MAX_DPBP];
+ /* cmd word 4..6 */
+ __le16 buffer_size[DPNI_MAX_DPBP];
+};
+
+/* The enable indication is always the least significant bit */
+#define DPNI_ENABLE_SHIFT 0
+#define DPNI_ENABLE_SIZE 1
+
+struct dpni_rsp_is_enabled {
+ u8 enabled;
+};
+
+struct dpni_rsp_get_irq {
+ /* response word 0 */
+ __le32 irq_val;
+ __le32 pad;
+ /* response word 1 */
+ __le64 irq_addr;
+ /* response word 2 */
+ __le32 irq_num;
+ __le32 type;
+};
+
+struct dpni_cmd_set_irq_enable {
+ u8 enable;
+ u8 pad[3];
+ u8 irq_index;
+};
+
+struct dpni_cmd_get_irq_enable {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dpni_rsp_get_irq_enable {
+ u8 enabled;
+};
+
+struct dpni_cmd_set_irq_mask {
+ __le32 mask;
+ u8 irq_index;
+};
+
+struct dpni_cmd_get_irq_mask {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dpni_rsp_get_irq_mask {
+ __le32 mask;
+};
+
+struct dpni_cmd_get_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dpni_rsp_get_irq_status {
+ __le32 status;
+};
+
+struct dpni_cmd_clear_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dpni_rsp_get_attr {
+ /* response word 0 */
+ __le32 options;
+ u8 num_queues;
+ u8 num_tcs;
+ u8 mac_filter_entries;
+ u8 pad0;
+ /* response word 1 */
+ u8 vlan_filter_entries;
+ u8 pad1;
+ u8 qos_entries;
+ u8 pad2;
+ __le16 fs_entries;
+ __le16 pad3;
+ /* response word 2 */
+ u8 qos_key_size;
+ u8 fs_key_size;
+ __le16 wriop_version;
+};
+
+#define DPNI_ERROR_ACTION_SHIFT 0
+#define DPNI_ERROR_ACTION_SIZE 4
+#define DPNI_FRAME_ANN_SHIFT 4
+#define DPNI_FRAME_ANN_SIZE 1
+
+struct dpni_cmd_set_errors_behavior {
+ __le32 errors;
+ /* from least significant bit: error_action:4, set_frame_annotation:1 */
+ u8 flags;
+};
+
+/* There are 3 separate commands for configuring Rx, Tx and Tx confirmation
+ * buffer layouts, but they all share the same parameters.
+ * If one of the functions changes, the structure below needs to be split.
+ */
+
+#define DPNI_PASS_TS_SHIFT 0
+#define DPNI_PASS_TS_SIZE 1
+#define DPNI_PASS_PR_SHIFT 1
+#define DPNI_PASS_PR_SIZE 1
+#define DPNI_PASS_FS_SHIFT 2
+#define DPNI_PASS_FS_SIZE 1
+
+struct dpni_cmd_get_buffer_layout {
+ u8 qtype;
+};
+
+struct dpni_rsp_get_buffer_layout {
+ /* response word 0 */
+ u8 pad0[6];
+ /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */
+ u8 flags;
+ u8 pad1;
+ /* response word 1 */
+ __le16 private_data_size;
+ __le16 data_align;
+ __le16 head_room;
+ __le16 tail_room;
+};
+
+struct dpni_cmd_set_buffer_layout {
+ /* cmd word 0 */
+ u8 qtype;
+ u8 pad0[3];
+ __le16 options;
+ /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */
+ u8 flags;
+ u8 pad1;
+ /* cmd word 1 */
+ __le16 private_data_size;
+ __le16 data_align;
+ __le16 head_room;
+ __le16 tail_room;
+};
+
+struct dpni_cmd_set_offload {
+ u8 pad[3];
+ u8 dpni_offload;
+ __le32 config;
+};
+
+struct dpni_cmd_get_offload {
+ u8 pad[3];
+ u8 dpni_offload;
+};
+
+struct dpni_rsp_get_offload {
+ __le32 pad;
+ __le32 config;
+};
+
+struct dpni_cmd_get_qdid {
+ u8 qtype;
+};
+
+struct dpni_rsp_get_qdid {
+ __le16 qdid;
+};
+
+struct dpni_rsp_get_tx_data_offset {
+ __le16 data_offset;
+};
+
+struct dpni_cmd_get_statistics {
+ u8 page_number;
+};
+
+struct dpni_rsp_get_statistics {
+ __le64 counter[DPNI_STATISTICS_CNT];
+};
+
+struct dpni_cmd_link_cfg {
+ /* cmd word 0 */
+ __le64 pad0;
+ /* cmd word 1 */
+ __le32 rate;
+ __le32 pad1;
+ /* cmd word 2 */
+ __le64 options;
+};
+
+#define DPNI_LINK_STATE_SHIFT 0
+#define DPNI_LINK_STATE_SIZE 1
+
+struct dpni_rsp_get_link_state {
+ /* response word 0 */
+ __le32 pad0;
+ /* from LSB: up:1 */
+ u8 flags;
+ u8 pad1[3];
+ /* response word 1 */
+ __le32 rate;
+ __le32 pad2;
+ /* response word 2 */
+ __le64 options;
+};
+
+struct dpni_cmd_set_max_frame_length {
+ __le16 max_frame_length;
+};
+
+struct dpni_rsp_get_max_frame_length {
+ __le16 max_frame_length;
+};
+
+struct dpni_cmd_set_multicast_promisc {
+ u8 enable;
+};
+
+struct dpni_rsp_get_multicast_promisc {
+ u8 enabled;
+};
+
+struct dpni_cmd_set_unicast_promisc {
+ u8 enable;
+};
+
+struct dpni_rsp_get_unicast_promisc {
+ u8 enabled;
+};
+
+struct dpni_cmd_set_primary_mac_addr {
+ __le16 pad;
+ u8 mac_addr[6];
+};
+
+struct dpni_rsp_get_primary_mac_addr {
+ __le16 pad;
+ u8 mac_addr[6];
+};
+
+struct dpni_rsp_get_port_mac_addr {
+ __le16 pad;
+ u8 mac_addr[6];
+};
+
+struct dpni_cmd_add_mac_addr {
+ __le16 pad;
+ u8 mac_addr[6];
+};
+
+struct dpni_cmd_remove_mac_addr {
+ __le16 pad;
+ u8 mac_addr[6];
+};
+
+#define DPNI_UNICAST_FILTERS_SHIFT 0
+#define DPNI_UNICAST_FILTERS_SIZE 1
+#define DPNI_MULTICAST_FILTERS_SHIFT 1
+#define DPNI_MULTICAST_FILTERS_SIZE 1
+
+struct dpni_cmd_clear_mac_filters {
+ /* from LSB: unicast:1, multicast:1 */
+ u8 flags;
+};
+
+#define DPNI_DIST_MODE_SHIFT 0
+#define DPNI_DIST_MODE_SIZE 4
+#define DPNI_MISS_ACTION_SHIFT 4
+#define DPNI_MISS_ACTION_SIZE 4
+
+struct dpni_cmd_set_rx_tc_dist {
+ /* cmd word 0 */
+ __le16 dist_size;
+ u8 tc_id;
+ /* from LSB: dist_mode:4, miss_action:4 */
+ u8 flags;
+ __le16 pad0;
+ __le16 default_flow_id;
+ /* cmd word 1..5 */
+ __le64 pad1[5];
+ /* cmd word 6 */
+ __le64 key_cfg_iova;
+};
+
+/* dpni_set_rx_tc_dist extension (structure of the DMA-able memory at
+ * key_cfg_iova)
+ */
+struct dpni_mask_cfg {
+ u8 mask;
+ u8 offset;
+};
+
+#define DPNI_EFH_TYPE_SHIFT 0
+#define DPNI_EFH_TYPE_SIZE 4
+#define DPNI_EXTRACT_TYPE_SHIFT 0
+#define DPNI_EXTRACT_TYPE_SIZE 4
+
+struct dpni_dist_extract {
+ /* word 0 */
+ u8 prot;
+ /* EFH type stored in the 4 least significant bits */
+ u8 efh_type;
+ u8 size;
+ u8 offset;
+ __le32 field;
+ /* word 1 */
+ u8 hdr_index;
+ u8 constant;
+ u8 num_of_repeats;
+ u8 num_of_byte_masks;
+ /* Extraction type is stored in the 4 LSBs */
+ u8 extract_type;
+ u8 pad[3];
+ /* word 2 */
+ struct dpni_mask_cfg masks[4];
+};
+
+struct dpni_ext_set_rx_tc_dist {
+ /* extension word 0 */
+ u8 num_extracts;
+ u8 pad[7];
+ /* words 1..25 */
+ struct dpni_dist_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
+};
+
+struct dpni_cmd_get_queue {
+ u8 qtype;
+ u8 tc;
+ u8 index;
+};
+
+#define DPNI_DEST_TYPE_SHIFT 0
+#define DPNI_DEST_TYPE_SIZE 4
+#define DPNI_STASH_CTRL_SHIFT 6
+#define DPNI_STASH_CTRL_SIZE 1
+#define DPNI_HOLD_ACTIVE_SHIFT 7
+#define DPNI_HOLD_ACTIVE_SIZE 1
+
+struct dpni_rsp_get_queue {
+ /* response word 0 */
+ __le64 pad0;
+ /* response word 1 */
+ __le32 dest_id;
+ __le16 pad1;
+ u8 dest_prio;
+ /* From LSB: dest_type:4, pad:2, flc_stash_ctrl:1, hold_active:1 */
+ u8 flags;
+ /* response word 2 */
+ __le64 flc;
+ /* response word 3 */
+ __le64 user_context;
+ /* response word 4 */
+ __le32 fqid;
+ __le16 qdbin;
+};
+
+struct dpni_cmd_set_queue {
+ /* cmd word 0 */
+ u8 qtype;
+ u8 tc;
+ u8 index;
+ u8 options;
+ __le32 pad0;
+ /* cmd word 1 */
+ __le32 dest_id;
+ __le16 pad1;
+ u8 dest_prio;
+ u8 flags;
+ /* cmd word 2 */
+ __le64 flc;
+ /* cmd word 3 */
+ __le64 user_context;
+};
+
+struct dpni_cmd_set_taildrop {
+ /* cmd word 0 */
+ u8 congestion_point;
+ u8 qtype;
+ u8 tc;
+ u8 index;
+ __le32 pad0;
+ /* cmd word 1 */
+ /* Only least significant bit is relevant */
+ u8 enable;
+ u8 pad1;
+ u8 units;
+ u8 pad2;
+ __le32 threshold;
+};
+
+struct dpni_cmd_get_taildrop {
+ u8 congestion_point;
+ u8 qtype;
+ u8 tc;
+ u8 index;
+};
+
+struct dpni_rsp_get_taildrop {
+ /* cmd word 0 */
+ __le64 pad0;
+ /* cmd word 1 */
+ /* only least significant bit is relevant */
+ u8 enable;
+ u8 pad1;
+ u8 units;
+ u8 pad2;
+ __le32 threshold;
+};
+
+struct dpni_rsp_get_api_version {
+ __le16 major;
+ __le16 minor;
+};
+
+#define DPNI_RX_FS_DIST_ENABLE_SHIFT 0
+#define DPNI_RX_FS_DIST_ENABLE_SIZE 1
+struct dpni_cmd_set_rx_fs_dist {
+ __le16 dist_size;
+ u8 enable;
+ u8 tc;
+ __le16 miss_flow_id;
+ __le16 pad;
+ __le64 key_cfg_iova;
+};
+
+#define DPNI_RX_HASH_DIST_ENABLE_SHIFT 0
+#define DPNI_RX_HASH_DIST_ENABLE_SIZE 1
+struct dpni_cmd_set_rx_hash_dist {
+ __le16 dist_size;
+ u8 enable;
+ u8 tc;
+ __le32 pad;
+ __le64 key_cfg_iova;
+};
+
+struct dpni_cmd_add_fs_entry {
+ /* cmd word 0 */
+ __le16 options;
+ u8 tc_id;
+ u8 key_size;
+ __le16 index;
+ __le16 flow_id;
+ /* cmd word 1 */
+ __le64 key_iova;
+ /* cmd word 2 */
+ __le64 mask_iova;
+ /* cmd word 3 */
+ __le64 flc;
+};
+
+struct dpni_cmd_remove_fs_entry {
+ /* cmd word 0 */
+ __le16 pad0;
+ u8 tc_id;
+ u8 key_size;
+ __le32 pad1;
+ /* cmd word 1 */
+ __le64 key_iova;
+ /* cmd word 2 */
+ __le64 mask_iova;
+};
+
+#define DPNI_DISCARD_ON_MISS_SHIFT 0
+#define DPNI_DISCARD_ON_MISS_SIZE 1
+
+struct dpni_cmd_set_qos_table {
+ __le32 pad;
+ u8 default_tc;
+ /* only the LSB */
+ u8 discard_on_miss;
+ __le16 pad1[21];
+ __le64 key_cfg_iova;
+};
+
+struct dpni_cmd_add_qos_entry {
+ __le16 pad;
+ u8 tc_id;
+ u8 key_size;
+ __le16 index;
+ __le16 pad1;
+ __le64 key_iova;
+ __le64 mask_iova;
+};
+
+struct dpni_cmd_remove_qos_entry {
+ u8 pad[3];
+ u8 key_size;
+ __le32 pad1;
+ __le64 key_iova;
+ __le64 mask_iova;
+};
+
+#define DPNI_DEST_TYPE_SHIFT 0
+#define DPNI_DEST_TYPE_SIZE 4
+#define DPNI_CONG_UNITS_SHIFT 4
+#define DPNI_CONG_UNITS_SIZE 2
+
+struct dpni_cmd_set_congestion_notification {
+ /* cmd word 0 */
+ u8 qtype;
+ u8 tc;
+ u8 pad[6];
+ /* cmd word 1 */
+ __le32 dest_id;
+ __le16 notification_mode;
+ u8 dest_priority;
+ /* from LSB: dest_type:4, units:2 */
+ u8 type_units;
+ /* cmd word 2 */
+ __le64 message_iova;
+ /* cmd word 3 */
+ __le64 message_ctx;
+ /* cmd word 4 */
+ __le32 threshold_entry;
+ __le32 threshold_exit;
+};
+
+#define DPNI_COUPLED_SHIFT 0
+#define DPNI_COUPLED_SIZE 1
+
+struct dpni_cmd_set_tx_shaping {
+ __le16 tx_cr_max_burst_size;
+ __le16 tx_er_max_burst_size;
+ __le32 pad;
+ __le32 tx_cr_rate_limit;
+ __le32 tx_er_rate_limit;
+ /* from LSB: coupled:1 */
+ u8 coupled;
+};
+
+#define DPNI_PTP_ENABLE_SHIFT 0
+#define DPNI_PTP_ENABLE_SIZE 1
+#define DPNI_PTP_CH_UPDATE_SHIFT 1
+#define DPNI_PTP_CH_UPDATE_SIZE 1
+
+struct dpni_cmd_single_step_cfg {
+ __le16 flags;
+ __le16 offset;
+ __le32 peer_delay;
+ __le32 ptp_onestep_reg_base;
+ __le32 pad0;
+};
+
+struct dpni_rsp_single_step_cfg {
+ __le16 flags;
+ __le16 offset;
+ __le32 peer_delay;
+ __le32 ptp_onestep_reg_base;
+ __le32 pad0;
+};
+
+struct dpni_cmd_enable_vlan_filter {
+ /* only the LSB */
+ u8 en;
+};
+
+struct dpni_cmd_vlan_id {
+ u8 flags;
+ u8 tc_id;
+ u8 flow_id;
+ u8 pad;
+ __le16 vlan_id;
+};
+
+#endif /* _FSL_DPNI_CMD_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c
new file mode 100644
index 000000000..6c3b36f20
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c
@@ -0,0 +1,2181 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ * Copyright 2020 NXP
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/fsl/mc.h>
+#include "dpni.h"
+#include "dpni-cmd.h"
+
+/**
+ * dpni_prepare_key_cfg() - Prepare extract parameters
+ * @cfg: Key Generation profile (rule) definition
+ * @key_cfg_buf: Zeroed 256-byte memory buffer, to be mapped for DMA
+ *	by the caller afterwards
+ *
+ * This function has to be called before the following functions:
+ * - dpni_set_rx_tc_dist()
+ * - dpni_set_qos_table()
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, u8 *key_cfg_buf)
+{
+ int i, j;
+ struct dpni_ext_set_rx_tc_dist *dpni_ext;
+ struct dpni_dist_extract *extr;
+
+ if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
+ return -EINVAL;
+
+ dpni_ext = (struct dpni_ext_set_rx_tc_dist *)key_cfg_buf;
+ dpni_ext->num_extracts = cfg->num_extracts;
+
+ for (i = 0; i < cfg->num_extracts; i++) {
+ extr = &dpni_ext->extracts[i];
+
+ switch (cfg->extracts[i].type) {
+ case DPKG_EXTRACT_FROM_HDR:
+ extr->prot = cfg->extracts[i].extract.from_hdr.prot;
+ dpni_set_field(extr->efh_type, EFH_TYPE,
+ cfg->extracts[i].extract.from_hdr.type);
+ extr->size = cfg->extracts[i].extract.from_hdr.size;
+ extr->offset = cfg->extracts[i].extract.from_hdr.offset;
+ extr->field = cpu_to_le32(
+ cfg->extracts[i].extract.from_hdr.field);
+ extr->hdr_index =
+ cfg->extracts[i].extract.from_hdr.hdr_index;
+ break;
+ case DPKG_EXTRACT_FROM_DATA:
+ extr->size = cfg->extracts[i].extract.from_data.size;
+ extr->offset =
+ cfg->extracts[i].extract.from_data.offset;
+ break;
+ case DPKG_EXTRACT_FROM_PARSE:
+ extr->size = cfg->extracts[i].extract.from_parse.size;
+ extr->offset =
+ cfg->extracts[i].extract.from_parse.offset;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks;
+ dpni_set_field(extr->extract_type, EXTRACT_TYPE,
+ cfg->extracts[i].type);
+
+ for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
+ extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
+ extr->masks[j].offset =
+ cfg->extracts[i].masks[j].offset;
+ }
+ }
+
+ return 0;
+}
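+
+/*
+ * Usage sketch (illustrative only, not part of the driver): the key
+ * configuration buffer must be zeroed, filled in by
+ * dpni_prepare_key_cfg() and then mapped for DMA before its IOVA is
+ * passed to dpni_set_rx_tc_dist() or dpni_set_qos_table(). 'dev',
+ * 'mc_io', 'token' and 'kg_cfg' are assumed to exist in the caller;
+ * error handling is trimmed.
+ *
+ *	u8 *key_buf = kzalloc(256, GFP_KERNEL);
+ *	struct dpni_rx_tc_dist_cfg dist_cfg = { 0 };
+ *	dma_addr_t iova;
+ *	int err;
+ *
+ *	err = dpni_prepare_key_cfg(&kg_cfg, key_buf);
+ *	if (!err) {
+ *		iova = dma_map_single(dev, key_buf, 256, DMA_TO_DEVICE);
+ *		dist_cfg.dist_size = 8;
+ *		dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
+ *		dist_cfg.key_cfg_iova = iova;
+ *		err = dpni_set_rx_tc_dist(mc_io, 0, token, 0, &dist_cfg);
+ *	}
+ */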
+
+/**
+ * dpni_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpni_id: DPNI unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpni_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpni_id,
+ u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_open *cmd_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dpni_cmd_open *)cmd.params;
+ cmd_params->dpni_id = cpu_to_le32(dpni_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpni_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
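+
+/*
+ * Typical control-session lifecycle (illustrative sketch; 'mc_io' and
+ * 'dpni_id' are assumed to come from the fsl-mc bus probe path):
+ *
+ *	u16 token;
+ *	int err;
+ *
+ *	err = dpni_open(mc_io, 0, dpni_id, &token);
+ *	if (err)
+ *		return err;
+ *	... use 'token' in all subsequent dpni_*() calls ...
+ *	dpni_close(mc_io, 0, token);
+ */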
+
+/**
+ * dpni_set_pools() - Set buffer pools configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Buffer pools configuration
+ *
+ * This function is mandatory for DPNI operation.
+ *
+ * Warning: allowed only when the DPNI is disabled
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_pools(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_pools_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_pools *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
+ cmd_params->num_dpbp = cfg->num_dpbp;
+ for (i = 0; i < DPNI_MAX_DPBP; i++) {
+ cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);
+ cmd_params->buffer_size[i] =
+ cpu_to_le16(cfg->pools[i].buffer_size);
+ cmd_params->backup_pool_mask |=
+ DPNI_BACKUP_POOL(cfg->pools[i].backup_pool, i);
+ }
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
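+
+/*
+ * Illustrative sketch (not part of the driver): attach a single buffer
+ * pool before enabling the DPNI; 'dpbp_id' and the buffer size are
+ * assumptions for the example.
+ *
+ *	struct dpni_pools_cfg pools_params = { 0 };
+ *
+ *	pools_params.num_dpbp = 1;
+ *	pools_params.pools[0].dpbp_id = dpbp_id;
+ *	pools_params.pools[0].backup_pool = 0;
+ *	pools_params.pools[0].buffer_size = 2048;
+ *	err = dpni_set_pools(mc_io, 0, token, &pools_params);
+ */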
+
+/**
+ * dpni_enable() - Enable the DPNI, allow sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_disable() - Disable the DPNI, stop sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_is_enabled() - Check if the DPNI is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_is_enabled(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *en)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_rsp_is_enabled *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_is_enabled *)cmd.params;
+ *en = dpni_get_field(rsp_params->enabled, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dpni_reset() - Reset the DPNI, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_irq_enable() - Set overall interrupt state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @irq_index: The interrupt index to configure
+ * @en: Interrupt state: - enable = 1, disable = 0
+ *
+ * Allows GPP software to control when interrupts are generated.
+ * Each interrupt can have up to 32 causes. The enable/disable control
+ * governs the overall interrupt state: if the interrupt is disabled,
+ * no cause will raise an interrupt.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_irq_enable *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_ENABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_irq_enable *)cmd.params;
+ dpni_set_field(cmd_params->enable, ENABLE, en);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_irq_enable() - Get overall interrupt state
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @irq_index: The interrupt index to configure
+ * @en: Returned interrupt state - enable = 1, disable = 0
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 *en)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_get_irq_enable *cmd_params;
+ struct dpni_rsp_get_irq_enable *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_irq_enable *)cmd.params;
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_irq_enable *)cmd.params;
+ *en = dpni_get_field(rsp_params->enabled, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dpni_set_irq_mask() - Set interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @irq_index: The interrupt index to configure
+ * @mask: event mask to trigger interrupt;
+ * each bit:
+ * 0 = ignore event
+ * 1 = consider event for asserting IRQ
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_irq_mask *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_MASK,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_irq_mask *)cmd.params;
+ cmd_params->mask = cpu_to_le32(mask);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
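+
+/*
+ * Illustrative sketch (not part of the driver): unmask the cause(s) of
+ * interest, then enable interrupt index 0 as a whole; only then can a
+ * cause raise an interrupt.
+ *
+ *	err = dpni_set_irq_mask(mc_io, 0, token, 0,
+ *				DPNI_IRQ_EVENT_LINK_CHANGED);
+ *	if (!err)
+ *		err = dpni_set_irq_enable(mc_io, 0, token, 0, 1);
+ */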
+
+/**
+ * dpni_get_irq_mask() - Get interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @irq_index: The interrupt index to configure
+ * @mask: Returned event mask to trigger interrupt
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *mask)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_get_irq_mask *cmd_params;
+ struct dpni_rsp_get_irq_mask *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_irq_mask *)cmd.params;
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_irq_mask *)cmd.params;
+ *mask = le32_to_cpu(rsp_params->mask);
+
+ return 0;
+}
+
+/**
+ * dpni_get_irq_status() - Get the current status of any pending interrupts.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @irq_index: The interrupt index to configure
+ * @status: Returned interrupts status - one bit per cause:
+ * 0 = no interrupt pending
+ * 1 = interrupt pending
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_get_irq_status *cmd_params;
+ struct dpni_rsp_get_irq_status *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_STATUS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(*status);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_irq_status *)cmd.params;
+ *status = le32_to_cpu(rsp_params->status);
+
+ return 0;
+}
+
+/**
+ * dpni_clear_irq_status() - Clear a pending interrupt's status
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @irq_index: The interrupt index to configure
+ * @status: bits to clear (W1C) - one bit per cause:
+ * 0 = don't change
+ * 1 = clear status bit
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_clear_irq_status *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLEAR_IRQ_STATUS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_clear_irq_status *)cmd.params;
+ cmd_params->irq_index = irq_index;
+ cmd_params->status = cpu_to_le32(status);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_attributes() - Retrieve DPNI attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @attr: Object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_rsp_get_attr *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_attr *)cmd.params;
+ attr->options = le32_to_cpu(rsp_params->options);
+ attr->num_queues = rsp_params->num_queues;
+ attr->num_tcs = rsp_params->num_tcs;
+ attr->mac_filter_entries = rsp_params->mac_filter_entries;
+ attr->vlan_filter_entries = rsp_params->vlan_filter_entries;
+ attr->qos_entries = rsp_params->qos_entries;
+ attr->fs_entries = le16_to_cpu(rsp_params->fs_entries);
+ attr->qos_key_size = rsp_params->qos_key_size;
+ attr->fs_key_size = rsp_params->fs_key_size;
+ attr->wriop_version = le16_to_cpu(rsp_params->wriop_version);
+
+ return 0;
+}
+
+/**
+ * dpni_set_errors_behavior() - Set errors behavior
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Errors configuration
+ *
+ * This function may be called numerous times with different
+ * error masks.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_error_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_errors_behavior *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_errors_behavior *)cmd.params;
+ cmd_params->errors = cpu_to_le32(cfg->errors);
+ dpni_set_field(cmd_params->flags, ERROR_ACTION, cfg->error_action);
+ dpni_set_field(cmd_params->flags, FRAME_ANN, cfg->set_frame_annotation);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_buffer_layout() - Retrieve buffer layout attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue to retrieve configuration for
+ * @layout: Returns buffer layout attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ struct dpni_buffer_layout *layout)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_get_buffer_layout *cmd_params;
+ struct dpni_rsp_get_buffer_layout *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_BUFFER_LAYOUT,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_buffer_layout *)cmd.params;
+ cmd_params->qtype = qtype;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_buffer_layout *)cmd.params;
+ layout->pass_timestamp = dpni_get_field(rsp_params->flags, PASS_TS);
+ layout->pass_parser_result = dpni_get_field(rsp_params->flags, PASS_PR);
+ layout->pass_frame_status = dpni_get_field(rsp_params->flags, PASS_FS);
+ layout->private_data_size = le16_to_cpu(rsp_params->private_data_size);
+ layout->data_align = le16_to_cpu(rsp_params->data_align);
+ layout->data_head_room = le16_to_cpu(rsp_params->head_room);
+ layout->data_tail_room = le16_to_cpu(rsp_params->tail_room);
+
+ return 0;
+}
+
+/**
+ * dpni_set_buffer_layout() - Set buffer layout configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue this configuration applies to
+ * @layout: Buffer layout configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * @warning Allowed only when DPNI is disabled
+ */
+int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ const struct dpni_buffer_layout *layout)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_buffer_layout *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_BUFFER_LAYOUT,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_buffer_layout *)cmd.params;
+ cmd_params->qtype = qtype;
+ cmd_params->options = cpu_to_le16(layout->options);
+ dpni_set_field(cmd_params->flags, PASS_TS, layout->pass_timestamp);
+ dpni_set_field(cmd_params->flags, PASS_PR, layout->pass_parser_result);
+ dpni_set_field(cmd_params->flags, PASS_FS, layout->pass_frame_status);
+ cmd_params->private_data_size = cpu_to_le16(layout->private_data_size);
+ cmd_params->data_align = cpu_to_le16(layout->data_align);
+ cmd_params->head_room = cpu_to_le16(layout->data_head_room);
+ cmd_params->tail_room = cpu_to_le16(layout->data_tail_room);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
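+
+/*
+ * Illustrative sketch (not part of the driver): request a data
+ * alignment and extra headroom on Rx buffers while the DPNI is
+ * disabled; the numeric values are assumptions for the example.
+ *
+ *	struct dpni_buffer_layout layout = { 0 };
+ *
+ *	layout.options = DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
+ *			 DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
+ *	layout.data_align = 64;
+ *	layout.data_head_room = 128;
+ *	err = dpni_set_buffer_layout(mc_io, 0, token, DPNI_QUEUE_RX,
+ *				     &layout);
+ */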
+
+/**
+ * dpni_set_offload() - Set DPNI offload configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @type: Type of DPNI offload
+ * @config: Offload configuration.
+ *		For checksum offloads, a non-zero value enables the offload
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * @warning Allowed only when DPNI is disabled
+ */
+int dpni_set_offload(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_offload type,
+ u32 config)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_offload *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_OFFLOAD,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_offload *)cmd.params;
+ cmd_params->dpni_offload = type;
+ cmd_params->config = cpu_to_le32(config);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
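+/**
+ * dpni_get_offload() - Get DPNI offload configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @type: Type of DPNI offload
+ * @config: Returned offload configuration.
+ *	For checksum offloads, a non-zero value indicates the offload
+ *	is enabled.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */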
+int dpni_get_offload(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_offload type,
+ u32 *config)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_get_offload *cmd_params;
+ struct dpni_rsp_get_offload *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_OFFLOAD,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_offload *)cmd.params;
+ cmd_params->dpni_offload = type;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_offload *)cmd.params;
+ *config = le32_to_cpu(rsp_params->config);
+
+ return 0;
+}
+
+/**
+ * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used
+ * for enqueue operations
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue to receive QDID for
+ * @qdid: Returned virtual QDID value that should be used as an argument
+ * in all enqueue operations
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_qdid(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ u16 *qdid)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_get_qdid *cmd_params;
+ struct dpni_rsp_get_qdid *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_qdid *)cmd.params;
+ cmd_params->qtype = qtype;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_qdid *)cmd.params;
+ *qdid = le16_to_cpu(rsp_params->qdid);
+
+ return 0;
+}
+
+/**
+ * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @data_offset: Tx data offset (from start of buffer)
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 *data_offset)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_rsp_get_tx_data_offset *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_tx_data_offset *)cmd.params;
+ *data_offset = le16_to_cpu(rsp_params->data_offset);
+
+ return 0;
+}
+
+/**
+ * dpni_set_link_cfg() - set the link configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Link configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_link_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_link_cfg *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_link_cfg *)cmd.params;
+ cmd_params->rate = cpu_to_le32(cfg->rate);
+ cmd_params->options = cpu_to_le64(cfg->options);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_link_cfg() - return the link configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Link configuration from dpni object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_link_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_link_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_link_cfg *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_CFG,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_cmd_link_cfg *)cmd.params;
+ cfg->rate = le32_to_cpu(rsp_params->rate);
+ cfg->options = le64_to_cpu(rsp_params->options);
+
+ return 0;
+}
+
+/**
+ * dpni_get_link_state() - Return the link state (either up or down)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @state: Returned link state;
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_link_state(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_link_state *state)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_rsp_get_link_state *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_link_state *)cmd.params;
+ state->up = dpni_get_field(rsp_params->flags, LINK_STATE);
+ state->rate = le32_to_cpu(rsp_params->rate);
+ state->options = le64_to_cpu(rsp_params->options);
+
+ return 0;
+}
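+
+/*
+ * Illustrative sketch (not part of the driver): poll the link and
+ * update the carrier state; 'net_dev' is an assumed net_device.
+ *
+ *	struct dpni_link_state state = { 0 };
+ *
+ *	err = dpni_get_link_state(mc_io, 0, token, &state);
+ *	if (!err && state.up)
+ *		netif_carrier_on(net_dev);
+ */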
+
+/**
+ * dpni_set_max_frame_length() - Set the maximum received frame length.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @max_frame_length: Maximum received frame length (in
+ * bytes); frame is discarded if its
+ * length exceeds this value
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 max_frame_length)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_max_frame_length *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_max_frame_length *)cmd.params;
+ cmd_params->max_frame_length = cpu_to_le16(max_frame_length);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_max_frame_length() - Get the maximum received frame length.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @max_frame_length: Maximum received frame length (in
+ * bytes); frame is discarded if its
+ * length exceeds this value
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 *max_frame_length)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_rsp_get_max_frame_length *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_max_frame_length *)cmd.params;
+ *max_frame_length = le16_to_cpu(rsp_params->max_frame_length);
+
+ return 0;
+}
+
+/**
+ * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Set to '1' to enable; '0' to disable
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int en)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_multicast_promisc *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_multicast_promisc *)cmd.params;
+ dpni_set_field(cmd_params->enable, ENABLE, en);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_multicast_promisc() - Get multicast promiscuous mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Returns '1' if enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *en)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_rsp_get_multicast_promisc *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_multicast_promisc *)cmd.params;
+ *en = dpni_get_field(rsp_params->enabled, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Set to '1' to enable; '0' to disable
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int en)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_unicast_promisc *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_unicast_promisc *)cmd.params;
+ dpni_set_field(cmd_params->enable, ENABLE, en);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_unicast_promisc() - Get unicast promiscuous mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Returns '1' if enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *en)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_rsp_get_unicast_promisc *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_unicast_promisc *)cmd.params;
+ *en = dpni_get_field(rsp_params->enabled, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dpni_set_primary_mac_addr() - Set the primary MAC address
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: MAC address to set as primary address
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const u8 mac_addr[6])
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_primary_mac_addr *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_primary_mac_addr *)cmd.params;
+ for (i = 0; i < 6; i++)
+ cmd_params->mac_addr[i] = mac_addr[5 - i];
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_primary_mac_addr() - Get the primary MAC address
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: Returned MAC address
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 mac_addr[6])
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_rsp_get_primary_mac_addr *rsp_params;
+ int i, err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_primary_mac_addr *)cmd.params;
+ for (i = 0; i < 6; i++)
+ mac_addr[5 - i] = rsp_params->mac_addr[i];
+
+ return 0;
+}
+
+/**
+ * dpni_get_port_mac_addr() - Retrieve MAC address associated to the physical
+ * port the DPNI is attached to
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: MAC address of the physical port, if any, otherwise 0
+ *
+ * The DPNI's primary MAC address is not affected by this operation.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 mac_addr[6])
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_rsp_get_port_mac_addr *rsp_params;
+ int i, err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PORT_MAC_ADDR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_port_mac_addr *)cmd.params;
+ for (i = 0; i < 6; i++)
+ mac_addr[5 - i] = rsp_params->mac_addr[i];
+
+ return 0;
+}
+
+/**
+ * dpni_enable_vlan_filter() - Enable/disable VLAN filtering mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Set to '1' to enable; '0' to disable
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_enable_vlan_filter(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u32 en)
+{
+ struct dpni_cmd_enable_vlan_filter *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE_VLAN_FILTER,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_enable_vlan_filter *)cmd.params;
+ dpni_set_field(cmd_params->en, ENABLE, en);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_add_vlan_id() - Add VLAN ID filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @vlan_id: VLAN ID to add
+ * @flags: 0 - tc_id and flow_id will be ignored; packets with this
+ *	    vlan_id will be passed to the next classification stages.
+ *	    DPNI_VLAN_SET_QUEUE_ACTION - packets with this vlan_id will
+ *	    be forwarded directly to the queue defined by tc_id and
+ *	    flow_id.
+ * @tc_id: Traffic class selection (0-7)
+ * @flow_id: Selects the specific queue out of the set allocated for the
+ *	     same tc_id. Value must be in range 0 to NUM_QUEUES - 1
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_add_vlan_id(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, u8 flags, u8 tc_id, u8 flow_id)
+{
+ struct dpni_cmd_vlan_id *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_VLAN_ID,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_vlan_id *)cmd.params;
+ cmd_params->flags = flags;
+ cmd_params->tc_id = tc_id;
+ cmd_params->flow_id = flow_id;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
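+
+/*
+ * Illustrative sketch (not part of the driver): add VLAN 100 and steer
+ * matching packets straight to traffic class 1, queue 0, using the
+ * DPNI_VLAN_SET_QUEUE_ACTION flag described above.
+ *
+ *	err = dpni_add_vlan_id(mc_io, 0, token, 100,
+ *			       DPNI_VLAN_SET_QUEUE_ACTION, 1, 0);
+ */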
+
+/**
+ * dpni_remove_vlan_id() - Remove VLAN ID filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @vlan_id: VLAN ID to remove
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id)
+{
+ struct dpni_cmd_vlan_id *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_VLAN_ID,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_vlan_id *)cmd.params;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_add_mac_addr() - Add MAC address filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: MAC address to add
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const u8 mac_addr[6])
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_add_mac_addr *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_add_mac_addr *)cmd.params;
+ for (i = 0; i < 6; i++)
+ cmd_params->mac_addr[i] = mac_addr[5 - i];
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_remove_mac_addr() - Remove MAC address filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: MAC address to remove
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const u8 mac_addr[6])
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_remove_mac_addr *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_remove_mac_addr *)cmd.params;
+ for (i = 0; i < 6; i++)
+ cmd_params->mac_addr[i] = mac_addr[5 - i];
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @unicast: Set to '1' to clear unicast addresses
+ * @multicast: Set to '1' to clear multicast addresses
+ *
+ * The primary MAC address is not cleared by this operation.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int unicast,
+ int multicast)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_clear_mac_filters *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_clear_mac_filters *)cmd.params;
+ dpni_set_field(cmd_params->flags, UNICAST_FILTERS, unicast);
+ dpni_set_field(cmd_params->flags, MULTICAST_FILTERS, multicast);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @tc_id: Traffic class selection (0-7)
+ * @cfg: Traffic class distribution configuration
+ *
+ * Warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpni_prepare_key_cfg()
+ * first to prepare the key_cfg_iova parameter
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 tc_id,
+ const struct dpni_rx_tc_dist_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_rx_tc_dist *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_rx_tc_dist *)cmd.params;
+ cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
+ cmd_params->tc_id = tc_id;
+ dpni_set_field(cmd_params->flags, DIST_MODE, cfg->dist_mode);
+ dpni_set_field(cmd_params->flags, MISS_ACTION, cfg->fs_cfg.miss_action);
+ cmd_params->default_flow_id = cpu_to_le16(cfg->fs_cfg.default_flow_id);
+ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_congestion_notification() - Set traffic class congestion
+ * notification configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
+ * @tc_id: Traffic class selection (0-7)
+ * @cfg: Congestion notification configuration
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpni_set_congestion_notification(
+ struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ u8 tc_id,
+ const struct dpni_congestion_notification_cfg *cfg)
+{
+ struct dpni_cmd_set_congestion_notification *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header =
+ mc_encode_cmd_header(DPNI_CMDID_SET_CONGESTION_NOTIFICATION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_congestion_notification *)cmd.params;
+ cmd_params->qtype = qtype;
+ cmd_params->tc = tc_id;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
+ cmd_params->dest_priority = cfg->dest_cfg.priority;
+ dpni_set_field(cmd_params->type_units, DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+ dpni_set_field(cmd_params->type_units, CONG_UNITS, cfg->units);
+ cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
+ cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
+ cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
+ cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
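+
+/*
+ * Illustrative sketch (not part of the driver): write a congestion
+ * notification message to memory when the Rx queues of traffic class 0
+ * cross byte-based thresholds; the thresholds and the DMA-able
+ * 'msg_iova' buffer are assumptions for the example.
+ *
+ *	struct dpni_congestion_notification_cfg cfg = { 0 };
+ *
+ *	cfg.units = DPNI_CONGESTION_UNIT_BYTES;
+ *	cfg.threshold_entry = 64 * 1024;
+ *	cfg.threshold_exit = 32 * 1024;
+ *	cfg.message_iova = msg_iova;
+ *	cfg.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
+ *				DPNI_CONG_OPT_WRITE_MEM_ON_EXIT;
+ *	err = dpni_set_congestion_notification(mc_io, 0, token,
+ *					       DPNI_QUEUE_RX, 0, &cfg);
+ */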
+
+/**
+ * dpni_set_queue() - Set queue parameters
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue - all queue types are supported, although
+ * the command is ignored for Tx
+ * @tc: Traffic class, in range 0 to NUM_TCS - 1
+ * @index: Selects the specific queue out of the set allocated for the
+ * same TC. Value must be in range 0 to NUM_QUEUES - 1
+ * @options: A combination of DPNI_QUEUE_OPT_ values that control what
+ * configuration options are set on the queue
+ * @queue: Queue structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_queue(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ u8 tc,
+ u8 index,
+ u8 options,
+ const struct dpni_queue *queue)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_queue *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_queue *)cmd.params;
+ cmd_params->qtype = qtype;
+ cmd_params->tc = tc;
+ cmd_params->index = index;
+ cmd_params->options = options;
+ cmd_params->dest_id = cpu_to_le32(queue->destination.id);
+ cmd_params->dest_prio = queue->destination.priority;
+ dpni_set_field(cmd_params->flags, DEST_TYPE, queue->destination.type);
+ dpni_set_field(cmd_params->flags, STASH_CTRL, queue->flc.stash_control);
+ dpni_set_field(cmd_params->flags, HOLD_ACTIVE,
+ queue->destination.hold_active);
+ cmd_params->flc = cpu_to_le64(queue->flc.value);
+ cmd_params->user_context = cpu_to_le64(queue->user_context);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
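+
+/*
+ * Illustrative sketch (not part of the driver): bind Rx queue 0 of
+ * traffic class 0 to a DPCON channel and attach a driver cookie;
+ * 'channel_id', 'prio' and 'my_fq' are assumptions for the example.
+ *
+ *	struct dpni_queue queue = { 0 };
+ *
+ *	queue.destination.id = channel_id;
+ *	queue.destination.type = DPNI_DEST_DPCON;
+ *	queue.destination.priority = prio;
+ *	queue.user_context = (u64)(uintptr_t)my_fq;
+ *	err = dpni_set_queue(mc_io, 0, token, DPNI_QUEUE_RX, 0, 0,
+ *			     DPNI_QUEUE_OPT_USER_CTX |
+ *			     DPNI_QUEUE_OPT_DEST, &queue);
+ */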
+
+/**
+ * dpni_get_queue() - Get queue parameters
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue - all queue types are supported
+ * @tc: Traffic class, in range 0 to NUM_TCS - 1
+ * @index: Selects the specific queue out of the set allocated for the
+ * same TC. Value must be in range 0 to NUM_QUEUES - 1
+ * @queue: Queue configuration structure
+ * @qid: Queue identification
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_queue(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ u8 tc,
+ u8 index,
+ struct dpni_queue *queue,
+ struct dpni_queue_id *qid)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_get_queue *cmd_params;
+ struct dpni_rsp_get_queue *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_queue *)cmd.params;
+ cmd_params->qtype = qtype;
+ cmd_params->tc = tc;
+ cmd_params->index = index;
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_queue *)cmd.params;
+ queue->destination.id = le32_to_cpu(rsp_params->dest_id);
+ queue->destination.priority = rsp_params->dest_prio;
+ queue->destination.type = dpni_get_field(rsp_params->flags,
+ DEST_TYPE);
+ queue->flc.stash_control = dpni_get_field(rsp_params->flags,
+ STASH_CTRL);
+ queue->destination.hold_active = dpni_get_field(rsp_params->flags,
+ HOLD_ACTIVE);
+ queue->flc.value = le64_to_cpu(rsp_params->flc);
+ queue->user_context = le64_to_cpu(rsp_params->user_context);
+ qid->fqid = le32_to_cpu(rsp_params->fqid);
+ qid->qdbin = le16_to_cpu(rsp_params->qdbin);
+
+ return 0;
+}
+
+/**
+ * dpni_get_statistics() - Get DPNI statistics
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @page: Selects the statistics page to retrieve, see
+ * DPNI_GET_STATISTICS output. Pages are numbered 0 to 6.
+ * @stat: Structure containing the statistics
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_statistics(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 page,
+ union dpni_statistics *stat)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_get_statistics *cmd_params;
+ struct dpni_rsp_get_statistics *rsp_params;
+ int i, err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_STATISTICS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_statistics *)cmd.params;
+ cmd_params->page_number = page;
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_statistics *)cmd.params;
+ for (i = 0; i < DPNI_STATISTICS_CNT; i++)
+ stat->raw.counter[i] = le64_to_cpu(rsp_params->counter[i]);
+
+ return 0;
+}
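+
+/*
+ * Illustrative sketch (not part of the driver): read statistics page 0
+ * and pick one ingress counter out of the union.
+ *
+ *	union dpni_statistics stats;
+ *	u64 rx_frames;
+ *
+ *	err = dpni_get_statistics(mc_io, 0, token, 0, &stats);
+ *	if (!err)
+ *		rx_frames = stats.page_0.ingress_all_frames;
+ */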
+
+/**
+ * dpni_set_taildrop() - Set taildrop per queue or TC
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cg_point: Congestion point
+ * @qtype: Queue type on which the taildrop is configured.
+ * Only Rx queues are supported for now
+ * @tc: Traffic class to apply this taildrop to
+ * @index: Index of the queue if the DPNI supports multiple queues for
+ * traffic distribution. Ignored if CONGESTION_POINT is not 0.
+ * @taildrop: Taildrop structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_taildrop(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_congestion_point cg_point,
+ enum dpni_queue_type qtype,
+ u8 tc,
+ u8 index,
+ struct dpni_taildrop *taildrop)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_set_taildrop *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TAILDROP,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_taildrop *)cmd.params;
+ cmd_params->congestion_point = cg_point;
+ cmd_params->qtype = qtype;
+ cmd_params->tc = tc;
+ cmd_params->index = index;
+ dpni_set_field(cmd_params->enable, ENABLE, taildrop->enable);
+ cmd_params->units = taildrop->units;
+ cmd_params->threshold = cpu_to_le32(taildrop->threshold);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
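+
+/*
+ * Illustrative sketch (not part of the driver): enable a byte-based
+ * taildrop on Rx queue 0 of traffic class 0; the threshold value is an
+ * assumption for the example.
+ *
+ *	struct dpni_taildrop td = { 0 };
+ *
+ *	td.enable = 1;
+ *	td.units = DPNI_CONGESTION_UNIT_BYTES;
+ *	td.threshold = 64 * 1024;
+ *	err = dpni_set_taildrop(mc_io, 0, token, DPNI_CP_QUEUE,
+ *				DPNI_QUEUE_RX, 0, 0, &td);
+ */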
+
+/**
+ * dpni_get_taildrop() - Get taildrop information
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cg_point: Congestion point
+ * @qtype: Queue type on which the taildrop is configured.
+ * Only Rx queues are supported for now
+ * @tc: Traffic class to apply this taildrop to
+ * @index: Index of the queue if the DPNI supports multiple queues for
+ * traffic distribution. Ignored if CONGESTION_POINT is not DPNI_CP_QUEUE.
+ * @taildrop: Taildrop structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_taildrop(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_congestion_point cg_point,
+ enum dpni_queue_type qtype,
+ u8 tc,
+ u8 index,
+ struct dpni_taildrop *taildrop)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_get_taildrop *cmd_params;
+ struct dpni_rsp_get_taildrop *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TAILDROP,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_taildrop *)cmd.params;
+ cmd_params->congestion_point = cg_point;
+ cmd_params->qtype = qtype;
+ cmd_params->tc = tc;
+ cmd_params->index = index;
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_taildrop *)cmd.params;
+ taildrop->enable = dpni_get_field(rsp_params->enable, ENABLE);
+ taildrop->units = rsp_params->units;
+ taildrop->threshold = le32_to_cpu(rsp_params->threshold);
+
+ return 0;
+}
+
+/**
+ * dpni_get_api_version() - Get Data Path Network Interface API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path network interface API
+ * @minor_ver: Minor version of data path network interface API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver)
+{
+ struct dpni_rsp_get_api_version *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_API_VERSION,
+ cmd_flags, 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpni_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
+
+ return 0;
+}
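+
+/*
+ * Usage sketch (illustrative): query the firmware-side API version before
+ * relying on newer commands; note that no object token is needed here:
+ *
+ *	u16 major, minor;
+ *	int err;
+ *
+ *	err = dpni_get_api_version(mc_io, 0, &major, &minor);
+ *	if (!err)
+ *		pr_info("DPNI API %u.%u\n", major, minor);
+ */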
+
+/**
+ * dpni_set_rx_fs_dist() - Set Rx flow steering distribution
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Distribution configuration
+ *
+ * If FS was already enabled by a previous call, the classification
+ * key is changed but all existing table rules are kept. If the
+ * existing rules do not match the new key, the results are not
+ * predictable; it is the user's responsibility to keep the key
+ * consistent with the rules.
+ * If cfg.enable is set to 1, the command creates a flow steering table
+ * and classifies packets according to it. Packets that miss all the
+ * table rules are classified according to the settings made in
+ * dpni_set_rx_hash_dist().
+ * If cfg.enable is set to 0, the command clears the flow steering
+ * table and packets are classified according to the settings made in
+ * dpni_set_rx_hash_dist().
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rx_dist_cfg *cfg)
+{
+ struct dpni_cmd_set_rx_fs_dist *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FS_DIST,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_rx_fs_dist *)cmd.params;
+ cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
+ dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable);
+ cmd_params->tc = cfg->tc;
+ cmd_params->miss_flow_id = cpu_to_le16(cfg->fs_miss_flow_id);
+ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_rx_hash_dist() - Set Rx hash distribution
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Distribution configuration
+ *
+ * If cfg.enable is set to 1, packets are distributed using a hash function
+ * computed over the key received in the cfg.key_cfg_iova parameter.
+ * If cfg.enable is set to 0, packets are sent to the default queue.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rx_dist_cfg *cfg)
+{
+ struct dpni_cmd_set_rx_hash_dist *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_HASH_DIST,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_rx_hash_dist *)cmd.params;
+ cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
+ dpni_set_field(cmd_params->enable, RX_HASH_DIST_ENABLE, cfg->enable);
+ cmd_params->tc = cfg->tc;
+ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
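+
+/*
+ * Usage sketch (illustrative): enable hash distribution over 8 queues for
+ * traffic class 0. 'key_iova' is assumed to be the DMA address of a
+ * 256-byte buffer already filled by dpni_prepare_key_cfg():
+ *
+ *	struct dpni_rx_dist_cfg dist = {
+ *		.dist_size = 8,
+ *		.key_cfg_iova = key_iova,
+ *		.enable = 1,
+ *		.tc = 0,
+ *	};
+ *
+ *	err = dpni_set_rx_hash_dist(mc_io, 0, token, &dist);
+ */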
+
+/**
+ * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
+ * (to select a flow ID)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @tc_id: Traffic class selection (0-7)
+ * @index: Location in the FS table where to insert the entry.
+ * Only relevant if MASKING is enabled for FS
+ * classification on this DPNI, it is ignored for exact match.
+ * @cfg: Flow steering rule to add
+ * @action: Action to be taken as result of a classification hit
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 tc_id,
+ u16 index,
+ const struct dpni_rule_cfg *cfg,
+ const struct dpni_fs_action_cfg *action)
+{
+ struct dpni_cmd_add_fs_entry *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params;
+ cmd_params->tc_id = tc_id;
+ cmd_params->key_size = cfg->key_size;
+ cmd_params->index = cpu_to_le16(index);
+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+ cmd_params->options = cpu_to_le16(action->options);
+ cmd_params->flow_id = cpu_to_le16(action->flow_id);
+ cmd_params->flc = cpu_to_le64(action->flc);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
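+
+/*
+ * Usage sketch (illustrative): steer frames matching a prepared key to Rx
+ * queue 2 of traffic class 0. 'key_iova'/'mask_iova' are assumed DMA
+ * addresses of the key and mask, and 'key_size' their length in bytes:
+ *
+ *	struct dpni_rule_cfg rule = {
+ *		.key_iova = key_iova,
+ *		.mask_iova = mask_iova,
+ *		.key_size = key_size,
+ *	};
+ *	struct dpni_fs_action_cfg action = {
+ *		.flow_id = 2,
+ *	};
+ *
+ *	err = dpni_add_fs_entry(mc_io, 0, token, 0, 0, &rule, &action);
+ */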
+
+/**
+ * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific
+ * traffic class
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @tc_id: Traffic class selection (0-7)
+ * @cfg: Flow steering rule to remove
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 tc_id,
+ const struct dpni_rule_cfg *cfg)
+{
+ struct dpni_cmd_remove_fs_entry *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params;
+ cmd_params->tc_id = tc_id;
+ cmd_params->key_size = cfg->key_size;
+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_qos_table() - Set QoS mapping table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: QoS table configuration
+ *
+ * This function and all QoS-related functions require that
+ * 'max_tcs > 1' was set at DPNI creation.
+ *
+ * Warning: Before calling this function, call dpni_prepare_key_cfg() to
+ * prepare the key_cfg_iova parameter.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_qos_table(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_qos_tbl_cfg *cfg)
+{
+ struct dpni_cmd_set_qos_table *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_qos_table *)cmd.params;
+ cmd_params->default_tc = cfg->default_tc;
+ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+ dpni_set_field(cmd_params->discard_on_miss, DISCARD_ON_MISS,
+ cfg->discard_on_miss);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
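+
+/*
+ * Usage sketch (illustrative): install a QoS table that sends no-match
+ * traffic to traffic class 0 instead of discarding it. 'key_cfg_iova' is
+ * assumed to point to a key configuration prepared beforehand:
+ *
+ *	struct dpni_qos_tbl_cfg qos = {
+ *		.key_cfg_iova = key_cfg_iova,
+ *		.discard_on_miss = 0,
+ *		.default_tc = 0,
+ *	};
+ *
+ *	err = dpni_set_qos_table(mc_io, 0, token, &qos);
+ */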
+
+/**
+ * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: QoS rule to add
+ * @tc_id: Traffic class selection (0-7)
+ * @index: Location in the QoS table where to insert the entry.
+ * Only relevant if MASKING is enabled for QoS classification on
+ * this DPNI, it is ignored for exact match.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rule_cfg *cfg,
+ u8 tc_id,
+ u16 index)
+{
+ struct dpni_cmd_add_qos_entry *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_add_qos_entry *)cmd.params;
+ cmd_params->tc_id = tc_id;
+ cmd_params->key_size = cfg->key_size;
+ cmd_params->index = cpu_to_le16(index);
+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_remove_qos_entry() - Remove QoS mapping entry
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: QoS rule to remove
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rule_cfg *cfg)
+{
+ struct dpni_cmd_remove_qos_entry *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_QOS_ENT,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_remove_qos_entry *)cmd.params;
+ cmd_params->key_size = cfg->key_size;
+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_clear_qos_table() - Clear all QoS mapping entries
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Following this function call, all frames are directed to
+ * the default traffic class (0)
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_QOS_TBL,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_tx_shaping() - Set the transmit shaping
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @tx_cr_shaper: TX committed rate shaping configuration
+ * @tx_er_shaper: TX excess rate shaping configuration
+ * @coupled: Committed and excess rate shapers are coupled
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_tx_shaping_cfg *tx_cr_shaper,
+ const struct dpni_tx_shaping_cfg *tx_er_shaper,
+ int coupled)
+{
+ struct dpni_cmd_set_tx_shaping *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_tx_shaping *)cmd.params;
+ cmd_params->tx_cr_max_burst_size = cpu_to_le16(tx_cr_shaper->max_burst_size);
+ cmd_params->tx_er_max_burst_size = cpu_to_le16(tx_er_shaper->max_burst_size);
+ cmd_params->tx_cr_rate_limit = cpu_to_le32(tx_cr_shaper->rate_limit);
+ cmd_params->tx_er_rate_limit = cpu_to_le32(tx_er_shaper->rate_limit);
+ dpni_set_field(cmd_params->coupled, COUPLED, coupled);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
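+
+/*
+ * Usage sketch (illustrative): limit the committed Tx rate to 1000 Mbps with
+ * an 8 KiB max burst, leave the excess rate shaper disabled (all zeros) and
+ * do not couple the two shapers:
+ *
+ *	struct dpni_tx_shaping_cfg cr = {
+ *		.rate_limit = 1000,
+ *		.max_burst_size = 8 * 1024,
+ *	};
+ *	struct dpni_tx_shaping_cfg er = { 0 };
+ *
+ *	err = dpni_set_tx_shaping(mc_io, 0, token, &cr, &er, 0);
+ */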
+
+/**
+ * dpni_get_single_step_cfg() - Return the current configuration for
+ * single step PTP
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @ptp_cfg: ptp single step configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_single_step_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_single_step_cfg *ptp_cfg)
+{
+ struct dpni_rsp_single_step_cfg *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_SINGLE_STEP_CFG,
+ cmd_flags, token);
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* read command response */
+ rsp_params = (struct dpni_rsp_single_step_cfg *)cmd.params;
+ ptp_cfg->offset = le16_to_cpu(rsp_params->offset);
+ ptp_cfg->en = dpni_get_field(le16_to_cpu(rsp_params->flags),
+ PTP_ENABLE) ? 1 : 0;
+ ptp_cfg->ch_update = dpni_get_field(le16_to_cpu(rsp_params->flags),
+ PTP_CH_UPDATE) ? 1 : 0;
+ ptp_cfg->peer_delay = le32_to_cpu(rsp_params->peer_delay);
+ ptp_cfg->ptp_onestep_reg_base =
+ le32_to_cpu(rsp_params->ptp_onestep_reg_base);
+
+ return err;
+}
+
+/**
+ * dpni_set_single_step_cfg() - enable/disable and configure single step PTP
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @ptp_cfg: ptp single step configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * The function has effect only when dpni object is connected to a dpmac
+ * object. If the dpni is not connected to a dpmac the configuration will
+ * be stored inside and applied when connection is made.
+ */
+int dpni_set_single_step_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_single_step_cfg *ptp_cfg)
+{
+ struct dpni_cmd_single_step_cfg *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ u16 flags;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_SINGLE_STEP_CFG,
+ cmd_flags, token);
+ cmd_params = (struct dpni_cmd_single_step_cfg *)cmd.params;
+ cmd_params->offset = cpu_to_le16(ptp_cfg->offset);
+ cmd_params->peer_delay = cpu_to_le32(ptp_cfg->peer_delay);
+
+ flags = le16_to_cpu(cmd_params->flags);
+ dpni_set_field(flags, PTP_ENABLE, !!ptp_cfg->en);
+ dpni_set_field(flags, PTP_CH_UPDATE, !!ptp_cfg->ch_update);
+ cmd_params->flags = cpu_to_le16(flags);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
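+
+/*
+ * Usage sketch (illustrative): enable one-step PTP with UDP checksum update.
+ * The offset value below is hypothetical and must match the position of the
+ * timestamp field for the encapsulation actually in use:
+ *
+ *	struct dpni_single_step_cfg ptp = {
+ *		.en = 1,
+ *		.ch_update = 1,
+ *		.offset = 44,
+ *		.peer_delay = 0,
+ *	};
+ *
+ *	err = dpni_set_single_step_cfg(mc_io, 0, token, &ptp);
+ */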
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h
new file mode 100644
index 000000000..6fffd519a
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h
@@ -0,0 +1,1110 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ * Copyright 2020 NXP
+ */
+#ifndef __FSL_DPNI_H
+#define __FSL_DPNI_H
+
+#include "dpkg.h"
+
+struct fsl_mc_io;
+
+/* Data Path Network Interface API
+ * Contains initialization APIs and runtime control APIs for DPNI
+ */
+
+/** General DPNI macros */
+
+/**
+ * DPNI_MAX_TC - Maximum number of traffic classes
+ */
+#define DPNI_MAX_TC 8
+/**
+ * DPNI_MAX_DPBP - Maximum number of buffer pools per DPNI
+ */
+#define DPNI_MAX_DPBP 8
+
+/**
+ * DPNI_ALL_TCS - All traffic classes considered; see dpni_set_queue()
+ */
+#define DPNI_ALL_TCS (u8)(-1)
+/**
+ * DPNI_ALL_TC_FLOWS - All flows within traffic class considered; see
+ * dpni_set_queue()
+ */
+#define DPNI_ALL_TC_FLOWS (u16)(-1)
+/**
+ * DPNI_NEW_FLOW_ID - Generate new flow ID; see dpni_set_queue()
+ */
+#define DPNI_NEW_FLOW_ID (u16)(-1)
+
+/**
+ * DPNI_OPT_TX_FRM_RELEASE - Tx traffic is always released to a buffer pool on
+ * transmit; no resources are allocated to have the frames confirmed back
+ * to the source after transmission.
+ */
+#define DPNI_OPT_TX_FRM_RELEASE 0x000001
+/**
+ * DPNI_OPT_NO_MAC_FILTER - Disables support for MAC address filtering for
+ * addresses other than primary MAC address. This affects both unicast and
+ * multicast. Promiscuous mode can still be enabled/disabled for both unicast
+ * and multicast. If promiscuous mode is disabled, only traffic matching the
+ * primary MAC address will be accepted.
+ */
+#define DPNI_OPT_NO_MAC_FILTER 0x000002
+/**
+ * DPNI_OPT_HAS_POLICING - Allocate policers for this DPNI. They can be used to
+ * rate-limit traffic on a per traffic class (TC) basis.
+ */
+#define DPNI_OPT_HAS_POLICING 0x000004
+/**
+ * DPNI_OPT_SHARED_CONGESTION - Congestion can be managed in several ways,
+ * allowing the buffer pool to deplete on ingress, taildrop on each queue or
+ * use congestion groups for sets of queues. If set, it configures a single
+ * congestion group across all TCs. If reset, a congestion group is allocated
+ * for each TC. Only relevant if the DPNI has multiple traffic classes.
+ */
+#define DPNI_OPT_SHARED_CONGESTION 0x000008
+/**
+ * DPNI_OPT_HAS_KEY_MASKING - Enables TCAM for Flow Steering and QoS look-ups.
+ * If not specified, all look-ups are exact match. Note that TCAM is not
+ * available on LS1088 and its variants. Setting this bit on these SoCs will
+ * trigger an error.
+ */
+#define DPNI_OPT_HAS_KEY_MASKING 0x000010
+/**
+ * DPNI_OPT_NO_FS - Disables the flow steering table.
+ */
+#define DPNI_OPT_NO_FS 0x000020
+/**
+ * DPNI_OPT_SHARED_FS - Flow steering table is shared between all traffic
+ * classes
+ */
+#define DPNI_OPT_SHARED_FS 0x001000
+
+int dpni_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpni_id,
+ u16 *token);
+
+int dpni_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/**
+ * struct dpni_pools_cfg - Structure representing buffer pools configuration
+ * @num_dpbp: Number of DPBPs
+ * @pools: Array of buffer pools parameters; The number of valid entries
+ * must match 'num_dpbp' value
+ * @pools.dpbp_id: DPBP object ID
+ * @pools.buffer_size: Buffer size
+ * @pools.backup_pool: Backup pool
+ */
+struct dpni_pools_cfg {
+ u8 num_dpbp;
+ struct {
+ int dpbp_id;
+ u16 buffer_size;
+ int backup_pool;
+ } pools[DPNI_MAX_DPBP];
+};
+
+int dpni_set_pools(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_pools_cfg *cfg);
+
+int dpni_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpni_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpni_is_enabled(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *en);
+
+int dpni_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/* DPNI IRQ Index and Events */
+
+#define DPNI_IRQ_INDEX 0
+
+/* DPNI_IRQ_EVENT_LINK_CHANGED - indicates a change in link state */
+#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001
+
+/* DPNI_IRQ_EVENT_ENDPOINT_CHANGED - indicates a change in endpoint */
+#define DPNI_IRQ_EVENT_ENDPOINT_CHANGED 0x00000002
+
+int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en);
+
+int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 *en);
+
+int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask);
+
+int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *mask);
+
+int dpni_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status);
+
+int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status);
+
+/**
+ * struct dpni_attr - Structure representing DPNI attributes
+ * @options: Any combination of the following options:
+ * DPNI_OPT_TX_FRM_RELEASE
+ * DPNI_OPT_NO_MAC_FILTER
+ * DPNI_OPT_HAS_POLICING
+ * DPNI_OPT_SHARED_CONGESTION
+ * DPNI_OPT_HAS_KEY_MASKING
+ * DPNI_OPT_NO_FS
+ * @num_queues: Number of Tx and Rx queues used for traffic distribution.
+ * @num_tcs: Number of traffic classes (TCs), reserved for the DPNI.
+ * @mac_filter_entries: Number of entries in the MAC address filtering table.
+ * @vlan_filter_entries: Number of entries in the VLAN address filtering table.
+ * @qos_entries: Number of entries in the QoS classification table.
+ * @fs_entries: Number of entries in the flow steering table.
+ * @qos_key_size: Size, in bytes, of the QoS look-up key. Defining a key larger
+ * than this when adding QoS entries will result in an error.
+ * @fs_key_size: Size, in bytes, of the flow steering look-up key. Defining a
+ * key larger than this when composing the hash + FS key will
+ * result in an error.
+ * @wriop_version: Version of the WRIOP HW block. The 3 version values are
+ * stored in 6, 5 and 5 bits respectively.
+ */
+struct dpni_attr {
+ u32 options;
+ u8 num_queues;
+ u8 num_tcs;
+ u8 mac_filter_entries;
+ u8 vlan_filter_entries;
+ u8 qos_entries;
+ u16 fs_entries;
+ u8 qos_key_size;
+ u8 fs_key_size;
+ u16 wriop_version;
+};
+
+int dpni_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_attr *attr);
+
+/* DPNI errors */
+
+/**
+ * DPNI_ERROR_EOFHE - Extract out of frame header error
+ */
+#define DPNI_ERROR_EOFHE 0x00020000
+/**
+ * DPNI_ERROR_FLE - Frame length error
+ */
+#define DPNI_ERROR_FLE 0x00002000
+/**
+ * DPNI_ERROR_FPE - Frame physical error
+ */
+#define DPNI_ERROR_FPE 0x00001000
+/**
+ * DPNI_ERROR_PHE - Parsing header error
+ */
+#define DPNI_ERROR_PHE 0x00000020
+/**
+ * DPNI_ERROR_L3CE - Parser L3 checksum error
+ */
+#define DPNI_ERROR_L3CE 0x00000004
+/**
+ * DPNI_ERROR_L4CE - Parser L4 checksum error
+ */
+#define DPNI_ERROR_L4CE 0x00000001
+
+/**
+ * enum dpni_error_action - Defines DPNI behavior for errors
+ * @DPNI_ERROR_ACTION_DISCARD: Discard the frame
+ * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow
+ * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue
+ */
+enum dpni_error_action {
+ DPNI_ERROR_ACTION_DISCARD = 0,
+ DPNI_ERROR_ACTION_CONTINUE = 1,
+ DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2
+};
+
+/**
+ * struct dpni_error_cfg - Structure representing DPNI errors treatment
+ * @errors: Errors mask; use 'DPNI_ERROR_<X>' values
+ * @error_action: The desired action for the errors mask
+ * @set_frame_annotation: Set to '1' to mark the errors in frame annotation
+ * status (FAS); relevant only for the non-discard action
+ */
+struct dpni_error_cfg {
+ u32 errors;
+ enum dpni_error_action error_action;
+ int set_frame_annotation;
+};
+
+int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_error_cfg *cfg);
+
+/* DPNI buffer layout modification options */
+
+/**
+ * DPNI_BUF_LAYOUT_OPT_TIMESTAMP - Select to modify the time-stamp setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001
+/**
+ * DPNI_BUF_LAYOUT_OPT_PARSER_RESULT - Select to modify the parser-result
+ * setting; not applicable for Tx
+ */
+#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002
+/**
+ * DPNI_BUF_LAYOUT_OPT_FRAME_STATUS - Select to modify the frame-status setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004
+/**
+ * DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE - Select to modify the private-data-size setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008
+/**
+ * DPNI_BUF_LAYOUT_OPT_DATA_ALIGN - Select to modify the data-alignment setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010
+/**
+ * DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM - Select to modify the data-head-room setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020
+/**
+ * DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM - Select to modify the data-tail-room setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040
+
+/**
+ * struct dpni_buffer_layout - Structure representing DPNI buffer layout
+ * @options: Flags representing the suggested modifications to the buffer
+ * layout; Use any combination of 'DPNI_BUF_LAYOUT_OPT_<X>' flags
+ * @pass_timestamp: Pass timestamp value
+ * @pass_parser_result: Pass parser results
+ * @pass_frame_status: Pass frame status
+ * @private_data_size: Size kept for private data (in bytes)
+ * @data_align: Data alignment
+ * @data_head_room: Data head room
+ * @data_tail_room: Data tail room
+ */
+struct dpni_buffer_layout {
+ u32 options;
+ int pass_timestamp;
+ int pass_parser_result;
+ int pass_frame_status;
+ u16 private_data_size;
+ u16 data_align;
+ u16 data_head_room;
+ u16 data_tail_room;
+};
+
+/**
+ * enum dpni_queue_type - Identifies a type of queue targeted by the command
+ * @DPNI_QUEUE_RX: Rx queue
+ * @DPNI_QUEUE_TX: Tx queue
+ * @DPNI_QUEUE_TX_CONFIRM: Tx confirmation queue
+ * @DPNI_QUEUE_RX_ERR: Rx error queue
+ */
+enum dpni_queue_type {
+ DPNI_QUEUE_RX,
+ DPNI_QUEUE_TX,
+ DPNI_QUEUE_TX_CONFIRM,
+ DPNI_QUEUE_RX_ERR,
+};
+
+int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ struct dpni_buffer_layout *layout);
+
+int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ const struct dpni_buffer_layout *layout);
+
+/**
+ * enum dpni_offload - Identifies a type of offload targeted by the command
+ * @DPNI_OFF_RX_L3_CSUM: Rx L3 checksum validation
+ * @DPNI_OFF_RX_L4_CSUM: Rx L4 checksum validation
+ * @DPNI_OFF_TX_L3_CSUM: Tx L3 checksum generation
+ * @DPNI_OFF_TX_L4_CSUM: Tx L4 checksum generation
+ */
+enum dpni_offload {
+ DPNI_OFF_RX_L3_CSUM,
+ DPNI_OFF_RX_L4_CSUM,
+ DPNI_OFF_TX_L3_CSUM,
+ DPNI_OFF_TX_L4_CSUM,
+};
+
+int dpni_set_offload(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_offload type,
+ u32 config);
+
+int dpni_get_offload(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_offload type,
+ u32 *config);
+
+int dpni_get_qdid(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ u16 *qdid);
+
+int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 *data_offset);
+
+#define DPNI_STATISTICS_CNT 7
+
+/**
+ * union dpni_statistics - Union describing the DPNI statistics
+ * @page_0: Page_0 statistics structure
+ * @page_0.ingress_all_frames: Ingress frame count
+ * @page_0.ingress_all_bytes: Ingress byte count
+ * @page_0.ingress_multicast_frames: Ingress multicast frame count
+ * @page_0.ingress_multicast_bytes: Ingress multicast byte count
+ * @page_0.ingress_broadcast_frames: Ingress broadcast frame count
+ * @page_0.ingress_broadcast_bytes: Ingress broadcast byte count
+ * @page_1: Page_1 statistics structure
+ * @page_1.egress_all_frames: Egress frame count
+ * @page_1.egress_all_bytes: Egress byte count
+ * @page_1.egress_multicast_frames: Egress multicast frame count
+ * @page_1.egress_multicast_bytes: Egress multicast byte count
+ * @page_1.egress_broadcast_frames: Egress broadcast frame count
+ * @page_1.egress_broadcast_bytes: Egress broadcast byte count
+ * @page_2: Page_2 statistics structure
+ * @page_2.ingress_filtered_frames: Ingress filtered frame count
+ * @page_2.ingress_discarded_frames: Ingress discarded frame count
+ * @page_2.ingress_nobuffer_discards: Ingress discarded frame count due to
+ * lack of buffers
+ * @page_2.egress_discarded_frames: Egress discarded frame count
+ * @page_2.egress_confirmed_frames: Egress confirmed frame count
+ * @page_3: Page_3 statistics structure
+ * @page_3.egress_dequeue_bytes: Cumulative count of the number of bytes
+ * dequeued from egress FQs
+ * @page_3.egress_dequeue_frames: Cumulative count of the number of frames
+ * dequeued from egress FQs
+ * @page_3.egress_reject_bytes: Cumulative count of the number of bytes in
+ * egress frames whose enqueue was rejected
+ * @page_3.egress_reject_frames: Cumulative count of the number of egress
+ * frames whose enqueue was rejected
+ * @page_4: Page_4 statistics structure: congestion points
+ * @page_4.cgr_reject_frames: Number of rejected frames due to congestion point
+ * @page_4.cgr_reject_bytes: Number of rejected bytes due to congestion point
+ * @page_5: Page_5 statistics structure: policer
+ * @page_5.policer_cnt_red: Number of red colored frames
+ * @page_5.policer_cnt_yellow: Number of yellow colored frames
+ * @page_5.policer_cnt_green: Number of green colored frames
+ * @page_5.policer_cnt_re_red: Number of recolored red frames
+ * @page_5.policer_cnt_re_yellow: Number of recolored yellow frames
+ * @page_6: Page_6 statistics structure
+ * @page_6.tx_pending_frames: Total number of frames pending in egress FQs
+ * @raw: raw statistics structure, used to index counters
+ */
+union dpni_statistics {
+ struct {
+ u64 ingress_all_frames;
+ u64 ingress_all_bytes;
+ u64 ingress_multicast_frames;
+ u64 ingress_multicast_bytes;
+ u64 ingress_broadcast_frames;
+ u64 ingress_broadcast_bytes;
+ } page_0;
+ struct {
+ u64 egress_all_frames;
+ u64 egress_all_bytes;
+ u64 egress_multicast_frames;
+ u64 egress_multicast_bytes;
+ u64 egress_broadcast_frames;
+ u64 egress_broadcast_bytes;
+ } page_1;
+ struct {
+ u64 ingress_filtered_frames;
+ u64 ingress_discarded_frames;
+ u64 ingress_nobuffer_discards;
+ u64 egress_discarded_frames;
+ u64 egress_confirmed_frames;
+ } page_2;
+ struct {
+ u64 egress_dequeue_bytes;
+ u64 egress_dequeue_frames;
+ u64 egress_reject_bytes;
+ u64 egress_reject_frames;
+ } page_3;
+ struct {
+ u64 cgr_reject_frames;
+ u64 cgr_reject_bytes;
+ } page_4;
+ struct {
+ u64 policer_cnt_red;
+ u64 policer_cnt_yellow;
+ u64 policer_cnt_green;
+ u64 policer_cnt_re_red;
+ u64 policer_cnt_re_yellow;
+ } page_5;
+ struct {
+ u64 tx_pending_frames;
+ } page_6;
+ struct {
+ u64 counter[DPNI_STATISTICS_CNT];
+ } raw;
+};
+
+int dpni_get_statistics(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 page,
+ union dpni_statistics *stat);
+
+#define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL
+#define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
+#define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL
+#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
+#define DPNI_LINK_OPT_PFC_PAUSE 0x0000000000000010ULL
+
+/**
+ * struct dpni_link_cfg - Structure representing DPNI link configuration
+ * @rate: Rate
+ * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
+ */
+struct dpni_link_cfg {
+ u32 rate;
+ u64 options;
+};
+
+int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_link_cfg *cfg);
+
+int dpni_get_link_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_link_cfg *cfg);
+
+/**
+ * struct dpni_link_state - Structure representing DPNI link state
+ * @rate: Rate
+ * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
+ * @up: Link state; '0' for down, '1' for up
+ */
+struct dpni_link_state {
+ u32 rate;
+ u64 options;
+ int up;
+};
+
+int dpni_get_link_state(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_link_state *state);
+
+int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 max_frame_length);
+
+int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 *max_frame_length);
+
+int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int en);
+
+int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *en);
+
+int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int en);
+
+int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *en);
+
+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const u8 mac_addr[6]);
+
+int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 mac_addr[6]);
+
+int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 mac_addr[6]);
+
+int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const u8 mac_addr[6]);
+
+int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const u8 mac_addr[6]);
+
+int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int unicast,
+ int multicast);
+
+/**
+ * enum dpni_dist_mode - DPNI distribution mode
+ * @DPNI_DIST_MODE_NONE: No distribution
+ * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if
+ * the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation
+ * @DPNI_DIST_MODE_FS: Use explicit flow steering; only relevant if
+ * the 'DPNI_OPT_DIST_FS' option was set at DPNI creation
+ */
+enum dpni_dist_mode {
+ DPNI_DIST_MODE_NONE = 0,
+ DPNI_DIST_MODE_HASH = 1,
+ DPNI_DIST_MODE_FS = 2
+};
+
+/**
+ * enum dpni_fs_miss_action - DPNI Flow Steering miss action
+ * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame
+ * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use explicit flow-id
+ * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash
+ */
+enum dpni_fs_miss_action {
+ DPNI_FS_MISS_DROP = 0,
+ DPNI_FS_MISS_EXPLICIT_FLOWID = 1,
+ DPNI_FS_MISS_HASH = 2
+};
+
+/**
+ * struct dpni_fs_tbl_cfg - Flow Steering table configuration
+ * @miss_action: Miss action selection
+ * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID'
+ */
+struct dpni_fs_tbl_cfg {
+ enum dpni_fs_miss_action miss_action;
+ u16 default_flow_id;
+};
+
+int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg,
+ u8 *key_cfg_buf);
+
+/**
+ * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration
+ * @dist_size: Set the distribution size;
+ * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
+ * 112,128,192,224,256,384,448,512,768,896,1024
+ * @dist_mode: Distribution mode
+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
+ * the extractions to be used for the distribution key by calling
+ * dpni_prepare_key_cfg() relevant only when
+ * 'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0'
+ * @fs_cfg: Flow Steering table configuration; only relevant if
+ * 'dist_mode = DPNI_DIST_MODE_FS'
+ */
+struct dpni_rx_tc_dist_cfg {
+ u16 dist_size;
+ enum dpni_dist_mode dist_mode;
+ u64 key_cfg_iova;
+ struct dpni_fs_tbl_cfg fs_cfg;
+};
+
+int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 tc_id,
+ const struct dpni_rx_tc_dist_cfg *cfg);
+
+/**
+ * DPNI_FS_MISS_DROP - When used as fs_miss_flow_id in dpni_set_rx_fs_dist(),
+ * signals the DPNI to drop all unclassified frames
+ */
+#define DPNI_FS_MISS_DROP ((uint16_t)-1)
+
+/**
+ * struct dpni_rx_dist_cfg - Rx distribution configuration
+ * @dist_size: distribution size
+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
+ * the extractions to be used for the distribution key by calling
+ * dpni_prepare_key_cfg(); relevant only when enable != 0,
+ * otherwise it can be '0'
+ * @enable: enable/disable the distribution.
+ * @tc: TC id for which distribution is set
+ * @fs_miss_flow_id: when a packet misses all rules from the flow steering
+ * table and hash distribution is disabled, it is put into this
+ * queue id; use DPNI_FS_MISS_DROP to drop such frames. The value
+ * of this field is used only when flow steering distribution is
+ * enabled and hash distribution is disabled
+ */
+struct dpni_rx_dist_cfg {
+ u16 dist_size;
+ u64 key_cfg_iova;
+ u8 enable;
+ u8 tc;
+ u16 fs_miss_flow_id;
+};
+
+int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rx_dist_cfg *cfg);
+
+int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rx_dist_cfg *cfg);
+
+/**
+ * struct dpni_qos_tbl_cfg - Structure representing QOS table configuration
+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
+ * key extractions to be used as the QoS criteria by calling
+ * dpni_prepare_key_cfg()
+ * @discard_on_miss: Set to '1' to discard frames in case of no match (miss);
+ * '0' to use the 'default_tc' in such cases
+ * @default_tc: Used in case of no-match and 'discard_on_miss'= 0
+ */
+struct dpni_qos_tbl_cfg {
+ u64 key_cfg_iova;
+ int discard_on_miss;
+ u8 default_tc;
+};
+
+int dpni_set_qos_table(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_qos_tbl_cfg *cfg);
+
+/**
+ * enum dpni_dest - DPNI destination types
+ * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and
+ * does not generate FQDAN notifications; user is expected to
+ * dequeue from the queue based on polling or other user-defined
+ * method
+ * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
+ * notifications to the specified DPIO; user is expected to dequeue
+ * from the queue only after notification is received
+ * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate
+ * FQDAN notifications, but is connected to the specified DPCON
+ * object; user is expected to dequeue from the DPCON channel
+ */
+enum dpni_dest {
+ DPNI_DEST_NONE = 0,
+ DPNI_DEST_DPIO = 1,
+ DPNI_DEST_DPCON = 2
+};
+
+/**
+ * struct dpni_queue - Queue structure
+ * @destination: - Destination structure
+ * @destination.id: ID of the destination, only relevant if DEST_TYPE is > 0.
+ * Identifies either a DPIO or a DPCON object.
+ * Not relevant for Tx queues.
+ * @destination.type: May be one of the following:
+ * 0 - No destination, queue can be manually
+ * queried, but will not push traffic or
+ * notifications to a DPIO;
+ * 1 - The destination is a DPIO. When traffic
+ * becomes available in the queue a FQDAN
+ * (FQ data available notification) will be
+ * generated to selected DPIO;
+ * 2 - The destination is a DPCON. The queue is
+ * associated with a DPCON object for the
+ * purpose of scheduling between multiple
+ * queues. The DPCON may be independently
+ * configured to generate notifications.
+ * Not relevant for Tx queues.
+ * @destination.hold_active: Hold active, keeps the queue scheduled on a
+ * DPIO for longer during dequeue to reduce the spread of traffic.
+ * Only relevant if queues are not affined to a single DPIO.
+ * @user_context: User data, presented to the user along with any frames
+ * from this queue. Not relevant for Tx queues.
+ * @flc: FD FLow Context structure
+ * @flc.value: Default FLC value for traffic dequeued from
+ * this queue. Please check description of FD
+ * structure for more information.
+ * Note that FLC values set using dpni_add_fs_entry,
+ * if any, take precedence over values per queue.
+ * @flc.stash_control: Boolean, indicates whether the 6 least
+ * significant bits are used for stash control. If set, the 6
+ * least significant bits in value are interpreted as follows:
+ * - bits 0-1: indicates the number of 64 byte units of context
+ * that are stashed. FLC value is interpreted as a memory address
+ * in this case, excluding the 6 LS bits.
+ * - bits 2-3: indicates the number of 64 byte units of frame
+ * annotation to be stashed. Annotation is placed at FD[ADDR].
+ * - bits 4-5: indicates the number of 64 byte units of frame
+ * data to be stashed. Frame data is placed at FD[ADDR] +
+ * FD[OFFSET].
+ * For more details check the Frame Descriptor section in the
+ * hardware documentation.
+ */
+struct dpni_queue {
+ struct {
+ u16 id;
+ enum dpni_dest type;
+ char hold_active;
+ u8 priority;
+ } destination;
+ u64 user_context;
+ struct {
+ u64 value;
+ char stash_control;
+ } flc;
+};
+
+/**
+ * struct dpni_queue_id - Queue identification, used for enqueue commands
+ * or queue control
+ * @fqid: FQID used for enqueueing to and/or configuration of this specific FQ
+ * @qdbin: Queueing bin, used to enqueue using QDID, DQBIN, QPRI. Only relevant
+ * for Tx queues.
+ */
+struct dpni_queue_id {
+ u32 fqid;
+ u16 qdbin;
+};
+
+/* dpni_set_queue() modification options; any combination may be used */
+#define DPNI_QUEUE_OPT_USER_CTX 0x00000001
+#define DPNI_QUEUE_OPT_DEST 0x00000002
+#define DPNI_QUEUE_OPT_FLC 0x00000004
+#define DPNI_QUEUE_OPT_HOLD_ACTIVE 0x00000008
+
+int dpni_set_queue(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ u8 tc,
+ u8 index,
+ u8 options,
+ const struct dpni_queue *queue);
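+
+/*
+ * Usage sketch (illustrative): point the first Rx queue of TC 0 at a DPIO so
+ * it generates FQDANs, and attach a driver cookie. 'dpio_id' and 'priv_fq'
+ * are assumed to exist in the caller:
+ *
+ *	struct dpni_queue queue = { 0 };
+ *
+ *	queue.destination.type = DPNI_DEST_DPIO;
+ *	queue.destination.id = dpio_id;
+ *	queue.destination.priority = 1;
+ *	queue.user_context = (u64)(uintptr_t)priv_fq;
+ *
+ *	err = dpni_set_queue(mc_io, 0, token, DPNI_QUEUE_RX, 0, 0,
+ *			     DPNI_QUEUE_OPT_DEST | DPNI_QUEUE_OPT_USER_CTX,
+ *			     &queue);
+ */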
+
+int dpni_get_queue(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ u8 tc,
+ u8 index,
+ struct dpni_queue *queue,
+ struct dpni_queue_id *qid);
+
+/**
+ * enum dpni_congestion_unit - DPNI congestion units
+ * @DPNI_CONGESTION_UNIT_BYTES: bytes units
+ * @DPNI_CONGESTION_UNIT_FRAMES: frames units
+ */
+enum dpni_congestion_unit {
+ DPNI_CONGESTION_UNIT_BYTES = 0,
+ DPNI_CONGESTION_UNIT_FRAMES
+};
+
+/**
+ * enum dpni_congestion_point - Structure representing congestion point
+ * @DPNI_CP_QUEUE: Set taildrop per queue, identified by QUEUE_TYPE, TC and
+ * QUEUE_INDEX
+ * @DPNI_CP_GROUP: Set taildrop per queue group. Depending on options used to
+ * define the DPNI this can be either per TC (default) or per
+ * interface (DPNI_OPT_SHARED_CONGESTION set at DPNI create).
+ * QUEUE_INDEX is ignored if this type is used.
+ */
+enum dpni_congestion_point {
+ DPNI_CP_QUEUE,
+ DPNI_CP_GROUP,
+};
+
+/**
+ * struct dpni_dest_cfg - Structure representing DPNI destination parameters
+ * @dest_type: Destination type
+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
+ * @priority: Priority selection within the DPIO or DPCON channel; valid
+ * values are 0-1 or 0-7, depending on the number of priorities
+ * in that channel; not relevant for 'DPNI_DEST_NONE' option
+ */
+struct dpni_dest_cfg {
+ enum dpni_dest dest_type;
+ int dest_id;
+ u8 priority;
+};
+
+/* DPNI congestion options */
+
+/**
+ * DPNI_CONG_OPT_FLOW_CONTROL - Entering this congestion state triggers flow
+ * control or priority flow control. This has effect only if flow control is
+ * enabled with dpni_set_link_cfg().
+ */
+#define DPNI_CONG_OPT_FLOW_CONTROL 0x00000040
+
+/**
+ * struct dpni_congestion_notification_cfg - congestion notification
+ * configuration
+ * @units: Units type
+ * @threshold_entry: Above this threshold we enter a congestion state.
+ * Set it to '0' to disable it.
+ * @threshold_exit: Below this threshold we exit the congestion state.
+ * @message_ctx: The context that will be part of the CSCN message
+ * @message_iova: I/O virtual address (must be in DMA-able memory),
+ * must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>'
+ * is contained in 'options'
+ * @dest_cfg: CSCN can be sent to either a DPIO or a DPCON WQ channel
+ * @notification_mode: Mask of available options; use 'DPNI_CONG_OPT_<X>' values
+ */
+struct dpni_congestion_notification_cfg {
+ enum dpni_congestion_unit units;
+ u32 threshold_entry;
+ u32 threshold_exit;
+ u64 message_ctx;
+ u64 message_iova;
+ struct dpni_dest_cfg dest_cfg;
+ u16 notification_mode;
+};
+
+int dpni_set_congestion_notification(
+ struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ u8 tc_id,
+ const struct dpni_congestion_notification_cfg *cfg);
+
+/**
+ * struct dpni_taildrop - Structure representing the taildrop
+ * @enable: Indicates whether the taildrop is active or not.
+ * @units: Indicates the unit of THRESHOLD. Queue taildrop only supports
+ * byte units; this field is ignored and assumed = 0 if
+ * CONGESTION_POINT is 0.
+ * @threshold: Threshold value, in units identified by UNITS field. Value 0
+ * cannot be used as a valid taildrop threshold; THRESHOLD must
+ * be > 0 if the taildrop is enabled.
+ */
+struct dpni_taildrop {
+ char enable;
+ enum dpni_congestion_unit units;
+ u32 threshold;
+};
+
+int dpni_set_taildrop(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_congestion_point cg_point,
+ enum dpni_queue_type q_type,
+ u8 tc,
+ u8 q_index,
+ struct dpni_taildrop *taildrop);
+
+int dpni_get_taildrop(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_congestion_point cg_point,
+ enum dpni_queue_type q_type,
+ u8 tc,
+ u8 q_index,
+ struct dpni_taildrop *taildrop);
+
+/**
+ * struct dpni_rule_cfg - Rule configuration for table lookup
+ * @key_iova: I/O virtual address of the key (must be in DMA-able memory)
+ * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory)
+ * @key_size: key and mask size (in bytes)
+ */
+struct dpni_rule_cfg {
+ u64 key_iova;
+ u64 mask_iova;
+ u8 key_size;
+};
+
+/**
+ * DPNI_FS_OPT_DISCARD - Discard matching traffic. If set, this takes
+ * precedence over any other configuration and matching traffic is always
+ * discarded.
+ */
+#define DPNI_FS_OPT_DISCARD 0x1
+
+/**
+ * DPNI_FS_OPT_SET_FLC - Set FLC value. If set, flc member of struct
+ * dpni_fs_action_cfg is used to override the FLC value set per queue.
+ * For more details check the Frame Descriptor section in the hardware
+ * documentation.
+ */
+#define DPNI_FS_OPT_SET_FLC 0x2
+
+/**
+ * DPNI_FS_OPT_SET_STASH_CONTROL - Indicates whether the 6 lowest significant
+ * bits of FLC are used for stash control. If set, the 6 least significant bits
+ * in value are interpreted as follows:
+ * - bits 0-1: indicates the number of 64 byte units of context that are
+ * stashed. FLC value is interpreted as a memory address in this case,
+ * excluding the 6 LS bits.
+ * - bits 2-3: indicates the number of 64 byte units of frame annotation
+ * to be stashed. Annotation is placed at FD[ADDR].
+ * - bits 4-5: indicates the number of 64 byte units of frame data to be
+ * stashed. Frame data is placed at FD[ADDR] + FD[OFFSET].
+ * This flag is ignored if DPNI_FS_OPT_SET_FLC is not specified.
+ */
+#define DPNI_FS_OPT_SET_STASH_CONTROL 0x4
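+
+/*
+ * Worked example (illustrative): build an FLC value that stashes one 64-byte
+ * unit each of context, annotation and frame data. 'ctx_addr' is an assumed
+ * 64-byte-aligned memory address, so its 6 least significant bits are free
+ * to carry the stash control:
+ *
+ *	u64 flc = ctx_addr | (1 << 0) | (1 << 2) | (1 << 4);
+ *
+ *	action.flc = flc;
+ *	action.options = DPNI_FS_OPT_SET_FLC | DPNI_FS_OPT_SET_STASH_CONTROL;
+ */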
+
+/**
+ * struct dpni_fs_action_cfg - Action configuration for table look-up
+ * @flc: FLC value for traffic matching this rule. Please check the
+ * Frame Descriptor section in the hardware documentation for
+ * more information.
+ * @flow_id: Identifies the Rx queue used for matching traffic. Supported
+ * values are in the range 0 to num_queues - 1.
+ * @options: Any combination of DPNI_FS_OPT_ values.
+ */
+struct dpni_fs_action_cfg {
+ u64 flc;
+ u16 flow_id;
+ u16 options;
+};
+
+int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 tc_id,
+ u16 index,
+ const struct dpni_rule_cfg *cfg,
+ const struct dpni_fs_action_cfg *action);
+
+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 tc_id,
+ const struct dpni_rule_cfg *cfg);
+
+int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rule_cfg *cfg,
+ u8 tc_id,
+ u16 index);
+
+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rule_cfg *cfg);
+
+int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpni_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver);
+
+/**
+ * struct dpni_tx_shaping_cfg - Structure representing DPNI tx shaping configuration
+ * @rate_limit: Rate in Mbps
+ * @max_burst_size: Burst size in bytes (up to 64KB)
+ */
+struct dpni_tx_shaping_cfg {
+ u32 rate_limit;
+ u16 max_burst_size;
+};
+
+int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_tx_shaping_cfg *tx_cr_shaper,
+ const struct dpni_tx_shaping_cfg *tx_er_shaper,
+ int coupled);
+
+/**
+ * struct dpni_single_step_cfg - configure single step PTP (IEEE 1588)
+ * @en: enable single step PTP. When enabled the PTPv1 functionality
+ * will not work. If the field is zero, offset and ch_update
+ * parameters will be ignored
+ * @offset: start offset from the beginning of the frame where
+ * timestamp field is found. The offset must respect all MAC
+ * headers, VLAN tags and other protocol headers
+ * @ch_update: when set UDP checksum will be updated inside packet
+ * @peer_delay: For peer-to-peer transparent clocks, add this value to the
+ * correction field in addition to the transient time update.
+ * The value is expressed in nanoseconds.
+ * @ptp_onestep_reg_base: 1588 SINGLE_STEP register base address. This
+ * address is used to update the register contents directly.
+ * The user has to create an address mapping for it.
+ */
+struct dpni_single_step_cfg {
+ u8 en;
+ u8 ch_update;
+ u16 offset;
+ u32 peer_delay;
+ u32 ptp_onestep_reg_base;
+};
+
+int dpni_set_single_step_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_single_step_cfg *ptp_cfg);
+
+int dpni_get_single_step_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_single_step_cfg *ptp_cfg);
+
+int dpni_enable_vlan_filter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u32 en);
+
+int dpni_add_vlan_id(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, u8 flags, u8 tc_id, u8 flow_id);
+
+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id);
+
+#endif /* __FSL_DPNI_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h
new file mode 100644
index 000000000..96ffeb948
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2018 NXP
+ */
+
+#ifndef _FSL_DPRTC_CMD_H
+#define _FSL_DPRTC_CMD_H
+
+/* Command versioning */
+#define DPRTC_CMD_BASE_VERSION 1
+#define DPRTC_CMD_VERSION_2 2
+#define DPRTC_CMD_ID_OFFSET 4
+
+#define DPRTC_CMD(id) (((id) << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_BASE_VERSION)
+#define DPRTC_CMD_V2(id) (((id) << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_VERSION_2)
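+
+/*
+ * For example, DPRTC_CMDID_OPEN below expands to (0x810 << 4) | 1 = 0x8101:
+ * the command ID sits above DPRTC_CMD_ID_OFFSET and the command version
+ * occupies the low bits.
+ */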
+
+/* Command IDs */
+#define DPRTC_CMDID_CLOSE DPRTC_CMD(0x800)
+#define DPRTC_CMDID_OPEN DPRTC_CMD(0x810)
+
+#define DPRTC_CMDID_SET_IRQ_ENABLE DPRTC_CMD(0x012)
+#define DPRTC_CMDID_GET_IRQ_ENABLE DPRTC_CMD(0x013)
+#define DPRTC_CMDID_SET_IRQ_MASK DPRTC_CMD_V2(0x014)
+#define DPRTC_CMDID_GET_IRQ_MASK DPRTC_CMD(0x015)
+#define DPRTC_CMDID_GET_IRQ_STATUS DPRTC_CMD(0x016)
+#define DPRTC_CMDID_CLEAR_IRQ_STATUS DPRTC_CMD(0x017)
+
+#pragma pack(push, 1)
+struct dprtc_cmd_open {
+ __le32 dprtc_id;
+};
+
+struct dprtc_cmd_get_irq {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dprtc_cmd_set_irq_enable {
+ u8 en;
+ u8 pad[3];
+ u8 irq_index;
+};
+
+struct dprtc_rsp_get_irq_enable {
+ u8 en;
+};
+
+struct dprtc_cmd_set_irq_mask {
+ __le32 mask;
+ u8 irq_index;
+};
+
+struct dprtc_rsp_get_irq_mask {
+ __le32 mask;
+};
+
+struct dprtc_cmd_get_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dprtc_rsp_get_irq_status {
+ __le32 status;
+};
+
+struct dprtc_cmd_clear_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+#pragma pack(pop)
+
+#endif /* _FSL_DPRTC_CMD_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc.c b/drivers/net/ethernet/freescale/dpaa2/dprtc.c
new file mode 100644
index 000000000..ed52a34fa
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dprtc.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2018 NXP
+ */
+
+#include <linux/fsl/mc.h>
+
+#include "dprtc.h"
+#include "dprtc-cmd.h"
+
+/**
+ * dprtc_open() - Open a control session for the specified object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dprtc_id: DPRTC unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dprtc_create function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dprtc_id,
+ u16 *token)
+{
+ struct dprtc_cmd_open *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dprtc_cmd_open *)cmd.params;
+ cmd_params->dprtc_id = cpu_to_le32(dprtc_id);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
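+
+/*
+ * Usage sketch (illustrative): a control session brackets all other DPRTC
+ * commands. 'dprtc_id' is assumed to come from the fsl-mc device that
+ * represents this object:
+ *
+ *	u16 token;
+ *	int err;
+ *
+ *	err = dprtc_open(mc_io, 0, dprtc_id, &token);
+ *	if (err)
+ *		return err;
+ *	// ... issue DPRTC commands using 'token' ...
+ *	dprtc_close(mc_io, 0, token);
+ */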
+
+/**
+ * dprtc_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLOSE, cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprtc_set_irq_enable() - Set overall interrupt state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @irq_index: The interrupt index to configure
+ * @en: Interrupt state - enable = 1, disable = 0
+ *
+ * Allows GPP software to control when interrupts are generated.
+ * Each interrupt can have up to 32 causes. The enable/disable setting
+ * controls the overall interrupt state. If the interrupt is disabled, none
+ * of its causes can assert it.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en)
+{
+ struct dprtc_cmd_set_irq_enable *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_ENABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprtc_cmd_set_irq_enable *)cmd.params;
+ cmd_params->irq_index = irq_index;
+ cmd_params->en = en;
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprtc_get_irq_enable() - Get overall interrupt state
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @irq_index: The interrupt index to configure
+ * @en: Returned interrupt state - enable = 1, disable = 0
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_get_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 *en)
+{
+ struct dprtc_rsp_get_irq_enable *rsp_params;
+ struct dprtc_cmd_get_irq *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_ENABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprtc_cmd_get_irq *)cmd.params;
+ cmd_params->irq_index = irq_index;
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dprtc_rsp_get_irq_enable *)cmd.params;
+ *en = rsp_params->en;
+
+ return 0;
+}
+
+/**
+ * dprtc_set_irq_mask() - Set interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @irq_index: The interrupt index to configure
+ * @mask: Event mask to trigger interrupt;
+ * each bit:
+ * 0 = ignore event
+ * 1 = consider event for asserting IRQ
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask)
+{
+ struct dprtc_cmd_set_irq_mask *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_MASK,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprtc_cmd_set_irq_mask *)cmd.params;
+ cmd_params->mask = cpu_to_le32(mask);
+ cmd_params->irq_index = irq_index;
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprtc_get_irq_mask() - Get interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @irq_index: The interrupt index to configure
+ * @mask: Returned event mask to trigger interrupt
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_get_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *mask)
+{
+ struct dprtc_rsp_get_irq_mask *rsp_params;
+ struct dprtc_cmd_get_irq *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_MASK,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprtc_cmd_get_irq *)cmd.params;
+ cmd_params->irq_index = irq_index;
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dprtc_rsp_get_irq_mask *)cmd.params;
+ *mask = le32_to_cpu(rsp_params->mask);
+
+ return 0;
+}
+
+/**
+ * dprtc_get_irq_status() - Get the current status of any pending interrupts.
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @irq_index: The interrupt index to configure
+ * @status: Returned interrupts status - one bit per cause:
+ * 0 = no interrupt pending
+ * 1 = interrupt pending
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status)
+{
+ struct dprtc_cmd_get_irq_status *cmd_params;
+ struct dprtc_rsp_get_irq_status *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_STATUS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprtc_cmd_get_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(*status);
+ cmd_params->irq_index = irq_index;
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dprtc_rsp_get_irq_status *)cmd.params;
+ *status = le32_to_cpu(rsp_params->status);
+
+ return 0;
+}
+
+/**
+ * dprtc_clear_irq_status() - Clear a pending interrupt's status
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @irq_index: The interrupt index to configure
+ * @status: Bits to clear (W1C) - one bit per cause:
+ * 0 = don't change
+ * 1 = clear status bit
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status)
+{
+ struct dprtc_cmd_clear_irq_status *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLEAR_IRQ_STATUS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprtc_cmd_clear_irq_status *)cmd.params;
+ cmd_params->irq_index = irq_index;
+ cmd_params->status = cpu_to_le32(status);
+
+ return mc_send_command(mc_io, &cmd);
+}
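+
+/*
+ * Usage sketch (hypothetical caller, not part of this file): the W1C model
+ * above is typically used by reading the pending causes and then writing the
+ * same bits back to acknowledge them. Only the dprtc_* calls and DPRTC_*
+ * constants are real; the wrapper and handle_pps_event() are assumptions.
+ *
+ *	static int example_dprtc_irq(struct fsl_mc_io *mc_io, u16 token)
+ *	{
+ *		u32 status = 0;
+ *		int err;
+ *
+ *		err = dprtc_get_irq_status(mc_io, 0, token, DPRTC_IRQ_INDEX,
+ *					   &status);
+ *		if (err)
+ *			return err;
+ *
+ *		if (status & DPRTC_EVENT_PPS)
+ *			handle_pps_event();
+ *
+ *		Acknowledge the handled causes (write-1-to-clear):
+ *		return dprtc_clear_irq_status(mc_io, 0, token,
+ *					      DPRTC_IRQ_INDEX, status);
+ *	}
+ */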
diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc.h b/drivers/net/ethernet/freescale/dpaa2/dprtc.h
new file mode 100644
index 000000000..01d77c685
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dprtc.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2018 NXP
+ */
+
+#ifndef __FSL_DPRTC_H
+#define __FSL_DPRTC_H
+
+/* Data Path Real Time Counter API
+ * Contains initialization APIs and runtime control APIs for RTC
+ */
+
+struct fsl_mc_io;
+
+#define DPRTC_MAX_IRQ_NUM 1
+#define DPRTC_IRQ_INDEX 0
+
+#define DPRTC_EVENT_PPS 0x08000000
+#define DPRTC_EVENT_ETS1 0x00800000
+#define DPRTC_EVENT_ETS2 0x00400000
+
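+/*
+ * Configuration sketch (illustrative, not part of this API): unmask the
+ * cause(s) of interest, then enable the interrupt line. Only the dprtc_*
+ * calls and DPRTC_* constants are real; the wrapper is an assumption.
+ *
+ *	static int example_enable_pps_irq(struct fsl_mc_io *mc_io, u16 token)
+ *	{
+ *		int err;
+ *
+ *		err = dprtc_set_irq_mask(mc_io, 0, token, DPRTC_IRQ_INDEX,
+ *					 DPRTC_EVENT_PPS);
+ *		if (err)
+ *			return err;
+ *
+ *		return dprtc_set_irq_enable(mc_io, 0, token,
+ *					    DPRTC_IRQ_INDEX, 1);
+ *	}
+ */
+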
+int dprtc_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dprtc_id,
+ u16 *token);
+
+int dprtc_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en);
+
+int dprtc_get_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 *en);
+
+int dprtc_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask);
+
+int dprtc_get_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *mask);
+
+int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status);
+
+int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status);
+
+#endif /* __FSL_DPRTC_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h
new file mode 100644
index 000000000..397d55f2b
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h
@@ -0,0 +1,556 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2021 NXP
+ *
+ */
+
+#ifndef __FSL_DPSW_CMD_H
+#define __FSL_DPSW_CMD_H
+
+#include "dpsw.h"
+
+/* DPSW Version */
+#define DPSW_VER_MAJOR 8
+#define DPSW_VER_MINOR 9
+
+#define DPSW_CMD_BASE_VERSION 1
+#define DPSW_CMD_VERSION_2 2
+#define DPSW_CMD_ID_OFFSET 4
+
+#define DPSW_CMD_ID(id) (((id) << DPSW_CMD_ID_OFFSET) | DPSW_CMD_BASE_VERSION)
+#define DPSW_CMD_V2(id) (((id) << DPSW_CMD_ID_OFFSET) | DPSW_CMD_VERSION_2)
+
+/* Command IDs */
+#define DPSW_CMDID_CLOSE DPSW_CMD_ID(0x800)
+#define DPSW_CMDID_OPEN DPSW_CMD_ID(0x802)
+
+#define DPSW_CMDID_GET_API_VERSION DPSW_CMD_ID(0xa02)
+
+#define DPSW_CMDID_ENABLE DPSW_CMD_ID(0x002)
+#define DPSW_CMDID_DISABLE DPSW_CMD_ID(0x003)
+#define DPSW_CMDID_GET_ATTR DPSW_CMD_V2(0x004)
+#define DPSW_CMDID_RESET DPSW_CMD_ID(0x005)
+
+#define DPSW_CMDID_SET_IRQ_ENABLE DPSW_CMD_ID(0x012)
+
+#define DPSW_CMDID_SET_IRQ_MASK DPSW_CMD_ID(0x014)
+
+#define DPSW_CMDID_GET_IRQ_STATUS DPSW_CMD_ID(0x016)
+#define DPSW_CMDID_CLEAR_IRQ_STATUS DPSW_CMD_ID(0x017)
+
+#define DPSW_CMDID_SET_REFLECTION_IF DPSW_CMD_ID(0x022)
+
+#define DPSW_CMDID_IF_SET_TCI DPSW_CMD_ID(0x030)
+#define DPSW_CMDID_IF_SET_STP DPSW_CMD_ID(0x031)
+
+#define DPSW_CMDID_IF_GET_COUNTER DPSW_CMD_V2(0x034)
+
+#define DPSW_CMDID_IF_ADD_REFLECTION DPSW_CMD_ID(0x037)
+#define DPSW_CMDID_IF_REMOVE_REFLECTION DPSW_CMD_ID(0x038)
+
+#define DPSW_CMDID_IF_ENABLE DPSW_CMD_ID(0x03D)
+#define DPSW_CMDID_IF_DISABLE DPSW_CMD_ID(0x03E)
+
+#define DPSW_CMDID_IF_GET_ATTR DPSW_CMD_ID(0x042)
+
+#define DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH DPSW_CMD_ID(0x044)
+
+#define DPSW_CMDID_IF_GET_LINK_STATE DPSW_CMD_ID(0x046)
+
+#define DPSW_CMDID_IF_GET_TCI DPSW_CMD_ID(0x04A)
+
+#define DPSW_CMDID_IF_SET_LINK_CFG DPSW_CMD_ID(0x04C)
+
+#define DPSW_CMDID_VLAN_ADD DPSW_CMD_ID(0x060)
+#define DPSW_CMDID_VLAN_ADD_IF DPSW_CMD_V2(0x061)
+#define DPSW_CMDID_VLAN_ADD_IF_UNTAGGED DPSW_CMD_ID(0x062)
+
+#define DPSW_CMDID_VLAN_REMOVE_IF DPSW_CMD_ID(0x064)
+#define DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED DPSW_CMD_ID(0x065)
+#define DPSW_CMDID_VLAN_REMOVE_IF_FLOODING DPSW_CMD_ID(0x066)
+#define DPSW_CMDID_VLAN_REMOVE DPSW_CMD_ID(0x067)
+
+#define DPSW_CMDID_FDB_ADD DPSW_CMD_ID(0x082)
+#define DPSW_CMDID_FDB_REMOVE DPSW_CMD_ID(0x083)
+#define DPSW_CMDID_FDB_ADD_UNICAST DPSW_CMD_ID(0x084)
+#define DPSW_CMDID_FDB_REMOVE_UNICAST DPSW_CMD_ID(0x085)
+#define DPSW_CMDID_FDB_ADD_MULTICAST DPSW_CMD_ID(0x086)
+#define DPSW_CMDID_FDB_REMOVE_MULTICAST DPSW_CMD_ID(0x087)
+#define DPSW_CMDID_FDB_DUMP DPSW_CMD_ID(0x08A)
+
+#define DPSW_CMDID_ACL_ADD DPSW_CMD_ID(0x090)
+#define DPSW_CMDID_ACL_REMOVE DPSW_CMD_ID(0x091)
+#define DPSW_CMDID_ACL_ADD_ENTRY DPSW_CMD_ID(0x092)
+#define DPSW_CMDID_ACL_REMOVE_ENTRY DPSW_CMD_ID(0x093)
+#define DPSW_CMDID_ACL_ADD_IF DPSW_CMD_ID(0x094)
+#define DPSW_CMDID_ACL_REMOVE_IF DPSW_CMD_ID(0x095)
+
+#define DPSW_CMDID_IF_GET_PORT_MAC_ADDR DPSW_CMD_ID(0x0A7)
+
+#define DPSW_CMDID_CTRL_IF_GET_ATTR DPSW_CMD_ID(0x0A0)
+#define DPSW_CMDID_CTRL_IF_SET_POOLS DPSW_CMD_ID(0x0A1)
+#define DPSW_CMDID_CTRL_IF_ENABLE DPSW_CMD_ID(0x0A2)
+#define DPSW_CMDID_CTRL_IF_DISABLE DPSW_CMD_ID(0x0A3)
+#define DPSW_CMDID_CTRL_IF_SET_QUEUE DPSW_CMD_ID(0x0A6)
+
+#define DPSW_CMDID_SET_EGRESS_FLOOD DPSW_CMD_ID(0x0AC)
+#define DPSW_CMDID_IF_SET_LEARNING_MODE DPSW_CMD_ID(0x0AD)
+
+/* Macros for accessing command fields smaller than 1 byte */
+#define DPSW_MASK(field) \
+ GENMASK(DPSW_##field##_SHIFT + DPSW_##field##_SIZE - 1, \
+ DPSW_##field##_SHIFT)
+#define dpsw_set_field(var, field, val) \
+ ((var) |= (((val) << DPSW_##field##_SHIFT) & DPSW_MASK(field)))
+#define dpsw_get_field(var, field) \
+ (((var) & DPSW_MASK(field)) >> DPSW_##field##_SHIFT)
+#define dpsw_get_bit(var, bit) \
+ (((var) >> (bit)) & GENMASK(0, 0))
+
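+/*
+ * Usage sketch (illustrative only): together with the SHIFT/SIZE pairs
+ * defined below, these macros pack and unpack sub-byte fields. For example,
+ * with DPSW_STATE_SHIFT 0 and DPSW_STATE_SIZE 4, an STP state occupies the
+ * low 4 bits of a u8:
+ *
+ *	u8 state = 0;
+ *
+ *	dpsw_set_field(state, STATE, DPSW_STP_STATE_FORWARDING);
+ *	... dpsw_get_field(state, STATE) then recovers the value.
+ */
+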
+#pragma pack(push, 1)
+struct dpsw_cmd_open {
+ __le32 dpsw_id;
+};
+
+#define DPSW_COMPONENT_TYPE_SHIFT 0
+#define DPSW_COMPONENT_TYPE_SIZE 4
+
+struct dpsw_cmd_create {
+ /* cmd word 0 */
+ __le16 num_ifs;
+ u8 max_fdbs;
+ u8 max_meters_per_if;
+ /* from LSB: only the first 4 bits */
+ u8 component_type;
+ u8 pad[3];
+ /* cmd word 1 */
+ __le16 max_vlans;
+ __le16 max_fdb_entries;
+ __le16 fdb_aging_time;
+ __le16 max_fdb_mc_groups;
+ /* cmd word 2 */
+ __le64 options;
+};
+
+struct dpsw_cmd_destroy {
+ __le32 dpsw_id;
+};
+
+#define DPSW_ENABLE_SHIFT 0
+#define DPSW_ENABLE_SIZE 1
+
+struct dpsw_rsp_is_enabled {
+ /* from LSB: enable:1 */
+ u8 enabled;
+};
+
+struct dpsw_cmd_set_irq_enable {
+ u8 enable_state;
+ u8 pad[3];
+ u8 irq_index;
+};
+
+struct dpsw_cmd_get_irq_enable {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dpsw_rsp_get_irq_enable {
+ u8 enable_state;
+};
+
+struct dpsw_cmd_set_irq_mask {
+ __le32 mask;
+ u8 irq_index;
+};
+
+struct dpsw_cmd_get_irq_mask {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dpsw_rsp_get_irq_mask {
+ __le32 mask;
+};
+
+struct dpsw_cmd_get_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dpsw_rsp_get_irq_status {
+ __le32 status;
+};
+
+struct dpsw_cmd_clear_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+#define DPSW_COMPONENT_TYPE_SHIFT 0
+#define DPSW_COMPONENT_TYPE_SIZE 4
+
+#define DPSW_FLOODING_CFG_SHIFT 0
+#define DPSW_FLOODING_CFG_SIZE 4
+
+#define DPSW_BROADCAST_CFG_SHIFT 4
+#define DPSW_BROADCAST_CFG_SIZE 4
+
+struct dpsw_rsp_get_attr {
+ /* cmd word 0 */
+ __le16 num_ifs;
+ u8 max_fdbs;
+ u8 num_fdbs;
+ __le16 max_vlans;
+ __le16 num_vlans;
+ /* cmd word 1 */
+ __le16 max_fdb_entries;
+ __le16 fdb_aging_time;
+ __le32 dpsw_id;
+ /* cmd word 2 */
+ __le16 mem_size;
+ __le16 max_fdb_mc_groups;
+ u8 max_meters_per_if;
+ /* from LSB only the first 4 bits */
+ u8 component_type;
+ /* [0:3] - flooding configuration
+ * [4:7] - broadcast configuration
+ */
+ u8 repl_cfg;
+ u8 pad;
+ /* cmd word 3 */
+ __le64 options;
+};
+
+#define DPSW_VLAN_ID_SHIFT 0
+#define DPSW_VLAN_ID_SIZE 12
+#define DPSW_DEI_SHIFT 12
+#define DPSW_DEI_SIZE 1
+#define DPSW_PCP_SHIFT 13
+#define DPSW_PCP_SIZE 3
+
+struct dpsw_cmd_if_set_tci {
+ __le16 if_id;
+ /* from LSB: VLAN_ID:12 DEI:1 PCP:3 */
+ __le16 conf;
+};
+
+struct dpsw_cmd_if_get_tci {
+ __le16 if_id;
+};
+
+struct dpsw_rsp_if_get_tci {
+ __le16 pad;
+ __le16 vlan_id;
+ u8 dei;
+ u8 pcp;
+};
+
+#define DPSW_STATE_SHIFT 0
+#define DPSW_STATE_SIZE 4
+
+struct dpsw_cmd_if_set_stp {
+ __le16 if_id;
+ __le16 vlan_id;
+ /* only the first LSB 4 bits */
+ u8 state;
+};
+
+#define DPSW_COUNTER_TYPE_SHIFT 0
+#define DPSW_COUNTER_TYPE_SIZE 5
+
+struct dpsw_cmd_if_get_counter {
+ __le16 if_id;
+ /* from LSB: type:5 */
+ u8 type;
+};
+
+struct dpsw_rsp_if_get_counter {
+ __le64 pad;
+ __le64 counter;
+};
+
+struct dpsw_cmd_if {
+ __le16 if_id;
+};
+
+#define DPSW_ADMIT_UNTAGGED_SHIFT 0
+#define DPSW_ADMIT_UNTAGGED_SIZE 4
+#define DPSW_ENABLED_SHIFT 5
+#define DPSW_ENABLED_SIZE 1
+#define DPSW_ACCEPT_ALL_VLAN_SHIFT 6
+#define DPSW_ACCEPT_ALL_VLAN_SIZE 1
+
+struct dpsw_rsp_if_get_attr {
+ /* cmd word 0 */
+ /* from LSB: admit_untagged:4 enabled:1 accept_all_vlan:1 */
+ u8 conf;
+ u8 pad1;
+ u8 num_tcs;
+ u8 pad2;
+ __le16 qdid;
+ /* cmd word 1 */
+ __le32 options;
+ __le32 pad3;
+ /* cmd word 2 */
+ __le32 rate;
+};
+
+struct dpsw_cmd_if_set_max_frame_length {
+ __le16 if_id;
+ __le16 frame_length;
+};
+
+struct dpsw_cmd_if_set_link_cfg {
+ /* cmd word 0 */
+ __le16 if_id;
+ u8 pad[6];
+ /* cmd word 1 */
+ __le32 rate;
+ __le32 pad1;
+ /* cmd word 2 */
+ __le64 options;
+};
+
+struct dpsw_cmd_if_get_link_state {
+ __le16 if_id;
+};
+
+#define DPSW_UP_SHIFT 0
+#define DPSW_UP_SIZE 1
+
+struct dpsw_rsp_if_get_link_state {
+ /* cmd word 0 */
+ __le32 pad0;
+ u8 up;
+ u8 pad1[3];
+ /* cmd word 1 */
+ __le32 rate;
+ __le32 pad2;
+ /* cmd word 2 */
+ __le64 options;
+};
+
+struct dpsw_vlan_add {
+ __le16 fdb_id;
+ __le16 vlan_id;
+};
+
+struct dpsw_cmd_vlan_add_if {
+ /* cmd word 0 */
+ __le16 options;
+ __le16 vlan_id;
+ __le16 fdb_id;
+ __le16 pad0;
+ /* cmd word 1-4 */
+ __le64 if_id;
+};
+
+struct dpsw_cmd_vlan_manage_if {
+ /* cmd word 0 */
+ __le16 pad0;
+ __le16 vlan_id;
+ __le32 pad1;
+ /* cmd word 1-4 */
+ __le64 if_id;
+};
+
+struct dpsw_cmd_vlan_remove {
+ __le16 pad;
+ __le16 vlan_id;
+};
+
+struct dpsw_cmd_fdb_add {
+ __le32 pad;
+ __le16 fdb_ageing_time;
+ __le16 num_fdb_entries;
+};
+
+struct dpsw_rsp_fdb_add {
+ __le16 fdb_id;
+};
+
+struct dpsw_cmd_fdb_remove {
+ __le16 fdb_id;
+};
+
+#define DPSW_ENTRY_TYPE_SHIFT 0
+#define DPSW_ENTRY_TYPE_SIZE 4
+
+struct dpsw_cmd_fdb_unicast_op {
+ /* cmd word 0 */
+ __le16 fdb_id;
+ u8 mac_addr[6];
+ /* cmd word 1 */
+ __le16 if_egress;
+ /* only the first 4 bits from LSB */
+ u8 type;
+};
+
+struct dpsw_cmd_fdb_multicast_op {
+ /* cmd word 0 */
+ __le16 fdb_id;
+ __le16 num_ifs;
+ /* only the first 4 bits from LSB */
+ u8 type;
+ u8 pad[3];
+ /* cmd word 1 */
+ u8 mac_addr[6];
+ __le16 pad2;
+ /* cmd word 2-5 */
+ __le64 if_id;
+};
+
+struct dpsw_cmd_fdb_dump {
+ __le16 fdb_id;
+ __le16 pad0;
+ __le32 pad1;
+ __le64 iova_addr;
+ __le32 iova_size;
+};
+
+struct dpsw_rsp_fdb_dump {
+ __le16 num_entries;
+};
+
+struct dpsw_rsp_ctrl_if_get_attr {
+ __le64 pad;
+ __le32 rx_fqid;
+ __le32 rx_err_fqid;
+ __le32 tx_err_conf_fqid;
+};
+
+#define DPSW_BACKUP_POOL(val, order) (((val) & 0x1) << (order))
+struct dpsw_cmd_ctrl_if_set_pools {
+ u8 num_dpbp;
+ u8 backup_pool_mask;
+ __le16 pad;
+ __le32 dpbp_id[DPSW_MAX_DPBP];
+ __le16 buffer_size[DPSW_MAX_DPBP];
+};
+
+#define DPSW_DEST_TYPE_SHIFT 0
+#define DPSW_DEST_TYPE_SIZE 4
+
+struct dpsw_cmd_ctrl_if_set_queue {
+ __le32 dest_id;
+ u8 dest_priority;
+ u8 pad;
+ /* from LSB: dest_type:4 */
+ u8 dest_type;
+ u8 qtype;
+ __le64 user_ctx;
+ __le32 options;
+};
+
+struct dpsw_rsp_get_api_version {
+ __le16 version_major;
+ __le16 version_minor;
+};
+
+struct dpsw_rsp_if_get_mac_addr {
+ __le16 pad;
+ u8 mac_addr[6];
+};
+
+struct dpsw_cmd_set_egress_flood {
+ __le16 fdb_id;
+ u8 flood_type;
+ u8 pad[5];
+ __le64 if_id;
+};
+
+#define DPSW_LEARNING_MODE_SHIFT 0
+#define DPSW_LEARNING_MODE_SIZE 4
+
+struct dpsw_cmd_if_set_learning_mode {
+ __le16 if_id;
+ /* only the first 4 bits from LSB */
+ u8 mode;
+};
+
+struct dpsw_cmd_acl_add {
+ __le16 pad;
+ __le16 max_entries;
+};
+
+struct dpsw_rsp_acl_add {
+ __le16 acl_id;
+};
+
+struct dpsw_cmd_acl_remove {
+ __le16 acl_id;
+};
+
+struct dpsw_cmd_acl_if {
+ __le16 acl_id;
+ __le16 num_ifs;
+ __le32 pad;
+ __le64 if_id;
+};
+
+struct dpsw_prep_acl_entry {
+ u8 match_l2_dest_mac[6];
+ __le16 match_l2_tpid;
+
+ u8 match_l2_source_mac[6];
+ __le16 match_l2_vlan_id;
+
+ __le32 match_l3_dest_ip;
+ __le32 match_l3_source_ip;
+
+ __le16 match_l4_dest_port;
+ __le16 match_l4_source_port;
+ __le16 match_l2_ether_type;
+ u8 match_l2_pcp_dei;
+ u8 match_l3_dscp;
+
+ u8 mask_l2_dest_mac[6];
+ __le16 mask_l2_tpid;
+
+ u8 mask_l2_source_mac[6];
+ __le16 mask_l2_vlan_id;
+
+ __le32 mask_l3_dest_ip;
+ __le32 mask_l3_source_ip;
+
+ __le16 mask_l4_dest_port;
+ __le16 mask_l4_source_port;
+ __le16 mask_l2_ether_type;
+ u8 mask_l2_pcp_dei;
+ u8 mask_l3_dscp;
+
+ u8 match_l3_protocol;
+ u8 mask_l3_protocol;
+};
+
+#define DPSW_RESULT_ACTION_SHIFT 0
+#define DPSW_RESULT_ACTION_SIZE 4
+
+struct dpsw_cmd_acl_entry {
+ __le16 acl_id;
+ __le16 result_if_id;
+ __le32 precedence;
+ /* from LSB only the first 4 bits */
+ u8 result_action;
+ u8 pad[7];
+ __le64 pad2[4];
+ __le64 key_iova;
+};
+
+struct dpsw_cmd_set_reflection_if {
+ __le16 if_id;
+};
+
+#define DPSW_FILTER_SHIFT 0
+#define DPSW_FILTER_SIZE 2
+
+struct dpsw_cmd_if_reflection {
+ __le16 if_id;
+ __le16 vlan_id;
+ /* only 2 bits from the LSB */
+ u8 filter;
+};
+#pragma pack(pop)
+#endif /* __FSL_DPSW_CMD_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw.c b/drivers/net/ethernet/freescale/dpaa2/dpsw.c
new file mode 100644
index 000000000..ab921d75d
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpsw.c
@@ -0,0 +1,1661 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2021 NXP
+ *
+ */
+
+#include <linux/fsl/mc.h>
+#include "dpsw.h"
+#include "dpsw-cmd.h"
+
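+/*
+ * Build a little-endian bitmap of interface IDs: interface N sets bit
+ * (N % 64) of 64-bit word (N / 64). IDs outside [0, DPSW_MAX_IF) are
+ * silently skipped; e.g. ids {1, 5, 64} set bits 1 and 5 of word 0 and
+ * bit 0 of word 1.
+ */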
+static void build_if_id_bitmap(__le64 *bmap, const u16 *id, const u16 num_ifs)
+{
+ int i;
+
+ for (i = 0; (i < num_ifs) && (i < DPSW_MAX_IF); i++) {
+ if (id[i] < DPSW_MAX_IF)
+ bmap[id[i] / 64] |= cpu_to_le64(BIT_MASK(id[i] % 64));
+ }
+}
+
+/**
+ * dpsw_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpsw_id: DPSW unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpsw_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpsw_id, u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_open *cmd_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dpsw_cmd_open *)cmd.params;
+ cmd_params->dpsw_id = cpu_to_le32(dpsw_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
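+
+/*
+ * Session sketch (hypothetical caller, not part of this file): the token
+ * returned by dpsw_open() authenticates every subsequent command for this
+ * object and must be released with dpsw_close(). The wrapper function is an
+ * assumption; dpsw_id would come from the DPL or from bus enumeration.
+ *
+ *	static int example_dpsw_session(struct fsl_mc_io *mc_io, int dpsw_id)
+ *	{
+ *		u16 token;
+ *		int err;
+ *
+ *		err = dpsw_open(mc_io, 0, dpsw_id, &token);
+ *		if (err)
+ *			return err;
+ *
+ *		err = dpsw_enable(mc_io, 0, token);
+ *
+ *		Always close the control session, even on error:
+ *		dpsw_close(mc_io, 0, token);
+ *		return err;
+ *	}
+ */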
+
+/**
+ * dpsw_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_enable() - Enable DPSW functionality
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_disable() - Disable DPSW functionality
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_reset() - Reset the DPSW, returning the object to its initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_RESET,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_set_irq_enable() - Set overall interrupt state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPSW object
+ * @irq_index: The interrupt index to configure
+ * @en: Interrupt state - enable = 1, disable = 0
+ *
+ * Allows GPP software to control when interrupts are generated.
+ * Each interrupt can have up to 32 causes. The enable/disable controls the
+ * overall interrupt state: if the interrupt is disabled, no cause will raise
+ * an interrupt.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u8 en)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_set_irq_enable *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_ENABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_set_irq_enable *)cmd.params;
+ dpsw_set_field(cmd_params->enable_state, ENABLE, en);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_set_irq_mask() - Set interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPSW object
+ * @irq_index: The interrupt index to configure
+ * @mask: Event mask to trigger interrupt;
+ * each bit:
+ * 0 = ignore event
+ * 1 = consider event for asserting IRQ
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 mask)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_set_irq_mask *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_MASK,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_set_irq_mask *)cmd.params;
+ cmd_params->mask = cpu_to_le32(mask);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_get_irq_status() - Get the current status of any pending interrupts
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @irq_index: The interrupt index to configure
+ * @status: Returned interrupts status - one bit per cause:
+ * 0 = no interrupt pending
+ * 1 = interrupt pending
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 *status)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_get_irq_status *cmd_params;
+ struct dpsw_rsp_get_irq_status *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_STATUS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_get_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(*status);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpsw_rsp_get_irq_status *)cmd.params;
+ *status = le32_to_cpu(rsp_params->status);
+
+ return 0;
+}
+
+/**
+ * dpsw_clear_irq_status() - Clear a pending interrupt's status
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPSW object
+ * @irq_index: The interrupt index to configure
+ * @status: bits to clear (W1C) - one bit per cause:
+ * 0 = don't change
+ * 1 = clear status bit
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 status)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_clear_irq_status *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLEAR_IRQ_STATUS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_clear_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(status);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_get_attributes() - Retrieve DPSW attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @attr: Returned DPSW attributes
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpsw_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_rsp_get_attr *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpsw_rsp_get_attr *)cmd.params;
+ attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
+ attr->max_fdbs = rsp_params->max_fdbs;
+ attr->num_fdbs = rsp_params->num_fdbs;
+ attr->max_vlans = le16_to_cpu(rsp_params->max_vlans);
+ attr->num_vlans = le16_to_cpu(rsp_params->num_vlans);
+ attr->max_fdb_entries = le16_to_cpu(rsp_params->max_fdb_entries);
+ attr->fdb_aging_time = le16_to_cpu(rsp_params->fdb_aging_time);
+ attr->id = le32_to_cpu(rsp_params->dpsw_id);
+ attr->mem_size = le16_to_cpu(rsp_params->mem_size);
+ attr->max_fdb_mc_groups = le16_to_cpu(rsp_params->max_fdb_mc_groups);
+ attr->max_meters_per_if = rsp_params->max_meters_per_if;
+ attr->options = le64_to_cpu(rsp_params->options);
+ attr->component_type = dpsw_get_field(rsp_params->component_type, COMPONENT_TYPE);
+ attr->flooding_cfg = dpsw_get_field(rsp_params->repl_cfg, FLOODING_CFG);
+ attr->broadcast_cfg = dpsw_get_field(rsp_params->repl_cfg, BROADCAST_CFG);
+ return 0;
+}
+
+/**
+ * dpsw_if_set_link_cfg() - Set the link configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface id
+ * @cfg: Link configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id,
+ struct dpsw_link_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if_set_link_cfg *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_LINK_CFG,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_set_link_cfg *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ cmd_params->rate = cpu_to_le32(cfg->rate);
+ cmd_params->options = cpu_to_le64(cfg->options);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_get_link_state() - Return the link state
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface id
+ * @state:	Returned link state: 1 - link up, 0 - link down or disconnected
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_if_get_link_state(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, struct dpsw_link_state *state)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if_get_link_state *cmd_params;
+ struct dpsw_rsp_if_get_link_state *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_LINK_STATE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_get_link_state *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpsw_rsp_if_get_link_state *)cmd.params;
+ state->rate = le32_to_cpu(rsp_params->rate);
+ state->options = le64_to_cpu(rsp_params->options);
+ state->up = dpsw_get_field(rsp_params->up, UP);
+
+ return 0;
+}
+
+/**
+ * dpsw_if_set_tci() - Set default VLAN Tag Control Information (TCI)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @cfg: Tag Control Information Configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_set_tci(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id,
+ const struct dpsw_tci_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if_set_tci *cmd_params;
+ u16 tmp_conf = 0;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TCI,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_set_tci *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ dpsw_set_field(tmp_conf, VLAN_ID, cfg->vlan_id);
+ dpsw_set_field(tmp_conf, DEI, cfg->dei);
+ dpsw_set_field(tmp_conf, PCP, cfg->pcp);
+ cmd_params->conf = cpu_to_le16(tmp_conf);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_get_tci() - Get default VLAN Tag Control Information (TCI)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @cfg: Tag Control Information Configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_get_tci(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id,
+ struct dpsw_tci_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if_get_tci *cmd_params;
+ struct dpsw_rsp_if_get_tci *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_TCI,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_get_tci *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpsw_rsp_if_get_tci *)cmd.params;
+ cfg->pcp = rsp_params->pcp;
+ cfg->dei = rsp_params->dei;
+ cfg->vlan_id = le16_to_cpu(rsp_params->vlan_id);
+
+ return 0;
+}
+
+/**
+ * dpsw_if_set_stp() - Set the Spanning Tree Protocol (STP) state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @cfg: STP State configuration parameters
+ *
+ * The following STP states are supported -
+ * blocking, listening, learning, forwarding and disabled.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_set_stp(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id,
+ const struct dpsw_stp_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if_set_stp *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_STP,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_set_stp *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
+ dpsw_set_field(cmd_params->state, STATE, cfg->state);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_get_counter() - Get a specific counter of a particular interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @type: Counter type
+ * @counter:	Returned counter value
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, enum dpsw_counter type, u64 *counter)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if_get_counter *cmd_params;
+ struct dpsw_rsp_if_get_counter *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_COUNTER,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_get_counter *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ dpsw_set_field(cmd_params->type, COUNTER_TYPE, type);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpsw_rsp_if_get_counter *)cmd.params;
+ *counter = le64_to_cpu(rsp_params->counter);
+
+ return 0;
+}
+
+/**
+ * dpsw_if_enable() - Enable Interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ENABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_disable() - Disable Interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_DISABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_get_attributes() - Obtain the attributes of an interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @attr: Returned interface attributes
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, struct dpsw_if_attr *attr)
+{
+ struct dpsw_rsp_if_get_attr *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if *cmd_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_ATTR, cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpsw_rsp_if_get_attr *)cmd.params;
+ attr->num_tcs = rsp_params->num_tcs;
+ attr->rate = le32_to_cpu(rsp_params->rate);
+ attr->options = le32_to_cpu(rsp_params->options);
+ attr->qdid = le16_to_cpu(rsp_params->qdid);
+ attr->enabled = dpsw_get_field(rsp_params->conf, ENABLED);
+ attr->accept_all_vlan = dpsw_get_field(rsp_params->conf,
+ ACCEPT_ALL_VLAN);
+ attr->admit_untagged = dpsw_get_field(rsp_params->conf,
+ ADMIT_UNTAGGED);
+
+ return 0;
+}
+
+/**
+ * dpsw_if_set_max_frame_length() - Set the maximum receive frame length.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @frame_length: Maximum Frame Length
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, u16 frame_length)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if_set_max_frame_length *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_set_max_frame_length *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ cmd_params->frame_length = cpu_to_le16(frame_length);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_vlan_add() - Add a new VLAN to the DPSW.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ * @cfg: VLAN configuration
+ *
+ * Only VLAN ID and FDB ID are required parameters here.
+ * The 12-bit VLAN ID is defined in IEEE 802.1Q.
+ * Adding a duplicate VLAN ID is not allowed.
+ * An FDB ID can be shared across multiple VLANs. Shared learning
+ * is obtained by calling dpsw_vlan_add() for multiple VLAN IDs
+ * with the same fdb_id.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_vlan_add *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_vlan_add *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(cfg->fdb_id);
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_vlan_add_if() - Add a set of interfaces to an existing VLAN.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ * @cfg: Set of interfaces to add
+ *
+ * Only interfaces that do not yet belong to this VLAN can be
+ * added; otherwise an error is generated and the entire command
+ * is ignored. This function can be called repeatedly, each time
+ * providing only the required delta of interfaces.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_add_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg)
+{
+ struct dpsw_cmd_vlan_add_if *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_vlan_add_if *)cmd.params;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+ cmd_params->options = cpu_to_le16(cfg->options);
+ cmd_params->fdb_id = cpu_to_le16(cfg->fdb_id);
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_vlan_add_if_untagged() - Define a set of interfaces on which frames
+ * are transmitted untagged.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ * @cfg: Set of interfaces that should be transmitted as untagged
+ *
+ * These interfaces should already belong to this VLAN.
+ * By default, all interfaces transmit frames as tagged.
+ * Providing a nonexistent interface, or an interface that is
+ * already configured as untagged, generates an error and the
+ * entire command is ignored.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_vlan_manage_if *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_UNTAGGED,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_vlan_remove_if() - Remove interfaces from an existing VLAN.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ * @cfg: Set of interfaces that should be removed
+ *
+ * Interfaces must belong to this VLAN; otherwise an error
+ * is returned and the command is ignored.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_vlan_manage_if *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_vlan_remove_if_untagged() - Convert a set of interfaces from
+ * transmitting frames untagged to transmitting them tagged.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ * @cfg: Set of interfaces that should be removed
+ *
+ * The interfaces provided to this API must belong to this VLAN
+ * and be configured as untagged; otherwise an error is returned
+ * and the command is ignored.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_vlan_manage_if *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_vlan_remove() - Remove an entire VLAN
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_vlan_remove *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_vlan_remove *)cmd.params;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_fdb_add() - Add an FDB to the switch and return a handle to the
+ * FDB table for later reference
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Returned Forwarding Database Identifier
+ * @cfg: FDB Configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_fdb_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 *fdb_id,
+ const struct dpsw_fdb_cfg *cfg)
+{
+ struct dpsw_cmd_fdb_add *cmd_params;
+ struct dpsw_rsp_fdb_add *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_fdb_add *)cmd.params;
+ cmd_params->fdb_ageing_time = cpu_to_le16(cfg->fdb_ageing_time);
+ cmd_params->num_fdb_entries = cpu_to_le16(cfg->num_fdb_entries);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpsw_rsp_fdb_add *)cmd.params;
+ *fdb_id = le16_to_cpu(rsp_params->fdb_id);
+
+ return 0;
+}
+
+/**
+ * dpsw_fdb_remove() - Remove FDB from switch
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_fdb_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 fdb_id)
+{
+ struct dpsw_cmd_fdb_remove *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_fdb_remove *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_fdb_add_unicast() - Add a unicast entry to the MAC lookup table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ * @cfg: Unicast entry configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 fdb_id, const struct dpsw_fdb_unicast_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_fdb_unicast_op *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_UNICAST,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_fdb_unicast_op *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
+ cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
+ for (i = 0; i < 6; i++)
+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
+ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_fdb_dump() - Dump the content of FDB table into memory.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ * @iova_addr: Data will be stored here as an array of struct fdb_dump_entry
+ * @iova_size: Memory size allocated at iova_addr
+ * @num_entries: Number of entries written at iova_addr
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ *
+ * The memory allocated at iova_addr must be zeroed before command
+ * execution. If the FDB table does not fit into the memory, the MC
+ * will stop once the memory is filled up.
+ * The struct fdb_dump_entry array must be parsed until the end of
+ * the memory area or until an entry with mac_addr set to zero is found.
+ */
+int dpsw_fdb_dump(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 fdb_id,
+ u64 iova_addr, u32 iova_size, u16 *num_entries)
+{
+ struct dpsw_cmd_fdb_dump *cmd_params;
+ struct dpsw_rsp_fdb_dump *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_DUMP,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_fdb_dump *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
+ cmd_params->iova_addr = cpu_to_le64(iova_addr);
+ cmd_params->iova_size = cpu_to_le32(iova_size);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpsw_rsp_fdb_dump *)cmd.params;
+ *num_entries = le16_to_cpu(rsp_params->num_entries);
+
+ return 0;
+}
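+
+/*
+ * Parsing sketch (hypothetical caller, not part of this file): as noted
+ * above, the dump area must be zeroed beforehand and is scanned until the
+ * end of the buffer or until an all-zero MAC address is found. The walker
+ * below and its use of is_zero_ether_addr() on struct fdb_dump_entry are
+ * assumptions for illustration.
+ *
+ *	static u16 example_walk_fdb_dump(void *dump, u16 num_entries)
+ *	{
+ *		struct fdb_dump_entry *entry = dump;
+ *		u16 i;
+ *
+ *		for (i = 0; i < num_entries; i++, entry++) {
+ *			if (is_zero_ether_addr(entry->mac_addr))
+ *				break;
+ *			... process entry here ...
+ *		}
+ *		return i;
+ *	}
+ */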
+
+/**
+ * dpsw_fdb_remove_unicast() - Remove a unicast entry from the MAC lookup table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ * @cfg: Unicast entry configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 fdb_id, const struct dpsw_fdb_unicast_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_fdb_unicast_op *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_UNICAST,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_fdb_unicast_op *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
+ for (i = 0; i < 6; i++)
+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
+ cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
+ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_fdb_add_multicast() - Add a set of egress interfaces to a multicast group
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ * @cfg: Multicast entry configuration
+ *
+ * If the group does not exist, it is created.
+ * Only interfaces that do not yet belong to this multicast group
+ * can be added; otherwise an error is generated and the command
+ * is ignored.
+ * This function may be called repeatedly, each time providing
+ * only the required delta of interfaces.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 fdb_id, const struct dpsw_fdb_multicast_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_fdb_multicast_op *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_MULTICAST,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_fdb_multicast_op *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
+ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
+ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+ for (i = 0; i < 6; i++)
+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_fdb_remove_multicast() - Remove interfaces from an existing multicast
+ * group.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ * @cfg: Multicast entry configuration
+ *
+ * The interfaces provided to this API must exist in the group;
+ * otherwise an error is returned and the entire command is
+ * ignored. If no interface is left in the group, the entire
+ * group is deleted.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 fdb_id, const struct dpsw_fdb_multicast_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_fdb_multicast_op *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_MULTICAST,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_fdb_multicast_op *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
+ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
+ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+ for (i = 0; i < 6; i++)
+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_ctrl_if_get_attributes() - Obtain control interface attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @attr: Returned control interface attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, struct dpsw_ctrl_if_attr *attr)
+{
+ struct dpsw_rsp_ctrl_if_get_attr *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_GET_ATTR,
+ cmd_flags, token);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpsw_rsp_ctrl_if_get_attr *)cmd.params;
+ attr->rx_fqid = le32_to_cpu(rsp_params->rx_fqid);
+ attr->rx_err_fqid = le32_to_cpu(rsp_params->rx_err_fqid);
+ attr->tx_err_conf_fqid = le32_to_cpu(rsp_params->tx_err_conf_fqid);
+
+ return 0;
+}
+
+/**
+ * dpsw_ctrl_if_set_pools() - Set control interface buffer pools
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @cfg: Buffer pools configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ const struct dpsw_ctrl_if_pools_cfg *cfg)
+{
+ struct dpsw_cmd_ctrl_if_set_pools *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ int i;
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_SET_POOLS,
+ cmd_flags, token);
+ cmd_params = (struct dpsw_cmd_ctrl_if_set_pools *)cmd.params;
+ cmd_params->num_dpbp = cfg->num_dpbp;
+ for (i = 0; i < DPSW_MAX_DPBP; i++) {
+ cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);
+ cmd_params->buffer_size[i] =
+ cpu_to_le16(cfg->pools[i].buffer_size);
+ cmd_params->backup_pool_mask |=
+ DPSW_BACKUP_POOL(cfg->pools[i].backup_pool, i);
+ }
+
+ return mc_send_command(mc_io, &cmd);
+}
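+
+/*
+ * Configuration sketch (hypothetical caller, not part of this file): each
+ * of the up to DPSW_MAX_DPBP pools names a DPBP object, its buffer size and
+ * whether it serves as a backup pool. The field names follow struct
+ * dpsw_ctrl_if_pools_cfg as used above; the values are placeholders.
+ *
+ *	struct dpsw_ctrl_if_pools_cfg cfg = {
+ *		.num_dpbp = 1,
+ *		.pools = {
+ *			{
+ *				.dpbp_id = dpbp_id,
+ *				.buffer_size = 2048,
+ *				.backup_pool = 0,
+ *			},
+ *		},
+ *	};
+ *
+ *	err = dpsw_ctrl_if_set_pools(mc_io, 0, token, &cfg);
+ */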
+
+/**
+ * dpsw_ctrl_if_set_queue() - Set Rx queue configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of dpsw object
+ * @qtype: dpsw_queue_type of the targeted queue
+ * @cfg: Rx queue configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_ctrl_if_set_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ enum dpsw_queue_type qtype,
+ const struct dpsw_ctrl_if_queue_cfg *cfg)
+{
+ struct dpsw_cmd_ctrl_if_set_queue *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_SET_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_ctrl_if_set_queue *)cmd.params;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->dest_priority = cfg->dest_cfg.priority;
+ cmd_params->qtype = qtype;
+ cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
+ cmd_params->options = cpu_to_le32(cfg->options);
+ dpsw_set_field(cmd_params->dest_type,
+ DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_get_api_version() - Get Data Path Switch API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path switch API
+ * @minor_ver: Minor version of data path switch API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_rsp_get_api_version *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_API_VERSION,
+ cmd_flags,
+ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpsw_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->version_major);
+ *minor_ver = le16_to_cpu(rsp_params->version_minor);
+
+ return 0;
+}
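+
+/*
+ * Compatibility sketch (hypothetical caller, not part of this file): a
+ * driver would typically compare the firmware's API version against the
+ * DPSW_VER_MAJOR / DPSW_VER_MINOR it was built for before issuing further
+ * commands. The wrapper function is an assumption.
+ *
+ *	static int example_check_dpsw_version(struct fsl_mc_io *mc_io)
+ *	{
+ *		u16 major, minor;
+ *		int err;
+ *
+ *		err = dpsw_get_api_version(mc_io, 0, &major, &minor);
+ *		if (err)
+ *			return err;
+ *
+ *		if (major != DPSW_VER_MAJOR || minor < DPSW_VER_MINOR)
+ *			return -EOPNOTSUPP;
+ *
+ *		return 0;
+ *	}
+ */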
+
+/**
+ * dpsw_if_get_port_mac_addr() - Retrieve the MAC address associated with the physical port
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @mac_addr: MAC address of the physical port, if any, otherwise 0
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_get_port_mac_addr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, u8 mac_addr[6])
+{
+ struct dpsw_rsp_if_get_mac_addr *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if *cmd_params;
+ int err, i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_PORT_MAC_ADDR,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+
+	/* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpsw_rsp_if_get_mac_addr *)cmd.params;
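+	/* the MC reports the address in reversed byte order; flip it back */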
+ for (i = 0; i < 6; i++)
+ mac_addr[5 - i] = rsp_params->mac_addr[i];
+
+ return 0;
+}
+
+/**
+ * dpsw_ctrl_if_enable() - Enable control interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_ENABLE, cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_ctrl_if_disable() - Disable control interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_DISABLE,
+ cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_set_egress_flood() - Set egress parameters associated with an FDB ID
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @cfg: Egress flooding configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_set_egress_flood(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ const struct dpsw_egress_flood_cfg *cfg)
+{
+ struct dpsw_cmd_set_egress_flood *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_EGRESS_FLOOD, cmd_flags, token);
+ cmd_params = (struct dpsw_cmd_set_egress_flood *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(cfg->fdb_id);
+ cmd_params->flood_type = cfg->flood_type;
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ return mc_send_command(mc_io, &cmd);
+}
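+
+/*
+ * Usage sketch (editorial): restricting broadcast flooding of one FDB to
+ * two interfaces; 'fdb_id' is assumed to come from dpsw_fdb_add() or the
+ * DPSW attributes.
+ *
+ *	struct dpsw_egress_flood_cfg flood_cfg = {
+ *		.fdb_id = fdb_id,
+ *		.flood_type = DPSW_BROADCAST,
+ *		.num_ifs = 2,
+ *		.if_id = { 0, 1 },
+ *	};
+ *	int err;
+ *
+ *	err = dpsw_set_egress_flood(mc_io, 0, token, &flood_cfg);
+ */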
+
+/**
+ * dpsw_if_set_learning_mode() - Configure the learning mode on an interface.
+ * If this API is used, it will take precedence over the FDB configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface ID
+ * @mode: Learning mode
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_set_learning_mode(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, enum dpsw_learning_mode mode)
+{
+ struct dpsw_cmd_if_set_learning_mode *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_LEARNING_MODE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_set_learning_mode *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ dpsw_set_field(cmd_params->mode, LEARNING_MODE, mode);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_acl_add() - Create an ACL table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @acl_id: Returned ACL ID, for future references
+ * @cfg: ACL configuration
+ *
+ * Create an Access Control List table. Multiple ACLs can be created and
+ * co-exist in the L2 switch.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_acl_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 *acl_id,
+ const struct dpsw_acl_cfg *cfg)
+{
+ struct dpsw_cmd_acl_add *cmd_params;
+ struct dpsw_rsp_acl_add *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD, cmd_flags, token);
+ cmd_params = (struct dpsw_cmd_acl_add *)cmd.params;
+ cmd_params->max_entries = cpu_to_le16(cfg->max_entries);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpsw_rsp_acl_add *)cmd.params;
+ *acl_id = le16_to_cpu(rsp_params->acl_id);
+
+ return 0;
+}
+
+/**
+ * dpsw_acl_remove() - Remove an ACL table from L2 switch.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @acl_id: ACL ID
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_acl_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id)
+{
+ struct dpsw_cmd_acl_remove *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE, cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_acl_remove *)cmd.params;
+ cmd_params->acl_id = cpu_to_le16(acl_id);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_acl_add_if() - Associate one or more interfaces with an ACL table.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @acl_id: ACL ID
+ * @cfg: Interfaces list
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_acl_add_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id, const struct dpsw_acl_if_cfg *cfg)
+{
+ struct dpsw_cmd_acl_if *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_IF, cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_acl_if *)cmd.params;
+ cmd_params->acl_id = cpu_to_le16(acl_id);
+ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_acl_remove_if() - Disassociate one or more interfaces from an ACL table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @acl_id: ACL ID
+ * @cfg: Interfaces list
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_acl_remove_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id, const struct dpsw_acl_if_cfg *cfg)
+{
+ struct dpsw_cmd_acl_if *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_IF, cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_acl_if *)cmd.params;
+ cmd_params->acl_id = cpu_to_le16(acl_id);
+ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+	/* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_acl_prepare_entry_cfg() - Set up an ACL entry
+ * @key: Key
+ * @entry_cfg_buf: Zeroed 256-byte buffer, to be filled in here and then
+ *	mapped for DMA
+ *
+ * This function has to be called before adding or removing an ACL entry
+ *
+ */
+void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
+ u8 *entry_cfg_buf)
+{
+ struct dpsw_prep_acl_entry *ext_params;
+ int i;
+
+ ext_params = (struct dpsw_prep_acl_entry *)entry_cfg_buf;
+
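+	/* the MC expects MAC addresses in reversed byte order */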
+ for (i = 0; i < 6; i++) {
+ ext_params->match_l2_dest_mac[i] = key->match.l2_dest_mac[5 - i];
+ ext_params->match_l2_source_mac[i] = key->match.l2_source_mac[5 - i];
+ ext_params->mask_l2_dest_mac[i] = key->mask.l2_dest_mac[5 - i];
+ ext_params->mask_l2_source_mac[i] = key->mask.l2_source_mac[5 - i];
+ }
+
+ ext_params->match_l2_tpid = cpu_to_le16(key->match.l2_tpid);
+ ext_params->match_l2_vlan_id = cpu_to_le16(key->match.l2_vlan_id);
+ ext_params->match_l3_dest_ip = cpu_to_le32(key->match.l3_dest_ip);
+ ext_params->match_l3_source_ip = cpu_to_le32(key->match.l3_source_ip);
+ ext_params->match_l4_dest_port = cpu_to_le16(key->match.l4_dest_port);
+ ext_params->match_l4_source_port = cpu_to_le16(key->match.l4_source_port);
+ ext_params->match_l2_ether_type = cpu_to_le16(key->match.l2_ether_type);
+ ext_params->match_l2_pcp_dei = key->match.l2_pcp_dei;
+ ext_params->match_l3_dscp = key->match.l3_dscp;
+
+ ext_params->mask_l2_tpid = cpu_to_le16(key->mask.l2_tpid);
+ ext_params->mask_l2_vlan_id = cpu_to_le16(key->mask.l2_vlan_id);
+ ext_params->mask_l3_dest_ip = cpu_to_le32(key->mask.l3_dest_ip);
+ ext_params->mask_l3_source_ip = cpu_to_le32(key->mask.l3_source_ip);
+ ext_params->mask_l4_dest_port = cpu_to_le16(key->mask.l4_dest_port);
+ ext_params->mask_l4_source_port = cpu_to_le16(key->mask.l4_source_port);
+ ext_params->mask_l2_ether_type = cpu_to_le16(key->mask.l2_ether_type);
+ ext_params->mask_l2_pcp_dei = key->mask.l2_pcp_dei;
+ ext_params->mask_l3_dscp = key->mask.l3_dscp;
+ ext_params->match_l3_protocol = key->match.l3_protocol;
+ ext_params->mask_l3_protocol = key->mask.l3_protocol;
+}
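+
+/*
+ * Usage sketch (editorial): the prepare/map/add sequence for an ACL
+ * entry. 'dev' is assumed to be the caller's fsl_mc device, 'key' a
+ * filled-in struct dpsw_acl_key and 'acl_id' the ID returned by
+ * dpsw_acl_add(); mapping-error checks are trimmed for brevity.
+ *
+ *	struct dpsw_acl_entry_cfg entry_cfg = {
+ *		.precedence = 0,
+ *		.result.action = DPSW_ACL_ACTION_DROP,
+ *	};
+ *	u8 *buf = kzalloc(256, GFP_KERNEL);
+ *	int err;
+ *
+ *	dpsw_acl_prepare_entry_cfg(&key, buf);
+ *	entry_cfg.key_iova = dma_map_single(dev, buf, 256, DMA_TO_DEVICE);
+ *	err = dpsw_acl_add_entry(mc_io, 0, token, acl_id, &entry_cfg);
+ *	dma_unmap_single(dev, entry_cfg.key_iova, 256, DMA_TO_DEVICE);
+ *	kfree(buf);
+ */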
+
+/**
+ * dpsw_acl_add_entry() - Add a rule to the ACL table.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @acl_id: ACL ID
+ * @cfg: Entry configuration
+ *
+ * Warning: This function has to be called after dpsw_acl_prepare_entry_cfg()
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_acl_add_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id, const struct dpsw_acl_entry_cfg *cfg)
+{
+ struct dpsw_cmd_acl_entry *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_ENTRY, cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params;
+ cmd_params->acl_id = cpu_to_le16(acl_id);
+ cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id);
+ cmd_params->precedence = cpu_to_le32(cfg->precedence);
+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+ dpsw_set_field(cmd_params->result_action,
+ RESULT_ACTION,
+ cfg->result.action);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_acl_remove_entry() - Removes an entry from ACL.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @acl_id: ACL ID
+ * @cfg: Entry configuration
+ *
+ * Warning: This function has to be called after dpsw_acl_prepare_entry_cfg()
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id, const struct dpsw_acl_entry_cfg *cfg)
+{
+ struct dpsw_cmd_acl_entry *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_ENTRY,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params;
+ cmd_params->acl_id = cpu_to_le16(acl_id);
+ cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id);
+ cmd_params->precedence = cpu_to_le32(cfg->precedence);
+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+ dpsw_set_field(cmd_params->result_action,
+ RESULT_ACTION,
+ cfg->result.action);
+
+	/* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_set_reflection_if() - Set target interface for mirrored traffic
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Id
+ *
+ * Only one mirroring destination is allowed per switch
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_set_reflection_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id)
+{
+ struct dpsw_cmd_set_reflection_if *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_REFLECTION_IF,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_set_reflection_if *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_add_reflection() - Setup mirroring rule
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @cfg: Reflection configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_add_reflection(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, const struct dpsw_reflection_cfg *cfg)
+{
+ struct dpsw_cmd_if_reflection *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ADD_REFLECTION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
+ dpsw_set_field(cmd_params->filter, FILTER, cfg->filter);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_remove_reflection() - Remove mirroring rule
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @cfg: Reflection configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, const struct dpsw_reflection_cfg *cfg)
+{
+ struct dpsw_cmd_if_reflection *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_REMOVE_REFLECTION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
+ dpsw_set_field(cmd_params->filter, FILTER, cfg->filter);
+
+ return mc_send_command(mc_io, &cmd);
+}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw.h b/drivers/net/ethernet/freescale/dpaa2/dpsw.h
new file mode 100644
index 000000000..b90bd363f
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpsw.h
@@ -0,0 +1,791 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2021 NXP
+ *
+ */
+
+#ifndef __FSL_DPSW_H
+#define __FSL_DPSW_H
+
+/* Data Path L2-Switch API
+ * Contains API for handling DPSW topology and functionality
+ */
+
+struct fsl_mc_io;
+
+/* DPSW general definitions */
+
+#define DPSW_MAX_PRIORITIES 8
+
+#define DPSW_MAX_IF 64
+
+int dpsw_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpsw_id, u16 *token);
+
+int dpsw_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+/* DPSW options */
+
+/**
+ * DPSW_OPT_FLOODING_DIS - Flooding was disabled at device create
+ */
+#define DPSW_OPT_FLOODING_DIS 0x0000000000000001ULL
+/**
+ * DPSW_OPT_MULTICAST_DIS - Multicast was disabled at device create
+ */
+#define DPSW_OPT_MULTICAST_DIS 0x0000000000000004ULL
+/**
+ * DPSW_OPT_CTRL_IF_DIS - Control interface support is disabled
+ */
+#define DPSW_OPT_CTRL_IF_DIS 0x0000000000000010ULL
+
+/**
+ * enum dpsw_component_type - component type of a bridge
+ * @DPSW_COMPONENT_TYPE_C_VLAN: A C-VLAN component of an
+ * enterprise VLAN bridge or of a Provider Bridge used
+ * to process C-tagged frames
+ * @DPSW_COMPONENT_TYPE_S_VLAN: An S-VLAN component of a
+ * Provider Bridge
+ *
+ */
+enum dpsw_component_type {
+ DPSW_COMPONENT_TYPE_C_VLAN = 0,
+ DPSW_COMPONENT_TYPE_S_VLAN
+};
+
+/**
+ * enum dpsw_flooding_cfg - flooding configuration requested
+ * @DPSW_FLOODING_PER_VLAN: Flooding replicators are allocated per VLAN and
+ * interfaces present in each of them can be configured using
+ * dpsw_vlan_add_if_flooding()/dpsw_vlan_remove_if_flooding().
+ * This is the default configuration.
+ *
+ * @DPSW_FLOODING_PER_FDB: Flooding replicators are allocated per FDB and
+ * interfaces present in each of them can be configured using
+ * dpsw_set_egress_flood().
+ */
+enum dpsw_flooding_cfg {
+ DPSW_FLOODING_PER_VLAN = 0,
+ DPSW_FLOODING_PER_FDB,
+};
+
+/**
+ * enum dpsw_broadcast_cfg - broadcast configuration requested
+ * @DPSW_BROADCAST_PER_OBJECT: There is only one broadcast replicator per DPSW
+ * object. This is the default configuration.
+ * @DPSW_BROADCAST_PER_FDB: Broadcast replicators are allocated per FDB and
+ * interfaces present in each of them can be configured using
+ * dpsw_set_egress_flood().
+ */
+enum dpsw_broadcast_cfg {
+ DPSW_BROADCAST_PER_OBJECT = 0,
+ DPSW_BROADCAST_PER_FDB,
+};
+
+int dpsw_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+int dpsw_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+int dpsw_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+/* DPSW IRQ Index and Events */
+
+#define DPSW_IRQ_INDEX_IF 0x0000
+#define DPSW_IRQ_INDEX_L2SW 0x0001
+
+/**
+ * DPSW_IRQ_EVENT_LINK_CHANGED - Indicates that the link state changed
+ */
+#define DPSW_IRQ_EVENT_LINK_CHANGED 0x0001
+
+/**
+ * DPSW_IRQ_EVENT_ENDPOINT_CHANGED - Indicates a change in endpoint
+ */
+#define DPSW_IRQ_EVENT_ENDPOINT_CHANGED 0x0002
+
+/**
+ * struct dpsw_irq_cfg - IRQ configuration
+ * @addr: Address that must be written to signal a message-based interrupt
+ * @val: Value to write into irq_addr address
+ * @irq_num: A user defined number associated with this IRQ
+ */
+struct dpsw_irq_cfg {
+ u64 addr;
+ u32 val;
+ int irq_num;
+};
+
+int dpsw_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u8 en);
+
+int dpsw_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 mask);
+
+int dpsw_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 *status);
+
+int dpsw_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 status);
+
+/**
+ * struct dpsw_attr - Structure representing DPSW attributes
+ * @id: DPSW object ID
+ * @options: Enable/Disable DPSW features
+ * @max_vlans: Maximum Number of VLANs
+ * @max_meters_per_if: Number of meters per interface
+ * @max_fdbs: Maximum Number of FDBs
+ * @max_fdb_entries: Number of FDB entries for default FDB table;
+ * 0 - indicates default 1024 entries.
+ * @fdb_aging_time: Default FDB aging time for default FDB table;
+ * 0 - indicates default 300 seconds
+ * @max_fdb_mc_groups: Number of multicast groups in each FDB table;
+ * 0 - indicates default 32
+ * @mem_size: DPSW frame storage memory size
+ * @num_ifs: Number of interfaces
+ * @num_vlans: Current number of VLANs
+ * @num_fdbs: Current number of FDBs
+ * @component_type: Component type of this bridge
+ * @flooding_cfg: Flooding configuration (PER_VLAN - default, PER_FDB)
+ * @broadcast_cfg: Broadcast configuration (PER_OBJECT - default, PER_FDB)
+ */
+struct dpsw_attr {
+ int id;
+ u64 options;
+ u16 max_vlans;
+ u8 max_meters_per_if;
+ u8 max_fdbs;
+ u16 max_fdb_entries;
+ u16 fdb_aging_time;
+ u16 max_fdb_mc_groups;
+ u16 num_ifs;
+ u16 mem_size;
+ u16 num_vlans;
+ u8 num_fdbs;
+ enum dpsw_component_type component_type;
+ enum dpsw_flooding_cfg flooding_cfg;
+ enum dpsw_broadcast_cfg broadcast_cfg;
+};
+
+int dpsw_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpsw_attr *attr);
+
+/**
+ * struct dpsw_ctrl_if_attr - Control interface attributes
+ * @rx_fqid: Receive FQID
+ * @rx_err_fqid: Receive error FQID
+ * @tx_err_conf_fqid: Transmit error and confirmation FQID
+ */
+struct dpsw_ctrl_if_attr {
+ u32 rx_fqid;
+ u32 rx_err_fqid;
+ u32 tx_err_conf_fqid;
+};
+
+int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, struct dpsw_ctrl_if_attr *attr);
+
+enum dpsw_queue_type {
+ DPSW_QUEUE_RX,
+ DPSW_QUEUE_TX_ERR_CONF,
+ DPSW_QUEUE_RX_ERR,
+};
+
+#define DPSW_MAX_DPBP 8
+
+/**
+ * struct dpsw_ctrl_if_pools_cfg - Control interface buffer pools configuration
+ * @num_dpbp: Number of DPBPs
+ * @pools: Array of buffer pools parameters; The number of valid entries
+ * must match 'num_dpbp' value
+ * @pools.dpbp_id: DPBP object ID
+ * @pools.buffer_size: Buffer size
+ * @pools.backup_pool: Backup pool
+ */
+struct dpsw_ctrl_if_pools_cfg {
+ u8 num_dpbp;
+ struct {
+ int dpbp_id;
+ u16 buffer_size;
+ int backup_pool;
+ } pools[DPSW_MAX_DPBP];
+};
+
+int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ const struct dpsw_ctrl_if_pools_cfg *cfg);
+
+#define DPSW_CTRL_IF_QUEUE_OPT_USER_CTX 0x00000001
+#define DPSW_CTRL_IF_QUEUE_OPT_DEST 0x00000002
+
+enum dpsw_ctrl_if_dest {
+ DPSW_CTRL_IF_DEST_NONE = 0,
+ DPSW_CTRL_IF_DEST_DPIO = 1,
+};
+
+struct dpsw_ctrl_if_dest_cfg {
+ enum dpsw_ctrl_if_dest dest_type;
+ int dest_id;
+ u8 priority;
+};
+
+struct dpsw_ctrl_if_queue_cfg {
+ u32 options;
+ u64 user_ctx;
+ struct dpsw_ctrl_if_dest_cfg dest_cfg;
+};
+
+int dpsw_ctrl_if_set_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ enum dpsw_queue_type qtype,
+ const struct dpsw_ctrl_if_queue_cfg *cfg);
+
+int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+/**
+ * enum dpsw_action - Action selection for special/control frames
+ * @DPSW_ACTION_DROP: Drop frame
+ * @DPSW_ACTION_REDIRECT: Redirect frame to control port
+ */
+enum dpsw_action {
+ DPSW_ACTION_DROP = 0,
+ DPSW_ACTION_REDIRECT = 1
+};
+
+#define DPSW_LINK_OPT_AUTONEG 0x0000000000000001ULL
+#define DPSW_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
+#define DPSW_LINK_OPT_PAUSE 0x0000000000000004ULL
+#define DPSW_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
+
+/**
+ * struct dpsw_link_cfg - Structure representing DPSW link configuration
+ * @rate: Rate
+ * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values
+ */
+struct dpsw_link_cfg {
+ u32 rate;
+ u64 options;
+};
+
+int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id,
+ struct dpsw_link_cfg *cfg);
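+
+/*
+ * Usage sketch (editorial): forcing the link configuration of interface
+ * 0; the rate value is a placeholder.
+ *
+ *	struct dpsw_link_cfg link_cfg = {
+ *		.rate = 1000,
+ *		.options = DPSW_LINK_OPT_PAUSE,
+ *	};
+ *	int err;
+ *
+ *	err = dpsw_if_set_link_cfg(mc_io, 0, token, 0, &link_cfg);
+ */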
+
+/**
+ * struct dpsw_link_state - Structure representing DPSW link state
+ * @rate: Rate
+ * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values
+ * @up: Link state: 0 - down or disconnected, 1 - up
+ */
+struct dpsw_link_state {
+ u32 rate;
+ u64 options;
+ u8 up;
+};
+
+int dpsw_if_get_link_state(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, struct dpsw_link_state *state);
+
+/**
+ * struct dpsw_tci_cfg - Tag Control Information (TCI) configuration
+ * @pcp: Priority Code Point (PCP): a 3-bit field which refers
+ * to the IEEE 802.1p priority
+ * @dei: Drop Eligible Indicator (DEI): a 1-bit field. May be used
+ * separately or in conjunction with PCP to indicate frames
+ * eligible to be dropped in the presence of congestion
+ * @vlan_id: VLAN Identifier (VID): a 12-bit field specifying the VLAN
+ * to which the frame belongs. The hexadecimal values
+ * of 0x000 and 0xFFF are reserved;
+ * all other values may be used as VLAN identifiers,
+ * allowing up to 4,094 VLANs
+ */
+struct dpsw_tci_cfg {
+ u8 pcp;
+ u8 dei;
+ u16 vlan_id;
+};
+
+int dpsw_if_set_tci(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id,
+ const struct dpsw_tci_cfg *cfg);
+
+int dpsw_if_get_tci(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id,
+ struct dpsw_tci_cfg *cfg);
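+
+/*
+ * Usage sketch (editorial): making VLAN 5 the default (PVID-like) tag of
+ * interface 'if_id', with priority 0.
+ *
+ *	struct dpsw_tci_cfg tci_cfg = {
+ *		.pcp = 0,
+ *		.dei = 0,
+ *		.vlan_id = 5,
+ *	};
+ *	int err;
+ *
+ *	err = dpsw_if_set_tci(mc_io, 0, token, if_id, &tci_cfg);
+ */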
+
+/**
+ * enum dpsw_stp_state - Spanning Tree Protocol (STP) states
+ * @DPSW_STP_STATE_DISABLED: Disabled state
+ * @DPSW_STP_STATE_LISTENING: Listening state
+ * @DPSW_STP_STATE_LEARNING: Learning state
+ * @DPSW_STP_STATE_FORWARDING: Forwarding state
+ * @DPSW_STP_STATE_BLOCKING: Blocking state
+ *
+ */
+enum dpsw_stp_state {
+ DPSW_STP_STATE_DISABLED = 0,
+ DPSW_STP_STATE_LISTENING = 1,
+ DPSW_STP_STATE_LEARNING = 2,
+ DPSW_STP_STATE_FORWARDING = 3,
+ DPSW_STP_STATE_BLOCKING = 0
+};
+
+/**
+ * struct dpsw_stp_cfg - Spanning Tree Protocol (STP) Configuration
+ * @vlan_id: VLAN ID to set the STP state for
+ * @state: STP state
+ */
+struct dpsw_stp_cfg {
+ u16 vlan_id;
+ enum dpsw_stp_state state;
+};
+
+int dpsw_if_set_stp(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id,
+ const struct dpsw_stp_cfg *cfg);
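+
+/*
+ * Usage sketch (editorial): moving interface 'if_id' to the forwarding
+ * state on VLAN 1.
+ *
+ *	struct dpsw_stp_cfg stp_cfg = {
+ *		.vlan_id = 1,
+ *		.state = DPSW_STP_STATE_FORWARDING,
+ *	};
+ *	int err;
+ *
+ *	err = dpsw_if_set_stp(mc_io, 0, token, if_id, &stp_cfg);
+ */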
+
+/**
+ * enum dpsw_accepted_frames - Types of frames to accept
+ * @DPSW_ADMIT_ALL: The device accepts VLAN tagged, untagged and
+ * priority tagged frames
+ * @DPSW_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or
+ * priority-tagged frames received on this interface.
+ *
+ */
+enum dpsw_accepted_frames {
+ DPSW_ADMIT_ALL = 1,
+ DPSW_ADMIT_ONLY_VLAN_TAGGED = 3
+};
+
+/**
+ * enum dpsw_counter - Counters types
+ * @DPSW_CNT_ING_FRAME: Counts ingress frames
+ * @DPSW_CNT_ING_BYTE: Counts ingress bytes
+ * @DPSW_CNT_ING_FLTR_FRAME: Counts filtered ingress frames
+ * @DPSW_CNT_ING_FRAME_DISCARD: Counts discarded ingress frames
+ * @DPSW_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
+ * @DPSW_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
+ * @DPSW_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
+ * @DPSW_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
+ * @DPSW_CNT_EGR_FRAME: Counts egress frames
+ * @DPSW_CNT_EGR_BYTE: Counts egress bytes
+ * @DPSW_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames
+ * @DPSW_CNT_EGR_STP_FRAME_DISCARD: Counts egress STP discarded frames
+ * @DPSW_CNT_ING_NO_BUFF_DISCARD: Counts ingress no buffer discarded frames
+ */
+enum dpsw_counter {
+ DPSW_CNT_ING_FRAME = 0x0,
+ DPSW_CNT_ING_BYTE = 0x1,
+ DPSW_CNT_ING_FLTR_FRAME = 0x2,
+ DPSW_CNT_ING_FRAME_DISCARD = 0x3,
+ DPSW_CNT_ING_MCAST_FRAME = 0x4,
+ DPSW_CNT_ING_MCAST_BYTE = 0x5,
+ DPSW_CNT_ING_BCAST_FRAME = 0x6,
+ DPSW_CNT_ING_BCAST_BYTES = 0x7,
+ DPSW_CNT_EGR_FRAME = 0x8,
+ DPSW_CNT_EGR_BYTE = 0x9,
+ DPSW_CNT_EGR_FRAME_DISCARD = 0xa,
+ DPSW_CNT_EGR_STP_FRAME_DISCARD = 0xb,
+ DPSW_CNT_ING_NO_BUFF_DISCARD = 0xc,
+};
+
+int dpsw_if_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, enum dpsw_counter type, u64 *counter);
+
+int dpsw_if_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id);
+
+int dpsw_if_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id);
+
+/**
+ * struct dpsw_if_attr - Structure representing DPSW interface attributes
+ * @num_tcs: Number of traffic classes
+ * @rate: Transmit rate in bits per second
+ * @options: Interface configuration options (bitmap)
+ * @enabled: Indicates if interface is enabled
+ * @accept_all_vlan: The device discards/accepts incoming frames
+ * for VLANs that do not include this interface
+ * @admit_untagged: When set to 'DPSW_ADMIT_ONLY_VLAN_TAGGED', the device
+ * discards untagged frames or priority-tagged frames received on
+ * this interface;
+ * When set to 'DPSW_ADMIT_ALL', untagged frames or priority-
+ * tagged frames received on this interface are accepted
+ * @qdid: control frames transmit qdid
+ */
+struct dpsw_if_attr {
+ u8 num_tcs;
+ u32 rate;
+ u32 options;
+ int enabled;
+ int accept_all_vlan;
+ enum dpsw_accepted_frames admit_untagged;
+ u16 qdid;
+};
+
+int dpsw_if_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, struct dpsw_if_attr *attr);
+
+int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, u16 frame_length);
+
+/**
+ * struct dpsw_vlan_cfg - VLAN Configuration
+ * @fdb_id: Forwarding Database (FDB) ID
+ */
+struct dpsw_vlan_cfg {
+ u16 fdb_id;
+};
+
+int dpsw_vlan_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_cfg *cfg);
+
+#define DPSW_VLAN_ADD_IF_OPT_FDB_ID 0x0001
+
+/**
+ * struct dpsw_vlan_if_cfg - Set of VLAN Interfaces
+ * @num_ifs: The number of interfaces that are assigned to the egress
+ * list for this VLAN
+ * @if_id: The set of interfaces that are
+ * assigned to the egress list for this VLAN
+ * @options: Options map for this command (DPSW_VLAN_ADD_IF_OPT_FDB_ID)
+ * @fdb_id: FDB id to be used by this VLAN on these specific interfaces
+ * (taken into account only if the DPSW_VLAN_ADD_IF_OPT_FDB_ID is
+ * specified in the options field)
+ */
+struct dpsw_vlan_if_cfg {
+ u16 num_ifs;
+ u16 options;
+ u16 if_id[DPSW_MAX_IF];
+ u16 fdb_id;
+};
+
+int dpsw_vlan_add_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg);
+
+int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg);
+
+int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg);
+
+int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg);
+
+int dpsw_vlan_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id);
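+
+/*
+ * Usage sketch (editorial): creating VLAN 10 on an existing FDB and
+ * adding interfaces 0 and 1 to its egress list; 'fdb_id' is assumed to
+ * come from the DPSW attributes or dpsw_fdb_add().
+ *
+ *	struct dpsw_vlan_cfg vlan_cfg = { .fdb_id = fdb_id };
+ *	struct dpsw_vlan_if_cfg if_cfg = {
+ *		.num_ifs = 2,
+ *		.if_id = { 0, 1 },
+ *	};
+ *	int err;
+ *
+ *	err = dpsw_vlan_add(mc_io, 0, token, 10, &vlan_cfg);
+ *	if (!err)
+ *		err = dpsw_vlan_add_if(mc_io, 0, token, 10, &if_cfg);
+ */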
+
+/**
+ * enum dpsw_fdb_entry_type - FDB Entry type - Static/Dynamic
+ * @DPSW_FDB_ENTRY_STATIC: Static entry
+ * @DPSW_FDB_ENTRY_DINAMIC: Dynamic entry
+ */
+enum dpsw_fdb_entry_type {
+ DPSW_FDB_ENTRY_STATIC = 0,
+ DPSW_FDB_ENTRY_DINAMIC = 1
+};
+
+/**
+ * struct dpsw_fdb_unicast_cfg - Unicast entry configuration
+ * @type: Select static or dynamic entry
+ * @mac_addr: MAC address
+ * @if_egress: Egress interface ID
+ */
+struct dpsw_fdb_unicast_cfg {
+ enum dpsw_fdb_entry_type type;
+ u8 mac_addr[6];
+ u16 if_egress;
+};
+
+int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 fdb_id, const struct dpsw_fdb_unicast_cfg *cfg);
+
+int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 fdb_id, const struct dpsw_fdb_unicast_cfg *cfg);
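+
+/*
+ * Usage sketch (editorial): installing a static unicast entry so that
+ * frames for 'addr' (a caller-provided u8[6]) always egress interface 2.
+ *
+ *	struct dpsw_fdb_unicast_cfg entry = {
+ *		.type = DPSW_FDB_ENTRY_STATIC,
+ *		.if_egress = 2,
+ *	};
+ *	int err;
+ *
+ *	ether_addr_copy(entry.mac_addr, addr);
+ *	err = dpsw_fdb_add_unicast(mc_io, 0, token, fdb_id, &entry);
+ */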
+
+#define DPSW_FDB_ENTRY_TYPE_DYNAMIC BIT(0)
+#define DPSW_FDB_ENTRY_TYPE_UNICAST BIT(1)
+
+/**
+ * struct fdb_dump_entry - fdb snapshot entry
+ * @mac_addr: MAC address
+ * @type: bit0 - DYNAMIC(1)/STATIC(0), bit1 - UNICAST(1)/MULTICAST(0)
+ * @if_info: unicast - egress interface, multicast - number of egress interfaces
+ * @if_mask: multicast - egress interface mask
+ */
+struct fdb_dump_entry {
+ u8 mac_addr[6];
+ u8 type;
+ u8 if_info;
+ u8 if_mask[8];
+};
+
+int dpsw_fdb_dump(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 fdb_id,
+ u64 iova_addr, u32 iova_size, u16 *num_entries);
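+
+/*
+ * Usage sketch (editorial): dumping the FDB into a DMA-able page and
+ * walking the snapshot; 'dev' is the caller's fsl_mc device and error
+ * handling is trimmed for brevity.
+ *
+ *	struct fdb_dump_entry *entries =
+ *		(void *)get_zeroed_page(GFP_KERNEL);
+ *	dma_addr_t iova = dma_map_single(dev, entries, PAGE_SIZE,
+ *					 DMA_FROM_DEVICE);
+ *	u16 num, i;
+ *	int err;
+ *
+ *	err = dpsw_fdb_dump(mc_io, 0, token, fdb_id, iova, PAGE_SIZE, &num);
+ *	dma_unmap_single(dev, iova, PAGE_SIZE, DMA_FROM_DEVICE);
+ *	for (i = 0; i < num; i++)
+ *		if (entries[i].type & DPSW_FDB_ENTRY_TYPE_UNICAST)
+ *			pr_info("%pM -> if %u\n", entries[i].mac_addr,
+ *				entries[i].if_info);
+ *	free_page((unsigned long)entries);
+ */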
+
+/**
+ * struct dpsw_fdb_multicast_cfg - Multicast entry configuration
+ * @type: Select static or dynamic entry
+ * @mac_addr: MAC address
+ * @num_ifs: Number of external and internal interfaces
+ * @if_id: Egress interface IDs
+ */
+struct dpsw_fdb_multicast_cfg {
+ enum dpsw_fdb_entry_type type;
+ u8 mac_addr[6];
+ u16 num_ifs;
+ u16 if_id[DPSW_MAX_IF];
+};
+
+int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 fdb_id, const struct dpsw_fdb_multicast_cfg *cfg);
+
+int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 fdb_id, const struct dpsw_fdb_multicast_cfg *cfg);
+
+/**
+ * enum dpsw_learning_mode - Auto-learning modes
+ * @DPSW_LEARNING_MODE_DIS: Disable Auto-learning
+ * @DPSW_LEARNING_MODE_HW: Enable HW auto-Learning
+ * @DPSW_LEARNING_MODE_NON_SECURE: Enable non-secure learning by CPU
+ * @DPSW_LEARNING_MODE_SECURE: Enable secure learning by CPU
+ *
+ * NON-SECURE LEARNING
+ * SMAC found DMAC found CTLU Action
+ * v v Forward frame to
+ * 1. DMAC destination
+ * - v Forward frame to
+ * 1. DMAC destination
+ * 2. Control interface
+ * v - Forward frame to
+ * 1. Flooding list of interfaces
+ * - - Forward frame to
+ * 1. Flooding list of interfaces
+ * 2. Control interface
+ * SECURE LEARNING
+ * SMAC found DMAC found CTLU Action
+ * v v Forward frame to
+ * 1. DMAC destination
+ * - v Forward frame to
+ * 1. Control interface
+ * v - Forward frame to
+ * 1. Flooding list of interfaces
+ * - - Forward frame to
+ * 1. Control interface
+ */
+enum dpsw_learning_mode {
+ DPSW_LEARNING_MODE_DIS = 0,
+ DPSW_LEARNING_MODE_HW = 1,
+ DPSW_LEARNING_MODE_NON_SECURE = 2,
+ DPSW_LEARNING_MODE_SECURE = 3
+};
+
+/**
+ * struct dpsw_fdb_attr - FDB Attributes
+ * @max_fdb_entries: Number of FDB entries
+ * @fdb_ageing_time: Ageing time in seconds
+ * @learning_mode: Learning mode
+ * @num_fdb_mc_groups: Current number of multicast groups
+ * @max_fdb_mc_groups: Maximum number of multicast groups
+ */
+struct dpsw_fdb_attr {
+ u16 max_fdb_entries;
+ u16 fdb_ageing_time;
+ enum dpsw_learning_mode learning_mode;
+ u16 num_fdb_mc_groups;
+ u16 max_fdb_mc_groups;
+};
+
+int dpsw_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver);
+
+int dpsw_if_get_port_mac_addr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, u8 mac_addr[6]);
+
+/**
+ * struct dpsw_fdb_cfg - FDB Configuration
+ * @num_fdb_entries: Number of FDB entries
+ * @fdb_ageing_time: Ageing time in seconds
+ */
+struct dpsw_fdb_cfg {
+ u16 num_fdb_entries;
+ u16 fdb_ageing_time;
+};
+
+int dpsw_fdb_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 *fdb_id,
+ const struct dpsw_fdb_cfg *cfg);
+
+int dpsw_fdb_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 fdb_id);
+
+/**
+ * enum dpsw_flood_type - Define the flood type of a DPSW object
+ * @DPSW_BROADCAST: Broadcast flooding
+ * @DPSW_FLOODING: Unknown flooding
+ */
+enum dpsw_flood_type {
+ DPSW_BROADCAST = 0,
+ DPSW_FLOODING,
+};
+
+struct dpsw_egress_flood_cfg {
+ u16 fdb_id;
+ enum dpsw_flood_type flood_type;
+ u16 num_ifs;
+ u16 if_id[DPSW_MAX_IF];
+};
+
+int dpsw_set_egress_flood(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ const struct dpsw_egress_flood_cfg *cfg);
+
+int dpsw_if_set_learning_mode(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, enum dpsw_learning_mode mode);
+
+/**
+ * struct dpsw_acl_cfg - ACL Configuration
+ * @max_entries: Number of ACL rules
+ */
+struct dpsw_acl_cfg {
+ u16 max_entries;
+};
+
+int dpsw_acl_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 *acl_id,
+ const struct dpsw_acl_cfg *cfg);
+
+int dpsw_acl_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id);
+
+/**
+ * struct dpsw_acl_if_cfg - List of interfaces to associate with an ACL table
+ * @num_ifs: Number of interfaces
+ * @if_id: List of interfaces
+ */
+struct dpsw_acl_if_cfg {
+ u16 num_ifs;
+ u16 if_id[DPSW_MAX_IF];
+};
+
+int dpsw_acl_add_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id, const struct dpsw_acl_if_cfg *cfg);
+
+int dpsw_acl_remove_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id, const struct dpsw_acl_if_cfg *cfg);
+
+/**
+ * struct dpsw_acl_fields - ACL fields.
+ * @l2_dest_mac: Destination MAC address: BPDU, Multicast, Broadcast, Unicast,
+ * slow protocols, MVRP, STP
+ * @l2_source_mac: Source MAC address
+ * @l2_tpid: Layer 2 (Ethernet) protocol type, used to identify the following
+ * protocols: MPLS, PTP, PFC, ARP, Jumbo frames, LLDP, IEEE802.1ae,
+ * Q-in-Q, IPv4, IPv6, PPPoE
+ * @l2_pcp_dei: Priority Code Point (PCP) and Drop Eligible Indicator (DEI)
+ *	fields of the VLAN tag
+ * @l2_vlan_id: Layer 2 VLAN ID
+ * @l2_ether_type: Layer 2 Ethernet type
+ * @l3_dscp: Layer 3 differentiated services code point
+ * @l3_protocol: Layer 3 protocol the packet belongs to. The following
+ *	protocols are supported: ICMP, IGMP, IPv4 (encapsulation), TCP,
+ *	IPv6 (encapsulation), GRE, PTP
+ * @l3_source_ip: Source IPv4 IP
+ * @l3_dest_ip: Destination IPv4 IP
+ * @l4_source_port: Source TCP/UDP Port
+ * @l4_dest_port: Destination TCP/UDP Port
+ */
+struct dpsw_acl_fields {
+ u8 l2_dest_mac[6];
+ u8 l2_source_mac[6];
+ u16 l2_tpid;
+ u8 l2_pcp_dei;
+ u16 l2_vlan_id;
+ u16 l2_ether_type;
+ u8 l3_dscp;
+ u8 l3_protocol;
+ u32 l3_source_ip;
+ u32 l3_dest_ip;
+ u16 l4_source_port;
+ u16 l4_dest_port;
+};
+
+/**
+ * struct dpsw_acl_key - ACL key
+ * @match: Match fields
+ * @mask: Mask: b'1 - valid, b'0 - don't care
+ */
+struct dpsw_acl_key {
+ struct dpsw_acl_fields match;
+ struct dpsw_acl_fields mask;
+};
+
+/**
+ * enum dpsw_acl_action - action to be run on the ACL rule match
+ * @DPSW_ACL_ACTION_DROP: Drop frame
+ * @DPSW_ACL_ACTION_REDIRECT: Redirect to certain port
+ * @DPSW_ACL_ACTION_ACCEPT: Accept frame
+ * @DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF: Redirect to control interface
+ */
+enum dpsw_acl_action {
+ DPSW_ACL_ACTION_DROP,
+ DPSW_ACL_ACTION_REDIRECT,
+ DPSW_ACL_ACTION_ACCEPT,
+ DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF
+};
+
+/**
+ * struct dpsw_acl_result - ACL action
+ * @action: Action to be taken when an ACL entry is hit
+ * @if_id: Interface ID to redirect the frame to; valid only if a
+ *	redirect action is selected
+ */
+struct dpsw_acl_result {
+ enum dpsw_acl_action action;
+ u16 if_id;
+};
+
+/**
+ * struct dpsw_acl_entry_cfg - ACL entry
+ * @key_iova: I/O virtual address of DMA-able memory filled with key after call
+ * to dpsw_acl_prepare_entry_cfg()
+ * @result: Required action when entry hit occurs
+ * @precedence: Precedence inside the ACL; 0 is lowest. This priority cannot
+ *	change during the lifetime of a policy. It is the user's
+ *	responsibility to space the priorities according to subsequent
+ *	rule additions.
+ */
+struct dpsw_acl_entry_cfg {
+ u64 key_iova;
+ struct dpsw_acl_result result;
+ int precedence;
+};
+
+void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
+ u8 *entry_cfg_buf);
+
+int dpsw_acl_add_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id, const struct dpsw_acl_entry_cfg *cfg);
+
+int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id, const struct dpsw_acl_entry_cfg *cfg);
+
+/**
+ * enum dpsw_reflection_filter - Filter type for frames to be reflected
+ * @DPSW_REFLECTION_FILTER_INGRESS_ALL: Reflect all frames
+ * @DPSW_REFLECTION_FILTER_INGRESS_VLAN: Reflect only frames that belong to
+ *	the VLAN defined by the vlan_id parameter
+ *
+ */
+enum dpsw_reflection_filter {
+ DPSW_REFLECTION_FILTER_INGRESS_ALL = 0,
+ DPSW_REFLECTION_FILTER_INGRESS_VLAN = 1
+};
+
+/**
+ * struct dpsw_reflection_cfg - Structure representing the mirroring config
+ * @filter: Filter type for frames to be mirrored
+ * @vlan_id: VLAN ID to mirror; valid only when the filter is
+ *	DPSW_REFLECTION_FILTER_INGRESS_VLAN
+ */
+struct dpsw_reflection_cfg {
+ enum dpsw_reflection_filter filter;
+ u16 vlan_id;
+};
+
+int dpsw_set_reflection_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id);
+
+int dpsw_if_add_reflection(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, const struct dpsw_reflection_cfg *cfg);
+
+int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, const struct dpsw_reflection_cfg *cfg);
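+
+/*
+ * Usage sketch (editorial): mirroring all ingress traffic of interface 1
+ * to interface 0. Only one mirror destination exists per switch, so
+ * dpsw_set_reflection_if() is called once before adding rules.
+ *
+ *	struct dpsw_reflection_cfg refl_cfg = {
+ *		.filter = DPSW_REFLECTION_FILTER_INGRESS_ALL,
+ *	};
+ *	int err;
+ *
+ *	err = dpsw_set_reflection_if(mc_io, 0, token, 0);
+ *	if (!err)
+ *		err = dpsw_if_add_reflection(mc_io, 0, token, 1, &refl_cfg);
+ */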
+#endif /* __FSL_DPSW_H */