author Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-11 08:27:49 +0000
commit ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/net/ethernet/microchip/lan966x
parent Initial commit. (diff)
Adding upstream version 6.6.15.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/microchip/lan966x')
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/Kconfig 23
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/Makefile 22
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_cbs.c 70
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_dcb.c 365
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c 727
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_ets.c 96
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c 289
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c 1073
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_goto.c 50
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h 174
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_lag.c 363
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_mac.c 592
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_main.c 1324
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_main.h 792
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c 551
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_mirror.c 138
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_mqprio.c 28
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c 137
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_police.c 226
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_port.c 570
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c 1113
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_regs.h 1888
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c 664
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_taprio.c 528
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_tbf.c 85
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_tc.c 142
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c 620
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c 91
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.c 3270
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.h 11
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c 242
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c 784
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c 323
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_xdp.c 136
34 files changed, 17507 insertions, 0 deletions
diff --git a/drivers/net/ethernet/microchip/lan966x/Kconfig b/drivers/net/ethernet/microchip/lan966x/Kconfig
new file mode 100644
index 0000000000..f9ebffc04e
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/Kconfig
@@ -0,0 +1,23 @@
+config LAN966X_SWITCH
+ tristate "Lan966x switch driver"
+ depends on PTP_1588_CLOCK_OPTIONAL
+ depends on HAS_IOMEM
+ depends on OF
+ depends on NET_SWITCHDEV
+ depends on BRIDGE || BRIDGE=n
+ select PHYLINK
+ select PAGE_POOL
+ select VCAP
+ help
+ This driver supports the Lan966x network switch device.
+
+config LAN966X_DCB
+ bool "Data Center Bridging (DCB) support"
+ depends on LAN966X_SWITCH && DCB
+ default y
+ help
+ Say Y here if you want to use Data Center Bridging (DCB) in the
+ driver. This can be used to assign priority to traffic, based on
+ DSCP and PCP.
+
+ If unsure, set to Y.
diff --git a/drivers/net/ethernet/microchip/lan966x/Makefile b/drivers/net/ethernet/microchip/lan966x/Makefile
new file mode 100644
index 0000000000..3b6ac33169
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Microchip Lan966x network device drivers.
+#
+
+obj-$(CONFIG_LAN966X_SWITCH) += lan966x-switch.o
+
+lan966x-switch-objs := lan966x_main.o lan966x_phylink.o lan966x_port.o \
+ lan966x_mac.o lan966x_ethtool.o lan966x_switchdev.o \
+ lan966x_vlan.o lan966x_fdb.o lan966x_mdb.o \
+ lan966x_ptp.o lan966x_fdma.o lan966x_lag.o \
+ lan966x_tc.o lan966x_mqprio.o lan966x_taprio.o \
+ lan966x_tbf.o lan966x_cbs.o lan966x_ets.o \
+ lan966x_tc_matchall.o lan966x_police.o lan966x_mirror.o \
+ lan966x_xdp.o lan966x_vcap_impl.o lan966x_vcap_ag_api.o \
+ lan966x_tc_flower.o lan966x_goto.o
+
+lan966x-switch-$(CONFIG_LAN966X_DCB) += lan966x_dcb.o
+lan966x-switch-$(CONFIG_DEBUG_FS) += lan966x_vcap_debugfs.o
+
+# Provide include files
+ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/vcap
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_cbs.c b/drivers/net/ethernet/microchip/lan966x/lan966x_cbs.c
new file mode 100644
index 0000000000..70cbbf8d2b
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_cbs.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+int lan966x_cbs_add(struct lan966x_port *port,
+ struct tc_cbs_qopt_offload *qopt)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 cir, cbs;
+ u8 se_idx;
+
+ /* Check for invalid values */
+ if (qopt->idleslope <= 0 ||
+ qopt->sendslope >= 0 ||
+ qopt->locredit >= qopt->hicredit)
+ return -EINVAL;
+
+ se_idx = SE_IDX_QUEUE + port->chip_port * NUM_PRIO_QUEUES + qopt->queue;
+ cir = qopt->idleslope;
+ cbs = (qopt->idleslope - qopt->sendslope) *
+ (qopt->hicredit - qopt->locredit) /
+ -qopt->sendslope;
+
+ /* Rate unit is 100 kbps */
+ cir = DIV_ROUND_UP(cir, 100);
+ /* Avoid using zero rate */
+ cir = cir ?: 1;
+ /* Burst unit is 4kB */
+ cbs = DIV_ROUND_UP(cbs, 4096);
+ /* Avoid using zero burst */
+ cbs = cbs ?: 1;
+
+ /* Check that actually the result can be written */
+ if (cir > GENMASK(15, 0) ||
+ cbs > GENMASK(6, 0))
+ return -EINVAL;
+
+ lan_rmw(QSYS_SE_CFG_SE_AVB_ENA_SET(1) |
+ QSYS_SE_CFG_SE_FRM_MODE_SET(1),
+ QSYS_SE_CFG_SE_AVB_ENA |
+ QSYS_SE_CFG_SE_FRM_MODE,
+ lan966x, QSYS_SE_CFG(se_idx));
+
+ lan_wr(QSYS_CIR_CFG_CIR_RATE_SET(cir) |
+ QSYS_CIR_CFG_CIR_BURST_SET(cbs),
+ lan966x, QSYS_CIR_CFG(se_idx));
+
+ return 0;
+}
+
+int lan966x_cbs_del(struct lan966x_port *port,
+ struct tc_cbs_qopt_offload *qopt)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u8 se_idx;
+
+ se_idx = SE_IDX_QUEUE + port->chip_port * NUM_PRIO_QUEUES + qopt->queue;
+
+ lan_rmw(QSYS_SE_CFG_SE_AVB_ENA_SET(1) |
+ QSYS_SE_CFG_SE_FRM_MODE_SET(0),
+ QSYS_SE_CFG_SE_AVB_ENA |
+ QSYS_SE_CFG_SE_FRM_MODE,
+ lan966x, QSYS_SE_CFG(se_idx));
+
+ lan_wr(QSYS_CIR_CFG_CIR_RATE_SET(0) |
+ QSYS_CIR_CFG_CIR_BURST_SET(0),
+ lan966x, QSYS_CIR_CFG(se_idx));
+
+ return 0;
+}
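
The code above maps the tc-cbs parameters (idleslope/sendslope in kbps, hicredit/locredit in bytes) onto the hardware's 100 kbps rate unit and 4 kB burst unit, then range-checks the 16-bit rate and 7-bit burst register fields. The following is a minimal host-side sketch of the same arithmetic, useful for checking whether a given qdisc configuration fits; the tc-cbs values are illustrative only, not taken from the driver.

/* Standalone sketch mirroring the conversion in lan966x_cbs_add();
 * the example tc-cbs values are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int64_t idleslope = 98000;  /* kbps, must be > 0 */
	int64_t sendslope = -2000;  /* kbps, must be < 0 */
	int64_t hicredit = 153;     /* bytes */
	int64_t locredit = -1389;   /* bytes, must be < hicredit */

	int64_t cir = idleslope;
	int64_t cbs = (idleslope - sendslope) * (hicredit - locredit) /
		      -sendslope;

	cir = DIV_ROUND_UP(cir, 100);  /* hardware rate unit: 100 kbps */
	if (!cir)
		cir = 1;               /* avoid a zero rate */
	cbs = DIV_ROUND_UP(cbs, 4096); /* hardware burst unit: 4 kB */
	if (!cbs)
		cbs = 1;               /* avoid a zero burst */

	/* Same range checks as the driver: 16-bit rate, 7-bit burst */
	if (cir > 0xffff || cbs > 0x7f) {
		fprintf(stderr, "does not fit QSYS_CIR_CFG\n");
		return 1;
	}

	printf("CIR_RATE=%lld CIR_BURST=%lld\n",
	       (long long)cir, (long long)cbs);
	return 0;
}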
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_dcb.c b/drivers/net/ethernet/microchip/lan966x/lan966x_dcb.c
new file mode 100644
index 0000000000..ed2d96d790
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_dcb.c
@@ -0,0 +1,365 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+enum lan966x_dcb_apptrust_values {
+ LAN966X_DCB_APPTRUST_EMPTY,
+ LAN966X_DCB_APPTRUST_DSCP,
+ LAN966X_DCB_APPTRUST_PCP,
+ LAN966X_DCB_APPTRUST_DSCP_PCP,
+ __LAN966X_DCB_APPTRUST_MAX
+};
+
+static const struct lan966x_dcb_apptrust {
+ u8 selectors[IEEE_8021QAZ_APP_SEL_MAX + 1];
+ int nselectors;
+} *lan966x_port_apptrust[NUM_PHYS_PORTS];
+
+static const char *lan966x_dcb_apptrust_names[__LAN966X_DCB_APPTRUST_MAX] = {
+ [LAN966X_DCB_APPTRUST_EMPTY] = "empty",
+ [LAN966X_DCB_APPTRUST_DSCP] = "dscp",
+ [LAN966X_DCB_APPTRUST_PCP] = "pcp",
+ [LAN966X_DCB_APPTRUST_DSCP_PCP] = "dscp pcp"
+};
+
+/* Lan966x supported apptrust policies */
+static const struct lan966x_dcb_apptrust
+ lan966x_dcb_apptrust_policies[__LAN966X_DCB_APPTRUST_MAX] = {
+ /* Empty *must* be first */
+ [LAN966X_DCB_APPTRUST_EMPTY] = { { 0 }, 0 },
+ [LAN966X_DCB_APPTRUST_DSCP] = { { IEEE_8021QAZ_APP_SEL_DSCP }, 1 },
+ [LAN966X_DCB_APPTRUST_PCP] = { { DCB_APP_SEL_PCP }, 1 },
+ [LAN966X_DCB_APPTRUST_DSCP_PCP] = { { IEEE_8021QAZ_APP_SEL_DSCP,
+ DCB_APP_SEL_PCP }, 2 },
+};
+
+static bool lan966x_dcb_apptrust_contains(int portno, u8 selector)
+{
+ const struct lan966x_dcb_apptrust *conf = lan966x_port_apptrust[portno];
+
+ for (int i = 0; i < conf->nselectors; i++)
+ if (conf->selectors[i] == selector)
+ return true;
+
+ return false;
+}
+
+static void lan966x_dcb_app_update(struct net_device *dev)
+{
+ struct dcb_ieee_app_prio_map dscp_rewr_map = {0};
+ struct dcb_rewr_prio_pcp_map pcp_rewr_map = {0};
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x_port_qos qos = {0};
+ struct dcb_app app_itr;
+ bool dscp_rewr = false;
+ bool pcp_rewr = false;
+
+ /* Get pcp ingress mapping */
+ for (int i = 0; i < ARRAY_SIZE(qos.pcp.map); i++) {
+ app_itr.selector = DCB_APP_SEL_PCP;
+ app_itr.protocol = i;
+ qos.pcp.map[i] = dcb_getapp(dev, &app_itr);
+ }
+
+ /* Get dscp ingress mapping */
+ for (int i = 0; i < ARRAY_SIZE(qos.dscp.map); i++) {
+ app_itr.selector = IEEE_8021QAZ_APP_SEL_DSCP;
+ app_itr.protocol = i;
+ qos.dscp.map[i] = dcb_getapp(dev, &app_itr);
+ }
+
+ /* Get default prio */
+ qos.default_prio = dcb_ieee_getapp_default_prio_mask(dev);
+ if (qos.default_prio)
+ qos.default_prio = fls(qos.default_prio) - 1;
+
+ /* Get pcp rewrite mapping */
+ dcb_getrewr_prio_pcp_mask_map(dev, &pcp_rewr_map);
+ for (int i = 0; i < ARRAY_SIZE(pcp_rewr_map.map); i++) {
+ if (!pcp_rewr_map.map[i])
+ continue;
+
+ pcp_rewr = true;
+ qos.pcp_rewr.map[i] = fls(pcp_rewr_map.map[i]) - 1;
+ }
+
+ /* Get dscp rewrite mapping */
+ dcb_getrewr_prio_dscp_mask_map(dev, &dscp_rewr_map);
+ for (int i = 0; i < ARRAY_SIZE(dscp_rewr_map.map); i++) {
+ if (!dscp_rewr_map.map[i])
+ continue;
+
+ dscp_rewr = true;
+ qos.dscp_rewr.map[i] = fls64(dscp_rewr_map.map[i]) - 1;
+ }
+
+ /* Enable use of pcp for queue classification */
+ if (lan966x_dcb_apptrust_contains(port->chip_port, DCB_APP_SEL_PCP)) {
+ qos.pcp.enable = true;
+
+ if (pcp_rewr)
+ qos.pcp_rewr.enable = true;
+ }
+
+ /* Enable use of dscp for queue classification */
+ if (lan966x_dcb_apptrust_contains(port->chip_port, IEEE_8021QAZ_APP_SEL_DSCP)) {
+ qos.dscp.enable = true;
+
+ if (dscp_rewr)
+ qos.dscp_rewr.enable = true;
+ }
+
+ lan966x_port_qos_set(port, &qos);
+}
+
+/* DSCP mapping is global for all ports, so set and delete app entries are
+ * replicated for each port.
+ */
+static int lan966x_dcb_ieee_dscp_setdel(struct net_device *dev,
+ struct dcb_app *app,
+ int (*setdel)(struct net_device *,
+ struct dcb_app *))
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ int err;
+
+ for (int i = 0; i < NUM_PHYS_PORTS; i++) {
+ port = lan966x->ports[i];
+ if (!port)
+ continue;
+
+ err = setdel(port->dev, app);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int lan966x_dcb_app_validate(struct net_device *dev,
+ const struct dcb_app *app)
+{
+ int err = 0;
+
+ switch (app->selector) {
+ /* Default priority checks */
+ case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
+ if (app->protocol)
+ err = -EINVAL;
+ else if (app->priority >= NUM_PRIO_QUEUES)
+ err = -ERANGE;
+ break;
+ /* Dscp checks */
+ case IEEE_8021QAZ_APP_SEL_DSCP:
+ if (app->protocol >= LAN966X_PORT_QOS_DSCP_COUNT)
+ err = -EINVAL;
+ else if (app->priority >= NUM_PRIO_QUEUES)
+ err = -ERANGE;
+ break;
+ /* Pcp checks */
+ case DCB_APP_SEL_PCP:
+ if (app->protocol >= LAN966X_PORT_QOS_PCP_DEI_COUNT)
+ err = -EINVAL;
+ else if (app->priority >= NUM_PRIO_QUEUES)
+ err = -ERANGE;
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ if (err)
+ netdev_err(dev, "Invalid entry: %d:%d\n", app->protocol,
+ app->priority);
+
+ return err;
+}
+
+static int lan966x_dcb_ieee_delapp(struct net_device *dev, struct dcb_app *app)
+{
+ int err;
+
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
+ err = lan966x_dcb_ieee_dscp_setdel(dev, app, dcb_ieee_delapp);
+ else
+ err = dcb_ieee_delapp(dev, app);
+
+ if (err)
+ return err;
+
+ lan966x_dcb_app_update(dev);
+
+ return 0;
+}
+
+static int lan966x_dcb_ieee_setapp(struct net_device *dev, struct dcb_app *app)
+{
+ struct dcb_app app_itr;
+ int err;
+ u8 prio;
+
+ err = lan966x_dcb_app_validate(dev, app);
+ if (err)
+ return err;
+
+ /* Delete current mapping, if it exists */
+ prio = dcb_getapp(dev, app);
+ if (prio) {
+ app_itr = *app;
+ app_itr.priority = prio;
+ lan966x_dcb_ieee_delapp(dev, &app_itr);
+ }
+
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
+ err = lan966x_dcb_ieee_dscp_setdel(dev, app, dcb_ieee_setapp);
+ else
+ err = dcb_ieee_setapp(dev, app);
+
+ if (err)
+ return err;
+
+ lan966x_dcb_app_update(dev);
+
+ return 0;
+}
+
+static int lan966x_dcb_apptrust_validate(struct net_device *dev,
+ u8 *selectors,
+ int nselectors)
+{
+ for (int i = 0; i < ARRAY_SIZE(lan966x_dcb_apptrust_policies); i++) {
+ bool match;
+
+ if (lan966x_dcb_apptrust_policies[i].nselectors != nselectors)
+ continue;
+
+ match = true;
+ for (int j = 0; j < nselectors; j++) {
+ if (lan966x_dcb_apptrust_policies[i].selectors[j] !=
+ *(selectors + j)) {
+ match = false;
+ break;
+ }
+ }
+ if (match)
+ return i;
+ }
+
+ netdev_err(dev, "Valid apptrust configurations are:\n");
+ for (int i = 0; i < ARRAY_SIZE(lan966x_dcb_apptrust_names); i++)
+ pr_info("order: %s\n", lan966x_dcb_apptrust_names[i]);
+
+ return -EOPNOTSUPP;
+}
+
+static int lan966x_dcb_setapptrust(struct net_device *dev,
+ u8 *selectors,
+ int nselectors)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ int idx;
+
+ idx = lan966x_dcb_apptrust_validate(dev, selectors, nselectors);
+ if (idx < 0)
+ return idx;
+
+ lan966x_port_apptrust[port->chip_port] = &lan966x_dcb_apptrust_policies[idx];
+ lan966x_dcb_app_update(dev);
+
+ return 0;
+}
+
+static int lan966x_dcb_getapptrust(struct net_device *dev, u8 *selectors,
+ int *nselectors)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ const struct lan966x_dcb_apptrust *trust;
+
+ trust = lan966x_port_apptrust[port->chip_port];
+
+ memcpy(selectors, trust->selectors, trust->nselectors);
+ *nselectors = trust->nselectors;
+
+ return 0;
+}
+
+static int lan966x_dcb_delrewr(struct net_device *dev, struct dcb_app *app)
+{
+ int err;
+
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
+ err = lan966x_dcb_ieee_dscp_setdel(dev, app, dcb_delrewr);
+ else
+ err = dcb_delrewr(dev, app);
+
+ if (err < 0)
+ return err;
+
+ lan966x_dcb_app_update(dev);
+
+ return 0;
+}
+
+static int lan966x_dcb_setrewr(struct net_device *dev, struct dcb_app *app)
+{
+ struct dcb_app app_itr;
+ u16 proto;
+ int err;
+
+ err = lan966x_dcb_app_validate(dev, app);
+ if (err)
+ goto out;
+
+ /* Delete current mapping, if it exists. */
+ proto = dcb_getrewr(dev, app);
+ if (proto) {
+ app_itr = *app;
+ app_itr.protocol = proto;
+ lan966x_dcb_delrewr(dev, &app_itr);
+ }
+
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
+ err = lan966x_dcb_ieee_dscp_setdel(dev, app, dcb_setrewr);
+ else
+ err = dcb_setrewr(dev, app);
+
+ if (err)
+ goto out;
+
+ lan966x_dcb_app_update(dev);
+
+out:
+ return err;
+}
+
+static const struct dcbnl_rtnl_ops lan966x_dcbnl_ops = {
+ .ieee_setapp = lan966x_dcb_ieee_setapp,
+ .ieee_delapp = lan966x_dcb_ieee_delapp,
+ .dcbnl_setapptrust = lan966x_dcb_setapptrust,
+ .dcbnl_getapptrust = lan966x_dcb_getapptrust,
+ .dcbnl_setrewr = lan966x_dcb_setrewr,
+ .dcbnl_delrewr = lan966x_dcb_delrewr,
+};
+
+void lan966x_dcb_init(struct lan966x *lan966x)
+{
+ for (int p = 0; p < lan966x->num_phys_ports; ++p) {
+ struct lan966x_port *port;
+
+ port = lan966x->ports[p];
+ if (!port)
+ continue;
+
+ port->dev->dcbnl_ops = &lan966x_dcbnl_ops;
+
+ lan966x_port_apptrust[port->chip_port] =
+ &lan966x_dcb_apptrust_policies[LAN966X_DCB_APPTRUST_DSCP_PCP];
+
+ /* Enable DSCP classification based on classified QoS class and
+ * DP, for all DSCP values, for all ports.
+ */
+ lan966x_port_qos_dscp_rewr_mode_set(port,
+ LAN966X_PORT_QOS_REWR_DSCP_ALL);
+ }
+}
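
lan966x_dcb_apptrust_validate() above accepts a requested selector list only when it matches one of the four supported policies exactly, element for element and in order. The sketch below reproduces that matching logic on the host; SEL_DSCP and SEL_PCP are stand-in values for IEEE_8021QAZ_APP_SEL_DSCP and DCB_APP_SEL_PCP, not the kernel's real constants.

/* Host-side sketch of the apptrust policy matching; selector values
 * are stand-ins, not the kernel's DCB constants.
 */
#include <stdbool.h>
#include <stdio.h>

#define SEL_DSCP 5   /* stand-in for IEEE_8021QAZ_APP_SEL_DSCP */
#define SEL_PCP 255  /* stand-in for DCB_APP_SEL_PCP */

struct apptrust {
	unsigned char selectors[2];
	int nselectors;
};

static const struct apptrust policies[] = {
	{ { 0 }, 0 },                 /* empty, must be first */
	{ { SEL_DSCP }, 1 },          /* dscp */
	{ { SEL_PCP }, 1 },           /* pcp */
	{ { SEL_DSCP, SEL_PCP }, 2 }, /* dscp pcp */
};

/* Return the policy index, or -1 (the driver returns -EOPNOTSUPP) */
static int apptrust_match(const unsigned char *sel, int n)
{
	for (int i = 0; i < (int)(sizeof(policies) / sizeof(policies[0])); i++) {
		bool match = true;

		if (policies[i].nselectors != n)
			continue;

		for (int j = 0; j < n; j++)
			if (policies[i].selectors[j] != sel[j])
				match = false;

		if (match)
			return i;
	}
	return -1;
}

int main(void)
{
	unsigned char req[] = { SEL_DSCP, SEL_PCP };

	printf("policy index: %d\n", apptrust_match(req, 2)); /* prints 3 */
	return 0;
}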
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
new file mode 100644
index 0000000000..06811c60d5
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
@@ -0,0 +1,727 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/netdevice.h>
+
+#include "lan966x_main.h"
+
+/* Number of traffic classes */
+#define LAN966X_NUM_TC 8
+#define LAN966X_STATS_CHECK_DELAY (2 * HZ)
+
+static const struct lan966x_stat_layout lan966x_stats_layout[] = {
+ { .name = "rx_octets", .offset = 0x00, },
+ { .name = "rx_unicast", .offset = 0x01, },
+ { .name = "rx_multicast", .offset = 0x02 },
+ { .name = "rx_broadcast", .offset = 0x03 },
+ { .name = "rx_short", .offset = 0x04 },
+ { .name = "rx_frag", .offset = 0x05 },
+ { .name = "rx_jabber", .offset = 0x06 },
+ { .name = "rx_crc", .offset = 0x07 },
+ { .name = "rx_symbol_err", .offset = 0x08 },
+ { .name = "rx_sz_64", .offset = 0x09 },
+ { .name = "rx_sz_65_127", .offset = 0x0a},
+ { .name = "rx_sz_128_255", .offset = 0x0b},
+ { .name = "rx_sz_256_511", .offset = 0x0c },
+ { .name = "rx_sz_512_1023", .offset = 0x0d },
+ { .name = "rx_sz_1024_1526", .offset = 0x0e },
+ { .name = "rx_sz_jumbo", .offset = 0x0f },
+ { .name = "rx_pause", .offset = 0x10 },
+ { .name = "rx_control", .offset = 0x11 },
+ { .name = "rx_long", .offset = 0x12 },
+ { .name = "rx_cat_drop", .offset = 0x13 },
+ { .name = "rx_red_prio_0", .offset = 0x14 },
+ { .name = "rx_red_prio_1", .offset = 0x15 },
+ { .name = "rx_red_prio_2", .offset = 0x16 },
+ { .name = "rx_red_prio_3", .offset = 0x17 },
+ { .name = "rx_red_prio_4", .offset = 0x18 },
+ { .name = "rx_red_prio_5", .offset = 0x19 },
+ { .name = "rx_red_prio_6", .offset = 0x1a },
+ { .name = "rx_red_prio_7", .offset = 0x1b },
+ { .name = "rx_yellow_prio_0", .offset = 0x1c },
+ { .name = "rx_yellow_prio_1", .offset = 0x1d },
+ { .name = "rx_yellow_prio_2", .offset = 0x1e },
+ { .name = "rx_yellow_prio_3", .offset = 0x1f },
+ { .name = "rx_yellow_prio_4", .offset = 0x20 },
+ { .name = "rx_yellow_prio_5", .offset = 0x21 },
+ { .name = "rx_yellow_prio_6", .offset = 0x22 },
+ { .name = "rx_yellow_prio_7", .offset = 0x23 },
+ { .name = "rx_green_prio_0", .offset = 0x24 },
+ { .name = "rx_green_prio_1", .offset = 0x25 },
+ { .name = "rx_green_prio_2", .offset = 0x26 },
+ { .name = "rx_green_prio_3", .offset = 0x27 },
+ { .name = "rx_green_prio_4", .offset = 0x28 },
+ { .name = "rx_green_prio_5", .offset = 0x29 },
+ { .name = "rx_green_prio_6", .offset = 0x2a },
+ { .name = "rx_green_prio_7", .offset = 0x2b },
+ { .name = "rx_assembly_err", .offset = 0x2c },
+ { .name = "rx_smd_err", .offset = 0x2d },
+ { .name = "rx_assembly_ok", .offset = 0x2e },
+ { .name = "rx_merge_frag", .offset = 0x2f },
+ { .name = "rx_pmac_octets", .offset = 0x30, },
+ { .name = "rx_pmac_unicast", .offset = 0x31, },
+ { .name = "rx_pmac_multicast", .offset = 0x32 },
+ { .name = "rx_pmac_broadcast", .offset = 0x33 },
+ { .name = "rx_pmac_short", .offset = 0x34 },
+ { .name = "rx_pmac_frag", .offset = 0x35 },
+ { .name = "rx_pmac_jabber", .offset = 0x36 },
+ { .name = "rx_pmac_crc", .offset = 0x37 },
+ { .name = "rx_pmac_symbol_err", .offset = 0x38 },
+ { .name = "rx_pmac_sz_64", .offset = 0x39 },
+ { .name = "rx_pmac_sz_65_127", .offset = 0x3a },
+ { .name = "rx_pmac_sz_128_255", .offset = 0x3b },
+ { .name = "rx_pmac_sz_256_511", .offset = 0x3c },
+ { .name = "rx_pmac_sz_512_1023", .offset = 0x3d },
+ { .name = "rx_pmac_sz_1024_1526", .offset = 0x3e },
+ { .name = "rx_pmac_sz_jumbo", .offset = 0x3f },
+ { .name = "rx_pmac_pause", .offset = 0x40 },
+ { .name = "rx_pmac_control", .offset = 0x41 },
+ { .name = "rx_pmac_long", .offset = 0x42 },
+
+ { .name = "tx_octets", .offset = 0x80, },
+ { .name = "tx_unicast", .offset = 0x81, },
+ { .name = "tx_multicast", .offset = 0x82 },
+ { .name = "tx_broadcast", .offset = 0x83 },
+ { .name = "tx_col", .offset = 0x84 },
+ { .name = "tx_drop", .offset = 0x85 },
+ { .name = "tx_pause", .offset = 0x86 },
+ { .name = "tx_sz_64", .offset = 0x87 },
+ { .name = "tx_sz_65_127", .offset = 0x88 },
+ { .name = "tx_sz_128_255", .offset = 0x89 },
+ { .name = "tx_sz_256_511", .offset = 0x8a },
+ { .name = "tx_sz_512_1023", .offset = 0x8b },
+ { .name = "tx_sz_1024_1526", .offset = 0x8c },
+ { .name = "tx_sz_jumbo", .offset = 0x8d },
+ { .name = "tx_yellow_prio_0", .offset = 0x8e },
+ { .name = "tx_yellow_prio_1", .offset = 0x8f },
+ { .name = "tx_yellow_prio_2", .offset = 0x90 },
+ { .name = "tx_yellow_prio_3", .offset = 0x91 },
+ { .name = "tx_yellow_prio_4", .offset = 0x92 },
+ { .name = "tx_yellow_prio_5", .offset = 0x93 },
+ { .name = "tx_yellow_prio_6", .offset = 0x94 },
+ { .name = "tx_yellow_prio_7", .offset = 0x95 },
+ { .name = "tx_green_prio_0", .offset = 0x96 },
+ { .name = "tx_green_prio_1", .offset = 0x97 },
+ { .name = "tx_green_prio_2", .offset = 0x98 },
+ { .name = "tx_green_prio_3", .offset = 0x99 },
+ { .name = "tx_green_prio_4", .offset = 0x9a },
+ { .name = "tx_green_prio_5", .offset = 0x9b },
+ { .name = "tx_green_prio_6", .offset = 0x9c },
+ { .name = "tx_green_prio_7", .offset = 0x9d },
+ { .name = "tx_aged", .offset = 0x9e },
+ { .name = "tx_llct", .offset = 0x9f },
+ { .name = "tx_ct", .offset = 0xa0 },
+ { .name = "tx_mm_hold", .offset = 0xa1 },
+ { .name = "tx_merge_frag", .offset = 0xa2 },
+ { .name = "tx_pmac_octets", .offset = 0xa3, },
+ { .name = "tx_pmac_unicast", .offset = 0xa4, },
+ { .name = "tx_pmac_multicast", .offset = 0xa5 },
+ { .name = "tx_pmac_broadcast", .offset = 0xa6 },
+ { .name = "tx_pmac_pause", .offset = 0xa7 },
+ { .name = "tx_pmac_sz_64", .offset = 0xa8 },
+ { .name = "tx_pmac_sz_65_127", .offset = 0xa9 },
+ { .name = "tx_pmac_sz_128_255", .offset = 0xaa },
+ { .name = "tx_pmac_sz_256_511", .offset = 0xab },
+ { .name = "tx_pmac_sz_512_1023", .offset = 0xac },
+ { .name = "tx_pmac_sz_1024_1526", .offset = 0xad },
+ { .name = "tx_pmac_sz_jumbo", .offset = 0xae },
+
+ { .name = "dr_local", .offset = 0x100 },
+ { .name = "dr_tail", .offset = 0x101 },
+ { .name = "dr_yellow_prio_0", .offset = 0x102 },
+ { .name = "dr_yellow_prio_1", .offset = 0x103 },
+ { .name = "dr_yellow_prio_2", .offset = 0x104 },
+ { .name = "dr_yellow_prio_3", .offset = 0x105 },
+ { .name = "dr_yellow_prio_4", .offset = 0x106 },
+ { .name = "dr_yellow_prio_5", .offset = 0x107 },
+ { .name = "dr_yellow_prio_6", .offset = 0x108 },
+ { .name = "dr_yellow_prio_7", .offset = 0x109 },
+ { .name = "dr_green_prio_0", .offset = 0x10a },
+ { .name = "dr_green_prio_1", .offset = 0x10b },
+ { .name = "dr_green_prio_2", .offset = 0x10c },
+ { .name = "dr_green_prio_3", .offset = 0x10d },
+ { .name = "dr_green_prio_4", .offset = 0x10e },
+ { .name = "dr_green_prio_5", .offset = 0x10f },
+ { .name = "dr_green_prio_6", .offset = 0x110 },
+ { .name = "dr_green_prio_7", .offset = 0x111 },
+};
+
+/* The following numbers are indexes into lan966x_stats_layout[] */
+#define SYS_COUNT_RX_OCT 0
+#define SYS_COUNT_RX_UC 1
+#define SYS_COUNT_RX_MC 2
+#define SYS_COUNT_RX_BC 3
+#define SYS_COUNT_RX_SHORT 4
+#define SYS_COUNT_RX_FRAG 5
+#define SYS_COUNT_RX_JABBER 6
+#define SYS_COUNT_RX_CRC 7
+#define SYS_COUNT_RX_SYMBOL_ERR 8
+#define SYS_COUNT_RX_SZ_64 9
+#define SYS_COUNT_RX_SZ_65_127 10
+#define SYS_COUNT_RX_SZ_128_255 11
+#define SYS_COUNT_RX_SZ_256_511 12
+#define SYS_COUNT_RX_SZ_512_1023 13
+#define SYS_COUNT_RX_SZ_1024_1526 14
+#define SYS_COUNT_RX_SZ_JUMBO 15
+#define SYS_COUNT_RX_PAUSE 16
+#define SYS_COUNT_RX_CONTROL 17
+#define SYS_COUNT_RX_LONG 18
+#define SYS_COUNT_RX_CAT_DROP 19
+#define SYS_COUNT_RX_RED_PRIO_0 20
+#define SYS_COUNT_RX_RED_PRIO_1 21
+#define SYS_COUNT_RX_RED_PRIO_2 22
+#define SYS_COUNT_RX_RED_PRIO_3 23
+#define SYS_COUNT_RX_RED_PRIO_4 24
+#define SYS_COUNT_RX_RED_PRIO_5 25
+#define SYS_COUNT_RX_RED_PRIO_6 26
+#define SYS_COUNT_RX_RED_PRIO_7 27
+#define SYS_COUNT_RX_YELLOW_PRIO_0 28
+#define SYS_COUNT_RX_YELLOW_PRIO_1 29
+#define SYS_COUNT_RX_YELLOW_PRIO_2 30
+#define SYS_COUNT_RX_YELLOW_PRIO_3 31
+#define SYS_COUNT_RX_YELLOW_PRIO_4 32
+#define SYS_COUNT_RX_YELLOW_PRIO_5 33
+#define SYS_COUNT_RX_YELLOW_PRIO_6 34
+#define SYS_COUNT_RX_YELLOW_PRIO_7 35
+#define SYS_COUNT_RX_GREEN_PRIO_0 36
+#define SYS_COUNT_RX_GREEN_PRIO_1 37
+#define SYS_COUNT_RX_GREEN_PRIO_2 38
+#define SYS_COUNT_RX_GREEN_PRIO_3 39
+#define SYS_COUNT_RX_GREEN_PRIO_4 40
+#define SYS_COUNT_RX_GREEN_PRIO_5 41
+#define SYS_COUNT_RX_GREEN_PRIO_6 42
+#define SYS_COUNT_RX_GREEN_PRIO_7 43
+#define SYS_COUNT_RX_ASSEMBLY_ERR 44
+#define SYS_COUNT_RX_SMD_ERR 45
+#define SYS_COUNT_RX_ASSEMBLY_OK 46
+#define SYS_COUNT_RX_MERGE_FRAG 47
+#define SYS_COUNT_RX_PMAC_OCT 48
+#define SYS_COUNT_RX_PMAC_UC 49
+#define SYS_COUNT_RX_PMAC_MC 50
+#define SYS_COUNT_RX_PMAC_BC 51
+#define SYS_COUNT_RX_PMAC_SHORT 52
+#define SYS_COUNT_RX_PMAC_FRAG 53
+#define SYS_COUNT_RX_PMAC_JABBER 54
+#define SYS_COUNT_RX_PMAC_CRC 55
+#define SYS_COUNT_RX_PMAC_SYMBOL_ERR 56
+#define SYS_COUNT_RX_PMAC_SZ_64 57
+#define SYS_COUNT_RX_PMAC_SZ_65_127 58
+#define SYS_COUNT_RX_PMAC_SZ_128_255 59
+#define SYS_COUNT_RX_PMAC_SZ_256_511 60
+#define SYS_COUNT_RX_PMAC_SZ_512_1023 61
+#define SYS_COUNT_RX_PMAC_SZ_1024_1526 62
+#define SYS_COUNT_RX_PMAC_SZ_JUMBO 63
+#define SYS_COUNT_RX_PMAC_PAUSE 64
+#define SYS_COUNT_RX_PMAC_CONTROL 65
+#define SYS_COUNT_RX_PMAC_LONG 66
+
+#define SYS_COUNT_TX_OCT 67
+#define SYS_COUNT_TX_UC 68
+#define SYS_COUNT_TX_MC 69
+#define SYS_COUNT_TX_BC 70
+#define SYS_COUNT_TX_COL 71
+#define SYS_COUNT_TX_DROP 72
+#define SYS_COUNT_TX_PAUSE 73
+#define SYS_COUNT_TX_SZ_64 74
+#define SYS_COUNT_TX_SZ_65_127 75
+#define SYS_COUNT_TX_SZ_128_255 76
+#define SYS_COUNT_TX_SZ_256_511 77
+#define SYS_COUNT_TX_SZ_512_1023 78
+#define SYS_COUNT_TX_SZ_1024_1526 79
+#define SYS_COUNT_TX_SZ_JUMBO 80
+#define SYS_COUNT_TX_YELLOW_PRIO_0 81
+#define SYS_COUNT_TX_YELLOW_PRIO_1 82
+#define SYS_COUNT_TX_YELLOW_PRIO_2 83
+#define SYS_COUNT_TX_YELLOW_PRIO_3 84
+#define SYS_COUNT_TX_YELLOW_PRIO_4 85
+#define SYS_COUNT_TX_YELLOW_PRIO_5 86
+#define SYS_COUNT_TX_YELLOW_PRIO_6 87
+#define SYS_COUNT_TX_YELLOW_PRIO_7 88
+#define SYS_COUNT_TX_GREEN_PRIO_0 89
+#define SYS_COUNT_TX_GREEN_PRIO_1 90
+#define SYS_COUNT_TX_GREEN_PRIO_2 91
+#define SYS_COUNT_TX_GREEN_PRIO_3 92
+#define SYS_COUNT_TX_GREEN_PRIO_4 93
+#define SYS_COUNT_TX_GREEN_PRIO_5 94
+#define SYS_COUNT_TX_GREEN_PRIO_6 95
+#define SYS_COUNT_TX_GREEN_PRIO_7 96
+#define SYS_COUNT_TX_AGED 97
+#define SYS_COUNT_TX_LLCT 98
+#define SYS_COUNT_TX_CT 99
+#define SYS_COUNT_TX_MM_HOLD 100
+#define SYS_COUNT_TX_MERGE_FRAG 101
+#define SYS_COUNT_TX_PMAC_OCT 102
+#define SYS_COUNT_TX_PMAC_UC 103
+#define SYS_COUNT_TX_PMAC_MC 104
+#define SYS_COUNT_TX_PMAC_BC 105
+#define SYS_COUNT_TX_PMAC_PAUSE 106
+#define SYS_COUNT_TX_PMAC_SZ_64 107
+#define SYS_COUNT_TX_PMAC_SZ_65_127 108
+#define SYS_COUNT_TX_PMAC_SZ_128_255 109
+#define SYS_COUNT_TX_PMAC_SZ_256_511 110
+#define SYS_COUNT_TX_PMAC_SZ_512_1023 111
+#define SYS_COUNT_TX_PMAC_SZ_1024_1526 112
+#define SYS_COUNT_TX_PMAC_SZ_JUMBO 113
+
+#define SYS_COUNT_DR_LOCAL 114
+#define SYS_COUNT_DR_TAIL 115
+#define SYS_COUNT_DR_YELLOW_PRIO_0 116
+#define SYS_COUNT_DR_YELLOW_PRIO_1 117
+#define SYS_COUNT_DR_YELLOW_PRIO_2 118
+#define SYS_COUNT_DR_YELLOW_PRIO_3 119
+#define SYS_COUNT_DR_YELLOW_PRIO_4 120
+#define SYS_COUNT_DR_YELLOW_PRIO_5 121
+#define SYS_COUNT_DR_YELLOW_PRIO_6 122
+#define SYS_COUNT_DR_YELLOW_PRIO_7 123
+#define SYS_COUNT_DR_GREEN_PRIO_0 124
+#define SYS_COUNT_DR_GREEN_PRIO_1 125
+#define SYS_COUNT_DR_GREEN_PRIO_2 126
+#define SYS_COUNT_DR_GREEN_PRIO_3 127
+#define SYS_COUNT_DR_GREEN_PRIO_4 128
+#define SYS_COUNT_DR_GREEN_PRIO_5 129
+#define SYS_COUNT_DR_GREEN_PRIO_6 130
+#define SYS_COUNT_DR_GREEN_PRIO_7 131
+
+/* Add a possibly wrapping 32 bit value to a 64 bit counter */
+static void lan966x_add_cnt(u64 *cnt, u32 val)
+{
+ if (val < (*cnt & U32_MAX))
+ *cnt += (u64)1 << 32; /* value has wrapped */
+
+ *cnt = (*cnt & ~(u64)U32_MAX) + val;
+}
+
+static void lan966x_stats_update(struct lan966x *lan966x)
+{
+ int i, j;
+
+ mutex_lock(&lan966x->stats_lock);
+
+ for (i = 0; i < lan966x->num_phys_ports; i++) {
+ uint idx = i * lan966x->num_stats;
+
+ lan_wr(SYS_STAT_CFG_STAT_VIEW_SET(i),
+ lan966x, SYS_STAT_CFG);
+
+ for (j = 0; j < lan966x->num_stats; j++) {
+ u32 offset = lan966x->stats_layout[j].offset;
+
+ lan966x_add_cnt(&lan966x->stats[idx++],
+ lan_rd(lan966x, SYS_CNT(offset)));
+ }
+ }
+
+ mutex_unlock(&lan966x->stats_lock);
+}
+
+static int lan966x_get_sset_count(struct net_device *dev, int sset)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ return lan966x->num_stats;
+}
+
+static void lan966x_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ struct lan966x_port *port = netdev_priv(netdev);
+ struct lan966x *lan966x = port->lan966x;
+ int i;
+
+ if (sset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < lan966x->num_stats; i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ lan966x->stats_layout[i].name, ETH_GSTRING_LEN);
+}
+
+static void lan966x_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ int i;
+
+ /* check and update now */
+ lan966x_stats_update(lan966x);
+
+ /* Copy all counters */
+ for (i = 0; i < lan966x->num_stats; i++)
+ *data++ = lan966x->stats[port->chip_port *
+ lan966x->num_stats + i];
+}
+
+static void lan966x_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ u32 idx;
+
+ lan966x_stats_update(lan966x);
+
+ idx = port->chip_port * lan966x->num_stats;
+
+ mutex_lock(&lan966x->stats_lock);
+
+ mac_stats->FramesTransmittedOK =
+ lan966x->stats[idx + SYS_COUNT_TX_UC] +
+ lan966x->stats[idx + SYS_COUNT_TX_MC] +
+ lan966x->stats[idx + SYS_COUNT_TX_BC] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_UC] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_MC] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_BC];
+ mac_stats->SingleCollisionFrames =
+ lan966x->stats[idx + SYS_COUNT_TX_COL];
+ mac_stats->MultipleCollisionFrames = 0;
+ mac_stats->FramesReceivedOK =
+ lan966x->stats[idx + SYS_COUNT_RX_UC] +
+ lan966x->stats[idx + SYS_COUNT_RX_MC] +
+ lan966x->stats[idx + SYS_COUNT_RX_BC];
+ mac_stats->FrameCheckSequenceErrors =
+ lan966x->stats[idx + SYS_COUNT_RX_CRC] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_CRC];
+ mac_stats->AlignmentErrors = 0;
+ mac_stats->OctetsTransmittedOK =
+ lan966x->stats[idx + SYS_COUNT_TX_OCT] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_OCT];
+ mac_stats->FramesWithDeferredXmissions =
+ lan966x->stats[idx + SYS_COUNT_TX_MM_HOLD];
+ mac_stats->LateCollisions = 0;
+ mac_stats->FramesAbortedDueToXSColls = 0;
+ mac_stats->FramesLostDueToIntMACXmitError = 0;
+ mac_stats->CarrierSenseErrors = 0;
+ mac_stats->OctetsReceivedOK =
+ lan966x->stats[idx + SYS_COUNT_RX_OCT];
+ mac_stats->FramesLostDueToIntMACRcvError = 0;
+ mac_stats->MulticastFramesXmittedOK =
+ lan966x->stats[idx + SYS_COUNT_TX_MC] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_MC];
+ mac_stats->BroadcastFramesXmittedOK =
+ lan966x->stats[idx + SYS_COUNT_TX_BC] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_BC];
+ mac_stats->FramesWithExcessiveDeferral = 0;
+ mac_stats->MulticastFramesReceivedOK =
+ lan966x->stats[idx + SYS_COUNT_RX_MC];
+ mac_stats->BroadcastFramesReceivedOK =
+ lan966x->stats[idx + SYS_COUNT_RX_BC];
+ mac_stats->InRangeLengthErrors =
+ lan966x->stats[idx + SYS_COUNT_RX_FRAG] +
+ lan966x->stats[idx + SYS_COUNT_RX_JABBER] +
+ lan966x->stats[idx + SYS_COUNT_RX_CRC] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_FRAG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_JABBER] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_CRC];
+ mac_stats->OutOfRangeLengthField =
+ lan966x->stats[idx + SYS_COUNT_RX_SHORT] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SHORT] +
+ lan966x->stats[idx + SYS_COUNT_RX_LONG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG];
+ mac_stats->FrameTooLongErrors =
+ lan966x->stats[idx + SYS_COUNT_RX_LONG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG];
+
+ mutex_unlock(&lan966x->stats_lock);
+}
+
+static const struct ethtool_rmon_hist_range lan966x_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 10239 },
+ {}
+};
+
+static void lan966x_get_eth_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ u32 idx;
+
+ lan966x_stats_update(lan966x);
+
+ idx = port->chip_port * lan966x->num_stats;
+
+ mutex_lock(&lan966x->stats_lock);
+
+ rmon_stats->undersize_pkts =
+ lan966x->stats[idx + SYS_COUNT_RX_SHORT] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SHORT];
+ rmon_stats->oversize_pkts =
+ lan966x->stats[idx + SYS_COUNT_RX_LONG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG];
+ rmon_stats->fragments =
+ lan966x->stats[idx + SYS_COUNT_RX_FRAG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_FRAG];
+ rmon_stats->jabbers =
+ lan966x->stats[idx + SYS_COUNT_RX_JABBER] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_JABBER];
+ rmon_stats->hist[0] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_64] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_64];
+ rmon_stats->hist[1] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_65_127] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_65_127];
+ rmon_stats->hist[2] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_128_255] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_128_255];
+ rmon_stats->hist[3] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_256_511] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_256_511];
+ rmon_stats->hist[4] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_512_1023] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_512_1023];
+ rmon_stats->hist[5] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_1024_1526] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_1024_1526];
+ rmon_stats->hist[6] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_JUMBO] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_JUMBO];
+
+ rmon_stats->hist_tx[0] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_64] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_64];
+ rmon_stats->hist_tx[1] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_65_127] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_65_127];
+ rmon_stats->hist_tx[2] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_128_255] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_128_255];
+ rmon_stats->hist_tx[3] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_256_511] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_256_511];
+ rmon_stats->hist_tx[4] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_512_1023] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_512_1023];
+ rmon_stats->hist_tx[5] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_1024_1526] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_1024_1526];
+ rmon_stats->hist_tx[6] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_JUMBO] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_JUMBO];
+
+ mutex_unlock(&lan966x->stats_lock);
+
+ *ranges = lan966x_rmon_ranges;
+}
+
+static int lan966x_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct lan966x_port *port = netdev_priv(ndev);
+
+ return phylink_ethtool_ksettings_get(port->phylink, cmd);
+}
+
+static int lan966x_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct lan966x_port *port = netdev_priv(ndev);
+
+ return phylink_ethtool_ksettings_set(port->phylink, cmd);
+}
+
+static void lan966x_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ phylink_ethtool_get_pauseparam(port->phylink, pause);
+}
+
+static int lan966x_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ return phylink_ethtool_set_pauseparam(port->phylink, pause);
+}
+
+static int lan966x_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_phc *phc;
+
+ if (!lan966x->ptp)
+ return ethtool_op_get_ts_info(dev, info);
+
+ phc = &lan966x->phc[LAN966X_PHC_PORT];
+
+ info->phc_index = phc->clock ? ptp_clock_index(phc->clock) : -1;
+ if (info->phc_index == -1) {
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ return 0;
+ }
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
+ BIT(HWTSTAMP_TX_ONESTEP_SYNC);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+const struct ethtool_ops lan966x_ethtool_ops = {
+ .get_link_ksettings = lan966x_get_link_ksettings,
+ .set_link_ksettings = lan966x_set_link_ksettings,
+ .get_pauseparam = lan966x_get_pauseparam,
+ .set_pauseparam = lan966x_set_pauseparam,
+ .get_sset_count = lan966x_get_sset_count,
+ .get_strings = lan966x_get_strings,
+ .get_ethtool_stats = lan966x_get_ethtool_stats,
+ .get_eth_mac_stats = lan966x_get_eth_mac_stats,
+ .get_rmon_stats = lan966x_get_eth_rmon_stats,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = lan966x_get_ts_info,
+};
+
+static void lan966x_check_stats_work(struct work_struct *work)
+{
+ struct delayed_work *del_work = to_delayed_work(work);
+ struct lan966x *lan966x = container_of(del_work, struct lan966x,
+ stats_work);
+
+ lan966x_stats_update(lan966x);
+
+ queue_delayed_work(lan966x->stats_queue, &lan966x->stats_work,
+ LAN966X_STATS_CHECK_DELAY);
+}
+
+void lan966x_stats_get(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ u32 idx;
+ int i;
+
+ idx = port->chip_port * lan966x->num_stats;
+
+ mutex_lock(&lan966x->stats_lock);
+
+ stats->rx_bytes = lan966x->stats[idx + SYS_COUNT_RX_OCT] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_OCT];
+
+ stats->rx_packets = lan966x->stats[idx + SYS_COUNT_RX_SHORT] +
+ lan966x->stats[idx + SYS_COUNT_RX_FRAG] +
+ lan966x->stats[idx + SYS_COUNT_RX_JABBER] +
+ lan966x->stats[idx + SYS_COUNT_RX_CRC] +
+ lan966x->stats[idx + SYS_COUNT_RX_SYMBOL_ERR] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_64] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_65_127] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_128_255] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_256_511] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_512_1023] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_1024_1526] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_JUMBO] +
+ lan966x->stats[idx + SYS_COUNT_RX_LONG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SHORT] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_FRAG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_JABBER] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_64] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_65_127] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_128_255] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_256_511] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_512_1023] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_1024_1526] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_JUMBO];
+
+ stats->multicast = lan966x->stats[idx + SYS_COUNT_RX_MC] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_MC];
+
+ stats->rx_errors = lan966x->stats[idx + SYS_COUNT_RX_SHORT] +
+ lan966x->stats[idx + SYS_COUNT_RX_FRAG] +
+ lan966x->stats[idx + SYS_COUNT_RX_JABBER] +
+ lan966x->stats[idx + SYS_COUNT_RX_CRC] +
+ lan966x->stats[idx + SYS_COUNT_RX_SYMBOL_ERR] +
+ lan966x->stats[idx + SYS_COUNT_RX_LONG];
+
+ stats->rx_dropped = dev->stats.rx_dropped +
+ lan966x->stats[idx + SYS_COUNT_RX_LONG] +
+ lan966x->stats[idx + SYS_COUNT_DR_LOCAL] +
+ lan966x->stats[idx + SYS_COUNT_DR_TAIL] +
+ lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_0] +
+ lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_1] +
+ lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_2] +
+ lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_3] +
+ lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_4] +
+ lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_5] +
+ lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_6] +
+ lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_7];
+
+ for (i = 0; i < LAN966X_NUM_TC; i++) {
+ stats->rx_dropped +=
+ (lan966x->stats[idx + SYS_COUNT_DR_YELLOW_PRIO_0 + i] +
+ lan966x->stats[idx + SYS_COUNT_DR_GREEN_PRIO_0 + i]);
+ }
+
+ /* Get Tx stats */
+ stats->tx_bytes = lan966x->stats[idx + SYS_COUNT_TX_OCT] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_OCT];
+
+ stats->tx_packets = lan966x->stats[idx + SYS_COUNT_TX_SZ_64] +
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_65_127] +
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_128_255] +
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_256_511] +
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_512_1023] +
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_1024_1526] +
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_JUMBO] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_64] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_65_127] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_128_255] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_256_511] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_512_1023] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_1024_1526] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_JUMBO];
+
+ stats->tx_dropped = lan966x->stats[idx + SYS_COUNT_TX_DROP] +
+ lan966x->stats[idx + SYS_COUNT_TX_AGED];
+
+ stats->collisions = lan966x->stats[idx + SYS_COUNT_TX_COL];
+
+ mutex_unlock(&lan966x->stats_lock);
+}
+
+int lan966x_stats_init(struct lan966x *lan966x)
+{
+ char queue_name[32];
+
+ lan966x->stats_layout = lan966x_stats_layout;
+ lan966x->num_stats = ARRAY_SIZE(lan966x_stats_layout);
+ lan966x->stats = devm_kcalloc(lan966x->dev, lan966x->num_phys_ports *
+ lan966x->num_stats,
+ sizeof(u64), GFP_KERNEL);
+ if (!lan966x->stats)
+ return -ENOMEM;
+
+ /* Init stats worker */
+ mutex_init(&lan966x->stats_lock);
+ snprintf(queue_name, sizeof(queue_name), "%s-stats",
+ dev_name(lan966x->dev));
+ lan966x->stats_queue = create_singlethread_workqueue(queue_name);
+ if (!lan966x->stats_queue)
+ return -ENOMEM;
+
+ INIT_DELAYED_WORK(&lan966x->stats_work, lan966x_check_stats_work);
+ queue_delayed_work(lan966x->stats_queue, &lan966x->stats_work,
+ LAN966X_STATS_CHECK_DELAY);
+
+ return 0;
+}
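
lan966x_add_cnt() above folds each 32-bit hardware counter into a 64-bit software counter: a reading lower than the stored low word is taken to mean exactly one wrap since the previous poll, which the 2-second stats worker is assumed to make safe. A standalone demonstration of that folding:

/* Demonstration of the 32-to-64-bit counter folding used by
 * lan966x_add_cnt(); assumes at most one wrap between polls.
 */
#include <stdint.h>
#include <stdio.h>

static void add_cnt(uint64_t *cnt, uint32_t val)
{
	if (val < (uint32_t)*cnt)          /* low word went backwards */
		*cnt += (uint64_t)1 << 32; /* account for one wrap */

	*cnt = (*cnt & ~(uint64_t)0xffffffff) + val;
}

int main(void)
{
	uint64_t cnt = 0;

	add_cnt(&cnt, 0xfffffff0); /* reading near the 32-bit limit */
	add_cnt(&cnt, 0x00000010); /* counter wrapped in hardware */
	printf("0x%llx\n", (unsigned long long)cnt); /* 0x100000010 */
	return 0;
}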
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ets.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ets.c
new file mode 100644
index 0000000000..8310d3f354
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ets.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+#define DWRR_COST_BIT_WIDTH BIT(5)
+
+static u32 lan966x_ets_hw_cost(u32 w_min, u32 weight)
+{
+ u32 res;
+
+ /* Round half up: multiply by 16 before the division, add 8,
+ * then divide the result by 16 again
+ */
+ res = (((DWRR_COST_BIT_WIDTH << 4) * w_min / weight) + 8) >> 4;
+ return max_t(u32, 1, res) - 1;
+}
+
+int lan966x_ets_add(struct lan966x_port *port,
+ struct tc_ets_qopt_offload *qopt)
+{
+ struct tc_ets_qopt_offload_replace_params *params;
+ struct lan966x *lan966x = port->lan966x;
+ u32 w_min = 100;
+ u8 count = 0;
+ u32 se_idx;
+ u8 i;
+
+ /* Check the input */
+ if (qopt->parent != TC_H_ROOT)
+ return -EINVAL;
+
+ params = &qopt->replace_params;
+ if (params->bands != NUM_PRIO_QUEUES)
+ return -EINVAL;
+
+ for (i = 0; i < params->bands; ++i) {
+ /* In the switch the DWRR is always on the lowest consecutive
+ * priorities. Due to this, the first priority must map to the
+ * first DWRR band.
+ */
+ if (params->priomap[i] != (7 - i))
+ return -EINVAL;
+
+ if (params->quanta[i] && params->weights[i] == 0)
+ return -EINVAL;
+ }
+
+ se_idx = SE_IDX_PORT + port->chip_port;
+
+ /* Find minimum weight */
+ for (i = 0; i < params->bands; ++i) {
+ if (params->quanta[i] == 0)
+ continue;
+
+ w_min = min(w_min, params->weights[i]);
+ }
+
+ for (i = 0; i < params->bands; ++i) {
+ if (params->quanta[i] == 0)
+ continue;
+
+ ++count;
+
+ lan_wr(lan966x_ets_hw_cost(w_min, params->weights[i]),
+ lan966x, QSYS_SE_DWRR_CFG(se_idx, 7 - i));
+ }
+
+ lan_rmw(QSYS_SE_CFG_SE_DWRR_CNT_SET(count) |
+ QSYS_SE_CFG_SE_RR_ENA_SET(0),
+ QSYS_SE_CFG_SE_DWRR_CNT |
+ QSYS_SE_CFG_SE_RR_ENA,
+ lan966x, QSYS_SE_CFG(se_idx));
+
+ return 0;
+}
+
+int lan966x_ets_del(struct lan966x_port *port,
+ struct tc_ets_qopt_offload *qopt)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 se_idx;
+ int i;
+
+ se_idx = SE_IDX_PORT + port->chip_port;
+
+ for (i = 0; i < NUM_PRIO_QUEUES; ++i)
+ lan_wr(0, lan966x, QSYS_SE_DWRR_CFG(se_idx, i));
+
+ lan_rmw(QSYS_SE_CFG_SE_DWRR_CNT_SET(0) |
+ QSYS_SE_CFG_SE_RR_ENA_SET(0),
+ QSYS_SE_CFG_SE_DWRR_CNT |
+ QSYS_SE_CFG_SE_RR_ENA,
+ lan966x, QSYS_SE_CFG(se_idx));
+
+ return 0;
+}
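
lan966x_ets_hw_cost() above turns each band's weight into a DWRR cost relative to the minimum weight: the smallest weight gets the largest cost, the division is done in x16 fixed point so it rounds half up, and the register value is stored zero-based. A standalone sketch with illustrative weights:

/* Sketch of the DWRR cost calculation in lan966x_ets_hw_cost();
 * the band weights below are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define DWRR_COST_BIT_WIDTH (1u << 5) /* BIT(5) == 32 */

static uint32_t ets_hw_cost(uint32_t w_min, uint32_t weight)
{
	uint32_t res;

	/* Round half up in x16 fixed point, as the driver does */
	res = (((DWRR_COST_BIT_WIDTH << 4) * w_min / weight) + 8) >> 4;
	return (res > 1 ? res : 1) - 1; /* max_t(u32, 1, res) - 1 */
}

int main(void)
{
	uint32_t weights[] = { 10, 20, 40 }; /* hypothetical bands */
	uint32_t w_min = 10;                 /* minimum of the above */

	for (int i = 0; i < 3; i++)
		printf("weight %u -> cost %u\n",
		       weights[i], ets_hw_cost(w_min, weights[i]));
	/* weight 10 -> cost 31, weight 20 -> cost 15, weight 40 -> cost 7 */
	return 0;
}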
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c
new file mode 100644
index 0000000000..2ea263e893
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <net/switchdev.h>
+
+#include "lan966x_main.h"
+
+struct lan966x_fdb_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct net_device *dev;
+ struct net_device *orig_dev;
+ struct lan966x *lan966x;
+ unsigned long event;
+};
+
+struct lan966x_fdb_entry {
+ struct list_head list;
+ unsigned char mac[ETH_ALEN] __aligned(2);
+ u16 vid;
+ u32 references;
+};
+
+static struct lan966x_fdb_entry *
+lan966x_fdb_find_entry(struct lan966x *lan966x,
+ struct switchdev_notifier_fdb_info *fdb_info)
+{
+ struct lan966x_fdb_entry *fdb_entry;
+
+ list_for_each_entry(fdb_entry, &lan966x->fdb_entries, list) {
+ if (fdb_entry->vid == fdb_info->vid &&
+ ether_addr_equal(fdb_entry->mac, fdb_info->addr))
+ return fdb_entry;
+ }
+
+ return NULL;
+}
+
+static void lan966x_fdb_add_entry(struct lan966x *lan966x,
+ struct switchdev_notifier_fdb_info *fdb_info)
+{
+ struct lan966x_fdb_entry *fdb_entry;
+
+ fdb_entry = lan966x_fdb_find_entry(lan966x, fdb_info);
+ if (fdb_entry) {
+ fdb_entry->references++;
+ return;
+ }
+
+ fdb_entry = kzalloc(sizeof(*fdb_entry), GFP_KERNEL);
+ if (!fdb_entry)
+ return;
+
+ ether_addr_copy(fdb_entry->mac, fdb_info->addr);
+ fdb_entry->vid = fdb_info->vid;
+ fdb_entry->references = 1;
+ list_add_tail(&fdb_entry->list, &lan966x->fdb_entries);
+}
+
+static bool lan966x_fdb_del_entry(struct lan966x *lan966x,
+ struct switchdev_notifier_fdb_info *fdb_info)
+{
+ struct lan966x_fdb_entry *fdb_entry, *tmp;
+
+ list_for_each_entry_safe(fdb_entry, tmp, &lan966x->fdb_entries,
+ list) {
+ if (fdb_entry->vid == fdb_info->vid &&
+ ether_addr_equal(fdb_entry->mac, fdb_info->addr)) {
+ fdb_entry->references--;
+ if (!fdb_entry->references) {
+ list_del(&fdb_entry->list);
+ kfree(fdb_entry);
+ return true;
+ }
+ break;
+ }
+ }
+
+ return false;
+}
+
+void lan966x_fdb_write_entries(struct lan966x *lan966x, u16 vid)
+{
+ struct lan966x_fdb_entry *fdb_entry;
+
+ list_for_each_entry(fdb_entry, &lan966x->fdb_entries, list) {
+ if (fdb_entry->vid != vid)
+ continue;
+
+ lan966x_mac_cpu_learn(lan966x, fdb_entry->mac, fdb_entry->vid);
+ }
+}
+
+void lan966x_fdb_erase_entries(struct lan966x *lan966x, u16 vid)
+{
+ struct lan966x_fdb_entry *fdb_entry;
+
+ list_for_each_entry(fdb_entry, &lan966x->fdb_entries, list) {
+ if (fdb_entry->vid != vid)
+ continue;
+
+ lan966x_mac_cpu_forget(lan966x, fdb_entry->mac, fdb_entry->vid);
+ }
+}
+
+static void lan966x_fdb_purge_entries(struct lan966x *lan966x)
+{
+ struct lan966x_fdb_entry *fdb_entry, *tmp;
+
+ list_for_each_entry_safe(fdb_entry, tmp, &lan966x->fdb_entries, list) {
+ list_del(&fdb_entry->list);
+ kfree(fdb_entry);
+ }
+}
+
+int lan966x_fdb_init(struct lan966x *lan966x)
+{
+ INIT_LIST_HEAD(&lan966x->fdb_entries);
+ lan966x->fdb_work = alloc_ordered_workqueue("lan966x_order", 0);
+ if (!lan966x->fdb_work)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void lan966x_fdb_deinit(struct lan966x *lan966x)
+{
+ destroy_workqueue(lan966x->fdb_work);
+ lan966x_fdb_purge_entries(lan966x);
+}
+
+void lan966x_fdb_flush_workqueue(struct lan966x *lan966x)
+{
+ flush_workqueue(lan966x->fdb_work);
+}
+
+static void lan966x_fdb_port_event_work(struct lan966x_fdb_event_work *fdb_work)
+{
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct lan966x_port *port;
+ struct lan966x *lan966x;
+
+ lan966x = fdb_work->lan966x;
+ port = netdev_priv(fdb_work->orig_dev);
+ fdb_info = &fdb_work->fdb_info;
+
+ switch (fdb_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ if (!fdb_info->added_by_user)
+ break;
+ lan966x_mac_add_entry(lan966x, port, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ if (!fdb_info->added_by_user)
+ break;
+ lan966x_mac_del_entry(lan966x, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ }
+}
+
+static void lan966x_fdb_bridge_event_work(struct lan966x_fdb_event_work *fdb_work)
+{
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct lan966x *lan966x;
+ int ret;
+
+ lan966x = fdb_work->lan966x;
+ fdb_info = &fdb_work->fdb_info;
+
+ /* Handle the case where the notifier targeted the bridge device */
+ switch (fdb_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ /* If there is no front port in this vlan, there is no
+ * point to copy the frame to CPU because it would be
+ * just dropped at later point. So add it only if
+ * there is a port but it is required to store the fdb
+ * entry for later point when a port actually gets in
+ * the vlan.
+ */
+ lan966x_fdb_add_entry(lan966x, fdb_info);
+ if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
+ fdb_info->vid))
+ break;
+
+ lan966x_mac_cpu_learn(lan966x, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ ret = lan966x_fdb_del_entry(lan966x, fdb_info);
+ if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
+ fdb_info->vid))
+ break;
+
+ if (ret)
+ lan966x_mac_cpu_forget(lan966x, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ }
+}
+
+static void lan966x_fdb_lag_event_work(struct lan966x_fdb_event_work *fdb_work)
+{
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct lan966x_port *port;
+ struct lan966x *lan966x;
+
+ if (!lan966x_lag_first_port(fdb_work->orig_dev, fdb_work->dev))
+ return;
+
+ lan966x = fdb_work->lan966x;
+ port = netdev_priv(fdb_work->dev);
+ fdb_info = &fdb_work->fdb_info;
+
+ switch (fdb_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ if (!fdb_info->added_by_user)
+ break;
+ lan966x_mac_add_entry(lan966x, port, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ if (!fdb_info->added_by_user)
+ break;
+ lan966x_mac_del_entry(lan966x, fdb_info->addr, fdb_info->vid);
+ break;
+ }
+}
+
+static void lan966x_fdb_event_work(struct work_struct *work)
+{
+ struct lan966x_fdb_event_work *fdb_work =
+ container_of(work, struct lan966x_fdb_event_work, work);
+
+ if (lan966x_netdevice_check(fdb_work->orig_dev))
+ lan966x_fdb_port_event_work(fdb_work);
+ else if (netif_is_bridge_master(fdb_work->orig_dev))
+ lan966x_fdb_bridge_event_work(fdb_work);
+ else if (netif_is_lag_master(fdb_work->orig_dev))
+ lan966x_fdb_lag_event_work(fdb_work);
+
+ kfree(fdb_work->fdb_info.addr);
+ kfree(fdb_work);
+}
+
+int lan966x_handle_fdb(struct net_device *dev,
+ struct net_device *orig_dev,
+ unsigned long event, const void *ctx,
+ const struct switchdev_notifier_fdb_info *fdb_info)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_fdb_event_work *fdb_work;
+
+ if (ctx && ctx != port)
+ return 0;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ if (lan966x_netdevice_check(orig_dev) &&
+ !fdb_info->added_by_user)
+ break;
+
+ fdb_work = kzalloc(sizeof(*fdb_work), GFP_ATOMIC);
+ if (!fdb_work)
+ return -ENOMEM;
+
+ fdb_work->dev = dev;
+ fdb_work->orig_dev = orig_dev;
+ fdb_work->lan966x = lan966x;
+ fdb_work->event = event;
+ INIT_WORK(&fdb_work->work, lan966x_fdb_event_work);
+ memcpy(&fdb_work->fdb_info, fdb_info, sizeof(fdb_work->fdb_info));
+ fdb_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!fdb_work->fdb_info.addr)
+ goto err_addr_alloc;
+
+ ether_addr_copy((u8 *)fdb_work->fdb_info.addr, fdb_info->addr);
+
+ queue_work(lan966x->fdb_work, &fdb_work->work);
+ break;
+ }
+
+ return 0;
+err_addr_alloc:
+ kfree(fdb_work);
+ return -ENOMEM;
+}
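
The fdb code above keeps a software shadow list of bridge FDB entries, keyed by (MAC, VID) and reference-counted, so duplicate adds from several notifier sources do not install duplicate CPU-copy rules, and the rule is removed only when the last user goes away. A toy illustration of that refcounting scheme, using a fixed-size array instead of the driver's linked list:

/* Toy model of the refcounted FDB shadow list maintained by
 * lan966x_fdb_add_entry()/lan966x_fdb_del_entry().
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct fdb_entry {
	unsigned char mac[6];
	unsigned short vid;
	unsigned int references;
	bool used;
};

static struct fdb_entry entries[16]; /* toy fixed-size store */

static struct fdb_entry *fdb_find(const unsigned char *mac, unsigned short vid)
{
	for (int i = 0; i < 16; i++)
		if (entries[i].used && entries[i].vid == vid &&
		    !memcmp(entries[i].mac, mac, 6))
			return &entries[i];
	return NULL;
}

static void fdb_add(const unsigned char *mac, unsigned short vid)
{
	struct fdb_entry *e = fdb_find(mac, vid);

	if (e) {
		e->references++; /* duplicate add: bump refcount only */
		return;
	}
	for (int i = 0; i < 16; i++) {
		if (!entries[i].used) {
			memcpy(entries[i].mac, mac, 6);
			entries[i].vid = vid;
			entries[i].references = 1;
			entries[i].used = true;
			return;
		}
	}
}

/* Returns true when the last reference was dropped, i.e. when the
 * driver would also remove the CPU copy rule from the MAC table.
 */
static bool fdb_del(const unsigned char *mac, unsigned short vid)
{
	struct fdb_entry *e = fdb_find(mac, vid);

	if (!e)
		return false;
	if (--e->references)
		return false;
	e->used = false;
	return true;
}

int main(void)
{
	unsigned char mac[6] = { 0x02, 0, 0, 0, 0, 1 };

	fdb_add(mac, 100);
	fdb_add(mac, 100);
	printf("first del removes hw entry: %d\n", fdb_del(mac, 100));  /* 0 */
	printf("second del removes hw entry: %d\n", fdb_del(mac, 100)); /* 1 */
	return 0;
}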
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
new file mode 100644
index 0000000000..3960534ac2
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
@@ -0,0 +1,1073 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <net/page_pool/helpers.h>
+
+#include "lan966x_main.h"
+
+static int lan966x_fdma_channel_active(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, FDMA_CH_ACTIVE);
+}
+
+static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx,
+ struct lan966x_db *db)
+{
+ struct page *page;
+
+ page = page_pool_dev_alloc_pages(rx->page_pool);
+ if (unlikely(!page))
+ return NULL;
+
+ db->dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;
+
+ return page;
+}
+
+static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
+{
+ int i, j;
+
+ for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j)
+ page_pool_put_full_page(rx->page_pool,
+ rx->page[i][j], false);
+ }
+}
+
+static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx)
+{
+ struct page *page;
+
+ page = rx->page[rx->dcb_index][rx->db_index];
+ if (unlikely(!page))
+ return;
+
+ page_pool_recycle_direct(rx->page_pool, page);
+}
+
+static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx,
+ struct lan966x_rx_dcb *dcb,
+ u64 nextptr)
+{
+ struct lan966x_db *db;
+ int i;
+
+ for (i = 0; i < FDMA_RX_DCB_MAX_DBS; ++i) {
+ db = &dcb->db[i];
+ db->status = FDMA_DCB_STATUS_INTR;
+ }
+
+ dcb->nextptr = FDMA_DCB_INVALID_DATA;
+ dcb->info = FDMA_DCB_INFO_DATAL(PAGE_SIZE << rx->page_order);
+
+ rx->last_entry->nextptr = nextptr;
+ rx->last_entry = dcb;
+}
+
+static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ struct page_pool_params pp_params = {
+ .order = rx->page_order,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = FDMA_DCB_MAX,
+ .nid = NUMA_NO_NODE,
+ .dev = lan966x->dev,
+ .dma_dir = DMA_FROM_DEVICE,
+ .offset = XDP_PACKET_HEADROOM,
+ .max_len = rx->max_mtu -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+ };
+
+ if (lan966x_xdp_present(lan966x))
+ pp_params.dma_dir = DMA_BIDIRECTIONAL;
+
+ rx->page_pool = page_pool_create(&pp_params);
+
+ for (int i = 0; i < lan966x->num_phys_ports; ++i) {
+ struct lan966x_port *port;
+
+ if (!lan966x->ports[i])
+ continue;
+
+ port = lan966x->ports[i];
+ xdp_rxq_info_unreg_mem_model(&port->xdp_rxq);
+ xdp_rxq_info_reg_mem_model(&port->xdp_rxq, MEM_TYPE_PAGE_POOL,
+ rx->page_pool);
+ }
+
+ return PTR_ERR_OR_ZERO(rx->page_pool);
+}
+
+static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ struct lan966x_rx_dcb *dcb;
+ struct lan966x_db *db;
+ struct page *page;
+ int i, j;
+ int size;
+
+ if (lan966x_fdma_rx_alloc_page_pool(rx))
+ return PTR_ERR(rx->page_pool);
+
+ /* calculate how many pages are needed to allocate the dcbs */
+ size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+
+ rx->dcbs = dma_alloc_coherent(lan966x->dev, size, &rx->dma, GFP_KERNEL);
+ if (!rx->dcbs)
+ return -ENOMEM;
+
+ rx->last_entry = rx->dcbs;
+ rx->db_index = 0;
+ rx->dcb_index = 0;
+
+ /* Now for each dcb allocate the dbs */
+ for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ dcb = &rx->dcbs[i];
+ dcb->info = 0;
+
+ /* For each db allocate a page and map it to the DB dataptr. */
+ for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
+ db = &dcb->db[j];
+ page = lan966x_fdma_rx_alloc_page(rx, db);
+ if (!page)
+ return -ENOMEM;
+
+ db->status = 0;
+ rx->page[i][j] = page;
+ }
+
+ lan966x_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * i);
+ }
+
+ return 0;
+}
+
+static void lan966x_fdma_rx_advance_dcb(struct lan966x_rx *rx)
+{
+ rx->dcb_index++;
+ rx->dcb_index &= FDMA_DCB_MAX - 1;
+}
+
+static void lan966x_fdma_rx_free(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ u32 size;
+
+ /* Now it is possible to do the cleanup of dcb */
+ size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+ dma_free_coherent(lan966x->dev, size, rx->dcbs, rx->dma);
+}
+
+static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ u32 mask;
+
+	/* When activating a channel, the address of the first DCB must be
+	 * written before the channel is activated
+	 */
+ lan_wr(lower_32_bits((u64)rx->dma), lan966x,
+ FDMA_DCB_LLP(rx->channel_id));
+ lan_wr(upper_32_bits((u64)rx->dma), lan966x,
+ FDMA_DCB_LLP1(rx->channel_id));
+
+ lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) |
+ FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
+ FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
+ FDMA_CH_CFG_CH_MEM_SET(1),
+ lan966x, FDMA_CH_CFG(rx->channel_id));
+
+ /* Start fdma */
+ lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
+ FDMA_PORT_CTRL_XTR_STOP,
+ lan966x, FDMA_PORT_CTRL(0));
+
+ /* Enable interrupts */
+ mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
+ mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
+ mask |= BIT(rx->channel_id);
+ lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
+ FDMA_INTR_DB_ENA_INTR_DB_ENA,
+ lan966x, FDMA_INTR_DB_ENA);
+
+ /* Activate the channel */
+ lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(rx->channel_id)),
+ FDMA_CH_ACTIVATE_CH_ACTIVATE,
+ lan966x, FDMA_CH_ACTIVATE);
+}
+
+static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ u32 val;
+
+ /* Disable the channel */
+ lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(rx->channel_id)),
+ FDMA_CH_DISABLE_CH_DISABLE,
+ lan966x, FDMA_CH_DISABLE);
+
+ readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
+ val, !(val & BIT(rx->channel_id)),
+ READL_SLEEP_US, READL_TIMEOUT_US);
+
+ lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(rx->channel_id)),
+ FDMA_CH_DB_DISCARD_DB_DISCARD,
+ lan966x, FDMA_CH_DB_DISCARD);
+}
+
+static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+
+ lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->channel_id)),
+ FDMA_CH_RELOAD_CH_RELOAD,
+ lan966x, FDMA_CH_RELOAD);
+}
+
+static void lan966x_fdma_tx_add_dcb(struct lan966x_tx *tx,
+ struct lan966x_tx_dcb *dcb)
+{
+ dcb->nextptr = FDMA_DCB_INVALID_DATA;
+ dcb->info = 0;
+}
+
+static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
+{
+ struct lan966x *lan966x = tx->lan966x;
+ struct lan966x_tx_dcb *dcb;
+ struct lan966x_db *db;
+ int size;
+ int i, j;
+
+ tx->dcbs_buf = kcalloc(FDMA_DCB_MAX, sizeof(struct lan966x_tx_dcb_buf),
+ GFP_KERNEL);
+ if (!tx->dcbs_buf)
+ return -ENOMEM;
+
+ /* calculate how many pages are needed to allocate the dcbs */
+ size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+ tx->dcbs = dma_alloc_coherent(lan966x->dev, size, &tx->dma, GFP_KERNEL);
+ if (!tx->dcbs)
+ goto out;
+
+ /* Now for each dcb allocate the db */
+ for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ dcb = &tx->dcbs[i];
+
+ for (j = 0; j < FDMA_TX_DCB_MAX_DBS; ++j) {
+ db = &dcb->db[j];
+ db->dataptr = 0;
+ db->status = 0;
+ }
+
+ lan966x_fdma_tx_add_dcb(tx, dcb);
+ }
+
+ return 0;
+
+out:
+ kfree(tx->dcbs_buf);
+ return -ENOMEM;
+}
+
+static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
+{
+ struct lan966x *lan966x = tx->lan966x;
+ int size;
+
+ kfree(tx->dcbs_buf);
+
+ size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+ dma_free_coherent(lan966x->dev, size, tx->dcbs, tx->dma);
+}
+
+static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
+{
+ struct lan966x *lan966x = tx->lan966x;
+ u32 mask;
+
+	/* When activating a channel, the address of the first DCB must be
+	 * written before the channel is activated
+	 */
+ lan_wr(lower_32_bits((u64)tx->dma), lan966x,
+ FDMA_DCB_LLP(tx->channel_id));
+ lan_wr(upper_32_bits((u64)tx->dma), lan966x,
+ FDMA_DCB_LLP1(tx->channel_id));
+
+ lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) |
+ FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
+ FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
+ FDMA_CH_CFG_CH_MEM_SET(1),
+ lan966x, FDMA_CH_CFG(tx->channel_id));
+
+ /* Start fdma */
+ lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
+ FDMA_PORT_CTRL_INJ_STOP,
+ lan966x, FDMA_PORT_CTRL(0));
+
+ /* Enable interrupts */
+ mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
+ mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
+ mask |= BIT(tx->channel_id);
+ lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
+ FDMA_INTR_DB_ENA_INTR_DB_ENA,
+ lan966x, FDMA_INTR_DB_ENA);
+
+ /* Activate the channel */
+ lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(tx->channel_id)),
+ FDMA_CH_ACTIVATE_CH_ACTIVATE,
+ lan966x, FDMA_CH_ACTIVATE);
+}
+
+static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
+{
+ struct lan966x *lan966x = tx->lan966x;
+ u32 val;
+
+ /* Disable the channel */
+ lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(tx->channel_id)),
+ FDMA_CH_DISABLE_CH_DISABLE,
+ lan966x, FDMA_CH_DISABLE);
+
+ readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
+ val, !(val & BIT(tx->channel_id)),
+ READL_SLEEP_US, READL_TIMEOUT_US);
+
+ lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(tx->channel_id)),
+ FDMA_CH_DB_DISCARD_DB_DISCARD,
+ lan966x, FDMA_CH_DB_DISCARD);
+
+ tx->activated = false;
+ tx->last_in_use = -1;
+}
+
+static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
+{
+ struct lan966x *lan966x = tx->lan966x;
+
+ /* Write the registers to reload the channel */
+ lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->channel_id)),
+ FDMA_CH_RELOAD_CH_RELOAD,
+ lan966x, FDMA_CH_RELOAD);
+}
+
+static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x)
+{
+ struct lan966x_port *port;
+ int i;
+
+ for (i = 0; i < lan966x->num_phys_ports; ++i) {
+ port = lan966x->ports[i];
+ if (!port)
+ continue;
+
+ if (netif_queue_stopped(port->dev))
+ netif_wake_queue(port->dev);
+ }
+}
+
+static void lan966x_fdma_stop_netdev(struct lan966x *lan966x)
+{
+ struct lan966x_port *port;
+ int i;
+
+ for (i = 0; i < lan966x->num_phys_ports; ++i) {
+ port = lan966x->ports[i];
+ if (!port)
+ continue;
+
+ netif_stop_queue(port->dev);
+ }
+}
+
+static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
+{
+ struct lan966x_tx *tx = &lan966x->tx;
+ struct lan966x_rx *rx = &lan966x->rx;
+ struct lan966x_tx_dcb_buf *dcb_buf;
+ struct xdp_frame_bulk bq;
+ struct lan966x_db *db;
+ unsigned long flags;
+ bool clear = false;
+ int i;
+
+ xdp_frame_bulk_init(&bq);
+
+ spin_lock_irqsave(&lan966x->tx_lock, flags);
+ for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ dcb_buf = &tx->dcbs_buf[i];
+
+ if (!dcb_buf->used)
+ continue;
+
+ db = &tx->dcbs[i].db[0];
+ if (!(db->status & FDMA_DCB_STATUS_DONE))
+ continue;
+
+ dcb_buf->dev->stats.tx_packets++;
+ dcb_buf->dev->stats.tx_bytes += dcb_buf->len;
+
+ dcb_buf->used = false;
+ if (dcb_buf->use_skb) {
+ dma_unmap_single(lan966x->dev,
+ dcb_buf->dma_addr,
+ dcb_buf->len,
+ DMA_TO_DEVICE);
+
+ if (!dcb_buf->ptp)
+ napi_consume_skb(dcb_buf->data.skb, weight);
+ } else {
+ if (dcb_buf->xdp_ndo)
+ dma_unmap_single(lan966x->dev,
+ dcb_buf->dma_addr,
+ dcb_buf->len,
+ DMA_TO_DEVICE);
+
+ if (dcb_buf->xdp_ndo)
+ xdp_return_frame_bulk(dcb_buf->data.xdpf, &bq);
+ else
+ page_pool_recycle_direct(rx->page_pool,
+ dcb_buf->data.page);
+ }
+
+ clear = true;
+ }
+
+ xdp_flush_frame_bulk(&bq);
+
+ if (clear)
+ lan966x_fdma_wakeup_netdev(lan966x);
+
+ spin_unlock_irqrestore(&lan966x->tx_lock, flags);
+}
+
+static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
+{
+ struct lan966x_db *db;
+
+ /* Check if there is any data */
+ db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
+ if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE)))
+ return false;
+
+ return true;
+}
+
+static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ struct lan966x_port *port;
+ struct lan966x_db *db;
+ struct page *page;
+
+ db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
+ page = rx->page[rx->dcb_index][rx->db_index];
+ if (unlikely(!page))
+ return FDMA_ERROR;
+
+ dma_sync_single_for_cpu(lan966x->dev,
+ (dma_addr_t)db->dataptr + XDP_PACKET_HEADROOM,
+ FDMA_DCB_STATUS_BLOCKL(db->status),
+ DMA_FROM_DEVICE);
+
+ lan966x_ifh_get_src_port(page_address(page) + XDP_PACKET_HEADROOM,
+ src_port);
+ if (WARN_ON(*src_port >= lan966x->num_phys_ports))
+ return FDMA_ERROR;
+
+ port = lan966x->ports[*src_port];
+ if (!lan966x_xdp_port_present(port))
+ return FDMA_PASS;
+
+ return lan966x_xdp_run(port, page, FDMA_DCB_STATUS_BLOCKL(db->status));
+}
+
+static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
+ u64 src_port)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ struct lan966x_db *db;
+ struct sk_buff *skb;
+ struct page *page;
+ u64 timestamp;
+
+ /* Get the received frame and unmap it */
+ db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
+ page = rx->page[rx->dcb_index][rx->db_index];
+
+ skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order);
+ if (unlikely(!skb))
+ goto free_page;
+
+ skb_mark_for_recycle(skb);
+
+ skb_reserve(skb, XDP_PACKET_HEADROOM);
+ skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));
+
+ lan966x_ifh_get_timestamp(skb->data, &timestamp);
+
+ skb->dev = lan966x->ports[src_port]->dev;
+ skb_pull(skb, IFH_LEN_BYTES);
+
+ if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
+ skb_trim(skb, skb->len - ETH_FCS_LEN);
+
+ lan966x_ptp_rxtstamp(lan966x, skb, src_port, timestamp);
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ if (lan966x->bridge_mask & BIT(src_port)) {
+ skb->offload_fwd_mark = 1;
+
+ skb_reset_network_header(skb);
+ if (!lan966x_hw_offload(lan966x, src_port, skb))
+ skb->offload_fwd_mark = 0;
+ }
+
+ skb->dev->stats.rx_bytes += skb->len;
+ skb->dev->stats.rx_packets++;
+
+ return skb;
+
+free_page:
+ page_pool_recycle_direct(rx->page_pool, page);
+
+ return NULL;
+}
+
+static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
+{
+ struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
+ struct lan966x_rx *rx = &lan966x->rx;
+ int dcb_reload = rx->dcb_index;
+ struct lan966x_rx_dcb *old_dcb;
+ struct lan966x_db *db;
+ bool redirect = false;
+ struct sk_buff *skb;
+ struct page *page;
+ int counter = 0;
+ u64 src_port;
+ u64 nextptr;
+
+ lan966x_fdma_tx_clear_buf(lan966x, weight);
+
+	/* Get all the received frames */
+ while (counter < weight) {
+ if (!lan966x_fdma_rx_more_frames(rx))
+ break;
+
+ counter++;
+
+ switch (lan966x_fdma_rx_check_frame(rx, &src_port)) {
+ case FDMA_PASS:
+ break;
+ case FDMA_ERROR:
+ lan966x_fdma_rx_free_page(rx);
+ lan966x_fdma_rx_advance_dcb(rx);
+ goto allocate_new;
+ case FDMA_REDIRECT:
+ redirect = true;
+ fallthrough;
+ case FDMA_TX:
+ lan966x_fdma_rx_advance_dcb(rx);
+ continue;
+ case FDMA_DROP:
+ lan966x_fdma_rx_free_page(rx);
+ lan966x_fdma_rx_advance_dcb(rx);
+ continue;
+ }
+
+ skb = lan966x_fdma_rx_get_frame(rx, src_port);
+ lan966x_fdma_rx_advance_dcb(rx);
+ if (!skb)
+ goto allocate_new;
+
+ napi_gro_receive(&lan966x->napi, skb);
+ }
+
+allocate_new:
+ /* Allocate new pages and map them */
+ while (dcb_reload != rx->dcb_index) {
+ db = &rx->dcbs[dcb_reload].db[rx->db_index];
+ page = lan966x_fdma_rx_alloc_page(rx, db);
+ if (unlikely(!page))
+ break;
+ rx->page[dcb_reload][rx->db_index] = page;
+
+ old_dcb = &rx->dcbs[dcb_reload];
+ dcb_reload++;
+ dcb_reload &= FDMA_DCB_MAX - 1;
+
+ nextptr = rx->dma + ((unsigned long)old_dcb -
+ (unsigned long)rx->dcbs);
+ lan966x_fdma_rx_add_dcb(rx, old_dcb, nextptr);
+ lan966x_fdma_rx_reload(rx);
+ }
+
+ if (redirect)
+ xdp_do_flush();
+
+ if (counter < weight && napi_complete_done(napi, counter))
+ lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);
+
+ return counter;
+}
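+
+/* Note: writing 0xff to FDMA_INTR_DB_ENA in the poll routine above re-enables
+ * the doorbell interrupts for all channels once the NAPI budget is not
+ * exhausted and polling completes.
+ */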
+
+irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
+{
+ struct lan966x *lan966x = args;
+ u32 db, err, err_type;
+
+ db = lan_rd(lan966x, FDMA_INTR_DB);
+ err = lan_rd(lan966x, FDMA_INTR_ERR);
+
+ if (db) {
+ lan_wr(0, lan966x, FDMA_INTR_DB_ENA);
+ lan_wr(db, lan966x, FDMA_INTR_DB);
+
+ napi_schedule(&lan966x->napi);
+ }
+
+ if (err) {
+ err_type = lan_rd(lan966x, FDMA_ERRORS);
+
+ WARN(1, "Unexpected error: %d, error_type: %d\n", err, err_type);
+
+ lan_wr(err, lan966x, FDMA_INTR_ERR);
+ lan_wr(err_type, lan966x, FDMA_ERRORS);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
+{
+ struct lan966x_tx_dcb_buf *dcb_buf;
+ int i;
+
+ for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ dcb_buf = &tx->dcbs_buf[i];
+ if (!dcb_buf->used && i != tx->last_in_use)
+ return i;
+ }
+
+ return -1;
+}
+
+static void lan966x_fdma_tx_setup_dcb(struct lan966x_tx *tx,
+ int next_to_use, int len,
+ dma_addr_t dma_addr)
+{
+ struct lan966x_tx_dcb *next_dcb;
+ struct lan966x_db *next_db;
+
+ next_dcb = &tx->dcbs[next_to_use];
+ next_dcb->nextptr = FDMA_DCB_INVALID_DATA;
+
+ next_db = &next_dcb->db[0];
+ next_db->dataptr = dma_addr;
+ next_db->status = FDMA_DCB_STATUS_SOF |
+ FDMA_DCB_STATUS_EOF |
+ FDMA_DCB_STATUS_INTR |
+ FDMA_DCB_STATUS_BLOCKO(0) |
+ FDMA_DCB_STATUS_BLOCKL(len);
+}
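+
+/* Each frame is sent as a single data block: the DB set up above carries both
+ * the start-of-frame and end-of-frame flags, and BLOCKL holds the complete
+ * frame length, IFH included.
+ */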
+
+static void lan966x_fdma_tx_start(struct lan966x_tx *tx, int next_to_use)
+{
+ struct lan966x *lan966x = tx->lan966x;
+ struct lan966x_tx_dcb *dcb;
+
+ if (likely(lan966x->tx.activated)) {
+ /* Connect current dcb to the next db */
+ dcb = &tx->dcbs[tx->last_in_use];
+ dcb->nextptr = tx->dma + (next_to_use *
+ sizeof(struct lan966x_tx_dcb));
+
+ lan966x_fdma_tx_reload(tx);
+ } else {
+		/* First use of the channel, so just activate it */
+ lan966x->tx.activated = true;
+ lan966x_fdma_tx_activate(tx);
+ }
+
+	/* Remember this DCB as the last one in use */
+ tx->last_in_use = next_to_use;
+}
+
+int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_tx_dcb_buf *next_dcb_buf;
+ struct lan966x_tx *tx = &lan966x->tx;
+ struct xdp_frame *xdpf;
+ dma_addr_t dma_addr;
+ struct page *page;
+ int next_to_use;
+ __be32 *ifh;
+ int ret = 0;
+
+ spin_lock(&lan966x->tx_lock);
+
+ /* Get next index */
+ next_to_use = lan966x_fdma_get_next_dcb(tx);
+ if (next_to_use < 0) {
+ netif_stop_queue(port->dev);
+ ret = NETDEV_TX_BUSY;
+ goto out;
+ }
+
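+	/* Two callers reach this point: ndo_xdp_xmit passes an xdp_frame and
+	 * len == 0, while XDP_TX passes the original RX page and the frame
+	 * length in len. Each branch below builds its own IFH in the headroom
+	 * before handing the buffer to the FDMA.
+	 */
+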
+ /* Get the next buffer */
+ next_dcb_buf = &tx->dcbs_buf[next_to_use];
+
+ /* Generate new IFH */
+ if (!len) {
+ xdpf = ptr;
+
+ if (xdpf->headroom < IFH_LEN_BYTES) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+
+ ifh = xdpf->data - IFH_LEN_BYTES;
+ memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
+ lan966x_ifh_set_bypass(ifh, 1);
+ lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
+
+ dma_addr = dma_map_single(lan966x->dev,
+ xdpf->data - IFH_LEN_BYTES,
+ xdpf->len + IFH_LEN_BYTES,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(lan966x->dev, dma_addr)) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+
+ next_dcb_buf->data.xdpf = xdpf;
+ next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;
+
+ /* Setup next dcb */
+ lan966x_fdma_tx_setup_dcb(tx, next_to_use,
+ xdpf->len + IFH_LEN_BYTES,
+ dma_addr);
+ } else {
+ page = ptr;
+
+ ifh = page_address(page) + XDP_PACKET_HEADROOM;
+ memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
+ lan966x_ifh_set_bypass(ifh, 1);
+ lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
+
+ dma_addr = page_pool_get_dma_addr(page);
+ dma_sync_single_for_device(lan966x->dev,
+ dma_addr + XDP_PACKET_HEADROOM,
+ len + IFH_LEN_BYTES,
+ DMA_TO_DEVICE);
+
+ next_dcb_buf->data.page = page;
+ next_dcb_buf->len = len + IFH_LEN_BYTES;
+
+ /* Setup next dcb */
+ lan966x_fdma_tx_setup_dcb(tx, next_to_use,
+ len + IFH_LEN_BYTES,
+ dma_addr + XDP_PACKET_HEADROOM);
+ }
+
+ /* Fill up the buffer */
+ next_dcb_buf->use_skb = false;
+ next_dcb_buf->xdp_ndo = !len;
+ next_dcb_buf->dma_addr = dma_addr;
+ next_dcb_buf->used = true;
+ next_dcb_buf->ptp = false;
+ next_dcb_buf->dev = port->dev;
+
+ /* Start the transmission */
+ lan966x_fdma_tx_start(tx, next_to_use);
+
+out:
+ spin_unlock(&lan966x->tx_lock);
+
+ return ret;
+}
+
+int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_tx_dcb_buf *next_dcb_buf;
+ struct lan966x_tx *tx = &lan966x->tx;
+ int needed_headroom;
+ int needed_tailroom;
+ dma_addr_t dma_addr;
+ int next_to_use;
+ int err;
+
+ /* Get next index */
+ next_to_use = lan966x_fdma_get_next_dcb(tx);
+ if (next_to_use < 0) {
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (skb_put_padto(skb, ETH_ZLEN)) {
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ /* skb processing */
+ needed_headroom = max_t(int, IFH_LEN_BYTES - skb_headroom(skb), 0);
+ needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
+ if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
+ GFP_ATOMIC);
+ if (unlikely(err)) {
+ dev->stats.tx_dropped++;
+ err = NETDEV_TX_OK;
+ goto release;
+ }
+ }
+
+ skb_tx_timestamp(skb);
+ skb_push(skb, IFH_LEN_BYTES);
+ memcpy(skb->data, ifh, IFH_LEN_BYTES);
+	skb_put(skb, ETH_FCS_LEN);
+
+ dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(lan966x->dev, dma_addr)) {
+ dev->stats.tx_dropped++;
+ err = NETDEV_TX_OK;
+ goto release;
+ }
+
+ /* Setup next dcb */
+ lan966x_fdma_tx_setup_dcb(tx, next_to_use, skb->len, dma_addr);
+
+ /* Fill up the buffer */
+ next_dcb_buf = &tx->dcbs_buf[next_to_use];
+ next_dcb_buf->use_skb = true;
+ next_dcb_buf->data.skb = skb;
+ next_dcb_buf->xdp_ndo = false;
+ next_dcb_buf->len = skb->len;
+ next_dcb_buf->dma_addr = dma_addr;
+ next_dcb_buf->used = true;
+ next_dcb_buf->ptp = false;
+ next_dcb_buf->dev = dev;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ next_dcb_buf->ptp = true;
+
+ /* Start the transmission */
+ lan966x_fdma_tx_start(tx, next_to_use);
+
+ return NETDEV_TX_OK;
+
+release:
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ lan966x_ptp_txtstamp_release(port, skb);
+
+ dev_kfree_skb_any(skb);
+ return err;
+}
+
+static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x)
+{
+ int max_mtu = 0;
+ int i;
+
+ for (i = 0; i < lan966x->num_phys_ports; ++i) {
+ struct lan966x_port *port;
+ int mtu;
+
+ port = lan966x->ports[i];
+ if (!port)
+ continue;
+
+ mtu = lan_rd(lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
+ if (mtu > max_mtu)
+ max_mtu = mtu;
+ }
+
+ return max_mtu;
+}
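+
+/* DEV_MAC_MAXLEN_CFG holds the maximum accepted frame length of a port; the
+ * largest value across all ports dictates how big the shared RX buffers must
+ * be.
+ */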
+
+static int lan966x_qsys_sw_status(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT));
+}
+
+static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
+{
+ struct page_pool *page_pool;
+ dma_addr_t rx_dma;
+ void *rx_dcbs;
+ u32 size;
+ int err;
+
+ /* Store these for later to free them */
+ rx_dma = lan966x->rx.dma;
+ rx_dcbs = lan966x->rx.dcbs;
+ page_pool = lan966x->rx.page_pool;
+
+ napi_synchronize(&lan966x->napi);
+ napi_disable(&lan966x->napi);
+ lan966x_fdma_stop_netdev(lan966x);
+
+ lan966x_fdma_rx_disable(&lan966x->rx);
+ lan966x_fdma_rx_free_pages(&lan966x->rx);
+ lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
+ lan966x->rx.max_mtu = new_mtu;
+ err = lan966x_fdma_rx_alloc(&lan966x->rx);
+ if (err)
+ goto restore;
+ lan966x_fdma_rx_start(&lan966x->rx);
+
+ size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+ dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);
+
+ page_pool_destroy(page_pool);
+
+ lan966x_fdma_wakeup_netdev(lan966x);
+ napi_enable(&lan966x->napi);
+
+ return err;
+restore:
+ lan966x->rx.page_pool = page_pool;
+ lan966x->rx.dma = rx_dma;
+ lan966x->rx.dcbs = rx_dcbs;
+ lan966x_fdma_rx_start(&lan966x->rx);
+
+ return err;
+}
+
+static int lan966x_fdma_get_max_frame(struct lan966x *lan966x)
+{
+ return lan966x_fdma_get_max_mtu(lan966x) +
+ IFH_LEN_BYTES +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+ VLAN_HLEN * 2 +
+ XDP_PACKET_HEADROOM;
+}
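+
+/* As an illustrative example: with a port MAXLEN of 1518 bytes, the RX
+ * buffers must additionally fit the 28-byte IFH, two VLAN tags (2 * 4 bytes),
+ * XDP_PACKET_HEADROOM and the aligned skb_shared_info, which is what the sum
+ * above accounts for.
+ */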
+
+static int __lan966x_fdma_reload(struct lan966x *lan966x, int max_mtu)
+{
+ int err;
+ u32 val;
+
+ /* Disable the CPU port */
+ lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
+ QSYS_SW_PORT_MODE_PORT_ENA,
+ lan966x, QSYS_SW_PORT_MODE(CPU_PORT));
+
+ /* Flush the CPU queues */
+ readx_poll_timeout(lan966x_qsys_sw_status, lan966x,
+ val, !(QSYS_SW_STATUS_EQ_AVAIL_GET(val)),
+ READL_SLEEP_US, READL_TIMEOUT_US);
+
+ /* Add a sleep in case there are frames between the queues and the CPU
+ * port
+ */
+ usleep_range(1000, 2000);
+
+ err = lan966x_fdma_reload(lan966x, max_mtu);
+
+ /* Enable back the CPU port */
+ lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1),
+ QSYS_SW_PORT_MODE_PORT_ENA,
+ lan966x, QSYS_SW_PORT_MODE(CPU_PORT));
+
+ return err;
+}
+
+int lan966x_fdma_change_mtu(struct lan966x *lan966x)
+{
+ int max_mtu;
+
+ max_mtu = lan966x_fdma_get_max_frame(lan966x);
+ if (max_mtu == lan966x->rx.max_mtu)
+ return 0;
+
+ return __lan966x_fdma_reload(lan966x, max_mtu);
+}
+
+int lan966x_fdma_reload_page_pool(struct lan966x *lan966x)
+{
+ int max_mtu;
+
+ max_mtu = lan966x_fdma_get_max_frame(lan966x);
+ return __lan966x_fdma_reload(lan966x, max_mtu);
+}
+
+void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
+{
+ if (lan966x->fdma_ndev)
+ return;
+
+ lan966x->fdma_ndev = dev;
+ netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll);
+ napi_enable(&lan966x->napi);
+}
+
+void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev)
+{
+ if (lan966x->fdma_ndev == dev) {
+ netif_napi_del(&lan966x->napi);
+ lan966x->fdma_ndev = NULL;
+ }
+}
+
+int lan966x_fdma_init(struct lan966x *lan966x)
+{
+ int err;
+
+ if (!lan966x->fdma)
+ return 0;
+
+ lan966x->rx.lan966x = lan966x;
+ lan966x->rx.channel_id = FDMA_XTR_CHANNEL;
+ lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x);
+ lan966x->tx.lan966x = lan966x;
+ lan966x->tx.channel_id = FDMA_INJ_CHANNEL;
+ lan966x->tx.last_in_use = -1;
+
+ err = lan966x_fdma_rx_alloc(&lan966x->rx);
+ if (err)
+ return err;
+
+ err = lan966x_fdma_tx_alloc(&lan966x->tx);
+ if (err) {
+ lan966x_fdma_rx_free(&lan966x->rx);
+ return err;
+ }
+
+ lan966x_fdma_rx_start(&lan966x->rx);
+
+ return 0;
+}
+
+void lan966x_fdma_deinit(struct lan966x *lan966x)
+{
+ if (!lan966x->fdma)
+ return;
+
+ lan966x_fdma_rx_disable(&lan966x->rx);
+ lan966x_fdma_tx_disable(&lan966x->tx);
+
+ napi_synchronize(&lan966x->napi);
+ napi_disable(&lan966x->napi);
+
+ lan966x_fdma_rx_free_pages(&lan966x->rx);
+ lan966x_fdma_rx_free(&lan966x->rx);
+ page_pool_destroy(lan966x->rx.page_pool);
+ lan966x_fdma_tx_free(&lan966x->tx);
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_goto.c b/drivers/net/ethernet/microchip/lan966x/lan966x_goto.c
new file mode 100644
index 0000000000..9b18156eea
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_goto.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+#include "vcap_api_client.h"
+
+int lan966x_goto_port_add(struct lan966x_port *port,
+ int from_cid, int to_cid,
+ unsigned long goto_id,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = port->lan966x;
+ int err;
+
+ err = vcap_enable_lookups(lan966x->vcap_ctrl, port->dev,
+ from_cid, to_cid, goto_id,
+ true);
+ if (err == -EFAULT) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported goto chain");
+ return -EOPNOTSUPP;
+ }
+
+ if (err == -EADDRINUSE) {
+ NL_SET_ERR_MSG_MOD(extack, "VCAP already enabled");
+ return -EOPNOTSUPP;
+ }
+
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Could not enable VCAP lookups");
+ return err;
+ }
+
+ return 0;
+}
+
+int lan966x_goto_port_del(struct lan966x_port *port,
+ unsigned long goto_id,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = port->lan966x;
+ int err;
+
+ err = vcap_enable_lookups(lan966x->vcap_ctrl, port->dev, 0, 0,
+ goto_id, false);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Could not disable VCAP lookups");
+ return err;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h b/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h
new file mode 100644
index 0000000000..f3b1e0d318
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __LAN966X_IFH_H__
+#define __LAN966X_IFH_H__
+
+/* Fields marked with (*) should just be cleared upon injection.
+ * The IFH is transmitted MSByte first (the highest bit position is sent as
+ * the MSB of the first byte).
+ */
+
+#define IFH_LEN 7
+#define IFH_LEN_BYTES (IFH_LEN * sizeof(u32))
+
+/* Timestamp for frame */
+#define IFH_POS_TIMESTAMP 192
+
+/* Bypass analyzer with a prefilled IFH */
+#define IFH_POS_BYPASS 191
+
+/* Masqueraded injection with masq_port defining logical source port */
+#define IFH_POS_MASQ 190
+
+/* Masqueraded port number for injection */
+#define IFH_POS_MASQ_PORT 186
+
+/* Frame length (*) */
+#define IFH_POS_LEN 178
+
+/* Cell filling mode. Full(0),Etype(1), LlctOpt(2), Llct(3) */
+#define IFH_POS_WRDMODE 176
+
+/* Frame has 16 bits rtag removed compared to line data */
+#define IFH_POS_RTAG48 175
+
+/* Frame has a redundancy tag */
+#define IFH_POS_HAS_RED_TAG 174
+
+/* Frame has been cut-through forwarded (*) */
+#define IFH_POS_CUTTHRU 173
+
+/* Rewriter command */
+#define IFH_POS_REW_CMD 163
+
+/* Enable OAM-related rewriting. PDU_TYPE encodes OAM type. */
+#define IFH_POS_REW_OAM 162
+
+/* PDU type. Encoding: (0-NONE, 1-Y1731_CCM, 2-MRP_TST, 3-MRP_ITST, 4-DLR_BCN,
+ * 5-DLR_ADV, 6-RTE_NULL_INJ, 7-IPV4, 8-IPV6, 9-Y1731_NON_CCM).
+ */
+#define IFH_POS_PDU_TYPE 158
+
+/* Update FCS before transmission */
+#define IFH_POS_FCS_UPD 157
+
+/* Classified DSCP value of frame */
+#define IFH_POS_DSCP 151
+
+/* Yellow indication */
+#define IFH_POS_DP 150
+
+/* Process in RTE/inbound */
+#define IFH_POS_RTE_INB_UPDATE 149
+
+/* Number of tags to pop from frame */
+#define IFH_POS_POP_CNT 147
+
+/* Number of tags in front of the ethertype */
+#define IFH_POS_ETYPE_OFS 145
+
+/* Logical source port of frame (*) */
+#define IFH_POS_SRCPORT 141
+
+/* Sequence number in redundancy tag */
+#define IFH_POS_SEQ_NUM 120
+
+/* S-tagged flag and classified TCI of frame (PCP/DEI/VID) */
+#define IFH_POS_TCI 103
+
+/* Classified internal priority for queuing */
+#define IFH_POS_QOS_CLASS 100
+
+/* Bit mask with eight CPU copy classes */
+#define IFH_POS_CPUQ 92
+
+/* Relearn + learn flags (*) */
+#define IFH_POS_LEARN_FLAGS 90
+
+/* SFLOW identifier for frame (0-8: Tx port, 9: Rx sampling, 15: No sampling) */
+#define IFH_POS_SFLOW_ID 86
+
+/* Set if an ACL/S2 rule was hit (*).
+ * Super priority: acl_hit=0 and acl_hit(4)=1.
+ */
+#define IFH_POS_ACL_HIT 85
+
+/* S2 rule index hit (*) */
+#define IFH_POS_ACL_IDX 79
+
+/* ISDX as classified by S1 */
+#define IFH_POS_ISDX 71
+
+/* Destination ports for frame */
+#define IFH_POS_DSTS 62
+
+/* Storm policer to be applied: None/Uni/Multi/Broad (*) */
+#define IFH_POS_FLOOD 60
+
+/* Redundancy tag operation */
+#define IFH_POS_SEQ_OP 58
+
+/* Classified internal priority for resource management, tagging etc */
+#define IFH_POS_IPV 55
+
+/* Frame is for AFI use */
+#define IFH_POS_AFI 54
+
+/* Internal aging value (*) */
+#define IFH_POS_AGED 52
+
+/* RTP Identifier */
+#define IFH_POS_RTP_ID 42
+
+/* RTP MRPD flow */
+#define IFH_POS_RTP_SUBID 41
+
+/* Profinet DataStatus or opcua GroupVersion MSB */
+#define IFH_POS_PN_DATA_STATUS 33
+
+/* Profinet transfer status (1 iff the status is 0) */
+#define IFH_POS_PN_TRANSF_STATUS_ZERO 32
+
+/* Profinet cycle counter or opcua NetworkMessageNumber */
+#define IFH_POS_PN_CC 16
+
+#define IFH_WID_TIMESTAMP 32
+#define IFH_WID_BYPASS 1
+#define IFH_WID_MASQ 1
+#define IFH_WID_MASQ_PORT 4
+#define IFH_WID_LEN 14
+#define IFH_WID_WRDMODE 2
+#define IFH_WID_RTAG48 1
+#define IFH_WID_HAS_RED_TAG 1
+#define IFH_WID_CUTTHRU 1
+#define IFH_WID_REW_CMD 10
+#define IFH_WID_REW_OAM 1
+#define IFH_WID_PDU_TYPE 4
+#define IFH_WID_FCS_UPD 1
+#define IFH_WID_DSCP 6
+#define IFH_WID_DP 1
+#define IFH_WID_RTE_INB_UPDATE 1
+#define IFH_WID_POP_CNT 2
+#define IFH_WID_ETYPE_OFS 2
+#define IFH_WID_SRCPORT 4
+#define IFH_WID_SEQ_NUM 16
+#define IFH_WID_TCI 17
+#define IFH_WID_QOS_CLASS 3
+#define IFH_WID_CPUQ 8
+#define IFH_WID_LEARN_FLAGS 2
+#define IFH_WID_SFLOW_ID 4
+#define IFH_WID_ACL_HIT 1
+#define IFH_WID_ACL_IDX 6
+#define IFH_WID_ISDX 8
+#define IFH_WID_DSTS 9
+#define IFH_WID_FLOOD 2
+#define IFH_WID_SEQ_OP 2
+#define IFH_WID_IPV 3
+#define IFH_WID_AFI 1
+#define IFH_WID_AGED 2
+#define IFH_WID_RTP_ID 10
+#define IFH_WID_RTP_SUBID 1
+#define IFH_WID_PN_DATA_STATUS 8
+#define IFH_WID_PN_TRANSF_STATUS_ZERO 1
+#define IFH_WID_PN_CC 16
+
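+/* Illustrative helper (an editor's sketch, not part of the driver API): with
+ * the layout above, a field at bit position 'pos' with width 'wid' that does
+ * not cross a 32-bit word boundary can be read from a host-order copy of the
+ * IFH as below. The helper name is hypothetical.
+ */
+static inline u32 lan966x_ifh_example_get(const u32 *ifh, u32 pos, u32 wid)
+{
+	u32 word = (IFH_LEN * 32 - 1 - pos) / 32; /* 0 = first word sent */
+	u32 shift = pos % 32;
+	u32 mask = wid == 32 ? ~0u : (1u << wid) - 1;
+
+	return (ifh[word] >> shift) & mask;
+}
+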
+#endif /* __LAN966X_IFH_H__ */
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
new file mode 100644
index 0000000000..41fa2523d9
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/if_bridge.h>
+
+#include "lan966x_main.h"
+
+static void lan966x_lag_set_aggr_pgids(struct lan966x *lan966x)
+{
+ u32 visited = GENMASK(lan966x->num_phys_ports - 1, 0);
+ int p, lag, i;
+
+ /* Reset destination and aggregation PGIDS */
+ for (p = 0; p < lan966x->num_phys_ports; ++p)
+ lan_wr(ANA_PGID_PGID_SET(BIT(p)),
+ lan966x, ANA_PGID(p));
+
+ for (p = PGID_AGGR; p < PGID_SRC; ++p)
+ lan_wr(ANA_PGID_PGID_SET(visited),
+ lan966x, ANA_PGID(p));
+
+ /* The visited ports bitmask holds the list of ports offloading any
+ * bonding interface. Initially we mark all these ports as unvisited,
+ * then every time we visit a port in this bitmask, we know that it is
+ * the lowest numbered port, i.e. the one whose logical ID == physical
+ * port ID == LAG ID. So we mark as visited all further ports in the
+ * bitmask that are offloading the same bonding interface. This way,
+ * we set up the aggregation PGIDs only once per bonding interface.
+ */
+ for (p = 0; p < lan966x->num_phys_ports; ++p) {
+ struct lan966x_port *port = lan966x->ports[p];
+
+ if (!port || !port->bond)
+ continue;
+
+ visited &= ~BIT(p);
+ }
+
+ /* Now, set PGIDs for each active LAG */
+ for (lag = 0; lag < lan966x->num_phys_ports; ++lag) {
+ struct net_device *bond = lan966x->ports[lag]->bond;
+ int num_active_ports = 0;
+ unsigned long bond_mask;
+ u8 aggr_idx[16];
+
+ if (!bond || (visited & BIT(lag)))
+ continue;
+
+ bond_mask = lan966x_lag_get_mask(lan966x, bond);
+
+ for_each_set_bit(p, &bond_mask, lan966x->num_phys_ports) {
+ struct lan966x_port *port = lan966x->ports[p];
+
+ lan_wr(ANA_PGID_PGID_SET(bond_mask),
+ lan966x, ANA_PGID(p));
+ if (port->lag_tx_active)
+ aggr_idx[num_active_ports++] = p;
+ }
+
+ for (i = PGID_AGGR; i < PGID_SRC; ++i) {
+ u32 ac;
+
+ ac = lan_rd(lan966x, ANA_PGID(i));
+ ac &= ~bond_mask;
+ /* Don't do division by zero if there was no active
+ * port. Just make all aggregation codes zero.
+ */
+ if (num_active_ports)
+ ac |= BIT(aggr_idx[i % num_active_ports]);
+ lan_wr(ANA_PGID_PGID_SET(ac),
+ lan966x, ANA_PGID(i));
+ }
+
+ /* Mark all ports in the same LAG as visited to avoid applying
+ * the same config again.
+ */
+ for (p = lag; p < lan966x->num_phys_ports; p++) {
+ struct lan966x_port *port = lan966x->ports[p];
+
+ if (!port)
+ continue;
+
+ if (port->bond == bond)
+ visited |= BIT(p);
+ }
+ }
+}
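+
+/* A worked example with illustrative numbers: if ports 1 and 3 are bonded and
+ * both are TX-active, each bonded port gets its destination PGID set to
+ * BIT(1) | BIT(3), and the aggregation PGIDs alternate between BIT(1) and
+ * BIT(3), so the hardware aggregation code spreads traffic across the two
+ * active ports.
+ */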
+
+static void lan966x_lag_set_port_ids(struct lan966x *lan966x)
+{
+ struct lan966x_port *port;
+ u32 bond_mask;
+ u32 lag_id;
+ int p;
+
+ for (p = 0; p < lan966x->num_phys_ports; ++p) {
+ port = lan966x->ports[p];
+ if (!port)
+ continue;
+
+ lag_id = port->chip_port;
+
+ bond_mask = lan966x_lag_get_mask(lan966x, port->bond);
+ if (bond_mask)
+ lag_id = __ffs(bond_mask);
+
+ lan_rmw(ANA_PORT_CFG_PORTID_VAL_SET(lag_id),
+ ANA_PORT_CFG_PORTID_VAL,
+ lan966x, ANA_PORT_CFG(port->chip_port));
+ }
+}
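+
+/* Ports outside of any bond keep their own chip port as logical port ID,
+ * while all members of a bond share the ID of its lowest-numbered port.
+ */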
+
+static void lan966x_lag_update_ids(struct lan966x *lan966x)
+{
+ lan966x_lag_set_port_ids(lan966x);
+ lan966x_update_fwd_mask(lan966x);
+ lan966x_lag_set_aggr_pgids(lan966x);
+}
+
+int lan966x_lag_port_join(struct lan966x_port *port,
+ struct net_device *brport_dev,
+ struct net_device *bond,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct net_device *dev = port->dev;
+ u32 lag_id = -1;
+ u32 bond_mask;
+ int err;
+
+ bond_mask = lan966x_lag_get_mask(lan966x, bond);
+ if (bond_mask)
+ lag_id = __ffs(bond_mask);
+
+ port->bond = bond;
+ lan966x_lag_update_ids(lan966x);
+
+ err = switchdev_bridge_port_offload(brport_dev, dev, port,
+ &lan966x_switchdev_nb,
+ &lan966x_switchdev_blocking_nb,
+ false, extack);
+ if (err)
+ goto out;
+
+ lan966x_port_stp_state_set(port, br_port_get_stp_state(brport_dev));
+
+ if (lan966x_lag_first_port(port->bond, port->dev) &&
+ lag_id != -1)
+ lan966x_mac_lag_replace_port_entry(lan966x,
+ lan966x->ports[lag_id],
+ port);
+
+ return 0;
+
+out:
+ port->bond = NULL;
+ lan966x_lag_update_ids(lan966x);
+
+ return err;
+}
+
+void lan966x_lag_port_leave(struct lan966x_port *port, struct net_device *bond)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 bond_mask;
+ u32 lag_id;
+
+ if (lan966x_lag_first_port(port->bond, port->dev)) {
+ bond_mask = lan966x_lag_get_mask(lan966x, port->bond);
+ bond_mask &= ~BIT(port->chip_port);
+ if (bond_mask) {
+ lag_id = __ffs(bond_mask);
+ lan966x_mac_lag_replace_port_entry(lan966x, port,
+ lan966x->ports[lag_id]);
+ } else {
+ lan966x_mac_lag_remove_port_entry(lan966x, port);
+ }
+ }
+
+ port->bond = NULL;
+ lan966x_lag_update_ids(lan966x);
+ lan966x_port_stp_state_set(port, BR_STATE_FORWARDING);
+}
+
+static bool lan966x_lag_port_check_hash_types(struct lan966x *lan966x,
+ enum netdev_lag_hash hash_type)
+{
+ int p;
+
+ for (p = 0; p < lan966x->num_phys_ports; ++p) {
+ struct lan966x_port *port = lan966x->ports[p];
+
+ if (!port || !port->bond)
+ continue;
+
+ if (port->hash_type != hash_type)
+ return false;
+ }
+
+ return true;
+}
+
+int lan966x_lag_port_prechangeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct netdev_lag_upper_info *lui;
+ struct netlink_ext_ack *extack;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+ lui = info->upper_info;
+ if (!lui) {
+ port->hash_type = NETDEV_LAG_HASH_NONE;
+ return NOTIFY_DONE;
+ }
+
+ if (lui->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "LAG device using unsupported Tx type");
+ return -EINVAL;
+ }
+
+ if (!lan966x_lag_port_check_hash_types(lan966x, lui->hash_type)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "LAG devices can have only the same hash_type");
+ return -EINVAL;
+ }
+
+ switch (lui->hash_type) {
+ case NETDEV_LAG_HASH_L2:
+ lan_wr(ANA_AGGR_CFG_AC_DMAC_ENA_SET(1) |
+ ANA_AGGR_CFG_AC_SMAC_ENA_SET(1),
+ lan966x, ANA_AGGR_CFG);
+ break;
+ case NETDEV_LAG_HASH_L34:
+ lan_wr(ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_SET(1) |
+ ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_SET(1) |
+ ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA_SET(1),
+ lan966x, ANA_AGGR_CFG);
+ break;
+ case NETDEV_LAG_HASH_L23:
+ lan_wr(ANA_AGGR_CFG_AC_DMAC_ENA_SET(1) |
+ ANA_AGGR_CFG_AC_SMAC_ENA_SET(1) |
+ ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_SET(1) |
+ ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_SET(1),
+ lan966x, ANA_AGGR_CFG);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "LAG device using unsupported hash type");
+ return -EINVAL;
+ }
+
+ port->hash_type = lui->hash_type;
+
+ return NOTIFY_OK;
+}
+
+int lan966x_lag_port_changelowerstate(struct net_device *dev,
+ struct netdev_notifier_changelowerstate_info *info)
+{
+ struct netdev_lag_lower_state_info *lag = info->lower_state_info;
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ bool is_active;
+
+ if (!port->bond)
+ return NOTIFY_DONE;
+
+ is_active = lag->link_up && lag->tx_enabled;
+ if (port->lag_tx_active == is_active)
+ return NOTIFY_DONE;
+
+ port->lag_tx_active = is_active;
+ lan966x_lag_set_aggr_pgids(lan966x);
+
+ return NOTIFY_OK;
+}
+
+int lan966x_lag_netdev_prechangeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct lan966x_port *port;
+ struct net_device *lower;
+ struct list_head *iter;
+ int err;
+
+ netdev_for_each_lower_dev(dev, lower, iter) {
+ if (!lan966x_netdevice_check(lower))
+ continue;
+
+ port = netdev_priv(lower);
+ if (port->bond != dev)
+ continue;
+
+ err = lan966x_port_prechangeupper(lower, dev, info);
+ if (err)
+ return err;
+ }
+
+ return NOTIFY_DONE;
+}
+
+int lan966x_lag_netdev_changeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct lan966x_port *port;
+ struct net_device *lower;
+ struct list_head *iter;
+ int err;
+
+ netdev_for_each_lower_dev(dev, lower, iter) {
+ if (!lan966x_netdevice_check(lower))
+ continue;
+
+ port = netdev_priv(lower);
+ if (port->bond != dev)
+ continue;
+
+ err = lan966x_port_changeupper(lower, dev, info);
+ if (err)
+ return err;
+ }
+
+ return NOTIFY_DONE;
+}
+
+bool lan966x_lag_first_port(struct net_device *lag, struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ unsigned long bond_mask;
+
+ if (port->bond != lag)
+ return false;
+
+ bond_mask = lan966x_lag_get_mask(lan966x, lag);
+ if (bond_mask && port->chip_port == __ffs(bond_mask))
+ return true;
+
+ return false;
+}
+
+u32 lan966x_lag_get_mask(struct lan966x *lan966x, struct net_device *bond)
+{
+ struct lan966x_port *port;
+ u32 mask = 0;
+ int p;
+
+ if (!bond)
+ return mask;
+
+ for (p = 0; p < lan966x->num_phys_ports; p++) {
+ port = lan966x->ports[p];
+ if (!port)
+ continue;
+
+ if (port->bond == bond)
+ mask |= BIT(p);
+ }
+
+ return mask;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
new file mode 100644
index 0000000000..baa3a30c03
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
@@ -0,0 +1,592 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <net/switchdev.h>
+#include "lan966x_main.h"
+
+#define LAN966X_MAC_COLUMNS 4
+#define MACACCESS_CMD_IDLE 0
+#define MACACCESS_CMD_LEARN 1
+#define MACACCESS_CMD_FORGET 2
+#define MACACCESS_CMD_AGE 3
+#define MACACCESS_CMD_GET_NEXT 4
+#define MACACCESS_CMD_INIT 5
+#define MACACCESS_CMD_READ 6
+#define MACACCESS_CMD_WRITE 7
+#define MACACCESS_CMD_SYNC_GET_NEXT 8
+
+#define LAN966X_MAC_INVALID_ROW -1
+
+struct lan966x_mac_entry {
+ struct list_head list;
+ unsigned char mac[ETH_ALEN] __aligned(2);
+ u16 vid;
+ u16 port_index;
+ int row;
+ bool lag;
+};
+
+struct lan966x_mac_raw_entry {
+ u32 mach;
+ u32 macl;
+ u32 maca;
+ bool processed;
+};
+
+static int lan966x_mac_get_status(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, ANA_MACACCESS);
+}
+
+static int lan966x_mac_wait_for_completion(struct lan966x *lan966x)
+{
+ u32 val;
+
+ return readx_poll_timeout_atomic(lan966x_mac_get_status,
+ lan966x, val,
+ (ANA_MACACCESS_MAC_TABLE_CMD_GET(val)) ==
+ MACACCESS_CMD_IDLE,
+ TABLE_UPDATE_SLEEP_US,
+ TABLE_UPDATE_TIMEOUT_US);
+}
+
+static void lan966x_mac_select(struct lan966x *lan966x,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid)
+{
+ u32 macl = 0, mach = 0;
+
+ /* Set the MAC address to handle and the vlan associated in a format
+ * understood by the hardware.
+ */
+ mach |= vid << 16;
+ mach |= mac[0] << 8;
+ mach |= mac[1] << 0;
+ macl |= mac[2] << 24;
+ macl |= mac[3] << 16;
+ macl |= mac[4] << 8;
+ macl |= mac[5] << 0;
+
+ lan_wr(macl, lan966x, ANA_MACLDATA);
+ lan_wr(mach, lan966x, ANA_MACHDATA);
+}
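+
+/* Example encoding with illustrative values: MAC 00:11:22:33:44:55 on VLAN 1
+ * is written as MACHDATA = 0x00010011 and MACLDATA = 0x22334455.
+ */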
+
+static int __lan966x_mac_learn_locked(struct lan966x *lan966x, int pgid,
+ bool cpu_copy,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ lockdep_assert_held(&lan966x->mac_lock);
+
+ lan966x_mac_select(lan966x, mac, vid);
+
+ /* Issue a write command */
+ lan_wr(ANA_MACACCESS_VALID_SET(1) |
+ ANA_MACACCESS_CHANGE2SW_SET(0) |
+ ANA_MACACCESS_MAC_CPU_COPY_SET(cpu_copy) |
+ ANA_MACACCESS_DEST_IDX_SET(pgid) |
+ ANA_MACACCESS_ENTRYTYPE_SET(type) |
+ ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_LEARN),
+ lan966x, ANA_MACACCESS);
+
+ return lan966x_mac_wait_for_completion(lan966x);
+}
+
+static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid,
+ bool cpu_copy,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ int ret;
+
+ spin_lock(&lan966x->mac_lock);
+ ret = __lan966x_mac_learn_locked(lan966x, pgid, cpu_copy, mac, vid, type);
+ spin_unlock(&lan966x->mac_lock);
+
+ return ret;
+}
+
+/* The mask of the front ports is encoded inside the mac parameter via a call
+ * to lan966x_mdb_encode_mac().
+ */
+int lan966x_mac_ip_learn(struct lan966x *lan966x,
+ bool cpu_copy,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ WARN_ON(type != ENTRYTYPE_MACV4 && type != ENTRYTYPE_MACV6);
+
+ return __lan966x_mac_learn(lan966x, 0, cpu_copy, mac, vid, type);
+}
+
+int lan966x_mac_learn(struct lan966x *lan966x, int port,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ WARN_ON(type != ENTRYTYPE_NORMAL && type != ENTRYTYPE_LOCKED);
+
+ return __lan966x_mac_learn(lan966x, port, false, mac, vid, type);
+}
+
+static int lan966x_mac_learn_locked(struct lan966x *lan966x, int port,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ WARN_ON(type != ENTRYTYPE_NORMAL && type != ENTRYTYPE_LOCKED);
+
+ return __lan966x_mac_learn_locked(lan966x, port, false, mac, vid, type);
+}
+
+static int lan966x_mac_forget_locked(struct lan966x *lan966x,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ lockdep_assert_held(&lan966x->mac_lock);
+
+ lan966x_mac_select(lan966x, mac, vid);
+
+ /* Issue a forget command */
+ lan_wr(ANA_MACACCESS_ENTRYTYPE_SET(type) |
+ ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_FORGET),
+ lan966x, ANA_MACACCESS);
+
+ return lan966x_mac_wait_for_completion(lan966x);
+}
+
+int lan966x_mac_forget(struct lan966x *lan966x,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ int ret;
+
+ spin_lock(&lan966x->mac_lock);
+ ret = lan966x_mac_forget_locked(lan966x, mac, vid, type);
+ spin_unlock(&lan966x->mac_lock);
+
+ return ret;
+}
+
+int lan966x_mac_cpu_learn(struct lan966x *lan966x, const char *addr, u16 vid)
+{
+ return lan966x_mac_learn(lan966x, PGID_CPU, addr, vid, ENTRYTYPE_LOCKED);
+}
+
+int lan966x_mac_cpu_forget(struct lan966x *lan966x, const char *addr, u16 vid)
+{
+ return lan966x_mac_forget(lan966x, addr, vid, ENTRYTYPE_LOCKED);
+}
+
+void lan966x_mac_set_ageing(struct lan966x *lan966x,
+ u32 ageing)
+{
+ lan_rmw(ANA_AUTOAGE_AGE_PERIOD_SET(ageing / 2),
+ ANA_AUTOAGE_AGE_PERIOD,
+ lan966x, ANA_AUTOAGE);
+}
+
+void lan966x_mac_init(struct lan966x *lan966x)
+{
+ /* Clear the MAC table */
+ lan_wr(MACACCESS_CMD_INIT, lan966x, ANA_MACACCESS);
+ lan966x_mac_wait_for_completion(lan966x);
+
+ spin_lock_init(&lan966x->mac_lock);
+ INIT_LIST_HEAD(&lan966x->mac_entries);
+}
+
+static struct lan966x_mac_entry *lan966x_mac_alloc_entry(struct lan966x_port *port,
+ const unsigned char *mac,
+ u16 vid)
+{
+ struct lan966x_mac_entry *mac_entry;
+
+ mac_entry = kzalloc(sizeof(*mac_entry), GFP_ATOMIC);
+ if (!mac_entry)
+ return NULL;
+
+ memcpy(mac_entry->mac, mac, ETH_ALEN);
+ mac_entry->vid = vid;
+ mac_entry->port_index = port->chip_port;
+ mac_entry->row = LAN966X_MAC_INVALID_ROW;
+	mac_entry->lag = !!port->bond;
+ return mac_entry;
+}
+
+static struct lan966x_mac_entry *lan966x_mac_find_entry(struct lan966x *lan966x,
+ const unsigned char *mac,
+ u16 vid, u16 port_index)
+{
+ struct lan966x_mac_entry *res = NULL;
+ struct lan966x_mac_entry *mac_entry;
+
+ list_for_each_entry(mac_entry, &lan966x->mac_entries, list) {
+ if (mac_entry->vid == vid &&
+ ether_addr_equal(mac, mac_entry->mac) &&
+ mac_entry->port_index == port_index) {
+ res = mac_entry;
+ break;
+ }
+ }
+
+ return res;
+}
+
+static int lan966x_mac_lookup(struct lan966x *lan966x,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid, enum macaccess_entry_type type)
+{
+ int ret;
+
+ lan966x_mac_select(lan966x, mac, vid);
+
+ /* Issue a read command */
+ lan_wr(ANA_MACACCESS_ENTRYTYPE_SET(type) |
+ ANA_MACACCESS_VALID_SET(1) |
+ ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_READ),
+ lan966x, ANA_MACACCESS);
+
+ ret = lan966x_mac_wait_for_completion(lan966x);
+ if (ret)
+ return ret;
+
+ return ANA_MACACCESS_VALID_GET(lan_rd(lan966x, ANA_MACACCESS));
+}
+
+static void lan966x_fdb_call_notifiers(enum switchdev_notifier_type type,
+ const char *mac, u16 vid,
+ struct net_device *dev)
+{
+ struct switchdev_notifier_fdb_info info = { 0 };
+
+ info.addr = mac;
+ info.vid = vid;
+ info.offloaded = true;
+ call_switchdev_notifiers(type, dev, &info.info, NULL);
+}
+
+int lan966x_mac_add_entry(struct lan966x *lan966x, struct lan966x_port *port,
+ const unsigned char *addr, u16 vid)
+{
+ struct lan966x_mac_entry *mac_entry;
+
+ spin_lock(&lan966x->mac_lock);
+ if (lan966x_mac_lookup(lan966x, addr, vid, ENTRYTYPE_NORMAL)) {
+ spin_unlock(&lan966x->mac_lock);
+ return 0;
+ }
+
+	/* In case the entry already exists, don't add it again to SW, just
+	 * update HW. But we must look at the actual HW state, because it is
+	 * possible for an entry to be learned by HW and, before we get the
+	 * interrupt, the frame reaches the CPU and the CPU adds the entry,
+	 * but without the extern_learn flag.
+	 */
+ mac_entry = lan966x_mac_find_entry(lan966x, addr, vid, port->chip_port);
+ if (mac_entry) {
+ spin_unlock(&lan966x->mac_lock);
+ goto mac_learn;
+ }
+
+ mac_entry = lan966x_mac_alloc_entry(port, addr, vid);
+ if (!mac_entry) {
+ spin_unlock(&lan966x->mac_lock);
+ return -ENOMEM;
+ }
+
+ list_add_tail(&mac_entry->list, &lan966x->mac_entries);
+ spin_unlock(&lan966x->mac_lock);
+
+ lan966x_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, addr, vid,
+ port->bond ?: port->dev);
+
+mac_learn:
+ lan966x_mac_learn(lan966x, port->chip_port, addr, vid, ENTRYTYPE_LOCKED);
+
+ return 0;
+}
+
+int lan966x_mac_del_entry(struct lan966x *lan966x, const unsigned char *addr,
+ u16 vid)
+{
+ struct lan966x_mac_entry *mac_entry, *tmp;
+
+ spin_lock(&lan966x->mac_lock);
+ list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries,
+ list) {
+ if (mac_entry->vid == vid &&
+ ether_addr_equal(addr, mac_entry->mac)) {
+ lan966x_mac_forget_locked(lan966x, mac_entry->mac,
+ mac_entry->vid,
+ ENTRYTYPE_LOCKED);
+
+ list_del(&mac_entry->list);
+ kfree(mac_entry);
+ }
+ }
+ spin_unlock(&lan966x->mac_lock);
+
+ return 0;
+}
+
+void lan966x_mac_lag_replace_port_entry(struct lan966x *lan966x,
+ struct lan966x_port *src,
+ struct lan966x_port *dst)
+{
+ struct lan966x_mac_entry *mac_entry;
+
+ spin_lock(&lan966x->mac_lock);
+ list_for_each_entry(mac_entry, &lan966x->mac_entries, list) {
+ if (mac_entry->port_index == src->chip_port &&
+ mac_entry->lag) {
+ lan966x_mac_forget_locked(lan966x, mac_entry->mac,
+ mac_entry->vid,
+ ENTRYTYPE_LOCKED);
+
+ lan966x_mac_learn_locked(lan966x, dst->chip_port,
+ mac_entry->mac, mac_entry->vid,
+ ENTRYTYPE_LOCKED);
+ mac_entry->port_index = dst->chip_port;
+ }
+ }
+ spin_unlock(&lan966x->mac_lock);
+}
+
+void lan966x_mac_lag_remove_port_entry(struct lan966x *lan966x,
+ struct lan966x_port *src)
+{
+ struct lan966x_mac_entry *mac_entry, *tmp;
+
+ spin_lock(&lan966x->mac_lock);
+ list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries,
+ list) {
+ if (mac_entry->port_index == src->chip_port &&
+ mac_entry->lag) {
+ lan966x_mac_forget_locked(lan966x, mac_entry->mac,
+ mac_entry->vid,
+ ENTRYTYPE_LOCKED);
+
+ list_del(&mac_entry->list);
+ kfree(mac_entry);
+ }
+ }
+ spin_unlock(&lan966x->mac_lock);
+}
+
+void lan966x_mac_purge_entries(struct lan966x *lan966x)
+{
+ struct lan966x_mac_entry *mac_entry, *tmp;
+
+ spin_lock(&lan966x->mac_lock);
+ list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries,
+ list) {
+ lan966x_mac_forget_locked(lan966x, mac_entry->mac,
+ mac_entry->vid, ENTRYTYPE_LOCKED);
+
+ list_del(&mac_entry->list);
+ kfree(mac_entry);
+ }
+ spin_unlock(&lan966x->mac_lock);
+}
+
+static void lan966x_mac_notifiers(enum switchdev_notifier_type type,
+ unsigned char *mac, u32 vid,
+ struct net_device *dev)
+{
+ rtnl_lock();
+ lan966x_fdb_call_notifiers(type, mac, vid, dev);
+ rtnl_unlock();
+}
+
+static void lan966x_mac_process_raw_entry(struct lan966x_mac_raw_entry *raw_entry,
+ u8 *mac, u16 *vid, u32 *dest_idx)
+{
+ mac[0] = (raw_entry->mach >> 8) & 0xff;
+ mac[1] = (raw_entry->mach >> 0) & 0xff;
+ mac[2] = (raw_entry->macl >> 24) & 0xff;
+ mac[3] = (raw_entry->macl >> 16) & 0xff;
+ mac[4] = (raw_entry->macl >> 8) & 0xff;
+ mac[5] = (raw_entry->macl >> 0) & 0xff;
+
+ *vid = (raw_entry->mach >> 16) & 0xfff;
+ *dest_idx = ANA_MACACCESS_DEST_IDX_GET(raw_entry->maca);
+}
+
+static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
+ struct lan966x_mac_raw_entry *raw_entries)
+{
+ struct lan966x_mac_entry *mac_entry, *tmp;
+ unsigned char mac[ETH_ALEN] __aligned(2);
+ struct list_head mac_deleted_entries;
+ struct lan966x_port *port;
+ u32 dest_idx;
+ u32 column;
+ u16 vid;
+
+ INIT_LIST_HEAD(&mac_deleted_entries);
+
+ spin_lock(&lan966x->mac_lock);
+ list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries, list) {
+ bool found = false;
+
+ if (mac_entry->row != row)
+ continue;
+
+ for (column = 0; column < LAN966X_MAC_COLUMNS; ++column) {
+			/* All the valid entries are at the start of the row,
+			 * so once an invalid entry is found the rest of the
+			 * columns can be skipped
+			 */
+ if (!ANA_MACACCESS_VALID_GET(raw_entries[column].maca))
+ break;
+
+ lan966x_mac_process_raw_entry(&raw_entries[column],
+ mac, &vid, &dest_idx);
+ if (WARN_ON(dest_idx >= lan966x->num_phys_ports))
+ continue;
+
+ /* If the entry in SW is found, then there is nothing
+ * to do
+ */
+ if (mac_entry->vid == vid &&
+ ether_addr_equal(mac_entry->mac, mac) &&
+ mac_entry->port_index == dest_idx) {
+ raw_entries[column].processed = true;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ list_del(&mac_entry->list);
+			/* Move the entry from the SW list to a tmp list so it
+			 * can be deleted later
+			 */
+ list_add_tail(&mac_entry->list, &mac_deleted_entries);
+ }
+ }
+ spin_unlock(&lan966x->mac_lock);
+
+ list_for_each_entry_safe(mac_entry, tmp, &mac_deleted_entries, list) {
+ /* Notify the bridge that the entry doesn't exist
+ * anymore in the HW
+ */
+ port = lan966x->ports[mac_entry->port_index];
+ lan966x_mac_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
+ mac_entry->mac, mac_entry->vid,
+ port->bond ?: port->dev);
+ list_del(&mac_entry->list);
+ kfree(mac_entry);
+ }
+
+	/* Now walk the columns and check for entries that were not found in
+	 * the SW list; such entries are new, so the bridge must be notified.
+	 */
+ for (column = 0; column < LAN966X_MAC_COLUMNS; ++column) {
+		/* All the valid entries are at the start of the row, so once
+		 * an invalid entry is found the rest of the columns can be
+		 * skipped
+		 */
+ if (!ANA_MACACCESS_VALID_GET(raw_entries[column].maca))
+ break;
+
+ /* If the entry already exists then don't do anything */
+ if (raw_entries[column].processed)
+ continue;
+
+ lan966x_mac_process_raw_entry(&raw_entries[column],
+ mac, &vid, &dest_idx);
+ if (WARN_ON(dest_idx >= lan966x->num_phys_ports))
+ continue;
+
+ spin_lock(&lan966x->mac_lock);
+ mac_entry = lan966x_mac_find_entry(lan966x, mac, vid, dest_idx);
+ if (mac_entry) {
+ spin_unlock(&lan966x->mac_lock);
+ continue;
+ }
+
+ port = lan966x->ports[dest_idx];
+ mac_entry = lan966x_mac_alloc_entry(port, mac, vid);
+ if (!mac_entry) {
+ spin_unlock(&lan966x->mac_lock);
+ return;
+ }
+
+ mac_entry->row = row;
+ list_add_tail(&mac_entry->list, &lan966x->mac_entries);
+ spin_unlock(&lan966x->mac_lock);
+
+ lan966x_mac_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
+ mac, vid, port->bond ?: port->dev);
+ }
+}
+
+irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x)
+{
+ struct lan966x_mac_raw_entry entry[LAN966X_MAC_COLUMNS] = { 0 };
+ u32 index, column;
+ bool stop = true;
+ u32 val;
+
+ /* Start the scan from 0, 0 */
+ lan_wr(ANA_MACTINDX_M_INDEX_SET(0) |
+ ANA_MACTINDX_BUCKET_SET(0),
+ lan966x, ANA_MACTINDX);
+
+ while (1) {
+ spin_lock(&lan966x->mac_lock);
+ lan_rmw(ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_SYNC_GET_NEXT),
+ ANA_MACACCESS_MAC_TABLE_CMD,
+ lan966x, ANA_MACACCESS);
+ lan966x_mac_wait_for_completion(lan966x);
+
+ val = lan_rd(lan966x, ANA_MACTINDX);
+ index = ANA_MACTINDX_M_INDEX_GET(val);
+ column = ANA_MACTINDX_BUCKET_GET(val);
+
+		/* SYNC-GET-NEXT returns all the entries (4) of a row in which
+		 * a change occurred, where a change means that a new entry
+		 * was added or an entry was removed because of ageing. It
+		 * returns all the columns of that row and after that it
+		 * returns the next changed row. The stop condition of
+		 * SYNC-GET-NEXT is reaching row 0, column 3 'directly'. So if
+		 * SYNC-GET-NEXT first returns row 0 and column 0, it is
+		 * required to keep reading even when row 0, column 3 is
+		 * reached later.
+		 */
+ if (index == 0 && column == 0)
+ stop = false;
+
+ if (column == LAN966X_MAC_COLUMNS - 1 &&
+ index == 0 && stop) {
+ spin_unlock(&lan966x->mac_lock);
+ break;
+ }
+
+ entry[column].mach = lan_rd(lan966x, ANA_MACHDATA);
+ entry[column].macl = lan_rd(lan966x, ANA_MACLDATA);
+ entry[column].maca = lan_rd(lan966x, ANA_MACACCESS);
+ spin_unlock(&lan966x->mac_lock);
+
+ /* Once all the columns are read process them */
+ if (column == LAN966X_MAC_COLUMNS - 1) {
+ lan966x_mac_irq_process(lan966x, index, entry);
+ /* A row was processed so it is safe to assume that the
+ * next row/column can be the stop condition
+ */
+ stop = true;
+ }
+ }
+
+ lan_rmw(ANA_ANAINTR_INTR_SET(0),
+ ANA_ANAINTR_INTR,
+ lan966x, ANA_ANAINTR);
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
new file mode 100644
index 0000000000..0d6e79af24
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -0,0 +1,1324 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/module.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/iopoll.h>
+#include <linux/ip.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <net/addrconf.h>
+
+#include "lan966x_main.h"
+
+#define XTR_EOF_0 0x00000080U
+#define XTR_EOF_1 0x01000080U
+#define XTR_EOF_2 0x02000080U
+#define XTR_EOF_3 0x03000080U
+#define XTR_PRUNED 0x04000080U
+#define XTR_ABORT 0x05000080U
+#define XTR_ESCAPE 0x06000080U
+#define XTR_NOT_READY 0x07000080U
+#define XTR_VALID_BYTES(x) (4 - (((x) >> 24) & 3))
+
+#define IO_RANGES 2
+
+static const struct of_device_id lan966x_match[] = {
+ { .compatible = "microchip,lan966x-switch" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lan966x_match);
+
+struct lan966x_main_io_resource {
+ enum lan966x_target id;
+ phys_addr_t offset;
+ int range;
+};
+
+static const struct lan966x_main_io_resource lan966x_main_iomap[] = {
+ { TARGET_CPU, 0xc0000, 0 }, /* 0xe00c0000 */
+ { TARGET_FDMA, 0xc0400, 0 }, /* 0xe00c0400 */
+ { TARGET_ORG, 0, 1 }, /* 0xe2000000 */
+ { TARGET_GCB, 0x4000, 1 }, /* 0xe2004000 */
+ { TARGET_QS, 0x8000, 1 }, /* 0xe2008000 */
+ { TARGET_PTP, 0xc000, 1 }, /* 0xe200c000 */
+ { TARGET_CHIP_TOP, 0x10000, 1 }, /* 0xe2010000 */
+ { TARGET_REW, 0x14000, 1 }, /* 0xe2014000 */
+ { TARGET_VCAP, 0x18000, 1 }, /* 0xe2018000 */
+ { TARGET_VCAP + 1, 0x20000, 1 }, /* 0xe2020000 */
+ { TARGET_VCAP + 2, 0x24000, 1 }, /* 0xe2024000 */
+ { TARGET_SYS, 0x28000, 1 }, /* 0xe2028000 */
+ { TARGET_DEV, 0x34000, 1 }, /* 0xe2034000 */
+ { TARGET_DEV + 1, 0x38000, 1 }, /* 0xe2038000 */
+ { TARGET_DEV + 2, 0x3c000, 1 }, /* 0xe203c000 */
+ { TARGET_DEV + 3, 0x40000, 1 }, /* 0xe2040000 */
+ { TARGET_DEV + 4, 0x44000, 1 }, /* 0xe2044000 */
+ { TARGET_DEV + 5, 0x48000, 1 }, /* 0xe2048000 */
+ { TARGET_DEV + 6, 0x4c000, 1 }, /* 0xe204c000 */
+ { TARGET_DEV + 7, 0x50000, 1 }, /* 0xe2050000 */
+ { TARGET_QSYS, 0x100000, 1 }, /* 0xe2100000 */
+ { TARGET_AFI, 0x120000, 1 }, /* 0xe2120000 */
+ { TARGET_ANA, 0x140000, 1 }, /* 0xe2140000 */
+};
+
+static int lan966x_create_targets(struct platform_device *pdev,
+ struct lan966x *lan966x)
+{
+ struct resource *iores[IO_RANGES];
+ void __iomem *begin[IO_RANGES];
+ int idx;
+
+ /* Initially map the entire range and after that update each target to
+ * point inside the region at the correct offset. It is possible that
+ * other devices access the same region so don't add any checks about
+ * this.
+ */
+ for (idx = 0; idx < IO_RANGES; idx++) {
+ iores[idx] = platform_get_resource(pdev, IORESOURCE_MEM,
+ idx);
+ if (!iores[idx]) {
+ dev_err(&pdev->dev, "Invalid resource\n");
+ return -EINVAL;
+ }
+
+ begin[idx] = devm_ioremap(&pdev->dev,
+ iores[idx]->start,
+ resource_size(iores[idx]));
+ if (!begin[idx]) {
+ dev_err(&pdev->dev, "Unable to get registers: %s\n",
+ iores[idx]->name);
+ return -ENOMEM;
+ }
+ }
+
+ for (idx = 0; idx < ARRAY_SIZE(lan966x_main_iomap); idx++) {
+ const struct lan966x_main_io_resource *iomap =
+ &lan966x_main_iomap[idx];
+
+ lan966x->regs[iomap->id] = begin[iomap->range] + iomap->offset;
+ }
+
+ return 0;
+}
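
Each target register base above resolves to begin[range] + offset, which is all lan966x_create_targets() computes per entry. A minimal user-space sketch of that address math, assuming the physical bases 0xe0000000 and 0xe2000000 implied by the comments in the iomap table (the struct and names here are illustrative, not the driver's):

    #include <stdio.h>

    struct io_resource { const char *name; unsigned long offset; int range; };

    int main(void)
    {
            /* Example physical bases taken from the iomap table comments */
            unsigned long range_base[2] = { 0xe0000000UL, 0xe2000000UL };
            struct io_resource map[] = {
                    { "CPU", 0xc0000,  0 }, /* expect 0xe00c0000 */
                    { "GCB", 0x4000,   1 }, /* expect 0xe2004000 */
                    { "ANA", 0x140000, 1 }, /* expect 0xe2140000 */
            };

            for (unsigned int i = 0; i < sizeof(map) / sizeof(map[0]); i++)
                    printf("%-3s -> 0x%08lx\n", map[i].name,
                           range_base[map[i].range] + map[i].offset);
            return 0;
    }
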
+
+static bool lan966x_port_unique_address(struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ int p;
+
+ for (p = 0; p < lan966x->num_phys_ports; ++p) {
+ port = lan966x->ports[p];
+ if (!port || port->dev == dev)
+ continue;
+
+ if (ether_addr_equal(dev->dev_addr, port->dev->dev_addr))
+ return false;
+ }
+
+ return true;
+}
+
+static int lan966x_port_set_mac_address(struct net_device *dev, void *p)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ const struct sockaddr *addr = p;
+ int ret;
+
+ if (ether_addr_equal(addr->sa_data, dev->dev_addr))
+ return 0;
+
+ /* Learn the new net device MAC address in the mac table. */
+ ret = lan966x_mac_cpu_learn(lan966x, addr->sa_data, HOST_PVID);
+ if (ret)
+ return ret;
+
+ /* If there is another port with the same address as the dev, then don't
+ * delete it from the MAC table
+ */
+ if (!lan966x_port_unique_address(dev))
+ goto out;
+
+ /* Then forget the previous one. */
+ ret = lan966x_mac_cpu_forget(lan966x, dev->dev_addr, HOST_PVID);
+ if (ret)
+ return ret;
+
+out:
+ eth_hw_addr_set(dev, addr->sa_data);
+ return ret;
+}
+
+static int lan966x_port_get_phys_port_name(struct net_device *dev,
+ char *buf, size_t len)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ int ret;
+
+ ret = snprintf(buf, len, "p%d", port->chip_port);
+ if (ret >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int lan966x_port_open(struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ int err;
+
+ /* Enable receiving frames on the port, and activate auto-learning of
+ * MAC addresses.
+ */
+ lan_rmw(ANA_PORT_CFG_LEARNAUTO_SET(1) |
+ ANA_PORT_CFG_RECV_ENA_SET(1) |
+ ANA_PORT_CFG_PORTID_VAL_SET(port->chip_port),
+ ANA_PORT_CFG_LEARNAUTO |
+ ANA_PORT_CFG_RECV_ENA |
+ ANA_PORT_CFG_PORTID_VAL,
+ lan966x, ANA_PORT_CFG(port->chip_port));
+
+ err = phylink_fwnode_phy_connect(port->phylink, port->fwnode, 0);
+ if (err) {
+ netdev_err(dev, "Could not attach to PHY\n");
+ return err;
+ }
+
+ phylink_start(port->phylink);
+
+ return 0;
+}
+
+static int lan966x_port_stop(struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ lan966x_port_config_down(port);
+ phylink_stop(port->phylink);
+ phylink_disconnect_phy(port->phylink);
+
+ return 0;
+}
+
+static int lan966x_port_inj_status(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, QS_INJ_STATUS);
+}
+
+static int lan966x_port_inj_ready(struct lan966x *lan966x, u8 grp)
+{
+ u32 val;
+
+ if (lan_rd(lan966x, QS_INJ_STATUS) & QS_INJ_STATUS_FIFO_RDY_SET(BIT(grp)))
+ return 0;
+
+ return readx_poll_timeout_atomic(lan966x_port_inj_status, lan966x, val,
+ QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp),
+ READL_SLEEP_US, READL_TIMEOUT_US);
+}
+
+static int lan966x_port_ifh_xmit(struct sk_buff *skb,
+ __be32 *ifh,
+ struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ u32 i, count, last;
+ u8 grp = 0;
+ u32 val;
+ int err;
+
+ val = lan_rd(lan966x, QS_INJ_STATUS);
+ if (!(QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp)) ||
+ (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)))
+ goto err;
+
+ /* Write start of frame */
+ lan_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
+ QS_INJ_CTRL_SOF_SET(1),
+ lan966x, QS_INJ_CTRL(grp));
+
+ /* Write IFH header */
+ for (i = 0; i < IFH_LEN; ++i) {
+ /* Wait until the fifo is ready */
+ err = lan966x_port_inj_ready(lan966x, grp);
+ if (err)
+ goto err;
+
+ lan_wr((__force u32)ifh[i], lan966x, QS_INJ_WR(grp));
+ }
+
+ /* Write frame */
+ count = DIV_ROUND_UP(skb->len, 4);
+ last = skb->len % 4;
+ for (i = 0; i < count; ++i) {
+ /* Wait until the fifo is ready */
+ err = lan966x_port_inj_ready(lan966x, grp);
+ if (err)
+ goto err;
+
+ lan_wr(((u32 *)skb->data)[i], lan966x, QS_INJ_WR(grp));
+ }
+
+ /* Add padding */
+ while (i < (LAN966X_BUFFER_MIN_SZ / 4)) {
+ /* Wait until the fifo is ready */
+ err = lan966x_port_inj_ready(lan966x, grp);
+ if (err)
+ goto err;
+
+ lan_wr(0, lan966x, QS_INJ_WR(grp));
+ ++i;
+ }
+
+	/* Indicate EOF and valid bytes in the last word */
+ lan_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
+ QS_INJ_CTRL_VLD_BYTES_SET(skb->len < LAN966X_BUFFER_MIN_SZ ?
+ 0 : last) |
+ QS_INJ_CTRL_EOF_SET(1),
+ lan966x, QS_INJ_CTRL(grp));
+
+ /* Add dummy CRC */
+ lan_wr(0, lan966x, QS_INJ_WR(grp));
+ skb_tx_timestamp(skb);
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ return NETDEV_TX_OK;
+
+ dev_consume_skb_any(skb);
+ return NETDEV_TX_OK;
+
+err:
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ lan966x_ptp_txtstamp_release(port, skb);
+
+ return NETDEV_TX_BUSY;
+}
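
The injection path above writes the frame as 32-bit words, pads short frames up to LAN966X_BUFFER_MIN_SZ (60 bytes), and programs the number of valid bytes of the last data word into QS_INJ_CTRL (0 meaning all four). A standalone sketch of just that arithmetic, with no hardware access (the helper name and output format are invented for illustration):

    #include <stdio.h>

    #define BUFFER_MIN_SZ       60      /* mirrors LAN966X_BUFFER_MIN_SZ */
    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    /* Hypothetical helper: word/padding layout for a frame of 'len' bytes */
    static void inj_layout(unsigned int len)
    {
            unsigned int words = DIV_ROUND_UP(len, 4);
            unsigned int pad = 0;

            if (words < BUFFER_MIN_SZ / 4)
                    pad = BUFFER_MIN_SZ / 4 - words;

            /* vld_bytes follows QS_INJ_CTRL_VLD_BYTES: 0 = all 4 valid */
            printf("len %4u: %3u data words, %u pad words, vld_bytes %u\n",
                   len, words, pad, len < BUFFER_MIN_SZ ? 0 : len % 4);
    }

    int main(void)
    {
            inj_layout(42);     /* short frame, padded to 60 bytes */
            inj_layout(60);
            inj_layout(1514);   /* 379 words, last word has 2 valid bytes */
            return 0;
    }
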
+
+static void lan966x_ifh_set(u8 *ifh, size_t val, size_t pos, size_t length)
+{
+ int i = 0;
+
+ do {
+ u8 p = IFH_LEN_BYTES - (pos + i) / 8 - 1;
+ u8 v = val >> i & 0xff;
+
+		/* There is no need to check the limits of the array, as these
+		 * positions will never be written out of range
+ */
+ ifh[p] |= v << ((pos + i) % 8);
+ ifh[p - 1] |= v >> (8 - (pos + i) % 8);
+
+ i += 8;
+ } while (i < length);
+}
+
+void lan966x_ifh_set_bypass(void *ifh, u64 bypass)
+{
+ lan966x_ifh_set(ifh, bypass, IFH_POS_BYPASS, IFH_WID_BYPASS);
+}
+
+void lan966x_ifh_set_port(void *ifh, u64 port)
+{
+ lan966x_ifh_set(ifh, port, IFH_POS_DSTS, IFH_WID_DSTS);
+}
+
+static void lan966x_ifh_set_qos_class(void *ifh, u64 qos)
+{
+ lan966x_ifh_set(ifh, qos, IFH_POS_QOS_CLASS, IFH_WID_QOS_CLASS);
+}
+
+static void lan966x_ifh_set_ipv(void *ifh, u64 ipv)
+{
+ lan966x_ifh_set(ifh, ipv, IFH_POS_IPV, IFH_WID_IPV);
+}
+
+static void lan966x_ifh_set_vid(void *ifh, u64 vid)
+{
+ lan966x_ifh_set(ifh, vid, IFH_POS_TCI, IFH_WID_TCI);
+}
+
+static void lan966x_ifh_set_rew_op(void *ifh, u64 rew_op)
+{
+ lan966x_ifh_set(ifh, rew_op, IFH_POS_REW_CMD, IFH_WID_REW_CMD);
+}
+
+static void lan966x_ifh_set_timestamp(void *ifh, u64 timestamp)
+{
+ lan966x_ifh_set(ifh, timestamp, IFH_POS_TIMESTAMP, IFH_WID_TIMESTAMP);
+}
+
+static netdev_tx_t lan966x_port_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ __be32 ifh[IFH_LEN];
+ int err;
+
+ memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
+
+ lan966x_ifh_set_bypass(ifh, 1);
+ lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
+ lan966x_ifh_set_qos_class(ifh, skb->priority >= 7 ? 0x7 : skb->priority);
+ lan966x_ifh_set_ipv(ifh, skb->priority >= 7 ? 0x7 : skb->priority);
+ lan966x_ifh_set_vid(ifh, skb_vlan_tag_get(skb));
+
+ if (port->lan966x->ptp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ err = lan966x_ptp_txtstamp_request(port, skb);
+ if (err)
+ return err;
+
+ lan966x_ifh_set_rew_op(ifh, LAN966X_SKB_CB(skb)->rew_op);
+ lan966x_ifh_set_timestamp(ifh, LAN966X_SKB_CB(skb)->ts_id);
+ }
+
+ spin_lock(&lan966x->tx_lock);
+ if (port->lan966x->fdma)
+ err = lan966x_fdma_xmit(skb, ifh, dev);
+ else
+ err = lan966x_port_ifh_xmit(skb, ifh, dev);
+ spin_unlock(&lan966x->tx_lock);
+
+ return err;
+}
+
+static int lan966x_port_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ int old_mtu = dev->mtu;
+ int err;
+
+ lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(LAN966X_HW_MTU(new_mtu)),
+ lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
+ dev->mtu = new_mtu;
+
+ if (!lan966x->fdma)
+ return 0;
+
+ err = lan966x_fdma_change_mtu(lan966x);
+ if (err) {
+ lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(LAN966X_HW_MTU(old_mtu)),
+ lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
+ dev->mtu = old_mtu;
+ }
+
+ return err;
+}
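
LAN966X_HW_MTU() (defined in lan966x_main.h later in this patch) converts the L3 MTU into the on-wire frame length programmed into DEV_MAC_MAXLEN_CFG by adding the Ethernet header and FCS. A quick check of that arithmetic with the standard 14-byte header and 4-byte FCS:

    #include <stdio.h>

    #define ETH_HLEN    14  /* Ethernet header */
    #define ETH_FCS_LEN 4   /* frame check sequence */
    #define LAN966X_HW_MTU(mtu) ((mtu) + ETH_HLEN + ETH_FCS_LEN)

    int main(void)
    {
            printf("mtu %d -> max frame %d\n", 1500, LAN966X_HW_MTU(1500)); /* 1518 */
            printf("mtu %d -> max frame %d\n", 9000, LAN966X_HW_MTU(9000)); /* 9018 */
            return 0;
    }
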
+
+static int lan966x_mc_unsync(struct net_device *dev, const unsigned char *addr)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+
+ return lan966x_mac_forget(lan966x, addr, HOST_PVID, ENTRYTYPE_LOCKED);
+}
+
+static int lan966x_mc_sync(struct net_device *dev, const unsigned char *addr)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+
+ return lan966x_mac_cpu_learn(lan966x, addr, HOST_PVID);
+}
+
+static void lan966x_port_set_rx_mode(struct net_device *dev)
+{
+ __dev_mc_sync(dev, lan966x_mc_sync, lan966x_mc_unsync);
+}
+
+static int lan966x_port_get_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+
+ ppid->id_len = sizeof(lan966x->base_mac);
+ memcpy(&ppid->id, &lan966x->base_mac, ppid->id_len);
+
+ return 0;
+}
+
+static int lan966x_port_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ if (!port->lan966x->ptp)
+ return -EOPNOTSUPP;
+
+ lan966x_ptp_hwtstamp_get(port, cfg);
+
+ return 0;
+}
+
+static int lan966x_port_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ int err;
+
+ if (cfg->source != HWTSTAMP_SOURCE_NETDEV &&
+ cfg->source != HWTSTAMP_SOURCE_PHYLIB)
+ return -EOPNOTSUPP;
+
+ err = lan966x_ptp_setup_traps(port, cfg);
+ if (err)
+ return err;
+
+ if (cfg->source == HWTSTAMP_SOURCE_NETDEV) {
+ if (!port->lan966x->ptp)
+ return -EOPNOTSUPP;
+
+ err = lan966x_ptp_hwtstamp_set(port, cfg, extack);
+ if (err) {
+ lan966x_ptp_del_traps(port);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static const struct net_device_ops lan966x_port_netdev_ops = {
+ .ndo_open = lan966x_port_open,
+ .ndo_stop = lan966x_port_stop,
+ .ndo_start_xmit = lan966x_port_xmit,
+ .ndo_change_mtu = lan966x_port_change_mtu,
+ .ndo_set_rx_mode = lan966x_port_set_rx_mode,
+ .ndo_get_phys_port_name = lan966x_port_get_phys_port_name,
+ .ndo_get_stats64 = lan966x_stats_get,
+ .ndo_set_mac_address = lan966x_port_set_mac_address,
+ .ndo_get_port_parent_id = lan966x_port_get_parent_id,
+ .ndo_eth_ioctl = phy_do_ioctl,
+ .ndo_setup_tc = lan966x_tc_setup,
+ .ndo_bpf = lan966x_xdp,
+ .ndo_xdp_xmit = lan966x_xdp_xmit,
+ .ndo_hwtstamp_get = lan966x_port_hwtstamp_get,
+ .ndo_hwtstamp_set = lan966x_port_hwtstamp_set,
+};
+
+bool lan966x_netdevice_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &lan966x_port_netdev_ops;
+}
+
+bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, struct sk_buff *skb)
+{
+ u32 val;
+
+	/* The IGMP and MLD frames are not forwarded by the HW if
+	 * multicast snooping is enabled, therefore don't mark them as
+	 * offloaded, to allow the SW to forward the frames accordingly.
+ */
+ val = lan_rd(lan966x, ANA_CPU_FWD_CFG(port));
+ if (!(val & (ANA_CPU_FWD_CFG_IGMP_REDIR_ENA |
+ ANA_CPU_FWD_CFG_MLD_REDIR_ENA)))
+ return true;
+
+ if (eth_type_vlan(skb->protocol)) {
+ skb = skb_vlan_untag(skb);
+ if (unlikely(!skb))
+ return false;
+ }
+
+ if (skb->protocol == htons(ETH_P_IP) &&
+ ip_hdr(skb)->protocol == IPPROTO_IGMP)
+ return false;
+
+ if (IS_ENABLED(CONFIG_IPV6) &&
+ skb->protocol == htons(ETH_P_IPV6) &&
+ ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) &&
+ !ipv6_mc_check_mld(skb))
+ return false;
+
+ return true;
+}
+
+static int lan966x_port_xtr_status(struct lan966x *lan966x, u8 grp)
+{
+ return lan_rd(lan966x, QS_XTR_RD(grp));
+}
+
+static int lan966x_port_xtr_ready(struct lan966x *lan966x, u8 grp)
+{
+ u32 val;
+
+ return read_poll_timeout(lan966x_port_xtr_status, val,
+ val != XTR_NOT_READY,
+ READL_SLEEP_US, READL_TIMEOUT_US, false,
+ lan966x, grp);
+}
+
+static int lan966x_rx_frame_word(struct lan966x *lan966x, u8 grp, u32 *rval)
+{
+ u32 bytes_valid;
+ u32 val;
+ int err;
+
+ val = lan_rd(lan966x, QS_XTR_RD(grp));
+ if (val == XTR_NOT_READY) {
+ err = lan966x_port_xtr_ready(lan966x, grp);
+ if (err)
+ return -EIO;
+ }
+
+ switch (val) {
+ case XTR_ABORT:
+ return -EIO;
+ case XTR_EOF_0:
+ case XTR_EOF_1:
+ case XTR_EOF_2:
+ case XTR_EOF_3:
+ case XTR_PRUNED:
+ bytes_valid = XTR_VALID_BYTES(val);
+ val = lan_rd(lan966x, QS_XTR_RD(grp));
+ if (val == XTR_ESCAPE)
+ *rval = lan_rd(lan966x, QS_XTR_RD(grp));
+ else
+ *rval = val;
+
+ return bytes_valid;
+ case XTR_ESCAPE:
+ *rval = lan_rd(lan966x, QS_XTR_RD(grp));
+
+ return 4;
+ default:
+ *rval = val;
+
+ return 4;
+ }
+}
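
An extraction status word carries a marker in its low bits and, for the EOF/PRUNED markers, encodes in bits 25:24 how many bytes of the final data word are valid; XTR_VALID_BYTES() computes 4 - ((x >> 24) & 3). A small decode sketch reusing the defines from the top of this file:

    #include <stdio.h>

    #define XTR_EOF_0           0x00000080U
    #define XTR_EOF_1           0x01000080U
    #define XTR_EOF_2           0x02000080U
    #define XTR_EOF_3           0x03000080U
    #define XTR_VALID_BYTES(x)  (4 - (((x) >> 24) & 3))

    int main(void)
    {
            unsigned int words[] = { XTR_EOF_0, XTR_EOF_1, XTR_EOF_2, XTR_EOF_3 };

            /* EOF_0 means all 4 bytes of the last word are valid, EOF_1
             * means 3, and so on - the 'bytes_valid' value returned by
             * lan966x_rx_frame_word().
             */
            for (unsigned int i = 0; i < sizeof(words) / sizeof(words[0]); i++)
                    printf("0x%08x -> %u valid bytes\n",
                           words[i], XTR_VALID_BYTES(words[i]));
            return 0;
    }
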
+
+static u64 lan966x_ifh_get(u8 *ifh, size_t pos, size_t length)
+{
+ u64 val = 0;
+ u8 v;
+
+ for (int i = 0; i < length ; i++) {
+ int j = pos + i;
+ int k = j % 8;
+
+ if (i == 0 || k == 0)
+ v = ifh[IFH_LEN_BYTES - (j / 8) - 1];
+
+ if (v & (1 << k))
+ val |= (1ULL << i);
+ }
+
+ return val;
+}
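
lan966x_ifh_set() and lan966x_ifh_get() treat the IFH as one large bit field stored most-significant byte first: bit 'pos' lives in byte IFH_LEN_BYTES - pos/8 - 1, and fields may straddle byte boundaries. A self-contained round-trip sketch of the same packing, assuming a 28-byte IFH and an invented field position/width (a bounds guard is added here that the driver, by its own comment, does not need):

    #include <stdio.h>
    #include <stdint.h>

    #define IFH_BYTES 28    /* assumed IFH size */

    static void ifh_set(uint8_t *ifh, uint64_t val, size_t pos, size_t length)
    {
            size_t i = 0;

            do {
                    size_t p = IFH_BYTES - (pos + i) / 8 - 1;
                    uint8_t v = val >> i & 0xff;

                    ifh[p] |= v << ((pos + i) % 8);
                    if (p)  /* guard for this sketch only */
                            ifh[p - 1] |= v >> (8 - (pos + i) % 8);
                    i += 8;
            } while (i < length);
    }

    static uint64_t ifh_get(const uint8_t *ifh, size_t pos, size_t length)
    {
            uint64_t val = 0;
            uint8_t v = 0;

            for (size_t i = 0; i < length; i++) {
                    size_t j = pos + i, k = j % 8;

                    if (i == 0 || k == 0)
                            v = ifh[IFH_BYTES - j / 8 - 1];
                    if (v & (1 << k))
                            val |= 1ULL << i;
            }
            return val;
    }

    int main(void)
    {
            uint8_t ifh[IFH_BYTES] = { 0 };

            ifh_set(ifh, 0xabc, 57, 12);    /* hypothetical 12-bit field */
            printf("read back: 0x%llx\n",   /* prints 0xabc */
                   (unsigned long long)ifh_get(ifh, 57, 12));
            return 0;
    }
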
+
+void lan966x_ifh_get_src_port(void *ifh, u64 *src_port)
+{
+ *src_port = lan966x_ifh_get(ifh, IFH_POS_SRCPORT, IFH_WID_SRCPORT);
+}
+
+static void lan966x_ifh_get_len(void *ifh, u64 *len)
+{
+ *len = lan966x_ifh_get(ifh, IFH_POS_LEN, IFH_WID_LEN);
+}
+
+void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp)
+{
+ *timestamp = lan966x_ifh_get(ifh, IFH_POS_TIMESTAMP, IFH_WID_TIMESTAMP);
+}
+
+static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args)
+{
+ struct lan966x *lan966x = args;
+ int i, grp = 0, err = 0;
+
+ if (!(lan_rd(lan966x, QS_XTR_DATA_PRESENT) & BIT(grp)))
+ return IRQ_NONE;
+
+ do {
+ u64 src_port, len, timestamp;
+ struct net_device *dev;
+ struct sk_buff *skb;
+ int sz = 0, buf_len;
+ u32 ifh[IFH_LEN];
+ u32 *buf;
+ u32 val;
+
+ for (i = 0; i < IFH_LEN; i++) {
+ err = lan966x_rx_frame_word(lan966x, grp, &ifh[i]);
+ if (err != 4)
+ goto recover;
+ }
+
+ err = 0;
+
+ lan966x_ifh_get_src_port(ifh, &src_port);
+ lan966x_ifh_get_len(ifh, &len);
+ lan966x_ifh_get_timestamp(ifh, &timestamp);
+
+ WARN_ON(src_port >= lan966x->num_phys_ports);
+
+ dev = lan966x->ports[src_port]->dev;
+ skb = netdev_alloc_skb(dev, len);
+ if (unlikely(!skb)) {
+ netdev_err(dev, "Unable to allocate sk_buff\n");
+ err = -ENOMEM;
+ break;
+ }
+ buf_len = len - ETH_FCS_LEN;
+ buf = (u32 *)skb_put(skb, buf_len);
+
+ len = 0;
+ do {
+ sz = lan966x_rx_frame_word(lan966x, grp, &val);
+ if (sz < 0) {
+ kfree_skb(skb);
+ goto recover;
+ }
+
+ *buf++ = val;
+ len += sz;
+ } while (len < buf_len);
+
+ /* Read the FCS */
+ sz = lan966x_rx_frame_word(lan966x, grp, &val);
+ if (sz < 0) {
+ kfree_skb(skb);
+ goto recover;
+ }
+
+ /* Update the statistics if part of the FCS was read before */
+ len -= ETH_FCS_LEN - sz;
+
+ if (unlikely(dev->features & NETIF_F_RXFCS)) {
+ buf = (u32 *)skb_put(skb, ETH_FCS_LEN);
+ *buf = val;
+ }
+
+ lan966x_ptp_rxtstamp(lan966x, skb, src_port, timestamp);
+ skb->protocol = eth_type_trans(skb, dev);
+
+ if (lan966x->bridge_mask & BIT(src_port)) {
+ skb->offload_fwd_mark = 1;
+
+ skb_reset_network_header(skb);
+ if (!lan966x_hw_offload(lan966x, src_port, skb))
+ skb->offload_fwd_mark = 0;
+ }
+
+ if (!skb_defer_rx_timestamp(skb))
+ netif_rx(skb);
+
+ dev->stats.rx_bytes += len;
+ dev->stats.rx_packets++;
+
+recover:
+ if (sz < 0 || err)
+ lan_rd(lan966x, QS_XTR_RD(grp));
+
+ } while (lan_rd(lan966x, QS_XTR_DATA_PRESENT) & BIT(grp));
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t lan966x_ana_irq_handler(int irq, void *args)
+{
+ struct lan966x *lan966x = args;
+
+ return lan966x_mac_irq_handler(lan966x);
+}
+
+static void lan966x_cleanup_ports(struct lan966x *lan966x)
+{
+ struct lan966x_port *port;
+ int p;
+
+ for (p = 0; p < lan966x->num_phys_ports; p++) {
+ port = lan966x->ports[p];
+ if (!port)
+ continue;
+
+ if (port->dev)
+ unregister_netdev(port->dev);
+
+ lan966x_xdp_port_deinit(port);
+ if (lan966x->fdma && lan966x->fdma_ndev == port->dev)
+ lan966x_fdma_netdev_deinit(lan966x, port->dev);
+
+ if (port->phylink) {
+ rtnl_lock();
+ lan966x_port_stop(port->dev);
+ rtnl_unlock();
+ phylink_destroy(port->phylink);
+ port->phylink = NULL;
+ }
+
+ if (port->fwnode)
+ fwnode_handle_put(port->fwnode);
+ }
+
+ disable_irq(lan966x->xtr_irq);
+ lan966x->xtr_irq = -ENXIO;
+
+ if (lan966x->ana_irq > 0) {
+ disable_irq(lan966x->ana_irq);
+ lan966x->ana_irq = -ENXIO;
+ }
+
+ if (lan966x->fdma)
+ devm_free_irq(lan966x->dev, lan966x->fdma_irq, lan966x);
+
+ if (lan966x->ptp_irq > 0)
+ devm_free_irq(lan966x->dev, lan966x->ptp_irq, lan966x);
+
+ if (lan966x->ptp_ext_irq > 0)
+ devm_free_irq(lan966x->dev, lan966x->ptp_ext_irq, lan966x);
+}
+
+static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
+ phy_interface_t phy_mode,
+ struct fwnode_handle *portnp)
+{
+ struct lan966x_port *port;
+ struct phylink *phylink;
+ struct net_device *dev;
+ int err;
+
+ if (p >= lan966x->num_phys_ports)
+ return -EINVAL;
+
+ dev = devm_alloc_etherdev_mqs(lan966x->dev,
+ sizeof(struct lan966x_port),
+ NUM_PRIO_QUEUES, 1);
+ if (!dev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(dev, lan966x->dev);
+ port = netdev_priv(dev);
+ port->dev = dev;
+ port->lan966x = lan966x;
+ port->chip_port = p;
+ lan966x->ports[p] = port;
+
+ dev->max_mtu = ETH_MAX_MTU;
+
+ dev->netdev_ops = &lan966x_port_netdev_ops;
+ dev->ethtool_ops = &lan966x_ethtool_ops;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX |
+ NETIF_F_HW_TC;
+ dev->hw_features |= NETIF_F_HW_TC;
+ dev->priv_flags |= IFF_SEE_ALL_HWTSTAMP_REQUESTS;
+ dev->needed_headroom = IFH_LEN_BYTES;
+
+ eth_hw_addr_gen(dev, lan966x->base_mac, p + 1);
+
+ lan966x_mac_learn(lan966x, PGID_CPU, dev->dev_addr, HOST_PVID,
+ ENTRYTYPE_LOCKED);
+
+ port->phylink_config.dev = &port->dev->dev;
+ port->phylink_config.type = PHYLINK_NETDEV;
+ port->phylink_pcs.poll = true;
+ port->phylink_pcs.ops = &lan966x_phylink_pcs_ops;
+ port->phylink_pcs.neg_mode = true;
+
+ port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD;
+
+ phy_interface_set_rgmii(port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_QSGMII,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_QUSGMII,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ port->phylink_config.supported_interfaces);
+
+ phylink = phylink_create(&port->phylink_config,
+ portnp,
+ phy_mode,
+ &lan966x_phylink_mac_ops);
+ if (IS_ERR(phylink)) {
+ port->dev = NULL;
+ return PTR_ERR(phylink);
+ }
+
+ port->phylink = phylink;
+
+ if (lan966x->fdma)
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(lan966x->dev, "register_netdev failed\n");
+ return err;
+ }
+
+ lan966x_vlan_port_set_vlan_aware(port, 0);
+ lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
+ lan966x_vlan_port_apply(port);
+
+ return 0;
+}
+
+static void lan966x_init(struct lan966x *lan966x)
+{
+ u32 p, i;
+
+ /* MAC table initialization */
+ lan966x_mac_init(lan966x);
+
+ lan966x_vlan_init(lan966x);
+
+ /* Flush queues */
+ lan_wr(lan_rd(lan966x, QS_XTR_FLUSH) |
+ GENMASK(1, 0),
+ lan966x, QS_XTR_FLUSH);
+
+	/* Allow the queues to drain */
+ mdelay(1);
+
+ /* All Queues normal */
+ lan_wr(lan_rd(lan966x, QS_XTR_FLUSH) &
+ ~(GENMASK(1, 0)),
+ lan966x, QS_XTR_FLUSH);
+
+	/* Set MAC age time to the default value; an entry is aged out after
+	 * 2 * AGE_PERIOD. With BR_DEFAULT_AGEING_TIME of 300 * HZ this is a
+	 * 150 s period, i.e. ageing after 300 s.
+ */
+ lan_wr(ANA_AUTOAGE_AGE_PERIOD_SET(BR_DEFAULT_AGEING_TIME / 2 / HZ),
+ lan966x, ANA_AUTOAGE);
+
+ /* Disable learning for frames discarded by VLAN ingress filtering */
+ lan_rmw(ANA_ADVLEARN_VLAN_CHK_SET(1),
+ ANA_ADVLEARN_VLAN_CHK,
+ lan966x, ANA_ADVLEARN);
+
+ /* Setup frame ageing - "2 sec" - The unit is 6.5 us on lan966x */
+ lan_wr(SYS_FRM_AGING_AGE_TX_ENA_SET(1) |
+ (20000000 / 65),
+ lan966x, SYS_FRM_AGING);
+
+ /* Map the 8 CPU extraction queues to CPU port */
+ lan_wr(0, lan966x, QSYS_CPU_GROUP_MAP);
+
+ /* Do byte-swap and expect status after last data word
+	 * Extraction: Mode: manual extraction | Byte_swap
+ */
+ lan_wr(QS_XTR_GRP_CFG_MODE_SET(lan966x->fdma ? 2 : 1) |
+ QS_XTR_GRP_CFG_BYTE_SWAP_SET(1),
+ lan966x, QS_XTR_GRP_CFG(0));
+
+ /* Injection: Mode: manual injection | Byte_swap */
+ lan_wr(QS_INJ_GRP_CFG_MODE_SET(lan966x->fdma ? 2 : 1) |
+ QS_INJ_GRP_CFG_BYTE_SWAP_SET(1),
+ lan966x, QS_INJ_GRP_CFG(0));
+
+ lan_rmw(QS_INJ_CTRL_GAP_SIZE_SET(0),
+ QS_INJ_CTRL_GAP_SIZE,
+ lan966x, QS_INJ_CTRL(0));
+
+ /* Enable IFH insertion/parsing on CPU ports */
+ lan_wr(SYS_PORT_MODE_INCL_INJ_HDR_SET(1) |
+ SYS_PORT_MODE_INCL_XTR_HDR_SET(1),
+ lan966x, SYS_PORT_MODE(CPU_PORT));
+
+ /* Setup flooding PGIDs */
+ lan_wr(ANA_FLOODING_IPMC_FLD_MC4_DATA_SET(PGID_MCIPV4) |
+ ANA_FLOODING_IPMC_FLD_MC4_CTRL_SET(PGID_MC) |
+ ANA_FLOODING_IPMC_FLD_MC6_DATA_SET(PGID_MCIPV6) |
+ ANA_FLOODING_IPMC_FLD_MC6_CTRL_SET(PGID_MC),
+ lan966x, ANA_FLOODING_IPMC);
+
+ /* There are 8 priorities */
+ for (i = 0; i < 8; ++i)
+ lan_rmw(ANA_FLOODING_FLD_MULTICAST_SET(PGID_MC) |
+ ANA_FLOODING_FLD_UNICAST_SET(PGID_UC) |
+ ANA_FLOODING_FLD_BROADCAST_SET(PGID_BC),
+ ANA_FLOODING_FLD_MULTICAST |
+ ANA_FLOODING_FLD_UNICAST |
+ ANA_FLOODING_FLD_BROADCAST,
+ lan966x, ANA_FLOODING(i));
+
+ for (i = 0; i < PGID_ENTRIES; ++i)
+ /* Set all the entries to obey VLAN_VLAN */
+ lan_rmw(ANA_PGID_CFG_OBEY_VLAN_SET(1),
+ ANA_PGID_CFG_OBEY_VLAN,
+ lan966x, ANA_PGID_CFG(i));
+
+ for (p = 0; p < lan966x->num_phys_ports; p++) {
+ /* Disable bridging by default */
+ lan_rmw(ANA_PGID_PGID_SET(0x0),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(p + PGID_SRC));
+
+		/* Do not forward BPDU frames to the front ports; copy them
+		 * to the CPU instead
+ */
+ lan_wr(0xffff, lan966x, ANA_CPU_FWD_BPDU_CFG(p));
+ }
+
+ /* Set source buffer size for each priority and each port to 1500 bytes */
+ for (i = 0; i <= QSYS_Q_RSRV; ++i) {
+ lan_wr(1500 / 64, lan966x, QSYS_RES_CFG(i));
+ lan_wr(1500 / 64, lan966x, QSYS_RES_CFG(512 + i));
+ }
+
+ /* Enable switching to/from cpu port */
+ lan_wr(QSYS_SW_PORT_MODE_PORT_ENA_SET(1) |
+ QSYS_SW_PORT_MODE_SCH_NEXT_CFG_SET(1) |
+ QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_SET(1),
+ lan966x, QSYS_SW_PORT_MODE(CPU_PORT));
+
+ /* Configure and enable the CPU port */
+ lan_rmw(ANA_PGID_PGID_SET(0),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(CPU_PORT));
+ lan_rmw(ANA_PGID_PGID_SET(BIT(CPU_PORT)),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(PGID_CPU));
+
+ /* Multicast to all other ports */
+ lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(PGID_MC));
+
+ /* This will be controlled by mrouter ports */
+ lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(PGID_MCIPV4));
+
+ lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(PGID_MCIPV6));
+
+ /* Unicast to all other ports */
+ lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(PGID_UC));
+
+ /* Broadcast to the CPU port and to other ports */
+ lan_rmw(ANA_PGID_PGID_SET(BIT(CPU_PORT) | GENMASK(lan966x->num_phys_ports - 1, 0)),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(PGID_BC));
+
+ lan_wr(REW_PORT_CFG_NO_REWRITE_SET(1),
+ lan966x, REW_PORT_CFG(CPU_PORT));
+
+ lan_rmw(ANA_ANAINTR_INTR_ENA_SET(1),
+ ANA_ANAINTR_INTR_ENA,
+ lan966x, ANA_ANAINTR);
+
+ spin_lock_init(&lan966x->tx_lock);
+
+ lan966x_taprio_init(lan966x);
+}
+
+static int lan966x_ram_init(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, SYS_RAM_INIT);
+}
+
+static int lan966x_reset_switch(struct lan966x *lan966x)
+{
+ struct reset_control *switch_reset;
+ int val = 0;
+ int ret;
+
+ switch_reset = devm_reset_control_get_optional_shared(lan966x->dev,
+ "switch");
+ if (IS_ERR(switch_reset))
+ return dev_err_probe(lan966x->dev, PTR_ERR(switch_reset),
+ "Could not obtain switch reset");
+
+ reset_control_reset(switch_reset);
+
+	/* Don't reinitialize the switch core if it is already initialized. In
+ * case it is initialized twice, some pointers inside the queue system
+ * in HW will get corrupted and then after a while the queue system gets
+ * full and no traffic is passing through the switch. The issue is seen
+ * when loading and unloading the driver and sending traffic through the
+ * switch.
+ */
+ if (lan_rd(lan966x, SYS_RESET_CFG) & SYS_RESET_CFG_CORE_ENA)
+ return 0;
+
+ lan_wr(SYS_RESET_CFG_CORE_ENA_SET(0), lan966x, SYS_RESET_CFG);
+ lan_wr(SYS_RAM_INIT_RAM_INIT_SET(1), lan966x, SYS_RAM_INIT);
+ ret = readx_poll_timeout(lan966x_ram_init, lan966x,
+ val, (val & BIT(1)) == 0, READL_SLEEP_US,
+ READL_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ lan_wr(SYS_RESET_CFG_CORE_ENA_SET(1), lan966x, SYS_RESET_CFG);
+
+ return 0;
+}
+
+static int lan966x_probe(struct platform_device *pdev)
+{
+ struct fwnode_handle *ports, *portnp;
+ struct lan966x *lan966x;
+ u8 mac_addr[ETH_ALEN];
+ int err;
+
+ lan966x = devm_kzalloc(&pdev->dev, sizeof(*lan966x), GFP_KERNEL);
+ if (!lan966x)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, lan966x);
+ lan966x->dev = &pdev->dev;
+
+ lan966x->debugfs_root = debugfs_create_dir("lan966x", NULL);
+
+ if (!device_get_mac_address(&pdev->dev, mac_addr)) {
+ ether_addr_copy(lan966x->base_mac, mac_addr);
+ } else {
+ pr_info("MAC addr was not set, use random MAC\n");
+ eth_random_addr(lan966x->base_mac);
+ lan966x->base_mac[5] &= 0xf0;
+ }
+
+ err = lan966x_create_targets(pdev, lan966x);
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "Failed to create targets");
+
+ err = lan966x_reset_switch(lan966x);
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "Reset failed");
+
+ lan966x->num_phys_ports = NUM_PHYS_PORTS;
+ lan966x->ports = devm_kcalloc(&pdev->dev, lan966x->num_phys_ports,
+ sizeof(struct lan966x_port *),
+ GFP_KERNEL);
+ if (!lan966x->ports)
+ return -ENOMEM;
+
+	/* The QS system has 32KB of memory */
+ lan966x->shared_queue_sz = LAN966X_BUFFER_MEMORY;
+
+ /* set irq */
+ lan966x->xtr_irq = platform_get_irq_byname(pdev, "xtr");
+ if (lan966x->xtr_irq < 0)
+ return lan966x->xtr_irq;
+
+ err = devm_request_threaded_irq(&pdev->dev, lan966x->xtr_irq, NULL,
+ lan966x_xtr_irq_handler, IRQF_ONESHOT,
+ "frame extraction", lan966x);
+ if (err) {
+ pr_err("Unable to use xtr irq");
+ return -ENODEV;
+ }
+
+ lan966x->ana_irq = platform_get_irq_byname(pdev, "ana");
+ if (lan966x->ana_irq > 0) {
+ err = devm_request_threaded_irq(&pdev->dev, lan966x->ana_irq, NULL,
+ lan966x_ana_irq_handler, IRQF_ONESHOT,
+ "ana irq", lan966x);
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "Unable to use ana irq");
+ }
+
+ lan966x->ptp_irq = platform_get_irq_byname(pdev, "ptp");
+ if (lan966x->ptp_irq > 0) {
+ err = devm_request_threaded_irq(&pdev->dev, lan966x->ptp_irq, NULL,
+ lan966x_ptp_irq_handler, IRQF_ONESHOT,
+ "ptp irq", lan966x);
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "Unable to use ptp irq");
+
+ lan966x->ptp = 1;
+ }
+
+ lan966x->fdma_irq = platform_get_irq_byname(pdev, "fdma");
+ if (lan966x->fdma_irq > 0) {
+ err = devm_request_irq(&pdev->dev, lan966x->fdma_irq,
+ lan966x_fdma_irq_handler, 0,
+ "fdma irq", lan966x);
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "Unable to use fdma irq");
+
+ lan966x->fdma = true;
+ }
+
+ if (lan966x->ptp) {
+ lan966x->ptp_ext_irq = platform_get_irq_byname(pdev, "ptp-ext");
+ if (lan966x->ptp_ext_irq > 0) {
+ err = devm_request_threaded_irq(&pdev->dev,
+ lan966x->ptp_ext_irq, NULL,
+ lan966x_ptp_ext_irq_handler,
+ IRQF_ONESHOT,
+ "ptp-ext irq", lan966x);
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "Unable to use ptp-ext irq");
+ }
+ }
+
+ ports = device_get_named_child_node(&pdev->dev, "ethernet-ports");
+ if (!ports)
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "no ethernet-ports child found\n");
+
+ /* init switch */
+ lan966x_init(lan966x);
+ lan966x_stats_init(lan966x);
+
+ /* go over the child nodes */
+ fwnode_for_each_available_child_node(ports, portnp) {
+ phy_interface_t phy_mode;
+ struct phy *serdes;
+ u32 p;
+
+ if (fwnode_property_read_u32(portnp, "reg", &p))
+ continue;
+
+ phy_mode = fwnode_get_phy_mode(portnp);
+ err = lan966x_probe_port(lan966x, p, phy_mode, portnp);
+ if (err)
+ goto cleanup_ports;
+
+ /* Read needed configuration */
+ lan966x->ports[p]->config.portmode = phy_mode;
+ lan966x->ports[p]->fwnode = fwnode_handle_get(portnp);
+
+ serdes = devm_of_phy_optional_get(lan966x->dev,
+ to_of_node(portnp), NULL);
+ if (IS_ERR(serdes)) {
+ err = PTR_ERR(serdes);
+ goto cleanup_ports;
+ }
+ lan966x->ports[p]->serdes = serdes;
+
+ lan966x_port_init(lan966x->ports[p]);
+ err = lan966x_xdp_port_init(lan966x->ports[p]);
+ if (err)
+ goto cleanup_ports;
+ }
+
+ fwnode_handle_put(ports);
+
+ lan966x_mdb_init(lan966x);
+ err = lan966x_fdb_init(lan966x);
+ if (err)
+ goto cleanup_ports;
+
+ err = lan966x_ptp_init(lan966x);
+ if (err)
+ goto cleanup_fdb;
+
+ err = lan966x_fdma_init(lan966x);
+ if (err)
+ goto cleanup_ptp;
+
+ err = lan966x_vcap_init(lan966x);
+ if (err)
+ goto cleanup_fdma;
+
+ lan966x_dcb_init(lan966x);
+
+ return 0;
+
+cleanup_fdma:
+ lan966x_fdma_deinit(lan966x);
+
+cleanup_ptp:
+ lan966x_ptp_deinit(lan966x);
+
+cleanup_fdb:
+ lan966x_fdb_deinit(lan966x);
+
+cleanup_ports:
+ fwnode_handle_put(ports);
+ fwnode_handle_put(portnp);
+
+ lan966x_cleanup_ports(lan966x);
+
+ cancel_delayed_work_sync(&lan966x->stats_work);
+ destroy_workqueue(lan966x->stats_queue);
+ mutex_destroy(&lan966x->stats_lock);
+
+ return err;
+}
+
+static int lan966x_remove(struct platform_device *pdev)
+{
+ struct lan966x *lan966x = platform_get_drvdata(pdev);
+
+ lan966x_taprio_deinit(lan966x);
+ lan966x_vcap_deinit(lan966x);
+ lan966x_fdma_deinit(lan966x);
+ lan966x_cleanup_ports(lan966x);
+
+ cancel_delayed_work_sync(&lan966x->stats_work);
+ destroy_workqueue(lan966x->stats_queue);
+ mutex_destroy(&lan966x->stats_lock);
+
+ lan966x_mac_purge_entries(lan966x);
+ lan966x_mdb_deinit(lan966x);
+ lan966x_fdb_deinit(lan966x);
+ lan966x_ptp_deinit(lan966x);
+
+ debugfs_remove_recursive(lan966x->debugfs_root);
+
+ return 0;
+}
+
+static struct platform_driver lan966x_driver = {
+ .probe = lan966x_probe,
+ .remove = lan966x_remove,
+ .driver = {
+ .name = "lan966x-switch",
+ .of_match_table = lan966x_match,
+ },
+};
+
+static int __init lan966x_switch_driver_init(void)
+{
+ int ret;
+
+ lan966x_register_notifier_blocks();
+
+ ret = platform_driver_register(&lan966x_driver);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ lan966x_unregister_notifier_blocks();
+ return ret;
+}
+
+static void __exit lan966x_switch_driver_exit(void)
+{
+ platform_driver_unregister(&lan966x_driver);
+ lan966x_unregister_notifier_blocks();
+}
+
+module_init(lan966x_switch_driver_init);
+module_exit(lan966x_switch_driver_exit);
+
+MODULE_DESCRIPTION("Microchip LAN966X switch driver");
+MODULE_AUTHOR("Horatiu Vultur <horatiu.vultur@microchip.com>");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
new file mode 100644
index 0000000000..caa9e0533c
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -0,0 +1,792 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __LAN966X_MAIN_H__
+#define __LAN966X_MAIN_H__
+
+#include <linux/debugfs.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/jiffies.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+#include <linux/ptp_clock_kernel.h>
+#include <net/page_pool/types.h>
+#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
+#include <net/switchdev.h>
+#include <net/xdp.h>
+
+#include <vcap_api.h>
+#include <vcap_api_client.h>
+
+#include "lan966x_regs.h"
+#include "lan966x_ifh.h"
+
+#define TABLE_UPDATE_SLEEP_US 10
+#define TABLE_UPDATE_TIMEOUT_US 100000
+
+#define READL_SLEEP_US 10
+#define READL_TIMEOUT_US 100000000
+
+#define LAN966X_BUFFER_CELL_SZ 64
+#define LAN966X_BUFFER_MEMORY (160 * 1024)
+#define LAN966X_BUFFER_MIN_SZ 60
+
+#define LAN966X_HW_MTU(mtu) ((mtu) + ETH_HLEN + ETH_FCS_LEN)
+
+#define PGID_AGGR 64
+#define PGID_SRC 80
+#define PGID_ENTRIES 89
+
+#define UNAWARE_PVID 0
+#define HOST_PVID 4095
+
+/* Reserved amount for (SRC, PRIO) at index 8*SRC + PRIO */
+#define QSYS_Q_RSRV 95
+
+#define NUM_PHYS_PORTS 8
+#define CPU_PORT 8
+#define NUM_PRIO_QUEUES 8
+
+/* Reserved PGIDs */
+#define PGID_CPU (PGID_AGGR - 6)
+#define PGID_UC (PGID_AGGR - 5)
+#define PGID_BC (PGID_AGGR - 4)
+#define PGID_MC (PGID_AGGR - 3)
+#define PGID_MCIPV4 (PGID_AGGR - 2)
+#define PGID_MCIPV6 (PGID_AGGR - 1)
+
+/* Non-reserved PGIDs, used for general purpose */
+#define PGID_GP_START (CPU_PORT + 1)
+#define PGID_GP_END PGID_CPU
+
+#define LAN966X_SPEED_NONE 0
+#define LAN966X_SPEED_2500 1
+#define LAN966X_SPEED_1000 1
+#define LAN966X_SPEED_100 2
+#define LAN966X_SPEED_10 3
+
+#define LAN966X_PHC_COUNT 3
+#define LAN966X_PHC_PORT 0
+#define LAN966X_PHC_PINS_NUM 7
+
+#define IFH_REW_OP_NOOP 0x0
+#define IFH_REW_OP_ONE_STEP_PTP 0x3
+#define IFH_REW_OP_TWO_STEP_PTP 0x4
+
+#define FDMA_RX_DCB_MAX_DBS 1
+#define FDMA_TX_DCB_MAX_DBS 1
+#define FDMA_DCB_INFO_DATAL(x) ((x) & GENMASK(15, 0))
+
+#define FDMA_DCB_STATUS_BLOCKL(x) ((x) & GENMASK(15, 0))
+#define FDMA_DCB_STATUS_SOF BIT(16)
+#define FDMA_DCB_STATUS_EOF BIT(17)
+#define FDMA_DCB_STATUS_INTR BIT(18)
+#define FDMA_DCB_STATUS_DONE BIT(19)
+#define FDMA_DCB_STATUS_BLOCKO(x) (((x) << 20) & GENMASK(31, 20))
+#define FDMA_DCB_INVALID_DATA 0x1
+
+#define FDMA_XTR_CHANNEL 6
+#define FDMA_INJ_CHANNEL 0
+#define FDMA_DCB_MAX 512
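
A DCB status word packs the block length into bits 15:0, the SOF/EOF/INTR/DONE flags into bits 19:16 and the block offset into bits 31:20, per the FDMA_DCB_STATUS_* defines above. A standalone sketch composing and decoding one such word (frame length and flags are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define GENMASK(h, l)   (((~0u) << (l)) & (~0u >> (31 - (h))))

    #define DCB_STATUS_BLOCKL(x)    ((x) & GENMASK(15, 0))
    #define DCB_STATUS_SOF          (1u << 16)
    #define DCB_STATUS_EOF          (1u << 17)
    #define DCB_STATUS_INTR         (1u << 18)
    #define DCB_STATUS_BLOCKO(x)    (((x) << 20) & GENMASK(31, 20))

    int main(void)
    {
            /* single-buffer frame: 1514 bytes at offset 0, interrupt on done */
            uint32_t status = DCB_STATUS_BLOCKO(0) | DCB_STATUS_SOF |
                              DCB_STATUS_EOF | DCB_STATUS_INTR |
                              DCB_STATUS_BLOCKL(1514);

            printf("status 0x%08x: len %u sof %u eof %u\n", status,
                   status & GENMASK(15, 0),
                   !!(status & DCB_STATUS_SOF),
                   !!(status & DCB_STATUS_EOF));
            return 0;
    }
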
+
+#define SE_IDX_QUEUE 0 /* 0-79 : Queue scheduler elements */
+#define SE_IDX_PORT 80 /* 80-89 : Port scheduler elements */
+
+#define LAN966X_VCAP_CID_IS1_L0 VCAP_CID_INGRESS_L0 /* IS1 lookup 0 */
+#define LAN966X_VCAP_CID_IS1_L1 VCAP_CID_INGRESS_L1 /* IS1 lookup 1 */
+#define LAN966X_VCAP_CID_IS1_L2 VCAP_CID_INGRESS_L2 /* IS1 lookup 2 */
+#define LAN966X_VCAP_CID_IS1_MAX (VCAP_CID_INGRESS_L3 - 1) /* IS1 Max */
+
+#define LAN966X_VCAP_CID_IS2_L0 VCAP_CID_INGRESS_STAGE2_L0 /* IS2 lookup 0 */
+#define LAN966X_VCAP_CID_IS2_L1 VCAP_CID_INGRESS_STAGE2_L1 /* IS2 lookup 1 */
+#define LAN966X_VCAP_CID_IS2_MAX (VCAP_CID_INGRESS_STAGE2_L2 - 1) /* IS2 Max */
+
+#define LAN966X_VCAP_CID_ES0_L0 VCAP_CID_EGRESS_L0 /* ES0 lookup 0 */
+#define LAN966X_VCAP_CID_ES0_MAX (VCAP_CID_EGRESS_L1 - 1) /* ES0 Max */
+
+#define LAN966X_PORT_QOS_PCP_COUNT 8
+#define LAN966X_PORT_QOS_DEI_COUNT 8
+#define LAN966X_PORT_QOS_PCP_DEI_COUNT \
+ (LAN966X_PORT_QOS_PCP_COUNT + LAN966X_PORT_QOS_DEI_COUNT)
+
+#define LAN966X_PORT_QOS_DSCP_COUNT 64
+
+/* Port PCP rewrite mode */
+#define LAN966X_PORT_REW_TAG_CTRL_CLASSIFIED 0
+#define LAN966X_PORT_REW_TAG_CTRL_MAPPED 2
+
+/* Port DSCP rewrite mode */
+#define LAN966X_PORT_REW_DSCP_FRAME 0
+#define LAN966X_PORT_REW_DSCP_ANALIZER 1
+#define LAN966X_PORT_QOS_REWR_DSCP_ALL 3
+
+/* MAC table entry types.
+ * ENTRYTYPE_NORMAL is subject to aging.
+ * ENTRYTYPE_LOCKED is not subject to aging.
+ * ENTRYTYPE_MACv4 is not subject to aging. For IPv4 multicast.
+ * ENTRYTYPE_MACv6 is not subject to aging. For IPv6 multicast.
+ */
+enum macaccess_entry_type {
+ ENTRYTYPE_NORMAL = 0,
+ ENTRYTYPE_LOCKED,
+ ENTRYTYPE_MACV4,
+ ENTRYTYPE_MACV6,
+};
+
+/* FDMA return action codes for checking if the frame is valid
+ * FDMA_PASS, frame is valid and can be used
+ * FDMA_ERROR, something went wrong, stop getting more frames
+ * FDMA_DROP, frame is dropped, but continue to get more frames
+ * FDMA_TX, frame is given to TX, but continue to get more frames
+ * FDMA_REDIRECT, frame is given to TX, but continue to get more frames
+ */
+enum lan966x_fdma_action {
+ FDMA_PASS = 0,
+ FDMA_ERROR,
+ FDMA_DROP,
+ FDMA_TX,
+ FDMA_REDIRECT,
+};
+
+/* Controls how PORT_MASK is applied */
+enum LAN966X_PORT_MASK_MODE {
+ LAN966X_PMM_NO_ACTION,
+ LAN966X_PMM_REPLACE,
+ LAN966X_PMM_FORWARDING,
+ LAN966X_PMM_REDIRECT,
+};
+
+enum vcap_is2_port_sel_ipv6 {
+ VCAP_IS2_PS_IPV6_TCPUDP_OTHER,
+ VCAP_IS2_PS_IPV6_STD,
+ VCAP_IS2_PS_IPV6_IP4_TCPUDP_IP4_OTHER,
+ VCAP_IS2_PS_IPV6_MAC_ETYPE,
+};
+
+enum vcap_is1_port_sel_other {
+ VCAP_IS1_PS_OTHER_NORMAL,
+ VCAP_IS1_PS_OTHER_7TUPLE,
+ VCAP_IS1_PS_OTHER_DBL_VID,
+ VCAP_IS1_PS_OTHER_DMAC_VID,
+};
+
+enum vcap_is1_port_sel_ipv4 {
+ VCAP_IS1_PS_IPV4_NORMAL,
+ VCAP_IS1_PS_IPV4_7TUPLE,
+ VCAP_IS1_PS_IPV4_5TUPLE_IP4,
+ VCAP_IS1_PS_IPV4_DBL_VID,
+ VCAP_IS1_PS_IPV4_DMAC_VID,
+};
+
+enum vcap_is1_port_sel_ipv6 {
+ VCAP_IS1_PS_IPV6_NORMAL,
+ VCAP_IS1_PS_IPV6_7TUPLE,
+ VCAP_IS1_PS_IPV6_5TUPLE_IP4,
+ VCAP_IS1_PS_IPV6_NORMAL_IP6,
+ VCAP_IS1_PS_IPV6_5TUPLE_IP6,
+ VCAP_IS1_PS_IPV6_DBL_VID,
+ VCAP_IS1_PS_IPV6_DMAC_VID,
+};
+
+enum vcap_is1_port_sel_rt {
+ VCAP_IS1_PS_RT_NORMAL,
+ VCAP_IS1_PS_RT_7TUPLE,
+ VCAP_IS1_PS_RT_DBL_VID,
+ VCAP_IS1_PS_RT_DMAC_VID,
+ VCAP_IS1_PS_RT_FOLLOW_OTHER = 7,
+};
+
+struct lan966x_port;
+
+struct lan966x_db {
+ u64 dataptr;
+ u64 status;
+};
+
+struct lan966x_rx_dcb {
+ u64 nextptr;
+ u64 info;
+ struct lan966x_db db[FDMA_RX_DCB_MAX_DBS];
+};
+
+struct lan966x_tx_dcb {
+ u64 nextptr;
+ u64 info;
+ struct lan966x_db db[FDMA_TX_DCB_MAX_DBS];
+};
+
+struct lan966x_rx {
+ struct lan966x *lan966x;
+
+ /* Pointer to the array of hardware dcbs. */
+ struct lan966x_rx_dcb *dcbs;
+
+ /* Pointer to the last address in the dcbs. */
+ struct lan966x_rx_dcb *last_entry;
+
+ /* For each DB, there is a page */
+ struct page *page[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
+
+	/* Represents the db_index; it can have a value between 0 and
+	 * FDMA_RX_DCB_MAX_DBS. Once it reaches FDMA_RX_DCB_MAX_DBS, it
+	 * means that the DCB can be reused.
+ */
+ int db_index;
+
+ /* Represents the index in the dcbs. It has a value between 0 and
+ * FDMA_DCB_MAX
+ */
+ int dcb_index;
+
+ /* Represents the dma address to the dcbs array */
+ dma_addr_t dma;
+
+ /* Represents the page order that is used to allocate the pages for the
+	 * RX buffers. This value is calculated based on the max MTU of the devices.
+ */
+ u8 page_order;
+
+	/* Represents the max frame size that can be received by the CPU. This
+ * includes the IFH + VLAN tags + frame + skb_shared_info
+ */
+ u32 max_mtu;
+
+ u8 channel_id;
+
+ struct page_pool *page_pool;
+};
+
+struct lan966x_tx_dcb_buf {
+ dma_addr_t dma_addr;
+ struct net_device *dev;
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ struct page *page;
+ } data;
+ u32 len;
+ u32 used : 1;
+ u32 ptp : 1;
+ u32 use_skb : 1;
+ u32 xdp_ndo : 1;
+};
+
+struct lan966x_tx {
+ struct lan966x *lan966x;
+
+ /* Pointer to the dcb list */
+ struct lan966x_tx_dcb *dcbs;
+ u16 last_in_use;
+
+ /* Represents the DMA address to the first entry of the dcb entries. */
+ dma_addr_t dma;
+
+ /* Array of dcbs that are given to the HW */
+ struct lan966x_tx_dcb_buf *dcbs_buf;
+
+ u8 channel_id;
+
+ bool activated;
+};
+
+struct lan966x_stat_layout {
+ u32 offset;
+ char name[ETH_GSTRING_LEN];
+};
+
+struct lan966x_phc {
+ struct ptp_clock *clock;
+ struct ptp_clock_info info;
+ struct ptp_pin_desc pins[LAN966X_PHC_PINS_NUM];
+ struct kernel_hwtstamp_config hwtstamp_config;
+ struct lan966x *lan966x;
+ u8 index;
+};
+
+struct lan966x_skb_cb {
+ u8 rew_op;
+ u16 ts_id;
+ unsigned long jiffies;
+};
+
+#define LAN966X_PTP_TIMEOUT msecs_to_jiffies(10)
+#define LAN966X_SKB_CB(skb) \
+ ((struct lan966x_skb_cb *)((skb)->cb))
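
LAN966X_SKB_CB() overlays struct lan966x_skb_cb on skb->cb, the fixed 48-byte scratch area in struct sk_buff, so the struct must never outgrow that area. A hypothetical compile-time check of the invariant (not present in the driver; the 48-byte figure is the current sk_buff definition):

    #include <assert.h>

    /* mirrors struct lan966x_skb_cb above */
    struct skb_cb_sketch {
            unsigned char rew_op;
            unsigned short ts_id;
            unsigned long jiffies;
    };

    /* skb->cb is char cb[48]; anything overlaid on it must fit */
    static_assert(sizeof(struct skb_cb_sketch) <= 48,
                  "control block too large for skb->cb");

    int main(void) { return 0; }
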
+
+struct lan966x {
+ struct device *dev;
+
+ u8 num_phys_ports;
+ struct lan966x_port **ports;
+
+ void __iomem *regs[NUM_TARGETS];
+
+ int shared_queue_sz;
+
+ u8 base_mac[ETH_ALEN];
+
+	spinlock_t tx_lock; /* lock for frame transmission */
+
+ struct net_device *bridge;
+ u16 bridge_mask;
+ u16 bridge_fwd_mask;
+
+ struct list_head mac_entries;
+ spinlock_t mac_lock; /* lock for mac_entries list */
+
+ u16 vlan_mask[VLAN_N_VID];
+ DECLARE_BITMAP(cpu_vlan_mask, VLAN_N_VID);
+
+ /* stats */
+ const struct lan966x_stat_layout *stats_layout;
+ u32 num_stats;
+
+ /* workqueue for reading stats */
+ struct mutex stats_lock;
+ u64 *stats;
+ struct delayed_work stats_work;
+ struct workqueue_struct *stats_queue;
+
+ /* interrupts */
+ int xtr_irq;
+ int ana_irq;
+ int ptp_irq;
+ int fdma_irq;
+ int ptp_ext_irq;
+
+	/* workqueue for fdb */
+ struct workqueue_struct *fdb_work;
+ struct list_head fdb_entries;
+
+ /* mdb */
+ struct list_head mdb_entries;
+ struct list_head pgid_entries;
+
+ /* ptp */
+ bool ptp;
+ struct lan966x_phc phc[LAN966X_PHC_COUNT];
+ spinlock_t ptp_clock_lock; /* lock for phc */
+ spinlock_t ptp_ts_id_lock; /* lock for ts_id */
+ struct mutex ptp_lock; /* lock for ptp interface state */
+ u16 ptp_skbs;
+
+ /* fdma */
+ bool fdma;
+ struct net_device *fdma_ndev;
+ struct lan966x_rx rx;
+ struct lan966x_tx tx;
+ struct napi_struct napi;
+
+ /* Mirror */
+ struct lan966x_port *mirror_monitor;
+ u32 mirror_mask[2];
+ u32 mirror_count;
+
+ /* vcap */
+ struct vcap_control *vcap_ctrl;
+
+ /* debugfs */
+ struct dentry *debugfs_root;
+};
+
+struct lan966x_port_config {
+ phy_interface_t portmode;
+ const unsigned long *advertising;
+ int speed;
+ int duplex;
+ u32 pause;
+ bool inband;
+ bool autoneg;
+};
+
+struct lan966x_port_tc {
+ bool ingress_shared_block;
+ unsigned long police_id;
+ unsigned long ingress_mirror_id;
+ unsigned long egress_mirror_id;
+ struct flow_stats police_stat;
+ struct flow_stats mirror_stat;
+};
+
+struct lan966x_port_qos_pcp {
+ u8 map[LAN966X_PORT_QOS_PCP_DEI_COUNT];
+ bool enable;
+};
+
+struct lan966x_port_qos_dscp {
+ u8 map[LAN966X_PORT_QOS_DSCP_COUNT];
+ bool enable;
+};
+
+struct lan966x_port_qos_pcp_rewr {
+ u16 map[NUM_PRIO_QUEUES];
+ bool enable;
+};
+
+struct lan966x_port_qos_dscp_rewr {
+ u16 map[LAN966X_PORT_QOS_DSCP_COUNT];
+ bool enable;
+};
+
+struct lan966x_port_qos {
+ struct lan966x_port_qos_pcp pcp;
+ struct lan966x_port_qos_dscp dscp;
+ struct lan966x_port_qos_pcp_rewr pcp_rewr;
+ struct lan966x_port_qos_dscp_rewr dscp_rewr;
+ u8 default_prio;
+};
+
+struct lan966x_port {
+ struct net_device *dev;
+ struct lan966x *lan966x;
+
+ u8 chip_port;
+ u16 pvid;
+ u16 vid;
+ bool vlan_aware;
+
+ bool learn_ena;
+ bool mcast_ena;
+
+ struct phylink_config phylink_config;
+ struct phylink_pcs phylink_pcs;
+ struct lan966x_port_config config;
+ struct phylink *phylink;
+ struct phy *serdes;
+ struct fwnode_handle *fwnode;
+
+ u8 ptp_tx_cmd;
+ bool ptp_rx_cmd;
+ u16 ts_id;
+ struct sk_buff_head tx_skbs;
+
+ struct net_device *bond;
+ bool lag_tx_active;
+ enum netdev_lag_hash hash_type;
+
+ struct lan966x_port_tc tc;
+
+ struct bpf_prog *xdp_prog;
+ struct xdp_rxq_info xdp_rxq;
+};
+
+extern const struct phylink_mac_ops lan966x_phylink_mac_ops;
+extern const struct phylink_pcs_ops lan966x_phylink_pcs_ops;
+extern const struct ethtool_ops lan966x_ethtool_ops;
+extern struct notifier_block lan966x_switchdev_nb __read_mostly;
+extern struct notifier_block lan966x_switchdev_blocking_nb __read_mostly;
+
+bool lan966x_netdevice_check(const struct net_device *dev);
+
+void lan966x_register_notifier_blocks(void);
+void lan966x_unregister_notifier_blocks(void);
+
+bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, struct sk_buff *skb);
+
+void lan966x_ifh_get_src_port(void *ifh, u64 *src_port);
+void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp);
+void lan966x_ifh_set_bypass(void *ifh, u64 bypass);
+void lan966x_ifh_set_port(void *ifh, u64 bypass);
+
+void lan966x_stats_get(struct net_device *dev,
+ struct rtnl_link_stats64 *stats);
+int lan966x_stats_init(struct lan966x *lan966x);
+
+void lan966x_port_config_down(struct lan966x_port *port);
+void lan966x_port_config_up(struct lan966x_port *port);
+void lan966x_port_status_get(struct lan966x_port *port,
+ struct phylink_link_state *state);
+int lan966x_port_pcs_set(struct lan966x_port *port,
+ struct lan966x_port_config *config);
+void lan966x_port_init(struct lan966x_port *port);
+
+void lan966x_port_qos_set(struct lan966x_port *port,
+ struct lan966x_port_qos *qos);
+void lan966x_port_qos_dscp_rewr_mode_set(struct lan966x_port *port,
+ int mode);
+
+int lan966x_mac_ip_learn(struct lan966x *lan966x,
+ bool cpu_copy,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type);
+int lan966x_mac_learn(struct lan966x *lan966x, int port,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type);
+int lan966x_mac_forget(struct lan966x *lan966x,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type);
+int lan966x_mac_cpu_learn(struct lan966x *lan966x, const char *addr, u16 vid);
+int lan966x_mac_cpu_forget(struct lan966x *lan966x, const char *addr, u16 vid);
+void lan966x_mac_init(struct lan966x *lan966x);
+void lan966x_mac_set_ageing(struct lan966x *lan966x,
+ u32 ageing);
+int lan966x_mac_del_entry(struct lan966x *lan966x,
+ const unsigned char *addr,
+ u16 vid);
+int lan966x_mac_add_entry(struct lan966x *lan966x,
+ struct lan966x_port *port,
+ const unsigned char *addr,
+ u16 vid);
+void lan966x_mac_lag_replace_port_entry(struct lan966x *lan966x,
+ struct lan966x_port *src,
+ struct lan966x_port *dst);
+void lan966x_mac_lag_remove_port_entry(struct lan966x *lan966x,
+ struct lan966x_port *src);
+void lan966x_mac_purge_entries(struct lan966x *lan966x);
+irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x);
+
+void lan966x_vlan_init(struct lan966x *lan966x);
+void lan966x_vlan_port_apply(struct lan966x_port *port);
+bool lan966x_vlan_cpu_member_cpu_vlan_mask(struct lan966x *lan966x, u16 vid);
+void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port,
+ bool vlan_aware);
+int lan966x_vlan_port_set_vid(struct lan966x_port *port,
+ u16 vid,
+ bool pvid,
+ bool untagged);
+void lan966x_vlan_port_add_vlan(struct lan966x_port *port,
+ u16 vid,
+ bool pvid,
+ bool untagged);
+void lan966x_vlan_port_del_vlan(struct lan966x_port *port, u16 vid);
+void lan966x_vlan_cpu_add_vlan(struct lan966x *lan966x, u16 vid);
+void lan966x_vlan_cpu_del_vlan(struct lan966x *lan966x, u16 vid);
+
+void lan966x_fdb_write_entries(struct lan966x *lan966x, u16 vid);
+void lan966x_fdb_erase_entries(struct lan966x *lan966x, u16 vid);
+int lan966x_fdb_init(struct lan966x *lan966x);
+void lan966x_fdb_deinit(struct lan966x *lan966x);
+void lan966x_fdb_flush_workqueue(struct lan966x *lan966x);
+int lan966x_handle_fdb(struct net_device *dev,
+ struct net_device *orig_dev,
+ unsigned long event, const void *ctx,
+ const struct switchdev_notifier_fdb_info *fdb_info);
+
+void lan966x_mdb_init(struct lan966x *lan966x);
+void lan966x_mdb_deinit(struct lan966x *lan966x);
+int lan966x_handle_port_mdb_add(struct lan966x_port *port,
+ const struct switchdev_obj *obj);
+int lan966x_handle_port_mdb_del(struct lan966x_port *port,
+ const struct switchdev_obj *obj);
+void lan966x_mdb_erase_entries(struct lan966x *lan966x, u16 vid);
+void lan966x_mdb_write_entries(struct lan966x *lan966x, u16 vid);
+void lan966x_mdb_clear_entries(struct lan966x *lan966x);
+void lan966x_mdb_restore_entries(struct lan966x *lan966x);
+
+int lan966x_ptp_init(struct lan966x *lan966x);
+void lan966x_ptp_deinit(struct lan966x *lan966x);
+int lan966x_ptp_hwtstamp_set(struct lan966x_port *port,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack);
+void lan966x_ptp_hwtstamp_get(struct lan966x_port *port,
+ struct kernel_hwtstamp_config *cfg);
+void lan966x_ptp_rxtstamp(struct lan966x *lan966x, struct sk_buff *skb,
+ u64 src_port, u64 timestamp);
+int lan966x_ptp_txtstamp_request(struct lan966x_port *port,
+ struct sk_buff *skb);
+void lan966x_ptp_txtstamp_release(struct lan966x_port *port,
+ struct sk_buff *skb);
+irqreturn_t lan966x_ptp_irq_handler(int irq, void *args);
+irqreturn_t lan966x_ptp_ext_irq_handler(int irq, void *args);
+u32 lan966x_ptp_get_period_ps(void);
+int lan966x_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);
+int lan966x_ptp_setup_traps(struct lan966x_port *port,
+ struct kernel_hwtstamp_config *cfg);
+int lan966x_ptp_del_traps(struct lan966x_port *port);
+
+int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev);
+int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len);
+int lan966x_fdma_change_mtu(struct lan966x *lan966x);
+void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev);
+void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev);
+int lan966x_fdma_init(struct lan966x *lan966x);
+void lan966x_fdma_deinit(struct lan966x *lan966x);
+irqreturn_t lan966x_fdma_irq_handler(int irq, void *args);
+int lan966x_fdma_reload_page_pool(struct lan966x *lan966x);
+
+int lan966x_lag_port_join(struct lan966x_port *port,
+ struct net_device *brport_dev,
+ struct net_device *bond,
+ struct netlink_ext_ack *extack);
+void lan966x_lag_port_leave(struct lan966x_port *port, struct net_device *bond);
+int lan966x_lag_port_prechangeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info);
+int lan966x_lag_port_changelowerstate(struct net_device *dev,
+ struct netdev_notifier_changelowerstate_info *info);
+int lan966x_lag_netdev_prechangeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info);
+int lan966x_lag_netdev_changeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info);
+bool lan966x_lag_first_port(struct net_device *lag, struct net_device *dev);
+u32 lan966x_lag_get_mask(struct lan966x *lan966x, struct net_device *bond);
+
+int lan966x_port_changeupper(struct net_device *dev,
+ struct net_device *brport_dev,
+ struct netdev_notifier_changeupper_info *info);
+int lan966x_port_prechangeupper(struct net_device *dev,
+ struct net_device *brport_dev,
+ struct netdev_notifier_changeupper_info *info);
+void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state);
+void lan966x_port_ageing_set(struct lan966x_port *port,
+ unsigned long ageing_clock_t);
+void lan966x_update_fwd_mask(struct lan966x *lan966x);
+
+int lan966x_tc_setup(struct net_device *dev, enum tc_setup_type type,
+ void *type_data);
+
+int lan966x_mqprio_add(struct lan966x_port *port, u8 num_tc);
+int lan966x_mqprio_del(struct lan966x_port *port);
+
+void lan966x_taprio_init(struct lan966x *lan966x);
+void lan966x_taprio_deinit(struct lan966x *lan966x);
+int lan966x_taprio_add(struct lan966x_port *port,
+ struct tc_taprio_qopt_offload *qopt);
+int lan966x_taprio_del(struct lan966x_port *port);
+int lan966x_taprio_speed_set(struct lan966x_port *port, int speed);
+
+int lan966x_tbf_add(struct lan966x_port *port,
+ struct tc_tbf_qopt_offload *qopt);
+int lan966x_tbf_del(struct lan966x_port *port,
+ struct tc_tbf_qopt_offload *qopt);
+
+int lan966x_cbs_add(struct lan966x_port *port,
+ struct tc_cbs_qopt_offload *qopt);
+int lan966x_cbs_del(struct lan966x_port *port,
+ struct tc_cbs_qopt_offload *qopt);
+
+int lan966x_ets_add(struct lan966x_port *port,
+ struct tc_ets_qopt_offload *qopt);
+int lan966x_ets_del(struct lan966x_port *port,
+ struct tc_ets_qopt_offload *qopt);
+
+int lan966x_tc_matchall(struct lan966x_port *port,
+ struct tc_cls_matchall_offload *f,
+ bool ingress);
+
+int lan966x_police_port_add(struct lan966x_port *port,
+ struct flow_action *action,
+ struct flow_action_entry *act,
+ unsigned long police_id,
+ bool ingress,
+ struct netlink_ext_ack *extack);
+int lan966x_police_port_del(struct lan966x_port *port,
+ unsigned long police_id,
+ struct netlink_ext_ack *extack);
+void lan966x_police_port_stats(struct lan966x_port *port,
+ struct flow_stats *stats);
+
+int lan966x_mirror_port_add(struct lan966x_port *port,
+ struct flow_action_entry *action,
+ unsigned long mirror_id,
+ bool ingress,
+ struct netlink_ext_ack *extack);
+int lan966x_mirror_port_del(struct lan966x_port *port,
+ bool ingress,
+ struct netlink_ext_ack *extack);
+void lan966x_mirror_port_stats(struct lan966x_port *port,
+ struct flow_stats *stats,
+ bool ingress);
+
+int lan966x_xdp_port_init(struct lan966x_port *port);
+void lan966x_xdp_port_deinit(struct lan966x_port *port);
+int lan966x_xdp(struct net_device *dev, struct netdev_bpf *xdp);
+int lan966x_xdp_run(struct lan966x_port *port,
+ struct page *page,
+ u32 data_len);
+int lan966x_xdp_xmit(struct net_device *dev,
+ int n,
+ struct xdp_frame **frames,
+ u32 flags);
+bool lan966x_xdp_present(struct lan966x *lan966x);
+static inline bool lan966x_xdp_port_present(struct lan966x_port *port)
+{
+ return !!port->xdp_prog;
+}
+
+int lan966x_vcap_init(struct lan966x *lan966x);
+void lan966x_vcap_deinit(struct lan966x *lan966x);
+#if defined(CONFIG_DEBUG_FS)
+int lan966x_vcap_port_info(struct net_device *dev,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out);
+#else
+static inline int lan966x_vcap_port_info(struct net_device *dev,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ return 0;
+}
+#endif
+
+int lan966x_tc_flower(struct lan966x_port *port,
+ struct flow_cls_offload *f,
+ bool ingress);
+
+int lan966x_goto_port_add(struct lan966x_port *port,
+ int from_cid, int to_cid,
+ unsigned long goto_id,
+ struct netlink_ext_ack *extack);
+int lan966x_goto_port_del(struct lan966x_port *port,
+ unsigned long goto_id,
+ struct netlink_ext_ack *extack);
+
+#ifdef CONFIG_LAN966X_DCB
+void lan966x_dcb_init(struct lan966x *lan966x);
+#else
+static inline void lan966x_dcb_init(struct lan966x *lan966x)
+{
+}
+#endif
+
+static inline void __iomem *lan_addr(void __iomem *base[],
+ int id, int tinst, int tcnt,
+ int gbase, int ginst,
+ int gcnt, int gwidth,
+ int raddr, int rinst,
+ int rcnt, int rwidth)
+{
+ WARN_ON((tinst) >= tcnt);
+ WARN_ON((ginst) >= gcnt);
+ WARN_ON((rinst) >= rcnt);
+ return base[id + (tinst)] +
+ gbase + ((ginst) * gwidth) +
+ raddr + ((rinst) * rwidth);
+}
+
+static inline u32 lan_rd(struct lan966x *lan966x, int id, int tinst, int tcnt,
+ int gbase, int ginst, int gcnt, int gwidth,
+ int raddr, int rinst, int rcnt, int rwidth)
+{
+ return readl(lan_addr(lan966x->regs, id, tinst, tcnt, gbase, ginst,
+ gcnt, gwidth, raddr, rinst, rcnt, rwidth));
+}
+
+static inline void lan_wr(u32 val, struct lan966x *lan966x,
+ int id, int tinst, int tcnt,
+ int gbase, int ginst, int gcnt, int gwidth,
+ int raddr, int rinst, int rcnt, int rwidth)
+{
+ writel(val, lan_addr(lan966x->regs, id, tinst, tcnt,
+ gbase, ginst, gcnt, gwidth,
+ raddr, rinst, rcnt, rwidth));
+}
+
+static inline void lan_rmw(u32 val, u32 mask, struct lan966x *lan966x,
+ int id, int tinst, int tcnt,
+ int gbase, int ginst, int gcnt, int gwidth,
+ int raddr, int rinst, int rcnt, int rwidth)
+{
+ u32 nval;
+
+ nval = readl(lan_addr(lan966x->regs, id, tinst, tcnt, gbase, ginst,
+ gcnt, gwidth, raddr, rinst, rcnt, rwidth));
+ nval = (nval & ~mask) | (val & mask);
+ writel(nval, lan_addr(lan966x->regs, id, tinst, tcnt, gbase, ginst,
+ gcnt, gwidth, raddr, rinst, rcnt, rwidth));
+}
+
+#endif /* __LAN966X_MAIN_H__ */
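
lan_rmw() above is the read-modify-write primitive used throughout the driver: bits selected by 'mask' are taken from 'val', all other bits of the register are preserved. A user-space sketch of the same update rule on a plain variable (register value and field are made up):

    #include <stdio.h>
    #include <stdint.h>

    /* same rule as lan_rmw(): (reg & ~mask) | (val & mask) */
    static uint32_t rmw(uint32_t reg, uint32_t val, uint32_t mask)
    {
            return (reg & ~mask) | (val & mask);
    }

    int main(void)
    {
            uint32_t reg = 0xdeadbeef;

            reg = rmw(reg, 0x5 << 4, 0xf << 4); /* replace bits 7:4 only */
            printf("0x%08x\n", reg);            /* 0xdeadbe5f */
            return 0;
    }
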
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c
new file mode 100644
index 0000000000..2af55268bf
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <net/switchdev.h>
+
+#include "lan966x_main.h"
+
+struct lan966x_pgid_entry {
+ struct list_head list;
+ int index;
+ refcount_t refcount;
+ u16 ports;
+};
+
+struct lan966x_mdb_entry {
+ struct list_head list;
+ unsigned char mac[ETH_ALEN];
+ u16 vid;
+ u16 ports;
+ struct lan966x_pgid_entry *pgid;
+ u8 cpu_copy;
+};
+
+void lan966x_mdb_init(struct lan966x *lan966x)
+{
+ INIT_LIST_HEAD(&lan966x->mdb_entries);
+ INIT_LIST_HEAD(&lan966x->pgid_entries);
+}
+
+static void lan966x_mdb_purge_mdb_entries(struct lan966x *lan966x)
+{
+ struct lan966x_mdb_entry *mdb_entry, *tmp;
+
+ list_for_each_entry_safe(mdb_entry, tmp, &lan966x->mdb_entries, list) {
+ list_del(&mdb_entry->list);
+ kfree(mdb_entry);
+ }
+}
+
+static void lan966x_mdb_purge_pgid_entries(struct lan966x *lan966x)
+{
+ struct lan966x_pgid_entry *pgid_entry, *tmp;
+
+ list_for_each_entry_safe(pgid_entry, tmp, &lan966x->pgid_entries, list) {
+ list_del(&pgid_entry->list);
+ kfree(pgid_entry);
+ }
+}
+
+void lan966x_mdb_deinit(struct lan966x *lan966x)
+{
+ lan966x_mdb_purge_mdb_entries(lan966x);
+ lan966x_mdb_purge_pgid_entries(lan966x);
+}
+
+static struct lan966x_mdb_entry *
+lan966x_mdb_entry_get(struct lan966x *lan966x,
+ const unsigned char *mac,
+ u16 vid)
+{
+ struct lan966x_mdb_entry *mdb_entry;
+
+ list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) {
+ if (ether_addr_equal(mdb_entry->mac, mac) &&
+ mdb_entry->vid == vid)
+ return mdb_entry;
+ }
+
+ return NULL;
+}
+
+static struct lan966x_mdb_entry *
+lan966x_mdb_entry_add(struct lan966x *lan966x,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct lan966x_mdb_entry *mdb_entry;
+
+ mdb_entry = kzalloc(sizeof(*mdb_entry), GFP_KERNEL);
+ if (!mdb_entry)
+ return ERR_PTR(-ENOMEM);
+
+ ether_addr_copy(mdb_entry->mac, mdb->addr);
+ mdb_entry->vid = mdb->vid;
+
+ list_add_tail(&mdb_entry->list, &lan966x->mdb_entries);
+
+ return mdb_entry;
+}
+
+static void lan966x_mdb_encode_mac(unsigned char *mac,
+ struct lan966x_mdb_entry *mdb_entry,
+ enum macaccess_entry_type type)
+{
+ ether_addr_copy(mac, mdb_entry->mac);
+
+ if (type == ENTRYTYPE_MACV4) {
+ mac[0] = 0;
+ mac[1] = mdb_entry->ports >> 8;
+ mac[2] = mdb_entry->ports & 0xff;
+ } else if (type == ENTRYTYPE_MACV6) {
+ mac[0] = mdb_entry->ports >> 8;
+ mac[1] = mdb_entry->ports & 0xff;
+ }
+}
+
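For IPv4 and IPv6 groups the destination port mask is carried in the leading MAC bytes, which are fixed by the multicast prefix anyway. As a worked example with illustrative addresses: encoding ports 0 and 2 (mask 0x0005) into the MACV4 key for group 01:00:5e:11:22:33 gives 00:00:05:11:22:33, and the same mask in a MACV6 key for 33:33:00:00:00:fb gives 00:05:00:00:00:fb.
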
+static int lan966x_mdb_ip_add(struct lan966x_port *port,
+ const struct switchdev_obj_port_mdb *mdb,
+ enum macaccess_entry_type type)
+{
+ bool cpu_port = netif_is_bridge_master(mdb->obj.orig_dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_mdb_entry *mdb_entry;
+ unsigned char mac[ETH_ALEN];
+ bool cpu_copy = false;
+
+ mdb_entry = lan966x_mdb_entry_get(lan966x, mdb->addr, mdb->vid);
+ if (!mdb_entry) {
+ mdb_entry = lan966x_mdb_entry_add(lan966x, mdb);
+ if (IS_ERR(mdb_entry))
+ return PTR_ERR(mdb_entry);
+ } else {
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+ }
+
+ if (cpu_port)
+ mdb_entry->cpu_copy++;
+ else
+ mdb_entry->ports |= BIT(port->chip_port);
+
+ /* Copy the frame to CPU only if the CPU is in the VLAN */
+ if (lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x, mdb_entry->vid) &&
+ mdb_entry->cpu_copy)
+ cpu_copy = true;
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ return lan966x_mac_ip_learn(lan966x, cpu_copy,
+ mac, mdb_entry->vid, type);
+}
+
+static int lan966x_mdb_ip_del(struct lan966x_port *port,
+ const struct switchdev_obj_port_mdb *mdb,
+ enum macaccess_entry_type type)
+{
+ bool cpu_port = netif_is_bridge_master(mdb->obj.orig_dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_mdb_entry *mdb_entry;
+ unsigned char mac[ETH_ALEN];
+ u16 ports;
+
+ mdb_entry = lan966x_mdb_entry_get(lan966x, mdb->addr, mdb->vid);
+ if (!mdb_entry)
+ return -ENOENT;
+
+ ports = mdb_entry->ports;
+ if (cpu_port) {
+ /* If there are still other references to the CPU port then
+ * there is no point in deleting and re-adding the same entry
+ */
+ mdb_entry->cpu_copy--;
+ if (mdb_entry->cpu_copy)
+ return 0;
+ } else {
+ ports &= ~BIT(port->chip_port);
+ }
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+
+ mdb_entry->ports = ports;
+
+ if (!mdb_entry->ports && !mdb_entry->cpu_copy) {
+ list_del(&mdb_entry->list);
+ kfree(mdb_entry);
+ return 0;
+ }
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ return lan966x_mac_ip_learn(lan966x, mdb_entry->cpu_copy,
+ mac, mdb_entry->vid, type);
+}
+
+static struct lan966x_pgid_entry *
+lan966x_pgid_entry_add(struct lan966x *lan966x, int index, u16 ports)
+{
+ struct lan966x_pgid_entry *pgid_entry;
+
+ pgid_entry = kzalloc(sizeof(*pgid_entry), GFP_KERNEL);
+ if (!pgid_entry)
+ return ERR_PTR(-ENOMEM);
+
+ pgid_entry->ports = ports;
+ pgid_entry->index = index;
+ refcount_set(&pgid_entry->refcount, 1);
+
+ list_add_tail(&pgid_entry->list, &lan966x->pgid_entries);
+
+ return pgid_entry;
+}
+
+static struct lan966x_pgid_entry *
+lan966x_pgid_entry_get(struct lan966x *lan966x,
+ struct lan966x_mdb_entry *mdb_entry)
+{
+ struct lan966x_pgid_entry *pgid_entry;
+ int index;
+
+ /* Try to find an existing pgid that uses the same ports as the
+ * mdb_entry
+ */
+ list_for_each_entry(pgid_entry, &lan966x->pgid_entries, list) {
+ if (pgid_entry->ports == mdb_entry->ports) {
+ refcount_inc(&pgid_entry->refcount);
+ return pgid_entry;
+ }
+ }
+
+ /* Otherwise look for a free pgid index and allocate a new entry
+ * there; if none is found, the pgid resources are exhausted
+ */
+ for (index = PGID_GP_START; index < PGID_GP_END; index++) {
+ bool used = false;
+
+ list_for_each_entry(pgid_entry, &lan966x->pgid_entries, list) {
+ if (pgid_entry->index == index) {
+ used = true;
+ break;
+ }
+ }
+
+ if (!used)
+ return lan966x_pgid_entry_add(lan966x, index,
+ mdb_entry->ports);
+ }
+
+ return ERR_PTR(-ENOSPC);
+}
+
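The lookup-before-allocate above lets MDB entries with identical port sets share one PGID: when a second group is joined on exactly the same ports, lan966x_pgid_entry_get() only bumps the refcount of the existing entry, and the index is returned to the PGID_GP_START..PGID_GP_END pool once the last user calls lan966x_pgid_entry_del().
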
+static void lan966x_pgid_entry_del(struct lan966x *lan966x,
+ struct lan966x_pgid_entry *pgid_entry)
+{
+ if (!refcount_dec_and_test(&pgid_entry->refcount))
+ return;
+
+ list_del(&pgid_entry->list);
+ kfree(pgid_entry);
+}
+
+static int lan966x_mdb_l2_add(struct lan966x_port *port,
+ const struct switchdev_obj_port_mdb *mdb,
+ enum macaccess_entry_type type)
+{
+ bool cpu_port = netif_is_bridge_master(mdb->obj.orig_dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_pgid_entry *pgid_entry;
+ struct lan966x_mdb_entry *mdb_entry;
+ unsigned char mac[ETH_ALEN];
+
+ mdb_entry = lan966x_mdb_entry_get(lan966x, mdb->addr, mdb->vid);
+ if (!mdb_entry) {
+ mdb_entry = lan966x_mdb_entry_add(lan966x, mdb);
+ if (IS_ERR(mdb_entry))
+ return PTR_ERR(mdb_entry);
+ } else {
+ lan966x_pgid_entry_del(lan966x, mdb_entry->pgid);
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+ }
+
+ if (cpu_port) {
+ mdb_entry->ports |= BIT(CPU_PORT);
+ mdb_entry->cpu_copy++;
+ } else {
+ mdb_entry->ports |= BIT(port->chip_port);
+ }
+
+ pgid_entry = lan966x_pgid_entry_get(lan966x, mdb_entry);
+ if (IS_ERR(pgid_entry)) {
+ list_del(&mdb_entry->list);
+ kfree(mdb_entry);
+ return PTR_ERR(pgid_entry);
+ }
+ mdb_entry->pgid = pgid_entry;
+
+ /* Copy the frame to CPU only if the CPU is in the VLAN */
+ if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x, mdb_entry->vid) &&
+ mdb_entry->cpu_copy)
+ mdb_entry->ports &= ~BIT(CPU_PORT);
+
+ lan_rmw(ANA_PGID_PGID_SET(mdb_entry->ports),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(pgid_entry->index));
+
+ return lan966x_mac_learn(lan966x, pgid_entry->index, mdb_entry->mac,
+ mdb_entry->vid, type);
+}
+
+static int lan966x_mdb_l2_del(struct lan966x_port *port,
+ const struct switchdev_obj_port_mdb *mdb,
+ enum macaccess_entry_type type)
+{
+ bool cpu_port = netif_is_bridge_master(mdb->obj.orig_dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_pgid_entry *pgid_entry;
+ struct lan966x_mdb_entry *mdb_entry;
+ unsigned char mac[ETH_ALEN];
+ u16 ports;
+
+ mdb_entry = lan966x_mdb_entry_get(lan966x, mdb->addr, mdb->vid);
+ if (!mdb_entry)
+ return -ENOENT;
+
+ ports = mdb_entry->ports;
+ if (cpu_port) {
+ /* If there are still other references to the CPU port then
+ * there is no point in deleting and re-adding the same entry
+ */
+ mdb_entry->cpu_copy--;
+ if (mdb_entry->cpu_copy)
+ return 0;
+
+ ports &= ~BIT(CPU_PORT);
+ } else {
+ ports &= ~BIT(port->chip_port);
+ }
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+ lan966x_pgid_entry_del(lan966x, mdb_entry->pgid);
+
+ mdb_entry->ports = ports;
+
+ if (!mdb_entry->ports) {
+ list_del(&mdb_entry->list);
+ kfree(mdb_entry);
+ return 0;
+ }
+
+ pgid_entry = lan966x_pgid_entry_get(lan966x, mdb_entry);
+ if (IS_ERR(pgid_entry)) {
+ list_del(&mdb_entry->list);
+ kfree(mdb_entry);
+ return PTR_ERR(pgid_entry);
+ }
+ mdb_entry->pgid = pgid_entry;
+
+ lan_rmw(ANA_PGID_PGID_SET(mdb_entry->ports),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(pgid_entry->index));
+
+ return lan966x_mac_learn(lan966x, pgid_entry->index, mdb_entry->mac,
+ mdb_entry->vid, type);
+}
+
+static enum macaccess_entry_type
+lan966x_mdb_classify(const unsigned char *mac)
+{
+ if (mac[0] == 0x01 && mac[1] == 0x00 && mac[2] == 0x5e)
+ return ENTRYTYPE_MACV4;
+ if (mac[0] == 0x33 && mac[1] == 0x33)
+ return ENTRYTYPE_MACV6;
+ return ENTRYTYPE_LOCKED;
+}
+
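The prefixes tested here are the standard multicast MAC mappings: 01:00:5e:xx:xx:xx is the IPv4 multicast range and 33:33:xx:xx:xx:xx the IPv6 one. Any other address, for example a link-local group such as 01:80:c2:00:00:0e, falls through to ENTRYTYPE_LOCKED and takes the L2 path with a PGID entry.
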
+int lan966x_handle_port_mdb_add(struct lan966x_port *port,
+ const struct switchdev_obj *obj)
+{
+ const struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ enum macaccess_entry_type type;
+
+ /* IPv4/IPv6 entries are added differently from L2 entries: the
+ * former carry the port mask in the MAC itself and need no pgid
+ * entry, while L2 entries require one
+ */
+ type = lan966x_mdb_classify(mdb->addr);
+ if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6)
+ return lan966x_mdb_ip_add(port, mdb, type);
+
+ return lan966x_mdb_l2_add(port, mdb, type);
+}
+
+int lan966x_handle_port_mdb_del(struct lan966x_port *port,
+ const struct switchdev_obj *obj)
+{
+ const struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ enum macaccess_entry_type type;
+
+ /* IPv4/IPv6 entries are removed differently from L2 entries: the
+ * former carry the port mask in the MAC itself and need no pgid
+ * entry, while L2 entries require one
+ */
+ type = lan966x_mdb_classify(mdb->addr);
+ if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6)
+ return lan966x_mdb_ip_del(port, mdb, type);
+
+ return lan966x_mdb_l2_del(port, mdb, type);
+}
+
+static void lan966x_mdb_ip_cpu_copy(struct lan966x *lan966x,
+ struct lan966x_mdb_entry *mdb_entry,
+ enum macaccess_entry_type type)
+{
+ unsigned char mac[ETH_ALEN];
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+ lan966x_mac_ip_learn(lan966x, true, mac, mdb_entry->vid, type);
+}
+
+static void lan966x_mdb_l2_cpu_copy(struct lan966x *lan966x,
+ struct lan966x_mdb_entry *mdb_entry,
+ enum macaccess_entry_type type)
+{
+ struct lan966x_pgid_entry *pgid_entry;
+ unsigned char mac[ETH_ALEN];
+
+ lan966x_pgid_entry_del(lan966x, mdb_entry->pgid);
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+
+ mdb_entry->ports |= BIT(CPU_PORT);
+
+ pgid_entry = lan966x_pgid_entry_get(lan966x, mdb_entry);
+ if (IS_ERR(pgid_entry))
+ return;
+
+ mdb_entry->pgid = pgid_entry;
+
+ lan_rmw(ANA_PGID_PGID_SET(mdb_entry->ports),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(pgid_entry->index));
+
+ lan966x_mac_learn(lan966x, pgid_entry->index, mdb_entry->mac,
+ mdb_entry->vid, type);
+}
+
+void lan966x_mdb_write_entries(struct lan966x *lan966x, u16 vid)
+{
+ struct lan966x_mdb_entry *mdb_entry;
+ enum macaccess_entry_type type;
+
+ list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) {
+ if (mdb_entry->vid != vid || !mdb_entry->cpu_copy)
+ continue;
+
+ type = lan966x_mdb_classify(mdb_entry->mac);
+ if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6)
+ lan966x_mdb_ip_cpu_copy(lan966x, mdb_entry, type);
+ else
+ lan966x_mdb_l2_cpu_copy(lan966x, mdb_entry, type);
+ }
+}
+
+static void lan966x_mdb_ip_cpu_remove(struct lan966x *lan966x,
+ struct lan966x_mdb_entry *mdb_entry,
+ enum macaccess_entry_type type)
+{
+ unsigned char mac[ETH_ALEN];
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+ lan966x_mac_ip_learn(lan966x, false, mac, mdb_entry->vid, type);
+}
+
+static void lan966x_mdb_l2_cpu_remove(struct lan966x *lan966x,
+ struct lan966x_mdb_entry *mdb_entry,
+ enum macaccess_entry_type type)
+{
+ struct lan966x_pgid_entry *pgid_entry;
+ unsigned char mac[ETH_ALEN];
+
+ lan966x_pgid_entry_del(lan966x, mdb_entry->pgid);
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+
+ mdb_entry->ports &= ~BIT(CPU_PORT);
+
+ pgid_entry = lan966x_pgid_entry_get(lan966x, mdb_entry);
+ if (IS_ERR(pgid_entry))
+ return;
+
+ mdb_entry->pgid = pgid_entry;
+
+ lan_rmw(ANA_PGID_PGID_SET(mdb_entry->ports),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(pgid_entry->index));
+
+ lan966x_mac_learn(lan966x, pgid_entry->index, mdb_entry->mac,
+ mdb_entry->vid, type);
+}
+
+void lan966x_mdb_erase_entries(struct lan966x *lan966x, u16 vid)
+{
+ struct lan966x_mdb_entry *mdb_entry;
+ enum macaccess_entry_type type;
+
+ list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) {
+ if (mdb_entry->vid != vid || !mdb_entry->cpu_copy)
+ continue;
+
+ type = lan966x_mdb_classify(mdb_entry->mac);
+ if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6)
+ lan966x_mdb_ip_cpu_remove(lan966x, mdb_entry, type);
+ else
+ lan966x_mdb_l2_cpu_remove(lan966x, mdb_entry, type);
+ }
+}
+
+void lan966x_mdb_clear_entries(struct lan966x *lan966x)
+{
+ struct lan966x_mdb_entry *mdb_entry;
+ enum macaccess_entry_type type;
+ unsigned char mac[ETH_ALEN];
+
+ list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) {
+ type = lan966x_mdb_classify(mdb_entry->mac);
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ /* Remove just the MAC entry, still keep the PGID in case of L2
+ * entries because this can be restored at later point
+ */
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+ }
+}
+
+void lan966x_mdb_restore_entries(struct lan966x *lan966x)
+{
+ struct lan966x_mdb_entry *mdb_entry;
+ enum macaccess_entry_type type;
+ unsigned char mac[ETH_ALEN];
+ bool cpu_copy = false;
+
+ list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) {
+ type = lan966x_mdb_classify(mdb_entry->mac);
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6) {
+ /* Copy the frame to CPU only if the CPU is in the VLAN */
+ if (lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
+ mdb_entry->vid) &&
+ mdb_entry->cpu_copy)
+ cpu_copy = true;
+
+ lan966x_mac_ip_learn(lan966x, cpu_copy, mac,
+ mdb_entry->vid, type);
+ } else {
+ lan966x_mac_learn(lan966x, mdb_entry->pgid->index,
+ mdb_entry->mac,
+ mdb_entry->vid, type);
+ }
+ }
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mirror.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mirror.c
new file mode 100644
index 0000000000..7e1ba3f40c
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mirror.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+int lan966x_mirror_port_add(struct lan966x_port *port,
+ struct flow_action_entry *action,
+ unsigned long mirror_id,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_port *monitor_port;
+
+ if (!lan966x_netdevice_check(action->dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Destination is not a lan966x port");
+ return -EOPNOTSUPP;
+ }
+
+ monitor_port = netdev_priv(action->dev);
+
+ if (lan966x->mirror_mask[ingress] & BIT(port->chip_port)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Mirror already exists");
+ return -EEXIST;
+ }
+
+ if (lan966x->mirror_monitor &&
+ lan966x->mirror_monitor != monitor_port) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot change mirror port while in use");
+ return -EBUSY;
+ }
+
+ if (port == monitor_port) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot mirror the monitor port");
+ return -EINVAL;
+ }
+
+ lan966x->mirror_mask[ingress] |= BIT(port->chip_port);
+
+ lan966x->mirror_monitor = monitor_port;
+ lan_wr(BIT(monitor_port->chip_port), lan966x, ANA_MIRRORPORTS);
+
+ if (ingress) {
+ lan_rmw(ANA_PORT_CFG_SRC_MIRROR_ENA_SET(1),
+ ANA_PORT_CFG_SRC_MIRROR_ENA,
+ lan966x, ANA_PORT_CFG(port->chip_port));
+ } else {
+ lan_wr(lan966x->mirror_mask[0], lan966x,
+ ANA_EMIRRORPORTS);
+ }
+
+ lan966x->mirror_count++;
+
+ if (ingress)
+ port->tc.ingress_mirror_id = mirror_id;
+ else
+ port->tc.egress_mirror_id = mirror_id;
+
+ return 0;
+}
+
+int lan966x_mirror_port_del(struct lan966x_port *port,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ if (!(lan966x->mirror_mask[ingress] & BIT(port->chip_port))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "There is no mirroring for this port");
+ return -ENOENT;
+ }
+
+ lan966x->mirror_mask[ingress] &= ~BIT(port->chip_port);
+
+ if (ingress) {
+ lan_rmw(ANA_PORT_CFG_SRC_MIRROR_ENA_SET(0),
+ ANA_PORT_CFG_SRC_MIRROR_ENA,
+ lan966x, ANA_PORT_CFG(port->chip_port));
+ } else {
+ lan_wr(lan966x->mirror_mask[0], lan966x,
+ ANA_EMIRRORPORTS);
+ }
+
+ lan966x->mirror_count--;
+
+ if (lan966x->mirror_count == 0) {
+ lan966x->mirror_monitor = NULL;
+ lan_wr(0, lan966x, ANA_MIRRORPORTS);
+ }
+
+ if (ingress)
+ port->tc.ingress_mirror_id = 0;
+ else
+ port->tc.egress_mirror_id = 0;
+
+ return 0;
+}
+
+void lan966x_mirror_port_stats(struct lan966x_port *port,
+ struct flow_stats *stats,
+ bool ingress)
+{
+ struct rtnl_link_stats64 new_stats;
+ struct flow_stats *old_stats;
+
+ old_stats = &port->tc.mirror_stat;
+ lan966x_stats_get(port->dev, &new_stats);
+
+ if (ingress) {
+ flow_stats_update(stats,
+ new_stats.rx_bytes - old_stats->bytes,
+ new_stats.rx_packets - old_stats->pkts,
+ new_stats.rx_dropped - old_stats->drops,
+ old_stats->lastused,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+
+ old_stats->bytes = new_stats.rx_bytes;
+ old_stats->pkts = new_stats.rx_packets;
+ old_stats->drops = new_stats.rx_dropped;
+ old_stats->lastused = jiffies;
+ } else {
+ flow_stats_update(stats,
+ new_stats.tx_bytes - old_stats->bytes,
+ new_stats.tx_packets - old_stats->pkts,
+ new_stats.tx_dropped - old_stats->drops,
+ old_stats->lastused,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+
+ old_stats->bytes = new_stats.tx_bytes;
+ old_stats->pkts = new_stats.tx_packets;
+ old_stats->drops = new_stats.tx_dropped;
+ old_stats->lastused = jiffies;
+ }
+}
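
Note the delta pattern: the counters read via lan966x_stats_get() are cumulative, while flow_stats_update() expects increments, so the previous snapshot is kept in port->tc.mirror_stat and only the difference since lastused is reported on each call.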
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mqprio.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mqprio.c
new file mode 100644
index 0000000000..7fa76e74f9
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mqprio.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+int lan966x_mqprio_add(struct lan966x_port *port, u8 num_tc)
+{
+ u8 i;
+
+ if (num_tc != NUM_PRIO_QUEUES) {
+ netdev_err(port->dev, "Only %d traffic classes supported\n",
+ NUM_PRIO_QUEUES);
+ return -EINVAL;
+ }
+
+ netdev_set_num_tc(port->dev, num_tc);
+
+ for (i = 0; i < num_tc; ++i)
+ netdev_set_tc_queue(port->dev, i, 1, i);
+
+ return 0;
+}
+
+int lan966x_mqprio_del(struct lan966x_port *port)
+{
+ netdev_reset_tc(port->dev);
+
+ return 0;
+}
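
Assuming NUM_PRIO_QUEUES is 8 (the define lives in lan966x_main.h, outside this hunk), the offload above maps one TX queue per traffic class and would be exercised by an mqprio configuration along these lines (interface name illustrative):

    tc qdisc add dev eth0 root handle 100: mqprio num_tc 8 \
        map 0 1 2 3 4 5 6 7 queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 hw 1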
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
new file mode 100644
index 0000000000..1d63903f90
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/module.h>
+#include <linux/phylink.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/phy/phy.h>
+
+#include "lan966x_main.h"
+
+static struct phylink_pcs *lan966x_phylink_mac_select(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct lan966x_port *port = netdev_priv(to_net_dev(config->dev));
+
+ return &port->phylink_pcs;
+}
+
+static void lan966x_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static int lan966x_phylink_mac_prepare(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t iface)
+{
+ struct lan966x_port *port = netdev_priv(to_net_dev(config->dev));
+ phy_interface_t serdes_mode = iface;
+ int err;
+
+ if (port->serdes) {
+ err = phy_set_mode_ext(port->serdes, PHY_MODE_ETHERNET,
+ serdes_mode);
+ if (err) {
+ netdev_err(to_net_dev(config->dev),
+ "Could not set mode of SerDes\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void lan966x_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct lan966x_port *port = netdev_priv(to_net_dev(config->dev));
+ struct lan966x_port_config *port_config = &port->config;
+
+ port_config->duplex = duplex;
+ port_config->speed = speed;
+ port_config->pause = 0;
+ port_config->pause |= tx_pause ? MLO_PAUSE_TX : 0;
+ port_config->pause |= rx_pause ? MLO_PAUSE_RX : 0;
+
+ if (phy_interface_mode_is_rgmii(interface))
+ phy_set_speed(port->serdes, speed);
+
+ lan966x_port_config_up(port);
+}
+
+static void lan966x_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct lan966x_port *port = netdev_priv(to_net_dev(config->dev));
+ struct lan966x *lan966x = port->lan966x;
+
+ lan966x_port_config_down(port);
+
+ /* Take PCS out of reset */
+ lan_rmw(DEV_CLOCK_CFG_PCS_RX_RST_SET(0) |
+ DEV_CLOCK_CFG_PCS_TX_RST_SET(0),
+ DEV_CLOCK_CFG_PCS_RX_RST |
+ DEV_CLOCK_CFG_PCS_TX_RST,
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+}
+
+static struct lan966x_port *lan966x_pcs_to_port(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct lan966x_port, phylink_pcs);
+}
+
+static void lan966x_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct lan966x_port *port = lan966x_pcs_to_port(pcs);
+
+ lan966x_port_status_get(port, state);
+}
+
+static int lan966x_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct lan966x_port *port = lan966x_pcs_to_port(pcs);
+ struct lan966x_port_config config;
+ int ret;
+
+ config = port->config;
+ config.portmode = interface;
+ config.inband = neg_mode & PHYLINK_PCS_NEG_INBAND;
+ config.autoneg = neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED;
+ config.advertising = advertising;
+
+ ret = lan966x_port_pcs_set(port, &config);
+ if (ret)
+ netdev_err(port->dev, "port PCS config failed: %d\n", ret);
+
+ return ret;
+}
+
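neg_mode here is phylink's PCS negotiation mode: the PHYLINK_PCS_NEG_INBAND bit indicates that an in-band control word is exchanged at all, while the full value PHYLINK_PCS_NEG_INBAND_ENABLED additionally means in-band autoneg is active, which is why config.autoneg is only set in that case.
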
+static void lan966x_pcs_aneg_restart(struct phylink_pcs *pcs)
+{
+ /* Currently not used */
+}
+
+const struct phylink_mac_ops lan966x_phylink_mac_ops = {
+ .mac_select_pcs = lan966x_phylink_mac_select,
+ .mac_config = lan966x_phylink_mac_config,
+ .mac_prepare = lan966x_phylink_mac_prepare,
+ .mac_link_down = lan966x_phylink_mac_link_down,
+ .mac_link_up = lan966x_phylink_mac_link_up,
+};
+
+const struct phylink_pcs_ops lan966x_phylink_pcs_ops = {
+ .pcs_get_state = lan966x_pcs_get_state,
+ .pcs_config = lan966x_pcs_config,
+ .pcs_an_restart = lan966x_pcs_aneg_restart,
+};
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_police.c b/drivers/net/ethernet/microchip/lan966x/lan966x_police.c
new file mode 100644
index 0000000000..7302df2300
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_police.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+/* 0-8 : 9 port policers */
+#define POL_IDX_PORT 0
+
+/* Policer order: Serial (QoS -> Port -> VCAP) */
+#define POL_ORDER 0x1d3
+
+struct lan966x_tc_policer {
+ /* kilobit per second */
+ u32 rate;
+ /* bytes */
+ u32 burst;
+};
+
+static int lan966x_police_add(struct lan966x_port *port,
+ struct lan966x_tc_policer *pol,
+ u16 pol_idx)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ /* Rate unit is 33 1/3 kbps */
+ pol->rate = DIV_ROUND_UP(pol->rate * 3, 100);
+ /* Avoid zero burst size */
+ pol->burst = pol->burst ?: 1;
+ /* Unit is 4kB */
+ pol->burst = DIV_ROUND_UP(pol->burst, 4096);
+
+ if (pol->rate > GENMASK(15, 0) ||
+ pol->burst > GENMASK(6, 0))
+ return -EINVAL;
+
+ lan_wr(ANA_POL_MODE_DROP_ON_YELLOW_ENA_SET(0) |
+ ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA_SET(0) |
+ ANA_POL_MODE_IPG_SIZE_SET(20) |
+ ANA_POL_MODE_FRM_MODE_SET(1) |
+ ANA_POL_MODE_OVERSHOOT_ENA_SET(1),
+ lan966x, ANA_POL_MODE(pol_idx));
+
+ lan_wr(ANA_POL_PIR_STATE_PIR_LVL_SET(0),
+ lan966x, ANA_POL_PIR_STATE(pol_idx));
+
+ lan_wr(ANA_POL_PIR_CFG_PIR_RATE_SET(pol->rate) |
+ ANA_POL_PIR_CFG_PIR_BURST_SET(pol->burst),
+ lan966x, ANA_POL_PIR_CFG(pol_idx));
+
+ return 0;
+}
+
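As a worked example of the unit conversions above: a policer rate of 10 Mbit/s arrives as pol->rate = 10000 (kbps) and becomes DIV_ROUND_UP(10000 * 3, 100) = 300 hardware units of 33 1/3 kbps, i.e. exactly 10 Mbit/s; a burst of 12000 bytes becomes DIV_ROUND_UP(12000, 4096) = 3 units of 4 KiB. The GENMASK checks then reject anything that does not fit the 16-bit rate and 7-bit burst fields.
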
+static void lan966x_police_del(struct lan966x_port *port, u16 pol_idx)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ lan_wr(ANA_POL_MODE_DROP_ON_YELLOW_ENA_SET(0) |
+ ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA_SET(0) |
+ ANA_POL_MODE_IPG_SIZE_SET(20) |
+ ANA_POL_MODE_FRM_MODE_SET(2) |
+ ANA_POL_MODE_OVERSHOOT_ENA_SET(1),
+ lan966x, ANA_POL_MODE(pol_idx));
+
+ lan_wr(ANA_POL_PIR_STATE_PIR_LVL_SET(0),
+ lan966x, ANA_POL_PIR_STATE(pol_idx));
+
+ lan_wr(ANA_POL_PIR_CFG_PIR_RATE_SET(GENMASK(14, 0)) |
+ ANA_POL_PIR_CFG_PIR_BURST_SET(0),
+ lan966x, ANA_POL_PIR_CFG(pol_idx));
+}
+
+static int lan966x_police_validate(struct lan966x_port *port,
+ const struct flow_action *action,
+ const struct flow_action_entry *act,
+ unsigned long police_id,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "QoS offload does not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ingress) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Policer is not supported on egress");
+ return -EOPNOTSUPP;
+ }
+
+ if (port->tc.ingress_shared_block) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Policer is not supported on shared ingress blocks");
+ return -EOPNOTSUPP;
+ }
+
+ if (port->tc.police_id && port->tc.police_id != police_id) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one policer per port is supported");
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
+int lan966x_police_port_add(struct lan966x_port *port,
+ struct flow_action *action,
+ struct flow_action_entry *act,
+ unsigned long police_id,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct rtnl_link_stats64 new_stats;
+ struct lan966x_tc_policer pol;
+ struct flow_stats *old_stats;
+ int err;
+
+ err = lan966x_police_validate(port, action, act, police_id, ingress,
+ extack);
+ if (err)
+ return err;
+
+ memset(&pol, 0, sizeof(pol));
+
+ pol.rate = div_u64(act->police.rate_bytes_ps, 1000) * 8;
+ pol.burst = act->police.burst;
+
+ err = lan966x_police_add(port, &pol, POL_IDX_PORT + port->chip_port);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to add policer to port");
+ return err;
+ }
+
+ lan_rmw(ANA_POL_CFG_PORT_POL_ENA_SET(1) |
+ ANA_POL_CFG_POL_ORDER_SET(POL_ORDER),
+ ANA_POL_CFG_PORT_POL_ENA |
+ ANA_POL_CFG_POL_ORDER,
+ lan966x, ANA_POL_CFG(port->chip_port));
+
+ port->tc.police_id = police_id;
+
+ /* Setup initial stats */
+ old_stats = &port->tc.police_stat;
+ lan966x_stats_get(port->dev, &new_stats);
+ old_stats->bytes = new_stats.rx_bytes;
+ old_stats->pkts = new_stats.rx_packets;
+ old_stats->drops = new_stats.rx_dropped;
+ old_stats->lastused = jiffies;
+
+ return 0;
+}
+
+int lan966x_police_port_del(struct lan966x_port *port,
+ unsigned long police_id,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ if (port->tc.police_id != police_id) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid policer id");
+ return -EINVAL;
+ }
+
+ lan966x_police_del(port, POL_IDX_PORT + port->chip_port);
+
+ lan_rmw(ANA_POL_CFG_PORT_POL_ENA_SET(0) |
+ ANA_POL_CFG_POL_ORDER_SET(POL_ORDER),
+ ANA_POL_CFG_PORT_POL_ENA |
+ ANA_POL_CFG_POL_ORDER,
+ lan966x, ANA_POL_CFG(port->chip_port));
+
+ port->tc.police_id = 0;
+
+ return 0;
+}
+
+void lan966x_police_port_stats(struct lan966x_port *port,
+ struct flow_stats *stats)
+{
+ struct rtnl_link_stats64 new_stats;
+ struct flow_stats *old_stats;
+
+ old_stats = &port->tc.police_stat;
+ lan966x_stats_get(port->dev, &new_stats);
+
+ flow_stats_update(stats,
+ new_stats.rx_bytes - old_stats->bytes,
+ new_stats.rx_packets - old_stats->pkts,
+ new_stats.rx_dropped - old_stats->drops,
+ old_stats->lastused,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+
+ old_stats->bytes = new_stats.rx_bytes;
+ old_stats->pkts = new_stats.rx_packets;
+ old_stats->drops = new_stats.rx_dropped;
+ old_stats->lastused = jiffies;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
new file mode 100644
index 0000000000..92108d3540
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
@@ -0,0 +1,570 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/netdevice.h>
+#include <linux/phy/phy.h>
+
+#include "lan966x_main.h"
+
+/* Watermark encode */
+#define MULTIPLIER_BIT BIT(8)
+static u32 lan966x_wm_enc(u32 value)
+{
+ value /= LAN966X_BUFFER_CELL_SZ;
+
+ if (value >= MULTIPLIER_BIT) {
+ value /= 16;
+ if (value >= MULTIPLIER_BIT)
+ value = (MULTIPLIER_BIT - 1);
+
+ value |= MULTIPLIER_BIT;
+ }
+
+ return value;
+}
+
+static void lan966x_port_link_down(struct lan966x_port *port)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 val, delay = 0;
+
+ /* 0.5: Disable any AFI */
+ lan_rmw(AFI_PORT_CFG_FC_SKIP_TTI_INJ_SET(1) |
+ AFI_PORT_CFG_FRM_OUT_MAX_SET(0),
+ AFI_PORT_CFG_FC_SKIP_TTI_INJ |
+ AFI_PORT_CFG_FRM_OUT_MAX,
+ lan966x, AFI_PORT_CFG(port->chip_port));
+
+ /* wait for reg afi_port_frm_out to become 0 for the port */
+ while (true) {
+ val = lan_rd(lan966x, AFI_PORT_FRM_OUT(port->chip_port));
+ if (!AFI_PORT_FRM_OUT_FRM_OUT_CNT_GET(val))
+ break;
+
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+ delay++;
+ if (delay == 2000) {
+ pr_err("AFI timeout chip port %u", port->chip_port);
+ break;
+ }
+ }
+
+ delay = 0;
+
+ /* 1: Reset the PCS Rx clock domain */
+ lan_rmw(DEV_CLOCK_CFG_PCS_RX_RST_SET(1),
+ DEV_CLOCK_CFG_PCS_RX_RST,
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+
+ /* 2: Disable MAC frame reception */
+ lan_rmw(DEV_MAC_ENA_CFG_RX_ENA_SET(0),
+ DEV_MAC_ENA_CFG_RX_ENA,
+ lan966x, DEV_MAC_ENA_CFG(port->chip_port));
+
+ /* 3: Disable traffic being sent to or from switch port */
+ lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
+ QSYS_SW_PORT_MODE_PORT_ENA,
+ lan966x, QSYS_SW_PORT_MODE(port->chip_port));
+
+ /* 4: Disable dequeuing from the egress queues */
+ lan_rmw(QSYS_PORT_MODE_DEQUEUE_DIS_SET(1),
+ QSYS_PORT_MODE_DEQUEUE_DIS,
+ lan966x, QSYS_PORT_MODE(port->chip_port));
+
+ /* 5: Disable Flowcontrol */
+ lan_rmw(SYS_PAUSE_CFG_PAUSE_ENA_SET(0),
+ SYS_PAUSE_CFG_PAUSE_ENA,
+ lan966x, SYS_PAUSE_CFG(port->chip_port));
+
+ /* 5.1: Disable PFC */
+ lan_rmw(QSYS_SW_PORT_MODE_TX_PFC_ENA_SET(0),
+ QSYS_SW_PORT_MODE_TX_PFC_ENA,
+ lan966x, QSYS_SW_PORT_MODE(port->chip_port));
+
+ /* 6: Wait a worst case time 8ms (jumbo/10Mbit) */
+ usleep_range(8 * USEC_PER_MSEC, 9 * USEC_PER_MSEC);
+
+ /* 7: Disable HDX backpressure */
+ lan_rmw(SYS_FRONT_PORT_MODE_HDX_MODE_SET(0),
+ SYS_FRONT_PORT_MODE_HDX_MODE,
+ lan966x, SYS_FRONT_PORT_MODE(port->chip_port));
+
+ /* 8: Flush the queues associated with the port */
+ lan_rmw(QSYS_SW_PORT_MODE_AGING_MODE_SET(3),
+ QSYS_SW_PORT_MODE_AGING_MODE,
+ lan966x, QSYS_SW_PORT_MODE(port->chip_port));
+
+ /* 9: Enable dequeuing from the egress queues */
+ lan_rmw(QSYS_PORT_MODE_DEQUEUE_DIS_SET(0),
+ QSYS_PORT_MODE_DEQUEUE_DIS,
+ lan966x, QSYS_PORT_MODE(port->chip_port));
+
+ /* 10: Wait until flushing is complete */
+ while (true) {
+ val = lan_rd(lan966x, QSYS_SW_STATUS(port->chip_port));
+ if (!QSYS_SW_STATUS_EQ_AVAIL_GET(val))
+ break;
+
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+ delay++;
+ if (delay == 2000) {
+ pr_err("Flush timeout chip port %u", port->chip_port);
+ break;
+ }
+ }
+
+ /* 11: Reset the Port and MAC clock domains */
+ lan_rmw(DEV_MAC_ENA_CFG_TX_ENA_SET(0),
+ DEV_MAC_ENA_CFG_TX_ENA,
+ lan966x, DEV_MAC_ENA_CFG(port->chip_port));
+
+ lan_rmw(DEV_CLOCK_CFG_PORT_RST_SET(1),
+ DEV_CLOCK_CFG_PORT_RST,
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+
+ lan_rmw(DEV_CLOCK_CFG_MAC_TX_RST_SET(1) |
+ DEV_CLOCK_CFG_MAC_RX_RST_SET(1) |
+ DEV_CLOCK_CFG_PORT_RST_SET(1),
+ DEV_CLOCK_CFG_MAC_TX_RST |
+ DEV_CLOCK_CFG_MAC_RX_RST |
+ DEV_CLOCK_CFG_PORT_RST,
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+
+ /* 12: Clear flushing */
+ lan_rmw(QSYS_SW_PORT_MODE_AGING_MODE_SET(2),
+ QSYS_SW_PORT_MODE_AGING_MODE,
+ lan966x, QSYS_SW_PORT_MODE(port->chip_port));
+
+ /* The port is disabled and flushed, now set up the port in the
+ * new operating mode
+ */
+}
+
+static void lan966x_port_link_up(struct lan966x_port *port)
+{
+ struct lan966x_port_config *config = &port->config;
+ struct lan966x *lan966x = port->lan966x;
+ int speed = 0, mode = 0;
+ int atop_wm = 0;
+
+ switch (config->speed) {
+ case SPEED_10:
+ speed = LAN966X_SPEED_10;
+ break;
+ case SPEED_100:
+ speed = LAN966X_SPEED_100;
+ break;
+ case SPEED_1000:
+ speed = LAN966X_SPEED_1000;
+ mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1);
+ break;
+ case SPEED_2500:
+ speed = LAN966X_SPEED_2500;
+ mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1);
+ break;
+ }
+
+ lan966x_taprio_speed_set(port, config->speed);
+
+ /* GIGA_MODE_ENA(1) also needs to be set, regardless of the
+ * port speed, for QSGMII ports.
+ */
+ if (phy_interface_num_ports(config->portmode) == 4)
+ mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1);
+
+ lan_wr(config->duplex | mode,
+ lan966x, DEV_MAC_MODE_CFG(port->chip_port));
+
+ lan_rmw(DEV_MAC_IFG_CFG_TX_IFG_SET(config->duplex ? 6 : 5) |
+ DEV_MAC_IFG_CFG_RX_IFG1_SET(config->speed == SPEED_10 ? 2 : 1) |
+ DEV_MAC_IFG_CFG_RX_IFG2_SET(2),
+ DEV_MAC_IFG_CFG_TX_IFG |
+ DEV_MAC_IFG_CFG_RX_IFG1 |
+ DEV_MAC_IFG_CFG_RX_IFG2,
+ lan966x, DEV_MAC_IFG_CFG(port->chip_port));
+
+ lan_rmw(DEV_MAC_HDX_CFG_SEED_SET(4) |
+ DEV_MAC_HDX_CFG_SEED_LOAD_SET(1),
+ DEV_MAC_HDX_CFG_SEED |
+ DEV_MAC_HDX_CFG_SEED_LOAD,
+ lan966x, DEV_MAC_HDX_CFG(port->chip_port));
+
+ if (config->portmode == PHY_INTERFACE_MODE_GMII) {
+ if (config->speed == SPEED_1000)
+ lan_rmw(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_SET(1),
+ CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA,
+ lan966x,
+ CHIP_TOP_CUPHY_PORT_CFG(port->chip_port));
+ else
+ lan_rmw(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_SET(0),
+ CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA,
+ lan966x,
+ CHIP_TOP_CUPHY_PORT_CFG(port->chip_port));
+ }
+
+ /* No PFC */
+ lan_wr(ANA_PFC_CFG_FC_LINK_SPEED_SET(speed),
+ lan966x, ANA_PFC_CFG(port->chip_port));
+
+ lan_rmw(DEV_PCS1G_CFG_PCS_ENA_SET(1),
+ DEV_PCS1G_CFG_PCS_ENA,
+ lan966x, DEV_PCS1G_CFG(port->chip_port));
+
+ lan_rmw(DEV_PCS1G_SD_CFG_SD_ENA_SET(0),
+ DEV_PCS1G_SD_CFG_SD_ENA,
+ lan966x, DEV_PCS1G_SD_CFG(port->chip_port));
+
+ /* Set Pause WM hysteresis, start/stop are in 1518 byte units */
+ lan_wr(SYS_PAUSE_CFG_PAUSE_ENA_SET(1) |
+ SYS_PAUSE_CFG_PAUSE_STOP_SET(lan966x_wm_enc(4 * 1518)) |
+ SYS_PAUSE_CFG_PAUSE_START_SET(lan966x_wm_enc(6 * 1518)),
+ lan966x, SYS_PAUSE_CFG(port->chip_port));
+
+ /* Set SMAC of Pause frame (00:00:00:00:00:00) */
+ lan_wr(0, lan966x, DEV_FC_MAC_LOW_CFG(port->chip_port));
+ lan_wr(0, lan966x, DEV_FC_MAC_HIGH_CFG(port->chip_port));
+
+ /* Flow control */
+ lan_rmw(SYS_MAC_FC_CFG_FC_LINK_SPEED_SET(speed) |
+ SYS_MAC_FC_CFG_FC_LATENCY_CFG_SET(7) |
+ SYS_MAC_FC_CFG_ZERO_PAUSE_ENA_SET(1) |
+ SYS_MAC_FC_CFG_PAUSE_VAL_CFG_SET(0xffff) |
+ SYS_MAC_FC_CFG_RX_FC_ENA_SET(config->pause & MLO_PAUSE_RX ? 1 : 0) |
+ SYS_MAC_FC_CFG_TX_FC_ENA_SET(config->pause & MLO_PAUSE_TX ? 1 : 0),
+ SYS_MAC_FC_CFG_FC_LINK_SPEED |
+ SYS_MAC_FC_CFG_FC_LATENCY_CFG |
+ SYS_MAC_FC_CFG_ZERO_PAUSE_ENA |
+ SYS_MAC_FC_CFG_PAUSE_VAL_CFG |
+ SYS_MAC_FC_CFG_RX_FC_ENA |
+ SYS_MAC_FC_CFG_TX_FC_ENA,
+ lan966x, SYS_MAC_FC_CFG(port->chip_port));
+
+ /* Tail dropping watermark */
+ atop_wm = lan966x->shared_queue_sz;
+
+ /* The total memory size is divided by the number of front ports plus
+ * the CPU port
+ */
+ lan_wr(lan966x_wm_enc(atop_wm / lan966x->num_phys_ports + 1), lan966x,
+ SYS_ATOP(port->chip_port));
+ lan_wr(lan966x_wm_enc(atop_wm), lan966x, SYS_ATOP_TOT_CFG);
+
+ /* This needs to be at the end */
+ /* Enable MAC module */
+ lan_wr(DEV_MAC_ENA_CFG_RX_ENA_SET(1) |
+ DEV_MAC_ENA_CFG_TX_ENA_SET(1),
+ lan966x, DEV_MAC_ENA_CFG(port->chip_port));
+
+ /* Take out the clock from reset */
+ lan_wr(DEV_CLOCK_CFG_LINK_SPEED_SET(speed),
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+
+ /* Core: Enable port for frame transfer */
+ lan_wr(QSYS_SW_PORT_MODE_PORT_ENA_SET(1) |
+ QSYS_SW_PORT_MODE_SCH_NEXT_CFG_SET(1) |
+ QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_SET(1),
+ lan966x, QSYS_SW_PORT_MODE(port->chip_port));
+
+ lan_rmw(AFI_PORT_CFG_FC_SKIP_TTI_INJ_SET(0) |
+ AFI_PORT_CFG_FRM_OUT_MAX_SET(16),
+ AFI_PORT_CFG_FC_SKIP_TTI_INJ |
+ AFI_PORT_CFG_FRM_OUT_MAX,
+ lan966x, AFI_PORT_CFG(port->chip_port));
+}
+
+void lan966x_port_config_down(struct lan966x_port *port)
+{
+ lan966x_port_link_down(port);
+}
+
+void lan966x_port_config_up(struct lan966x_port *port)
+{
+ lan966x_port_link_up(port);
+}
+
+void lan966x_port_status_get(struct lan966x_port *port,
+ struct phylink_link_state *state)
+{
+ struct lan966x *lan966x = port->lan966x;
+ bool link_down;
+ u16 bmsr = 0;
+ u16 lp_adv;
+ u32 val;
+
+ val = lan_rd(lan966x, DEV_PCS1G_STICKY(port->chip_port));
+ link_down = DEV_PCS1G_STICKY_LINK_DOWN_STICKY_GET(val);
+ if (link_down)
+ lan_wr(val, lan966x, DEV_PCS1G_STICKY(port->chip_port));
+
+ /* Get both current Link and Sync status */
+ val = lan_rd(lan966x, DEV_PCS1G_LINK_STATUS(port->chip_port));
+ state->link = DEV_PCS1G_LINK_STATUS_LINK_STATUS_GET(val) &&
+ DEV_PCS1G_LINK_STATUS_SYNC_STATUS_GET(val);
+ state->link &= !link_down;
+
+ /* Get PCS ANEG status register */
+ val = lan_rd(lan966x, DEV_PCS1G_ANEG_STATUS(port->chip_port));
+ /* Aneg complete provides more information */
+ if (DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(val)) {
+ state->an_complete = true;
+
+ bmsr |= state->link ? BMSR_LSTATUS : 0;
+ bmsr |= BMSR_ANEGCOMPLETE;
+
+ lp_adv = DEV_PCS1G_ANEG_STATUS_LP_ADV_GET(val);
+ phylink_mii_c22_pcs_decode_state(state, bmsr, lp_adv);
+ } else {
+ if (!state->link)
+ return;
+
+ if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
+ state->speed = SPEED_1000;
+ else if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+ state->speed = SPEED_2500;
+
+ state->duplex = DUPLEX_FULL;
+ }
+}
+
+int lan966x_port_pcs_set(struct lan966x_port *port,
+ struct lan966x_port_config *config)
+{
+ struct lan966x *lan966x = port->lan966x;
+ bool inband_aneg = false;
+ bool outband;
+ bool full_preamble = false;
+
+ if (config->portmode == PHY_INTERFACE_MODE_QUSGMII)
+ full_preamble = true;
+
+ if (config->inband) {
+ if (config->portmode == PHY_INTERFACE_MODE_SGMII ||
+ phy_interface_num_ports(config->portmode) == 4)
+ inband_aneg = true; /* Cisco-SGMII in-band-aneg */
+ else if (config->portmode == PHY_INTERFACE_MODE_1000BASEX &&
+ config->autoneg)
+ inband_aneg = true; /* Clause-37 in-band-aneg */
+
+ outband = false;
+ } else {
+ outband = true;
+ }
+
+ /* Disable or enable inband.
+ * For QUSGMII, we rely on the preamble to transmit data such as
+ * timestamps, therefore force full preamble transmission, and prevent
+ * preamble shortening
+ */
+ lan_rmw(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(outband) |
+ DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA_SET(full_preamble),
+ DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA |
+ DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA,
+ lan966x, DEV_PCS1G_MODE_CFG(port->chip_port));
+
+ /* Enable PCS */
+ lan_wr(DEV_PCS1G_CFG_PCS_ENA_SET(1),
+ lan966x, DEV_PCS1G_CFG(port->chip_port));
+
+ if (inband_aneg) {
+ int adv = phylink_mii_c22_pcs_encode_advertisement(config->portmode,
+ config->advertising);
+ if (adv >= 0)
+ /* Enable in-band aneg */
+ lan_wr(DEV_PCS1G_ANEG_CFG_ADV_ABILITY_SET(adv) |
+ DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(1) |
+ DEV_PCS1G_ANEG_CFG_ENA_SET(1) |
+ DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT_SET(1),
+ lan966x, DEV_PCS1G_ANEG_CFG(port->chip_port));
+ } else {
+ lan_wr(0, lan966x, DEV_PCS1G_ANEG_CFG(port->chip_port));
+ }
+
+ /* Take PCS out of reset */
+ lan_rmw(DEV_CLOCK_CFG_LINK_SPEED_SET(LAN966X_SPEED_1000) |
+ DEV_CLOCK_CFG_PCS_RX_RST_SET(0) |
+ DEV_CLOCK_CFG_PCS_TX_RST_SET(0),
+ DEV_CLOCK_CFG_LINK_SPEED |
+ DEV_CLOCK_CFG_PCS_RX_RST |
+ DEV_CLOCK_CFG_PCS_TX_RST,
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+
+ port->config = *config;
+
+ return 0;
+}
+
+static void lan966x_port_qos_pcp_set(struct lan966x_port *port,
+ struct lan966x_port_qos_pcp *qos)
+{
+ u8 *pcp_itr = qos->map;
+ u8 pcp, dp;
+
+ lan_rmw(ANA_QOS_CFG_QOS_PCP_ENA_SET(qos->enable),
+ ANA_QOS_CFG_QOS_PCP_ENA,
+ port->lan966x, ANA_QOS_CFG(port->chip_port));
+
+ /* Map PCP and DEI to priority */
+ for (int i = 0; i < ARRAY_SIZE(qos->map); i++) {
+ pcp = *(pcp_itr + i);
+ dp = (i < LAN966X_PORT_QOS_PCP_COUNT) ? 0 : 1;
+
+ lan_rmw(ANA_PCP_DEI_CFG_QOS_PCP_DEI_VAL_SET(pcp) |
+ ANA_PCP_DEI_CFG_DP_PCP_DEI_VAL_SET(dp),
+ ANA_PCP_DEI_CFG_QOS_PCP_DEI_VAL |
+ ANA_PCP_DEI_CFG_DP_PCP_DEI_VAL,
+ port->lan966x,
+ ANA_PCP_DEI_CFG(port->chip_port, i));
+ }
+}
+
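Assuming LAN966X_PORT_QOS_PCP_COUNT is 8, the map has 16 slots: indices 0-7 give the priority for PCP 0-7 with DEI = 0 and indices 8-15 the same PCP values with DEI = 1, so map[3] classifies PCP 3/DEI 0 frames and map[11] PCP 3/DEI 1 frames; the dp computed above additionally marks the DEI = 1 rows with drop precedence 1.
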
+static void lan966x_port_qos_dscp_set(struct lan966x_port *port,
+ struct lan966x_port_qos_dscp *qos)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ /* Enable/disable dscp for qos classification. */
+ lan_rmw(ANA_QOS_CFG_QOS_DSCP_ENA_SET(qos->enable),
+ ANA_QOS_CFG_QOS_DSCP_ENA,
+ lan966x, ANA_QOS_CFG(port->chip_port));
+
+ /* Map each dscp value to priority and dp */
+ for (int i = 0; i < ARRAY_SIZE(qos->map); i++)
+ lan_rmw(ANA_DSCP_CFG_DP_DSCP_VAL_SET(0) |
+ ANA_DSCP_CFG_QOS_DSCP_VAL_SET(*(qos->map + i)),
+ ANA_DSCP_CFG_DP_DSCP_VAL |
+ ANA_DSCP_CFG_QOS_DSCP_VAL,
+ lan966x, ANA_DSCP_CFG(i));
+
+ /* Set per-dscp trust */
+ for (int i = 0; i < ARRAY_SIZE(qos->map); i++)
+ lan_rmw(ANA_DSCP_CFG_DSCP_TRUST_ENA_SET(qos->enable),
+ ANA_DSCP_CFG_DSCP_TRUST_ENA,
+ lan966x, ANA_DSCP_CFG(i));
+}
+
+static int lan966x_port_qos_default_set(struct lan966x_port *port,
+ struct lan966x_port_qos *qos)
+{
+ /* Set default prio and dp level */
+ lan_rmw(ANA_QOS_CFG_DP_DEFAULT_VAL_SET(0) |
+ ANA_QOS_CFG_QOS_DEFAULT_VAL_SET(qos->default_prio),
+ ANA_QOS_CFG_DP_DEFAULT_VAL |
+ ANA_QOS_CFG_QOS_DEFAULT_VAL,
+ port->lan966x, ANA_QOS_CFG(port->chip_port));
+
+ /* Set default pcp and dei for untagged frames */
+ lan_rmw(ANA_VLAN_CFG_VLAN_DEI_SET(0) |
+ ANA_VLAN_CFG_VLAN_PCP_SET(0),
+ ANA_VLAN_CFG_VLAN_DEI |
+ ANA_VLAN_CFG_VLAN_PCP,
+ port->lan966x, ANA_VLAN_CFG(port->chip_port));
+
+ return 0;
+}
+
+static void lan966x_port_qos_pcp_rewr_set(struct lan966x_port *port,
+ struct lan966x_port_qos_pcp_rewr *qos)
+{
+ u8 mode = LAN966X_PORT_REW_TAG_CTRL_CLASSIFIED;
+ u8 pcp, dei;
+
+ if (qos->enable)
+ mode = LAN966X_PORT_REW_TAG_CTRL_MAPPED;
+
+ /* Map the values only if rewriting is enabled; otherwise the
+ * classified value is used
+ */
+ lan_rmw(REW_TAG_CFG_TAG_PCP_CFG_SET(mode) |
+ REW_TAG_CFG_TAG_DEI_CFG_SET(mode),
+ REW_TAG_CFG_TAG_PCP_CFG |
+ REW_TAG_CFG_TAG_DEI_CFG,
+ port->lan966x, REW_TAG_CFG(port->chip_port));
+
+ /* Map each value to pcp and dei */
+ for (int i = 0; i < ARRAY_SIZE(qos->map); i++) {
+ pcp = qos->map[i];
+ if (pcp > LAN966X_PORT_QOS_PCP_COUNT)
+ dei = 1;
+ else
+ dei = 0;
+
+ lan_rmw(REW_PCP_DEI_CFG_DEI_QOS_VAL_SET(dei) |
+ REW_PCP_DEI_CFG_PCP_QOS_VAL_SET(pcp),
+ REW_PCP_DEI_CFG_DEI_QOS_VAL |
+ REW_PCP_DEI_CFG_PCP_QOS_VAL,
+ port->lan966x,
+ REW_PCP_DEI_CFG(port->chip_port,
+ i + dei * LAN966X_PORT_QOS_PCP_COUNT));
+ }
+}
+
+static void lan966x_port_qos_dscp_rewr_set(struct lan966x_port *port,
+ struct lan966x_port_qos_dscp_rewr *qos)
+{
+ u16 dscp;
+ u8 mode;
+
+ if (qos->enable)
+ mode = LAN966X_PORT_REW_DSCP_ANALIZER;
+ else
+ mode = LAN966X_PORT_REW_DSCP_FRAME;
+
+ /* Enable the rewrite; otherwise the values from the frame are used */
+ lan_rmw(REW_DSCP_CFG_DSCP_REWR_CFG_SET(mode),
+ REW_DSCP_CFG_DSCP_REWR_CFG,
+ port->lan966x, REW_DSCP_CFG(port->chip_port));
+
+ /* Map each classified Qos class and DP to classified DSCP value */
+ for (int i = 0; i < ARRAY_SIZE(qos->map); i++) {
+ dscp = qos->map[i];
+
+ lan_rmw(ANA_DSCP_REWR_CFG_DSCP_QOS_REWR_VAL_SET(dscp),
+ ANA_DSCP_REWR_CFG_DSCP_QOS_REWR_VAL,
+ port->lan966x, ANA_DSCP_REWR_CFG(i));
+ }
+}
+
+void lan966x_port_qos_dscp_rewr_mode_set(struct lan966x_port *port,
+ int mode)
+{
+ lan_rmw(ANA_QOS_CFG_DSCP_REWR_CFG_SET(mode),
+ ANA_QOS_CFG_DSCP_REWR_CFG,
+ port->lan966x, ANA_QOS_CFG(port->chip_port));
+}
+
+void lan966x_port_qos_set(struct lan966x_port *port,
+ struct lan966x_port_qos *qos)
+{
+ lan966x_port_qos_pcp_set(port, &qos->pcp);
+ lan966x_port_qos_dscp_set(port, &qos->dscp);
+ lan966x_port_qos_default_set(port, qos);
+ lan966x_port_qos_pcp_rewr_set(port, &qos->pcp_rewr);
+ lan966x_port_qos_dscp_rewr_set(port, &qos->dscp_rewr);
+}
+
+void lan966x_port_init(struct lan966x_port *port)
+{
+ struct lan966x_port_config *config = &port->config;
+ struct lan966x *lan966x = port->lan966x;
+
+ lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(0),
+ ANA_PORT_CFG_LEARN_ENA,
+ lan966x, ANA_PORT_CFG(port->chip_port));
+
+ lan966x_port_config_down(port);
+
+ if (lan966x->fdma)
+ lan966x_fdma_netdev_init(lan966x, port->dev);
+
+ if (phy_interface_num_ports(config->portmode) != 4)
+ return;
+
+ lan_rmw(DEV_CLOCK_CFG_PCS_RX_RST_SET(0) |
+ DEV_CLOCK_CFG_PCS_TX_RST_SET(0) |
+ DEV_CLOCK_CFG_LINK_SPEED_SET(LAN966X_SPEED_1000),
+ DEV_CLOCK_CFG_PCS_RX_RST |
+ DEV_CLOCK_CFG_PCS_TX_RST |
+ DEV_CLOCK_CFG_LINK_SPEED,
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
new file mode 100644
index 0000000000..63905bb5a6
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
@@ -0,0 +1,1113 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/ptp_classify.h>
+
+#include "lan966x_main.h"
+#include "vcap_api.h"
+#include "vcap_api_client.h"
+
+#define LAN966X_MAX_PTP_ID 512
+
+/* Represents a 1 ppm adjustment in 2^59 format with 6.037735849 ns as reference.
+ * The value is calculated as follows: (1/1000000)/((2^-59)/6.037735849)
+ */
+#define LAN966X_1PPM_FORMAT 3480517749723LL
+
+/* Represents a 1 ppb adjustment in 2^59 format with 6.037735849 ns as reference.
+ * The value is calculated as follows: (1/1000000000)/((2^-59)/6.037735849)
+ */
+#define LAN966X_1PPB_FORMAT 3480517749LL
+
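The constants follow from the 5.59 fixed-point TOD increment: one LSB is 2^-59 ns and the nominal step is 6.037735849 ns, so a 1 ppm change corresponds to 10^-6 * 6.037735849 / 2^-59 = 6.037735849 * 2^59 / 10^6 ~= 3480517749723 LSBs, and the 1 ppb constant is one thousandth of that, 3480517749.
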
+#define TOD_ACC_PIN 0x7
+
+/* This represents the base rule ID for the PTP rules that are added in the
+ * VCAP to trap frames to CPU. This number needs to be bigger than the maximum
+ * number of entries that can exist in the VCAP.
+ */
+#define LAN966X_VCAP_PTP_RULE_ID 1000000
+#define LAN966X_VCAP_L2_PTP_TRAP (LAN966X_VCAP_PTP_RULE_ID + 0)
+#define LAN966X_VCAP_IPV4_EV_PTP_TRAP (LAN966X_VCAP_PTP_RULE_ID + 1)
+#define LAN966X_VCAP_IPV4_GEN_PTP_TRAP (LAN966X_VCAP_PTP_RULE_ID + 2)
+#define LAN966X_VCAP_IPV6_EV_PTP_TRAP (LAN966X_VCAP_PTP_RULE_ID + 3)
+#define LAN966X_VCAP_IPV6_GEN_PTP_TRAP (LAN966X_VCAP_PTP_RULE_ID + 4)
+
+enum {
+ PTP_PIN_ACTION_IDLE = 0,
+ PTP_PIN_ACTION_LOAD,
+ PTP_PIN_ACTION_SAVE,
+ PTP_PIN_ACTION_CLOCK,
+ PTP_PIN_ACTION_DELTA,
+ PTP_PIN_ACTION_TOD
+};
+
+static u64 lan966x_ptp_get_nominal_value(void)
+{
+ /* This is the default value by which the time of day is increased on
+ * each system clock cycle. It is in 5.59 nanosecond fixed-point format.
+ */
+ return 0x304d4873ecade305;
+}
+
+static int lan966x_ptp_add_trap(struct lan966x_port *port,
+ int (*add_ptp_key)(struct vcap_rule *vrule,
+ struct lan966x_port*),
+ u32 rule_id,
+ u16 proto)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct vcap_rule *vrule;
+ int err;
+
+ vrule = vcap_get_rule(lan966x->vcap_ctrl, rule_id);
+ if (!IS_ERR(vrule)) {
+ u32 value, mask;
+
+ /* Just modify the ingress port mask and exit */
+ vcap_rule_get_key_u32(vrule, VCAP_KF_IF_IGR_PORT_MASK,
+ &value, &mask);
+ mask &= ~BIT(port->chip_port);
+ vcap_rule_mod_key_u32(vrule, VCAP_KF_IF_IGR_PORT_MASK,
+ value, mask);
+
+ err = vcap_mod_rule(vrule);
+ goto free_rule;
+ }
+
+ vrule = vcap_alloc_rule(lan966x->vcap_ctrl, port->dev,
+ LAN966X_VCAP_CID_IS2_L0,
+ VCAP_USER_PTP, 0, rule_id);
+ if (IS_ERR(vrule))
+ return PTR_ERR(vrule);
+
+ err = add_ptp_key(vrule, port);
+ if (err)
+ goto free_rule;
+
+ err = vcap_rule_add_action_bit(vrule, VCAP_AF_CPU_COPY_ENA, VCAP_BIT_1);
+ err |= vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE, LAN966X_PMM_REPLACE);
+ err |= vcap_val_rule(vrule, proto);
+ if (err)
+ goto free_rule;
+
+ err = vcap_add_rule(vrule);
+
+free_rule:
+ /* Free the local copy of the rule */
+ vcap_free_rule(vrule);
+ return err;
+}
+
+static int lan966x_ptp_del_trap(struct lan966x_port *port,
+ u32 rule_id)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct vcap_rule *vrule;
+ u32 value, mask;
+ int err;
+
+ vrule = vcap_get_rule(lan966x->vcap_ctrl, rule_id);
+ if (IS_ERR(vrule))
+ return -EEXIST;
+
+ vcap_rule_get_key_u32(vrule, VCAP_KF_IF_IGR_PORT_MASK, &value, &mask);
+ mask |= BIT(port->chip_port);
+
+ /* No other port requires this trap, so it is safe to remove it */
+ if (mask == GENMASK(lan966x->num_phys_ports, 0)) {
+ err = vcap_del_rule(lan966x->vcap_ctrl, port->dev, rule_id);
+ goto free_rule;
+ }
+
+ vcap_rule_mod_key_u32(vrule, VCAP_KF_IF_IGR_PORT_MASK, value, mask);
+ err = vcap_mod_rule(vrule);
+
+free_rule:
+ vcap_free_rule(vrule);
+ return err;
+}
+
+static int lan966x_ptp_add_l2_key(struct vcap_rule *vrule,
+ struct lan966x_port *port)
+{
+ return vcap_rule_add_key_u32(vrule, VCAP_KF_ETYPE, ETH_P_1588, ~0);
+}
+
+static int lan966x_ptp_add_ip_event_key(struct vcap_rule *vrule,
+ struct lan966x_port *port)
+{
+ return vcap_rule_add_key_u32(vrule, VCAP_KF_L4_DPORT, PTP_EV_PORT, ~0) ||
+ vcap_rule_add_key_bit(vrule, VCAP_KF_TCP_IS, VCAP_BIT_0);
+}
+
+static int lan966x_ptp_add_ip_general_key(struct vcap_rule *vrule,
+ struct lan966x_port *port)
+{
+ return vcap_rule_add_key_u32(vrule, VCAP_KF_L4_DPORT, PTP_GEN_PORT, ~0) ||
+ vcap_rule_add_key_bit(vrule, VCAP_KF_TCP_IS, VCAP_BIT_0);
+}
+
+static int lan966x_ptp_add_l2_rule(struct lan966x_port *port)
+{
+ return lan966x_ptp_add_trap(port, lan966x_ptp_add_l2_key,
+ LAN966X_VCAP_L2_PTP_TRAP, ETH_P_ALL);
+}
+
+static int lan966x_ptp_add_ipv4_rules(struct lan966x_port *port)
+{
+ int err;
+
+ err = lan966x_ptp_add_trap(port, lan966x_ptp_add_ip_event_key,
+ LAN966X_VCAP_IPV4_EV_PTP_TRAP, ETH_P_IP);
+ if (err)
+ return err;
+
+ err = lan966x_ptp_add_trap(port, lan966x_ptp_add_ip_general_key,
+ LAN966X_VCAP_IPV4_GEN_PTP_TRAP, ETH_P_IP);
+ if (err)
+ lan966x_ptp_del_trap(port, LAN966X_VCAP_IPV4_EV_PTP_TRAP);
+
+ return err;
+}
+
+static int lan966x_ptp_add_ipv6_rules(struct lan966x_port *port)
+{
+ int err;
+
+ err = lan966x_ptp_add_trap(port, lan966x_ptp_add_ip_event_key,
+ LAN966X_VCAP_IPV6_EV_PTP_TRAP, ETH_P_IPV6);
+ if (err)
+ return err;
+
+ err = lan966x_ptp_add_trap(port, lan966x_ptp_add_ip_general_key,
+ LAN966X_VCAP_IPV6_GEN_PTP_TRAP, ETH_P_IPV6);
+ if (err)
+ lan966x_ptp_del_trap(port, LAN966X_VCAP_IPV6_EV_PTP_TRAP);
+
+ return err;
+}
+
+static int lan966x_ptp_del_l2_rule(struct lan966x_port *port)
+{
+ return lan966x_ptp_del_trap(port, LAN966X_VCAP_L2_PTP_TRAP);
+}
+
+static int lan966x_ptp_del_ipv4_rules(struct lan966x_port *port)
+{
+ int err;
+
+ err = lan966x_ptp_del_trap(port, LAN966X_VCAP_IPV4_EV_PTP_TRAP);
+ err |= lan966x_ptp_del_trap(port, LAN966X_VCAP_IPV4_GEN_PTP_TRAP);
+
+ return err;
+}
+
+static int lan966x_ptp_del_ipv6_rules(struct lan966x_port *port)
+{
+ int err;
+
+ err = lan966x_ptp_del_trap(port, LAN966X_VCAP_IPV6_EV_PTP_TRAP);
+ err |= lan966x_ptp_del_trap(port, LAN966X_VCAP_IPV6_GEN_PTP_TRAP);
+
+ return err;
+}
+
+static int lan966x_ptp_add_traps(struct lan966x_port *port)
+{
+ int err;
+
+ err = lan966x_ptp_add_l2_rule(port);
+ if (err)
+ goto err_l2;
+
+ err = lan966x_ptp_add_ipv4_rules(port);
+ if (err)
+ goto err_ipv4;
+
+ err = lan966x_ptp_add_ipv6_rules(port);
+ if (err)
+ goto err_ipv6;
+
+ return err;
+
+err_ipv6:
+ lan966x_ptp_del_ipv4_rules(port);
+err_ipv4:
+ lan966x_ptp_del_l2_rule(port);
+err_l2:
+ return err;
+}
+
+int lan966x_ptp_del_traps(struct lan966x_port *port)
+{
+ int err;
+
+ err = lan966x_ptp_del_l2_rule(port);
+ err |= lan966x_ptp_del_ipv4_rules(port);
+ err |= lan966x_ptp_del_ipv6_rules(port);
+
+ return err;
+}
+
+int lan966x_ptp_setup_traps(struct lan966x_port *port,
+ struct kernel_hwtstamp_config *cfg)
+{
+ if (cfg->rx_filter == HWTSTAMP_FILTER_NONE)
+ return lan966x_ptp_del_traps(port);
+ else
+ return lan966x_ptp_add_traps(port);
+}
+
+int lan966x_ptp_hwtstamp_set(struct lan966x_port *port,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_phc *phc;
+
+ switch (cfg->tx_type) {
+ case HWTSTAMP_TX_ON:
+ port->ptp_tx_cmd = IFH_REW_OP_TWO_STEP_PTP;
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ port->ptp_tx_cmd = IFH_REW_OP_ONE_STEP_PTP;
+ break;
+ case HWTSTAMP_TX_OFF:
+ port->ptp_tx_cmd = IFH_REW_OP_NOOP;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (cfg->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ port->ptp_rx_cmd = false;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ port->ptp_rx_cmd = true;
+ cfg->rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ /* Commit back the result & save it */
+ mutex_lock(&lan966x->ptp_lock);
+ phc = &lan966x->phc[LAN966X_PHC_PORT];
+ phc->hwtstamp_config = *cfg;
+ mutex_unlock(&lan966x->ptp_lock);
+
+ return 0;
+}
+
+void lan966x_ptp_hwtstamp_get(struct lan966x_port *port,
+ struct kernel_hwtstamp_config *cfg)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_phc *phc;
+
+ phc = &lan966x->phc[LAN966X_PHC_PORT];
+ *cfg = phc->hwtstamp_config;
+}
+
+static int lan966x_ptp_classify(struct lan966x_port *port, struct sk_buff *skb)
+{
+ struct ptp_header *header;
+ u8 msgtype;
+ int type;
+
+ if (port->ptp_tx_cmd == IFH_REW_OP_NOOP)
+ return IFH_REW_OP_NOOP;
+
+ type = ptp_classify_raw(skb);
+ if (type == PTP_CLASS_NONE)
+ return IFH_REW_OP_NOOP;
+
+ header = ptp_parse_header(skb, type);
+ if (!header)
+ return IFH_REW_OP_NOOP;
+
+ if (port->ptp_tx_cmd == IFH_REW_OP_TWO_STEP_PTP)
+ return IFH_REW_OP_TWO_STEP_PTP;
+
+ /* If it is a sync frame and one-step is configured then use the
+ * one-step operation, otherwise fall back to two-step
+ */
+ msgtype = ptp_get_msgtype(header, type);
+ if ((msgtype & 0xf) == 0)
+ return IFH_REW_OP_ONE_STEP_PTP;
+
+ return IFH_REW_OP_TWO_STEP_PTP;
+}
+
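The low nibble of the PTP messageType field is 0x0 for Sync, 0x1 for Delay_Req, 0x8 for Follow_Up and 0x9 for Delay_Resp (IEEE 1588-2008), so the msgtype check above hands only Sync frames to the one-step rewriter, where the hardware patches the timestamp into the frame on transmit; every other event frame stays on the two-step path.
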
+static void lan966x_ptp_txtstamp_old_release(struct lan966x_port *port)
+{
+ struct sk_buff *skb, *skb_tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->tx_skbs.lock, flags);
+ skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
+ if (time_after(LAN966X_SKB_CB(skb)->jiffies + LAN966X_PTP_TIMEOUT,
+ jiffies))
+ break;
+
+ __skb_unlink(skb, &port->tx_skbs);
+ dev_kfree_skb_any(skb);
+ }
+ spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
+}
+
+int lan966x_ptp_txtstamp_request(struct lan966x_port *port,
+ struct sk_buff *skb)
+{
+ struct lan966x *lan966x = port->lan966x;
+ unsigned long flags;
+ u8 rew_op;
+
+ rew_op = lan966x_ptp_classify(port, skb);
+ LAN966X_SKB_CB(skb)->rew_op = rew_op;
+
+ if (rew_op != IFH_REW_OP_TWO_STEP_PTP)
+ return 0;
+
+ lan966x_ptp_txtstamp_old_release(port);
+
+ spin_lock_irqsave(&lan966x->ptp_ts_id_lock, flags);
+ if (lan966x->ptp_skbs == LAN966X_MAX_PTP_ID) {
+ spin_unlock_irqrestore(&lan966x->ptp_ts_id_lock, flags);
+ return -EBUSY;
+ }
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ skb_queue_tail(&port->tx_skbs, skb);
+ LAN966X_SKB_CB(skb)->ts_id = port->ts_id;
+ LAN966X_SKB_CB(skb)->jiffies = jiffies;
+
+ lan966x->ptp_skbs++;
+ port->ts_id++;
+ if (port->ts_id == LAN966X_MAX_PTP_ID)
+ port->ts_id = 0;
+
+ spin_unlock_irqrestore(&lan966x->ptp_ts_id_lock, flags);
+
+ return 0;
+}
+
+void lan966x_ptp_txtstamp_release(struct lan966x_port *port,
+ struct sk_buff *skb)
+{
+ struct lan966x *lan966x = port->lan966x;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lan966x->ptp_ts_id_lock, flags);
+ port->ts_id--;
+ lan966x->ptp_skbs--;
+ skb_unlink(skb, &port->tx_skbs);
+ spin_unlock_irqrestore(&lan966x->ptp_ts_id_lock, flags);
+}
+
+static void lan966x_get_hwtimestamp(struct lan966x *lan966x,
+ struct timespec64 *ts,
+ u32 nsec)
+{
+ /* Read current PTP time to get seconds */
+ unsigned long flags;
+ u32 curr_nsec;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
+ PTP_PIN_CFG_PIN_DOM_SET(LAN966X_PHC_PORT) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
+
+ ts->tv_sec = lan_rd(lan966x, PTP_TOD_SEC_LSB(TOD_ACC_PIN));
+ curr_nsec = lan_rd(lan966x, PTP_TOD_NSEC(TOD_ACC_PIN));
+
+ ts->tv_nsec = nsec;
+
+ /* Sec has incremented since the ts was registered */
+ if (curr_nsec < nsec)
+ ts->tv_sec--;
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+}
+
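A worked example of the rollover guard: if the captured nanoseconds are 999999900 but PTP_TOD_NSEC already reads 150 at readout time, the seconds counter has ticked in between, so tv_sec is decremented to pair the saved seconds with the captured nanoseconds.
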
+irqreturn_t lan966x_ptp_irq_handler(int irq, void *args)
+{
+ int budget = LAN966X_MAX_PTP_ID;
+ struct lan966x *lan966x = args;
+
+ while (budget--) {
+ struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct lan966x_port *port;
+ struct timespec64 ts;
+ unsigned long flags;
+ u32 val, id, txport;
+ u32 delay;
+
+ val = lan_rd(lan966x, PTP_TWOSTEP_CTRL);
+
+ /* Check if a timestamp can be retrieved */
+ if (!(val & PTP_TWOSTEP_CTRL_VLD))
+ break;
+
+ WARN_ON(val & PTP_TWOSTEP_CTRL_OVFL);
+
+ if (!(val & PTP_TWOSTEP_CTRL_STAMP_TX))
+ continue;
+
+ /* Retrieve the ts Tx port */
+ txport = PTP_TWOSTEP_CTRL_STAMP_PORT_GET(val);
+
+ /* Look up the port; its tx_skbs queue holds the matching skb */
+ port = lan966x->ports[txport];
+
+ /* Retrieve the delay */
+ delay = lan_rd(lan966x, PTP_TWOSTEP_STAMP);
+ delay = PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(delay);
+
+ /* Pop the FIFO to advance to the next entry, which is encoded
+ * like an RX timestamp and carries the ID of the frame
+ */
+ lan_rmw(PTP_TWOSTEP_CTRL_NXT_SET(1),
+ PTP_TWOSTEP_CTRL_NXT,
+ lan966x, PTP_TWOSTEP_CTRL);
+
+ val = lan_rd(lan966x, PTP_TWOSTEP_CTRL);
+
+ /* Check if a timestamp can be retrieved */
+ if (!(val & PTP_TWOSTEP_CTRL_VLD))
+ break;
+
+ /* Read RX timestamping to get the ID */
+ id = lan_rd(lan966x, PTP_TWOSTEP_STAMP);
+
+ spin_lock_irqsave(&port->tx_skbs.lock, flags);
+ skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
+ if (LAN966X_SKB_CB(skb)->ts_id != id)
+ continue;
+
+ __skb_unlink(skb, &port->tx_skbs);
+ skb_match = skb;
+ break;
+ }
+ spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
+
+ /* Pop the FIFO again to advance past the ID entry */
+ lan_rmw(PTP_TWOSTEP_CTRL_NXT_SET(1),
+ PTP_TWOSTEP_CTRL_NXT,
+ lan966x, PTP_TWOSTEP_CTRL);
+
+ if (WARN_ON(!skb_match))
+ continue;
+
+ spin_lock_irqsave(&lan966x->ptp_ts_id_lock, flags);
+ lan966x->ptp_skbs--;
+ spin_unlock_irqrestore(&lan966x->ptp_ts_id_lock, flags);
+
+ /* Get the h/w timestamp */
+ lan966x_get_hwtimestamp(lan966x, &ts, delay);
+
+ /* Set the timestamp into the skb */
+ shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+ skb_tstamp_tx(skb_match, &shhwtstamps);
+
+ dev_kfree_skb_any(skb_match);
+ }
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t lan966x_ptp_ext_irq_handler(int irq, void *args)
+{
+ struct lan966x *lan966x = args;
+ struct lan966x_phc *phc;
+ unsigned long flags;
+ u64 time = 0;
+ time64_t s;
+ int pin, i;
+ s64 ns;
+
+ if (!(lan_rd(lan966x, PTP_PIN_INTR)))
+ return IRQ_NONE;
+
+ /* Go through all domains and see which pin generated the interrupt */
+ for (i = 0; i < LAN966X_PHC_COUNT; ++i) {
+ struct ptp_clock_event ptp_event = {0};
+
+ phc = &lan966x->phc[i];
+ pin = ptp_find_pin_unlocked(phc->clock, PTP_PF_EXTTS, 0);
+ if (pin == -1)
+ continue;
+
+ if (!(lan_rd(lan966x, PTP_PIN_INTR) & BIT(pin)))
+ continue;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+
+ /* Clear the interrupt by writing 1 to the bit, which also
+ * re-arms it for the next event
+ */
+ lan_wr(BIT(pin), lan966x, PTP_PIN_INTR);
+
+ /* Get current time */
+ s = lan_rd(lan966x, PTP_TOD_SEC_MSB(pin));
+ s <<= 32;
+ s |= lan_rd(lan966x, PTP_TOD_SEC_LSB(pin));
+ ns = lan_rd(lan966x, PTP_TOD_NSEC(pin));
+ ns &= PTP_TOD_NSEC_TOD_NSEC;
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+
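+ /* The nanosecond counter encodes -16..-1 ns as
+ * 0x3FFFFFF0..0x3FFFFFFF; fold those into the previous second
+ */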
+ if ((ns & 0xFFFFFFF0) == 0x3FFFFFF0) {
+ s--;
+ ns &= 0xf;
+ ns += 999999984;
+ }
+ time = ktime_set(s, ns);
+
+ ptp_event.index = pin;
+ ptp_event.timestamp = time;
+ ptp_event.type = PTP_CLOCK_EXTTS;
+ ptp_clock_event(phc->clock, &ptp_event);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int lan966x_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+ unsigned long flags;
+ bool neg_adj = false;
+ u64 tod_inc;
+ u64 ref;
+
+ if (!scaled_ppm)
+ return 0;
+
+ if (scaled_ppm < 0) {
+ neg_adj = true;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ tod_inc = lan966x_ptp_get_nominal_value();
+
+ /* The multiplication is split into two separate additions to avoid
+ * overflow: multiplying scaled_ppm, including its 16-bit fractional
+ * part, directly by LAN966X_1PPM_FORMAT would overflow for
+ * adjustments larger than about 20 ppm.
+ */
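+ /* For example, scaled_ppm = 3 << 16 requests a +3 ppm adjustment:
+ * ref becomes 3 * LAN966X_1PPM_FORMAT and the second term is zero.
+ */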
+ ref = LAN966X_1PPM_FORMAT * (scaled_ppm >> 16);
+ ref += (LAN966X_1PPM_FORMAT * (0xffff & scaled_ppm)) >> 16;
+ tod_inc = neg_adj ? tod_inc - ref : tod_inc + ref;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+
+ lan_rmw(PTP_DOM_CFG_CLKCFG_DIS_SET(BIT(phc->index)),
+ PTP_DOM_CFG_CLKCFG_DIS,
+ lan966x, PTP_DOM_CFG);
+
+ lan_wr((u32)tod_inc & 0xFFFFFFFF, lan966x,
+ PTP_CLK_PER_CFG(phc->index, 0));
+ lan_wr((u32)(tod_inc >> 32), lan966x,
+ PTP_CLK_PER_CFG(phc->index, 1));
+
+ lan_rmw(PTP_DOM_CFG_CLKCFG_DIS_SET(0),
+ PTP_DOM_CFG_CLKCFG_DIS,
+ lan966x, PTP_DOM_CFG);
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+
+ return 0;
+}
+
+static int lan966x_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+
+ /* Must be in IDLE mode before the time can be loaded */
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
+
+ /* Set new value */
+ lan_wr(PTP_TOD_SEC_MSB_TOD_SEC_MSB_SET(upper_32_bits(ts->tv_sec)),
+ lan966x, PTP_TOD_SEC_MSB(TOD_ACC_PIN));
+ lan_wr(lower_32_bits(ts->tv_sec),
+ lan966x, PTP_TOD_SEC_LSB(TOD_ACC_PIN));
+ lan_wr(ts->tv_nsec, lan966x, PTP_TOD_NSEC(TOD_ACC_PIN));
+
+ /* Apply new values */
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_LOAD) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+
+ return 0;
+}
+
+int lan966x_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+ unsigned long flags;
+ time64_t s;
+ s64 ns;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
+
+ s = lan_rd(lan966x, PTP_TOD_SEC_MSB(TOD_ACC_PIN));
+ s <<= 32;
+ s |= lan_rd(lan966x, PTP_TOD_SEC_LSB(TOD_ACC_PIN));
+ ns = lan_rd(lan966x, PTP_TOD_NSEC(TOD_ACC_PIN));
+ ns &= PTP_TOD_NSEC_TOD_NSEC;
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+
+ /* Deal with negative values: the nanosecond counter encodes
+ * -16..-1 ns as 0x3FFFFFF0..0x3FFFFFFF, so fold those into the
+ * previous second (999999984 = 10^9 - 16)
+ */
+ if ((ns & 0xFFFFFFF0) == 0x3FFFFFF0) {
+ s--;
+ ns &= 0xf;
+ ns += 999999984;
+ }
+
+ set_normalized_timespec64(ts, s, ns);
+ return 0;
+}
+
+static int lan966x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+
+ if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+
+ /* Must be in IDLE mode before the time can be loaded */
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
+
+ lan_wr(PTP_TOD_NSEC_TOD_NSEC_SET(delta),
+ lan966x, PTP_TOD_NSEC(TOD_ACC_PIN));
+
+ /* Adjust time with the value of PTP_TOD_NSEC */
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_DELTA) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+ } else {
+ /* Fall back to lan966x_ptp_settime64(), which is not exact */
+ struct timespec64 ts;
+ u64 now;
+
+ lan966x_ptp_gettime64(ptp, &ts);
+
+ now = ktime_to_ns(timespec64_to_ktime(ts));
+ ts = ns_to_timespec64(now + delta);
+
+ lan966x_ptp_settime64(ptp, &ts);
+ }
+
+ return 0;
+}
+
+static int lan966x_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+ struct ptp_clock_info *info;
+ int i;
+
+ /* Only one channel is currently supported */
+ if (chan != 0)
+ return -1;
+
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_PEROUT:
+ case PTP_PF_EXTTS:
+ break;
+ default:
+ return -1;
+ }
+
+ /* The PTP pins are shared by all the PHCs, so check whether the
+ * pin is already in use by another PHC, i.e. whether it already
+ * has a function assigned on that PHC.
+ */
+ for (i = 0; i < LAN966X_PHC_COUNT; ++i) {
+ info = &lan966x->phc[i].info;
+
+ /* Ignore the check with ourself */
+ if (ptp == info)
+ continue;
+
+ if (info->pin_config[pin].func == PTP_PF_PEROUT ||
+ info->pin_config[pin].func == PTP_PF_EXTTS)
+ return -1;
+ }
+
+ return 0;
+}
+
+static int lan966x_ptp_perout(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+ struct timespec64 ts_phase, ts_period;
+ unsigned long flags;
+ s64 wf_high, wf_low;
+ bool pps = false;
+ int pin;
+
+ if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE |
+ PTP_PEROUT_PHASE))
+ return -EOPNOTSUPP;
+
+ pin = ptp_find_pin(phc->clock, PTP_PF_PEROUT, rq->perout.index);
+ if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM)
+ return -EINVAL;
+
+ if (!on) {
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(pin));
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+ return 0;
+ }
+
+ if (rq->perout.period.sec == 1 &&
+ rq->perout.period.nsec == 0)
+ pps = true;
+
+ if (rq->perout.flags & PTP_PEROUT_PHASE) {
+ ts_phase.tv_sec = rq->perout.phase.sec;
+ ts_phase.tv_nsec = rq->perout.phase.nsec;
+ } else {
+ ts_phase.tv_sec = rq->perout.start.sec;
+ ts_phase.tv_nsec = rq->perout.start.nsec;
+ }
+
+ if (ts_phase.tv_sec || (ts_phase.tv_nsec && !pps)) {
+ dev_warn(lan966x->dev,
+ "Absolute time not supported!\n");
+ return -EINVAL;
+ }
+
+ if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) {
+ struct timespec64 ts_on;
+
+ ts_on.tv_sec = rq->perout.on.sec;
+ ts_on.tv_nsec = rq->perout.on.nsec;
+
+ wf_high = timespec64_to_ns(&ts_on);
+ } else {
+ wf_high = 5000;
+ }
+
+ if (pps) {
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+ lan_wr(PTP_WF_LOW_PERIOD_PIN_WFL(ts_phase.tv_nsec),
+ lan966x, PTP_WF_LOW_PERIOD(pin));
+ lan_wr(PTP_WF_HIGH_PERIOD_PIN_WFH(wf_high),
+ lan966x, PTP_WF_HIGH_PERIOD(pin));
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_CLOCK) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(3),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(pin));
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+ return 0;
+ }
+
+ ts_period.tv_sec = rq->perout.period.sec;
+ ts_period.tv_nsec = rq->perout.period.nsec;
+
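+ /* The low phase is the remainder of the period, e.g. a 10 ms
+ * period with a 2.5 ms high time gives wf_low = 7500000 ns
+ */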
+ wf_low = timespec64_to_ns(&ts_period);
+ wf_low -= wf_high;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+ lan_wr(PTP_WF_LOW_PERIOD_PIN_WFL(wf_low),
+ lan966x, PTP_WF_LOW_PERIOD(pin));
+ lan_wr(PTP_WF_HIGH_PERIOD_PIN_WFH(wf_high),
+ lan966x, PTP_WF_HIGH_PERIOD(pin));
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_CLOCK) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(pin));
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+
+ return 0;
+}
+
+static int lan966x_ptp_extts(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+ unsigned long flags;
+ int pin;
+ u32 val;
+
+ if (lan966x->ptp_ext_irq <= 0)
+ return -EOPNOTSUPP;
+
+ /* Reject requests with unsupported flags */
+ if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
+ pin = ptp_find_pin(phc->clock, PTP_PF_EXTTS, rq->extts.index);
+ if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM)
+ return -EINVAL;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
+ PTP_PIN_CFG_PIN_SYNC_SET(on ? 3 : 0) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SELECT_SET(pin),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_SYNC |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SELECT,
+ lan966x, PTP_PIN_CFG(pin));
+
+ val = lan_rd(lan966x, PTP_PIN_INTR_ENA);
+ if (on)
+ val |= BIT(pin);
+ else
+ val &= ~BIT(pin);
+ lan_wr(val, lan966x, PTP_PIN_INTR_ENA);
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+
+ return 0;
+}
+
+static int lan966x_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ return lan966x_ptp_perout(ptp, rq, on);
+ case PTP_CLK_REQ_EXTTS:
+ return lan966x_ptp_extts(ptp, rq, on);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static struct ptp_clock_info lan966x_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "lan966x ptp",
+ .max_adj = 200000,
+ .gettime64 = lan966x_ptp_gettime64,
+ .settime64 = lan966x_ptp_settime64,
+ .adjtime = lan966x_ptp_adjtime,
+ .adjfine = lan966x_ptp_adjfine,
+ .verify = lan966x_ptp_verify,
+ .enable = lan966x_ptp_enable,
+ .n_per_out = LAN966X_PHC_PINS_NUM,
+ .n_ext_ts = LAN966X_PHC_PINS_NUM,
+ .n_pins = LAN966X_PHC_PINS_NUM,
+};
+
+static int lan966x_ptp_phc_init(struct lan966x *lan966x,
+ int index,
+ struct ptp_clock_info *clock_info)
+{
+ struct lan966x_phc *phc = &lan966x->phc[index];
+ struct ptp_pin_desc *p;
+ int i;
+
+ for (i = 0; i < LAN966X_PHC_PINS_NUM; i++) {
+ p = &phc->pins[i];
+
+ snprintf(p->name, sizeof(p->name), "pin%d", i);
+ p->index = i;
+ p->func = PTP_PF_NONE;
+ }
+
+ phc->info = *clock_info;
+ phc->info.pin_config = &phc->pins[0];
+ phc->clock = ptp_clock_register(&phc->info, lan966x->dev);
+ if (IS_ERR(phc->clock))
+ return PTR_ERR(phc->clock);
+
+ phc->index = index;
+ phc->lan966x = lan966x;
+
+ return 0;
+}
+
+int lan966x_ptp_init(struct lan966x *lan966x)
+{
+ u64 tod_adj = lan966x_ptp_get_nominal_value();
+ struct lan966x_port *port;
+ int err, i;
+
+ if (!lan966x->ptp)
+ return 0;
+
+ for (i = 0; i < LAN966X_PHC_COUNT; ++i) {
+ err = lan966x_ptp_phc_init(lan966x, i, &lan966x_ptp_clock_info);
+ if (err)
+ return err;
+ }
+
+ spin_lock_init(&lan966x->ptp_clock_lock);
+ spin_lock_init(&lan966x->ptp_ts_id_lock);
+ mutex_init(&lan966x->ptp_lock);
+
+ /* Disable master counters */
+ lan_wr(PTP_DOM_CFG_ENA_SET(0), lan966x, PTP_DOM_CFG);
+
+ /* Configure the nominal TOD increment per clock cycle */
+ lan_rmw(PTP_DOM_CFG_CLKCFG_DIS_SET(0x7),
+ PTP_DOM_CFG_CLKCFG_DIS,
+ lan966x, PTP_DOM_CFG);
+
+ for (i = 0; i < LAN966X_PHC_COUNT; ++i) {
+ lan_wr((u32)tod_adj & 0xFFFFFFFF, lan966x,
+ PTP_CLK_PER_CFG(i, 0));
+ lan_wr((u32)(tod_adj >> 32), lan966x,
+ PTP_CLK_PER_CFG(i, 1));
+ }
+
+ lan_rmw(PTP_DOM_CFG_CLKCFG_DIS_SET(0),
+ PTP_DOM_CFG_CLKCFG_DIS,
+ lan966x, PTP_DOM_CFG);
+
+ /* Enable master counters */
+ lan_wr(PTP_DOM_CFG_ENA_SET(0x7), lan966x, PTP_DOM_CFG);
+
+ for (i = 0; i < lan966x->num_phys_ports; i++) {
+ port = lan966x->ports[i];
+ if (!port)
+ continue;
+
+ skb_queue_head_init(&port->tx_skbs);
+ }
+
+ return 0;
+}
+
+void lan966x_ptp_deinit(struct lan966x *lan966x)
+{
+ struct lan966x_port *port;
+ int i;
+
+ if (!lan966x->ptp)
+ return;
+
+ for (i = 0; i < lan966x->num_phys_ports; i++) {
+ port = lan966x->ports[i];
+ if (!port)
+ continue;
+
+ skb_queue_purge(&port->tx_skbs);
+ }
+
+ for (i = 0; i < LAN966X_PHC_COUNT; ++i)
+ ptp_clock_unregister(lan966x->phc[i].clock);
+}
+
+void lan966x_ptp_rxtstamp(struct lan966x *lan966x, struct sk_buff *skb,
+ u64 src_port, u64 timestamp)
+{
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct lan966x_phc *phc;
+ struct timespec64 ts;
+ u64 full_ts_in_ns;
+
+ if (!lan966x->ptp ||
+ !lan966x->ports[src_port]->ptp_rx_cmd)
+ return;
+
+ phc = &lan966x->phc[LAN966X_PHC_PORT];
+ lan966x_ptp_gettime64(&phc->info, &ts);
+
+ /* Drop the sub-ns precision */
+ timestamp = timestamp >> 2;
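+
+ /* Only the nanosecond part travels with the frame; rebuild the full
+ * time from the current PHC seconds, borrowing one second if the
+ * nanosecond counter wrapped after the frame was stamped.
+ */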
+ if (ts.tv_nsec < timestamp)
+ ts.tv_sec--;
+ ts.tv_nsec = timestamp;
+ full_ts_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);
+
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp = full_ts_in_ns;
+}
+
+u32 lan966x_ptp_get_period_ps(void)
+{
+ /* This represents the system clock period in picoseconds:
+ * 15125 ps = 15.125 ns, i.e. a clock of roughly 66.1 MHz
+ */
+ return 15125;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
new file mode 100644
index 0000000000..4b553927d2
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
@@ -0,0 +1,1888 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+
+/* This file is autogenerated by cml-utils 2021-10-10 13:25:08 +0200.
+ * Commit ID: 26db2002924973d36a30b369c94f025a678fe9ea (dirty)
+ */
+
+#ifndef _LAN966X_REGS_H_
+#define _LAN966X_REGS_H_
+
+#include <linux/bitfield.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+
+enum lan966x_target {
+ TARGET_AFI = 2,
+ TARGET_ANA = 3,
+ TARGET_CHIP_TOP = 5,
+ TARGET_CPU = 6,
+ TARGET_DEV = 13,
+ TARGET_FDMA = 21,
+ TARGET_GCB = 27,
+ TARGET_ORG = 36,
+ TARGET_PTP = 41,
+ TARGET_QS = 42,
+ TARGET_QSYS = 46,
+ TARGET_REW = 47,
+ TARGET_SYS = 52,
+ TARGET_VCAP = 61,
+ NUM_TARGETS = 66
+};
+
+#define __REG(...) __VA_ARGS__
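+
+/* Each register macro expands to eleven parameters that the lan_rd(),
+ * lan_wr() and lan_rmw() accessors in lan966x_main.h fold into a byte
+ * offset; in order, they appear to be: target id, target instance,
+ * target count, group base address, group instance, group count, group
+ * width, register offset, register instance, register count and
+ * register width.
+ */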
+
+/* AFI:PORT_TBL:PORT_FRM_OUT */
+#define AFI_PORT_FRM_OUT(g) __REG(TARGET_AFI, 0, 1, 98816, g, 10, 8, 0, 0, 1, 4)
+
+#define AFI_PORT_FRM_OUT_FRM_OUT_CNT GENMASK(26, 16)
+#define AFI_PORT_FRM_OUT_FRM_OUT_CNT_SET(x)\
+ FIELD_PREP(AFI_PORT_FRM_OUT_FRM_OUT_CNT, x)
+#define AFI_PORT_FRM_OUT_FRM_OUT_CNT_GET(x)\
+ FIELD_GET(AFI_PORT_FRM_OUT_FRM_OUT_CNT, x)
+
+/* AFI:PORT_TBL:PORT_CFG */
+#define AFI_PORT_CFG(g) __REG(TARGET_AFI, 0, 1, 98816, g, 10, 8, 4, 0, 1, 4)
+
+#define AFI_PORT_CFG_FC_SKIP_TTI_INJ BIT(16)
+#define AFI_PORT_CFG_FC_SKIP_TTI_INJ_SET(x)\
+ FIELD_PREP(AFI_PORT_CFG_FC_SKIP_TTI_INJ, x)
+#define AFI_PORT_CFG_FC_SKIP_TTI_INJ_GET(x)\
+ FIELD_GET(AFI_PORT_CFG_FC_SKIP_TTI_INJ, x)
+
+#define AFI_PORT_CFG_FRM_OUT_MAX GENMASK(9, 0)
+#define AFI_PORT_CFG_FRM_OUT_MAX_SET(x)\
+ FIELD_PREP(AFI_PORT_CFG_FRM_OUT_MAX, x)
+#define AFI_PORT_CFG_FRM_OUT_MAX_GET(x)\
+ FIELD_GET(AFI_PORT_CFG_FRM_OUT_MAX, x)
+
+/* ANA:ANA:ADVLEARN */
+#define ANA_ADVLEARN __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 0, 0, 1, 4)
+
+#define ANA_ADVLEARN_VLAN_CHK BIT(0)
+#define ANA_ADVLEARN_VLAN_CHK_SET(x)\
+ FIELD_PREP(ANA_ADVLEARN_VLAN_CHK, x)
+#define ANA_ADVLEARN_VLAN_CHK_GET(x)\
+ FIELD_GET(ANA_ADVLEARN_VLAN_CHK, x)
+
+/* ANA:ANA:VLANMASK */
+#define ANA_VLANMASK __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 8, 0, 1, 4)
+
+/* ANA:ANA:ANAINTR */
+#define ANA_ANAINTR __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 16, 0, 1, 4)
+
+#define ANA_ANAINTR_INTR BIT(1)
+#define ANA_ANAINTR_INTR_SET(x)\
+ FIELD_PREP(ANA_ANAINTR_INTR, x)
+#define ANA_ANAINTR_INTR_GET(x)\
+ FIELD_GET(ANA_ANAINTR_INTR, x)
+
+#define ANA_ANAINTR_INTR_ENA BIT(0)
+#define ANA_ANAINTR_INTR_ENA_SET(x)\
+ FIELD_PREP(ANA_ANAINTR_INTR_ENA, x)
+#define ANA_ANAINTR_INTR_ENA_GET(x)\
+ FIELD_GET(ANA_ANAINTR_INTR_ENA, x)
+
+/* ANA:ANA:AUTOAGE */
+#define ANA_AUTOAGE __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 44, 0, 1, 4)
+
+#define ANA_AUTOAGE_AGE_PERIOD GENMASK(20, 1)
+#define ANA_AUTOAGE_AGE_PERIOD_SET(x)\
+ FIELD_PREP(ANA_AUTOAGE_AGE_PERIOD, x)
+#define ANA_AUTOAGE_AGE_PERIOD_GET(x)\
+ FIELD_GET(ANA_AUTOAGE_AGE_PERIOD, x)
+
+/* ANA:ANA:MIRRORPORTS */
+#define ANA_MIRRORPORTS __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 60, 0, 1, 4)
+
+#define ANA_MIRRORPORTS_MIRRORPORTS GENMASK(8, 0)
+#define ANA_MIRRORPORTS_MIRRORPORTS_SET(x)\
+ FIELD_PREP(ANA_MIRRORPORTS_MIRRORPORTS, x)
+#define ANA_MIRRORPORTS_MIRRORPORTS_GET(x)\
+ FIELD_GET(ANA_MIRRORPORTS_MIRRORPORTS, x)
+
+/* ANA:ANA:EMIRRORPORTS */
+#define ANA_EMIRRORPORTS __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 64, 0, 1, 4)
+
+#define ANA_EMIRRORPORTS_EMIRRORPORTS GENMASK(8, 0)
+#define ANA_EMIRRORPORTS_EMIRRORPORTS_SET(x)\
+ FIELD_PREP(ANA_EMIRRORPORTS_EMIRRORPORTS, x)
+#define ANA_EMIRRORPORTS_EMIRRORPORTS_GET(x)\
+ FIELD_GET(ANA_EMIRRORPORTS_EMIRRORPORTS, x)
+
+/* ANA:ANA:FLOODING */
+#define ANA_FLOODING(r) __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 68, r, 8, 4)
+
+#define ANA_FLOODING_FLD_UNICAST GENMASK(17, 12)
+#define ANA_FLOODING_FLD_UNICAST_SET(x)\
+ FIELD_PREP(ANA_FLOODING_FLD_UNICAST, x)
+#define ANA_FLOODING_FLD_UNICAST_GET(x)\
+ FIELD_GET(ANA_FLOODING_FLD_UNICAST, x)
+
+#define ANA_FLOODING_FLD_BROADCAST GENMASK(11, 6)
+#define ANA_FLOODING_FLD_BROADCAST_SET(x)\
+ FIELD_PREP(ANA_FLOODING_FLD_BROADCAST, x)
+#define ANA_FLOODING_FLD_BROADCAST_GET(x)\
+ FIELD_GET(ANA_FLOODING_FLD_BROADCAST, x)
+
+#define ANA_FLOODING_FLD_MULTICAST GENMASK(5, 0)
+#define ANA_FLOODING_FLD_MULTICAST_SET(x)\
+ FIELD_PREP(ANA_FLOODING_FLD_MULTICAST, x)
+#define ANA_FLOODING_FLD_MULTICAST_GET(x)\
+ FIELD_GET(ANA_FLOODING_FLD_MULTICAST, x)
+
+/* ANA:ANA:FLOODING_IPMC */
+#define ANA_FLOODING_IPMC __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 100, 0, 1, 4)
+
+#define ANA_FLOODING_IPMC_FLD_MC4_CTRL GENMASK(23, 18)
+#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_SET(x)\
+ FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC4_CTRL, x)
+#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_GET(x)\
+ FIELD_GET(ANA_FLOODING_IPMC_FLD_MC4_CTRL, x)
+
+#define ANA_FLOODING_IPMC_FLD_MC4_DATA GENMASK(17, 12)
+#define ANA_FLOODING_IPMC_FLD_MC4_DATA_SET(x)\
+ FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC4_DATA, x)
+#define ANA_FLOODING_IPMC_FLD_MC4_DATA_GET(x)\
+ FIELD_GET(ANA_FLOODING_IPMC_FLD_MC4_DATA, x)
+
+#define ANA_FLOODING_IPMC_FLD_MC6_CTRL GENMASK(11, 6)
+#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_SET(x)\
+ FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC6_CTRL, x)
+#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_GET(x)\
+ FIELD_GET(ANA_FLOODING_IPMC_FLD_MC6_CTRL, x)
+
+#define ANA_FLOODING_IPMC_FLD_MC6_DATA GENMASK(5, 0)
+#define ANA_FLOODING_IPMC_FLD_MC6_DATA_SET(x)\
+ FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC6_DATA, x)
+#define ANA_FLOODING_IPMC_FLD_MC6_DATA_GET(x)\
+ FIELD_GET(ANA_FLOODING_IPMC_FLD_MC6_DATA, x)
+
+/* ANA:PGID:PGID */
+#define ANA_PGID(g) __REG(TARGET_ANA, 0, 1, 27648, g, 89, 8, 0, 0, 1, 4)
+
+#define ANA_PGID_PGID GENMASK(8, 0)
+#define ANA_PGID_PGID_SET(x)\
+ FIELD_PREP(ANA_PGID_PGID, x)
+#define ANA_PGID_PGID_GET(x)\
+ FIELD_GET(ANA_PGID_PGID, x)
+
+/* ANA:PGID:PGID_CFG */
+#define ANA_PGID_CFG(g) __REG(TARGET_ANA, 0, 1, 27648, g, 89, 8, 4, 0, 1, 4)
+
+#define ANA_PGID_CFG_OBEY_VLAN BIT(0)
+#define ANA_PGID_CFG_OBEY_VLAN_SET(x)\
+ FIELD_PREP(ANA_PGID_CFG_OBEY_VLAN, x)
+#define ANA_PGID_CFG_OBEY_VLAN_GET(x)\
+ FIELD_GET(ANA_PGID_CFG_OBEY_VLAN, x)
+
+/* ANA:ANA_TABLES:MACHDATA */
+#define ANA_MACHDATA __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 40, 0, 1, 4)
+
+/* ANA:ANA_TABLES:MACLDATA */
+#define ANA_MACLDATA __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 44, 0, 1, 4)
+
+/* ANA:ANA_TABLES:MACACCESS */
+#define ANA_MACACCESS __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 48, 0, 1, 4)
+
+#define ANA_MACACCESS_CHANGE2SW BIT(17)
+#define ANA_MACACCESS_CHANGE2SW_SET(x)\
+ FIELD_PREP(ANA_MACACCESS_CHANGE2SW, x)
+#define ANA_MACACCESS_CHANGE2SW_GET(x)\
+ FIELD_GET(ANA_MACACCESS_CHANGE2SW, x)
+
+#define ANA_MACACCESS_MAC_CPU_COPY BIT(16)
+#define ANA_MACACCESS_MAC_CPU_COPY_SET(x)\
+ FIELD_PREP(ANA_MACACCESS_MAC_CPU_COPY, x)
+#define ANA_MACACCESS_MAC_CPU_COPY_GET(x)\
+ FIELD_GET(ANA_MACACCESS_MAC_CPU_COPY, x)
+
+#define ANA_MACACCESS_VALID BIT(12)
+#define ANA_MACACCESS_VALID_SET(x)\
+ FIELD_PREP(ANA_MACACCESS_VALID, x)
+#define ANA_MACACCESS_VALID_GET(x)\
+ FIELD_GET(ANA_MACACCESS_VALID, x)
+
+#define ANA_MACACCESS_ENTRYTYPE GENMASK(11, 10)
+#define ANA_MACACCESS_ENTRYTYPE_SET(x)\
+ FIELD_PREP(ANA_MACACCESS_ENTRYTYPE, x)
+#define ANA_MACACCESS_ENTRYTYPE_GET(x)\
+ FIELD_GET(ANA_MACACCESS_ENTRYTYPE, x)
+
+#define ANA_MACACCESS_DEST_IDX GENMASK(9, 4)
+#define ANA_MACACCESS_DEST_IDX_SET(x)\
+ FIELD_PREP(ANA_MACACCESS_DEST_IDX, x)
+#define ANA_MACACCESS_DEST_IDX_GET(x)\
+ FIELD_GET(ANA_MACACCESS_DEST_IDX, x)
+
+#define ANA_MACACCESS_MAC_TABLE_CMD GENMASK(3, 0)
+#define ANA_MACACCESS_MAC_TABLE_CMD_SET(x)\
+ FIELD_PREP(ANA_MACACCESS_MAC_TABLE_CMD, x)
+#define ANA_MACACCESS_MAC_TABLE_CMD_GET(x)\
+ FIELD_GET(ANA_MACACCESS_MAC_TABLE_CMD, x)
+
+/* ANA:ANA_TABLES:MACTINDX */
+#define ANA_MACTINDX __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 52, 0, 1, 4)
+
+#define ANA_MACTINDX_BUCKET GENMASK(12, 11)
+#define ANA_MACTINDX_BUCKET_SET(x)\
+ FIELD_PREP(ANA_MACTINDX_BUCKET, x)
+#define ANA_MACTINDX_BUCKET_GET(x)\
+ FIELD_GET(ANA_MACTINDX_BUCKET, x)
+
+#define ANA_MACTINDX_M_INDEX GENMASK(10, 0)
+#define ANA_MACTINDX_M_INDEX_SET(x)\
+ FIELD_PREP(ANA_MACTINDX_M_INDEX, x)
+#define ANA_MACTINDX_M_INDEX_GET(x)\
+ FIELD_GET(ANA_MACTINDX_M_INDEX, x)
+
+/* ANA:ANA_TABLES:VLAN_PORT_MASK */
+#define ANA_VLAN_PORT_MASK __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 56, 0, 1, 4)
+
+#define ANA_VLAN_PORT_MASK_VLAN_PORT_MASK GENMASK(8, 0)
+#define ANA_VLAN_PORT_MASK_VLAN_PORT_MASK_SET(x)\
+ FIELD_PREP(ANA_VLAN_PORT_MASK_VLAN_PORT_MASK, x)
+#define ANA_VLAN_PORT_MASK_VLAN_PORT_MASK_GET(x)\
+ FIELD_GET(ANA_VLAN_PORT_MASK_VLAN_PORT_MASK, x)
+
+/* ANA:ANA_TABLES:VLANACCESS */
+#define ANA_VLANACCESS __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 60, 0, 1, 4)
+
+#define ANA_VLANACCESS_VLAN_TBL_CMD GENMASK(1, 0)
+#define ANA_VLANACCESS_VLAN_TBL_CMD_SET(x)\
+ FIELD_PREP(ANA_VLANACCESS_VLAN_TBL_CMD, x)
+#define ANA_VLANACCESS_VLAN_TBL_CMD_GET(x)\
+ FIELD_GET(ANA_VLANACCESS_VLAN_TBL_CMD, x)
+
+/* ANA:ANA_TABLES:VLANTIDX */
+#define ANA_VLANTIDX __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 64, 0, 1, 4)
+
+#define ANA_VLANTIDX_VLAN_PGID_CPU_DIS BIT(18)
+#define ANA_VLANTIDX_VLAN_PGID_CPU_DIS_SET(x)\
+ FIELD_PREP(ANA_VLANTIDX_VLAN_PGID_CPU_DIS, x)
+#define ANA_VLANTIDX_VLAN_PGID_CPU_DIS_GET(x)\
+ FIELD_GET(ANA_VLANTIDX_VLAN_PGID_CPU_DIS, x)
+
+#define ANA_VLANTIDX_V_INDEX GENMASK(11, 0)
+#define ANA_VLANTIDX_V_INDEX_SET(x)\
+ FIELD_PREP(ANA_VLANTIDX_V_INDEX, x)
+#define ANA_VLANTIDX_V_INDEX_GET(x)\
+ FIELD_GET(ANA_VLANTIDX_V_INDEX, x)
+
+/* ANA:PORT:VLAN_CFG */
+#define ANA_VLAN_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 0, 0, 1, 4)
+
+#define ANA_VLAN_CFG_VLAN_AWARE_ENA BIT(20)
+#define ANA_VLAN_CFG_VLAN_AWARE_ENA_SET(x)\
+ FIELD_PREP(ANA_VLAN_CFG_VLAN_AWARE_ENA, x)
+#define ANA_VLAN_CFG_VLAN_AWARE_ENA_GET(x)\
+ FIELD_GET(ANA_VLAN_CFG_VLAN_AWARE_ENA, x)
+
+#define ANA_VLAN_CFG_VLAN_POP_CNT GENMASK(19, 18)
+#define ANA_VLAN_CFG_VLAN_POP_CNT_SET(x)\
+ FIELD_PREP(ANA_VLAN_CFG_VLAN_POP_CNT, x)
+#define ANA_VLAN_CFG_VLAN_POP_CNT_GET(x)\
+ FIELD_GET(ANA_VLAN_CFG_VLAN_POP_CNT, x)
+
+#define ANA_VLAN_CFG_VLAN_PCP GENMASK(15, 13)
+#define ANA_VLAN_CFG_VLAN_PCP_SET(x)\
+ FIELD_PREP(ANA_VLAN_CFG_VLAN_PCP, x)
+#define ANA_VLAN_CFG_VLAN_PCP_GET(x)\
+ FIELD_GET(ANA_VLAN_CFG_VLAN_PCP, x)
+
+#define ANA_VLAN_CFG_VLAN_DEI BIT(12)
+#define ANA_VLAN_CFG_VLAN_DEI_SET(x)\
+ FIELD_PREP(ANA_VLAN_CFG_VLAN_DEI, x)
+#define ANA_VLAN_CFG_VLAN_DEI_GET(x)\
+ FIELD_GET(ANA_VLAN_CFG_VLAN_DEI, x)
+
+#define ANA_VLAN_CFG_VLAN_VID GENMASK(11, 0)
+#define ANA_VLAN_CFG_VLAN_VID_SET(x)\
+ FIELD_PREP(ANA_VLAN_CFG_VLAN_VID, x)
+#define ANA_VLAN_CFG_VLAN_VID_GET(x)\
+ FIELD_GET(ANA_VLAN_CFG_VLAN_VID, x)
+
+/* ANA:PORT:DROP_CFG */
+#define ANA_DROP_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 4, 0, 1, 4)
+
+#define ANA_DROP_CFG_DROP_UNTAGGED_ENA BIT(6)
+#define ANA_DROP_CFG_DROP_UNTAGGED_ENA_SET(x)\
+ FIELD_PREP(ANA_DROP_CFG_DROP_UNTAGGED_ENA, x)
+#define ANA_DROP_CFG_DROP_UNTAGGED_ENA_GET(x)\
+ FIELD_GET(ANA_DROP_CFG_DROP_UNTAGGED_ENA, x)
+
+#define ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA BIT(3)
+#define ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA_SET(x)\
+ FIELD_PREP(ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA, x)
+#define ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA_GET(x)\
+ FIELD_GET(ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA, x)
+
+#define ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA BIT(2)
+#define ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA_SET(x)\
+ FIELD_PREP(ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA, x)
+#define ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA_GET(x)\
+ FIELD_GET(ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA, x)
+
+#define ANA_DROP_CFG_DROP_MC_SMAC_ENA BIT(0)
+#define ANA_DROP_CFG_DROP_MC_SMAC_ENA_SET(x)\
+ FIELD_PREP(ANA_DROP_CFG_DROP_MC_SMAC_ENA, x)
+#define ANA_DROP_CFG_DROP_MC_SMAC_ENA_GET(x)\
+ FIELD_GET(ANA_DROP_CFG_DROP_MC_SMAC_ENA, x)
+
+/* ANA:PORT:QOS_CFG */
+#define ANA_QOS_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 8, 0, 1, 4)
+
+#define ANA_QOS_CFG_DP_DEFAULT_VAL BIT(8)
+#define ANA_QOS_CFG_DP_DEFAULT_VAL_SET(x)\
+ FIELD_PREP(ANA_QOS_CFG_DP_DEFAULT_VAL, x)
+#define ANA_QOS_CFG_DP_DEFAULT_VAL_GET(x)\
+ FIELD_GET(ANA_QOS_CFG_DP_DEFAULT_VAL, x)
+
+#define ANA_QOS_CFG_QOS_DEFAULT_VAL GENMASK(7, 5)
+#define ANA_QOS_CFG_QOS_DEFAULT_VAL_SET(x)\
+ FIELD_PREP(ANA_QOS_CFG_QOS_DEFAULT_VAL, x)
+#define ANA_QOS_CFG_QOS_DEFAULT_VAL_GET(x)\
+ FIELD_GET(ANA_QOS_CFG_QOS_DEFAULT_VAL, x)
+
+#define ANA_QOS_CFG_QOS_DSCP_ENA BIT(4)
+#define ANA_QOS_CFG_QOS_DSCP_ENA_SET(x)\
+ FIELD_PREP(ANA_QOS_CFG_QOS_DSCP_ENA, x)
+#define ANA_QOS_CFG_QOS_DSCP_ENA_GET(x)\
+ FIELD_GET(ANA_QOS_CFG_QOS_DSCP_ENA, x)
+
+#define ANA_QOS_CFG_QOS_PCP_ENA BIT(3)
+#define ANA_QOS_CFG_QOS_PCP_ENA_SET(x)\
+ FIELD_PREP(ANA_QOS_CFG_QOS_PCP_ENA, x)
+#define ANA_QOS_CFG_QOS_PCP_ENA_GET(x)\
+ FIELD_GET(ANA_QOS_CFG_QOS_PCP_ENA, x)
+
+#define ANA_QOS_CFG_DSCP_REWR_CFG GENMASK(1, 0)
+#define ANA_QOS_CFG_DSCP_REWR_CFG_SET(x)\
+ FIELD_PREP(ANA_QOS_CFG_DSCP_REWR_CFG, x)
+#define ANA_QOS_CFG_DSCP_REWR_CFG_GET(x)\
+ FIELD_GET(ANA_QOS_CFG_DSCP_REWR_CFG, x)
+
+/* ANA:PORT:VCAP_CFG */
+#define ANA_VCAP_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 12, 0, 1, 4)
+
+#define ANA_VCAP_CFG_S1_ENA BIT(14)
+#define ANA_VCAP_CFG_S1_ENA_SET(x)\
+ FIELD_PREP(ANA_VCAP_CFG_S1_ENA, x)
+#define ANA_VCAP_CFG_S1_ENA_GET(x)\
+ FIELD_GET(ANA_VCAP_CFG_S1_ENA, x)
+
+/* ANA:PORT:VCAP_S1_KEY_CFG */
+#define ANA_VCAP_S1_CFG(g, r) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 16, r, 3, 4)
+
+#define ANA_VCAP_S1_CFG_KEY_RT_CFG GENMASK(11, 9)
+#define ANA_VCAP_S1_CFG_KEY_RT_CFG_SET(x)\
+ FIELD_PREP(ANA_VCAP_S1_CFG_KEY_RT_CFG, x)
+#define ANA_VCAP_S1_CFG_KEY_RT_CFG_GET(x)\
+ FIELD_GET(ANA_VCAP_S1_CFG_KEY_RT_CFG, x)
+
+#define ANA_VCAP_S1_CFG_KEY_IP6_CFG GENMASK(8, 6)
+#define ANA_VCAP_S1_CFG_KEY_IP6_CFG_SET(x)\
+ FIELD_PREP(ANA_VCAP_S1_CFG_KEY_IP6_CFG, x)
+#define ANA_VCAP_S1_CFG_KEY_IP6_CFG_GET(x)\
+ FIELD_GET(ANA_VCAP_S1_CFG_KEY_IP6_CFG, x)
+
+#define ANA_VCAP_S1_CFG_KEY_IP4_CFG GENMASK(5, 3)
+#define ANA_VCAP_S1_CFG_KEY_IP4_CFG_SET(x)\
+ FIELD_PREP(ANA_VCAP_S1_CFG_KEY_IP4_CFG, x)
+#define ANA_VCAP_S1_CFG_KEY_IP4_CFG_GET(x)\
+ FIELD_GET(ANA_VCAP_S1_CFG_KEY_IP4_CFG, x)
+
+#define ANA_VCAP_S1_CFG_KEY_OTHER_CFG GENMASK(2, 0)
+#define ANA_VCAP_S1_CFG_KEY_OTHER_CFG_SET(x)\
+ FIELD_PREP(ANA_VCAP_S1_CFG_KEY_OTHER_CFG, x)
+#define ANA_VCAP_S1_CFG_KEY_OTHER_CFG_GET(x)\
+ FIELD_GET(ANA_VCAP_S1_CFG_KEY_OTHER_CFG, x)
+
+/* ANA:PORT:VCAP_S2_CFG */
+#define ANA_VCAP_S2_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 28, 0, 1, 4)
+
+#define ANA_VCAP_S2_CFG_ISDX_ENA GENMASK(20, 19)
+#define ANA_VCAP_S2_CFG_ISDX_ENA_SET(x)\
+ FIELD_PREP(ANA_VCAP_S2_CFG_ISDX_ENA, x)
+#define ANA_VCAP_S2_CFG_ISDX_ENA_GET(x)\
+ FIELD_GET(ANA_VCAP_S2_CFG_ISDX_ENA, x)
+
+#define ANA_VCAP_S2_CFG_UDP_PAYLOAD_ENA GENMASK(18, 17)
+#define ANA_VCAP_S2_CFG_UDP_PAYLOAD_ENA_SET(x)\
+ FIELD_PREP(ANA_VCAP_S2_CFG_UDP_PAYLOAD_ENA, x)
+#define ANA_VCAP_S2_CFG_UDP_PAYLOAD_ENA_GET(x)\
+ FIELD_GET(ANA_VCAP_S2_CFG_UDP_PAYLOAD_ENA, x)
+
+#define ANA_VCAP_S2_CFG_ETYPE_PAYLOAD_ENA GENMASK(16, 15)
+#define ANA_VCAP_S2_CFG_ETYPE_PAYLOAD_ENA_SET(x)\
+ FIELD_PREP(ANA_VCAP_S2_CFG_ETYPE_PAYLOAD_ENA, x)
+#define ANA_VCAP_S2_CFG_ETYPE_PAYLOAD_ENA_GET(x)\
+ FIELD_GET(ANA_VCAP_S2_CFG_ETYPE_PAYLOAD_ENA, x)
+
+#define ANA_VCAP_S2_CFG_ENA BIT(14)
+#define ANA_VCAP_S2_CFG_ENA_SET(x)\
+ FIELD_PREP(ANA_VCAP_S2_CFG_ENA, x)
+#define ANA_VCAP_S2_CFG_ENA_GET(x)\
+ FIELD_GET(ANA_VCAP_S2_CFG_ENA, x)
+
+#define ANA_VCAP_S2_CFG_SNAP_DIS GENMASK(13, 12)
+#define ANA_VCAP_S2_CFG_SNAP_DIS_SET(x)\
+ FIELD_PREP(ANA_VCAP_S2_CFG_SNAP_DIS, x)
+#define ANA_VCAP_S2_CFG_SNAP_DIS_GET(x)\
+ FIELD_GET(ANA_VCAP_S2_CFG_SNAP_DIS, x)
+
+#define ANA_VCAP_S2_CFG_ARP_DIS GENMASK(11, 10)
+#define ANA_VCAP_S2_CFG_ARP_DIS_SET(x)\
+ FIELD_PREP(ANA_VCAP_S2_CFG_ARP_DIS, x)
+#define ANA_VCAP_S2_CFG_ARP_DIS_GET(x)\
+ FIELD_GET(ANA_VCAP_S2_CFG_ARP_DIS, x)
+
+#define ANA_VCAP_S2_CFG_IP_TCPUDP_DIS GENMASK(9, 8)
+#define ANA_VCAP_S2_CFG_IP_TCPUDP_DIS_SET(x)\
+ FIELD_PREP(ANA_VCAP_S2_CFG_IP_TCPUDP_DIS, x)
+#define ANA_VCAP_S2_CFG_IP_TCPUDP_DIS_GET(x)\
+ FIELD_GET(ANA_VCAP_S2_CFG_IP_TCPUDP_DIS, x)
+
+#define ANA_VCAP_S2_CFG_IP_OTHER_DIS GENMASK(7, 6)
+#define ANA_VCAP_S2_CFG_IP_OTHER_DIS_SET(x)\
+ FIELD_PREP(ANA_VCAP_S2_CFG_IP_OTHER_DIS, x)
+#define ANA_VCAP_S2_CFG_IP_OTHER_DIS_GET(x)\
+ FIELD_GET(ANA_VCAP_S2_CFG_IP_OTHER_DIS, x)
+
+#define ANA_VCAP_S2_CFG_IP6_CFG GENMASK(5, 2)
+#define ANA_VCAP_S2_CFG_IP6_CFG_SET(x)\
+ FIELD_PREP(ANA_VCAP_S2_CFG_IP6_CFG, x)
+#define ANA_VCAP_S2_CFG_IP6_CFG_GET(x)\
+ FIELD_GET(ANA_VCAP_S2_CFG_IP6_CFG, x)
+
+#define ANA_VCAP_S2_CFG_OAM_DIS GENMASK(1, 0)
+#define ANA_VCAP_S2_CFG_OAM_DIS_SET(x)\
+ FIELD_PREP(ANA_VCAP_S2_CFG_OAM_DIS, x)
+#define ANA_VCAP_S2_CFG_OAM_DIS_GET(x)\
+ FIELD_GET(ANA_VCAP_S2_CFG_OAM_DIS, x)
+
+/* ANA:PORT:QOS_PCP_DEI_MAP_CFG */
+#define ANA_PCP_DEI_CFG(g, r) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 32, r, 16, 4)
+
+#define ANA_PCP_DEI_CFG_DP_PCP_DEI_VAL BIT(3)
+#define ANA_PCP_DEI_CFG_DP_PCP_DEI_VAL_SET(x)\
+ FIELD_PREP(ANA_PCP_DEI_CFG_DP_PCP_DEI_VAL, x)
+#define ANA_PCP_DEI_CFG_DP_PCP_DEI_VAL_GET(x)\
+ FIELD_GET(ANA_PCP_DEI_CFG_DP_PCP_DEI_VAL, x)
+
+#define ANA_PCP_DEI_CFG_QOS_PCP_DEI_VAL GENMASK(2, 0)
+#define ANA_PCP_DEI_CFG_QOS_PCP_DEI_VAL_SET(x)\
+ FIELD_PREP(ANA_PCP_DEI_CFG_QOS_PCP_DEI_VAL, x)
+#define ANA_PCP_DEI_CFG_QOS_PCP_DEI_VAL_GET(x)\
+ FIELD_GET(ANA_PCP_DEI_CFG_QOS_PCP_DEI_VAL, x)
+
+/* ANA:PORT:CPU_FWD_CFG */
+#define ANA_CPU_FWD_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 96, 0, 1, 4)
+
+#define ANA_CPU_FWD_CFG_MLD_REDIR_ENA BIT(6)
+#define ANA_CPU_FWD_CFG_MLD_REDIR_ENA_SET(x)\
+ FIELD_PREP(ANA_CPU_FWD_CFG_MLD_REDIR_ENA, x)
+#define ANA_CPU_FWD_CFG_MLD_REDIR_ENA_GET(x)\
+ FIELD_GET(ANA_CPU_FWD_CFG_MLD_REDIR_ENA, x)
+
+#define ANA_CPU_FWD_CFG_IGMP_REDIR_ENA BIT(5)
+#define ANA_CPU_FWD_CFG_IGMP_REDIR_ENA_SET(x)\
+ FIELD_PREP(ANA_CPU_FWD_CFG_IGMP_REDIR_ENA, x)
+#define ANA_CPU_FWD_CFG_IGMP_REDIR_ENA_GET(x)\
+ FIELD_GET(ANA_CPU_FWD_CFG_IGMP_REDIR_ENA, x)
+
+#define ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA BIT(4)
+#define ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA_SET(x)\
+ FIELD_PREP(ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA, x)
+#define ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA_GET(x)\
+ FIELD_GET(ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA, x)
+
+#define ANA_CPU_FWD_CFG_SRC_COPY_ENA BIT(3)
+#define ANA_CPU_FWD_CFG_SRC_COPY_ENA_SET(x)\
+ FIELD_PREP(ANA_CPU_FWD_CFG_SRC_COPY_ENA, x)
+#define ANA_CPU_FWD_CFG_SRC_COPY_ENA_GET(x)\
+ FIELD_GET(ANA_CPU_FWD_CFG_SRC_COPY_ENA, x)
+
+/* ANA:PORT:CPU_FWD_BPDU_CFG */
+#define ANA_CPU_FWD_BPDU_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 100, 0, 1, 4)
+
+/* ANA:PORT:PORT_CFG */
+#define ANA_PORT_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 112, 0, 1, 4)
+
+#define ANA_PORT_CFG_SRC_MIRROR_ENA BIT(13)
+#define ANA_PORT_CFG_SRC_MIRROR_ENA_SET(x)\
+ FIELD_PREP(ANA_PORT_CFG_SRC_MIRROR_ENA, x)
+#define ANA_PORT_CFG_SRC_MIRROR_ENA_GET(x)\
+ FIELD_GET(ANA_PORT_CFG_SRC_MIRROR_ENA, x)
+
+#define ANA_PORT_CFG_LEARNAUTO BIT(6)
+#define ANA_PORT_CFG_LEARNAUTO_SET(x)\
+ FIELD_PREP(ANA_PORT_CFG_LEARNAUTO, x)
+#define ANA_PORT_CFG_LEARNAUTO_GET(x)\
+ FIELD_GET(ANA_PORT_CFG_LEARNAUTO, x)
+
+#define ANA_PORT_CFG_LEARN_ENA BIT(5)
+#define ANA_PORT_CFG_LEARN_ENA_SET(x)\
+ FIELD_PREP(ANA_PORT_CFG_LEARN_ENA, x)
+#define ANA_PORT_CFG_LEARN_ENA_GET(x)\
+ FIELD_GET(ANA_PORT_CFG_LEARN_ENA, x)
+
+#define ANA_PORT_CFG_RECV_ENA BIT(4)
+#define ANA_PORT_CFG_RECV_ENA_SET(x)\
+ FIELD_PREP(ANA_PORT_CFG_RECV_ENA, x)
+#define ANA_PORT_CFG_RECV_ENA_GET(x)\
+ FIELD_GET(ANA_PORT_CFG_RECV_ENA, x)
+
+#define ANA_PORT_CFG_PORTID_VAL GENMASK(3, 0)
+#define ANA_PORT_CFG_PORTID_VAL_SET(x)\
+ FIELD_PREP(ANA_PORT_CFG_PORTID_VAL, x)
+#define ANA_PORT_CFG_PORTID_VAL_GET(x)\
+ FIELD_GET(ANA_PORT_CFG_PORTID_VAL, x)
+
+/* ANA:COMMON:DSCP_REWR_CFG */
+#define ANA_DSCP_REWR_CFG(r) __REG(TARGET_ANA, 0, 1, 31232, 0, 1, 552, 332, r, 16, 4)
+
+#define ANA_DSCP_REWR_CFG_DSCP_QOS_REWR_VAL GENMASK(5, 0)
+#define ANA_DSCP_REWR_CFG_DSCP_QOS_REWR_VAL_SET(x)\
+ FIELD_PREP(ANA_DSCP_REWR_CFG_DSCP_QOS_REWR_VAL, x)
+#define ANA_DSCP_REWR_CFG_DSCP_QOS_REWR_VAL_GET(x)\
+ FIELD_GET(ANA_DSCP_REWR_CFG_DSCP_QOS_REWR_VAL, x)
+
+/* ANA:PORT:POL_CFG */
+#define ANA_POL_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 116, 0, 1, 4)
+
+#define ANA_POL_CFG_PORT_POL_ENA BIT(17)
+#define ANA_POL_CFG_PORT_POL_ENA_SET(x)\
+ FIELD_PREP(ANA_POL_CFG_PORT_POL_ENA, x)
+#define ANA_POL_CFG_PORT_POL_ENA_GET(x)\
+ FIELD_GET(ANA_POL_CFG_PORT_POL_ENA, x)
+
+#define ANA_POL_CFG_POL_ORDER GENMASK(8, 0)
+#define ANA_POL_CFG_POL_ORDER_SET(x)\
+ FIELD_PREP(ANA_POL_CFG_POL_ORDER, x)
+#define ANA_POL_CFG_POL_ORDER_GET(x)\
+ FIELD_GET(ANA_POL_CFG_POL_ORDER, x)
+
+/* ANA:PFC:PFC_CFG */
+#define ANA_PFC_CFG(g) __REG(TARGET_ANA, 0, 1, 30720, g, 8, 64, 0, 0, 1, 4)
+
+#define ANA_PFC_CFG_FC_LINK_SPEED GENMASK(1, 0)
+#define ANA_PFC_CFG_FC_LINK_SPEED_SET(x)\
+ FIELD_PREP(ANA_PFC_CFG_FC_LINK_SPEED, x)
+#define ANA_PFC_CFG_FC_LINK_SPEED_GET(x)\
+ FIELD_GET(ANA_PFC_CFG_FC_LINK_SPEED, x)
+
+/* ANA:COMMON:AGGR_CFG */
+#define ANA_AGGR_CFG __REG(TARGET_ANA, 0, 1, 31232, 0, 1, 552, 0, 0, 1, 4)
+
+#define ANA_AGGR_CFG_AC_RND_ENA BIT(6)
+#define ANA_AGGR_CFG_AC_RND_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_RND_ENA, x)
+#define ANA_AGGR_CFG_AC_RND_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_RND_ENA, x)
+
+#define ANA_AGGR_CFG_AC_DMAC_ENA BIT(5)
+#define ANA_AGGR_CFG_AC_DMAC_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_DMAC_ENA, x)
+#define ANA_AGGR_CFG_AC_DMAC_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_DMAC_ENA, x)
+
+#define ANA_AGGR_CFG_AC_SMAC_ENA BIT(4)
+#define ANA_AGGR_CFG_AC_SMAC_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_SMAC_ENA, x)
+#define ANA_AGGR_CFG_AC_SMAC_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_SMAC_ENA, x)
+
+#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA BIT(3)
+#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA, x)
+#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA, x)
+
+#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA BIT(2)
+#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA, x)
+#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA, x)
+
+#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA BIT(1)
+#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA, x)
+#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA, x)
+
+#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA BIT(0)
+#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA, x)
+#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA, x)
+
+/* ANA:COMMON:DSCP_CFG */
+#define ANA_DSCP_CFG(r) __REG(TARGET_ANA, 0, 1, 31232, 0, 1, 552, 76, r, 64, 4)
+
+#define ANA_DSCP_CFG_DP_DSCP_VAL BIT(11)
+#define ANA_DSCP_CFG_DP_DSCP_VAL_SET(x)\
+ FIELD_PREP(ANA_DSCP_CFG_DP_DSCP_VAL, x)
+#define ANA_DSCP_CFG_DP_DSCP_VAL_GET(x)\
+ FIELD_GET(ANA_DSCP_CFG_DP_DSCP_VAL, x)
+
+#define ANA_DSCP_CFG_QOS_DSCP_VAL GENMASK(10, 8)
+#define ANA_DSCP_CFG_QOS_DSCP_VAL_SET(x)\
+ FIELD_PREP(ANA_DSCP_CFG_QOS_DSCP_VAL, x)
+#define ANA_DSCP_CFG_QOS_DSCP_VAL_GET(x)\
+ FIELD_GET(ANA_DSCP_CFG_QOS_DSCP_VAL, x)
+
+#define ANA_DSCP_CFG_DSCP_TRUST_ENA BIT(1)
+#define ANA_DSCP_CFG_DSCP_TRUST_ENA_SET(x)\
+ FIELD_PREP(ANA_DSCP_CFG_DSCP_TRUST_ENA, x)
+#define ANA_DSCP_CFG_DSCP_TRUST_ENA_GET(x)\
+ FIELD_GET(ANA_DSCP_CFG_DSCP_TRUST_ENA, x)
+
+#define ANA_DSCP_CFG_DSCP_REWR_ENA BIT(0)
+#define ANA_DSCP_CFG_DSCP_REWR_ENA_SET(x)\
+ FIELD_PREP(ANA_DSCP_CFG_DSCP_REWR_ENA, x)
+#define ANA_DSCP_CFG_DSCP_REWR_ENA_GET(x)\
+ FIELD_GET(ANA_DSCP_CFG_DSCP_REWR_ENA, x)
+
+/* ANA:POL:POL_PIR_CFG */
+#define ANA_POL_PIR_CFG(g) __REG(TARGET_ANA, 0, 1, 16384, g, 345, 32, 0, 0, 1, 4)
+
+#define ANA_POL_PIR_CFG_PIR_RATE GENMASK(20, 6)
+#define ANA_POL_PIR_CFG_PIR_RATE_SET(x)\
+ FIELD_PREP(ANA_POL_PIR_CFG_PIR_RATE, x)
+#define ANA_POL_PIR_CFG_PIR_RATE_GET(x)\
+ FIELD_GET(ANA_POL_PIR_CFG_PIR_RATE, x)
+
+#define ANA_POL_PIR_CFG_PIR_BURST GENMASK(5, 0)
+#define ANA_POL_PIR_CFG_PIR_BURST_SET(x)\
+ FIELD_PREP(ANA_POL_PIR_CFG_PIR_BURST, x)
+#define ANA_POL_PIR_CFG_PIR_BURST_GET(x)\
+ FIELD_GET(ANA_POL_PIR_CFG_PIR_BURST, x)
+
+/* ANA:POL:POL_MODE_CFG */
+#define ANA_POL_MODE(g) __REG(TARGET_ANA, 0, 1, 16384, g, 345, 32, 8, 0, 1, 4)
+
+#define ANA_POL_MODE_DROP_ON_YELLOW_ENA BIT(11)
+#define ANA_POL_MODE_DROP_ON_YELLOW_ENA_SET(x)\
+ FIELD_PREP(ANA_POL_MODE_DROP_ON_YELLOW_ENA, x)
+#define ANA_POL_MODE_DROP_ON_YELLOW_ENA_GET(x)\
+ FIELD_GET(ANA_POL_MODE_DROP_ON_YELLOW_ENA, x)
+
+#define ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA BIT(10)
+#define ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA_SET(x)\
+ FIELD_PREP(ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA, x)
+#define ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA_GET(x)\
+ FIELD_GET(ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA, x)
+
+#define ANA_POL_MODE_IPG_SIZE GENMASK(9, 5)
+#define ANA_POL_MODE_IPG_SIZE_SET(x)\
+ FIELD_PREP(ANA_POL_MODE_IPG_SIZE, x)
+#define ANA_POL_MODE_IPG_SIZE_GET(x)\
+ FIELD_GET(ANA_POL_MODE_IPG_SIZE, x)
+
+#define ANA_POL_MODE_FRM_MODE GENMASK(4, 3)
+#define ANA_POL_MODE_FRM_MODE_SET(x)\
+ FIELD_PREP(ANA_POL_MODE_FRM_MODE, x)
+#define ANA_POL_MODE_FRM_MODE_GET(x)\
+ FIELD_GET(ANA_POL_MODE_FRM_MODE, x)
+
+#define ANA_POL_MODE_OVERSHOOT_ENA BIT(0)
+#define ANA_POL_MODE_OVERSHOOT_ENA_SET(x)\
+ FIELD_PREP(ANA_POL_MODE_OVERSHOOT_ENA, x)
+#define ANA_POL_MODE_OVERSHOOT_ENA_GET(x)\
+ FIELD_GET(ANA_POL_MODE_OVERSHOOT_ENA, x)
+
+/* ANA:POL:POL_PIR_STATE */
+#define ANA_POL_PIR_STATE(g) __REG(TARGET_ANA, 0, 1, 16384, g, 345, 32, 12, 0, 1, 4)
+
+#define ANA_POL_PIR_STATE_PIR_LVL GENMASK(21, 0)
+#define ANA_POL_PIR_STATE_PIR_LVL_SET(x)\
+ FIELD_PREP(ANA_POL_PIR_STATE_PIR_LVL, x)
+#define ANA_POL_PIR_STATE_PIR_LVL_GET(x)\
+ FIELD_GET(ANA_POL_PIR_STATE_PIR_LVL, x)
+
+/* CHIP_TOP:CUPHY_CFG:CUPHY_PORT_CFG */
+#define CHIP_TOP_CUPHY_PORT_CFG(r) __REG(TARGET_CHIP_TOP, 0, 1, 16, 0, 1, 20, 8, r, 2, 4)
+
+#define CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA BIT(0)
+#define CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_SET(x)\
+ FIELD_PREP(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA, x)
+#define CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_GET(x)\
+ FIELD_GET(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA, x)
+
+/* DEV:PORT_MODE:CLOCK_CFG */
+#define DEV_CLOCK_CFG(t) __REG(TARGET_DEV, t, 8, 0, 0, 1, 28, 0, 0, 1, 4)
+
+#define DEV_CLOCK_CFG_MAC_TX_RST BIT(7)
+#define DEV_CLOCK_CFG_MAC_TX_RST_SET(x)\
+ FIELD_PREP(DEV_CLOCK_CFG_MAC_TX_RST, x)
+#define DEV_CLOCK_CFG_MAC_TX_RST_GET(x)\
+ FIELD_GET(DEV_CLOCK_CFG_MAC_TX_RST, x)
+
+#define DEV_CLOCK_CFG_MAC_RX_RST BIT(6)
+#define DEV_CLOCK_CFG_MAC_RX_RST_SET(x)\
+ FIELD_PREP(DEV_CLOCK_CFG_MAC_RX_RST, x)
+#define DEV_CLOCK_CFG_MAC_RX_RST_GET(x)\
+ FIELD_GET(DEV_CLOCK_CFG_MAC_RX_RST, x)
+
+#define DEV_CLOCK_CFG_PCS_TX_RST BIT(5)
+#define DEV_CLOCK_CFG_PCS_TX_RST_SET(x)\
+ FIELD_PREP(DEV_CLOCK_CFG_PCS_TX_RST, x)
+#define DEV_CLOCK_CFG_PCS_TX_RST_GET(x)\
+ FIELD_GET(DEV_CLOCK_CFG_PCS_TX_RST, x)
+
+#define DEV_CLOCK_CFG_PCS_RX_RST BIT(4)
+#define DEV_CLOCK_CFG_PCS_RX_RST_SET(x)\
+ FIELD_PREP(DEV_CLOCK_CFG_PCS_RX_RST, x)
+#define DEV_CLOCK_CFG_PCS_RX_RST_GET(x)\
+ FIELD_GET(DEV_CLOCK_CFG_PCS_RX_RST, x)
+
+#define DEV_CLOCK_CFG_PORT_RST BIT(3)
+#define DEV_CLOCK_CFG_PORT_RST_SET(x)\
+ FIELD_PREP(DEV_CLOCK_CFG_PORT_RST, x)
+#define DEV_CLOCK_CFG_PORT_RST_GET(x)\
+ FIELD_GET(DEV_CLOCK_CFG_PORT_RST, x)
+
+#define DEV_CLOCK_CFG_LINK_SPEED GENMASK(1, 0)
+#define DEV_CLOCK_CFG_LINK_SPEED_SET(x)\
+ FIELD_PREP(DEV_CLOCK_CFG_LINK_SPEED, x)
+#define DEV_CLOCK_CFG_LINK_SPEED_GET(x)\
+ FIELD_GET(DEV_CLOCK_CFG_LINK_SPEED, x)
+
+/* DEV:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEV_MAC_ENA_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 0, 0, 1, 4)
+
+#define DEV_MAC_ENA_CFG_RX_ENA BIT(4)
+#define DEV_MAC_ENA_CFG_RX_ENA_SET(x)\
+ FIELD_PREP(DEV_MAC_ENA_CFG_RX_ENA, x)
+#define DEV_MAC_ENA_CFG_RX_ENA_GET(x)\
+ FIELD_GET(DEV_MAC_ENA_CFG_RX_ENA, x)
+
+#define DEV_MAC_ENA_CFG_TX_ENA BIT(0)
+#define DEV_MAC_ENA_CFG_TX_ENA_SET(x)\
+ FIELD_PREP(DEV_MAC_ENA_CFG_TX_ENA, x)
+#define DEV_MAC_ENA_CFG_TX_ENA_GET(x)\
+ FIELD_GET(DEV_MAC_ENA_CFG_TX_ENA, x)
+
+/* DEV:MAC_CFG_STATUS:MAC_MODE_CFG */
+#define DEV_MAC_MODE_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 4, 0, 1, 4)
+
+#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4)
+#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(x)\
+ FIELD_PREP(DEV_MAC_MODE_CFG_GIGA_MODE_ENA, x)
+#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA_GET(x)\
+ FIELD_GET(DEV_MAC_MODE_CFG_GIGA_MODE_ENA, x)
+
+/* DEV:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
+#define DEV_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 8, 0, 1, 4)
+
+#define DEV_MAC_MAXLEN_CFG_MAX_LEN GENMASK(15, 0)
+#define DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\
+ FIELD_PREP(DEV_MAC_MAXLEN_CFG_MAX_LEN, x)
+#define DEV_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\
+ FIELD_GET(DEV_MAC_MAXLEN_CFG_MAX_LEN, x)
+
+/* DEV:MAC_CFG_STATUS:MAC_TAGS_CFG */
+#define DEV_MAC_TAGS_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 12, 0, 1, 4)
+
+#define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA BIT(1)
+#define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA_SET(x)\
+ FIELD_PREP(DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA, x)
+#define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA_GET(x)\
+ FIELD_GET(DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA, x)
+
+#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0)
+#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(x)\
+ FIELD_PREP(DEV_MAC_TAGS_CFG_VLAN_AWR_ENA, x)
+#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA_GET(x)\
+ FIELD_GET(DEV_MAC_TAGS_CFG_VLAN_AWR_ENA, x)
+
+/* DEV:MAC_CFG_STATUS:MAC_IFG_CFG */
+#define DEV_MAC_IFG_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 20, 0, 1, 4)
+
+#define DEV_MAC_IFG_CFG_TX_IFG GENMASK(12, 8)
+#define DEV_MAC_IFG_CFG_TX_IFG_SET(x)\
+ FIELD_PREP(DEV_MAC_IFG_CFG_TX_IFG, x)
+#define DEV_MAC_IFG_CFG_TX_IFG_GET(x)\
+ FIELD_GET(DEV_MAC_IFG_CFG_TX_IFG, x)
+
+#define DEV_MAC_IFG_CFG_RX_IFG2 GENMASK(7, 4)
+#define DEV_MAC_IFG_CFG_RX_IFG2_SET(x)\
+ FIELD_PREP(DEV_MAC_IFG_CFG_RX_IFG2, x)
+#define DEV_MAC_IFG_CFG_RX_IFG2_GET(x)\
+ FIELD_GET(DEV_MAC_IFG_CFG_RX_IFG2, x)
+
+#define DEV_MAC_IFG_CFG_RX_IFG1 GENMASK(3, 0)
+#define DEV_MAC_IFG_CFG_RX_IFG1_SET(x)\
+ FIELD_PREP(DEV_MAC_IFG_CFG_RX_IFG1, x)
+#define DEV_MAC_IFG_CFG_RX_IFG1_GET(x)\
+ FIELD_GET(DEV_MAC_IFG_CFG_RX_IFG1, x)
+
+/* DEV:MAC_CFG_STATUS:MAC_HDX_CFG */
+#define DEV_MAC_HDX_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 24, 0, 1, 4)
+
+#define DEV_MAC_HDX_CFG_SEED GENMASK(23, 16)
+#define DEV_MAC_HDX_CFG_SEED_SET(x)\
+ FIELD_PREP(DEV_MAC_HDX_CFG_SEED, x)
+#define DEV_MAC_HDX_CFG_SEED_GET(x)\
+ FIELD_GET(DEV_MAC_HDX_CFG_SEED, x)
+
+#define DEV_MAC_HDX_CFG_SEED_LOAD BIT(12)
+#define DEV_MAC_HDX_CFG_SEED_LOAD_SET(x)\
+ FIELD_PREP(DEV_MAC_HDX_CFG_SEED_LOAD, x)
+#define DEV_MAC_HDX_CFG_SEED_LOAD_GET(x)\
+ FIELD_GET(DEV_MAC_HDX_CFG_SEED_LOAD, x)
+
+/* DEV:MAC_CFG_STATUS:MAC_FC_MAC_LOW_CFG */
+#define DEV_FC_MAC_LOW_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 32, 0, 1, 4)
+
+/* DEV:MAC_CFG_STATUS:MAC_FC_MAC_HIGH_CFG */
+#define DEV_FC_MAC_HIGH_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 36, 0, 1, 4)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_CFG */
+#define DEV_PCS1G_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 0, 0, 1, 4)
+
+#define DEV_PCS1G_CFG_PCS_ENA BIT(0)
+#define DEV_PCS1G_CFG_PCS_ENA_SET(x)\
+ FIELD_PREP(DEV_PCS1G_CFG_PCS_ENA, x)
+#define DEV_PCS1G_CFG_PCS_ENA_GET(x)\
+ FIELD_GET(DEV_PCS1G_CFG_PCS_ENA, x)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_MODE_CFG */
+#define DEV_PCS1G_MODE_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 4, 0, 1, 4)
+
+#define DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA BIT(0)
+#define DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(x)\
+ FIELD_PREP(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA, x)
+#define DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_GET(x)\
+ FIELD_GET(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA, x)
+
+#define DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA BIT(1)
+#define DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA_SET(x)\
+ FIELD_PREP(DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA, x)
+#define DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA_GET(x)\
+ FIELD_GET(DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA, x)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_SD_CFG */
+#define DEV_PCS1G_SD_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 8, 0, 1, 4)
+
+#define DEV_PCS1G_SD_CFG_SD_ENA BIT(0)
+#define DEV_PCS1G_SD_CFG_SD_ENA_SET(x)\
+ FIELD_PREP(DEV_PCS1G_SD_CFG_SD_ENA, x)
+#define DEV_PCS1G_SD_CFG_SD_ENA_GET(x)\
+ FIELD_GET(DEV_PCS1G_SD_CFG_SD_ENA, x)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_ANEG_CFG */
+#define DEV_PCS1G_ANEG_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 12, 0, 1, 4)
+
+#define DEV_PCS1G_ANEG_CFG_ADV_ABILITY GENMASK(31, 16)
+#define DEV_PCS1G_ANEG_CFG_ADV_ABILITY_SET(x)\
+ FIELD_PREP(DEV_PCS1G_ANEG_CFG_ADV_ABILITY, x)
+#define DEV_PCS1G_ANEG_CFG_ADV_ABILITY_GET(x)\
+ FIELD_GET(DEV_PCS1G_ANEG_CFG_ADV_ABILITY, x)
+
+#define DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA BIT(8)
+#define DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(x)\
+ FIELD_PREP(DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA, x)
+#define DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_GET(x)\
+ FIELD_GET(DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA, x)
+
+#define DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT BIT(1)
+#define DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT_SET(x)\
+ FIELD_PREP(DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT, x)
+#define DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT_GET(x)\
+ FIELD_GET(DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT, x)
+
+#define DEV_PCS1G_ANEG_CFG_ENA BIT(0)
+#define DEV_PCS1G_ANEG_CFG_ENA_SET(x)\
+ FIELD_PREP(DEV_PCS1G_ANEG_CFG_ENA, x)
+#define DEV_PCS1G_ANEG_CFG_ENA_GET(x)\
+ FIELD_GET(DEV_PCS1G_ANEG_CFG_ENA, x)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_ANEG_STATUS */
+#define DEV_PCS1G_ANEG_STATUS(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 32, 0, 1, 4)
+
+#define DEV_PCS1G_ANEG_STATUS_LP_ADV GENMASK(31, 16)
+#define DEV_PCS1G_ANEG_STATUS_LP_ADV_SET(x)\
+ FIELD_PREP(DEV_PCS1G_ANEG_STATUS_LP_ADV, x)
+#define DEV_PCS1G_ANEG_STATUS_LP_ADV_GET(x)\
+ FIELD_GET(DEV_PCS1G_ANEG_STATUS_LP_ADV, x)
+
+#define DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE BIT(0)
+#define DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE_SET(x)\
+ FIELD_PREP(DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x)
+#define DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(x)\
+ FIELD_GET(DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_LINK_STATUS */
+#define DEV_PCS1G_LINK_STATUS(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 40, 0, 1, 4)
+
+#define DEV_PCS1G_LINK_STATUS_LINK_STATUS BIT(4)
+#define DEV_PCS1G_LINK_STATUS_LINK_STATUS_SET(x)\
+ FIELD_PREP(DEV_PCS1G_LINK_STATUS_LINK_STATUS, x)
+#define DEV_PCS1G_LINK_STATUS_LINK_STATUS_GET(x)\
+ FIELD_GET(DEV_PCS1G_LINK_STATUS_LINK_STATUS, x)
+
+#define DEV_PCS1G_LINK_STATUS_SYNC_STATUS BIT(0)
+#define DEV_PCS1G_LINK_STATUS_SYNC_STATUS_SET(x)\
+ FIELD_PREP(DEV_PCS1G_LINK_STATUS_SYNC_STATUS, x)
+#define DEV_PCS1G_LINK_STATUS_SYNC_STATUS_GET(x)\
+ FIELD_GET(DEV_PCS1G_LINK_STATUS_SYNC_STATUS, x)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_STICKY */
+#define DEV_PCS1G_STICKY(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 48, 0, 1, 4)
+
+#define DEV_PCS1G_STICKY_LINK_DOWN_STICKY BIT(4)
+#define DEV_PCS1G_STICKY_LINK_DOWN_STICKY_SET(x)\
+ FIELD_PREP(DEV_PCS1G_STICKY_LINK_DOWN_STICKY, x)
+#define DEV_PCS1G_STICKY_LINK_DOWN_STICKY_GET(x)\
+ FIELD_GET(DEV_PCS1G_STICKY_LINK_DOWN_STICKY, x)
+
+/* FDMA:FDMA:FDMA_CH_ACTIVATE */
+#define FDMA_CH_ACTIVATE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 0, 0, 1, 4)
+
+#define FDMA_CH_ACTIVATE_CH_ACTIVATE GENMASK(7, 0)
+#define FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(x)\
+ FIELD_PREP(FDMA_CH_ACTIVATE_CH_ACTIVATE, x)
+#define FDMA_CH_ACTIVATE_CH_ACTIVATE_GET(x)\
+ FIELD_GET(FDMA_CH_ACTIVATE_CH_ACTIVATE, x)
+
+/* FDMA:FDMA:FDMA_CH_RELOAD */
+#define FDMA_CH_RELOAD __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 4, 0, 1, 4)
+
+#define FDMA_CH_RELOAD_CH_RELOAD GENMASK(7, 0)
+#define FDMA_CH_RELOAD_CH_RELOAD_SET(x)\
+ FIELD_PREP(FDMA_CH_RELOAD_CH_RELOAD, x)
+#define FDMA_CH_RELOAD_CH_RELOAD_GET(x)\
+ FIELD_GET(FDMA_CH_RELOAD_CH_RELOAD, x)
+
+/* FDMA:FDMA:FDMA_CH_DISABLE */
+#define FDMA_CH_DISABLE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 8, 0, 1, 4)
+
+#define FDMA_CH_DISABLE_CH_DISABLE GENMASK(7, 0)
+#define FDMA_CH_DISABLE_CH_DISABLE_SET(x)\
+ FIELD_PREP(FDMA_CH_DISABLE_CH_DISABLE, x)
+#define FDMA_CH_DISABLE_CH_DISABLE_GET(x)\
+ FIELD_GET(FDMA_CH_DISABLE_CH_DISABLE, x)
+
+/* FDMA:FDMA:FDMA_CH_DB_DISCARD */
+#define FDMA_CH_DB_DISCARD __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 16, 0, 1, 4)
+
+#define FDMA_CH_DB_DISCARD_DB_DISCARD GENMASK(7, 0)
+#define FDMA_CH_DB_DISCARD_DB_DISCARD_SET(x)\
+ FIELD_PREP(FDMA_CH_DB_DISCARD_DB_DISCARD, x)
+#define FDMA_CH_DB_DISCARD_DB_DISCARD_GET(x)\
+ FIELD_GET(FDMA_CH_DB_DISCARD_DB_DISCARD, x)
+
+/* FDMA:FDMA:FDMA_DCB_LLP */
+#define FDMA_DCB_LLP(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 52, r, 8, 4)
+
+/* FDMA:FDMA:FDMA_DCB_LLP1 */
+#define FDMA_DCB_LLP1(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 84, r, 8, 4)
+
+/* FDMA:FDMA:FDMA_CH_ACTIVE */
+#define FDMA_CH_ACTIVE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 180, 0, 1, 4)
+
+/* FDMA:FDMA:FDMA_CH_CFG */
+#define FDMA_CH_CFG(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 224, r, 8, 4)
+
+#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY BIT(4)
+#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x)
+#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x)
+
+#define FDMA_CH_CFG_CH_INJ_PORT BIT(3)
+#define FDMA_CH_CFG_CH_INJ_PORT_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_INJ_PORT, x)
+#define FDMA_CH_CFG_CH_INJ_PORT_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_INJ_PORT, x)
+
+#define FDMA_CH_CFG_CH_DCB_DB_CNT GENMASK(2, 1)
+#define FDMA_CH_CFG_CH_DCB_DB_CNT_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_DCB_DB_CNT, x)
+#define FDMA_CH_CFG_CH_DCB_DB_CNT_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_DCB_DB_CNT, x)
+
+#define FDMA_CH_CFG_CH_MEM BIT(0)
+#define FDMA_CH_CFG_CH_MEM_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_MEM, x)
+#define FDMA_CH_CFG_CH_MEM_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_MEM, x)
+
+/* FDMA:FDMA:FDMA_PORT_CTRL */
+#define FDMA_PORT_CTRL(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 376, r, 2, 4)
+
+#define FDMA_PORT_CTRL_INJ_STOP BIT(4)
+#define FDMA_PORT_CTRL_INJ_STOP_SET(x)\
+ FIELD_PREP(FDMA_PORT_CTRL_INJ_STOP, x)
+#define FDMA_PORT_CTRL_INJ_STOP_GET(x)\
+ FIELD_GET(FDMA_PORT_CTRL_INJ_STOP, x)
+
+#define FDMA_PORT_CTRL_XTR_STOP BIT(2)
+#define FDMA_PORT_CTRL_XTR_STOP_SET(x)\
+ FIELD_PREP(FDMA_PORT_CTRL_XTR_STOP, x)
+#define FDMA_PORT_CTRL_XTR_STOP_GET(x)\
+ FIELD_GET(FDMA_PORT_CTRL_XTR_STOP, x)
+
+/* FDMA:FDMA:FDMA_INTR_DB */
+#define FDMA_INTR_DB __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 392, 0, 1, 4)
+
+/* FDMA:FDMA:FDMA_INTR_DB_ENA */
+#define FDMA_INTR_DB_ENA __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 396, 0, 1, 4)
+
+#define FDMA_INTR_DB_ENA_INTR_DB_ENA GENMASK(7, 0)
+#define FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(x)\
+ FIELD_PREP(FDMA_INTR_DB_ENA_INTR_DB_ENA, x)
+#define FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(x)\
+ FIELD_GET(FDMA_INTR_DB_ENA_INTR_DB_ENA, x)
+
+/* FDMA:FDMA:FDMA_INTR_ERR */
+#define FDMA_INTR_ERR __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 400, 0, 1, 4)
+
+/* FDMA:FDMA:FDMA_ERRORS */
+#define FDMA_ERRORS __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 412, 0, 1, 4)
+
+/* PTP:PTP_CFG:PTP_PIN_INTR */
+#define PTP_PIN_INTR __REG(TARGET_PTP, 0, 1, 512, 0, 1, 16, 0, 0, 1, 4)
+
+#define PTP_PIN_INTR_INTR_PTP GENMASK(7, 0)
+#define PTP_PIN_INTR_INTR_PTP_SET(x)\
+ FIELD_PREP(PTP_PIN_INTR_INTR_PTP, x)
+#define PTP_PIN_INTR_INTR_PTP_GET(x)\
+ FIELD_GET(PTP_PIN_INTR_INTR_PTP, x)
+
+/* PTP:PTP_CFG:PTP_PIN_INTR_ENA */
+#define PTP_PIN_INTR_ENA __REG(TARGET_PTP, 0, 1, 512, 0, 1, 16, 4, 0, 1, 4)
+
+#define PTP_PIN_INTR_ENA_INTR_ENA GENMASK(7, 0)
+#define PTP_PIN_INTR_ENA_INTR_ENA_SET(x)\
+ FIELD_PREP(PTP_PIN_INTR_ENA_INTR_ENA, x)
+#define PTP_PIN_INTR_ENA_INTR_ENA_GET(x)\
+ FIELD_GET(PTP_PIN_INTR_ENA_INTR_ENA, x)
+
+/* PTP:PTP_CFG:PTP_DOM_CFG */
+#define PTP_DOM_CFG __REG(TARGET_PTP, 0, 1, 512, 0, 1, 16, 12, 0, 1, 4)
+
+#define PTP_DOM_CFG_ENA GENMASK(11, 9)
+#define PTP_DOM_CFG_ENA_SET(x)\
+ FIELD_PREP(PTP_DOM_CFG_ENA, x)
+#define PTP_DOM_CFG_ENA_GET(x)\
+ FIELD_GET(PTP_DOM_CFG_ENA, x)
+
+#define PTP_DOM_CFG_CLKCFG_DIS GENMASK(2, 0)
+#define PTP_DOM_CFG_CLKCFG_DIS_SET(x)\
+ FIELD_PREP(PTP_DOM_CFG_CLKCFG_DIS, x)
+#define PTP_DOM_CFG_CLKCFG_DIS_GET(x)\
+ FIELD_GET(PTP_DOM_CFG_CLKCFG_DIS, x)
+
+/* PTP:PTP_TOD_DOMAINS:CLK_PER_CFG */
+#define PTP_CLK_PER_CFG(g, r) __REG(TARGET_PTP, 0, 1, 528, g, 3, 28, 0, r, 2, 4)
+
+/* PTP:PTP_PINS:PTP_PIN_CFG */
+#define PTP_PIN_CFG(g) __REG(TARGET_PTP, 0, 1, 0, g, 8, 64, 0, 0, 1, 4)
+
+#define PTP_PIN_CFG_PIN_ACTION GENMASK(29, 27)
+#define PTP_PIN_CFG_PIN_ACTION_SET(x)\
+ FIELD_PREP(PTP_PIN_CFG_PIN_ACTION, x)
+#define PTP_PIN_CFG_PIN_ACTION_GET(x)\
+ FIELD_GET(PTP_PIN_CFG_PIN_ACTION, x)
+
+#define PTP_PIN_CFG_PIN_SYNC GENMASK(26, 25)
+#define PTP_PIN_CFG_PIN_SYNC_SET(x)\
+ FIELD_PREP(PTP_PIN_CFG_PIN_SYNC, x)
+#define PTP_PIN_CFG_PIN_SYNC_GET(x)\
+ FIELD_GET(PTP_PIN_CFG_PIN_SYNC, x)
+
+#define PTP_PIN_CFG_PIN_SELECT GENMASK(23, 21)
+#define PTP_PIN_CFG_PIN_SELECT_SET(x)\
+ FIELD_PREP(PTP_PIN_CFG_PIN_SELECT, x)
+#define PTP_PIN_CFG_PIN_SELECT_GET(x)\
+ FIELD_GET(PTP_PIN_CFG_PIN_SELECT, x)
+
+#define PTP_PIN_CFG_PIN_DOM GENMASK(17, 16)
+#define PTP_PIN_CFG_PIN_DOM_SET(x)\
+ FIELD_PREP(PTP_PIN_CFG_PIN_DOM, x)
+#define PTP_PIN_CFG_PIN_DOM_GET(x)\
+ FIELD_GET(PTP_PIN_CFG_PIN_DOM, x)
+
+/* PTP:PTP_PINS:PTP_TOD_SEC_MSB */
+#define PTP_TOD_SEC_MSB(g) __REG(TARGET_PTP, 0, 1, 0, g, 8, 64, 4, 0, 1, 4)
+
+#define PTP_TOD_SEC_MSB_TOD_SEC_MSB GENMASK(15, 0)
+#define PTP_TOD_SEC_MSB_TOD_SEC_MSB_SET(x)\
+ FIELD_PREP(PTP_TOD_SEC_MSB_TOD_SEC_MSB, x)
+#define PTP_TOD_SEC_MSB_TOD_SEC_MSB_GET(x)\
+ FIELD_GET(PTP_TOD_SEC_MSB_TOD_SEC_MSB, x)
+
+/* PTP:PTP_PINS:PTP_TOD_SEC_LSB */
+#define PTP_TOD_SEC_LSB(g) __REG(TARGET_PTP, 0, 1, 0, g, 8, 64, 8, 0, 1, 4)
+
+/* PTP:PTP_PINS:PTP_TOD_NSEC */
+#define PTP_TOD_NSEC(g) __REG(TARGET_PTP, 0, 1, 0, g, 8, 64, 12, 0, 1, 4)
+
+#define PTP_TOD_NSEC_TOD_NSEC GENMASK(29, 0)
+#define PTP_TOD_NSEC_TOD_NSEC_SET(x)\
+ FIELD_PREP(PTP_TOD_NSEC_TOD_NSEC, x)
+#define PTP_TOD_NSEC_TOD_NSEC_GET(x)\
+ FIELD_GET(PTP_TOD_NSEC_TOD_NSEC, x)
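+
+/* The per-pin time of day is a 48-bit seconds value, split across
+ * PTP_TOD_SEC_MSB (upper 16 bits) and PTP_TOD_SEC_LSB (lower 32 bits),
+ * plus a 30-bit nanosecond value in PTP_TOD_NSEC.
+ */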
+
+/* PTP:PTP_PINS:WF_HIGH_PERIOD */
+#define PTP_WF_HIGH_PERIOD(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 8, 64, 24, 0, 1, 4)
+
+#define PTP_WF_HIGH_PERIOD_PIN_WFH(x) ((x) & GENMASK(29, 0))
+#define PTP_WF_HIGH_PERIOD_PIN_WFH_M GENMASK(29, 0)
+#define PTP_WF_HIGH_PERIOD_PIN_WFH_X(x) ((x) & GENMASK(29, 0))
+
+/* PTP:PTP_PINS:WF_LOW_PERIOD */
+#define PTP_WF_LOW_PERIOD(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 8, 64, 28, 0, 1, 4)
+
+#define PTP_WF_LOW_PERIOD_PIN_WFL(x) ((x) & GENMASK(29, 0))
+#define PTP_WF_LOW_PERIOD_PIN_WFL_M GENMASK(29, 0)
+#define PTP_WF_LOW_PERIOD_PIN_WFL_X(x) ((x) & GENMASK(29, 0))
+
+/* PTP:PTP_TS_FIFO:PTP_TWOSTEP_CTRL */
+#define PTP_TWOSTEP_CTRL __REG(TARGET_PTP, 0, 1, 612, 0, 1, 12, 0, 0, 1, 4)
+
+#define PTP_TWOSTEP_CTRL_NXT BIT(11)
+#define PTP_TWOSTEP_CTRL_NXT_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_NXT, x)
+#define PTP_TWOSTEP_CTRL_NXT_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_NXT, x)
+
+#define PTP_TWOSTEP_CTRL_VLD BIT(10)
+#define PTP_TWOSTEP_CTRL_VLD_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_VLD, x)
+#define PTP_TWOSTEP_CTRL_VLD_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_VLD, x)
+
+#define PTP_TWOSTEP_CTRL_STAMP_TX BIT(9)
+#define PTP_TWOSTEP_CTRL_STAMP_TX_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_STAMP_TX, x)
+#define PTP_TWOSTEP_CTRL_STAMP_TX_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_STAMP_TX, x)
+
+#define PTP_TWOSTEP_CTRL_STAMP_PORT GENMASK(8, 1)
+#define PTP_TWOSTEP_CTRL_STAMP_PORT_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_STAMP_PORT, x)
+#define PTP_TWOSTEP_CTRL_STAMP_PORT_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_STAMP_PORT, x)
+
+#define PTP_TWOSTEP_CTRL_OVFL BIT(0)
+#define PTP_TWOSTEP_CTRL_OVFL_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_OVFL, x)
+#define PTP_TWOSTEP_CTRL_OVFL_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_OVFL, x)
+
+/* PTP:PTP_TS_FIFO:PTP_TWOSTEP_STAMP */
+#define PTP_TWOSTEP_STAMP __REG(TARGET_PTP, 0, 1, 612, 0, 1, 12, 4, 0, 1, 4)
+
+#define PTP_TWOSTEP_STAMP_STAMP_NSEC GENMASK(31, 2)
+#define PTP_TWOSTEP_STAMP_STAMP_NSEC_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_STAMP_STAMP_NSEC, x)
+#define PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_STAMP_STAMP_NSEC, x)
+
+/* DEVCPU_QS:XTR:XTR_GRP_CFG */
+#define QS_XTR_GRP_CFG(r) __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 0, r, 2, 4)
+
+#define QS_XTR_GRP_CFG_MODE GENMASK(3, 2)
+#define QS_XTR_GRP_CFG_MODE_SET(x)\
+ FIELD_PREP(QS_XTR_GRP_CFG_MODE, x)
+#define QS_XTR_GRP_CFG_MODE_GET(x)\
+ FIELD_GET(QS_XTR_GRP_CFG_MODE, x)
+
+#define QS_XTR_GRP_CFG_BYTE_SWAP BIT(0)
+#define QS_XTR_GRP_CFG_BYTE_SWAP_SET(x)\
+ FIELD_PREP(QS_XTR_GRP_CFG_BYTE_SWAP, x)
+#define QS_XTR_GRP_CFG_BYTE_SWAP_GET(x)\
+ FIELD_GET(QS_XTR_GRP_CFG_BYTE_SWAP, x)
+
+/* DEVCPU_QS:XTR:XTR_RD */
+#define QS_XTR_RD(r) __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 8, r, 2, 4)
+
+/* DEVCPU_QS:XTR:XTR_FLUSH */
+#define QS_XTR_FLUSH __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 24, 0, 1, 4)
+
+/* DEVCPU_QS:XTR:XTR_DATA_PRESENT */
+#define QS_XTR_DATA_PRESENT __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 28, 0, 1, 4)
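+
+/* An illustrative sketch of register-based extraction: while the group
+ * reports pending data, pop it one 32-bit word at a time (grp is assumed
+ * to be 0 here):
+ *
+ *	while (lan_rd(lan966x, QS_XTR_DATA_PRESENT) & BIT(grp))
+ *		val = lan_rd(lan966x, QS_XTR_RD(grp));
+ */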
+
+/* DEVCPU_QS:INJ:INJ_GRP_CFG */
+#define QS_INJ_GRP_CFG(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 0, r, 2, 4)
+
+#define QS_INJ_GRP_CFG_MODE GENMASK(3, 2)
+#define QS_INJ_GRP_CFG_MODE_SET(x)\
+ FIELD_PREP(QS_INJ_GRP_CFG_MODE, x)
+#define QS_INJ_GRP_CFG_MODE_GET(x)\
+ FIELD_GET(QS_INJ_GRP_CFG_MODE, x)
+
+#define QS_INJ_GRP_CFG_BYTE_SWAP BIT(0)
+#define QS_INJ_GRP_CFG_BYTE_SWAP_SET(x)\
+ FIELD_PREP(QS_INJ_GRP_CFG_BYTE_SWAP, x)
+#define QS_INJ_GRP_CFG_BYTE_SWAP_GET(x)\
+ FIELD_GET(QS_INJ_GRP_CFG_BYTE_SWAP, x)
+
+/* DEVCPU_QS:INJ:INJ_WR */
+#define QS_INJ_WR(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 8, r, 2, 4)
+
+/* DEVCPU_QS:INJ:INJ_CTRL */
+#define QS_INJ_CTRL(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 16, r, 2, 4)
+
+#define QS_INJ_CTRL_GAP_SIZE GENMASK(24, 21)
+#define QS_INJ_CTRL_GAP_SIZE_SET(x)\
+ FIELD_PREP(QS_INJ_CTRL_GAP_SIZE, x)
+#define QS_INJ_CTRL_GAP_SIZE_GET(x)\
+ FIELD_GET(QS_INJ_CTRL_GAP_SIZE, x)
+
+#define QS_INJ_CTRL_EOF BIT(19)
+#define QS_INJ_CTRL_EOF_SET(x)\
+ FIELD_PREP(QS_INJ_CTRL_EOF, x)
+#define QS_INJ_CTRL_EOF_GET(x)\
+ FIELD_GET(QS_INJ_CTRL_EOF, x)
+
+#define QS_INJ_CTRL_SOF BIT(18)
+#define QS_INJ_CTRL_SOF_SET(x)\
+ FIELD_PREP(QS_INJ_CTRL_SOF, x)
+#define QS_INJ_CTRL_SOF_GET(x)\
+ FIELD_GET(QS_INJ_CTRL_SOF, x)
+
+#define QS_INJ_CTRL_VLD_BYTES GENMASK(17, 16)
+#define QS_INJ_CTRL_VLD_BYTES_SET(x)\
+ FIELD_PREP(QS_INJ_CTRL_VLD_BYTES, x)
+#define QS_INJ_CTRL_VLD_BYTES_GET(x)\
+ FIELD_GET(QS_INJ_CTRL_VLD_BYTES, x)
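+
+/* An illustrative sketch of register-based (slow path) injection: mark the
+ * start of frame, push the IFH and frame data one 32-bit word at a time
+ * through QS_INJ_WR, then mark the end of frame together with the number of
+ * valid bytes in the last word (grp is assumed to be 0 here):
+ *
+ *	lan_wr(QS_INJ_CTRL_SOF_SET(1) | QS_INJ_CTRL_GAP_SIZE_SET(1),
+ *	       lan966x, QS_INJ_CTRL(grp));
+ *	...
+ *	lan_wr(QS_INJ_CTRL_EOF_SET(1) | QS_INJ_CTRL_VLD_BYTES_SET(last),
+ *	       lan966x, QS_INJ_CTRL(grp));
+ */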
+
+/* DEVCPU_QS:INJ:INJ_STATUS */
+#define QS_INJ_STATUS __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 24, 0, 1, 4)
+
+#define QS_INJ_STATUS_WMARK_REACHED GENMASK(5, 4)
+#define QS_INJ_STATUS_WMARK_REACHED_SET(x)\
+ FIELD_PREP(QS_INJ_STATUS_WMARK_REACHED, x)
+#define QS_INJ_STATUS_WMARK_REACHED_GET(x)\
+ FIELD_GET(QS_INJ_STATUS_WMARK_REACHED, x)
+
+#define QS_INJ_STATUS_FIFO_RDY GENMASK(3, 2)
+#define QS_INJ_STATUS_FIFO_RDY_SET(x)\
+ FIELD_PREP(QS_INJ_STATUS_FIFO_RDY, x)
+#define QS_INJ_STATUS_FIFO_RDY_GET(x)\
+ FIELD_GET(QS_INJ_STATUS_FIFO_RDY, x)
+
+/* QSYS:SYSTEM:PORT_MODE */
+#define QSYS_PORT_MODE(r) __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 0, r, 10, 4)
+
+#define QSYS_PORT_MODE_DEQUEUE_DIS BIT(1)
+#define QSYS_PORT_MODE_DEQUEUE_DIS_SET(x)\
+ FIELD_PREP(QSYS_PORT_MODE_DEQUEUE_DIS, x)
+#define QSYS_PORT_MODE_DEQUEUE_DIS_GET(x)\
+ FIELD_GET(QSYS_PORT_MODE_DEQUEUE_DIS, x)
+
+/* QSYS:SYSTEM:SWITCH_PORT_MODE */
+#define QSYS_SW_PORT_MODE(r) __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 80, r, 9, 4)
+
+#define QSYS_SW_PORT_MODE_PORT_ENA BIT(18)
+#define QSYS_SW_PORT_MODE_PORT_ENA_SET(x)\
+ FIELD_PREP(QSYS_SW_PORT_MODE_PORT_ENA, x)
+#define QSYS_SW_PORT_MODE_PORT_ENA_GET(x)\
+ FIELD_GET(QSYS_SW_PORT_MODE_PORT_ENA, x)
+
+#define QSYS_SW_PORT_MODE_SCH_NEXT_CFG GENMASK(16, 14)
+#define QSYS_SW_PORT_MODE_SCH_NEXT_CFG_SET(x)\
+ FIELD_PREP(QSYS_SW_PORT_MODE_SCH_NEXT_CFG, x)
+#define QSYS_SW_PORT_MODE_SCH_NEXT_CFG_GET(x)\
+ FIELD_GET(QSYS_SW_PORT_MODE_SCH_NEXT_CFG, x)
+
+#define QSYS_SW_PORT_MODE_INGRESS_DROP_MODE BIT(12)
+#define QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_SET(x)\
+ FIELD_PREP(QSYS_SW_PORT_MODE_INGRESS_DROP_MODE, x)
+#define QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_GET(x)\
+ FIELD_GET(QSYS_SW_PORT_MODE_INGRESS_DROP_MODE, x)
+
+#define QSYS_SW_PORT_MODE_TX_PFC_ENA GENMASK(11, 4)
+#define QSYS_SW_PORT_MODE_TX_PFC_ENA_SET(x)\
+ FIELD_PREP(QSYS_SW_PORT_MODE_TX_PFC_ENA, x)
+#define QSYS_SW_PORT_MODE_TX_PFC_ENA_GET(x)\
+ FIELD_GET(QSYS_SW_PORT_MODE_TX_PFC_ENA, x)
+
+#define QSYS_SW_PORT_MODE_AGING_MODE GENMASK(1, 0)
+#define QSYS_SW_PORT_MODE_AGING_MODE_SET(x)\
+ FIELD_PREP(QSYS_SW_PORT_MODE_AGING_MODE, x)
+#define QSYS_SW_PORT_MODE_AGING_MODE_GET(x)\
+ FIELD_GET(QSYS_SW_PORT_MODE_AGING_MODE, x)
+
+/* QSYS:SYSTEM:SW_STATUS */
+#define QSYS_SW_STATUS(r) __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 164, r, 9, 4)
+
+#define QSYS_SW_STATUS_EQ_AVAIL GENMASK(7, 0)
+#define QSYS_SW_STATUS_EQ_AVAIL_SET(x)\
+ FIELD_PREP(QSYS_SW_STATUS_EQ_AVAIL, x)
+#define QSYS_SW_STATUS_EQ_AVAIL_GET(x)\
+ FIELD_GET(QSYS_SW_STATUS_EQ_AVAIL, x)
+
+/* QSYS:SYSTEM:CPU_GROUP_MAP */
+#define QSYS_CPU_GROUP_MAP __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 204, 0, 1, 4)
+
+/* QSYS:RES_CTRL:RES_CFG */
+#define QSYS_RES_CFG(g) __REG(TARGET_QSYS, 0, 1, 32768, g, 1024, 8, 0, 0, 1, 4)
+
+/* QSYS:HSCH:CIR_CFG */
+#define QSYS_CIR_CFG(g) __REG(TARGET_QSYS, 0, 1, 16384, g, 90, 128, 0, 0, 1, 4)
+
+#define QSYS_CIR_CFG_CIR_RATE GENMASK(20, 6)
+#define QSYS_CIR_CFG_CIR_RATE_SET(x)\
+ FIELD_PREP(QSYS_CIR_CFG_CIR_RATE, x)
+#define QSYS_CIR_CFG_CIR_RATE_GET(x)\
+ FIELD_GET(QSYS_CIR_CFG_CIR_RATE, x)
+
+#define QSYS_CIR_CFG_CIR_BURST GENMASK(5, 0)
+#define QSYS_CIR_CFG_CIR_BURST_SET(x)\
+ FIELD_PREP(QSYS_CIR_CFG_CIR_BURST, x)
+#define QSYS_CIR_CFG_CIR_BURST_GET(x)\
+ FIELD_GET(QSYS_CIR_CFG_CIR_BURST, x)
+
+/* QSYS:HSCH:SE_CFG */
+#define QSYS_SE_CFG(g) __REG(TARGET_QSYS, 0, 1, 16384, g, 90, 128, 8, 0, 1, 4)
+
+#define QSYS_SE_CFG_SE_DWRR_CNT GENMASK(9, 6)
+#define QSYS_SE_CFG_SE_DWRR_CNT_SET(x)\
+ FIELD_PREP(QSYS_SE_CFG_SE_DWRR_CNT, x)
+#define QSYS_SE_CFG_SE_DWRR_CNT_GET(x)\
+ FIELD_GET(QSYS_SE_CFG_SE_DWRR_CNT, x)
+
+#define QSYS_SE_CFG_SE_RR_ENA BIT(5)
+#define QSYS_SE_CFG_SE_RR_ENA_SET(x)\
+ FIELD_PREP(QSYS_SE_CFG_SE_RR_ENA, x)
+#define QSYS_SE_CFG_SE_RR_ENA_GET(x)\
+ FIELD_GET(QSYS_SE_CFG_SE_RR_ENA, x)
+
+#define QSYS_SE_CFG_SE_AVB_ENA BIT(4)
+#define QSYS_SE_CFG_SE_AVB_ENA_SET(x)\
+ FIELD_PREP(QSYS_SE_CFG_SE_AVB_ENA, x)
+#define QSYS_SE_CFG_SE_AVB_ENA_GET(x)\
+ FIELD_GET(QSYS_SE_CFG_SE_AVB_ENA, x)
+
+#define QSYS_SE_CFG_SE_FRM_MODE GENMASK(3, 2)
+#define QSYS_SE_CFG_SE_FRM_MODE_SET(x)\
+ FIELD_PREP(QSYS_SE_CFG_SE_FRM_MODE, x)
+#define QSYS_SE_CFG_SE_FRM_MODE_GET(x)\
+ FIELD_GET(QSYS_SE_CFG_SE_FRM_MODE, x)
+
+/* QSYS:HSCH:SE_DWRR_CFG */
+#define QSYS_SE_DWRR_CFG(g, r) __REG(TARGET_QSYS, 0, 1, 16384, g, 90, 128, 12, r, 12, 4)
+
+#define QSYS_SE_DWRR_CFG_DWRR_COST GENMASK(4, 0)
+#define QSYS_SE_DWRR_CFG_DWRR_COST_SET(x)\
+ FIELD_PREP(QSYS_SE_DWRR_CFG_DWRR_COST, x)
+#define QSYS_SE_DWRR_CFG_DWRR_COST_GET(x)\
+ FIELD_GET(QSYS_SE_DWRR_CFG_DWRR_COST, x)
+
+/* QSYS:TAS_CONFIG:TAS_CFG_CTRL */
+#define QSYS_TAS_CFG_CTRL __REG(TARGET_QSYS, 0, 1, 57372, 0, 1, 12, 0, 0, 1, 4)
+
+#define QSYS_TAS_CFG_CTRL_LIST_NUM_MAX GENMASK(27, 23)
+#define QSYS_TAS_CFG_CTRL_LIST_NUM_MAX_SET(x)\
+ FIELD_PREP(QSYS_TAS_CFG_CTRL_LIST_NUM_MAX, x)
+#define QSYS_TAS_CFG_CTRL_LIST_NUM_MAX_GET(x)\
+ FIELD_GET(QSYS_TAS_CFG_CTRL_LIST_NUM_MAX, x)
+
+#define QSYS_TAS_CFG_CTRL_LIST_NUM GENMASK(22, 18)
+#define QSYS_TAS_CFG_CTRL_LIST_NUM_SET(x)\
+ FIELD_PREP(QSYS_TAS_CFG_CTRL_LIST_NUM, x)
+#define QSYS_TAS_CFG_CTRL_LIST_NUM_GET(x)\
+ FIELD_GET(QSYS_TAS_CFG_CTRL_LIST_NUM, x)
+
+#define QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q BIT(17)
+#define QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q_SET(x)\
+ FIELD_PREP(QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q, x)
+#define QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q_GET(x)\
+ FIELD_GET(QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q, x)
+
+#define QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM GENMASK(16, 5)
+#define QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_SET(x)\
+ FIELD_PREP(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM, x)
+#define QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_GET(x)\
+ FIELD_GET(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM, x)
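+
+/* The TAS_LIST_CFG and TAS_GCL_CFG register groups below are access windows:
+ * the list or GCL entry they refer to is selected beforehand through the
+ * LIST_NUM and GCL_ENTRY_NUM fields of this register.
+ */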
+
+/* QSYS:TAS_CONFIG:TAS_GATE_STATE_CTRL */
+#define QSYS_TAS_GS_CTRL __REG(TARGET_QSYS, 0, 1, 57372, 0, 1, 12, 4, 0, 1, 4)
+
+#define QSYS_TAS_GS_CTRL_HSCH_POS GENMASK(2, 0)
+#define QSYS_TAS_GS_CTRL_HSCH_POS_SET(x)\
+ FIELD_PREP(QSYS_TAS_GS_CTRL_HSCH_POS, x)
+#define QSYS_TAS_GS_CTRL_HSCH_POS_GET(x)\
+ FIELD_GET(QSYS_TAS_GS_CTRL_HSCH_POS, x)
+
+/* QSYS:TAS_CONFIG:TAS_STATEMACHINE_CFG */
+#define QSYS_TAS_STM_CFG __REG(TARGET_QSYS, 0, 1, 57372, 0, 1, 12, 8, 0, 1, 4)
+
+#define QSYS_TAS_STM_CFG_REVISIT_DLY GENMASK(7, 0)
+#define QSYS_TAS_STM_CFG_REVISIT_DLY_SET(x)\
+ FIELD_PREP(QSYS_TAS_STM_CFG_REVISIT_DLY, x)
+#define QSYS_TAS_STM_CFG_REVISIT_DLY_GET(x)\
+ FIELD_GET(QSYS_TAS_STM_CFG_REVISIT_DLY, x)
+
+/* QSYS:TAS_PROFILE_CFG:TAS_PROFILE_CONFIG */
+#define QSYS_TAS_PROFILE_CFG(g) __REG(TARGET_QSYS, 0, 1, 30720, g, 16, 64, 32, 0, 1, 4)
+
+#define QSYS_TAS_PROFILE_CFG_PORT_NUM GENMASK(21, 19)
+#define QSYS_TAS_PROFILE_CFG_PORT_NUM_SET(x)\
+ FIELD_PREP(QSYS_TAS_PROFILE_CFG_PORT_NUM, x)
+#define QSYS_TAS_PROFILE_CFG_PORT_NUM_GET(x)\
+ FIELD_GET(QSYS_TAS_PROFILE_CFG_PORT_NUM, x)
+
+#define QSYS_TAS_PROFILE_CFG_LINK_SPEED GENMASK(18, 16)
+#define QSYS_TAS_PROFILE_CFG_LINK_SPEED_SET(x)\
+ FIELD_PREP(QSYS_TAS_PROFILE_CFG_LINK_SPEED, x)
+#define QSYS_TAS_PROFILE_CFG_LINK_SPEED_GET(x)\
+ FIELD_GET(QSYS_TAS_PROFILE_CFG_LINK_SPEED, x)
+
+/* QSYS:TAS_LIST_CFG:TAS_BASE_TIME_NSEC */
+#define QSYS_TAS_BT_NSEC __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 0, 0, 1, 4)
+
+#define QSYS_TAS_BT_NSEC_NSEC GENMASK(29, 0)
+#define QSYS_TAS_BT_NSEC_NSEC_SET(x)\
+ FIELD_PREP(QSYS_TAS_BT_NSEC_NSEC, x)
+#define QSYS_TAS_BT_NSEC_NSEC_GET(x)\
+ FIELD_GET(QSYS_TAS_BT_NSEC_NSEC, x)
+
+/* QSYS:TAS_LIST_CFG:TAS_BASE_TIME_SEC_LSB */
+#define QSYS_TAS_BT_SEC_LSB __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 4, 0, 1, 4)
+
+/* QSYS:TAS_LIST_CFG:TAS_BASE_TIME_SEC_MSB */
+#define QSYS_TAS_BT_SEC_MSB __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 8, 0, 1, 4)
+
+#define QSYS_TAS_BT_SEC_MSB_SEC_MSB GENMASK(15, 0)
+#define QSYS_TAS_BT_SEC_MSB_SEC_MSB_SET(x)\
+ FIELD_PREP(QSYS_TAS_BT_SEC_MSB_SEC_MSB, x)
+#define QSYS_TAS_BT_SEC_MSB_SEC_MSB_GET(x)\
+ FIELD_GET(QSYS_TAS_BT_SEC_MSB_SEC_MSB, x)
+
+/* QSYS:TAS_LIST_CFG:TAS_CYCLE_TIME_CFG */
+#define QSYS_TAS_CT_CFG __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 24, 0, 1, 4)
+
+/* QSYS:TAS_LIST_CFG:TAS_STARTUP_CFG */
+#define QSYS_TAS_STARTUP_CFG __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 28, 0, 1, 4)
+
+#define QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX GENMASK(27, 23)
+#define QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX_SET(x)\
+ FIELD_PREP(QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX, x)
+#define QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX_GET(x)\
+ FIELD_GET(QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX, x)
+
+/* QSYS:TAS_LIST_CFG:TAS_LIST_CFG */
+#define QSYS_TAS_LIST_CFG __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 32, 0, 1, 4)
+
+#define QSYS_TAS_LIST_CFG_LIST_BASE_ADDR GENMASK(11, 0)
+#define QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_SET(x)\
+ FIELD_PREP(QSYS_TAS_LIST_CFG_LIST_BASE_ADDR, x)
+#define QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_GET(x)\
+ FIELD_GET(QSYS_TAS_LIST_CFG_LIST_BASE_ADDR, x)
+
+/* QSYS:TAS_LIST_CFG:TAS_LIST_STATE */
+#define QSYS_TAS_LST __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 36, 0, 1, 4)
+
+#define QSYS_TAS_LST_LIST_STATE GENMASK(2, 0)
+#define QSYS_TAS_LST_LIST_STATE_SET(x)\
+ FIELD_PREP(QSYS_TAS_LST_LIST_STATE, x)
+#define QSYS_TAS_LST_LIST_STATE_GET(x)\
+ FIELD_GET(QSYS_TAS_LST_LIST_STATE, x)
+
+/* QSYS:TAS_GCL_CFG:TAS_GCL_CTRL_CFG */
+#define QSYS_TAS_GCL_CT_CFG __REG(TARGET_QSYS, 0, 1, 27968, 0, 1, 16, 0, 0, 1, 4)
+
+#define QSYS_TAS_GCL_CT_CFG_HSCH_POS GENMASK(12, 10)
+#define QSYS_TAS_GCL_CT_CFG_HSCH_POS_SET(x)\
+ FIELD_PREP(QSYS_TAS_GCL_CT_CFG_HSCH_POS, x)
+#define QSYS_TAS_GCL_CT_CFG_HSCH_POS_GET(x)\
+ FIELD_GET(QSYS_TAS_GCL_CT_CFG_HSCH_POS, x)
+
+#define QSYS_TAS_GCL_CT_CFG_GATE_STATE GENMASK(9, 2)
+#define QSYS_TAS_GCL_CT_CFG_GATE_STATE_SET(x)\
+ FIELD_PREP(QSYS_TAS_GCL_CT_CFG_GATE_STATE, x)
+#define QSYS_TAS_GCL_CT_CFG_GATE_STATE_GET(x)\
+ FIELD_GET(QSYS_TAS_GCL_CT_CFG_GATE_STATE, x)
+
+#define QSYS_TAS_GCL_CT_CFG_OP_TYPE GENMASK(1, 0)
+#define QSYS_TAS_GCL_CT_CFG_OP_TYPE_SET(x)\
+ FIELD_PREP(QSYS_TAS_GCL_CT_CFG_OP_TYPE, x)
+#define QSYS_TAS_GCL_CT_CFG_OP_TYPE_GET(x)\
+ FIELD_GET(QSYS_TAS_GCL_CT_CFG_OP_TYPE, x)
+
+/* QSYS:TAS_GCL_CFG:TAS_GCL_CTRL_CFG2 */
+#define QSYS_TAS_GCL_CT_CFG2 __REG(TARGET_QSYS, 0, 1, 27968, 0, 1, 16, 4, 0, 1, 4)
+
+#define QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE GENMASK(15, 12)
+#define QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE_SET(x)\
+ FIELD_PREP(QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE, x)
+#define QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE_GET(x)\
+ FIELD_GET(QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE, x)
+
+#define QSYS_TAS_GCL_CT_CFG2_NEXT_GCL GENMASK(11, 0)
+#define QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_SET(x)\
+ FIELD_PREP(QSYS_TAS_GCL_CT_CFG2_NEXT_GCL, x)
+#define QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_GET(x)\
+ FIELD_GET(QSYS_TAS_GCL_CT_CFG2_NEXT_GCL, x)
+
+/* QSYS:TAS_GCL_CFG:TAS_GCL_TIME_CFG */
+#define QSYS_TAS_GCL_TM_CFG __REG(TARGET_QSYS, 0, 1, 27968, 0, 1, 16, 8, 0, 1, 4)
+
+/* QSYS:HSCH_TAS_STATE:TAS_GATE_STATE */
+#define QSYS_TAS_GATE_STATE __REG(TARGET_QSYS, 0, 1, 28004, 0, 1, 4, 0, 0, 1, 4)
+
+#define QSYS_TAS_GATE_STATE_TAS_GATE_STATE GENMASK(7, 0)
+#define QSYS_TAS_GATE_STATE_TAS_GATE_STATE_SET(x)\
+ FIELD_PREP(QSYS_TAS_GATE_STATE_TAS_GATE_STATE, x)
+#define QSYS_TAS_GATE_STATE_TAS_GATE_STATE_GET(x)\
+ FIELD_GET(QSYS_TAS_GATE_STATE_TAS_GATE_STATE, x)
+
+/* REW:PORT:PORT_VLAN_CFG */
+#define REW_PORT_VLAN_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 0, 0, 1, 4)
+
+#define REW_PORT_VLAN_CFG_PORT_TPID GENMASK(31, 16)
+#define REW_PORT_VLAN_CFG_PORT_TPID_SET(x)\
+ FIELD_PREP(REW_PORT_VLAN_CFG_PORT_TPID, x)
+#define REW_PORT_VLAN_CFG_PORT_TPID_GET(x)\
+ FIELD_GET(REW_PORT_VLAN_CFG_PORT_TPID, x)
+
+#define REW_PORT_VLAN_CFG_PORT_VID GENMASK(11, 0)
+#define REW_PORT_VLAN_CFG_PORT_VID_SET(x)\
+ FIELD_PREP(REW_PORT_VLAN_CFG_PORT_VID, x)
+#define REW_PORT_VLAN_CFG_PORT_VID_GET(x)\
+ FIELD_GET(REW_PORT_VLAN_CFG_PORT_VID, x)
+
+/* REW:PORT:TAG_CFG */
+#define REW_TAG_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 4, 0, 1, 4)
+
+#define REW_TAG_CFG_TAG_CFG GENMASK(8, 7)
+#define REW_TAG_CFG_TAG_CFG_SET(x)\
+ FIELD_PREP(REW_TAG_CFG_TAG_CFG, x)
+#define REW_TAG_CFG_TAG_CFG_GET(x)\
+ FIELD_GET(REW_TAG_CFG_TAG_CFG, x)
+
+#define REW_TAG_CFG_TAG_TPID_CFG GENMASK(6, 5)
+#define REW_TAG_CFG_TAG_TPID_CFG_SET(x)\
+ FIELD_PREP(REW_TAG_CFG_TAG_TPID_CFG, x)
+#define REW_TAG_CFG_TAG_TPID_CFG_GET(x)\
+ FIELD_GET(REW_TAG_CFG_TAG_TPID_CFG, x)
+
+#define REW_TAG_CFG_TAG_PCP_CFG GENMASK(3, 2)
+#define REW_TAG_CFG_TAG_PCP_CFG_SET(x)\
+ FIELD_PREP(REW_TAG_CFG_TAG_PCP_CFG, x)
+#define REW_TAG_CFG_TAG_PCP_CFG_GET(x)\
+ FIELD_GET(REW_TAG_CFG_TAG_PCP_CFG, x)
+
+#define REW_TAG_CFG_TAG_DEI_CFG GENMASK(1, 0)
+#define REW_TAG_CFG_TAG_DEI_CFG_SET(x)\
+ FIELD_PREP(REW_TAG_CFG_TAG_DEI_CFG, x)
+#define REW_TAG_CFG_TAG_DEI_CFG_GET(x)\
+ FIELD_GET(REW_TAG_CFG_TAG_DEI_CFG, x)
+
+/* REW:PORT:PORT_CFG */
+#define REW_PORT_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 8, 0, 1, 4)
+
+#define REW_PORT_CFG_ES0_EN BIT(4)
+#define REW_PORT_CFG_ES0_EN_SET(x)\
+ FIELD_PREP(REW_PORT_CFG_ES0_EN, x)
+#define REW_PORT_CFG_ES0_EN_GET(x)\
+ FIELD_GET(REW_PORT_CFG_ES0_EN, x)
+
+#define REW_PORT_CFG_NO_REWRITE BIT(0)
+#define REW_PORT_CFG_NO_REWRITE_SET(x)\
+ FIELD_PREP(REW_PORT_CFG_NO_REWRITE, x)
+#define REW_PORT_CFG_NO_REWRITE_GET(x)\
+ FIELD_GET(REW_PORT_CFG_NO_REWRITE, x)
+
+/* REW:PORT:DSCP_CFG */
+#define REW_DSCP_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 12, 0, 1, 4)
+
+#define REW_DSCP_CFG_DSCP_REWR_CFG GENMASK(1, 0)
+#define REW_DSCP_CFG_DSCP_REWR_CFG_SET(x)\
+ FIELD_PREP(REW_DSCP_CFG_DSCP_REWR_CFG, x)
+#define REW_DSCP_CFG_DSCP_REWR_CFG_GET(x)\
+ FIELD_GET(REW_DSCP_CFG_DSCP_REWR_CFG, x)
+
+/* REW:PORT:PCP_DEI_QOS_MAP_CFG */
+#define REW_PCP_DEI_CFG(g, r) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 16, r, 16, 4)
+
+#define REW_PCP_DEI_CFG_DEI_QOS_VAL BIT(3)
+#define REW_PCP_DEI_CFG_DEI_QOS_VAL_SET(x)\
+ FIELD_PREP(REW_PCP_DEI_CFG_DEI_QOS_VAL, x)
+#define REW_PCP_DEI_CFG_DEI_QOS_VAL_GET(x)\
+ FIELD_GET(REW_PCP_DEI_CFG_DEI_QOS_VAL, x)
+
+#define REW_PCP_DEI_CFG_PCP_QOS_VAL GENMASK(2, 0)
+#define REW_PCP_DEI_CFG_PCP_QOS_VAL_SET(x)\
+ FIELD_PREP(REW_PCP_DEI_CFG_PCP_QOS_VAL, x)
+#define REW_PCP_DEI_CFG_PCP_QOS_VAL_GET(x)\
+ FIELD_GET(REW_PCP_DEI_CFG_PCP_QOS_VAL, x)
+
+/* REW:COMMON:STAT_CFG */
+#define REW_STAT_CFG __REG(TARGET_REW, 0, 1, 3072, 0, 1, 528, 520, 0, 1, 4)
+
+#define REW_STAT_CFG_STAT_MODE GENMASK(1, 0)
+#define REW_STAT_CFG_STAT_MODE_SET(x)\
+ FIELD_PREP(REW_STAT_CFG_STAT_MODE, x)
+#define REW_STAT_CFG_STAT_MODE_GET(x)\
+ FIELD_GET(REW_STAT_CFG_STAT_MODE, x)
+
+/* SYS:SYSTEM:RESET_CFG */
+#define SYS_RESET_CFG __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 0, 0, 1, 4)
+
+#define SYS_RESET_CFG_CORE_ENA BIT(0)
+#define SYS_RESET_CFG_CORE_ENA_SET(x)\
+ FIELD_PREP(SYS_RESET_CFG_CORE_ENA, x)
+#define SYS_RESET_CFG_CORE_ENA_GET(x)\
+ FIELD_GET(SYS_RESET_CFG_CORE_ENA, x)
+
+/* SYS:SYSTEM:PORT_MODE */
+#define SYS_PORT_MODE(r) __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 44, r, 10, 4)
+
+#define SYS_PORT_MODE_INCL_INJ_HDR GENMASK(5, 4)
+#define SYS_PORT_MODE_INCL_INJ_HDR_SET(x)\
+ FIELD_PREP(SYS_PORT_MODE_INCL_INJ_HDR, x)
+#define SYS_PORT_MODE_INCL_INJ_HDR_GET(x)\
+ FIELD_GET(SYS_PORT_MODE_INCL_INJ_HDR, x)
+
+#define SYS_PORT_MODE_INCL_XTR_HDR GENMASK(3, 2)
+#define SYS_PORT_MODE_INCL_XTR_HDR_SET(x)\
+ FIELD_PREP(SYS_PORT_MODE_INCL_XTR_HDR, x)
+#define SYS_PORT_MODE_INCL_XTR_HDR_GET(x)\
+ FIELD_GET(SYS_PORT_MODE_INCL_XTR_HDR, x)
+
+/* SYS:SYSTEM:FRONT_PORT_MODE */
+#define SYS_FRONT_PORT_MODE(r) __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 84, r, 8, 4)
+
+#define SYS_FRONT_PORT_MODE_HDX_MODE BIT(1)
+#define SYS_FRONT_PORT_MODE_HDX_MODE_SET(x)\
+ FIELD_PREP(SYS_FRONT_PORT_MODE_HDX_MODE, x)
+#define SYS_FRONT_PORT_MODE_HDX_MODE_GET(x)\
+ FIELD_GET(SYS_FRONT_PORT_MODE_HDX_MODE, x)
+
+/* SYS:SYSTEM:FRM_AGING */
+#define SYS_FRM_AGING __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 116, 0, 1, 4)
+
+#define SYS_FRM_AGING_AGE_TX_ENA BIT(20)
+#define SYS_FRM_AGING_AGE_TX_ENA_SET(x)\
+ FIELD_PREP(SYS_FRM_AGING_AGE_TX_ENA, x)
+#define SYS_FRM_AGING_AGE_TX_ENA_GET(x)\
+ FIELD_GET(SYS_FRM_AGING_AGE_TX_ENA, x)
+
+/* SYS:SYSTEM:STAT_CFG */
+#define SYS_STAT_CFG __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 120, 0, 1, 4)
+
+#define SYS_STAT_CFG_STAT_VIEW GENMASK(9, 0)
+#define SYS_STAT_CFG_STAT_VIEW_SET(x)\
+ FIELD_PREP(SYS_STAT_CFG_STAT_VIEW, x)
+#define SYS_STAT_CFG_STAT_VIEW_GET(x)\
+ FIELD_GET(SYS_STAT_CFG_STAT_VIEW, x)
+
+/* SYS:PAUSE_CFG:PAUSE_CFG */
+#define SYS_PAUSE_CFG(r) __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 0, r, 9, 4)
+
+#define SYS_PAUSE_CFG_PAUSE_START GENMASK(18, 10)
+#define SYS_PAUSE_CFG_PAUSE_START_SET(x)\
+ FIELD_PREP(SYS_PAUSE_CFG_PAUSE_START, x)
+#define SYS_PAUSE_CFG_PAUSE_START_GET(x)\
+ FIELD_GET(SYS_PAUSE_CFG_PAUSE_START, x)
+
+#define SYS_PAUSE_CFG_PAUSE_STOP GENMASK(9, 1)
+#define SYS_PAUSE_CFG_PAUSE_STOP_SET(x)\
+ FIELD_PREP(SYS_PAUSE_CFG_PAUSE_STOP, x)
+#define SYS_PAUSE_CFG_PAUSE_STOP_GET(x)\
+ FIELD_GET(SYS_PAUSE_CFG_PAUSE_STOP, x)
+
+#define SYS_PAUSE_CFG_PAUSE_ENA BIT(0)
+#define SYS_PAUSE_CFG_PAUSE_ENA_SET(x)\
+ FIELD_PREP(SYS_PAUSE_CFG_PAUSE_ENA, x)
+#define SYS_PAUSE_CFG_PAUSE_ENA_GET(x)\
+ FIELD_GET(SYS_PAUSE_CFG_PAUSE_ENA, x)
+
+/* SYS:PAUSE_CFG:ATOP */
+#define SYS_ATOP(r) __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 40, r, 9, 4)
+
+/* SYS:PAUSE_CFG:ATOP_TOT_CFG */
+#define SYS_ATOP_TOT_CFG __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 76, 0, 1, 4)
+
+/* SYS:PAUSE_CFG:MAC_FC_CFG */
+#define SYS_MAC_FC_CFG(r) __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 80, r, 8, 4)
+
+#define SYS_MAC_FC_CFG_FC_LINK_SPEED GENMASK(27, 26)
+#define SYS_MAC_FC_CFG_FC_LINK_SPEED_SET(x)\
+ FIELD_PREP(SYS_MAC_FC_CFG_FC_LINK_SPEED, x)
+#define SYS_MAC_FC_CFG_FC_LINK_SPEED_GET(x)\
+ FIELD_GET(SYS_MAC_FC_CFG_FC_LINK_SPEED, x)
+
+#define SYS_MAC_FC_CFG_FC_LATENCY_CFG GENMASK(25, 20)
+#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_SET(x)\
+ FIELD_PREP(SYS_MAC_FC_CFG_FC_LATENCY_CFG, x)
+#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_GET(x)\
+ FIELD_GET(SYS_MAC_FC_CFG_FC_LATENCY_CFG, x)
+
+#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA BIT(18)
+#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA_SET(x)\
+ FIELD_PREP(SYS_MAC_FC_CFG_ZERO_PAUSE_ENA, x)
+#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA_GET(x)\
+ FIELD_GET(SYS_MAC_FC_CFG_ZERO_PAUSE_ENA, x)
+
+#define SYS_MAC_FC_CFG_TX_FC_ENA BIT(17)
+#define SYS_MAC_FC_CFG_TX_FC_ENA_SET(x)\
+ FIELD_PREP(SYS_MAC_FC_CFG_TX_FC_ENA, x)
+#define SYS_MAC_FC_CFG_TX_FC_ENA_GET(x)\
+ FIELD_GET(SYS_MAC_FC_CFG_TX_FC_ENA, x)
+
+#define SYS_MAC_FC_CFG_RX_FC_ENA BIT(16)
+#define SYS_MAC_FC_CFG_RX_FC_ENA_SET(x)\
+ FIELD_PREP(SYS_MAC_FC_CFG_RX_FC_ENA, x)
+#define SYS_MAC_FC_CFG_RX_FC_ENA_GET(x)\
+ FIELD_GET(SYS_MAC_FC_CFG_RX_FC_ENA, x)
+
+#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG GENMASK(15, 0)
+#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG_SET(x)\
+ FIELD_PREP(SYS_MAC_FC_CFG_PAUSE_VAL_CFG, x)
+#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG_GET(x)\
+ FIELD_GET(SYS_MAC_FC_CFG_PAUSE_VAL_CFG, x)
+
+/* SYS:STAT:CNT */
+#define SYS_CNT(g) __REG(TARGET_SYS, 0, 1, 0, g, 896, 4, 0, 0, 1, 4)
+
+/* SYS:RAM_CTRL:RAM_INIT */
+#define SYS_RAM_INIT __REG(TARGET_SYS, 0, 1, 4432, 0, 1, 4, 0, 0, 1, 4)
+
+#define SYS_RAM_INIT_RAM_INIT BIT(1)
+#define SYS_RAM_INIT_RAM_INIT_SET(x)\
+ FIELD_PREP(SYS_RAM_INIT_RAM_INIT, x)
+#define SYS_RAM_INIT_RAM_INIT_GET(x)\
+ FIELD_GET(SYS_RAM_INIT_RAM_INIT, x)
+
+/* VCAP:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */
+#define VCAP_UPDATE_CTRL(t) __REG(TARGET_VCAP, t, 3, 0, 0, 1, 8, 0, 0, 1, 4)
+
+#define VCAP_UPDATE_CTRL_UPDATE_CMD GENMASK(24, 22)
+#define VCAP_UPDATE_CTRL_UPDATE_CMD_SET(x)\
+ FIELD_PREP(VCAP_UPDATE_CTRL_UPDATE_CMD, x)
+#define VCAP_UPDATE_CTRL_UPDATE_CMD_GET(x)\
+ FIELD_GET(VCAP_UPDATE_CTRL_UPDATE_CMD, x)
+
+#define VCAP_UPDATE_CTRL_UPDATE_ENTRY_DIS BIT(21)
+#define VCAP_UPDATE_CTRL_UPDATE_ENTRY_DIS_SET(x)\
+ FIELD_PREP(VCAP_UPDATE_CTRL_UPDATE_ENTRY_DIS, x)
+#define VCAP_UPDATE_CTRL_UPDATE_ENTRY_DIS_GET(x)\
+ FIELD_GET(VCAP_UPDATE_CTRL_UPDATE_ENTRY_DIS, x)
+
+#define VCAP_UPDATE_CTRL_UPDATE_ACTION_DIS BIT(20)
+#define VCAP_UPDATE_CTRL_UPDATE_ACTION_DIS_SET(x)\
+ FIELD_PREP(VCAP_UPDATE_CTRL_UPDATE_ACTION_DIS, x)
+#define VCAP_UPDATE_CTRL_UPDATE_ACTION_DIS_GET(x)\
+ FIELD_GET(VCAP_UPDATE_CTRL_UPDATE_ACTION_DIS, x)
+
+#define VCAP_UPDATE_CTRL_UPDATE_CNT_DIS BIT(19)
+#define VCAP_UPDATE_CTRL_UPDATE_CNT_DIS_SET(x)\
+ FIELD_PREP(VCAP_UPDATE_CTRL_UPDATE_CNT_DIS, x)
+#define VCAP_UPDATE_CTRL_UPDATE_CNT_DIS_GET(x)\
+ FIELD_GET(VCAP_UPDATE_CTRL_UPDATE_CNT_DIS, x)
+
+#define VCAP_UPDATE_CTRL_UPDATE_ADDR GENMASK(18, 3)
+#define VCAP_UPDATE_CTRL_UPDATE_ADDR_SET(x)\
+ FIELD_PREP(VCAP_UPDATE_CTRL_UPDATE_ADDR, x)
+#define VCAP_UPDATE_CTRL_UPDATE_ADDR_GET(x)\
+ FIELD_GET(VCAP_UPDATE_CTRL_UPDATE_ADDR, x)
+
+#define VCAP_UPDATE_CTRL_UPDATE_SHOT BIT(2)
+#define VCAP_UPDATE_CTRL_UPDATE_SHOT_SET(x)\
+ FIELD_PREP(VCAP_UPDATE_CTRL_UPDATE_SHOT, x)
+#define VCAP_UPDATE_CTRL_UPDATE_SHOT_GET(x)\
+ FIELD_GET(VCAP_UPDATE_CTRL_UPDATE_SHOT, x)
+
+#define VCAP_UPDATE_CTRL_CLEAR_CACHE BIT(1)
+#define VCAP_UPDATE_CTRL_CLEAR_CACHE_SET(x)\
+ FIELD_PREP(VCAP_UPDATE_CTRL_CLEAR_CACHE, x)
+#define VCAP_UPDATE_CTRL_CLEAR_CACHE_GET(x)\
+ FIELD_GET(VCAP_UPDATE_CTRL_CLEAR_CACHE, x)
+
+#define VCAP_UPDATE_CTRL_MV_TRAFFIC_IGN BIT(0)
+#define VCAP_UPDATE_CTRL_MV_TRAFFIC_IGN_SET(x)\
+ FIELD_PREP(VCAP_UPDATE_CTRL_MV_TRAFFIC_IGN, x)
+#define VCAP_UPDATE_CTRL_MV_TRAFFIC_IGN_GET(x)\
+ FIELD_GET(VCAP_UPDATE_CTRL_MV_TRAFFIC_IGN, x)
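+
+/* A sketch of the indirect access flow built around these fields: fill the
+ * cache registers (VCAP_ENTRY_DAT/VCAP_MASK_DAT/VCAP_ACTION_DAT), write the
+ * command and address here with UPDATE_SHOT set, then poll until the HW
+ * clears UPDATE_SHOT to signal that the TCAM transfer completed.
+ */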
+
+/* VCAP:VCAP_CORE_CFG:VCAP_MV_CFG */
+#define VCAP_MV_CFG(t) __REG(TARGET_VCAP, t, 3, 0, 0, 1, 8, 4, 0, 1, 4)
+
+#define VCAP_MV_CFG_MV_NUM_POS GENMASK(31, 16)
+#define VCAP_MV_CFG_MV_NUM_POS_SET(x)\
+ FIELD_PREP(VCAP_MV_CFG_MV_NUM_POS, x)
+#define VCAP_MV_CFG_MV_NUM_POS_GET(x)\
+ FIELD_GET(VCAP_MV_CFG_MV_NUM_POS, x)
+
+#define VCAP_MV_CFG_MV_SIZE GENMASK(15, 0)
+#define VCAP_MV_CFG_MV_SIZE_SET(x)\
+ FIELD_PREP(VCAP_MV_CFG_MV_SIZE, x)
+#define VCAP_MV_CFG_MV_SIZE_GET(x)\
+ FIELD_GET(VCAP_MV_CFG_MV_SIZE, x)
+
+/* VCAP:VCAP_CORE_CACHE:VCAP_ENTRY_DAT */
+#define VCAP_ENTRY_DAT(t, r) __REG(TARGET_VCAP, t, 3, 8, 0, 1, 904, 0, r, 64, 4)
+
+/* VCAP:VCAP_CORE_CACHE:VCAP_MASK_DAT */
+#define VCAP_MASK_DAT(t, r) __REG(TARGET_VCAP, t, 3, 8, 0, 1, 904, 256, r, 64, 4)
+
+/* VCAP:VCAP_CORE_CACHE:VCAP_ACTION_DAT */
+#define VCAP_ACTION_DAT(t, r) __REG(TARGET_VCAP, t, 3, 8, 0, 1, 904, 512, r, 64, 4)
+
+/* VCAP:VCAP_CORE_CACHE:VCAP_CNT_DAT */
+#define VCAP_CNT_DAT(t, r) __REG(TARGET_VCAP, t, 3, 8, 0, 1, 904, 768, r, 32, 4)
+
+/* VCAP:VCAP_CORE_CACHE:VCAP_CNT_FW_DAT */
+#define VCAP_CNT_FW_DAT(t) __REG(TARGET_VCAP, t, 3, 8, 0, 1, 904, 896, 0, 1, 4)
+
+/* VCAP:VCAP_CORE_CACHE:VCAP_TG_DAT */
+#define VCAP_TG_DAT(t) __REG(TARGET_VCAP, t, 3, 8, 0, 1, 904, 900, 0, 1, 4)
+
+/* VCAP:VCAP_CORE_MAP:VCAP_CORE_IDX */
+#define VCAP_CORE_IDX(t) __REG(TARGET_VCAP, t, 3, 912, 0, 1, 8, 0, 0, 1, 4)
+
+#define VCAP_CORE_IDX_CORE_IDX GENMASK(3, 0)
+#define VCAP_CORE_IDX_CORE_IDX_SET(x)\
+ FIELD_PREP(VCAP_CORE_IDX_CORE_IDX, x)
+#define VCAP_CORE_IDX_CORE_IDX_GET(x)\
+ FIELD_GET(VCAP_CORE_IDX_CORE_IDX, x)
+
+/* VCAP:VCAP_CORE_MAP:VCAP_CORE_MAP */
+#define VCAP_CORE_MAP(t) __REG(TARGET_VCAP, t, 3, 912, 0, 1, 8, 4, 0, 1, 4)
+
+#define VCAP_CORE_MAP_CORE_MAP GENMASK(2, 0)
+#define VCAP_CORE_MAP_CORE_MAP_SET(x)\
+ FIELD_PREP(VCAP_CORE_MAP_CORE_MAP, x)
+#define VCAP_CORE_MAP_CORE_MAP_GET(x)\
+ FIELD_GET(VCAP_CORE_MAP_CORE_MAP, x)
+
+/* VCAP:VCAP_CONST:VCAP_VER */
+#define VCAP_VER(t) __REG(TARGET_VCAP, t, 3, 924, 0, 1, 40, 0, 0, 1, 4)
+
+/* VCAP:VCAP_CONST:ENTRY_WIDTH */
+#define VCAP_ENTRY_WIDTH(t) __REG(TARGET_VCAP, t, 3, 924, 0, 1, 40, 4, 0, 1, 4)
+
+/* VCAP:VCAP_CONST:ENTRY_CNT */
+#define VCAP_ENTRY_CNT(t) __REG(TARGET_VCAP, t, 3, 924, 0, 1, 40, 8, 0, 1, 4)
+
+/* VCAP:VCAP_CONST:ENTRY_SWCNT */
+#define VCAP_ENTRY_SWCNT(t) __REG(TARGET_VCAP, t, 3, 924, 0, 1, 40, 12, 0, 1, 4)
+
+/* VCAP:VCAP_CONST:ENTRY_TG_WIDTH */
+#define VCAP_ENTRY_TG_WIDTH(t) __REG(TARGET_VCAP, t, 3, 924, 0, 1, 40, 16, 0, 1, 4)
+
+/* VCAP:VCAP_CONST:ACTION_DEF_CNT */
+#define VCAP_ACTION_DEF_CNT(t) __REG(TARGET_VCAP, t, 3, 924, 0, 1, 40, 20, 0, 1, 4)
+
+/* VCAP:VCAP_CONST:ACTION_WIDTH */
+#define VCAP_ACTION_WIDTH(t) __REG(TARGET_VCAP, t, 3, 924, 0, 1, 40, 24, 0, 1, 4)
+
+/* VCAP:VCAP_CONST:CNT_WIDTH */
+#define VCAP_CNT_WIDTH(t) __REG(TARGET_VCAP, t, 3, 924, 0, 1, 40, 28, 0, 1, 4)
+
+/* VCAP:VCAP_CONST:CORE_CNT */
+#define VCAP_CORE_CNT(t) __REG(TARGET_VCAP, t, 3, 924, 0, 1, 40, 32, 0, 1, 4)
+
+/* VCAP:VCAP_CONST:IF_CNT */
+#define VCAP_IF_CNT(t) __REG(TARGET_VCAP, t, 3, 924, 0, 1, 40, 36, 0, 1, 4)
+
+#endif /* _LAN966X_REGS_H_ */
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
new file mode 100644
index 0000000000..1c88120eb2
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
@@ -0,0 +1,664 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/if_bridge.h>
+#include <net/switchdev.h>
+
+#include "lan966x_main.h"
+
+static struct notifier_block lan966x_netdevice_nb __read_mostly;
+
+static void lan966x_port_set_mcast_ip_flood(struct lan966x_port *port,
+ u32 pgid_ip)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 flood_mask_ip;
+
+ flood_mask_ip = lan_rd(lan966x, ANA_PGID(pgid_ip));
+ flood_mask_ip = ANA_PGID_PGID_GET(flood_mask_ip);
+
+	/* If mcast snooping is not enabled, then use the mcast flood mask
+	 * to decide whether to enable multicast flooding or not.
+	 */
+ if (!port->mcast_ena) {
+ u32 flood_mask;
+
+ flood_mask = lan_rd(lan966x, ANA_PGID(PGID_MC));
+ flood_mask = ANA_PGID_PGID_GET(flood_mask);
+
+ if (flood_mask & BIT(port->chip_port))
+ flood_mask_ip |= BIT(port->chip_port);
+ else
+ flood_mask_ip &= ~BIT(port->chip_port);
+ } else {
+ flood_mask_ip &= ~BIT(port->chip_port);
+ }
+
+ lan_rmw(ANA_PGID_PGID_SET(flood_mask_ip),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(pgid_ip));
+}
+
+static void lan966x_port_set_mcast_flood(struct lan966x_port *port,
+ bool enabled)
+{
+ u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_MC));
+
+ val = ANA_PGID_PGID_GET(val);
+ if (enabled)
+ val |= BIT(port->chip_port);
+ else
+ val &= ~BIT(port->chip_port);
+
+ lan_rmw(ANA_PGID_PGID_SET(val),
+ ANA_PGID_PGID,
+ port->lan966x, ANA_PGID(PGID_MC));
+
+ if (!port->mcast_ena) {
+ lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV4);
+ lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV6);
+ }
+}
+
+static void lan966x_port_set_ucast_flood(struct lan966x_port *port,
+ bool enabled)
+{
+ u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_UC));
+
+ val = ANA_PGID_PGID_GET(val);
+ if (enabled)
+ val |= BIT(port->chip_port);
+ else
+ val &= ~BIT(port->chip_port);
+
+ lan_rmw(ANA_PGID_PGID_SET(val),
+ ANA_PGID_PGID,
+ port->lan966x, ANA_PGID(PGID_UC));
+}
+
+static void lan966x_port_set_bcast_flood(struct lan966x_port *port,
+ bool enabled)
+{
+ u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_BC));
+
+ val = ANA_PGID_PGID_GET(val);
+ if (enabled)
+ val |= BIT(port->chip_port);
+ else
+ val &= ~BIT(port->chip_port);
+
+ lan_rmw(ANA_PGID_PGID_SET(val),
+ ANA_PGID_PGID,
+ port->lan966x, ANA_PGID(PGID_BC));
+}
+
+static void lan966x_port_set_learning(struct lan966x_port *port, bool enabled)
+{
+ lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(enabled),
+ ANA_PORT_CFG_LEARN_ENA,
+ port->lan966x, ANA_PORT_CFG(port->chip_port));
+
+ port->learn_ena = enabled;
+}
+
+static void lan966x_port_bridge_flags(struct lan966x_port *port,
+ struct switchdev_brport_flags flags)
+{
+ if (flags.mask & BR_MCAST_FLOOD)
+ lan966x_port_set_mcast_flood(port,
+ !!(flags.val & BR_MCAST_FLOOD));
+
+ if (flags.mask & BR_FLOOD)
+ lan966x_port_set_ucast_flood(port,
+ !!(flags.val & BR_FLOOD));
+
+ if (flags.mask & BR_BCAST_FLOOD)
+ lan966x_port_set_bcast_flood(port,
+ !!(flags.val & BR_BCAST_FLOOD));
+
+ if (flags.mask & BR_LEARNING)
+ lan966x_port_set_learning(port,
+ !!(flags.val & BR_LEARNING));
+}
+
+static int lan966x_port_pre_bridge_flags(struct lan966x_port *port,
+ struct switchdev_brport_flags flags)
+{
+ if (flags.mask & ~(BR_MCAST_FLOOD | BR_FLOOD | BR_BCAST_FLOOD |
+ BR_LEARNING))
+ return -EINVAL;
+
+ return 0;
+}
+
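+/* Recompute the per-port source masks (PGID_SRC): a frame received on port i
+ * may only be forwarded to the ports set in its mask. A forwarding port sees
+ * all other forwarding ports except members of its own LAG, and the CPU port
+ * is always reachable.
+ */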
+void lan966x_update_fwd_mask(struct lan966x *lan966x)
+{
+ int i;
+
+ for (i = 0; i < lan966x->num_phys_ports; i++) {
+ struct lan966x_port *port = lan966x->ports[i];
+ unsigned long mask = 0;
+
+ if (port && lan966x->bridge_fwd_mask & BIT(i)) {
+ mask = lan966x->bridge_fwd_mask & ~BIT(i);
+
+ if (port->bond)
+ mask &= ~lan966x_lag_get_mask(lan966x,
+ port->bond);
+ }
+
+ mask |= BIT(CPU_PORT);
+
+ lan_wr(ANA_PGID_PGID_SET(mask),
+ lan966x, ANA_PGID(PGID_SRC + i));
+ }
+}
+
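+/* Learning stays enabled only in the learning and forwarding STP states, and
+ * only if the bridge port has learning allowed; the forwarding mask includes
+ * the port only in the forwarding state.
+ */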
+void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state)
+{
+ struct lan966x *lan966x = port->lan966x;
+ bool learn_ena = false;
+
+ if ((state == BR_STATE_FORWARDING || state == BR_STATE_LEARNING) &&
+ port->learn_ena)
+ learn_ena = true;
+
+ if (state == BR_STATE_FORWARDING)
+ lan966x->bridge_fwd_mask |= BIT(port->chip_port);
+ else
+ lan966x->bridge_fwd_mask &= ~BIT(port->chip_port);
+
+ lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(learn_ena),
+ ANA_PORT_CFG_LEARN_ENA,
+ lan966x, ANA_PORT_CFG(port->chip_port));
+
+ lan966x_update_fwd_mask(lan966x);
+}
+
+void lan966x_port_ageing_set(struct lan966x_port *port,
+ unsigned long ageing_clock_t)
+{
+ unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
+ u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
+
+ lan966x_mac_set_ageing(port->lan966x, ageing_time);
+}
+
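+/* When multicast snooping is enabled, IGMP/MLD frames are redirected to the
+ * CPU and the snooped MDB entries are restored; when it is disabled, the
+ * entries are removed again and IP multicast falls back to plain flooding.
+ */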
+static void lan966x_port_mc_set(struct lan966x_port *port, bool mcast_ena)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ port->mcast_ena = mcast_ena;
+ if (mcast_ena)
+ lan966x_mdb_restore_entries(lan966x);
+ else
+ lan966x_mdb_clear_entries(lan966x);
+
+ lan_rmw(ANA_CPU_FWD_CFG_IGMP_REDIR_ENA_SET(mcast_ena) |
+ ANA_CPU_FWD_CFG_MLD_REDIR_ENA_SET(mcast_ena) |
+ ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA_SET(mcast_ena),
+ ANA_CPU_FWD_CFG_IGMP_REDIR_ENA |
+ ANA_CPU_FWD_CFG_MLD_REDIR_ENA |
+ ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA,
+ lan966x, ANA_CPU_FWD_CFG(port->chip_port));
+
+ lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV4);
+ lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV6);
+}
+
+static int lan966x_port_attr_set(struct net_device *dev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ int err = 0;
+
+ if (ctx && ctx != port)
+ return 0;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ lan966x_port_bridge_flags(port, attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ err = lan966x_port_pre_bridge_flags(port, attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ lan966x_port_stp_state_set(port, attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ lan966x_port_ageing_set(port, attr->u.ageing_time);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ lan966x_vlan_port_set_vlan_aware(port, attr->u.vlan_filtering);
+ lan966x_vlan_port_apply(port);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
+ lan966x_port_mc_set(port, !attr->u.mc_disabled);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int lan966x_port_bridge_join(struct lan966x_port *port,
+ struct net_device *brport_dev,
+ struct net_device *bridge,
+ struct netlink_ext_ack *extack)
+{
+ struct switchdev_brport_flags flags = {0};
+ struct lan966x *lan966x = port->lan966x;
+ struct net_device *dev = port->dev;
+ int err;
+
+ if (!lan966x->bridge_mask) {
+ lan966x->bridge = bridge;
+ } else {
+ if (lan966x->bridge != bridge) {
+			NL_SET_ERR_MSG_MOD(extack, "Adding port to a different bridge is not allowed");
+ return -ENODEV;
+ }
+ }
+
+ err = switchdev_bridge_port_offload(brport_dev, dev, port,
+ &lan966x_switchdev_nb,
+ &lan966x_switchdev_blocking_nb,
+ false, extack);
+ if (err)
+ return err;
+
+ lan966x->bridge_mask |= BIT(port->chip_port);
+
+ flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
+ flags.val = flags.mask;
+ lan966x_port_bridge_flags(port, flags);
+
+ return 0;
+}
+
+static void lan966x_port_bridge_leave(struct lan966x_port *port,
+ struct net_device *bridge)
+{
+ struct switchdev_brport_flags flags = {0};
+ struct lan966x *lan966x = port->lan966x;
+
+ flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
+ flags.val = flags.mask & ~BR_LEARNING;
+ lan966x_port_bridge_flags(port, flags);
+
+ lan966x->bridge_mask &= ~BIT(port->chip_port);
+
+ if (!lan966x->bridge_mask)
+ lan966x->bridge = NULL;
+
+ /* Set the port back to host mode */
+ lan966x_vlan_port_set_vlan_aware(port, false);
+ lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
+ lan966x_vlan_port_apply(port);
+}
+
+int lan966x_port_changeupper(struct net_device *dev,
+ struct net_device *brport_dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct netlink_ext_ack *extack;
+ int err = 0;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking)
+ err = lan966x_port_bridge_join(port, brport_dev,
+ info->upper_dev,
+ extack);
+ else
+ lan966x_port_bridge_leave(port, info->upper_dev);
+ }
+
+ if (netif_is_lag_master(info->upper_dev)) {
+ if (info->linking)
+ err = lan966x_lag_port_join(port, info->upper_dev,
+ info->upper_dev,
+ extack);
+ else
+ lan966x_lag_port_leave(port, info->upper_dev);
+ }
+
+ return err;
+}
+
+int lan966x_port_prechangeupper(struct net_device *dev,
+ struct net_device *brport_dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ int err = NOTIFY_DONE;
+
+ if (netif_is_bridge_master(info->upper_dev) && !info->linking) {
+ switchdev_bridge_port_unoffload(port->dev, port, NULL, NULL);
+ lan966x_fdb_flush_workqueue(port->lan966x);
+ }
+
+ if (netif_is_lag_master(info->upper_dev)) {
+ err = lan966x_lag_port_prechangeupper(dev, info);
+ if (err || info->linking)
+ return err;
+
+ switchdev_bridge_port_unoffload(brport_dev, port, NULL, NULL);
+ lan966x_fdb_flush_workqueue(port->lan966x);
+ }
+
+ return err;
+}
+
+static int lan966x_foreign_bridging_check(struct net_device *upper,
+ bool *has_foreign,
+ bool *seen_lan966x,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = NULL;
+ struct net_device *dev;
+ struct list_head *iter;
+
+ if (!netif_is_bridge_master(upper) &&
+ !netif_is_lag_master(upper))
+ return 0;
+
+ netdev_for_each_lower_dev(upper, dev, iter) {
+ if (lan966x_netdevice_check(dev)) {
+ struct lan966x_port *port = netdev_priv(dev);
+
+ if (lan966x) {
+				/* The upper already has at least one port of
+				 * a lan966x switch inside it; check that it
+				 * belongs to the same instance of the driver.
+				 */
+ if (port->lan966x != lan966x) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Bridging between multiple lan966x switches disallowed");
+ return -EINVAL;
+ }
+ } else {
+ /* This is the first lan966x port inside this
+ * upper device
+ */
+ lan966x = port->lan966x;
+ *seen_lan966x = true;
+ }
+ } else if (netif_is_lag_master(dev)) {
+			/* Allow bond interfaces that contain only lan966x
+			 * devices
+			 */
+ if (lan966x_foreign_bridging_check(dev, has_foreign,
+ seen_lan966x,
+ extack))
+ return -EINVAL;
+ } else {
+ *has_foreign = true;
+ }
+
+ if (*seen_lan966x && *has_foreign) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Bridging lan966x ports with foreign interfaces disallowed");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int lan966x_bridge_check(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ bool has_foreign = false;
+ bool seen_lan966x = false;
+
+ return lan966x_foreign_bridging_check(info->upper_dev,
+ &has_foreign,
+ &seen_lan966x,
+ info->info.extack);
+}
+
+static int lan966x_netdevice_port_event(struct net_device *dev,
+ struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ int err = 0;
+
+ if (!lan966x_netdevice_check(dev)) {
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ case NETDEV_PRECHANGEUPPER:
+ err = lan966x_bridge_check(dev, ptr);
+ if (err)
+ return err;
+
+ if (netif_is_lag_master(dev)) {
+ if (event == NETDEV_CHANGEUPPER)
+ err = lan966x_lag_netdev_changeupper(dev,
+ ptr);
+ else
+ err = lan966x_lag_netdev_prechangeupper(dev,
+ ptr);
+
+ return err;
+ }
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+ }
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ err = lan966x_port_prechangeupper(dev, dev, ptr);
+ break;
+ case NETDEV_CHANGEUPPER:
+ err = lan966x_bridge_check(dev, ptr);
+ if (err)
+ return err;
+
+ err = lan966x_port_changeupper(dev, dev, ptr);
+ break;
+ case NETDEV_CHANGELOWERSTATE:
+ err = lan966x_lag_port_changelowerstate(dev, ptr);
+ break;
+ }
+
+ return err;
+}
+
+static int lan966x_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ int ret;
+
+ ret = lan966x_netdevice_port_event(dev, nb, event, ptr);
+
+ return notifier_from_errno(ret);
+}
+
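+/* foreign_dev is considered foreign unless it is either the bridge offloaded
+ * by this switch or a LAG that has at least one lan966x port as a member.
+ */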
+static bool lan966x_foreign_dev_check(const struct net_device *dev,
+ const struct net_device *foreign_dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ int i;
+
+ if (netif_is_bridge_master(foreign_dev))
+ if (lan966x->bridge == foreign_dev)
+ return false;
+
+ if (netif_is_lag_master(foreign_dev))
+ for (i = 0; i < lan966x->num_phys_ports; ++i)
+ if (lan966x->ports[i] &&
+ lan966x->ports[i]->bond == foreign_dev)
+ return false;
+
+ return true;
+}
+
+static int lan966x_switchdev_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ lan966x_netdevice_check,
+ lan966x_port_attr_set);
+ return notifier_from_errno(err);
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
+ lan966x_netdevice_check,
+ lan966x_foreign_dev_check,
+ lan966x_handle_fdb);
+ return notifier_from_errno(err);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int lan966x_handle_port_vlan_add(struct lan966x_port *port,
+ const struct switchdev_obj *obj)
+{
+ const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct lan966x *lan966x = port->lan966x;
+
+ if (!netif_is_bridge_master(obj->orig_dev))
+ lan966x_vlan_port_add_vlan(port, v->vid,
+ v->flags & BRIDGE_VLAN_INFO_PVID,
+ v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+ else
+ lan966x_vlan_cpu_add_vlan(lan966x, v->vid);
+
+ return 0;
+}
+
+static int lan966x_handle_port_obj_add(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ int err;
+
+ if (ctx && ctx != port)
+ return 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = lan966x_handle_port_vlan_add(port, obj);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = lan966x_handle_port_mdb_add(port, obj);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int lan966x_handle_port_vlan_del(struct lan966x_port *port,
+ const struct switchdev_obj *obj)
+{
+ const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct lan966x *lan966x = port->lan966x;
+
+ if (!netif_is_bridge_master(obj->orig_dev))
+ lan966x_vlan_port_del_vlan(port, v->vid);
+ else
+ lan966x_vlan_cpu_del_vlan(lan966x, v->vid);
+
+ return 0;
+}
+
+static int lan966x_handle_port_obj_del(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ int err;
+
+ if (ctx && ctx != port)
+ return 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = lan966x_handle_port_vlan_del(port, obj);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = lan966x_handle_port_mdb_del(port, obj);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int lan966x_switchdev_blocking_event(struct notifier_block *nb,
+ unsigned long event,
+ void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ lan966x_netdevice_check,
+ lan966x_handle_port_obj_add);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ lan966x_netdevice_check,
+ lan966x_handle_port_obj_del);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ lan966x_netdevice_check,
+ lan966x_port_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block lan966x_netdevice_nb __read_mostly = {
+ .notifier_call = lan966x_netdevice_event,
+};
+
+struct notifier_block lan966x_switchdev_nb __read_mostly = {
+ .notifier_call = lan966x_switchdev_event,
+};
+
+struct notifier_block lan966x_switchdev_blocking_nb __read_mostly = {
+ .notifier_call = lan966x_switchdev_blocking_event,
+};
+
+void lan966x_register_notifier_blocks(void)
+{
+ register_netdevice_notifier(&lan966x_netdevice_nb);
+ register_switchdev_notifier(&lan966x_switchdev_nb);
+ register_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb);
+}
+
+void lan966x_unregister_notifier_blocks(void)
+{
+ unregister_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb);
+ unregister_switchdev_notifier(&lan966x_switchdev_nb);
+ unregister_netdevice_notifier(&lan966x_netdevice_nb);
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_taprio.c b/drivers/net/ethernet/microchip/lan966x/lan966x_taprio.c
new file mode 100644
index 0000000000..3f5b212066
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_taprio.c
@@ -0,0 +1,528 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+#define LAN966X_TAPRIO_TIMEOUT_MS 1000
+#define LAN966X_TAPRIO_ENTRIES_PER_PORT 2
+
+/* Minimum supported cycle time in nanoseconds */
+#define LAN966X_TAPRIO_MIN_CYCLE_TIME_NS NSEC_PER_USEC
+
+/* Maximum supported cycle time in nanoseconds */
+#define LAN966X_TAPRIO_MAX_CYCLE_TIME_NS (NSEC_PER_SEC - 1)
+
+/* Total number of TAS GCL entries */
+#define LAN966X_TAPRIO_NUM_GCL 256
+
+/* TAPRIO link speeds for calculation of guard band */
+enum lan966x_taprio_link_speed {
+ LAN966X_TAPRIO_SPEED_NO_GB,
+ LAN966X_TAPRIO_SPEED_10,
+ LAN966X_TAPRIO_SPEED_100,
+ LAN966X_TAPRIO_SPEED_1000,
+ LAN966X_TAPRIO_SPEED_2500,
+};
+
+/* TAPRIO list states */
+enum lan966x_taprio_state {
+ LAN966X_TAPRIO_STATE_ADMIN,
+ LAN966X_TAPRIO_STATE_ADVANCING,
+ LAN966X_TAPRIO_STATE_PENDING,
+ LAN966X_TAPRIO_STATE_OPERATING,
+ LAN966X_TAPRIO_STATE_TERMINATING,
+ LAN966X_TAPRIO_STATE_MAX,
+};
+
+/* TAPRIO GCL command */
+enum lan966x_taprio_gcl_cmd {
+ LAN966X_TAPRIO_GCL_CMD_SET_GATE_STATES = 0,
+};
+
+static u32 lan966x_taprio_list_index(struct lan966x_port *port, u8 entry)
+{
+ return port->chip_port * LAN966X_TAPRIO_ENTRIES_PER_PORT + entry;
+}
+
+static u32 lan966x_taprio_list_state_get(struct lan966x_port *port)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 val;
+
+ val = lan_rd(lan966x, QSYS_TAS_LST);
+ return QSYS_TAS_LST_LIST_STATE_GET(val);
+}
+
+static u32 lan966x_taprio_list_index_state_get(struct lan966x_port *port,
+ u32 list)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ lan_rmw(QSYS_TAS_CFG_CTRL_LIST_NUM_SET(list),
+ QSYS_TAS_CFG_CTRL_LIST_NUM,
+ lan966x, QSYS_TAS_CFG_CTRL);
+
+ return lan966x_taprio_list_state_get(port);
+}
+
+static void lan966x_taprio_list_state_set(struct lan966x_port *port,
+ u32 state)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ lan_rmw(QSYS_TAS_LST_LIST_STATE_SET(state),
+ QSYS_TAS_LST_LIST_STATE,
+ lan966x, QSYS_TAS_LST);
+}
+
+static int lan966x_taprio_list_shutdown(struct lan966x_port *port,
+ u32 list)
+{
+ struct lan966x *lan966x = port->lan966x;
+ bool pending, operating;
+ unsigned long end;
+ u32 state;
+
+ end = jiffies + msecs_to_jiffies(LAN966X_TAPRIO_TIMEOUT_MS);
+	/* It may take several attempts to set the state of the list,
+	 * because the HW can overwrite it.
+	 */
+ do {
+ state = lan966x_taprio_list_state_get(port);
+
+ pending = false;
+ operating = false;
+
+ if (state == LAN966X_TAPRIO_STATE_ADVANCING ||
+ state == LAN966X_TAPRIO_STATE_PENDING) {
+ lan966x_taprio_list_state_set(port,
+ LAN966X_TAPRIO_STATE_ADMIN);
+ pending = true;
+ }
+
+ if (state == LAN966X_TAPRIO_STATE_OPERATING) {
+ lan966x_taprio_list_state_set(port,
+ LAN966X_TAPRIO_STATE_TERMINATING);
+ operating = true;
+ }
+
+		/* If the entry was pending and is now in admin, there is
+		 * nothing else to do, so just bail out
+		 */
+ state = lan966x_taprio_list_state_get(port);
+ if (pending &&
+ state == LAN966X_TAPRIO_STATE_ADMIN)
+ return 0;
+
+		/* If the list was operating and is now in terminating or
+		 * admin, it is OK to exit, but we still have to wait until
+		 * the list reaches admin. It is not required to set the
+		 * state again.
+		 */
+ if (operating &&
+ (state == LAN966X_TAPRIO_STATE_TERMINATING ||
+ state == LAN966X_TAPRIO_STATE_ADMIN))
+ break;
+
+ } while (!time_after(jiffies, end));
+
+ end = jiffies + msecs_to_jiffies(LAN966X_TAPRIO_TIMEOUT_MS);
+ do {
+ state = lan966x_taprio_list_state_get(port);
+ if (state == LAN966X_TAPRIO_STATE_ADMIN)
+ break;
+
+ } while (!time_after(jiffies, end));
+
+	/* If the list was in operating mode, it could be stopped while some
+	 * queues were closed, so make sure to restore "all-queues-open"
+	 */
+ if (operating) {
+ lan_wr(QSYS_TAS_GS_CTRL_HSCH_POS_SET(port->chip_port),
+ lan966x, QSYS_TAS_GS_CTRL);
+
+ lan_wr(QSYS_TAS_GATE_STATE_TAS_GATE_STATE_SET(0xff),
+ lan966x, QSYS_TAS_GATE_STATE);
+ }
+
+ return 0;
+}
+
+static int lan966x_taprio_shutdown(struct lan966x_port *port)
+{
+ u32 i, list, state;
+ int err;
+
+ for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) {
+ list = lan966x_taprio_list_index(port, i);
+ state = lan966x_taprio_list_index_state_get(port, list);
+ if (state == LAN966X_TAPRIO_STATE_ADMIN)
+ continue;
+
+ err = lan966x_taprio_list_shutdown(port, list);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* Find a suitable list for a new schedule. First priority is a list in state
+ * pending. Second priority is a list in state admin.
+ */
+static int lan966x_taprio_find_list(struct lan966x_port *port,
+ struct tc_taprio_qopt_offload *qopt,
+ int *new_list, int *obs_list)
+{
+ int state[LAN966X_TAPRIO_ENTRIES_PER_PORT];
+ int list[LAN966X_TAPRIO_ENTRIES_PER_PORT];
+ int err, oper = -1;
+ u32 i;
+
+ *new_list = -1;
+ *obs_list = -1;
+
+	/* If there is already an entry in operating mode, return this list in
+	 * obs_list, so that when the new list gets activated the operating
+	 * list is stopped. In this way it is possible to have smooth
+	 * transitions between the lists
+	 */
+ */
+ for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) {
+ list[i] = lan966x_taprio_list_index(port, i);
+ state[i] = lan966x_taprio_list_index_state_get(port, list[i]);
+ if (state[i] == LAN966X_TAPRIO_STATE_OPERATING)
+ oper = list[i];
+ }
+
+ for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) {
+ if (state[i] == LAN966X_TAPRIO_STATE_PENDING) {
+ err = lan966x_taprio_shutdown(port);
+ if (err)
+ return err;
+
+ *new_list = list[i];
+ *obs_list = (oper == -1) ? *new_list : oper;
+ return 0;
+ }
+ }
+
+ for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) {
+ if (state[i] == LAN966X_TAPRIO_STATE_ADMIN) {
+ *new_list = list[i];
+ *obs_list = (oper == -1) ? *new_list : oper;
+ return 0;
+ }
+ }
+
+ return -ENOSPC;
+}
+
+static int lan966x_taprio_check(struct tc_taprio_qopt_offload *qopt)
+{
+ u64 total_time = 0;
+ u32 i;
+
+	/* This is not supported by the HW */
+ if (qopt->cycle_time_extension)
+ return -EOPNOTSUPP;
+
+	/* There is a limited number of GCL entries that can be used; they
+	 * are shared by all ports
+	 */
+ if (qopt->num_entries > LAN966X_TAPRIO_NUM_GCL)
+ return -EINVAL;
+
+ /* Don't allow cycle times bigger than 1 sec or smaller than 1 usec */
+ if (qopt->cycle_time < LAN966X_TAPRIO_MIN_CYCLE_TIME_NS ||
+ qopt->cycle_time > LAN966X_TAPRIO_MAX_CYCLE_TIME_NS)
+ return -EINVAL;
+
+ for (i = 0; i < qopt->num_entries; ++i) {
+ struct tc_taprio_sched_entry *entry = &qopt->entries[i];
+
+ /* Don't allow intervals bigger than 1 sec or smaller than 1
+ * usec
+ */
+ if (entry->interval < LAN966X_TAPRIO_MIN_CYCLE_TIME_NS ||
+ entry->interval > LAN966X_TAPRIO_MAX_CYCLE_TIME_NS)
+ return -EINVAL;
+
+ if (qopt->entries[i].command != TC_TAPRIO_CMD_SET_GATES)
+ return -EINVAL;
+
+ total_time += qopt->entries[i].interval;
+ }
+
+	/* Don't allow the total time of the intervals to be bigger than 1 sec */
+ if (total_time > LAN966X_TAPRIO_MAX_CYCLE_TIME_NS)
+ return -EINVAL;
+
+	/* The HW expects the cycle time to be at least as big as the sum of
+	 * all GCL intervals
+	 */
+ if (qopt->cycle_time < total_time)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int lan966x_taprio_gcl_free_get(struct lan966x_port *port,
+ unsigned long *free_list)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 num_free, state, list;
+ u32 base, next, max_list;
+
+ /* By default everything is free */
+ bitmap_fill(free_list, LAN966X_TAPRIO_NUM_GCL);
+ num_free = LAN966X_TAPRIO_NUM_GCL;
+
+	/* Iterate over all GCL entries to find out which ones are free, and
+	 * mark those that are not.
+	 */
+ max_list = lan966x->num_phys_ports * LAN966X_TAPRIO_ENTRIES_PER_PORT;
+ for (list = 0; list < max_list; ++list) {
+ state = lan966x_taprio_list_index_state_get(port, list);
+ if (state == LAN966X_TAPRIO_STATE_ADMIN)
+ continue;
+
+ base = lan_rd(lan966x, QSYS_TAS_LIST_CFG);
+ base = QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_GET(base);
+ next = base;
+
+ do {
+ clear_bit(next, free_list);
+ num_free--;
+
+ lan_rmw(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_SET(next),
+ QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM,
+ lan966x, QSYS_TAS_CFG_CTRL);
+
+ next = lan_rd(lan966x, QSYS_TAS_GCL_CT_CFG2);
+ next = QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_GET(next);
+ } while (base != next);
+ }
+
+ return num_free;
+}
+
+static void lan966x_taprio_gcl_setup_entry(struct lan966x_port *port,
+ struct tc_taprio_sched_entry *entry,
+ u32 next_entry)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ /* Setup a single gcl entry */
+ lan_wr(QSYS_TAS_GCL_CT_CFG_GATE_STATE_SET(entry->gate_mask) |
+ QSYS_TAS_GCL_CT_CFG_HSCH_POS_SET(port->chip_port) |
+ QSYS_TAS_GCL_CT_CFG_OP_TYPE_SET(LAN966X_TAPRIO_GCL_CMD_SET_GATE_STATES),
+ lan966x, QSYS_TAS_GCL_CT_CFG);
+
+ lan_wr(QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE_SET(port->chip_port) |
+ QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_SET(next_entry),
+ lan966x, QSYS_TAS_GCL_CT_CFG2);
+
+ lan_wr(entry->interval, lan966x, QSYS_TAS_GCL_TM_CFG);
+}
+
+static int lan966x_taprio_gcl_setup(struct lan966x_port *port,
+ struct tc_taprio_qopt_offload *qopt,
+ int list)
+{
+ DECLARE_BITMAP(free_list, LAN966X_TAPRIO_NUM_GCL);
+ struct lan966x *lan966x = port->lan966x;
+ u32 i, base, next;
+
+ if (lan966x_taprio_gcl_free_get(port, free_list) < qopt->num_entries)
+ return -ENOSPC;
+
+ /* Select list */
+ lan_rmw(QSYS_TAS_CFG_CTRL_LIST_NUM_SET(list),
+ QSYS_TAS_CFG_CTRL_LIST_NUM,
+ lan966x, QSYS_TAS_CFG_CTRL);
+
+ /* Setup the address of the first gcl entry */
+ base = find_first_bit(free_list, LAN966X_TAPRIO_NUM_GCL);
+ lan_rmw(QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_SET(base),
+ QSYS_TAS_LIST_CFG_LIST_BASE_ADDR,
+ lan966x, QSYS_TAS_LIST_CFG);
+
+ /* Iterate over entries and add them to the gcl list */
+ next = base;
+ for (i = 0; i < qopt->num_entries; ++i) {
+ lan_rmw(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_SET(next),
+ QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM,
+ lan966x, QSYS_TAS_CFG_CTRL);
+
+ /* If this is the last entry, point back to the start of the list */
+ if (i == qopt->num_entries - 1)
+ next = base;
+ else
+ next = find_next_bit(free_list, LAN966X_TAPRIO_NUM_GCL,
+ next + 1);
+
+ lan966x_taprio_gcl_setup_entry(port, &qopt->entries[i], next);
+ }
+
+ return 0;
+}
+
+/* Calculate a new base_time based on cycle_time. The HW recommends that the
+ * new base time be at least current time + 2 * cycle_time
+ */
+static void lan966x_taprio_new_base_time(struct lan966x *lan966x,
+ const u32 cycle_time,
+ const ktime_t org_base_time,
+ ktime_t *new_base_time)
+{
+ ktime_t current_time, threshold_time;
+ struct timespec64 ts;
+
+ /* Get the current time and calculate the threshold_time */
+ lan966x_ptp_gettime64(&lan966x->phc[LAN966X_PHC_PORT].info, &ts);
+ current_time = timespec64_to_ktime(ts);
+ threshold_time = current_time + (2 * cycle_time);
+
+ /* If the org_base_time is far enough in the future, just use it */
+ if (org_base_time >= threshold_time) {
+ *new_base_time = org_base_time;
+ return;
+ }
+
+ /* If the org_base_time is smaller than current_time, calculate the new
+ * base time as follows.
+ */
+ if (org_base_time <= current_time) {
+ u64 tmp = current_time - org_base_time;
+ u32 rem = 0;
+
+ if (tmp > cycle_time)
+ div_u64_rem(tmp, cycle_time, &rem);
+ rem = cycle_time - rem;
+ *new_base_time = threshold_time + rem;
+ return;
+ }
+
+ /* The only remaining case is org_base_time between current_time and
+ * threshold_time. In this case the new_base_time is calculated as
+ * org_base_time + 2 * cycle_time
+ */
+ *new_base_time = org_base_time + 2 * cycle_time;
+}
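+
+/* Worked example (illustrative numbers): with cycle_time = 1000000 ns,
+ * current_time = 10000000 ns and org_base_time = 7500000 ns, the base time
+ * is in the past, so tmp = 2500000, the division leaves rem = 500000, and
+ * rem becomes 1000000 - 500000 = 500000. The new base time is then
+ * threshold_time (12000000) + 500000 = 12500000 ns, which is a whole number
+ * of cycles after org_base_time and so keeps the phase of the schedule.
+ */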
+
+int lan966x_taprio_speed_set(struct lan966x_port *port, int speed)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u8 taprio_speed;
+
+ switch (speed) {
+ case SPEED_10:
+ taprio_speed = LAN966X_TAPRIO_SPEED_10;
+ break;
+ case SPEED_100:
+ taprio_speed = LAN966X_TAPRIO_SPEED_100;
+ break;
+ case SPEED_1000:
+ taprio_speed = LAN966X_TAPRIO_SPEED_1000;
+ break;
+ case SPEED_2500:
+ taprio_speed = LAN966X_TAPRIO_SPEED_2500;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ lan_rmw(QSYS_TAS_PROFILE_CFG_LINK_SPEED_SET(taprio_speed),
+ QSYS_TAS_PROFILE_CFG_LINK_SPEED,
+ lan966x, QSYS_TAS_PROFILE_CFG(port->chip_port));
+
+ return 0;
+}
+
+int lan966x_taprio_add(struct lan966x_port *port,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ struct lan966x *lan966x = port->lan966x;
+ int err, new_list, obs_list;
+ struct timespec64 ts;
+ ktime_t base_time;
+
+ err = lan966x_taprio_check(qopt);
+ if (err)
+ return err;
+
+ err = lan966x_taprio_find_list(port, qopt, &new_list, &obs_list);
+ if (err)
+ return err;
+
+ err = lan966x_taprio_gcl_setup(port, qopt, new_list);
+ if (err)
+ return err;
+
+ lan966x_taprio_new_base_time(lan966x, qopt->cycle_time,
+ qopt->base_time, &base_time);
+
+ ts = ktime_to_timespec64(base_time);
+ lan_wr(QSYS_TAS_BT_NSEC_NSEC_SET(ts.tv_nsec),
+ lan966x, QSYS_TAS_BT_NSEC);
+
+ lan_wr(lower_32_bits(ts.tv_sec),
+ lan966x, QSYS_TAS_BT_SEC_LSB);
+
+ lan_wr(QSYS_TAS_BT_SEC_MSB_SEC_MSB_SET(upper_32_bits(ts.tv_sec)),
+ lan966x, QSYS_TAS_BT_SEC_MSB);
+
+ lan_wr(qopt->cycle_time, lan966x, QSYS_TAS_CT_CFG);
+
+ lan_rmw(QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX_SET(obs_list),
+ QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX,
+ lan966x, QSYS_TAS_STARTUP_CFG);
+
+ /* Start list processing */
+ lan_rmw(QSYS_TAS_LST_LIST_STATE_SET(LAN966X_TAPRIO_STATE_ADVANCING),
+ QSYS_TAS_LST_LIST_STATE,
+ lan966x, QSYS_TAS_LST);
+
+ return err;
+}
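+
+/* A typical way to reach this path from user space is a full taprio offload
+ * request such as (illustrative command line, interface names are examples):
+ *
+ *   tc qdisc replace dev eth0 parent root taprio num_tc 8 \
+ *      map 0 1 2 3 4 5 6 7 queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 \
+ *      base-time 1000000 sched-entry S 0x01 300000 \
+ *      sched-entry S 0xfe 700000 flags 0x2
+ *
+ * flags 0x2 requests full offload, which ends up here via
+ * TAPRIO_CMD_REPLACE.
+ */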
+
+int lan966x_taprio_del(struct lan966x_port *port)
+{
+ return lan966x_taprio_shutdown(port);
+}
+
+void lan966x_taprio_init(struct lan966x *lan966x)
+{
+ int num_taprio_lists;
+ int p;
+
+ lan_wr(QSYS_TAS_STM_CFG_REVISIT_DLY_SET((256 * 1000) /
+ lan966x_ptp_get_period_ps()),
+ lan966x, QSYS_TAS_STM_CFG);
+
+ num_taprio_lists = lan966x->num_phys_ports *
+ LAN966X_TAPRIO_ENTRIES_PER_PORT;
+
+ /* For now we always use a guard band on all queues */
+ lan_rmw(QSYS_TAS_CFG_CTRL_LIST_NUM_MAX_SET(num_taprio_lists) |
+ QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q_SET(1),
+ QSYS_TAS_CFG_CTRL_LIST_NUM_MAX |
+ QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q,
+ lan966x, QSYS_TAS_CFG_CTRL);
+
+ for (p = 0; p < lan966x->num_phys_ports; p++)
+ lan_rmw(QSYS_TAS_PROFILE_CFG_PORT_NUM_SET(p),
+ QSYS_TAS_PROFILE_CFG_PORT_NUM,
+ lan966x, QSYS_TAS_PROFILE_CFG(p));
+}
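+
+/* Illustrative numbers: REVISIT_DLY is 256000 ps divided by the PTP clock
+ * period, so with an assumed period of 6400 ps the state machine revisit
+ * delay becomes 256000 / 6400 = 40 clock cycles.
+ */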
+
+void lan966x_taprio_deinit(struct lan966x *lan966x)
+{
+ int p;
+
+ for (p = 0; p < lan966x->num_phys_ports; ++p) {
+ if (!lan966x->ports[p])
+ continue;
+
+ lan966x_taprio_del(lan966x->ports[p]);
+ }
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tbf.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tbf.c
new file mode 100644
index 0000000000..4555a35d0d
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tbf.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+int lan966x_tbf_add(struct lan966x_port *port,
+ struct tc_tbf_qopt_offload *qopt)
+{
+ struct lan966x *lan966x = port->lan966x;
+ bool root = qopt->parent == TC_H_ROOT;
+ u32 queue = 0;
+ u32 cir, cbs;
+ u32 se_idx;
+
+ if (!root) {
+ queue = TC_H_MIN(qopt->parent) - 1;
+ if (queue >= NUM_PRIO_QUEUES)
+ return -EOPNOTSUPP;
+ }
+
+ if (root)
+ se_idx = SE_IDX_PORT + port->chip_port;
+ else
+ se_idx = SE_IDX_QUEUE + port->chip_port * NUM_PRIO_QUEUES + queue;
+
+ cir = div_u64(qopt->replace_params.rate.rate_bytes_ps, 1000) * 8;
+ cbs = qopt->replace_params.max_size;
+
+ /* Rate unit is 100 kbps */
+ cir = DIV_ROUND_UP(cir, 100);
+ /* Avoid using zero rate */
+ cir = cir ?: 1;
+ /* Burst unit is 4kB */
+ cbs = DIV_ROUND_UP(cbs, 4096);
+ /* Avoid using zero burst */
+ cbs = cbs ?: 1;
+
+ /* Check that the resulting values fit in the register fields */
+ if (cir > GENMASK(15, 0) ||
+ cbs > GENMASK(6, 0))
+ return -EINVAL;
+
+ lan_rmw(QSYS_SE_CFG_SE_AVB_ENA_SET(0) |
+ QSYS_SE_CFG_SE_FRM_MODE_SET(1),
+ QSYS_SE_CFG_SE_AVB_ENA |
+ QSYS_SE_CFG_SE_FRM_MODE,
+ lan966x, QSYS_SE_CFG(se_idx));
+
+ lan_wr(QSYS_CIR_CFG_CIR_RATE_SET(cir) |
+ QSYS_CIR_CFG_CIR_BURST_SET(cbs),
+ lan966x, QSYS_CIR_CFG(se_idx));
+
+ return 0;
+}
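+
+/* Worked example (illustrative): for rate_bytes_ps = 12500000 (100 Mbit/s)
+ * and max_size = 6000 bytes, cir = 12500000 / 1000 * 8 = 100000 kbps, which
+ * becomes DIV_ROUND_UP(100000, 100) = 1000 units of 100 kbps, and
+ * cbs = DIV_ROUND_UP(6000, 4096) = 2 units of 4 kB. Both fit in the 16 bit
+ * rate and 7 bit burst fields checked above.
+ */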
+
+int lan966x_tbf_del(struct lan966x_port *port,
+ struct tc_tbf_qopt_offload *qopt)
+{
+ struct lan966x *lan966x = port->lan966x;
+ bool root = qopt->parent == TC_H_ROOT;
+ u32 queue = 0;
+ u32 se_idx;
+
+ if (!root) {
+ queue = TC_H_MIN(qopt->parent) - 1;
+ if (queue >= NUM_PRIO_QUEUES)
+ return -EOPNOTSUPP;
+ }
+
+ if (root)
+ se_idx = SE_IDX_PORT + port->chip_port;
+ else
+ se_idx = SE_IDX_QUEUE + port->chip_port * NUM_PRIO_QUEUES + queue;
+
+ lan_rmw(QSYS_SE_CFG_SE_AVB_ENA_SET(0) |
+ QSYS_SE_CFG_SE_FRM_MODE_SET(0),
+ QSYS_SE_CFG_SE_AVB_ENA |
+ QSYS_SE_CFG_SE_FRM_MODE,
+ lan966x, QSYS_SE_CFG(se_idx));
+
+ lan_wr(QSYS_CIR_CFG_CIR_RATE_SET(0) |
+ QSYS_CIR_CFG_CIR_BURST_SET(0),
+ lan966x, QSYS_CIR_CFG(se_idx));
+
+ return 0;
+}
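+
+/* The matching user space request is a tbf qdisc, e.g. (illustrative):
+ *
+ *   tc qdisc add dev eth0 root handle 1: tbf rate 100mbit burst 6k \
+ *      limit 100k
+ *
+ * TC_TBF_REPLACE then lands in lan966x_tbf_add() and TC_TBF_DESTROY in
+ * lan966x_tbf_del() for the same scheduler element.
+ */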
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c
new file mode 100644
index 0000000000..ee652f2d23
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
+
+#include "lan966x_main.h"
+
+static LIST_HEAD(lan966x_tc_block_cb_list);
+
+static int lan966x_tc_setup_qdisc_mqprio(struct lan966x_port *port,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ u8 num_tc = mqprio->qopt.num_tc;
+
+ mqprio->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+ return num_tc ? lan966x_mqprio_add(port, num_tc) :
+ lan966x_mqprio_del(port);
+}
+
+static int lan966x_tc_setup_qdisc_taprio(struct lan966x_port *port,
+ struct tc_taprio_qopt_offload *taprio)
+{
+ switch (taprio->cmd) {
+ case TAPRIO_CMD_REPLACE:
+ return lan966x_taprio_add(port, taprio);
+ case TAPRIO_CMD_DESTROY:
+ return lan966x_taprio_del(port);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int lan966x_tc_setup_qdisc_tbf(struct lan966x_port *port,
+ struct tc_tbf_qopt_offload *qopt)
+{
+ switch (qopt->command) {
+ case TC_TBF_REPLACE:
+ return lan966x_tbf_add(port, qopt);
+ case TC_TBF_DESTROY:
+ return lan966x_tbf_del(port, qopt);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int lan966x_tc_setup_qdisc_cbs(struct lan966x_port *port,
+ struct tc_cbs_qopt_offload *qopt)
+{
+ return qopt->enable ? lan966x_cbs_add(port, qopt) :
+ lan966x_cbs_del(port, qopt);
+}
+
+static int lan966x_tc_setup_qdisc_ets(struct lan966x_port *port,
+ struct tc_ets_qopt_offload *qopt)
+{
+ switch (qopt->command) {
+ case TC_ETS_REPLACE:
+ return lan966x_ets_add(port, qopt);
+ case TC_ETS_DESTROY:
+ return lan966x_ets_del(port, qopt);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int lan966x_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv, bool ingress)
+{
+ struct lan966x_port *port = cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSMATCHALL:
+ return lan966x_tc_matchall(port, type_data, ingress);
+ case TC_SETUP_CLSFLOWER:
+ return lan966x_tc_flower(port, type_data, ingress);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int lan966x_tc_block_cb_ingress(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ return lan966x_tc_block_cb(type, type_data, cb_priv, true);
+}
+
+static int lan966x_tc_block_cb_egress(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ return lan966x_tc_block_cb(type, type_data, cb_priv, false);
+}
+
+static int lan966x_tc_setup_block(struct lan966x_port *port,
+ struct flow_block_offload *f)
+{
+ flow_setup_cb_t *cb;
+ bool ingress;
+
+ if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
+ cb = lan966x_tc_block_cb_ingress;
+ port->tc.ingress_shared_block = f->block_shared;
+ ingress = true;
+ } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
+ cb = lan966x_tc_block_cb_egress;
+ ingress = false;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ return flow_block_cb_setup_simple(f, &lan966x_tc_block_cb_list,
+ cb, port, port, ingress);
+}
+
+int lan966x_tc_setup(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return lan966x_tc_setup_qdisc_mqprio(port, type_data);
+ case TC_SETUP_QDISC_TAPRIO:
+ return lan966x_tc_setup_qdisc_taprio(port, type_data);
+ case TC_SETUP_QDISC_TBF:
+ return lan966x_tc_setup_qdisc_tbf(port, type_data);
+ case TC_SETUP_QDISC_CBS:
+ return lan966x_tc_setup_qdisc_cbs(port, type_data);
+ case TC_SETUP_QDISC_ETS:
+ return lan966x_tc_setup_qdisc_ets(port, type_data);
+ case TC_SETUP_BLOCK:
+ return lan966x_tc_setup_block(port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
new file mode 100644
index 0000000000..d696cf9dbd
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
@@ -0,0 +1,620 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+#include "vcap_api.h"
+#include "vcap_api_client.h"
+#include "vcap_tc.h"
+
+#define LAN966X_FORCE_UNTAGED 3
+
+static bool lan966x_tc_is_known_etype(struct vcap_tc_flower_parse_usage *st,
+ u16 etype)
+{
+ switch (st->admin->vtype) {
+ case VCAP_TYPE_IS1:
+ switch (etype) {
+ case ETH_P_ALL:
+ case ETH_P_ARP:
+ case ETH_P_IP:
+ case ETH_P_IPV6:
+ return true;
+ }
+ break;
+ case VCAP_TYPE_IS2:
+ switch (etype) {
+ case ETH_P_ALL:
+ case ETH_P_ARP:
+ case ETH_P_IP:
+ case ETH_P_IPV6:
+ case ETH_P_SNAP:
+ case ETH_P_802_2:
+ return true;
+ }
+ break;
+ case VCAP_TYPE_ES0:
+ return true;
+ default:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack,
+ "VCAP type not supported");
+ return false;
+ }
+
+ return false;
+}
+
+static int
+lan966x_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ struct flow_match_control match;
+ int err = 0;
+
+ flow_rule_match_control(st->frule, &match);
+ if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ if (match.key->flags & FLOW_DIS_IS_FRAGMENT)
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_L3_FRAGMENT,
+ VCAP_BIT_1);
+ else
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_L3_FRAGMENT,
+ VCAP_BIT_0);
+ if (err)
+ goto out;
+ }
+
+ if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
+ if (match.key->flags & FLOW_DIS_FIRST_FRAG)
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_L3_FRAG_OFS_GT0,
+ VCAP_BIT_0);
+ else
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_L3_FRAG_OFS_GT0,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error");
+ return err;
+}
+
+static int
+lan966x_tc_flower_handler_basic_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ struct flow_match_basic match;
+ int err = 0;
+
+ flow_rule_match_basic(st->frule, &match);
+ if (match.mask->n_proto) {
+ st->l3_proto = be16_to_cpu(match.key->n_proto);
+ if (!lan966x_tc_is_known_etype(st, st->l3_proto)) {
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ETYPE,
+ st->l3_proto, ~0);
+ if (err)
+ goto out;
+ } else if (st->l3_proto == ETH_P_IP) {
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ } else if (st->l3_proto == ETH_P_IPV6 &&
+ st->admin->vtype == VCAP_TYPE_IS1) {
+ /* Don't set any keys in this case */
+ } else if (st->l3_proto == ETH_P_SNAP &&
+ st->admin->vtype == VCAP_TYPE_IS1) {
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_ETYPE_LEN_IS,
+ VCAP_BIT_0);
+ if (err)
+ goto out;
+
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_IP_SNAP_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ } else if (st->admin->vtype == VCAP_TYPE_IS1) {
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_ETYPE_LEN_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ETYPE,
+ st->l3_proto, ~0);
+ if (err)
+ goto out;
+ }
+ }
+ if (match.mask->ip_proto) {
+ st->l4_proto = match.key->ip_proto;
+
+ if (st->l4_proto == IPPROTO_TCP) {
+ if (st->admin->vtype == VCAP_TYPE_IS1) {
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_TCP_UDP_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ }
+
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_TCP_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ } else if (st->l4_proto == IPPROTO_UDP) {
+ if (st->admin->vtype == VCAP_TYPE_IS1) {
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_TCP_UDP_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ }
+
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_TCP_IS,
+ VCAP_BIT_0);
+ if (err)
+ goto out;
+ } else {
+ err = vcap_rule_add_key_u32(st->vrule,
+ VCAP_KF_L3_IP_PROTO,
+ st->l4_proto, ~0);
+ if (err)
+ goto out;
+ }
+ }
+
+ st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_BASIC);
+ return err;
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_proto parse error");
+ return err;
+}
+
+static int
+lan966x_tc_flower_handler_cvlan_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ if (st->admin->vtype != VCAP_TYPE_IS1) {
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack,
+ "cvlan not supported in this VCAP");
+ return -EINVAL;
+ }
+
+ return vcap_tc_flower_handler_cvlan_usage(st);
+}
+
+static int
+lan966x_tc_flower_handler_vlan_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ enum vcap_key_field vid_key = VCAP_KF_8021Q_VID_CLS;
+ enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP_CLS;
+
+ if (st->admin->vtype == VCAP_TYPE_IS1) {
+ vid_key = VCAP_KF_8021Q_VID0;
+ pcp_key = VCAP_KF_8021Q_PCP0;
+ }
+
+ return vcap_tc_flower_handler_vlan_usage(st, vid_key, pcp_key);
+}
+
+static int
+(*lan966x_tc_flower_handlers_usage[])(struct vcap_tc_flower_parse_usage *st) = {
+ [FLOW_DISSECTOR_KEY_ETH_ADDRS] = vcap_tc_flower_handler_ethaddr_usage,
+ [FLOW_DISSECTOR_KEY_IPV4_ADDRS] = vcap_tc_flower_handler_ipv4_usage,
+ [FLOW_DISSECTOR_KEY_IPV6_ADDRS] = vcap_tc_flower_handler_ipv6_usage,
+ [FLOW_DISSECTOR_KEY_CONTROL] = lan966x_tc_flower_handler_control_usage,
+ [FLOW_DISSECTOR_KEY_PORTS] = vcap_tc_flower_handler_portnum_usage,
+ [FLOW_DISSECTOR_KEY_BASIC] = lan966x_tc_flower_handler_basic_usage,
+ [FLOW_DISSECTOR_KEY_CVLAN] = lan966x_tc_flower_handler_cvlan_usage,
+ [FLOW_DISSECTOR_KEY_VLAN] = lan966x_tc_flower_handler_vlan_usage,
+ [FLOW_DISSECTOR_KEY_TCP] = vcap_tc_flower_handler_tcp_usage,
+ [FLOW_DISSECTOR_KEY_ARP] = vcap_tc_flower_handler_arp_usage,
+ [FLOW_DISSECTOR_KEY_IP] = vcap_tc_flower_handler_ip_usage,
+};
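+
+/* As an illustration, a flower filter such as:
+ *
+ *   tc filter add dev eth0 ingress chain 0 protocol ip flower \
+ *      dst_ip 10.0.0.1 ip_proto tcp action goto chain 8000000
+ *
+ * exercises the BASIC (n_proto/ip_proto), IPV4_ADDRS and CONTROL dissector
+ * handlers above; the chain number is only an example and must match one of
+ * the VCAP lookups.
+ */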
+
+static int lan966x_tc_flower_use_dissectors(struct flow_cls_offload *f,
+ struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ u16 *l3_proto)
+{
+ struct vcap_tc_flower_parse_usage state = {
+ .fco = f,
+ .vrule = vrule,
+ .l3_proto = ETH_P_ALL,
+ .admin = admin,
+ };
+ int err = 0;
+
+ state.frule = flow_cls_offload_flow_rule(f);
+ for (int i = 0; i < ARRAY_SIZE(lan966x_tc_flower_handlers_usage); ++i) {
+ if (!flow_rule_match_key(state.frule, i) ||
+ !lan966x_tc_flower_handlers_usage[i])
+ continue;
+
+ err = lan966x_tc_flower_handlers_usage[i](&state);
+ if (err)
+ return err;
+ }
+
+ if (l3_proto)
+ *l3_proto = state.l3_proto;
+
+ return err;
+}
+
+static int lan966x_tc_flower_action_check(struct vcap_control *vctrl,
+ struct net_device *dev,
+ struct flow_cls_offload *fco,
+ bool ingress)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(fco);
+ struct flow_action_entry *actent, *last_actent = NULL;
+ struct flow_action *act = &rule->action;
+ u64 action_mask = 0;
+ int idx;
+
+ if (!flow_action_has_entries(act)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack, "No actions");
+ return -EINVAL;
+ }
+
+ if (!flow_action_basic_hw_stats_check(act, fco->common.extack))
+ return -EOPNOTSUPP;
+
+ flow_action_for_each(idx, actent, act) {
+ if (action_mask & BIT(actent->id)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "More actions of the same type");
+ return -EINVAL;
+ }
+ action_mask |= BIT(actent->id);
+ last_actent = actent; /* Save last action for later check */
+ }
+
+ /* Check that last action is a goto
+ * The last chain/lookup does not need to have goto action
+ */
+ if (last_actent->id == FLOW_ACTION_GOTO) {
+ /* Check if the destination chain is in one of the VCAPs */
+ if (!vcap_is_next_lookup(vctrl, fco->common.chain_index,
+ last_actent->chain_index)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Invalid goto chain");
+ return -EINVAL;
+ }
+ } else if (!vcap_is_last_chain(vctrl, fco->common.chain_index,
+ ingress)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Last action must be 'goto'");
+ return -EINVAL;
+ }
+
+ /* Catch unsupported combinations of actions */
+ if (action_mask & BIT(FLOW_ACTION_TRAP) &&
+ action_mask & BIT(FLOW_ACTION_ACCEPT)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Cannot combine pass and trap action");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/* Add the actionset that is the default for the VCAP type */
+static int lan966x_tc_set_actionset(struct vcap_admin *admin,
+ struct vcap_rule *vrule)
+{
+ enum vcap_actionfield_set aset;
+ int err = 0;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS1:
+ aset = VCAP_AFS_S1;
+ break;
+ case VCAP_TYPE_IS2:
+ aset = VCAP_AFS_BASE_TYPE;
+ break;
+ case VCAP_TYPE_ES0:
+ aset = VCAP_AFS_VID;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Do not overwrite any current actionset */
+ if (vrule->actionset == VCAP_AFS_NO_VALUE)
+ err = vcap_set_rule_set_actionset(vrule, aset);
+
+ return err;
+}
+
+static int lan966x_tc_add_rule_link_target(struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ int target_cid)
+{
+ int link_val = target_cid % VCAP_CID_LOOKUP_SIZE;
+ int err;
+
+ if (!link_val)
+ return 0;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS1:
+ /* Choose IS1 specific NXT_IDX key (for chaining rules from IS1) */
+ err = vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_GEN_IDX_SEL,
+ 1, ~0);
+ if (err)
+ return err;
+
+ return vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_GEN_IDX,
+ link_val, ~0);
+ case VCAP_TYPE_IS2:
+ /* Add IS2 specific PAG key (for chaining rules from IS1) */
+ return vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_PAG,
+ link_val, ~0);
+ case VCAP_TYPE_ES0:
+ /* Add ES0 specific ISDX key (for chaining rules from IS1) */
+ return vcap_rule_add_key_u32(vrule, VCAP_KF_ISDX_CLS,
+ link_val, ~0);
+ default:
+ break;
+ }
+ return 0;
+}
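+
+/* Example (assuming the VCAP API's lookup size of 100000 chain ids): a rule
+ * created on chain 8000001 gets link_val = 8000001 % 100000 = 1, so an IS2
+ * rule only matches frames whose PAG was set to 1 by a linked IS1 rule;
+ * link_val 0 means the chain is a lookup start and no linking key is added.
+ */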
+
+static int lan966x_tc_add_rule_link(struct vcap_control *vctrl,
+ struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ struct flow_cls_offload *f,
+ int to_cid)
+{
+ struct vcap_admin *to_admin = vcap_find_admin(vctrl, to_cid);
+ int diff, err = 0;
+
+ if (!to_admin) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Unknown destination chain");
+ return -EINVAL;
+ }
+
+ diff = vcap_chain_offset(vctrl, f->common.chain_index, to_cid);
+ if (!diff)
+ return 0;
+
+ /* Between IS1 and IS2 the PAG value is used */
+ if (admin->vtype == VCAP_TYPE_IS1 && to_admin->vtype == VCAP_TYPE_IS2) {
+ /* This works for IS1->IS2 */
+ err = vcap_rule_add_action_u32(vrule, VCAP_AF_PAG_VAL, diff);
+ if (err)
+ return err;
+
+ err = vcap_rule_add_action_u32(vrule, VCAP_AF_PAG_OVERRIDE_MASK,
+ 0xff);
+ if (err)
+ return err;
+ } else if (admin->vtype == VCAP_TYPE_IS1 &&
+ to_admin->vtype == VCAP_TYPE_ES0) {
+ /* This works for IS1->ES0 */
+ err = vcap_rule_add_action_u32(vrule, VCAP_AF_ISDX_ADD_VAL,
+ diff);
+ if (err)
+ return err;
+
+ err = vcap_rule_add_action_bit(vrule, VCAP_AF_ISDX_REPLACE_ENA,
+ VCAP_BIT_1);
+ if (err)
+ return err;
+ } else {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Unsupported chain destination");
+ return -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static int lan966x_tc_add_rule_counter(struct vcap_admin *admin,
+ struct vcap_rule *vrule)
+{
+ int err = 0;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_ES0:
+ err = vcap_rule_mod_action_u32(vrule, VCAP_AF_ESDX,
+ vrule->id);
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static int lan966x_tc_flower_add(struct lan966x_port *port,
+ struct flow_cls_offload *f,
+ struct vcap_admin *admin,
+ bool ingress)
+{
+ struct flow_action_entry *act;
+ u16 l3_proto = ETH_P_ALL;
+ struct flow_rule *frule;
+ struct vcap_rule *vrule;
+ int err, idx;
+
+ err = lan966x_tc_flower_action_check(port->lan966x->vcap_ctrl,
+ port->dev, f, ingress);
+ if (err)
+ return err;
+
+ vrule = vcap_alloc_rule(port->lan966x->vcap_ctrl, port->dev,
+ f->common.chain_index, VCAP_USER_TC,
+ f->common.prio, 0);
+ if (IS_ERR(vrule))
+ return PTR_ERR(vrule);
+
+ vrule->cookie = f->cookie;
+ err = lan966x_tc_flower_use_dissectors(f, admin, vrule, &l3_proto);
+ if (err)
+ goto out;
+
+ err = lan966x_tc_add_rule_link_target(admin, vrule,
+ f->common.chain_index);
+ if (err)
+ goto out;
+
+ frule = flow_cls_offload_flow_rule(f);
+
+ flow_action_for_each(idx, act, &frule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_TRAP:
+ if (admin->vtype != VCAP_TYPE_IS2) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Trap action not supported in this VCAP");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ err = vcap_rule_add_action_bit(vrule,
+ VCAP_AF_CPU_COPY_ENA,
+ VCAP_BIT_1);
+ err |= vcap_rule_add_action_u32(vrule,
+ VCAP_AF_CPU_QUEUE_NUM,
+ 0);
+ err |= vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE,
+ LAN966X_PMM_REPLACE);
+ if (err)
+ goto out;
+
+ break;
+ case FLOW_ACTION_GOTO:
+ err = lan966x_tc_set_actionset(admin, vrule);
+ if (err)
+ goto out;
+
+ err = lan966x_tc_add_rule_link(port->lan966x->vcap_ctrl,
+ admin, vrule,
+ f, act->chain_index);
+ if (err)
+ goto out;
+
+ break;
+ case FLOW_ACTION_VLAN_POP:
+ if (admin->vtype != VCAP_TYPE_ES0) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Cannot use vlan pop on non es0");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ /* Force untag */
+ err = vcap_rule_add_action_u32(vrule, VCAP_AF_PUSH_OUTER_TAG,
+ LAN966X_FORCE_UNTAGED);
+ if (err)
+ goto out;
+
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Unsupported TC action");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ }
+
+ err = lan966x_tc_add_rule_counter(admin, vrule);
+ if (err) {
+ vcap_set_tc_exterr(f, vrule);
+ goto out;
+ }
+
+ err = vcap_val_rule(vrule, l3_proto);
+ if (err) {
+ vcap_set_tc_exterr(f, vrule);
+ goto out;
+ }
+
+ err = vcap_add_rule(vrule);
+ if (err)
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Could not add the filter");
+out:
+ vcap_free_rule(vrule);
+ return err;
+}
+
+static int lan966x_tc_flower_del(struct lan966x_port *port,
+ struct flow_cls_offload *f,
+ struct vcap_admin *admin)
+{
+ struct vcap_control *vctrl;
+ int err = -ENOENT, rule_id;
+
+ vctrl = port->lan966x->vcap_ctrl;
+ while (true) {
+ rule_id = vcap_lookup_rule_by_cookie(vctrl, f->cookie);
+ if (rule_id <= 0)
+ break;
+
+ err = vcap_del_rule(vctrl, port->dev, rule_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Cannot delete rule");
+ break;
+ }
+ }
+
+ return err;
+}
+
+static int lan966x_tc_flower_stats(struct lan966x_port *port,
+ struct flow_cls_offload *f,
+ struct vcap_admin *admin)
+{
+ struct vcap_counter count = {};
+ int err;
+
+ err = vcap_get_rule_count_by_cookie(port->lan966x->vcap_ctrl,
+ &count, f->cookie);
+ if (err)
+ return err;
+
+ flow_stats_update(&f->stats, 0x0, count.value, 0, 0,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+
+ return err;
+}
+
+int lan966x_tc_flower(struct lan966x_port *port,
+ struct flow_cls_offload *f,
+ bool ingress)
+{
+ struct vcap_admin *admin;
+
+ admin = vcap_find_admin(port->lan966x->vcap_ctrl,
+ f->common.chain_index);
+ if (!admin) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "Invalid chain");
+ return -EINVAL;
+ }
+
+ switch (f->command) {
+ case FLOW_CLS_REPLACE:
+ return lan966x_tc_flower_add(port, f, admin, ingress);
+ case FLOW_CLS_DESTROY:
+ return lan966x_tc_flower_del(port, f, admin);
+ case FLOW_CLS_STATS:
+ return lan966x_tc_flower_stats(port, f, admin);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
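+
+/* For reference, the three commands map to user space as follows
+ * (illustrative): "tc filter add ... flower ..." issues FLOW_CLS_REPLACE,
+ * "tc filter del ..." issues FLOW_CLS_DESTROY and "tc -s filter show ..."
+ * issues FLOW_CLS_STATS against the rule cookie.
+ */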
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c
new file mode 100644
index 0000000000..20627323d6
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+static int lan966x_tc_matchall_add(struct lan966x_port *port,
+ struct tc_cls_matchall_offload *f,
+ bool ingress)
+{
+ struct flow_action_entry *act;
+
+ if (!flow_offload_has_one_action(&f->rule->action)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Only once action per filter is supported");
+ return -EOPNOTSUPP;
+ }
+
+ act = &f->rule->action.entries[0];
+ switch (act->id) {
+ case FLOW_ACTION_POLICE:
+ return lan966x_police_port_add(port, &f->rule->action, act,
+ f->cookie, ingress,
+ f->common.extack);
+ case FLOW_ACTION_MIRRED:
+ return lan966x_mirror_port_add(port, act, f->cookie,
+ ingress, f->common.extack);
+ case FLOW_ACTION_GOTO:
+ return lan966x_goto_port_add(port, f->common.chain_index,
+ act->chain_index, f->cookie,
+ f->common.extack);
+ default:
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Unsupported action");
+ return -EOPNOTSUPP;
+ }
+}
+
+static int lan966x_tc_matchall_del(struct lan966x_port *port,
+ struct tc_cls_matchall_offload *f,
+ bool ingress)
+{
+ if (f->cookie == port->tc.police_id) {
+ return lan966x_police_port_del(port, f->cookie,
+ f->common.extack);
+ } else if (f->cookie == port->tc.ingress_mirror_id ||
+ f->cookie == port->tc.egress_mirror_id) {
+ return lan966x_mirror_port_del(port, ingress,
+ f->common.extack);
+ } else {
+ return lan966x_goto_port_del(port, f->cookie, f->common.extack);
+ }
+}
+
+static int lan966x_tc_matchall_stats(struct lan966x_port *port,
+ struct tc_cls_matchall_offload *f,
+ bool ingress)
+{
+ if (f->cookie == port->tc.police_id) {
+ lan966x_police_port_stats(port, &f->stats);
+ } else if (f->cookie == port->tc.ingress_mirror_id ||
+ f->cookie == port->tc.egress_mirror_id) {
+ lan966x_mirror_port_stats(port, &f->stats, ingress);
+ } else {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Unsupported action");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int lan966x_tc_matchall(struct lan966x_port *port,
+ struct tc_cls_matchall_offload *f,
+ bool ingress)
+{
+ switch (f->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return lan966x_tc_matchall_add(port, f, ingress);
+ case TC_CLSMATCHALL_DESTROY:
+ return lan966x_tc_matchall_del(port, f, ingress);
+ case TC_CLSMATCHALL_STATS:
+ return lan966x_tc_matchall_stats(port, f, ingress);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
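+
+/* Illustrative matchall requests that reach this entry point:
+ *
+ *   tc filter add dev eth0 ingress matchall \
+ *      action police rate 10mbit burst 10k
+ *   tc filter add dev eth0 ingress matchall \
+ *      action mirred egress mirror dev eth1
+ *
+ * The single action is then dispatched to the police, mirror or goto
+ * helpers in lan966x_tc_matchall_add().
+ */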
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.c
new file mode 100644
index 0000000000..fb6851b945
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.c
@@ -0,0 +1,3270 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#include "lan966x_vcap_ag_api.h"
+
+/* keyfields */
+static const struct vcap_field is1_normal_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_INDEX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 1,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 9,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_8021CB_R_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_DBL_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 17,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 18,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 19,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 31,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 32,
+ .width = 3,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 35,
+ .width = 48,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 83,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 84,
+ .width = 16,
+ },
+ [VCAP_KF_IP_SNAP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 100,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 101,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 102,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAG_OFS_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 103,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 104,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 105,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 111,
+ .width = 32,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 143,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 144,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 145,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 161,
+ .width = 8,
+ },
+};
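+
+/* Reading these tables: each entry gives the position of a key within the
+ * IS1 "normal" keyset. For example, VCAP_KF_8021Q_VID0 above occupies bits
+ * 19-30 (offset 19, width 12) of the keyset, so the VCAP library can place
+ * a VLAN id match at exactly that position.
+ */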
+
+static const struct vcap_field is1_5tuple_ip4_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_INDEX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 1,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 9,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_8021CB_R_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_DBL_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 17,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 18,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 19,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 31,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 32,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 35,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 36,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 48,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 49,
+ .width = 3,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAG_OFS_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 55,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 56,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 62,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 94,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 126,
+ .width = 8,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 134,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 135,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 136,
+ .width = 8,
+ },
+ [VCAP_KF_IP_PAYLOAD_5TUPLE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 144,
+ .width = 32,
+ },
+};
+
+static const struct vcap_field is1_normal_ip6_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_INDEX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 9,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_8021CB_R_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 17,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_DBL_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 18,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 32,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 33,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 36,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 37,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 49,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 50,
+ .width = 3,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 53,
+ .width = 48,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 101,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 107,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 235,
+ .width = 8,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 243,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 244,
+ .width = 8,
+ },
+ [VCAP_KF_IP_PAYLOAD_S1_IP6] = {
+ .type = VCAP_FIELD_U112,
+ .offset = 252,
+ .width = 112,
+ },
+};
+
+static const struct vcap_field is1_7tuple_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_INDEX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 9,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_8021CB_R_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 17,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_DBL_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 18,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 32,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 33,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 36,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 37,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 49,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 50,
+ .width = 3,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 53,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 101,
+ .width = 48,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 149,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 150,
+ .width = 16,
+ },
+ [VCAP_KF_IP_SNAP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 166,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 167,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 168,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAG_OFS_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 169,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 170,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 171,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP6_DIP_MSB] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 177,
+ .width = 16,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 193,
+ .width = 64,
+ },
+ [VCAP_KF_L3_IP6_SIP_MSB] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 257,
+ .width = 16,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 273,
+ .width = 64,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 337,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 338,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 339,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 355,
+ .width = 8,
+ },
+};
+
+static const struct vcap_field is1_5tuple_ip6_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_INDEX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 9,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_8021CB_R_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 17,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_DBL_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 18,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 32,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 33,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 36,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 37,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 49,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 50,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 53,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 59,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 187,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 315,
+ .width = 8,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 323,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 324,
+ .width = 8,
+ },
+ [VCAP_KF_IP_PAYLOAD_5TUPLE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 332,
+ .width = 32,
+ },
+};
+
+static const struct vcap_field is1_dbl_vid_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_INDEX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 9,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_8021CB_R_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 17,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_DBL_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 18,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 32,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 33,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 36,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 37,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 49,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 50,
+ .width = 3,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 54,
+ .width = 16,
+ },
+ [VCAP_KF_IP_SNAP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 70,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 71,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 72,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAG_OFS_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 73,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 74,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 75,
+ .width = 6,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 81,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 82,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field is1_rt_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 3,
+ },
+ [VCAP_KF_8021CB_R_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 6,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 7,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_DBL_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 8,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 9,
+ .width = 48,
+ },
+ [VCAP_KF_RT_VLAN_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 57,
+ .width = 3,
+ },
+ [VCAP_KF_RT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 60,
+ .width = 2,
+ },
+ [VCAP_KF_RT_FRMID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 62,
+ .width = 32,
+ },
+};
+
+static const struct vcap_field is1_dmac_vid_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_INDEX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 9,
+ },
+ [VCAP_KF_8021CB_R_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_DBL_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 29,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 30,
+ .width = 3,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 33,
+ .width = 48,
+ },
+};
+
+static const struct vcap_field is2_mac_etype_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 9,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 22,
+ .width = 1,
+ },
+ [VCAP_KF_HOST_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 24,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 25,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 39,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 40,
+ .width = 3,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 43,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 91,
+ .width = 48,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 139,
+ .width = 16,
+ },
+ [VCAP_KF_L2_FRM_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 155,
+ .width = 4,
+ },
+ [VCAP_KF_L2_PAYLOAD0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 159,
+ .width = 16,
+ },
+ [VCAP_KF_L2_PAYLOAD1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 175,
+ .width = 8,
+ },
+ [VCAP_KF_L2_PAYLOAD2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 183,
+ .width = 3,
+ },
+};
+
+static const struct vcap_field is2_mac_llc_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 9,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 22,
+ .width = 1,
+ },
+ [VCAP_KF_HOST_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 24,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 25,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 39,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 40,
+ .width = 3,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 43,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 91,
+ .width = 48,
+ },
+ [VCAP_KF_L2_LLC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 139,
+ .width = 40,
+ },
+};
+
+static const struct vcap_field is2_mac_snap_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 9,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 22,
+ .width = 1,
+ },
+ [VCAP_KF_HOST_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 24,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 25,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 39,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 40,
+ .width = 3,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 43,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 91,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SNAP] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 139,
+ .width = 40,
+ },
+};
+
+static const struct vcap_field is2_arp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 9,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 22,
+ .width = 1,
+ },
+ [VCAP_KF_HOST_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 24,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 25,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 39,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 40,
+ .width = 3,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 43,
+ .width = 48,
+ },
+ [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 91,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 92,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_LEN_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 93,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_TGT_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_SENDER_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 96,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 97,
+ .width = 2,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 99,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 131,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 163,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field is2_ip4_tcp_udp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 9,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 22,
+ .width = 1,
+ },
+ [VCAP_KF_HOST_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 24,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 25,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 39,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 40,
+ .width = 3,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 43,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 44,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAG_OFS_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 45,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 46,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 47,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 48,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 56,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 120,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 121,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 122,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 138,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 154,
+ .width = 8,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 162,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 163,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 164,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 165,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 166,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 167,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 168,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 169,
+ .width = 1,
+ },
+ [VCAP_KF_L4_1588_DOM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 170,
+ .width = 8,
+ },
+ [VCAP_KF_L4_1588_VER] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 178,
+ .width = 4,
+ },
+};
+
+static const struct vcap_field is2_ip4_other_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 9,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 22,
+ .width = 1,
+ },
+ [VCAP_KF_HOST_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 24,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 25,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 39,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 40,
+ .width = 3,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 43,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 44,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAG_OFS_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 45,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 46,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 47,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 48,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 56,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 120,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 121,
+ .width = 8,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U56,
+ .offset = 129,
+ .width = 56,
+ },
+};
+
+static const struct vcap_field is2_ip6_std_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 9,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 22,
+ .width = 1,
+ },
+ [VCAP_KF_HOST_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 24,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 25,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 39,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 40,
+ .width = 3,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 43,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 44,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 172,
+ .width = 8,
+ },
+};
+
+static const struct vcap_field is2_oam_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 9,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 22,
+ .width = 1,
+ },
+ [VCAP_KF_HOST_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 24,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 25,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 39,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 40,
+ .width = 3,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 43,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 91,
+ .width = 48,
+ },
+ [VCAP_KF_OAM_MEL_FLAGS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 139,
+ .width = 7,
+ },
+ [VCAP_KF_OAM_VER] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 146,
+ .width = 5,
+ },
+ [VCAP_KF_OAM_OPCODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 151,
+ .width = 8,
+ },
+ [VCAP_KF_OAM_FLAGS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 159,
+ .width = 8,
+ },
+ [VCAP_KF_OAM_MEPID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 167,
+ .width = 16,
+ },
+ [VCAP_KF_OAM_CCM_CNTS_EQ0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 183,
+ .width = 1,
+ },
+ [VCAP_KF_OAM_Y1731_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 184,
+ .width = 1,
+ },
+ [VCAP_KF_OAM_DETECTED] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 185,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field is2_ip6_tcp_udp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 11,
+ .width = 9,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 20,
+ .width = 1,
+ },
+ [VCAP_KF_HOST_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 21,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 22,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 24,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 25,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 37,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 38,
+ .width = 3,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 41,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 50,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 178,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 306,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 307,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 308,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 324,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 340,
+ .width = 8,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 348,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 349,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 350,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 351,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 352,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 353,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 354,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 355,
+ .width = 1,
+ },
+ [VCAP_KF_L4_1588_DOM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 356,
+ .width = 8,
+ },
+ [VCAP_KF_L4_1588_VER] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 364,
+ .width = 4,
+ },
+};
+
+static const struct vcap_field is2_ip6_other_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 11,
+ .width = 9,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 20,
+ .width = 1,
+ },
+ [VCAP_KF_HOST_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 21,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 22,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 24,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 25,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 37,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 38,
+ .width = 3,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 41,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 50,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 178,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 306,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 307,
+ .width = 8,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U56,
+ .offset = 315,
+ .width = 56,
+ },
+};
+
+static const struct vcap_field is2_smac_sip4_keyfield[] = {
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 4,
+ .width = 48,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 52,
+ .width = 32,
+ },
+};
+
+static const struct vcap_field is2_smac_sip6_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 4,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 8,
+ .width = 48,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 56,
+ .width = 128,
+ },
+};
+
+static const struct vcap_field es0_vid_keyfield[] = {
+ [VCAP_KF_IF_EGR_PORT_NO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 4,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 8,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 9,
+ .width = 8,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 17,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 18,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 19,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 31,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 32,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 35,
+ .width = 1,
+ },
+ [VCAP_KF_RTP_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 36,
+ .width = 10,
+ },
+ [VCAP_KF_PDU_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 46,
+ .width = 4,
+ },
+};
+
+/* keyfield_set */
+static const struct vcap_set is1_keyfield_set[] = {
+ [VCAP_KFS_NORMAL] = {
+ .type_id = 0,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_5TUPLE_IP4] = {
+ .type_id = 1,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_NORMAL_IP6] = {
+ .type_id = 0,
+ .sw_per_item = 4,
+ .sw_cnt = 1,
+ },
+ [VCAP_KFS_7TUPLE] = {
+ .type_id = 1,
+ .sw_per_item = 4,
+ .sw_cnt = 1,
+ },
+ [VCAP_KFS_5TUPLE_IP6] = {
+ .type_id = 2,
+ .sw_per_item = 4,
+ .sw_cnt = 1,
+ },
+ [VCAP_KFS_DBL_VID] = {
+ .type_id = 0,
+ .sw_per_item = 1,
+ .sw_cnt = 4,
+ },
+ [VCAP_KFS_RT] = {
+ .type_id = 1,
+ .sw_per_item = 1,
+ .sw_cnt = 4,
+ },
+ [VCAP_KFS_DMAC_VID] = {
+ .type_id = 2,
+ .sw_per_item = 1,
+ .sw_cnt = 4,
+ },
+};
+
+static const struct vcap_set is2_keyfield_set[] = {
+ [VCAP_KFS_MAC_ETYPE] = {
+ .type_id = 0,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_MAC_LLC] = {
+ .type_id = 1,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_MAC_SNAP] = {
+ .type_id = 2,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_ARP] = {
+ .type_id = 3,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_TCP_UDP] = {
+ .type_id = 4,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_OTHER] = {
+ .type_id = 5,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP6_STD] = {
+ .type_id = 6,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_OAM] = {
+ .type_id = 7,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP6_TCP_UDP] = {
+ .type_id = 0,
+ .sw_per_item = 4,
+ .sw_cnt = 1,
+ },
+ [VCAP_KFS_IP6_OTHER] = {
+ .type_id = 1,
+ .sw_per_item = 4,
+ .sw_cnt = 1,
+ },
+ [VCAP_KFS_SMAC_SIP4] = {
+ .type_id = -1,
+ .sw_per_item = 1,
+ .sw_cnt = 4,
+ },
+ [VCAP_KFS_SMAC_SIP6] = {
+ .type_id = 8,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+};
+
+static const struct vcap_set es0_keyfield_set[] = {
+ [VCAP_KFS_VID] = {
+ .type_id = -1,
+ .sw_per_item = 1,
+ .sw_cnt = 1,
+ },
+};
+
+/* keyfield_set map */
+static const struct vcap_field *is1_keyfield_set_map[] = {
+ [VCAP_KFS_NORMAL] = is1_normal_keyfield,
+ [VCAP_KFS_5TUPLE_IP4] = is1_5tuple_ip4_keyfield,
+ [VCAP_KFS_NORMAL_IP6] = is1_normal_ip6_keyfield,
+ [VCAP_KFS_7TUPLE] = is1_7tuple_keyfield,
+ [VCAP_KFS_5TUPLE_IP6] = is1_5tuple_ip6_keyfield,
+ [VCAP_KFS_DBL_VID] = is1_dbl_vid_keyfield,
+ [VCAP_KFS_RT] = is1_rt_keyfield,
+ [VCAP_KFS_DMAC_VID] = is1_dmac_vid_keyfield,
+};
+
+static const struct vcap_field *is2_keyfield_set_map[] = {
+ [VCAP_KFS_MAC_ETYPE] = is2_mac_etype_keyfield,
+ [VCAP_KFS_MAC_LLC] = is2_mac_llc_keyfield,
+ [VCAP_KFS_MAC_SNAP] = is2_mac_snap_keyfield,
+ [VCAP_KFS_ARP] = is2_arp_keyfield,
+ [VCAP_KFS_IP4_TCP_UDP] = is2_ip4_tcp_udp_keyfield,
+ [VCAP_KFS_IP4_OTHER] = is2_ip4_other_keyfield,
+ [VCAP_KFS_IP6_STD] = is2_ip6_std_keyfield,
+ [VCAP_KFS_OAM] = is2_oam_keyfield,
+ [VCAP_KFS_IP6_TCP_UDP] = is2_ip6_tcp_udp_keyfield,
+ [VCAP_KFS_IP6_OTHER] = is2_ip6_other_keyfield,
+ [VCAP_KFS_SMAC_SIP4] = is2_smac_sip4_keyfield,
+ [VCAP_KFS_SMAC_SIP6] = is2_smac_sip6_keyfield,
+};
+
+static const struct vcap_field *es0_keyfield_set_map[] = {
+ [VCAP_KFS_VID] = es0_vid_keyfield,
+};
+
+/* keyfield_set map sizes */
+static int is1_keyfield_set_map_size[] = {
+ [VCAP_KFS_NORMAL] = ARRAY_SIZE(is1_normal_keyfield),
+ [VCAP_KFS_5TUPLE_IP4] = ARRAY_SIZE(is1_5tuple_ip4_keyfield),
+ [VCAP_KFS_NORMAL_IP6] = ARRAY_SIZE(is1_normal_ip6_keyfield),
+ [VCAP_KFS_7TUPLE] = ARRAY_SIZE(is1_7tuple_keyfield),
+ [VCAP_KFS_5TUPLE_IP6] = ARRAY_SIZE(is1_5tuple_ip6_keyfield),
+ [VCAP_KFS_DBL_VID] = ARRAY_SIZE(is1_dbl_vid_keyfield),
+ [VCAP_KFS_RT] = ARRAY_SIZE(is1_rt_keyfield),
+ [VCAP_KFS_DMAC_VID] = ARRAY_SIZE(is1_dmac_vid_keyfield),
+};
+
+static int is2_keyfield_set_map_size[] = {
+ [VCAP_KFS_MAC_ETYPE] = ARRAY_SIZE(is2_mac_etype_keyfield),
+ [VCAP_KFS_MAC_LLC] = ARRAY_SIZE(is2_mac_llc_keyfield),
+ [VCAP_KFS_MAC_SNAP] = ARRAY_SIZE(is2_mac_snap_keyfield),
+ [VCAP_KFS_ARP] = ARRAY_SIZE(is2_arp_keyfield),
+ [VCAP_KFS_IP4_TCP_UDP] = ARRAY_SIZE(is2_ip4_tcp_udp_keyfield),
+ [VCAP_KFS_IP4_OTHER] = ARRAY_SIZE(is2_ip4_other_keyfield),
+ [VCAP_KFS_IP6_STD] = ARRAY_SIZE(is2_ip6_std_keyfield),
+ [VCAP_KFS_OAM] = ARRAY_SIZE(is2_oam_keyfield),
+ [VCAP_KFS_IP6_TCP_UDP] = ARRAY_SIZE(is2_ip6_tcp_udp_keyfield),
+ [VCAP_KFS_IP6_OTHER] = ARRAY_SIZE(is2_ip6_other_keyfield),
+ [VCAP_KFS_SMAC_SIP4] = ARRAY_SIZE(is2_smac_sip4_keyfield),
+ [VCAP_KFS_SMAC_SIP6] = ARRAY_SIZE(is2_smac_sip6_keyfield),
+};
+
+static int es0_keyfield_set_map_size[] = {
+ [VCAP_KFS_VID] = ARRAY_SIZE(es0_vid_keyfield),
+};
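+
+/* A minimal sketch (illustrative only, not part of the generated tables) of
+ * how the parallel map and map-size arrays above are meant to be consumed:
+ * the keyset enum indexes both, yielding a field table and its length.
+ * Slots that are not designated above are zero-initialized, so a zero
+ * width marks a field that is not part of the keyset.
+ *
+ *	const struct vcap_field *f = is2_keyfield_set_map[VCAP_KFS_ARP];
+ *	int n = is2_keyfield_set_map_size[VCAP_KFS_ARP];
+ *
+ *	for (int i = 0; i < n; i++)
+ *		if (f[i].width)
+ *			pr_debug("key %d: offset %u, width %u\n",
+ *				 i, f[i].offset, f[i].width);
+ */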
+
+/* actionfields */
+static const struct vcap_field is1_s1_actionfield[] = {
+ [VCAP_AF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_DSCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_AF_DSCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 6,
+ },
+ [VCAP_AF_QOS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 8,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 9,
+ .width = 3,
+ },
+ [VCAP_AF_DP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_AF_DP_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_AF_PAG_OVERRIDE_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 8,
+ },
+ [VCAP_AF_PAG_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 22,
+ .width = 8,
+ },
+ [VCAP_AF_ISDX_REPLACE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 30,
+ .width = 1,
+ },
+ [VCAP_AF_ISDX_ADD_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 31,
+ .width = 8,
+ },
+ [VCAP_AF_VID_REPLACE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 39,
+ .width = 1,
+ },
+ [VCAP_AF_VID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 40,
+ .width = 12,
+ },
+ [VCAP_AF_PCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 67,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 68,
+ .width = 3,
+ },
+ [VCAP_AF_DEI_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 71,
+ .width = 1,
+ },
+ [VCAP_AF_DEI_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 72,
+ .width = 1,
+ },
+ [VCAP_AF_VLAN_POP_CNT_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 73,
+ .width = 1,
+ },
+ [VCAP_AF_VLAN_POP_CNT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 74,
+ .width = 2,
+ },
+ [VCAP_AF_CUSTOM_ACE_TYPE_ENA] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 76,
+ .width = 4,
+ },
+ [VCAP_AF_SFID_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 80,
+ .width = 1,
+ },
+ [VCAP_AF_SFID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 81,
+ .width = 8,
+ },
+ [VCAP_AF_SGID_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 89,
+ .width = 1,
+ },
+ [VCAP_AF_SGID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 90,
+ .width = 8,
+ },
+ [VCAP_AF_POLICE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 98,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 99,
+ .width = 9,
+ },
+ [VCAP_AF_OAM_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 108,
+ .width = 3,
+ },
+ [VCAP_AF_MRP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 111,
+ .width = 2,
+ },
+ [VCAP_AF_DLR_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 113,
+ .width = 2,
+ },
+};
+
+static const struct vcap_field is2_base_type_actionfield[] = {
+ [VCAP_AF_HIT_ME_ONCE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_COPY_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_QUEUE_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 3,
+ },
+ [VCAP_AF_MASK_MODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 2,
+ },
+ [VCAP_AF_MIRROR_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 7,
+ .width = 1,
+ },
+ [VCAP_AF_LRN_DIS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 8,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 9,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 10,
+ .width = 9,
+ },
+ [VCAP_AF_POLICE_VCAP_ONLY] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_AF_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 8,
+ },
+ [VCAP_AF_REW_OP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 28,
+ .width = 16,
+ },
+ [VCAP_AF_ISDX_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 44,
+ .width = 1,
+ },
+ [VCAP_AF_ACL_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 6,
+ },
+};
+
+static const struct vcap_field is2_smac_sip_actionfield[] = {
+ [VCAP_AF_CPU_COPY_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_QUEUE_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 1,
+ .width = 3,
+ },
+ [VCAP_AF_FWD_KILL_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_AF_HOST_MATCH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 5,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field es0_vid_actionfield[] = {
+ [VCAP_AF_PUSH_OUTER_TAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_AF_PUSH_INNER_TAG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_AF_TAG_A_TPID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 2,
+ },
+ [VCAP_AF_TAG_A_VID_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 5,
+ .width = 1,
+ },
+ [VCAP_AF_TAG_A_PCP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 6,
+ .width = 2,
+ },
+ [VCAP_AF_TAG_A_DEI_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 8,
+ .width = 2,
+ },
+ [VCAP_AF_TAG_B_TPID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 10,
+ .width = 2,
+ },
+ [VCAP_AF_TAG_B_VID_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_AF_TAG_B_PCP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 2,
+ },
+ [VCAP_AF_TAG_B_DEI_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 15,
+ .width = 2,
+ },
+ [VCAP_AF_VID_A_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 12,
+ },
+ [VCAP_AF_PCP_A_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 3,
+ },
+ [VCAP_AF_DEI_A_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 32,
+ .width = 1,
+ },
+ [VCAP_AF_VID_B_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 33,
+ .width = 12,
+ },
+ [VCAP_AF_PCP_B_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 3,
+ },
+ [VCAP_AF_DEI_B_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 48,
+ .width = 1,
+ },
+ [VCAP_AF_ESDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 49,
+ .width = 8,
+ },
+};
+
+/* actionfield_set */
+static const struct vcap_set is1_actionfield_set[] = {
+ [VCAP_AFS_S1] = {
+ .type_id = 0,
+ .sw_per_item = 1,
+ .sw_cnt = 4,
+ },
+};
+
+static const struct vcap_set is2_actionfield_set[] = {
+ [VCAP_AFS_BASE_TYPE] = {
+ .type_id = -1,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+ [VCAP_AFS_SMAC_SIP] = {
+ .type_id = -1,
+ .sw_per_item = 1,
+ .sw_cnt = 4,
+ },
+};
+
+static const struct vcap_set es0_actionfield_set[] = {
+ [VCAP_AFS_VID] = {
+ .type_id = -1,
+ .sw_per_item = 1,
+ .sw_cnt = 1,
+ },
+};
+
+/* actionfield_set map */
+static const struct vcap_field *is1_actionfield_set_map[] = {
+ [VCAP_AFS_S1] = is1_s1_actionfield,
+};
+
+static const struct vcap_field *is2_actionfield_set_map[] = {
+ [VCAP_AFS_BASE_TYPE] = is2_base_type_actionfield,
+ [VCAP_AFS_SMAC_SIP] = is2_smac_sip_actionfield,
+};
+
+static const struct vcap_field *es0_actionfield_set_map[] = {
+ [VCAP_AFS_VID] = es0_vid_actionfield,
+};
+
+/* actionfield_set map size */
+static int is1_actionfield_set_map_size[] = {
+ [VCAP_AFS_S1] = ARRAY_SIZE(is1_s1_actionfield),
+};
+
+static int is2_actionfield_set_map_size[] = {
+ [VCAP_AFS_BASE_TYPE] = ARRAY_SIZE(is2_base_type_actionfield),
+ [VCAP_AFS_SMAC_SIP] = ARRAY_SIZE(is2_smac_sip_actionfield),
+};
+
+static int es0_actionfield_set_map_size[] = {
+ [VCAP_AFS_VID] = ARRAY_SIZE(es0_vid_actionfield),
+};
+
+/* Type Groups */
+static const struct vcap_typegroup is1_x4_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 96,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 192,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 288,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is1_x2_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 96,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is1_x1_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 1,
+ .value = 1,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x4_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 96,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 192,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 288,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x2_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 96,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x1_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 1,
+ .value = 1,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es0_x1_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup *is1_keyfield_set_typegroups[] = {
+ [4] = is1_x4_keyfield_set_typegroups,
+ [2] = is1_x2_keyfield_set_typegroups,
+ [1] = is1_x1_keyfield_set_typegroups,
+ [5] = NULL,
+};
+
+static const struct vcap_typegroup *is2_keyfield_set_typegroups[] = {
+ [4] = is2_x4_keyfield_set_typegroups,
+ [2] = is2_x2_keyfield_set_typegroups,
+ [1] = is2_x1_keyfield_set_typegroups,
+ [5] = NULL,
+};
+
+static const struct vcap_typegroup *es0_keyfield_set_typegroups[] = {
+ [1] = es0_x1_keyfield_set_typegroups,
+ [2] = NULL,
+};
+
+static const struct vcap_typegroup is1_x1_actionfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup is2_x2_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 31,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x1_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 1,
+ .value = 1,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es0_x1_actionfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup *is1_actionfield_set_typegroups[] = {
+ [1] = is1_x1_actionfield_set_typegroups,
+ [5] = NULL,
+};
+
+static const struct vcap_typegroup *is2_actionfield_set_typegroups[] = {
+ [2] = is2_x2_actionfield_set_typegroups,
+ [1] = is2_x1_actionfield_set_typegroups,
+ [5] = NULL,
+};
+
+static const struct vcap_typegroup *es0_actionfield_set_typegroups[] = {
+ [1] = es0_x1_actionfield_set_typegroups,
+ [2] = NULL,
+};
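+
+/* The typegroup tables above are indexed by how many subwords a rule
+ * occupies (x4/x2/x1); each entry asserts that the .width bits at bit
+ * .offset of the entry must carry .value, and a zero-width entry
+ * terminates the list. A sketch (hypothetical helper, illustration only)
+ * of validating one set against a bit getter:
+ *
+ *	static bool demo_typegroup_ok(const struct vcap_typegroup *tg,
+ *				      u32 (*get_bits)(u32 offset, u32 width))
+ *	{
+ *		for (; tg->width; tg++)
+ *			if (get_bits(tg->offset, tg->width) != tg->value)
+ *				return false;
+ *		return true;
+ *	}
+ */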
+
+/* Keyfieldset names */
+static const char * const vcap_keyfield_set_names[] = {
+ [VCAP_KFS_NO_VALUE] = "(None)",
+ [VCAP_KFS_5TUPLE_IP4] = "VCAP_KFS_5TUPLE_IP4",
+ [VCAP_KFS_5TUPLE_IP6] = "VCAP_KFS_5TUPLE_IP6",
+ [VCAP_KFS_7TUPLE] = "VCAP_KFS_7TUPLE",
+ [VCAP_KFS_ARP] = "VCAP_KFS_ARP",
+ [VCAP_KFS_DBL_VID] = "VCAP_KFS_DBL_VID",
+ [VCAP_KFS_DMAC_VID] = "VCAP_KFS_DMAC_VID",
+ [VCAP_KFS_ETAG] = "VCAP_KFS_ETAG",
+ [VCAP_KFS_IP4_OTHER] = "VCAP_KFS_IP4_OTHER",
+ [VCAP_KFS_IP4_TCP_UDP] = "VCAP_KFS_IP4_TCP_UDP",
+ [VCAP_KFS_IP4_VID] = "VCAP_KFS_IP4_VID",
+ [VCAP_KFS_IP6_OTHER] = "VCAP_KFS_IP6_OTHER",
+ [VCAP_KFS_IP6_STD] = "VCAP_KFS_IP6_STD",
+ [VCAP_KFS_IP6_TCP_UDP] = "VCAP_KFS_IP6_TCP_UDP",
+ [VCAP_KFS_IP6_VID] = "VCAP_KFS_IP6_VID",
+ [VCAP_KFS_IP_7TUPLE] = "VCAP_KFS_IP_7TUPLE",
+ [VCAP_KFS_ISDX] = "VCAP_KFS_ISDX",
+ [VCAP_KFS_LL_FULL] = "VCAP_KFS_LL_FULL",
+ [VCAP_KFS_MAC_ETYPE] = "VCAP_KFS_MAC_ETYPE",
+ [VCAP_KFS_MAC_LLC] = "VCAP_KFS_MAC_LLC",
+ [VCAP_KFS_MAC_SNAP] = "VCAP_KFS_MAC_SNAP",
+ [VCAP_KFS_NORMAL] = "VCAP_KFS_NORMAL",
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = "VCAP_KFS_NORMAL_5TUPLE_IP4",
+ [VCAP_KFS_NORMAL_7TUPLE] = "VCAP_KFS_NORMAL_7TUPLE",
+ [VCAP_KFS_NORMAL_IP6] = "VCAP_KFS_NORMAL_IP6",
+ [VCAP_KFS_OAM] = "VCAP_KFS_OAM",
+ [VCAP_KFS_PURE_5TUPLE_IP4] = "VCAP_KFS_PURE_5TUPLE_IP4",
+ [VCAP_KFS_RT] = "VCAP_KFS_RT",
+ [VCAP_KFS_SMAC_SIP4] = "VCAP_KFS_SMAC_SIP4",
+ [VCAP_KFS_SMAC_SIP6] = "VCAP_KFS_SMAC_SIP6",
+ [VCAP_KFS_VID] = "VCAP_KFS_VID",
+};
+
+/* Actionfieldset names */
+static const char * const vcap_actionfield_set_names[] = {
+ [VCAP_AFS_NO_VALUE] = "(None)",
+ [VCAP_AFS_BASE_TYPE] = "VCAP_AFS_BASE_TYPE",
+ [VCAP_AFS_CLASSIFICATION] = "VCAP_AFS_CLASSIFICATION",
+ [VCAP_AFS_CLASS_REDUCED] = "VCAP_AFS_CLASS_REDUCED",
+ [VCAP_AFS_ES0] = "VCAP_AFS_ES0",
+ [VCAP_AFS_FULL] = "VCAP_AFS_FULL",
+ [VCAP_AFS_S1] = "VCAP_AFS_S1",
+ [VCAP_AFS_SMAC_SIP] = "VCAP_AFS_SMAC_SIP",
+ [VCAP_AFS_VID] = "VCAP_AFS_VID",
+};
+
+/* Keyfield names */
+static const char * const vcap_keyfield_names[] = {
+ [VCAP_KF_NO_VALUE] = "(None)",
+ [VCAP_KF_8021BR_ECID_BASE] = "8021BR_ECID_BASE",
+ [VCAP_KF_8021BR_ECID_EXT] = "8021BR_ECID_EXT",
+ [VCAP_KF_8021BR_E_TAGGED] = "8021BR_E_TAGGED",
+ [VCAP_KF_8021BR_GRP] = "8021BR_GRP",
+ [VCAP_KF_8021BR_IGR_ECID_BASE] = "8021BR_IGR_ECID_BASE",
+ [VCAP_KF_8021BR_IGR_ECID_EXT] = "8021BR_IGR_ECID_EXT",
+ [VCAP_KF_8021CB_R_TAGGED_IS] = "8021CB_R_TAGGED_IS",
+ [VCAP_KF_8021Q_DEI0] = "8021Q_DEI0",
+ [VCAP_KF_8021Q_DEI1] = "8021Q_DEI1",
+ [VCAP_KF_8021Q_DEI2] = "8021Q_DEI2",
+ [VCAP_KF_8021Q_DEI_CLS] = "8021Q_DEI_CLS",
+ [VCAP_KF_8021Q_PCP0] = "8021Q_PCP0",
+ [VCAP_KF_8021Q_PCP1] = "8021Q_PCP1",
+ [VCAP_KF_8021Q_PCP2] = "8021Q_PCP2",
+ [VCAP_KF_8021Q_PCP_CLS] = "8021Q_PCP_CLS",
+ [VCAP_KF_8021Q_TPID] = "8021Q_TPID",
+ [VCAP_KF_8021Q_TPID0] = "8021Q_TPID0",
+ [VCAP_KF_8021Q_TPID1] = "8021Q_TPID1",
+ [VCAP_KF_8021Q_TPID2] = "8021Q_TPID2",
+ [VCAP_KF_8021Q_VID0] = "8021Q_VID0",
+ [VCAP_KF_8021Q_VID1] = "8021Q_VID1",
+ [VCAP_KF_8021Q_VID2] = "8021Q_VID2",
+ [VCAP_KF_8021Q_VID_CLS] = "8021Q_VID_CLS",
+ [VCAP_KF_8021Q_VLAN_DBL_TAGGED_IS] = "8021Q_VLAN_DBL_TAGGED_IS",
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = "8021Q_VLAN_TAGGED_IS",
+ [VCAP_KF_8021Q_VLAN_TAGS] = "8021Q_VLAN_TAGS",
+ [VCAP_KF_ACL_GRP_ID] = "ACL_GRP_ID",
+ [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = "ARP_ADDR_SPACE_OK_IS",
+ [VCAP_KF_ARP_LEN_OK_IS] = "ARP_LEN_OK_IS",
+ [VCAP_KF_ARP_OPCODE] = "ARP_OPCODE",
+ [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = "ARP_OPCODE_UNKNOWN_IS",
+ [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = "ARP_PROTO_SPACE_OK_IS",
+ [VCAP_KF_ARP_SENDER_MATCH_IS] = "ARP_SENDER_MATCH_IS",
+ [VCAP_KF_ARP_TGT_MATCH_IS] = "ARP_TGT_MATCH_IS",
+ [VCAP_KF_COSID_CLS] = "COSID_CLS",
+ [VCAP_KF_ES0_ISDX_KEY_ENA] = "ES0_ISDX_KEY_ENA",
+ [VCAP_KF_ETYPE] = "ETYPE",
+ [VCAP_KF_ETYPE_LEN_IS] = "ETYPE_LEN_IS",
+ [VCAP_KF_HOST_MATCH] = "HOST_MATCH",
+ [VCAP_KF_IF_EGR_PORT_MASK] = "IF_EGR_PORT_MASK",
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = "IF_EGR_PORT_MASK_RNG",
+ [VCAP_KF_IF_EGR_PORT_NO] = "IF_EGR_PORT_NO",
+ [VCAP_KF_IF_IGR_PORT] = "IF_IGR_PORT",
+ [VCAP_KF_IF_IGR_PORT_MASK] = "IF_IGR_PORT_MASK",
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = "IF_IGR_PORT_MASK_L3",
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = "IF_IGR_PORT_MASK_RNG",
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = "IF_IGR_PORT_MASK_SEL",
+ [VCAP_KF_IF_IGR_PORT_SEL] = "IF_IGR_PORT_SEL",
+ [VCAP_KF_IP4_IS] = "IP4_IS",
+ [VCAP_KF_IP_MC_IS] = "IP_MC_IS",
+ [VCAP_KF_IP_PAYLOAD_5TUPLE] = "IP_PAYLOAD_5TUPLE",
+ [VCAP_KF_IP_PAYLOAD_S1_IP6] = "IP_PAYLOAD_S1_IP6",
+ [VCAP_KF_IP_SNAP_IS] = "IP_SNAP_IS",
+ [VCAP_KF_ISDX_CLS] = "ISDX_CLS",
+ [VCAP_KF_ISDX_GT0_IS] = "ISDX_GT0_IS",
+ [VCAP_KF_L2_BC_IS] = "L2_BC_IS",
+ [VCAP_KF_L2_DMAC] = "L2_DMAC",
+ [VCAP_KF_L2_FRM_TYPE] = "L2_FRM_TYPE",
+ [VCAP_KF_L2_FWD_IS] = "L2_FWD_IS",
+ [VCAP_KF_L2_LLC] = "L2_LLC",
+ [VCAP_KF_L2_MAC] = "L2_MAC",
+ [VCAP_KF_L2_MC_IS] = "L2_MC_IS",
+ [VCAP_KF_L2_PAYLOAD0] = "L2_PAYLOAD0",
+ [VCAP_KF_L2_PAYLOAD1] = "L2_PAYLOAD1",
+ [VCAP_KF_L2_PAYLOAD2] = "L2_PAYLOAD2",
+ [VCAP_KF_L2_PAYLOAD_ETYPE] = "L2_PAYLOAD_ETYPE",
+ [VCAP_KF_L2_SMAC] = "L2_SMAC",
+ [VCAP_KF_L2_SNAP] = "L2_SNAP",
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = "L3_DIP_EQ_SIP_IS",
+ [VCAP_KF_L3_DPL_CLS] = "L3_DPL_CLS",
+ [VCAP_KF_L3_DSCP] = "L3_DSCP",
+ [VCAP_KF_L3_DST_IS] = "L3_DST_IS",
+ [VCAP_KF_L3_FRAGMENT] = "L3_FRAGMENT",
+ [VCAP_KF_L3_FRAGMENT_TYPE] = "L3_FRAGMENT_TYPE",
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = "L3_FRAG_INVLD_L4_LEN",
+ [VCAP_KF_L3_FRAG_OFS_GT0] = "L3_FRAG_OFS_GT0",
+ [VCAP_KF_L3_IP4_DIP] = "L3_IP4_DIP",
+ [VCAP_KF_L3_IP4_SIP] = "L3_IP4_SIP",
+ [VCAP_KF_L3_IP6_DIP] = "L3_IP6_DIP",
+ [VCAP_KF_L3_IP6_DIP_MSB] = "L3_IP6_DIP_MSB",
+ [VCAP_KF_L3_IP6_SIP] = "L3_IP6_SIP",
+ [VCAP_KF_L3_IP6_SIP_MSB] = "L3_IP6_SIP_MSB",
+ [VCAP_KF_L3_IP_PROTO] = "L3_IP_PROTO",
+ [VCAP_KF_L3_OPTIONS_IS] = "L3_OPTIONS_IS",
+ [VCAP_KF_L3_PAYLOAD] = "L3_PAYLOAD",
+ [VCAP_KF_L3_RT_IS] = "L3_RT_IS",
+ [VCAP_KF_L3_TOS] = "L3_TOS",
+ [VCAP_KF_L3_TTL_GT0] = "L3_TTL_GT0",
+ [VCAP_KF_L4_1588_DOM] = "L4_1588_DOM",
+ [VCAP_KF_L4_1588_VER] = "L4_1588_VER",
+ [VCAP_KF_L4_ACK] = "L4_ACK",
+ [VCAP_KF_L4_DPORT] = "L4_DPORT",
+ [VCAP_KF_L4_FIN] = "L4_FIN",
+ [VCAP_KF_L4_PAYLOAD] = "L4_PAYLOAD",
+ [VCAP_KF_L4_PSH] = "L4_PSH",
+ [VCAP_KF_L4_RNG] = "L4_RNG",
+ [VCAP_KF_L4_RST] = "L4_RST",
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = "L4_SEQUENCE_EQ0_IS",
+ [VCAP_KF_L4_SPORT] = "L4_SPORT",
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = "L4_SPORT_EQ_DPORT_IS",
+ [VCAP_KF_L4_SYN] = "L4_SYN",
+ [VCAP_KF_L4_URG] = "L4_URG",
+ [VCAP_KF_LOOKUP_FIRST_IS] = "LOOKUP_FIRST_IS",
+ [VCAP_KF_LOOKUP_GEN_IDX] = "LOOKUP_GEN_IDX",
+ [VCAP_KF_LOOKUP_GEN_IDX_SEL] = "LOOKUP_GEN_IDX_SEL",
+ [VCAP_KF_LOOKUP_INDEX] = "LOOKUP_INDEX",
+ [VCAP_KF_LOOKUP_PAG] = "LOOKUP_PAG",
+ [VCAP_KF_MIRROR_PROBE] = "MIRROR_PROBE",
+ [VCAP_KF_OAM_CCM_CNTS_EQ0] = "OAM_CCM_CNTS_EQ0",
+ [VCAP_KF_OAM_DETECTED] = "OAM_DETECTED",
+ [VCAP_KF_OAM_FLAGS] = "OAM_FLAGS",
+ [VCAP_KF_OAM_MEL_FLAGS] = "OAM_MEL_FLAGS",
+ [VCAP_KF_OAM_MEPID] = "OAM_MEPID",
+ [VCAP_KF_OAM_OPCODE] = "OAM_OPCODE",
+ [VCAP_KF_OAM_VER] = "OAM_VER",
+ [VCAP_KF_OAM_Y1731_IS] = "OAM_Y1731_IS",
+ [VCAP_KF_PDU_TYPE] = "PDU_TYPE",
+ [VCAP_KF_PROT_ACTIVE] = "PROT_ACTIVE",
+ [VCAP_KF_RTP_ID] = "RTP_ID",
+ [VCAP_KF_RT_FRMID] = "RT_FRMID",
+ [VCAP_KF_RT_TYPE] = "RT_TYPE",
+ [VCAP_KF_RT_VLAN_IDX] = "RT_VLAN_IDX",
+ [VCAP_KF_TCP_IS] = "TCP_IS",
+ [VCAP_KF_TCP_UDP_IS] = "TCP_UDP_IS",
+ [VCAP_KF_TYPE] = "TYPE",
+};
+
+/* Actionfield names */
+static const char * const vcap_actionfield_names[] = {
+ [VCAP_AF_NO_VALUE] = "(None)",
+ [VCAP_AF_ACL_ID] = "ACL_ID",
+ [VCAP_AF_CLS_VID_SEL] = "CLS_VID_SEL",
+ [VCAP_AF_CNT_ID] = "CNT_ID",
+ [VCAP_AF_COPY_PORT_NUM] = "COPY_PORT_NUM",
+ [VCAP_AF_COPY_QUEUE_NUM] = "COPY_QUEUE_NUM",
+ [VCAP_AF_CPU_COPY_ENA] = "CPU_COPY_ENA",
+ [VCAP_AF_CPU_QU] = "CPU_QU",
+ [VCAP_AF_CPU_QUEUE_NUM] = "CPU_QUEUE_NUM",
+ [VCAP_AF_CUSTOM_ACE_TYPE_ENA] = "CUSTOM_ACE_TYPE_ENA",
+ [VCAP_AF_DEI_A_VAL] = "DEI_A_VAL",
+ [VCAP_AF_DEI_B_VAL] = "DEI_B_VAL",
+ [VCAP_AF_DEI_C_VAL] = "DEI_C_VAL",
+ [VCAP_AF_DEI_ENA] = "DEI_ENA",
+ [VCAP_AF_DEI_VAL] = "DEI_VAL",
+ [VCAP_AF_DLR_SEL] = "DLR_SEL",
+ [VCAP_AF_DP_ENA] = "DP_ENA",
+ [VCAP_AF_DP_VAL] = "DP_VAL",
+ [VCAP_AF_DSCP_ENA] = "DSCP_ENA",
+ [VCAP_AF_DSCP_SEL] = "DSCP_SEL",
+ [VCAP_AF_DSCP_VAL] = "DSCP_VAL",
+ [VCAP_AF_ES2_REW_CMD] = "ES2_REW_CMD",
+ [VCAP_AF_ESDX] = "ESDX",
+ [VCAP_AF_FWD_KILL_ENA] = "FWD_KILL_ENA",
+ [VCAP_AF_FWD_MODE] = "FWD_MODE",
+ [VCAP_AF_FWD_SEL] = "FWD_SEL",
+ [VCAP_AF_HIT_ME_ONCE] = "HIT_ME_ONCE",
+ [VCAP_AF_HOST_MATCH] = "HOST_MATCH",
+ [VCAP_AF_IGNORE_PIPELINE_CTRL] = "IGNORE_PIPELINE_CTRL",
+ [VCAP_AF_INTR_ENA] = "INTR_ENA",
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = "ISDX_ADD_REPLACE_SEL",
+ [VCAP_AF_ISDX_ADD_VAL] = "ISDX_ADD_VAL",
+ [VCAP_AF_ISDX_ENA] = "ISDX_ENA",
+ [VCAP_AF_ISDX_REPLACE_ENA] = "ISDX_REPLACE_ENA",
+ [VCAP_AF_ISDX_VAL] = "ISDX_VAL",
+ [VCAP_AF_LOOP_ENA] = "LOOP_ENA",
+ [VCAP_AF_LRN_DIS] = "LRN_DIS",
+ [VCAP_AF_MAP_IDX] = "MAP_IDX",
+ [VCAP_AF_MAP_KEY] = "MAP_KEY",
+ [VCAP_AF_MAP_LOOKUP_SEL] = "MAP_LOOKUP_SEL",
+ [VCAP_AF_MASK_MODE] = "MASK_MODE",
+ [VCAP_AF_MATCH_ID] = "MATCH_ID",
+ [VCAP_AF_MATCH_ID_MASK] = "MATCH_ID_MASK",
+ [VCAP_AF_MIRROR_ENA] = "MIRROR_ENA",
+ [VCAP_AF_MIRROR_PROBE] = "MIRROR_PROBE",
+ [VCAP_AF_MIRROR_PROBE_ID] = "MIRROR_PROBE_ID",
+ [VCAP_AF_MRP_SEL] = "MRP_SEL",
+ [VCAP_AF_NXT_IDX] = "NXT_IDX",
+ [VCAP_AF_NXT_IDX_CTRL] = "NXT_IDX_CTRL",
+ [VCAP_AF_OAM_SEL] = "OAM_SEL",
+ [VCAP_AF_PAG_OVERRIDE_MASK] = "PAG_OVERRIDE_MASK",
+ [VCAP_AF_PAG_VAL] = "PAG_VAL",
+ [VCAP_AF_PCP_A_VAL] = "PCP_A_VAL",
+ [VCAP_AF_PCP_B_VAL] = "PCP_B_VAL",
+ [VCAP_AF_PCP_C_VAL] = "PCP_C_VAL",
+ [VCAP_AF_PCP_ENA] = "PCP_ENA",
+ [VCAP_AF_PCP_VAL] = "PCP_VAL",
+ [VCAP_AF_PIPELINE_ACT] = "PIPELINE_ACT",
+ [VCAP_AF_PIPELINE_FORCE_ENA] = "PIPELINE_FORCE_ENA",
+ [VCAP_AF_PIPELINE_PT] = "PIPELINE_PT",
+ [VCAP_AF_POLICE_ENA] = "POLICE_ENA",
+ [VCAP_AF_POLICE_IDX] = "POLICE_IDX",
+ [VCAP_AF_POLICE_REMARK] = "POLICE_REMARK",
+ [VCAP_AF_POLICE_VCAP_ONLY] = "POLICE_VCAP_ONLY",
+ [VCAP_AF_POP_VAL] = "POP_VAL",
+ [VCAP_AF_PORT_MASK] = "PORT_MASK",
+ [VCAP_AF_PUSH_CUSTOMER_TAG] = "PUSH_CUSTOMER_TAG",
+ [VCAP_AF_PUSH_INNER_TAG] = "PUSH_INNER_TAG",
+ [VCAP_AF_PUSH_OUTER_TAG] = "PUSH_OUTER_TAG",
+ [VCAP_AF_QOS_ENA] = "QOS_ENA",
+ [VCAP_AF_QOS_VAL] = "QOS_VAL",
+ [VCAP_AF_REW_OP] = "REW_OP",
+ [VCAP_AF_RT_DIS] = "RT_DIS",
+ [VCAP_AF_SFID_ENA] = "SFID_ENA",
+ [VCAP_AF_SFID_VAL] = "SFID_VAL",
+ [VCAP_AF_SGID_ENA] = "SGID_ENA",
+ [VCAP_AF_SGID_VAL] = "SGID_VAL",
+ [VCAP_AF_SWAP_MACS_ENA] = "SWAP_MACS_ENA",
+ [VCAP_AF_TAG_A_DEI_SEL] = "TAG_A_DEI_SEL",
+ [VCAP_AF_TAG_A_PCP_SEL] = "TAG_A_PCP_SEL",
+ [VCAP_AF_TAG_A_TPID_SEL] = "TAG_A_TPID_SEL",
+ [VCAP_AF_TAG_A_VID_SEL] = "TAG_A_VID_SEL",
+ [VCAP_AF_TAG_B_DEI_SEL] = "TAG_B_DEI_SEL",
+ [VCAP_AF_TAG_B_PCP_SEL] = "TAG_B_PCP_SEL",
+ [VCAP_AF_TAG_B_TPID_SEL] = "TAG_B_TPID_SEL",
+ [VCAP_AF_TAG_B_VID_SEL] = "TAG_B_VID_SEL",
+ [VCAP_AF_TAG_C_DEI_SEL] = "TAG_C_DEI_SEL",
+ [VCAP_AF_TAG_C_PCP_SEL] = "TAG_C_PCP_SEL",
+ [VCAP_AF_TAG_C_TPID_SEL] = "TAG_C_TPID_SEL",
+ [VCAP_AF_TAG_C_VID_SEL] = "TAG_C_VID_SEL",
+ [VCAP_AF_TYPE] = "TYPE",
+ [VCAP_AF_UNTAG_VID_ENA] = "UNTAG_VID_ENA",
+ [VCAP_AF_VID_A_VAL] = "VID_A_VAL",
+ [VCAP_AF_VID_B_VAL] = "VID_B_VAL",
+ [VCAP_AF_VID_C_VAL] = "VID_C_VAL",
+ [VCAP_AF_VID_REPLACE_ENA] = "VID_REPLACE_ENA",
+ [VCAP_AF_VID_VAL] = "VID_VAL",
+ [VCAP_AF_VLAN_POP_CNT] = "VLAN_POP_CNT",
+ [VCAP_AF_VLAN_POP_CNT_ENA] = "VLAN_POP_CNT_ENA",
+};
+
+/* VCAPs */
+const struct vcap_info lan966x_vcaps[] = {
+ [VCAP_TYPE_IS1] = {
+ .name = "is1",
+ .rows = 192,
+ .sw_count = 4,
+ .sw_width = 96,
+ .sticky_width = 32,
+ .act_width = 123,
+ .default_cnt = 0,
+ .require_cnt_dis = 1,
+ .version = 1,
+ .keyfield_set = is1_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(is1_keyfield_set),
+ .actionfield_set = is1_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(is1_actionfield_set),
+ .keyfield_set_map = is1_keyfield_set_map,
+ .keyfield_set_map_size = is1_keyfield_set_map_size,
+ .actionfield_set_map = is1_actionfield_set_map,
+ .actionfield_set_map_size = is1_actionfield_set_map_size,
+ .keyfield_set_typegroups = is1_keyfield_set_typegroups,
+ .actionfield_set_typegroups = is1_actionfield_set_typegroups,
+ },
+ [VCAP_TYPE_IS2] = {
+ .name = "is2",
+ .rows = 64,
+ .sw_count = 4,
+ .sw_width = 96,
+ .sticky_width = 32,
+ .act_width = 31,
+ .default_cnt = 11,
+ .require_cnt_dis = 1,
+ .version = 1,
+ .keyfield_set = is2_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(is2_keyfield_set),
+ .actionfield_set = is2_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(is2_actionfield_set),
+ .keyfield_set_map = is2_keyfield_set_map,
+ .keyfield_set_map_size = is2_keyfield_set_map_size,
+ .actionfield_set_map = is2_actionfield_set_map,
+ .actionfield_set_map_size = is2_actionfield_set_map_size,
+ .keyfield_set_typegroups = is2_keyfield_set_typegroups,
+ .actionfield_set_typegroups = is2_actionfield_set_typegroups,
+ },
+ [VCAP_TYPE_ES0] = {
+ .name = "es0",
+ .rows = 256,
+ .sw_count = 1,
+ .sw_width = 96,
+ .sticky_width = 1,
+ .act_width = 65,
+ .default_cnt = 8,
+ .require_cnt_dis = 0,
+ .version = 1,
+ .keyfield_set = es0_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(es0_keyfield_set),
+ .actionfield_set = es0_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(es0_actionfield_set),
+ .keyfield_set_map = es0_keyfield_set_map,
+ .keyfield_set_map_size = es0_keyfield_set_map_size,
+ .actionfield_set_map = es0_actionfield_set_map,
+ .actionfield_set_map_size = es0_actionfield_set_map_size,
+ .keyfield_set_typegroups = es0_keyfield_set_typegroups,
+ .actionfield_set_typegroups = es0_actionfield_set_typegroups,
+ },
+};
+
+const struct vcap_statistics lan966x_vcap_stats = {
+ .name = "lan966x",
+ .count = 3,
+ .keyfield_set_names = vcap_keyfield_set_names,
+ .actionfield_set_names = vcap_actionfield_set_names,
+ .keyfield_names = vcap_keyfield_names,
+ .actionfield_names = vcap_actionfield_names,
+};
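+
+/* A minimal sketch (hypothetical helper, illustration only; the real
+ * encoding lives in the shared vcap_api code) of how a field descriptor's
+ * offset/width pair places a value into a packed key stream, for fields
+ * that fit in a u32:
+ *
+ *	static void demo_set_field(u32 *stream, const struct vcap_field *f,
+ *				   u32 value)
+ *	{
+ *		for (u32 i = 0; i < f->width; i++) {
+ *			u32 bit = f->offset + i;
+ *
+ *			if (value & BIT(i))
+ *				stream[bit / 32] |= BIT(bit % 32);
+ *			else
+ *				stream[bit / 32] &= ~BIT(bit % 32);
+ *		}
+ *	}
+ */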
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.h b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.h
new file mode 100644
index 0000000000..0d8bbee73b
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+
+#ifndef __LAN966X_VCAP_AG_API_H__
+#define __LAN966X_VCAP_AG_API_H__
+
+#include "vcap_api.h"
+
+extern const struct vcap_info lan966x_vcaps[];
+extern const struct vcap_statistics lan966x_vcap_stats;
+
+#endif /* __LAN966X_VCAP_AG_API_H__ */
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c
new file mode 100644
index 0000000000..ac525ff150
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+#include "lan966x_vcap_ag_api.h"
+#include "vcap_api.h"
+#include "vcap_api_client.h"
+
+static void lan966x_vcap_is1_port_keys(struct lan966x_port *port,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 val;
+
+ out->prf(out->dst, " port[%d] (%s): ", port->chip_port,
+ netdev_name(port->dev));
+
+ val = lan_rd(lan966x, ANA_VCAP_CFG(port->chip_port));
+ out->prf(out->dst, "\n state: ");
+ if (ANA_VCAP_CFG_S1_ENA_GET(val))
+ out->prf(out->dst, "on");
+ else
+ out->prf(out->dst, "off");
+
+ for (int l = 0; l < admin->lookups; ++l) {
+ out->prf(out->dst, "\n Lookup %d: ", l);
+
+ out->prf(out->dst, "\n other: ");
+ val = lan_rd(lan966x, ANA_VCAP_S1_CFG(port->chip_port, l));
+ switch (ANA_VCAP_S1_CFG_KEY_OTHER_CFG_GET(val)) {
+ case VCAP_IS1_PS_OTHER_NORMAL:
+ out->prf(out->dst, "normal");
+ break;
+ case VCAP_IS1_PS_OTHER_7TUPLE:
+ out->prf(out->dst, "7tuple");
+ break;
+ case VCAP_IS1_PS_OTHER_DBL_VID:
+ out->prf(out->dst, "dbl_vid");
+ break;
+ case VCAP_IS1_PS_OTHER_DMAC_VID:
+ out->prf(out->dst, "dmac_vid");
+ break;
+ default:
+ out->prf(out->dst, "-");
+ break;
+ }
+
+ out->prf(out->dst, "\n ipv4: ");
+ switch (ANA_VCAP_S1_CFG_KEY_IP4_CFG_GET(val)) {
+ case VCAP_IS1_PS_IPV4_NORMAL:
+ out->prf(out->dst, "normal");
+ break;
+ case VCAP_IS1_PS_IPV4_7TUPLE:
+ out->prf(out->dst, "7tuple");
+ break;
+ case VCAP_IS1_PS_IPV4_5TUPLE_IP4:
+ out->prf(out->dst, "5tuple_ipv4");
+ break;
+ case VCAP_IS1_PS_IPV4_DBL_VID:
+ out->prf(out->dst, "dbl_vid");
+ break;
+ case VCAP_IS1_PS_IPV4_DMAC_VID:
+ out->prf(out->dst, "dmac_vid");
+ break;
+ default:
+ out->prf(out->dst, "-");
+ break;
+ }
+
+ out->prf(out->dst, "\n ipv6: ");
+ switch (ANA_VCAP_S1_CFG_KEY_IP6_CFG_GET(val)) {
+ case VCAP_IS1_PS_IPV6_NORMAL:
+ out->prf(out->dst, "normal");
+ break;
+ case VCAP_IS1_PS_IPV6_7TUPLE:
+ out->prf(out->dst, "7tuple");
+ break;
+ case VCAP_IS1_PS_IPV6_5TUPLE_IP4:
+ out->prf(out->dst, "5tuple_ip4");
+ break;
+ case VCAP_IS1_PS_IPV6_NORMAL_IP6:
+ out->prf(out->dst, "normal_ip6");
+ break;
+ case VCAP_IS1_PS_IPV6_5TUPLE_IP6:
+ out->prf(out->dst, "5tuple_ip6");
+ break;
+ case VCAP_IS1_PS_IPV6_DBL_VID:
+ out->prf(out->dst, "dbl_vid");
+ break;
+ case VCAP_IS1_PS_IPV6_DMAC_VID:
+ out->prf(out->dst, "dmac_vid");
+ break;
+ default:
+ out->prf(out->dst, "-");
+ break;
+ }
+
+ out->prf(out->dst, "\n rt: ");
+ switch (ANA_VCAP_S1_CFG_KEY_RT_CFG_GET(val)) {
+ case VCAP_IS1_PS_RT_NORMAL:
+ out->prf(out->dst, "normal");
+ break;
+ case VCAP_IS1_PS_RT_7TUPLE:
+ out->prf(out->dst, "7tuple");
+ break;
+ case VCAP_IS1_PS_RT_DBL_VID:
+ out->prf(out->dst, "dbl_vid");
+ break;
+ case VCAP_IS1_PS_RT_DMAC_VID:
+ out->prf(out->dst, "dmac_vid");
+ break;
+ case VCAP_IS1_PS_RT_FOLLOW_OTHER:
+ out->prf(out->dst, "follow_other");
+ break;
+ default:
+ out->prf(out->dst, "-");
+ break;
+ }
+ }
+
+ out->prf(out->dst, "\n");
+}
+
+static void lan966x_vcap_is2_port_keys(struct lan966x_port *port,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 val;
+
+ out->prf(out->dst, " port[%d] (%s): ", port->chip_port,
+ netdev_name(port->dev));
+
+ val = lan_rd(lan966x, ANA_VCAP_S2_CFG(port->chip_port));
+ out->prf(out->dst, "\n state: ");
+ if (ANA_VCAP_S2_CFG_ENA_GET(val))
+ out->prf(out->dst, "on");
+ else
+ out->prf(out->dst, "off");
+
+ for (int l = 0; l < admin->lookups; ++l) {
+ out->prf(out->dst, "\n Lookup %d: ", l);
+
+ out->prf(out->dst, "\n snap: ");
+ if (ANA_VCAP_S2_CFG_SNAP_DIS_GET(val) & (BIT(0) << l))
+ out->prf(out->dst, "mac_llc");
+ else
+ out->prf(out->dst, "mac_snap");
+
+ out->prf(out->dst, "\n oam: ");
+ if (ANA_VCAP_S2_CFG_OAM_DIS_GET(val) & (BIT(0) << l))
+ out->prf(out->dst, "mac_etype");
+ else
+ out->prf(out->dst, "mac_oam");
+
+ out->prf(out->dst, "\n arp: ");
+ if (ANA_VCAP_S2_CFG_ARP_DIS_GET(val) & (BIT(0) << l))
+ out->prf(out->dst, "mac_etype");
+ else
+ out->prf(out->dst, "mac_arp");
+
+ out->prf(out->dst, "\n ipv4_other: ");
+ if (ANA_VCAP_S2_CFG_IP_OTHER_DIS_GET(val) & (BIT(0) << l))
+ out->prf(out->dst, "mac_etype");
+ else
+ out->prf(out->dst, "ip4_other");
+
+ out->prf(out->dst, "\n ipv4_tcp_udp: ");
+ if (ANA_VCAP_S2_CFG_IP_TCPUDP_DIS_GET(val) & (BIT(0) << l))
+ out->prf(out->dst, "mac_etype");
+ else
+ out->prf(out->dst, "ipv4_tcp_udp");
+
+ out->prf(out->dst, "\n ipv6: ");
+ switch (ANA_VCAP_S2_CFG_IP6_CFG_GET(val) & (0x3 << l)) {
+ case VCAP_IS2_PS_IPV6_TCPUDP_OTHER:
+ out->prf(out->dst, "ipv6_tcp_udp ipv6_tcp_udp");
+ break;
+ case VCAP_IS2_PS_IPV6_STD:
+ out->prf(out->dst, "ipv6_std");
+ break;
+ case VCAP_IS2_PS_IPV6_IP4_TCPUDP_IP4_OTHER:
+ out->prf(out->dst, "ipv4_tcp_udp ipv4_tcp_udp");
+ break;
+ case VCAP_IS2_PS_IPV6_MAC_ETYPE:
+ out->prf(out->dst, "mac_etype");
+ break;
+ }
+ }
+
+ out->prf(out->dst, "\n");
+}
+
+static void lan966x_vcap_es0_port_keys(struct lan966x_port *port,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 val;
+
+ out->prf(out->dst, " port[%d] (%s): ", port->chip_port,
+ netdev_name(port->dev));
+
+ val = lan_rd(lan966x, REW_PORT_CFG(port->chip_port));
+ out->prf(out->dst, "\n state: ");
+ if (REW_PORT_CFG_ES0_EN_GET(val))
+ out->prf(out->dst, "on");
+ else
+ out->prf(out->dst, "off");
+
+ out->prf(out->dst, "\n");
+}
+
+int lan966x_vcap_port_info(struct net_device *dev,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ const struct vcap_info *vcap;
+ struct vcap_control *vctrl;
+
+ vctrl = lan966x->vcap_ctrl;
+ vcap = &vctrl->vcaps[admin->vtype];
+
+ out->prf(out->dst, "%s:\n", vcap->name);
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS2:
+ lan966x_vcap_is2_port_keys(port, admin, out);
+ break;
+ case VCAP_TYPE_IS1:
+ lan966x_vcap_is1_port_keys(port, admin, out);
+ break;
+ case VCAP_TYPE_ES0:
+ lan966x_vcap_es0_port_keys(port, admin, out);
+ break;
+ default:
+ out->prf(out->dst, " no info\n");
+ break;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
new file mode 100644
index 0000000000..a4414f63c9
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
@@ -0,0 +1,784 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+#include "lan966x_vcap_ag_api.h"
+#include "vcap_api.h"
+#include "vcap_api_client.h"
+#include "vcap_api_debugfs.h"
+
+#define STREAMSIZE (64 * 4)
+
+#define LAN966X_IS1_LOOKUPS 3
+#define LAN966X_IS2_LOOKUPS 2
+#define LAN966X_ES0_LOOKUPS 1
+
+#define LAN966X_STAT_ESDX_GRN_BYTES 0x300
+#define LAN966X_STAT_ESDX_GRN_PKTS 0x301
+#define LAN966X_STAT_ESDX_YEL_BYTES 0x302
+#define LAN966X_STAT_ESDX_YEL_PKTS 0x303
+
+static struct lan966x_vcap_inst {
+ enum vcap_type vtype; /* type of vcap */
+ int tgt_inst; /* hardware instance number */
+ int lookups; /* number of lookups in this vcap type */
+ int first_cid; /* first chain id in this vcap */
+ int last_cid; /* last chain id in this vcap */
+ int count; /* number of available addresses */
+ bool ingress; /* is vcap in the ingress path */
+} lan966x_vcap_inst_cfg[] = {
+ {
+ .vtype = VCAP_TYPE_ES0,
+ .tgt_inst = 0,
+ .lookups = LAN966X_ES0_LOOKUPS,
+ .first_cid = LAN966X_VCAP_CID_ES0_L0,
+ .last_cid = LAN966X_VCAP_CID_ES0_MAX,
+ .count = 64,
+ },
+ {
+ .vtype = VCAP_TYPE_IS1, /* IS1-0 */
+ .tgt_inst = 1,
+ .lookups = LAN966X_IS1_LOOKUPS,
+ .first_cid = LAN966X_VCAP_CID_IS1_L0,
+ .last_cid = LAN966X_VCAP_CID_IS1_MAX,
+ .count = 768,
+ .ingress = true,
+ },
+ {
+ .vtype = VCAP_TYPE_IS2, /* IS2-0 */
+ .tgt_inst = 2,
+ .lookups = LAN966X_IS2_LOOKUPS,
+ .first_cid = LAN966X_VCAP_CID_IS2_L0,
+ .last_cid = LAN966X_VCAP_CID_IS2_MAX,
+ .count = 256,
+ .ingress = true,
+ },
+};
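+
+/* The first_cid/last_cid pairs above partition the chain id space between
+ * the instances, so an instance can be resolved by a range check. A sketch
+ * (hypothetical helper, illustration only):
+ *
+ *	static const struct lan966x_vcap_inst *demo_cid_to_inst(int cid)
+ *	{
+ *		for (int i = 0; i < ARRAY_SIZE(lan966x_vcap_inst_cfg); ++i)
+ *			if (cid >= lan966x_vcap_inst_cfg[i].first_cid &&
+ *			    cid <= lan966x_vcap_inst_cfg[i].last_cid)
+ *				return &lan966x_vcap_inst_cfg[i];
+ *		return NULL;
+ *	}
+ */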
+
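+/* readx_poll_timeout() passes a single context argument to its read op, so
+ * the target device and VCAP instance are bundled here for
+ * lan966x_vcap_read_update_ctrl().
+ */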
+struct lan966x_vcap_cmd_cb {
+ struct lan966x *lan966x;
+ u32 instance;
+};
+
+static u32 lan966x_vcap_read_update_ctrl(const struct lan966x_vcap_cmd_cb *cb)
+{
+ return lan_rd(cb->lan966x, VCAP_UPDATE_CTRL(cb->instance));
+}
+
+static void lan966x_vcap_wait_update(struct lan966x *lan966x, int instance)
+{
+ const struct lan966x_vcap_cmd_cb cb = { .lan966x = lan966x,
+ .instance = instance };
+ u32 val;
+
+ readx_poll_timeout(lan966x_vcap_read_update_ctrl, &cb, val,
+ (val & VCAP_UPDATE_CTRL_UPDATE_SHOT) == 0, 10,
+ 100000);
+}
+
+static void __lan966x_vcap_range_init(struct lan966x *lan966x,
+ struct vcap_admin *admin,
+ u32 addr,
+ u32 count)
+{
+ lan_wr(VCAP_MV_CFG_MV_NUM_POS_SET(0) |
+ VCAP_MV_CFG_MV_SIZE_SET(count - 1),
+ lan966x, VCAP_MV_CFG(admin->tgt_inst));
+
+ lan_wr(VCAP_UPDATE_CTRL_UPDATE_CMD_SET(VCAP_CMD_INITIALIZE) |
+ VCAP_UPDATE_CTRL_UPDATE_ENTRY_DIS_SET(0) |
+ VCAP_UPDATE_CTRL_UPDATE_ACTION_DIS_SET(0) |
+ VCAP_UPDATE_CTRL_UPDATE_CNT_DIS_SET(0) |
+ VCAP_UPDATE_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_UPDATE_CTRL_CLEAR_CACHE_SET(true) |
+ VCAP_UPDATE_CTRL_UPDATE_SHOT_SET(1),
+ lan966x, VCAP_UPDATE_CTRL(admin->tgt_inst));
+
+ lan966x_vcap_wait_update(lan966x, admin->tgt_inst);
+}
+
+static int lan966x_vcap_is1_cid_to_lookup(int cid)
+{
+ int lookup = 0;
+
+ if (cid >= LAN966X_VCAP_CID_IS1_L1 &&
+ cid < LAN966X_VCAP_CID_IS1_L2)
+ lookup = 1;
+ else if (cid >= LAN966X_VCAP_CID_IS1_L2 &&
+ cid < LAN966X_VCAP_CID_IS1_MAX)
+ lookup = 2;
+
+ return lookup;
+}
+
+static int lan966x_vcap_is2_cid_to_lookup(int cid)
+{
+ if (cid >= LAN966X_VCAP_CID_IS2_L1 &&
+ cid < LAN966X_VCAP_CID_IS2_MAX)
+ return 1;
+
+ return 0;
+}
+
+/* Return the list of keysets for the vcap port configuration */
+static int
+lan966x_vcap_is1_get_port_keysets(struct net_device *ndev, int lookup,
+ struct vcap_keyset_list *keysetlist,
+ u16 l3_proto)
+{
+ struct lan966x_port *port = netdev_priv(ndev);
+ struct lan966x *lan966x = port->lan966x;
+ u32 val;
+
+ val = lan_rd(lan966x, ANA_VCAP_S1_CFG(port->chip_port, lookup));
+
+ /* Collect all keysets for the port in a list */
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IP) {
+ switch (ANA_VCAP_S1_CFG_KEY_IP4_CFG_GET(val)) {
+ case VCAP_IS1_PS_IPV4_7TUPLE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_7TUPLE);
+ break;
+ case VCAP_IS1_PS_IPV4_5TUPLE_IP4:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_5TUPLE_IP4);
+ break;
+ case VCAP_IS1_PS_IPV4_NORMAL:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_NORMAL);
+ break;
+ }
+ }
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IPV6) {
+ switch (ANA_VCAP_S1_CFG_KEY_IP6_CFG_GET(val)) {
+ case VCAP_IS1_PS_IPV6_NORMAL:
+ case VCAP_IS1_PS_IPV6_NORMAL_IP6:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_NORMAL);
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_NORMAL_IP6);
+ break;
+ case VCAP_IS1_PS_IPV6_5TUPLE_IP6:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_5TUPLE_IP6);
+ break;
+ case VCAP_IS1_PS_IPV6_7TUPLE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_7TUPLE);
+ break;
+ case VCAP_IS1_PS_IPV6_5TUPLE_IP4:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_5TUPLE_IP4);
+ break;
+ case VCAP_IS1_PS_IPV6_DMAC_VID:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_DMAC_VID);
+ break;
+ }
+ }
+
+ switch (ANA_VCAP_S1_CFG_KEY_OTHER_CFG_GET(val)) {
+ case VCAP_IS1_PS_OTHER_7TUPLE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_7TUPLE);
+ break;
+ case VCAP_IS1_PS_OTHER_NORMAL:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_NORMAL);
+ break;
+ }
+
+ return 0;
+}
+
+static int
+lan966x_vcap_is2_get_port_keysets(struct net_device *dev, int lookup,
+ struct vcap_keyset_list *keysetlist,
+ u16 l3_proto)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ bool found = false;
+ u32 val;
+
+ val = lan_rd(lan966x, ANA_VCAP_S2_CFG(port->chip_port));
+
+ /* Collect all keysets for the port in a list */
+ if (l3_proto == ETH_P_ALL)
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_SNAP) {
+ if (ANA_VCAP_S2_CFG_SNAP_DIS_GET(val) & (BIT(0) << lookup))
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_LLC);
+ else
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_SNAP);
+
+ found = true;
+ }
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_CFM) {
+ if (ANA_VCAP_S2_CFG_OAM_DIS_GET(val) & (BIT(0) << lookup))
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ else
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_OAM);
+
+ found = true;
+ }
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_ARP) {
+ if (ANA_VCAP_S2_CFG_ARP_DIS_GET(val) & (BIT(0) << lookup))
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ else
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_ARP);
+
+ found = true;
+ }
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IP) {
+ if (ANA_VCAP_S2_CFG_IP_OTHER_DIS_GET(val) & (BIT(0) << lookup))
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ else
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_OTHER);
+
+ if (ANA_VCAP_S2_CFG_IP_TCPUDP_DIS_GET(val) & (BIT(0) << lookup))
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ else
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_TCP_UDP);
+
+ found = true;
+ }
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IPV6) {
+ switch (ANA_VCAP_S2_CFG_IP6_CFG_GET(val) & (0x3 << lookup)) {
+ case VCAP_IS2_PS_IPV6_TCPUDP_OTHER:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP6_OTHER);
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP6_TCP_UDP);
+ break;
+ case VCAP_IS2_PS_IPV6_STD:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP6_STD);
+ break;
+ case VCAP_IS2_PS_IPV6_IP4_TCPUDP_IP4_OTHER:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_OTHER);
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_TCP_UDP);
+ break;
+ case VCAP_IS2_PS_IPV6_MAC_ETYPE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ break;
+ }
+
+ found = true;
+ }
+
+ if (!found)
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+
+ return 0;
+}
+
+static enum vcap_keyfield_set
+lan966x_vcap_validate_keyset(struct net_device *dev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule,
+ struct vcap_keyset_list *kslist,
+ u16 l3_proto)
+{
+ struct vcap_keyset_list keysetlist = {};
+ enum vcap_keyfield_set keysets[10] = {};
+ int lookup;
+ int err;
+
+ if (!kslist || kslist->cnt == 0)
+ return VCAP_KFS_NO_VALUE;
+
+ keysetlist.max = ARRAY_SIZE(keysets);
+ keysetlist.keysets = keysets;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS1:
+ lookup = lan966x_vcap_is1_cid_to_lookup(rule->vcap_chain_id);
+ err = lan966x_vcap_is1_get_port_keysets(dev, lookup, &keysetlist,
+ l3_proto);
+ break;
+ case VCAP_TYPE_IS2:
+ lookup = lan966x_vcap_is2_cid_to_lookup(rule->vcap_chain_id);
+ err = lan966x_vcap_is2_get_port_keysets(dev, lookup, &keysetlist,
+ l3_proto);
+ break;
+ case VCAP_TYPE_ES0:
+ return kslist->keysets[0];
+ default:
+ pr_err("vcap type: %s not supported\n",
+ lan966x_vcaps[admin->vtype].name);
+ return VCAP_KFS_NO_VALUE;
+ }
+
+ if (err)
+ return VCAP_KFS_NO_VALUE;
+
+ /* Return the first matching keyset, if any */
+ for (int i = 0; i < kslist->cnt; ++i)
+ for (int j = 0; j < keysetlist.cnt; ++j)
+ if (kslist->keysets[i] == keysets[j])
+ return kslist->keysets[i];
+
+ return VCAP_KFS_NO_VALUE;
+}
+
+static bool lan966x_vcap_is2_is_first_chain(struct vcap_rule *rule)
+{
+ return (rule->vcap_chain_id >= LAN966X_VCAP_CID_IS2_L0 &&
+ rule->vcap_chain_id < LAN966X_VCAP_CID_IS2_L1);
+}
+
+static void lan966x_vcap_is1_add_default_fields(struct lan966x_port *port,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule)
+{
+ u32 value, mask;
+ u32 lookup;
+
+ if (vcap_rule_get_key_u32(rule, VCAP_KF_IF_IGR_PORT_MASK,
+ &value, &mask))
+ vcap_rule_add_key_u32(rule, VCAP_KF_IF_IGR_PORT_MASK, 0,
+ ~BIT(port->chip_port));
+
+ lookup = lan966x_vcap_is1_cid_to_lookup(rule->vcap_chain_id);
+ vcap_rule_add_key_u32(rule, VCAP_KF_LOOKUP_INDEX, lookup, 0x3);
+}
+
+static void lan966x_vcap_is2_add_default_fields(struct lan966x_port *port,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule)
+{
+ u32 value, mask;
+
+ if (vcap_rule_get_key_u32(rule, VCAP_KF_IF_IGR_PORT_MASK,
+ &value, &mask))
+ vcap_rule_add_key_u32(rule, VCAP_KF_IF_IGR_PORT_MASK, 0,
+ ~BIT(port->chip_port));
+
+ if (lan966x_vcap_is2_is_first_chain(rule))
+ vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_BIT_1);
+ else
+ vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_BIT_0);
+}
+
+static void lan966x_vcap_es0_add_default_fields(struct lan966x_port *port,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule)
+{
+ vcap_rule_add_key_u32(rule, VCAP_KF_IF_EGR_PORT_NO,
+ port->chip_port, GENMASK(4, 0));
+}
+
+static void lan966x_vcap_add_default_fields(struct net_device *dev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS1:
+ lan966x_vcap_is1_add_default_fields(port, admin, rule);
+ break;
+ case VCAP_TYPE_IS2:
+ lan966x_vcap_is2_add_default_fields(port, admin, rule);
+ break;
+ case VCAP_TYPE_ES0:
+ lan966x_vcap_es0_add_default_fields(port, admin, rule);
+ break;
+ default:
+ pr_err("vcap type: %s not supported\n",
+ lan966x_vcaps[admin->vtype].name);
+ break;
+ }
+}
+
+static void lan966x_vcap_cache_erase(struct vcap_admin *admin)
+{
+ memset(admin->cache.keystream, 0, STREAMSIZE);
+ memset(admin->cache.maskstream, 0, STREAMSIZE);
+ memset(admin->cache.actionstream, 0, STREAMSIZE);
+ memset(&admin->cache.counter, 0, sizeof(admin->cache.counter));
+}
+
+/* The ESDX counter is only used/incremented if the frame has been classified
+ * with an ISDX > 0 (e.g. by a rule in IS0). This is not mentioned in the
+ * datasheet.
+ */
+static void lan966x_es0_read_esdx_counter(struct lan966x *lan966x,
+ struct vcap_admin *admin, u32 id)
+{
+ u32 counter;
+
+ id = id & 0xff; /* counter limit */
+ mutex_lock(&lan966x->stats_lock);
+ lan_wr(SYS_STAT_CFG_STAT_VIEW_SET(id), lan966x, SYS_STAT_CFG);
+ counter = lan_rd(lan966x, SYS_CNT(LAN966X_STAT_ESDX_GRN_PKTS)) +
+ lan_rd(lan966x, SYS_CNT(LAN966X_STAT_ESDX_YEL_PKTS));
+ mutex_unlock(&lan966x->stats_lock);
+ if (counter)
+ admin->cache.counter = counter;
+}
+
+static void lan966x_es0_write_esdx_counter(struct lan966x *lan966x,
+ struct vcap_admin *admin, u32 id)
+{
+ id = id & 0xff; /* counter limit */
+
+ mutex_lock(&lan966x->stats_lock);
+ lan_wr(SYS_STAT_CFG_STAT_VIEW_SET(id), lan966x, SYS_STAT_CFG);
+ lan_wr(0, lan966x, SYS_CNT(LAN966X_STAT_ESDX_GRN_BYTES));
+ lan_wr(admin->cache.counter, lan966x,
+ SYS_CNT(LAN966X_STAT_ESDX_GRN_PKTS));
+ lan_wr(0, lan966x, SYS_CNT(LAN966X_STAT_ESDX_YEL_BYTES));
+ lan_wr(0, lan966x, SYS_CNT(LAN966X_STAT_ESDX_YEL_PKTS));
+ mutex_unlock(&lan966x->stats_lock);
+}
+
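+/* Push the cached rule streams to the hardware row registers. Note that the
+ * key stream is written with the mask already applied and the mask register
+ * takes the inverted mask, which appears to be the encoding the VCAP expects.
+ */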
+static void lan966x_vcap_cache_write(struct net_device *dev,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ u32 *keystr, *mskstr, *actstr;
+
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ actstr = &admin->cache.actionstream[start];
+
+ switch (sel) {
+ case VCAP_SEL_ENTRY:
+ for (int i = 0; i < count; ++i) {
+ lan_wr(keystr[i] & mskstr[i], lan966x,
+ VCAP_ENTRY_DAT(admin->tgt_inst, i));
+ lan_wr(~mskstr[i], lan966x,
+ VCAP_MASK_DAT(admin->tgt_inst, i));
+ }
+ break;
+ case VCAP_SEL_ACTION:
+ for (int i = 0; i < count; ++i)
+ lan_wr(actstr[i], lan966x,
+ VCAP_ACTION_DAT(admin->tgt_inst, i));
+ break;
+ case VCAP_SEL_COUNTER:
+ admin->cache.sticky = admin->cache.counter > 0;
+ lan_wr(admin->cache.counter, lan966x,
+ VCAP_CNT_DAT(admin->tgt_inst, 0));
+
+ if (admin->vtype == VCAP_TYPE_ES0)
+ lan966x_es0_write_esdx_counter(lan966x, admin, start);
+ break;
+ default:
+ break;
+ }
+}
+
+static void lan966x_vcap_cache_read(struct net_device *dev,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ int instance = admin->tgt_inst;
+ u32 *keystr, *mskstr, *actstr;
+
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ actstr = &admin->cache.actionstream[start];
+
+ if (sel & VCAP_SEL_ENTRY) {
+ for (int i = 0; i < count; ++i) {
+ keystr[i] =
+ lan_rd(lan966x, VCAP_ENTRY_DAT(instance, i));
+ mskstr[i] =
+ ~lan_rd(lan966x, VCAP_MASK_DAT(instance, i));
+ }
+ }
+
+ if (sel & VCAP_SEL_ACTION)
+ for (int i = 0; i < count; ++i)
+ actstr[i] =
+ lan_rd(lan966x, VCAP_ACTION_DAT(instance, i));
+
+ if (sel & VCAP_SEL_COUNTER) {
+ admin->cache.counter =
+ lan_rd(lan966x, VCAP_CNT_DAT(instance, 0));
+ admin->cache.sticky = admin->cache.counter > 0;
+
+ if (admin->vtype == VCAP_TYPE_ES0)
+ lan966x_es0_read_esdx_counter(lan966x, admin, start);
+ }
+}
+
+static void lan966x_vcap_range_init(struct net_device *dev,
+ struct vcap_admin *admin,
+ u32 addr,
+ u32 count)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+
+ __lan966x_vcap_range_init(lan966x, admin, addr, count);
+}
+
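+/* Issue a single-shot command on one VCAP address. The move registers are
+ * zeroed so the command only touches a single row, and the cache is cleared
+ * first when the command is INITIALIZE.
+ */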
+static void lan966x_vcap_update(struct net_device *dev,
+ struct vcap_admin *admin,
+ enum vcap_command cmd,
+ enum vcap_selection sel,
+ u32 addr)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ bool clear;
+
+ clear = (cmd == VCAP_CMD_INITIALIZE);
+
+ lan_wr(VCAP_MV_CFG_MV_NUM_POS_SET(0) |
+ VCAP_MV_CFG_MV_SIZE_SET(0),
+ lan966x, VCAP_MV_CFG(admin->tgt_inst));
+
+ lan_wr(VCAP_UPDATE_CTRL_UPDATE_CMD_SET(cmd) |
+ VCAP_UPDATE_CTRL_UPDATE_ENTRY_DIS_SET((VCAP_SEL_ENTRY & sel) == 0) |
+ VCAP_UPDATE_CTRL_UPDATE_ACTION_DIS_SET((VCAP_SEL_ACTION & sel) == 0) |
+ VCAP_UPDATE_CTRL_UPDATE_CNT_DIS_SET((VCAP_SEL_COUNTER & sel) == 0) |
+ VCAP_UPDATE_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_UPDATE_CTRL_CLEAR_CACHE_SET(clear) |
+ VCAP_UPDATE_CTRL_UPDATE_SHOT,
+ lan966x, VCAP_UPDATE_CTRL(admin->tgt_inst));
+
+ lan966x_vcap_wait_update(lan966x, admin->tgt_inst);
+}
+
+static void lan966x_vcap_move(struct net_device *dev,
+ struct vcap_admin *admin,
+ u32 addr, int offset, int count)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ enum vcap_command cmd;
+ u16 mv_num_pos;
+ u16 mv_size;
+
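+	/* The MV_* fields appear to be zero-based: MV_SIZE holds the number
+	 * of rows to move minus one and MV_NUM_POS the distance minus one.
+	 * A positive offset moves rules towards higher addresses (MOVE_DOWN),
+	 * a negative one towards lower addresses (MOVE_UP).
+	 */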
+ mv_size = count - 1;
+ if (offset > 0) {
+ mv_num_pos = offset - 1;
+ cmd = VCAP_CMD_MOVE_DOWN;
+ } else {
+ mv_num_pos = -offset - 1;
+ cmd = VCAP_CMD_MOVE_UP;
+ }
+
+ lan_wr(VCAP_MV_CFG_MV_NUM_POS_SET(mv_num_pos) |
+ VCAP_MV_CFG_MV_SIZE_SET(mv_size),
+ lan966x, VCAP_MV_CFG(admin->tgt_inst));
+
+ lan_wr(VCAP_UPDATE_CTRL_UPDATE_CMD_SET(cmd) |
+ VCAP_UPDATE_CTRL_UPDATE_ENTRY_DIS_SET(0) |
+ VCAP_UPDATE_CTRL_UPDATE_ACTION_DIS_SET(0) |
+ VCAP_UPDATE_CTRL_UPDATE_CNT_DIS_SET(0) |
+ VCAP_UPDATE_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_UPDATE_CTRL_CLEAR_CACHE_SET(false) |
+ VCAP_UPDATE_CTRL_UPDATE_SHOT,
+ lan966x, VCAP_UPDATE_CTRL(admin->tgt_inst));
+
+ lan966x_vcap_wait_update(lan966x, admin->tgt_inst);
+}
+
+static struct vcap_operations lan966x_vcap_ops = {
+ .validate_keyset = lan966x_vcap_validate_keyset,
+ .add_default_fields = lan966x_vcap_add_default_fields,
+ .cache_erase = lan966x_vcap_cache_erase,
+ .cache_write = lan966x_vcap_cache_write,
+ .cache_read = lan966x_vcap_cache_read,
+ .init = lan966x_vcap_range_init,
+ .update = lan966x_vcap_update,
+ .move = lan966x_vcap_move,
+ .port_info = lan966x_vcap_port_info,
+};
+
+static void lan966x_vcap_admin_free(struct vcap_admin *admin)
+{
+ if (!admin)
+ return;
+
+ kfree(admin->cache.keystream);
+ kfree(admin->cache.maskstream);
+ kfree(admin->cache.actionstream);
+ mutex_destroy(&admin->lock);
+ kfree(admin);
+}
+
+static struct vcap_admin *
+lan966x_vcap_admin_alloc(struct lan966x *lan966x, struct vcap_control *ctrl,
+ const struct lan966x_vcap_inst *cfg)
+{
+ struct vcap_admin *admin;
+
+ admin = kzalloc(sizeof(*admin), GFP_KERNEL);
+ if (!admin)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&admin->lock);
+ INIT_LIST_HEAD(&admin->list);
+ INIT_LIST_HEAD(&admin->rules);
+ INIT_LIST_HEAD(&admin->enabled);
+
+ admin->vtype = cfg->vtype;
+ admin->vinst = 0;
+ admin->ingress = cfg->ingress;
+ admin->w32be = true;
+ admin->tgt_inst = cfg->tgt_inst;
+
+ admin->lookups = cfg->lookups;
+ admin->lookups_per_instance = cfg->lookups;
+
+ admin->first_cid = cfg->first_cid;
+ admin->last_cid = cfg->last_cid;
+
+ admin->cache.keystream = kzalloc(STREAMSIZE, GFP_KERNEL);
+ admin->cache.maskstream = kzalloc(STREAMSIZE, GFP_KERNEL);
+ admin->cache.actionstream = kzalloc(STREAMSIZE, GFP_KERNEL);
+ if (!admin->cache.keystream ||
+ !admin->cache.maskstream ||
+ !admin->cache.actionstream) {
+ lan966x_vcap_admin_free(admin);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return admin;
+}
+
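+/* Enable the VCAP core for this instance and invalidate its complete address
+ * range so no stale rules can match.
+ */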
+static void lan966x_vcap_block_init(struct lan966x *lan966x,
+ struct vcap_admin *admin,
+ struct lan966x_vcap_inst *cfg)
+{
+ admin->first_valid_addr = 0;
+ admin->last_used_addr = cfg->count;
+ admin->last_valid_addr = cfg->count - 1;
+
+ lan_wr(VCAP_CORE_IDX_CORE_IDX_SET(0),
+ lan966x, VCAP_CORE_IDX(admin->tgt_inst));
+ lan_wr(VCAP_CORE_MAP_CORE_MAP_SET(1),
+ lan966x, VCAP_CORE_MAP(admin->tgt_inst));
+
+ __lan966x_vcap_range_init(lan966x, admin, admin->first_valid_addr,
+ admin->last_valid_addr -
+ admin->first_valid_addr);
+}
+
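+/* Restore the default per-port key selection for a VCAP: IS1 is set back to
+ * its default keysets, the IS2 key selection is cleared and ES0 is disabled
+ * on every port.
+ */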
+static void lan966x_vcap_port_key_deselection(struct lan966x *lan966x,
+ struct vcap_admin *admin)
+{
+ u32 val;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS1:
+ val = ANA_VCAP_S1_CFG_KEY_IP6_CFG_SET(VCAP_IS1_PS_IPV6_5TUPLE_IP6) |
+ ANA_VCAP_S1_CFG_KEY_IP4_CFG_SET(VCAP_IS1_PS_IPV4_5TUPLE_IP4) |
+ ANA_VCAP_S1_CFG_KEY_OTHER_CFG_SET(VCAP_IS1_PS_OTHER_NORMAL);
+
+ for (int p = 0; p < lan966x->num_phys_ports; ++p) {
+ if (!lan966x->ports[p])
+ continue;
+
+ for (int l = 0; l < LAN966X_IS1_LOOKUPS; ++l)
+ lan_wr(val, lan966x, ANA_VCAP_S1_CFG(p, l));
+
+ lan_rmw(ANA_VCAP_CFG_S1_ENA_SET(true),
+ ANA_VCAP_CFG_S1_ENA, lan966x,
+ ANA_VCAP_CFG(p));
+ }
+
+ break;
+ case VCAP_TYPE_IS2:
+ for (int p = 0; p < lan966x->num_phys_ports; ++p)
+ lan_wr(0, lan966x, ANA_VCAP_S2_CFG(p));
+
+ break;
+ case VCAP_TYPE_ES0:
+ for (int p = 0; p < lan966x->num_phys_ports; ++p)
+ lan_rmw(REW_PORT_CFG_ES0_EN_SET(false),
+ REW_PORT_CFG_ES0_EN, lan966x,
+ REW_PORT_CFG(p));
+ break;
+ default:
+ pr_err("vcap type: %s not supported\n",
+ lan966x_vcaps[admin->vtype].name);
+ break;
+ }
+}
+
+int lan966x_vcap_init(struct lan966x *lan966x)
+{
+ struct lan966x_vcap_inst *cfg;
+ struct vcap_control *ctrl;
+ struct vcap_admin *admin;
+ struct dentry *dir;
+
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ ctrl->vcaps = lan966x_vcaps;
+ ctrl->stats = &lan966x_vcap_stats;
+ ctrl->ops = &lan966x_vcap_ops;
+
+ INIT_LIST_HEAD(&ctrl->list);
+ for (int i = 0; i < ARRAY_SIZE(lan966x_vcap_inst_cfg); ++i) {
+ cfg = &lan966x_vcap_inst_cfg[i];
+
+ admin = lan966x_vcap_admin_alloc(lan966x, ctrl, cfg);
+ if (IS_ERR(admin))
+ return PTR_ERR(admin);
+
+ lan966x_vcap_block_init(lan966x, admin, cfg);
+ lan966x_vcap_port_key_deselection(lan966x, admin);
+
+ list_add_tail(&admin->list, &ctrl->list);
+ }
+
+ dir = vcap_debugfs(lan966x->dev, lan966x->debugfs_root, ctrl);
+ for (int p = 0; p < lan966x->num_phys_ports; ++p) {
+ if (lan966x->ports[p]) {
+ vcap_port_debugfs(lan966x->dev, dir, ctrl,
+ lan966x->ports[p]->dev);
+
+ lan_rmw(ANA_VCAP_S2_CFG_ENA_SET(true),
+ ANA_VCAP_S2_CFG_ENA, lan966x,
+ ANA_VCAP_S2_CFG(lan966x->ports[p]->chip_port));
+
+ lan_rmw(ANA_VCAP_CFG_S1_ENA_SET(true),
+ ANA_VCAP_CFG_S1_ENA, lan966x,
+ ANA_VCAP_CFG(lan966x->ports[p]->chip_port));
+
+ lan_rmw(REW_PORT_CFG_ES0_EN_SET(true),
+ REW_PORT_CFG_ES0_EN, lan966x,
+ REW_PORT_CFG(lan966x->ports[p]->chip_port));
+ }
+ }
+
+ /* Statistics: Use ESDX from ES0 if hit, otherwise no counting */
+ lan_rmw(REW_STAT_CFG_STAT_MODE_SET(1),
+ REW_STAT_CFG_STAT_MODE, lan966x,
+ REW_STAT_CFG);
+
+ lan966x->vcap_ctrl = ctrl;
+
+ return 0;
+}
+
+void lan966x_vcap_deinit(struct lan966x *lan966x)
+{
+ struct vcap_admin *admin, *admin_next;
+ struct vcap_control *ctrl;
+
+ ctrl = lan966x->vcap_ctrl;
+ if (!ctrl)
+ return;
+
+ list_for_each_entry_safe(admin, admin_next, &ctrl->list, list) {
+ lan966x_vcap_port_key_deselection(lan966x, admin);
+ vcap_del_rules(ctrl, admin);
+ list_del(&admin->list);
+ lan966x_vcap_admin_free(admin);
+ }
+
+ kfree(ctrl);
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
new file mode 100644
index 0000000000..3c44660128
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+#define VLANACCESS_CMD_IDLE 0
+#define VLANACCESS_CMD_READ 1
+#define VLANACCESS_CMD_WRITE 2
+#define VLANACCESS_CMD_INIT 3
+
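+/* The VLAN table is accessed indirectly: set up ANA_VLANTIDX and
+ * ANA_VLAN_PORT_MASK, issue a command through ANA_VLANACCESS and poll until
+ * the command field returns to IDLE.
+ */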
+static int lan966x_vlan_get_status(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, ANA_VLANACCESS);
+}
+
+static int lan966x_vlan_wait_for_completion(struct lan966x *lan966x)
+{
+ u32 val;
+
+ return readx_poll_timeout(lan966x_vlan_get_status,
+ lan966x, val,
+ (val & ANA_VLANACCESS_VLAN_TBL_CMD) ==
+ VLANACCESS_CMD_IDLE,
+ TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
+}
+
+static void lan966x_vlan_set_mask(struct lan966x *lan966x, u16 vid)
+{
+ u16 mask = lan966x->vlan_mask[vid];
+ bool cpu_dis;
+
+ cpu_dis = !(mask & BIT(CPU_PORT));
+
+ /* Set flags and the VID to configure */
+ lan_rmw(ANA_VLANTIDX_VLAN_PGID_CPU_DIS_SET(cpu_dis) |
+ ANA_VLANTIDX_V_INDEX_SET(vid),
+ ANA_VLANTIDX_VLAN_PGID_CPU_DIS |
+ ANA_VLANTIDX_V_INDEX,
+ lan966x, ANA_VLANTIDX);
+
+ /* Set the vlan port members mask */
+ lan_rmw(ANA_VLAN_PORT_MASK_VLAN_PORT_MASK_SET(mask),
+ ANA_VLAN_PORT_MASK_VLAN_PORT_MASK,
+ lan966x, ANA_VLAN_PORT_MASK);
+
+ /* Issue a write command */
+ lan_rmw(ANA_VLANACCESS_VLAN_TBL_CMD_SET(VLANACCESS_CMD_WRITE),
+ ANA_VLANACCESS_VLAN_TBL_CMD,
+ lan966x, ANA_VLANACCESS);
+
+ if (lan966x_vlan_wait_for_completion(lan966x))
+ dev_err(lan966x->dev, "Vlan set mask failed\n");
+}
+
+static void lan966x_vlan_port_add_vlan_mask(struct lan966x_port *port, u16 vid)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u8 p = port->chip_port;
+
+ lan966x->vlan_mask[vid] |= BIT(p);
+ lan966x_vlan_set_mask(lan966x, vid);
+}
+
+static void lan966x_vlan_port_del_vlan_mask(struct lan966x_port *port, u16 vid)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u8 p = port->chip_port;
+
+ lan966x->vlan_mask[vid] &= ~BIT(p);
+ lan966x_vlan_set_mask(lan966x, vid);
+}
+
+static bool lan966x_vlan_port_any_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ return !!(lan966x->vlan_mask[vid] & ~BIT(CPU_PORT));
+}
+
+static void lan966x_vlan_cpu_add_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ lan966x->vlan_mask[vid] |= BIT(CPU_PORT);
+ lan966x_vlan_set_mask(lan966x, vid);
+}
+
+static void lan966x_vlan_cpu_del_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ lan966x->vlan_mask[vid] &= ~BIT(CPU_PORT);
+ lan966x_vlan_set_mask(lan966x, vid);
+}
+
+static void lan966x_vlan_cpu_add_cpu_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ __set_bit(vid, lan966x->cpu_vlan_mask);
+}
+
+static void lan966x_vlan_cpu_del_cpu_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ __clear_bit(vid, lan966x->cpu_vlan_mask);
+}
+
+bool lan966x_vlan_cpu_member_cpu_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ return test_bit(vid, lan966x->cpu_vlan_mask);
+}
+
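+/* Ports outside the bridge always classify to HOST_PVID; bridged ports use
+ * their configured pvid when vlan-aware and UNAWARE_PVID otherwise.
+ */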
+static u16 lan966x_vlan_port_get_pvid(struct lan966x_port *port)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ if (!(lan966x->bridge_mask & BIT(port->chip_port)))
+ return HOST_PVID;
+
+ return port->vlan_aware ? port->pvid : UNAWARE_PVID;
+}
+
+int lan966x_vlan_port_set_vid(struct lan966x_port *port, u16 vid,
+ bool pvid, bool untagged)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ /* Egress vlan classification */
+ if (untagged && port->vid != vid) {
+ if (port->vid) {
+ dev_err(lan966x->dev,
+ "Port already has a native VLAN: %d\n",
+ port->vid);
+ return -EBUSY;
+ }
+ port->vid = vid;
+ }
+
+ /* Default ingress vlan classification */
+ if (pvid)
+ port->pvid = vid;
+
+ return 0;
+}
+
+static void lan966x_vlan_port_remove_vid(struct lan966x_port *port, u16 vid)
+{
+ if (port->pvid == vid)
+ port->pvid = 0;
+
+ if (port->vid == vid)
+ port->vid = 0;
+}
+
+void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port,
+ bool vlan_aware)
+{
+ port->vlan_aware = vlan_aware;
+}
+
+void lan966x_vlan_port_apply(struct lan966x_port *port)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u16 pvid;
+ u32 val;
+
+ pvid = lan966x_vlan_port_get_pvid(port);
+
+	/* Ingress classification (ANA_PORT_VLAN_CFG) */
+ /* Default vlan to classify for untagged frames (may be zero) */
+ val = ANA_VLAN_CFG_VLAN_VID_SET(pvid);
+ if (port->vlan_aware)
+ val |= ANA_VLAN_CFG_VLAN_AWARE_ENA_SET(1) |
+ ANA_VLAN_CFG_VLAN_POP_CNT_SET(1);
+
+ lan_rmw(val,
+ ANA_VLAN_CFG_VLAN_VID | ANA_VLAN_CFG_VLAN_AWARE_ENA |
+ ANA_VLAN_CFG_VLAN_POP_CNT,
+ lan966x, ANA_VLAN_CFG(port->chip_port));
+
+ lan_rmw(DEV_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(port->vlan_aware) |
+ DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA_SET(port->vlan_aware),
+ DEV_MAC_TAGS_CFG_VLAN_AWR_ENA |
+ DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA,
+ lan966x, DEV_MAC_TAGS_CFG(port->chip_port));
+
+ /* Drop frames with multicast source address */
+ val = ANA_DROP_CFG_DROP_MC_SMAC_ENA_SET(1);
+ if (port->vlan_aware && !pvid)
+		/* If the port is vlan-aware and has no PVID, drop untagged and
+		 * priority-tagged frames.
+		 */
+ val |= ANA_DROP_CFG_DROP_UNTAGGED_ENA_SET(1) |
+ ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA_SET(1) |
+ ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA_SET(1);
+
+ lan_wr(val, lan966x, ANA_DROP_CFG(port->chip_port));
+
+ /* Egress configuration (REW_TAG_CFG): VLAN tag type to 8021Q */
+ val = REW_TAG_CFG_TAG_TPID_CFG_SET(0);
+ if (port->vlan_aware) {
+ if (port->vid)
+ /* Tag all frames except when VID == DEFAULT_VLAN */
+ val |= REW_TAG_CFG_TAG_CFG_SET(1);
+ else
+ val |= REW_TAG_CFG_TAG_CFG_SET(3);
+ }
+
+ /* Update only some bits in the register */
+ lan_rmw(val,
+ REW_TAG_CFG_TAG_TPID_CFG | REW_TAG_CFG_TAG_CFG,
+ lan966x, REW_TAG_CFG(port->chip_port));
+
+ /* Set default VLAN and tag type to 8021Q */
+ lan_rmw(REW_PORT_VLAN_CFG_PORT_TPID_SET(ETH_P_8021Q) |
+ REW_PORT_VLAN_CFG_PORT_VID_SET(port->vid),
+ REW_PORT_VLAN_CFG_PORT_TPID |
+ REW_PORT_VLAN_CFG_PORT_VID,
+ lan966x, REW_PORT_VLAN_CFG(port->chip_port));
+}
+
+void lan966x_vlan_port_add_vlan(struct lan966x_port *port,
+ u16 vid,
+ bool pvid,
+ bool untagged)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+	/* If the CPU (the bridge) is already a member of the vlan, add the
+	 * FDB entries to the MAC table so frames are copied to the CPU.
+	 * If the CPU is not a member of the vlan, it would simply drop
+	 * the frames.
+	 */
+ if (lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x, vid)) {
+ lan966x_vlan_cpu_add_vlan_mask(lan966x, vid);
+ lan966x_fdb_write_entries(lan966x, vid);
+ lan966x_mdb_write_entries(lan966x, vid);
+ }
+
+ lan966x_vlan_port_set_vid(port, vid, pvid, untagged);
+ lan966x_vlan_port_add_vlan_mask(port, vid);
+ lan966x_vlan_port_apply(port);
+}
+
+void lan966x_vlan_port_del_vlan(struct lan966x_port *port, u16 vid)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ lan966x_vlan_port_remove_vid(port, vid);
+ lan966x_vlan_port_del_vlan_mask(port, vid);
+ lan966x_vlan_port_apply(port);
+
+	/* If there are no other ports left in the vlan, remove the CPU from
+	 * that vlan but keep it in the mask, because it may be needed again
+	 * when another port is added to that vlan.
+	 */
+ if (!lan966x_vlan_port_any_vlan_mask(lan966x, vid)) {
+ lan966x_vlan_cpu_del_vlan_mask(lan966x, vid);
+ lan966x_fdb_erase_entries(lan966x, vid);
+ lan966x_mdb_erase_entries(lan966x, vid);
+ }
+}
+
+void lan966x_vlan_cpu_add_vlan(struct lan966x *lan966x, u16 vid)
+{
+	/* Add an entry in the MAC table for the CPU.
+	 * Make the CPU a member of the vlan only if another port is already a
+	 * member; otherwise all broadcast frames in that vlan would go to the
+	 * CPU even though no port is in the vlan, and the CPU would just have
+	 * to discard them. The membership is still recorded so that when a
+	 * front port is later added, the CPU port is added as well.
+	 */
+ if (lan966x_vlan_port_any_vlan_mask(lan966x, vid)) {
+ lan966x_vlan_cpu_add_vlan_mask(lan966x, vid);
+ lan966x_mdb_write_entries(lan966x, vid);
+ }
+
+ lan966x_vlan_cpu_add_cpu_vlan_mask(lan966x, vid);
+ lan966x_fdb_write_entries(lan966x, vid);
+}
+
+void lan966x_vlan_cpu_del_vlan(struct lan966x *lan966x, u16 vid)
+{
+ /* Remove the CPU part of the vlan */
+ lan966x_vlan_cpu_del_cpu_vlan_mask(lan966x, vid);
+ lan966x_vlan_cpu_del_vlan_mask(lan966x, vid);
+ lan966x_fdb_erase_entries(lan966x, vid);
+ lan966x_mdb_erase_entries(lan966x, vid);
+}
+
+void lan966x_vlan_init(struct lan966x *lan966x)
+{
+ u16 port, vid;
+
+	/* Clear the VLAN table; by default all ports are members of all VLANs */
+ lan_rmw(ANA_VLANACCESS_VLAN_TBL_CMD_SET(VLANACCESS_CMD_INIT),
+ ANA_VLANACCESS_VLAN_TBL_CMD,
+ lan966x, ANA_VLANACCESS);
+ lan966x_vlan_wait_for_completion(lan966x);
+
+ for (vid = 1; vid < VLAN_N_VID; vid++) {
+ lan966x->vlan_mask[vid] = 0;
+ lan966x_vlan_set_mask(lan966x, vid);
+ }
+
+	/* Make all ports and the CPU members of HOST_PVID and UNAWARE_PVID */
+ lan966x->vlan_mask[HOST_PVID] =
+ GENMASK(lan966x->num_phys_ports - 1, 0) | BIT(CPU_PORT);
+ lan966x_vlan_set_mask(lan966x, HOST_PVID);
+
+ lan966x->vlan_mask[UNAWARE_PVID] =
+ GENMASK(lan966x->num_phys_ports - 1, 0) | BIT(CPU_PORT);
+ lan966x_vlan_set_mask(lan966x, UNAWARE_PVID);
+
+ lan966x_vlan_cpu_add_cpu_vlan_mask(lan966x, UNAWARE_PVID);
+
+ /* Configure the CPU port to be vlan aware */
+ lan_wr(ANA_VLAN_CFG_VLAN_VID_SET(0) |
+ ANA_VLAN_CFG_VLAN_AWARE_ENA_SET(1) |
+ ANA_VLAN_CFG_VLAN_POP_CNT_SET(1),
+ lan966x, ANA_VLAN_CFG(CPU_PORT));
+
+ /* Set vlan ingress filter mask to all ports */
+ lan_wr(GENMASK(lan966x->num_phys_ports, 0),
+ lan966x, ANA_VLANMASK);
+
+ for (port = 0; port < lan966x->num_phys_ports; port++) {
+ lan_wr(0, lan966x, REW_PORT_VLAN_CFG(port));
+ lan_wr(0, lan966x, REW_TAG_CFG(port));
+ }
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_xdp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_xdp.c
new file mode 100644
index 0000000000..9ee61db869
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_xdp.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <linux/filter.h>
+
+#include "lan966x_main.h"
+
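+/* Install or remove a port's XDP program. The FDMA page pool only needs to be
+ * reloaded (presumably to adjust buffer headroom for XDP) when XDP presence
+ * toggles for the whole switch, i.e. the first program is added or the last
+ * one removed.
+ */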
+static int lan966x_xdp_setup(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct bpf_prog *old_prog;
+ bool old_xdp, new_xdp;
+ int err;
+
+ if (!lan966x->fdma) {
+ NL_SET_ERR_MSG_MOD(xdp->extack,
+ "Allow to set xdp only when using fdma");
+ return -EOPNOTSUPP;
+ }
+
+ old_xdp = lan966x_xdp_present(lan966x);
+ old_prog = xchg(&port->xdp_prog, xdp->prog);
+ new_xdp = lan966x_xdp_present(lan966x);
+
+ if (old_xdp == new_xdp)
+ goto out;
+
+ err = lan966x_fdma_reload_page_pool(lan966x);
+ if (err) {
+ xchg(&port->xdp_prog, old_prog);
+ return err;
+ }
+
+out:
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ return 0;
+}
+
+int lan966x_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return lan966x_xdp_setup(dev, xdp);
+ default:
+ return -EINVAL;
+ }
+}
+
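+/* ndo_xdp_xmit handler: queue up to @n frames on the FDMA TX path and return
+ * how many were accepted, stopping at the first failure.
+ */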
+int lan966x_xdp_xmit(struct net_device *dev,
+ int n,
+ struct xdp_frame **frames,
+ u32 flags)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ int nxmit = 0;
+
+ for (int i = 0; i < n; ++i) {
+ struct xdp_frame *xdpf = frames[i];
+ int err;
+
+ err = lan966x_fdma_xmit_xdpf(port, xdpf, 0);
+ if (err)
+ break;
+
+ nxmit++;
+ }
+
+ return nxmit;
+}
+
+int lan966x_xdp_run(struct lan966x_port *port, struct page *page, u32 data_len)
+{
+ struct bpf_prog *xdp_prog = port->xdp_prog;
+ struct lan966x *lan966x = port->lan966x;
+ struct xdp_buff xdp;
+ u32 act;
+
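+	/* The page still carries the extraction header (IFH) in front of the
+	 * frame; start the buffer after IFH_LEN_BYTES + XDP_PACKET_HEADROOM
+	 * and shrink the data length so the BPF program never sees the IFH.
+	 */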
+ xdp_init_buff(&xdp, PAGE_SIZE << lan966x->rx.page_order,
+ &port->xdp_rxq);
+ xdp_prepare_buff(&xdp, page_address(page),
+ IFH_LEN_BYTES + XDP_PACKET_HEADROOM,
+ data_len - IFH_LEN_BYTES, false);
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ switch (act) {
+ case XDP_PASS:
+ return FDMA_PASS;
+ case XDP_TX:
+ return lan966x_fdma_xmit_xdpf(port, page,
+ data_len - IFH_LEN_BYTES) ?
+ FDMA_DROP : FDMA_TX;
+ case XDP_REDIRECT:
+ if (xdp_do_redirect(port->dev, &xdp, xdp_prog))
+ return FDMA_DROP;
+
+ return FDMA_REDIRECT;
+ default:
+ bpf_warn_invalid_xdp_action(port->dev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(port->dev, xdp_prog, act);
+ fallthrough;
+ case XDP_DROP:
+ return FDMA_DROP;
+ }
+}
+
+bool lan966x_xdp_present(struct lan966x *lan966x)
+{
+ for (int p = 0; p < lan966x->num_phys_ports; ++p) {
+ if (!lan966x->ports[p])
+ continue;
+
+ if (lan966x_xdp_port_present(lan966x->ports[p]))
+ return true;
+ }
+
+ return false;
+}
+
+int lan966x_xdp_port_init(struct lan966x_port *port)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ return xdp_rxq_info_reg(&port->xdp_rxq, port->dev, 0,
+ lan966x->napi.napi_id);
+}
+
+void lan966x_xdp_port_deinit(struct lan966x_port *port)
+{
+ if (xdp_rxq_info_is_reg(&port->xdp_rxq))
+ xdp_rxq_info_unreg(&port->xdp_rxq);
+}