Diffstat (limited to 'drivers/net/ethernet/mellanox/mlxsw')
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/Kconfig | 92
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/Makefile | 36
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/cmd.h | 1281
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.c | 3443
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.h | 674
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c | 2218
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h | 110
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c | 506
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h | 222
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_env.c | 1464
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_env.h | 68
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c | 941
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c | 183
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_linecards.c | 1601
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 1063
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/emad.h | 96
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/i2c.c | 769
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/i2c.h | 29
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/item.h | 553
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/minimal.c | 760
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/pci.c | 2068
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/pci.h | 34
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/pci_hw.h | 416
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/port.h | 38
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/reg.h | 12937
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/resources.h | 168
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 5331
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 1493
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c | 254
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c | 427
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c | 342
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c | 280
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c | 273
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c | 330
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 1124
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c | 646
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c | 522
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c | 233
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c | 1601
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c | 318
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c | 355
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 1867
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h | 312
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 1786
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c | 306
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h | 25
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c | 740
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c | 1308
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c | 2023
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c | 1876
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c | 295
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 796
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c | 676
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h | 84
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c | 87
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c | 478
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c | 1075
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h | 95
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c | 615
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c | 1173
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h | 55
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c | 473
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c | 346
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c | 468
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c | 1726
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h | 263
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c | 2337
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 10617
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h | 166
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c | 1741
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h | 129
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 4015
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c | 1969
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h | 38
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/trap.h | 144
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/txheader.h | 49
80 files changed, 85504 insertions, 0 deletions
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
new file mode 100644
index 000000000..a510bf2cf
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Mellanox switch drivers configuration
+#
+
+config MLXSW_CORE
+ tristate "Mellanox Technologies Switch ASICs support"
+ select NET_DEVLINK
+ select MLXFW
+ select AUXILIARY_BUS
+ help
+ This driver supports the Mellanox Technologies Switch ASICs family.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mlxsw_core.
+
+config MLXSW_CORE_HWMON
+ bool "HWMON support for Mellanox Technologies Switch ASICs"
+ depends on MLXSW_CORE && HWMON
+ depends on !(MLXSW_CORE=y && HWMON=m)
+ default y
+ help
+ Say Y here if you want to expose the HWMON interface on mlxsw devices.
+
+config MLXSW_CORE_THERMAL
+ bool "Thermal zone support for Mellanox Technologies Switch ASICs"
+ depends on MLXSW_CORE && THERMAL
+ default y
+ help
+ Say Y here if you want to automatically control fan speed according
+ to the ambient temperature reported by the ASIC.
+
+config MLXSW_PCI
+ tristate "PCI bus implementation for Mellanox Technologies Switch ASICs"
+ depends on PCI && HAS_IOMEM && MLXSW_CORE
+ default m
+ help
+ This is the PCI bus implementation for Mellanox Technologies Switch ASICs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mlxsw_pci.
+
+config MLXSW_I2C
+ tristate "I2C bus implementation for Mellanox Technologies Switch ASICs"
+ depends on I2C && MLXSW_CORE
+ default m
+ help
+ This is the I2C bus implementation for Mellanox Technologies Switch ASICs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mlxsw_i2c.
+
+config MLXSW_SPECTRUM
+ tristate "Mellanox Technologies Spectrum family support"
+ depends on MLXSW_CORE && MLXSW_PCI && NET_SWITCHDEV && VLAN_8021Q
+ depends on PSAMPLE || PSAMPLE=n
+ depends on BRIDGE || BRIDGE=n
+ depends on IPV6 || IPV6=n
+ depends on NET_IPGRE || NET_IPGRE=n
+ depends on IPV6_GRE || IPV6_GRE=n
+ depends on VXLAN || VXLAN=n
+ depends on PTP_1588_CLOCK_OPTIONAL
+ select GENERIC_ALLOCATOR
+ select PARMAN
+ select OBJAGG
+ select NET_PTP_CLASSIFY if PTP_1588_CLOCK
+ default m
+ help
+ This driver supports Mellanox Technologies
+ Spectrum/Spectrum-2/Spectrum-3/Spectrum-4 Ethernet Switch ASICs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mlxsw_spectrum.
+
+config MLXSW_SPECTRUM_DCB
+ bool "Data Center Bridging (DCB) support"
+ depends on MLXSW_SPECTRUM && DCB
+ default y
+ help
+ Say Y here if you want to use Data Center Bridging (DCB) in the
+ driver.
+
+config MLXSW_MINIMAL
+ tristate "Mellanox Technologies minimal I2C support"
+ depends on MLXSW_CORE && MLXSW_I2C
+ default m
+ help
+ This driver supports I2C access for Mellanox Technologies Switch
+ ASICs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mlxsw_minimal.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
new file mode 100644
index 000000000..3ca9fce75
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_MLXSW_CORE) += mlxsw_core.o
+mlxsw_core-objs := core.o core_acl_flex_keys.o \
+ core_acl_flex_actions.o core_env.o \
+ core_linecards.o core_linecard_dev.o
+mlxsw_core-$(CONFIG_MLXSW_CORE_HWMON) += core_hwmon.o
+mlxsw_core-$(CONFIG_MLXSW_CORE_THERMAL) += core_thermal.o
+obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o
+mlxsw_pci-objs := pci.o
+obj-$(CONFIG_MLXSW_I2C) += mlxsw_i2c.o
+mlxsw_i2c-objs := i2c.o
+obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o
+mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
+ spectrum_switchdev.o spectrum_router.o \
+ spectrum1_kvdl.o spectrum2_kvdl.o \
+ spectrum_kvdl.o \
+ spectrum_acl_tcam.o spectrum_acl_ctcam.o \
+ spectrum_acl_atcam.o spectrum_acl_erp.o \
+ spectrum1_acl_tcam.o spectrum2_acl_tcam.o \
+ spectrum_acl_bloom_filter.o spectrum_acl.o \
+ spectrum_flow.o spectrum_matchall.o \
+ spectrum_flower.o spectrum_cnt.o \
+ spectrum_fid.o spectrum_ipip.o \
+ spectrum_acl_flex_actions.o \
+ spectrum_acl_flex_keys.o \
+ spectrum1_mr_tcam.o spectrum2_mr_tcam.o \
+ spectrum_mr_tcam.o spectrum_mr.o \
+ spectrum_qdisc.o spectrum_span.o \
+ spectrum_nve.o spectrum_nve_vxlan.o \
+ spectrum_dpipe.o spectrum_trap.o \
+ spectrum_ethtool.o spectrum_policer.o \
+ spectrum_pgt.o
+mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
+mlxsw_spectrum-$(CONFIG_PTP_1588_CLOCK) += spectrum_ptp.o
+obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
+mlxsw_minimal-objs := minimal.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
new file mode 100644
index 000000000..09bef04b1
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -0,0 +1,1281 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_CMD_H
+#define _MLXSW_CMD_H
+
+#include "item.h"
+
+#define MLXSW_CMD_MBOX_SIZE 4096
+
+static inline char *mlxsw_cmd_mbox_alloc(void)
+{
+ return kzalloc(MLXSW_CMD_MBOX_SIZE, GFP_KERNEL);
+}
+
+static inline void mlxsw_cmd_mbox_free(char *mbox)
+{
+ kfree(mbox);
+}
+
+static inline void mlxsw_cmd_mbox_zero(char *mbox)
+{
+ memset(mbox, 0, MLXSW_CMD_MBOX_SIZE);
+}
+
+struct mlxsw_core;
+
+int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
+ u32 in_mod, bool out_mbox_direct, bool reset_ok,
+ char *in_mbox, size_t in_mbox_size,
+ char *out_mbox, size_t out_mbox_size);
+
+static inline int mlxsw_cmd_exec_in(struct mlxsw_core *mlxsw_core, u16 opcode,
+ u8 opcode_mod, u32 in_mod, char *in_mbox,
+ size_t in_mbox_size)
+{
+ return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false,
+ false, in_mbox, in_mbox_size, NULL, 0);
+}
+
+static inline int mlxsw_cmd_exec_out(struct mlxsw_core *mlxsw_core, u16 opcode,
+ u8 opcode_mod, u32 in_mod,
+ bool out_mbox_direct,
+ char *out_mbox, size_t out_mbox_size)
+{
+ return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod,
+ out_mbox_direct, false, NULL, 0,
+ out_mbox, out_mbox_size);
+}
+
+static inline int mlxsw_cmd_exec_none(struct mlxsw_core *mlxsw_core, u16 opcode,
+ u8 opcode_mod, u32 in_mod)
+{
+ return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false,
+ false, NULL, 0, NULL, 0);
+}
+
+enum mlxsw_cmd_opcode {
+ MLXSW_CMD_OPCODE_QUERY_FW = 0x004,
+ MLXSW_CMD_OPCODE_QUERY_BOARDINFO = 0x006,
+ MLXSW_CMD_OPCODE_QUERY_AQ_CAP = 0x003,
+ MLXSW_CMD_OPCODE_MAP_FA = 0xFFF,
+ MLXSW_CMD_OPCODE_UNMAP_FA = 0xFFE,
+ MLXSW_CMD_OPCODE_CONFIG_PROFILE = 0x100,
+ MLXSW_CMD_OPCODE_ACCESS_REG = 0x040,
+ MLXSW_CMD_OPCODE_SW2HW_DQ = 0x201,
+ MLXSW_CMD_OPCODE_HW2SW_DQ = 0x202,
+ MLXSW_CMD_OPCODE_2ERR_DQ = 0x01E,
+ MLXSW_CMD_OPCODE_QUERY_DQ = 0x022,
+ MLXSW_CMD_OPCODE_SW2HW_CQ = 0x016,
+ MLXSW_CMD_OPCODE_HW2SW_CQ = 0x017,
+ MLXSW_CMD_OPCODE_QUERY_CQ = 0x018,
+ MLXSW_CMD_OPCODE_SW2HW_EQ = 0x013,
+ MLXSW_CMD_OPCODE_HW2SW_EQ = 0x014,
+ MLXSW_CMD_OPCODE_QUERY_EQ = 0x015,
+ MLXSW_CMD_OPCODE_QUERY_RESOURCES = 0x101,
+};
+
+static inline const char *mlxsw_cmd_opcode_str(u16 opcode)
+{
+ switch (opcode) {
+ case MLXSW_CMD_OPCODE_QUERY_FW:
+ return "QUERY_FW";
+ case MLXSW_CMD_OPCODE_QUERY_BOARDINFO:
+ return "QUERY_BOARDINFO";
+ case MLXSW_CMD_OPCODE_QUERY_AQ_CAP:
+ return "QUERY_AQ_CAP";
+ case MLXSW_CMD_OPCODE_MAP_FA:
+ return "MAP_FA";
+ case MLXSW_CMD_OPCODE_UNMAP_FA:
+ return "UNMAP_FA";
+ case MLXSW_CMD_OPCODE_CONFIG_PROFILE:
+ return "CONFIG_PROFILE";
+ case MLXSW_CMD_OPCODE_ACCESS_REG:
+ return "ACCESS_REG";
+ case MLXSW_CMD_OPCODE_SW2HW_DQ:
+ return "SW2HW_DQ";
+ case MLXSW_CMD_OPCODE_HW2SW_DQ:
+ return "HW2SW_DQ";
+ case MLXSW_CMD_OPCODE_2ERR_DQ:
+ return "2ERR_DQ";
+ case MLXSW_CMD_OPCODE_QUERY_DQ:
+ return "QUERY_DQ";
+ case MLXSW_CMD_OPCODE_SW2HW_CQ:
+ return "SW2HW_CQ";
+ case MLXSW_CMD_OPCODE_HW2SW_CQ:
+ return "HW2SW_CQ";
+ case MLXSW_CMD_OPCODE_QUERY_CQ:
+ return "QUERY_CQ";
+ case MLXSW_CMD_OPCODE_SW2HW_EQ:
+ return "SW2HW_EQ";
+ case MLXSW_CMD_OPCODE_HW2SW_EQ:
+ return "HW2SW_EQ";
+ case MLXSW_CMD_OPCODE_QUERY_EQ:
+ return "QUERY_EQ";
+ case MLXSW_CMD_OPCODE_QUERY_RESOURCES:
+ return "QUERY_RESOURCES";
+ default:
+ return "*UNKNOWN*";
+ }
+}
+
+enum mlxsw_cmd_status {
+ /* Command execution succeeded. */
+ MLXSW_CMD_STATUS_OK = 0x00,
+ /* Internal error (e.g. bus error) occurred while processing command. */
+ MLXSW_CMD_STATUS_INTERNAL_ERR = 0x01,
+ /* Operation/command not supported or opcode modifier not supported. */
+ MLXSW_CMD_STATUS_BAD_OP = 0x02,
+ /* Parameter not supported, parameter out of range. */
+ MLXSW_CMD_STATUS_BAD_PARAM = 0x03,
+ /* System was not enabled or bad system state. */
+ MLXSW_CMD_STATUS_BAD_SYS_STATE = 0x04,
+ /* Attempt to access reserved or unallocated resource, or resource in
+ * inappropriate ownership.
+ */
+ MLXSW_CMD_STATUS_BAD_RESOURCE = 0x05,
+ /* Requested resource is currently executing a command. */
+ MLXSW_CMD_STATUS_RESOURCE_BUSY = 0x06,
+ /* Required capability exceeds device limits. */
+ MLXSW_CMD_STATUS_EXCEED_LIM = 0x08,
+ /* Resource is not in the appropriate state or ownership. */
+ MLXSW_CMD_STATUS_BAD_RES_STATE = 0x09,
+ /* Index out of range (might be beyond table size or attempt to
+ * access a reserved resource).
+ */
+ MLXSW_CMD_STATUS_BAD_INDEX = 0x0A,
+ /* NVMEM checksum/CRC failed. */
+ MLXSW_CMD_STATUS_BAD_NVMEM = 0x0B,
+ /* Device is currently running reset */
+ MLXSW_CMD_STATUS_RUNNING_RESET = 0x26,
+ /* Bad management packet (silently discarded). */
+ MLXSW_CMD_STATUS_BAD_PKT = 0x30,
+};
+
+static inline const char *mlxsw_cmd_status_str(u8 status)
+{
+ switch (status) {
+ case MLXSW_CMD_STATUS_OK:
+ return "OK";
+ case MLXSW_CMD_STATUS_INTERNAL_ERR:
+ return "INTERNAL_ERR";
+ case MLXSW_CMD_STATUS_BAD_OP:
+ return "BAD_OP";
+ case MLXSW_CMD_STATUS_BAD_PARAM:
+ return "BAD_PARAM";
+ case MLXSW_CMD_STATUS_BAD_SYS_STATE:
+ return "BAD_SYS_STATE";
+ case MLXSW_CMD_STATUS_BAD_RESOURCE:
+ return "BAD_RESOURCE";
+ case MLXSW_CMD_STATUS_RESOURCE_BUSY:
+ return "RESOURCE_BUSY";
+ case MLXSW_CMD_STATUS_EXCEED_LIM:
+ return "EXCEED_LIM";
+ case MLXSW_CMD_STATUS_BAD_RES_STATE:
+ return "BAD_RES_STATE";
+ case MLXSW_CMD_STATUS_BAD_INDEX:
+ return "BAD_INDEX";
+ case MLXSW_CMD_STATUS_BAD_NVMEM:
+ return "BAD_NVMEM";
+ case MLXSW_CMD_STATUS_RUNNING_RESET:
+ return "RUNNING_RESET";
+ case MLXSW_CMD_STATUS_BAD_PKT:
+ return "BAD_PKT";
+ default:
+ return "*UNKNOWN*";
+ }
+}
+
+/* QUERY_FW - Query Firmware
+ * -------------------------
+ * OpMod == 0, INMmod == 0
+ * -----------------------
+ * The QUERY_FW command retrieves information related to firmware, command
+ * interface version and the amount of resources that should be allocated to
+ * the firmware.
+ */
+
+static inline int mlxsw_cmd_query_fw(struct mlxsw_core *mlxsw_core,
+ char *out_mbox)
+{
+ return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_FW,
+ 0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_query_fw_fw_pages
+ * Amount of physical memory to be allocated for firmware usage in 4KB pages.
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_pages, 0x00, 16, 16);
+
+/* cmd_mbox_query_fw_fw_rev_major
+ * Firmware Revision - Major
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_major, 0x00, 0, 16);
+
+/* cmd_mbox_query_fw_fw_rev_subminor
+ * Firmware Sub-minor version (Patch level)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_subminor, 0x04, 16, 16);
+
+/* cmd_mbox_query_fw_fw_rev_minor
+ * Firmware Revision - Minor
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_minor, 0x04, 0, 16);
+
+/* cmd_mbox_query_fw_core_clk
+ * Internal Clock Frequency (in MHz)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, core_clk, 0x08, 16, 16);
+
+/* cmd_mbox_query_fw_cmd_interface_rev
+ * Command Interface Interpreter Revision ID. This number is bumped up
+ * every time a non-backward-compatible change is done for the command
+ * interface. The current cmd_interface_rev is 1.
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, cmd_interface_rev, 0x08, 0, 16);
+
+/* cmd_mbox_query_fw_dt
+ * If set, Debug Trace is supported
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, dt, 0x0C, 31, 1);
+
+/* cmd_mbox_query_fw_api_version
+ * Indicates the version of the API, to enable software querying
+ * for compatibility. The current api_version is 1.
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, api_version, 0x0C, 0, 16);
+
+/* cmd_mbox_query_fw_fw_hour
+ * Firmware timestamp - hour
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_hour, 0x10, 24, 8);
+
+/* cmd_mbox_query_fw_fw_minutes
+ * Firmware timestamp - minutes
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_minutes, 0x10, 16, 8);
+
+/* cmd_mbox_query_fw_fw_seconds
+ * Firmware timestamp - seconds
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_seconds, 0x10, 8, 8);
+
+/* cmd_mbox_query_fw_fw_year
+ * Firmware timestamp - year
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_year, 0x14, 16, 16);
+
+/* cmd_mbox_query_fw_fw_month
+ * Firmware timestamp - month
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_month, 0x14, 8, 8);
+
+/* cmd_mbox_query_fw_fw_day
+ * Firmware timestamp - day
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_day, 0x14, 0, 8);
+
+/* cmd_mbox_query_fw_clr_int_base_offset
+ * Clear Interrupt register's offset from clr_int_bar register
+ * in PCI address space.
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, clr_int_base_offset, 0x20, 0, 64);
+
+/* cmd_mbox_query_fw_clr_int_bar
+ * PCI base address register (BAR) where clr_int register is located.
+ * 00 - BAR 0-1 (64 bit BAR)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, clr_int_bar, 0x28, 30, 2);
+
+/* cmd_mbox_query_fw_error_buf_offset
+ * Read-only buffer for internal error reports. Offset from the
+ * error_buf_bar register in PCI address space.
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, error_buf_offset, 0x30, 0, 64);
+
+/* cmd_mbox_query_fw_error_buf_size
+ * Internal error buffer size in DWORDs
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, error_buf_size, 0x38, 0, 32);
+
+/* cmd_mbox_query_fw_error_int_bar
+ * PCI base address register (BAR) where error buffer
+ * register is located.
+ * 00 - BAR 0-1 (64 bit BAR)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, error_int_bar, 0x3C, 30, 2);
+
+/* cmd_mbox_query_fw_doorbell_page_offset
+ * Offset of the doorbell page
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, doorbell_page_offset, 0x40, 0, 64);
+
+/* cmd_mbox_query_fw_doorbell_page_bar
+ * PCI base address register (BAR) of the doorbell page
+ * 00 - BAR 0-1 (64 bit BAR)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, doorbell_page_bar, 0x48, 30, 2);
+
+/* cmd_mbox_query_fw_free_running_clock_offset
+ * The offset of the free running clock page
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, free_running_clock_offset, 0x50, 0, 64);
+
+/* cmd_mbox_query_fw_fr_rn_clk_bar
+ * PCI base address register (BAR) of the free running clock page
+ * 0: BAR 0
+ * 1: 64 bit BAR
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fr_rn_clk_bar, 0x58, 30, 2);
+
+/* cmd_mbox_query_fw_utc_sec_offset
+ * The offset of the UTC_Sec page
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, utc_sec_offset, 0x70, 0, 64);
+
+/* cmd_mbox_query_fw_utc_sec_bar
+ * PCI base address register (BAR) of the UTC_Sec page
+ * 0: BAR 0
+ * 1: 64 bit BAR
+ * Reserved on SwitchX/-2, Switch-IB/2, Spectrum-1
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, utc_sec_bar, 0x78, 30, 2);
+
+/* cmd_mbox_query_fw_utc_nsec_offset
+ * The offset of the UTC_nSec page
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, utc_nsec_offset, 0x80, 0, 64);
+
+/* cmd_mbox_query_fw_utc_nsec_bar
+ * PCI base address register (BAR) of the UTC_nSec page
+ * 0: BAR 0
+ * 1: 64 bit BAR
+ * Reserved on SwitchX/-2, Switch-IB/2, Spectrum-1
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, utc_nsec_bar, 0x88, 30, 2);
+
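
As an aside (not part of the patch), a minimal sketch of how the QUERY_FW mailbox is meant to be consumed: the helper name below is hypothetical, and it relies on the getters generated by the MLXSW_ITEM32() definitions above plus the mailbox helpers at the top of this header, within the usual kernel/mlxsw context.

static int example_query_fw_rev(struct mlxsw_core *mlxsw_core)
{
	char *mbox;
	int err;

	/* Mailboxes are fixed-size (4KB), zero-initialized buffers. */
	mbox = mlxsw_cmd_mbox_alloc();
	if (!mbox)
		return -ENOMEM;

	err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
	if (err)
		goto out;

	/* Accessors are generated as mlxsw_cmd_mbox_query_fw_<field>_get(). */
	pr_info("fw rev %u.%u.%u, cmd interface rev %u\n",
		mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox),
		mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox),
		mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox),
		mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox));
out:
	mlxsw_cmd_mbox_free(mbox);
	return err;
}
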
+/* QUERY_BOARDINFO - Query Board Information
+ * -----------------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The QUERY_BOARDINFO command retrieves adapter specific parameters.
+ */
+
+static inline int mlxsw_cmd_boardinfo(struct mlxsw_core *mlxsw_core,
+ char *out_mbox)
+{
+ return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_BOARDINFO,
+ 0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_boardinfo_intapin
+ * When PCIe interrupt messages are being used, this value is used for clearing
+ * an interrupt. When using MSI-X, this register is not used.
+ */
+MLXSW_ITEM32(cmd_mbox, boardinfo, intapin, 0x10, 24, 8);
+
+/* cmd_mbox_boardinfo_vsd_vendor_id
+ * PCISIG Vendor ID (www.pcisig.com/membership/vid_search) of the vendor
+ * specifying/formatting the VSD. The vsd_vendor_id identifies the management
+ * domain of the VSD/PSID data. Different vendors may choose different VSD/PSID
+ * format and encoding as long as they use their assigned vsd_vendor_id.
+ */
+MLXSW_ITEM32(cmd_mbox, boardinfo, vsd_vendor_id, 0x1C, 0, 16);
+
+/* cmd_mbox_boardinfo_vsd
+ * Vendor Specific Data. The VSD string that is burnt to the Flash
+ * with the firmware.
+ */
+#define MLXSW_CMD_BOARDINFO_VSD_LEN 208
+MLXSW_ITEM_BUF(cmd_mbox, boardinfo, vsd, 0x20, MLXSW_CMD_BOARDINFO_VSD_LEN);
+
+/* cmd_mbox_boardinfo_psid
+ * The PSID field is a 16-ascii (byte) character string which acts as
+ * the board ID. The PSID format is used in conjunction with
+ * Mellanox vsd_vendor_id (15B3h).
+ */
+#define MLXSW_CMD_BOARDINFO_PSID_LEN 16
+MLXSW_ITEM_BUF(cmd_mbox, boardinfo, psid, 0xF0, MLXSW_CMD_BOARDINFO_PSID_LEN);
+
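
The PSID acts as the board ID, so a common use of QUERY_BOARDINFO is to copy it out of the output mailbox. A minimal sketch (again not part of the patch; the helper name is hypothetical), using the memcpy_from accessor that MLXSW_ITEM_BUF() generates:

static int example_read_psid(struct mlxsw_core *mlxsw_core,
			     char psid[MLXSW_CMD_BOARDINFO_PSID_LEN + 1])
{
	char *mbox;
	int err;

	mbox = mlxsw_cmd_mbox_alloc();
	if (!mbox)
		return -ENOMEM;

	err = mlxsw_cmd_boardinfo(mlxsw_core, mbox);
	if (!err) {
		/* The PSID is a 16-byte ASCII string, not NUL-terminated. */
		mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, psid);
		psid[MLXSW_CMD_BOARDINFO_PSID_LEN] = '\0';
	}

	mlxsw_cmd_mbox_free(mbox);
	return err;
}
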
+/* QUERY_AQ_CAP - Query Asynchronous Queues Capabilities
+ * -----------------------------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The QUERY_AQ_CAP command returns the device asynchronous queues
+ * capabilities supported.
+ */
+
+static inline int mlxsw_cmd_query_aq_cap(struct mlxsw_core *mlxsw_core,
+ char *out_mbox)
+{
+ return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_AQ_CAP,
+ 0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_query_aq_cap_log_max_sdq_sz
+ * Log (base 2) of max WQEs allowed on SDQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_sdq_sz, 0x00, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_sdqs
+ * Maximum number of SDQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_sdqs, 0x00, 0, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_rdq_sz
+ * Log (base 2) of max WQEs allowed on RDQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_rdq_sz, 0x04, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_rdqs
+ * Maximum number of RDQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_rdqs, 0x04, 0, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_cq_sz
+ * Log (base 2) of the Maximum CQEs allowed in a CQ for CQEv0 and CQEv1.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_cq_sz, 0x08, 24, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_cqv2_sz
+ * Log (base 2) of the Maximum CQEs allowed in a CQ for CQEv2.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_cqv2_sz, 0x08, 16, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_cqs
+ * Maximum number of CQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_cqs, 0x08, 0, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_eq_sz
+ * Log (base 2) of max EQEs allowed on EQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_eq_sz, 0x0C, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_eqs
+ * Maximum number of EQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_eqs, 0x0C, 0, 8);
+
+/* cmd_mbox_query_aq_cap_max_sg_sq
+ * The maximum S/G list elements in a DSQ. A DSQ must not contain
+ * more S/G entries than indicated here.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_sg_sq, 0x10, 8, 8);
+
+/* cmd_mbox_query_aq_cap_max_sg_rq
+ * The maximum S/G list elements in a DRQ. A DRQ must not contain
+ * more S/G entries than indicated here.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_sg_rq, 0x10, 0, 8);
+
+/* MAP_FA - Map Firmware Area
+ * --------------------------
+ * OpMod == 0 (N/A), INMmod == Number of VPM entries
+ * -------------------------------------------------
+ * The MAP_FA command passes physical pages to the switch. These pages
+ * are used to store the device firmware. MAP_FA can be executed multiple
+ * times until all the firmware area is mapped (the size that should be
+ * mapped is retrieved through the QUERY_FW command). All required pages
+ * must be mapped to finish the initialization phase. Physical memory
+ * passed in this command must be pinned.
+ */
+
+#define MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX 32
+
+static inline int mlxsw_cmd_map_fa(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, u32 vpm_entries_count)
+{
+ return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_MAP_FA,
+ 0, vpm_entries_count,
+ in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_map_fa_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, map_fa, pa, 0x00, 12, 52, 0x08, 0x00, true);
+
+/* cmd_mbox_map_fa_log2size
+ * Log (base 2) of the size in 4KB pages of the physical and contiguous memory
+ * that starts at PA_L/H.
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, map_fa, log2size, 0x00, 0, 5, 0x08, 0x04, false);
+
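
To make the VPM-entry layout concrete, here is a hedged sketch (not part of the patch; the helper name and the caller-provided DMA address array are assumptions) of mapping one batch of up to 32 single 4KB pages with MAP_FA:

static int example_map_fa_chunk(struct mlxsw_core *mlxsw_core,
				const dma_addr_t *pages, u32 nr_pages)
{
	char *mbox;
	u32 i;
	int err;

	if (nr_pages > MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX)
		return -EINVAL;

	mbox = mlxsw_cmd_mbox_alloc();
	if (!mbox)
		return -ENOMEM;

	for (i = 0; i < nr_pages; i++) {
		/* Each VPM entry carries a page-aligned physical address and
		 * the log2 size of the contiguous chunk in 4KB pages
		 * (0 means a single page).
		 */
		mlxsw_cmd_mbox_map_fa_pa_set(mbox, i, pages[i]);
		mlxsw_cmd_mbox_map_fa_log2size_set(mbox, i, 0);
	}

	/* INMmod carries the number of valid VPM entries in the mailbox. */
	err = mlxsw_cmd_map_fa(mlxsw_core, mbox, nr_pages);

	mlxsw_cmd_mbox_free(mbox);
	return err;
}
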
+/* UNMAP_FA - Unmap Firmware Area
+ * ------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The UNMAP_FA command unloads the firmware and unmaps all of the
+ * firmware area. After this command completes, the device will not access
+ * the pages that were mapped to the firmware area. After executing the
+ * UNMAP_FA command, a software reset must be performed before the MAP_FA
+ * command can be executed again.
+ */
+
+static inline int mlxsw_cmd_unmap_fa(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_UNMAP_FA, 0, 0);
+}
+
+/* QUERY_RESOURCES - Query chip resources
+ * --------------------------------------
+ * OpMod == 0 (N/A) , INMmod is index
+ * ----------------------------------
+ * The QUERY_RESOURCES command retrieves information related to chip resources
+ * by resource ID. Each command invocation returns 32 entries, with INMmod used
+ * as the base index: for example, index 1 returns entries 32-63. When the end
+ * of the table is reached and there are no more resources, resource ID 0xFFFF
+ * is returned to indicate it.
+ */
+
+#define MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID 0xffff
+#define MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES 100
+#define MLXSW_CMD_QUERY_RESOURCES_PER_QUERY 32
+
+static inline int mlxsw_cmd_query_resources(struct mlxsw_core *mlxsw_core,
+ char *out_mbox, int index)
+{
+ return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_RESOURCES,
+ 0, index, false, out_mbox,
+ MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_query_resource_id
+ * The resource id. 0xFFFF indicates table's end.
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, query_resource, id, 0x00, 16, 16, 0x8, 0, false);
+
+/* cmd_mbox_query_resource_data
+ * The resource
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, query_resource, data,
+ 0x00, 0, 40, 0x8, 0, false);
+
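
The paging scheme described above (32 entries per query, INMmod as the base index, 0xFFFF as the end-of-table sentinel) is easiest to see as a loop. A minimal sketch, not part of the patch and with a hypothetical helper name:

static int example_dump_resources(struct mlxsw_core *mlxsw_core)
{
	char *mbox;
	int index, i, err = 0;

	mbox = mlxsw_cmd_mbox_alloc();
	if (!mbox)
		return -ENOMEM;

	for (index = 0; index < MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES; index++) {
		err = mlxsw_cmd_query_resources(mlxsw_core, mbox, index);
		if (err)
			break;

		for (i = 0; i < MLXSW_CMD_QUERY_RESOURCES_PER_QUERY; i++) {
			u16 id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i);

			if (id == MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID)
				goto out;	/* end-of-table sentinel */
			pr_info("resource 0x%x: %llu\n", id,
				(unsigned long long)
				mlxsw_cmd_mbox_query_resource_data_get(mbox, i));
		}
	}
out:
	mlxsw_cmd_mbox_free(mbox);
	return err;
}
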
+/* CONFIG_PROFILE (Set) - Configure Switch Profile
+ * ------------------------------
+ * OpMod == 1 (Set), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The CONFIG_PROFILE command sets the switch profile. The command can be
+ * executed on the device only once at startup in order to allocate and
+ * configure all switch resources and prepare it for operational mode.
+ * It is not possible to change the device profile after the chip is
+ * in operational mode.
+ * Failure of the CONFIG_PROFILE command leaves the hardware in an indeterminate
+ * state; therefore, a software reset of the device is required following an
+ * unsuccessful completion of the command. A software reset is also required
+ * in order to change an existing profile.
+ */
+
+static inline int mlxsw_cmd_config_profile_set(struct mlxsw_core *mlxsw_core,
+ char *in_mbox)
+{
+ return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_CONFIG_PROFILE,
+ 1, 0, in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_config_profile_set_max_vepa_channels
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_vepa_channels, 0x0C, 0, 1);
+
+/* cmd_mbox_config_profile_set_max_lag
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_lag, 0x0C, 1, 1);
+
+/* cmd_mbox_config_profile_set_max_port_per_lag
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_port_per_lag, 0x0C, 2, 1);
+
+/* cmd_mbox_config_profile_set_max_mid
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_mid, 0x0C, 3, 1);
+
+/* cmd_mbox_config_profile_set_max_pgt
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_pgt, 0x0C, 4, 1);
+
+/* cmd_mbox_config_profile_set_max_system_port
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_system_port, 0x0C, 5, 1);
+
+/* cmd_mbox_config_profile_set_max_vlan_groups
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_vlan_groups, 0x0C, 6, 1);
+
+/* cmd_mbox_config_profile_set_max_regions
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_regions, 0x0C, 7, 1);
+
+/* cmd_mbox_config_profile_set_flood_mode
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_flood_mode, 0x0C, 8, 1);
+
+/* cmd_mbox_config_profile_set_max_flood_tables
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_flood_tables, 0x0C, 9, 1);
+
+/* cmd_mbox_config_profile_set_max_ib_mc
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_ib_mc, 0x0C, 12, 1);
+
+/* cmd_mbox_config_profile_set_max_pkey
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_pkey, 0x0C, 13, 1);
+
+/* cmd_mbox_config_profile_set_adaptive_routing_group_cap
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile,
+ set_adaptive_routing_group_cap, 0x0C, 14, 1);
+
+/* cmd_mbox_config_profile_set_ar_sec
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_ar_sec, 0x0C, 15, 1);
+
+/* cmd_mbox_config_set_ubridge
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_ubridge, 0x0C, 22, 1);
+
+/* cmd_mbox_config_set_kvd_linear_size
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_linear_size, 0x0C, 24, 1);
+
+/* cmd_mbox_config_set_kvd_hash_single_size
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_single_size, 0x0C, 25, 1);
+
+/* cmd_mbox_config_set_kvd_hash_double_size
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_double_size, 0x0C, 26, 1);
+
+/* cmd_mbox_config_set_cqe_version
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_cqe_version, 0x08, 0, 1);
+
+/* cmd_mbox_config_set_cqe_time_stamp_type
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_cqe_time_stamp_type, 0x08, 2, 1);
+
+/* cmd_mbox_config_profile_max_vepa_channels
+ * Maximum number of VEPA channels per port (0 through 16)
+ * 0 - multi-channel VEPA is disabled
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_vepa_channels, 0x10, 0, 8);
+
+/* cmd_mbox_config_profile_max_lag
+ * Maximum number of LAG IDs requested.
+ * Reserved when Spectrum-1/2/3, supported from Spectrum-4 and above.
+ * For Spectrum-4, firmware sets 128 for values between 1-128 and 256 for values
+ * between 129-256.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_lag, 0x14, 0, 16);
+
+/* cmd_mbox_config_profile_max_port_per_lag
+ * Maximum number of ports per LAG requested.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_port_per_lag, 0x18, 0, 16);
+
+/* cmd_mbox_config_profile_max_mid
+ * Maximum Multicast IDs.
+ * Multicast IDs are allocated from 0 to max_mid-1
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_mid, 0x1C, 0, 16);
+
+/* cmd_mbox_config_profile_max_pgt
+ * Maximum records in the Port Group Table per Switch Partition.
+ * Port Group Table indexes are from 0 to max_pgt-1
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_pgt, 0x20, 0, 16);
+
+/* cmd_mbox_config_profile_max_system_port
+ * The maximum number of system ports that can be allocated.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_system_port, 0x24, 0, 16);
+
+/* cmd_mbox_config_profile_max_vlan_groups
+ * Maximum number of VLAN Groups for VLAN binding.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_vlan_groups, 0x28, 0, 12);
+
+/* cmd_mbox_config_profile_max_regions
+ * Maximum number of TCAM Regions.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_regions, 0x2C, 0, 16);
+
+/* cmd_mbox_config_profile_max_flood_tables
+ * Maximum number of single-entry flooding tables. Different flooding tables
+ * can be associated with different packet types.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4);
+
+/* cmd_mbox_config_profile_max_vid_flood_tables
+ * Maximum number of per-vid flooding tables. Flooding tables are associated
+ * to the different packet types for the different switch partitions.
+ * Table size is 4K entries covering all VID space.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_vid_flood_tables, 0x30, 8, 4);
+
+enum mlxsw_cmd_mbox_config_profile_flood_mode {
+ /* Mixed mode, where:
+ * max_flood_tables indicates the number of single-entry tables.
+ * max_vid_flood_tables indicates the number of per-VID tables.
+ * max_fid_offset_flood_tables indicates the number of FID-offset
+ * tables. max_fid_flood_tables indicates the number of per-FID tables.
+ * Reserved when unified bridge model is used.
+ */
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_MIXED = 3,
+ /* Controlled flood tables. Reserved when legacy bridge model is
+ * used.
+ */
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED = 4,
+};
+
+/* cmd_mbox_config_profile_flood_mode
+ * Flooding mode to use.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, flood_mode, 0x30, 0, 3);
+
+/* cmd_mbox_config_profile_max_fid_offset_flood_tables
+ * Maximum number of FID-offset flooding tables.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile,
+ max_fid_offset_flood_tables, 0x34, 24, 4);
+
+/* cmd_mbox_config_profile_fid_offset_flood_table_size
+ * The size (number of entries) of each FID-offset flood table.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile,
+ fid_offset_flood_table_size, 0x34, 0, 16);
+
+/* cmd_mbox_config_profile_max_fid_flood_tables
+ * Maximum number of per-FID flooding tables.
+ *
+ * Note: These flooding tables cover special FIDs only (vFIDs), starting at
+ * FID value 4K and higher.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_fid_flood_tables, 0x38, 24, 4);
+
+/* cmd_mbox_config_profile_fid_flood_table_size
+ * The size (number of entries) of each per-FID table.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, fid_flood_table_size, 0x38, 0, 16);
+
+/* cmd_mbox_config_profile_max_ib_mc
+ * Maximum number of multicast FDB records for InfiniBand
+ * FDB (in 512 chunks) per InfiniBand switch partition.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_ib_mc, 0x40, 0, 15);
+
+/* cmd_mbox_config_profile_max_pkey
+ * Maximum per port PKEY table size (for PKEY enforcement)
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_pkey, 0x44, 0, 15);
+
+/* cmd_mbox_config_profile_ar_sec
+ * Primary/secondary capability
+ * Describes the number of adaptive routing sub-groups
+ * 0 - disable primary/secondary (single group)
+ * 1 - enable primary/secondary (2 sub-groups)
+ * 2 - 3 sub-groups: Not supported in SwitchX, SwitchX-2
+ * 3 - 4 sub-groups: Not supported in SwitchX, SwitchX-2
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, ar_sec, 0x4C, 24, 2);
+
+/* cmd_mbox_config_profile_adaptive_routing_group_cap
+ * Adaptive Routing Group Capability. Indicates the number of AR groups
+ * supported. Note that when Primary/secondary is enabled, each
+ * primary/secondary couple consumes 2 adaptive routing entries.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, adaptive_routing_group_cap, 0x4C, 0, 16);
+
+/* cmd_mbox_config_profile_arn
+ * Adaptive Routing Notification Enable
+ * Not supported in SwitchX, SwitchX-2
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, arn, 0x50, 31, 1);
+
+/* cmd_mbox_config_profile_ubridge
+ * Unified Bridge
+ * 0 - non unified bridge
+ * 1 - unified bridge
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, ubridge, 0x50, 4, 1);
+
+/* cmd_mbox_config_kvd_linear_size
+ * KVD Linear Size
+ * Valid for Spectrum only
+ * Allowed values are 128*N where N=0 or higher
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, kvd_linear_size, 0x54, 0, 24);
+
+/* cmd_mbox_config_kvd_hash_single_size
+ * KVD Hash single-entries size
+ * Valid for Spectrum only
+ * Allowed values are 128*N where N=0 or higher
+ * Must be greater or equal to cap_min_kvd_hash_single_size
+ * Must be smaller or equal to cap_kvd_size - kvd_linear_size
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, kvd_hash_single_size, 0x58, 0, 24);
+
+/* cmd_mbox_config_kvd_hash_double_size
+ * KVD Hash double-entries size (units of single-size entries)
+ * Valid for Spectrum only
+ * Allowed values are 128*N where N=0 or higher
+ * Must be either 0 or greater or equal to cap_min_kvd_hash_double_size
+ * Must be smaller or equal to cap_kvd_size - kvd_linear_size
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, kvd_hash_double_size, 0x5C, 0, 24);
+
+/* cmd_mbox_config_profile_swid_config_mask
+ * Modify Switch Partition Configuration mask. When set, the configuration
+ * values for the Switch Partition are taken from the mailbox.
+ * When clear, the current configuration values are used.
+ * Bit 0 - set type
+ * Bit 1 - properties
+ * Other - reserved
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_mask,
+ 0x60, 24, 8, 0x08, 0x00, false);
+
+/* cmd_mbox_config_profile_swid_config_type
+ * Switch Partition type.
+ * 0000 - disabled (Switch Partition does not exist)
+ * 0001 - InfiniBand
+ * 0010 - Ethernet
+ * 1000 - router port (SwitchX-2 only)
+ * Other - reserved
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_type,
+ 0x60, 20, 4, 0x08, 0x00, false);
+
+/* cmd_mbox_config_profile_swid_config_properties
+ * Switch Partition properties.
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_properties,
+ 0x60, 0, 8, 0x08, 0x00, false);
+
+enum mlxsw_cmd_mbox_config_profile_cqe_time_stamp_type {
+ /* uSec - 1.024uSec (default). Only bits 15:0 are valid. */
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_USEC,
+ /* FRC - Free Running Clock, units of 1nSec.
+ * Reserved when SwitchX/-2, Switch-IB/2 and Spectrum-1.
+ */
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_FRC,
+ /* UTC. time_stamp[37:30] = Sec, time_stamp[29:0] = nSec.
+ * Reserved when SwitchX/2, Switch-IB/2 and Spectrum-1.
+ */
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
+};
+
+/* cmd_mbox_config_profile_cqe_time_stamp_type
+ * CQE time_stamp_type for non-mirror-packets.
+ * Configured if set_cqe_time_stamp_type is set.
+ * Reserved when SwitchX/-2, Switch-IB/2 and Spectrum-1.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, cqe_time_stamp_type, 0xB0, 8, 2);
+
+/* cmd_mbox_config_profile_cqe_version
+ * CQE version:
+ * 0: CQE version is 0
+ * 1: CQE version is either 1 or 2
+ * CQE ver 1 or 2 is configured by Completion Queue Context field cqe_ver.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, cqe_version, 0xB0, 0, 8);
+
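
The profile mailbox follows a capability-bit pattern: a field is applied only if its matching set_* bit is 1, otherwise the device keeps its current/default value. A hedged sketch of that pattern (not part of the patch; the helper name and the chosen values are illustrative only):

static int example_config_profile(struct mlxsw_core *mlxsw_core)
{
	char *mbox;
	int err;

	mbox = mlxsw_cmd_mbox_alloc();
	if (!mbox)
		return -ENOMEM;

	/* Request 128 LAGs with up to 64 member ports each; every other
	 * profile field keeps its default because its set_* capability
	 * bit stays 0.
	 */
	mlxsw_cmd_mbox_config_profile_set_max_lag_set(mbox, 1);
	mlxsw_cmd_mbox_config_profile_max_lag_set(mbox, 128);
	mlxsw_cmd_mbox_config_profile_set_max_port_per_lag_set(mbox, 1);
	mlxsw_cmd_mbox_config_profile_max_port_per_lag_set(mbox, 64);

	/* CONFIG_PROFILE may only be executed once, before operational mode. */
	err = mlxsw_cmd_config_profile_set(mlxsw_core, mbox);

	mlxsw_cmd_mbox_free(mbox);
	return err;
}
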
+/* ACCESS_REG - Access EMAD Supported Register
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -------------------------------------
+ * The ACCESS_REG command supports accessing device registers. This access
+ * is mainly used for bootstrapping.
+ */
+
+static inline int mlxsw_cmd_access_reg(struct mlxsw_core *mlxsw_core,
+ bool reset_ok,
+ char *in_mbox, char *out_mbox)
+{
+ return mlxsw_cmd_exec(mlxsw_core, MLXSW_CMD_OPCODE_ACCESS_REG,
+ 0, 0, false, reset_ok,
+ in_mbox, MLXSW_CMD_MBOX_SIZE,
+ out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* SW2HW_DQ - Software to Hardware DQ
+ * ----------------------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The SW2HW_DQ command transitions a descriptor queue from software to
+ * hardware ownership. The command enables posting WQEs and ringing DoorBells
+ * on the descriptor queue.
+ */
+
+static inline int __mlxsw_cmd_sw2hw_dq(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, u32 dq_number,
+ u8 opcode_mod)
+{
+ return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_DQ,
+ opcode_mod, dq_number,
+ in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+enum {
+ MLXSW_CMD_OPCODE_MOD_SDQ = 0,
+ MLXSW_CMD_OPCODE_MOD_RDQ = 1,
+};
+
+static inline int mlxsw_cmd_sw2hw_sdq(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, u32 dq_number)
+{
+ return __mlxsw_cmd_sw2hw_dq(mlxsw_core, in_mbox, dq_number,
+ MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_sw2hw_rdq(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, u32 dq_number)
+{
+ return __mlxsw_cmd_sw2hw_dq(mlxsw_core, in_mbox, dq_number,
+ MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* cmd_mbox_sw2hw_dq_cq
+ * Number of the CQ that this Descriptor Queue reports completions to.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, cq, 0x00, 24, 8);
+
+enum mlxsw_cmd_mbox_sw2hw_dq_sdq_lp {
+ MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_WQE,
+ MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_IGNORE_WQE,
+};
+
+/* cmd_mbox_sw2hw_dq_sdq_lp
+ * SDQ local Processing
+ * 0: local processing by wqe.lp
+ * 1: local processing (ignoring wqe.lp)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, sdq_lp, 0x00, 23, 1);
+
+/* cmd_mbox_sw2hw_dq_sdq_tclass
+ * SDQ: CPU Egress TClass
+ * RDQ: Reserved
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, sdq_tclass, 0x00, 16, 6);
+
+/* cmd_mbox_sw2hw_dq_log2_dq_sz
+ * Log (base 2) of the Descriptor Queue size in 4KB pages.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, log2_dq_sz, 0x00, 0, 6);
+
+/* cmd_mbox_sw2hw_dq_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_dq, pa, 0x10, 12, 52, 0x08, 0x00, true);
+
+/* HW2SW_DQ - Hardware to Software DQ
+ * ----------------------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The HW2SW_DQ command transitions a descriptor queue from hardware to
+ * software ownership. Incoming packets on the DQ are silently discarded;
+ * software should not post descriptors on non-operational DQs.
+ */
+
+static inline int __mlxsw_cmd_hw2sw_dq(struct mlxsw_core *mlxsw_core,
+ u32 dq_number, u8 opcode_mod)
+{
+ return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_DQ,
+ opcode_mod, dq_number);
+}
+
+static inline int mlxsw_cmd_hw2sw_sdq(struct mlxsw_core *mlxsw_core,
+ u32 dq_number)
+{
+ return __mlxsw_cmd_hw2sw_dq(mlxsw_core, dq_number,
+ MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_hw2sw_rdq(struct mlxsw_core *mlxsw_core,
+ u32 dq_number)
+{
+ return __mlxsw_cmd_hw2sw_dq(mlxsw_core, dq_number,
+ MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* 2ERR_DQ - To Error DQ
+ * ---------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The 2ERR_DQ command transitions the DQ into the error state from the state
+ * in which it has been. While the command is executed, some in-process
+ * descriptors may complete. Once the DQ transitions into the error state,
+ * if there are posted descriptors on the RDQ/SDQ, the hardware writes
+ * a completion with error (flushed) for all descriptors posted in the RDQ/SDQ.
+ * When the command is completed successfully, the DQ is already in
+ * the error state.
+ */
+
+static inline int __mlxsw_cmd_2err_dq(struct mlxsw_core *mlxsw_core,
+ u32 dq_number, u8 opcode_mod)
+{
+ return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_2ERR_DQ,
+ opcode_mod, dq_number);
+}
+
+static inline int mlxsw_cmd_2err_sdq(struct mlxsw_core *mlxsw_core,
+ u32 dq_number)
+{
+ return __mlxsw_cmd_2err_dq(mlxsw_core, dq_number,
+ MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_2err_rdq(struct mlxsw_core *mlxsw_core,
+ u32 dq_number)
+{
+ return __mlxsw_cmd_2err_dq(mlxsw_core, dq_number,
+ MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* QUERY_DQ - Query DQ
+ * ---------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The QUERY_DQ command retrieves a snapshot of DQ parameters from the hardware.
+ *
+ * Note: Output mailbox has the same format as SW2HW_DQ.
+ */
+
+static inline int __mlxsw_cmd_query_dq(struct mlxsw_core *mlxsw_core,
+ char *out_mbox, u32 dq_number,
+ u8 opcode_mod)
+{
+ return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_DQ,
+ opcode_mod, dq_number, false,
+ out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+static inline int mlxsw_cmd_query_sdq(struct mlxsw_core *mlxsw_core,
+ char *out_mbox, u32 dq_number)
+{
+ return __mlxsw_cmd_query_dq(mlxsw_core, out_mbox, dq_number,
+ MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_query_rdq(struct mlxsw_core *mlxsw_core,
+ char *out_mbox, u32 dq_number)
+{
+ return __mlxsw_cmd_query_dq(mlxsw_core, out_mbox, dq_number,
+ MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* SW2HW_CQ - Software to Hardware CQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == CQ number
+ * -------------------------------------
+ * The SW2HW_CQ command transfers ownership of a CQ context entry from software
+ * to hardware. The command takes the CQ context entry from the input mailbox
+ * and stores it in the CQC in the ownership of the hardware. The command fails
+ * if the requested CQC entry is already in the ownership of the hardware.
+ */
+
+static inline int mlxsw_cmd_sw2hw_cq(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, u32 cq_number)
+{
+ return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_CQ,
+ 0, cq_number, in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+enum mlxsw_cmd_mbox_sw2hw_cq_cqe_ver {
+ MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1,
+ MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2,
+};
+
+/* cmd_mbox_sw2hw_cq_cqe_ver
+ * CQE Version.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cqe_ver, 0x00, 28, 4);
+
+/* cmd_mbox_sw2hw_cq_c_eqn
+ * Event Queue this CQ reports completion events to.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, c_eqn, 0x00, 24, 1);
+
+/* cmd_mbox_sw2hw_cq_st
+ * Event delivery state machine
+ * 0x0 - FIRED
+ * 0x1 - ARMED (Request for Notification)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, st, 0x00, 8, 1);
+
+/* cmd_mbox_sw2hw_cq_log_cq_size
+ * Log (base 2) of the CQ size (in entries).
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, log_cq_size, 0x00, 0, 4);
+
+/* cmd_mbox_sw2hw_cq_producer_counter
+ * Producer Counter. The counter is incremented for each CQE that is
+ * written by the HW to the CQ.
+ * Maintained by HW (valid for the QUERY_CQ command only)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, producer_counter, 0x04, 0, 16);
+
+/* cmd_mbox_sw2hw_cq_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_cq, pa, 0x10, 11, 53, 0x08, 0x00, true);
+
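
For the DQ/CQ/EQ commands the pattern is the same: fill the context fields in the input mailbox, then hand the queue to hardware. A minimal sketch for a CQ follows (not part of the patch; the helper name, the EQ number and the single-page queue are assumptions), using the setters generated by the items above:

static int example_sw2hw_cq(struct mlxsw_core *mlxsw_core, u32 cq_number,
			    dma_addr_t cq_page_pa, u8 log_cq_size)
{
	char *mbox;
	int err;

	mbox = mlxsw_cmd_mbox_alloc();
	if (!mbox)
		return -ENOMEM;

	mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
					    MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1);
	mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, 1);	/* completion EQ 1 */
	mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);	/* FIRED */
	mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, log_cq_size);
	mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, 0, cq_page_pa);

	err = mlxsw_cmd_sw2hw_cq(mlxsw_core, mbox, cq_number);

	mlxsw_cmd_mbox_free(mbox);
	return err;
}
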
+/* HW2SW_CQ - Hardware to Software CQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == CQ number
+ * -------------------------------------
+ * The HW2SW_CQ command transfers ownership of a CQ context entry from hardware
+ * to software. The CQC entry is invalidated as a result of this command.
+ */
+
+static inline int mlxsw_cmd_hw2sw_cq(struct mlxsw_core *mlxsw_core,
+ u32 cq_number)
+{
+ return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_CQ,
+ 0, cq_number);
+}
+
+/* QUERY_CQ - Query CQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == CQ number
+ * -------------------------------------
+ * The QUERY_CQ command retrieves a snapshot of the current CQ context entry.
+ * The command stores the snapshot in the output mailbox in the software format.
+ * Note that the CQ context state and values are not affected by the QUERY_CQ
+ * command. The QUERY_CQ command is for debug purposes only.
+ *
+ * Note: Output mailbox has the same format as SW2HW_CQ.
+ */
+
+static inline int mlxsw_cmd_query_cq(struct mlxsw_core *mlxsw_core,
+ char *out_mbox, u32 cq_number)
+{
+ return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_CQ,
+ 0, cq_number, false,
+ out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* SW2HW_EQ - Software to Hardware EQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == EQ number
+ * -------------------------------------
+ * The SW2HW_EQ command transfers ownership of an EQ context entry from software
+ * to hardware. The command takes the EQ context entry from the input mailbox
+ * and stores it in the EQC in the ownership of the hardware. The command fails
+ * if the requested EQC entry is already in the ownership of the hardware.
+ */
+
+static inline int mlxsw_cmd_sw2hw_eq(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, u32 eq_number)
+{
+ return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_EQ,
+ 0, eq_number, in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_sw2hw_eq_int_msix
+ * When set, MSI-X cycles will be generated by this EQ.
+ * When cleared, an interrupt will be generated by this EQ.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, int_msix, 0x00, 24, 1);
+
+/* cmd_mbox_sw2hw_eq_st
+ * Event delivery state machine
+ * 0x0 - FIRED
+ * 0x1 - ARMED (Request for Notification)
+ * 0x3 - Always ARMED
+ * other - reserved
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, st, 0x00, 8, 2);
+
+/* cmd_mbox_sw2hw_eq_log_eq_size
+ * Log (base 2) of the EQ size (in entries).
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, log_eq_size, 0x00, 0, 4);
+
+/* cmd_mbox_sw2hw_eq_producer_counter
+ * Producer Counter. The counter is incremented for each EQE that is written
+ * by the HW to the EQ.
+ * Maintained by HW (valid for the QUERY_EQ command only)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, producer_counter, 0x04, 0, 16);
+
+/* cmd_mbox_sw2hw_eq_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_eq, pa, 0x10, 11, 53, 0x08, 0x00, true);
+
+/* HW2SW_EQ - Hardware to Software EQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == EQ number
+ * -------------------------------------
+ */
+
+static inline int mlxsw_cmd_hw2sw_eq(struct mlxsw_core *mlxsw_core,
+ u32 eq_number)
+{
+ return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_EQ,
+ 0, eq_number);
+}
+
+/* QUERY_EQ - Query EQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == EQ number
+ * -------------------------------------
+ *
+ * Note: Output mailbox has the same format as SW2HW_EQ.
+ */
+
+static inline int mlxsw_cmd_query_eq(struct mlxsw_core *mlxsw_core,
+ char *out_mbox, u32 eq_number)
+{
+ return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_EQ,
+ 0, eq_number, false,
+ out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
new file mode 100644
index 000000000..e2a985ec2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -0,0 +1,3443 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/if_link.h>
+#include <linux/netdevice.h>
+#include <linux/completion.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/gfp.h>
+#include <linux/random.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/rcupdate.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/firmware.h>
+#include <asm/byteorder.h>
+#include <net/devlink.h>
+#include <trace/events/devlink.h>
+
+#include "core.h"
+#include "core_env.h"
+#include "item.h"
+#include "cmd.h"
+#include "port.h"
+#include "trap.h"
+#include "emad.h"
+#include "reg.h"
+#include "resources.h"
+#include "../mlxfw/mlxfw.h"
+
+static LIST_HEAD(mlxsw_core_driver_list);
+static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);
+
+static const char mlxsw_core_driver_name[] = "mlxsw_core";
+
+static struct workqueue_struct *mlxsw_wq;
+static struct workqueue_struct *mlxsw_owq;
+
+struct mlxsw_core_port {
+ struct devlink_port devlink_port;
+ void *port_driver_priv;
+ u16 local_port;
+ struct mlxsw_linecard *linecard;
+};
+
+void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
+{
+ return mlxsw_core_port->port_driver_priv;
+}
+EXPORT_SYMBOL(mlxsw_core_port_driver_priv);
+
+static bool mlxsw_core_port_check(struct mlxsw_core_port *mlxsw_core_port)
+{
+ return mlxsw_core_port->port_driver_priv != NULL;
+}
+
+struct mlxsw_core {
+ struct mlxsw_driver *driver;
+ const struct mlxsw_bus *bus;
+ void *bus_priv;
+ const struct mlxsw_bus_info *bus_info;
+ struct workqueue_struct *emad_wq;
+ struct list_head rx_listener_list;
+ struct list_head event_listener_list;
+ struct list_head irq_event_handler_list;
+ struct mutex irq_event_handler_lock; /* Locks access to handlers list */
+ struct {
+ atomic64_t tid;
+ struct list_head trans_list;
+ spinlock_t trans_list_lock; /* protects trans_list writes */
+ bool use_emad;
+ bool enable_string_tlv;
+ } emad;
+ struct {
+ u16 *mapping; /* lag_id+port_index to local_port mapping */
+ } lag;
+ struct mlxsw_res res;
+ struct mlxsw_hwmon *hwmon;
+ struct mlxsw_thermal *thermal;
+ struct mlxsw_linecards *linecards;
+ struct mlxsw_core_port *ports;
+ unsigned int max_ports;
+ atomic_t active_ports_count;
+ bool fw_flash_in_progress;
+ struct {
+ struct devlink_health_reporter *fw_fatal;
+ } health;
+ struct mlxsw_env *env;
+ unsigned long driver_priv[];
+ /* driver_priv has to be always the last item */
+};
+
+struct mlxsw_linecards *mlxsw_core_linecards(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->linecards;
+}
+
+void mlxsw_core_linecards_set(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards)
+{
+ mlxsw_core->linecards = linecards;
+}
+
+#define MLXSW_PORT_MAX_PORTS_DEFAULT 0x40
+
+static u64 mlxsw_ports_occ_get(void *priv)
+{
+ struct mlxsw_core *mlxsw_core = priv;
+
+ return atomic_read(&mlxsw_core->active_ports_count);
+}
+
+static int mlxsw_core_resources_ports_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct devlink_resource_size_params ports_num_params;
+ u32 max_ports;
+
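+ /* mlxsw_core->max_ports also counts local port 0 (the CPU port), which
+ * is not exposed as a devlink port resource, hence the -1.
+ */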
+ max_ports = mlxsw_core->max_ports - 1;
+ devlink_resource_size_params_init(&ports_num_params, max_ports,
+ max_ports, 1,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ return devl_resource_register(devlink,
+ DEVLINK_RESOURCE_GENERIC_NAME_PORTS,
+ max_ports, MLXSW_CORE_RESOURCE_PORTS,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &ports_num_params);
+}
+
+static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core, bool reload)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ int err;
+
+ /* Switch ports are numbered from 1 to queried value */
+ if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT))
+ mlxsw_core->max_ports = MLXSW_CORE_RES_GET(mlxsw_core,
+ MAX_SYSTEM_PORT) + 1;
+ else
+ mlxsw_core->max_ports = MLXSW_PORT_MAX_PORTS_DEFAULT + 1;
+
+ mlxsw_core->ports = kcalloc(mlxsw_core->max_ports,
+ sizeof(struct mlxsw_core_port), GFP_KERNEL);
+ if (!mlxsw_core->ports)
+ return -ENOMEM;
+
+ if (!reload) {
+ err = mlxsw_core_resources_ports_register(mlxsw_core);
+ if (err)
+ goto err_resources_ports_register;
+ }
+ atomic_set(&mlxsw_core->active_ports_count, 0);
+ devl_resource_occ_get_register(devlink, MLXSW_CORE_RESOURCE_PORTS,
+ mlxsw_ports_occ_get, mlxsw_core);
+
+ return 0;
+
+err_resources_ports_register:
+ kfree(mlxsw_core->ports);
+ return err;
+}
+
+static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core, bool reload)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+
+ devl_resource_occ_get_unregister(devlink, MLXSW_CORE_RESOURCE_PORTS);
+ if (!reload)
+ devl_resources_unregister(priv_to_devlink(mlxsw_core));
+
+ kfree(mlxsw_core->ports);
+}
+
+unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->max_ports;
+}
+EXPORT_SYMBOL(mlxsw_core_max_ports);
+
+int mlxsw_core_max_lag(struct mlxsw_core *mlxsw_core, u16 *p_max_lag)
+{
+ struct mlxsw_driver *driver = mlxsw_core->driver;
+
+ if (driver->profile->used_max_lag) {
+ *p_max_lag = driver->profile->max_lag;
+ return 0;
+ }
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG))
+ return -EIO;
+
+ *p_max_lag = MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_core_max_lag);
+
+void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->driver_priv;
+}
+EXPORT_SYMBOL(mlxsw_core_driver_priv);
+
+bool
+mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
+ const struct mlxsw_fw_rev *req_rev)
+{
+ return rev->minor > req_rev->minor ||
+ (rev->minor == req_rev->minor &&
+ rev->subminor >= req_rev->subminor);
+}
+EXPORT_SYMBOL(mlxsw_core_fw_rev_minor_subminor_validate);
+
+struct mlxsw_rx_listener_item {
+ struct list_head list;
+ struct mlxsw_rx_listener rxl;
+ void *priv;
+ bool enabled;
+};
+
+struct mlxsw_event_listener_item {
+ struct list_head list;
+ struct mlxsw_core *mlxsw_core;
+ struct mlxsw_event_listener el;
+ void *priv;
+};
+
+static const u8 mlxsw_core_trap_groups[] = {
+ MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
+ MLXSW_REG_HTGT_TRAP_GROUP_CORE_EVENT,
+};
+
+static int mlxsw_core_trap_groups_set(struct mlxsw_core *mlxsw_core)
+{
+ char htgt_pl[MLXSW_REG_HTGT_LEN];
+ int err;
+ int i;
+
+ if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_core_trap_groups); i++) {
+ mlxsw_reg_htgt_pack(htgt_pl, mlxsw_core_trap_groups[i],
+ MLXSW_REG_HTGT_INVALID_POLICER,
+ MLXSW_REG_HTGT_DEFAULT_PRIORITY,
+ MLXSW_REG_HTGT_DEFAULT_TC);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+/******************
+ * EMAD processing
+ ******************/
+
+/* emad_eth_hdr_dmac
+ * Destination MAC in EMAD's Ethernet header.
+ * Must be set to 01:02:c9:00:00:01
+ */
+MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);
+
+/* emad_eth_hdr_smac
+ * Source MAC in EMAD's Ethernet header.
+ * Must be set to 00:02:c9:01:02:03
+ */
+MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);
+
+/* emad_eth_hdr_ethertype
+ * Ethertype in EMAD's Ethernet header.
+ * Must be set to 0x8932
+ */
+MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);
+
+/* emad_eth_hdr_mlx_proto
+ * Mellanox protocol.
+ * Must be set to 0x0.
+ */
+MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);
+
+/* emad_eth_hdr_ver
+ * Mellanox protocol version.
+ * Must be set to 0x0.
+ */
+MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);
+
+/* emad_op_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x1 (operation TLV).
+ */
+MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);
+
+/* emad_op_tlv_len
+ * Length of the operation TLV in u32.
+ * Must be set to 0x4.
+ */
+MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);
+
+/* emad_op_tlv_dr
+ * Direct route bit. Setting to 1 indicates the EMAD is a direct route
+ * EMAD. DR TLV must follow.
+ *
+ * Note: Currently not supported and must not be set.
+ */
+MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);
+
+/* emad_op_tlv_status
+ * Returned status in case of EMAD response. Must be set to 0 in case
+ * of EMAD request.
+ * 0x0 - success
+ * 0x1 - device is busy. Requester should retry
+ * 0x2 - Mellanox protocol version not supported
+ * 0x3 - unknown TLV
+ * 0x4 - register not supported
+ * 0x5 - operation class not supported
+ * 0x6 - EMAD method not supported
+ * 0x7 - bad parameter (e.g. port out of range)
+ * 0x8 - resource not available
+ * 0x9 - message receipt acknowledgment. Requester should retry
+ * 0x70 - internal error
+ */
+MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);
+
+/* emad_op_tlv_register_id
+ * Register ID of register within register TLV.
+ */
+MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);
+
+/* emad_op_tlv_r
+ * Response bit. Setting to 1 indicates Response, otherwise request.
+ */
+MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);
+
+/* emad_op_tlv_method
+ * EMAD method type.
+ * 0x1 - query
+ * 0x2 - write
+ * 0x3 - send (currently not supported)
+ * 0x4 - event
+ */
+MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);
+
+/* emad_op_tlv_class
+ * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
+ */
+MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);
+
+/* emad_op_tlv_tid
+ * EMAD transaction ID. Used for pairing request and response EMADs.
+ */
+MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);
+
+/* emad_string_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x2 (string TLV).
+ */
+MLXSW_ITEM32(emad, string_tlv, type, 0x00, 27, 5);
+
+/* emad_string_tlv_len
+ * Length of the string TLV in u32.
+ */
+MLXSW_ITEM32(emad, string_tlv, len, 0x00, 16, 11);
+
+#define MLXSW_EMAD_STRING_TLV_STRING_LEN 128
+
+/* emad_string_tlv_string
+ * String provided by the device's firmware in case of erroneous register access
+ */
+MLXSW_ITEM_BUF(emad, string_tlv, string, 0x04,
+ MLXSW_EMAD_STRING_TLV_STRING_LEN);
+
+/* emad_reg_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x3 (register TLV).
+ */
+MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);
+
+/* emad_reg_tlv_len
+ * Length of the register TLV in u32.
+ */
+MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);
+
+/* emad_end_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x0 (end TLV).
+ */
+MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);
+
+/* emad_end_tlv_len
+ * Length of the end TLV in u32.
+ * Must be set to 1.
+ */
+MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
+
+enum mlxsw_core_reg_access_type {
+ MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
+ MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
+};
+
+static inline const char *
+mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
+{
+ switch (type) {
+ case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
+ return "query";
+ case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
+ return "write";
+ }
+ BUG();
+}
+
+static void mlxsw_emad_pack_end_tlv(char *end_tlv)
+{
+ mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
+ mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
+}
+
+static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
+ const struct mlxsw_reg_info *reg,
+ char *payload)
+{
+ mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
+ mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
+ memcpy(reg_tlv + sizeof(u32), payload, reg->len);
+}
+
+static void mlxsw_emad_pack_string_tlv(char *string_tlv)
+{
+ mlxsw_emad_string_tlv_type_set(string_tlv, MLXSW_EMAD_TLV_TYPE_STRING);
+ mlxsw_emad_string_tlv_len_set(string_tlv, MLXSW_EMAD_STRING_TLV_LEN);
+}
+
+static void mlxsw_emad_pack_op_tlv(char *op_tlv,
+ const struct mlxsw_reg_info *reg,
+ enum mlxsw_core_reg_access_type type,
+ u64 tid)
+{
+ mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
+ mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
+ mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
+ mlxsw_emad_op_tlv_status_set(op_tlv, 0);
+ mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
+ mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
+ if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
+ mlxsw_emad_op_tlv_method_set(op_tlv,
+ MLXSW_EMAD_OP_TLV_METHOD_QUERY);
+ else
+ mlxsw_emad_op_tlv_method_set(op_tlv,
+ MLXSW_EMAD_OP_TLV_METHOD_WRITE);
+ mlxsw_emad_op_tlv_class_set(op_tlv,
+ MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
+ mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
+}
+
+static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
+{
+ char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);
+
+ mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
+ mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
+ mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
+ mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
+ mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);
+
+ skb_reset_mac_header(skb);
+
+ return 0;
+}
+
+static void mlxsw_emad_construct(struct sk_buff *skb,
+ const struct mlxsw_reg_info *reg,
+ char *payload,
+ enum mlxsw_core_reg_access_type type,
+ u64 tid, bool enable_string_tlv)
+{
+ char *buf;
+
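+ /* The TLVs are pushed in reverse order, so the resulting frame reads:
+ * Ethernet header, op TLV, optional string TLV, reg TLV, end TLV.
+ */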
+ buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
+ mlxsw_emad_pack_end_tlv(buf);
+
+ buf = skb_push(skb, reg->len + sizeof(u32));
+ mlxsw_emad_pack_reg_tlv(buf, reg, payload);
+
+ if (enable_string_tlv) {
+ buf = skb_push(skb, MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32));
+ mlxsw_emad_pack_string_tlv(buf);
+ }
+
+ buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
+ mlxsw_emad_pack_op_tlv(buf, reg, type, tid);
+
+ mlxsw_emad_construct_eth_hdr(skb);
+}
+
+struct mlxsw_emad_tlv_offsets {
+ u16 op_tlv;
+ u16 string_tlv;
+ u16 reg_tlv;
+};
+
+static bool mlxsw_emad_tlv_is_string_tlv(const char *tlv)
+{
+ u8 tlv_type = mlxsw_emad_string_tlv_type_get(tlv);
+
+ return tlv_type == MLXSW_EMAD_TLV_TYPE_STRING;
+}
+
+static void mlxsw_emad_tlv_parse(struct sk_buff *skb)
+{
+ struct mlxsw_emad_tlv_offsets *offsets =
+ (struct mlxsw_emad_tlv_offsets *) skb->cb;
+
+ offsets->op_tlv = MLXSW_EMAD_ETH_HDR_LEN;
+ offsets->string_tlv = 0;
+ offsets->reg_tlv = MLXSW_EMAD_ETH_HDR_LEN +
+ MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
+
+ /* If string TLV is present, it must come after the operation TLV. */
+ if (mlxsw_emad_tlv_is_string_tlv(skb->data + offsets->reg_tlv)) {
+ offsets->string_tlv = offsets->reg_tlv;
+ offsets->reg_tlv += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32);
+ }
+}
+
+static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
+{
+ struct mlxsw_emad_tlv_offsets *offsets =
+ (struct mlxsw_emad_tlv_offsets *) skb->cb;
+
+ return ((char *) (skb->data + offsets->op_tlv));
+}
+
+static char *mlxsw_emad_string_tlv(const struct sk_buff *skb)
+{
+ struct mlxsw_emad_tlv_offsets *offsets =
+ (struct mlxsw_emad_tlv_offsets *) skb->cb;
+
+ if (!offsets->string_tlv)
+ return NULL;
+
+ return ((char *) (skb->data + offsets->string_tlv));
+}
+
+static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
+{
+ struct mlxsw_emad_tlv_offsets *offsets =
+ (struct mlxsw_emad_tlv_offsets *) skb->cb;
+
+ return ((char *) (skb->data + offsets->reg_tlv));
+}
+
+static char *mlxsw_emad_reg_payload(const char *reg_tlv)
+{
+ return ((char *) (reg_tlv + sizeof(u32)));
+}
+
+static char *mlxsw_emad_reg_payload_cmd(const char *mbox)
+{
+ return ((char *) (mbox + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
+}
+
+static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
+{
+ char *op_tlv;
+
+ op_tlv = mlxsw_emad_op_tlv(skb);
+ return mlxsw_emad_op_tlv_tid_get(op_tlv);
+}
+
+static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
+{
+ char *op_tlv;
+
+ op_tlv = mlxsw_emad_op_tlv(skb);
+ return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
+}
+
+static int mlxsw_emad_process_status(char *op_tlv,
+ enum mlxsw_emad_op_tlv_status *p_status)
+{
+ *p_status = mlxsw_emad_op_tlv_status_get(op_tlv);
+
+ switch (*p_status) {
+ case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
+ return 0;
+ case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
+ case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
+ return -EAGAIN;
+ case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
+ case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
+ case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
+ case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
+ case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
+ case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
+ case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
+ case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
+ default:
+ return -EIO;
+ }
+}
+
+static int
+mlxsw_emad_process_status_skb(struct sk_buff *skb,
+ enum mlxsw_emad_op_tlv_status *p_status)
+{
+ return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
+}
+
+struct mlxsw_reg_trans {
+ struct list_head list;
+ struct list_head bulk_list;
+ struct mlxsw_core *core;
+ struct sk_buff *tx_skb;
+ struct mlxsw_tx_info tx_info;
+ struct delayed_work timeout_dw;
+ unsigned int retries;
+ u64 tid;
+ struct completion completion;
+ atomic_t active;
+ mlxsw_reg_trans_cb_t *cb;
+ unsigned long cb_priv;
+ const struct mlxsw_reg_info *reg;
+ enum mlxsw_core_reg_access_type type;
+ int err;
+ char *emad_err_string;
+ enum mlxsw_emad_op_tlv_status emad_status;
+ struct rcu_head rcu;
+};
+
+static void mlxsw_emad_process_string_tlv(const struct sk_buff *skb,
+ struct mlxsw_reg_trans *trans)
+{
+ char *string_tlv;
+ char *string;
+
+ string_tlv = mlxsw_emad_string_tlv(skb);
+ if (!string_tlv)
+ return;
+
+ trans->emad_err_string = kzalloc(MLXSW_EMAD_STRING_TLV_STRING_LEN,
+ GFP_ATOMIC);
+ if (!trans->emad_err_string)
+ return;
+
+ string = mlxsw_emad_string_tlv_string_data(string_tlv);
+ strscpy(trans->emad_err_string, string,
+ MLXSW_EMAD_STRING_TLV_STRING_LEN);
+}
+
+#define MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS 3000
+#define MLXSW_EMAD_TIMEOUT_MS 200
+
+static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
+{
+ unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
+
+ if (trans->core->fw_flash_in_progress)
+ timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS);
+
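+ /* Scale the timeout exponentially with the number of retries already
+ * performed.
+ */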
+ queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw,
+ timeout << trans->retries);
+}
+
+static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_reg_trans *trans)
+{
+ struct sk_buff *skb;
+ int err;
+
+ skb = skb_clone(trans->tx_skb, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
+ skb->data + mlxsw_core->driver->txhdr_len,
+ skb->len - mlxsw_core->driver->txhdr_len);
+
+ atomic_set(&trans->active, 1);
+ err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
+ if (err) {
+ dev_kfree_skb(skb);
+ return err;
+ }
+ mlxsw_emad_trans_timeout_schedule(trans);
+ return 0;
+}
+
+static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
+{
+ struct mlxsw_core *mlxsw_core = trans->core;
+
+ dev_kfree_skb(trans->tx_skb);
+ spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
+ list_del_rcu(&trans->list);
+ spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
+ trans->err = err;
+ complete(&trans->completion);
+}
+
+static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_reg_trans *trans)
+{
+ int err;
+
+ if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
+ trans->retries++;
+ err = mlxsw_emad_transmit(trans->core, trans);
+ if (err == 0)
+ return;
+
+ if (!atomic_dec_and_test(&trans->active))
+ return;
+ } else {
+ err = -EIO;
+ }
+ mlxsw_emad_trans_finish(trans, err);
+}
+
+static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
+{
+ struct mlxsw_reg_trans *trans = container_of(work,
+ struct mlxsw_reg_trans,
+ timeout_dw.work);
+
+ if (!atomic_dec_and_test(&trans->active))
+ return;
+
+ mlxsw_emad_transmit_retry(trans->core, trans);
+}
+
+static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_reg_trans *trans,
+ struct sk_buff *skb)
+{
+ int err;
+
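+ /* Both the response path and the timeout worker decrement 'active';
+ * only the first one to do so may complete the transaction.
+ */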
+ if (!atomic_dec_and_test(&trans->active))
+ return;
+
+ err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
+ if (err == -EAGAIN) {
+ mlxsw_emad_transmit_retry(mlxsw_core, trans);
+ } else {
+ if (err == 0) {
+ char *reg_tlv = mlxsw_emad_reg_tlv(skb);
+
+ if (trans->cb)
+ trans->cb(mlxsw_core,
+ mlxsw_emad_reg_payload(reg_tlv),
+ trans->reg->len, trans->cb_priv);
+ } else {
+ mlxsw_emad_process_string_tlv(skb, trans);
+ }
+ mlxsw_emad_trans_finish(trans, err);
+ }
+}
+
+/* called with rcu read lock held */
+static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u16 local_port,
+ void *priv)
+{
+ struct mlxsw_core *mlxsw_core = priv;
+ struct mlxsw_reg_trans *trans;
+
+ trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
+ skb->data, skb->len);
+
+ mlxsw_emad_tlv_parse(skb);
+
+ if (!mlxsw_emad_is_resp(skb))
+ goto free_skb;
+
+ list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
+ if (mlxsw_emad_get_tid(skb) == trans->tid) {
+ mlxsw_emad_process_response(mlxsw_core, trans, skb);
+ break;
+ }
+ }
+
+free_skb:
+ dev_kfree_skb(skb);
+}
+
+static const struct mlxsw_listener mlxsw_emad_rx_listener =
+ MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
+ EMAD, DISCARD);
+
+static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
+{
+ struct workqueue_struct *emad_wq;
+ u64 tid;
+ int err;
+
+ if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
+ return 0;
+
+ emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0);
+ if (!emad_wq)
+ return -ENOMEM;
+ mlxsw_core->emad_wq = emad_wq;
+
+ /* Set the upper 32 bits of the transaction ID field to a random
+ * number. This allows us to discard EMADs addressed to other
+ * devices.
+ */
+ get_random_bytes(&tid, 4);
+ tid <<= 32;
+ atomic64_set(&mlxsw_core->emad.tid, tid);
+
+ INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
+ spin_lock_init(&mlxsw_core->emad.trans_list_lock);
+
+ err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener,
+ mlxsw_core);
+ if (err)
+ goto err_trap_register;
+
+ mlxsw_core->emad.use_emad = true;
+
+ return 0;
+
+err_trap_register:
+ destroy_workqueue(mlxsw_core->emad_wq);
+ return err;
+}
+
+static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
+{
+ if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
+ return;
+
+ mlxsw_core->emad.use_emad = false;
+ mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
+ mlxsw_core);
+ destroy_workqueue(mlxsw_core->emad_wq);
+}
+
+static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
+ u16 reg_len, bool enable_string_tlv)
+{
+ struct sk_buff *skb;
+ u16 emad_len;
+
+ emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
+ (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
+ sizeof(u32) + mlxsw_core->driver->txhdr_len);
+ if (enable_string_tlv)
+ emad_len += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32);
+ if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
+ return NULL;
+
+ skb = netdev_alloc_skb(NULL, emad_len);
+ if (!skb)
+ return NULL;
+ memset(skb->data, 0, emad_len);
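+ /* Reserve the whole frame as headroom; the EMAD and the TX header are
+ * later built back-to-front with skb_push().
+ */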
+ skb_reserve(skb, emad_len);
+
+ return skb;
+}
+
+static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg,
+ char *payload,
+ enum mlxsw_core_reg_access_type type,
+ struct mlxsw_reg_trans *trans,
+ struct list_head *bulk_list,
+ mlxsw_reg_trans_cb_t *cb,
+ unsigned long cb_priv, u64 tid)
+{
+ bool enable_string_tlv;
+ struct sk_buff *skb;
+ int err;
+
+ dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
+ tid, reg->id, mlxsw_reg_id_str(reg->id),
+ mlxsw_core_reg_access_type_str(type));
+
+ /* Since this can be changed during emad_reg_access(), read it once and
+ * use the same value throughout.
+ */
+ enable_string_tlv = mlxsw_core->emad.enable_string_tlv;
+
+ skb = mlxsw_emad_alloc(mlxsw_core, reg->len, enable_string_tlv);
+ if (!skb)
+ return -ENOMEM;
+
+ list_add_tail(&trans->bulk_list, bulk_list);
+ trans->core = mlxsw_core;
+ trans->tx_skb = skb;
+ trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
+ trans->tx_info.is_emad = true;
+ INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
+ trans->tid = tid;
+ init_completion(&trans->completion);
+ trans->cb = cb;
+ trans->cb_priv = cb_priv;
+ trans->reg = reg;
+ trans->type = type;
+
+ mlxsw_emad_construct(skb, reg, payload, type, trans->tid,
+ enable_string_tlv);
+ mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);
+
+ spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
+ list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
+ spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
+ err = mlxsw_emad_transmit(mlxsw_core, trans);
+ if (err)
+ goto err_out;
+ return 0;
+
+err_out:
+ spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
+ list_del_rcu(&trans->list);
+ spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
+ list_del(&trans->bulk_list);
+ dev_kfree_skb(trans->tx_skb);
+ return err;
+}
+
+/*****************
+ * Core functions
+ *****************/
+
+int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
+{
+ spin_lock(&mlxsw_core_driver_list_lock);
+ list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
+ spin_unlock(&mlxsw_core_driver_list_lock);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_core_driver_register);
+
+void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
+{
+ spin_lock(&mlxsw_core_driver_list_lock);
+ list_del(&mlxsw_driver->list);
+ spin_unlock(&mlxsw_core_driver_list_lock);
+}
+EXPORT_SYMBOL(mlxsw_core_driver_unregister);
+
+static struct mlxsw_driver *__driver_find(const char *kind)
+{
+ struct mlxsw_driver *mlxsw_driver;
+
+ list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
+ if (strcmp(mlxsw_driver->kind, kind) == 0)
+ return mlxsw_driver;
+ }
+ return NULL;
+}
+
+static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
+{
+ struct mlxsw_driver *mlxsw_driver;
+
+ spin_lock(&mlxsw_core_driver_list_lock);
+ mlxsw_driver = __driver_find(kind);
+ spin_unlock(&mlxsw_core_driver_list_lock);
+ return mlxsw_driver;
+}
+
+int mlxsw_core_fw_flash(struct mlxsw_core *mlxsw_core,
+ struct mlxfw_dev *mlxfw_dev,
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ mlxsw_core->fw_flash_in_progress = true;
+ err = mlxfw_firmware_flash(mlxfw_dev, firmware, extack);
+ mlxsw_core->fw_flash_in_progress = false;
+
+ return err;
+}
+
+struct mlxsw_core_fw_info {
+ struct mlxfw_dev mlxfw_dev;
+ struct mlxsw_core *mlxsw_core;
+};
+
+static int mlxsw_core_fw_component_query(struct mlxfw_dev *mlxfw_dev,
+ u16 component_index, u32 *p_max_size,
+ u8 *p_align_bits, u16 *p_max_write_size)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcqi_pl[MLXSW_REG_MCQI_LEN];
+ int err;
+
+ mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcqi), mcqi_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits, p_max_write_size);
+
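+ /* Clamp the values reported by the device: require an alignment value
+ * of at least 2 and cap each block write to the maximum data length of
+ * a single MCDA register.
+ */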
+ *p_align_bits = max_t(u8, *p_align_bits, 2);
+ *p_max_write_size = min_t(u16, *p_max_write_size, MLXSW_REG_MCDA_MAX_DATA_LEN);
+ return 0;
+}
+
+static int mlxsw_core_fw_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+ u8 control_state;
+ int err;
+
+ mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
+ if (control_state != MLXFW_FSM_STATE_IDLE)
+ return -EBUSY;
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE, 0, *fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static int mlxsw_core_fw_fsm_component_update(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ u16 component_index, u32 component_size)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
+ component_index, fwhandle, component_size);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static int mlxsw_core_fw_fsm_block_download(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ u8 *data, u16 size, u32 offset)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcda_pl[MLXSW_REG_MCDA_LEN];
+
+ mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcda), mcda_pl);
+}
+
+static int mlxsw_core_fw_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ u16 component_index)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
+ component_index, fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static int mlxsw_core_fw_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0, fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static int mlxsw_core_fw_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ enum mlxfw_fsm_state *fsm_state,
+ enum mlxfw_fsm_state_err *fsm_state_err)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+ u8 control_state;
+ u8 error_code;
+ int err;
+
+ mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
+ *fsm_state = control_state;
+ *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code, MLXFW_FSM_STATE_ERR_MAX);
+ return 0;
+}
+
+static void mlxsw_core_fw_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0, fwhandle, 0);
+ mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static void mlxsw_core_fw_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0, fwhandle, 0);
+ mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static const struct mlxfw_dev_ops mlxsw_core_fw_mlxsw_dev_ops = {
+ .component_query = mlxsw_core_fw_component_query,
+ .fsm_lock = mlxsw_core_fw_fsm_lock,
+ .fsm_component_update = mlxsw_core_fw_fsm_component_update,
+ .fsm_block_download = mlxsw_core_fw_fsm_block_download,
+ .fsm_component_verify = mlxsw_core_fw_fsm_component_verify,
+ .fsm_activate = mlxsw_core_fw_fsm_activate,
+ .fsm_query_state = mlxsw_core_fw_fsm_query_state,
+ .fsm_cancel = mlxsw_core_fw_fsm_cancel,
+ .fsm_release = mlxsw_core_fw_fsm_release,
+};
+
+static int mlxsw_core_dev_fw_flash(struct mlxsw_core *mlxsw_core,
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core_fw_info mlxsw_core_fw_info = {
+ .mlxfw_dev = {
+ .ops = &mlxsw_core_fw_mlxsw_dev_ops,
+ .psid = mlxsw_core->bus_info->psid,
+ .psid_size = strlen(mlxsw_core->bus_info->psid),
+ .devlink = priv_to_devlink(mlxsw_core),
+ },
+ .mlxsw_core = mlxsw_core
+ };
+
+ return mlxsw_core_fw_flash(mlxsw_core, &mlxsw_core_fw_info.mlxfw_dev,
+ firmware, extack);
+}
+
+static int mlxsw_core_fw_rev_validate(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ const struct mlxsw_fw_rev *req_rev,
+ const char *filename)
+{
+ const struct mlxsw_fw_rev *rev = &mlxsw_bus_info->fw_rev;
+ union devlink_param_value value;
+ const struct firmware *firmware;
+ int err;
+
+ /* Don't check if driver does not require it */
+ if (!req_rev || !filename)
+ return 0;
+
+ /* Don't check if devlink 'fw_load_policy' param is 'flash' */
+ err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_core),
+ DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
+ &value);
+ if (err)
+ return err;
+ if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
+ return 0;
+
+ /* Validate driver & FW are compatible */
+ if (rev->major != req_rev->major) {
+ WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
+ rev->major, req_rev->major);
+ return -EINVAL;
+ }
+ if (mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev))
+ return 0;
+
+ dev_err(mlxsw_bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n",
+ rev->major, rev->minor, rev->subminor, req_rev->major,
+ req_rev->minor, req_rev->subminor);
+ dev_info(mlxsw_bus_info->dev, "Flashing firmware using file %s\n", filename);
+
+ err = request_firmware_direct(&firmware, filename, mlxsw_bus_info->dev);
+ if (err) {
+ dev_err(mlxsw_bus_info->dev, "Could not request firmware file %s\n", filename);
+ return err;
+ }
+
+ err = mlxsw_core_dev_fw_flash(mlxsw_core, firmware, NULL);
+ release_firmware(firmware);
+ if (err)
+ dev_err(mlxsw_bus_info->dev, "Could not upgrade firmware\n");
+
+ /* On FW flash success, tell the caller FW reset is needed
+ * if current FW supports it.
+ */
+ if (rev->minor >= req_rev->can_reset_minor)
+ return err ? err : -EAGAIN;
+ else
+ return 0;
+}
+
+static int mlxsw_core_fw_flash_update(struct mlxsw_core *mlxsw_core,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ return mlxsw_core_dev_fw_flash(mlxsw_core, params->fw, extack);
+}
+
+static int mlxsw_core_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ if (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER &&
+ val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH) {
+ NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct devlink_param mlxsw_core_fw_devlink_params[] = {
+ DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
+ mlxsw_core_devlink_param_fw_load_policy_validate),
+};
+
+static int mlxsw_core_fw_params_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ union devlink_param_value value;
+ int err;
+
+ err = devlink_params_register(devlink, mlxsw_core_fw_devlink_params,
+ ARRAY_SIZE(mlxsw_core_fw_devlink_params));
+ if (err)
+ return err;
+
+ value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
+ devlink_param_driverinit_value_set(devlink, DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, value);
+ return 0;
+}
+
+static void mlxsw_core_fw_params_unregister(struct mlxsw_core *mlxsw_core)
+{
+ devlink_params_unregister(priv_to_devlink(mlxsw_core), mlxsw_core_fw_devlink_params,
+ ARRAY_SIZE(mlxsw_core_fw_devlink_params));
+}
+
+static void *__dl_port(struct devlink_port *devlink_port)
+{
+ return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
+}
+
+static int mlxsw_devlink_port_split(struct devlink *devlink,
+ struct devlink_port *port,
+ unsigned int count,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core_port *mlxsw_core_port = __dl_port(port);
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+
+ if (!mlxsw_core->driver->port_split)
+ return -EOPNOTSUPP;
+ return mlxsw_core->driver->port_split(mlxsw_core,
+ mlxsw_core_port->local_port,
+ count, extack);
+}
+
+static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
+ struct devlink_port *port,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core_port *mlxsw_core_port = __dl_port(port);
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+
+ if (!mlxsw_core->driver->port_unsplit)
+ return -EOPNOTSUPP;
+ return mlxsw_core->driver->port_unsplit(mlxsw_core,
+ mlxsw_core_port->local_port,
+ extack);
+}
+
+static int
+mlxsw_devlink_sb_pool_get(struct devlink *devlink,
+ unsigned int sb_index, u16 pool_index,
+ struct devlink_sb_pool_info *pool_info)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->sb_pool_get)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
+ pool_index, pool_info);
+}
+
+static int
+mlxsw_devlink_sb_pool_set(struct devlink *devlink,
+ unsigned int sb_index, u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->sb_pool_set)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
+ pool_index, size, threshold_type,
+ extack);
+}
+
+static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+ struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+ if (!mlxsw_driver->sb_port_pool_get ||
+ !mlxsw_core_port_check(mlxsw_core_port))
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
+ pool_index, p_threshold);
+}
+
+static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+ struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+ if (!mlxsw_driver->sb_port_pool_set ||
+ !mlxsw_core_port_check(mlxsw_core_port))
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
+ pool_index, threshold, extack);
+}
+
+static int
+mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+ struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+ if (!mlxsw_driver->sb_tc_pool_bind_get ||
+ !mlxsw_core_port_check(mlxsw_core_port))
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
+ tc_index, pool_type,
+ p_pool_index, p_threshold);
+}
+
+static int
+mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+ struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+ if (!mlxsw_driver->sb_tc_pool_bind_set ||
+ !mlxsw_core_port_check(mlxsw_core_port))
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
+ tc_index, pool_type,
+ pool_index, threshold, extack);
+}
+
+static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
+ unsigned int sb_index)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->sb_occ_snapshot)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
+}
+
+static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
+ unsigned int sb_index)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->sb_occ_max_clear)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
+}
+
+static int
+mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_cur, u32 *p_max)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+ struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+ if (!mlxsw_driver->sb_occ_port_pool_get ||
+ !mlxsw_core_port_check(mlxsw_core_port))
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
+ pool_index, p_cur, p_max);
+}
+
+static int
+mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+ struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+ if (!mlxsw_driver->sb_occ_tc_port_bind_get ||
+ !mlxsw_core_port_check(mlxsw_core_port))
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
+ sb_index, tc_index,
+ pool_type, p_cur, p_max);
+}
+
+static int
+mlxsw_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ char fw_info_psid[MLXSW_REG_MGIR_FW_INFO_PSID_SIZE];
+ u32 hw_rev, fw_major, fw_minor, fw_sub_minor;
+ char mgir_pl[MLXSW_REG_MGIR_LEN];
+ char buf[32];
+ int err;
+
+ err = devlink_info_driver_name_put(req,
+ mlxsw_core->bus_info->device_kind);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgir_pack(mgir_pl);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgir), mgir_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mgir_unpack(mgir_pl, &hw_rev, fw_info_psid, &fw_major,
+ &fw_minor, &fw_sub_minor);
+
+ sprintf(buf, "%X", hw_rev);
+ err = devlink_info_version_fixed_put(req, "hw.revision", buf);
+ if (err)
+ return err;
+
+ err = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
+ fw_info_psid);
+ if (err)
+ return err;
+
+ sprintf(buf, "%d.%d.%d", fw_major, fw_minor, fw_sub_minor);
+ err = devlink_info_version_running_put(req, "fw.version", buf);
+ if (err)
+ return err;
+
+ return devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ buf);
+}
+
+static int
+mlxsw_devlink_core_bus_device_reload_down(struct devlink *devlink,
+ bool netns_change, enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+
+ if (!(mlxsw_core->bus->features & MLXSW_BUS_F_RESET))
+ return -EOPNOTSUPP;
+
+ mlxsw_core_bus_device_unregister(mlxsw_core, true);
+ return 0;
+}
+
+static int
+mlxsw_devlink_core_bus_device_reload_up(struct devlink *devlink, enum devlink_reload_action action,
+ enum devlink_reload_limit limit, u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ int err;
+
+ *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
+ BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
+ err = mlxsw_core_bus_device_register(mlxsw_core->bus_info,
+ mlxsw_core->bus,
+ mlxsw_core->bus_priv, true,
+ devlink, extack);
+ return err;
+}
+
+static int mlxsw_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+
+ return mlxsw_core_fw_flash_update(mlxsw_core, params, extack);
+}
+
+static int mlxsw_devlink_trap_init(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ void *trap_ctx)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->trap_init)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->trap_init(mlxsw_core, trap, trap_ctx);
+}
+
+static void mlxsw_devlink_trap_fini(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ void *trap_ctx)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->trap_fini)
+ return;
+ mlxsw_driver->trap_fini(mlxsw_core, trap, trap_ctx);
+}
+
+static int mlxsw_devlink_trap_action_set(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->trap_action_set)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->trap_action_set(mlxsw_core, trap, action, extack);
+}
+
+static int
+mlxsw_devlink_trap_group_init(struct devlink *devlink,
+ const struct devlink_trap_group *group)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->trap_group_init)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->trap_group_init(mlxsw_core, group);
+}
+
+static int
+mlxsw_devlink_trap_group_set(struct devlink *devlink,
+ const struct devlink_trap_group *group,
+ const struct devlink_trap_policer *policer,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->trap_group_set)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->trap_group_set(mlxsw_core, group, policer, extack);
+}
+
+static int
+mlxsw_devlink_trap_policer_init(struct devlink *devlink,
+ const struct devlink_trap_policer *policer)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->trap_policer_init)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->trap_policer_init(mlxsw_core, policer);
+}
+
+static void
+mlxsw_devlink_trap_policer_fini(struct devlink *devlink,
+ const struct devlink_trap_policer *policer)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->trap_policer_fini)
+ return;
+ mlxsw_driver->trap_policer_fini(mlxsw_core, policer);
+}
+
+static int
+mlxsw_devlink_trap_policer_set(struct devlink *devlink,
+ const struct devlink_trap_policer *policer,
+ u64 rate, u64 burst,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->trap_policer_set)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->trap_policer_set(mlxsw_core, policer, rate, burst,
+ extack);
+}
+
+static int
+mlxsw_devlink_trap_policer_counter_get(struct devlink *devlink,
+ const struct devlink_trap_policer *policer,
+ u64 *p_drops)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->trap_policer_counter_get)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->trap_policer_counter_get(mlxsw_core, policer,
+ p_drops);
+}
+
+static const struct devlink_ops mlxsw_devlink_ops = {
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
+ BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
+ .reload_down = mlxsw_devlink_core_bus_device_reload_down,
+ .reload_up = mlxsw_devlink_core_bus_device_reload_up,
+ .port_split = mlxsw_devlink_port_split,
+ .port_unsplit = mlxsw_devlink_port_unsplit,
+ .sb_pool_get = mlxsw_devlink_sb_pool_get,
+ .sb_pool_set = mlxsw_devlink_sb_pool_set,
+ .sb_port_pool_get = mlxsw_devlink_sb_port_pool_get,
+ .sb_port_pool_set = mlxsw_devlink_sb_port_pool_set,
+ .sb_tc_pool_bind_get = mlxsw_devlink_sb_tc_pool_bind_get,
+ .sb_tc_pool_bind_set = mlxsw_devlink_sb_tc_pool_bind_set,
+ .sb_occ_snapshot = mlxsw_devlink_sb_occ_snapshot,
+ .sb_occ_max_clear = mlxsw_devlink_sb_occ_max_clear,
+ .sb_occ_port_pool_get = mlxsw_devlink_sb_occ_port_pool_get,
+ .sb_occ_tc_port_bind_get = mlxsw_devlink_sb_occ_tc_port_bind_get,
+ .info_get = mlxsw_devlink_info_get,
+ .flash_update = mlxsw_devlink_flash_update,
+ .trap_init = mlxsw_devlink_trap_init,
+ .trap_fini = mlxsw_devlink_trap_fini,
+ .trap_action_set = mlxsw_devlink_trap_action_set,
+ .trap_group_init = mlxsw_devlink_trap_group_init,
+ .trap_group_set = mlxsw_devlink_trap_group_set,
+ .trap_policer_init = mlxsw_devlink_trap_policer_init,
+ .trap_policer_fini = mlxsw_devlink_trap_policer_fini,
+ .trap_policer_set = mlxsw_devlink_trap_policer_set,
+ .trap_policer_counter_get = mlxsw_devlink_trap_policer_counter_get,
+};
+
+static int mlxsw_core_params_register(struct mlxsw_core *mlxsw_core)
+{
+ int err;
+
+ err = mlxsw_core_fw_params_register(mlxsw_core);
+ if (err)
+ return err;
+
+ if (mlxsw_core->driver->params_register) {
+ err = mlxsw_core->driver->params_register(mlxsw_core);
+ if (err)
+ goto err_params_register;
+ }
+ return 0;
+
+err_params_register:
+ mlxsw_core_fw_params_unregister(mlxsw_core);
+ return err;
+}
+
+static void mlxsw_core_params_unregister(struct mlxsw_core *mlxsw_core)
+{
+ mlxsw_core_fw_params_unregister(mlxsw_core);
+ if (mlxsw_core->driver->params_register)
+ mlxsw_core->driver->params_unregister(mlxsw_core);
+}
+
+struct mlxsw_core_health_event {
+ struct mlxsw_core *mlxsw_core;
+ char mfde_pl[MLXSW_REG_MFDE_LEN];
+ struct work_struct work;
+};
+
+static void mlxsw_core_health_event_work(struct work_struct *work)
+{
+ struct mlxsw_core_health_event *event;
+ struct mlxsw_core *mlxsw_core;
+
+ event = container_of(work, struct mlxsw_core_health_event, work);
+ mlxsw_core = event->mlxsw_core;
+ devlink_health_report(mlxsw_core->health.fw_fatal, "FW fatal event occurred",
+ event->mfde_pl);
+ kfree(event);
+}
+
+static void mlxsw_core_health_listener_func(const struct mlxsw_reg_info *reg,
+ char *mfde_pl, void *priv)
+{
+ struct mlxsw_core_health_event *event;
+ struct mlxsw_core *mlxsw_core = priv;
+
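+ /* The listener may be invoked in atomic context, so allocate with
+ * GFP_ATOMIC and defer the devlink health report to a work item.
+ */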
+ event = kmalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return;
+ event->mlxsw_core = mlxsw_core;
+ memcpy(event->mfde_pl, mfde_pl, sizeof(event->mfde_pl));
+ INIT_WORK(&event->work, mlxsw_core_health_event_work);
+ mlxsw_core_schedule_work(&event->work);
+}
+
+static const struct mlxsw_listener mlxsw_core_health_listener =
+ MLXSW_CORE_EVENTL(mlxsw_core_health_listener_func, MFDE);
+
+static int
+mlxsw_core_health_fw_fatal_dump_fatal_cause(const char *mfde_pl,
+ struct devlink_fmsg *fmsg)
+{
+ u32 val, tile_v;
+ int err;
+
+ val = mlxsw_reg_mfde_fatal_cause_id_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "cause_id", val);
+ if (err)
+ return err;
+ tile_v = mlxsw_reg_mfde_fatal_cause_tile_v_get(mfde_pl);
+ if (tile_v) {
+ val = mlxsw_reg_mfde_fatal_cause_tile_index_get(mfde_pl);
+ err = devlink_fmsg_u8_pair_put(fmsg, "tile_index", val);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+mlxsw_core_health_fw_fatal_dump_fw_assert(const char *mfde_pl,
+ struct devlink_fmsg *fmsg)
+{
+ u32 val, tile_v;
+ int err;
+
+ val = mlxsw_reg_mfde_fw_assert_var0_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "var0", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_fw_assert_var1_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "var1", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_fw_assert_var2_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "var2", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_fw_assert_var3_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "var3", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_fw_assert_var4_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "var4", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_fw_assert_existptr_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "existptr", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_fw_assert_callra_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "callra", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_fw_assert_oe_get(mfde_pl);
+ err = devlink_fmsg_bool_pair_put(fmsg, "old_event", val);
+ if (err)
+ return err;
+ tile_v = mlxsw_reg_mfde_fw_assert_tile_v_get(mfde_pl);
+ if (tile_v) {
+ val = mlxsw_reg_mfde_fw_assert_tile_index_get(mfde_pl);
+ err = devlink_fmsg_u8_pair_put(fmsg, "tile_index", val);
+ if (err)
+ return err;
+ }
+ val = mlxsw_reg_mfde_fw_assert_ext_synd_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "ext_synd", val);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int
+mlxsw_core_health_fw_fatal_dump_kvd_im_stop(const char *mfde_pl,
+ struct devlink_fmsg *fmsg)
+{
+ u32 val;
+ int err;
+
+ val = mlxsw_reg_mfde_kvd_im_stop_oe_get(mfde_pl);
+ err = devlink_fmsg_bool_pair_put(fmsg, "old_event", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_kvd_im_stop_pipes_mask_get(mfde_pl);
+ return devlink_fmsg_u32_pair_put(fmsg, "pipes_mask", val);
+}
+
+static int
+mlxsw_core_health_fw_fatal_dump_crspace_to(const char *mfde_pl,
+ struct devlink_fmsg *fmsg)
+{
+ u32 val;
+ int err;
+
+ val = mlxsw_reg_mfde_crspace_to_log_address_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "log_address", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_crspace_to_oe_get(mfde_pl);
+ err = devlink_fmsg_bool_pair_put(fmsg, "old_event", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_crspace_to_log_id_get(mfde_pl);
+ err = devlink_fmsg_u8_pair_put(fmsg, "log_irisc_id", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_crspace_to_log_ip_get(mfde_pl);
+ err = devlink_fmsg_u64_pair_put(fmsg, "log_ip", val);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ char *mfde_pl = priv_ctx;
+ char *val_str;
+ u8 event_id;
+ u32 val;
+ int err;
+
+ if (!priv_ctx)
+ /* User-triggered dumps are not possible */
+ return -EOPNOTSUPP;
+
+ val = mlxsw_reg_mfde_irisc_id_get(mfde_pl);
+ err = devlink_fmsg_u8_pair_put(fmsg, "irisc_id", val);
+ if (err)
+ return err;
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "event");
+ if (err)
+ return err;
+
+ event_id = mlxsw_reg_mfde_event_id_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "id", event_id);
+ if (err)
+ return err;
+ switch (event_id) {
+ case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO:
+ val_str = "CR space timeout";
+ break;
+ case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP:
+ val_str = "KVD insertion machine stopped";
+ break;
+ case MLXSW_REG_MFDE_EVENT_ID_TEST:
+ val_str = "Test";
+ break;
+ case MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT:
+ val_str = "FW assert";
+ break;
+ case MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE:
+ val_str = "Fatal cause";
+ break;
+ default:
+ val_str = NULL;
+ }
+ if (val_str) {
+ err = devlink_fmsg_string_pair_put(fmsg, "desc", val_str);
+ if (err)
+ return err;
+ }
+
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "severity");
+ if (err)
+ return err;
+
+ val = mlxsw_reg_mfde_severity_get(mfde_pl);
+ err = devlink_fmsg_u8_pair_put(fmsg, "id", val);
+ if (err)
+ return err;
+ switch (val) {
+ case MLXSW_REG_MFDE_SEVERITY_FATL:
+ val_str = "Fatal";
+ break;
+ case MLXSW_REG_MFDE_SEVERITY_NRML:
+ val_str = "Normal";
+ break;
+ case MLXSW_REG_MFDE_SEVERITY_INTR:
+ val_str = "Debug";
+ break;
+ default:
+ val_str = NULL;
+ }
+ if (val_str) {
+ err = devlink_fmsg_string_pair_put(fmsg, "desc", val_str);
+ if (err)
+ return err;
+ }
+
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ val = mlxsw_reg_mfde_method_get(mfde_pl);
+ switch (val) {
+ case MLXSW_REG_MFDE_METHOD_QUERY:
+ val_str = "query";
+ break;
+ case MLXSW_REG_MFDE_METHOD_WRITE:
+ val_str = "write";
+ break;
+ default:
+ val_str = NULL;
+ }
+ if (val_str) {
+ err = devlink_fmsg_string_pair_put(fmsg, "method", val_str);
+ if (err)
+ return err;
+ }
+
+ val = mlxsw_reg_mfde_long_process_get(mfde_pl);
+ err = devlink_fmsg_bool_pair_put(fmsg, "long_process", val);
+ if (err)
+ return err;
+
+ val = mlxsw_reg_mfde_command_type_get(mfde_pl);
+ switch (val) {
+ case MLXSW_REG_MFDE_COMMAND_TYPE_MAD:
+ val_str = "mad";
+ break;
+ case MLXSW_REG_MFDE_COMMAND_TYPE_EMAD:
+ val_str = "emad";
+ break;
+ case MLXSW_REG_MFDE_COMMAND_TYPE_CMDIF:
+ val_str = "cmdif";
+ break;
+ default:
+ val_str = NULL;
+ }
+ if (val_str) {
+ err = devlink_fmsg_string_pair_put(fmsg, "command_type", val_str);
+ if (err)
+ return err;
+ }
+
+ val = mlxsw_reg_mfde_reg_attr_id_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "reg_attr_id", val);
+ if (err)
+ return err;
+
+ switch (event_id) {
+ case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO:
+ return mlxsw_core_health_fw_fatal_dump_crspace_to(mfde_pl,
+ fmsg);
+ case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP:
+ return mlxsw_core_health_fw_fatal_dump_kvd_im_stop(mfde_pl,
+ fmsg);
+ case MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT:
+ return mlxsw_core_health_fw_fatal_dump_fw_assert(mfde_pl, fmsg);
+ case MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE:
+ return mlxsw_core_health_fw_fatal_dump_fatal_cause(mfde_pl,
+ fmsg);
+ }
+
+ return 0;
+}
+
+static int
+mlxsw_core_health_fw_fatal_test(struct devlink_health_reporter *reporter,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = devlink_health_reporter_priv(reporter);
+ char mfgd_pl[MLXSW_REG_MFGD_LEN];
+ int err;
+
+ /* Read the register first to make sure no other bits are changed. */
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mfgd_trigger_test_set(mfgd_pl, true);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
+}
+
+static const struct devlink_health_reporter_ops
+mlxsw_core_health_fw_fatal_ops = {
+ .name = "fw_fatal",
+ .dump = mlxsw_core_health_fw_fatal_dump,
+ .test = mlxsw_core_health_fw_fatal_test,
+};
+
+static int mlxsw_core_health_fw_fatal_config(struct mlxsw_core *mlxsw_core,
+ bool enable)
+{
+ char mfgd_pl[MLXSW_REG_MFGD_LEN];
+ int err;
+
+ /* Read the register first to make sure no other bits are changed. */
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mfgd_fatal_event_mode_set(mfgd_pl, enable);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
+}
+
+static int mlxsw_core_health_init(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct devlink_health_reporter *fw_fatal;
+ int err;
+
+ if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
+ return 0;
+
+ fw_fatal = devlink_health_reporter_create(devlink, &mlxsw_core_health_fw_fatal_ops,
+ 0, mlxsw_core);
+ if (IS_ERR(fw_fatal)) {
+ dev_err(mlxsw_core->bus_info->dev, "Failed to create fw fatal reporter");
+ return PTR_ERR(fw_fatal);
+ }
+ mlxsw_core->health.fw_fatal = fw_fatal;
+
+ err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
+ if (err)
+ goto err_trap_register;
+
+ err = mlxsw_core_health_fw_fatal_config(mlxsw_core, true);
+ if (err)
+ goto err_fw_fatal_config;
+
+ return 0;
+
+err_fw_fatal_config:
+ mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
+err_trap_register:
+ devlink_health_reporter_destroy(mlxsw_core->health.fw_fatal);
+ return err;
+}
+
+static void mlxsw_core_health_fini(struct mlxsw_core *mlxsw_core)
+{
+ if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
+ return;
+
+ mlxsw_core_health_fw_fatal_config(mlxsw_core, false);
+ mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
+ /* Make sure there is no more event work scheduled */
+ mlxsw_core_flush_owq();
+ devlink_health_reporter_destroy(mlxsw_core->health.fw_fatal);
+}
+
+static void mlxsw_core_irq_event_handler_init(struct mlxsw_core *mlxsw_core)
+{
+ INIT_LIST_HEAD(&mlxsw_core->irq_event_handler_list);
+ mutex_init(&mlxsw_core->irq_event_handler_lock);
+}
+
+static void mlxsw_core_irq_event_handler_fini(struct mlxsw_core *mlxsw_core)
+{
+ mutex_destroy(&mlxsw_core->irq_event_handler_lock);
+ WARN_ON(!list_empty(&mlxsw_core->irq_event_handler_list));
+}
+
+static int
+__mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
+ const struct mlxsw_bus *mlxsw_bus,
+ void *bus_priv, bool reload,
+ struct devlink *devlink,
+ struct netlink_ext_ack *extack)
+{
+ const char *device_kind = mlxsw_bus_info->device_kind;
+ struct mlxsw_core *mlxsw_core;
+ struct mlxsw_driver *mlxsw_driver;
+ size_t alloc_size;
+ u16 max_lag;
+ int err;
+
+ mlxsw_driver = mlxsw_core_driver_get(device_kind);
+ if (!mlxsw_driver)
+ return -EINVAL;
+
+ if (!reload) {
+ alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
+ devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size,
+ mlxsw_bus_info->dev);
+ if (!devlink) {
+ err = -ENOMEM;
+ goto err_devlink_alloc;
+ }
+ devl_lock(devlink);
+ }
+
+ mlxsw_core = devlink_priv(devlink);
+ INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
+ INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
+ mlxsw_core->driver = mlxsw_driver;
+ mlxsw_core->bus = mlxsw_bus;
+ mlxsw_core->bus_priv = bus_priv;
+ mlxsw_core->bus_info = mlxsw_bus_info;
+ mlxsw_core_irq_event_handler_init(mlxsw_core);
+
+ err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
+ &mlxsw_core->res);
+ if (err)
+ goto err_bus_init;
+
+ if (mlxsw_driver->resources_register && !reload) {
+ err = mlxsw_driver->resources_register(mlxsw_core);
+ if (err)
+ goto err_register_resources;
+ }
+
+ err = mlxsw_ports_init(mlxsw_core, reload);
+ if (err)
+ goto err_ports_init;
+
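+	/* Allocate a flat max_lag x MAX_LAG_MEMBERS table of local ports,
+	 * indexed by mlxsw_core_lag_mapping_index().
+	 */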
+ err = mlxsw_core_max_lag(mlxsw_core, &max_lag);
+ if (!err && MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
+ alloc_size = sizeof(*mlxsw_core->lag.mapping) * max_lag *
+ MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
+ mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
+ if (!mlxsw_core->lag.mapping) {
+ err = -ENOMEM;
+ goto err_alloc_lag_mapping;
+ }
+ }
+
+ err = mlxsw_core_trap_groups_set(mlxsw_core);
+ if (err)
+ goto err_trap_groups_set;
+
+ err = mlxsw_emad_init(mlxsw_core);
+ if (err)
+ goto err_emad_init;
+
+ if (!reload) {
+ err = mlxsw_core_params_register(mlxsw_core);
+ if (err)
+ goto err_register_params;
+ }
+
+ err = mlxsw_core_fw_rev_validate(mlxsw_core, mlxsw_bus_info, mlxsw_driver->fw_req_rev,
+ mlxsw_driver->fw_filename);
+ if (err)
+ goto err_fw_rev_validate;
+
+ err = mlxsw_linecards_init(mlxsw_core, mlxsw_bus_info);
+ if (err)
+ goto err_linecards_init;
+
+ err = mlxsw_core_health_init(mlxsw_core);
+ if (err)
+ goto err_health_init;
+
+ err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
+ if (err)
+ goto err_hwmon_init;
+
+ err = mlxsw_thermal_init(mlxsw_core, mlxsw_bus_info,
+ &mlxsw_core->thermal);
+ if (err)
+ goto err_thermal_init;
+
+ err = mlxsw_env_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->env);
+ if (err)
+ goto err_env_init;
+
+ if (mlxsw_driver->init) {
+ err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info, extack);
+ if (err)
+ goto err_driver_init;
+ }
+
+ if (!reload) {
+ devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ devl_unlock(devlink);
+ devlink_register(devlink);
+ }
+ return 0;
+
+err_driver_init:
+ mlxsw_env_fini(mlxsw_core->env);
+err_env_init:
+ mlxsw_thermal_fini(mlxsw_core->thermal);
+err_thermal_init:
+ mlxsw_hwmon_fini(mlxsw_core->hwmon);
+err_hwmon_init:
+ mlxsw_core_health_fini(mlxsw_core);
+err_health_init:
+ mlxsw_linecards_fini(mlxsw_core);
+err_linecards_init:
+err_fw_rev_validate:
+ if (!reload)
+ mlxsw_core_params_unregister(mlxsw_core);
+err_register_params:
+ mlxsw_emad_fini(mlxsw_core);
+err_emad_init:
+err_trap_groups_set:
+ kfree(mlxsw_core->lag.mapping);
+err_alloc_lag_mapping:
+ mlxsw_ports_fini(mlxsw_core, reload);
+err_ports_init:
+ if (!reload)
+ devl_resources_unregister(devlink);
+err_register_resources:
+ mlxsw_bus->fini(bus_priv);
+err_bus_init:
+ mlxsw_core_irq_event_handler_fini(mlxsw_core);
+ if (!reload) {
+ devl_unlock(devlink);
+ devlink_free(devlink);
+ }
+err_devlink_alloc:
+ return err;
+}
+
+int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
+ const struct mlxsw_bus *mlxsw_bus,
+ void *bus_priv, bool reload,
+ struct devlink *devlink,
+ struct netlink_ext_ack *extack)
+{
+ bool called_again = false;
+ int err;
+
+again:
+ err = __mlxsw_core_bus_device_register(mlxsw_bus_info, mlxsw_bus,
+ bus_priv, reload,
+ devlink, extack);
+	/* -EAGAIN is returned in case the FW was updated. FW needs
+	 * a reset, so let's try to call __mlxsw_core_bus_device_register()
+	 * again.
+	 */
+ if (err == -EAGAIN && !called_again) {
+ called_again = true;
+ goto again;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_core_bus_device_register);
+
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
+ bool reload)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+
+ if (!reload) {
+ devlink_unregister(devlink);
+ devl_lock(devlink);
+ }
+
+ if (devlink_is_reload_failed(devlink)) {
+ if (!reload)
+ /* Only the parts that were not de-initialized in the
+ * failed reload attempt need to be de-initialized.
+ */
+ goto reload_fail_deinit;
+ else
+ return;
+ }
+
+ if (mlxsw_core->driver->fini)
+ mlxsw_core->driver->fini(mlxsw_core);
+ mlxsw_env_fini(mlxsw_core->env);
+ mlxsw_thermal_fini(mlxsw_core->thermal);
+ mlxsw_hwmon_fini(mlxsw_core->hwmon);
+ mlxsw_core_health_fini(mlxsw_core);
+ mlxsw_linecards_fini(mlxsw_core);
+ if (!reload)
+ mlxsw_core_params_unregister(mlxsw_core);
+ mlxsw_emad_fini(mlxsw_core);
+ kfree(mlxsw_core->lag.mapping);
+ mlxsw_ports_fini(mlxsw_core, reload);
+ if (!reload)
+ devl_resources_unregister(devlink);
+ mlxsw_core->bus->fini(mlxsw_core->bus_priv);
+ mlxsw_core_irq_event_handler_fini(mlxsw_core);
+ if (!reload) {
+ devl_unlock(devlink);
+ devlink_free(devlink);
+ }
+
+ return;
+
+reload_fail_deinit:
+ mlxsw_core_params_unregister(mlxsw_core);
+ devl_resources_unregister(devlink);
+ devl_unlock(devlink);
+ devlink_free(devlink);
+}
+EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
+
+bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_tx_info *tx_info)
+{
+ return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
+ tx_info);
+}
+EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
+
+int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
+ tx_info);
+}
+EXPORT_SYMBOL(mlxsw_core_skb_transmit);
+
+void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core,
+ struct sk_buff *skb, u16 local_port)
+{
+ if (mlxsw_core->driver->ptp_transmitted)
+ mlxsw_core->driver->ptp_transmitted(mlxsw_core, skb,
+ local_port);
+}
+EXPORT_SYMBOL(mlxsw_core_ptp_transmitted);
+
+static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
+ const struct mlxsw_rx_listener *rxl_b)
+{
+ return (rxl_a->func == rxl_b->func &&
+ rxl_a->local_port == rxl_b->local_port &&
+ rxl_a->trap_id == rxl_b->trap_id &&
+ rxl_a->mirror_reason == rxl_b->mirror_reason);
+}
+
+static struct mlxsw_rx_listener_item *
+__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_rx_listener *rxl)
+{
+ struct mlxsw_rx_listener_item *rxl_item;
+
+ list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
+ if (__is_rx_listener_equal(&rxl_item->rxl, rxl))
+ return rxl_item;
+ }
+ return NULL;
+}
+
+int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_rx_listener *rxl,
+ void *priv, bool enabled)
+{
+ struct mlxsw_rx_listener_item *rxl_item;
+
+ rxl_item = __find_rx_listener_item(mlxsw_core, rxl);
+ if (rxl_item)
+ return -EEXIST;
+ rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
+ if (!rxl_item)
+ return -ENOMEM;
+ rxl_item->rxl = *rxl;
+ rxl_item->priv = priv;
+ rxl_item->enabled = enabled;
+
+ list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_core_rx_listener_register);
+
+void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_rx_listener *rxl)
+{
+ struct mlxsw_rx_listener_item *rxl_item;
+
+ rxl_item = __find_rx_listener_item(mlxsw_core, rxl);
+ if (!rxl_item)
+ return;
+ list_del_rcu(&rxl_item->list);
+ synchronize_rcu();
+ kfree(rxl_item);
+}
+EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
+
+static void
+mlxsw_core_rx_listener_state_set(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_rx_listener *rxl,
+ bool enabled)
+{
+ struct mlxsw_rx_listener_item *rxl_item;
+
+ rxl_item = __find_rx_listener_item(mlxsw_core, rxl);
+ if (WARN_ON(!rxl_item))
+ return;
+ rxl_item->enabled = enabled;
+}
+
+static void mlxsw_core_event_listener_func(struct sk_buff *skb, u16 local_port,
+ void *priv)
+{
+ struct mlxsw_event_listener_item *event_listener_item = priv;
+ struct mlxsw_core *mlxsw_core;
+ struct mlxsw_reg_info reg;
+ char *payload;
+ char *reg_tlv;
+ char *op_tlv;
+
+ mlxsw_core = event_listener_item->mlxsw_core;
+ trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
+ skb->data, skb->len);
+
+ mlxsw_emad_tlv_parse(skb);
+ op_tlv = mlxsw_emad_op_tlv(skb);
+ reg_tlv = mlxsw_emad_reg_tlv(skb);
+
+ reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
+ reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
+ payload = mlxsw_emad_reg_payload(reg_tlv);
+ event_listener_item->el.func(&reg, payload, event_listener_item->priv);
+ dev_kfree_skb(skb);
+}
+
+static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
+ const struct mlxsw_event_listener *el_b)
+{
+ return (el_a->func == el_b->func &&
+ el_a->trap_id == el_b->trap_id);
+}
+
+static struct mlxsw_event_listener_item *
+__find_event_listener_item(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_event_listener *el)
+{
+ struct mlxsw_event_listener_item *el_item;
+
+ list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
+ if (__is_event_listener_equal(&el_item->el, el))
+ return el_item;
+ }
+ return NULL;
+}
+
+int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_event_listener *el,
+ void *priv)
+{
+ int err;
+ struct mlxsw_event_listener_item *el_item;
+ const struct mlxsw_rx_listener rxl = {
+ .func = mlxsw_core_event_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = el->trap_id,
+ };
+
+ el_item = __find_event_listener_item(mlxsw_core, el);
+ if (el_item)
+ return -EEXIST;
+ el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
+ if (!el_item)
+ return -ENOMEM;
+ el_item->mlxsw_core = mlxsw_core;
+ el_item->el = *el;
+ el_item->priv = priv;
+
+ err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item, true);
+ if (err)
+ goto err_rx_listener_register;
+
+ /* No reason to save item if we did not manage to register an RX
+ * listener for it.
+ */
+ list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);
+
+ return 0;
+
+err_rx_listener_register:
+ kfree(el_item);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_core_event_listener_register);
+
+void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_event_listener *el)
+{
+ struct mlxsw_event_listener_item *el_item;
+ const struct mlxsw_rx_listener rxl = {
+ .func = mlxsw_core_event_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = el->trap_id,
+ };
+
+ el_item = __find_event_listener_item(mlxsw_core, el);
+ if (!el_item)
+ return;
+ mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl);
+ list_del(&el_item->list);
+ kfree(el_item);
+}
+EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
+
+static int mlxsw_core_listener_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listener,
+ void *priv, bool enabled)
+{
+ if (listener->is_event) {
+ WARN_ON(!enabled);
+ return mlxsw_core_event_listener_register(mlxsw_core,
+ &listener->event_listener,
+ priv);
+ } else {
+ return mlxsw_core_rx_listener_register(mlxsw_core,
+ &listener->rx_listener,
+ priv, enabled);
+ }
+}
+
+static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listener,
+ void *priv)
+{
+ if (listener->is_event)
+ mlxsw_core_event_listener_unregister(mlxsw_core,
+ &listener->event_listener);
+ else
+ mlxsw_core_rx_listener_unregister(mlxsw_core,
+ &listener->rx_listener);
+}
+
+int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listener, void *priv)
+{
+ enum mlxsw_reg_htgt_trap_group trap_group;
+ enum mlxsw_reg_hpkt_action action;
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ int err;
+
+ if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
+ return 0;
+
+ err = mlxsw_core_listener_register(mlxsw_core, listener, priv,
+ listener->enabled_on_register);
+ if (err)
+ return err;
+
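+	/* Program the trap's initial action and trap group in hardware via
+	 * the HPKT register, according to whether the listener starts out
+	 * enabled.
+	 */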
+ action = listener->enabled_on_register ? listener->en_action :
+ listener->dis_action;
+ trap_group = listener->enabled_on_register ? listener->en_trap_group :
+ listener->dis_trap_group;
+ mlxsw_reg_hpkt_pack(hpkt_pl, action, listener->trap_id,
+ trap_group, listener->is_ctrl);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+ if (err)
+ goto err_trap_set;
+
+ return 0;
+
+err_trap_set:
+ mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_core_trap_register);
+
+void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listener,
+ void *priv)
+{
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+
+ if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
+ return;
+
+ if (!listener->is_event) {
+ mlxsw_reg_hpkt_pack(hpkt_pl, listener->dis_action,
+ listener->trap_id, listener->dis_trap_group,
+ listener->is_ctrl);
+ mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+ }
+
+ mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
+}
+EXPORT_SYMBOL(mlxsw_core_trap_unregister);
+
+int mlxsw_core_traps_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listeners,
+ size_t listeners_count, void *priv)
+{
+ int i, err;
+
+ for (i = 0; i < listeners_count; i++) {
+ err = mlxsw_core_trap_register(mlxsw_core,
+ &listeners[i],
+ priv);
+ if (err)
+ goto err_listener_register;
+ }
+ return 0;
+
+err_listener_register:
+ for (i--; i >= 0; i--) {
+ mlxsw_core_trap_unregister(mlxsw_core,
+ &listeners[i],
+ priv);
+ }
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_core_traps_register);
+
+void mlxsw_core_traps_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listeners,
+ size_t listeners_count, void *priv)
+{
+ int i;
+
+ for (i = 0; i < listeners_count; i++) {
+ mlxsw_core_trap_unregister(mlxsw_core,
+ &listeners[i],
+ priv);
+ }
+}
+EXPORT_SYMBOL(mlxsw_core_traps_unregister);
+
+int mlxsw_core_trap_state_set(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listener,
+ bool enabled)
+{
+ enum mlxsw_reg_htgt_trap_group trap_group;
+ enum mlxsw_reg_hpkt_action action;
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ int err;
+
+ /* Not supported for event listener */
+ if (WARN_ON(listener->is_event))
+ return -EINVAL;
+
+ action = enabled ? listener->en_action : listener->dis_action;
+ trap_group = enabled ? listener->en_trap_group :
+ listener->dis_trap_group;
+ mlxsw_reg_hpkt_pack(hpkt_pl, action, listener->trap_id,
+ trap_group, listener->is_ctrl);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+ if (err)
+ return err;
+
+ mlxsw_core_rx_listener_state_set(mlxsw_core, &listener->rx_listener,
+ enabled);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_core_trap_state_set);
+
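+/* Transaction IDs come from a single 64-bit atomic counter so that
+ * responses can be matched back to their requests.
+ */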
+static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
+{
+ return atomic64_inc_return(&mlxsw_core->emad.tid);
+}
+
+static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg,
+ char *payload,
+ enum mlxsw_core_reg_access_type type,
+ struct list_head *bulk_list,
+ mlxsw_reg_trans_cb_t *cb,
+ unsigned long cb_priv)
+{
+ u64 tid = mlxsw_core_tid_get(mlxsw_core);
+ struct mlxsw_reg_trans *trans;
+ int err;
+
+ trans = kzalloc(sizeof(*trans), GFP_KERNEL);
+ if (!trans)
+ return -ENOMEM;
+
+ err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
+ bulk_list, cb, cb_priv, tid);
+ if (err) {
+ kfree_rcu(trans, rcu);
+ return err;
+ }
+ return 0;
+}
+
+int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload,
+ struct list_head *bulk_list,
+ mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
+{
+ return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
+ MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
+ bulk_list, cb, cb_priv);
+}
+EXPORT_SYMBOL(mlxsw_reg_trans_query);
+
+int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload,
+ struct list_head *bulk_list,
+ mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
+{
+ return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
+ MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
+ bulk_list, cb, cb_priv);
+}
+EXPORT_SYMBOL(mlxsw_reg_trans_write);
+
+#define MLXSW_REG_TRANS_ERR_STRING_SIZE 256
+
+static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
+{
+ char err_string[MLXSW_REG_TRANS_ERR_STRING_SIZE];
+ struct mlxsw_core *mlxsw_core = trans->core;
+ int err;
+
+ wait_for_completion(&trans->completion);
+ cancel_delayed_work_sync(&trans->timeout_dw);
+ err = trans->err;
+
+ if (trans->retries)
+ dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
+ trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
+ if (err) {
+ dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
+ trans->tid, trans->reg->id,
+ mlxsw_reg_id_str(trans->reg->id),
+ mlxsw_core_reg_access_type_str(trans->type),
+ trans->emad_status,
+ mlxsw_emad_op_tlv_status_str(trans->emad_status));
+
+ snprintf(err_string, MLXSW_REG_TRANS_ERR_STRING_SIZE,
+ "(tid=%llx,reg_id=%x(%s)) %s (%s)\n", trans->tid,
+ trans->reg->id, mlxsw_reg_id_str(trans->reg->id),
+ mlxsw_emad_op_tlv_status_str(trans->emad_status),
+ trans->emad_err_string ? trans->emad_err_string : "");
+
+ trace_devlink_hwerr(priv_to_devlink(mlxsw_core),
+ trans->emad_status, err_string);
+
+ kfree(trans->emad_err_string);
+ }
+
+ list_del(&trans->bulk_list);
+ kfree_rcu(trans, rcu);
+ return err;
+}
+
+int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
+{
+ struct mlxsw_reg_trans *trans;
+ struct mlxsw_reg_trans *tmp;
+ int sum_err = 0;
+ int err;
+
+ list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
+ err = mlxsw_reg_trans_wait(trans);
+ if (err && sum_err == 0)
+ sum_err = err; /* first error to be returned */
+ }
+ return sum_err;
+}
+EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
+
+struct mlxsw_core_irq_event_handler_item {
+ struct list_head list;
+ void (*cb)(struct mlxsw_core *mlxsw_core);
+};
+
+int mlxsw_core_irq_event_handler_register(struct mlxsw_core *mlxsw_core,
+ mlxsw_irq_event_cb_t cb)
+{
+ struct mlxsw_core_irq_event_handler_item *item;
+
+ item = kzalloc(sizeof(*item), GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+ item->cb = cb;
+ mutex_lock(&mlxsw_core->irq_event_handler_lock);
+ list_add_tail(&item->list, &mlxsw_core->irq_event_handler_list);
+ mutex_unlock(&mlxsw_core->irq_event_handler_lock);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_core_irq_event_handler_register);
+
+void mlxsw_core_irq_event_handler_unregister(struct mlxsw_core *mlxsw_core,
+ mlxsw_irq_event_cb_t cb)
+{
+ struct mlxsw_core_irq_event_handler_item *item, *tmp;
+
+ mutex_lock(&mlxsw_core->irq_event_handler_lock);
+ list_for_each_entry_safe(item, tmp,
+ &mlxsw_core->irq_event_handler_list, list) {
+ if (item->cb == cb) {
+ list_del(&item->list);
+ kfree(item);
+ }
+ }
+ mutex_unlock(&mlxsw_core->irq_event_handler_lock);
+}
+EXPORT_SYMBOL(mlxsw_core_irq_event_handler_unregister);
+
+void mlxsw_core_irq_event_handlers_call(struct mlxsw_core *mlxsw_core)
+{
+ struct mlxsw_core_irq_event_handler_item *item;
+
+ mutex_lock(&mlxsw_core->irq_event_handler_lock);
+ list_for_each_entry(item, &mlxsw_core->irq_event_handler_list, list) {
+ if (item->cb)
+ item->cb(mlxsw_core);
+ }
+ mutex_unlock(&mlxsw_core->irq_event_handler_lock);
+}
+EXPORT_SYMBOL(mlxsw_core_irq_event_handlers_call);
+
+static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg,
+ char *payload,
+ enum mlxsw_core_reg_access_type type)
+{
+ enum mlxsw_emad_op_tlv_status status;
+ int err, n_retry;
+ bool reset_ok;
+ char *in_mbox, *out_mbox, *tmp;
+
+ dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
+ reg->id, mlxsw_reg_id_str(reg->id),
+ mlxsw_core_reg_access_type_str(type));
+
+ in_mbox = mlxsw_cmd_mbox_alloc();
+ if (!in_mbox)
+ return -ENOMEM;
+
+ out_mbox = mlxsw_cmd_mbox_alloc();
+ if (!out_mbox) {
+ err = -ENOMEM;
+ goto free_in_mbox;
+ }
+
+ mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
+ mlxsw_core_tid_get(mlxsw_core));
+ tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
+ mlxsw_emad_pack_reg_tlv(tmp, reg, payload);
+
+	/* The MRSR (reset) register needs special treatment. The command
+	 * interface will return an error after the command is executed,
+	 * so tell the lower layer to expect it and cope accordingly.
+	 */
+ reset_ok = reg->id == MLXSW_REG_MRSR_ID;
+
+ n_retry = 0;
+retry:
+ err = mlxsw_cmd_access_reg(mlxsw_core, reset_ok, in_mbox, out_mbox);
+ if (!err) {
+ err = mlxsw_emad_process_status(out_mbox, &status);
+ if (err) {
+ if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
+ goto retry;
+ dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
+ status, mlxsw_emad_op_tlv_status_str(status));
+ }
+ }
+
+ if (!err)
+ memcpy(payload, mlxsw_emad_reg_payload_cmd(out_mbox),
+ reg->len);
+
+ mlxsw_cmd_mbox_free(out_mbox);
+free_in_mbox:
+ mlxsw_cmd_mbox_free(in_mbox);
+ if (err)
+ dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
+ reg->id, mlxsw_reg_id_str(reg->id),
+ mlxsw_core_reg_access_type_str(type));
+ return err;
+}
+
+static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
+ char *payload, size_t payload_len,
+ unsigned long cb_priv)
+{
+ char *orig_payload = (char *) cb_priv;
+
+ memcpy(orig_payload, payload, payload_len);
+}
+
+static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg,
+ char *payload,
+ enum mlxsw_core_reg_access_type type)
+{
+ LIST_HEAD(bulk_list);
+ int err;
+
+	/* During initialization the EMAD interface is not available to us,
+	 * so we default to the command interface. We switch to the EMAD
+	 * interface after setting the appropriate traps.
+	 */
+ if (!mlxsw_core->emad.use_emad)
+ return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
+ payload, type);
+
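+	/* Issue a single-transaction bulk list and wait for it, turning the
+	 * asynchronous EMAD access into a synchronous one. The callback
+	 * copies the response back into the caller's payload buffer.
+	 */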
+ err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
+ payload, type, &bulk_list,
+ mlxsw_core_reg_access_cb,
+ (unsigned long) payload);
+ if (err)
+ return err;
+ return mlxsw_reg_trans_bulk_wait(&bulk_list);
+}
+
+int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload)
+{
+ return mlxsw_core_reg_access(mlxsw_core, reg, payload,
+ MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
+}
+EXPORT_SYMBOL(mlxsw_reg_query);
+
+int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload)
+{
+ return mlxsw_core_reg_access(mlxsw_core, reg, payload,
+ MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
+}
+EXPORT_SYMBOL(mlxsw_reg_write);
+
+void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
+ struct mlxsw_rx_info *rx_info)
+{
+ struct mlxsw_rx_listener_item *rxl_item;
+ const struct mlxsw_rx_listener *rxl;
+ u16 local_port;
+ bool found = false;
+
+ if (rx_info->is_lag) {
+		dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
+				    __func__, rx_info->u.lag_id,
+				    rx_info->lag_port_index);
+ /* Upper layer does not care if the skb came from LAG or not,
+ * so just get the local_port for the lag port and push it up.
+ */
+ local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
+ rx_info->u.lag_id,
+ rx_info->lag_port_index);
+ } else {
+ local_port = rx_info->u.sys_port;
+ }
+
+ dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
+ __func__, local_port, rx_info->trap_id);
+
+ if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
+ (local_port >= mlxsw_core->max_ports))
+ goto drop;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
+ rxl = &rxl_item->rxl;
+ if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
+ rxl->local_port == local_port) &&
+ rxl->trap_id == rx_info->trap_id &&
+ rxl->mirror_reason == rx_info->mirror_reason) {
+ if (rxl_item->enabled)
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ rcu_read_unlock();
+ goto drop;
+ }
+
+ rxl->func(skb, local_port, rxl_item->priv);
+ rcu_read_unlock();
+ return;
+
+drop:
+ dev_kfree_skb(skb);
+}
+EXPORT_SYMBOL(mlxsw_core_skb_receive);
+
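+/* The LAG mapping table is laid out row-major: one row of MAX_LAG_MEMBERS
+ * local ports per LAG ID.
+ */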
+static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
+ u16 lag_id, u8 port_index)
+{
+ return MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS) * lag_id +
+ port_index;
+}
+
+void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
+ u16 lag_id, u8 port_index, u16 local_port)
+{
+ int index = mlxsw_core_lag_mapping_index(mlxsw_core,
+ lag_id, port_index);
+
+ mlxsw_core->lag.mapping[index] = local_port;
+}
+EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);
+
+u16 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
+ u16 lag_id, u8 port_index)
+{
+ int index = mlxsw_core_lag_mapping_index(mlxsw_core,
+ lag_id, port_index);
+
+ return mlxsw_core->lag.mapping[index];
+}
+EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);
+
+void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
+ u16 lag_id, u16 local_port)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); i++) {
+ int index = mlxsw_core_lag_mapping_index(mlxsw_core,
+ lag_id, i);
+
+ if (mlxsw_core->lag.mapping[index] == local_port)
+ mlxsw_core->lag.mapping[index] = 0;
+ }
+}
+EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
+
+bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
+ enum mlxsw_res_id res_id)
+{
+ return mlxsw_res_valid(&mlxsw_core->res, res_id);
+}
+EXPORT_SYMBOL(mlxsw_core_res_valid);
+
+u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
+ enum mlxsw_res_id res_id)
+{
+ return mlxsw_res_get(&mlxsw_core->res, res_id);
+}
+EXPORT_SYMBOL(mlxsw_core_res_get);
+
+static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
+ enum devlink_port_flavour flavour,
+ u8 slot_index, u32 port_number, bool split,
+ u32 split_port_subnumber,
+ bool splittable, u32 lanes,
+ const unsigned char *switch_id,
+ unsigned char switch_id_len)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct mlxsw_core_port *mlxsw_core_port =
+ &mlxsw_core->ports[local_port];
+ struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
+ struct devlink_port_attrs attrs = {};
+ int err;
+
+ attrs.split = split;
+ attrs.lanes = lanes;
+ attrs.splittable = splittable;
+ attrs.flavour = flavour;
+ attrs.phys.port_number = port_number;
+ attrs.phys.split_subport_number = split_port_subnumber;
+ memcpy(attrs.switch_id.id, switch_id, switch_id_len);
+ attrs.switch_id.id_len = switch_id_len;
+ mlxsw_core_port->local_port = local_port;
+ devlink_port_attrs_set(devlink_port, &attrs);
+ if (slot_index) {
+ struct mlxsw_linecard *linecard;
+
+ linecard = mlxsw_linecard_get(mlxsw_core->linecards,
+ slot_index);
+ mlxsw_core_port->linecard = linecard;
+ devlink_port_linecard_set(devlink_port,
+ linecard->devlink_linecard);
+ }
+ err = devl_port_register(devlink, devlink_port, local_port);
+ if (err)
+ memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
+ return err;
+}
+
+static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port)
+{
+ struct mlxsw_core_port *mlxsw_core_port =
+ &mlxsw_core->ports[local_port];
+ struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
+
+ devl_port_unregister(devlink_port);
+ memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
+}
+
+int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
+ u8 slot_index, u32 port_number, bool split,
+ u32 split_port_subnumber,
+ bool splittable, u32 lanes,
+ const unsigned char *switch_id,
+ unsigned char switch_id_len)
+{
+ int err;
+
+ err = __mlxsw_core_port_init(mlxsw_core, local_port,
+ DEVLINK_PORT_FLAVOUR_PHYSICAL, slot_index,
+ port_number, split, split_port_subnumber,
+ splittable, lanes,
+ switch_id, switch_id_len);
+ if (err)
+ return err;
+
+ atomic_inc(&mlxsw_core->active_ports_count);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_core_port_init);
+
+void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port)
+{
+ atomic_dec(&mlxsw_core->active_ports_count);
+
+ __mlxsw_core_port_fini(mlxsw_core, local_port);
+}
+EXPORT_SYMBOL(mlxsw_core_port_fini);
+
+int mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core,
+ void *port_driver_priv,
+ const unsigned char *switch_id,
+ unsigned char switch_id_len)
+{
+ struct mlxsw_core_port *mlxsw_core_port =
+ &mlxsw_core->ports[MLXSW_PORT_CPU_PORT];
+ int err;
+
+ err = __mlxsw_core_port_init(mlxsw_core, MLXSW_PORT_CPU_PORT,
+ DEVLINK_PORT_FLAVOUR_CPU,
+ 0, 0, false, 0, false, 0,
+ switch_id, switch_id_len);
+ if (err)
+ return err;
+
+ mlxsw_core_port->port_driver_priv = port_driver_priv;
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_core_cpu_port_init);
+
+void mlxsw_core_cpu_port_fini(struct mlxsw_core *mlxsw_core)
+{
+ __mlxsw_core_port_fini(mlxsw_core, MLXSW_PORT_CPU_PORT);
+}
+EXPORT_SYMBOL(mlxsw_core_cpu_port_fini);
+
+void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u16 local_port,
+ void *port_driver_priv, struct net_device *dev)
+{
+ struct mlxsw_core_port *mlxsw_core_port =
+ &mlxsw_core->ports[local_port];
+ struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
+
+ mlxsw_core_port->port_driver_priv = port_driver_priv;
+ devlink_port_type_eth_set(devlink_port, dev);
+}
+EXPORT_SYMBOL(mlxsw_core_port_eth_set);
+
+void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u16 local_port,
+ void *port_driver_priv)
+{
+ struct mlxsw_core_port *mlxsw_core_port =
+ &mlxsw_core->ports[local_port];
+ struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
+
+ mlxsw_core_port->port_driver_priv = port_driver_priv;
+ devlink_port_type_clear(devlink_port);
+}
+EXPORT_SYMBOL(mlxsw_core_port_clear);
+
+struct devlink_port *
+mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
+ u16 local_port)
+{
+ struct mlxsw_core_port *mlxsw_core_port =
+ &mlxsw_core->ports[local_port];
+ struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
+
+ return devlink_port;
+}
+EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get);
+
+struct mlxsw_linecard *
+mlxsw_core_port_linecard_get(struct mlxsw_core *mlxsw_core,
+ u16 local_port)
+{
+ struct mlxsw_core_port *mlxsw_core_port =
+ &mlxsw_core->ports[local_port];
+
+ return mlxsw_core_port->linecard;
+}
+
+void mlxsw_core_ports_remove_selected(struct mlxsw_core *mlxsw_core,
+ bool (*selector)(void *priv, u16 local_port),
+ void *priv)
+{
+ if (WARN_ON_ONCE(!mlxsw_core->driver->ports_remove_selected))
+ return;
+ mlxsw_core->driver->ports_remove_selected(mlxsw_core, selector, priv);
+}
+
+struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->env;
+}
+
+static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
+ const char *buf, size_t size)
+{
+ __be32 *m = (__be32 *) buf;
+ int i;
+ int count = size / sizeof(__be32);
+
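+	/* Skip trailing zero words and dump the rest, four 32-bit words per
+	 * line; at least one line is always printed.
+	 */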
+ for (i = count - 1; i >= 0; i--)
+ if (m[i])
+ break;
+ i++;
+ count = i ? i : 1;
+ for (i = 0; i < count; i += 4)
+ dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
+ i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
+ be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
+}
+
+int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
+ u32 in_mod, bool out_mbox_direct, bool reset_ok,
+ char *in_mbox, size_t in_mbox_size,
+ char *out_mbox, size_t out_mbox_size)
+{
+ u8 status;
+ int err;
+
+ BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
+ if (!mlxsw_core->bus->cmd_exec)
+ return -EOPNOTSUPP;
+
+ dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
+ opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
+ if (in_mbox) {
+ dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
+ mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
+ }
+
+ err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
+ opcode_mod, in_mod, out_mbox_direct,
+ in_mbox, in_mbox_size,
+ out_mbox, out_mbox_size, &status);
+
+ if (!err && out_mbox) {
+ dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
+ mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
+ }
+
+ if (reset_ok && err == -EIO &&
+ status == MLXSW_CMD_STATUS_RUNNING_RESET) {
+ err = 0;
+ } else if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
+ dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
+ opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
+ in_mod, status, mlxsw_cmd_status_str(status));
+ } else if (err == -ETIMEDOUT) {
+ dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
+ opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
+ in_mod);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_cmd_exec);
+
+int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
+{
+ return queue_delayed_work(mlxsw_wq, dwork, delay);
+}
+EXPORT_SYMBOL(mlxsw_core_schedule_dw);
+
+bool mlxsw_core_schedule_work(struct work_struct *work)
+{
+ return queue_work(mlxsw_owq, work);
+}
+EXPORT_SYMBOL(mlxsw_core_schedule_work);
+
+void mlxsw_core_flush_owq(void)
+{
+ flush_workqueue(mlxsw_owq);
+}
+EXPORT_SYMBOL(mlxsw_core_flush_owq);
+
+int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile,
+ u64 *p_single_size, u64 *p_double_size,
+ u64 *p_linear_size)
+{
+ struct mlxsw_driver *driver = mlxsw_core->driver;
+
+ if (!driver->kvd_sizes_get)
+ return -EINVAL;
+
+ return driver->kvd_sizes_get(mlxsw_core, profile,
+ p_single_size, p_double_size,
+ p_linear_size);
+}
+EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
+
+int mlxsw_core_resources_query(struct mlxsw_core *mlxsw_core, char *mbox,
+ struct mlxsw_res *res)
+{
+ int index, i;
+ u64 data;
+ u16 id;
+ int err;
+
+ mlxsw_cmd_mbox_zero(mbox);
+
+ for (index = 0; index < MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES;
+ index++) {
+ err = mlxsw_cmd_query_resources(mlxsw_core, mbox, index);
+ if (err)
+ return err;
+
+ for (i = 0; i < MLXSW_CMD_QUERY_RESOURCES_PER_QUERY; i++) {
+ id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i);
+ data = mlxsw_cmd_mbox_query_resource_data_get(mbox, i);
+
+ if (id == MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID)
+ return 0;
+
+ mlxsw_res_parse(res, id, data);
+ }
+ }
+
+	/* If after MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES queries we still
+	 * didn't get MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID, something went
+	 * wrong in the FW.
+	 */
+ return -EIO;
+}
+EXPORT_SYMBOL(mlxsw_core_resources_query);
+
+u32 mlxsw_core_read_frc_h(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->bus->read_frc_h(mlxsw_core->bus_priv);
+}
+EXPORT_SYMBOL(mlxsw_core_read_frc_h);
+
+u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->bus->read_frc_l(mlxsw_core->bus_priv);
+}
+EXPORT_SYMBOL(mlxsw_core_read_frc_l);
+
+u32 mlxsw_core_read_utc_sec(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->bus->read_utc_sec(mlxsw_core->bus_priv);
+}
+EXPORT_SYMBOL(mlxsw_core_read_utc_sec);
+
+u32 mlxsw_core_read_utc_nsec(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->bus->read_utc_nsec(mlxsw_core->bus_priv);
+}
+EXPORT_SYMBOL(mlxsw_core_read_utc_nsec);
+
+bool mlxsw_core_sdq_supports_cqe_v2(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->driver->sdq_supports_cqe_v2;
+}
+EXPORT_SYMBOL(mlxsw_core_sdq_supports_cqe_v2);
+
+void mlxsw_core_emad_string_tlv_enable(struct mlxsw_core *mlxsw_core)
+{
+ mlxsw_core->emad.enable_string_tlv = true;
+}
+EXPORT_SYMBOL(mlxsw_core_emad_string_tlv_enable);
+
+static int __init mlxsw_core_module_init(void)
+{
+ int err;
+
+ err = mlxsw_linecard_driver_register();
+ if (err)
+ return err;
+
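+	/* mlxsw_wq backs mlxsw_core_schedule_dw(); mlxsw_owq is an ordered
+	 * workqueue used by mlxsw_core_schedule_work() for work that must
+	 * not run concurrently.
+	 */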
+ mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0);
+ if (!mlxsw_wq) {
+ err = -ENOMEM;
+ goto err_alloc_workqueue;
+ }
+ mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0,
+ mlxsw_core_driver_name);
+ if (!mlxsw_owq) {
+ err = -ENOMEM;
+ goto err_alloc_ordered_workqueue;
+ }
+ return 0;
+
+err_alloc_ordered_workqueue:
+ destroy_workqueue(mlxsw_wq);
+err_alloc_workqueue:
+ mlxsw_linecard_driver_unregister();
+ return err;
+}
+
+static void __exit mlxsw_core_module_exit(void)
+{
+ destroy_workqueue(mlxsw_owq);
+ destroy_workqueue(mlxsw_wq);
+ mlxsw_linecard_driver_unregister();
+}
+
+module_init(mlxsw_core_module_init);
+module_exit(mlxsw_core_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox switch device core driver");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
new file mode 100644
index 000000000..ca0c3d2be
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -0,0 +1,674 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_CORE_H
+#define _MLXSW_CORE_H
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include <linux/net_namespace.h>
+#include <linux/auxiliary_bus.h>
+#include <net/devlink.h>
+
+#include "trap.h"
+#include "reg.h"
+#include "cmd.h"
+#include "resources.h"
+#include "../mlxfw/mlxfw.h"
+
+enum mlxsw_core_resource_id {
+ MLXSW_CORE_RESOURCE_PORTS = 1,
+ MLXSW_CORE_RESOURCE_MAX,
+};
+
+struct mlxsw_core;
+struct mlxsw_core_port;
+struct mlxsw_driver;
+struct mlxsw_bus;
+struct mlxsw_bus_info;
+struct mlxsw_fw_rev;
+
+unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core);
+
+int mlxsw_core_max_lag(struct mlxsw_core *mlxsw_core, u16 *p_max_lag);
+
+void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core);
+
+struct mlxsw_linecards *mlxsw_core_linecards(struct mlxsw_core *mlxsw_core);
+
+void mlxsw_core_linecards_set(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecard);
+
+bool
+mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
+ const struct mlxsw_fw_rev *req_rev);
+
+int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver);
+void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver);
+
+int mlxsw_core_fw_flash(struct mlxsw_core *mlxsw_core,
+ struct mlxfw_dev *mlxfw_dev,
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack);
+
+int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
+ const struct mlxsw_bus *mlxsw_bus,
+ void *bus_priv, bool reload,
+ struct devlink *devlink,
+ struct netlink_ext_ack *extack);
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, bool reload);
+
+struct mlxsw_tx_info {
+ u16 local_port;
+ bool is_emad;
+};
+
+struct mlxsw_rx_md_info {
+ u32 cookie_index;
+ u32 latency;
+ u32 tx_congestion;
+ union {
+ /* Valid when 'tx_port_valid' is set. */
+ u16 tx_sys_port;
+ u16 tx_lag_id;
+ };
+ u16 tx_lag_port_index; /* Valid when 'tx_port_is_lag' is set. */
+ u8 tx_tc;
+ u8 latency_valid:1,
+ tx_congestion_valid:1,
+ tx_tc_valid:1,
+ tx_port_valid:1,
+ tx_port_is_lag:1,
+ unused:3;
+};
+
+bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_tx_info *tx_info);
+int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core,
+ struct sk_buff *skb, u16 local_port);
+
+struct mlxsw_rx_listener {
+ void (*func)(struct sk_buff *skb, u16 local_port, void *priv);
+ u16 local_port;
+ u8 mirror_reason;
+ u16 trap_id;
+};
+
+struct mlxsw_event_listener {
+ void (*func)(const struct mlxsw_reg_info *reg,
+ char *payload, void *priv);
+ enum mlxsw_event_trap_id trap_id;
+};
+
+struct mlxsw_listener {
+ u16 trap_id;
+ union {
+ struct mlxsw_rx_listener rx_listener;
+ struct mlxsw_event_listener event_listener;
+ };
+ enum mlxsw_reg_hpkt_action en_action; /* Action when enabled */
+ enum mlxsw_reg_hpkt_action dis_action; /* Action when disabled */
+ u8 en_trap_group; /* Trap group when enabled */
+ u8 dis_trap_group; /* Trap group when disabled */
+ u8 is_ctrl:1, /* should go via control buffer or not */
+ is_event:1,
+ enabled_on_register:1; /* Trap should be enabled when listener
+ * is registered.
+ */
+};
+
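+/* Helper macros for statically initializing struct mlxsw_listener entries.
+ * The trap ID, action and trap group arguments are the suffixes of the
+ * corresponding MLXSW_TRAP_ID_*, MLXSW_REG_HPKT_ACTION_* and
+ * MLXSW_REG_HTGT_TRAP_GROUP_* constants.
+ */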
+#define __MLXSW_RXL(_func, _trap_id, _en_action, _is_ctrl, _en_trap_group, \
+ _dis_action, _enabled_on_register, _dis_trap_group, \
+ _mirror_reason) \
+ { \
+ .trap_id = MLXSW_TRAP_ID_##_trap_id, \
+ .rx_listener = \
+ { \
+ .func = _func, \
+ .local_port = MLXSW_PORT_DONT_CARE, \
+ .mirror_reason = _mirror_reason, \
+ .trap_id = MLXSW_TRAP_ID_##_trap_id, \
+ }, \
+ .en_action = MLXSW_REG_HPKT_ACTION_##_en_action, \
+ .dis_action = MLXSW_REG_HPKT_ACTION_##_dis_action, \
+ .en_trap_group = MLXSW_REG_HTGT_TRAP_GROUP_##_en_trap_group, \
+ .dis_trap_group = MLXSW_REG_HTGT_TRAP_GROUP_##_dis_trap_group, \
+ .is_ctrl = _is_ctrl, \
+ .enabled_on_register = _enabled_on_register, \
+ }
+
+#define MLXSW_RXL(_func, _trap_id, _en_action, _is_ctrl, _trap_group, \
+ _dis_action) \
+ __MLXSW_RXL(_func, _trap_id, _en_action, _is_ctrl, _trap_group, \
+ _dis_action, true, _trap_group, 0)
+
+#define MLXSW_RXL_DIS(_func, _trap_id, _en_action, _is_ctrl, _en_trap_group, \
+ _dis_action, _dis_trap_group) \
+ __MLXSW_RXL(_func, _trap_id, _en_action, _is_ctrl, _en_trap_group, \
+ _dis_action, false, _dis_trap_group, 0)
+
+#define MLXSW_RXL_MIRROR(_func, _session_id, _trap_group, _mirror_reason) \
+ __MLXSW_RXL(_func, MIRROR_SESSION##_session_id, TRAP_TO_CPU, false, \
+ _trap_group, TRAP_TO_CPU, true, _trap_group, \
+ _mirror_reason)
+
+#define MLXSW_EVENTL(_func, _trap_id, _trap_group) \
+ { \
+ .trap_id = MLXSW_TRAP_ID_##_trap_id, \
+ .event_listener = \
+ { \
+ .func = _func, \
+ .trap_id = MLXSW_TRAP_ID_##_trap_id, \
+ }, \
+ .en_action = MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU, \
+ .en_trap_group = MLXSW_REG_HTGT_TRAP_GROUP_##_trap_group, \
+ .is_event = true, \
+ .enabled_on_register = true, \
+ }
+
+#define MLXSW_CORE_EVENTL(_func, _trap_id) \
+ MLXSW_EVENTL(_func, _trap_id, CORE_EVENT)
+
+int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_rx_listener *rxl,
+ void *priv, bool enabled);
+void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_rx_listener *rxl);
+
+int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_event_listener *el,
+ void *priv);
+void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_event_listener *el);
+
+int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listener,
+ void *priv);
+void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listener,
+ void *priv);
+int mlxsw_core_traps_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listeners,
+ size_t listeners_count, void *priv);
+void mlxsw_core_traps_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listeners,
+ size_t listeners_count, void *priv);
+int mlxsw_core_trap_state_set(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listener,
+ bool enabled);
+
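+/* Asynchronous, bulked register access: callers queue one or more
+ * transactions on a bulk_list via mlxsw_reg_trans_query()/_write() and then
+ * wait for all of them with mlxsw_reg_trans_bulk_wait(). The optional
+ * callback is invoked with the response payload of each completed
+ * transaction.
+ */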
+typedef void mlxsw_reg_trans_cb_t(struct mlxsw_core *mlxsw_core, char *payload,
+ size_t payload_len, unsigned long cb_priv);
+
+int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload,
+ struct list_head *bulk_list,
+ mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv);
+int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload,
+ struct list_head *bulk_list,
+ mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv);
+int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list);
+
+typedef void mlxsw_irq_event_cb_t(struct mlxsw_core *mlxsw_core);
+
+int mlxsw_core_irq_event_handler_register(struct mlxsw_core *mlxsw_core,
+ mlxsw_irq_event_cb_t cb);
+void mlxsw_core_irq_event_handler_unregister(struct mlxsw_core *mlxsw_core,
+ mlxsw_irq_event_cb_t cb);
+void mlxsw_core_irq_event_handlers_call(struct mlxsw_core *mlxsw_core);
+
+int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload);
+int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload);
+
+struct mlxsw_rx_info {
+ bool is_lag;
+ union {
+ u16 sys_port;
+ u16 lag_id;
+ } u;
+ u16 lag_port_index;
+ u8 mirror_reason;
+ int trap_id;
+};
+
+void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
+ struct mlxsw_rx_info *rx_info);
+
+void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
+ u16 lag_id, u8 port_index, u16 local_port);
+u16 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
+ u16 lag_id, u8 port_index);
+void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
+ u16 lag_id, u16 local_port);
+
+void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port);
+int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
+ u8 slot_index, u32 port_number, bool split,
+ u32 split_port_subnumber,
+ bool splittable, u32 lanes,
+ const unsigned char *switch_id,
+ unsigned char switch_id_len);
+void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port);
+int mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core,
+ void *port_driver_priv,
+ const unsigned char *switch_id,
+ unsigned char switch_id_len);
+void mlxsw_core_cpu_port_fini(struct mlxsw_core *mlxsw_core);
+void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u16 local_port,
+ void *port_driver_priv, struct net_device *dev);
+void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u16 local_port,
+ void *port_driver_priv);
+struct devlink_port *
+mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
+ u16 local_port);
+struct mlxsw_linecard *
+mlxsw_core_port_linecard_get(struct mlxsw_core *mlxsw_core,
+ u16 local_port);
+void mlxsw_core_ports_remove_selected(struct mlxsw_core *mlxsw_core,
+ bool (*selector)(void *priv,
+ u16 local_port),
+ void *priv);
+struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core);
+
+int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay);
+bool mlxsw_core_schedule_work(struct work_struct *work);
+void mlxsw_core_flush_owq(void);
+int mlxsw_core_resources_query(struct mlxsw_core *mlxsw_core, char *mbox,
+ struct mlxsw_res *res);
+
+#define MLXSW_CONFIG_PROFILE_SWID_COUNT 8
+
+struct mlxsw_swid_config {
+ u8 used_type:1,
+ used_properties:1;
+ u8 type;
+ u8 properties;
+};
+
+struct mlxsw_config_profile {
+ u16 used_max_vepa_channels:1,
+ used_max_lag:1,
+ used_max_mid:1,
+ used_max_pgt:1,
+ used_max_system_port:1,
+ used_max_vlan_groups:1,
+ used_max_regions:1,
+ used_flood_tables:1,
+ used_flood_mode:1,
+ used_max_ib_mc:1,
+ used_max_pkey:1,
+ used_ar_sec:1,
+ used_adaptive_routing_group_cap:1,
+ used_ubridge:1,
+ used_kvd_sizes:1,
+ used_cqe_time_stamp_type:1;
+ u8 max_vepa_channels;
+ u16 max_lag;
+ u16 max_mid;
+ u16 max_pgt;
+ u16 max_system_port;
+ u16 max_vlan_groups;
+ u16 max_regions;
+ u8 max_flood_tables;
+ u8 max_vid_flood_tables;
+ u8 flood_mode;
+ u8 max_fid_offset_flood_tables;
+ u16 fid_offset_flood_table_size;
+ u8 max_fid_flood_tables;
+ u16 fid_flood_table_size;
+ u16 max_ib_mc;
+ u16 max_pkey;
+ u8 ar_sec;
+ u16 adaptive_routing_group_cap;
+ u8 arn;
+ u8 ubridge;
+ u32 kvd_linear_size;
+ u8 kvd_hash_single_parts;
+ u8 kvd_hash_double_parts;
+ u8 cqe_time_stamp_type;
+ struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT];
+};
+
+struct mlxsw_driver {
+ struct list_head list;
+ const char *kind;
+ size_t priv_size;
+ const struct mlxsw_fw_rev *fw_req_rev;
+ const char *fw_filename;
+ int (*init)(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack);
+ void (*fini)(struct mlxsw_core *mlxsw_core);
+ int (*port_split)(struct mlxsw_core *mlxsw_core, u16 local_port,
+ unsigned int count, struct netlink_ext_ack *extack);
+ int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u16 local_port,
+ struct netlink_ext_ack *extack);
+ void (*ports_remove_selected)(struct mlxsw_core *mlxsw_core,
+ bool (*selector)(void *priv,
+ u16 local_port),
+ void *priv);
+ int (*sb_pool_get)(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index, u16 pool_index,
+ struct devlink_sb_pool_info *pool_info);
+ int (*sb_pool_set)(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index, u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack);
+ int (*sb_port_pool_get)(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold);
+ int (*sb_port_pool_set)(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold, struct netlink_ext_ack *extack);
+ int (*sb_tc_pool_bind_get)(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold);
+ int (*sb_tc_pool_bind_set)(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack);
+ int (*sb_occ_snapshot)(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index);
+ int (*sb_occ_max_clear)(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index);
+ int (*sb_occ_port_pool_get)(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_cur, u32 *p_max);
+ int (*sb_occ_tc_port_bind_get)(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max);
+ int (*trap_init)(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap, void *trap_ctx);
+ void (*trap_fini)(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap, void *trap_ctx);
+ int (*trap_action_set)(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack);
+ int (*trap_group_init)(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_group *group);
+ int (*trap_group_set)(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_group *group,
+ const struct devlink_trap_policer *policer,
+ struct netlink_ext_ack *extack);
+ int (*trap_policer_init)(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer);
+ void (*trap_policer_fini)(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer);
+ int (*trap_policer_set)(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer,
+ u64 rate, u64 burst,
+ struct netlink_ext_ack *extack);
+ int (*trap_policer_counter_get)(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer,
+ u64 *p_drops);
+ void (*txhdr_construct)(struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+ int (*resources_register)(struct mlxsw_core *mlxsw_core);
+ int (*kvd_sizes_get)(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile,
+ u64 *p_single_size, u64 *p_double_size,
+ u64 *p_linear_size);
+ int (*params_register)(struct mlxsw_core *mlxsw_core);
+ void (*params_unregister)(struct mlxsw_core *mlxsw_core);
+
+ /* Notify a driver that a timestamped packet was transmitted. Driver
+ * is responsible for freeing the passed-in SKB.
+ */
+ void (*ptp_transmitted)(struct mlxsw_core *mlxsw_core,
+ struct sk_buff *skb, u16 local_port);
+
+ u8 txhdr_len;
+ const struct mlxsw_config_profile *profile;
+ bool sdq_supports_cqe_v2;
+};
+
+int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile,
+ u64 *p_single_size, u64 *p_double_size,
+ u64 *p_linear_size);
+
+u32 mlxsw_core_read_frc_h(struct mlxsw_core *mlxsw_core);
+u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core);
+
+u32 mlxsw_core_read_utc_sec(struct mlxsw_core *mlxsw_core);
+u32 mlxsw_core_read_utc_nsec(struct mlxsw_core *mlxsw_core);
+
+bool mlxsw_core_sdq_supports_cqe_v2(struct mlxsw_core *mlxsw_core);
+
+void mlxsw_core_emad_string_tlv_enable(struct mlxsw_core *mlxsw_core);
+
+bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
+ enum mlxsw_res_id res_id);
+
+#define MLXSW_CORE_RES_VALID(mlxsw_core, short_res_id) \
+ mlxsw_core_res_valid(mlxsw_core, MLXSW_RES_ID_##short_res_id)
+
+u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
+ enum mlxsw_res_id res_id);
+
+#define MLXSW_CORE_RES_GET(mlxsw_core, short_res_id) \
+ mlxsw_core_res_get(mlxsw_core, MLXSW_RES_ID_##short_res_id)
+
+static inline struct net *mlxsw_core_net(struct mlxsw_core *mlxsw_core)
+{
+ return devlink_net(priv_to_devlink(mlxsw_core));
+}
+
+#define MLXSW_BUS_F_TXRX BIT(0)
+#define MLXSW_BUS_F_RESET BIT(1)
+
+struct mlxsw_bus {
+ const char *kind;
+ int (*init)(void *bus_priv, struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile,
+ struct mlxsw_res *res);
+ void (*fini)(void *bus_priv);
+ bool (*skb_transmit_busy)(void *bus_priv,
+ const struct mlxsw_tx_info *tx_info);
+ int (*skb_transmit)(void *bus_priv, struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+ int (*cmd_exec)(void *bus_priv, u16 opcode, u8 opcode_mod,
+ u32 in_mod, bool out_mbox_direct,
+ char *in_mbox, size_t in_mbox_size,
+ char *out_mbox, size_t out_mbox_size,
+ u8 *p_status);
+ u32 (*read_frc_h)(void *bus_priv);
+ u32 (*read_frc_l)(void *bus_priv);
+ u32 (*read_utc_sec)(void *bus_priv);
+ u32 (*read_utc_nsec)(void *bus_priv);
+ u8 features;
+};
+
+struct mlxsw_fw_rev {
+ u16 major;
+ u16 minor;
+ u16 subminor;
+ u16 can_reset_minor;
+};
+
+struct mlxsw_bus_info {
+ const char *device_kind;
+ const char *device_name;
+ struct device *dev;
+ struct mlxsw_fw_rev fw_rev;
+ u8 vsd[MLXSW_CMD_BOARDINFO_VSD_LEN];
+ u8 psid[MLXSW_CMD_BOARDINFO_PSID_LEN];
+ u8 low_frequency:1,
+ read_clock_capable:1;
+};
+
+struct mlxsw_hwmon;
+
+#ifdef CONFIG_MLXSW_CORE_HWMON
+
+int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct mlxsw_hwmon **p_hwmon);
+void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon);
+
+#else
+
+static inline int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct mlxsw_hwmon **p_hwmon)
+{
+ return 0;
+}
+
+static inline void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon)
+{
+}
+
+#endif
+
+struct mlxsw_thermal;
+
+#ifdef CONFIG_MLXSW_CORE_THERMAL
+
+int mlxsw_thermal_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct mlxsw_thermal **p_thermal);
+void mlxsw_thermal_fini(struct mlxsw_thermal *thermal);
+
+#else
+
+static inline int mlxsw_thermal_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct mlxsw_thermal **p_thermal)
+{
+ return 0;
+}
+
+static inline void mlxsw_thermal_fini(struct mlxsw_thermal *thermal)
+{
+}
+
+#endif
+
+enum mlxsw_devlink_param_id {
+ MLXSW_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
+};
+
+struct mlxsw_cqe_ts {
+ u8 sec;
+ u32 nsec;
+};
+
+struct mlxsw_skb_cb {
+ union {
+ struct mlxsw_tx_info tx_info;
+ struct mlxsw_rx_md_info rx_md_info;
+ };
+ struct mlxsw_cqe_ts cqe_ts;
+};
+
+static inline struct mlxsw_skb_cb *mlxsw_skb_cb(struct sk_buff *skb)
+{
+	BUILD_BUG_ON(sizeof(struct mlxsw_skb_cb) > sizeof(skb->cb));
+ return (struct mlxsw_skb_cb *) skb->cb;
+}
+
+struct mlxsw_linecards;
+
+enum mlxsw_linecard_status_event_type {
+ MLXSW_LINECARD_STATUS_EVENT_TYPE_PROVISION,
+ MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION,
+};
+
+struct mlxsw_linecard_bdev;
+
+struct mlxsw_linecard_device_info {
+ u16 fw_major;
+ u16 fw_minor;
+ u16 fw_sub_minor;
+ char psid[MLXSW_REG_MGIR_FW_INFO_PSID_SIZE];
+};
+
+struct mlxsw_linecard {
+ u8 slot_index;
+ struct mlxsw_linecards *linecards;
+ struct devlink_linecard *devlink_linecard;
+ struct mutex lock; /* Locks accesses to the linecard structure */
+ char name[MLXSW_REG_MDDQ_SLOT_ASCII_NAME_LEN];
+ char mbct_pl[MLXSW_REG_MBCT_LEN]; /* Too big for stack */
+ enum mlxsw_linecard_status_event_type status_event_type_to;
+ struct delayed_work status_event_to_dw;
+ u8 provisioned:1,
+ ready:1,
+ active:1;
+ u16 hw_revision;
+ u16 ini_version;
+ struct mlxsw_linecard_bdev *bdev;
+ struct {
+ struct mlxsw_linecard_device_info info;
+ u8 index;
+ } device;
+};
+
+struct mlxsw_linecard_types_info;
+
+struct mlxsw_linecards {
+ struct mlxsw_core *mlxsw_core;
+ const struct mlxsw_bus_info *bus_info;
+ u8 count;
+ struct mlxsw_linecard_types_info *types_info;
+ struct list_head event_ops_list;
+ struct mutex event_ops_list_lock; /* Locks accesses to event ops list */
+ struct mlxsw_linecard linecards[];
+};
+
+static inline struct mlxsw_linecard *
+mlxsw_linecard_get(struct mlxsw_linecards *linecards, u8 slot_index)
+{
+ return &linecards->linecards[slot_index - 1];
+}
+
+int mlxsw_linecard_devlink_info_get(struct mlxsw_linecard *linecard,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack);
+int mlxsw_linecard_flash_update(struct devlink *linecard_devlink,
+ struct mlxsw_linecard *linecard,
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack);
+
+int mlxsw_linecards_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *bus_info);
+void mlxsw_linecards_fini(struct mlxsw_core *mlxsw_core);
+
+typedef void mlxsw_linecards_event_op_t(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, void *priv);
+
+struct mlxsw_linecards_event_ops {
+ mlxsw_linecards_event_op_t *got_active;
+ mlxsw_linecards_event_op_t *got_inactive;
+};
+
+int mlxsw_linecards_event_ops_register(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards_event_ops *ops,
+ void *priv);
+void mlxsw_linecards_event_ops_unregister(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards_event_ops *ops,
+ void *priv);
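+
+/* A rough registration sketch; the callback and variable names here are
+ * illustrative only, not part of this API. my_lc_got_inactive follows the
+ * same prototype as my_lc_got_active:
+ *
+ *        static void my_lc_got_active(struct mlxsw_core *mlxsw_core,
+ *                                     u8 slot_index, void *priv)
+ *        {
+ *                // react to the line card in slot_index becoming active
+ *        }
+ *
+ *        static struct mlxsw_linecards_event_ops my_lc_event_ops = {
+ *                .got_active = my_lc_got_active,
+ *                .got_inactive = my_lc_got_inactive,
+ *        };
+ *
+ *        err = mlxsw_linecards_event_ops_register(mlxsw_core,
+ *                                                 &my_lc_event_ops, priv);
+ */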
+
+int mlxsw_linecard_bdev_add(struct mlxsw_linecard *linecard);
+void mlxsw_linecard_bdev_del(struct mlxsw_linecard *linecard);
+
+int mlxsw_linecard_driver_register(void);
+void mlxsw_linecard_driver_unregister(void);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
new file mode 100644
index 000000000..9dfe71481
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -0,0 +1,2218 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/rhashtable.h>
+#include <linux/list.h>
+#include <linux/idr.h>
+#include <linux/refcount.h>
+#include <net/flow_offload.h>
+
+#include "item.h"
+#include "trap.h"
+#include "core_acl_flex_actions.h"
+
+enum mlxsw_afa_set_type {
+ MLXSW_AFA_SET_TYPE_NEXT,
+ MLXSW_AFA_SET_TYPE_GOTO,
+};
+
+/* afa_set_type
+ * Type of the record at the end of the action set.
+ */
+MLXSW_ITEM32(afa, set, type, 0xA0, 28, 4);
+
+/* afa_set_next_action_set_ptr
+ * A pointer to the next action set in the KVD Centralized database.
+ */
+MLXSW_ITEM32(afa, set, next_action_set_ptr, 0xA4, 0, 24);
+
+/* afa_set_goto_g
+ * group - When set, the binding is of an ACL group. When cleared,
+ * the binding is of an ACL.
+ * Must be set to 1 for Spectrum.
+ */
+MLXSW_ITEM32(afa, set, goto_g, 0xA4, 29, 1);
+
+enum mlxsw_afa_set_goto_binding_cmd {
+ /* continue to the next binding point */
+ MLXSW_AFA_SET_GOTO_BINDING_CMD_NONE,
+ /* jump to the next binding point, no return */
+ MLXSW_AFA_SET_GOTO_BINDING_CMD_JUMP,
+ /* terminate the acl binding */
+ MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM = 4,
+};
+
+/* afa_set_goto_binding_cmd */
+MLXSW_ITEM32(afa, set, goto_binding_cmd, 0xA4, 24, 3);
+
+/* afa_set_goto_next_binding
+ * ACL/ACL group identifier. If the g bit is set, this field should hold
+ * the acl_group_id, else it should hold the acl_id.
+ */
+MLXSW_ITEM32(afa, set, goto_next_binding, 0xA4, 0, 16);
+
+/* afa_all_action_type
+ * Action Type.
+ */
+MLXSW_ITEM32(afa, all, action_type, 0x00, 24, 6);
+
+struct mlxsw_afa {
+ unsigned int max_acts_per_set;
+ const struct mlxsw_afa_ops *ops;
+ void *ops_priv;
+ struct rhashtable set_ht;
+ struct rhashtable fwd_entry_ht;
+ struct rhashtable cookie_ht;
+ struct rhashtable policer_ht;
+ struct idr cookie_idr;
+ struct list_head policer_list;
+};
+
+#define MLXSW_AFA_SET_LEN 0xA8
+
+struct mlxsw_afa_set_ht_key {
+ char enc_actions[MLXSW_AFA_SET_LEN]; /* Encoded set */
+ bool is_first;
+};
+
+/* Set structure holds one action set record. It contains up to three
+ * actions (depends on size of particular actions). The set is either
+ * put directly to a rule, or it is stored in KVD linear area.
+ * To prevent duplicate entries in KVD linear area, a hashtable is
+ * used to track sets that were previously inserted and may be shared.
+ */
+
+struct mlxsw_afa_set {
+ struct rhash_head ht_node;
+ struct mlxsw_afa_set_ht_key ht_key;
+ u32 kvdl_index;
+ u8 shared:1, /* Inserted in hashtable (doesn't mean that
+ * kvdl_index is valid).
+ */
+ has_trap:1,
+ has_police:1;
+ unsigned int ref_count;
+ struct mlxsw_afa_set *next; /* Pointer to the next set. */
+ struct mlxsw_afa_set *prev; /* Pointer to the previous set,
+ * note that set may have multiple
+ * sets from multiple blocks
+ * pointing at it. This is only
+ * usable until commit.
+ */
+};
+
+static const struct rhashtable_params mlxsw_afa_set_ht_params = {
+ .key_len = sizeof(struct mlxsw_afa_set_ht_key),
+ .key_offset = offsetof(struct mlxsw_afa_set, ht_key),
+ .head_offset = offsetof(struct mlxsw_afa_set, ht_node),
+ .automatic_shrinking = true,
+};
+
+struct mlxsw_afa_fwd_entry_ht_key {
+ u16 local_port;
+};
+
+struct mlxsw_afa_fwd_entry {
+ struct rhash_head ht_node;
+ struct mlxsw_afa_fwd_entry_ht_key ht_key;
+ u32 kvdl_index;
+ unsigned int ref_count;
+};
+
+static const struct rhashtable_params mlxsw_afa_fwd_entry_ht_params = {
+ .key_len = sizeof(struct mlxsw_afa_fwd_entry_ht_key),
+ .key_offset = offsetof(struct mlxsw_afa_fwd_entry, ht_key),
+ .head_offset = offsetof(struct mlxsw_afa_fwd_entry, ht_node),
+ .automatic_shrinking = true,
+};
+
+struct mlxsw_afa_cookie {
+ struct rhash_head ht_node;
+ refcount_t ref_count;
+ struct rcu_head rcu;
+ u32 cookie_index;
+ struct flow_action_cookie fa_cookie;
+};
+
+static u32 mlxsw_afa_cookie_hash(const struct flow_action_cookie *fa_cookie,
+ u32 seed)
+{
+ return jhash2((u32 *) fa_cookie->cookie,
+ fa_cookie->cookie_len / sizeof(u32), seed);
+}
+
+static u32 mlxsw_afa_cookie_key_hashfn(const void *data, u32 len, u32 seed)
+{
+ const struct flow_action_cookie *fa_cookie = data;
+
+ return mlxsw_afa_cookie_hash(fa_cookie, seed);
+}
+
+static u32 mlxsw_afa_cookie_obj_hashfn(const void *data, u32 len, u32 seed)
+{
+ const struct mlxsw_afa_cookie *cookie = data;
+
+ return mlxsw_afa_cookie_hash(&cookie->fa_cookie, seed);
+}
+
+static int mlxsw_afa_cookie_obj_cmpfn(struct rhashtable_compare_arg *arg,
+ const void *obj)
+{
+ const struct flow_action_cookie *fa_cookie = arg->key;
+ const struct mlxsw_afa_cookie *cookie = obj;
+
+ if (cookie->fa_cookie.cookie_len == fa_cookie->cookie_len)
+ return memcmp(cookie->fa_cookie.cookie, fa_cookie->cookie,
+ fa_cookie->cookie_len);
+ return 1;
+}
+
+static const struct rhashtable_params mlxsw_afa_cookie_ht_params = {
+ .head_offset = offsetof(struct mlxsw_afa_cookie, ht_node),
+ .hashfn = mlxsw_afa_cookie_key_hashfn,
+ .obj_hashfn = mlxsw_afa_cookie_obj_hashfn,
+ .obj_cmpfn = mlxsw_afa_cookie_obj_cmpfn,
+ .automatic_shrinking = true,
+};
+
+struct mlxsw_afa_policer {
+ struct rhash_head ht_node;
+ struct list_head list; /* Member of policer_list */
+ refcount_t ref_count;
+ u32 fa_index;
+ u16 policer_index;
+};
+
+static const struct rhashtable_params mlxsw_afa_policer_ht_params = {
+ .key_len = sizeof(u32),
+ .key_offset = offsetof(struct mlxsw_afa_policer, fa_index),
+ .head_offset = offsetof(struct mlxsw_afa_policer, ht_node),
+ .automatic_shrinking = true,
+};
+
+struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set,
+ const struct mlxsw_afa_ops *ops,
+ void *ops_priv)
+{
+ struct mlxsw_afa *mlxsw_afa;
+ int err;
+
+ mlxsw_afa = kzalloc(sizeof(*mlxsw_afa), GFP_KERNEL);
+ if (!mlxsw_afa)
+ return ERR_PTR(-ENOMEM);
+ err = rhashtable_init(&mlxsw_afa->set_ht, &mlxsw_afa_set_ht_params);
+ if (err)
+ goto err_set_rhashtable_init;
+ err = rhashtable_init(&mlxsw_afa->fwd_entry_ht,
+ &mlxsw_afa_fwd_entry_ht_params);
+ if (err)
+ goto err_fwd_entry_rhashtable_init;
+ err = rhashtable_init(&mlxsw_afa->cookie_ht,
+ &mlxsw_afa_cookie_ht_params);
+ if (err)
+ goto err_cookie_rhashtable_init;
+ err = rhashtable_init(&mlxsw_afa->policer_ht,
+ &mlxsw_afa_policer_ht_params);
+ if (err)
+ goto err_policer_rhashtable_init;
+ idr_init(&mlxsw_afa->cookie_idr);
+ INIT_LIST_HEAD(&mlxsw_afa->policer_list);
+ mlxsw_afa->max_acts_per_set = max_acts_per_set;
+ mlxsw_afa->ops = ops;
+ mlxsw_afa->ops_priv = ops_priv;
+ return mlxsw_afa;
+
+err_policer_rhashtable_init:
+ rhashtable_destroy(&mlxsw_afa->cookie_ht);
+err_cookie_rhashtable_init:
+ rhashtable_destroy(&mlxsw_afa->fwd_entry_ht);
+err_fwd_entry_rhashtable_init:
+ rhashtable_destroy(&mlxsw_afa->set_ht);
+err_set_rhashtable_init:
+ kfree(mlxsw_afa);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(mlxsw_afa_create);
+
+void mlxsw_afa_destroy(struct mlxsw_afa *mlxsw_afa)
+{
+ WARN_ON(!list_empty(&mlxsw_afa->policer_list));
+ WARN_ON(!idr_is_empty(&mlxsw_afa->cookie_idr));
+ idr_destroy(&mlxsw_afa->cookie_idr);
+ rhashtable_destroy(&mlxsw_afa->policer_ht);
+ rhashtable_destroy(&mlxsw_afa->cookie_ht);
+ rhashtable_destroy(&mlxsw_afa->fwd_entry_ht);
+ rhashtable_destroy(&mlxsw_afa->set_ht);
+ kfree(mlxsw_afa);
+}
+EXPORT_SYMBOL(mlxsw_afa_destroy);
+
+static void mlxsw_afa_set_goto_set(struct mlxsw_afa_set *set,
+ enum mlxsw_afa_set_goto_binding_cmd cmd,
+ u16 group_id)
+{
+ char *actions = set->ht_key.enc_actions;
+
+ mlxsw_afa_set_type_set(actions, MLXSW_AFA_SET_TYPE_GOTO);
+ mlxsw_afa_set_goto_g_set(actions, true);
+ mlxsw_afa_set_goto_binding_cmd_set(actions, cmd);
+ mlxsw_afa_set_goto_next_binding_set(actions, group_id);
+}
+
+static void mlxsw_afa_set_next_set(struct mlxsw_afa_set *set,
+ u32 next_set_kvdl_index)
+{
+ char *actions = set->ht_key.enc_actions;
+
+ mlxsw_afa_set_type_set(actions, MLXSW_AFA_SET_TYPE_NEXT);
+ mlxsw_afa_set_next_action_set_ptr_set(actions, next_set_kvdl_index);
+}
+
+static struct mlxsw_afa_set *mlxsw_afa_set_create(bool is_first)
+{
+ struct mlxsw_afa_set *set;
+
+ set = kzalloc(sizeof(*set), GFP_KERNEL);
+ if (!set)
+ return NULL;
+ /* Need to initialize the set to pass by default */
+ mlxsw_afa_set_goto_set(set, MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0);
+ set->ht_key.is_first = is_first;
+ set->ref_count = 1;
+ return set;
+}
+
+static void mlxsw_afa_set_destroy(struct mlxsw_afa_set *set)
+{
+ kfree(set);
+}
+
+static int mlxsw_afa_set_share(struct mlxsw_afa *mlxsw_afa,
+ struct mlxsw_afa_set *set)
+{
+ int err;
+
+ err = rhashtable_insert_fast(&mlxsw_afa->set_ht, &set->ht_node,
+ mlxsw_afa_set_ht_params);
+ if (err)
+ return err;
+ err = mlxsw_afa->ops->kvdl_set_add(mlxsw_afa->ops_priv,
+ &set->kvdl_index,
+ set->ht_key.enc_actions,
+ set->ht_key.is_first);
+ if (err)
+ goto err_kvdl_set_add;
+ set->shared = true;
+ set->prev = NULL;
+ return 0;
+
+err_kvdl_set_add:
+ rhashtable_remove_fast(&mlxsw_afa->set_ht, &set->ht_node,
+ mlxsw_afa_set_ht_params);
+ return err;
+}
+
+static void mlxsw_afa_set_unshare(struct mlxsw_afa *mlxsw_afa,
+ struct mlxsw_afa_set *set)
+{
+ mlxsw_afa->ops->kvdl_set_del(mlxsw_afa->ops_priv,
+ set->kvdl_index,
+ set->ht_key.is_first);
+ rhashtable_remove_fast(&mlxsw_afa->set_ht, &set->ht_node,
+ mlxsw_afa_set_ht_params);
+ set->shared = false;
+}
+
+static void mlxsw_afa_set_put(struct mlxsw_afa *mlxsw_afa,
+ struct mlxsw_afa_set *set)
+{
+ if (--set->ref_count)
+ return;
+ if (set->shared)
+ mlxsw_afa_set_unshare(mlxsw_afa, set);
+ mlxsw_afa_set_destroy(set);
+}
+
+static struct mlxsw_afa_set *mlxsw_afa_set_get(struct mlxsw_afa *mlxsw_afa,
+ struct mlxsw_afa_set *orig_set)
+{
+ struct mlxsw_afa_set *set;
+ int err;
+
+ /* There is a hashtable of sets maintained. If a set with the exact
+ * same encoding exists, we reuse it. Otherwise, the current set
+ * is shared by making it available to others using the hash table.
+ */
+ set = rhashtable_lookup_fast(&mlxsw_afa->set_ht, &orig_set->ht_key,
+ mlxsw_afa_set_ht_params);
+ if (set) {
+ set->ref_count++;
+ mlxsw_afa_set_put(mlxsw_afa, orig_set);
+ } else {
+ set = orig_set;
+ err = mlxsw_afa_set_share(mlxsw_afa, set);
+ if (err)
+ return ERR_PTR(err);
+ }
+ return set;
+}
+
+/* Block structure holds a list of action sets. One action block
+ * represents one chain of actions executed upon match of a rule.
+ */
+
+struct mlxsw_afa_block {
+ struct mlxsw_afa *afa;
+ bool finished;
+ struct mlxsw_afa_set *first_set;
+ struct mlxsw_afa_set *cur_set;
+ unsigned int cur_act_index; /* In current set. */
+ struct list_head resource_list; /* List of resources held by actions
+ * in this block.
+ */
+};
+
+struct mlxsw_afa_resource {
+ struct list_head list;
+ void (*destructor)(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_resource *resource);
+};
+
+static void mlxsw_afa_resource_add(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_resource *resource)
+{
+ list_add(&resource->list, &block->resource_list);
+}
+
+static void mlxsw_afa_resource_del(struct mlxsw_afa_resource *resource)
+{
+ list_del(&resource->list);
+}
+
+static void mlxsw_afa_resources_destroy(struct mlxsw_afa_block *block)
+{
+ struct mlxsw_afa_resource *resource, *tmp;
+
+ list_for_each_entry_safe(resource, tmp, &block->resource_list, list) {
+ resource->destructor(block, resource);
+ }
+}
+
+struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa)
+{
+ struct mlxsw_afa_block *block;
+
+ block = kzalloc(sizeof(*block), GFP_KERNEL);
+ if (!block)
+ return ERR_PTR(-ENOMEM);
+ INIT_LIST_HEAD(&block->resource_list);
+ block->afa = mlxsw_afa;
+
+ /* At least one action set is always present, so just create it here */
+ block->first_set = mlxsw_afa_set_create(true);
+ if (!block->first_set)
+ goto err_first_set_create;
+
+ /* In case the user instructs to have a dummy first set, we leave it
+ * empty here and create another, real, set right away.
+ */
+ if (mlxsw_afa->ops->dummy_first_set) {
+ block->cur_set = mlxsw_afa_set_create(false);
+ if (!block->cur_set)
+ goto err_second_set_create;
+ block->cur_set->prev = block->first_set;
+ block->first_set->next = block->cur_set;
+ } else {
+ block->cur_set = block->first_set;
+ }
+
+ return block;
+
+err_second_set_create:
+ mlxsw_afa_set_destroy(block->first_set);
+err_first_set_create:
+ kfree(block);
+ return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL(mlxsw_afa_block_create);
+
+void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block)
+{
+ struct mlxsw_afa_set *set = block->first_set;
+ struct mlxsw_afa_set *next_set;
+
+ do {
+ next_set = set->next;
+ mlxsw_afa_set_put(block->afa, set);
+ set = next_set;
+ } while (set);
+ mlxsw_afa_resources_destroy(block);
+ kfree(block);
+}
+EXPORT_SYMBOL(mlxsw_afa_block_destroy);
+
+int mlxsw_afa_block_commit(struct mlxsw_afa_block *block)
+{
+ struct mlxsw_afa_set *set = block->cur_set;
+ struct mlxsw_afa_set *prev_set;
+
+ block->cur_set = NULL;
+ block->finished = true;
+
+ /* Go over all linked sets starting from the last one
+ * and try to find an existing set in the hash table.
+ * In case it is not there, assign a KVD linear index
+ * and insert it.
+ */
+ do {
+ prev_set = set->prev;
+ set = mlxsw_afa_set_get(block->afa, set);
+ if (IS_ERR(set))
+ /* No rollback is needed since the chain is
+ * in consistent state and mlxsw_afa_block_destroy
+ * will take care of putting it away.
+ */
+ return PTR_ERR(set);
+ if (prev_set) {
+ prev_set->next = set;
+ mlxsw_afa_set_next_set(prev_set, set->kvdl_index);
+ set = prev_set;
+ }
+ } while (prev_set);
+
+ block->first_set = set;
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_commit);
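+
+/* A condensed sketch of the intended calling sequence from a rule writer's
+ * point of view; error handling is trimmed and the mlxsw_afa instance is
+ * assumed to have been set up earlier with mlxsw_afa_create():
+ *
+ *        struct mlxsw_afa_block *block;
+ *        int err;
+ *
+ *        block = mlxsw_afa_block_create(mlxsw_afa);
+ *        if (IS_ERR(block))
+ *                return PTR_ERR(block);
+ *        err = mlxsw_afa_block_append_trap(block, trap_id);
+ *        if (err)
+ *                goto err_destroy;
+ *        err = mlxsw_afa_block_commit(block);
+ *        if (err)
+ *                goto err_destroy;
+ *        // mlxsw_afa_block_first_set(block) can now be copied into the rule
+ *        // entry; mlxsw_afa_block_destroy(block) releases everything later.
+ */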
+
+char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block)
+{
+ return block->first_set->ht_key.enc_actions;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_first_set);
+
+char *mlxsw_afa_block_cur_set(struct mlxsw_afa_block *block)
+{
+ return block->cur_set->ht_key.enc_actions;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_cur_set);
+
+u32 mlxsw_afa_block_first_kvdl_index(struct mlxsw_afa_block *block)
+{
+ /* First set is never in KVD linear. So the first set
+ * with valid KVD linear index is always the second one.
+ */
+ if (WARN_ON(!block->first_set->next))
+ return 0;
+ return block->first_set->next->kvdl_index;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_first_kvdl_index);
+
+int mlxsw_afa_block_activity_get(struct mlxsw_afa_block *block, bool *activity)
+{
+ u32 kvdl_index = mlxsw_afa_block_first_kvdl_index(block);
+
+ return block->afa->ops->kvdl_set_activity_get(block->afa->ops_priv,
+ kvdl_index, activity);
+}
+EXPORT_SYMBOL(mlxsw_afa_block_activity_get);
+
+int mlxsw_afa_block_continue(struct mlxsw_afa_block *block)
+{
+ if (block->finished)
+ return -EINVAL;
+ mlxsw_afa_set_goto_set(block->cur_set,
+ MLXSW_AFA_SET_GOTO_BINDING_CMD_NONE, 0);
+ block->finished = true;
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_continue);
+
+int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id)
+{
+ if (block->finished)
+ return -EINVAL;
+ mlxsw_afa_set_goto_set(block->cur_set,
+ MLXSW_AFA_SET_GOTO_BINDING_CMD_JUMP, group_id);
+ block->finished = true;
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_jump);
+
+int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block)
+{
+ if (block->finished)
+ return -EINVAL;
+ mlxsw_afa_set_goto_set(block->cur_set,
+ MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0);
+ block->finished = true;
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_terminate);
+
+static struct mlxsw_afa_fwd_entry *
+mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u16 local_port)
+{
+ struct mlxsw_afa_fwd_entry *fwd_entry;
+ int err;
+
+ fwd_entry = kzalloc(sizeof(*fwd_entry), GFP_KERNEL);
+ if (!fwd_entry)
+ return ERR_PTR(-ENOMEM);
+ fwd_entry->ht_key.local_port = local_port;
+ fwd_entry->ref_count = 1;
+
+ err = rhashtable_insert_fast(&mlxsw_afa->fwd_entry_ht,
+ &fwd_entry->ht_node,
+ mlxsw_afa_fwd_entry_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ err = mlxsw_afa->ops->kvdl_fwd_entry_add(mlxsw_afa->ops_priv,
+ &fwd_entry->kvdl_index,
+ local_port);
+ if (err)
+ goto err_kvdl_fwd_entry_add;
+ return fwd_entry;
+
+err_kvdl_fwd_entry_add:
+ rhashtable_remove_fast(&mlxsw_afa->fwd_entry_ht, &fwd_entry->ht_node,
+ mlxsw_afa_fwd_entry_ht_params);
+err_rhashtable_insert:
+ kfree(fwd_entry);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_afa_fwd_entry_destroy(struct mlxsw_afa *mlxsw_afa,
+ struct mlxsw_afa_fwd_entry *fwd_entry)
+{
+ mlxsw_afa->ops->kvdl_fwd_entry_del(mlxsw_afa->ops_priv,
+ fwd_entry->kvdl_index);
+ rhashtable_remove_fast(&mlxsw_afa->fwd_entry_ht, &fwd_entry->ht_node,
+ mlxsw_afa_fwd_entry_ht_params);
+ kfree(fwd_entry);
+}
+
+static struct mlxsw_afa_fwd_entry *
+mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u16 local_port)
+{
+ struct mlxsw_afa_fwd_entry_ht_key ht_key = {0};
+ struct mlxsw_afa_fwd_entry *fwd_entry;
+
+ ht_key.local_port = local_port;
+ fwd_entry = rhashtable_lookup_fast(&mlxsw_afa->fwd_entry_ht, &ht_key,
+ mlxsw_afa_fwd_entry_ht_params);
+ if (fwd_entry) {
+ fwd_entry->ref_count++;
+ return fwd_entry;
+ }
+ return mlxsw_afa_fwd_entry_create(mlxsw_afa, local_port);
+}
+
+static void mlxsw_afa_fwd_entry_put(struct mlxsw_afa *mlxsw_afa,
+ struct mlxsw_afa_fwd_entry *fwd_entry)
+{
+ if (--fwd_entry->ref_count)
+ return;
+ mlxsw_afa_fwd_entry_destroy(mlxsw_afa, fwd_entry);
+}
+
+struct mlxsw_afa_fwd_entry_ref {
+ struct mlxsw_afa_resource resource;
+ struct mlxsw_afa_fwd_entry *fwd_entry;
+};
+
+static void
+mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref)
+{
+ mlxsw_afa_resource_del(&fwd_entry_ref->resource);
+ mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry);
+ kfree(fwd_entry_ref);
+}
+
+static void
+mlxsw_afa_fwd_entry_ref_destructor(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_resource *resource)
+{
+ struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
+
+ fwd_entry_ref = container_of(resource, struct mlxsw_afa_fwd_entry_ref,
+ resource);
+ mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref);
+}
+
+static struct mlxsw_afa_fwd_entry_ref *
+mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u16 local_port)
+{
+ struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
+ struct mlxsw_afa_fwd_entry *fwd_entry;
+ int err;
+
+ fwd_entry_ref = kzalloc(sizeof(*fwd_entry_ref), GFP_KERNEL);
+ if (!fwd_entry_ref)
+ return ERR_PTR(-ENOMEM);
+ fwd_entry = mlxsw_afa_fwd_entry_get(block->afa, local_port);
+ if (IS_ERR(fwd_entry)) {
+ err = PTR_ERR(fwd_entry);
+ goto err_fwd_entry_get;
+ }
+ fwd_entry_ref->fwd_entry = fwd_entry;
+ fwd_entry_ref->resource.destructor = mlxsw_afa_fwd_entry_ref_destructor;
+ mlxsw_afa_resource_add(block, &fwd_entry_ref->resource);
+ return fwd_entry_ref;
+
+err_fwd_entry_get:
+ kfree(fwd_entry_ref);
+ return ERR_PTR(err);
+}
+
+struct mlxsw_afa_counter {
+ struct mlxsw_afa_resource resource;
+ u32 counter_index;
+};
+
+static void
+mlxsw_afa_counter_destroy(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_counter *counter)
+{
+ mlxsw_afa_resource_del(&counter->resource);
+ block->afa->ops->counter_index_put(block->afa->ops_priv,
+ counter->counter_index);
+ kfree(counter);
+}
+
+static void
+mlxsw_afa_counter_destructor(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_resource *resource)
+{
+ struct mlxsw_afa_counter *counter;
+
+ counter = container_of(resource, struct mlxsw_afa_counter, resource);
+ mlxsw_afa_counter_destroy(block, counter);
+}
+
+static struct mlxsw_afa_counter *
+mlxsw_afa_counter_create(struct mlxsw_afa_block *block)
+{
+ struct mlxsw_afa_counter *counter;
+ int err;
+
+ counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+ if (!counter)
+ return ERR_PTR(-ENOMEM);
+
+ err = block->afa->ops->counter_index_get(block->afa->ops_priv,
+ &counter->counter_index);
+ if (err)
+ goto err_counter_index_get;
+ counter->resource.destructor = mlxsw_afa_counter_destructor;
+ mlxsw_afa_resource_add(block, &counter->resource);
+ return counter;
+
+err_counter_index_get:
+ kfree(counter);
+ return ERR_PTR(err);
+}
+
+/* 20 bits is the maximum that the hardware can handle in the trap with
+ * userdef action and carry along with the trapped packet.
+ */
+#define MLXSW_AFA_COOKIE_INDEX_BITS 20
+#define MLXSW_AFA_COOKIE_INDEX_MAX ((1 << MLXSW_AFA_COOKIE_INDEX_BITS) - 1)
+
+static struct mlxsw_afa_cookie *
+mlxsw_afa_cookie_create(struct mlxsw_afa *mlxsw_afa,
+ const struct flow_action_cookie *fa_cookie)
+{
+ struct mlxsw_afa_cookie *cookie;
+ u32 cookie_index;
+ int err;
+
+ cookie = kzalloc(sizeof(*cookie) + fa_cookie->cookie_len, GFP_KERNEL);
+ if (!cookie)
+ return ERR_PTR(-ENOMEM);
+ refcount_set(&cookie->ref_count, 1);
+ cookie->fa_cookie = *fa_cookie;
+ memcpy(cookie->fa_cookie.cookie, fa_cookie->cookie,
+ fa_cookie->cookie_len);
+
+ err = rhashtable_insert_fast(&mlxsw_afa->cookie_ht, &cookie->ht_node,
+ mlxsw_afa_cookie_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ /* Start cookie indexes at 1 and leave index 0 unused. Packets that
+ * come from the HW and were not dropped by a drop-with-cookie action
+ * pass cookie_index 0 to the lookup.
+ */
+ cookie_index = 1;
+ err = idr_alloc_u32(&mlxsw_afa->cookie_idr, cookie, &cookie_index,
+ MLXSW_AFA_COOKIE_INDEX_MAX, GFP_KERNEL);
+ if (err)
+ goto err_idr_alloc;
+ cookie->cookie_index = cookie_index;
+ return cookie;
+
+err_idr_alloc:
+ rhashtable_remove_fast(&mlxsw_afa->cookie_ht, &cookie->ht_node,
+ mlxsw_afa_cookie_ht_params);
+err_rhashtable_insert:
+ kfree(cookie);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_afa_cookie_destroy(struct mlxsw_afa *mlxsw_afa,
+ struct mlxsw_afa_cookie *cookie)
+{
+ idr_remove(&mlxsw_afa->cookie_idr, cookie->cookie_index);
+ rhashtable_remove_fast(&mlxsw_afa->cookie_ht, &cookie->ht_node,
+ mlxsw_afa_cookie_ht_params);
+ kfree_rcu(cookie, rcu);
+}
+
+static struct mlxsw_afa_cookie *
+mlxsw_afa_cookie_get(struct mlxsw_afa *mlxsw_afa,
+ const struct flow_action_cookie *fa_cookie)
+{
+ struct mlxsw_afa_cookie *cookie;
+
+ cookie = rhashtable_lookup_fast(&mlxsw_afa->cookie_ht, fa_cookie,
+ mlxsw_afa_cookie_ht_params);
+ if (cookie) {
+ refcount_inc(&cookie->ref_count);
+ return cookie;
+ }
+ return mlxsw_afa_cookie_create(mlxsw_afa, fa_cookie);
+}
+
+static void mlxsw_afa_cookie_put(struct mlxsw_afa *mlxsw_afa,
+ struct mlxsw_afa_cookie *cookie)
+{
+ if (!refcount_dec_and_test(&cookie->ref_count))
+ return;
+ mlxsw_afa_cookie_destroy(mlxsw_afa, cookie);
+}
+
+/* RCU read lock must be held */
+const struct flow_action_cookie *
+mlxsw_afa_cookie_lookup(struct mlxsw_afa *mlxsw_afa, u32 cookie_index)
+{
+ struct mlxsw_afa_cookie *cookie;
+
+ /* 0 index means no cookie */
+ if (!cookie_index)
+ return NULL;
+ cookie = idr_find(&mlxsw_afa->cookie_idr, cookie_index);
+ if (!cookie)
+ return NULL;
+ return &cookie->fa_cookie;
+}
+EXPORT_SYMBOL(mlxsw_afa_cookie_lookup);
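+
+/* A small sketch of the consumer side: when a dropped packet reaches the CPU
+ * carrying a cookie index in its completion metadata, the original flow
+ * cookie is recovered under RCU:
+ *
+ *        rcu_read_lock();
+ *        fa_cookie = mlxsw_afa_cookie_lookup(mlxsw_afa, cookie_index);
+ *        if (fa_cookie) {
+ *                // report fa_cookie->cookie / fa_cookie->cookie_len upwards
+ *        }
+ *        rcu_read_unlock();
+ */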
+
+struct mlxsw_afa_cookie_ref {
+ struct mlxsw_afa_resource resource;
+ struct mlxsw_afa_cookie *cookie;
+};
+
+static void
+mlxsw_afa_cookie_ref_destroy(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_cookie_ref *cookie_ref)
+{
+ mlxsw_afa_resource_del(&cookie_ref->resource);
+ mlxsw_afa_cookie_put(block->afa, cookie_ref->cookie);
+ kfree(cookie_ref);
+}
+
+static void
+mlxsw_afa_cookie_ref_destructor(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_resource *resource)
+{
+ struct mlxsw_afa_cookie_ref *cookie_ref;
+
+ cookie_ref = container_of(resource, struct mlxsw_afa_cookie_ref,
+ resource);
+ mlxsw_afa_cookie_ref_destroy(block, cookie_ref);
+}
+
+static struct mlxsw_afa_cookie_ref *
+mlxsw_afa_cookie_ref_create(struct mlxsw_afa_block *block,
+ const struct flow_action_cookie *fa_cookie)
+{
+ struct mlxsw_afa_cookie_ref *cookie_ref;
+ struct mlxsw_afa_cookie *cookie;
+ int err;
+
+ cookie_ref = kzalloc(sizeof(*cookie_ref), GFP_KERNEL);
+ if (!cookie_ref)
+ return ERR_PTR(-ENOMEM);
+ cookie = mlxsw_afa_cookie_get(block->afa, fa_cookie);
+ if (IS_ERR(cookie)) {
+ err = PTR_ERR(cookie);
+ goto err_cookie_get;
+ }
+ cookie_ref->cookie = cookie;
+ cookie_ref->resource.destructor = mlxsw_afa_cookie_ref_destructor;
+ mlxsw_afa_resource_add(block, &cookie_ref->resource);
+ return cookie_ref;
+
+err_cookie_get:
+ kfree(cookie_ref);
+ return ERR_PTR(err);
+}
+
+static struct mlxsw_afa_policer *
+mlxsw_afa_policer_create(struct mlxsw_afa *mlxsw_afa, u32 fa_index,
+ u64 rate_bytes_ps, u32 burst,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_afa_policer *policer;
+ int err;
+
+ policer = kzalloc(sizeof(*policer), GFP_KERNEL);
+ if (!policer)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlxsw_afa->ops->policer_add(mlxsw_afa->ops_priv, rate_bytes_ps,
+ burst, &policer->policer_index,
+ extack);
+ if (err)
+ goto err_policer_add;
+
+ refcount_set(&policer->ref_count, 1);
+ policer->fa_index = fa_index;
+
+ err = rhashtable_insert_fast(&mlxsw_afa->policer_ht, &policer->ht_node,
+ mlxsw_afa_policer_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ list_add_tail(&policer->list, &mlxsw_afa->policer_list);
+
+ return policer;
+
+err_rhashtable_insert:
+ mlxsw_afa->ops->policer_del(mlxsw_afa->ops_priv,
+ policer->policer_index);
+err_policer_add:
+ kfree(policer);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_afa_policer_destroy(struct mlxsw_afa *mlxsw_afa,
+ struct mlxsw_afa_policer *policer)
+{
+ list_del(&policer->list);
+ rhashtable_remove_fast(&mlxsw_afa->policer_ht, &policer->ht_node,
+ mlxsw_afa_policer_ht_params);
+ mlxsw_afa->ops->policer_del(mlxsw_afa->ops_priv,
+ policer->policer_index);
+ kfree(policer);
+}
+
+static struct mlxsw_afa_policer *
+mlxsw_afa_policer_get(struct mlxsw_afa *mlxsw_afa, u32 fa_index,
+ u64 rate_bytes_ps, u32 burst,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_afa_policer *policer;
+
+ policer = rhashtable_lookup_fast(&mlxsw_afa->policer_ht, &fa_index,
+ mlxsw_afa_policer_ht_params);
+ if (policer) {
+ refcount_inc(&policer->ref_count);
+ return policer;
+ }
+
+ return mlxsw_afa_policer_create(mlxsw_afa, fa_index, rate_bytes_ps,
+ burst, extack);
+}
+
+static void mlxsw_afa_policer_put(struct mlxsw_afa *mlxsw_afa,
+ struct mlxsw_afa_policer *policer)
+{
+ if (!refcount_dec_and_test(&policer->ref_count))
+ return;
+ mlxsw_afa_policer_destroy(mlxsw_afa, policer);
+}
+
+struct mlxsw_afa_policer_ref {
+ struct mlxsw_afa_resource resource;
+ struct mlxsw_afa_policer *policer;
+};
+
+static void
+mlxsw_afa_policer_ref_destroy(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_policer_ref *policer_ref)
+{
+ mlxsw_afa_resource_del(&policer_ref->resource);
+ mlxsw_afa_policer_put(block->afa, policer_ref->policer);
+ kfree(policer_ref);
+}
+
+static void
+mlxsw_afa_policer_ref_destructor(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_resource *resource)
+{
+ struct mlxsw_afa_policer_ref *policer_ref;
+
+ policer_ref = container_of(resource, struct mlxsw_afa_policer_ref,
+ resource);
+ mlxsw_afa_policer_ref_destroy(block, policer_ref);
+}
+
+static struct mlxsw_afa_policer_ref *
+mlxsw_afa_policer_ref_create(struct mlxsw_afa_block *block, u32 fa_index,
+ u64 rate_bytes_ps, u32 burst,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_afa_policer_ref *policer_ref;
+ struct mlxsw_afa_policer *policer;
+ int err;
+
+ policer_ref = kzalloc(sizeof(*policer_ref), GFP_KERNEL);
+ if (!policer_ref)
+ return ERR_PTR(-ENOMEM);
+
+ policer = mlxsw_afa_policer_get(block->afa, fa_index, rate_bytes_ps,
+ burst, extack);
+ if (IS_ERR(policer)) {
+ err = PTR_ERR(policer);
+ goto err_policer_get;
+ }
+
+ policer_ref->policer = policer;
+ policer_ref->resource.destructor = mlxsw_afa_policer_ref_destructor;
+ mlxsw_afa_resource_add(block, &policer_ref->resource);
+
+ return policer_ref;
+
+err_policer_get:
+ kfree(policer_ref);
+ return ERR_PTR(err);
+}
+
+#define MLXSW_AFA_ONE_ACTION_LEN 32
+#define MLXSW_AFA_PAYLOAD_OFFSET 4
+
+enum mlxsw_afa_action_type {
+ MLXSW_AFA_ACTION_TYPE_TRAP,
+ MLXSW_AFA_ACTION_TYPE_POLICE,
+ MLXSW_AFA_ACTION_TYPE_OTHER,
+};
+
+static bool
+mlxsw_afa_block_need_split(const struct mlxsw_afa_block *block,
+ enum mlxsw_afa_action_type type)
+{
+ struct mlxsw_afa_set *cur_set = block->cur_set;
+
+ /* Due to a hardware limitation, police action cannot be in the same
+ * action set with MLXSW_AFA_TRAP_CODE or MLXSW_AFA_TRAPWU_CODE
+ * actions. Work around this limitation by creating a new action set
+ * and placing the new action there.
+ */
+ return (cur_set->has_trap && type == MLXSW_AFA_ACTION_TYPE_POLICE) ||
+ (cur_set->has_police && type == MLXSW_AFA_ACTION_TYPE_TRAP);
+}
+
+static char *mlxsw_afa_block_append_action_ext(struct mlxsw_afa_block *block,
+ u8 action_code, u8 action_size,
+ enum mlxsw_afa_action_type type)
+{
+ char *oneact;
+ char *actions;
+
+ if (block->finished)
+ return ERR_PTR(-EINVAL);
+ if (block->cur_act_index + action_size > block->afa->max_acts_per_set ||
+ mlxsw_afa_block_need_split(block, type)) {
+ struct mlxsw_afa_set *set;
+
+ /* The appended action won't fit into the current action set,
+ * so create a new set.
+ */
+ set = mlxsw_afa_set_create(false);
+ if (!set)
+ return ERR_PTR(-ENOBUFS);
+ set->prev = block->cur_set;
+ block->cur_act_index = 0;
+ block->cur_set->next = set;
+ block->cur_set = set;
+ }
+
+ switch (type) {
+ case MLXSW_AFA_ACTION_TYPE_TRAP:
+ block->cur_set->has_trap = true;
+ break;
+ case MLXSW_AFA_ACTION_TYPE_POLICE:
+ block->cur_set->has_police = true;
+ break;
+ default:
+ break;
+ }
+
+ actions = block->cur_set->ht_key.enc_actions;
+ oneact = actions + block->cur_act_index * MLXSW_AFA_ONE_ACTION_LEN;
+ block->cur_act_index += action_size;
+ mlxsw_afa_all_action_type_set(oneact, action_code);
+ return oneact + MLXSW_AFA_PAYLOAD_OFFSET;
+}
+
+static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block,
+ u8 action_code, u8 action_size)
+{
+ return mlxsw_afa_block_append_action_ext(block, action_code,
+ action_size,
+ MLXSW_AFA_ACTION_TYPE_OTHER);
+}
+
+/* VLAN Action
+ * -----------
+ * VLAN action is used for manipulating VLANs. It can be used to implement QinQ,
+ * VLAN translation, change of PCP bits of the VLAN tag, push, pop and swap VLANs
+ * and more.
+ */
+
+#define MLXSW_AFA_VLAN_CODE 0x02
+#define MLXSW_AFA_VLAN_SIZE 1
+
+enum mlxsw_afa_vlan_vlan_tag_cmd {
+ MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP,
+ MLXSW_AFA_VLAN_VLAN_TAG_CMD_PUSH_TAG,
+ MLXSW_AFA_VLAN_VLAN_TAG_CMD_POP_TAG,
+};
+
+enum mlxsw_afa_vlan_cmd {
+ MLXSW_AFA_VLAN_CMD_NOP,
+ MLXSW_AFA_VLAN_CMD_SET_OUTER,
+ MLXSW_AFA_VLAN_CMD_SET_INNER,
+ MLXSW_AFA_VLAN_CMD_COPY_OUTER_TO_INNER,
+ MLXSW_AFA_VLAN_CMD_COPY_INNER_TO_OUTER,
+ MLXSW_AFA_VLAN_CMD_SWAP,
+};
+
+/* afa_vlan_vlan_tag_cmd
+ * Tag command: push, pop, nop VLAN header.
+ */
+MLXSW_ITEM32(afa, vlan, vlan_tag_cmd, 0x00, 29, 3);
+
+/* afa_vlan_vid_cmd */
+MLXSW_ITEM32(afa, vlan, vid_cmd, 0x04, 29, 3);
+
+/* afa_vlan_vid */
+MLXSW_ITEM32(afa, vlan, vid, 0x04, 0, 12);
+
+/* afa_vlan_ethertype_cmd */
+MLXSW_ITEM32(afa, vlan, ethertype_cmd, 0x08, 29, 3);
+
+/* afa_vlan_ethertype
+ * Index to EtherTypes in Switch VLAN EtherType Register (SVER).
+ */
+MLXSW_ITEM32(afa, vlan, ethertype, 0x08, 24, 3);
+
+/* afa_vlan_pcp_cmd */
+MLXSW_ITEM32(afa, vlan, pcp_cmd, 0x08, 13, 3);
+
+/* afa_vlan_pcp */
+MLXSW_ITEM32(afa, vlan, pcp, 0x08, 8, 3);
+
+static inline void
+mlxsw_afa_vlan_pack(char *payload,
+ enum mlxsw_afa_vlan_vlan_tag_cmd vlan_tag_cmd,
+ enum mlxsw_afa_vlan_cmd vid_cmd, u16 vid,
+ enum mlxsw_afa_vlan_cmd pcp_cmd, u8 pcp,
+ enum mlxsw_afa_vlan_cmd ethertype_cmd, u8 ethertype)
+{
+ mlxsw_afa_vlan_vlan_tag_cmd_set(payload, vlan_tag_cmd);
+ mlxsw_afa_vlan_vid_cmd_set(payload, vid_cmd);
+ mlxsw_afa_vlan_vid_set(payload, vid);
+ mlxsw_afa_vlan_pcp_cmd_set(payload, pcp_cmd);
+ mlxsw_afa_vlan_pcp_set(payload, pcp);
+ mlxsw_afa_vlan_ethertype_cmd_set(payload, ethertype_cmd);
+ mlxsw_afa_vlan_ethertype_set(payload, ethertype);
+}
+
+int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
+ u16 vid, u8 pcp, u8 et,
+ struct netlink_ext_ack *extack)
+{
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_VLAN_CODE,
+ MLXSW_AFA_VLAN_SIZE);
+
+ if (IS_ERR(act)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append vlan_modify action");
+ return PTR_ERR(act);
+ }
+ mlxsw_afa_vlan_pack(act, MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP,
+ MLXSW_AFA_VLAN_CMD_SET_OUTER, vid,
+ MLXSW_AFA_VLAN_CMD_SET_OUTER, pcp,
+ MLXSW_AFA_VLAN_CMD_SET_OUTER, et);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_vlan_modify);
+
+/* Trap Action / Trap With Userdef Action
+ * --------------------------------------
+ * The Trap action enables trapping / mirroring packets to the CPU
+ * as well as discarding packets.
+ * The ACL Trap / Discard separates the forward/discard control from CPU
+ * trap control. In addition, the Trap / Discard action enables activating
+ * SPAN (port mirroring).
+ *
+ * The Trap with userdef action has the same functionality as
+ * the Trap action with addition of user defined value that can be set
+ * and used by higher layer applications.
+ */
+
+#define MLXSW_AFA_TRAP_CODE 0x03
+#define MLXSW_AFA_TRAP_SIZE 1
+
+#define MLXSW_AFA_TRAPWU_CODE 0x04
+#define MLXSW_AFA_TRAPWU_SIZE 2
+
+enum mlxsw_afa_trap_trap_action {
+ MLXSW_AFA_TRAP_TRAP_ACTION_NOP = 0,
+ MLXSW_AFA_TRAP_TRAP_ACTION_TRAP = 2,
+};
+
+/* afa_trap_trap_action
+ * Trap Action.
+ */
+MLXSW_ITEM32(afa, trap, trap_action, 0x00, 24, 4);
+
+enum mlxsw_afa_trap_forward_action {
+ MLXSW_AFA_TRAP_FORWARD_ACTION_FORWARD = 1,
+ MLXSW_AFA_TRAP_FORWARD_ACTION_DISCARD = 3,
+};
+
+/* afa_trap_forward_action
+ * Forward Action.
+ */
+MLXSW_ITEM32(afa, trap, forward_action, 0x00, 0, 4);
+
+/* afa_trap_trap_id
+ * Trap ID to configure.
+ */
+MLXSW_ITEM32(afa, trap, trap_id, 0x04, 0, 9);
+
+/* afa_trap_mirror_agent
+ * Mirror agent.
+ */
+MLXSW_ITEM32(afa, trap, mirror_agent, 0x08, 29, 3);
+
+/* afa_trap_mirror_enable
+ * Mirror enable.
+ */
+MLXSW_ITEM32(afa, trap, mirror_enable, 0x08, 24, 1);
+
+/* afa_trap_user_def_val
+ * Value for SW usage. Can be used to pass information about which
+ * rule caused a trap. This may be overwritten by later traps.
+ * The packet's user_def_val is updated only if this is the first
+ * trap_id or if the trap_id has replaced the previous packet's
+ * trap_id.
+ */
+MLXSW_ITEM32(afa, trap, user_def_val, 0x0C, 0, 20);
+
+static inline void
+mlxsw_afa_trap_pack(char *payload,
+ enum mlxsw_afa_trap_trap_action trap_action,
+ enum mlxsw_afa_trap_forward_action forward_action,
+ u16 trap_id)
+{
+ mlxsw_afa_trap_trap_action_set(payload, trap_action);
+ mlxsw_afa_trap_forward_action_set(payload, forward_action);
+ mlxsw_afa_trap_trap_id_set(payload, trap_id);
+}
+
+static inline void
+mlxsw_afa_trapwu_pack(char *payload,
+ enum mlxsw_afa_trap_trap_action trap_action,
+ enum mlxsw_afa_trap_forward_action forward_action,
+ u16 trap_id, u32 user_def_val)
+{
+ mlxsw_afa_trap_pack(payload, trap_action, forward_action, trap_id);
+ mlxsw_afa_trap_user_def_val_set(payload, user_def_val);
+}
+
+static inline void
+mlxsw_afa_trap_mirror_pack(char *payload, bool mirror_enable,
+ u8 mirror_agent)
+{
+ mlxsw_afa_trap_mirror_enable_set(payload, mirror_enable);
+ mlxsw_afa_trap_mirror_agent_set(payload, mirror_agent);
+}
+
+static char *mlxsw_afa_block_append_action_trap(struct mlxsw_afa_block *block,
+ u8 action_code, u8 action_size)
+{
+ return mlxsw_afa_block_append_action_ext(block, action_code,
+ action_size,
+ MLXSW_AFA_ACTION_TYPE_TRAP);
+}
+
+static int mlxsw_afa_block_append_drop_plain(struct mlxsw_afa_block *block,
+ bool ingress)
+{
+ char *act = mlxsw_afa_block_append_action_trap(block,
+ MLXSW_AFA_TRAP_CODE,
+ MLXSW_AFA_TRAP_SIZE);
+
+ if (IS_ERR(act))
+ return PTR_ERR(act);
+ mlxsw_afa_trap_pack(act, MLXSW_AFA_TRAP_TRAP_ACTION_TRAP,
+ MLXSW_AFA_TRAP_FORWARD_ACTION_DISCARD,
+ ingress ? MLXSW_TRAP_ID_DISCARD_INGRESS_ACL :
+ MLXSW_TRAP_ID_DISCARD_EGRESS_ACL);
+ return 0;
+}
+
+static int
+mlxsw_afa_block_append_drop_with_cookie(struct mlxsw_afa_block *block,
+ bool ingress,
+ const struct flow_action_cookie *fa_cookie,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_afa_cookie_ref *cookie_ref;
+ u32 cookie_index;
+ char *act;
+ int err;
+
+ cookie_ref = mlxsw_afa_cookie_ref_create(block, fa_cookie);
+ if (IS_ERR(cookie_ref)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot create cookie for drop action");
+ return PTR_ERR(cookie_ref);
+ }
+ cookie_index = cookie_ref->cookie->cookie_index;
+
+ act = mlxsw_afa_block_append_action_trap(block, MLXSW_AFA_TRAPWU_CODE,
+ MLXSW_AFA_TRAPWU_SIZE);
+ if (IS_ERR(act)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append drop with cookie action");
+ err = PTR_ERR(act);
+ goto err_append_action;
+ }
+ mlxsw_afa_trapwu_pack(act, MLXSW_AFA_TRAP_TRAP_ACTION_TRAP,
+ MLXSW_AFA_TRAP_FORWARD_ACTION_DISCARD,
+ ingress ? MLXSW_TRAP_ID_DISCARD_INGRESS_ACL :
+ MLXSW_TRAP_ID_DISCARD_EGRESS_ACL,
+ cookie_index);
+ return 0;
+
+err_append_action:
+ mlxsw_afa_cookie_ref_destroy(block, cookie_ref);
+ return err;
+}
+
+int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block, bool ingress,
+ const struct flow_action_cookie *fa_cookie,
+ struct netlink_ext_ack *extack)
+{
+ return fa_cookie ?
+ mlxsw_afa_block_append_drop_with_cookie(block, ingress,
+ fa_cookie, extack) :
+ mlxsw_afa_block_append_drop_plain(block, ingress);
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_drop);
+
+int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id)
+{
+ char *act = mlxsw_afa_block_append_action_trap(block,
+ MLXSW_AFA_TRAP_CODE,
+ MLXSW_AFA_TRAP_SIZE);
+
+ if (IS_ERR(act))
+ return PTR_ERR(act);
+ mlxsw_afa_trap_pack(act, MLXSW_AFA_TRAP_TRAP_ACTION_TRAP,
+ MLXSW_AFA_TRAP_FORWARD_ACTION_DISCARD, trap_id);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_trap);
+
+int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
+ u16 trap_id)
+{
+ char *act = mlxsw_afa_block_append_action_trap(block,
+ MLXSW_AFA_TRAP_CODE,
+ MLXSW_AFA_TRAP_SIZE);
+
+ if (IS_ERR(act))
+ return PTR_ERR(act);
+ mlxsw_afa_trap_pack(act, MLXSW_AFA_TRAP_TRAP_ACTION_TRAP,
+ MLXSW_AFA_TRAP_FORWARD_ACTION_FORWARD, trap_id);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_trap_and_forward);
+
+struct mlxsw_afa_mirror {
+ struct mlxsw_afa_resource resource;
+ int span_id;
+ u16 local_in_port;
+ bool ingress;
+};
+
+static void
+mlxsw_afa_mirror_destroy(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_mirror *mirror)
+{
+ mlxsw_afa_resource_del(&mirror->resource);
+ block->afa->ops->mirror_del(block->afa->ops_priv,
+ mirror->local_in_port,
+ mirror->span_id,
+ mirror->ingress);
+ kfree(mirror);
+}
+
+static void
+mlxsw_afa_mirror_destructor(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_resource *resource)
+{
+ struct mlxsw_afa_mirror *mirror;
+
+ mirror = container_of(resource, struct mlxsw_afa_mirror, resource);
+ mlxsw_afa_mirror_destroy(block, mirror);
+}
+
+static struct mlxsw_afa_mirror *
+mlxsw_afa_mirror_create(struct mlxsw_afa_block *block, u16 local_in_port,
+ const struct net_device *out_dev, bool ingress)
+{
+ struct mlxsw_afa_mirror *mirror;
+ int err;
+
+ mirror = kzalloc(sizeof(*mirror), GFP_KERNEL);
+ if (!mirror)
+ return ERR_PTR(-ENOMEM);
+
+ err = block->afa->ops->mirror_add(block->afa->ops_priv,
+ local_in_port, out_dev,
+ ingress, &mirror->span_id);
+ if (err)
+ goto err_mirror_add;
+
+ mirror->ingress = ingress;
+ mirror->local_in_port = local_in_port;
+ mirror->resource.destructor = mlxsw_afa_mirror_destructor;
+ mlxsw_afa_resource_add(block, &mirror->resource);
+ return mirror;
+
+err_mirror_add:
+ kfree(mirror);
+ return ERR_PTR(err);
+}
+
+static int
+mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block,
+ u8 mirror_agent)
+{
+ char *act = mlxsw_afa_block_append_action_trap(block,
+ MLXSW_AFA_TRAP_CODE,
+ MLXSW_AFA_TRAP_SIZE);
+
+ if (IS_ERR(act))
+ return PTR_ERR(act);
+ mlxsw_afa_trap_pack(act, MLXSW_AFA_TRAP_TRAP_ACTION_NOP,
+ MLXSW_AFA_TRAP_FORWARD_ACTION_FORWARD, 0);
+ mlxsw_afa_trap_mirror_pack(act, true, mirror_agent);
+ return 0;
+}
+
+int
+mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block, u16 local_in_port,
+ const struct net_device *out_dev, bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_afa_mirror *mirror;
+ int err;
+
+ mirror = mlxsw_afa_mirror_create(block, local_in_port, out_dev,
+ ingress);
+ if (IS_ERR(mirror)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot create mirror action");
+ return PTR_ERR(mirror);
+ }
+ err = mlxsw_afa_block_append_allocated_mirror(block, mirror->span_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append mirror action");
+ goto err_append_allocated_mirror;
+ }
+
+ return 0;
+
+err_append_allocated_mirror:
+ mlxsw_afa_mirror_destroy(block, mirror);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_mirror);
+
+/* QoS Action
+ * ----------
+ * The QOS_ACTION is used for manipulating the QoS attributes of a packet. It
+ * can be used to change the DSCP, ECN, Color and Switch Priority of the packet.
+ * Note that PCP field can be changed using the VLAN action.
+ */
+
+#define MLXSW_AFA_QOS_CODE 0x06
+#define MLXSW_AFA_QOS_SIZE 1
+
+enum mlxsw_afa_qos_ecn_cmd {
+ /* Do nothing */
+ MLXSW_AFA_QOS_ECN_CMD_NOP,
+ /* Set ECN to afa_qos_ecn */
+ MLXSW_AFA_QOS_ECN_CMD_SET,
+};
+
+/* afa_qos_ecn_cmd
+ */
+MLXSW_ITEM32(afa, qos, ecn_cmd, 0x04, 29, 3);
+
+/* afa_qos_ecn
+ * ECN value.
+ */
+MLXSW_ITEM32(afa, qos, ecn, 0x04, 24, 2);
+
+enum mlxsw_afa_qos_dscp_cmd {
+ /* Do nothing */
+ MLXSW_AFA_QOS_DSCP_CMD_NOP,
+ /* Set DSCP 3 LSB bits according to dscp[2:0] */
+ MLXSW_AFA_QOS_DSCP_CMD_SET_3LSB,
+ /* Set DSCP 3 MSB bits according to dscp[5:3] */
+ MLXSW_AFA_QOS_DSCP_CMD_SET_3MSB,
+ /* Set DSCP 6 bits according to dscp[5:0] */
+ MLXSW_AFA_QOS_DSCP_CMD_SET_ALL,
+};
+
+/* afa_qos_dscp_cmd
+ * DSCP command.
+ */
+MLXSW_ITEM32(afa, qos, dscp_cmd, 0x04, 14, 2);
+
+/* afa_qos_dscp
+ * DSCP value.
+ */
+MLXSW_ITEM32(afa, qos, dscp, 0x04, 0, 6);
+
+enum mlxsw_afa_qos_switch_prio_cmd {
+ /* Do nothing */
+ MLXSW_AFA_QOS_SWITCH_PRIO_CMD_NOP,
+ /* Set Switch Priority to afa_qos_switch_prio */
+ MLXSW_AFA_QOS_SWITCH_PRIO_CMD_SET,
+};
+
+/* afa_qos_switch_prio_cmd
+ */
+MLXSW_ITEM32(afa, qos, switch_prio_cmd, 0x08, 14, 2);
+
+/* afa_qos_switch_prio
+ * Switch Priority.
+ */
+MLXSW_ITEM32(afa, qos, switch_prio, 0x08, 0, 4);
+
+enum mlxsw_afa_qos_dscp_rw {
+ MLXSW_AFA_QOS_DSCP_RW_PRESERVE,
+ MLXSW_AFA_QOS_DSCP_RW_SET,
+ MLXSW_AFA_QOS_DSCP_RW_CLEAR,
+};
+
+/* afa_qos_dscp_rw
+ * DSCP Re-write Enable. Controlling the rewrite_enable for DSCP.
+ */
+MLXSW_ITEM32(afa, qos, dscp_rw, 0x0C, 30, 2);
+
+static inline void
+mlxsw_afa_qos_ecn_pack(char *payload,
+ enum mlxsw_afa_qos_ecn_cmd ecn_cmd, u8 ecn)
+{
+ mlxsw_afa_qos_ecn_cmd_set(payload, ecn_cmd);
+ mlxsw_afa_qos_ecn_set(payload, ecn);
+}
+
+static inline void
+mlxsw_afa_qos_dscp_pack(char *payload,
+ enum mlxsw_afa_qos_dscp_cmd dscp_cmd, u8 dscp)
+{
+ mlxsw_afa_qos_dscp_cmd_set(payload, dscp_cmd);
+ mlxsw_afa_qos_dscp_set(payload, dscp);
+}
+
+static inline void
+mlxsw_afa_qos_switch_prio_pack(char *payload,
+ enum mlxsw_afa_qos_switch_prio_cmd prio_cmd,
+ u8 prio)
+{
+ mlxsw_afa_qos_switch_prio_cmd_set(payload, prio_cmd);
+ mlxsw_afa_qos_switch_prio_set(payload, prio);
+}
+
+static int __mlxsw_afa_block_append_qos_dsfield(struct mlxsw_afa_block *block,
+ bool set_dscp, u8 dscp,
+ bool set_ecn, u8 ecn,
+ struct netlink_ext_ack *extack)
+{
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_QOS_CODE,
+ MLXSW_AFA_QOS_SIZE);
+
+ if (IS_ERR(act)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append QOS action");
+ return PTR_ERR(act);
+ }
+
+ if (set_ecn)
+ mlxsw_afa_qos_ecn_pack(act, MLXSW_AFA_QOS_ECN_CMD_SET, ecn);
+ if (set_dscp) {
+ mlxsw_afa_qos_dscp_pack(act, MLXSW_AFA_QOS_DSCP_CMD_SET_ALL,
+ dscp);
+ mlxsw_afa_qos_dscp_rw_set(act, MLXSW_AFA_QOS_DSCP_RW_CLEAR);
+ }
+
+ return 0;
+}
+
+int mlxsw_afa_block_append_qos_dsfield(struct mlxsw_afa_block *block,
+ u8 dsfield,
+ struct netlink_ext_ack *extack)
+{
+ return __mlxsw_afa_block_append_qos_dsfield(block,
+ true, dsfield >> 2,
+ true, dsfield & 0x03,
+ extack);
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_qos_dsfield);
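+
+/* The dsfield argument is the full 8-bit IP DS field: the upper six bits
+ * carry the DSCP and the lower two bits the ECN. For example, dsfield 0xb9
+ * splits into DSCP 0x2e (46, EF) and ECN 0x1.
+ */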
+
+int mlxsw_afa_block_append_qos_dscp(struct mlxsw_afa_block *block,
+ u8 dscp, struct netlink_ext_ack *extack)
+{
+ return __mlxsw_afa_block_append_qos_dsfield(block,
+ true, dscp,
+ false, 0,
+ extack);
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_qos_dscp);
+
+int mlxsw_afa_block_append_qos_ecn(struct mlxsw_afa_block *block,
+ u8 ecn, struct netlink_ext_ack *extack)
+{
+ return __mlxsw_afa_block_append_qos_dsfield(block,
+ false, 0,
+ true, ecn,
+ extack);
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_qos_ecn);
+
+int mlxsw_afa_block_append_qos_switch_prio(struct mlxsw_afa_block *block,
+ u8 prio,
+ struct netlink_ext_ack *extack)
+{
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_QOS_CODE,
+ MLXSW_AFA_QOS_SIZE);
+
+ if (IS_ERR(act)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append QOS action");
+ return PTR_ERR(act);
+ }
+ mlxsw_afa_qos_switch_prio_pack(act, MLXSW_AFA_QOS_SWITCH_PRIO_CMD_SET,
+ prio);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_qos_switch_prio);
+
+/* Forwarding Action
+ * -----------------
+ * Forwarding Action can be used to implement Policy Based Switching (PBS)
+ * as well as OpenFlow related "Output" action.
+ */
+
+#define MLXSW_AFA_FORWARD_CODE 0x07
+#define MLXSW_AFA_FORWARD_SIZE 1
+
+enum mlxsw_afa_forward_type {
+ /* PBS, Policy Based Switching */
+ MLXSW_AFA_FORWARD_TYPE_PBS,
+ /* Output, OpenFlow output type */
+ MLXSW_AFA_FORWARD_TYPE_OUTPUT,
+};
+
+/* afa_forward_type */
+MLXSW_ITEM32(afa, forward, type, 0x00, 24, 2);
+
+/* afa_forward_pbs_ptr
+ * A pointer to the PBS entry configured by PPBS register.
+ * Reserved when in_port is set.
+ */
+MLXSW_ITEM32(afa, forward, pbs_ptr, 0x08, 0, 24);
+
+/* afa_forward_in_port
+ * Packet is forwarded back to the ingress port.
+ */
+MLXSW_ITEM32(afa, forward, in_port, 0x0C, 0, 1);
+
+static inline void
+mlxsw_afa_forward_pack(char *payload, enum mlxsw_afa_forward_type type,
+ u32 pbs_ptr, bool in_port)
+{
+ mlxsw_afa_forward_type_set(payload, type);
+ mlxsw_afa_forward_pbs_ptr_set(payload, pbs_ptr);
+ mlxsw_afa_forward_in_port_set(payload, in_port);
+}
+
+int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
+ u16 local_port, bool in_port,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
+ u32 kvdl_index;
+ char *act;
+ int err;
+
+ if (in_port) {
+ NL_SET_ERR_MSG_MOD(extack, "Forwarding to ingress port is not supported");
+ return -EOPNOTSUPP;
+ }
+ fwd_entry_ref = mlxsw_afa_fwd_entry_ref_create(block, local_port);
+ if (IS_ERR(fwd_entry_ref)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot create forward action");
+ return PTR_ERR(fwd_entry_ref);
+ }
+ kvdl_index = fwd_entry_ref->fwd_entry->kvdl_index;
+
+ act = mlxsw_afa_block_append_action(block, MLXSW_AFA_FORWARD_CODE,
+ MLXSW_AFA_FORWARD_SIZE);
+ if (IS_ERR(act)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append forward action");
+ err = PTR_ERR(act);
+ goto err_append_action;
+ }
+ mlxsw_afa_forward_pack(act, MLXSW_AFA_FORWARD_TYPE_PBS,
+ kvdl_index, in_port);
+ return 0;
+
+err_append_action:
+ mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_fwd);
+
+/* Policing and Counting Action
+ * ----------------------------
+ * Policing and Counting action is used for binding policer and counter
+ * to ACL rules.
+ */
+
+#define MLXSW_AFA_POLCNT_CODE 0x08
+#define MLXSW_AFA_POLCNT_SIZE 1
+
+enum {
+ MLXSW_AFA_POLCNT_COUNTER,
+ MLXSW_AFA_POLCNT_POLICER,
+};
+
+/* afa_polcnt_c_p
+ * Counter or policer.
+ * Indicates whether the action binds a policer or a counter to the flow.
+ * 0: Counter
+ * 1: Policer
+ */
+MLXSW_ITEM32(afa, polcnt, c_p, 0x00, 31, 1);
+
+enum mlxsw_afa_polcnt_counter_set_type {
+ /* No count */
+ MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_NO_COUNT = 0x00,
+ /* Count packets and bytes */
+ MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES = 0x03,
+ /* Count only packets */
+ MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS = 0x05,
+};
+
+/* afa_polcnt_counter_set_type
+ * Counter set type for flow counters.
+ */
+MLXSW_ITEM32(afa, polcnt, counter_set_type, 0x04, 24, 8);
+
+/* afa_polcnt_counter_index
+ * Counter index for flow counters.
+ */
+MLXSW_ITEM32(afa, polcnt, counter_index, 0x04, 0, 24);
+
+/* afa_polcnt_pid
+ * Policer ID.
+ * Reserved when c_p = 0
+ */
+MLXSW_ITEM32(afa, polcnt, pid, 0x08, 0, 14);
+
+static inline void
+mlxsw_afa_polcnt_pack(char *payload,
+ enum mlxsw_afa_polcnt_counter_set_type set_type,
+ u32 counter_index)
+{
+ mlxsw_afa_polcnt_c_p_set(payload, MLXSW_AFA_POLCNT_COUNTER);
+ mlxsw_afa_polcnt_counter_set_type_set(payload, set_type);
+ mlxsw_afa_polcnt_counter_index_set(payload, counter_index);
+}
+
+static void mlxsw_afa_polcnt_policer_pack(char *payload, u16 policer_index)
+{
+ mlxsw_afa_polcnt_c_p_set(payload, MLXSW_AFA_POLCNT_POLICER);
+ mlxsw_afa_polcnt_pid_set(payload, policer_index);
+}
+
+int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block,
+ u32 counter_index)
+{
+ char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_POLCNT_CODE,
+ MLXSW_AFA_POLCNT_SIZE);
+ if (IS_ERR(act))
+ return PTR_ERR(act);
+ mlxsw_afa_polcnt_pack(act, MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES,
+ counter_index);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_allocated_counter);
+
+int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
+ u32 *p_counter_index,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_afa_counter *counter;
+ u32 counter_index;
+ int err;
+
+ counter = mlxsw_afa_counter_create(block);
+ if (IS_ERR(counter)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot create count action");
+ return PTR_ERR(counter);
+ }
+ counter_index = counter->counter_index;
+
+ err = mlxsw_afa_block_append_allocated_counter(block, counter_index);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append count action");
+ goto err_append_allocated_counter;
+ }
+ if (p_counter_index)
+ *p_counter_index = counter_index;
+ return 0;
+
+err_append_allocated_counter:
+ mlxsw_afa_counter_destroy(block, counter);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_counter);
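+
+/* A brief sketch of the counting flow: the caller appends the count action,
+ * remembers the returned counter index and later resolves that index to
+ * packet/byte statistics through its own counter pool (not part of this
+ * file):
+ *
+ *        u32 counter_index;
+ *
+ *        err = mlxsw_afa_block_append_counter(block, &counter_index, extack);
+ *        if (err)
+ *                return err;
+ *        // keep counter_index alongside the rule for later stats queries
+ */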
+
+int mlxsw_afa_block_append_police(struct mlxsw_afa_block *block,
+ u32 fa_index, u64 rate_bytes_ps, u32 burst,
+ u16 *p_policer_index,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_afa_policer_ref *policer_ref;
+ char *act;
+ int err;
+
+ policer_ref = mlxsw_afa_policer_ref_create(block, fa_index,
+ rate_bytes_ps,
+ burst, extack);
+ if (IS_ERR(policer_ref))
+ return PTR_ERR(policer_ref);
+ *p_policer_index = policer_ref->policer->policer_index;
+
+ act = mlxsw_afa_block_append_action_ext(block, MLXSW_AFA_POLCNT_CODE,
+ MLXSW_AFA_POLCNT_SIZE,
+ MLXSW_AFA_ACTION_TYPE_POLICE);
+ if (IS_ERR(act)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append police action");
+ err = PTR_ERR(act);
+ goto err_append_action;
+ }
+ mlxsw_afa_polcnt_policer_pack(act, *p_policer_index);
+
+ return 0;
+
+err_append_action:
+ mlxsw_afa_policer_ref_destroy(block, policer_ref);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_police);
+
+/* Virtual Router and Forwarding Domain Action
+ * -------------------------------------------
+ * Virtual Switch action is used to manipulate the Virtual Router (VR),
+ * MPLS label space and the Forwarding Identifier (FID).
+ */
+
+#define MLXSW_AFA_VIRFWD_CODE 0x0E
+#define MLXSW_AFA_VIRFWD_SIZE 1
+
+enum mlxsw_afa_virfwd_fid_cmd {
+ /* Do nothing */
+ MLXSW_AFA_VIRFWD_FID_CMD_NOOP,
+ /* Set the Forwarding Identifier (FID) to fid */
+ MLXSW_AFA_VIRFWD_FID_CMD_SET,
+};
+
+/* afa_virfwd_fid_cmd */
+MLXSW_ITEM32(afa, virfwd, fid_cmd, 0x08, 29, 3);
+
+/* afa_virfwd_fid
+ * The FID value.
+ */
+MLXSW_ITEM32(afa, virfwd, fid, 0x08, 0, 16);
+
+static inline void mlxsw_afa_virfwd_pack(char *payload,
+ enum mlxsw_afa_virfwd_fid_cmd fid_cmd,
+ u16 fid)
+{
+ mlxsw_afa_virfwd_fid_cmd_set(payload, fid_cmd);
+ mlxsw_afa_virfwd_fid_set(payload, fid);
+}
+
+int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid,
+ struct netlink_ext_ack *extack)
+{
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_VIRFWD_CODE,
+ MLXSW_AFA_VIRFWD_SIZE);
+ if (IS_ERR(act)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append fid_set action");
+ return PTR_ERR(act);
+ }
+ mlxsw_afa_virfwd_pack(act, MLXSW_AFA_VIRFWD_FID_CMD_SET, fid);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_fid_set);
+
+/* MC Routing Action
+ * -----------------
+ * The Multicast router action. Can be used by RMFT_V2 - Router Multicast
+ * Forwarding Table Version 2 Register.
+ */
+
+#define MLXSW_AFA_MCROUTER_CODE 0x10
+#define MLXSW_AFA_MCROUTER_SIZE 2
+
+enum mlxsw_afa_mcrouter_rpf_action {
+ MLXSW_AFA_MCROUTER_RPF_ACTION_NOP,
+ MLXSW_AFA_MCROUTER_RPF_ACTION_TRAP,
+ MLXSW_AFA_MCROUTER_RPF_ACTION_DISCARD_ERROR,
+};
+
+/* afa_mcrouter_rpf_action */
+MLXSW_ITEM32(afa, mcrouter, rpf_action, 0x00, 28, 3);
+
+/* afa_mcrouter_expected_irif */
+MLXSW_ITEM32(afa, mcrouter, expected_irif, 0x00, 0, 16);
+
+/* afa_mcrouter_min_mtu */
+MLXSW_ITEM32(afa, mcrouter, min_mtu, 0x08, 0, 16);
+
+enum mlxsw_afa_mrouter_vrmid {
+ MLXSW_AFA_MCROUTER_VRMID_INVALID,
+ MLXSW_AFA_MCROUTER_VRMID_VALID
+};
+
+/* afa_mcrouter_vrmid
+ * Valid RMID: rigr_rmid_index is used as RMID
+ */
+MLXSW_ITEM32(afa, mcrouter, vrmid, 0x0C, 31, 1);
+
+/* afa_mcrouter_rigr_rmid_index
+ * When the vrmid field is set to invalid, the field is used as a pointer to
+ * the Router Interface Group (RIGR) Table in the KVD linear.
+ * When the vrmid is set to valid, the field is used as an RMID index, ranging
+ * from 0 to max_mid - 1. The index is into the Port Group Table.
+ */
+MLXSW_ITEM32(afa, mcrouter, rigr_rmid_index, 0x0C, 0, 24);
+
+static inline void
+mlxsw_afa_mcrouter_pack(char *payload,
+ enum mlxsw_afa_mcrouter_rpf_action rpf_action,
+ u16 expected_irif, u16 min_mtu,
+ enum mlxsw_afa_mrouter_vrmid vrmid, u32 rigr_rmid_index)
+
+{
+ mlxsw_afa_mcrouter_rpf_action_set(payload, rpf_action);
+ mlxsw_afa_mcrouter_expected_irif_set(payload, expected_irif);
+ mlxsw_afa_mcrouter_min_mtu_set(payload, min_mtu);
+ mlxsw_afa_mcrouter_vrmid_set(payload, vrmid);
+ mlxsw_afa_mcrouter_rigr_rmid_index_set(payload, rigr_rmid_index);
+}
+
+int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
+ u16 expected_irif, u16 min_mtu,
+ bool rmid_valid, u32 kvdl_index)
+{
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_MCROUTER_CODE,
+ MLXSW_AFA_MCROUTER_SIZE);
+ if (IS_ERR(act))
+ return PTR_ERR(act);
+ mlxsw_afa_mcrouter_pack(act, MLXSW_AFA_MCROUTER_RPF_ACTION_TRAP,
+ expected_irif, min_mtu, rmid_valid, kvdl_index);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_mcrouter);
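+
+/* Example (illustrative only, not part of the driver): when programming a
+ * multicast route, a caller would typically pass the expected ingress RIF, a
+ * minimum MTU and a KVD linear index of a RIGR entry (rmid_valid == false).
+ * "block", "irif_index", "min_mtu" and "rigr_kvdl_index" are assumed to come
+ * from the caller's route context:
+ *
+ *	err = mlxsw_afa_block_append_mcrouter(block, irif_index, min_mtu,
+ *					      false, rigr_kvdl_index);
+ */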
+
+/* SIP DIP Action
+ * --------------
+ * The SIP_DIP_ACTION is used for modifying the SIP and DIP fields of the
+ * packet, e.g. for NAT. The L3 checksum is updated. Also, if the L4 is TCP or
+ * if the L4 is UDP and the checksum field is not zero, then the L4 checksum is
+ * updated.
+ */
+
+#define MLXSW_AFA_IP_CODE 0x11
+#define MLXSW_AFA_IP_SIZE 2
+
+enum mlxsw_afa_ip_s_d {
+ /* ip refers to dip */
+ MLXSW_AFA_IP_S_D_DIP,
+ /* ip refers to sip */
+ MLXSW_AFA_IP_S_D_SIP,
+};
+
+/* afa_ip_s_d
+ * Source or destination.
+ */
+MLXSW_ITEM32(afa, ip, s_d, 0x00, 31, 1);
+
+enum mlxsw_afa_ip_m_l {
+ /* LSB: ip[63:0] refers to ip[63:0] */
+ MLXSW_AFA_IP_M_L_LSB,
+ /* MSB: ip[63:0] refers to ip[127:64] */
+ MLXSW_AFA_IP_M_L_MSB,
+};
+
+/* afa_ip_m_l
+ * MSB or LSB.
+ */
+MLXSW_ITEM32(afa, ip, m_l, 0x00, 30, 1);
+
+/* afa_ip_ip_63_32
+ * Bits [63:32] in the IP address to change to.
+ */
+MLXSW_ITEM32(afa, ip, ip_63_32, 0x08, 0, 32);
+
+/* afa_ip_ip_31_0
+ * Bits [31:0] in the IP address to change to.
+ */
+MLXSW_ITEM32(afa, ip, ip_31_0, 0x0C, 0, 32);
+
+static void mlxsw_afa_ip_pack(char *payload, enum mlxsw_afa_ip_s_d s_d,
+ enum mlxsw_afa_ip_m_l m_l, u32 ip_31_0,
+ u32 ip_63_32)
+{
+ mlxsw_afa_ip_s_d_set(payload, s_d);
+ mlxsw_afa_ip_m_l_set(payload, m_l);
+ mlxsw_afa_ip_ip_31_0_set(payload, ip_31_0);
+ mlxsw_afa_ip_ip_63_32_set(payload, ip_63_32);
+}
+
+int mlxsw_afa_block_append_ip(struct mlxsw_afa_block *block, bool is_dip,
+ bool is_lsb, u32 val_31_0, u32 val_63_32,
+ struct netlink_ext_ack *extack)
+{
+ enum mlxsw_afa_ip_s_d s_d = is_dip ? MLXSW_AFA_IP_S_D_DIP :
+ MLXSW_AFA_IP_S_D_SIP;
+ enum mlxsw_afa_ip_m_l m_l = is_lsb ? MLXSW_AFA_IP_M_L_LSB :
+ MLXSW_AFA_IP_M_L_MSB;
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_IP_CODE,
+ MLXSW_AFA_IP_SIZE);
+
+ if (IS_ERR(act)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append IP action");
+ return PTR_ERR(act);
+ }
+
+ mlxsw_afa_ip_pack(act, s_d, m_l, val_31_0, val_63_32);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_ip);
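+
+/* Example (illustrative only, not part of the driver): rewriting an IPv4
+ * destination address needs a single call covering bits [31:0], while an
+ * IPv6 address needs two calls, one for the lower and one for the upper
+ * 64 bits. "new_dip4" is a hypothetical u32 holding the new address:
+ *
+ *	err = mlxsw_afa_block_append_ip(block, true, true, new_dip4, 0,
+ *					extack);
+ */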
+
+/* L4 Port Action
+ * --------------
+ * The L4_PORT_ACTION is used for modifying the sport and dport fields of the
+ * packet, e.g. for NAT. If the L4 is TCP, or if the L4 is UDP and the
+ * checksum field is not zero, then the L4 checksum is updated.
+ */
+
+#define MLXSW_AFA_L4PORT_CODE 0x12
+#define MLXSW_AFA_L4PORT_SIZE 1
+
+enum mlxsw_afa_l4port_s_d {
+ /* configure src_l4_port */
+ MLXSW_AFA_L4PORT_S_D_SRC,
+ /* configure dst_l4_port */
+ MLXSW_AFA_L4PORT_S_D_DST,
+};
+
+/* afa_l4port_s_d
+ * Source or destination.
+ */
+MLXSW_ITEM32(afa, l4port, s_d, 0x00, 31, 1);
+
+/* afa_l4port_l4_port
+ * Number of port to change to.
+ */
+MLXSW_ITEM32(afa, l4port, l4_port, 0x08, 0, 16);
+
+static void mlxsw_afa_l4port_pack(char *payload, enum mlxsw_afa_l4port_s_d s_d, u16 l4_port)
+{
+ mlxsw_afa_l4port_s_d_set(payload, s_d);
+ mlxsw_afa_l4port_l4_port_set(payload, l4_port);
+}
+
+int mlxsw_afa_block_append_l4port(struct mlxsw_afa_block *block, bool is_dport, u16 l4_port,
+ struct netlink_ext_ack *extack)
+{
+ enum mlxsw_afa_l4port_s_d s_d = is_dport ? MLXSW_AFA_L4PORT_S_D_DST :
+ MLXSW_AFA_L4PORT_S_D_SRC;
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_L4PORT_CODE,
+ MLXSW_AFA_L4PORT_SIZE);
+
+ if (IS_ERR(act)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append L4_PORT action");
+ return PTR_ERR(act);
+ }
+
+ mlxsw_afa_l4port_pack(act, s_d, l4_port);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_l4port);
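+
+/* Example (illustrative only, not part of the driver): rewriting the
+ * destination port to 8080, e.g. as part of a NAT rule. "block" and "extack"
+ * are assumed to come from the caller's rule context:
+ *
+ *	err = mlxsw_afa_block_append_l4port(block, true, 8080, extack);
+ */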
+
+/* Mirror Sampler Action
+ * ---------------------
+ * The SAMPLER_ACTION is used to mirror packets with a probability (sampling).
+ */
+
+#define MLXSW_AFA_SAMPLER_CODE 0x13
+#define MLXSW_AFA_SAMPLER_SIZE 1
+
+/* afa_sampler_mirror_agent
+ * Mirror (SPAN) agent.
+ */
+MLXSW_ITEM32(afa, sampler, mirror_agent, 0x04, 0, 3);
+
+#define MLXSW_AFA_SAMPLER_RATE_MAX (BIT(24) - 1)
+
+/* afa_sampler_mirror_probability_rate
+ * Mirroring probability.
+ * Valid values are 1 to 2^24 - 1
+ */
+MLXSW_ITEM32(afa, sampler, mirror_probability_rate, 0x08, 0, 24);
+
+static void mlxsw_afa_sampler_pack(char *payload, u8 mirror_agent, u32 rate)
+{
+ mlxsw_afa_sampler_mirror_agent_set(payload, mirror_agent);
+ mlxsw_afa_sampler_mirror_probability_rate_set(payload, rate);
+}
+
+struct mlxsw_afa_sampler {
+ struct mlxsw_afa_resource resource;
+ int span_id;
+ u16 local_port;
+ bool ingress;
+};
+
+static void mlxsw_afa_sampler_destroy(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_sampler *sampler)
+{
+ mlxsw_afa_resource_del(&sampler->resource);
+ block->afa->ops->sampler_del(block->afa->ops_priv, sampler->local_port,
+ sampler->span_id, sampler->ingress);
+ kfree(sampler);
+}
+
+static void mlxsw_afa_sampler_destructor(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_resource *resource)
+{
+ struct mlxsw_afa_sampler *sampler;
+
+ sampler = container_of(resource, struct mlxsw_afa_sampler, resource);
+ mlxsw_afa_sampler_destroy(block, sampler);
+}
+
+static struct mlxsw_afa_sampler *
+mlxsw_afa_sampler_create(struct mlxsw_afa_block *block, u16 local_port,
+ struct psample_group *psample_group, u32 rate,
+ u32 trunc_size, bool truncate, bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_afa_sampler *sampler;
+ int err;
+
+ sampler = kzalloc(sizeof(*sampler), GFP_KERNEL);
+ if (!sampler)
+ return ERR_PTR(-ENOMEM);
+
+ err = block->afa->ops->sampler_add(block->afa->ops_priv, local_port,
+ psample_group, rate, trunc_size,
+ truncate, ingress, &sampler->span_id,
+ extack);
+ if (err)
+ goto err_sampler_add;
+
+ sampler->ingress = ingress;
+ sampler->local_port = local_port;
+ sampler->resource.destructor = mlxsw_afa_sampler_destructor;
+ mlxsw_afa_resource_add(block, &sampler->resource);
+ return sampler;
+
+err_sampler_add:
+ kfree(sampler);
+ return ERR_PTR(err);
+}
+
+static int
+mlxsw_afa_block_append_allocated_sampler(struct mlxsw_afa_block *block,
+ u8 mirror_agent, u32 rate)
+{
+ char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_SAMPLER_CODE,
+ MLXSW_AFA_SAMPLER_SIZE);
+
+ if (IS_ERR(act))
+ return PTR_ERR(act);
+ mlxsw_afa_sampler_pack(act, mirror_agent, rate);
+ return 0;
+}
+
+int mlxsw_afa_block_append_sampler(struct mlxsw_afa_block *block, u16 local_port,
+ struct psample_group *psample_group,
+ u32 rate, u32 trunc_size, bool truncate,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_afa_sampler *sampler;
+ int err;
+
+ if (rate > MLXSW_AFA_SAMPLER_RATE_MAX) {
+ NL_SET_ERR_MSG_MOD(extack, "Sampling rate is too high");
+ return -EINVAL;
+ }
+
+ sampler = mlxsw_afa_sampler_create(block, local_port, psample_group,
+ rate, trunc_size, truncate, ingress,
+ extack);
+ if (IS_ERR(sampler))
+ return PTR_ERR(sampler);
+
+ err = mlxsw_afa_block_append_allocated_sampler(block, sampler->span_id,
+ rate);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append sampler action");
+ goto err_append_allocated_sampler;
+ }
+
+ return 0;
+
+err_append_allocated_sampler:
+ mlxsw_afa_sampler_destroy(block, sampler);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_sampler);
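+
+/* Example (illustrative only, not part of the driver): sampling roughly one
+ * of every 1000 ingress packets on a port, truncated to 128 bytes.
+ * "psample_group" is assumed to have been obtained by the caller, e.g. from
+ * an offloaded tc sample action:
+ *
+ *	err = mlxsw_afa_block_append_sampler(block, local_port, psample_group,
+ *					     1000, 128, true, true, extack);
+ */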
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
new file mode 100644
index 000000000..db58037be
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_CORE_ACL_FLEX_ACTIONS_H
+#define _MLXSW_CORE_ACL_FLEX_ACTIONS_H
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <net/flow_offload.h>
+
+struct mlxsw_afa;
+struct mlxsw_afa_block;
+
+struct mlxsw_afa_ops {
+ int (*kvdl_set_add)(void *priv, u32 *p_kvdl_index,
+ char *enc_actions, bool is_first);
+ void (*kvdl_set_del)(void *priv, u32 kvdl_index, bool is_first);
+ int (*kvdl_set_activity_get)(void *priv, u32 kvdl_index,
+ bool *activity);
+ int (*kvdl_fwd_entry_add)(void *priv, u32 *p_kvdl_index, u16 local_port);
+ void (*kvdl_fwd_entry_del)(void *priv, u32 kvdl_index);
+ int (*counter_index_get)(void *priv, unsigned int *p_counter_index);
+ void (*counter_index_put)(void *priv, unsigned int counter_index);
+ int (*mirror_add)(void *priv, u16 local_in_port,
+ const struct net_device *out_dev,
+ bool ingress, int *p_span_id);
+ void (*mirror_del)(void *priv, u16 local_in_port, int span_id,
+ bool ingress);
+ int (*policer_add)(void *priv, u64 rate_bytes_ps, u32 burst,
+ u16 *p_policer_index,
+ struct netlink_ext_ack *extack);
+ void (*policer_del)(void *priv, u16 policer_index);
+ int (*sampler_add)(void *priv, u16 local_port,
+ struct psample_group *psample_group, u32 rate,
+ u32 trunc_size, bool truncate, bool ingress,
+ int *p_span_id, struct netlink_ext_ack *extack);
+ void (*sampler_del)(void *priv, u16 local_port, int span_id,
+ bool ingress);
+ bool dummy_first_set;
+};
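+
+/* Illustrative sketch (hypothetical names, not part of the driver): a
+ * chip-specific driver fills an mlxsw_afa_ops instance with its KVDL,
+ * counter, mirror, policer and sampler callbacks and passes it to
+ * mlxsw_afa_create():
+ *
+ *	static const struct mlxsw_afa_ops my_afa_ops = {
+ *		.kvdl_set_add	= my_kvdl_set_add,
+ *		.kvdl_set_del	= my_kvdl_set_del,
+ *		(remaining callbacks omitted in this sketch)
+ *	};
+ *
+ *	afa = mlxsw_afa_create(MY_MAX_ACTIONS_PER_SET, &my_afa_ops, priv);
+ */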
+
+struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set,
+ const struct mlxsw_afa_ops *ops,
+ void *ops_priv);
+void mlxsw_afa_destroy(struct mlxsw_afa *mlxsw_afa);
+struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa);
+void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block);
+int mlxsw_afa_block_commit(struct mlxsw_afa_block *block);
+char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block);
+char *mlxsw_afa_block_cur_set(struct mlxsw_afa_block *block);
+u32 mlxsw_afa_block_first_kvdl_index(struct mlxsw_afa_block *block);
+int mlxsw_afa_block_activity_get(struct mlxsw_afa_block *block, bool *activity);
+int mlxsw_afa_block_continue(struct mlxsw_afa_block *block);
+int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id);
+int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block);
+const struct flow_action_cookie *
+mlxsw_afa_cookie_lookup(struct mlxsw_afa *mlxsw_afa, u32 cookie_index);
+int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block, bool ingress,
+ const struct flow_action_cookie *fa_cookie,
+ struct netlink_ext_ack *extack);
+int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id);
+int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
+ u16 trap_id);
+int mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block,
+ u16 local_in_port,
+ const struct net_device *out_dev,
+ bool ingress,
+ struct netlink_ext_ack *extack);
+int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
+ u16 local_port, bool in_port,
+ struct netlink_ext_ack *extack);
+int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
+ u16 vid, u8 pcp, u8 et,
+ struct netlink_ext_ack *extack);
+int mlxsw_afa_block_append_qos_switch_prio(struct mlxsw_afa_block *block,
+ u8 prio,
+ struct netlink_ext_ack *extack);
+int mlxsw_afa_block_append_qos_dsfield(struct mlxsw_afa_block *block,
+ u8 dsfield,
+ struct netlink_ext_ack *extack);
+int mlxsw_afa_block_append_qos_dscp(struct mlxsw_afa_block *block,
+ u8 dscp, struct netlink_ext_ack *extack);
+int mlxsw_afa_block_append_qos_ecn(struct mlxsw_afa_block *block,
+ u8 ecn, struct netlink_ext_ack *extack);
+int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block,
+ u32 counter_index);
+int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
+ u32 *p_counter_index,
+ struct netlink_ext_ack *extack);
+int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid,
+ struct netlink_ext_ack *extack);
+int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
+ u16 expected_irif, u16 min_mtu,
+ bool rmid_valid, u32 kvdl_index);
+int mlxsw_afa_block_append_ip(struct mlxsw_afa_block *block, bool is_dip,
+ bool is_lsb, u32 val_31_0, u32 val_63_32,
+ struct netlink_ext_ack *extack);
+int mlxsw_afa_block_append_l4port(struct mlxsw_afa_block *block, bool is_dport, u16 l4_port,
+ struct netlink_ext_ack *extack);
+int mlxsw_afa_block_append_police(struct mlxsw_afa_block *block,
+ u32 fa_index, u64 rate_bytes_ps, u32 burst,
+ u16 *p_policer_index,
+ struct netlink_ext_ack *extack);
+int mlxsw_afa_block_append_sampler(struct mlxsw_afa_block *block, u16 local_port,
+ struct psample_group *psample_group,
+ u32 rate, u32 trunc_size, bool truncate,
+ bool ingress,
+ struct netlink_ext_ack *extack);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
new file mode 100644
index 000000000..f208a237d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -0,0 +1,506 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+
+#include "item.h"
+#include "core_acl_flex_keys.h"
+
+/* For the purpose of the driver, define an internal storage scratchpad
+ * that will be used to store key/mask values. For each defined element type
+ * define an internal storage geometry.
+ *
+ * When adding new elements, MLXSW_AFK_ELEMENT_STORAGE_SIZE must be increased
+ * accordingly.
+ */
+static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
+ MLXSW_AFK_ELEMENT_INFO_U32(SRC_SYS_PORT, 0x00, 16, 16),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DMAC_32_47, 0x04, 2),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DMAC_0_31, 0x06, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SMAC_32_47, 0x0A, 2),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SMAC_0_31, 0x0C, 4),
+ MLXSW_AFK_ELEMENT_INFO_U32(ETHERTYPE, 0x00, 0, 16),
+ MLXSW_AFK_ELEMENT_INFO_U32(IP_PROTO, 0x10, 0, 8),
+ MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12),
+ MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3),
+ MLXSW_AFK_ELEMENT_INFO_U32(TCP_FLAGS, 0x10, 23, 9),
+ MLXSW_AFK_ELEMENT_INFO_U32(DST_L4_PORT, 0x14, 0, 16),
+ MLXSW_AFK_ELEMENT_INFO_U32(SRC_L4_PORT, 0x14, 16, 16),
+ MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8),
+ MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2),
+ MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6),
+ MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_MSB, 0x18, 17, 4),
+ MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_LSB, 0x18, 21, 8),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_96_127, 0x20, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_64_95, 0x24, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_32_63, 0x28, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_0_31, 0x2C, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_96_127, 0x30, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_64_95, 0x34, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_32_63, 0x38, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_0_31, 0x3C, 4),
+};
+
+struct mlxsw_afk {
+ struct list_head key_info_list;
+ unsigned int max_blocks;
+ const struct mlxsw_afk_ops *ops;
+ const struct mlxsw_afk_block *blocks;
+ unsigned int blocks_count;
+};
+
+static bool mlxsw_afk_blocks_check(struct mlxsw_afk *mlxsw_afk)
+{
+ int i;
+ int j;
+
+ for (i = 0; i < mlxsw_afk->blocks_count; i++) {
+ const struct mlxsw_afk_block *block = &mlxsw_afk->blocks[i];
+
+ for (j = 0; j < block->instances_count; j++) {
+ const struct mlxsw_afk_element_info *elinfo;
+ struct mlxsw_afk_element_inst *elinst;
+
+ elinst = &block->instances[j];
+ elinfo = &mlxsw_afk_element_infos[elinst->element];
+ if (elinst->type != elinfo->type ||
+ (!elinst->avoid_size_check &&
+ elinst->item.size.bits !=
+ elinfo->item.size.bits))
+ return false;
+ }
+ }
+ return true;
+}
+
+struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks,
+ const struct mlxsw_afk_ops *ops)
+{
+ struct mlxsw_afk *mlxsw_afk;
+
+ mlxsw_afk = kzalloc(sizeof(*mlxsw_afk), GFP_KERNEL);
+ if (!mlxsw_afk)
+ return NULL;
+ INIT_LIST_HEAD(&mlxsw_afk->key_info_list);
+ mlxsw_afk->max_blocks = max_blocks;
+ mlxsw_afk->ops = ops;
+ mlxsw_afk->blocks = ops->blocks;
+ mlxsw_afk->blocks_count = ops->blocks_count;
+ WARN_ON(!mlxsw_afk_blocks_check(mlxsw_afk));
+ return mlxsw_afk;
+}
+EXPORT_SYMBOL(mlxsw_afk_create);
+
+void mlxsw_afk_destroy(struct mlxsw_afk *mlxsw_afk)
+{
+ WARN_ON(!list_empty(&mlxsw_afk->key_info_list));
+ kfree(mlxsw_afk);
+}
+EXPORT_SYMBOL(mlxsw_afk_destroy);
+
+struct mlxsw_afk_key_info {
+ struct list_head list;
+ unsigned int ref_count;
+ unsigned int blocks_count;
+ int element_to_block[MLXSW_AFK_ELEMENT_MAX]; /* index is element, value
+ * is index inside "blocks"
+ */
+ struct mlxsw_afk_element_usage elusage;
+ const struct mlxsw_afk_block *blocks[];
+};
+
+static bool
+mlxsw_afk_key_info_elements_eq(struct mlxsw_afk_key_info *key_info,
+ struct mlxsw_afk_element_usage *elusage)
+{
+ return memcmp(&key_info->elusage, elusage, sizeof(*elusage)) == 0;
+}
+
+static struct mlxsw_afk_key_info *
+mlxsw_afk_key_info_find(struct mlxsw_afk *mlxsw_afk,
+ struct mlxsw_afk_element_usage *elusage)
+{
+ struct mlxsw_afk_key_info *key_info;
+
+ list_for_each_entry(key_info, &mlxsw_afk->key_info_list, list) {
+ if (mlxsw_afk_key_info_elements_eq(key_info, elusage))
+ return key_info;
+ }
+ return NULL;
+}
+
+struct mlxsw_afk_picker {
+ DECLARE_BITMAP(element, MLXSW_AFK_ELEMENT_MAX);
+ unsigned int total;
+};
+
+static void mlxsw_afk_picker_count_hits(struct mlxsw_afk *mlxsw_afk,
+ struct mlxsw_afk_picker *picker,
+ enum mlxsw_afk_element element)
+{
+ int i;
+ int j;
+
+ for (i = 0; i < mlxsw_afk->blocks_count; i++) {
+ const struct mlxsw_afk_block *block = &mlxsw_afk->blocks[i];
+
+ for (j = 0; j < block->instances_count; j++) {
+ struct mlxsw_afk_element_inst *elinst;
+
+ elinst = &block->instances[j];
+ if (elinst->element == element) {
+ __set_bit(element, picker[i].element);
+ picker[i].total++;
+ }
+ }
+ }
+}
+
+static void mlxsw_afk_picker_subtract_hits(struct mlxsw_afk *mlxsw_afk,
+ struct mlxsw_afk_picker *picker,
+ int block_index)
+{
+ DECLARE_BITMAP(hits_element, MLXSW_AFK_ELEMENT_MAX);
+ int i;
+ int j;
+
+ memcpy(&hits_element, &picker[block_index].element,
+ sizeof(hits_element));
+
+ for (i = 0; i < mlxsw_afk->blocks_count; i++) {
+ for_each_set_bit(j, hits_element, MLXSW_AFK_ELEMENT_MAX) {
+ if (__test_and_clear_bit(j, picker[i].element))
+ picker[i].total--;
+ }
+ }
+}
+
+static int mlxsw_afk_picker_most_hits_get(struct mlxsw_afk *mlxsw_afk,
+ struct mlxsw_afk_picker *picker)
+{
+	int most_index = -EINVAL; /* Should never end up being returned */
+ int most_hits = 0;
+ int i;
+
+ for (i = 0; i < mlxsw_afk->blocks_count; i++) {
+ if (picker[i].total > most_hits) {
+ most_hits = picker[i].total;
+ most_index = i;
+ }
+ }
+ return most_index;
+}
+
+static int mlxsw_afk_picker_key_info_add(struct mlxsw_afk *mlxsw_afk,
+ struct mlxsw_afk_picker *picker,
+ int block_index,
+ struct mlxsw_afk_key_info *key_info)
+{
+ enum mlxsw_afk_element element;
+
+ if (key_info->blocks_count == mlxsw_afk->max_blocks)
+ return -EINVAL;
+
+ for_each_set_bit(element, picker[block_index].element,
+ MLXSW_AFK_ELEMENT_MAX) {
+ key_info->element_to_block[element] = key_info->blocks_count;
+ mlxsw_afk_element_usage_add(&key_info->elusage, element);
+ }
+
+ key_info->blocks[key_info->blocks_count] =
+ &mlxsw_afk->blocks[block_index];
+ key_info->blocks_count++;
+ return 0;
+}
+
+static int mlxsw_afk_picker(struct mlxsw_afk *mlxsw_afk,
+ struct mlxsw_afk_key_info *key_info,
+ struct mlxsw_afk_element_usage *elusage)
+{
+ struct mlxsw_afk_picker *picker;
+ enum mlxsw_afk_element element;
+ int err;
+
+ picker = kcalloc(mlxsw_afk->blocks_count, sizeof(*picker), GFP_KERNEL);
+ if (!picker)
+ return -ENOMEM;
+
+	/* Since the same elements could be present in multiple blocks,
+	 * we must find an optimal block list in order to keep the
+	 * block count as low as possible.
+	 *
+	 * First, we count hits. We go over all available blocks and count
+	 * how many of the requested elements are covered by each.
+	 *
+	 * Then, in a loop, we find the block with the most hits and add it
+	 * to the output key_info. We then subtract this block's hits so
+	 * that the next iteration will find the most suitable block for
+	 * the rest of the requested elements.
+	 */
+
+ mlxsw_afk_element_usage_for_each(element, elusage)
+ mlxsw_afk_picker_count_hits(mlxsw_afk, picker, element);
+
+ do {
+ int block_index;
+
+ block_index = mlxsw_afk_picker_most_hits_get(mlxsw_afk, picker);
+ if (block_index < 0) {
+ err = block_index;
+ goto out;
+ }
+ err = mlxsw_afk_picker_key_info_add(mlxsw_afk, picker,
+ block_index, key_info);
+ if (err)
+ goto out;
+ mlxsw_afk_picker_subtract_hits(mlxsw_afk, picker, block_index);
+ } while (!mlxsw_afk_key_info_elements_eq(key_info, elusage));
+
+ err = 0;
+out:
+ kfree(picker);
+ return err;
+}
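+
+/* Worked example (hypothetical blocks): with block A = {SRC_SYS_PORT,
+ * ETHERTYPE}, block B = {ETHERTYPE, IP_PROTO} and block C = {IP_PROTO},
+ * a request for {ETHERTYPE, IP_PROTO} counts one hit for A, two for B and
+ * one for C. B is picked first, its elements are subtracted, all requested
+ * elements are now covered and the loop stops, so the key uses one block.
+ */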
+
+static struct mlxsw_afk_key_info *
+mlxsw_afk_key_info_create(struct mlxsw_afk *mlxsw_afk,
+ struct mlxsw_afk_element_usage *elusage)
+{
+ struct mlxsw_afk_key_info *key_info;
+ int err;
+
+ key_info = kzalloc(struct_size(key_info, blocks, mlxsw_afk->max_blocks),
+ GFP_KERNEL);
+ if (!key_info)
+ return ERR_PTR(-ENOMEM);
+ err = mlxsw_afk_picker(mlxsw_afk, key_info, elusage);
+ if (err)
+ goto err_picker;
+ list_add(&key_info->list, &mlxsw_afk->key_info_list);
+ key_info->ref_count = 1;
+ return key_info;
+
+err_picker:
+ kfree(key_info);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_afk_key_info_destroy(struct mlxsw_afk_key_info *key_info)
+{
+ list_del(&key_info->list);
+ kfree(key_info);
+}
+
+struct mlxsw_afk_key_info *
+mlxsw_afk_key_info_get(struct mlxsw_afk *mlxsw_afk,
+ struct mlxsw_afk_element_usage *elusage)
+{
+ struct mlxsw_afk_key_info *key_info;
+
+ key_info = mlxsw_afk_key_info_find(mlxsw_afk, elusage);
+ if (key_info) {
+ key_info->ref_count++;
+ return key_info;
+ }
+ return mlxsw_afk_key_info_create(mlxsw_afk, elusage);
+}
+EXPORT_SYMBOL(mlxsw_afk_key_info_get);
+
+void mlxsw_afk_key_info_put(struct mlxsw_afk_key_info *key_info)
+{
+ if (--key_info->ref_count)
+ return;
+ mlxsw_afk_key_info_destroy(key_info);
+}
+EXPORT_SYMBOL(mlxsw_afk_key_info_put);
+
+bool mlxsw_afk_key_info_subset(struct mlxsw_afk_key_info *key_info,
+ struct mlxsw_afk_element_usage *elusage)
+{
+ return mlxsw_afk_element_usage_subset(elusage, &key_info->elusage);
+}
+EXPORT_SYMBOL(mlxsw_afk_key_info_subset);
+
+static const struct mlxsw_afk_element_inst *
+mlxsw_afk_block_elinst_get(const struct mlxsw_afk_block *block,
+ enum mlxsw_afk_element element)
+{
+ int i;
+
+ for (i = 0; i < block->instances_count; i++) {
+ struct mlxsw_afk_element_inst *elinst;
+
+ elinst = &block->instances[i];
+ if (elinst->element == element)
+ return elinst;
+ }
+ return NULL;
+}
+
+static const struct mlxsw_afk_element_inst *
+mlxsw_afk_key_info_elinst_get(struct mlxsw_afk_key_info *key_info,
+ enum mlxsw_afk_element element,
+ int *p_block_index)
+{
+ const struct mlxsw_afk_element_inst *elinst;
+ const struct mlxsw_afk_block *block;
+ int block_index;
+
+ if (WARN_ON(!test_bit(element, key_info->elusage.usage)))
+ return NULL;
+ block_index = key_info->element_to_block[element];
+ block = key_info->blocks[block_index];
+
+ elinst = mlxsw_afk_block_elinst_get(block, element);
+ if (WARN_ON(!elinst))
+ return NULL;
+
+ *p_block_index = block_index;
+ return elinst;
+}
+
+u16
+mlxsw_afk_key_info_block_encoding_get(const struct mlxsw_afk_key_info *key_info,
+ int block_index)
+{
+ return key_info->blocks[block_index]->encoding;
+}
+EXPORT_SYMBOL(mlxsw_afk_key_info_block_encoding_get);
+
+unsigned int
+mlxsw_afk_key_info_blocks_count_get(const struct mlxsw_afk_key_info *key_info)
+{
+ return key_info->blocks_count;
+}
+EXPORT_SYMBOL(mlxsw_afk_key_info_blocks_count_get);
+
+void mlxsw_afk_values_add_u32(struct mlxsw_afk_element_values *values,
+ enum mlxsw_afk_element element,
+ u32 key_value, u32 mask_value)
+{
+ const struct mlxsw_afk_element_info *elinfo =
+ &mlxsw_afk_element_infos[element];
+ const struct mlxsw_item *storage_item = &elinfo->item;
+
+ if (!mask_value)
+ return;
+ if (WARN_ON(elinfo->type != MLXSW_AFK_ELEMENT_TYPE_U32))
+ return;
+ __mlxsw_item_set32(values->storage.key, storage_item, 0, key_value);
+ __mlxsw_item_set32(values->storage.mask, storage_item, 0, mask_value);
+ mlxsw_afk_element_usage_add(&values->elusage, element);
+}
+EXPORT_SYMBOL(mlxsw_afk_values_add_u32);
+
+void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values,
+ enum mlxsw_afk_element element,
+ const char *key_value, const char *mask_value,
+ unsigned int len)
+{
+ const struct mlxsw_afk_element_info *elinfo =
+ &mlxsw_afk_element_infos[element];
+ const struct mlxsw_item *storage_item = &elinfo->item;
+
+ if (!memchr_inv(mask_value, 0, len)) /* If mask is zero */
+ return;
+ if (WARN_ON(elinfo->type != MLXSW_AFK_ELEMENT_TYPE_BUF) ||
+ WARN_ON(elinfo->item.size.bytes != len))
+ return;
+ __mlxsw_item_memcpy_to(values->storage.key, key_value,
+ storage_item, 0);
+ __mlxsw_item_memcpy_to(values->storage.mask, mask_value,
+ storage_item, 0);
+ mlxsw_afk_element_usage_add(&values->elusage, element);
+}
+EXPORT_SYMBOL(mlxsw_afk_values_add_buf);
+
+static void mlxsw_sp_afk_encode_u32(const struct mlxsw_item *storage_item,
+ const struct mlxsw_item *output_item,
+ char *storage, char *output, int diff)
+{
+ u32 value;
+
+ value = __mlxsw_item_get32(storage, storage_item, 0);
+ __mlxsw_item_set32(output, output_item, 0, value + diff);
+}
+
+static void mlxsw_sp_afk_encode_buf(const struct mlxsw_item *storage_item,
+ const struct mlxsw_item *output_item,
+ char *storage, char *output)
+{
+ char *storage_data = __mlxsw_item_data(storage, storage_item, 0);
+ char *output_data = __mlxsw_item_data(output, output_item, 0);
+ size_t len = output_item->size.bytes;
+
+ memcpy(output_data, storage_data, len);
+}
+
+static void
+mlxsw_sp_afk_encode_one(const struct mlxsw_afk_element_inst *elinst,
+ char *output, char *storage, int u32_diff)
+{
+ const struct mlxsw_item *output_item = &elinst->item;
+ const struct mlxsw_afk_element_info *elinfo;
+ const struct mlxsw_item *storage_item;
+
+ elinfo = &mlxsw_afk_element_infos[elinst->element];
+ storage_item = &elinfo->item;
+ if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_U32)
+ mlxsw_sp_afk_encode_u32(storage_item, output_item,
+ storage, output, u32_diff);
+ else if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_BUF)
+ mlxsw_sp_afk_encode_buf(storage_item, output_item,
+ storage, output);
+}
+
+#define MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE 16
+
+void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
+ struct mlxsw_afk_key_info *key_info,
+ struct mlxsw_afk_element_values *values,
+ char *key, char *mask)
+{
+ unsigned int blocks_count =
+ mlxsw_afk_key_info_blocks_count_get(key_info);
+ char block_mask[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE];
+ char block_key[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE];
+ const struct mlxsw_afk_element_inst *elinst;
+ enum mlxsw_afk_element element;
+ int block_index, i;
+
+ for (i = 0; i < blocks_count; i++) {
+ memset(block_key, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE);
+ memset(block_mask, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE);
+
+ mlxsw_afk_element_usage_for_each(element, &values->elusage) {
+ elinst = mlxsw_afk_key_info_elinst_get(key_info,
+ element,
+ &block_index);
+ if (!elinst || block_index != i)
+ continue;
+
+ mlxsw_sp_afk_encode_one(elinst, block_key,
+ values->storage.key,
+ elinst->u32_key_diff);
+ mlxsw_sp_afk_encode_one(elinst, block_mask,
+ values->storage.mask, 0);
+ }
+
+ mlxsw_afk->ops->encode_block(key, i, block_key);
+ mlxsw_afk->ops->encode_block(mask, i, block_mask);
+ }
+}
+EXPORT_SYMBOL(mlxsw_afk_encode);
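+
+/* Illustrative flow (not part of the driver, error handling omitted): a
+ * typical user declares which elements it needs, resolves a key_info, fills
+ * element values and encodes them into the hardware key/mask buffers. "afk",
+ * "key" and "mask" are assumed to be provided by the caller:
+ *
+ *	struct mlxsw_afk_element_values values = {};
+ *	struct mlxsw_afk_element_usage elusage;
+ *	struct mlxsw_afk_key_info *key_info;
+ *
+ *	mlxsw_afk_element_usage_zero(&elusage);
+ *	mlxsw_afk_element_usage_add(&elusage, MLXSW_AFK_ELEMENT_ETHERTYPE);
+ *	key_info = mlxsw_afk_key_info_get(afk, &elusage);
+ *	mlxsw_afk_values_add_u32(&values, MLXSW_AFK_ELEMENT_ETHERTYPE,
+ *				 ETH_P_IP, 0xffff);
+ *	mlxsw_afk_encode(afk, key_info, &values, key, mask);
+ *	mlxsw_afk_key_info_put(key_info);
+ */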
+
+void mlxsw_afk_clear(struct mlxsw_afk *mlxsw_afk, char *key,
+ int block_start, int block_end)
+{
+ int i;
+
+ for (i = block_start; i <= block_end; i++)
+ mlxsw_afk->ops->clear_block(key, i);
+}
+EXPORT_SYMBOL(mlxsw_afk_clear);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
new file mode 100644
index 000000000..3a037fe47
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -0,0 +1,222 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_CORE_ACL_FLEX_KEYS_H
+#define _MLXSW_CORE_ACL_FLEX_KEYS_H
+
+#include <linux/types.h>
+#include <linux/bitmap.h>
+
+#include "item.h"
+
+enum mlxsw_afk_element {
+ MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
+ MLXSW_AFK_ELEMENT_DMAC_32_47,
+ MLXSW_AFK_ELEMENT_DMAC_0_31,
+ MLXSW_AFK_ELEMENT_SMAC_32_47,
+ MLXSW_AFK_ELEMENT_SMAC_0_31,
+ MLXSW_AFK_ELEMENT_ETHERTYPE,
+ MLXSW_AFK_ELEMENT_IP_PROTO,
+ MLXSW_AFK_ELEMENT_SRC_IP_96_127,
+ MLXSW_AFK_ELEMENT_SRC_IP_64_95,
+ MLXSW_AFK_ELEMENT_SRC_IP_32_63,
+ MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ MLXSW_AFK_ELEMENT_DST_IP_96_127,
+ MLXSW_AFK_ELEMENT_DST_IP_64_95,
+ MLXSW_AFK_ELEMENT_DST_IP_32_63,
+ MLXSW_AFK_ELEMENT_DST_IP_0_31,
+ MLXSW_AFK_ELEMENT_DST_L4_PORT,
+ MLXSW_AFK_ELEMENT_SRC_L4_PORT,
+ MLXSW_AFK_ELEMENT_VID,
+ MLXSW_AFK_ELEMENT_PCP,
+ MLXSW_AFK_ELEMENT_TCP_FLAGS,
+ MLXSW_AFK_ELEMENT_IP_TTL_,
+ MLXSW_AFK_ELEMENT_IP_ECN,
+ MLXSW_AFK_ELEMENT_IP_DSCP,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB,
+ MLXSW_AFK_ELEMENT_MAX,
+};
+
+enum mlxsw_afk_element_type {
+ MLXSW_AFK_ELEMENT_TYPE_U32,
+ MLXSW_AFK_ELEMENT_TYPE_BUF,
+};
+
+struct mlxsw_afk_element_info {
+ enum mlxsw_afk_element element; /* element ID */
+ enum mlxsw_afk_element_type type;
+ struct mlxsw_item item; /* element geometry in internal storage */
+};
+
+#define MLXSW_AFK_ELEMENT_INFO(_type, _element, _offset, _shift, _size) \
+ [MLXSW_AFK_ELEMENT_##_element] = { \
+ .element = MLXSW_AFK_ELEMENT_##_element, \
+ .type = _type, \
+ .item = { \
+ .offset = _offset, \
+ .shift = _shift, \
+ .size = {.bits = _size}, \
+ .name = #_element, \
+ }, \
+ }
+
+#define MLXSW_AFK_ELEMENT_INFO_U32(_element, _offset, _shift, _size) \
+ MLXSW_AFK_ELEMENT_INFO(MLXSW_AFK_ELEMENT_TYPE_U32, \
+ _element, _offset, _shift, _size)
+
+#define MLXSW_AFK_ELEMENT_INFO_BUF(_element, _offset, _size) \
+ MLXSW_AFK_ELEMENT_INFO(MLXSW_AFK_ELEMENT_TYPE_BUF, \
+ _element, _offset, 0, _size)
+
+#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x40
+
+struct mlxsw_afk_element_inst { /* element instance in actual block */
+ enum mlxsw_afk_element element;
+ enum mlxsw_afk_element_type type;
+ struct mlxsw_item item; /* element geometry in block */
+	int u32_key_diff; /* in case the value needs to be adjusted before
+			   * it is written, this diff is added to it
+			   */
+ bool avoid_size_check;
+};
+
+#define MLXSW_AFK_ELEMENT_INST(_type, _element, _offset, \
+ _shift, _size, _u32_key_diff, _avoid_size_check) \
+ { \
+ .element = MLXSW_AFK_ELEMENT_##_element, \
+ .type = _type, \
+ .item = { \
+ .offset = _offset, \
+ .shift = _shift, \
+ .size = {.bits = _size}, \
+ .name = #_element, \
+ }, \
+ .u32_key_diff = _u32_key_diff, \
+ .avoid_size_check = _avoid_size_check, \
+ }
+
+#define MLXSW_AFK_ELEMENT_INST_U32(_element, _offset, _shift, _size) \
+ MLXSW_AFK_ELEMENT_INST(MLXSW_AFK_ELEMENT_TYPE_U32, \
+ _element, _offset, _shift, _size, 0, false)
+
+#define MLXSW_AFK_ELEMENT_INST_EXT_U32(_element, _offset, \
+ _shift, _size, _key_diff, \
+ _avoid_size_check) \
+ MLXSW_AFK_ELEMENT_INST(MLXSW_AFK_ELEMENT_TYPE_U32, \
+ _element, _offset, _shift, _size, \
+ _key_diff, _avoid_size_check)
+
+#define MLXSW_AFK_ELEMENT_INST_BUF(_element, _offset, _size) \
+ MLXSW_AFK_ELEMENT_INST(MLXSW_AFK_ELEMENT_TYPE_BUF, \
+ _element, _offset, 0, _size, 0, false)
+
+struct mlxsw_afk_block {
+ u16 encoding; /* block ID */
+ struct mlxsw_afk_element_inst *instances;
+ unsigned int instances_count;
+};
+
+#define MLXSW_AFK_BLOCK(_encoding, _instances) \
+ { \
+ .encoding = _encoding, \
+ .instances = _instances, \
+ .instances_count = ARRAY_SIZE(_instances), \
+ }
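+
+/* Illustrative sketch (hypothetical encoding and offsets, not part of the
+ * driver): a chip-specific driver describes each hardware key block as an
+ * array of element instances and wraps it with MLXSW_AFK_BLOCK:
+ *
+ *	static struct mlxsw_afk_element_inst my_block_l2[] = {
+ *		MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x00, 2),
+ *		MLXSW_AFK_ELEMENT_INST_BUF(DMAC_0_31, 0x02, 4),
+ *	};
+ *
+ *	static const struct mlxsw_afk_block my_blocks[] = {
+ *		MLXSW_AFK_BLOCK(0x10, my_block_l2),
+ *	};
+ */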
+
+struct mlxsw_afk_element_usage {
+ DECLARE_BITMAP(usage, MLXSW_AFK_ELEMENT_MAX);
+};
+
+#define mlxsw_afk_element_usage_for_each(element, elusage) \
+ for_each_set_bit(element, (elusage)->usage, MLXSW_AFK_ELEMENT_MAX)
+
+static inline void
+mlxsw_afk_element_usage_add(struct mlxsw_afk_element_usage *elusage,
+ enum mlxsw_afk_element element)
+{
+ __set_bit(element, elusage->usage);
+}
+
+static inline void
+mlxsw_afk_element_usage_zero(struct mlxsw_afk_element_usage *elusage)
+{
+ bitmap_zero(elusage->usage, MLXSW_AFK_ELEMENT_MAX);
+}
+
+static inline void
+mlxsw_afk_element_usage_fill(struct mlxsw_afk_element_usage *elusage,
+ const enum mlxsw_afk_element *elements,
+ unsigned int elements_count)
+{
+ int i;
+
+ mlxsw_afk_element_usage_zero(elusage);
+ for (i = 0; i < elements_count; i++)
+ mlxsw_afk_element_usage_add(elusage, elements[i]);
+}
+
+static inline bool
+mlxsw_afk_element_usage_subset(struct mlxsw_afk_element_usage *elusage_small,
+ struct mlxsw_afk_element_usage *elusage_big)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_AFK_ELEMENT_MAX; i++)
+ if (test_bit(i, elusage_small->usage) &&
+ !test_bit(i, elusage_big->usage))
+ return false;
+ return true;
+}
+
+struct mlxsw_afk;
+
+struct mlxsw_afk_ops {
+ const struct mlxsw_afk_block *blocks;
+ unsigned int blocks_count;
+ void (*encode_block)(char *output, int block_index, char *block);
+ void (*clear_block)(char *output, int block_index);
+};
+
+struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks,
+ const struct mlxsw_afk_ops *ops);
+void mlxsw_afk_destroy(struct mlxsw_afk *mlxsw_afk);
+
+struct mlxsw_afk_key_info;
+
+struct mlxsw_afk_key_info *
+mlxsw_afk_key_info_get(struct mlxsw_afk *mlxsw_afk,
+ struct mlxsw_afk_element_usage *elusage);
+void mlxsw_afk_key_info_put(struct mlxsw_afk_key_info *key_info);
+bool mlxsw_afk_key_info_subset(struct mlxsw_afk_key_info *key_info,
+ struct mlxsw_afk_element_usage *elusage);
+
+u16
+mlxsw_afk_key_info_block_encoding_get(const struct mlxsw_afk_key_info *key_info,
+ int block_index);
+unsigned int
+mlxsw_afk_key_info_blocks_count_get(const struct mlxsw_afk_key_info *key_info);
+
+struct mlxsw_afk_element_values {
+ struct mlxsw_afk_element_usage elusage;
+ struct {
+ char key[MLXSW_AFK_ELEMENT_STORAGE_SIZE];
+ char mask[MLXSW_AFK_ELEMENT_STORAGE_SIZE];
+ } storage;
+};
+
+void mlxsw_afk_values_add_u32(struct mlxsw_afk_element_values *values,
+ enum mlxsw_afk_element element,
+ u32 key_value, u32 mask_value);
+void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values,
+ enum mlxsw_afk_element element,
+ const char *key_value, const char *mask_value,
+ unsigned int len);
+void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
+ struct mlxsw_afk_key_info *key_info,
+ struct mlxsw_afk_element_values *values,
+ char *key, char *mask);
+void mlxsw_afk_clear(struct mlxsw_afk *mlxsw_afk, char *key,
+ int block_start, int block_end);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
new file mode 100644
index 000000000..0107cbc32
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -0,0 +1,1464 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/ethtool.h>
+#include <linux/sfp.h>
+#include <linux/mutex.h>
+
+#include "core.h"
+#include "core_env.h"
+#include "item.h"
+#include "reg.h"
+
+struct mlxsw_env_module_info {
+ u64 module_overheat_counter;
+ bool is_overheat;
+ int num_ports_mapped;
+ int num_ports_up;
+ enum ethtool_module_power_mode_policy power_mode_policy;
+ enum mlxsw_reg_pmtm_module_type type;
+};
+
+struct mlxsw_env_line_card {
+ u8 module_count;
+ bool active;
+ struct mlxsw_env_module_info module_info[];
+};
+
+struct mlxsw_env {
+ struct mlxsw_core *core;
+ const struct mlxsw_bus_info *bus_info;
+ u8 max_module_count; /* Maximum number of modules per-slot. */
+ u8 num_of_slots; /* Including the main board. */
+ struct mutex line_cards_lock; /* Protects line cards. */
+ struct mlxsw_env_line_card *line_cards[];
+};
+
+static bool __mlxsw_env_linecard_is_active(struct mlxsw_env *mlxsw_env,
+ u8 slot_index)
+{
+ return mlxsw_env->line_cards[slot_index]->active;
+}
+
+static bool mlxsw_env_linecard_is_active(struct mlxsw_env *mlxsw_env,
+ u8 slot_index)
+{
+ bool active;
+
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ active = __mlxsw_env_linecard_is_active(mlxsw_env, slot_index);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+
+ return active;
+}
+
+static struct
+mlxsw_env_module_info *mlxsw_env_module_info_get(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+
+ return &mlxsw_env->line_cards[slot_index]->module_info[module];
+}
+
+static int __mlxsw_env_validate_module_type(struct mlxsw_core *core,
+ u8 slot_index, u8 module)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(core);
+ struct mlxsw_env_module_info *module_info;
+ int err;
+
+ if (!__mlxsw_env_linecard_is_active(mlxsw_env, slot_index))
+ return 0;
+
+ module_info = mlxsw_env_module_info_get(core, slot_index, module);
+ switch (module_info->type) {
+ case MLXSW_REG_PMTM_MODULE_TYPE_TWISTED_PAIR:
+ err = -EINVAL;
+ break;
+ default:
+ err = 0;
+ }
+
+ return err;
+}
+
+static int mlxsw_env_validate_module_type(struct mlxsw_core *core,
+ u8 slot_index, u8 module)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(core);
+ int err;
+
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ err = __mlxsw_env_validate_module_type(core, slot_index, module);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+
+ return err;
+}
+
+static int
+mlxsw_env_validate_cable_ident(struct mlxsw_core *core, u8 slot_index, int id,
+ bool *qsfp, bool *cmis)
+{
+ char mcia_pl[MLXSW_REG_MCIA_LEN];
+ char *eeprom_tmp;
+ u8 ident;
+ int err;
+
+ err = mlxsw_env_validate_module_type(core, slot_index, id);
+ if (err)
+ return err;
+
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, id, 0,
+ MLXSW_REG_MCIA_PAGE0_LO_OFF, 0, 1,
+ MLXSW_REG_MCIA_I2C_ADDR_LOW);
+ err = mlxsw_reg_query(core, MLXSW_REG(mcia), mcia_pl);
+ if (err)
+ return err;
+ eeprom_tmp = mlxsw_reg_mcia_eeprom_data(mcia_pl);
+ ident = eeprom_tmp[0];
+ *cmis = false;
+ switch (ident) {
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_SFP:
+ *qsfp = false;
+ break;
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP:
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS:
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28:
+ *qsfp = true;
+ break;
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_DD:
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_OSFP:
+ *qsfp = true;
+ *cmis = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ int module, u16 offset, u16 size, void *data,
+ bool qsfp, unsigned int *p_read_size)
+{
+ char mcia_pl[MLXSW_REG_MCIA_LEN];
+ char *eeprom_tmp;
+ u16 i2c_addr;
+ u8 page = 0;
+ int status;
+ int err;
+
+ /* MCIA register accepts buffer size <= 48. Page of size 128 should be
+ * read by chunks of size 48, 48, 32. Align the size of the last chunk
+ * to avoid reading after the end of the page.
+ */
+ size = min_t(u16, size, MLXSW_REG_MCIA_EEPROM_SIZE);
+
+ if (offset < MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH &&
+ offset + size > MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH)
+ /* Cross pages read, read until offset 256 in low page */
+ size = MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH - offset;
+
+ i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_LOW;
+ if (offset >= MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) {
+ if (qsfp) {
+ /* When reading upper pages 1, 2 and 3 the offset
+ * starts at 128. Please refer to "QSFP+ Memory Map"
+ * figure in SFF-8436 specification and to "CMIS Module
+ * Memory Map" figure in CMIS specification for
+ * graphical depiction.
+ */
+ page = MLXSW_REG_MCIA_PAGE_GET(offset);
+ offset -= MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH * page;
+ if (offset + size > MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH)
+ size = MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH - offset;
+ } else {
+ /* When reading upper pages 1, 2 and 3 the offset
+ * starts at 0 and I2C high address is used. Please refer
+ * to "Memory Organization" figure in SFF-8472
+ * specification for graphical depiction.
+ */
+ i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_HIGH;
+ offset -= MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH;
+ }
+ }
+
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, page, offset, size,
+ i2c_addr);
+
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcia), mcia_pl);
+ if (err)
+ return err;
+
+ status = mlxsw_reg_mcia_status_get(mcia_pl);
+ if (status)
+ return -EIO;
+
+ eeprom_tmp = mlxsw_reg_mcia_eeprom_data(mcia_pl);
+ memcpy(data, eeprom_tmp, size);
+ *p_read_size = size;
+
+ return 0;
+}
+
+int
+mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, u8 slot_index,
+ int module, int off, int *temp)
+{
+ unsigned int module_temp, module_crit, module_emerg;
+ union {
+ u8 buf[MLXSW_REG_MCIA_TH_ITEM_SIZE];
+ u16 temp;
+ } temp_thresh;
+ char mcia_pl[MLXSW_REG_MCIA_LEN] = {0};
+ char mtmp_pl[MLXSW_REG_MTMP_LEN];
+ char *eeprom_tmp;
+ bool qsfp, cmis;
+ int page;
+ int err;
+
+ mlxsw_reg_mtmp_pack(mtmp_pl, slot_index,
+ MLXSW_REG_MTMP_MODULE_INDEX_MIN + module, false,
+ false);
+ err = mlxsw_reg_query(core, MLXSW_REG(mtmp), mtmp_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mtmp_unpack(mtmp_pl, &module_temp, NULL, &module_crit,
+ &module_emerg, NULL);
+ if (!module_temp) {
+ *temp = 0;
+ return 0;
+ }
+
+ /* Validate if threshold reading is available through MTMP register,
+ * otherwise fallback to read through MCIA.
+ */
+ if (module_emerg) {
+ *temp = off == SFP_TEMP_HIGH_WARN ? module_crit : module_emerg;
+ return 0;
+ }
+
+ /* Read Free Side Device Temperature Thresholds from page 03h
+ * (MSB at lower byte address).
+ * Bytes:
+ * 128-129 - Temp High Alarm (SFP_TEMP_HIGH_ALARM);
+ * 130-131 - Temp Low Alarm (SFP_TEMP_LOW_ALARM);
+ * 132-133 - Temp High Warning (SFP_TEMP_HIGH_WARN);
+ * 134-135 - Temp Low Warning (SFP_TEMP_LOW_WARN);
+ */
+
+ /* Validate module identifier value. */
+ err = mlxsw_env_validate_cable_ident(core, slot_index, module, &qsfp,
+ &cmis);
+ if (err)
+ return err;
+
+ if (qsfp) {
+ /* For QSFP/CMIS module-defined thresholds are located in page
+ * 02h, otherwise in page 03h.
+ */
+ if (cmis)
+ page = MLXSW_REG_MCIA_TH_PAGE_CMIS_NUM;
+ else
+ page = MLXSW_REG_MCIA_TH_PAGE_NUM;
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, page,
+ MLXSW_REG_MCIA_TH_PAGE_OFF + off,
+ MLXSW_REG_MCIA_TH_ITEM_SIZE,
+ MLXSW_REG_MCIA_I2C_ADDR_LOW);
+ } else {
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0,
+ MLXSW_REG_MCIA_PAGE0_LO,
+ off, MLXSW_REG_MCIA_TH_ITEM_SIZE,
+ MLXSW_REG_MCIA_I2C_ADDR_HIGH);
+ }
+
+ err = mlxsw_reg_query(core, MLXSW_REG(mcia), mcia_pl);
+ if (err)
+ return err;
+
+ eeprom_tmp = mlxsw_reg_mcia_eeprom_data(mcia_pl);
+ memcpy(temp_thresh.buf, eeprom_tmp, MLXSW_REG_MCIA_TH_ITEM_SIZE);
+ *temp = temp_thresh.temp * 1000;
+
+ return 0;
+}
+
+int mlxsw_env_get_module_info(struct net_device *netdev,
+ struct mlxsw_core *mlxsw_core, u8 slot_index,
+ int module, struct ethtool_modinfo *modinfo)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ u8 module_info[MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE];
+ u16 offset = MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE;
+ u8 module_rev_id, module_id, diag_mon;
+ unsigned int read_size;
+ int err;
+
+ if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) {
+ netdev_err(netdev, "Cannot read EEPROM of module on an inactive line card\n");
+ return -EIO;
+ }
+
+ err = mlxsw_env_validate_module_type(mlxsw_core, slot_index, module);
+ if (err) {
+ netdev_err(netdev,
+			   "EEPROM is not equipped on port module type\n");
+ return err;
+ }
+
+ err = mlxsw_env_query_module_eeprom(mlxsw_core, slot_index, module, 0,
+ offset, module_info, false,
+ &read_size);
+ if (err)
+ return err;
+
+ if (read_size < offset)
+ return -EIO;
+
+ module_rev_id = module_info[MLXSW_REG_MCIA_EEPROM_MODULE_INFO_REV_ID];
+ module_id = module_info[MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID];
+
+ switch (module_id) {
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP:
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
+ break;
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS:
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28:
+ if (module_id == MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28 ||
+ module_rev_id >=
+ MLXSW_REG_MCIA_EEPROM_MODULE_INFO_REV_ID_8636) {
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
+ }
+ break;
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_SFP:
+ /* Verify if transceiver provides diagnostic monitoring page */
+ err = mlxsw_env_query_module_eeprom(mlxsw_core, slot_index,
+ module, SFP_DIAGMON, 1,
+ &diag_mon, false,
+ &read_size);
+ if (err)
+ return err;
+
+ if (read_size < 1)
+ return -EIO;
+
+ modinfo->type = ETH_MODULE_SFF_8472;
+ if (diag_mon)
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ else
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN / 2;
+ break;
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_DD:
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_OSFP:
+ /* Use SFF_8636 as base type. ethtool should recognize specific
+ * type through the identifier value.
+ */
+ modinfo->type = ETH_MODULE_SFF_8636;
+ /* Verify if module EEPROM is a flat memory. In case of flat
+ * memory only page 00h (0-255 bytes) can be read. Otherwise
+ * upper pages 01h and 02h can also be read. Upper pages 10h
+ * and 11h are currently not supported by the driver.
+ */
+ if (module_info[MLXSW_REG_MCIA_EEPROM_MODULE_INFO_TYPE_ID] &
+ MLXSW_REG_MCIA_EEPROM_CMIS_FLAT_MEMORY)
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ else
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_env_get_module_info);
+
+int mlxsw_env_get_module_eeprom(struct net_device *netdev,
+ struct mlxsw_core *mlxsw_core, u8 slot_index,
+ int module, struct ethtool_eeprom *ee,
+ u8 *data)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ int offset = ee->offset;
+ unsigned int read_size;
+ bool qsfp, cmis;
+ int i = 0;
+ int err;
+
+ if (!ee->len)
+ return -EINVAL;
+
+ if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) {
+ netdev_err(netdev, "Cannot read EEPROM of module on an inactive line card\n");
+ return -EIO;
+ }
+
+ memset(data, 0, ee->len);
+ /* Validate module identifier value. */
+ err = mlxsw_env_validate_cable_ident(mlxsw_core, slot_index, module,
+ &qsfp, &cmis);
+ if (err)
+ return err;
+
+ while (i < ee->len) {
+ err = mlxsw_env_query_module_eeprom(mlxsw_core, slot_index,
+ module, offset,
+ ee->len - i, data + i,
+ qsfp, &read_size);
+ if (err) {
+ netdev_err(netdev, "Eeprom query failed\n");
+ return err;
+ }
+
+ i += read_size;
+ offset += read_size;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_env_get_module_eeprom);
+
+static int mlxsw_env_mcia_status_process(const char *mcia_pl,
+ struct netlink_ext_ack *extack)
+{
+ u8 status = mlxsw_reg_mcia_status_get(mcia_pl);
+
+ switch (status) {
+ case MLXSW_REG_MCIA_STATUS_GOOD:
+ return 0;
+ case MLXSW_REG_MCIA_STATUS_NO_EEPROM_MODULE:
+ NL_SET_ERR_MSG_MOD(extack, "No response from module's EEPROM");
+ return -EIO;
+ case MLXSW_REG_MCIA_STATUS_MODULE_NOT_SUPPORTED:
+ NL_SET_ERR_MSG_MOD(extack, "Module type not supported by the device");
+ return -EOPNOTSUPP;
+ case MLXSW_REG_MCIA_STATUS_MODULE_NOT_CONNECTED:
+ NL_SET_ERR_MSG_MOD(extack, "No module present indication");
+ return -EIO;
+ case MLXSW_REG_MCIA_STATUS_I2C_ERROR:
+ NL_SET_ERR_MSG_MOD(extack, "Error occurred while trying to access module's EEPROM using I2C");
+ return -EIO;
+ case MLXSW_REG_MCIA_STATUS_MODULE_DISABLED:
+ NL_SET_ERR_MSG_MOD(extack, "Module is disabled");
+ return -EIO;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unknown error");
+ return -EIO;
+ }
+}
+
+int
+mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ u32 bytes_read = 0;
+ u16 device_addr;
+ int err;
+
+ if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot read EEPROM of module on an inactive line card");
+ return -EIO;
+ }
+
+ err = mlxsw_env_validate_module_type(mlxsw_core, slot_index, module);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "EEPROM is not equipped on port module type");
+ return err;
+ }
+
+ /* Offset cannot be larger than 2 * ETH_MODULE_EEPROM_PAGE_LEN */
+ device_addr = page->offset;
+
+ while (bytes_read < page->length) {
+ char mcia_pl[MLXSW_REG_MCIA_LEN];
+ char *eeprom_tmp;
+ u8 size;
+
+ size = min_t(u8, page->length - bytes_read,
+ MLXSW_REG_MCIA_EEPROM_SIZE);
+
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, page->page,
+ device_addr + bytes_read, size,
+ page->i2c_address);
+ mlxsw_reg_mcia_bank_number_set(mcia_pl, page->bank);
+
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcia), mcia_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to access module's EEPROM");
+ return err;
+ }
+
+ err = mlxsw_env_mcia_status_process(mcia_pl, extack);
+ if (err)
+ return err;
+
+ eeprom_tmp = mlxsw_reg_mcia_eeprom_data(mcia_pl);
+ memcpy(page->data + bytes_read, eeprom_tmp, size);
+ bytes_read += size;
+ }
+
+ return bytes_read;
+}
+EXPORT_SYMBOL(mlxsw_env_get_module_eeprom_by_page);
+
+static int mlxsw_env_module_reset(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module)
+{
+ char pmaos_pl[MLXSW_REG_PMAOS_LEN];
+
+ mlxsw_reg_pmaos_pack(pmaos_pl, slot_index, module);
+ mlxsw_reg_pmaos_rst_set(pmaos_pl, true);
+
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl);
+}
+
+int mlxsw_env_reset_module(struct net_device *netdev,
+ struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module, u32 *flags)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
+ u32 req = *flags;
+ int err;
+
+ if (!(req & ETH_RESET_PHY) &&
+ !(req & (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT)))
+ return 0;
+
+ if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) {
+ netdev_err(netdev, "Cannot reset module on an inactive line card\n");
+ return -EIO;
+ }
+
+ mutex_lock(&mlxsw_env->line_cards_lock);
+
+ err = __mlxsw_env_validate_module_type(mlxsw_core, slot_index, module);
+ if (err) {
+ netdev_err(netdev, "Reset module is not supported on port module type\n");
+ goto out;
+ }
+
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ if (module_info->num_ports_up) {
+ netdev_err(netdev, "Cannot reset module when ports using it are administratively up\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (module_info->num_ports_mapped > 1 &&
+ !(req & (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT))) {
+ netdev_err(netdev, "Cannot reset module without \"phy-shared\" flag when shared by multiple ports\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = mlxsw_env_module_reset(mlxsw_core, slot_index, module);
+ if (err) {
+ netdev_err(netdev, "Failed to reset module\n");
+ goto out;
+ }
+
+ *flags &= ~(ETH_RESET_PHY | (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT));
+
+out:
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_env_reset_module);
+
+int
+mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module,
+ struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
+ char mcion_pl[MLXSW_REG_MCION_LEN];
+ u32 status_bits;
+ int err = 0;
+
+ mutex_lock(&mlxsw_env->line_cards_lock);
+
+ err = __mlxsw_env_validate_module_type(mlxsw_core, slot_index, module);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Power mode is not supported on port module type");
+ goto out;
+ }
+
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ params->policy = module_info->power_mode_policy;
+
+ /* Avoid accessing an inactive line card, as it will result in an error. */
+ if (!__mlxsw_env_linecard_is_active(mlxsw_env, slot_index))
+ goto out;
+
+ mlxsw_reg_mcion_pack(mcion_pl, slot_index, module);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcion), mcion_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to retrieve module's power mode");
+ goto out;
+ }
+
+ status_bits = mlxsw_reg_mcion_module_status_bits_get(mcion_pl);
+ if (!(status_bits & MLXSW_REG_MCION_MODULE_STATUS_BITS_PRESENT_MASK))
+ goto out;
+
+ if (status_bits & MLXSW_REG_MCION_MODULE_STATUS_BITS_LOW_POWER_MASK)
+ params->mode = ETHTOOL_MODULE_POWER_MODE_LOW;
+ else
+ params->mode = ETHTOOL_MODULE_POWER_MODE_HIGH;
+
+out:
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_env_get_module_power_mode);
+
+static int mlxsw_env_module_enable_set(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module, bool enable)
+{
+ enum mlxsw_reg_pmaos_admin_status admin_status;
+ char pmaos_pl[MLXSW_REG_PMAOS_LEN];
+
+ mlxsw_reg_pmaos_pack(pmaos_pl, slot_index, module);
+ admin_status = enable ? MLXSW_REG_PMAOS_ADMIN_STATUS_ENABLED :
+ MLXSW_REG_PMAOS_ADMIN_STATUS_DISABLED;
+ mlxsw_reg_pmaos_admin_status_set(pmaos_pl, admin_status);
+ mlxsw_reg_pmaos_ase_set(pmaos_pl, true);
+
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl);
+}
+
+static int mlxsw_env_module_low_power_set(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module,
+ bool low_power)
+{
+ u16 eeprom_override_mask, eeprom_override;
+ char pmmp_pl[MLXSW_REG_PMMP_LEN];
+
+ mlxsw_reg_pmmp_pack(pmmp_pl, slot_index, module);
+ mlxsw_reg_pmmp_sticky_set(pmmp_pl, true);
+ /* Mask all the bits except low power mode. */
+ eeprom_override_mask = ~MLXSW_REG_PMMP_EEPROM_OVERRIDE_LOW_POWER_MASK;
+ mlxsw_reg_pmmp_eeprom_override_mask_set(pmmp_pl, eeprom_override_mask);
+ eeprom_override = low_power ? MLXSW_REG_PMMP_EEPROM_OVERRIDE_LOW_POWER_MASK :
+ 0;
+ mlxsw_reg_pmmp_eeprom_override_set(pmmp_pl, eeprom_override);
+
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmmp), pmmp_pl);
+}
+
+static int __mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module,
+ bool low_power,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ int err;
+
+ /* Avoid accessing an inactive line card, as it will result in an error.
+ * Cached configuration will be applied by mlxsw_env_got_active() when
+ * line card becomes active.
+ */
+ if (!__mlxsw_env_linecard_is_active(mlxsw_env, slot_index))
+ return 0;
+
+ err = mlxsw_env_module_enable_set(mlxsw_core, slot_index, module, false);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to disable module");
+ return err;
+ }
+
+ err = mlxsw_env_module_low_power_set(mlxsw_core, slot_index, module,
+ low_power);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set module's power mode");
+ goto err_module_low_power_set;
+ }
+
+ err = mlxsw_env_module_enable_set(mlxsw_core, slot_index, module, true);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to enable module");
+ goto err_module_enable_set;
+ }
+
+ return 0;
+
+err_module_enable_set:
+ mlxsw_env_module_low_power_set(mlxsw_core, slot_index, module,
+ !low_power);
+err_module_low_power_set:
+ mlxsw_env_module_enable_set(mlxsw_core, slot_index, module, true);
+ return err;
+}
+
+static int
+mlxsw_env_set_module_power_mode_apply(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module,
+ enum ethtool_module_power_mode_policy policy,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_env_module_info *module_info;
+ bool low_power;
+ int err = 0;
+
+ err = __mlxsw_env_validate_module_type(mlxsw_core, slot_index, module);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Power mode set is not supported on port module type");
+ goto out;
+ }
+
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ if (module_info->power_mode_policy == policy)
+ goto out;
+
+ /* If any ports are up, we are already in high power mode. */
+ if (module_info->num_ports_up)
+ goto out_set_policy;
+
+ low_power = policy == ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO;
+ err = __mlxsw_env_set_module_power_mode(mlxsw_core, slot_index, module,
+ low_power, extack);
+ if (err)
+ goto out;
+
+out_set_policy:
+ module_info->power_mode_policy = policy;
+out:
+ return err;
+}
+
+int
+mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module,
+ enum ethtool_module_power_mode_policy policy,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ int err;
+
+ if (policy != ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH &&
+ policy != ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported power mode policy");
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ err = mlxsw_env_set_module_power_mode_apply(mlxsw_core, slot_index,
+ module, policy, extack);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_env_set_module_power_mode);
+
+static int mlxsw_env_module_has_temp_sensor(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module,
+ bool *p_has_temp_sensor)
+{
+ char mtbr_pl[MLXSW_REG_MTBR_LEN];
+ u16 temp;
+ int err;
+
+ mlxsw_reg_mtbr_pack(mtbr_pl, slot_index,
+ MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, 1);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mtbr), mtbr_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mtbr_temp_unpack(mtbr_pl, 0, &temp, NULL);
+
+ switch (temp) {
+ case MLXSW_REG_MTBR_BAD_SENS_INFO:
+ case MLXSW_REG_MTBR_NO_CONN:
+ case MLXSW_REG_MTBR_NO_TEMP_SENS:
+ case MLXSW_REG_MTBR_INDEX_NA:
+ *p_has_temp_sensor = false;
+ break;
+ default:
+ *p_has_temp_sensor = temp ? true : false;
+ }
+ return 0;
+}
+
+static int
+mlxsw_env_temp_event_set(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u16 sensor_index, bool enable)
+{
+ char mtmp_pl[MLXSW_REG_MTMP_LEN] = {0};
+ enum mlxsw_reg_mtmp_tee tee;
+ int err, threshold_hi;
+
+ mlxsw_reg_mtmp_slot_index_set(mtmp_pl, slot_index);
+ mlxsw_reg_mtmp_sensor_index_set(mtmp_pl, sensor_index);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mtmp), mtmp_pl);
+ if (err)
+ return err;
+
+ if (enable) {
+ err = mlxsw_env_module_temp_thresholds_get(mlxsw_core,
+ slot_index,
+ sensor_index -
+ MLXSW_REG_MTMP_MODULE_INDEX_MIN,
+ SFP_TEMP_HIGH_WARN,
+ &threshold_hi);
+ /* In case it is not possible to query the module's threshold,
+ * use the default value.
+ */
+ if (err)
+ threshold_hi = MLXSW_REG_MTMP_THRESH_HI;
+ else
+ /* mlxsw_env_module_temp_thresholds_get() multiplies
+ * degrees Celsius by 1000, whereas MTMP expects the
+ * temperature in units of 0.125 degrees Celsius.
+ * Convert threshold_hi accordingly.
+ */
+ threshold_hi = threshold_hi / 1000 * 8;
+
+ mlxsw_reg_mtmp_temperature_threshold_hi_set(mtmp_pl, threshold_hi);
+ mlxsw_reg_mtmp_temperature_threshold_lo_set(mtmp_pl, threshold_hi -
+ MLXSW_REG_MTMP_HYSTERESIS_TEMP);
+ }
+ tee = enable ? MLXSW_REG_MTMP_TEE_GENERATE_EVENT : MLXSW_REG_MTMP_TEE_NO_EVENT;
+ mlxsw_reg_mtmp_tee_set(mtmp_pl, tee);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtmp), mtmp_pl);
+}
+
+static int mlxsw_env_module_temp_event_enable(struct mlxsw_core *mlxsw_core,
+ u8 slot_index)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ int i, err, sensor_index;
+ bool has_temp_sensor;
+
+ for (i = 0; i < mlxsw_env->line_cards[slot_index]->module_count; i++) {
+ err = mlxsw_env_module_has_temp_sensor(mlxsw_core, slot_index,
+ i, &has_temp_sensor);
+ if (err)
+ return err;
+
+ if (!has_temp_sensor)
+ continue;
+
+ sensor_index = i + MLXSW_REG_MTMP_MODULE_INDEX_MIN;
+ err = mlxsw_env_temp_event_set(mlxsw_core, slot_index,
+ sensor_index, true);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+struct mlxsw_env_module_temp_warn_event {
+ struct mlxsw_env *mlxsw_env;
+ char mtwe_pl[MLXSW_REG_MTWE_LEN];
+ struct work_struct work;
+};
+
+static void mlxsw_env_mtwe_event_work(struct work_struct *work)
+{
+ struct mlxsw_env_module_temp_warn_event *event;
+ struct mlxsw_env_module_info *module_info;
+ struct mlxsw_env *mlxsw_env;
+ int i, sensor_warning;
+ bool is_overheat;
+
+ event = container_of(work, struct mlxsw_env_module_temp_warn_event,
+ work);
+ mlxsw_env = event->mlxsw_env;
+
+ for (i = 0; i < mlxsw_env->max_module_count; i++) {
+ /* Sensor indexes 64-127 are mapped to the port modules
+ * sequentially (module 0 is mapped to sensor_index 64,
+ * module 1 to sensor_index 65 and so on)
+ */
+ sensor_warning =
+ mlxsw_reg_mtwe_sensor_warning_get(event->mtwe_pl,
+ i + MLXSW_REG_MTMP_MODULE_INDEX_MIN);
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ /* MTWE only supports main board. */
+ module_info = mlxsw_env_module_info_get(mlxsw_env->core, 0, i);
+ is_overheat = module_info->is_overheat;
+
+ if ((is_overheat && sensor_warning) ||
+ (!is_overheat && !sensor_warning)) {
+ /* Current state is "warning" and MTWE still reports
+ * warning OR current state is "no warning" and MTWE
+ * does not report warning.
+ */
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+ continue;
+ } else if (is_overheat && !sensor_warning) {
+ /* MTWE reports "no warning", turn is_overheat off.
+ */
+ module_info->is_overheat = false;
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+ } else {
+ /* Current state is "no warning" and MTWE reports
+ * "warning", increase the counter and turn is_overheat
+ * on.
+ */
+ module_info->is_overheat = true;
+ module_info->module_overheat_counter++;
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+ }
+ }
+
+ kfree(event);
+}
+
+static void
+mlxsw_env_mtwe_listener_func(const struct mlxsw_reg_info *reg, char *mtwe_pl,
+ void *priv)
+{
+ struct mlxsw_env_module_temp_warn_event *event;
+ struct mlxsw_env *mlxsw_env = priv;
+
+ event = kmalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return;
+
+ event->mlxsw_env = mlxsw_env;
+ memcpy(event->mtwe_pl, mtwe_pl, MLXSW_REG_MTWE_LEN);
+ INIT_WORK(&event->work, mlxsw_env_mtwe_event_work);
+ mlxsw_core_schedule_work(&event->work);
+}
+
+static const struct mlxsw_listener mlxsw_env_temp_warn_listener =
+ MLXSW_CORE_EVENTL(mlxsw_env_mtwe_listener_func, MTWE);
+
+static int mlxsw_env_temp_warn_event_register(struct mlxsw_core *mlxsw_core)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+
+ return mlxsw_core_trap_register(mlxsw_core,
+ &mlxsw_env_temp_warn_listener,
+ mlxsw_env);
+}
+
+static void mlxsw_env_temp_warn_event_unregister(struct mlxsw_env *mlxsw_env)
+{
+ mlxsw_core_trap_unregister(mlxsw_env->core,
+ &mlxsw_env_temp_warn_listener, mlxsw_env);
+}
+
+struct mlxsw_env_module_plug_unplug_event {
+ struct mlxsw_env *mlxsw_env;
+ u8 slot_index;
+ u8 module;
+ struct work_struct work;
+};
+
+static void mlxsw_env_pmpe_event_work(struct work_struct *work)
+{
+ struct mlxsw_env_module_plug_unplug_event *event;
+ struct mlxsw_env_module_info *module_info;
+ struct mlxsw_env *mlxsw_env;
+ bool has_temp_sensor;
+ u16 sensor_index;
+ int err;
+
+ event = container_of(work, struct mlxsw_env_module_plug_unplug_event,
+ work);
+ mlxsw_env = event->mlxsw_env;
+
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ module_info = mlxsw_env_module_info_get(mlxsw_env->core,
+ event->slot_index,
+ event->module);
+ module_info->is_overheat = false;
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+
+ err = mlxsw_env_module_has_temp_sensor(mlxsw_env->core,
+ event->slot_index,
+ event->module,
+ &has_temp_sensor);
+ /* Do not configure events for modules without sensors or with faulty
+ * sensors, because the firmware returns errors for them.
+ */
+ if (err)
+ goto out;
+
+ if (!has_temp_sensor)
+ goto out;
+
+ sensor_index = event->module + MLXSW_REG_MTMP_MODULE_INDEX_MIN;
+ mlxsw_env_temp_event_set(mlxsw_env->core, event->slot_index,
+ sensor_index, true);
+
+out:
+ kfree(event);
+}
+
+static void
+mlxsw_env_pmpe_listener_func(const struct mlxsw_reg_info *reg, char *pmpe_pl,
+ void *priv)
+{
+ u8 slot_index = mlxsw_reg_pmpe_slot_index_get(pmpe_pl);
+ struct mlxsw_env_module_plug_unplug_event *event;
+ enum mlxsw_reg_pmpe_module_status module_status;
+ u8 module = mlxsw_reg_pmpe_module_get(pmpe_pl);
+ struct mlxsw_env *mlxsw_env = priv;
+
+ if (WARN_ON_ONCE(module >= mlxsw_env->max_module_count ||
+ slot_index >= mlxsw_env->num_of_slots))
+ return;
+
+ module_status = mlxsw_reg_pmpe_module_status_get(pmpe_pl);
+ if (module_status != MLXSW_REG_PMPE_MODULE_STATUS_PLUGGED_ENABLED)
+ return;
+
+ event = kmalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return;
+
+ event->mlxsw_env = mlxsw_env;
+ event->slot_index = slot_index;
+ event->module = module;
+ INIT_WORK(&event->work, mlxsw_env_pmpe_event_work);
+ mlxsw_core_schedule_work(&event->work);
+}
+
+static const struct mlxsw_listener mlxsw_env_module_plug_listener =
+ MLXSW_CORE_EVENTL(mlxsw_env_pmpe_listener_func, PMPE);
+
+static int
+mlxsw_env_module_plug_event_register(struct mlxsw_core *mlxsw_core)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+
+ return mlxsw_core_trap_register(mlxsw_core,
+ &mlxsw_env_module_plug_listener,
+ mlxsw_env);
+}
+
+static void
+mlxsw_env_module_plug_event_unregister(struct mlxsw_env *mlxsw_env)
+{
+ mlxsw_core_trap_unregister(mlxsw_env->core,
+ &mlxsw_env_module_plug_listener,
+ mlxsw_env);
+}
+
+static int
+mlxsw_env_module_oper_state_event_enable(struct mlxsw_core *mlxsw_core,
+ u8 slot_index)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ int i, err;
+
+ for (i = 0; i < mlxsw_env->line_cards[slot_index]->module_count; i++) {
+ char pmaos_pl[MLXSW_REG_PMAOS_LEN];
+
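+		/* Request the firmware to generate an event whenever the
+		 * module's operational state changes.
+		 */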
+ mlxsw_reg_pmaos_pack(pmaos_pl, slot_index, i);
+ mlxsw_reg_pmaos_e_set(pmaos_pl,
+ MLXSW_REG_PMAOS_E_GENERATE_EVENT);
+ mlxsw_reg_pmaos_ee_set(pmaos_pl, true);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+int
+mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module, u64 *p_counter)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
+
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ *p_counter = module_info->module_overheat_counter;
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_env_module_overheat_counter_get);
+
+void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
+
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ module_info->num_ports_mapped++;
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+}
+EXPORT_SYMBOL(mlxsw_env_module_port_map);
+
+void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
+
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ module_info->num_ports_mapped--;
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+}
+EXPORT_SYMBOL(mlxsw_env_module_port_unmap);
+
+int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
+ int err = 0;
+
+ mutex_lock(&mlxsw_env->line_cards_lock);
+
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ if (module_info->power_mode_policy !=
+ ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO)
+ goto out_inc;
+
+ if (module_info->num_ports_up != 0)
+ goto out_inc;
+
+ /* Transition to high power mode following the first port using the module
+ * being put administratively up.
+ */
+ err = __mlxsw_env_set_module_power_mode(mlxsw_core, slot_index, module,
+ false, NULL);
+ if (err)
+ goto out_unlock;
+
+out_inc:
+ module_info->num_ports_up++;
+out_unlock:
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_env_module_port_up);
+
+void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
+
+ mutex_lock(&mlxsw_env->line_cards_lock);
+
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ module_info->num_ports_up--;
+
+ if (module_info->power_mode_policy !=
+ ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO)
+ goto out_unlock;
+
+ if (module_info->num_ports_up != 0)
+ goto out_unlock;
+
+ /* Transition to low power mode following the last port using the module
+ * being put administratively down.
+ */
+ __mlxsw_env_set_module_power_mode(mlxsw_core, slot_index, module, true,
+ NULL);
+
+out_unlock:
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+}
+EXPORT_SYMBOL(mlxsw_env_module_port_down);
+
+static int mlxsw_env_line_cards_alloc(struct mlxsw_env *env)
+{
+ struct mlxsw_env_module_info *module_info;
+ int i, j;
+
+ for (i = 0; i < env->num_of_slots; i++) {
+ env->line_cards[i] = kzalloc(struct_size(env->line_cards[i],
+ module_info,
+ env->max_module_count),
+ GFP_KERNEL);
+ if (!env->line_cards[i])
+ goto kzalloc_err;
+
+ /* Firmware defaults to high power mode policy where modules
+ * are transitioned to high power mode following plug-in.
+ */
+ for (j = 0; j < env->max_module_count; j++) {
+ module_info = &env->line_cards[i]->module_info[j];
+ module_info->power_mode_policy =
+ ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH;
+ }
+ }
+
+ return 0;
+
+kzalloc_err:
+ for (i--; i >= 0; i--)
+ kfree(env->line_cards[i]);
+ return -ENOMEM;
+}
+
+static void mlxsw_env_line_cards_free(struct mlxsw_env *env)
+{
+ int i = env->num_of_slots;
+
+ for (i--; i >= 0; i--)
+ kfree(env->line_cards[i]);
+}
+
+static int
+mlxsw_env_module_event_enable(struct mlxsw_env *mlxsw_env, u8 slot_index)
+{
+ int err;
+
+ err = mlxsw_env_module_oper_state_event_enable(mlxsw_env->core,
+ slot_index);
+ if (err)
+ return err;
+
+ err = mlxsw_env_module_temp_event_enable(mlxsw_env->core, slot_index);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void
+mlxsw_env_module_event_disable(struct mlxsw_env *mlxsw_env, u8 slot_index)
+{
+}
+
+static int
+mlxsw_env_module_type_set(struct mlxsw_core *mlxsw_core, u8 slot_index)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ int i;
+
+ for (i = 0; i < mlxsw_env->line_cards[slot_index]->module_count; i++) {
+ struct mlxsw_env_module_info *module_info;
+ char pmtm_pl[MLXSW_REG_PMTM_LEN];
+ int err;
+
+ mlxsw_reg_pmtm_pack(pmtm_pl, slot_index, i);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtm), pmtm_pl);
+ if (err)
+ return err;
+
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index,
+ i);
+ module_info->type = mlxsw_reg_pmtm_module_type_get(pmtm_pl);
+ }
+
+ return 0;
+}
+
+static void
+mlxsw_env_linecard_modules_power_mode_apply(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_env *env,
+ u8 slot_index)
+{
+ int i;
+
+ for (i = 0; i < env->line_cards[slot_index]->module_count; i++) {
+ enum ethtool_module_power_mode_policy policy;
+ struct mlxsw_env_module_info *module_info;
+ struct netlink_ext_ack extack;
+ int err;
+
+ module_info = &env->line_cards[slot_index]->module_info[i];
+ policy = module_info->power_mode_policy;
+ err = mlxsw_env_set_module_power_mode_apply(mlxsw_core,
+ slot_index, i,
+ policy, &extack);
+ if (err)
+ dev_err(env->bus_info->dev, "%s\n", extack._msg);
+ }
+}
+
+static void
+mlxsw_env_got_active(struct mlxsw_core *mlxsw_core, u8 slot_index, void *priv)
+{
+ struct mlxsw_env *mlxsw_env = priv;
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ int err;
+
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ if (__mlxsw_env_linecard_is_active(mlxsw_env, slot_index))
+ goto out_unlock;
+
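+	/* Query the number of port modules present on the line card that
+	 * just became active.
+	 */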
+ mlxsw_reg_mgpir_pack(mgpir_pl, slot_index);
+ err = mlxsw_reg_query(mlxsw_env->core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ goto out_unlock;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL,
+ &mlxsw_env->line_cards[slot_index]->module_count,
+ NULL);
+
+ err = mlxsw_env_module_event_enable(mlxsw_env, slot_index);
+ if (err) {
+ dev_err(mlxsw_env->bus_info->dev, "Failed to enable port module events for line card in slot %d\n",
+ slot_index);
+ goto err_mlxsw_env_module_event_enable;
+ }
+ err = mlxsw_env_module_type_set(mlxsw_env->core, slot_index);
+ if (err) {
+ dev_err(mlxsw_env->bus_info->dev, "Failed to set modules' type for line card in slot %d\n",
+ slot_index);
+ goto err_type_set;
+ }
+
+ mlxsw_env->line_cards[slot_index]->active = true;
+ /* Apply power mode policy. */
+ mlxsw_env_linecard_modules_power_mode_apply(mlxsw_core, mlxsw_env,
+ slot_index);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+
+ return;
+
+err_type_set:
+ mlxsw_env_module_event_disable(mlxsw_env, slot_index);
+err_mlxsw_env_module_event_enable:
+out_unlock:
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+}
+
+static void
+mlxsw_env_got_inactive(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ void *priv)
+{
+ struct mlxsw_env *mlxsw_env = priv;
+
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ if (!__mlxsw_env_linecard_is_active(mlxsw_env, slot_index))
+ goto out_unlock;
+ mlxsw_env->line_cards[slot_index]->active = false;
+ mlxsw_env_module_event_disable(mlxsw_env, slot_index);
+ mlxsw_env->line_cards[slot_index]->module_count = 0;
+out_unlock:
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+}
+
+static struct mlxsw_linecards_event_ops mlxsw_env_event_ops = {
+ .got_active = mlxsw_env_got_active,
+ .got_inactive = mlxsw_env_got_inactive,
+};
+
+int mlxsw_env_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *bus_info,
+ struct mlxsw_env **p_env)
+{
+ u8 module_count, num_of_slots, max_module_count;
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ struct mlxsw_env *env;
+ int err;
+
+ mlxsw_reg_mgpir_pack(mgpir_pl, 0);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, &module_count,
+ &num_of_slots);
+ /* If the system is modular, get the maximum number of modules per-slot.
+ * Otherwise, get the maximum number of modules on the main board.
+ */
+ max_module_count = num_of_slots ?
+ mlxsw_reg_mgpir_max_modules_per_slot_get(mgpir_pl) :
+ module_count;
+
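+	/* Index 0 is used for the main board; line cards occupy indexes
+	 * 1..num_of_slots, hence the extra entry.
+	 */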
+ env = kzalloc(struct_size(env, line_cards, num_of_slots + 1),
+ GFP_KERNEL);
+ if (!env)
+ return -ENOMEM;
+
+ env->core = mlxsw_core;
+ env->bus_info = bus_info;
+ env->num_of_slots = num_of_slots + 1;
+ env->max_module_count = max_module_count;
+ err = mlxsw_env_line_cards_alloc(env);
+ if (err)
+ goto err_mlxsw_env_line_cards_alloc;
+
+ mutex_init(&env->line_cards_lock);
+ *p_env = env;
+
+ err = mlxsw_linecards_event_ops_register(env->core,
+ &mlxsw_env_event_ops, env);
+ if (err)
+ goto err_linecards_event_ops_register;
+
+ err = mlxsw_env_temp_warn_event_register(mlxsw_core);
+ if (err)
+ goto err_temp_warn_event_register;
+
+ err = mlxsw_env_module_plug_event_register(mlxsw_core);
+ if (err)
+ goto err_module_plug_event_register;
+
+ /* Set 'module_count' only for the main board. The actual count for a
+ * line card is set after the line card becomes active.
+ */
+ env->line_cards[0]->module_count = num_of_slots ? 0 : module_count;
+ /* Enable events only for the main board. Line card events are
+ * configured only after the line card becomes active. Before that,
+ * access to modules on line cards is not allowed.
+ */
+ err = mlxsw_env_module_event_enable(env, 0);
+ if (err)
+ goto err_mlxsw_env_module_event_enable;
+
+ err = mlxsw_env_module_type_set(mlxsw_core, 0);
+ if (err)
+ goto err_type_set;
+
+ env->line_cards[0]->active = true;
+
+ return 0;
+
+err_type_set:
+ mlxsw_env_module_event_disable(env, 0);
+err_mlxsw_env_module_event_enable:
+ mlxsw_env_module_plug_event_unregister(env);
+err_module_plug_event_register:
+ mlxsw_env_temp_warn_event_unregister(env);
+err_temp_warn_event_register:
+ mlxsw_linecards_event_ops_unregister(env->core,
+ &mlxsw_env_event_ops, env);
+err_linecards_event_ops_register:
+ mutex_destroy(&env->line_cards_lock);
+ mlxsw_env_line_cards_free(env);
+err_mlxsw_env_line_cards_alloc:
+ kfree(env);
+ return err;
+}
+
+void mlxsw_env_fini(struct mlxsw_env *env)
+{
+ env->line_cards[0]->active = false;
+ mlxsw_env_module_event_disable(env, 0);
+ mlxsw_env_module_plug_event_unregister(env);
+ /* Make sure there is no more event work scheduled. */
+ mlxsw_core_flush_owq();
+ mlxsw_env_temp_warn_event_unregister(env);
+ mlxsw_linecards_event_ops_unregister(env->core,
+ &mlxsw_env_event_ops, env);
+ mutex_destroy(&env->line_cards_lock);
+ mlxsw_env_line_cards_free(env);
+ kfree(env);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.h b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
new file mode 100644
index 000000000..a197e3ae0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_CORE_ENV_H
+#define _MLXSW_CORE_ENV_H
+
+#include <linux/ethtool.h>
+
+struct ethtool_modinfo;
+struct ethtool_eeprom;
+
+int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core,
+ u8 slot_index, int module, int off,
+ int *temp);
+
+int mlxsw_env_get_module_info(struct net_device *netdev,
+ struct mlxsw_core *mlxsw_core, u8 slot_index,
+ int module, struct ethtool_modinfo *modinfo);
+
+int mlxsw_env_get_module_eeprom(struct net_device *netdev,
+ struct mlxsw_core *mlxsw_core, u8 slot_index,
+ int module, struct ethtool_eeprom *ee,
+ u8 *data);
+
+int
+mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack);
+
+int mlxsw_env_reset_module(struct net_device *netdev,
+ struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module, u32 *flags);
+
+int
+mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module,
+ struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack);
+
+int
+mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module,
+ enum ethtool_module_power_mode_policy policy,
+ struct netlink_ext_ack *extack);
+
+int
+mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module, u64 *p_counter);
+
+void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module);
+
+void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module);
+
+int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module);
+
+void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module);
+
+int mlxsw_env_init(struct mlxsw_core *core,
+ const struct mlxsw_bus_info *bus_info,
+ struct mlxsw_env **p_env);
+void mlxsw_env_fini(struct mlxsw_env *env);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
new file mode 100644
index 000000000..0fd290d77
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -0,0 +1,941 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/hwmon.h>
+#include <linux/err.h>
+#include <linux/sfp.h>
+
+#include "core.h"
+#include "core_env.h"
+
+#define MLXSW_HWMON_SENSORS_MAX_COUNT 64
+#define MLXSW_HWMON_MODULES_MAX_COUNT 64
+#define MLXSW_HWMON_GEARBOXES_MAX_COUNT 32
+
+#define MLXSW_HWMON_ATTR_PER_SENSOR 3
+#define MLXSW_HWMON_ATTR_PER_MODULE 7
+#define MLXSW_HWMON_ATTR_PER_GEARBOX 4
+#define MLXSW_HWMON_DEV_NAME_LEN_MAX 16
+
+#define MLXSW_HWMON_ATTR_COUNT (MLXSW_HWMON_SENSORS_MAX_COUNT * MLXSW_HWMON_ATTR_PER_SENSOR + \
+ MLXSW_HWMON_MODULES_MAX_COUNT * MLXSW_HWMON_ATTR_PER_MODULE + \
+ MLXSW_HWMON_GEARBOXES_MAX_COUNT * MLXSW_HWMON_ATTR_PER_GEARBOX + \
+ MLXSW_MFCR_TACHOS_MAX + MLXSW_MFCR_PWMS_MAX)
+
+struct mlxsw_hwmon_attr {
+ struct device_attribute dev_attr;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev;
+ unsigned int type_index;
+ char name[32];
+};
+
+static int mlxsw_hwmon_get_attr_index(int index, int count)
+{
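+	/* Attribute indexes past the ASIC and module sensor count belong to
+	 * gearbox sensors; remap them into the MTMP gearbox sensor index
+	 * range.
+	 */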
+ if (index >= count)
+ return index % count + MLXSW_REG_MTMP_GBOX_INDEX_MIN;
+
+ return index;
+}
+
+struct mlxsw_hwmon_dev {
+ char name[MLXSW_HWMON_DEV_NAME_LEN_MAX];
+ struct mlxsw_hwmon *hwmon;
+ struct device *hwmon_dev;
+ struct attribute_group group;
+ const struct attribute_group *groups[2];
+ struct attribute *attrs[MLXSW_HWMON_ATTR_COUNT + 1];
+ struct mlxsw_hwmon_attr hwmon_attrs[MLXSW_HWMON_ATTR_COUNT];
+ unsigned int attrs_count;
+ u8 sensor_count;
+ u8 module_sensor_max;
+ u8 slot_index;
+ bool active;
+};
+
+struct mlxsw_hwmon {
+ struct mlxsw_core *core;
+ const struct mlxsw_bus_info *bus_info;
+ struct mlxsw_hwmon_dev line_cards[];
+};
+
+static ssize_t mlxsw_hwmon_temp_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
+ container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
+ char mtmp_pl[MLXSW_REG_MTMP_LEN];
+ int temp, index;
+ int err;
+
+ index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index,
+ mlxsw_hwmon_dev->module_sensor_max);
+ mlxsw_reg_mtmp_pack(mtmp_pl, mlxsw_hwmon_dev->slot_index, index, false,
+ false);
+ err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
+ if (err) {
+ dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query temp sensor\n");
+ return err;
+ }
+ mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL, NULL, NULL);
+ return sprintf(buf, "%d\n", temp);
+}
+
+static ssize_t mlxsw_hwmon_temp_max_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
+ container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
+ char mtmp_pl[MLXSW_REG_MTMP_LEN];
+ int temp_max, index;
+ int err;
+
+ index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index,
+ mlxsw_hwmon_dev->module_sensor_max);
+ mlxsw_reg_mtmp_pack(mtmp_pl, mlxsw_hwmon_dev->slot_index, index, false,
+ false);
+ err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
+ if (err) {
+ dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query temp sensor\n");
+ return err;
+ }
+ mlxsw_reg_mtmp_unpack(mtmp_pl, NULL, &temp_max, NULL, NULL, NULL);
+ return sprintf(buf, "%d\n", temp_max);
+}
+
+static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
+ container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
+ char mtmp_pl[MLXSW_REG_MTMP_LEN];
+ unsigned long val;
+ int index;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+ if (val != 1)
+ return -EINVAL;
+
+ index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index,
+ mlxsw_hwmon_dev->module_sensor_max);
+
+ mlxsw_reg_mtmp_slot_index_set(mtmp_pl, mlxsw_hwmon_dev->slot_index);
+ mlxsw_reg_mtmp_sensor_index_set(mtmp_pl, index);
+ err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mtmp_mte_set(mtmp_pl, true);
+ mlxsw_reg_mtmp_mtr_set(mtmp_pl, true);
+ err = mlxsw_reg_write(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
+ if (err) {
+ dev_err(mlxsw_hwmon->bus_info->dev, "Failed to reset temp sensor history\n");
+ return err;
+ }
+ return len;
+}
+
+static ssize_t mlxsw_hwmon_fan_rpm_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
+ container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
+ char mfsm_pl[MLXSW_REG_MFSM_LEN];
+ int err;
+
+ mlxsw_reg_mfsm_pack(mfsm_pl, mlxsw_hwmon_attr->type_index);
+ err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mfsm), mfsm_pl);
+ if (err) {
+ dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query fan\n");
+ return err;
+ }
+ return sprintf(buf, "%u\n", mlxsw_reg_mfsm_rpm_get(mfsm_pl));
+}
+
+static ssize_t mlxsw_hwmon_fan_fault_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
+ container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
+ char fore_pl[MLXSW_REG_FORE_LEN];
+ bool fault;
+ int err;
+
+ err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(fore), fore_pl);
+ if (err) {
+ dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query fan\n");
+ return err;
+ }
+ mlxsw_reg_fore_unpack(fore_pl, mlxsw_hwmon_attr->type_index, &fault);
+
+ return sprintf(buf, "%u\n", fault);
+}
+
+static ssize_t mlxsw_hwmon_pwm_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
+ container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
+ char mfsc_pl[MLXSW_REG_MFSC_LEN];
+ int err;
+
+ mlxsw_reg_mfsc_pack(mfsc_pl, mlxsw_hwmon_attr->type_index, 0);
+ err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mfsc), mfsc_pl);
+ if (err) {
+ dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query PWM\n");
+ return err;
+ }
+ return sprintf(buf, "%u\n",
+ mlxsw_reg_mfsc_pwm_duty_cycle_get(mfsc_pl));
+}
+
+static ssize_t mlxsw_hwmon_pwm_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
+ container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
+ char mfsc_pl[MLXSW_REG_MFSC_LEN];
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+ if (val > 255)
+ return -EINVAL;
+
+ mlxsw_reg_mfsc_pack(mfsc_pl, mlxsw_hwmon_attr->type_index, val);
+ err = mlxsw_reg_write(mlxsw_hwmon->core, MLXSW_REG(mfsc), mfsc_pl);
+ if (err) {
+ dev_err(mlxsw_hwmon->bus_info->dev, "Failed to write PWM\n");
+ return err;
+ }
+ return len;
+}
+
+static int mlxsw_hwmon_module_temp_get(struct device *dev,
+ struct device_attribute *attr,
+ int *p_temp)
+{
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
+ container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
+ char mtmp_pl[MLXSW_REG_MTMP_LEN];
+ u8 module;
+ int err;
+
+ module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count;
+ mlxsw_reg_mtmp_pack(mtmp_pl, mlxsw_hwmon_dev->slot_index,
+ MLXSW_REG_MTMP_MODULE_INDEX_MIN + module, false,
+ false);
+ err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
+ if (err) {
+ dev_err(dev, "Failed to query module temperature\n");
+ return err;
+ }
+ mlxsw_reg_mtmp_unpack(mtmp_pl, p_temp, NULL, NULL, NULL, NULL);
+
+ return 0;
+}
+
+static ssize_t mlxsw_hwmon_module_temp_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int err, temp;
+
+ err = mlxsw_hwmon_module_temp_get(dev, attr, &temp);
+ if (err)
+ return err;
+
+ return sprintf(buf, "%d\n", temp);
+}
+
+static ssize_t mlxsw_hwmon_module_temp_fault_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
+ container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
+ char mtbr_pl[MLXSW_REG_MTBR_LEN] = {0};
+ u8 module, fault;
+ u16 temp;
+ int err;
+
+ module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count;
+ mlxsw_reg_mtbr_pack(mtbr_pl, mlxsw_hwmon_dev->slot_index,
+ MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, 1);
+ err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtbr), mtbr_pl);
+ if (err) {
+ dev_err(dev, "Failed to query module temperature sensor\n");
+ return err;
+ }
+
+ mlxsw_reg_mtbr_temp_unpack(mtbr_pl, 0, &temp, NULL);
+
+ /* Derive the fault state from the reported temperature value. */
+ switch (temp) {
+ case MLXSW_REG_MTBR_BAD_SENS_INFO:
+ /* An untrusted cable is connected. Reading the temperature from
+ * its sensor is unreliable.
+ */
+ fault = 1;
+ break;
+ case MLXSW_REG_MTBR_NO_CONN:
+ case MLXSW_REG_MTBR_NO_TEMP_SENS:
+ case MLXSW_REG_MTBR_INDEX_NA:
+ default:
+ fault = 0;
+ break;
+ }
+
+ return sprintf(buf, "%u\n", fault);
+}
+
+static int mlxsw_hwmon_module_temp_critical_get(struct device *dev,
+ struct device_attribute *attr,
+ int *p_temp)
+{
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
+ container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
+ u8 module;
+ int err;
+
+ module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count;
+ err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core,
+ mlxsw_hwmon_dev->slot_index,
+ module, SFP_TEMP_HIGH_WARN,
+ p_temp);
+ if (err) {
+ dev_err(dev, "Failed to query module temperature thresholds\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static ssize_t
+mlxsw_hwmon_module_temp_critical_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int err, temp;
+
+ err = mlxsw_hwmon_module_temp_critical_get(dev, attr, &temp);
+ if (err)
+ return err;
+
+ return sprintf(buf, "%u\n", temp);
+}
+
+static int mlxsw_hwmon_module_temp_emergency_get(struct device *dev,
+ struct device_attribute *attr,
+ int *p_temp)
+{
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
+ container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
+ u8 module;
+ int err;
+
+ module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count;
+ err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core,
+ mlxsw_hwmon_dev->slot_index,
+ module, SFP_TEMP_HIGH_ALARM,
+ p_temp);
+ if (err) {
+ dev_err(dev, "Failed to query module temperature thresholds\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static ssize_t
+mlxsw_hwmon_module_temp_emergency_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int err, temp;
+
+ err = mlxsw_hwmon_module_temp_emergency_get(dev, attr, &temp);
+ if (err)
+ return err;
+
+ return sprintf(buf, "%u\n", temp);
+}
+
+static ssize_t
+mlxsw_hwmon_module_temp_label_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
+ container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
+
+ return sprintf(buf, "front panel %03u\n",
+ mlxsw_hwmon_attr->type_index + 1 -
+ mlxsw_hwmon_attr->mlxsw_hwmon_dev->sensor_count);
+}
+
+static ssize_t
+mlxsw_hwmon_gbox_temp_label_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
+ container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ int index = mlxsw_hwmon_attr->type_index -
+ mlxsw_hwmon_dev->module_sensor_max + 1;
+
+ return sprintf(buf, "gearbox %03u\n", index);
+}
+
+static ssize_t mlxsw_hwmon_temp_critical_alarm_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int err, temp, emergency_temp, critic_temp;
+
+ err = mlxsw_hwmon_module_temp_get(dev, attr, &temp);
+ if (err)
+ return err;
+
+ if (temp <= 0)
+ return sprintf(buf, "%d\n", false);
+
+ err = mlxsw_hwmon_module_temp_emergency_get(dev, attr, &emergency_temp);
+ if (err)
+ return err;
+
+ if (temp >= emergency_temp)
+ return sprintf(buf, "%d\n", false);
+
+ err = mlxsw_hwmon_module_temp_critical_get(dev, attr, &critic_temp);
+ if (err)
+ return err;
+
+ return sprintf(buf, "%d\n", temp >= critic_temp);
+}
+
+static ssize_t mlxsw_hwmon_temp_emergency_alarm_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int err, temp, emergency_temp;
+
+ err = mlxsw_hwmon_module_temp_get(dev, attr, &temp);
+ if (err)
+ return err;
+
+ if (temp <= 0)
+ return sprintf(buf, "%d\n", false);
+
+ err = mlxsw_hwmon_module_temp_emergency_get(dev, attr, &emergency_temp);
+ if (err)
+ return err;
+
+ return sprintf(buf, "%d\n", temp >= emergency_temp);
+}
+
+enum mlxsw_hwmon_attr_type {
+ MLXSW_HWMON_ATTR_TYPE_TEMP,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_MAX,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_RST,
+ MLXSW_HWMON_ATTR_TYPE_FAN_RPM,
+ MLXSW_HWMON_ATTR_TYPE_FAN_FAULT,
+ MLXSW_HWMON_ATTR_TYPE_PWM,
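+	/* Only modules that transitioned to the plugged and enabled state are
+	 * of interest; ignore other status changes.
+	 */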
+ MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_FAULT,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_CRIT,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_EMERG,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_LABEL,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_GBOX_LABEL,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_CRIT_ALARM,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_EMERGENCY_ALARM,
+};
+
+static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev,
+ enum mlxsw_hwmon_attr_type attr_type,
+ unsigned int type_index, unsigned int num)
+{
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr;
+ unsigned int attr_index;
+
+ attr_index = mlxsw_hwmon_dev->attrs_count;
+ mlxsw_hwmon_attr = &mlxsw_hwmon_dev->hwmon_attrs[attr_index];
+
+ switch (attr_type) {
+ case MLXSW_HWMON_ATTR_TYPE_TEMP:
+ mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_temp_show;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
+ snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
+ "temp%u_input", num + 1);
+ break;
+ case MLXSW_HWMON_ATTR_TYPE_TEMP_MAX:
+ mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_temp_max_show;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
+ snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
+ "temp%u_highest", num + 1);
+ break;
+ case MLXSW_HWMON_ATTR_TYPE_TEMP_RST:
+ mlxsw_hwmon_attr->dev_attr.store = mlxsw_hwmon_temp_rst_store;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0200;
+ snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
+ "temp%u_reset_history", num + 1);
+ break;
+ case MLXSW_HWMON_ATTR_TYPE_FAN_RPM:
+ mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_fan_rpm_show;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
+ snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
+ "fan%u_input", num + 1);
+ break;
+ case MLXSW_HWMON_ATTR_TYPE_FAN_FAULT:
+ mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_fan_fault_show;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
+ snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
+ "fan%u_fault", num + 1);
+ break;
+ case MLXSW_HWMON_ATTR_TYPE_PWM:
+ mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_pwm_show;
+ mlxsw_hwmon_attr->dev_attr.store = mlxsw_hwmon_pwm_store;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0644;
+ snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
+ "pwm%u", num + 1);
+ break;
+ case MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE:
+ mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_module_temp_show;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
+ snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
+ "temp%u_input", num + 1);
+ break;
+ case MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_FAULT:
+ mlxsw_hwmon_attr->dev_attr.show =
+ mlxsw_hwmon_module_temp_fault_show;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
+ snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
+ "temp%u_fault", num + 1);
+ break;
+ case MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_CRIT:
+ mlxsw_hwmon_attr->dev_attr.show =
+ mlxsw_hwmon_module_temp_critical_show;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
+ snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
+ "temp%u_crit", num + 1);
+ break;
+ case MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_EMERG:
+ mlxsw_hwmon_attr->dev_attr.show =
+ mlxsw_hwmon_module_temp_emergency_show;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
+ snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
+ "temp%u_emergency", num + 1);
+ break;
+ case MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_LABEL:
+ mlxsw_hwmon_attr->dev_attr.show =
+ mlxsw_hwmon_module_temp_label_show;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
+ snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
+ "temp%u_label", num + 1);
+ break;
+ case MLXSW_HWMON_ATTR_TYPE_TEMP_GBOX_LABEL:
+ mlxsw_hwmon_attr->dev_attr.show =
+ mlxsw_hwmon_gbox_temp_label_show;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
+ snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
+ "temp%u_label", num + 1);
+ break;
+ case MLXSW_HWMON_ATTR_TYPE_TEMP_CRIT_ALARM:
+ mlxsw_hwmon_attr->dev_attr.show =
+ mlxsw_hwmon_temp_critical_alarm_show;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
+ snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
+ "temp%u_crit_alarm", num + 1);
+ break;
+ case MLXSW_HWMON_ATTR_TYPE_TEMP_EMERGENCY_ALARM:
+ mlxsw_hwmon_attr->dev_attr.show =
+ mlxsw_hwmon_temp_emergency_alarm_show;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
+ snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
+ "temp%u_emergency_alarm", num + 1);
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ mlxsw_hwmon_attr->type_index = type_index;
+ mlxsw_hwmon_attr->mlxsw_hwmon_dev = mlxsw_hwmon_dev;
+ mlxsw_hwmon_attr->dev_attr.attr.name = mlxsw_hwmon_attr->name;
+ sysfs_attr_init(&mlxsw_hwmon_attr->dev_attr.attr);
+
+ mlxsw_hwmon_dev->attrs[attr_index] = &mlxsw_hwmon_attr->dev_attr.attr;
+ mlxsw_hwmon_dev->attrs_count++;
+}
+
+static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev)
+{
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
+ char mtcap_pl[MLXSW_REG_MTCAP_LEN] = {0};
+ int i;
+ int err;
+
+ err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtcap), mtcap_pl);
+ if (err) {
+ dev_err(mlxsw_hwmon->bus_info->dev, "Failed to get number of temp sensors\n");
+ return err;
+ }
+ mlxsw_hwmon_dev->sensor_count = mlxsw_reg_mtcap_sensor_count_get(mtcap_pl);
+ for (i = 0; i < mlxsw_hwmon_dev->sensor_count; i++) {
+ char mtmp_pl[MLXSW_REG_MTMP_LEN] = {0};
+
+ mlxsw_reg_mtmp_slot_index_set(mtmp_pl,
+ mlxsw_hwmon_dev->slot_index);
+ mlxsw_reg_mtmp_sensor_index_set(mtmp_pl, i);
+ err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp),
+ mtmp_pl);
+ if (err)
+ return err;
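+		/* Enable max temperature tracking for the sensor and reset
+		 * its recorded history.
+		 */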
+ mlxsw_reg_mtmp_mte_set(mtmp_pl, true);
+ mlxsw_reg_mtmp_mtr_set(mtmp_pl, true);
+ err = mlxsw_reg_write(mlxsw_hwmon->core,
+ MLXSW_REG(mtmp), mtmp_pl);
+ if (err) {
+ dev_err(mlxsw_hwmon->bus_info->dev, "Failed to setup temp sensor number %d\n",
+ i);
+ return err;
+ }
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
+ MLXSW_HWMON_ATTR_TYPE_TEMP, i, i);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_MAX, i, i);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_RST, i, i);
+ }
+ return 0;
+}
+
+static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev)
+{
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
+ char mfcr_pl[MLXSW_REG_MFCR_LEN] = {0};
+ enum mlxsw_reg_mfcr_pwm_frequency freq;
+ unsigned int type_index;
+ unsigned int num;
+ u16 tacho_active;
+ u8 pwm_active;
+ int err;
+
+ err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mfcr), mfcr_pl);
+ if (err) {
+ dev_err(mlxsw_hwmon->bus_info->dev, "Failed to get to probe PWMs and Tachometers\n");
+ return err;
+ }
+ mlxsw_reg_mfcr_unpack(mfcr_pl, &freq, &tacho_active, &pwm_active);
+ num = 0;
+ for (type_index = 0; type_index < MLXSW_MFCR_TACHOS_MAX; type_index++) {
+ if (tacho_active & BIT(type_index)) {
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
+ MLXSW_HWMON_ATTR_TYPE_FAN_RPM,
+ type_index, num);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
+ MLXSW_HWMON_ATTR_TYPE_FAN_FAULT,
+ type_index, num++);
+ }
+ }
+ num = 0;
+ for (type_index = 0; type_index < MLXSW_MFCR_PWMS_MAX; type_index++) {
+ if (pwm_active & BIT(type_index))
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
+ MLXSW_HWMON_ATTR_TYPE_PWM,
+ type_index, num++);
+ }
+ return 0;
+}
+
+static int mlxsw_hwmon_module_init(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev)
+{
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ u8 module_sensor_max;
+ int i, err;
+
+ mlxsw_reg_mgpir_pack(mgpir_pl, mlxsw_hwmon_dev->slot_index);
+ err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL,
+ &module_sensor_max, NULL);
+
+ /* Add extra attributes for module temperature. Module sensor indexes
+ * start at sensor_count, while all indexes below sensor_count are
+ * already used by the sensors configured through the MTMP register by
+ * mlxsw_hwmon_temp_init().
+ */
+ mlxsw_hwmon_dev->module_sensor_max = mlxsw_hwmon_dev->sensor_count +
+ module_sensor_max;
+ for (i = mlxsw_hwmon_dev->sensor_count;
+ i < mlxsw_hwmon_dev->module_sensor_max; i++) {
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE, i, i);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_FAULT,
+ i, i);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_CRIT, i,
+ i);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_EMERG,
+ i, i);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_LABEL,
+ i, i);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_CRIT_ALARM,
+ i, i);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_EMERGENCY_ALARM,
+ i, i);
+ }
+
+ return 0;
+}
+
+static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev)
+{
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
+ enum mlxsw_reg_mgpir_device_type device_type;
+ int index, max_index, sensor_index;
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ char mtmp_pl[MLXSW_REG_MTMP_LEN];
+ u8 gbox_num;
+ int err;
+
+ mlxsw_reg_mgpir_pack(mgpir_pl, mlxsw_hwmon_dev->slot_index);
+ err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL, NULL,
+ NULL);
+ if (device_type != MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE ||
+ !gbox_num)
+ return 0;
+
+ index = mlxsw_hwmon_dev->module_sensor_max;
+ max_index = mlxsw_hwmon_dev->module_sensor_max + gbox_num;
+ while (index < max_index) {
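+		/* Gearbox sensors follow the module sensors in the attribute
+		 * space; map each one into the MTMP gearbox sensor index
+		 * range.
+		 */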
+ sensor_index = index % mlxsw_hwmon_dev->module_sensor_max +
+ MLXSW_REG_MTMP_GBOX_INDEX_MIN;
+ mlxsw_reg_mtmp_pack(mtmp_pl, mlxsw_hwmon_dev->slot_index,
+ sensor_index, true, true);
+ err = mlxsw_reg_write(mlxsw_hwmon->core,
+ MLXSW_REG(mtmp), mtmp_pl);
+ if (err) {
+ dev_err(mlxsw_hwmon->bus_info->dev, "Failed to setup temp sensor number %d\n",
+ sensor_index);
+ return err;
+ }
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
+ MLXSW_HWMON_ATTR_TYPE_TEMP, index, index);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_MAX, index,
+ index);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_RST, index,
+ index);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_GBOX_LABEL,
+ index, index);
+ index++;
+ }
+
+ return 0;
+}
+
+static void
+mlxsw_hwmon_got_active(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ void *priv)
+{
+ struct mlxsw_hwmon *hwmon = priv;
+ struct mlxsw_hwmon_dev *linecard;
+ struct device *dev;
+ int err;
+
+ dev = hwmon->bus_info->dev;
+ linecard = &hwmon->line_cards[slot_index];
+ if (linecard->active)
+ return;
+ /* For the main board, module sensor indexes start from 1; sensor index
+ * 0 is used for the ASIC. Use the same numbering for line cards.
+ */
+ linecard->sensor_count = 1;
+ linecard->slot_index = slot_index;
+ linecard->hwmon = hwmon;
+ err = mlxsw_hwmon_module_init(linecard);
+ if (err) {
+ dev_err(dev, "Failed to configure hwmon objects for line card modules in slot %d\n",
+ slot_index);
+ return;
+ }
+
+ err = mlxsw_hwmon_gearbox_init(linecard);
+ if (err) {
+ dev_err(dev, "Failed to configure hwmon objects for line card gearboxes in slot %d\n",
+ slot_index);
+ return;
+ }
+
+ linecard->groups[0] = &linecard->group;
+ linecard->group.attrs = linecard->attrs;
+ sprintf(linecard->name, "%s#%02u", "linecard", slot_index);
+ linecard->hwmon_dev =
+ hwmon_device_register_with_groups(dev, linecard->name,
+ linecard, linecard->groups);
+ if (IS_ERR(linecard->hwmon_dev)) {
+ dev_err(dev, "Failed to register hwmon objects for line card in slot %d\n",
+ slot_index);
+ return;
+ }
+
+ linecard->active = true;
+}
+
+static void
+mlxsw_hwmon_got_inactive(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ void *priv)
+{
+ struct mlxsw_hwmon *hwmon = priv;
+ struct mlxsw_hwmon_dev *linecard;
+
+ linecard = &hwmon->line_cards[slot_index];
+ if (!linecard->active)
+ return;
+ linecard->active = false;
+ hwmon_device_unregister(linecard->hwmon_dev);
+ /* Reset attributes counter */
+ linecard->attrs_count = 0;
+}
+
+static struct mlxsw_linecards_event_ops mlxsw_hwmon_event_ops = {
+ .got_active = mlxsw_hwmon_got_active,
+ .got_inactive = mlxsw_hwmon_got_inactive,
+};
+
+int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct mlxsw_hwmon **p_hwmon)
+{
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ struct mlxsw_hwmon *mlxsw_hwmon;
+ struct device *hwmon_dev;
+ u8 num_of_slots;
+ int err;
+
+ mlxsw_reg_mgpir_pack(mgpir_pl, 0);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, NULL,
+ &num_of_slots);
+
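+	/* Entry 0 describes the main board; line cards use entries
+	 * 1..num_of_slots.
+	 */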
+ mlxsw_hwmon = kzalloc(struct_size(mlxsw_hwmon, line_cards,
+ num_of_slots + 1), GFP_KERNEL);
+ if (!mlxsw_hwmon)
+ return -ENOMEM;
+
+ mlxsw_hwmon->core = mlxsw_core;
+ mlxsw_hwmon->bus_info = mlxsw_bus_info;
+ mlxsw_hwmon->line_cards[0].hwmon = mlxsw_hwmon;
+ mlxsw_hwmon->line_cards[0].slot_index = 0;
+
+ err = mlxsw_hwmon_temp_init(&mlxsw_hwmon->line_cards[0]);
+ if (err)
+ goto err_temp_init;
+
+ err = mlxsw_hwmon_fans_init(&mlxsw_hwmon->line_cards[0]);
+ if (err)
+ goto err_fans_init;
+
+ err = mlxsw_hwmon_module_init(&mlxsw_hwmon->line_cards[0]);
+ if (err)
+ goto err_temp_module_init;
+
+ err = mlxsw_hwmon_gearbox_init(&mlxsw_hwmon->line_cards[0]);
+ if (err)
+ goto err_temp_gearbox_init;
+
+ mlxsw_hwmon->line_cards[0].groups[0] = &mlxsw_hwmon->line_cards[0].group;
+ mlxsw_hwmon->line_cards[0].group.attrs = mlxsw_hwmon->line_cards[0].attrs;
+
+ hwmon_dev = hwmon_device_register_with_groups(mlxsw_bus_info->dev,
+ "mlxsw",
+ &mlxsw_hwmon->line_cards[0],
+ mlxsw_hwmon->line_cards[0].groups);
+ if (IS_ERR(hwmon_dev)) {
+ err = PTR_ERR(hwmon_dev);
+ goto err_hwmon_register;
+ }
+
+ err = mlxsw_linecards_event_ops_register(mlxsw_hwmon->core,
+ &mlxsw_hwmon_event_ops,
+ mlxsw_hwmon);
+ if (err)
+ goto err_linecards_event_ops_register;
+
+ mlxsw_hwmon->line_cards[0].hwmon_dev = hwmon_dev;
+ mlxsw_hwmon->line_cards[0].active = true;
+ *p_hwmon = mlxsw_hwmon;
+ return 0;
+
+err_linecards_event_ops_register:
+ hwmon_device_unregister(mlxsw_hwmon->line_cards[0].hwmon_dev);
+err_hwmon_register:
+err_temp_gearbox_init:
+err_temp_module_init:
+err_fans_init:
+err_temp_init:
+ kfree(mlxsw_hwmon);
+ return err;
+}
+
+void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon)
+{
+ mlxsw_hwmon->line_cards[0].active = false;
+ mlxsw_linecards_event_ops_unregister(mlxsw_hwmon->core,
+ &mlxsw_hwmon_event_ops, mlxsw_hwmon);
+ hwmon_device_unregister(mlxsw_hwmon->line_cards[0].hwmon_dev);
+ kfree(mlxsw_hwmon);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c
new file mode 100644
index 000000000..af37e650a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2022 NVIDIA Corporation and Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/idr.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <net/devlink.h>
+#include "core.h"
+
+#define MLXSW_LINECARD_DEV_ID_NAME "lc"
+
+struct mlxsw_linecard_dev {
+ struct mlxsw_linecard *linecard;
+};
+
+struct mlxsw_linecard_bdev {
+ struct auxiliary_device adev;
+ struct mlxsw_linecard *linecard;
+ struct mlxsw_linecard_dev *linecard_dev;
+};
+
+static DEFINE_IDA(mlxsw_linecard_bdev_ida);
+
+static int mlxsw_linecard_bdev_id_alloc(void)
+{
+ return ida_alloc(&mlxsw_linecard_bdev_ida, GFP_KERNEL);
+}
+
+static void mlxsw_linecard_bdev_id_free(int id)
+{
+ ida_free(&mlxsw_linecard_bdev_ida, id);
+}
+
+static void mlxsw_linecard_bdev_release(struct device *device)
+{
+ struct auxiliary_device *adev =
+ container_of(device, struct auxiliary_device, dev);
+ struct mlxsw_linecard_bdev *linecard_bdev =
+ container_of(adev, struct mlxsw_linecard_bdev, adev);
+
+ mlxsw_linecard_bdev_id_free(adev->id);
+ kfree(linecard_bdev);
+}
+
+int mlxsw_linecard_bdev_add(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_linecard_bdev *linecard_bdev;
+ int err;
+ int id;
+
+ id = mlxsw_linecard_bdev_id_alloc();
+ if (id < 0)
+ return id;
+
+ linecard_bdev = kzalloc(sizeof(*linecard_bdev), GFP_KERNEL);
+ if (!linecard_bdev) {
+ mlxsw_linecard_bdev_id_free(id);
+ return -ENOMEM;
+ }
+ linecard_bdev->adev.id = id;
+ linecard_bdev->adev.name = MLXSW_LINECARD_DEV_ID_NAME;
+ linecard_bdev->adev.dev.release = mlxsw_linecard_bdev_release;
+ linecard_bdev->adev.dev.parent = linecard->linecards->bus_info->dev;
+ linecard_bdev->linecard = linecard;
+
+ err = auxiliary_device_init(&linecard_bdev->adev);
+ if (err) {
+ mlxsw_linecard_bdev_id_free(id);
+ kfree(linecard_bdev);
+ return err;
+ }
+
+ err = auxiliary_device_add(&linecard_bdev->adev);
+ if (err) {
+ auxiliary_device_uninit(&linecard_bdev->adev);
+ return err;
+ }
+
+ linecard->bdev = linecard_bdev;
+ return 0;
+}
+
+void mlxsw_linecard_bdev_del(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_linecard_bdev *linecard_bdev = linecard->bdev;
+
+ if (!linecard_bdev)
+ /* Unprovisioned line cards do not have an auxiliary device. */
+ return;
+ auxiliary_device_delete(&linecard_bdev->adev);
+ auxiliary_device_uninit(&linecard_bdev->adev);
+ linecard->bdev = NULL;
+}
+
+static int mlxsw_linecard_dev_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_linecard_dev *linecard_dev = devlink_priv(devlink);
+ struct mlxsw_linecard *linecard = linecard_dev->linecard;
+
+ return mlxsw_linecard_devlink_info_get(linecard, req, extack);
+}
+
+static int
+mlxsw_linecard_dev_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_linecard_dev *linecard_dev = devlink_priv(devlink);
+ struct mlxsw_linecard *linecard = linecard_dev->linecard;
+
+ return mlxsw_linecard_flash_update(devlink, linecard,
+ params->fw, extack);
+}
+
+static const struct devlink_ops mlxsw_linecard_dev_devlink_ops = {
+ .info_get = mlxsw_linecard_dev_devlink_info_get,
+ .flash_update = mlxsw_linecard_dev_devlink_flash_update,
+};
+
+static int mlxsw_linecard_bdev_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct mlxsw_linecard_bdev *linecard_bdev =
+ container_of(adev, struct mlxsw_linecard_bdev, adev);
+ struct mlxsw_linecard *linecard = linecard_bdev->linecard;
+ struct mlxsw_linecard_dev *linecard_dev;
+ struct devlink *devlink;
+
+ devlink = devlink_alloc(&mlxsw_linecard_dev_devlink_ops,
+ sizeof(*linecard_dev), &adev->dev);
+ if (!devlink)
+ return -ENOMEM;
+ linecard_dev = devlink_priv(devlink);
+ linecard_dev->linecard = linecard_bdev->linecard;
+ linecard_bdev->linecard_dev = linecard_dev;
+
+ devlink_register(devlink);
+ devlink_linecard_nested_dl_set(linecard->devlink_linecard, devlink);
+ return 0;
+}
+
+static void mlxsw_linecard_bdev_remove(struct auxiliary_device *adev)
+{
+ struct mlxsw_linecard_bdev *linecard_bdev =
+ container_of(adev, struct mlxsw_linecard_bdev, adev);
+ struct devlink *devlink = priv_to_devlink(linecard_bdev->linecard_dev);
+ struct mlxsw_linecard *linecard = linecard_bdev->linecard;
+
+ devlink_linecard_nested_dl_set(linecard->devlink_linecard, NULL);
+ devlink_unregister(devlink);
+ devlink_free(devlink);
+}
+
+static const struct auxiliary_device_id mlxsw_linecard_bdev_id_table[] = {
+ { .name = KBUILD_MODNAME "." MLXSW_LINECARD_DEV_ID_NAME },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, mlxsw_linecard_bdev_id_table);
+
+static struct auxiliary_driver mlxsw_linecard_driver = {
+ .name = MLXSW_LINECARD_DEV_ID_NAME,
+ .probe = mlxsw_linecard_bdev_probe,
+ .remove = mlxsw_linecard_bdev_remove,
+ .id_table = mlxsw_linecard_bdev_id_table,
+};
+
+int mlxsw_linecard_driver_register(void)
+{
+ return auxiliary_driver_register(&mlxsw_linecard_driver);
+}
+
+void mlxsw_linecard_driver_unregister(void)
+{
+ auxiliary_driver_unregister(&mlxsw_linecard_driver);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
new file mode 100644
index 000000000..83d2dc91b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
@@ -0,0 +1,1601 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2022 NVIDIA Corporation and Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/vmalloc.h>
+
+#include "core.h"
+#include "../mlxfw/mlxfw.h"
+
+struct mlxsw_linecard_ini_file {
+ __le16 size;
+ union {
+ u8 data[0];
+ struct {
+ __be16 hw_revision;
+ __be16 ini_version;
+ u8 __dontcare[3];
+ u8 type;
+ u8 name[20];
+ } format;
+ };
+};
+
+struct mlxsw_linecard_types_info {
+ struct mlxsw_linecard_ini_file **ini_files;
+ unsigned int count;
+ size_t data_size;
+ char *data;
+};
+
+#define MLXSW_LINECARD_STATUS_EVENT_TO (10 * MSEC_PER_SEC)
+
+static void
+mlxsw_linecard_status_event_to_schedule(struct mlxsw_linecard *linecard,
+ enum mlxsw_linecard_status_event_type status_event_type)
+{
+ cancel_delayed_work_sync(&linecard->status_event_to_dw);
+ linecard->status_event_type_to = status_event_type;
+ mlxsw_core_schedule_dw(&linecard->status_event_to_dw,
+ msecs_to_jiffies(MLXSW_LINECARD_STATUS_EVENT_TO));
+}
+
+static void
+mlxsw_linecard_status_event_done(struct mlxsw_linecard *linecard,
+ enum mlxsw_linecard_status_event_type status_event_type)
+{
+ if (linecard->status_event_type_to == status_event_type)
+ cancel_delayed_work_sync(&linecard->status_event_to_dw);
+}
+
+static const char *
+mlxsw_linecard_types_lookup(struct mlxsw_linecards *linecards, u8 card_type)
+{
+ struct mlxsw_linecard_types_info *types_info;
+ struct mlxsw_linecard_ini_file *ini_file;
+ int i;
+
+ types_info = linecards->types_info;
+ if (!types_info)
+ return NULL;
+ for (i = 0; i < types_info->count; i++) {
+ ini_file = linecards->types_info->ini_files[i];
+ if (ini_file->format.type == card_type)
+ return ini_file->format.name;
+ }
+ return NULL;
+}
+
+static const char *mlxsw_linecard_type_name(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ char mddq_pl[MLXSW_REG_MDDQ_LEN];
+ int err;
+
+ mlxsw_reg_mddq_slot_name_pack(mddq_pl, linecard->slot_index);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl);
+ if (err)
+ return ERR_PTR(err);
+ mlxsw_reg_mddq_slot_name_unpack(mddq_pl, linecard->name);
+ return linecard->name;
+}
+
+struct mlxsw_linecard_device_fw_info {
+ struct mlxfw_dev mlxfw_dev;
+ struct mlxsw_core *mlxsw_core;
+ struct mlxsw_linecard *linecard;
+};
+
+static int mlxsw_linecard_device_fw_component_query(struct mlxfw_dev *mlxfw_dev,
+ u16 component_index,
+ u32 *p_max_size,
+ u8 *p_align_bits,
+ u16 *p_max_write_size)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcqi_pl;
+ int err;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_QUERY,
+ MLXSW_REG(mcqi), &mcqi_pl);
+
+ mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
+ p_max_write_size);
+
+ *p_align_bits = max_t(u8, *p_align_bits, 2);
+ *p_max_write_size = min_t(u16, *p_max_write_size,
+ MLXSW_REG_MCDA_MAX_DATA_LEN);
+ return 0;
+}
+
+static int mlxsw_linecard_device_fw_fsm_lock(struct mlxfw_dev *mlxfw_dev,
+ u32 *fwhandle)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ u8 control_state;
+ char *mcc_pl;
+ int err;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_QUERY,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
+ if (control_state != MLXFW_FSM_STATE_IDLE)
+ return -EBUSY;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
+ 0, *fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static int
+mlxsw_linecard_device_fw_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle,
+ u16 component_index,
+ u32 component_size)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcc_pl;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
+ component_index, fwhandle, component_size);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static int
+mlxsw_linecard_device_fw_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle, u8 *data,
+ u16 size, u32 offset)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcda_pl;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcda), &mcda_pl);
+ mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static int
+mlxsw_linecard_device_fw_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle, u16 component_index)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcc_pl;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
+ component_index, fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static int mlxsw_linecard_device_fw_fsm_activate(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcc_pl;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE,
+ 0, fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static int
+mlxsw_linecard_device_fw_fsm_query_state(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle,
+ enum mlxfw_fsm_state *fsm_state,
+ enum mlxfw_fsm_state_err *fsm_state_err)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ u8 control_state;
+ u8 error_code;
+ char *mcc_pl;
+ int err;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_QUERY,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
+ *fsm_state = control_state;
+ *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
+ MLXFW_FSM_STATE_ERR_MAX);
+ return 0;
+}
+
+static void mlxsw_linecard_device_fw_fsm_cancel(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcc_pl;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL,
+ 0, fwhandle, 0);
+ mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static void mlxsw_linecard_device_fw_fsm_release(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcc_pl;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl,
+ MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE,
+ 0, fwhandle, 0);
+ mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
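+/* All FSM callbacks in the table below share one pattern: the inner
+ * firmware register (MCQI, MCC or MCDA) is packed into an MDDT payload
+ * addressed to the line card's slot and device index, and the MDDT
+ * register is then issued on the main ASIC, which tunnels the access to
+ * the line card device.
+ */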
+static const struct mlxfw_dev_ops mlxsw_linecard_device_dev_ops = {
+ .component_query = mlxsw_linecard_device_fw_component_query,
+ .fsm_lock = mlxsw_linecard_device_fw_fsm_lock,
+ .fsm_component_update = mlxsw_linecard_device_fw_fsm_component_update,
+ .fsm_block_download = mlxsw_linecard_device_fw_fsm_block_download,
+ .fsm_component_verify = mlxsw_linecard_device_fw_fsm_component_verify,
+ .fsm_activate = mlxsw_linecard_device_fw_fsm_activate,
+ .fsm_query_state = mlxsw_linecard_device_fw_fsm_query_state,
+ .fsm_cancel = mlxsw_linecard_device_fw_fsm_cancel,
+ .fsm_release = mlxsw_linecard_device_fw_fsm_release,
+};
+
+int mlxsw_linecard_flash_update(struct devlink *linecard_devlink,
+ struct mlxsw_linecard *linecard,
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ struct mlxsw_linecard_device_fw_info info = {
+ .mlxfw_dev = {
+ .ops = &mlxsw_linecard_device_dev_ops,
+ .psid = linecard->device.info.psid,
+ .psid_size = strlen(linecard->device.info.psid),
+ .devlink = linecard_devlink,
+ },
+ .mlxsw_core = mlxsw_core,
+ .linecard = linecard,
+ };
+ int err;
+
+ mutex_lock(&linecard->lock);
+ if (!linecard->active) {
+ NL_SET_ERR_MSG_MOD(extack, "Only active line cards can be flashed");
+ err = -EINVAL;
+ goto unlock;
+ }
+ err = mlxsw_core_fw_flash(mlxsw_core, &info.mlxfw_dev,
+ firmware, extack);
+unlock:
+ mutex_unlock(&linecard->lock);
+ return err;
+}
+
+static int mlxsw_linecard_device_psid_get(struct mlxsw_linecard *linecard,
+ u8 device_index, char *psid)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mgir_pl;
+ int err;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index, device_index,
+ MLXSW_REG_MDDT_METHOD_QUERY,
+ MLXSW_REG(mgir), &mgir_pl);
+
+ mlxsw_reg_mgir_pack(mgir_pl);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgir_fw_info_psid_memcpy_from(mgir_pl, psid);
+ return 0;
+}
+
+static int mlxsw_linecard_device_info_update(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ bool flashable_found = false;
+ u8 msg_seq = 0;
+
+ do {
+ struct mlxsw_linecard_device_info info;
+ char mddq_pl[MLXSW_REG_MDDQ_LEN];
+ bool flash_owner;
+ bool data_valid;
+ u8 device_index;
+ int err;
+
+ mlxsw_reg_mddq_device_info_pack(mddq_pl, linecard->slot_index,
+ msg_seq);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mddq_device_info_unpack(mddq_pl, &msg_seq,
+ &data_valid, &flash_owner,
+ &device_index,
+ &info.fw_major,
+ &info.fw_minor,
+ &info.fw_sub_minor);
+ if (!data_valid)
+ break;
+ if (!flash_owner) /* We care only about flashable ones. */
+ continue;
+ if (flashable_found) {
+ dev_warn_once(linecard->linecards->bus_info->dev, "linecard %u: More flashable devices present, exposing only the first one\n",
+ linecard->slot_index);
+ return 0;
+ }
+
+ err = mlxsw_linecard_device_psid_get(linecard, device_index,
+ info.psid);
+ if (err)
+ return err;
+
+ linecard->device.info = info;
+ linecard->device.index = device_index;
+ flashable_found = true;
+ } while (msg_seq);
+
+ return 0;
+}
+
+static void mlxsw_linecard_provision_fail(struct mlxsw_linecard *linecard)
+{
+ linecard->provisioned = false;
+ linecard->ready = false;
+ linecard->active = false;
+ devlink_linecard_provision_fail(linecard->devlink_linecard);
+}
+
+struct mlxsw_linecards_event_ops_item {
+ struct list_head list;
+ const struct mlxsw_linecards_event_ops *event_ops;
+ void *priv;
+};
+
+static void
+mlxsw_linecard_event_op_call(struct mlxsw_linecard *linecard,
+ mlxsw_linecards_event_op_t *op, void *priv)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+
+ if (!op)
+ return;
+ op(mlxsw_core, linecard->slot_index, priv);
+}
+
+static void
+mlxsw_linecard_active_ops_call(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_linecards *linecards = linecard->linecards;
+ struct mlxsw_linecards_event_ops_item *item;
+
+ mutex_lock(&linecards->event_ops_list_lock);
+ list_for_each_entry(item, &linecards->event_ops_list, list)
+ mlxsw_linecard_event_op_call(linecard,
+ item->event_ops->got_active,
+ item->priv);
+ mutex_unlock(&linecards->event_ops_list_lock);
+}
+
+static void
+mlxsw_linecard_inactive_ops_call(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_linecards *linecards = linecard->linecards;
+ struct mlxsw_linecards_event_ops_item *item;
+
+ mutex_lock(&linecards->event_ops_list_lock);
+ list_for_each_entry(item, &linecards->event_ops_list, list)
+ mlxsw_linecard_event_op_call(linecard,
+ item->event_ops->got_inactive,
+ item->priv);
+ mutex_unlock(&linecards->event_ops_list_lock);
+}
+
+static void
+mlxsw_linecards_event_ops_register_call(struct mlxsw_linecards *linecards,
+ const struct mlxsw_linecards_event_ops_item *item)
+{
+ struct mlxsw_linecard *linecard;
+ int i;
+
+ for (i = 0; i < linecards->count; i++) {
+ linecard = mlxsw_linecard_get(linecards, i + 1);
+ mutex_lock(&linecard->lock);
+ if (linecard->active)
+ mlxsw_linecard_event_op_call(linecard,
+ item->event_ops->got_active,
+ item->priv);
+ mutex_unlock(&linecard->lock);
+ }
+}
+
+static void
+mlxsw_linecards_event_ops_unregister_call(struct mlxsw_linecards *linecards,
+ const struct mlxsw_linecards_event_ops_item *item)
+{
+ struct mlxsw_linecard *linecard;
+ int i;
+
+ for (i = 0; i < linecards->count; i++) {
+ linecard = mlxsw_linecard_get(linecards, i + 1);
+ mutex_lock(&linecard->lock);
+ if (linecard->active)
+ mlxsw_linecard_event_op_call(linecard,
+ item->event_ops->got_inactive,
+ item->priv);
+ mutex_unlock(&linecard->lock);
+ }
+}
+
+int mlxsw_linecards_event_ops_register(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards_event_ops *ops,
+ void *priv)
+{
+ struct mlxsw_linecards *linecards = mlxsw_core_linecards(mlxsw_core);
+ struct mlxsw_linecards_event_ops_item *item;
+
+ if (!linecards)
+ return 0;
+ item = kzalloc(sizeof(*item), GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+ item->event_ops = ops;
+ item->priv = priv;
+
+ mutex_lock(&linecards->event_ops_list_lock);
+ list_add_tail(&item->list, &linecards->event_ops_list);
+ mutex_unlock(&linecards->event_ops_list_lock);
+ mlxsw_linecards_event_ops_register_call(linecards, item);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_linecards_event_ops_register);
+
+void mlxsw_linecards_event_ops_unregister(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards_event_ops *ops,
+ void *priv)
+{
+ struct mlxsw_linecards *linecards = mlxsw_core_linecards(mlxsw_core);
+ struct mlxsw_linecards_event_ops_item *item, *tmp;
+ bool found = false;
+
+ if (!linecards)
+ return;
+ mutex_lock(&linecards->event_ops_list_lock);
+ list_for_each_entry_safe(item, tmp, &linecards->event_ops_list, list) {
+ if (item->event_ops == ops && item->priv == priv) {
+ list_del(&item->list);
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&linecards->event_ops_list_lock);
+
+ if (!found)
+ return;
+ mlxsw_linecards_event_ops_unregister_call(linecards, item);
+ kfree(item);
+}
+EXPORT_SYMBOL(mlxsw_linecards_event_ops_unregister);
+
+int mlxsw_linecard_devlink_info_get(struct mlxsw_linecard *linecard,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ char buf[32];
+ int err;
+
+ mutex_lock(&linecard->lock);
+ if (WARN_ON(!linecard->provisioned)) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ sprintf(buf, "%d", linecard->hw_revision);
+ err = devlink_info_version_fixed_put(req, "hw.revision", buf);
+ if (err)
+ goto unlock;
+
+ sprintf(buf, "%d", linecard->ini_version);
+ err = devlink_info_version_running_put(req, "ini.version", buf);
+ if (err)
+ goto unlock;
+
+ if (linecard->active) {
+ struct mlxsw_linecard_device_info *info = &linecard->device.info;
+
+ err = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
+ info->psid);
+ if (err)
+ goto unlock;
+
+ sprintf(buf, "%u.%u.%u", info->fw_major, info->fw_minor,
+ info->fw_sub_minor);
+ err = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ buf);
+ if (err)
+ goto unlock;
+ }
+
+unlock:
+ mutex_unlock(&linecard->lock);
+ return err;
+}
+
+static int
+mlxsw_linecard_provision_set(struct mlxsw_linecard *linecard, u8 card_type,
+ u16 hw_revision, u16 ini_version)
+{
+ struct mlxsw_linecards *linecards = linecard->linecards;
+ const char *type;
+ int err;
+
+ type = mlxsw_linecard_types_lookup(linecards, card_type);
+ mlxsw_linecard_status_event_done(linecard,
+ MLXSW_LINECARD_STATUS_EVENT_TYPE_PROVISION);
+ if (!type) {
+ /* It is possible for a line card to be provisioned before
+ * driver initialization. Due to a missing INI bundle file
+ * or an outdated one, the queried card's type might not
+ * be recognized by the driver. In this case, try to query
+ * the card's name from the device.
+ */
+ type = mlxsw_linecard_type_name(linecard);
+ if (IS_ERR(type)) {
+ mlxsw_linecard_provision_fail(linecard);
+ return PTR_ERR(type);
+ }
+ }
+ linecard->provisioned = true;
+ linecard->hw_revision = hw_revision;
+ linecard->ini_version = ini_version;
+
+ err = mlxsw_linecard_bdev_add(linecard);
+ if (err) {
+ linecard->provisioned = false;
+ mlxsw_linecard_provision_fail(linecard);
+ return err;
+ }
+
+ devlink_linecard_provision_set(linecard->devlink_linecard, type);
+ return 0;
+}
+
+static void mlxsw_linecard_provision_clear(struct mlxsw_linecard *linecard)
+{
+ mlxsw_linecard_status_event_done(linecard,
+ MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION);
+ mlxsw_linecard_bdev_del(linecard);
+ linecard->provisioned = false;
+ devlink_linecard_provision_clear(linecard->devlink_linecard);
+}
+
+static int mlxsw_linecard_ready_set(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ char mddc_pl[MLXSW_REG_MDDC_LEN];
+ int err;
+
+ err = mlxsw_linecard_device_info_update(linecard);
+ if (err)
+ return err;
+
+ mlxsw_reg_mddc_pack(mddc_pl, linecard->slot_index, false, true);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddc), mddc_pl);
+ if (err)
+ return err;
+ linecard->ready = true;
+ return 0;
+}
+
+static int mlxsw_linecard_ready_clear(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ char mddc_pl[MLXSW_REG_MDDC_LEN];
+ int err;
+
+ mlxsw_reg_mddc_pack(mddc_pl, linecard->slot_index, false, false);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddc), mddc_pl);
+ if (err)
+ return err;
+ linecard->ready = false;
+ return 0;
+}
+
+static void mlxsw_linecard_active_set(struct mlxsw_linecard *linecard)
+{
+ mlxsw_linecard_active_ops_call(linecard);
+ linecard->active = true;
+ devlink_linecard_activate(linecard->devlink_linecard);
+}
+
+static void mlxsw_linecard_active_clear(struct mlxsw_linecard *linecard)
+{
+ mlxsw_linecard_inactive_ops_call(linecard);
+ linecard->active = false;
+ devlink_linecard_deactivate(linecard->devlink_linecard);
+}
+
+static int mlxsw_linecard_status_process(struct mlxsw_linecards *linecards,
+ struct mlxsw_linecard *linecard,
+ const char *mddq_pl)
+{
+ enum mlxsw_reg_mddq_slot_info_ready ready;
+ bool provisioned, sr_valid, active;
+ u16 ini_version, hw_revision;
+ u8 slot_index, card_type;
+ int err = 0;
+
+ mlxsw_reg_mddq_slot_info_unpack(mddq_pl, &slot_index, &provisioned,
+ &sr_valid, &ready, &active,
+ &hw_revision, &ini_version,
+ &card_type);
+
+ if (linecard) {
+ if (WARN_ON(slot_index != linecard->slot_index))
+ return -EINVAL;
+ } else {
+ if (WARN_ON(slot_index > linecards->count))
+ return -EINVAL;
+ linecard = mlxsw_linecard_get(linecards, slot_index);
+ }
+
+ mutex_lock(&linecard->lock);
+
+ if (provisioned && linecard->provisioned != provisioned) {
+ err = mlxsw_linecard_provision_set(linecard, card_type,
+ hw_revision, ini_version);
+ if (err)
+ goto out;
+ }
+
+ if (ready == MLXSW_REG_MDDQ_SLOT_INFO_READY_READY && !linecard->ready) {
+ err = mlxsw_linecard_ready_set(linecard);
+ if (err)
+ goto out;
+ }
+
+ if (active && linecard->active != active)
+ mlxsw_linecard_active_set(linecard);
+
+ if (!active && linecard->active != active)
+ mlxsw_linecard_active_clear(linecard);
+
+ if (ready != MLXSW_REG_MDDQ_SLOT_INFO_READY_READY &&
+ linecard->ready) {
+ err = mlxsw_linecard_ready_clear(linecard);
+ if (err)
+ goto out;
+ }
+
+ if (!provisioned && linecard->provisioned != provisioned)
+ mlxsw_linecard_provision_clear(linecard);
+
+out:
+ mutex_unlock(&linecard->lock);
+ return err;
+}
+
+static int mlxsw_linecard_status_get_and_process(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards,
+ struct mlxsw_linecard *linecard)
+{
+ char mddq_pl[MLXSW_REG_MDDQ_LEN];
+ int err;
+
+ mlxsw_reg_mddq_slot_info_pack(mddq_pl, linecard->slot_index, false);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl);
+ if (err)
+ return err;
+
+ return mlxsw_linecard_status_process(linecards, linecard, mddq_pl);
+}
+
+static void mlxsw_linecards_irq_event_handler(struct mlxsw_core *mlxsw_core)
+{
+ struct mlxsw_linecards *linecards = mlxsw_core_linecards(mlxsw_core);
+ int i;
+
+ /* Handle change of line card active state. */
+ for (i = 0; i < linecards->count; i++) {
+ struct mlxsw_linecard *linecard = mlxsw_linecard_get(linecards,
+ i + 1);
+
+ mlxsw_linecard_status_get_and_process(mlxsw_core, linecards,
+ linecard);
+ }
+}
+
+static const char * const mlxsw_linecard_status_event_type_name[] = {
+ [MLXSW_LINECARD_STATUS_EVENT_TYPE_PROVISION] = "provision",
+ [MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION] = "unprovision",
+};
+
+static void mlxsw_linecard_status_event_to_work(struct work_struct *work)
+{
+ struct mlxsw_linecard *linecard =
+ container_of(work, struct mlxsw_linecard,
+ status_event_to_dw.work);
+
+ mutex_lock(&linecard->lock);
+ dev_err(linecard->linecards->bus_info->dev, "linecard %u: Timeout reached waiting on %s status event\n",
+ linecard->slot_index,
+ mlxsw_linecard_status_event_type_name[linecard->status_event_type_to]);
+ mlxsw_linecard_provision_fail(linecard);
+ mutex_unlock(&linecard->lock);
+}
+
+static int __mlxsw_linecard_fix_fsm_state(struct mlxsw_linecard *linecard)
+{
+ dev_info(linecard->linecards->bus_info->dev, "linecard %u: Clearing FSM state error\n",
+ linecard->slot_index);
+ mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index,
+ MLXSW_REG_MBCT_OP_CLEAR_ERRORS, false);
+ return mlxsw_reg_write(linecard->linecards->mlxsw_core,
+ MLXSW_REG(mbct), linecard->mbct_pl);
+}
+
+static int mlxsw_linecard_fix_fsm_state(struct mlxsw_linecard *linecard,
+ enum mlxsw_reg_mbct_fsm_state fsm_state)
+{
+ if (fsm_state != MLXSW_REG_MBCT_FSM_STATE_ERROR)
+ return 0;
+ return __mlxsw_linecard_fix_fsm_state(linecard);
+}
+
+static int
+mlxsw_linecard_query_ini_status(struct mlxsw_linecard *linecard,
+ enum mlxsw_reg_mbct_status *status,
+ enum mlxsw_reg_mbct_fsm_state *fsm_state,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index,
+ MLXSW_REG_MBCT_OP_QUERY_STATUS, false);
+ err = mlxsw_reg_query(linecard->linecards->mlxsw_core, MLXSW_REG(mbct),
+ linecard->mbct_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to query linecard INI status");
+ return err;
+ }
+ mlxsw_reg_mbct_unpack(linecard->mbct_pl, NULL, status, fsm_state);
+ return err;
+}
+
+static int
+mlxsw_linecard_ini_transfer(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecard *linecard,
+ const struct mlxsw_linecard_ini_file *ini_file,
+ struct netlink_ext_ack *extack)
+{
+ enum mlxsw_reg_mbct_fsm_state fsm_state;
+ enum mlxsw_reg_mbct_status status;
+ size_t size_left;
+ const u8 *data;
+ int err;
+
+ size_left = le16_to_cpu(ini_file->size);
+ data = ini_file->data;
+ while (size_left) {
+ size_t data_size = MLXSW_REG_MBCT_DATA_LEN;
+ bool is_last = false;
+
+ if (size_left <= MLXSW_REG_MBCT_DATA_LEN) {
+ data_size = size_left;
+ is_last = true;
+ }
+
+ mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index,
+ MLXSW_REG_MBCT_OP_DATA_TRANSFER, false);
+ mlxsw_reg_mbct_dt_pack(linecard->mbct_pl, data_size,
+ is_last, data);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mbct),
+ linecard->mbct_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to issue linecard INI data transfer");
+ return err;
+ }
+ mlxsw_reg_mbct_unpack(linecard->mbct_pl, NULL,
+ &status, &fsm_state);
+ if ((!is_last && status != MLXSW_REG_MBCT_STATUS_PART_DATA) ||
+ (is_last && status != MLXSW_REG_MBCT_STATUS_LAST_DATA)) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to transfer linecard INI data");
+ mlxsw_linecard_fix_fsm_state(linecard, fsm_state);
+ return -EINVAL;
+ }
+ size_left -= data_size;
+ data += data_size;
+ }
+
+ return 0;
+}
+
+static int
+mlxsw_linecard_ini_erase(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecard *linecard,
+ struct netlink_ext_ack *extack)
+{
+ enum mlxsw_reg_mbct_fsm_state fsm_state;
+ enum mlxsw_reg_mbct_status status;
+ int err;
+
+ mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index,
+ MLXSW_REG_MBCT_OP_ERASE_INI_IMAGE, false);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mbct),
+ linecard->mbct_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to issue linecard INI erase");
+ return err;
+ }
+ mlxsw_reg_mbct_unpack(linecard->mbct_pl, NULL, &status, &fsm_state);
+ switch (status) {
+ case MLXSW_REG_MBCT_STATUS_ERASE_COMPLETE:
+ break;
+ default:
+ /* Should not happen */
+ fallthrough;
+ case MLXSW_REG_MBCT_STATUS_ERASE_FAILED:
+ NL_SET_ERR_MSG_MOD(extack, "Failed to erase linecard INI");
+ goto fix_fsm_err_out;
+ case MLXSW_REG_MBCT_STATUS_ERROR_INI_IN_USE:
+ NL_SET_ERR_MSG_MOD(extack, "Failed to erase linecard INI while being used");
+ goto fix_fsm_err_out;
+ }
+ return 0;
+
+fix_fsm_err_out:
+ mlxsw_linecard_fix_fsm_state(linecard, fsm_state);
+ return -EINVAL;
+}
+
+static void mlxsw_linecard_bct_process(struct mlxsw_core *mlxsw_core,
+ const char *mbct_pl)
+{
+ struct mlxsw_linecards *linecards = mlxsw_core_linecards(mlxsw_core);
+ enum mlxsw_reg_mbct_fsm_state fsm_state;
+ enum mlxsw_reg_mbct_status status;
+ struct mlxsw_linecard *linecard;
+ u8 slot_index;
+
+ mlxsw_reg_mbct_unpack(mbct_pl, &slot_index, &status, &fsm_state);
+ if (WARN_ON(slot_index > linecards->count))
+ return;
+ linecard = mlxsw_linecard_get(linecards, slot_index);
+ mutex_lock(&linecard->lock);
+ if (status == MLXSW_REG_MBCT_STATUS_ACTIVATION_FAILED) {
+ dev_err(linecards->bus_info->dev, "linecard %u: Failed to activate INI\n",
+ linecard->slot_index);
+ goto fix_fsm_out;
+ }
+ mutex_unlock(&linecard->lock);
+ return;
+
+fix_fsm_out:
+ mlxsw_linecard_fix_fsm_state(linecard, fsm_state);
+ mlxsw_linecard_provision_fail(linecard);
+ mutex_unlock(&linecard->lock);
+}
+
+static int
+mlxsw_linecard_ini_activate(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecard *linecard,
+ struct netlink_ext_ack *extack)
+{
+ enum mlxsw_reg_mbct_fsm_state fsm_state;
+ enum mlxsw_reg_mbct_status status;
+ int err;
+
+ mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index,
+ MLXSW_REG_MBCT_OP_ACTIVATE, true);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mbct), linecard->mbct_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to issue linecard INI activation");
+ return err;
+ }
+ mlxsw_reg_mbct_unpack(linecard->mbct_pl, NULL, &status, &fsm_state);
+ if (status == MLXSW_REG_MBCT_STATUS_ACTIVATION_FAILED) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to activate linecard INI");
+ goto fix_fsm_err_out;
+ }
+
+ return 0;
+
+fix_fsm_err_out:
+ mlxsw_linecard_fix_fsm_state(linecard, fsm_state);
+ return -EINVAL;
+}
+
+#define MLXSW_LINECARD_INI_WAIT_RETRIES 10
+#define MLXSW_LINECARD_INI_WAIT_MS 500
+
+static int
+mlxsw_linecard_ini_in_use_wait(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecard *linecard,
+ struct netlink_ext_ack *extack)
+{
+ enum mlxsw_reg_mbct_fsm_state fsm_state;
+ enum mlxsw_reg_mbct_status status;
+ unsigned int ini_wait_retries = 0;
+ int err;
+
+query_ini_status:
+ err = mlxsw_linecard_query_ini_status(linecard, &status,
+ &fsm_state, extack);
+ if (err)
+ return err;
+
+ switch (fsm_state) {
+ case MLXSW_REG_MBCT_FSM_STATE_INI_IN_USE:
+ if (ini_wait_retries++ > MLXSW_LINECARD_INI_WAIT_RETRIES) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to wait for linecard INI to be unused");
+ return -EINVAL;
+ }
+ mdelay(MLXSW_LINECARD_INI_WAIT_MS);
+ goto query_ini_status;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static bool mlxsw_linecard_port_selector(void *priv, u16 local_port)
+{
+ struct mlxsw_linecard *linecard = priv;
+ struct mlxsw_core *mlxsw_core;
+
+ mlxsw_core = linecard->linecards->mlxsw_core;
+ return linecard == mlxsw_core_port_linecard_get(mlxsw_core, local_port);
+}
+
+static int mlxsw_linecard_provision(struct devlink_linecard *devlink_linecard,
+ void *priv, const char *type,
+ const void *type_priv,
+ struct netlink_ext_ack *extack)
+{
+ const struct mlxsw_linecard_ini_file *ini_file = type_priv;
+ struct mlxsw_linecard *linecard = priv;
+ struct mlxsw_core *mlxsw_core;
+ int err;
+
+ mutex_lock(&linecard->lock);
+
+ mlxsw_core = linecard->linecards->mlxsw_core;
+
+ err = mlxsw_linecard_ini_erase(mlxsw_core, linecard, extack);
+ if (err)
+ goto err_out;
+
+ err = mlxsw_linecard_ini_transfer(mlxsw_core, linecard,
+ ini_file, extack);
+ if (err)
+ goto err_out;
+
+ mlxsw_linecard_status_event_to_schedule(linecard,
+ MLXSW_LINECARD_STATUS_EVENT_TYPE_PROVISION);
+ err = mlxsw_linecard_ini_activate(mlxsw_core, linecard, extack);
+ if (err)
+ goto err_out;
+
+ goto out;
+
+err_out:
+ mlxsw_linecard_provision_fail(linecard);
+out:
+ mutex_unlock(&linecard->lock);
+ return err;
+}
+
+static int mlxsw_linecard_unprovision(struct devlink_linecard *devlink_linecard,
+ void *priv,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_linecard *linecard = priv;
+ struct mlxsw_core *mlxsw_core;
+ int err;
+
+ mutex_lock(&linecard->lock);
+
+ mlxsw_core = linecard->linecards->mlxsw_core;
+
+ mlxsw_core_ports_remove_selected(mlxsw_core,
+ mlxsw_linecard_port_selector,
+ linecard);
+
+ err = mlxsw_linecard_ini_in_use_wait(mlxsw_core, linecard, extack);
+ if (err)
+ goto err_out;
+
+ mlxsw_linecard_status_event_to_schedule(linecard,
+ MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION);
+ err = mlxsw_linecard_ini_erase(mlxsw_core, linecard, extack);
+ if (err)
+ goto err_out;
+
+ goto out;
+
+err_out:
+ mlxsw_linecard_provision_fail(linecard);
+out:
+ mutex_unlock(&linecard->lock);
+ return err;
+}
+
+static bool mlxsw_linecard_same_provision(struct devlink_linecard *devlink_linecard,
+ void *priv, const char *type,
+ const void *type_priv)
+{
+ const struct mlxsw_linecard_ini_file *ini_file = type_priv;
+ struct mlxsw_linecard *linecard = priv;
+ bool ret;
+
+ mutex_lock(&linecard->lock);
+ ret = linecard->hw_revision == be16_to_cpu(ini_file->format.hw_revision) &&
+ linecard->ini_version == be16_to_cpu(ini_file->format.ini_version);
+ mutex_unlock(&linecard->lock);
+ return ret;
+}
+
+static unsigned int
+mlxsw_linecard_types_count(struct devlink_linecard *devlink_linecard,
+ void *priv)
+{
+ struct mlxsw_linecard *linecard = priv;
+
+ return linecard->linecards->types_info ?
+ linecard->linecards->types_info->count : 0;
+}
+
+static void mlxsw_linecard_types_get(struct devlink_linecard *devlink_linecard,
+ void *priv, unsigned int index,
+ const char **type, const void **type_priv)
+{
+ struct mlxsw_linecard_types_info *types_info;
+ struct mlxsw_linecard_ini_file *ini_file;
+ struct mlxsw_linecard *linecard = priv;
+
+ types_info = linecard->linecards->types_info;
+ if (WARN_ON_ONCE(!types_info))
+ return;
+ ini_file = types_info->ini_files[index];
+ *type = ini_file->format.name;
+ *type_priv = ini_file;
+}
+
+static const struct devlink_linecard_ops mlxsw_linecard_ops = {
+ .provision = mlxsw_linecard_provision,
+ .unprovision = mlxsw_linecard_unprovision,
+ .same_provision = mlxsw_linecard_same_provision,
+ .types_count = mlxsw_linecard_types_count,
+ .types_get = mlxsw_linecard_types_get,
+};
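The ops table above is what the devlink line card interface dispatches to. As a rough usage sketch (device handle and type name are illustrative), provisioning is requested with "devlink lc set pci/0000:01:00.0 lc 8 type 16x100G" and reverted with "devlink lc set pci/0000:01:00.0 lc 8 notype", which land in mlxsw_linecard_provision() and mlxsw_linecard_unprovision() respectively.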
+
+struct mlxsw_linecard_status_event {
+ struct mlxsw_core *mlxsw_core;
+ char mddq_pl[MLXSW_REG_MDDQ_LEN];
+ struct work_struct work;
+};
+
+static void mlxsw_linecard_status_event_work(struct work_struct *work)
+{
+ struct mlxsw_linecard_status_event *event;
+ struct mlxsw_linecards *linecards;
+ struct mlxsw_core *mlxsw_core;
+
+ event = container_of(work, struct mlxsw_linecard_status_event, work);
+ mlxsw_core = event->mlxsw_core;
+ linecards = mlxsw_core_linecards(mlxsw_core);
+ mlxsw_linecard_status_process(linecards, NULL, event->mddq_pl);
+ kfree(event);
+}
+
+static void
+mlxsw_linecard_status_listener_func(const struct mlxsw_reg_info *reg,
+ char *mddq_pl, void *priv)
+{
+ struct mlxsw_linecard_status_event *event;
+ struct mlxsw_core *mlxsw_core = priv;
+
+ event = kmalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return;
+ event->mlxsw_core = mlxsw_core;
+ memcpy(event->mddq_pl, mddq_pl, sizeof(event->mddq_pl));
+ INIT_WORK(&event->work, mlxsw_linecard_status_event_work);
+ mlxsw_core_schedule_work(&event->work);
+}
+
+struct mlxsw_linecard_bct_event {
+ struct mlxsw_core *mlxsw_core;
+ char mbct_pl[MLXSW_REG_MBCT_LEN];
+ struct work_struct work;
+};
+
+static void mlxsw_linecard_bct_event_work(struct work_struct *work)
+{
+ struct mlxsw_linecard_bct_event *event;
+ struct mlxsw_core *mlxsw_core;
+
+ event = container_of(work, struct mlxsw_linecard_bct_event, work);
+ mlxsw_core = event->mlxsw_core;
+ mlxsw_linecard_bct_process(mlxsw_core, event->mbct_pl);
+ kfree(event);
+}
+
+static void
+mlxsw_linecard_bct_listener_func(const struct mlxsw_reg_info *reg,
+ char *mbct_pl, void *priv)
+{
+ struct mlxsw_linecard_bct_event *event;
+ struct mlxsw_core *mlxsw_core = priv;
+
+ event = kmalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return;
+ event->mlxsw_core = mlxsw_core;
+ memcpy(event->mbct_pl, mbct_pl, sizeof(event->mbct_pl));
+ INIT_WORK(&event->work, mlxsw_linecard_bct_event_work);
+ mlxsw_core_schedule_work(&event->work);
+}
+
+static const struct mlxsw_listener mlxsw_linecard_listener[] = {
+ MLXSW_CORE_EVENTL(mlxsw_linecard_status_listener_func, DSDSC),
+ MLXSW_CORE_EVENTL(mlxsw_linecard_bct_listener_func, BCTOE),
+};
+
+static int mlxsw_linecard_event_delivery_set(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecard *linecard,
+ bool enable)
+{
+ char mddq_pl[MLXSW_REG_MDDQ_LEN];
+
+ mlxsw_reg_mddq_slot_info_pack(mddq_pl, linecard->slot_index, enable);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddq), mddq_pl);
+}
+
+static int mlxsw_linecard_init(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards,
+ u8 slot_index)
+{
+ struct devlink_linecard *devlink_linecard;
+ struct mlxsw_linecard *linecard;
+
+ linecard = mlxsw_linecard_get(linecards, slot_index);
+ linecard->slot_index = slot_index;
+ linecard->linecards = linecards;
+ mutex_init(&linecard->lock);
+
+ devlink_linecard = devlink_linecard_create(priv_to_devlink(mlxsw_core),
+ slot_index, &mlxsw_linecard_ops,
+ linecard);
+ if (IS_ERR(devlink_linecard))
+ return PTR_ERR(devlink_linecard);
+
+ linecard->devlink_linecard = devlink_linecard;
+ INIT_DELAYED_WORK(&linecard->status_event_to_dw,
+ &mlxsw_linecard_status_event_to_work);
+
+ return 0;
+}
+
+static void mlxsw_linecard_fini(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards,
+ u8 slot_index)
+{
+ struct mlxsw_linecard *linecard;
+
+ linecard = mlxsw_linecard_get(linecards, slot_index);
+ cancel_delayed_work_sync(&linecard->status_event_to_dw);
+ /* Make sure all scheduled events are processed */
+ mlxsw_core_flush_owq();
+ if (linecard->active)
+ mlxsw_linecard_active_clear(linecard);
+ mlxsw_linecard_bdev_del(linecard);
+ devlink_linecard_destroy(linecard->devlink_linecard);
+ mutex_destroy(&linecard->lock);
+}
+
+static int
+mlxsw_linecard_event_delivery_init(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards,
+ u8 slot_index)
+{
+ struct mlxsw_linecard *linecard;
+ int err;
+
+ linecard = mlxsw_linecard_get(linecards, slot_index);
+ err = mlxsw_linecard_event_delivery_set(mlxsw_core, linecard, true);
+ if (err)
+ return err;
+
+ err = mlxsw_linecard_status_get_and_process(mlxsw_core, linecards,
+ linecard);
+ if (err)
+ goto err_status_get_and_process;
+
+ return 0;
+
+err_status_get_and_process:
+ mlxsw_linecard_event_delivery_set(mlxsw_core, linecard, false);
+ return err;
+}
+
+static void
+mlxsw_linecard_event_delivery_fini(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards,
+ u8 slot_index)
+{
+ struct mlxsw_linecard *linecard;
+
+ linecard = mlxsw_linecard_get(linecards, slot_index);
+ mlxsw_linecard_event_delivery_set(mlxsw_core, linecard, false);
+}
+
+/* LINECARDS INI BUNDLE FILE
+ * +----------------------------------+
+ * | MAGIC ("NVLCINI+") |
+ * +----------------------------------+ +--------------------+
+ * | INI 0 +---> | __le16 size |
+ * +----------------------------------+ | __be16 hw_revision |
+ * | INI 1 | | __be16 ini_version |
+ * +----------------------------------+ | u8 __dontcare[3] |
+ * | ... | | u8 type |
+ * +----------------------------------+ | u8 name[20] |
+ * | INI N | | ... |
+ * +----------------------------------+ +--------------------+
+ */
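A worked example of the layout sketched above (sizes illustrative): after the 8-byte "NVLCINI+" magic, an INI whose little-endian size field reads 512 occupies 2 + 512 bytes and the next INI starts immediately after it. The size field counts only the payload, which is why the validation and parsing code below steps by ini_file_size + sizeof(__le16) and requires the size to be a multiple of 4.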
+
+#define MLXSW_LINECARDS_INI_BUNDLE_MAGIC "NVLCINI+"
+
+static int
+mlxsw_linecard_types_file_validate(struct mlxsw_linecards *linecards,
+ struct mlxsw_linecard_types_info *types_info)
+{
+ size_t magic_size = strlen(MLXSW_LINECARDS_INI_BUNDLE_MAGIC);
+ struct mlxsw_linecard_ini_file *ini_file;
+ size_t size = types_info->data_size;
+ const u8 *data = types_info->data;
+ unsigned int count = 0;
+ u16 ini_file_size;
+
+ if (size < magic_size) {
+ dev_warn(linecards->bus_info->dev, "Invalid linecards INIs file size, smaller than magic size\n");
+ return -EINVAL;
+ }
+ if (memcmp(data, MLXSW_LINECARDS_INI_BUNDLE_MAGIC, magic_size)) {
+ dev_warn(linecards->bus_info->dev, "Invalid linecards INIs file magic pattern\n");
+ return -EINVAL;
+ }
+
+ data += magic_size;
+ size -= magic_size;
+
+ while (size > 0) {
+ if (size < sizeof(*ini_file)) {
+ dev_warn(linecards->bus_info->dev, "Linecards INIs file contains INI which is smaller than bare minimum\n");
+ return -EINVAL;
+ }
+ ini_file = (struct mlxsw_linecard_ini_file *) data;
+ ini_file_size = le16_to_cpu(ini_file->size);
+ if (ini_file_size + sizeof(__le16) > size) {
+ dev_warn(linecards->bus_info->dev, "Linecards INIs file appears to be truncated\n");
+ return -EINVAL;
+ }
+ if (ini_file_size % 4) {
+ dev_warn(linecards->bus_info->dev, "Linecards INIs file contains INI with invalid size\n");
+ return -EINVAL;
+ }
+ data += ini_file_size + sizeof(__le16);
+ size -= ini_file_size + sizeof(__le16);
+ count++;
+ }
+ if (!count) {
+ dev_warn(linecards->bus_info->dev, "Linecards INIs file does not contain any INI\n");
+ return -EINVAL;
+ }
+ types_info->count = count;
+ return 0;
+}
+
+static void
+mlxsw_linecard_types_file_parse(struct mlxsw_linecard_types_info *types_info)
+{
+ size_t magic_size = strlen(MLXSW_LINECARDS_INI_BUNDLE_MAGIC);
+ size_t size = types_info->data_size - magic_size;
+ const u8 *data = types_info->data + magic_size;
+ struct mlxsw_linecard_ini_file *ini_file;
+ unsigned int count = 0;
+ u16 ini_file_size;
+ int i;
+
+ while (size) {
+ ini_file = (struct mlxsw_linecard_ini_file *) data;
+ ini_file_size = le16_to_cpu(ini_file->size);
+ for (i = 0; i < ini_file_size / 4; i++) {
+ u32 *val = &((u32 *) ini_file->data)[i];
+
+ *val = swab32(*val);
+ }
+ types_info->ini_files[count] = ini_file;
+ data += ini_file_size + sizeof(__le16);
+ size -= ini_file_size + sizeof(__le16);
+ count++;
+ }
+}
+
+#define MLXSW_LINECARDS_INI_BUNDLE_FILENAME_FMT \
+ "mellanox/lc_ini_bundle_%u_%u.bin"
+#define MLXSW_LINECARDS_INI_BUNDLE_FILENAME_LEN \
+ (sizeof(MLXSW_LINECARDS_INI_BUNDLE_FILENAME_FMT) + 4)
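For example (revision numbers illustrative), a system whose main firmware reports minor revision 2010 and subminor 1006 would look up "mellanox/lc_ini_bundle_2010_1006.bin" through request_firmware_direct() in mlxsw_linecard_types_init() below.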
+
+static int mlxsw_linecard_types_init(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards)
+{
+ const struct mlxsw_fw_rev *rev = &linecards->bus_info->fw_rev;
+ char filename[MLXSW_LINECARDS_INI_BUNDLE_FILENAME_LEN];
+ struct mlxsw_linecard_types_info *types_info;
+ const struct firmware *firmware;
+ int err;
+
+ err = snprintf(filename, sizeof(filename),
+ MLXSW_LINECARDS_INI_BUNDLE_FILENAME_FMT,
+ rev->minor, rev->subminor);
+ WARN_ON(err >= sizeof(filename));
+
+ err = request_firmware_direct(&firmware, filename,
+ linecards->bus_info->dev);
+ if (err) {
+ dev_warn(linecards->bus_info->dev, "Could not request linecards INI file \"%s\", provisioning will not be possible\n",
+ filename);
+ return 0;
+ }
+
+ types_info = kzalloc(sizeof(*types_info), GFP_KERNEL);
+ if (!types_info) {
+ release_firmware(firmware);
+ return -ENOMEM;
+ }
+ linecards->types_info = types_info;
+
+ types_info->data_size = firmware->size;
+ types_info->data = vmalloc(types_info->data_size);
+ if (!types_info->data) {
+ err = -ENOMEM;
+ release_firmware(firmware);
+ goto err_data_alloc;
+ }
+ memcpy(types_info->data, firmware->data, types_info->data_size);
+ release_firmware(firmware);
+
+ err = mlxsw_linecard_types_file_validate(linecards, types_info);
+ if (err) {
+ err = 0;
+ goto err_type_file_file_validate;
+ }
+
+ types_info->ini_files = kmalloc_array(types_info->count,
+ sizeof(struct mlxsw_linecard_ini_file *),
+ GFP_KERNEL);
+ if (!types_info->ini_files) {
+ err = -ENOMEM;
+ goto err_ini_files_alloc;
+ }
+
+ mlxsw_linecard_types_file_parse(types_info);
+
+ return 0;
+
+err_ini_files_alloc:
+err_type_file_file_validate:
+ vfree(types_info->data);
+err_data_alloc:
+ kfree(types_info);
+ linecards->types_info = NULL;
+ return err;
+}
+
+static void mlxsw_linecard_types_fini(struct mlxsw_linecards *linecards)
+{
+ struct mlxsw_linecard_types_info *types_info = linecards->types_info;
+
+ if (!types_info)
+ return;
+ kfree(types_info->ini_files);
+ vfree(types_info->data);
+ kfree(types_info);
+}
+
+int mlxsw_linecards_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *bus_info)
+{
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ struct mlxsw_linecards *linecards;
+ u8 slot_count;
+ int err;
+ int i;
+
+ mlxsw_reg_mgpir_pack(mgpir_pl, 0);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL,
+ NULL, &slot_count);
+ if (!slot_count)
+ return 0;
+
+ linecards = vzalloc(struct_size(linecards, linecards, slot_count));
+ if (!linecards)
+ return -ENOMEM;
+ linecards->count = slot_count;
+ linecards->mlxsw_core = mlxsw_core;
+ linecards->bus_info = bus_info;
+ INIT_LIST_HEAD(&linecards->event_ops_list);
+ mutex_init(&linecards->event_ops_list_lock);
+
+ err = mlxsw_linecard_types_init(mlxsw_core, linecards);
+ if (err)
+ goto err_types_init;
+
+ err = mlxsw_core_traps_register(mlxsw_core, mlxsw_linecard_listener,
+ ARRAY_SIZE(mlxsw_linecard_listener),
+ mlxsw_core);
+ if (err)
+ goto err_traps_register;
+
+ err = mlxsw_core_irq_event_handler_register(mlxsw_core,
+ mlxsw_linecards_irq_event_handler);
+ if (err)
+ goto err_irq_event_handler_register;
+
+ mlxsw_core_linecards_set(mlxsw_core, linecards);
+
+ for (i = 0; i < linecards->count; i++) {
+ err = mlxsw_linecard_init(mlxsw_core, linecards, i + 1);
+ if (err)
+ goto err_linecard_init;
+ }
+
+ for (i = 0; i < linecards->count; i++) {
+ err = mlxsw_linecard_event_delivery_init(mlxsw_core, linecards,
+ i + 1);
+ if (err)
+ goto err_linecard_event_delivery_init;
+ }
+
+ return 0;
+
+err_linecard_event_delivery_init:
+ for (i--; i >= 0; i--)
+ mlxsw_linecard_event_delivery_fini(mlxsw_core, linecards, i + 1);
+ i = linecards->count;
+err_linecard_init:
+ for (i--; i >= 0; i--)
+ mlxsw_linecard_fini(mlxsw_core, linecards, i + 1);
+ mlxsw_core_irq_event_handler_unregister(mlxsw_core,
+ mlxsw_linecards_irq_event_handler);
+err_irq_event_handler_register:
+ mlxsw_core_traps_unregister(mlxsw_core, mlxsw_linecard_listener,
+ ARRAY_SIZE(mlxsw_linecard_listener),
+ mlxsw_core);
+err_traps_register:
+ mlxsw_linecard_types_fini(linecards);
+err_types_init:
+ vfree(linecards);
+ return err;
+}
+
+void mlxsw_linecards_fini(struct mlxsw_core *mlxsw_core)
+{
+ struct mlxsw_linecards *linecards = mlxsw_core_linecards(mlxsw_core);
+ int i;
+
+ if (!linecards)
+ return;
+ for (i = 0; i < linecards->count; i++)
+ mlxsw_linecard_event_delivery_fini(mlxsw_core, linecards, i + 1);
+ for (i = 0; i < linecards->count; i++)
+ mlxsw_linecard_fini(mlxsw_core, linecards, i + 1);
+ mlxsw_core_irq_event_handler_unregister(mlxsw_core,
+ mlxsw_linecards_irq_event_handler);
+ mlxsw_core_traps_unregister(mlxsw_core, mlxsw_linecard_listener,
+ ARRAY_SIZE(mlxsw_linecard_listener),
+ mlxsw_core);
+ mlxsw_linecard_types_fini(linecards);
+ mutex_destroy(&linecards->event_ops_list_lock);
+ WARN_ON(!list_empty(&linecards->event_ops_list));
+ vfree(linecards);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
new file mode 100644
index 000000000..ef5e61708
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -0,0 +1,1063 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved
+ * Copyright (c) 2016 Ivan Vecera <cera@cera.cz>
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/thermal.h>
+#include <linux/err.h>
+#include <linux/sfp.h>
+
+#include "core.h"
+#include "core_env.h"
+
+#define MLXSW_THERMAL_POLL_INT 1000 /* ms */
+#define MLXSW_THERMAL_SLOW_POLL_INT 20000 /* ms */
+#define MLXSW_THERMAL_ASIC_TEMP_NORM 75000 /* 75C */
+#define MLXSW_THERMAL_ASIC_TEMP_HIGH 85000 /* 85C */
+#define MLXSW_THERMAL_ASIC_TEMP_HOT 105000 /* 105C */
+#define MLXSW_THERMAL_HYSTERESIS_TEMP 5000 /* 5C */
+#define MLXSW_THERMAL_MODULE_TEMP_SHIFT (MLXSW_THERMAL_HYSTERESIS_TEMP * 2)
+#define MLXSW_THERMAL_MAX_STATE 10
+#define MLXSW_THERMAL_MIN_STATE 2
+#define MLXSW_THERMAL_MAX_DUTY 255
+
+/* External cooling devices, allowed for binding to mlxsw thermal zones. */
+static char * const mlxsw_thermal_external_allowed_cdev[] = {
+ "mlxreg_fan",
+};
+
+enum mlxsw_thermal_trips {
+ MLXSW_THERMAL_TEMP_TRIP_NORM,
+ MLXSW_THERMAL_TEMP_TRIP_HIGH,
+ MLXSW_THERMAL_TEMP_TRIP_HOT,
+};
+
+struct mlxsw_thermal_trip {
+ int type;
+ int temp;
+ int hyst;
+ int min_state;
+ int max_state;
+};
+
+static const struct mlxsw_thermal_trip default_thermal_trips[] = {
+ { /* In range - 0-40% PWM */
+ .type = THERMAL_TRIP_ACTIVE,
+ .temp = MLXSW_THERMAL_ASIC_TEMP_NORM,
+ .hyst = MLXSW_THERMAL_HYSTERESIS_TEMP,
+ .min_state = 0,
+ .max_state = (4 * MLXSW_THERMAL_MAX_STATE) / 10,
+ },
+ {
+ /* In range - 40-100% PWM */
+ .type = THERMAL_TRIP_ACTIVE,
+ .temp = MLXSW_THERMAL_ASIC_TEMP_HIGH,
+ .hyst = MLXSW_THERMAL_HYSTERESIS_TEMP,
+ .min_state = (4 * MLXSW_THERMAL_MAX_STATE) / 10,
+ .max_state = MLXSW_THERMAL_MAX_STATE,
+ },
+ { /* Warning */
+ .type = THERMAL_TRIP_HOT,
+ .temp = MLXSW_THERMAL_ASIC_TEMP_HOT,
+ .min_state = MLXSW_THERMAL_MAX_STATE,
+ .max_state = MLXSW_THERMAL_MAX_STATE,
+ },
+};
+
+#define MLXSW_THERMAL_NUM_TRIPS ARRAY_SIZE(default_thermal_trips)
+
+/* Make sure all trips are writable */
+#define MLXSW_THERMAL_TRIP_MASK (BIT(MLXSW_THERMAL_NUM_TRIPS) - 1)
+
+struct mlxsw_thermal;
+
+struct mlxsw_thermal_module {
+ struct mlxsw_thermal *parent;
+ struct thermal_zone_device *tzdev;
+ struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
+ int module; /* Module or gearbox number */
+ u8 slot_index;
+};
+
+struct mlxsw_thermal_area {
+ struct mlxsw_thermal_module *tz_module_arr;
+ u8 tz_module_num;
+ struct mlxsw_thermal_module *tz_gearbox_arr;
+ u8 tz_gearbox_num;
+ u8 slot_index;
+ bool active;
+};
+
+struct mlxsw_thermal {
+ struct mlxsw_core *core;
+ const struct mlxsw_bus_info *bus_info;
+ struct thermal_zone_device *tzdev;
+ int polling_delay;
+ struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX];
+ struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
+ struct mlxsw_thermal_area line_cards[];
+};
+
+static inline u8 mlxsw_state_to_duty(int state)
+{
+ return DIV_ROUND_CLOSEST(state * MLXSW_THERMAL_MAX_DUTY,
+ MLXSW_THERMAL_MAX_STATE);
+}
+
+static inline int mlxsw_duty_to_state(u8 duty)
+{
+ return DIV_ROUND_CLOSEST(duty * MLXSW_THERMAL_MAX_STATE,
+ MLXSW_THERMAL_MAX_DUTY);
+}
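A worked example of the two helpers above: cooling state 4 out of MLXSW_THERMAL_MAX_STATE (10) maps to a PWM duty of DIV_ROUND_CLOSEST(4 * 255, 10) = 102, roughly 40%, and converting duty 102 back yields state 4 again.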
+
+static int mlxsw_get_cooling_device_idx(struct mlxsw_thermal *thermal,
+ struct thermal_cooling_device *cdev)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++)
+ if (thermal->cdevs[i] == cdev)
+ return i;
+
+ /* Allow mlxsw thermal zone binding to an external cooling device */
+ for (i = 0; i < ARRAY_SIZE(mlxsw_thermal_external_allowed_cdev); i++) {
+ if (!strcmp(cdev->type, mlxsw_thermal_external_allowed_cdev[i]))
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static void
+mlxsw_thermal_module_trips_reset(struct mlxsw_thermal_module *tz)
+{
+ tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = 0;
+ tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temp = 0;
+ tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temp = 0;
+}
+
+static int
+mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core,
+ struct mlxsw_thermal_module *tz,
+ int crit_temp, int emerg_temp)
+{
+ int err;
+
+ /* Do not try to query temperature thresholds directly from the module's
+ * EEPROM if we got valid thresholds from MTMP.
+ */
+ if (!emerg_temp || !crit_temp) {
+ err = mlxsw_env_module_temp_thresholds_get(core, tz->slot_index,
+ tz->module,
+ SFP_TEMP_HIGH_WARN,
+ &crit_temp);
+ if (err)
+ return err;
+
+ err = mlxsw_env_module_temp_thresholds_get(core, tz->slot_index,
+ tz->module,
+ SFP_TEMP_HIGH_ALARM,
+ &emerg_temp);
+ if (err)
+ return err;
+ }
+
+ if (crit_temp > emerg_temp) {
+ dev_warn(dev, "%s : Critical threshold %d is above emergency threshold %d\n",
+ tz->tzdev->type, crit_temp, emerg_temp);
+ return 0;
+ }
+
+ /* According to the system thermal requirements, the thermal zones are
+ * defined with three trip points. The critical and emergency
+ * temperature thresholds, provided by the QSFP module, are set as the
+ * "active" and "hot" trip points; the "normal" trip point is derived
+ * from "active" by subtracting twice the hysteresis value.
+ */
+ if (crit_temp >= MLXSW_THERMAL_MODULE_TEMP_SHIFT)
+ tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = crit_temp -
+ MLXSW_THERMAL_MODULE_TEMP_SHIFT;
+ else
+ tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = crit_temp;
+ tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temp = crit_temp;
+ tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temp = emerg_temp;
+
+ return 0;
+}
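A worked example of the derivation above (threshold values illustrative): for a module reporting crit_temp = 70000 and emerg_temp = 80000 (70 C and 80 C in millidegrees), the shift of twice the 5000 hysteresis gives a "normal" trip of 60000, a "high" trip of 70000 and a "hot" trip of 80000.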
+
+static int mlxsw_thermal_bind(struct thermal_zone_device *tzdev,
+ struct thermal_cooling_device *cdev)
+{
+ struct mlxsw_thermal *thermal = tzdev->devdata;
+ struct device *dev = thermal->bus_info->dev;
+ int i, err;
+
+ /* If the cooling device is one of ours, bind it */
+ if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0)
+ return 0;
+
+ for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) {
+ const struct mlxsw_thermal_trip *trip = &thermal->trips[i];
+
+ err = thermal_zone_bind_cooling_device(tzdev, i, cdev,
+ trip->max_state,
+ trip->min_state,
+ THERMAL_WEIGHT_DEFAULT);
+ if (err < 0) {
+ dev_err(dev, "Failed to bind cooling device to trip %d\n", i);
+ return err;
+ }
+ }
+ return 0;
+}
+
+static int mlxsw_thermal_unbind(struct thermal_zone_device *tzdev,
+ struct thermal_cooling_device *cdev)
+{
+ struct mlxsw_thermal *thermal = tzdev->devdata;
+ struct device *dev = thermal->bus_info->dev;
+ int i;
+ int err;
+
+ /* If the cooling device is one of ours, unbind it */
+ if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0)
+ return 0;
+
+ for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) {
+ err = thermal_zone_unbind_cooling_device(tzdev, i, cdev);
+ if (err < 0) {
+ dev_err(dev, "Failed to unbind cooling device\n");
+ return err;
+ }
+ }
+ return 0;
+}
+
+static int mlxsw_thermal_get_temp(struct thermal_zone_device *tzdev,
+ int *p_temp)
+{
+ struct mlxsw_thermal *thermal = tzdev->devdata;
+ struct device *dev = thermal->bus_info->dev;
+ char mtmp_pl[MLXSW_REG_MTMP_LEN];
+ int temp;
+ int err;
+
+ mlxsw_reg_mtmp_pack(mtmp_pl, 0, 0, false, false);
+
+ err = mlxsw_reg_query(thermal->core, MLXSW_REG(mtmp), mtmp_pl);
+ if (err) {
+ dev_err(dev, "Failed to query temp sensor\n");
+ return err;
+ }
+ mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL, NULL, NULL);
+
+ *p_temp = temp;
+ return 0;
+}
+
+static int mlxsw_thermal_get_trip_type(struct thermal_zone_device *tzdev,
+ int trip,
+ enum thermal_trip_type *p_type)
+{
+ struct mlxsw_thermal *thermal = tzdev->devdata;
+
+ if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
+ return -EINVAL;
+
+ *p_type = thermal->trips[trip].type;
+ return 0;
+}
+
+static int mlxsw_thermal_get_trip_temp(struct thermal_zone_device *tzdev,
+ int trip, int *p_temp)
+{
+ struct mlxsw_thermal *thermal = tzdev->devdata;
+
+ if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
+ return -EINVAL;
+
+ *p_temp = thermal->trips[trip].temp;
+ return 0;
+}
+
+static int mlxsw_thermal_set_trip_temp(struct thermal_zone_device *tzdev,
+ int trip, int temp)
+{
+ struct mlxsw_thermal *thermal = tzdev->devdata;
+
+ if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
+ return -EINVAL;
+
+ thermal->trips[trip].temp = temp;
+ return 0;
+}
+
+static int mlxsw_thermal_get_trip_hyst(struct thermal_zone_device *tzdev,
+ int trip, int *p_hyst)
+{
+ struct mlxsw_thermal *thermal = tzdev->devdata;
+
+ *p_hyst = thermal->trips[trip].hyst;
+ return 0;
+}
+
+static int mlxsw_thermal_set_trip_hyst(struct thermal_zone_device *tzdev,
+ int trip, int hyst)
+{
+ struct mlxsw_thermal *thermal = tzdev->devdata;
+
+ thermal->trips[trip].hyst = hyst;
+ return 0;
+}
+
+static struct thermal_zone_params mlxsw_thermal_params = {
+ .no_hwmon = true,
+};
+
+static struct thermal_zone_device_ops mlxsw_thermal_ops = {
+ .bind = mlxsw_thermal_bind,
+ .unbind = mlxsw_thermal_unbind,
+ .get_temp = mlxsw_thermal_get_temp,
+ .get_trip_type = mlxsw_thermal_get_trip_type,
+ .get_trip_temp = mlxsw_thermal_get_trip_temp,
+ .set_trip_temp = mlxsw_thermal_set_trip_temp,
+ .get_trip_hyst = mlxsw_thermal_get_trip_hyst,
+ .set_trip_hyst = mlxsw_thermal_set_trip_hyst,
+};
+
+static int mlxsw_thermal_module_bind(struct thermal_zone_device *tzdev,
+ struct thermal_cooling_device *cdev)
+{
+ struct mlxsw_thermal_module *tz = tzdev->devdata;
+ struct mlxsw_thermal *thermal = tz->parent;
+ int i, j, err;
+
+ /* If the cooling device is one of ours, bind it */
+ if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0)
+ return 0;
+
+ for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) {
+ const struct mlxsw_thermal_trip *trip = &tz->trips[i];
+
+ err = thermal_zone_bind_cooling_device(tzdev, i, cdev,
+ trip->max_state,
+ trip->min_state,
+ THERMAL_WEIGHT_DEFAULT);
+ if (err < 0)
+ goto err_thermal_zone_bind_cooling_device;
+ }
+ return 0;
+
+err_thermal_zone_bind_cooling_device:
+ for (j = i - 1; j >= 0; j--)
+ thermal_zone_unbind_cooling_device(tzdev, j, cdev);
+ return err;
+}
+
+static int mlxsw_thermal_module_unbind(struct thermal_zone_device *tzdev,
+ struct thermal_cooling_device *cdev)
+{
+ struct mlxsw_thermal_module *tz = tzdev->devdata;
+ struct mlxsw_thermal *thermal = tz->parent;
+ int i;
+ int err;
+
+ /* If the cooling device is one of ours, unbind it */
+ if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0)
+ return 0;
+
+ for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) {
+ err = thermal_zone_unbind_cooling_device(tzdev, i, cdev);
+ WARN_ON(err);
+ }
+ return err;
+}
+
+static void
+mlxsw_thermal_module_temp_and_thresholds_get(struct mlxsw_core *core,
+ u8 slot_index, u16 sensor_index,
+ int *p_temp, int *p_crit_temp,
+ int *p_emerg_temp)
+{
+ char mtmp_pl[MLXSW_REG_MTMP_LEN];
+ int err;
+
+ /* Read module temperature and thresholds. */
+ mlxsw_reg_mtmp_pack(mtmp_pl, slot_index, sensor_index,
+ false, false);
+ err = mlxsw_reg_query(core, MLXSW_REG(mtmp), mtmp_pl);
+ if (err) {
+ /* Set temperature and thresholds to zero to avoid passing
+ * uninitialized data back to the caller.
+ */
+ *p_temp = 0;
+ *p_crit_temp = 0;
+ *p_emerg_temp = 0;
+
+ return;
+ }
+ mlxsw_reg_mtmp_unpack(mtmp_pl, p_temp, NULL, p_crit_temp, p_emerg_temp,
+ NULL);
+}
+
+static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev,
+ int *p_temp)
+{
+ struct mlxsw_thermal_module *tz = tzdev->devdata;
+ struct mlxsw_thermal *thermal = tz->parent;
+ int temp, crit_temp, emerg_temp;
+ struct device *dev;
+ u16 sensor_index;
+
+ dev = thermal->bus_info->dev;
+ sensor_index = MLXSW_REG_MTMP_MODULE_INDEX_MIN + tz->module;
+
+ /* Read module temperature and thresholds. */
+ mlxsw_thermal_module_temp_and_thresholds_get(thermal->core,
+ tz->slot_index,
+ sensor_index, &temp,
+ &crit_temp, &emerg_temp);
+ *p_temp = temp;
+
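+	/* A zero reading means no valid module temperature is available
+	 * (e.g. the module is unplugged), so skip updating the trip points.
+	 */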
+ if (!temp)
+ return 0;
+
+ /* Update trip points. */
+ mlxsw_thermal_module_trips_update(dev, thermal->core, tz,
+ crit_temp, emerg_temp);
+
+ return 0;
+}
+
+static int
+mlxsw_thermal_module_trip_type_get(struct thermal_zone_device *tzdev, int trip,
+ enum thermal_trip_type *p_type)
+{
+ struct mlxsw_thermal_module *tz = tzdev->devdata;
+
+ if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
+ return -EINVAL;
+
+ *p_type = tz->trips[trip].type;
+ return 0;
+}
+
+static int
+mlxsw_thermal_module_trip_temp_get(struct thermal_zone_device *tzdev,
+ int trip, int *p_temp)
+{
+ struct mlxsw_thermal_module *tz = tzdev->devdata;
+
+ if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
+ return -EINVAL;
+
+ *p_temp = tz->trips[trip].temp;
+ return 0;
+}
+
+static int
+mlxsw_thermal_module_trip_temp_set(struct thermal_zone_device *tzdev,
+ int trip, int temp)
+{
+ struct mlxsw_thermal_module *tz = tzdev->devdata;
+
+ if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
+ return -EINVAL;
+
+ tz->trips[trip].temp = temp;
+ return 0;
+}
+
+static int
+mlxsw_thermal_module_trip_hyst_get(struct thermal_zone_device *tzdev, int trip,
+ int *p_hyst)
+{
+ struct mlxsw_thermal_module *tz = tzdev->devdata;
+
+ *p_hyst = tz->trips[trip].hyst;
+ return 0;
+}
+
+static int
+mlxsw_thermal_module_trip_hyst_set(struct thermal_zone_device *tzdev, int trip,
+ int hyst)
+{
+ struct mlxsw_thermal_module *tz = tzdev->devdata;
+
+ tz->trips[trip].hyst = hyst;
+ return 0;
+}
+
+static struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
+ .bind = mlxsw_thermal_module_bind,
+ .unbind = mlxsw_thermal_module_unbind,
+ .get_temp = mlxsw_thermal_module_temp_get,
+ .get_trip_type = mlxsw_thermal_module_trip_type_get,
+ .get_trip_temp = mlxsw_thermal_module_trip_temp_get,
+ .set_trip_temp = mlxsw_thermal_module_trip_temp_set,
+ .get_trip_hyst = mlxsw_thermal_module_trip_hyst_get,
+ .set_trip_hyst = mlxsw_thermal_module_trip_hyst_set,
+};
+
+static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev,
+ int *p_temp)
+{
+ struct mlxsw_thermal_module *tz = tzdev->devdata;
+ struct mlxsw_thermal *thermal = tz->parent;
+ char mtmp_pl[MLXSW_REG_MTMP_LEN];
+ u16 index;
+ int temp;
+ int err;
+
+ index = MLXSW_REG_MTMP_GBOX_INDEX_MIN + tz->module;
+ mlxsw_reg_mtmp_pack(mtmp_pl, tz->slot_index, index, false, false);
+
+ err = mlxsw_reg_query(thermal->core, MLXSW_REG(mtmp), mtmp_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL, NULL, NULL);
+
+ *p_temp = temp;
+ return 0;
+}
+
+static struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = {
+ .bind = mlxsw_thermal_module_bind,
+ .unbind = mlxsw_thermal_module_unbind,
+ .get_temp = mlxsw_thermal_gearbox_temp_get,
+ .get_trip_type = mlxsw_thermal_module_trip_type_get,
+ .get_trip_temp = mlxsw_thermal_module_trip_temp_get,
+ .set_trip_temp = mlxsw_thermal_module_trip_temp_set,
+ .get_trip_hyst = mlxsw_thermal_module_trip_hyst_get,
+ .set_trip_hyst = mlxsw_thermal_module_trip_hyst_set,
+};
+
+static int mlxsw_thermal_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *p_state)
+{
+ *p_state = MLXSW_THERMAL_MAX_STATE;
+ return 0;
+}
+
+static int mlxsw_thermal_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *p_state)
+
+{
+ struct mlxsw_thermal *thermal = cdev->devdata;
+ struct device *dev = thermal->bus_info->dev;
+ char mfsc_pl[MLXSW_REG_MFSC_LEN];
+ int err, idx;
+ u8 duty;
+
+ idx = mlxsw_get_cooling_device_idx(thermal, cdev);
+ if (idx < 0)
+ return idx;
+
+ mlxsw_reg_mfsc_pack(mfsc_pl, idx, 0);
+ err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfsc), mfsc_pl);
+ if (err) {
+ dev_err(dev, "Failed to query PWM duty\n");
+ return err;
+ }
+
+ duty = mlxsw_reg_mfsc_pwm_duty_cycle_get(mfsc_pl);
+ *p_state = mlxsw_duty_to_state(duty);
+ return 0;
+}
+
+static int mlxsw_thermal_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+
+{
+ struct mlxsw_thermal *thermal = cdev->devdata;
+ struct device *dev = thermal->bus_info->dev;
+ char mfsc_pl[MLXSW_REG_MFSC_LEN];
+ int idx;
+ int err;
+
+ if (state > MLXSW_THERMAL_MAX_STATE)
+ return -EINVAL;
+
+ idx = mlxsw_get_cooling_device_idx(thermal, cdev);
+ if (idx < 0)
+ return idx;
+
+ /* Normalize the state to the valid speed range. */
+ state = max_t(unsigned long, MLXSW_THERMAL_MIN_STATE, state);
+ mlxsw_reg_mfsc_pack(mfsc_pl, idx, mlxsw_state_to_duty(state));
+ err = mlxsw_reg_write(thermal->core, MLXSW_REG(mfsc), mfsc_pl);
+ if (err) {
+ dev_err(dev, "Failed to write PWM duty\n");
+ return err;
+ }
+ return 0;
+}
+
+static const struct thermal_cooling_device_ops mlxsw_cooling_ops = {
+ .get_max_state = mlxsw_thermal_get_max_state,
+ .get_cur_state = mlxsw_thermal_get_cur_state,
+ .set_cur_state = mlxsw_thermal_set_cur_state,
+};
+
+static int
+mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz)
+{
+ char tz_name[THERMAL_NAME_LENGTH];
+ int err;
+
+ if (module_tz->slot_index)
+ snprintf(tz_name, sizeof(tz_name), "mlxsw-lc%d-module%d",
+ module_tz->slot_index, module_tz->module + 1);
+ else
+ snprintf(tz_name, sizeof(tz_name), "mlxsw-module%d",
+ module_tz->module + 1);
+ module_tz->tzdev = thermal_zone_device_register(tz_name,
+ MLXSW_THERMAL_NUM_TRIPS,
+ MLXSW_THERMAL_TRIP_MASK,
+ module_tz,
+ &mlxsw_thermal_module_ops,
+ &mlxsw_thermal_params,
+ 0,
+ module_tz->parent->polling_delay);
+ if (IS_ERR(module_tz->tzdev)) {
+ err = PTR_ERR(module_tz->tzdev);
+ return err;
+ }
+
+ err = thermal_zone_device_enable(module_tz->tzdev);
+ if (err)
+ thermal_zone_device_unregister(module_tz->tzdev);
+
+ return err;
+}
+
+static void mlxsw_thermal_module_tz_fini(struct thermal_zone_device *tzdev)
+{
+ thermal_zone_device_unregister(tzdev);
+}
+
+static int
+mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core,
+ struct mlxsw_thermal *thermal,
+ struct mlxsw_thermal_area *area, u8 module)
+{
+ struct mlxsw_thermal_module *module_tz;
+ int dummy_temp, crit_temp, emerg_temp;
+ u16 sensor_index;
+
+ sensor_index = MLXSW_REG_MTMP_MODULE_INDEX_MIN + module;
+ module_tz = &area->tz_module_arr[module];
+ /* Skip if parent is already set (case of port split). */
+ if (module_tz->parent)
+ return 0;
+ module_tz->module = module;
+ module_tz->slot_index = area->slot_index;
+ module_tz->parent = thermal;
+ memcpy(module_tz->trips, default_thermal_trips,
+ sizeof(thermal->trips));
+	/* Initialize all trip points. */
+ mlxsw_thermal_module_trips_reset(module_tz);
+ /* Read module temperature and thresholds. */
+ mlxsw_thermal_module_temp_and_thresholds_get(core, area->slot_index,
+ sensor_index, &dummy_temp,
+ &crit_temp, &emerg_temp);
+ /* Update trip point according to the module data. */
+ return mlxsw_thermal_module_trips_update(dev, core, module_tz,
+ crit_temp, emerg_temp);
+}
+
+static void mlxsw_thermal_module_fini(struct mlxsw_thermal_module *module_tz)
+{
+ if (module_tz && module_tz->tzdev) {
+ mlxsw_thermal_module_tz_fini(module_tz->tzdev);
+ module_tz->tzdev = NULL;
+ module_tz->parent = NULL;
+ }
+}
+
+static int
+mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
+ struct mlxsw_thermal *thermal,
+ struct mlxsw_thermal_area *area)
+{
+ struct mlxsw_thermal_module *module_tz;
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ int i, err;
+
+ mlxsw_reg_mgpir_pack(mgpir_pl, area->slot_index);
+ err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL,
+ &area->tz_module_num, NULL);
+
+	/* For a modular system, the module counter could be zero. */
+ if (!area->tz_module_num)
+ return 0;
+
+ area->tz_module_arr = kcalloc(area->tz_module_num,
+ sizeof(*area->tz_module_arr),
+ GFP_KERNEL);
+ if (!area->tz_module_arr)
+ return -ENOMEM;
+
+ for (i = 0; i < area->tz_module_num; i++) {
+ err = mlxsw_thermal_module_init(dev, core, thermal, area, i);
+ if (err)
+ goto err_thermal_module_init;
+ }
+
+ for (i = 0; i < area->tz_module_num; i++) {
+ module_tz = &area->tz_module_arr[i];
+ if (!module_tz->parent)
+ continue;
+ err = mlxsw_thermal_module_tz_init(module_tz);
+ if (err)
+ goto err_thermal_module_tz_init;
+ }
+
+ return 0;
+
+err_thermal_module_tz_init:
+err_thermal_module_init:
+ for (i = area->tz_module_num - 1; i >= 0; i--)
+ mlxsw_thermal_module_fini(&area->tz_module_arr[i]);
+ kfree(area->tz_module_arr);
+ return err;
+}
+
+static void
+mlxsw_thermal_modules_fini(struct mlxsw_thermal *thermal,
+ struct mlxsw_thermal_area *area)
+{
+ int i;
+
+ for (i = area->tz_module_num - 1; i >= 0; i--)
+ mlxsw_thermal_module_fini(&area->tz_module_arr[i]);
+ kfree(area->tz_module_arr);
+}
+
+static int
+mlxsw_thermal_gearbox_tz_init(struct mlxsw_thermal_module *gearbox_tz)
+{
+ char tz_name[THERMAL_NAME_LENGTH];
+ int ret;
+
+ if (gearbox_tz->slot_index)
+ snprintf(tz_name, sizeof(tz_name), "mlxsw-lc%d-gearbox%d",
+ gearbox_tz->slot_index, gearbox_tz->module + 1);
+ else
+ snprintf(tz_name, sizeof(tz_name), "mlxsw-gearbox%d",
+ gearbox_tz->module + 1);
+ gearbox_tz->tzdev = thermal_zone_device_register(tz_name,
+ MLXSW_THERMAL_NUM_TRIPS,
+ MLXSW_THERMAL_TRIP_MASK,
+ gearbox_tz,
+ &mlxsw_thermal_gearbox_ops,
+ &mlxsw_thermal_params, 0,
+ gearbox_tz->parent->polling_delay);
+ if (IS_ERR(gearbox_tz->tzdev))
+ return PTR_ERR(gearbox_tz->tzdev);
+
+ ret = thermal_zone_device_enable(gearbox_tz->tzdev);
+ if (ret)
+ thermal_zone_device_unregister(gearbox_tz->tzdev);
+
+ return ret;
+}
+
+static void
+mlxsw_thermal_gearbox_tz_fini(struct mlxsw_thermal_module *gearbox_tz)
+{
+ thermal_zone_device_unregister(gearbox_tz->tzdev);
+}
+
+static int
+mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
+ struct mlxsw_thermal *thermal,
+ struct mlxsw_thermal_area *area)
+{
+ enum mlxsw_reg_mgpir_device_type device_type;
+ struct mlxsw_thermal_module *gearbox_tz;
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ u8 gbox_num;
+ int i;
+ int err;
+
+ mlxsw_reg_mgpir_pack(mgpir_pl, area->slot_index);
+ err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL,
+ NULL, NULL);
+ if (device_type != MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE ||
+ !gbox_num)
+ return 0;
+
+ area->tz_gearbox_num = gbox_num;
+ area->tz_gearbox_arr = kcalloc(area->tz_gearbox_num,
+ sizeof(*area->tz_gearbox_arr),
+ GFP_KERNEL);
+ if (!area->tz_gearbox_arr)
+ return -ENOMEM;
+
+ for (i = 0; i < area->tz_gearbox_num; i++) {
+ gearbox_tz = &area->tz_gearbox_arr[i];
+ memcpy(gearbox_tz->trips, default_thermal_trips,
+ sizeof(thermal->trips));
+ gearbox_tz->module = i;
+ gearbox_tz->parent = thermal;
+ gearbox_tz->slot_index = area->slot_index;
+ err = mlxsw_thermal_gearbox_tz_init(gearbox_tz);
+ if (err)
+ goto err_thermal_gearbox_tz_init;
+ }
+
+ return 0;
+
+err_thermal_gearbox_tz_init:
+ for (i--; i >= 0; i--)
+ mlxsw_thermal_gearbox_tz_fini(&area->tz_gearbox_arr[i]);
+ kfree(area->tz_gearbox_arr);
+ return err;
+}
+
+static void
+mlxsw_thermal_gearboxes_fini(struct mlxsw_thermal *thermal,
+ struct mlxsw_thermal_area *area)
+{
+ int i;
+
+ for (i = area->tz_gearbox_num - 1; i >= 0; i--)
+ mlxsw_thermal_gearbox_tz_fini(&area->tz_gearbox_arr[i]);
+ kfree(area->tz_gearbox_arr);
+}
+
+static void
+mlxsw_thermal_got_active(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ void *priv)
+{
+ struct mlxsw_thermal *thermal = priv;
+ struct mlxsw_thermal_area *linecard;
+ int err;
+
+ linecard = &thermal->line_cards[slot_index];
+
+ if (linecard->active)
+ return;
+
+ linecard->slot_index = slot_index;
+ err = mlxsw_thermal_modules_init(thermal->bus_info->dev, thermal->core,
+ thermal, linecard);
+ if (err) {
+ dev_err(thermal->bus_info->dev, "Failed to configure thermal objects for line card modules in slot %d\n",
+ slot_index);
+ return;
+ }
+
+ err = mlxsw_thermal_gearboxes_init(thermal->bus_info->dev,
+ thermal->core, thermal, linecard);
+ if (err) {
+ dev_err(thermal->bus_info->dev, "Failed to configure thermal objects for line card gearboxes in slot %d\n",
+ slot_index);
+ goto err_thermal_linecard_gearboxes_init;
+ }
+
+ linecard->active = true;
+
+ return;
+
+err_thermal_linecard_gearboxes_init:
+ mlxsw_thermal_modules_fini(thermal, linecard);
+}
+
+static void
+mlxsw_thermal_got_inactive(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ void *priv)
+{
+ struct mlxsw_thermal *thermal = priv;
+ struct mlxsw_thermal_area *linecard;
+
+ linecard = &thermal->line_cards[slot_index];
+ if (!linecard->active)
+ return;
+ linecard->active = false;
+ mlxsw_thermal_gearboxes_fini(thermal, linecard);
+ mlxsw_thermal_modules_fini(thermal, linecard);
+}
+
+static struct mlxsw_linecards_event_ops mlxsw_thermal_event_ops = {
+ .got_active = mlxsw_thermal_got_active,
+ .got_inactive = mlxsw_thermal_got_inactive,
+};
+
+int mlxsw_thermal_init(struct mlxsw_core *core,
+ const struct mlxsw_bus_info *bus_info,
+ struct mlxsw_thermal **p_thermal)
+{
+ char mfcr_pl[MLXSW_REG_MFCR_LEN] = { 0 };
+ enum mlxsw_reg_mfcr_pwm_frequency freq;
+ struct device *dev = bus_info->dev;
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ struct mlxsw_thermal *thermal;
+ u8 pwm_active, num_of_slots;
+ u16 tacho_active;
+ int err, i;
+
+ mlxsw_reg_mgpir_pack(mgpir_pl, 0);
+ err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, NULL,
+ &num_of_slots);
+
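+	/* Slot 0 describes the main board; line cards occupy slots
+	 * 1..num_of_slots, hence the additional entry below.
+	 */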
+ thermal = kzalloc(struct_size(thermal, line_cards, num_of_slots + 1),
+ GFP_KERNEL);
+ if (!thermal)
+ return -ENOMEM;
+
+ thermal->core = core;
+ thermal->bus_info = bus_info;
+ memcpy(thermal->trips, default_thermal_trips, sizeof(thermal->trips));
+ thermal->line_cards[0].slot_index = 0;
+
+ err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfcr), mfcr_pl);
+ if (err) {
+ dev_err(dev, "Failed to probe PWMs\n");
+ goto err_reg_query;
+ }
+ mlxsw_reg_mfcr_unpack(mfcr_pl, &freq, &tacho_active, &pwm_active);
+
+ for (i = 0; i < MLXSW_MFCR_TACHOS_MAX; i++) {
+ if (tacho_active & BIT(i)) {
+ char mfsl_pl[MLXSW_REG_MFSL_LEN];
+
+ mlxsw_reg_mfsl_pack(mfsl_pl, i, 0, 0);
+
+			/* We need to query the register to preserve the maximum value. */
+ err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfsl),
+ mfsl_pl);
+ if (err)
+ goto err_reg_query;
+
+			/* Set the minimal RPMs to 0. */
+ mlxsw_reg_mfsl_tach_min_set(mfsl_pl, 0);
+ err = mlxsw_reg_write(thermal->core, MLXSW_REG(mfsl),
+ mfsl_pl);
+ if (err)
+ goto err_reg_write;
+ }
+ }
+ for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) {
+ if (pwm_active & BIT(i)) {
+ struct thermal_cooling_device *cdev;
+
+ cdev = thermal_cooling_device_register("mlxsw_fan",
+ thermal,
+ &mlxsw_cooling_ops);
+ if (IS_ERR(cdev)) {
+ err = PTR_ERR(cdev);
+ dev_err(dev, "Failed to register cooling device\n");
+ goto err_thermal_cooling_device_register;
+ }
+ thermal->cdevs[i] = cdev;
+ }
+ }
+
+ thermal->polling_delay = bus_info->low_frequency ?
+ MLXSW_THERMAL_SLOW_POLL_INT :
+ MLXSW_THERMAL_POLL_INT;
+
+ thermal->tzdev = thermal_zone_device_register("mlxsw",
+ MLXSW_THERMAL_NUM_TRIPS,
+ MLXSW_THERMAL_TRIP_MASK,
+ thermal,
+ &mlxsw_thermal_ops,
+ &mlxsw_thermal_params, 0,
+ thermal->polling_delay);
+ if (IS_ERR(thermal->tzdev)) {
+ err = PTR_ERR(thermal->tzdev);
+ dev_err(dev, "Failed to register thermal zone\n");
+ goto err_thermal_zone_device_register;
+ }
+
+ err = mlxsw_thermal_modules_init(dev, core, thermal,
+ &thermal->line_cards[0]);
+ if (err)
+ goto err_thermal_modules_init;
+
+ err = mlxsw_thermal_gearboxes_init(dev, core, thermal,
+ &thermal->line_cards[0]);
+ if (err)
+ goto err_thermal_gearboxes_init;
+
+ err = mlxsw_linecards_event_ops_register(core,
+ &mlxsw_thermal_event_ops,
+ thermal);
+ if (err)
+ goto err_linecards_event_ops_register;
+
+ err = thermal_zone_device_enable(thermal->tzdev);
+ if (err)
+ goto err_thermal_zone_device_enable;
+
+ thermal->line_cards[0].active = true;
+ *p_thermal = thermal;
+ return 0;
+
+err_thermal_zone_device_enable:
+ mlxsw_linecards_event_ops_unregister(thermal->core,
+ &mlxsw_thermal_event_ops,
+ thermal);
+err_linecards_event_ops_register:
+ mlxsw_thermal_gearboxes_fini(thermal, &thermal->line_cards[0]);
+err_thermal_gearboxes_init:
+ mlxsw_thermal_modules_fini(thermal, &thermal->line_cards[0]);
+err_thermal_modules_init:
+ if (thermal->tzdev) {
+ thermal_zone_device_unregister(thermal->tzdev);
+ thermal->tzdev = NULL;
+ }
+err_thermal_zone_device_register:
+err_thermal_cooling_device_register:
+ for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++)
+ if (thermal->cdevs[i])
+ thermal_cooling_device_unregister(thermal->cdevs[i]);
+err_reg_write:
+err_reg_query:
+ kfree(thermal);
+ return err;
+}
+
+void mlxsw_thermal_fini(struct mlxsw_thermal *thermal)
+{
+ int i;
+
+ thermal->line_cards[0].active = false;
+ mlxsw_linecards_event_ops_unregister(thermal->core,
+ &mlxsw_thermal_event_ops,
+ thermal);
+ mlxsw_thermal_gearboxes_fini(thermal, &thermal->line_cards[0]);
+ mlxsw_thermal_modules_fini(thermal, &thermal->line_cards[0]);
+ if (thermal->tzdev) {
+ thermal_zone_device_unregister(thermal->tzdev);
+ thermal->tzdev = NULL;
+ }
+
+ for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) {
+ if (thermal->cdevs[i]) {
+ thermal_cooling_device_unregister(thermal->cdevs[i]);
+ thermal->cdevs[i] = NULL;
+ }
+ }
+
+ kfree(thermal);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/emad.h b/drivers/net/ethernet/mellanox/mlxsw/emad.h
new file mode 100644
index 000000000..acfbbec52
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/emad.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_EMAD_H
+#define _MLXSW_EMAD_H
+
+#define MLXSW_EMAD_MAX_FRAME_LEN 1518 /* Length in u8 */
+#define MLXSW_EMAD_MAX_RETRY 5
+
+/* EMAD Ethernet header */
+#define MLXSW_EMAD_ETH_HDR_LEN 0x10 /* Length in u8 */
+#define MLXSW_EMAD_EH_DMAC "\x01\x02\xc9\x00\x00\x01"
+#define MLXSW_EMAD_EH_SMAC "\x00\x02\xc9\x01\x02\x03"
+#define MLXSW_EMAD_EH_ETHERTYPE 0x8932
+#define MLXSW_EMAD_EH_MLX_PROTO 0
+#define MLXSW_EMAD_EH_PROTO_VERSION 0
+
+/* EMAD TLV Types */
+enum {
+ MLXSW_EMAD_TLV_TYPE_END,
+ MLXSW_EMAD_TLV_TYPE_OP,
+ MLXSW_EMAD_TLV_TYPE_STRING,
+ MLXSW_EMAD_TLV_TYPE_REG,
+};
+
+/* OP TLV */
+#define MLXSW_EMAD_OP_TLV_LEN 4 /* Length in u32 */
+
+enum {
+ MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS = 1,
+ MLXSW_EMAD_OP_TLV_CLASS_IPC = 2,
+};
+
+enum mlxsw_emad_op_tlv_status {
+ MLXSW_EMAD_OP_TLV_STATUS_SUCCESS,
+ MLXSW_EMAD_OP_TLV_STATUS_BUSY,
+ MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED,
+ MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV,
+ MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED,
+ MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED,
+ MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED,
+ MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER,
+ MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE,
+ MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK,
+ MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR = 0x70,
+};
+
+static inline char *mlxsw_emad_op_tlv_status_str(u8 status)
+{
+ switch (status) {
+ case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
+ return "operation performed";
+ case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
+ return "device is busy";
+ case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
+ return "version not supported";
+ case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
+ return "unknown TLV";
+ case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
+ return "register not supported";
+ case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
+ return "class not supported";
+ case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
+ return "method not supported";
+ case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
+ return "bad parameter";
+ case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
+ return "resource not available";
+ case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
+ return "acknowledged. retransmit";
+ case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
+ return "internal error";
+ default:
+ return "*UNKNOWN*";
+ }
+}
+
+enum {
+ MLXSW_EMAD_OP_TLV_REQUEST,
+ MLXSW_EMAD_OP_TLV_RESPONSE
+};
+
+enum {
+ MLXSW_EMAD_OP_TLV_METHOD_QUERY = 1,
+ MLXSW_EMAD_OP_TLV_METHOD_WRITE = 2,
+ MLXSW_EMAD_OP_TLV_METHOD_SEND = 3,
+ MLXSW_EMAD_OP_TLV_METHOD_EVENT = 5,
+};
+
+/* STRING TLV */
+#define MLXSW_EMAD_STRING_TLV_LEN 33 /* Length in u32 */
+
+/* END TLV */
+#define MLXSW_EMAD_END_TLV_LEN 1 /* Length in u32 */
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
new file mode 100644
index 000000000..3beefc167
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -0,0 +1,769 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_data/mlxreg.h>
+#include <linux/slab.h>
+
+#include "cmd.h"
+#include "core.h"
+#include "i2c.h"
+#include "resources.h"
+
+#define MLXSW_I2C_CIR2_BASE 0x72000
+#define MLXSW_I2C_CIR_STATUS_OFF 0x18
+#define MLXSW_I2C_CIR2_OFF_STATUS (MLXSW_I2C_CIR2_BASE + \
+ MLXSW_I2C_CIR_STATUS_OFF)
+#define MLXSW_I2C_OPMOD_SHIFT 12
+#define MLXSW_I2C_EVENT_BIT_SHIFT 22
+#define MLXSW_I2C_GO_BIT_SHIFT 23
+#define MLXSW_I2C_CIR_CTRL_STATUS_SHIFT 24
+#define MLXSW_I2C_EVENT_BIT BIT(MLXSW_I2C_EVENT_BIT_SHIFT)
+#define MLXSW_I2C_GO_BIT BIT(MLXSW_I2C_GO_BIT_SHIFT)
+#define MLXSW_I2C_GO_OPMODE BIT(MLXSW_I2C_OPMOD_SHIFT)
+#define MLXSW_I2C_SET_IMM_CMD (MLXSW_I2C_GO_OPMODE | \
+ MLXSW_CMD_OPCODE_QUERY_FW)
+#define MLXSW_I2C_PUSH_IMM_CMD (MLXSW_I2C_GO_BIT | \
+ MLXSW_I2C_SET_IMM_CMD)
+#define MLXSW_I2C_SET_CMD (MLXSW_CMD_OPCODE_ACCESS_REG)
+#define MLXSW_I2C_PUSH_CMD (MLXSW_I2C_GO_BIT | MLXSW_I2C_SET_CMD)
+#define MLXSW_I2C_TLV_HDR_SIZE 0x10
+#define MLXSW_I2C_ADDR_WIDTH 4
+#define MLXSW_I2C_PUSH_CMD_SIZE (MLXSW_I2C_ADDR_WIDTH + 4)
+#define MLXSW_I2C_SET_EVENT_CMD (MLXSW_I2C_EVENT_BIT)
+#define MLXSW_I2C_PUSH_EVENT_CMD (MLXSW_I2C_GO_BIT | \
+ MLXSW_I2C_SET_EVENT_CMD)
+#define MLXSW_I2C_READ_SEMA_SIZE 4
+#define MLXSW_I2C_PREP_SIZE (MLXSW_I2C_ADDR_WIDTH + 28)
+#define MLXSW_I2C_MBOX_SIZE 20
+#define MLXSW_I2C_MBOX_OUT_PARAM_OFF 12
+#define MLXSW_I2C_MBOX_OFFSET_BITS 20
+#define MLXSW_I2C_MBOX_SIZE_BITS 12
+#define MLXSW_I2C_ADDR_BUF_SIZE 4
+#define MLXSW_I2C_BLK_DEF 32
+#define MLXSW_I2C_BLK_MAX 100
+#define MLXSW_I2C_RETRY 5
+#define MLXSW_I2C_TIMEOUT_MSECS 5000
+#define MLXSW_I2C_MAX_DATA_SIZE 256
+
+/* The driver can be initialized by the kernel platform driver or from user
+ * space. In the first case the IRQ line number is passed through the platform
+ * data, otherwise the default IRQ line is used. The default IRQ is relevant
+ * only for a specific I2C slave address, which allows a 3.4 MHz I2C path to
+ * the chip (a special hardware feature for I2C acceleration).
+ */
+#define MLXSW_I2C_DEFAULT_IRQ 17
+#define MLXSW_FAST_I2C_SLAVE 0x37
+
+/**
+ * struct mlxsw_i2c - device private data:
+ * @cmd: command attributes;
+ * @cmd.mb_size_in: input mailbox size;
+ * @cmd.mb_off_in: input mailbox offset in register space;
+ * @cmd.mb_size_out: output mailbox size;
+ * @cmd.mb_off_out: output mailbox offset in register space;
+ * @cmd.lock: command execution lock;
+ * @dev: I2C device;
+ * @core: switch core pointer;
+ * @bus_info: bus info block;
+ * @block_size: maximum block size allowed to pass to the underlying layer;
+ * @pdata: device platform data;
+ * @irq_work: interrupts work item;
+ * @irq: IRQ line number;
+ */
+struct mlxsw_i2c {
+ struct {
+ u32 mb_size_in;
+ u32 mb_off_in;
+ u32 mb_size_out;
+ u32 mb_off_out;
+ struct mutex lock;
+ } cmd;
+ struct device *dev;
+ struct mlxsw_core *core;
+ struct mlxsw_bus_info bus_info;
+ u16 block_size;
+ struct mlxreg_core_hotplug_platform_data *pdata;
+ struct work_struct irq_work;
+ int irq;
+};
+
+#define MLXSW_I2C_READ_MSG(_client, _addr_buf, _buf, _len) { \
+ { .addr = (_client)->addr, \
+ .buf = (_addr_buf), \
+ .len = MLXSW_I2C_ADDR_BUF_SIZE, \
+ .flags = 0 }, \
+ { .addr = (_client)->addr, \
+ .buf = (_buf), \
+ .len = (_len), \
+ .flags = I2C_M_RD } }
+
+#define MLXSW_I2C_WRITE_MSG(_client, _buf, _len) \
+ { .addr = (_client)->addr, \
+ .buf = (u8 *)(_buf), \
+ .len = (_len), \
+ .flags = 0 }
+
+/* Routine converts input and output mailbox offsets and sizes. */
+static inline void
+mlxsw_i2c_convert_mbox(struct mlxsw_i2c *mlxsw_i2c, u8 *buf)
+{
+ u32 tmp;
+
+ /* Local in/out mailboxes: 20 bits for offset, 12 for size */
+ tmp = be32_to_cpup((__be32 *) buf);
+ mlxsw_i2c->cmd.mb_off_in = tmp &
+ GENMASK(MLXSW_I2C_MBOX_OFFSET_BITS - 1, 0);
+ mlxsw_i2c->cmd.mb_size_in = (tmp & GENMASK(31,
+ MLXSW_I2C_MBOX_OFFSET_BITS)) >>
+ MLXSW_I2C_MBOX_OFFSET_BITS;
+
+ tmp = be32_to_cpup((__be32 *) (buf + MLXSW_I2C_ADDR_WIDTH));
+ mlxsw_i2c->cmd.mb_off_out = tmp &
+ GENMASK(MLXSW_I2C_MBOX_OFFSET_BITS - 1, 0);
+ mlxsw_i2c->cmd.mb_size_out = (tmp & GENMASK(31,
+ MLXSW_I2C_MBOX_OFFSET_BITS)) >>
+ MLXSW_I2C_MBOX_OFFSET_BITS;
+}
+
+/* Routine obtains the register size from the mailbox buffer. */
+static inline int mlxsw_i2c_get_reg_size(u8 *in_mbox)
+{
+ u16 tmp = be16_to_cpup((__be16 *) (in_mbox + MLXSW_I2C_TLV_HDR_SIZE));
+
+ return (tmp & 0x7ff) * 4 + MLXSW_I2C_TLV_HDR_SIZE;
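+	/* The 16-bit word at MLXSW_I2C_TLV_HDR_SIZE is the start of the
+	 * register TLV; its low 11 bits hold the TLV length in 32-bit words.
+	 * Convert it to bytes and add the header size.
+	 */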
+}
+
+/* Routine sets I2C device internal offset in the transaction buffer. */
+static inline void mlxsw_i2c_set_slave_addr(u8 *buf, u32 off)
+{
+ __be32 *val = (__be32 *) buf;
+
+ *val = htonl(off);
+}
+
+/* Routine waits until go bit is cleared. */
+static int mlxsw_i2c_wait_go_bit(struct i2c_client *client,
+ struct mlxsw_i2c *mlxsw_i2c, u8 *p_status)
+{
+ u8 addr_buf[MLXSW_I2C_ADDR_BUF_SIZE];
+ u8 buf[MLXSW_I2C_READ_SEMA_SIZE];
+ int len = MLXSW_I2C_READ_SEMA_SIZE;
+ struct i2c_msg read_sema[] =
+ MLXSW_I2C_READ_MSG(client, addr_buf, buf, len);
+ bool wait_done = false;
+ unsigned long end;
+ int i = 0, err;
+
+ mlxsw_i2c_set_slave_addr(addr_buf, MLXSW_I2C_CIR2_OFF_STATUS);
+
+ end = jiffies + msecs_to_jiffies(MLXSW_I2C_TIMEOUT_MSECS);
+ do {
+ u32 ctrl;
+
+ err = i2c_transfer(client->adapter, read_sema,
+ ARRAY_SIZE(read_sema));
+
+ ctrl = be32_to_cpu(*(__be32 *) buf);
+ if (err == ARRAY_SIZE(read_sema)) {
+ if (!(ctrl & MLXSW_I2C_GO_BIT)) {
+ wait_done = true;
+ *p_status = ctrl >>
+ MLXSW_I2C_CIR_CTRL_STATUS_SHIFT;
+ break;
+ }
+ }
+ cond_resched();
+ } while ((time_before(jiffies, end)) || (i++ < MLXSW_I2C_RETRY));
+
+ if (wait_done) {
+ if (*p_status)
+ err = -EIO;
+ } else {
+ return -ETIMEDOUT;
+ }
+
+ return err > 0 ? 0 : err;
+}
+
+/* Routine posts a command to the ASIC through the mailbox. */
+static int mlxsw_i2c_write_cmd(struct i2c_client *client,
+ struct mlxsw_i2c *mlxsw_i2c,
+ int immediate)
+{
+ __be32 push_cmd_buf[MLXSW_I2C_PUSH_CMD_SIZE / 4] = {
+ 0, cpu_to_be32(MLXSW_I2C_PUSH_IMM_CMD)
+ };
+ __be32 prep_cmd_buf[MLXSW_I2C_PREP_SIZE / 4] = {
+ 0, 0, 0, 0, 0, 0,
+ cpu_to_be32(client->adapter->nr & 0xffff),
+ cpu_to_be32(MLXSW_I2C_SET_IMM_CMD)
+ };
+ struct i2c_msg push_cmd =
+ MLXSW_I2C_WRITE_MSG(client, push_cmd_buf,
+ MLXSW_I2C_PUSH_CMD_SIZE);
+ struct i2c_msg prep_cmd =
+ MLXSW_I2C_WRITE_MSG(client, prep_cmd_buf, MLXSW_I2C_PREP_SIZE);
+ int err;
+
+ if (!immediate) {
+ push_cmd_buf[1] = cpu_to_be32(MLXSW_I2C_PUSH_CMD);
+ prep_cmd_buf[7] = cpu_to_be32(MLXSW_I2C_SET_CMD);
+ }
+ mlxsw_i2c_set_slave_addr((u8 *)prep_cmd_buf,
+ MLXSW_I2C_CIR2_BASE);
+ mlxsw_i2c_set_slave_addr((u8 *)push_cmd_buf,
+ MLXSW_I2C_CIR2_OFF_STATUS);
+
+ /* Prepare Command Interface Register for transaction */
+ err = i2c_transfer(client->adapter, &prep_cmd, 1);
+ if (err < 0)
+ return err;
+ else if (err != 1)
+ return -EIO;
+
+ /* Write out Command Interface Register GO bit to push transaction */
+ err = i2c_transfer(client->adapter, &push_cmd, 1);
+ if (err < 0)
+ return err;
+ else if (err != 1)
+ return -EIO;
+
+ return 0;
+}
+
+/* Routine posts an initialization command to the ASIC through the mailbox. */
+static int
+mlxsw_i2c_write_init_cmd(struct i2c_client *client,
+ struct mlxsw_i2c *mlxsw_i2c, u16 opcode, u32 in_mod)
+{
+ __be32 push_cmd_buf[MLXSW_I2C_PUSH_CMD_SIZE / 4] = {
+ 0, cpu_to_be32(MLXSW_I2C_PUSH_EVENT_CMD)
+ };
+ __be32 prep_cmd_buf[MLXSW_I2C_PREP_SIZE / 4] = {
+ 0, 0, 0, 0, 0, 0,
+ cpu_to_be32(client->adapter->nr & 0xffff),
+ cpu_to_be32(MLXSW_I2C_SET_EVENT_CMD)
+ };
+ struct i2c_msg push_cmd =
+ MLXSW_I2C_WRITE_MSG(client, push_cmd_buf,
+ MLXSW_I2C_PUSH_CMD_SIZE);
+ struct i2c_msg prep_cmd =
+ MLXSW_I2C_WRITE_MSG(client, prep_cmd_buf, MLXSW_I2C_PREP_SIZE);
+ u8 status;
+ int err;
+
+ push_cmd_buf[1] = cpu_to_be32(MLXSW_I2C_PUSH_EVENT_CMD | opcode);
+ prep_cmd_buf[3] = cpu_to_be32(in_mod);
+ prep_cmd_buf[7] = cpu_to_be32(MLXSW_I2C_GO_BIT | opcode);
+ mlxsw_i2c_set_slave_addr((u8 *)prep_cmd_buf,
+ MLXSW_I2C_CIR2_BASE);
+ mlxsw_i2c_set_slave_addr((u8 *)push_cmd_buf,
+ MLXSW_I2C_CIR2_OFF_STATUS);
+
+ /* Prepare Command Interface Register for transaction */
+ err = i2c_transfer(client->adapter, &prep_cmd, 1);
+ if (err < 0)
+ return err;
+ else if (err != 1)
+ return -EIO;
+
+ /* Write out Command Interface Register GO bit to push transaction */
+ err = i2c_transfer(client->adapter, &push_cmd, 1);
+ if (err < 0)
+ return err;
+ else if (err != 1)
+ return -EIO;
+
+ /* Wait until go bit is cleared. */
+ err = mlxsw_i2c_wait_go_bit(client, mlxsw_i2c, &status);
+ if (err) {
+ dev_err(&client->dev, "HW semaphore is not released");
+ return err;
+ }
+
+ /* Validate transaction completion status. */
+ if (status) {
+ dev_err(&client->dev, "Bad transaction completion status %x\n",
+ status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/* Routine obtains mailbox offsets from the ASIC register space. */
+static int mlxsw_i2c_get_mbox(struct i2c_client *client,
+ struct mlxsw_i2c *mlxsw_i2c)
+{
+ u8 addr_buf[MLXSW_I2C_ADDR_BUF_SIZE];
+ u8 buf[MLXSW_I2C_MBOX_SIZE];
+ struct i2c_msg mbox_cmd[] =
+ MLXSW_I2C_READ_MSG(client, addr_buf, buf, MLXSW_I2C_MBOX_SIZE);
+ int err;
+
+	/* Read mailbox offsets. */
+ mlxsw_i2c_set_slave_addr(addr_buf, MLXSW_I2C_CIR2_BASE);
+ err = i2c_transfer(client->adapter, mbox_cmd, 2);
+ if (err != 2) {
+ dev_err(&client->dev, "Could not obtain mail boxes\n");
+ if (!err)
+ return -EIO;
+ else
+ return err;
+ }
+
+	/* Convert mailbox offsets and sizes. */
+ mlxsw_i2c_convert_mbox(mlxsw_i2c, &buf[MLXSW_I2C_MBOX_OUT_PARAM_OFF]);
+
+ return err;
+}
+
+/* Routine sends an I2C write transaction to the ASIC device. */
+static int
+mlxsw_i2c_write(struct device *dev, size_t in_mbox_size, u8 *in_mbox, int num,
+ u8 *p_status)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct mlxsw_i2c *mlxsw_i2c = i2c_get_clientdata(client);
+ unsigned long timeout = msecs_to_jiffies(MLXSW_I2C_TIMEOUT_MSECS);
+ int off = mlxsw_i2c->cmd.mb_off_in, chunk_size, i, j;
+ unsigned long end;
+ u8 *tran_buf;
+ struct i2c_msg write_tran =
+ MLXSW_I2C_WRITE_MSG(client, NULL, MLXSW_I2C_PUSH_CMD_SIZE);
+ int err;
+
+ tran_buf = kmalloc(mlxsw_i2c->block_size + MLXSW_I2C_ADDR_BUF_SIZE,
+ GFP_KERNEL);
+ if (!tran_buf)
+ return -ENOMEM;
+
+ write_tran.buf = tran_buf;
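+	/* Split the input mailbox into chunks of at most block_size bytes and
+	 * write each chunk at its offset within the input mailbox area.
+	 */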
+ for (i = 0; i < num; i++) {
+ chunk_size = (in_mbox_size > mlxsw_i2c->block_size) ?
+ mlxsw_i2c->block_size : in_mbox_size;
+ write_tran.len = MLXSW_I2C_ADDR_WIDTH + chunk_size;
+ mlxsw_i2c_set_slave_addr(tran_buf, off);
+ memcpy(&tran_buf[MLXSW_I2C_ADDR_BUF_SIZE], in_mbox +
+ mlxsw_i2c->block_size * i, chunk_size);
+
+ j = 0;
+ end = jiffies + timeout;
+ do {
+ err = i2c_transfer(client->adapter, &write_tran, 1);
+ if (err == 1)
+ break;
+
+ cond_resched();
+ } while ((time_before(jiffies, end)) ||
+ (j++ < MLXSW_I2C_RETRY));
+
+ if (err != 1) {
+ if (!err) {
+ err = -EIO;
+ goto mlxsw_i2c_write_exit;
+ }
+ }
+
+ off += chunk_size;
+ in_mbox_size -= chunk_size;
+ }
+
+ /* Prepare and write out Command Interface Register for transaction. */
+ err = mlxsw_i2c_write_cmd(client, mlxsw_i2c, 0);
+ if (err) {
+ dev_err(&client->dev, "Could not start transaction");
+ err = -EIO;
+ goto mlxsw_i2c_write_exit;
+ }
+
+ /* Wait until go bit is cleared. */
+ err = mlxsw_i2c_wait_go_bit(client, mlxsw_i2c, p_status);
+ if (err) {
+ dev_err(&client->dev, "HW semaphore is not released");
+ goto mlxsw_i2c_write_exit;
+ }
+
+ /* Validate transaction completion status. */
+ if (*p_status) {
+ dev_err(&client->dev, "Bad transaction completion status %x\n",
+ *p_status);
+ err = -EIO;
+ }
+
+mlxsw_i2c_write_exit:
+ kfree(tran_buf);
+ return err;
+}
+
+/* Routine executes I2C command. */
+static int
+mlxsw_i2c_cmd(struct device *dev, u16 opcode, u32 in_mod, size_t in_mbox_size,
+ u8 *in_mbox, size_t out_mbox_size, u8 *out_mbox, u8 *status)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct mlxsw_i2c *mlxsw_i2c = i2c_get_clientdata(client);
+ unsigned long timeout = msecs_to_jiffies(MLXSW_I2C_TIMEOUT_MSECS);
+ u8 tran_buf[MLXSW_I2C_ADDR_BUF_SIZE];
+ int num, chunk_size, reg_size, i, j;
+ int off = mlxsw_i2c->cmd.mb_off_out;
+ unsigned long end;
+ struct i2c_msg read_tran[] =
+ MLXSW_I2C_READ_MSG(client, tran_buf, NULL, 0);
+ int err;
+
+ WARN_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
+
+ if (in_mbox) {
+ reg_size = mlxsw_i2c_get_reg_size(in_mbox);
+ num = reg_size / mlxsw_i2c->block_size;
+ if (reg_size % mlxsw_i2c->block_size)
+ num++;
+
+ if (mutex_lock_interruptible(&mlxsw_i2c->cmd.lock) < 0) {
+ dev_err(&client->dev, "Could not acquire lock");
+ return -EINVAL;
+ }
+
+ err = mlxsw_i2c_write(dev, reg_size, in_mbox, num, status);
+ if (err)
+ goto cmd_fail;
+
+		/* No output mailbox in case of a write transaction. */
+ if (!out_mbox) {
+ mutex_unlock(&mlxsw_i2c->cmd.lock);
+ return 0;
+ }
+ } else {
+		/* No input mailbox in case of an initialization query command. */
+ reg_size = MLXSW_I2C_MAX_DATA_SIZE;
+ num = DIV_ROUND_UP(reg_size, mlxsw_i2c->block_size);
+
+ if (mutex_lock_interruptible(&mlxsw_i2c->cmd.lock) < 0) {
+ dev_err(&client->dev, "Could not acquire lock");
+ return -EINVAL;
+ }
+
+ err = mlxsw_i2c_write_init_cmd(client, mlxsw_i2c, opcode,
+ in_mod);
+ if (err)
+ goto cmd_fail;
+ }
+
+ /* Send read transaction to get output mailbox content. */
+ read_tran[1].buf = out_mbox;
+ for (i = 0; i < num; i++) {
+ chunk_size = (reg_size > mlxsw_i2c->block_size) ?
+ mlxsw_i2c->block_size : reg_size;
+ read_tran[1].len = chunk_size;
+ mlxsw_i2c_set_slave_addr(tran_buf, off);
+
+ j = 0;
+ end = jiffies + timeout;
+ do {
+ err = i2c_transfer(client->adapter, read_tran,
+ ARRAY_SIZE(read_tran));
+ if (err == ARRAY_SIZE(read_tran))
+ break;
+
+ cond_resched();
+ } while ((time_before(jiffies, end)) ||
+ (j++ < MLXSW_I2C_RETRY));
+
+ if (err != ARRAY_SIZE(read_tran)) {
+ if (!err)
+ err = -EIO;
+
+ goto cmd_fail;
+ }
+
+ off += chunk_size;
+ reg_size -= chunk_size;
+ read_tran[1].buf += chunk_size;
+ }
+
+ mutex_unlock(&mlxsw_i2c->cmd.lock);
+
+ return 0;
+
+cmd_fail:
+ mutex_unlock(&mlxsw_i2c->cmd.lock);
+ return err;
+}
+
+static int mlxsw_i2c_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
+ u32 in_mod, bool out_mbox_direct,
+ char *in_mbox, size_t in_mbox_size,
+ char *out_mbox, size_t out_mbox_size,
+ u8 *status)
+{
+ struct mlxsw_i2c *mlxsw_i2c = bus_priv;
+
+ return mlxsw_i2c_cmd(mlxsw_i2c->dev, opcode, in_mod, in_mbox_size,
+ in_mbox, out_mbox_size, out_mbox, status);
+}
+
+static bool mlxsw_i2c_skb_transmit_busy(void *bus_priv,
+ const struct mlxsw_tx_info *tx_info)
+{
+ return false;
+}
+
+static int mlxsw_i2c_skb_transmit(void *bus_priv, struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ return 0;
+}
+
+static int
+mlxsw_i2c_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile,
+ struct mlxsw_res *res)
+{
+ struct mlxsw_i2c *mlxsw_i2c = bus_priv;
+ char *mbox;
+ int err;
+
+ mlxsw_i2c->core = mlxsw_core;
+
+ mbox = mlxsw_cmd_mbox_alloc();
+ if (!mbox)
+ return -ENOMEM;
+
+ err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
+ if (err)
+ goto mbox_put;
+
+ mlxsw_i2c->bus_info.fw_rev.major =
+ mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
+ mlxsw_i2c->bus_info.fw_rev.minor =
+ mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
+ mlxsw_i2c->bus_info.fw_rev.subminor =
+ mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);
+
+ err = mlxsw_core_resources_query(mlxsw_core, mbox, res);
+
+mbox_put:
+ mlxsw_cmd_mbox_free(mbox);
+ return err;
+}
+
+static void mlxsw_i2c_fini(void *bus_priv)
+{
+ struct mlxsw_i2c *mlxsw_i2c = bus_priv;
+
+ mlxsw_i2c->core = NULL;
+}
+
+static void mlxsw_i2c_work_handler(struct work_struct *work)
+{
+ struct mlxsw_i2c *mlxsw_i2c;
+
+ mlxsw_i2c = container_of(work, struct mlxsw_i2c, irq_work);
+ mlxsw_core_irq_event_handlers_call(mlxsw_i2c->core);
+}
+
+static irqreturn_t mlxsw_i2c_irq_handler(int irq, void *dev)
+{
+ struct mlxsw_i2c *mlxsw_i2c = dev;
+
+ mlxsw_core_schedule_work(&mlxsw_i2c->irq_work);
+
+	/* The interrupt handler shares the IRQ line with the 'main' handler.
+	 * Return IRQ_NONE here, while the main handler will return IRQ_HANDLED.
+	 */
+ return IRQ_NONE;
+}
+
+static int mlxsw_i2c_irq_init(struct mlxsw_i2c *mlxsw_i2c, u8 addr)
+{
+ int err;
+
+	/* Initialize the interrupt handler if the system hotplug driver is
+	 * reachable; otherwise the interrupt line is not enabled, interrupts
+	 * will not be raised to the CPU and the request_irq() call is not valid.
+	 */
+ if (!IS_REACHABLE(CONFIG_MLXREG_HOTPLUG))
+ return 0;
+
+ /* Set default interrupt line. */
+ if (mlxsw_i2c->pdata && mlxsw_i2c->pdata->irq)
+ mlxsw_i2c->irq = mlxsw_i2c->pdata->irq;
+ else if (addr == MLXSW_FAST_I2C_SLAVE)
+ mlxsw_i2c->irq = MLXSW_I2C_DEFAULT_IRQ;
+
+ if (!mlxsw_i2c->irq)
+ return 0;
+
+ INIT_WORK(&mlxsw_i2c->irq_work, mlxsw_i2c_work_handler);
+ err = request_irq(mlxsw_i2c->irq, mlxsw_i2c_irq_handler,
+ IRQF_TRIGGER_FALLING | IRQF_SHARED, "mlxsw-i2c",
+ mlxsw_i2c);
+ if (err) {
+ dev_err(mlxsw_i2c->bus_info.dev, "Failed to request irq: %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void mlxsw_i2c_irq_fini(struct mlxsw_i2c *mlxsw_i2c)
+{
+ if (!IS_REACHABLE(CONFIG_MLXREG_HOTPLUG) || !mlxsw_i2c->irq)
+ return;
+ cancel_work_sync(&mlxsw_i2c->irq_work);
+ free_irq(mlxsw_i2c->irq, mlxsw_i2c);
+}
+
+static const struct mlxsw_bus mlxsw_i2c_bus = {
+ .kind = "i2c",
+ .init = mlxsw_i2c_init,
+ .fini = mlxsw_i2c_fini,
+ .skb_transmit_busy = mlxsw_i2c_skb_transmit_busy,
+ .skb_transmit = mlxsw_i2c_skb_transmit,
+ .cmd_exec = mlxsw_i2c_cmd_exec,
+};
+
+static int mlxsw_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct i2c_adapter_quirks *quirks = client->adapter->quirks;
+ struct mlxsw_i2c *mlxsw_i2c;
+ u8 status;
+ int err;
+
+ mlxsw_i2c = devm_kzalloc(&client->dev, sizeof(*mlxsw_i2c), GFP_KERNEL);
+ if (!mlxsw_i2c)
+ return -ENOMEM;
+
+ if (quirks) {
+ if ((quirks->max_read_len &&
+ quirks->max_read_len < MLXSW_I2C_BLK_DEF) ||
+ (quirks->max_write_len &&
+ quirks->max_write_len < MLXSW_I2C_BLK_DEF)) {
+ dev_err(&client->dev, "Insufficient transaction buffer length\n");
+ return -EOPNOTSUPP;
+ }
+
+ mlxsw_i2c->block_size = min_t(u16, MLXSW_I2C_BLK_MAX,
+ min_t(u16, quirks->max_read_len,
+ quirks->max_write_len));
+ } else {
+ mlxsw_i2c->block_size = MLXSW_I2C_BLK_DEF;
+ }
+
+ i2c_set_clientdata(client, mlxsw_i2c);
+ mutex_init(&mlxsw_i2c->cmd.lock);
+
+	/* In order to use mailboxes through the I2C, a special area is reserved
+	 * in the I2C address space that can be used for input and output
+	 * mailboxes. Such mailboxes are called local mailboxes. When using a
+	 * local mailbox, software should specify 0 as the Input/Output
+	 * parameters. The location of the local mailbox addresses in the I2C
+	 * address space can be retrieved through the QUERY_FW command.
+	 * For this purpose QUERY_FW is issued with an opcode modifier equal to
+	 * 0x01. For such a command the output parameter is an immediate value.
+	 * Here the QUERY_FW command is invoked for ASIC probing and for getting
+	 * the local mailbox addresses from the immediate output parameters.
+	 */
+
+ /* Prepare and write out Command Interface Register for transaction */
+ err = mlxsw_i2c_write_cmd(client, mlxsw_i2c, 1);
+ if (err) {
+ dev_err(&client->dev, "Could not start transaction");
+ goto errout;
+ }
+
+ /* Wait until go bit is cleared. */
+ err = mlxsw_i2c_wait_go_bit(client, mlxsw_i2c, &status);
+ if (err) {
+ dev_err(&client->dev, "HW semaphore is not released");
+ goto errout;
+ }
+
+ /* Validate transaction completion status. */
+ if (status) {
+ dev_err(&client->dev, "Bad transaction completion status %x\n",
+ status);
+ err = -EIO;
+ goto errout;
+ }
+
+ /* Get mailbox offsets. */
+ err = mlxsw_i2c_get_mbox(client, mlxsw_i2c);
+ if (err < 0) {
+ dev_err(&client->dev, "Fail to get mailboxes\n");
+ goto errout;
+ }
+
+ dev_info(&client->dev, "%s mb size=%x off=0x%08x out mb size=%x off=0x%08x\n",
+ id->name, mlxsw_i2c->cmd.mb_size_in,
+ mlxsw_i2c->cmd.mb_off_in, mlxsw_i2c->cmd.mb_size_out,
+ mlxsw_i2c->cmd.mb_off_out);
+
+ /* Register device bus. */
+ mlxsw_i2c->bus_info.device_kind = id->name;
+ mlxsw_i2c->bus_info.device_name = client->name;
+ mlxsw_i2c->bus_info.dev = &client->dev;
+ mlxsw_i2c->bus_info.low_frequency = true;
+ mlxsw_i2c->dev = &client->dev;
+ mlxsw_i2c->pdata = client->dev.platform_data;
+
+ err = mlxsw_i2c_irq_init(mlxsw_i2c, client->addr);
+ if (err)
+ goto errout;
+
+ err = mlxsw_core_bus_device_register(&mlxsw_i2c->bus_info,
+ &mlxsw_i2c_bus, mlxsw_i2c, false,
+ NULL, NULL);
+ if (err) {
+ dev_err(&client->dev, "Fail to register core bus\n");
+ goto err_bus_device_register;
+ }
+
+ return 0;
+
+err_bus_device_register:
+ mlxsw_i2c_irq_fini(mlxsw_i2c);
+errout:
+ mutex_destroy(&mlxsw_i2c->cmd.lock);
+ i2c_set_clientdata(client, NULL);
+
+ return err;
+}
+
+static void mlxsw_i2c_remove(struct i2c_client *client)
+{
+ struct mlxsw_i2c *mlxsw_i2c = i2c_get_clientdata(client);
+
+ mlxsw_core_bus_device_unregister(mlxsw_i2c->core, false);
+ mlxsw_i2c_irq_fini(mlxsw_i2c);
+ mutex_destroy(&mlxsw_i2c->cmd.lock);
+}
+
+int mlxsw_i2c_driver_register(struct i2c_driver *i2c_driver)
+{
+ i2c_driver->probe = mlxsw_i2c_probe;
+ i2c_driver->remove = mlxsw_i2c_remove;
+ return i2c_add_driver(i2c_driver);
+}
+EXPORT_SYMBOL(mlxsw_i2c_driver_register);
+
+void mlxsw_i2c_driver_unregister(struct i2c_driver *i2c_driver)
+{
+ i2c_del_driver(i2c_driver);
+}
+EXPORT_SYMBOL(mlxsw_i2c_driver_unregister);
+
+MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox switch I2C interface driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.h b/drivers/net/ethernet/mellanox/mlxsw/i2c.h
new file mode 100644
index 000000000..17e059d47
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_I2C_H
+#define _MLXSW_I2C_H
+
+#include <linux/i2c.h>
+
+#if IS_ENABLED(CONFIG_MLXSW_I2C)
+
+int mlxsw_i2c_driver_register(struct i2c_driver *i2c_driver);
+void mlxsw_i2c_driver_unregister(struct i2c_driver *i2c_driver);
+
+#else
+
+static inline int
+mlxsw_i2c_driver_register(struct i2c_driver *i2c_driver)
+{
+ return -ENODEV;
+}
+
+static inline void
+mlxsw_i2c_driver_unregister(struct i2c_driver *i2c_driver)
+{
+}
+
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
new file mode 100644
index 000000000..cfafbeb42
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -0,0 +1,553 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_ITEM_H
+#define _MLXSW_ITEM_H
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+
+struct mlxsw_item {
+ unsigned short offset; /* bytes in container */
+ short step; /* step in bytes for indexed items */
+ unsigned short in_step_offset; /* offset within one step */
+ unsigned char shift; /* shift in bits */
+ unsigned char element_size; /* size of element in bit array */
+ bool no_real_shift;
+ union {
+ unsigned char bits;
+ unsigned short bytes;
+ } size;
+ const char *name;
+};
+
+static inline unsigned int
+__mlxsw_item_offset(const struct mlxsw_item *item, unsigned short index,
+ size_t typesize)
+{
+ BUG_ON(index && !item->step);
+ if (item->offset % typesize != 0 ||
+ item->step % typesize != 0 ||
+ item->in_step_offset % typesize != 0) {
+ pr_err("mlxsw: item bug (name=%s,offset=%x,step=%x,in_step_offset=%x,typesize=%zx)\n",
+ item->name, item->offset, item->step,
+ item->in_step_offset, typesize);
+ BUG();
+ }
+
+ return ((item->offset + item->step * index + item->in_step_offset) /
+ typesize);
+}
+
+static inline u8 __mlxsw_item_get8(const char *buf,
+ const struct mlxsw_item *item,
+ unsigned short index)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u8));
+ u8 *b = (u8 *) buf;
+ u8 tmp;
+
+ tmp = b[offset];
+ tmp >>= item->shift;
+ tmp &= GENMASK(item->size.bits - 1, 0);
+ if (item->no_real_shift)
+ tmp <<= item->shift;
+ return tmp;
+}
+
+static inline void __mlxsw_item_set8(char *buf, const struct mlxsw_item *item,
+ unsigned short index, u8 val)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index,
+ sizeof(u8));
+ u8 *b = (u8 *) buf;
+ u8 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
+ u8 tmp;
+
+ if (!item->no_real_shift)
+ val <<= item->shift;
+ val &= mask;
+ tmp = b[offset];
+ tmp &= ~mask;
+ tmp |= val;
+ b[offset] = tmp;
+}
+
+static inline u16 __mlxsw_item_get16(const char *buf,
+ const struct mlxsw_item *item,
+ unsigned short index)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u16));
+ __be16 *b = (__be16 *) buf;
+ u16 tmp;
+
+ tmp = be16_to_cpu(b[offset]);
+ tmp >>= item->shift;
+ tmp &= GENMASK(item->size.bits - 1, 0);
+ if (item->no_real_shift)
+ tmp <<= item->shift;
+ return tmp;
+}
+
+static inline void __mlxsw_item_set16(char *buf, const struct mlxsw_item *item,
+ unsigned short index, u16 val)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index,
+ sizeof(u16));
+ __be16 *b = (__be16 *) buf;
+ u16 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
+ u16 tmp;
+
+ if (!item->no_real_shift)
+ val <<= item->shift;
+ val &= mask;
+ tmp = be16_to_cpu(b[offset]);
+ tmp &= ~mask;
+ tmp |= val;
+ b[offset] = cpu_to_be16(tmp);
+}
+
+static inline u32 __mlxsw_item_get32(const char *buf,
+ const struct mlxsw_item *item,
+ unsigned short index)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u32));
+ __be32 *b = (__be32 *) buf;
+ u32 tmp;
+
+ tmp = be32_to_cpu(b[offset]);
+ tmp >>= item->shift;
+ tmp &= GENMASK(item->size.bits - 1, 0);
+ if (item->no_real_shift)
+ tmp <<= item->shift;
+ return tmp;
+}
+
+static inline void __mlxsw_item_set32(char *buf, const struct mlxsw_item *item,
+ unsigned short index, u32 val)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index,
+ sizeof(u32));
+ __be32 *b = (__be32 *) buf;
+ u32 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
+ u32 tmp;
+
+ if (!item->no_real_shift)
+ val <<= item->shift;
+ val &= mask;
+ tmp = be32_to_cpu(b[offset]);
+ tmp &= ~mask;
+ tmp |= val;
+ b[offset] = cpu_to_be32(tmp);
+}
+
+static inline u64 __mlxsw_item_get64(const char *buf,
+ const struct mlxsw_item *item,
+ unsigned short index)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
+ __be64 *b = (__be64 *) buf;
+ u64 tmp;
+
+ tmp = be64_to_cpu(b[offset]);
+ tmp >>= item->shift;
+ tmp &= GENMASK_ULL(item->size.bits - 1, 0);
+ if (item->no_real_shift)
+ tmp <<= item->shift;
+ return tmp;
+}
+
+static inline void __mlxsw_item_set64(char *buf, const struct mlxsw_item *item,
+ unsigned short index, u64 val)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
+ __be64 *b = (__be64 *) buf;
+ u64 mask = GENMASK_ULL(item->size.bits - 1, 0) << item->shift;
+ u64 tmp;
+
+ if (!item->no_real_shift)
+ val <<= item->shift;
+ val &= mask;
+ tmp = be64_to_cpu(b[offset]);
+ tmp &= ~mask;
+ tmp |= val;
+ b[offset] = cpu_to_be64(tmp);
+}
+
+static inline void __mlxsw_item_memcpy_from(const char *buf, char *dst,
+ const struct mlxsw_item *item,
+ unsigned short index)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char));
+
+ memcpy(dst, &buf[offset], item->size.bytes);
+}
+
+static inline void __mlxsw_item_memcpy_to(char *buf, const char *src,
+ const struct mlxsw_item *item,
+ unsigned short index)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char));
+
+ memcpy(&buf[offset], src, item->size.bytes);
+}
+
+static inline char *__mlxsw_item_data(char *buf, const struct mlxsw_item *item,
+ unsigned short index)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char));
+
+ return &buf[offset];
+}
+
+static inline u16
+__mlxsw_item_bit_array_offset(const struct mlxsw_item *item,
+ u16 index, u8 *shift)
+{
+ u16 max_index, be_index;
+ u16 offset; /* byte offset inside the array */
+ u8 in_byte_index;
+
+ BUG_ON(index && !item->element_size);
+ if (item->offset % sizeof(u32) != 0 ||
+ BITS_PER_BYTE % item->element_size != 0) {
+ pr_err("mlxsw: item bug (name=%s,offset=%x,element_size=%x)\n",
+ item->name, item->offset, item->element_size);
+ BUG();
+ }
+
+ max_index = (item->size.bytes << 3) / item->element_size - 1;
+ be_index = max_index - index;
+ offset = be_index * item->element_size >> 3;
+ in_byte_index = index % (BITS_PER_BYTE / item->element_size);
+ *shift = in_byte_index * item->element_size;
+
+ return item->offset + offset;
+}
+
+static inline u8 __mlxsw_item_bit_array_get(const char *buf,
+ const struct mlxsw_item *item,
+ u16 index)
+{
+ u8 shift, tmp;
+ u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);
+
+ tmp = buf[offset];
+ tmp >>= shift;
+ tmp &= GENMASK(item->element_size - 1, 0);
+ return tmp;
+}
+
+static inline void __mlxsw_item_bit_array_set(char *buf,
+ const struct mlxsw_item *item,
+ u16 index, u8 val)
+{
+ u8 shift, tmp;
+ u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);
+ u8 mask = GENMASK(item->element_size - 1, 0) << shift;
+
+ val <<= shift;
+ val &= mask;
+ tmp = buf[offset];
+ tmp &= ~mask;
+ tmp |= val;
+ buf[offset] = tmp;
+}
+
+#define __ITEM_NAME(_type, _cname, _iname) \
+ mlxsw_##_type##_##_cname##_##_iname##_item
+
+/* _type: cmd_mbox, reg, etc.
+ * _cname: container name (e.g. command name, register name)
+ * _iname: item name within the container
+ */
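+
+/* For example (illustrative names only), a definition such as
+ * MLXSW_ITEM32(reg, foo, bar, 0x04, 0, 8) generates mlxsw_reg_foo_bar_get(buf)
+ * and mlxsw_reg_foo_bar_set(buf, val) helpers that access bits [7:0] of the
+ * big-endian 32-bit word at byte offset 0x04 of the register payload.
+ */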
+
+#define MLXSW_ITEM8(_type, _cname, _iname, _offset, _shift, _sizebits) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .shift = _shift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u8 __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
+{ \
+ return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
+} \
+static inline void __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u8 val) \
+{ \
+ __mlxsw_item_set8(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
+}
+
+#define MLXSW_ITEM8_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \
+ _step, _instepoffset, _norealshift) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .step = _step, \
+ .in_step_offset = _instepoffset, \
+ .shift = _shift, \
+ .no_real_shift = _norealshift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u8 __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
+{ \
+ return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index); \
+} \
+static inline void __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
+ u8 val) \
+{ \
+ __mlxsw_item_set8(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index, val); \
+}
+
+#define MLXSW_ITEM16(_type, _cname, _iname, _offset, _shift, _sizebits) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .shift = _shift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u16 __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
+{ \
+ return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
+} \
+static inline void __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 val) \
+{ \
+ __mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
+}
+
+#define MLXSW_ITEM16_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \
+ _step, _instepoffset, _norealshift) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .step = _step, \
+ .in_step_offset = _instepoffset, \
+ .shift = _shift, \
+ .no_real_shift = _norealshift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u16 __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
+{ \
+ return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index); \
+} \
+static inline void __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
+ u16 val) \
+{ \
+ __mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index, val); \
+}
+
+#define MLXSW_ITEM32(_type, _cname, _iname, _offset, _shift, _sizebits) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .shift = _shift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u32 __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
+{ \
+ return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
+} \
+static inline void __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val) \
+{ \
+ __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
+}
+
+#define LOCAL_PORT_LSB_SIZE 8
+#define LOCAL_PORT_MSB_SIZE 2
+
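+/* Local port numbers wider than 8 bits are split across two register fields:
+ * the 8 LSBs in one field and the 2 MSBs (lp_msb) in another. The helpers
+ * generated by MLXSW_ITEM32_LP() below combine the two into a single value.
+ */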
+#define MLXSW_ITEM32_LP(_type, _cname, _offset1, _shift1, _offset2, _shift2) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, local_port) = { \
+ .offset = _offset1, \
+ .shift = _shift1, \
+ .size = {.bits = LOCAL_PORT_LSB_SIZE,}, \
+ .name = #_type "_" #_cname "_local_port", \
+}; \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, lp_msb) = { \
+ .offset = _offset2, \
+ .shift = _shift2, \
+ .size = {.bits = LOCAL_PORT_MSB_SIZE,}, \
+ .name = #_type "_" #_cname "_lp_msb", \
+}; \
+static inline u32 __maybe_unused \
+mlxsw_##_type##_##_cname##_local_port_get(const char *buf) \
+{ \
+ u32 local_port, lp_msb; \
+ \
+ local_port = __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, \
+ local_port), 0); \
+ lp_msb = __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, lp_msb), \
+ 0); \
+ return (lp_msb << LOCAL_PORT_LSB_SIZE) + local_port; \
+} \
+static inline void __maybe_unused \
+mlxsw_##_type##_##_cname##_local_port_set(char *buf, u32 val) \
+{ \
+ __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, local_port), 0, \
+ val & ((1 << LOCAL_PORT_LSB_SIZE) - 1)); \
+ __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, lp_msb), 0, \
+ val >> LOCAL_PORT_LSB_SIZE); \
+}
+
+#define MLXSW_ITEM32_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \
+ _step, _instepoffset, _norealshift) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .step = _step, \
+ .in_step_offset = _instepoffset, \
+ .shift = _shift, \
+ .no_real_shift = _norealshift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u32 __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
+{ \
+ return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index); \
+} \
+static inline void __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
+ u32 val) \
+{ \
+ __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index, val); \
+}
+
+#define MLXSW_ITEM64(_type, _cname, _iname, _offset, _shift, _sizebits) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .shift = _shift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u64 __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
+{ \
+ return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
+} \
+static inline void __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u64 val) \
+{ \
+ __mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
+}
+
+#define MLXSW_ITEM64_INDEXED(_type, _cname, _iname, _offset, _shift, \
+ _sizebits, _step, _instepoffset, _norealshift) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .step = _step, \
+ .in_step_offset = _instepoffset, \
+ .shift = _shift, \
+ .no_real_shift = _norealshift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u64 __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
+{ \
+ return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index); \
+} \
+static inline void __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
+ u64 val) \
+{ \
+ __mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index, val); \
+}
+
+#define MLXSW_ITEM_BUF(_type, _cname, _iname, _offset, _sizebytes) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .size = {.bytes = _sizebytes,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline void __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf, char *dst) \
+{ \
+ __mlxsw_item_memcpy_from(buf, dst, \
+ &__ITEM_NAME(_type, _cname, _iname), 0); \
+} \
+static inline void __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, const char *src) \
+{ \
+ __mlxsw_item_memcpy_to(buf, src, \
+ &__ITEM_NAME(_type, _cname, _iname), 0); \
+} \
+static inline char * __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_data(char *buf) \
+{ \
+ return __mlxsw_item_data(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
+}
+
+#define MLXSW_ITEM_BUF_INDEXED(_type, _cname, _iname, _offset, _sizebytes, \
+ _step, _instepoffset) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .step = _step, \
+ .in_step_offset = _instepoffset, \
+ .size = {.bytes = _sizebytes,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline void __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf, \
+ unsigned short index, \
+ char *dst) \
+{ \
+ __mlxsw_item_memcpy_from(buf, dst, \
+ &__ITEM_NAME(_type, _cname, _iname), index); \
+} \
+static inline void __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, \
+ unsigned short index, \
+ const char *src) \
+{ \
+ __mlxsw_item_memcpy_to(buf, src, \
+ &__ITEM_NAME(_type, _cname, _iname), index); \
+} \
+static inline char * __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_data(char *buf, unsigned short index) \
+{ \
+ return __mlxsw_item_data(buf, \
+ &__ITEM_NAME(_type, _cname, _iname), index); \
+}
+
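+/* MLXSW_ITEM_BIT_ARRAY describes an array of small elements, each
+ * _element_size bits wide, packed into a field of _sizebytes bytes, and
+ * generates indexed get/set accessors for the individual elements.
+ */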
+#define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes, \
+ _element_size) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .element_size = _element_size, \
+ .size = {.bytes = _sizebytes,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u8 __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, u16 index) \
+{ \
+ return __mlxsw_item_bit_array_get(buf, \
+ &__ITEM_NAME(_type, _cname, _iname), \
+ index); \
+} \
+static inline void __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 index, u8 val) \
+{ \
+ return __mlxsw_item_bit_array_set(buf, \
+ &__ITEM_NAME(_type, _cname, _iname), \
+ index, val); \
+} \
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
new file mode 100644
index 000000000..15116d930
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -0,0 +1,760 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2016-2019 Mellanox Technologies. All rights reserved */
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/types.h>
+
+#include "core.h"
+#include "core_env.h"
+#include "i2c.h"
+
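+/* The "minimal" driver is used when the switch is managed over an I2C bus.
+ * It registers one netdev per port module solely so that the modules can be
+ * managed through ethtool (EEPROM access, power mode, reset); it does not
+ * implement packet transmit or receive.
+ */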
+static const char mlxsw_m_driver_name[] = "mlxsw_minimal";
+
+#define MLXSW_M_FWREV_MINOR 2000
+#define MLXSW_M_FWREV_SUBMINOR 1886
+
+static const struct mlxsw_fw_rev mlxsw_m_fw_rev = {
+ .minor = MLXSW_M_FWREV_MINOR,
+ .subminor = MLXSW_M_FWREV_SUBMINOR,
+};
+
+struct mlxsw_m_port;
+
+struct mlxsw_m_line_card {
+ bool active;
+ int module_to_port[];
+};
+
+struct mlxsw_m {
+ struct mlxsw_m_port **ports;
+ struct mlxsw_core *core;
+ const struct mlxsw_bus_info *bus_info;
+ u8 base_mac[ETH_ALEN];
+ u8 max_ports;
+ u8 max_modules_per_slot; /* Maximum number of modules per-slot. */
+ u8 num_of_slots; /* Including the main board. */
+ struct mlxsw_m_line_card **line_cards;
+};
+
+struct mlxsw_m_port {
+ struct net_device *dev;
+ struct mlxsw_m *mlxsw_m;
+ u16 local_port;
+ u8 slot_index;
+ u8 module;
+ u8 module_offset;
+};
+
+static int mlxsw_m_base_mac_get(struct mlxsw_m *mlxsw_m)
+{
+ char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
+ int err;
+
+ err = mlxsw_reg_query(mlxsw_m->core, MLXSW_REG(spad), spad_pl);
+ if (err)
+ return err;
+ mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_m->base_mac);
+ return 0;
+}
+
+static int mlxsw_m_port_open(struct net_device *dev)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
+ struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
+
+ return mlxsw_env_module_port_up(mlxsw_m->core, 0,
+ mlxsw_m_port->module);
+}
+
+static int mlxsw_m_port_stop(struct net_device *dev)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
+ struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
+
+ mlxsw_env_module_port_down(mlxsw_m->core, 0, mlxsw_m_port->module);
+ return 0;
+}
+
+static struct devlink_port *
+mlxsw_m_port_get_devlink_port(struct net_device *dev)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
+ struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
+
+ return mlxsw_core_port_devlink_port_get(mlxsw_m->core,
+ mlxsw_m_port->local_port);
+}
+
+static const struct net_device_ops mlxsw_m_port_netdev_ops = {
+ .ndo_open = mlxsw_m_port_open,
+ .ndo_stop = mlxsw_m_port_stop,
+ .ndo_get_devlink_port = mlxsw_m_port_get_devlink_port,
+};
+
+static void mlxsw_m_module_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
+ struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
+
+ strscpy(drvinfo->driver, mlxsw_m->bus_info->device_kind,
+ sizeof(drvinfo->driver));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d",
+ mlxsw_m->bus_info->fw_rev.major,
+ mlxsw_m->bus_info->fw_rev.minor,
+ mlxsw_m->bus_info->fw_rev.subminor);
+ strscpy(drvinfo->bus_info, mlxsw_m->bus_info->device_name,
+ sizeof(drvinfo->bus_info));
+}
+
+static int mlxsw_m_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
+ struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
+
+ return mlxsw_env_get_module_info(netdev, core,
+ mlxsw_m_port->slot_index,
+ mlxsw_m_port->module, modinfo);
+}
+
+static int
+mlxsw_m_get_module_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee,
+ u8 *data)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
+ struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
+
+ return mlxsw_env_get_module_eeprom(netdev, core,
+ mlxsw_m_port->slot_index,
+ mlxsw_m_port->module, ee, data);
+}
+
+static int
+mlxsw_m_get_module_eeprom_by_page(struct net_device *netdev,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
+ struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
+
+ return mlxsw_env_get_module_eeprom_by_page(core,
+ mlxsw_m_port->slot_index,
+ mlxsw_m_port->module,
+ page, extack);
+}
+
+static int mlxsw_m_reset(struct net_device *netdev, u32 *flags)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
+ struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
+
+ return mlxsw_env_reset_module(netdev, core, mlxsw_m_port->slot_index,
+ mlxsw_m_port->module,
+ flags);
+}
+
+static int
+mlxsw_m_get_module_power_mode(struct net_device *netdev,
+ struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
+ struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
+
+ return mlxsw_env_get_module_power_mode(core, mlxsw_m_port->slot_index,
+ mlxsw_m_port->module,
+ params, extack);
+}
+
+static int
+mlxsw_m_set_module_power_mode(struct net_device *netdev,
+ const struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
+ struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
+
+ return mlxsw_env_set_module_power_mode(core, mlxsw_m_port->slot_index,
+ mlxsw_m_port->module,
+ params->policy, extack);
+}
+
+static const struct ethtool_ops mlxsw_m_port_ethtool_ops = {
+ .get_drvinfo = mlxsw_m_module_get_drvinfo,
+ .get_module_info = mlxsw_m_get_module_info,
+ .get_module_eeprom = mlxsw_m_get_module_eeprom,
+ .get_module_eeprom_by_page = mlxsw_m_get_module_eeprom_by_page,
+ .reset = mlxsw_m_reset,
+ .get_module_power_mode = mlxsw_m_get_module_power_mode,
+ .set_module_power_mode = mlxsw_m_set_module_power_mode,
+};
+
+static int
+mlxsw_m_port_module_info_get(struct mlxsw_m *mlxsw_m, u16 local_port,
+ u8 *p_module, u8 *p_width, u8 *p_slot_index)
+{
+ char pmlp_pl[MLXSW_REG_PMLP_LEN];
+ int err;
+
+ mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
+ err = mlxsw_reg_query(mlxsw_m->core, MLXSW_REG(pmlp), pmlp_pl);
+ if (err)
+ return err;
+ *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
+ *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
+ *p_slot_index = mlxsw_reg_pmlp_slot_index_get(pmlp_pl, 0);
+
+ return 0;
+}
+
+static int
+mlxsw_m_port_dev_addr_get(struct mlxsw_m_port *mlxsw_m_port)
+{
+ struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
+ char ppad_pl[MLXSW_REG_PPAD_LEN];
+ u8 addr[ETH_ALEN];
+ int err;
+
+ mlxsw_reg_ppad_pack(ppad_pl, false, 0);
+ err = mlxsw_reg_query(mlxsw_m->core, MLXSW_REG(ppad), ppad_pl);
+ if (err)
+ return err;
+ mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, addr);
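+ /* Derive the port MAC address from the queried base MAC by adding a
+ * unique per-module index (offset by the line card position, if any).
+ */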
+ eth_hw_addr_gen(mlxsw_m_port->dev, addr, mlxsw_m_port->module + 1 +
+ mlxsw_m_port->module_offset);
+ return 0;
+}
+
+static bool mlxsw_m_port_created(struct mlxsw_m *mlxsw_m, u16 local_port)
+{
+ return mlxsw_m->ports[local_port];
+}
+
+static int
+mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u16 local_port, u8 slot_index,
+ u8 module)
+{
+ struct mlxsw_m_port *mlxsw_m_port;
+ struct net_device *dev;
+ int err;
+
+ err = mlxsw_core_port_init(mlxsw_m->core, local_port, slot_index,
+ module + 1, false, 0, false,
+ 0, mlxsw_m->base_mac,
+ sizeof(mlxsw_m->base_mac));
+ if (err) {
+ dev_err(mlxsw_m->bus_info->dev, "Port %d: Failed to init core port\n",
+ local_port);
+ return err;
+ }
+
+ dev = alloc_etherdev(sizeof(struct mlxsw_m_port));
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_alloc_etherdev;
+ }
+
+ SET_NETDEV_DEV(dev, mlxsw_m->bus_info->dev);
+ dev_net_set(dev, mlxsw_core_net(mlxsw_m->core));
+ mlxsw_m_port = netdev_priv(dev);
+ mlxsw_m_port->dev = dev;
+ mlxsw_m_port->mlxsw_m = mlxsw_m;
+ mlxsw_m_port->local_port = local_port;
+ mlxsw_m_port->module = module;
+ mlxsw_m_port->slot_index = slot_index;
+ /* Add the module offset for a line card. The offset for the main board
+ * is zero. For a line card in slot #n, the offset is (#n - 1)
+ * multiplied by the maximum number of modules that a line card can
+ * carry.
+ */
+ mlxsw_m_port->module_offset = mlxsw_m_port->slot_index ?
+ (mlxsw_m_port->slot_index - 1) *
+ mlxsw_m->max_modules_per_slot : 0;
+
+ dev->netdev_ops = &mlxsw_m_port_netdev_ops;
+ dev->ethtool_ops = &mlxsw_m_port_ethtool_ops;
+
+ err = mlxsw_m_port_dev_addr_get(mlxsw_m_port);
+ if (err) {
+ dev_err(mlxsw_m->bus_info->dev, "Port %d: Unable to get port mac address\n",
+ mlxsw_m_port->local_port);
+ goto err_dev_addr_get;
+ }
+
+ netif_carrier_off(dev);
+ mlxsw_m->ports[local_port] = mlxsw_m_port;
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(mlxsw_m->bus_info->dev, "Port %d: Failed to register netdev\n",
+ mlxsw_m_port->local_port);
+ goto err_register_netdev;
+ }
+
+ mlxsw_core_port_eth_set(mlxsw_m->core, mlxsw_m_port->local_port,
+ mlxsw_m_port, dev);
+
+ return 0;
+
+err_register_netdev:
+ mlxsw_m->ports[local_port] = NULL;
+err_dev_addr_get:
+ free_netdev(dev);
+err_alloc_etherdev:
+ mlxsw_core_port_fini(mlxsw_m->core, local_port);
+ return err;
+}
+
+static void mlxsw_m_port_remove(struct mlxsw_m *mlxsw_m, u16 local_port)
+{
+ struct mlxsw_m_port *mlxsw_m_port = mlxsw_m->ports[local_port];
+
+ mlxsw_core_port_clear(mlxsw_m->core, local_port, mlxsw_m);
+ unregister_netdev(mlxsw_m_port->dev); /* This calls ndo_stop */
+ mlxsw_m->ports[local_port] = NULL;
+ free_netdev(mlxsw_m_port->dev);
+ mlxsw_core_port_fini(mlxsw_m->core, local_port);
+}
+
+static int *
+mlxsw_m_port_mapping_get(struct mlxsw_m *mlxsw_m, u8 slot_index, u8 module)
+{
+ return &mlxsw_m->line_cards[slot_index]->module_to_port[module];
+}
+
+static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u16 local_port,
+ u8 *last_module)
+{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_m->core);
+ u8 module, width, slot_index;
+ int *module_to_port;
+ int err;
+
+ /* Fill out the module to local port mapping array */
+ err = mlxsw_m_port_module_info_get(mlxsw_m, local_port, &module,
+ &width, &slot_index);
+ if (err)
+ return err;
+
+ /* Skip if the line card has already been configured */
+ if (mlxsw_m->line_cards[slot_index]->active)
+ return 0;
+ if (!width)
+ return 0;
+ /* Skip the port if it belongs to the same module cluster as the previous one */
+ if (module == *last_module)
+ return 0;
+ *last_module = module;
+
+ if (WARN_ON_ONCE(module >= max_ports))
+ return -EINVAL;
+ mlxsw_env_module_port_map(mlxsw_m->core, slot_index, module);
+ module_to_port = mlxsw_m_port_mapping_get(mlxsw_m, slot_index, module);
+ *module_to_port = local_port;
+
+ return 0;
+}
+
+static void
+mlxsw_m_port_module_unmap(struct mlxsw_m *mlxsw_m, u8 slot_index, u8 module)
+{
+ int *module_to_port = mlxsw_m_port_mapping_get(mlxsw_m, slot_index,
+ module);
+ *module_to_port = -1;
+ mlxsw_env_module_port_unmap(mlxsw_m->core, slot_index, module);
+}
+
+static int mlxsw_m_linecards_init(struct mlxsw_m *mlxsw_m)
+{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_m->core);
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ u8 num_of_modules;
+ int i, j, err;
+
+ mlxsw_reg_mgpir_pack(mgpir_pl, 0);
+ err = mlxsw_reg_query(mlxsw_m->core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, &num_of_modules,
+ &mlxsw_m->num_of_slots);
+ /* If the system is modular, get the maximum number of modules per slot.
+ * Otherwise, get the maximum number of modules on the main board.
+ */
+ if (mlxsw_m->num_of_slots)
+ mlxsw_m->max_modules_per_slot =
+ mlxsw_reg_mgpir_max_modules_per_slot_get(mgpir_pl);
+ else
+ mlxsw_m->max_modules_per_slot = num_of_modules;
+ /* Add slot for main board. */
+ mlxsw_m->num_of_slots += 1;
+
+ mlxsw_m->ports = kcalloc(max_ports, sizeof(*mlxsw_m->ports),
+ GFP_KERNEL);
+ if (!mlxsw_m->ports)
+ return -ENOMEM;
+
+ mlxsw_m->line_cards = kcalloc(mlxsw_m->num_of_slots,
+ sizeof(*mlxsw_m->line_cards),
+ GFP_KERNEL);
+ if (!mlxsw_m->line_cards) {
+ err = -ENOMEM;
+ goto err_kcalloc;
+ }
+
+ for (i = 0; i < mlxsw_m->num_of_slots; i++) {
+ mlxsw_m->line_cards[i] =
+ kzalloc(struct_size(mlxsw_m->line_cards[i],
+ module_to_port,
+ mlxsw_m->max_modules_per_slot),
+ GFP_KERNEL);
+ if (!mlxsw_m->line_cards[i]) {
+ err = -ENOMEM;
+ goto err_kmalloc_array;
+ }
+
+ /* Invalidate the entries of module to local port mapping array. */
+ for (j = 0; j < mlxsw_m->max_modules_per_slot; j++)
+ mlxsw_m->line_cards[i]->module_to_port[j] = -1;
+ }
+
+ return 0;
+
+err_kmalloc_array:
+ for (i--; i >= 0; i--)
+ kfree(mlxsw_m->line_cards[i]);
+ kfree(mlxsw_m->line_cards);
+err_kcalloc:
+ kfree(mlxsw_m->ports);
+ return err;
+}
+
+static void mlxsw_m_linecards_fini(struct mlxsw_m *mlxsw_m)
+{
+ int i = mlxsw_m->num_of_slots;
+
+ for (i--; i >= 0; i--)
+ kfree(mlxsw_m->line_cards[i]);
+ kfree(mlxsw_m->line_cards);
+ kfree(mlxsw_m->ports);
+}
+
+static void
+mlxsw_m_linecard_port_module_unmap(struct mlxsw_m *mlxsw_m, u8 slot_index)
+{
+ int i;
+
+ for (i = mlxsw_m->max_modules_per_slot - 1; i >= 0; i--) {
+ int *module_to_port;
+
+ module_to_port = mlxsw_m_port_mapping_get(mlxsw_m, slot_index, i);
+ if (*module_to_port > 0)
+ mlxsw_m_port_module_unmap(mlxsw_m, slot_index, i);
+ }
+}
+
+static int
+mlxsw_m_linecard_ports_create(struct mlxsw_m *mlxsw_m, u8 slot_index)
+{
+ int *module_to_port;
+ int i, err;
+
+ for (i = 0; i < mlxsw_m->max_modules_per_slot; i++) {
+ module_to_port = mlxsw_m_port_mapping_get(mlxsw_m, slot_index, i);
+ if (*module_to_port > 0) {
+ err = mlxsw_m_port_create(mlxsw_m, *module_to_port,
+ slot_index, i);
+ if (err)
+ goto err_port_create;
+ /* Mark slot as active */
+ if (!mlxsw_m->line_cards[slot_index]->active)
+ mlxsw_m->line_cards[slot_index]->active = true;
+ }
+ }
+ return 0;
+
+err_port_create:
+ for (i--; i >= 0; i--) {
+ module_to_port = mlxsw_m_port_mapping_get(mlxsw_m, slot_index, i);
+ if (*module_to_port > 0 &&
+ mlxsw_m_port_created(mlxsw_m, *module_to_port)) {
+ mlxsw_m_port_remove(mlxsw_m, *module_to_port);
+ /* Mark slot as inactive */
+ if (mlxsw_m->line_cards[slot_index]->active)
+ mlxsw_m->line_cards[slot_index]->active = false;
+ }
+ }
+ return err;
+}
+
+static void
+mlxsw_m_linecard_ports_remove(struct mlxsw_m *mlxsw_m, u8 slot_index)
+{
+ int i;
+
+ for (i = 0; i < mlxsw_m->max_modules_per_slot; i++) {
+ int *module_to_port = mlxsw_m_port_mapping_get(mlxsw_m,
+ slot_index, i);
+
+ if (*module_to_port > 0 &&
+ mlxsw_m_port_created(mlxsw_m, *module_to_port)) {
+ mlxsw_m_port_remove(mlxsw_m, *module_to_port);
+ mlxsw_m_port_module_unmap(mlxsw_m, slot_index, i);
+ }
+ }
+}
+
+static int mlxsw_m_ports_module_map(struct mlxsw_m *mlxsw_m)
+{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_m->core);
+ u8 last_module = max_ports;
+ int i, err;
+
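+ /* Walk all local ports; local port 0 is the CPU port and carries no
+ * module, so start from 1.
+ */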
+ for (i = 1; i < max_ports; i++) {
+ err = mlxsw_m_port_module_map(mlxsw_m, i, &last_module);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
+{
+ int err;
+
+ /* Fill out module to local port mapping array */
+ err = mlxsw_m_ports_module_map(mlxsw_m);
+ if (err)
+ goto err_ports_module_map;
+
+ /* Create port objects for each valid entry */
+ err = mlxsw_m_linecard_ports_create(mlxsw_m, 0);
+ if (err)
+ goto err_linecard_ports_create;
+
+ return 0;
+
+err_linecard_ports_create:
+err_ports_module_map:
+ mlxsw_m_linecard_port_module_unmap(mlxsw_m, 0);
+
+ return err;
+}
+
+static void mlxsw_m_ports_remove(struct mlxsw_m *mlxsw_m)
+{
+ mlxsw_m_linecard_ports_remove(mlxsw_m, 0);
+}
+
+static void
+mlxsw_m_ports_remove_selected(struct mlxsw_core *mlxsw_core,
+ bool (*selector)(void *priv, u16 local_port),
+ void *priv)
+{
+ struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_linecard *linecard_priv = priv;
+ struct mlxsw_m_line_card *linecard;
+
+ linecard = mlxsw_m->line_cards[linecard_priv->slot_index];
+
+ if (WARN_ON(!linecard->active))
+ return;
+
+ mlxsw_m_linecard_ports_remove(mlxsw_m, linecard_priv->slot_index);
+ linecard->active = false;
+}
+
+static int mlxsw_m_fw_rev_validate(struct mlxsw_m *mlxsw_m)
+{
+ const struct mlxsw_fw_rev *rev = &mlxsw_m->bus_info->fw_rev;
+
+ /* Validate that the driver and firmware are compatible.
+ * The major version is not checked, since it identifies the chip type
+ * and the driver is expected to support any type.
+ */
+ if (mlxsw_core_fw_rev_minor_subminor_validate(rev, &mlxsw_m_fw_rev))
+ return 0;
+
+ dev_err(mlxsw_m->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n",
+ rev->major, rev->minor, rev->subminor, rev->major,
+ mlxsw_m_fw_rev.minor, mlxsw_m_fw_rev.subminor);
+
+ return -EINVAL;
+}
+
+static void
+mlxsw_m_got_active(struct mlxsw_core *mlxsw_core, u8 slot_index, void *priv)
+{
+ struct mlxsw_m_line_card *linecard;
+ struct mlxsw_m *mlxsw_m = priv;
+ int err;
+
+ linecard = mlxsw_m->line_cards[slot_index];
+ /* Skip if the line card has already been configured during init */
+ if (linecard->active)
+ return;
+
+ /* Fill out module to local port mapping array */
+ err = mlxsw_m_ports_module_map(mlxsw_m);
+ if (err)
+ goto err_ports_module_map;
+
+ /* Create port objects for each valid entry */
+ err = mlxsw_m_linecard_ports_create(mlxsw_m, slot_index);
+ if (err) {
+ dev_err(mlxsw_m->bus_info->dev, "Failed to create port for line card at slot %d\n",
+ slot_index);
+ goto err_linecard_ports_create;
+ }
+
+ linecard->active = true;
+
+ return;
+
+err_linecard_ports_create:
+err_ports_module_map:
+ mlxsw_m_linecard_port_module_unmap(mlxsw_m, slot_index);
+}
+
+static void
+mlxsw_m_got_inactive(struct mlxsw_core *mlxsw_core, u8 slot_index, void *priv)
+{
+ struct mlxsw_m_line_card *linecard;
+ struct mlxsw_m *mlxsw_m = priv;
+
+ linecard = mlxsw_m->line_cards[slot_index];
+
+ if (WARN_ON(!linecard->active))
+ return;
+
+ mlxsw_m_linecard_ports_remove(mlxsw_m, slot_index);
+ linecard->active = false;
+}
+
+static struct mlxsw_linecards_event_ops mlxsw_m_event_ops = {
+ .got_active = mlxsw_m_got_active,
+ .got_inactive = mlxsw_m_got_inactive,
+};
+
+static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core);
+ int err;
+
+ mlxsw_m->core = mlxsw_core;
+ mlxsw_m->bus_info = mlxsw_bus_info;
+
+ err = mlxsw_m_fw_rev_validate(mlxsw_m);
+ if (err)
+ return err;
+
+ err = mlxsw_m_base_mac_get(mlxsw_m);
+ if (err) {
+ dev_err(mlxsw_m->bus_info->dev, "Failed to get base mac\n");
+ return err;
+ }
+
+ err = mlxsw_m_linecards_init(mlxsw_m);
+ if (err) {
+ dev_err(mlxsw_m->bus_info->dev, "Failed to create line cards\n");
+ return err;
+ }
+
+ err = mlxsw_linecards_event_ops_register(mlxsw_core,
+ &mlxsw_m_event_ops, mlxsw_m);
+ if (err) {
+ dev_err(mlxsw_m->bus_info->dev, "Failed to register line cards operations\n");
+ goto linecards_event_ops_register;
+ }
+
+ err = mlxsw_m_ports_create(mlxsw_m);
+ if (err) {
+ dev_err(mlxsw_m->bus_info->dev, "Failed to create ports\n");
+ goto err_ports_create;
+ }
+
+ return 0;
+
+err_ports_create:
+ mlxsw_linecards_event_ops_unregister(mlxsw_core,
+ &mlxsw_m_event_ops, mlxsw_m);
+linecards_event_ops_register:
+ mlxsw_m_linecards_fini(mlxsw_m);
+ return err;
+}
+
+static void mlxsw_m_fini(struct mlxsw_core *mlxsw_core)
+{
+ struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core);
+
+ mlxsw_m_ports_remove(mlxsw_m);
+ mlxsw_linecards_event_ops_unregister(mlxsw_core,
+ &mlxsw_m_event_ops, mlxsw_m);
+ mlxsw_m_linecards_fini(mlxsw_m);
+}
+
+static const struct mlxsw_config_profile mlxsw_m_config_profile;
+
+static struct mlxsw_driver mlxsw_m_driver = {
+ .kind = mlxsw_m_driver_name,
+ .priv_size = sizeof(struct mlxsw_m),
+ .init = mlxsw_m_init,
+ .fini = mlxsw_m_fini,
+ .ports_remove_selected = mlxsw_m_ports_remove_selected,
+ .profile = &mlxsw_m_config_profile,
+};
+
+static const struct i2c_device_id mlxsw_m_i2c_id[] = {
+ { "mlxsw_minimal", 0},
+ { },
+};
+
+static struct i2c_driver mlxsw_m_i2c_driver = {
+ .driver.name = "mlxsw_minimal",
+ .class = I2C_CLASS_HWMON,
+ .id_table = mlxsw_m_i2c_id,
+};
+
+static int __init mlxsw_m_module_init(void)
+{
+ int err;
+
+ err = mlxsw_core_driver_register(&mlxsw_m_driver);
+ if (err)
+ return err;
+
+ err = mlxsw_i2c_driver_register(&mlxsw_m_i2c_driver);
+ if (err)
+ goto err_i2c_driver_register;
+
+ return 0;
+
+err_i2c_driver_register:
+ mlxsw_core_driver_unregister(&mlxsw_m_driver);
+
+ return err;
+}
+
+static void __exit mlxsw_m_module_exit(void)
+{
+ mlxsw_i2c_driver_unregister(&mlxsw_m_i2c_driver);
+ mlxsw_core_driver_unregister(&mlxsw_m_driver);
+}
+
+module_init(mlxsw_m_module_init);
+module_exit(mlxsw_m_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox minimal driver");
+MODULE_DEVICE_TABLE(i2c, mlxsw_m_i2c_id);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
new file mode 100644
index 000000000..51eea1f05
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -0,0 +1,2068 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/log2.h>
+#include <linux/string.h>
+
+#include "pci_hw.h"
+#include "pci.h"
+#include "core.h"
+#include "cmd.h"
+#include "port.h"
+#include "resources.h"
+
+#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
+ iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
+#define mlxsw_pci_read32(mlxsw_pci, reg) \
+ ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
+
+enum mlxsw_pci_queue_type {
+ MLXSW_PCI_QUEUE_TYPE_SDQ,
+ MLXSW_PCI_QUEUE_TYPE_RDQ,
+ MLXSW_PCI_QUEUE_TYPE_CQ,
+ MLXSW_PCI_QUEUE_TYPE_EQ,
+};
+
+#define MLXSW_PCI_QUEUE_TYPE_COUNT 4
+
+static const u16 mlxsw_pci_doorbell_type_offset[] = {
+ MLXSW_PCI_DOORBELL_SDQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
+ MLXSW_PCI_DOORBELL_RDQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
+ MLXSW_PCI_DOORBELL_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
+ MLXSW_PCI_DOORBELL_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
+};
+
+static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
+ 0, /* unused */
+ 0, /* unused */
+ MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
+ MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
+};
+
+struct mlxsw_pci_mem_item {
+ char *buf;
+ dma_addr_t mapaddr;
+ size_t size;
+};
+
+struct mlxsw_pci_queue_elem_info {
+ char *elem; /* pointer to actual dma mapped element mem chunk */
+ union {
+ struct {
+ struct sk_buff *skb;
+ } sdq;
+ struct {
+ struct sk_buff *skb;
+ } rdq;
+ } u;
+};
+
+struct mlxsw_pci_queue {
+ spinlock_t lock; /* for queue accesses */
+ struct mlxsw_pci_mem_item mem_item;
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ u16 producer_counter;
+ u16 consumer_counter;
+ u16 count; /* number of elements in queue */
+ u8 num; /* queue number */
+ u8 elem_size; /* size of one element */
+ enum mlxsw_pci_queue_type type;
+ struct tasklet_struct tasklet; /* queue processing tasklet */
+ struct mlxsw_pci *pci;
+ union {
+ struct {
+ u32 comp_sdq_count;
+ u32 comp_rdq_count;
+ enum mlxsw_pci_cqe_v v;
+ } cq;
+ struct {
+ u32 ev_cmd_count;
+ u32 ev_comp_count;
+ u32 ev_other_count;
+ } eq;
+ } u;
+};
+
+struct mlxsw_pci_queue_type_group {
+ struct mlxsw_pci_queue *q;
+ u8 count; /* number of queues in group */
+};
+
+struct mlxsw_pci {
+ struct pci_dev *pdev;
+ u8 __iomem *hw_addr;
+ u64 free_running_clock_offset;
+ u64 utc_sec_offset;
+ u64 utc_nsec_offset;
+ struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
+ u32 doorbell_offset;
+ struct mlxsw_core *core;
+ struct {
+ struct mlxsw_pci_mem_item *items;
+ unsigned int count;
+ } fw_area;
+ struct {
+ struct mlxsw_pci_mem_item out_mbox;
+ struct mlxsw_pci_mem_item in_mbox;
+ struct mutex lock; /* Lock access to command registers */
+ bool nopoll;
+ wait_queue_head_t wait;
+ bool wait_done;
+ struct {
+ u8 status;
+ u64 out_param;
+ } comp;
+ } cmd;
+ struct mlxsw_bus_info bus_info;
+ const struct pci_device_id *id;
+ enum mlxsw_pci_cqe_v max_cqe_ver; /* Maximal supported CQE version */
+ u8 num_sdq_cqs; /* Number of CQs used for SDQs */
+};
+
+static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
+{
+ tasklet_schedule(&q->tasklet);
+}
+
+static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
+ size_t elem_size, int elem_index)
+{
+ return q->mem_item.buf + (elem_size * elem_index);
+}
+
+static struct mlxsw_pci_queue_elem_info *
+mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
+{
+ return &q->elem_info[elem_index];
+}
+
+static struct mlxsw_pci_queue_elem_info *
+mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
+{
+ int index = q->producer_counter & (q->count - 1);
+
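+ /* The ring is full when the producer counter is exactly one ring size
+ * ahead of the consumer counter; no free element can be handed out.
+ */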
+ if ((u16) (q->producer_counter - q->consumer_counter) == q->count)
+ return NULL;
+ return mlxsw_pci_queue_elem_info_get(q, index);
+}
+
+static struct mlxsw_pci_queue_elem_info *
+mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
+{
+ int index = q->consumer_counter & (q->count - 1);
+
+ return mlxsw_pci_queue_elem_info_get(q, index);
+}
+
+static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
+{
+ return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
+}
+
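+/* An element still belongs to the hardware while its owner (phase) bit does
+ * not match the phase expected for the current software pass over the ring.
+ */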
+static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
+{
+ return owner_bit != !!(q->consumer_counter & q->count);
+}
+
+static struct mlxsw_pci_queue_type_group *
+mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
+ enum mlxsw_pci_queue_type q_type)
+{
+ return &mlxsw_pci->queues[q_type];
+}
+
+static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci,
+ enum mlxsw_pci_queue_type q_type)
+{
+ struct mlxsw_pci_queue_type_group *queue_group;
+
+ queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type);
+ return queue_group->count;
+}
+
+static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
+{
+ return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
+}
+
+static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
+{
+ return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
+}
+
+static struct mlxsw_pci_queue *
+__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
+ enum mlxsw_pci_queue_type q_type, u8 q_num)
+{
+ return &mlxsw_pci->queues[q_type].q[q_num];
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
+ u8 q_num)
+{
+ return __mlxsw_pci_queue_get(mlxsw_pci,
+ MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci,
+ u8 q_num)
+{
+ return __mlxsw_pci_queue_get(mlxsw_pci,
+ MLXSW_PCI_QUEUE_TYPE_RDQ, q_num);
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
+ u8 q_num)
+{
+ return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci,
+ u8 q_num)
+{
+ return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num);
+}
+
+static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q,
+ u16 val)
+{
+ mlxsw_pci_write32(mlxsw_pci,
+ DOORBELL(mlxsw_pci->doorbell_offset,
+ mlxsw_pci_doorbell_type_offset[q->type],
+ q->num), val);
+}
+
+static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q,
+ u16 val)
+{
+ mlxsw_pci_write32(mlxsw_pci,
+ DOORBELL(mlxsw_pci->doorbell_offset,
+ mlxsw_pci_doorbell_arm_type_offset[q->type],
+ q->num), val);
+}
+
+static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ wmb(); /* ensure all writes are done before we ring a bell */
+ __mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
+}
+
+static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ wmb(); /* ensure all writes are done before we ring a bell */
+ __mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
+ q->consumer_counter + q->count);
+}
+
+static void
+mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ wmb(); /* ensure all writes are done before we ring a bell */
+ __mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
+}
+
+static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
+ int page_index)
+{
+ return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
+}
+
+static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ struct mlxsw_pci_queue *q)
+{
+ int tclass;
+ int lp;
+ int i;
+ int err;
+
+ q->producer_counter = 0;
+ q->consumer_counter = 0;
+ tclass = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_PCI_SDQ_EMAD_TC :
+ MLXSW_PCI_SDQ_CTL_TC;
+ lp = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_IGNORE_WQE :
+ MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_WQE;
+
+ /* Use the CQ with the same number as this SDQ. */
+ mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
+ mlxsw_cmd_mbox_sw2hw_dq_sdq_lp_set(mbox, lp);
+ mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, tclass);
+ mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
+ for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+ dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+ mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
+ }
+
+ err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
+ if (err)
+ return err;
+ mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+ return 0;
+}
+
+static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
+}
+
+static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
+ int index, char *frag_data, size_t frag_len,
+ int direction)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ dma_addr_t mapaddr;
+
+ mapaddr = dma_map_single(&pdev->dev, frag_data, frag_len, direction);
+ if (unlikely(dma_mapping_error(&pdev->dev, mapaddr))) {
+ dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
+ return -EIO;
+ }
+ mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
+ mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
+ return 0;
+}
+
+static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
+ int index, int direction)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
+ dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);
+
+ if (!frag_len)
+ return;
+ dma_unmap_single(&pdev->dev, mapaddr, frag_len, direction);
+}
+
+static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue_elem_info *elem_info)
+{
+ size_t buf_len = MLXSW_PORT_MAX_MTU;
+ char *wqe = elem_info->elem;
+ struct sk_buff *skb;
+ int err;
+
+ skb = netdev_alloc_skb_ip_align(NULL, buf_len);
+ if (!skb)
+ return -ENOMEM;
+
+ err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
+ buf_len, DMA_FROM_DEVICE);
+ if (err)
+ goto err_frag_map;
+
+ elem_info->u.rdq.skb = skb;
+ return 0;
+
+err_frag_map:
+ dev_kfree_skb_any(skb);
+ return err;
+}
+
+static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue_elem_info *elem_info)
+{
+ struct sk_buff *skb;
+ char *wqe;
+
+ skb = elem_info->u.rdq.skb;
+ wqe = elem_info->elem;
+
+ mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+}
+
+static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ struct mlxsw_pci_queue *q)
+{
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ u8 sdq_count = mlxsw_pci_sdq_count(mlxsw_pci);
+ int i;
+ int err;
+
+ q->producer_counter = 0;
+ q->consumer_counter = 0;
+
+ /* Use the CQ whose number is this RDQ's number plus the SDQ count,
+ * since the lower CQ numbers are assigned to the SDQs.
+ */
+ mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num);
+ mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
+ for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+ dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+ mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
+ }
+
+ err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
+ if (err)
+ return err;
+
+ mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+
+ for (i = 0; i < q->count; i++) {
+ elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
+ BUG_ON(!elem_info);
+ err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
+ if (err)
+ goto rollback;
+ /* Everything is set up, ring doorbell to pass elem to HW */
+ q->producer_counter++;
+ mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+ }
+
+ return 0;
+
+rollback:
+ for (i--; i >= 0; i--) {
+ elem_info = mlxsw_pci_queue_elem_info_get(q, i);
+ mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
+ }
+ mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
+
+ return err;
+}
+
+static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ int i;
+
+ mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
+ for (i = 0; i < q->count; i++) {
+ elem_info = mlxsw_pci_queue_elem_info_get(q, i);
+ mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
+ }
+}
+
+static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ q->u.cq.v = mlxsw_pci->max_cqe_ver;
+
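+ /* CQs that serve SDQs (the first num_sdq_cqs CQs) must fall back to
+ * CQE version 1 when the device does not support CQE version 2
+ * completions on send queues.
+ */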
+ if (q->u.cq.v == MLXSW_PCI_CQE_V2 &&
+ q->num < mlxsw_pci->num_sdq_cqs &&
+ !mlxsw_core_sdq_supports_cqe_v2(mlxsw_pci->core))
+ q->u.cq.v = MLXSW_PCI_CQE_V1;
+}
+
+static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ struct mlxsw_pci_queue *q)
+{
+ int i;
+ int err;
+
+ q->consumer_counter = 0;
+
+ for (i = 0; i < q->count; i++) {
+ char *elem = mlxsw_pci_queue_elem_get(q, i);
+
+ mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1);
+ }
+
+ if (q->u.cq.v == MLXSW_PCI_CQE_V1)
+ mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
+ MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1);
+ else if (q->u.cq.v == MLXSW_PCI_CQE_V2)
+ mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
+ MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2);
+
+ mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
+ mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
+ mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
+ for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+ dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+ mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
+ }
+ err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
+ if (err)
+ return err;
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+ mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+ return 0;
+}
+
+static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
+}
+
+static unsigned int mlxsw_pci_read32_off(struct mlxsw_pci *mlxsw_pci,
+ ptrdiff_t off)
+{
+ return ioread32be(mlxsw_pci->hw_addr + off);
+}
+
+static void mlxsw_pci_skb_cb_ts_set(struct mlxsw_pci *mlxsw_pci,
+ struct sk_buff *skb,
+ enum mlxsw_pci_cqe_v cqe_v, char *cqe)
+{
+ u8 ts_type;
+
+ if (cqe_v != MLXSW_PCI_CQE_V2)
+ return;
+
+ ts_type = mlxsw_pci_cqe2_time_stamp_type_get(cqe);
+
+ if (ts_type != MLXSW_PCI_CQE_TIME_STAMP_TYPE_UTC &&
+ ts_type != MLXSW_PCI_CQE_TIME_STAMP_TYPE_MIRROR_UTC)
+ return;
+
+ mlxsw_skb_cb(skb)->cqe_ts.sec = mlxsw_pci_cqe2_time_stamp_sec_get(cqe);
+ mlxsw_skb_cb(skb)->cqe_ts.nsec =
+ mlxsw_pci_cqe2_time_stamp_nsec_get(cqe);
+}
+
+static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q,
+ u16 consumer_counter_limit,
+ enum mlxsw_pci_cqe_v cqe_v,
+ char *cqe)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ struct mlxsw_tx_info tx_info;
+ char *wqe;
+ struct sk_buff *skb;
+ int i;
+
+ spin_lock(&q->lock);
+ elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+ tx_info = mlxsw_skb_cb(elem_info->u.sdq.skb)->tx_info;
+ skb = elem_info->u.sdq.skb;
+ wqe = elem_info->elem;
+ for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
+ mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
+
+ if (unlikely(!tx_info.is_emad &&
+ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);
+ mlxsw_core_ptp_transmitted(mlxsw_pci->core, skb,
+ tx_info.local_port);
+ skb = NULL;
+ }
+
+ if (skb)
+ dev_kfree_skb_any(skb);
+ elem_info->u.sdq.skb = NULL;
+
+ if (q->consumer_counter++ != consumer_counter_limit)
+ dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
+ spin_unlock(&q->lock);
+}
+
+static void mlxsw_pci_cqe_rdq_md_tx_port_init(struct sk_buff *skb,
+ const char *cqe)
+{
+ struct mlxsw_skb_cb *cb = mlxsw_skb_cb(skb);
+
+ if (mlxsw_pci_cqe2_tx_lag_get(cqe)) {
+ cb->rx_md_info.tx_port_is_lag = true;
+ cb->rx_md_info.tx_lag_id = mlxsw_pci_cqe2_tx_lag_id_get(cqe);
+ cb->rx_md_info.tx_lag_port_index =
+ mlxsw_pci_cqe2_tx_lag_subport_get(cqe);
+ } else {
+ cb->rx_md_info.tx_port_is_lag = false;
+ cb->rx_md_info.tx_sys_port =
+ mlxsw_pci_cqe2_tx_system_port_get(cqe);
+ }
+
+ if (cb->rx_md_info.tx_sys_port != MLXSW_PCI_CQE2_TX_PORT_MULTI_PORT &&
+ cb->rx_md_info.tx_sys_port != MLXSW_PCI_CQE2_TX_PORT_INVALID)
+ cb->rx_md_info.tx_port_valid = 1;
+ else
+ cb->rx_md_info.tx_port_valid = 0;
+}
+
+static void mlxsw_pci_cqe_rdq_md_init(struct sk_buff *skb, const char *cqe)
+{
+ struct mlxsw_skb_cb *cb = mlxsw_skb_cb(skb);
+
+ cb->rx_md_info.tx_congestion = mlxsw_pci_cqe2_mirror_cong_get(cqe);
+ if (cb->rx_md_info.tx_congestion != MLXSW_PCI_CQE2_MIRROR_CONG_INVALID)
+ cb->rx_md_info.tx_congestion_valid = 1;
+ else
+ cb->rx_md_info.tx_congestion_valid = 0;
+ cb->rx_md_info.tx_congestion <<= MLXSW_PCI_CQE2_MIRROR_CONG_SHIFT;
+
+ cb->rx_md_info.latency = mlxsw_pci_cqe2_mirror_latency_get(cqe);
+ if (cb->rx_md_info.latency != MLXSW_PCI_CQE2_MIRROR_LATENCY_INVALID)
+ cb->rx_md_info.latency_valid = 1;
+ else
+ cb->rx_md_info.latency_valid = 0;
+
+ cb->rx_md_info.tx_tc = mlxsw_pci_cqe2_mirror_tclass_get(cqe);
+ if (cb->rx_md_info.tx_tc != MLXSW_PCI_CQE2_MIRROR_TCLASS_INVALID)
+ cb->rx_md_info.tx_tc_valid = 1;
+ else
+ cb->rx_md_info.tx_tc_valid = 0;
+
+ mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe);
+}
+
+static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q,
+ u16 consumer_counter_limit,
+ enum mlxsw_pci_cqe_v cqe_v, char *cqe)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ struct mlxsw_rx_info rx_info = {};
+ char wqe[MLXSW_PCI_WQE_SIZE];
+ struct sk_buff *skb;
+ u16 byte_count;
+ int err;
+
+ elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+ skb = elem_info->u.rdq.skb;
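+ /* Copy the WQE aside: the ring element is repopulated with a fresh skb
+ * below, while the saved copy is still needed to unmap the buffer of
+ * the packet just received.
+ */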
+ memcpy(wqe, elem_info->elem, MLXSW_PCI_WQE_SIZE);
+
+ if (q->consumer_counter++ != consumer_counter_limit)
+ dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
+
+ err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
+ if (err) {
+ dev_err_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
+ goto out;
+ }
+
+ mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
+
+ if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
+ rx_info.is_lag = true;
+ rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
+ rx_info.lag_port_index =
+ mlxsw_pci_cqe_lag_subport_get(cqe_v, cqe);
+ } else {
+ rx_info.is_lag = false;
+ rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
+ }
+
+ rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);
+
+ if (rx_info.trap_id == MLXSW_TRAP_ID_DISCARD_INGRESS_ACL ||
+ rx_info.trap_id == MLXSW_TRAP_ID_DISCARD_EGRESS_ACL) {
+ u32 cookie_index = 0;
+
+ if (mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2)
+ cookie_index = mlxsw_pci_cqe2_user_def_val_orig_pkt_len_get(cqe);
+ mlxsw_skb_cb(skb)->rx_md_info.cookie_index = cookie_index;
+ } else if (rx_info.trap_id >= MLXSW_TRAP_ID_MIRROR_SESSION0 &&
+ rx_info.trap_id <= MLXSW_TRAP_ID_MIRROR_SESSION7 &&
+ mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) {
+ rx_info.mirror_reason = mlxsw_pci_cqe2_mirror_reason_get(cqe);
+ mlxsw_pci_cqe_rdq_md_init(skb, cqe);
+ } else if (rx_info.trap_id == MLXSW_TRAP_ID_PKT_SAMPLE &&
+ mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) {
+ mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe);
+ }
+
+ mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);
+
+ byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
+ if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
+ byte_count -= ETH_FCS_LEN;
+ skb_put(skb, byte_count);
+ mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
+
+out:
+ /* Everything is set up, ring doorbell to pass elem to HW */
+ q->producer_counter++;
+ mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+ return;
+}
+
+static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
+{
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ char *elem;
+ bool owner_bit;
+
+ elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+ elem = elem_info->elem;
+ owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem);
+ if (mlxsw_pci_elem_hw_owned(q, owner_bit))
+ return NULL;
+ q->consumer_counter++;
+ rmb(); /* make sure we read owned bit before the rest of elem */
+ return elem;
+}
+
+static void mlxsw_pci_cq_tasklet(struct tasklet_struct *t)
+{
+ struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
+ struct mlxsw_pci *mlxsw_pci = q->pci;
+ char *cqe;
+ int items = 0;
+ int credits = q->count >> 1;
+
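+ /* Process at most half of the ring per tasklet run; the consumer
+ * doorbell is rung per CQE and re-arming is deferred until the whole
+ * batch has been handled.
+ */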
+ while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
+ u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
+ u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
+ u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
+ char ncqe[MLXSW_PCI_CQE_SIZE_MAX];
+
+ memcpy(ncqe, cqe, q->elem_size);
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+
+ if (sendq) {
+ struct mlxsw_pci_queue *sdq;
+
+ sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
+ mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
+ wqe_counter, q->u.cq.v, ncqe);
+ q->u.cq.comp_sdq_count++;
+ } else {
+ struct mlxsw_pci_queue *rdq;
+
+ rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
+ mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
+ wqe_counter, q->u.cq.v, ncqe);
+ q->u.cq.comp_rdq_count++;
+ }
+ if (++items == credits)
+ break;
+ }
+ if (items)
+ mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+}
+
+static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
+{
+ return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT :
+ MLXSW_PCI_CQE01_COUNT;
+}
+
+static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
+{
+ return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE :
+ MLXSW_PCI_CQE01_SIZE;
+}
+
+static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ struct mlxsw_pci_queue *q)
+{
+ int i;
+ int err;
+
+ q->consumer_counter = 0;
+
+ for (i = 0; i < q->count; i++) {
+ char *elem = mlxsw_pci_queue_elem_get(q, i);
+
+ mlxsw_pci_eqe_owner_set(elem, 1);
+ }
+
+ mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
+ mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
+ mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
+ for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+ dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+ mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
+ }
+ err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
+ if (err)
+ return err;
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+ mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+ return 0;
+}
+
+static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
+}
+
+static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
+{
+ mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
+ mlxsw_pci->cmd.comp.out_param =
+ ((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 |
+ mlxsw_pci_eqe_cmd_out_param_l_get(eqe);
+ mlxsw_pci->cmd.wait_done = true;
+ wake_up(&mlxsw_pci->cmd.wait);
+}
+
+static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
+{
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ char *elem;
+ bool owner_bit;
+
+ elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+ elem = elem_info->elem;
+ owner_bit = mlxsw_pci_eqe_owner_get(elem);
+ if (mlxsw_pci_elem_hw_owned(q, owner_bit))
+ return NULL;
+ q->consumer_counter++;
+ rmb(); /* make sure we read owned bit before the rest of elem */
+ return elem;
+}
+
+static void mlxsw_pci_eq_tasklet(struct tasklet_struct *t)
+{
+ struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
+ struct mlxsw_pci *mlxsw_pci = q->pci;
+ u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci);
+ unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
+ char *eqe;
+ u8 cqn;
+ bool cq_handle = false;
+ int items = 0;
+ int credits = q->count >> 1;
+
+ memset(&active_cqns, 0, sizeof(active_cqns));
+
+ while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
+
+ /* Command interface completion events are always received on
+ * queue MLXSW_PCI_EQ_ASYNC_NUM (EQ0), while completion queue
+ * events are mapped to queue MLXSW_PCI_EQ_COMP_NUM (EQ1).
+ */
+ switch (q->num) {
+ case MLXSW_PCI_EQ_ASYNC_NUM:
+ mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
+ q->u.eq.ev_cmd_count++;
+ break;
+ case MLXSW_PCI_EQ_COMP_NUM:
+ cqn = mlxsw_pci_eqe_cqn_get(eqe);
+ set_bit(cqn, active_cqns);
+ cq_handle = true;
+ q->u.eq.ev_comp_count++;
+ break;
+ default:
+ q->u.eq.ev_other_count++;
+ }
+ if (++items == credits)
+ break;
+ }
+ if (items) {
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+ mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+ }
+
+ if (!cq_handle)
+ return;
+ for_each_set_bit(cqn, active_cqns, cq_count) {
+ q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
+ mlxsw_pci_queue_tasklet_schedule(q);
+ }
+}
+
+struct mlxsw_pci_queue_ops {
+ const char *name;
+ enum mlxsw_pci_queue_type type;
+ void (*pre_init)(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q);
+ int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ struct mlxsw_pci_queue *q);
+ void (*fini)(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q);
+ void (*tasklet)(struct tasklet_struct *t);
+ u16 (*elem_count_f)(const struct mlxsw_pci_queue *q);
+ u8 (*elem_size_f)(const struct mlxsw_pci_queue *q);
+ u16 elem_count;
+ u8 elem_size;
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
+ .type = MLXSW_PCI_QUEUE_TYPE_SDQ,
+ .init = mlxsw_pci_sdq_init,
+ .fini = mlxsw_pci_sdq_fini,
+ .elem_count = MLXSW_PCI_WQE_COUNT,
+ .elem_size = MLXSW_PCI_WQE_SIZE,
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
+ .type = MLXSW_PCI_QUEUE_TYPE_RDQ,
+ .init = mlxsw_pci_rdq_init,
+ .fini = mlxsw_pci_rdq_fini,
+ .elem_count = MLXSW_PCI_WQE_COUNT,
+ .elem_size = MLXSW_PCI_WQE_SIZE
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
+ .type = MLXSW_PCI_QUEUE_TYPE_CQ,
+ .pre_init = mlxsw_pci_cq_pre_init,
+ .init = mlxsw_pci_cq_init,
+ .fini = mlxsw_pci_cq_fini,
+ .tasklet = mlxsw_pci_cq_tasklet,
+ .elem_count_f = mlxsw_pci_cq_elem_count,
+ .elem_size_f = mlxsw_pci_cq_elem_size
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
+ .type = MLXSW_PCI_QUEUE_TYPE_EQ,
+ .init = mlxsw_pci_eq_init,
+ .fini = mlxsw_pci_eq_fini,
+ .tasklet = mlxsw_pci_eq_tasklet,
+ .elem_count = MLXSW_PCI_EQE_COUNT,
+ .elem_size = MLXSW_PCI_EQE_SIZE
+};
+
+static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ const struct mlxsw_pci_queue_ops *q_ops,
+ struct mlxsw_pci_queue *q, u8 q_num)
+{
+ struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
+ int i;
+ int err;
+
+ q->num = q_num;
+ if (q_ops->pre_init)
+ q_ops->pre_init(mlxsw_pci, q);
+
+ spin_lock_init(&q->lock);
+ q->count = q_ops->elem_count_f ? q_ops->elem_count_f(q) :
+ q_ops->elem_count;
+ q->elem_size = q_ops->elem_size_f ? q_ops->elem_size_f(q) :
+ q_ops->elem_size;
+ q->type = q_ops->type;
+ q->pci = mlxsw_pci;
+
+ if (q_ops->tasklet)
+ tasklet_setup(&q->tasklet, q_ops->tasklet);
+
+ mem_item->size = MLXSW_PCI_AQ_SIZE;
+ mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
+ mem_item->size, &mem_item->mapaddr,
+ GFP_KERNEL);
+ if (!mem_item->buf)
+ return -ENOMEM;
+
+ q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
+ if (!q->elem_info) {
+ err = -ENOMEM;
+ goto err_elem_info_alloc;
+ }
+
+ /* Initialize the elem_info entries that describe the DMA-mapped
+ * elements, for easy access later.
+ */
+ for (i = 0; i < q->count; i++) {
+ struct mlxsw_pci_queue_elem_info *elem_info;
+
+ elem_info = mlxsw_pci_queue_elem_info_get(q, i);
+ elem_info->elem =
+ __mlxsw_pci_queue_elem_get(q, q->elem_size, i);
+ }
+
+ mlxsw_cmd_mbox_zero(mbox);
+ err = q_ops->init(mlxsw_pci, mbox, q);
+ if (err)
+ goto err_q_ops_init;
+ return 0;
+
+err_q_ops_init:
+ kfree(q->elem_info);
+err_elem_info_alloc:
+ dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
+ return err;
+}
+
+static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
+ const struct mlxsw_pci_queue_ops *q_ops,
+ struct mlxsw_pci_queue *q)
+{
+ struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
+
+ q_ops->fini(mlxsw_pci, q);
+ kfree(q->elem_info);
+ dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
+}
+
+static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ const struct mlxsw_pci_queue_ops *q_ops,
+ u8 num_qs)
+{
+ struct mlxsw_pci_queue_type_group *queue_group;
+ int i;
+ int err;
+
+ queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
+ queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
+ if (!queue_group->q)
+ return -ENOMEM;
+
+ for (i = 0; i < num_qs; i++) {
+ err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops,
+ &queue_group->q[i], i);
+ if (err)
+ goto err_queue_init;
+ }
+ queue_group->count = num_qs;
+
+ return 0;
+
+err_queue_init:
+ for (i--; i >= 0; i--)
+ mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
+ kfree(queue_group->q);
+ return err;
+}
+
+static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci,
+ const struct mlxsw_pci_queue_ops *q_ops)
+{
+ struct mlxsw_pci_queue_type_group *queue_group;
+ int i;
+
+ queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
+ for (i = 0; i < queue_group->count; i++)
+ mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
+ kfree(queue_group->q);
+}
+
+static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ u8 num_sdqs;
+ u8 sdq_log2sz;
+ u8 num_rdqs;
+ u8 rdq_log2sz;
+ u8 num_cqs;
+ u8 cq_log2sz;
+ u8 cqv2_log2sz;
+ u8 num_eqs;
+ u8 eq_log2sz;
+ int err;
+
+ mlxsw_cmd_mbox_zero(mbox);
+ err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox);
+ if (err)
+ return err;
+
+ num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox);
+ sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox);
+ num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox);
+ rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
+ num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
+ cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
+ cqv2_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cqv2_sz_get(mbox);
+ num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
+ eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);
+
+ if (num_sdqs + num_rdqs > num_cqs ||
+ num_sdqs < MLXSW_PCI_SDQS_MIN ||
+ num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) {
+ dev_err(&pdev->dev, "Unsupported number of queues\n");
+ return -EINVAL;
+ }
+
+ if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
+ (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
+ (1 << cq_log2sz != MLXSW_PCI_CQE01_COUNT) ||
+ (mlxsw_pci->max_cqe_ver == MLXSW_PCI_CQE_V2 &&
+ (1 << cqv2_log2sz != MLXSW_PCI_CQE2_COUNT)) ||
+ (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
+ dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
+ return -EINVAL;
+ }
+
+ mlxsw_pci->num_sdq_cqs = num_sdqs;
+
+ err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
+ num_eqs);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize event queues\n");
+ return err;
+ }
+
+ err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops,
+ num_cqs);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize completion queues\n");
+ goto err_cqs_init;
+ }
+
+ err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops,
+ num_sdqs);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n");
+ goto err_sdqs_init;
+ }
+
+ err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops,
+ num_rdqs);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n");
+ goto err_rdqs_init;
+ }
+
+ /* Until this point the command interface had to poll for completions;
+ * now that the event queues are set up, completions arrive via EQ0.
+ */
+ mlxsw_pci->cmd.nopoll = true;
+ return 0;
+
+err_rdqs_init:
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
+err_sdqs_init:
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
+err_cqs_init:
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
+ return err;
+}
+
+static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
+{
+ mlxsw_pci->cmd.nopoll = false;
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
+}
+
+static void
+mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
+ char *mbox, int index,
+ const struct mlxsw_swid_config *swid)
+{
+ u8 mask = 0;
+
+ if (swid->used_type) {
+ mlxsw_cmd_mbox_config_profile_swid_config_type_set(
+ mbox, index, swid->type);
+ mask |= 1;
+ }
+ if (swid->used_properties) {
+ mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
+ mbox, index, swid->properties);
+ mask |= 2;
+ }
+ mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
+}
+
+static int
+mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_pci *mlxsw_pci,
+ const struct mlxsw_config_profile *profile,
+ struct mlxsw_res *res)
+{
+ u64 single_size, double_size, linear_size;
+ int err;
+
+ err = mlxsw_core_kvd_sizes_get(mlxsw_pci->core, profile,
+ &single_size, &double_size,
+ &linear_size);
+ if (err)
+ return err;
+
+ MLXSW_RES_SET(res, KVD_SINGLE_SIZE, single_size);
+ MLXSW_RES_SET(res, KVD_DOUBLE_SIZE, double_size);
+ MLXSW_RES_SET(res, KVD_LINEAR_SIZE, linear_size);
+
+ return 0;
+}
+
+static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ const struct mlxsw_config_profile *profile,
+ struct mlxsw_res *res)
+{
+ int i;
+ int err;
+
+ mlxsw_cmd_mbox_zero(mbox);
+
+ if (profile->used_max_vepa_channels) {
+ mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
+ mbox, profile->max_vepa_channels);
+ }
+ if (profile->used_max_lag) {
+ mlxsw_cmd_mbox_config_profile_set_max_lag_set(mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_lag_set(mbox,
+ profile->max_lag);
+ }
+ if (profile->used_max_mid) {
+ mlxsw_cmd_mbox_config_profile_set_max_mid_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_mid_set(
+ mbox, profile->max_mid);
+ }
+ if (profile->used_max_pgt) {
+ mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_pgt_set(
+ mbox, profile->max_pgt);
+ }
+ if (profile->used_max_system_port) {
+ mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_system_port_set(
+ mbox, profile->max_system_port);
+ }
+ if (profile->used_max_vlan_groups) {
+ mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
+ mbox, profile->max_vlan_groups);
+ }
+ if (profile->used_max_regions) {
+ mlxsw_cmd_mbox_config_profile_set_max_regions_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_regions_set(
+ mbox, profile->max_regions);
+ }
+ if (profile->used_flood_tables) {
+ mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
+ mbox, profile->max_flood_tables);
+ mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
+ mbox, profile->max_vid_flood_tables);
+ mlxsw_cmd_mbox_config_profile_max_fid_offset_flood_tables_set(
+ mbox, profile->max_fid_offset_flood_tables);
+ mlxsw_cmd_mbox_config_profile_fid_offset_flood_table_size_set(
+ mbox, profile->fid_offset_flood_table_size);
+ mlxsw_cmd_mbox_config_profile_max_fid_flood_tables_set(
+ mbox, profile->max_fid_flood_tables);
+ mlxsw_cmd_mbox_config_profile_fid_flood_table_size_set(
+ mbox, profile->fid_flood_table_size);
+ }
+ if (profile->used_flood_mode) {
+ mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_flood_mode_set(
+ mbox, profile->flood_mode);
+ }
+ if (profile->used_max_ib_mc) {
+ mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
+ mbox, profile->max_ib_mc);
+ }
+ if (profile->used_max_pkey) {
+ mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_pkey_set(
+ mbox, profile->max_pkey);
+ }
+ if (profile->used_ar_sec) {
+ mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_ar_sec_set(
+ mbox, profile->ar_sec);
+ }
+ if (profile->used_adaptive_routing_group_cap) {
+ mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
+ mbox, profile->adaptive_routing_group_cap);
+ }
+ if (profile->used_ubridge) {
+ mlxsw_cmd_mbox_config_profile_set_ubridge_set(mbox, 1);
+ mlxsw_cmd_mbox_config_profile_ubridge_set(mbox,
+ profile->ubridge);
+ }
+ if (profile->used_kvd_sizes && MLXSW_RES_VALID(res, KVD_SIZE)) {
+ err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
+ if (err)
+ return err;
+
+ mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(mbox, 1);
+ mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(mbox,
+ MLXSW_RES_GET(res, KVD_LINEAR_SIZE));
+ mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(mbox,
+ 1);
+ mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(mbox,
+ MLXSW_RES_GET(res, KVD_SINGLE_SIZE));
+ mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox,
+ MLXSW_RES_GET(res, KVD_DOUBLE_SIZE));
+ }
+
+ for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
+ mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
+ &profile->swid_config[i]);
+
+ if (mlxsw_pci->max_cqe_ver > MLXSW_PCI_CQE_V0) {
+ mlxsw_cmd_mbox_config_profile_set_cqe_version_set(mbox, 1);
+ mlxsw_cmd_mbox_config_profile_cqe_version_set(mbox, 1);
+ }
+
+ if (profile->used_cqe_time_stamp_type) {
+ mlxsw_cmd_mbox_config_profile_set_cqe_time_stamp_type_set(mbox,
+ 1);
+ mlxsw_cmd_mbox_config_profile_cqe_time_stamp_type_set(mbox,
+ profile->cqe_time_stamp_type);
+ }
+
+ return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
+}
+
+static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
+{
+ struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info;
+ int err;
+
+ mlxsw_cmd_mbox_zero(mbox);
+ err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox);
+ if (err)
+ return err;
+ mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
+ mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
+ return 0;
+}
+
+static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ u16 num_pages)
+{
+ struct mlxsw_pci_mem_item *mem_item;
+ int nent = 0;
+ int i;
+ int err;
+
+ mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
+ GFP_KERNEL);
+ if (!mlxsw_pci->fw_area.items)
+ return -ENOMEM;
+ mlxsw_pci->fw_area.count = num_pages;
+
+ mlxsw_cmd_mbox_zero(mbox);
+ for (i = 0; i < num_pages; i++) {
+ mem_item = &mlxsw_pci->fw_area.items[i];
+
+ mem_item->size = MLXSW_PCI_PAGE_SIZE;
+ mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
+ mem_item->size,
+ &mem_item->mapaddr, GFP_KERNEL);
+ if (!mem_item->buf) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+ mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr);
+ mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */
+ if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) {
+ err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
+ if (err)
+ goto err_cmd_map_fa;
+ nent = 0;
+ mlxsw_cmd_mbox_zero(mbox);
+ }
+ }
+
+ if (nent) {
+ err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
+ if (err)
+ goto err_cmd_map_fa;
+ }
+
+ return 0;
+
+err_cmd_map_fa:
+err_alloc:
+ for (i--; i >= 0; i--) {
+ mem_item = &mlxsw_pci->fw_area.items[i];
+
+ dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
+ }
+ kfree(mlxsw_pci->fw_area.items);
+ return err;
+}
+
+static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
+{
+ struct mlxsw_pci_mem_item *mem_item;
+ int i;
+
+ mlxsw_cmd_unmap_fa(mlxsw_pci->core);
+
+ for (i = 0; i < mlxsw_pci->fw_area.count; i++) {
+ mem_item = &mlxsw_pci->fw_area.items[i];
+
+ dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
+ }
+ kfree(mlxsw_pci->fw_area.items);
+}
+
+static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
+{
+ struct mlxsw_pci *mlxsw_pci = dev_id;
+ struct mlxsw_pci_queue *q;
+ int i;
+
+ for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) {
+ q = mlxsw_pci_eq_get(mlxsw_pci, i);
+ mlxsw_pci_queue_tasklet_schedule(q);
+ }
+ return IRQ_HANDLED;
+}
+
+static int mlxsw_pci_mbox_alloc(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_mem_item *mbox)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ int err = 0;
+
+ mbox->size = MLXSW_CMD_MBOX_SIZE;
+ mbox->buf = dma_alloc_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE,
+ &mbox->mapaddr, GFP_KERNEL);
+ if (!mbox->buf) {
+ dev_err(&pdev->dev, "Failed allocating memory for mailbox\n");
+ err = -ENOMEM;
+ }
+
+ return err;
+}
+
+static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_mem_item *mbox)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+
+ dma_free_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
+ mbox->mapaddr);
+}
+
+static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
+ const struct pci_device_id *id,
+ u32 *p_sys_status)
+{
+ unsigned long end;
+ u32 val;
+
+ /* We must wait for the HW to become responsive. */
+ msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);
+
+ end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
+ do {
+ val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
+ if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
+ return 0;
+ cond_resched();
+ } while (time_before(jiffies, end));
+
+ *p_sys_status = val & MLXSW_PCI_FW_READY_MASK;
+
+ return -EBUSY;
+}
+
+static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
+ const struct pci_device_id *id)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ char mrsr_pl[MLXSW_REG_MRSR_LEN];
+ u32 sys_status;
+ int err;
+
+ err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to reach system ready status before reset. Status is 0x%x\n",
+ sys_status);
+ return err;
+ }
+
+ mlxsw_reg_mrsr_pack(mrsr_pl);
+ err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
+ if (err)
+ return err;
+
+ err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to reach system ready status after reset. Status is 0x%x\n",
+ sys_status);
+ return err;
+ }
+
+ return 0;
+}
+
+static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
+{
+ int err;
+
+ err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX);
+ if (err < 0)
+ dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n");
+ return err;
+}
+
+static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci)
+{
+ pci_free_irq_vectors(mlxsw_pci->pdev);
+}
+
+static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile,
+ struct mlxsw_res *res)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ char *mbox;
+ u16 num_pages;
+ int err;
+
+ mlxsw_pci->core = mlxsw_core;
+
+ mbox = mlxsw_cmd_mbox_alloc();
+ if (!mbox)
+ return -ENOMEM;
+
+ err = mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id);
+ if (err)
+ goto err_sw_reset;
+
+ err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
+ if (err < 0) {
+ dev_err(&pdev->dev, "MSI-X init failed\n");
+ goto err_alloc_irq;
+ }
+
+ err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
+ if (err)
+ goto err_query_fw;
+
+ mlxsw_pci->bus_info.fw_rev.major =
+ mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
+ mlxsw_pci->bus_info.fw_rev.minor =
+ mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
+ mlxsw_pci->bus_info.fw_rev.subminor =
+ mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);
+
+ if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) {
+ dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n");
+ err = -EINVAL;
+ goto err_iface_rev;
+ }
+ if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) {
+ dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n");
+ err = -EINVAL;
+ goto err_doorbell_page_bar;
+ }
+
+ mlxsw_pci->doorbell_offset =
+ mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);
+
+ if (mlxsw_cmd_mbox_query_fw_fr_rn_clk_bar_get(mbox) != 0) {
+ dev_err(&pdev->dev, "Unsupported free running clock BAR queried from hw\n");
+ err = -EINVAL;
+ goto err_fr_rn_clk_bar;
+ }
+
+ mlxsw_pci->free_running_clock_offset =
+ mlxsw_cmd_mbox_query_fw_free_running_clock_offset_get(mbox);
+
+ if (mlxsw_cmd_mbox_query_fw_utc_sec_bar_get(mbox) != 0) {
+ dev_err(&pdev->dev, "Unsupported UTC sec BAR queried from hw\n");
+ err = -EINVAL;
+ goto err_utc_sec_bar;
+ }
+
+ mlxsw_pci->utc_sec_offset =
+ mlxsw_cmd_mbox_query_fw_utc_sec_offset_get(mbox);
+
+ if (mlxsw_cmd_mbox_query_fw_utc_nsec_bar_get(mbox) != 0) {
+ dev_err(&pdev->dev, "Unsupported UTC nsec BAR queried from hw\n");
+ err = -EINVAL;
+ goto err_utc_nsec_bar;
+ }
+
+ mlxsw_pci->utc_nsec_offset =
+ mlxsw_cmd_mbox_query_fw_utc_nsec_offset_get(mbox);
+
+ num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
+ err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
+ if (err)
+ goto err_fw_area_init;
+
+ err = mlxsw_pci_boardinfo(mlxsw_pci, mbox);
+ if (err)
+ goto err_boardinfo;
+
+ err = mlxsw_core_resources_query(mlxsw_core, mbox, res);
+ if (err)
+ goto err_query_resources;
+
+ if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V2) &&
+ MLXSW_CORE_RES_GET(mlxsw_core, CQE_V2))
+ mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V2;
+ else if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V1) &&
+ MLXSW_CORE_RES_GET(mlxsw_core, CQE_V1))
+ mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V1;
+ else if ((MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0) &&
+ MLXSW_CORE_RES_GET(mlxsw_core, CQE_V0)) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0)) {
+ mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V0;
+ } else {
+ dev_err(&pdev->dev, "Invalid supported CQE version combination reported\n");
+ err = -EINVAL;
+ goto err_cqe_v_check;
+ }
+
+ err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, res);
+ if (err)
+ goto err_config_profile;
+
+ /* Some resources depend on unified bridge model, which is configured
+ * as part of config_profile. Query the resources again to get correct
+ * values.
+ */
+ err = mlxsw_core_resources_query(mlxsw_core, mbox, res);
+ if (err)
+ goto err_requery_resources;
+
+ err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
+ if (err)
+ goto err_aqs_init;
+
+ err = request_irq(pci_irq_vector(pdev, 0),
+ mlxsw_pci_eq_irq_handler, 0,
+ mlxsw_pci->bus_info.device_kind, mlxsw_pci);
+ if (err) {
+ dev_err(&pdev->dev, "IRQ request failed\n");
+ goto err_request_eq_irq;
+ }
+
+ goto mbox_put;
+
+err_request_eq_irq:
+ mlxsw_pci_aqs_fini(mlxsw_pci);
+err_aqs_init:
+err_requery_resources:
+err_config_profile:
+err_cqe_v_check:
+err_query_resources:
+err_boardinfo:
+ mlxsw_pci_fw_area_fini(mlxsw_pci);
+err_fw_area_init:
+err_utc_nsec_bar:
+err_utc_sec_bar:
+err_fr_rn_clk_bar:
+err_doorbell_page_bar:
+err_iface_rev:
+err_query_fw:
+ mlxsw_pci_free_irq_vectors(mlxsw_pci);
+err_alloc_irq:
+err_sw_reset:
+mbox_put:
+ mlxsw_cmd_mbox_free(mbox);
+ return err;
+}
+
+static void mlxsw_pci_fini(void *bus_priv)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+
+ free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci);
+ mlxsw_pci_aqs_fini(mlxsw_pci);
+ mlxsw_pci_fw_area_fini(mlxsw_pci);
+ mlxsw_pci_free_irq_vectors(mlxsw_pci);
+}
+
+static struct mlxsw_pci_queue *
+mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
+ const struct mlxsw_tx_info *tx_info)
+{
+ u8 ctl_sdq_count = mlxsw_pci_sdq_count(mlxsw_pci) - 1;
+ u8 sdqn;
+
+ if (tx_info->is_emad) {
+ sdqn = MLXSW_PCI_SDQ_EMAD_INDEX;
+ } else {
+ BUILD_BUG_ON(MLXSW_PCI_SDQ_EMAD_INDEX != 0);
+ sdqn = 1 + (tx_info->local_port % ctl_sdq_count);
+ }
+
+ return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
+}
+
+static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
+ const struct mlxsw_tx_info *tx_info)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+ struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
+
+ return !mlxsw_pci_queue_elem_info_producer_get(q);
+}
+
+static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+ struct mlxsw_pci_queue *q;
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ char *wqe;
+ int i;
+ int err;
+
+ if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
+ err = skb_linearize(skb);
+ if (err)
+ return err;
+ }
+
+ q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
+ spin_lock_bh(&q->lock);
+ elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
+ if (!elem_info) {
+ /* queue is full */
+ err = -EAGAIN;
+ goto unlock;
+ }
+ mlxsw_skb_cb(skb)->tx_info = *tx_info;
+ elem_info->u.sdq.skb = skb;
+
+ wqe = elem_info->elem;
+ mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
+ mlxsw_pci_wqe_lp_set(wqe, 0);
+ mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);
+
+ err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (err)
+ goto unlock;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
+ skb_frag_address(frag),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (err)
+ goto unmap_frags;
+ }
+
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ /* Set unused sq entries byte count to zero. */
+ for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
+ mlxsw_pci_wqe_byte_count_set(wqe, i, 0);
+
+ /* Everything is set up, ring producer doorbell to get HW going */
+ q->producer_counter++;
+ mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+
+ goto unlock;
+
+unmap_frags:
+ for (; i >= 0; i--)
+ mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
+unlock:
+ spin_unlock_bh(&q->lock);
+ return err;
+}
+
+static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
+ u32 in_mod, bool out_mbox_direct,
+ char *in_mbox, size_t in_mbox_size,
+ char *out_mbox, size_t out_mbox_size,
+ u8 *p_status)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+ dma_addr_t in_mapaddr = 0, out_mapaddr = 0;
+ bool evreq = mlxsw_pci->cmd.nopoll;
+ unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
+ bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
+ int err;
+
+ *p_status = MLXSW_CMD_STATUS_OK;
+
+ err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock);
+ if (err)
+ return err;
+
+ if (in_mbox) {
+ memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
+ in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr;
+ }
+ mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr));
+ mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr));
+
+ if (out_mbox)
+ out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr;
+ mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr));
+ mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr));
+
+ mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
+ mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);
+
+ *p_wait_done = false;
+
+ wmb(); /* everything must be written before we write the control register */
+ mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
+ MLXSW_PCI_CIR_CTRL_GO_BIT |
+ (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
+ (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
+ opcode);
+
+ if (!evreq) {
+ unsigned long end;
+
+ end = jiffies + timeout;
+ do {
+ u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);
+
+ if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
+ *p_wait_done = true;
+ *p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
+ break;
+ }
+ cond_resched();
+ } while (time_before(jiffies, end));
+ } else {
+ wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
+ *p_status = mlxsw_pci->cmd.comp.status;
+ }
+
+ err = 0;
+ if (*p_wait_done) {
+ if (*p_status)
+ err = -EIO;
+ } else {
+ err = -ETIMEDOUT;
+ }
+
+ if (!err && out_mbox && out_mbox_direct) {
+ /* Some commands don't use the output parameter as an address to
+ * a mailbox but instead store their output directly in registers.
+ * In that case, copy the registers into the mbox buffer.
+ */
+ __be32 tmp;
+
+ if (!evreq) {
+ tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
+ CIR_OUT_PARAM_HI));
+ memcpy(out_mbox, &tmp, sizeof(tmp));
+ tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
+ CIR_OUT_PARAM_LO));
+ memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
+ }
+ } else if (!err && out_mbox) {
+ memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
+ }
+
+ mutex_unlock(&mlxsw_pci->cmd.lock);
+
+ return err;
+}
+
+static u32 mlxsw_pci_read_frc_h(void *bus_priv)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+ u64 frc_offset_h;
+
+ frc_offset_h = mlxsw_pci->free_running_clock_offset;
+ return mlxsw_pci_read32_off(mlxsw_pci, frc_offset_h);
+}
+
+static u32 mlxsw_pci_read_frc_l(void *bus_priv)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+ u64 frc_offset_l;
+
+ frc_offset_l = mlxsw_pci->free_running_clock_offset + 4;
+ return mlxsw_pci_read32_off(mlxsw_pci, frc_offset_l);
+}
+
+static u32 mlxsw_pci_read_utc_sec(void *bus_priv)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+
+ return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_sec_offset);
+}
+
+static u32 mlxsw_pci_read_utc_nsec(void *bus_priv)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+
+ return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_nsec_offset);
+}
+
+static const struct mlxsw_bus mlxsw_pci_bus = {
+ .kind = "pci",
+ .init = mlxsw_pci_init,
+ .fini = mlxsw_pci_fini,
+ .skb_transmit_busy = mlxsw_pci_skb_transmit_busy,
+ .skb_transmit = mlxsw_pci_skb_transmit,
+ .cmd_exec = mlxsw_pci_cmd_exec,
+ .read_frc_h = mlxsw_pci_read_frc_h,
+ .read_frc_l = mlxsw_pci_read_frc_l,
+ .read_utc_sec = mlxsw_pci_read_utc_sec,
+ .read_utc_nsec = mlxsw_pci_read_utc_nsec,
+ .features = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
+};
+
+static int mlxsw_pci_cmd_init(struct mlxsw_pci *mlxsw_pci)
+{
+ int err;
+
+ mutex_init(&mlxsw_pci->cmd.lock);
+ init_waitqueue_head(&mlxsw_pci->cmd.wait);
+
+ err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+ if (err)
+ goto err_in_mbox_alloc;
+
+ err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
+ if (err)
+ goto err_out_mbox_alloc;
+
+ return 0;
+
+err_out_mbox_alloc:
+ mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+err_in_mbox_alloc:
+ mutex_destroy(&mlxsw_pci->cmd.lock);
+ return err;
+}
+
+static void mlxsw_pci_cmd_fini(struct mlxsw_pci *mlxsw_pci)
+{
+ mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
+ mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+ mutex_destroy(&mlxsw_pci->cmd.lock);
+}
+
+static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ const char *driver_name = dev_driver_string(&pdev->dev);
+ struct mlxsw_pci *mlxsw_pci;
+ int err;
+
+ mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
+ if (!mlxsw_pci)
+ return -ENOMEM;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_device failed\n");
+ goto err_pci_enable_device;
+ }
+
+ err = pci_request_regions(pdev, driver_name);
+ if (err) {
+ dev_err(&pdev->dev, "pci_request_regions failed\n");
+ goto err_pci_request_regions;
+ }
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "dma_set_mask failed\n");
+ goto err_pci_set_dma_mask;
+ }
+ }
+
+ if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) {
+ dev_err(&pdev->dev, "invalid PCI region size\n");
+ err = -EINVAL;
+ goto err_pci_resource_len_check;
+ }
+
+ mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!mlxsw_pci->hw_addr) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ err = -EIO;
+ goto err_ioremap;
+ }
+ pci_set_master(pdev);
+
+ mlxsw_pci->pdev = pdev;
+ pci_set_drvdata(pdev, mlxsw_pci);
+
+ err = mlxsw_pci_cmd_init(mlxsw_pci);
+ if (err)
+ goto err_pci_cmd_init;
+
+ mlxsw_pci->bus_info.device_kind = driver_name;
+ mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
+ mlxsw_pci->bus_info.dev = &pdev->dev;
+ mlxsw_pci->bus_info.read_clock_capable = true;
+ mlxsw_pci->id = id;
+
+ err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
+ &mlxsw_pci_bus, mlxsw_pci, false,
+ NULL, NULL);
+ if (err) {
+ dev_err(&pdev->dev, "cannot register bus device\n");
+ goto err_bus_device_register;
+ }
+
+ return 0;
+
+err_bus_device_register:
+ mlxsw_pci_cmd_fini(mlxsw_pci);
+err_pci_cmd_init:
+ iounmap(mlxsw_pci->hw_addr);
+err_ioremap:
+err_pci_resource_len_check:
+err_pci_set_dma_mask:
+ pci_release_regions(pdev);
+err_pci_request_regions:
+ pci_disable_device(pdev);
+err_pci_enable_device:
+ kfree(mlxsw_pci);
+ return err;
+}
+
+static void mlxsw_pci_remove(struct pci_dev *pdev)
+{
+ struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
+
+ mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
+ mlxsw_pci_cmd_fini(mlxsw_pci);
+ iounmap(mlxsw_pci->hw_addr);
+ pci_release_regions(mlxsw_pci->pdev);
+ pci_disable_device(mlxsw_pci->pdev);
+ kfree(mlxsw_pci);
+}
+
+int mlxsw_pci_driver_register(struct pci_driver *pci_driver)
+{
+ pci_driver->probe = mlxsw_pci_probe;
+ pci_driver->remove = mlxsw_pci_remove;
+ pci_driver->shutdown = mlxsw_pci_remove;
+ return pci_register_driver(pci_driver);
+}
+EXPORT_SYMBOL(mlxsw_pci_driver_register);
+
+void mlxsw_pci_driver_unregister(struct pci_driver *pci_driver)
+{
+ pci_unregister_driver(pci_driver);
+}
+EXPORT_SYMBOL(mlxsw_pci_driver_unregister);
+
+static int __init mlxsw_pci_module_init(void)
+{
+ return 0;
+}
+
+static void __exit mlxsw_pci_module_exit(void)
+{
+}
+
+module_init(mlxsw_pci_module_init);
+module_exit(mlxsw_pci_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox switch PCI interface driver");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h
new file mode 100644
index 000000000..cacc2f9fa
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_PCI_H
+#define _MLXSW_PCI_H
+
+#include <linux/pci.h>
+
+#define PCI_DEVICE_ID_MELLANOX_SPECTRUM 0xcb84
+#define PCI_DEVICE_ID_MELLANOX_SPECTRUM2 0xcf6c
+#define PCI_DEVICE_ID_MELLANOX_SPECTRUM3 0xcf70
+#define PCI_DEVICE_ID_MELLANOX_SPECTRUM4 0xcf80
+
+#if IS_ENABLED(CONFIG_MLXSW_PCI)
+
+int mlxsw_pci_driver_register(struct pci_driver *pci_driver);
+void mlxsw_pci_driver_unregister(struct pci_driver *pci_driver);
+
+#else
+
+static inline int
+mlxsw_pci_driver_register(struct pci_driver *pci_driver)
+{
+ return 0;
+}
+
+static inline void
+mlxsw_pci_driver_unregister(struct pci_driver *pci_driver)
+{
+}
+
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
new file mode 100644
index 000000000..7cdf0ce24
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -0,0 +1,416 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_PCI_HW_H
+#define _MLXSW_PCI_HW_H
+
+#include <linux/bitops.h>
+
+#include "item.h"
+
+#define MLXSW_PCI_BAR0_SIZE (1024 * 1024) /* 1MB */
+#define MLXSW_PCI_PAGE_SIZE 4096
+
+#define MLXSW_PCI_CIR_BASE 0x71000
+#define MLXSW_PCI_CIR_IN_PARAM_HI MLXSW_PCI_CIR_BASE
+#define MLXSW_PCI_CIR_IN_PARAM_LO (MLXSW_PCI_CIR_BASE + 0x04)
+#define MLXSW_PCI_CIR_IN_MODIFIER (MLXSW_PCI_CIR_BASE + 0x08)
+#define MLXSW_PCI_CIR_OUT_PARAM_HI (MLXSW_PCI_CIR_BASE + 0x0C)
+#define MLXSW_PCI_CIR_OUT_PARAM_LO (MLXSW_PCI_CIR_BASE + 0x10)
+#define MLXSW_PCI_CIR_TOKEN (MLXSW_PCI_CIR_BASE + 0x14)
+#define MLXSW_PCI_CIR_CTRL (MLXSW_PCI_CIR_BASE + 0x18)
+#define MLXSW_PCI_CIR_CTRL_GO_BIT BIT(23)
+#define MLXSW_PCI_CIR_CTRL_EVREQ_BIT BIT(22)
+#define MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT 12
+#define MLXSW_PCI_CIR_CTRL_STATUS_SHIFT 24
+#define MLXSW_PCI_CIR_TIMEOUT_MSECS 1000
+
+#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 900000
+#define MLXSW_PCI_SW_RESET_WAIT_MSECS 400
+#define MLXSW_PCI_FW_READY 0xA1844
+#define MLXSW_PCI_FW_READY_MASK 0xFFFF
+#define MLXSW_PCI_FW_READY_MAGIC 0x5E
+
+#define MLXSW_PCI_DOORBELL_SDQ_OFFSET 0x000
+#define MLXSW_PCI_DOORBELL_RDQ_OFFSET 0x200
+#define MLXSW_PCI_DOORBELL_CQ_OFFSET 0x400
+#define MLXSW_PCI_DOORBELL_EQ_OFFSET 0x600
+#define MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET 0x800
+#define MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET 0xA00
+
+#define MLXSW_PCI_DOORBELL(offset, type_offset, num) \
+ ((offset) + (type_offset) + (num) * 4)
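+
+/* Illustrative worked example (the page offset below is hypothetical): with a
+ * firmware-reported doorbell page offset of 0x1000, the doorbell for CQ
+ * number 5 would be accessed at
+ * MLXSW_PCI_DOORBELL(0x1000, MLXSW_PCI_DOORBELL_CQ_OFFSET, 5) =
+ * 0x1000 + 0x400 + 5 * 4 = 0x1414.
+ */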
+
+#define MLXSW_PCI_CQS_MAX 96
+#define MLXSW_PCI_EQS_COUNT 2
+#define MLXSW_PCI_EQ_ASYNC_NUM 0
+#define MLXSW_PCI_EQ_COMP_NUM 1
+
+#define MLXSW_PCI_SDQS_MIN 2 /* EMAD and control traffic */
+#define MLXSW_PCI_SDQ_EMAD_INDEX 0
+#define MLXSW_PCI_SDQ_EMAD_TC 0
+#define MLXSW_PCI_SDQ_CTL_TC 3
+
+#define MLXSW_PCI_AQ_PAGES 8
+#define MLXSW_PCI_AQ_SIZE (MLXSW_PCI_PAGE_SIZE * MLXSW_PCI_AQ_PAGES)
+#define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */
+#define MLXSW_PCI_CQE01_SIZE 16 /* 16 bytes per element */
+#define MLXSW_PCI_CQE2_SIZE 32 /* 32 bytes per element */
+#define MLXSW_PCI_CQE_SIZE_MAX MLXSW_PCI_CQE2_SIZE
+#define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */
+#define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE)
+#define MLXSW_PCI_CQE01_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE)
+#define MLXSW_PCI_CQE2_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE2_SIZE)
+#define MLXSW_PCI_EQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_EQE_SIZE)
+#define MLXSW_PCI_EQE_UPDATE_COUNT 0x80
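+
+/* Worked out, MLXSW_PCI_AQ_SIZE = 4096 * 8 = 32768 bytes per queue, so:
+ * MLXSW_PCI_WQE_COUNT = 32768 / 32 = 1024 entries,
+ * MLXSW_PCI_CQE01_COUNT = 32768 / 16 = 2048 entries,
+ * MLXSW_PCI_CQE2_COUNT = 32768 / 32 = 1024 entries,
+ * MLXSW_PCI_EQE_COUNT = 32768 / 16 = 2048 entries.
+ * These are the values mlxsw_pci_aqs_init() compares the reported log2
+ * queue sizes against.
+ */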
+
+#define MLXSW_PCI_WQE_SG_ENTRIES 3
+#define MLXSW_PCI_WQE_TYPE_ETHERNET 0xA
+
+/* pci_wqe_c
+ * If set it indicates that a completion should be reported upon
+ * execution of this descriptor.
+ */
+MLXSW_ITEM32(pci, wqe, c, 0x00, 31, 1);
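+
+/* The MLXSW_ITEM32() accessors are generated by item.h, which is not part
+ * of this hunk. As a rough, simplified sketch (assuming an aligned,
+ * big-endian 32-bit word; the real macros also handle indexed and unaligned
+ * items), the line above produces helpers along these lines:
+ *
+ * static inline u32 mlxsw_pci_wqe_c_get(const char *buf)
+ * {
+ * return (be32_to_cpu(*(__be32 *) &buf[0x00]) >> 31) & 0x1;
+ * }
+ *
+ * static inline void mlxsw_pci_wqe_c_set(char *buf, u32 val)
+ * {
+ * __be32 *p = (__be32 *) &buf[0x00];
+ *
+ * *p = cpu_to_be32((be32_to_cpu(*p) & ~BIT(31)) | ((val & 0x1) << 31));
+ * }
+ */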
+
+/* pci_wqe_lp
+ * Local Processing, set if packet should be processed by the local
+ * switch hardware:
+ * For Ethernet EMAD (Direct Route and non Direct Route) -
+ * must be set if packet destination is local device
+ * For InfiniBand CTL - must be set if packet destination is local device
+ * Otherwise it must be clear
+ * Local Process packets must not exceed the size of 2K (including payload
+ * and headers).
+ */
+MLXSW_ITEM32(pci, wqe, lp, 0x00, 30, 1);
+
+/* pci_wqe_type
+ * Packet type.
+ */
+MLXSW_ITEM32(pci, wqe, type, 0x00, 23, 4);
+
+/* pci_wqe_byte_count
+ * Size of i-th scatter/gather entry, 0 if entry is unused.
+ */
+MLXSW_ITEM16_INDEXED(pci, wqe, byte_count, 0x02, 0, 14, 0x02, 0x00, false);
+
+/* pci_wqe_address
+ * Physical address of i-th scatter/gather entry.
+ * Gather Entries must be 2-byte aligned.
+ */
+MLXSW_ITEM64_INDEXED(pci, wqe, address, 0x08, 0, 64, 0x8, 0x0, false);
+
+enum mlxsw_pci_cqe_v {
+ MLXSW_PCI_CQE_V0,
+ MLXSW_PCI_CQE_V1,
+ MLXSW_PCI_CQE_V2,
+};
+
+#define mlxsw_pci_cqe_item_helpers(name, v0, v1, v2) \
+static inline u32 mlxsw_pci_cqe_##name##_get(enum mlxsw_pci_cqe_v v, char *cqe) \
+{ \
+ switch (v) { \
+ default: \
+ case MLXSW_PCI_CQE_V0: \
+ return mlxsw_pci_cqe##v0##_##name##_get(cqe); \
+ case MLXSW_PCI_CQE_V1: \
+ return mlxsw_pci_cqe##v1##_##name##_get(cqe); \
+ case MLXSW_PCI_CQE_V2: \
+ return mlxsw_pci_cqe##v2##_##name##_get(cqe); \
+ } \
+} \
+static inline void mlxsw_pci_cqe_##name##_set(enum mlxsw_pci_cqe_v v, \
+ char *cqe, u32 val) \
+{ \
+ switch (v) { \
+ default: \
+ case MLXSW_PCI_CQE_V0: \
+ mlxsw_pci_cqe##v0##_##name##_set(cqe, val); \
+ break; \
+ case MLXSW_PCI_CQE_V1: \
+ mlxsw_pci_cqe##v1##_##name##_set(cqe, val); \
+ break; \
+ case MLXSW_PCI_CQE_V2: \
+ mlxsw_pci_cqe##v2##_##name##_set(cqe, val); \
+ break; \
+ } \
+}
+
+/* pci_cqe_lag
+ * Packet arrives from a port which is a LAG
+ */
+MLXSW_ITEM32(pci, cqe0, lag, 0x00, 23, 1);
+MLXSW_ITEM32(pci, cqe12, lag, 0x00, 24, 1);
+mlxsw_pci_cqe_item_helpers(lag, 0, 12, 12);
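+
+/* A minimal usage sketch of the versioned helpers, along the lines of how
+ * the Rx completion path consumes a CQE (cqe_v is the CQE version the CQ
+ * was opened with, cqe points at the raw completion entry):
+ *
+ * if (mlxsw_pci_cqe_lag_get(cqe_v, cqe))
+ * lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
+ * else
+ * system_port = mlxsw_pci_cqe_system_port_get(cqe);
+ */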
+
+/* pci_cqe_system_port/lag_id
+ * When lag=0: System port on which the packet was received
+ * When lag=1:
+ * bits [15:4] LAG ID on which the packet was received
+ * bits [3:0] sub_port on which the packet was received
+ */
+MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
+MLXSW_ITEM32(pci, cqe0, lag_id, 0x00, 4, 12);
+MLXSW_ITEM32(pci, cqe12, lag_id, 0x00, 0, 16);
+mlxsw_pci_cqe_item_helpers(lag_id, 0, 12, 12);
+MLXSW_ITEM32(pci, cqe0, lag_subport, 0x00, 0, 4);
+MLXSW_ITEM32(pci, cqe12, lag_subport, 0x00, 16, 8);
+mlxsw_pci_cqe_item_helpers(lag_subport, 0, 12, 12);
+
+/* pci_cqe_wqe_counter
+ * WQE count of the WQEs completed on the associated dqn
+ */
+MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16);
+
+/* pci_cqe_byte_count
+ * Byte count of received packets, including the additional two
+ * reserved bytes that are appended to the end of the frame.
+ * Reserved for Send CQE.
+ */
+MLXSW_ITEM32(pci, cqe, byte_count, 0x04, 0, 14);
+
+#define MLXSW_PCI_CQE2_MIRROR_CONG_INVALID 0xFFFF
+
+/* pci_cqe_mirror_cong_high
+ * Congestion level in units of 8KB of the egress traffic class of the original
+ * packet that does mirroring to the CPU. Value of 0xFFFF means that the
+ * congestion level is invalid.
+ */
+MLXSW_ITEM32(pci, cqe2, mirror_cong_high, 0x08, 16, 4);
+
+/* pci_cqe_trap_id
+ * Trap ID that captured the packet.
+ */
+MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 10);
+
+/* pci_cqe_crc
+ * Length includes CRC. Indicates that the length field includes
+ * the packet's CRC.
+ */
+MLXSW_ITEM32(pci, cqe0, crc, 0x0C, 8, 1);
+MLXSW_ITEM32(pci, cqe12, crc, 0x0C, 9, 1);
+mlxsw_pci_cqe_item_helpers(crc, 0, 12, 12);
+
+/* pci_cqe_e
+ * CQE with Error.
+ */
+MLXSW_ITEM32(pci, cqe0, e, 0x0C, 7, 1);
+MLXSW_ITEM32(pci, cqe12, e, 0x00, 27, 1);
+mlxsw_pci_cqe_item_helpers(e, 0, 12, 12);
+
+/* pci_cqe_sr
+ * 1 - Send Queue
+ * 0 - Receive Queue
+ */
+MLXSW_ITEM32(pci, cqe0, sr, 0x0C, 6, 1);
+MLXSW_ITEM32(pci, cqe12, sr, 0x00, 26, 1);
+mlxsw_pci_cqe_item_helpers(sr, 0, 12, 12);
+
+/* pci_cqe_dqn
+ * Descriptor Queue (DQ) Number.
+ */
+MLXSW_ITEM32(pci, cqe0, dqn, 0x0C, 1, 5);
+MLXSW_ITEM32(pci, cqe12, dqn, 0x0C, 1, 6);
+mlxsw_pci_cqe_item_helpers(dqn, 0, 12, 12);
+
+/* pci_cqe_time_stamp_low
+ * Time stamp of the CQE
+ * Format according to time_stamp_type:
+ * 0: uSec - 1.024uSec (default for devices which do not support
+ * time_stamp_type). Only bits 15:0 are valid
+ * 1: FRC - Free Running Clock - units of 1nSec
+ * 2: UTC - time_stamp[37:30] = Sec
+ * - time_stamp[29:0] = nSec
+ * 3: Mirror_UTC. UTC time stamp of the original packet that has
+ * MIRROR_SESSION traps
+ * - time_stamp[37:30] = Sec
+ * - time_stamp[29:0] = nSec
+ * Formats 0..2 are configured by
+ * CONFIG_PROFILE.cqe_time_stamp_type for PTP traps
+ * Format 3 is used for MIRROR_SESSION traps
+ * Note that Spectrum does not reveal FRC, UTC and Mirror_UTC
+ */
+MLXSW_ITEM32(pci, cqe2, time_stamp_low, 0x0C, 16, 16);
+
+#define MLXSW_PCI_CQE2_MIRROR_TCLASS_INVALID 0x1F
+
+/* pci_cqe_mirror_tclass
+ * The egress traffic class of the original packet that does mirroring to the
+ * CPU. Value of 0x1F means that the traffic class is invalid.
+ */
+MLXSW_ITEM32(pci, cqe2, mirror_tclass, 0x10, 27, 5);
+
+/* pci_cqe_tx_lag
+ * The Tx port of a packet that is mirrored / sampled to the CPU is a LAG.
+ */
+MLXSW_ITEM32(pci, cqe2, tx_lag, 0x10, 24, 1);
+
+/* pci_cqe_tx_lag_subport
+ * The port index within the LAG of a packet that is mirrored / sampled to the
+ * CPU. Reserved when tx_lag is 0.
+ */
+MLXSW_ITEM32(pci, cqe2, tx_lag_subport, 0x10, 16, 8);
+
+#define MLXSW_PCI_CQE2_TX_PORT_MULTI_PORT 0xFFFE
+#define MLXSW_PCI_CQE2_TX_PORT_INVALID 0xFFFF
+
+/* pci_cqe_tx_lag_id
+ * The Tx LAG ID of the original packet that is mirrored / sampled to the CPU.
+ * Value of 0xFFFE means multi-port. Value of 0xFFFF means that the Tx LAG ID
+ * is invalid. Reserved when tx_lag is 0.
+ */
+MLXSW_ITEM32(pci, cqe2, tx_lag_id, 0x10, 0, 16);
+
+/* pci_cqe_tx_system_port
+ * The Tx port of the original packet that is mirrored / sampled to the CPU.
+ * Value of 0xFFFE means multi-port. Value of 0xFFFF means that the Tx port is
+ * invalid. Reserved when tx_lag is 1.
+ */
+MLXSW_ITEM32(pci, cqe2, tx_system_port, 0x10, 0, 16);
+
+/* pci_cqe_mirror_cong_low
+ * Congestion level in units of 8KB of the egress traffic class of the original
+ * packet that does mirroring to the CPU. Value of 0xFFFF means that the
+ * congestion level is invalid.
+ */
+MLXSW_ITEM32(pci, cqe2, mirror_cong_low, 0x14, 20, 12);
+
+#define MLXSW_PCI_CQE2_MIRROR_CONG_SHIFT 13 /* Units of 8KB. */
+
+static inline u16 mlxsw_pci_cqe2_mirror_cong_get(const char *cqe)
+{
+ u16 cong_high = mlxsw_pci_cqe2_mirror_cong_high_get(cqe);
+ u16 cong_low = mlxsw_pci_cqe2_mirror_cong_low_get(cqe);
+
+ return cong_high << 12 | cong_low;
+}
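+
+/* A short sketch of how a consumer might convert the 16-bit congestion
+ * value into bytes (units are 8KB, hence the shift by 13):
+ *
+ * u16 cong = mlxsw_pci_cqe2_mirror_cong_get(cqe);
+ *
+ * if (cong != MLXSW_PCI_CQE2_MIRROR_CONG_INVALID)
+ * cong_bytes = (u32) cong << MLXSW_PCI_CQE2_MIRROR_CONG_SHIFT;
+ */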
+
+/* pci_cqe_user_def_val_orig_pkt_len
+ * When trap_id is an ACL: User defined value from policy engine action.
+ */
+MLXSW_ITEM32(pci, cqe2, user_def_val_orig_pkt_len, 0x14, 0, 20);
+
+/* pci_cqe_mirror_reason
+ * Mirror reason.
+ */
+MLXSW_ITEM32(pci, cqe2, mirror_reason, 0x18, 24, 8);
+
+enum mlxsw_pci_cqe_time_stamp_type {
+ MLXSW_PCI_CQE_TIME_STAMP_TYPE_USEC,
+ MLXSW_PCI_CQE_TIME_STAMP_TYPE_FRC,
+ MLXSW_PCI_CQE_TIME_STAMP_TYPE_UTC,
+ MLXSW_PCI_CQE_TIME_STAMP_TYPE_MIRROR_UTC,
+};
+
+/* pci_cqe_time_stamp_type
+ * Time stamp type:
+ * 0: uSec - 1.024uSec (default for devices which do not support
+ * time_stamp_type)
+ * 1: FRC - Free Running Clock - units of 1nSec
+ * 2: UTC
+ * 3: Mirror_UTC. UTC time stamp of the original packet that has
+ * MIRROR_SESSION traps
+ */
+MLXSW_ITEM32(pci, cqe2, time_stamp_type, 0x18, 22, 2);
+
+#define MLXSW_PCI_CQE2_MIRROR_LATENCY_INVALID 0xFFFFFF
+
+/* pci_cqe_time_stamp_high
+ * Time stamp of the CQE
+ * Format according to time_stamp_type:
+ * 0: uSec - 1.024uSec (default for devices which do not support
+ * time_stamp_type). Only bits 15:0 are valid
+ * 1: FRC - Free Running Clock - units of 1nSec
+ * 2: UTC - time_stamp[37:30] = Sec
+ * - time_stamp[29:0] = nSec
+ * 3: Mirror_UTC. UTC time stamp of the original packet that has
+ * MIRROR_SESSION traps
+ * - time_stamp[37:30] = Sec
+ * - time_stamp[29:0] = nSec
+ * Formats 0..2 are configured by
+ * CONFIG_PROFILE.cqe_time_stamp_type for PTP traps
+ * Format 3 is used for MIRROR_SESSION traps
+ * Note that Spectrum does not reveal FRC, UTC and Mirror_UTC
+ */
+MLXSW_ITEM32(pci, cqe2, time_stamp_high, 0x18, 0, 22);
+
+static inline u64 mlxsw_pci_cqe2_time_stamp_get(const char *cqe)
+{
+ u64 ts_high = mlxsw_pci_cqe2_time_stamp_high_get(cqe);
+ u64 ts_low = mlxsw_pci_cqe2_time_stamp_low_get(cqe);
+
+ return ts_high << 16 | ts_low;
+}
+
+static inline u8 mlxsw_pci_cqe2_time_stamp_sec_get(const char *cqe)
+{
+ u64 full_ts = mlxsw_pci_cqe2_time_stamp_get(cqe);
+
+ return full_ts >> 30 & 0xFF;
+}
+
+static inline u32 mlxsw_pci_cqe2_time_stamp_nsec_get(const char *cqe)
+{
+ u64 full_ts = mlxsw_pci_cqe2_time_stamp_get(cqe);
+
+ return full_ts & 0x3FFFFFFF;
+}
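+
+/* Worked example for the UTC format (time_stamp_type 2): the 38-bit value
+ * is high << 16 | low, so a packet stamped at 5 seconds and 123456789 nsec
+ * carries time_stamp = (5 << 30) | 123456789, from which
+ * mlxsw_pci_cqe2_time_stamp_sec_get() returns 5 and
+ * mlxsw_pci_cqe2_time_stamp_nsec_get() returns 123456789.
+ */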
+
+/* pci_cqe_mirror_latency
+ * End-to-end latency of the original packet that does mirroring to the CPU.
+ * Value of 0xFFFFFF means that the latency is invalid. Units are according to
+ * MOGCR.mirror_latency_units.
+ */
+MLXSW_ITEM32(pci, cqe2, mirror_latency, 0x1C, 8, 24);
+
+/* pci_cqe_owner
+ * Ownership bit.
+ */
+MLXSW_ITEM32(pci, cqe01, owner, 0x0C, 0, 1);
+MLXSW_ITEM32(pci, cqe2, owner, 0x1C, 0, 1);
+mlxsw_pci_cqe_item_helpers(owner, 01, 01, 2);
+
+/* pci_eqe_event_type
+ * Event type.
+ */
+MLXSW_ITEM32(pci, eqe, event_type, 0x0C, 24, 8);
+#define MLXSW_PCI_EQE_EVENT_TYPE_COMP 0x00
+#define MLXSW_PCI_EQE_EVENT_TYPE_CMD 0x0A
+
+/* pci_eqe_event_sub_type
+ * Event sub-type.
+ */
+MLXSW_ITEM32(pci, eqe, event_sub_type, 0x0C, 16, 8);
+
+/* pci_eqe_cqn
+ * Completion Queue that triggered this EQE.
+ */
+MLXSW_ITEM32(pci, eqe, cqn, 0x0C, 8, 7);
+
+/* pci_eqe_owner
+ * Ownership bit.
+ */
+MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1);
+
+/* pci_eqe_cmd_token
+ * Command completion event - token
+ */
+MLXSW_ITEM32(pci, eqe, cmd_token, 0x00, 16, 16);
+
+/* pci_eqe_cmd_status
+ * Command completion event - status
+ */
+MLXSW_ITEM32(pci, eqe, cmd_status, 0x00, 0, 8);
+
+/* pci_eqe_cmd_out_param_h
+ * Command completion event - output parameter - higher part
+ */
+MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x04, 0, 32);
+
+/* pci_eqe_cmd_out_param_l
+ * Command completion event - output parameter - lower part
+ */
+MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x08, 0, 32);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
new file mode 100644
index 000000000..ac4d4ea51
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_PORT_H
+#define _MLXSW_PORT_H
+
+#include <linux/types.h>
+
+#define MLXSW_PORT_MAX_MTU 10000
+
+#define MLXSW_PORT_DEFAULT_VID 1
+
+#define MLXSW_PORT_SWID_DISABLED_PORT 255
+#define MLXSW_PORT_SWID_ALL_SWIDS 254
+#define MLXSW_PORT_SWID_TYPE_IB 1
+#define MLXSW_PORT_SWID_TYPE_ETH 2
+
+#define MLXSW_PORT_MAX_IB_PHY_PORTS 36
+#define MLXSW_PORT_MAX_IB_PORTS (MLXSW_PORT_MAX_IB_PHY_PORTS + 1)
+
+#define MLXSW_PORT_CPU_PORT 0x0
+
+#define MLXSW_PORT_DONT_CARE 0xFF
+
+enum mlxsw_port_admin_status {
+ MLXSW_PORT_ADMIN_STATUS_UP = 1,
+ MLXSW_PORT_ADMIN_STATUS_DOWN = 2,
+ MLXSW_PORT_ADMIN_STATUS_UP_ONCE = 3,
+ MLXSW_PORT_ADMIN_STATUS_DISABLED = 4,
+};
+
+enum mlxsw_reg_pude_oper_status {
+ MLXSW_PORT_OPER_STATUS_UP = 1,
+ MLXSW_PORT_OPER_STATUS_DOWN = 2,
+ MLXSW_PORT_OPER_STATUS_FAILURE = 4, /* Can be set to up again. */
+};
+
+#endif /* _MLXSW_PORT_H */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
new file mode 100644
index 000000000..a34ff19c5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -0,0 +1,12937 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_REG_H
+#define _MLXSW_REG_H
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+
+#include "item.h"
+#include "port.h"
+
+struct mlxsw_reg_info {
+ u16 id;
+ u16 len; /* In u8 */
+ const char *name;
+};
+
+#define MLXSW_REG_DEFINE(_name, _id, _len) \
+static const struct mlxsw_reg_info mlxsw_reg_##_name = { \
+ .id = _id, \
+ .len = _len, \
+ .name = #_name, \
+}
+
+#define MLXSW_REG(type) (&mlxsw_reg_##type)
+#define MLXSW_REG_LEN(type) MLXSW_REG(type)->len
+#define MLXSW_REG_ZERO(type, payload) memset(payload, 0, MLXSW_REG(type)->len)
+
+/* SGCR - Switch General Configuration Register
+ * --------------------------------------------
+ * This register is used for configuration of the switch capabilities.
+ */
+#define MLXSW_REG_SGCR_ID 0x2000
+#define MLXSW_REG_SGCR_LEN 0x10
+
+MLXSW_REG_DEFINE(sgcr, MLXSW_REG_SGCR_ID, MLXSW_REG_SGCR_LEN);
+
+/* reg_sgcr_llb
+ * Link Local Broadcast (Default=0)
+ * When set, all Link Local packets (224.0.0.X) will be treated as broadcast
+ * packets and ignore the IGMP snooping entries.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sgcr, llb, 0x04, 0, 1);
+
+static inline void mlxsw_reg_sgcr_pack(char *payload, bool llb)
+{
+ MLXSW_REG_ZERO(sgcr, payload);
+ mlxsw_reg_sgcr_llb_set(payload, !!llb);
+}
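+
+/* A minimal usage sketch of the pack/write pattern used for every register
+ * in this file (mlxsw_core is the caller's core instance; mlxsw_reg_write()
+ * issues the register access):
+ *
+ * char sgcr_pl[MLXSW_REG_SGCR_LEN];
+ *
+ * mlxsw_reg_sgcr_pack(sgcr_pl, true);
+ * err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sgcr), sgcr_pl);
+ */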
+
+/* SPAD - Switch Physical Address Register
+ * ---------------------------------------
+ * The SPAD register configures the switch physical MAC address.
+ */
+#define MLXSW_REG_SPAD_ID 0x2002
+#define MLXSW_REG_SPAD_LEN 0x10
+
+MLXSW_REG_DEFINE(spad, MLXSW_REG_SPAD_ID, MLXSW_REG_SPAD_LEN);
+
+/* reg_spad_base_mac
+ * Base MAC address for the switch partitions.
+ * Per switch partition MAC address is equal to:
+ * base_mac + swid
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6);
+
+/* SSPR - Switch System Port Record Register
+ * -----------------------------------------
+ * Configures the system port to local port mapping.
+ */
+#define MLXSW_REG_SSPR_ID 0x2008
+#define MLXSW_REG_SSPR_LEN 0x8
+
+MLXSW_REG_DEFINE(sspr, MLXSW_REG_SSPR_ID, MLXSW_REG_SSPR_LEN);
+
+/* reg_sspr_m
+ * Master - if set, then the record describes the master system port.
+ * This is needed in case a local port is mapped into several system ports
+ * (for multipathing). That number will be reported as the source system
+ * port when packets are forwarded to the CPU. Only one master port is allowed
+ * per local port.
+ *
+ * Note: Must be set for Spectrum.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sspr, m, 0x00, 31, 1);
+
+/* reg_sspr_local_port
+ * Local port number.
+ *
+ * Access: RW
+ */
+MLXSW_ITEM32_LP(reg, sspr, 0x00, 16, 0x00, 12);
+
+/* reg_sspr_system_port
+ * Unique identifier within the stacking domain that represents all the ports
+ * that are available in the system (external ports).
+ *
+ * Currently, only single-ASIC configurations are supported, so we default to
+ * 1:1 mapping between system ports and local ports.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sspr, system_port, 0x04, 0, 16);
+
+static inline void mlxsw_reg_sspr_pack(char *payload, u16 local_port)
+{
+ MLXSW_REG_ZERO(sspr, payload);
+ mlxsw_reg_sspr_m_set(payload, 1);
+ mlxsw_reg_sspr_local_port_set(payload, local_port);
+ mlxsw_reg_sspr_system_port_set(payload, local_port);
+}
+
+/* SFDAT - Switch Filtering Database Aging Time
+ * --------------------------------------------
+ * Controls the Switch aging time. Aging time can be set per Switch
+ * Partition.
+ */
+#define MLXSW_REG_SFDAT_ID 0x2009
+#define MLXSW_REG_SFDAT_LEN 0x8
+
+MLXSW_REG_DEFINE(sfdat, MLXSW_REG_SFDAT_ID, MLXSW_REG_SFDAT_LEN);
+
+/* reg_sfdat_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfdat, swid, 0x00, 24, 8);
+
+/* reg_sfdat_age_time
+ * Aging time in seconds
+ * Min - 10 seconds
+ * Max - 1,000,000 seconds
+ * Default is 300 seconds.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdat, age_time, 0x04, 0, 20);
+
+static inline void mlxsw_reg_sfdat_pack(char *payload, u32 age_time)
+{
+ MLXSW_REG_ZERO(sfdat, payload);
+ mlxsw_reg_sfdat_swid_set(payload, 0);
+ mlxsw_reg_sfdat_age_time_set(payload, age_time);
+}
+
+/* SFD - Switch Filtering Database
+ * -------------------------------
+ * The following register defines the access to the filtering database.
+ * The register supports querying, adding, removing and modifying the database.
+ * The access is optimized for bulk updates in which case more than one
+ * FDB record is present in the same command.
+ */
+#define MLXSW_REG_SFD_ID 0x200A
+#define MLXSW_REG_SFD_BASE_LEN 0x10 /* base length, without records */
+#define MLXSW_REG_SFD_REC_LEN 0x10 /* record length */
+#define MLXSW_REG_SFD_REC_MAX_COUNT 64
+#define MLXSW_REG_SFD_LEN (MLXSW_REG_SFD_BASE_LEN + \
+ MLXSW_REG_SFD_REC_LEN * MLXSW_REG_SFD_REC_MAX_COUNT)
+
+MLXSW_REG_DEFINE(sfd, MLXSW_REG_SFD_ID, MLXSW_REG_SFD_LEN);
+
+/* reg_sfd_swid
+ * Switch partition ID for queries. Reserved on Write.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfd, swid, 0x00, 24, 8);
+
+enum mlxsw_reg_sfd_op {
+ /* Dump entire FDB (process according to record_locator) */
+ MLXSW_REG_SFD_OP_QUERY_DUMP = 0,
+ /* Query records by {MAC, VID/FID} value */
+ MLXSW_REG_SFD_OP_QUERY_QUERY = 1,
+ /* Query and clear activity. Query records by {MAC, VID/FID} value */
+ MLXSW_REG_SFD_OP_QUERY_QUERY_AND_CLEAR_ACTIVITY = 2,
+ /* Test. Response indicates if each of the records could be
+ * added to the FDB.
+ */
+ MLXSW_REG_SFD_OP_WRITE_TEST = 0,
+ /* Add/modify. Aged-out records cannot be added. This command removes
+ * the learning notification of the {MAC, VID/FID}. Response includes
+ * the entries that were added to the FDB.
+ */
+ MLXSW_REG_SFD_OP_WRITE_EDIT = 1,
+ /* Remove record by {MAC, VID/FID}. This command also removes
+ * the learning notification and aged-out notifications
+ * of the {MAC, VID/FID}. The response provides current (pre-removal)
+ * entries as non-aged-out.
+ */
+ MLXSW_REG_SFD_OP_WRITE_REMOVE = 2,
+ /* Remove learned notification by {MAC, VID/FID}. The response provides
+ * the removed learning notification.
+ */
+ MLXSW_REG_SFD_OP_WRITE_REMOVE_NOTIFICATION = 2,
+};
+
+/* reg_sfd_op
+ * Operation.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, sfd, op, 0x04, 30, 2);
+
+/* reg_sfd_record_locator
+ * Used for querying the FDB. Use record_locator=0 to initiate the
+ * query. When a record is returned, a new record_locator is
+ * returned to be used in the subsequent query.
+ * Reserved for database update.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfd, record_locator, 0x04, 0, 30);
+
+/* reg_sfd_num_rec
+ * Request: Number of records to read/add/modify/remove
+ * Response: Number of records read/added/replaced/removed
+ * See above description for more details.
+ * Ranges 0..64
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfd, num_rec, 0x08, 0, 8);
+
+static inline void mlxsw_reg_sfd_pack(char *payload, enum mlxsw_reg_sfd_op op,
+ u32 record_locator)
+{
+ MLXSW_REG_ZERO(sfd, payload);
+ mlxsw_reg_sfd_op_set(payload, op);
+ mlxsw_reg_sfd_record_locator_set(payload, record_locator);
+}
+
+/* reg_sfd_rec_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, rec_swid, MLXSW_REG_SFD_BASE_LEN, 24, 8,
+ MLXSW_REG_SFD_REC_LEN, 0x00, false);
+
+enum mlxsw_reg_sfd_rec_type {
+ MLXSW_REG_SFD_REC_TYPE_UNICAST = 0x0,
+ MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG = 0x1,
+ MLXSW_REG_SFD_REC_TYPE_MULTICAST = 0x2,
+ MLXSW_REG_SFD_REC_TYPE_UNICAST_TUNNEL = 0xC,
+};
+
+/* reg_sfd_rec_type
+ * FDB record type.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, rec_type, MLXSW_REG_SFD_BASE_LEN, 20, 4,
+ MLXSW_REG_SFD_REC_LEN, 0x00, false);
+
+enum mlxsw_reg_sfd_rec_policy {
+ /* Replacement disabled, aging disabled. */
+ MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY = 0,
+ /* (mlag remote): Replacement enabled, aging disabled,
+ * learning notification enabled on this port.
+ */
+ MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG = 1,
+ /* (ingress device): Replacement enabled, aging enabled. */
+ MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS = 3,
+};
+
+/* reg_sfd_rec_policy
+ * Policy.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, rec_policy, MLXSW_REG_SFD_BASE_LEN, 18, 2,
+ MLXSW_REG_SFD_REC_LEN, 0x00, false);
+
+/* reg_sfd_rec_a
+ * Activity. Set for new static entries. Set for static entries if a frame SMAC
+ * lookup hits on the entry.
+ * To clear the a bit, use "query and clear activity" op.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, rec_a, MLXSW_REG_SFD_BASE_LEN, 16, 1,
+ MLXSW_REG_SFD_REC_LEN, 0x00, false);
+
+/* reg_sfd_rec_mac
+ * MAC address.
+ * Access: Index
+ */
+MLXSW_ITEM_BUF_INDEXED(reg, sfd, rec_mac, MLXSW_REG_SFD_BASE_LEN, 6,
+ MLXSW_REG_SFD_REC_LEN, 0x02);
+
+enum mlxsw_reg_sfd_rec_action {
+ /* forward */
+ MLXSW_REG_SFD_REC_ACTION_NOP = 0,
+ /* forward and trap, trap_id is FDB_TRAP */
+ MLXSW_REG_SFD_REC_ACTION_MIRROR_TO_CPU = 1,
+ /* trap and do not forward, trap_id is FDB_TRAP */
+ MLXSW_REG_SFD_REC_ACTION_TRAP = 2,
+ /* forward to IP router */
+ MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER = 3,
+ MLXSW_REG_SFD_REC_ACTION_DISCARD_ERROR = 15,
+};
+
+/* reg_sfd_rec_action
+ * Action to apply on the packet.
+ * Note: Dynamic entries can only be configured with NOP action.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, rec_action, MLXSW_REG_SFD_BASE_LEN, 28, 4,
+ MLXSW_REG_SFD_REC_LEN, 0x0C, false);
+
+/* reg_sfd_uc_sub_port
+ * VEPA channel on local port.
+ * Valid only if local port is a non-stacking port. Must be 0 if multichannel
+ * VEPA is not enabled.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8,
+ MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
+/* reg_sfd_uc_set_vid
+ * Set VID.
+ * 0 - Do not update VID.
+ * 1 - Set VID.
+ * For Spectrum-2 when set_vid=0 and smpe_valid=1, the smpe will modify the vid.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_set_vid, MLXSW_REG_SFD_BASE_LEN, 31, 1,
+ MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
+/* reg_sfd_uc_fid_vid
+ * Filtering ID or VLAN ID
+ * For SwitchX and SwitchX-2:
+ * - Dynamic entries (policy 2,3) use FID
+ * - Static entries (policy 0) use VID
+ * - When independent learning is configured, VID=FID
+ * For Spectrum: use FID for both Dynamic and Static entries.
+ * VID should not be used.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16,
+ MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
+/* reg_sfd_uc_vid
+ * New VID when set_vid=1.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and when set_vid=0.
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_vid, MLXSW_REG_SFD_BASE_LEN, 16, 12,
+ MLXSW_REG_SFD_REC_LEN, 0x0C, false);
+
+/* reg_sfd_uc_system_port
+ * Unique port identifier for the final destination of the packet.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_system_port, MLXSW_REG_SFD_BASE_LEN, 0, 16,
+ MLXSW_REG_SFD_REC_LEN, 0x0C, false);
+
+static inline void mlxsw_reg_sfd_rec_pack(char *payload, int rec_index,
+ enum mlxsw_reg_sfd_rec_type rec_type,
+ const char *mac,
+ enum mlxsw_reg_sfd_rec_action action)
+{
+ u8 num_rec = mlxsw_reg_sfd_num_rec_get(payload);
+
+ if (rec_index >= num_rec)
+ mlxsw_reg_sfd_num_rec_set(payload, rec_index + 1);
+ mlxsw_reg_sfd_rec_swid_set(payload, rec_index, 0);
+ mlxsw_reg_sfd_rec_type_set(payload, rec_index, rec_type);
+ mlxsw_reg_sfd_rec_mac_memcpy_to(payload, rec_index, mac);
+ mlxsw_reg_sfd_rec_action_set(payload, rec_index, action);
+}
+
+static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
+ enum mlxsw_reg_sfd_rec_policy policy,
+ const char *mac, u16 fid_vid, u16 vid,
+ enum mlxsw_reg_sfd_rec_action action,
+ u16 local_port)
+{
+ mlxsw_reg_sfd_rec_pack(payload, rec_index,
+ MLXSW_REG_SFD_REC_TYPE_UNICAST, mac, action);
+ mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
+ mlxsw_reg_sfd_uc_sub_port_set(payload, rec_index, 0);
+ mlxsw_reg_sfd_uc_fid_vid_set(payload, rec_index, fid_vid);
+ mlxsw_reg_sfd_uc_set_vid_set(payload, rec_index, vid ? true : false);
+ mlxsw_reg_sfd_uc_vid_set(payload, rec_index, vid);
+ mlxsw_reg_sfd_uc_system_port_set(payload, rec_index, local_port);
+}
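+
+/* A minimal sketch of adding a single static unicast FDB record (mac, fid
+ * and local_port stand in for whatever the caller resolved):
+ *
+ * char *sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
+ *
+ * mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_WRITE_EDIT, 0);
+ * mlxsw_reg_sfd_uc_pack(sfd_pl, 0, MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY,
+ * mac, fid, 0, MLXSW_REG_SFD_REC_ACTION_NOP, local_port);
+ * err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfd), sfd_pl);
+ * kfree(sfd_pl);
+ */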
+
+/* reg_sfd_uc_lag_sub_port
+ * LAG sub port.
+ * Must be 0 if multichannel VEPA is not enabled.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8,
+ MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
+/* reg_sfd_uc_lag_set_vid
+ * Set VID.
+ * 0 - Do not update VID.
+ * 1 - Set VID.
+ * For Spectrum-2 when set_vid=0 and smpe_valid=1, the smpe will modify the vid.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_set_vid, MLXSW_REG_SFD_BASE_LEN, 31, 1,
+ MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
+/* reg_sfd_uc_lag_fid_vid
+ * Filtering ID or VLAN ID
+ * For SwitchX and SwitchX-2:
+ * - Dynamic entries (policy 2,3) use FID
+ * - Static entries (policy 0) use VID
+ * - When independent learning is configured, VID=FID
+ * For Spectrum: use FID for both Dynamic and Static entries.
+ * VID should not be used.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16,
+ MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
+/* reg_sfd_uc_lag_lag_vid
+ * New VLAN ID.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and set_vid=0.
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_lag_vid, MLXSW_REG_SFD_BASE_LEN, 16, 12,
+ MLXSW_REG_SFD_REC_LEN, 0x0C, false);
+
+/* reg_sfd_uc_lag_lag_id
+ * LAG Identifier - pointer into the LAG descriptor table.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_lag_id, MLXSW_REG_SFD_BASE_LEN, 0, 10,
+ MLXSW_REG_SFD_REC_LEN, 0x0C, false);
+
+static inline void
+mlxsw_reg_sfd_uc_lag_pack(char *payload, int rec_index,
+ enum mlxsw_reg_sfd_rec_policy policy,
+ const char *mac, u16 fid_vid,
+ enum mlxsw_reg_sfd_rec_action action, u16 lag_vid,
+ u16 lag_id)
+{
+ mlxsw_reg_sfd_rec_pack(payload, rec_index,
+ MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG,
+ mac, action);
+ mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
+ mlxsw_reg_sfd_uc_lag_sub_port_set(payload, rec_index, 0);
+ mlxsw_reg_sfd_uc_lag_fid_vid_set(payload, rec_index, fid_vid);
+ mlxsw_reg_sfd_uc_lag_set_vid_set(payload, rec_index, true);
+ mlxsw_reg_sfd_uc_lag_lag_vid_set(payload, rec_index, lag_vid);
+ mlxsw_reg_sfd_uc_lag_lag_id_set(payload, rec_index, lag_id);
+}
+
+/* reg_sfd_mc_pgi
+ *
+ * Multicast port group index - index into the port group table.
+ * Value 0x1FFF indicates the pgi should point to the MID entry.
+ * For Spectrum this value must be set to 0x1FFF
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, mc_pgi, MLXSW_REG_SFD_BASE_LEN, 16, 13,
+ MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
+/* reg_sfd_mc_fid_vid
+ *
+ * Filtering ID or VLAN ID
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, mc_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16,
+ MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
+/* reg_sfd_mc_mid
+ *
+ * Multicast identifier - global identifier that represents the multicast
+ * group across all devices.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, mc_mid, MLXSW_REG_SFD_BASE_LEN, 0, 16,
+ MLXSW_REG_SFD_REC_LEN, 0x0C, false);
+
+static inline void
+mlxsw_reg_sfd_mc_pack(char *payload, int rec_index,
+ const char *mac, u16 fid_vid,
+ enum mlxsw_reg_sfd_rec_action action, u16 mid)
+{
+ mlxsw_reg_sfd_rec_pack(payload, rec_index,
+ MLXSW_REG_SFD_REC_TYPE_MULTICAST, mac, action);
+ mlxsw_reg_sfd_mc_pgi_set(payload, rec_index, 0x1FFF);
+ mlxsw_reg_sfd_mc_fid_vid_set(payload, rec_index, fid_vid);
+ mlxsw_reg_sfd_mc_mid_set(payload, rec_index, mid);
+}
+
+/* reg_sfd_uc_tunnel_uip_msb
+ * When protocol is IPv4, the most significant byte of the underlay IPv4
+ * destination IP.
+ * When protocol is IPv6, reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_tunnel_uip_msb, MLXSW_REG_SFD_BASE_LEN, 24,
+ 8, MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
+/* reg_sfd_uc_tunnel_fid
+ * Filtering ID.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_tunnel_fid, MLXSW_REG_SFD_BASE_LEN, 0, 16,
+ MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
+enum mlxsw_reg_sfd_uc_tunnel_protocol {
+ MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4,
+ MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV6,
+};
+
+/* reg_sfd_uc_tunnel_protocol
+ * IP protocol.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_tunnel_protocol, MLXSW_REG_SFD_BASE_LEN, 27,
+ 1, MLXSW_REG_SFD_REC_LEN, 0x0C, false);
+
+/* reg_sfd_uc_tunnel_uip_lsb
+ * When protocol is IPv4, the least significant bytes of the underlay
+ * IPv4 destination IP.
+ * When protocol is IPv6, pointer to the underlay IPv6 destination IP
+ * which is configured by RIPS.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_tunnel_uip_lsb, MLXSW_REG_SFD_BASE_LEN, 0,
+ 24, MLXSW_REG_SFD_REC_LEN, 0x0C, false);
+
+static inline void
+mlxsw_reg_sfd_uc_tunnel_pack(char *payload, int rec_index,
+ enum mlxsw_reg_sfd_rec_policy policy,
+ const char *mac, u16 fid,
+ enum mlxsw_reg_sfd_rec_action action,
+ enum mlxsw_reg_sfd_uc_tunnel_protocol proto)
+{
+ mlxsw_reg_sfd_rec_pack(payload, rec_index,
+ MLXSW_REG_SFD_REC_TYPE_UNICAST_TUNNEL, mac,
+ action);
+ mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
+ mlxsw_reg_sfd_uc_tunnel_fid_set(payload, rec_index, fid);
+ mlxsw_reg_sfd_uc_tunnel_protocol_set(payload, rec_index, proto);
+}
+
+static inline void
+mlxsw_reg_sfd_uc_tunnel_pack4(char *payload, int rec_index,
+ enum mlxsw_reg_sfd_rec_policy policy,
+ const char *mac, u16 fid,
+ enum mlxsw_reg_sfd_rec_action action, u32 uip)
+{
+ mlxsw_reg_sfd_uc_tunnel_uip_msb_set(payload, rec_index, uip >> 24);
+ mlxsw_reg_sfd_uc_tunnel_uip_lsb_set(payload, rec_index, uip);
+ mlxsw_reg_sfd_uc_tunnel_pack(payload, rec_index, policy, mac, fid,
+ action,
+ MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4);
+}
+
+static inline void
+mlxsw_reg_sfd_uc_tunnel_pack6(char *payload, int rec_index, const char *mac,
+ u16 fid, enum mlxsw_reg_sfd_rec_action action,
+ u32 uip_ptr)
+{
+ mlxsw_reg_sfd_uc_tunnel_uip_lsb_set(payload, rec_index, uip_ptr);
+ /* Only static policy is supported for IPv6 unicast tunnel entry. */
+ mlxsw_reg_sfd_uc_tunnel_pack(payload, rec_index,
+ MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY,
+ mac, fid, action,
+ MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV6);
+}
+
+enum mlxsw_reg_tunnel_port {
+ MLXSW_REG_TUNNEL_PORT_NVE,
+ MLXSW_REG_TUNNEL_PORT_VPLS,
+ MLXSW_REG_TUNNEL_PORT_FLEX_TUNNEL0,
+ MLXSW_REG_TUNNEL_PORT_FLEX_TUNNEL1,
+};
+
+/* SFN - Switch FDB Notification Register
+ * -------------------------------------------
+ * The switch provides notifications on newly learned FDB entries and
+ * aged out entries. The notifications can be polled by software.
+ */
+#define MLXSW_REG_SFN_ID 0x200B
+#define MLXSW_REG_SFN_BASE_LEN 0x10 /* base length, without records */
+#define MLXSW_REG_SFN_REC_LEN 0x10 /* record length */
+#define MLXSW_REG_SFN_REC_MAX_COUNT 64
+#define MLXSW_REG_SFN_LEN (MLXSW_REG_SFN_BASE_LEN + \
+ MLXSW_REG_SFN_REC_LEN * MLXSW_REG_SFN_REC_MAX_COUNT)
+
+MLXSW_REG_DEFINE(sfn, MLXSW_REG_SFN_ID, MLXSW_REG_SFN_LEN);
+
+/* reg_sfn_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfn, swid, 0x00, 24, 8);
+
+/* reg_sfn_end
+ * Forces the current session to end.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, sfn, end, 0x04, 20, 1);
+
+/* reg_sfn_num_rec
+ * Request: Number of learned notifications and aged-out notification
+ * records requested.
+ * Response: Number of notification records returned (must be smaller
+ * than or equal to the value requested)
+ * Range is 0..64.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, sfn, num_rec, 0x04, 0, 8);
+
+static inline void mlxsw_reg_sfn_pack(char *payload)
+{
+ MLXSW_REG_ZERO(sfn, payload);
+ mlxsw_reg_sfn_swid_set(payload, 0);
+ mlxsw_reg_sfn_end_set(payload, 0);
+ mlxsw_reg_sfn_num_rec_set(payload, MLXSW_REG_SFN_REC_MAX_COUNT);
+}
+
+/* reg_sfn_rec_swid
+ * Switch partition ID.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, rec_swid, MLXSW_REG_SFN_BASE_LEN, 24, 8,
+ MLXSW_REG_SFN_REC_LEN, 0x00, false);
+
+enum mlxsw_reg_sfn_rec_type {
+ /* MAC addresses learned on a regular port. */
+ MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC = 0x5,
+ /* MAC addresses learned on a LAG port. */
+ MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG = 0x6,
+ /* Aged-out MAC address on a regular port. */
+ MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC = 0x7,
+ /* Aged-out MAC address on a LAG port. */
+ MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG = 0x8,
+ /* Learned unicast tunnel record. */
+ MLXSW_REG_SFN_REC_TYPE_LEARNED_UNICAST_TUNNEL = 0xD,
+ /* Aged-out unicast tunnel record. */
+ MLXSW_REG_SFN_REC_TYPE_AGED_OUT_UNICAST_TUNNEL = 0xE,
+};
+
+/* reg_sfn_rec_type
+ * Notification record type.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, rec_type, MLXSW_REG_SFN_BASE_LEN, 20, 4,
+ MLXSW_REG_SFN_REC_LEN, 0x00, false);
+
+/* reg_sfn_rec_mac
+ * MAC address.
+ * Access: RO
+ */
+MLXSW_ITEM_BUF_INDEXED(reg, sfn, rec_mac, MLXSW_REG_SFN_BASE_LEN, 6,
+ MLXSW_REG_SFN_REC_LEN, 0x02);
+
+/* reg_sfn_mac_sub_port
+ * VEPA channel on the local port.
+ * 0 if multichannel VEPA is not enabled.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, mac_sub_port, MLXSW_REG_SFN_BASE_LEN, 16, 8,
+ MLXSW_REG_SFN_REC_LEN, 0x08, false);
+
+/* reg_sfn_mac_fid
+ * Filtering identifier.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, mac_fid, MLXSW_REG_SFN_BASE_LEN, 0, 16,
+ MLXSW_REG_SFN_REC_LEN, 0x08, false);
+
+/* reg_sfn_mac_system_port
+ * Unique port identifier for the final destination of the packet.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, mac_system_port, MLXSW_REG_SFN_BASE_LEN, 0, 16,
+ MLXSW_REG_SFN_REC_LEN, 0x0C, false);
+
+static inline void mlxsw_reg_sfn_mac_unpack(char *payload, int rec_index,
+ char *mac, u16 *p_vid,
+ u16 *p_local_port)
+{
+ mlxsw_reg_sfn_rec_mac_memcpy_from(payload, rec_index, mac);
+ *p_vid = mlxsw_reg_sfn_mac_fid_get(payload, rec_index);
+ *p_local_port = mlxsw_reg_sfn_mac_system_port_get(payload, rec_index);
+}
+
+/* reg_sfn_mac_lag_lag_id
+ * LAG ID (pointer into the LAG descriptor table).
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, mac_lag_lag_id, MLXSW_REG_SFN_BASE_LEN, 0, 10,
+ MLXSW_REG_SFN_REC_LEN, 0x0C, false);
+
+static inline void mlxsw_reg_sfn_mac_lag_unpack(char *payload, int rec_index,
+ char *mac, u16 *p_vid,
+ u16 *p_lag_id)
+{
+ mlxsw_reg_sfn_rec_mac_memcpy_from(payload, rec_index, mac);
+ *p_vid = mlxsw_reg_sfn_mac_fid_get(payload, rec_index);
+ *p_lag_id = mlxsw_reg_sfn_mac_lag_lag_id_get(payload, rec_index);
+}
+
+/* reg_sfn_uc_tunnel_uip_msb
+ * When protocol is IPv4, the most significant byte of the underlay IPv4
+ * address of the remote VTEP.
+ * When protocol is IPv6, reserved.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, uc_tunnel_uip_msb, MLXSW_REG_SFN_BASE_LEN, 24,
+ 8, MLXSW_REG_SFN_REC_LEN, 0x08, false);
+
+enum mlxsw_reg_sfn_uc_tunnel_protocol {
+ MLXSW_REG_SFN_UC_TUNNEL_PROTOCOL_IPV4,
+ MLXSW_REG_SFN_UC_TUNNEL_PROTOCOL_IPV6,
+};
+
+/* reg_sfn_uc_tunnel_protocol
+ * IP protocol.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, uc_tunnel_protocol, MLXSW_REG_SFN_BASE_LEN, 27,
+ 1, MLXSW_REG_SFN_REC_LEN, 0x0C, false);
+
+/* reg_sfn_uc_tunnel_uip_lsb
+ * When protocol is IPv4, the least significant bytes of the underlay
+ * IPv4 address of the remote VTEP.
+ * When protocol is IPv6, ipv6_id to be queried from TNIPSD.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, uc_tunnel_uip_lsb, MLXSW_REG_SFN_BASE_LEN, 0,
+ 24, MLXSW_REG_SFN_REC_LEN, 0x0C, false);
+
+/* reg_sfn_tunnel_port
+ * Tunnel port.
+ * Reserved on Spectrum.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, tunnel_port, MLXSW_REG_SFN_BASE_LEN, 0, 4,
+ MLXSW_REG_SFN_REC_LEN, 0x10, false);
+
+static inline void
+mlxsw_reg_sfn_uc_tunnel_unpack(char *payload, int rec_index, char *mac,
+ u16 *p_fid, u32 *p_uip,
+ enum mlxsw_reg_sfn_uc_tunnel_protocol *p_proto)
+{
+ u32 uip_msb, uip_lsb;
+
+ mlxsw_reg_sfn_rec_mac_memcpy_from(payload, rec_index, mac);
+ *p_fid = mlxsw_reg_sfn_mac_fid_get(payload, rec_index);
+ uip_msb = mlxsw_reg_sfn_uc_tunnel_uip_msb_get(payload, rec_index);
+ uip_lsb = mlxsw_reg_sfn_uc_tunnel_uip_lsb_get(payload, rec_index);
+ *p_uip = uip_msb << 24 | uip_lsb;
+ *p_proto = mlxsw_reg_sfn_uc_tunnel_protocol_get(payload, rec_index);
+}
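+
+/* Example: polling FDB notifications. Illustrative sketch only; "mlxsw_core"
+ * is assumed to be supplied by the caller and error handling is omitted:
+ *
+ *   char *sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
+ *   char mac[ETH_ALEN];
+ *   u16 vid, local_port;
+ *   u8 num_rec;
+ *   int i;
+ *
+ *   mlxsw_reg_sfn_pack(sfn_pl);
+ *   mlxsw_reg_query(mlxsw_core, MLXSW_REG(sfn), sfn_pl);
+ *   num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
+ *   for (i = 0; i < num_rec; i++)
+ *           if (mlxsw_reg_sfn_rec_type_get(sfn_pl, i) ==
+ *               MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC)
+ *                   mlxsw_reg_sfn_mac_unpack(sfn_pl, i, mac, &vid,
+ *                                            &local_port);
+ *   kfree(sfn_pl);
+ */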
+
+/* SPMS - Switch Port MSTP/RSTP State Register
+ * -------------------------------------------
+ * Configures the spanning tree state of a physical port.
+ */
+#define MLXSW_REG_SPMS_ID 0x200D
+#define MLXSW_REG_SPMS_LEN 0x404
+
+MLXSW_REG_DEFINE(spms, MLXSW_REG_SPMS_ID, MLXSW_REG_SPMS_LEN);
+
+/* reg_spms_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, spms, 0x00, 16, 0x00, 12);
+
+enum mlxsw_reg_spms_state {
+ MLXSW_REG_SPMS_STATE_NO_CHANGE,
+ MLXSW_REG_SPMS_STATE_DISCARDING,
+ MLXSW_REG_SPMS_STATE_LEARNING,
+ MLXSW_REG_SPMS_STATE_FORWARDING,
+};
+
+/* reg_spms_state
+ * Spanning tree state of each VLAN ID (VID) of the local port.
+ * 0 - Do not change spanning tree state (used only when writing).
+ * 1 - Discarding. No learning or forwarding to/from this port (default).
+ * 2 - Learning. Port is learning, but not forwarding.
+ * 3 - Forwarding. Port is learning and forwarding.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, spms, state, 0x04, 0x400, 2);
+
+static inline void mlxsw_reg_spms_pack(char *payload, u16 local_port)
+{
+ MLXSW_REG_ZERO(spms, payload);
+ mlxsw_reg_spms_local_port_set(payload, local_port);
+}
+
+static inline void mlxsw_reg_spms_vid_pack(char *payload, u16 vid,
+ enum mlxsw_reg_spms_state state)
+{
+ mlxsw_reg_spms_state_set(payload, vid, state);
+}
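+
+/* Example: moving VID 1 of a port to the forwarding state. Illustrative
+ * sketch; "mlxsw_core" and "local_port" are assumed to be supplied by the
+ * caller:
+ *
+ *   char *spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
+ *
+ *   if (!spms_pl)
+ *           return -ENOMEM;
+ *   mlxsw_reg_spms_pack(spms_pl, local_port);
+ *   mlxsw_reg_spms_vid_pack(spms_pl, 1, MLXSW_REG_SPMS_STATE_FORWARDING);
+ *   err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(spms), spms_pl);
+ *   kfree(spms_pl);
+ */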
+
+/* SPVID - Switch Port VID
+ * -----------------------
+ * The switch port VID configures the default VID for a port.
+ */
+#define MLXSW_REG_SPVID_ID 0x200E
+#define MLXSW_REG_SPVID_LEN 0x08
+
+MLXSW_REG_DEFINE(spvid, MLXSW_REG_SPVID_ID, MLXSW_REG_SPVID_LEN);
+
+/* reg_spvid_tport
+ * Port is tunnel port.
+ * Reserved when SwitchX/-2 or Spectrum-1.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spvid, tport, 0x00, 24, 1);
+
+/* reg_spvid_local_port
+ * When tport = 0: Local port number. Not supported for CPU port.
+ * When tport = 1: Tunnel port.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, spvid, 0x00, 16, 0x00, 12);
+
+/* reg_spvid_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spvid, sub_port, 0x00, 8, 8);
+
+/* reg_spvid_egr_et_set
+ * When VLAN is pushed at ingress (for untagged packets or for
+ * QinQ push mode) then the EtherType is decided at the egress port.
+ * Reserved when Spectrum-1.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvid, egr_et_set, 0x04, 24, 1);
+
+/* reg_spvid_et_vlan
+ * EtherType used when VLAN is pushed at ingress (for untagged
+ * packets or for QinQ push mode).
+ * 0: ether_type0 - (default)
+ * 1: ether_type1
+ * 2: ether_type2 - Reserved when Spectrum-1, supported by Spectrum-2
+ * Ethertype IDs are configured by SVER.
+ * Reserved when egr_et_set = 1.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvid, et_vlan, 0x04, 16, 2);
+
+/* reg_spvid_pvid
+ * Port default VID
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvid, pvid, 0x04, 0, 12);
+
+static inline void mlxsw_reg_spvid_pack(char *payload, u16 local_port, u16 pvid,
+ u8 et_vlan)
+{
+ MLXSW_REG_ZERO(spvid, payload);
+ mlxsw_reg_spvid_local_port_set(payload, local_port);
+ mlxsw_reg_spvid_pvid_set(payload, pvid);
+ mlxsw_reg_spvid_et_vlan_set(payload, et_vlan);
+}
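+
+/* Example: setting PVID 1 on a port and pushing tags with ether_type0.
+ * Illustrative sketch; "mlxsw_core" and "local_port" are assumed to be
+ * supplied by the caller:
+ *
+ *   char spvid_pl[MLXSW_REG_SPVID_LEN];
+ *
+ *   mlxsw_reg_spvid_pack(spvid_pl, local_port, 1, 0);
+ *   err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(spvid), spvid_pl);
+ */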
+
+/* SPVM - Switch Port VLAN Membership
+ * ----------------------------------
+ * The Switch Port VLAN Membership register configures the VLAN membership
+ * of a port in a VLAN denoted by VID. VLAN membership is managed per
+ * virtual port. The register can be used to add and remove VID(s) from a port.
+ */
+#define MLXSW_REG_SPVM_ID 0x200F
+#define MLXSW_REG_SPVM_BASE_LEN 0x04 /* base length, without records */
+#define MLXSW_REG_SPVM_REC_LEN 0x04 /* record length */
+#define MLXSW_REG_SPVM_REC_MAX_COUNT 255
+#define MLXSW_REG_SPVM_LEN (MLXSW_REG_SPVM_BASE_LEN + \
+ MLXSW_REG_SPVM_REC_LEN * MLXSW_REG_SPVM_REC_MAX_COUNT)
+
+MLXSW_REG_DEFINE(spvm, MLXSW_REG_SPVM_ID, MLXSW_REG_SPVM_LEN);
+
+/* reg_spvm_pt
+ * Priority tagged. If this bit is set, packets forwarded to the port with
+ * untagged VLAN membership (u bit is set) will be tagged with priority tag
+ * (VID=0)
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvm, pt, 0x00, 31, 1);
+
+/* reg_spvm_pte
+ * Priority Tagged Update Enable. On Write operations, if this bit is cleared,
+ * the pt bit will NOT be updated. To update the pt bit, pte must be set.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, spvm, pte, 0x00, 30, 1);
+
+/* reg_spvm_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, spvm, 0x00, 16, 0x00, 12);
+
+/* reg_spvm_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spvm, sub_port, 0x00, 8, 8);
+
+/* reg_spvm_num_rec
+ * Number of records to update. Each record contains: i, e, u, vid.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, spvm, num_rec, 0x00, 0, 8);
+
+/* reg_spvm_rec_i
+ * Ingress membership in VLAN ID.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, spvm, rec_i,
+ MLXSW_REG_SPVM_BASE_LEN, 14, 1,
+ MLXSW_REG_SPVM_REC_LEN, 0, false);
+
+/* reg_spvm_rec_e
+ * Egress membership in VLAN ID.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, spvm, rec_e,
+ MLXSW_REG_SPVM_BASE_LEN, 13, 1,
+ MLXSW_REG_SPVM_REC_LEN, 0, false);
+
+/* reg_spvm_rec_u
+ * Untagged - port is an untagged member - egress transmission uses untagged
+ * frames on VID<n>
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, spvm, rec_u,
+ MLXSW_REG_SPVM_BASE_LEN, 12, 1,
+ MLXSW_REG_SPVM_REC_LEN, 0, false);
+
+/* reg_spvm_rec_vid
+ * VLAN ID of the record.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, spvm, rec_vid,
+ MLXSW_REG_SPVM_BASE_LEN, 0, 12,
+ MLXSW_REG_SPVM_REC_LEN, 0, false);
+
+static inline void mlxsw_reg_spvm_pack(char *payload, u16 local_port,
+ u16 vid_begin, u16 vid_end,
+ bool is_member, bool untagged)
+{
+ int size = vid_end - vid_begin + 1;
+ int i;
+
+ MLXSW_REG_ZERO(spvm, payload);
+ mlxsw_reg_spvm_local_port_set(payload, local_port);
+ mlxsw_reg_spvm_num_rec_set(payload, size);
+
+ for (i = 0; i < size; i++) {
+ mlxsw_reg_spvm_rec_i_set(payload, i, is_member);
+ mlxsw_reg_spvm_rec_e_set(payload, i, is_member);
+ mlxsw_reg_spvm_rec_u_set(payload, i, untagged);
+ mlxsw_reg_spvm_rec_vid_set(payload, i, vid_begin + i);
+ }
+}
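+
+/* Example: adding a port to VIDs 10..20 as a tagged member. Illustrative
+ * sketch; "mlxsw_core" and "local_port" are assumed to be supplied by the
+ * caller. The payload can hold up to 255 records, so it is heap-allocated:
+ *
+ *   char *spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
+ *
+ *   if (!spvm_pl)
+ *           return -ENOMEM;
+ *   mlxsw_reg_spvm_pack(spvm_pl, local_port, 10, 20, true, false);
+ *   err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(spvm), spvm_pl);
+ *   kfree(spvm_pl);
+ */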
+
+/* SPAFT - Switch Port Acceptable Frame Types
+ * ------------------------------------------
+ * The Switch Port Acceptable Frame Types register configures the frame
+ * admittance of the port.
+ */
+#define MLXSW_REG_SPAFT_ID 0x2010
+#define MLXSW_REG_SPAFT_LEN 0x08
+
+MLXSW_REG_DEFINE(spaft, MLXSW_REG_SPAFT_ID, MLXSW_REG_SPAFT_LEN);
+
+/* reg_spaft_local_port
+ * Local port number.
+ * Access: Index
+ *
+ * Note: CPU port is not supported (all tag types are allowed).
+ */
+MLXSW_ITEM32_LP(reg, spaft, 0x00, 16, 0x00, 12);
+
+/* reg_spaft_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spaft, sub_port, 0x00, 8, 8);
+
+/* reg_spaft_allow_untagged
+ * When set, untagged frames on the ingress are allowed (default).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spaft, allow_untagged, 0x04, 31, 1);
+
+/* reg_spaft_allow_prio_tagged
+ * When set, priority tagged frames on the ingress are allowed (default).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spaft, allow_prio_tagged, 0x04, 30, 1);
+
+/* reg_spaft_allow_tagged
+ * When set, tagged frames on the ingress are allowed (default).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spaft, allow_tagged, 0x04, 29, 1);
+
+static inline void mlxsw_reg_spaft_pack(char *payload, u16 local_port,
+ bool allow_untagged)
+{
+ MLXSW_REG_ZERO(spaft, payload);
+ mlxsw_reg_spaft_local_port_set(payload, local_port);
+ mlxsw_reg_spaft_allow_untagged_set(payload, allow_untagged);
+ mlxsw_reg_spaft_allow_prio_tagged_set(payload, allow_untagged);
+ mlxsw_reg_spaft_allow_tagged_set(payload, true);
+}
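+
+/* Example: admitting only tagged frames on a port (untagged and
+ * priority-tagged frames are dropped). Illustrative sketch; "mlxsw_core" and
+ * "local_port" are assumed to be supplied by the caller:
+ *
+ *   char spaft_pl[MLXSW_REG_SPAFT_LEN];
+ *
+ *   mlxsw_reg_spaft_pack(spaft_pl, local_port, false);
+ *   err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(spaft), spaft_pl);
+ */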
+
+/* SFGC - Switch Flooding Group Configuration
+ * ------------------------------------------
+ * The following register controls the association of flooding tables and MIDs
+ * to packet types used for flooding.
+ */
+#define MLXSW_REG_SFGC_ID 0x2011
+#define MLXSW_REG_SFGC_LEN 0x14
+
+MLXSW_REG_DEFINE(sfgc, MLXSW_REG_SFGC_ID, MLXSW_REG_SFGC_LEN);
+
+enum mlxsw_reg_sfgc_type {
+ MLXSW_REG_SFGC_TYPE_BROADCAST,
+ MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST,
+ MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4,
+ MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6,
+ MLXSW_REG_SFGC_TYPE_RESERVED,
+ MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP,
+ MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL,
+ MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST,
+ MLXSW_REG_SFGC_TYPE_MAX,
+};
+
+/* reg_sfgc_type
+ * The traffic type to reach the flooding table.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfgc, type, 0x00, 0, 4);
+
+/* bridge_type is used in SFGC and SFMR. */
+enum mlxsw_reg_bridge_type {
+ MLXSW_REG_BRIDGE_TYPE_0 = 0, /* Used for .1q FIDs. */
+ MLXSW_REG_BRIDGE_TYPE_1 = 1, /* Used for .1d FIDs. */
+};
+
+/* reg_sfgc_bridge_type
+ * Access: Index
+ *
+ * Note: SwitchX-2 only supports 802.1Q mode.
+ */
+MLXSW_ITEM32(reg, sfgc, bridge_type, 0x04, 24, 3);
+
+enum mlxsw_flood_table_type {
+ MLXSW_REG_SFGC_TABLE_TYPE_VID = 1,
+ MLXSW_REG_SFGC_TABLE_TYPE_SINGLE = 2,
+ MLXSW_REG_SFGC_TABLE_TYPE_ANY = 0,
+ MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET = 3,
+ MLXSW_REG_SFGC_TABLE_TYPE_FID = 4,
+};
+
+/* reg_sfgc_table_type
+ * See mlxsw_flood_table_type
+ * Access: RW
+ *
+ * Note: FID offset and FID types are not supported in SwitchX-2.
+ */
+MLXSW_ITEM32(reg, sfgc, table_type, 0x04, 16, 3);
+
+/* reg_sfgc_flood_table
+ * Flooding table index to associate with the specific type on the specific
+ * switch partition.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, flood_table, 0x04, 0, 6);
+
+/* reg_sfgc_counter_set_type
+ * Counter Set Type for flow counters.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, counter_set_type, 0x0C, 24, 8);
+
+/* reg_sfgc_counter_index
+ * Counter Index for flow counters.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, counter_index, 0x0C, 0, 24);
+
+/* reg_sfgc_mid_base
+ * MID Base.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32(reg, sfgc, mid_base, 0x10, 0, 16);
+
+static inline void
+mlxsw_reg_sfgc_pack(char *payload, enum mlxsw_reg_sfgc_type type,
+ enum mlxsw_reg_bridge_type bridge_type,
+ enum mlxsw_flood_table_type table_type,
+ unsigned int flood_table, u16 mid_base)
+{
+ MLXSW_REG_ZERO(sfgc, payload);
+ mlxsw_reg_sfgc_type_set(payload, type);
+ mlxsw_reg_sfgc_bridge_type_set(payload, bridge_type);
+ mlxsw_reg_sfgc_table_type_set(payload, table_type);
+ mlxsw_reg_sfgc_flood_table_set(payload, flood_table);
+ mlxsw_reg_sfgc_mid_base_set(payload, mid_base);
+}
+
+/* SFDF - Switch Filtering DB Flush
+ * --------------------------------
+ * The switch filtering DB flush register is used to flush the FDB.
+ * Note that FDB notifications are flushed as well.
+ */
+#define MLXSW_REG_SFDF_ID 0x2013
+#define MLXSW_REG_SFDF_LEN 0x14
+
+MLXSW_REG_DEFINE(sfdf, MLXSW_REG_SFDF_ID, MLXSW_REG_SFDF_LEN);
+
+/* reg_sfdf_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfdf, swid, 0x00, 24, 8);
+
+enum mlxsw_reg_sfdf_flush_type {
+ MLXSW_REG_SFDF_FLUSH_PER_SWID,
+ MLXSW_REG_SFDF_FLUSH_PER_FID,
+ MLXSW_REG_SFDF_FLUSH_PER_PORT,
+ MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID,
+ MLXSW_REG_SFDF_FLUSH_PER_LAG,
+ MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID,
+ MLXSW_REG_SFDF_FLUSH_PER_NVE,
+ MLXSW_REG_SFDF_FLUSH_PER_NVE_AND_FID,
+};
+
+/* reg_sfdf_flush_type
+ * Flush type.
+ * 0 - All SWID dynamic entries are flushed.
+ * 1 - All FID dynamic entries are flushed.
+ * 2 - All dynamic entries pointing to port are flushed.
+ * 3 - All FID dynamic entries pointing to port are flushed.
+ * 4 - All dynamic entries pointing to LAG are flushed.
+ * 5 - All FID dynamic entries pointing to LAG are flushed.
+ * 6 - All entries of type "Unicast Tunnel" or "Multicast Tunnel" are
+ * flushed.
+ * 7 - All entries of type "Unicast Tunnel" or "Multicast Tunnel" are
+ * flushed, per FID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, flush_type, 0x04, 28, 4);
+
+/* reg_sfdf_flush_static
+ * Static.
+ * 0 - Flush only dynamic entries.
+ * 1 - Flush both dynamic and static entries.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, flush_static, 0x04, 24, 1);
+
+static inline void mlxsw_reg_sfdf_pack(char *payload,
+ enum mlxsw_reg_sfdf_flush_type type)
+{
+ MLXSW_REG_ZERO(sfdf, payload);
+ mlxsw_reg_sfdf_flush_type_set(payload, type);
+ mlxsw_reg_sfdf_flush_static_set(payload, true);
+}
+
+/* reg_sfdf_fid
+ * FID to flush.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, fid, 0x0C, 0, 16);
+
+/* reg_sfdf_system_port
+ * Port to flush.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, system_port, 0x0C, 0, 16);
+
+/* reg_sfdf_port_fid_system_port
+ * Port to flush, pointed to by FID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, port_fid_system_port, 0x08, 0, 16);
+
+/* reg_sfdf_lag_id
+ * LAG ID to flush.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, lag_id, 0x0C, 0, 10);
+
+/* reg_sfdf_lag_fid_lag_id
+ * LAG ID to flush, pointed to by FID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, lag_fid_lag_id, 0x08, 0, 10);
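+
+/* Example: flushing the FDB entries of FID 100 (mlxsw_reg_sfdf_pack() above
+ * requests static entries to be flushed as well). Illustrative sketch;
+ * "mlxsw_core" is assumed to be supplied by the caller:
+ *
+ *   char sfdf_pl[MLXSW_REG_SFDF_LEN];
+ *
+ *   mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_FID);
+ *   mlxsw_reg_sfdf_fid_set(sfdf_pl, 100);
+ *   err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfdf), sfdf_pl);
+ */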
+
+/* SLDR - Switch LAG Descriptor Register
+ * -----------------------------------------
+ * The switch LAG descriptor register is populated by LAG descriptors.
+ * Each LAG descriptor is indexed by lag_id. The LAG ID runs from 0 to
+ * max_lag-1.
+ */
+#define MLXSW_REG_SLDR_ID 0x2014
+#define MLXSW_REG_SLDR_LEN 0x0C /* counting in only one port in list */
+
+MLXSW_REG_DEFINE(sldr, MLXSW_REG_SLDR_ID, MLXSW_REG_SLDR_LEN);
+
+enum mlxsw_reg_sldr_op {
+ /* Indicates a creation of a new LAG-ID, lag_id must be valid */
+ MLXSW_REG_SLDR_OP_LAG_CREATE,
+ MLXSW_REG_SLDR_OP_LAG_DESTROY,
+ /* Ports that appear in the list have the Distributor enabled */
+ MLXSW_REG_SLDR_OP_LAG_ADD_PORT_LIST,
+ /* Removes ports from the distributor list */
+ MLXSW_REG_SLDR_OP_LAG_REMOVE_PORT_LIST,
+};
+
+/* reg_sldr_op
+ * Operation.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sldr, op, 0x00, 29, 3);
+
+/* reg_sldr_lag_id
+ * LAG identifier. The lag_id is the index into the LAG descriptor table.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sldr, lag_id, 0x00, 0, 10);
+
+static inline void mlxsw_reg_sldr_lag_create_pack(char *payload, u8 lag_id)
+{
+ MLXSW_REG_ZERO(sldr, payload);
+ mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_CREATE);
+ mlxsw_reg_sldr_lag_id_set(payload, lag_id);
+}
+
+static inline void mlxsw_reg_sldr_lag_destroy_pack(char *payload, u8 lag_id)
+{
+ MLXSW_REG_ZERO(sldr, payload);
+ mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_DESTROY);
+ mlxsw_reg_sldr_lag_id_set(payload, lag_id);
+}
+
+/* reg_sldr_num_ports
+ * The number of member ports of the LAG.
+ * Reserved for Create / Destroy operations
+ * For Add / Remove operations - indicates the number of ports in the list.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sldr, num_ports, 0x04, 24, 8);
+
+/* reg_sldr_system_port
+ * System port.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sldr, system_port, 0x08, 0, 16, 4, 0, false);
+
+static inline void mlxsw_reg_sldr_lag_add_port_pack(char *payload, u8 lag_id,
+ u16 local_port)
+{
+ MLXSW_REG_ZERO(sldr, payload);
+ mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_ADD_PORT_LIST);
+ mlxsw_reg_sldr_lag_id_set(payload, lag_id);
+ mlxsw_reg_sldr_num_ports_set(payload, 1);
+ mlxsw_reg_sldr_system_port_set(payload, 0, local_port);
+}
+
+static inline void mlxsw_reg_sldr_lag_remove_port_pack(char *payload, u8 lag_id,
+ u16 local_port)
+{
+ MLXSW_REG_ZERO(sldr, payload);
+ mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_REMOVE_PORT_LIST);
+ mlxsw_reg_sldr_lag_id_set(payload, lag_id);
+ mlxsw_reg_sldr_num_ports_set(payload, 1);
+ mlxsw_reg_sldr_system_port_set(payload, 0, local_port);
+}
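+
+/* Example: creating LAG 0 and adding one member port to its distributor
+ * list. Illustrative sketch; "mlxsw_core" and "local_port" are assumed to be
+ * supplied by the caller and error handling is omitted:
+ *
+ *   char sldr_pl[MLXSW_REG_SLDR_LEN];
+ *
+ *   mlxsw_reg_sldr_lag_create_pack(sldr_pl, 0);
+ *   mlxsw_reg_write(mlxsw_core, MLXSW_REG(sldr), sldr_pl);
+ *   mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, 0, local_port);
+ *   mlxsw_reg_write(mlxsw_core, MLXSW_REG(sldr), sldr_pl);
+ */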
+
+/* SLCR - Switch LAG Configuration 2 Register
+ * -------------------------------------------
+ * The Switch LAG Configuration register is used for configuring the
+ * LAG properties of the switch.
+ */
+#define MLXSW_REG_SLCR_ID 0x2015
+#define MLXSW_REG_SLCR_LEN 0x10
+
+MLXSW_REG_DEFINE(slcr, MLXSW_REG_SLCR_ID, MLXSW_REG_SLCR_LEN);
+
+enum mlxsw_reg_slcr_pp {
+ /* Global Configuration (for all ports) */
+ MLXSW_REG_SLCR_PP_GLOBAL,
+ /* Per port configuration, based on local_port field */
+ MLXSW_REG_SLCR_PP_PER_PORT,
+};
+
+/* reg_slcr_pp
+ * Per Port Configuration
+ * Note: Reading at Global mode results in reading port 1 configuration.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, slcr, pp, 0x00, 24, 1);
+
+/* reg_slcr_local_port
+ * Local port number
+ * Supported from CPU port
+ * Not supported from router port
+ * Reserved when pp = Global Configuration
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, slcr, 0x00, 16, 0x00, 12);
+
+enum mlxsw_reg_slcr_type {
+ MLXSW_REG_SLCR_TYPE_CRC, /* default */
+ MLXSW_REG_SLCR_TYPE_XOR,
+ MLXSW_REG_SLCR_TYPE_RANDOM,
+};
+
+/* reg_slcr_type
+ * Hash type
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, slcr, type, 0x00, 0, 4);
+
+/* Ingress port */
+#define MLXSW_REG_SLCR_LAG_HASH_IN_PORT BIT(0)
+/* SMAC - for IPv4 and IPv6 packets */
+#define MLXSW_REG_SLCR_LAG_HASH_SMAC_IP BIT(1)
+/* SMAC - for non-IP packets */
+#define MLXSW_REG_SLCR_LAG_HASH_SMAC_NONIP BIT(2)
+#define MLXSW_REG_SLCR_LAG_HASH_SMAC \
+ (MLXSW_REG_SLCR_LAG_HASH_SMAC_IP | \
+ MLXSW_REG_SLCR_LAG_HASH_SMAC_NONIP)
+/* DMAC - for IPv4 and IPv6 packets */
+#define MLXSW_REG_SLCR_LAG_HASH_DMAC_IP BIT(3)
+/* DMAC - for non-IP packets */
+#define MLXSW_REG_SLCR_LAG_HASH_DMAC_NONIP BIT(4)
+#define MLXSW_REG_SLCR_LAG_HASH_DMAC \
+ (MLXSW_REG_SLCR_LAG_HASH_DMAC_IP | \
+ MLXSW_REG_SLCR_LAG_HASH_DMAC_NONIP)
+/* Ethertype - for IPv4 and IPv6 packets */
+#define MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE_IP BIT(5)
+/* Ethertype - for non-IP packets */
+#define MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE_NONIP BIT(6)
+#define MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE \
+ (MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE_IP | \
+ MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE_NONIP)
+/* VLAN ID - for IPv4 and IPv6 packets */
+#define MLXSW_REG_SLCR_LAG_HASH_VLANID_IP BIT(7)
+/* VLAN ID - for non-IP packets */
+#define MLXSW_REG_SLCR_LAG_HASH_VLANID_NONIP BIT(8)
+#define MLXSW_REG_SLCR_LAG_HASH_VLANID \
+ (MLXSW_REG_SLCR_LAG_HASH_VLANID_IP | \
+ MLXSW_REG_SLCR_LAG_HASH_VLANID_NONIP)
+/* Source IP address (can be IPv4 or IPv6) */
+#define MLXSW_REG_SLCR_LAG_HASH_SIP BIT(9)
+/* Destination IP address (can be IPv4 or IPv6) */
+#define MLXSW_REG_SLCR_LAG_HASH_DIP BIT(10)
+/* TCP/UDP source port */
+#define MLXSW_REG_SLCR_LAG_HASH_SPORT BIT(11)
+/* TCP/UDP destination port*/
+#define MLXSW_REG_SLCR_LAG_HASH_DPORT BIT(12)
+/* IPv4 Protocol/IPv6 Next Header */
+#define MLXSW_REG_SLCR_LAG_HASH_IPPROTO BIT(13)
+/* IPv6 Flow label */
+#define MLXSW_REG_SLCR_LAG_HASH_FLOWLABEL BIT(14)
+/* SID - FCoE source ID */
+#define MLXSW_REG_SLCR_LAG_HASH_FCOE_SID BIT(15)
+/* DID - FCoE destination ID */
+#define MLXSW_REG_SLCR_LAG_HASH_FCOE_DID BIT(16)
+/* OXID - FCoE originator exchange ID */
+#define MLXSW_REG_SLCR_LAG_HASH_FCOE_OXID BIT(17)
+/* Destination QP number - for RoCE packets */
+#define MLXSW_REG_SLCR_LAG_HASH_ROCE_DQP BIT(19)
+
+/* reg_slcr_lag_hash
+ * LAG hashing configuration. This is a bitmask, in which each set
+ * bit includes the corresponding item in the LAG hash calculation.
+ * The default lag_hash contains SMAC, DMAC, VLANID and
+ * Ethertype (for all packet types).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, slcr, lag_hash, 0x04, 0, 20);
+
+/* reg_slcr_seed
+ * LAG seed value. The seed is the same for all ports.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, slcr, seed, 0x08, 0, 32);
+
+static inline void mlxsw_reg_slcr_pack(char *payload, u16 lag_hash, u32 seed)
+{
+ MLXSW_REG_ZERO(slcr, payload);
+ mlxsw_reg_slcr_pp_set(payload, MLXSW_REG_SLCR_PP_GLOBAL);
+ mlxsw_reg_slcr_type_set(payload, MLXSW_REG_SLCR_TYPE_CRC);
+ mlxsw_reg_slcr_lag_hash_set(payload, lag_hash);
+ mlxsw_reg_slcr_seed_set(payload, seed);
+}
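+
+/* Example: global CRC LAG hash over the default L2 fields plus the IP
+ * addresses and L4 ports. Illustrative sketch; "mlxsw_core" and "seed" are
+ * assumed to be supplied by the caller:
+ *
+ *   char slcr_pl[MLXSW_REG_SLCR_LEN];
+ *   u16 hash = MLXSW_REG_SLCR_LAG_HASH_SMAC |
+ *              MLXSW_REG_SLCR_LAG_HASH_DMAC |
+ *              MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
+ *              MLXSW_REG_SLCR_LAG_HASH_VLANID |
+ *              MLXSW_REG_SLCR_LAG_HASH_SIP |
+ *              MLXSW_REG_SLCR_LAG_HASH_DIP |
+ *              MLXSW_REG_SLCR_LAG_HASH_SPORT |
+ *              MLXSW_REG_SLCR_LAG_HASH_DPORT;
+ *
+ *   mlxsw_reg_slcr_pack(slcr_pl, hash, seed);
+ *   err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(slcr), slcr_pl);
+ */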
+
+/* SLCOR - Switch LAG Collector Register
+ * -------------------------------------
+ * The Switch LAG Collector register controls the Local Port membership
+ * in a LAG and enablement of the collector.
+ */
+#define MLXSW_REG_SLCOR_ID 0x2016
+#define MLXSW_REG_SLCOR_LEN 0x10
+
+MLXSW_REG_DEFINE(slcor, MLXSW_REG_SLCOR_ID, MLXSW_REG_SLCOR_LEN);
+
+enum mlxsw_reg_slcor_col {
+ /* Port is added with collector disabled */
+ MLXSW_REG_SLCOR_COL_LAG_ADD_PORT,
+ MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_ENABLED,
+ MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_DISABLED,
+ MLXSW_REG_SLCOR_COL_LAG_REMOVE_PORT,
+};
+
+/* reg_slcor_col
+ * Collector configuration
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, slcor, col, 0x00, 30, 2);
+
+/* reg_slcor_local_port
+ * Local port number
+ * Not supported for CPU port
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, slcor, 0x00, 16, 0x00, 12);
+
+/* reg_slcor_lag_id
+ * LAG Identifier. Index into the LAG descriptor table.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, slcor, lag_id, 0x00, 0, 10);
+
+/* reg_slcor_port_index
+ * Port index in the LAG list. Only valid when col is Add Port to LAG.
+ * Valid range is from 0 to cap_max_lag_members-1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, slcor, port_index, 0x04, 0, 10);
+
+static inline void mlxsw_reg_slcor_pack(char *payload,
+ u16 local_port, u16 lag_id,
+ enum mlxsw_reg_slcor_col col)
+{
+ MLXSW_REG_ZERO(slcor, payload);
+ mlxsw_reg_slcor_col_set(payload, col);
+ mlxsw_reg_slcor_local_port_set(payload, local_port);
+ mlxsw_reg_slcor_lag_id_set(payload, lag_id);
+}
+
+static inline void mlxsw_reg_slcor_port_add_pack(char *payload,
+ u16 local_port, u16 lag_id,
+ u8 port_index)
+{
+ mlxsw_reg_slcor_pack(payload, local_port, lag_id,
+ MLXSW_REG_SLCOR_COL_LAG_ADD_PORT);
+ mlxsw_reg_slcor_port_index_set(payload, port_index);
+}
+
+static inline void mlxsw_reg_slcor_port_remove_pack(char *payload,
+ u16 local_port, u16 lag_id)
+{
+ mlxsw_reg_slcor_pack(payload, local_port, lag_id,
+ MLXSW_REG_SLCOR_COL_LAG_REMOVE_PORT);
+}
+
+static inline void mlxsw_reg_slcor_col_enable_pack(char *payload,
+ u16 local_port, u16 lag_id)
+{
+ mlxsw_reg_slcor_pack(payload, local_port, lag_id,
+ MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_ENABLED);
+}
+
+static inline void mlxsw_reg_slcor_col_disable_pack(char *payload,
+ u16 local_port, u16 lag_id)
+{
+ mlxsw_reg_slcor_pack(payload, local_port, lag_id,
+ MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_DISABLED);
+}
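+
+/* Example: attaching a port to LAG 0 at port index 0 and then enabling its
+ * collector. Illustrative sketch; "mlxsw_core" and "local_port" are assumed
+ * to be supplied by the caller and error handling is omitted:
+ *
+ *   char slcor_pl[MLXSW_REG_SLCOR_LEN];
+ *
+ *   mlxsw_reg_slcor_port_add_pack(slcor_pl, local_port, 0, 0);
+ *   mlxsw_reg_write(mlxsw_core, MLXSW_REG(slcor), slcor_pl);
+ *   mlxsw_reg_slcor_col_enable_pack(slcor_pl, local_port, 0);
+ *   mlxsw_reg_write(mlxsw_core, MLXSW_REG(slcor), slcor_pl);
+ */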
+
+/* SPMLR - Switch Port MAC Learning Register
+ * -----------------------------------------
+ * Controls the Switch MAC learning policy per port.
+ */
+#define MLXSW_REG_SPMLR_ID 0x2018
+#define MLXSW_REG_SPMLR_LEN 0x8
+
+MLXSW_REG_DEFINE(spmlr, MLXSW_REG_SPMLR_ID, MLXSW_REG_SPMLR_LEN);
+
+/* reg_spmlr_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, spmlr, 0x00, 16, 0x00, 12);
+
+/* reg_spmlr_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spmlr, sub_port, 0x00, 8, 8);
+
+enum mlxsw_reg_spmlr_learn_mode {
+ MLXSW_REG_SPMLR_LEARN_MODE_DISABLE = 0,
+ MLXSW_REG_SPMLR_LEARN_MODE_ENABLE = 2,
+ MLXSW_REG_SPMLR_LEARN_MODE_SEC = 3,
+};
+
+/* reg_spmlr_learn_mode
+ * Learning mode on the port.
+ * 0 - Learning disabled.
+ * 2 - Learning enabled.
+ * 3 - Security mode.
+ *
+ * In security mode the switch does not learn MACs on the port, but uses the
+ * SMAC to see if it exists on another ingress port. If so, the packet is
+ * classified as a bad packet and is discarded unless the software registers
+ * to receive port security error packets using HPKT.
+ */
+MLXSW_ITEM32(reg, spmlr, learn_mode, 0x04, 30, 2);
+
+static inline void mlxsw_reg_spmlr_pack(char *payload, u16 local_port,
+ enum mlxsw_reg_spmlr_learn_mode mode)
+{
+ MLXSW_REG_ZERO(spmlr, payload);
+ mlxsw_reg_spmlr_local_port_set(payload, local_port);
+ mlxsw_reg_spmlr_sub_port_set(payload, 0);
+ mlxsw_reg_spmlr_learn_mode_set(payload, mode);
+}
+
+/* SVFA - Switch VID to FID Allocation Register
+ * --------------------------------------------
+ * Controls the VID to FID mapping and {Port, VID} to FID mapping for
+ * virtualized ports.
+ */
+#define MLXSW_REG_SVFA_ID 0x201C
+#define MLXSW_REG_SVFA_LEN 0x18
+
+MLXSW_REG_DEFINE(svfa, MLXSW_REG_SVFA_ID, MLXSW_REG_SVFA_LEN);
+
+/* reg_svfa_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, svfa, swid, 0x00, 24, 8);
+
+/* reg_svfa_local_port
+ * Local port number.
+ * Access: Index
+ *
+ * Note: Reserved for 802.1Q FIDs.
+ */
+MLXSW_ITEM32_LP(reg, svfa, 0x00, 16, 0x00, 12);
+
+enum mlxsw_reg_svfa_mt {
+ MLXSW_REG_SVFA_MT_VID_TO_FID,
+ MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
+ MLXSW_REG_SVFA_MT_VNI_TO_FID,
+};
+
+/* reg_svfa_mapping_table
+ * Mapping table:
+ * 0 - VID to FID
+ * 1 - {Port, VID} to FID
+ * 2 - VNI to FID
+ * Access: Index
+ *
+ * Note: Reserved for SwitchX-2.
+ */
+MLXSW_ITEM32(reg, svfa, mapping_table, 0x00, 8, 3);
+
+/* reg_svfa_v
+ * Valid.
+ * Valid if set.
+ * Access: RW
+ *
+ * Note: Reserved for SwitchX-2.
+ */
+MLXSW_ITEM32(reg, svfa, v, 0x00, 0, 1);
+
+/* reg_svfa_fid
+ * Filtering ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, svfa, fid, 0x04, 16, 16);
+
+/* reg_svfa_vid
+ * VLAN ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, svfa, vid, 0x04, 0, 12);
+
+/* reg_svfa_counter_set_type
+ * Counter set type for flow counters.
+ * Access: RW
+ *
+ * Note: Reserved for SwitchX-2.
+ */
+MLXSW_ITEM32(reg, svfa, counter_set_type, 0x08, 24, 8);
+
+/* reg_svfa_counter_index
+ * Counter index for flow counters.
+ * Access: RW
+ *
+ * Note: Reserved for SwitchX-2.
+ */
+MLXSW_ITEM32(reg, svfa, counter_index, 0x08, 0, 24);
+
+/* reg_svfa_vni
+ * Virtual Network Identifier.
+ * Access: Index
+ *
+ * Note: Reserved when mapping_table is not 2 (VNI mapping table).
+ */
+MLXSW_ITEM32(reg, svfa, vni, 0x10, 0, 24);
+
+/* reg_svfa_irif_v
+ * Ingress RIF valid.
+ * 0 - Ingress RIF is not valid, no ingress RIF assigned.
+ * 1 - Ingress RIF valid.
+ * Must not be set for a non-enabled RIF.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32(reg, svfa, irif_v, 0x14, 24, 1);
+
+/* reg_svfa_irif
+ * Ingress RIF (Router Interface).
+ * Range is 0..cap_max_router_interfaces-1.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and when irif_v=0.
+ */
+MLXSW_ITEM32(reg, svfa, irif, 0x14, 0, 16);
+
+static inline void __mlxsw_reg_svfa_pack(char *payload,
+ enum mlxsw_reg_svfa_mt mt, bool valid,
+ u16 fid, bool irif_v, u16 irif)
+{
+ MLXSW_REG_ZERO(svfa, payload);
+ mlxsw_reg_svfa_swid_set(payload, 0);
+ mlxsw_reg_svfa_mapping_table_set(payload, mt);
+ mlxsw_reg_svfa_v_set(payload, valid);
+ mlxsw_reg_svfa_fid_set(payload, fid);
+ mlxsw_reg_svfa_irif_v_set(payload, irif_v);
+ mlxsw_reg_svfa_irif_set(payload, irif_v ? irif : 0);
+}
+
+static inline void mlxsw_reg_svfa_port_vid_pack(char *payload, u16 local_port,
+ bool valid, u16 fid, u16 vid,
+ bool irif_v, u16 irif)
+{
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+
+ __mlxsw_reg_svfa_pack(payload, mt, valid, fid, irif_v, irif);
+ mlxsw_reg_svfa_local_port_set(payload, local_port);
+ mlxsw_reg_svfa_vid_set(payload, vid);
+}
+
+static inline void mlxsw_reg_svfa_vid_pack(char *payload, bool valid, u16 fid,
+ u16 vid, bool irif_v, u16 irif)
+{
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
+
+ __mlxsw_reg_svfa_pack(payload, mt, valid, fid, irif_v, irif);
+ mlxsw_reg_svfa_vid_set(payload, vid);
+}
+
+static inline void mlxsw_reg_svfa_vni_pack(char *payload, bool valid, u16 fid,
+ u32 vni, bool irif_v, u16 irif)
+{
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VNI_TO_FID;
+
+ __mlxsw_reg_svfa_pack(payload, mt, valid, fid, irif_v, irif);
+ mlxsw_reg_svfa_vni_set(payload, vni);
+}
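+
+/* Example: mapping {port, VID 10} to FID 5010 with no ingress RIF.
+ * Illustrative sketch; "mlxsw_core" and "local_port" are assumed to be
+ * supplied by the caller:
+ *
+ *   char svfa_pl[MLXSW_REG_SVFA_LEN];
+ *
+ *   mlxsw_reg_svfa_port_vid_pack(svfa_pl, local_port, true, 5010, 10,
+ *                                false, 0);
+ *   err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(svfa), svfa_pl);
+ */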
+
+/* SPVTR - Switch Port VLAN Stacking Register
+ * ------------------------------------------
+ * The Switch Port VLAN Stacking register configures the VLAN mode of the port
+ * to enable VLAN stacking.
+ */
+#define MLXSW_REG_SPVTR_ID 0x201D
+#define MLXSW_REG_SPVTR_LEN 0x10
+
+MLXSW_REG_DEFINE(spvtr, MLXSW_REG_SPVTR_ID, MLXSW_REG_SPVTR_LEN);
+
+/* reg_spvtr_tport
+ * Port is tunnel port.
+ * Access: Index
+ *
+ * Note: Reserved when SwitchX/-2 or Spectrum-1.
+ */
+MLXSW_ITEM32(reg, spvtr, tport, 0x00, 24, 1);
+
+/* reg_spvtr_local_port
+ * When tport = 0: local port number (Not supported from/to CPU).
+ * When tport = 1: tunnel port.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, spvtr, 0x00, 16, 0x00, 12);
+
+/* reg_spvtr_ippe
+ * Ingress Port Prio Mode Update Enable.
+ * When set, the Port Prio Mode is updated with the provided ipprio_mode field.
+ * Reserved on Get operations.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, spvtr, ippe, 0x04, 31, 1);
+
+/* reg_spvtr_ipve
+ * Ingress Port VID Mode Update Enable.
+ * When set, the Ingress Port VID Mode is updated with the provided ipvid_mode
+ * field.
+ * Reserved on Get operations.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, spvtr, ipve, 0x04, 30, 1);
+
+/* reg_spvtr_epve
+ * Egress Port VID Mode Update Enable.
+ * When set, the Egress Port VID Mode is updated with the provided epvid_mode
+ * field.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, spvtr, epve, 0x04, 29, 1);
+
+/* reg_spvtr_ipprio_mode
+ * Ingress Port Priority Mode.
+ * This controls the PCP and DEI of the new outer VLAN
+ * Note: for SwitchX/-2 the DEI is not affected.
+ * 0: use port default PCP and DEI (configured by QPDPC).
+ * 1: use C-VLAN PCP and DEI.
+ * Has no effect when ipvid_mode = 0.
+ * Reserved when tport = 1.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvtr, ipprio_mode, 0x04, 20, 4);
+
+enum mlxsw_reg_spvtr_ipvid_mode {
+ /* IEEE Compliant PVID (default) */
+ MLXSW_REG_SPVTR_IPVID_MODE_IEEE_COMPLIANT_PVID,
+ /* Push VLAN (for VLAN stacking, except prio tagged packets) */
+ MLXSW_REG_SPVTR_IPVID_MODE_PUSH_VLAN_FOR_UNTAGGED_PACKET,
+ /* Always push VLAN (also for prio tagged packets) */
+ MLXSW_REG_SPVTR_IPVID_MODE_ALWAYS_PUSH_VLAN,
+};
+
+/* reg_spvtr_ipvid_mode
+ * Ingress Port VLAN-ID Mode.
+ * For Spectrum family, this affects the values of SPVM.i
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvtr, ipvid_mode, 0x04, 16, 4);
+
+enum mlxsw_reg_spvtr_epvid_mode {
+ /* IEEE Compliant VLAN membership */
+ MLXSW_REG_SPVTR_EPVID_MODE_IEEE_COMPLIANT_VLAN_MEMBERSHIP,
+ /* Pop VLAN (for VLAN stacking) */
+ MLXSW_REG_SPVTR_EPVID_MODE_POP_VLAN,
+};
+
+/* reg_spvtr_epvid_mode
+ * Egress Port VLAN-ID Mode.
+ * For Spectrum family, this affects the values of SPVM.e,u,pt.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, spvtr, epvid_mode, 0x04, 0, 4);
+
+static inline void mlxsw_reg_spvtr_pack(char *payload, bool tport,
+ u16 local_port,
+ enum mlxsw_reg_spvtr_ipvid_mode ipvid_mode)
+{
+ MLXSW_REG_ZERO(spvtr, payload);
+ mlxsw_reg_spvtr_tport_set(payload, tport);
+ mlxsw_reg_spvtr_local_port_set(payload, local_port);
+ mlxsw_reg_spvtr_ipvid_mode_set(payload, ipvid_mode);
+ mlxsw_reg_spvtr_ipve_set(payload, true);
+}
+
+/* SVPE - Switch Virtual-Port Enabling Register
+ * --------------------------------------------
+ * Enables port virtualization.
+ */
+#define MLXSW_REG_SVPE_ID 0x201E
+#define MLXSW_REG_SVPE_LEN 0x4
+
+MLXSW_REG_DEFINE(svpe, MLXSW_REG_SVPE_ID, MLXSW_REG_SVPE_LEN);
+
+/* reg_svpe_local_port
+ * Local port number
+ * Access: Index
+ *
+ * Note: CPU port is not supported (uses VLAN mode only).
+ */
+MLXSW_ITEM32_LP(reg, svpe, 0x00, 16, 0x00, 12);
+
+/* reg_svpe_vp_en
+ * Virtual port enable.
+ * 0 - Disable, VLAN mode (VID to FID).
+ * 1 - Enable, Virtual port mode ({Port, VID} to FID).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, svpe, vp_en, 0x00, 8, 1);
+
+static inline void mlxsw_reg_svpe_pack(char *payload, u16 local_port,
+ bool enable)
+{
+ MLXSW_REG_ZERO(svpe, payload);
+ mlxsw_reg_svpe_local_port_set(payload, local_port);
+ mlxsw_reg_svpe_vp_en_set(payload, enable);
+}
+
+/* SFMR - Switch FID Management Register
+ * -------------------------------------
+ * Creates and configures FIDs.
+ */
+#define MLXSW_REG_SFMR_ID 0x201F
+#define MLXSW_REG_SFMR_LEN 0x30
+
+MLXSW_REG_DEFINE(sfmr, MLXSW_REG_SFMR_ID, MLXSW_REG_SFMR_LEN);
+
+enum mlxsw_reg_sfmr_op {
+ MLXSW_REG_SFMR_OP_CREATE_FID,
+ MLXSW_REG_SFMR_OP_DESTROY_FID,
+};
+
+/* reg_sfmr_op
+ * Operation.
+ * 0 - Create or edit FID.
+ * 1 - Destroy FID.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, sfmr, op, 0x00, 24, 4);
+
+/* reg_sfmr_fid
+ * Filtering ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfmr, fid, 0x00, 0, 16);
+
+/* reg_sfmr_flood_rsp
+ * Router sub-port flooding table.
+ * 0 - Regular flooding table.
+ * 1 - Router sub-port flooding table. For this FID the flooding is per
+ * router-sub-port local_port. Must not be set for a FID which is not a
+ * router-sub-port and must be set prior to enabling the relevant RIF.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32(reg, sfmr, flood_rsp, 0x08, 31, 1);
+
+/* reg_sfmr_flood_bridge_type
+ * Flood bridge type (see SFGC.bridge_type).
+ * 0 - type_0.
+ * 1 - type_1.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and when flood_rsp=1.
+ */
+MLXSW_ITEM32(reg, sfmr, flood_bridge_type, 0x08, 28, 1);
+
+/* reg_sfmr_fid_offset
+ * FID offset.
+ * Used to point into the flooding table selected by SFGC register if
+ * the table is of type FID-Offset. Otherwise, this field is reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfmr, fid_offset, 0x08, 0, 16);
+
+/* reg_sfmr_vtfp
+ * Valid Tunnel Flood Pointer.
+ * If not set, then nve_tunnel_flood_ptr is reserved and considered NULL.
+ * Access: RW
+ *
+ * Note: Reserved for 802.1Q FIDs.
+ */
+MLXSW_ITEM32(reg, sfmr, vtfp, 0x0C, 31, 1);
+
+/* reg_sfmr_nve_tunnel_flood_ptr
+ * Underlay Flooding and BC Pointer.
+ * Used as a pointer to the first entry of the group based link lists of
+ * flooding or BC entries (for NVE tunnels).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfmr, nve_tunnel_flood_ptr, 0x0C, 0, 24);
+
+/* reg_sfmr_vv
+ * VNI Valid.
+ * If not set, then vni is reserved.
+ * Access: RW
+ *
+ * Note: Reserved for 802.1Q FIDs.
+ */
+MLXSW_ITEM32(reg, sfmr, vv, 0x10, 31, 1);
+
+/* reg_sfmr_vni
+ * Virtual Network Identifier.
+ * When legacy bridge model is used, a given VNI can only be assigned to one
+ * FID. When unified bridge model is used, it configures only the FID->VNI,
+ * the VNI->FID is done by SVFA.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfmr, vni, 0x10, 0, 24);
+
+/* reg_sfmr_irif_v
+ * Ingress RIF valid.
+ * 0 - Ingress RIF is not valid, no ingress RIF assigned.
+ * 1 - Ingress RIF valid.
+ * Must not be set for a non-valid RIF.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32(reg, sfmr, irif_v, 0x14, 24, 1);
+
+/* reg_sfmr_irif
+ * Ingress RIF (Router Interface).
+ * Range is 0..cap_max_router_interfaces-1.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and when irif_v=0.
+ */
+MLXSW_ITEM32(reg, sfmr, irif, 0x14, 0, 16);
+
+/* reg_sfmr_smpe_valid
+ * SMPE is valid.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used, when flood_rsp=1 and on
+ * Spectrum-1.
+ */
+MLXSW_ITEM32(reg, sfmr, smpe_valid, 0x28, 20, 1);
+
+/* reg_sfmr_smpe
+ * Switch multicast port to egress VID.
+ * Range is 0..cap_max_rmpe-1
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used, when flood_rsp=1 and on
+ * Spectrum-1.
+ */
+MLXSW_ITEM32(reg, sfmr, smpe, 0x28, 0, 16);
+
+static inline void mlxsw_reg_sfmr_pack(char *payload,
+ enum mlxsw_reg_sfmr_op op, u16 fid,
+ u16 fid_offset, bool flood_rsp,
+ enum mlxsw_reg_bridge_type bridge_type,
+ bool smpe_valid, u16 smpe)
+{
+ MLXSW_REG_ZERO(sfmr, payload);
+ mlxsw_reg_sfmr_op_set(payload, op);
+ mlxsw_reg_sfmr_fid_set(payload, fid);
+ mlxsw_reg_sfmr_fid_offset_set(payload, fid_offset);
+ mlxsw_reg_sfmr_vtfp_set(payload, false);
+ mlxsw_reg_sfmr_vv_set(payload, false);
+ mlxsw_reg_sfmr_flood_rsp_set(payload, flood_rsp);
+ mlxsw_reg_sfmr_flood_bridge_type_set(payload, bridge_type);
+ mlxsw_reg_sfmr_smpe_valid_set(payload, smpe_valid);
+ mlxsw_reg_sfmr_smpe_set(payload, smpe);
+}
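+
+/* Example: creating FID 5010 as a .1q (bridge type 0) FID with no FID
+ * offset, no router sub-port flooding and no SMPE. Illustrative sketch;
+ * "mlxsw_core" is assumed to be supplied by the caller:
+ *
+ *   char sfmr_pl[MLXSW_REG_SFMR_LEN];
+ *
+ *   mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, 5010, 0,
+ *                       false, MLXSW_REG_BRIDGE_TYPE_0, false, 0);
+ *   err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfmr), sfmr_pl);
+ */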
+
+/* SPVMLR - Switch Port VLAN MAC Learning Register
+ * -----------------------------------------------
+ * Controls the switch MAC learning policy per {Port, VID}.
+ */
+#define MLXSW_REG_SPVMLR_ID 0x2020
+#define MLXSW_REG_SPVMLR_BASE_LEN 0x04 /* base length, without records */
+#define MLXSW_REG_SPVMLR_REC_LEN 0x04 /* record length */
+#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 255
+#define MLXSW_REG_SPVMLR_LEN (MLXSW_REG_SPVMLR_BASE_LEN + \
+ MLXSW_REG_SPVMLR_REC_LEN * \
+ MLXSW_REG_SPVMLR_REC_MAX_COUNT)
+
+MLXSW_REG_DEFINE(spvmlr, MLXSW_REG_SPVMLR_ID, MLXSW_REG_SPVMLR_LEN);
+
+/* reg_spvmlr_local_port
+ * Local ingress port.
+ * Access: Index
+ *
+ * Note: CPU port is not supported.
+ */
+MLXSW_ITEM32_LP(reg, spvmlr, 0x00, 16, 0x00, 12);
+
+/* reg_spvmlr_num_rec
+ * Number of records to update.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, spvmlr, num_rec, 0x00, 0, 8);
+
+/* reg_spvmlr_rec_learn_enable
+ * 0 - Disable learning for {Port, VID}.
+ * 1 - Enable learning for {Port, VID}.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, spvmlr, rec_learn_enable, MLXSW_REG_SPVMLR_BASE_LEN,
+ 31, 1, MLXSW_REG_SPVMLR_REC_LEN, 0x00, false);
+
+/* reg_spvmlr_rec_vid
+ * VLAN ID to be added/removed from port or for querying.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, spvmlr, rec_vid, MLXSW_REG_SPVMLR_BASE_LEN, 0, 12,
+ MLXSW_REG_SPVMLR_REC_LEN, 0x00, false);
+
+static inline void mlxsw_reg_spvmlr_pack(char *payload, u16 local_port,
+ u16 vid_begin, u16 vid_end,
+ bool learn_enable)
+{
+ int num_rec = vid_end - vid_begin + 1;
+ int i;
+
+ WARN_ON(num_rec < 1 || num_rec > MLXSW_REG_SPVMLR_REC_MAX_COUNT);
+
+ MLXSW_REG_ZERO(spvmlr, payload);
+ mlxsw_reg_spvmlr_local_port_set(payload, local_port);
+ mlxsw_reg_spvmlr_num_rec_set(payload, num_rec);
+
+ for (i = 0; i < num_rec; i++) {
+ mlxsw_reg_spvmlr_rec_learn_enable_set(payload, i, learn_enable);
+ mlxsw_reg_spvmlr_rec_vid_set(payload, i, vid_begin + i);
+ }
+}
+
+/* SPVC - Switch Port VLAN Classification Register
+ * -----------------------------------------------
+ * Configures the port to identify packets as untagged, single-tagged or
+ * double-tagged based on the packet EtherTypes.
+ * Ethertype IDs are configured by SVER.
+ */
+#define MLXSW_REG_SPVC_ID 0x2026
+#define MLXSW_REG_SPVC_LEN 0x0C
+
+MLXSW_REG_DEFINE(spvc, MLXSW_REG_SPVC_ID, MLXSW_REG_SPVC_LEN);
+
+/* reg_spvc_local_port
+ * Local port.
+ * Access: Index
+ *
+ * Note: applies both to Rx port and Tx port, so if a packet traverses
+ * through Rx port i and a Tx port j then port i and port j must have the
+ * same configuration.
+ */
+MLXSW_ITEM32_LP(reg, spvc, 0x00, 16, 0x00, 12);
+
+/* reg_spvc_inner_et2
+ * Vlan Tag1 EtherType2 enable.
+ * Packet is initially classified as double VLAN Tag if in addition to
+ * being classified with a tag0 VLAN Tag its tag1 EtherType value is
+ * equal to ether_type2.
+ * 0: disable (default)
+ * 1: enable
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvc, inner_et2, 0x08, 17, 1);
+
+/* reg_spvc_et2
+ * Vlan Tag0 EtherType2 enable.
+ * Packet is initially classified as VLAN Tag if its tag0 EtherType is
+ * equal to ether_type2.
+ * 0: disable (default)
+ * 1: enable
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvc, et2, 0x08, 16, 1);
+
+/* reg_spvc_inner_et1
+ * Vlan Tag1 EtherType1 enable.
+ * Packet is initially classified as double VLAN Tag if in addition to
+ * being classified with a tag0 VLAN Tag its tag1 EtherType value is
+ * equal to ether_type1.
+ * 0: disable
+ * 1: enable (default)
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvc, inner_et1, 0x08, 9, 1);
+
+/* reg_spvc_et1
+ * Vlan Tag0 EtherType1 enable.
+ * Packet is initially classified as VLAN Tag if its tag0 EtherType is
+ * equal to ether_type1.
+ * 0: disable
+ * 1: enable (default)
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvc, et1, 0x08, 8, 1);
+
+/* reg_spvc_inner_et0
+ * Vlan Tag1 EtherType0 enable.
+ * Packet is initially classified as double VLAN Tag if in addition to
+ * being classified with a tag0 VLAN Tag its tag1 EtherType value is
+ * equal to ether_type0.
+ * 0: disable
+ * 1: enable (default)
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvc, inner_et0, 0x08, 1, 1);
+
+/* reg_spvc_et0
+ * Vlan Tag0 EtherType0 enable.
+ * Packet is initially classified as VLAN Tag if its tag0 EtherType is
+ * equal to ether_type0.
+ * 0: disable
+ * 1: enable (default)
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvc, et0, 0x08, 0, 1);
+
+static inline void mlxsw_reg_spvc_pack(char *payload, u16 local_port, bool et1,
+ bool et0)
+{
+ MLXSW_REG_ZERO(spvc, payload);
+ mlxsw_reg_spvc_local_port_set(payload, local_port);
+ /* Enable inner_et1 and inner_et0 to enable identification of double
+ * tagged packets.
+ */
+ mlxsw_reg_spvc_inner_et1_set(payload, 1);
+ mlxsw_reg_spvc_inner_et0_set(payload, 1);
+ mlxsw_reg_spvc_et1_set(payload, et1);
+ mlxsw_reg_spvc_et0_set(payload, et0);
+}
+
+/* SPEVET - Switch Port Egress VLAN EtherType
+ * ------------------------------------------
+ * The switch port egress VLAN EtherType configures which EtherType to push at
+ * egress for packets incoming through a local port for which 'SPVID.egr_et_set'
+ * is set.
+ */
+#define MLXSW_REG_SPEVET_ID 0x202A
+#define MLXSW_REG_SPEVET_LEN 0x08
+
+MLXSW_REG_DEFINE(spevet, MLXSW_REG_SPEVET_ID, MLXSW_REG_SPEVET_LEN);
+
+/* reg_spevet_local_port
+ * Egress Local port number.
+ * Not supported to CPU port.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, spevet, 0x00, 16, 0x00, 12);
+
+/* reg_spevet_et_vlan
+ * Egress EtherType VLAN to push when SPVID.egr_et_set field set for the packet:
+ * 0: ether_type0 - (default)
+ * 1: ether_type1
+ * 2: ether_type2
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spevet, et_vlan, 0x04, 16, 2);
+
+static inline void mlxsw_reg_spevet_pack(char *payload, u16 local_port,
+ u8 et_vlan)
+{
+ MLXSW_REG_ZERO(spevet, payload);
+ mlxsw_reg_spevet_local_port_set(payload, local_port);
+ mlxsw_reg_spevet_et_vlan_set(payload, et_vlan);
+}
+
+/* SMPE - Switch Multicast Port to Egress VID
+ * ------------------------------------------
+ * The switch multicast port to egress VID maps
+ * {egress_port, SMPE index} -> {VID}.
+ */
+#define MLXSW_REG_SMPE_ID 0x202B
+#define MLXSW_REG_SMPE_LEN 0x0C
+
+MLXSW_REG_DEFINE(smpe, MLXSW_REG_SMPE_ID, MLXSW_REG_SMPE_LEN);
+
+/* reg_smpe_local_port
+ * Local port number.
+ * CPU port is not supported.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, smpe, 0x00, 16, 0x00, 12);
+
+/* reg_smpe_smpe_index
+ * Switch multicast port to egress VID.
+ * Range is 0..cap_max_rmpe-1.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smpe, smpe_index, 0x04, 0, 16);
+
+/* reg_smpe_evid
+ * Egress VID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, smpe, evid, 0x08, 0, 12);
+
+static inline void mlxsw_reg_smpe_pack(char *payload, u16 local_port,
+ u16 smpe_index, u16 evid)
+{
+ MLXSW_REG_ZERO(smpe, payload);
+ mlxsw_reg_smpe_local_port_set(payload, local_port);
+ mlxsw_reg_smpe_smpe_index_set(payload, smpe_index);
+ mlxsw_reg_smpe_evid_set(payload, evid);
+}
+
+/* SMID-V2 - Switch Multicast ID Version 2 Register
+ * ------------------------------------------------
+ * The MID record maps from a MID (Multicast ID), which is a unique identifier
+ * of the multicast group within the stacking domain, into a list of local
+ * ports into which the packet is replicated.
+ */
+#define MLXSW_REG_SMID2_ID 0x2034
+#define MLXSW_REG_SMID2_LEN 0x120
+
+MLXSW_REG_DEFINE(smid2, MLXSW_REG_SMID2_ID, MLXSW_REG_SMID2_LEN);
+
+/* reg_smid2_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smid2, swid, 0x00, 24, 8);
+
+/* reg_smid2_mid
+ * Multicast identifier - global identifier that represents the multicast group
+ * across all devices.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smid2, mid, 0x00, 0, 16);
+
+/* reg_smid2_smpe_valid
+ * SMPE is valid.
+ * When not valid, the egress VID will not be modified by the SMPE table.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and on Spectrum-2.
+ */
+MLXSW_ITEM32(reg, smid2, smpe_valid, 0x08, 20, 1);
+
+/* reg_smid2_smpe
+ * Switch multicast port to egress VID.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and on Spectrum-2.
+ */
+MLXSW_ITEM32(reg, smid2, smpe, 0x08, 0, 16);
+
+/* reg_smid2_port
+ * Local port membership (1 bit per port).
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, smid2, port, 0x20, 0x80, 1);
+
+/* reg_smid2_port_mask
+ * Local port mask (1 bit per port).
+ * Access: WO
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, smid2, port_mask, 0xA0, 0x80, 1);
+
+static inline void mlxsw_reg_smid2_pack(char *payload, u16 mid, u16 port,
+ bool set, bool smpe_valid, u16 smpe)
+{
+ MLXSW_REG_ZERO(smid2, payload);
+ mlxsw_reg_smid2_swid_set(payload, 0);
+ mlxsw_reg_smid2_mid_set(payload, mid);
+ mlxsw_reg_smid2_port_set(payload, port, set);
+ mlxsw_reg_smid2_port_mask_set(payload, port, 1);
+ mlxsw_reg_smid2_smpe_valid_set(payload, smpe_valid);
+ mlxsw_reg_smid2_smpe_set(payload, smpe_valid ? smpe : 0);
+}
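+
+/* Example: adding a port to the replication list of MID 1000 without SMPE
+ * based egress VID rewrite. Illustrative sketch; "mlxsw_core" and
+ * "local_port" are assumed to be supplied by the caller:
+ *
+ *   char *smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
+ *
+ *   if (!smid2_pl)
+ *           return -ENOMEM;
+ *   mlxsw_reg_smid2_pack(smid2_pl, 1000, local_port, true, false, 0);
+ *   err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(smid2), smid2_pl);
+ *   kfree(smid2_pl);
+ */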
+
+/* CWTP - Congestion WRED ECN TClass Profile
+ * ----------------------------------------
+ * Configures the profiles for queues of egress port and traffic class
+ */
+#define MLXSW_REG_CWTP_ID 0x2802
+#define MLXSW_REG_CWTP_BASE_LEN 0x28
+#define MLXSW_REG_CWTP_PROFILE_DATA_REC_LEN 0x08
+#define MLXSW_REG_CWTP_LEN 0x40
+
+MLXSW_REG_DEFINE(cwtp, MLXSW_REG_CWTP_ID, MLXSW_REG_CWTP_LEN);
+
+/* reg_cwtp_local_port
+ * Local port number
+ * Not supported for CPU port
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, cwtp, 0x00, 16, 0x00, 12);
+
+/* reg_cwtp_traffic_class
+ * Traffic Class to configure
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, cwtp, traffic_class, 32, 0, 8);
+
+/* reg_cwtp_profile_min
+ * Minimum Average Queue Size of the profile in cells.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, cwtp, profile_min, MLXSW_REG_CWTP_BASE_LEN,
+ 0, 20, MLXSW_REG_CWTP_PROFILE_DATA_REC_LEN, 0, false);
+
+/* reg_cwtp_profile_percent
+ * Percentage of WRED and ECN marking for maximum Average Queue size
+ * Range is 0 to 100, units of integer percentage
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, cwtp, profile_percent, MLXSW_REG_CWTP_BASE_LEN,
+ 24, 7, MLXSW_REG_CWTP_PROFILE_DATA_REC_LEN, 4, false);
+
+/* reg_cwtp_profile_max
+ * Maximum Average Queue size of the profile in cells
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, cwtp, profile_max, MLXSW_REG_CWTP_BASE_LEN,
+ 0, 20, MLXSW_REG_CWTP_PROFILE_DATA_REC_LEN, 4, false);
+
+#define MLXSW_REG_CWTP_MIN_VALUE 64
+#define MLXSW_REG_CWTP_MAX_PROFILE 2
+#define MLXSW_REG_CWTP_DEFAULT_PROFILE 1
+
+static inline void mlxsw_reg_cwtp_pack(char *payload, u16 local_port,
+ u8 traffic_class)
+{
+ int i;
+
+ MLXSW_REG_ZERO(cwtp, payload);
+ mlxsw_reg_cwtp_local_port_set(payload, local_port);
+ mlxsw_reg_cwtp_traffic_class_set(payload, traffic_class);
+
+ for (i = 0; i <= MLXSW_REG_CWTP_MAX_PROFILE; i++) {
+ mlxsw_reg_cwtp_profile_min_set(payload, i,
+ MLXSW_REG_CWTP_MIN_VALUE);
+ mlxsw_reg_cwtp_profile_max_set(payload, i,
+ MLXSW_REG_CWTP_MIN_VALUE);
+ }
+}
+
+#define MLXSW_REG_CWTP_PROFILE_TO_INDEX(profile) (profile - 1)
+
+static inline void
+mlxsw_reg_cwtp_profile_pack(char *payload, u8 profile, u32 min, u32 max,
+ u32 probability)
+{
+ u8 index = MLXSW_REG_CWTP_PROFILE_TO_INDEX(profile);
+
+ mlxsw_reg_cwtp_profile_min_set(payload, index, min);
+ mlxsw_reg_cwtp_profile_max_set(payload, index, max);
+ mlxsw_reg_cwtp_profile_percent_set(payload, index, probability);
+}
+
+/* CWTPM - Congestion WRED ECN TClass and Pool Mapping
+ * ---------------------------------------------------
+ * The CWTPM register maps each egress port and traffic class to profile num.
+ */
+#define MLXSW_REG_CWTPM_ID 0x2803
+#define MLXSW_REG_CWTPM_LEN 0x44
+
+MLXSW_REG_DEFINE(cwtpm, MLXSW_REG_CWTPM_ID, MLXSW_REG_CWTPM_LEN);
+
+/* reg_cwtpm_local_port
+ * Local port number
+ * Not supported for CPU port
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, cwtpm, 0x00, 16, 0x00, 12);
+
+/* reg_cwtpm_traffic_class
+ * Traffic Class to configure
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, cwtpm, traffic_class, 32, 0, 8);
+
+/* reg_cwtpm_ew
+ * Control enablement of WRED for traffic class:
+ * 0 - Disable
+ * 1 - Enable
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, cwtpm, ew, 36, 1, 1);
+
+/* reg_cwtpm_ee
+ * Control enablement of ECN for traffic class:
+ * 0 - Disable
+ * 1 - Enable
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, cwtpm, ee, 36, 0, 1);
+
+/* reg_cwtpm_tcp_g
+ * TCP Green Profile.
+ * Index of the profile within {port, traffic class} to use.
+ * 0 for disabling both WRED and ECN for this type of traffic.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, cwtpm, tcp_g, 52, 0, 2);
+
+/* reg_cwtpm_tcp_y
+ * TCP Yellow Profile.
+ * Index of the profile within {port, traffic class} to use.
+ * 0 for disabling both WRED and ECN for this type of traffic.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, cwtpm, tcp_y, 56, 16, 2);
+
+/* reg_cwtpm_tcp_r
+ * TCP Red Profile.
+ * Index of the profile within {port, traffic class} to use.
+ * 0 for disabling both WRED and ECN for this type of traffic.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, cwtpm, tcp_r, 56, 0, 2);
+
+/* reg_cwtpm_ntcp_g
+ * Non-TCP Green Profile.
+ * Index of the profile within {port, traffic class} to use.
+ * 0 for disabling both WRED and ECN for this type of traffic.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, cwtpm, ntcp_g, 60, 0, 2);
+
+/* reg_cwtpm_ntcp_y
+ * Non-TCP Yellow Profile.
+ * Index of the profile within {port, traffic class} to use.
+ * 0 for disabling both WRED and ECN for this type of traffic.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, cwtpm, ntcp_y, 64, 16, 2);
+
+/* reg_cwtpm_ntcp_r
+ * Non-TCP Red Profile.
+ * Index of the profile within {port, traffic class} to use.
+ * 0 for disabling both WRED and ECN for this type of traffic.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, cwtpm, ntcp_r, 64, 0, 2);
+
+#define MLXSW_REG_CWTPM_RESET_PROFILE 0
+
+static inline void mlxsw_reg_cwtpm_pack(char *payload, u16 local_port,
+ u8 traffic_class, u8 profile,
+ bool wred, bool ecn)
+{
+ MLXSW_REG_ZERO(cwtpm, payload);
+ mlxsw_reg_cwtpm_local_port_set(payload, local_port);
+ mlxsw_reg_cwtpm_traffic_class_set(payload, traffic_class);
+ mlxsw_reg_cwtpm_ew_set(payload, wred);
+ mlxsw_reg_cwtpm_ee_set(payload, ecn);
+ mlxsw_reg_cwtpm_tcp_g_set(payload, profile);
+ mlxsw_reg_cwtpm_tcp_y_set(payload, profile);
+ mlxsw_reg_cwtpm_tcp_r_set(payload, profile);
+ mlxsw_reg_cwtpm_ntcp_g_set(payload, profile);
+ mlxsw_reg_cwtpm_ntcp_y_set(payload, profile);
+ mlxsw_reg_cwtpm_ntcp_r_set(payload, profile);
+}
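+
+/* Usage sketch (illustrative, not part of the original interface):
+ * configuring WRED/ECN for one traffic class of an egress port. CWTP
+ * programs the profile thresholds and CWTPM selects the profile and enables
+ * WRED and ECN marking. Assumes mlxsw_reg_write() and MLXSW_REG() from
+ * core.h; "core", "local_port", "tclass", "min_cells", "max_cells" and
+ * "probability" are placeholders supplied by the caller.
+ *
+ *	char cwtp_pl[MLXSW_REG_CWTP_LEN];
+ *	char cwtpm_pl[MLXSW_REG_CWTPM_LEN];
+ *	int err;
+ *
+ *	mlxsw_reg_cwtp_pack(cwtp_pl, local_port, tclass);
+ *	mlxsw_reg_cwtp_profile_pack(cwtp_pl, MLXSW_REG_CWTP_DEFAULT_PROFILE,
+ *				    min_cells, max_cells, probability);
+ *	err = mlxsw_reg_write(core, MLXSW_REG(cwtp), cwtp_pl);
+ *	if (err)
+ *		return err;
+ *
+ *	mlxsw_reg_cwtpm_pack(cwtpm_pl, local_port, tclass,
+ *			     MLXSW_REG_CWTP_DEFAULT_PROFILE, true, true);
+ *	err = mlxsw_reg_write(core, MLXSW_REG(cwtpm), cwtpm_pl);
+ */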
+
+/* PGCR - Policy-Engine General Configuration Register
+ * ---------------------------------------------------
+ * This register configures general Policy-Engine settings.
+ */
+#define MLXSW_REG_PGCR_ID 0x3001
+#define MLXSW_REG_PGCR_LEN 0x20
+
+MLXSW_REG_DEFINE(pgcr, MLXSW_REG_PGCR_ID, MLXSW_REG_PGCR_LEN);
+
+/* reg_pgcr_default_action_pointer_base
+ * Default action pointer base. Each region has a default action pointer
+ * which is equal to default_action_pointer_base + region_id.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pgcr, default_action_pointer_base, 0x1C, 0, 24);
+
+static inline void mlxsw_reg_pgcr_pack(char *payload, u32 pointer_base)
+{
+ MLXSW_REG_ZERO(pgcr, payload);
+ mlxsw_reg_pgcr_default_action_pointer_base_set(payload, pointer_base);
+}
+
+/* PPBT - Policy-Engine Port Binding Table
+ * ---------------------------------------
+ * This register is used for configuration of the Port Binding Table.
+ */
+#define MLXSW_REG_PPBT_ID 0x3002
+#define MLXSW_REG_PPBT_LEN 0x14
+
+MLXSW_REG_DEFINE(ppbt, MLXSW_REG_PPBT_ID, MLXSW_REG_PPBT_LEN);
+
+enum mlxsw_reg_pxbt_e {
+ MLXSW_REG_PXBT_E_IACL,
+ MLXSW_REG_PXBT_E_EACL,
+};
+
+/* reg_ppbt_e
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppbt, e, 0x00, 31, 1);
+
+enum mlxsw_reg_pxbt_op {
+ MLXSW_REG_PXBT_OP_BIND,
+ MLXSW_REG_PXBT_OP_UNBIND,
+};
+
+/* reg_ppbt_op
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppbt, op, 0x00, 28, 3);
+
+/* reg_ppbt_local_port
+ * Local port. Not including CPU port.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, ppbt, 0x00, 16, 0x00, 12);
+
+/* reg_ppbt_g
+ * group - When set, the binding is of an ACL group. When cleared,
+ * the binding is of an ACL.
+ * Must be set to 1 for Spectrum.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppbt, g, 0x10, 31, 1);
+
+/* reg_ppbt_acl_info
+ * ACL/ACL group identifier. If the g bit is set, this field should hold
+ * the acl_group_id, else it should hold the acl_id.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppbt, acl_info, 0x10, 0, 16);
+
+static inline void mlxsw_reg_ppbt_pack(char *payload, enum mlxsw_reg_pxbt_e e,
+ enum mlxsw_reg_pxbt_op op,
+ u16 local_port, u16 acl_info)
+{
+ MLXSW_REG_ZERO(ppbt, payload);
+ mlxsw_reg_ppbt_e_set(payload, e);
+ mlxsw_reg_ppbt_op_set(payload, op);
+ mlxsw_reg_ppbt_local_port_set(payload, local_port);
+ mlxsw_reg_ppbt_g_set(payload, true);
+ mlxsw_reg_ppbt_acl_info_set(payload, acl_info);
+}
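+
+/* Usage sketch (illustrative, not part of the original interface): binding
+ * an ingress ACL group to a local port. Assumes mlxsw_reg_write() and
+ * MLXSW_REG() from core.h; "core", "local_port" and "acl_group_id" are
+ * placeholders supplied by the caller.
+ *
+ *	char ppbt_pl[MLXSW_REG_PPBT_LEN];
+ *	int err;
+ *
+ *	mlxsw_reg_ppbt_pack(ppbt_pl, MLXSW_REG_PXBT_E_IACL,
+ *			    MLXSW_REG_PXBT_OP_BIND, local_port, acl_group_id);
+ *	err = mlxsw_reg_write(core, MLXSW_REG(ppbt), ppbt_pl);
+ */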
+
+/* PACL - Policy-Engine ACL Register
+ * ---------------------------------
+ * This register is used for configuration of the ACL.
+ */
+#define MLXSW_REG_PACL_ID 0x3004
+#define MLXSW_REG_PACL_LEN 0x70
+
+MLXSW_REG_DEFINE(pacl, MLXSW_REG_PACL_ID, MLXSW_REG_PACL_LEN);
+
+/* reg_pacl_v
+ * Valid. Setting the v bit makes the ACL valid. It should not be cleared
+ * while the ACL is bound to a port, a VLAN or an ACL rule.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pacl, v, 0x00, 24, 1);
+
+/* reg_pacl_acl_id
+ * An identifier representing the ACL (managed by software)
+ * Range 0 .. cap_max_acl_regions - 1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pacl, acl_id, 0x08, 0, 16);
+
+#define MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN 16
+
+/* reg_pacl_tcam_region_info
+ * Opaque object that represents a TCAM region.
+ * Obtained through PTAR register.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, pacl, tcam_region_info, 0x30,
+ MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN);
+
+static inline void mlxsw_reg_pacl_pack(char *payload, u16 acl_id,
+ bool valid, const char *tcam_region_info)
+{
+ MLXSW_REG_ZERO(pacl, payload);
+ mlxsw_reg_pacl_acl_id_set(payload, acl_id);
+ mlxsw_reg_pacl_v_set(payload, valid);
+ mlxsw_reg_pacl_tcam_region_info_memcpy_to(payload, tcam_region_info);
+}
+
+/* PAGT - Policy-Engine ACL Group Table
+ * ------------------------------------
+ * This register is used for configuration of the ACL Group Table.
+ */
+#define MLXSW_REG_PAGT_ID 0x3005
+#define MLXSW_REG_PAGT_BASE_LEN 0x30
+#define MLXSW_REG_PAGT_ACL_LEN 4
+#define MLXSW_REG_PAGT_ACL_MAX_NUM 16
+#define MLXSW_REG_PAGT_LEN (MLXSW_REG_PAGT_BASE_LEN + \
+ MLXSW_REG_PAGT_ACL_MAX_NUM * MLXSW_REG_PAGT_ACL_LEN)
+
+MLXSW_REG_DEFINE(pagt, MLXSW_REG_PAGT_ID, MLXSW_REG_PAGT_LEN);
+
+/* reg_pagt_size
+ * Number of ACLs in the group.
+ * Size 0 invalidates a group.
+ * Range 0 .. cap_max_acl_group_size (hard coded to 16 for now)
+ * The total number of ACLs in all groups must be lower than or equal
+ * to cap_max_acl_tot_groups.
+ * Note: a group which is bound must not be invalidated.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pagt, size, 0x00, 0, 8);
+
+/* reg_pagt_acl_group_id
+ * An identifier (numbered from 0..cap_max_acl_groups-1) representing
+ * the ACL Group identifier (managed by software).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pagt, acl_group_id, 0x08, 0, 16);
+
+/* reg_pagt_multi
+ * Multi-ACL
+ * 0 - This ACL is the last ACL in the multi-ACL
+ * 1 - This ACL is part of a multi-ACL
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pagt, multi, 0x30, 31, 1, 0x04, 0x00, false);
+
+/* reg_pagt_acl_id
+ * ACL identifier
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pagt, acl_id, 0x30, 0, 16, 0x04, 0x00, false);
+
+static inline void mlxsw_reg_pagt_pack(char *payload, u16 acl_group_id)
+{
+ MLXSW_REG_ZERO(pagt, payload);
+ mlxsw_reg_pagt_acl_group_id_set(payload, acl_group_id);
+}
+
+static inline void mlxsw_reg_pagt_acl_id_pack(char *payload, int index,
+ u16 acl_id, bool multi)
+{
+ u8 size = mlxsw_reg_pagt_size_get(payload);
+
+ if (index >= size)
+ mlxsw_reg_pagt_size_set(payload, index + 1);
+ mlxsw_reg_pagt_multi_set(payload, index, multi);
+ mlxsw_reg_pagt_acl_id_set(payload, index, acl_id);
+}
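+
+/* Usage sketch (illustrative, not part of the original interface):
+ * (re)writing the list of ACLs that make up an ACL group. Assumes
+ * mlxsw_reg_write() and MLXSW_REG() from core.h; "core", "acl_group_id",
+ * "acl_ids[]" and "num_acls" are placeholders supplied by the caller.
+ *
+ *	char pagt_pl[MLXSW_REG_PAGT_LEN];
+ *	int err, i;
+ *
+ *	mlxsw_reg_pagt_pack(pagt_pl, acl_group_id);
+ *	for (i = 0; i < num_acls; i++)
+ *		mlxsw_reg_pagt_acl_id_pack(pagt_pl, i, acl_ids[i], false);
+ *	err = mlxsw_reg_write(core, MLXSW_REG(pagt), pagt_pl);
+ */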
+
+/* PTAR - Policy-Engine TCAM Allocation Register
+ * ---------------------------------------------
+ * This register is used for allocation of regions in the TCAM.
+ * Note: Query method is not supported on this register.
+ */
+#define MLXSW_REG_PTAR_ID 0x3006
+#define MLXSW_REG_PTAR_BASE_LEN 0x20
+#define MLXSW_REG_PTAR_KEY_ID_LEN 1
+#define MLXSW_REG_PTAR_KEY_ID_MAX_NUM 16
+#define MLXSW_REG_PTAR_LEN (MLXSW_REG_PTAR_BASE_LEN + \
+ MLXSW_REG_PTAR_KEY_ID_MAX_NUM * MLXSW_REG_PTAR_KEY_ID_LEN)
+
+MLXSW_REG_DEFINE(ptar, MLXSW_REG_PTAR_ID, MLXSW_REG_PTAR_LEN);
+
+enum mlxsw_reg_ptar_op {
+ /* allocate a TCAM region */
+ MLXSW_REG_PTAR_OP_ALLOC,
+ /* resize a TCAM region */
+ MLXSW_REG_PTAR_OP_RESIZE,
+ /* deallocate TCAM region */
+ MLXSW_REG_PTAR_OP_FREE,
+ /* test allocation */
+ MLXSW_REG_PTAR_OP_TEST,
+};
+
+/* reg_ptar_op
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ptar, op, 0x00, 28, 4);
+
+/* reg_ptar_action_set_type
+ * Type of action set to be used on this region.
+ * For Spectrum and Spectrum-2, this is always type 2 - "flexible"
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, ptar, action_set_type, 0x00, 16, 8);
+
+enum mlxsw_reg_ptar_key_type {
+	MLXSW_REG_PTAR_KEY_TYPE_FLEX = 0x50, /* Spectrum */
+ MLXSW_REG_PTAR_KEY_TYPE_FLEX2 = 0x51, /* Spectrum-2 */
+};
+
+/* reg_ptar_key_type
+ * TCAM key type for the region.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, ptar, key_type, 0x00, 0, 8);
+
+/* reg_ptar_region_size
+ * TCAM region size. When allocating/resizing this is the requested size,
+ * the response is the actual size. Note that actual size may be
+ * larger than requested.
+ * Allowed range 1 .. cap_max_rules-1
+ * Reserved during op deallocate.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, ptar, region_size, 0x04, 0, 16);
+
+/* reg_ptar_region_id
+ * Region identifier
+ * Range 0 .. cap_max_regions-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptar, region_id, 0x08, 0, 16);
+
+/* reg_ptar_tcam_region_info
+ * Opaque object that represents the TCAM region.
+ * Returned when allocating a region.
+ * Provided by software for ACL generation and region deallocation and resize.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ptar, tcam_region_info, 0x10,
+ MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN);
+
+/* reg_ptar_flexible_key_id
+ * Identifier of the Flexible Key.
+ * Only valid if key_type == "FLEX_KEY"
+ * The key size will be rounded up to one of the following values:
+ * 9B, 18B, 36B, 54B.
+ * This field is reserved for the resize operation.
+ * Access: WO
+ */
+MLXSW_ITEM8_INDEXED(reg, ptar, flexible_key_id, 0x20, 0, 8,
+ MLXSW_REG_PTAR_KEY_ID_LEN, 0x00, false);
+
+static inline void mlxsw_reg_ptar_pack(char *payload, enum mlxsw_reg_ptar_op op,
+ enum mlxsw_reg_ptar_key_type key_type,
+ u16 region_size, u16 region_id,
+ const char *tcam_region_info)
+{
+ MLXSW_REG_ZERO(ptar, payload);
+ mlxsw_reg_ptar_op_set(payload, op);
+ mlxsw_reg_ptar_action_set_type_set(payload, 2); /* "flexible" */
+ mlxsw_reg_ptar_key_type_set(payload, key_type);
+ mlxsw_reg_ptar_region_size_set(payload, region_size);
+ mlxsw_reg_ptar_region_id_set(payload, region_id);
+ mlxsw_reg_ptar_tcam_region_info_memcpy_to(payload, tcam_region_info);
+}
+
+static inline void mlxsw_reg_ptar_key_id_pack(char *payload, int index,
+ u16 key_id)
+{
+ mlxsw_reg_ptar_flexible_key_id_set(payload, index, key_id);
+}
+
+static inline void mlxsw_reg_ptar_unpack(char *payload, char *tcam_region_info)
+{
+ mlxsw_reg_ptar_tcam_region_info_memcpy_from(payload, tcam_region_info);
+}
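+
+/* Usage sketch (illustrative, not part of the original interface):
+ * allocating a flexible-key TCAM region and retrieving the opaque
+ * tcam_region_info that later registers (PACL, PRCR, PTCE-V2, ...) refer
+ * to. Assumes mlxsw_reg_write() and MLXSW_REG() from core.h, and that the
+ * write response carries the region info back in the payload; "core",
+ * "region_size", "region_id", "key_block_ids[]" and "num_key_blocks" are
+ * placeholders supplied by the caller.
+ *
+ *	char ptar_pl[MLXSW_REG_PTAR_LEN];
+ *	char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN] = {};
+ *	int err, i;
+ *
+ *	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
+ *			    MLXSW_REG_PTAR_KEY_TYPE_FLEX, region_size,
+ *			    region_id, tcam_region_info);
+ *	for (i = 0; i < num_key_blocks; i++)
+ *		mlxsw_reg_ptar_key_id_pack(ptar_pl, i, key_block_ids[i]);
+ *	err = mlxsw_reg_write(core, MLXSW_REG(ptar), ptar_pl);
+ *	if (!err)
+ *		mlxsw_reg_ptar_unpack(ptar_pl, tcam_region_info);
+ */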
+
+/* PPBS - Policy-Engine Policy Based Switching Register
+ * ----------------------------------------------------
+ * This register retrieves and sets Policy Based Switching Table entries.
+ */
+#define MLXSW_REG_PPBS_ID 0x300C
+#define MLXSW_REG_PPBS_LEN 0x14
+
+MLXSW_REG_DEFINE(ppbs, MLXSW_REG_PPBS_ID, MLXSW_REG_PPBS_LEN);
+
+/* reg_ppbs_pbs_ptr
+ * Index into the PBS table.
+ * For Spectrum, the index points to the KVD Linear.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppbs, pbs_ptr, 0x08, 0, 24);
+
+/* reg_ppbs_system_port
+ * Unique port identifier for the final destination of the packet.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppbs, system_port, 0x10, 0, 16);
+
+static inline void mlxsw_reg_ppbs_pack(char *payload, u32 pbs_ptr,
+ u16 system_port)
+{
+ MLXSW_REG_ZERO(ppbs, payload);
+ mlxsw_reg_ppbs_pbs_ptr_set(payload, pbs_ptr);
+ mlxsw_reg_ppbs_system_port_set(payload, system_port);
+}
+
+/* PRCR - Policy-Engine Rules Copy Register
+ * ----------------------------------------
+ * This register is used for accessing rules within a TCAM region.
+ */
+#define MLXSW_REG_PRCR_ID 0x300D
+#define MLXSW_REG_PRCR_LEN 0x40
+
+MLXSW_REG_DEFINE(prcr, MLXSW_REG_PRCR_ID, MLXSW_REG_PRCR_LEN);
+
+enum mlxsw_reg_prcr_op {
+ /* Move rules. Moves the rules from "tcam_region_info" starting
+ * at offset "offset" to "dest_tcam_region_info"
+ * at offset "dest_offset."
+ */
+ MLXSW_REG_PRCR_OP_MOVE,
+ /* Copy rules. Copies the rules from "tcam_region_info" starting
+ * at offset "offset" to "dest_tcam_region_info"
+ * at offset "dest_offset."
+ */
+ MLXSW_REG_PRCR_OP_COPY,
+};
+
+/* reg_prcr_op
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, prcr, op, 0x00, 28, 4);
+
+/* reg_prcr_offset
+ * Offset within the source region to copy/move from.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, prcr, offset, 0x00, 0, 16);
+
+/* reg_prcr_size
+ * The number of rules to copy/move.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, prcr, size, 0x04, 0, 16);
+
+/* reg_prcr_tcam_region_info
+ * Opaque object that represents the source TCAM region.
+ * Access: Index
+ */
+MLXSW_ITEM_BUF(reg, prcr, tcam_region_info, 0x10,
+ MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN);
+
+/* reg_prcr_dest_offset
+ * Offset within the source region to copy/move to.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, prcr, dest_offset, 0x20, 0, 16);
+
+/* reg_prcr_dest_tcam_region_info
+ * Opaque object that represents the destination TCAM region.
+ * Access: Index
+ */
+MLXSW_ITEM_BUF(reg, prcr, dest_tcam_region_info, 0x30,
+ MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN);
+
+static inline void mlxsw_reg_prcr_pack(char *payload, enum mlxsw_reg_prcr_op op,
+ const char *src_tcam_region_info,
+ u16 src_offset,
+ const char *dest_tcam_region_info,
+ u16 dest_offset, u16 size)
+{
+ MLXSW_REG_ZERO(prcr, payload);
+ mlxsw_reg_prcr_op_set(payload, op);
+ mlxsw_reg_prcr_offset_set(payload, src_offset);
+ mlxsw_reg_prcr_size_set(payload, size);
+ mlxsw_reg_prcr_tcam_region_info_memcpy_to(payload,
+ src_tcam_region_info);
+ mlxsw_reg_prcr_dest_offset_set(payload, dest_offset);
+ mlxsw_reg_prcr_dest_tcam_region_info_memcpy_to(payload,
+ dest_tcam_region_info);
+}
+
+/* PEFA - Policy-Engine Extended Flexible Action Register
+ * ------------------------------------------------------
+ * This register is used for accessing an extended flexible action entry
+ * in the central KVD Linear Database.
+ */
+#define MLXSW_REG_PEFA_ID 0x300F
+#define MLXSW_REG_PEFA_LEN 0xB0
+
+MLXSW_REG_DEFINE(pefa, MLXSW_REG_PEFA_ID, MLXSW_REG_PEFA_LEN);
+
+/* reg_pefa_index
+ * Index in the KVD Linear Centralized Database.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pefa, index, 0x00, 0, 24);
+
+/* reg_pefa_a
+ * Activity.
+ * For a new entry: set if ca=0, cleared if ca=1.
+ * Set if a packet lookup has hit on the specific entry.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pefa, a, 0x04, 29, 1);
+
+/* reg_pefa_ca
+ * Clear activity
+ * On write: the activity is set according to this field.
+ * On read: after reading, the activity is cleared according to ca.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, pefa, ca, 0x04, 24, 1);
+
+#define MLXSW_REG_FLEX_ACTION_SET_LEN 0xA8
+
+/* reg_pefa_flex_action_set
+ * Action-set to perform when rule is matched.
+ * Must be zero padded if action set is shorter.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, pefa, flex_action_set, 0x08, MLXSW_REG_FLEX_ACTION_SET_LEN);
+
+static inline void mlxsw_reg_pefa_pack(char *payload, u32 index, bool ca,
+ const char *flex_action_set)
+{
+ MLXSW_REG_ZERO(pefa, payload);
+ mlxsw_reg_pefa_index_set(payload, index);
+ mlxsw_reg_pefa_ca_set(payload, ca);
+ if (flex_action_set)
+ mlxsw_reg_pefa_flex_action_set_memcpy_to(payload,
+ flex_action_set);
+}
+
+static inline void mlxsw_reg_pefa_unpack(char *payload, bool *p_a)
+{
+ *p_a = mlxsw_reg_pefa_a_get(payload);
+}
+
+/* PEMRBT - Policy-Engine Multicast Router Binding Table Register
+ * --------------------------------------------------------------
+ * This register is used for binding Multicast router to an ACL group
+ * that serves the MC router.
+ * This register is not supported by SwitchX/-2 and Spectrum.
+ */
+#define MLXSW_REG_PEMRBT_ID 0x3014
+#define MLXSW_REG_PEMRBT_LEN 0x14
+
+MLXSW_REG_DEFINE(pemrbt, MLXSW_REG_PEMRBT_ID, MLXSW_REG_PEMRBT_LEN);
+
+enum mlxsw_reg_pemrbt_protocol {
+ MLXSW_REG_PEMRBT_PROTO_IPV4,
+ MLXSW_REG_PEMRBT_PROTO_IPV6,
+};
+
+/* reg_pemrbt_protocol
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pemrbt, protocol, 0x00, 0, 1);
+
+/* reg_pemrbt_group_id
+ * ACL group identifier.
+ * Range 0..cap_max_acl_groups-1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pemrbt, group_id, 0x10, 0, 16);
+
+static inline void
+mlxsw_reg_pemrbt_pack(char *payload, enum mlxsw_reg_pemrbt_protocol protocol,
+ u16 group_id)
+{
+ MLXSW_REG_ZERO(pemrbt, payload);
+ mlxsw_reg_pemrbt_protocol_set(payload, protocol);
+ mlxsw_reg_pemrbt_group_id_set(payload, group_id);
+}
+
+/* PTCE-V2 - Policy-Engine TCAM Entry Register Version 2
+ * -----------------------------------------------------
+ * This register is used for accessing rules within a TCAM region.
+ * It is a new version of PTCE in order to support wider key,
+ * mask and action within a TCAM region. This register is not supported
+ * by SwitchX and SwitchX-2.
+ */
+#define MLXSW_REG_PTCE2_ID 0x3017
+#define MLXSW_REG_PTCE2_LEN 0x1D8
+
+MLXSW_REG_DEFINE(ptce2, MLXSW_REG_PTCE2_ID, MLXSW_REG_PTCE2_LEN);
+
+/* reg_ptce2_v
+ * Valid.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce2, v, 0x00, 31, 1);
+
+/* reg_ptce2_a
+ * Activity. Set if a packet lookup has hit on the specific entry.
+ * To clear the "a" bit, use "clear activity" op or "clear on read" op.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptce2, a, 0x00, 30, 1);
+
+enum mlxsw_reg_ptce2_op {
+ /* Read operation. */
+ MLXSW_REG_PTCE2_OP_QUERY_READ = 0,
+ /* clear on read operation. Used to read entry
+ * and clear Activity bit.
+ */
+ MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ = 1,
+ /* Write operation. Used to write a new entry to the table.
+ * All R/W fields are relevant for new entry. Activity bit is set
+ * for new entries - Note write with v = 0 will delete the entry.
+ */
+ MLXSW_REG_PTCE2_OP_WRITE_WRITE = 0,
+ /* Update action. Only action set will be updated. */
+ MLXSW_REG_PTCE2_OP_WRITE_UPDATE = 1,
+ /* Clear activity. A bit is cleared for the entry. */
+ MLXSW_REG_PTCE2_OP_WRITE_CLEAR_ACTIVITY = 2,
+};
+
+/* reg_ptce2_op
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ptce2, op, 0x00, 20, 3);
+
+/* reg_ptce2_offset
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptce2, offset, 0x00, 0, 16);
+
+/* reg_ptce2_priority
+ * Priority of the rule, higher values win. The range is 1..cap_kvd_size-1.
+ * Note: priority does not have to be unique per rule.
+ * Within a region, higher priority should have lower offset (no limitation
+ * between regions in a multi-region).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce2, priority, 0x04, 0, 24);
+
+/* reg_ptce2_tcam_region_info
+ * Opaque object that represents the TCAM region.
+ * Access: Index
+ */
+MLXSW_ITEM_BUF(reg, ptce2, tcam_region_info, 0x10,
+ MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN);
+
+#define MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN 96
+
+/* reg_ptce2_flex_key_blocks
+ * ACL Key.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ptce2, flex_key_blocks, 0x20,
+ MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
+
+/* reg_ptce2_mask
+ * Mask, same size as the key. A bit that is set directs the TCAM
+ * to compare the corresponding bit in key. A bit that is clear directs
+ * the TCAM to ignore the corresponding bit in key.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ptce2, mask, 0x80,
+ MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
+
+/* reg_ptce2_flex_action_set
+ * ACL action set.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ptce2, flex_action_set, 0xE0,
+ MLXSW_REG_FLEX_ACTION_SET_LEN);
+
+static inline void mlxsw_reg_ptce2_pack(char *payload, bool valid,
+ enum mlxsw_reg_ptce2_op op,
+ const char *tcam_region_info,
+ u16 offset, u32 priority)
+{
+ MLXSW_REG_ZERO(ptce2, payload);
+ mlxsw_reg_ptce2_v_set(payload, valid);
+ mlxsw_reg_ptce2_op_set(payload, op);
+ mlxsw_reg_ptce2_offset_set(payload, offset);
+ mlxsw_reg_ptce2_priority_set(payload, priority);
+ mlxsw_reg_ptce2_tcam_region_info_memcpy_to(payload, tcam_region_info);
+}
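+
+/* Usage sketch (illustrative, not part of the original interface): writing
+ * a rule into a C-TCAM region. The key, mask and action set buffers are
+ * built by the flexible key/action code and copied into the payload with
+ * the generated _memcpy_to() helpers. Assumes mlxsw_reg_write() and
+ * MLXSW_REG() from core.h; "core", "tcam_region_info", "offset",
+ * "priority", "key", "mask" and "action_set" are placeholders supplied by
+ * the caller.
+ *
+ *	char ptce2_pl[MLXSW_REG_PTCE2_LEN];
+ *	int err;
+ *
+ *	mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
+ *			     tcam_region_info, offset, priority);
+ *	mlxsw_reg_ptce2_flex_key_blocks_memcpy_to(ptce2_pl, key);
+ *	mlxsw_reg_ptce2_mask_memcpy_to(ptce2_pl, mask);
+ *	mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, action_set);
+ *	err = mlxsw_reg_write(core, MLXSW_REG(ptce2), ptce2_pl);
+ */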
+
+/* PERPT - Policy-Engine ERP Table Register
+ * ----------------------------------------
+ * This register adds and removes eRPs from the eRP table.
+ */
+#define MLXSW_REG_PERPT_ID 0x3021
+#define MLXSW_REG_PERPT_LEN 0x80
+
+MLXSW_REG_DEFINE(perpt, MLXSW_REG_PERPT_ID, MLXSW_REG_PERPT_LEN);
+
+/* reg_perpt_erpt_bank
+ * eRP table bank.
+ * Range 0 .. cap_max_erp_table_banks - 1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, perpt, erpt_bank, 0x00, 16, 4);
+
+/* reg_perpt_erpt_index
+ * Index to eRP table within the eRP bank.
+ * Range is 0 .. cap_max_erp_table_bank_size - 1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, perpt, erpt_index, 0x00, 0, 8);
+
+enum mlxsw_reg_perpt_key_size {
+ MLXSW_REG_PERPT_KEY_SIZE_2KB,
+ MLXSW_REG_PERPT_KEY_SIZE_4KB,
+ MLXSW_REG_PERPT_KEY_SIZE_8KB,
+ MLXSW_REG_PERPT_KEY_SIZE_12KB,
+};
+
+/* reg_perpt_key_size
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, perpt, key_size, 0x04, 0, 4);
+
+/* reg_perpt_bf_bypass
+ * 0 - The eRP is used only if bloom filter state is set for the given
+ * rule.
+ * 1 - The eRP is used regardless of bloom filter state.
+ * The bypass is an OR condition of region_id or eRP. See PERCR.bf_bypass
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, perpt, bf_bypass, 0x08, 8, 1);
+
+/* reg_perpt_erp_id
+ * eRP ID for use by the rules.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, perpt, erp_id, 0x08, 0, 4);
+
+/* reg_perpt_erpt_base_bank
+ * Base eRP table bank, points to head of erp_vector
+ * Range is 0 .. cap_max_erp_table_banks - 1
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, perpt, erpt_base_bank, 0x0C, 16, 4);
+
+/* reg_perpt_erpt_base_index
+ * Base index to eRP table within the eRP bank
+ * Range is 0 .. cap_max_erp_table_bank_size - 1
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, perpt, erpt_base_index, 0x0C, 0, 8);
+
+/* reg_perpt_erp_index_in_vector
+ * eRP index in the vector.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, perpt, erp_index_in_vector, 0x10, 0, 4);
+
+/* reg_perpt_erp_vector
+ * eRP vector.
+ * Access: OP
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, perpt, erp_vector, 0x14, 4, 1);
+
+/* reg_perpt_mask
+ * Mask
+ * 0 - A-TCAM will ignore the bit in key
+ * 1 - A-TCAM will compare the bit in key
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, perpt, mask, 0x20, MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
+
+static inline void mlxsw_reg_perpt_erp_vector_pack(char *payload,
+ unsigned long *erp_vector,
+ unsigned long size)
+{
+ unsigned long bit;
+
+ for_each_set_bit(bit, erp_vector, size)
+ mlxsw_reg_perpt_erp_vector_set(payload, bit, true);
+}
+
+static inline void
+mlxsw_reg_perpt_pack(char *payload, u8 erpt_bank, u8 erpt_index,
+ enum mlxsw_reg_perpt_key_size key_size, u8 erp_id,
+ u8 erpt_base_bank, u8 erpt_base_index, u8 erp_index,
+ char *mask)
+{
+ MLXSW_REG_ZERO(perpt, payload);
+ mlxsw_reg_perpt_erpt_bank_set(payload, erpt_bank);
+ mlxsw_reg_perpt_erpt_index_set(payload, erpt_index);
+ mlxsw_reg_perpt_key_size_set(payload, key_size);
+ mlxsw_reg_perpt_bf_bypass_set(payload, false);
+ mlxsw_reg_perpt_erp_id_set(payload, erp_id);
+ mlxsw_reg_perpt_erpt_base_bank_set(payload, erpt_base_bank);
+ mlxsw_reg_perpt_erpt_base_index_set(payload, erpt_base_index);
+ mlxsw_reg_perpt_erp_index_in_vector_set(payload, erp_index);
+ mlxsw_reg_perpt_mask_memcpy_to(payload, mask);
+}
+
+/* PERAR - Policy-Engine Region Association Register
+ * -------------------------------------------------
+ * This register associates a HW region with a region_id. Changing the
+ * association on the fly is supported by the device.
+ */
+#define MLXSW_REG_PERAR_ID 0x3026
+#define MLXSW_REG_PERAR_LEN 0x08
+
+MLXSW_REG_DEFINE(perar, MLXSW_REG_PERAR_ID, MLXSW_REG_PERAR_LEN);
+
+/* reg_perar_region_id
+ * Region identifier
+ * Range 0 .. cap_max_regions-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, perar, region_id, 0x00, 0, 16);
+
+static inline unsigned int
+mlxsw_reg_perar_hw_regions_needed(unsigned int block_num)
+{
+ return DIV_ROUND_UP(block_num, 4);
+}
+
+/* reg_perar_hw_region
+ * HW Region
+ * Range 0 .. cap_max_regions-1
+ * Default: hw_region = region_id
+ * For an 8 key block region, 2 consecutive regions are used
+ * For a 12 key block region, 3 consecutive regions are used
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, perar, hw_region, 0x04, 0, 16);
+
+static inline void mlxsw_reg_perar_pack(char *payload, u16 region_id,
+ u16 hw_region)
+{
+ MLXSW_REG_ZERO(perar, payload);
+ mlxsw_reg_perar_region_id_set(payload, region_id);
+ mlxsw_reg_perar_hw_region_set(payload, hw_region);
+}
+
+/* PTCE-V3 - Policy-Engine TCAM Entry Register Version 3
+ * -----------------------------------------------------
+ * This register is a new version of PTCE-V2 in order to support the
+ * A-TCAM. This register is not supported by SwitchX/-2 and Spectrum.
+ */
+#define MLXSW_REG_PTCE3_ID 0x3027
+#define MLXSW_REG_PTCE3_LEN 0xF0
+
+MLXSW_REG_DEFINE(ptce3, MLXSW_REG_PTCE3_ID, MLXSW_REG_PTCE3_LEN);
+
+/* reg_ptce3_v
+ * Valid.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce3, v, 0x00, 31, 1);
+
+enum mlxsw_reg_ptce3_op {
+ /* Write operation. Used to write a new entry to the table.
+ * All R/W fields are relevant for new entry. Activity bit is set
+ * for new entries. Write with v = 0 will delete the entry. Must
+ * not be used if an entry exists.
+ */
+ MLXSW_REG_PTCE3_OP_WRITE_WRITE = 0,
+ /* Update operation */
+ MLXSW_REG_PTCE3_OP_WRITE_UPDATE = 1,
+ /* Read operation */
+ MLXSW_REG_PTCE3_OP_QUERY_READ = 0,
+};
+
+/* reg_ptce3_op
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ptce3, op, 0x00, 20, 3);
+
+/* reg_ptce3_priority
+ * Priority of the rule. Higher values win.
+ * For Spectrum-2 range is 1..cap_kvd_size - 1
+ * Note: Priority does not have to be unique per rule.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce3, priority, 0x04, 0, 24);
+
+/* reg_ptce3_tcam_region_info
+ * Opaque object that represents the TCAM region.
+ * Access: Index
+ */
+MLXSW_ITEM_BUF(reg, ptce3, tcam_region_info, 0x10,
+ MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN);
+
+/* reg_ptce3_flex2_key_blocks
+ * ACL key. The key must be masked according to eRP (if exists) or
+ * according to master mask.
+ * Access: Index
+ */
+MLXSW_ITEM_BUF(reg, ptce3, flex2_key_blocks, 0x20,
+ MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
+
+/* reg_ptce3_erp_id
+ * eRP ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptce3, erp_id, 0x80, 0, 4);
+
+/* reg_ptce3_delta_start
+ * Start point of delta_value and delta_mask, in bits. Must not exceed
+ * num_key_blocks * 36 - 8. Reserved when delta_mask = 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptce3, delta_start, 0x84, 0, 10);
+
+/* reg_ptce3_delta_mask
+ * Delta mask.
+ * 0 - Ignore relevant bit in delta_value
+ * 1 - Compare relevant bit in delta_value
+ * Delta mask must not be set for reserved fields in the key blocks.
+ * Note: No delta when no eRPs. Thus, for regions with
+ * PERERP.erpt_pointer_valid = 0 the delta mask must be 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptce3, delta_mask, 0x88, 16, 8);
+
+/* reg_ptce3_delta_value
+ * Delta value.
+ * Bits which are masked by delta_mask must be 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptce3, delta_value, 0x88, 0, 8);
+
+/* reg_ptce3_prune_vector
+ * Pruning vector relative to the PERPT.erp_id.
+ * Used for reducing lookups.
+ * 0 - NEED: Do a lookup using the eRP.
+ * 1 - PRUNE: Do not perform a lookup using the eRP.
+ * May be modified by PEAPBL and PEAPBM.
+ * Note: In Spectrum-2, a region of 8 key blocks must be set to either
+ * all 1's or all 0's.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, ptce3, prune_vector, 0x90, 4, 1);
+
+/* reg_ptce3_prune_ctcam
+ * Pruning on C-TCAM. Used for reducing lookups.
+ * 0 - NEED: Do a lookup in the C-TCAM.
+ * 1 - PRUNE: Do not perform a lookup in the C-TCAM.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce3, prune_ctcam, 0x94, 31, 1);
+
+/* reg_ptce3_large_exists
+ * Large entry key ID exists.
+ * Within the region:
+ * 0 - SINGLE: The large_entry_key_id is not currently in use.
+ * For rule insert: The MSB of the key (blocks 6..11) will be added.
+ * For rule delete: The MSB of the key will be removed.
+ * 1 - NON_SINGLE: The large_entry_key_id is currently in use.
+ * For rule insert: The MSB of the key (blocks 6..11) will not be added.
+ * For rule delete: The MSB of the key will not be removed.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, ptce3, large_exists, 0x98, 31, 1);
+
+/* reg_ptce3_large_entry_key_id
+ * Large entry key ID.
+ * A key for 12 key blocks rules. Reserved when region has less than 12 key
+ * blocks. Must be different for different keys which have the same common
+ * 6 key blocks (MSB, blocks 6..11) key within a region.
+ * Range is 0..cap_max_pe_large_key_id - 1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce3, large_entry_key_id, 0x98, 0, 24);
+
+/* reg_ptce3_action_pointer
+ * Pointer to action.
+ * Range is 0..cap_max_kvd_action_sets - 1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce3, action_pointer, 0xA0, 0, 24);
+
+static inline void mlxsw_reg_ptce3_pack(char *payload, bool valid,
+ enum mlxsw_reg_ptce3_op op,
+ u32 priority,
+ const char *tcam_region_info,
+ const char *key, u8 erp_id,
+ u16 delta_start, u8 delta_mask,
+ u8 delta_value, bool large_exists,
+ u32 lkey_id, u32 action_pointer)
+{
+ MLXSW_REG_ZERO(ptce3, payload);
+ mlxsw_reg_ptce3_v_set(payload, valid);
+ mlxsw_reg_ptce3_op_set(payload, op);
+ mlxsw_reg_ptce3_priority_set(payload, priority);
+ mlxsw_reg_ptce3_tcam_region_info_memcpy_to(payload, tcam_region_info);
+ mlxsw_reg_ptce3_flex2_key_blocks_memcpy_to(payload, key);
+ mlxsw_reg_ptce3_erp_id_set(payload, erp_id);
+ mlxsw_reg_ptce3_delta_start_set(payload, delta_start);
+ mlxsw_reg_ptce3_delta_mask_set(payload, delta_mask);
+ mlxsw_reg_ptce3_delta_value_set(payload, delta_value);
+ mlxsw_reg_ptce3_large_exists_set(payload, large_exists);
+ mlxsw_reg_ptce3_large_entry_key_id_set(payload, lkey_id);
+ mlxsw_reg_ptce3_action_pointer_set(payload, action_pointer);
+}
+
+/* PERCR - Policy-Engine Region Configuration Register
+ * ---------------------------------------------------
+ * This register configures the region parameters. The region_id must be
+ * allocated.
+ */
+#define MLXSW_REG_PERCR_ID 0x302A
+#define MLXSW_REG_PERCR_LEN 0x80
+
+MLXSW_REG_DEFINE(percr, MLXSW_REG_PERCR_ID, MLXSW_REG_PERCR_LEN);
+
+/* reg_percr_region_id
+ * Region identifier.
+ * Range 0..cap_max_regions-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, percr, region_id, 0x00, 0, 16);
+
+/* reg_percr_atcam_ignore_prune
+ * Ignore prune_vector by other A-TCAM rules. Used e.g., for a new rule.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, percr, atcam_ignore_prune, 0x04, 25, 1);
+
+/* reg_percr_ctcam_ignore_prune
+ * Ignore prune_ctcam by other A-TCAM rules. Used e.g., for a new rule.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, percr, ctcam_ignore_prune, 0x04, 24, 1);
+
+/* reg_percr_bf_bypass
+ * Bloom filter bypass.
+ * 0 - Bloom filter is used (default)
+ * 1 - Bloom filter is bypassed. The bypass is an OR condition of
+ * region_id or eRP. See PERPT.bf_bypass
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, percr, bf_bypass, 0x04, 16, 1);
+
+/* reg_percr_master_mask
+ * Master mask. Logical OR mask of all masks of all rules of a region
+ * (both A-TCAM and C-TCAM). When there are no eRPs
+ * (erpt_pointer_valid = 0), then this provides the mask.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, percr, master_mask, 0x20, 96);
+
+static inline void mlxsw_reg_percr_pack(char *payload, u16 region_id)
+{
+ MLXSW_REG_ZERO(percr, payload);
+ mlxsw_reg_percr_region_id_set(payload, region_id);
+ mlxsw_reg_percr_atcam_ignore_prune_set(payload, false);
+ mlxsw_reg_percr_ctcam_ignore_prune_set(payload, false);
+ mlxsw_reg_percr_bf_bypass_set(payload, false);
+}
+
+/* PERERP - Policy-Engine Region eRP Register
+ * ------------------------------------------
+ * This register configures the region eRP. The region_id must be
+ * allocated.
+ */
+#define MLXSW_REG_PERERP_ID 0x302B
+#define MLXSW_REG_PERERP_LEN 0x1C
+
+MLXSW_REG_DEFINE(pererp, MLXSW_REG_PERERP_ID, MLXSW_REG_PERERP_LEN);
+
+/* reg_pererp_region_id
+ * Region identifier.
+ * Range 0..cap_max_regions-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pererp, region_id, 0x00, 0, 16);
+
+/* reg_pererp_ctcam_le
+ * C-TCAM lookup enable. Reserved when erpt_pointer_valid = 0.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pererp, ctcam_le, 0x04, 28, 1);
+
+/* reg_pererp_erpt_pointer_valid
+ * erpt_pointer is valid.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pererp, erpt_pointer_valid, 0x10, 31, 1);
+
+/* reg_pererp_erpt_bank_pointer
+ * Pointer to eRP table bank. May be modified at any time.
+ * Range 0..cap_max_erp_table_banks-1
+ * Reserved when erpt_pointer_valid = 0
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pererp, erpt_bank_pointer, 0x10, 16, 4);
+
+/* reg_pererp_erpt_pointer
+ * Pointer to eRP table within the eRP bank. Can be changed for an
+ * existing region.
+ * Range 0..cap_max_erp_table_size-1
+ * Reserved when erpt_pointer_valid = 0
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pererp, erpt_pointer, 0x10, 0, 8);
+
+/* reg_pererp_erpt_vector
+ * Vector of allowed eRP indexes starting from erpt_pointer within the
+ * erpt_bank_pointer. Next entries will be in next bank.
+ * Note that eRP index is used and not eRP ID.
+ * Reserved when erpt_pointer_valid = 0
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, pererp, erpt_vector, 0x14, 4, 1);
+
+/* reg_pererp_master_rp_id
+ * Master RP ID. When there are no eRPs, then this provides the eRP ID
+ * for the lookup. Can be changed for an existing region.
+ * Reserved when erpt_pointer_valid = 1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pererp, master_rp_id, 0x18, 0, 4);
+
+static inline void mlxsw_reg_pererp_erp_vector_pack(char *payload,
+ unsigned long *erp_vector,
+ unsigned long size)
+{
+ unsigned long bit;
+
+ for_each_set_bit(bit, erp_vector, size)
+ mlxsw_reg_pererp_erpt_vector_set(payload, bit, true);
+}
+
+static inline void mlxsw_reg_pererp_pack(char *payload, u16 region_id,
+ bool ctcam_le, bool erpt_pointer_valid,
+ u8 erpt_bank_pointer, u8 erpt_pointer,
+ u8 master_rp_id)
+{
+ MLXSW_REG_ZERO(pererp, payload);
+ mlxsw_reg_pererp_region_id_set(payload, region_id);
+ mlxsw_reg_pererp_ctcam_le_set(payload, ctcam_le);
+ mlxsw_reg_pererp_erpt_pointer_valid_set(payload, erpt_pointer_valid);
+ mlxsw_reg_pererp_erpt_bank_pointer_set(payload, erpt_bank_pointer);
+ mlxsw_reg_pererp_erpt_pointer_set(payload, erpt_pointer);
+ mlxsw_reg_pererp_master_rp_id_set(payload, master_rp_id);
+}
+
+/* PEABFE - Policy-Engine Algorithmic Bloom Filter Entries Register
+ * ----------------------------------------------------------------
+ * This register configures the Bloom filter entries.
+ */
+#define MLXSW_REG_PEABFE_ID 0x3022
+#define MLXSW_REG_PEABFE_BASE_LEN 0x10
+#define MLXSW_REG_PEABFE_BF_REC_LEN 0x4
+#define MLXSW_REG_PEABFE_BF_REC_MAX_COUNT 256
+#define MLXSW_REG_PEABFE_LEN (MLXSW_REG_PEABFE_BASE_LEN + \
+ MLXSW_REG_PEABFE_BF_REC_LEN * \
+ MLXSW_REG_PEABFE_BF_REC_MAX_COUNT)
+
+MLXSW_REG_DEFINE(peabfe, MLXSW_REG_PEABFE_ID, MLXSW_REG_PEABFE_LEN);
+
+/* reg_peabfe_size
+ * Number of BF entries to be updated.
+ * Range 1..256
+ * Access: Op
+ */
+MLXSW_ITEM32(reg, peabfe, size, 0x00, 0, 9);
+
+/* reg_peabfe_bf_entry_state
+ * Bloom filter state
+ * 0 - Clear
+ * 1 - Set
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, peabfe, bf_entry_state,
+ MLXSW_REG_PEABFE_BASE_LEN, 31, 1,
+ MLXSW_REG_PEABFE_BF_REC_LEN, 0x00, false);
+
+/* reg_peabfe_bf_entry_bank
+ * Bloom filter bank ID
+ * Range 0..cap_max_erp_table_banks-1
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, peabfe, bf_entry_bank,
+ MLXSW_REG_PEABFE_BASE_LEN, 24, 4,
+ MLXSW_REG_PEABFE_BF_REC_LEN, 0x00, false);
+
+/* reg_peabfe_bf_entry_index
+ * Bloom filter entry index
+ * Range 0..2^cap_max_bf_log-1
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, peabfe, bf_entry_index,
+ MLXSW_REG_PEABFE_BASE_LEN, 0, 24,
+ MLXSW_REG_PEABFE_BF_REC_LEN, 0x00, false);
+
+static inline void mlxsw_reg_peabfe_pack(char *payload)
+{
+ MLXSW_REG_ZERO(peabfe, payload);
+}
+
+static inline void mlxsw_reg_peabfe_rec_pack(char *payload, int rec_index,
+ u8 state, u8 bank, u32 bf_index)
+{
+ u8 num_rec = mlxsw_reg_peabfe_size_get(payload);
+
+ if (rec_index >= num_rec)
+ mlxsw_reg_peabfe_size_set(payload, rec_index + 1);
+ mlxsw_reg_peabfe_bf_entry_state_set(payload, rec_index, state);
+ mlxsw_reg_peabfe_bf_entry_bank_set(payload, rec_index, bank);
+ mlxsw_reg_peabfe_bf_entry_index_set(payload, rec_index, bf_index);
+}
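+
+/* Usage sketch (illustrative, not part of the original interface): setting
+ * a batch of Bloom filter entries. The payload is too large for the stack,
+ * so it is allocated dynamically. Assumes mlxsw_reg_write() and MLXSW_REG()
+ * from core.h; "core", "banks[]", "bf_indexes[]" and "num_entries" are
+ * placeholders supplied by the caller.
+ *
+ *	char *peabfe_pl = kmalloc(MLXSW_REG_PEABFE_LEN, GFP_KERNEL);
+ *	int err, i;
+ *
+ *	if (!peabfe_pl)
+ *		return -ENOMEM;
+ *	mlxsw_reg_peabfe_pack(peabfe_pl);
+ *	for (i = 0; i < num_entries; i++)
+ *		mlxsw_reg_peabfe_rec_pack(peabfe_pl, i, 1, banks[i],
+ *					  bf_indexes[i]);
+ *	err = mlxsw_reg_write(core, MLXSW_REG(peabfe), peabfe_pl);
+ *	kfree(peabfe_pl);
+ */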
+
+/* IEDR - Infrastructure Entry Delete Register
+ * ----------------------------------------------------
+ * This register is used for deleting entries from the entry tables.
+ * It is legitimate to attempt to delete a nonexistent entry (the device
+ * will respond with success).
+ */
+#define MLXSW_REG_IEDR_ID 0x3804
+#define MLXSW_REG_IEDR_BASE_LEN 0x10 /* base length, without records */
+#define MLXSW_REG_IEDR_REC_LEN 0x8 /* record length */
+#define MLXSW_REG_IEDR_REC_MAX_COUNT 64
+#define MLXSW_REG_IEDR_LEN (MLXSW_REG_IEDR_BASE_LEN + \
+ MLXSW_REG_IEDR_REC_LEN * \
+ MLXSW_REG_IEDR_REC_MAX_COUNT)
+
+MLXSW_REG_DEFINE(iedr, MLXSW_REG_IEDR_ID, MLXSW_REG_IEDR_LEN);
+
+/* reg_iedr_num_rec
+ * Number of records.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, iedr, num_rec, 0x00, 0, 8);
+
+/* reg_iedr_rec_type
+ * Resource type.
+ * Access: OP
+ */
+MLXSW_ITEM32_INDEXED(reg, iedr, rec_type, MLXSW_REG_IEDR_BASE_LEN, 24, 8,
+ MLXSW_REG_IEDR_REC_LEN, 0x00, false);
+
+/* reg_iedr_rec_size
+ * Size of entries to be deleted. The unit is 1 entry, regardless of entry type.
+ * Access: OP
+ */
+MLXSW_ITEM32_INDEXED(reg, iedr, rec_size, MLXSW_REG_IEDR_BASE_LEN, 0, 13,
+ MLXSW_REG_IEDR_REC_LEN, 0x00, false);
+
+/* reg_iedr_rec_index_start
+ * Resource index start.
+ * Access: OP
+ */
+MLXSW_ITEM32_INDEXED(reg, iedr, rec_index_start, MLXSW_REG_IEDR_BASE_LEN, 0, 24,
+ MLXSW_REG_IEDR_REC_LEN, 0x04, false);
+
+static inline void mlxsw_reg_iedr_pack(char *payload)
+{
+ MLXSW_REG_ZERO(iedr, payload);
+}
+
+static inline void mlxsw_reg_iedr_rec_pack(char *payload, int rec_index,
+ u8 rec_type, u16 rec_size,
+ u32 rec_index_start)
+{
+ u8 num_rec = mlxsw_reg_iedr_num_rec_get(payload);
+
+ if (rec_index >= num_rec)
+ mlxsw_reg_iedr_num_rec_set(payload, rec_index + 1);
+ mlxsw_reg_iedr_rec_type_set(payload, rec_index, rec_type);
+ mlxsw_reg_iedr_rec_size_set(payload, rec_index, rec_size);
+ mlxsw_reg_iedr_rec_index_start_set(payload, rec_index, rec_index_start);
+}
+
+/* QPTS - QoS Priority Trust State Register
+ * ----------------------------------------
+ * This register controls the port policy to calculate the switch priority and
+ * packet color based on incoming packet fields.
+ */
+#define MLXSW_REG_QPTS_ID 0x4002
+#define MLXSW_REG_QPTS_LEN 0x8
+
+MLXSW_REG_DEFINE(qpts, MLXSW_REG_QPTS_ID, MLXSW_REG_QPTS_LEN);
+
+/* reg_qpts_local_port
+ * Local port number.
+ * Access: Index
+ *
+ * Note: CPU port is supported.
+ */
+MLXSW_ITEM32_LP(reg, qpts, 0x00, 16, 0x00, 12);
+
+enum mlxsw_reg_qpts_trust_state {
+ MLXSW_REG_QPTS_TRUST_STATE_PCP = 1,
+ MLXSW_REG_QPTS_TRUST_STATE_DSCP = 2, /* For MPLS, trust EXP. */
+};
+
+/* reg_qpts_trust_state
+ * Trust state for a given port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qpts, trust_state, 0x04, 0, 3);
+
+static inline void mlxsw_reg_qpts_pack(char *payload, u16 local_port,
+ enum mlxsw_reg_qpts_trust_state ts)
+{
+ MLXSW_REG_ZERO(qpts, payload);
+
+ mlxsw_reg_qpts_local_port_set(payload, local_port);
+ mlxsw_reg_qpts_trust_state_set(payload, ts);
+}
+
+/* QPCR - QoS Policer Configuration Register
+ * -----------------------------------------
+ * The QPCR register is used to create policers that limit
+ * the rate of bytes or packets via some trap group.
+ */
+#define MLXSW_REG_QPCR_ID 0x4004
+#define MLXSW_REG_QPCR_LEN 0x28
+
+MLXSW_REG_DEFINE(qpcr, MLXSW_REG_QPCR_ID, MLXSW_REG_QPCR_LEN);
+
+enum mlxsw_reg_qpcr_g {
+ MLXSW_REG_QPCR_G_GLOBAL = 2,
+ MLXSW_REG_QPCR_G_STORM_CONTROL = 3,
+};
+
+/* reg_qpcr_g
+ * The policer type.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qpcr, g, 0x00, 14, 2);
+
+/* reg_qpcr_pid
+ * Policer ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qpcr, pid, 0x00, 0, 14);
+
+/* reg_qpcr_clear_counter
+ * Clear counters.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, qpcr, clear_counter, 0x04, 31, 1);
+
+/* reg_qpcr_color_aware
+ * Whether the policer is color aware.
+ * Must be 0 (unaware) for the CPU port.
+ * Access: RW for unbounded policer. RO for bounded policer.
+ */
+MLXSW_ITEM32(reg, qpcr, color_aware, 0x04, 15, 1);
+
+/* reg_qpcr_bytes
+ * Whether the policer limit is in bytes per second or packets per second.
+ * 0 - packets
+ * 1 - bytes
+ * Access: RW for unbounded policer. RO for bounded policer.
+ */
+MLXSW_ITEM32(reg, qpcr, bytes, 0x04, 14, 1);
+
+enum mlxsw_reg_qpcr_ir_units {
+ MLXSW_REG_QPCR_IR_UNITS_M,
+ MLXSW_REG_QPCR_IR_UNITS_K,
+};
+
+/* reg_qpcr_ir_units
+ * Policer's units for the cir and eir fields (for bytes limits only)
+ * 0 - 10^6
+ * 1 - 10^3
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, qpcr, ir_units, 0x04, 12, 1);
+
+enum mlxsw_reg_qpcr_rate_type {
+ MLXSW_REG_QPCR_RATE_TYPE_SINGLE = 1,
+ MLXSW_REG_QPCR_RATE_TYPE_DOUBLE = 2,
+};
+
+/* reg_qpcr_rate_type
+ * A policer can have one limit (single rate) or two limits, with a specific
+ * action for packets that exceed the lower rate but not the upper one.
+ * (For the CPU port, must be single rate.)
+ * Access: RW for unbounded policer. RO for bounded policer.
+ */
+MLXSW_ITEM32(reg, qpcr, rate_type, 0x04, 8, 2);
+
+/* reg_qpcr_cbs
+ * Policer's committed burst size.
+ * The policer works with time slices of 50 nanoseconds. By default, every
+ * slice is granted the proportionate share of the committed rate. To allow
+ * a slice to exceed that share (while still keeping the per-second rate),
+ * a burst can be allowed. The burst size ranges from the default
+ * proportionate share (and no lower than 8) up to 32Gb. (A value higher
+ * than the committed rate will result in exceeding the rate.) The burst
+ * size is a power of 2, determined by 2^cbs.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qpcr, cbs, 0x08, 24, 6);
+
+/* reg_qpcr_cir
+ * Policer's committed rate.
+ * The rate used for single rate, the lower rate for double rate.
+ * For bytes limits, the rate will be this value * the unit from ir_units.
+ * (Resolution error is up to 1%).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qpcr, cir, 0x0C, 0, 32);
+
+/* reg_qpcr_eir
+ * Policer's exceed rate.
+ * The higher rate for a double rate policer, reserved for single rate.
+ * For bytes limits, the rate will be this value * the unit from ir_units.
+ * (Resolution error is up to 1%).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qpcr, eir, 0x10, 0, 32);
+
+#define MLXSW_REG_QPCR_DOUBLE_RATE_ACTION 2
+
+/* reg_qpcr_exceed_action
+ * What to do with packets between the 2 limits for double rate.
+ * Access: RW for unbounded policer. RO for bounded policer.
+ */
+MLXSW_ITEM32(reg, qpcr, exceed_action, 0x14, 0, 4);
+
+enum mlxsw_reg_qpcr_action {
+ /* Discard */
+ MLXSW_REG_QPCR_ACTION_DISCARD = 1,
+ /* Forward and set color to red.
+ * If the packet is intended to cpu port, it will be dropped.
+ */
+ MLXSW_REG_QPCR_ACTION_FORWARD = 2,
+};
+
+/* reg_qpcr_violate_action
+ * What to do with packets that cross the cir limit (for single rate) or the eir
+ * limit (for double rate).
+ * Access: RW for unbounded policer. RO for bounded policer.
+ */
+MLXSW_ITEM32(reg, qpcr, violate_action, 0x18, 0, 4);
+
+/* reg_qpcr_violate_count
+ * Counts the number of times violate_action happened on this PID.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, qpcr, violate_count, 0x20, 0, 64);
+
+/* Packets */
+#define MLXSW_REG_QPCR_LOWEST_CIR 1
+#define MLXSW_REG_QPCR_HIGHEST_CIR (2 * 1000 * 1000 * 1000) /* 2Gpps */
+#define MLXSW_REG_QPCR_LOWEST_CBS 4
+#define MLXSW_REG_QPCR_HIGHEST_CBS 24
+
+/* Bandwidth */
+#define MLXSW_REG_QPCR_LOWEST_CIR_BITS 1024 /* bps */
+#define MLXSW_REG_QPCR_HIGHEST_CIR_BITS 2000000000000ULL /* 2Tbps */
+#define MLXSW_REG_QPCR_LOWEST_CBS_BITS_SP1 4
+#define MLXSW_REG_QPCR_LOWEST_CBS_BITS_SP2 4
+#define MLXSW_REG_QPCR_HIGHEST_CBS_BITS_SP1 25
+#define MLXSW_REG_QPCR_HIGHEST_CBS_BITS_SP2 31
+
+static inline void mlxsw_reg_qpcr_pack(char *payload, u16 pid,
+ enum mlxsw_reg_qpcr_ir_units ir_units,
+ bool bytes, u32 cir, u16 cbs)
+{
+ MLXSW_REG_ZERO(qpcr, payload);
+ mlxsw_reg_qpcr_pid_set(payload, pid);
+ mlxsw_reg_qpcr_g_set(payload, MLXSW_REG_QPCR_G_GLOBAL);
+ mlxsw_reg_qpcr_rate_type_set(payload, MLXSW_REG_QPCR_RATE_TYPE_SINGLE);
+ mlxsw_reg_qpcr_violate_action_set(payload,
+ MLXSW_REG_QPCR_ACTION_DISCARD);
+ mlxsw_reg_qpcr_cir_set(payload, cir);
+ mlxsw_reg_qpcr_ir_units_set(payload, ir_units);
+ mlxsw_reg_qpcr_bytes_set(payload, bytes);
+ mlxsw_reg_qpcr_cbs_set(payload, cbs);
+}
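+
+/* Usage sketch (illustrative, not part of the original interface): creating
+ * a single-rate packet policer that discards violating packets, e.g.
+ * limiting a trap group to 1000 packets per second with a burst of
+ * 2^4 = 16 packets (cbs is the log2 of the burst size). Assumes
+ * mlxsw_reg_write() and MLXSW_REG() from core.h; "core" and "policer_id"
+ * are placeholders supplied by the caller.
+ *
+ *	char qpcr_pl[MLXSW_REG_QPCR_LEN];
+ *	int err;
+ *
+ *	mlxsw_reg_qpcr_pack(qpcr_pl, policer_id, MLXSW_REG_QPCR_IR_UNITS_M,
+ *			    false, 1000, 4);
+ *	err = mlxsw_reg_write(core, MLXSW_REG(qpcr), qpcr_pl);
+ */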
+
+/* QTCT - QoS Switch Traffic Class Table
+ * -------------------------------------
+ * Configures the mapping between the packet switch priority and the
+ * traffic class on the transmit port.
+ */
+#define MLXSW_REG_QTCT_ID 0x400A
+#define MLXSW_REG_QTCT_LEN 0x08
+
+MLXSW_REG_DEFINE(qtct, MLXSW_REG_QTCT_ID, MLXSW_REG_QTCT_LEN);
+
+/* reg_qtct_local_port
+ * Local port number.
+ * Access: Index
+ *
+ * Note: CPU port is not supported.
+ */
+MLXSW_ITEM32_LP(reg, qtct, 0x00, 16, 0x00, 12);
+
+/* reg_qtct_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qtct, sub_port, 0x00, 8, 8);
+
+/* reg_qtct_switch_prio
+ * Switch priority.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qtct, switch_prio, 0x00, 0, 4);
+
+/* reg_qtct_tclass
+ * Traffic class.
+ * Default values:
+ * switch_prio 0 : tclass 1
+ * switch_prio 1 : tclass 0
+ * switch_prio i : tclass i, for i > 1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qtct, tclass, 0x04, 0, 4);
+
+static inline void mlxsw_reg_qtct_pack(char *payload, u16 local_port,
+ u8 switch_prio, u8 tclass)
+{
+ MLXSW_REG_ZERO(qtct, payload);
+ mlxsw_reg_qtct_local_port_set(payload, local_port);
+ mlxsw_reg_qtct_switch_prio_set(payload, switch_prio);
+ mlxsw_reg_qtct_tclass_set(payload, tclass);
+}
+
+/* QEEC - QoS ETS Element Configuration Register
+ * ---------------------------------------------
+ * Configures the ETS elements.
+ */
+#define MLXSW_REG_QEEC_ID 0x400D
+#define MLXSW_REG_QEEC_LEN 0x20
+
+MLXSW_REG_DEFINE(qeec, MLXSW_REG_QEEC_ID, MLXSW_REG_QEEC_LEN);
+
+/* reg_qeec_local_port
+ * Local port number.
+ * Access: Index
+ *
+ * Note: CPU port is supported.
+ */
+MLXSW_ITEM32_LP(reg, qeec, 0x00, 16, 0x00, 12);
+
+enum mlxsw_reg_qeec_hr {
+ MLXSW_REG_QEEC_HR_PORT,
+ MLXSW_REG_QEEC_HR_GROUP,
+ MLXSW_REG_QEEC_HR_SUBGROUP,
+ MLXSW_REG_QEEC_HR_TC,
+};
+
+/* reg_qeec_element_hierarchy
+ * 0 - Port
+ * 1 - Group
+ * 2 - Subgroup
+ * 3 - Traffic Class
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qeec, element_hierarchy, 0x04, 16, 4);
+
+/* reg_qeec_element_index
+ * The index of the element in the hierarchy.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qeec, element_index, 0x04, 0, 8);
+
+/* reg_qeec_next_element_index
+ * The index of the next (lower) element in the hierarchy.
+ * Access: RW
+ *
+ * Note: Reserved for element_hierarchy 0.
+ */
+MLXSW_ITEM32(reg, qeec, next_element_index, 0x08, 0, 8);
+
+/* reg_qeec_mise
+ * Min shaper configuration enable. Enables configuration of the min
+ * shaper on this ETS element
+ * 0 - Disable
+ * 1 - Enable
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, mise, 0x0C, 31, 1);
+
+/* reg_qeec_ptps
+ * PTP shaper
+ * 0: regular shaper mode
+ * 1: PTP oriented shaper
+ * Allowed only for hierarchy 0
+ * Not supported for CPU port
+ * Note that ptps mode may affect the shaper rates of all hierarchies
+ * Supported only on Spectrum-1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, ptps, 0x0C, 29, 1);
+
+enum {
+ MLXSW_REG_QEEC_BYTES_MODE,
+ MLXSW_REG_QEEC_PACKETS_MODE,
+};
+
+/* reg_qeec_pb
+ * Packets or bytes mode.
+ * 0 - Bytes mode
+ * 1 - Packets mode
+ * Access: RW
+ *
+ * Note: Used for max shaper configuration. For Spectrum, packets mode
+ * is supported only for traffic classes of CPU port.
+ */
+MLXSW_ITEM32(reg, qeec, pb, 0x0C, 28, 1);
+
+/* The smallest permitted min shaper rate. */
+#define MLXSW_REG_QEEC_MIS_MIN 200000 /* Kbps */
+
+/* reg_qeec_min_shaper_rate
+ * Min shaper information rate.
+ * For CPU port, can only be configured for port hierarchy.
+ * When in bytes mode, value is specified in units of 1000bps.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, min_shaper_rate, 0x0C, 0, 28);
+
+/* reg_qeec_mase
+ * Max shaper configuration enable. Enables configuration of the max
+ * shaper on this ETS element.
+ * 0 - Disable
+ * 1 - Enable
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, mase, 0x10, 31, 1);
+
+/* The largest max shaper value possible to disable the shaper. */
+#define MLXSW_REG_QEEC_MAS_DIS ((1u << 31) - 1) /* Kbps */
+
+/* reg_qeec_max_shaper_rate
+ * Max shaper information rate.
+ * For CPU port, can only be configured for port hierarchy.
+ * When in bytes mode, value is specified in units of 1000bps.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, max_shaper_rate, 0x10, 0, 31);
+
+/* reg_qeec_de
+ * DWRR configuration enable. Enables configuration of the dwrr and
+ * dwrr_weight.
+ * 0 - Disable
+ * 1 - Enable
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, de, 0x18, 31, 1);
+
+/* reg_qeec_dwrr
+ * Transmission selection algorithm to use on the link going down from
+ * the ETS element.
+ * 0 - Strict priority
+ * 1 - DWRR
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, dwrr, 0x18, 15, 1);
+
+/* reg_qeec_dwrr_weight
+ * DWRR weight on the link going down from the ETS element. The
+ * percentage of bandwidth guaranteed to an ETS element within
+ * its hierarchy. The sum of all weights across all ETS elements
+ * within one hierarchy should be equal to 100. Reserved when
+ * transmission selection algorithm is strict priority.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, dwrr_weight, 0x18, 0, 8);
+
+/* reg_qeec_max_shaper_bs
+ * Max shaper burst size
+ * Burst size is 2^max_shaper_bs * 512 bits
+ * For Spectrum-1: Range is: 5..25
+ * For Spectrum-2: Range is: 11..25
+ * Reserved when ptps = 1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, max_shaper_bs, 0x1C, 0, 6);
+
+#define MLXSW_REG_QEEC_HIGHEST_SHAPER_BS 25
+#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1 5
+#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2 11
+#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3 11
+#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4 11
+
+static inline void mlxsw_reg_qeec_pack(char *payload, u16 local_port,
+ enum mlxsw_reg_qeec_hr hr, u8 index,
+ u8 next_index)
+{
+ MLXSW_REG_ZERO(qeec, payload);
+ mlxsw_reg_qeec_local_port_set(payload, local_port);
+ mlxsw_reg_qeec_element_hierarchy_set(payload, hr);
+ mlxsw_reg_qeec_element_index_set(payload, index);
+ mlxsw_reg_qeec_next_element_index_set(payload, next_index);
+}
+
+static inline void mlxsw_reg_qeec_ptps_pack(char *payload, u16 local_port,
+ bool ptps)
+{
+ MLXSW_REG_ZERO(qeec, payload);
+ mlxsw_reg_qeec_local_port_set(payload, local_port);
+ mlxsw_reg_qeec_element_hierarchy_set(payload, MLXSW_REG_QEEC_HR_PORT);
+ mlxsw_reg_qeec_ptps_set(payload, ptps);
+}
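+
+/* Usage sketch (illustrative, not part of the original interface): limiting
+ * a traffic class with the max shaper. The pack helper above only selects
+ * the ETS element; the shaper fields are set with the generated setters
+ * before the write. Assumes mlxsw_reg_write() and MLXSW_REG() from core.h;
+ * "core", "local_port", "tclass" and "rate_kbps" are placeholders supplied
+ * by the caller.
+ *
+ *	char qeec_pl[MLXSW_REG_QEEC_LEN];
+ *	int err;
+ *
+ *	mlxsw_reg_qeec_pack(qeec_pl, local_port, MLXSW_REG_QEEC_HR_TC,
+ *			    tclass, tclass);
+ *	mlxsw_reg_qeec_mase_set(qeec_pl, true);
+ *	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, rate_kbps);
+ *	mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl,
+ *					 MLXSW_REG_QEEC_HIGHEST_SHAPER_BS);
+ *	err = mlxsw_reg_write(core, MLXSW_REG(qeec), qeec_pl);
+ */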
+
+/* QRWE - QoS ReWrite Enable
+ * -------------------------
+ * This register configures the rewrite enable per receive port.
+ */
+#define MLXSW_REG_QRWE_ID 0x400F
+#define MLXSW_REG_QRWE_LEN 0x08
+
+MLXSW_REG_DEFINE(qrwe, MLXSW_REG_QRWE_ID, MLXSW_REG_QRWE_LEN);
+
+/* reg_qrwe_local_port
+ * Local port number.
+ * Access: Index
+ *
+ * Note: CPU port is supported. No support for router port.
+ */
+MLXSW_ITEM32_LP(reg, qrwe, 0x00, 16, 0x00, 12);
+
+/* reg_qrwe_dscp
+ * Whether to enable DSCP rewrite (default is 0, don't rewrite).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qrwe, dscp, 0x04, 1, 1);
+
+/* reg_qrwe_pcp
+ * Whether to enable PCP and DEI rewrite (default is 0, don't rewrite).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qrwe, pcp, 0x04, 0, 1);
+
+static inline void mlxsw_reg_qrwe_pack(char *payload, u16 local_port,
+ bool rewrite_pcp, bool rewrite_dscp)
+{
+ MLXSW_REG_ZERO(qrwe, payload);
+ mlxsw_reg_qrwe_local_port_set(payload, local_port);
+ mlxsw_reg_qrwe_pcp_set(payload, rewrite_pcp);
+ mlxsw_reg_qrwe_dscp_set(payload, rewrite_dscp);
+}
+
+/* QPDSM - QoS Priority to DSCP Mapping
+ * ------------------------------------
+ * QoS Priority to DSCP Mapping Register
+ */
+#define MLXSW_REG_QPDSM_ID 0x4011
+#define MLXSW_REG_QPDSM_BASE_LEN 0x04 /* base length, without records */
+#define MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN 0x4 /* record length */
+#define MLXSW_REG_QPDSM_PRIO_ENTRY_REC_MAX_COUNT 16
+#define MLXSW_REG_QPDSM_LEN (MLXSW_REG_QPDSM_BASE_LEN + \
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN * \
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_MAX_COUNT)
+
+MLXSW_REG_DEFINE(qpdsm, MLXSW_REG_QPDSM_ID, MLXSW_REG_QPDSM_LEN);
+
+/* reg_qpdsm_local_port
+ * Local Port. Supported for data packets from CPU port.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, qpdsm, 0x00, 16, 0x00, 12);
+
+/* reg_qpdsm_prio_entry_color0_e
+ * Enable update of the entry for color 0 and a given port.
+ * Access: WO
+ */
+MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color0_e,
+ MLXSW_REG_QPDSM_BASE_LEN, 31, 1,
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_qpdsm_prio_entry_color0_dscp
+ * DSCP field in the outer label of the packet for color 0 and a given port.
+ * Reserved when e=0.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color0_dscp,
+ MLXSW_REG_QPDSM_BASE_LEN, 24, 6,
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_qpdsm_prio_entry_color1_e
+ * Enable update of the entry for color 1 and a given port.
+ * Access: WO
+ */
+MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color1_e,
+ MLXSW_REG_QPDSM_BASE_LEN, 23, 1,
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_qpdsm_prio_entry_color1_dscp
+ * DSCP field in the outer label of the packet for color 1 and a given port.
+ * Reserved when e=0.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color1_dscp,
+ MLXSW_REG_QPDSM_BASE_LEN, 16, 6,
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_qpdsm_prio_entry_color2_e
+ * Enable update of the entry for color 2 and a given port.
+ * Access: WO
+ */
+MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color2_e,
+ MLXSW_REG_QPDSM_BASE_LEN, 15, 1,
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_qpdsm_prio_entry_color2_dscp
+ * DSCP field in the outer label of the packet for color 2 and a given port.
+ * Reserved when e=0.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color2_dscp,
+ MLXSW_REG_QPDSM_BASE_LEN, 8, 6,
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false);
+
+static inline void mlxsw_reg_qpdsm_pack(char *payload, u16 local_port)
+{
+ MLXSW_REG_ZERO(qpdsm, payload);
+ mlxsw_reg_qpdsm_local_port_set(payload, local_port);
+}
+
+static inline void
+mlxsw_reg_qpdsm_prio_pack(char *payload, unsigned short prio, u8 dscp)
+{
+ mlxsw_reg_qpdsm_prio_entry_color0_e_set(payload, prio, 1);
+ mlxsw_reg_qpdsm_prio_entry_color0_dscp_set(payload, prio, dscp);
+ mlxsw_reg_qpdsm_prio_entry_color1_e_set(payload, prio, 1);
+ mlxsw_reg_qpdsm_prio_entry_color1_dscp_set(payload, prio, dscp);
+ mlxsw_reg_qpdsm_prio_entry_color2_e_set(payload, prio, 1);
+ mlxsw_reg_qpdsm_prio_entry_color2_dscp_set(payload, prio, dscp);
+}
+
+/* QPDP - QoS Port DSCP to Priority Mapping Register
+ * -------------------------------------------------
+ * This register controls the port default Switch Priority and Color. The
+ * default Switch Priority and Color are used for frames where the trust state
+ * uses default values. All member ports of a LAG should be configured with the
+ * same default values.
+ */
+#define MLXSW_REG_QPDP_ID 0x4007
+#define MLXSW_REG_QPDP_LEN 0x8
+
+MLXSW_REG_DEFINE(qpdp, MLXSW_REG_QPDP_ID, MLXSW_REG_QPDP_LEN);
+
+/* reg_qpdp_local_port
+ * Local Port. Supported for data packets from CPU port.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, qpdp, 0x00, 16, 0x00, 12);
+
+/* reg_qpdp_switch_prio
+ * Default port Switch Priority (default 0)
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qpdp, switch_prio, 0x04, 0, 4);
+
+static inline void mlxsw_reg_qpdp_pack(char *payload, u16 local_port,
+ u8 switch_prio)
+{
+ MLXSW_REG_ZERO(qpdp, payload);
+ mlxsw_reg_qpdp_local_port_set(payload, local_port);
+ mlxsw_reg_qpdp_switch_prio_set(payload, switch_prio);
+}
+
+/* QPDPM - QoS Port DSCP to Priority Mapping Register
+ * --------------------------------------------------
+ * This register controls the mapping from DSCP field to
+ * Switch Priority for IP packets.
+ */
+#define MLXSW_REG_QPDPM_ID 0x4013
+#define MLXSW_REG_QPDPM_BASE_LEN 0x4 /* base length, without records */
+#define MLXSW_REG_QPDPM_DSCP_ENTRY_REC_LEN 0x2 /* record length */
+#define MLXSW_REG_QPDPM_DSCP_ENTRY_REC_MAX_COUNT 64
+#define MLXSW_REG_QPDPM_LEN (MLXSW_REG_QPDPM_BASE_LEN + \
+ MLXSW_REG_QPDPM_DSCP_ENTRY_REC_LEN * \
+ MLXSW_REG_QPDPM_DSCP_ENTRY_REC_MAX_COUNT)
+
+MLXSW_REG_DEFINE(qpdpm, MLXSW_REG_QPDPM_ID, MLXSW_REG_QPDPM_LEN);
+
+/* reg_qpdpm_local_port
+ * Local Port. Supported for data packets from CPU port.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, qpdpm, 0x00, 16, 0x00, 12);
+
+/* reg_qpdpm_dscp_e
+ * Enable update of the specific entry. When cleared, the switch_prio and color
+ * fields are ignored and the previous switch_prio and color values are
+ * preserved.
+ * Access: WO
+ */
+MLXSW_ITEM16_INDEXED(reg, qpdpm, dscp_entry_e, MLXSW_REG_QPDPM_BASE_LEN, 15, 1,
+ MLXSW_REG_QPDPM_DSCP_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_qpdpm_dscp_prio
+ * The new Switch Priority value for the relevant DSCP value.
+ * Access: RW
+ */
+MLXSW_ITEM16_INDEXED(reg, qpdpm, dscp_entry_prio,
+ MLXSW_REG_QPDPM_BASE_LEN, 0, 4,
+ MLXSW_REG_QPDPM_DSCP_ENTRY_REC_LEN, 0x00, false);
+
+static inline void mlxsw_reg_qpdpm_pack(char *payload, u16 local_port)
+{
+ MLXSW_REG_ZERO(qpdpm, payload);
+ mlxsw_reg_qpdpm_local_port_set(payload, local_port);
+}
+
+static inline void
+mlxsw_reg_qpdpm_dscp_pack(char *payload, unsigned short dscp, u8 prio)
+{
+ mlxsw_reg_qpdpm_dscp_entry_e_set(payload, dscp, 1);
+ mlxsw_reg_qpdpm_dscp_entry_prio_set(payload, dscp, prio);
+}
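+
+/* Usage sketch (not part of the register definition): programming the ingress
+ * DSCP to switch priority map of a port with the QPDPM helpers above. The
+ * function name and the dscp_to_prio array are hypothetical;
+ * mlxsw_reg_write()/MLXSW_REG() are assumed to come from core.h.
+ */
+static inline int example_qpdpm_dscp_map(struct mlxsw_core *mlxsw_core,
+ u16 local_port,
+ const u8 *dscp_to_prio)
+{
+ char qpdpm_pl[MLXSW_REG_QPDPM_LEN];
+ unsigned short dscp;
+
+ mlxsw_reg_qpdpm_pack(qpdpm_pl, local_port);
+ for (dscp = 0; dscp < MLXSW_REG_QPDPM_DSCP_ENTRY_REC_MAX_COUNT; dscp++)
+ mlxsw_reg_qpdpm_dscp_pack(qpdpm_pl, dscp, dscp_to_prio[dscp]);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpdpm), qpdpm_pl);
+}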
+
+/* QTCTM - QoS Switch Traffic Class Table is Multicast-Aware Register
+ * ------------------------------------------------------------------
+ * This register configures if the Switch Priority to Traffic Class mapping is
+ * based on Multicast packet indication. If so, multicast packets will get a
+ * Traffic Class that is the value configured by QTCT plus
+ * (cap_max_tclass_data/2).
+ * By default, Switch Priority to Traffic Class mapping is not based on
+ * Multicast packet indication.
+ */
+#define MLXSW_REG_QTCTM_ID 0x401A
+#define MLXSW_REG_QTCTM_LEN 0x08
+
+MLXSW_REG_DEFINE(qtctm, MLXSW_REG_QTCTM_ID, MLXSW_REG_QTCTM_LEN);
+
+/* reg_qtctm_local_port
+ * Local port number.
+ * No support for CPU port.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, qtctm, 0x00, 16, 0x00, 12);
+
+/* reg_qtctm_mc
+ * Multicast Mode
+ * Whether Switch Priority to Traffic Class mapping is based on Multicast packet
+ * indication (default is 0, not based on Multicast packet indication).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qtctm, mc, 0x04, 0, 1);
+
+static inline void
+mlxsw_reg_qtctm_pack(char *payload, u16 local_port, bool mc)
+{
+ MLXSW_REG_ZERO(qtctm, payload);
+ mlxsw_reg_qtctm_local_port_set(payload, local_port);
+ mlxsw_reg_qtctm_mc_set(payload, mc);
+}
+
+/* QPSC - QoS PTP Shaper Configuration Register
+ * --------------------------------------------
+ * The QPSC allows advanced configuration of the shapers when QEEC.ptps=1.
+ * Supported only on Spectrum-1.
+ */
+#define MLXSW_REG_QPSC_ID 0x401B
+#define MLXSW_REG_QPSC_LEN 0x28
+
+MLXSW_REG_DEFINE(qpsc, MLXSW_REG_QPSC_ID, MLXSW_REG_QPSC_LEN);
+
+enum mlxsw_reg_qpsc_port_speed {
+ MLXSW_REG_QPSC_PORT_SPEED_100M,
+ MLXSW_REG_QPSC_PORT_SPEED_1G,
+ MLXSW_REG_QPSC_PORT_SPEED_10G,
+ MLXSW_REG_QPSC_PORT_SPEED_25G,
+};
+
+/* reg_qpsc_port_speed
+ * Port speed.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qpsc, port_speed, 0x00, 0, 4);
+
+/* reg_qpsc_shaper_time_exp
+ * The base-time-interval for updating the shapers tokens (for all hierarchies).
+ * shaper_update_rate = 2 ^ shaper_time_exp * (1 + shaper_time_mantissa) * 32nSec
+ * shaper_rate = 64bit * shaper_inc / shaper_update_rate
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qpsc, shaper_time_exp, 0x04, 16, 4);
+
+/* reg_qpsc_shaper_time_mantissa
+ * The base-time-interval for updating the shapers tokens (for all hierarchies).
+ * shaper_update_rate = 2 ^ shaper_time_exp * (1 + shaper_time_mantissa) * 32nSec
+ * shaper_rate = 64bit * shaper_inc / shaper_update_rate
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qpsc, shaper_time_mantissa, 0x04, 0, 5);
+
+/* reg_qpsc_shaper_inc
+ * Number of tokens added to shaper on each update.
+ * Units of 8B.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qpsc, shaper_inc, 0x08, 0, 5);
+
+/* reg_qpsc_shaper_bs
+ * Max shaper Burst size.
+ * Burst size is 2 ^ max_shaper_bs * 512 [bits]
+ * Range is: 5..25 (from 2KB..2GB)
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qpsc, shaper_bs, 0x0C, 0, 6);
+
+/* reg_qpsc_ptsc_we
+ * Write enable to port_to_shaper_credits.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, qpsc, ptsc_we, 0x10, 31, 1);
+
+/* reg_qpsc_port_to_shaper_credits
+ * For split ports: range 1..57
+ * For non-split ports: range 1..112
+ * Written only when ptsc_we is set.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qpsc, port_to_shaper_credits, 0x10, 0, 8);
+
+/* reg_qpsc_ing_timestamp_inc
+ * Ingress timestamp increment.
+ * 2's complement.
+ * The timestamp of MTPPTR at ingress will be incremented by this value. Global
+ * value for all ports.
+ * Same units as used by MTPPTR.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qpsc, ing_timestamp_inc, 0x20, 0, 32);
+
+/* reg_qpsc_egr_timestamp_inc
+ * Egress timestamp increment.
+ * 2's complement.
+ * The timestamp of MTPPTR at egress will be incremented by this value. Global
+ * value for all ports.
+ * Same units as used by MTPPTR.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qpsc, egr_timestamp_inc, 0x24, 0, 32);
+
+static inline void
+mlxsw_reg_qpsc_pack(char *payload, enum mlxsw_reg_qpsc_port_speed port_speed,
+ u8 shaper_time_exp, u8 shaper_time_mantissa, u8 shaper_inc,
+ u8 shaper_bs, u8 port_to_shaper_credits,
+ int ing_timestamp_inc, int egr_timestamp_inc)
+{
+ MLXSW_REG_ZERO(qpsc, payload);
+ mlxsw_reg_qpsc_port_speed_set(payload, port_speed);
+ mlxsw_reg_qpsc_shaper_time_exp_set(payload, shaper_time_exp);
+ mlxsw_reg_qpsc_shaper_time_mantissa_set(payload, shaper_time_mantissa);
+ mlxsw_reg_qpsc_shaper_inc_set(payload, shaper_inc);
+ mlxsw_reg_qpsc_shaper_bs_set(payload, shaper_bs);
+ mlxsw_reg_qpsc_ptsc_we_set(payload, true);
+ mlxsw_reg_qpsc_port_to_shaper_credits_set(payload, port_to_shaper_credits);
+ mlxsw_reg_qpsc_ing_timestamp_inc_set(payload, ing_timestamp_inc);
+ mlxsw_reg_qpsc_egr_timestamp_inc_set(payload, egr_timestamp_inc);
+}
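+
+/* Worked example (illustrative numbers only, not a recommended
+ * configuration): with shaper_time_exp = 3, shaper_time_mantissa = 4 and
+ * shaper_inc = 10, the formulas above give
+ * shaper_update_rate = 2^3 * (1 + 4) * 32nSec = 1280nSec and
+ * shaper_rate = 64bit * 10 / 1280nSec = 500Mbps.
+ */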
+
+/* PMLP - Ports Module to Local Port Register
+ * ------------------------------------------
+ * Configures the assignment of modules to local ports.
+ */
+#define MLXSW_REG_PMLP_ID 0x5002
+#define MLXSW_REG_PMLP_LEN 0x40
+
+MLXSW_REG_DEFINE(pmlp, MLXSW_REG_PMLP_ID, MLXSW_REG_PMLP_LEN);
+
+/* reg_pmlp_rxtx
+ * 0 - Tx value is used for both Tx and Rx.
+ * 1 - Rx value is taken from a separate field.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmlp, rxtx, 0x00, 31, 1);
+
+/* reg_pmlp_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, pmlp, 0x00, 16, 0x00, 12);
+
+/* reg_pmlp_width
+ * 0 - Unmap local port.
+ * 1 - Lane 0 is used.
+ * 2 - Lanes 0 and 1 are used.
+ * 4 - Lanes 0, 1, 2 and 3 are used.
+ * 8 - Lanes 0-7 are used.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmlp, width, 0x00, 0, 8);
+
+/* reg_pmlp_module
+ * Module number.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0x00, false);
+
+/* reg_pmlp_slot_index
+ * Slot index.
+ * slot_index = 0 represents the onboard (motherboard).
+ * In case of non-modular system only slot_index = 0 is available.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, slot_index, 0x04, 8, 4, 0x04, 0x00, false);
+
+/* reg_pmlp_tx_lane
+ * Tx Lane. When rxtx field is cleared, this field is used for Rx as well.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 4, 0x04, 0x00, false);
+
+/* reg_pmlp_rx_lane
+ * Rx Lane. When rxtx field is cleared, this field is ignored and Rx lane is
+ * equal to Tx lane.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 4, 0x04, 0x00, false);
+
+static inline void mlxsw_reg_pmlp_pack(char *payload, u16 local_port)
+{
+ MLXSW_REG_ZERO(pmlp, payload);
+ mlxsw_reg_pmlp_local_port_set(payload, local_port);
+}
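+
+/* Usage sketch (not part of the register definition): mapping a local port to
+ * <width> lanes of a module, roughly the way a port split flow would use
+ * PMLP. The function name is hypothetical; mlxsw_reg_write()/MLXSW_REG() are
+ * assumed to come from core.h.
+ */
+static inline int example_pmlp_module_map(struct mlxsw_core *mlxsw_core,
+ u16 local_port, u8 slot_index,
+ u8 module, u8 width)
+{
+ char pmlp_pl[MLXSW_REG_PMLP_LEN];
+ int i;
+
+ mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
+ mlxsw_reg_pmlp_width_set(pmlp_pl, width);
+ for (i = 0; i < width; i++) {
+ mlxsw_reg_pmlp_slot_index_set(pmlp_pl, i, slot_index);
+ mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
+ /* rxtx is left cleared, so tx_lane is used for Rx as well. */
+ mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, i);
+ }
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmlp), pmlp_pl);
+}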
+
+/* PMTU - Port MTU Register
+ * ------------------------
+ * Configures and reports the port MTU.
+ */
+#define MLXSW_REG_PMTU_ID 0x5003
+#define MLXSW_REG_PMTU_LEN 0x10
+
+MLXSW_REG_DEFINE(pmtu, MLXSW_REG_PMTU_ID, MLXSW_REG_PMTU_LEN);
+
+/* reg_pmtu_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, pmtu, 0x00, 16, 0x00, 12);
+
+/* reg_pmtu_max_mtu
+ * Maximum MTU.
+ * When port type (e.g. Ethernet) is configured, the relevant MTU is
+ * reported, otherwise the minimum between the max_mtu of the different
+ * types is reported.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pmtu, max_mtu, 0x04, 16, 16);
+
+/* reg_pmtu_admin_mtu
+ * MTU value to set port to. Must be smaller or equal to max_mtu.
+ * Note: If the port type is Infiniband, the port must be disabled when its
+ * MTU is set.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmtu, admin_mtu, 0x08, 16, 16);
+
+/* reg_pmtu_oper_mtu
+ * The actual MTU configured on the port. Packets exceeding this size
+ * will be dropped.
+ * Note: In Ethernet and FC oper_mtu == admin_mtu, however, in Infiniband
+ * oper_mtu might be smaller than admin_mtu.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pmtu, oper_mtu, 0x0C, 16, 16);
+
+static inline void mlxsw_reg_pmtu_pack(char *payload, u16 local_port,
+ u16 new_mtu)
+{
+ MLXSW_REG_ZERO(pmtu, payload);
+ mlxsw_reg_pmtu_local_port_set(payload, local_port);
+ mlxsw_reg_pmtu_max_mtu_set(payload, 0);
+ mlxsw_reg_pmtu_admin_mtu_set(payload, new_mtu);
+ mlxsw_reg_pmtu_oper_mtu_set(payload, 0);
+}
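+
+/* Usage sketch (not part of the register definition): the typical
+ * query-then-set flow for PMTU - read max_mtu first, then write admin_mtu.
+ * The function name is hypothetical; mlxsw_reg_query()/mlxsw_reg_write() are
+ * assumed to come from core.h.
+ */
+static inline int example_pmtu_set(struct mlxsw_core *mlxsw_core,
+ u16 local_port, u16 mtu)
+{
+ char pmtu_pl[MLXSW_REG_PMTU_LEN];
+ int err;
+
+ mlxsw_reg_pmtu_pack(pmtu_pl, local_port, 0);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtu), pmtu_pl);
+ if (err)
+ return err;
+ /* admin_mtu must not exceed the reported max_mtu. */
+ if (mtu > mlxsw_reg_pmtu_max_mtu_get(pmtu_pl))
+ return -EINVAL;
+
+ mlxsw_reg_pmtu_pack(pmtu_pl, local_port, mtu);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmtu), pmtu_pl);
+}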
+
+/* PTYS - Port Type and Speed Register
+ * -----------------------------------
+ * Configures and reports the port speed type.
+ *
+ * Note: When set while the link is up, the changes will not take effect
+ * until the port transitions from down to up state.
+ */
+#define MLXSW_REG_PTYS_ID 0x5004
+#define MLXSW_REG_PTYS_LEN 0x40
+
+MLXSW_REG_DEFINE(ptys, MLXSW_REG_PTYS_ID, MLXSW_REG_PTYS_LEN);
+
+/* reg_ptys_an_disable_admin
+ * Auto negotiation disable administrative configuration.
+ * 0 - Auto negotiation enabled.
+ * 1 - Auto negotiation disabled.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptys, an_disable_admin, 0x00, 30, 1);
+
+/* reg_ptys_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, ptys, 0x00, 16, 0x00, 12);
+
+#define MLXSW_REG_PTYS_PROTO_MASK_IB BIT(0)
+#define MLXSW_REG_PTYS_PROTO_MASK_ETH BIT(2)
+
+/* reg_ptys_proto_mask
+ * Protocol mask. Indicates which protocol is used.
+ * 0 - Infiniband.
+ * 1 - Fibre Channel.
+ * 2 - Ethernet.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptys, proto_mask, 0x00, 0, 3);
+
+enum {
+ MLXSW_REG_PTYS_AN_STATUS_NA,
+ MLXSW_REG_PTYS_AN_STATUS_OK,
+ MLXSW_REG_PTYS_AN_STATUS_FAIL,
+};
+
+/* reg_ptys_an_status
+ * Autonegotiation status.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, an_status, 0x04, 28, 4);
+
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M BIT(0)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII BIT(1)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R BIT(3)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G BIT(4)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G BIT(5)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR BIT(6)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2 BIT(7)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR BIT(8)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4 BIT(9)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2 BIT(10)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4 BIT(12)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8 BIT(15)
+
+/* reg_ptys_ext_eth_proto_cap
+ * Extended Ethernet port supported speeds and protocols.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, ext_eth_proto_cap, 0x08, 0, 32);
+
+#define MLXSW_REG_PTYS_ETH_SPEED_SGMII BIT(0)
+#define MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX BIT(1)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 BIT(2)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 BIT(3)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR BIT(4)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 BIT(6)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 BIT(7)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR BIT(12)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR BIT(13)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR BIT(14)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 BIT(15)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4 BIT(16)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2 BIT(18)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 BIT(19)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 BIT(20)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 BIT(21)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 BIT(22)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4 BIT(23)
+#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_T BIT(24)
+#define MLXSW_REG_PTYS_ETH_SPEED_1000BASE_T BIT(25)
+#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR BIT(27)
+#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR BIT(28)
+#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR BIT(29)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 BIT(30)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2 BIT(31)
+
+/* reg_ptys_eth_proto_cap
+ * Ethernet port supported speeds and protocols.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_cap, 0x0C, 0, 32);
+
+/* reg_ptys_ext_eth_proto_admin
+ * Extended speed and protocol to set port to.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptys, ext_eth_proto_admin, 0x14, 0, 32);
+
+/* reg_ptys_eth_proto_admin
+ * Speed and protocol to set port to.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_admin, 0x18, 0, 32);
+
+/* reg_ptys_ext_eth_proto_oper
+ * The extended current speed and protocol configured for the port.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, ext_eth_proto_oper, 0x20, 0, 32);
+
+/* reg_ptys_eth_proto_oper
+ * The current speed and protocol configured for the port.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_oper, 0x24, 0, 32);
+
+enum mlxsw_reg_ptys_connector_type {
+ MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR,
+ MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE,
+ MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_TP,
+ MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_AUI,
+ MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_BNC,
+ MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_MII,
+ MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_FIBRE,
+ MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_DA,
+ MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_OTHER,
+};
+
+/* reg_ptys_connector_type
+ * Connector type indication.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, connector_type, 0x2C, 0, 4);
+
+static inline void mlxsw_reg_ptys_eth_pack(char *payload, u16 local_port,
+ u32 proto_admin, bool autoneg)
+{
+ MLXSW_REG_ZERO(ptys, payload);
+ mlxsw_reg_ptys_local_port_set(payload, local_port);
+ mlxsw_reg_ptys_proto_mask_set(payload, MLXSW_REG_PTYS_PROTO_MASK_ETH);
+ mlxsw_reg_ptys_eth_proto_admin_set(payload, proto_admin);
+ mlxsw_reg_ptys_an_disable_admin_set(payload, !autoneg);
+}
+
+static inline void mlxsw_reg_ptys_ext_eth_pack(char *payload, u16 local_port,
+ u32 proto_admin, bool autoneg)
+{
+ MLXSW_REG_ZERO(ptys, payload);
+ mlxsw_reg_ptys_local_port_set(payload, local_port);
+ mlxsw_reg_ptys_proto_mask_set(payload, MLXSW_REG_PTYS_PROTO_MASK_ETH);
+ mlxsw_reg_ptys_ext_eth_proto_admin_set(payload, proto_admin);
+ mlxsw_reg_ptys_an_disable_admin_set(payload, !autoneg);
+}
+
+static inline void mlxsw_reg_ptys_eth_unpack(char *payload,
+ u32 *p_eth_proto_cap,
+ u32 *p_eth_proto_admin,
+ u32 *p_eth_proto_oper)
+{
+ if (p_eth_proto_cap)
+ *p_eth_proto_cap =
+ mlxsw_reg_ptys_eth_proto_cap_get(payload);
+ if (p_eth_proto_admin)
+ *p_eth_proto_admin =
+ mlxsw_reg_ptys_eth_proto_admin_get(payload);
+ if (p_eth_proto_oper)
+ *p_eth_proto_oper =
+ mlxsw_reg_ptys_eth_proto_oper_get(payload);
+}
+
+static inline void mlxsw_reg_ptys_ext_eth_unpack(char *payload,
+ u32 *p_eth_proto_cap,
+ u32 *p_eth_proto_admin,
+ u32 *p_eth_proto_oper)
+{
+ if (p_eth_proto_cap)
+ *p_eth_proto_cap =
+ mlxsw_reg_ptys_ext_eth_proto_cap_get(payload);
+ if (p_eth_proto_admin)
+ *p_eth_proto_admin =
+ mlxsw_reg_ptys_ext_eth_proto_admin_get(payload);
+ if (p_eth_proto_oper)
+ *p_eth_proto_oper =
+ mlxsw_reg_ptys_ext_eth_proto_oper_get(payload);
+}
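+
+/* Usage sketch (not part of the register definition): restricting the
+ * advertised speeds of a port to what it actually supports, using the legacy
+ * (non-extended) PTYS fields. The function name is hypothetical;
+ * mlxsw_reg_query()/mlxsw_reg_write() are assumed to come from core.h.
+ */
+static inline int example_ptys_speed_set(struct mlxsw_core *mlxsw_core,
+ u16 local_port, u32 proto_admin)
+{
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 eth_proto_cap;
+ int err;
+
+ mlxsw_reg_ptys_eth_pack(ptys_pl, local_port, 0, false);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(ptys), ptys_pl);
+ if (err)
+ return err;
+ mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
+
+ /* Advertise only supported speeds, with autoneg enabled. */
+ mlxsw_reg_ptys_eth_pack(ptys_pl, local_port,
+ proto_admin & eth_proto_cap, true);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(ptys), ptys_pl);
+}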
+
+/* PPAD - Port Physical Address Register
+ * -------------------------------------
+ * The PPAD register configures the per port physical MAC address.
+ */
+#define MLXSW_REG_PPAD_ID 0x5005
+#define MLXSW_REG_PPAD_LEN 0x10
+
+MLXSW_REG_DEFINE(ppad, MLXSW_REG_PPAD_ID, MLXSW_REG_PPAD_LEN);
+
+/* reg_ppad_single_base_mac
+ * 0: base_mac - local_port should be 0 and mac[7:0] is reserved. HW will
+ * assign the per port MAC addresses incrementally from the base.
+ * 1: single_mac - the MAC address of the local_port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppad, single_base_mac, 0x00, 28, 1);
+
+/* reg_ppad_local_port
+ * Port number. If single_base_mac = 0, then local_port is reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32_LP(reg, ppad, 0x00, 16, 0x00, 24);
+
+/* reg_ppad_mac
+ * If single_base_mac = 0 - base MAC address, mac[7:0] is reserved.
+ * If single_base_mac = 1 - the per port MAC address
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ppad, mac, 0x02, 6);
+
+static inline void mlxsw_reg_ppad_pack(char *payload, bool single_base_mac,
+ u16 local_port)
+{
+ MLXSW_REG_ZERO(ppad, payload);
+ mlxsw_reg_ppad_single_base_mac_set(payload, !!single_base_mac);
+ mlxsw_reg_ppad_local_port_set(payload, local_port);
+}
+
+/* PAOS - Ports Administrative and Operational Status Register
+ * -----------------------------------------------------------
+ * Configures and retrieves per port administrative and operational status.
+ */
+#define MLXSW_REG_PAOS_ID 0x5006
+#define MLXSW_REG_PAOS_LEN 0x10
+
+MLXSW_REG_DEFINE(paos, MLXSW_REG_PAOS_ID, MLXSW_REG_PAOS_LEN);
+
+/* reg_paos_swid
+ * Switch partition ID with which to associate the port.
+ * Note: while external ports use unique local port numbers (and thus swid is
+ * redundant), router ports use the same local port number where swid is the
+ * only indication for the relevant port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, paos, swid, 0x00, 24, 8);
+
+/* reg_paos_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, paos, 0x00, 16, 0x00, 12);
+
+/* reg_paos_admin_status
+ * Port administrative state (the desired state of the port):
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Up once. This means that in case of link failure, the port won't go
+ * into polling mode, but will wait to be re-enabled by software.
+ * 4 - Disabled by system. Can only be set by hardware.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, paos, admin_status, 0x00, 8, 4);
+
+/* reg_paos_oper_status
+ * Port operational state (the current state):
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Down by port failure. This means that the device will not let the
+ * port up again until explicitly specified by software.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, paos, oper_status, 0x00, 0, 4);
+
+/* reg_paos_ase
+ * Admin state update enabled.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, paos, ase, 0x04, 31, 1);
+
+/* reg_paos_ee
+ * Event update enable. If this bit is set, event generation will be
+ * updated based on the e field.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, paos, ee, 0x04, 30, 1);
+
+/* reg_paos_e
+ * Event generation on operational state change:
+ * 0 - Do not generate event.
+ * 1 - Generate Event.
+ * 2 - Generate Single Event.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, paos, e, 0x04, 0, 2);
+
+static inline void mlxsw_reg_paos_pack(char *payload, u16 local_port,
+ enum mlxsw_port_admin_status status)
+{
+ MLXSW_REG_ZERO(paos, payload);
+ mlxsw_reg_paos_swid_set(payload, 0);
+ mlxsw_reg_paos_local_port_set(payload, local_port);
+ mlxsw_reg_paos_admin_status_set(payload, status);
+ mlxsw_reg_paos_oper_status_set(payload, 0);
+ mlxsw_reg_paos_ase_set(payload, 1);
+ mlxsw_reg_paos_ee_set(payload, 1);
+ mlxsw_reg_paos_e_set(payload, 1);
+}
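+
+/* Usage sketch (not part of the register definition): toggling the
+ * administrative state of a port. The function name is hypothetical;
+ * enum mlxsw_port_admin_status comes from port.h and
+ * mlxsw_reg_write()/MLXSW_REG() from core.h.
+ */
+static inline int example_paos_admin_set(struct mlxsw_core *mlxsw_core,
+ u16 local_port, bool up)
+{
+ char paos_pl[MLXSW_REG_PAOS_LEN];
+
+ mlxsw_reg_paos_pack(paos_pl, local_port,
+ up ? MLXSW_PORT_ADMIN_STATUS_UP :
+ MLXSW_PORT_ADMIN_STATUS_DOWN);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(paos), paos_pl);
+}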
+
+/* PFCC - Ports Flow Control Configuration Register
+ * ------------------------------------------------
+ * Configures and retrieves the per port flow control configuration.
+ */
+#define MLXSW_REG_PFCC_ID 0x5007
+#define MLXSW_REG_PFCC_LEN 0x20
+
+MLXSW_REG_DEFINE(pfcc, MLXSW_REG_PFCC_ID, MLXSW_REG_PFCC_LEN);
+
+/* reg_pfcc_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, pfcc, 0x00, 16, 0x00, 12);
+
+/* reg_pfcc_pnat
+ * Port number access type. Determines the way local_port is interpreted:
+ * 0 - Local port number.
+ * 1 - IB / label port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pfcc, pnat, 0x00, 14, 2);
+
+/* reg_pfcc_shl_cap
+ * Send to higher layers capabilities:
+ * 0 - No capability of sending Pause and PFC frames to higher layers.
+ * 1 - Device has capability of sending Pause and PFC frames to higher
+ * layers.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pfcc, shl_cap, 0x00, 1, 1);
+
+/* reg_pfcc_shl_opr
+ * Send to higher layers operation:
+ * 0 - Pause and PFC frames are handled by the port (default).
+ * 1 - Pause and PFC frames are handled by the port and also sent to
+ * higher layers. Only valid if shl_cap = 1.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pfcc, shl_opr, 0x00, 0, 1);
+
+/* reg_pfcc_ppan
+ * Pause policy auto negotiation.
+ * 0 - Disabled. Generate / ignore Pause frames based on pptx / pprtx.
+ * 1 - Enabled. When auto-negotiation is performed, set the Pause policy
+ * based on the auto-negotiation resolution.
+ * Access: RW
+ *
+ * Note: The auto-negotiation advertisement is set according to pptx and
+ * pprtx. When PFC is set on Tx / Rx, ppan must be set to 0.
+ */
+MLXSW_ITEM32(reg, pfcc, ppan, 0x04, 28, 4);
+
+/* reg_pfcc_prio_mask_tx
+ * Bit per priority indicating if Tx flow control policy should be
+ * updated based on bit pfctx.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, pfcc, prio_mask_tx, 0x04, 16, 8);
+
+/* reg_pfcc_prio_mask_rx
+ * Bit per priority indicating if Rx flow control policy should be
+ * updated based on bit pfcrx.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, pfcc, prio_mask_rx, 0x04, 0, 8);
+
+/* reg_pfcc_pptx
+ * Admin Pause policy on Tx.
+ * 0 - Never generate Pause frames (default).
+ * 1 - Generate Pause frames according to Rx buffer threshold.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pfcc, pptx, 0x08, 31, 1);
+
+/* reg_pfcc_aptx
+ * Active (operational) Pause policy on Tx.
+ * 0 - Never generate Pause frames.
+ * 1 - Generate Pause frames according to Rx buffer threshold.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pfcc, aptx, 0x08, 30, 1);
+
+/* reg_pfcc_pfctx
+ * Priority based flow control policy on Tx[7:0]. Per-priority bit mask:
+ * 0 - Never generate priority Pause frames on the specified priority
+ * (default).
+ * 1 - Generate priority Pause frames according to Rx buffer threshold on
+ * the specified priority.
+ * Access: RW
+ *
+ * Note: pfctx and pptx must be mutually exclusive.
+ */
+MLXSW_ITEM32(reg, pfcc, pfctx, 0x08, 16, 8);
+
+/* reg_pfcc_pprx
+ * Admin Pause policy on Rx.
+ * 0 - Ignore received Pause frames (default).
+ * 1 - Respect received Pause frames.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pfcc, pprx, 0x0C, 31, 1);
+
+/* reg_pfcc_aprx
+ * Active (operational) Pause policy on Rx.
+ * 0 - Ignore received Pause frames.
+ * 1 - Respect received Pause frames.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pfcc, aprx, 0x0C, 30, 1);
+
+/* reg_pfcc_pfcrx
+ * Priority based flow control policy on Rx[7:0]. Per-priority bit mask:
+ * 0 - Ignore incoming priority Pause frames on the specified priority
+ * (default).
+ * 1 - Respect incoming priority Pause frames on the specified priority.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pfcc, pfcrx, 0x0C, 16, 8);
+
+#define MLXSW_REG_PFCC_ALL_PRIO 0xFF
+
+static inline void mlxsw_reg_pfcc_prio_pack(char *payload, u8 pfc_en)
+{
+ mlxsw_reg_pfcc_prio_mask_tx_set(payload, MLXSW_REG_PFCC_ALL_PRIO);
+ mlxsw_reg_pfcc_prio_mask_rx_set(payload, MLXSW_REG_PFCC_ALL_PRIO);
+ mlxsw_reg_pfcc_pfctx_set(payload, pfc_en);
+ mlxsw_reg_pfcc_pfcrx_set(payload, pfc_en);
+}
+
+static inline void mlxsw_reg_pfcc_pack(char *payload, u16 local_port)
+{
+ MLXSW_REG_ZERO(pfcc, payload);
+ mlxsw_reg_pfcc_local_port_set(payload, local_port);
+}
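+
+/* Usage sketch (not part of the register definition): enabling PFC on a set
+ * of priorities (and implicitly disabling it on the rest) for a port. The
+ * function name is hypothetical; mlxsw_reg_write()/MLXSW_REG() are assumed to
+ * come from core.h. Note that global pause (pptx/pprx) must be off when any
+ * PFC bit is set, per the pfctx note above.
+ */
+static inline int example_pfcc_pfc_set(struct mlxsw_core *mlxsw_core,
+ u16 local_port, u8 pfc_en)
+{
+ char pfcc_pl[MLXSW_REG_PFCC_LEN];
+
+ mlxsw_reg_pfcc_pack(pfcc_pl, local_port);
+ mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc_en);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pfcc), pfcc_pl);
+}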
+
+/* PPCNT - Ports Performance Counters Register
+ * -------------------------------------------
+ * The PPCNT register retrieves per port performance counters.
+ */
+#define MLXSW_REG_PPCNT_ID 0x5008
+#define MLXSW_REG_PPCNT_LEN 0x100
+#define MLXSW_REG_PPCNT_COUNTERS_OFFSET 0x08
+
+MLXSW_REG_DEFINE(ppcnt, MLXSW_REG_PPCNT_ID, MLXSW_REG_PPCNT_LEN);
+
+/* reg_ppcnt_swid
+ * For HCA: must always be 0.
+ * Switch partition ID to associate port with.
+ * Switch partitions are numbered from 0 to 7 inclusively.
+ * Switch partition 254 indicates stacking ports.
+ * Switch partition 255 indicates all switch partitions.
+ * Only valid on Set() operation with local_port=255.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, swid, 0x00, 24, 8);
+
+/* reg_ppcnt_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, ppcnt, 0x00, 16, 0x00, 12);
+
+/* reg_ppcnt_pnat
+ * Port number access type:
+ * 0 - Local port number
+ * 1 - IB port number
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2);
+
+enum mlxsw_reg_ppcnt_grp {
+ MLXSW_REG_PPCNT_IEEE_8023_CNT = 0x0,
+ MLXSW_REG_PPCNT_RFC_2863_CNT = 0x1,
+ MLXSW_REG_PPCNT_RFC_2819_CNT = 0x2,
+ MLXSW_REG_PPCNT_RFC_3635_CNT = 0x3,
+ MLXSW_REG_PPCNT_EXT_CNT = 0x5,
+ MLXSW_REG_PPCNT_DISCARD_CNT = 0x6,
+ MLXSW_REG_PPCNT_PRIO_CNT = 0x10,
+ MLXSW_REG_PPCNT_TC_CNT = 0x11,
+ MLXSW_REG_PPCNT_TC_CONG_CNT = 0x13,
+};
+
+/* reg_ppcnt_grp
+ * Performance counter group.
+ * Group 63 indicates all groups. Only valid on Set() operation with
+ * clr bit set.
+ * 0x0: IEEE 802.3 Counters
+ * 0x1: RFC 2863 Counters
+ * 0x2: RFC 2819 Counters
+ * 0x3: RFC 3635 Counters
+ * 0x5: Ethernet Extended Counters
+ * 0x6: Ethernet Discard Counters
+ * 0x8: Link Level Retransmission Counters
+ * 0x10: Per Priority Counters
+ * 0x11: Per Traffic Class Counters
+ * 0x12: Physical Layer Counters
+ * 0x13: Per Traffic Class Congestion Counters
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, grp, 0x00, 0, 6);
+
+/* reg_ppcnt_clr
+ * Clear counters. Setting the clr bit will reset the counter value
+ * for all counters in the counter group. This bit can be set
+ * for both Set() and Get() operation.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ppcnt, clr, 0x04, 31, 1);
+
+/* reg_ppcnt_lp_gl
+ * Local port global variable.
+ * 0: local_port 255 = all ports of the device.
+ * 1: local_port indicates local port number for all ports.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ppcnt, lp_gl, 0x04, 30, 1);
+
+/* reg_ppcnt_prio_tc
+ * Priority for counter sets that support per priority, valid values: 0-7.
+ * Traffic class for counter sets that support per traffic class,
+ * valid values: 0..cap_max_tclass-1.
+ * For HCA: cap_max_tclass is always 8.
+ * Otherwise must be 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, prio_tc, 0x04, 0, 5);
+
+/* Ethernet IEEE 802.3 Counter Group */
+
+/* reg_ppcnt_a_frames_transmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frames_transmitted_ok,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x00, 0, 64);
+
+/* reg_ppcnt_a_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frames_received_ok,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
+
+/* reg_ppcnt_a_frame_check_sequence_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frame_check_sequence_errors,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x10, 0, 64);
+
+/* reg_ppcnt_a_alignment_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_alignment_errors,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x18, 0, 64);
+
+/* reg_ppcnt_a_octets_transmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_octets_transmitted_ok,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x20, 0, 64);
+
+/* reg_ppcnt_a_octets_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_octets_received_ok,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x28, 0, 64);
+
+/* reg_ppcnt_a_multicast_frames_xmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_xmitted_ok,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x30, 0, 64);
+
+/* reg_ppcnt_a_broadcast_frames_xmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_xmitted_ok,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x38, 0, 64);
+
+/* reg_ppcnt_a_multicast_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_received_ok,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x40, 0, 64);
+
+/* reg_ppcnt_a_broadcast_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_received_ok,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x48, 0, 64);
+
+/* reg_ppcnt_a_in_range_length_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_in_range_length_errors,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x50, 0, 64);
+
+/* reg_ppcnt_a_out_of_range_length_field
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_out_of_range_length_field,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x58, 0, 64);
+
+/* reg_ppcnt_a_frame_too_long_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frame_too_long_errors,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x60, 0, 64);
+
+/* reg_ppcnt_a_symbol_error_during_carrier
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_symbol_error_during_carrier,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x68, 0, 64);
+
+/* reg_ppcnt_a_mac_control_frames_transmitted
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_mac_control_frames_transmitted,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64);
+
+/* reg_ppcnt_a_mac_control_frames_received
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_mac_control_frames_received,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x78, 0, 64);
+
+/* reg_ppcnt_a_unsupported_opcodes_received
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_unsupported_opcodes_received,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x80, 0, 64);
+
+/* reg_ppcnt_a_pause_mac_ctrl_frames_received
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x88, 0, 64);
+
+/* reg_ppcnt_a_pause_mac_ctrl_frames_transmitted
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x90, 0, 64);
+
+/* Ethernet RFC 2863 Counter Group */
+
+/* reg_ppcnt_if_in_discards
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, if_in_discards,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x10, 0, 64);
+
+/* reg_ppcnt_if_out_discards
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, if_out_discards,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x38, 0, 64);
+
+/* reg_ppcnt_if_out_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, if_out_errors,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x40, 0, 64);
+
+/* Ethernet RFC 2819 Counter Group */
+
+/* reg_ppcnt_ether_stats_undersize_pkts
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_undersize_pkts,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x30, 0, 64);
+
+/* reg_ppcnt_ether_stats_oversize_pkts
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_oversize_pkts,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x38, 0, 64);
+
+/* reg_ppcnt_ether_stats_fragments
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_fragments,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x40, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts64octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts64octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x58, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts65to127octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts65to127octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x60, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts128to255octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts128to255octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x68, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts256to511octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts256to511octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts512to1023octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts512to1023octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x78, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts1024to1518octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts1024to1518octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x80, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts1519to2047octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts1519to2047octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x88, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts2048to4095octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts2048to4095octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x90, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts4096to8191octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts4096to8191octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x98, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts8192to10239octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts8192to10239octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0xA0, 0, 64);
+
+/* Ethernet RFC 3635 Counter Group */
+
+/* reg_ppcnt_dot3stats_fcs_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, dot3stats_fcs_errors,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
+
+/* reg_ppcnt_dot3stats_symbol_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, dot3stats_symbol_errors,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x60, 0, 64);
+
+/* reg_ppcnt_dot3control_in_unknown_opcodes
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, dot3control_in_unknown_opcodes,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x68, 0, 64);
+
+/* reg_ppcnt_dot3in_pause_frames
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, dot3in_pause_frames,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64);
+
+/* Ethernet Extended Counter Group Counters */
+
+/* reg_ppcnt_ecn_marked
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ecn_marked,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
+
+/* Ethernet Discard Counter Group Counters */
+
+/* reg_ppcnt_ingress_general
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ingress_general,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x00, 0, 64);
+
+/* reg_ppcnt_ingress_policy_engine
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ingress_policy_engine,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
+
+/* reg_ppcnt_ingress_vlan_membership
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ingress_vlan_membership,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x10, 0, 64);
+
+/* reg_ppcnt_ingress_tag_frame_type
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ingress_tag_frame_type,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x18, 0, 64);
+
+/* reg_ppcnt_egress_vlan_membership
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_vlan_membership,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x20, 0, 64);
+
+/* reg_ppcnt_loopback_filter
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, loopback_filter,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x28, 0, 64);
+
+/* reg_ppcnt_egress_general
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_general,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x30, 0, 64);
+
+/* reg_ppcnt_egress_hoq
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_hoq,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x40, 0, 64);
+
+/* reg_ppcnt_egress_policy_engine
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_policy_engine,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x50, 0, 64);
+
+/* reg_ppcnt_ingress_tx_link_down
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ingress_tx_link_down,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x58, 0, 64);
+
+/* reg_ppcnt_egress_stp_filter
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_stp_filter,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x60, 0, 64);
+
+/* reg_ppcnt_egress_sll
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_sll,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64);
+
+/* Ethernet Per Priority Group Counters */
+
+/* reg_ppcnt_rx_octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, rx_octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x00, 0, 64);
+
+/* reg_ppcnt_rx_frames
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, rx_frames,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x20, 0, 64);
+
+/* reg_ppcnt_tx_octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, tx_octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x28, 0, 64);
+
+/* reg_ppcnt_tx_frames
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, tx_frames,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x48, 0, 64);
+
+/* reg_ppcnt_rx_pause
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, rx_pause,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x50, 0, 64);
+
+/* reg_ppcnt_rx_pause_duration
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, rx_pause_duration,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x58, 0, 64);
+
+/* reg_ppcnt_tx_pause
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, tx_pause,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x60, 0, 64);
+
+/* reg_ppcnt_tx_pause_duration
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, tx_pause_duration,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x68, 0, 64);
+
+/* reg_ppcnt_tx_pause_transition
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, tx_pause_transition,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64);
+
+/* Ethernet Per Traffic Class Counters */
+
+/* reg_ppcnt_tc_transmit_queue
+ * Contains the transmit queue depth in cells of traffic class
+ * selected by prio_tc and the port selected by local_port.
+ * The field cannot be cleared.
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, tc_transmit_queue,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x00, 0, 64);
+
+/* reg_ppcnt_tc_no_buffer_discard_uc
+ * The number of unicast packets dropped due to lack of shared
+ * buffer resources.
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, tc_no_buffer_discard_uc,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
+
+/* Ethernet Per Traffic Class Congestion Group Counters */
+
+/* reg_ppcnt_wred_discard
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, wred_discard,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x00, 0, 64);
+
+/* reg_ppcnt_ecn_marked_tc
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ecn_marked_tc,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
+
+static inline void mlxsw_reg_ppcnt_pack(char *payload, u16 local_port,
+ enum mlxsw_reg_ppcnt_grp grp,
+ u8 prio_tc)
+{
+ MLXSW_REG_ZERO(ppcnt, payload);
+ mlxsw_reg_ppcnt_swid_set(payload, 0);
+ mlxsw_reg_ppcnt_local_port_set(payload, local_port);
+ mlxsw_reg_ppcnt_pnat_set(payload, 0);
+ mlxsw_reg_ppcnt_grp_set(payload, grp);
+ mlxsw_reg_ppcnt_clr_set(payload, 0);
+ mlxsw_reg_ppcnt_lp_gl_set(payload, 1);
+ mlxsw_reg_ppcnt_prio_tc_set(payload, prio_tc);
+}
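+
+/* Usage sketch (not part of the register definition): reading one counter
+ * from the IEEE 802.3 group of a port. The function name is hypothetical;
+ * mlxsw_reg_query()/MLXSW_REG() are assumed to come from core.h.
+ */
+static inline int example_ppcnt_tx_frames_get(struct mlxsw_core *mlxsw_core,
+ u16 local_port, u64 *p_frames)
+{
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+ int err;
+
+ mlxsw_reg_ppcnt_pack(ppcnt_pl, local_port,
+ MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(ppcnt), ppcnt_pl);
+ if (err)
+ return err;
+ *p_frames = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
+ return 0;
+}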
+
+/* PPTB - Port Prio To Buffer Register
+ * -----------------------------------
+ * Configures the switch priority to buffer table.
+ */
+#define MLXSW_REG_PPTB_ID 0x500B
+#define MLXSW_REG_PPTB_LEN 0x10
+
+MLXSW_REG_DEFINE(pptb, MLXSW_REG_PPTB_ID, MLXSW_REG_PPTB_LEN);
+
+enum {
+ MLXSW_REG_PPTB_MM_UM,
+ MLXSW_REG_PPTB_MM_UNICAST,
+ MLXSW_REG_PPTB_MM_MULTICAST,
+};
+
+/* reg_pptb_mm
+ * Mapping mode.
+ * 0 - Map both unicast and multicast packets to the same buffer.
+ * 1 - Map only unicast packets.
+ * 2 - Map only multicast packets.
+ * Access: Index
+ *
+ * Note: SwitchX-2 only supports the first option.
+ */
+MLXSW_ITEM32(reg, pptb, mm, 0x00, 28, 2);
+
+/* reg_pptb_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, pptb, 0x00, 16, 0x00, 12);
+
+/* reg_pptb_um
+ * Enables the update of the untagged_buf field.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pptb, um, 0x00, 8, 1);
+
+/* reg_pptb_pm
+ * Enables the update of the prio_to_buff field.
+ * Bit <i> is a flag for updating the mapping for switch priority <i>.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pptb, pm, 0x00, 0, 8);
+
+/* reg_pptb_prio_to_buff
+ * Mapping of switch priority <i> to one of the allocated receive port
+ * buffers.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, pptb, prio_to_buff, 0x04, 0x04, 4);
+
+/* reg_pptb_pm_msb
+ * Enables the update of the prio_to_buff field.
+ * Bit <i> is a flag for updating the mapping for switch priority <i+8>.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pptb, pm_msb, 0x08, 24, 8);
+
+/* reg_pptb_untagged_buff
+ * Mapping of untagged frames to one of the allocated receive port buffers.
+ * Access: RW
+ *
+ * Note: In SwitchX-2 this field must be mapped to buffer 8. Reserved for
+ * Spectrum, as it maps untagged packets based on the default switch priority.
+ */
+MLXSW_ITEM32(reg, pptb, untagged_buff, 0x08, 0, 4);
+
+/* reg_pptb_prio_to_buff_msb
+ * Mapping of switch priority <i+8> to one of the allocated receive port
+ * buffers.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, pptb, prio_to_buff_msb, 0x0C, 0x04, 4);
+
+#define MLXSW_REG_PPTB_ALL_PRIO 0xFF
+
+static inline void mlxsw_reg_pptb_pack(char *payload, u16 local_port)
+{
+ MLXSW_REG_ZERO(pptb, payload);
+ mlxsw_reg_pptb_mm_set(payload, MLXSW_REG_PPTB_MM_UM);
+ mlxsw_reg_pptb_local_port_set(payload, local_port);
+ mlxsw_reg_pptb_pm_set(payload, MLXSW_REG_PPTB_ALL_PRIO);
+ mlxsw_reg_pptb_pm_msb_set(payload, MLXSW_REG_PPTB_ALL_PRIO);
+}
+
+static inline void mlxsw_reg_pptb_prio_to_buff_pack(char *payload, u8 prio,
+ u8 buff)
+{
+ mlxsw_reg_pptb_prio_to_buff_set(payload, prio, buff);
+ mlxsw_reg_pptb_prio_to_buff_msb_set(payload, prio, buff);
+}
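+
+/* Usage sketch (not part of the register definition): mapping all eight
+ * switch priorities of a port to a single ingress buffer. The function name
+ * is hypothetical; mlxsw_reg_write()/MLXSW_REG() are assumed to come from
+ * core.h.
+ */
+static inline int example_pptb_prio_map(struct mlxsw_core *mlxsw_core,
+ u16 local_port, u8 buff)
+{
+ char pptb_pl[MLXSW_REG_PPTB_LEN];
+ int prio;
+
+ mlxsw_reg_pptb_pack(pptb_pl, local_port);
+ for (prio = 0; prio < 8; prio++)
+ mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, prio, buff);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pptb), pptb_pl);
+}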
+
+/* PBMC - Port Buffer Management Control Register
+ * ----------------------------------------------
+ * The PBMC register configures and retrieves the port packet buffer
+ * allocation for different Prios, and the Pause threshold management.
+ */
+#define MLXSW_REG_PBMC_ID 0x500C
+#define MLXSW_REG_PBMC_LEN 0x6C
+
+MLXSW_REG_DEFINE(pbmc, MLXSW_REG_PBMC_ID, MLXSW_REG_PBMC_LEN);
+
+/* reg_pbmc_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, pbmc, 0x00, 16, 0x00, 12);
+
+/* reg_pbmc_xoff_timer_value
+ * When device generates a pause frame, it uses this value as the pause
+ * timer (time for the peer port to pause in quota-512 bit time).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pbmc, xoff_timer_value, 0x04, 16, 16);
+
+/* reg_pbmc_xoff_refresh
+ * The time before a new pause frame should be sent to refresh the pause
+ * state. Using the same units as xoff_timer_value above (in quota-512 bit
+ * time).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pbmc, xoff_refresh, 0x04, 0, 16);
+
+#define MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX 11
+
+/* reg_pbmc_buf_lossy
+ * The field indicates if the buffer is lossy.
+ * 0 - Lossless
+ * 1 - Lossy
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pbmc, buf_lossy, 0x0C, 25, 1, 0x08, 0x00, false);
+
+/* reg_pbmc_buf_epsb
+ * Eligible for Port Shared buffer.
+ * If epsb is set, packets assigned to the buffer are allowed to be inserted
+ * into the port shared buffer.
+ * When buf_lossy is set (lossy buffer), this field is reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pbmc, buf_epsb, 0x0C, 24, 1, 0x08, 0x00, false);
+
+/* reg_pbmc_buf_size
+ * The part of the packet buffer array allocated for the specific buffer.
+ * Units are represented in cells.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pbmc, buf_size, 0x0C, 0, 16, 0x08, 0x00, false);
+
+/* reg_pbmc_buf_xoff_threshold
+ * Once the amount of data in the buffer goes above this value, device
+ * starts sending PFC frames for all priorities associated with the
+ * buffer. Units are represented in cells. Reserved in case of lossy
+ * buffer.
+ * Access: RW
+ *
+ * Note: In Spectrum, reserved for buffer[9].
+ */
+MLXSW_ITEM32_INDEXED(reg, pbmc, buf_xoff_threshold, 0x0C, 16, 16,
+ 0x08, 0x04, false);
+
+/* reg_pbmc_buf_xon_threshold
+ * When the amount of data in the buffer goes below this value, device
+ * stops sending PFC frames for the priorities associated with the
+ * buffer. Units are represented in cells. Reserved in case of lossy
+ * buffer.
+ * Access: RW
+ *
+ * Note: In Spectrum, reserved for buffer[9].
+ */
+MLXSW_ITEM32_INDEXED(reg, pbmc, buf_xon_threshold, 0x0C, 0, 16,
+ 0x08, 0x04, false);
+
+static inline void mlxsw_reg_pbmc_pack(char *payload, u16 local_port,
+ u16 xoff_timer_value, u16 xoff_refresh)
+{
+ MLXSW_REG_ZERO(pbmc, payload);
+ mlxsw_reg_pbmc_local_port_set(payload, local_port);
+ mlxsw_reg_pbmc_xoff_timer_value_set(payload, xoff_timer_value);
+ mlxsw_reg_pbmc_xoff_refresh_set(payload, xoff_refresh);
+}
+
+static inline void mlxsw_reg_pbmc_lossy_buffer_pack(char *payload,
+ int buf_index,
+ u16 size)
+{
+ mlxsw_reg_pbmc_buf_lossy_set(payload, buf_index, 1);
+ mlxsw_reg_pbmc_buf_epsb_set(payload, buf_index, 0);
+ mlxsw_reg_pbmc_buf_size_set(payload, buf_index, size);
+}
+
+static inline void mlxsw_reg_pbmc_lossless_buffer_pack(char *payload,
+ int buf_index, u16 size,
+ u16 threshold)
+{
+ mlxsw_reg_pbmc_buf_lossy_set(payload, buf_index, 0);
+ mlxsw_reg_pbmc_buf_epsb_set(payload, buf_index, 0);
+ mlxsw_reg_pbmc_buf_size_set(payload, buf_index, size);
+ mlxsw_reg_pbmc_buf_xoff_threshold_set(payload, buf_index, threshold);
+ mlxsw_reg_pbmc_buf_xon_threshold_set(payload, buf_index, threshold);
+}
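+
+/* Usage sketch (not part of the register definition): configuring headroom
+ * buffer 0 of a port as lossless. PBMC is read-modify-write, so the current
+ * configuration is queried first. The function name and the size/threshold
+ * parameters (in cells) are hypothetical; mlxsw_reg_query()/mlxsw_reg_write()
+ * are assumed to come from core.h.
+ */
+static inline int example_pbmc_lossless_set(struct mlxsw_core *mlxsw_core,
+ u16 local_port, u16 size_cells,
+ u16 thres_cells)
+{
+ char pbmc_pl[MLXSW_REG_PBMC_LEN];
+ int err;
+
+ mlxsw_reg_pbmc_pack(pbmc_pl, local_port, 0, 0);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pbmc), pbmc_pl);
+ if (err)
+ return err;
+ mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, 0, size_cells,
+ thres_cells);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pbmc), pbmc_pl);
+}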
+
+/* PSPA - Port Switch Partition Allocation
+ * ---------------------------------------
+ * Controls the association of a port with a switch partition and enables
+ * configuring ports as stacking ports.
+ */
+#define MLXSW_REG_PSPA_ID 0x500D
+#define MLXSW_REG_PSPA_LEN 0x8
+
+MLXSW_REG_DEFINE(pspa, MLXSW_REG_PSPA_ID, MLXSW_REG_PSPA_LEN);
+
+/* reg_pspa_swid
+ * Switch partition ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pspa, swid, 0x00, 24, 8);
+
+/* reg_pspa_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, pspa, 0x00, 16, 0x00, 0);
+
+/* reg_pspa_sub_port
+ * Virtual port within the local port. Set to 0 when virtual ports are
+ * disabled on the local port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pspa, sub_port, 0x00, 8, 8);
+
+static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u16 local_port)
+{
+ MLXSW_REG_ZERO(pspa, payload);
+ mlxsw_reg_pspa_swid_set(payload, swid);
+ mlxsw_reg_pspa_local_port_set(payload, local_port);
+ mlxsw_reg_pspa_sub_port_set(payload, 0);
+}
+
+/* PMAOS - Ports Module Administrative and Operational Status
+ * ----------------------------------------------------------
+ * This register configures and retrieves the per module status.
+ */
+#define MLXSW_REG_PMAOS_ID 0x5012
+#define MLXSW_REG_PMAOS_LEN 0x10
+
+MLXSW_REG_DEFINE(pmaos, MLXSW_REG_PMAOS_ID, MLXSW_REG_PMAOS_LEN);
+
+/* reg_pmaos_rst
+ * Module reset toggle.
+ * Note: Setting reset while module is plugged-in will result in transition to
+ * "initializing" operational state.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, pmaos, rst, 0x00, 31, 1);
+
+/* reg_pmaos_slot_index
+ * Slot index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmaos, slot_index, 0x00, 24, 4);
+
+/* reg_pmaos_module
+ * Module number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmaos, module, 0x00, 16, 8);
+
+enum mlxsw_reg_pmaos_admin_status {
+ MLXSW_REG_PMAOS_ADMIN_STATUS_ENABLED = 1,
+ MLXSW_REG_PMAOS_ADMIN_STATUS_DISABLED = 2,
+ /* If the module is active and then unplugged, or experienced an error
+ * event, the operational status should go to "disabled" and can only
+ * be enabled upon explicit enable command.
+ */
+ MLXSW_REG_PMAOS_ADMIN_STATUS_ENABLED_ONCE = 3,
+};
+
+/* reg_pmaos_admin_status
+ * Module administrative state (the desired state of the module).
+ * Note: To disable a module, all ports associated with the module must be
+ * administratively down first.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmaos, admin_status, 0x00, 8, 4);
+
+/* reg_pmaos_ase
+ * Admin state update enable.
+ * If this bit is set, admin state will be updated based on admin_status field.
+ * Only relevant on Set() operations.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, pmaos, ase, 0x04, 31, 1);
+
+/* reg_pmaos_ee
+ * Event update enable.
+ * If this bit is set, event generation will be updated based on the e field.
+ * Only relevant on Set operations.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, pmaos, ee, 0x04, 30, 1);
+
+enum mlxsw_reg_pmaos_e {
+ MLXSW_REG_PMAOS_E_DO_NOT_GENERATE_EVENT,
+ MLXSW_REG_PMAOS_E_GENERATE_EVENT,
+ MLXSW_REG_PMAOS_E_GENERATE_SINGLE_EVENT,
+};
+
+/* reg_pmaos_e
+ * Event Generation on operational state change.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmaos, e, 0x04, 0, 2);
+
+static inline void mlxsw_reg_pmaos_pack(char *payload, u8 slot_index, u8 module)
+{
+ MLXSW_REG_ZERO(pmaos, payload);
+ mlxsw_reg_pmaos_slot_index_set(payload, slot_index);
+ mlxsw_reg_pmaos_module_set(payload, module);
+}
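+
+/* Usage sketch (not part of the register definition): enabling or disabling
+ * a module. ase must be set so that the admin_status write takes effect. The
+ * function name is hypothetical; mlxsw_reg_write()/MLXSW_REG() are assumed to
+ * come from core.h.
+ */
+static inline int example_pmaos_module_enable(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module,
+ bool enable)
+{
+ char pmaos_pl[MLXSW_REG_PMAOS_LEN];
+
+ mlxsw_reg_pmaos_pack(pmaos_pl, slot_index, module);
+ mlxsw_reg_pmaos_admin_status_set(pmaos_pl, enable ?
+ MLXSW_REG_PMAOS_ADMIN_STATUS_ENABLED :
+ MLXSW_REG_PMAOS_ADMIN_STATUS_DISABLED);
+ mlxsw_reg_pmaos_ase_set(pmaos_pl, true);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl);
+}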
+
+/* PPLR - Port Physical Loopback Register
+ * --------------------------------------
+ * This register allows configuration of the port's loopback mode.
+ */
+#define MLXSW_REG_PPLR_ID 0x5018
+#define MLXSW_REG_PPLR_LEN 0x8
+
+MLXSW_REG_DEFINE(pplr, MLXSW_REG_PPLR_ID, MLXSW_REG_PPLR_LEN);
+
+/* reg_pplr_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, pplr, 0x00, 16, 0x00, 12);
+
+/* Phy local loopback. When set the port's egress traffic is looped back
+ * to the receiver and the port transmitter is disabled.
+ */
+#define MLXSW_REG_PPLR_LB_TYPE_BIT_PHY_LOCAL BIT(1)
+
+/* reg_pplr_lb_en
+ * Loopback enable.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pplr, lb_en, 0x04, 0, 8);
+
+static inline void mlxsw_reg_pplr_pack(char *payload, u16 local_port,
+ bool phy_local)
+{
+ MLXSW_REG_ZERO(pplr, payload);
+ mlxsw_reg_pplr_local_port_set(payload, local_port);
+ mlxsw_reg_pplr_lb_en_set(payload,
+ phy_local ?
+ MLXSW_REG_PPLR_LB_TYPE_BIT_PHY_LOCAL : 0);
+}
+
+/* PMTDB - Port Module To local DataBase Register
+ * ----------------------------------------------
+ * The PMTDB register allows querying the possible module<->local port
+ * mappings that can be used in PMLP. It does not represent the actual/current
+ * mapping of a local port to a module. The actual mapping is only defined by
+ * PMLP.
+ */
+#define MLXSW_REG_PMTDB_ID 0x501A
+#define MLXSW_REG_PMTDB_LEN 0x40
+
+MLXSW_REG_DEFINE(pmtdb, MLXSW_REG_PMTDB_ID, MLXSW_REG_PMTDB_LEN);
+
+/* reg_pmtdb_slot_index
+ * Slot index (0: Main board).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtdb, slot_index, 0x00, 24, 4);
+
+/* reg_pmtdb_module
+ * Module number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtdb, module, 0x00, 16, 8);
+
+/* reg_pmtdb_ports_width
+ * Port's width
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtdb, ports_width, 0x00, 12, 4);
+
+/* reg_pmtdb_num_ports
+ * Number of ports in a single module (split/breakout)
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtdb, num_ports, 0x00, 8, 4);
+
+enum mlxsw_reg_pmtdb_status {
+ MLXSW_REG_PMTDB_STATUS_SUCCESS,
+};
+
+/* reg_pmtdb_status
+ * Status
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pmtdb, status, 0x00, 0, 4);
+
+/* reg_pmtdb_port_num
+ * The local_port value which can be assigned to the module.
+ * In case of more than one port, port<x> represents the /<x> port of
+ * the module.
+ * Access: RO
+ */
+MLXSW_ITEM16_INDEXED(reg, pmtdb, port_num, 0x04, 0, 10, 0x02, 0x00, false);
+
+static inline void mlxsw_reg_pmtdb_pack(char *payload, u8 slot_index, u8 module,
+ u8 ports_width, u8 num_ports)
+{
+ MLXSW_REG_ZERO(pmtdb, payload);
+ mlxsw_reg_pmtdb_slot_index_set(payload, slot_index);
+ mlxsw_reg_pmtdb_module_set(payload, module);
+ mlxsw_reg_pmtdb_ports_width_set(payload, ports_width);
+ mlxsw_reg_pmtdb_num_ports_set(payload, num_ports);
+}
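+
+/* Usage sketch (not part of the register definition): querying which local
+ * port the first (or only) port of a module would map to for a given split
+ * configuration. The function name is hypothetical; mlxsw_reg_query() and
+ * MLXSW_REG() are assumed to come from core.h.
+ */
+static inline int example_pmtdb_first_port_get(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module,
+ u8 ports_width, u8 num_ports,
+ u16 *p_local_port)
+{
+ char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
+ int err;
+
+ mlxsw_reg_pmtdb_pack(pmtdb_pl, slot_index, module, ports_width,
+ num_ports);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
+ if (err)
+ return err;
+ /* port_num[0] is the local port of the first split port. */
+ *p_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, 0);
+ return 0;
+}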
+
+/* PMECR - Ports Mapping Event Configuration Register
+ * --------------------------------------------------
+ * The PMECR register is used to enable/disable event triggering
+ * in case of local port mapping change.
+ */
+#define MLXSW_REG_PMECR_ID 0x501B
+#define MLXSW_REG_PMECR_LEN 0x20
+
+MLXSW_REG_DEFINE(pmecr, MLXSW_REG_PMECR_ID, MLXSW_REG_PMECR_LEN);
+
+/* reg_pmecr_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, pmecr, 0x00, 16, 0x00, 12);
+
+/* reg_pmecr_ee
+ * Event update enable. If this bit is set, event generation will be updated
+ * based on the e field. Only relevant on Set operations.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, pmecr, ee, 0x04, 30, 1);
+
+/* reg_pmecr_eswi
+ * Software ignore enable bit. If this bit is set, the value of swi is used.
+ * If this bit is clear, the value of swi is ignored.
+ * Only relevant on Set operations.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, pmecr, eswi, 0x04, 24, 1);
+
+/* reg_pmecr_swi
+ * Software ignore. If this bit is set, the device shouldn't generate events
+ * in case of PMLP SET operation but only upon self local port mapping change
+ * (if applicable according to e configuration). This is supplementary
+ * configuration on top of e value.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmecr, swi, 0x04, 8, 1);
+
+enum mlxsw_reg_pmecr_e {
+ MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT,
+ MLXSW_REG_PMECR_E_GENERATE_EVENT,
+ MLXSW_REG_PMECR_E_GENERATE_SINGLE_EVENT,
+};
+
+/* reg_pmecr_e
+ * Event generation on local port mapping change.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmecr, e, 0x04, 0, 2);
+
+static inline void mlxsw_reg_pmecr_pack(char *payload, u16 local_port,
+ enum mlxsw_reg_pmecr_e e)
+{
+ MLXSW_REG_ZERO(pmecr, payload);
+ mlxsw_reg_pmecr_local_port_set(payload, local_port);
+ mlxsw_reg_pmecr_e_set(payload, e);
+ mlxsw_reg_pmecr_ee_set(payload, true);
+ mlxsw_reg_pmecr_swi_set(payload, true);
+ mlxsw_reg_pmecr_eswi_set(payload, true);
+}
+
+/* PMPE - Port Module Plug/Unplug Event Register
+ * ---------------------------------------------
+ * This register reports any operational status change of a module.
+ * A change in the module's state will generate an event only if the change
+ * happens after arming the event mechanism. Any changes to the module state
+ * while the event mechanism is not armed will not be reported. Software can
+ * query the PMPE register for module status.
+ */
+#define MLXSW_REG_PMPE_ID 0x5024
+#define MLXSW_REG_PMPE_LEN 0x10
+
+MLXSW_REG_DEFINE(pmpe, MLXSW_REG_PMPE_ID, MLXSW_REG_PMPE_LEN);
+
+/* reg_pmpe_slot_index
+ * Slot index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmpe, slot_index, 0x00, 24, 4);
+
+/* reg_pmpe_module
+ * Module number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmpe, module, 0x00, 16, 8);
+
+enum mlxsw_reg_pmpe_module_status {
+ MLXSW_REG_PMPE_MODULE_STATUS_PLUGGED_ENABLED = 1,
+ MLXSW_REG_PMPE_MODULE_STATUS_UNPLUGGED,
+ MLXSW_REG_PMPE_MODULE_STATUS_PLUGGED_ERROR,
+ MLXSW_REG_PMPE_MODULE_STATUS_PLUGGED_DISABLED,
+};
+
+/* reg_pmpe_module_status
+ * Module status.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pmpe, module_status, 0x00, 0, 4);
+
+/* reg_pmpe_error_type
+ * Module error details.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pmpe, error_type, 0x04, 8, 4);
+
+/* PDDR - Port Diagnostics Database Register
+ * -----------------------------------------
+ * The PDDR register enables reading the PHY debug database.
+ */
+#define MLXSW_REG_PDDR_ID 0x5031
+#define MLXSW_REG_PDDR_LEN 0x100
+
+MLXSW_REG_DEFINE(pddr, MLXSW_REG_PDDR_ID, MLXSW_REG_PDDR_LEN);
+
+/* reg_pddr_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, pddr, 0x00, 16, 0x00, 12);
+
+enum mlxsw_reg_pddr_page_select {
+ MLXSW_REG_PDDR_PAGE_SELECT_TROUBLESHOOTING_INFO = 1,
+};
+
+/* reg_pddr_page_select
+ * Page select index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pddr, page_select, 0x04, 0, 8);
+
+enum mlxsw_reg_pddr_trblsh_group_opcode {
+ /* Monitor opcodes */
+ MLXSW_REG_PDDR_TRBLSH_GROUP_OPCODE_MONITOR,
+};
+
+/* reg_pddr_trblsh_group_opcode
+ * Group selector.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pddr, trblsh_group_opcode, 0x08, 0, 16);
+
+/* reg_pddr_trblsh_status_opcode
+ * Status opcode.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pddr, trblsh_status_opcode, 0x0C, 0, 16);
+
+static inline void mlxsw_reg_pddr_pack(char *payload, u16 local_port,
+ u8 page_select)
+{
+ MLXSW_REG_ZERO(pddr, payload);
+ mlxsw_reg_pddr_local_port_set(payload, local_port);
+ mlxsw_reg_pddr_page_select_set(payload, page_select);
+}
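+
+/* Usage sketch (not part of the register definition): reading the
+ * troubleshooting status opcode of a port from the PDDR monitor page. The
+ * function name is hypothetical; mlxsw_reg_query()/MLXSW_REG() are assumed to
+ * come from core.h.
+ */
+static inline int example_pddr_trblsh_query(struct mlxsw_core *mlxsw_core,
+ u16 local_port, u16 *p_status)
+{
+ char pddr_pl[MLXSW_REG_PDDR_LEN];
+ int err;
+
+ mlxsw_reg_pddr_pack(pddr_pl, local_port,
+ MLXSW_REG_PDDR_PAGE_SELECT_TROUBLESHOOTING_INFO);
+ mlxsw_reg_pddr_trblsh_group_opcode_set(pddr_pl,
+ MLXSW_REG_PDDR_TRBLSH_GROUP_OPCODE_MONITOR);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pddr), pddr_pl);
+ if (err)
+ return err;
+ *p_status = mlxsw_reg_pddr_trblsh_status_opcode_get(pddr_pl);
+ return 0;
+}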
+
+/* PMMP - Port Module Memory Map Properties Register
+ * -------------------------------------------------
+ * The PMMP register allows overriding the module memory map advertisement.
+ * The register can only be set when the module is disabled by the PMAOS
+ * register.
+ */
+#define MLXSW_REG_PMMP_ID 0x5044
+#define MLXSW_REG_PMMP_LEN 0x2C
+
+MLXSW_REG_DEFINE(pmmp, MLXSW_REG_PMMP_ID, MLXSW_REG_PMMP_LEN);
+
+/* reg_pmmp_module
+ * Module number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmmp, module, 0x00, 16, 8);
+
+/* reg_pmmp_slot_index
+ * Slot index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmmp, slot_index, 0x00, 24, 4);
+
+/* reg_pmmp_sticky
+ * When set, will keep eeprom_override values after plug-out event.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, pmmp, sticky, 0x00, 0, 1);
+
+/* reg_pmmp_eeprom_override_mask
+ * Write mask bit (negative polarity).
+ * 0 - Allow write
+ * 1 - Ignore write
+ * On write, indicates which of the bits from eeprom_override field are
+ * updated.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, pmmp, eeprom_override_mask, 0x04, 16, 16);
+
+enum {
+ /* Set module to low power mode */
+ MLXSW_REG_PMMP_EEPROM_OVERRIDE_LOW_POWER_MASK = BIT(8),
+};
+
+/* reg_pmmp_eeprom_override
+ * Override / ignore EEPROM advertisement properties bitmask
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmmp, eeprom_override, 0x04, 0, 16);
+
+static inline void mlxsw_reg_pmmp_pack(char *payload, u8 slot_index, u8 module)
+{
+ MLXSW_REG_ZERO(pmmp, payload);
+ mlxsw_reg_pmmp_slot_index_set(payload, slot_index);
+ mlxsw_reg_pmmp_module_set(payload, module);
+}
+
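+/* Usage sketch (illustration only): toggle the low power EEPROM override for
+ * a module. Since eeprom_override_mask has negative polarity, every bit
+ * except the low power one is masked out so only that bit is written.
+ * Assumes the module was first disabled through PMAOS; the function name is
+ * hypothetical.
+ */
+static inline int mlxsw_example_pmmp_low_power_set(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module,
+ bool low_power)
+{
+ u16 eeprom_override_mask, eeprom_override;
+ char pmmp_pl[MLXSW_REG_PMMP_LEN];
+
+ mlxsw_reg_pmmp_pack(pmmp_pl, slot_index, module);
+ mlxsw_reg_pmmp_sticky_set(pmmp_pl, true);
+ /* Ignore writes to all bits except the low power one. */
+ eeprom_override_mask = ~MLXSW_REG_PMMP_EEPROM_OVERRIDE_LOW_POWER_MASK;
+ mlxsw_reg_pmmp_eeprom_override_mask_set(pmmp_pl, eeprom_override_mask);
+ eeprom_override = low_power ?
+ MLXSW_REG_PMMP_EEPROM_OVERRIDE_LOW_POWER_MASK : 0;
+ mlxsw_reg_pmmp_eeprom_override_set(pmmp_pl, eeprom_override);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmmp), pmmp_pl);
+}
+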
+/* PLLP - Port Local port to Label Port mapping Register
+ * -----------------------------------------------------
+ * The PLLP register returns the mapping from Local Port into Label Port.
+ */
+#define MLXSW_REG_PLLP_ID 0x504A
+#define MLXSW_REG_PLLP_LEN 0x10
+
+MLXSW_REG_DEFINE(pllp, MLXSW_REG_PLLP_ID, MLXSW_REG_PLLP_LEN);
+
+/* reg_pllp_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, pllp, 0x00, 16, 0x00, 12);
+
+/* reg_pllp_label_port
+ * Front panel label of the port.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pllp, label_port, 0x00, 0, 8);
+
+/* reg_pllp_split_num
+ * Label split mapping for local_port.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pllp, split_num, 0x04, 0, 4);
+
+/* reg_pllp_slot_index
+ * Slot index (0: Main board).
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pllp, slot_index, 0x08, 0, 4);
+
+static inline void mlxsw_reg_pllp_pack(char *payload, u16 local_port)
+{
+ MLXSW_REG_ZERO(pllp, payload);
+ mlxsw_reg_pllp_local_port_set(payload, local_port);
+}
+
+static inline void mlxsw_reg_pllp_unpack(char *payload, u8 *label_port,
+ u8 *split_num, u8 *slot_index)
+{
+ *label_port = mlxsw_reg_pllp_label_port_get(payload);
+ *split_num = mlxsw_reg_pllp_split_num_get(payload);
+ *slot_index = mlxsw_reg_pllp_slot_index_get(payload);
+}
+
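+/* Usage sketch (illustration only): resolve the front panel label, split
+ * number and slot of a local port through PLLP. Assumes a valid mlxsw_core
+ * handle; the function name is hypothetical.
+ */
+static inline int mlxsw_example_pllp_label_get(struct mlxsw_core *mlxsw_core,
+ u16 local_port, u8 *label_port,
+ u8 *split_num, u8 *slot_index)
+{
+ char pllp_pl[MLXSW_REG_PLLP_LEN];
+ int err;
+
+ mlxsw_reg_pllp_pack(pllp_pl, local_port);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pllp), pllp_pl);
+ if (err)
+ return err;
+ mlxsw_reg_pllp_unpack(pllp_pl, label_port, split_num, slot_index);
+ return 0;
+}
+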
+/* PMTM - Port Module Type Mapping Register
+ * ----------------------------------------
+ * The PMTM register allows querying or configuring module types.
+ * The register can only be set while the module is disabled by the PMAOS
+ * register.
+ */
+#define MLXSW_REG_PMTM_ID 0x5067
+#define MLXSW_REG_PMTM_LEN 0x10
+
+MLXSW_REG_DEFINE(pmtm, MLXSW_REG_PMTM_ID, MLXSW_REG_PMTM_LEN);
+
+/* reg_pmtm_slot_index
+ * Slot index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtm, slot_index, 0x00, 24, 4);
+
+/* reg_pmtm_module
+ * Module number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtm, module, 0x00, 16, 8);
+
+enum mlxsw_reg_pmtm_module_type {
+ MLXSW_REG_PMTM_MODULE_TYPE_BACKPLANE_4_LANES = 0,
+ MLXSW_REG_PMTM_MODULE_TYPE_QSFP = 1,
+ MLXSW_REG_PMTM_MODULE_TYPE_SFP = 2,
+ MLXSW_REG_PMTM_MODULE_TYPE_BACKPLANE_SINGLE_LANE = 4,
+ MLXSW_REG_PMTM_MODULE_TYPE_BACKPLANE_2_LANES = 8,
+ MLXSW_REG_PMTM_MODULE_TYPE_CHIP2CHIP4X = 10,
+ MLXSW_REG_PMTM_MODULE_TYPE_CHIP2CHIP2X = 11,
+ MLXSW_REG_PMTM_MODULE_TYPE_CHIP2CHIP1X = 12,
+ MLXSW_REG_PMTM_MODULE_TYPE_QSFP_DD = 14,
+ MLXSW_REG_PMTM_MODULE_TYPE_OSFP = 15,
+ MLXSW_REG_PMTM_MODULE_TYPE_SFP_DD = 16,
+ MLXSW_REG_PMTM_MODULE_TYPE_DSFP = 17,
+ MLXSW_REG_PMTM_MODULE_TYPE_CHIP2CHIP8X = 18,
+ MLXSW_REG_PMTM_MODULE_TYPE_TWISTED_PAIR = 19,
+};
+
+/* reg_pmtm_module_type
+ * Module type.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmtm, module_type, 0x04, 0, 5);
+
+static inline void mlxsw_reg_pmtm_pack(char *payload, u8 slot_index, u8 module)
+{
+ MLXSW_REG_ZERO(pmtm, payload);
+ mlxsw_reg_pmtm_slot_index_set(payload, slot_index);
+ mlxsw_reg_pmtm_module_set(payload, module);
+}
+
+/* HTGT - Host Trap Group Table
+ * ----------------------------
+ * Configures the properties for forwarding to CPU.
+ */
+#define MLXSW_REG_HTGT_ID 0x7002
+#define MLXSW_REG_HTGT_LEN 0x20
+
+MLXSW_REG_DEFINE(htgt, MLXSW_REG_HTGT_ID, MLXSW_REG_HTGT_LEN);
+
+/* reg_htgt_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, htgt, swid, 0x00, 24, 8);
+
+#define MLXSW_REG_HTGT_PATH_TYPE_LOCAL 0x0 /* For locally attached CPU */
+
+/* reg_htgt_type
+ * CPU path type.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4);
+
+enum mlxsw_reg_htgt_trap_group {
+ MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
+ MLXSW_REG_HTGT_TRAP_GROUP_CORE_EVENT,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_STP,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_MC_SNOOPING,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_NEIGH_DISCOVERY,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_EXTERNAL_ROUTE,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_PKT_SAMPLE,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_FLOW_LOGGING,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_BFD,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_DUMMY,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_L2_DISCARDS,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_DISCARDS,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_EXCEPTIONS,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_TUNNEL_DISCARDS,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_ACL_DISCARDS,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_BUFFER_DISCARDS,
+
+ __MLXSW_REG_HTGT_TRAP_GROUP_MAX,
+ MLXSW_REG_HTGT_TRAP_GROUP_MAX = __MLXSW_REG_HTGT_TRAP_GROUP_MAX - 1
+};
+
+/* reg_htgt_trap_group
+ * Trap group number. User defined number specifying which trap groups
+ * should be forwarded to the CPU. The mapping between trap IDs and trap
+ * groups is configured using HPKT register.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, htgt, trap_group, 0x00, 0, 8);
+
+enum {
+ MLXSW_REG_HTGT_POLICER_DISABLE,
+ MLXSW_REG_HTGT_POLICER_ENABLE,
+};
+
+/* reg_htgt_pide
+ * Enable policer ID specified using 'pid' field.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, pide, 0x04, 15, 1);
+
+#define MLXSW_REG_HTGT_INVALID_POLICER 0xff
+
+/* reg_htgt_pid
+ * Policer ID for the trap group.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, pid, 0x04, 0, 8);
+
+#define MLXSW_REG_HTGT_TRAP_TO_CPU 0x0
+
+/* reg_htgt_mirror_action
+ * Mirror action to use.
+ * 0 - Trap to CPU.
+ * 1 - Trap to CPU and mirror to a mirroring agent.
+ * 2 - Mirror to a mirroring agent and do not trap to CPU.
+ * Access: RW
+ *
+ * Note: Mirroring to a mirroring agent is only supported in Spectrum.
+ */
+MLXSW_ITEM32(reg, htgt, mirror_action, 0x08, 8, 2);
+
+/* reg_htgt_mirroring_agent
+ * Mirroring agent.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, mirroring_agent, 0x08, 0, 3);
+
+#define MLXSW_REG_HTGT_DEFAULT_PRIORITY 0
+
+/* reg_htgt_priority
+ * Trap group priority.
+ * In case a packet matches multiple classification rules, the packet will
+ * only be trapped once, based on the trap ID associated with the group (via
+ * register HPKT) with the highest priority.
+ * Supported values are 0-7, with 7 representing the highest priority.
+ * Access: RW
+ *
+ * Note: In SwitchX-2 this field is ignored and the priority value is replaced
+ * by the 'trap_group' field.
+ */
+MLXSW_ITEM32(reg, htgt, priority, 0x0C, 0, 4);
+
+#define MLXSW_REG_HTGT_DEFAULT_TC 7
+
+/* reg_htgt_local_path_cpu_tclass
+ * CPU ingress traffic class for the trap group.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, local_path_cpu_tclass, 0x10, 16, 6);
+
+enum mlxsw_reg_htgt_local_path_rdq {
+ MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_CTRL = 0x13,
+ MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_RX = 0x14,
+ MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_EMAD = 0x15,
+ MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SIB_EMAD = 0x15,
+};
+/* reg_htgt_local_path_rdq
+ * Receive descriptor queue (RDQ) to use for the trap group.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, local_path_rdq, 0x10, 0, 6);
+
+static inline void mlxsw_reg_htgt_pack(char *payload, u8 group, u8 policer_id,
+ u8 priority, u8 tc)
+{
+ MLXSW_REG_ZERO(htgt, payload);
+
+ if (policer_id == MLXSW_REG_HTGT_INVALID_POLICER) {
+ mlxsw_reg_htgt_pide_set(payload,
+ MLXSW_REG_HTGT_POLICER_DISABLE);
+ } else {
+ mlxsw_reg_htgt_pide_set(payload,
+ MLXSW_REG_HTGT_POLICER_ENABLE);
+ mlxsw_reg_htgt_pid_set(payload, policer_id);
+ }
+
+ mlxsw_reg_htgt_type_set(payload, MLXSW_REG_HTGT_PATH_TYPE_LOCAL);
+ mlxsw_reg_htgt_trap_group_set(payload, group);
+ mlxsw_reg_htgt_mirror_action_set(payload, MLXSW_REG_HTGT_TRAP_TO_CPU);
+ mlxsw_reg_htgt_mirroring_agent_set(payload, 0);
+ mlxsw_reg_htgt_priority_set(payload, priority);
+ mlxsw_reg_htgt_local_path_cpu_tclass_set(payload, tc);
+ mlxsw_reg_htgt_local_path_rdq_set(payload, group);
+}
+
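+/* Usage sketch (illustration only): program a trap group that forwards to
+ * the CPU with the default priority and traffic class and no policer.
+ * The function name is hypothetical.
+ */
+static inline int mlxsw_example_htgt_group_set(struct mlxsw_core *mlxsw_core,
+ enum mlxsw_reg_htgt_trap_group group)
+{
+ char htgt_pl[MLXSW_REG_HTGT_LEN];
+
+ mlxsw_reg_htgt_pack(htgt_pl, group, MLXSW_REG_HTGT_INVALID_POLICER,
+ MLXSW_REG_HTGT_DEFAULT_PRIORITY,
+ MLXSW_REG_HTGT_DEFAULT_TC);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
+}
+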
+/* HPKT - Host Packet Trap
+ * -----------------------
+ * Configures trap IDs inside trap groups.
+ */
+#define MLXSW_REG_HPKT_ID 0x7003
+#define MLXSW_REG_HPKT_LEN 0x10
+
+MLXSW_REG_DEFINE(hpkt, MLXSW_REG_HPKT_ID, MLXSW_REG_HPKT_LEN);
+
+enum {
+ MLXSW_REG_HPKT_ACK_NOT_REQUIRED,
+ MLXSW_REG_HPKT_ACK_REQUIRED,
+};
+
+/* reg_hpkt_ack
+ * Require acknowledgements from the host for events.
+ * If set, then the device will wait for the event it sent to be acknowledged
+ * by the host. This option is only relevant for event trap IDs.
+ * Access: RW
+ *
+ * Note: Currently not supported by firmware.
+ */
+MLXSW_ITEM32(reg, hpkt, ack, 0x00, 24, 1);
+
+enum mlxsw_reg_hpkt_action {
+ MLXSW_REG_HPKT_ACTION_FORWARD,
+ MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+ MLXSW_REG_HPKT_ACTION_MIRROR_TO_CPU,
+ MLXSW_REG_HPKT_ACTION_DISCARD,
+ MLXSW_REG_HPKT_ACTION_SOFT_DISCARD,
+ MLXSW_REG_HPKT_ACTION_TRAP_AND_SOFT_DISCARD,
+ MLXSW_REG_HPKT_ACTION_TRAP_EXCEPTION_TO_CPU,
+ MLXSW_REG_HPKT_ACTION_SET_FW_DEFAULT = 15,
+};
+
+/* reg_hpkt_action
+ * Action to perform on packet when trapped.
+ * 0 - No action. Forward to CPU based on switching rules.
+ * 1 - Trap to CPU (CPU receives sole copy).
+ * 2 - Mirror to CPU (CPU receives a replica of the packet).
+ * 3 - Discard.
+ * 4 - Soft discard (allow other traps to act on the packet).
+ * 5 - Trap and soft discard (allow other traps to overwrite this trap).
+ * 6 - Trap to CPU (CPU receives sole copy) and count it as error.
+ * 15 - Restore the firmware's default action.
+ * Access: RW
+ *
+ * Note: Must be set to 0 (forward) for event trap IDs, as they are already
+ * addressed to the CPU.
+ */
+MLXSW_ITEM32(reg, hpkt, action, 0x00, 20, 3);
+
+/* reg_hpkt_trap_group
+ * Trap group to associate the trap with.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, hpkt, trap_group, 0x00, 12, 6);
+
+/* reg_hpkt_trap_id
+ * Trap ID.
+ * Access: Index
+ *
+ * Note: A trap ID can only be associated with a single trap group. The device
+ * will associate the trap ID with the last trap group configured.
+ */
+MLXSW_ITEM32(reg, hpkt, trap_id, 0x00, 0, 10);
+
+enum {
+ MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT,
+ MLXSW_REG_HPKT_CTRL_PACKET_NO_BUFFER,
+ MLXSW_REG_HPKT_CTRL_PACKET_USE_BUFFER,
+};
+
+/* reg_hpkt_ctrl
+ * Configure dedicated buffer resources for control packets.
+ * Ignored by SwitchX-2.
+ * 0 - Keep factory defaults.
+ * 1 - Do not use control buffer for this trap ID.
+ * 2 - Use control buffer for this trap ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, hpkt, ctrl, 0x04, 16, 2);
+
+static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action, u16 trap_id,
+ enum mlxsw_reg_htgt_trap_group trap_group,
+ bool is_ctrl)
+{
+ MLXSW_REG_ZERO(hpkt, payload);
+ mlxsw_reg_hpkt_ack_set(payload, MLXSW_REG_HPKT_ACK_NOT_REQUIRED);
+ mlxsw_reg_hpkt_action_set(payload, action);
+ mlxsw_reg_hpkt_trap_group_set(payload, trap_group);
+ mlxsw_reg_hpkt_trap_id_set(payload, trap_id);
+ mlxsw_reg_hpkt_ctrl_set(payload, is_ctrl ?
+ MLXSW_REG_HPKT_CTRL_PACKET_USE_BUFFER :
+ MLXSW_REG_HPKT_CTRL_PACKET_NO_BUFFER);
+}
+
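+/* Usage sketch (illustration only): associate a trap ID with a trap group
+ * and trap matching packets to the CPU using the control buffer. The trap
+ * ID is taken as a parameter (see trap.h for the defined IDs); the function
+ * name is hypothetical.
+ */
+static inline int mlxsw_example_hpkt_trap_set(struct mlxsw_core *mlxsw_core,
+ u16 trap_id,
+ enum mlxsw_reg_htgt_trap_group group)
+{
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+ trap_id, group, true);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+}
+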
+/* RGCR - Router General Configuration Register
+ * --------------------------------------------
+ * The register is used for setting up the router configuration.
+ */
+#define MLXSW_REG_RGCR_ID 0x8001
+#define MLXSW_REG_RGCR_LEN 0x28
+
+MLXSW_REG_DEFINE(rgcr, MLXSW_REG_RGCR_ID, MLXSW_REG_RGCR_LEN);
+
+/* reg_rgcr_ipv4_en
+ * IPv4 router enable.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rgcr, ipv4_en, 0x00, 31, 1);
+
+/* reg_rgcr_ipv6_en
+ * IPv6 router enable.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rgcr, ipv6_en, 0x00, 30, 1);
+
+/* reg_rgcr_max_router_interfaces
+ * Defines the maximum number of active router interfaces for all virtual
+ * routers.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rgcr, max_router_interfaces, 0x10, 0, 16);
+
+/* reg_rgcr_usp
+ * Update switch priority and packet color.
+ * 0 - Preserve the value of Switch Priority and packet color.
+ * 1 - Recalculate the value of Switch Priority and packet color.
+ * Access: RW
+ *
+ * Note: Not supported by SwitchX and SwitchX-2.
+ */
+MLXSW_ITEM32(reg, rgcr, usp, 0x18, 20, 1);
+
+/* reg_rgcr_pcp_rw
+ * Indicates how to handle the pcp_rewrite_en value:
+ * 0 - Preserve the value of pcp_rewrite_en.
+ * 2 - Disable PCP rewrite.
+ * 3 - Enable PCP rewrite.
+ * Access: RW
+ *
+ * Note: Not supported by SwitchX and SwitchX-2.
+ */
+MLXSW_ITEM32(reg, rgcr, pcp_rw, 0x18, 16, 2);
+
+/* reg_rgcr_activity_dis
+ * Activity disable:
+ * 0 - Activity will be set when an entry is hit (default).
+ * 1 - Activity will not be set when an entry is hit.
+ *
+ * Bit 0 - Disable activity bit in Router Algorithmic LPM Unicast Entry
+ * (RALUE).
+ * Bit 1 - Disable activity bit in Router Algorithmic LPM Unicast Host
+ * Entry (RAUHT).
+ * Bits 2:7 are reserved.
+ * Access: RW
+ *
+ * Note: Not supported by SwitchX, SwitchX-2 and Switch-IB.
+ */
+MLXSW_ITEM32(reg, rgcr, activity_dis, 0x20, 0, 8);
+
+static inline void mlxsw_reg_rgcr_pack(char *payload, bool ipv4_en,
+ bool ipv6_en)
+{
+ MLXSW_REG_ZERO(rgcr, payload);
+ mlxsw_reg_rgcr_ipv4_en_set(payload, ipv4_en);
+ mlxsw_reg_rgcr_ipv6_en_set(payload, ipv6_en);
+}
+
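+/* Usage sketch (illustration only): enable IPv4 and IPv6 routing and cap the
+ * number of active router interfaces. The max_rifs value would normally come
+ * from the device resources; the function name is hypothetical.
+ */
+static inline int mlxsw_example_router_enable(struct mlxsw_core *mlxsw_core,
+ u16 max_rifs)
+{
+ char rgcr_pl[MLXSW_REG_RGCR_LEN];
+
+ mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
+ mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(rgcr), rgcr_pl);
+}
+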
+/* RITR - Router Interface Table Register
+ * --------------------------------------
+ * The register is used to configure the router interface table.
+ */
+#define MLXSW_REG_RITR_ID 0x8002
+#define MLXSW_REG_RITR_LEN 0x40
+
+MLXSW_REG_DEFINE(ritr, MLXSW_REG_RITR_ID, MLXSW_REG_RITR_LEN);
+
+/* reg_ritr_enable
+ * Enables routing on the router interface.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, enable, 0x00, 31, 1);
+
+/* reg_ritr_ipv4
+ * IPv4 routing enable. Enables routing of IPv4 traffic on the router
+ * interface.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ipv4, 0x00, 29, 1);
+
+/* reg_ritr_ipv6
+ * IPv6 routing enable. Enables routing of IPv6 traffic on the router
+ * interface.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ipv6, 0x00, 28, 1);
+
+/* reg_ritr_ipv4_mc
+ * IPv4 multicast routing enable.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ipv4_mc, 0x00, 27, 1);
+
+/* reg_ritr_ipv6_mc
+ * IPv6 multicast routing enable.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ipv6_mc, 0x00, 26, 1);
+
+enum mlxsw_reg_ritr_if_type {
+ /* VLAN interface. */
+ MLXSW_REG_RITR_VLAN_IF,
+ /* FID interface. */
+ MLXSW_REG_RITR_FID_IF,
+ /* Sub-port interface. */
+ MLXSW_REG_RITR_SP_IF,
+ /* Loopback Interface. */
+ MLXSW_REG_RITR_LOOPBACK_IF,
+};
+
+/* reg_ritr_type
+ * Router interface type as per enum mlxsw_reg_ritr_if_type.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, type, 0x00, 23, 3);
+
+enum {
+ MLXSW_REG_RITR_RIF_CREATE,
+ MLXSW_REG_RITR_RIF_DEL,
+};
+
+/* reg_ritr_op
+ * Opcode:
+ * 0 - Create or edit RIF.
+ * 1 - Delete RIF.
+ * Reserved for SwitchX-2. For Spectrum, editing of interface properties
+ * is not supported. An interface must be deleted and re-created in order
+ * to update properties.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, ritr, op, 0x00, 20, 2);
+
+/* reg_ritr_rif
+ * Router interface index. A pointer to the Router Interface Table.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ritr, rif, 0x00, 0, 16);
+
+/* reg_ritr_ipv4_fe
+ * IPv4 Forwarding Enable.
+ * Enables routing of IPv4 traffic on the router interface. When disabled,
+ * forwarding is blocked but local traffic (traps and IP2ME) will be enabled.
+ * Not supported in SwitchX-2.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ipv4_fe, 0x04, 29, 1);
+
+/* reg_ritr_ipv6_fe
+ * IPv6 Forwarding Enable.
+ * Enables routing of IPv6 traffic on the router interface. When disabled,
+ * forwarding is blocked but local traffic (traps and IP2ME) will be enabled.
+ * Not supported in SwitchX-2.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1);
+
+/* reg_ritr_ipv4_mc_fe
+ * IPv4 Multicast Forwarding Enable.
+ * When disabled, forwarding is blocked but local traffic (traps and IP to me)
+ * will be enabled.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ipv4_mc_fe, 0x04, 27, 1);
+
+/* reg_ritr_ipv6_mc_fe
+ * IPv6 Multicast Forwarding Enable.
+ * When disabled, forwarding is blocked but local traffic (traps and IP to me)
+ * will be enabled.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ipv6_mc_fe, 0x04, 26, 1);
+
+/* reg_ritr_lb_en
+ * Loop-back filter enable for unicast packets.
+ * If the flag is set then loop-back filter for unicast packets is
+ * implemented on the RIF. Multicast packets are always subject to
+ * loop-back filtering.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, lb_en, 0x04, 24, 1);
+
+/* reg_ritr_virtual_router
+ * Virtual router ID associated with the router interface.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, virtual_router, 0x04, 0, 16);
+
+/* reg_ritr_mtu
+ * Router interface MTU.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, mtu, 0x34, 0, 16);
+
+/* reg_ritr_if_swid
+ * Switch partition ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, if_swid, 0x08, 24, 8);
+
+/* reg_ritr_if_mac_profile_id
+ * MAC msb profile ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, if_mac_profile_id, 0x10, 16, 4);
+
+/* reg_ritr_if_mac
+ * Router interface MAC address.
+ * In Spectrum, all MAC addresses must have the same 38 MSBits.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ritr, if_mac, 0x12, 6);
+
+/* reg_ritr_if_vrrp_id_ipv6
+ * VRRP ID for IPv6
+ * Note: Reserved for RIF types other than VLAN, FID and Sub-port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, if_vrrp_id_ipv6, 0x1C, 8, 8);
+
+/* reg_ritr_if_vrrp_id_ipv4
+ * VRRP ID for IPv4
+ * Note: Reserved for RIF types other than VLAN, FID and Sub-port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, if_vrrp_id_ipv4, 0x1C, 0, 8);
+
+/* VLAN Interface */
+
+/* reg_ritr_vlan_if_vlan_id
+ * VLAN ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, vlan_if_vlan_id, 0x08, 0, 12);
+
+/* reg_ritr_vlan_if_efid
+ * Egress FID.
+ * Used to connect the RIF to a bridge.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and on Spectrum-1.
+ */
+MLXSW_ITEM32(reg, ritr, vlan_if_efid, 0x0C, 0, 16);
+
+/* FID Interface */
+
+/* reg_ritr_fid_if_fid
+ * Filtering ID. Used to connect a bridge to the router.
+ * When legacy bridge model is used, only FIDs from the vFID range are
+ * supported. When unified bridge model is used, this is the egress FID for
+ * router to bridge.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, fid_if_fid, 0x08, 0, 16);
+
+/* Sub-port Interface */
+
+/* reg_ritr_sp_if_lag
+ * LAG indication. When this bit is set the system_port field holds the
+ * LAG identifier.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, sp_if_lag, 0x08, 24, 1);
+
+/* reg_ritr_sp_system_port
+ * Port unique identifier. When the lag bit is set, this field holds the
+ * lag_id in bits 0:9.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, sp_if_system_port, 0x08, 0, 16);
+
+/* reg_ritr_sp_if_efid
+ * Egress filtering ID.
+ * Used to connect the eRIF to a bridge if eRIF-ACL has modified the DMAC or
+ * the VID.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32(reg, ritr, sp_if_efid, 0x0C, 0, 16);
+
+/* reg_ritr_sp_if_vid
+ * VLAN ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, sp_if_vid, 0x18, 0, 12);
+
+/* Loopback Interface */
+
+enum mlxsw_reg_ritr_loopback_protocol {
+ /* IPinIP IPv4 underlay Unicast */
+ MLXSW_REG_RITR_LOOPBACK_PROTOCOL_IPIP_IPV4,
+ /* IPinIP IPv6 underlay Unicast */
+ MLXSW_REG_RITR_LOOPBACK_PROTOCOL_IPIP_IPV6,
+ /* IPinIP generic - used for Spectrum-2 underlay RIF */
+ MLXSW_REG_RITR_LOOPBACK_GENERIC,
+};
+
+/* reg_ritr_loopback_protocol
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, loopback_protocol, 0x08, 28, 4);
+
+enum mlxsw_reg_ritr_loopback_ipip_type {
+ /* Tunnel is IPinIP. */
+ MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_IP,
+ /* Tunnel is GRE, no key. */
+ MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_IN_IP,
+ /* Tunnel is GRE, with a key. */
+ MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_KEY_IN_IP,
+};
+
+/* reg_ritr_loopback_ipip_type
+ * Encapsulation type.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, loopback_ipip_type, 0x10, 24, 4);
+
+enum mlxsw_reg_ritr_loopback_ipip_options {
+ /* The key is defined by gre_key. */
+ MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
+};
+
+/* reg_ritr_loopback_ipip_options
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, loopback_ipip_options, 0x10, 20, 4);
+
+/* reg_ritr_loopback_ipip_uvr
+ * Underlay Virtual Router ID.
+ * Range is 0..cap_max_virtual_routers-1.
+ * Reserved for Spectrum-2.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, loopback_ipip_uvr, 0x10, 0, 16);
+
+/* reg_ritr_loopback_ipip_underlay_rif
+ * Underlay ingress router interface.
+ * Reserved for Spectrum.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, loopback_ipip_underlay_rif, 0x14, 0, 16);
+
+/* reg_ritr_loopback_ipip_usip*
+ * Encapsulation Underlay source IP.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ritr, loopback_ipip_usip6, 0x18, 16);
+MLXSW_ITEM32(reg, ritr, loopback_ipip_usip4, 0x24, 0, 32);
+
+/* reg_ritr_loopback_ipip_gre_key
+ * GRE Key.
+ * Reserved when ipip_type is not IP_IN_GRE_KEY_IN_IP.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, loopback_ipip_gre_key, 0x28, 0, 32);
+
+/* Shared between ingress/egress */
+enum mlxsw_reg_ritr_counter_set_type {
+ /* No Count. */
+ MLXSW_REG_RITR_COUNTER_SET_TYPE_NO_COUNT = 0x0,
+ /* Basic. Used for router interfaces, counting the following:
+ * - Error and Discard counters.
+ * - Unicast, Multicast and Broadcast counters. Sharing the
+ * same set of counters for the different type of traffic
+ * (IPv4, IPv6 and mpls).
+ */
+ MLXSW_REG_RITR_COUNTER_SET_TYPE_BASIC = 0x9,
+};
+
+/* reg_ritr_ingress_counter_index
+ * Counter Index for flow counter.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ingress_counter_index, 0x38, 0, 24);
+
+/* reg_ritr_ingress_counter_set_type
+ * Ingress Counter Set Type for router interface counter.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ingress_counter_set_type, 0x38, 24, 8);
+
+/* reg_ritr_egress_counter_index
+ * Counter Index for flow counter.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, egress_counter_index, 0x3C, 0, 24);
+
+/* reg_ritr_egress_counter_set_type
+ * Egress Counter Set Type for router interface counter.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, egress_counter_set_type, 0x3C, 24, 8);
+
+static inline void mlxsw_reg_ritr_counter_pack(char *payload, u32 index,
+ bool enable, bool egress)
+{
+ enum mlxsw_reg_ritr_counter_set_type set_type;
+
+ if (enable)
+ set_type = MLXSW_REG_RITR_COUNTER_SET_TYPE_BASIC;
+ else
+ set_type = MLXSW_REG_RITR_COUNTER_SET_TYPE_NO_COUNT;
+
+ if (egress) {
+ mlxsw_reg_ritr_egress_counter_set_type_set(payload, set_type);
+ mlxsw_reg_ritr_egress_counter_index_set(payload, index);
+ } else {
+ mlxsw_reg_ritr_ingress_counter_set_type_set(payload, set_type);
+ mlxsw_reg_ritr_ingress_counter_index_set(payload, index);
+ }
+}
+
+static inline void mlxsw_reg_ritr_rif_pack(char *payload, u16 rif)
+{
+ MLXSW_REG_ZERO(ritr, payload);
+ mlxsw_reg_ritr_rif_set(payload, rif);
+}
+
+static inline void mlxsw_reg_ritr_sp_if_pack(char *payload, bool lag,
+ u16 system_port, u16 efid, u16 vid)
+{
+ mlxsw_reg_ritr_sp_if_lag_set(payload, lag);
+ mlxsw_reg_ritr_sp_if_system_port_set(payload, system_port);
+ mlxsw_reg_ritr_sp_if_efid_set(payload, efid);
+ mlxsw_reg_ritr_sp_if_vid_set(payload, vid);
+}
+
+static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
+ enum mlxsw_reg_ritr_if_type type,
+ u16 rif, u16 vr_id, u16 mtu)
+{
+ bool op = enable ? MLXSW_REG_RITR_RIF_CREATE : MLXSW_REG_RITR_RIF_DEL;
+
+ MLXSW_REG_ZERO(ritr, payload);
+ mlxsw_reg_ritr_enable_set(payload, enable);
+ mlxsw_reg_ritr_ipv4_set(payload, 1);
+ mlxsw_reg_ritr_ipv6_set(payload, 1);
+ mlxsw_reg_ritr_ipv4_mc_set(payload, 1);
+ mlxsw_reg_ritr_ipv6_mc_set(payload, 1);
+ mlxsw_reg_ritr_type_set(payload, type);
+ mlxsw_reg_ritr_op_set(payload, op);
+ mlxsw_reg_ritr_rif_set(payload, rif);
+ mlxsw_reg_ritr_ipv4_fe_set(payload, 1);
+ mlxsw_reg_ritr_ipv6_fe_set(payload, 1);
+ mlxsw_reg_ritr_ipv4_mc_fe_set(payload, 1);
+ mlxsw_reg_ritr_ipv6_mc_fe_set(payload, 1);
+ mlxsw_reg_ritr_lb_en_set(payload, 1);
+ mlxsw_reg_ritr_virtual_router_set(payload, vr_id);
+ mlxsw_reg_ritr_mtu_set(payload, mtu);
+}
+
+static inline void mlxsw_reg_ritr_mac_pack(char *payload, const char *mac)
+{
+ mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac);
+}
+
+static inline void
+mlxsw_reg_ritr_vlan_if_pack(char *payload, bool enable, u16 rif, u16 vr_id,
+ u16 mtu, const char *mac, u8 mac_profile_id,
+ u16 vlan_id, u16 efid)
+{
+ enum mlxsw_reg_ritr_if_type type = MLXSW_REG_RITR_VLAN_IF;
+
+ mlxsw_reg_ritr_pack(payload, enable, type, rif, vr_id, mtu);
+ mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac);
+ mlxsw_reg_ritr_if_mac_profile_id_set(payload, mac_profile_id);
+ mlxsw_reg_ritr_vlan_if_vlan_id_set(payload, vlan_id);
+ mlxsw_reg_ritr_vlan_if_efid_set(payload, efid);
+}
+
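+/* Usage sketch (illustration only): create and enable a VLAN router
+ * interface. The RIF index, virtual router, MAC and VLAN would normally be
+ * allocated by the router code; the function name is hypothetical.
+ */
+static inline int mlxsw_example_vlan_rif_create(struct mlxsw_core *mlxsw_core,
+ u16 rif, u16 vr_id, u16 mtu,
+ const char *mac, u16 vid)
+{
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+
+ mlxsw_reg_ritr_vlan_if_pack(ritr_pl, true, rif, vr_id, mtu, mac, 0,
+ vid, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(ritr), ritr_pl);
+}
+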
+static inline void
+mlxsw_reg_ritr_loopback_ipip_common_pack(char *payload,
+ enum mlxsw_reg_ritr_loopback_ipip_type ipip_type,
+ enum mlxsw_reg_ritr_loopback_ipip_options options,
+ u16 uvr_id, u16 underlay_rif, u32 gre_key)
+{
+ mlxsw_reg_ritr_loopback_ipip_type_set(payload, ipip_type);
+ mlxsw_reg_ritr_loopback_ipip_options_set(payload, options);
+ mlxsw_reg_ritr_loopback_ipip_uvr_set(payload, uvr_id);
+ mlxsw_reg_ritr_loopback_ipip_underlay_rif_set(payload, underlay_rif);
+ mlxsw_reg_ritr_loopback_ipip_gre_key_set(payload, gre_key);
+}
+
+static inline void
+mlxsw_reg_ritr_loopback_ipip4_pack(char *payload,
+ enum mlxsw_reg_ritr_loopback_ipip_type ipip_type,
+ enum mlxsw_reg_ritr_loopback_ipip_options options,
+ u16 uvr_id, u16 underlay_rif, u32 usip, u32 gre_key)
+{
+ mlxsw_reg_ritr_loopback_protocol_set(payload,
+ MLXSW_REG_RITR_LOOPBACK_PROTOCOL_IPIP_IPV4);
+ mlxsw_reg_ritr_loopback_ipip_common_pack(payload, ipip_type, options,
+ uvr_id, underlay_rif, gre_key);
+ mlxsw_reg_ritr_loopback_ipip_usip4_set(payload, usip);
+}
+
+static inline void
+mlxsw_reg_ritr_loopback_ipip6_pack(char *payload,
+ enum mlxsw_reg_ritr_loopback_ipip_type ipip_type,
+ enum mlxsw_reg_ritr_loopback_ipip_options options,
+ u16 uvr_id, u16 underlay_rif,
+ const struct in6_addr *usip, u32 gre_key)
+{
+ enum mlxsw_reg_ritr_loopback_protocol protocol =
+ MLXSW_REG_RITR_LOOPBACK_PROTOCOL_IPIP_IPV6;
+
+ mlxsw_reg_ritr_loopback_protocol_set(payload, protocol);
+ mlxsw_reg_ritr_loopback_ipip_common_pack(payload, ipip_type, options,
+ uvr_id, underlay_rif, gre_key);
+ mlxsw_reg_ritr_loopback_ipip_usip6_memcpy_to(payload,
+ (const char *)usip);
+}
+
+/* RTAR - Router TCAM Allocation Register
+ * --------------------------------------
+ * This register is used for allocation of regions in the TCAM table.
+ */
+#define MLXSW_REG_RTAR_ID 0x8004
+#define MLXSW_REG_RTAR_LEN 0x20
+
+MLXSW_REG_DEFINE(rtar, MLXSW_REG_RTAR_ID, MLXSW_REG_RTAR_LEN);
+
+enum mlxsw_reg_rtar_op {
+ MLXSW_REG_RTAR_OP_ALLOCATE,
+ MLXSW_REG_RTAR_OP_RESIZE,
+ MLXSW_REG_RTAR_OP_DEALLOCATE,
+};
+
+/* reg_rtar_op
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, rtar, op, 0x00, 28, 4);
+
+enum mlxsw_reg_rtar_key_type {
+ MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST = 1,
+ MLXSW_REG_RTAR_KEY_TYPE_IPV6_MULTICAST = 3
+};
+
+/* reg_rtar_key_type
+ * TCAM key type for the region.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, rtar, key_type, 0x00, 0, 8);
+
+/* reg_rtar_region_size
+ * TCAM region size. When allocating/resizing this is the requested
+ * size, the response is the actual size.
+ * Note: Actual size may be larger than requested.
+ * Reserved for op = Deallocate
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, rtar, region_size, 0x04, 0, 16);
+
+static inline void mlxsw_reg_rtar_pack(char *payload,
+ enum mlxsw_reg_rtar_op op,
+ enum mlxsw_reg_rtar_key_type key_type,
+ u16 region_size)
+{
+ MLXSW_REG_ZERO(rtar, payload);
+ mlxsw_reg_rtar_op_set(payload, op);
+ mlxsw_reg_rtar_key_type_set(payload, key_type);
+ mlxsw_reg_rtar_region_size_set(payload, region_size);
+}
+
+/* RATR - Router Adjacency Table Register
+ * --------------------------------------
+ * The RATR register is used to configure the Router Adjacency (next-hop)
+ * Table.
+ */
+#define MLXSW_REG_RATR_ID 0x8008
+#define MLXSW_REG_RATR_LEN 0x2C
+
+MLXSW_REG_DEFINE(ratr, MLXSW_REG_RATR_ID, MLXSW_REG_RATR_LEN);
+
+enum mlxsw_reg_ratr_op {
+ /* Read */
+ MLXSW_REG_RATR_OP_QUERY_READ = 0,
+ /* Read and clear activity */
+ MLXSW_REG_RATR_OP_QUERY_READ_CLEAR = 2,
+ /* Write Adjacency entry */
+ MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY = 1,
+ /* Write Adjacency entry only if the activity is cleared.
+ * The write may not succeed if the activity is set. There is no
+ * direct feedback on whether the write succeeded, however a
+ * subsequent read will reveal the actual entry (SW can compare the
+ * read response to the written entry).
+ */
+ MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY = 3,
+};
+
+/* reg_ratr_op
+ * Note that Write operation may also be used for updating
+ * counter_set_type and counter_index. In this case all other
+ * fields must not be updated.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ratr, op, 0x00, 28, 4);
+
+/* reg_ratr_v
+ * Valid bit. Indicates if the adjacency entry is valid.
+ * Note: the device may need some time before reusing an invalidated
+ * entry. During this time the entry cannot be reused. It is
+ * recommended to use another entry before reusing an invalidated
+ * entry (e.g. software can put it at the end of the list for
+ * reuse). Trying to access an invalidated entry not yet cleared
+ * by the device results in a failure indicating "Try Again" status.
+ * When valid is '0' then egress_router_interface, trap_action,
+ * adjacency_parameters and counters are reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ratr, v, 0x00, 24, 1);
+
+/* reg_ratr_a
+ * Activity. Set for new entries. Set if a packet lookup has hit on
+ * the specific entry. To clear the a bit, use "clear activity".
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ratr, a, 0x00, 16, 1);
+
+enum mlxsw_reg_ratr_type {
+ /* Ethernet */
+ MLXSW_REG_RATR_TYPE_ETHERNET,
+ /* IPoIB Unicast without GRH.
+ * Reserved for Spectrum.
+ */
+ MLXSW_REG_RATR_TYPE_IPOIB_UC,
+ /* IPoIB Unicast with GRH. Supported only in table 0 (Ethernet unicast
+ * adjacency).
+ * Reserved for Spectrum.
+ */
+ MLXSW_REG_RATR_TYPE_IPOIB_UC_W_GRH,
+ /* IPoIB Multicast.
+ * Reserved for Spectrum.
+ */
+ MLXSW_REG_RATR_TYPE_IPOIB_MC,
+ /* MPLS.
+ * Reserved for SwitchX/-2.
+ */
+ MLXSW_REG_RATR_TYPE_MPLS,
+ /* IPinIP Encap.
+ * Reserved for SwitchX/-2.
+ */
+ MLXSW_REG_RATR_TYPE_IPIP,
+};
+
+/* reg_ratr_type
+ * Adjacency entry type.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ratr, type, 0x04, 28, 4);
+
+/* reg_ratr_adjacency_index_low
+ * Bits 15:0 of index into the adjacency table.
+ * For SwitchX and SwitchX-2, the adjacency table is linear and
+ * used for adjacency entries only.
+ * For Spectrum, the index is to the KVD linear.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ratr, adjacency_index_low, 0x04, 0, 16);
+
+/* reg_ratr_egress_router_interface
+ * Range is 0 .. cap_max_router_interfaces - 1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ratr, egress_router_interface, 0x08, 0, 16);
+
+enum mlxsw_reg_ratr_trap_action {
+ MLXSW_REG_RATR_TRAP_ACTION_NOP,
+ MLXSW_REG_RATR_TRAP_ACTION_TRAP,
+ MLXSW_REG_RATR_TRAP_ACTION_MIRROR_TO_CPU,
+ MLXSW_REG_RATR_TRAP_ACTION_MIRROR,
+ MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS,
+};
+
+/* reg_ratr_trap_action
+ * see mlxsw_reg_ratr_trap_action
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ratr, trap_action, 0x0C, 28, 4);
+
+/* reg_ratr_adjacency_index_high
+ * Bits 23:16 of the adjacency_index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ratr, adjacency_index_high, 0x0C, 16, 8);
+
+enum mlxsw_reg_ratr_trap_id {
+ MLXSW_REG_RATR_TRAP_ID_RTR_EGRESS0,
+ MLXSW_REG_RATR_TRAP_ID_RTR_EGRESS1,
+};
+
+/* reg_ratr_trap_id
+ * Trap ID to be reported to CPU.
+ * Trap-ID is RTR_EGRESS0 or RTR_EGRESS1.
+ * For trap_action of NOP, MIRROR and DISCARD_ERROR, trap_id is reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ratr, trap_id, 0x0C, 0, 8);
+
+/* reg_ratr_eth_destination_mac
+ * MAC address of the destination next-hop.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ratr, eth_destination_mac, 0x12, 6);
+
+enum mlxsw_reg_ratr_ipip_type {
+ /* IPv4, address set by mlxsw_reg_ratr_ipip_ipv4_udip. */
+ MLXSW_REG_RATR_IPIP_TYPE_IPV4,
+ /* IPv6, address set by mlxsw_reg_ratr_ipip_ipv6_ptr. */
+ MLXSW_REG_RATR_IPIP_TYPE_IPV6,
+};
+
+/* reg_ratr_ipip_type
+ * Underlay destination ip type.
+ * Note: the type field must match the protocol of the router interface.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ratr, ipip_type, 0x10, 16, 4);
+
+/* reg_ratr_ipip_ipv4_udip
+ * Underlay ipv4 dip.
+ * Reserved when ipip_type is IPv6.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ratr, ipip_ipv4_udip, 0x18, 0, 32);
+
+/* reg_ratr_ipip_ipv6_ptr
+ * Pointer to IPv6 underlay destination ip address.
+ * For Spectrum: Pointer to KVD linear space.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ratr, ipip_ipv6_ptr, 0x1C, 0, 24);
+
+enum mlxsw_reg_flow_counter_set_type {
+ /* No count */
+ MLXSW_REG_FLOW_COUNTER_SET_TYPE_NO_COUNT = 0x00,
+ /* Count packets and bytes */
+ MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES = 0x03,
+ /* Count only packets */
+ MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS = 0x05,
+};
+
+/* reg_ratr_counter_set_type
+ * Counter set type for flow counters
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ratr, counter_set_type, 0x28, 24, 8);
+
+/* reg_ratr_counter_index
+ * Counter index for flow counters
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ratr, counter_index, 0x28, 0, 24);
+
+static inline void
+mlxsw_reg_ratr_pack(char *payload,
+ enum mlxsw_reg_ratr_op op, bool valid,
+ enum mlxsw_reg_ratr_type type,
+ u32 adjacency_index, u16 egress_rif)
+{
+ MLXSW_REG_ZERO(ratr, payload);
+ mlxsw_reg_ratr_op_set(payload, op);
+ mlxsw_reg_ratr_v_set(payload, valid);
+ mlxsw_reg_ratr_type_set(payload, type);
+ mlxsw_reg_ratr_adjacency_index_low_set(payload, adjacency_index);
+ mlxsw_reg_ratr_adjacency_index_high_set(payload, adjacency_index >> 16);
+ mlxsw_reg_ratr_egress_router_interface_set(payload, egress_rif);
+}
+
+static inline void mlxsw_reg_ratr_eth_entry_pack(char *payload,
+ const char *dest_mac)
+{
+ mlxsw_reg_ratr_eth_destination_mac_memcpy_to(payload, dest_mac);
+}
+
+static inline void mlxsw_reg_ratr_ipip4_entry_pack(char *payload, u32 ipv4_udip)
+{
+ mlxsw_reg_ratr_ipip_type_set(payload, MLXSW_REG_RATR_IPIP_TYPE_IPV4);
+ mlxsw_reg_ratr_ipip_ipv4_udip_set(payload, ipv4_udip);
+}
+
+static inline void mlxsw_reg_ratr_ipip6_entry_pack(char *payload, u32 ipv6_ptr)
+{
+ mlxsw_reg_ratr_ipip_type_set(payload, MLXSW_REG_RATR_IPIP_TYPE_IPV6);
+ mlxsw_reg_ratr_ipip_ipv6_ptr_set(payload, ipv6_ptr);
+}
+
+static inline void mlxsw_reg_ratr_counter_pack(char *payload, u64 counter_index,
+ bool counter_enable)
+{
+ enum mlxsw_reg_flow_counter_set_type set_type;
+
+ if (counter_enable)
+ set_type = MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES;
+ else
+ set_type = MLXSW_REG_FLOW_COUNTER_SET_TYPE_NO_COUNT;
+
+ mlxsw_reg_ratr_counter_index_set(payload, counter_index);
+ mlxsw_reg_ratr_counter_set_type_set(payload, set_type);
+}
+
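+/* Usage sketch (illustration only): write a valid Ethernet adjacency entry
+ * pointing at an egress RIF with a given destination MAC. The function name
+ * is hypothetical.
+ */
+static inline int mlxsw_example_ratr_eth_write(struct mlxsw_core *mlxsw_core,
+ u32 adjacency_index,
+ u16 egress_rif,
+ const char *dest_mac)
+{
+ char ratr_pl[MLXSW_REG_RATR_LEN];
+
+ mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
+ MLXSW_REG_RATR_TYPE_ETHERNET, adjacency_index,
+ egress_rif);
+ mlxsw_reg_ratr_eth_entry_pack(ratr_pl, dest_mac);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(ratr), ratr_pl);
+}
+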
+/* RDPM - Router DSCP to Priority Mapping
+ * --------------------------------------
+ * Controls the mapping from DSCP field to switch priority on routed packets
+ */
+#define MLXSW_REG_RDPM_ID 0x8009
+#define MLXSW_REG_RDPM_BASE_LEN 0x00
+#define MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN 0x01
+#define MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT 64
+#define MLXSW_REG_RDPM_LEN 0x40
+#define MLXSW_REG_RDPM_LAST_ENTRY (MLXSW_REG_RDPM_BASE_LEN + \
+ MLXSW_REG_RDPM_LEN - \
+ MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN)
+
+MLXSW_REG_DEFINE(rdpm, MLXSW_REG_RDPM_ID, MLXSW_REG_RDPM_LEN);
+
+/* reg_dscp_entry_e
+ * Enable update of the specific entry
+ * Access: Index
+ */
+MLXSW_ITEM8_INDEXED(reg, rdpm, dscp_entry_e, MLXSW_REG_RDPM_LAST_ENTRY, 7, 1,
+ -MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_dscp_entry_prio
+ * Switch Priority
+ * Access: RW
+ */
+MLXSW_ITEM8_INDEXED(reg, rdpm, dscp_entry_prio, MLXSW_REG_RDPM_LAST_ENTRY, 0, 4,
+ -MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN, 0x00, false);
+
+static inline void mlxsw_reg_rdpm_pack(char *payload, unsigned short index,
+ u8 prio)
+{
+ mlxsw_reg_rdpm_dscp_entry_e_set(payload, index, 1);
+ mlxsw_reg_rdpm_dscp_entry_prio_set(payload, index, prio);
+}
+
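+/* Usage sketch (illustration only): map every DSCP value to a switch
+ * priority derived from its upper three bits; a single RDPM write then
+ * updates all the enabled entries. The mapping policy and function name are
+ * hypothetical.
+ */
+static inline int mlxsw_example_rdpm_dscp_map(struct mlxsw_core *mlxsw_core)
+{
+ char rdpm_pl[MLXSW_REG_RDPM_LEN] = {};
+ unsigned short i;
+
+ for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
+ mlxsw_reg_rdpm_pack(rdpm_pl, i, i >> 3);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(rdpm), rdpm_pl);
+}
+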
+/* RICNT - Router Interface Counter Register
+ * -----------------------------------------
+ * The RICNT register retrieves per router interface performance counters.
+ */
+#define MLXSW_REG_RICNT_ID 0x800B
+#define MLXSW_REG_RICNT_LEN 0x100
+
+MLXSW_REG_DEFINE(ricnt, MLXSW_REG_RICNT_ID, MLXSW_REG_RICNT_LEN);
+
+/* reg_ricnt_counter_index
+ * Counter index
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ricnt, counter_index, 0x04, 0, 24);
+
+enum mlxsw_reg_ricnt_counter_set_type {
+ /* No Count. */
+ MLXSW_REG_RICNT_COUNTER_SET_TYPE_NO_COUNT = 0x00,
+ /* Basic. Used for router interfaces, counting the following:
+ * - Error and Discard counters.
+ * - Unicast, Multicast and Broadcast counters. Sharing the
+ * same set of counters for the different type of traffic
+ * (IPv4, IPv6 and mpls).
+ */
+ MLXSW_REG_RICNT_COUNTER_SET_TYPE_BASIC = 0x09,
+};
+
+/* reg_ricnt_counter_set_type
+ * Counter Set Type for router interface counter
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ricnt, counter_set_type, 0x04, 24, 8);
+
+enum mlxsw_reg_ricnt_opcode {
+ /* Nop. Supported only for read access. */
+ MLXSW_REG_RICNT_OPCODE_NOP = 0x00,
+ /* Clear. Setting the clr bit will reset the counter value for
+ * all counters of the specified Router Interface.
+ */
+ MLXSW_REG_RICNT_OPCODE_CLEAR = 0x08,
+};
+
+/* reg_ricnt_opcode
+ * Opcode
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ricnt, op, 0x00, 28, 4);
+
+/* reg_ricnt_good_unicast_packets
+ * good unicast packets.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, good_unicast_packets, 0x08, 0, 64);
+
+/* reg_ricnt_good_multicast_packets
+ * good multicast packets.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, good_multicast_packets, 0x10, 0, 64);
+
+/* reg_ricnt_good_broadcast_packets
+ * good broadcast packets
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, good_broadcast_packets, 0x18, 0, 64);
+
+/* reg_ricnt_good_unicast_bytes
+ * A count of L3 data and padding octets not including L2 headers
+ * for good unicast frames.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, good_unicast_bytes, 0x20, 0, 64);
+
+/* reg_ricnt_good_multicast_bytes
+ * A count of L3 data and padding octets not including L2 headers
+ * for good multicast frames.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, good_multicast_bytes, 0x28, 0, 64);
+
+/* reg_ricnt_good_broadcast_bytes
+ * A count of L3 data and padding octets not including L2 headers
+ * for good broadcast frames.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, good_broadcast_bytes, 0x30, 0, 64);
+
+/* reg_ricnt_error_packets
+ * A count of errored frames that do not pass the router checks.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, error_packets, 0x38, 0, 64);
+
+/* reg_ricnt_discard_packets
+ * A count of non-errored frames that do not pass the router checks.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, discard_packets, 0x40, 0, 64);
+
+/* reg_ricnt_error_bytes
+ * A count of L3 data and padding octets not including L2 headers
+ * for errored frames.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, error_bytes, 0x48, 0, 64);
+
+/* reg_ricnt_discard_bytes
+ * A count of L3 data and padding octets not including L2 headers
+ * for non-errored frames that do not pass the router checks.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, discard_bytes, 0x50, 0, 64);
+
+static inline void mlxsw_reg_ricnt_pack(char *payload, u32 index,
+ enum mlxsw_reg_ricnt_opcode op)
+{
+ MLXSW_REG_ZERO(ricnt, payload);
+ mlxsw_reg_ricnt_op_set(payload, op);
+ mlxsw_reg_ricnt_counter_index_set(payload, index);
+ mlxsw_reg_ricnt_counter_set_type_set(payload,
+ MLXSW_REG_RICNT_COUNTER_SET_TYPE_BASIC);
+}
+
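+/* Usage sketch (illustration only): read the basic counter set of a router
+ * interface counter and clear it in the same operation. Only the good
+ * unicast packet count is extracted here; the function name is hypothetical.
+ */
+static inline int mlxsw_example_ricnt_uc_packets_get(struct mlxsw_core *mlxsw_core,
+ u32 counter_index,
+ u64 *p_packets)
+{
+ char ricnt_pl[MLXSW_REG_RICNT_LEN];
+ int err;
+
+ mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
+ MLXSW_REG_RICNT_OPCODE_CLEAR);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(ricnt), ricnt_pl);
+ if (err)
+ return err;
+ *p_packets = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
+ return 0;
+}
+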
+/* RRCR - Router Rules Copy Register Layout
+ * ----------------------------------------
+ * This register is used for moving and copying route entry rules.
+ */
+#define MLXSW_REG_RRCR_ID 0x800F
+#define MLXSW_REG_RRCR_LEN 0x24
+
+MLXSW_REG_DEFINE(rrcr, MLXSW_REG_RRCR_ID, MLXSW_REG_RRCR_LEN);
+
+enum mlxsw_reg_rrcr_op {
+ /* Move rules */
+ MLXSW_REG_RRCR_OP_MOVE,
+ /* Copy rules */
+ MLXSW_REG_RRCR_OP_COPY,
+};
+
+/* reg_rrcr_op
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, rrcr, op, 0x00, 28, 4);
+
+/* reg_rrcr_offset
+ * Offset within the region from which to copy/move.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rrcr, offset, 0x00, 0, 16);
+
+/* reg_rrcr_size
+ * The number of rules to copy/move.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, rrcr, size, 0x04, 0, 16);
+
+/* reg_rrcr_table_id
+ * Identifier of the table on which to perform the operation. Encoding is the
+ * same as in RTAR.key_type
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rrcr, table_id, 0x10, 0, 4);
+
+/* reg_rrcr_dest_offset
+ * Offset within the region to which to copy/move
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rrcr, dest_offset, 0x20, 0, 16);
+
+static inline void mlxsw_reg_rrcr_pack(char *payload, enum mlxsw_reg_rrcr_op op,
+ u16 offset, u16 size,
+ enum mlxsw_reg_rtar_key_type table_id,
+ u16 dest_offset)
+{
+ MLXSW_REG_ZERO(rrcr, payload);
+ mlxsw_reg_rrcr_op_set(payload, op);
+ mlxsw_reg_rrcr_offset_set(payload, offset);
+ mlxsw_reg_rrcr_size_set(payload, size);
+ mlxsw_reg_rrcr_table_id_set(payload, table_id);
+ mlxsw_reg_rrcr_dest_offset_set(payload, dest_offset);
+}
+
+/* RALTA - Router Algorithmic LPM Tree Allocation Register
+ * -------------------------------------------------------
+ * RALTA is used to allocate the LPM trees of the SHSPM method.
+ */
+#define MLXSW_REG_RALTA_ID 0x8010
+#define MLXSW_REG_RALTA_LEN 0x04
+
+MLXSW_REG_DEFINE(ralta, MLXSW_REG_RALTA_ID, MLXSW_REG_RALTA_LEN);
+
+/* reg_ralta_op
+ * opcode (valid for Write, must be 0 on Read)
+ * 0 - allocate a tree
+ * 1 - deallocate a tree
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ralta, op, 0x00, 28, 2);
+
+enum mlxsw_reg_ralxx_protocol {
+ MLXSW_REG_RALXX_PROTOCOL_IPV4,
+ MLXSW_REG_RALXX_PROTOCOL_IPV6,
+};
+
+/* reg_ralta_protocol
+ * Protocol.
+ * Deallocation opcode: Reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralta, protocol, 0x00, 24, 4);
+
+/* reg_ralta_tree_id
+ * Tree identifier, numbered from 1..cap_shspm_max_trees-1 and managed
+ * by software.
+ * Note that tree_id 0 is allocated for a default-route tree.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ralta, tree_id, 0x00, 0, 8);
+
+static inline void mlxsw_reg_ralta_pack(char *payload, bool alloc,
+ enum mlxsw_reg_ralxx_protocol protocol,
+ u8 tree_id)
+{
+ MLXSW_REG_ZERO(ralta, payload);
+ mlxsw_reg_ralta_op_set(payload, !alloc);
+ mlxsw_reg_ralta_protocol_set(payload, protocol);
+ mlxsw_reg_ralta_tree_id_set(payload, tree_id);
+}
+
+/* RALST - Router Algorithmic LPM Structure Tree Register
+ * ------------------------------------------------------
+ * RALST is used to set and query the structure of an LPM tree.
+ * The tree must be a sorted binary tree in which each node is a bin
+ * tagged with the length of the prefixes the lookup will refer to.
+ * Therefore, bin X refers to a set of entries with prefixes of X bits to
+ * match against the destination address. Bin 0 indicates the default
+ * action, used when no prefix matches.
+ */
+#define MLXSW_REG_RALST_ID 0x8011
+#define MLXSW_REG_RALST_LEN 0x104
+
+MLXSW_REG_DEFINE(ralst, MLXSW_REG_RALST_ID, MLXSW_REG_RALST_LEN);
+
+/* reg_ralst_root_bin
+ * The bin number of the root bin.
+ * 0 < root_bin <= (length of the IP address).
+ * For a default-route tree, configure 0xff.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralst, root_bin, 0x00, 16, 8);
+
+/* reg_ralst_tree_id
+ * Tree identifier numbered from 1..(cap_shspm_max_trees-1).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ralst, tree_id, 0x00, 0, 8);
+
+#define MLXSW_REG_RALST_BIN_NO_CHILD 0xff
+#define MLXSW_REG_RALST_BIN_OFFSET 0x04
+#define MLXSW_REG_RALST_BIN_COUNT 128
+
+/* reg_ralst_left_child_bin
+ * Holding the children of the bin according to the stored tree's structure.
+ * For trees composed of less than 4 blocks, the bins in excess are reserved.
+ * Note that tree_id 0 is allocated for a default-route tree, bins are 0xff
+ * Access: RW
+ */
+MLXSW_ITEM16_INDEXED(reg, ralst, left_child_bin, 0x04, 8, 8, 0x02, 0x00, false);
+
+/* reg_ralst_right_child_bin
+ * Holding the children of the bin according to the stored tree's structure.
+ * For trees composed of less than 4 blocks, the bins in excess are reserved.
+ * Note that tree_id 0 is allocated for a default-route tree, bins are 0xff
+ * Access: RW
+ */
+MLXSW_ITEM16_INDEXED(reg, ralst, right_child_bin, 0x04, 0, 8, 0x02, 0x00,
+ false);
+
+static inline void mlxsw_reg_ralst_pack(char *payload, u8 root_bin, u8 tree_id)
+{
+ MLXSW_REG_ZERO(ralst, payload);
+
+ /* Initialize all bins to have no left or right child */
+ memset(payload + MLXSW_REG_RALST_BIN_OFFSET,
+ MLXSW_REG_RALST_BIN_NO_CHILD, MLXSW_REG_RALST_BIN_COUNT * 2);
+
+ mlxsw_reg_ralst_root_bin_set(payload, root_bin);
+ mlxsw_reg_ralst_tree_id_set(payload, tree_id);
+}
+
+static inline void mlxsw_reg_ralst_bin_pack(char *payload, u8 bin_number,
+ u8 left_child_bin,
+ u8 right_child_bin)
+{
+ int bin_index = bin_number - 1;
+
+ mlxsw_reg_ralst_left_child_bin_set(payload, bin_index, left_child_bin);
+ mlxsw_reg_ralst_right_child_bin_set(payload, bin_index,
+ right_child_bin);
+}
+
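+/* Usage sketch (illustration only): describe one valid layout for a small
+ * IPv4 tree covering prefix lengths /16, /24 and /32: rooted at the longest
+ * prefix bin, with each left child pointing at the next shorter bin. The
+ * tree must have been allocated with RALTA first; the function name is
+ * hypothetical.
+ */
+static inline int mlxsw_example_ralst_small_tree_set(struct mlxsw_core *mlxsw_core,
+ u8 tree_id)
+{
+ char ralst_pl[MLXSW_REG_RALST_LEN];
+
+ mlxsw_reg_ralst_pack(ralst_pl, 32, tree_id);
+ mlxsw_reg_ralst_bin_pack(ralst_pl, 32, 24,
+ MLXSW_REG_RALST_BIN_NO_CHILD);
+ mlxsw_reg_ralst_bin_pack(ralst_pl, 24, 16,
+ MLXSW_REG_RALST_BIN_NO_CHILD);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(ralst), ralst_pl);
+}
+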
+/* RALTB - Router Algorithmic LPM Tree Binding Register
+ * ----------------------------------------------------
+ * RALTB is used to bind virtual router and protocol to an allocated LPM tree.
+ */
+#define MLXSW_REG_RALTB_ID 0x8012
+#define MLXSW_REG_RALTB_LEN 0x04
+
+MLXSW_REG_DEFINE(raltb, MLXSW_REG_RALTB_ID, MLXSW_REG_RALTB_LEN);
+
+/* reg_raltb_virtual_router
+ * Virtual Router ID
+ * Range is 0..cap_max_virtual_routers-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, raltb, virtual_router, 0x00, 16, 16);
+
+/* reg_raltb_protocol
+ * Protocol.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, raltb, protocol, 0x00, 12, 4);
+
+/* reg_raltb_tree_id
+ * Tree to be used for the {virtual_router, protocol}
+ * Tree identifier numbered from 1..(cap_shspm_max_trees-1).
+ * By default, all Unicast IPv4 and IPv6 are bound to tree_id 0.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, raltb, tree_id, 0x00, 0, 8);
+
+static inline void mlxsw_reg_raltb_pack(char *payload, u16 virtual_router,
+ enum mlxsw_reg_ralxx_protocol protocol,
+ u8 tree_id)
+{
+ MLXSW_REG_ZERO(raltb, payload);
+ mlxsw_reg_raltb_virtual_router_set(payload, virtual_router);
+ mlxsw_reg_raltb_protocol_set(payload, protocol);
+ mlxsw_reg_raltb_tree_id_set(payload, tree_id);
+}
+
+/* RALUE - Router Algorithmic LPM Unicast Entry Register
+ * -----------------------------------------------------
+ * RALUE is used to configure and query LPM entries that serve
+ * the Unicast protocols.
+ */
+#define MLXSW_REG_RALUE_ID 0x8013
+#define MLXSW_REG_RALUE_LEN 0x38
+
+MLXSW_REG_DEFINE(ralue, MLXSW_REG_RALUE_ID, MLXSW_REG_RALUE_LEN);
+
+/* reg_ralue_protocol
+ * Protocol.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ralue, protocol, 0x00, 24, 4);
+
+enum mlxsw_reg_ralue_op {
+ /* Read operation. If entry doesn't exist, the operation fails. */
+ MLXSW_REG_RALUE_OP_QUERY_READ = 0,
+ /* Clear on read operation. Used to read entry and
+ * clear Activity bit.
+ */
+ MLXSW_REG_RALUE_OP_QUERY_CLEAR = 1,
+ /* Write operation. Used to write a new entry to the table. All RW
+ * fields are written for new entry. Activity bit is set
+ * for new entries.
+ */
+ MLXSW_REG_RALUE_OP_WRITE_WRITE = 0,
+ /* Update operation. Used to update an existing route entry and
+ * only update the RW fields that are detailed in the field
+ * op_u_mask. If entry doesn't exist, the operation fails.
+ */
+ MLXSW_REG_RALUE_OP_WRITE_UPDATE = 1,
+ /* Clear activity. The Activity bit (the field a) is cleared
+ * for the entry.
+ */
+ MLXSW_REG_RALUE_OP_WRITE_CLEAR = 2,
+ /* Delete operation. Used to delete an existing entry. If entry
+ * doesn't exist, the operation fails.
+ */
+ MLXSW_REG_RALUE_OP_WRITE_DELETE = 3,
+};
+
+/* reg_ralue_op
+ * Operation.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ralue, op, 0x00, 20, 3);
+
+/* reg_ralue_a
+ * Activity. Set for new entries. Set if a packet lookup has hit on the
+ * specific entry, only if the entry is a route. To clear the a bit, use
+ * "clear activity" op.
+ * Enabled by activity_dis in RGCR
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ralue, a, 0x00, 16, 1);
+
+/* reg_ralue_virtual_router
+ * Virtual Router ID
+ * Range is 0..cap_max_virtual_routers-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ralue, virtual_router, 0x04, 16, 16);
+
+#define MLXSW_REG_RALUE_OP_U_MASK_ENTRY_TYPE BIT(0)
+#define MLXSW_REG_RALUE_OP_U_MASK_BMP_LEN BIT(1)
+#define MLXSW_REG_RALUE_OP_U_MASK_ACTION BIT(2)
+
+/* reg_ralue_op_u_mask
+ * opcode update mask.
+ * On read operation, this field is reserved.
+ * This field is valid for update opcode, otherwise - reserved.
+ * This field is a bitmask of the fields that should be updated.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, ralue, op_u_mask, 0x04, 8, 3);
+
+/* reg_ralue_prefix_len
+ * Number of bits in the prefix of the LPM route.
+ * Note that for IPv6 prefixes, if prefix_len>64 the entry consumes
+ * two entries in the physical HW table.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ralue, prefix_len, 0x08, 0, 8);
+
+/* reg_ralue_dip*
+ * The prefix of the route or of the marker that the object of the LPM
+ * is compared with. The most significant bits of the dip are the prefix.
+ * The least significant bits must be '0' if the prefix_len is smaller
+ * than 128 for IPv6 or smaller than 32 for IPv4.
+ * IPv4 address uses bits dip[31:0] and bits dip[127:32] are reserved.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ralue, dip4, 0x18, 0, 32);
+MLXSW_ITEM_BUF(reg, ralue, dip6, 0x0C, 16);
+
+enum mlxsw_reg_ralue_entry_type {
+ MLXSW_REG_RALUE_ENTRY_TYPE_MARKER_ENTRY = 1,
+ MLXSW_REG_RALUE_ENTRY_TYPE_ROUTE_ENTRY = 2,
+ MLXSW_REG_RALUE_ENTRY_TYPE_MARKER_AND_ROUTE_ENTRY = 3,
+};
+
+/* reg_ralue_entry_type
+ * Entry type.
+ * Note - for Marker entries, the action_type and action fields are reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, entry_type, 0x1C, 30, 2);
+
+/* reg_ralue_bmp_len
+ * The best match prefix length in the case that there is no match for
+ * longer prefixes.
+ * If (entry_type != MARKER_ENTRY), bmp_len must be equal to prefix_len
+ * Note for any update operation with entry_type modification this
+ * field must be set.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, bmp_len, 0x1C, 16, 8);
+
+enum mlxsw_reg_ralue_action_type {
+ MLXSW_REG_RALUE_ACTION_TYPE_REMOTE,
+ MLXSW_REG_RALUE_ACTION_TYPE_LOCAL,
+ MLXSW_REG_RALUE_ACTION_TYPE_IP2ME,
+};
+
+/* reg_ralue_action_type
+ * Action Type
+ * Indicates how the IP address is connected.
+ * It can be connected to a local subnet through local_erif or can be
+ * on a remote subnet connected through a next-hop router,
+ * or transmitted to the CPU.
+ * Reserved when entry_type = MARKER_ENTRY
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, action_type, 0x1C, 0, 2);
+
+enum mlxsw_reg_ralue_trap_action {
+ MLXSW_REG_RALUE_TRAP_ACTION_NOP,
+ MLXSW_REG_RALUE_TRAP_ACTION_TRAP,
+ MLXSW_REG_RALUE_TRAP_ACTION_MIRROR_TO_CPU,
+ MLXSW_REG_RALUE_TRAP_ACTION_MIRROR,
+ MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR,
+};
+
+/* reg_ralue_trap_action
+ * Trap action.
+ * For IP2ME action, only NOP and MIRROR are possible.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, trap_action, 0x20, 28, 4);
+
+/* reg_ralue_trap_id
+ * Trap ID to be reported to CPU.
+ * Trap ID is RTR_INGRESS0 or RTR_INGRESS1.
+ * For trap_action of NOP, MIRROR and DISCARD_ERROR, trap_id is reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, trap_id, 0x20, 0, 9);
+
+/* reg_ralue_adjacency_index
+ * Points to the first entry of the group-based ECMP.
+ * Only relevant in case of REMOTE action.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, adjacency_index, 0x24, 0, 24);
+
+/* reg_ralue_ecmp_size
+ * Number of sequential entries starting from the adjacency_index (the
+ * ECMP group size).
+ * The valid range is 1-64, 512, 1024, 2048 and 4096.
+ * Reserved when trap_action is TRAP or DISCARD_ERROR.
+ * Only relevant in case of REMOTE action.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, ecmp_size, 0x28, 0, 13);
+
+/* reg_ralue_local_erif
+ * Egress Router Interface.
+ * Only relevant in case of LOCAL action.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, local_erif, 0x24, 0, 16);
+
+/* reg_ralue_ip2me_v
+ * Valid bit for the tunnel_ptr field.
+ * If valid = 0 then trap to CPU as IP2ME trap ID.
+ * If valid = 1 and the packet format allows NVE or IPinIP tunnel
+ * decapsulation then tunnel decapsulation is done.
+ * If valid = 1 and packet format does not allow NVE or IPinIP tunnel
+ * decapsulation then trap as IP2ME trap ID.
+ * Only relevant in case of IP2ME action.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, ip2me_v, 0x24, 31, 1);
+
+/* reg_ralue_ip2me_tunnel_ptr
+ * Tunnel Pointer for NVE or IPinIP tunnel decapsulation.
+ * For Spectrum, pointer to KVD Linear.
+ * Only relevant in case of IP2ME action.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, ip2me_tunnel_ptr, 0x24, 0, 24);
+
+static inline void mlxsw_reg_ralue_pack(char *payload,
+ enum mlxsw_reg_ralxx_protocol protocol,
+ enum mlxsw_reg_ralue_op op,
+ u16 virtual_router, u8 prefix_len)
+{
+ MLXSW_REG_ZERO(ralue, payload);
+ mlxsw_reg_ralue_protocol_set(payload, protocol);
+ mlxsw_reg_ralue_op_set(payload, op);
+ mlxsw_reg_ralue_virtual_router_set(payload, virtual_router);
+ mlxsw_reg_ralue_prefix_len_set(payload, prefix_len);
+ mlxsw_reg_ralue_entry_type_set(payload,
+ MLXSW_REG_RALUE_ENTRY_TYPE_ROUTE_ENTRY);
+ mlxsw_reg_ralue_bmp_len_set(payload, prefix_len);
+}
+
+static inline void mlxsw_reg_ralue_pack4(char *payload,
+ enum mlxsw_reg_ralxx_protocol protocol,
+ enum mlxsw_reg_ralue_op op,
+ u16 virtual_router, u8 prefix_len,
+ u32 dip)
+{
+ mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len);
+ mlxsw_reg_ralue_dip4_set(payload, dip);
+}
+
+static inline void mlxsw_reg_ralue_pack6(char *payload,
+ enum mlxsw_reg_ralxx_protocol protocol,
+ enum mlxsw_reg_ralue_op op,
+ u16 virtual_router, u8 prefix_len,
+ const void *dip)
+{
+ mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len);
+ mlxsw_reg_ralue_dip6_memcpy_to(payload, dip);
+}
+
+static inline void
+mlxsw_reg_ralue_act_remote_pack(char *payload,
+ enum mlxsw_reg_ralue_trap_action trap_action,
+ u16 trap_id, u32 adjacency_index, u16 ecmp_size)
+{
+ mlxsw_reg_ralue_action_type_set(payload,
+ MLXSW_REG_RALUE_ACTION_TYPE_REMOTE);
+ mlxsw_reg_ralue_trap_action_set(payload, trap_action);
+ mlxsw_reg_ralue_trap_id_set(payload, trap_id);
+ mlxsw_reg_ralue_adjacency_index_set(payload, adjacency_index);
+ mlxsw_reg_ralue_ecmp_size_set(payload, ecmp_size);
+}
+
+static inline void
+mlxsw_reg_ralue_act_local_pack(char *payload,
+ enum mlxsw_reg_ralue_trap_action trap_action,
+ u16 trap_id, u16 local_erif)
+{
+ mlxsw_reg_ralue_action_type_set(payload,
+ MLXSW_REG_RALUE_ACTION_TYPE_LOCAL);
+ mlxsw_reg_ralue_trap_action_set(payload, trap_action);
+ mlxsw_reg_ralue_trap_id_set(payload, trap_id);
+ mlxsw_reg_ralue_local_erif_set(payload, local_erif);
+}
+
+static inline void
+mlxsw_reg_ralue_act_ip2me_pack(char *payload)
+{
+ mlxsw_reg_ralue_action_type_set(payload,
+ MLXSW_REG_RALUE_ACTION_TYPE_IP2ME);
+}
+
+static inline void
+mlxsw_reg_ralue_act_ip2me_tun_pack(char *payload, u32 tunnel_ptr)
+{
+ mlxsw_reg_ralue_action_type_set(payload,
+ MLXSW_REG_RALUE_ACTION_TYPE_IP2ME);
+ mlxsw_reg_ralue_ip2me_v_set(payload, 1);
+ mlxsw_reg_ralue_ip2me_tunnel_ptr_set(payload, tunnel_ptr);
+}
+
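+/* Usage sketch (illustration only): write an IPv4 LPM route that forwards
+ * matching packets through a local egress RIF without trapping. The function
+ * name is hypothetical.
+ */
+static inline int mlxsw_example_ralue_local_route_write(struct mlxsw_core *mlxsw_core,
+ u16 virtual_router,
+ u32 dip, u8 prefix_len,
+ u16 local_erif)
+{
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
+
+ mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_REG_RALXX_PROTOCOL_IPV4,
+ MLXSW_REG_RALUE_OP_WRITE_WRITE, virtual_router,
+ prefix_len, dip);
+ mlxsw_reg_ralue_act_local_pack(ralue_pl,
+ MLXSW_REG_RALUE_TRAP_ACTION_NOP, 0,
+ local_erif);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(ralue), ralue_pl);
+}
+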
+/* RAUHT - Router Algorithmic LPM Unicast Host Table Register
+ * ----------------------------------------------------------
+ * The RAUHT register is used to configure and query the Unicast Host table in
+ * devices that implement the Algorithmic LPM.
+ */
+#define MLXSW_REG_RAUHT_ID 0x8014
+#define MLXSW_REG_RAUHT_LEN 0x74
+
+MLXSW_REG_DEFINE(rauht, MLXSW_REG_RAUHT_ID, MLXSW_REG_RAUHT_LEN);
+
+enum mlxsw_reg_rauht_type {
+ MLXSW_REG_RAUHT_TYPE_IPV4,
+ MLXSW_REG_RAUHT_TYPE_IPV6,
+};
+
+/* reg_rauht_type
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauht, type, 0x00, 24, 2);
+
+enum mlxsw_reg_rauht_op {
+ MLXSW_REG_RAUHT_OP_QUERY_READ = 0,
+ /* Read operation */
+ MLXSW_REG_RAUHT_OP_QUERY_CLEAR_ON_READ = 1,
+ /* Clear on read operation. Used to read entry and clear
+ * activity bit.
+ */
+ MLXSW_REG_RAUHT_OP_WRITE_ADD = 0,
+ /* Add. Used to write a new entry to the table. All R/W fields are
+ * relevant for new entry. Activity bit is set for new entries.
+ */
+ MLXSW_REG_RAUHT_OP_WRITE_UPDATE = 1,
+ /* Update action. Used to update an existing host entry and
+ * only update the following fields:
+ * trap_action, trap_id, mac, counter_set_type, counter_index
+ */
+ MLXSW_REG_RAUHT_OP_WRITE_CLEAR_ACTIVITY = 2,
+ /* Clear activity. A bit is cleared for the entry. */
+ MLXSW_REG_RAUHT_OP_WRITE_DELETE = 3,
+ /* Delete entry */
+ MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL = 4,
+ /* Delete all host entries on a RIF. In this command, dip
+ * field is reserved.
+ */
+};
+
+/* reg_rauht_op
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, rauht, op, 0x00, 20, 3);
+
+/* reg_rauht_a
+ * Activity. Set for new entries. Set if a packet lookup has hit on
+ * the specific entry.
+ * To clear the a bit, use "clear activity" op.
+ * Enabled by activity_dis in RGCR
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, rauht, a, 0x00, 16, 1);
+
+/* reg_rauht_rif
+ * Router Interface
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauht, rif, 0x00, 0, 16);
+
+/* reg_rauht_dip*
+ * Destination address.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauht, dip4, 0x1C, 0x0, 32);
+MLXSW_ITEM_BUF(reg, rauht, dip6, 0x10, 16);
+
+enum mlxsw_reg_rauht_trap_action {
+ MLXSW_REG_RAUHT_TRAP_ACTION_NOP,
+ MLXSW_REG_RAUHT_TRAP_ACTION_TRAP,
+ MLXSW_REG_RAUHT_TRAP_ACTION_MIRROR_TO_CPU,
+ MLXSW_REG_RAUHT_TRAP_ACTION_MIRROR,
+ MLXSW_REG_RAUHT_TRAP_ACTION_DISCARD_ERRORS,
+};
+
+/* reg_rauht_trap_action
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rauht, trap_action, 0x60, 28, 4);
+
+enum mlxsw_reg_rauht_trap_id {
+ MLXSW_REG_RAUHT_TRAP_ID_RTR_EGRESS0,
+ MLXSW_REG_RAUHT_TRAP_ID_RTR_EGRESS1,
+};
+
+/* reg_rauht_trap_id
+ * Trap ID to be reported to CPU.
+ * Trap-ID is RTR_EGRESS0 or RTR_EGRESS1.
+ * For trap_action of NOP, MIRROR and DISCARD_ERROR,
+ * trap_id is reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rauht, trap_id, 0x60, 0, 9);
+
+/* reg_rauht_counter_set_type
+ * Counter set type for flow counters
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rauht, counter_set_type, 0x68, 24, 8);
+
+/* reg_rauht_counter_index
+ * Counter index for flow counters
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rauht, counter_index, 0x68, 0, 24);
+
+/* reg_rauht_mac
+ * MAC address.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, rauht, mac, 0x6E, 6);
+
+static inline void mlxsw_reg_rauht_pack(char *payload,
+ enum mlxsw_reg_rauht_op op, u16 rif,
+ const char *mac)
+{
+ MLXSW_REG_ZERO(rauht, payload);
+ mlxsw_reg_rauht_op_set(payload, op);
+ mlxsw_reg_rauht_rif_set(payload, rif);
+ mlxsw_reg_rauht_mac_memcpy_to(payload, mac);
+}
+
+static inline void mlxsw_reg_rauht_pack4(char *payload,
+ enum mlxsw_reg_rauht_op op, u16 rif,
+ const char *mac, u32 dip)
+{
+ mlxsw_reg_rauht_pack(payload, op, rif, mac);
+ mlxsw_reg_rauht_dip4_set(payload, dip);
+}
+
+static inline void mlxsw_reg_rauht_pack6(char *payload,
+ enum mlxsw_reg_rauht_op op, u16 rif,
+ const char *mac, const char *dip)
+{
+ mlxsw_reg_rauht_pack(payload, op, rif, mac);
+ mlxsw_reg_rauht_type_set(payload, MLXSW_REG_RAUHT_TYPE_IPV6);
+ mlxsw_reg_rauht_dip6_memcpy_to(payload, dip);
+}
+
+static inline void mlxsw_reg_rauht_pack_counter(char *payload,
+ u64 counter_index)
+{
+ mlxsw_reg_rauht_counter_index_set(payload, counter_index);
+ mlxsw_reg_rauht_counter_set_type_set(payload,
+ MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
+}
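+
+/* Illustrative sketch, not part of the register definition: a hypothetical
+ * helper combining the pack calls above to add an IPv4 neighbour entry with
+ * a flow counter attached.
+ */
+static inline void
+mlxsw_reg_rauht_example_add_ipv4(char *payload, u16 rif, const char *mac,
+ u32 dip, u64 counter_index)
+{
+ mlxsw_reg_rauht_pack4(payload, MLXSW_REG_RAUHT_OP_WRITE_ADD, rif, mac, dip);
+ mlxsw_reg_rauht_pack_counter(payload, counter_index);
+}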
+
+/* RALEU - Router Algorithmic LPM ECMP Update Register
+ * ---------------------------------------------------
+ * The register enables updating the ECMP section in the action for multiple
+ * LPM Unicast entries in a single operation. The update is applied to
+ * all entries of a {virtual router, protocol} tuple using the same ECMP group.
+ */
+#define MLXSW_REG_RALEU_ID 0x8015
+#define MLXSW_REG_RALEU_LEN 0x28
+
+MLXSW_REG_DEFINE(raleu, MLXSW_REG_RALEU_ID, MLXSW_REG_RALEU_LEN);
+
+/* reg_raleu_protocol
+ * Protocol.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, raleu, protocol, 0x00, 24, 4);
+
+/* reg_raleu_virtual_router
+ * Virtual Router ID
+ * Range is 0..cap_max_virtual_routers-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, raleu, virtual_router, 0x00, 0, 16);
+
+/* reg_raleu_adjacency_index
+ * Adjacency Index used for matching on the existing entries.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, raleu, adjacency_index, 0x10, 0, 24);
+
+/* reg_raleu_ecmp_size
+ * ECMP Size used for matching on the existing entries.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, raleu, ecmp_size, 0x14, 0, 13);
+
+/* reg_raleu_new_adjacency_index
+ * New Adjacency Index.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, raleu, new_adjacency_index, 0x20, 0, 24);
+
+/* reg_raleu_new_ecmp_size
+ * New ECMP Size.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, raleu, new_ecmp_size, 0x24, 0, 13);
+
+static inline void mlxsw_reg_raleu_pack(char *payload,
+ enum mlxsw_reg_ralxx_protocol protocol,
+ u16 virtual_router,
+ u32 adjacency_index, u16 ecmp_size,
+ u32 new_adjacency_index,
+ u16 new_ecmp_size)
+{
+ MLXSW_REG_ZERO(raleu, payload);
+ mlxsw_reg_raleu_protocol_set(payload, protocol);
+ mlxsw_reg_raleu_virtual_router_set(payload, virtual_router);
+ mlxsw_reg_raleu_adjacency_index_set(payload, adjacency_index);
+ mlxsw_reg_raleu_ecmp_size_set(payload, ecmp_size);
+ mlxsw_reg_raleu_new_adjacency_index_set(payload, new_adjacency_index);
+ mlxsw_reg_raleu_new_ecmp_size_set(payload, new_ecmp_size);
+}
+
+/* RAUHTD - Router Algorithmic LPM Unicast Host Table Dump Register
+ * ----------------------------------------------------------------
+ * The RAUHTD register allows dumping entries from the Router Unicast Host
+ * Table. For a given session an entry is dumped no more than one time. The
+ * first RAUHTD access after reset is a new session. A session ends when the
+ * num_rec in the response is smaller than the num_rec in the request or, for
+ * IPv4, when num_entries is smaller than 4. The clear activity affects the
+ * current session, or the last session if a new session has not started.
+ */
+#define MLXSW_REG_RAUHTD_ID 0x8018
+#define MLXSW_REG_RAUHTD_BASE_LEN 0x20
+#define MLXSW_REG_RAUHTD_REC_LEN 0x20
+#define MLXSW_REG_RAUHTD_REC_MAX_NUM 32
+#define MLXSW_REG_RAUHTD_LEN (MLXSW_REG_RAUHTD_BASE_LEN + \
+ MLXSW_REG_RAUHTD_REC_MAX_NUM * MLXSW_REG_RAUHTD_REC_LEN)
+#define MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC 4
+
+MLXSW_REG_DEFINE(rauhtd, MLXSW_REG_RAUHTD_ID, MLXSW_REG_RAUHTD_LEN);
+
+#define MLXSW_REG_RAUHTD_FILTER_A BIT(0)
+#define MLXSW_REG_RAUHTD_FILTER_RIF BIT(3)
+
+/* reg_rauhtd_filter_fields
+ * If a bit is '0' then the relevant field is ignored and the dump is done
+ * regardless of the field value.
+ * Bit0 - filter by activity: entry_a
+ * Bit3 - filter by entry rif: entry_rif
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauhtd, filter_fields, 0x00, 0, 8);
+
+enum mlxsw_reg_rauhtd_op {
+ MLXSW_REG_RAUHTD_OP_DUMP,
+ MLXSW_REG_RAUHTD_OP_DUMP_AND_CLEAR,
+};
+
+/* reg_rauhtd_op
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, rauhtd, op, 0x04, 24, 2);
+
+/* reg_rauhtd_num_rec
+ * At request: number of records requested
+ * At response: number of records dumped
+ * For IPv4, each record has 4 entries at request and up to 4 entries
+ * at response
+ * Range is 0..MLXSW_REG_RAUHTD_REC_MAX_NUM
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauhtd, num_rec, 0x04, 0, 8);
+
+/* reg_rauhtd_entry_a
+ * Dump only if activity has value of entry_a
+ * Reserved if filter_fields bit0 is '0'
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauhtd, entry_a, 0x08, 16, 1);
+
+enum mlxsw_reg_rauhtd_type {
+ MLXSW_REG_RAUHTD_TYPE_IPV4,
+ MLXSW_REG_RAUHTD_TYPE_IPV6,
+};
+
+/* reg_rauhtd_type
+ * Dump only if record type is:
+ * 0 - IPv4
+ * 1 - IPv6
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauhtd, type, 0x08, 0, 4);
+
+/* reg_rauhtd_entry_rif
+ * Dump only if RIF has value of entry_rif
+ * Reserved if filter_fields bit3 is '0'
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauhtd, entry_rif, 0x0C, 0, 16);
+
+static inline void mlxsw_reg_rauhtd_pack(char *payload,
+ enum mlxsw_reg_rauhtd_type type)
+{
+ MLXSW_REG_ZERO(rauhtd, payload);
+ mlxsw_reg_rauhtd_filter_fields_set(payload, MLXSW_REG_RAUHTD_FILTER_A);
+ mlxsw_reg_rauhtd_op_set(payload, MLXSW_REG_RAUHTD_OP_DUMP_AND_CLEAR);
+ mlxsw_reg_rauhtd_num_rec_set(payload, MLXSW_REG_RAUHTD_REC_MAX_NUM);
+ mlxsw_reg_rauhtd_entry_a_set(payload, 1);
+ mlxsw_reg_rauhtd_type_set(payload, type);
+}
+
+/* reg_rauhtd_ipv4_rec_num_entries
+ * Number of valid entries in this record:
+ * 0 - 1 valid entry
+ * 1 - 2 valid entries
+ * 2 - 3 valid entries
+ * 3 - 4 valid entries
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_rec_num_entries,
+ MLXSW_REG_RAUHTD_BASE_LEN, 28, 2,
+ MLXSW_REG_RAUHTD_REC_LEN, 0x00, false);
+
+/* reg_rauhtd_rec_type
+ * Record type.
+ * 0 - IPv4
+ * 1 - IPv6
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, rauhtd, rec_type, MLXSW_REG_RAUHTD_BASE_LEN, 24, 2,
+ MLXSW_REG_RAUHTD_REC_LEN, 0x00, false);
+
+#define MLXSW_REG_RAUHTD_IPV4_ENT_LEN 0x8
+
+/* reg_rauhtd_ipv4_ent_a
+ * Activity. Set for new entries. Set if a packet lookup has hit on the
+ * specific entry.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_a, MLXSW_REG_RAUHTD_BASE_LEN, 16, 1,
+ MLXSW_REG_RAUHTD_IPV4_ENT_LEN, 0x00, false);
+
+/* reg_rauhtd_ipv4_ent_rif
+ * Router interface.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_rif, MLXSW_REG_RAUHTD_BASE_LEN, 0,
+ 16, MLXSW_REG_RAUHTD_IPV4_ENT_LEN, 0x00, false);
+
+/* reg_rauhtd_ipv4_ent_dip
+ * Destination IPv4 address.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_dip, MLXSW_REG_RAUHTD_BASE_LEN, 0,
+ 32, MLXSW_REG_RAUHTD_IPV4_ENT_LEN, 0x04, false);
+
+#define MLXSW_REG_RAUHTD_IPV6_ENT_LEN 0x20
+
+/* reg_rauhtd_ipv6_ent_a
+ * Activity. Set for new entries. Set if a packet lookup has hit on the
+ * specific entry.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv6_ent_a, MLXSW_REG_RAUHTD_BASE_LEN, 16, 1,
+ MLXSW_REG_RAUHTD_IPV6_ENT_LEN, 0x00, false);
+
+/* reg_rauhtd_ipv6_ent_rif
+ * Router interface.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv6_ent_rif, MLXSW_REG_RAUHTD_BASE_LEN, 0,
+ 16, MLXSW_REG_RAUHTD_IPV6_ENT_LEN, 0x00, false);
+
+/* reg_rauhtd_ipv6_ent_dip
+ * Destination IPv6 address.
+ * Access: RO
+ */
+MLXSW_ITEM_BUF_INDEXED(reg, rauhtd, ipv6_ent_dip, MLXSW_REG_RAUHTD_BASE_LEN,
+ 16, MLXSW_REG_RAUHTD_IPV6_ENT_LEN, 0x10);
+
+static inline void mlxsw_reg_rauhtd_ent_ipv4_unpack(char *payload,
+ int ent_index, u16 *p_rif,
+ u32 *p_dip)
+{
+ *p_rif = mlxsw_reg_rauhtd_ipv4_ent_rif_get(payload, ent_index);
+ *p_dip = mlxsw_reg_rauhtd_ipv4_ent_dip_get(payload, ent_index);
+}
+
+static inline void mlxsw_reg_rauhtd_ent_ipv6_unpack(char *payload,
+ int rec_index, u16 *p_rif,
+ char *p_dip)
+{
+ *p_rif = mlxsw_reg_rauhtd_ipv6_ent_rif_get(payload, rec_index);
+ mlxsw_reg_rauhtd_ipv6_ent_dip_memcpy_from(payload, rec_index, p_dip);
+}
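+
+/* Illustrative sketch, not part of the register definition: walking the IPv4
+ * entries of a single dump record. The num_entries field above is zero based,
+ * so one is added to obtain the actual entry count; the consumer of rif/dip
+ * and the helper name are hypothetical.
+ */
+static inline void
+mlxsw_reg_rauhtd_example_walk_ipv4_rec(char *payload, int rec_index)
+{
+ u8 num_entries;
+ u32 dip;
+ u16 rif;
+ int i;
+
+ num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(payload,
+ rec_index) + 1;
+ for (i = 0; i < num_entries; i++) {
+ int ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
+
+ mlxsw_reg_rauhtd_ent_ipv4_unpack(payload, ent_index, &rif, &dip);
+ /* rif and dip would be handed to the neighbour aging logic. */
+ }
+}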
+
+/* RTDP - Routing Tunnel Decap Properties Register
+ * -----------------------------------------------
+ * The RTDP register is used for configuring the tunnel decap properties of NVE
+ * and IPinIP.
+ */
+#define MLXSW_REG_RTDP_ID 0x8020
+#define MLXSW_REG_RTDP_LEN 0x44
+
+MLXSW_REG_DEFINE(rtdp, MLXSW_REG_RTDP_ID, MLXSW_REG_RTDP_LEN);
+
+enum mlxsw_reg_rtdp_type {
+ MLXSW_REG_RTDP_TYPE_NVE,
+ MLXSW_REG_RTDP_TYPE_IPIP,
+};
+
+/* reg_rtdp_type
+ * Type of the RTDP entry as per enum mlxsw_reg_rtdp_type.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rtdp, type, 0x00, 28, 4);
+
+/* reg_rtdp_tunnel_index
+ * Index to the Decap entry.
+ * For Spectrum, Index to KVD Linear.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rtdp, tunnel_index, 0x00, 0, 24);
+
+/* reg_rtdp_egress_router_interface
+ * Underlay egress router interface.
+ * Valid range is from 0 to cap_max_router_interfaces - 1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rtdp, egress_router_interface, 0x40, 0, 16);
+
+/* IPinIP */
+
+/* reg_rtdp_ipip_irif
+ * Ingress Router Interface for the overlay router
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rtdp, ipip_irif, 0x04, 16, 16);
+
+enum mlxsw_reg_rtdp_ipip_sip_check {
+ /* No sip checks. */
+ MLXSW_REG_RTDP_IPIP_SIP_CHECK_NO,
+ /* Filter packet if underlay is not IPv4 or if underlay SIP does not
+ * equal ipv4_usip.
+ */
+ MLXSW_REG_RTDP_IPIP_SIP_CHECK_FILTER_IPV4,
+ /* Filter packet if underlay is not IPv6 or if underlay SIP does not
+ * equal ipv6_usip.
+ */
+ MLXSW_REG_RTDP_IPIP_SIP_CHECK_FILTER_IPV6 = 3,
+};
+
+/* reg_rtdp_ipip_sip_check
+ * SIP check to perform. If decapsulation failed due to these configurations
+ * then trap_id is IPIP_DECAP_ERROR.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rtdp, ipip_sip_check, 0x04, 0, 3);
+
+/* If set, allow decapsulation of IPinIP (without GRE). */
+#define MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_IPIP BIT(0)
+/* If set, allow decapsulation of IPinGREinIP without a key. */
+#define MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_GRE BIT(1)
+/* If set, allow decapsulation of IPinGREinIP with a key. */
+#define MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_GRE_KEY BIT(2)
+
+/* reg_rtdp_ipip_type_check
+ * Flags as per MLXSW_REG_RTDP_IPIP_TYPE_CHECK_*. If decapsulation failed due to
+ * these configurations then trap_id is IPIP_DECAP_ERROR.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rtdp, ipip_type_check, 0x08, 24, 3);
+
+/* reg_rtdp_ipip_gre_key_check
+ * Whether GRE key should be checked. When check is enabled:
+ * - A packet received as IPinIP (without GRE) will always pass.
+ * - A packet received as IPinGREinIP without a key will not pass the check.
+ * - A packet received as IPinGREinIP with a key will pass the check only if the
+ * key in the packet is equal to expected_gre_key.
+ * If decapsulation failed due to GRE key then trap_id is IPIP_DECAP_ERROR.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rtdp, ipip_gre_key_check, 0x08, 23, 1);
+
+/* reg_rtdp_ipip_ipv4_usip
+ * Underlay IPv4 address for ipv4 source address check.
+ * Reserved when sip_check is not '1'.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rtdp, ipip_ipv4_usip, 0x0C, 0, 32);
+
+/* reg_rtdp_ipip_ipv6_usip_ptr
+ * This field is valid when sip_check is "sipv6 check explicitly". This is a
+ * pointer to the underlay IPv6 SIP which is configured by RIPS. For Spectrum,
+ * the index is to the KVD linear.
+ * Reserved when sip_check is not MLXSW_REG_RTDP_IPIP_SIP_CHECK_FILTER_IPV6.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rtdp, ipip_ipv6_usip_ptr, 0x10, 0, 24);
+
+/* reg_rtdp_ipip_expected_gre_key
+ * GRE key for checking.
+ * Reserved when gre_key_check is '0'.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rtdp, ipip_expected_gre_key, 0x14, 0, 32);
+
+static inline void mlxsw_reg_rtdp_pack(char *payload,
+ enum mlxsw_reg_rtdp_type type,
+ u32 tunnel_index)
+{
+ MLXSW_REG_ZERO(rtdp, payload);
+ mlxsw_reg_rtdp_type_set(payload, type);
+ mlxsw_reg_rtdp_tunnel_index_set(payload, tunnel_index);
+}
+
+static inline void
+mlxsw_reg_rtdp_ipip_pack(char *payload, u16 irif,
+ enum mlxsw_reg_rtdp_ipip_sip_check sip_check,
+ unsigned int type_check, bool gre_key_check,
+ u32 expected_gre_key)
+{
+ mlxsw_reg_rtdp_ipip_irif_set(payload, irif);
+ mlxsw_reg_rtdp_ipip_sip_check_set(payload, sip_check);
+ mlxsw_reg_rtdp_ipip_type_check_set(payload, type_check);
+ mlxsw_reg_rtdp_ipip_gre_key_check_set(payload, gre_key_check);
+ mlxsw_reg_rtdp_ipip_expected_gre_key_set(payload, expected_gre_key);
+}
+
+static inline void
+mlxsw_reg_rtdp_ipip4_pack(char *payload, u16 irif,
+ enum mlxsw_reg_rtdp_ipip_sip_check sip_check,
+ unsigned int type_check, bool gre_key_check,
+ u32 ipv4_usip, u32 expected_gre_key)
+{
+ mlxsw_reg_rtdp_ipip_pack(payload, irif, sip_check, type_check,
+ gre_key_check, expected_gre_key);
+ mlxsw_reg_rtdp_ipip_ipv4_usip_set(payload, ipv4_usip);
+}
+
+static inline void
+mlxsw_reg_rtdp_ipip6_pack(char *payload, u16 irif,
+ enum mlxsw_reg_rtdp_ipip_sip_check sip_check,
+ unsigned int type_check, bool gre_key_check,
+ u32 ipv6_usip_ptr, u32 expected_gre_key)
+{
+ mlxsw_reg_rtdp_ipip_pack(payload, irif, sip_check, type_check,
+ gre_key_check, expected_gre_key);
+ mlxsw_reg_rtdp_ipip_ipv6_usip_ptr_set(payload, ipv6_usip_ptr);
+}
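+
+/* Illustrative sketch, not part of the register definition: a hypothetical
+ * configuration of a decap entry that only accepts keyed IPinGREinIPv4
+ * packets from a given underlay source IP and with a given GRE key.
+ */
+static inline void
+mlxsw_reg_rtdp_example_gre4_decap(char *payload, u32 tunnel_index, u16 irif,
+ u32 ipv4_usip, u32 gre_key)
+{
+ mlxsw_reg_rtdp_pack(payload, MLXSW_REG_RTDP_TYPE_IPIP, tunnel_index);
+ mlxsw_reg_rtdp_ipip4_pack(payload, irif,
+ MLXSW_REG_RTDP_IPIP_SIP_CHECK_FILTER_IPV4,
+ MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_GRE_KEY,
+ true, ipv4_usip, gre_key);
+}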
+
+/* RIPS - Router IP version Six Register
+ * -------------------------------------
+ * The RIPS register is used to store IPv6 addresses for use by the NVE and
+ * IPinIP.
+ */
+#define MLXSW_REG_RIPS_ID 0x8021
+#define MLXSW_REG_RIPS_LEN 0x14
+
+MLXSW_REG_DEFINE(rips, MLXSW_REG_RIPS_ID, MLXSW_REG_RIPS_LEN);
+
+/* reg_rips_index
+ * Index to IPv6 address.
+ * For Spectrum, the index is to the KVD linear.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rips, index, 0x00, 0, 24);
+
+/* reg_rips_ipv6
+ * IPv6 address
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, rips, ipv6, 0x04, 16);
+
+static inline void mlxsw_reg_rips_pack(char *payload, u32 index,
+ const struct in6_addr *ipv6)
+{
+ MLXSW_REG_ZERO(rips, payload);
+ mlxsw_reg_rips_index_set(payload, index);
+ mlxsw_reg_rips_ipv6_memcpy_to(payload, (const char *)ipv6);
+}
+
+/* RATRAD - Router Adjacency Table Activity Dump Register
+ * ------------------------------------------------------
+ * The RATRAD register is used to dump and optionally clear activity bits of
+ * router adjacency table entries.
+ */
+#define MLXSW_REG_RATRAD_ID 0x8022
+#define MLXSW_REG_RATRAD_LEN 0x210
+
+MLXSW_REG_DEFINE(ratrad, MLXSW_REG_RATRAD_ID, MLXSW_REG_RATRAD_LEN);
+
+enum {
+ /* Read activity */
+ MLXSW_REG_RATRAD_OP_READ_ACTIVITY,
+ /* Read and clear activity */
+ MLXSW_REG_RATRAD_OP_READ_CLEAR_ACTIVITY,
+};
+
+/* reg_ratrad_op
+ * Access: Operation
+ */
+MLXSW_ITEM32(reg, ratrad, op, 0x00, 30, 2);
+
+/* reg_ratrad_ecmp_size
+ * ecmp_size is the number of sequential entries from adjacency_index. Valid
+ * ranges:
+ * Spectrum-1: 32-64, 512, 1024, 2048, 4096
+ * Spectrum-2/3: 32-128, 256, 512, 1024, 2048, 4096
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ratrad, ecmp_size, 0x00, 0, 13);
+
+/* reg_ratrad_adjacency_index
+ * Index into the adjacency table.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ratrad, adjacency_index, 0x04, 0, 24);
+
+/* reg_ratrad_activity_vector
+ * Activity bit per adjacency index.
+ * Bits higher than ecmp_size are reserved.
+ * Access: RO
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, ratrad, activity_vector, 0x10, 0x200, 1);
+
+static inline void mlxsw_reg_ratrad_pack(char *payload, u32 adjacency_index,
+ u16 ecmp_size)
+{
+ MLXSW_REG_ZERO(ratrad, payload);
+ mlxsw_reg_ratrad_op_set(payload,
+ MLXSW_REG_RATRAD_OP_READ_CLEAR_ACTIVITY);
+ mlxsw_reg_ratrad_ecmp_size_set(payload, ecmp_size);
+ mlxsw_reg_ratrad_adjacency_index_set(payload, adjacency_index);
+}
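+
+/* Illustrative sketch, not part of the register definition: counting how many
+ * of the ecmp_size adjacency entries were hit, by reading one bit of the
+ * activity vector per entry (the helper name is hypothetical).
+ */
+static inline u16 mlxsw_reg_ratrad_example_count_active(const char *payload,
+ u16 ecmp_size)
+{
+ u16 i, count = 0;
+
+ for (i = 0; i < ecmp_size; i++)
+ if (mlxsw_reg_ratrad_activity_vector_get(payload, i))
+ count++;
+ return count;
+}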
+
+/* RIGR-V2 - Router Interface Group Register Version 2
+ * ---------------------------------------------------
+ * The RIGR_V2 register is used to add, remove and query the egress interface
+ * list of a multicast forwarding entry.
+ */
+#define MLXSW_REG_RIGR2_ID 0x8023
+#define MLXSW_REG_RIGR2_LEN 0xB0
+
+#define MLXSW_REG_RIGR2_MAX_ERIFS 32
+
+MLXSW_REG_DEFINE(rigr2, MLXSW_REG_RIGR2_ID, MLXSW_REG_RIGR2_LEN);
+
+/* reg_rigr2_rigr_index
+ * KVD Linear index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rigr2, rigr_index, 0x04, 0, 24);
+
+/* reg_rigr2_vnext
+ * Next RIGR Index is valid.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rigr2, vnext, 0x08, 31, 1);
+
+/* reg_rigr2_next_rigr_index
+ * Next RIGR Index. The index is to the KVD linear.
+ * Reserved when vnext = '0'.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rigr2, next_rigr_index, 0x08, 0, 24);
+
+/* reg_rigr2_vrmid
+ * RMID Index is valid.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rigr2, vrmid, 0x20, 31, 1);
+
+/* reg_rigr2_rmid_index
+ * RMID Index.
+ * Range 0 .. max_mid - 1
+ * Reserved when vrmid = '0'.
+ * The index is to the Port Group Table (PGT)
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rigr2, rmid_index, 0x20, 0, 16);
+
+/* reg_rigr2_erif_entry_v
+ * Egress Router Interface is valid.
+ * Note that low-entries must be set if high-entries are set. For
+ * example: if erif_entry[2].v is set then erif_entry[1].v and
+ * erif_entry[0].v must be set.
+ * Index can be from 0 to cap_mc_erif_list_entries-1
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, rigr2, erif_entry_v, 0x24, 31, 1, 4, 0, false);
+
+/* reg_rigr2_erif_entry_erif
+ * Egress Router Interface.
+ * Valid range is from 0 to cap_max_router_interfaces - 1
+ * Index can be from 0 to MLXSW_REG_RIGR2_MAX_ERIFS - 1
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, rigr2, erif_entry_erif, 0x24, 0, 16, 4, 0, false);
+
+static inline void mlxsw_reg_rigr2_pack(char *payload, u32 rigr_index,
+ bool vnext, u32 next_rigr_index)
+{
+ MLXSW_REG_ZERO(rigr2, payload);
+ mlxsw_reg_rigr2_rigr_index_set(payload, rigr_index);
+ mlxsw_reg_rigr2_vnext_set(payload, vnext);
+ mlxsw_reg_rigr2_next_rigr_index_set(payload, next_rigr_index);
+ mlxsw_reg_rigr2_vrmid_set(payload, 0);
+ mlxsw_reg_rigr2_rmid_index_set(payload, 0);
+}
+
+static inline void mlxsw_reg_rigr2_erif_entry_pack(char *payload, int index,
+ bool v, u16 erif)
+{
+ mlxsw_reg_rigr2_erif_entry_v_set(payload, index, v);
+ mlxsw_reg_rigr2_erif_entry_erif_set(payload, index, erif);
+}
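+
+/* Illustrative sketch, not part of the register definition: filling the eRIF
+ * list from a hypothetical array. Entries are packed from index 0 upwards so
+ * that the "low entries must be valid before high entries" rule above holds.
+ */
+static inline void
+mlxsw_reg_rigr2_example_fill(char *payload, u32 rigr_index,
+ const u16 *erifs, unsigned int count)
+{
+ unsigned int i;
+
+ mlxsw_reg_rigr2_pack(payload, rigr_index, false, 0);
+ for (i = 0; i < count && i < MLXSW_REG_RIGR2_MAX_ERIFS; i++)
+ mlxsw_reg_rigr2_erif_entry_pack(payload, i, true, erifs[i]);
+}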
+
+/* RECR-V2 - Router ECMP Configuration Version 2 Register
+ * ------------------------------------------------------
+ */
+#define MLXSW_REG_RECR2_ID 0x8025
+#define MLXSW_REG_RECR2_LEN 0x38
+
+MLXSW_REG_DEFINE(recr2, MLXSW_REG_RECR2_ID, MLXSW_REG_RECR2_LEN);
+
+/* reg_recr2_pp
+ * Per-port configuration
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, recr2, pp, 0x00, 24, 1);
+
+/* reg_recr2_sh
+ * Symmetric hash
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, recr2, sh, 0x00, 8, 1);
+
+/* reg_recr2_seed
+ * Seed
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, recr2, seed, 0x08, 0, 32);
+
+enum {
+ /* Enable IPv4 fields if packet is not TCP and not UDP */
+ MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP = 3,
+ /* Enable IPv4 fields if packet is TCP or UDP */
+ MLXSW_REG_RECR2_IPV4_EN_TCP_UDP = 4,
+ /* Enable IPv6 fields if packet is not TCP and not UDP */
+ MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP = 5,
+ /* Enable IPv6 fields if packet is TCP or UDP */
+ MLXSW_REG_RECR2_IPV6_EN_TCP_UDP = 6,
+ /* Enable TCP/UDP header fields if packet is IPv4 */
+ MLXSW_REG_RECR2_TCP_UDP_EN_IPV4 = 7,
+ /* Enable TCP/UDP header fields if packet is IPv6 */
+ MLXSW_REG_RECR2_TCP_UDP_EN_IPV6 = 8,
+
+ __MLXSW_REG_RECR2_HEADER_CNT,
+};
+
+/* reg_recr2_outer_header_enables
+ * Bit mask where each bit enables a specific layer to be included in
+ * the hash calculation.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, recr2, outer_header_enables, 0x10, 0x04, 1);
+
+enum {
+ /* IPv4 Source IP */
+ MLXSW_REG_RECR2_IPV4_SIP0 = 9,
+ MLXSW_REG_RECR2_IPV4_SIP3 = 12,
+ /* IPv4 Destination IP */
+ MLXSW_REG_RECR2_IPV4_DIP0 = 13,
+ MLXSW_REG_RECR2_IPV4_DIP3 = 16,
+ /* IP Protocol */
+ MLXSW_REG_RECR2_IPV4_PROTOCOL = 17,
+ /* IPv6 Source IP */
+ MLXSW_REG_RECR2_IPV6_SIP0_7 = 21,
+ MLXSW_REG_RECR2_IPV6_SIP8 = 29,
+ MLXSW_REG_RECR2_IPV6_SIP15 = 36,
+ /* IPv6 Destination IP */
+ MLXSW_REG_RECR2_IPV6_DIP0_7 = 37,
+ MLXSW_REG_RECR2_IPV6_DIP8 = 45,
+ MLXSW_REG_RECR2_IPV6_DIP15 = 52,
+ /* IPv6 Next Header */
+ MLXSW_REG_RECR2_IPV6_NEXT_HEADER = 53,
+ /* IPv6 Flow Label */
+ MLXSW_REG_RECR2_IPV6_FLOW_LABEL = 57,
+ /* TCP/UDP Source Port */
+ MLXSW_REG_RECR2_TCP_UDP_SPORT = 74,
+ /* TCP/UDP Destination Port */
+ MLXSW_REG_RECR2_TCP_UDP_DPORT = 75,
+
+ __MLXSW_REG_RECR2_FIELD_CNT,
+};
+
+/* reg_recr2_outer_header_fields_enable
+ * Packet fields to enable for ECMP hash subject to outer_header_enables.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, recr2, outer_header_fields_enable, 0x14, 0x14, 1);
+
+/* reg_recr2_inner_header_enables
+ * Bit mask where each bit enables a specific inner layer to be included in the
+ * hash calculation. Same values as reg_recr2_outer_header_enables.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, recr2, inner_header_enables, 0x2C, 0x04, 1);
+
+enum {
+ /* Inner IPv4 Source IP */
+ MLXSW_REG_RECR2_INNER_IPV4_SIP0 = 3,
+ MLXSW_REG_RECR2_INNER_IPV4_SIP3 = 6,
+ /* Inner IPv4 Destination IP */
+ MLXSW_REG_RECR2_INNER_IPV4_DIP0 = 7,
+ MLXSW_REG_RECR2_INNER_IPV4_DIP3 = 10,
+ /* Inner IP Protocol */
+ MLXSW_REG_RECR2_INNER_IPV4_PROTOCOL = 11,
+ /* Inner IPv6 Source IP */
+ MLXSW_REG_RECR2_INNER_IPV6_SIP0_7 = 12,
+ MLXSW_REG_RECR2_INNER_IPV6_SIP8 = 20,
+ MLXSW_REG_RECR2_INNER_IPV6_SIP15 = 27,
+ /* Inner IPv6 Destination IP */
+ MLXSW_REG_RECR2_INNER_IPV6_DIP0_7 = 28,
+ MLXSW_REG_RECR2_INNER_IPV6_DIP8 = 36,
+ MLXSW_REG_RECR2_INNER_IPV6_DIP15 = 43,
+ /* Inner IPv6 Next Header */
+ MLXSW_REG_RECR2_INNER_IPV6_NEXT_HEADER = 44,
+ /* Inner IPv6 Flow Label */
+ MLXSW_REG_RECR2_INNER_IPV6_FLOW_LABEL = 45,
+ /* Inner TCP/UDP Source Port */
+ MLXSW_REG_RECR2_INNER_TCP_UDP_SPORT = 46,
+ /* Inner TCP/UDP Destination Port */
+ MLXSW_REG_RECR2_INNER_TCP_UDP_DPORT = 47,
+
+ __MLXSW_REG_RECR2_INNER_FIELD_CNT,
+};
+
+/* reg_recr2_inner_header_fields_enable
+ * Inner packet fields to enable for ECMP hash subject to inner_header_enables.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, recr2, inner_header_fields_enable, 0x30, 0x08, 1);
+
+static inline void mlxsw_reg_recr2_pack(char *payload, u32 seed)
+{
+ MLXSW_REG_ZERO(recr2, payload);
+ mlxsw_reg_recr2_pp_set(payload, false);
+ mlxsw_reg_recr2_sh_set(payload, true);
+ mlxsw_reg_recr2_seed_set(payload, seed);
+}
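+
+/* Illustrative sketch, not part of the register definition: enabling an IPv4
+ * source/destination address ECMP hash. Header enables and field enables are
+ * bit arrays indexed by the enum values above; the helper name is
+ * hypothetical.
+ */
+static inline void mlxsw_reg_recr2_example_ipv4_addr_hash(char *payload,
+ u32 seed)
+{
+ int i;
+
+ mlxsw_reg_recr2_pack(payload, seed);
+ mlxsw_reg_recr2_outer_header_enables_set(payload,
+ MLXSW_REG_RECR2_IPV4_EN_TCP_UDP, true);
+ mlxsw_reg_recr2_outer_header_enables_set(payload,
+ MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP, true);
+ for (i = MLXSW_REG_RECR2_IPV4_SIP0; i <= MLXSW_REG_RECR2_IPV4_DIP3; i++)
+ mlxsw_reg_recr2_outer_header_fields_enable_set(payload, i, true);
+}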
+
+/* RMFT-V2 - Router Multicast Forwarding Table Version 2 Register
+ * --------------------------------------------------------------
+ * The RMFT_V2 register is used to configure and query the multicast table.
+ */
+#define MLXSW_REG_RMFT2_ID 0x8027
+#define MLXSW_REG_RMFT2_LEN 0x174
+
+MLXSW_REG_DEFINE(rmft2, MLXSW_REG_RMFT2_ID, MLXSW_REG_RMFT2_LEN);
+
+/* reg_rmft2_v
+ * Valid
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rmft2, v, 0x00, 31, 1);
+
+enum mlxsw_reg_rmft2_type {
+ MLXSW_REG_RMFT2_TYPE_IPV4,
+ MLXSW_REG_RMFT2_TYPE_IPV6
+};
+
+/* reg_rmft2_type
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rmft2, type, 0x00, 28, 2);
+
+enum mlxsw_sp_reg_rmft2_op {
+ /* For Write:
+ * Write operation. Used to write a new entry to the table. All RW
+ * fields are relevant for new entry. Activity bit is set for new
+ * entries - Note write with v (Valid) 0 will delete the entry.
+ * For Query:
+ * Read operation
+ */
+ MLXSW_REG_RMFT2_OP_READ_WRITE,
+};
+
+/* reg_rmft2_op
+ * Operation.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, rmft2, op, 0x00, 20, 2);
+
+/* reg_rmft2_a
+ * Activity. Set for new entries. Set if a packet lookup has hit on the specific
+ * entry.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, rmft2, a, 0x00, 16, 1);
+
+/* reg_rmft2_offset
+ * Offset within the multicast forwarding table to write to.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rmft2, offset, 0x00, 0, 16);
+
+/* reg_rmft2_virtual_router
+ * Virtual Router ID. Range from 0..cap_max_virtual_routers-1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rmft2, virtual_router, 0x04, 0, 16);
+
+enum mlxsw_reg_rmft2_irif_mask {
+ MLXSW_REG_RMFT2_IRIF_MASK_IGNORE,
+ MLXSW_REG_RMFT2_IRIF_MASK_COMPARE
+};
+
+/* reg_rmft2_irif_mask
+ * Ingress RIF mask.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rmft2, irif_mask, 0x08, 24, 1);
+
+/* reg_rmft2_irif
+ * Ingress RIF index.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rmft2, irif, 0x08, 0, 16);
+
+/* reg_rmft2_dip{4,6}
+ * Destination IPv4/6 address
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, rmft2, dip6, 0x10, 16);
+MLXSW_ITEM32(reg, rmft2, dip4, 0x1C, 0, 32);
+
+/* reg_rmft2_dip{4,6}_mask
+ * A bit that is set directs the TCAM to compare the corresponding bit in key. A
+ * bit that is clear directs the TCAM to ignore the corresponding bit in key.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, rmft2, dip6_mask, 0x20, 16);
+MLXSW_ITEM32(reg, rmft2, dip4_mask, 0x2C, 0, 32);
+
+/* reg_rmft2_sip{4,6}
+ * Source IPv4/6 address
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, rmft2, sip6, 0x30, 16);
+MLXSW_ITEM32(reg, rmft2, sip4, 0x3C, 0, 32);
+
+/* reg_rmft2_sip{4,6}_mask
+ * A bit that is set directs the TCAM to compare the corresponding bit in key. A
+ * bit that is clear directs the TCAM to ignore the corresponding bit in key.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, rmft2, sip6_mask, 0x40, 16);
+MLXSW_ITEM32(reg, rmft2, sip4_mask, 0x4C, 0, 32);
+
+/* reg_rmft2_flexible_action_set
+ * ACL action set. The only supported action types in this field and in any
+ * action-set pointed from here are as follows:
+ * 00h: ACTION_NULL
+ * 01h: ACTION_MAC_TTL, only TTL configuration is supported.
+ * 03h: ACTION_TRAP
+ * 06h: ACTION_QOS
+ * 08h: ACTION_POLICING_MONITORING
+ * 10h: ACTION_ROUTER_MC
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, rmft2, flexible_action_set, 0x80,
+ MLXSW_REG_FLEX_ACTION_SET_LEN);
+
+static inline void
+mlxsw_reg_rmft2_common_pack(char *payload, bool v, u16 offset,
+ u16 virtual_router,
+ enum mlxsw_reg_rmft2_irif_mask irif_mask, u16 irif,
+ const char *flex_action_set)
+{
+ MLXSW_REG_ZERO(rmft2, payload);
+ mlxsw_reg_rmft2_v_set(payload, v);
+ mlxsw_reg_rmft2_op_set(payload, MLXSW_REG_RMFT2_OP_READ_WRITE);
+ mlxsw_reg_rmft2_offset_set(payload, offset);
+ mlxsw_reg_rmft2_virtual_router_set(payload, virtual_router);
+ mlxsw_reg_rmft2_irif_mask_set(payload, irif_mask);
+ mlxsw_reg_rmft2_irif_set(payload, irif);
+ if (flex_action_set)
+ mlxsw_reg_rmft2_flexible_action_set_memcpy_to(payload,
+ flex_action_set);
+}
+
+static inline void
+mlxsw_reg_rmft2_ipv4_pack(char *payload, bool v, u16 offset, u16 virtual_router,
+ enum mlxsw_reg_rmft2_irif_mask irif_mask, u16 irif,
+ u32 dip4, u32 dip4_mask, u32 sip4, u32 sip4_mask,
+ const char *flexible_action_set)
+{
+ mlxsw_reg_rmft2_common_pack(payload, v, offset, virtual_router,
+ irif_mask, irif, flexible_action_set);
+ mlxsw_reg_rmft2_type_set(payload, MLXSW_REG_RMFT2_TYPE_IPV4);
+ mlxsw_reg_rmft2_dip4_set(payload, dip4);
+ mlxsw_reg_rmft2_dip4_mask_set(payload, dip4_mask);
+ mlxsw_reg_rmft2_sip4_set(payload, sip4);
+ mlxsw_reg_rmft2_sip4_mask_set(payload, sip4_mask);
+}
+
+static inline void
+mlxsw_reg_rmft2_ipv6_pack(char *payload, bool v, u16 offset, u16 virtual_router,
+ enum mlxsw_reg_rmft2_irif_mask irif_mask, u16 irif,
+ struct in6_addr dip6, struct in6_addr dip6_mask,
+ struct in6_addr sip6, struct in6_addr sip6_mask,
+ const char *flexible_action_set)
+{
+ mlxsw_reg_rmft2_common_pack(payload, v, offset, virtual_router,
+ irif_mask, irif, flexible_action_set);
+ mlxsw_reg_rmft2_type_set(payload, MLXSW_REG_RMFT2_TYPE_IPV6);
+ mlxsw_reg_rmft2_dip6_memcpy_to(payload, (void *)&dip6);
+ mlxsw_reg_rmft2_dip6_mask_memcpy_to(payload, (void *)&dip6_mask);
+ mlxsw_reg_rmft2_sip6_memcpy_to(payload, (void *)&sip6);
+ mlxsw_reg_rmft2_sip6_mask_memcpy_to(payload, (void *)&sip6_mask);
+}
+
+/* REIV - Router Egress Interface to VID Register
+ * ----------------------------------------------
+ * The REIV register maps {eRIF, egress_port} -> VID.
+ * This mapping is done at the egress, after the ACLs.
+ * This mapping always takes effect after the router, regardless of cast
+ * (unicast/multicast/port-based multicast), regardless of eRIF type and
+ * regardless of bridge decisions (e.g. SFD for unicast or SMPE).
+ * Reserved when the RIF is a loopback RIF.
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+#define MLXSW_REG_REIV_ID 0x8034
+#define MLXSW_REG_REIV_BASE_LEN 0x20 /* base length, without records */
+#define MLXSW_REG_REIV_REC_LEN 0x04 /* record length */
+#define MLXSW_REG_REIV_REC_MAX_COUNT 256 /* firmware limitation */
+#define MLXSW_REG_REIV_LEN (MLXSW_REG_REIV_BASE_LEN + \
+ MLXSW_REG_REIV_REC_LEN * \
+ MLXSW_REG_REIV_REC_MAX_COUNT)
+
+MLXSW_REG_DEFINE(reiv, MLXSW_REG_REIV_ID, MLXSW_REG_REIV_LEN);
+
+/* reg_reiv_port_page
+ * Port page - elport_record[0] is 256*port_page.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, reiv, port_page, 0x00, 0, 4);
+
+/* reg_reiv_erif
+ * Egress RIF.
+ * Range is 0..cap_max_router_interfaces-1.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, reiv, erif, 0x04, 0, 16);
+
+/* reg_reiv_rec_update
+ * Update enable (when write):
+ * 0 - Do not update the entry.
+ * 1 - Update the entry.
+ * Access: OP
+ */
+MLXSW_ITEM32_INDEXED(reg, reiv, rec_update, MLXSW_REG_REIV_BASE_LEN, 31, 1,
+ MLXSW_REG_REIV_REC_LEN, 0x00, false);
+
+/* reg_reiv_rec_evid
+ * Egress VID.
+ * Range is 0..4095.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, reiv, rec_evid, MLXSW_REG_REIV_BASE_LEN, 0, 12,
+ MLXSW_REG_REIV_REC_LEN, 0x00, false);
+
+static inline void mlxsw_reg_reiv_pack(char *payload, u8 port_page, u16 erif)
+{
+ MLXSW_REG_ZERO(reiv, payload);
+ mlxsw_reg_reiv_port_page_set(payload, port_page);
+ mlxsw_reg_reiv_erif_set(payload, erif);
+}
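+
+/* Illustrative sketch, not part of the register definition: mapping one
+ * {eRIF, local port} pair to an egress VID. The record index is the port's
+ * offset within the 256-port page selected by port_page (the helper name is
+ * hypothetical).
+ */
+static inline void mlxsw_reg_reiv_example_map_port(char *payload, u16 erif,
+ u16 local_port, u16 evid)
+{
+ u16 rec_index = local_port % MLXSW_REG_REIV_REC_MAX_COUNT;
+
+ mlxsw_reg_reiv_pack(payload, local_port / MLXSW_REG_REIV_REC_MAX_COUNT,
+ erif);
+ mlxsw_reg_reiv_rec_update_set(payload, rec_index, true);
+ mlxsw_reg_reiv_rec_evid_set(payload, rec_index, evid);
+}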
+
+/* MFCR - Management Fan Control Register
+ * --------------------------------------
+ * This register controls the settings of the Fan Speed PWM mechanism.
+ */
+#define MLXSW_REG_MFCR_ID 0x9001
+#define MLXSW_REG_MFCR_LEN 0x08
+
+MLXSW_REG_DEFINE(mfcr, MLXSW_REG_MFCR_ID, MLXSW_REG_MFCR_LEN);
+
+enum mlxsw_reg_mfcr_pwm_frequency {
+ MLXSW_REG_MFCR_PWM_FEQ_11HZ = 0x00,
+ MLXSW_REG_MFCR_PWM_FEQ_14_7HZ = 0x01,
+ MLXSW_REG_MFCR_PWM_FEQ_22_1HZ = 0x02,
+ MLXSW_REG_MFCR_PWM_FEQ_1_4KHZ = 0x40,
+ MLXSW_REG_MFCR_PWM_FEQ_5KHZ = 0x41,
+ MLXSW_REG_MFCR_PWM_FEQ_20KHZ = 0x42,
+ MLXSW_REG_MFCR_PWM_FEQ_22_5KHZ = 0x43,
+ MLXSW_REG_MFCR_PWM_FEQ_25KHZ = 0x44,
+};
+
+/* reg_mfcr_pwm_frequency
+ * Controls the frequency of the PWM signal.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mfcr, pwm_frequency, 0x00, 0, 7);
+
+#define MLXSW_MFCR_TACHOS_MAX 10
+
+/* reg_mfcr_tacho_active
+ * Indicates which of the tachometers are active (bit per tachometer).
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfcr, tacho_active, 0x04, 16, MLXSW_MFCR_TACHOS_MAX);
+
+#define MLXSW_MFCR_PWMS_MAX 5
+
+/* reg_mfcr_pwm_active
+ * Indicates which of the PWM controls are active (bit per PWM).
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfcr, pwm_active, 0x04, 0, MLXSW_MFCR_PWMS_MAX);
+
+static inline void
+mlxsw_reg_mfcr_pack(char *payload,
+ enum mlxsw_reg_mfcr_pwm_frequency pwm_frequency)
+{
+ MLXSW_REG_ZERO(mfcr, payload);
+ mlxsw_reg_mfcr_pwm_frequency_set(payload, pwm_frequency);
+}
+
+static inline void
+mlxsw_reg_mfcr_unpack(char *payload,
+ enum mlxsw_reg_mfcr_pwm_frequency *p_pwm_frequency,
+ u16 *p_tacho_active, u8 *p_pwm_active)
+{
+ *p_pwm_frequency = mlxsw_reg_mfcr_pwm_frequency_get(payload);
+ *p_tacho_active = mlxsw_reg_mfcr_tacho_active_get(payload);
+ *p_pwm_active = mlxsw_reg_mfcr_pwm_active_get(payload);
+}
+
+/* MFSC - Management Fan Speed Control Register
+ * --------------------------------------------
+ * This register controls the settings of the Fan Speed PWM mechanism.
+ */
+#define MLXSW_REG_MFSC_ID 0x9002
+#define MLXSW_REG_MFSC_LEN 0x08
+
+MLXSW_REG_DEFINE(mfsc, MLXSW_REG_MFSC_ID, MLXSW_REG_MFSC_LEN);
+
+/* reg_mfsc_pwm
+ * Fan pwm to control / monitor.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mfsc, pwm, 0x00, 24, 3);
+
+/* reg_mfsc_pwm_duty_cycle
+ * Controls the duty cycle of the PWM. Value range is 0..255, representing a
+ * duty cycle of 0%..100%.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mfsc, pwm_duty_cycle, 0x04, 0, 8);
+
+static inline void mlxsw_reg_mfsc_pack(char *payload, u8 pwm,
+ u8 pwm_duty_cycle)
+{
+ MLXSW_REG_ZERO(mfsc, payload);
+ mlxsw_reg_mfsc_pwm_set(payload, pwm);
+ mlxsw_reg_mfsc_pwm_duty_cycle_set(payload, pwm_duty_cycle);
+}
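+
+/* Illustrative sketch, not part of the register definition: a hypothetical
+ * helper mapping a 0..100 percent request onto the 0..255 duty cycle range.
+ */
+static inline void mlxsw_reg_mfsc_example_pack_percent(char *payload, u8 pwm,
+ u8 percent)
+{
+ u8 duty_cycle = percent * 255 / 100;
+
+ mlxsw_reg_mfsc_pack(payload, pwm, duty_cycle);
+}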
+
+/* MFSM - Management Fan Speed Measurement
+ * ---------------------------------------
+ * This register controls the settings of the Tacho measurements and
+ * enables reading the Tachometer measurements.
+ */
+#define MLXSW_REG_MFSM_ID 0x9003
+#define MLXSW_REG_MFSM_LEN 0x08
+
+MLXSW_REG_DEFINE(mfsm, MLXSW_REG_MFSM_ID, MLXSW_REG_MFSM_LEN);
+
+/* reg_mfsm_tacho
+ * Fan tachometer index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mfsm, tacho, 0x00, 24, 4);
+
+/* reg_mfsm_rpm
+ * Fan speed (revolutions per minute).
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfsm, rpm, 0x04, 0, 16);
+
+static inline void mlxsw_reg_mfsm_pack(char *payload, u8 tacho)
+{
+ MLXSW_REG_ZERO(mfsm, payload);
+ mlxsw_reg_mfsm_tacho_set(payload, tacho);
+}
+
+/* MFSL - Management Fan Speed Limit Register
+ * ------------------------------------------
+ * The Fan Speed Limit register is used to configure the fan speed
+ * event / interrupt notification mechanism. Fan speed thresholds are
+ * defined for both under-speed and over-speed.
+ */
+#define MLXSW_REG_MFSL_ID 0x9004
+#define MLXSW_REG_MFSL_LEN 0x0C
+
+MLXSW_REG_DEFINE(mfsl, MLXSW_REG_MFSL_ID, MLXSW_REG_MFSL_LEN);
+
+/* reg_mfsl_tacho
+ * Fan tachometer index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mfsl, tacho, 0x00, 24, 4);
+
+/* reg_mfsl_tach_min
+ * Tachometer minimum value (minimum RPM).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mfsl, tach_min, 0x04, 0, 16);
+
+/* reg_mfsl_tach_max
+ * Tachometer maximum value (maximum RPM).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mfsl, tach_max, 0x08, 0, 16);
+
+static inline void mlxsw_reg_mfsl_pack(char *payload, u8 tacho,
+ u16 tach_min, u16 tach_max)
+{
+ MLXSW_REG_ZERO(mfsl, payload);
+ mlxsw_reg_mfsl_tacho_set(payload, tacho);
+ mlxsw_reg_mfsl_tach_min_set(payload, tach_min);
+ mlxsw_reg_mfsl_tach_max_set(payload, tach_max);
+}
+
+static inline void mlxsw_reg_mfsl_unpack(char *payload, u8 tacho,
+ u16 *p_tach_min, u16 *p_tach_max)
+{
+ if (p_tach_min)
+ *p_tach_min = mlxsw_reg_mfsl_tach_min_get(payload);
+
+ if (p_tach_max)
+ *p_tach_max = mlxsw_reg_mfsl_tach_max_get(payload);
+}
+
+/* FORE - Fan Out of Range Event Register
+ * --------------------------------------
+ * This register reports the status of the controlled fans compared to the
+ * range defined by the MFSL register.
+ */
+#define MLXSW_REG_FORE_ID 0x9007
+#define MLXSW_REG_FORE_LEN 0x0C
+
+MLXSW_REG_DEFINE(fore, MLXSW_REG_FORE_ID, MLXSW_REG_FORE_LEN);
+
+/* reg_fore_fan_under_limit
+ * Fan speed is below the low limit defined in MFSL register. Each bit relates
+ * to a single tachometer and indicates the specific tachometer reading is
+ * below the threshold.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, fore, fan_under_limit, 0x00, 16, 10);
+
+static inline void mlxsw_reg_fore_unpack(char *payload, u8 tacho,
+ bool *fault)
+{
+ u16 limit;
+
+ if (fault) {
+ limit = mlxsw_reg_fore_fan_under_limit_get(payload);
+ *fault = limit & BIT(tacho);
+ }
+}
+
+/* MTCAP - Management Temperature Capabilities
+ * -------------------------------------------
+ * This register exposes the capabilities of the device and
+ * system temperature sensing.
+ */
+#define MLXSW_REG_MTCAP_ID 0x9009
+#define MLXSW_REG_MTCAP_LEN 0x08
+
+MLXSW_REG_DEFINE(mtcap, MLXSW_REG_MTCAP_ID, MLXSW_REG_MTCAP_LEN);
+
+/* reg_mtcap_sensor_count
+ * Number of sensors supported by the device.
+ * This includes the QSFP module sensors (if they exist in the QSFP module).
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mtcap, sensor_count, 0x00, 0, 7);
+
+/* MTMP - Management Temperature
+ * -----------------------------
+ * This register controls the settings of the temperature measurements
+ * and enables reading the temperature measurements. Note that temperature
+ * is in 0.125 degrees Celsius.
+ */
+#define MLXSW_REG_MTMP_ID 0x900A
+#define MLXSW_REG_MTMP_LEN 0x20
+
+MLXSW_REG_DEFINE(mtmp, MLXSW_REG_MTMP_ID, MLXSW_REG_MTMP_LEN);
+
+/* reg_mtmp_slot_index
+ * Slot index (0: Main board).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mtmp, slot_index, 0x00, 16, 4);
+
+#define MLXSW_REG_MTMP_MODULE_INDEX_MIN 64
+#define MLXSW_REG_MTMP_GBOX_INDEX_MIN 256
+/* reg_mtmp_sensor_index
+ * Sensor index to access.
+ * 64-127 of sensor_index are mapped to the SFP+/QSFP modules sequentially
+ * (module 0 is mapped to sensor_index 64).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mtmp, sensor_index, 0x00, 0, 12);
+
+/* Convert to milli degrees Celsius */
+#define MLXSW_REG_MTMP_TEMP_TO_MC(val) ({ typeof(val) v_ = (val); \
+ ((v_) >= 0) ? ((v_) * 125) : \
+ ((s16)((GENMASK(15, 0) + (v_) + 1) \
+ * 125)); })
+
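+/* Worked example (illustrative): with the conversion above, a raw reading of
+ * 200 becomes 25000 milli-degrees (25.0 Celsius), and a raw two's complement
+ * reading of 0xFFF8 (-8, i.e. -1.0 Celsius) becomes -1000.
+ */
+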
+/* reg_mtmp_max_operational_temperature
+ * The highest temperature in the nominal operational range. Reading is in
+ * 0.125 Celsius degrees units.
+ * In case of a module, this is the SFF critical temperature threshold.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mtmp, max_operational_temperature, 0x04, 16, 16);
+
+/* reg_mtmp_temperature
+ * Temperature reading from the sensor. Reading is in 0.125 Celsius
+ * degrees units.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mtmp, temperature, 0x04, 0, 16);
+
+/* reg_mtmp_mte
+ * Max Temperature Enable - enables measuring the max temperature on a sensor.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtmp, mte, 0x08, 31, 1);
+
+/* reg_mtmp_mtr
+ * Max Temperature Reset - clears the value of the max temperature register.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mtmp, mtr, 0x08, 30, 1);
+
+/* reg_mtmp_max_temperature
+ * The highest measured temperature from the sensor.
+ * When the bit mte is cleared, the field max_temperature is reserved.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mtmp, max_temperature, 0x08, 0, 16);
+
+/* reg_mtmp_tee
+ * Temperature Event Enable.
+ * 0 - Do not generate event
+ * 1 - Generate event
+ * 2 - Generate single event
+ * Access: RW
+ */
+
+enum mlxsw_reg_mtmp_tee {
+ MLXSW_REG_MTMP_TEE_NO_EVENT,
+ MLXSW_REG_MTMP_TEE_GENERATE_EVENT,
+ MLXSW_REG_MTMP_TEE_GENERATE_SINGLE_EVENT,
+};
+
+MLXSW_ITEM32(reg, mtmp, tee, 0x0C, 30, 2);
+
+#define MLXSW_REG_MTMP_THRESH_HI 0x348 /* 105 Celsius */
+
+/* reg_mtmp_temperature_threshold_hi
+ * High threshold for Temperature Warning Event. In 0.125 Celsius.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtmp, temperature_threshold_hi, 0x0C, 0, 16);
+
+#define MLXSW_REG_MTMP_HYSTERESIS_TEMP 0x28 /* 5 Celsius */
+/* reg_mtmp_temperature_threshold_lo
+ * Low threshold for Temperature Warning Event. In 0.125 Celsius.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtmp, temperature_threshold_lo, 0x10, 0, 16);
+
+#define MLXSW_REG_MTMP_SENSOR_NAME_SIZE 8
+
+/* reg_mtmp_sensor_name
+ * Sensor Name
+ * Access: RO
+ */
+MLXSW_ITEM_BUF(reg, mtmp, sensor_name, 0x18, MLXSW_REG_MTMP_SENSOR_NAME_SIZE);
+
+static inline void mlxsw_reg_mtmp_pack(char *payload, u8 slot_index,
+ u16 sensor_index, bool max_temp_enable,
+ bool max_temp_reset)
+{
+ MLXSW_REG_ZERO(mtmp, payload);
+ mlxsw_reg_mtmp_slot_index_set(payload, slot_index);
+ mlxsw_reg_mtmp_sensor_index_set(payload, sensor_index);
+ mlxsw_reg_mtmp_mte_set(payload, max_temp_enable);
+ mlxsw_reg_mtmp_mtr_set(payload, max_temp_reset);
+ mlxsw_reg_mtmp_temperature_threshold_hi_set(payload,
+ MLXSW_REG_MTMP_THRESH_HI);
+}
+
+static inline void mlxsw_reg_mtmp_unpack(char *payload, int *p_temp,
+ int *p_max_temp, int *p_temp_hi,
+ int *p_max_oper_temp,
+ char *sensor_name)
+{
+ s16 temp;
+
+ if (p_temp) {
+ temp = mlxsw_reg_mtmp_temperature_get(payload);
+ *p_temp = MLXSW_REG_MTMP_TEMP_TO_MC(temp);
+ }
+ if (p_max_temp) {
+ temp = mlxsw_reg_mtmp_max_temperature_get(payload);
+ *p_max_temp = MLXSW_REG_MTMP_TEMP_TO_MC(temp);
+ }
+ if (p_temp_hi) {
+ temp = mlxsw_reg_mtmp_temperature_threshold_hi_get(payload);
+ *p_temp_hi = MLXSW_REG_MTMP_TEMP_TO_MC(temp);
+ }
+ if (p_max_oper_temp) {
+ temp = mlxsw_reg_mtmp_max_operational_temperature_get(payload);
+ *p_max_oper_temp = MLXSW_REG_MTMP_TEMP_TO_MC(temp);
+ }
+ if (sensor_name)
+ mlxsw_reg_mtmp_sensor_name_memcpy_from(payload, sensor_name);
+}
+
+/* MTWE - Management Temperature Warning Event
+ * -------------------------------------------
+ * This register is used for over temperature warning.
+ */
+#define MLXSW_REG_MTWE_ID 0x900B
+#define MLXSW_REG_MTWE_LEN 0x10
+
+MLXSW_REG_DEFINE(mtwe, MLXSW_REG_MTWE_ID, MLXSW_REG_MTWE_LEN);
+
+/* reg_mtwe_sensor_warning
+ * Bit vector indicating which of the sensor readings are above threshold.
+ * Address 00h bit31 is sensor_warning[127].
+ * Address 0Ch bit0 is sensor_warning[0].
+ * Access: RO
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, mtwe, sensor_warning, 0x0, 0x10, 1);
+
+/* MTBR - Management Temperature Bulk Register
+ * -------------------------------------------
+ * This register is used for bulk temperature reading.
+ */
+#define MLXSW_REG_MTBR_ID 0x900F
+#define MLXSW_REG_MTBR_BASE_LEN 0x10 /* base length, without records */
+#define MLXSW_REG_MTBR_REC_LEN 0x04 /* record length */
+#define MLXSW_REG_MTBR_REC_MAX_COUNT 47 /* firmware limitation */
+#define MLXSW_REG_MTBR_LEN (MLXSW_REG_MTBR_BASE_LEN + \
+ MLXSW_REG_MTBR_REC_LEN * \
+ MLXSW_REG_MTBR_REC_MAX_COUNT)
+
+MLXSW_REG_DEFINE(mtbr, MLXSW_REG_MTBR_ID, MLXSW_REG_MTBR_LEN);
+
+/* reg_mtbr_slot_index
+ * Slot index (0: Main board).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mtbr, slot_index, 0x00, 16, 4);
+
+/* reg_mtbr_base_sensor_index
+ * Base sensor index to access (0 - ASIC sensor, 1-63 - ambient sensors,
+ * 64-127 are mapped to the SFP+/QSFP modules sequentially).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mtbr, base_sensor_index, 0x00, 0, 12);
+
+/* reg_mtbr_num_rec
+ * Request: Number of records to read
+ * Response: Number of records read
+ * See above description for more details.
+ * Range 1..255
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtbr, num_rec, 0x04, 0, 8);
+
+/* reg_mtbr_rec_max_temp
+ * The highest measured temperature from the sensor.
+ * When the bit mte is cleared, the field max_temperature is reserved.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, mtbr, rec_max_temp, MLXSW_REG_MTBR_BASE_LEN, 16,
+ 16, MLXSW_REG_MTBR_REC_LEN, 0x00, false);
+
+/* reg_mtbr_rec_temp
+ * Temperature reading from the sensor. Reading is in 0.125 Celsius
+ * degrees units.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, mtbr, rec_temp, MLXSW_REG_MTBR_BASE_LEN, 0, 16,
+ MLXSW_REG_MTBR_REC_LEN, 0x00, false);
+
+static inline void mlxsw_reg_mtbr_pack(char *payload, u8 slot_index,
+ u16 base_sensor_index, u8 num_rec)
+{
+ MLXSW_REG_ZERO(mtbr, payload);
+ mlxsw_reg_mtbr_slot_index_set(payload, slot_index);
+ mlxsw_reg_mtbr_base_sensor_index_set(payload, base_sensor_index);
+ mlxsw_reg_mtbr_num_rec_set(payload, num_rec);
+}
+
+/* Error codes from temperature reading */
+enum mlxsw_reg_mtbr_temp_status {
+ MLXSW_REG_MTBR_NO_CONN = 0x8000,
+ MLXSW_REG_MTBR_NO_TEMP_SENS = 0x8001,
+ MLXSW_REG_MTBR_INDEX_NA = 0x8002,
+ MLXSW_REG_MTBR_BAD_SENS_INFO = 0x8003,
+};
+
+/* Base index for reading modules temperature */
+#define MLXSW_REG_MTBR_BASE_MODULE_INDEX 64
+
+static inline void mlxsw_reg_mtbr_temp_unpack(char *payload, int rec_ind,
+ u16 *p_temp, u16 *p_max_temp)
+{
+ if (p_temp)
+ *p_temp = mlxsw_reg_mtbr_rec_temp_get(payload, rec_ind);
+ if (p_max_temp)
+ *p_max_temp = mlxsw_reg_mtbr_rec_max_temp_get(payload, rec_ind);
+}
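+
+/* Illustrative sketch, not part of the register definition: a raw MTBR
+ * temperature equal to one of the error codes above is not a reading; a
+ * hypothetical validity check is shown.
+ */
+static inline bool mlxsw_reg_mtbr_example_temp_is_err(u16 temp)
+{
+ switch (temp) {
+ case MLXSW_REG_MTBR_NO_CONN:
+ case MLXSW_REG_MTBR_NO_TEMP_SENS:
+ case MLXSW_REG_MTBR_INDEX_NA:
+ case MLXSW_REG_MTBR_BAD_SENS_INFO:
+ return true;
+ default:
+ return false;
+ }
+}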
+
+/* MCIA - Management Cable Info Access
+ * -----------------------------------
+ * MCIA register is used to access the SFP+ and QSFP connectors' EEPROM.
+ */
+
+#define MLXSW_REG_MCIA_ID 0x9014
+#define MLXSW_REG_MCIA_LEN 0x40
+
+MLXSW_REG_DEFINE(mcia, MLXSW_REG_MCIA_ID, MLXSW_REG_MCIA_LEN);
+
+/* reg_mcia_l
+ * Lock bit. Setting this bit will lock the access to the specific
+ * cable. Used for updating a full page in a cable EEPROM. Any access
+ * other than subsequent writes will fail while the port is locked.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcia, l, 0x00, 31, 1);
+
+/* reg_mcia_module
+ * Module number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mcia, module, 0x00, 16, 8);
+
+/* reg_mcia_slot
+ * Slot index (0: Main board)
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mcia, slot, 0x00, 12, 4);
+
+enum {
+ MLXSW_REG_MCIA_STATUS_GOOD = 0,
+ /* No response from module's EEPROM. */
+ MLXSW_REG_MCIA_STATUS_NO_EEPROM_MODULE = 1,
+ /* Module type not supported by the device. */
+ MLXSW_REG_MCIA_STATUS_MODULE_NOT_SUPPORTED = 2,
+ /* No module present indication. */
+ MLXSW_REG_MCIA_STATUS_MODULE_NOT_CONNECTED = 3,
+ /* Error occurred while trying to access module's EEPROM using I2C. */
+ MLXSW_REG_MCIA_STATUS_I2C_ERROR = 9,
+ /* Module is disabled. */
+ MLXSW_REG_MCIA_STATUS_MODULE_DISABLED = 16,
+};
+
+/* reg_mcia_status
+ * Module status.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mcia, status, 0x00, 0, 8);
+
+/* reg_mcia_i2c_device_address
+ * I2C device address.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcia, i2c_device_address, 0x04, 24, 8);
+
+/* reg_mcia_page_number
+ * Page number.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcia, page_number, 0x04, 16, 8);
+
+/* reg_mcia_device_address
+ * Device address.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcia, device_address, 0x04, 0, 16);
+
+/* reg_mcia_bank_number
+ * Bank number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mcia, bank_number, 0x08, 16, 8);
+
+/* reg_mcia_size
+ * Number of bytes to read/write (up to 48 bytes).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcia, size, 0x08, 0, 16);
+
+#define MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH 256
+#define MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH 128
+#define MLXSW_REG_MCIA_EEPROM_SIZE 48
+#define MLXSW_REG_MCIA_I2C_ADDR_LOW 0x50
+#define MLXSW_REG_MCIA_I2C_ADDR_HIGH 0x51
+#define MLXSW_REG_MCIA_PAGE0_LO_OFF 0xa0
+#define MLXSW_REG_MCIA_TH_ITEM_SIZE 2
+#define MLXSW_REG_MCIA_TH_PAGE_NUM 3
+#define MLXSW_REG_MCIA_TH_PAGE_CMIS_NUM 2
+#define MLXSW_REG_MCIA_PAGE0_LO 0
+#define MLXSW_REG_MCIA_TH_PAGE_OFF 0x80
+#define MLXSW_REG_MCIA_EEPROM_CMIS_FLAT_MEMORY BIT(7)
+
+enum mlxsw_reg_mcia_eeprom_module_info_rev_id {
+ MLXSW_REG_MCIA_EEPROM_MODULE_INFO_REV_ID_UNSPC = 0x00,
+ MLXSW_REG_MCIA_EEPROM_MODULE_INFO_REV_ID_8436 = 0x01,
+ MLXSW_REG_MCIA_EEPROM_MODULE_INFO_REV_ID_8636 = 0x03,
+};
+
+enum mlxsw_reg_mcia_eeprom_module_info_id {
+ MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_SFP = 0x03,
+ MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP = 0x0C,
+ MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS = 0x0D,
+ MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28 = 0x11,
+ MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_DD = 0x18,
+ MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_OSFP = 0x19,
+};
+
+enum mlxsw_reg_mcia_eeprom_module_info {
+ MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID,
+ MLXSW_REG_MCIA_EEPROM_MODULE_INFO_REV_ID,
+ MLXSW_REG_MCIA_EEPROM_MODULE_INFO_TYPE_ID,
+ MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE,
+};
+
+/* reg_mcia_eeprom
+ * Bytes to read/write.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, mcia, eeprom, 0x10, MLXSW_REG_MCIA_EEPROM_SIZE);
+
+/* This is used to access the optional upper pages (1-3) in the QSFP+
+ * memory map. Page 1 is available on offset 256 through 383, page 2 -
+ * on offset 384 through 511, page 3 - on offset 512 through 639.
+ */
+#define MLXSW_REG_MCIA_PAGE_GET(off) (((off) - \
+ MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) / \
+ MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH + 1)
+
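+/* Worked example (illustrative): with the formula above, offset 300 maps to
+ * page 1 and offset 400 maps to page 2, since (300 - 256) / 128 + 1 = 1 and
+ * (400 - 256) / 128 + 1 = 2.
+ */
+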
+static inline void mlxsw_reg_mcia_pack(char *payload, u8 slot_index, u8 module,
+ u8 lock, u8 page_number,
+ u16 device_addr, u8 size,
+ u8 i2c_device_addr)
+{
+ MLXSW_REG_ZERO(mcia, payload);
+ mlxsw_reg_mcia_slot_set(payload, slot_index);
+ mlxsw_reg_mcia_module_set(payload, module);
+ mlxsw_reg_mcia_l_set(payload, lock);
+ mlxsw_reg_mcia_page_number_set(payload, page_number);
+ mlxsw_reg_mcia_device_address_set(payload, device_addr);
+ mlxsw_reg_mcia_size_set(payload, size);
+ mlxsw_reg_mcia_i2c_device_address_set(payload, i2c_device_addr);
+}
+
+/* MPAT - Monitoring Port Analyzer Table
+ * -------------------------------------
+ * MPAT Register is used to query and configure the Switch Port Analyzer Table.
+ * For an enabled analyzer, all fields except e (enable) cannot be modified.
+ */
+#define MLXSW_REG_MPAT_ID 0x901A
+#define MLXSW_REG_MPAT_LEN 0x78
+
+MLXSW_REG_DEFINE(mpat, MLXSW_REG_MPAT_ID, MLXSW_REG_MPAT_LEN);
+
+/* reg_mpat_pa_id
+ * Port Analyzer ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mpat, pa_id, 0x00, 28, 4);
+
+/* reg_mpat_session_id
+ * Mirror Session ID.
+ * Used for MIRROR_SESSION<i> trap.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, session_id, 0x00, 24, 4);
+
+/* reg_mpat_system_port
+ * A unique port identifier for the final destination of the packet.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, system_port, 0x00, 0, 16);
+
+/* reg_mpat_e
+ * Enable. Indicating the Port Analyzer is enabled.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, e, 0x04, 31, 1);
+
+/* reg_mpat_qos
+ * Quality Of Service Mode.
+ * 0: CONFIGURED - QoS parameters (Switch Priority, and encapsulation
+ * PCP, DEI, DSCP or VL) are configured.
+ * 1: MAINTAIN - QoS parameters (Switch Priority, Color) are the
+ * same as in the original packet that has triggered the mirroring. For
+ * SPAN also the pcp,dei are maintained.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, qos, 0x04, 26, 1);
+
+/* reg_mpat_be
+ * Best effort mode. Indicates mirroring traffic should not cause packet
+ * drop or back pressure, but will discard the mirrored packets. Mirrored
+ * packets will be forwarded in a best effort manner.
+ * 0: Do not discard mirrored packets
+ * 1: Discard mirrored packets if causing congestion
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, be, 0x04, 25, 1);
+
+enum mlxsw_reg_mpat_span_type {
+ /* Local SPAN Ethernet.
+ * The original packet is not encapsulated.
+ */
+ MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH = 0x0,
+
+ /* Remote SPAN Ethernet VLAN.
+ * The packet is forwarded to the monitoring port on the monitoring
+ * VLAN.
+ */
+ MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH = 0x1,
+
+ /* Encapsulated Remote SPAN Ethernet L3 GRE.
+ * The packet is encapsulated with GRE header.
+ */
+ MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3 = 0x3,
+};
+
+/* reg_mpat_span_type
+ * SPAN type.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, span_type, 0x04, 0, 4);
+
+/* reg_mpat_pide
+ * Policer enable.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, pide, 0x0C, 15, 1);
+
+/* reg_mpat_pid
+ * Policer ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, pid, 0x0C, 0, 14);
+
+/* Remote SPAN - Ethernet VLAN
+ * - - - - - - - - - - - - - -
+ */
+
+/* reg_mpat_eth_rspan_vid
+ * Encapsulation header VLAN ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_vid, 0x18, 0, 12);
+
+/* Encapsulated Remote SPAN - Ethernet L2
+ * - - - - - - - - - - - - - - - - - - -
+ */
+
+enum mlxsw_reg_mpat_eth_rspan_version {
+ MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER = 15,
+};
+
+/* reg_mpat_eth_rspan_version
+ * RSPAN mirror header version.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_version, 0x10, 18, 4);
+
+/* reg_mpat_eth_rspan_mac
+ * Destination MAC address.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, mpat, eth_rspan_mac, 0x12, 6);
+
+/* reg_mpat_eth_rspan_tp
+ * Tag Packet. Indicates whether the mirroring header should be VLAN tagged.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_tp, 0x18, 16, 1);
+
+/* Encapsulated Remote SPAN - Ethernet L3
+ * - - - - - - - - - - - - - - - - - - -
+ */
+
+enum mlxsw_reg_mpat_eth_rspan_protocol {
+ MLXSW_REG_MPAT_ETH_RSPAN_PROTOCOL_IPV4,
+ MLXSW_REG_MPAT_ETH_RSPAN_PROTOCOL_IPV6,
+};
+
+/* reg_mpat_eth_rspan_protocol
+ * SPAN encapsulation protocol.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_protocol, 0x18, 24, 4);
+
+/* reg_mpat_eth_rspan_ttl
+ * Encapsulation header Time-to-Live/HopLimit.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_ttl, 0x1C, 4, 8);
+
+/* reg_mpat_eth_rspan_smac
+ * Source MAC address
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, mpat, eth_rspan_smac, 0x22, 6);
+
+/* reg_mpat_eth_rspan_dip*
+ * Destination IP address. The IP version is configured by protocol.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_dip4, 0x4C, 0, 32);
+MLXSW_ITEM_BUF(reg, mpat, eth_rspan_dip6, 0x40, 16);
+
+/* reg_mpat_eth_rspan_sip*
+ * Source IP address. The IP version is configured by protocol.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_sip4, 0x5C, 0, 32);
+MLXSW_ITEM_BUF(reg, mpat, eth_rspan_sip6, 0x50, 16);
+
+static inline void mlxsw_reg_mpat_pack(char *payload, u8 pa_id,
+ u16 system_port, bool e,
+ enum mlxsw_reg_mpat_span_type span_type)
+{
+ MLXSW_REG_ZERO(mpat, payload);
+ mlxsw_reg_mpat_pa_id_set(payload, pa_id);
+ mlxsw_reg_mpat_system_port_set(payload, system_port);
+ mlxsw_reg_mpat_e_set(payload, e);
+ mlxsw_reg_mpat_qos_set(payload, 1);
+ mlxsw_reg_mpat_be_set(payload, 1);
+ mlxsw_reg_mpat_span_type_set(payload, span_type);
+}
+
+static inline void mlxsw_reg_mpat_eth_rspan_pack(char *payload, u16 vid)
+{
+ mlxsw_reg_mpat_eth_rspan_vid_set(payload, vid);
+}
+
+static inline void
+mlxsw_reg_mpat_eth_rspan_l2_pack(char *payload,
+ enum mlxsw_reg_mpat_eth_rspan_version version,
+ const char *mac,
+ bool tp)
+{
+ mlxsw_reg_mpat_eth_rspan_version_set(payload, version);
+ mlxsw_reg_mpat_eth_rspan_mac_memcpy_to(payload, mac);
+ mlxsw_reg_mpat_eth_rspan_tp_set(payload, tp);
+}
+
+static inline void
+mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(char *payload, u8 ttl,
+ const char *smac,
+ u32 sip, u32 dip)
+{
+ mlxsw_reg_mpat_eth_rspan_ttl_set(payload, ttl);
+ mlxsw_reg_mpat_eth_rspan_smac_memcpy_to(payload, smac);
+ mlxsw_reg_mpat_eth_rspan_protocol_set(payload,
+ MLXSW_REG_MPAT_ETH_RSPAN_PROTOCOL_IPV4);
+ mlxsw_reg_mpat_eth_rspan_sip4_set(payload, sip);
+ mlxsw_reg_mpat_eth_rspan_dip4_set(payload, dip);
+}
+
+static inline void
+mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(char *payload, u8 ttl,
+ const char *smac,
+ struct in6_addr sip, struct in6_addr dip)
+{
+ mlxsw_reg_mpat_eth_rspan_ttl_set(payload, ttl);
+ mlxsw_reg_mpat_eth_rspan_smac_memcpy_to(payload, smac);
+ mlxsw_reg_mpat_eth_rspan_protocol_set(payload,
+ MLXSW_REG_MPAT_ETH_RSPAN_PROTOCOL_IPV6);
+ mlxsw_reg_mpat_eth_rspan_sip6_memcpy_to(payload, (void *)&sip);
+ mlxsw_reg_mpat_eth_rspan_dip6_memcpy_to(payload, (void *)&dip);
+}
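+
+/* Usage sketch (illustrative only, not part of the register definition):
+ * building an encapsulated remote SPAN (mirror-to-IPv4) agent by combining
+ * the MPAT pack helpers above and then writing the payload. The function
+ * name, VLAN, TTL and addresses are hypothetical placeholders;
+ * mlxsw_reg_write(), struct mlxsw_core, MLXSW_REG_MPAT_LEN and the
+ * MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3 enumerator are assumed from core.h
+ * and from the MPAT definitions earlier in this file.
+ */
+static inline int
+mlxsw_example_mpat_erspan_v4_write(struct mlxsw_core *mlxsw_core, u8 pa_id,
+ u16 system_port, const char *dmac,
+ const char *smac, u32 sip, u32 dip)
+{
+ char mpat_pl[MLXSW_REG_MPAT_LEN];
+
+ /* Base record: enable the agent and select L3 encapsulation. */
+ mlxsw_reg_mpat_pack(mpat_pl, pa_id, system_port, true,
+ MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
+ /* Encapsulation VLAN, L2 header and IPv4 L3 header. */
+ mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, 1);
+ mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
+ MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
+ dmac, false);
+ mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl, 64, smac, sip, dip);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mpat), mpat_pl);
+}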
+
+/* MPAR - Monitoring Port Analyzer Register
+ * ----------------------------------------
+ * MPAR register is used to query and configure the port analyzer port mirroring
+ * properties.
+ */
+#define MLXSW_REG_MPAR_ID 0x901B
+#define MLXSW_REG_MPAR_LEN 0x0C
+
+MLXSW_REG_DEFINE(mpar, MLXSW_REG_MPAR_ID, MLXSW_REG_MPAR_LEN);
+
+/* reg_mpar_local_port
+ * The local port to mirror the packets from.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, mpar, 0x00, 16, 0x00, 4);
+
+enum mlxsw_reg_mpar_i_e {
+ MLXSW_REG_MPAR_TYPE_EGRESS,
+ MLXSW_REG_MPAR_TYPE_INGRESS,
+};
+
+/* reg_mpar_i_e
+ * Ingress/Egress
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mpar, i_e, 0x00, 0, 4);
+
+/* reg_mpar_enable
+ * Enable mirroring
+ * By default, port mirroring is disabled for all ports.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpar, enable, 0x04, 31, 1);
+
+/* reg_mpar_pa_id
+ * Port Analyzer ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpar, pa_id, 0x04, 0, 4);
+
+#define MLXSW_REG_MPAR_RATE_MAX 3500000000UL
+
+/* reg_mpar_probability_rate
+ * Sampling rate.
+ * Valid values are: 1 to 3.5*10^9
+ * Value of 1 means "sample all". Default is 1.
+ * Reserved when Spectrum-1.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpar, probability_rate, 0x08, 0, 32);
+
+static inline void mlxsw_reg_mpar_pack(char *payload, u16 local_port,
+ enum mlxsw_reg_mpar_i_e i_e,
+ bool enable, u8 pa_id,
+ u32 probability_rate)
+{
+ MLXSW_REG_ZERO(mpar, payload);
+ mlxsw_reg_mpar_local_port_set(payload, local_port);
+ mlxsw_reg_mpar_enable_set(payload, enable);
+ mlxsw_reg_mpar_i_e_set(payload, i_e);
+ mlxsw_reg_mpar_pa_id_set(payload, pa_id);
+ mlxsw_reg_mpar_probability_rate_set(payload, probability_rate);
+}
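+
+/* Usage sketch (illustrative only): once the SPAN agent itself has been set
+ * up via MPAT, MPAR binds a mirrored port to that agent per direction. The
+ * function name is hypothetical; mlxsw_reg_write() and struct mlxsw_core are
+ * assumed from core.h.
+ */
+static inline int
+mlxsw_example_mpar_bind(struct mlxsw_core *mlxsw_core, u16 local_port,
+ bool ingress, bool enable, u8 pa_id)
+{
+ char mpar_pl[MLXSW_REG_MPAR_LEN];
+
+ /* Probability rate of 1 mirrors every matching packet. */
+ mlxsw_reg_mpar_pack(mpar_pl, local_port,
+ ingress ? MLXSW_REG_MPAR_TYPE_INGRESS :
+ MLXSW_REG_MPAR_TYPE_EGRESS,
+ enable, pa_id, 1);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mpar), mpar_pl);
+}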
+
+/* MGIR - Management General Information Register
+ * ----------------------------------------------
+ * MGIR register allows software to query the hardware and firmware general
+ * information.
+ */
+#define MLXSW_REG_MGIR_ID 0x9020
+#define MLXSW_REG_MGIR_LEN 0x9C
+
+MLXSW_REG_DEFINE(mgir, MLXSW_REG_MGIR_ID, MLXSW_REG_MGIR_LEN);
+
+/* reg_mgir_hw_info_device_hw_revision
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgir, hw_info_device_hw_revision, 0x0, 16, 16);
+
+#define MLXSW_REG_MGIR_FW_INFO_PSID_SIZE 16
+
+/* reg_mgir_fw_info_psid
+ * PSID (ASCII string).
+ * Access: RO
+ */
+MLXSW_ITEM_BUF(reg, mgir, fw_info_psid, 0x30, MLXSW_REG_MGIR_FW_INFO_PSID_SIZE);
+
+/* reg_mgir_fw_info_extended_major
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgir, fw_info_extended_major, 0x44, 0, 32);
+
+/* reg_mgir_fw_info_extended_minor
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgir, fw_info_extended_minor, 0x48, 0, 32);
+
+/* reg_mgir_fw_info_extended_sub_minor
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgir, fw_info_extended_sub_minor, 0x4C, 0, 32);
+
+static inline void mlxsw_reg_mgir_pack(char *payload)
+{
+ MLXSW_REG_ZERO(mgir, payload);
+}
+
+static inline void
+mlxsw_reg_mgir_unpack(char *payload, u32 *hw_rev, char *fw_info_psid,
+ u32 *fw_major, u32 *fw_minor, u32 *fw_sub_minor)
+{
+ *hw_rev = mlxsw_reg_mgir_hw_info_device_hw_revision_get(payload);
+ mlxsw_reg_mgir_fw_info_psid_memcpy_from(payload, fw_info_psid);
+ *fw_major = mlxsw_reg_mgir_fw_info_extended_major_get(payload);
+ *fw_minor = mlxsw_reg_mgir_fw_info_extended_minor_get(payload);
+ *fw_sub_minor = mlxsw_reg_mgir_fw_info_extended_sub_minor_get(payload);
+}
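+
+/* Usage sketch (illustrative only): querying the running firmware version,
+ * e.g. for reporting through devlink info. The function name is
+ * hypothetical; mlxsw_reg_query() and struct mlxsw_core are assumed from
+ * core.h.
+ */
+static inline int
+mlxsw_example_mgir_fw_version_get(struct mlxsw_core *mlxsw_core, u32 *p_major,
+ u32 *p_minor, u32 *p_sub_minor)
+{
+ char fw_info_psid[MLXSW_REG_MGIR_FW_INFO_PSID_SIZE];
+ char mgir_pl[MLXSW_REG_MGIR_LEN];
+ u32 hw_rev;
+ int err;
+
+ mlxsw_reg_mgir_pack(mgir_pl);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgir), mgir_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mgir_unpack(mgir_pl, &hw_rev, fw_info_psid, p_major,
+ p_minor, p_sub_minor);
+ return 0;
+}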
+
+/* MRSR - Management Reset and Shutdown Register
+ * ---------------------------------------------
+ * MRSR register is used to reset or shut down the switch or
+ * the entire system (when applicable).
+ */
+#define MLXSW_REG_MRSR_ID 0x9023
+#define MLXSW_REG_MRSR_LEN 0x08
+
+MLXSW_REG_DEFINE(mrsr, MLXSW_REG_MRSR_ID, MLXSW_REG_MRSR_LEN);
+
+/* reg_mrsr_command
+ * Reset/shutdown command
+ * 0 - do nothing
+ * 1 - software reset
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mrsr, command, 0x00, 0, 4);
+
+static inline void mlxsw_reg_mrsr_pack(char *payload)
+{
+ MLXSW_REG_ZERO(mrsr, payload);
+ mlxsw_reg_mrsr_command_set(payload, 1);
+}
+
+/* MLCR - Management LED Control Register
+ * --------------------------------------
+ * Controls the system LEDs.
+ */
+#define MLXSW_REG_MLCR_ID 0x902B
+#define MLXSW_REG_MLCR_LEN 0x0C
+
+MLXSW_REG_DEFINE(mlcr, MLXSW_REG_MLCR_ID, MLXSW_REG_MLCR_LEN);
+
+/* reg_mlcr_local_port
+ * Local port number.
+ * Access: RW
+ */
+MLXSW_ITEM32_LP(reg, mlcr, 0x00, 16, 0x00, 24);
+
+#define MLXSW_REG_MLCR_DURATION_MAX 0xFFFF
+
+/* reg_mlcr_beacon_duration
+ * Duration of the beacon to be active, in seconds.
+ * 0x0 - Will turn off the beacon.
+ * 0xFFFF - Will turn on the beacon until explicitly turned off.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mlcr, beacon_duration, 0x04, 0, 16);
+
+/* reg_mlcr_beacon_remain
+ * Remaining duration of the beacon, in seconds.
+ * 0xFFFF indicates an infinite amount of time.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mlcr, beacon_remain, 0x08, 0, 16);
+
+static inline void mlxsw_reg_mlcr_pack(char *payload, u16 local_port,
+ bool active)
+{
+ MLXSW_REG_ZERO(mlcr, payload);
+ mlxsw_reg_mlcr_local_port_set(payload, local_port);
+ mlxsw_reg_mlcr_beacon_duration_set(payload, active ?
+ MLXSW_REG_MLCR_DURATION_MAX : 0);
+}
+
+/* MCION - Management Cable IO and Notifications Register
+ * ------------------------------------------------------
+ * The MCION register is used to query transceiver modules' IO pins and other
+ * notifications.
+ */
+#define MLXSW_REG_MCION_ID 0x9052
+#define MLXSW_REG_MCION_LEN 0x18
+
+MLXSW_REG_DEFINE(mcion, MLXSW_REG_MCION_ID, MLXSW_REG_MCION_LEN);
+
+/* reg_mcion_module
+ * Module number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mcion, module, 0x00, 16, 8);
+
+/* reg_mcion_slot_index
+ * Slot index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mcion, slot_index, 0x00, 12, 4);
+
+enum {
+ MLXSW_REG_MCION_MODULE_STATUS_BITS_PRESENT_MASK = BIT(0),
+ MLXSW_REG_MCION_MODULE_STATUS_BITS_LOW_POWER_MASK = BIT(8),
+};
+
+/* reg_mcion_module_status_bits
+ * Module IO status as defined by SFF.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mcion, module_status_bits, 0x04, 0, 16);
+
+static inline void mlxsw_reg_mcion_pack(char *payload, u8 slot_index, u8 module)
+{
+ MLXSW_REG_ZERO(mcion, payload);
+ mlxsw_reg_mcion_slot_index_set(payload, slot_index);
+ mlxsw_reg_mcion_module_set(payload, module);
+}
+
+/* MTPPS - Management Pulse Per Second Register
+ * --------------------------------------------
+ * This register provides the device PPS capabilities, configures the PPS in
+ * and out modules and holds the PPS in timestamp.
+ */
+#define MLXSW_REG_MTPPS_ID 0x9053
+#define MLXSW_REG_MTPPS_LEN 0x3C
+
+MLXSW_REG_DEFINE(mtpps, MLXSW_REG_MTPPS_ID, MLXSW_REG_MTPPS_LEN);
+
+/* reg_mtpps_enable
+ * Enables the PPS functionality for the specific pin.
+ * A boolean variable.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtpps, enable, 0x20, 31, 1);
+
+enum mlxsw_reg_mtpps_pin_mode {
+ MLXSW_REG_MTPPS_PIN_MODE_VIRTUAL_PIN = 0x2,
+};
+
+/* reg_mtpps_pin_mode
+ * Pin mode to be used. The mode must comply with the supported modes of the
+ * requested pin.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtpps, pin_mode, 0x20, 8, 4);
+
+#define MLXSW_REG_MTPPS_PIN_SP_VIRTUAL_PIN 7
+
+/* reg_mtpps_pin
+ * Pin to be configured or queried out of the supported pins.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mtpps, pin, 0x20, 0, 8);
+
+/* reg_mtpps_time_stamp
+ * When pin_mode = pps_in, the latched device time when it was triggered from
+ * the external GPIO pin.
+ * When pin_mode = pps_out or virtual_pin or pps_out_and_virtual_pin, the
+ * target time to generate the next output signal.
+ * Time is in units of device clock.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, mtpps, time_stamp, 0x28, 0, 64);
+
+static inline void
+mlxsw_reg_mtpps_vpin_pack(char *payload, u64 time_stamp)
+{
+ MLXSW_REG_ZERO(mtpps, payload);
+ mlxsw_reg_mtpps_pin_set(payload, MLXSW_REG_MTPPS_PIN_SP_VIRTUAL_PIN);
+ mlxsw_reg_mtpps_pin_mode_set(payload,
+ MLXSW_REG_MTPPS_PIN_MODE_VIRTUAL_PIN);
+ mlxsw_reg_mtpps_enable_set(payload, true);
+ mlxsw_reg_mtpps_time_stamp_set(payload, time_stamp);
+}
+
+/* MTUTC - Management UTC Register
+ * -------------------------------
+ * Configures the HW UTC counter.
+ */
+#define MLXSW_REG_MTUTC_ID 0x9055
+#define MLXSW_REG_MTUTC_LEN 0x1C
+
+MLXSW_REG_DEFINE(mtutc, MLXSW_REG_MTUTC_ID, MLXSW_REG_MTUTC_LEN);
+
+enum mlxsw_reg_mtutc_operation {
+ MLXSW_REG_MTUTC_OPERATION_SET_TIME_AT_NEXT_SEC = 0,
+ MLXSW_REG_MTUTC_OPERATION_SET_TIME_IMMEDIATE = 1,
+ MLXSW_REG_MTUTC_OPERATION_ADJUST_TIME = 2,
+ MLXSW_REG_MTUTC_OPERATION_ADJUST_FREQ = 3,
+};
+
+/* reg_mtutc_operation
+ * Operation.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mtutc, operation, 0x00, 0, 4);
+
+/* reg_mtutc_freq_adjustment
+ * Frequency adjustment: Every PPS the HW frequency will be
+ * adjusted by this value. Units of HW clock, where HW counts
+ * 10^9 HW clocks for 1 HW second. Range is from -50,000,000 to +50,000,000.
+ * In Spectrum-2, the field is reversed: positive values mean decreasing the
+ * frequency.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtutc, freq_adjustment, 0x04, 0, 32);
+
+#define MLXSW_REG_MTUTC_MAX_FREQ_ADJ (50 * 1000 * 1000)
+
+/* reg_mtutc_utc_sec
+ * UTC seconds.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mtutc, utc_sec, 0x10, 0, 32);
+
+/* reg_mtutc_utc_nsec
+ * UTC nSecs.
+ * Range 0..(10^9-1)
+ * Updated when operation is SET_TIME_IMMEDIATE.
+ * Reserved on Spectrum-1.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mtutc, utc_nsec, 0x14, 0, 30);
+
+/* reg_mtutc_time_adjustment
+ * Time adjustment.
+ * Units of nSec.
+ * Range is from -32768 to +32767.
+ * Updated when operation is ADJUST_TIME.
+ * Reserved on Spectrum-1.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mtutc, time_adjustment, 0x18, 0, 32);
+
+static inline void
+mlxsw_reg_mtutc_pack(char *payload, enum mlxsw_reg_mtutc_operation oper,
+ u32 freq_adj, u32 utc_sec, u32 utc_nsec, u32 time_adj)
+{
+ MLXSW_REG_ZERO(mtutc, payload);
+ mlxsw_reg_mtutc_operation_set(payload, oper);
+ mlxsw_reg_mtutc_freq_adjustment_set(payload, freq_adj);
+ mlxsw_reg_mtutc_utc_sec_set(payload, utc_sec);
+ mlxsw_reg_mtutc_utc_nsec_set(payload, utc_nsec);
+ mlxsw_reg_mtutc_time_adjustment_set(payload, time_adj);
+}
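+
+/* Usage sketch (illustrative only): a PHC .adjfine-style frequency
+ * adjustment only needs the operation and the frequency delta; the other
+ * fields are left at zero since they are not used by ADJUST_FREQ. The
+ * function name is hypothetical; mlxsw_reg_write() and struct mlxsw_core are
+ * assumed from core.h.
+ */
+static inline int
+mlxsw_example_mtutc_freq_adjust(struct mlxsw_core *mlxsw_core, s32 freq_adj)
+{
+ char mtutc_pl[MLXSW_REG_MTUTC_LEN];
+
+ mlxsw_reg_mtutc_pack(mtutc_pl, MLXSW_REG_MTUTC_OPERATION_ADJUST_FREQ,
+ freq_adj, 0, 0, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
+}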
+
+/* MCQI - Management Component Query Information
+ * ---------------------------------------------
+ * This register allows querying information about firmware components.
+ */
+#define MLXSW_REG_MCQI_ID 0x9061
+#define MLXSW_REG_MCQI_BASE_LEN 0x18
+#define MLXSW_REG_MCQI_CAP_LEN 0x14
+#define MLXSW_REG_MCQI_LEN (MLXSW_REG_MCQI_BASE_LEN + MLXSW_REG_MCQI_CAP_LEN)
+
+MLXSW_REG_DEFINE(mcqi, MLXSW_REG_MCQI_ID, MLXSW_REG_MCQI_LEN);
+
+/* reg_mcqi_component_index
+ * Index of the accessed component.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mcqi, component_index, 0x00, 0, 16);
+
+enum mlxsw_reg_mcqi_info_type {
+ MLXSW_REG_MCQI_INFO_TYPE_CAPABILITIES,
+};
+
+/* reg_mcqi_info_type
+ * Component properties set.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcqi, info_type, 0x08, 0, 5);
+
+/* reg_mcqi_offset
+ * The requested/returned data offset from the section start, given in bytes.
+ * Must be DWORD aligned.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcqi, offset, 0x10, 0, 32);
+
+/* reg_mcqi_data_size
+ * The requested/returned data size, given in bytes. If data_size is not DWORD
+ * aligned, the last bytes are zero padded.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcqi, data_size, 0x14, 0, 16);
+
+/* reg_mcqi_cap_max_component_size
+ * Maximum size for this component, given in bytes.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mcqi, cap_max_component_size, 0x20, 0, 32);
+
+/* reg_mcqi_cap_log_mcda_word_size
+ * Log 2 of the access word size in bytes. Read and write access must be aligned
+ * to the word size. Write access must be done for an integer number of words.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mcqi, cap_log_mcda_word_size, 0x24, 28, 4);
+
+/* reg_mcqi_cap_mcda_max_write_size
+ * Maximal write size for MCDA register
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mcqi, cap_mcda_max_write_size, 0x24, 0, 16);
+
+static inline void mlxsw_reg_mcqi_pack(char *payload, u16 component_index)
+{
+ MLXSW_REG_ZERO(mcqi, payload);
+ mlxsw_reg_mcqi_component_index_set(payload, component_index);
+ mlxsw_reg_mcqi_info_type_set(payload,
+ MLXSW_REG_MCQI_INFO_TYPE_CAPABILITIES);
+ mlxsw_reg_mcqi_offset_set(payload, 0);
+ mlxsw_reg_mcqi_data_size_set(payload, MLXSW_REG_MCQI_CAP_LEN);
+}
+
+static inline void mlxsw_reg_mcqi_unpack(char *payload,
+ u32 *p_cap_max_component_size,
+ u8 *p_cap_log_mcda_word_size,
+ u16 *p_cap_mcda_max_write_size)
+{
+ *p_cap_max_component_size =
+ mlxsw_reg_mcqi_cap_max_component_size_get(payload);
+ *p_cap_log_mcda_word_size =
+ mlxsw_reg_mcqi_cap_log_mcda_word_size_get(payload);
+ *p_cap_mcda_max_write_size =
+ mlxsw_reg_mcqi_cap_mcda_max_write_size_get(payload);
+}
+
+/* MCC - Management Component Control
+ * ----------------------------------
+ * Controls the firmware component and updates the FSM.
+ */
+#define MLXSW_REG_MCC_ID 0x9062
+#define MLXSW_REG_MCC_LEN 0x1C
+
+MLXSW_REG_DEFINE(mcc, MLXSW_REG_MCC_ID, MLXSW_REG_MCC_LEN);
+
+enum mlxsw_reg_mcc_instruction {
+ MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE = 0x01,
+ MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE = 0x02,
+ MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT = 0x03,
+ MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT = 0x04,
+ MLXSW_REG_MCC_INSTRUCTION_ACTIVATE = 0x06,
+ MLXSW_REG_MCC_INSTRUCTION_CANCEL = 0x08,
+};
+
+/* reg_mcc_instruction
+ * Command to be executed by the FSM.
+ * Applicable for write operation only.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcc, instruction, 0x00, 0, 8);
+
+/* reg_mcc_component_index
+ * Index of the accessed component. Applicable only for commands that
+ * refer to components. Otherwise, this field is reserved.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mcc, component_index, 0x04, 0, 16);
+
+/* reg_mcc_update_handle
+ * Token representing the current flow executed by the FSM.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mcc, update_handle, 0x08, 0, 24);
+
+/* reg_mcc_error_code
+ * Indicates the successful completion of the instruction, or the reason it
+ * failed
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mcc, error_code, 0x0C, 8, 8);
+
+/* reg_mcc_control_state
+ * Current FSM state
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mcc, control_state, 0x0C, 0, 4);
+
+/* reg_mcc_component_size
+ * Component size in bytes. Valid for UPDATE_COMPONENT instruction. Specifying
+ * the size may shorten the update time. Value 0x0 means that size is
+ * unspecified.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mcc, component_size, 0x10, 0, 32);
+
+static inline void mlxsw_reg_mcc_pack(char *payload,
+ enum mlxsw_reg_mcc_instruction instr,
+ u16 component_index, u32 update_handle,
+ u32 component_size)
+{
+ MLXSW_REG_ZERO(mcc, payload);
+ mlxsw_reg_mcc_instruction_set(payload, instr);
+ mlxsw_reg_mcc_component_index_set(payload, component_index);
+ mlxsw_reg_mcc_update_handle_set(payload, update_handle);
+ mlxsw_reg_mcc_component_size_set(payload, component_size);
+}
+
+static inline void mlxsw_reg_mcc_unpack(char *payload, u32 *p_update_handle,
+ u8 *p_error_code, u8 *p_control_state)
+{
+ if (p_update_handle)
+ *p_update_handle = mlxsw_reg_mcc_update_handle_get(payload);
+ if (p_error_code)
+ *p_error_code = mlxsw_reg_mcc_error_code_get(payload);
+ if (p_control_state)
+ *p_control_state = mlxsw_reg_mcc_control_state_get(payload);
+}
+
+/* MCDA - Management Component Data Access
+ * ---------------------------------------
+ * This register allows reading and writing a firmware component.
+ */
+#define MLXSW_REG_MCDA_ID 0x9063
+#define MLXSW_REG_MCDA_BASE_LEN 0x10
+#define MLXSW_REG_MCDA_MAX_DATA_LEN 0x80
+#define MLXSW_REG_MCDA_LEN \
+ (MLXSW_REG_MCDA_BASE_LEN + MLXSW_REG_MCDA_MAX_DATA_LEN)
+
+MLXSW_REG_DEFINE(mcda, MLXSW_REG_MCDA_ID, MLXSW_REG_MCDA_LEN);
+
+/* reg_mcda_update_handle
+ * Token representing the current flow executed by the FSM.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcda, update_handle, 0x00, 0, 24);
+
+/* reg_mcda_offset
+ * Offset of accessed address relative to component start. Accesses must be in
+ * accordance to log_mcda_word_size in MCQI reg.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcda, offset, 0x04, 0, 32);
+
+/* reg_mcda_size
+ * Size of the data accessed, given in bytes.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcda, size, 0x08, 0, 16);
+
+/* reg_mcda_data
+ * Data block accessed.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, mcda, data, 0x10, 0, 32, 4, 0, false);
+
+static inline void mlxsw_reg_mcda_pack(char *payload, u32 update_handle,
+ u32 offset, u16 size, u8 *data)
+{
+ int i;
+
+ MLXSW_REG_ZERO(mcda, payload);
+ mlxsw_reg_mcda_update_handle_set(payload, update_handle);
+ mlxsw_reg_mcda_offset_set(payload, offset);
+ mlxsw_reg_mcda_size_set(payload, size);
+
+ for (i = 0; i < size / 4; i++)
+ mlxsw_reg_mcda_data_set(payload, i, *(u32 *) &data[i * 4]);
+}
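+
+/* Usage sketch (illustrative only): once MCC has locked an update handle and
+ * moved the FSM to the download state, the component data is pushed in
+ * chunks through MCDA, with the chunk size bounded by
+ * MCQI.cap_mcda_max_write_size. Waiting on the MCC control_state between
+ * steps is omitted here. The function name is hypothetical; mlxsw_reg_write()
+ * and struct mlxsw_core are assumed from core.h.
+ */
+static inline int
+mlxsw_example_mcda_download(struct mlxsw_core *mlxsw_core, u32 update_handle,
+ const u8 *buf, u32 size, u16 max_write_size)
+{
+ char mcda_pl[MLXSW_REG_MCDA_LEN];
+ u32 offset;
+ int err;
+
+ for (offset = 0; offset < size; offset += max_write_size) {
+ u16 chunk = min_t(u32, max_write_size, size - offset);
+
+ mlxsw_reg_mcda_pack(mcda_pl, update_handle, offset, chunk,
+ (u8 *) &buf[offset]);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcda), mcda_pl);
+ if (err)
+ return err;
+ }
+ return 0;
+}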
+
+/* MPSC - Monitoring Packet Sampling Configuration Register
+ * --------------------------------------------------------
+ * MPSC Register is used to configure the Packet Sampling mechanism.
+ */
+#define MLXSW_REG_MPSC_ID 0x9080
+#define MLXSW_REG_MPSC_LEN 0x1C
+
+MLXSW_REG_DEFINE(mpsc, MLXSW_REG_MPSC_ID, MLXSW_REG_MPSC_LEN);
+
+/* reg_mpsc_local_port
+ * Local port number
+ * Not supported for CPU port
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, mpsc, 0x00, 16, 0x00, 12);
+
+/* reg_mpsc_e
+ * Enable sampling on port local_port
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpsc, e, 0x04, 30, 1);
+
+#define MLXSW_REG_MPSC_RATE_MAX 3500000000UL
+
+/* reg_mpsc_rate
+ * Sampling rate = 1 out of rate packets (with randomization around
+ * the point). Valid values are: 1 to MLXSW_REG_MPSC_RATE_MAX
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpsc, rate, 0x08, 0, 32);
+
+static inline void mlxsw_reg_mpsc_pack(char *payload, u16 local_port, bool e,
+ u32 rate)
+{
+ MLXSW_REG_ZERO(mpsc, payload);
+ mlxsw_reg_mpsc_local_port_set(payload, local_port);
+ mlxsw_reg_mpsc_e_set(payload, e);
+ mlxsw_reg_mpsc_rate_set(payload, rate);
+}
+
+/* MGPC - Monitoring General Purpose Counter Set Register
+ * -------------------------------------------------------
+ * The MGPC register retrieves and sets the General Purpose Counter Set.
+ */
+#define MLXSW_REG_MGPC_ID 0x9081
+#define MLXSW_REG_MGPC_LEN 0x18
+
+MLXSW_REG_DEFINE(mgpc, MLXSW_REG_MGPC_ID, MLXSW_REG_MGPC_LEN);
+
+/* reg_mgpc_counter_set_type
+ * Counter set type.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mgpc, counter_set_type, 0x00, 24, 8);
+
+/* reg_mgpc_counter_index
+ * Counter index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mgpc, counter_index, 0x00, 0, 24);
+
+enum mlxsw_reg_mgpc_opcode {
+ /* Nop */
+ MLXSW_REG_MGPC_OPCODE_NOP = 0x00,
+ /* Clear counters */
+ MLXSW_REG_MGPC_OPCODE_CLEAR = 0x08,
+};
+
+/* reg_mgpc_opcode
+ * Opcode.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mgpc, opcode, 0x04, 28, 4);
+
+/* reg_mgpc_byte_counter
+ * Byte counter value.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, mgpc, byte_counter, 0x08, 0, 64);
+
+/* reg_mgpc_packet_counter
+ * Packet counter value.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, mgpc, packet_counter, 0x10, 0, 64);
+
+static inline void mlxsw_reg_mgpc_pack(char *payload, u32 counter_index,
+ enum mlxsw_reg_mgpc_opcode opcode,
+ enum mlxsw_reg_flow_counter_set_type set_type)
+{
+ MLXSW_REG_ZERO(mgpc, payload);
+ mlxsw_reg_mgpc_counter_index_set(payload, counter_index);
+ mlxsw_reg_mgpc_counter_set_type_set(payload, set_type);
+ mlxsw_reg_mgpc_opcode_set(payload, opcode);
+}
+
+/* MPRS - Monitoring Parsing State Register
+ * ----------------------------------------
+ * The MPRS register is used for setting up the parsing for hash,
+ * policy-engine and routing.
+ */
+#define MLXSW_REG_MPRS_ID 0x9083
+#define MLXSW_REG_MPRS_LEN 0x14
+
+MLXSW_REG_DEFINE(mprs, MLXSW_REG_MPRS_ID, MLXSW_REG_MPRS_LEN);
+
+/* reg_mprs_parsing_depth
+ * Minimum parsing depth.
+ * The parsing depth needs to be enlarged according to L3, MPLS, tunnels, ACL
+ * rules, traps, hash, etc. Default is 96 bytes. Reserved when SwitchX-2.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mprs, parsing_depth, 0x00, 0, 16);
+
+/* reg_mprs_parsing_en
+ * Parsing enable.
+ * Bit 0 - Enable parsing of NVE of types VxLAN, VxLAN-GPE, GENEVE and
+ * NVGRE. Default is enabled. Reserved when SwitchX-2.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mprs, parsing_en, 0x04, 0, 16);
+
+/* reg_mprs_vxlan_udp_dport
+ * VxLAN UDP destination port.
+ * Used for identifying VxLAN packets and for dport field in
+ * encapsulation. Default is 4789.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mprs, vxlan_udp_dport, 0x10, 0, 16);
+
+static inline void mlxsw_reg_mprs_pack(char *payload, u16 parsing_depth,
+ u16 vxlan_udp_dport)
+{
+ MLXSW_REG_ZERO(mprs, payload);
+ mlxsw_reg_mprs_parsing_depth_set(payload, parsing_depth);
+ mlxsw_reg_mprs_parsing_en_set(payload, true);
+ mlxsw_reg_mprs_vxlan_udp_dport_set(payload, vxlan_udp_dport);
+}
+
+/* MOGCR - Monitoring Global Configuration Register
+ * ------------------------------------------------
+ */
+#define MLXSW_REG_MOGCR_ID 0x9086
+#define MLXSW_REG_MOGCR_LEN 0x20
+
+MLXSW_REG_DEFINE(mogcr, MLXSW_REG_MOGCR_ID, MLXSW_REG_MOGCR_LEN);
+
+/* reg_mogcr_ptp_iftc
+ * PTP Ingress FIFO Trap Clear
+ * The PTP_ING_FIFO trap provides MTPPTR with clr according
+ * to this value. Default 0.
+ * Reserved when IB switches and when SwitchX/-2, Spectrum-2
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mogcr, ptp_iftc, 0x00, 1, 1);
+
+/* reg_mogcr_ptp_eftc
+ * PTP Egress FIFO Trap Clear
+ * The PTP_EGR_FIFO trap provides MTPPTR with clr according
+ * to this value. Default 0.
+ * Reserved when IB switches and when SwitchX/-2, Spectrum-2
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mogcr, ptp_eftc, 0x00, 0, 1);
+
+/* reg_mogcr_mirroring_pid_base
+ * Base policer id for mirroring policers.
+ * Must have an even value (e.g. 1000, not 1001).
+ * Reserved when SwitchX/-2, Switch-IB/2, Spectrum-1 and Quantum.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mogcr, mirroring_pid_base, 0x0C, 0, 14);
+
+/* MPAGR - Monitoring Port Analyzer Global Register
+ * ------------------------------------------------
+ * This register is used for global port analyzer configurations.
+ * Note: This register is not supported by current FW versions for Spectrum-1.
+ */
+#define MLXSW_REG_MPAGR_ID 0x9089
+#define MLXSW_REG_MPAGR_LEN 0x0C
+
+MLXSW_REG_DEFINE(mpagr, MLXSW_REG_MPAGR_ID, MLXSW_REG_MPAGR_LEN);
+
+enum mlxsw_reg_mpagr_trigger {
+ MLXSW_REG_MPAGR_TRIGGER_EGRESS,
+ MLXSW_REG_MPAGR_TRIGGER_INGRESS,
+ MLXSW_REG_MPAGR_TRIGGER_INGRESS_WRED,
+ MLXSW_REG_MPAGR_TRIGGER_INGRESS_SHARED_BUFFER,
+ MLXSW_REG_MPAGR_TRIGGER_INGRESS_ING_CONG,
+ MLXSW_REG_MPAGR_TRIGGER_INGRESS_EGR_CONG,
+ MLXSW_REG_MPAGR_TRIGGER_EGRESS_ECN,
+ MLXSW_REG_MPAGR_TRIGGER_EGRESS_HIGH_LATENCY,
+};
+
+/* reg_mpagr_trigger
+ * Mirror trigger.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mpagr, trigger, 0x00, 0, 4);
+
+/* reg_mpagr_pa_id
+ * Port analyzer ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpagr, pa_id, 0x04, 0, 4);
+
+#define MLXSW_REG_MPAGR_RATE_MAX 3500000000UL
+
+/* reg_mpagr_probability_rate
+ * Sampling rate.
+ * Valid values are: 1 to 3.5*10^9
+ * Value of 1 means "sample all". Default is 1.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpagr, probability_rate, 0x08, 0, 32);
+
+static inline void mlxsw_reg_mpagr_pack(char *payload,
+ enum mlxsw_reg_mpagr_trigger trigger,
+ u8 pa_id, u32 probability_rate)
+{
+ MLXSW_REG_ZERO(mpagr, payload);
+ mlxsw_reg_mpagr_trigger_set(payload, trigger);
+ mlxsw_reg_mpagr_pa_id_set(payload, pa_id);
+ mlxsw_reg_mpagr_probability_rate_set(payload, probability_rate);
+}
+
+/* MOMTE - Monitoring Mirror Trigger Enable Register
+ * -------------------------------------------------
+ * This register is used to configure the mirror enable for different mirror
+ * reasons.
+ */
+#define MLXSW_REG_MOMTE_ID 0x908D
+#define MLXSW_REG_MOMTE_LEN 0x10
+
+MLXSW_REG_DEFINE(momte, MLXSW_REG_MOMTE_ID, MLXSW_REG_MOMTE_LEN);
+
+/* reg_momte_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, momte, 0x00, 16, 0x00, 12);
+
+enum mlxsw_reg_momte_type {
+ MLXSW_REG_MOMTE_TYPE_WRED = 0x20,
+ MLXSW_REG_MOMTE_TYPE_SHARED_BUFFER_TCLASS = 0x31,
+ MLXSW_REG_MOMTE_TYPE_SHARED_BUFFER_TCLASS_DESCRIPTORS = 0x32,
+ MLXSW_REG_MOMTE_TYPE_SHARED_BUFFER_EGRESS_PORT = 0x33,
+ MLXSW_REG_MOMTE_TYPE_ING_CONG = 0x40,
+ MLXSW_REG_MOMTE_TYPE_EGR_CONG = 0x50,
+ MLXSW_REG_MOMTE_TYPE_ECN = 0x60,
+ MLXSW_REG_MOMTE_TYPE_HIGH_LATENCY = 0x70,
+};
+
+/* reg_momte_type
+ * Type of mirroring.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, momte, type, 0x04, 0, 8);
+
+/* reg_momte_tclass_en
+ * TClass/PG mirror enable. Each bit represents corresponding tclass.
+ * 0: disable (default)
+ * 1: enable
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, momte, tclass_en, 0x08, 0x08, 1);
+
+static inline void mlxsw_reg_momte_pack(char *payload, u16 local_port,
+ enum mlxsw_reg_momte_type type)
+{
+ MLXSW_REG_ZERO(momte, payload);
+ mlxsw_reg_momte_local_port_set(payload, local_port);
+ mlxsw_reg_momte_type_set(payload, type);
+}
+
+/* MTPPPC - Time Precision Packet Port Configuration
+ * -------------------------------------------------
+ * This register is used to configure which PTP messages should be
+ * timestamped. This is a global configuration, despite the register name.
+ *
+ * Reserved when Spectrum-2.
+ */
+#define MLXSW_REG_MTPPPC_ID 0x9090
+#define MLXSW_REG_MTPPPC_LEN 0x28
+
+MLXSW_REG_DEFINE(mtpppc, MLXSW_REG_MTPPPC_ID, MLXSW_REG_MTPPPC_LEN);
+
+/* reg_mtpppc_ing_timestamp_message_type
+ * Bitwise vector of PTP message types to timestamp at ingress.
+ * MessageType field as defined by IEEE 1588
+ * Each bit corresponds to a value (e.g. Bit0: Sync, Bit1: Delay_Req)
+ * Default all 0
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtpppc, ing_timestamp_message_type, 0x08, 0, 16);
+
+/* reg_mtpppc_egr_timestamp_message_type
+ * Bitwise vector of PTP message types to timestamp at egress.
+ * MessageType field as defined by IEEE 1588
+ * Each bit corresponds to a value (e.g. Bit0: Sync, Bit1: Delay_Req)
+ * Default all 0
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtpppc, egr_timestamp_message_type, 0x0C, 0, 16);
+
+static inline void mlxsw_reg_mtpppc_pack(char *payload, u16 ing, u16 egr)
+{
+ MLXSW_REG_ZERO(mtpppc, payload);
+ mlxsw_reg_mtpppc_ing_timestamp_message_type_set(payload, ing);
+ mlxsw_reg_mtpppc_egr_timestamp_message_type_set(payload, egr);
+}
+
+/* MTPPTR - Time Precision Packet Timestamping Reading
+ * ---------------------------------------------------
+ * The MTPPTR is used for reading the per port PTP timestamp FIFO.
+ * There is a trap for packets which are latched to the timestamp FIFO, thus the
+ * SW knows which FIFO to read. Note that packets enter the FIFO before being
+ * trapped. The sequence number is used to synchronize the timestamp FIFO
+ * entries and the trapped packets.
+ * Reserved when Spectrum-2.
+ */
+
+#define MLXSW_REG_MTPPTR_ID 0x9091
+#define MLXSW_REG_MTPPTR_BASE_LEN 0x10 /* base length, without records */
+#define MLXSW_REG_MTPPTR_REC_LEN 0x10 /* record length */
+#define MLXSW_REG_MTPPTR_REC_MAX_COUNT 4
+#define MLXSW_REG_MTPPTR_LEN (MLXSW_REG_MTPPTR_BASE_LEN + \
+ MLXSW_REG_MTPPTR_REC_LEN * MLXSW_REG_MTPPTR_REC_MAX_COUNT)
+
+MLXSW_REG_DEFINE(mtpptr, MLXSW_REG_MTPPTR_ID, MLXSW_REG_MTPPTR_LEN);
+
+/* reg_mtpptr_local_port
+ * Not supported for CPU port.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, mtpptr, 0x00, 16, 0x00, 12);
+
+enum mlxsw_reg_mtpptr_dir {
+ MLXSW_REG_MTPPTR_DIR_INGRESS,
+ MLXSW_REG_MTPPTR_DIR_EGRESS,
+};
+
+/* reg_mtpptr_dir
+ * Direction.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mtpptr, dir, 0x00, 0, 1);
+
+/* reg_mtpptr_clr
+ * Clear the records.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mtpptr, clr, 0x04, 31, 1);
+
+/* reg_mtpptr_num_rec
+ * Number of valid records in the response
+ * Range 0.. cap_ptp_timestamp_fifo
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mtpptr, num_rec, 0x08, 0, 4);
+
+/* reg_mtpptr_rec_message_type
+ * MessageType field as defined by IEEE 1588. Each bit corresponds to a value
+ * (e.g. Bit0: Sync, Bit1: Delay_Req).
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, mtpptr, rec_message_type,
+ MLXSW_REG_MTPPTR_BASE_LEN, 8, 4,
+ MLXSW_REG_MTPPTR_REC_LEN, 0, false);
+
+/* reg_mtpptr_rec_domain_number
+ * DomainNumber field as defined by IEEE 1588
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, mtpptr, rec_domain_number,
+ MLXSW_REG_MTPPTR_BASE_LEN, 0, 8,
+ MLXSW_REG_MTPPTR_REC_LEN, 0, false);
+
+/* reg_mtpptr_rec_sequence_id
+ * SequenceId field as defined by IEEE 1588
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, mtpptr, rec_sequence_id,
+ MLXSW_REG_MTPPTR_BASE_LEN, 0, 16,
+ MLXSW_REG_MTPPTR_REC_LEN, 0x4, false);
+
+/* reg_mtpptr_rec_timestamp_high
+ * Timestamp of when the PTP packet passed through the port, in units of PLL
+ * clock time.
+ * For Spectrum-1 the PLL clock is 156.25Mhz and PLL clock time is 6.4nSec.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, mtpptr, rec_timestamp_high,
+ MLXSW_REG_MTPPTR_BASE_LEN, 0, 32,
+ MLXSW_REG_MTPPTR_REC_LEN, 0x8, false);
+
+/* reg_mtpptr_rec_timestamp_low
+ * See rec_timestamp_high.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, mtpptr, rec_timestamp_low,
+ MLXSW_REG_MTPPTR_BASE_LEN, 0, 32,
+ MLXSW_REG_MTPPTR_REC_LEN, 0xC, false);
+
+static inline void mlxsw_reg_mtpptr_unpack(const char *payload,
+ unsigned int rec,
+ u8 *p_message_type,
+ u8 *p_domain_number,
+ u16 *p_sequence_id,
+ u64 *p_timestamp)
+{
+ u32 timestamp_high, timestamp_low;
+
+ *p_message_type = mlxsw_reg_mtpptr_rec_message_type_get(payload, rec);
+ *p_domain_number = mlxsw_reg_mtpptr_rec_domain_number_get(payload, rec);
+ *p_sequence_id = mlxsw_reg_mtpptr_rec_sequence_id_get(payload, rec);
+ timestamp_high = mlxsw_reg_mtpptr_rec_timestamp_high_get(payload, rec);
+ timestamp_low = mlxsw_reg_mtpptr_rec_timestamp_low_get(payload, rec);
+ *p_timestamp = (u64)timestamp_high << 32 | timestamp_low;
+}
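+
+/* Usage sketch (illustrative only): draining the per-port timestamp FIFO in
+ * one direction. The query returns num_rec valid records, which are decoded
+ * with the unpack helper above; clr is left unset here so the records are
+ * not cleared. The function name is hypothetical; mlxsw_reg_query() and
+ * struct mlxsw_core are assumed from core.h.
+ */
+static inline int
+mlxsw_example_mtpptr_fifo_read(struct mlxsw_core *mlxsw_core, u16 local_port,
+ enum mlxsw_reg_mtpptr_dir dir)
+{
+ char mtpptr_pl[MLXSW_REG_MTPPTR_LEN];
+ u8 message_type, domain_number;
+ u16 sequence_id;
+ u64 timestamp;
+ unsigned int i;
+ int err;
+
+ MLXSW_REG_ZERO(mtpptr, mtpptr_pl);
+ mlxsw_reg_mtpptr_local_port_set(mtpptr_pl, local_port);
+ mlxsw_reg_mtpptr_dir_set(mtpptr_pl, dir);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mtpptr), mtpptr_pl);
+ if (err)
+ return err;
+ for (i = 0; i < mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl); i++) {
+ mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
+ &domain_number, &sequence_id,
+ &timestamp);
+ /* ... match the record against its trapped packet here ... */
+ }
+ return 0;
+}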
+
+/* MTPTPT - Monitoring Precision Time Protocol Trap Register
+ * ---------------------------------------------------------
+ * This register is used for configuring under which trap to deliver PTP
+ * packets depending on type of the packet.
+ */
+#define MLXSW_REG_MTPTPT_ID 0x9092
+#define MLXSW_REG_MTPTPT_LEN 0x08
+
+MLXSW_REG_DEFINE(mtptpt, MLXSW_REG_MTPTPT_ID, MLXSW_REG_MTPTPT_LEN);
+
+enum mlxsw_reg_mtptpt_trap_id {
+ MLXSW_REG_MTPTPT_TRAP_ID_PTP0,
+ MLXSW_REG_MTPTPT_TRAP_ID_PTP1,
+};
+
+/* reg_mtptpt_trap_id
+ * Trap id.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mtptpt, trap_id, 0x00, 0, 4);
+
+/* reg_mtptpt_message_type
+ * Bitwise vector of PTP message types to trap. This is a necessary but not
+ * sufficient condition, since trapping must also be enabled per port. See
+ * MTPPPC.
+ * Message types are defined by IEEE 1588. Each bit corresponds to a value
+ * (e.g. Bit0: Sync, Bit1: Delay_Req).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtptpt, message_type, 0x04, 0, 16);
+
+static inline void mlxsw_reg_mtptpt_pack(char *payload,
+ enum mlxsw_reg_mtptpt_trap_id trap_id,
+ u16 message_type)
+{
+ MLXSW_REG_ZERO(mtptpt, payload);
+ mlxsw_reg_mtptpt_trap_id_set(payload, trap_id);
+ mlxsw_reg_mtptpt_message_type_set(payload, message_type);
+}
+
+/* MTPCPC - Monitoring Time Precision Correction Port Configuration Register
+ * -------------------------------------------------------------------------
+ */
+#define MLXSW_REG_MTPCPC_ID 0x9093
+#define MLXSW_REG_MTPCPC_LEN 0x2C
+
+MLXSW_REG_DEFINE(mtpcpc, MLXSW_REG_MTPCPC_ID, MLXSW_REG_MTPCPC_LEN);
+
+/* reg_mtpcpc_pport
+ * Per port:
+ * 0: config is global. When reading - the local_port is 1.
+ * 1: config is per port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mtpcpc, pport, 0x00, 31, 1);
+
+/* reg_mtpcpc_local_port
+ * Local port number.
+ * Supported to/from CPU port.
+ * Reserved when pport = 0.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, mtpcpc, 0x00, 16, 0x00, 12);
+
+/* reg_mtpcpc_ptp_trap_en
+ * Enable PTP traps.
+ * The trap_id is configured by MTPTPT.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtpcpc, ptp_trap_en, 0x04, 0, 1);
+
+/* reg_mtpcpc_ing_correction_message_type
+ * Bitwise vector of PTP message types to update correction-field at ingress.
+ * MessageType field as defined by IEEE 1588. Each bit corresponds to a value
+ * (e.g. Bit0: Sync, Bit1: Delay_Req). Supported also from CPU port.
+ * Default all 0
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtpcpc, ing_correction_message_type, 0x10, 0, 16);
+
+/* reg_mtpcpc_egr_correction_message_type
+ * Bitwise vector of PTP message types to update correction-field at egress.
+ * MessageType field as defined by IEEE 1588. Each bit corresponds to a value
+ * (e.g. Bit0: Sync, Bit1: Delay_Req). Supported also from CPU port.
+ * Default all 0
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtpcpc, egr_correction_message_type, 0x14, 0, 16);
+
+static inline void mlxsw_reg_mtpcpc_pack(char *payload, bool pport,
+ u16 local_port, bool ptp_trap_en,
+ u16 ing, u16 egr)
+{
+ MLXSW_REG_ZERO(mtpcpc, payload);
+ mlxsw_reg_mtpcpc_pport_set(payload, pport);
+ mlxsw_reg_mtpcpc_local_port_set(payload, pport ? local_port : 0);
+ mlxsw_reg_mtpcpc_ptp_trap_en_set(payload, ptp_trap_en);
+ mlxsw_reg_mtpcpc_ing_correction_message_type_set(payload, ing);
+ mlxsw_reg_mtpcpc_egr_correction_message_type_set(payload, egr);
+}
+
+/* MFGD - Monitoring FW General Debug Register
+ * -------------------------------------------
+ */
+#define MLXSW_REG_MFGD_ID 0x90F0
+#define MLXSW_REG_MFGD_LEN 0x0C
+
+MLXSW_REG_DEFINE(mfgd, MLXSW_REG_MFGD_ID, MLXSW_REG_MFGD_LEN);
+
+/* reg_mfgd_fw_fatal_event_mode
+ * 0 - don't check FW fatal (default)
+ * 1 - check FW fatal - enable MFDE trap
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mfgd, fatal_event_mode, 0x00, 9, 2);
+
+/* reg_mfgd_trigger_test
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mfgd, trigger_test, 0x00, 11, 1);
+
+/* MGPIR - Management General Peripheral Information Register
+ * ----------------------------------------------------------
+ * MGPIR register allows software to query the hardware and
+ * firmware general information of peripheral entities.
+ */
+#define MLXSW_REG_MGPIR_ID 0x9100
+#define MLXSW_REG_MGPIR_LEN 0xA0
+
+MLXSW_REG_DEFINE(mgpir, MLXSW_REG_MGPIR_ID, MLXSW_REG_MGPIR_LEN);
+
+enum mlxsw_reg_mgpir_device_type {
+ MLXSW_REG_MGPIR_DEVICE_TYPE_NONE,
+ MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE,
+};
+
+/* mgpir_slot_index
+ * Slot index (0: Main board).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mgpir, slot_index, 0x00, 28, 4);
+
+/* mgpir_device_type
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgpir, device_type, 0x00, 24, 4);
+
+/* mgpir_devices_per_flash
+ * Number of devices of device_type per flash (a flash can be shared by a few
+ * devices).
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgpir, devices_per_flash, 0x00, 16, 8);
+
+/* mgpir_num_of_devices
+ * Number of devices of device_type.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgpir, num_of_devices, 0x00, 0, 8);
+
+/* mgpir_max_modules_per_slot
+ * Maximum number of modules that can be connected per slot.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgpir, max_modules_per_slot, 0x04, 16, 8);
+
+/* mgpir_num_of_slots
+ * Number of slots in the system.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgpir, num_of_slots, 0x04, 8, 8);
+
+/* mgpir_num_of_modules
+ * Number of modules.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgpir, num_of_modules, 0x04, 0, 8);
+
+static inline void mlxsw_reg_mgpir_pack(char *payload, u8 slot_index)
+{
+ MLXSW_REG_ZERO(mgpir, payload);
+ mlxsw_reg_mgpir_slot_index_set(payload, slot_index);
+}
+
+static inline void
+mlxsw_reg_mgpir_unpack(char *payload, u8 *num_of_devices,
+ enum mlxsw_reg_mgpir_device_type *device_type,
+ u8 *devices_per_flash, u8 *num_of_modules,
+ u8 *num_of_slots)
+{
+ if (num_of_devices)
+ *num_of_devices = mlxsw_reg_mgpir_num_of_devices_get(payload);
+ if (device_type)
+ *device_type = mlxsw_reg_mgpir_device_type_get(payload);
+ if (devices_per_flash)
+ *devices_per_flash =
+ mlxsw_reg_mgpir_devices_per_flash_get(payload);
+ if (num_of_modules)
+ *num_of_modules = mlxsw_reg_mgpir_num_of_modules_get(payload);
+ if (num_of_slots)
+ *num_of_slots = mlxsw_reg_mgpir_num_of_slots_get(payload);
+}
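+
+/* Usage sketch (illustrative only): discovering how many transceiver modules
+ * a given slot carries, e.g. before allocating per-module state. The
+ * function name is hypothetical; mlxsw_reg_query() and struct mlxsw_core are
+ * assumed from core.h.
+ */
+static inline int
+mlxsw_example_mgpir_module_count(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 *p_num_of_modules)
+{
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ int err;
+
+ mlxsw_reg_mgpir_pack(mgpir_pl, slot_index);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, p_num_of_modules,
+ NULL);
+ return 0;
+}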
+
+/* MBCT - Management Binary Code Transfer Register
+ * -----------------------------------------------
+ * This register allows transferring binary code from the host to the
+ * management FW, in chunks of at most 1KB.
+ */
+#define MLXSW_REG_MBCT_ID 0x9120
+#define MLXSW_REG_MBCT_LEN 0x420
+
+MLXSW_REG_DEFINE(mbct, MLXSW_REG_MBCT_ID, MLXSW_REG_MBCT_LEN);
+
+/* reg_mbct_slot_index
+ * Slot index. 0 is reserved.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mbct, slot_index, 0x00, 0, 4);
+
+/* reg_mbct_data_size
+ * Actual data field size in bytes for the current data transfer.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mbct, data_size, 0x04, 0, 11);
+
+enum mlxsw_reg_mbct_op {
+ MLXSW_REG_MBCT_OP_ERASE_INI_IMAGE = 1,
+ MLXSW_REG_MBCT_OP_DATA_TRANSFER, /* Download */
+ MLXSW_REG_MBCT_OP_ACTIVATE,
+ MLXSW_REG_MBCT_OP_CLEAR_ERRORS = 6,
+ MLXSW_REG_MBCT_OP_QUERY_STATUS,
+};
+
+/* reg_mbct_op
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mbct, op, 0x08, 28, 4);
+
+/* reg_mbct_last
+ * Indicates that the current data field is the last chunk of the INI.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mbct, last, 0x08, 26, 1);
+
+/* reg_mbct_oee
+ * Opcode Event Enable. When set, a BCTOE event will be sent once the opcode
+ * has been executed and the fsm_state has changed.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mbct, oee, 0x08, 25, 1);
+
+enum mlxsw_reg_mbct_status {
+ /* Partial data transfer completed successfully and ready for next
+ * data transfer.
+ */
+ MLXSW_REG_MBCT_STATUS_PART_DATA = 2,
+ MLXSW_REG_MBCT_STATUS_LAST_DATA,
+ MLXSW_REG_MBCT_STATUS_ERASE_COMPLETE,
+ /* Error - trying to erase the INI while it is being used. */
+ MLXSW_REG_MBCT_STATUS_ERROR_INI_IN_USE,
+ /* Last data transfer completed, applying magic pattern. */
+ MLXSW_REG_MBCT_STATUS_ERASE_FAILED = 7,
+ MLXSW_REG_MBCT_STATUS_INI_ERROR,
+ MLXSW_REG_MBCT_STATUS_ACTIVATION_FAILED,
+ MLXSW_REG_MBCT_STATUS_ILLEGAL_OPERATION = 11,
+};
+
+/* reg_mbct_status
+ * Status.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mbct, status, 0x0C, 24, 5);
+
+enum mlxsw_reg_mbct_fsm_state {
+ MLXSW_REG_MBCT_FSM_STATE_INI_IN_USE = 5,
+ MLXSW_REG_MBCT_FSM_STATE_ERROR,
+};
+
+/* reg_mbct_fsm_state
+ * FSM state.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mbct, fsm_state, 0x0C, 16, 4);
+
+#define MLXSW_REG_MBCT_DATA_LEN 1024
+
+/* reg_mbct_data
+ * Up to 1KB of data.
+ * Access: WO
+ */
+MLXSW_ITEM_BUF(reg, mbct, data, 0x20, MLXSW_REG_MBCT_DATA_LEN);
+
+static inline void mlxsw_reg_mbct_pack(char *payload, u8 slot_index,
+ enum mlxsw_reg_mbct_op op, bool oee)
+{
+ MLXSW_REG_ZERO(mbct, payload);
+ mlxsw_reg_mbct_slot_index_set(payload, slot_index);
+ mlxsw_reg_mbct_op_set(payload, op);
+ mlxsw_reg_mbct_oee_set(payload, oee);
+}
+
+static inline void mlxsw_reg_mbct_dt_pack(char *payload,
+ u16 data_size, bool last,
+ const char *data)
+{
+ if (WARN_ON(data_size > MLXSW_REG_MBCT_DATA_LEN))
+ return;
+ mlxsw_reg_mbct_data_size_set(payload, data_size);
+ mlxsw_reg_mbct_last_set(payload, last);
+ mlxsw_reg_mbct_data_memcpy_to(payload, data);
+}
+
+static inline void
+mlxsw_reg_mbct_unpack(const char *payload, u8 *p_slot_index,
+ enum mlxsw_reg_mbct_status *p_status,
+ enum mlxsw_reg_mbct_fsm_state *p_fsm_state)
+{
+ if (p_slot_index)
+ *p_slot_index = mlxsw_reg_mbct_slot_index_get(payload);
+ *p_status = mlxsw_reg_mbct_status_get(payload);
+ if (p_fsm_state)
+ *p_fsm_state = mlxsw_reg_mbct_fsm_state_get(payload);
+}
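+
+/* Usage sketch (illustrative only): pushing one INI chunk to the management
+ * FW. A full flow would first issue ERASE_INI_IMAGE, repeat this step until
+ * the chunk marked 'last', then issue ACTIVATE, checking the returned
+ * status/fsm_state (or waiting for the BCTOE event) between steps. Note that
+ * the dt_pack helper above copies a full MLXSW_REG_MBCT_DATA_LEN buffer, so
+ * 'data' must be at least that large. The function name is hypothetical;
+ * mlxsw_reg_write() and struct mlxsw_core are assumed from core.h, and the
+ * payload is heap-allocated because of the register size.
+ */
+static inline int
+mlxsw_example_mbct_chunk_write(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ const char *data, u16 data_size, bool last,
+ enum mlxsw_reg_mbct_status *p_status)
+{
+ char *mbct_pl;
+ int err;
+
+ mbct_pl = kmalloc(MLXSW_REG_MBCT_LEN, GFP_KERNEL);
+ if (!mbct_pl)
+ return -ENOMEM;
+ mlxsw_reg_mbct_pack(mbct_pl, slot_index,
+ MLXSW_REG_MBCT_OP_DATA_TRANSFER, false);
+ mlxsw_reg_mbct_dt_pack(mbct_pl, data_size, last, data);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mbct), mbct_pl);
+ if (!err)
+ mlxsw_reg_mbct_unpack(mbct_pl, NULL, p_status, NULL);
+ kfree(mbct_pl);
+ return err;
+}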
+
+/* MDDT - Management DownStream Device Tunneling Register
+ * ------------------------------------------------------
+ * This register allows delivering query and request messages (PRM registers,
+ * commands) to a DownStream device.
+ */
+#define MLXSW_REG_MDDT_ID 0x9160
+#define MLXSW_REG_MDDT_LEN 0x110
+
+MLXSW_REG_DEFINE(mddt, MLXSW_REG_MDDT_ID, MLXSW_REG_MDDT_LEN);
+
+/* reg_mddt_slot_index
+ * Slot index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddt, slot_index, 0x00, 8, 4);
+
+/* reg_mddt_device_index
+ * Device index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddt, device_index, 0x00, 0, 8);
+
+/* reg_mddt_read_size
+ * Read size in D-Words.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mddt, read_size, 0x04, 24, 8);
+
+/* reg_mddt_write_size
+ * Write size in D-Words.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mddt, write_size, 0x04, 16, 8);
+
+enum mlxsw_reg_mddt_status {
+ MLXSW_REG_MDDT_STATUS_OK,
+};
+
+/* reg_mddt_status
+ * Return code of the Downstream Device to the register that was sent.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddt, status, 0x0C, 24, 8);
+
+enum mlxsw_reg_mddt_method {
+ MLXSW_REG_MDDT_METHOD_QUERY,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+};
+
+/* reg_mddt_method
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mddt, method, 0x0C, 22, 2);
+
+/* reg_mddt_register_id
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddt, register_id, 0x0C, 0, 16);
+
+#define MLXSW_REG_MDDT_PAYLOAD_OFFSET 0x0C
+#define MLXSW_REG_MDDT_PRM_REGISTER_HEADER_LEN 4
+
+static inline char *mlxsw_reg_mddt_inner_payload(char *payload)
+{
+ return payload + MLXSW_REG_MDDT_PAYLOAD_OFFSET +
+ MLXSW_REG_MDDT_PRM_REGISTER_HEADER_LEN;
+}
+
+static inline void mlxsw_reg_mddt_pack(char *payload, u8 slot_index,
+ u8 device_index,
+ enum mlxsw_reg_mddt_method method,
+ const struct mlxsw_reg_info *reg,
+ char **inner_payload)
+{
+ int len = reg->len + MLXSW_REG_MDDT_PRM_REGISTER_HEADER_LEN;
+
+ if (WARN_ON(len + MLXSW_REG_MDDT_PAYLOAD_OFFSET > MLXSW_REG_MDDT_LEN))
+ len = MLXSW_REG_MDDT_LEN - MLXSW_REG_MDDT_PAYLOAD_OFFSET;
+
+ MLXSW_REG_ZERO(mddt, payload);
+ mlxsw_reg_mddt_slot_index_set(payload, slot_index);
+ mlxsw_reg_mddt_device_index_set(payload, device_index);
+ mlxsw_reg_mddt_method_set(payload, method);
+ mlxsw_reg_mddt_register_id_set(payload, reg->id);
+ mlxsw_reg_mddt_read_size_set(payload, len / 4);
+ mlxsw_reg_mddt_write_size_set(payload, len / 4);
+ *inner_payload = mlxsw_reg_mddt_inner_payload(payload);
+}
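+
+/* Usage sketch (illustrative only): tunneling an inner register to a line
+ * card device, here querying MGIR from a downstream device. The inner
+ * payload pointer returned by the pack helper is packed with the tunneled
+ * register and the whole MDDT payload is then sent as usual; the caller can
+ * afterwards decode the response via mlxsw_reg_mddt_inner_payload() and
+ * mlxsw_reg_mgir_unpack(). The function name is hypothetical;
+ * mlxsw_reg_query() and struct mlxsw_core are assumed from core.h.
+ */
+static inline int
+mlxsw_example_mddt_mgir_query(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 device_index, char *mddt_pl)
+{
+ char *mgir_pl;
+
+ /* mddt_pl must point at a caller-owned MLXSW_REG_MDDT_LEN buffer so
+ * the inner MGIR payload stays valid after this function returns.
+ */
+ mlxsw_reg_mddt_pack(mddt_pl, slot_index, device_index,
+ MLXSW_REG_MDDT_METHOD_QUERY, MLXSW_REG(mgir),
+ &mgir_pl);
+ mlxsw_reg_mgir_pack(mgir_pl);
+ return mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}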
+
+/* MDDQ - Management DownStream Device Query Register
+ * --------------------------------------------------
+ * This register allows querying the DownStream device properties. The
+ * desired information is selected by the query_type field and is delivered
+ * in 32B data blocks.
+ */
+#define MLXSW_REG_MDDQ_ID 0x9161
+#define MLXSW_REG_MDDQ_LEN 0x30
+
+MLXSW_REG_DEFINE(mddq, MLXSW_REG_MDDQ_ID, MLXSW_REG_MDDQ_LEN);
+
+/* reg_mddq_sie
+ * Slot info event enable.
+ * When set to '1', each change in the slot_info.provisioned / sr_valid /
+ * active / ready will generate a DSDSC event.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mddq, sie, 0x00, 31, 1);
+
+enum mlxsw_reg_mddq_query_type {
+ MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_INFO = 1,
+ MLXSW_REG_MDDQ_QUERY_TYPE_DEVICE_INFO, /* If there are no devices
+ * on the slot, data_valid
+ * will be '0'.
+ */
+ MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_NAME,
+};
+
+/* reg_mddq_query_type
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddq, query_type, 0x00, 16, 8);
+
+/* reg_mddq_slot_index
+ * Slot index. 0 is reserved.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddq, slot_index, 0x00, 0, 4);
+
+/* reg_mddq_response_msg_seq
+ * Response message sequential number. For a specific request, the response
+ * message sequential number is the following one. In addition, the last
+ * message should be 0.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, response_msg_seq, 0x04, 16, 8);
+
+/* reg_mddq_request_msg_seq
+ * Request message sequential number.
+ * The first message number should be 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddq, request_msg_seq, 0x04, 0, 8);
+
+/* reg_mddq_data_valid
+ * If set, the data in the data field is valid and contains the information
+ * for the queried index.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, data_valid, 0x08, 31, 1);
+
+/* reg_mddq_slot_info_provisioned
+ * If set, the INI file is applied and the card is provisioned.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_provisioned, 0x10, 31, 1);
+
+/* reg_mddq_slot_info_sr_valid
+ * If set, the Shift Register is valid (after being provisioned) and data
+ * can be sent from the switch ASIC to the line-card CPLD over the Shift
+ * Register.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_sr_valid, 0x10, 30, 1);
+
+enum mlxsw_reg_mddq_slot_info_ready {
+ MLXSW_REG_MDDQ_SLOT_INFO_READY_NOT_READY,
+ MLXSW_REG_MDDQ_SLOT_INFO_READY_READY,
+ MLXSW_REG_MDDQ_SLOT_INFO_READY_ERROR,
+};
+
+/* reg_mddq_slot_info_lc_ready
+ * If set, the LC is powered on, matches the INI version, and a new FW
+ * version can be burnt (if necessary).
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_lc_ready, 0x10, 28, 2);
+
+/* reg_mddq_slot_info_active
+ * If set, the FW has completed the MDDC.device_enable command.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_active, 0x10, 27, 1);
+
+/* reg_mddq_slot_info_hw_revision
+ * Major user-configured version number of the current INI file.
+ * Valid only when active or ready are '1'.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_hw_revision, 0x14, 16, 16);
+
+/* reg_mddq_slot_info_ini_file_version
+ * User-configured version number of the current INI file.
+ * Valid only when active or lc_ready are '1'.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_ini_file_version, 0x14, 0, 16);
+
+/* reg_mddq_slot_info_card_type
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_card_type, 0x18, 0, 8);
+
+static inline void
+__mlxsw_reg_mddq_pack(char *payload, u8 slot_index,
+ enum mlxsw_reg_mddq_query_type query_type)
+{
+ MLXSW_REG_ZERO(mddq, payload);
+ mlxsw_reg_mddq_slot_index_set(payload, slot_index);
+ mlxsw_reg_mddq_query_type_set(payload, query_type);
+}
+
+static inline void
+mlxsw_reg_mddq_slot_info_pack(char *payload, u8 slot_index, bool sie)
+{
+ __mlxsw_reg_mddq_pack(payload, slot_index,
+ MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_INFO);
+ mlxsw_reg_mddq_sie_set(payload, sie);
+}
+
+static inline void
+mlxsw_reg_mddq_slot_info_unpack(const char *payload, u8 *p_slot_index,
+ bool *p_provisioned, bool *p_sr_valid,
+ enum mlxsw_reg_mddq_slot_info_ready *p_lc_ready,
+ bool *p_active, u16 *p_hw_revision,
+ u16 *p_ini_file_version,
+ u8 *p_card_type)
+{
+ *p_slot_index = mlxsw_reg_mddq_slot_index_get(payload);
+ *p_provisioned = mlxsw_reg_mddq_slot_info_provisioned_get(payload);
+ *p_sr_valid = mlxsw_reg_mddq_slot_info_sr_valid_get(payload);
+ *p_lc_ready = mlxsw_reg_mddq_slot_info_lc_ready_get(payload);
+ *p_active = mlxsw_reg_mddq_slot_info_active_get(payload);
+ *p_hw_revision = mlxsw_reg_mddq_slot_info_hw_revision_get(payload);
+ *p_ini_file_version = mlxsw_reg_mddq_slot_info_ini_file_version_get(payload);
+ *p_card_type = mlxsw_reg_mddq_slot_info_card_type_get(payload);
+}
+
+/* reg_mddq_device_info_flash_owner
+ * If set, the device is the flash owner. Otherwise, a shared flash
+ * is used by this device (another device is the flash owner).
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_flash_owner, 0x10, 30, 1);
+
+/* reg_mddq_device_info_device_index
+ * Device index. The first device is numbered 0.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_device_index, 0x10, 0, 8);
+
+/* reg_mddq_device_info_fw_major
+ * Major FW version number.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_fw_major, 0x14, 16, 16);
+
+/* reg_mddq_device_info_fw_minor
+ * Minor FW version number.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_fw_minor, 0x18, 16, 16);
+
+/* reg_mddq_device_info_fw_sub_minor
+ * Sub-minor FW version number.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_fw_sub_minor, 0x18, 0, 16);
+
+static inline void
+mlxsw_reg_mddq_device_info_pack(char *payload, u8 slot_index,
+ u8 request_msg_seq)
+{
+ __mlxsw_reg_mddq_pack(payload, slot_index,
+ MLXSW_REG_MDDQ_QUERY_TYPE_DEVICE_INFO);
+ mlxsw_reg_mddq_request_msg_seq_set(payload, request_msg_seq);
+}
+
+static inline void
+mlxsw_reg_mddq_device_info_unpack(const char *payload, u8 *p_response_msg_seq,
+ bool *p_data_valid, bool *p_flash_owner,
+ u8 *p_device_index, u16 *p_fw_major,
+ u16 *p_fw_minor, u16 *p_fw_sub_minor)
+{
+ *p_response_msg_seq = mlxsw_reg_mddq_response_msg_seq_get(payload);
+ *p_data_valid = mlxsw_reg_mddq_data_valid_get(payload);
+ *p_flash_owner = mlxsw_reg_mddq_device_info_flash_owner_get(payload);
+ *p_device_index = mlxsw_reg_mddq_device_info_device_index_get(payload);
+ *p_fw_major = mlxsw_reg_mddq_device_info_fw_major_get(payload);
+ *p_fw_minor = mlxsw_reg_mddq_device_info_fw_minor_get(payload);
+ *p_fw_sub_minor = mlxsw_reg_mddq_device_info_fw_sub_minor_get(payload);
+}
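+
+/* Usage sketch (illustrative only): enumerating the downstream devices of a
+ * slot. Each query asks for the next message sequence number and the loop
+ * stops once data_valid is no longer set or the returned sequence number is
+ * 0. The function name is hypothetical; mlxsw_reg_query() and struct
+ * mlxsw_core are assumed from core.h.
+ */
+static inline int
+mlxsw_example_mddq_devices_walk(struct mlxsw_core *mlxsw_core, u8 slot_index)
+{
+ char mddq_pl[MLXSW_REG_MDDQ_LEN];
+ u8 msg_seq = 0;
+
+ do {
+ u16 fw_major, fw_minor, fw_sub_minor;
+ u8 response_msg_seq, device_index;
+ bool data_valid, flash_owner;
+ int err;
+
+ mlxsw_reg_mddq_device_info_pack(mddq_pl, slot_index, msg_seq);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mddq_device_info_unpack(mddq_pl, &response_msg_seq,
+ &data_valid, &flash_owner,
+ &device_index, &fw_major,
+ &fw_minor, &fw_sub_minor);
+ if (!data_valid)
+ break;
+ /* ... record the device index and FW version here ... */
+ msg_seq = response_msg_seq;
+ } while (msg_seq);
+ return 0;
+}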
+
+#define MLXSW_REG_MDDQ_SLOT_ASCII_NAME_LEN 20
+
+/* reg_mddq_slot_ascii_name
+ * Slot's ASCII name.
+ * Access: RO
+ */
+MLXSW_ITEM_BUF(reg, mddq, slot_ascii_name, 0x10,
+ MLXSW_REG_MDDQ_SLOT_ASCII_NAME_LEN);
+
+static inline void
+mlxsw_reg_mddq_slot_name_pack(char *payload, u8 slot_index)
+{
+ __mlxsw_reg_mddq_pack(payload, slot_index,
+ MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_NAME);
+}
+
+static inline void
+mlxsw_reg_mddq_slot_name_unpack(const char *payload, char *slot_ascii_name)
+{
+ mlxsw_reg_mddq_slot_ascii_name_memcpy_from(payload, slot_ascii_name);
+}
+
+/* MDDC - Management DownStream Device Control Register
+ * ----------------------------------------------------
+ * This register allows controlling downstream devices and line cards.
+ */
+#define MLXSW_REG_MDDC_ID 0x9163
+#define MLXSW_REG_MDDC_LEN 0x30
+
+MLXSW_REG_DEFINE(mddc, MLXSW_REG_MDDC_ID, MLXSW_REG_MDDC_LEN);
+
+/* reg_mddc_slot_index
+ * Slot index. 0 is reserved.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddc, slot_index, 0x00, 0, 4);
+
+/* reg_mddc_rst
+ * Reset request.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mddc, rst, 0x04, 29, 1);
+
+/* reg_mddc_device_enable
+ * When set, FW is the manager and allowed to program the downstream device.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mddc, device_enable, 0x04, 28, 1);
+
+static inline void mlxsw_reg_mddc_pack(char *payload, u8 slot_index, bool rst,
+ bool device_enable)
+{
+ MLXSW_REG_ZERO(mddc, payload);
+ mlxsw_reg_mddc_slot_index_set(payload, slot_index);
+ mlxsw_reg_mddc_rst_set(payload, rst);
+ mlxsw_reg_mddc_device_enable_set(payload, device_enable);
+}
+
+/* MFDE - Monitoring FW Debug Register
+ * -----------------------------------
+ */
+#define MLXSW_REG_MFDE_ID 0x9200
+#define MLXSW_REG_MFDE_LEN 0x30
+
+MLXSW_REG_DEFINE(mfde, MLXSW_REG_MFDE_ID, MLXSW_REG_MFDE_LEN);
+
+/* reg_mfde_irisc_id
+ * Which irisc triggered the event
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, irisc_id, 0x00, 24, 8);
+
+enum mlxsw_reg_mfde_severity {
+ /* Unrecoverable switch behavior */
+ MLXSW_REG_MFDE_SEVERITY_FATL = 2,
+ /* Unexpected state with possible systemic failure */
+ MLXSW_REG_MFDE_SEVERITY_NRML = 3,
+ /* Unexpected state without systemic failure */
+ MLXSW_REG_MFDE_SEVERITY_INTR = 5,
+};
+
+/* reg_mfde_severity
+ * The severity of the event.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, severity, 0x00, 16, 8);
+
+enum mlxsw_reg_mfde_event_id {
+ /* CRspace timeout */
+ MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO = 1,
+ /* KVD insertion machine stopped */
+ MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP,
+ /* Triggered by MFGD.trigger_test */
+ MLXSW_REG_MFDE_EVENT_ID_TEST,
+ /* Triggered when firmware hits an assert */
+ MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT,
+ /* Fatal error interrupt from hardware */
+ MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE,
+};
+
+/* reg_mfde_event_id
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, event_id, 0x00, 0, 16);
+
+enum mlxsw_reg_mfde_method {
+ MLXSW_REG_MFDE_METHOD_QUERY,
+ MLXSW_REG_MFDE_METHOD_WRITE,
+};
+
+/* reg_mfde_method
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, method, 0x04, 29, 1);
+
+/* reg_mfde_long_process
+ * Indicates if the command is in long_process mode.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, long_process, 0x04, 28, 1);
+
+enum mlxsw_reg_mfde_command_type {
+ MLXSW_REG_MFDE_COMMAND_TYPE_MAD,
+ MLXSW_REG_MFDE_COMMAND_TYPE_EMAD,
+ MLXSW_REG_MFDE_COMMAND_TYPE_CMDIF,
+};
+
+/* reg_mfde_command_type
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, command_type, 0x04, 24, 2);
+
+/* reg_mfde_reg_attr_id
+ * EMAD - register id, MAD - attribute id
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, reg_attr_id, 0x04, 0, 16);
+
+/* reg_mfde_crspace_to_log_address
+ * crspace address accessed, which resulted in the timeout.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, crspace_to_log_address, 0x10, 0, 32);
+
+/* reg_mfde_crspace_to_oe
+ * 0 - New event
+ * 1 - Old event, occurred before MFGD activation.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, crspace_to_oe, 0x14, 24, 1);
+
+/* reg_mfde_crspace_to_log_id
+ * Which irisc triggered the timeout.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, crspace_to_log_id, 0x14, 0, 4);
+
+/* reg_mfde_crspace_to_log_ip
+ * IP (instruction pointer) that triggered the timeout.
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, mfde, crspace_to_log_ip, 0x18, 0, 64);
+
+/* reg_mfde_kvd_im_stop_oe
+ * 0 - New event
+ * 1 - Old event, occurred before MFGD activation.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, kvd_im_stop_oe, 0x10, 24, 1);
+
+/* reg_mfde_kvd_im_stop_pipes_mask
+ * Bit per kvh pipe.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, kvd_im_stop_pipes_mask, 0x10, 0, 16);
+
+/* reg_mfde_fw_assert_var0-4
+ * Variables passed to assert.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fw_assert_var0, 0x10, 0, 32);
+MLXSW_ITEM32(reg, mfde, fw_assert_var1, 0x14, 0, 32);
+MLXSW_ITEM32(reg, mfde, fw_assert_var2, 0x18, 0, 32);
+MLXSW_ITEM32(reg, mfde, fw_assert_var3, 0x1C, 0, 32);
+MLXSW_ITEM32(reg, mfde, fw_assert_var4, 0x20, 0, 32);
+
+/* reg_mfde_fw_assert_existptr
+ * The instruction pointer when assert was triggered.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fw_assert_existptr, 0x24, 0, 32);
+
+/* reg_mfde_fw_assert_callra
+ * The return address after triggering assert.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fw_assert_callra, 0x28, 0, 32);
+
+/* reg_mfde_fw_assert_oe
+ * 0 - New event
+ * 1 - Old event, occurred before MFGD activation.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fw_assert_oe, 0x2C, 24, 1);
+
+/* reg_mfde_fw_assert_tile_v
+ * 0: The assert was from main
+ * 1: The assert was from a tile
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fw_assert_tile_v, 0x2C, 23, 1);
+
+/* reg_mfde_fw_assert_tile_index
+ * When tile_v=1, the tile_index that caused the assert.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fw_assert_tile_index, 0x2C, 16, 6);
+
+/* reg_mfde_fw_assert_ext_synd
+ * A generated one-to-one identifier which is specific per assert.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fw_assert_ext_synd, 0x2C, 0, 16);
+
+/* reg_mfde_fatal_cause_id
+ * HW interrupt cause id.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fatal_cause_id, 0x10, 0, 18);
+
+/* reg_mfde_fatal_cause_tile_v
+ * 0: The assert was from main
+ * 1: The assert was from a tile
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fatal_cause_tile_v, 0x14, 23, 1);
+
+/* reg_mfde_fatal_cause_tile_index
+ * When tile_v=1, the tile_index that caused the assert.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fatal_cause_tile_index, 0x14, 16, 6);
+
+/* TNGCR - Tunneling NVE General Configuration Register
+ * ----------------------------------------------------
+ * The TNGCR register is used for setting up the NVE Tunneling configuration.
+ */
+#define MLXSW_REG_TNGCR_ID 0xA001
+#define MLXSW_REG_TNGCR_LEN 0x44
+
+MLXSW_REG_DEFINE(tngcr, MLXSW_REG_TNGCR_ID, MLXSW_REG_TNGCR_LEN);
+
+enum mlxsw_reg_tngcr_type {
+ MLXSW_REG_TNGCR_TYPE_VXLAN,
+ MLXSW_REG_TNGCR_TYPE_VXLAN_GPE,
+ MLXSW_REG_TNGCR_TYPE_GENEVE,
+ MLXSW_REG_TNGCR_TYPE_NVGRE,
+};
+
+/* reg_tngcr_type
+ * Tunnel type for encapsulation and decapsulation. The types are mutually
+ * exclusive.
+ * Note: For Spectrum the NVE parsing must be enabled in MPRS.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, type, 0x00, 0, 4);
+
+/* reg_tngcr_nve_valid
+ * The VTEP is valid. Allows adding FDB entries for tunnel encapsulation.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_valid, 0x04, 31, 1);
+
+/* reg_tngcr_nve_ttl_uc
+ * The TTL for NVE tunnel encapsulation underlay unicast packets.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_ttl_uc, 0x04, 0, 8);
+
+/* reg_tngcr_nve_ttl_mc
+ * The TTL for NVE tunnel encapsulation underlay multicast packets.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_ttl_mc, 0x08, 0, 8);
+
+enum {
+ /* Do not copy flow label. Calculate flow label using nve_flh. */
+ MLXSW_REG_TNGCR_FL_NO_COPY,
+ /* Copy flow label from inner packet if packet is IPv6 and
+ * encapsulation is by IPv6. Otherwise, calculate flow label using
+ * nve_flh.
+ */
+ MLXSW_REG_TNGCR_FL_COPY,
+};
+
+/* reg_tngcr_nve_flc
+ * For NVE tunnel encapsulation: Flow label copy from inner packet.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_flc, 0x0C, 25, 1);
+
+enum {
+ /* Flow label is static. In Spectrum this means '0'. Spectrum-2
+ * uses {nve_fl_prefix, nve_fl_suffix}.
+ */
+ MLXSW_REG_TNGCR_FL_NO_HASH,
+ /* 8 LSBs of the flow label are calculated from ECMP hash of the
+ * inner packet. 12 MSBs are configured by nve_fl_prefix.
+ */
+ MLXSW_REG_TNGCR_FL_HASH,
+};
+
+/* reg_tngcr_nve_flh
+ * NVE flow label hash.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_flh, 0x0C, 24, 1);
+
+/* reg_tngcr_nve_fl_prefix
+ * NVE flow label prefix. Constant 12 MSBs of the flow label.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_fl_prefix, 0x0C, 8, 12);
+
+/* reg_tngcr_nve_fl_suffix
+ * NVE flow label suffix. Constant 8 LSBs of the flow label.
+ * Reserved when nve_flh=1 and for Spectrum.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_fl_suffix, 0x0C, 0, 8);
+
+enum {
+ /* Source UDP port is fixed (default '0') */
+ MLXSW_REG_TNGCR_UDP_SPORT_NO_HASH,
+ /* Source UDP port is calculated based on hash */
+ MLXSW_REG_TNGCR_UDP_SPORT_HASH,
+};
+
+/* reg_tngcr_nve_udp_sport_type
+ * NVE UDP source port type.
+ * Spectrum uses LAG hash (SLCRv2). Spectrum-2 uses ECMP hash (RECRv2).
+ * When the source UDP port is calculated based on hash, then the 8 LSBs
+ * are calculated from the hash and the 8 MSBs are configured by
+ * nve_udp_sport_prefix.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_udp_sport_type, 0x10, 24, 1);
+
+/* reg_tngcr_nve_udp_sport_prefix
+ * NVE UDP source port prefix. Constant 8 MSBs of the UDP source port.
+ * Reserved when NVE type is NVGRE.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_udp_sport_prefix, 0x10, 8, 8);
+
+/* reg_tngcr_nve_group_size_mc
+ * The amount of sequential linked lists of MC entries. The first linked
+ * list is configured by SFD.underlay_mc_ptr.
+ * Valid values: 1, 2, 4, 8, 16, 32, 64
+ * The linked lists are configured by TNUMT.
+ * The hash is set by LAG hash.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_group_size_mc, 0x18, 0, 8);
+
+/* reg_tngcr_nve_group_size_flood
+ * The amount of sequential linked lists of flooding entries. The first
+ * linked list is configured by SFMR.nve_tunnel_flood_ptr
+ * Valid values: 1, 2, 4, 8, 16, 32, 64
+ * The linked lists are configured by TNUMT.
+ * The hash is set by LAG hash.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_group_size_flood, 0x1C, 0, 8);
+
+/* reg_tngcr_learn_enable
+ * During decapsulation, whether to learn from NVE port.
+ * Reserved when Spectrum-2. See TNPC.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, learn_enable, 0x20, 31, 1);
+
+/* reg_tngcr_underlay_virtual_router
+ * Underlay virtual router.
+ * Reserved when Spectrum-2.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, underlay_virtual_router, 0x20, 0, 16);
+
+/* reg_tngcr_underlay_rif
+ * Underlay ingress router interface. RIF type should be loopback generic.
+ * Reserved when Spectrum.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, underlay_rif, 0x24, 0, 16);
+
+/* reg_tngcr_usipv4
+ * Underlay source IPv4 address of the NVE.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, usipv4, 0x28, 0, 32);
+
+/* reg_tngcr_usipv6
+ * Underlay source IPv6 address of the NVE. For Spectrum, must not be
+ * modified under traffic of NVE tunneling encapsulation.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, tngcr, usipv6, 0x30, 16);
+
+static inline void mlxsw_reg_tngcr_pack(char *payload,
+ enum mlxsw_reg_tngcr_type type,
+ bool valid, u8 ttl)
+{
+ MLXSW_REG_ZERO(tngcr, payload);
+ mlxsw_reg_tngcr_type_set(payload, type);
+ mlxsw_reg_tngcr_nve_valid_set(payload, valid);
+ mlxsw_reg_tngcr_nve_ttl_uc_set(payload, ttl);
+ mlxsw_reg_tngcr_nve_ttl_mc_set(payload, ttl);
+ mlxsw_reg_tngcr_nve_flc_set(payload, MLXSW_REG_TNGCR_FL_NO_COPY);
+ mlxsw_reg_tngcr_nve_flh_set(payload, 0);
+ mlxsw_reg_tngcr_nve_udp_sport_type_set(payload,
+ MLXSW_REG_TNGCR_UDP_SPORT_HASH);
+ mlxsw_reg_tngcr_nve_udp_sport_prefix_set(payload, 0);
+ mlxsw_reg_tngcr_nve_group_size_mc_set(payload, 1);
+ mlxsw_reg_tngcr_nve_group_size_flood_set(payload, 1);
+}
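+
+/* Illustrative sketch (the helper and its name are hypothetical, not part
+ * of the driver): pack a TNGCR payload for a VXLAN VTEP whose underlay
+ * source address is IPv4. The caller would then issue the usual register
+ * write. On Spectrum-2 the underlay is keyed by a loopback RIF, while on
+ * Spectrum the underlay_virtual_router field would be used instead.
+ */
+static inline void
+mlxsw_reg_tngcr_vxlan_example_pack(char *payload, u32 usipv4, u16 urif, u8 ttl)
+{
+	mlxsw_reg_tngcr_pack(payload, MLXSW_REG_TNGCR_TYPE_VXLAN, true, ttl);
+	mlxsw_reg_tngcr_usipv4_set(payload, usipv4);
+	mlxsw_reg_tngcr_underlay_rif_set(payload, urif);
+}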
+
+/* TNUMT - Tunneling NVE Underlay Multicast Table Register
+ * -------------------------------------------------------
+ * The TNUMT register is for building the underlay MC table. It is used
+ * for MC, flooding and BC traffic into the NVE tunnel.
+ */
+#define MLXSW_REG_TNUMT_ID 0xA003
+#define MLXSW_REG_TNUMT_LEN 0x20
+
+MLXSW_REG_DEFINE(tnumt, MLXSW_REG_TNUMT_ID, MLXSW_REG_TNUMT_LEN);
+
+enum mlxsw_reg_tnumt_record_type {
+ MLXSW_REG_TNUMT_RECORD_TYPE_IPV4,
+ MLXSW_REG_TNUMT_RECORD_TYPE_IPV6,
+ MLXSW_REG_TNUMT_RECORD_TYPE_LABEL,
+};
+
+/* reg_tnumt_record_type
+ * Record type.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnumt, record_type, 0x00, 28, 4);
+
+/* reg_tnumt_tunnel_port
+ * Tunnel port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnumt, tunnel_port, 0x00, 24, 4);
+
+/* reg_tnumt_underlay_mc_ptr
+ * Index to the underlay multicast table.
+ * For Spectrum the index is to the KVD linear.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tnumt, underlay_mc_ptr, 0x00, 0, 24);
+
+/* reg_tnumt_vnext
+ * The next_underlay_mc_ptr is valid.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnumt, vnext, 0x04, 31, 1);
+
+/* reg_tnumt_next_underlay_mc_ptr
+ * The next index to the underlay multicast table.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnumt, next_underlay_mc_ptr, 0x04, 0, 24);
+
+/* reg_tnumt_record_size
+ * Number of IP addresses in the record.
+ * Range is 1..cap_max_nve_mc_entries_ipv{4,6}
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnumt, record_size, 0x08, 0, 3);
+
+/* reg_tnumt_udip
+ * The underlay IPv4 addresses. udip[i] is reserved if i >= size
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, tnumt, udip, 0x0C, 0, 32, 0x04, 0x00, false);
+
+/* reg_tnumt_udip_ptr
+ * The pointer to the underlay IPv6 addresses. udip_ptr[i] is reserved if
+ * i >= size. The IPv6 addresses are configured by RIPS.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, tnumt, udip_ptr, 0x0C, 0, 24, 0x04, 0x00, false);
+
+static inline void mlxsw_reg_tnumt_pack(char *payload,
+ enum mlxsw_reg_tnumt_record_type type,
+ enum mlxsw_reg_tunnel_port tport,
+ u32 underlay_mc_ptr, bool vnext,
+ u32 next_underlay_mc_ptr,
+ u8 record_size)
+{
+ MLXSW_REG_ZERO(tnumt, payload);
+ mlxsw_reg_tnumt_record_type_set(payload, type);
+ mlxsw_reg_tnumt_tunnel_port_set(payload, tport);
+ mlxsw_reg_tnumt_underlay_mc_ptr_set(payload, underlay_mc_ptr);
+ mlxsw_reg_tnumt_vnext_set(payload, vnext);
+ mlxsw_reg_tnumt_next_underlay_mc_ptr_set(payload, next_underlay_mc_ptr);
+ mlxsw_reg_tnumt_record_size_set(payload, record_size);
+}
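+
+/* Illustrative sketch (hypothetical helper, values made up): build the first
+ * IPv4 record of an underlay MC linked list with two destination VTEPs and
+ * chain it to a follow-up record via vnext/next_underlay_mc_ptr. It assumes
+ * the NVE tunnel port enumerator defined earlier in this file.
+ */
+static inline void
+mlxsw_reg_tnumt_ipv4_example_pack(char *payload, u32 mc_ptr, u32 next_mc_ptr,
+				  u32 vtep1, u32 vtep2)
+{
+	mlxsw_reg_tnumt_pack(payload, MLXSW_REG_TNUMT_RECORD_TYPE_IPV4,
+			     MLXSW_REG_TUNNEL_PORT_NVE, mc_ptr, true,
+			     next_mc_ptr, 2);
+	mlxsw_reg_tnumt_udip_set(payload, 0, vtep1);
+	mlxsw_reg_tnumt_udip_set(payload, 1, vtep2);
+}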
+
+/* TNQCR - Tunneling NVE QoS Configuration Register
+ * ------------------------------------------------
+ * The TNQCR register configures how QoS is set in encapsulation into the
+ * underlay network.
+ */
+#define MLXSW_REG_TNQCR_ID 0xA010
+#define MLXSW_REG_TNQCR_LEN 0x0C
+
+MLXSW_REG_DEFINE(tnqcr, MLXSW_REG_TNQCR_ID, MLXSW_REG_TNQCR_LEN);
+
+/* reg_tnqcr_enc_set_dscp
+ * For encapsulation: How to set DSCP field:
+ * 0 - Copy the DSCP from the overlay (inner) IP header to the underlay
+ * (outer) IP header. If there is no IP header, use TNQDR.dscp
+ * 1 - Set the DSCP field as TNQDR.dscp
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnqcr, enc_set_dscp, 0x04, 28, 1);
+
+static inline void mlxsw_reg_tnqcr_pack(char *payload)
+{
+ MLXSW_REG_ZERO(tnqcr, payload);
+ mlxsw_reg_tnqcr_enc_set_dscp_set(payload, 0);
+}
+
+/* TNQDR - Tunneling NVE QoS Default Register
+ * ------------------------------------------
+ * The TNQDR register configures the default QoS settings for NVE
+ * encapsulation.
+ */
+#define MLXSW_REG_TNQDR_ID 0xA011
+#define MLXSW_REG_TNQDR_LEN 0x08
+
+MLXSW_REG_DEFINE(tnqdr, MLXSW_REG_TNQDR_ID, MLXSW_REG_TNQDR_LEN);
+
+/* reg_tnqdr_local_port
+ * Local port number (receive port). CPU port is supported.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, tnqdr, 0x00, 16, 0x00, 12);
+
+/* reg_tnqdr_dscp
+ * For encapsulation, the default DSCP.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnqdr, dscp, 0x04, 0, 6);
+
+static inline void mlxsw_reg_tnqdr_pack(char *payload, u16 local_port)
+{
+ MLXSW_REG_ZERO(tnqdr, payload);
+ mlxsw_reg_tnqdr_local_port_set(payload, local_port);
+ mlxsw_reg_tnqdr_dscp_set(payload, 0);
+}
+
+/* TNEEM - Tunneling NVE Encapsulation ECN Mapping Register
+ * --------------------------------------------------------
+ * The TNEEM register maps ECN of the IP header at the ingress to the
+ * encapsulation to the ECN of the underlay network.
+ */
+#define MLXSW_REG_TNEEM_ID 0xA012
+#define MLXSW_REG_TNEEM_LEN 0x0C
+
+MLXSW_REG_DEFINE(tneem, MLXSW_REG_TNEEM_ID, MLXSW_REG_TNEEM_LEN);
+
+/* reg_tneem_overlay_ecn
+ * ECN of the IP header in the overlay network.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tneem, overlay_ecn, 0x04, 24, 2);
+
+/* reg_tneem_underlay_ecn
+ * ECN of the IP header in the underlay network.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tneem, underlay_ecn, 0x04, 16, 2);
+
+static inline void mlxsw_reg_tneem_pack(char *payload, u8 overlay_ecn,
+ u8 underlay_ecn)
+{
+ MLXSW_REG_ZERO(tneem, payload);
+ mlxsw_reg_tneem_overlay_ecn_set(payload, overlay_ecn);
+ mlxsw_reg_tneem_underlay_ecn_set(payload, underlay_ecn);
+}
+
+/* TNDEM - Tunneling NVE Decapsulation ECN Mapping Register
+ * --------------------------------------------------------
+ * The TNDEM register configures the actions that are done in the
+ * decapsulation.
+ */
+#define MLXSW_REG_TNDEM_ID 0xA013
+#define MLXSW_REG_TNDEM_LEN 0x0C
+
+MLXSW_REG_DEFINE(tndem, MLXSW_REG_TNDEM_ID, MLXSW_REG_TNDEM_LEN);
+
+/* reg_tndem_underlay_ecn
+ * ECN field of the IP header in the underlay network.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tndem, underlay_ecn, 0x04, 24, 2);
+
+/* reg_tndem_overlay_ecn
+ * ECN field of the IP header in the overlay network.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tndem, overlay_ecn, 0x04, 16, 2);
+
+/* reg_tndem_eip_ecn
+ * Egress IP ECN. ECN field of the IP header of the packet which goes out
+ * from the decapsulation.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tndem, eip_ecn, 0x04, 8, 2);
+
+/* reg_tndem_trap_en
+ * Trap enable:
+ * 0 - No trap due to decap ECN
+ * 1 - Trap enable with trap_id
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tndem, trap_en, 0x08, 28, 4);
+
+/* reg_tndem_trap_id
+ * Trap ID. Either DECAP_ECN0 or DECAP_ECN1.
+ * Reserved when trap_en is '0'.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tndem, trap_id, 0x08, 0, 9);
+
+static inline void mlxsw_reg_tndem_pack(char *payload, u8 underlay_ecn,
+ u8 overlay_ecn, u8 ecn, bool trap_en,
+ u16 trap_id)
+{
+ MLXSW_REG_ZERO(tndem, payload);
+ mlxsw_reg_tndem_underlay_ecn_set(payload, underlay_ecn);
+ mlxsw_reg_tndem_overlay_ecn_set(payload, overlay_ecn);
+ mlxsw_reg_tndem_eip_ecn_set(payload, ecn);
+ mlxsw_reg_tndem_trap_en_set(payload, trap_en);
+ mlxsw_reg_tndem_trap_id_set(payload, trap_id);
+}
+
+/* TNPC - Tunnel Port Configuration Register
+ * -----------------------------------------
+ * The TNPC register is used for tunnel port configuration.
+ * Reserved when Spectrum.
+ */
+#define MLXSW_REG_TNPC_ID 0xA020
+#define MLXSW_REG_TNPC_LEN 0x18
+
+MLXSW_REG_DEFINE(tnpc, MLXSW_REG_TNPC_ID, MLXSW_REG_TNPC_LEN);
+
+/* reg_tnpc_tunnel_port
+ * Tunnel port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tnpc, tunnel_port, 0x00, 0, 4);
+
+/* reg_tnpc_learn_enable_v6
+ * During IPv6 underlay decapsulation, whether to learn from tunnel port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnpc, learn_enable_v6, 0x04, 1, 1);
+
+/* reg_tnpc_learn_enable_v4
+ * During IPv4 underlay decapsulation, whether to learn from tunnel port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnpc, learn_enable_v4, 0x04, 0, 1);
+
+static inline void mlxsw_reg_tnpc_pack(char *payload,
+ enum mlxsw_reg_tunnel_port tport,
+ bool learn_enable)
+{
+ MLXSW_REG_ZERO(tnpc, payload);
+ mlxsw_reg_tnpc_tunnel_port_set(payload, tport);
+ mlxsw_reg_tnpc_learn_enable_v4_set(payload, learn_enable);
+ mlxsw_reg_tnpc_learn_enable_v6_set(payload, learn_enable);
+}
+
+/* TIGCR - Tunneling IPinIP General Configuration Register
+ * -------------------------------------------------------
+ * The TIGCR register is used for setting up the IPinIP Tunnel configuration.
+ */
+#define MLXSW_REG_TIGCR_ID 0xA801
+#define MLXSW_REG_TIGCR_LEN 0x10
+
+MLXSW_REG_DEFINE(tigcr, MLXSW_REG_TIGCR_ID, MLXSW_REG_TIGCR_LEN);
+
+/* reg_tigcr_ipip_ttlc
+ * For IPinIP Tunnel encapsulation: whether to copy the ttl from the packet
+ * header.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tigcr, ttlc, 0x04, 8, 1);
+
+/* reg_tigcr_ipip_ttl_uc
+ * The TTL for IPinIP Tunnel encapsulation of unicast packets if
+ * reg_tigcr_ipip_ttlc is unset.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tigcr, ttl_uc, 0x04, 0, 8);
+
+static inline void mlxsw_reg_tigcr_pack(char *payload, bool ttlc, u8 ttl_uc)
+{
+ MLXSW_REG_ZERO(tigcr, payload);
+ mlxsw_reg_tigcr_ttlc_set(payload, ttlc);
+ mlxsw_reg_tigcr_ttl_uc_set(payload, ttl_uc);
+}
+
+/* TIEEM - Tunneling IPinIP Encapsulation ECN Mapping Register
+ * -----------------------------------------------------------
+ * The TIEEM register maps ECN of the IP header at the ingress to the
+ * encapsulation to the ECN of the underlay network.
+ */
+#define MLXSW_REG_TIEEM_ID 0xA812
+#define MLXSW_REG_TIEEM_LEN 0x0C
+
+MLXSW_REG_DEFINE(tieem, MLXSW_REG_TIEEM_ID, MLXSW_REG_TIEEM_LEN);
+
+/* reg_tieem_overlay_ecn
+ * ECN of the IP header in the overlay network.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tieem, overlay_ecn, 0x04, 24, 2);
+
+/* reg_tieem_underlay_ecn
+ * ECN of the IP header in the underlay network.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tieem, underlay_ecn, 0x04, 16, 2);
+
+static inline void mlxsw_reg_tieem_pack(char *payload, u8 overlay_ecn,
+ u8 underlay_ecn)
+{
+ MLXSW_REG_ZERO(tieem, payload);
+ mlxsw_reg_tieem_overlay_ecn_set(payload, overlay_ecn);
+ mlxsw_reg_tieem_underlay_ecn_set(payload, underlay_ecn);
+}
+
+/* TIDEM - Tunneling IPinIP Decapsulation ECN Mapping Register
+ * -----------------------------------------------------------
+ * The TIDEM register configures the actions that are done in the
+ * decapsulation.
+ */
+#define MLXSW_REG_TIDEM_ID 0xA813
+#define MLXSW_REG_TIDEM_LEN 0x0C
+
+MLXSW_REG_DEFINE(tidem, MLXSW_REG_TIDEM_ID, MLXSW_REG_TIDEM_LEN);
+
+/* reg_tidem_underlay_ecn
+ * ECN field of the IP header in the underlay network.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tidem, underlay_ecn, 0x04, 24, 2);
+
+/* reg_tidem_overlay_ecn
+ * ECN field of the IP header in the overlay network.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tidem, overlay_ecn, 0x04, 16, 2);
+
+/* reg_tidem_eip_ecn
+ * Egress IP ECN. ECN field of the IP header of the packet which goes out
+ * from the decapsulation.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tidem, eip_ecn, 0x04, 8, 2);
+
+/* reg_tidem_trap_en
+ * Trap enable:
+ * 0 - No trap due to decap ECN
+ * 1 - Trap enable with trap_id
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tidem, trap_en, 0x08, 28, 4);
+
+/* reg_tidem_trap_id
+ * Trap ID. Either DECAP_ECN0 or DECAP_ECN1.
+ * Reserved when trap_en is '0'.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tidem, trap_id, 0x08, 0, 9);
+
+static inline void mlxsw_reg_tidem_pack(char *payload, u8 underlay_ecn,
+ u8 overlay_ecn, u8 eip_ecn,
+ bool trap_en, u16 trap_id)
+{
+ MLXSW_REG_ZERO(tidem, payload);
+ mlxsw_reg_tidem_underlay_ecn_set(payload, underlay_ecn);
+ mlxsw_reg_tidem_overlay_ecn_set(payload, overlay_ecn);
+ mlxsw_reg_tidem_eip_ecn_set(payload, eip_ecn);
+ mlxsw_reg_tidem_trap_en_set(payload, trap_en);
+ mlxsw_reg_tidem_trap_id_set(payload, trap_id);
+}
+
+/* SBPR - Shared Buffer Pools Register
+ * -----------------------------------
+ * The SBPR configures and retrieves the shared buffer pools and configuration.
+ */
+#define MLXSW_REG_SBPR_ID 0xB001
+#define MLXSW_REG_SBPR_LEN 0x14
+
+MLXSW_REG_DEFINE(sbpr, MLXSW_REG_SBPR_ID, MLXSW_REG_SBPR_LEN);
+
+/* reg_sbpr_desc
+ * When set, configures descriptor buffer.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbpr, desc, 0x00, 31, 1);
+
+/* shared direction enum for SBPR, SBCM, SBPM */
+enum mlxsw_reg_sbxx_dir {
+ MLXSW_REG_SBXX_DIR_INGRESS,
+ MLXSW_REG_SBXX_DIR_EGRESS,
+};
+
+/* reg_sbpr_dir
+ * Direction.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbpr, dir, 0x00, 24, 2);
+
+/* reg_sbpr_pool
+ * Pool index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbpr, pool, 0x00, 0, 4);
+
+/* reg_sbpr_infi_size
+ * Size is infinite.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbpr, infi_size, 0x04, 31, 1);
+
+/* reg_sbpr_size
+ * Pool size in buffer cells.
+ * Reserved when infi_size = 1.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbpr, size, 0x04, 0, 24);
+
+enum mlxsw_reg_sbpr_mode {
+ MLXSW_REG_SBPR_MODE_STATIC,
+ MLXSW_REG_SBPR_MODE_DYNAMIC,
+};
+
+/* reg_sbpr_mode
+ * Pool quota calculation mode.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbpr, mode, 0x08, 0, 4);
+
+static inline void mlxsw_reg_sbpr_pack(char *payload, u8 pool,
+ enum mlxsw_reg_sbxx_dir dir,
+ enum mlxsw_reg_sbpr_mode mode, u32 size,
+ bool infi_size)
+{
+ MLXSW_REG_ZERO(sbpr, payload);
+ mlxsw_reg_sbpr_pool_set(payload, pool);
+ mlxsw_reg_sbpr_dir_set(payload, dir);
+ mlxsw_reg_sbpr_mode_set(payload, mode);
+ mlxsw_reg_sbpr_size_set(payload, size);
+ mlxsw_reg_sbpr_infi_size_set(payload, infi_size);
+}
+
+/* SBCM - Shared Buffer Class Management Register
+ * ----------------------------------------------
+ * The SBCM register configures and retrieves the shared buffer allocation
+ * and configuration according to Port-PG, including the binding to pool
+ * and definition of the associated quota.
+ */
+#define MLXSW_REG_SBCM_ID 0xB002
+#define MLXSW_REG_SBCM_LEN 0x28
+
+MLXSW_REG_DEFINE(sbcm, MLXSW_REG_SBCM_ID, MLXSW_REG_SBCM_LEN);
+
+/* reg_sbcm_local_port
+ * Local port number.
+ * For Ingress: excludes CPU port and Router port
+ * For Egress: excludes IP Router
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, sbcm, 0x00, 16, 0x00, 4);
+
+/* reg_sbcm_pg_buff
+ * PG buffer - Port PG (dir=ingress) / traffic class (dir=egress)
+ * For PG buffer: range is 0..cap_max_pg_buffers - 1
+ * For traffic class: range is 0..cap_max_tclass - 1
+ * Note that when the traffic classes are in MC aware mode, the MC aware
+ * traffic classes cannot be configured.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbcm, pg_buff, 0x00, 8, 6);
+
+/* reg_sbcm_dir
+ * Direction.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbcm, dir, 0x00, 0, 2);
+
+/* reg_sbcm_min_buff
+ * Minimum buffer size for the limiter, in cells.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbcm, min_buff, 0x18, 0, 24);
+
+/* shared max_buff limits for dynamic threshold for SBCM, SBPM */
+#define MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN 1
+#define MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX 14
+
+/* reg_sbcm_infi_max
+ * Max buffer is infinite.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbcm, infi_max, 0x1C, 31, 1);
+
+/* reg_sbcm_max_buff
+ * When the pool associated with the port-pg/tclass is configured as
+ * static, this is the maximum buffer size for the limiter, in cells.
+ * When the pool associated with the port-pg/tclass is configured as
+ * dynamic, max_buff holds the "alpha" parameter, supporting the
+ * following values:
+ * 0: 0
+ * i: (1/128)*2^(i-1), for i=1..14
+ * 0xFF: Infinity
+ * Reserved when infi_max = 1.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbcm, max_buff, 0x1C, 0, 24);
+
+/* reg_sbcm_pool
+ * Association of the port-priority to a pool.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbcm, pool, 0x24, 0, 4);
+
+static inline void mlxsw_reg_sbcm_pack(char *payload, u16 local_port, u8 pg_buff,
+ enum mlxsw_reg_sbxx_dir dir,
+ u32 min_buff, u32 max_buff,
+ bool infi_max, u8 pool)
+{
+ MLXSW_REG_ZERO(sbcm, payload);
+ mlxsw_reg_sbcm_local_port_set(payload, local_port);
+ mlxsw_reg_sbcm_pg_buff_set(payload, pg_buff);
+ mlxsw_reg_sbcm_dir_set(payload, dir);
+ mlxsw_reg_sbcm_min_buff_set(payload, min_buff);
+ mlxsw_reg_sbcm_max_buff_set(payload, max_buff);
+ mlxsw_reg_sbcm_infi_max_set(payload, infi_max);
+ mlxsw_reg_sbcm_pool_set(payload, pool);
+}
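+
+/* Worked example of the dynamic max_buff encoding: when the bound pool is in
+ * MLXSW_REG_SBPR_MODE_DYNAMIC, max_buff encodes the "alpha" factor as
+ * alpha = (1/128) * 2^(max_buff - 1), so max_buff = 1 gives alpha = 1/128,
+ * max_buff = 8 gives alpha = 1 and max_buff = 14
+ * (MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX) gives alpha = 64. The helper below is a
+ * hypothetical sketch that binds an ingress PG to pool 0 with alpha = 1.
+ */
+static inline void
+mlxsw_reg_sbcm_dyn_example_pack(char *payload, u16 local_port, u8 pg_buff)
+{
+	mlxsw_reg_sbcm_pack(payload, local_port, pg_buff,
+			    MLXSW_REG_SBXX_DIR_INGRESS, 0, 8, false, 0);
+}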
+
+/* SBPM - Shared Buffer Port Management Register
+ * ---------------------------------------------
+ * The SBPM register configures and retrieves the shared buffer allocation
+ * and configuration according to Port-Pool, including the definition
+ * of the associated quota.
+ */
+#define MLXSW_REG_SBPM_ID 0xB003
+#define MLXSW_REG_SBPM_LEN 0x28
+
+MLXSW_REG_DEFINE(sbpm, MLXSW_REG_SBPM_ID, MLXSW_REG_SBPM_LEN);
+
+/* reg_sbpm_local_port
+ * Local port number.
+ * For Ingress: excludes CPU port and Router port
+ * For Egress: excludes IP Router
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, sbpm, 0x00, 16, 0x00, 12);
+
+/* reg_sbpm_pool
+ * The pool associated to quota counting on the local_port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbpm, pool, 0x00, 8, 4);
+
+/* reg_sbpm_dir
+ * Direction.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbpm, dir, 0x00, 0, 2);
+
+/* reg_sbpm_buff_occupancy
+ * Current buffer occupancy in cells.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, sbpm, buff_occupancy, 0x10, 0, 24);
+
+/* reg_sbpm_clr
+ * Clear Max Buffer Occupancy
+ * When this bit is set, max_buff_occupancy field is cleared (and a
+ * new max value is tracked from the time the clear was performed).
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, sbpm, clr, 0x14, 31, 1);
+
+/* reg_sbpm_max_buff_occupancy
+ * Maximum monitored value of buffer occupancy, in cells. Cleared by
+ * writing to the clr field.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, sbpm, max_buff_occupancy, 0x14, 0, 24);
+
+/* reg_sbpm_min_buff
+ * Minimum buffer size for the limiter, in cells.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbpm, min_buff, 0x18, 0, 24);
+
+/* reg_sbpm_max_buff
+ * When the pool associated with the port-pg/tclass is configured as
+ * static, this is the maximum buffer size for the limiter, in cells.
+ * When the pool associated with the port-pg/tclass is configured as
+ * dynamic, max_buff holds the "alpha" parameter, supporting the
+ * following values:
+ * 0: 0
+ * i: (1/128)*2^(i-1), for i=1..14
+ * 0xFF: Infinity
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbpm, max_buff, 0x1C, 0, 24);
+
+static inline void mlxsw_reg_sbpm_pack(char *payload, u16 local_port, u8 pool,
+ enum mlxsw_reg_sbxx_dir dir, bool clr,
+ u32 min_buff, u32 max_buff)
+{
+ MLXSW_REG_ZERO(sbpm, payload);
+ mlxsw_reg_sbpm_local_port_set(payload, local_port);
+ mlxsw_reg_sbpm_pool_set(payload, pool);
+ mlxsw_reg_sbpm_dir_set(payload, dir);
+ mlxsw_reg_sbpm_clr_set(payload, clr);
+ mlxsw_reg_sbpm_min_buff_set(payload, min_buff);
+ mlxsw_reg_sbpm_max_buff_set(payload, max_buff);
+}
+
+static inline void mlxsw_reg_sbpm_unpack(char *payload, u32 *p_buff_occupancy,
+ u32 *p_max_buff_occupancy)
+{
+ *p_buff_occupancy = mlxsw_reg_sbpm_buff_occupancy_get(payload);
+ *p_max_buff_occupancy = mlxsw_reg_sbpm_max_buff_occupancy_get(payload);
+}
+
+/* SBMM - Shared Buffer Multicast Management Register
+ * --------------------------------------------------
+ * The SBMM register configures and retrieves the shared buffer allocation
+ * and configuration for MC packets according to Switch-Priority, including
+ * the binding to pool and definition of the associated quota.
+ */
+#define MLXSW_REG_SBMM_ID 0xB004
+#define MLXSW_REG_SBMM_LEN 0x28
+
+MLXSW_REG_DEFINE(sbmm, MLXSW_REG_SBMM_ID, MLXSW_REG_SBMM_LEN);
+
+/* reg_sbmm_prio
+ * Switch Priority.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbmm, prio, 0x00, 8, 4);
+
+/* reg_sbmm_min_buff
+ * Minimum buffer size for the limiter, in cells.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbmm, min_buff, 0x18, 0, 24);
+
+/* reg_sbmm_max_buff
+ * When the pool associated with the port-pg/tclass is configured as
+ * static, this is the maximum buffer size for the limiter, in cells.
+ * When the pool associated with the port-pg/tclass is configured as
+ * dynamic, max_buff holds the "alpha" parameter, supporting the
+ * following values:
+ * 0: 0
+ * i: (1/128)*2^(i-1), for i=1..14
+ * 0xFF: Infinity
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbmm, max_buff, 0x1C, 0, 24);
+
+/* reg_sbmm_pool
+ * Association of the port-priority to a pool.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbmm, pool, 0x24, 0, 4);
+
+static inline void mlxsw_reg_sbmm_pack(char *payload, u8 prio, u32 min_buff,
+ u32 max_buff, u8 pool)
+{
+ MLXSW_REG_ZERO(sbmm, payload);
+ mlxsw_reg_sbmm_prio_set(payload, prio);
+ mlxsw_reg_sbmm_min_buff_set(payload, min_buff);
+ mlxsw_reg_sbmm_max_buff_set(payload, max_buff);
+ mlxsw_reg_sbmm_pool_set(payload, pool);
+}
+
+/* SBSR - Shared Buffer Status Register
+ * ------------------------------------
+ * The SBSR register retrieves the shared buffer occupancy according to
+ * Port-Pool. Note that this register enables reading a large amount of data.
+ * It is the user's responsibility to limit the amount of data to ensure the
+ * response fits within the maximum transfer unit. If the response exceeds
+ * the maximum transfer unit, it is truncated with no special notice.
+ */
+#define MLXSW_REG_SBSR_ID 0xB005
+#define MLXSW_REG_SBSR_BASE_LEN 0x5C /* base length, without records */
+#define MLXSW_REG_SBSR_REC_LEN 0x8 /* record length */
+#define MLXSW_REG_SBSR_REC_MAX_COUNT 120
+#define MLXSW_REG_SBSR_LEN (MLXSW_REG_SBSR_BASE_LEN + \
+ MLXSW_REG_SBSR_REC_LEN * \
+ MLXSW_REG_SBSR_REC_MAX_COUNT)
+
+MLXSW_REG_DEFINE(sbsr, MLXSW_REG_SBSR_ID, MLXSW_REG_SBSR_LEN);
+
+/* reg_sbsr_clr
+ * Clear Max Buffer Occupancy. When this bit is set, the max_buff_occupancy
+ * field is cleared (and a new max value is tracked from the time the clear
+ * was performed).
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, sbsr, clr, 0x00, 31, 1);
+
+#define MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE 256
+
+/* reg_sbsr_port_page
+ * Determines the range of the ports specified in the 'ingress_port_mask'
+ * and 'egress_port_mask' bit masks.
+ * {ingress,egress}_port_mask[x] is (256 * port_page) + x
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbsr, port_page, 0x04, 0, 4);
+
+/* reg_sbsr_ingress_port_mask
+ * Bit vector for all ingress network ports.
+ * Indicates which of the ports (for which the relevant bit is set)
+ * are affected by the set operation. Configuration of any other port
+ * does not change.
+ * Access: Index
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sbsr, ingress_port_mask, 0x10, 0x20, 1);
+
+/* reg_sbsr_pg_buff_mask
+ * Bit vector for all switch priority groups.
+ * Indicates which of the priorities (for which the relevant bit is set)
+ * are affected by the set operation. Configuration of any other priority
+ * does not change.
+ * Range is 0..cap_max_pg_buffers - 1
+ * Access: Index
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sbsr, pg_buff_mask, 0x30, 0x4, 1);
+
+/* reg_sbsr_egress_port_mask
+ * Bit vector for all egress network ports.
+ * Indicates which of the ports (for which the relevant bit is set)
+ * are affected by the set operation. Configuration of any other port
+ * does not change.
+ * Access: Index
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sbsr, egress_port_mask, 0x34, 0x20, 1);
+
+/* reg_sbsr_tclass_mask
+ * Bit vector for all traffic classes.
+ * Indicates which of the traffic classes (for which the relevant bit is
+ * set) are affected by the set operation. Configuration of any other
+ * traffic class does not change.
+ * Range is 0..cap_max_tclass - 1
+ * Access: Index
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sbsr, tclass_mask, 0x54, 0x8, 1);
+
+static inline void mlxsw_reg_sbsr_pack(char *payload, bool clr)
+{
+ MLXSW_REG_ZERO(sbsr, payload);
+ mlxsw_reg_sbsr_clr_set(payload, clr);
+}
+
+/* reg_sbsr_rec_buff_occupancy
+ * Current buffer occupancy in cells.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sbsr, rec_buff_occupancy, MLXSW_REG_SBSR_BASE_LEN,
+ 0, 24, MLXSW_REG_SBSR_REC_LEN, 0x00, false);
+
+/* reg_sbsr_rec_max_buff_occupancy
+ * Maximum monitored value of buffer occupancy, in cells. Cleared by
+ * writing to the clr field.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sbsr, rec_max_buff_occupancy, MLXSW_REG_SBSR_BASE_LEN,
+ 0, 24, MLXSW_REG_SBSR_REC_LEN, 0x04, false);
+
+static inline void mlxsw_reg_sbsr_rec_unpack(char *payload, int rec_index,
+ u32 *p_buff_occupancy,
+ u32 *p_max_buff_occupancy)
+{
+ *p_buff_occupancy =
+ mlxsw_reg_sbsr_rec_buff_occupancy_get(payload, rec_index);
+ *p_max_buff_occupancy =
+ mlxsw_reg_sbsr_rec_max_buff_occupancy_get(payload, rec_index);
+}
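+
+/* Illustrative sketch (hypothetical helper): request the ingress occupancy
+ * of a single port/PG pair. port_page selects the block of 256 ports the
+ * mask bits refer to, per the {ingress,egress}_port_mask definition above.
+ * After the query completes, records are read with
+ * mlxsw_reg_sbsr_rec_unpack().
+ */
+static inline void mlxsw_reg_sbsr_example_pack(char *payload, u16 local_port,
+					       u8 pg_buff, bool clr)
+{
+	mlxsw_reg_sbsr_pack(payload, clr);
+	mlxsw_reg_sbsr_port_page_set(payload,
+				     local_port / MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE);
+	mlxsw_reg_sbsr_ingress_port_mask_set(payload,
+					     local_port % MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE,
+					     1);
+	mlxsw_reg_sbsr_pg_buff_mask_set(payload, pg_buff, 1);
+}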
+
+/* SBIB - Shared Buffer Internal Buffer Register
+ * ---------------------------------------------
+ * The SBIB register configures per port buffers for internal use. The internal
+ * buffers consume memory on the port buffers (note that the port buffers are
+ * also used by PBMC).
+ *
+ * For Spectrum this is used for egress mirroring.
+ */
+#define MLXSW_REG_SBIB_ID 0xB006
+#define MLXSW_REG_SBIB_LEN 0x10
+
+MLXSW_REG_DEFINE(sbib, MLXSW_REG_SBIB_ID, MLXSW_REG_SBIB_LEN);
+
+/* reg_sbib_local_port
+ * Local port number
+ * Not supported for CPU port and router port
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, sbib, 0x00, 16, 0x00, 12);
+
+/* reg_sbib_buff_size
+ * Units represented in cells
+ * Allowed range is 0 to (cap_max_headroom_size - 1)
+ * Default is 0
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbib, buff_size, 0x08, 0, 24);
+
+static inline void mlxsw_reg_sbib_pack(char *payload, u16 local_port,
+ u32 buff_size)
+{
+ MLXSW_REG_ZERO(sbib, payload);
+ mlxsw_reg_sbib_local_port_set(payload, local_port);
+ mlxsw_reg_sbib_buff_size_set(payload, buff_size);
+}
+
+static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
+ MLXSW_REG(sgcr),
+ MLXSW_REG(spad),
+ MLXSW_REG(sspr),
+ MLXSW_REG(sfdat),
+ MLXSW_REG(sfd),
+ MLXSW_REG(sfn),
+ MLXSW_REG(spms),
+ MLXSW_REG(spvid),
+ MLXSW_REG(spvm),
+ MLXSW_REG(spaft),
+ MLXSW_REG(sfgc),
+ MLXSW_REG(sfdf),
+ MLXSW_REG(sldr),
+ MLXSW_REG(slcr),
+ MLXSW_REG(slcor),
+ MLXSW_REG(spmlr),
+ MLXSW_REG(svfa),
+ MLXSW_REG(spvtr),
+ MLXSW_REG(svpe),
+ MLXSW_REG(sfmr),
+ MLXSW_REG(spvmlr),
+ MLXSW_REG(spvc),
+ MLXSW_REG(spevet),
+ MLXSW_REG(smpe),
+ MLXSW_REG(smid2),
+ MLXSW_REG(cwtp),
+ MLXSW_REG(cwtpm),
+ MLXSW_REG(pgcr),
+ MLXSW_REG(ppbt),
+ MLXSW_REG(pacl),
+ MLXSW_REG(pagt),
+ MLXSW_REG(ptar),
+ MLXSW_REG(ppbs),
+ MLXSW_REG(prcr),
+ MLXSW_REG(pefa),
+ MLXSW_REG(pemrbt),
+ MLXSW_REG(ptce2),
+ MLXSW_REG(perpt),
+ MLXSW_REG(peabfe),
+ MLXSW_REG(perar),
+ MLXSW_REG(ptce3),
+ MLXSW_REG(percr),
+ MLXSW_REG(pererp),
+ MLXSW_REG(iedr),
+ MLXSW_REG(qpts),
+ MLXSW_REG(qpcr),
+ MLXSW_REG(qtct),
+ MLXSW_REG(qeec),
+ MLXSW_REG(qrwe),
+ MLXSW_REG(qpdsm),
+ MLXSW_REG(qpdp),
+ MLXSW_REG(qpdpm),
+ MLXSW_REG(qtctm),
+ MLXSW_REG(qpsc),
+ MLXSW_REG(pmlp),
+ MLXSW_REG(pmtu),
+ MLXSW_REG(ptys),
+ MLXSW_REG(ppad),
+ MLXSW_REG(paos),
+ MLXSW_REG(pfcc),
+ MLXSW_REG(ppcnt),
+ MLXSW_REG(pptb),
+ MLXSW_REG(pbmc),
+ MLXSW_REG(pspa),
+ MLXSW_REG(pmaos),
+ MLXSW_REG(pplr),
+ MLXSW_REG(pmtdb),
+ MLXSW_REG(pmecr),
+ MLXSW_REG(pmpe),
+ MLXSW_REG(pddr),
+ MLXSW_REG(pmmp),
+ MLXSW_REG(pllp),
+ MLXSW_REG(pmtm),
+ MLXSW_REG(htgt),
+ MLXSW_REG(hpkt),
+ MLXSW_REG(rgcr),
+ MLXSW_REG(ritr),
+ MLXSW_REG(rtar),
+ MLXSW_REG(ratr),
+ MLXSW_REG(rtdp),
+ MLXSW_REG(rips),
+ MLXSW_REG(ratrad),
+ MLXSW_REG(rdpm),
+ MLXSW_REG(ricnt),
+ MLXSW_REG(rrcr),
+ MLXSW_REG(ralta),
+ MLXSW_REG(ralst),
+ MLXSW_REG(raltb),
+ MLXSW_REG(ralue),
+ MLXSW_REG(rauht),
+ MLXSW_REG(raleu),
+ MLXSW_REG(rauhtd),
+ MLXSW_REG(rigr2),
+ MLXSW_REG(recr2),
+ MLXSW_REG(rmft2),
+ MLXSW_REG(reiv),
+ MLXSW_REG(mfcr),
+ MLXSW_REG(mfsc),
+ MLXSW_REG(mfsm),
+ MLXSW_REG(mfsl),
+ MLXSW_REG(fore),
+ MLXSW_REG(mtcap),
+ MLXSW_REG(mtmp),
+ MLXSW_REG(mtwe),
+ MLXSW_REG(mtbr),
+ MLXSW_REG(mcia),
+ MLXSW_REG(mpat),
+ MLXSW_REG(mpar),
+ MLXSW_REG(mgir),
+ MLXSW_REG(mrsr),
+ MLXSW_REG(mlcr),
+ MLXSW_REG(mcion),
+ MLXSW_REG(mtpps),
+ MLXSW_REG(mtutc),
+ MLXSW_REG(mpsc),
+ MLXSW_REG(mcqi),
+ MLXSW_REG(mcc),
+ MLXSW_REG(mcda),
+ MLXSW_REG(mgpc),
+ MLXSW_REG(mprs),
+ MLXSW_REG(mogcr),
+ MLXSW_REG(mpagr),
+ MLXSW_REG(momte),
+ MLXSW_REG(mtpppc),
+ MLXSW_REG(mtpptr),
+ MLXSW_REG(mtptpt),
+ MLXSW_REG(mtpcpc),
+ MLXSW_REG(mfgd),
+ MLXSW_REG(mgpir),
+ MLXSW_REG(mbct),
+ MLXSW_REG(mddt),
+ MLXSW_REG(mddq),
+ MLXSW_REG(mddc),
+ MLXSW_REG(mfde),
+ MLXSW_REG(tngcr),
+ MLXSW_REG(tnumt),
+ MLXSW_REG(tnqcr),
+ MLXSW_REG(tnqdr),
+ MLXSW_REG(tneem),
+ MLXSW_REG(tndem),
+ MLXSW_REG(tnpc),
+ MLXSW_REG(tigcr),
+ MLXSW_REG(tieem),
+ MLXSW_REG(tidem),
+ MLXSW_REG(sbpr),
+ MLXSW_REG(sbcm),
+ MLXSW_REG(sbpm),
+ MLXSW_REG(sbmm),
+ MLXSW_REG(sbsr),
+ MLXSW_REG(sbib),
+};
+
+static inline const char *mlxsw_reg_id_str(u16 reg_id)
+{
+ const struct mlxsw_reg_info *reg_info;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_reg_infos); i++) {
+ reg_info = mlxsw_reg_infos[i];
+ if (reg_info->id == reg_id)
+ return reg_info->name;
+ }
+ return "*UNKNOWN*";
+}
+
+/* PUDE - Port Up / Down Event
+ * ---------------------------
+ * Reports the operational state change of a port.
+ */
+#define MLXSW_REG_PUDE_LEN 0x10
+
+/* reg_pude_swid
+ * Switch partition ID with which to associate the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pude, swid, 0x00, 24, 8);
+
+/* reg_pude_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, pude, 0x00, 16, 0x00, 12);
+
+/* reg_pude_admin_status
+ * Port administrative state (the desired state).
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Up once. This means that in case of link failure, the port won't go
+ * into polling mode, but will wait to be re-enabled by software.
+ * 4 - Disabled by system. Can only be set by hardware.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pude, admin_status, 0x00, 8, 4);
+
+/* reg_pude_oper_status
+ * Port operational state.
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Down by port failure. This means that the device will not let the
+ * port up again until explicitly specified by software.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pude, oper_status, 0x00, 0, 4);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
new file mode 100644
index 000000000..19ae0d1c7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_RESOURCES_H
+#define _MLXSW_RESOURCES_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+enum mlxsw_res_id {
+ MLXSW_RES_ID_KVD_SIZE,
+ MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE,
+ MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE,
+ MLXSW_RES_ID_PGT_SIZE,
+ MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE,
+ MLXSW_RES_ID_MAX_KVD_ACTION_SETS,
+ MLXSW_RES_ID_MAX_TRAP_GROUPS,
+ MLXSW_RES_ID_CQE_V0,
+ MLXSW_RES_ID_CQE_V1,
+ MLXSW_RES_ID_CQE_V2,
+ MLXSW_RES_ID_COUNTER_POOL_SIZE,
+ MLXSW_RES_ID_COUNTER_BANK_SIZE,
+ MLXSW_RES_ID_MAX_SPAN,
+ MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES,
+ MLXSW_RES_ID_COUNTER_SIZE_ROUTER_BASIC,
+ MLXSW_RES_ID_MAX_SYSTEM_PORT,
+ MLXSW_RES_ID_FID,
+ MLXSW_RES_ID_MAX_LAG,
+ MLXSW_RES_ID_MAX_LAG_MEMBERS,
+ MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER,
+ MLXSW_RES_ID_CELL_SIZE,
+ MLXSW_RES_ID_MAX_HEADROOM_SIZE,
+ MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS,
+ MLXSW_RES_ID_ACL_MAX_TCAM_RULES,
+ MLXSW_RES_ID_ACL_MAX_REGIONS,
+ MLXSW_RES_ID_ACL_MAX_GROUPS,
+ MLXSW_RES_ID_ACL_MAX_GROUP_SIZE,
+ MLXSW_RES_ID_ACL_MAX_DEFAULT_ACTIONS,
+ MLXSW_RES_ID_ACL_FLEX_KEYS,
+ MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE,
+ MLXSW_RES_ID_ACL_ACTIONS_PER_SET,
+ MLXSW_RES_ID_ACL_MAX_ERPT_BANKS,
+ MLXSW_RES_ID_ACL_MAX_ERPT_BANK_SIZE,
+ MLXSW_RES_ID_ACL_MAX_LARGE_KEY_ID,
+ MLXSW_RES_ID_ACL_ERPT_ENTRIES_2KB,
+ MLXSW_RES_ID_ACL_ERPT_ENTRIES_4KB,
+ MLXSW_RES_ID_ACL_ERPT_ENTRIES_8KB,
+ MLXSW_RES_ID_ACL_ERPT_ENTRIES_12KB,
+ MLXSW_RES_ID_ACL_MAX_BF_LOG,
+ MLXSW_RES_ID_MAX_GLOBAL_POLICERS,
+ MLXSW_RES_ID_MAX_CPU_POLICERS,
+ MLXSW_RES_ID_MAX_VRS,
+ MLXSW_RES_ID_MAX_RIFS,
+ MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES,
+ MLXSW_RES_ID_MAX_RIF_MAC_PROFILES,
+ MLXSW_RES_ID_MAX_LPM_TREES,
+ MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV4,
+ MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV6,
+
+ /* Internal resources.
+ * Determined by the SW, not queried from the HW.
+ */
+ MLXSW_RES_ID_KVD_SINGLE_SIZE,
+ MLXSW_RES_ID_KVD_DOUBLE_SIZE,
+ MLXSW_RES_ID_KVD_LINEAR_SIZE,
+
+ __MLXSW_RES_ID_MAX,
+};
+
+static u16 mlxsw_res_ids[] = {
+ [MLXSW_RES_ID_KVD_SIZE] = 0x1001,
+ [MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002,
+ [MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003,
+ [MLXSW_RES_ID_PGT_SIZE] = 0x1004,
+ [MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE] = 0x1005,
+ [MLXSW_RES_ID_MAX_KVD_ACTION_SETS] = 0x1007,
+ [MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201,
+ [MLXSW_RES_ID_CQE_V0] = 0x2210,
+ [MLXSW_RES_ID_CQE_V1] = 0x2211,
+ [MLXSW_RES_ID_CQE_V2] = 0x2212,
+ [MLXSW_RES_ID_COUNTER_POOL_SIZE] = 0x2410,
+ [MLXSW_RES_ID_COUNTER_BANK_SIZE] = 0x2411,
+ [MLXSW_RES_ID_MAX_SPAN] = 0x2420,
+ [MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES] = 0x2443,
+ [MLXSW_RES_ID_COUNTER_SIZE_ROUTER_BASIC] = 0x2449,
+ [MLXSW_RES_ID_MAX_SYSTEM_PORT] = 0x2502,
+ [MLXSW_RES_ID_FID] = 0x2512,
+ [MLXSW_RES_ID_MAX_LAG] = 0x2520,
+ [MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521,
+ [MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER] = 0x2805, /* Bytes */
+ [MLXSW_RES_ID_CELL_SIZE] = 0x2803, /* Bytes */
+ [MLXSW_RES_ID_MAX_HEADROOM_SIZE] = 0x2811, /* Bytes */
+ [MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS] = 0x2901,
+ [MLXSW_RES_ID_ACL_MAX_TCAM_RULES] = 0x2902,
+ [MLXSW_RES_ID_ACL_MAX_REGIONS] = 0x2903,
+ [MLXSW_RES_ID_ACL_MAX_GROUPS] = 0x2904,
+ [MLXSW_RES_ID_ACL_MAX_GROUP_SIZE] = 0x2905,
+ [MLXSW_RES_ID_ACL_MAX_DEFAULT_ACTIONS] = 0x2908,
+ [MLXSW_RES_ID_ACL_FLEX_KEYS] = 0x2910,
+ [MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE] = 0x2911,
+ [MLXSW_RES_ID_ACL_ACTIONS_PER_SET] = 0x2912,
+ [MLXSW_RES_ID_ACL_MAX_ERPT_BANKS] = 0x2940,
+ [MLXSW_RES_ID_ACL_MAX_ERPT_BANK_SIZE] = 0x2941,
+ [MLXSW_RES_ID_ACL_MAX_LARGE_KEY_ID] = 0x2942,
+ [MLXSW_RES_ID_ACL_ERPT_ENTRIES_2KB] = 0x2950,
+ [MLXSW_RES_ID_ACL_ERPT_ENTRIES_4KB] = 0x2951,
+ [MLXSW_RES_ID_ACL_ERPT_ENTRIES_8KB] = 0x2952,
+ [MLXSW_RES_ID_ACL_ERPT_ENTRIES_12KB] = 0x2953,
+ [MLXSW_RES_ID_ACL_MAX_BF_LOG] = 0x2960,
+ [MLXSW_RES_ID_MAX_GLOBAL_POLICERS] = 0x2A10,
+ [MLXSW_RES_ID_MAX_CPU_POLICERS] = 0x2A13,
+ [MLXSW_RES_ID_MAX_VRS] = 0x2C01,
+ [MLXSW_RES_ID_MAX_RIFS] = 0x2C02,
+ [MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES] = 0x2C10,
+ [MLXSW_RES_ID_MAX_RIF_MAC_PROFILES] = 0x2C14,
+ [MLXSW_RES_ID_MAX_LPM_TREES] = 0x2C30,
+ [MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV4] = 0x2E02,
+ [MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV6] = 0x2E03,
+};
+
+struct mlxsw_res {
+ bool valid[__MLXSW_RES_ID_MAX];
+ u64 values[__MLXSW_RES_ID_MAX];
+};
+
+static inline bool mlxsw_res_valid(struct mlxsw_res *res,
+ enum mlxsw_res_id res_id)
+{
+ return res->valid[res_id];
+}
+
+#define MLXSW_RES_VALID(res, short_res_id) \
+ mlxsw_res_valid(res, MLXSW_RES_ID_##short_res_id)
+
+static inline u64 mlxsw_res_get(struct mlxsw_res *res,
+ enum mlxsw_res_id res_id)
+{
+ if (WARN_ON(!res->valid[res_id]))
+ return 0;
+ return res->values[res_id];
+}
+
+#define MLXSW_RES_GET(res, short_res_id) \
+ mlxsw_res_get(res, MLXSW_RES_ID_##short_res_id)
+
+static inline void mlxsw_res_set(struct mlxsw_res *res,
+ enum mlxsw_res_id res_id, u64 value)
+{
+ res->valid[res_id] = true;
+ res->values[res_id] = value;
+}
+
+#define MLXSW_RES_SET(res, short_res_id, value) \
+ mlxsw_res_set(res, MLXSW_RES_ID_##short_res_id, value)
+
+static inline void mlxsw_res_parse(struct mlxsw_res *res, u16 id, u64 value)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_res_ids); i++) {
+ if (mlxsw_res_ids[i] == id) {
+ mlxsw_res_set(res, i, value);
+ return;
+ }
+ }
+}
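+
+/* Illustrative sketch (the value 128 is made up): the core fills the table
+ * from the firmware resource query, e.g. a (0x2520, 128) pair lands in
+ * MLXSW_RES_ID_MAX_LAG, and callers read it back via the convenience macros.
+ */
+static inline u64 mlxsw_res_example_max_lag(struct mlxsw_res *res)
+{
+	mlxsw_res_parse(res, 0x2520, 128);	/* as if reported by HW */
+	return MLXSW_RES_GET(res, MAX_LAG);	/* 128 */
+}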
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
new file mode 100644
index 000000000..67ecdb9e7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -0,0 +1,5331 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <linux/bitops.h>
+#include <linux/list.h>
+#include <linux/notifier.h>
+#include <linux/dcbnl.h>
+#include <linux/inetdevice.h>
+#include <linux/netlink.h>
+#include <linux/jhash.h>
+#include <linux/log2.h>
+#include <linux/refcount.h>
+#include <linux/rhashtable.h>
+#include <net/switchdev.h>
+#include <net/pkt_cls.h>
+#include <net/netevent.h>
+#include <net/addrconf.h>
+#include <linux/ptp_classify.h>
+
+#include "spectrum.h"
+#include "pci.h"
+#include "core.h"
+#include "core_env.h"
+#include "reg.h"
+#include "port.h"
+#include "trap.h"
+#include "txheader.h"
+#include "spectrum_cnt.h"
+#include "spectrum_dpipe.h"
+#include "spectrum_acl_flex_actions.h"
+#include "spectrum_span.h"
+#include "spectrum_ptp.h"
+#include "spectrum_trap.h"
+
+#define MLXSW_SP_FWREV_MINOR 2010
+#define MLXSW_SP_FWREV_SUBMINOR 1006
+
+#define MLXSW_SP1_FWREV_MAJOR 13
+#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
+
+static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
+ .major = MLXSW_SP1_FWREV_MAJOR,
+ .minor = MLXSW_SP_FWREV_MINOR,
+ .subminor = MLXSW_SP_FWREV_SUBMINOR,
+ .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
+};
+
+#define MLXSW_SP1_FW_FILENAME \
+ "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
+ "." __stringify(MLXSW_SP_FWREV_MINOR) \
+ "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"
+
+#define MLXSW_SP2_FWREV_MAJOR 29
+
+static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
+ .major = MLXSW_SP2_FWREV_MAJOR,
+ .minor = MLXSW_SP_FWREV_MINOR,
+ .subminor = MLXSW_SP_FWREV_SUBMINOR,
+};
+
+#define MLXSW_SP2_FW_FILENAME \
+ "mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
+ "." __stringify(MLXSW_SP_FWREV_MINOR) \
+ "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"
+
+#define MLXSW_SP3_FWREV_MAJOR 30
+
+static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
+ .major = MLXSW_SP3_FWREV_MAJOR,
+ .minor = MLXSW_SP_FWREV_MINOR,
+ .subminor = MLXSW_SP_FWREV_SUBMINOR,
+};
+
+#define MLXSW_SP3_FW_FILENAME \
+ "mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
+ "." __stringify(MLXSW_SP_FWREV_MINOR) \
+ "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"
+
+#define MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME \
+ "mellanox/lc_ini_bundle_" \
+ __stringify(MLXSW_SP_FWREV_MINOR) "_" \
+ __stringify(MLXSW_SP_FWREV_SUBMINOR) ".bin"
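+
+/* With the revision numbers above, the filename macros expand to:
+ *   MLXSW_SP1_FW_FILENAME -> "mellanox/mlxsw_spectrum-13.2010.1006.mfa2"
+ *   MLXSW_SP2_FW_FILENAME -> "mellanox/mlxsw_spectrum2-29.2010.1006.mfa2"
+ *   MLXSW_SP3_FW_FILENAME -> "mellanox/mlxsw_spectrum3-30.2010.1006.mfa2"
+ *   MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME ->
+ *   "mellanox/lc_ini_bundle_2010_1006.bin"
+ */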
+
+static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
+static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
+static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
+static const char mlxsw_sp4_driver_name[] = "mlxsw_spectrum4";
+
+static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
+};
+static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
+};
+
+/* tx_hdr_version
+ * Tx header version.
+ * Must be set to 1.
+ */
+MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
+
+/* tx_hdr_ctl
+ * Packet control type.
+ * 0 - Ethernet control (e.g. EMADs, LACP)
+ * 1 - Ethernet data
+ */
+MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
+
+/* tx_hdr_proto
+ * Packet protocol type. Must be set to 1 (Ethernet).
+ */
+MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
+
+/* tx_hdr_rx_is_router
+ * Packet is sent from the router. Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
+
+/* tx_hdr_fid_valid
+ * Indicates if the 'fid' field is valid and should be used for
+ * forwarding lookup. Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
+
+/* tx_hdr_swid
+ * Switch partition ID. Must be set to 0.
+ */
+MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
+
+/* tx_hdr_control_tclass
+ * Indicates if the packet should use the control TClass and not one
+ * of the data TClasses.
+ */
+MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
+
+/* tx_hdr_etclass
+ * Egress TClass to be used on the egress device on the egress port.
+ */
+MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
+
+/* tx_hdr_port_mid
+ * Destination local port for unicast packets.
+ * Destination multicast ID for multicast packets.
+ *
+ * Control packets are directed to a specific egress port, while data
+ * packets are transmitted through the CPU port (0) into the switch partition,
+ * where forwarding rules are applied.
+ */
+MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
+
+/* tx_hdr_fid
+ * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
+ * set, otherwise calculated based on the packet's VID using VID to FID mapping.
+ * Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16);
+
+/* tx_hdr_type
+ * 0 - Data packets
+ * 6 - Control packets
+ */
+MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
+
+int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
+ unsigned int counter_index, u64 *packets,
+ u64 *bytes)
+{
+ char mgpc_pl[MLXSW_REG_MGPC_LEN];
+ int err;
+
+ mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
+ MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
+ if (err)
+ return err;
+ if (packets)
+ *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
+ if (bytes)
+ *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
+ return 0;
+}
+
+static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
+ unsigned int counter_index)
+{
+ char mgpc_pl[MLXSW_REG_MGPC_LEN];
+
+ mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
+ MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
+}
+
+int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+ unsigned int *p_counter_index)
+{
+ int err;
+
+ err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
+ p_counter_index);
+ if (err)
+ return err;
+ err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
+ if (err)
+ goto err_counter_clear;
+ return 0;
+
+err_counter_clear:
+ mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
+ *p_counter_index);
+ return err;
+}
+
+void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
+ unsigned int counter_index)
+{
+ mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
+ counter_index);
+}
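+
+/* Illustrative sketch (hypothetical helper, not part of the driver): a flow
+ * counter's typical life cycle is allocate, periodically read packets/bytes,
+ * then free once the flow it is bound to goes away.
+ */
+static int mlxsw_sp_flow_counter_example(struct mlxsw_sp *mlxsw_sp)
+{
+	unsigned int counter_index;
+	u64 packets, bytes;
+	int err;
+
+	err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &counter_index);
+	if (err)
+		return err;
+	err = mlxsw_sp_flow_counter_get(mlxsw_sp, counter_index,
+					&packets, &bytes);
+	mlxsw_sp_flow_counter_free(mlxsw_sp, counter_index);
+	return err;
+}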
+
+void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
+
+ memset(txhdr, 0, MLXSW_TXHDR_LEN);
+
+ mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
+ mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
+ mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
+ mlxsw_tx_hdr_swid_set(txhdr, 0);
+ mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
+ mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
+ mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
+}
+
+int
+mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ char *txhdr;
+ u16 max_fid;
+ int err;
+
+ if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
+ err = -ENOMEM;
+ goto err_skb_cow_head;
+ }
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, FID)) {
+ err = -EIO;
+ goto err_res_valid;
+ }
+ max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID);
+
+ txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
+ memset(txhdr, 0, MLXSW_TXHDR_LEN);
+
+ mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
+ mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
+ mlxsw_tx_hdr_rx_is_router_set(txhdr, true);
+ mlxsw_tx_hdr_fid_valid_set(txhdr, true);
+ mlxsw_tx_hdr_fid_set(txhdr, max_fid + tx_info->local_port - 1);
+ mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA);
+ return 0;
+
+err_res_valid:
+err_skb_cow_head:
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb);
+ return err;
+}
+
+static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb)
+{
+ unsigned int type;
+
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ return false;
+
+ type = ptp_classify_raw(skb);
+ return !!ptp_parse_header(skb, type);
+}
+
+static int mlxsw_sp_txhdr_handle(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ /* In Spectrum-2 and Spectrum-3, PTP events that require a time stamp
+ * need special handling and cannot be transmitted as regular control
+ * packets.
+ */
+ if (unlikely(mlxsw_sp_skb_requires_ts(skb)))
+ return mlxsw_sp->ptp_ops->txhdr_construct(mlxsw_core,
+ mlxsw_sp_port, skb,
+ tx_info);
+
+ if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb);
+ return -ENOMEM;
+ }
+
+ mlxsw_sp_txhdr_construct(skb, tx_info);
+ return 0;
+}
+
+enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
+{
+ switch (state) {
+ case BR_STATE_FORWARDING:
+ return MLXSW_REG_SPMS_STATE_FORWARDING;
+ case BR_STATE_LEARNING:
+ return MLXSW_REG_SPMS_STATE_LEARNING;
+ case BR_STATE_LISTENING:
+ case BR_STATE_DISABLED:
+ case BR_STATE_BLOCKING:
+ return MLXSW_REG_SPMS_STATE_DISCARDING;
+ default:
+ BUG();
+ }
+}
+
+int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
+ u8 state)
+{
+ enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char *spms_pl;
+ int err;
+
+ spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
+ if (!spms_pl)
+ return -ENOMEM;
+ mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
+ mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
+
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
+ kfree(spms_pl);
+ return err;
+}
+
+static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
+{
+ char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
+ int err;
+
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
+ if (err)
+ return err;
+ mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
+ return 0;
+}
+
+int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool is_up)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char paos_pl[MLXSW_REG_PAOS_LEN];
+
+ mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
+ is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
+ MLXSW_PORT_ADMIN_STATUS_DOWN);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
+}
+
+static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ const unsigned char *addr)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char ppad_pl[MLXSW_REG_PPAD_LEN];
+
+ mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
+ mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
+}
+
+static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ eth_hw_addr_gen(mlxsw_sp_port->dev, mlxsw_sp->base_mac,
+ mlxsw_sp_port->local_port);
+ return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port,
+ mlxsw_sp_port->dev->dev_addr);
+}
+
+static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char pmtu_pl[MLXSW_REG_PMTU_LEN];
+ int err;
+
+ mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
+ if (err)
+ return err;
+
+ *p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
+ return 0;
+}
+
+static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char pmtu_pl[MLXSW_REG_PMTU_LEN];
+
+ mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
+ if (mtu > mlxsw_sp_port->max_mtu)
+ return -EINVAL;
+
+ mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
+}
+
+static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp,
+ u16 local_port, u8 swid)
+{
+ char pspa_pl[MLXSW_REG_PSPA_LEN];
+
+ mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
+}
+
+int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char svpe_pl[MLXSW_REG_SVPE_LEN];
+
+ mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
+}
+
+int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
+ bool learn_enable)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char *spvmlr_pl;
+ int err;
+
+ spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
+ if (!spvmlr_pl)
+ return -ENOMEM;
+ mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
+ learn_enable);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
+ kfree(spvmlr_pl);
+ return err;
+}
+
+int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
+{
+ switch (ethtype) {
+ case ETH_P_8021Q:
+ *p_sver_type = 0;
+ break;
+ case ETH_P_8021AD:
+ *p_sver_type = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 ethtype)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char spevet_pl[MLXSW_REG_SPEVET_LEN];
+ u8 sver_type;
+ int err;
+
+ err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
+ if (err)
+ return err;
+
+ mlxsw_reg_spevet_pack(spevet_pl, mlxsw_sp_port->local_port, sver_type);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spevet), spevet_pl);
+}
+
+static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid, u16 ethtype)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char spvid_pl[MLXSW_REG_SPVID_LEN];
+ u8 sver_type;
+ int err;
+
+ err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
+ if (err)
+ return err;
+
+ mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid,
+ sver_type);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
+}
+
+static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool allow)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char spaft_pl[MLXSW_REG_SPAFT_LEN];
+
+ mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
+}
+
+int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
+ u16 ethtype)
+{
+ int err;
+
+ if (!vid) {
+ err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
+ if (err)
+ return err;
+ } else {
+ err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype);
+ if (err)
+ return err;
+ err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
+ if (err)
+ goto err_port_allow_untagged_set;
+ }
+
+ mlxsw_sp_port->pvid = vid;
+ return 0;
+
+err_port_allow_untagged_set:
+ __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype);
+ return err;
+}
+
+static int
+mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char sspr_pl[MLXSW_REG_SSPR_LEN];
+
+ mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
+}
+
+static int
+mlxsw_sp_port_module_info_parse(struct mlxsw_sp *mlxsw_sp,
+ u16 local_port, char *pmlp_pl,
+ struct mlxsw_sp_port_mapping *port_mapping)
+{
+ bool separate_rxtx;
+ u8 first_lane;
+ u8 slot_index;
+ u8 module;
+ u8 width;
+ int i;
+
+ module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
+ slot_index = mlxsw_reg_pmlp_slot_index_get(pmlp_pl, 0);
+ width = mlxsw_reg_pmlp_width_get(pmlp_pl);
+ separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);
+ first_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
+
+ if (width && !is_power_of_2(width)) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
+ local_port);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < width; i++) {
+ if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
+ local_port);
+ return -EINVAL;
+ }
+ if (mlxsw_reg_pmlp_slot_index_get(pmlp_pl, i) != slot_index) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple slot indexes\n",
+ local_port);
+ return -EINVAL;
+ }
+ if (separate_rxtx &&
+ mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
+ mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
+ local_port);
+ return -EINVAL;
+ }
+ if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i + first_lane) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
+ local_port);
+ return -EINVAL;
+ }
+ }
+
+ port_mapping->module = module;
+ port_mapping->slot_index = slot_index;
+ port_mapping->width = width;
+ port_mapping->module_width = width;
+ port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
+ return 0;
+}
+
+static int
+mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
+ struct mlxsw_sp_port_mapping *port_mapping)
+{
+ char pmlp_pl[MLXSW_REG_PMLP_LEN];
+ int err;
+
+ mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
+ if (err)
+ return err;
+ return mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
+ pmlp_pl, port_mapping);
+}
+
+static int
+mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port,
+ const struct mlxsw_sp_port_mapping *port_mapping)
+{
+ char pmlp_pl[MLXSW_REG_PMLP_LEN];
+ int i, err;
+
+ mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->slot_index,
+ port_mapping->module);
+
+ mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
+ mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
+ for (i = 0; i < port_mapping->width; i++) {
+ mlxsw_reg_pmlp_slot_index_set(pmlp_pl, i,
+ port_mapping->slot_index);
+ mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
+ mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
+ }
+
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
+ if (err)
+ goto err_pmlp_write;
+ return 0;
+
+err_pmlp_write:
+ mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->slot_index,
+ port_mapping->module);
+ return err;
+}
+
+static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port,
+ u8 slot_index, u8 module)
+{
+ char pmlp_pl[MLXSW_REG_PMLP_LEN];
+
+ mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
+ mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
+ mlxsw_env_module_port_unmap(mlxsw_sp->core, slot_index, module);
+}
+
+static int mlxsw_sp_port_open(struct net_device *dev)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ int err;
+
+ err = mlxsw_env_module_port_up(mlxsw_sp->core,
+ mlxsw_sp_port->mapping.slot_index,
+ mlxsw_sp_port->mapping.module);
+ if (err)
+ return err;
+ err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
+ if (err)
+ goto err_port_admin_status_set;
+ netif_start_queue(dev);
+ return 0;
+
+err_port_admin_status_set:
+ mlxsw_env_module_port_down(mlxsw_sp->core,
+ mlxsw_sp_port->mapping.slot_index,
+ mlxsw_sp_port->mapping.module);
+ return err;
+}
+
+static int mlxsw_sp_port_stop(struct net_device *dev)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ netif_stop_queue(dev);
+ mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+ mlxsw_env_module_port_down(mlxsw_sp->core,
+ mlxsw_sp_port->mapping.slot_index,
+ mlxsw_sp_port->mapping.module);
+ return 0;
+}
+
+static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
+ const struct mlxsw_tx_info tx_info = {
+ .local_port = mlxsw_sp_port->local_port,
+ .is_emad = false,
+ };
+ u64 len;
+ int err;
+
+ memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
+
+ if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
+ return NETDEV_TX_BUSY;
+
+ if (eth_skb_pad(skb)) {
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ return NETDEV_TX_OK;
+ }
+
+ err = mlxsw_sp_txhdr_handle(mlxsw_sp->core, mlxsw_sp_port, skb,
+ &tx_info);
+ if (err)
+ return NETDEV_TX_OK;
+
+ /* The TX header is consumed by the HW on the way, so we shouldn't
+ * count its bytes as being sent.
+ */
+ len = skb->len - MLXSW_TXHDR_LEN;
+
+ /* Due to a race we might fail here because of a full queue. In that
+ * unlikely case we simply drop the packet.
+ */
+ err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
+
+ if (!err) {
+ pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->tx_packets++;
+ pcpu_stats->tx_bytes += len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+ } else {
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb);
+ }
+ return NETDEV_TX_OK;
+}
+
+static void mlxsw_sp_set_rx_mode(struct net_device *dev)
+{
+}
+
+static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct sockaddr *addr = p;
+ int err;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
+ if (err)
+ return err;
+ eth_hw_addr_set(dev, addr->sa_data);
+ return 0;
+}
+
+static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp_hdroom orig_hdroom;
+ struct mlxsw_sp_hdroom hdroom;
+ int err;
+
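+ /* Resize the port headroom for the new MTU before changing the MTU
+ * itself; roll the headroom back if the MTU change fails.
+ */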
+ orig_hdroom = *mlxsw_sp_port->hdroom;
+
+ hdroom = orig_hdroom;
+ hdroom.mtu = mtu;
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+
+ err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
+ if (err) {
+ netdev_err(dev, "Failed to configure port's headroom\n");
+ return err;
+ }
+
+ err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
+ if (err)
+ goto err_port_mtu_set;
+ dev->mtu = mtu;
+ return 0;
+
+err_port_mtu_set:
+ mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
+ return err;
+}
+
+static int
+mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp_port_pcpu_stats *p;
+ u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+ u32 tx_dropped = 0;
+ unsigned int start;
+ int i;
+
+ for_each_possible_cpu(i) {
+ p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
+ do {
+ start = u64_stats_fetch_begin_irq(&p->syncp);
+ rx_packets = p->rx_packets;
+ rx_bytes = p->rx_bytes;
+ tx_packets = p->tx_packets;
+ tx_bytes = p->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+ /* tx_dropped is u32, updated without syncp protection. */
+ tx_dropped += p->tx_dropped;
+ }
+ stats->tx_dropped = tx_dropped;
+ return 0;
+}
+
+static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
+{
+ switch (attr_id) {
+ case IFLA_OFFLOAD_XSTATS_CPU_HIT:
+ return true;
+ }
+
+ return false;
+}
+
+static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
+ void *sp)
+{
+ switch (attr_id) {
+ case IFLA_OFFLOAD_XSTATS_CPU_HIT:
+ return mlxsw_sp_port_get_sw_stats64(dev, sp);
+ }
+
+ return -EINVAL;
+}
+
+int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
+ int prio, char *ppcnt_pl)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
+ return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
+}
+
+static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+ int err;
+
+ err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
+ 0, ppcnt_pl);
+ if (err)
+ goto out;
+
+ stats->tx_packets =
+ mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
+ stats->rx_packets =
+ mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
+ stats->tx_bytes =
+ mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
+ stats->rx_bytes =
+ mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
+ stats->multicast =
+ mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
+
+ stats->rx_crc_errors =
+ mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
+ stats->rx_frame_errors =
+ mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
+
+ stats->rx_length_errors = (
+ mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
+ mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
+ mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
+
+ stats->rx_errors = (stats->rx_crc_errors +
+ stats->rx_frame_errors + stats->rx_length_errors);
+
+out:
+ return err;
+}
+
+static void
+mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
+ struct mlxsw_sp_port_xstats *xstats)
+{
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+ int err, i;
+
+ err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
+ ppcnt_pl);
+ if (!err)
+ xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);
+
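+ /* Per-queue counters are collected best-effort; a failed query only
+ * skips that counter group for the queue.
+ */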
+ for (i = 0; i < TC_MAX_QUEUE; i++) {
+ err = mlxsw_sp_port_get_stats_raw(dev,
+ MLXSW_REG_PPCNT_TC_CONG_CNT,
+ i, ppcnt_pl);
+ if (err)
+ goto tc_cnt;
+
+ xstats->wred_drop[i] =
+ mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
+ xstats->tc_ecn[i] = mlxsw_reg_ppcnt_ecn_marked_tc_get(ppcnt_pl);
+
+tc_cnt:
+ err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
+ i, ppcnt_pl);
+ if (err)
+ continue;
+
+ xstats->backlog[i] =
+ mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
+ xstats->tail_drop[i] =
+ mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
+ }
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
+ i, ppcnt_pl);
+ if (err)
+ continue;
+
+ xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
+ xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
+ }
+}
+
+static void update_stats_cache(struct work_struct *work)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port =
+ container_of(work, struct mlxsw_sp_port,
+ periodic_hw_stats.update_dw.work);
+
+ if (!netif_carrier_ok(mlxsw_sp_port->dev))
+ /* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
+ * necessary when the port goes down.
+ */
+ goto out;
+
+ mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
+ &mlxsw_sp_port->periodic_hw_stats.stats);
+ mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
+ &mlxsw_sp_port->periodic_hw_stats.xstats);
+
+out:
+ mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
+ MLXSW_HW_STATS_UPDATE_TIME);
+}
+
+/* Return the stats from a cache that is updated periodically,
+ * as this function might get called in an atomic context.
+ */
+static void
+mlxsw_sp_port_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+ memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
+}
+
+static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid_begin, u16 vid_end,
+ bool is_member, bool untagged)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char *spvm_pl;
+ int err;
+
+ spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
+ if (!spvm_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
+ vid_end, is_member, untagged);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
+ kfree(spvm_pl);
+ return err;
+}
+
+int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
+ u16 vid_end, bool is_member, bool untagged)
+{
+ u16 vid, vid_e;
+ int err;
+
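+ /* A single SPVM write can carry at most MLXSW_REG_SPVM_REC_MAX_COUNT
+ * records, so program the VLAN range in chunks of that size.
+ */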
+ for (vid = vid_begin; vid <= vid_end;
+ vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
+ vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
+ vid_end);
+
+ err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
+ is_member, untagged);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool flush_default)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;
+
+ list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
+ &mlxsw_sp_port->vlans_list, list) {
+ if (!flush_default &&
+ mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
+ continue;
+ mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
+ }
+}
+
+static void
+mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
+{
+ if (mlxsw_sp_port_vlan->bridge_port)
+ mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
+ else if (mlxsw_sp_port_vlan->fid)
+ mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
+}
+
+struct mlxsw_sp_port_vlan *
+mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ bool untagged = vid == MLXSW_SP_DEFAULT_VID;
+ int err;
+
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
+ if (mlxsw_sp_port_vlan)
+ return ERR_PTR(-EEXIST);
+
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
+ if (err)
+ return ERR_PTR(err);
+
+ mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
+ if (!mlxsw_sp_port_vlan) {
+ err = -ENOMEM;
+ goto err_port_vlan_alloc;
+ }
+
+ mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
+ mlxsw_sp_port_vlan->vid = vid;
+ list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
+
+ return mlxsw_sp_port_vlan;
+
+err_port_vlan_alloc:
+ mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
+ return ERR_PTR(err);
+}
+
+void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
+ u16 vid = mlxsw_sp_port_vlan->vid;
+
+ mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
+ list_del(&mlxsw_sp_port_vlan->list);
+ kfree(mlxsw_sp_port_vlan);
+ mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
+}
+
+static int mlxsw_sp_port_add_vid(struct net_device *dev,
+ __be16 __always_unused proto, u16 vid)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+ /* VLAN 0 is added to the HW filter when the device goes up, but
+ * it is reserved in our case, so simply return.
+ */
+ if (!vid)
+ return 0;
+
+ return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
+}
+
+static int mlxsw_sp_port_kill_vid(struct net_device *dev,
+ __be16 __always_unused proto, u16 vid)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+
+ /* VLAN 0 is removed from the HW filter when the device goes down, but
+ * it is reserved in our case, so simply return.
+ */
+ if (!vid)
+ return 0;
+
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
+ if (!mlxsw_sp_port_vlan)
+ return 0;
+ mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
+
+ return 0;
+}
+
+static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f)
+{
+ switch (f->binder_type) {
+ case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
+ return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
+ case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
+ return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
+ case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
+ return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
+ case FLOW_BLOCK_BINDER_TYPE_RED_MARK:
+ return mlxsw_sp_setup_tc_block_qevent_mark(mlxsw_sp_port, f);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
+ case TC_SETUP_QDISC_RED:
+ return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
+ case TC_SETUP_QDISC_PRIO:
+ return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
+ case TC_SETUP_QDISC_ETS:
+ return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
+ case TC_SETUP_QDISC_TBF:
+ return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
+ case TC_SETUP_QDISC_FIFO:
+ return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+ if (!enable) {
+ if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
+ mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
+ netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
+ return -EINVAL;
+ }
+ mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
+ mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
+ } else {
+ mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
+ mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
+ }
+ return 0;
+}
+
+static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ char pplr_pl[MLXSW_REG_PPLR_LEN];
+ int err;
+
+ if (netif_running(dev))
+ mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+
+ mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
+ err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
+ pplr_pl);
+
+ if (netif_running(dev))
+ mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
+
+ return err;
+}
+
+typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
+
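+/* Apply a single feature bit: invoke the handler only if the bit actually
+ * changed and mirror the result into dev->features on success.
+ */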
+static int mlxsw_sp_handle_feature(struct net_device *dev,
+ netdev_features_t wanted_features,
+ netdev_features_t feature,
+ mlxsw_sp_feature_handler feature_handler)
+{
+ netdev_features_t changes = wanted_features ^ dev->features;
+ bool enable = !!(wanted_features & feature);
+ int err;
+
+ if (!(changes & feature))
+ return 0;
+
+ err = feature_handler(dev, enable);
+ if (err) {
+ netdev_err(dev, "%s feature %pNF failed, err %d\n",
+ enable ? "Enable" : "Disable", &feature, err);
+ return err;
+ }
+
+ if (enable)
+ dev->features |= feature;
+ else
+ dev->features &= ~feature;
+
+ return 0;
+}
+
+static int mlxsw_sp_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ netdev_features_t oper_features = dev->features;
+ int err = 0;
+
+ err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
+ mlxsw_sp_feature_hw_tc);
+ err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
+ mlxsw_sp_feature_loopback);
+
+ if (err) {
+ dev->features = oper_features;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct devlink_port *
+mlxsw_sp_port_get_devlink_port(struct net_device *dev)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
+ mlxsw_sp_port->local_port);
+}
+
+static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ int err;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
+ &config);
+ if (err)
+ return err;
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ int err;
+
+ err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
+ &config);
+ if (err)
+ return err;
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct hwtstamp_config config = {0};
+
+ mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
+}
+
+static int
+mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
+ case SIOCGHWTSTAMP:
+ return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
+ .ndo_open = mlxsw_sp_port_open,
+ .ndo_stop = mlxsw_sp_port_stop,
+ .ndo_start_xmit = mlxsw_sp_port_xmit,
+ .ndo_setup_tc = mlxsw_sp_setup_tc,
+ .ndo_set_rx_mode = mlxsw_sp_set_rx_mode,
+ .ndo_set_mac_address = mlxsw_sp_port_set_mac_address,
+ .ndo_change_mtu = mlxsw_sp_port_change_mtu,
+ .ndo_get_stats64 = mlxsw_sp_port_get_stats64,
+ .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats,
+ .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats,
+ .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
+ .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
+ .ndo_set_features = mlxsw_sp_set_features,
+ .ndo_get_devlink_port = mlxsw_sp_port_get_devlink_port,
+ .ndo_eth_ioctl = mlxsw_sp_port_ioctl,
+};
+
+static int
+mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
+ const struct mlxsw_sp_port_type_speed_ops *ops;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 eth_proto_cap_masked;
+ int err;
+
+ ops = mlxsw_sp->port_type_speed_ops;
+
+ /* Set advertised speeds to speeds supported by both the driver
+ * and the device.
+ */
+ ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
+ 0, false);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+ if (err)
+ return err;
+
+ ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
+ &eth_proto_admin, &eth_proto_oper);
+ eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
+ ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
+ eth_proto_cap_masked,
+ mlxsw_sp_port->link.autoneg);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+}
+
+int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
+{
+ const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 eth_proto_oper;
+ int err;
+
+ port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
+ port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
+ mlxsw_sp_port->local_port, 0,
+ false);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+ if (err)
+ return err;
+ port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
+ &eth_proto_oper);
+ *speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
+ return 0;
+}
+
+int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
+ bool dwrr, u8 dwrr_weight)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qeec_pl[MLXSW_REG_QEEC_LEN];
+
+ mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
+ next_index);
+ mlxsw_reg_qeec_de_set(qeec_pl, true);
+ mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
+ mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
+}
+
+int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_qeec_hr hr, u8 index,
+ u8 next_index, u32 maxrate, u8 burst_size)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qeec_pl[MLXSW_REG_QEEC_LEN];
+
+ mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
+ next_index);
+ mlxsw_reg_qeec_mase_set(qeec_pl, true);
+ mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
+ mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
+}
+
+static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_qeec_hr hr, u8 index,
+ u8 next_index, u32 minrate)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qeec_pl[MLXSW_REG_QEEC_LEN];
+
+ mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
+ next_index);
+ mlxsw_reg_qeec_mise_set(qeec_pl, true);
+ mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
+}
+
+int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 switch_prio, u8 tclass)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qtct_pl[MLXSW_REG_QTCT_LEN];
+
+ mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
+ tclass);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
+}
+
+static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int err, i;
+
+ /* Set up the element hierarchy, so that each TC is linked to
+ * one subgroup, which are all members of the same group.
+ */
+ err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
+ if (err)
+ return err;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HR_SUBGROUP, i,
+ 0, false, 0);
+ if (err)
+ return err;
+ }
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HR_TC, i, i,
+ false, 0);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HR_TC,
+ i + 8, i,
+ true, 100);
+ if (err)
+ return err;
+ }
+
+ /* Make sure the max shaper is disabled in all hierarchies that support
+ * it. Note that this disables ptps (PTP shaper), but that is intended
+ * for the initial configuration.
+ */
+ err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HR_PORT, 0, 0,
+ MLXSW_REG_QEEC_MAS_DIS, 0);
+ if (err)
+ return err;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HR_SUBGROUP,
+ i, 0,
+ MLXSW_REG_QEEC_MAS_DIS, 0);
+ if (err)
+ return err;
+ }
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HR_TC,
+ i, i,
+ MLXSW_REG_QEEC_MAS_DIS, 0);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HR_TC,
+ i + 8, i,
+ MLXSW_REG_QEEC_MAS_DIS, 0);
+ if (err)
+ return err;
+ }
+
+ /* Configure the min shaper for multicast TCs. */
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HR_TC,
+ i + 8, i,
+ MLXSW_REG_QEEC_MIS_MIN);
+ if (err)
+ return err;
+ }
+
+ /* Map all priorities to traffic class 0. */
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qtctm_pl[MLXSW_REG_QTCTM_LEN];
+
+ mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
+}
+
+static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
+ u8 module = mlxsw_sp_port->mapping.module;
+ u64 overheat_counter;
+ int err;
+
+ err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, slot_index,
+ module, &overheat_counter);
+ if (err)
+ return err;
+
+ mlxsw_sp_port->module_overheat_initial_val = overheat_counter;
+ return 0;
+}
+
+int
+mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool is_8021ad_tagged,
+ bool is_8021q_tagged)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char spvc_pl[MLXSW_REG_SPVC_LEN];
+
+ mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port,
+ is_8021ad_tagged, is_8021q_tagged);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
+}
+
+static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp,
+ u16 local_port, u8 *port_number,
+ u8 *split_port_subnumber,
+ u8 *slot_index)
+{
+ char pllp_pl[MLXSW_REG_PLLP_LEN];
+ int err;
+
+ mlxsw_reg_pllp_pack(pllp_pl, local_port);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pllp), pllp_pl);
+ if (err)
+ return err;
+ mlxsw_reg_pllp_unpack(pllp_pl, port_number,
+ split_port_subnumber, slot_index);
+ return 0;
+}
+
+static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
+ bool split,
+ struct mlxsw_sp_port_mapping *port_mapping)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ u32 lanes = port_mapping->width;
+ u8 split_port_subnumber;
+ struct net_device *dev;
+ u8 port_number;
+ u8 slot_index;
+ bool splittable;
+ int err;
+
+ err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, port_mapping);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
+ local_port);
+ return err;
+ }
+
+ err = mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 0);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
+ local_port);
+ goto err_port_swid_set;
+ }
+
+ err = mlxsw_sp_port_label_info_get(mlxsw_sp, local_port, &port_number,
+ &split_port_subnumber, &slot_index);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get port label information\n",
+ local_port);
+ goto err_port_label_info_get;
+ }
+
+ splittable = lanes > 1 && !split;
+ err = mlxsw_core_port_init(mlxsw_sp->core, local_port, slot_index,
+ port_number, split, split_port_subnumber,
+ splittable, lanes, mlxsw_sp->base_mac,
+ sizeof(mlxsw_sp->base_mac));
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
+ local_port);
+ goto err_core_port_init;
+ }
+
+ dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_alloc_etherdev;
+ }
+ SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
+ dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
+ mlxsw_sp_port = netdev_priv(dev);
+ mlxsw_sp_port->dev = dev;
+ mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
+ mlxsw_sp_port->local_port = local_port;
+ mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
+ mlxsw_sp_port->split = split;
+ mlxsw_sp_port->mapping = *port_mapping;
+ mlxsw_sp_port->link.autoneg = 1;
+ INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
+
+ mlxsw_sp_port->pcpu_stats =
+ netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
+ if (!mlxsw_sp_port->pcpu_stats) {
+ err = -ENOMEM;
+ goto err_alloc_stats;
+ }
+
+ INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
+ &update_stats_cache);
+
+ dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
+ dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
+
+ err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
+ mlxsw_sp_port->local_port);
+ goto err_dev_addr_init;
+ }
+
+ netif_carrier_off(dev);
+
+ dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
+ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
+ dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;
+
+ dev->min_mtu = 0;
+ dev->max_mtu = ETH_MAX_MTU;
+
+ /* Each packet needs to have a Tx header (metadata) on top of all
+ * other headers.
+ */
+ dev->needed_headroom = MLXSW_TXHDR_LEN;
+
+ err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_system_port_mapping_set;
+ }
+
+ err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_speed_by_width_set;
+ }
+
+ err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
+ &mlxsw_sp_port->max_speed);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
+ mlxsw_sp_port->local_port);
+ goto err_max_speed_get;
+ }
+
+ err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_max_mtu_get;
+ }
+
+ err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_mtu_set;
+ }
+
+ err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+ if (err)
+ goto err_port_admin_status_set;
+
+ err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_buffers_init;
+ }
+
+ err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_ets_init;
+ }
+
+ err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_tc_mc_mode;
+ }
+
+ /* ETS and buffers must be initialized before DCB. */
+ err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_dcb_init;
+ }
+
+ err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_fids_init;
+ }
+
+ err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_qdiscs_init;
+ }
+
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
+ false);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_vlan_clear;
+ }
+
+ err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_nve_init;
+ }
+
+ err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
+ ETH_P_8021Q);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_pvid_set;
+ }
+
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
+ MLXSW_SP_DEFAULT_VID);
+ if (IS_ERR(mlxsw_sp_port_vlan)) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
+ mlxsw_sp_port->local_port);
+ err = PTR_ERR(mlxsw_sp_port_vlan);
+ goto err_port_vlan_create;
+ }
+ mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;
+
+ /* Set SPVC.et0=true and SPVC.et1=false so that the local port treats
+ * only packets with an 802.1q header as tagged packets.
+ */
+ err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n",
+ local_port);
+ goto err_port_vlan_classification_set;
+ }
+
+ INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
+ mlxsw_sp->ptp_ops->shaper_work);
+
+ mlxsw_sp->ports[local_port] = mlxsw_sp_port;
+
+ err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_overheat_init_val_set;
+ }
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
+ mlxsw_sp_port->local_port);
+ goto err_register_netdev;
+ }
+
+ mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
+ mlxsw_sp_port, dev);
+ mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
+ return 0;
+
+err_register_netdev:
+err_port_overheat_init_val_set:
+ mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
+err_port_vlan_classification_set:
+ mlxsw_sp->ports[local_port] = NULL;
+ mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
+err_port_vlan_create:
+err_port_pvid_set:
+ mlxsw_sp_port_nve_fini(mlxsw_sp_port);
+err_port_nve_init:
+err_port_vlan_clear:
+ mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
+err_port_qdiscs_init:
+ mlxsw_sp_port_fids_fini(mlxsw_sp_port);
+err_port_fids_init:
+ mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
+err_port_dcb_init:
+ mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
+err_port_tc_mc_mode:
+err_port_ets_init:
+ mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
+err_port_buffers_init:
+err_port_admin_status_set:
+err_port_mtu_set:
+err_port_max_mtu_get:
+err_max_speed_get:
+err_port_speed_by_width_set:
+err_port_system_port_mapping_set:
+err_dev_addr_init:
+ free_percpu(mlxsw_sp_port->pcpu_stats);
+err_alloc_stats:
+ free_netdev(dev);
+err_alloc_etherdev:
+ mlxsw_core_port_fini(mlxsw_sp->core, local_port);
+err_core_port_init:
+err_port_label_info_get:
+ mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
+ MLXSW_PORT_SWID_DISABLED_PORT);
+err_port_swid_set:
+ mlxsw_sp_port_module_unmap(mlxsw_sp, local_port,
+ port_mapping->slot_index,
+ port_mapping->module);
+ return err;
+}
+
+static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
+ u8 module = mlxsw_sp_port->mapping.module;
+
+ cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
+ cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
+ mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
+ unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
+ mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
+ mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
+ mlxsw_sp->ports[local_port] = NULL;
+ mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
+ mlxsw_sp_port_nve_fini(mlxsw_sp_port);
+ mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
+ mlxsw_sp_port_fids_fini(mlxsw_sp_port);
+ mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
+ mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
+ mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
+ free_percpu(mlxsw_sp_port->pcpu_stats);
+ WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
+ free_netdev(mlxsw_sp_port->dev);
+ mlxsw_core_port_fini(mlxsw_sp->core, local_port);
+ mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
+ MLXSW_PORT_SWID_DISABLED_PORT);
+ mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, slot_index, module);
+}
+
+static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ int err;
+
+ mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
+ if (!mlxsw_sp_port)
+ return -ENOMEM;
+
+ mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
+ mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;
+
+ err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
+ mlxsw_sp_port,
+ mlxsw_sp->base_mac,
+ sizeof(mlxsw_sp->base_mac));
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
+ goto err_core_cpu_port_init;
+ }
+
+ mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
+ return 0;
+
+err_core_cpu_port_init:
+ kfree(mlxsw_sp_port);
+ return err;
+}
+
+static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port =
+ mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];
+
+ mlxsw_core_cpu_port_fini(mlxsw_sp->core);
+ mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
+ kfree(mlxsw_sp_port);
+}
+
+static bool mlxsw_sp_local_port_valid(u16 local_port)
+{
+ return local_port != MLXSW_PORT_CPU_PORT;
+}
+
+static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port)
+{
+ if (!mlxsw_sp_local_port_valid(local_port))
+ return false;
+ return mlxsw_sp->ports[local_port] != NULL;
+}
+
+static int mlxsw_sp_port_mapping_event_set(struct mlxsw_sp *mlxsw_sp,
+ u16 local_port, bool enable)
+{
+ char pmecr_pl[MLXSW_REG_PMECR_LEN];
+
+ mlxsw_reg_pmecr_pack(pmecr_pl, local_port,
+ enable ? MLXSW_REG_PMECR_E_GENERATE_EVENT :
+ MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmecr), pmecr_pl);
+}
+
+struct mlxsw_sp_port_mapping_event {
+ struct list_head list;
+ char pmlp_pl[MLXSW_REG_PMLP_LEN];
+};
+
+static void mlxsw_sp_port_mapping_events_work(struct work_struct *work)
+{
+ struct mlxsw_sp_port_mapping_event *event, *next_event;
+ struct mlxsw_sp_port_mapping_events *events;
+ struct mlxsw_sp_port_mapping port_mapping;
+ struct mlxsw_sp *mlxsw_sp;
+ struct devlink *devlink;
+ LIST_HEAD(event_queue);
+ u16 local_port;
+ int err;
+
+ events = container_of(work, struct mlxsw_sp_port_mapping_events, work);
+ mlxsw_sp = container_of(events, struct mlxsw_sp, port_mapping_events);
+ devlink = priv_to_devlink(mlxsw_sp->core);
+
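+ /* Splice the pending events to a local list so the trap handler can
+ * keep queueing new events while this batch is processed.
+ */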
+ spin_lock_bh(&events->queue_lock);
+ list_splice_init(&events->queue, &event_queue);
+ spin_unlock_bh(&events->queue_lock);
+
+ list_for_each_entry_safe(event, next_event, &event_queue, list) {
+ local_port = mlxsw_reg_pmlp_local_port_get(event->pmlp_pl);
+ err = mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
+ event->pmlp_pl, &port_mapping);
+ if (err)
+ goto out;
+
+ if (WARN_ON_ONCE(!port_mapping.width))
+ goto out;
+
+ devl_lock(devlink);
+
+ if (!mlxsw_sp_port_created(mlxsw_sp, local_port))
+ mlxsw_sp_port_create(mlxsw_sp, local_port,
+ false, &port_mapping);
+ else
+ WARN_ON_ONCE(1);
+
+ devl_unlock(devlink);
+
+ mlxsw_sp->port_mapping[local_port] = port_mapping;
+
+out:
+ kfree(event);
+ }
+}
+
+static void
+mlxsw_sp_port_mapping_listener_func(const struct mlxsw_reg_info *reg,
+ char *pmlp_pl, void *priv)
+{
+ struct mlxsw_sp_port_mapping_events *events;
+ struct mlxsw_sp_port_mapping_event *event;
+ struct mlxsw_sp *mlxsw_sp = priv;
+ u16 local_port;
+
+ local_port = mlxsw_reg_pmlp_local_port_get(pmlp_pl);
+ if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
+ return;
+
+ events = &mlxsw_sp->port_mapping_events;
+ event = kmalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return;
+ memcpy(event->pmlp_pl, pmlp_pl, sizeof(event->pmlp_pl));
+ spin_lock(&events->queue_lock);
+ list_add_tail(&event->list, &events->queue);
+ spin_unlock(&events->queue_lock);
+ mlxsw_core_schedule_work(&events->work);
+}
+
+static void
+__mlxsw_sp_port_mapping_events_cancel(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_port_mapping_event *event, *next_event;
+ struct mlxsw_sp_port_mapping_events *events;
+
+ events = &mlxsw_sp->port_mapping_events;
+
+ /* Caller needs to make sure that no new event is going to appear. */
+ cancel_work_sync(&events->work);
+ list_for_each_entry_safe(event, next_event, &events->queue, list) {
+ list_del(&event->list);
+ kfree(event);
+ }
+}
+
+static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
+{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+ int i;
+
+ for (i = 1; i < max_ports; i++)
+ mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
+ /* Make sure all scheduled events are processed */
+ __mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
+
+ for (i = 1; i < max_ports; i++)
+ if (mlxsw_sp_port_created(mlxsw_sp, i))
+ mlxsw_sp_port_remove(mlxsw_sp, i);
+ mlxsw_sp_cpu_port_remove(mlxsw_sp);
+ kfree(mlxsw_sp->ports);
+ mlxsw_sp->ports = NULL;
+}
+
+static void
+mlxsw_sp_ports_remove_selected(struct mlxsw_core *mlxsw_core,
+ bool (*selector)(void *priv, u16 local_port),
+ void *priv)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_core);
+ int i;
+
+ for (i = 1; i < max_ports; i++)
+ if (mlxsw_sp_port_created(mlxsw_sp, i) && selector(priv, i))
+ mlxsw_sp_port_remove(mlxsw_sp, i);
+}
+
+static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
+{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+ struct mlxsw_sp_port_mapping_events *events;
+ struct mlxsw_sp_port_mapping *port_mapping;
+ size_t alloc_size;
+ int i;
+ int err;
+
+ alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
+ mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
+ if (!mlxsw_sp->ports)
+ return -ENOMEM;
+
+ events = &mlxsw_sp->port_mapping_events;
+ INIT_LIST_HEAD(&events->queue);
+ spin_lock_init(&events->queue_lock);
+ INIT_WORK(&events->work, mlxsw_sp_port_mapping_events_work);
+
+ for (i = 1; i < max_ports; i++) {
+ err = mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, true);
+ if (err)
+ goto err_event_enable;
+ }
+
+ err = mlxsw_sp_cpu_port_create(mlxsw_sp);
+ if (err)
+ goto err_cpu_port_create;
+
+ for (i = 1; i < max_ports; i++) {
+ port_mapping = &mlxsw_sp->port_mapping[i];
+ if (!port_mapping->width)
+ continue;
+ err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping);
+ if (err)
+ goto err_port_create;
+ }
+ return 0;
+
+err_port_create:
+ for (i--; i >= 1; i--)
+ if (mlxsw_sp_port_created(mlxsw_sp, i))
+ mlxsw_sp_port_remove(mlxsw_sp, i);
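+ /* Reset i so that the event-disable rollback below covers all ports. */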
+ i = max_ports;
+ mlxsw_sp_cpu_port_remove(mlxsw_sp);
+err_cpu_port_create:
+err_event_enable:
+ for (i--; i >= 1; i--)
+ mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
+ /* Make sure all scheduled events are processed */
+ __mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
+ kfree(mlxsw_sp->ports);
+ mlxsw_sp->ports = NULL;
+ return err;
+}
+
+static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
+{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+ struct mlxsw_sp_port_mapping *port_mapping;
+ int i;
+ int err;
+
+ mlxsw_sp->port_mapping = kcalloc(max_ports,
+ sizeof(struct mlxsw_sp_port_mapping),
+ GFP_KERNEL);
+ if (!mlxsw_sp->port_mapping)
+ return -ENOMEM;
+
+ for (i = 1; i < max_ports; i++) {
+ port_mapping = &mlxsw_sp->port_mapping[i];
+ err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, port_mapping);
+ if (err)
+ goto err_port_module_info_get;
+ }
+ return 0;
+
+err_port_module_info_get:
+ kfree(mlxsw_sp->port_mapping);
+ return err;
+}
+
+static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ kfree(mlxsw_sp->port_mapping);
+}
+
+static int
+mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_port_mapping *port_mapping,
+ unsigned int count, const char *pmtdb_pl)
+{
+ struct mlxsw_sp_port_mapping split_port_mapping;
+ int err, i;
+
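+ /* Each split port gets an equal share of the module's lanes, at
+ * consecutive lane offsets.
+ */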
+ split_port_mapping = *port_mapping;
+ split_port_mapping.width /= count;
+ for (i = 0; i < count; i++) {
+ u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
+
+ if (!mlxsw_sp_local_port_valid(s_local_port))
+ continue;
+
+ err = mlxsw_sp_port_create(mlxsw_sp, s_local_port,
+ true, &split_port_mapping);
+ if (err)
+ goto err_port_create;
+ split_port_mapping.lane += split_port_mapping.width;
+ }
+
+ return 0;
+
+err_port_create:
+ for (i--; i >= 0; i--) {
+ u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
+
+ if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
+ mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
+ }
+ return err;
+}
+
+static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
+ unsigned int count,
+ const char *pmtdb_pl)
+{
+ struct mlxsw_sp_port_mapping *port_mapping;
+ int i;
+
+ /* Go over original unsplit ports in the gap and recreate them. */
+ for (i = 0; i < count; i++) {
+ u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
+
+ port_mapping = &mlxsw_sp->port_mapping[local_port];
+ if (!port_mapping->width || !mlxsw_sp_local_port_valid(local_port))
+ continue;
+ mlxsw_sp_port_create(mlxsw_sp, local_port,
+ false, port_mapping);
+ }
+}
+
+static struct mlxsw_sp_port *
+mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u16 local_port)
+{
+ if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
+ return mlxsw_sp->ports[local_port];
+ return NULL;
+}
+
+static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port,
+ unsigned int count,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_sp_port_mapping port_mapping;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ enum mlxsw_reg_pmtdb_status status;
+ char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
+ int i;
+ int err;
+
+ mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
+ if (!mlxsw_sp_port) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
+ local_port);
+ NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
+ return -EINVAL;
+ }
+
+ if (mlxsw_sp_port->split) {
+ NL_SET_ERR_MSG_MOD(extack, "Port is already split");
+ return -EINVAL;
+ }
+
+ mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
+ mlxsw_sp_port->mapping.module,
+ mlxsw_sp_port->mapping.module_width / count,
+ count);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
+ return err;
+ }
+
+ status = mlxsw_reg_pmtdb_status_get(pmtdb_pl);
+ if (status != MLXSW_REG_PMTDB_STATUS_SUCCESS) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported split configuration");
+ return -EINVAL;
+ }
+
+ port_mapping = mlxsw_sp_port->mapping;
+
+ for (i = 0; i < count; i++) {
+ u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
+
+ if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
+ mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
+ }
+
+ err = mlxsw_sp_port_split_create(mlxsw_sp, &port_mapping,
+ count, pmtdb_pl);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
+ goto err_port_split_create;
+ }
+
+ return 0;
+
+err_port_split_create:
+ mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);
+
+ return err;
+}
+
+static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
+ unsigned int count;
+ int i;
+ int err;
+
+ mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
+ if (!mlxsw_sp_port) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
+ local_port);
+ NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
+ return -EINVAL;
+ }
+
+ if (!mlxsw_sp_port->split) {
+ NL_SET_ERR_MSG_MOD(extack, "Port was not split");
+ return -EINVAL;
+ }
+
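+ /* The split count is the ratio between the module width and the
+ * width of each split port.
+ */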
+ count = mlxsw_sp_port->mapping.module_width /
+ mlxsw_sp_port->mapping.width;
+
+ mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
+ mlxsw_sp_port->mapping.module,
+ mlxsw_sp_port->mapping.module_width / count,
+ count);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
+ return err;
+ }
+
+ for (i = 0; i < count; i++) {
+ u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
+
+ if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
+ mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
+ }
+
+ mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);
+
+ return 0;
+}
+
+static void
+mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int i;
+
+ for (i = 0; i < TC_MAX_QUEUE; i++)
+ mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
+}
+
+static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
+ char *pude_pl, void *priv)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ enum mlxsw_reg_pude_oper_status status;
+ u16 local_port;
+
+ local_port = mlxsw_reg_pude_local_port_get(pude_pl);
+
+ if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
+ return;
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ if (!mlxsw_sp_port)
+ return;
+
+ status = mlxsw_reg_pude_oper_status_get(pude_pl);
+ if (status == MLXSW_PORT_OPER_STATUS_UP) {
+ netdev_info(mlxsw_sp_port->dev, "link up\n");
+ netif_carrier_on(mlxsw_sp_port->dev);
+ mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
+ } else {
+ netdev_info(mlxsw_sp_port->dev, "link down\n");
+ netif_carrier_off(mlxsw_sp_port->dev);
+ mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
+ }
+}
+
+static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
+ char *mtpptr_pl, bool ingress)
+{
+ u16 local_port;
+ u8 num_rec;
+ int i;
+
+ local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
+ num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
+ for (i = 0; i < num_rec; i++) {
+ u8 domain_number;
+ u8 message_type;
+ u16 sequence_id;
+ u64 timestamp;
+
+ mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
+ &domain_number, &sequence_id,
+ &timestamp);
+ mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
+ message_type, domain_number,
+ sequence_id, timestamp);
+ }
+}
+
+static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
+ char *mtpptr_pl, void *priv)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
+}
+
+static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
+ char *mtpptr_pl, void *priv)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
+}
+
+void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
+ u16 local_port, void *priv)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
+
+ if (unlikely(!mlxsw_sp_port)) {
+ dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
+ local_port);
+ return;
+ }
+
+ skb->dev = mlxsw_sp_port->dev;
+
+ pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->rx_packets++;
+ pcpu_stats->rx_bytes += skb->len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ netif_receive_skb(skb);
+}
+
+static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port,
+ void *priv)
+{
+ skb->offload_fwd_mark = 1;
+ return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
+}
+
+static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
+ u16 local_port, void *priv)
+{
+ skb->offload_l3_fwd_mark = 1;
+ skb->offload_fwd_mark = 1;
+ return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
+}
+
+void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u16 local_port)
+{
+ mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
+}
+
+#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
+ MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
+ _is_ctrl, SP_##_trap_group, DISCARD)
+
+#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
+ MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \
+ _is_ctrl, SP_##_trap_group, DISCARD)
+
+#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
+ MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \
+ _is_ctrl, SP_##_trap_group, DISCARD)
+
+#define MLXSW_SP_EVENTL(_func, _trap_id) \
+ MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
+
+static const struct mlxsw_listener mlxsw_sp_listener[] = {
+ /* Events */
+ MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
+ /* L2 traps */
+ MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false),
+ /* L3 traps */
+ MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
+ false),
+ MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
+ MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
+ false),
+ MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
+ ROUTER_EXP, false),
+ MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
+ ROUTER_EXP, false),
+ MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD,
+ ROUTER_EXP, false),
+ MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
+ ROUTER_EXP, false),
+ /* Multicast Router Traps */
+ MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
+ MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
+ /* NVE traps */
+ MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
+};
+
+static const struct mlxsw_listener mlxsw_sp1_listener[] = {
+ /* Events */
+ MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
+ MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
+};
+
+static const struct mlxsw_listener mlxsw_sp2_listener[] = {
+ /* Events */
+ MLXSW_SP_EVENTL(mlxsw_sp_port_mapping_listener_func, PMLPE),
+};
+
+static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ char qpcr_pl[MLXSW_REG_QPCR_LEN];
+ enum mlxsw_reg_qpcr_ir_units ir_units;
+ int max_cpu_policers;
+ bool is_bytes;
+ u8 burst_size;
+ u32 rate;
+ int i, err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
+ return -EIO;
+
+ max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
+
+ ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
+ for (i = 0; i < max_cpu_policers; i++) {
+ is_bytes = false;
+ switch (i) {
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
+ rate = 1024;
+ burst_size = 7;
+ break;
+ default:
+ continue;
+ }
+
+ __set_bit(i, mlxsw_sp->trap->policers_usage);
+ mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
+ burst_size);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
+{
+ char htgt_pl[MLXSW_REG_HTGT_LEN];
+ enum mlxsw_reg_htgt_trap_group i;
+ int max_cpu_policers;
+ int max_trap_groups;
+ u8 priority, tc;
+ u16 policer_id;
+ int err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
+ return -EIO;
+
+ max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
+ max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
+
+ for (i = 0; i < max_trap_groups; i++) {
+ policer_id = i;
+ switch (i) {
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
+ priority = 1;
+ tc = 1;
+ break;
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
+ priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
+ tc = MLXSW_REG_HTGT_DEFAULT_TC;
+ policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
+ break;
+ default:
+ continue;
+ }
+
+ if (max_cpu_policers <= policer_id &&
+ policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
+ return -EIO;
+
+ mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_trap *trap;
+ u64 max_policers;
+ int err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
+ return -EIO;
+ max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
+ trap = kzalloc(struct_size(trap, policers_usage,
+ BITS_TO_LONGS(max_policers)), GFP_KERNEL);
+ if (!trap)
+ return -ENOMEM;
+ trap->max_policers = max_policers;
+ mlxsw_sp->trap = trap;
+
+ err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
+ if (err)
+ goto err_cpu_policers_set;
+
+ err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
+ if (err)
+ goto err_trap_groups_set;
+
+ err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp_listener,
+ ARRAY_SIZE(mlxsw_sp_listener),
+ mlxsw_sp);
+ if (err)
+ goto err_traps_register;
+
+ err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp->listeners,
+ mlxsw_sp->listeners_count, mlxsw_sp);
+ if (err)
+ goto err_extra_traps_init;
+
+ return 0;
+
+err_extra_traps_init:
+ mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
+ ARRAY_SIZE(mlxsw_sp_listener),
+ mlxsw_sp);
+err_traps_register:
+err_trap_groups_set:
+err_cpu_policers_set:
+ kfree(trap);
+ return err;
+}
+
+static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp->listeners,
+ mlxsw_sp->listeners_count,
+ mlxsw_sp);
+ mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
+ ARRAY_SIZE(mlxsw_sp_listener), mlxsw_sp);
+ kfree(mlxsw_sp->trap);
+}
+
+#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe
+
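+/* Configure the LAG hash: seed it with a value derived from the base MAC and
+ * hash on L2, VLAN, IP and L4 port fields. Also allocate the per-LAG upper
+ * tracking array according to the maximum number of LAGs the device reports.
+ */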
+static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
+{
+ char slcr_pl[MLXSW_REG_SLCR_LEN];
+ u16 max_lag;
+ u32 seed;
+ int err;
+
+ seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
+ MLXSW_SP_LAG_SEED_INIT);
+ mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
+ MLXSW_REG_SLCR_LAG_HASH_DMAC |
+ MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
+ MLXSW_REG_SLCR_LAG_HASH_VLANID |
+ MLXSW_REG_SLCR_LAG_HASH_SIP |
+ MLXSW_REG_SLCR_LAG_HASH_DIP |
+ MLXSW_REG_SLCR_LAG_HASH_SPORT |
+ MLXSW_REG_SLCR_LAG_HASH_DPORT |
+ MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
+ if (err)
+ return err;
+
+ err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
+ if (err)
+ return err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
+ return -EIO;
+
+ mlxsw_sp->lags = kcalloc(max_lag, sizeof(struct mlxsw_sp_upper),
+ GFP_KERNEL);
+ if (!mlxsw_sp->lags)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ kfree(mlxsw_sp->lags);
+}
+
+static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
+ .clock_init = mlxsw_sp1_ptp_clock_init,
+ .clock_fini = mlxsw_sp1_ptp_clock_fini,
+ .init = mlxsw_sp1_ptp_init,
+ .fini = mlxsw_sp1_ptp_fini,
+ .receive = mlxsw_sp1_ptp_receive,
+ .transmitted = mlxsw_sp1_ptp_transmitted,
+ .hwtstamp_get = mlxsw_sp1_ptp_hwtstamp_get,
+ .hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set,
+ .shaper_work = mlxsw_sp1_ptp_shaper_work,
+ .get_ts_info = mlxsw_sp1_ptp_get_ts_info,
+ .get_stats_count = mlxsw_sp1_get_stats_count,
+ .get_stats_strings = mlxsw_sp1_get_stats_strings,
+ .get_stats = mlxsw_sp1_get_stats,
+ .txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
+};
+
+static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
+ .clock_init = mlxsw_sp2_ptp_clock_init,
+ .clock_fini = mlxsw_sp2_ptp_clock_fini,
+ .init = mlxsw_sp2_ptp_init,
+ .fini = mlxsw_sp2_ptp_fini,
+ .receive = mlxsw_sp2_ptp_receive,
+ .transmitted = mlxsw_sp2_ptp_transmitted,
+ .hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get,
+ .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set,
+ .shaper_work = mlxsw_sp2_ptp_shaper_work,
+ .get_ts_info = mlxsw_sp2_ptp_get_ts_info,
+ .get_stats_count = mlxsw_sp2_get_stats_count,
+ .get_stats_strings = mlxsw_sp2_get_stats_strings,
+ .get_stats = mlxsw_sp2_get_stats,
+ .txhdr_construct = mlxsw_sp2_ptp_txhdr_construct,
+};
+
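+/* Spectrum-4 reuses the Spectrum-2 PTP implementation, except that it builds
+ * the TX header with the common helper rather than the Spectrum-2 specific
+ * one.
+ */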
+static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = {
+ .clock_init = mlxsw_sp2_ptp_clock_init,
+ .clock_fini = mlxsw_sp2_ptp_clock_fini,
+ .init = mlxsw_sp2_ptp_init,
+ .fini = mlxsw_sp2_ptp_fini,
+ .receive = mlxsw_sp2_ptp_receive,
+ .transmitted = mlxsw_sp2_ptp_transmitted,
+ .hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get,
+ .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set,
+ .shaper_work = mlxsw_sp2_ptp_shaper_work,
+ .get_ts_info = mlxsw_sp2_ptp_get_ts_info,
+ .get_stats_count = mlxsw_sp2_get_stats_count,
+ .get_stats_strings = mlxsw_sp2_get_stats_strings,
+ .get_stats = mlxsw_sp2_get_stats,
+ .txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
+};
+
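+/* Packet sampling triggers are kept in a hashtable keyed by trigger type and
+ * local port and are reference counted, so multiple users of the same trigger
+ * must request identical sampling parameters.
+ */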
+struct mlxsw_sp_sample_trigger_node {
+ struct mlxsw_sp_sample_trigger trigger;
+ struct mlxsw_sp_sample_params params;
+ struct rhash_head ht_node;
+ struct rcu_head rcu;
+ refcount_t refcount;
+};
+
+static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = {
+ .key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger),
+ .head_offset = offsetof(struct mlxsw_sp_sample_trigger_node, ht_node),
+ .key_len = sizeof(struct mlxsw_sp_sample_trigger),
+ .automatic_shrinking = true,
+};
+
+static void
+mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key,
+ const struct mlxsw_sp_sample_trigger *trigger)
+{
+ memset(key, 0, sizeof(*key));
+ key->type = trigger->type;
+ key->local_port = trigger->local_port;
+}
+
+/* RCU read lock must be held */
+struct mlxsw_sp_sample_params *
+mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_sample_trigger *trigger)
+{
+ struct mlxsw_sp_sample_trigger_node *trigger_node;
+ struct mlxsw_sp_sample_trigger key;
+
+ mlxsw_sp_sample_trigger_key_init(&key, trigger);
+ trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key,
+ mlxsw_sp_sample_trigger_ht_params);
+ if (!trigger_node)
+ return NULL;
+
+ return &trigger_node->params;
+}
+
+static int
+mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_sample_trigger *trigger,
+ const struct mlxsw_sp_sample_params *params)
+{
+ struct mlxsw_sp_sample_trigger_node *trigger_node;
+ int err;
+
+ trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL);
+ if (!trigger_node)
+ return -ENOMEM;
+
+ trigger_node->trigger = *trigger;
+ trigger_node->params = *params;
+ refcount_set(&trigger_node->refcount, 1);
+
+ err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht,
+ &trigger_node->ht_node,
+ mlxsw_sp_sample_trigger_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ return 0;
+
+err_rhashtable_insert:
+ kfree(trigger_node);
+ return err;
+}
+
+static void
+mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_sample_trigger_node *trigger_node)
+{
+ rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht,
+ &trigger_node->ht_node,
+ mlxsw_sp_sample_trigger_ht_params);
+ kfree_rcu(trigger_node, rcu);
+}
+
+int
+mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_sample_trigger *trigger,
+ const struct mlxsw_sp_sample_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_sample_trigger_node *trigger_node;
+ struct mlxsw_sp_sample_trigger key;
+
+ ASSERT_RTNL();
+
+ mlxsw_sp_sample_trigger_key_init(&key, trigger);
+
+ trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
+ &key,
+ mlxsw_sp_sample_trigger_ht_params);
+ if (!trigger_node)
+ return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key,
+ params);
+
+ if (trigger_node->trigger.local_port) {
+ NL_SET_ERR_MSG_MOD(extack, "Sampling already enabled on port");
+ return -EINVAL;
+ }
+
+ if (trigger_node->params.psample_group != params->psample_group ||
+ trigger_node->params.truncate != params->truncate ||
+ trigger_node->params.rate != params->rate ||
+ trigger_node->params.trunc_size != params->trunc_size) {
+ NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger");
+ return -EINVAL;
+ }
+
+ refcount_inc(&trigger_node->refcount);
+
+ return 0;
+}
+
+void
+mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_sample_trigger *trigger)
+{
+ struct mlxsw_sp_sample_trigger_node *trigger_node;
+ struct mlxsw_sp_sample_trigger key;
+
+ ASSERT_RTNL();
+
+ mlxsw_sp_sample_trigger_key_init(&key, trigger);
+
+ trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
+ &key,
+ mlxsw_sp_sample_trigger_ht_params);
+ if (!trigger_node)
+ return;
+
+ if (!refcount_dec_and_test(&trigger_node->refcount))
+ return;
+
+ mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node);
+}
+
+static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr);
+
+#define MLXSW_SP_DEFAULT_PARSING_DEPTH 96
+#define MLXSW_SP_INCREASED_PARSING_DEPTH 128
+#define MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT 4789
+
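+/* Parsing depth defaults to 96 bytes and is bumped to 128 bytes on demand by
+ * mlxsw_sp_parsing_depth_inc(); the increase is reference counted so it is
+ * only reverted once the last user is gone.
+ */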
+static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp)
+{
+ refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 0);
+ mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;
+ mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT;
+ mutex_init(&mlxsw_sp->parsing.lock);
+}
+
+static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mutex_destroy(&mlxsw_sp->parsing.lock);
+ WARN_ON_ONCE(refcount_read(&mlxsw_sp->parsing.parsing_depth_ref));
+}
+
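+/* IPv6 addresses that hardware references by KVDL index are written once via
+ * the RIPS register and shared through a reference-counted hashtable.
+ */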
+struct mlxsw_sp_ipv6_addr_node {
+ struct in6_addr key;
+ struct rhash_head ht_node;
+ u32 kvdl_index;
+ refcount_t refcount;
+};
+
+static const struct rhashtable_params mlxsw_sp_ipv6_addr_ht_params = {
+ .key_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, key),
+ .head_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, ht_node),
+ .key_len = sizeof(struct in6_addr),
+ .automatic_shrinking = true,
+};
+
+static int
+mlxsw_sp_ipv6_addr_init(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6,
+ u32 *p_kvdl_index)
+{
+ struct mlxsw_sp_ipv6_addr_node *node;
+ char rips_pl[MLXSW_REG_RIPS_LEN];
+ int err;
+
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp,
+ MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
+ p_kvdl_index);
+ if (err)
+ return err;
+
+ mlxsw_reg_rips_pack(rips_pl, *p_kvdl_index, addr6);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl);
+ if (err)
+ goto err_rips_write;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node) {
+ err = -ENOMEM;
+ goto err_node_alloc;
+ }
+
+ node->key = *addr6;
+ node->kvdl_index = *p_kvdl_index;
+ refcount_set(&node->refcount, 1);
+
+ err = rhashtable_insert_fast(&mlxsw_sp->ipv6_addr_ht,
+ &node->ht_node,
+ mlxsw_sp_ipv6_addr_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ return 0;
+
+err_rhashtable_insert:
+ kfree(node);
+err_node_alloc:
+err_rips_write:
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
+ *p_kvdl_index);
+ return err;
+}
+
+static void mlxsw_sp_ipv6_addr_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipv6_addr_node *node)
+{
+ u32 kvdl_index = node->kvdl_index;
+
+ rhashtable_remove_fast(&mlxsw_sp->ipv6_addr_ht, &node->ht_node,
+ mlxsw_sp_ipv6_addr_ht_params);
+ kfree(node);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
+ kvdl_index);
+}
+
+int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp,
+ const struct in6_addr *addr6,
+ u32 *p_kvdl_index)
+{
+ struct mlxsw_sp_ipv6_addr_node *node;
+ int err = 0;
+
+ mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
+ node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
+ mlxsw_sp_ipv6_addr_ht_params);
+ if (node) {
+ refcount_inc(&node->refcount);
+ *p_kvdl_index = node->kvdl_index;
+ goto out_unlock;
+ }
+
+ err = mlxsw_sp_ipv6_addr_init(mlxsw_sp, addr6, p_kvdl_index);
+
+out_unlock:
+ mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
+ return err;
+}
+
+void
+mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6)
+{
+ struct mlxsw_sp_ipv6_addr_node *node;
+
+ mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
+ node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
+ mlxsw_sp_ipv6_addr_ht_params);
+ if (WARN_ON(!node))
+ goto out_unlock;
+
+ if (!refcount_dec_and_test(&node->refcount))
+ goto out_unlock;
+
+ mlxsw_sp_ipv6_addr_fini(mlxsw_sp, node);
+
+out_unlock:
+ mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
+}
+
+static int mlxsw_sp_ipv6_addr_ht_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ err = rhashtable_init(&mlxsw_sp->ipv6_addr_ht,
+ &mlxsw_sp_ipv6_addr_ht_params);
+ if (err)
+ return err;
+
+ mutex_init(&mlxsw_sp->ipv6_addr_ht_lock);
+ return 0;
+}
+
+static void mlxsw_sp_ipv6_addr_ht_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mutex_destroy(&mlxsw_sp->ipv6_addr_ht_lock);
+ rhashtable_destroy(&mlxsw_sp->ipv6_addr_ht);
+}
+
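+/* Common initialization path shared by all Spectrum generations. The
+ * ASIC-specific init functions below fill in the various ops pointers and
+ * then call this function.
+ */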
+static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ int err;
+
+ mlxsw_sp->core = mlxsw_core;
+ mlxsw_sp->bus_info = mlxsw_bus_info;
+
+ mlxsw_sp_parsing_init(mlxsw_sp);
+ mlxsw_core_emad_string_tlv_enable(mlxsw_core);
+
+ err = mlxsw_sp_base_mac_get(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
+ return err;
+ }
+
+ err = mlxsw_sp_kvdl_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
+ return err;
+ }
+
+ err = mlxsw_sp_pgt_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PGT\n");
+ goto err_pgt_init;
+ }
+
+ err = mlxsw_sp_fids_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
+ goto err_fids_init;
+ }
+
+ err = mlxsw_sp_policers_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n");
+ goto err_policers_init;
+ }
+
+ err = mlxsw_sp_traps_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
+ goto err_traps_init;
+ }
+
+ err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
+ goto err_devlink_traps_init;
+ }
+
+ err = mlxsw_sp_buffers_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
+ goto err_buffers_init;
+ }
+
+ err = mlxsw_sp_lag_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
+ goto err_lag_init;
+ }
+
+ /* Initialize SPAN before router and switchdev, so that those components
+ * can call mlxsw_sp_span_respin().
+ */
+ err = mlxsw_sp_span_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
+ goto err_span_init;
+ }
+
+ err = mlxsw_sp_switchdev_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
+ goto err_switchdev_init;
+ }
+
+ err = mlxsw_sp_counter_pool_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
+ goto err_counter_pool_init;
+ }
+
+ err = mlxsw_sp_afa_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
+ goto err_afa_init;
+ }
+
+ err = mlxsw_sp_ipv6_addr_ht_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize hash table for IPv6 addresses\n");
+ goto err_ipv6_addr_ht_init;
+ }
+
+ err = mlxsw_sp_nve_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
+ goto err_nve_init;
+ }
+
+ err = mlxsw_sp_acl_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
+ goto err_acl_init;
+ }
+
+ err = mlxsw_sp_router_init(mlxsw_sp, extack);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
+ goto err_router_init;
+ }
+
+ if (mlxsw_sp->bus_info->read_clock_capable) {
+ /* NULL is a valid return value from clock_init */
+ mlxsw_sp->clock =
+ mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
+ mlxsw_sp->bus_info->dev);
+ if (IS_ERR(mlxsw_sp->clock)) {
+ err = PTR_ERR(mlxsw_sp->clock);
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
+ goto err_ptp_clock_init;
+ }
+ }
+
+ if (mlxsw_sp->clock) {
+ /* NULL is a valid return value from ptp_ops->init */
+ mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
+ if (IS_ERR(mlxsw_sp->ptp_state)) {
+ err = PTR_ERR(mlxsw_sp->ptp_state);
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
+ goto err_ptp_init;
+ }
+ }
+
+ /* Initialize netdevice notifier after SPAN is initialized, so that the
+ * event handler can call SPAN respin.
+ */
+ mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
+ err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->netdevice_nb);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
+ goto err_netdev_notifier;
+ }
+
+ err = mlxsw_sp_dpipe_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
+ goto err_dpipe_init;
+ }
+
+ err = mlxsw_sp_port_module_info_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
+ goto err_port_module_info_init;
+ }
+
+ err = rhashtable_init(&mlxsw_sp->sample_trigger_ht,
+ &mlxsw_sp_sample_trigger_ht_params);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n");
+ goto err_sample_trigger_init;
+ }
+
+ err = mlxsw_sp_ports_create(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
+ goto err_ports_create;
+ }
+
+ return 0;
+
+err_ports_create:
+ rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
+err_sample_trigger_init:
+ mlxsw_sp_port_module_info_fini(mlxsw_sp);
+err_port_module_info_init:
+ mlxsw_sp_dpipe_fini(mlxsw_sp);
+err_dpipe_init:
+ unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->netdevice_nb);
+err_netdev_notifier:
+ if (mlxsw_sp->clock)
+ mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
+err_ptp_init:
+ if (mlxsw_sp->clock)
+ mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
+err_ptp_clock_init:
+ mlxsw_sp_router_fini(mlxsw_sp);
+err_router_init:
+ mlxsw_sp_acl_fini(mlxsw_sp);
+err_acl_init:
+ mlxsw_sp_nve_fini(mlxsw_sp);
+err_nve_init:
+ mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
+err_ipv6_addr_ht_init:
+ mlxsw_sp_afa_fini(mlxsw_sp);
+err_afa_init:
+ mlxsw_sp_counter_pool_fini(mlxsw_sp);
+err_counter_pool_init:
+ mlxsw_sp_switchdev_fini(mlxsw_sp);
+err_switchdev_init:
+ mlxsw_sp_span_fini(mlxsw_sp);
+err_span_init:
+ mlxsw_sp_lag_fini(mlxsw_sp);
+err_lag_init:
+ mlxsw_sp_buffers_fini(mlxsw_sp);
+err_buffers_init:
+ mlxsw_sp_devlink_traps_fini(mlxsw_sp);
+err_devlink_traps_init:
+ mlxsw_sp_traps_fini(mlxsw_sp);
+err_traps_init:
+ mlxsw_sp_policers_fini(mlxsw_sp);
+err_policers_init:
+ mlxsw_sp_fids_fini(mlxsw_sp);
+err_fids_init:
+ mlxsw_sp_pgt_fini(mlxsw_sp);
+err_pgt_init:
+ mlxsw_sp_kvdl_fini(mlxsw_sp);
+ mlxsw_sp_parsing_fini(mlxsw_sp);
+ return err;
+}
+
+static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops;
+ mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
+ mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
+ mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
+ mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
+ mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
+ mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
+ mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
+ mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
+ mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
+ mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
+ mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
+ mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
+ mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
+ mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
+ mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
+ mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops;
+ mlxsw_sp->router_ops = &mlxsw_sp1_router_ops;
+ mlxsw_sp->listeners = mlxsw_sp1_listener;
+ mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
+ mlxsw_sp->fid_family_arr = mlxsw_sp1_fid_family_arr;
+ mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
+ mlxsw_sp->pgt_smpe_index_valid = true;
+
+ return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
+}
+
+static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
+ mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
+ mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
+ mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
+ mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
+ mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
+ mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
+ mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
+ mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
+ mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
+ mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
+ mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
+ mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
+ mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
+ mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
+ mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
+ mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
+ mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
+ mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
+ mlxsw_sp->listeners = mlxsw_sp2_listener;
+ mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
+ mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
+ mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
+ mlxsw_sp->pgt_smpe_index_valid = false;
+
+ return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
+}
+
+static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
+ mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
+ mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
+ mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
+ mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
+ mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
+ mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
+ mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
+ mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
+ mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
+ mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
+ mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
+ mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
+ mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
+ mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
+ mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
+ mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
+ mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
+ mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
+ mlxsw_sp->listeners = mlxsw_sp2_listener;
+ mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
+ mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
+ mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
+ mlxsw_sp->pgt_smpe_index_valid = false;
+
+ return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
+}
+
+static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
+ mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
+ mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
+ mlxsw_sp->afk_ops = &mlxsw_sp4_afk_ops;
+ mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
+ mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
+ mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
+ mlxsw_sp->acl_bf_ops = &mlxsw_sp4_acl_bf_ops;
+ mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
+ mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
+ mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
+ mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
+ mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
+ mlxsw_sp->ptp_ops = &mlxsw_sp4_ptp_ops;
+ mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
+ mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
+ mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
+ mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
+ mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
+ mlxsw_sp->listeners = mlxsw_sp2_listener;
+ mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
+ mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
+ mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4;
+ mlxsw_sp->pgt_smpe_index_valid = false;
+
+ return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
+}
+
+static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ mlxsw_sp_ports_remove(mlxsw_sp);
+ rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
+ mlxsw_sp_port_module_info_fini(mlxsw_sp);
+ mlxsw_sp_dpipe_fini(mlxsw_sp);
+ unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->netdevice_nb);
+ if (mlxsw_sp->clock) {
+ mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
+ mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
+ }
+ mlxsw_sp_router_fini(mlxsw_sp);
+ mlxsw_sp_acl_fini(mlxsw_sp);
+ mlxsw_sp_nve_fini(mlxsw_sp);
+ mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
+ mlxsw_sp_afa_fini(mlxsw_sp);
+ mlxsw_sp_counter_pool_fini(mlxsw_sp);
+ mlxsw_sp_switchdev_fini(mlxsw_sp);
+ mlxsw_sp_span_fini(mlxsw_sp);
+ mlxsw_sp_lag_fini(mlxsw_sp);
+ mlxsw_sp_buffers_fini(mlxsw_sp);
+ mlxsw_sp_devlink_traps_fini(mlxsw_sp);
+ mlxsw_sp_traps_fini(mlxsw_sp);
+ mlxsw_sp_policers_fini(mlxsw_sp);
+ mlxsw_sp_fids_fini(mlxsw_sp);
+ mlxsw_sp_pgt_fini(mlxsw_sp);
+ mlxsw_sp_kvdl_fini(mlxsw_sp);
+ mlxsw_sp_parsing_fini(mlxsw_sp);
+}
+
+static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
+ .used_flood_mode = 1,
+ .flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
+ .used_max_ib_mc = 1,
+ .max_ib_mc = 0,
+ .used_max_pkey = 1,
+ .max_pkey = 0,
+ .used_ubridge = 1,
+ .ubridge = 1,
+ .used_kvd_sizes = 1,
+ .kvd_hash_single_parts = 59,
+ .kvd_hash_double_parts = 41,
+ .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
+ .swid_config = {
+ {
+ .used_type = 1,
+ .type = MLXSW_PORT_SWID_TYPE_ETH,
+ }
+ },
+};
+
+static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
+ .used_flood_mode = 1,
+ .flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
+ .used_max_ib_mc = 1,
+ .max_ib_mc = 0,
+ .used_max_pkey = 1,
+ .max_pkey = 0,
+ .used_ubridge = 1,
+ .ubridge = 1,
+ .swid_config = {
+ {
+ .used_type = 1,
+ .type = MLXSW_PORT_SWID_TYPE_ETH,
+ }
+ },
+ .used_cqe_time_stamp_type = 1,
+ .cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
+};
+
+/* Reduce number of LAGs from full capacity (256) to the maximum supported LAGs
+ * in Spectrum-2/3, to avoid regression in number of free entries in the PGT
+ * table.
+ */
+#define MLXSW_SP4_CONFIG_PROFILE_MAX_LAG 128
+
+static const struct mlxsw_config_profile mlxsw_sp4_config_profile = {
+ .used_max_lag = 1,
+ .max_lag = MLXSW_SP4_CONFIG_PROFILE_MAX_LAG,
+ .used_flood_mode = 1,
+ .flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
+ .used_max_ib_mc = 1,
+ .max_ib_mc = 0,
+ .used_max_pkey = 1,
+ .max_pkey = 0,
+ .used_ubridge = 1,
+ .ubridge = 1,
+ .swid_config = {
+ {
+ .used_type = 1,
+ .type = MLXSW_PORT_SWID_TYPE_ETH,
+ }
+ },
+ .used_cqe_time_stamp_type = 1,
+ .cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
+};
+
+static void
+mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
+ struct devlink_resource_size_params *kvd_size_params,
+ struct devlink_resource_size_params *linear_size_params,
+ struct devlink_resource_size_params *hash_double_size_params,
+ struct devlink_resource_size_params *hash_single_size_params)
+{
+ u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
+ KVD_SINGLE_MIN_SIZE);
+ u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
+ KVD_DOUBLE_MIN_SIZE);
+ u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
+ u32 linear_size_min = 0;
+
+ devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
+ MLXSW_SP_KVD_GRANULARITY,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ devlink_resource_size_params_init(linear_size_params, linear_size_min,
+ kvd_size - single_size_min -
+ double_size_min,
+ MLXSW_SP_KVD_GRANULARITY,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ devlink_resource_size_params_init(hash_double_size_params,
+ double_size_min,
+ kvd_size - single_size_min -
+ linear_size_min,
+ MLXSW_SP_KVD_GRANULARITY,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ devlink_resource_size_params_init(hash_single_size_params,
+ single_size_min,
+ kvd_size - double_size_min -
+ linear_size_min,
+ MLXSW_SP_KVD_GRANULARITY,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+}
+
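+/* Spectrum-1 exposes the KVD to devlink as a parent resource with linear,
+ * hash-single and hash-double children, split according to the parts ratio in
+ * the config profile and rounded to the KVD granularity.
+ */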
+static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct devlink_resource_size_params hash_single_size_params;
+ struct devlink_resource_size_params hash_double_size_params;
+ struct devlink_resource_size_params linear_size_params;
+ struct devlink_resource_size_params kvd_size_params;
+ u32 kvd_size, single_size, double_size, linear_size;
+ const struct mlxsw_config_profile *profile;
+ int err;
+
+ profile = &mlxsw_sp1_config_profile;
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
+ return -EIO;
+
+ mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
+ &linear_size_params,
+ &hash_double_size_params,
+ &hash_single_size_params);
+
+ kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
+ kvd_size, MLXSW_SP_RESOURCE_KVD,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &kvd_size_params);
+ if (err)
+ return err;
+
+ linear_size = profile->kvd_linear_size;
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
+ linear_size,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ MLXSW_SP_RESOURCE_KVD,
+ &linear_size_params);
+ if (err)
+ return err;
+
+ err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
+ if (err)
+ return err;
+
+ double_size = kvd_size - linear_size;
+ double_size *= profile->kvd_hash_double_parts;
+ double_size /= profile->kvd_hash_double_parts +
+ profile->kvd_hash_single_parts;
+ double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
+ double_size,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ MLXSW_SP_RESOURCE_KVD,
+ &hash_double_size_params);
+ if (err)
+ return err;
+
+ single_size = kvd_size - double_size - linear_size;
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
+ single_size,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ MLXSW_SP_RESOURCE_KVD,
+ &hash_single_size_params);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct devlink_resource_size_params kvd_size_params;
+ u32 kvd_size;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
+ return -EIO;
+
+ kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
+ devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
+ MLXSW_SP_KVD_GRANULARITY,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
+ kvd_size, MLXSW_SP_RESOURCE_KVD,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &kvd_size_params);
+}
+
+static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct devlink_resource_size_params span_size_params;
+ u32 max_span;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
+ return -EIO;
+
+ max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
+ devlink_resource_size_params_init(&span_size_params, max_span, max_span,
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
+ max_span, MLXSW_SP_RESOURCE_SPAN,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &span_size_params);
+}
+
+static int
+mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct devlink_resource_size_params size_params;
+ u8 max_rif_mac_profiles;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIF_MAC_PROFILES))
+ max_rif_mac_profiles = 1;
+ else
+ max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core,
+ MAX_RIF_MAC_PROFILES);
+ devlink_resource_size_params_init(&size_params, max_rif_mac_profiles,
+ max_rif_mac_profiles, 1,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ return devl_resource_register(devlink,
+ "rif_mac_profiles",
+ max_rif_mac_profiles,
+ MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
+}
+
+static int mlxsw_sp_resources_rifs_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct devlink_resource_size_params size_params;
+ u64 max_rifs;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIFS))
+ return -EIO;
+
+ max_rifs = MLXSW_CORE_RES_GET(mlxsw_core, MAX_RIFS);
+ devlink_resource_size_params_init(&size_params, max_rifs, max_rifs,
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ return devl_resource_register(devlink, "rifs", max_rifs,
+ MLXSW_SP_RESOURCE_RIFS,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
+}
+
+static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
+{
+ int err;
+
+ err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_resources_span_register(mlxsw_core);
+ if (err)
+ goto err_resources_span_register;
+
+ err = mlxsw_sp_counter_resources_register(mlxsw_core);
+ if (err)
+ goto err_resources_counter_register;
+
+ err = mlxsw_sp_policer_resources_register(mlxsw_core);
+ if (err)
+ goto err_policer_resources_register;
+
+ err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
+ if (err)
+ goto err_resources_rif_mac_profile_register;
+
+ err = mlxsw_sp_resources_rifs_register(mlxsw_core);
+ if (err)
+ goto err_resources_rifs_register;
+
+ return 0;
+
+err_resources_rifs_register:
+err_resources_rif_mac_profile_register:
+err_policer_resources_register:
+err_resources_counter_register:
+err_resources_span_register:
+ devl_resources_unregister(priv_to_devlink(mlxsw_core));
+ return err;
+}
+
+static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
+{
+ int err;
+
+ err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_resources_span_register(mlxsw_core);
+ if (err)
+ goto err_resources_span_register;
+
+ err = mlxsw_sp_counter_resources_register(mlxsw_core);
+ if (err)
+ goto err_resources_counter_register;
+
+ err = mlxsw_sp_policer_resources_register(mlxsw_core);
+ if (err)
+ goto err_policer_resources_register;
+
+ err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
+ if (err)
+ goto err_resources_rif_mac_profile_register;
+
+ err = mlxsw_sp_resources_rifs_register(mlxsw_core);
+ if (err)
+ goto err_resources_rifs_register;
+
+ return 0;
+
+err_resources_rifs_register:
+err_resources_rif_mac_profile_register:
+err_policer_resources_register:
+err_resources_counter_register:
+err_resources_span_register:
+ devl_resources_unregister(priv_to_devlink(mlxsw_core));
+ return err;
+}
+
+static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile,
+ u64 *p_single_size, u64 *p_double_size,
+ u64 *p_linear_size)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ u32 double_size;
+ int err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
+ return -EIO;
+
+	/* The hash part is what is left of the KVD without the
+	 * linear part. It is split into the single and double
+	 * sizes according to the parts ratio from the profile.
+	 * Both sizes must be a multiple of the granularity from
+	 * the profile. In case the user provided the sizes, they
+	 * are obtained via devlink.
+	 */
+ err = devl_resource_size_get(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ p_linear_size);
+ if (err)
+ *p_linear_size = profile->kvd_linear_size;
+
+ err = devl_resource_size_get(devlink,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ p_double_size);
+ if (err) {
+ double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
+ *p_linear_size;
+ double_size *= profile->kvd_hash_double_parts;
+ double_size /= profile->kvd_hash_double_parts +
+ profile->kvd_hash_single_parts;
+ *p_double_size = rounddown(double_size,
+ MLXSW_SP_KVD_GRANULARITY);
+ }
+
+ err = devl_resource_size_get(devlink,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ p_single_size);
+ if (err)
+ *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
+ *p_double_size - *p_linear_size;
+
+ /* Check results are legal. */
+ if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
+ *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
+ MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
+ return -EIO;
+
+ return 0;
+}
+
+static int
+mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
+ return 0;
+}
+
+static int
+mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
+}
+
+static const struct devlink_param mlxsw_sp2_devlink_params[] = {
+ DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
+ "acl_region_rehash_interval",
+ DEVLINK_PARAM_TYPE_U32,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ mlxsw_sp_params_acl_region_rehash_intrvl_get,
+ mlxsw_sp_params_acl_region_rehash_intrvl_set,
+ NULL),
+};
+
+static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ union devlink_param_value value;
+ int err;
+
+ err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
+ ARRAY_SIZE(mlxsw_sp2_devlink_params));
+ if (err)
+ return err;
+
+ value.vu32 = 0;
+ devlink_param_driverinit_value_set(devlink,
+ MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
+ value);
+ return 0;
+}
+
+static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
+{
+ devlink_params_unregister(priv_to_devlink(mlxsw_core),
+ mlxsw_sp2_devlink_params,
+ ARRAY_SIZE(mlxsw_sp2_devlink_params));
+}
+
+static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
+ struct sk_buff *skb, u16 local_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ skb_pull(skb, MLXSW_TXHDR_LEN);
+ mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
+}
+
+static struct mlxsw_driver mlxsw_sp1_driver = {
+ .kind = mlxsw_sp1_driver_name,
+ .priv_size = sizeof(struct mlxsw_sp),
+ .fw_req_rev = &mlxsw_sp1_fw_rev,
+ .fw_filename = MLXSW_SP1_FW_FILENAME,
+ .init = mlxsw_sp1_init,
+ .fini = mlxsw_sp_fini,
+ .port_split = mlxsw_sp_port_split,
+ .port_unsplit = mlxsw_sp_port_unsplit,
+ .sb_pool_get = mlxsw_sp_sb_pool_get,
+ .sb_pool_set = mlxsw_sp_sb_pool_set,
+ .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
+ .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
+ .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
+ .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
+ .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
+ .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
+ .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
+ .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
+ .trap_init = mlxsw_sp_trap_init,
+ .trap_fini = mlxsw_sp_trap_fini,
+ .trap_action_set = mlxsw_sp_trap_action_set,
+ .trap_group_init = mlxsw_sp_trap_group_init,
+ .trap_group_set = mlxsw_sp_trap_group_set,
+ .trap_policer_init = mlxsw_sp_trap_policer_init,
+ .trap_policer_fini = mlxsw_sp_trap_policer_fini,
+ .trap_policer_set = mlxsw_sp_trap_policer_set,
+ .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
+ .txhdr_construct = mlxsw_sp_txhdr_construct,
+ .resources_register = mlxsw_sp1_resources_register,
+ .kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
+ .ptp_transmitted = mlxsw_sp_ptp_transmitted,
+ .txhdr_len = MLXSW_TXHDR_LEN,
+ .profile = &mlxsw_sp1_config_profile,
+ .sdq_supports_cqe_v2 = false,
+};
+
+static struct mlxsw_driver mlxsw_sp2_driver = {
+ .kind = mlxsw_sp2_driver_name,
+ .priv_size = sizeof(struct mlxsw_sp),
+ .fw_req_rev = &mlxsw_sp2_fw_rev,
+ .fw_filename = MLXSW_SP2_FW_FILENAME,
+ .init = mlxsw_sp2_init,
+ .fini = mlxsw_sp_fini,
+ .port_split = mlxsw_sp_port_split,
+ .port_unsplit = mlxsw_sp_port_unsplit,
+ .ports_remove_selected = mlxsw_sp_ports_remove_selected,
+ .sb_pool_get = mlxsw_sp_sb_pool_get,
+ .sb_pool_set = mlxsw_sp_sb_pool_set,
+ .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
+ .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
+ .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
+ .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
+ .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
+ .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
+ .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
+ .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
+ .trap_init = mlxsw_sp_trap_init,
+ .trap_fini = mlxsw_sp_trap_fini,
+ .trap_action_set = mlxsw_sp_trap_action_set,
+ .trap_group_init = mlxsw_sp_trap_group_init,
+ .trap_group_set = mlxsw_sp_trap_group_set,
+ .trap_policer_init = mlxsw_sp_trap_policer_init,
+ .trap_policer_fini = mlxsw_sp_trap_policer_fini,
+ .trap_policer_set = mlxsw_sp_trap_policer_set,
+ .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
+ .txhdr_construct = mlxsw_sp_txhdr_construct,
+ .resources_register = mlxsw_sp2_resources_register,
+ .params_register = mlxsw_sp2_params_register,
+ .params_unregister = mlxsw_sp2_params_unregister,
+ .ptp_transmitted = mlxsw_sp_ptp_transmitted,
+ .txhdr_len = MLXSW_TXHDR_LEN,
+ .profile = &mlxsw_sp2_config_profile,
+ .sdq_supports_cqe_v2 = true,
+};
+
+static struct mlxsw_driver mlxsw_sp3_driver = {
+ .kind = mlxsw_sp3_driver_name,
+ .priv_size = sizeof(struct mlxsw_sp),
+ .fw_req_rev = &mlxsw_sp3_fw_rev,
+ .fw_filename = MLXSW_SP3_FW_FILENAME,
+ .init = mlxsw_sp3_init,
+ .fini = mlxsw_sp_fini,
+ .port_split = mlxsw_sp_port_split,
+ .port_unsplit = mlxsw_sp_port_unsplit,
+ .ports_remove_selected = mlxsw_sp_ports_remove_selected,
+ .sb_pool_get = mlxsw_sp_sb_pool_get,
+ .sb_pool_set = mlxsw_sp_sb_pool_set,
+ .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
+ .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
+ .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
+ .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
+ .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
+ .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
+ .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
+ .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
+ .trap_init = mlxsw_sp_trap_init,
+ .trap_fini = mlxsw_sp_trap_fini,
+ .trap_action_set = mlxsw_sp_trap_action_set,
+ .trap_group_init = mlxsw_sp_trap_group_init,
+ .trap_group_set = mlxsw_sp_trap_group_set,
+ .trap_policer_init = mlxsw_sp_trap_policer_init,
+ .trap_policer_fini = mlxsw_sp_trap_policer_fini,
+ .trap_policer_set = mlxsw_sp_trap_policer_set,
+ .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
+ .txhdr_construct = mlxsw_sp_txhdr_construct,
+ .resources_register = mlxsw_sp2_resources_register,
+ .params_register = mlxsw_sp2_params_register,
+ .params_unregister = mlxsw_sp2_params_unregister,
+ .ptp_transmitted = mlxsw_sp_ptp_transmitted,
+ .txhdr_len = MLXSW_TXHDR_LEN,
+ .profile = &mlxsw_sp2_config_profile,
+ .sdq_supports_cqe_v2 = true,
+};
+
+static struct mlxsw_driver mlxsw_sp4_driver = {
+ .kind = mlxsw_sp4_driver_name,
+ .priv_size = sizeof(struct mlxsw_sp),
+ .init = mlxsw_sp4_init,
+ .fini = mlxsw_sp_fini,
+ .port_split = mlxsw_sp_port_split,
+ .port_unsplit = mlxsw_sp_port_unsplit,
+ .ports_remove_selected = mlxsw_sp_ports_remove_selected,
+ .sb_pool_get = mlxsw_sp_sb_pool_get,
+ .sb_pool_set = mlxsw_sp_sb_pool_set,
+ .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
+ .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
+ .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
+ .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
+ .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
+ .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
+ .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
+ .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
+ .trap_init = mlxsw_sp_trap_init,
+ .trap_fini = mlxsw_sp_trap_fini,
+ .trap_action_set = mlxsw_sp_trap_action_set,
+ .trap_group_init = mlxsw_sp_trap_group_init,
+ .trap_group_set = mlxsw_sp_trap_group_set,
+ .trap_policer_init = mlxsw_sp_trap_policer_init,
+ .trap_policer_fini = mlxsw_sp_trap_policer_fini,
+ .trap_policer_set = mlxsw_sp_trap_policer_set,
+ .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
+ .txhdr_construct = mlxsw_sp_txhdr_construct,
+ .resources_register = mlxsw_sp2_resources_register,
+ .params_register = mlxsw_sp2_params_register,
+ .params_unregister = mlxsw_sp2_params_unregister,
+ .ptp_transmitted = mlxsw_sp_ptp_transmitted,
+ .txhdr_len = MLXSW_TXHDR_LEN,
+ .profile = &mlxsw_sp4_config_profile,
+ .sdq_supports_cqe_v2 = true,
+};
+
+bool mlxsw_sp_port_dev_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
+}
+
+static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev,
+ struct netdev_nested_priv *priv)
+{
+ int ret = 0;
+
+ if (mlxsw_sp_port_dev_check(lower_dev)) {
+ priv->data = (void *)netdev_priv(lower_dev);
+ ret = 1;
+ }
+
+ return ret;
+}
+
+struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
+{
+ struct netdev_nested_priv priv = {
+ .data = NULL,
+ };
+
+ if (mlxsw_sp_port_dev_check(dev))
+ return netdev_priv(dev);
+
+ netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv);
+
+ return (struct mlxsw_sp_port *)priv.data;
+}
+
+struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port;
+
+ mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
+ return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
+}
+
+struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
+{
+ struct netdev_nested_priv priv = {
+ .data = NULL,
+ };
+
+ if (mlxsw_sp_port_dev_check(dev))
+ return netdev_priv(dev);
+
+ netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
+ &priv);
+
+ return (struct mlxsw_sp_port *)priv.data;
+}
+
+struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port;
+
+ rcu_read_lock();
+ mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
+ if (mlxsw_sp_port)
+ dev_hold(mlxsw_sp_port->dev);
+ rcu_read_unlock();
+ return mlxsw_sp_port;
+}
+
+void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ dev_put(mlxsw_sp_port->dev);
+}
+
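+/* Increase the parsing depth on first use and restore the default when the
+ * last user goes away. The MPRS register configures the VxLAN UDP destination
+ * port together with the depth, so the current port is re-written as well.
+ */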
+int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp)
+{
+ char mprs_pl[MLXSW_REG_MPRS_LEN];
+ int err = 0;
+
+ mutex_lock(&mlxsw_sp->parsing.lock);
+
+ if (refcount_inc_not_zero(&mlxsw_sp->parsing.parsing_depth_ref))
+ goto out_unlock;
+
+ mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_INCREASED_PARSING_DEPTH,
+ mlxsw_sp->parsing.vxlan_udp_dport);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
+ if (err)
+ goto out_unlock;
+
+ mlxsw_sp->parsing.parsing_depth = MLXSW_SP_INCREASED_PARSING_DEPTH;
+ refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 1);
+
+out_unlock:
+ mutex_unlock(&mlxsw_sp->parsing.lock);
+ return err;
+}
+
+void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp)
+{
+ char mprs_pl[MLXSW_REG_MPRS_LEN];
+
+ mutex_lock(&mlxsw_sp->parsing.lock);
+
+ if (!refcount_dec_and_test(&mlxsw_sp->parsing.parsing_depth_ref))
+ goto out_unlock;
+
+ mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_DEFAULT_PARSING_DEPTH,
+ mlxsw_sp->parsing.vxlan_udp_dport);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
+ mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;
+
+out_unlock:
+ mutex_unlock(&mlxsw_sp->parsing.lock);
+}
+
+int mlxsw_sp_parsing_vxlan_udp_dport_set(struct mlxsw_sp *mlxsw_sp,
+ __be16 udp_dport)
+{
+ char mprs_pl[MLXSW_REG_MPRS_LEN];
+ int err;
+
+ mutex_lock(&mlxsw_sp->parsing.lock);
+
+ mlxsw_reg_mprs_pack(mprs_pl, mlxsw_sp->parsing.parsing_depth,
+ be16_to_cpu(udp_dport));
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
+ if (err)
+ goto out_unlock;
+
+ mlxsw_sp->parsing.vxlan_udp_dport = be16_to_cpu(udp_dport);
+
+out_unlock:
+ mutex_unlock(&mlxsw_sp->parsing.lock);
+ return err;
+}
+
+static void
+mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *lag_dev)
+{
+ struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
+ struct net_device *upper_dev;
+ struct list_head *iter;
+
+ if (netif_is_bridge_port(lag_dev))
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);
+
+ netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
+ if (!netif_is_bridge_port(upper_dev))
+ continue;
+ br_dev = netdev_master_upper_dev_get(upper_dev);
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
+ }
+}
+
+static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
+{
+ char sldr_pl[MLXSW_REG_SLDR_LEN];
+
+ mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
+}
+
+static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
+{
+ char sldr_pl[MLXSW_REG_SLDR_LEN];
+
+ mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
+}
+
+static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 lag_id, u8 port_index)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char slcor_pl[MLXSW_REG_SLCOR_LEN];
+
+ mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
+ lag_id, port_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
+}
+
+static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 lag_id)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char slcor_pl[MLXSW_REG_SLCOR_LEN];
+
+ mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
+ lag_id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
+}
+
+static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 lag_id)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char slcor_pl[MLXSW_REG_SLCOR_LEN];
+
+ mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
+ lag_id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
+}
+
+static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 lag_id)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char slcor_pl[MLXSW_REG_SLCOR_LEN];
+
+ mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
+ lag_id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
+}
+
+static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *lag_dev,
+ u16 *p_lag_id)
+{
+ struct mlxsw_sp_upper *lag;
+ int free_lag_id = -1;
+ u16 max_lag;
+ int err, i;
+
+ err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
+ if (err)
+ return err;
+
+ for (i = 0; i < max_lag; i++) {
+ lag = mlxsw_sp_lag_get(mlxsw_sp, i);
+ if (lag->ref_count) {
+ if (lag->dev == lag_dev) {
+ *p_lag_id = i;
+ return 0;
+ }
+ } else if (free_lag_id < 0) {
+ free_lag_id = i;
+ }
+ }
+ if (free_lag_id < 0)
+ return -EBUSY;
+ *p_lag_id = free_lag_id;
+ return 0;
+}
+
+static bool
+mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *lag_dev,
+ struct netdev_lag_upper_info *lag_upper_info,
+ struct netlink_ext_ack *extack)
+{
+ u16 lag_id;
+
+ if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
+ return false;
+ }
+ if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
+ return false;
+ }
+ return true;
+}
+
+static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
+ u16 lag_id, u8 *p_port_index)
+{
+ u64 max_lag_members;
+ int i;
+
+ max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ MAX_LAG_MEMBERS);
+ for (i = 0; i < max_lag_members; i++) {
+ if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
+ *p_port_index = i;
+ return 0;
+ }
+ }
+ return -EBUSY;
+}
+
+static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *lag_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_upper *lag;
+ u16 lag_id;
+ u8 port_index;
+ int err;
+
+ err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
+ if (err)
+ return err;
+ lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
+ if (!lag->ref_count) {
+ err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
+ if (err)
+ return err;
+ lag->dev = lag_dev;
+ }
+
+ err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
+ if (err)
+ return err;
+ err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
+ if (err)
+ goto err_col_port_add;
+
+ mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
+ mlxsw_sp_port->local_port);
+ mlxsw_sp_port->lag_id = lag_id;
+ mlxsw_sp_port->lagged = 1;
+ lag->ref_count++;
+
+ /* Port is no longer usable as a router interface */
+ if (mlxsw_sp_port->default_vlan->fid)
+ mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
+
+	/* Join a router interface configured on the LAG, if one exists */
+ err = mlxsw_sp_port_vlan_router_join(mlxsw_sp_port->default_vlan,
+ lag_dev, extack);
+ if (err)
+ goto err_router_join;
+
+ return 0;
+
+err_router_join:
+ lag->ref_count--;
+ mlxsw_sp_port->lagged = 0;
+ mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
+ mlxsw_sp_port->local_port);
+ mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
+err_col_port_add:
+ if (!lag->ref_count)
+ mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
+ return err;
+}
+
+static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *lag_dev)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u16 lag_id = mlxsw_sp_port->lag_id;
+ struct mlxsw_sp_upper *lag;
+
+ if (!mlxsw_sp_port->lagged)
+ return;
+ lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
+ WARN_ON(lag->ref_count == 0);
+
+ mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
+
+ /* Any VLANs configured on the port are no longer valid */
+ mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
+ mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
+	/* Make the LAG and its directly linked uppers leave the bridges
+	 * they are member of.
+	 */
+ mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);
+
+ if (lag->ref_count == 1)
+ mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
+
+ mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
+ mlxsw_sp_port->local_port);
+ mlxsw_sp_port->lagged = 0;
+ lag->ref_count--;
+
+ /* Make sure untagged frames are allowed to ingress */
+ mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
+ ETH_P_8021Q);
+}
+
+static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 lag_id)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char sldr_pl[MLXSW_REG_SLDR_LEN];
+
+ mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
+ mlxsw_sp_port->local_port);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
+}
+
+static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 lag_id)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char sldr_pl[MLXSW_REG_SLDR_LEN];
+
+ mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
+ mlxsw_sp_port->local_port);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
+}
+
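+/* Collection is enabled before the port is added to the LAG distributor, and
+ * the port is removed from the distributor before collection is disabled.
+ */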
+static int
+mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int err;
+
+ err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
+ mlxsw_sp_port->lag_id);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
+ if (err)
+ goto err_dist_port_add;
+
+ return 0;
+
+err_dist_port_add:
+ mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
+ return err;
+}
+
+static int
+mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int err;
+
+ err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
+ mlxsw_sp_port->lag_id);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
+ mlxsw_sp_port->lag_id);
+ if (err)
+ goto err_col_port_disable;
+
+ return 0;
+
+err_col_port_disable:
+ mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
+ return err;
+}
+
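+/* Reflect the LAG lower state reported by the LAG driver: when tx_enabled is
+ * set, enable the port's collector and distributor, otherwise disable them.
+ */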
+static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netdev_lag_lower_state_info *info)
+{
+ if (info->tx_enabled)
+ return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
+ else
+ return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
+}
+
+static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ enum mlxsw_reg_spms_state spms_state;
+ char *spms_pl;
+ u16 vid;
+ int err;
+
+ spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
+ MLXSW_REG_SPMS_STATE_DISCARDING;
+
+ spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
+ if (!spms_pl)
+ return -ENOMEM;
+ mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
+
+ for (vid = 0; vid < VLAN_N_VID; vid++)
+ mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
+
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
+ kfree(spms_pl);
+ return err;
+}
+
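+/* Prepare the port for enslavement to an OVS master: switch it to virtual
+ * port (VP) mode, set all VLANs to the forwarding STP state, make the port a
+ * tagged member of all VLANs and disable learning on them.
+ */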
+static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ u16 vid = 1;
+ int err;
+
+ err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
+ if (err)
+ return err;
+ err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
+ if (err)
+ goto err_port_stp_set;
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
+ true, false);
+ if (err)
+ goto err_port_vlan_set;
+
+ for (; vid <= VLAN_N_VID - 1; vid++) {
+ err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
+ vid, false);
+ if (err)
+ goto err_vid_learning_set;
+ }
+
+ return 0;
+
+err_vid_learning_set:
+ for (vid--; vid >= 1; vid--)
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
+err_port_vlan_set:
+ mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
+err_port_stp_set:
+ mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
+ return err;
+}
+
+static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ u16 vid;
+
+ for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
+ vid, true);
+
+ mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
+ false, false);
+ mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
+ mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
+}
+
+static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
+{
+ unsigned int num_vxlans = 0;
+ struct net_device *dev;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(br_dev, dev, iter) {
+ if (netif_is_vxlan(dev))
+ num_vxlans++;
+ }
+
+ return num_vxlans > 1;
+}
+
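+/* In a VLAN-aware bridge, multiple VxLAN devices can only be offloaded if no
+ * two of them use the same VLAN as PVID and egress untagged.
+ */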
+static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
+{
+ DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
+ struct net_device *dev;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(br_dev, dev, iter) {
+ u16 pvid;
+ int err;
+
+ if (!netif_is_vxlan(dev))
+ continue;
+
+ err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
+ if (err || !pvid)
+ continue;
+
+ if (test_and_set_bit(pvid, vlans))
+ return false;
+ }
+
+ return true;
+}
+
+static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
+ struct netlink_ext_ack *extack)
+{
+ if (br_multicast_enabled(br_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
+ return false;
+ }
+
+ if (!br_vlan_enabled(br_dev) &&
+ mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
+ return false;
+ }
+
+ if (br_vlan_enabled(br_dev) &&
+ !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
+ return false;
+ }
+
+ return true;
+}
+
+static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
+ struct net_device *dev,
+ unsigned long event, void *ptr)
+{
+ struct netdev_notifier_changeupper_info *info;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct netlink_ext_ack *extack;
+ struct net_device *upper_dev;
+ struct mlxsw_sp *mlxsw_sp;
+ int err = 0;
+ u16 proto;
+
+ mlxsw_sp_port = netdev_priv(dev);
+ mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ info = ptr;
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (!is_vlan_dev(upper_dev) &&
+ !netif_is_lag_master(upper_dev) &&
+ !netif_is_bridge_master(upper_dev) &&
+ !netif_is_ovs_master(upper_dev) &&
+ !netif_is_macvlan(upper_dev) &&
+ !netif_is_l3_master(upper_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
+ return -EINVAL;
+ }
+ if (!info->linking)
+ break;
+ if (netif_is_bridge_master(upper_dev) &&
+ !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
+ mlxsw_sp_bridge_has_vxlan(upper_dev) &&
+ !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
+ return -EOPNOTSUPP;
+ if (netdev_has_any_upper_dev(upper_dev) &&
+ (!netif_is_bridge_master(upper_dev) ||
+ !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
+ upper_dev))) {
+ NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
+ return -EINVAL;
+ }
+ if (netif_is_lag_master(upper_dev) &&
+ !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
+ info->upper_info, extack))
+ return -EINVAL;
+ if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
+ return -EINVAL;
+ }
+ if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
+ !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
+ NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
+ return -EINVAL;
+ }
+ if (netif_is_macvlan(upper_dev) &&
+ !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
+ return -EOPNOTSUPP;
+ }
+ if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
+ return -EINVAL;
+ }
+ if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
+ return -EINVAL;
+ }
+ if (netif_is_bridge_master(upper_dev)) {
+ br_vlan_get_proto(upper_dev, &proto);
+ if (br_vlan_enabled(upper_dev) &&
+ proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
+ NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported");
+ return -EOPNOTSUPP;
+ }
+ if (vlan_uses_dev(lower_dev) &&
+ br_vlan_enabled(upper_dev) &&
+ proto == ETH_P_8021AD) {
+ NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported");
+ return -EOPNOTSUPP;
+ }
+ }
+ if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) {
+ struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev);
+
+ if (br_vlan_enabled(br_dev)) {
+ br_vlan_get_proto(br_dev, &proto);
+ if (proto == ETH_P_8021AD) {
+ NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge");
+ return -EOPNOTSUPP;
+ }
+ }
+ }
+ if (is_vlan_dev(upper_dev) &&
+ ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
+ NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
+ return -EOPNOTSUPP;
+ }
+ break;
+ case NETDEV_CHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (netif_is_bridge_master(upper_dev)) {
+ if (info->linking)
+ err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
+ lower_dev,
+ upper_dev,
+ extack);
+ else
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
+ lower_dev,
+ upper_dev);
+ } else if (netif_is_lag_master(upper_dev)) {
+ if (info->linking) {
+ err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
+ upper_dev, extack);
+ } else {
+ mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
+ mlxsw_sp_port_lag_leave(mlxsw_sp_port,
+ upper_dev);
+ }
+ } else if (netif_is_ovs_master(upper_dev)) {
+ if (info->linking)
+ err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
+ else
+ mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
+ } else if (netif_is_macvlan(upper_dev)) {
+ if (!info->linking)
+ mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
+ } else if (is_vlan_dev(upper_dev)) {
+ struct net_device *br_dev;
+
+ if (!netif_is_bridge_port(upper_dev))
+ break;
+ if (info->linking)
+ break;
+ br_dev = netdev_master_upper_dev_get(upper_dev);
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
+ br_dev);
+ }
+ break;
+ }
+
+ return err;
+}
+
+static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
+ unsigned long event, void *ptr)
+{
+ struct netdev_notifier_changelowerstate_info *info;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ int err;
+
+ mlxsw_sp_port = netdev_priv(dev);
+ info = ptr;
+
+ switch (event) {
+ case NETDEV_CHANGELOWERSTATE:
+ if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
+ err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
+ info->lower_state_info);
+ if (err)
+ netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
+ struct net_device *port_dev,
+ unsigned long event, void *ptr)
+{
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ case NETDEV_CHANGEUPPER:
+ return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
+ event, ptr);
+ case NETDEV_CHANGELOWERSTATE:
+ return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
+ ptr);
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev;
+ struct list_head *iter;
+ int ret;
+
+ netdev_for_each_lower_dev(lag_dev, dev, iter) {
+ if (mlxsw_sp_port_dev_check(dev)) {
+ ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
+ ptr);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
+ struct net_device *dev,
+ unsigned long event, void *ptr,
+ u16 vid)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct netlink_ext_ack *extack;
+ struct net_device *upper_dev;
+ int err = 0;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (!netif_is_bridge_master(upper_dev) &&
+ !netif_is_macvlan(upper_dev) &&
+ !netif_is_l3_master(upper_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
+ return -EINVAL;
+ }
+ if (!info->linking)
+ break;
+ if (netif_is_bridge_master(upper_dev) &&
+ !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
+ mlxsw_sp_bridge_has_vxlan(upper_dev) &&
+ !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
+ return -EOPNOTSUPP;
+ if (netdev_has_any_upper_dev(upper_dev) &&
+ (!netif_is_bridge_master(upper_dev) ||
+ !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
+ upper_dev))) {
+ NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
+ return -EINVAL;
+ }
+ if (netif_is_macvlan(upper_dev) &&
+ !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
+ return -EOPNOTSUPP;
+ }
+ break;
+ case NETDEV_CHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (netif_is_bridge_master(upper_dev)) {
+ if (info->linking)
+ err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
+ vlan_dev,
+ upper_dev,
+ extack);
+ else
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
+ vlan_dev,
+ upper_dev);
+ } else if (netif_is_macvlan(upper_dev)) {
+ if (!info->linking)
+ mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
+ }
+ break;
+ }
+
+ return err;
+}
+
+static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
+ struct net_device *lag_dev,
+ unsigned long event,
+ void *ptr, u16 vid)
+{
+ struct net_device *dev;
+ struct list_head *iter;
+ int ret;
+
+ netdev_for_each_lower_dev(lag_dev, dev, iter) {
+ if (mlxsw_sp_port_dev_check(dev)) {
+ ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
+ event, ptr,
+ vid);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
+ struct net_device *br_dev,
+ unsigned long event, void *ptr,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct netlink_ext_ack *extack;
+ struct net_device *upper_dev;
+
+ if (!mlxsw_sp)
+ return 0;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (!netif_is_macvlan(upper_dev) &&
+ !netif_is_l3_master(upper_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
+ return -EOPNOTSUPP;
+ }
+ if (!info->linking)
+ break;
+ if (netif_is_macvlan(upper_dev) &&
+ !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
+ return -EOPNOTSUPP;
+ }
+ break;
+ case NETDEV_CHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (info->linking)
+ break;
+ if (netif_is_macvlan(upper_dev))
+ mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
+ break;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
+ unsigned long event, void *ptr)
+{
+ struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
+ u16 vid = vlan_dev_vlan_id(vlan_dev);
+
+ if (mlxsw_sp_port_dev_check(real_dev))
+ return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
+ event, ptr, vid);
+ else if (netif_is_lag_master(real_dev))
+ return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
+ real_dev, event,
+ ptr, vid);
+ else if (netif_is_bridge_master(real_dev))
+ return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
+ event, ptr, vid);
+
+ return 0;
+}
+
+static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
+ unsigned long event, void *ptr)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct netlink_ext_ack *extack;
+ struct net_device *upper_dev;
+ u16 proto;
+
+ if (!mlxsw_sp)
+ return 0;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (!is_vlan_dev(upper_dev) &&
+ !netif_is_macvlan(upper_dev) &&
+ !netif_is_l3_master(upper_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
+ return -EOPNOTSUPP;
+ }
+ if (!info->linking)
+ break;
+ if (br_vlan_enabled(br_dev)) {
+ br_vlan_get_proto(br_dev, &proto);
+ if (proto == ETH_P_8021AD) {
+ NL_SET_ERR_MSG_MOD(extack, "Upper devices are not supported on top of an 802.1ad bridge");
+ return -EOPNOTSUPP;
+ }
+ }
+ if (is_vlan_dev(upper_dev) &&
+ ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
+ NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
+ return -EOPNOTSUPP;
+ }
+ if (netif_is_macvlan(upper_dev) &&
+ !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
+ return -EOPNOTSUPP;
+ }
+ break;
+ case NETDEV_CHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (info->linking)
+ break;
+ if (is_vlan_dev(upper_dev))
+ mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
+ if (netif_is_macvlan(upper_dev))
+ mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
+ break;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
+ unsigned long event, void *ptr)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct netlink_ext_ack *extack;
+ struct net_device *upper_dev;
+
+ if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
+ return 0;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+ upper_dev = info->upper_dev;
+
+ if (!netif_is_l3_master(upper_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *dev,
+ unsigned long event, void *ptr)
+{
+ struct netdev_notifier_changeupper_info *cu_info;
+ struct netdev_notifier_info *info = ptr;
+ struct netlink_ext_ack *extack;
+ struct net_device *upper_dev;
+
+ extack = netdev_notifier_info_to_extack(info);
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ cu_info = container_of(info,
+ struct netdev_notifier_changeupper_info,
+ info);
+ upper_dev = cu_info->upper_dev;
+ if (!netif_is_bridge_master(upper_dev))
+ return 0;
+ if (!mlxsw_sp_lower_get(upper_dev))
+ return 0;
+ if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
+ return -EOPNOTSUPP;
+ if (cu_info->linking) {
+ if (!netif_running(dev))
+ return 0;
+ /* When the bridge is VLAN-aware, the VNI of the VxLAN
+ * device needs to be mapped to a VLAN, but at this
+ * point no VLANs are configured on the VxLAN device
+ */
+ if (br_vlan_enabled(upper_dev))
+ return 0;
+ return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
+ dev, 0, extack);
+ } else {
+ /* VLANs were already flushed, which triggered the
+ * necessary cleanup
+ */
+ if (br_vlan_enabled(upper_dev))
+ return 0;
+ mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
+ }
+ break;
+ case NETDEV_PRE_UP:
+ upper_dev = netdev_master_upper_dev_get(dev);
+ if (!upper_dev)
+ return 0;
+ if (!netif_is_bridge_master(upper_dev))
+ return 0;
+ if (!mlxsw_sp_lower_get(upper_dev))
+ return 0;
+ return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
+ extack);
+ case NETDEV_DOWN:
+ upper_dev = netdev_master_upper_dev_get(dev);
+ if (!upper_dev)
+ return 0;
+ if (!netif_is_bridge_master(upper_dev))
+ return 0;
+ if (!mlxsw_sp_lower_get(upper_dev))
+ return 0;
+ mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
+ break;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct mlxsw_sp_span_entry *span_entry;
+ struct mlxsw_sp *mlxsw_sp;
+ int err = 0;
+
+ mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
+ if (event == NETDEV_UNREGISTER) {
+ span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
+ if (span_entry)
+ mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
+ }
+ mlxsw_sp_span_respin(mlxsw_sp);
+
+ if (netif_is_vxlan(dev))
+ err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
+ else if (mlxsw_sp_port_dev_check(dev))
+ err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
+ else if (netif_is_lag_master(dev))
+ err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
+ else if (is_vlan_dev(dev))
+ err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
+ else if (netif_is_bridge_master(dev))
+ err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
+ else if (netif_is_macvlan(dev))
+ err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);
+
+ return notifier_from_errno(err);
+}
+
+static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
+ .notifier_call = mlxsw_sp_inetaddr_valid_event,
+};
+
+static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
+ .notifier_call = mlxsw_sp_inet6addr_valid_event,
+};
+
+static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
+ {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
+ {0, },
+};
+
+static struct pci_driver mlxsw_sp1_pci_driver = {
+ .name = mlxsw_sp1_driver_name,
+ .id_table = mlxsw_sp1_pci_id_table,
+};
+
+static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
+ {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
+ {0, },
+};
+
+static struct pci_driver mlxsw_sp2_pci_driver = {
+ .name = mlxsw_sp2_driver_name,
+ .id_table = mlxsw_sp2_pci_id_table,
+};
+
+static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
+ {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
+ {0, },
+};
+
+static struct pci_driver mlxsw_sp3_pci_driver = {
+ .name = mlxsw_sp3_driver_name,
+ .id_table = mlxsw_sp3_pci_id_table,
+};
+
+static const struct pci_device_id mlxsw_sp4_pci_id_table[] = {
+ {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM4), 0},
+ {0, },
+};
+
+static struct pci_driver mlxsw_sp4_pci_driver = {
+ .name = mlxsw_sp4_driver_name,
+ .id_table = mlxsw_sp4_pci_id_table,
+};
+
+static int __init mlxsw_sp_module_init(void)
+{
+ int err;
+
+ register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
+ register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
+
+ err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
+ if (err)
+ goto err_sp1_core_driver_register;
+
+ err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
+ if (err)
+ goto err_sp2_core_driver_register;
+
+ err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
+ if (err)
+ goto err_sp3_core_driver_register;
+
+ err = mlxsw_core_driver_register(&mlxsw_sp4_driver);
+ if (err)
+ goto err_sp4_core_driver_register;
+
+ err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
+ if (err)
+ goto err_sp1_pci_driver_register;
+
+ err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
+ if (err)
+ goto err_sp2_pci_driver_register;
+
+ err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
+ if (err)
+ goto err_sp3_pci_driver_register;
+
+ err = mlxsw_pci_driver_register(&mlxsw_sp4_pci_driver);
+ if (err)
+ goto err_sp4_pci_driver_register;
+
+ return 0;
+
+err_sp4_pci_driver_register:
+ mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
+err_sp3_pci_driver_register:
+ mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
+err_sp2_pci_driver_register:
+ mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
+err_sp1_pci_driver_register:
+ mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
+err_sp4_core_driver_register:
+ mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
+err_sp3_core_driver_register:
+ mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
+err_sp2_core_driver_register:
+ mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
+err_sp1_core_driver_register:
+ unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
+ unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
+ return err;
+}
+
+static void __exit mlxsw_sp_module_exit(void)
+{
+ mlxsw_pci_driver_unregister(&mlxsw_sp4_pci_driver);
+ mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
+ mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
+ mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
+ mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
+ mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
+ mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
+ mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
+ unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
+ unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
+}
+
+module_init(mlxsw_sp_module_init);
+module_exit(mlxsw_sp_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox Spectrum driver");
+MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
+MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
+MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
+MODULE_DEVICE_TABLE(pci, mlxsw_sp4_pci_id_table);
+MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
+MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
+MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);
+MODULE_FIRMWARE(MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
new file mode 100644
index 000000000..c8ff2a6d7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -0,0 +1,1493 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_SPECTRUM_H
+#define _MLXSW_SPECTRUM_H
+
+#include <linux/ethtool.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/rhashtable.h>
+#include <linux/bitops.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/list.h>
+#include <linux/dcbnl.h>
+#include <linux/in6.h>
+#include <linux/notifier.h>
+#include <linux/net_namespace.h>
+#include <linux/spinlock.h>
+#include <net/psample.h>
+#include <net/pkt_cls.h>
+#include <net/red.h>
+#include <net/vxlan.h>
+#include <net/flow_offload.h>
+#include <net/inet_ecn.h>
+
+#include "port.h"
+#include "core.h"
+#include "core_acl_flex_keys.h"
+#include "core_acl_flex_actions.h"
+#include "reg.h"
+
+#define MLXSW_SP_DEFAULT_VID (VLAN_N_VID - 1)
+
+#define MLXSW_SP_FID_8021D_MAX 1024
+
+#define MLXSW_SP_MID_MAX 7000
+
+#define MLXSW_SP_KVD_LINEAR_SIZE 98304 /* entries */
+#define MLXSW_SP_KVD_GRANULARITY 128
+
+#define MLXSW_SP_RESOURCE_NAME_KVD "kvd"
+#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR "linear"
+#define MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE "hash_single"
+#define MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE "hash_double"
+#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES "singles"
+#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS "chunks"
+#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS "large_chunks"
+
+#define MLXSW_SP_RESOURCE_NAME_SPAN "span_agents"
+
+#define MLXSW_SP_RESOURCE_NAME_COUNTERS "counters"
+#define MLXSW_SP_RESOURCE_NAME_COUNTERS_FLOW "flow"
+#define MLXSW_SP_RESOURCE_NAME_COUNTERS_RIF "rif"
+
+enum mlxsw_sp_resource_id {
+ MLXSW_SP_RESOURCE_KVD = MLXSW_CORE_RESOURCE_MAX,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
+ MLXSW_SP_RESOURCE_SPAN,
+ MLXSW_SP_RESOURCE_COUNTERS,
+ MLXSW_SP_RESOURCE_COUNTERS_FLOW,
+ MLXSW_SP_RESOURCE_COUNTERS_RIF,
+ MLXSW_SP_RESOURCE_GLOBAL_POLICERS,
+ MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS,
+ MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
+ MLXSW_SP_RESOURCE_RIFS,
+};
+
+struct mlxsw_sp_port;
+struct mlxsw_sp_rif;
+struct mlxsw_sp_span_entry;
+enum mlxsw_sp_l3proto;
+union mlxsw_sp_l3addr;
+
+struct mlxsw_sp_upper {
+ struct net_device *dev;
+ unsigned int ref_count;
+};
+
+enum mlxsw_sp_rif_type {
+ MLXSW_SP_RIF_TYPE_SUBPORT,
+ MLXSW_SP_RIF_TYPE_VLAN,
+ MLXSW_SP_RIF_TYPE_FID,
+ MLXSW_SP_RIF_TYPE_IPIP_LB, /* IP-in-IP loopback. */
+ MLXSW_SP_RIF_TYPE_MAX,
+};
+
+struct mlxsw_sp_router_ops;
+
+extern const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops;
+extern const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops;
+
+struct mlxsw_sp_switchdev_ops;
+
+extern const struct mlxsw_sp_switchdev_ops mlxsw_sp1_switchdev_ops;
+extern const struct mlxsw_sp_switchdev_ops mlxsw_sp2_switchdev_ops;
+
+enum mlxsw_sp_fid_type {
+ MLXSW_SP_FID_TYPE_8021Q,
+ MLXSW_SP_FID_TYPE_8021D,
+ MLXSW_SP_FID_TYPE_RFID,
+ MLXSW_SP_FID_TYPE_DUMMY,
+ MLXSW_SP_FID_TYPE_MAX,
+};
+
+enum mlxsw_sp_nve_type {
+ MLXSW_SP_NVE_TYPE_VXLAN,
+};
+
+struct mlxsw_sp_sb;
+struct mlxsw_sp_bridge;
+struct mlxsw_sp_router;
+struct mlxsw_sp_mr;
+struct mlxsw_sp_acl;
+struct mlxsw_sp_counter_pool;
+struct mlxsw_sp_fid_core;
+struct mlxsw_sp_kvdl;
+struct mlxsw_sp_nve;
+struct mlxsw_sp_kvdl_ops;
+struct mlxsw_sp_mr_tcam_ops;
+struct mlxsw_sp_acl_rulei_ops;
+struct mlxsw_sp_acl_tcam_ops;
+struct mlxsw_sp_nve_ops;
+struct mlxsw_sp_sb_ops;
+struct mlxsw_sp_sb_vals;
+struct mlxsw_sp_port_type_speed_ops;
+struct mlxsw_sp_ptp_state;
+struct mlxsw_sp_ptp_ops;
+struct mlxsw_sp_span_ops;
+struct mlxsw_sp_qdisc_state;
+struct mlxsw_sp_mall_entry;
+struct mlxsw_sp_pgt;
+
+struct mlxsw_sp_port_mapping {
+ u8 module;
+ u8 slot_index;
+ u8 width; /* Number of lanes used by the port */
+ u8 module_width; /* Number of lanes in the module (static) */
+ u8 lane;
+};
+
+struct mlxsw_sp_port_mapping_events {
+ struct list_head queue;
+ spinlock_t queue_lock; /* protects queue */
+ struct work_struct work;
+};
+
+struct mlxsw_sp_parsing {
+ refcount_t parsing_depth_ref;
+ u16 parsing_depth;
+ u16 vxlan_udp_dport;
+ struct mutex lock; /* Protects parsing configuration */
+};
+
+struct mlxsw_sp {
+ struct mlxsw_sp_port **ports;
+ struct mlxsw_core *core;
+ const struct mlxsw_bus_info *bus_info;
+ unsigned char base_mac[ETH_ALEN];
+ const unsigned char *mac_mask;
+ struct mlxsw_sp_upper *lags;
+ struct mlxsw_sp_port_mapping *port_mapping;
+ struct mlxsw_sp_port_mapping_events port_mapping_events;
+ struct rhashtable sample_trigger_ht;
+ struct mlxsw_sp_sb *sb;
+ struct mlxsw_sp_bridge *bridge;
+ struct mlxsw_sp_router *router;
+ struct mlxsw_sp_mr *mr;
+ struct mlxsw_afa *afa;
+ struct mlxsw_sp_acl *acl;
+ struct mlxsw_sp_fid_core *fid_core;
+ struct mlxsw_sp_policer_core *policer_core;
+ struct mlxsw_sp_kvdl *kvdl;
+ struct mlxsw_sp_nve *nve;
+ struct notifier_block netdevice_nb;
+ struct mlxsw_sp_ptp_clock *clock;
+ struct mlxsw_sp_ptp_state *ptp_state;
+ struct mlxsw_sp_counter_pool *counter_pool;
+ struct mlxsw_sp_span *span;
+ struct mlxsw_sp_trap *trap;
+ struct mlxsw_sp_parsing parsing;
+ const struct mlxsw_sp_switchdev_ops *switchdev_ops;
+ const struct mlxsw_sp_kvdl_ops *kvdl_ops;
+ const struct mlxsw_afa_ops *afa_ops;
+ const struct mlxsw_afk_ops *afk_ops;
+ const struct mlxsw_sp_mr_tcam_ops *mr_tcam_ops;
+ const struct mlxsw_sp_acl_rulei_ops *acl_rulei_ops;
+ const struct mlxsw_sp_acl_tcam_ops *acl_tcam_ops;
+ const struct mlxsw_sp_acl_bf_ops *acl_bf_ops;
+ const struct mlxsw_sp_nve_ops **nve_ops_arr;
+ const struct mlxsw_sp_sb_vals *sb_vals;
+ const struct mlxsw_sp_sb_ops *sb_ops;
+ const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
+ const struct mlxsw_sp_ptp_ops *ptp_ops;
+ const struct mlxsw_sp_span_ops *span_ops;
+ const struct mlxsw_sp_policer_core_ops *policer_core_ops;
+ const struct mlxsw_sp_trap_ops *trap_ops;
+ const struct mlxsw_sp_mall_ops *mall_ops;
+ const struct mlxsw_sp_router_ops *router_ops;
+ const struct mlxsw_listener *listeners;
+ const struct mlxsw_sp_fid_family **fid_family_arr;
+ size_t listeners_count;
+ u32 lowest_shaper_bs;
+ struct rhashtable ipv6_addr_ht;
+ struct mutex ipv6_addr_ht_lock; /* Protects ipv6_addr_ht */
+ struct mlxsw_sp_pgt *pgt;
+ bool pgt_smpe_index_valid;
+};
+
+struct mlxsw_sp_ptp_ops {
+ struct mlxsw_sp_ptp_clock *
+ (*clock_init)(struct mlxsw_sp *mlxsw_sp, struct device *dev);
+ void (*clock_fini)(struct mlxsw_sp_ptp_clock *clock);
+
+ struct mlxsw_sp_ptp_state *(*init)(struct mlxsw_sp *mlxsw_sp);
+ void (*fini)(struct mlxsw_sp_ptp_state *ptp_state);
+
+ /* Notify the driver that a packet that might be a PTP packet was
+ * received. The driver is responsible for freeing the passed-in SKB.
+ */
+ void (*receive)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u16 local_port);
+
+ /* Notify the driver that a timestamped packet was transmitted. The
+ * driver is responsible for freeing the passed-in SKB.
+ */
+ void (*transmitted)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u16 local_port);
+
+ int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config);
+ int (*hwtstamp_set)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config);
+ void (*shaper_work)(struct work_struct *work);
+ int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp,
+ struct ethtool_ts_info *info);
+ int (*get_stats_count)(void);
+ void (*get_stats_strings)(u8 **p);
+ void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
+ u64 *data, int data_index);
+ int (*txhdr_construct)(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+};
+
+static inline struct mlxsw_sp_upper *
+mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
+{
+ return &mlxsw_sp->lags[lag_id];
+}
+
+struct mlxsw_sp_port_pcpu_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+ u32 tx_dropped;
+};
+
+enum mlxsw_sp_sample_trigger_type {
+ MLXSW_SP_SAMPLE_TRIGGER_TYPE_INGRESS,
+ MLXSW_SP_SAMPLE_TRIGGER_TYPE_EGRESS,
+ MLXSW_SP_SAMPLE_TRIGGER_TYPE_POLICY_ENGINE,
+};
+
+struct mlxsw_sp_sample_trigger {
+ enum mlxsw_sp_sample_trigger_type type;
+ u16 local_port; /* Reserved when trigger type is not ingress / egress. */
+};
+
+struct mlxsw_sp_sample_params {
+ struct psample_group *psample_group;
+ u32 trunc_size;
+ u32 rate;
+ bool truncate;
+};
+
+struct mlxsw_sp_bridge_port;
+struct mlxsw_sp_fid;
+
+struct mlxsw_sp_port_vlan {
+ struct list_head list;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp_fid *fid;
+ u16 vid;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ struct list_head bridge_vlan_node;
+};
+
+/* No need for an internal lock; at worst we miss a single periodic iteration */
+struct mlxsw_sp_port_xstats {
+ u64 ecn;
+ u64 tc_ecn[TC_MAX_QUEUE];
+ u64 wred_drop[TC_MAX_QUEUE];
+ u64 tail_drop[TC_MAX_QUEUE];
+ u64 backlog[TC_MAX_QUEUE];
+ u64 tx_bytes[IEEE_8021QAZ_MAX_TCS];
+ u64 tx_packets[IEEE_8021QAZ_MAX_TCS];
+};
+
+struct mlxsw_sp_ptp_port_dir_stats {
+ u64 packets;
+ u64 timestamps;
+};
+
+struct mlxsw_sp_ptp_port_stats {
+ struct mlxsw_sp_ptp_port_dir_stats rx_gcd;
+ struct mlxsw_sp_ptp_port_dir_stats tx_gcd;
+};
+
+struct mlxsw_sp_port {
+ struct net_device *dev;
+ struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
+ struct mlxsw_sp *mlxsw_sp;
+ u16 local_port;
+ u8 lagged:1,
+ split:1;
+ u16 pvid;
+ u16 lag_id;
+ struct {
+ u8 tx_pause:1,
+ rx_pause:1,
+ autoneg:1;
+ } link;
+ struct {
+ struct ieee_ets *ets;
+ struct ieee_maxrate *maxrate;
+ struct ieee_pfc *pfc;
+ enum mlxsw_reg_qpts_trust_state trust_state;
+ } dcb;
+ struct mlxsw_sp_port_mapping mapping; /* The mapping is constant during
+ * the mlxsw_sp_port lifetime;
+ * however, the same local port
+ * can have a different mapping.
+ */
+ struct {
+ #define MLXSW_HW_STATS_UPDATE_TIME HZ
+ struct rtnl_link_stats64 stats;
+ struct mlxsw_sp_port_xstats xstats;
+ struct delayed_work update_dw;
+ } periodic_hw_stats;
+ struct list_head vlans_list;
+ struct mlxsw_sp_port_vlan *default_vlan;
+ struct mlxsw_sp_qdisc_state *qdisc;
+ unsigned acl_rule_count;
+ struct mlxsw_sp_flow_block *ing_flow_block;
+ struct mlxsw_sp_flow_block *eg_flow_block;
+ struct {
+ struct delayed_work shaper_dw;
+ struct hwtstamp_config hwtstamp_config;
+ u16 ing_types;
+ u16 egr_types;
+ struct mlxsw_sp_ptp_port_stats stats;
+ } ptp;
+ int max_mtu;
+ u32 max_speed;
+ struct mlxsw_sp_hdroom *hdroom;
+ u64 module_overheat_initial_val;
+};
+
+struct mlxsw_sp_port_type_speed_ops {
+ void (*from_ptys_supported_port)(struct mlxsw_sp *mlxsw_sp,
+ u32 ptys_eth_proto,
+ struct ethtool_link_ksettings *cmd);
+ void (*from_ptys_link)(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
+ unsigned long *mode);
+ u32 (*from_ptys_speed)(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto);
+ void (*from_ptys_link_mode)(struct mlxsw_sp *mlxsw_sp,
+ bool carrier_ok, u32 ptys_eth_proto,
+ struct ethtool_link_ksettings *cmd);
+ int (*ptys_max_speed)(struct mlxsw_sp_port *mlxsw_sp_port, u32 *p_max_speed);
+ u32 (*to_ptys_advert_link)(struct mlxsw_sp *mlxsw_sp,
+ const struct ethtool_link_ksettings *cmd);
+ u32 (*to_ptys_speed_lanes)(struct mlxsw_sp *mlxsw_sp, u8 width,
+ const struct ethtool_link_ksettings *cmd);
+ void (*reg_ptys_eth_pack)(struct mlxsw_sp *mlxsw_sp, char *payload,
+ u16 local_port, u32 proto_admin, bool autoneg);
+ void (*reg_ptys_eth_unpack)(struct mlxsw_sp *mlxsw_sp, char *payload,
+ u32 *p_eth_proto_cap,
+ u32 *p_eth_proto_admin,
+ u32 *p_eth_proto_oper);
+ u32 (*ptys_proto_cap_masked_get)(u32 eth_proto_cap);
+};
+
+struct mlxsw_sp_ports_bitmap {
+ unsigned long *bitmap;
+ unsigned int nbits;
+};
+
+static inline int
+mlxsw_sp_port_bitmap_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ports_bitmap *ports_bm)
+{
+ unsigned int nbits = mlxsw_core_max_ports(mlxsw_sp->core);
+
+ ports_bm->nbits = nbits;
+ ports_bm->bitmap = bitmap_zalloc(nbits, GFP_KERNEL);
+ if (!ports_bm->bitmap)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void
+mlxsw_sp_port_bitmap_fini(struct mlxsw_sp_ports_bitmap *ports_bm)
+{
+ bitmap_free(ports_bm->bitmap);
+}
+
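+/* Compute the ECN value to use for a decapsulated packet from the outer and
+ * inner ECN fields, following RFC 6040. 'trap_en' is set when the combination
+ * is invalid and the packet should be trapped to the CPU.
+ */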
+static inline u8 mlxsw_sp_tunnel_ecn_decap(u8 outer_ecn, u8 inner_ecn,
+ bool *trap_en)
+{
+ bool set_ce = false;
+
+ *trap_en = !!__INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce);
+ if (set_ce)
+ return INET_ECN_CE;
+ else if (outer_ecn == INET_ECN_ECT_1 && inner_ecn == INET_ECN_ECT_0)
+ return INET_ECN_ECT_1;
+ else
+ return inner_ecn;
+}
+
+static inline struct net_device *
+mlxsw_sp_bridge_vxlan_dev_find(struct net_device *br_dev)
+{
+ struct net_device *dev;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(br_dev, dev, iter) {
+ if (netif_is_vxlan(dev))
+ return dev;
+ }
+
+ return NULL;
+}
+
+static inline bool mlxsw_sp_bridge_has_vxlan(struct net_device *br_dev)
+{
+ return !!mlxsw_sp_bridge_vxlan_dev_find(br_dev);
+}
+
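+/* Return via 'p_vid' the VLAN that is both the PVID of the VxLAN bridge port
+ * and egress untagged on it, or zero if no such VLAN exists.
+ */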
+static inline int
+mlxsw_sp_vxlan_mapped_vid(const struct net_device *vxlan_dev, u16 *p_vid)
+{
+ struct bridge_vlan_info vinfo;
+ u16 vid = 0;
+ int err;
+
+ err = br_vlan_get_pvid(vxlan_dev, &vid);
+ if (err || !vid)
+ goto out;
+
+ err = br_vlan_get_info(vxlan_dev, vid, &vinfo);
+ if (err || !(vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED))
+ vid = 0;
+
+out:
+ *p_vid = vid;
+ return err;
+}
+
+static inline bool
+mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ return mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause;
+}
+
+static inline struct mlxsw_sp_port *
+mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ u16 local_port;
+
+ local_port = mlxsw_core_lag_mapping_get(mlxsw_sp->core,
+ lag_id, port_index);
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ return mlxsw_sp_port && mlxsw_sp_port->lagged ? mlxsw_sp_port : NULL;
+}
+
+static inline struct mlxsw_sp_port_vlan *
+mlxsw_sp_port_vlan_find_by_vid(const struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+
+ list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
+ list) {
+ if (mlxsw_sp_port_vlan->vid == vid)
+ return mlxsw_sp_port_vlan;
+ }
+
+ return NULL;
+}
+
+enum mlxsw_sp_flood_type {
+ MLXSW_SP_FLOOD_TYPE_UC,
+ MLXSW_SP_FLOOD_TYPE_BC,
+ MLXSW_SP_FLOOD_TYPE_MC,
+};
+
+int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
+ int prio, char *ppcnt_pl);
+int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool is_up);
+int
+mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool is_8021ad_tagged,
+ bool is_8021q_tagged);
+static inline bool
+mlxsw_sp_local_port_is_valid(struct mlxsw_sp *mlxsw_sp, u16 local_port)
+{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+
+ return local_port < max_ports && local_port;
+}
+
+/* spectrum_buffers.c */
+struct mlxsw_sp_hdroom_prio {
+ /* Index of the port buffer associated with this priority. This is the
+ * actually configured value.
+ */
+ u8 buf_idx;
+ /* Value of buf_idx deduced from the DCB ETS configuration. */
+ u8 ets_buf_idx;
+ /* Value of buf_idx taken from the dcbnl_setbuffer configuration. */
+ u8 set_buf_idx;
+ bool lossy;
+};
+
+struct mlxsw_sp_hdroom_buf {
+ u32 thres_cells;
+ u32 size_cells;
+ /* Size requirement from dcbnl_setbuffer. */
+ u32 set_size_cells;
+ bool lossy;
+};
+
+enum mlxsw_sp_hdroom_mode {
+ MLXSW_SP_HDROOM_MODE_DCB,
+ MLXSW_SP_HDROOM_MODE_TC,
+};
+
+#define MLXSW_SP_PB_COUNT 10
+
+struct mlxsw_sp_hdroom {
+ enum mlxsw_sp_hdroom_mode mode;
+
+ struct {
+ struct mlxsw_sp_hdroom_prio prio[IEEE_8021Q_MAX_PRIORITIES];
+ } prios;
+ struct {
+ struct mlxsw_sp_hdroom_buf buf[MLXSW_SP_PB_COUNT];
+ } bufs;
+ struct {
+ /* Size actually configured for the internal buffer. Equal to
+ * reserve when internal buffer is enabled.
+ */
+ u32 size_cells;
+ /* Space reserved in the headroom for the internal buffer. Port
+ * buffers are not allowed to grow into this space.
+ */
+ u32 reserve_cells;
+ bool enable;
+ } int_buf;
+ int delay_bytes;
+ int mtu;
+};
+
+int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_port_buffers_fini(struct mlxsw_sp_port *mlxsw_sp_port);
+int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index, u16 pool_index,
+ struct devlink_sb_pool_info *pool_info);
+int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index, u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack);
+int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold);
+int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold, struct netlink_ext_ack *extack);
+int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold);
+int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack);
+int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index);
+int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index);
+int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_cur, u32 *p_max);
+int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max);
+u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells);
+u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes);
+void mlxsw_sp_hdroom_prios_reset_buf_idx(struct mlxsw_sp_hdroom *hdroom);
+void mlxsw_sp_hdroom_bufs_reset_lossiness(struct mlxsw_sp_hdroom *hdroom);
+void mlxsw_sp_hdroom_bufs_reset_sizes(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_hdroom *hdroom);
+int mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_hdroom *hdroom);
+struct mlxsw_sp_sample_params *
+mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_sample_trigger *trigger);
+int
+mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_sample_trigger *trigger,
+ const struct mlxsw_sp_sample_params *params,
+ struct netlink_ext_ack *extack);
+void
+mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_sample_trigger *trigger);
+int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp,
+ const struct in6_addr *addr6,
+ u32 *p_kvdl_index);
+void
+mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6);
+
+extern const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals;
+extern const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals;
+
+extern const struct mlxsw_sp_sb_ops mlxsw_sp1_sb_ops;
+extern const struct mlxsw_sp_sb_ops mlxsw_sp2_sb_ops;
+extern const struct mlxsw_sp_sb_ops mlxsw_sp3_sb_ops;
+
+/* spectrum_switchdev.c */
+int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
+ bool adding);
+void
+mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
+int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *brport_dev,
+ struct net_device *br_dev,
+ struct netlink_ext_ack *extack);
+void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *brport_dev,
+ struct net_device *br_dev);
+bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev);
+int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev,
+ const struct net_device *vxlan_dev, u16 vid,
+ struct netlink_ext_ack *extack);
+void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *vxlan_dev);
+extern struct notifier_block mlxsw_sp_switchdev_notifier;
+
+/* spectrum.c */
+void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
+ u16 local_port, void *priv);
+void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u16 local_port);
+int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed);
+int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
+ bool dwrr, u8 dwrr_weight);
+int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 switch_prio, u8 tclass);
+int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_qeec_hr hr, u8 index,
+ u8 next_index, u32 maxrate, u8 burst_size);
+enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 stp_state);
+int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
+ u8 state);
+int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable);
+int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
+ bool learn_enable);
+int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type);
+int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 ethtype);
+int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
+ u16 ethtype);
+struct mlxsw_sp_port_vlan *
+mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
+void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
+int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
+ u16 vid_end, bool is_member, bool untagged);
+int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
+ unsigned int counter_index, u64 *packets,
+ u64 *bytes);
+int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+ unsigned int *p_counter_index);
+void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
+ unsigned int counter_index);
+void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+int mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+bool mlxsw_sp_port_dev_check(const struct net_device *dev);
+struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
+struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
+struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
+void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
+struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev);
+int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_parsing_vxlan_udp_dport_set(struct mlxsw_sp *mlxsw_sp,
+ __be16 udp_dport);
+
+/* spectrum_dcb.c */
+#ifdef CONFIG_MLXSW_SPECTRUM_DCB
+int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port);
+#else
+static inline int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ return 0;
+}
+static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{}
+#endif
+
+/* spectrum_router.c */
+enum mlxsw_sp_l3proto {
+ MLXSW_SP_L3_PROTO_IPV4,
+ MLXSW_SP_L3_PROTO_IPV6,
+#define MLXSW_SP_L3_PROTO_MAX (MLXSW_SP_L3_PROTO_IPV6 + 1)
+};
+
+union mlxsw_sp_l3addr {
+ __be32 addr4;
+ struct in6_addr addr6;
+};
+
+u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
+int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
+ struct netlink_ext_ack *extack);
+void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *macvlan_dev);
+int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
+ unsigned long event, void *ptr);
+int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
+ unsigned long event, void *ptr);
+int
+mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
+ struct net_device *l3_dev,
+ struct netlink_ext_ack *extack);
+void
+mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
+void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *dev);
+bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev);
+u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev);
+u16 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
+ enum mlxsw_sp_l3proto ul_proto,
+ const union mlxsw_sp_l3addr *ul_sip,
+ u32 tunnel_index);
+void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
+ enum mlxsw_sp_l3proto ul_proto,
+ const union mlxsw_sp_l3addr *ul_sip);
+int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
+ u16 *vr_id);
+int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
+ u16 *ul_rif_index);
+void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index);
+
+/* spectrum_kvdl.c */
+enum mlxsw_sp_kvdl_entry_type {
+ MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
+ MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
+ MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS,
+ MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT,
+};
+
+static inline unsigned int
+mlxsw_sp_kvdl_entry_size(enum mlxsw_sp_kvdl_entry_type type)
+{
+ switch (type) {
+ case MLXSW_SP_KVDL_ENTRY_TYPE_ADJ:
+ case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET:
+ case MLXSW_SP_KVDL_ENTRY_TYPE_PBS:
+ case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR:
+ case MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS:
+ case MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT:
+ default:
+ return 1;
+ }
+}
+
+struct mlxsw_sp_kvdl_ops {
+ size_t priv_size;
+ int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
+ void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
+ int (*alloc)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, u32 *p_entry_index);
+ void (*free)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, int entry_index);
+ int (*alloc_size_query)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ unsigned int *p_alloc_count);
+ int (*resources_register)(struct mlxsw_sp *mlxsw_sp, void *priv);
+};
+
+int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, u32 *p_entry_index);
+void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, int entry_index);
+int mlxsw_sp_kvdl_alloc_count_query(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ unsigned int *p_alloc_count);
+
+/* spectrum1_kvdl.c */
+extern const struct mlxsw_sp_kvdl_ops mlxsw_sp1_kvdl_ops;
+int mlxsw_sp1_kvdl_resources_register(struct mlxsw_core *mlxsw_core);
+
+/* spectrum2_kvdl.c */
+extern const struct mlxsw_sp_kvdl_ops mlxsw_sp2_kvdl_ops;
+
+enum mlxsw_sp_acl_mangle_field {
+ MLXSW_SP_ACL_MANGLE_FIELD_IP_DSFIELD,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP_DSCP,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP_ECN,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP_SPORT,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP_DPORT,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP4_SIP,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP4_DIP,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_1,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_2,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_3,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_4,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_1,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_2,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_3,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_4,
+};
+
+struct mlxsw_sp_acl_rule_info {
+ unsigned int priority;
+ struct mlxsw_afk_element_values values;
+ struct mlxsw_afa_block *act_block;
+ u8 action_created:1,
+ ingress_bind_blocker:1,
+ egress_bind_blocker:1,
+ counter_valid:1,
+ policer_index_valid:1,
+ ipv6_valid:1;
+ unsigned int counter_index;
+ u16 policer_index;
+ struct {
+ u32 prev_val;
+ enum mlxsw_sp_acl_mangle_field prev_field;
+ } ipv6;
+};
+
+/* spectrum_flow.c */
+struct mlxsw_sp_flow_block {
+ struct list_head binding_list;
+ struct {
+ struct list_head list;
+ unsigned int min_prio;
+ unsigned int max_prio;
+ } mall;
+ struct mlxsw_sp_acl_ruleset *ruleset_zero;
+ struct mlxsw_sp *mlxsw_sp;
+ unsigned int rule_count;
+ unsigned int disable_count;
+ unsigned int ingress_blocker_rule_count;
+ unsigned int egress_blocker_rule_count;
+ unsigned int ingress_binding_count;
+ unsigned int egress_binding_count;
+ struct net *net;
+};
+
+struct mlxsw_sp_flow_block_binding {
+ struct list_head list;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ bool ingress;
+};
+
+static inline struct mlxsw_sp *
+mlxsw_sp_flow_block_mlxsw_sp(struct mlxsw_sp_flow_block *block)
+{
+ return block->mlxsw_sp;
+}
+
+static inline unsigned int
+mlxsw_sp_flow_block_rule_count(const struct mlxsw_sp_flow_block *block)
+{
+ return block ? block->rule_count : 0;
+}
+
+static inline void
+mlxsw_sp_flow_block_disable_inc(struct mlxsw_sp_flow_block *block)
+{
+ if (block)
+ block->disable_count++;
+}
+
+static inline void
+mlxsw_sp_flow_block_disable_dec(struct mlxsw_sp_flow_block *block)
+{
+ if (block)
+ block->disable_count--;
+}
+
+static inline bool
+mlxsw_sp_flow_block_disabled(const struct mlxsw_sp_flow_block *block)
+{
+ return block->disable_count;
+}
+
+static inline bool
+mlxsw_sp_flow_block_is_egress_bound(const struct mlxsw_sp_flow_block *block)
+{
+ return block->egress_binding_count;
+}
+
+static inline bool
+mlxsw_sp_flow_block_is_ingress_bound(const struct mlxsw_sp_flow_block *block)
+{
+ return block->ingress_binding_count;
+}
+
+static inline bool
+mlxsw_sp_flow_block_is_mixed_bound(const struct mlxsw_sp_flow_block *block)
+{
+ return block->ingress_binding_count && block->egress_binding_count;
+}
+
+struct mlxsw_sp_flow_block *mlxsw_sp_flow_block_create(struct mlxsw_sp *mlxsw_sp,
+ struct net *net);
+void mlxsw_sp_flow_block_destroy(struct mlxsw_sp_flow_block *block);
+int mlxsw_sp_setup_tc_block_clsact(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f,
+ bool ingress);
+
+/* spectrum_acl.c */
+struct mlxsw_sp_acl_ruleset;
+
+enum mlxsw_sp_acl_profile {
+ MLXSW_SP_ACL_PROFILE_FLOWER,
+ MLXSW_SP_ACL_PROFILE_MR,
+};
+
+struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
+
+int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_flow_block_binding *binding);
+void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_flow_block_binding *binding);
+struct mlxsw_sp_acl_ruleset *
+mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block, u32 chain_index,
+ enum mlxsw_sp_acl_profile profile);
+struct mlxsw_sp_acl_ruleset *
+mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block, u32 chain_index,
+ enum mlxsw_sp_acl_profile profile,
+ struct mlxsw_afk_element_usage *tmplt_elusage);
+void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset);
+u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset);
+void mlxsw_sp_acl_ruleset_prio_get(struct mlxsw_sp_acl_ruleset *ruleset,
+ unsigned int *p_min_prio,
+ unsigned int *p_max_prio);
+
+struct mlxsw_sp_acl_rule_info *
+mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl,
+ struct mlxsw_afa_block *afa_block);
+void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei);
+int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei);
+void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
+ unsigned int priority);
+void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
+ enum mlxsw_afk_element element,
+ u32 key_value, u32 mask_value);
+void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
+ enum mlxsw_afk_element element,
+ const char *key_value,
+ const char *mask_value, unsigned int len);
+int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei);
+int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
+ u16 group_id);
+int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei);
+int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei,
+ bool ingress,
+ const struct flow_action_cookie *fa_cookie,
+ struct netlink_ext_ack *extack);
+int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei);
+int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct mlxsw_sp_flow_block *block,
+ struct net_device *out_dev,
+ struct netlink_ext_ack *extack);
+int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct net_device *out_dev,
+ struct netlink_ext_ack *extack);
+int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u32 action, u16 vid, u16 proto, u8 prio,
+ struct netlink_ext_ack *extack);
+int mlxsw_sp_acl_rulei_act_priority(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u32 prio, struct netlink_ext_ack *extack);
+int mlxsw_sp_acl_rulei_act_mangle(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ enum flow_action_mangle_base htype,
+ u32 offset, u32 mask, u32 val,
+ struct netlink_ext_ack *extack);
+int mlxsw_sp_acl_rulei_act_police(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u32 index, u64 rate_bytes_ps,
+ u32 burst, struct netlink_ext_ack *extack);
+int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct netlink_ext_ack *extack);
+int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u16 fid, struct netlink_ext_ack *extack);
+int mlxsw_sp_acl_rulei_act_sample(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct mlxsw_sp_flow_block *block,
+ struct psample_group *psample_group, u32 rate,
+ u32 trunc_size, bool truncate,
+ struct netlink_ext_ack *extack);
+
+struct mlxsw_sp_acl_rule;
+
+struct mlxsw_sp_acl_rule *
+mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset,
+ unsigned long cookie,
+ struct mlxsw_afa_block *afa_block,
+ struct netlink_ext_ack *extack);
+void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule *rule);
+int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule *rule);
+void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule *rule);
+int mlxsw_sp_acl_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule *rule,
+ struct mlxsw_afa_block *afa_block);
+struct mlxsw_sp_acl_rule *
+mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset,
+ unsigned long cookie);
+struct mlxsw_sp_acl_rule_info *
+mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule);
+int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule *rule,
+ u64 *packets, u64 *bytes, u64 *drops,
+ u64 *last_use,
+ enum flow_action_hw_stats *used_hw_stats);
+
+struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp);
+
+static inline const struct flow_action_cookie *
+mlxsw_sp_acl_act_cookie_lookup(struct mlxsw_sp *mlxsw_sp, u32 cookie_index)
+{
+ return mlxsw_afa_cookie_lookup(mlxsw_sp->afa, cookie_index);
+}
+
+int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
+u32 mlxsw_sp_acl_region_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_acl_region_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp, u32 val);
+
+struct mlxsw_sp_acl_mangle_action;
+
+struct mlxsw_sp_acl_rulei_ops {
+ int (*act_mangle_field)(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei,
+ struct mlxsw_sp_acl_mangle_action *mact, u32 val,
+ struct netlink_ext_ack *extack);
+};
+
+extern struct mlxsw_sp_acl_rulei_ops mlxsw_sp1_acl_rulei_ops;
+extern struct mlxsw_sp_acl_rulei_ops mlxsw_sp2_acl_rulei_ops;
+
+/* spectrum_acl_tcam.c */
+struct mlxsw_sp_acl_tcam;
+struct mlxsw_sp_acl_tcam_region;
+
+struct mlxsw_sp_acl_tcam_ops {
+ enum mlxsw_reg_ptar_key_type key_type;
+ size_t priv_size;
+ int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ struct mlxsw_sp_acl_tcam *tcam);
+ void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
+ size_t region_priv_size;
+ int (*region_init)(struct mlxsw_sp *mlxsw_sp, void *region_priv,
+ void *tcam_priv,
+ struct mlxsw_sp_acl_tcam_region *region,
+ void *hints_priv);
+ void (*region_fini)(struct mlxsw_sp *mlxsw_sp, void *region_priv);
+ int (*region_associate)(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region);
+ void * (*region_rehash_hints_get)(void *region_priv);
+ void (*region_rehash_hints_put)(void *hints_priv);
+ size_t chunk_priv_size;
+ void (*chunk_init)(void *region_priv, void *chunk_priv,
+ unsigned int priority);
+ void (*chunk_fini)(void *chunk_priv);
+ size_t entry_priv_size;
+ int (*entry_add)(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv,
+ struct mlxsw_sp_acl_rule_info *rulei);
+ void (*entry_del)(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv);
+ int (*entry_action_replace)(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *entry_priv,
+ struct mlxsw_sp_acl_rule_info *rulei);
+ int (*entry_activity_get)(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *entry_priv,
+ bool *activity);
+};
+
+/* spectrum1_acl_tcam.c */
+extern const struct mlxsw_sp_acl_tcam_ops mlxsw_sp1_acl_tcam_ops;
+
+/* spectrum2_acl_tcam.c */
+extern const struct mlxsw_sp_acl_tcam_ops mlxsw_sp2_acl_tcam_ops;
+
+/* spectrum_acl_flex_actions.c */
+extern const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops;
+extern const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops;
+
+/* spectrum_acl_flex_keys.c */
+extern const struct mlxsw_afk_ops mlxsw_sp1_afk_ops;
+extern const struct mlxsw_afk_ops mlxsw_sp2_afk_ops;
+extern const struct mlxsw_afk_ops mlxsw_sp4_afk_ops;
+
+/* spectrum_acl_bloom_filter.c */
+extern const struct mlxsw_sp_acl_bf_ops mlxsw_sp2_acl_bf_ops;
+extern const struct mlxsw_sp_acl_bf_ops mlxsw_sp4_acl_bf_ops;
+
+/* spectrum_matchall.c */
+struct mlxsw_sp_mall_ops {
+ int (*sample_add)(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct netlink_ext_ack *extack);
+ void (*sample_del)(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry);
+};
+
+extern const struct mlxsw_sp_mall_ops mlxsw_sp1_mall_ops;
+extern const struct mlxsw_sp_mall_ops mlxsw_sp2_mall_ops;
+
+enum mlxsw_sp_mall_action_type {
+ MLXSW_SP_MALL_ACTION_TYPE_MIRROR,
+ MLXSW_SP_MALL_ACTION_TYPE_SAMPLE,
+ MLXSW_SP_MALL_ACTION_TYPE_TRAP,
+};
+
+struct mlxsw_sp_mall_mirror_entry {
+ const struct net_device *to_dev;
+ int span_id;
+};
+
+struct mlxsw_sp_mall_trap_entry {
+ int span_id;
+};
+
+struct mlxsw_sp_mall_sample_entry {
+ struct mlxsw_sp_sample_params params;
+ int span_id; /* Relevant for Spectrum-2 onwards. */
+};
+
+struct mlxsw_sp_mall_entry {
+ struct list_head list;
+ unsigned long cookie;
+ unsigned int priority;
+ enum mlxsw_sp_mall_action_type type;
+ bool ingress;
+ union {
+ struct mlxsw_sp_mall_mirror_entry mirror;
+ struct mlxsw_sp_mall_trap_entry trap;
+ struct mlxsw_sp_mall_sample_entry sample;
+ };
+ struct rcu_head rcu;
+};
+
+int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct tc_cls_matchall_offload *f);
+void mlxsw_sp_mall_destroy(struct mlxsw_sp_flow_block *block,
+ struct tc_cls_matchall_offload *f);
+int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netlink_ext_ack *extack);
+void mlxsw_sp_mall_port_unbind(struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port);
+int mlxsw_sp_mall_prio_get(struct mlxsw_sp_flow_block *block, u32 chain_index,
+ unsigned int *p_min_prio, unsigned int *p_max_prio);
+
+/* spectrum_flower.c */
+int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct flow_cls_offload *f);
+void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct flow_cls_offload *f);
+int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct flow_cls_offload *f);
+int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct flow_cls_offload *f);
+void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct flow_cls_offload *f);
+int mlxsw_sp_flower_prio_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ u32 chain_index, unsigned int *p_min_prio,
+ unsigned int *p_max_prio);
+
+/* spectrum_qdisc.c */
+int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port);
+int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_red_qopt_offload *p);
+int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_prio_qopt_offload *p);
+int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_ets_qopt_offload *p);
+int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_tbf_qopt_offload *p);
+int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_fifo_qopt_offload *p);
+int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f);
+int mlxsw_sp_setup_tc_block_qevent_mark(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f);
+
+/* spectrum_fid.c */
+bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index);
+struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
+ u16 fid_index);
+int mlxsw_sp_fid_nve_ifindex(const struct mlxsw_sp_fid *fid, int *nve_ifindex);
+int mlxsw_sp_fid_nve_type(const struct mlxsw_sp_fid *fid,
+ enum mlxsw_sp_nve_type *p_type);
+struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_vni(struct mlxsw_sp *mlxsw_sp,
+ __be32 vni);
+int mlxsw_sp_fid_vni(const struct mlxsw_sp_fid *fid, __be32 *vni);
+int mlxsw_sp_fid_nve_flood_index_set(struct mlxsw_sp_fid *fid,
+ u32 nve_flood_index);
+void mlxsw_sp_fid_nve_flood_index_clear(struct mlxsw_sp_fid *fid);
+bool mlxsw_sp_fid_nve_flood_index_is_set(const struct mlxsw_sp_fid *fid);
+int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, enum mlxsw_sp_nve_type type,
+ __be32 vni, int nve_ifindex);
+void mlxsw_sp_fid_vni_clear(struct mlxsw_sp_fid *fid);
+bool mlxsw_sp_fid_vni_is_set(const struct mlxsw_sp_fid *fid);
+void mlxsw_sp_fid_fdb_clear_offload(const struct mlxsw_sp_fid *fid,
+ const struct net_device *nve_dev);
+int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
+ enum mlxsw_sp_flood_type packet_type, u16 local_port,
+ bool member);
+int mlxsw_sp_fid_port_vid_map(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
+void mlxsw_sp_fid_port_vid_unmap(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
+u16 mlxsw_sp_fid_index(const struct mlxsw_sp_fid *fid);
+enum mlxsw_sp_fid_type mlxsw_sp_fid_type(const struct mlxsw_sp_fid *fid);
+int mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif);
+void mlxsw_sp_fid_rif_unset(struct mlxsw_sp_fid *fid);
+struct mlxsw_sp_rif *mlxsw_sp_fid_rif(const struct mlxsw_sp_fid *fid);
+enum mlxsw_sp_rif_type
+mlxsw_sp_fid_type_rif_type(const struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_fid_type type);
+u16 mlxsw_sp_fid_8021q_vid(const struct mlxsw_sp_fid *fid);
+struct mlxsw_sp_fid *mlxsw_sp_fid_8021q_get(struct mlxsw_sp *mlxsw_sp, u16 vid);
+struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_get(struct mlxsw_sp *mlxsw_sp,
+ int br_ifindex);
+struct mlxsw_sp_fid *mlxsw_sp_fid_8021q_lookup(struct mlxsw_sp *mlxsw_sp,
+ u16 vid);
+struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_lookup(struct mlxsw_sp *mlxsw_sp,
+ int br_ifindex);
+struct mlxsw_sp_fid *mlxsw_sp_fid_rfid_get(struct mlxsw_sp *mlxsw_sp,
+ u16 rif_index);
+struct mlxsw_sp_fid *mlxsw_sp_fid_dummy_get(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_fid_put(struct mlxsw_sp_fid *fid);
+int mlxsw_sp_port_fids_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_port_fids_fini(struct mlxsw_sp_port *mlxsw_sp_port);
+int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp);
+
+extern const struct mlxsw_sp_fid_family *mlxsw_sp1_fid_family_arr[];
+extern const struct mlxsw_sp_fid_family *mlxsw_sp2_fid_family_arr[];
+
+/* spectrum_mr.c */
+enum mlxsw_sp_mr_route_prio {
+ MLXSW_SP_MR_ROUTE_PRIO_SG,
+ MLXSW_SP_MR_ROUTE_PRIO_STARG,
+ MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
+ __MLXSW_SP_MR_ROUTE_PRIO_MAX
+};
+
+#define MLXSW_SP_MR_ROUTE_PRIO_MAX (__MLXSW_SP_MR_ROUTE_PRIO_MAX - 1)
+
+struct mlxsw_sp_mr_route_key;
+
+struct mlxsw_sp_mr_tcam_ops {
+ size_t priv_size;
+ int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
+ void (*fini)(void *priv);
+ size_t route_priv_size;
+ int (*route_create)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block,
+ enum mlxsw_sp_mr_route_prio prio);
+ void (*route_destroy)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key);
+ int (*route_update)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block);
+};
+
+/* spectrum1_mr_tcam.c */
+extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp1_mr_tcam_ops;
+
+/* spectrum2_mr_tcam.c */
+extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops;
+
+/* spectrum_nve.c */
+struct mlxsw_sp_nve_params {
+ enum mlxsw_sp_nve_type type;
+ __be32 vni;
+ const struct net_device *dev;
+ u16 ethertype;
+};
+
+extern const struct mlxsw_sp_nve_ops *mlxsw_sp1_nve_ops_arr[];
+extern const struct mlxsw_sp_nve_ops *mlxsw_sp2_nve_ops_arr[];
+
+int mlxsw_sp_nve_learned_ip_resolve(struct mlxsw_sp *mlxsw_sp, u32 uip,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr);
+int mlxsw_sp_nve_flood_ip_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fid *fid,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr);
+void mlxsw_sp_nve_flood_ip_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fid *fid,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr);
+int mlxsw_sp_nve_ipv6_addr_kvdl_set(struct mlxsw_sp *mlxsw_sp,
+ const struct in6_addr *addr6,
+ u32 *p_kvdl_index);
+void mlxsw_sp_nve_ipv6_addr_kvdl_unset(struct mlxsw_sp *mlxsw_sp,
+ const struct in6_addr *addr6);
+int
+mlxsw_sp_nve_ipv6_addr_map_replace(struct mlxsw_sp *mlxsw_sp, const char *mac,
+ u16 fid_index,
+ const struct in6_addr *new_addr6);
+void mlxsw_sp_nve_ipv6_addr_map_del(struct mlxsw_sp *mlxsw_sp, const char *mac,
+ u16 fid_index);
+int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_nve_params *params,
+ struct netlink_ext_ack *extack);
+void mlxsw_sp_nve_fid_disable(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fid *fid);
+int mlxsw_sp_port_nve_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_port_nve_fini(struct mlxsw_sp_port *mlxsw_sp_port);
+int mlxsw_sp_nve_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp);
+
+/* spectrum_nve_vxlan.c */
+int mlxsw_sp_nve_inc_parsing_depth_get(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_nve_inc_parsing_depth_put(struct mlxsw_sp *mlxsw_sp);
+
+/* spectrum_trap.c */
+int mlxsw_sp_devlink_traps_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_devlink_traps_fini(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_trap_init(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap, void *trap_ctx);
+void mlxsw_sp_trap_fini(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap, void *trap_ctx);
+int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack);
+int mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_group *group);
+int mlxsw_sp_trap_group_set(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_group *group,
+ const struct devlink_trap_policer *policer,
+ struct netlink_ext_ack *extack);
+int
+mlxsw_sp_trap_policer_init(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer);
+void mlxsw_sp_trap_policer_fini(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer);
+int
+mlxsw_sp_trap_policer_set(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer,
+ u64 rate, u64 burst, struct netlink_ext_ack *extack);
+int
+mlxsw_sp_trap_policer_counter_get(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer,
+ u64 *p_drops);
+int mlxsw_sp_trap_group_policer_hw_id_get(struct mlxsw_sp *mlxsw_sp, u16 id,
+ bool *p_enabled, u16 *p_hw_id);
+
+static inline struct net *mlxsw_sp_net(struct mlxsw_sp *mlxsw_sp)
+{
+ return mlxsw_core_net(mlxsw_sp->core);
+}
+
+/* spectrum_ethtool.c */
+extern const struct ethtool_ops mlxsw_sp_port_ethtool_ops;
+extern const struct mlxsw_sp_port_type_speed_ops mlxsw_sp1_port_type_speed_ops;
+extern const struct mlxsw_sp_port_type_speed_ops mlxsw_sp2_port_type_speed_ops;
+
+/* spectrum_policer.c */
+extern const struct mlxsw_sp_policer_core_ops mlxsw_sp1_policer_core_ops;
+extern const struct mlxsw_sp_policer_core_ops mlxsw_sp2_policer_core_ops;
+
+enum mlxsw_sp_policer_type {
+ MLXSW_SP_POLICER_TYPE_SINGLE_RATE,
+
+ __MLXSW_SP_POLICER_TYPE_MAX,
+ MLXSW_SP_POLICER_TYPE_MAX = __MLXSW_SP_POLICER_TYPE_MAX - 1,
+};
+
+struct mlxsw_sp_policer_params {
+ u64 rate;
+ u64 burst;
+ bool bytes;
+};
+
+int mlxsw_sp_policer_add(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_policer_type type,
+ const struct mlxsw_sp_policer_params *params,
+ struct netlink_ext_ack *extack, u16 *p_policer_index);
+void mlxsw_sp_policer_del(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_policer_type type,
+ u16 policer_index);
+int mlxsw_sp_policer_drops_counter_get(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_policer_type type,
+ u16 policer_index, u64 *p_drops);
+int mlxsw_sp_policers_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_policers_fini(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_policer_resources_register(struct mlxsw_core *mlxsw_core);
+
+/* spectrum_pgt.c */
+int mlxsw_sp_pgt_mid_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_mid);
+void mlxsw_sp_pgt_mid_free(struct mlxsw_sp *mlxsw_sp, u16 mid_base);
+int mlxsw_sp_pgt_mid_alloc_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base,
+ u16 count);
+void mlxsw_sp_pgt_mid_free_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base,
+ u16 count);
+int mlxsw_sp_pgt_entry_port_set(struct mlxsw_sp *mlxsw_sp, u16 mid,
+ u16 smpe, u16 local_port, bool member);
+int mlxsw_sp_pgt_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_pgt_fini(struct mlxsw_sp *mlxsw_sp);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
new file mode 100644
index 000000000..3a636f753
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "reg.h"
+#include "core.h"
+#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
+
+struct mlxsw_sp1_acl_tcam_region {
+ struct mlxsw_sp_acl_ctcam_region cregion;
+ struct mlxsw_sp_acl_tcam_region *region;
+ struct {
+ struct mlxsw_sp_acl_ctcam_chunk cchunk;
+ struct mlxsw_sp_acl_ctcam_entry centry;
+ struct mlxsw_sp_acl_rule_info *rulei;
+ } catchall;
+};
+
+struct mlxsw_sp1_acl_tcam_chunk {
+ struct mlxsw_sp_acl_ctcam_chunk cchunk;
+};
+
+struct mlxsw_sp1_acl_tcam_entry {
+ struct mlxsw_sp_acl_ctcam_entry centry;
+};
+
+static int
+mlxsw_sp1_acl_ctcam_region_entry_insert(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ const char *mask)
+{
+ return 0;
+}
+
+static void
+mlxsw_sp1_acl_ctcam_region_entry_remove(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry)
+{
+}
+
+static const struct mlxsw_sp_acl_ctcam_region_ops
+mlxsw_sp1_acl_ctcam_region_ops = {
+ .entry_insert = mlxsw_sp1_acl_ctcam_region_entry_insert,
+ .entry_remove = mlxsw_sp1_acl_ctcam_region_entry_remove,
+};
+
+static int mlxsw_sp1_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv,
+ struct mlxsw_sp_acl_tcam *tcam)
+{
+ return 0;
+}
+
+static void mlxsw_sp1_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+}
+
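+/* Spectrum-1 C-TCAM regions are terminated by a lowest-precedence catch-all
+ * entry whose only action is "continue", so that packets which do not match
+ * any user rule are explicitly directed to the next lookup.
+ */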
+static int
+mlxsw_sp1_acl_ctcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp1_acl_tcam_region *region)
+{
+ struct mlxsw_sp_acl_rule_info *rulei;
+ int err;
+
+ mlxsw_sp_acl_ctcam_chunk_init(&region->cregion,
+ &region->catchall.cchunk,
+ MLXSW_SP_ACL_TCAM_CATCHALL_PRIO);
+ rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl, NULL);
+ if (IS_ERR(rulei)) {
+ err = PTR_ERR(rulei);
+ goto err_rulei_create;
+ }
+ err = mlxsw_sp_acl_rulei_act_continue(rulei);
+ if (WARN_ON(err))
+ goto err_rulei_act_continue;
+ err = mlxsw_sp_acl_rulei_commit(rulei);
+ if (err)
+ goto err_rulei_commit;
+ err = mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &region->cregion,
+ &region->catchall.cchunk,
+ &region->catchall.centry,
+ rulei, false);
+ if (err)
+ goto err_entry_add;
+ region->catchall.rulei = rulei;
+ return 0;
+
+err_entry_add:
+err_rulei_commit:
+err_rulei_act_continue:
+ mlxsw_sp_acl_rulei_destroy(rulei);
+err_rulei_create:
+ mlxsw_sp_acl_ctcam_chunk_fini(&region->catchall.cchunk);
+ return err;
+}
+
+static void
+mlxsw_sp1_acl_ctcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp1_acl_tcam_region *region)
+{
+ struct mlxsw_sp_acl_rule_info *rulei = region->catchall.rulei;
+
+ mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, &region->cregion,
+ &region->catchall.cchunk,
+ &region->catchall.centry);
+ mlxsw_sp_acl_rulei_destroy(rulei);
+ mlxsw_sp_acl_ctcam_chunk_fini(&region->catchall.cchunk);
+}
+
+static int
+mlxsw_sp1_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv,
+ void *tcam_priv,
+ struct mlxsw_sp_acl_tcam_region *_region,
+ void *hints_priv)
+{
+ struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+ int err;
+
+ err = mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, &region->cregion,
+ _region,
+ &mlxsw_sp1_acl_ctcam_region_ops);
+ if (err)
+ return err;
+ err = mlxsw_sp1_acl_ctcam_region_catchall_add(mlxsw_sp, region);
+ if (err)
+ goto err_catchall_add;
+ region->region = _region;
+ return 0;
+
+err_catchall_add:
+ mlxsw_sp_acl_ctcam_region_fini(&region->cregion);
+ return err;
+}
+
+static void
+mlxsw_sp1_acl_tcam_region_fini(struct mlxsw_sp *mlxsw_sp, void *region_priv)
+{
+ struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+
+ mlxsw_sp1_acl_ctcam_region_catchall_del(mlxsw_sp, region);
+ mlxsw_sp_acl_ctcam_region_fini(&region->cregion);
+}
+
+static int
+mlxsw_sp1_acl_tcam_region_associate(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region)
+{
+ return 0;
+}
+
+static void mlxsw_sp1_acl_tcam_chunk_init(void *region_priv, void *chunk_priv,
+ unsigned int priority)
+{
+ struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv;
+
+ mlxsw_sp_acl_ctcam_chunk_init(&region->cregion, &chunk->cchunk,
+ priority);
+}
+
+static void mlxsw_sp1_acl_tcam_chunk_fini(void *chunk_priv)
+{
+ struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv;
+
+ mlxsw_sp_acl_ctcam_chunk_fini(&chunk->cchunk);
+}
+
+static int mlxsw_sp1_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv;
+ struct mlxsw_sp1_acl_tcam_entry *entry = entry_priv;
+
+ return mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &region->cregion,
+ &chunk->cchunk, &entry->centry,
+ rulei, false);
+}
+
+static void mlxsw_sp1_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv)
+{
+ struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv;
+ struct mlxsw_sp1_acl_tcam_entry *entry = entry_priv;
+
+ mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, &region->cregion,
+ &chunk->cchunk, &entry->centry);
+}
+
+static int
+mlxsw_sp1_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *entry_priv,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ return -EOPNOTSUPP;
+}
+
+static int
+mlxsw_sp1_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *_region,
+ unsigned int offset,
+ bool *activity)
+{
+ char ptce2_pl[MLXSW_REG_PTCE2_LEN];
+ int err;
+
+ mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ,
+ _region->tcam_region_info, offset, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+ if (err)
+ return err;
+ *activity = mlxsw_reg_ptce2_a_get(ptce2_pl);
+ return 0;
+}
+
+static int
+mlxsw_sp1_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *entry_priv,
+ bool *activity)
+{
+ struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp1_acl_tcam_entry *entry = entry_priv;
+ unsigned int offset;
+
+ offset = mlxsw_sp_acl_ctcam_entry_offset(&entry->centry);
+ return mlxsw_sp1_acl_tcam_region_entry_activity_get(mlxsw_sp,
+ region->region,
+ offset, activity);
+}
+
+const struct mlxsw_sp_acl_tcam_ops mlxsw_sp1_acl_tcam_ops = {
+ .key_type = MLXSW_REG_PTAR_KEY_TYPE_FLEX,
+ .priv_size = 0,
+ .init = mlxsw_sp1_acl_tcam_init,
+ .fini = mlxsw_sp1_acl_tcam_fini,
+ .region_priv_size = sizeof(struct mlxsw_sp1_acl_tcam_region),
+ .region_init = mlxsw_sp1_acl_tcam_region_init,
+ .region_fini = mlxsw_sp1_acl_tcam_region_fini,
+ .region_associate = mlxsw_sp1_acl_tcam_region_associate,
+ .chunk_priv_size = sizeof(struct mlxsw_sp1_acl_tcam_chunk),
+ .chunk_init = mlxsw_sp1_acl_tcam_chunk_init,
+ .chunk_fini = mlxsw_sp1_acl_tcam_chunk_fini,
+ .entry_priv_size = sizeof(struct mlxsw_sp1_acl_tcam_entry),
+ .entry_add = mlxsw_sp1_acl_tcam_entry_add,
+ .entry_del = mlxsw_sp1_acl_tcam_entry_del,
+ .entry_action_replace = mlxsw_sp1_acl_tcam_entry_action_replace,
+ .entry_activity_get = mlxsw_sp1_acl_tcam_entry_activity_get,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
new file mode 100644
index 000000000..1e3fc9893
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
@@ -0,0 +1,427 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+
+#include "spectrum.h"
+
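+/* The linear KVD memory is statically split into three partitions: single
+ * entries, 32-entry chunks and 512-entry chunks. Allocations are served from
+ * the partition with the smallest granularity that can satisfy them.
+ */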
+#define MLXSW_SP1_KVDL_SINGLE_BASE 0
+#define MLXSW_SP1_KVDL_SINGLE_SIZE 16384
+#define MLXSW_SP1_KVDL_SINGLE_END \
+ (MLXSW_SP1_KVDL_SINGLE_SIZE + MLXSW_SP1_KVDL_SINGLE_BASE - 1)
+
+#define MLXSW_SP1_KVDL_CHUNKS_BASE \
+ (MLXSW_SP1_KVDL_SINGLE_BASE + MLXSW_SP1_KVDL_SINGLE_SIZE)
+#define MLXSW_SP1_KVDL_CHUNKS_SIZE 49152
+#define MLXSW_SP1_KVDL_CHUNKS_END \
+ (MLXSW_SP1_KVDL_CHUNKS_SIZE + MLXSW_SP1_KVDL_CHUNKS_BASE - 1)
+
+#define MLXSW_SP1_KVDL_LARGE_CHUNKS_BASE \
+ (MLXSW_SP1_KVDL_CHUNKS_BASE + MLXSW_SP1_KVDL_CHUNKS_SIZE)
+#define MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE \
+ (MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP1_KVDL_LARGE_CHUNKS_BASE)
+#define MLXSW_SP1_KVDL_LARGE_CHUNKS_END \
+ (MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE + MLXSW_SP1_KVDL_LARGE_CHUNKS_BASE - 1)
+
+#define MLXSW_SP1_KVDL_SINGLE_ALLOC_SIZE 1
+#define MLXSW_SP1_KVDL_CHUNKS_ALLOC_SIZE 32
+#define MLXSW_SP1_KVDL_LARGE_CHUNKS_ALLOC_SIZE 512
+
+struct mlxsw_sp1_kvdl_part_info {
+ unsigned int part_index;
+ unsigned int start_index;
+ unsigned int end_index;
+ unsigned int alloc_size;
+ enum mlxsw_sp_resource_id resource_id;
+};
+
+enum mlxsw_sp1_kvdl_part_id {
+ MLXSW_SP1_KVDL_PART_ID_SINGLE,
+ MLXSW_SP1_KVDL_PART_ID_CHUNKS,
+ MLXSW_SP1_KVDL_PART_ID_LARGE_CHUNKS,
+};
+
+#define MLXSW_SP1_KVDL_PART_INFO(id) \
+[MLXSW_SP1_KVDL_PART_ID_##id] = { \
+ .start_index = MLXSW_SP1_KVDL_##id##_BASE, \
+ .end_index = MLXSW_SP1_KVDL_##id##_END, \
+ .alloc_size = MLXSW_SP1_KVDL_##id##_ALLOC_SIZE, \
+ .resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_##id, \
+}
+
+static const struct mlxsw_sp1_kvdl_part_info mlxsw_sp1_kvdl_parts_info[] = {
+ MLXSW_SP1_KVDL_PART_INFO(SINGLE),
+ MLXSW_SP1_KVDL_PART_INFO(CHUNKS),
+ MLXSW_SP1_KVDL_PART_INFO(LARGE_CHUNKS),
+};
+
+#define MLXSW_SP1_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp1_kvdl_parts_info)
+
+struct mlxsw_sp1_kvdl_part {
+ struct mlxsw_sp1_kvdl_part_info info;
+ unsigned long usage[]; /* Entries */
+};
+
+struct mlxsw_sp1_kvdl {
+ struct mlxsw_sp1_kvdl_part *parts[MLXSW_SP1_KVDL_PARTS_INFO_LEN];
+};
+
+static struct mlxsw_sp1_kvdl_part *
+mlxsw_sp1_kvdl_alloc_size_part(struct mlxsw_sp1_kvdl *kvdl,
+ unsigned int alloc_size)
+{
+ struct mlxsw_sp1_kvdl_part *part, *min_part = NULL;
+ int i;
+
+ for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) {
+ part = kvdl->parts[i];
+ if (alloc_size <= part->info.alloc_size &&
+ (!min_part ||
+ part->info.alloc_size <= min_part->info.alloc_size))
+ min_part = part;
+ }
+
+ return min_part ?: ERR_PTR(-ENOBUFS);
+}
+
+static struct mlxsw_sp1_kvdl_part *
+mlxsw_sp1_kvdl_index_part(struct mlxsw_sp1_kvdl *kvdl, u32 kvdl_index)
+{
+ struct mlxsw_sp1_kvdl_part *part;
+ int i;
+
+ for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) {
+ part = kvdl->parts[i];
+ if (kvdl_index >= part->info.start_index &&
+ kvdl_index <= part->info.end_index)
+ return part;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static u32
+mlxsw_sp1_kvdl_to_kvdl_index(const struct mlxsw_sp1_kvdl_part_info *info,
+ unsigned int entry_index)
+{
+ return info->start_index + entry_index * info->alloc_size;
+}
+
+static unsigned int
+mlxsw_sp1_kvdl_to_entry_index(const struct mlxsw_sp1_kvdl_part_info *info,
+ u32 kvdl_index)
+{
+ return (kvdl_index - info->start_index) / info->alloc_size;
+}
+
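+/* Each partition tracks usage with a bitmap in which one bit represents
+ * info->alloc_size consecutive KVDL indexes. Allocation claims the first
+ * clear bit and converts it back to a KVDL index.
+ */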
+static int mlxsw_sp1_kvdl_part_alloc(struct mlxsw_sp1_kvdl_part *part,
+ u32 *p_kvdl_index)
+{
+ const struct mlxsw_sp1_kvdl_part_info *info = &part->info;
+ unsigned int entry_index, nr_entries;
+
+ nr_entries = (info->end_index - info->start_index + 1) /
+ info->alloc_size;
+ entry_index = find_first_zero_bit(part->usage, nr_entries);
+ if (entry_index == nr_entries)
+ return -ENOBUFS;
+ __set_bit(entry_index, part->usage);
+
+ *p_kvdl_index = mlxsw_sp1_kvdl_to_kvdl_index(info, entry_index);
+
+ return 0;
+}
+
+static void mlxsw_sp1_kvdl_part_free(struct mlxsw_sp1_kvdl_part *part,
+ u32 kvdl_index)
+{
+ const struct mlxsw_sp1_kvdl_part_info *info = &part->info;
+ unsigned int entry_index;
+
+ entry_index = mlxsw_sp1_kvdl_to_entry_index(info, kvdl_index);
+ __clear_bit(entry_index, part->usage);
+}
+
+static int mlxsw_sp1_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ u32 *p_entry_index)
+{
+ struct mlxsw_sp1_kvdl *kvdl = priv;
+ struct mlxsw_sp1_kvdl_part *part;
+
+ /* Find the partition with the smallest allocation size that satisfies
+ * the requested size.
+ */
+ part = mlxsw_sp1_kvdl_alloc_size_part(kvdl, entry_count);
+ if (IS_ERR(part))
+ return PTR_ERR(part);
+
+ return mlxsw_sp1_kvdl_part_alloc(part, p_entry_index);
+}
+
+static void mlxsw_sp1_kvdl_free(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, int entry_index)
+{
+ struct mlxsw_sp1_kvdl *kvdl = priv;
+ struct mlxsw_sp1_kvdl_part *part;
+
+ part = mlxsw_sp1_kvdl_index_part(kvdl, entry_index);
+ if (IS_ERR(part))
+ return;
+ mlxsw_sp1_kvdl_part_free(part, entry_index);
+}
+
+static int mlxsw_sp1_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
+ void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ unsigned int *p_alloc_size)
+{
+ struct mlxsw_sp1_kvdl *kvdl = priv;
+ struct mlxsw_sp1_kvdl_part *part;
+
+ part = mlxsw_sp1_kvdl_alloc_size_part(kvdl, entry_count);
+ if (IS_ERR(part))
+ return PTR_ERR(part);
+
+ *p_alloc_size = part->info.alloc_size;
+
+ return 0;
+}
+
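+/* When devlink reports a user-configured resource size, recompute the
+ * partition boundaries so that each partition starts right after the
+ * previous one.
+ */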
+static void mlxsw_sp1_kvdl_part_update(struct mlxsw_sp1_kvdl_part *part,
+ struct mlxsw_sp1_kvdl_part *part_prev,
+ unsigned int size)
+{
+ if (!part_prev) {
+ part->info.end_index = size - 1;
+ } else {
+ part->info.start_index = part_prev->info.end_index + 1;
+ part->info.end_index = part->info.start_index + size - 1;
+ }
+}
+
+static struct mlxsw_sp1_kvdl_part *
+mlxsw_sp1_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp1_kvdl_part_info *info,
+ struct mlxsw_sp1_kvdl_part *part_prev)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp1_kvdl_part *part;
+ bool need_update = true;
+ unsigned int nr_entries;
+ u64 resource_size;
+ int err;
+
+ err = devl_resource_size_get(devlink, info->resource_id,
+ &resource_size);
+ if (err) {
+ need_update = false;
+ resource_size = info->end_index - info->start_index + 1;
+ }
+
+ nr_entries = div_u64(resource_size, info->alloc_size);
+ part = kzalloc(struct_size(part, usage, BITS_TO_LONGS(nr_entries)),
+ GFP_KERNEL);
+ if (!part)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(&part->info, info, sizeof(part->info));
+
+ if (need_update)
+ mlxsw_sp1_kvdl_part_update(part, part_prev, resource_size);
+ return part;
+}
+
+static void mlxsw_sp1_kvdl_part_fini(struct mlxsw_sp1_kvdl_part *part)
+{
+ kfree(part);
+}
+
+static int mlxsw_sp1_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp1_kvdl *kvdl)
+{
+ const struct mlxsw_sp1_kvdl_part_info *info;
+ struct mlxsw_sp1_kvdl_part *part_prev = NULL;
+ int err, i;
+
+ for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) {
+ info = &mlxsw_sp1_kvdl_parts_info[i];
+ kvdl->parts[i] = mlxsw_sp1_kvdl_part_init(mlxsw_sp, info,
+ part_prev);
+ if (IS_ERR(kvdl->parts[i])) {
+ err = PTR_ERR(kvdl->parts[i]);
+ goto err_kvdl_part_init;
+ }
+ part_prev = kvdl->parts[i];
+ }
+ return 0;
+
+err_kvdl_part_init:
+ for (i--; i >= 0; i--)
+ mlxsw_sp1_kvdl_part_fini(kvdl->parts[i]);
+ return err;
+}
+
+static void mlxsw_sp1_kvdl_parts_fini(struct mlxsw_sp1_kvdl *kvdl)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++)
+ mlxsw_sp1_kvdl_part_fini(kvdl->parts[i]);
+}
+
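+/* Occupancy of a partition is the number of set usage bits multiplied by the
+ * allocation granularity of that partition.
+ */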
+static u64 mlxsw_sp1_kvdl_part_occ(struct mlxsw_sp1_kvdl_part *part)
+{
+ const struct mlxsw_sp1_kvdl_part_info *info = &part->info;
+ unsigned int nr_entries;
+ int bit = -1;
+ u64 occ = 0;
+
+ nr_entries = (info->end_index -
+ info->start_index + 1) /
+ info->alloc_size;
+ while ((bit = find_next_bit(part->usage, nr_entries, bit + 1))
+ < nr_entries)
+ occ += info->alloc_size;
+ return occ;
+}
+
+static u64 mlxsw_sp1_kvdl_occ_get(void *priv)
+{
+ const struct mlxsw_sp1_kvdl *kvdl = priv;
+ u64 occ = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++)
+ occ += mlxsw_sp1_kvdl_part_occ(kvdl->parts[i]);
+
+ return occ;
+}
+
+static u64 mlxsw_sp1_kvdl_single_occ_get(void *priv)
+{
+ const struct mlxsw_sp1_kvdl *kvdl = priv;
+ struct mlxsw_sp1_kvdl_part *part;
+
+ part = kvdl->parts[MLXSW_SP1_KVDL_PART_ID_SINGLE];
+ return mlxsw_sp1_kvdl_part_occ(part);
+}
+
+static u64 mlxsw_sp1_kvdl_chunks_occ_get(void *priv)
+{
+ const struct mlxsw_sp1_kvdl *kvdl = priv;
+ struct mlxsw_sp1_kvdl_part *part;
+
+ part = kvdl->parts[MLXSW_SP1_KVDL_PART_ID_CHUNKS];
+ return mlxsw_sp1_kvdl_part_occ(part);
+}
+
+static u64 mlxsw_sp1_kvdl_large_chunks_occ_get(void *priv)
+{
+ const struct mlxsw_sp1_kvdl *kvdl = priv;
+ struct mlxsw_sp1_kvdl_part *part;
+
+ part = kvdl->parts[MLXSW_SP1_KVDL_PART_ID_LARGE_CHUNKS];
+ return mlxsw_sp1_kvdl_part_occ(part);
+}
+
+static int mlxsw_sp1_kvdl_init(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp1_kvdl *kvdl = priv;
+ int err;
+
+ err = mlxsw_sp1_kvdl_parts_init(mlxsw_sp, kvdl);
+ if (err)
+ return err;
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ mlxsw_sp1_kvdl_occ_get,
+ kvdl);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
+ mlxsw_sp1_kvdl_single_occ_get,
+ kvdl);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
+ mlxsw_sp1_kvdl_chunks_occ_get,
+ kvdl);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
+ mlxsw_sp1_kvdl_large_chunks_occ_get,
+ kvdl);
+ return 0;
+}
+
+static void mlxsw_sp1_kvdl_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp1_kvdl *kvdl = priv;
+
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR);
+ mlxsw_sp1_kvdl_parts_fini(kvdl);
+}
+
+const struct mlxsw_sp_kvdl_ops mlxsw_sp1_kvdl_ops = {
+ .priv_size = sizeof(struct mlxsw_sp1_kvdl),
+ .init = mlxsw_sp1_kvdl_init,
+ .fini = mlxsw_sp1_kvdl_fini,
+ .alloc = mlxsw_sp1_kvdl_alloc,
+ .free = mlxsw_sp1_kvdl_free,
+ .alloc_size_query = mlxsw_sp1_kvdl_alloc_size_query,
+};
+
+int mlxsw_sp1_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ static struct devlink_resource_size_params size_params;
+ u32 kvdl_max_size;
+ int err;
+
+ kvdl_max_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
+ MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) -
+ MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE);
+
+ devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
+ MLXSW_SP1_KVDL_SINGLE_ALLOC_SIZE,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES,
+ MLXSW_SP1_KVDL_SINGLE_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &size_params);
+ if (err)
+ return err;
+
+ devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
+ MLXSW_SP1_KVDL_CHUNKS_ALLOC_SIZE,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS,
+ MLXSW_SP1_KVDL_CHUNKS_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &size_params);
+ if (err)
+ return err;
+
+ devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
+ MLXSW_SP1_KVDL_LARGE_CHUNKS_ALLOC_SIZE,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS,
+ MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &size_params);
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c
new file mode 100644
index 000000000..c8c675369
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/parman.h>
+
+#include "reg.h"
+#include "spectrum.h"
+#include "core_acl_flex_actions.h"
+#include "spectrum_mr.h"
+
+struct mlxsw_sp1_mr_tcam_region {
+ struct mlxsw_sp *mlxsw_sp;
+ enum mlxsw_reg_rtar_key_type rtar_key_type;
+ struct parman *parman;
+ struct parman_prio *parman_prios;
+};
+
+struct mlxsw_sp1_mr_tcam {
+ struct mlxsw_sp1_mr_tcam_region tcam_regions[MLXSW_SP_L3_PROTO_MAX];
+};
+
+struct mlxsw_sp1_mr_tcam_route {
+ struct parman_item parman_item;
+ struct parman_prio *parman_prio;
+};
+
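+/* Multicast routes are written with the RMFT2 register at the TCAM index
+ * assigned by parman. The same path is used for both initial insertion and
+ * later action updates.
+ */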
+static int mlxsw_sp1_mr_tcam_route_replace(struct mlxsw_sp *mlxsw_sp,
+ struct parman_item *parman_item,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block)
+{
+ char rmft2_pl[MLXSW_REG_RMFT2_LEN];
+
+ switch (key->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, true, parman_item->index,
+ key->vrid,
+ MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
+ ntohl(key->group.addr4),
+ ntohl(key->group_mask.addr4),
+ ntohl(key->source.addr4),
+ ntohl(key->source_mask.addr4),
+ mlxsw_afa_block_first_set(afa_block));
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, true, parman_item->index,
+ key->vrid,
+ MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
+ key->group.addr6,
+ key->group_mask.addr6,
+ key->source.addr6,
+ key->source_mask.addr6,
+ mlxsw_afa_block_first_set(afa_block));
+ }
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
+}
+
+static int mlxsw_sp1_mr_tcam_route_remove(struct mlxsw_sp *mlxsw_sp,
+ struct parman_item *parman_item,
+ struct mlxsw_sp_mr_route_key *key)
+{
+ struct in6_addr zero_addr = IN6ADDR_ANY_INIT;
+ char rmft2_pl[MLXSW_REG_RMFT2_LEN];
+
+ switch (key->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index,
+ key->vrid, 0, 0, 0, 0, 0, 0, NULL);
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, false, parman_item->index,
+ key->vrid, 0, 0, zero_addr, zero_addr,
+ zero_addr, zero_addr, NULL);
+ break;
+ }
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
+}
+
+static struct mlxsw_sp1_mr_tcam_region *
+mlxsw_sp1_mr_tcam_protocol_region(struct mlxsw_sp1_mr_tcam *mr_tcam,
+ enum mlxsw_sp_l3proto proto)
+{
+ return &mr_tcam->tcam_regions[proto];
+}
+
+static int
+mlxsw_sp1_mr_tcam_route_parman_item_add(struct mlxsw_sp1_mr_tcam *mr_tcam,
+ struct mlxsw_sp1_mr_tcam_route *route,
+ struct mlxsw_sp_mr_route_key *key,
+ enum mlxsw_sp_mr_route_prio prio)
+{
+ struct mlxsw_sp1_mr_tcam_region *tcam_region;
+ int err;
+
+ tcam_region = mlxsw_sp1_mr_tcam_protocol_region(mr_tcam, key->proto);
+ err = parman_item_add(tcam_region->parman,
+ &tcam_region->parman_prios[prio],
+ &route->parman_item);
+ if (err)
+ return err;
+
+ route->parman_prio = &tcam_region->parman_prios[prio];
+ return 0;
+}
+
+static void
+mlxsw_sp1_mr_tcam_route_parman_item_remove(struct mlxsw_sp1_mr_tcam *mr_tcam,
+ struct mlxsw_sp1_mr_tcam_route *route,
+ struct mlxsw_sp_mr_route_key *key)
+{
+ struct mlxsw_sp1_mr_tcam_region *tcam_region;
+
+ tcam_region = mlxsw_sp1_mr_tcam_protocol_region(mr_tcam, key->proto);
+ parman_item_remove(tcam_region->parman,
+ route->parman_prio, &route->parman_item);
+}
+
+static int
+mlxsw_sp1_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block,
+ enum mlxsw_sp_mr_route_prio prio)
+{
+ struct mlxsw_sp1_mr_tcam_route *route = route_priv;
+ struct mlxsw_sp1_mr_tcam *mr_tcam = priv;
+ int err;
+
+ err = mlxsw_sp1_mr_tcam_route_parman_item_add(mr_tcam, route,
+ key, prio);
+ if (err)
+ return err;
+
+ err = mlxsw_sp1_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
+ key, afa_block);
+ if (err)
+ goto err_route_replace;
+ return 0;
+
+err_route_replace:
+ mlxsw_sp1_mr_tcam_route_parman_item_remove(mr_tcam, route, key);
+ return err;
+}
+
+static void
+mlxsw_sp1_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key)
+{
+ struct mlxsw_sp1_mr_tcam_route *route = route_priv;
+ struct mlxsw_sp1_mr_tcam *mr_tcam = priv;
+
+ mlxsw_sp1_mr_tcam_route_remove(mlxsw_sp, &route->parman_item, key);
+ mlxsw_sp1_mr_tcam_route_parman_item_remove(mr_tcam, route, key);
+}
+
+static int
+mlxsw_sp1_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block)
+{
+ struct mlxsw_sp1_mr_tcam_route *route = route_priv;
+
+ return mlxsw_sp1_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
+ key, afa_block);
+}
+
+#define MLXSW_SP1_MR_TCAM_REGION_BASE_COUNT 16
+#define MLXSW_SP1_MR_TCAM_REGION_RESIZE_STEP 16
+
+static int
+mlxsw_sp1_mr_tcam_region_alloc(struct mlxsw_sp1_mr_tcam_region *mr_tcam_region)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
+ char rtar_pl[MLXSW_REG_RTAR_LEN];
+
+ mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_ALLOCATE,
+ mr_tcam_region->rtar_key_type,
+ MLXSW_SP1_MR_TCAM_REGION_BASE_COUNT);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
+}
+
+static void
+mlxsw_sp1_mr_tcam_region_free(struct mlxsw_sp1_mr_tcam_region *mr_tcam_region)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
+ char rtar_pl[MLXSW_REG_RTAR_LEN];
+
+ mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_DEALLOCATE,
+ mr_tcam_region->rtar_key_type, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
+}
+
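+/* parman callbacks: the region is resized with the RTAR register, bounded by
+ * ACL_MAX_TCAM_RULES, and rule ranges are relocated with the RRCR register
+ * when priority ordering requires it.
+ */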
+static int mlxsw_sp1_mr_tcam_region_parman_resize(void *priv,
+ unsigned long new_count)
+{
+ struct mlxsw_sp1_mr_tcam_region *mr_tcam_region = priv;
+ struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
+ char rtar_pl[MLXSW_REG_RTAR_LEN];
+ u64 max_tcam_rules;
+
+ max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
+ if (new_count > max_tcam_rules)
+ return -EINVAL;
+ mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_RESIZE,
+ mr_tcam_region->rtar_key_type, new_count);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
+}
+
+static void mlxsw_sp1_mr_tcam_region_parman_move(void *priv,
+ unsigned long from_index,
+ unsigned long to_index,
+ unsigned long count)
+{
+ struct mlxsw_sp1_mr_tcam_region *mr_tcam_region = priv;
+ struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
+ char rrcr_pl[MLXSW_REG_RRCR_LEN];
+
+ mlxsw_reg_rrcr_pack(rrcr_pl, MLXSW_REG_RRCR_OP_MOVE,
+ from_index, count,
+ mr_tcam_region->rtar_key_type, to_index);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rrcr), rrcr_pl);
+}
+
+static const struct parman_ops mlxsw_sp1_mr_tcam_region_parman_ops = {
+ .base_count = MLXSW_SP1_MR_TCAM_REGION_BASE_COUNT,
+ .resize_step = MLXSW_SP1_MR_TCAM_REGION_RESIZE_STEP,
+ .resize = mlxsw_sp1_mr_tcam_region_parman_resize,
+ .move = mlxsw_sp1_mr_tcam_region_parman_move,
+ .algo = PARMAN_ALGO_TYPE_LSORT,
+};
+
+static int
+mlxsw_sp1_mr_tcam_region_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp1_mr_tcam_region *mr_tcam_region,
+ enum mlxsw_reg_rtar_key_type rtar_key_type)
+{
+ struct parman_prio *parman_prios;
+ struct parman *parman;
+ int err;
+ int i;
+
+ mr_tcam_region->rtar_key_type = rtar_key_type;
+ mr_tcam_region->mlxsw_sp = mlxsw_sp;
+
+ err = mlxsw_sp1_mr_tcam_region_alloc(mr_tcam_region);
+ if (err)
+ return err;
+
+ parman = parman_create(&mlxsw_sp1_mr_tcam_region_parman_ops,
+ mr_tcam_region);
+ if (!parman) {
+ err = -ENOMEM;
+ goto err_parman_create;
+ }
+ mr_tcam_region->parman = parman;
+
+ parman_prios = kmalloc_array(MLXSW_SP_MR_ROUTE_PRIO_MAX + 1,
+ sizeof(*parman_prios), GFP_KERNEL);
+ if (!parman_prios) {
+ err = -ENOMEM;
+ goto err_parman_prios_alloc;
+ }
+ mr_tcam_region->parman_prios = parman_prios;
+
+ for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
+ parman_prio_init(mr_tcam_region->parman,
+ &mr_tcam_region->parman_prios[i], i);
+ return 0;
+
+err_parman_prios_alloc:
+ parman_destroy(parman);
+err_parman_create:
+ mlxsw_sp1_mr_tcam_region_free(mr_tcam_region);
+ return err;
+}
+
+static void
+mlxsw_sp1_mr_tcam_region_fini(struct mlxsw_sp1_mr_tcam_region *mr_tcam_region)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
+ parman_prio_fini(&mr_tcam_region->parman_prios[i]);
+ kfree(mr_tcam_region->parman_prios);
+ parman_destroy(mr_tcam_region->parman);
+ mlxsw_sp1_mr_tcam_region_free(mr_tcam_region);
+}
+
+static int mlxsw_sp1_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct mlxsw_sp1_mr_tcam *mr_tcam = priv;
+ struct mlxsw_sp1_mr_tcam_region *region = &mr_tcam->tcam_regions[0];
+ u32 rtar_key;
+ int err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_TCAM_RULES))
+ return -EIO;
+
+ rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST;
+ err = mlxsw_sp1_mr_tcam_region_init(mlxsw_sp,
+ &region[MLXSW_SP_L3_PROTO_IPV4],
+ rtar_key);
+ if (err)
+ return err;
+
+ rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV6_MULTICAST;
+ err = mlxsw_sp1_mr_tcam_region_init(mlxsw_sp,
+ &region[MLXSW_SP_L3_PROTO_IPV6],
+ rtar_key);
+ if (err)
+ goto err_ipv6_region_init;
+
+ return 0;
+
+err_ipv6_region_init:
+ mlxsw_sp1_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
+ return err;
+}
+
+static void mlxsw_sp1_mr_tcam_fini(void *priv)
+{
+ struct mlxsw_sp1_mr_tcam *mr_tcam = priv;
+ struct mlxsw_sp1_mr_tcam_region *region = &mr_tcam->tcam_regions[0];
+
+ mlxsw_sp1_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV6]);
+ mlxsw_sp1_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
+}
+
+const struct mlxsw_sp_mr_tcam_ops mlxsw_sp1_mr_tcam_ops = {
+ .priv_size = sizeof(struct mlxsw_sp1_mr_tcam),
+ .init = mlxsw_sp1_mr_tcam_init,
+ .fini = mlxsw_sp1_mr_tcam_fini,
+ .route_priv_size = sizeof(struct mlxsw_sp1_mr_tcam_route),
+ .route_create = mlxsw_sp1_mr_tcam_route_create,
+ .route_destroy = mlxsw_sp1_mr_tcam_route_destroy,
+ .route_update = mlxsw_sp1_mr_tcam_route_update,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
new file mode 100644
index 000000000..5b0210862
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+
+#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
+#include "core_acl_flex_actions.h"
+
+struct mlxsw_sp2_acl_tcam {
+ struct mlxsw_sp_acl_atcam atcam;
+ u32 kvdl_index;
+ unsigned int kvdl_count;
+};
+
+struct mlxsw_sp2_acl_tcam_region {
+ struct mlxsw_sp_acl_atcam_region aregion;
+ struct mlxsw_sp_acl_tcam_region *region;
+};
+
+struct mlxsw_sp2_acl_tcam_chunk {
+ struct mlxsw_sp_acl_atcam_chunk achunk;
+};
+
+struct mlxsw_sp2_acl_tcam_entry {
+ struct mlxsw_sp_acl_atcam_entry aentry;
+ struct mlxsw_afa_block *act_block;
+};
+
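+/* On Spectrum-2 the C-TCAM acts as a spill-over for the A-TCAM, so every
+ * C-TCAM entry also takes a reference on the eRP mask matching its key mask
+ * within the enclosing A-TCAM region.
+ */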
+static int
+mlxsw_sp2_acl_ctcam_region_entry_insert(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ const char *mask)
+{
+ struct mlxsw_sp_acl_atcam_region *aregion;
+ struct mlxsw_sp_acl_atcam_entry *aentry;
+ struct mlxsw_sp_acl_erp_mask *erp_mask;
+
+ aregion = mlxsw_sp_acl_tcam_cregion_aregion(cregion);
+ aentry = mlxsw_sp_acl_tcam_centry_aentry(centry);
+
+ erp_mask = mlxsw_sp_acl_erp_mask_get(aregion, mask, true);
+ if (IS_ERR(erp_mask))
+ return PTR_ERR(erp_mask);
+ aentry->erp_mask = erp_mask;
+
+ return 0;
+}
+
+static void
+mlxsw_sp2_acl_ctcam_region_entry_remove(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry)
+{
+ struct mlxsw_sp_acl_atcam_region *aregion;
+ struct mlxsw_sp_acl_atcam_entry *aentry;
+
+ aregion = mlxsw_sp_acl_tcam_cregion_aregion(cregion);
+ aentry = mlxsw_sp_acl_tcam_centry_aentry(centry);
+
+ mlxsw_sp_acl_erp_mask_put(aregion, aentry->erp_mask);
+}
+
+static const struct mlxsw_sp_acl_ctcam_region_ops
+mlxsw_sp2_acl_ctcam_region_ops = {
+ .entry_insert = mlxsw_sp2_acl_ctcam_region_entry_insert,
+ .entry_remove = mlxsw_sp2_acl_ctcam_region_entry_remove,
+};
+
+static int mlxsw_sp2_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv,
+ struct mlxsw_sp_acl_tcam *_tcam)
+{
+ struct mlxsw_sp2_acl_tcam *tcam = priv;
+ struct mlxsw_afa_block *afa_block;
+ char pefa_pl[MLXSW_REG_PEFA_LEN];
+ char pgcr_pl[MLXSW_REG_PGCR_LEN];
+ char *enc_actions;
+ int i;
+ int err;
+
+ /* Some TCAM regions are not exposed to the host and are used internally
+ * by the device. Allocate KVDL entries for the default actions of
+ * these regions to prevent the host from overwriting them.
+ */
+ tcam->kvdl_count = _tcam->max_regions;
+ if (MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_DEFAULT_ACTIONS))
+ tcam->kvdl_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ ACL_MAX_DEFAULT_ACTIONS);
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ tcam->kvdl_count, &tcam->kvdl_index);
+ if (err)
+ return err;
+
+ /* Create a flex action block and set the default action (continue),
+ * but do not commit it. We only need the current action-set encoding,
+ * which is written using the PEFA register to all indexes of all regions.
+ */
+ afa_block = mlxsw_afa_block_create(mlxsw_sp->afa);
+ if (IS_ERR(afa_block)) {
+ err = PTR_ERR(afa_block);
+ goto err_afa_block;
+ }
+ err = mlxsw_afa_block_continue(afa_block);
+ if (WARN_ON(err))
+ goto err_afa_block_continue;
+ enc_actions = mlxsw_afa_block_cur_set(afa_block);
+
+ /* Only write to KVDL entries used by TCAM regions exposed to the
+ * host.
+ */
+ for (i = 0; i < _tcam->max_regions; i++) {
+ mlxsw_reg_pefa_pack(pefa_pl, tcam->kvdl_index + i,
+ true, enc_actions);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
+ if (err)
+ goto err_pefa_write;
+ }
+ mlxsw_reg_pgcr_pack(pgcr_pl, tcam->kvdl_index);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pgcr), pgcr_pl);
+ if (err)
+ goto err_pgcr_write;
+
+ err = mlxsw_sp_acl_atcam_init(mlxsw_sp, &tcam->atcam);
+ if (err)
+ goto err_atcam_init;
+
+ mlxsw_afa_block_destroy(afa_block);
+ return 0;
+
+err_atcam_init:
+err_pgcr_write:
+err_pefa_write:
+err_afa_block_continue:
+ mlxsw_afa_block_destroy(afa_block);
+err_afa_block:
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ tcam->kvdl_count, tcam->kvdl_index);
+ return err;
+}
+
+static void mlxsw_sp2_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct mlxsw_sp2_acl_tcam *tcam = priv;
+
+ mlxsw_sp_acl_atcam_fini(mlxsw_sp, &tcam->atcam);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ tcam->kvdl_count, tcam->kvdl_index);
+}
+
+static int
+mlxsw_sp2_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv,
+ void *tcam_priv,
+ struct mlxsw_sp_acl_tcam_region *_region,
+ void *hints_priv)
+{
+ struct mlxsw_sp2_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp2_acl_tcam *tcam = tcam_priv;
+
+ region->region = _region;
+
+ return mlxsw_sp_acl_atcam_region_init(mlxsw_sp, &tcam->atcam,
+ &region->aregion,
+ _region, hints_priv,
+ &mlxsw_sp2_acl_ctcam_region_ops);
+}
+
+static void
+mlxsw_sp2_acl_tcam_region_fini(struct mlxsw_sp *mlxsw_sp, void *region_priv)
+{
+ struct mlxsw_sp2_acl_tcam_region *region = region_priv;
+
+ mlxsw_sp_acl_atcam_region_fini(&region->aregion);
+}
+
+static int
+mlxsw_sp2_acl_tcam_region_associate(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region)
+{
+ return mlxsw_sp_acl_atcam_region_associate(mlxsw_sp, region->id);
+}
+
+static void *mlxsw_sp2_acl_tcam_region_rehash_hints_get(void *region_priv)
+{
+ struct mlxsw_sp2_acl_tcam_region *region = region_priv;
+
+ return mlxsw_sp_acl_atcam_rehash_hints_get(&region->aregion);
+}
+
+static void mlxsw_sp2_acl_tcam_region_rehash_hints_put(void *hints_priv)
+{
+ mlxsw_sp_acl_atcam_rehash_hints_put(hints_priv);
+}
+
+static void mlxsw_sp2_acl_tcam_chunk_init(void *region_priv, void *chunk_priv,
+ unsigned int priority)
+{
+ struct mlxsw_sp2_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
+
+ mlxsw_sp_acl_atcam_chunk_init(&region->aregion, &chunk->achunk,
+ priority);
+}
+
+static void mlxsw_sp2_acl_tcam_chunk_fini(void *chunk_priv)
+{
+ struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
+
+ mlxsw_sp_acl_atcam_chunk_fini(&chunk->achunk);
+}
+
+static int mlxsw_sp2_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp2_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
+ struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;
+
+ entry->act_block = rulei->act_block;
+ return mlxsw_sp_acl_atcam_entry_add(mlxsw_sp, &region->aregion,
+ &chunk->achunk, &entry->aentry,
+ rulei);
+}
+
+static void mlxsw_sp2_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv)
+{
+ struct mlxsw_sp2_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
+ struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;
+
+ mlxsw_sp_acl_atcam_entry_del(mlxsw_sp, &region->aregion, &chunk->achunk,
+ &entry->aentry);
+}
+
+static int
+mlxsw_sp2_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *entry_priv,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp2_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;
+
+ entry->act_block = rulei->act_block;
+ return mlxsw_sp_acl_atcam_entry_action_replace(mlxsw_sp,
+ &region->aregion,
+ &entry->aentry, rulei);
+}
+
+static int
+mlxsw_sp2_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *entry_priv,
+ bool *activity)
+{
+ struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;
+
+ return mlxsw_afa_block_activity_get(entry->act_block, activity);
+}
+
+const struct mlxsw_sp_acl_tcam_ops mlxsw_sp2_acl_tcam_ops = {
+ .key_type = MLXSW_REG_PTAR_KEY_TYPE_FLEX2,
+ .priv_size = sizeof(struct mlxsw_sp2_acl_tcam),
+ .init = mlxsw_sp2_acl_tcam_init,
+ .fini = mlxsw_sp2_acl_tcam_fini,
+ .region_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_region),
+ .region_init = mlxsw_sp2_acl_tcam_region_init,
+ .region_fini = mlxsw_sp2_acl_tcam_region_fini,
+ .region_associate = mlxsw_sp2_acl_tcam_region_associate,
+ .region_rehash_hints_get = mlxsw_sp2_acl_tcam_region_rehash_hints_get,
+ .region_rehash_hints_put = mlxsw_sp2_acl_tcam_region_rehash_hints_put,
+ .chunk_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_chunk),
+ .chunk_init = mlxsw_sp2_acl_tcam_chunk_init,
+ .chunk_fini = mlxsw_sp2_acl_tcam_chunk_fini,
+ .entry_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_entry),
+ .entry_add = mlxsw_sp2_acl_tcam_entry_add,
+ .entry_del = mlxsw_sp2_acl_tcam_entry_del,
+ .entry_action_replace = mlxsw_sp2_acl_tcam_entry_action_replace,
+ .entry_activity_get = mlxsw_sp2_acl_tcam_entry_activity_get,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
new file mode 100644
index 000000000..24ff305a2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+
+#include "spectrum.h"
+#include "core.h"
+#include "reg.h"
+#include "resources.h"
+
+struct mlxsw_sp2_kvdl_part_info {
+ u8 res_type;
+ /* For each defined partition we need to know how many usage bits
+ * it needs and how many indexes are represented by a single bit.
+ * These values can be queried from FW as resources, so keep the
+ * resource ids for this purpose in the partition definition.
+ */
+ enum mlxsw_res_id usage_bit_count_res_id;
+ enum mlxsw_res_id index_range_res_id;
+};
+
+#define MLXSW_SP2_KVDL_PART_INFO(_entry_type, _res_type, \
+ _usage_bit_count_res_id, _index_range_res_id) \
+[MLXSW_SP_KVDL_ENTRY_TYPE_##_entry_type] = { \
+ .res_type = _res_type, \
+ .usage_bit_count_res_id = MLXSW_RES_ID_##_usage_bit_count_res_id, \
+ .index_range_res_id = MLXSW_RES_ID_##_index_range_res_id, \
+}
+
+static const struct mlxsw_sp2_kvdl_part_info mlxsw_sp2_kvdl_parts_info[] = {
+ MLXSW_SP2_KVDL_PART_INFO(ADJ, 0x21, KVD_SIZE, MAX_KVD_LINEAR_RANGE),
+ MLXSW_SP2_KVDL_PART_INFO(ACTSET, 0x23, MAX_KVD_ACTION_SETS,
+ MAX_KVD_ACTION_SETS),
+ MLXSW_SP2_KVDL_PART_INFO(PBS, 0x24, KVD_SIZE, KVD_SIZE),
+ MLXSW_SP2_KVDL_PART_INFO(MCRIGR, 0x26, KVD_SIZE, KVD_SIZE),
+ MLXSW_SP2_KVDL_PART_INFO(IPV6_ADDRESS, 0x28, KVD_SIZE, KVD_SIZE),
+ MLXSW_SP2_KVDL_PART_INFO(TNUMT, 0x29, KVD_SIZE, KVD_SIZE),
+};
+
+#define MLXSW_SP2_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp2_kvdl_parts_info)
+
+struct mlxsw_sp2_kvdl_part {
+ const struct mlxsw_sp2_kvdl_part_info *info;
+ unsigned int usage_bit_count;
+ unsigned int indexes_per_usage_bit;
+ unsigned int last_allocated_bit;
+ unsigned long usage[]; /* Usage bits */
+};
+
+struct mlxsw_sp2_kvdl {
+ struct mlxsw_sp2_kvdl_part *parts[MLXSW_SP2_KVDL_PARTS_INFO_LEN];
+};
+
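+/* Find bit_count consecutive free bits in the part's usage bitmap. The
+ * search starts right after the last allocated bit and wraps around the
+ * end of the bitmap at most once.
+ */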
+static int mlxsw_sp2_kvdl_part_find_zero_bits(struct mlxsw_sp2_kvdl_part *part,
+ unsigned int bit_count,
+ unsigned int *p_bit)
+{
+ unsigned int start_bit;
+ unsigned int bit;
+ unsigned int i;
+ bool wrap = false;
+
+ start_bit = part->last_allocated_bit + 1;
+ if (start_bit == part->usage_bit_count)
+ start_bit = 0;
+ bit = start_bit;
+again:
+ bit = find_next_zero_bit(part->usage, part->usage_bit_count, bit);
+ if (!wrap && bit + bit_count >= part->usage_bit_count) {
+ wrap = true;
+ bit = 0;
+ goto again;
+ }
+ if (wrap && bit + bit_count >= start_bit)
+ return -ENOBUFS;
+ for (i = 0; i < bit_count; i++) {
+ if (test_bit(bit + i, part->usage)) {
+ bit += bit_count;
+ goto again;
+ }
+ }
+ *p_bit = bit;
+ return 0;
+}
+
+static int mlxsw_sp2_kvdl_part_alloc(struct mlxsw_sp2_kvdl_part *part,
+ unsigned int size,
+ u32 *p_kvdl_index)
+{
+ unsigned int bit_count;
+ unsigned int bit;
+ unsigned int i;
+ int err;
+
+ bit_count = DIV_ROUND_UP(size, part->indexes_per_usage_bit);
+ err = mlxsw_sp2_kvdl_part_find_zero_bits(part, bit_count, &bit);
+ if (err)
+ return err;
+ for (i = 0; i < bit_count; i++)
+ __set_bit(bit + i, part->usage);
+ *p_kvdl_index = bit * part->indexes_per_usage_bit;
+ return 0;
+}
+
+static int mlxsw_sp2_kvdl_rec_del(struct mlxsw_sp *mlxsw_sp, u8 res_type,
+ u16 size, u32 kvdl_index)
+{
+ char *iedr_pl;
+ int err;
+
+ iedr_pl = kmalloc(MLXSW_REG_IEDR_LEN, GFP_KERNEL);
+ if (!iedr_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_iedr_pack(iedr_pl);
+ mlxsw_reg_iedr_rec_pack(iedr_pl, 0, res_type, size, kvdl_index);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(iedr), iedr_pl);
+ kfree(iedr_pl);
+ return err;
+}
+
+static void mlxsw_sp2_kvdl_part_free(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp2_kvdl_part *part,
+ unsigned int size, u32 kvdl_index)
+{
+ unsigned int bit_count;
+ unsigned int bit;
+ unsigned int i;
+ int err;
+
+ /* We need to ask FW to delete the previously used KVD linear index */
+ err = mlxsw_sp2_kvdl_rec_del(mlxsw_sp, part->info->res_type,
+ size, kvdl_index);
+ if (err)
+ return;
+
+ bit_count = DIV_ROUND_UP(size, part->indexes_per_usage_bit);
+ bit = kvdl_index / part->indexes_per_usage_bit;
+ for (i = 0; i < bit_count; i++)
+ __clear_bit(bit + i, part->usage);
+}
+
+static int mlxsw_sp2_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ u32 *p_entry_index)
+{
+ unsigned int size = entry_count * mlxsw_sp_kvdl_entry_size(type);
+ struct mlxsw_sp2_kvdl *kvdl = priv;
+ struct mlxsw_sp2_kvdl_part *part = kvdl->parts[type];
+
+ return mlxsw_sp2_kvdl_part_alloc(part, size, p_entry_index);
+}
+
+static void mlxsw_sp2_kvdl_free(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ int entry_index)
+{
+ unsigned int size = entry_count * mlxsw_sp_kvdl_entry_size(type);
+ struct mlxsw_sp2_kvdl *kvdl = priv;
+ struct mlxsw_sp2_kvdl_part *part = kvdl->parts[type];
+
+ return mlxsw_sp2_kvdl_part_free(mlxsw_sp, part, size, entry_index);
+}
+
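+/* The requested entry count can always be allocated exactly, so report it
+ * back unchanged.
+ */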
+static int mlxsw_sp2_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
+ void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ unsigned int *p_alloc_count)
+{
+ *p_alloc_count = entry_count;
+ return 0;
+}
+
+static struct mlxsw_sp2_kvdl_part *
+mlxsw_sp2_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp2_kvdl_part_info *info)
+{
+ unsigned int indexes_per_usage_bit;
+ struct mlxsw_sp2_kvdl_part *part;
+ unsigned int index_range;
+ unsigned int usage_bit_count;
+ size_t usage_size;
+
+ if (!mlxsw_core_res_valid(mlxsw_sp->core,
+ info->usage_bit_count_res_id) ||
+ !mlxsw_core_res_valid(mlxsw_sp->core,
+ info->index_range_res_id))
+ return ERR_PTR(-EIO);
+ usage_bit_count = mlxsw_core_res_get(mlxsw_sp->core,
+ info->usage_bit_count_res_id);
+ index_range = mlxsw_core_res_get(mlxsw_sp->core,
+ info->index_range_res_id);
+
+ /* For some partitions, one usage bit represents a group of indexes.
+ * That's why we compute the number of indexes per usage bit here,
+ * according to queried resources.
+ */
+ indexes_per_usage_bit = index_range / usage_bit_count;
+
+ usage_size = BITS_TO_LONGS(usage_bit_count) * sizeof(unsigned long);
+ part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL);
+ if (!part)
+ return ERR_PTR(-ENOMEM);
+ part->info = info;
+ part->usage_bit_count = usage_bit_count;
+ part->indexes_per_usage_bit = indexes_per_usage_bit;
+ part->last_allocated_bit = usage_bit_count - 1;
+ return part;
+}
+
+static void mlxsw_sp2_kvdl_part_fini(struct mlxsw_sp2_kvdl_part *part)
+{
+ kfree(part);
+}
+
+static int mlxsw_sp2_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp2_kvdl *kvdl)
+{
+ const struct mlxsw_sp2_kvdl_part_info *info;
+ int i;
+ int err;
+
+ for (i = 0; i < MLXSW_SP2_KVDL_PARTS_INFO_LEN; i++) {
+ info = &mlxsw_sp2_kvdl_parts_info[i];
+ kvdl->parts[i] = mlxsw_sp2_kvdl_part_init(mlxsw_sp, info);
+ if (IS_ERR(kvdl->parts[i])) {
+ err = PTR_ERR(kvdl->parts[i]);
+ goto err_kvdl_part_init;
+ }
+ }
+ return 0;
+
+err_kvdl_part_init:
+ for (i--; i >= 0; i--)
+ mlxsw_sp2_kvdl_part_fini(kvdl->parts[i]);
+ return err;
+}
+
+static void mlxsw_sp2_kvdl_parts_fini(struct mlxsw_sp2_kvdl *kvdl)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP2_KVDL_PARTS_INFO_LEN; i++)
+ mlxsw_sp2_kvdl_part_fini(kvdl->parts[i]);
+}
+
+static int mlxsw_sp2_kvdl_init(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct mlxsw_sp2_kvdl *kvdl = priv;
+
+ return mlxsw_sp2_kvdl_parts_init(mlxsw_sp, kvdl);
+}
+
+static void mlxsw_sp2_kvdl_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct mlxsw_sp2_kvdl *kvdl = priv;
+
+ mlxsw_sp2_kvdl_parts_fini(kvdl);
+}
+
+const struct mlxsw_sp_kvdl_ops mlxsw_sp2_kvdl_ops = {
+ .priv_size = sizeof(struct mlxsw_sp2_kvdl),
+ .init = mlxsw_sp2_kvdl_init,
+ .fini = mlxsw_sp2_kvdl_fini,
+ .alloc = mlxsw_sp2_kvdl_alloc,
+ .free = mlxsw_sp2_kvdl_free,
+ .alloc_size_query = mlxsw_sp2_kvdl_alloc_size_query,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
new file mode 100644
index 000000000..b1178b7a7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+
+#include "core_acl_flex_actions.h"
+#include "spectrum.h"
+#include "spectrum_mr.h"
+
+struct mlxsw_sp2_mr_tcam {
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_flow_block *flow_block;
+ struct mlxsw_sp_acl_ruleset *ruleset4;
+ struct mlxsw_sp_acl_ruleset *ruleset6;
+};
+
+struct mlxsw_sp2_mr_route {
+ struct mlxsw_sp2_mr_tcam *mr_tcam;
+};
+
+static struct mlxsw_sp_acl_ruleset *
+mlxsw_sp2_mr_tcam_proto_ruleset(struct mlxsw_sp2_mr_tcam *mr_tcam,
+ enum mlxsw_sp_l3proto proto)
+{
+ switch (proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ return mr_tcam->ruleset4;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ return mr_tcam->ruleset6;
+ }
+ return NULL;
+}
+
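+/* Bind the ACL group to the multicast router for the given protocol using
+ * the PEMRBT register, so that multicast routes are looked up in that group.
+ */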
+static int mlxsw_sp2_mr_tcam_bind_group(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_reg_pemrbt_protocol protocol,
+ struct mlxsw_sp_acl_ruleset *ruleset)
+{
+ char pemrbt_pl[MLXSW_REG_PEMRBT_LEN];
+ u16 group_id;
+
+ group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
+
+ mlxsw_reg_pemrbt_pack(pemrbt_pl, protocol, group_id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pemrbt), pemrbt_pl);
+}
+
+static const enum mlxsw_afk_element mlxsw_sp2_mr_tcam_usage_ipv4[] = {
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB,
+ MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ MLXSW_AFK_ELEMENT_DST_IP_0_31,
+};
+
+static int mlxsw_sp2_mr_tcam_ipv4_init(struct mlxsw_sp2_mr_tcam *mr_tcam)
+{
+ struct mlxsw_afk_element_usage elusage;
+ int err;
+
+ /* Initialize IPv4 ACL group. */
+ mlxsw_afk_element_usage_fill(&elusage,
+ mlxsw_sp2_mr_tcam_usage_ipv4,
+ ARRAY_SIZE(mlxsw_sp2_mr_tcam_usage_ipv4));
+ mr_tcam->ruleset4 = mlxsw_sp_acl_ruleset_get(mr_tcam->mlxsw_sp,
+ mr_tcam->flow_block,
+ MLXSW_SP_L3_PROTO_IPV4,
+ MLXSW_SP_ACL_PROFILE_MR,
+ &elusage);
+
+ if (IS_ERR(mr_tcam->ruleset4))
+ return PTR_ERR(mr_tcam->ruleset4);
+
+ /* MC Router groups should be bound before routes are inserted. */
+ err = mlxsw_sp2_mr_tcam_bind_group(mr_tcam->mlxsw_sp,
+ MLXSW_REG_PEMRBT_PROTO_IPV4,
+ mr_tcam->ruleset4);
+ if (err)
+ goto err_bind_group;
+
+ return 0;
+
+err_bind_group:
+ mlxsw_sp_acl_ruleset_put(mr_tcam->mlxsw_sp, mr_tcam->ruleset4);
+ return err;
+}
+
+static void mlxsw_sp2_mr_tcam_ipv4_fini(struct mlxsw_sp2_mr_tcam *mr_tcam)
+{
+ mlxsw_sp_acl_ruleset_put(mr_tcam->mlxsw_sp, mr_tcam->ruleset4);
+}
+
+static const enum mlxsw_afk_element mlxsw_sp2_mr_tcam_usage_ipv6[] = {
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB,
+ MLXSW_AFK_ELEMENT_SRC_IP_96_127,
+ MLXSW_AFK_ELEMENT_SRC_IP_64_95,
+ MLXSW_AFK_ELEMENT_SRC_IP_32_63,
+ MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ MLXSW_AFK_ELEMENT_DST_IP_96_127,
+ MLXSW_AFK_ELEMENT_DST_IP_64_95,
+ MLXSW_AFK_ELEMENT_DST_IP_32_63,
+ MLXSW_AFK_ELEMENT_DST_IP_0_31,
+};
+
+static int mlxsw_sp2_mr_tcam_ipv6_init(struct mlxsw_sp2_mr_tcam *mr_tcam)
+{
+ struct mlxsw_afk_element_usage elusage;
+ int err;
+
+ /* Initialize IPv6 ACL group */
+ mlxsw_afk_element_usage_fill(&elusage,
+ mlxsw_sp2_mr_tcam_usage_ipv6,
+ ARRAY_SIZE(mlxsw_sp2_mr_tcam_usage_ipv6));
+ mr_tcam->ruleset6 = mlxsw_sp_acl_ruleset_get(mr_tcam->mlxsw_sp,
+ mr_tcam->flow_block,
+ MLXSW_SP_L3_PROTO_IPV6,
+ MLXSW_SP_ACL_PROFILE_MR,
+ &elusage);
+
+ if (IS_ERR(mr_tcam->ruleset6))
+ return PTR_ERR(mr_tcam->ruleset6);
+
+ /* MC Router groups should be bound before routes are inserted. */
+ err = mlxsw_sp2_mr_tcam_bind_group(mr_tcam->mlxsw_sp,
+ MLXSW_REG_PEMRBT_PROTO_IPV6,
+ mr_tcam->ruleset6);
+ if (err)
+ goto err_bind_group;
+
+ return 0;
+
+err_bind_group:
+ mlxsw_sp_acl_ruleset_put(mr_tcam->mlxsw_sp, mr_tcam->ruleset6);
+ return err;
+}
+
+static void mlxsw_sp2_mr_tcam_ipv6_fini(struct mlxsw_sp2_mr_tcam *mr_tcam)
+{
+ mlxsw_sp_acl_ruleset_put(mr_tcam->mlxsw_sp, mr_tcam->ruleset6);
+}
+
+static void
+mlxsw_sp2_mr_tcam_rule_parse4(struct mlxsw_sp_acl_rule_info *rulei,
+ struct mlxsw_sp_mr_route_key *key)
+{
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ (char *) &key->source.addr4,
+ (char *) &key->source_mask.addr4, 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
+ (char *) &key->group.addr4,
+ (char *) &key->group_mask.addr4, 4);
+}
+
+static void
+mlxsw_sp2_mr_tcam_rule_parse6(struct mlxsw_sp_acl_rule_info *rulei,
+ struct mlxsw_sp_mr_route_key *key)
+{
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
+ &key->source.addr6.s6_addr[0x0],
+ &key->source_mask.addr6.s6_addr[0x0], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
+ &key->source.addr6.s6_addr[0x4],
+ &key->source_mask.addr6.s6_addr[0x4], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
+ &key->source.addr6.s6_addr[0x8],
+ &key->source_mask.addr6.s6_addr[0x8], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ &key->source.addr6.s6_addr[0xc],
+ &key->source_mask.addr6.s6_addr[0xc], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
+ &key->group.addr6.s6_addr[0x0],
+ &key->group_mask.addr6.s6_addr[0x0], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
+ &key->group.addr6.s6_addr[0x4],
+ &key->group_mask.addr6.s6_addr[0x4], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
+ &key->group.addr6.s6_addr[0x8],
+ &key->group_mask.addr6.s6_addr[0x8], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
+ &key->group.addr6.s6_addr[0xc],
+ &key->group_mask.addr6.s6_addr[0xc], 4);
+}
+
+static void
+mlxsw_sp2_mr_tcam_rule_parse(struct mlxsw_sp_acl_rule *rule,
+ struct mlxsw_sp_mr_route_key *key,
+ unsigned int priority)
+{
+ struct mlxsw_sp_acl_rule_info *rulei;
+
+ rulei = mlxsw_sp_acl_rule_rulei(rule);
+ rulei->priority = priority;
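+ /* The virtual router ID is split across two key elements: the lower
+ * eight bits go into the LSB element, the remaining upper bits into
+ * the MSB element.
+ */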
+ mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB,
+ key->vrid, GENMASK(7, 0));
+ mlxsw_sp_acl_rulei_keymask_u32(rulei,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB,
+ key->vrid >> 8, GENMASK(3, 0));
+ switch (key->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ return mlxsw_sp2_mr_tcam_rule_parse4(rulei, key);
+ case MLXSW_SP_L3_PROTO_IPV6:
+ return mlxsw_sp2_mr_tcam_rule_parse6(rulei, key);
+ }
+}
+
+static int
+mlxsw_sp2_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block,
+ enum mlxsw_sp_mr_route_prio prio)
+{
+ struct mlxsw_sp2_mr_route *mr_route = route_priv;
+ struct mlxsw_sp2_mr_tcam *mr_tcam = priv;
+ struct mlxsw_sp_acl_ruleset *ruleset;
+ struct mlxsw_sp_acl_rule *rule;
+ int err;
+
+ mr_route->mr_tcam = mr_tcam;
+ ruleset = mlxsw_sp2_mr_tcam_proto_ruleset(mr_tcam, key->proto);
+ if (WARN_ON(!ruleset))
+ return -EINVAL;
+
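+ /* The route private pointer doubles as the rule cookie, so the rule
+ * can be looked up again on route destroy and update.
+ */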
+ rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset,
+ (unsigned long) route_priv, afa_block,
+ NULL);
+ if (IS_ERR(rule))
+ return PTR_ERR(rule);
+
+ mlxsw_sp2_mr_tcam_rule_parse(rule, key, prio);
+ err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
+ if (err)
+ goto err_rule_add;
+
+ return 0;
+
+err_rule_add:
+ mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
+ return err;
+}
+
+static void
+mlxsw_sp2_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key)
+{
+ struct mlxsw_sp2_mr_tcam *mr_tcam = priv;
+ struct mlxsw_sp_acl_ruleset *ruleset;
+ struct mlxsw_sp_acl_rule *rule;
+
+ ruleset = mlxsw_sp2_mr_tcam_proto_ruleset(mr_tcam, key->proto);
+ if (WARN_ON(!ruleset))
+ return;
+
+ rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset,
+ (unsigned long) route_priv);
+ if (WARN_ON(!rule))
+ return;
+
+ mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
+ mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
+}
+
+static int
+mlxsw_sp2_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block)
+{
+ struct mlxsw_sp2_mr_route *mr_route = route_priv;
+ struct mlxsw_sp2_mr_tcam *mr_tcam = mr_route->mr_tcam;
+ struct mlxsw_sp_acl_ruleset *ruleset;
+ struct mlxsw_sp_acl_rule *rule;
+
+ ruleset = mlxsw_sp2_mr_tcam_proto_ruleset(mr_tcam, key->proto);
+ if (WARN_ON(!ruleset))
+ return -EINVAL;
+
+ rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset,
+ (unsigned long) route_priv);
+ if (WARN_ON(!rule))
+ return -EINVAL;
+
+ return mlxsw_sp_acl_rule_action_replace(mlxsw_sp, rule, afa_block);
+}
+
+static int mlxsw_sp2_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct mlxsw_sp2_mr_tcam *mr_tcam = priv;
+ int err;
+
+ mr_tcam->mlxsw_sp = mlxsw_sp;
+ mr_tcam->flow_block = mlxsw_sp_flow_block_create(mlxsw_sp, NULL);
+ if (!mr_tcam->flow_block)
+ return -ENOMEM;
+
+ err = mlxsw_sp2_mr_tcam_ipv4_init(mr_tcam);
+ if (err)
+ goto err_ipv4_init;
+
+ err = mlxsw_sp2_mr_tcam_ipv6_init(mr_tcam);
+ if (err)
+ goto err_ipv6_init;
+
+ return 0;
+
+err_ipv6_init:
+ mlxsw_sp2_mr_tcam_ipv4_fini(mr_tcam);
+err_ipv4_init:
+ mlxsw_sp_flow_block_destroy(mr_tcam->flow_block);
+ return err;
+}
+
+static void mlxsw_sp2_mr_tcam_fini(void *priv)
+{
+ struct mlxsw_sp2_mr_tcam *mr_tcam = priv;
+
+ mlxsw_sp2_mr_tcam_ipv6_fini(mr_tcam);
+ mlxsw_sp2_mr_tcam_ipv4_fini(mr_tcam);
+ mlxsw_sp_flow_block_destroy(mr_tcam->flow_block);
+}
+
+const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops = {
+ .priv_size = sizeof(struct mlxsw_sp2_mr_tcam),
+ .init = mlxsw_sp2_mr_tcam_init,
+ .fini = mlxsw_sp2_mr_tcam_fini,
+ .route_priv_size = sizeof(struct mlxsw_sp2_mr_route),
+ .route_create = mlxsw_sp2_mr_tcam_route_create,
+ .route_destroy = mlxsw_sp2_mr_tcam_route_destroy,
+ .route_update = mlxsw_sp2_mr_tcam_route_update,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
new file mode 100644
index 000000000..6c5af0185
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -0,0 +1,1124 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/rhashtable.h>
+#include <linux/netdevice.h>
+#include <linux/mutex.h>
+#include <net/net_namespace.h>
+#include <net/tc_act/tc_vlan.h>
+
+#include "reg.h"
+#include "core.h"
+#include "resources.h"
+#include "spectrum.h"
+#include "core_acl_flex_keys.h"
+#include "core_acl_flex_actions.h"
+#include "spectrum_acl_tcam.h"
+
+struct mlxsw_sp_acl {
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_afk *afk;
+ struct mlxsw_sp_fid *dummy_fid;
+ struct rhashtable ruleset_ht;
+ struct list_head rules;
+ struct mutex rules_lock; /* Protects rules list */
+ struct {
+ struct delayed_work dw;
+ unsigned long interval; /* ms */
+#define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
+ } rule_activity_update;
+ struct mlxsw_sp_acl_tcam tcam;
+};
+
+struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
+{
+ return acl->afk;
+}
+
+struct mlxsw_sp_acl_ruleset_ht_key {
+ struct mlxsw_sp_flow_block *block;
+ u32 chain_index;
+ const struct mlxsw_sp_acl_profile_ops *ops;
+};
+
+struct mlxsw_sp_acl_ruleset {
+ struct rhash_head ht_node; /* Member of acl HT */
+ struct mlxsw_sp_acl_ruleset_ht_key ht_key;
+ struct rhashtable rule_ht;
+ unsigned int ref_count;
+ unsigned int min_prio;
+ unsigned int max_prio;
+ unsigned long priv[];
+ /* priv has to be always the last item */
+};
+
+struct mlxsw_sp_acl_rule {
+ struct rhash_head ht_node; /* Member of rule HT */
+ struct list_head list;
+ unsigned long cookie; /* HT key */
+ struct mlxsw_sp_acl_ruleset *ruleset;
+ struct mlxsw_sp_acl_rule_info *rulei;
+ u64 last_used;
+ u64 last_packets;
+ u64 last_bytes;
+ u64 last_drops;
+ unsigned long priv[];
+ /* priv has to be always the last item */
+};
+
+static const struct rhashtable_params mlxsw_sp_acl_ruleset_ht_params = {
+ .key_len = sizeof(struct mlxsw_sp_acl_ruleset_ht_key),
+ .key_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_key),
+ .head_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_node),
+ .automatic_shrinking = true,
+};
+
+static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params = {
+ .key_len = sizeof(unsigned long),
+ .key_offset = offsetof(struct mlxsw_sp_acl_rule, cookie),
+ .head_offset = offsetof(struct mlxsw_sp_acl_rule, ht_node),
+ .automatic_shrinking = true,
+};
+
+struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
+{
+ return mlxsw_sp->acl->dummy_fid;
+}
+
+static bool
+mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
+{
+ /* We hold a reference on ruleset ourselves */
+ return ruleset->ref_count == 2;
+}
+
+int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_flow_block_binding *binding)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
+ const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+
+ return ops->ruleset_bind(mlxsw_sp, ruleset->priv,
+ binding->mlxsw_sp_port, binding->ingress);
+}
+
+void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_flow_block_binding *binding)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
+ const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+
+ ops->ruleset_unbind(mlxsw_sp, ruleset->priv,
+ binding->mlxsw_sp_port, binding->ingress);
+}
+
+static int
+mlxsw_sp_acl_ruleset_block_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset,
+ struct mlxsw_sp_flow_block *block)
+{
+ struct mlxsw_sp_flow_block_binding *binding;
+ int err;
+
+ block->ruleset_zero = ruleset;
+ list_for_each_entry(binding, &block->binding_list, list) {
+ err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
+ if (err)
+ goto rollback;
+ }
+ return 0;
+
+rollback:
+ list_for_each_entry_continue_reverse(binding, &block->binding_list,
+ list)
+ mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
+ block->ruleset_zero = NULL;
+
+ return err;
+}
+
+static void
+mlxsw_sp_acl_ruleset_block_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset,
+ struct mlxsw_sp_flow_block *block)
+{
+ struct mlxsw_sp_flow_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list)
+ mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
+ block->ruleset_zero = NULL;
+}
+
+static struct mlxsw_sp_acl_ruleset *
+mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block, u32 chain_index,
+ const struct mlxsw_sp_acl_profile_ops *ops,
+ struct mlxsw_afk_element_usage *tmplt_elusage)
+{
+ struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
+ struct mlxsw_sp_acl_ruleset *ruleset;
+ size_t alloc_size;
+ int err;
+
+ alloc_size = sizeof(*ruleset) + ops->ruleset_priv_size;
+ ruleset = kzalloc(alloc_size, GFP_KERNEL);
+ if (!ruleset)
+ return ERR_PTR(-ENOMEM);
+ ruleset->ref_count = 1;
+ ruleset->ht_key.block = block;
+ ruleset->ht_key.chain_index = chain_index;
+ ruleset->ht_key.ops = ops;
+
+ err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
+ if (err)
+ goto err_rhashtable_init;
+
+ err = ops->ruleset_add(mlxsw_sp, &acl->tcam, ruleset->priv,
+ tmplt_elusage, &ruleset->min_prio,
+ &ruleset->max_prio);
+ if (err)
+ goto err_ops_ruleset_add;
+
+ err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
+ mlxsw_sp_acl_ruleset_ht_params);
+ if (err)
+ goto err_ht_insert;
+
+ return ruleset;
+
+err_ht_insert:
+ ops->ruleset_del(mlxsw_sp, ruleset->priv);
+err_ops_ruleset_add:
+ rhashtable_destroy(&ruleset->rule_ht);
+err_rhashtable_init:
+ kfree(ruleset);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset)
+{
+ const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
+
+ rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
+ mlxsw_sp_acl_ruleset_ht_params);
+ ops->ruleset_del(mlxsw_sp, ruleset->priv);
+ rhashtable_destroy(&ruleset->rule_ht);
+ kfree(ruleset);
+}
+
+static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
+{
+ ruleset->ref_count++;
+}
+
+static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset)
+{
+ if (--ruleset->ref_count)
+ return;
+ mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
+}
+
+static struct mlxsw_sp_acl_ruleset *
+__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl,
+ struct mlxsw_sp_flow_block *block, u32 chain_index,
+ const struct mlxsw_sp_acl_profile_ops *ops)
+{
+ struct mlxsw_sp_acl_ruleset_ht_key ht_key;
+
+ memset(&ht_key, 0, sizeof(ht_key));
+ ht_key.block = block;
+ ht_key.chain_index = chain_index;
+ ht_key.ops = ops;
+ return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
+ mlxsw_sp_acl_ruleset_ht_params);
+}
+
+struct mlxsw_sp_acl_ruleset *
+mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block, u32 chain_index,
+ enum mlxsw_sp_acl_profile profile)
+{
+ const struct mlxsw_sp_acl_profile_ops *ops;
+ struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
+ struct mlxsw_sp_acl_ruleset *ruleset;
+
+ ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
+ if (!ops)
+ return ERR_PTR(-EINVAL);
+ ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
+ if (!ruleset)
+ return ERR_PTR(-ENOENT);
+ return ruleset;
+}
+
+struct mlxsw_sp_acl_ruleset *
+mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block, u32 chain_index,
+ enum mlxsw_sp_acl_profile profile,
+ struct mlxsw_afk_element_usage *tmplt_elusage)
+{
+ const struct mlxsw_sp_acl_profile_ops *ops;
+ struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
+ struct mlxsw_sp_acl_ruleset *ruleset;
+
+ ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
+ if (!ops)
+ return ERR_PTR(-EINVAL);
+
+ ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
+ if (ruleset) {
+ mlxsw_sp_acl_ruleset_ref_inc(ruleset);
+ return ruleset;
+ }
+ return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops,
+ tmplt_elusage);
+}
+
+void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset)
+{
+ mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
+}
+
+u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset)
+{
+ const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+
+ return ops->ruleset_group_id(ruleset->priv);
+}
+
+void mlxsw_sp_acl_ruleset_prio_get(struct mlxsw_sp_acl_ruleset *ruleset,
+ unsigned int *p_min_prio,
+ unsigned int *p_max_prio)
+{
+ *p_min_prio = ruleset->min_prio;
+ *p_max_prio = ruleset->max_prio;
+}
+
+struct mlxsw_sp_acl_rule_info *
+mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl,
+ struct mlxsw_afa_block *afa_block)
+{
+ struct mlxsw_sp_acl_rule_info *rulei;
+ int err;
+
+ rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
+ if (!rulei)
+ return ERR_PTR(-ENOMEM);
+
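+ /* If the caller provided an action block, reuse it. Otherwise create
+ * one and mark it as owned by this rule info so it is destroyed
+ * together with it.
+ */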
+ if (afa_block) {
+ rulei->act_block = afa_block;
+ return rulei;
+ }
+
+ rulei->act_block = mlxsw_afa_block_create(acl->mlxsw_sp->afa);
+ if (IS_ERR(rulei->act_block)) {
+ err = PTR_ERR(rulei->act_block);
+ goto err_afa_block_create;
+ }
+ rulei->action_created = 1;
+ return rulei;
+
+err_afa_block_create:
+ kfree(rulei);
+ return ERR_PTR(err);
+}
+
+void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei)
+{
+ if (rulei->action_created)
+ mlxsw_afa_block_destroy(rulei->act_block);
+ kfree(rulei);
+}
+
+int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
+{
+ return mlxsw_afa_block_commit(rulei->act_block);
+}
+
+void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
+ unsigned int priority)
+{
+ rulei->priority = priority;
+}
+
+void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
+ enum mlxsw_afk_element element,
+ u32 key_value, u32 mask_value)
+{
+ mlxsw_afk_values_add_u32(&rulei->values, element,
+ key_value, mask_value);
+}
+
+void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
+ enum mlxsw_afk_element element,
+ const char *key_value,
+ const char *mask_value, unsigned int len)
+{
+ mlxsw_afk_values_add_buf(&rulei->values, element,
+ key_value, mask_value, len);
+}
+
+int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei)
+{
+ return mlxsw_afa_block_continue(rulei->act_block);
+}
+
+int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
+ u16 group_id)
+{
+ return mlxsw_afa_block_jump(rulei->act_block, group_id);
+}
+
+int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei)
+{
+ return mlxsw_afa_block_terminate(rulei->act_block);
+}
+
+int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei,
+ bool ingress,
+ const struct flow_action_cookie *fa_cookie,
+ struct netlink_ext_ack *extack)
+{
+ return mlxsw_afa_block_append_drop(rulei->act_block, ingress,
+ fa_cookie, extack);
+}
+
+int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei)
+{
+ return mlxsw_afa_block_append_trap(rulei->act_block,
+ MLXSW_TRAP_ID_ACL0);
+}
+
+int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct net_device *out_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ u16 local_port;
+ bool in_port;
+
+ if (out_dev) {
+ if (!mlxsw_sp_port_dev_check(out_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid output device");
+ return -EINVAL;
+ }
+ mlxsw_sp_port = netdev_priv(out_dev);
+ if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid output device");
+ return -EINVAL;
+ }
+ local_port = mlxsw_sp_port->local_port;
+ in_port = false;
+ } else {
+ /* If out_dev is NULL, the caller wants to
+ * forward to the ingress port.
+ */
+ local_port = 0;
+ in_port = true;
+ }
+ return mlxsw_afa_block_append_fwd(rulei->act_block,
+ local_port, in_port, extack);
+}
+
+int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct mlxsw_sp_flow_block *block,
+ struct net_device *out_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_flow_block_binding *binding;
+ struct mlxsw_sp_port *in_port;
+
+ if (!list_is_singular(&block->binding_list)) {
+ NL_SET_ERR_MSG_MOD(extack, "Only a single mirror source is allowed");
+ return -EOPNOTSUPP;
+ }
+ binding = list_first_entry(&block->binding_list,
+ struct mlxsw_sp_flow_block_binding, list);
+ in_port = binding->mlxsw_sp_port;
+
+ return mlxsw_afa_block_append_mirror(rulei->act_block,
+ in_port->local_port,
+ out_dev,
+ binding->ingress,
+ extack);
+}
+
+int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u32 action, u16 vid, u16 proto, u8 prio,
+ struct netlink_ext_ack *extack)
+{
+ u8 ethertype;
+
+ if (action == FLOW_ACTION_VLAN_MANGLE) {
+ switch (proto) {
+ case ETH_P_8021Q:
+ ethertype = 0;
+ break;
+ case ETH_P_8021AD:
+ ethertype = 1;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN protocol");
+ dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n",
+ proto);
+ return -EINVAL;
+ }
+
+ return mlxsw_afa_block_append_vlan_modify(rulei->act_block,
+ vid, prio, ethertype,
+ extack);
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN action");
+ dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n");
+ return -EINVAL;
+ }
+}
+
+int mlxsw_sp_acl_rulei_act_priority(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u32 prio, struct netlink_ext_ack *extack)
+{
+ /* Even though both Linux and Spectrum switches support 16 priorities,
+ * spectrum_qdisc only processes the first eight priomap elements, and
+ * the DCB and PFC features are tied to 8 priorities as well. Therefore
+ * bounce attempts to prioritize packets to higher priorities.
+ */
+ if (prio >= IEEE_8021QAZ_MAX_TCS) {
+ NL_SET_ERR_MSG_MOD(extack, "Only priorities 0..7 are supported");
+ return -EINVAL;
+ }
+ return mlxsw_afa_block_append_qos_switch_prio(rulei->act_block, prio,
+ extack);
+}
+
+struct mlxsw_sp_acl_mangle_action {
+ enum flow_action_mangle_base htype;
+ /* Offset is u32-aligned. */
+ u32 offset;
+ /* Mask bits are unset for the modified field. */
+ u32 mask;
+ /* Shift required to extract the set value. */
+ u32 shift;
+ enum mlxsw_sp_acl_mangle_field field;
+};
+
+#define MLXSW_SP_ACL_MANGLE_ACTION(_htype, _offset, _mask, _shift, _field) \
+ { \
+ .htype = _htype, \
+ .offset = _offset, \
+ .mask = _mask, \
+ .shift = _shift, \
+ .field = MLXSW_SP_ACL_MANGLE_FIELD_##_field, \
+ }
+
+#define MLXSW_SP_ACL_MANGLE_ACTION_IP4(_offset, _mask, _shift, _field) \
+ MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_IP4, \
+ _offset, _mask, _shift, _field)
+
+#define MLXSW_SP_ACL_MANGLE_ACTION_IP6(_offset, _mask, _shift, _field) \
+ MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_IP6, \
+ _offset, _mask, _shift, _field)
+
+#define MLXSW_SP_ACL_MANGLE_ACTION_TCP(_offset, _mask, _shift, _field) \
+ MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_TCP, _offset, _mask, _shift, _field)
+
+#define MLXSW_SP_ACL_MANGLE_ACTION_UDP(_offset, _mask, _shift, _field) \
+ MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_UDP, _offset, _mask, _shift, _field)
+
+static struct mlxsw_sp_acl_mangle_action mlxsw_sp_acl_mangle_actions[] = {
+ MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xff00ffff, 16, IP_DSFIELD),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xff03ffff, 18, IP_DSCP),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xfffcffff, 16, IP_ECN),
+
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xf00fffff, 20, IP_DSFIELD),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xf03fffff, 22, IP_DSCP),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xffcfffff, 20, IP_ECN),
+
+ MLXSW_SP_ACL_MANGLE_ACTION_TCP(0, 0x0000ffff, 16, IP_SPORT),
+ MLXSW_SP_ACL_MANGLE_ACTION_TCP(0, 0xffff0000, 0, IP_DPORT),
+
+ MLXSW_SP_ACL_MANGLE_ACTION_UDP(0, 0x0000ffff, 16, IP_SPORT),
+ MLXSW_SP_ACL_MANGLE_ACTION_UDP(0, 0xffff0000, 0, IP_DPORT),
+
+ MLXSW_SP_ACL_MANGLE_ACTION_IP4(12, 0x00000000, 0, IP4_SIP),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP4(16, 0x00000000, 0, IP4_DIP),
+
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(8, 0x00000000, 0, IP6_SIP_1),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(12, 0x00000000, 0, IP6_SIP_2),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(16, 0x00000000, 0, IP6_SIP_3),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(20, 0x00000000, 0, IP6_SIP_4),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(24, 0x00000000, 0, IP6_DIP_1),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(28, 0x00000000, 0, IP6_DIP_2),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(32, 0x00000000, 0, IP6_DIP_3),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(36, 0x00000000, 0, IP6_DIP_4),
+};
+
+static int
+mlxsw_sp_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct mlxsw_sp_acl_mangle_action *mact,
+ u32 val, struct netlink_ext_ack *extack)
+{
+ switch (mact->field) {
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP_DSFIELD:
+ return mlxsw_afa_block_append_qos_dsfield(rulei->act_block,
+ val, extack);
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP_DSCP:
+ return mlxsw_afa_block_append_qos_dscp(rulei->act_block,
+ val, extack);
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP_ECN:
+ return mlxsw_afa_block_append_qos_ecn(rulei->act_block,
+ val, extack);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlxsw_sp1_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct mlxsw_sp_acl_mangle_action *mact,
+ u32 val, struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = mlxsw_sp_acl_rulei_act_mangle_field(mlxsw_sp, rulei, mact, val, extack);
+ if (err != -EOPNOTSUPP)
+ return err;
+
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field");
+ return err;
+}
+
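+/* IPv6 addresses are mangled 32 bits at a time, but the IP action is
+ * emitted with two 32-bit words at once. Remember the first (odd) word of
+ * a pair here and emit the action only when the matching even word arrives.
+ */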
+static int
+mlxsw_sp2_acl_rulei_act_mangle_field_ip_odd(struct mlxsw_sp_acl_rule_info *rulei,
+ enum mlxsw_sp_acl_mangle_field field,
+ u32 val, struct netlink_ext_ack *extack)
+{
+ if (!rulei->ipv6_valid) {
+ rulei->ipv6.prev_val = val;
+ rulei->ipv6_valid = true;
+ rulei->ipv6.prev_field = field;
+ return 0;
+ }
+
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field order");
+ return -EOPNOTSUPP;
+}
+
+static int mlxsw_sp2_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct mlxsw_sp_acl_mangle_action *mact,
+ u32 val, struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = mlxsw_sp_acl_rulei_act_mangle_field(mlxsw_sp, rulei, mact, val, extack);
+ if (err != -EOPNOTSUPP)
+ return err;
+
+ switch (mact->field) {
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP_SPORT:
+ return mlxsw_afa_block_append_l4port(rulei->act_block, false, val, extack);
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP_DPORT:
+ return mlxsw_afa_block_append_l4port(rulei->act_block, true, val, extack);
+ /* IPv4 fields */
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP4_SIP:
+ return mlxsw_afa_block_append_ip(rulei->act_block, false,
+ true, val, 0, extack);
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP4_DIP:
+ return mlxsw_afa_block_append_ip(rulei->act_block, true,
+ true, val, 0, extack);
+ /* IPv6 fields */
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_1:
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_3:
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_1:
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_3:
+ return mlxsw_sp2_acl_rulei_act_mangle_field_ip_odd(rulei,
+ mact->field,
+ val, extack);
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_2:
+ if (rulei->ipv6_valid &&
+ rulei->ipv6.prev_field == MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_1) {
+ rulei->ipv6_valid = false;
+ return mlxsw_afa_block_append_ip(rulei->act_block,
+ false, false, val,
+ rulei->ipv6.prev_val,
+ extack);
+ }
+ break;
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_4:
+ if (rulei->ipv6_valid &&
+ rulei->ipv6.prev_field == MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_3) {
+ rulei->ipv6_valid = false;
+ return mlxsw_afa_block_append_ip(rulei->act_block,
+ false, true, val,
+ rulei->ipv6.prev_val,
+ extack);
+ }
+ break;
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_2:
+ if (rulei->ipv6_valid &&
+ rulei->ipv6.prev_field == MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_1) {
+ rulei->ipv6_valid = false;
+ return mlxsw_afa_block_append_ip(rulei->act_block,
+ true, false, val,
+ rulei->ipv6.prev_val,
+ extack);
+ }
+ break;
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_4:
+ if (rulei->ipv6_valid &&
+ rulei->ipv6.prev_field == MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_3) {
+ rulei->ipv6_valid = false;
+ return mlxsw_afa_block_append_ip(rulei->act_block,
+ true, true, val,
+ rulei->ipv6.prev_val,
+ extack);
+ }
+ break;
+ default:
+ break;
+ }
+
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field");
+ return err;
+}
+
+int mlxsw_sp_acl_rulei_act_mangle(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ enum flow_action_mangle_base htype,
+ u32 offset, u32 mask, u32 val,
+ struct netlink_ext_ack *extack)
+{
+ const struct mlxsw_sp_acl_rulei_ops *acl_rulei_ops = mlxsw_sp->acl_rulei_ops;
+ struct mlxsw_sp_acl_mangle_action *mact;
+ size_t i;
+
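+ /* Match the requested htype/offset/mask against the table above,
+ * extract the field value using the table's shift and hand it to the
+ * per-ASIC mangle handler.
+ */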
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_acl_mangle_actions); ++i) {
+ mact = &mlxsw_sp_acl_mangle_actions[i];
+ if (mact->htype == htype &&
+ mact->offset == offset &&
+ mact->mask == mask) {
+ val >>= mact->shift;
+ return acl_rulei_ops->act_mangle_field(mlxsw_sp,
+ rulei, mact,
+ val, extack);
+ }
+ }
+
+ NL_SET_ERR_MSG_MOD(extack, "Unknown mangle field");
+ return -EINVAL;
+}
+
+int mlxsw_sp_acl_rulei_act_police(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u32 index, u64 rate_bytes_ps,
+ u32 burst, struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = mlxsw_afa_block_append_police(rulei->act_block, index,
+ rate_bytes_ps, burst,
+ &rulei->policer_index, extack);
+ if (err)
+ return err;
+
+ rulei->policer_index_valid = true;
+
+ return 0;
+}
+
+int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = mlxsw_afa_block_append_counter(rulei->act_block,
+ &rulei->counter_index, extack);
+ if (err)
+ return err;
+ rulei->counter_valid = true;
+ return 0;
+}
+
+int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u16 fid, struct netlink_ext_ack *extack)
+{
+ return mlxsw_afa_block_append_fid_set(rulei->act_block, fid, extack);
+}
+
+int mlxsw_sp_acl_rulei_act_sample(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct mlxsw_sp_flow_block *block,
+ struct psample_group *psample_group, u32 rate,
+ u32 trunc_size, bool truncate,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_flow_block_binding *binding;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+
+ if (!list_is_singular(&block->binding_list)) {
+ NL_SET_ERR_MSG_MOD(extack, "Only a single sampling source is allowed");
+ return -EOPNOTSUPP;
+ }
+ binding = list_first_entry(&block->binding_list,
+ struct mlxsw_sp_flow_block_binding, list);
+ mlxsw_sp_port = binding->mlxsw_sp_port;
+
+ return mlxsw_afa_block_append_sampler(rulei->act_block,
+ mlxsw_sp_port->local_port,
+ psample_group, rate, trunc_size,
+ truncate, binding->ingress,
+ extack);
+}
+
+struct mlxsw_sp_acl_rule *
+mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset,
+ unsigned long cookie,
+ struct mlxsw_afa_block *afa_block,
+ struct netlink_ext_ack *extack)
+{
+ const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ struct mlxsw_sp_acl_rule *rule;
+ int err;
+
+ mlxsw_sp_acl_ruleset_ref_inc(ruleset);
+ rule = kzalloc(sizeof(*rule) + ops->rule_priv_size,
+ GFP_KERNEL);
+ if (!rule) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+ rule->cookie = cookie;
+ rule->ruleset = ruleset;
+
+ rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl, afa_block);
+ if (IS_ERR(rule->rulei)) {
+ err = PTR_ERR(rule->rulei);
+ goto err_rulei_create;
+ }
+
+ return rule;
+
+err_rulei_create:
+ kfree(rule);
+err_alloc:
+ mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
+ return ERR_PTR(err);
+}
+
+void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule *rule)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
+
+ mlxsw_sp_acl_rulei_destroy(rule->rulei);
+ kfree(rule);
+ mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
+}
+
+int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule *rule)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
+ const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ struct mlxsw_sp_flow_block *block = ruleset->ht_key.block;
+ int err;
+
+ err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei);
+ if (err)
+ return err;
+
+ err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node,
+ mlxsw_sp_acl_rule_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ if (!ruleset->ht_key.chain_index &&
+ mlxsw_sp_acl_ruleset_is_singular(ruleset)) {
+ /* Only the ruleset with chain index 0, the implicit one,
+ * needs to be bound directly to the device. The remaining
+ * rulesets are reached through "goto" actions.
+ */
+ err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, block);
+ if (err)
+ goto err_ruleset_block_bind;
+ }
+
+ mutex_lock(&mlxsw_sp->acl->rules_lock);
+ list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
+ mutex_unlock(&mlxsw_sp->acl->rules_lock);
+ block->rule_count++;
+ block->ingress_blocker_rule_count += rule->rulei->ingress_bind_blocker;
+ block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker;
+ return 0;
+
+err_ruleset_block_bind:
+ rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
+ mlxsw_sp_acl_rule_ht_params);
+err_rhashtable_insert:
+ ops->rule_del(mlxsw_sp, rule->priv);
+ return err;
+}
+
+void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule *rule)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
+ const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ struct mlxsw_sp_flow_block *block = ruleset->ht_key.block;
+
+ block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
+ block->ingress_blocker_rule_count -= rule->rulei->ingress_bind_blocker;
+ block->rule_count--;
+ mutex_lock(&mlxsw_sp->acl->rules_lock);
+ list_del(&rule->list);
+ mutex_unlock(&mlxsw_sp->acl->rules_lock);
+ if (!ruleset->ht_key.chain_index &&
+ mlxsw_sp_acl_ruleset_is_singular(ruleset))
+ mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, block);
+ rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
+ mlxsw_sp_acl_rule_ht_params);
+ ops->rule_del(mlxsw_sp, rule->priv);
+}
+
+int mlxsw_sp_acl_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule *rule,
+ struct mlxsw_afa_block *afa_block)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
+ const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ struct mlxsw_sp_acl_rule_info *rulei;
+
+ rulei = mlxsw_sp_acl_rule_rulei(rule);
+ rulei->act_block = afa_block;
+
+ return ops->rule_action_replace(mlxsw_sp, rule->priv, rule->rulei);
+}
+
+struct mlxsw_sp_acl_rule *
+mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset,
+ unsigned long cookie)
+{
+ return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie,
+ mlxsw_sp_acl_rule_ht_params);
+}
+
+struct mlxsw_sp_acl_rule_info *
+mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule)
+{
+ return rule->rulei;
+}
+
+static int mlxsw_sp_acl_rule_activity_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule *rule)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
+ const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ bool active;
+ int err;
+
+ err = ops->rule_activity_get(mlxsw_sp, rule->priv, &active);
+ if (err)
+ return err;
+ if (active)
+ rule->last_used = jiffies;
+ return 0;
+}
+
+static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
+{
+ struct mlxsw_sp_acl_rule *rule;
+ int err;
+
+ mutex_lock(&acl->rules_lock);
+ list_for_each_entry(rule, &acl->rules, list) {
+ err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
+ rule);
+ if (err)
+ goto err_rule_update;
+ }
+ mutex_unlock(&acl->rules_lock);
+ return 0;
+
+err_rule_update:
+ mutex_unlock(&acl->rules_lock);
+ return err;
+}
+
+static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl)
+{
+ unsigned long interval = acl->rule_activity_update.interval;
+
+ mlxsw_core_schedule_dw(&acl->rule_activity_update.dw,
+ msecs_to_jiffies(interval));
+}
+
+static void mlxsw_sp_acl_rule_activity_update_work(struct work_struct *work)
+{
+ struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl,
+ rule_activity_update.dw.work);
+ int err;
+
+ err = mlxsw_sp_acl_rules_activity_update(acl);
+ if (err)
+ dev_err(acl->mlxsw_sp->bus_info->dev, "Could not update acl activity\n");
+
+ mlxsw_sp_acl_rule_activity_work_schedule(acl);
+}
+
+int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule *rule,
+ u64 *packets, u64 *bytes, u64 *drops,
+ u64 *last_use,
+ enum flow_action_hw_stats *used_hw_stats)
+
+{
+ enum mlxsw_sp_policer_type type = MLXSW_SP_POLICER_TYPE_SINGLE_RATE;
+ struct mlxsw_sp_acl_rule_info *rulei;
+ u64 current_packets = 0;
+ u64 current_bytes = 0;
+ u64 current_drops = 0;
+ int err;
+
+ rulei = mlxsw_sp_acl_rule_rulei(rule);
+ if (rulei->counter_valid) {
+ err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
+ &current_packets,
+ &current_bytes);
+ if (err)
+ return err;
+ *used_hw_stats = FLOW_ACTION_HW_STATS_IMMEDIATE;
+ }
+ if (rulei->policer_index_valid) {
+ err = mlxsw_sp_policer_drops_counter_get(mlxsw_sp, type,
+ rulei->policer_index,
+ &current_drops);
+ if (err)
+ return err;
+ }
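+ /* Report deltas since the previous query and cache the current
+ * counter values for the next one.
+ */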
+ *packets = current_packets - rule->last_packets;
+ *bytes = current_bytes - rule->last_bytes;
+ *drops = current_drops - rule->last_drops;
+ *last_use = rule->last_used;
+
+ rule->last_bytes = current_bytes;
+ rule->last_packets = current_packets;
+ rule->last_drops = current_drops;
+
+ return 0;
+}
+
+int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_fid *fid;
+ struct mlxsw_sp_acl *acl;
+ size_t alloc_size;
+ int err;
+
+ alloc_size = sizeof(*acl) + mlxsw_sp_acl_tcam_priv_size(mlxsw_sp);
+ acl = kzalloc(alloc_size, GFP_KERNEL);
+ if (!acl)
+ return -ENOMEM;
+ mlxsw_sp->acl = acl;
+ acl->mlxsw_sp = mlxsw_sp;
+ acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ ACL_FLEX_KEYS),
+ mlxsw_sp->afk_ops);
+ if (!acl->afk) {
+ err = -ENOMEM;
+ goto err_afk_create;
+ }
+
+ err = rhashtable_init(&acl->ruleset_ht,
+ &mlxsw_sp_acl_ruleset_ht_params);
+ if (err)
+ goto err_rhashtable_init;
+
+ fid = mlxsw_sp_fid_dummy_get(mlxsw_sp);
+ if (IS_ERR(fid)) {
+ err = PTR_ERR(fid);
+ goto err_fid_get;
+ }
+ acl->dummy_fid = fid;
+
+ INIT_LIST_HEAD(&acl->rules);
+ mutex_init(&acl->rules_lock);
+ err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam);
+ if (err)
+ goto err_acl_ops_init;
+
+ /* Create the delayed work for the rule activity_update */
+ INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
+ mlxsw_sp_acl_rule_activity_update_work);
+ acl->rule_activity_update.interval = MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS;
+ mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 0);
+ return 0;
+
+err_acl_ops_init:
+ mutex_destroy(&acl->rules_lock);
+ mlxsw_sp_fid_put(fid);
+err_fid_get:
+ rhashtable_destroy(&acl->ruleset_ht);
+err_rhashtable_init:
+ mlxsw_afk_destroy(acl->afk);
+err_afk_create:
+ kfree(acl);
+ return err;
+}
+
+void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
+
+ cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
+ mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam);
+ mutex_destroy(&acl->rules_lock);
+ WARN_ON(!list_empty(&acl->rules));
+ mlxsw_sp_fid_put(acl->dummy_fid);
+ rhashtable_destroy(&acl->ruleset_ht);
+ mlxsw_afk_destroy(acl->afk);
+ kfree(acl);
+}
+
+u32 mlxsw_sp_acl_region_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
+
+ return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(mlxsw_sp,
+ &acl->tcam);
+}
+
+int mlxsw_sp_acl_region_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp, u32 val)
+{
+ struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
+
+ return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(mlxsw_sp,
+ &acl->tcam, val);
+}
+
+struct mlxsw_sp_acl_rulei_ops mlxsw_sp1_acl_rulei_ops = {
+ .act_mangle_field = mlxsw_sp1_acl_rulei_act_mangle_field,
+};
+
+struct mlxsw_sp_acl_rulei_ops mlxsw_sp2_acl_rulei_ops = {
+ .act_mangle_field = mlxsw_sp2_acl_rulei_act_mangle_field,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
new file mode 100644
index 000000000..4b713832f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
@@ -0,0 +1,646 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/gfp.h>
+#include <linux/refcount.h>
+#include <linux/rhashtable.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/mlxsw.h>
+
+#include "reg.h"
+#include "core.h"
+#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
+#include "core_acl_flex_keys.h"
+
+#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_START 0
+#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_END 5
+
+struct mlxsw_sp_acl_atcam_lkey_id_ht_key {
+ char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* MSB blocks */
+ u8 erp_id;
+};
+
+struct mlxsw_sp_acl_atcam_lkey_id {
+ struct rhash_head ht_node;
+ struct mlxsw_sp_acl_atcam_lkey_id_ht_key ht_key;
+ refcount_t refcnt;
+ u32 id;
+};
+
+struct mlxsw_sp_acl_atcam_region_ops {
+ int (*init)(struct mlxsw_sp_acl_atcam_region *aregion);
+ void (*fini)(struct mlxsw_sp_acl_atcam_region *aregion);
+ struct mlxsw_sp_acl_atcam_lkey_id *
+ (*lkey_id_get)(struct mlxsw_sp_acl_atcam_region *aregion,
+ char *enc_key, u8 erp_id);
+ void (*lkey_id_put)(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id);
+};
+
+struct mlxsw_sp_acl_atcam_region_generic {
+ struct mlxsw_sp_acl_atcam_lkey_id dummy_lkey_id;
+};
+
+struct mlxsw_sp_acl_atcam_region_12kb {
+ struct rhashtable lkey_ht;
+ unsigned int max_lkey_id;
+ unsigned long *used_lkey_id;
+};
+
+static const struct rhashtable_params mlxsw_sp_acl_atcam_lkey_id_ht_params = {
+ .key_len = sizeof(struct mlxsw_sp_acl_atcam_lkey_id_ht_key),
+ .key_offset = offsetof(struct mlxsw_sp_acl_atcam_lkey_id, ht_key),
+ .head_offset = offsetof(struct mlxsw_sp_acl_atcam_lkey_id, ht_node),
+};
+
+static const struct rhashtable_params mlxsw_sp_acl_atcam_entries_ht_params = {
+ .key_len = sizeof(struct mlxsw_sp_acl_atcam_entry_ht_key),
+ .key_offset = offsetof(struct mlxsw_sp_acl_atcam_entry, ht_key),
+ .head_offset = offsetof(struct mlxsw_sp_acl_atcam_entry, ht_node),
+};
+
+static bool
+mlxsw_sp_acl_atcam_is_centry(const struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ return mlxsw_sp_acl_erp_mask_is_ctcam(aentry->erp_mask);
+}
+
+static int
+mlxsw_sp_acl_atcam_region_generic_init(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp_acl_atcam_region_generic *region_generic;
+
+ region_generic = kzalloc(sizeof(*region_generic), GFP_KERNEL);
+ if (!region_generic)
+ return -ENOMEM;
+
+ refcount_set(&region_generic->dummy_lkey_id.refcnt, 1);
+ aregion->priv = region_generic;
+
+ return 0;
+}
+
+static void
+mlxsw_sp_acl_atcam_region_generic_fini(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ kfree(aregion->priv);
+}
+
+static struct mlxsw_sp_acl_atcam_lkey_id *
+mlxsw_sp_acl_atcam_generic_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion,
+ char *enc_key, u8 erp_id)
+{
+ struct mlxsw_sp_acl_atcam_region_generic *region_generic;
+
+ region_generic = aregion->priv;
+ return &region_generic->dummy_lkey_id;
+}
+
+static void
+mlxsw_sp_acl_atcam_generic_lkey_id_put(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id)
+{
+}
+
+static const struct mlxsw_sp_acl_atcam_region_ops
+mlxsw_sp_acl_atcam_region_generic_ops = {
+ .init = mlxsw_sp_acl_atcam_region_generic_init,
+ .fini = mlxsw_sp_acl_atcam_region_generic_fini,
+ .lkey_id_get = mlxsw_sp_acl_atcam_generic_lkey_id_get,
+ .lkey_id_put = mlxsw_sp_acl_atcam_generic_lkey_id_put,
+};
+
+static int
+mlxsw_sp_acl_atcam_region_12kb_init(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp;
+ struct mlxsw_sp_acl_atcam_region_12kb *region_12kb;
+ u64 max_lkey_id;
+ int err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_LARGE_KEY_ID))
+ return -EIO;
+
+ max_lkey_id = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_LARGE_KEY_ID);
+ region_12kb = kzalloc(sizeof(*region_12kb), GFP_KERNEL);
+ if (!region_12kb)
+ return -ENOMEM;
+
+ region_12kb->used_lkey_id = bitmap_zalloc(max_lkey_id, GFP_KERNEL);
+ if (!region_12kb->used_lkey_id) {
+ err = -ENOMEM;
+ goto err_used_lkey_id_alloc;
+ }
+
+ err = rhashtable_init(&region_12kb->lkey_ht,
+ &mlxsw_sp_acl_atcam_lkey_id_ht_params);
+ if (err)
+ goto err_rhashtable_init;
+
+ region_12kb->max_lkey_id = max_lkey_id;
+ aregion->priv = region_12kb;
+
+ return 0;
+
+err_rhashtable_init:
+ bitmap_free(region_12kb->used_lkey_id);
+err_used_lkey_id_alloc:
+ kfree(region_12kb);
+ return err;
+}
+
+static void
+mlxsw_sp_acl_atcam_region_12kb_fini(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv;
+
+ rhashtable_destroy(&region_12kb->lkey_ht);
+ bitmap_free(region_12kb->used_lkey_id);
+ kfree(region_12kb);
+}
+
+static struct mlxsw_sp_acl_atcam_lkey_id *
+mlxsw_sp_acl_atcam_lkey_id_create(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_lkey_id_ht_key *ht_key)
+{
+ struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv;
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
+ u32 id;
+ int err;
+
+ id = find_first_zero_bit(region_12kb->used_lkey_id,
+ region_12kb->max_lkey_id);
+ if (id < region_12kb->max_lkey_id)
+ __set_bit(id, region_12kb->used_lkey_id);
+ else
+ return ERR_PTR(-ENOBUFS);
+
+ lkey_id = kzalloc(sizeof(*lkey_id), GFP_KERNEL);
+ if (!lkey_id) {
+ err = -ENOMEM;
+ goto err_lkey_id_alloc;
+ }
+
+ lkey_id->id = id;
+ memcpy(&lkey_id->ht_key, ht_key, sizeof(*ht_key));
+ refcount_set(&lkey_id->refcnt, 1);
+
+ err = rhashtable_insert_fast(&region_12kb->lkey_ht,
+ &lkey_id->ht_node,
+ mlxsw_sp_acl_atcam_lkey_id_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ return lkey_id;
+
+err_rhashtable_insert:
+ kfree(lkey_id);
+err_lkey_id_alloc:
+ __clear_bit(id, region_12kb->used_lkey_id);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_atcam_lkey_id_destroy(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id)
+{
+ struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv;
+ u32 id = lkey_id->id;
+
+ rhashtable_remove_fast(&region_12kb->lkey_ht, &lkey_id->ht_node,
+ mlxsw_sp_acl_atcam_lkey_id_ht_params);
+ kfree(lkey_id);
+ __clear_bit(id, region_12kb->used_lkey_id);
+}
+
+static struct mlxsw_sp_acl_atcam_lkey_id *
+mlxsw_sp_acl_atcam_12kb_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion,
+ char *enc_key, u8 erp_id)
+{
+ struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv;
+ struct mlxsw_sp_acl_tcam_region *region = aregion->region;
+ struct mlxsw_sp_acl_atcam_lkey_id_ht_key ht_key = {{ 0 } };
+ struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
+ struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
+
+ memcpy(ht_key.enc_key, enc_key, sizeof(ht_key.enc_key));
+ mlxsw_afk_clear(afk, ht_key.enc_key,
+ MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_START,
+ MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_END);
+ ht_key.erp_id = erp_id;
+ lkey_id = rhashtable_lookup_fast(&region_12kb->lkey_ht, &ht_key,
+ mlxsw_sp_acl_atcam_lkey_id_ht_params);
+ if (lkey_id) {
+ refcount_inc(&lkey_id->refcnt);
+ return lkey_id;
+ }
+
+ return mlxsw_sp_acl_atcam_lkey_id_create(aregion, &ht_key);
+}
+
+static void
+mlxsw_sp_acl_atcam_12kb_lkey_id_put(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id)
+{
+ if (refcount_dec_and_test(&lkey_id->refcnt))
+ mlxsw_sp_acl_atcam_lkey_id_destroy(aregion, lkey_id);
+}
+
+static const struct mlxsw_sp_acl_atcam_region_ops
+mlxsw_sp_acl_atcam_region_12kb_ops = {
+ .init = mlxsw_sp_acl_atcam_region_12kb_init,
+ .fini = mlxsw_sp_acl_atcam_region_12kb_fini,
+ .lkey_id_get = mlxsw_sp_acl_atcam_12kb_lkey_id_get,
+ .lkey_id_put = mlxsw_sp_acl_atcam_12kb_lkey_id_put,
+};
+
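+/* Only 12 key block regions need real large key IDs, which are allocated from
+ * the ACL_MAX_LARGE_KEY_ID resource and tracked in an rhashtable. Smaller
+ * regions share the single dummy large key ID set up by the generic ops above.
+ */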
+static const struct mlxsw_sp_acl_atcam_region_ops *
+mlxsw_sp_acl_atcam_region_ops_arr[] = {
+ [MLXSW_SP_ACL_ATCAM_REGION_TYPE_2KB] =
+ &mlxsw_sp_acl_atcam_region_generic_ops,
+ [MLXSW_SP_ACL_ATCAM_REGION_TYPE_4KB] =
+ &mlxsw_sp_acl_atcam_region_generic_ops,
+ [MLXSW_SP_ACL_ATCAM_REGION_TYPE_8KB] =
+ &mlxsw_sp_acl_atcam_region_generic_ops,
+ [MLXSW_SP_ACL_ATCAM_REGION_TYPE_12KB] =
+ &mlxsw_sp_acl_atcam_region_12kb_ops,
+};
+
+int mlxsw_sp_acl_atcam_region_associate(struct mlxsw_sp *mlxsw_sp,
+ u16 region_id)
+{
+ char perar_pl[MLXSW_REG_PERAR_LEN];
+ /* For now, just assume that every region has 12 key blocks */
+ u16 hw_region = region_id * 3;
+ u64 max_regions;
+
+ max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);
+ if (hw_region >= max_regions)
+ return -ENOBUFS;
+
+ mlxsw_reg_perar_pack(perar_pl, region_id, hw_region);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(perar), perar_pl);
+}
+
+static void
+mlxsw_sp_acl_atcam_region_type_init(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp_acl_tcam_region *region = aregion->region;
+ enum mlxsw_sp_acl_atcam_region_type region_type;
+ unsigned int blocks_count;
+
+ /* We already know that the block count cannot exceed the maximum
+ * block count.
+ */
+ blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info);
+ if (blocks_count <= 2)
+ region_type = MLXSW_SP_ACL_ATCAM_REGION_TYPE_2KB;
+ else if (blocks_count <= 4)
+ region_type = MLXSW_SP_ACL_ATCAM_REGION_TYPE_4KB;
+ else if (blocks_count <= 8)
+ region_type = MLXSW_SP_ACL_ATCAM_REGION_TYPE_8KB;
+ else
+ region_type = MLXSW_SP_ACL_ATCAM_REGION_TYPE_12KB;
+
+ aregion->type = region_type;
+ aregion->ops = mlxsw_sp_acl_atcam_region_ops_arr[region_type];
+}
+
+int
+mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_tcam_region *region,
+ void *hints_priv,
+ const struct mlxsw_sp_acl_ctcam_region_ops *ops)
+{
+ int err;
+
+ aregion->region = region;
+ aregion->atcam = atcam;
+ mlxsw_sp_acl_atcam_region_type_init(aregion);
+ INIT_LIST_HEAD(&aregion->entries_list);
+
+ err = rhashtable_init(&aregion->entries_ht,
+ &mlxsw_sp_acl_atcam_entries_ht_params);
+ if (err)
+ return err;
+ err = aregion->ops->init(aregion);
+ if (err)
+ goto err_ops_init;
+ err = mlxsw_sp_acl_erp_region_init(aregion, hints_priv);
+ if (err)
+ goto err_erp_region_init;
+ err = mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, &aregion->cregion,
+ region, ops);
+ if (err)
+ goto err_ctcam_region_init;
+
+ return 0;
+
+err_ctcam_region_init:
+ mlxsw_sp_acl_erp_region_fini(aregion);
+err_erp_region_init:
+ aregion->ops->fini(aregion);
+err_ops_init:
+ rhashtable_destroy(&aregion->entries_ht);
+ return err;
+}
+
+void mlxsw_sp_acl_atcam_region_fini(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ mlxsw_sp_acl_ctcam_region_fini(&aregion->cregion);
+ mlxsw_sp_acl_erp_region_fini(aregion);
+ aregion->ops->fini(aregion);
+ rhashtable_destroy(&aregion->entries_ht);
+ WARN_ON(!list_empty(&aregion->entries_list));
+}
+
+void mlxsw_sp_acl_atcam_chunk_init(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ unsigned int priority)
+{
+ mlxsw_sp_acl_ctcam_chunk_init(&aregion->cregion, &achunk->cchunk,
+ priority);
+}
+
+void mlxsw_sp_acl_atcam_chunk_fini(struct mlxsw_sp_acl_atcam_chunk *achunk)
+{
+ mlxsw_sp_acl_ctcam_chunk_fini(&achunk->cchunk);
+}
+
+static int
+mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp_acl_tcam_region *region = aregion->region;
+ u8 erp_id = mlxsw_sp_acl_erp_mask_erp_id(aentry->erp_mask);
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
+ char ptce3_pl[MLXSW_REG_PTCE3_LEN];
+ u32 kvdl_index, priority;
+ int err;
+
+ err = mlxsw_sp_acl_tcam_priority_get(mlxsw_sp, rulei, &priority, true);
+ if (err)
+ return err;
+
+ lkey_id = aregion->ops->lkey_id_get(aregion, aentry->enc_key, erp_id);
+ if (IS_ERR(lkey_id))
+ return PTR_ERR(lkey_id);
+ aentry->lkey_id = lkey_id;
+
+ kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block);
+ mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_WRITE,
+ priority, region->tcam_region_info,
+ aentry->enc_key, erp_id,
+ aentry->delta_info.start,
+ aentry->delta_info.mask,
+ aentry->delta_info.value,
+ refcount_read(&lkey_id->refcnt) != 1, lkey_id->id,
+ kvdl_index);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl);
+ if (err)
+ goto err_ptce3_write;
+
+ return 0;
+
+err_ptce3_write:
+ aregion->ops->lkey_id_put(aregion, lkey_id);
+ return err;
+}
+
+static void
+mlxsw_sp_acl_atcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id = aentry->lkey_id;
+ struct mlxsw_sp_acl_tcam_region *region = aregion->region;
+ u8 erp_id = mlxsw_sp_acl_erp_mask_erp_id(aentry->erp_mask);
+ char ptce3_pl[MLXSW_REG_PTCE3_LEN];
+
+ mlxsw_reg_ptce3_pack(ptce3_pl, false, MLXSW_REG_PTCE3_OP_WRITE_WRITE, 0,
+ region->tcam_region_info,
+ aentry->enc_key, erp_id,
+ aentry->delta_info.start,
+ aentry->delta_info.mask,
+ aentry->delta_info.value,
+ refcount_read(&lkey_id->refcnt) != 1,
+ lkey_id->id, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl);
+ aregion->ops->lkey_id_put(aregion, lkey_id);
+}
+
+static int
+mlxsw_sp_acl_atcam_region_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id = aentry->lkey_id;
+ u8 erp_id = mlxsw_sp_acl_erp_mask_erp_id(aentry->erp_mask);
+ struct mlxsw_sp_acl_tcam_region *region = aregion->region;
+ char ptce3_pl[MLXSW_REG_PTCE3_LEN];
+ u32 kvdl_index, priority;
+ int err;
+
+ err = mlxsw_sp_acl_tcam_priority_get(mlxsw_sp, rulei, &priority, true);
+ if (err)
+ return err;
+ kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block);
+ mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_UPDATE,
+ priority, region->tcam_region_info,
+ aentry->enc_key, erp_id,
+ aentry->delta_info.start,
+ aentry->delta_info.mask,
+ aentry->delta_info.value,
+ refcount_read(&lkey_id->refcnt) != 1, lkey_id->id,
+ kvdl_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl);
+}
+
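+/* Adding an A-TCAM entry involves several steps: encode the rule into a full
+ * key and mask, get (or create) the eRP mask for that mask, derive the delta
+ * information and clear the delta bits from the encoded key, link the entry
+ * on the region's entries list and hashtable, update the Bloom filter and
+ * finally write the entry to hardware via the PTCE3 register.
+ */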
+static int
+__mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp_acl_tcam_region *region = aregion->region;
+ char mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN] = { 0 };
+ struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
+ const struct mlxsw_sp_acl_erp_delta *delta;
+ struct mlxsw_sp_acl_erp_mask *erp_mask;
+ int err;
+
+ mlxsw_afk_encode(afk, region->key_info, &rulei->values,
+ aentry->ht_key.full_enc_key, mask);
+
+ erp_mask = mlxsw_sp_acl_erp_mask_get(aregion, mask, false);
+ if (IS_ERR(erp_mask))
+ return PTR_ERR(erp_mask);
+ aentry->erp_mask = erp_mask;
+ aentry->ht_key.erp_id = mlxsw_sp_acl_erp_mask_erp_id(erp_mask);
+ memcpy(aentry->enc_key, aentry->ht_key.full_enc_key,
+ sizeof(aentry->enc_key));
+
+ /* Compute all needed delta information and clear the delta bits
+ * from the encoded key.
+ */
+ delta = mlxsw_sp_acl_erp_delta(aentry->erp_mask);
+ aentry->delta_info.start = mlxsw_sp_acl_erp_delta_start(delta);
+ aentry->delta_info.mask = mlxsw_sp_acl_erp_delta_mask(delta);
+ aentry->delta_info.value =
+ mlxsw_sp_acl_erp_delta_value(delta,
+ aentry->ht_key.full_enc_key);
+ mlxsw_sp_acl_erp_delta_clear(delta, aentry->enc_key);
+
+ /* Add the rule to the list of A-TCAM rules, assuming it is
+ * intended for the A-TCAM. In case the rule does not fit into
+ * the A-TCAM, it will be removed from the list.
+ */
+ list_add(&aentry->list, &aregion->entries_list);
+
+ /* We can't insert identical rules into the A-TCAM, so fail and
+ * let the rule spill into C-TCAM
+ */
+ err = rhashtable_lookup_insert_fast(&aregion->entries_ht,
+ &aentry->ht_node,
+ mlxsw_sp_acl_atcam_entries_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ /* Bloom filter must be updated here, before inserting the rule into
+ * the A-TCAM.
+ */
+ err = mlxsw_sp_acl_erp_bf_insert(mlxsw_sp, aregion, erp_mask, aentry);
+ if (err)
+ goto err_bf_insert;
+
+ err = mlxsw_sp_acl_atcam_region_entry_insert(mlxsw_sp, aregion, aentry,
+ rulei);
+ if (err)
+ goto err_rule_insert;
+
+ return 0;
+
+err_rule_insert:
+ mlxsw_sp_acl_erp_bf_remove(mlxsw_sp, aregion, erp_mask, aentry);
+err_bf_insert:
+ rhashtable_remove_fast(&aregion->entries_ht, &aentry->ht_node,
+ mlxsw_sp_acl_atcam_entries_ht_params);
+err_rhashtable_insert:
+ list_del(&aentry->list);
+ mlxsw_sp_acl_erp_mask_put(aregion, erp_mask);
+ return err;
+}
+
+static void
+__mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ mlxsw_sp_acl_atcam_region_entry_remove(mlxsw_sp, aregion, aentry);
+ mlxsw_sp_acl_erp_bf_remove(mlxsw_sp, aregion, aentry->erp_mask, aentry);
+ rhashtable_remove_fast(&aregion->entries_ht, &aentry->ht_node,
+ mlxsw_sp_acl_atcam_entries_ht_params);
+ list_del(&aentry->list);
+ mlxsw_sp_acl_erp_mask_put(aregion, aentry->erp_mask);
+}
+
+static int
+__mlxsw_sp_acl_atcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ return mlxsw_sp_acl_atcam_region_entry_action_replace(mlxsw_sp, aregion,
+ aentry, rulei);
+}
+
+int mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ int err;
+
+ err = __mlxsw_sp_acl_atcam_entry_add(mlxsw_sp, aregion, aentry, rulei);
+ if (!err)
+ return 0;
+
+ /* It is possible we failed to add the rule to the A-TCAM because the
+ * allowed number of masks was exceeded. Try to spill into the C-TCAM.
+ */
+ trace_mlxsw_sp_acl_atcam_entry_add_ctcam_spill(mlxsw_sp, aregion);
+ err = mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &aregion->cregion,
+ &achunk->cchunk, &aentry->centry,
+ rulei, true);
+ if (!err)
+ return 0;
+
+ return err;
+}
+
+void mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ if (mlxsw_sp_acl_atcam_is_centry(aentry))
+ mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, &aregion->cregion,
+ &achunk->cchunk, &aentry->centry);
+ else
+ __mlxsw_sp_acl_atcam_entry_del(mlxsw_sp, aregion, aentry);
+}
+
+int
+mlxsw_sp_acl_atcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ int err;
+
+ if (mlxsw_sp_acl_atcam_is_centry(aentry))
+ err = mlxsw_sp_acl_ctcam_entry_action_replace(mlxsw_sp,
+ &aregion->cregion,
+ &aentry->centry,
+ rulei);
+ else
+ err = __mlxsw_sp_acl_atcam_entry_action_replace(mlxsw_sp,
+ aregion, aentry,
+ rulei);
+
+ return err;
+}
+
+int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam)
+{
+ return mlxsw_sp_acl_erps_init(mlxsw_sp, atcam);
+}
+
+void mlxsw_sp_acl_atcam_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam)
+{
+ mlxsw_sp_acl_erps_fini(mlxsw_sp, atcam);
+}
+
+void *
+mlxsw_sp_acl_atcam_rehash_hints_get(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ return mlxsw_sp_acl_erp_rehash_hints_get(aregion);
+}
+
+void mlxsw_sp_acl_atcam_rehash_hints_put(void *hints_priv)
+{
+ mlxsw_sp_acl_erp_rehash_hints_put(hints_priv);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
new file mode 100644
index 000000000..95f63fcf4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/errno.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/refcount.h>
+#include <linux/mutex.h>
+
+#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
+
+struct mlxsw_sp_acl_bf {
+ struct mutex lock; /* Protects Bloom Filter updates. */
+ unsigned int bank_size;
+ refcount_t refcnt[];
+};
+
+/* The Bloom filter uses a CRC-16 hash over chunks of data which contain 4 key
+ * blocks, an eRP ID and a region ID. In Spectrum-2 and above, a region key is
+ * composed of up to 12 key blocks, so there can be up to 3 chunks in the Bloom
+ * filter key, depending on the actual number of key blocks used in the region.
+ * The layout of the Bloom filter key is as follows:
+ *
+ * +-------------------------+------------------------+------------------------+
+ * | Chunk 2 Key blocks 11-8 | Chunk 1 Key blocks 7-4 | Chunk 0 Key blocks 3-0 |
+ * +-------------------------+------------------------+------------------------+
+ */
+#define MLXSW_BLOOM_KEY_CHUNKS 3
+
+/* Spectrum-2 and Spectrum-3 chunks */
+#define MLXSW_SP2_BLOOM_KEY_LEN 69
+
+/* Each chunk is 23 bytes: 18 bytes contain 4 key blocks of 36 bits each,
+ * 2 bytes hold the eRP ID and region ID, and 3 bytes are zero padding.
+ * The layout of each chunk is as follows:
+ *
+ * +---------+-----------+----------+-----------------------------------+
+ * | 3 bytes |        2 bytes       |              18 bytes             |
+ * +---------+-----------+----------+-----------------------------------+
+ * | 183:158 |  157:148  | 147:144  |               143:0               |
+ * +---------+-----------+----------+-----------------------------------+
+ * |    0    | region ID |  eRP ID  |      4 Key blocks (18 Bytes)      |
+ * +---------+-----------+----------+-----------------------------------+
+ */
+#define MLXSW_SP2_BLOOM_CHUNK_PAD_BYTES 3
+#define MLXSW_SP2_BLOOM_CHUNK_KEY_BYTES 18
+#define MLXSW_SP2_BLOOM_KEY_CHUNK_BYTES 23
+
+/* The offset of the key block within a chunk is 5 bytes as it comes after
+ * 3 bytes of zero padding and 16 bits of region ID and eRP ID.
+ */
+#define MLXSW_SP2_BLOOM_CHUNK_KEY_OFFSET 5
+
+/* This table holds the CRC of each possible byte value and is used for
+ * Spectrum-2 and Spectrum-3. It is computed, MSB first, for the Bloom filter
+ * polynomial 0x8529 (1 + x^3 + x^5 + x^8 + x^10 + x^15 plus the
+ * implicit x^16).
+ */
+static const u16 mlxsw_sp2_acl_bf_crc16_tab[256] = {
+0x0000, 0x8529, 0x8f7b, 0x0a52, 0x9bdf, 0x1ef6, 0x14a4, 0x918d,
+0xb297, 0x37be, 0x3dec, 0xb8c5, 0x2948, 0xac61, 0xa633, 0x231a,
+0xe007, 0x652e, 0x6f7c, 0xea55, 0x7bd8, 0xfef1, 0xf4a3, 0x718a,
+0x5290, 0xd7b9, 0xddeb, 0x58c2, 0xc94f, 0x4c66, 0x4634, 0xc31d,
+0x4527, 0xc00e, 0xca5c, 0x4f75, 0xdef8, 0x5bd1, 0x5183, 0xd4aa,
+0xf7b0, 0x7299, 0x78cb, 0xfde2, 0x6c6f, 0xe946, 0xe314, 0x663d,
+0xa520, 0x2009, 0x2a5b, 0xaf72, 0x3eff, 0xbbd6, 0xb184, 0x34ad,
+0x17b7, 0x929e, 0x98cc, 0x1de5, 0x8c68, 0x0941, 0x0313, 0x863a,
+0x8a4e, 0x0f67, 0x0535, 0x801c, 0x1191, 0x94b8, 0x9eea, 0x1bc3,
+0x38d9, 0xbdf0, 0xb7a2, 0x328b, 0xa306, 0x262f, 0x2c7d, 0xa954,
+0x6a49, 0xef60, 0xe532, 0x601b, 0xf196, 0x74bf, 0x7eed, 0xfbc4,
+0xd8de, 0x5df7, 0x57a5, 0xd28c, 0x4301, 0xc628, 0xcc7a, 0x4953,
+0xcf69, 0x4a40, 0x4012, 0xc53b, 0x54b6, 0xd19f, 0xdbcd, 0x5ee4,
+0x7dfe, 0xf8d7, 0xf285, 0x77ac, 0xe621, 0x6308, 0x695a, 0xec73,
+0x2f6e, 0xaa47, 0xa015, 0x253c, 0xb4b1, 0x3198, 0x3bca, 0xbee3,
+0x9df9, 0x18d0, 0x1282, 0x97ab, 0x0626, 0x830f, 0x895d, 0x0c74,
+0x91b5, 0x149c, 0x1ece, 0x9be7, 0x0a6a, 0x8f43, 0x8511, 0x0038,
+0x2322, 0xa60b, 0xac59, 0x2970, 0xb8fd, 0x3dd4, 0x3786, 0xb2af,
+0x71b2, 0xf49b, 0xfec9, 0x7be0, 0xea6d, 0x6f44, 0x6516, 0xe03f,
+0xc325, 0x460c, 0x4c5e, 0xc977, 0x58fa, 0xddd3, 0xd781, 0x52a8,
+0xd492, 0x51bb, 0x5be9, 0xdec0, 0x4f4d, 0xca64, 0xc036, 0x451f,
+0x6605, 0xe32c, 0xe97e, 0x6c57, 0xfdda, 0x78f3, 0x72a1, 0xf788,
+0x3495, 0xb1bc, 0xbbee, 0x3ec7, 0xaf4a, 0x2a63, 0x2031, 0xa518,
+0x8602, 0x032b, 0x0979, 0x8c50, 0x1ddd, 0x98f4, 0x92a6, 0x178f,
+0x1bfb, 0x9ed2, 0x9480, 0x11a9, 0x8024, 0x050d, 0x0f5f, 0x8a76,
+0xa96c, 0x2c45, 0x2617, 0xa33e, 0x32b3, 0xb79a, 0xbdc8, 0x38e1,
+0xfbfc, 0x7ed5, 0x7487, 0xf1ae, 0x6023, 0xe50a, 0xef58, 0x6a71,
+0x496b, 0xcc42, 0xc610, 0x4339, 0xd2b4, 0x579d, 0x5dcf, 0xd8e6,
+0x5edc, 0xdbf5, 0xd1a7, 0x548e, 0xc503, 0x402a, 0x4a78, 0xcf51,
+0xec4b, 0x6962, 0x6330, 0xe619, 0x7794, 0xf2bd, 0xf8ef, 0x7dc6,
+0xbedb, 0x3bf2, 0x31a0, 0xb489, 0x2504, 0xa02d, 0xaa7f, 0x2f56,
+0x0c4c, 0x8965, 0x8337, 0x061e, 0x9793, 0x12ba, 0x18e8, 0x9dc1,
+};
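+
+/* Illustrative sketch, not part of the driver: a table such as the one above
+ * can be regenerated with a plain MSB-first bitwise CRC. The helper below is
+ * hypothetical; with poly 0x8529 and width 16 it reproduces
+ * mlxsw_sp2_acl_bf_crc16_tab[], and with (0x1b, 10) and (0x2d, 6) it would
+ * reproduce the two Spectrum-4 tables further down.
+ */
+static inline u16 mlxsw_sp_acl_bf_crc_tab_entry(u8 byte, u16 poly, u8 width)
+{
+ u16 mask = (1 << width) - 1;
+ u16 reg = 0;
+ int i;
+
+ for (i = 7; i >= 0; i--) {
+ u16 top = ((reg >> (width - 1)) ^ (byte >> i)) & 1;
+
+ reg = (reg << 1) & mask;
+ if (top)
+ reg ^= poly;
+ }
+ return reg;
+}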
+
+/* Spectrum-4 chunks */
+#define MLXSW_SP4_BLOOM_KEY_LEN 60
+
+/* In Spectrum-4 there is no padding, so each chunk is 20 bytes: 18 bytes
+ * contain 4 key blocks of 36 bits each, and 2 bytes hold the eRP ID and
+ * region ID.
+ * The layout of each chunk is as follows:
+ *
+ * +----------------------+-----------------------------------+
+ * |        2 bytes       |              18 bytes             |
+ * +-----------+----------+-----------------------------------+
+ * |  157:148  | 147:144  |               143:0               |
+ * +-----------+----------+-----------------------------------+
+ * | region ID |  eRP ID  |      4 Key blocks (18 Bytes)      |
+ * +-----------+----------+-----------------------------------+
+ */
+
+#define MLXSW_SP4_BLOOM_CHUNK_PAD_BYTES 0
+#define MLXSW_SP4_BLOOM_CHUNK_KEY_BYTES 18
+#define MLXSW_SP4_BLOOM_KEY_CHUNK_BYTES 20
+
+/* The offset of the key block within a chunk is 2 bytes as it comes after
+ * 16 bits of region ID and eRP ID.
+ */
+#define MLXSW_SP4_BLOOM_CHUNK_KEY_OFFSET 2
+
+/* For Spectrum-4, two hash functions are used: one CRC-10 based and one
+ * CRC-6 based. The result is a combination of the two calculations -
+ * the 6-bit column index is the MSBs (result of CRC-6),
+ * the 10-bit row index is the LSBs (result of CRC-10).
+ */
+
+/* This table holds the CRC of each possible byte value and is used for
+ * Spectrum-4. It is computed, MSB first, for the Bloom filter
+ * polynomial 0x1b (1 + x^1 + x^3 + x^4 plus the implicit x^10).
+ */
+static const u16 mlxsw_sp4_acl_bf_crc10_tab[256] = {
+0x0000, 0x001b, 0x0036, 0x002d, 0x006c, 0x0077, 0x005a, 0x0041,
+0x00d8, 0x00c3, 0x00ee, 0x00f5, 0x00b4, 0x00af, 0x0082, 0x0099,
+0x01b0, 0x01ab, 0x0186, 0x019d, 0x01dc, 0x01c7, 0x01ea, 0x01f1,
+0x0168, 0x0173, 0x015e, 0x0145, 0x0104, 0x011f, 0x0132, 0x0129,
+0x0360, 0x037b, 0x0356, 0x034d, 0x030c, 0x0317, 0x033a, 0x0321,
+0x03b8, 0x03a3, 0x038e, 0x0395, 0x03d4, 0x03cf, 0x03e2, 0x03f9,
+0x02d0, 0x02cb, 0x02e6, 0x02fd, 0x02bc, 0x02a7, 0x028a, 0x0291,
+0x0208, 0x0213, 0x023e, 0x0225, 0x0264, 0x027f, 0x0252, 0x0249,
+0x02db, 0x02c0, 0x02ed, 0x02f6, 0x02b7, 0x02ac, 0x0281, 0x029a,
+0x0203, 0x0218, 0x0235, 0x022e, 0x026f, 0x0274, 0x0259, 0x0242,
+0x036b, 0x0370, 0x035d, 0x0346, 0x0307, 0x031c, 0x0331, 0x032a,
+0x03b3, 0x03a8, 0x0385, 0x039e, 0x03df, 0x03c4, 0x03e9, 0x03f2,
+0x01bb, 0x01a0, 0x018d, 0x0196, 0x01d7, 0x01cc, 0x01e1, 0x01fa,
+0x0163, 0x0178, 0x0155, 0x014e, 0x010f, 0x0114, 0x0139, 0x0122,
+0x000b, 0x0010, 0x003d, 0x0026, 0x0067, 0x007c, 0x0051, 0x004a,
+0x00d3, 0x00c8, 0x00e5, 0x00fe, 0x00bf, 0x00a4, 0x0089, 0x0092,
+0x01ad, 0x01b6, 0x019b, 0x0180, 0x01c1, 0x01da, 0x01f7, 0x01ec,
+0x0175, 0x016e, 0x0143, 0x0158, 0x0119, 0x0102, 0x012f, 0x0134,
+0x001d, 0x0006, 0x002b, 0x0030, 0x0071, 0x006a, 0x0047, 0x005c,
+0x00c5, 0x00de, 0x00f3, 0x00e8, 0x00a9, 0x00b2, 0x009f, 0x0084,
+0x02cd, 0x02d6, 0x02fb, 0x02e0, 0x02a1, 0x02ba, 0x0297, 0x028c,
+0x0215, 0x020e, 0x0223, 0x0238, 0x0279, 0x0262, 0x024f, 0x0254,
+0x037d, 0x0366, 0x034b, 0x0350, 0x0311, 0x030a, 0x0327, 0x033c,
+0x03a5, 0x03be, 0x0393, 0x0388, 0x03c9, 0x03d2, 0x03ff, 0x03e4,
+0x0376, 0x036d, 0x0340, 0x035b, 0x031a, 0x0301, 0x032c, 0x0337,
+0x03ae, 0x03b5, 0x0398, 0x0383, 0x03c2, 0x03d9, 0x03f4, 0x03ef,
+0x02c6, 0x02dd, 0x02f0, 0x02eb, 0x02aa, 0x02b1, 0x029c, 0x0287,
+0x021e, 0x0205, 0x0228, 0x0233, 0x0272, 0x0269, 0x0244, 0x025f,
+0x0016, 0x000d, 0x0020, 0x003b, 0x007a, 0x0061, 0x004c, 0x0057,
+0x00ce, 0x00d5, 0x00f8, 0x00e3, 0x00a2, 0x00b9, 0x0094, 0x008f,
+0x01a6, 0x01bd, 0x0190, 0x018b, 0x01ca, 0x01d1, 0x01fc, 0x01e7,
+0x017e, 0x0165, 0x0148, 0x0153, 0x0112, 0x0109, 0x0124, 0x013f,
+};
+
+/* This table holds the CRC of each possible byte value and is used for
+ * Spectrum-4. It is computed, MSB first, for the Bloom filter
+ * polynomial 0x2d (1 + x^2 + x^3 + x^5 plus the implicit x^6).
+ */
+static const u8 mlxsw_sp4_acl_bf_crc6_tab[256] = {
+0x00, 0x2d, 0x37, 0x1a, 0x03, 0x2e, 0x34, 0x19,
+0x06, 0x2b, 0x31, 0x1c, 0x05, 0x28, 0x32, 0x1f,
+0x0c, 0x21, 0x3b, 0x16, 0x0f, 0x22, 0x38, 0x15,
+0x0a, 0x27, 0x3d, 0x10, 0x09, 0x24, 0x3e, 0x13,
+0x18, 0x35, 0x2f, 0x02, 0x1b, 0x36, 0x2c, 0x01,
+0x1e, 0x33, 0x29, 0x04, 0x1d, 0x30, 0x2a, 0x07,
+0x14, 0x39, 0x23, 0x0e, 0x17, 0x3a, 0x20, 0x0d,
+0x12, 0x3f, 0x25, 0x08, 0x11, 0x3c, 0x26, 0x0b,
+0x30, 0x1d, 0x07, 0x2a, 0x33, 0x1e, 0x04, 0x29,
+0x36, 0x1b, 0x01, 0x2c, 0x35, 0x18, 0x02, 0x2f,
+0x3c, 0x11, 0x0b, 0x26, 0x3f, 0x12, 0x08, 0x25,
+0x3a, 0x17, 0x0d, 0x20, 0x39, 0x14, 0x0e, 0x23,
+0x28, 0x05, 0x1f, 0x32, 0x2b, 0x06, 0x1c, 0x31,
+0x2e, 0x03, 0x19, 0x34, 0x2d, 0x00, 0x1a, 0x37,
+0x24, 0x09, 0x13, 0x3e, 0x27, 0x0a, 0x10, 0x3d,
+0x22, 0x0f, 0x15, 0x38, 0x21, 0x0c, 0x16, 0x3b,
+0x0d, 0x20, 0x3a, 0x17, 0x0e, 0x23, 0x39, 0x14,
+0x0b, 0x26, 0x3c, 0x11, 0x08, 0x25, 0x3f, 0x12,
+0x01, 0x2c, 0x36, 0x1b, 0x02, 0x2f, 0x35, 0x18,
+0x07, 0x2a, 0x30, 0x1d, 0x04, 0x29, 0x33, 0x1e,
+0x15, 0x38, 0x22, 0x0f, 0x16, 0x3b, 0x21, 0x0c,
+0x13, 0x3e, 0x24, 0x09, 0x10, 0x3d, 0x27, 0x0a,
+0x19, 0x34, 0x2e, 0x03, 0x1a, 0x37, 0x2d, 0x00,
+0x1f, 0x32, 0x28, 0x05, 0x1c, 0x31, 0x2b, 0x06,
+0x3d, 0x10, 0x0a, 0x27, 0x3e, 0x13, 0x09, 0x24,
+0x3b, 0x16, 0x0c, 0x21, 0x38, 0x15, 0x0f, 0x22,
+0x31, 0x1c, 0x06, 0x2b, 0x32, 0x1f, 0x05, 0x28,
+0x37, 0x1a, 0x00, 0x2d, 0x34, 0x19, 0x03, 0x2e,
+0x25, 0x08, 0x12, 0x3f, 0x26, 0x0b, 0x11, 0x3c,
+0x23, 0x0e, 0x14, 0x39, 0x20, 0x0d, 0x17, 0x3a,
+0x29, 0x04, 0x1e, 0x33, 0x2a, 0x07, 0x1d, 0x30,
+0x2f, 0x02, 0x18, 0x35, 0x2c, 0x01, 0x1b, 0x36,
+};
+
+/* Each chunk contains 4 key blocks. Chunk 2 uses key blocks 11-8,
+ * and we need to populate it with 4 key blocks copied from the entry's encoded
+ * key. The original key layout is the same for Spectrum-{2,3,4}.
+ * Since the encoded key contains 2 bytes of padding, key block 11 starts at
+ * offset 2. Key block 7, which is used in chunk 1, starts at offset 20, as 4
+ * key blocks take 18 bytes. See 'MLXSW_SP2_AFK_BLOCK_LAYOUT' for more details.
+ * This array defines the key offsets for easy access when copying key blocks
+ * from the entry key to a Bloom filter chunk.
+ */
+static const u8 chunk_key_offsets[MLXSW_BLOOM_KEY_CHUNKS] = {2, 20, 38};
+
+static u16 mlxsw_sp2_acl_bf_crc16_byte(u16 crc, u8 c)
+{
+ return (crc << 8) ^ mlxsw_sp2_acl_bf_crc16_tab[(crc >> 8) ^ c];
+}
+
+static u16 mlxsw_sp2_acl_bf_crc(const u8 *buffer, size_t len)
+{
+ u16 crc = 0;
+
+ while (len--)
+ crc = mlxsw_sp2_acl_bf_crc16_byte(crc, *buffer++);
+ return crc;
+}
+
+static void
+__mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ char *output, u8 *len, u8 max_chunks, u8 pad_bytes,
+ u8 key_offset, u8 chunk_key_len, u8 chunk_len)
+{
+ struct mlxsw_afk_key_info *key_info = aregion->region->key_info;
+ u8 chunk_index, chunk_count, block_count;
+ char *chunk = output;
+ __be16 erp_region_id;
+
+ block_count = mlxsw_afk_key_info_blocks_count_get(key_info);
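+ /* Each chunk carries 4 key blocks, so the chunk count below is
+ * DIV_ROUND_UP(block_count, 4).
+ */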
+ chunk_count = 1 + ((block_count - 1) >> 2);
+ erp_region_id = cpu_to_be16(aentry->ht_key.erp_id |
+ (aregion->region->id << 4));
+ for (chunk_index = max_chunks - chunk_count; chunk_index < max_chunks;
+ chunk_index++) {
+ memset(chunk, 0, pad_bytes);
+ memcpy(chunk + pad_bytes, &erp_region_id,
+ sizeof(erp_region_id));
+ memcpy(chunk + key_offset,
+ &aentry->enc_key[chunk_key_offsets[chunk_index]],
+ chunk_key_len);
+ chunk += chunk_len;
+ }
+ *len = chunk_count * chunk_len;
+}
+
+static void
+mlxsw_sp2_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ char *output, u8 *len)
+{
+ __mlxsw_sp_acl_bf_key_encode(aregion, aentry, output, len,
+ MLXSW_BLOOM_KEY_CHUNKS,
+ MLXSW_SP2_BLOOM_CHUNK_PAD_BYTES,
+ MLXSW_SP2_BLOOM_CHUNK_KEY_OFFSET,
+ MLXSW_SP2_BLOOM_CHUNK_KEY_BYTES,
+ MLXSW_SP2_BLOOM_KEY_CHUNK_BYTES);
+}
+
+static unsigned int
+mlxsw_sp2_acl_bf_index_get(struct mlxsw_sp_acl_bf *bf,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ char bf_key[MLXSW_SP2_BLOOM_KEY_LEN];
+ u8 bf_size;
+
+ mlxsw_sp2_acl_bf_key_encode(aregion, aentry, bf_key, &bf_size);
+ return mlxsw_sp2_acl_bf_crc(bf_key, bf_size);
+}
+
+static u16 mlxsw_sp4_acl_bf_crc10_byte(u16 crc, u8 c)
+{
+ u8 index = ((crc >> 2) ^ c) & 0xff;
+
+ return ((crc << 8) ^ mlxsw_sp4_acl_bf_crc10_tab[index]) & 0x3ff;
+}
+
+static u16 mlxsw_sp4_acl_bf_crc6_byte(u16 crc, u8 c)
+{
+ u8 index = (crc ^ c) & 0xff;
+
+ return ((crc << 6) ^ (mlxsw_sp4_acl_bf_crc6_tab[index] << 2)) & 0xfc;
+}
+
+static u16 mlxsw_sp4_acl_bf_crc(const u8 *buffer, size_t len)
+{
+ u16 crc_row = 0, crc_col = 0;
+
+ while (len--) {
+ crc_row = mlxsw_sp4_acl_bf_crc10_byte(crc_row, *buffer);
+ crc_col = mlxsw_sp4_acl_bf_crc6_byte(crc_col, *buffer);
+ buffer++;
+ }
+
+ crc_col >>= 2;
+
+ /* The 6-bit column index is the MSBs, the 10-bit row index is the LSBs */
+ return (crc_col << 10) | crc_row;
+}
+
+static void right_shift_array(char *arr, u8 len, u8 shift_bits)
+{
+ u8 byte_mask = 0xff >> shift_bits;
+ int i;
+
+ if (WARN_ON(!shift_bits || shift_bits >= 8))
+ return;
+
+ for (i = len - 1; i >= 0; i--) {
+ /* The first iteration looks like out-of-bounds access,
+ * but actually references a buffer that the array is shifted
+ * into. This move is legal as we never send the last chunk to
+ * this function.
+ */
+ arr[i + 1] &= byte_mask;
+ arr[i + 1] |= arr[i] << (8 - shift_bits);
+ arr[i] = arr[i] >> shift_bits;
+ }
+}
+
+static void mlxsw_sp4_bf_key_shift_chunks(u8 chunk_count, char *output)
+{
+ /* The chunks are supposed to be contiguous, with no padding.
+ * Since the region ID and eRP ID use 14 bits, and not a full 2 bytes,
+ * and in Spectrum-4 there is no padding, it is necessary to shift some
+ * chunks 2 bits right.
+ */
+ switch (chunk_count) {
+ case 2:
+ /* The chunks are copied as follows:
+ * +-------------+-----------------+
+ * | Chunk 0 | Chunk 1 |
+ * | IDs | keys |(**) IDs | keys |
+ * +-------------+-----------------+
+ * In (**), there are two unused bits, therefore, chunk 0 needs
+ * to be shifted two bits right.
+ */
+ right_shift_array(output, MLXSW_SP4_BLOOM_KEY_CHUNK_BYTES, 2);
+ break;
+ case 3:
+ /* The chunks are copied as follows:
+ * +-------------+-----------------+-----------------+
+ * | Chunk 0 | Chunk 1 | Chunk 2 |
+ * | IDs | keys |(**) IDs | keys |(**) IDs | keys |
+ * +-------------+-----------------+-----------------+
+ * In (**), there are two unused bits, therefore, chunk 1 needs
+ * to be shifted two bits right and chunk 0 needs to be shifted
+ * four bits right.
+ */
+ right_shift_array(output + MLXSW_SP4_BLOOM_KEY_CHUNK_BYTES,
+ MLXSW_SP4_BLOOM_KEY_CHUNK_BYTES, 2);
+ right_shift_array(output, MLXSW_SP4_BLOOM_KEY_CHUNK_BYTES, 4);
+ break;
+ default:
+ WARN_ON(chunk_count > MLXSW_BLOOM_KEY_CHUNKS);
+ }
+}
+
+static void
+mlxsw_sp4_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ char *output, u8 *len)
+{
+ struct mlxsw_afk_key_info *key_info = aregion->region->key_info;
+ u8 block_count = mlxsw_afk_key_info_blocks_count_get(key_info);
+ u8 chunk_count = 1 + ((block_count - 1) >> 2);
+
+ __mlxsw_sp_acl_bf_key_encode(aregion, aentry, output, len,
+ MLXSW_BLOOM_KEY_CHUNKS,
+ MLXSW_SP4_BLOOM_CHUNK_PAD_BYTES,
+ MLXSW_SP4_BLOOM_CHUNK_KEY_OFFSET,
+ MLXSW_SP4_BLOOM_CHUNK_KEY_BYTES,
+ MLXSW_SP4_BLOOM_KEY_CHUNK_BYTES);
+ mlxsw_sp4_bf_key_shift_chunks(chunk_count, output);
+}
+
+static unsigned int
+mlxsw_sp4_acl_bf_index_get(struct mlxsw_sp_acl_bf *bf,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ char bf_key[MLXSW_SP4_BLOOM_KEY_LEN] = {};
+ u8 bf_size;
+
+ mlxsw_sp4_acl_bf_key_encode(aregion, aentry, bf_key, &bf_size);
+ return mlxsw_sp4_acl_bf_crc(bf_key, bf_size);
+}
+
+static unsigned int
+mlxsw_sp_acl_bf_rule_count_index_get(struct mlxsw_sp_acl_bf *bf,
+ unsigned int erp_bank,
+ unsigned int bf_index)
+{
+ return erp_bank * bf->bank_size + bf_index;
+}
+
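+/* Several A-TCAM entries may map to the same Bloom filter bit (same eRP bank
+ * and index), so each bit is reference counted and the PEABFE register is only
+ * written when the count goes from zero to one (set) or back to zero (clear).
+ */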
+int
+mlxsw_sp_acl_bf_entry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_bf *bf,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ unsigned int erp_bank,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ unsigned int rule_index;
+ char *peabfe_pl;
+ u16 bf_index;
+ int err;
+
+ mutex_lock(&bf->lock);
+
+ bf_index = mlxsw_sp->acl_bf_ops->index_get(bf, aregion, aentry);
+ rule_index = mlxsw_sp_acl_bf_rule_count_index_get(bf, erp_bank,
+ bf_index);
+
+ if (refcount_inc_not_zero(&bf->refcnt[rule_index])) {
+ err = 0;
+ goto unlock;
+ }
+
+ peabfe_pl = kmalloc(MLXSW_REG_PEABFE_LEN, GFP_KERNEL);
+ if (!peabfe_pl) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ mlxsw_reg_peabfe_pack(peabfe_pl);
+ mlxsw_reg_peabfe_rec_pack(peabfe_pl, 0, 1, erp_bank, bf_index);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(peabfe), peabfe_pl);
+ kfree(peabfe_pl);
+ if (err)
+ goto unlock;
+
+ refcount_set(&bf->refcnt[rule_index], 1);
+ err = 0;
+
+unlock:
+ mutex_unlock(&bf->lock);
+ return err;
+}
+
+void
+mlxsw_sp_acl_bf_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_bf *bf,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ unsigned int erp_bank,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ unsigned int rule_index;
+ char *peabfe_pl;
+ u16 bf_index;
+
+ mutex_lock(&bf->lock);
+
+ bf_index = mlxsw_sp->acl_bf_ops->index_get(bf, aregion, aentry);
+ rule_index = mlxsw_sp_acl_bf_rule_count_index_get(bf, erp_bank,
+ bf_index);
+
+ if (refcount_dec_and_test(&bf->refcnt[rule_index])) {
+ peabfe_pl = kmalloc(MLXSW_REG_PEABFE_LEN, GFP_KERNEL);
+ if (!peabfe_pl)
+ goto unlock;
+
+ mlxsw_reg_peabfe_pack(peabfe_pl);
+ mlxsw_reg_peabfe_rec_pack(peabfe_pl, 0, 0, erp_bank, bf_index);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(peabfe), peabfe_pl);
+ kfree(peabfe_pl);
+ }
+
+unlock:
+ mutex_unlock(&bf->lock);
+}
+
+struct mlxsw_sp_acl_bf *
+mlxsw_sp_acl_bf_init(struct mlxsw_sp *mlxsw_sp, unsigned int num_erp_banks)
+{
+ struct mlxsw_sp_acl_bf *bf;
+ unsigned int bf_bank_size;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_BF_LOG))
+ return ERR_PTR(-EIO);
+
+ /* Bloom filter size per erp_table_bank
+ * is 2^ACL_MAX_BF_LOG
+ */
+ bf_bank_size = 1 << MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_BF_LOG);
+ bf = kzalloc(struct_size(bf, refcnt, size_mul(bf_bank_size, num_erp_banks)),
+ GFP_KERNEL);
+ if (!bf)
+ return ERR_PTR(-ENOMEM);
+
+ bf->bank_size = bf_bank_size;
+ mutex_init(&bf->lock);
+
+ return bf;
+}
+
+void mlxsw_sp_acl_bf_fini(struct mlxsw_sp_acl_bf *bf)
+{
+ mutex_destroy(&bf->lock);
+ kfree(bf);
+}
+
+const struct mlxsw_sp_acl_bf_ops mlxsw_sp2_acl_bf_ops = {
+ .index_get = mlxsw_sp2_acl_bf_index_get,
+};
+
+const struct mlxsw_sp_acl_bf_ops mlxsw_sp4_acl_bf_ops = {
+ .index_get = mlxsw_sp4_acl_bf_index_get,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
new file mode 100644
index 000000000..05680a7e6
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/parman.h>
+
+#include "reg.h"
+#include "core.h"
+#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
+
+static int
+mlxsw_sp_acl_ctcam_region_resize(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region,
+ u16 new_size)
+{
+ char ptar_pl[MLXSW_REG_PTAR_LEN];
+
+ mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_RESIZE,
+ region->key_type, new_size, region->id,
+ region->tcam_region_info);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
+}
+
+static void
+mlxsw_sp_acl_ctcam_region_move(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region,
+ u16 src_offset, u16 dst_offset, u16 size)
+{
+ char prcr_pl[MLXSW_REG_PRCR_LEN];
+
+ mlxsw_reg_prcr_pack(prcr_pl, MLXSW_REG_PRCR_OP_MOVE,
+ region->tcam_region_info, src_offset,
+ region->tcam_region_info, dst_offset, size);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(prcr), prcr_pl);
+}
+
+static int
+mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ bool fillup_priority)
+{
+ struct mlxsw_sp_acl_tcam_region *region = cregion->region;
+ struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
+ char ptce2_pl[MLXSW_REG_PTCE2_LEN];
+ char *act_set;
+ u32 priority;
+ char *mask;
+ char *key;
+ int err;
+
+ err = mlxsw_sp_acl_tcam_priority_get(mlxsw_sp, rulei, &priority,
+ fillup_priority);
+ if (err)
+ return err;
+
+ mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
+ region->tcam_region_info,
+ centry->parman_item.index, priority);
+ key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl);
+ mask = mlxsw_reg_ptce2_mask_data(ptce2_pl);
+ mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask);
+
+ err = cregion->ops->entry_insert(cregion, centry, mask);
+ if (err)
+ return err;
+
+ /* Only the first action set belongs here, the rest is in KVD */
+ act_set = mlxsw_afa_block_first_set(rulei->act_block);
+ mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);
+
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+ if (err)
+ goto err_ptce2_write;
+
+ return 0;
+
+err_ptce2_write:
+ cregion->ops->entry_remove(cregion, centry);
+ return err;
+}
+
+static void
+mlxsw_sp_acl_ctcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry)
+{
+ char ptce2_pl[MLXSW_REG_PTCE2_LEN];
+
+ mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
+ cregion->region->tcam_region_info,
+ centry->parman_item.index, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+ cregion->ops->entry_remove(cregion, centry);
+}
+
+static int
+mlxsw_sp_acl_ctcam_region_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ struct mlxsw_afa_block *afa_block,
+ unsigned int priority)
+{
+ char ptce2_pl[MLXSW_REG_PTCE2_LEN];
+ char *act_set;
+
+ mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_UPDATE,
+ cregion->region->tcam_region_info,
+ centry->parman_item.index, priority);
+
+ act_set = mlxsw_afa_block_first_set(afa_block);
+ mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+}
+
+static int mlxsw_sp_acl_ctcam_region_parman_resize(void *priv,
+ unsigned long new_count)
+{
+ struct mlxsw_sp_acl_ctcam_region *cregion = priv;
+ struct mlxsw_sp_acl_tcam_region *region = cregion->region;
+ struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
+ u64 max_tcam_rules;
+
+ max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
+ if (new_count > max_tcam_rules)
+ return -EINVAL;
+ return mlxsw_sp_acl_ctcam_region_resize(mlxsw_sp, region, new_count);
+}
+
+static void mlxsw_sp_acl_ctcam_region_parman_move(void *priv,
+ unsigned long from_index,
+ unsigned long to_index,
+ unsigned long count)
+{
+ struct mlxsw_sp_acl_ctcam_region *cregion = priv;
+ struct mlxsw_sp_acl_tcam_region *region = cregion->region;
+ struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
+
+ mlxsw_sp_acl_ctcam_region_move(mlxsw_sp, region,
+ from_index, to_index, count);
+}
+
+static const struct parman_ops mlxsw_sp_acl_ctcam_region_parman_ops = {
+ .base_count = MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
+ .resize_step = MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP,
+ .resize = mlxsw_sp_acl_ctcam_region_parman_resize,
+ .move = mlxsw_sp_acl_ctcam_region_parman_move,
+ .algo = PARMAN_ALGO_TYPE_LSORT,
+};
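+
+/* Each C-TCAM region is managed as a priority sorted array by a parman
+ * instance: the resize callback grows or shrinks the region via the PTAR
+ * register, the move callback relocates entries via the PRCR register, and
+ * PARMAN_ALGO_TYPE_LSORT keeps entries ordered by chunk priority.
+ */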
+
+int
+mlxsw_sp_acl_ctcam_region_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_tcam_region *region,
+ const struct mlxsw_sp_acl_ctcam_region_ops *ops)
+{
+ cregion->region = region;
+ cregion->ops = ops;
+ cregion->parman = parman_create(&mlxsw_sp_acl_ctcam_region_parman_ops,
+ cregion);
+ if (!cregion->parman)
+ return -ENOMEM;
+ return 0;
+}
+
+void mlxsw_sp_acl_ctcam_region_fini(struct mlxsw_sp_acl_ctcam_region *cregion)
+{
+ parman_destroy(cregion->parman);
+}
+
+void mlxsw_sp_acl_ctcam_chunk_init(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ unsigned int priority)
+{
+ parman_prio_init(cregion->parman, &cchunk->parman_prio, priority);
+}
+
+void mlxsw_sp_acl_ctcam_chunk_fini(struct mlxsw_sp_acl_ctcam_chunk *cchunk)
+{
+ parman_prio_fini(&cchunk->parman_prio);
+}
+
+int mlxsw_sp_acl_ctcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ bool fillup_priority)
+{
+ int err;
+
+ err = parman_item_add(cregion->parman, &cchunk->parman_prio,
+ &centry->parman_item);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_acl_ctcam_region_entry_insert(mlxsw_sp, cregion, centry,
+ rulei, fillup_priority);
+ if (err)
+ goto err_rule_insert;
+ return 0;
+
+err_rule_insert:
+ parman_item_remove(cregion->parman, &cchunk->parman_prio,
+ &centry->parman_item);
+ return err;
+}
+
+void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ struct mlxsw_sp_acl_ctcam_entry *centry)
+{
+ mlxsw_sp_acl_ctcam_region_entry_remove(mlxsw_sp, cregion, centry);
+ parman_item_remove(cregion->parman, &cchunk->parman_prio,
+ &centry->parman_item);
+}
+
+int mlxsw_sp_acl_ctcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ return mlxsw_sp_acl_ctcam_region_entry_action_replace(mlxsw_sp, cregion,
+ centry,
+ rulei->act_block,
+ rulei->priority);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
new file mode 100644
index 000000000..d231f4d28
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
@@ -0,0 +1,1601 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/bitmap.h>
+#include <linux/errno.h>
+#include <linux/genalloc.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/objagg.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+
+#include "core.h"
+#include "reg.h"
+#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
+
+/* gen_pool_alloc() returns 0 when allocation fails, so use an offset */
+#define MLXSW_SP_ACL_ERP_GENALLOC_OFFSET 0x100
+#define MLXSW_SP_ACL_ERP_MAX_PER_REGION 16
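+
+/* Illustrative sketch, not driver code: the offset above is meant to be used
+ * as the base of the range backing the pool, so a successful allocation can
+ * never collide with the 0 that gen_pool_alloc() returns on failure, roughly:
+ *
+ *	gen_pool_add(pool, MLXSW_SP_ACL_ERP_GENALLOC_OFFSET, erpt_size, -1);
+ *	index = gen_pool_alloc(pool, size);      (0 means allocation failure)
+ *	erpt_base = index - MLXSW_SP_ACL_ERP_GENALLOC_OFFSET;
+ */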
+
+struct mlxsw_sp_acl_erp_core {
+ unsigned int erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX + 1];
+ struct gen_pool *erp_tables;
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_acl_bf *bf;
+ unsigned int num_erp_banks;
+};
+
+struct mlxsw_sp_acl_erp_key {
+ char mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN];
+#define __MASK_LEN 0x38
+#define __MASK_IDX(i) (__MASK_LEN - (i) - 1)
+ bool ctcam;
+};
+
+struct mlxsw_sp_acl_erp {
+ struct mlxsw_sp_acl_erp_key key;
+ u8 id;
+ u8 index;
+ DECLARE_BITMAP(mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN);
+ struct list_head list;
+ struct mlxsw_sp_acl_erp_table *erp_table;
+};
+
+struct mlxsw_sp_acl_erp_master_mask {
+ DECLARE_BITMAP(bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN);
+ unsigned int count[MLXSW_SP_ACL_TCAM_MASK_LEN];
+};
+
+struct mlxsw_sp_acl_erp_table {
+ struct mlxsw_sp_acl_erp_master_mask master_mask;
+ DECLARE_BITMAP(erp_id_bitmap, MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ DECLARE_BITMAP(erp_index_bitmap, MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ struct list_head atcam_erps_list;
+ struct mlxsw_sp_acl_erp_core *erp_core;
+ struct mlxsw_sp_acl_atcam_region *aregion;
+ const struct mlxsw_sp_acl_erp_table_ops *ops;
+ unsigned long base_index;
+ unsigned int num_atcam_erps;
+ unsigned int num_max_atcam_erps;
+ unsigned int num_ctcam_erps;
+ unsigned int num_deltas;
+ struct objagg *objagg;
+ struct mutex objagg_lock; /* guards objagg manipulation */
+};
+
+struct mlxsw_sp_acl_erp_table_ops {
+ struct mlxsw_sp_acl_erp *
+ (*erp_create)(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key);
+ void (*erp_destroy)(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp);
+};
+
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key);
+static void
+mlxsw_sp_acl_erp_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp);
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_second_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key);
+static void
+mlxsw_sp_acl_erp_second_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp);
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_first_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key);
+static void
+mlxsw_sp_acl_erp_first_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp);
+static void
+mlxsw_sp_acl_erp_no_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp);
+
+static const struct mlxsw_sp_acl_erp_table_ops erp_multiple_masks_ops = {
+ .erp_create = mlxsw_sp_acl_erp_mask_create,
+ .erp_destroy = mlxsw_sp_acl_erp_mask_destroy,
+};
+
+static const struct mlxsw_sp_acl_erp_table_ops erp_two_masks_ops = {
+ .erp_create = mlxsw_sp_acl_erp_mask_create,
+ .erp_destroy = mlxsw_sp_acl_erp_second_mask_destroy,
+};
+
+static const struct mlxsw_sp_acl_erp_table_ops erp_single_mask_ops = {
+ .erp_create = mlxsw_sp_acl_erp_second_mask_create,
+ .erp_destroy = mlxsw_sp_acl_erp_first_mask_destroy,
+};
+
+static const struct mlxsw_sp_acl_erp_table_ops erp_no_mask_ops = {
+ .erp_create = mlxsw_sp_acl_erp_first_mask_create,
+ .erp_destroy = mlxsw_sp_acl_erp_no_mask_destroy,
+};
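+
+/* The four ops structs above describe the eRP table state by the number of
+ * A-TCAM masks it holds (none, a single one, two, or more); erp_create and
+ * erp_destroy move the table between these states as masks are added and
+ * removed.
+ */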
+
+static bool
+mlxsw_sp_acl_erp_table_is_used(const struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ return erp_table->ops != &erp_single_mask_ops &&
+ erp_table->ops != &erp_no_mask_ops;
+}
+
+static unsigned int
+mlxsw_sp_acl_erp_bank_get(const struct mlxsw_sp_acl_erp *erp)
+{
+ return erp->index % erp->erp_table->erp_core->num_erp_banks;
+}
+
+static unsigned int
+mlxsw_sp_acl_erp_table_entry_size(const struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ struct mlxsw_sp_acl_atcam_region *aregion = erp_table->aregion;
+ struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core;
+
+ return erp_core->erpt_entries_size[aregion->type];
+}
+
+static int mlxsw_sp_acl_erp_id_get(struct mlxsw_sp_acl_erp_table *erp_table,
+ u8 *p_id)
+{
+ u8 id;
+
+ id = find_first_zero_bit(erp_table->erp_id_bitmap,
+ MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ if (id < MLXSW_SP_ACL_ERP_MAX_PER_REGION) {
+ __set_bit(id, erp_table->erp_id_bitmap);
+ *p_id = id;
+ return 0;
+ }
+
+ return -ENOBUFS;
+}
+
+static void mlxsw_sp_acl_erp_id_put(struct mlxsw_sp_acl_erp_table *erp_table,
+ u8 id)
+{
+ __clear_bit(id, erp_table->erp_id_bitmap);
+}
+
+static void
+mlxsw_sp_acl_erp_master_mask_bit_set(unsigned long bit,
+ struct mlxsw_sp_acl_erp_master_mask *mask)
+{
+ if (mask->count[bit]++ == 0)
+ __set_bit(bit, mask->bitmap);
+}
+
+static void
+mlxsw_sp_acl_erp_master_mask_bit_clear(unsigned long bit,
+ struct mlxsw_sp_acl_erp_master_mask *mask)
+{
+ if (--mask->count[bit] == 0)
+ __clear_bit(bit, mask->bitmap);
+}
+
+static int
+mlxsw_sp_acl_erp_master_mask_update(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region;
+ struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
+ char percr_pl[MLXSW_REG_PERCR_LEN];
+ char *master_mask;
+
+ mlxsw_reg_percr_pack(percr_pl, region->id);
+ master_mask = mlxsw_reg_percr_master_mask_data(percr_pl);
+ bitmap_to_arr32((u32 *) master_mask, erp_table->master_mask.bitmap,
+ MLXSW_SP_ACL_TCAM_MASK_LEN);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(percr), percr_pl);
+}
+
+static int
+mlxsw_sp_acl_erp_master_mask_set(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
+{
+ DECLARE_BITMAP(mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN);
+ unsigned long bit;
+ int err;
+
+ bitmap_from_arr32(mask_bitmap, (u32 *) key->mask,
+ MLXSW_SP_ACL_TCAM_MASK_LEN);
+ for_each_set_bit(bit, mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ mlxsw_sp_acl_erp_master_mask_bit_set(bit,
+ &erp_table->master_mask);
+
+ err = mlxsw_sp_acl_erp_master_mask_update(erp_table);
+ if (err)
+ goto err_master_mask_update;
+
+ return 0;
+
+err_master_mask_update:
+ for_each_set_bit(bit, mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ mlxsw_sp_acl_erp_master_mask_bit_clear(bit,
+ &erp_table->master_mask);
+ return err;
+}
+
+static int
+mlxsw_sp_acl_erp_master_mask_clear(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
+{
+ DECLARE_BITMAP(mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN);
+ unsigned long bit;
+ int err;
+
+ bitmap_from_arr32(mask_bitmap, (u32 *) key->mask,
+ MLXSW_SP_ACL_TCAM_MASK_LEN);
+ for_each_set_bit(bit, mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ mlxsw_sp_acl_erp_master_mask_bit_clear(bit,
+ &erp_table->master_mask);
+
+ err = mlxsw_sp_acl_erp_master_mask_update(erp_table);
+ if (err)
+ goto err_master_mask_update;
+
+ return 0;
+
+err_master_mask_update:
+ for_each_set_bit(bit, mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ mlxsw_sp_acl_erp_master_mask_bit_set(bit,
+ &erp_table->master_mask);
+ return err;
+}
+
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_generic_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
+{
+ struct mlxsw_sp_acl_erp *erp;
+ int err;
+
+ erp = kzalloc(sizeof(*erp), GFP_KERNEL);
+ if (!erp)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlxsw_sp_acl_erp_id_get(erp_table, &erp->id);
+ if (err)
+ goto err_erp_id_get;
+
+ memcpy(&erp->key, key, sizeof(*key));
+ list_add(&erp->list, &erp_table->atcam_erps_list);
+ erp_table->num_atcam_erps++;
+ erp->erp_table = erp_table;
+
+ err = mlxsw_sp_acl_erp_master_mask_set(erp_table, &erp->key);
+ if (err)
+ goto err_master_mask_set;
+
+ return erp;
+
+err_master_mask_set:
+ erp_table->num_atcam_erps--;
+ list_del(&erp->list);
+ mlxsw_sp_acl_erp_id_put(erp_table, erp->id);
+err_erp_id_get:
+ kfree(erp);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_erp_generic_destroy(struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
+
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, &erp->key);
+ erp_table->num_atcam_erps--;
+ list_del(&erp->list);
+ mlxsw_sp_acl_erp_id_put(erp_table, erp->id);
+ kfree(erp);
+}
+
+static int
+mlxsw_sp_acl_erp_table_alloc(struct mlxsw_sp_acl_erp_core *erp_core,
+ unsigned int num_erps,
+ enum mlxsw_sp_acl_atcam_region_type region_type,
+ unsigned long *p_index)
+{
+ unsigned int num_rows, entry_size;
+ unsigned long index;
+
+ /* We only allow allocations of entire rows */
+ if (num_erps % erp_core->num_erp_banks != 0)
+ return -EINVAL;
+
+ entry_size = erp_core->erpt_entries_size[region_type];
+ num_rows = num_erps / erp_core->num_erp_banks;
+
+ index = gen_pool_alloc(erp_core->erp_tables, num_rows * entry_size);
+ if (!index)
+ return -ENOBUFS;
+
+ *p_index = index - MLXSW_SP_ACL_ERP_GENALLOC_OFFSET;
+
+ return 0;
+}
+
+static void
+mlxsw_sp_acl_erp_table_free(struct mlxsw_sp_acl_erp_core *erp_core,
+ unsigned int num_erps,
+ enum mlxsw_sp_acl_atcam_region_type region_type,
+ unsigned long index)
+{
+ unsigned long base_index;
+ unsigned int entry_size;
+ size_t size;
+
+ entry_size = erp_core->erpt_entries_size[region_type];
+ base_index = index + MLXSW_SP_ACL_ERP_GENALLOC_OFFSET;
+ size = num_erps / erp_core->num_erp_banks * entry_size;
+ gen_pool_free(erp_core->erp_tables, base_index, size);
+}
+
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_table_master_rp(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ if (!list_is_singular(&erp_table->atcam_erps_list))
+ return NULL;
+
+ return list_first_entry(&erp_table->atcam_erps_list,
+ struct mlxsw_sp_acl_erp, list);
+}
+
+static int mlxsw_sp_acl_erp_index_get(struct mlxsw_sp_acl_erp_table *erp_table,
+ u8 *p_index)
+{
+ u8 index;
+
+ index = find_first_zero_bit(erp_table->erp_index_bitmap,
+ erp_table->num_max_atcam_erps);
+ if (index < erp_table->num_max_atcam_erps) {
+ __set_bit(index, erp_table->erp_index_bitmap);
+ *p_index = index;
+ return 0;
+ }
+
+ return -ENOBUFS;
+}
+
+static void mlxsw_sp_acl_erp_index_put(struct mlxsw_sp_acl_erp_table *erp_table,
+ u8 index)
+{
+ __clear_bit(index, erp_table->erp_index_bitmap);
+}
+
+static void
+mlxsw_sp_acl_erp_table_locate(const struct mlxsw_sp_acl_erp_table *erp_table,
+ const struct mlxsw_sp_acl_erp *erp,
+ u8 *p_erpt_bank, u8 *p_erpt_index)
+{
+ unsigned int entry_size = mlxsw_sp_acl_erp_table_entry_size(erp_table);
+ struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core;
+ unsigned int row;
+
+ *p_erpt_bank = erp->index % erp_core->num_erp_banks;
+ row = erp->index / erp_core->num_erp_banks;
+ *p_erpt_index = erp_table->base_index + row * entry_size;
+}
+
+static int
+mlxsw_sp_acl_erp_table_erp_add(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
+ enum mlxsw_reg_perpt_key_size key_size;
+ char perpt_pl[MLXSW_REG_PERPT_LEN];
+ u8 erpt_bank, erpt_index;
+
+ mlxsw_sp_acl_erp_table_locate(erp_table, erp, &erpt_bank, &erpt_index);
+ key_size = (enum mlxsw_reg_perpt_key_size) erp_table->aregion->type;
+ mlxsw_reg_perpt_pack(perpt_pl, erpt_bank, erpt_index, key_size, erp->id,
+ 0, erp_table->base_index, erp->index,
+ erp->key.mask);
+ mlxsw_reg_perpt_erp_vector_pack(perpt_pl, erp_table->erp_index_bitmap,
+ MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ mlxsw_reg_perpt_erp_vector_set(perpt_pl, erp->index, true);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(perpt), perpt_pl);
+}
+
+static void mlxsw_sp_acl_erp_table_erp_del(struct mlxsw_sp_acl_erp *erp)
+{
+ char empty_mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN] = { 0 };
+ struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
+ struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
+ enum mlxsw_reg_perpt_key_size key_size;
+ char perpt_pl[MLXSW_REG_PERPT_LEN];
+ u8 erpt_bank, erpt_index;
+
+ mlxsw_sp_acl_erp_table_locate(erp_table, erp, &erpt_bank, &erpt_index);
+ key_size = (enum mlxsw_reg_perpt_key_size) erp_table->aregion->type;
+ mlxsw_reg_perpt_pack(perpt_pl, erpt_bank, erpt_index, key_size, erp->id,
+ 0, erp_table->base_index, erp->index, empty_mask);
+ mlxsw_reg_perpt_erp_vector_pack(perpt_pl, erp_table->erp_index_bitmap,
+ MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ mlxsw_reg_perpt_erp_vector_set(perpt_pl, erp->index, false);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(perpt), perpt_pl);
+}
+
+static int
+mlxsw_sp_acl_erp_table_enable(struct mlxsw_sp_acl_erp_table *erp_table,
+ bool ctcam_le)
+{
+ struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region;
+ struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
+ char pererp_pl[MLXSW_REG_PERERP_LEN];
+
+ mlxsw_reg_pererp_pack(pererp_pl, region->id, ctcam_le, true, 0,
+ erp_table->base_index, 0);
+ mlxsw_reg_pererp_erp_vector_pack(pererp_pl, erp_table->erp_index_bitmap,
+ MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
+}
+
+static void
+mlxsw_sp_acl_erp_table_disable(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region;
+ struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
+ char pererp_pl[MLXSW_REG_PERERP_LEN];
+ struct mlxsw_sp_acl_erp *master_rp;
+
+ master_rp = mlxsw_sp_acl_erp_table_master_rp(erp_table);
+ /* It is possible we do not have a master RP when the table is
+ * disabled because there are no rules in the A-TCAM and the last
+ * C-TCAM rule was deleted.
+ */
+ mlxsw_reg_pererp_pack(pererp_pl, region->id, false, false, 0, 0,
+ master_rp ? master_rp->id : 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
+}
+
+static int
+mlxsw_sp_acl_erp_table_relocate(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ struct mlxsw_sp_acl_erp *erp;
+ int err;
+
+ list_for_each_entry(erp, &erp_table->atcam_erps_list, list) {
+ err = mlxsw_sp_acl_erp_table_erp_add(erp_table, erp);
+ if (err)
+ goto err_table_erp_add;
+ }
+
+ return 0;
+
+err_table_erp_add:
+ list_for_each_entry_continue_reverse(erp, &erp_table->atcam_erps_list,
+ list)
+ mlxsw_sp_acl_erp_table_erp_del(erp);
+ return err;
+}
+
+static int
+mlxsw_sp_acl_erp_table_expand(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ unsigned int num_erps, old_num_erps = erp_table->num_max_atcam_erps;
+ struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core;
+ unsigned long old_base_index = erp_table->base_index;
+ bool ctcam_le = erp_table->num_ctcam_erps > 0;
+ int err;
+
+ if (erp_table->num_atcam_erps < erp_table->num_max_atcam_erps)
+ return 0;
+
+ if (erp_table->num_max_atcam_erps == MLXSW_SP_ACL_ERP_MAX_PER_REGION)
+ return -ENOBUFS;
+
+ num_erps = old_num_erps + erp_core->num_erp_banks;
+ err = mlxsw_sp_acl_erp_table_alloc(erp_core, num_erps,
+ erp_table->aregion->type,
+ &erp_table->base_index);
+ if (err)
+ return err;
+ erp_table->num_max_atcam_erps = num_erps;
+
+ err = mlxsw_sp_acl_erp_table_relocate(erp_table);
+ if (err)
+ goto err_table_relocate;
+
+ err = mlxsw_sp_acl_erp_table_enable(erp_table, ctcam_le);
+ if (err)
+ goto err_table_enable;
+
+ mlxsw_sp_acl_erp_table_free(erp_core, old_num_erps,
+ erp_table->aregion->type, old_base_index);
+
+ return 0;
+
+err_table_enable:
+err_table_relocate:
+ erp_table->num_max_atcam_erps = old_num_erps;
+ mlxsw_sp_acl_erp_table_free(erp_core, num_erps,
+ erp_table->aregion->type,
+ erp_table->base_index);
+ erp_table->base_index = old_base_index;
+ return err;
+}
+
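+/* Add all of the region's existing A-TCAM entries to the Bloom filter bank
+ * that corresponds to the given eRP, rolling back already-added entries on
+ * failure.
+ */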
+static int
+mlxsw_acl_erp_table_bf_add(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp_acl_atcam_region *aregion = erp_table->aregion;
+ unsigned int erp_bank = mlxsw_sp_acl_erp_bank_get(erp);
+ struct mlxsw_sp_acl_atcam_entry *aentry;
+ int err;
+
+ list_for_each_entry(aentry, &aregion->entries_list, list) {
+ err = mlxsw_sp_acl_bf_entry_add(aregion->region->mlxsw_sp,
+ erp_table->erp_core->bf,
+ aregion, erp_bank, aentry);
+ if (err)
+ goto bf_entry_add_err;
+ }
+
+ return 0;
+
+bf_entry_add_err:
+ list_for_each_entry_continue_reverse(aentry, &aregion->entries_list,
+ list)
+ mlxsw_sp_acl_bf_entry_del(aregion->region->mlxsw_sp,
+ erp_table->erp_core->bf,
+ aregion, erp_bank, aentry);
+ return err;
+}
+
+static void
+mlxsw_acl_erp_table_bf_del(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp_acl_atcam_region *aregion = erp_table->aregion;
+ unsigned int erp_bank = mlxsw_sp_acl_erp_bank_get(erp);
+ struct mlxsw_sp_acl_atcam_entry *aentry;
+
+ list_for_each_entry_reverse(aentry, &aregion->entries_list, list)
+ mlxsw_sp_acl_bf_entry_del(aregion->region->mlxsw_sp,
+ erp_table->erp_core->bf,
+ aregion, erp_bank, aentry);
+}
+
+static int
+mlxsw_sp_acl_erp_region_table_trans(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core;
+ struct mlxsw_sp_acl_erp *master_rp;
+ int err;
+
+ /* Initially, allocate a single eRP row. Expand later as needed */
+ err = mlxsw_sp_acl_erp_table_alloc(erp_core, erp_core->num_erp_banks,
+ erp_table->aregion->type,
+ &erp_table->base_index);
+ if (err)
+ return err;
+ erp_table->num_max_atcam_erps = erp_core->num_erp_banks;
+
+ /* Transition the sole RP currently configured (the master RP)
+ * to the eRP table
+ */
+ master_rp = mlxsw_sp_acl_erp_table_master_rp(erp_table);
+ if (!master_rp) {
+ err = -EINVAL;
+ goto err_table_master_rp;
+ }
+
+ /* Make sure the master RP is using a valid index, as
+ * only a single eRP row is currently allocated.
+ */
+ master_rp->index = 0;
+ __set_bit(master_rp->index, erp_table->erp_index_bitmap);
+
+ err = mlxsw_sp_acl_erp_table_erp_add(erp_table, master_rp);
+ if (err)
+ goto err_table_master_rp_add;
+
+ /* Update the Bloom filter before enabling the eRP table, as rules
+ * on the master RP were not added to the Bloom filter up to this
+ * point.
+ */
+ err = mlxsw_acl_erp_table_bf_add(erp_table, master_rp);
+ if (err)
+ goto err_table_bf_add;
+
+ err = mlxsw_sp_acl_erp_table_enable(erp_table, false);
+ if (err)
+ goto err_table_enable;
+
+ return 0;
+
+err_table_enable:
+ mlxsw_acl_erp_table_bf_del(erp_table, master_rp);
+err_table_bf_add:
+ mlxsw_sp_acl_erp_table_erp_del(master_rp);
+err_table_master_rp_add:
+ __clear_bit(master_rp->index, erp_table->erp_index_bitmap);
+err_table_master_rp:
+ mlxsw_sp_acl_erp_table_free(erp_core, erp_table->num_max_atcam_erps,
+ erp_table->aregion->type,
+ erp_table->base_index);
+ return err;
+}
+
+static void
+mlxsw_sp_acl_erp_region_master_mask_trans(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core;
+ struct mlxsw_sp_acl_erp *master_rp;
+
+ mlxsw_sp_acl_erp_table_disable(erp_table);
+ master_rp = mlxsw_sp_acl_erp_table_master_rp(erp_table);
+ if (!master_rp)
+ return;
+ mlxsw_acl_erp_table_bf_del(erp_table, master_rp);
+ mlxsw_sp_acl_erp_table_erp_del(master_rp);
+ __clear_bit(master_rp->index, erp_table->erp_index_bitmap);
+ mlxsw_sp_acl_erp_table_free(erp_core, erp_table->num_max_atcam_erps,
+ erp_table->aregion->type,
+ erp_table->base_index);
+}
+
+static int
+mlxsw_sp_acl_erp_region_erp_add(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region;
+ struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
+ bool ctcam_le = erp_table->num_ctcam_erps > 0;
+ char pererp_pl[MLXSW_REG_PERERP_LEN];
+
+ mlxsw_reg_pererp_pack(pererp_pl, region->id, ctcam_le, true, 0,
+ erp_table->base_index, 0);
+ mlxsw_reg_pererp_erp_vector_pack(pererp_pl, erp_table->erp_index_bitmap,
+ MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ mlxsw_reg_pererp_erpt_vector_set(pererp_pl, erp->index, true);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
+}
+
+static void mlxsw_sp_acl_erp_region_erp_del(struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
+ struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region;
+ struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
+ bool ctcam_le = erp_table->num_ctcam_erps > 0;
+ char pererp_pl[MLXSW_REG_PERERP_LEN];
+
+ mlxsw_reg_pererp_pack(pererp_pl, region->id, ctcam_le, true, 0,
+ erp_table->base_index, 0);
+ mlxsw_reg_pererp_erp_vector_pack(pererp_pl, erp_table->erp_index_bitmap,
+ MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ mlxsw_reg_pererp_erpt_vector_set(pererp_pl, erp->index, false);
+
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
+}
+
+static int
+mlxsw_sp_acl_erp_region_ctcam_enable(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ /* No need to re-enable lookup in the C-TCAM */
+ if (erp_table->num_ctcam_erps > 1)
+ return 0;
+
+ return mlxsw_sp_acl_erp_table_enable(erp_table, true);
+}
+
+static void
+mlxsw_sp_acl_erp_region_ctcam_disable(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ /* Only disable C-TCAM lookup when last C-TCAM eRP is deleted */
+ if (erp_table->num_ctcam_erps > 1)
+ return;
+
+ mlxsw_sp_acl_erp_table_enable(erp_table, false);
+}
+
+static int
+__mlxsw_sp_acl_erp_table_other_inc(struct mlxsw_sp_acl_erp_table *erp_table,
+ unsigned int *inc_num)
+{
+ int err;
+
+ /* If there are C-TCAM eRPs or deltas in use, we need to transition
+ * the region to use the eRP table, if it has not already been done.
+ */
+ if (!mlxsw_sp_acl_erp_table_is_used(erp_table)) {
+ err = mlxsw_sp_acl_erp_region_table_trans(erp_table);
+ if (err)
+ return err;
+ }
+
+ /* When C-TCAM or deltas are used, the eRP table must be used */
+ if (erp_table->ops != &erp_multiple_masks_ops)
+ erp_table->ops = &erp_multiple_masks_ops;
+
+ (*inc_num)++;
+
+ return 0;
+}
+
+static int mlxsw_sp_acl_erp_ctcam_inc(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ return __mlxsw_sp_acl_erp_table_other_inc(erp_table,
+ &erp_table->num_ctcam_erps);
+}
+
+static int mlxsw_sp_acl_erp_delta_inc(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ return __mlxsw_sp_acl_erp_table_other_inc(erp_table,
+ &erp_table->num_deltas);
+}
+
+static void
+__mlxsw_sp_acl_erp_table_other_dec(struct mlxsw_sp_acl_erp_table *erp_table,
+ unsigned int *dec_num)
+{
+ (*dec_num)--;
+
+ /* If there are no C-TCAM eRPs or deltas in use, the state we
+ * transition to depends on the number of A-TCAM eRPs currently
+ * in use.
+ */
+ if (erp_table->num_ctcam_erps > 0 || erp_table->num_deltas > 0)
+ return;
+
+ switch (erp_table->num_atcam_erps) {
+ case 2:
+ /* Keep using the eRP table, but correctly set the
+ * operations pointer so that when an A-TCAM eRP is
+ * deleted we will transition to use the master mask
+ */
+ erp_table->ops = &erp_two_masks_ops;
+ break;
+ case 1:
+ /* We only kept the eRP table because we had C-TCAM eRPs or
+ * deltas in use. Now that the last of them is gone we can
+ * stop using the table and transition to use the master
+ * mask.
+ */
+ mlxsw_sp_acl_erp_region_master_mask_trans(erp_table);
+ erp_table->ops = &erp_single_mask_ops;
+ break;
+ case 0:
+ /* There are no more eRPs of any kind used by the region
+ * so free its eRP table and transition to initial state
+ */
+ mlxsw_sp_acl_erp_table_disable(erp_table);
+ mlxsw_sp_acl_erp_table_free(erp_table->erp_core,
+ erp_table->num_max_atcam_erps,
+ erp_table->aregion->type,
+ erp_table->base_index);
+ erp_table->ops = &erp_no_mask_ops;
+ break;
+ default:
+ break;
+ }
+}
+
+static void mlxsw_sp_acl_erp_ctcam_dec(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ __mlxsw_sp_acl_erp_table_other_dec(erp_table,
+ &erp_table->num_ctcam_erps);
+}
+
+static void mlxsw_sp_acl_erp_delta_dec(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ __mlxsw_sp_acl_erp_table_other_dec(erp_table,
+ &erp_table->num_deltas);
+}
+
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
+{
+ struct mlxsw_sp_acl_erp *erp;
+ int err;
+
+ erp = kzalloc(sizeof(*erp), GFP_KERNEL);
+ if (!erp)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(&erp->key, key, sizeof(*key));
+ bitmap_from_arr32(erp->mask_bitmap, (u32 *) key->mask,
+ MLXSW_SP_ACL_TCAM_MASK_LEN);
+
+ err = mlxsw_sp_acl_erp_ctcam_inc(erp_table);
+ if (err)
+ goto err_erp_ctcam_inc;
+
+ erp->erp_table = erp_table;
+
+ err = mlxsw_sp_acl_erp_master_mask_set(erp_table, &erp->key);
+ if (err)
+ goto err_master_mask_set;
+
+ err = mlxsw_sp_acl_erp_region_ctcam_enable(erp_table);
+ if (err)
+ goto err_erp_region_ctcam_enable;
+
+ return erp;
+
+err_erp_region_ctcam_enable:
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, &erp->key);
+err_master_mask_set:
+ mlxsw_sp_acl_erp_ctcam_dec(erp_table);
+err_erp_ctcam_inc:
+ kfree(erp);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_erp_ctcam_mask_destroy(struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
+
+ mlxsw_sp_acl_erp_region_ctcam_disable(erp_table);
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, &erp->key);
+ mlxsw_sp_acl_erp_ctcam_dec(erp_table);
+ kfree(erp);
+}
+
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
+{
+ struct mlxsw_sp_acl_erp *erp;
+ int err;
+
+ if (key->ctcam)
+ return mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
+
+ /* Expand the eRP table for the new eRP, if needed */
+ err = mlxsw_sp_acl_erp_table_expand(erp_table);
+ if (err)
+ return ERR_PTR(err);
+
+ erp = mlxsw_sp_acl_erp_generic_create(erp_table, key);
+ if (IS_ERR(erp))
+ return erp;
+
+ err = mlxsw_sp_acl_erp_index_get(erp_table, &erp->index);
+ if (err)
+ goto err_erp_index_get;
+
+ err = mlxsw_sp_acl_erp_table_erp_add(erp_table, erp);
+ if (err)
+ goto err_table_erp_add;
+
+ err = mlxsw_sp_acl_erp_region_erp_add(erp_table, erp);
+ if (err)
+ goto err_region_erp_add;
+
+ erp_table->ops = &erp_multiple_masks_ops;
+
+ return erp;
+
+err_region_erp_add:
+ mlxsw_sp_acl_erp_table_erp_del(erp);
+err_table_erp_add:
+ mlxsw_sp_acl_erp_index_put(erp_table, erp->index);
+err_erp_index_get:
+ mlxsw_sp_acl_erp_generic_destroy(erp);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_erp_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ if (erp->key.ctcam)
+ return mlxsw_sp_acl_erp_ctcam_mask_destroy(erp);
+
+ mlxsw_sp_acl_erp_region_erp_del(erp);
+ mlxsw_sp_acl_erp_table_erp_del(erp);
+ mlxsw_sp_acl_erp_index_put(erp_table, erp->index);
+ mlxsw_sp_acl_erp_generic_destroy(erp);
+
+ if (erp_table->num_atcam_erps == 2 && erp_table->num_ctcam_erps == 0 &&
+ erp_table->num_deltas == 0)
+ erp_table->ops = &erp_two_masks_ops;
+}
+
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_second_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
+{
+ struct mlxsw_sp_acl_erp *erp;
+ int err;
+
+ if (key->ctcam)
+ return mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
+
+ /* Transition to use eRP table instead of master mask */
+ err = mlxsw_sp_acl_erp_region_table_trans(erp_table);
+ if (err)
+ return ERR_PTR(err);
+
+ erp = mlxsw_sp_acl_erp_generic_create(erp_table, key);
+ if (IS_ERR(erp)) {
+ err = PTR_ERR(erp);
+ goto err_erp_create;
+ }
+
+ err = mlxsw_sp_acl_erp_index_get(erp_table, &erp->index);
+ if (err)
+ goto err_erp_index_get;
+
+ err = mlxsw_sp_acl_erp_table_erp_add(erp_table, erp);
+ if (err)
+ goto err_table_erp_add;
+
+ err = mlxsw_sp_acl_erp_region_erp_add(erp_table, erp);
+ if (err)
+ goto err_region_erp_add;
+
+ erp_table->ops = &erp_two_masks_ops;
+
+ return erp;
+
+err_region_erp_add:
+ mlxsw_sp_acl_erp_table_erp_del(erp);
+err_table_erp_add:
+ mlxsw_sp_acl_erp_index_put(erp_table, erp->index);
+err_erp_index_get:
+ mlxsw_sp_acl_erp_generic_destroy(erp);
+err_erp_create:
+ mlxsw_sp_acl_erp_region_master_mask_trans(erp_table);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_erp_second_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ if (erp->key.ctcam)
+ return mlxsw_sp_acl_erp_ctcam_mask_destroy(erp);
+
+ mlxsw_sp_acl_erp_region_erp_del(erp);
+ mlxsw_sp_acl_erp_table_erp_del(erp);
+ mlxsw_sp_acl_erp_index_put(erp_table, erp->index);
+ mlxsw_sp_acl_erp_generic_destroy(erp);
+ /* Transition to use master mask instead of eRP table */
+ mlxsw_sp_acl_erp_region_master_mask_trans(erp_table);
+
+ erp_table->ops = &erp_single_mask_ops;
+}
+
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_first_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
+{
+ struct mlxsw_sp_acl_erp *erp;
+
+ if (key->ctcam)
+ return ERR_PTR(-EINVAL);
+
+ erp = mlxsw_sp_acl_erp_generic_create(erp_table, key);
+ if (IS_ERR(erp))
+ return erp;
+
+ erp_table->ops = &erp_single_mask_ops;
+
+ return erp;
+}
+
+static void
+mlxsw_sp_acl_erp_first_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ mlxsw_sp_acl_erp_generic_destroy(erp);
+ erp_table->ops = &erp_no_mask_ops;
+}
+
+static void
+mlxsw_sp_acl_erp_no_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ WARN_ON(1);
+}
+
+struct mlxsw_sp_acl_erp_mask *
+mlxsw_sp_acl_erp_mask_get(struct mlxsw_sp_acl_atcam_region *aregion,
+ const char *mask, bool ctcam)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+ struct mlxsw_sp_acl_erp_key key;
+ struct objagg_obj *objagg_obj;
+
+ memcpy(key.mask, mask, MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
+ key.ctcam = ctcam;
+ mutex_lock(&erp_table->objagg_lock);
+ objagg_obj = objagg_obj_get(erp_table->objagg, &key);
+ mutex_unlock(&erp_table->objagg_lock);
+ if (IS_ERR(objagg_obj))
+ return ERR_CAST(objagg_obj);
+ return (struct mlxsw_sp_acl_erp_mask *) objagg_obj;
+}
+
+void mlxsw_sp_acl_erp_mask_put(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_erp_mask *erp_mask)
+{
+ struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+
+ mutex_lock(&erp_table->objagg_lock);
+ objagg_obj_put(erp_table->objagg, objagg_obj);
+ mutex_unlock(&erp_table->objagg_lock);
+}
+
+int mlxsw_sp_acl_erp_bf_insert(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_erp_mask *erp_mask,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
+ const struct mlxsw_sp_acl_erp *erp = objagg_obj_root_priv(objagg_obj);
+ unsigned int erp_bank;
+
+ if (!mlxsw_sp_acl_erp_table_is_used(erp->erp_table))
+ return 0;
+
+ erp_bank = mlxsw_sp_acl_erp_bank_get(erp);
+ return mlxsw_sp_acl_bf_entry_add(mlxsw_sp,
+ erp->erp_table->erp_core->bf,
+ aregion, erp_bank, aentry);
+}
+
+void mlxsw_sp_acl_erp_bf_remove(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_erp_mask *erp_mask,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
+ const struct mlxsw_sp_acl_erp *erp = objagg_obj_root_priv(objagg_obj);
+ unsigned int erp_bank;
+
+ if (!mlxsw_sp_acl_erp_table_is_used(erp->erp_table))
+ return;
+
+ erp_bank = mlxsw_sp_acl_erp_bank_get(erp);
+ mlxsw_sp_acl_bf_entry_del(mlxsw_sp,
+ erp->erp_table->erp_core->bf,
+ aregion, erp_bank, aentry);
+}
+
+bool
+mlxsw_sp_acl_erp_mask_is_ctcam(const struct mlxsw_sp_acl_erp_mask *erp_mask)
+{
+ struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
+ const struct mlxsw_sp_acl_erp_key *key = objagg_obj_raw(objagg_obj);
+
+ return key->ctcam;
+}
+
+u8 mlxsw_sp_acl_erp_mask_erp_id(const struct mlxsw_sp_acl_erp_mask *erp_mask)
+{
+ struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
+ const struct mlxsw_sp_acl_erp *erp = objagg_obj_root_priv(objagg_obj);
+
+ return erp->id;
+}
+
+struct mlxsw_sp_acl_erp_delta {
+ struct mlxsw_sp_acl_erp_key key;
+ u16 start;
+ u8 mask;
+};
+
+u16 mlxsw_sp_acl_erp_delta_start(const struct mlxsw_sp_acl_erp_delta *delta)
+{
+ return delta->start;
+}
+
+u8 mlxsw_sp_acl_erp_delta_mask(const struct mlxsw_sp_acl_erp_delta *delta)
+{
+ return delta->mask;
+}
+
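+/* Extract the delta bits from an encoded key: return the (at most eight)
+ * bits covered by the delta, starting at bit position delta->start,
+ * right-aligned and masked by delta->mask.
+ */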
+u8 mlxsw_sp_acl_erp_delta_value(const struct mlxsw_sp_acl_erp_delta *delta,
+ const char *enc_key)
+{
+ u16 start = delta->start;
+ u8 mask = delta->mask;
+ u16 tmp;
+
+ if (!mask)
+ return 0;
+
+ tmp = (unsigned char) enc_key[__MASK_IDX(start / 8)];
+ if (start / 8 + 1 < __MASK_LEN)
+ tmp |= (unsigned char) enc_key[__MASK_IDX(start / 8 + 1)] << 8;
+ tmp >>= start % 8;
+ tmp &= mask;
+ return tmp;
+}
+
+void mlxsw_sp_acl_erp_delta_clear(const struct mlxsw_sp_acl_erp_delta *delta,
+ const char *enc_key)
+{
+ u16 start = delta->start;
+ u8 mask = delta->mask;
+ unsigned char *byte;
+ u16 tmp;
+
+ tmp = mask;
+ tmp <<= start % 8;
+ tmp = ~tmp;
+
+ byte = (unsigned char *) &enc_key[__MASK_IDX(start / 8)];
+ *byte &= tmp & 0xff;
+ if (start / 8 + 1 < __MASK_LEN) {
+ byte = (unsigned char *) &enc_key[__MASK_IDX(start / 8 + 1)];
+ *byte &= (tmp >> 8) & 0xff;
+ }
+}
+
+static const struct mlxsw_sp_acl_erp_delta
+mlxsw_sp_acl_erp_delta_default = {};
+
+const struct mlxsw_sp_acl_erp_delta *
+mlxsw_sp_acl_erp_delta(const struct mlxsw_sp_acl_erp_mask *erp_mask)
+{
+ struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
+ const struct mlxsw_sp_acl_erp_delta *delta;
+
+ delta = objagg_obj_delta_priv(objagg_obj);
+ if (!delta)
+ delta = &mlxsw_sp_acl_erp_delta_default;
+ return delta;
+}
+
+static int
+mlxsw_sp_acl_erp_delta_fill(const struct mlxsw_sp_acl_erp_key *parent_key,
+ const struct mlxsw_sp_acl_erp_key *key,
+ u16 *delta_start, u8 *delta_mask)
+{
+ int offset = 0;
+ int si = -1;
+ u16 pmask;
+ u16 mask;
+ int i;
+
+ /* The difference between 2 masks can be up to 8 consecutive bits. */
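+ /* For example, if the parent mask byte is 0xf0 and the new mask
+ * byte is 0xfc (all other bytes equal), the delta covers bits 2-3
+ * of that byte: delta_start = byte_index * 8 + 2, delta_mask = 0x3.
+ */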
+ for (i = 0; i < __MASK_LEN; i++) {
+ if (parent_key->mask[__MASK_IDX(i)] == key->mask[__MASK_IDX(i)])
+ continue;
+ if (si == -1)
+ si = i;
+ else if (si != i - 1)
+ return -EINVAL;
+ }
+ if (si == -1) {
+ /* The masks are the same; this can happen when eRPs with
+ * the same mask were created in both the A-TCAM and the C-TCAM.
+ * The only possible condition under which this can happen
+ * is identical rule insertion. A delta is not possible here.
+ */
+ return -EINVAL;
+ }
+ pmask = (unsigned char) parent_key->mask[__MASK_IDX(si)];
+ mask = (unsigned char) key->mask[__MASK_IDX(si)];
+ if (si + 1 < __MASK_LEN) {
+ pmask |= (unsigned char) parent_key->mask[__MASK_IDX(si + 1)] << 8;
+ mask |= (unsigned char) key->mask[__MASK_IDX(si + 1)] << 8;
+ }
+
+ if ((pmask ^ mask) & pmask)
+ return -EINVAL;
+ mask &= ~pmask;
+ while (!(mask & (1 << offset)))
+ offset++;
+ while (!(mask & 1))
+ mask >>= 1;
+ if (mask & 0xff00)
+ return -EINVAL;
+
+ *delta_start = si * 8 + offset;
+ *delta_mask = mask;
+
+ return 0;
+}
+
+static bool mlxsw_sp_acl_erp_delta_check(void *priv, const void *parent_obj,
+ const void *obj)
+{
+ const struct mlxsw_sp_acl_erp_key *parent_key = parent_obj;
+ const struct mlxsw_sp_acl_erp_key *key = obj;
+ u16 delta_start;
+ u8 delta_mask;
+ int err;
+
+ err = mlxsw_sp_acl_erp_delta_fill(parent_key, key,
+ &delta_start, &delta_mask);
+ return err ? false : true;
+}
+
+static int mlxsw_sp_acl_erp_hints_obj_cmp(const void *obj1, const void *obj2)
+{
+ const struct mlxsw_sp_acl_erp_key *key1 = obj1;
+ const struct mlxsw_sp_acl_erp_key *key2 = obj2;
+
+ /* For hints purposes, two objects are considered equal if
+ * their masks are the same, regardless of the "ctcam" value.
+ */
+ return memcmp(key1->mask, key2->mask, sizeof(key1->mask));
+}
+
+static void *mlxsw_sp_acl_erp_delta_create(void *priv, void *parent_obj,
+ void *obj)
+{
+ struct mlxsw_sp_acl_erp_key *parent_key = parent_obj;
+ struct mlxsw_sp_acl_atcam_region *aregion = priv;
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+ struct mlxsw_sp_acl_erp_key *key = obj;
+ struct mlxsw_sp_acl_erp_delta *delta;
+ u16 delta_start;
+ u8 delta_mask;
+ int err;
+
+ if (parent_key->ctcam || key->ctcam)
+ return ERR_PTR(-EINVAL);
+ err = mlxsw_sp_acl_erp_delta_fill(parent_key, key,
+ &delta_start, &delta_mask);
+ if (err)
+ return ERR_PTR(-EINVAL);
+
+ delta = kzalloc(sizeof(*delta), GFP_KERNEL);
+ if (!delta)
+ return ERR_PTR(-ENOMEM);
+ delta->start = delta_start;
+ delta->mask = delta_mask;
+
+ err = mlxsw_sp_acl_erp_delta_inc(erp_table);
+ if (err)
+ goto err_erp_delta_inc;
+
+ memcpy(&delta->key, key, sizeof(*key));
+ err = mlxsw_sp_acl_erp_master_mask_set(erp_table, &delta->key);
+ if (err)
+ goto err_master_mask_set;
+
+ return delta;
+
+err_master_mask_set:
+ mlxsw_sp_acl_erp_delta_dec(erp_table);
+err_erp_delta_inc:
+ kfree(delta);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_acl_erp_delta_destroy(void *priv, void *delta_priv)
+{
+ struct mlxsw_sp_acl_erp_delta *delta = delta_priv;
+ struct mlxsw_sp_acl_atcam_region *aregion = priv;
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, &delta->key);
+ mlxsw_sp_acl_erp_delta_dec(erp_table);
+ kfree(delta);
+}
+
+static void *mlxsw_sp_acl_erp_root_create(void *priv, void *obj,
+ unsigned int root_id)
+{
+ struct mlxsw_sp_acl_atcam_region *aregion = priv;
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+ struct mlxsw_sp_acl_erp_key *key = obj;
+
+ if (!key->ctcam &&
+ root_id != OBJAGG_OBJ_ROOT_ID_INVALID &&
+ root_id >= MLXSW_SP_ACL_ERP_MAX_PER_REGION)
+ return ERR_PTR(-ENOBUFS);
+ return erp_table->ops->erp_create(erp_table, key);
+}
+
+static void mlxsw_sp_acl_erp_root_destroy(void *priv, void *root_priv)
+{
+ struct mlxsw_sp_acl_atcam_region *aregion = priv;
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+
+ erp_table->ops->erp_destroy(erp_table, root_priv);
+}
+
+static const struct objagg_ops mlxsw_sp_acl_erp_objagg_ops = {
+ .obj_size = sizeof(struct mlxsw_sp_acl_erp_key),
+ .delta_check = mlxsw_sp_acl_erp_delta_check,
+ .hints_obj_cmp = mlxsw_sp_acl_erp_hints_obj_cmp,
+ .delta_create = mlxsw_sp_acl_erp_delta_create,
+ .delta_destroy = mlxsw_sp_acl_erp_delta_destroy,
+ .root_create = mlxsw_sp_acl_erp_root_create,
+ .root_destroy = mlxsw_sp_acl_erp_root_destroy,
+};
+
+static struct mlxsw_sp_acl_erp_table *
+mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct objagg_hints *hints)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table;
+ int err;
+
+ erp_table = kzalloc(sizeof(*erp_table), GFP_KERNEL);
+ if (!erp_table)
+ return ERR_PTR(-ENOMEM);
+
+ erp_table->objagg = objagg_create(&mlxsw_sp_acl_erp_objagg_ops,
+ hints, aregion);
+ if (IS_ERR(erp_table->objagg)) {
+ err = PTR_ERR(erp_table->objagg);
+ goto err_objagg_create;
+ }
+
+ erp_table->erp_core = aregion->atcam->erp_core;
+ erp_table->ops = &erp_no_mask_ops;
+ INIT_LIST_HEAD(&erp_table->atcam_erps_list);
+ erp_table->aregion = aregion;
+ mutex_init(&erp_table->objagg_lock);
+
+ return erp_table;
+
+err_objagg_create:
+ kfree(erp_table);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_erp_table_destroy(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ WARN_ON(!list_empty(&erp_table->atcam_erps_list));
+ mutex_destroy(&erp_table->objagg_lock);
+ objagg_destroy(erp_table->objagg);
+ kfree(erp_table);
+}
+
+static int
+mlxsw_sp_acl_erp_master_mask_init(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp;
+ char percr_pl[MLXSW_REG_PERCR_LEN];
+
+ mlxsw_reg_percr_pack(percr_pl, aregion->region->id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(percr), percr_pl);
+}
+
+static int
+mlxsw_sp_acl_erp_region_param_init(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp;
+ char pererp_pl[MLXSW_REG_PERERP_LEN];
+
+ mlxsw_reg_pererp_pack(pererp_pl, aregion->region->id, false, false, 0,
+ 0, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
+}
+
+static int
+mlxsw_sp_acl_erp_hints_check(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct objagg_hints *hints, bool *p_rehash_needed)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+ const struct objagg_stats *ostats;
+ const struct objagg_stats *hstats;
+ int err;
+
+ *p_rehash_needed = false;
+
+ mutex_lock(&erp_table->objagg_lock);
+ ostats = objagg_stats_get(erp_table->objagg);
+ mutex_unlock(&erp_table->objagg_lock);
+ if (IS_ERR(ostats)) {
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get ERP stats\n");
+ return PTR_ERR(ostats);
+ }
+
+ hstats = objagg_hints_stats_get(hints);
+ if (IS_ERR(hstats)) {
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get ERP hints stats\n");
+ err = PTR_ERR(hstats);
+ goto err_hints_stats_get;
+ }
+
+ /* Very basic criterion for now. */
+ if (hstats->root_count < ostats->root_count)
+ *p_rehash_needed = true;
+
+ err = 0;
+
+ objagg_stats_put(hstats);
+err_hints_stats_get:
+ objagg_stats_put(ostats);
+ return err;
+}
+
+void *
+mlxsw_sp_acl_erp_rehash_hints_get(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+ struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp;
+ struct objagg_hints *hints;
+ bool rehash_needed;
+ int err;
+
+ mutex_lock(&erp_table->objagg_lock);
+ hints = objagg_hints_get(erp_table->objagg,
+ OBJAGG_OPT_ALGO_SIMPLE_GREEDY);
+ mutex_unlock(&erp_table->objagg_lock);
+ if (IS_ERR(hints)) {
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to create ERP hints\n");
+ return ERR_CAST(hints);
+ }
+ err = mlxsw_sp_acl_erp_hints_check(mlxsw_sp, aregion, hints,
+ &rehash_needed);
+ if (err)
+ goto errout;
+
+ if (!rehash_needed) {
+ err = -EAGAIN;
+ goto errout;
+ }
+ return hints;
+
+errout:
+ objagg_hints_put(hints);
+ return ERR_PTR(err);
+}
+
+void mlxsw_sp_acl_erp_rehash_hints_put(void *hints_priv)
+{
+ struct objagg_hints *hints = hints_priv;
+
+ objagg_hints_put(hints);
+}
+
+int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion,
+ void *hints_priv)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table;
+ struct objagg_hints *hints = hints_priv;
+ int err;
+
+ erp_table = mlxsw_sp_acl_erp_table_create(aregion, hints);
+ if (IS_ERR(erp_table))
+ return PTR_ERR(erp_table);
+ aregion->erp_table = erp_table;
+
+ /* Initialize the region's master mask to all zeroes */
+ err = mlxsw_sp_acl_erp_master_mask_init(aregion);
+ if (err)
+ goto err_erp_master_mask_init;
+
+ /* Initialize the region to not use the eRP table */
+ err = mlxsw_sp_acl_erp_region_param_init(aregion);
+ if (err)
+ goto err_erp_region_param_init;
+
+ return 0;
+
+err_erp_region_param_init:
+err_erp_master_mask_init:
+ mlxsw_sp_acl_erp_table_destroy(erp_table);
+ return err;
+}
+
+void mlxsw_sp_acl_erp_region_fini(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ mlxsw_sp_acl_erp_table_destroy(aregion->erp_table);
+}
+
+static int
+mlxsw_sp_acl_erp_tables_sizes_query(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_erp_core *erp_core)
+{
+ unsigned int size;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_ERPT_ENTRIES_2KB) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_ERPT_ENTRIES_4KB) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_ERPT_ENTRIES_8KB) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_ERPT_ENTRIES_12KB))
+ return -EIO;
+
+ size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ERPT_ENTRIES_2KB);
+ erp_core->erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_2KB] = size;
+
+ size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ERPT_ENTRIES_4KB);
+ erp_core->erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_4KB] = size;
+
+ size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ERPT_ENTRIES_8KB);
+ erp_core->erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_8KB] = size;
+
+ size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ERPT_ENTRIES_12KB);
+ erp_core->erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_12KB] = size;
+
+ return 0;
+}
+
+static int mlxsw_sp_acl_erp_tables_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_erp_core *erp_core)
+{
+ unsigned int erpt_bank_size;
+ int err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_ERPT_BANK_SIZE) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_ERPT_BANKS))
+ return -EIO;
+ erpt_bank_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ ACL_MAX_ERPT_BANK_SIZE);
+ erp_core->num_erp_banks = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ ACL_MAX_ERPT_BANKS);
+
+ erp_core->erp_tables = gen_pool_create(0, -1);
+ if (!erp_core->erp_tables)
+ return -ENOMEM;
+ gen_pool_set_algo(erp_core->erp_tables, gen_pool_best_fit, NULL);
+
+ err = gen_pool_add(erp_core->erp_tables,
+ MLXSW_SP_ACL_ERP_GENALLOC_OFFSET, erpt_bank_size,
+ -1);
+ if (err)
+ goto err_gen_pool_add;
+
+ erp_core->bf = mlxsw_sp_acl_bf_init(mlxsw_sp, erp_core->num_erp_banks);
+ if (IS_ERR(erp_core->bf)) {
+ err = PTR_ERR(erp_core->bf);
+ goto err_bf_init;
+ }
+
+ /* Different regions require masks of different sizes */
+ err = mlxsw_sp_acl_erp_tables_sizes_query(mlxsw_sp, erp_core);
+ if (err)
+ goto err_erp_tables_sizes_query;
+
+ return 0;
+
+err_erp_tables_sizes_query:
+ mlxsw_sp_acl_bf_fini(erp_core->bf);
+err_bf_init:
+err_gen_pool_add:
+ gen_pool_destroy(erp_core->erp_tables);
+ return err;
+}
+
+static void mlxsw_sp_acl_erp_tables_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_erp_core *erp_core)
+{
+ mlxsw_sp_acl_bf_fini(erp_core->bf);
+ gen_pool_destroy(erp_core->erp_tables);
+}
+
+int mlxsw_sp_acl_erps_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam)
+{
+ struct mlxsw_sp_acl_erp_core *erp_core;
+ int err;
+
+ erp_core = kzalloc(sizeof(*erp_core), GFP_KERNEL);
+ if (!erp_core)
+ return -ENOMEM;
+ erp_core->mlxsw_sp = mlxsw_sp;
+ atcam->erp_core = erp_core;
+
+ err = mlxsw_sp_acl_erp_tables_init(mlxsw_sp, erp_core);
+ if (err)
+ goto err_erp_tables_init;
+
+ return 0;
+
+err_erp_tables_init:
+ kfree(erp_core);
+ return err;
+}
+
+void mlxsw_sp_acl_erps_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam)
+{
+ mlxsw_sp_acl_erp_tables_fini(mlxsw_sp, atcam->erp_core);
+ kfree(atcam->erp_core);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
new file mode 100644
index 000000000..50806594d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#include "spectrum_acl_flex_actions.h"
+#include "core_acl_flex_actions.h"
+#include "spectrum_span.h"
+
+static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
+ char *enc_actions, bool is_first, bool ca)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ char pefa_pl[MLXSW_REG_PEFA_LEN];
+ u32 kvdl_index;
+ int err;
+
+ /* The first action set of a TCAM entry is stored directly in the TCAM,
+ * not in the KVD linear area.
+ */
+ if (is_first)
+ return 0;
+
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ 1, &kvdl_index);
+ if (err)
+ return err;
+ mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, ca, enc_actions);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
+ if (err)
+ goto err_pefa_write;
+ *p_kvdl_index = kvdl_index;
+ return 0;
+
+err_pefa_write:
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ 1, kvdl_index);
+ return err;
+}
+
+static int mlxsw_sp1_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
+ char *enc_actions, bool is_first)
+{
+ return mlxsw_sp_act_kvdl_set_add(priv, p_kvdl_index, enc_actions,
+ is_first, false);
+}
+
+static int mlxsw_sp2_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
+ char *enc_actions, bool is_first)
+{
+ return mlxsw_sp_act_kvdl_set_add(priv, p_kvdl_index, enc_actions,
+ is_first, true);
+}
+
+static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index,
+ bool is_first)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ if (is_first)
+ return;
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ 1, kvdl_index);
+}
+
+static int mlxsw_sp1_act_kvdl_set_activity_get(void *priv, u32 kvdl_index,
+ bool *activity)
+{
+ return -EOPNOTSUPP;
+}
+
+static int mlxsw_sp2_act_kvdl_set_activity_get(void *priv, u32 kvdl_index,
+ bool *activity)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ char pefa_pl[MLXSW_REG_PEFA_LEN];
+ int err;
+
+ mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, true, NULL);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
+ if (err)
+ return err;
+ mlxsw_reg_pefa_unpack(pefa_pl, activity);
+ return 0;
+}
+
+static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
+ u16 local_port)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ char ppbs_pl[MLXSW_REG_PPBS_LEN];
+ u32 kvdl_index;
+ int err;
+
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
+ 1, &kvdl_index);
+ if (err)
+ return err;
+ mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbs), ppbs_pl);
+ if (err)
+ goto err_ppbs_write;
+ *p_kvdl_index = kvdl_index;
+ return 0;
+
+err_ppbs_write:
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
+ 1, kvdl_index);
+ return err;
+}
+
+static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
+ 1, kvdl_index);
+}
+
+static int
+mlxsw_sp_act_counter_index_get(void *priv, unsigned int *p_counter_index)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ return mlxsw_sp_flow_counter_alloc(mlxsw_sp, p_counter_index);
+}
+
+static void
+mlxsw_sp_act_counter_index_put(void *priv, unsigned int counter_index)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ mlxsw_sp_flow_counter_free(mlxsw_sp, counter_index);
+}
+
+static int
+mlxsw_sp_act_mirror_add(void *priv, u16 local_in_port,
+ const struct net_device *out_dev,
+ bool ingress, int *p_span_id)
+{
+ struct mlxsw_sp_span_agent_parms agent_parms = {};
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp = priv;
+ int err;
+
+ agent_parms.to_dev = out_dev;
+ err = mlxsw_sp_span_agent_get(mlxsw_sp, p_span_id, &agent_parms);
+ if (err)
+ return err;
+
+ mlxsw_sp_port = mlxsw_sp->ports[local_in_port];
+ err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, ingress);
+ if (err)
+ goto err_analyzed_port_get;
+
+ return 0;
+
+err_analyzed_port_get:
+ mlxsw_sp_span_agent_put(mlxsw_sp, *p_span_id);
+ return err;
+}
+
+static void
+mlxsw_sp_act_mirror_del(void *priv, u16 local_in_port, int span_id, bool ingress)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ mlxsw_sp_port = mlxsw_sp->ports[local_in_port];
+ mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress);
+ mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
+}
+
+static int mlxsw_sp_act_policer_add(void *priv, u64 rate_bytes_ps, u32 burst,
+ u16 *p_policer_index,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_policer_params params;
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ params.rate = rate_bytes_ps;
+ params.burst = burst;
+ params.bytes = true;
+ return mlxsw_sp_policer_add(mlxsw_sp,
+ MLXSW_SP_POLICER_TYPE_SINGLE_RATE,
+ &params, extack, p_policer_index);
+}
+
+static void mlxsw_sp_act_policer_del(void *priv, u16 policer_index)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ mlxsw_sp_policer_del(mlxsw_sp, MLXSW_SP_POLICER_TYPE_SINGLE_RATE,
+ policer_index);
+}
+
+static int mlxsw_sp1_act_sampler_add(void *priv, u16 local_port,
+ struct psample_group *psample_group,
+ u32 rate, u32 trunc_size, bool truncate,
+ bool ingress, int *p_span_id,
+ struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG_MOD(extack, "Sampling action is not supported on Spectrum-1");
+ return -EOPNOTSUPP;
+}
+
+static void mlxsw_sp1_act_sampler_del(void *priv, u16 local_port, int span_id,
+ bool ingress)
+{
+ WARN_ON_ONCE(1);
+}
+
+const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = {
+ .kvdl_set_add = mlxsw_sp1_act_kvdl_set_add,
+ .kvdl_set_del = mlxsw_sp_act_kvdl_set_del,
+ .kvdl_set_activity_get = mlxsw_sp1_act_kvdl_set_activity_get,
+ .kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add,
+ .kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del,
+ .counter_index_get = mlxsw_sp_act_counter_index_get,
+ .counter_index_put = mlxsw_sp_act_counter_index_put,
+ .mirror_add = mlxsw_sp_act_mirror_add,
+ .mirror_del = mlxsw_sp_act_mirror_del,
+ .policer_add = mlxsw_sp_act_policer_add,
+ .policer_del = mlxsw_sp_act_policer_del,
+ .sampler_add = mlxsw_sp1_act_sampler_add,
+ .sampler_del = mlxsw_sp1_act_sampler_del,
+};
+
+static int mlxsw_sp2_act_sampler_add(void *priv, u16 local_port,
+ struct psample_group *psample_group,
+ u32 rate, u32 trunc_size, bool truncate,
+ bool ingress, int *p_span_id,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_span_agent_parms agent_parms = {
+ .session_id = MLXSW_SP_SPAN_SESSION_ID_SAMPLING,
+ };
+ struct mlxsw_sp_sample_trigger trigger = {
+ .type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_POLICY_ENGINE,
+ };
+ struct mlxsw_sp_sample_params params;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp = priv;
+ int err;
+
+ params.psample_group = psample_group;
+ params.trunc_size = trunc_size;
+ params.rate = rate;
+ params.truncate = truncate;
+ err = mlxsw_sp_sample_trigger_params_set(mlxsw_sp, &trigger, &params,
+ extack);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_span_agent_get(mlxsw_sp, p_span_id, &agent_parms);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get SPAN agent");
+ goto err_span_agent_get;
+ }
+
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, ingress);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get analyzed port");
+ goto err_analyzed_port_get;
+ }
+
+ return 0;
+
+err_analyzed_port_get:
+ mlxsw_sp_span_agent_put(mlxsw_sp, *p_span_id);
+err_span_agent_get:
+ mlxsw_sp_sample_trigger_params_unset(mlxsw_sp, &trigger);
+ return err;
+}
+
+static void mlxsw_sp2_act_sampler_del(void *priv, u16 local_port, int span_id,
+ bool ingress)
+{
+ struct mlxsw_sp_sample_trigger trigger = {
+ .type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_POLICY_ENGINE,
+ };
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress);
+ mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
+ mlxsw_sp_sample_trigger_params_unset(mlxsw_sp, &trigger);
+}
+
+const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops = {
+ .kvdl_set_add = mlxsw_sp2_act_kvdl_set_add,
+ .kvdl_set_del = mlxsw_sp_act_kvdl_set_del,
+ .kvdl_set_activity_get = mlxsw_sp2_act_kvdl_set_activity_get,
+ .kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add,
+ .kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del,
+ .counter_index_get = mlxsw_sp_act_counter_index_get,
+ .counter_index_put = mlxsw_sp_act_counter_index_put,
+ .mirror_add = mlxsw_sp_act_mirror_add,
+ .mirror_del = mlxsw_sp_act_mirror_del,
+ .policer_add = mlxsw_sp_act_policer_add,
+ .policer_del = mlxsw_sp_act_policer_del,
+ .sampler_add = mlxsw_sp2_act_sampler_add,
+ .sampler_del = mlxsw_sp2_act_sampler_del,
+ .dummy_first_set = true,
+};
+
+int mlxsw_sp_afa_init(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp->afa = mlxsw_afa_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ ACL_ACTIONS_PER_SET),
+ mlxsw_sp->afa_ops, mlxsw_sp);
+ return PTR_ERR_OR_ZERO(mlxsw_sp->afa);
+}
+
+void mlxsw_sp_afa_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_afa_destroy(mlxsw_sp->afa);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h
new file mode 100644
index 000000000..fe436d816
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_SPECTRUM_ACL_FLEX_ACTIONS_H
+#define _MLXSW_SPECTRUM_ACL_FLEX_ACTIONS_H
+
+#include "spectrum.h"
+
+int mlxsw_sp_afa_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_afa_fini(struct mlxsw_sp *mlxsw_sp);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
new file mode 100644
index 000000000..173808c09
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
@@ -0,0 +1,355 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "spectrum.h"
+#include "item.h"
+#include "core_acl_flex_keys.h"
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x00, 2),
+ MLXSW_AFK_ELEMENT_INST_BUF(DMAC_0_31, 0x02, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x00, 2),
+ MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x02, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x02, 2),
+ MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x04, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x0C, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 4, 2),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 24, 8),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_DSCP, 0x08, 0, 6),
+ MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x08, 8, 9), /* TCP_CONTROL+TCP_ECN */
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x00, 0, 12),
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 29, 3),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x08, 0, 16),
+ MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x0C, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_dip[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_ex1[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_64_95, 0x04, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_32_63, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip_ex[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_96_127, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_64_95, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_packet_type[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x00, 0, 16),
+};
+
+static const struct mlxsw_afk_block mlxsw_sp1_afk_blocks[] = {
+ MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_l2_dmac),
+ MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_l2_smac),
+ MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_l2_smac_ex),
+ MLXSW_AFK_BLOCK(0x30, mlxsw_sp_afk_element_info_ipv4_sip),
+ MLXSW_AFK_BLOCK(0x31, mlxsw_sp_afk_element_info_ipv4_dip),
+ MLXSW_AFK_BLOCK(0x32, mlxsw_sp_afk_element_info_ipv4),
+ MLXSW_AFK_BLOCK(0x33, mlxsw_sp_afk_element_info_ipv4_ex),
+ MLXSW_AFK_BLOCK(0x60, mlxsw_sp_afk_element_info_ipv6_dip),
+ MLXSW_AFK_BLOCK(0x65, mlxsw_sp_afk_element_info_ipv6_ex1),
+ MLXSW_AFK_BLOCK(0x62, mlxsw_sp_afk_element_info_ipv6_sip),
+ MLXSW_AFK_BLOCK(0x63, mlxsw_sp_afk_element_info_ipv6_sip_ex),
+ MLXSW_AFK_BLOCK(0xB0, mlxsw_sp_afk_element_info_packet_type),
+};
+
+#define MLXSW_SP1_AFK_KEY_BLOCK_SIZE 16
+
+static void mlxsw_sp1_afk_encode_block(char *output, int block_index,
+ char *block)
+{
+ unsigned int offset = block_index * MLXSW_SP1_AFK_KEY_BLOCK_SIZE;
+ char *output_indexed = output + offset;
+
+ memcpy(output_indexed, block, MLXSW_SP1_AFK_KEY_BLOCK_SIZE);
+}
+
+static void mlxsw_sp1_afk_clear_block(char *output, int block_index)
+{
+ unsigned int offset = block_index * MLXSW_SP1_AFK_KEY_BLOCK_SIZE;
+ char *output_indexed = output + offset;
+
+ memset(output_indexed, 0, MLXSW_SP1_AFK_KEY_BLOCK_SIZE);
+}
+
+const struct mlxsw_afk_ops mlxsw_sp1_afk_ops = {
+ .blocks = mlxsw_sp1_afk_blocks,
+ .blocks_count = ARRAY_SIZE(mlxsw_sp1_afk_blocks),
+ .encode_block = mlxsw_sp1_afk_encode_block,
+ .clear_block = mlxsw_sp1_afk_clear_block,
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_0[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DMAC_0_31, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_1[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_2[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x04, 2),
+ MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x06, 2),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_3[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x00, 0, 3),
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12),
+ MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x06, 2),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_4[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x00, 0, 3),
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12),
+ MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x04, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12),
+ MLXSW_AFK_ELEMENT_INST_EXT_U32(SRC_SYS_PORT, 0x04, 0, 8, -1, true), /* RX_ACL_SYSTEM_PORT */
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_0[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_1[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_2[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(IP_DSCP, 0x04, 0, 6),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 6, 2),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 8, 8),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x04, 16, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_4[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_LSB, 0x04, 24, 8),
+ MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER_MSB, 0x00, 0, 3, 0, true),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_1[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_64_95, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_3[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_32_63, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_4[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_64_95, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_5[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_96_127, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_0[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x04, 16, 16),
+ MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x04, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_2[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x04, 16, 9), /* TCP_CONTROL + TCP_ECN */
+};
+
+static const struct mlxsw_afk_block mlxsw_sp2_afk_blocks[] = {
+ MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_mac_0),
+ MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_mac_1),
+ MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_mac_2),
+ MLXSW_AFK_BLOCK(0x13, mlxsw_sp_afk_element_info_mac_3),
+ MLXSW_AFK_BLOCK(0x14, mlxsw_sp_afk_element_info_mac_4),
+ MLXSW_AFK_BLOCK(0x15, mlxsw_sp_afk_element_info_mac_5),
+ MLXSW_AFK_BLOCK(0x38, mlxsw_sp_afk_element_info_ipv4_0),
+ MLXSW_AFK_BLOCK(0x39, mlxsw_sp_afk_element_info_ipv4_1),
+ MLXSW_AFK_BLOCK(0x3A, mlxsw_sp_afk_element_info_ipv4_2),
+ MLXSW_AFK_BLOCK(0x3C, mlxsw_sp_afk_element_info_ipv4_4),
+ MLXSW_AFK_BLOCK(0x40, mlxsw_sp_afk_element_info_ipv6_0),
+ MLXSW_AFK_BLOCK(0x41, mlxsw_sp_afk_element_info_ipv6_1),
+ MLXSW_AFK_BLOCK(0x42, mlxsw_sp_afk_element_info_ipv6_2),
+ MLXSW_AFK_BLOCK(0x43, mlxsw_sp_afk_element_info_ipv6_3),
+ MLXSW_AFK_BLOCK(0x44, mlxsw_sp_afk_element_info_ipv6_4),
+ MLXSW_AFK_BLOCK(0x45, mlxsw_sp_afk_element_info_ipv6_5),
+ MLXSW_AFK_BLOCK(0x90, mlxsw_sp_afk_element_info_l4_0),
+ MLXSW_AFK_BLOCK(0x92, mlxsw_sp_afk_element_info_l4_2),
+};
+
+#define MLXSW_SP2_AFK_BITS_PER_BLOCK 36
+
+/* A block in Spectrum-2 is of the following form:
+ *
+ * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+ * | | | | | | | | | | | | | | | | | | | | | | | | | | | | |35|34|33|32|
+ * +-----------------------------------------------------------------------------------------------+
+ * |31|30|29|28|27|26|25|24|23|22|21|20|19|18|17|16|15|14|13|12|11|10| 9| 8| 7| 6| 5| 4| 3| 2| 1| 0|
+ * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+ */
+MLXSW_ITEM64(sp2_afk, block, value, 0x00, 0, MLXSW_SP2_AFK_BITS_PER_BLOCK);
+
+/* The key / mask block layout in Spectrum-2 is of the following form:
+ *
+ * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+ * | | | | | | | | | | | | | | | | | block11_high |
+ * +-----------------------------------------------------------------------------------------------+
+ * | block11_low | block10_high |
+ * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+ * ...
+ */
+
+struct mlxsw_sp2_afk_block_layout {
+ unsigned short offset;
+ struct mlxsw_item item;
+};
+
+#define MLXSW_SP2_AFK_BLOCK_LAYOUT(_block, _offset, _shift) \
+ { \
+ .offset = _offset, \
+ { \
+ .shift = _shift, \
+ .size = {.bits = MLXSW_SP2_AFK_BITS_PER_BLOCK}, \
+ .name = #_block, \
+ } \
+ } \
+
+static const struct mlxsw_sp2_afk_block_layout mlxsw_sp2_afk_blocks_layout[] = {
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block0, 0x30, 0),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block1, 0x2C, 4),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block2, 0x28, 8),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block3, 0x24, 12),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block4, 0x20, 16),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block5, 0x1C, 20),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block6, 0x18, 24),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block7, 0x14, 28),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block8, 0x0C, 0),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block9, 0x08, 4),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block10, 0x04, 8),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block11, 0x00, 12),
+};
+
+static void __mlxsw_sp2_afk_block_value_set(char *output, int block_index,
+ u64 block_value)
+{
+ const struct mlxsw_sp2_afk_block_layout *block_layout;
+
+ if (WARN_ON(block_index < 0 ||
+ block_index >= ARRAY_SIZE(mlxsw_sp2_afk_blocks_layout)))
+ return;
+
+ block_layout = &mlxsw_sp2_afk_blocks_layout[block_index];
+ __mlxsw_item_set64(output + block_layout->offset,
+ &block_layout->item, 0, block_value);
+}
+
+static void mlxsw_sp2_afk_encode_block(char *output, int block_index,
+ char *block)
+{
+ u64 block_value = mlxsw_sp2_afk_block_value_get(block);
+
+ __mlxsw_sp2_afk_block_value_set(output, block_index, block_value);
+}
+
+static void mlxsw_sp2_afk_clear_block(char *output, int block_index)
+{
+ __mlxsw_sp2_afk_block_value_set(output, block_index, 0);
+}
+
+const struct mlxsw_afk_ops mlxsw_sp2_afk_ops = {
+ .blocks = mlxsw_sp2_afk_blocks,
+ .blocks_count = ARRAY_SIZE(mlxsw_sp2_afk_blocks),
+ .encode_block = mlxsw_sp2_afk_encode_block,
+ .clear_block = mlxsw_sp2_afk_clear_block,
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5b[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 18, 12),
+ MLXSW_AFK_ELEMENT_INST_EXT_U32(SRC_SYS_PORT, 0x04, 0, 9, -1, true), /* RX_ACL_SYSTEM_PORT */
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_4b[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_LSB, 0x04, 13, 8),
+ MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_MSB, 0x04, 21, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2b[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x04, 4),
+};
+
+static const struct mlxsw_afk_block mlxsw_sp4_afk_blocks[] = {
+ MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_mac_0),
+ MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_mac_1),
+ MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_mac_2),
+ MLXSW_AFK_BLOCK(0x13, mlxsw_sp_afk_element_info_mac_3),
+ MLXSW_AFK_BLOCK(0x14, mlxsw_sp_afk_element_info_mac_4),
+ MLXSW_AFK_BLOCK(0x1A, mlxsw_sp_afk_element_info_mac_5b),
+ MLXSW_AFK_BLOCK(0x38, mlxsw_sp_afk_element_info_ipv4_0),
+ MLXSW_AFK_BLOCK(0x39, mlxsw_sp_afk_element_info_ipv4_1),
+ MLXSW_AFK_BLOCK(0x3A, mlxsw_sp_afk_element_info_ipv4_2),
+ MLXSW_AFK_BLOCK(0x35, mlxsw_sp_afk_element_info_ipv4_4b),
+ MLXSW_AFK_BLOCK(0x40, mlxsw_sp_afk_element_info_ipv6_0),
+ MLXSW_AFK_BLOCK(0x41, mlxsw_sp_afk_element_info_ipv6_1),
+ MLXSW_AFK_BLOCK(0x47, mlxsw_sp_afk_element_info_ipv6_2b),
+ MLXSW_AFK_BLOCK(0x43, mlxsw_sp_afk_element_info_ipv6_3),
+ MLXSW_AFK_BLOCK(0x44, mlxsw_sp_afk_element_info_ipv6_4),
+ MLXSW_AFK_BLOCK(0x45, mlxsw_sp_afk_element_info_ipv6_5),
+ MLXSW_AFK_BLOCK(0x90, mlxsw_sp_afk_element_info_l4_0),
+ MLXSW_AFK_BLOCK(0x92, mlxsw_sp_afk_element_info_l4_2),
+};
+
+const struct mlxsw_afk_ops mlxsw_sp4_afk_ops = {
+ .blocks = mlxsw_sp4_afk_blocks,
+ .blocks_count = ARRAY_SIZE(mlxsw_sp4_afk_blocks),
+ .encode_block = mlxsw_sp2_afk_encode_block,
+ .clear_block = mlxsw_sp2_afk_clear_block,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
new file mode 100644
index 000000000..3b9ba8fa2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -0,0 +1,1867 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/bitops.h>
+#include <linux/list.h>
+#include <linux/rhashtable.h>
+#include <linux/netdevice.h>
+#include <linux/mutex.h>
+#include <trace/events/mlxsw.h>
+
+#include "reg.h"
+#include "core.h"
+#include "resources.h"
+#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
+#include "core_acl_flex_keys.h"
+
+size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+
+ return ops->priv_size;
+}
+
+#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT 5000 /* ms */
+#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN 3000 /* ms */
+#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS 100 /* number of entries */
+
+int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+ u64 max_tcam_regions;
+ u64 max_regions;
+ u64 max_groups;
+ int err;
+
+ mutex_init(&tcam->lock);
+ tcam->vregion_rehash_intrvl =
+ MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT;
+ INIT_LIST_HEAD(&tcam->vregion_list);
+
+ max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ ACL_MAX_TCAM_REGIONS);
+ max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);
+
+ /* Use 1:1 mapping between ACL region and TCAM region */
+ if (max_tcam_regions < max_regions)
+ max_regions = max_tcam_regions;
+
+ tcam->used_regions = bitmap_zalloc(max_regions, GFP_KERNEL);
+ if (!tcam->used_regions)
+ return -ENOMEM;
+ tcam->max_regions = max_regions;
+
+ max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
+ tcam->used_groups = bitmap_zalloc(max_groups, GFP_KERNEL);
+ if (!tcam->used_groups) {
+ err = -ENOMEM;
+ goto err_alloc_used_groups;
+ }
+ tcam->max_groups = max_groups;
+ tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ ACL_MAX_GROUP_SIZE);
+
+ err = ops->init(mlxsw_sp, tcam->priv, tcam);
+ if (err)
+ goto err_tcam_init;
+
+ return 0;
+
+err_tcam_init:
+ bitmap_free(tcam->used_groups);
+err_alloc_used_groups:
+ bitmap_free(tcam->used_regions);
+ return err;
+}
+
+void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+
+ mutex_destroy(&tcam->lock);
+ ops->fini(mlxsw_sp, tcam->priv);
+ bitmap_free(tcam->used_groups);
+ bitmap_free(tcam->used_regions);
+}
+
+int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u32 *priority, bool fillup_priority)
+{
+ u64 max_priority;
+
+ if (!fillup_priority) {
+ *priority = 0;
+ return 0;
+ }
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE))
+ return -EIO;
+
+ /* Priority range is 1..cap_kvd_size-1. */
+ max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE) - 1;
+ if (rulei->priority >= max_priority)
+ return -EINVAL;
+
+ /* Unlike in TC, in HW, higher number means higher priority. */
+ *priority = max_priority - rulei->priority;
+ return 0;
+}
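
For illustration only (not part of the patch): the helper above inverts TC rule priorities into hardware priorities, because lower numbers win in TC while higher numbers win in the device. Below is a minimal standalone C sketch of that mapping, assuming a hypothetical cap_kvd_size of 0x40000; in the driver the real value comes from MLXSW_CORE_RES_GET(..., KVD_SIZE).

#include <stdint.h>
#include <stdio.h>

/* Hypothetical resource value; on real hardware this comes from the
 * device's KVD_SIZE resource.
 */
#define EXAMPLE_KVD_SIZE 0x40000ULL

static int example_priority_get(uint32_t tc_prio, uint32_t *hw_prio)
{
	uint64_t max_priority = EXAMPLE_KVD_SIZE - 1;

	if (tc_prio >= max_priority)
		return -1;
	/* Lower number means higher priority in TC; in HW it is the
	 * opposite, so invert.
	 */
	*hw_prio = max_priority - tc_prio;
	return 0;
}

int main(void)
{
	uint32_t hw_prio;

	if (!example_priority_get(10, &hw_prio))
		printf("TC prio 10 -> HW prio 0x%x\n", hw_prio);
	return 0;
}
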
+
+static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
+ u16 *p_id)
+{
+ u16 id;
+
+ id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
+ if (id < tcam->max_regions) {
+ __set_bit(id, tcam->used_regions);
+ *p_id = id;
+ return 0;
+ }
+ return -ENOBUFS;
+}
+
+static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
+ u16 id)
+{
+ __clear_bit(id, tcam->used_regions);
+}
+
+static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
+ u16 *p_id)
+{
+ u16 id;
+
+ id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
+ if (id < tcam->max_groups) {
+ __set_bit(id, tcam->used_groups);
+ *p_id = id;
+ return 0;
+ }
+ return -ENOBUFS;
+}
+
+static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
+ u16 id)
+{
+ __clear_bit(id, tcam->used_groups);
+}
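
For illustration only (not part of the patch): the region and group ID get/put helpers above are a plain bitmap allocator. Below is a minimal standalone sketch of the same pattern, with a simple array scan standing in for the kernel's find_first_zero_bit()/__set_bit()/__clear_bit(); all names here are hypothetical.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_MAX_IDS 16

static bool used[EXAMPLE_MAX_IDS];

static int example_id_get(uint16_t *p_id)
{
	uint16_t id;

	for (id = 0; id < EXAMPLE_MAX_IDS; id++) {
		if (!used[id]) {
			used[id] = true;	/* like __set_bit() */
			*p_id = id;
			return 0;
		}
	}
	return -1;	/* no free ID, like -ENOBUFS */
}

static void example_id_put(uint16_t id)
{
	used[id] = false;	/* like __clear_bit() */
}

int main(void)
{
	uint16_t id;

	if (!example_id_get(&id)) {
		printf("allocated id %u\n", id);
		example_id_put(id);
	}
	return 0;
}
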
+
+struct mlxsw_sp_acl_tcam_pattern {
+ const enum mlxsw_afk_element *elements;
+ unsigned int elements_count;
+};
+
+struct mlxsw_sp_acl_tcam_group {
+ struct mlxsw_sp_acl_tcam *tcam;
+ u16 id;
+ struct mutex lock; /* guards region list updates */
+ struct list_head region_list;
+ unsigned int region_count;
+};
+
+struct mlxsw_sp_acl_tcam_vgroup {
+ struct mlxsw_sp_acl_tcam_group group;
+ struct list_head vregion_list;
+ struct rhashtable vchunk_ht;
+ const struct mlxsw_sp_acl_tcam_pattern *patterns;
+ unsigned int patterns_count;
+ bool tmplt_elusage_set;
+ struct mlxsw_afk_element_usage tmplt_elusage;
+ bool vregion_rehash_enabled;
+ unsigned int *p_min_prio;
+ unsigned int *p_max_prio;
+};
+
+struct mlxsw_sp_acl_tcam_rehash_ctx {
+ void *hints_priv;
+ bool this_is_rollback;
+ struct mlxsw_sp_acl_tcam_vchunk *current_vchunk; /* vchunk being
+ * currently migrated.
+ */
+ struct mlxsw_sp_acl_tcam_ventry *start_ventry; /* ventry to start
+ * migration from in
+ * a vchunk being
+ * currently migrated.
+ */
+ struct mlxsw_sp_acl_tcam_ventry *stop_ventry; /* ventry at which to
+ * stop migration in
+ * a vchunk being
+ * currently migrated.
+ */
+};
+
+struct mlxsw_sp_acl_tcam_vregion {
+ struct mutex lock; /* Protects consistency of region, region2 pointers
+ * and vchunk_list.
+ */
+ struct mlxsw_sp_acl_tcam_region *region;
+ struct mlxsw_sp_acl_tcam_region *region2; /* Used during migration */
+ struct list_head list; /* Member of a TCAM group */
+ struct list_head tlist; /* Member of a TCAM */
+ struct list_head vchunk_list; /* List of vchunks under this vregion */
+ struct mlxsw_afk_key_info *key_info;
+ struct mlxsw_sp_acl_tcam *tcam;
+ struct mlxsw_sp_acl_tcam_vgroup *vgroup;
+ struct {
+ struct delayed_work dw;
+ struct mlxsw_sp_acl_tcam_rehash_ctx ctx;
+ } rehash;
+ struct mlxsw_sp *mlxsw_sp;
+ unsigned int ref_count;
+};
+
+struct mlxsw_sp_acl_tcam_vchunk;
+
+struct mlxsw_sp_acl_tcam_chunk {
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk;
+ struct mlxsw_sp_acl_tcam_region *region;
+ unsigned long priv[];
+ /* priv has to be always the last item */
+};
+
+struct mlxsw_sp_acl_tcam_vchunk {
+ struct mlxsw_sp_acl_tcam_chunk *chunk;
+ struct mlxsw_sp_acl_tcam_chunk *chunk2; /* Used during migration */
+ struct list_head list; /* Member of a TCAM vregion */
+ struct rhash_head ht_node; /* Member of a chunk HT */
+ struct list_head ventry_list;
+ unsigned int priority; /* Priority within the vregion and group */
+ struct mlxsw_sp_acl_tcam_vgroup *vgroup;
+ struct mlxsw_sp_acl_tcam_vregion *vregion;
+ unsigned int ref_count;
+};
+
+struct mlxsw_sp_acl_tcam_entry {
+ struct mlxsw_sp_acl_tcam_ventry *ventry;
+ struct mlxsw_sp_acl_tcam_chunk *chunk;
+ unsigned long priv[];
+ /* priv has to be always the last item */
+};
+
+struct mlxsw_sp_acl_tcam_ventry {
+ struct mlxsw_sp_acl_tcam_entry *entry;
+ struct list_head list; /* Member of a TCAM vchunk */
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk;
+ struct mlxsw_sp_acl_rule_info *rulei;
+};
+
+static const struct rhashtable_params mlxsw_sp_acl_tcam_vchunk_ht_params = {
+ .key_len = sizeof(unsigned int),
+ .key_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, priority),
+ .head_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, ht_node),
+ .automatic_shrinking = true,
+};
+
+static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_group *group)
+{
+ struct mlxsw_sp_acl_tcam_region *region;
+ char pagt_pl[MLXSW_REG_PAGT_LEN];
+ int acl_index = 0;
+
+ mlxsw_reg_pagt_pack(pagt_pl, group->id);
+ list_for_each_entry(region, &group->region_list, list) {
+ bool multi = false;
+
+ /* Check if the next entry in the list has the same vregion. */
+ if (region->list.next != &group->region_list &&
+ list_next_entry(region, list)->vregion == region->vregion)
+ multi = true;
+ mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++,
+ region->id, multi);
+ }
+ mlxsw_reg_pagt_size_set(pagt_pl, acl_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl);
+}
+
+static int
+mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp_acl_tcam *tcam,
+ struct mlxsw_sp_acl_tcam_group *group)
+{
+ int err;
+
+ group->tcam = tcam;
+ INIT_LIST_HEAD(&group->region_list);
+
+ err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
+ if (err)
+ return err;
+
+ mutex_init(&group->lock);
+
+ return 0;
+}
+
+static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp_acl_tcam_group *group)
+{
+ struct mlxsw_sp_acl_tcam *tcam = group->tcam;
+
+ mutex_destroy(&group->lock);
+ mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
+ WARN_ON(!list_empty(&group->region_list));
+}
+
+static int
+mlxsw_sp_acl_tcam_vgroup_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam,
+ struct mlxsw_sp_acl_tcam_vgroup *vgroup,
+ const struct mlxsw_sp_acl_tcam_pattern *patterns,
+ unsigned int patterns_count,
+ struct mlxsw_afk_element_usage *tmplt_elusage,
+ bool vregion_rehash_enabled,
+ unsigned int *p_min_prio,
+ unsigned int *p_max_prio)
+{
+ int err;
+
+ vgroup->patterns = patterns;
+ vgroup->patterns_count = patterns_count;
+ vgroup->vregion_rehash_enabled = vregion_rehash_enabled;
+ vgroup->p_min_prio = p_min_prio;
+ vgroup->p_max_prio = p_max_prio;
+
+ if (tmplt_elusage) {
+ vgroup->tmplt_elusage_set = true;
+ memcpy(&vgroup->tmplt_elusage, tmplt_elusage,
+ sizeof(vgroup->tmplt_elusage));
+ }
+ INIT_LIST_HEAD(&vgroup->vregion_list);
+
+ err = mlxsw_sp_acl_tcam_group_add(tcam, &vgroup->group);
+ if (err)
+ return err;
+
+ err = rhashtable_init(&vgroup->vchunk_ht,
+ &mlxsw_sp_acl_tcam_vchunk_ht_params);
+ if (err)
+ goto err_rhashtable_init;
+
+ return 0;
+
+err_rhashtable_init:
+ mlxsw_sp_acl_tcam_group_del(&vgroup->group);
+ return err;
+}
+
+static void
+mlxsw_sp_acl_tcam_vgroup_del(struct mlxsw_sp_acl_tcam_vgroup *vgroup)
+{
+ rhashtable_destroy(&vgroup->vchunk_ht);
+ mlxsw_sp_acl_tcam_group_del(&vgroup->group);
+ WARN_ON(!list_empty(&vgroup->vregion_list));
+}
+
+static int
+mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_group *group,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
+{
+ char ppbt_pl[MLXSW_REG_PPBT_LEN];
+
+ mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
+ MLXSW_REG_PXBT_E_EACL,
+ MLXSW_REG_PXBT_OP_BIND, mlxsw_sp_port->local_port,
+ group->id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
+}
+
+static void
+mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_group *group,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
+{
+ char ppbt_pl[MLXSW_REG_PPBT_LEN];
+
+ mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
+ MLXSW_REG_PXBT_E_EACL,
+ MLXSW_REG_PXBT_OP_UNBIND, mlxsw_sp_port->local_port,
+ group->id);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
+}
+
+static u16
+mlxsw_sp_acl_tcam_group_id(struct mlxsw_sp_acl_tcam_group *group)
+{
+ return group->id;
+}
+
+static unsigned int
+mlxsw_sp_acl_tcam_vregion_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
+{
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk;
+
+ if (list_empty(&vregion->vchunk_list))
+ return 0;
+ /* As the priority of a vregion, return the priority of the first vchunk */
+ vchunk = list_first_entry(&vregion->vchunk_list,
+ typeof(*vchunk), list);
+ return vchunk->priority;
+}
+
+static unsigned int
+mlxsw_sp_acl_tcam_vregion_max_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
+{
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk;
+
+ if (list_empty(&vregion->vchunk_list))
+ return 0;
+ vchunk = list_last_entry(&vregion->vchunk_list,
+ typeof(*vchunk), list);
+ return vchunk->priority;
+}
+
+static void
+mlxsw_sp_acl_tcam_vgroup_prio_update(struct mlxsw_sp_acl_tcam_vgroup *vgroup)
+{
+ struct mlxsw_sp_acl_tcam_vregion *vregion;
+
+ if (list_empty(&vgroup->vregion_list))
+ return;
+ vregion = list_first_entry(&vgroup->vregion_list,
+ typeof(*vregion), list);
+ *vgroup->p_min_prio = mlxsw_sp_acl_tcam_vregion_prio(vregion);
+ vregion = list_last_entry(&vgroup->vregion_list,
+ typeof(*vregion), list);
+ *vgroup->p_max_prio = mlxsw_sp_acl_tcam_vregion_max_prio(vregion);
+}
+
+static int
+mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_group *group,
+ struct mlxsw_sp_acl_tcam_region *region,
+ unsigned int priority,
+ struct mlxsw_sp_acl_tcam_region *next_region)
+{
+ struct mlxsw_sp_acl_tcam_region *region2;
+ struct list_head *pos;
+ int err;
+
+ mutex_lock(&group->lock);
+ if (group->region_count == group->tcam->max_group_size) {
+ err = -ENOBUFS;
+ goto err_region_count_check;
+ }
+
+ if (next_region) {
+ /* If the next region is defined, place the new one
+ * before it. The next one is a sibling.
+ */
+ pos = &next_region->list;
+ } else {
+ /* Position the region inside the list according to priority */
+ list_for_each(pos, &group->region_list) {
+ region2 = list_entry(pos, typeof(*region2), list);
+ if (mlxsw_sp_acl_tcam_vregion_prio(region2->vregion) >
+ priority)
+ break;
+ }
+ }
+ list_add_tail(&region->list, pos);
+ region->group = group;
+
+ err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
+ if (err)
+ goto err_group_update;
+
+ group->region_count++;
+ mutex_unlock(&group->lock);
+ return 0;
+
+err_group_update:
+ list_del(&region->list);
+err_region_count_check:
+ mutex_unlock(&group->lock);
+ return err;
+}
+
+static void
+mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region)
+{
+ struct mlxsw_sp_acl_tcam_group *group = region->group;
+
+ mutex_lock(&group->lock);
+ list_del(&region->list);
+ group->region_count--;
+ mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
+ mutex_unlock(&group->lock);
+}
+
+static int
+mlxsw_sp_acl_tcam_vgroup_vregion_attach(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vgroup *vgroup,
+ struct mlxsw_sp_acl_tcam_vregion *vregion,
+ unsigned int priority)
+{
+ struct mlxsw_sp_acl_tcam_vregion *vregion2;
+ struct list_head *pos;
+ int err;
+
+ /* Position the vregion inside the list according to priority */
+ list_for_each(pos, &vgroup->vregion_list) {
+ vregion2 = list_entry(pos, typeof(*vregion2), list);
+ if (mlxsw_sp_acl_tcam_vregion_prio(vregion2) > priority)
+ break;
+ }
+ list_add_tail(&vregion->list, pos);
+
+ err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, &vgroup->group,
+ vregion->region,
+ priority, NULL);
+ if (err)
+ goto err_region_attach;
+
+ return 0;
+
+err_region_attach:
+ list_del(&vregion->list);
+ return err;
+}
+
+static void
+mlxsw_sp_acl_tcam_vgroup_vregion_detach(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vregion *vregion)
+{
+ list_del(&vregion->list);
+ if (vregion->region2)
+ mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp,
+ vregion->region2);
+ mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, vregion->region);
+}
+
+static struct mlxsw_sp_acl_tcam_vregion *
+mlxsw_sp_acl_tcam_vgroup_vregion_find(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
+ unsigned int priority,
+ struct mlxsw_afk_element_usage *elusage,
+ bool *p_need_split)
+{
+ struct mlxsw_sp_acl_tcam_vregion *vregion, *vregion2;
+ struct list_head *pos;
+ bool issubset;
+
+ list_for_each(pos, &vgroup->vregion_list) {
+ vregion = list_entry(pos, typeof(*vregion), list);
+
+ /* First, check whether the requested priority actually belongs
+ * under one of the following vregions.
+ */
+ if (pos->next != &vgroup->vregion_list) { /* not last */
+ vregion2 = list_entry(pos->next, typeof(*vregion2),
+ list);
+ if (priority >=
+ mlxsw_sp_acl_tcam_vregion_prio(vregion2))
+ continue;
+ }
+
+ issubset = mlxsw_afk_key_info_subset(vregion->key_info,
+ elusage);
+
+ /* If the requested element usage would not fit and the priority
+ * is lower than that of the currently inspected vregion, we
+ * cannot use this vregion, so return NULL to indicate a new
+ * vregion has to be created.
+ */
+ if (!issubset &&
+ priority < mlxsw_sp_acl_tcam_vregion_prio(vregion))
+ return NULL;
+
+ /* If the requested element usage would not fit and the priority
+ * is higher than that of the currently inspected vregion, we
+ * cannot use this vregion either. There is still hope that the
+ * next vregion will fit, so let it be processed and eventually
+ * break at the check right above this one.
+ */
+ if (!issubset &&
+ priority > mlxsw_sp_acl_tcam_vregion_max_prio(vregion))
+ continue;
+
+ /* Indicate if the vregion needs to be split in order to add
+ * the requested priority. Split is needed when requested
+ * element usage won't fit into the found vregion.
+ */
+ *p_need_split = !issubset;
+ return vregion;
+ }
+ return NULL; /* New vregion has to be created. */
+}
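
For illustration only (not part of the patch): the search above relies on a subset test between the element usage a rule needs and the elements a vregion's key already encodes (mlxsw_afk_key_info_subset()). Below is a minimal standalone sketch of such a subset test, modeling element sets as bitmasks; the element bits are hypothetical and not the real mlxsw_afk encoding.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical element bits; the driver tracks these in
 * struct mlxsw_afk_element_usage.
 */
#define EL_SRC_IP	(1u << 0)
#define EL_DST_IP	(1u << 1)
#define EL_L4_PORTS	(1u << 2)
#define EL_VID		(1u << 3)

/* A rule fits into a key iff every element it needs is already encoded. */
static bool example_usage_subset(uint32_t needed, uint32_t provided)
{
	return (needed & ~provided) == 0;
}

int main(void)
{
	uint32_t vregion_key = EL_SRC_IP | EL_DST_IP | EL_L4_PORTS;

	printf("IP rule fits: %d\n",
	       example_usage_subset(EL_SRC_IP | EL_DST_IP, vregion_key));
	printf("VLAN rule fits: %d\n",
	       example_usage_subset(EL_VID, vregion_key));
	return 0;
}
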
+
+static void
+mlxsw_sp_acl_tcam_vgroup_use_patterns(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
+ struct mlxsw_afk_element_usage *elusage,
+ struct mlxsw_afk_element_usage *out)
+{
+ const struct mlxsw_sp_acl_tcam_pattern *pattern;
+ int i;
+
+ /* In case the template is set, we don't have to look up the pattern;
+ * we just use the template.
+ */
+ if (vgroup->tmplt_elusage_set) {
+ memcpy(out, &vgroup->tmplt_elusage, sizeof(*out));
+ WARN_ON(!mlxsw_afk_element_usage_subset(elusage, out));
+ return;
+ }
+
+ for (i = 0; i < vgroup->patterns_count; i++) {
+ pattern = &vgroup->patterns[i];
+ mlxsw_afk_element_usage_fill(out, pattern->elements,
+ pattern->elements_count);
+ if (mlxsw_afk_element_usage_subset(elusage, out))
+ return;
+ }
+ memcpy(out, elusage, sizeof(*out));
+}
+
+static int
+mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region)
+{
+ struct mlxsw_afk_key_info *key_info = region->key_info;
+ char ptar_pl[MLXSW_REG_PTAR_LEN];
+ unsigned int encodings_count;
+ int i;
+ int err;
+
+ mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
+ region->key_type,
+ MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
+ region->id, region->tcam_region_info);
+ encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
+ for (i = 0; i < encodings_count; i++) {
+ u16 encoding;
+
+ encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i);
+ mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding);
+ }
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
+ if (err)
+ return err;
+ mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info);
+ return 0;
+}
+
+static void
+mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region)
+{
+ char ptar_pl[MLXSW_REG_PTAR_LEN];
+
+ mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE,
+ region->key_type, 0, region->id,
+ region->tcam_region_info);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
+}
+
+static int
+mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region)
+{
+ char pacl_pl[MLXSW_REG_PACL_LEN];
+
+ mlxsw_reg_pacl_pack(pacl_pl, region->id, true,
+ region->tcam_region_info);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
+}
+
+static void
+mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region)
+{
+ char pacl_pl[MLXSW_REG_PACL_LEN];
+
+ mlxsw_reg_pacl_pack(pacl_pl, region->id, false,
+ region->tcam_region_info);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
+}
+
+static struct mlxsw_sp_acl_tcam_region *
+mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam,
+ struct mlxsw_sp_acl_tcam_vregion *vregion,
+ void *hints_priv)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+ struct mlxsw_sp_acl_tcam_region *region;
+ int err;
+
+ region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL);
+ if (!region)
+ return ERR_PTR(-ENOMEM);
+ region->mlxsw_sp = mlxsw_sp;
+ region->vregion = vregion;
+ region->key_info = vregion->key_info;
+
+ err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id);
+ if (err)
+ goto err_region_id_get;
+
+ err = ops->region_associate(mlxsw_sp, region);
+ if (err)
+ goto err_tcam_region_associate;
+
+ region->key_type = ops->key_type;
+ err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
+ if (err)
+ goto err_tcam_region_alloc;
+
+ err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region);
+ if (err)
+ goto err_tcam_region_enable;
+
+ err = ops->region_init(mlxsw_sp, region->priv, tcam->priv,
+ region, hints_priv);
+ if (err)
+ goto err_tcam_region_init;
+
+ return region;
+
+err_tcam_region_init:
+ mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
+err_tcam_region_enable:
+ mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
+err_tcam_region_alloc:
+err_tcam_region_associate:
+ mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
+err_region_id_get:
+ kfree(region);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+
+ ops->region_fini(mlxsw_sp, region->priv);
+ mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
+ mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
+ mlxsw_sp_acl_tcam_region_id_put(region->group->tcam,
+ region->id);
+ kfree(region);
+}
+
+static void
+mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(struct mlxsw_sp_acl_tcam_vregion *vregion)
+{
+ unsigned long interval = vregion->tcam->vregion_rehash_intrvl;
+
+ if (!interval)
+ return;
+ mlxsw_core_schedule_dw(&vregion->rehash.dw,
+ msecs_to_jiffies(interval));
+}
+
+static void
+mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vregion *vregion,
+ int *credits);
+
+static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
+{
+ struct mlxsw_sp_acl_tcam_vregion *vregion =
+ container_of(work, struct mlxsw_sp_acl_tcam_vregion,
+ rehash.dw.work);
+ int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS;
+
+ mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion, &credits);
+ if (credits < 0)
+ /* The rehash ran out of credits, so it was interrupted.
+ * Schedule the work as soon as possible to continue.
+ */
+ mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
+ else
+ mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
+}
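
For illustration only (not part of the patch): the work function above gives itself a fixed credit budget per invocation and reschedules immediately when the budget is exhausted mid-migration; otherwise it waits a full interval. Below is a minimal standalone sketch of that pattern; the entry count and helper names are hypothetical, and printf() stands in for mlxsw_core_schedule_dw().

#include <stdio.h>

#define EXAMPLE_CREDITS 100	/* entries migrated per work invocation */

/* Pretend there are this many entries left to migrate. */
static int entries_left = 250;

/* Consume credits; leave *credits negative when the budget runs out,
 * mirroring how the migration helpers above report exhaustion.
 */
static void example_migrate(int *credits)
{
	while (entries_left > 0) {
		if (--(*credits) < 0)
			return;
		entries_left--;
	}
}

static void example_rehash_work(void)
{
	int credits = EXAMPLE_CREDITS;

	example_migrate(&credits);
	if (credits < 0)
		printf("out of credits, reschedule immediately\n");
	else
		printf("done for now, reschedule after the interval\n");
}

int main(void)
{
	example_rehash_work();	/* runs out of credits */
	example_rehash_work();	/* runs out of credits */
	example_rehash_work();	/* finishes within budget */
	return 0;
}
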
+
+static void
+mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
+{
+ struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
+
+ /* If a rule was added to or deleted from a vchunk which is currently
+ * under rehash migration, we have to reset the ventry pointers
+ * to make sure all rules are properly migrated.
+ */
+ if (vregion->rehash.ctx.current_vchunk == vchunk) {
+ vregion->rehash.ctx.start_ventry = NULL;
+ vregion->rehash.ctx.stop_ventry = NULL;
+ }
+}
+
+static void
+mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *vregion)
+{
+ /* If a chunk was added or deleted from a vregion, we have to reset
+ * the current chunk pointer to make sure all chunks
+ * are properly migrated.
+ */
+ vregion->rehash.ctx.current_vchunk = NULL;
+}
+
+static struct mlxsw_sp_acl_tcam_vregion *
+mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vgroup *vgroup,
+ unsigned int priority,
+ struct mlxsw_afk_element_usage *elusage)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+ struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
+ struct mlxsw_sp_acl_tcam *tcam = vgroup->group.tcam;
+ struct mlxsw_sp_acl_tcam_vregion *vregion;
+ int err;
+
+ vregion = kzalloc(sizeof(*vregion), GFP_KERNEL);
+ if (!vregion)
+ return ERR_PTR(-ENOMEM);
+ INIT_LIST_HEAD(&vregion->vchunk_list);
+ mutex_init(&vregion->lock);
+ vregion->tcam = tcam;
+ vregion->mlxsw_sp = mlxsw_sp;
+ vregion->vgroup = vgroup;
+ vregion->ref_count = 1;
+
+ vregion->key_info = mlxsw_afk_key_info_get(afk, elusage);
+ if (IS_ERR(vregion->key_info)) {
+ err = PTR_ERR(vregion->key_info);
+ goto err_key_info_get;
+ }
+
+ vregion->region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, tcam,
+ vregion, NULL);
+ if (IS_ERR(vregion->region)) {
+ err = PTR_ERR(vregion->region);
+ goto err_region_create;
+ }
+
+ err = mlxsw_sp_acl_tcam_vgroup_vregion_attach(mlxsw_sp, vgroup, vregion,
+ priority);
+ if (err)
+ goto err_vgroup_vregion_attach;
+
+ if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
+ /* Create the delayed work for vregion periodic rehash */
+ INIT_DELAYED_WORK(&vregion->rehash.dw,
+ mlxsw_sp_acl_tcam_vregion_rehash_work);
+ mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
+ mutex_lock(&tcam->lock);
+ list_add_tail(&vregion->tlist, &tcam->vregion_list);
+ mutex_unlock(&tcam->lock);
+ }
+
+ return vregion;
+
+err_vgroup_vregion_attach:
+ mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
+err_region_create:
+ mlxsw_afk_key_info_put(vregion->key_info);
+err_key_info_get:
+ kfree(vregion);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vregion *vregion)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+ struct mlxsw_sp_acl_tcam_vgroup *vgroup = vregion->vgroup;
+ struct mlxsw_sp_acl_tcam *tcam = vregion->tcam;
+
+ if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
+ mutex_lock(&tcam->lock);
+ list_del(&vregion->tlist);
+ mutex_unlock(&tcam->lock);
+ cancel_delayed_work_sync(&vregion->rehash.dw);
+ }
+ mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
+ if (vregion->region2)
+ mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region2);
+ mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
+ mlxsw_afk_key_info_put(vregion->key_info);
+ mutex_destroy(&vregion->lock);
+ kfree(vregion);
+}
+
+u32 mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+ u32 vregion_rehash_intrvl;
+
+ if (WARN_ON(!ops->region_rehash_hints_get))
+ return 0;
+ vregion_rehash_intrvl = tcam->vregion_rehash_intrvl;
+ return vregion_rehash_intrvl;
+}
+
+int mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam,
+ u32 val)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+ struct mlxsw_sp_acl_tcam_vregion *vregion;
+
+ if (val < MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN && val)
+ return -EINVAL;
+ if (WARN_ON(!ops->region_rehash_hints_get))
+ return -EOPNOTSUPP;
+ tcam->vregion_rehash_intrvl = val;
+ mutex_lock(&tcam->lock);
+ list_for_each_entry(vregion, &tcam->vregion_list, tlist) {
+ if (val)
+ mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
+ else
+ cancel_delayed_work_sync(&vregion->rehash.dw);
+ }
+ mutex_unlock(&tcam->lock);
+ return 0;
+}
+
+static struct mlxsw_sp_acl_tcam_vregion *
+mlxsw_sp_acl_tcam_vregion_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vgroup *vgroup,
+ unsigned int priority,
+ struct mlxsw_afk_element_usage *elusage)
+{
+ struct mlxsw_afk_element_usage vregion_elusage;
+ struct mlxsw_sp_acl_tcam_vregion *vregion;
+ bool need_split;
+
+ vregion = mlxsw_sp_acl_tcam_vgroup_vregion_find(vgroup, priority,
+ elusage, &need_split);
+ if (vregion) {
+ if (need_split) {
+ /* According to its priority, the new vchunk should belong
+ * to an existing vregion. However, this vchunk needs
+ * elements that the vregion does not contain. We would need
+ * to split the existing vregion into two and create
+ * a new vregion for the new vchunk in between.
+ * This is not currently supported.
+ */
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+ vregion->ref_count++;
+ return vregion;
+ }
+
+ mlxsw_sp_acl_tcam_vgroup_use_patterns(vgroup, elusage,
+ &vregion_elusage);
+
+ return mlxsw_sp_acl_tcam_vregion_create(mlxsw_sp, vgroup, priority,
+ &vregion_elusage);
+}
+
+static void
+mlxsw_sp_acl_tcam_vregion_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vregion *vregion)
+{
+ if (--vregion->ref_count)
+ return;
+ mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion);
+}
+
+static struct mlxsw_sp_acl_tcam_chunk *
+mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk,
+ struct mlxsw_sp_acl_tcam_region *region)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+ struct mlxsw_sp_acl_tcam_chunk *chunk;
+
+ chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL);
+ if (!chunk)
+ return ERR_PTR(-ENOMEM);
+ chunk->vchunk = vchunk;
+ chunk->region = region;
+
+ ops->chunk_init(region->priv, chunk->priv, vchunk->priority);
+ return chunk;
+}
+
+static void
+mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_chunk *chunk)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+
+ ops->chunk_fini(chunk->priv);
+ kfree(chunk);
+}
+
+static struct mlxsw_sp_acl_tcam_vchunk *
+mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vgroup *vgroup,
+ unsigned int priority,
+ struct mlxsw_afk_element_usage *elusage)
+{
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk, *vchunk2;
+ struct mlxsw_sp_acl_tcam_vregion *vregion;
+ struct list_head *pos;
+ int err;
+
+ if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
+ return ERR_PTR(-EINVAL);
+
+ vchunk = kzalloc(sizeof(*vchunk), GFP_KERNEL);
+ if (!vchunk)
+ return ERR_PTR(-ENOMEM);
+ INIT_LIST_HEAD(&vchunk->ventry_list);
+ vchunk->priority = priority;
+ vchunk->vgroup = vgroup;
+ vchunk->ref_count = 1;
+
+ vregion = mlxsw_sp_acl_tcam_vregion_get(mlxsw_sp, vgroup,
+ priority, elusage);
+ if (IS_ERR(vregion)) {
+ err = PTR_ERR(vregion);
+ goto err_vregion_get;
+ }
+
+ vchunk->vregion = vregion;
+
+ err = rhashtable_insert_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
+ mlxsw_sp_acl_tcam_vchunk_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ mutex_lock(&vregion->lock);
+ vchunk->chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk,
+ vchunk->vregion->region);
+ if (IS_ERR(vchunk->chunk)) {
+ mutex_unlock(&vregion->lock);
+ err = PTR_ERR(vchunk->chunk);
+ goto err_chunk_create;
+ }
+
+ mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
+
+ /* Position the vchunk inside the list according to priority */
+ list_for_each(pos, &vregion->vchunk_list) {
+ vchunk2 = list_entry(pos, typeof(*vchunk2), list);
+ if (vchunk2->priority > priority)
+ break;
+ }
+ list_add_tail(&vchunk->list, pos);
+ mutex_unlock(&vregion->lock);
+ mlxsw_sp_acl_tcam_vgroup_prio_update(vgroup);
+
+ return vchunk;
+
+err_chunk_create:
+ rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
+ mlxsw_sp_acl_tcam_vchunk_ht_params);
+err_rhashtable_insert:
+ mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vregion);
+err_vregion_get:
+ kfree(vchunk);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_tcam_vchunk_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk)
+{
+ struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
+ struct mlxsw_sp_acl_tcam_vgroup *vgroup = vchunk->vgroup;
+
+ mutex_lock(&vregion->lock);
+ mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
+ list_del(&vchunk->list);
+ if (vchunk->chunk2)
+ mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
+ mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk);
+ mutex_unlock(&vregion->lock);
+ rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
+ mlxsw_sp_acl_tcam_vchunk_ht_params);
+ mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vchunk->vregion);
+ kfree(vchunk);
+ mlxsw_sp_acl_tcam_vgroup_prio_update(vgroup);
+}
+
+static struct mlxsw_sp_acl_tcam_vchunk *
+mlxsw_sp_acl_tcam_vchunk_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vgroup *vgroup,
+ unsigned int priority,
+ struct mlxsw_afk_element_usage *elusage)
+{
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk;
+
+ vchunk = rhashtable_lookup_fast(&vgroup->vchunk_ht, &priority,
+ mlxsw_sp_acl_tcam_vchunk_ht_params);
+ if (vchunk) {
+ if (WARN_ON(!mlxsw_afk_key_info_subset(vchunk->vregion->key_info,
+ elusage)))
+ return ERR_PTR(-EINVAL);
+ vchunk->ref_count++;
+ return vchunk;
+ }
+ return mlxsw_sp_acl_tcam_vchunk_create(mlxsw_sp, vgroup,
+ priority, elusage);
+}
+
+static void
+mlxsw_sp_acl_tcam_vchunk_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk)
+{
+ if (--vchunk->ref_count)
+ return;
+ mlxsw_sp_acl_tcam_vchunk_destroy(mlxsw_sp, vchunk);
+}
+
+static struct mlxsw_sp_acl_tcam_entry *
+mlxsw_sp_acl_tcam_entry_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_ventry *ventry,
+ struct mlxsw_sp_acl_tcam_chunk *chunk)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+ struct mlxsw_sp_acl_tcam_entry *entry;
+ int err;
+
+ entry = kzalloc(sizeof(*entry) + ops->entry_priv_size, GFP_KERNEL);
+ if (!entry)
+ return ERR_PTR(-ENOMEM);
+ entry->ventry = ventry;
+ entry->chunk = chunk;
+
+ err = ops->entry_add(mlxsw_sp, chunk->region->priv, chunk->priv,
+ entry->priv, ventry->rulei);
+ if (err)
+ goto err_entry_add;
+
+ return entry;
+
+err_entry_add:
+ kfree(entry);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_acl_tcam_entry_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_entry *entry)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+
+ ops->entry_del(mlxsw_sp, entry->chunk->region->priv,
+ entry->chunk->priv, entry->priv);
+ kfree(entry);
+}
+
+static int
+mlxsw_sp_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region,
+ struct mlxsw_sp_acl_tcam_entry *entry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+
+ return ops->entry_action_replace(mlxsw_sp, region->priv,
+ entry->priv, rulei);
+}
+
+static int
+mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_entry *entry,
+ bool *activity)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+
+ return ops->entry_activity_get(mlxsw_sp, entry->chunk->region->priv,
+ entry->priv, activity);
+}
+
+static int mlxsw_sp_acl_tcam_ventry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vgroup *vgroup,
+ struct mlxsw_sp_acl_tcam_ventry *ventry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp_acl_tcam_vregion *vregion;
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk;
+ int err;
+
+ vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp, vgroup, rulei->priority,
+ &rulei->values.elusage);
+ if (IS_ERR(vchunk))
+ return PTR_ERR(vchunk);
+
+ ventry->vchunk = vchunk;
+ ventry->rulei = rulei;
+ vregion = vchunk->vregion;
+
+ mutex_lock(&vregion->lock);
+ ventry->entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry,
+ vchunk->chunk);
+ if (IS_ERR(ventry->entry)) {
+ mutex_unlock(&vregion->lock);
+ err = PTR_ERR(ventry->entry);
+ goto err_entry_create;
+ }
+
+ list_add_tail(&ventry->list, &vchunk->ventry_list);
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
+ mutex_unlock(&vregion->lock);
+
+ return 0;
+
+err_entry_create:
+ mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
+ return err;
+}
+
+static void mlxsw_sp_acl_tcam_ventry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_ventry *ventry)
+{
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;
+ struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
+
+ mutex_lock(&vregion->lock);
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
+ list_del(&ventry->list);
+ mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
+ mutex_unlock(&vregion->lock);
+ mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
+}
+
+static int
+mlxsw_sp_acl_tcam_ventry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_ventry *ventry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;
+
+ return mlxsw_sp_acl_tcam_entry_action_replace(mlxsw_sp,
+ vchunk->vregion->region,
+ ventry->entry, rulei);
+}
+
+static int
+mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_ventry *ventry,
+ bool *activity)
+{
+ return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp,
+ ventry->entry, activity);
+}
+
+static int
+mlxsw_sp_acl_tcam_ventry_migrate(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_ventry *ventry,
+ struct mlxsw_sp_acl_tcam_chunk *chunk,
+ int *credits)
+{
+ struct mlxsw_sp_acl_tcam_entry *new_entry;
+
+ /* First check if the entry is not already where we want it to be. */
+ if (ventry->entry->chunk == chunk)
+ return 0;
+
+ if (--(*credits) < 0)
+ return 0;
+
+ new_entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry, chunk);
+ if (IS_ERR(new_entry))
+ return PTR_ERR(new_entry);
+ mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
+ ventry->entry = new_entry;
+ return 0;
+}
+
+static int
+mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk,
+ struct mlxsw_sp_acl_tcam_region *region,
+ struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+{
+ struct mlxsw_sp_acl_tcam_chunk *new_chunk;
+
+ new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
+ if (IS_ERR(new_chunk))
+ return PTR_ERR(new_chunk);
+ vchunk->chunk2 = vchunk->chunk;
+ vchunk->chunk = new_chunk;
+ ctx->current_vchunk = vchunk;
+ ctx->start_ventry = NULL;
+ ctx->stop_ventry = NULL;
+ return 0;
+}
+
+static void
+mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk,
+ struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+{
+ mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
+ vchunk->chunk2 = NULL;
+ ctx->current_vchunk = NULL;
+}
+
+static int
+mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk,
+ struct mlxsw_sp_acl_tcam_region *region,
+ struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
+ int *credits)
+{
+ struct mlxsw_sp_acl_tcam_ventry *ventry;
+ int err;
+
+ if (vchunk->chunk->region != region) {
+ err = mlxsw_sp_acl_tcam_vchunk_migrate_start(mlxsw_sp, vchunk,
+ region, ctx);
+ if (err)
+ return err;
+ } else if (!vchunk->chunk2) {
+ /* The chunk is already as it should be, nothing to do. */
+ return 0;
+ }
+
+ /* If the migration got interrupted, we have the ventry to start from
+ * stored in context.
+ */
+ if (ctx->start_ventry)
+ ventry = ctx->start_ventry;
+ else
+ ventry = list_first_entry(&vchunk->ventry_list,
+ typeof(*ventry), list);
+
+ list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
+ /* During rollback, once we reach the ventry that failed
+ * to migrate, we are done.
+ */
+ if (ventry == ctx->stop_ventry)
+ break;
+
+ err = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
+ vchunk->chunk, credits);
+ if (err) {
+ if (ctx->this_is_rollback) {
+ /* Save the ventry which we ended with and try
+ * to continue later on.
+ */
+ ctx->start_ventry = ventry;
+ return err;
+ }
+ /* Swap the chunk and chunk2 pointers so the follow-up
+ * rollback call will see the original chunk pointer
+ * in vchunk->chunk.
+ */
+ swap(vchunk->chunk, vchunk->chunk2);
+ /* The rollback has to be done from the beginning of the
+ * chunk, which is why we have to null the start_ventry.
+ * However, we know where to stop the rollback:
+ * at the current ventry.
+ */
+ ctx->start_ventry = NULL;
+ ctx->stop_ventry = ventry;
+ return err;
+ } else if (*credits < 0) {
+ /* We are out of credits, the rest of the ventries
+ * will be migrated later. Save the ventry
+ * which we ended with.
+ */
+ ctx->start_ventry = ventry;
+ return 0;
+ }
+ }
+
+ mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx);
+ return 0;
+}
+
+static int
+mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vregion *vregion,
+ struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
+ int *credits)
+{
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk;
+ int err;
+
+ /* If the migration got interrupted, we have the vchunk
+ * we are working on stored in context.
+ */
+ if (ctx->current_vchunk)
+ vchunk = ctx->current_vchunk;
+ else
+ vchunk = list_first_entry(&vregion->vchunk_list,
+ typeof(*vchunk), list);
+
+ list_for_each_entry_from(vchunk, &vregion->vchunk_list, list) {
+ err = mlxsw_sp_acl_tcam_vchunk_migrate_one(mlxsw_sp, vchunk,
+ vregion->region,
+ ctx, credits);
+ if (err || *credits < 0)
+ return err;
+ }
+ return 0;
+}
+
+static int
+mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vregion *vregion,
+ struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
+ int *credits)
+{
+ int err, err2;
+
+ trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
+ mutex_lock(&vregion->lock);
+ err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
+ ctx, credits);
+ if (err) {
+ /* In case migration was not successful, we need to swap
+ * so the original region pointer is assigned again
+ * to vregion->region.
+ */
+ swap(vregion->region, vregion->region2);
+ ctx->current_vchunk = NULL;
+ ctx->this_is_rollback = true;
+ err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
+ ctx, credits);
+ if (err2) {
+ trace_mlxsw_sp_acl_tcam_vregion_rehash_rollback_failed(mlxsw_sp,
+ vregion);
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to rollback during vregion migration fail\n");
+ /* Let the rollback be continued later on. */
+ }
+ }
+ mutex_unlock(&vregion->lock);
+ trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
+ return err;
+}
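
For illustration only (not part of the patch): on migration failure the code above swaps the region pointers and reruns the very same migration loop, which then moves entries back into the original region. Below is a minimal standalone sketch of that swap-and-rerun rollback idea; the structure and helpers here are hypothetical.

#include <stdio.h>

struct example_vregion {
	const char *region;	/* region entries are being moved into */
	const char *region2;	/* region entries are being moved out of */
};

/* Pretend that migrating into the "new" region fails part-way. */
static int example_migrate_all(struct example_vregion *v)
{
	printf("migrating entries into %s region\n", v->region);
	return v->region[0] == 'n' ? -1 : 0;
}

#define example_swap(a, b) \
	do { const char *t = (a); (a) = (b); (b) = t; } while (0)

int main(void)
{
	struct example_vregion v = { .region = "new", .region2 = "old" };

	if (example_migrate_all(&v)) {
		/* Roll back: make the old region current again and rerun
		 * the same loop, now moving entries back into it.
		 */
		example_swap(v.region, v.region2);
		if (example_migrate_all(&v))
			printf("rollback failed, retry later\n");
	}
	return 0;
}
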
+
+static bool
+mlxsw_sp_acl_tcam_vregion_rehash_in_progress(const struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+{
+ return ctx->hints_priv;
+}
+
+static int
+mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vregion *vregion,
+ struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+ unsigned int priority = mlxsw_sp_acl_tcam_vregion_prio(vregion);
+ struct mlxsw_sp_acl_tcam_region *new_region;
+ void *hints_priv;
+ int err;
+
+ trace_mlxsw_sp_acl_tcam_vregion_rehash(mlxsw_sp, vregion);
+
+ hints_priv = ops->region_rehash_hints_get(vregion->region->priv);
+ if (IS_ERR(hints_priv))
+ return PTR_ERR(hints_priv);
+
+ new_region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, vregion->tcam,
+ vregion, hints_priv);
+ if (IS_ERR(new_region)) {
+ err = PTR_ERR(new_region);
+ goto err_region_create;
+ }
+
+ /* vregion->region contains the pointer to the new region
+ * we are going to migrate to.
+ */
+ vregion->region2 = vregion->region;
+ vregion->region = new_region;
+ err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp,
+ vregion->region2->group,
+ new_region, priority,
+ vregion->region2);
+ if (err)
+ goto err_group_region_attach;
+
+ ctx->hints_priv = hints_priv;
+ ctx->this_is_rollback = false;
+
+ return 0;
+
+err_group_region_attach:
+ vregion->region = vregion->region2;
+ vregion->region2 = NULL;
+ mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, new_region);
+err_region_create:
+ ops->region_rehash_hints_put(hints_priv);
+ return err;
+}
+
+static void
+mlxsw_sp_acl_tcam_vregion_rehash_end(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vregion *vregion,
+ struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+{
+ struct mlxsw_sp_acl_tcam_region *unused_region = vregion->region2;
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+
+ vregion->region2 = NULL;
+ mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, unused_region);
+ mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, unused_region);
+ ops->region_rehash_hints_put(ctx->hints_priv);
+ ctx->hints_priv = NULL;
+}
+
+static void
+mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vregion *vregion,
+ int *credits)
+{
+ struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
+ int err;
+
+ /* Check if the previous rehash work was interrupted,
+ * which means we have to continue it now.
+ * If not, start a new rehash.
+ */
+ if (!mlxsw_sp_acl_tcam_vregion_rehash_in_progress(ctx)) {
+ err = mlxsw_sp_acl_tcam_vregion_rehash_start(mlxsw_sp,
+ vregion, ctx);
+ if (err) {
+ if (err != -EAGAIN)
+ dev_err(mlxsw_sp->bus_info->dev, "Failed get rehash hints\n");
+ return;
+ }
+ }
+
+ err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion,
+ ctx, credits);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
+ }
+
+ if (*credits >= 0)
+ mlxsw_sp_acl_tcam_vregion_rehash_end(mlxsw_sp, vregion, ctx);
+}
+
+static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
+ MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
+ MLXSW_AFK_ELEMENT_DMAC_32_47,
+ MLXSW_AFK_ELEMENT_DMAC_0_31,
+ MLXSW_AFK_ELEMENT_SMAC_32_47,
+ MLXSW_AFK_ELEMENT_SMAC_0_31,
+ MLXSW_AFK_ELEMENT_ETHERTYPE,
+ MLXSW_AFK_ELEMENT_IP_PROTO,
+ MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ MLXSW_AFK_ELEMENT_DST_IP_0_31,
+ MLXSW_AFK_ELEMENT_DST_L4_PORT,
+ MLXSW_AFK_ELEMENT_SRC_L4_PORT,
+ MLXSW_AFK_ELEMENT_VID,
+ MLXSW_AFK_ELEMENT_PCP,
+ MLXSW_AFK_ELEMENT_TCP_FLAGS,
+ MLXSW_AFK_ELEMENT_IP_TTL_,
+ MLXSW_AFK_ELEMENT_IP_ECN,
+ MLXSW_AFK_ELEMENT_IP_DSCP,
+};
+
+static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
+ MLXSW_AFK_ELEMENT_ETHERTYPE,
+ MLXSW_AFK_ELEMENT_IP_PROTO,
+ MLXSW_AFK_ELEMENT_SRC_IP_96_127,
+ MLXSW_AFK_ELEMENT_SRC_IP_64_95,
+ MLXSW_AFK_ELEMENT_SRC_IP_32_63,
+ MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ MLXSW_AFK_ELEMENT_DST_IP_96_127,
+ MLXSW_AFK_ELEMENT_DST_IP_64_95,
+ MLXSW_AFK_ELEMENT_DST_IP_32_63,
+ MLXSW_AFK_ELEMENT_DST_IP_0_31,
+ MLXSW_AFK_ELEMENT_DST_L4_PORT,
+ MLXSW_AFK_ELEMENT_SRC_L4_PORT,
+};
+
+static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = {
+ {
+ .elements = mlxsw_sp_acl_tcam_pattern_ipv4,
+ .elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4),
+ },
+ {
+ .elements = mlxsw_sp_acl_tcam_pattern_ipv6,
+ .elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6),
+ },
+};
+
+#define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \
+ ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns)
+
+struct mlxsw_sp_acl_tcam_flower_ruleset {
+ struct mlxsw_sp_acl_tcam_vgroup vgroup;
+};
+
+struct mlxsw_sp_acl_tcam_flower_rule {
+ struct mlxsw_sp_acl_tcam_ventry ventry;
+};
+
+static int
+mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam,
+ void *ruleset_priv,
+ struct mlxsw_afk_element_usage *tmplt_elusage,
+ unsigned int *p_min_prio,
+ unsigned int *p_max_prio)
+{
+ struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
+
+ return mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
+ mlxsw_sp_acl_tcam_patterns,
+ MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
+ tmplt_elusage, true,
+ p_min_prio, p_max_prio);
+}
+
+static void
+mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
+ void *ruleset_priv)
+{
+ struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
+
+ mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
+}
+
+static int
+mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
+ void *ruleset_priv,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
+{
+ struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
+
+ return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->vgroup.group,
+ mlxsw_sp_port, ingress);
+}
+
+static void
+mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
+ void *ruleset_priv,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
+{
+ struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
+
+ mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->vgroup.group,
+ mlxsw_sp_port, ingress);
+}
+
+static u16
+mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
+{
+ struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
+
+ return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
+}
+
+static int
+mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
+ void *ruleset_priv, void *rule_priv,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
+ struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
+
+ return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
+ &rule->ventry, rulei);
+}
+
+static void
+mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
+{
+ struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
+
+ mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
+}
+
+static int
+mlxsw_sp_acl_tcam_flower_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
+ void *rule_priv,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ return -EOPNOTSUPP;
+}
+
+static int
+mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
+ void *rule_priv, bool *activity)
+{
+ struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
+
+ return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry,
+ activity);
+}
+
+static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
+ .ruleset_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
+ .ruleset_add = mlxsw_sp_acl_tcam_flower_ruleset_add,
+ .ruleset_del = mlxsw_sp_acl_tcam_flower_ruleset_del,
+ .ruleset_bind = mlxsw_sp_acl_tcam_flower_ruleset_bind,
+ .ruleset_unbind = mlxsw_sp_acl_tcam_flower_ruleset_unbind,
+ .ruleset_group_id = mlxsw_sp_acl_tcam_flower_ruleset_group_id,
+ .rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
+ .rule_add = mlxsw_sp_acl_tcam_flower_rule_add,
+ .rule_del = mlxsw_sp_acl_tcam_flower_rule_del,
+ .rule_action_replace = mlxsw_sp_acl_tcam_flower_rule_action_replace,
+ .rule_activity_get = mlxsw_sp_acl_tcam_flower_rule_activity_get,
+};
+
+struct mlxsw_sp_acl_tcam_mr_ruleset {
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk;
+ struct mlxsw_sp_acl_tcam_vgroup vgroup;
+};
+
+struct mlxsw_sp_acl_tcam_mr_rule {
+ struct mlxsw_sp_acl_tcam_ventry ventry;
+};
+
+static int
+mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam,
+ void *ruleset_priv,
+ struct mlxsw_afk_element_usage *tmplt_elusage,
+ unsigned int *p_min_prio,
+ unsigned int *p_max_prio)
+{
+ struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
+ int err;
+
+ err = mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
+ mlxsw_sp_acl_tcam_patterns,
+ MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
+ tmplt_elusage, false,
+ p_min_prio, p_max_prio);
+ if (err)
+ return err;
+
+ /* For most of the TCAM clients it would make sense to take a TCAM chunk
+ * only when the first rule is written. This is not the case for the
+ * multicast router, as it is required to bind the multicast router to a
+ * specific ACL group ID which must exist in HW before the multicast
+ * router is initialized.
+ */
+ ruleset->vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp,
+ &ruleset->vgroup, 1,
+ tmplt_elusage);
+ if (IS_ERR(ruleset->vchunk)) {
+ err = PTR_ERR(ruleset->vchunk);
+ goto err_chunk_get;
+ }
+
+ return 0;
+
+err_chunk_get:
+ mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
+ return err;
+}
+
+static void
+mlxsw_sp_acl_tcam_mr_ruleset_del(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv)
+{
+ struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
+
+ mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, ruleset->vchunk);
+ mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
+}
+
+static int
+mlxsw_sp_acl_tcam_mr_ruleset_bind(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
+{
+ /* Binding is done when initializing multicast router */
+ return 0;
+}
+
+static void
+mlxsw_sp_acl_tcam_mr_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
+ void *ruleset_priv,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
+{
+}
+
+static u16
+mlxsw_sp_acl_tcam_mr_ruleset_group_id(void *ruleset_priv)
+{
+ struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
+
+ return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
+}
+
+static int
+mlxsw_sp_acl_tcam_mr_rule_add(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
+ void *rule_priv,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
+ struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
+
+ return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
+ &rule->ventry, rulei);
+}
+
+static void
+mlxsw_sp_acl_tcam_mr_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
+{
+ struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
+
+ mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
+}
+
+static int
+mlxsw_sp_acl_tcam_mr_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
+ void *rule_priv,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
+
+ return mlxsw_sp_acl_tcam_ventry_action_replace(mlxsw_sp, &rule->ventry,
+ rulei);
+}
+
+static int
+mlxsw_sp_acl_tcam_mr_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
+ void *rule_priv, bool *activity)
+{
+ *activity = false;
+
+ return 0;
+}
+
+static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_mr_ops = {
+ .ruleset_priv_size = sizeof(struct mlxsw_sp_acl_tcam_mr_ruleset),
+ .ruleset_add = mlxsw_sp_acl_tcam_mr_ruleset_add,
+ .ruleset_del = mlxsw_sp_acl_tcam_mr_ruleset_del,
+ .ruleset_bind = mlxsw_sp_acl_tcam_mr_ruleset_bind,
+ .ruleset_unbind = mlxsw_sp_acl_tcam_mr_ruleset_unbind,
+ .ruleset_group_id = mlxsw_sp_acl_tcam_mr_ruleset_group_id,
+ .rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_mr_rule),
+ .rule_add = mlxsw_sp_acl_tcam_mr_rule_add,
+ .rule_del = mlxsw_sp_acl_tcam_mr_rule_del,
+ .rule_action_replace = mlxsw_sp_acl_tcam_mr_rule_action_replace,
+ .rule_activity_get = mlxsw_sp_acl_tcam_mr_rule_activity_get,
+};
+
+static const struct mlxsw_sp_acl_profile_ops *
+mlxsw_sp_acl_tcam_profile_ops_arr[] = {
+ [MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
+ [MLXSW_SP_ACL_PROFILE_MR] = &mlxsw_sp_acl_tcam_mr_ops,
+};
+
+const struct mlxsw_sp_acl_profile_ops *
+mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_acl_profile profile)
+{
+ const struct mlxsw_sp_acl_profile_ops *ops;
+
+ if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr)))
+ return NULL;
+ ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile];
+ if (WARN_ON(!ops))
+ return NULL;
+ return ops;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
new file mode 100644
index 000000000..edbbc89e7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -0,0 +1,312 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_SPECTRUM_ACL_TCAM_H
+#define _MLXSW_SPECTRUM_ACL_TCAM_H
+
+#include <linux/list.h>
+#include <linux/parman.h>
+
+#include "reg.h"
+#include "spectrum.h"
+#include "core_acl_flex_keys.h"
+
+struct mlxsw_sp_acl_tcam {
+ unsigned long *used_regions; /* bit array */
+ unsigned int max_regions;
+ unsigned long *used_groups; /* bit array */
+ unsigned int max_groups;
+ unsigned int max_group_size;
+ struct mutex lock; /* guards vregion list */
+ struct list_head vregion_list;
+ u32 vregion_rehash_intrvl; /* ms */
+ unsigned long priv[];
+ /* priv has to be always the last item */
+};
+
+size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam);
+void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam);
+u32 mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam);
+int mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam,
+ u32 val);
+int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u32 *priority, bool fillup_priority);
+
+struct mlxsw_sp_acl_profile_ops {
+ size_t ruleset_priv_size;
+ int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam, void *ruleset_priv,
+ struct mlxsw_afk_element_usage *tmplt_elusage,
+ unsigned int *p_min_prio, unsigned int *p_max_prio);
+ void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
+ int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
+ void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
+ u16 (*ruleset_group_id)(void *ruleset_priv);
+ size_t rule_priv_size;
+ int (*rule_add)(struct mlxsw_sp *mlxsw_sp,
+ void *ruleset_priv, void *rule_priv,
+ struct mlxsw_sp_acl_rule_info *rulei);
+ void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv);
+ int (*rule_action_replace)(struct mlxsw_sp *mlxsw_sp, void *rule_priv,
+ struct mlxsw_sp_acl_rule_info *rulei);
+ int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv,
+ bool *activity);
+};
+
+const struct mlxsw_sp_acl_profile_ops *
+mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_acl_profile profile);
+
+#define MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT 16
+#define MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP 16
+
+#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U)
+
+#define MLXSW_SP_ACL_TCAM_MASK_LEN \
+ (MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN * BITS_PER_BYTE)
+
+struct mlxsw_sp_acl_tcam_group;
+struct mlxsw_sp_acl_tcam_vregion;
+
+struct mlxsw_sp_acl_tcam_region {
+ struct mlxsw_sp_acl_tcam_vregion *vregion;
+ struct mlxsw_sp_acl_tcam_group *group;
+ struct list_head list; /* Member of a TCAM group */
+ enum mlxsw_reg_ptar_key_type key_type;
+ u16 id; /* ACL ID and region ID - they are the same */
+ char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN];
+ struct mlxsw_afk_key_info *key_info;
+ struct mlxsw_sp *mlxsw_sp;
+ unsigned long priv[];
+ /* priv has to be always the last item */
+};
+
+struct mlxsw_sp_acl_ctcam_region {
+ struct parman *parman;
+ const struct mlxsw_sp_acl_ctcam_region_ops *ops;
+ struct mlxsw_sp_acl_tcam_region *region;
+};
+
+struct mlxsw_sp_acl_ctcam_chunk {
+ struct parman_prio parman_prio;
+};
+
+struct mlxsw_sp_acl_ctcam_entry {
+ struct parman_item parman_item;
+};
+
+struct mlxsw_sp_acl_ctcam_region_ops {
+ int (*entry_insert)(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ const char *mask);
+ void (*entry_remove)(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry);
+};
+
+int
+mlxsw_sp_acl_ctcam_region_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_tcam_region *region,
+ const struct mlxsw_sp_acl_ctcam_region_ops *ops);
+void mlxsw_sp_acl_ctcam_region_fini(struct mlxsw_sp_acl_ctcam_region *cregion);
+void mlxsw_sp_acl_ctcam_chunk_init(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ unsigned int priority);
+void mlxsw_sp_acl_ctcam_chunk_fini(struct mlxsw_sp_acl_ctcam_chunk *cchunk);
+int mlxsw_sp_acl_ctcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ bool fillup_priority);
+void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ struct mlxsw_sp_acl_ctcam_entry *centry);
+int mlxsw_sp_acl_ctcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ struct mlxsw_sp_acl_rule_info *rulei);
+static inline unsigned int
+mlxsw_sp_acl_ctcam_entry_offset(struct mlxsw_sp_acl_ctcam_entry *centry)
+{
+ return centry->parman_item.index;
+}
+
+enum mlxsw_sp_acl_atcam_region_type {
+ MLXSW_SP_ACL_ATCAM_REGION_TYPE_2KB,
+ MLXSW_SP_ACL_ATCAM_REGION_TYPE_4KB,
+ MLXSW_SP_ACL_ATCAM_REGION_TYPE_8KB,
+ MLXSW_SP_ACL_ATCAM_REGION_TYPE_12KB,
+ __MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX,
+};
+
+#define MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX \
+ (__MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX - 1)
+
+struct mlxsw_sp_acl_atcam {
+ struct mlxsw_sp_acl_erp_core *erp_core;
+};
+
+struct mlxsw_sp_acl_atcam_region {
+ struct rhashtable entries_ht; /* A-TCAM only */
+ struct list_head entries_list; /* A-TCAM only */
+ struct mlxsw_sp_acl_ctcam_region cregion;
+ const struct mlxsw_sp_acl_atcam_region_ops *ops;
+ struct mlxsw_sp_acl_tcam_region *region;
+ struct mlxsw_sp_acl_atcam *atcam;
+ enum mlxsw_sp_acl_atcam_region_type type;
+ struct mlxsw_sp_acl_erp_table *erp_table;
+ void *priv;
+};
+
+struct mlxsw_sp_acl_atcam_entry_ht_key {
+ char full_enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded
+ * key.
+ */
+ u8 erp_id;
+};
+
+struct mlxsw_sp_acl_atcam_chunk {
+ struct mlxsw_sp_acl_ctcam_chunk cchunk;
+};
+
+struct mlxsw_sp_acl_atcam_entry {
+ struct rhash_head ht_node;
+ struct list_head list; /* Member in entries_list */
+ struct mlxsw_sp_acl_atcam_entry_ht_key ht_key;
+ char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key,
+ * minus delta bits.
+ */
+ struct {
+ u16 start;
+ u8 mask;
+ u8 value;
+ } delta_info;
+ struct mlxsw_sp_acl_ctcam_entry centry;
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
+ struct mlxsw_sp_acl_erp_mask *erp_mask;
+};
+
+static inline struct mlxsw_sp_acl_atcam_region *
+mlxsw_sp_acl_tcam_cregion_aregion(struct mlxsw_sp_acl_ctcam_region *cregion)
+{
+ return container_of(cregion, struct mlxsw_sp_acl_atcam_region, cregion);
+}
+
+static inline struct mlxsw_sp_acl_atcam_entry *
+mlxsw_sp_acl_tcam_centry_aentry(struct mlxsw_sp_acl_ctcam_entry *centry)
+{
+ return container_of(centry, struct mlxsw_sp_acl_atcam_entry, centry);
+}
+
+int mlxsw_sp_acl_atcam_region_associate(struct mlxsw_sp *mlxsw_sp,
+ u16 region_id);
+int
+mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_tcam_region *region,
+ void *hints_priv,
+ const struct mlxsw_sp_acl_ctcam_region_ops *ops);
+void mlxsw_sp_acl_atcam_region_fini(struct mlxsw_sp_acl_atcam_region *aregion);
+void mlxsw_sp_acl_atcam_chunk_init(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ unsigned int priority);
+void mlxsw_sp_acl_atcam_chunk_fini(struct mlxsw_sp_acl_atcam_chunk *achunk);
+int mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ struct mlxsw_sp_acl_rule_info *rulei);
+void mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ struct mlxsw_sp_acl_atcam_entry *aentry);
+int mlxsw_sp_acl_atcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ struct mlxsw_sp_acl_rule_info *rulei);
+int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam);
+void mlxsw_sp_acl_atcam_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam);
+void *
+mlxsw_sp_acl_atcam_rehash_hints_get(struct mlxsw_sp_acl_atcam_region *aregion);
+void mlxsw_sp_acl_atcam_rehash_hints_put(void *hints_priv);
+
+struct mlxsw_sp_acl_erp_delta;
+
+u16 mlxsw_sp_acl_erp_delta_start(const struct mlxsw_sp_acl_erp_delta *delta);
+u8 mlxsw_sp_acl_erp_delta_mask(const struct mlxsw_sp_acl_erp_delta *delta);
+u8 mlxsw_sp_acl_erp_delta_value(const struct mlxsw_sp_acl_erp_delta *delta,
+ const char *enc_key);
+void mlxsw_sp_acl_erp_delta_clear(const struct mlxsw_sp_acl_erp_delta *delta,
+ const char *enc_key);
+
+struct mlxsw_sp_acl_erp_mask;
+
+bool
+mlxsw_sp_acl_erp_mask_is_ctcam(const struct mlxsw_sp_acl_erp_mask *erp_mask);
+u8 mlxsw_sp_acl_erp_mask_erp_id(const struct mlxsw_sp_acl_erp_mask *erp_mask);
+const struct mlxsw_sp_acl_erp_delta *
+mlxsw_sp_acl_erp_delta(const struct mlxsw_sp_acl_erp_mask *erp_mask);
+struct mlxsw_sp_acl_erp_mask *
+mlxsw_sp_acl_erp_mask_get(struct mlxsw_sp_acl_atcam_region *aregion,
+ const char *mask, bool ctcam);
+void mlxsw_sp_acl_erp_mask_put(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_erp_mask *erp_mask);
+int mlxsw_sp_acl_erp_bf_insert(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_erp_mask *erp_mask,
+ struct mlxsw_sp_acl_atcam_entry *aentry);
+void mlxsw_sp_acl_erp_bf_remove(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_erp_mask *erp_mask,
+ struct mlxsw_sp_acl_atcam_entry *aentry);
+void *
+mlxsw_sp_acl_erp_rehash_hints_get(struct mlxsw_sp_acl_atcam_region *aregion);
+void mlxsw_sp_acl_erp_rehash_hints_put(void *hints_priv);
+int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion,
+ void *hints_priv);
+void mlxsw_sp_acl_erp_region_fini(struct mlxsw_sp_acl_atcam_region *aregion);
+int mlxsw_sp_acl_erps_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam);
+void mlxsw_sp_acl_erps_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam);
+
+struct mlxsw_sp_acl_bf;
+
+struct mlxsw_sp_acl_bf_ops {
+ unsigned int (*index_get)(struct mlxsw_sp_acl_bf *bf,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry);
+};
+
+int
+mlxsw_sp_acl_bf_entry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_bf *bf,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ unsigned int erp_bank,
+ struct mlxsw_sp_acl_atcam_entry *aentry);
+void
+mlxsw_sp_acl_bf_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_bf *bf,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ unsigned int erp_bank,
+ struct mlxsw_sp_acl_atcam_entry *aentry);
+struct mlxsw_sp_acl_bf *
+mlxsw_sp_acl_bf_init(struct mlxsw_sp *mlxsw_sp, unsigned int num_erp_banks);
+void mlxsw_sp_acl_bf_fini(struct mlxsw_sp_acl_bf *bf);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
new file mode 100644
index 000000000..c9f1c79f3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -0,0 +1,1786 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/dcbnl.h>
+#include <linux/if_ether.h>
+#include <linux/list.h>
+#include <linux/netlink.h>
+
+#include "spectrum.h"
+#include "core.h"
+#include "port.h"
+#include "reg.h"
+
+struct mlxsw_sp_sb_pr {
+ enum mlxsw_reg_sbpr_mode mode;
+ u32 size;
+ u8 freeze_mode:1,
+ freeze_size:1;
+};
+
+struct mlxsw_cp_sb_occ {
+ u32 cur;
+ u32 max;
+};
+
+struct mlxsw_sp_sb_cm {
+ u32 min_buff;
+ u32 max_buff;
+ u16 pool_index;
+ struct mlxsw_cp_sb_occ occ;
+ u8 freeze_pool:1,
+ freeze_thresh:1;
+};
+
+#define MLXSW_SP_SB_INFI -1U
+#define MLXSW_SP_SB_REST -2U
+
+struct mlxsw_sp_sb_pm {
+ u32 min_buff;
+ u32 max_buff;
+ struct mlxsw_cp_sb_occ occ;
+};
+
+struct mlxsw_sp_sb_mm {
+ u32 min_buff;
+ u32 max_buff;
+ u16 pool_index;
+};
+
+struct mlxsw_sp_sb_pool_des {
+ enum mlxsw_reg_sbxx_dir dir;
+ u8 pool;
+};
+
+#define MLXSW_SP_SB_POOL_ING 0
+#define MLXSW_SP_SB_POOL_EGR 4
+#define MLXSW_SP_SB_POOL_EGR_MC 8
+#define MLXSW_SP_SB_POOL_ING_CPU 9
+#define MLXSW_SP_SB_POOL_EGR_CPU 10
+
+static const struct mlxsw_sp_sb_pool_des mlxsw_sp1_sb_pool_dess[] = {
+ {MLXSW_REG_SBXX_DIR_INGRESS, 0},
+ {MLXSW_REG_SBXX_DIR_INGRESS, 1},
+ {MLXSW_REG_SBXX_DIR_INGRESS, 2},
+ {MLXSW_REG_SBXX_DIR_INGRESS, 3},
+ {MLXSW_REG_SBXX_DIR_EGRESS, 0},
+ {MLXSW_REG_SBXX_DIR_EGRESS, 1},
+ {MLXSW_REG_SBXX_DIR_EGRESS, 2},
+ {MLXSW_REG_SBXX_DIR_EGRESS, 3},
+ {MLXSW_REG_SBXX_DIR_EGRESS, 15},
+ {MLXSW_REG_SBXX_DIR_INGRESS, 4},
+ {MLXSW_REG_SBXX_DIR_EGRESS, 4},
+};
+
+static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = {
+ {MLXSW_REG_SBXX_DIR_INGRESS, 0},
+ {MLXSW_REG_SBXX_DIR_INGRESS, 1},
+ {MLXSW_REG_SBXX_DIR_INGRESS, 2},
+ {MLXSW_REG_SBXX_DIR_INGRESS, 3},
+ {MLXSW_REG_SBXX_DIR_EGRESS, 0},
+ {MLXSW_REG_SBXX_DIR_EGRESS, 1},
+ {MLXSW_REG_SBXX_DIR_EGRESS, 2},
+ {MLXSW_REG_SBXX_DIR_EGRESS, 3},
+ {MLXSW_REG_SBXX_DIR_EGRESS, 15},
+ {MLXSW_REG_SBXX_DIR_INGRESS, 4},
+ {MLXSW_REG_SBXX_DIR_EGRESS, 4},
+};
+
+#define MLXSW_SP_SB_ING_TC_COUNT 8
+#define MLXSW_SP_SB_EG_TC_COUNT 16
+
+struct mlxsw_sp_sb_port {
+ struct mlxsw_sp_sb_cm ing_cms[MLXSW_SP_SB_ING_TC_COUNT];
+ struct mlxsw_sp_sb_cm eg_cms[MLXSW_SP_SB_EG_TC_COUNT];
+ struct mlxsw_sp_sb_pm *pms;
+};
+
+struct mlxsw_sp_sb {
+ struct mlxsw_sp_sb_pr *prs;
+ struct mlxsw_sp_sb_port *ports;
+ u32 cell_size;
+ u32 max_headroom_cells;
+ u64 sb_size;
+};
+
+struct mlxsw_sp_sb_vals {
+ unsigned int pool_count;
+ const struct mlxsw_sp_sb_pool_des *pool_dess;
+ const struct mlxsw_sp_sb_pm *pms;
+ const struct mlxsw_sp_sb_pm *pms_cpu;
+ const struct mlxsw_sp_sb_pr *prs;
+ const struct mlxsw_sp_sb_mm *mms;
+ const struct mlxsw_sp_sb_cm *cms_ingress;
+ const struct mlxsw_sp_sb_cm *cms_egress;
+ const struct mlxsw_sp_sb_cm *cms_cpu;
+ unsigned int mms_count;
+ unsigned int cms_ingress_count;
+ unsigned int cms_egress_count;
+ unsigned int cms_cpu_count;
+};
+
+struct mlxsw_sp_sb_ops {
+ u32 (*int_buf_size_get)(int mtu, u32 speed);
+};
+
+u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
+{
+ return mlxsw_sp->sb->cell_size * cells;
+}
+
+u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
+{
+ return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size);
+}
+
+static u32 mlxsw_sp_port_headroom_8x_adjust(const struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 size_cells)
+{
+ /* Ports with eight lanes use two headroom buffers between which the
+ * configured headroom size is split. Therefore, multiply the calculated
+ * headroom size by two.
+ */
+ return mlxsw_sp_port->mapping.width == 8 ? 2 * size_cells : size_cells;
+}
+
+static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
+ u16 pool_index)
+{
+ return &mlxsw_sp->sb->prs[pool_index];
+}
+
+static bool mlxsw_sp_sb_cm_exists(u8 pg_buff, enum mlxsw_reg_sbxx_dir dir)
+{
+ if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
+ return pg_buff < MLXSW_SP_SB_ING_TC_COUNT;
+ else
+ return pg_buff < MLXSW_SP_SB_EG_TC_COUNT;
+}
+
+static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
+ u16 local_port, u8 pg_buff,
+ enum mlxsw_reg_sbxx_dir dir)
+{
+ struct mlxsw_sp_sb_port *sb_port = &mlxsw_sp->sb->ports[local_port];
+
+ WARN_ON(!mlxsw_sp_sb_cm_exists(pg_buff, dir));
+ if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
+ return &sb_port->ing_cms[pg_buff];
+ else
+ return &sb_port->eg_cms[pg_buff];
+}
+
+static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
+ u16 local_port, u16 pool_index)
+{
+ return &mlxsw_sp->sb->ports[local_port].pms[pool_index];
+}
+
+static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
+ enum mlxsw_reg_sbpr_mode mode,
+ u32 size, bool infi_size)
+{
+ const struct mlxsw_sp_sb_pool_des *des =
+ &mlxsw_sp->sb_vals->pool_dess[pool_index];
+ char sbpr_pl[MLXSW_REG_SBPR_LEN];
+ struct mlxsw_sp_sb_pr *pr;
+ int err;
+
+ mlxsw_reg_sbpr_pack(sbpr_pl, des->pool, des->dir, mode,
+ size, infi_size);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
+ if (err)
+ return err;
+
+ if (infi_size)
+ size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp->sb->sb_size);
+ pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
+ pr->mode = mode;
+ pr->size = size;
+ return 0;
+}
+
+static int mlxsw_sp_sb_pr_desc_write(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_reg_sbxx_dir dir,
+ enum mlxsw_reg_sbpr_mode mode,
+ u32 size, bool infi_size)
+{
+ char sbpr_pl[MLXSW_REG_SBPR_LEN];
+
+ /* The FW default descriptor buffer configuration uses only pool 14 for
+ * descriptors.
+ */
+ mlxsw_reg_sbpr_pack(sbpr_pl, 14, dir, mode, size, infi_size);
+ mlxsw_reg_sbpr_desc_set(sbpr_pl, true);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
+}
+
+static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u16 local_port,
+ u8 pg_buff, u32 min_buff, u32 max_buff,
+ bool infi_max, u16 pool_index)
+{
+ const struct mlxsw_sp_sb_pool_des *des =
+ &mlxsw_sp->sb_vals->pool_dess[pool_index];
+ char sbcm_pl[MLXSW_REG_SBCM_LEN];
+ struct mlxsw_sp_sb_cm *cm;
+ int err;
+
+ mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, des->dir,
+ min_buff, max_buff, infi_max, des->pool);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
+ if (err)
+ return err;
+
+ if (mlxsw_sp_sb_cm_exists(pg_buff, des->dir)) {
+ if (infi_max)
+ max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
+ mlxsw_sp->sb->sb_size);
+
+ cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff,
+ des->dir);
+ cm->min_buff = min_buff;
+ cm->max_buff = max_buff;
+ cm->pool_index = pool_index;
+ }
+ return 0;
+}
+
+static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u16 local_port,
+ u16 pool_index, u32 min_buff, u32 max_buff)
+{
+ const struct mlxsw_sp_sb_pool_des *des =
+ &mlxsw_sp->sb_vals->pool_dess[pool_index];
+ char sbpm_pl[MLXSW_REG_SBPM_LEN];
+ struct mlxsw_sp_sb_pm *pm;
+ int err;
+
+ mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir, false,
+ min_buff, max_buff);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
+ if (err)
+ return err;
+
+ pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
+ pm->min_buff = min_buff;
+ pm->max_buff = max_buff;
+ return 0;
+}
+
+static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u16 local_port,
+ u16 pool_index, struct list_head *bulk_list)
+{
+ const struct mlxsw_sp_sb_pool_des *des =
+ &mlxsw_sp->sb_vals->pool_dess[pool_index];
+ char sbpm_pl[MLXSW_REG_SBPM_LEN];
+
+ if (local_port == MLXSW_PORT_CPU_PORT &&
+ des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
+ return 0;
+
+ mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
+ true, 0, 0);
+ return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
+ bulk_list, NULL, 0);
+}
+
+static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
+ char *sbpm_pl, size_t sbpm_pl_len,
+ unsigned long cb_priv)
+{
+ struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;
+
+ mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
+}
+
+static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u16 local_port,
+ u16 pool_index, struct list_head *bulk_list)
+{
+ const struct mlxsw_sp_sb_pool_des *des =
+ &mlxsw_sp->sb_vals->pool_dess[pool_index];
+ char sbpm_pl[MLXSW_REG_SBPM_LEN];
+ struct mlxsw_sp_sb_pm *pm;
+
+ if (local_port == MLXSW_PORT_CPU_PORT &&
+ des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
+ return 0;
+
+ pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
+ mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
+ false, 0, 0);
+ return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
+ bulk_list,
+ mlxsw_sp_sb_pm_occ_query_cb,
+ (unsigned long) pm);
+}
+
+void mlxsw_sp_hdroom_prios_reset_buf_idx(struct mlxsw_sp_hdroom *hdroom)
+{
+ int prio;
+
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
+ switch (hdroom->mode) {
+ case MLXSW_SP_HDROOM_MODE_DCB:
+ hdroom->prios.prio[prio].buf_idx = hdroom->prios.prio[prio].ets_buf_idx;
+ break;
+ case MLXSW_SP_HDROOM_MODE_TC:
+ hdroom->prios.prio[prio].buf_idx = hdroom->prios.prio[prio].set_buf_idx;
+ break;
+ }
+ }
+}
+
+void mlxsw_sp_hdroom_bufs_reset_lossiness(struct mlxsw_sp_hdroom *hdroom)
+{
+ int prio;
+ int i;
+
+ for (i = 0; i < DCBX_MAX_BUFFERS; i++)
+ hdroom->bufs.buf[i].lossy = true;
+
+ for (prio = 0; prio < IEEE_8021Q_MAX_PRIORITIES; prio++) {
+ if (!hdroom->prios.prio[prio].lossy)
+ hdroom->bufs.buf[hdroom->prios.prio[prio].buf_idx].lossy = false;
+ }
+}
+
+static u16 mlxsw_sp_hdroom_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp, int mtu)
+{
+ return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
+}
+
+static void mlxsw_sp_hdroom_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres, bool lossy)
+{
+ if (lossy)
+ mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
+ else
+ mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
+ thres);
+}
+
+static u16 mlxsw_sp_hdroom_buf_delay_get(const struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_hdroom *hdroom)
+{
+ u16 delay_cells;
+
+ delay_cells = mlxsw_sp_bytes_cells(mlxsw_sp, hdroom->delay_bytes);
+
+ /* In the worst case scenario the delay will be made up of packets that
+ * are all of size CELL_SIZE + 1, which means each packet will require
+ * almost twice its true size when buffered in the switch. We therefore
+ * multiply this value by the "cell factor", which is close to 2.
+ *
+ * Another MTU is added in case the transmitting host already started
+ * transmitting a maximum length frame when the PFC packet was received.
+ */
+ return 2 * delay_cells + mlxsw_sp_bytes_cells(mlxsw_sp, hdroom->mtu);
+}
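[Editorial note, not part of the patch: a quick worked example of the delay headroom above, with purely illustrative numbers — the real cell size comes from the CELL_SIZE device resource.]

	/* Hypothetical values: 100-byte cell, delay_bytes = 10000, mtu = 1500:
	 *   delay_cells = DIV_ROUND_UP(10000, 100)          = 100 cells
	 *   headroom    = 2 * 100 + DIV_ROUND_UP(1500, 100)
	 *               = 200 + 15                           = 215 cells
	 */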
+
+static u32 mlxsw_sp_hdroom_int_buf_size_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed)
+{
+ u32 buffsize = mlxsw_sp->sb_ops->int_buf_size_get(mtu, speed);
+
+ return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1;
+}
+
+static bool mlxsw_sp_hdroom_buf_is_used(const struct mlxsw_sp_hdroom *hdroom, int buf)
+{
+ int prio;
+
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
+ if (hdroom->prios.prio[prio].buf_idx == buf)
+ return true;
+ }
+ return false;
+}
+
+void mlxsw_sp_hdroom_bufs_reset_sizes(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_hdroom *hdroom)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u16 reserve_cells;
+ int i;
+
+ /* Internal buffer. */
+ reserve_cells = mlxsw_sp_hdroom_int_buf_size_get(mlxsw_sp, mlxsw_sp_port->max_mtu,
+ mlxsw_sp_port->max_speed);
+ reserve_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, reserve_cells);
+ hdroom->int_buf.reserve_cells = reserve_cells;
+
+ if (hdroom->int_buf.enable)
+ hdroom->int_buf.size_cells = reserve_cells;
+ else
+ hdroom->int_buf.size_cells = 0;
+
+ /* PG buffers. */
+ for (i = 0; i < DCBX_MAX_BUFFERS; i++) {
+ struct mlxsw_sp_hdroom_buf *buf = &hdroom->bufs.buf[i];
+ u16 thres_cells;
+ u16 delay_cells;
+
+ if (!mlxsw_sp_hdroom_buf_is_used(hdroom, i)) {
+ thres_cells = 0;
+ delay_cells = 0;
+ } else if (buf->lossy) {
+ thres_cells = mlxsw_sp_hdroom_buf_threshold_get(mlxsw_sp, hdroom->mtu);
+ delay_cells = 0;
+ } else {
+ thres_cells = mlxsw_sp_hdroom_buf_threshold_get(mlxsw_sp, hdroom->mtu);
+ delay_cells = mlxsw_sp_hdroom_buf_delay_get(mlxsw_sp, hdroom);
+ }
+
+ thres_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, thres_cells);
+ delay_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, delay_cells);
+
+ buf->thres_cells = thres_cells;
+ if (hdroom->mode == MLXSW_SP_HDROOM_MODE_DCB) {
+ buf->size_cells = thres_cells + delay_cells;
+ } else {
+ /* Do not allow going below the minimum size, even if
+ * the user requested it.
+ */
+ buf->size_cells = max(buf->set_size_cells, buf->thres_cells);
+ }
+ }
+}
+
+#define MLXSW_SP_PB_UNUSED 8
+
+static int mlxsw_sp_hdroom_configure_buffers(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_hdroom *hdroom, bool force)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char pbmc_pl[MLXSW_REG_PBMC_LEN];
+ bool dirty;
+ int err;
+ int i;
+
+ dirty = memcmp(&mlxsw_sp_port->hdroom->bufs, &hdroom->bufs, sizeof(hdroom->bufs));
+ if (!dirty && !force)
+ return 0;
+
+ mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0xffff, 0xffff / 2);
+ for (i = 0; i < MLXSW_SP_PB_COUNT; i++) {
+ const struct mlxsw_sp_hdroom_buf *buf = &hdroom->bufs.buf[i];
+
+ if (i == MLXSW_SP_PB_UNUSED)
+ continue;
+
+ mlxsw_sp_hdroom_buf_pack(pbmc_pl, i, buf->size_cells, buf->thres_cells, buf->lossy);
+ }
+
+ mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
+ if (err)
+ return err;
+
+ mlxsw_sp_port->hdroom->bufs = hdroom->bufs;
+ return 0;
+}
+
+static int mlxsw_sp_hdroom_configure_priomap(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_hdroom *hdroom, bool force)
+{
+ char pptb_pl[MLXSW_REG_PPTB_LEN];
+ bool dirty;
+ int prio;
+ int err;
+
+ dirty = memcmp(&mlxsw_sp_port->hdroom->prios, &hdroom->prios, sizeof(hdroom->prios));
+ if (!dirty && !force)
+ return 0;
+
+ mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
+ mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, prio, hdroom->prios.prio[prio].buf_idx);
+
+ err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb), pptb_pl);
+ if (err)
+ return err;
+
+ mlxsw_sp_port->hdroom->prios = hdroom->prios;
+ return 0;
+}
+
+static int mlxsw_sp_hdroom_configure_int_buf(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_hdroom *hdroom, bool force)
+{
+ char sbib_pl[MLXSW_REG_SBIB_LEN];
+ bool dirty;
+ int err;
+
+ dirty = memcmp(&mlxsw_sp_port->hdroom->int_buf, &hdroom->int_buf, sizeof(hdroom->int_buf));
+ if (!dirty && !force)
+ return 0;
+
+ mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, hdroom->int_buf.size_cells);
+ err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+ if (err)
+ return err;
+
+ mlxsw_sp_port->hdroom->int_buf = hdroom->int_buf;
+ return 0;
+}
+
+static bool mlxsw_sp_hdroom_bufs_fit(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_hdroom *hdroom)
+{
+ u32 taken_headroom_cells = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_PB_COUNT; i++)
+ taken_headroom_cells += hdroom->bufs.buf[i].size_cells;
+
+ taken_headroom_cells += hdroom->int_buf.reserve_cells;
+ return taken_headroom_cells <= mlxsw_sp->sb->max_headroom_cells;
+}
+
+static int __mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_hdroom *hdroom, bool force)
+{
+ struct mlxsw_sp_hdroom orig_hdroom;
+ struct mlxsw_sp_hdroom tmp_hdroom;
+ int err;
+ int i;
+
+ /* Port buffers need to be configured in three steps. First, all buffers
+ * with non-zero size are configured. Then, the prio-to-buffer map is
+ * updated, allowing traffic to flow to the now non-zero buffers.
+ * Finally, zero-sized buffers are configured, because now no traffic
+ * should be directed to them anymore. This way, in a non-congested
+ * system, no packet drops are introduced by the reconfiguration.
+ */
+
+ orig_hdroom = *mlxsw_sp_port->hdroom;
+ tmp_hdroom = orig_hdroom;
+ for (i = 0; i < MLXSW_SP_PB_COUNT; i++) {
+ if (hdroom->bufs.buf[i].size_cells)
+ tmp_hdroom.bufs.buf[i] = hdroom->bufs.buf[i];
+ }
+
+ if (!mlxsw_sp_hdroom_bufs_fit(mlxsw_sp_port->mlxsw_sp, &tmp_hdroom) ||
+ !mlxsw_sp_hdroom_bufs_fit(mlxsw_sp_port->mlxsw_sp, hdroom))
+ return -ENOBUFS;
+
+ err = mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, &tmp_hdroom, force);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_hdroom_configure_priomap(mlxsw_sp_port, hdroom, force);
+ if (err)
+ goto err_configure_priomap;
+
+ err = mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, hdroom, false);
+ if (err)
+ goto err_configure_buffers;
+
+ err = mlxsw_sp_hdroom_configure_int_buf(mlxsw_sp_port, hdroom, false);
+ if (err)
+ goto err_configure_int_buf;
+
+ *mlxsw_sp_port->hdroom = *hdroom;
+ return 0;
+
+err_configure_int_buf:
+ mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, &tmp_hdroom, false);
+err_configure_buffers:
+ mlxsw_sp_hdroom_configure_priomap(mlxsw_sp_port, &tmp_hdroom, false);
+err_configure_priomap:
+ mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, &orig_hdroom, false);
+ return err;
+}
+
+int mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_hdroom *hdroom)
+{
+ return __mlxsw_sp_hdroom_configure(mlxsw_sp_port, hdroom, false);
+}
+
+static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_hdroom hdroom = {};
+ u32 size9;
+ int prio;
+
+ hdroom.mtu = mlxsw_sp_port->dev->mtu;
+ hdroom.mode = MLXSW_SP_HDROOM_MODE_DCB;
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
+ hdroom.prios.prio[prio].lossy = true;
+
+ mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+
+ /* Buffer 9 is used for control traffic. */
+ size9 = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, mlxsw_sp_port->max_mtu);
+ hdroom.bufs.buf[9].size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size9);
+
+ return __mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom, true);
+}
+
+static int mlxsw_sp_sb_port_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_sb_port *sb_port)
+{
+ struct mlxsw_sp_sb_pm *pms;
+
+ pms = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*pms),
+ GFP_KERNEL);
+ if (!pms)
+ return -ENOMEM;
+ sb_port->pms = pms;
+ return 0;
+}
+
+static void mlxsw_sp_sb_port_fini(struct mlxsw_sp_sb_port *sb_port)
+{
+ kfree(sb_port->pms);
+}
+
+static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp)
+{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+ struct mlxsw_sp_sb_pr *prs;
+ int i;
+ int err;
+
+ mlxsw_sp->sb->ports = kcalloc(max_ports,
+ sizeof(struct mlxsw_sp_sb_port),
+ GFP_KERNEL);
+ if (!mlxsw_sp->sb->ports)
+ return -ENOMEM;
+
+ prs = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*prs),
+ GFP_KERNEL);
+ if (!prs) {
+ err = -ENOMEM;
+ goto err_alloc_prs;
+ }
+ mlxsw_sp->sb->prs = prs;
+
+ for (i = 0; i < max_ports; i++) {
+ err = mlxsw_sp_sb_port_init(mlxsw_sp, &mlxsw_sp->sb->ports[i]);
+ if (err)
+ goto err_sb_port_init;
+ }
+
+ return 0;
+
+err_sb_port_init:
+ for (i--; i >= 0; i--)
+ mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
+ kfree(mlxsw_sp->sb->prs);
+err_alloc_prs:
+ kfree(mlxsw_sp->sb->ports);
+ return err;
+}
+
+static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+ int i;
+
+ for (i = max_ports - 1; i >= 0; i--)
+ mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
+ kfree(mlxsw_sp->sb->prs);
+ kfree(mlxsw_sp->sb->ports);
+}
+
+#define MLXSW_SP_SB_PR(_mode, _size) \
+ { \
+ .mode = _mode, \
+ .size = _size, \
+ }
+
+#define MLXSW_SP_SB_PR_EXT(_mode, _size, _freeze_mode, _freeze_size) \
+ { \
+ .mode = _mode, \
+ .size = _size, \
+ .freeze_mode = _freeze_mode, \
+ .freeze_size = _freeze_size, \
+ }
+
+#define MLXSW_SP1_SB_PR_CPU_SIZE (256 * 1000)
+
+/* Order according to mlxsw_sp1_sb_pool_dess */
+static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = {
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+ MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST,
+ true, false),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+ MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
+ true, true),
+ MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
+ MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
+ MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
+ MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
+};
+
+#define MLXSW_SP2_SB_PR_CPU_SIZE (256 * 1000)
+
+/* Order according to mlxsw_sp2_sb_pool_dess */
+static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
+ MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST,
+ true, false),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
+ MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
+ true, true),
+ MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
+ MLXSW_SP2_SB_PR_CPU_SIZE, true, false),
+ MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
+ MLXSW_SP2_SB_PR_CPU_SIZE, true, false),
+};
+
+static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_sb_pr *prs,
+ const struct mlxsw_sp_sb_pool_des *pool_dess,
+ size_t prs_len)
+{
+ /* Round down, unlike mlxsw_sp_bytes_cells(). */
+ u32 sb_cells = div_u64(mlxsw_sp->sb->sb_size, mlxsw_sp->sb->cell_size);
+ u32 rest_cells[2] = {sb_cells, sb_cells};
+ int i;
+ int err;
+
+ /* Calculate how much space to give to the "REST" pools in either
+ * direction.
+ */
+ for (i = 0; i < prs_len; i++) {
+ enum mlxsw_reg_sbxx_dir dir = pool_dess[i].dir;
+ u32 size = prs[i].size;
+ u32 size_cells;
+
+ if (size == MLXSW_SP_SB_INFI || size == MLXSW_SP_SB_REST)
+ continue;
+
+ size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
+ if (WARN_ON_ONCE(size_cells > rest_cells[dir]))
+ continue;
+
+ rest_cells[dir] -= size_cells;
+ }
+
+ for (i = 0; i < prs_len; i++) {
+ u32 size = prs[i].size;
+ u32 size_cells;
+
+ if (size == MLXSW_SP_SB_INFI) {
+ err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
+ 0, true);
+ } else if (size == MLXSW_SP_SB_REST) {
+ size_cells = rest_cells[pool_dess[i].dir];
+ err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
+ size_cells, false);
+ } else {
+ size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
+ err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
+ size_cells, false);
+ }
+ if (err)
+ return err;
+ }
+
+ err = mlxsw_sp_sb_pr_desc_write(mlxsw_sp, MLXSW_REG_SBXX_DIR_INGRESS,
+ MLXSW_REG_SBPR_MODE_DYNAMIC, 0, true);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_sb_pr_desc_write(mlxsw_sp, MLXSW_REG_SBXX_DIR_EGRESS,
+ MLXSW_REG_SBPR_MODE_DYNAMIC, 0, true);
+ if (err)
+ return err;
+
+ return 0;
+}
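[Editorial note, not part of the patch: to make the MLXSW_SP_SB_REST accounting above concrete, here is a small example with invented numbers.]

	/* Hypothetical example:
	 *   sb_cells                       = 1000000
	 *   one explicitly sized ingress pool = 2560 cells
	 *   rest_cells[INGRESS]            = 1000000 - 2560 = 997440 cells
	 * The ingress pool marked MLXSW_SP_SB_REST is then written with
	 * 997440 cells; pools marked MLXSW_SP_SB_INFI do not consume any
	 * of this budget.
	 */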
+
+#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool) \
+ { \
+ .min_buff = _min_buff, \
+ .max_buff = _max_buff, \
+ .pool_index = _pool, \
+ }
+
+#define MLXSW_SP_SB_CM_ING(_min_buff, _max_buff) \
+ { \
+ .min_buff = _min_buff, \
+ .max_buff = _max_buff, \
+ .pool_index = MLXSW_SP_SB_POOL_ING, \
+ }
+
+#define MLXSW_SP_SB_CM_EGR(_min_buff, _max_buff) \
+ { \
+ .min_buff = _min_buff, \
+ .max_buff = _max_buff, \
+ .pool_index = MLXSW_SP_SB_POOL_EGR, \
+ }
+
+#define MLXSW_SP_SB_CM_EGR_MC(_min_buff, _max_buff) \
+ { \
+ .min_buff = _min_buff, \
+ .max_buff = _max_buff, \
+ .pool_index = MLXSW_SP_SB_POOL_EGR_MC, \
+ .freeze_pool = true, \
+ .freeze_thresh = true, \
+ }
+
+static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_ingress[] = {
+ MLXSW_SP_SB_CM_ING(10000, 8),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
+ MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU),
+};
+
+static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_ingress[] = {
+ MLXSW_SP_SB_CM_ING(0, 7),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
+ MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU),
+};
+
+static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_egress[] = {
+ MLXSW_SP_SB_CM_EGR(1500, 9),
+ MLXSW_SP_SB_CM_EGR(1500, 9),
+ MLXSW_SP_SB_CM_EGR(1500, 9),
+ MLXSW_SP_SB_CM_EGR(1500, 9),
+ MLXSW_SP_SB_CM_EGR(1500, 9),
+ MLXSW_SP_SB_CM_EGR(1500, 9),
+ MLXSW_SP_SB_CM_EGR(1500, 9),
+ MLXSW_SP_SB_CM_EGR(1500, 9),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR(1, 0xff),
+};
+
+static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = {
+ MLXSW_SP_SB_CM_EGR(0, 7),
+ MLXSW_SP_SB_CM_EGR(0, 7),
+ MLXSW_SP_SB_CM_EGR(0, 7),
+ MLXSW_SP_SB_CM_EGR(0, 7),
+ MLXSW_SP_SB_CM_EGR(0, 7),
+ MLXSW_SP_SB_CM_EGR(0, 7),
+ MLXSW_SP_SB_CM_EGR(0, 7),
+ MLXSW_SP_SB_CM_EGR(0, 7),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR(1, 0xff),
+};
+
+#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, MLXSW_SP_SB_POOL_EGR_CPU)
+
+static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
+ MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
+ MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
+ MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
+ MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
+ MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
+ MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+};
+
+static bool
+mlxsw_sp_sb_pool_is_static(struct mlxsw_sp *mlxsw_sp, u16 pool_index)
+{
+ struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
+
+ return pr->mode == MLXSW_REG_SBPR_MODE_STATIC;
+}
+
+static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u16 local_port,
+ enum mlxsw_reg_sbxx_dir dir,
+ const struct mlxsw_sp_sb_cm *cms,
+ size_t cms_len)
+{
+ const struct mlxsw_sp_sb_vals *sb_vals = mlxsw_sp->sb_vals;
+ int i;
+ int err;
+
+ for (i = 0; i < cms_len; i++) {
+ const struct mlxsw_sp_sb_cm *cm;
+ u32 min_buff;
+ u32 max_buff;
+
+ if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
+ continue; /* PG number 8 does not exist, skip it */
+ cm = &cms[i];
+ if (WARN_ON(sb_vals->pool_dess[cm->pool_index].dir != dir))
+ continue;
+
+ min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff);
+ max_buff = cm->max_buff;
+ if (max_buff == MLXSW_SP_SB_INFI) {
+ err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
+ min_buff, 0,
+ true, cm->pool_index);
+ } else {
+ if (mlxsw_sp_sb_pool_is_static(mlxsw_sp,
+ cm->pool_index))
+ max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
+ max_buff);
+ err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
+ min_buff, max_buff,
+ false, cm->pool_index);
+ }
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ int err;
+
+ err = __mlxsw_sp_sb_cms_init(mlxsw_sp,
+ mlxsw_sp_port->local_port,
+ MLXSW_REG_SBXX_DIR_INGRESS,
+ mlxsw_sp->sb_vals->cms_ingress,
+ mlxsw_sp->sb_vals->cms_ingress_count);
+ if (err)
+ return err;
+ return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
+ mlxsw_sp_port->local_port,
+ MLXSW_REG_SBXX_DIR_EGRESS,
+ mlxsw_sp->sb_vals->cms_egress,
+ mlxsw_sp->sb_vals->cms_egress_count);
+}
+
+static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
+{
+ return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
+ mlxsw_sp->sb_vals->cms_cpu,
+ mlxsw_sp->sb_vals->cms_cpu_count);
+}
+
+#define MLXSW_SP_SB_PM(_min_buff, _max_buff) \
+ { \
+ .min_buff = _min_buff, \
+ .max_buff = _max_buff, \
+ }
+
+/* Order according to mlxsw_sp1_sb_pool_dess */
+static const struct mlxsw_sp_sb_pm mlxsw_sp1_sb_pms[] = {
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_PM(0, 7),
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_PM(10000, 90000),
+ MLXSW_SP_SB_PM(0, 8), /* 50% occupancy */
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+};
+
+/* Order according to mlxsw_sp2_sb_pool_dess */
+static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = {
+ MLXSW_SP_SB_PM(0, 7),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, 7),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(10000, 90000),
+ MLXSW_SP_SB_PM(0, 8), /* 50% occupancy */
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+};
+
+/* Order according to mlxsw_sp*_sb_pool_dess */
+static const struct mlxsw_sp_sb_pm mlxsw_sp_cpu_port_sb_pms[] = {
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, 90000),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
+};
+
+static int mlxsw_sp_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u16 local_port,
+ const struct mlxsw_sp_sb_pm *pms,
+ bool skip_ingress)
+{
+ int i, err;
+
+ for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
+ const struct mlxsw_sp_sb_pm *pm = &pms[i];
+ const struct mlxsw_sp_sb_pool_des *des;
+ u32 max_buff;
+ u32 min_buff;
+
+ des = &mlxsw_sp->sb_vals->pool_dess[i];
+ if (skip_ingress && des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
+ continue;
+
+ min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, pm->min_buff);
+ max_buff = pm->max_buff;
+ if (mlxsw_sp_sb_pool_is_static(mlxsw_sp, i))
+ max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, max_buff);
+ err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, min_buff,
+ max_buff);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ return mlxsw_sp_sb_pms_init(mlxsw_sp, mlxsw_sp_port->local_port,
+ mlxsw_sp->sb_vals->pms, false);
+}
+
+static int mlxsw_sp_cpu_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp)
+{
+ return mlxsw_sp_sb_pms_init(mlxsw_sp, 0, mlxsw_sp->sb_vals->pms_cpu,
+ true);
+}
+
+#define MLXSW_SP_SB_MM(_min_buff, _max_buff) \
+ { \
+ .min_buff = _min_buff, \
+ .max_buff = _max_buff, \
+ .pool_index = MLXSW_SP_SB_POOL_EGR, \
+ }
+
+static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+};
+
+static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
+{
+ char sbmm_pl[MLXSW_REG_SBMM_LEN];
+ int i;
+ int err;
+
+ for (i = 0; i < mlxsw_sp->sb_vals->mms_count; i++) {
+ const struct mlxsw_sp_sb_pool_des *des;
+ const struct mlxsw_sp_sb_mm *mc;
+ u32 min_buff;
+
+ mc = &mlxsw_sp->sb_vals->mms[i];
+ des = &mlxsw_sp->sb_vals->pool_dess[mc->pool_index];
+ /* All pools used by sb_mm's are initialized using dynamic
+ * thresholds, therefore 'max_buff' isn't specified in cells.
+ */
+ min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff);
+ mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff,
+ des->pool);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static void mlxsw_sp_pool_count(struct mlxsw_sp *mlxsw_sp,
+ u16 *p_ingress_len, u16 *p_egress_len)
+{
+ int i;
+
+ for (i = 0; i < mlxsw_sp->sb_vals->pool_count; ++i) {
+ if (mlxsw_sp->sb_vals->pool_dess[i].dir ==
+ MLXSW_REG_SBXX_DIR_INGRESS)
+ (*p_ingress_len)++;
+ else
+ (*p_egress_len)++;
+ }
+
+ WARN(*p_egress_len == 0, "No egress pools\n");
+}
+
+const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals = {
+ .pool_count = ARRAY_SIZE(mlxsw_sp1_sb_pool_dess),
+ .pool_dess = mlxsw_sp1_sb_pool_dess,
+ .pms = mlxsw_sp1_sb_pms,
+ .pms_cpu = mlxsw_sp_cpu_port_sb_pms,
+ .prs = mlxsw_sp1_sb_prs,
+ .mms = mlxsw_sp_sb_mms,
+ .cms_ingress = mlxsw_sp1_sb_cms_ingress,
+ .cms_egress = mlxsw_sp1_sb_cms_egress,
+ .cms_cpu = mlxsw_sp_cpu_port_sb_cms,
+ .mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
+ .cms_ingress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_ingress),
+ .cms_egress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_egress),
+ .cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
+};
+
+const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals = {
+ .pool_count = ARRAY_SIZE(mlxsw_sp2_sb_pool_dess),
+ .pool_dess = mlxsw_sp2_sb_pool_dess,
+ .pms = mlxsw_sp2_sb_pms,
+ .pms_cpu = mlxsw_sp_cpu_port_sb_pms,
+ .prs = mlxsw_sp2_sb_prs,
+ .mms = mlxsw_sp_sb_mms,
+ .cms_ingress = mlxsw_sp2_sb_cms_ingress,
+ .cms_egress = mlxsw_sp2_sb_cms_egress,
+ .cms_cpu = mlxsw_sp_cpu_port_sb_cms,
+ .mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
+ .cms_ingress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_ingress),
+ .cms_egress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_egress),
+ .cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
+};
+
+static u32 mlxsw_sp1_pb_int_buf_size_get(int mtu, u32 speed)
+{
+ return mtu * 5 / 2;
+}
+
+static u32 __mlxsw_sp_pb_int_buf_size_get(int mtu, u32 speed, u32 buffer_factor)
+{
+ return 3 * mtu + buffer_factor * speed / 1000;
+}
+
+#define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
+
+static u32 mlxsw_sp2_pb_int_buf_size_get(int mtu, u32 speed)
+{
+ int factor = MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR;
+
+ return __mlxsw_sp_pb_int_buf_size_get(mtu, speed, factor);
+}
+
+#define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50
+
+static u32 mlxsw_sp3_pb_int_buf_size_get(int mtu, u32 speed)
+{
+ int factor = MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR;
+
+ return __mlxsw_sp_pb_int_buf_size_get(mtu, speed, factor);
+}
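[Editorial note, not part of the patch: for orientation, the internal mirror-buffer formulas above evaluated with example numbers. Illustrative only; the speed argument is assumed here to be in Mb/s, matching how ethtool-style link speeds are usually expressed.]

	/* Example: mtu = 1500, speed = 100000 (100 Gb/s expressed in Mb/s):
	 *   Spectrum-1: 1500 * 5 / 2                  = 3750 bytes
	 *   Spectrum-2: 3 * 1500 + 38 * 100000 / 1000 = 8300 bytes
	 *   Spectrum-3: 3 * 1500 + 50 * 100000 / 1000 = 9500 bytes
	 * The caller (mlxsw_sp_hdroom_int_buf_size_get) then converts the
	 * result to cells and adds one extra cell.
	 */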
+
+const struct mlxsw_sp_sb_ops mlxsw_sp1_sb_ops = {
+ .int_buf_size_get = mlxsw_sp1_pb_int_buf_size_get,
+};
+
+const struct mlxsw_sp_sb_ops mlxsw_sp2_sb_ops = {
+ .int_buf_size_get = mlxsw_sp2_pb_int_buf_size_get,
+};
+
+const struct mlxsw_sp_sb_ops mlxsw_sp3_sb_ops = {
+ .int_buf_size_get = mlxsw_sp3_pb_int_buf_size_get,
+};
+
+int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
+{
+ u32 max_headroom_size;
+ u16 ing_pool_count = 0;
+ u16 eg_pool_count = 0;
+ int err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
+ return -EIO;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, GUARANTEED_SHARED_BUFFER))
+ return -EIO;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_HEADROOM_SIZE))
+ return -EIO;
+
+ mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL);
+ if (!mlxsw_sp->sb)
+ return -ENOMEM;
+ mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
+ mlxsw_sp->sb->sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ GUARANTEED_SHARED_BUFFER);
+ max_headroom_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ MAX_HEADROOM_SIZE);
+ /* Round down, because this limit must not be overstepped. */
+ mlxsw_sp->sb->max_headroom_cells = max_headroom_size /
+ mlxsw_sp->sb->cell_size;
+
+ err = mlxsw_sp_sb_ports_init(mlxsw_sp);
+ if (err)
+ goto err_sb_ports_init;
+ err = mlxsw_sp_sb_prs_init(mlxsw_sp, mlxsw_sp->sb_vals->prs,
+ mlxsw_sp->sb_vals->pool_dess,
+ mlxsw_sp->sb_vals->pool_count);
+ if (err)
+ goto err_sb_prs_init;
+ err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
+ if (err)
+ goto err_sb_cpu_port_sb_cms_init;
+ err = mlxsw_sp_cpu_port_sb_pms_init(mlxsw_sp);
+ if (err)
+ goto err_sb_cpu_port_pms_init;
+ err = mlxsw_sp_sb_mms_init(mlxsw_sp);
+ if (err)
+ goto err_sb_mms_init;
+ mlxsw_sp_pool_count(mlxsw_sp, &ing_pool_count, &eg_pool_count);
+ err = devl_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
+ mlxsw_sp->sb->sb_size,
+ ing_pool_count,
+ eg_pool_count,
+ MLXSW_SP_SB_ING_TC_COUNT,
+ MLXSW_SP_SB_EG_TC_COUNT);
+ if (err)
+ goto err_devlink_sb_register;
+
+ return 0;
+
+err_devlink_sb_register:
+err_sb_mms_init:
+err_sb_cpu_port_pms_init:
+err_sb_cpu_port_sb_cms_init:
+err_sb_prs_init:
+ mlxsw_sp_sb_ports_fini(mlxsw_sp);
+err_sb_ports_init:
+ kfree(mlxsw_sp->sb);
+ return err;
+}
+
+void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ devl_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
+ mlxsw_sp_sb_ports_fini(mlxsw_sp);
+ kfree(mlxsw_sp->sb);
+}
+
+int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int err;
+
+ mlxsw_sp_port->hdroom = kzalloc(sizeof(*mlxsw_sp_port->hdroom), GFP_KERNEL);
+ if (!mlxsw_sp_port->hdroom)
+ return -ENOMEM;
+ mlxsw_sp_port->hdroom->mtu = mlxsw_sp_port->dev->mtu;
+
+ err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
+ if (err)
+ goto err_headroom_init;
+ err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
+ if (err)
+ goto err_port_sb_cms_init;
+ err = mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);
+ if (err)
+ goto err_port_sb_pms_init;
+ return 0;
+
+err_port_sb_pms_init:
+err_port_sb_cms_init:
+err_headroom_init:
+ kfree(mlxsw_sp_port->hdroom);
+ return err;
+}
+
+void mlxsw_sp_port_buffers_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ kfree(mlxsw_sp_port->hdroom);
+}
+
+int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index, u16 pool_index,
+ struct devlink_sb_pool_info *pool_info)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ enum mlxsw_reg_sbxx_dir dir;
+ struct mlxsw_sp_sb_pr *pr;
+
+ dir = mlxsw_sp->sb_vals->pool_dess[pool_index].dir;
+ pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
+ pool_info->pool_type = (enum devlink_sb_pool_type) dir;
+ pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size);
+ pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
+ pool_info->cell_size = mlxsw_sp->sb->cell_size;
+ return 0;
+}
+
+int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index, u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size);
+ const struct mlxsw_sp_sb_pr *pr;
+ enum mlxsw_reg_sbpr_mode mode;
+
+ mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
+ pr = &mlxsw_sp->sb_vals->prs[pool_index];
+
+ if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ GUARANTEED_SHARED_BUFFER)) {
+ NL_SET_ERR_MSG_MOD(extack, "Exceeded shared buffer size");
+ return -EINVAL;
+ }
+
+ if (pr->freeze_mode && pr->mode != mode) {
+ NL_SET_ERR_MSG_MOD(extack, "Changing this pool's threshold type is forbidden");
+ return -EINVAL;
+ }
+
+ if (pr->freeze_size && pr->size != size) {
+ NL_SET_ERR_MSG_MOD(extack, "Changing this pool's size is forbidden");
+ return -EINVAL;
+ }
+
+ return mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index, mode,
+ pool_size, false);
+}
+
+#define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */
+
+static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
+ u32 max_buff)
+{
+ struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
+
+ if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
+ return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
+ return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff);
+}
+
+static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
+ u32 threshold, u32 *p_max_buff,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
+
+ if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
+ int val;
+
+ val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
+ if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
+ val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid dynamic threshold value");
+ return -EINVAL;
+ }
+ *p_max_buff = val;
+ } else {
+ *p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold);
+ }
+ return 0;
+}
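[Editorial note, not part of the patch: the ±2 offset is easy to get backwards, so here is a short example of the conversion for dynamic-mode pools, matching the "3->1, 16->14" comment next to the macro definition above.]

	/* Dynamic-mode examples:
	 *   devlink threshold 3  -> register max_buff 3 + (-2)  = 1
	 *   devlink threshold 16 -> register max_buff 16 + (-2) = 14
	 * mlxsw_sp_sb_threshold_out() reverses this on read-back:
	 *   register max_buff 1  -> 1 - (-2) = 3
	 * Static-mode pools skip the offset and convert between bytes and
	 * cells instead.
	 */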
+
+int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port =
+ mlxsw_core_port_driver_priv(mlxsw_core_port);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u16 local_port = mlxsw_sp_port->local_port;
+ struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
+ pool_index);
+
+ *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool_index,
+ pm->max_buff);
+ return 0;
+}
+
+int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold, struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port =
+ mlxsw_core_port_driver_priv(mlxsw_core_port);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u16 local_port = mlxsw_sp_port->local_port;
+ u32 max_buff;
+ int err;
+
+ if (local_port == MLXSW_PORT_CPU_PORT) {
+ NL_SET_ERR_MSG_MOD(extack, "Changing CPU port's threshold is forbidden");
+ return -EINVAL;
+ }
+
+ err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
+ threshold, &max_buff, extack);
+ if (err)
+ return err;
+
+ return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool_index,
+ 0, max_buff);
+}
+
+int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port =
+ mlxsw_core_port_driver_priv(mlxsw_core_port);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u16 local_port = mlxsw_sp_port->local_port;
+ u8 pg_buff = tc_index;
+ enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
+ struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
+ pg_buff, dir);
+
+ *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool_index,
+ cm->max_buff);
+ *p_pool_index = cm->pool_index;
+ return 0;
+}
+
+int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port =
+ mlxsw_core_port_driver_priv(mlxsw_core_port);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u16 local_port = mlxsw_sp_port->local_port;
+ const struct mlxsw_sp_sb_cm *cm;
+ u8 pg_buff = tc_index;
+ enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
+ u32 max_buff;
+ int err;
+
+ if (local_port == MLXSW_PORT_CPU_PORT) {
+ NL_SET_ERR_MSG_MOD(extack, "Changing CPU port's binding is forbidden");
+ return -EINVAL;
+ }
+
+ if (dir != mlxsw_sp->sb_vals->pool_dess[pool_index].dir) {
+ NL_SET_ERR_MSG_MOD(extack, "Binding egress TC to ingress pool and vice versa is forbidden");
+ return -EINVAL;
+ }
+
+ if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
+ cm = &mlxsw_sp->sb_vals->cms_ingress[tc_index];
+ else
+ cm = &mlxsw_sp->sb_vals->cms_egress[tc_index];
+
+ if (cm->freeze_pool && cm->pool_index != pool_index) {
+ NL_SET_ERR_MSG_MOD(extack, "Binding this TC to a different pool is forbidden");
+ return -EINVAL;
+ }
+
+ if (cm->freeze_thresh && cm->max_buff != threshold) {
+ NL_SET_ERR_MSG_MOD(extack, "Changing this TC's threshold is forbidden");
+ return -EINVAL;
+ }
+
+ err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
+ threshold, &max_buff, extack);
+ if (err)
+ return err;
+
+ return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff,
+ 0, max_buff, false, pool_index);
+}
+
+#define MASKED_COUNT_MAX \
+ (MLXSW_REG_SBSR_REC_MAX_COUNT / \
+ (MLXSW_SP_SB_ING_TC_COUNT + MLXSW_SP_SB_EG_TC_COUNT))
+
+struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
+ u8 masked_count;
+ u16 local_port_1;
+};
+
+static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
+ char *sbsr_pl, size_t sbsr_pl_len,
+ unsigned long cb_priv)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
+ u8 masked_count;
+ u16 local_port;
+ int rec_index = 0;
+ struct mlxsw_sp_sb_cm *cm;
+ int i;
+
+ memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));
+
+ masked_count = 0;
+ for (local_port = cb_ctx.local_port_1;
+ local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
+ if (!mlxsw_sp->ports[local_port])
+ continue;
+ if (local_port == MLXSW_PORT_CPU_PORT) {
+ /* Ingress quotas are not supported for the CPU port */
+ masked_count++;
+ continue;
+ }
+ for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) {
+ cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
+ MLXSW_REG_SBXX_DIR_INGRESS);
+ mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
+ &cm->occ.cur, &cm->occ.max);
+ }
+ if (++masked_count == cb_ctx.masked_count)
+ break;
+ }
+ masked_count = 0;
+ for (local_port = cb_ctx.local_port_1;
+ local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
+ if (!mlxsw_sp->ports[local_port])
+ continue;
+ for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++) {
+ cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
+ MLXSW_REG_SBXX_DIR_EGRESS);
+ mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
+ &cm->occ.cur, &cm->occ.max);
+ }
+ if (++masked_count == cb_ctx.masked_count)
+ break;
+ }
+}
+
+int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ u16 local_port, local_port_1, last_local_port;
+ struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
+ u8 masked_count, current_page = 0;
+ unsigned long cb_priv = 0;
+ LIST_HEAD(bulk_list);
+ char *sbsr_pl;
+ int i;
+ int err;
+ int err2;
+
+ sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
+ if (!sbsr_pl)
+ return -ENOMEM;
+
+ local_port = MLXSW_PORT_CPU_PORT;
+next_batch:
+ local_port_1 = local_port;
+ masked_count = 0;
+ mlxsw_reg_sbsr_pack(sbsr_pl, false);
+ mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page);
+ last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
+ MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;
+
+ for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
+ mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
+ for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
+ mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
+ for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
+ if (!mlxsw_sp->ports[local_port])
+ continue;
+ if (local_port > last_local_port) {
+ current_page++;
+ goto do_query;
+ }
+ if (local_port != MLXSW_PORT_CPU_PORT) {
+ /* Ingress quotas are not supported for the CPU port */
+ mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
+ local_port, 1);
+ }
+ mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
+ for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
+ err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
+ &bulk_list);
+ if (err)
+ goto out;
+ }
+ if (++masked_count == MASKED_COUNT_MAX)
+ goto do_query;
+ }
+
+do_query:
+ cb_ctx.masked_count = masked_count;
+ cb_ctx.local_port_1 = local_port_1;
+ memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
+ err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
+ &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
+ cb_priv);
+ if (err)
+ goto out;
+ if (local_port < mlxsw_core_max_ports(mlxsw_core)) {
+ local_port++;
+ goto next_batch;
+ }
+
+out:
+ err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
+ if (!err)
+ err = err2;
+ kfree(sbsr_pl);
+ return err;
+}
+
+int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ u16 local_port, last_local_port;
+ LIST_HEAD(bulk_list);
+ unsigned int masked_count;
+ u8 current_page = 0;
+ char *sbsr_pl;
+ int i;
+ int err;
+ int err2;
+
+ sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
+ if (!sbsr_pl)
+ return -ENOMEM;
+
+ local_port = MLXSW_PORT_CPU_PORT;
+next_batch:
+ masked_count = 0;
+ mlxsw_reg_sbsr_pack(sbsr_pl, true);
+ mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page);
+ last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
+ MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;
+
+ for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
+ mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
+ for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
+ mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
+ for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
+ if (!mlxsw_sp->ports[local_port])
+ continue;
+ if (local_port > last_local_port) {
+ current_page++;
+ goto do_query;
+ }
+ if (local_port != MLXSW_PORT_CPU_PORT) {
+ /* Ingress quotas are not supported for the CPU port */
+ mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
+ local_port, 1);
+ }
+ mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
+ for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
+ err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
+ &bulk_list);
+ if (err)
+ goto out;
+ }
+ if (++masked_count == MASKED_COUNT_MAX)
+ goto do_query;
+ }
+
+do_query:
+ err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
+ &bulk_list, NULL, 0);
+ if (err)
+ goto out;
+ if (local_port < mlxsw_core_max_ports(mlxsw_core)) {
+ local_port++;
+ goto next_batch;
+ }
+
+out:
+ err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
+ if (!err)
+ err = err2;
+ kfree(sbsr_pl);
+ return err;
+}
+
+int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_cur, u32 *p_max)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port =
+ mlxsw_core_port_driver_priv(mlxsw_core_port);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u16 local_port = mlxsw_sp_port->local_port;
+ struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
+ pool_index);
+
+ *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur);
+ *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max);
+ return 0;
+}
+
+int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port =
+ mlxsw_core_port_driver_priv(mlxsw_core_port);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u16 local_port = mlxsw_sp_port->local_port;
+ u8 pg_buff = tc_index;
+ enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
+ struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
+ pg_buff, dir);
+
+ *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.cur);
+ *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.max);
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
new file mode 100644
index 000000000..ee59c7915
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/spinlock.h>
+
+#include "spectrum_cnt.h"
+
+struct mlxsw_sp_counter_sub_pool {
+ u64 size;
+ unsigned int base_index;
+ enum mlxsw_res_id entry_size_res_id;
+ const char *resource_name; /* devlink resource name */
+ u64 resource_id; /* devlink resource id */
+ unsigned int entry_size;
+ unsigned int bank_count;
+ atomic_t active_entries_count;
+};
+
+struct mlxsw_sp_counter_pool {
+ u64 pool_size;
+ unsigned long *usage; /* Usage bitmap */
+ spinlock_t counter_pool_lock; /* Protects counter pool allocations */
+ atomic_t active_entries_count;
+ unsigned int sub_pools_count;
+ struct mlxsw_sp_counter_sub_pool sub_pools[];
+};
+
+static const struct mlxsw_sp_counter_sub_pool mlxsw_sp_counter_sub_pools[] = {
+ [MLXSW_SP_COUNTER_SUB_POOL_FLOW] = {
+ .entry_size_res_id = MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES,
+ .resource_name = MLXSW_SP_RESOURCE_NAME_COUNTERS_FLOW,
+ .resource_id = MLXSW_SP_RESOURCE_COUNTERS_FLOW,
+ .bank_count = 6,
+ },
+ [MLXSW_SP_COUNTER_SUB_POOL_RIF] = {
+ .entry_size_res_id = MLXSW_RES_ID_COUNTER_SIZE_ROUTER_BASIC,
+ .resource_name = MLXSW_SP_RESOURCE_NAME_COUNTERS_RIF,
+ .resource_id = MLXSW_SP_RESOURCE_COUNTERS_RIF,
+ .bank_count = 2,
+ }
+};
+
+static u64 mlxsw_sp_counter_sub_pool_occ_get(void *priv)
+{
+ const struct mlxsw_sp_counter_sub_pool *sub_pool = priv;
+
+ return atomic_read(&sub_pool->active_entries_count);
+}
+
+static int mlxsw_sp_counter_sub_pools_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp_counter_sub_pool *sub_pool;
+ unsigned int base_index = 0;
+ enum mlxsw_res_id res_id;
+ int err;
+ int i;
+
+ for (i = 0; i < pool->sub_pools_count; i++) {
+ sub_pool = &pool->sub_pools[i];
+ res_id = sub_pool->entry_size_res_id;
+
+ if (!mlxsw_core_res_valid(mlxsw_sp->core, res_id))
+ return -EIO;
+ sub_pool->entry_size = mlxsw_core_res_get(mlxsw_sp->core,
+ res_id);
+ err = devl_resource_size_get(devlink,
+ sub_pool->resource_id,
+ &sub_pool->size);
+ if (err)
+ goto err_resource_size_get;
+
+ devl_resource_occ_get_register(devlink,
+ sub_pool->resource_id,
+ mlxsw_sp_counter_sub_pool_occ_get,
+ sub_pool);
+
+ sub_pool->base_index = base_index;
+ base_index += sub_pool->size;
+ atomic_set(&sub_pool->active_entries_count, 0);
+ }
+ return 0;
+
+err_resource_size_get:
+ for (i--; i >= 0; i--) {
+ sub_pool = &pool->sub_pools[i];
+
+ devl_resource_occ_get_unregister(devlink,
+ sub_pool->resource_id);
+ }
+ return err;
+}
+
+static void mlxsw_sp_counter_sub_pools_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp_counter_sub_pool *sub_pool;
+ int i;
+
+ for (i = 0; i < pool->sub_pools_count; i++) {
+ sub_pool = &pool->sub_pools[i];
+
+ WARN_ON(atomic_read(&sub_pool->active_entries_count));
+ devl_resource_occ_get_unregister(devlink,
+ sub_pool->resource_id);
+ }
+}
+
+static u64 mlxsw_sp_counter_pool_occ_get(void *priv)
+{
+ const struct mlxsw_sp_counter_pool *pool = priv;
+
+ return atomic_read(&pool->active_entries_count);
+}
+
+int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
+{
+ unsigned int sub_pools_count = ARRAY_SIZE(mlxsw_sp_counter_sub_pools);
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp_counter_pool *pool;
+ int err;
+
+ pool = kzalloc(struct_size(pool, sub_pools, sub_pools_count),
+ GFP_KERNEL);
+ if (!pool)
+ return -ENOMEM;
+ mlxsw_sp->counter_pool = pool;
+ pool->sub_pools_count = sub_pools_count;
+ memcpy(pool->sub_pools, mlxsw_sp_counter_sub_pools,
+ flex_array_size(pool, sub_pools, pool->sub_pools_count));
+ spin_lock_init(&pool->counter_pool_lock);
+ atomic_set(&pool->active_entries_count, 0);
+
+ err = devl_resource_size_get(devlink, MLXSW_SP_RESOURCE_COUNTERS,
+ &pool->pool_size);
+ if (err)
+ goto err_pool_resource_size_get;
+ devl_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_COUNTERS,
+ mlxsw_sp_counter_pool_occ_get, pool);
+
+ pool->usage = bitmap_zalloc(pool->pool_size, GFP_KERNEL);
+ if (!pool->usage) {
+ err = -ENOMEM;
+ goto err_usage_alloc;
+ }
+
+ err = mlxsw_sp_counter_sub_pools_init(mlxsw_sp);
+ if (err)
+ goto err_sub_pools_init;
+
+ return 0;
+
+err_sub_pools_init:
+ bitmap_free(pool->usage);
+err_usage_alloc:
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_COUNTERS);
+err_pool_resource_size_get:
+ kfree(pool);
+ return err;
+}
+
+void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+ mlxsw_sp_counter_sub_pools_fini(mlxsw_sp);
+ WARN_ON(find_first_bit(pool->usage, pool->pool_size) !=
+ pool->pool_size);
+ WARN_ON(atomic_read(&pool->active_entries_count));
+ bitmap_free(pool->usage);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_COUNTERS);
+ kfree(pool);
+}
+
+int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
+ unsigned int *p_counter_index)
+{
+ struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
+ struct mlxsw_sp_counter_sub_pool *sub_pool;
+ unsigned int entry_index;
+ unsigned int stop_index;
+ int i, err;
+
+ sub_pool = &pool->sub_pools[sub_pool_id];
+ stop_index = sub_pool->base_index + sub_pool->size;
+ entry_index = sub_pool->base_index;
+
+ spin_lock(&pool->counter_pool_lock);
+ entry_index = find_next_zero_bit(pool->usage, stop_index, entry_index);
+ if (entry_index == stop_index) {
+ err = -ENOBUFS;
+ goto err_alloc;
+ }
+ /* A sub-pool's size is not necessarily a multiple of the entry size,
+ * so make sure this allocation does not spill past the sub-pool's end.
+ */
+ if (entry_index + sub_pool->entry_size > stop_index) {
+ err = -ENOBUFS;
+ goto err_alloc;
+ }
+ for (i = 0; i < sub_pool->entry_size; i++)
+ __set_bit(entry_index + i, pool->usage);
+ spin_unlock(&pool->counter_pool_lock);
+
+ *p_counter_index = entry_index;
+ atomic_add(sub_pool->entry_size, &sub_pool->active_entries_count);
+ atomic_add(sub_pool->entry_size, &pool->active_entries_count);
+ return 0;
+
+err_alloc:
+ spin_unlock(&pool->counter_pool_lock);
+ return err;
+}
+
+void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
+ unsigned int counter_index)
+{
+ struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
+ struct mlxsw_sp_counter_sub_pool *sub_pool;
+ int i;
+
+ if (WARN_ON(counter_index >= pool->pool_size))
+ return;
+ sub_pool = &pool->sub_pools[sub_pool_id];
+ spin_lock(&pool->counter_pool_lock);
+ for (i = 0; i < sub_pool->entry_size; i++)
+ __clear_bit(counter_index + i, pool->usage);
+ spin_unlock(&pool->counter_pool_lock);
+ atomic_sub(sub_pool->entry_size, &sub_pool->active_entries_count);
+ atomic_sub(sub_pool->entry_size, &pool->active_entries_count);
+}
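+
+/* Illustrative caller (editor's sketch, not part of this patch): how a user
+ * of the pool API above would typically reserve and later release a flow
+ * counter. The hardware programming step is elided.
+ */
+static int mlxsw_sp_counter_usage_sketch(struct mlxsw_sp *mlxsw_sp)
+{
+ unsigned int counter_index;
+ int err;
+
+ err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
+ &counter_index);
+ if (err)
+ return err;
+ /* ... program counter_index into a flow action here ... */
+ mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
+ counter_index);
+ return 0;
+}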
+
+int mlxsw_sp_counter_resources_register(struct mlxsw_core *mlxsw_core)
+{
+ static struct devlink_resource_size_params size_params;
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ const struct mlxsw_sp_counter_sub_pool *sub_pool;
+ unsigned int total_bank_config;
+ u64 sub_pool_size;
+ u64 base_index;
+ u64 pool_size;
+ u64 bank_size;
+ int err;
+ int i;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, COUNTER_POOL_SIZE) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_core, COUNTER_BANK_SIZE))
+ return -EIO;
+
+ pool_size = MLXSW_CORE_RES_GET(mlxsw_core, COUNTER_POOL_SIZE);
+ bank_size = MLXSW_CORE_RES_GET(mlxsw_core, COUNTER_BANK_SIZE);
+
+ devlink_resource_size_params_init(&size_params, pool_size,
+ pool_size, bank_size,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ err = devl_resource_register(devlink,
+ MLXSW_SP_RESOURCE_NAME_COUNTERS,
+ pool_size,
+ MLXSW_SP_RESOURCE_COUNTERS,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
+ if (err)
+ return err;
+
+ /* Allocation is based on bank count which should be
+ * specified for each sub pool statically.
+ */
+ total_bank_config = 0;
+ base_index = 0;
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++) {
+ sub_pool = &mlxsw_sp_counter_sub_pools[i];
+ sub_pool_size = sub_pool->bank_count * bank_size;
+ /* The last bank can't be fully used */
+ if (base_index + sub_pool_size > pool_size)
+ sub_pool_size = pool_size - base_index;
+ base_index += sub_pool_size;
+
+ devlink_resource_size_params_init(&size_params, sub_pool_size,
+ sub_pool_size, bank_size,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ err = devl_resource_register(devlink,
+ sub_pool->resource_name,
+ sub_pool_size,
+ sub_pool->resource_id,
+ MLXSW_SP_RESOURCE_COUNTERS,
+ &size_params);
+ if (err)
+ return err;
+ total_bank_config += sub_pool->bank_count;
+ }
+
+ /* Check config is valid, no bank over subscription */
+ if (WARN_ON(total_bank_config > div64_u64(pool_size, bank_size) + 1))
+ return -EINVAL;
+
+ return 0;
+}
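+
+/* Worked example of the sizing above (hypothetical numbers, not read from any
+ * device): with COUNTER_POOL_SIZE = 32768 entries and COUNTER_BANK_SIZE = 4096,
+ * the flow sub-pool registers 6 * 4096 = 24576 entries and the RIF sub-pool
+ * 2 * 4096 = 8192, exactly filling the pool. total_bank_config is then 8,
+ * which does not exceed pool_size / bank_size + 1 = 9, so the WARN_ON above
+ * stays quiet.
+ */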
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
new file mode 100644
index 000000000..15c8d4de8
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_SPECTRUM_CNT_H
+#define _MLXSW_SPECTRUM_CNT_H
+
+#include "core.h"
+#include "spectrum.h"
+
+enum mlxsw_sp_counter_sub_pool_id {
+ MLXSW_SP_COUNTER_SUB_POOL_RIF,
+ MLXSW_SP_COUNTER_SUB_POOL_FLOW,
+};
+
+int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
+ unsigned int *p_counter_index);
+void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
+ unsigned int counter_index);
+int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_counter_resources_register(struct mlxsw_core *mlxsw_core);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
new file mode 100644
index 000000000..aff6d4f35
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
@@ -0,0 +1,740 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/netdevice.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <net/dcbnl.h>
+
+#include "spectrum.h"
+#include "reg.h"
+
+static u8 mlxsw_sp_dcbnl_getdcbx(struct net_device __always_unused *dev)
+{
+ return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+}
+
+static u8 mlxsw_sp_dcbnl_setdcbx(struct net_device __always_unused *dev,
+ u8 mode)
+{
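+ /* A nonzero return rejects the requested DCBX mode; only host-managed
+ * IEEE DCBX is supported.
+ */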
+ return (mode != (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE)) ? 1 : 0;
+}
+
+static int mlxsw_sp_dcbnl_ieee_getets(struct net_device *dev,
+ struct ieee_ets *ets)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+ memcpy(ets, mlxsw_sp_port->dcb.ets, sizeof(*ets));
+
+ return 0;
+}
+
+static int mlxsw_sp_port_ets_validate(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct ieee_ets *ets)
+{
+ struct net_device *dev = mlxsw_sp_port->dev;
+ bool has_ets_tc = false;
+ int i, tx_bw_sum = 0;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_STRICT:
+ break;
+ case IEEE_8021QAZ_TSA_ETS:
+ has_ets_tc = true;
+ tx_bw_sum += ets->tc_tx_bw[i];
+ break;
+ default:
+ netdev_err(dev, "Only strict priority and ETS are supported\n");
+ return -EINVAL;
+ }
+
+ if (ets->prio_tc[i] >= IEEE_8021QAZ_MAX_TCS) {
+ netdev_err(dev, "Invalid TC\n");
+ return -EINVAL;
+ }
+ }
+
+ if (has_ets_tc && tx_bw_sum != 100) {
+ netdev_err(dev, "Total ETS bandwidth should equal 100\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_port_headroom_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct ieee_ets *ets)
+{
+ struct net_device *dev = mlxsw_sp_port->dev;
+ struct mlxsw_sp_hdroom hdroom;
+ int prio;
+ int err;
+
+ hdroom = *mlxsw_sp_port->hdroom;
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
+ hdroom.prios.prio[prio].ets_buf_idx = ets->prio_tc[prio];
+ mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+
+ err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
+ if (err) {
+ netdev_err(dev, "Failed to configure port's headroom\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int __mlxsw_sp_dcbnl_ieee_setets(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct ieee_ets *ets)
+{
+ struct ieee_ets *my_ets = mlxsw_sp_port->dcb.ets;
+ struct net_device *dev = mlxsw_sp_port->dev;
+ int i, err;
+
+ /* Egress configuration. */
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ bool dwrr = ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS;
+ u8 weight = ets->tc_tx_bw[i];
+
+ err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HR_SUBGROUP, i,
+ 0, dwrr, weight);
+ if (err) {
+ netdev_err(dev, "Failed to link subgroup ETS element %d to group\n",
+ i);
+ goto err_port_ets_set;
+ }
+ }
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
+ ets->prio_tc[i]);
+ if (err) {
+ netdev_err(dev, "Failed to map prio %d to TC %d\n", i,
+ ets->prio_tc[i]);
+ goto err_port_prio_tc_set;
+ }
+ }
+
+ /* Ingress configuration. */
+ err = mlxsw_sp_port_headroom_ets_set(mlxsw_sp_port, ets);
+ if (err)
+ goto err_port_headroom_set;
+
+ return 0;
+
+err_port_headroom_set:
+ i = IEEE_8021QAZ_MAX_TCS;
+err_port_prio_tc_set:
+ for (i--; i >= 0; i--)
+ mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, my_ets->prio_tc[i]);
+ i = IEEE_8021QAZ_MAX_TCS;
+err_port_ets_set:
+ for (i--; i >= 0; i--) {
+ bool dwrr = my_ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS;
+ u8 weight = my_ets->tc_tx_bw[i];
+
+ err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HR_SUBGROUP, i,
+ 0, dwrr, weight);
+ }
+ return err;
+}
+
+static int mlxsw_sp_dcbnl_ieee_setets(struct net_device *dev,
+ struct ieee_ets *ets)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err;
+
+ err = mlxsw_sp_port_ets_validate(mlxsw_sp_port, ets);
+ if (err)
+ return err;
+
+ err = __mlxsw_sp_dcbnl_ieee_setets(mlxsw_sp_port, ets);
+ if (err)
+ return err;
+
+ memcpy(mlxsw_sp_port->dcb.ets, ets, sizeof(*ets));
+ mlxsw_sp_port->dcb.ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
+
+ return 0;
+}
+
+static int mlxsw_sp_dcbnl_app_validate(struct net_device *dev,
+ struct dcb_app *app)
+{
+ if (app->priority >= IEEE_8021QAZ_MAX_TCS) {
+ netdev_err(dev, "APP entry with priority value %u is invalid\n",
+ app->priority);
+ return -EINVAL;
+ }
+
+ switch (app->selector) {
+ case IEEE_8021QAZ_APP_SEL_DSCP:
+ if (app->protocol >= 64) {
+ netdev_err(dev, "DSCP APP entry with protocol value %u is invalid\n",
+ app->protocol);
+ return -EINVAL;
+ }
+ break;
+
+ case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
+ if (app->protocol) {
+ netdev_err(dev, "EtherType APP entries with protocol value != 0 not supported\n");
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ netdev_err(dev, "APP entries with selector %u not supported\n",
+ app->selector);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static u8
+mlxsw_sp_port_dcb_app_default_prio(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ u8 prio_mask;
+
+ prio_mask = dcb_ieee_getapp_default_prio_mask(mlxsw_sp_port->dev);
+ if (prio_mask)
+ /* Take the highest configured priority. */
+ return fls(prio_mask) - 1;
+
+ return 0;
+}
+
+static void
+mlxsw_sp_port_dcb_app_dscp_prio_map(struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 default_prio,
+ struct dcb_ieee_app_dscp_map *map)
+{
+ int i;
+
+ dcb_ieee_getapp_dscp_prio_mask_map(mlxsw_sp_port->dev, map);
+ for (i = 0; i < ARRAY_SIZE(map->map); ++i) {
+ if (map->map[i])
+ map->map[i] = fls(map->map[i]) - 1;
+ else
+ map->map[i] = default_prio;
+ }
+}
+
+static bool
+mlxsw_sp_port_dcb_app_prio_dscp_map(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct dcb_ieee_app_prio_map *map)
+{
+ bool have_dscp = false;
+ int i;
+
+ dcb_ieee_getapp_prio_dscp_mask_map(mlxsw_sp_port->dev, map);
+ for (i = 0; i < ARRAY_SIZE(map->map); ++i) {
+ if (map->map[i]) {
+ map->map[i] = fls64(map->map[i]) - 1;
+ have_dscp = true;
+ }
+ }
+
+ return have_dscp;
+}
+
+static int
+mlxsw_sp_port_dcb_app_update_qpts(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_qpts_trust_state ts)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qpts_pl[MLXSW_REG_QPTS_LEN];
+
+ mlxsw_reg_qpts_pack(qpts_pl, mlxsw_sp_port->local_port, ts);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpts), qpts_pl);
+}
+
+static int
+mlxsw_sp_port_dcb_app_update_qrwe(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool rewrite_dscp)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qrwe_pl[MLXSW_REG_QRWE_LEN];
+
+ mlxsw_reg_qrwe_pack(qrwe_pl, mlxsw_sp_port->local_port,
+ false, rewrite_dscp);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qrwe), qrwe_pl);
+}
+
+static int
+mlxsw_sp_port_dcb_toggle_trust(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_qpts_trust_state ts)
+{
+ bool rewrite_dscp = ts == MLXSW_REG_QPTS_TRUST_STATE_DSCP;
+ int err;
+
+ if (mlxsw_sp_port->dcb.trust_state == ts)
+ return 0;
+
+ err = mlxsw_sp_port_dcb_app_update_qpts(mlxsw_sp_port, ts);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_port_dcb_app_update_qrwe(mlxsw_sp_port, rewrite_dscp);
+ if (err)
+ goto err_update_qrwe;
+
+ mlxsw_sp_port->dcb.trust_state = ts;
+ return 0;
+
+err_update_qrwe:
+ mlxsw_sp_port_dcb_app_update_qpts(mlxsw_sp_port,
+ mlxsw_sp_port->dcb.trust_state);
+ return err;
+}
+
+static int
+mlxsw_sp_port_dcb_app_update_qpdp(struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 default_prio)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qpdp_pl[MLXSW_REG_QPDP_LEN];
+
+ mlxsw_reg_qpdp_pack(qpdp_pl, mlxsw_sp_port->local_port, default_prio);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpdp), qpdp_pl);
+}
+
+static int
+mlxsw_sp_port_dcb_app_update_qpdpm(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct dcb_ieee_app_dscp_map *map)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qpdpm_pl[MLXSW_REG_QPDPM_LEN];
+ short int i;
+
+ mlxsw_reg_qpdpm_pack(qpdpm_pl, mlxsw_sp_port->local_port);
+ for (i = 0; i < ARRAY_SIZE(map->map); ++i)
+ mlxsw_reg_qpdpm_dscp_pack(qpdpm_pl, i, map->map[i]);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpdpm), qpdpm_pl);
+}
+
+static int
+mlxsw_sp_port_dcb_app_update_qpdsm(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct dcb_ieee_app_prio_map *map)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qpdsm_pl[MLXSW_REG_QPDSM_LEN];
+ short int i;
+
+ mlxsw_reg_qpdsm_pack(qpdsm_pl, mlxsw_sp_port->local_port);
+ for (i = 0; i < ARRAY_SIZE(map->map); ++i)
+ mlxsw_reg_qpdsm_prio_pack(qpdsm_pl, i, map->map[i]);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpdsm), qpdsm_pl);
+}
+
+static int mlxsw_sp_port_dcb_app_update(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct dcb_ieee_app_prio_map prio_map;
+ struct dcb_ieee_app_dscp_map dscp_map;
+ u8 default_prio;
+ bool have_dscp;
+ int err;
+
+ default_prio = mlxsw_sp_port_dcb_app_default_prio(mlxsw_sp_port);
+ err = mlxsw_sp_port_dcb_app_update_qpdp(mlxsw_sp_port, default_prio);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "Couldn't configure port default priority\n");
+ return err;
+ }
+
+ have_dscp = mlxsw_sp_port_dcb_app_prio_dscp_map(mlxsw_sp_port,
+ &prio_map);
+
+ mlxsw_sp_port_dcb_app_dscp_prio_map(mlxsw_sp_port, default_prio,
+ &dscp_map);
+ err = mlxsw_sp_port_dcb_app_update_qpdpm(mlxsw_sp_port,
+ &dscp_map);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "Couldn't configure priority map\n");
+ return err;
+ }
+
+ err = mlxsw_sp_port_dcb_app_update_qpdsm(mlxsw_sp_port,
+ &prio_map);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "Couldn't configure DSCP rewrite map\n");
+ return err;
+ }
+
+ if (!have_dscp) {
+ err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port,
+ MLXSW_REG_QPTS_TRUST_STATE_PCP);
+ if (err)
+ netdev_err(mlxsw_sp_port->dev, "Couldn't switch to trust L2\n");
+ return err;
+ }
+
+ err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port,
+ MLXSW_REG_QPTS_TRUST_STATE_DSCP);
+ if (err) {
+ /* A failure to set trust DSCP means that the QPDPM and QPDSM
+ * maps installed above are not in effect. And since we are here
+ * attempting to set trust DSCP, we couldn't have attempted to
+ * switch trust to PCP. Thus no cleanup is necessary.
+ */
+ netdev_err(mlxsw_sp_port->dev, "Couldn't switch to trust L3\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_dcbnl_ieee_setapp(struct net_device *dev,
+ struct dcb_app *app)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err;
+
+ err = mlxsw_sp_dcbnl_app_validate(dev, app);
+ if (err)
+ return err;
+
+ err = dcb_ieee_setapp(dev, app);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_port_dcb_app_update(mlxsw_sp_port);
+ if (err)
+ goto err_update;
+
+ return 0;
+
+err_update:
+ dcb_ieee_delapp(dev, app);
+ return err;
+}
+
+static int mlxsw_sp_dcbnl_ieee_delapp(struct net_device *dev,
+ struct dcb_app *app)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err;
+
+ err = dcb_ieee_delapp(dev, app);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_port_dcb_app_update(mlxsw_sp_port);
+ if (err)
+ netdev_err(dev, "Failed to update DCB APP configuration\n");
+ return 0;
+}
+
+static int mlxsw_sp_dcbnl_ieee_getmaxrate(struct net_device *dev,
+ struct ieee_maxrate *maxrate)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+ memcpy(maxrate, mlxsw_sp_port->dcb.maxrate, sizeof(*maxrate));
+
+ return 0;
+}
+
+static int mlxsw_sp_dcbnl_ieee_setmaxrate(struct net_device *dev,
+ struct ieee_maxrate *maxrate)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct ieee_maxrate *my_maxrate = mlxsw_sp_port->dcb.maxrate;
+ int err, i;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HR_SUBGROUP,
+ i, 0,
+ maxrate->tc_maxrate[i], 0);
+ if (err) {
+ netdev_err(dev, "Failed to set maxrate for TC %d\n", i);
+ goto err_port_ets_maxrate_set;
+ }
+ }
+
+ memcpy(mlxsw_sp_port->dcb.maxrate, maxrate, sizeof(*maxrate));
+
+ return 0;
+
+err_port_ets_maxrate_set:
+ for (i--; i >= 0; i--)
+ mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HR_SUBGROUP,
+ i, 0,
+ my_maxrate->tc_maxrate[i], 0);
+ return err;
+}
+
+static int mlxsw_sp_port_pfc_cnt_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 prio)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct ieee_pfc *my_pfc = mlxsw_sp_port->dcb.pfc;
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+ int err;
+
+ mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port,
+ MLXSW_REG_PPCNT_PRIO_CNT, prio);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
+ if (err)
+ return err;
+
+ my_pfc->requests[prio] = mlxsw_reg_ppcnt_tx_pause_get(ppcnt_pl);
+ my_pfc->indications[prio] = mlxsw_reg_ppcnt_rx_pause_get(ppcnt_pl);
+
+ return 0;
+}
+
+static int mlxsw_sp_dcbnl_ieee_getpfc(struct net_device *dev,
+ struct ieee_pfc *pfc)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err, i;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_pfc_cnt_get(mlxsw_sp_port, i);
+ if (err) {
+ netdev_err(dev, "Failed to get PFC count for priority %d\n",
+ i);
+ return err;
+ }
+ }
+
+ memcpy(pfc, mlxsw_sp_port->dcb.pfc, sizeof(*pfc));
+
+ return 0;
+}
+
+static int mlxsw_sp_port_pfc_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct ieee_pfc *pfc)
+{
+ char pfcc_pl[MLXSW_REG_PFCC_LEN];
+
+ mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
+ mlxsw_reg_pfcc_pprx_set(pfcc_pl, mlxsw_sp_port->link.rx_pause);
+ mlxsw_reg_pfcc_pptx_set(pfcc_pl, mlxsw_sp_port->link.tx_pause);
+ mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc->pfc_en);
+
+ return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
+ pfcc_pl);
+}
+
+static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
+ struct ieee_pfc *pfc)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
+ struct mlxsw_sp_hdroom orig_hdroom;
+ struct mlxsw_sp_hdroom hdroom;
+ int prio;
+ int err;
+
+ if (pause_en && pfc->pfc_en) {
+ netdev_err(dev, "PAUSE frames already enabled on port\n");
+ return -EINVAL;
+ }
+
+ orig_hdroom = *mlxsw_sp_port->hdroom;
+
+ hdroom = orig_hdroom;
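+ /* pfc->delay is the link round-trip allowance in bit times (802.1Qbb);
+ * convert it to bytes for headroom accounting.
+ */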
+ if (pfc->pfc_en)
+ hdroom.delay_bytes = DIV_ROUND_UP(pfc->delay, BITS_PER_BYTE);
+ else
+ hdroom.delay_bytes = 0;
+
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
+ hdroom.prios.prio[prio].lossy = !(pfc->pfc_en & BIT(prio));
+
+ mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+
+ err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
+ if (err) {
+ netdev_err(dev, "Failed to configure port's headroom for PFC\n");
+ return err;
+ }
+
+ err = mlxsw_sp_port_pfc_set(mlxsw_sp_port, pfc);
+ if (err) {
+ netdev_err(dev, "Failed to configure PFC\n");
+ goto err_port_pfc_set;
+ }
+
+ memcpy(mlxsw_sp_port->dcb.pfc, pfc, sizeof(*pfc));
+ mlxsw_sp_port->dcb.pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
+
+ return 0;
+
+err_port_pfc_set:
+ mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
+ return err;
+}
+
+static int mlxsw_sp_dcbnl_getbuffer(struct net_device *dev, struct dcbnl_buffer *buf)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp_hdroom *hdroom = mlxsw_sp_port->hdroom;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ int prio;
+ int i;
+
+ buf->total_size = 0;
+
+ BUILD_BUG_ON(DCBX_MAX_BUFFERS > MLXSW_SP_PB_COUNT);
+ for (i = 0; i < MLXSW_SP_PB_COUNT; i++) {
+ u32 bytes = mlxsw_sp_cells_bytes(mlxsw_sp, hdroom->bufs.buf[i].size_cells);
+
+ if (i < DCBX_MAX_BUFFERS)
+ buf->buffer_size[i] = bytes;
+ buf->total_size += bytes;
+ }
+
+ buf->total_size += mlxsw_sp_cells_bytes(mlxsw_sp, hdroom->int_buf.size_cells);
+
+ for (prio = 0; prio < IEEE_8021Q_MAX_PRIORITIES; prio++)
+ buf->prio2buffer[prio] = hdroom->prios.prio[prio].buf_idx;
+
+ return 0;
+}
+
+static int mlxsw_sp_dcbnl_setbuffer(struct net_device *dev, struct dcbnl_buffer *buf)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_hdroom hdroom;
+ int prio;
+ int i;
+
+ hdroom = *mlxsw_sp_port->hdroom;
+
+ if (hdroom.mode != MLXSW_SP_HDROOM_MODE_TC) {
+ netdev_err(dev, "The use of dcbnl_setbuffer is only allowed if egress is configured using TC\n");
+ return -EINVAL;
+ }
+
+ for (prio = 0; prio < IEEE_8021Q_MAX_PRIORITIES; prio++)
+ hdroom.prios.prio[prio].set_buf_idx = buf->prio2buffer[prio];
+
+ BUILD_BUG_ON(DCBX_MAX_BUFFERS > MLXSW_SP_PB_COUNT);
+ for (i = 0; i < DCBX_MAX_BUFFERS; i++)
+ hdroom.bufs.buf[i].set_size_cells = mlxsw_sp_bytes_cells(mlxsw_sp,
+ buf->buffer_size[i]);
+
+ mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+ return mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
+}
+
+static const struct dcbnl_rtnl_ops mlxsw_sp_dcbnl_ops = {
+ .ieee_getets = mlxsw_sp_dcbnl_ieee_getets,
+ .ieee_setets = mlxsw_sp_dcbnl_ieee_setets,
+ .ieee_getmaxrate = mlxsw_sp_dcbnl_ieee_getmaxrate,
+ .ieee_setmaxrate = mlxsw_sp_dcbnl_ieee_setmaxrate,
+ .ieee_getpfc = mlxsw_sp_dcbnl_ieee_getpfc,
+ .ieee_setpfc = mlxsw_sp_dcbnl_ieee_setpfc,
+ .ieee_setapp = mlxsw_sp_dcbnl_ieee_setapp,
+ .ieee_delapp = mlxsw_sp_dcbnl_ieee_delapp,
+
+ .getdcbx = mlxsw_sp_dcbnl_getdcbx,
+ .setdcbx = mlxsw_sp_dcbnl_setdcbx,
+
+ .dcbnl_getbuffer = mlxsw_sp_dcbnl_getbuffer,
+ .dcbnl_setbuffer = mlxsw_sp_dcbnl_setbuffer,
+};
+
+static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_port->dcb.ets = kzalloc(sizeof(*mlxsw_sp_port->dcb.ets),
+ GFP_KERNEL);
+ if (!mlxsw_sp_port->dcb.ets)
+ return -ENOMEM;
+
+ mlxsw_sp_port->dcb.ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
+
+ return 0;
+}
+
+static void mlxsw_sp_port_ets_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ kfree(mlxsw_sp_port->dcb.ets);
+}
+
+static int mlxsw_sp_port_maxrate_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int i;
+
+ mlxsw_sp_port->dcb.maxrate = kmalloc(sizeof(*mlxsw_sp_port->dcb.maxrate),
+ GFP_KERNEL);
+ if (!mlxsw_sp_port->dcb.maxrate)
+ return -ENOMEM;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ mlxsw_sp_port->dcb.maxrate->tc_maxrate[i] = MLXSW_REG_QEEC_MAS_DIS;
+
+ return 0;
+}
+
+static void mlxsw_sp_port_maxrate_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ kfree(mlxsw_sp_port->dcb.maxrate);
+}
+
+static int mlxsw_sp_port_pfc_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_port->dcb.pfc = kzalloc(sizeof(*mlxsw_sp_port->dcb.pfc),
+ GFP_KERNEL);
+ if (!mlxsw_sp_port->dcb.pfc)
+ return -ENOMEM;
+
+ mlxsw_sp_port->dcb.pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
+
+ return 0;
+}
+
+static void mlxsw_sp_port_pfc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ kfree(mlxsw_sp_port->dcb.pfc);
+}
+
+int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int err;
+
+ err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
+ if (err)
+ return err;
+ err = mlxsw_sp_port_maxrate_init(mlxsw_sp_port);
+ if (err)
+ goto err_port_maxrate_init;
+ err = mlxsw_sp_port_pfc_init(mlxsw_sp_port);
+ if (err)
+ goto err_port_pfc_init;
+
+ mlxsw_sp_port->dcb.trust_state = MLXSW_REG_QPTS_TRUST_STATE_PCP;
+ mlxsw_sp_port->dev->dcbnl_ops = &mlxsw_sp_dcbnl_ops;
+
+ return 0;
+
+err_port_pfc_init:
+ mlxsw_sp_port_maxrate_fini(mlxsw_sp_port);
+err_port_maxrate_init:
+ mlxsw_sp_port_ets_fini(mlxsw_sp_port);
+ return err;
+}
+
+void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_port_pfc_fini(mlxsw_sp_port);
+ mlxsw_sp_port_maxrate_fini(mlxsw_sp_port);
+ mlxsw_sp_port_ets_fini(mlxsw_sp_port);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
new file mode 100644
index 000000000..5416093c0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -0,0 +1,1308 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <net/devlink.h>
+
+#include "spectrum.h"
+#include "spectrum_dpipe.h"
+#include "spectrum_router.h"
+
+enum mlxsw_sp_field_metadata_id {
+ MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT,
+ MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD,
+ MLXSW_SP_DPIPE_FIELD_METADATA_L3_DROP,
+ MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_INDEX,
+ MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_SIZE,
+ MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_HASH_INDEX,
+};
+
+static struct devlink_dpipe_field mlxsw_sp_dpipe_fields_metadata[] = {
+ {
+ .name = "erif_port",
+ .id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT,
+ .bitwidth = 32,
+ .mapping_type = DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX,
+ },
+ {
+ .name = "l3_forward",
+ .id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD,
+ .bitwidth = 1,
+ },
+ {
+ .name = "l3_drop",
+ .id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_DROP,
+ .bitwidth = 1,
+ },
+ {
+ .name = "adj_index",
+ .id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_INDEX,
+ .bitwidth = 32,
+ },
+ {
+ .name = "adj_size",
+ .id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_SIZE,
+ .bitwidth = 32,
+ },
+ {
+ .name = "adj_hash_index",
+ .id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_HASH_INDEX,
+ .bitwidth = 32,
+ },
+};
+
+enum mlxsw_sp_dpipe_header_id {
+ MLXSW_SP_DPIPE_HEADER_METADATA,
+};
+
+static struct devlink_dpipe_header mlxsw_sp_dpipe_header_metadata = {
+ .name = "mlxsw_meta",
+ .id = MLXSW_SP_DPIPE_HEADER_METADATA,
+ .fields = mlxsw_sp_dpipe_fields_metadata,
+ .fields_count = ARRAY_SIZE(mlxsw_sp_dpipe_fields_metadata),
+};
+
+static struct devlink_dpipe_header *mlxsw_dpipe_headers[] = {
+ &mlxsw_sp_dpipe_header_metadata,
+ &devlink_dpipe_header_ethernet,
+ &devlink_dpipe_header_ipv4,
+ &devlink_dpipe_header_ipv6,
+};
+
+static struct devlink_dpipe_headers mlxsw_sp_dpipe_headers = {
+ .headers = mlxsw_dpipe_headers,
+ .headers_count = ARRAY_SIZE(mlxsw_dpipe_headers),
+};
+
+static int mlxsw_sp_dpipe_table_erif_actions_dump(void *priv,
+ struct sk_buff *skb)
+{
+ struct devlink_dpipe_action action = {0};
+ int err;
+
+ action.type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY;
+ action.header = &mlxsw_sp_dpipe_header_metadata;
+ action.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD;
+
+ err = devlink_dpipe_action_put(skb, &action);
+ if (err)
+ return err;
+
+ action.type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY;
+ action.header = &mlxsw_sp_dpipe_header_metadata;
+ action.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_DROP;
+
+ return devlink_dpipe_action_put(skb, &action);
+}
+
+static int mlxsw_sp_dpipe_table_erif_matches_dump(void *priv,
+ struct sk_buff *skb)
+{
+ struct devlink_dpipe_match match = {0};
+
+ match.type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT;
+ match.header = &mlxsw_sp_dpipe_header_metadata;
+ match.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT;
+
+ return devlink_dpipe_match_put(skb, &match);
+}
+
+static void
+mlxsw_sp_erif_match_action_prepare(struct devlink_dpipe_match *match,
+ struct devlink_dpipe_action *action)
+{
+ action->type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY;
+ action->header = &mlxsw_sp_dpipe_header_metadata;
+ action->field_id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD;
+
+ match->type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT;
+ match->header = &mlxsw_sp_dpipe_header_metadata;
+ match->field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT;
+}
+
+static int mlxsw_sp_erif_entry_prepare(struct devlink_dpipe_entry *entry,
+ struct devlink_dpipe_value *match_value,
+ struct devlink_dpipe_match *match,
+ struct devlink_dpipe_value *action_value,
+ struct devlink_dpipe_action *action)
+{
+ entry->match_values = match_value;
+ entry->match_values_count = 1;
+
+ entry->action_values = action_value;
+ entry->action_values_count = 1;
+
+ match_value->match = match;
+ match_value->value_size = sizeof(u32);
+ match_value->value = kmalloc(match_value->value_size, GFP_KERNEL);
+ if (!match_value->value)
+ return -ENOMEM;
+
+ action_value->action = action;
+ action_value->value_size = sizeof(u32);
+ action_value->value = kmalloc(action_value->value_size, GFP_KERNEL);
+ if (!action_value->value)
+ goto err_action_alloc;
+ return 0;
+
+err_action_alloc:
+ kfree(match_value->value);
+ return -ENOMEM;
+}
+
+static int mlxsw_sp_erif_entry_get(struct mlxsw_sp *mlxsw_sp,
+ struct devlink_dpipe_entry *entry,
+ struct mlxsw_sp_rif *rif,
+ bool counters_enabled)
+{
+ u32 *action_value;
+ u32 *rif_value;
+ u64 cnt;
+ int err;
+
+ /* Set Match RIF index */
+ rif_value = entry->match_values->value;
+ *rif_value = mlxsw_sp_rif_index(rif);
+ entry->match_values->mapping_value = mlxsw_sp_rif_dev_ifindex(rif);
+ entry->match_values->mapping_valid = true;
+
+ /* Set Action Forwarding */
+ action_value = entry->action_values->value;
+ *action_value = 1;
+
+ entry->counter_valid = false;
+ entry->counter = 0;
+ entry->index = mlxsw_sp_rif_index(rif);
+
+ if (!counters_enabled)
+ return 0;
+
+ err = mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif,
+ MLXSW_SP_RIF_COUNTER_EGRESS,
+ &cnt);
+ if (!err) {
+ entry->counter = cnt;
+ entry->counter_valid = true;
+ }
+ return 0;
+}
+
+static int
+mlxsw_sp_dpipe_table_erif_entries_dump(void *priv, bool counters_enabled,
+ struct devlink_dpipe_dump_ctx *dump_ctx)
+{
+ struct devlink_dpipe_value match_value, action_value;
+ struct devlink_dpipe_action action = {0};
+ struct devlink_dpipe_match match = {0};
+ struct devlink_dpipe_entry entry = {0};
+ struct mlxsw_sp *mlxsw_sp = priv;
+ unsigned int rif_count;
+ int i, j;
+ int err;
+
+ memset(&match_value, 0, sizeof(match_value));
+ memset(&action_value, 0, sizeof(action_value));
+
+ mlxsw_sp_erif_match_action_prepare(&match, &action);
+ err = mlxsw_sp_erif_entry_prepare(&entry, &match_value, &match,
+ &action_value, &action);
+ if (err)
+ return err;
+
+ rif_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
+ mutex_lock(&mlxsw_sp->router->lock);
+ i = 0;
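+ /* Dump the table in batches: if the current netlink skb fills up
+ * (-EMSGSIZE), the dump context is closed and dumping resumes from the
+ * same RIF index i in a fresh one.
+ */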
+start_again:
+ err = devlink_dpipe_entry_ctx_prepare(dump_ctx);
+ if (err)
+ goto err_ctx_prepare;
+ j = 0;
+ for (; i < rif_count; i++) {
+ struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
+
+ if (!rif || !mlxsw_sp_rif_dev(rif))
+ continue;
+ err = mlxsw_sp_erif_entry_get(mlxsw_sp, &entry, rif,
+ counters_enabled);
+ if (err)
+ goto err_entry_get;
+ err = devlink_dpipe_entry_ctx_append(dump_ctx, &entry);
+ if (err) {
+ if (err == -EMSGSIZE) {
+ if (!j)
+ goto err_entry_append;
+ break;
+ }
+ goto err_entry_append;
+ }
+ j++;
+ }
+
+ devlink_dpipe_entry_ctx_close(dump_ctx);
+ if (i != rif_count)
+ goto start_again;
+ mutex_unlock(&mlxsw_sp->router->lock);
+
+ devlink_dpipe_entry_clear(&entry);
+ return 0;
+err_entry_append:
+err_entry_get:
+err_ctx_prepare:
+ mutex_unlock(&mlxsw_sp->router->lock);
+ devlink_dpipe_entry_clear(&entry);
+ return err;
+}
+
+static int mlxsw_sp_dpipe_table_erif_counters_update(void *priv, bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ int i;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
+ struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
+
+ if (!rif)
+ continue;
+ if (enable)
+ mlxsw_sp_rif_counter_alloc(rif,
+ MLXSW_SP_RIF_COUNTER_EGRESS);
+ else
+ mlxsw_sp_rif_counter_free(rif,
+ MLXSW_SP_RIF_COUNTER_EGRESS);
+ }
+ mutex_unlock(&mlxsw_sp->router->lock);
+ return 0;
+}
+
+static u64 mlxsw_sp_dpipe_table_erif_size_get(void *priv)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ return MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
+}
+
+static struct devlink_dpipe_table_ops mlxsw_sp_erif_ops = {
+ .matches_dump = mlxsw_sp_dpipe_table_erif_matches_dump,
+ .actions_dump = mlxsw_sp_dpipe_table_erif_actions_dump,
+ .entries_dump = mlxsw_sp_dpipe_table_erif_entries_dump,
+ .counters_set_update = mlxsw_sp_dpipe_table_erif_counters_update,
+ .size_get = mlxsw_sp_dpipe_table_erif_size_get,
+};
+
+static int mlxsw_sp_dpipe_erif_table_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+ return devl_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ERIF,
+ &mlxsw_sp_erif_ops,
+ mlxsw_sp, false);
+}
+
+static void mlxsw_sp_dpipe_erif_table_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+ devl_dpipe_table_unregister(devlink, MLXSW_SP_DPIPE_TABLE_NAME_ERIF);
+}
+
+static int mlxsw_sp_dpipe_table_host_matches_dump(struct sk_buff *skb, int type)
+{
+ struct devlink_dpipe_match match = {0};
+ int err;
+
+ match.type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT;
+ match.header = &mlxsw_sp_dpipe_header_metadata;
+ match.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT;
+
+ err = devlink_dpipe_match_put(skb, &match);
+ if (err)
+ return err;
+
+ switch (type) {
+ case AF_INET:
+ match.type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT;
+ match.header = &devlink_dpipe_header_ipv4;
+ match.field_id = DEVLINK_DPIPE_FIELD_IPV4_DST_IP;
+ break;
+ case AF_INET6:
+ match.type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT;
+ match.header = &devlink_dpipe_header_ipv6;
+ match.field_id = DEVLINK_DPIPE_FIELD_IPV6_DST_IP;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ return devlink_dpipe_match_put(skb, &match);
+}
+
+static int
+mlxsw_sp_dpipe_table_host4_matches_dump(void *priv, struct sk_buff *skb)
+{
+ return mlxsw_sp_dpipe_table_host_matches_dump(skb, AF_INET);
+}
+
+static int
+mlxsw_sp_dpipe_table_host_actions_dump(void *priv, struct sk_buff *skb)
+{
+ struct devlink_dpipe_action action = {0};
+
+ action.type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY;
+ action.header = &devlink_dpipe_header_ethernet;
+ action.field_id = DEVLINK_DPIPE_FIELD_ETHERNET_DST_MAC;
+
+ return devlink_dpipe_action_put(skb, &action);
+}
+
+enum mlxsw_sp_dpipe_table_host_match {
+ MLXSW_SP_DPIPE_TABLE_HOST_MATCH_RIF,
+ MLXSW_SP_DPIPE_TABLE_HOST_MATCH_DIP,
+ MLXSW_SP_DPIPE_TABLE_HOST_MATCH_COUNT,
+};
+
+static void
+mlxsw_sp_dpipe_table_host_match_action_prepare(struct devlink_dpipe_match *matches,
+ struct devlink_dpipe_action *action,
+ int type)
+{
+ struct devlink_dpipe_match *match;
+
+ match = &matches[MLXSW_SP_DPIPE_TABLE_HOST_MATCH_RIF];
+ match->type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT;
+ match->header = &mlxsw_sp_dpipe_header_metadata;
+ match->field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT;
+
+ match = &matches[MLXSW_SP_DPIPE_TABLE_HOST_MATCH_DIP];
+ match->type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT;
+ switch (type) {
+ case AF_INET:
+ match->header = &devlink_dpipe_header_ipv4;
+ match->field_id = DEVLINK_DPIPE_FIELD_IPV4_DST_IP;
+ break;
+ case AF_INET6:
+ match->header = &devlink_dpipe_header_ipv6;
+ match->field_id = DEVLINK_DPIPE_FIELD_IPV6_DST_IP;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ action->type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY;
+ action->header = &devlink_dpipe_header_ethernet;
+ action->field_id = DEVLINK_DPIPE_FIELD_ETHERNET_DST_MAC;
+}
+
+static int
+mlxsw_sp_dpipe_table_host_entry_prepare(struct devlink_dpipe_entry *entry,
+ struct devlink_dpipe_value *match_values,
+ struct devlink_dpipe_match *matches,
+ struct devlink_dpipe_value *action_value,
+ struct devlink_dpipe_action *action,
+ int type)
+{
+ struct devlink_dpipe_value *match_value;
+ struct devlink_dpipe_match *match;
+
+ entry->match_values = match_values;
+ entry->match_values_count = MLXSW_SP_DPIPE_TABLE_HOST_MATCH_COUNT;
+
+ entry->action_values = action_value;
+ entry->action_values_count = 1;
+
+ match = &matches[MLXSW_SP_DPIPE_TABLE_HOST_MATCH_RIF];
+ match_value = &match_values[MLXSW_SP_DPIPE_TABLE_HOST_MATCH_RIF];
+
+ match_value->match = match;
+ match_value->value_size = sizeof(u32);
+ match_value->value = kmalloc(match_value->value_size, GFP_KERNEL);
+ if (!match_value->value)
+ return -ENOMEM;
+
+ match = &matches[MLXSW_SP_DPIPE_TABLE_HOST_MATCH_DIP];
+ match_value = &match_values[MLXSW_SP_DPIPE_TABLE_HOST_MATCH_DIP];
+
+ match_value->match = match;
+ switch (type) {
+ case AF_INET:
+ match_value->value_size = sizeof(u32);
+ break;
+ case AF_INET6:
+ match_value->value_size = sizeof(struct in6_addr);
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ match_value->value = kmalloc(match_value->value_size, GFP_KERNEL);
+ if (!match_value->value)
+ return -ENOMEM;
+
+ action_value->action = action;
+ action_value->value_size = sizeof(u64);
+ action_value->value = kmalloc(action_value->value_size, GFP_KERNEL);
+ if (!action_value->value)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void
+__mlxsw_sp_dpipe_table_host_entry_fill(struct devlink_dpipe_entry *entry,
+ struct mlxsw_sp_rif *rif,
+ unsigned char *ha, void *dip)
+{
+ struct devlink_dpipe_value *value;
+ u32 *rif_value;
+ u8 *ha_value;
+
+ /* Set Match RIF index */
+ value = &entry->match_values[MLXSW_SP_DPIPE_TABLE_HOST_MATCH_RIF];
+
+ rif_value = value->value;
+ *rif_value = mlxsw_sp_rif_index(rif);
+ value->mapping_value = mlxsw_sp_rif_dev_ifindex(rif);
+ value->mapping_valid = true;
+
+ /* Set Match DIP */
+ value = &entry->match_values[MLXSW_SP_DPIPE_TABLE_HOST_MATCH_DIP];
+ memcpy(value->value, dip, value->value_size);
+
+ /* Set Action DMAC */
+ value = entry->action_values;
+ ha_value = value->value;
+ ether_addr_copy(ha_value, ha);
+}
+
+static void
+mlxsw_sp_dpipe_table_host4_entry_fill(struct devlink_dpipe_entry *entry,
+ struct mlxsw_sp_neigh_entry *neigh_entry,
+ struct mlxsw_sp_rif *rif)
+{
+ unsigned char *ha;
+ u32 dip;
+
+ ha = mlxsw_sp_neigh_entry_ha(neigh_entry);
+ dip = mlxsw_sp_neigh4_entry_dip(neigh_entry);
+ __mlxsw_sp_dpipe_table_host_entry_fill(entry, rif, ha, &dip);
+}
+
+static void
+mlxsw_sp_dpipe_table_host6_entry_fill(struct devlink_dpipe_entry *entry,
+ struct mlxsw_sp_neigh_entry *neigh_entry,
+ struct mlxsw_sp_rif *rif)
+{
+ struct in6_addr *dip;
+ unsigned char *ha;
+
+ ha = mlxsw_sp_neigh_entry_ha(neigh_entry);
+ dip = mlxsw_sp_neigh6_entry_dip(neigh_entry);
+
+ __mlxsw_sp_dpipe_table_host_entry_fill(entry, rif, ha, dip);
+}
+
+static void
+mlxsw_sp_dpipe_table_host_entry_fill(struct mlxsw_sp *mlxsw_sp,
+ struct devlink_dpipe_entry *entry,
+ struct mlxsw_sp_neigh_entry *neigh_entry,
+ struct mlxsw_sp_rif *rif,
+ int type)
+{
+ int err;
+
+ switch (type) {
+ case AF_INET:
+ mlxsw_sp_dpipe_table_host4_entry_fill(entry, neigh_entry, rif);
+ break;
+ case AF_INET6:
+ mlxsw_sp_dpipe_table_host6_entry_fill(entry, neigh_entry, rif);
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ err = mlxsw_sp_neigh_counter_get(mlxsw_sp, neigh_entry,
+ &entry->counter);
+ if (!err)
+ entry->counter_valid = true;
+}
+
+static int
+mlxsw_sp_dpipe_table_host_entries_get(struct mlxsw_sp *mlxsw_sp,
+ struct devlink_dpipe_entry *entry,
+ bool counters_enabled,
+ struct devlink_dpipe_dump_ctx *dump_ctx,
+ int type)
+{
+ int rif_neigh_count = 0;
+ int rif_neigh_skip = 0;
+ int neigh_count = 0;
+ int rif_count;
+ int i, j;
+ int err;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+ i = 0;
+ rif_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
+start_again:
+ err = devlink_dpipe_entry_ctx_prepare(dump_ctx);
+ if (err)
+ goto err_ctx_prepare;
+ j = 0;
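+ /* rif_neigh_skip remembers how many neighbours of the RIF being
+ * resumed were already dumped into previous skbs, so the inner loop
+ * below can skip over them.
+ */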
+ rif_neigh_skip = rif_neigh_count;
+ for (; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
+ struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+
+ if (!rif)
+ continue;
+
+ rif_neigh_count = 0;
+ mlxsw_sp_rif_neigh_for_each(neigh_entry, rif) {
+ int neigh_type = mlxsw_sp_neigh_entry_type(neigh_entry);
+
+ if (neigh_type != type)
+ continue;
+
+ if (neigh_type == AF_INET6 &&
+ mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
+ continue;
+
+ if (rif_neigh_count < rif_neigh_skip)
+ goto skip;
+
+ mlxsw_sp_dpipe_table_host_entry_fill(mlxsw_sp, entry,
+ neigh_entry, rif,
+ type);
+ entry->index = neigh_count;
+ err = devlink_dpipe_entry_ctx_append(dump_ctx, entry);
+ if (err) {
+ if (err == -EMSGSIZE) {
+ if (!j)
+ goto err_entry_append;
+ else
+ goto out;
+ }
+ goto err_entry_append;
+ }
+ neigh_count++;
+ j++;
+skip:
+ rif_neigh_count++;
+ }
+ rif_neigh_skip = 0;
+ }
+out:
+ devlink_dpipe_entry_ctx_close(dump_ctx);
+ if (i != rif_count)
+ goto start_again;
+
+ mutex_unlock(&mlxsw_sp->router->lock);
+ return 0;
+
+err_ctx_prepare:
+err_entry_append:
+ mutex_unlock(&mlxsw_sp->router->lock);
+ return err;
+}
+
+static int
+mlxsw_sp_dpipe_table_host_entries_dump(struct mlxsw_sp *mlxsw_sp,
+ bool counters_enabled,
+ struct devlink_dpipe_dump_ctx *dump_ctx,
+ int type)
+{
+ struct devlink_dpipe_value match_values[MLXSW_SP_DPIPE_TABLE_HOST_MATCH_COUNT];
+ struct devlink_dpipe_match matches[MLXSW_SP_DPIPE_TABLE_HOST_MATCH_COUNT];
+ struct devlink_dpipe_value action_value;
+ struct devlink_dpipe_action action = {0};
+ struct devlink_dpipe_entry entry = {0};
+ int err;
+
+ memset(matches, 0, MLXSW_SP_DPIPE_TABLE_HOST_MATCH_COUNT *
+ sizeof(matches[0]));
+ memset(match_values, 0, MLXSW_SP_DPIPE_TABLE_HOST_MATCH_COUNT *
+ sizeof(match_values[0]));
+ memset(&action_value, 0, sizeof(action_value));
+
+ mlxsw_sp_dpipe_table_host_match_action_prepare(matches, &action, type);
+ err = mlxsw_sp_dpipe_table_host_entry_prepare(&entry, match_values,
+ matches, &action_value,
+ &action, type);
+ if (err)
+ goto out;
+
+ err = mlxsw_sp_dpipe_table_host_entries_get(mlxsw_sp, &entry,
+ counters_enabled, dump_ctx,
+ type);
+out:
+ devlink_dpipe_entry_clear(&entry);
+ return err;
+}
+
+static int
+mlxsw_sp_dpipe_table_host4_entries_dump(void *priv, bool counters_enabled,
+ struct devlink_dpipe_dump_ctx *dump_ctx)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ return mlxsw_sp_dpipe_table_host_entries_dump(mlxsw_sp,
+ counters_enabled,
+ dump_ctx, AF_INET);
+}
+
+static void
+mlxsw_sp_dpipe_table_host_counters_update(struct mlxsw_sp *mlxsw_sp,
+ bool enable, int type)
+{
+ int i;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
+ struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+
+ if (!rif)
+ continue;
+ mlxsw_sp_rif_neigh_for_each(neigh_entry, rif) {
+ int neigh_type = mlxsw_sp_neigh_entry_type(neigh_entry);
+
+ if (neigh_type != type)
+ continue;
+
+ if (neigh_type == AF_INET6 &&
+ mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
+ continue;
+
+ mlxsw_sp_neigh_entry_counter_update(mlxsw_sp,
+ neigh_entry,
+ enable);
+ }
+ }
+ mutex_unlock(&mlxsw_sp->router->lock);
+}
+
+static int mlxsw_sp_dpipe_table_host4_counters_update(void *priv, bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ mlxsw_sp_dpipe_table_host_counters_update(mlxsw_sp, enable, AF_INET);
+ return 0;
+}
+
+static u64
+mlxsw_sp_dpipe_table_host_size_get(struct mlxsw_sp *mlxsw_sp, int type)
+{
+ u64 size = 0;
+ int i;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
+ struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+
+ if (!rif)
+ continue;
+ mlxsw_sp_rif_neigh_for_each(neigh_entry, rif) {
+ int neigh_type = mlxsw_sp_neigh_entry_type(neigh_entry);
+
+ if (neigh_type != type)
+ continue;
+
+ if (neigh_type == AF_INET6 &&
+ mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
+ continue;
+
+ size++;
+ }
+ }
+ mutex_unlock(&mlxsw_sp->router->lock);
+
+ return size;
+}
+
+static u64 mlxsw_sp_dpipe_table_host4_size_get(void *priv)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ return mlxsw_sp_dpipe_table_host_size_get(mlxsw_sp, AF_INET);
+}
+
+static struct devlink_dpipe_table_ops mlxsw_sp_host4_ops = {
+ .matches_dump = mlxsw_sp_dpipe_table_host4_matches_dump,
+ .actions_dump = mlxsw_sp_dpipe_table_host_actions_dump,
+ .entries_dump = mlxsw_sp_dpipe_table_host4_entries_dump,
+ .counters_set_update = mlxsw_sp_dpipe_table_host4_counters_update,
+ .size_get = mlxsw_sp_dpipe_table_host4_size_get,
+};
+
+#define MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST4 1
+
+static int mlxsw_sp_dpipe_host4_table_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ int err;
+
+ err = devl_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
+ &mlxsw_sp_host4_ops,
+ mlxsw_sp, false);
+ if (err)
+ return err;
+
+ err = devl_dpipe_table_resource_set(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST4);
+ if (err)
+ goto err_resource_set;
+
+ return 0;
+
+err_resource_set:
+ devl_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4);
+ return err;
+}
+
+static void mlxsw_sp_dpipe_host4_table_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+ devl_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4);
+}
+
+static int
+mlxsw_sp_dpipe_table_host6_matches_dump(void *priv, struct sk_buff *skb)
+{
+ return mlxsw_sp_dpipe_table_host_matches_dump(skb, AF_INET6);
+}
+
+static int
+mlxsw_sp_dpipe_table_host6_entries_dump(void *priv, bool counters_enabled,
+ struct devlink_dpipe_dump_ctx *dump_ctx)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ return mlxsw_sp_dpipe_table_host_entries_dump(mlxsw_sp,
+ counters_enabled,
+ dump_ctx, AF_INET6);
+}
+
+static int mlxsw_sp_dpipe_table_host6_counters_update(void *priv, bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ mlxsw_sp_dpipe_table_host_counters_update(mlxsw_sp, enable, AF_INET6);
+ return 0;
+}
+
+static u64 mlxsw_sp_dpipe_table_host6_size_get(void *priv)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ return mlxsw_sp_dpipe_table_host_size_get(mlxsw_sp, AF_INET6);
+}
+
+static struct devlink_dpipe_table_ops mlxsw_sp_host6_ops = {
+ .matches_dump = mlxsw_sp_dpipe_table_host6_matches_dump,
+ .actions_dump = mlxsw_sp_dpipe_table_host_actions_dump,
+ .entries_dump = mlxsw_sp_dpipe_table_host6_entries_dump,
+ .counters_set_update = mlxsw_sp_dpipe_table_host6_counters_update,
+ .size_get = mlxsw_sp_dpipe_table_host6_size_get,
+};
+
+#define MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST6 2
+
+static int mlxsw_sp_dpipe_host6_table_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ int err;
+
+ err = devl_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
+ &mlxsw_sp_host6_ops,
+ mlxsw_sp, false);
+ if (err)
+ return err;
+
+ err = devl_dpipe_table_resource_set(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST6);
+ if (err)
+ goto err_resource_set;
+
+ return 0;
+
+err_resource_set:
+ devl_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6);
+ return err;
+}
+
+static void mlxsw_sp_dpipe_host6_table_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+ devl_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6);
+}
+
+static int mlxsw_sp_dpipe_table_adj_matches_dump(void *priv,
+ struct sk_buff *skb)
+{
+ struct devlink_dpipe_match match = {0};
+ int err;
+
+ match.type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT;
+ match.header = &mlxsw_sp_dpipe_header_metadata;
+ match.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_INDEX;
+
+ err = devlink_dpipe_match_put(skb, &match);
+ if (err)
+ return err;
+
+ match.type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT;
+ match.header = &mlxsw_sp_dpipe_header_metadata;
+ match.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_SIZE;
+
+ err = devlink_dpipe_match_put(skb, &match);
+ if (err)
+ return err;
+
+ match.type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT;
+ match.header = &mlxsw_sp_dpipe_header_metadata;
+ match.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_HASH_INDEX;
+
+ return devlink_dpipe_match_put(skb, &match);
+}
+
+static int mlxsw_sp_dpipe_table_adj_actions_dump(void *priv,
+ struct sk_buff *skb)
+{
+ struct devlink_dpipe_action action = {0};
+ int err;
+
+ action.type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY;
+ action.header = &devlink_dpipe_header_ethernet;
+ action.field_id = DEVLINK_DPIPE_FIELD_ETHERNET_DST_MAC;
+
+ err = devlink_dpipe_action_put(skb, &action);
+ if (err)
+ return err;
+
+ action.type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY;
+ action.header = &mlxsw_sp_dpipe_header_metadata;
+ action.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT;
+
+ return devlink_dpipe_action_put(skb, &action);
+}
+
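+/* Only nexthops that are programmed to forward via an Ethernet adjacency
+ * entry are exposed; IP-in-IP nexthop groups are skipped.
+ */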
+static u64 mlxsw_sp_dpipe_table_adj_size(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_nexthop *nh;
+ u64 size = 0;
+
+ mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router)
+ if (mlxsw_sp_nexthop_is_forward(nh) &&
+ !mlxsw_sp_nexthop_group_has_ipip(nh))
+ size++;
+ return size;
+}
+
+enum mlxsw_sp_dpipe_table_adj_match {
+ MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_INDEX,
+ MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_SIZE,
+ MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_HASH_INDEX,
+ MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_COUNT,
+};
+
+enum mlxsw_sp_dpipe_table_adj_action {
+ MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_DST_MAC,
+ MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_ERIF_PORT,
+ MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_COUNT,
+};
+
+static void
+mlxsw_sp_dpipe_table_adj_match_action_prepare(struct devlink_dpipe_match *matches,
+ struct devlink_dpipe_action *actions)
+{
+ struct devlink_dpipe_action *action;
+ struct devlink_dpipe_match *match;
+
+ match = &matches[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_INDEX];
+ match->type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT;
+ match->header = &mlxsw_sp_dpipe_header_metadata;
+ match->field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_INDEX;
+
+ match = &matches[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_SIZE];
+ match->type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT;
+ match->header = &mlxsw_sp_dpipe_header_metadata;
+ match->field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_SIZE;
+
+ match = &matches[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_HASH_INDEX];
+ match->type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT;
+ match->header = &mlxsw_sp_dpipe_header_metadata;
+ match->field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_HASH_INDEX;
+
+ action = &actions[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_DST_MAC];
+ action->type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY;
+ action->header = &devlink_dpipe_header_ethernet;
+ action->field_id = DEVLINK_DPIPE_FIELD_ETHERNET_DST_MAC;
+
+ action = &actions[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_ERIF_PORT];
+ action->type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY;
+ action->header = &mlxsw_sp_dpipe_header_metadata;
+ action->field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT;
+}
+
+static int
+mlxsw_sp_dpipe_table_adj_entry_prepare(struct devlink_dpipe_entry *entry,
+ struct devlink_dpipe_value *match_values,
+ struct devlink_dpipe_match *matches,
+ struct devlink_dpipe_value *action_values,
+ struct devlink_dpipe_action *actions)
+{
+	struct devlink_dpipe_value *action_value;
+ struct devlink_dpipe_value *match_value;
+ struct devlink_dpipe_action *action;
+ struct devlink_dpipe_match *match;
+
+ entry->match_values = match_values;
+ entry->match_values_count = MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_COUNT;
+
+ entry->action_values = action_values;
+ entry->action_values_count = MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_COUNT;
+
+ match = &matches[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_INDEX];
+ match_value = &match_values[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_INDEX];
+
+ match_value->match = match;
+ match_value->value_size = sizeof(u32);
+ match_value->value = kmalloc(match_value->value_size, GFP_KERNEL);
+ if (!match_value->value)
+ return -ENOMEM;
+
+ match = &matches[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_SIZE];
+ match_value = &match_values[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_SIZE];
+
+ match_value->match = match;
+ match_value->value_size = sizeof(u32);
+ match_value->value = kmalloc(match_value->value_size, GFP_KERNEL);
+ if (!match_value->value)
+ return -ENOMEM;
+
+ match = &matches[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_HASH_INDEX];
+ match_value = &match_values[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_HASH_INDEX];
+
+ match_value->match = match;
+ match_value->value_size = sizeof(u32);
+ match_value->value = kmalloc(match_value->value_size, GFP_KERNEL);
+ if (!match_value->value)
+ return -ENOMEM;
+
+ action = &actions[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_DST_MAC];
+ action_value = &action_values[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_DST_MAC];
+
+ action_value->action = action;
+ action_value->value_size = sizeof(u64);
+ action_value->value = kmalloc(action_value->value_size, GFP_KERNEL);
+ if (!action_value->value)
+ return -ENOMEM;
+
+ action = &actions[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_ERIF_PORT];
+ action_value = &action_values[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_ERIF_PORT];
+
+ action_value->action = action;
+ action_value->value_size = sizeof(u32);
+ action_value->value = kmalloc(action_value->value_size, GFP_KERNEL);
+ if (!action_value->value)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void
+__mlxsw_sp_dpipe_table_adj_entry_fill(struct devlink_dpipe_entry *entry,
+ u32 adj_index, u32 adj_size,
+ u32 adj_hash_index, unsigned char *ha,
+ struct mlxsw_sp_rif *rif)
+{
+ struct devlink_dpipe_value *value;
+ u32 *p_rif_value;
+ u32 *p_index;
+
+ value = &entry->match_values[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_INDEX];
+ p_index = value->value;
+ *p_index = adj_index;
+
+ value = &entry->match_values[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_SIZE];
+ p_index = value->value;
+ *p_index = adj_size;
+
+ value = &entry->match_values[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_HASH_INDEX];
+ p_index = value->value;
+ *p_index = adj_hash_index;
+
+ value = &entry->action_values[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_DST_MAC];
+ ether_addr_copy(value->value, ha);
+
+ value = &entry->action_values[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_ERIF_PORT];
+ p_rif_value = value->value;
+ *p_rif_value = mlxsw_sp_rif_index(rif);
+ value->mapping_value = mlxsw_sp_rif_dev_ifindex(rif);
+ value->mapping_valid = true;
+}
+
+static void mlxsw_sp_dpipe_table_adj_entry_fill(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh,
+ struct devlink_dpipe_entry *entry)
+{
+ struct mlxsw_sp_rif *rif = mlxsw_sp_nexthop_rif(nh);
+ unsigned char *ha = mlxsw_sp_nexthop_ha(nh);
+ u32 adj_hash_index = 0;
+ u32 adj_index = 0;
+ u32 adj_size = 0;
+ int err;
+
+ mlxsw_sp_nexthop_indexes(nh, &adj_index, &adj_size, &adj_hash_index);
+ __mlxsw_sp_dpipe_table_adj_entry_fill(entry, adj_index, adj_size,
+ adj_hash_index, ha, rif);
+ err = mlxsw_sp_nexthop_counter_get(mlxsw_sp, nh, &entry->counter);
+ if (!err)
+ entry->counter_valid = true;
+}
+
+static int
+mlxsw_sp_dpipe_table_adj_entries_get(struct mlxsw_sp *mlxsw_sp,
+ struct devlink_dpipe_entry *entry,
+ bool counters_enabled,
+ struct devlink_dpipe_dump_ctx *dump_ctx)
+{
+ struct mlxsw_sp_nexthop *nh;
+ int entry_index = 0;
+ int nh_count_max;
+ int nh_count = 0;
+ int nh_skip;
+ int j;
+ int err;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+ nh_count_max = mlxsw_sp_dpipe_table_adj_size(mlxsw_sp);
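+	/* The dump may not fit into a single netlink message. When a message
+	 * fills up (-EMSGSIZE), close the current context and start over,
+	 * skipping the entries that were already dumped.
+	 */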
+start_again:
+ err = devlink_dpipe_entry_ctx_prepare(dump_ctx);
+ if (err)
+ goto err_ctx_prepare;
+ j = 0;
+ nh_skip = nh_count;
+ nh_count = 0;
+ mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) {
+ if (!mlxsw_sp_nexthop_is_forward(nh) ||
+ mlxsw_sp_nexthop_group_has_ipip(nh))
+ continue;
+
+ if (nh_count < nh_skip)
+ goto skip;
+
+ mlxsw_sp_dpipe_table_adj_entry_fill(mlxsw_sp, nh, entry);
+ entry->index = entry_index;
+ err = devlink_dpipe_entry_ctx_append(dump_ctx, entry);
+ if (err) {
+ if (err == -EMSGSIZE) {
+ if (!j)
+ goto err_entry_append;
+ break;
+ }
+ goto err_entry_append;
+ }
+ entry_index++;
+ j++;
+skip:
+ nh_count++;
+ }
+
+ devlink_dpipe_entry_ctx_close(dump_ctx);
+ if (nh_count != nh_count_max)
+ goto start_again;
+ mutex_unlock(&mlxsw_sp->router->lock);
+
+ return 0;
+
+err_ctx_prepare:
+err_entry_append:
+ mutex_unlock(&mlxsw_sp->router->lock);
+ return err;
+}
+
+static int
+mlxsw_sp_dpipe_table_adj_entries_dump(void *priv, bool counters_enabled,
+ struct devlink_dpipe_dump_ctx *dump_ctx)
+{
+ struct devlink_dpipe_value action_values[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_COUNT];
+ struct devlink_dpipe_value match_values[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_COUNT];
+ struct devlink_dpipe_action actions[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_COUNT];
+ struct devlink_dpipe_match matches[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_COUNT];
+ struct devlink_dpipe_entry entry = {0};
+ struct mlxsw_sp *mlxsw_sp = priv;
+ int err;
+
+ memset(matches, 0, MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_COUNT *
+ sizeof(matches[0]));
+ memset(match_values, 0, MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_COUNT *
+ sizeof(match_values[0]));
+ memset(actions, 0, MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_COUNT *
+ sizeof(actions[0]));
+ memset(action_values, 0, MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_COUNT *
+ sizeof(action_values[0]));
+
+ mlxsw_sp_dpipe_table_adj_match_action_prepare(matches, actions);
+ err = mlxsw_sp_dpipe_table_adj_entry_prepare(&entry,
+ match_values, matches,
+ action_values, actions);
+ if (err)
+ goto out;
+
+ err = mlxsw_sp_dpipe_table_adj_entries_get(mlxsw_sp, &entry,
+ counters_enabled, dump_ctx);
+out:
+ devlink_dpipe_entry_clear(&entry);
+ return err;
+}
+
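+/* Allocate or free a flow counter for each eligible nexthop and rewrite its
+ * adjacency entry so that the counter binding takes effect in hardware.
+ */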
+static int mlxsw_sp_dpipe_table_adj_counters_update(void *priv, bool enable)
+{
+ char ratr_pl[MLXSW_REG_RATR_LEN];
+ struct mlxsw_sp *mlxsw_sp = priv;
+ struct mlxsw_sp_nexthop *nh;
+ u32 adj_hash_index = 0;
+ u32 adj_index = 0;
+ u32 adj_size = 0;
+
+ mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) {
+ if (!mlxsw_sp_nexthop_is_forward(nh) ||
+ mlxsw_sp_nexthop_group_has_ipip(nh))
+ continue;
+
+ mlxsw_sp_nexthop_indexes(nh, &adj_index, &adj_size,
+ &adj_hash_index);
+ if (enable)
+ mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
+ else
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_eth_update(mlxsw_sp,
+ adj_index + adj_hash_index, nh,
+ true, ratr_pl);
+ }
+ return 0;
+}
+
+static u64
+mlxsw_sp_dpipe_table_adj_size_get(void *priv)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ u64 size;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+ size = mlxsw_sp_dpipe_table_adj_size(mlxsw_sp);
+ mutex_unlock(&mlxsw_sp->router->lock);
+
+ return size;
+}
+
+static struct devlink_dpipe_table_ops mlxsw_sp_dpipe_table_adj_ops = {
+ .matches_dump = mlxsw_sp_dpipe_table_adj_matches_dump,
+ .actions_dump = mlxsw_sp_dpipe_table_adj_actions_dump,
+ .entries_dump = mlxsw_sp_dpipe_table_adj_entries_dump,
+ .counters_set_update = mlxsw_sp_dpipe_table_adj_counters_update,
+ .size_get = mlxsw_sp_dpipe_table_adj_size_get,
+};
+
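+/* Each adjacency table entry consumes a single KVD linear resource unit. */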
+#define MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_ADJ 1
+
+static int mlxsw_sp_dpipe_adj_table_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ int err;
+
+ err = devl_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
+ &mlxsw_sp_dpipe_table_adj_ops,
+ mlxsw_sp, false);
+ if (err)
+ return err;
+
+ err = devl_dpipe_table_resource_set(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_ADJ);
+ if (err)
+ goto err_resource_set;
+
+ return 0;
+
+err_resource_set:
+ devl_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ);
+ return err;
+}
+
+static void mlxsw_sp_dpipe_adj_table_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+ devl_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ);
+}
+
+int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ int err;
+
+ devl_dpipe_headers_register(devlink, &mlxsw_sp_dpipe_headers);
+
+ err = mlxsw_sp_dpipe_erif_table_init(mlxsw_sp);
+ if (err)
+ goto err_erif_table_init;
+
+ err = mlxsw_sp_dpipe_host4_table_init(mlxsw_sp);
+ if (err)
+ goto err_host4_table_init;
+
+ err = mlxsw_sp_dpipe_host6_table_init(mlxsw_sp);
+ if (err)
+ goto err_host6_table_init;
+
+ err = mlxsw_sp_dpipe_adj_table_init(mlxsw_sp);
+ if (err)
+ goto err_adj_table_init;
+
+ return 0;
+err_adj_table_init:
+ mlxsw_sp_dpipe_host6_table_fini(mlxsw_sp);
+err_host6_table_init:
+ mlxsw_sp_dpipe_host4_table_fini(mlxsw_sp);
+err_host4_table_init:
+ mlxsw_sp_dpipe_erif_table_fini(mlxsw_sp);
+err_erif_table_init:
+ devl_dpipe_headers_unregister(priv_to_devlink(mlxsw_sp->core));
+ return err;
+}
+
+void mlxsw_sp_dpipe_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+ mlxsw_sp_dpipe_adj_table_fini(mlxsw_sp);
+ mlxsw_sp_dpipe_host6_table_fini(mlxsw_sp);
+ mlxsw_sp_dpipe_host4_table_fini(mlxsw_sp);
+ mlxsw_sp_dpipe_erif_table_fini(mlxsw_sp);
+ devl_dpipe_headers_unregister(devlink);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
new file mode 100644
index 000000000..246dbb3c0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_PIPELINE_H_
+#define _MLXSW_PIPELINE_H_
+
+int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_dpipe_fini(struct mlxsw_sp *mlxsw_sp);
+
+#define MLXSW_SP_DPIPE_TABLE_NAME_ERIF "mlxsw_erif"
+#define MLXSW_SP_DPIPE_TABLE_NAME_HOST4 "mlxsw_host4"
+#define MLXSW_SP_DPIPE_TABLE_NAME_HOST6 "mlxsw_host6"
+#define MLXSW_SP_DPIPE_TABLE_NAME_ADJ "mlxsw_adj"
+
+#endif /* _MLXSW_PIPELINE_H_*/
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
new file mode 100644
index 000000000..dcd79d7e2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -0,0 +1,2023 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2020 Mellanox Technologies. All rights reserved */
+
+#include "reg.h"
+#include "core.h"
+#include "spectrum.h"
+#include "core_env.h"
+
+static const char mlxsw_sp_driver_version[] = "1.0";
+
+static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ strscpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind,
+ sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, mlxsw_sp_driver_version,
+ sizeof(drvinfo->version));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d",
+ mlxsw_sp->bus_info->fw_rev.major,
+ mlxsw_sp->bus_info->fw_rev.minor,
+ mlxsw_sp->bus_info->fw_rev.subminor);
+ strscpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
+ sizeof(drvinfo->bus_info));
+}
+
+struct mlxsw_sp_ethtool_link_ext_state_opcode_mapping {
+ u32 status_opcode;
+ enum ethtool_link_ext_state link_ext_state;
+ u8 link_ext_substate;
+};
+
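+/* Mapping between the PDDR troubleshooting status opcodes reported by the
+ * device and the generic ethtool extended link states and substates.
+ */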
+static const struct mlxsw_sp_ethtool_link_ext_state_opcode_mapping
+mlxsw_sp_link_ext_state_opcode_map[] = {
+ {2, ETHTOOL_LINK_EXT_STATE_AUTONEG,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED},
+ {3, ETHTOOL_LINK_EXT_STATE_AUTONEG,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED},
+ {4, ETHTOOL_LINK_EXT_STATE_AUTONEG,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_NEXT_PAGE_EXCHANGE_FAILED},
+ {36, ETHTOOL_LINK_EXT_STATE_AUTONEG,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED_FORCE_MODE},
+ {38, ETHTOOL_LINK_EXT_STATE_AUTONEG,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_FEC_MISMATCH_DURING_OVERRIDE},
+ {39, ETHTOOL_LINK_EXT_STATE_AUTONEG,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD},
+
+ {5, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED},
+ {6, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT},
+ {7, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY},
+ {8, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, 0},
+ {14, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT},
+
+ {9, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK},
+ {10, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_AM_LOCK},
+ {11, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_GET_ALIGN_STATUS},
+ {12, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED},
+ {13, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED},
+
+ {15, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, 0},
+ {17, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY,
+ ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS},
+ {42, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY,
+ ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE},
+
+ {1024, ETHTOOL_LINK_EXT_STATE_NO_CABLE, 0},
+
+ {16, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE,
+ ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE},
+ {20, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE,
+ ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE},
+ {29, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE,
+ ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE},
+ {1025, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE,
+ ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE},
+ {1029, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE,
+ ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE},
+ {1031, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, 0},
+
+ {1027, ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE, 0},
+
+ {23, ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE, 0},
+
+ {1032, ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED, 0},
+
+ {1030, ETHTOOL_LINK_EXT_STATE_OVERHEAT, 0},
+
+ {1042, ETHTOOL_LINK_EXT_STATE_MODULE,
+ ETHTOOL_LINK_EXT_SUBSTATE_MODULE_CMIS_NOT_READY},
+};
+
+static void
+mlxsw_sp_port_set_link_ext_state(struct mlxsw_sp_ethtool_link_ext_state_opcode_mapping
+ link_ext_state_mapping,
+ struct ethtool_link_ext_state_info *link_ext_state_info)
+{
+ switch (link_ext_state_mapping.link_ext_state) {
+ case ETHTOOL_LINK_EXT_STATE_AUTONEG:
+ link_ext_state_info->autoneg =
+ link_ext_state_mapping.link_ext_substate;
+ break;
+ case ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE:
+ link_ext_state_info->link_training =
+ link_ext_state_mapping.link_ext_substate;
+ break;
+ case ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH:
+ link_ext_state_info->link_logical_mismatch =
+ link_ext_state_mapping.link_ext_substate;
+ break;
+ case ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY:
+ link_ext_state_info->bad_signal_integrity =
+ link_ext_state_mapping.link_ext_substate;
+ break;
+ case ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE:
+ link_ext_state_info->cable_issue =
+ link_ext_state_mapping.link_ext_substate;
+ break;
+ case ETHTOOL_LINK_EXT_STATE_MODULE:
+ link_ext_state_info->module =
+ link_ext_state_mapping.link_ext_substate;
+ break;
+ default:
+ break;
+ }
+
+ link_ext_state_info->link_ext_state = link_ext_state_mapping.link_ext_state;
+}
+
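+/* Extended link state is only meaningful while the link is down; -ENODATA is
+ * returned when the carrier is up or the device reports no status opcode.
+ */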
+static int
+mlxsw_sp_port_get_link_ext_state(struct net_device *dev,
+ struct ethtool_link_ext_state_info *link_ext_state_info)
+{
+ struct mlxsw_sp_ethtool_link_ext_state_opcode_mapping link_ext_state_mapping;
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ char pddr_pl[MLXSW_REG_PDDR_LEN];
+ int opcode, err, i;
+ u32 status_opcode;
+
+ if (netif_carrier_ok(dev))
+ return -ENODATA;
+
+ mlxsw_reg_pddr_pack(pddr_pl, mlxsw_sp_port->local_port,
+ MLXSW_REG_PDDR_PAGE_SELECT_TROUBLESHOOTING_INFO);
+
+ opcode = MLXSW_REG_PDDR_TRBLSH_GROUP_OPCODE_MONITOR;
+ mlxsw_reg_pddr_trblsh_group_opcode_set(pddr_pl, opcode);
+
+ err = mlxsw_reg_query(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pddr),
+ pddr_pl);
+ if (err)
+ return err;
+
+ status_opcode = mlxsw_reg_pddr_trblsh_status_opcode_get(pddr_pl);
+ if (!status_opcode)
+ return -ENODATA;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_link_ext_state_opcode_map); i++) {
+ link_ext_state_mapping = mlxsw_sp_link_ext_state_opcode_map[i];
+ if (link_ext_state_mapping.status_opcode == status_opcode) {
+ mlxsw_sp_port_set_link_ext_state(link_ext_state_mapping,
+ link_ext_state_info);
+ return 0;
+ }
+ }
+
+ return -ENODATA;
+}
+
+static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+ pause->rx_pause = mlxsw_sp_port->link.rx_pause;
+ pause->tx_pause = mlxsw_sp_port->link.tx_pause;
+}
+
+static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct ethtool_pauseparam *pause)
+{
+ char pfcc_pl[MLXSW_REG_PFCC_LEN];
+
+ mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
+ mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
+ mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
+
+ return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
+ pfcc_pl);
+}
+
+/* Maximum delay buffer needed in case of PAUSE frames. Similar to PFC delay,
+ * but is measured in bytes. Assumes 100m cable and does not take into account
+ * MTU.
+ */
+#define MLXSW_SP_PAUSE_DELAY_BYTES 19476
+
+static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ bool pause_en = pause->tx_pause || pause->rx_pause;
+ struct mlxsw_sp_hdroom orig_hdroom;
+ struct mlxsw_sp_hdroom hdroom;
+ int prio;
+ int err;
+
+ if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
+ netdev_err(dev, "PFC already enabled on port\n");
+ return -EINVAL;
+ }
+
+ if (pause->autoneg) {
+ netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
+ return -EINVAL;
+ }
+
+ orig_hdroom = *mlxsw_sp_port->hdroom;
+
+ hdroom = orig_hdroom;
+ if (pause_en)
+ hdroom.delay_bytes = MLXSW_SP_PAUSE_DELAY_BYTES;
+ else
+ hdroom.delay_bytes = 0;
+
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
+ hdroom.prios.prio[prio].lossy = !pause_en;
+
+ mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+
+ err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
+ if (err) {
+ netdev_err(dev, "Failed to configure port's headroom\n");
+ return err;
+ }
+
+ err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
+ if (err) {
+ netdev_err(dev, "Failed to set PAUSE parameters\n");
+ goto err_port_pause_configure;
+ }
+
+ mlxsw_sp_port->link.rx_pause = pause->rx_pause;
+ mlxsw_sp_port->link.tx_pause = pause->tx_pause;
+
+ return 0;
+
+err_port_pause_configure:
+ mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
+ return err;
+}
+
+struct mlxsw_sp_port_hw_stats {
+ char str[ETH_GSTRING_LEN];
+ u64 (*getter)(const char *payload);
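+	/* Counter is reported by the device in buffer cells and is converted
+	 * to bytes before being reported to ethtool.
+	 */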
+ bool cells_bytes;
+};
+
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
+ {
+ .str = "a_frames_transmitted_ok",
+ .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
+ },
+ {
+ .str = "a_frames_received_ok",
+ .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
+ },
+ {
+ .str = "a_frame_check_sequence_errors",
+ .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
+ },
+ {
+ .str = "a_alignment_errors",
+ .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
+ },
+ {
+ .str = "a_octets_transmitted_ok",
+ .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
+ },
+ {
+ .str = "a_octets_received_ok",
+ .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
+ },
+ {
+ .str = "a_multicast_frames_xmitted_ok",
+ .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
+ },
+ {
+ .str = "a_broadcast_frames_xmitted_ok",
+ .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
+ },
+ {
+ .str = "a_multicast_frames_received_ok",
+ .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
+ },
+ {
+ .str = "a_broadcast_frames_received_ok",
+ .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
+ },
+ {
+ .str = "a_in_range_length_errors",
+ .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
+ },
+ {
+ .str = "a_out_of_range_length_field",
+ .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
+ },
+ {
+ .str = "a_frame_too_long_errors",
+ .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
+ },
+ {
+ .str = "a_symbol_error_during_carrier",
+ .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
+ },
+ {
+ .str = "a_mac_control_frames_transmitted",
+ .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
+ },
+ {
+ .str = "a_mac_control_frames_received",
+ .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
+ },
+ {
+ .str = "a_unsupported_opcodes_received",
+ .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
+ },
+ {
+ .str = "a_pause_mac_ctrl_frames_received",
+ .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
+ },
+ {
+ .str = "a_pause_mac_ctrl_frames_xmitted",
+ .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
+
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = {
+ {
+ .str = "if_in_discards",
+ .getter = mlxsw_reg_ppcnt_if_in_discards_get,
+ },
+ {
+ .str = "if_out_discards",
+ .getter = mlxsw_reg_ppcnt_if_out_discards_get,
+ },
+ {
+ .str = "if_out_errors",
+ .getter = mlxsw_reg_ppcnt_if_out_errors_get,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \
+ ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats)
+
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
+ {
+ .str = "ether_stats_undersize_pkts",
+ .getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get,
+ },
+ {
+ .str = "ether_stats_oversize_pkts",
+ .getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get,
+ },
+ {
+ .str = "ether_stats_fragments",
+ .getter = mlxsw_reg_ppcnt_ether_stats_fragments_get,
+ },
+ {
+ .str = "ether_pkts64octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get,
+ },
+ {
+ .str = "ether_pkts65to127octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get,
+ },
+ {
+ .str = "ether_pkts128to255octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get,
+ },
+ {
+ .str = "ether_pkts256to511octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get,
+ },
+ {
+ .str = "ether_pkts512to1023octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get,
+ },
+ {
+ .str = "ether_pkts1024to1518octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get,
+ },
+ {
+ .str = "ether_pkts1519to2047octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get,
+ },
+ {
+ .str = "ether_pkts2048to4095octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get,
+ },
+ {
+ .str = "ether_pkts4096to8191octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get,
+ },
+ {
+ .str = "ether_pkts8192to10239octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \
+ ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats)
+
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = {
+ {
+ .str = "dot3stats_fcs_errors",
+ .getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get,
+ },
+ {
+ .str = "dot3stats_symbol_errors",
+ .getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get,
+ },
+ {
+ .str = "dot3control_in_unknown_opcodes",
+ .getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get,
+ },
+ {
+ .str = "dot3in_pause_frames",
+ .getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \
+ ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats)
+
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_ext_stats[] = {
+ {
+ .str = "ecn_marked",
+ .getter = mlxsw_reg_ppcnt_ecn_marked_get,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_EXT_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_ext_stats)
+
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = {
+ {
+ .str = "discard_ingress_general",
+ .getter = mlxsw_reg_ppcnt_ingress_general_get,
+ },
+ {
+ .str = "discard_ingress_policy_engine",
+ .getter = mlxsw_reg_ppcnt_ingress_policy_engine_get,
+ },
+ {
+ .str = "discard_ingress_vlan_membership",
+ .getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get,
+ },
+ {
+ .str = "discard_ingress_tag_frame_type",
+ .getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get,
+ },
+ {
+ .str = "discard_egress_vlan_membership",
+ .getter = mlxsw_reg_ppcnt_egress_vlan_membership_get,
+ },
+ {
+ .str = "discard_loopback_filter",
+ .getter = mlxsw_reg_ppcnt_loopback_filter_get,
+ },
+ {
+ .str = "discard_egress_general",
+ .getter = mlxsw_reg_ppcnt_egress_general_get,
+ },
+ {
+ .str = "discard_egress_hoq",
+ .getter = mlxsw_reg_ppcnt_egress_hoq_get,
+ },
+ {
+ .str = "discard_egress_policy_engine",
+ .getter = mlxsw_reg_ppcnt_egress_policy_engine_get,
+ },
+ {
+ .str = "discard_ingress_tx_link_down",
+ .getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get,
+ },
+ {
+ .str = "discard_egress_stp_filter",
+ .getter = mlxsw_reg_ppcnt_egress_stp_filter_get,
+ },
+ {
+ .str = "discard_egress_sll",
+ .getter = mlxsw_reg_ppcnt_egress_sll_get,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \
+ ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats)
+
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
+ {
+ .str = "rx_octets_prio",
+ .getter = mlxsw_reg_ppcnt_rx_octets_get,
+ },
+ {
+ .str = "rx_frames_prio",
+ .getter = mlxsw_reg_ppcnt_rx_frames_get,
+ },
+ {
+ .str = "tx_octets_prio",
+ .getter = mlxsw_reg_ppcnt_tx_octets_get,
+ },
+ {
+ .str = "tx_frames_prio",
+ .getter = mlxsw_reg_ppcnt_tx_frames_get,
+ },
+ {
+ .str = "rx_pause_prio",
+ .getter = mlxsw_reg_ppcnt_rx_pause_get,
+ },
+ {
+ .str = "rx_pause_duration_prio",
+ .getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
+ },
+ {
+ .str = "tx_pause_prio",
+ .getter = mlxsw_reg_ppcnt_tx_pause_get,
+ },
+ {
+ .str = "tx_pause_duration_prio",
+ .getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
+
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
+ {
+ .str = "tc_transmit_queue_tc",
+ .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
+ .cells_bytes = true,
+ },
+ {
+ .str = "tc_no_buffer_discard_uc_tc",
+ .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
+
+struct mlxsw_sp_port_stats {
+ char str[ETH_GSTRING_LEN];
+ u64 (*getter)(struct mlxsw_sp_port *mlxsw_sp_port);
+};
+
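+/* The module overheat counter is reported relative to its value at port
+ * creation, so that the per-netdev statistic starts from zero.
+ */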
+static u64
+mlxsw_sp_port_get_transceiver_overheat_stats(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_core *mlxsw_core = mlxsw_sp_port->mlxsw_sp->core;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
+ u8 module = mlxsw_sp_port->mapping.module;
+ u64 stats;
+ int err;
+
+ err = mlxsw_env_module_overheat_counter_get(mlxsw_core, slot_index,
+ module, &stats);
+ if (err)
+ return mlxsw_sp_port->module_overheat_initial_val;
+
+ return stats - mlxsw_sp_port->module_overheat_initial_val;
+}
+
+static struct mlxsw_sp_port_stats mlxsw_sp_port_transceiver_stats[] = {
+ {
+ .str = "transceiver_overheat",
+ .getter = mlxsw_sp_port_get_transceiver_overheat_stats,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_TRANSCEIVER_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_transceiver_stats)
+
+#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
+ MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \
+ MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \
+ MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \
+ MLXSW_SP_PORT_HW_EXT_STATS_LEN + \
+ MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \
+ (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
+ IEEE_8021QAZ_MAX_TCS) + \
+ (MLXSW_SP_PORT_HW_TC_STATS_LEN * \
+ TC_MAX_QUEUE) + \
+ MLXSW_SP_PORT_HW_TRANSCEIVER_STATS_LEN)
+
+static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
+ snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
+ mlxsw_sp_port_hw_prio_stats[i].str, prio);
+ *p += ETH_GSTRING_LEN;
+ }
+}
+
+static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
+ snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
+ mlxsw_sp_port_hw_tc_stats[i].str, tc);
+ *p += ETH_GSTRING_LEN;
+ }
+}
+
+static void mlxsw_sp_port_get_strings(struct net_device *dev,
+ u32 stringset, u8 *data)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sp_port_hw_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < MLXSW_SP_PORT_HW_EXT_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sp_port_hw_ext_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ mlxsw_sp_port_get_prio_strings(&p, i);
+
+ for (i = 0; i < TC_MAX_QUEUE; i++)
+ mlxsw_sp_port_get_tc_strings(&p, i);
+
+ mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_strings(&p);
+
+ for (i = 0; i < MLXSW_SP_PORT_HW_TRANSCEIVER_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sp_port_transceiver_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char mlcr_pl[MLXSW_REG_MLCR_LEN];
+ bool active;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ active = true;
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ active = false;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
+}
+
+static int
+mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
+ int *p_len, enum mlxsw_reg_ppcnt_grp grp)
+{
+ switch (grp) {
+ case MLXSW_REG_PPCNT_IEEE_8023_CNT:
+ *p_hw_stats = mlxsw_sp_port_hw_stats;
+ *p_len = MLXSW_SP_PORT_HW_STATS_LEN;
+ break;
+ case MLXSW_REG_PPCNT_RFC_2863_CNT:
+ *p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats;
+ *p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
+ break;
+ case MLXSW_REG_PPCNT_RFC_2819_CNT:
+ *p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats;
+ *p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
+ break;
+ case MLXSW_REG_PPCNT_RFC_3635_CNT:
+ *p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats;
+ *p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
+ break;
+ case MLXSW_REG_PPCNT_EXT_CNT:
+ *p_hw_stats = mlxsw_sp_port_hw_ext_stats;
+ *p_len = MLXSW_SP_PORT_HW_EXT_STATS_LEN;
+ break;
+ case MLXSW_REG_PPCNT_DISCARD_CNT:
+ *p_hw_stats = mlxsw_sp_port_hw_discard_stats;
+ *p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
+ break;
+ case MLXSW_REG_PPCNT_PRIO_CNT:
+ *p_hw_stats = mlxsw_sp_port_hw_prio_stats;
+ *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
+ break;
+ case MLXSW_REG_PPCNT_TC_CNT:
+ *p_hw_stats = mlxsw_sp_port_hw_tc_stats;
+ *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
+ break;
+ default:
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static void __mlxsw_sp_port_get_stats(struct net_device *dev,
+ enum mlxsw_reg_ppcnt_grp grp, int prio,
+ u64 *data, int data_index)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_port_hw_stats *hw_stats;
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+ int i, len;
+ int err;
+
+ err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
+ if (err)
+ return;
+ mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
+ for (i = 0; i < len; i++) {
+ data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
+ if (!hw_stats[i].cells_bytes)
+ continue;
+ data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
+ data[data_index + i]);
+ }
+}
+
+static void __mlxsw_sp_port_get_env_stats(struct net_device *dev, u64 *data, int data_index,
+ struct mlxsw_sp_port_stats *port_stats,
+ int len)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < len; i++)
+ data[data_index + i] = port_stats[i].getter(mlxsw_sp_port);
+}
+
+static void mlxsw_sp_port_get_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int i, data_index = 0;
+
+ /* IEEE 802.3 Counters */
+ __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
+ data, data_index);
+ data_index = MLXSW_SP_PORT_HW_STATS_LEN;
+
+ /* RFC 2863 Counters */
+ __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0,
+ data, data_index);
+ data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
+
+ /* RFC 2819 Counters */
+ __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0,
+ data, data_index);
+ data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
+
+ /* RFC 3635 Counters */
+ __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0,
+ data, data_index);
+ data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
+
+ /* Extended Counters */
+ __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
+ data, data_index);
+ data_index += MLXSW_SP_PORT_HW_EXT_STATS_LEN;
+
+ /* Discard Counters */
+ __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0,
+ data, data_index);
+ data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
+
+ /* Per-Priority Counters */
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
+ data, data_index);
+ data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
+ }
+
+ /* Per-TC Counters */
+ for (i = 0; i < TC_MAX_QUEUE; i++) {
+ __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
+ data, data_index);
+ data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
+ }
+
+ /* PTP counters */
+ mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats(mlxsw_sp_port,
+ data, data_index);
+ data_index += mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count();
+
+ /* Transceiver counters */
+ __mlxsw_sp_port_get_env_stats(dev, data, data_index, mlxsw_sp_port_transceiver_stats,
+ MLXSW_SP_PORT_HW_TRANSCEIVER_STATS_LEN);
+ data_index += MLXSW_SP_PORT_HW_TRANSCEIVER_STATS_LEN;
+}
+
+static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return MLXSW_SP_PORT_ETHTOOL_STATS_LEN +
+ mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count();
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void
+mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap,
+ struct ethtool_link_ksettings *cmd)
+{
+ const struct mlxsw_sp_port_type_speed_ops *ops;
+
+ ops = mlxsw_sp->port_type_speed_ops;
+
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+
+ ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd);
+ ops->from_ptys_link(mlxsw_sp, eth_proto_cap,
+ cmd->link_modes.supported);
+}
+
+static void
+mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp,
+ u32 eth_proto_admin, bool autoneg,
+ struct ethtool_link_ksettings *cmd)
+{
+ const struct mlxsw_sp_port_type_speed_ops *ops;
+
+ ops = mlxsw_sp->port_type_speed_ops;
+
+ if (!autoneg)
+ return;
+
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
+ ops->from_ptys_link(mlxsw_sp, eth_proto_admin,
+ cmd->link_modes.advertising);
+}
+
+static u8
+mlxsw_sp_port_connector_port(enum mlxsw_reg_ptys_connector_type connector_type)
+{
+ switch (connector_type) {
+ case MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR:
+ return PORT_OTHER;
+ case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE:
+ return PORT_NONE;
+ case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_TP:
+ return PORT_TP;
+ case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_AUI:
+ return PORT_AUI;
+ case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_BNC:
+ return PORT_BNC;
+ case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_MII:
+ return PORT_MII;
+ case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_FIBRE:
+ return PORT_FIBRE;
+ case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_DA:
+ return PORT_DA;
+ case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_OTHER:
+ return PORT_OTHER;
+ default:
+ WARN_ON_ONCE(1);
+ return PORT_OTHER;
+ }
+}
+
+static int mlxsw_sp_port_ptys_query(struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
+ u32 *p_eth_proto_oper, u8 *p_connector_type)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ const struct mlxsw_sp_port_type_speed_ops *ops;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ int err;
+
+ ops = mlxsw_sp->port_type_speed_ops;
+
+ ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 0, false);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+ if (err)
+ return err;
+
+ ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, p_eth_proto_cap, p_eth_proto_admin,
+ p_eth_proto_oper);
+ if (p_connector_type)
+ *p_connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl);
+ return 0;
+}
+
+static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ const struct mlxsw_sp_port_type_speed_ops *ops;
+ u8 connector_type;
+ bool autoneg;
+ int err;
+
+ err = mlxsw_sp_port_ptys_query(mlxsw_sp_port, &eth_proto_cap, &eth_proto_admin,
+ &eth_proto_oper, &connector_type);
+ if (err)
+ return err;
+
+ ops = mlxsw_sp->port_type_speed_ops;
+ autoneg = mlxsw_sp_port->link.autoneg;
+
+ mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap, cmd);
+
+ mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg, cmd);
+
+ cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ cmd->base.port = mlxsw_sp_port_connector_port(connector_type);
+ ops->from_ptys_link_mode(mlxsw_sp, netif_carrier_ok(dev),
+ eth_proto_oper, cmd);
+
+ return 0;
+}
+
+static int
+mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ const struct mlxsw_sp_port_type_speed_ops *ops;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 eth_proto_cap, eth_proto_new;
+ bool autoneg;
+ int err;
+
+ ops = mlxsw_sp->port_type_speed_ops;
+
+ ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
+ 0, false);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+ if (err)
+ return err;
+ ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap, NULL, NULL);
+
+ autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
+ eth_proto_new = autoneg ?
+ ops->to_ptys_advert_link(mlxsw_sp, cmd) :
+ ops->to_ptys_speed_lanes(mlxsw_sp, mlxsw_sp_port->mapping.width,
+ cmd);
+
+ eth_proto_new = eth_proto_new & eth_proto_cap;
+ if (!eth_proto_new) {
+ netdev_err(dev, "No supported speed or lanes requested\n");
+ return -EINVAL;
+ }
+
+ ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
+ eth_proto_new, autoneg);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+ if (err)
+ return err;
+
+ mlxsw_sp_port->link.autoneg = autoneg;
+
+ if (!netif_running(dev))
+ return 0;
+
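+	/* Toggle the port administratively down and up so that the new PTYS
+	 * configuration takes effect on a running port.
+	 */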
+ mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+ mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
+
+ return 0;
+}
+
+static int mlxsw_sp_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ return mlxsw_env_get_module_info(netdev, mlxsw_sp->core,
+ mlxsw_sp_port->mapping.slot_index,
+ mlxsw_sp_port->mapping.module,
+ modinfo);
+}
+
+static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
+ u8 module = mlxsw_sp_port->mapping.module;
+
+ return mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core, slot_index,
+ module, ee, data);
+}
+
+static int
+mlxsw_sp_get_module_eeprom_by_page(struct net_device *dev,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
+ u8 module = mlxsw_sp_port->mapping.module;
+
+ return mlxsw_env_get_module_eeprom_by_page(mlxsw_sp->core, slot_index,
+ module, page, extack);
+}
+
+static int
+mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ return mlxsw_sp->ptp_ops->get_ts_info(mlxsw_sp, info);
+}
+
+static void
+mlxsw_sp_get_eth_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+
+ if (mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
+ 0, ppcnt_pl))
+ return;
+
+ phy_stats->SymbolErrorDuringCarrier =
+ mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get(ppcnt_pl);
+}
+
+static void
+mlxsw_sp_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+
+ if (mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
+ 0, ppcnt_pl))
+ return;
+
+ mac_stats->FramesTransmittedOK =
+ mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
+ mac_stats->FramesReceivedOK =
+ mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
+ mac_stats->FrameCheckSequenceErrors =
+ mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
+ mac_stats->AlignmentErrors =
+ mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
+ mac_stats->OctetsTransmittedOK =
+ mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
+ mac_stats->OctetsReceivedOK =
+ mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
+ mac_stats->MulticastFramesXmittedOK =
+ mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get(ppcnt_pl);
+ mac_stats->BroadcastFramesXmittedOK =
+ mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get(ppcnt_pl);
+ mac_stats->MulticastFramesReceivedOK =
+ mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
+ mac_stats->BroadcastFramesReceivedOK =
+ mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get(ppcnt_pl);
+ mac_stats->InRangeLengthErrors =
+ mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl);
+ mac_stats->OutOfRangeLengthField =
+ mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl);
+ mac_stats->FrameTooLongErrors =
+ mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl);
+}
+
+static void
+mlxsw_sp_get_eth_ctrl_stats(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+
+ if (mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
+ 0, ppcnt_pl))
+ return;
+
+ ctrl_stats->MACControlFramesTransmitted =
+ mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get(ppcnt_pl);
+ ctrl_stats->MACControlFramesReceived =
+ mlxsw_reg_ppcnt_a_mac_control_frames_received_get(ppcnt_pl);
+ ctrl_stats->UnsupportedOpcodesReceived =
+ mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get(ppcnt_pl);
+}
+
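+/* Packet size ranges matching the RFC 2819 histogram counters reported via
+ * get_rmon_stats(). The array is zero-terminated.
+ */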
+static const struct ethtool_rmon_hist_range mlxsw_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 2047 },
+ { 2048, 4095 },
+ { 4096, 8191 },
+ { 8192, 10239 },
+ {}
+};
+
+static void
+mlxsw_sp_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+
+ if (mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_RFC_2819_CNT,
+ 0, ppcnt_pl))
+ return;
+
+ rmon->undersize_pkts =
+ mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get(ppcnt_pl);
+ rmon->oversize_pkts =
+ mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get(ppcnt_pl);
+ rmon->fragments =
+ mlxsw_reg_ppcnt_ether_stats_fragments_get(ppcnt_pl);
+
+ rmon->hist[0] = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get(ppcnt_pl);
+ rmon->hist[1] =
+ mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get(ppcnt_pl);
+ rmon->hist[2] =
+ mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get(ppcnt_pl);
+ rmon->hist[3] =
+ mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get(ppcnt_pl);
+ rmon->hist[4] =
+ mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get(ppcnt_pl);
+ rmon->hist[5] =
+ mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get(ppcnt_pl);
+ rmon->hist[6] =
+ mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get(ppcnt_pl);
+ rmon->hist[7] =
+ mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get(ppcnt_pl);
+ rmon->hist[8] =
+ mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get(ppcnt_pl);
+ rmon->hist[9] =
+ mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get(ppcnt_pl);
+
+ *ranges = mlxsw_rmon_ranges;
+}
+
+static int mlxsw_sp_reset(struct net_device *dev, u32 *flags)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
+ u8 module = mlxsw_sp_port->mapping.module;
+
+ return mlxsw_env_reset_module(dev, mlxsw_sp->core, slot_index,
+ module, flags);
+}
+
+static int
+mlxsw_sp_get_module_power_mode(struct net_device *dev,
+ struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
+ u8 module = mlxsw_sp_port->mapping.module;
+
+ return mlxsw_env_get_module_power_mode(mlxsw_sp->core, slot_index,
+ module, params, extack);
+}
+
+static int
+mlxsw_sp_set_module_power_mode(struct net_device *dev,
+ const struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
+ u8 module = mlxsw_sp_port->mapping.module;
+
+ return mlxsw_env_set_module_power_mode(mlxsw_sp->core, slot_index,
+ module, params->policy, extack);
+}
+
+const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
+ .cap_link_lanes_supported = true,
+ .get_drvinfo = mlxsw_sp_port_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_link_ext_state = mlxsw_sp_port_get_link_ext_state,
+ .get_pauseparam = mlxsw_sp_port_get_pauseparam,
+ .set_pauseparam = mlxsw_sp_port_set_pauseparam,
+ .get_strings = mlxsw_sp_port_get_strings,
+ .set_phys_id = mlxsw_sp_port_set_phys_id,
+ .get_ethtool_stats = mlxsw_sp_port_get_stats,
+ .get_sset_count = mlxsw_sp_port_get_sset_count,
+ .get_link_ksettings = mlxsw_sp_port_get_link_ksettings,
+ .set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
+ .get_module_info = mlxsw_sp_get_module_info,
+ .get_module_eeprom = mlxsw_sp_get_module_eeprom,
+ .get_module_eeprom_by_page = mlxsw_sp_get_module_eeprom_by_page,
+ .get_ts_info = mlxsw_sp_get_ts_info,
+ .get_eth_phy_stats = mlxsw_sp_get_eth_phy_stats,
+ .get_eth_mac_stats = mlxsw_sp_get_eth_mac_stats,
+ .get_eth_ctrl_stats = mlxsw_sp_get_eth_ctrl_stats,
+ .get_rmon_stats = mlxsw_sp_get_rmon_stats,
+ .reset = mlxsw_sp_reset,
+ .get_module_power_mode = mlxsw_sp_get_module_power_mode,
+ .set_module_power_mode = mlxsw_sp_set_module_power_mode,
+};
+
+struct mlxsw_sp1_port_link_mode {
+ enum ethtool_link_mode_bit_indices mask_ethtool;
+ u32 mask;
+ u32 speed;
+};
+
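+/* Translation between Spectrum-1 PTYS Ethernet protocol bits, ethtool link
+ * mode bits and link speeds.
+ */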
+static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = {
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
+ .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ .speed = SPEED_100,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
+ MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
+ .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ .speed = SPEED_1000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_1000BASE_T,
+ .mask_ethtool = ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ .speed = SPEED_1000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ .speed = SPEED_10000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
+ .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ .speed = SPEED_10000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ .speed = SPEED_40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ .speed = SPEED_40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ .speed = SPEED_40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ .speed = SPEED_40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
+ .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ .speed = SPEED_25000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
+ .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ .speed = SPEED_25000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
+ .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ .speed = SPEED_25000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
+ .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ .speed = SPEED_50000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
+ .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ .speed = SPEED_50000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
+ .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+ .speed = SPEED_50000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ .speed = SPEED_100000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ .speed = SPEED_100000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ .speed = SPEED_100000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+ .speed = SPEED_100000,
+ },
+};
+
+#define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode)
+
+static void
+mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
+ u32 ptys_eth_proto,
+ struct ethtool_link_ksettings *cmd)
+{
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_SGMII))
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
+}
+
+static void
+mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
+ unsigned long *mode)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
+ __set_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
+ mode);
+ }
+}
+
+static u32
+mlxsw_sp1_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
+ return mlxsw_sp1_port_link_mode[i].speed;
+ }
+
+ return SPEED_UNKNOWN;
+}
+
+static void
+mlxsw_sp1_from_ptys_link_mode(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
+ u32 ptys_eth_proto,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct mlxsw_sp1_port_link_mode link;
+ int i;
+
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->lanes = 0;
+
+ if (!carrier_ok)
+ return;
+
+ for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) {
+ link = mlxsw_sp1_port_link_mode[i];
+ ethtool_params_from_link_mode(cmd,
+ link.mask_ethtool);
+ }
+ }
+}
+
+static int mlxsw_sp1_ptys_max_speed(struct mlxsw_sp_port *mlxsw_sp_port, u32 *p_max_speed)
+{
+ u32 eth_proto_cap;
+ u32 max_speed = 0;
+ int err;
+ int i;
+
+ err = mlxsw_sp_port_ptys_query(mlxsw_sp_port, &eth_proto_cap, NULL, NULL, NULL);
+ if (err)
+ return err;
+
+ for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
+ if ((eth_proto_cap & mlxsw_sp1_port_link_mode[i].mask) &&
+ mlxsw_sp1_port_link_mode[i].speed > max_speed)
+ max_speed = mlxsw_sp1_port_link_mode[i].speed;
+ }
+
+ *p_max_speed = max_speed;
+ return 0;
+}
+
+static u32
+mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp,
+ const struct ethtool_link_ksettings *cmd)
+{
+ u32 ptys_proto = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
+ if (test_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
+ cmd->link_modes.advertising))
+ ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
+ }
+ return ptys_proto;
+}
+
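+/* Convert the speed requested by user space to the PTYS mask of all
+ * Spectrum-1 link modes operating at that speed. Requests asking for more
+ * lanes than the port width are rejected with an empty mask.
+ */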
+static u32 mlxsw_sp1_to_ptys_speed_lanes(struct mlxsw_sp *mlxsw_sp, u8 width,
+ const struct ethtool_link_ksettings *cmd)
+{
+ u32 ptys_proto = 0;
+ int i;
+
+ if (cmd->lanes > width)
+ return ptys_proto;
+
+ for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
+ if (cmd->base.speed == mlxsw_sp1_port_link_mode[i].speed)
+ ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
+ }
+ return ptys_proto;
+}
+
+static void
+mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
+ u16 local_port, u32 proto_admin, bool autoneg)
+{
+ mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg);
+}
+
+static void
+mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
+ u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
+ u32 *p_eth_proto_oper)
+{
+ mlxsw_reg_ptys_eth_unpack(payload, p_eth_proto_cap, p_eth_proto_admin,
+ p_eth_proto_oper);
+}
+
+static u32 mlxsw_sp1_ptys_proto_cap_masked_get(u32 eth_proto_cap)
+{
+ u32 ptys_proto_cap_masked = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
+ if (mlxsw_sp1_port_link_mode[i].mask & eth_proto_cap)
+ ptys_proto_cap_masked |=
+ mlxsw_sp1_port_link_mode[i].mask;
+ }
+
+ return ptys_proto_cap_masked;
+}
+
+const struct mlxsw_sp_port_type_speed_ops mlxsw_sp1_port_type_speed_ops = {
+ .from_ptys_supported_port = mlxsw_sp1_from_ptys_supported_port,
+ .from_ptys_link = mlxsw_sp1_from_ptys_link,
+ .from_ptys_speed = mlxsw_sp1_from_ptys_speed,
+ .from_ptys_link_mode = mlxsw_sp1_from_ptys_link_mode,
+ .ptys_max_speed = mlxsw_sp1_ptys_max_speed,
+ .to_ptys_advert_link = mlxsw_sp1_to_ptys_advert_link,
+ .to_ptys_speed_lanes = mlxsw_sp1_to_ptys_speed_lanes,
+ .reg_ptys_eth_pack = mlxsw_sp1_reg_ptys_eth_pack,
+ .reg_ptys_eth_unpack = mlxsw_sp1_reg_ptys_eth_unpack,
+ .ptys_proto_cap_masked_get = mlxsw_sp1_ptys_proto_cap_masked_get,
+};
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_sgmii_100m[] = {
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_sgmii_100m)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_1000base_x_sgmii[] = {
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_1000base_x_sgmii)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_5gbase_r[] = {
+ ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_5gbase_r)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g[] = {
+ ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
+ ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g[] = {
+ ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr[] = {
+ ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2[] = {
+ ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr[] = {
+ ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4[] = {
+ ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = {
+ ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = {
+ ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_400gaui_8[] = {
+ ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_400gaui_8)
+
+#define MLXSW_SP_PORT_MASK_WIDTH_1X BIT(0)
+#define MLXSW_SP_PORT_MASK_WIDTH_2X BIT(1)
+#define MLXSW_SP_PORT_MASK_WIDTH_4X BIT(2)
+#define MLXSW_SP_PORT_MASK_WIDTH_8X BIT(3)
+
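+/* Convert a port's lane count to the MLXSW_SP_PORT_MASK_WIDTH_* bit that is
+ * tested against the mask_sup_width of each Spectrum-2 link mode.
+ */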
+static u8 mlxsw_sp_port_mask_width_get(u8 width)
+{
+ switch (width) {
+ case 1:
+ return MLXSW_SP_PORT_MASK_WIDTH_1X;
+ case 2:
+ return MLXSW_SP_PORT_MASK_WIDTH_2X;
+ case 4:
+ return MLXSW_SP_PORT_MASK_WIDTH_4X;
+ case 8:
+ return MLXSW_SP_PORT_MASK_WIDTH_8X;
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+}
+
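+/* Unlike Spectrum-1, a single PTYS bit on Spectrum-2 corresponds to several
+ * ethtool link modes, so mask_ethtool points to an array of m_ethtool_len
+ * ethtool bits. mask_sup_width encodes the port widths the mode can be used
+ * with and width is the number of lanes the mode actually consumes.
+ */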
+struct mlxsw_sp2_port_link_mode {
+ const enum ethtool_link_mode_bit_indices *mask_ethtool;
+ int m_ethtool_len;
+ u32 mask;
+ u32 speed;
+ u32 width;
+ u8 mask_sup_width;
+};
+
+static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
+ {
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_sgmii_100m,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN,
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
+ MLXSW_SP_PORT_MASK_WIDTH_2X |
+ MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
+ .speed = SPEED_100,
+ .width = 1,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_1000base_x_sgmii,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN,
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
+ MLXSW_SP_PORT_MASK_WIDTH_2X |
+ MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
+ .speed = SPEED_1000,
+ .width = 1,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_5gbase_r,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN,
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
+ MLXSW_SP_PORT_MASK_WIDTH_2X |
+ MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
+ .speed = SPEED_5000,
+ .width = 1,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN,
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
+ MLXSW_SP_PORT_MASK_WIDTH_2X |
+ MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
+ .speed = SPEED_10000,
+ .width = 1,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN,
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
+ .speed = SPEED_40000,
+ .width = 4,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN,
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
+ MLXSW_SP_PORT_MASK_WIDTH_2X |
+ MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
+ .speed = SPEED_25000,
+ .width = 1,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN,
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_2X |
+ MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
+ .speed = SPEED_50000,
+ .width = 2,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN,
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_1X,
+ .speed = SPEED_50000,
+ .width = 1,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN,
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
+ .speed = SPEED_100000,
+ .width = 4,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN,
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_2X,
+ .speed = SPEED_100000,
+ .width = 2,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN,
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
+ .speed = SPEED_200000,
+ .width = 4,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_400gaui_8,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN,
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_8X,
+ .speed = SPEED_400000,
+ .width = 8,
+ },
+};
+
+#define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode)
+
+static void
+mlxsw_sp2_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
+ u32 ptys_eth_proto,
+ struct ethtool_link_ksettings *cmd)
+{
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
+}
+
+static void
+mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
+ unsigned long *mode)
+{
+ int i;
+
+ for (i = 0; i < link_mode->m_ethtool_len; i++)
+ __set_bit(link_mode->mask_ethtool[i], mode);
+}
+
+static void
+mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
+ unsigned long *mode)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask)
+ mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
+ mode);
+ }
+}
+
+static u32
+mlxsw_sp2_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask)
+ return mlxsw_sp2_port_link_mode[i].speed;
+ }
+
+ return SPEED_UNKNOWN;
+}
+
+static void
+mlxsw_sp2_from_ptys_link_mode(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
+ u32 ptys_eth_proto,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct mlxsw_sp2_port_link_mode link;
+ int i;
+
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->lanes = 0;
+
+ if (!carrier_ok)
+ return;
+
+ for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) {
+ link = mlxsw_sp2_port_link_mode[i];
+ ethtool_params_from_link_mode(cmd,
+ link.mask_ethtool[1]);
+ }
+ }
+}
+
+static int mlxsw_sp2_ptys_max_speed(struct mlxsw_sp_port *mlxsw_sp_port, u32 *p_max_speed)
+{
+ u32 eth_proto_cap;
+ u32 max_speed = 0;
+ int err;
+ int i;
+
+ err = mlxsw_sp_port_ptys_query(mlxsw_sp_port, &eth_proto_cap, NULL, NULL, NULL);
+ if (err)
+ return err;
+
+ for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
+ if ((eth_proto_cap & mlxsw_sp2_port_link_mode[i].mask) &&
+ mlxsw_sp2_port_link_mode[i].speed > max_speed)
+ max_speed = mlxsw_sp2_port_link_mode[i].speed;
+ }
+
+ *p_max_speed = max_speed;
+ return 0;
+}
+
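+/* A Spectrum-2 link mode is considered advertised only if all of the ethtool
+ * bits it maps to are set in the given bitmap.
+ */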
+static bool
+mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
+ const unsigned long *mode)
+{
+ int cnt = 0;
+ int i;
+
+ for (i = 0; i < link_mode->m_ethtool_len; i++) {
+ if (test_bit(link_mode->mask_ethtool[i], mode))
+ cnt++;
+ }
+
+ return cnt == link_mode->m_ethtool_len;
+}
+
+static u32
+mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp,
+ const struct ethtool_link_ksettings *cmd)
+{
+ u32 ptys_proto = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
+ if (mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
+ cmd->link_modes.advertising))
+ ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
+ }
+ return ptys_proto;
+}
+
+static u32 mlxsw_sp2_to_ptys_speed_lanes(struct mlxsw_sp *mlxsw_sp, u8 width,
+ const struct ethtool_link_ksettings *cmd)
+{
+ u8 mask_width = mlxsw_sp_port_mask_width_get(width);
+ struct mlxsw_sp2_port_link_mode link_mode;
+ u32 ptys_proto = 0;
+ int i;
+
+ if (cmd->lanes > width)
+ return ptys_proto;
+
+ for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
+ if (cmd->base.speed == mlxsw_sp2_port_link_mode[i].speed) {
+ link_mode = mlxsw_sp2_port_link_mode[i];
+
+ if (!cmd->lanes) {
+ /* If number of lanes was not set by user space,
+ * choose the link mode that supports the width
+ * of the port.
+ */
+ if (mask_width & link_mode.mask_sup_width)
+ ptys_proto |= link_mode.mask;
+ } else if (cmd->lanes == link_mode.width) {
+ /* Else, if the number of lanes was set, choose
+ * the link mode whose actual width equals the
+ * requested number of lanes.
+ */
+ ptys_proto |= link_mode.mask;
+ }
+ }
+ }
+ return ptys_proto;
+}
+
+static void
+mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
+ u16 local_port, u32 proto_admin,
+ bool autoneg)
+{
+ mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg);
+}
+
+static void
+mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
+ u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
+ u32 *p_eth_proto_oper)
+{
+ mlxsw_reg_ptys_ext_eth_unpack(payload, p_eth_proto_cap,
+ p_eth_proto_admin, p_eth_proto_oper);
+}
+
+static u32 mlxsw_sp2_ptys_proto_cap_masked_get(u32 eth_proto_cap)
+{
+ u32 ptys_proto_cap_masked = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
+ if (mlxsw_sp2_port_link_mode[i].mask & eth_proto_cap)
+ ptys_proto_cap_masked |=
+ mlxsw_sp2_port_link_mode[i].mask;
+ }
+
+ return ptys_proto_cap_masked;
+}
+
+const struct mlxsw_sp_port_type_speed_ops mlxsw_sp2_port_type_speed_ops = {
+ .from_ptys_supported_port = mlxsw_sp2_from_ptys_supported_port,
+ .from_ptys_link = mlxsw_sp2_from_ptys_link,
+ .from_ptys_speed = mlxsw_sp2_from_ptys_speed,
+ .from_ptys_link_mode = mlxsw_sp2_from_ptys_link_mode,
+ .ptys_max_speed = mlxsw_sp2_ptys_max_speed,
+ .to_ptys_advert_link = mlxsw_sp2_to_ptys_advert_link,
+ .to_ptys_speed_lanes = mlxsw_sp2_to_ptys_speed_lanes,
+ .reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack,
+ .reg_ptys_eth_unpack = mlxsw_sp2_reg_ptys_eth_unpack,
+ .ptys_proto_cap_masked_get = mlxsw_sp2_ptys_proto_cap_masked_get,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
new file mode 100644
index 000000000..b6ee2d658
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -0,0 +1,1876 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
+#include <linux/netdevice.h>
+#include <linux/rhashtable.h>
+#include <linux/rtnetlink.h>
+#include <linux/refcount.h>
+
+#include "spectrum.h"
+#include "reg.h"
+
+struct mlxsw_sp_fid_family;
+
+struct mlxsw_sp_fid_core {
+ struct rhashtable fid_ht;
+ struct rhashtable vni_ht;
+ struct mlxsw_sp_fid_family *fid_family_arr[MLXSW_SP_FID_TYPE_MAX];
+ unsigned int *port_fid_mappings;
+};
+
+struct mlxsw_sp_fid_port_vid {
+ struct list_head list;
+ u16 local_port;
+ u16 vid;
+};
+
+struct mlxsw_sp_fid {
+ struct list_head list;
+ struct mlxsw_sp_rif *rif;
+ refcount_t ref_count;
+ u16 fid_index;
+ u16 fid_offset;
+ struct mlxsw_sp_fid_family *fid_family;
+ struct rhash_head ht_node;
+
+ struct rhash_head vni_ht_node;
+ enum mlxsw_sp_nve_type nve_type;
+ __be32 vni;
+ u32 nve_flood_index;
+ int nve_ifindex;
+ u8 vni_valid:1,
+ nve_flood_index_valid:1;
+ struct list_head port_vid_list; /* Ordered by local port. */
+};
+
+struct mlxsw_sp_fid_8021q {
+ struct mlxsw_sp_fid common;
+ u16 vid;
+};
+
+struct mlxsw_sp_fid_8021d {
+ struct mlxsw_sp_fid common;
+ int br_ifindex;
+};
+
+static const struct rhashtable_params mlxsw_sp_fid_ht_params = {
+ .key_len = sizeof_field(struct mlxsw_sp_fid, fid_index),
+ .key_offset = offsetof(struct mlxsw_sp_fid, fid_index),
+ .head_offset = offsetof(struct mlxsw_sp_fid, ht_node),
+};
+
+static const struct rhashtable_params mlxsw_sp_fid_vni_ht_params = {
+ .key_len = sizeof_field(struct mlxsw_sp_fid, vni),
+ .key_offset = offsetof(struct mlxsw_sp_fid, vni),
+ .head_offset = offsetof(struct mlxsw_sp_fid, vni_ht_node),
+};
+
+struct mlxsw_sp_flood_table {
+ enum mlxsw_sp_flood_type packet_type;
+ enum mlxsw_flood_table_type table_type;
+ int table_index;
+};
+
+struct mlxsw_sp_fid_ops {
+ void (*setup)(struct mlxsw_sp_fid *fid, const void *arg);
+ int (*configure)(struct mlxsw_sp_fid *fid);
+ void (*deconfigure)(struct mlxsw_sp_fid *fid);
+ int (*index_alloc)(struct mlxsw_sp_fid *fid, const void *arg,
+ u16 *p_fid_index);
+ bool (*compare)(const struct mlxsw_sp_fid *fid,
+ const void *arg);
+ int (*port_vid_map)(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *port, u16 vid);
+ void (*port_vid_unmap)(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *port, u16 vid);
+ int (*vni_set)(struct mlxsw_sp_fid *fid);
+ void (*vni_clear)(struct mlxsw_sp_fid *fid);
+ int (*nve_flood_index_set)(struct mlxsw_sp_fid *fid);
+ void (*nve_flood_index_clear)(struct mlxsw_sp_fid *fid);
+ void (*fdb_clear_offload)(const struct mlxsw_sp_fid *fid,
+ const struct net_device *nve_dev);
+ int (*vid_to_fid_rif_update)(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif);
+};
+
+struct mlxsw_sp_fid_family {
+ enum mlxsw_sp_fid_type type;
+ size_t fid_size;
+ u16 start_index;
+ u16 end_index;
+ struct list_head fids_list;
+ unsigned long *fids_bitmap;
+ const struct mlxsw_sp_flood_table *flood_tables;
+ int nr_flood_tables;
+ enum mlxsw_sp_rif_type rif_type;
+ const struct mlxsw_sp_fid_ops *ops;
+ struct mlxsw_sp *mlxsw_sp;
+ bool flood_rsp;
+ enum mlxsw_reg_bridge_type bridge_type;
+ u16 pgt_base;
+ bool smpe_index_valid;
+};
+
+static const int mlxsw_sp_sfgc_uc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
+ [MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST] = 1,
+};
+
+static const int mlxsw_sp_sfgc_bc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
+ [MLXSW_REG_SFGC_TYPE_BROADCAST] = 1,
+ [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP] = 1,
+ [MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL] = 1,
+ [MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST] = 1,
+ [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1,
+};
+
+static const int mlxsw_sp_sfgc_mc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
+ [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4] = 1,
+};
+
+static const int *mlxsw_sp_packet_type_sfgc_types[] = {
+ [MLXSW_SP_FLOOD_TYPE_UC] = mlxsw_sp_sfgc_uc_packet_types,
+ [MLXSW_SP_FLOOD_TYPE_BC] = mlxsw_sp_sfgc_bc_packet_types,
+ [MLXSW_SP_FLOOD_TYPE_MC] = mlxsw_sp_sfgc_mc_packet_types,
+};
+
+bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index)
+{
+ enum mlxsw_sp_fid_type fid_type = MLXSW_SP_FID_TYPE_DUMMY;
+ struct mlxsw_sp_fid_family *fid_family;
+
+ fid_family = mlxsw_sp->fid_core->fid_family_arr[fid_type];
+
+ return fid_family->start_index == fid_index;
+}
+
+struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
+ u16 fid_index)
+{
+ struct mlxsw_sp_fid *fid;
+
+ fid = rhashtable_lookup_fast(&mlxsw_sp->fid_core->fid_ht, &fid_index,
+ mlxsw_sp_fid_ht_params);
+ if (fid)
+ refcount_inc(&fid->ref_count);
+
+ return fid;
+}
+
+int mlxsw_sp_fid_nve_ifindex(const struct mlxsw_sp_fid *fid, int *nve_ifindex)
+{
+ if (!fid->vni_valid)
+ return -EINVAL;
+
+ *nve_ifindex = fid->nve_ifindex;
+
+ return 0;
+}
+
+int mlxsw_sp_fid_nve_type(const struct mlxsw_sp_fid *fid,
+ enum mlxsw_sp_nve_type *p_type)
+{
+ if (!fid->vni_valid)
+ return -EINVAL;
+
+ *p_type = fid->nve_type;
+
+ return 0;
+}
+
+struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_vni(struct mlxsw_sp *mlxsw_sp,
+ __be32 vni)
+{
+ struct mlxsw_sp_fid *fid;
+
+ fid = rhashtable_lookup_fast(&mlxsw_sp->fid_core->vni_ht, &vni,
+ mlxsw_sp_fid_vni_ht_params);
+ if (fid)
+ refcount_inc(&fid->ref_count);
+
+ return fid;
+}
+
+int mlxsw_sp_fid_vni(const struct mlxsw_sp_fid *fid, __be32 *vni)
+{
+ if (!fid->vni_valid)
+ return -EINVAL;
+
+ *vni = fid->vni;
+
+ return 0;
+}
+
+int mlxsw_sp_fid_nve_flood_index_set(struct mlxsw_sp_fid *fid,
+ u32 nve_flood_index)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
+ int err;
+
+ if (WARN_ON(fid->nve_flood_index_valid))
+ return -EINVAL;
+
+ fid->nve_flood_index = nve_flood_index;
+ fid->nve_flood_index_valid = true;
+ err = ops->nve_flood_index_set(fid);
+ if (err)
+ goto err_nve_flood_index_set;
+
+ return 0;
+
+err_nve_flood_index_set:
+ fid->nve_flood_index_valid = false;
+ return err;
+}
+
+void mlxsw_sp_fid_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
+
+ if (WARN_ON(!fid->nve_flood_index_valid))
+ return;
+
+ fid->nve_flood_index_valid = false;
+ ops->nve_flood_index_clear(fid);
+}
+
+bool mlxsw_sp_fid_nve_flood_index_is_set(const struct mlxsw_sp_fid *fid)
+{
+ return fid->nve_flood_index_valid;
+}
+
+int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, enum mlxsw_sp_nve_type type,
+ __be32 vni, int nve_ifindex)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
+ struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
+ int err;
+
+ if (WARN_ON(fid->vni_valid))
+ return -EINVAL;
+
+ fid->nve_type = type;
+ fid->nve_ifindex = nve_ifindex;
+ fid->vni = vni;
+ err = rhashtable_lookup_insert_fast(&mlxsw_sp->fid_core->vni_ht,
+ &fid->vni_ht_node,
+ mlxsw_sp_fid_vni_ht_params);
+ if (err)
+ return err;
+
+ fid->vni_valid = true;
+ err = ops->vni_set(fid);
+ if (err)
+ goto err_vni_set;
+
+ return 0;
+
+err_vni_set:
+ fid->vni_valid = false;
+ rhashtable_remove_fast(&mlxsw_sp->fid_core->vni_ht, &fid->vni_ht_node,
+ mlxsw_sp_fid_vni_ht_params);
+ return err;
+}
+
+void mlxsw_sp_fid_vni_clear(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
+ struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
+
+ if (WARN_ON(!fid->vni_valid))
+ return;
+
+ fid->vni_valid = false;
+ ops->vni_clear(fid);
+ rhashtable_remove_fast(&mlxsw_sp->fid_core->vni_ht, &fid->vni_ht_node,
+ mlxsw_sp_fid_vni_ht_params);
+}
+
+bool mlxsw_sp_fid_vni_is_set(const struct mlxsw_sp_fid *fid)
+{
+ return fid->vni_valid;
+}
+
+void mlxsw_sp_fid_fdb_clear_offload(const struct mlxsw_sp_fid *fid,
+ const struct net_device *nve_dev)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
+
+ if (ops->fdb_clear_offload)
+ ops->fdb_clear_offload(fid, nve_dev);
+}
+
+static const struct mlxsw_sp_flood_table *
+mlxsw_sp_fid_flood_table_lookup(const struct mlxsw_sp_fid *fid,
+ enum mlxsw_sp_flood_type packet_type)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ int i;
+
+ for (i = 0; i < fid_family->nr_flood_tables; i++) {
+ if (fid_family->flood_tables[i].packet_type != packet_type)
+ continue;
+ return &fid_family->flood_tables[i];
+ }
+
+ return NULL;
+}
+
+static u16
+mlxsw_sp_fid_family_num_fids(const struct mlxsw_sp_fid_family *fid_family)
+{
+ return fid_family->end_index - fid_family->start_index + 1;
+}
+
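+/* Each flood table of a FID family occupies a contiguous block of num_fids
+ * PGT entries starting at pgt_base; fid_offset selects the entry within the
+ * block of the given flood table.
+ */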
+static u16
+mlxsw_sp_fid_flood_table_mid(const struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_flood_table *flood_table,
+ u16 fid_offset)
+{
+ u16 num_fids;
+
+ num_fids = mlxsw_sp_fid_family_num_fids(fid_family);
+ return fid_family->pgt_base + num_fids * flood_table->table_index +
+ fid_offset;
+}
+
+int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
+ enum mlxsw_sp_flood_type packet_type, u16 local_port,
+ bool member)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ const struct mlxsw_sp_flood_table *flood_table;
+ u16 mid_index;
+
+ if (WARN_ON(!fid_family->flood_tables))
+ return -EINVAL;
+
+ flood_table = mlxsw_sp_fid_flood_table_lookup(fid, packet_type);
+ if (!flood_table)
+ return -ESRCH;
+
+ mid_index = mlxsw_sp_fid_flood_table_mid(fid_family, flood_table,
+ fid->fid_offset);
+ return mlxsw_sp_pgt_entry_port_set(fid_family->mlxsw_sp, mid_index,
+ fid->fid_index, local_port, member);
+}
+
+int mlxsw_sp_fid_port_vid_map(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ if (WARN_ON(!fid->fid_family->ops->port_vid_map))
+ return -EINVAL;
+ return fid->fid_family->ops->port_vid_map(fid, mlxsw_sp_port, vid);
+}
+
+void mlxsw_sp_fid_port_vid_unmap(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ fid->fid_family->ops->port_vid_unmap(fid, mlxsw_sp_port, vid);
+}
+
+u16 mlxsw_sp_fid_index(const struct mlxsw_sp_fid *fid)
+{
+ return fid->fid_index;
+}
+
+enum mlxsw_sp_fid_type mlxsw_sp_fid_type(const struct mlxsw_sp_fid *fid)
+{
+ return fid->fid_family->type;
+}
+
+struct mlxsw_sp_rif *mlxsw_sp_fid_rif(const struct mlxsw_sp_fid *fid)
+{
+ return fid->rif;
+}
+
+enum mlxsw_sp_rif_type
+mlxsw_sp_fid_type_rif_type(const struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_fid_type type)
+{
+ struct mlxsw_sp_fid_core *fid_core = mlxsw_sp->fid_core;
+
+ return fid_core->fid_family_arr[type]->rif_type;
+}
+
+static struct mlxsw_sp_fid_8021q *
+mlxsw_sp_fid_8021q_fid(const struct mlxsw_sp_fid *fid)
+{
+ return container_of(fid, struct mlxsw_sp_fid_8021q, common);
+}
+
+u16 mlxsw_sp_fid_8021q_vid(const struct mlxsw_sp_fid *fid)
+{
+ return mlxsw_sp_fid_8021q_fid(fid)->vid;
+}
+
+static void mlxsw_sp_fid_8021q_setup(struct mlxsw_sp_fid *fid, const void *arg)
+{
+ u16 vid = *(u16 *) arg;
+
+ mlxsw_sp_fid_8021q_fid(fid)->vid = vid;
+ fid->fid_offset = fid->fid_index - fid->fid_family->start_index;
+}
+
+static enum mlxsw_reg_sfmr_op mlxsw_sp_sfmr_op(bool valid)
+{
+ return valid ? MLXSW_REG_SFMR_OP_CREATE_FID :
+ MLXSW_REG_SFMR_OP_DESTROY_FID;
+}
+
+static int mlxsw_sp_fid_op(const struct mlxsw_sp_fid *fid, bool valid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ char sfmr_pl[MLXSW_REG_SFMR_LEN];
+ u16 smpe;
+
+ smpe = fid->fid_family->smpe_index_valid ? fid->fid_index : 0;
+
+ mlxsw_reg_sfmr_pack(sfmr_pl, mlxsw_sp_sfmr_op(valid), fid->fid_index,
+ fid->fid_offset, fid->fid_family->flood_rsp,
+ fid->fid_family->bridge_type,
+ fid->fid_family->smpe_index_valid, smpe);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+}
+
+static int mlxsw_sp_fid_edit_op(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ char sfmr_pl[MLXSW_REG_SFMR_LEN];
+ u16 smpe;
+
+ smpe = fid->fid_family->smpe_index_valid ? fid->fid_index : 0;
+
+ mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID,
+ fid->fid_index, fid->fid_offset,
+ fid->fid_family->flood_rsp,
+ fid->fid_family->bridge_type,
+ fid->fid_family->smpe_index_valid, smpe);
+ mlxsw_reg_sfmr_vv_set(sfmr_pl, fid->vni_valid);
+ mlxsw_reg_sfmr_vni_set(sfmr_pl, be32_to_cpu(fid->vni));
+ mlxsw_reg_sfmr_vtfp_set(sfmr_pl, fid->nve_flood_index_valid);
+ mlxsw_reg_sfmr_nve_tunnel_flood_ptr_set(sfmr_pl, fid->nve_flood_index);
+
+ if (rif) {
+ mlxsw_reg_sfmr_irif_v_set(sfmr_pl, true);
+ mlxsw_reg_sfmr_irif_set(sfmr_pl, mlxsw_sp_rif_index(rif));
+ }
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+}
+
+static int mlxsw_sp_fid_vni_to_fid_map(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif,
+ bool valid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ char svfa_pl[MLXSW_REG_SVFA_LEN];
+ bool irif_valid;
+ u16 irif_index;
+
+ irif_valid = !!rif;
+ irif_index = rif ? mlxsw_sp_rif_index(rif) : 0;
+
+ mlxsw_reg_svfa_vni_pack(svfa_pl, valid, fid->fid_index,
+ be32_to_cpu(fid->vni), irif_valid, irif_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static int mlxsw_sp_fid_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ return mlxsw_sp_fid_edit_op(fid, rif);
+}
+
+static int mlxsw_sp_fid_vni_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ if (!fid->vni_valid)
+ return 0;
+
+ return mlxsw_sp_fid_vni_to_fid_map(fid, rif, fid->vni_valid);
+}
+
+static int
+mlxsw_sp_fid_vid_to_fid_map(const struct mlxsw_sp_fid *fid, u16 vid, bool valid,
+ const struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ char svfa_pl[MLXSW_REG_SVFA_LEN];
+ bool irif_valid;
+ u16 irif_index;
+
+ irif_valid = !!rif;
+ irif_index = rif ? mlxsw_sp_rif_index(rif) : 0;
+
+ mlxsw_reg_svfa_vid_pack(svfa_pl, valid, fid->fid_index, vid, irif_valid,
+ irif_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static int
+mlxsw_sp_fid_8021q_vid_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp_fid_8021q *fid_8021q = mlxsw_sp_fid_8021q_fid(fid);
+
+ /* Update the global VID => FID mapping we created when the FID was
+ * configured.
+ */
+ return mlxsw_sp_fid_vid_to_fid_map(fid, fid_8021q->vid, true, rif);
+}
+
+static int
+mlxsw_sp_fid_port_vid_to_fid_rif_update_one(const struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_fid_port_vid *pv,
+ bool irif_valid, u16 irif_index)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ char svfa_pl[MLXSW_REG_SVFA_LEN];
+
+ mlxsw_reg_svfa_port_vid_pack(svfa_pl, pv->local_port, true,
+ fid->fid_index, pv->vid, irif_valid,
+ irif_index);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static int mlxsw_sp_fid_vid_to_fid_rif_set(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ struct mlxsw_sp_fid_port_vid *pv;
+ u16 irif_index;
+ int err;
+
+ err = fid->fid_family->ops->vid_to_fid_rif_update(fid, rif);
+ if (err)
+ return err;
+
+ irif_index = mlxsw_sp_rif_index(rif);
+
+ list_for_each_entry(pv, &fid->port_vid_list, list) {
+ /* If the port is not in virtual mode, then it does not have any
+ * {Port, VID}->FID mappings that need to be updated with the
+ * ingress RIF.
+ */
+ if (!mlxsw_sp->fid_core->port_fid_mappings[pv->local_port])
+ continue;
+
+ err = mlxsw_sp_fid_port_vid_to_fid_rif_update_one(fid, pv,
+ true,
+ irif_index);
+ if (err)
+ goto err_port_vid_to_fid_rif_update_one;
+ }
+
+ return 0;
+
+err_port_vid_to_fid_rif_update_one:
+ list_for_each_entry_continue_reverse(pv, &fid->port_vid_list, list) {
+ if (!mlxsw_sp->fid_core->port_fid_mappings[pv->local_port])
+ continue;
+
+ mlxsw_sp_fid_port_vid_to_fid_rif_update_one(fid, pv, false, 0);
+ }
+
+ fid->fid_family->ops->vid_to_fid_rif_update(fid, NULL);
+ return err;
+}
+
+static void mlxsw_sp_fid_vid_to_fid_rif_unset(const struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ struct mlxsw_sp_fid_port_vid *pv;
+
+ list_for_each_entry(pv, &fid->port_vid_list, list) {
+ /* If the port is not in virtual mode, then it does not have any
+ * {Port, VID}->FID mappings that need to be updated.
+ */
+ if (!mlxsw_sp->fid_core->port_fid_mappings[pv->local_port])
+ continue;
+
+ mlxsw_sp_fid_port_vid_to_fid_rif_update_one(fid, pv, false, 0);
+ }
+
+ fid->fid_family->ops->vid_to_fid_rif_update(fid, NULL);
+}
+
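+/* Program a single REIV register page covering MLXSW_REG_REIV_REC_MAX_COUNT
+ * local ports: for every {Port, VID} mapped to this FID within the page,
+ * set the egress VID for the given egress RIF, or clear it when !valid.
+ */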
+static int mlxsw_sp_fid_reiv_handle(struct mlxsw_sp_fid *fid, u16 rif_index,
+ bool valid, u8 port_page)
+{
+ u16 local_port_end = (port_page + 1) * MLXSW_REG_REIV_REC_MAX_COUNT - 1;
+ u16 local_port_start = port_page * MLXSW_REG_REIV_REC_MAX_COUNT;
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ struct mlxsw_sp_fid_port_vid *port_vid;
+ u8 rec_num, entries_num = 0;
+ char *reiv_pl;
+ int err;
+
+ reiv_pl = kmalloc(MLXSW_REG_REIV_LEN, GFP_KERNEL);
+ if (!reiv_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_reiv_pack(reiv_pl, port_page, rif_index);
+
+ list_for_each_entry(port_vid, &fid->port_vid_list, list) {
+ /* port_vid_list is sorted by local_port. */
+ if (port_vid->local_port < local_port_start)
+ continue;
+
+ if (port_vid->local_port > local_port_end)
+ break;
+
+ rec_num = port_vid->local_port % MLXSW_REG_REIV_REC_MAX_COUNT;
+ mlxsw_reg_reiv_rec_update_set(reiv_pl, rec_num, true);
+ mlxsw_reg_reiv_rec_evid_set(reiv_pl, rec_num,
+ valid ? port_vid->vid : 0);
+ entries_num++;
+ }
+
+ if (!entries_num) {
+ kfree(reiv_pl);
+ return 0;
+ }
+
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(reiv), reiv_pl);
+ if (err)
+ goto err_reg_write;
+
+ kfree(reiv_pl);
+ return 0;
+
+err_reg_write:
+ kfree(reiv_pl);
+ return err;
+}
+
+static int mlxsw_sp_fid_erif_eport_to_vid_map(struct mlxsw_sp_fid *fid,
+ u16 rif_index, bool valid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ u8 num_port_pages;
+ int err, i;
+
+ num_port_pages = mlxsw_core_max_ports(mlxsw_sp->core) /
+ MLXSW_REG_REIV_REC_MAX_COUNT + 1;
+
+ for (i = 0; i < num_port_pages; i++) {
+ err = mlxsw_sp_fid_reiv_handle(fid, rif_index, valid, i);
+ if (err)
+ goto err_reiv_handle;
+ }
+
+ return 0;
+
+err_reiv_handle:
+ for (; i >= 0; i--)
+ mlxsw_sp_fid_reiv_handle(fid, rif_index, !valid, i);
+ return err;
+}
+
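+/* Associate a RIF with the FID by updating the FID record itself, the
+ * VNI->FID and VID->FID mappings and the {egress RIF, egress port}->VID
+ * table, unwinding all of them on error.
+ */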
+int mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif)
+{
+ u16 rif_index = mlxsw_sp_rif_index(rif);
+ int err;
+
+ err = mlxsw_sp_fid_to_fid_rif_update(fid, rif);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_fid_vni_to_fid_rif_update(fid, rif);
+ if (err)
+ goto err_vni_to_fid_rif_update;
+
+ err = mlxsw_sp_fid_vid_to_fid_rif_set(fid, rif);
+ if (err)
+ goto err_vid_to_fid_rif_set;
+
+ err = mlxsw_sp_fid_erif_eport_to_vid_map(fid, rif_index, true);
+ if (err)
+ goto err_erif_eport_to_vid_map;
+
+ fid->rif = rif;
+ return 0;
+
+err_erif_eport_to_vid_map:
+ mlxsw_sp_fid_vid_to_fid_rif_unset(fid);
+err_vid_to_fid_rif_set:
+ mlxsw_sp_fid_vni_to_fid_rif_update(fid, NULL);
+err_vni_to_fid_rif_update:
+ mlxsw_sp_fid_to_fid_rif_update(fid, NULL);
+ return err;
+}
+
+void mlxsw_sp_fid_rif_unset(struct mlxsw_sp_fid *fid)
+{
+ u16 rif_index;
+
+ if (!fid->rif)
+ return;
+
+ rif_index = mlxsw_sp_rif_index(fid->rif);
+ fid->rif = NULL;
+
+ mlxsw_sp_fid_erif_eport_to_vid_map(fid, rif_index, false);
+ mlxsw_sp_fid_vid_to_fid_rif_unset(fid);
+ mlxsw_sp_fid_vni_to_fid_rif_update(fid, NULL);
+ mlxsw_sp_fid_to_fid_rif_update(fid, NULL);
+}
+
+static int mlxsw_sp_fid_vni_op(const struct mlxsw_sp_fid *fid)
+{
+ int err;
+
+ err = mlxsw_sp_fid_vni_to_fid_map(fid, fid->rif, fid->vni_valid);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_fid_edit_op(fid, fid->rif);
+ if (err)
+ goto err_fid_edit_op;
+
+ return 0;
+
+err_fid_edit_op:
+ mlxsw_sp_fid_vni_to_fid_map(fid, fid->rif, !fid->vni_valid);
+ return err;
+}
+
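+/* Write a per-port {Port, VID}->FID mapping via the SVFA register, also
+ * programming the FID's ingress RIF when one is set.
+ */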
+static int __mlxsw_sp_fid_port_vid_map(const struct mlxsw_sp_fid *fid,
+ u16 local_port, u16 vid, bool valid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ char svfa_pl[MLXSW_REG_SVFA_LEN];
+ bool irif_valid = false;
+ u16 irif_index = 0;
+
+ if (fid->rif) {
+ irif_valid = true;
+ irif_index = mlxsw_sp_rif_index(fid->rif);
+ }
+
+ mlxsw_reg_svfa_port_vid_pack(svfa_pl, local_port, valid, fid->fid_index,
+ vid, irif_valid, irif_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static struct mlxsw_sp_fid_8021d *
+mlxsw_sp_fid_8021d_fid(const struct mlxsw_sp_fid *fid)
+{
+ return container_of(fid, struct mlxsw_sp_fid_8021d, common);
+}
+
+static void mlxsw_sp_fid_8021d_setup(struct mlxsw_sp_fid *fid, const void *arg)
+{
+ int br_ifindex = *(int *) arg;
+
+ mlxsw_sp_fid_8021d_fid(fid)->br_ifindex = br_ifindex;
+ fid->fid_offset = fid->fid_index - fid->fid_family->start_index;
+}
+
+static int mlxsw_sp_fid_8021d_configure(struct mlxsw_sp_fid *fid)
+{
+ return mlxsw_sp_fid_op(fid, true);
+}
+
+static void mlxsw_sp_fid_8021d_deconfigure(struct mlxsw_sp_fid *fid)
+{
+ if (fid->vni_valid)
+ mlxsw_sp_nve_fid_disable(fid->fid_family->mlxsw_sp, fid);
+ mlxsw_sp_fid_op(fid, false);
+}
+
+static int mlxsw_sp_fid_8021d_index_alloc(struct mlxsw_sp_fid *fid,
+ const void *arg, u16 *p_fid_index)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ u16 nr_fids, fid_index;
+
+ nr_fids = fid_family->end_index - fid_family->start_index + 1;
+ fid_index = find_first_zero_bit(fid_family->fids_bitmap, nr_fids);
+ if (fid_index == nr_fids)
+ return -ENOBUFS;
+ *p_fid_index = fid_family->start_index + fid_index;
+
+ return 0;
+}
+
+static bool
+mlxsw_sp_fid_8021d_compare(const struct mlxsw_sp_fid *fid, const void *arg)
+{
+ int br_ifindex = *(int *) arg;
+
+ return mlxsw_sp_fid_8021d_fid(fid)->br_ifindex == br_ifindex;
+}
+
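+/* Transition a port to virtual (per {Port, VID}) mode: first create a
+ * {Port, VID}->FID mapping for every VLAN already on the port, then enable
+ * VP mode, rolling the mappings back on failure.
+ */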
+static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ int err;
+
+ list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
+ list) {
+ struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+ u16 vid = mlxsw_sp_port_vlan->vid;
+
+ if (!fid)
+ continue;
+
+ err = __mlxsw_sp_fid_port_vid_map(fid,
+ mlxsw_sp_port->local_port,
+ vid, true);
+ if (err)
+ goto err_fid_port_vid_map;
+ }
+
+ err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
+ if (err)
+ goto err_port_vp_mode_set;
+
+ return 0;
+
+err_port_vp_mode_set:
+err_fid_port_vid_map:
+ list_for_each_entry_continue_reverse(mlxsw_sp_port_vlan,
+ &mlxsw_sp_port->vlans_list, list) {
+ struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+ u16 vid = mlxsw_sp_port_vlan->vid;
+
+ if (!fid)
+ continue;
+
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid,
+ false);
+ }
+ return err;
+}
+
+static void mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+
+ mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
+
+ list_for_each_entry_reverse(mlxsw_sp_port_vlan,
+ &mlxsw_sp_port->vlans_list, list) {
+ struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+ u16 vid = mlxsw_sp_port_vlan->vid;
+
+ if (!fid)
+ continue;
+
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid,
+ false);
+ }
+}
+
+static int
+mlxsw_sp_fid_port_vid_list_add(struct mlxsw_sp_fid *fid, u16 local_port,
+ u16 vid)
+{
+ struct mlxsw_sp_fid_port_vid *port_vid, *tmp_port_vid;
+
+ port_vid = kzalloc(sizeof(*port_vid), GFP_KERNEL);
+ if (!port_vid)
+ return -ENOMEM;
+
+ port_vid->local_port = local_port;
+ port_vid->vid = vid;
+
+ list_for_each_entry(tmp_port_vid, &fid->port_vid_list, list) {
+ if (tmp_port_vid->local_port > local_port)
+ break;
+ }
+
+ list_add_tail(&port_vid->list, &tmp_port_vid->list);
+ return 0;
+}
+
+static void
+mlxsw_sp_fid_port_vid_list_del(struct mlxsw_sp_fid *fid, u16 local_port,
+ u16 vid)
+{
+ struct mlxsw_sp_fid_port_vid *port_vid, *tmp;
+
+ list_for_each_entry_safe(port_vid, tmp, &fid->port_vid_list, list) {
+ if (port_vid->local_port != local_port || port_vid->vid != vid)
+ continue;
+
+ list_del(&port_vid->list);
+ kfree(port_vid);
+ return;
+ }
+}
+
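+/* Program the SMPE table entry for this {local port, FID} pair with the
+ * egress VID (0 when unmapping).
+ */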
+static int
+mlxsw_sp_fid_mpe_table_map(const struct mlxsw_sp_fid *fid, u16 local_port,
+ u16 vid, bool valid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ char smpe_pl[MLXSW_REG_SMPE_LEN];
+
+ mlxsw_reg_smpe_pack(smpe_pl, local_port, fid->fid_index,
+ valid ? vid : 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smpe), smpe_pl);
+}
+
+static int
+mlxsw_sp_fid_erif_eport_to_vid_map_one(const struct mlxsw_sp_fid *fid,
+ u16 local_port, u16 vid, bool valid)
+{
+ u8 port_page = local_port / MLXSW_REG_REIV_REC_MAX_COUNT;
+ u8 rec_num = local_port % MLXSW_REG_REIV_REC_MAX_COUNT;
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ u16 rif_index = mlxsw_sp_rif_index(fid->rif);
+ char *reiv_pl;
+ int err;
+
+ reiv_pl = kmalloc(MLXSW_REG_REIV_LEN, GFP_KERNEL);
+ if (!reiv_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_reiv_pack(reiv_pl, port_page, rif_index);
+ mlxsw_reg_reiv_rec_update_set(reiv_pl, rec_num, true);
+ mlxsw_reg_reiv_rec_evid_set(reiv_pl, rec_num, valid ? vid : 0);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(reiv), reiv_pl);
+ kfree(reiv_pl);
+ return err;
+}
+
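+/* Map the egress VID for a {Port, VID} of this FID: always program the SMPE
+ * table and, when the FID already has a RIF, the REIV table as well.
+ */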
+static int mlxsw_sp_fid_evid_map(const struct mlxsw_sp_fid *fid, u16 local_port,
+ u16 vid, bool valid)
+{
+ int err;
+
+ err = mlxsw_sp_fid_mpe_table_map(fid, local_port, vid, valid);
+ if (err)
+ return err;
+
+ if (!fid->rif)
+ return 0;
+
+ err = mlxsw_sp_fid_erif_eport_to_vid_map_one(fid, local_port, vid,
+ valid);
+ if (err)
+ goto err_erif_eport_to_vid_map_one;
+
+ return 0;
+
+err_erif_eport_to_vid_map_one:
+ mlxsw_sp_fid_mpe_table_map(fid, local_port, vid, !valid);
+ return err;
+}
+
+static int mlxsw_sp_fid_8021d_port_vid_map(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u16 local_port = mlxsw_sp_port->local_port;
+ int err;
+
+ err = __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid,
+ true);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_fid_evid_map(fid, local_port, vid, true);
+ if (err)
+ goto err_fid_evid_map;
+
+ err = mlxsw_sp_fid_port_vid_list_add(fid, mlxsw_sp_port->local_port,
+ vid);
+ if (err)
+ goto err_port_vid_list_add;
+
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port]++ == 0) {
+ err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
+ if (err)
+ goto err_port_vp_mode_trans;
+ }
+
+ return 0;
+
+err_port_vp_mode_trans:
+ mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
+ mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
+err_port_vid_list_add:
+ mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
+err_fid_evid_map:
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid, false);
+ return err;
+}
+
+static void
+mlxsw_sp_fid_8021d_port_vid_unmap(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u16 local_port = mlxsw_sp_port->local_port;
+
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 1)
+ mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
+ mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
+ mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
+ mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid, false);
+}
+
+static int mlxsw_sp_fid_8021d_vni_set(struct mlxsw_sp_fid *fid)
+{
+ return mlxsw_sp_fid_vni_op(fid);
+}
+
+static void mlxsw_sp_fid_8021d_vni_clear(struct mlxsw_sp_fid *fid)
+{
+ mlxsw_sp_fid_vni_op(fid);
+}
+
+static int mlxsw_sp_fid_8021d_nve_flood_index_set(struct mlxsw_sp_fid *fid)
+{
+ return mlxsw_sp_fid_edit_op(fid, fid->rif);
+}
+
+static void mlxsw_sp_fid_8021d_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
+{
+ mlxsw_sp_fid_edit_op(fid, fid->rif);
+}
+
+static void
+mlxsw_sp_fid_8021d_fdb_clear_offload(const struct mlxsw_sp_fid *fid,
+ const struct net_device *nve_dev)
+{
+ br_fdb_clear_offload(nve_dev, 0);
+}
+
+static int
+mlxsw_sp_fid_8021d_vid_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ return 0;
+}
+
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = {
+ .setup = mlxsw_sp_fid_8021d_setup,
+ .configure = mlxsw_sp_fid_8021d_configure,
+ .deconfigure = mlxsw_sp_fid_8021d_deconfigure,
+ .index_alloc = mlxsw_sp_fid_8021d_index_alloc,
+ .compare = mlxsw_sp_fid_8021d_compare,
+ .port_vid_map = mlxsw_sp_fid_8021d_port_vid_map,
+ .port_vid_unmap = mlxsw_sp_fid_8021d_port_vid_unmap,
+ .vni_set = mlxsw_sp_fid_8021d_vni_set,
+ .vni_clear = mlxsw_sp_fid_8021d_vni_clear,
+ .nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set,
+ .nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
+ .fdb_clear_offload = mlxsw_sp_fid_8021d_fdb_clear_offload,
+ .vid_to_fid_rif_update = mlxsw_sp_fid_8021d_vid_to_fid_rif_update,
+};
+
+#define MLXSW_SP_FID_8021Q_MAX (VLAN_N_VID - 2)
+#define MLXSW_SP_FID_RFID_MAX (11 * 1024)
+#define MLXSW_SP_FID_8021Q_PGT_BASE 0
+#define MLXSW_SP_FID_8021D_PGT_BASE (3 * MLXSW_SP_FID_8021Q_MAX)
+
+static const struct mlxsw_sp_flood_table mlxsw_sp_fid_8021d_flood_tables[] = {
+ {
+ .packet_type = MLXSW_SP_FLOOD_TYPE_UC,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
+ .table_index = 0,
+ },
+ {
+ .packet_type = MLXSW_SP_FLOOD_TYPE_MC,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
+ .table_index = 1,
+ },
+ {
+ .packet_type = MLXSW_SP_FLOOD_TYPE_BC,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
+ .table_index = 2,
+ },
+};
+
+static bool
+mlxsw_sp_fid_8021q_compare(const struct mlxsw_sp_fid *fid, const void *arg)
+{
+ u16 vid = *(u16 *) arg;
+
+ return mlxsw_sp_fid_8021q_fid(fid)->vid == vid;
+}
+
+static void
+mlxsw_sp_fid_8021q_fdb_clear_offload(const struct mlxsw_sp_fid *fid,
+ const struct net_device *nve_dev)
+{
+ br_fdb_clear_offload(nve_dev, mlxsw_sp_fid_8021q_vid(fid));
+}
+
+static void mlxsw_sp_fid_rfid_setup(struct mlxsw_sp_fid *fid, const void *arg)
+{
+ fid->fid_offset = 0;
+}
+
+static int mlxsw_sp_fid_rfid_configure(struct mlxsw_sp_fid *fid)
+{
+ return mlxsw_sp_fid_op(fid, true);
+}
+
+static void mlxsw_sp_fid_rfid_deconfigure(struct mlxsw_sp_fid *fid)
+{
+ mlxsw_sp_fid_op(fid, false);
+}
+
+static int mlxsw_sp_fid_rfid_index_alloc(struct mlxsw_sp_fid *fid,
+ const void *arg, u16 *p_fid_index)
+{
+ u16 rif_index = *(u16 *) arg;
+
+ *p_fid_index = fid->fid_family->start_index + rif_index;
+
+ return 0;
+}
+
+static bool mlxsw_sp_fid_rfid_compare(const struct mlxsw_sp_fid *fid,
+ const void *arg)
+{
+ u16 rif_index = *(u16 *) arg;
+
+ return fid->fid_index == rif_index + fid->fid_family->start_index;
+}
+
+static int mlxsw_sp_fid_rfid_port_vid_map(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u16 local_port = mlxsw_sp_port->local_port;
+ int err;
+
+ err = mlxsw_sp_fid_port_vid_list_add(fid, mlxsw_sp_port->local_port,
+ vid);
+ if (err)
+ return err;
+
+ /* Using the legacy bridge model, we only need to transition the port
+ * to virtual mode, since the {Port, VID} => FID mapping is done by the
+ * firmware upon RIF creation. Using the unified bridge model, we need
+ * to map {Port, VID} => FID ourselves and also map the egress VID.
+ */
+ err = __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid,
+ true);
+ if (err)
+ goto err_port_vid_map;
+
+ if (fid->rif) {
+ err = mlxsw_sp_fid_erif_eport_to_vid_map_one(fid, local_port,
+ vid, true);
+ if (err)
+ goto err_erif_eport_to_vid_map_one;
+ }
+
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port]++ == 0) {
+ err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
+ if (err)
+ goto err_port_vp_mode_trans;
+ }
+
+ return 0;
+
+err_port_vp_mode_trans:
+ mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
+ if (fid->rif)
+ mlxsw_sp_fid_erif_eport_to_vid_map_one(fid, local_port, vid,
+ false);
+err_erif_eport_to_vid_map_one:
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid, false);
+err_port_vid_map:
+ mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
+ return err;
+}
+
+static void
+mlxsw_sp_fid_rfid_port_vid_unmap(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u16 local_port = mlxsw_sp_port->local_port;
+
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 1)
+ mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
+ mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
+
+ if (fid->rif)
+ mlxsw_sp_fid_erif_eport_to_vid_map_one(fid, local_port, vid,
+ false);
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid, false);
+ mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
+}
+
+static int mlxsw_sp_fid_rfid_vni_set(struct mlxsw_sp_fid *fid)
+{
+ return -EOPNOTSUPP;
+}
+
+static void mlxsw_sp_fid_rfid_vni_clear(struct mlxsw_sp_fid *fid)
+{
+ WARN_ON_ONCE(1);
+}
+
+static int mlxsw_sp_fid_rfid_nve_flood_index_set(struct mlxsw_sp_fid *fid)
+{
+ return -EOPNOTSUPP;
+}
+
+static void mlxsw_sp_fid_rfid_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
+{
+ WARN_ON_ONCE(1);
+}
+
+static int
+mlxsw_sp_fid_rfid_vid_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ return 0;
+}
+
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_rfid_ops = {
+ .setup = mlxsw_sp_fid_rfid_setup,
+ .configure = mlxsw_sp_fid_rfid_configure,
+ .deconfigure = mlxsw_sp_fid_rfid_deconfigure,
+ .index_alloc = mlxsw_sp_fid_rfid_index_alloc,
+ .compare = mlxsw_sp_fid_rfid_compare,
+ .port_vid_map = mlxsw_sp_fid_rfid_port_vid_map,
+ .port_vid_unmap = mlxsw_sp_fid_rfid_port_vid_unmap,
+ .vni_set = mlxsw_sp_fid_rfid_vni_set,
+ .vni_clear = mlxsw_sp_fid_rfid_vni_clear,
+ .nve_flood_index_set = mlxsw_sp_fid_rfid_nve_flood_index_set,
+ .nve_flood_index_clear = mlxsw_sp_fid_rfid_nve_flood_index_clear,
+ .vid_to_fid_rif_update = mlxsw_sp_fid_rfid_vid_to_fid_rif_update,
+};
+
+static void mlxsw_sp_fid_dummy_setup(struct mlxsw_sp_fid *fid, const void *arg)
+{
+ fid->fid_offset = 0;
+}
+
+static int mlxsw_sp_fid_dummy_configure(struct mlxsw_sp_fid *fid)
+{
+ return mlxsw_sp_fid_op(fid, true);
+}
+
+static void mlxsw_sp_fid_dummy_deconfigure(struct mlxsw_sp_fid *fid)
+{
+ mlxsw_sp_fid_op(fid, false);
+}
+
+static int mlxsw_sp_fid_dummy_index_alloc(struct mlxsw_sp_fid *fid,
+ const void *arg, u16 *p_fid_index)
+{
+ *p_fid_index = fid->fid_family->start_index;
+
+ return 0;
+}
+
+static bool mlxsw_sp_fid_dummy_compare(const struct mlxsw_sp_fid *fid,
+ const void *arg)
+{
+ return true;
+}
+
+static int mlxsw_sp_fid_dummy_vni_set(struct mlxsw_sp_fid *fid)
+{
+ return -EOPNOTSUPP;
+}
+
+static void mlxsw_sp_fid_dummy_vni_clear(struct mlxsw_sp_fid *fid)
+{
+ WARN_ON_ONCE(1);
+}
+
+static int mlxsw_sp_fid_dummy_nve_flood_index_set(struct mlxsw_sp_fid *fid)
+{
+ return -EOPNOTSUPP;
+}
+
+static void mlxsw_sp_fid_dummy_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
+{
+ WARN_ON_ONCE(1);
+}
+
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_dummy_ops = {
+ .setup = mlxsw_sp_fid_dummy_setup,
+ .configure = mlxsw_sp_fid_dummy_configure,
+ .deconfigure = mlxsw_sp_fid_dummy_deconfigure,
+ .index_alloc = mlxsw_sp_fid_dummy_index_alloc,
+ .compare = mlxsw_sp_fid_dummy_compare,
+ .vni_set = mlxsw_sp_fid_dummy_vni_set,
+ .vni_clear = mlxsw_sp_fid_dummy_vni_clear,
+ .nve_flood_index_set = mlxsw_sp_fid_dummy_nve_flood_index_set,
+ .nve_flood_index_clear = mlxsw_sp_fid_dummy_nve_flood_index_clear,
+};
+
+static int mlxsw_sp_fid_8021q_configure(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp_fid_8021q *fid_8021q = mlxsw_sp_fid_8021q_fid(fid);
+ int err;
+
+ err = mlxsw_sp_fid_op(fid, true);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_fid_vid_to_fid_map(fid, fid_8021q->vid, true, fid->rif);
+ if (err)
+ goto err_vid_to_fid_map;
+
+ return 0;
+
+err_vid_to_fid_map:
+ mlxsw_sp_fid_op(fid, false);
+ return err;
+}
+
+static void mlxsw_sp_fid_8021q_deconfigure(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp_fid_8021q *fid_8021q = mlxsw_sp_fid_8021q_fid(fid);
+
+ if (fid->vni_valid)
+ mlxsw_sp_nve_fid_disable(fid->fid_family->mlxsw_sp, fid);
+
+ mlxsw_sp_fid_vid_to_fid_map(fid, fid_8021q->vid, false, NULL);
+ mlxsw_sp_fid_op(fid, false);
+}
+
+static int mlxsw_sp_fid_8021q_port_vid_map(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u16 local_port = mlxsw_sp_port->local_port;
+ int err;
+
+ /* In case there are no {Port, VID} => FID mappings on the port,
+ * we can use the global VID => FID mapping we created when the
+ * FID was configured; otherwise, configure a new per-port mapping.
+ */
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port]) {
+ err = __mlxsw_sp_fid_port_vid_map(fid, local_port, vid, true);
+ if (err)
+ return err;
+ }
+
+ err = mlxsw_sp_fid_evid_map(fid, local_port, vid, true);
+ if (err)
+ goto err_fid_evid_map;
+
+ err = mlxsw_sp_fid_port_vid_list_add(fid, mlxsw_sp_port->local_port,
+ vid);
+ if (err)
+ goto err_port_vid_list_add;
+
+ return 0;
+
+err_port_vid_list_add:
+ mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
+err_fid_evid_map:
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port])
+ __mlxsw_sp_fid_port_vid_map(fid, local_port, vid, false);
+ return err;
+}
+
+static void
+mlxsw_sp_fid_8021q_port_vid_unmap(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u16 local_port = mlxsw_sp_port->local_port;
+
+ mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
+ mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port])
+ __mlxsw_sp_fid_port_vid_map(fid, local_port, vid, false);
+}
+
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021q_ops = {
+ .setup = mlxsw_sp_fid_8021q_setup,
+ .configure = mlxsw_sp_fid_8021q_configure,
+ .deconfigure = mlxsw_sp_fid_8021q_deconfigure,
+ .index_alloc = mlxsw_sp_fid_8021d_index_alloc,
+ .compare = mlxsw_sp_fid_8021q_compare,
+ .port_vid_map = mlxsw_sp_fid_8021q_port_vid_map,
+ .port_vid_unmap = mlxsw_sp_fid_8021q_port_vid_unmap,
+ .vni_set = mlxsw_sp_fid_8021d_vni_set,
+ .vni_clear = mlxsw_sp_fid_8021d_vni_clear,
+ .nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set,
+ .nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
+ .fdb_clear_offload = mlxsw_sp_fid_8021q_fdb_clear_offload,
+ .vid_to_fid_rif_update = mlxsw_sp_fid_8021q_vid_to_fid_rif_update,
+};
+
+/* There are 4K-2 802.1Q FIDs */
+#define MLXSW_SP_FID_8021Q_START 1 /* FID 0 is reserved. */
+#define MLXSW_SP_FID_8021Q_END (MLXSW_SP_FID_8021Q_START + \
+ MLXSW_SP_FID_8021Q_MAX - 1)
+
+/* There are 1K 802.1D FIDs */
+#define MLXSW_SP_FID_8021D_START (MLXSW_SP_FID_8021Q_END + 1)
+#define MLXSW_SP_FID_8021D_END (MLXSW_SP_FID_8021D_START + \
+ MLXSW_SP_FID_8021D_MAX - 1)
+
+/* There is one dummy FID */
+#define MLXSW_SP_FID_DUMMY (MLXSW_SP_FID_8021D_END + 1)
+
+/* There are 11K rFIDs */
+#define MLXSW_SP_RFID_START (MLXSW_SP_FID_DUMMY + 1)
+#define MLXSW_SP_RFID_END (MLXSW_SP_RFID_START + \
+ MLXSW_SP_FID_RFID_MAX - 1)
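+
+/* Illustrative layout sketch: assuming the MAX macros match the counts in
+ * the comments above (4094 802.1Q FIDs, 1024 802.1D FIDs, 11264 rFIDs),
+ * the index space works out to:
+ *
+ *   [1 .. 4094]      802.1Q FIDs (one per VLAN)
+ *   [4095 .. 5118]   802.1D FIDs
+ *   [5119]           dummy FID
+ *   [5120 .. 16383]  rFIDs
+ */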
+
+static const struct mlxsw_sp_fid_family mlxsw_sp1_fid_8021q_family = {
+ .type = MLXSW_SP_FID_TYPE_8021Q,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021q),
+ .start_index = MLXSW_SP_FID_8021Q_START,
+ .end_index = MLXSW_SP_FID_8021Q_END,
+ .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .rif_type = MLXSW_SP_RIF_TYPE_VLAN,
+ .ops = &mlxsw_sp_fid_8021q_ops,
+ .flood_rsp = false,
+ .bridge_type = MLXSW_REG_BRIDGE_TYPE_0,
+ .pgt_base = MLXSW_SP_FID_8021Q_PGT_BASE,
+ .smpe_index_valid = false,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp1_fid_8021d_family = {
+ .type = MLXSW_SP_FID_TYPE_8021D,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021d),
+ .start_index = MLXSW_SP_FID_8021D_START,
+ .end_index = MLXSW_SP_FID_8021D_END,
+ .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .rif_type = MLXSW_SP_RIF_TYPE_FID,
+ .ops = &mlxsw_sp_fid_8021d_ops,
+ .bridge_type = MLXSW_REG_BRIDGE_TYPE_1,
+ .pgt_base = MLXSW_SP_FID_8021D_PGT_BASE,
+ .smpe_index_valid = false,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp1_fid_dummy_family = {
+ .type = MLXSW_SP_FID_TYPE_DUMMY,
+ .fid_size = sizeof(struct mlxsw_sp_fid),
+ .start_index = MLXSW_SP_FID_DUMMY,
+ .end_index = MLXSW_SP_FID_DUMMY,
+ .ops = &mlxsw_sp_fid_dummy_ops,
+ .smpe_index_valid = false,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp_fid_rfid_family = {
+ .type = MLXSW_SP_FID_TYPE_RFID,
+ .fid_size = sizeof(struct mlxsw_sp_fid),
+ .start_index = MLXSW_SP_RFID_START,
+ .end_index = MLXSW_SP_RFID_END,
+ .rif_type = MLXSW_SP_RIF_TYPE_SUBPORT,
+ .ops = &mlxsw_sp_fid_rfid_ops,
+ .flood_rsp = true,
+ .smpe_index_valid = false,
+};
+
+const struct mlxsw_sp_fid_family *mlxsw_sp1_fid_family_arr[] = {
+ [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp1_fid_8021q_family,
+ [MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp1_fid_8021d_family,
+ [MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp1_fid_dummy_family,
+ [MLXSW_SP_FID_TYPE_RFID] = &mlxsw_sp_fid_rfid_family,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_8021q_family = {
+ .type = MLXSW_SP_FID_TYPE_8021Q,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021q),
+ .start_index = MLXSW_SP_FID_8021Q_START,
+ .end_index = MLXSW_SP_FID_8021Q_END,
+ .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .rif_type = MLXSW_SP_RIF_TYPE_VLAN,
+ .ops = &mlxsw_sp_fid_8021q_ops,
+ .flood_rsp = false,
+ .bridge_type = MLXSW_REG_BRIDGE_TYPE_0,
+ .pgt_base = MLXSW_SP_FID_8021Q_PGT_BASE,
+ .smpe_index_valid = true,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_8021d_family = {
+ .type = MLXSW_SP_FID_TYPE_8021D,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021d),
+ .start_index = MLXSW_SP_FID_8021D_START,
+ .end_index = MLXSW_SP_FID_8021D_END,
+ .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .rif_type = MLXSW_SP_RIF_TYPE_FID,
+ .ops = &mlxsw_sp_fid_8021d_ops,
+ .bridge_type = MLXSW_REG_BRIDGE_TYPE_1,
+ .pgt_base = MLXSW_SP_FID_8021D_PGT_BASE,
+ .smpe_index_valid = true,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_dummy_family = {
+ .type = MLXSW_SP_FID_TYPE_DUMMY,
+ .fid_size = sizeof(struct mlxsw_sp_fid),
+ .start_index = MLXSW_SP_FID_DUMMY,
+ .end_index = MLXSW_SP_FID_DUMMY,
+ .ops = &mlxsw_sp_fid_dummy_ops,
+ .smpe_index_valid = false,
+};
+
+const struct mlxsw_sp_fid_family *mlxsw_sp2_fid_family_arr[] = {
+ [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp2_fid_8021q_family,
+ [MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp2_fid_8021d_family,
+ [MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp2_fid_dummy_family,
+ [MLXSW_SP_FID_TYPE_RFID] = &mlxsw_sp_fid_rfid_family,
+};
+
+static struct mlxsw_sp_fid *mlxsw_sp_fid_lookup(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_fid_type type,
+ const void *arg)
+{
+ struct mlxsw_sp_fid_family *fid_family;
+ struct mlxsw_sp_fid *fid;
+
+ fid_family = mlxsw_sp->fid_core->fid_family_arr[type];
+ list_for_each_entry(fid, &fid_family->fids_list, list) {
+ if (!fid->fid_family->ops->compare(fid, arg))
+ continue;
+ refcount_inc(&fid->ref_count);
+ return fid;
+ }
+
+ return NULL;
+}
+
+static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_fid_type type,
+ const void *arg)
+{
+ struct mlxsw_sp_fid_family *fid_family;
+ struct mlxsw_sp_fid *fid;
+ u16 fid_index;
+ int err;
+
+ fid = mlxsw_sp_fid_lookup(mlxsw_sp, type, arg);
+ if (fid)
+ return fid;
+
+ fid_family = mlxsw_sp->fid_core->fid_family_arr[type];
+ fid = kzalloc(fid_family->fid_size, GFP_KERNEL);
+ if (!fid)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&fid->port_vid_list);
+ fid->fid_family = fid_family;
+
+ err = fid->fid_family->ops->index_alloc(fid, arg, &fid_index);
+ if (err)
+ goto err_index_alloc;
+ fid->fid_index = fid_index;
+ __set_bit(fid_index - fid_family->start_index, fid_family->fids_bitmap);
+
+ fid->fid_family->ops->setup(fid, arg);
+
+ err = fid->fid_family->ops->configure(fid);
+ if (err)
+ goto err_configure;
+
+ err = rhashtable_insert_fast(&mlxsw_sp->fid_core->fid_ht, &fid->ht_node,
+ mlxsw_sp_fid_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ list_add(&fid->list, &fid_family->fids_list);
+ refcount_set(&fid->ref_count, 1);
+ return fid;
+
+err_rhashtable_insert:
+ fid->fid_family->ops->deconfigure(fid);
+err_configure:
+ __clear_bit(fid_index - fid_family->start_index,
+ fid_family->fids_bitmap);
+err_index_alloc:
+ kfree(fid);
+ return ERR_PTR(err);
+}
+
+void mlxsw_sp_fid_put(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
+
+ if (!refcount_dec_and_test(&fid->ref_count))
+ return;
+
+ list_del(&fid->list);
+ rhashtable_remove_fast(&mlxsw_sp->fid_core->fid_ht,
+ &fid->ht_node, mlxsw_sp_fid_ht_params);
+ fid->fid_family->ops->deconfigure(fid);
+ __clear_bit(fid->fid_index - fid_family->start_index,
+ fid_family->fids_bitmap);
+ WARN_ON_ONCE(!list_empty(&fid->port_vid_list));
+ kfree(fid);
+}
+
+struct mlxsw_sp_fid *mlxsw_sp_fid_8021q_get(struct mlxsw_sp *mlxsw_sp, u16 vid)
+{
+ return mlxsw_sp_fid_get(mlxsw_sp, MLXSW_SP_FID_TYPE_8021Q, &vid);
+}
+
+struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_get(struct mlxsw_sp *mlxsw_sp,
+ int br_ifindex)
+{
+ return mlxsw_sp_fid_get(mlxsw_sp, MLXSW_SP_FID_TYPE_8021D, &br_ifindex);
+}
+
+struct mlxsw_sp_fid *mlxsw_sp_fid_8021q_lookup(struct mlxsw_sp *mlxsw_sp,
+ u16 vid)
+{
+ return mlxsw_sp_fid_lookup(mlxsw_sp, MLXSW_SP_FID_TYPE_8021Q, &vid);
+}
+
+struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_lookup(struct mlxsw_sp *mlxsw_sp,
+ int br_ifindex)
+{
+ return mlxsw_sp_fid_lookup(mlxsw_sp, MLXSW_SP_FID_TYPE_8021D,
+ &br_ifindex);
+}
+
+struct mlxsw_sp_fid *mlxsw_sp_fid_rfid_get(struct mlxsw_sp *mlxsw_sp,
+ u16 rif_index)
+{
+ return mlxsw_sp_fid_get(mlxsw_sp, MLXSW_SP_FID_TYPE_RFID, &rif_index);
+}
+
+struct mlxsw_sp_fid *mlxsw_sp_fid_dummy_get(struct mlxsw_sp *mlxsw_sp)
+{
+ return mlxsw_sp_fid_get(mlxsw_sp, MLXSW_SP_FID_TYPE_DUMMY, NULL);
+}
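+
+/* Minimal usage sketch for the getters above: a caller typically does
+ *
+ *	fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
+ *	if (IS_ERR(fid))
+ *		return PTR_ERR(fid);
+ *	...
+ *	mlxsw_sp_fid_put(fid);
+ *
+ * mlxsw_sp_fid_put() drops the reference and frees the FID once the last
+ * user is gone.
+ */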
+
+static int
+mlxsw_sp_fid_flood_table_init(struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_flood_table *flood_table)
+{
+ enum mlxsw_sp_flood_type packet_type = flood_table->packet_type;
+ struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
+ const int *sfgc_packet_types;
+ u16 num_fids, mid_base;
+ int err, i;
+
+ mid_base = mlxsw_sp_fid_flood_table_mid(fid_family, flood_table, 0);
+ num_fids = mlxsw_sp_fid_family_num_fids(fid_family);
+ err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, mid_base, num_fids);
+ if (err)
+ return err;
+
+ sfgc_packet_types = mlxsw_sp_packet_type_sfgc_types[packet_type];
+ for (i = 0; i < MLXSW_REG_SFGC_TYPE_MAX; i++) {
+ char sfgc_pl[MLXSW_REG_SFGC_LEN];
+
+ if (!sfgc_packet_types[i])
+ continue;
+
+ mlxsw_reg_sfgc_pack(sfgc_pl, i, fid_family->bridge_type,
+ flood_table->table_type, 0, mid_base);
+
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfgc), sfgc_pl);
+ if (err)
+ goto err_reg_write;
+ }
+
+ return 0;
+
+err_reg_write:
+ mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mid_base, num_fids);
+ return err;
+}
+
+static void
+mlxsw_sp_fid_flood_table_fini(struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_flood_table *flood_table)
+{
+ struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
+ u16 num_fids, mid_base;
+
+ mid_base = mlxsw_sp_fid_flood_table_mid(fid_family, flood_table, 0);
+ num_fids = mlxsw_sp_fid_family_num_fids(fid_family);
+ mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mid_base, num_fids);
+}
+
+static int
+mlxsw_sp_fid_flood_tables_init(struct mlxsw_sp_fid_family *fid_family)
+{
+ int i;
+
+ for (i = 0; i < fid_family->nr_flood_tables; i++) {
+ const struct mlxsw_sp_flood_table *flood_table;
+ int err;
+
+ flood_table = &fid_family->flood_tables[i];
+ err = mlxsw_sp_fid_flood_table_init(fid_family, flood_table);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+mlxsw_sp_fid_flood_tables_fini(struct mlxsw_sp_fid_family *fid_family)
+{
+ int i;
+
+ for (i = 0; i < fid_family->nr_flood_tables; i++) {
+ const struct mlxsw_sp_flood_table *flood_table;
+
+ flood_table = &fid_family->flood_tables[i];
+ mlxsw_sp_fid_flood_table_fini(fid_family, flood_table);
+ }
+}
+
+static int mlxsw_sp_fid_family_register(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_fid_family *tmpl)
+{
+ u16 nr_fids = tmpl->end_index - tmpl->start_index + 1;
+ struct mlxsw_sp_fid_family *fid_family;
+ int err;
+
+ fid_family = kmemdup(tmpl, sizeof(*fid_family), GFP_KERNEL);
+ if (!fid_family)
+ return -ENOMEM;
+
+ fid_family->mlxsw_sp = mlxsw_sp;
+ INIT_LIST_HEAD(&fid_family->fids_list);
+ fid_family->fids_bitmap = bitmap_zalloc(nr_fids, GFP_KERNEL);
+ if (!fid_family->fids_bitmap) {
+ err = -ENOMEM;
+ goto err_alloc_fids_bitmap;
+ }
+
+ if (fid_family->flood_tables) {
+ err = mlxsw_sp_fid_flood_tables_init(fid_family);
+ if (err)
+ goto err_fid_flood_tables_init;
+ }
+
+ mlxsw_sp->fid_core->fid_family_arr[tmpl->type] = fid_family;
+
+ return 0;
+
+err_fid_flood_tables_init:
+ bitmap_free(fid_family->fids_bitmap);
+err_alloc_fids_bitmap:
+ kfree(fid_family);
+ return err;
+}
+
+static void
+mlxsw_sp_fid_family_unregister(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fid_family *fid_family)
+{
+ mlxsw_sp->fid_core->fid_family_arr[fid_family->type] = NULL;
+
+ if (fid_family->flood_tables)
+ mlxsw_sp_fid_flood_tables_fini(fid_family);
+
+ bitmap_free(fid_family->fids_bitmap);
+ WARN_ON_ONCE(!list_empty(&fid_family->fids_list));
+ kfree(fid_family);
+}
+
+int mlxsw_sp_port_fids_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ /* Track number of FIDs configured on the port with mapping type
+ * PORT_VID_TO_FID, so that we know when to transition the port
+ * back to non-virtual (VLAN) mode.
+ */
+ mlxsw_sp->fid_core->port_fid_mappings[mlxsw_sp_port->local_port] = 0;
+
+ return mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
+}
+
+void mlxsw_sp_port_fids_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ mlxsw_sp->fid_core->port_fid_mappings[mlxsw_sp_port->local_port] = 0;
+}
+
+int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp)
+{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+ struct mlxsw_sp_fid_core *fid_core;
+ int err, i;
+
+ fid_core = kzalloc(sizeof(*mlxsw_sp->fid_core), GFP_KERNEL);
+ if (!fid_core)
+ return -ENOMEM;
+ mlxsw_sp->fid_core = fid_core;
+
+ err = rhashtable_init(&fid_core->fid_ht, &mlxsw_sp_fid_ht_params);
+ if (err)
+ goto err_rhashtable_fid_init;
+
+ err = rhashtable_init(&fid_core->vni_ht, &mlxsw_sp_fid_vni_ht_params);
+ if (err)
+ goto err_rhashtable_vni_init;
+
+ fid_core->port_fid_mappings = kcalloc(max_ports, sizeof(unsigned int),
+ GFP_KERNEL);
+ if (!fid_core->port_fid_mappings) {
+ err = -ENOMEM;
+ goto err_alloc_port_fid_mappings;
+ }
+
+ for (i = 0; i < MLXSW_SP_FID_TYPE_MAX; i++) {
+ err = mlxsw_sp_fid_family_register(mlxsw_sp,
+ mlxsw_sp->fid_family_arr[i]);
+
+ if (err)
+ goto err_fid_ops_register;
+ }
+
+ return 0;
+
+err_fid_ops_register:
+ for (i--; i >= 0; i--) {
+ struct mlxsw_sp_fid_family *fid_family;
+
+ fid_family = fid_core->fid_family_arr[i];
+ mlxsw_sp_fid_family_unregister(mlxsw_sp, fid_family);
+ }
+ kfree(fid_core->port_fid_mappings);
+err_alloc_port_fid_mappings:
+ rhashtable_destroy(&fid_core->vni_ht);
+err_rhashtable_vni_init:
+ rhashtable_destroy(&fid_core->fid_ht);
+err_rhashtable_fid_init:
+ kfree(fid_core);
+ return err;
+}
+
+void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_fid_core *fid_core = mlxsw_sp->fid_core;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_FID_TYPE_MAX; i++)
+ mlxsw_sp_fid_family_unregister(mlxsw_sp,
+ fid_core->fid_family_arr[i]);
+ kfree(fid_core->port_fid_mappings);
+ rhashtable_destroy(&fid_core->vni_ht);
+ rhashtable_destroy(&fid_core->fid_ht);
+ kfree(fid_core);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c
new file mode 100644
index 000000000..9e50c823a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2020 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <net/net_namespace.h>
+
+#include "spectrum.h"
+
+struct mlxsw_sp_flow_block *
+mlxsw_sp_flow_block_create(struct mlxsw_sp *mlxsw_sp, struct net *net)
+{
+ struct mlxsw_sp_flow_block *block;
+
+ block = kzalloc(sizeof(*block), GFP_KERNEL);
+ if (!block)
+ return NULL;
+ INIT_LIST_HEAD(&block->binding_list);
+ INIT_LIST_HEAD(&block->mall.list);
+ block->mlxsw_sp = mlxsw_sp;
+ block->net = net;
+ return block;
+}
+
+void mlxsw_sp_flow_block_destroy(struct mlxsw_sp_flow_block *block)
+{
+ WARN_ON(!list_empty(&block->binding_list));
+ kfree(block);
+}
+
+static struct mlxsw_sp_flow_block_binding *
+mlxsw_sp_flow_block_lookup(struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port, bool ingress)
+{
+ struct mlxsw_sp_flow_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list)
+ if (binding->mlxsw_sp_port == mlxsw_sp_port &&
+ binding->ingress == ingress)
+ return binding;
+ return NULL;
+}
+
+static bool
+mlxsw_sp_flow_block_ruleset_bound(const struct mlxsw_sp_flow_block *block)
+{
+ return block->ruleset_zero;
+}
+
+static int mlxsw_sp_flow_block_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_flow_block_binding *binding;
+ int err;
+
+ if (WARN_ON(mlxsw_sp_flow_block_lookup(block, mlxsw_sp_port, ingress)))
+ return -EEXIST;
+
+ if (ingress && block->ingress_blocker_rule_count) {
+ NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to ingress because it contains unsupported rules");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ingress && block->egress_blocker_rule_count) {
+ NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to egress because it contains unsupported rules");
+ return -EOPNOTSUPP;
+ }
+
+ err = mlxsw_sp_mall_port_bind(block, mlxsw_sp_port, extack);
+ if (err)
+ return err;
+
+ binding = kzalloc(sizeof(*binding), GFP_KERNEL);
+ if (!binding) {
+ err = -ENOMEM;
+ goto err_binding_alloc;
+ }
+ binding->mlxsw_sp_port = mlxsw_sp_port;
+ binding->ingress = ingress;
+
+ if (mlxsw_sp_flow_block_ruleset_bound(block)) {
+ err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
+ if (err)
+ goto err_ruleset_bind;
+ }
+
+ if (ingress)
+ block->ingress_binding_count++;
+ else
+ block->egress_binding_count++;
+ list_add(&binding->list, &block->binding_list);
+ return 0;
+
+err_ruleset_bind:
+ kfree(binding);
+err_binding_alloc:
+ mlxsw_sp_mall_port_unbind(block, mlxsw_sp_port);
+
+ return err;
+}
+
+static int mlxsw_sp_flow_block_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
+{
+ struct mlxsw_sp_flow_block_binding *binding;
+
+ binding = mlxsw_sp_flow_block_lookup(block, mlxsw_sp_port, ingress);
+ if (!binding)
+ return -ENOENT;
+
+ list_del(&binding->list);
+
+ if (ingress)
+ block->ingress_binding_count--;
+ else
+ block->egress_binding_count--;
+
+ if (mlxsw_sp_flow_block_ruleset_bound(block))
+ mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
+
+ kfree(binding);
+
+ mlxsw_sp_mall_port_unbind(block, mlxsw_sp_port);
+
+ return 0;
+}
+
+static int mlxsw_sp_flow_block_mall_cb(struct mlxsw_sp_flow_block *flow_block,
+ struct tc_cls_matchall_offload *f)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_flow_block_mlxsw_sp(flow_block);
+
+ switch (f->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return mlxsw_sp_mall_replace(mlxsw_sp, flow_block, f);
+ case TC_CLSMATCHALL_DESTROY:
+ mlxsw_sp_mall_destroy(flow_block, f);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlxsw_sp_flow_block_flower_cb(struct mlxsw_sp_flow_block *flow_block,
+ struct flow_cls_offload *f)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_flow_block_mlxsw_sp(flow_block);
+
+ switch (f->command) {
+ case FLOW_CLS_REPLACE:
+ return mlxsw_sp_flower_replace(mlxsw_sp, flow_block, f);
+ case FLOW_CLS_DESTROY:
+ mlxsw_sp_flower_destroy(mlxsw_sp, flow_block, f);
+ return 0;
+ case FLOW_CLS_STATS:
+ return mlxsw_sp_flower_stats(mlxsw_sp, flow_block, f);
+ case FLOW_CLS_TMPLT_CREATE:
+ return mlxsw_sp_flower_tmplt_create(mlxsw_sp, flow_block, f);
+ case FLOW_CLS_TMPLT_DESTROY:
+ mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, flow_block, f);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlxsw_sp_flow_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct mlxsw_sp_flow_block *flow_block = cb_priv;
+
+ if (mlxsw_sp_flow_block_disabled(flow_block))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSMATCHALL:
+ return mlxsw_sp_flow_block_mall_cb(flow_block, type_data);
+ case TC_SETUP_CLSFLOWER:
+ return mlxsw_sp_flow_block_flower_cb(flow_block, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void mlxsw_sp_tc_block_release(void *cb_priv)
+{
+ struct mlxsw_sp_flow_block *flow_block = cb_priv;
+
+ mlxsw_sp_flow_block_destroy(flow_block);
+}
+
+static LIST_HEAD(mlxsw_sp_block_cb_list);
+
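+/* A tc shared block is backed by a single mlxsw_sp_flow_block per device;
+ * flow_block_cb_incref()/decref() count the port bindings so the callback
+ * is registered on the first bind and released on the last unbind.
+ */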
+static int mlxsw_sp_setup_tc_block_bind(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f,
+ bool ingress)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_flow_block *flow_block;
+ struct flow_block_cb *block_cb;
+ bool register_block = false;
+ int err;
+
+ block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_flow_block_cb,
+ mlxsw_sp);
+ if (!block_cb) {
+ flow_block = mlxsw_sp_flow_block_create(mlxsw_sp, f->net);
+ if (!flow_block)
+ return -ENOMEM;
+ block_cb = flow_block_cb_alloc(mlxsw_sp_flow_block_cb,
+ mlxsw_sp, flow_block,
+ mlxsw_sp_tc_block_release);
+ if (IS_ERR(block_cb)) {
+ mlxsw_sp_flow_block_destroy(flow_block);
+ return PTR_ERR(block_cb);
+ }
+ register_block = true;
+ } else {
+ flow_block = flow_block_cb_priv(block_cb);
+ }
+ flow_block_cb_incref(block_cb);
+ err = mlxsw_sp_flow_block_bind(mlxsw_sp, flow_block,
+ mlxsw_sp_port, ingress, f->extack);
+ if (err)
+ goto err_block_bind;
+
+ if (ingress)
+ mlxsw_sp_port->ing_flow_block = flow_block;
+ else
+ mlxsw_sp_port->eg_flow_block = flow_block;
+
+ if (register_block) {
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
+ }
+
+ return 0;
+
+err_block_bind:
+ if (!flow_block_cb_decref(block_cb))
+ flow_block_cb_free(block_cb);
+ return err;
+}
+
+static void mlxsw_sp_setup_tc_block_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f,
+ bool ingress)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_flow_block *flow_block;
+ struct flow_block_cb *block_cb;
+ int err;
+
+ block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_flow_block_cb,
+ mlxsw_sp);
+ if (!block_cb)
+ return;
+
+ if (ingress)
+ mlxsw_sp_port->ing_flow_block = NULL;
+ else
+ mlxsw_sp_port->eg_flow_block = NULL;
+
+ flow_block = flow_block_cb_priv(block_cb);
+ err = mlxsw_sp_flow_block_unbind(mlxsw_sp, flow_block,
+ mlxsw_sp_port, ingress);
+ if (!err && !flow_block_cb_decref(block_cb)) {
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ }
+}
+
+int mlxsw_sp_setup_tc_block_clsact(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f,
+ bool ingress)
+{
+ f->driver_block_list = &mlxsw_sp_block_cb_list;
+
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ return mlxsw_sp_setup_tc_block_bind(mlxsw_sp_port, f, ingress);
+ case FLOW_BLOCK_UNBIND:
+ mlxsw_sp_setup_tc_block_unbind(mlxsw_sp_port, f, ingress);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
new file mode 100644
index 000000000..e91fb205e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -0,0 +1,796 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/log2.h>
+#include <net/net_namespace.h>
+#include <net/flow_dissector.h>
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gact.h>
+#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_vlan.h>
+
+#include "spectrum.h"
+#include "core_acl_flex_keys.h"
+
+static int mlxsw_sp_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+				   "QoS offload does not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct flow_action *flow_action,
+ struct netlink_ext_ack *extack)
+{
+ const struct flow_action_entry *act;
+ int mirror_act_count = 0;
+ int police_act_count = 0;
+ int sample_act_count = 0;
+ int err, i;
+
+ if (!flow_action_has_entries(flow_action))
+ return 0;
+ if (!flow_action_mixed_hw_stats_check(flow_action, extack))
+ return -EOPNOTSUPP;
+
+ act = flow_action_first_entry_get(flow_action);
+ if (act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED) {
+ /* Nothing to do */
+ } else if (act->hw_stats & FLOW_ACTION_HW_STATS_IMMEDIATE) {
+ /* Count action is inserted first */
+ err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
+ if (err)
+ return err;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
+ return -EOPNOTSUPP;
+ }
+
+ flow_action_for_each(i, act, flow_action) {
+ switch (act->id) {
+ case FLOW_ACTION_ACCEPT:
+ err = mlxsw_sp_acl_rulei_act_terminate(rulei);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
+ return err;
+ }
+ break;
+ case FLOW_ACTION_DROP: {
+ bool ingress;
+
+ if (mlxsw_sp_flow_block_is_mixed_bound(block)) {
+ NL_SET_ERR_MSG_MOD(extack, "Drop action is not supported when block is bound to ingress and egress");
+ return -EOPNOTSUPP;
+ }
+ ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
+ err = mlxsw_sp_acl_rulei_act_drop(rulei, ingress,
+ act->cookie, extack);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
+ return err;
+ }
+
+ /* Forbid block with this rulei to be bound
+ * to ingress/egress in future. Ingress rule is
+ * a blocker for egress and vice versa.
+ */
+ if (ingress)
+ rulei->egress_bind_blocker = 1;
+ else
+ rulei->ingress_bind_blocker = 1;
+ }
+ break;
+ case FLOW_ACTION_TRAP:
+ err = mlxsw_sp_acl_rulei_act_trap(rulei);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
+ return err;
+ }
+ break;
+ case FLOW_ACTION_GOTO: {
+ u32 chain_index = act->chain_index;
+ struct mlxsw_sp_acl_ruleset *ruleset;
+ u16 group_id;
+
+ ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
+ chain_index,
+ MLXSW_SP_ACL_PROFILE_FLOWER);
+ if (IS_ERR(ruleset))
+ return PTR_ERR(ruleset);
+
+ group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
+ err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
+ return err;
+ }
+ }
+ break;
+ case FLOW_ACTION_REDIRECT: {
+ struct net_device *out_dev;
+ struct mlxsw_sp_fid *fid;
+ u16 fid_index;
+
+ if (mlxsw_sp_flow_block_is_egress_bound(block)) {
+ NL_SET_ERR_MSG_MOD(extack, "Redirect action is not supported on egress");
+ return -EOPNOTSUPP;
+ }
+
+ /* Forbid block with this rulei to be bound
+ * to egress in future.
+ */
+ rulei->egress_bind_blocker = 1;
+
+ fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
+ fid_index = mlxsw_sp_fid_index(fid);
+ err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
+ fid_index, extack);
+ if (err)
+ return err;
+
+ out_dev = act->dev;
+ err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
+ out_dev, extack);
+ if (err)
+ return err;
+ }
+ break;
+ case FLOW_ACTION_MIRRED: {
+ struct net_device *out_dev = act->dev;
+
+ if (mirror_act_count++) {
+ NL_SET_ERR_MSG_MOD(extack, "Multiple mirror actions per rule are not supported");
+ return -EOPNOTSUPP;
+ }
+
+ err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
+ block, out_dev,
+ extack);
+ if (err)
+ return err;
+ }
+ break;
+ case FLOW_ACTION_VLAN_MANGLE: {
+ u16 proto = be16_to_cpu(act->vlan.proto);
+ u8 prio = act->vlan.prio;
+ u16 vid = act->vlan.vid;
+
+ err = mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
+ act->id, vid,
+ proto, prio, extack);
+ if (err)
+ return err;
+ break;
+ }
+ case FLOW_ACTION_PRIORITY:
+ err = mlxsw_sp_acl_rulei_act_priority(mlxsw_sp, rulei,
+ act->priority,
+ extack);
+ if (err)
+ return err;
+ break;
+ case FLOW_ACTION_MANGLE: {
+ enum flow_action_mangle_base htype = act->mangle.htype;
+ __be32 be_mask = (__force __be32) act->mangle.mask;
+ __be32 be_val = (__force __be32) act->mangle.val;
+ u32 offset = act->mangle.offset;
+ u32 mask = be32_to_cpu(be_mask);
+ u32 val = be32_to_cpu(be_val);
+
+ err = mlxsw_sp_acl_rulei_act_mangle(mlxsw_sp, rulei,
+ htype, offset,
+ mask, val, extack);
+ if (err)
+ return err;
+ break;
+ }
+ case FLOW_ACTION_POLICE: {
+ u32 burst;
+
+ if (police_act_count++) {
+ NL_SET_ERR_MSG_MOD(extack, "Multiple police actions per rule are not supported");
+ return -EOPNOTSUPP;
+ }
+
+ err = mlxsw_sp_policer_validate(flow_action, act, extack);
+ if (err)
+ return err;
+
+ /* The kernel might adjust the requested burst size so
+ * that it is not exactly a power of two. Re-adjust it
+ * here since the hardware only supports burst sizes
+ * that are a power of two.
+ */
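+			/* For example, a requested burst of 3000 bytes would
+			 * be programmed as 4096.
+			 */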
+ burst = roundup_pow_of_two(act->police.burst);
+ err = mlxsw_sp_acl_rulei_act_police(mlxsw_sp, rulei,
+ act->hw_index,
+ act->police.rate_bytes_ps,
+ burst, extack);
+ if (err)
+ return err;
+ break;
+ }
+ case FLOW_ACTION_SAMPLE: {
+ if (sample_act_count++) {
+ NL_SET_ERR_MSG_MOD(extack, "Multiple sample actions per rule are not supported");
+ return -EOPNOTSUPP;
+ }
+
+ err = mlxsw_sp_acl_rulei_act_sample(mlxsw_sp, rulei,
+ block,
+ act->sample.psample_group,
+ act->sample.rate,
+ act->sample.trunc_size,
+ act->sample.truncate,
+ extack);
+ if (err)
+ return err;
+ break;
+ }
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
+ dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (rulei->ipv6_valid) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei,
+ struct flow_cls_offload *f,
+ struct mlxsw_sp_flow_block *block)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct net_device *ingress_dev;
+ struct flow_match_meta match;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
+ return 0;
+
+ flow_rule_match_meta(rule, &match);
+ if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported ingress ifindex mask");
+ return -EINVAL;
+ }
+
+ ingress_dev = __dev_get_by_index(block->net,
+ match.key->ingress_ifindex);
+ if (!ingress_dev) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "Can't find specified ingress port to match on");
+ return -EINVAL;
+ }
+
+ if (!mlxsw_sp_port_dev_check(ingress_dev)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on non-mlxsw ingress port");
+ return -EINVAL;
+ }
+
+ mlxsw_sp_port = netdev_priv(ingress_dev);
+ if (mlxsw_sp_port->mlxsw_sp != block->mlxsw_sp) {
+		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on a port from a different device");
+ return -EINVAL;
+ }
+
+ mlxsw_sp_acl_rulei_keymask_u32(rulei,
+ MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
+ mlxsw_sp_port->local_port,
+ 0xFFFFFFFF);
+ return 0;
+}
+
+static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
+ struct flow_cls_offload *f)
+{
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(f->rule, &match);
+
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ (char *) &match.key->src,
+ (char *) &match.mask->src, 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
+ (char *) &match.key->dst,
+ (char *) &match.mask->dst, 4);
+}
+
+static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
+ struct flow_cls_offload *f)
+{
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_ipv6_addrs(f->rule, &match);
+
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
+ &match.key->src.s6_addr[0x0],
+ &match.mask->src.s6_addr[0x0], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
+ &match.key->src.s6_addr[0x4],
+ &match.mask->src.s6_addr[0x4], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
+ &match.key->src.s6_addr[0x8],
+ &match.mask->src.s6_addr[0x8], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ &match.key->src.s6_addr[0xC],
+ &match.mask->src.s6_addr[0xC], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
+ &match.key->dst.s6_addr[0x0],
+ &match.mask->dst.s6_addr[0x0], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
+ &match.key->dst.s6_addr[0x4],
+ &match.mask->dst.s6_addr[0x4], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
+ &match.key->dst.s6_addr[0x8],
+ &match.mask->dst.s6_addr[0x8], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
+ &match.key->dst.s6_addr[0xC],
+ &match.mask->dst.s6_addr[0xC], 4);
+}
+
+static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct flow_cls_offload *f,
+ u8 ip_proto)
+{
+ const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct flow_match_ports match;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
+ return 0;
+
+ if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
+ dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
+ return -EINVAL;
+ }
+
+ flow_rule_match_ports(rule, &match);
+ mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
+ ntohs(match.key->dst),
+ ntohs(match.mask->dst));
+ mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
+ ntohs(match.key->src),
+ ntohs(match.mask->src));
+ return 0;
+}
+
+static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct flow_cls_offload *f,
+ u8 ip_proto)
+{
+ const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct flow_match_tcp match;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP))
+ return 0;
+
+ if (ip_proto != IPPROTO_TCP) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP");
+ dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
+ return -EINVAL;
+ }
+
+ flow_rule_match_tcp(rule, &match);
+
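+	/* 0x0E00 selects bits 9-11 of the flags word, i.e. the reserved
+	 * TCP header bits, which cannot be matched on.
+	 */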
+ if (match.mask->flags & htons(0x0E00)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "TCP flags match not supported on reserved bits");
+ dev_err(mlxsw_sp->bus_info->dev, "TCP flags match not supported on reserved bits\n");
+ return -EINVAL;
+ }
+
+ mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
+ ntohs(match.key->flags),
+ ntohs(match.mask->flags));
+ return 0;
+}
+
+static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct flow_cls_offload *f,
+ u16 n_proto)
+{
+ const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct flow_match_ip match;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP))
+ return 0;
+
+ if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6");
+ dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
+ return -EINVAL;
+ }
+
+ flow_rule_match_ip(rule, &match);
+
+ mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
+ match.key->ttl, match.mask->ttl);
+
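+	/* The TOS byte is split: its low two bits carry ECN and its high
+	 * six bits carry DSCP, matching the respective flex key elements.
+	 */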
+ mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
+ match.key->tos & 0x3,
+ match.mask->tos & 0x3);
+
+ mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
+ match.key->tos >> 2,
+ match.mask->tos >> 2);
+
+ return 0;
+}
+
+static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct flow_cls_offload *f)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct flow_dissector *dissector = rule->match.dissector;
+ u16 n_proto_mask = 0;
+ u16 n_proto_key = 0;
+ u16 addr_type = 0;
+ u8 ip_proto = 0;
+ int err;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_META) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_TCP) |
+ BIT(FLOW_DISSECTOR_KEY_IP) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN))) {
+ dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
+ NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
+ return -EOPNOTSUPP;
+ }
+
+ mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);
+
+ err = mlxsw_sp_flower_parse_meta(rulei, f, block);
+ if (err)
+ return err;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(rule, &match);
+ addr_type = match.key->addr_type;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+ n_proto_key = ntohs(match.key->n_proto);
+ n_proto_mask = ntohs(match.mask->n_proto);
+
+ if (n_proto_key == ETH_P_ALL) {
+ n_proto_key = 0;
+ n_proto_mask = 0;
+ }
+ mlxsw_sp_acl_rulei_keymask_u32(rulei,
+ MLXSW_AFK_ELEMENT_ETHERTYPE,
+ n_proto_key, n_proto_mask);
+
+ ip_proto = match.key->ip_proto;
+ mlxsw_sp_acl_rulei_keymask_u32(rulei,
+ MLXSW_AFK_ELEMENT_IP_PROTO,
+ match.key->ip_proto,
+ match.mask->ip_proto);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei,
+ MLXSW_AFK_ELEMENT_DMAC_32_47,
+ match.key->dst,
+ match.mask->dst, 2);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei,
+ MLXSW_AFK_ELEMENT_DMAC_0_31,
+ match.key->dst + 2,
+ match.mask->dst + 2, 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei,
+ MLXSW_AFK_ELEMENT_SMAC_32_47,
+ match.key->src,
+ match.mask->src, 2);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei,
+ MLXSW_AFK_ELEMENT_SMAC_0_31,
+ match.key->src + 2,
+ match.mask->src + 2, 4);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+ if (mlxsw_sp_flow_block_is_egress_bound(block) &&
+ match.mask->vlan_id) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
+ return -EOPNOTSUPP;
+ }
+
+ /* Forbid block with this rulei to be bound
+ * to egress in future.
+ */
+ rulei->egress_bind_blocker = 1;
+
+ if (match.mask->vlan_id != 0)
+ mlxsw_sp_acl_rulei_keymask_u32(rulei,
+ MLXSW_AFK_ELEMENT_VID,
+ match.key->vlan_id,
+ match.mask->vlan_id);
+ if (match.mask->vlan_priority != 0)
+ mlxsw_sp_acl_rulei_keymask_u32(rulei,
+ MLXSW_AFK_ELEMENT_PCP,
+ match.key->vlan_priority,
+ match.mask->vlan_priority);
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
+ mlxsw_sp_flower_parse_ipv4(rulei, f);
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
+ mlxsw_sp_flower_parse_ipv6(rulei, f);
+
+ err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
+ if (err)
+ return err;
+ err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f, n_proto_key & n_proto_mask);
+ if (err)
+ return err;
+
+ return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei,
+ &f->rule->action,
+ f->common.extack);
+}
+
+static int mlxsw_sp_flower_mall_prio_check(struct mlxsw_sp_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ bool ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
+ unsigned int mall_min_prio;
+ unsigned int mall_max_prio;
+ int err;
+
+ err = mlxsw_sp_mall_prio_get(block, f->common.chain_index,
+ &mall_min_prio, &mall_max_prio);
+ if (err) {
+ if (err == -ENOENT)
+ /* No matchall filters installed on this chain. */
+ return 0;
+ NL_SET_ERR_MSG(f->common.extack, "Failed to get matchall priorities");
+ return err;
+ }
+ if (ingress && f->common.prio <= mall_min_prio) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing matchall rules");
+ return -EOPNOTSUPP;
+ }
+ if (!ingress && f->common.prio >= mall_max_prio) {
+		NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing matchall rules");
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ struct mlxsw_sp_acl_rule_info *rulei;
+ struct mlxsw_sp_acl_ruleset *ruleset;
+ struct mlxsw_sp_acl_rule *rule;
+ int err;
+
+ err = mlxsw_sp_flower_mall_prio_check(block, f);
+ if (err)
+ return err;
+
+ ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
+ f->common.chain_index,
+ MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
+ if (IS_ERR(ruleset))
+ return PTR_ERR(ruleset);
+
+ rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie, NULL,
+ f->common.extack);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto err_rule_create;
+ }
+
+ rulei = mlxsw_sp_acl_rule_rulei(rule);
+ err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
+ if (err)
+ goto err_flower_parse;
+
+ err = mlxsw_sp_acl_rulei_commit(rulei);
+ if (err)
+ goto err_rulei_commit;
+
+ err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
+ if (err)
+ goto err_rule_add;
+
+ mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
+ return 0;
+
+err_rule_add:
+err_rulei_commit:
+err_flower_parse:
+ mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
+err_rule_create:
+ mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
+ return err;
+}
+
+void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset;
+ struct mlxsw_sp_acl_rule *rule;
+
+ ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
+ f->common.chain_index,
+ MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
+ if (IS_ERR(ruleset))
+ return;
+
+ rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
+ if (rule) {
+ mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
+ mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
+ }
+
+ mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
+}
+
+int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ enum flow_action_hw_stats used_hw_stats = FLOW_ACTION_HW_STATS_DISABLED;
+ struct mlxsw_sp_acl_ruleset *ruleset;
+ struct mlxsw_sp_acl_rule *rule;
+ u64 packets;
+ u64 lastuse;
+ u64 bytes;
+ u64 drops;
+ int err;
+
+ ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
+ f->common.chain_index,
+ MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
+ if (WARN_ON(IS_ERR(ruleset)))
+ return -EINVAL;
+
+ rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
+ if (!rule)
+ return -EINVAL;
+
+ err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
+ &drops, &lastuse, &used_hw_stats);
+ if (err)
+ goto err_rule_get_stats;
+
+ flow_stats_update(&f->stats, bytes, packets, drops, lastuse,
+ used_hw_stats);
+
+ mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
+ return 0;
+
+err_rule_get_stats:
+ mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
+ return err;
+}
+
+int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset;
+ struct mlxsw_sp_acl_rule_info rulei;
+ int err;
+
+ memset(&rulei, 0, sizeof(rulei));
+ err = mlxsw_sp_flower_parse(mlxsw_sp, block, &rulei, f);
+ if (err)
+ return err;
+ ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
+ f->common.chain_index,
+ MLXSW_SP_ACL_PROFILE_FLOWER,
+ &rulei.values.elusage);
+
+ /* keep the reference to the ruleset */
+ return PTR_ERR_OR_ZERO(ruleset);
+}
+
+void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset;
+
+ ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
+ f->common.chain_index,
+ MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
+ if (IS_ERR(ruleset))
+ return;
+	/* Put the reference taken just above and the one kept in create. */
+ mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
+ mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
+}
+
+int mlxsw_sp_flower_prio_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ u32 chain_index, unsigned int *p_min_prio,
+ unsigned int *p_max_prio)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset;
+
+ ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
+ chain_index,
+ MLXSW_SP_ACL_PROFILE_FLOWER);
+ if (IS_ERR(ruleset))
+ /* In case there are no flower rules, the caller
+ * receives -ENOENT to indicate there is no need
+ * to check the priorities.
+ */
+ return PTR_ERR(ruleset);
+ mlxsw_sp_acl_ruleset_prio_get(ruleset, p_min_prio, p_max_prio);
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
new file mode 100644
index 000000000..a2ee695a3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -0,0 +1,676 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#include <net/ip_tunnels.h>
+#include <net/ip6_tunnel.h>
+#include <net/inet_ecn.h>
+
+#include "spectrum_ipip.h"
+#include "reg.h"
+
+struct ip_tunnel_parm
+mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev)
+{
+ struct ip_tunnel *tun = netdev_priv(ol_dev);
+
+ return tun->parms;
+}
+
+struct __ip6_tnl_parm
+mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev)
+{
+ struct ip6_tnl *tun = netdev_priv(ol_dev);
+
+ return tun->parms;
+}
+
+static bool mlxsw_sp_ipip_parms4_has_ikey(const struct ip_tunnel_parm *parms)
+{
+ return !!(parms->i_flags & TUNNEL_KEY);
+}
+
+static bool mlxsw_sp_ipip_parms6_has_ikey(const struct __ip6_tnl_parm *parms)
+{
+ return !!(parms->i_flags & TUNNEL_KEY);
+}
+
+static bool mlxsw_sp_ipip_parms4_has_okey(const struct ip_tunnel_parm *parms)
+{
+ return !!(parms->o_flags & TUNNEL_KEY);
+}
+
+static bool mlxsw_sp_ipip_parms6_has_okey(const struct __ip6_tnl_parm *parms)
+{
+ return !!(parms->o_flags & TUNNEL_KEY);
+}
+
+static u32 mlxsw_sp_ipip_parms4_ikey(const struct ip_tunnel_parm *parms)
+{
+ return mlxsw_sp_ipip_parms4_has_ikey(parms) ?
+ be32_to_cpu(parms->i_key) : 0;
+}
+
+static u32 mlxsw_sp_ipip_parms6_ikey(const struct __ip6_tnl_parm *parms)
+{
+ return mlxsw_sp_ipip_parms6_has_ikey(parms) ?
+ be32_to_cpu(parms->i_key) : 0;
+}
+
+static u32 mlxsw_sp_ipip_parms4_okey(const struct ip_tunnel_parm *parms)
+{
+ return mlxsw_sp_ipip_parms4_has_okey(parms) ?
+ be32_to_cpu(parms->o_key) : 0;
+}
+
+static u32 mlxsw_sp_ipip_parms6_okey(const struct __ip6_tnl_parm *parms)
+{
+ return mlxsw_sp_ipip_parms6_has_okey(parms) ?
+ be32_to_cpu(parms->o_key) : 0;
+}
+
+static union mlxsw_sp_l3addr
+mlxsw_sp_ipip_parms4_saddr(const struct ip_tunnel_parm *parms)
+{
+ return (union mlxsw_sp_l3addr) { .addr4 = parms->iph.saddr };
+}
+
+static union mlxsw_sp_l3addr
+mlxsw_sp_ipip_parms6_saddr(const struct __ip6_tnl_parm *parms)
+{
+ return (union mlxsw_sp_l3addr) { .addr6 = parms->laddr };
+}
+
+static union mlxsw_sp_l3addr
+mlxsw_sp_ipip_parms4_daddr(const struct ip_tunnel_parm *parms)
+{
+ return (union mlxsw_sp_l3addr) { .addr4 = parms->iph.daddr };
+}
+
+static union mlxsw_sp_l3addr
+mlxsw_sp_ipip_parms6_daddr(const struct __ip6_tnl_parm *parms)
+{
+ return (union mlxsw_sp_l3addr) { .addr6 = parms->raddr };
+}
+
+union mlxsw_sp_l3addr
+mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
+ const struct net_device *ol_dev)
+{
+ struct ip_tunnel_parm parms4;
+ struct __ip6_tnl_parm parms6;
+
+ switch (proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
+ return mlxsw_sp_ipip_parms4_saddr(&parms4);
+ case MLXSW_SP_L3_PROTO_IPV6:
+ parms6 = mlxsw_sp_ipip_netdev_parms6(ol_dev);
+ return mlxsw_sp_ipip_parms6_saddr(&parms6);
+ }
+
+ WARN_ON(1);
+ return (union mlxsw_sp_l3addr) {0};
+}
+
+static __be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev)
+{
+ struct ip_tunnel_parm parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
+
+ return mlxsw_sp_ipip_parms4_daddr(&parms4).addr4;
+}
+
+static union mlxsw_sp_l3addr
+mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto,
+ const struct net_device *ol_dev)
+{
+ struct ip_tunnel_parm parms4;
+ struct __ip6_tnl_parm parms6;
+
+ switch (proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
+ return mlxsw_sp_ipip_parms4_daddr(&parms4);
+ case MLXSW_SP_L3_PROTO_IPV6:
+ parms6 = mlxsw_sp_ipip_netdev_parms6(ol_dev);
+ return mlxsw_sp_ipip_parms6_daddr(&parms6);
+ }
+
+ WARN_ON(1);
+ return (union mlxsw_sp_l3addr) {0};
+}
+
+bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr)
+{
+ union mlxsw_sp_l3addr naddr = {0};
+
+ return !memcmp(&addr, &naddr, sizeof(naddr));
+}
+
+static struct mlxsw_sp_ipip_parms
+mlxsw_sp_ipip_netdev_parms_init_gre4(const struct net_device *ol_dev)
+{
+ struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev);
+
+ return (struct mlxsw_sp_ipip_parms) {
+ .proto = MLXSW_SP_L3_PROTO_IPV4,
+ .saddr = mlxsw_sp_ipip_parms4_saddr(&parms),
+ .daddr = mlxsw_sp_ipip_parms4_daddr(&parms),
+ .link = parms.link,
+ .ikey = mlxsw_sp_ipip_parms4_ikey(&parms),
+ .okey = mlxsw_sp_ipip_parms4_okey(&parms),
+ };
+}
+
+static int
+mlxsw_sp_ipip_nexthop_update_gre4(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ bool force, char *ratr_pl)
+{
+ u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb);
+ __be32 daddr4 = mlxsw_sp_ipip_netdev_daddr4(ipip_entry->ol_dev);
+ enum mlxsw_reg_ratr_op op;
+
+ op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY :
+ MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY;
+ mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_IPIP,
+ adj_index, rif_index);
+ mlxsw_reg_ratr_ipip4_entry_pack(ratr_pl, be32_to_cpu(daddr4));
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
+}
+
+static int
+mlxsw_sp_ipip_decap_config_gre4(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ u32 tunnel_index)
+{
+ u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb);
+ u16 ul_rif_id = mlxsw_sp_ipip_lb_ul_rif_id(ipip_entry->ol_lb);
+ char rtdp_pl[MLXSW_REG_RTDP_LEN];
+ struct ip_tunnel_parm parms;
+ unsigned int type_check;
+ bool has_ikey;
+ u32 daddr4;
+ u32 ikey;
+
+ parms = mlxsw_sp_ipip_netdev_parms4(ipip_entry->ol_dev);
+ has_ikey = mlxsw_sp_ipip_parms4_has_ikey(&parms);
+ ikey = mlxsw_sp_ipip_parms4_ikey(&parms);
+
+ mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_IPIP, tunnel_index);
+ mlxsw_reg_rtdp_egress_router_interface_set(rtdp_pl, ul_rif_id);
+
+ type_check = has_ikey ?
+ MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_GRE_KEY :
+ MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_GRE;
+
+ /* Linux demuxes tunnels based on packet SIP (which must match tunnel
+ * remote IP). Thus configure decap so that it filters out packets that
+ * are not IPv4 or have the wrong SIP. IPIP_DECAP_ERROR trap is
+ * generated for packets that fail this criterion. Linux then handles
+ * such packets in slow path and generates ICMP destination unreachable.
+ */
+ daddr4 = be32_to_cpu(mlxsw_sp_ipip_netdev_daddr4(ipip_entry->ol_dev));
+ mlxsw_reg_rtdp_ipip4_pack(rtdp_pl, rif_index,
+ MLXSW_REG_RTDP_IPIP_SIP_CHECK_FILTER_IPV4,
+ type_check, has_ikey, daddr4, ikey);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl);
+}
+
+static bool mlxsw_sp_ipip_tunnel_complete(enum mlxsw_sp_l3proto proto,
+ const struct net_device *ol_dev)
+{
+ union mlxsw_sp_l3addr saddr = mlxsw_sp_ipip_netdev_saddr(proto, ol_dev);
+ union mlxsw_sp_l3addr daddr = mlxsw_sp_ipip_netdev_daddr(proto, ol_dev);
+
+ /* Tunnels with unset local or remote address are valid in Linux and
+ * used for lightweight tunnels (LWT) and Non-Broadcast Multi-Access
+ * (NBMA) tunnels. In principle these can be offloaded, but the driver
+ * currently doesn't support this. So punt.
+ */
+ return !mlxsw_sp_l3addr_is_zero(saddr) &&
+ !mlxsw_sp_l3addr_is_zero(daddr);
+}
+
+static bool mlxsw_sp_ipip_can_offload_gre4(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ol_dev)
+{
+ struct ip_tunnel *tunnel = netdev_priv(ol_dev);
+ __be16 okflags = TUNNEL_KEY; /* We can't offload any other features. */
+ bool inherit_ttl = tunnel->parms.iph.ttl == 0;
+ bool inherit_tos = tunnel->parms.iph.tos & 0x1;
+
+ return (tunnel->parms.i_flags & ~okflags) == 0 &&
+ (tunnel->parms.o_flags & ~okflags) == 0 &&
+ inherit_ttl && inherit_tos &&
+ mlxsw_sp_ipip_tunnel_complete(MLXSW_SP_L3_PROTO_IPV4, ol_dev);
+}
+
+static struct mlxsw_sp_rif_ipip_lb_config
+mlxsw_sp_ipip_ol_loopback_config_gre4(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ol_dev)
+{
+ struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev);
+ enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;
+
+ lb_ipipt = mlxsw_sp_ipip_parms4_has_okey(&parms) ?
+ MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_KEY_IN_IP :
+ MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_IN_IP;
+ return (struct mlxsw_sp_rif_ipip_lb_config){
+ .lb_ipipt = lb_ipipt,
+ .okey = mlxsw_sp_ipip_parms4_okey(&parms),
+ .ul_protocol = MLXSW_SP_L3_PROTO_IPV4,
+ .saddr = mlxsw_sp_ipip_netdev_saddr(MLXSW_SP_L3_PROTO_IPV4,
+ ol_dev),
+ };
+}
+
+static int
+mlxsw_sp_ipip_ol_netdev_change_gre(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ const struct mlxsw_sp_ipip_parms *new_parms,
+ struct netlink_ext_ack *extack)
+{
+ const struct mlxsw_sp_ipip_parms *old_parms = &ipip_entry->parms;
+ bool update_tunnel = false;
+ bool update_decap = false;
+ bool update_nhs = false;
+ int err = 0;
+
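+	/* Decide the scope of the update: a changed saddr needs a full
+	 * tunnel update (or demotion if it now clashes with another
+	 * tunnel), a changed okey or link also needs a full update, a
+	 * changed daddr only needs the nexthops updated and a changed
+	 * ikey only needs the decap entry updated.
+	 */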
+ if (!mlxsw_sp_l3addr_eq(&new_parms->saddr, &old_parms->saddr)) {
+ u16 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
+
+ /* Since the local address has changed, if there is another
+ * tunnel with a matching saddr, both need to be demoted.
+ */
+ if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp,
+ new_parms->proto,
+ new_parms->saddr,
+ ul_tb_id,
+ ipip_entry)) {
+ mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
+ return 0;
+ }
+
+ update_tunnel = true;
+ } else if (old_parms->okey != new_parms->okey ||
+ old_parms->link != new_parms->link) {
+ update_tunnel = true;
+ } else if (!mlxsw_sp_l3addr_eq(&new_parms->daddr, &old_parms->daddr)) {
+ update_nhs = true;
+ } else if (old_parms->ikey != new_parms->ikey) {
+ update_decap = true;
+ }
+
+ if (update_tunnel)
+ err = __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
+ true, true, true,
+ extack);
+ else if (update_nhs)
+ err = __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
+ false, false, true,
+ extack);
+ else if (update_decap)
+ err = __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
+ false, false, false,
+ extack);
+ if (err)
+ return err;
+
+ ipip_entry->parms = *new_parms;
+ return 0;
+}
+
+static int
+mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_ipip_parms new_parms;
+
+ new_parms = mlxsw_sp_ipip_netdev_parms_init_gre4(ipip_entry->ol_dev);
+ return mlxsw_sp_ipip_ol_netdev_change_gre(mlxsw_sp, ipip_entry,
+ &new_parms, extack);
+}
+
+static int
+mlxsw_sp_ipip_rem_addr_set_gre4(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ return 0;
+}
+
+static void
+mlxsw_sp_ipip_rem_addr_unset_gre4(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+}
+
+static const struct mlxsw_sp_ipip_ops mlxsw_sp_ipip_gre4_ops = {
+ .dev_type = ARPHRD_IPGRE,
+ .ul_proto = MLXSW_SP_L3_PROTO_IPV4,
+ .inc_parsing_depth = false,
+ .parms_init = mlxsw_sp_ipip_netdev_parms_init_gre4,
+ .nexthop_update = mlxsw_sp_ipip_nexthop_update_gre4,
+ .decap_config = mlxsw_sp_ipip_decap_config_gre4,
+ .can_offload = mlxsw_sp_ipip_can_offload_gre4,
+ .ol_loopback_config = mlxsw_sp_ipip_ol_loopback_config_gre4,
+ .ol_netdev_change = mlxsw_sp_ipip_ol_netdev_change_gre4,
+ .rem_ip_addr_set = mlxsw_sp_ipip_rem_addr_set_gre4,
+ .rem_ip_addr_unset = mlxsw_sp_ipip_rem_addr_unset_gre4,
+};
+
+static struct mlxsw_sp_ipip_parms
+mlxsw_sp1_ipip_netdev_parms_init_gre6(const struct net_device *ol_dev)
+{
+ struct mlxsw_sp_ipip_parms parms = {0};
+
+ WARN_ON_ONCE(1);
+ return parms;
+}
+
+static int
+mlxsw_sp1_ipip_nexthop_update_gre6(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ bool force, char *ratr_pl)
+{
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+}
+
+static int
+mlxsw_sp1_ipip_decap_config_gre6(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ u32 tunnel_index)
+{
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+}
+
+static bool mlxsw_sp1_ipip_can_offload_gre6(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ol_dev)
+{
+ return false;
+}
+
+static struct mlxsw_sp_rif_ipip_lb_config
+mlxsw_sp1_ipip_ol_loopback_config_gre6(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ol_dev)
+{
+ struct mlxsw_sp_rif_ipip_lb_config config = {0};
+
+ WARN_ON_ONCE(1);
+ return config;
+}
+
+static int
+mlxsw_sp1_ipip_ol_netdev_change_gre6(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct netlink_ext_ack *extack)
+{
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+}
+
+static int
+mlxsw_sp1_ipip_rem_addr_set_gre6(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+}
+
+static void
+mlxsw_sp1_ipip_rem_addr_unset_gre6(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ WARN_ON_ONCE(1);
+}
+
+static const struct mlxsw_sp_ipip_ops mlxsw_sp1_ipip_gre6_ops = {
+ .dev_type = ARPHRD_IP6GRE,
+ .ul_proto = MLXSW_SP_L3_PROTO_IPV6,
+ .inc_parsing_depth = true,
+ .parms_init = mlxsw_sp1_ipip_netdev_parms_init_gre6,
+ .nexthop_update = mlxsw_sp1_ipip_nexthop_update_gre6,
+ .decap_config = mlxsw_sp1_ipip_decap_config_gre6,
+ .can_offload = mlxsw_sp1_ipip_can_offload_gre6,
+ .ol_loopback_config = mlxsw_sp1_ipip_ol_loopback_config_gre6,
+ .ol_netdev_change = mlxsw_sp1_ipip_ol_netdev_change_gre6,
+ .rem_ip_addr_set = mlxsw_sp1_ipip_rem_addr_set_gre6,
+ .rem_ip_addr_unset = mlxsw_sp1_ipip_rem_addr_unset_gre6,
+};
+
+const struct mlxsw_sp_ipip_ops *mlxsw_sp1_ipip_ops_arr[] = {
+ [MLXSW_SP_IPIP_TYPE_GRE4] = &mlxsw_sp_ipip_gre4_ops,
+ [MLXSW_SP_IPIP_TYPE_GRE6] = &mlxsw_sp1_ipip_gre6_ops,
+};
+
+static struct mlxsw_sp_ipip_parms
+mlxsw_sp2_ipip_netdev_parms_init_gre6(const struct net_device *ol_dev)
+{
+ struct __ip6_tnl_parm parms = mlxsw_sp_ipip_netdev_parms6(ol_dev);
+
+ return (struct mlxsw_sp_ipip_parms) {
+ .proto = MLXSW_SP_L3_PROTO_IPV6,
+ .saddr = mlxsw_sp_ipip_parms6_saddr(&parms),
+ .daddr = mlxsw_sp_ipip_parms6_daddr(&parms),
+ .link = parms.link,
+ .ikey = mlxsw_sp_ipip_parms6_ikey(&parms),
+ .okey = mlxsw_sp_ipip_parms6_okey(&parms),
+ };
+}
+
+static int
+mlxsw_sp2_ipip_nexthop_update_gre6(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ bool force, char *ratr_pl)
+{
+ u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb);
+ enum mlxsw_reg_ratr_op op;
+
+ op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY :
+ MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY;
+ mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_IPIP,
+ adj_index, rif_index);
+ mlxsw_reg_ratr_ipip6_entry_pack(ratr_pl,
+ ipip_entry->dip_kvdl_index);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
+}
+
+static int
+mlxsw_sp2_ipip_decap_config_gre6(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ u32 tunnel_index)
+{
+ u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb);
+ u16 ul_rif_id = mlxsw_sp_ipip_lb_ul_rif_id(ipip_entry->ol_lb);
+ char rtdp_pl[MLXSW_REG_RTDP_LEN];
+ struct __ip6_tnl_parm parms;
+ unsigned int type_check;
+ bool has_ikey;
+ u32 ikey;
+
+ parms = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev);
+ has_ikey = mlxsw_sp_ipip_parms6_has_ikey(&parms);
+ ikey = mlxsw_sp_ipip_parms6_ikey(&parms);
+
+ mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_IPIP, tunnel_index);
+ mlxsw_reg_rtdp_egress_router_interface_set(rtdp_pl, ul_rif_id);
+
+ type_check = has_ikey ?
+ MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_GRE_KEY :
+ MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_GRE;
+
+ /* Linux demuxes tunnels based on packet SIP (which must match tunnel
+ * remote IP). Thus configure decap so that it filters out packets that
+ * are not IPv6 or have the wrong SIP. An IPIP_DECAP_ERROR trap is
+ * generated for packets that fail this criterion. Linux then handles
+ * such packets in the slow path and generates an ICMP destination
+ * unreachable.
+ */
+ mlxsw_reg_rtdp_ipip6_pack(rtdp_pl, rif_index,
+ MLXSW_REG_RTDP_IPIP_SIP_CHECK_FILTER_IPV6,
+ type_check, has_ikey,
+ ipip_entry->dip_kvdl_index, ikey);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl);
+}
+
+static bool mlxsw_sp2_ipip_can_offload_gre6(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ol_dev)
+{
+ struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(ol_dev);
+ bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
+ bool inherit_ttl = tparm.hop_limit == 0;
+ __be16 okflags = TUNNEL_KEY; /* We can't offload any other features. */
+
+ return (tparm.i_flags & ~okflags) == 0 &&
+ (tparm.o_flags & ~okflags) == 0 &&
+ inherit_ttl && inherit_tos &&
+ mlxsw_sp_ipip_tunnel_complete(MLXSW_SP_L3_PROTO_IPV6, ol_dev);
+}
+
+static struct mlxsw_sp_rif_ipip_lb_config
+mlxsw_sp2_ipip_ol_loopback_config_gre6(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ol_dev)
+{
+ struct __ip6_tnl_parm parms = mlxsw_sp_ipip_netdev_parms6(ol_dev);
+ enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;
+
+ lb_ipipt = mlxsw_sp_ipip_parms6_has_okey(&parms) ?
+ MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_KEY_IN_IP :
+ MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_IN_IP;
+ return (struct mlxsw_sp_rif_ipip_lb_config){
+ .lb_ipipt = lb_ipipt,
+ .okey = mlxsw_sp_ipip_parms6_okey(&parms),
+ .ul_protocol = MLXSW_SP_L3_PROTO_IPV6,
+ .saddr = mlxsw_sp_ipip_netdev_saddr(MLXSW_SP_L3_PROTO_IPV6,
+ ol_dev),
+ };
+}
+
+static int
+mlxsw_sp2_ipip_ol_netdev_change_gre6(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_ipip_parms new_parms;
+
+ new_parms = mlxsw_sp2_ipip_netdev_parms_init_gre6(ipip_entry->ol_dev);
+ return mlxsw_sp_ipip_ol_netdev_change_gre(mlxsw_sp, ipip_entry,
+ &new_parms, extack);
+}
+
+static int
+mlxsw_sp2_ipip_rem_addr_set_gre6(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ return mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp,
+ &ipip_entry->parms.daddr.addr6,
+ &ipip_entry->dip_kvdl_index);
+}
+
+static void
+mlxsw_sp2_ipip_rem_addr_unset_gre6(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ mlxsw_sp_ipv6_addr_put(mlxsw_sp, &ipip_entry->parms.daddr.addr6);
+}
+
+static const struct mlxsw_sp_ipip_ops mlxsw_sp2_ipip_gre6_ops = {
+ .dev_type = ARPHRD_IP6GRE,
+ .ul_proto = MLXSW_SP_L3_PROTO_IPV6,
+ .inc_parsing_depth = true,
+ .parms_init = mlxsw_sp2_ipip_netdev_parms_init_gre6,
+ .nexthop_update = mlxsw_sp2_ipip_nexthop_update_gre6,
+ .decap_config = mlxsw_sp2_ipip_decap_config_gre6,
+ .can_offload = mlxsw_sp2_ipip_can_offload_gre6,
+ .ol_loopback_config = mlxsw_sp2_ipip_ol_loopback_config_gre6,
+ .ol_netdev_change = mlxsw_sp2_ipip_ol_netdev_change_gre6,
+ .rem_ip_addr_set = mlxsw_sp2_ipip_rem_addr_set_gre6,
+ .rem_ip_addr_unset = mlxsw_sp2_ipip_rem_addr_unset_gre6,
+};
+
+const struct mlxsw_sp_ipip_ops *mlxsw_sp2_ipip_ops_arr[] = {
+ [MLXSW_SP_IPIP_TYPE_GRE4] = &mlxsw_sp_ipip_gre4_ops,
+ [MLXSW_SP_IPIP_TYPE_GRE6] = &mlxsw_sp2_ipip_gre6_ops,
+};
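
Both arrays above are indexed by enum mlxsw_sp_ipip_type, which lets the rest of the router code stay generation-agnostic and simply dispatch through whichever array was registered for the ASIC. A minimal dispatch sketch, assuming the active array is reachable from the router struct (the ipip_ops_arr field location and the example_ function name are assumptions here):

static int example_ipip_decap_config(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_ipip_entry *ipip_entry,
				     u32 tunnel_index)
{
	const struct mlxsw_sp_ipip_ops *ops;

	/* Pick the GRE4/GRE6 callbacks registered for this ASIC generation. */
	ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
	return ops->decap_config(mlxsw_sp, ipip_entry, tunnel_index);
}
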
+
+static int mlxsw_sp_ipip_ecn_encap_init_one(struct mlxsw_sp *mlxsw_sp,
+ u8 inner_ecn, u8 outer_ecn)
+{
+ char tieem_pl[MLXSW_REG_TIEEM_LEN];
+
+ mlxsw_reg_tieem_pack(tieem_pl, inner_ecn, outer_ecn);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tieem), tieem_pl);
+}
+
+int mlxsw_sp_ipip_ecn_encap_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+
+ /* Iterate over inner ECN values */
+ for (i = INET_ECN_NOT_ECT; i <= INET_ECN_CE; i++) {
+ u8 outer_ecn = INET_ECN_encapsulate(0, i);
+ int err;
+
+ err = mlxsw_sp_ipip_ecn_encap_init_one(mlxsw_sp, i, outer_ecn);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
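
The loop above installs one TIEEM entry per inner ECN value, with the outer value computed by INET_ECN_encapsulate(0, inner): the inner marking is copied to the outer header, except that CE is encapsulated as ECT(0). A small self-contained sketch of that mapping (userspace C, with the standard codepoint values spelled out):

#include <stdio.h>

/* Standard IP ECN codepoints. */
enum { ECN_NOT_ECT = 0, ECN_ECT_1 = 1, ECN_ECT_0 = 2, ECN_CE = 3 };

/* Mirrors INET_ECN_encapsulate(0, inner): copy inner, but CE -> ECT(0). */
static unsigned int outer_for_inner(unsigned int inner)
{
	return inner == ECN_CE ? ECN_ECT_0 : inner;
}

int main(void)
{
	for (unsigned int inner = ECN_NOT_ECT; inner <= ECN_CE; inner++)
		printf("inner ECN %u -> outer ECN %u\n", inner,
		       outer_for_inner(inner));
	return 0;
}
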
+
+static int mlxsw_sp_ipip_ecn_decap_init_one(struct mlxsw_sp *mlxsw_sp,
+ u8 inner_ecn, u8 outer_ecn)
+{
+ char tidem_pl[MLXSW_REG_TIDEM_LEN];
+ u8 new_inner_ecn;
+ bool trap_en;
+
+ new_inner_ecn = mlxsw_sp_tunnel_ecn_decap(outer_ecn, inner_ecn,
+ &trap_en);
+ mlxsw_reg_tidem_pack(tidem_pl, outer_ecn, inner_ecn, new_inner_ecn,
+ trap_en, trap_en ? MLXSW_TRAP_ID_DECAP_ECN0 : 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tidem), tidem_pl);
+}
+
+int mlxsw_sp_ipip_ecn_decap_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int i, j, err;
+
+ /* Iterate over inner ECN values */
+ for (i = INET_ECN_NOT_ECT; i <= INET_ECN_CE; i++) {
+ /* Iterate over outer ECN values */
+ for (j = INET_ECN_NOT_ECT; j <= INET_ECN_CE; j++) {
+ err = mlxsw_sp_ipip_ecn_decap_init_one(mlxsw_sp, i, j);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+struct net_device *
+mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
+{
+ struct net *net = dev_net(ol_dev);
+ struct ip_tunnel *tun4;
+ struct ip6_tnl *tun6;
+
+ switch (ol_dev->type) {
+ case ARPHRD_IPGRE:
+ tun4 = netdev_priv(ol_dev);
+ return dev_get_by_index_rcu(net, tun4->parms.link);
+ case ARPHRD_IP6GRE:
+ tun6 = netdev_priv(ol_dev);
+ return dev_get_by_index_rcu(net, tun6->parms.link);
+ default:
+ return NULL;
+ }
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
new file mode 100644
index 000000000..8cc259dcc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_IPIP_H_
+#define _MLXSW_IPIP_H_
+
+#include "spectrum_router.h"
+#include <net/ip_fib.h>
+#include <linux/if_tunnel.h>
+#include <net/ip6_tunnel.h>
+
+struct ip_tunnel_parm
+mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev);
+struct __ip6_tnl_parm
+mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev);
+
+union mlxsw_sp_l3addr
+mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
+ const struct net_device *ol_dev);
+
+bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr);
+
+enum mlxsw_sp_ipip_type {
+ MLXSW_SP_IPIP_TYPE_GRE4,
+ MLXSW_SP_IPIP_TYPE_GRE6,
+ MLXSW_SP_IPIP_TYPE_MAX,
+};
+
+struct mlxsw_sp_ipip_parms {
+ enum mlxsw_sp_l3proto proto;
+ union mlxsw_sp_l3addr saddr;
+ union mlxsw_sp_l3addr daddr;
+ int link;
+ u32 ikey;
+ u32 okey;
+};
+
+struct mlxsw_sp_ipip_entry {
+ enum mlxsw_sp_ipip_type ipipt;
+ struct net_device *ol_dev; /* Overlay. */
+ struct mlxsw_sp_rif_ipip_lb *ol_lb;
+ struct mlxsw_sp_fib_entry *decap_fib_entry;
+ struct list_head ipip_list_node;
+ struct mlxsw_sp_ipip_parms parms;
+ u32 dip_kvdl_index;
+};
+
+struct mlxsw_sp_ipip_ops {
+ int dev_type;
+ enum mlxsw_sp_l3proto ul_proto; /* Underlay. */
+ bool inc_parsing_depth;
+
+ struct mlxsw_sp_ipip_parms
+ (*parms_init)(const struct net_device *ol_dev);
+
+ int (*nexthop_update)(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ bool force, char *ratr_pl);
+
+ bool (*can_offload)(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ol_dev);
+
+ /* Return a configuration for creating an overlay loopback RIF. */
+ struct mlxsw_sp_rif_ipip_lb_config
+ (*ol_loopback_config)(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ol_dev);
+
+ int (*decap_config)(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ u32 tunnel_index);
+
+ int (*ol_netdev_change)(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct netlink_ext_ack *extack);
+ int (*rem_ip_addr_set)(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry);
+ void (*rem_ip_addr_unset)(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_ipip_entry *ipip_entry);
+};
+
+extern const struct mlxsw_sp_ipip_ops *mlxsw_sp1_ipip_ops_arr[];
+extern const struct mlxsw_sp_ipip_ops *mlxsw_sp2_ipip_ops_arr[];
+
+#endif /* _MLXSW_IPIP_H_*/
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
new file mode 100644
index 000000000..20d72f1c0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include "spectrum.h"
+
+struct mlxsw_sp_kvdl {
+ const struct mlxsw_sp_kvdl_ops *kvdl_ops;
+ struct mutex kvdl_lock; /* Protects kvdl allocations */
+ unsigned long priv[];
+ /* priv has to be always the last item */
+};
+
+int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
+{
+ const struct mlxsw_sp_kvdl_ops *kvdl_ops = mlxsw_sp->kvdl_ops;
+ struct mlxsw_sp_kvdl *kvdl;
+ int err;
+
+ kvdl = kzalloc(sizeof(*mlxsw_sp->kvdl) + kvdl_ops->priv_size,
+ GFP_KERNEL);
+ if (!kvdl)
+ return -ENOMEM;
+ mutex_init(&kvdl->kvdl_lock);
+ kvdl->kvdl_ops = kvdl_ops;
+ mlxsw_sp->kvdl = kvdl;
+
+ err = kvdl_ops->init(mlxsw_sp, kvdl->priv);
+ if (err)
+ goto err_init;
+ return 0;
+
+err_init:
+ mutex_destroy(&kvdl->kvdl_lock);
+ kfree(kvdl);
+ return err;
+}
+
+void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
+
+ kvdl->kvdl_ops->fini(mlxsw_sp, kvdl->priv);
+ mutex_destroy(&kvdl->kvdl_lock);
+ kfree(kvdl);
+}
+
+int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, u32 *p_entry_index)
+{
+ struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
+ int err;
+
+ mutex_lock(&kvdl->kvdl_lock);
+ err = kvdl->kvdl_ops->alloc(mlxsw_sp, kvdl->priv, type,
+ entry_count, p_entry_index);
+ mutex_unlock(&kvdl->kvdl_lock);
+
+ return err;
+}
+
+void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, int entry_index)
+{
+ struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
+
+ mutex_lock(&kvdl->kvdl_lock);
+ kvdl->kvdl_ops->free(mlxsw_sp, kvdl->priv, type,
+ entry_count, entry_index);
+ mutex_unlock(&kvdl->kvdl_lock);
+}
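
The wrappers above serialize every KVD linear allocation behind kvdl_lock and defer the actual placement policy to the per-ASIC kvdl_ops. A hedged usage sketch follows; the example_ helpers are hypothetical and MLXSW_SP_KVDL_ENTRY_TYPE_ADJ is used only as an illustrative entry type:

static int example_adj_entries_reserve(struct mlxsw_sp *mlxsw_sp,
				       u32 *p_kvdl_index)
{
	/* Reserve four consecutive adjacency entries; the allocation is
	 * serialized by kvdl_lock inside mlxsw_sp_kvdl_alloc().
	 */
	return mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
				   4, p_kvdl_index);
}

static void example_adj_entries_release(struct mlxsw_sp *mlxsw_sp,
					u32 kvdl_index)
{
	/* The release must use the same entry type and count. */
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
			   4, kvdl_index);
}
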
+
+int mlxsw_sp_kvdl_alloc_count_query(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ unsigned int *p_alloc_count)
+{
+ struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
+
+ return kvdl->kvdl_ops->alloc_size_query(mlxsw_sp, kvdl->priv, type,
+ entry_count, p_alloc_count);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c
new file mode 100644
index 000000000..07b371cd9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c
@@ -0,0 +1,478 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2020 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <net/flow_offload.h>
+
+#include "spectrum.h"
+#include "spectrum_span.h"
+#include "reg.h"
+
+static struct mlxsw_sp_mall_entry *
+mlxsw_sp_mall_entry_find(struct mlxsw_sp_flow_block *block, unsigned long cookie)
+{
+ struct mlxsw_sp_mall_entry *mall_entry;
+
+ list_for_each_entry(mall_entry, &block->mall.list, list)
+ if (mall_entry->cookie == cookie)
+ return mall_entry;
+
+ return NULL;
+}
+
+static int
+mlxsw_sp_mall_port_mirror_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_span_agent_parms agent_parms = {};
+ struct mlxsw_sp_span_trigger_parms parms;
+ enum mlxsw_sp_span_trigger trigger;
+ int err;
+
+ if (!mall_entry->mirror.to_dev) {
+ NL_SET_ERR_MSG(extack, "Could not find requested device");
+ return -EINVAL;
+ }
+
+ agent_parms.to_dev = mall_entry->mirror.to_dev;
+ err = mlxsw_sp_span_agent_get(mlxsw_sp, &mall_entry->mirror.span_id,
+ &agent_parms);
+ if (err) {
+ NL_SET_ERR_MSG(extack, "Failed to get SPAN agent");
+ return err;
+ }
+
+ err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port,
+ mall_entry->ingress);
+ if (err) {
+ NL_SET_ERR_MSG(extack, "Failed to get analyzed port");
+ goto err_analyzed_port_get;
+ }
+
+ trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
+ MLXSW_SP_SPAN_TRIGGER_EGRESS;
+ parms.span_id = mall_entry->mirror.span_id;
+ parms.probability_rate = 1;
+ err = mlxsw_sp_span_agent_bind(mlxsw_sp, trigger, mlxsw_sp_port,
+ &parms);
+ if (err) {
+ NL_SET_ERR_MSG(extack, "Failed to bind SPAN agent");
+ goto err_agent_bind;
+ }
+
+ return 0;
+
+err_agent_bind:
+ mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
+err_analyzed_port_get:
+ mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->mirror.span_id);
+ return err;
+}
+
+static void
+mlxsw_sp_mall_port_mirror_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_span_trigger_parms parms;
+ enum mlxsw_sp_span_trigger trigger;
+
+ trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
+ MLXSW_SP_SPAN_TRIGGER_EGRESS;
+ parms.span_id = mall_entry->mirror.span_id;
+ mlxsw_sp_span_agent_unbind(mlxsw_sp, trigger, mlxsw_sp_port, &parms);
+ mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
+ mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->mirror.span_id);
+}
+
+static int mlxsw_sp_mall_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool enable, u32 rate)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char mpsc_pl[MLXSW_REG_MPSC_LEN];
+
+ mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
+}
+
+static int
+mlxsw_sp_mall_port_sample_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_sample_trigger trigger;
+ int err;
+
+ if (mall_entry->ingress)
+ trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_INGRESS;
+ else
+ trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_EGRESS;
+ trigger.local_port = mlxsw_sp_port->local_port;
+ err = mlxsw_sp_sample_trigger_params_set(mlxsw_sp, &trigger,
+ &mall_entry->sample.params,
+ extack);
+ if (err)
+ return err;
+
+ err = mlxsw_sp->mall_ops->sample_add(mlxsw_sp, mlxsw_sp_port,
+ mall_entry, extack);
+ if (err)
+ goto err_port_sample_set;
+ return 0;
+
+err_port_sample_set:
+ mlxsw_sp_sample_trigger_params_unset(mlxsw_sp, &trigger);
+ return err;
+}
+
+static void
+mlxsw_sp_mall_port_sample_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_sample_trigger trigger;
+
+ if (mall_entry->ingress)
+ trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_INGRESS;
+ else
+ trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_EGRESS;
+ trigger.local_port = mlxsw_sp_port->local_port;
+
+ mlxsw_sp->mall_ops->sample_del(mlxsw_sp, mlxsw_sp_port, mall_entry);
+ mlxsw_sp_sample_trigger_params_unset(mlxsw_sp, &trigger);
+}
+
+static int
+mlxsw_sp_mall_port_rule_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct netlink_ext_ack *extack)
+{
+ switch (mall_entry->type) {
+ case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
+ return mlxsw_sp_mall_port_mirror_add(mlxsw_sp_port, mall_entry,
+ extack);
+ case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE:
+ return mlxsw_sp_mall_port_sample_add(mlxsw_sp_port, mall_entry,
+ extack);
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+}
+
+static void
+mlxsw_sp_mall_port_rule_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry)
+{
+ switch (mall_entry->type) {
+ case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
+ mlxsw_sp_mall_port_mirror_del(mlxsw_sp_port, mall_entry);
+ break;
+ case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE:
+ mlxsw_sp_mall_port_sample_del(mlxsw_sp_port, mall_entry);
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+static void mlxsw_sp_mall_prio_update(struct mlxsw_sp_flow_block *block)
+{
+ struct mlxsw_sp_mall_entry *mall_entry;
+
+ if (list_empty(&block->mall.list))
+ return;
+ block->mall.min_prio = UINT_MAX;
+ block->mall.max_prio = 0;
+ list_for_each_entry(mall_entry, &block->mall.list, list) {
+ if (mall_entry->priority < block->mall.min_prio)
+ block->mall.min_prio = mall_entry->priority;
+ if (mall_entry->priority > block->mall.max_prio)
+ block->mall.max_prio = mall_entry->priority;
+ }
+}
+
+int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct tc_cls_matchall_offload *f)
+{
+ struct mlxsw_sp_flow_block_binding *binding;
+ struct mlxsw_sp_mall_entry *mall_entry;
+ __be16 protocol = f->common.protocol;
+ struct flow_action_entry *act;
+ unsigned int flower_min_prio;
+ unsigned int flower_max_prio;
+ bool flower_prio_valid;
+ int err;
+
+ if (!flow_offload_has_one_action(&f->rule->action)) {
+ NL_SET_ERR_MSG(f->common.extack, "Only singular actions are supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (f->common.chain_index) {
+ NL_SET_ERR_MSG(f->common.extack, "Only chain 0 is supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (mlxsw_sp_flow_block_is_mixed_bound(block)) {
+ NL_SET_ERR_MSG(f->common.extack, "Only not mixed bound blocks are supported");
+ return -EOPNOTSUPP;
+ }
+
+ err = mlxsw_sp_flower_prio_get(mlxsw_sp, block, f->common.chain_index,
+ &flower_min_prio, &flower_max_prio);
+ if (err) {
+ if (err != -ENOENT) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to get flower priorities");
+ return err;
+ }
+ flower_prio_valid = false;
+ /* No flower filters are installed in the specified chain. */
+ } else {
+ flower_prio_valid = true;
+ }
+
+ if (protocol != htons(ETH_P_ALL)) {
+ NL_SET_ERR_MSG(f->common.extack, "matchall rules only supported with 'all' protocol");
+ return -EOPNOTSUPP;
+ }
+
+ mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL);
+ if (!mall_entry)
+ return -ENOMEM;
+ mall_entry->cookie = f->cookie;
+ mall_entry->priority = f->common.prio;
+ mall_entry->ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
+
+ if (flower_prio_valid && mall_entry->ingress &&
+ mall_entry->priority >= flower_min_prio) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules");
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+ if (flower_prio_valid && !mall_entry->ingress &&
+ mall_entry->priority <= flower_max_prio) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing flower rules");
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+
+ act = &f->rule->action.entries[0];
+
+ switch (act->id) {
+ case FLOW_ACTION_MIRRED:
+ mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR;
+ mall_entry->mirror.to_dev = act->dev;
+ break;
+ case FLOW_ACTION_SAMPLE:
+ mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_SAMPLE;
+ mall_entry->sample.params.psample_group = act->sample.psample_group;
+ mall_entry->sample.params.truncate = act->sample.truncate;
+ mall_entry->sample.params.trunc_size = act->sample.trunc_size;
+ mall_entry->sample.params.rate = act->sample.rate;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+
+ list_for_each_entry(binding, &block->binding_list, list) {
+ err = mlxsw_sp_mall_port_rule_add(binding->mlxsw_sp_port,
+ mall_entry, f->common.extack);
+ if (err)
+ goto rollback;
+ }
+
+ block->rule_count++;
+ if (mall_entry->ingress)
+ block->egress_blocker_rule_count++;
+ else
+ block->ingress_blocker_rule_count++;
+ list_add_tail(&mall_entry->list, &block->mall.list);
+ mlxsw_sp_mall_prio_update(block);
+ return 0;
+
+rollback:
+ list_for_each_entry_continue_reverse(binding, &block->binding_list,
+ list)
+ mlxsw_sp_mall_port_rule_del(binding->mlxsw_sp_port, mall_entry);
+errout:
+ kfree(mall_entry);
+ return err;
+}
+
+void mlxsw_sp_mall_destroy(struct mlxsw_sp_flow_block *block,
+ struct tc_cls_matchall_offload *f)
+{
+ struct mlxsw_sp_flow_block_binding *binding;
+ struct mlxsw_sp_mall_entry *mall_entry;
+
+ mall_entry = mlxsw_sp_mall_entry_find(block, f->cookie);
+ if (!mall_entry) {
+ NL_SET_ERR_MSG(f->common.extack, "Entry not found");
+ return;
+ }
+
+ list_del(&mall_entry->list);
+ if (mall_entry->ingress)
+ block->egress_blocker_rule_count--;
+ else
+ block->ingress_blocker_rule_count--;
+ block->rule_count--;
+ list_for_each_entry(binding, &block->binding_list, list)
+ mlxsw_sp_mall_port_rule_del(binding->mlxsw_sp_port, mall_entry);
+ kfree_rcu(mall_entry, rcu); /* sample RX packets may be in-flight */
+ mlxsw_sp_mall_prio_update(block);
+}
+
+int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_mall_entry *mall_entry;
+ int err;
+
+ list_for_each_entry(mall_entry, &block->mall.list, list) {
+ err = mlxsw_sp_mall_port_rule_add(mlxsw_sp_port, mall_entry,
+ extack);
+ if (err)
+ goto rollback;
+ }
+ return 0;
+
+rollback:
+ list_for_each_entry_continue_reverse(mall_entry, &block->mall.list,
+ list)
+ mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
+ return err;
+}
+
+void mlxsw_sp_mall_port_unbind(struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_mall_entry *mall_entry;
+
+ list_for_each_entry(mall_entry, &block->mall.list, list)
+ mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
+}
+
+int mlxsw_sp_mall_prio_get(struct mlxsw_sp_flow_block *block, u32 chain_index,
+ unsigned int *p_min_prio, unsigned int *p_max_prio)
+{
+ if (chain_index || list_empty(&block->mall.list))
+ /* In case there are no matchall rules, the caller
+ * receives -ENOENT to indicate there is no need
+ * to check the priorities.
+ */
+ return -ENOENT;
+ *p_min_prio = block->mall.min_prio;
+ *p_max_prio = block->mall.max_prio;
+ return 0;
+}
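
mlxsw_sp_mall_replace() above only accepts an ingress matchall rule whose priority is strictly lower (i.e. evaluated earlier) than every offloaded flower rule, and an egress rule whose priority is strictly higher; mlxsw_sp_mall_prio_get() exposes the matchall bounds so the flower side can enforce the mirror-image check. A hedged sketch of that counterpart check, where the example_ helper and the "ingress" and "flower_prio" parameters stand for the caller's local state:

static int example_flower_prio_check(struct mlxsw_sp_flow_block *block,
				     u32 chain_index, bool ingress,
				     unsigned int flower_prio)
{
	unsigned int mall_min_prio, mall_max_prio;

	if (mlxsw_sp_mall_prio_get(block, chain_index,
				   &mall_min_prio, &mall_max_prio))
		return 0;	/* No matchall rules to collide with. */

	/* Mirror image of the checks in mlxsw_sp_mall_replace(): on ingress
	 * the matchall rule must stay in front, on egress it must stay last.
	 */
	if (ingress && flower_prio <= mall_max_prio)
		return -EOPNOTSUPP;
	if (!ingress && flower_prio >= mall_min_prio)
		return -EOPNOTSUPP;
	return 0;
}
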
+
+static int mlxsw_sp1_mall_sample_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct netlink_ext_ack *extack)
+{
+ u32 rate = mall_entry->sample.params.rate;
+
+ if (!mall_entry->ingress) {
+ NL_SET_ERR_MSG(extack, "Sampling is not supported on egress");
+ return -EOPNOTSUPP;
+ }
+
+ if (rate > MLXSW_REG_MPSC_RATE_MAX) {
+ NL_SET_ERR_MSG(extack, "Unsupported sampling rate");
+ return -EOPNOTSUPP;
+ }
+
+ return mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, true, rate);
+}
+
+static void mlxsw_sp1_mall_sample_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry)
+{
+ mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, false, 1);
+}
+
+const struct mlxsw_sp_mall_ops mlxsw_sp1_mall_ops = {
+ .sample_add = mlxsw_sp1_mall_sample_add,
+ .sample_del = mlxsw_sp1_mall_sample_del,
+};
+
+static int mlxsw_sp2_mall_sample_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_span_trigger_parms trigger_parms = {};
+ struct mlxsw_sp_span_agent_parms agent_parms = {
+ .to_dev = NULL, /* Mirror to CPU. */
+ .session_id = MLXSW_SP_SPAN_SESSION_ID_SAMPLING,
+ };
+ u32 rate = mall_entry->sample.params.rate;
+ enum mlxsw_sp_span_trigger span_trigger;
+ int err;
+
+ err = mlxsw_sp_span_agent_get(mlxsw_sp, &mall_entry->sample.span_id,
+ &agent_parms);
+ if (err) {
+ NL_SET_ERR_MSG(extack, "Failed to get SPAN agent");
+ return err;
+ }
+
+ err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port,
+ mall_entry->ingress);
+ if (err) {
+ NL_SET_ERR_MSG(extack, "Failed to get analyzed port");
+ goto err_analyzed_port_get;
+ }
+
+ span_trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
+ MLXSW_SP_SPAN_TRIGGER_EGRESS;
+ trigger_parms.span_id = mall_entry->sample.span_id;
+ trigger_parms.probability_rate = rate;
+ err = mlxsw_sp_span_agent_bind(mlxsw_sp, span_trigger, mlxsw_sp_port,
+ &trigger_parms);
+ if (err) {
+ NL_SET_ERR_MSG(extack, "Failed to bind SPAN agent");
+ goto err_agent_bind;
+ }
+
+ return 0;
+
+err_agent_bind:
+ mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
+err_analyzed_port_get:
+ mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->sample.span_id);
+ return err;
+}
+
+static void mlxsw_sp2_mall_sample_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry)
+{
+ struct mlxsw_sp_span_trigger_parms trigger_parms = {};
+ enum mlxsw_sp_span_trigger span_trigger;
+
+ span_trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
+ MLXSW_SP_SPAN_TRIGGER_EGRESS;
+ trigger_parms.span_id = mall_entry->sample.span_id;
+ mlxsw_sp_span_agent_unbind(mlxsw_sp, span_trigger, mlxsw_sp_port,
+ &trigger_parms);
+ mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
+ mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->sample.span_id);
+}
+
+const struct mlxsw_sp_mall_ops mlxsw_sp2_mall_ops = {
+ .sample_add = mlxsw_sp2_mall_sample_add,
+ .sample_del = mlxsw_sp2_mall_sample_del,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
new file mode 100644
index 000000000..1f6bc0c7e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
@@ -0,0 +1,1075 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/mutex.h>
+#include <linux/rhashtable.h>
+#include <net/ipv6.h>
+
+#include "spectrum_mr.h"
+#include "spectrum_router.h"
+
+struct mlxsw_sp_mr {
+ const struct mlxsw_sp_mr_ops *mr_ops;
+ void *catchall_route_priv;
+ struct delayed_work stats_update_dw;
+ struct list_head table_list;
+ struct mutex table_list_lock; /* Protects table_list */
+#define MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL 5000 /* ms */
+ unsigned long priv[];
+ /* priv has to be always the last item */
+};
+
+struct mlxsw_sp_mr_vif;
+struct mlxsw_sp_mr_vif_ops {
+ bool (*is_regular)(const struct mlxsw_sp_mr_vif *vif);
+};
+
+struct mlxsw_sp_mr_vif {
+ struct net_device *dev;
+ const struct mlxsw_sp_rif *rif;
+ unsigned long vif_flags;
+
+ /* A list of route_vif_entry structs that point to routes in which the
+ * VIF instance is used as one of the egress VIFs.
+ */
+ struct list_head route_evif_list;
+
+ /* A list of route_vif_entry structs that point to routes in which the
+ * VIF instance is used as the ingress VIF.
+ */
+ struct list_head route_ivif_list;
+
+ /* Protocol specific operations for a VIF */
+ const struct mlxsw_sp_mr_vif_ops *ops;
+};
+
+struct mlxsw_sp_mr_route_vif_entry {
+ struct list_head vif_node;
+ struct list_head route_node;
+ struct mlxsw_sp_mr_vif *mr_vif;
+ struct mlxsw_sp_mr_route *mr_route;
+};
+
+struct mlxsw_sp_mr_table;
+struct mlxsw_sp_mr_table_ops {
+ bool (*is_route_valid)(const struct mlxsw_sp_mr_table *mr_table,
+ const struct mr_mfc *mfc);
+ void (*key_create)(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mr_mfc *mfc);
+ bool (*is_route_starg)(const struct mlxsw_sp_mr_table *mr_table,
+ const struct mlxsw_sp_mr_route *mr_route);
+};
+
+struct mlxsw_sp_mr_table {
+ struct list_head node;
+ enum mlxsw_sp_l3proto proto;
+ struct mlxsw_sp *mlxsw_sp;
+ u32 vr_id;
+ struct mlxsw_sp_mr_vif vifs[MAXVIFS];
+ struct list_head route_list;
+ struct mutex route_list_lock; /* Protects route_list */
+ struct rhashtable route_ht;
+ const struct mlxsw_sp_mr_table_ops *ops;
+ char catchall_route_priv[];
+ /* catchall_route_priv has to be always the last item */
+};
+
+struct mlxsw_sp_mr_route {
+ struct list_head node;
+ struct rhash_head ht_node;
+ struct mlxsw_sp_mr_route_key key;
+ enum mlxsw_sp_mr_route_action route_action;
+ u16 min_mtu;
+ struct mr_mfc *mfc;
+ void *route_priv;
+ const struct mlxsw_sp_mr_table *mr_table;
+ /* A list of route_vif_entry structs that point to the egress VIFs */
+ struct list_head evif_list;
+ /* A route_vif_entry struct that points to the ingress VIF */
+ struct mlxsw_sp_mr_route_vif_entry ivif;
+};
+
+static const struct rhashtable_params mlxsw_sp_mr_route_ht_params = {
+ .key_len = sizeof(struct mlxsw_sp_mr_route_key),
+ .key_offset = offsetof(struct mlxsw_sp_mr_route, key),
+ .head_offset = offsetof(struct mlxsw_sp_mr_route, ht_node),
+ .automatic_shrinking = true,
+};
+
+static bool mlxsw_sp_mr_vif_valid(const struct mlxsw_sp_mr_vif *vif)
+{
+ return vif->ops->is_regular(vif) && vif->dev && vif->rif;
+}
+
+static bool mlxsw_sp_mr_vif_exists(const struct mlxsw_sp_mr_vif *vif)
+{
+ return vif->dev;
+}
+
+static bool
+mlxsw_sp_mr_route_ivif_in_evifs(const struct mlxsw_sp_mr_route *mr_route)
+{
+ vifi_t ivif = mr_route->mfc->mfc_parent;
+
+ return mr_route->mfc->mfc_un.res.ttls[ivif] != 255;
+}
+
+static int
+mlxsw_sp_mr_route_valid_evifs_num(const struct mlxsw_sp_mr_route *mr_route)
+{
+ struct mlxsw_sp_mr_route_vif_entry *rve;
+ int valid_evifs;
+
+ valid_evifs = 0;
+ list_for_each_entry(rve, &mr_route->evif_list, route_node)
+ if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
+ valid_evifs++;
+ return valid_evifs;
+}
+
+static enum mlxsw_sp_mr_route_action
+mlxsw_sp_mr_route_action(const struct mlxsw_sp_mr_route *mr_route)
+{
+ struct mlxsw_sp_mr_route_vif_entry *rve;
+
+ /* If the ingress port is not regular and resolved, trap the route */
+ if (!mlxsw_sp_mr_vif_valid(mr_route->ivif.mr_vif))
+ return MLXSW_SP_MR_ROUTE_ACTION_TRAP;
+
+ /* The kernel does not match a (*,G) route whose ingress interface is
+ * not one of the egress interfaces, so trap such routes.
+ */
+ if (mr_route->mr_table->ops->is_route_starg(mr_route->mr_table,
+ mr_route) &&
+ !mlxsw_sp_mr_route_ivif_in_evifs(mr_route))
+ return MLXSW_SP_MR_ROUTE_ACTION_TRAP;
+
+ /* If the route has no valid eVIFs, trap it. */
+ if (!mlxsw_sp_mr_route_valid_evifs_num(mr_route))
+ return MLXSW_SP_MR_ROUTE_ACTION_TRAP;
+
+ /* If one of the eVIFs has no RIF, trap-and-forward the route as there
+ * is some more routing to do in software too.
+ */
+ list_for_each_entry(rve, &mr_route->evif_list, route_node)
+ if (mlxsw_sp_mr_vif_exists(rve->mr_vif) && !rve->mr_vif->rif)
+ return MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD;
+
+ return MLXSW_SP_MR_ROUTE_ACTION_FORWARD;
+}
+
+static enum mlxsw_sp_mr_route_prio
+mlxsw_sp_mr_route_prio(const struct mlxsw_sp_mr_route *mr_route)
+{
+ return mr_route->mr_table->ops->is_route_starg(mr_route->mr_table,
+ mr_route) ?
+ MLXSW_SP_MR_ROUTE_PRIO_STARG : MLXSW_SP_MR_ROUTE_PRIO_SG;
+}
+
+static int mlxsw_sp_mr_route_evif_link(struct mlxsw_sp_mr_route *mr_route,
+ struct mlxsw_sp_mr_vif *mr_vif)
+{
+ struct mlxsw_sp_mr_route_vif_entry *rve;
+
+ rve = kzalloc(sizeof(*rve), GFP_KERNEL);
+ if (!rve)
+ return -ENOMEM;
+ rve->mr_route = mr_route;
+ rve->mr_vif = mr_vif;
+ list_add_tail(&rve->route_node, &mr_route->evif_list);
+ list_add_tail(&rve->vif_node, &mr_vif->route_evif_list);
+ return 0;
+}
+
+static void
+mlxsw_sp_mr_route_evif_unlink(struct mlxsw_sp_mr_route_vif_entry *rve)
+{
+ list_del(&rve->route_node);
+ list_del(&rve->vif_node);
+ kfree(rve);
+}
+
+static void mlxsw_sp_mr_route_ivif_link(struct mlxsw_sp_mr_route *mr_route,
+ struct mlxsw_sp_mr_vif *mr_vif)
+{
+ mr_route->ivif.mr_route = mr_route;
+ mr_route->ivif.mr_vif = mr_vif;
+ list_add_tail(&mr_route->ivif.vif_node, &mr_vif->route_ivif_list);
+}
+
+static void mlxsw_sp_mr_route_ivif_unlink(struct mlxsw_sp_mr_route *mr_route)
+{
+ list_del(&mr_route->ivif.vif_node);
+}
+
+static int
+mlxsw_sp_mr_route_info_create(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route *mr_route,
+ struct mlxsw_sp_mr_route_info *route_info)
+{
+ struct mlxsw_sp_mr_route_vif_entry *rve;
+ u16 *erif_indices;
+ u16 irif_index;
+ u16 erif = 0;
+
+ erif_indices = kmalloc_array(MAXVIFS, sizeof(*erif_indices),
+ GFP_KERNEL);
+ if (!erif_indices)
+ return -ENOMEM;
+
+ list_for_each_entry(rve, &mr_route->evif_list, route_node) {
+ if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
+ u16 rifi = mlxsw_sp_rif_index(rve->mr_vif->rif);
+
+ erif_indices[erif++] = rifi;
+ }
+ }
+
+ if (mlxsw_sp_mr_vif_valid(mr_route->ivif.mr_vif))
+ irif_index = mlxsw_sp_rif_index(mr_route->ivif.mr_vif->rif);
+ else
+ irif_index = 0;
+
+ route_info->irif_index = irif_index;
+ route_info->erif_indices = erif_indices;
+ route_info->min_mtu = mr_route->min_mtu;
+ route_info->route_action = mr_route->route_action;
+ route_info->erif_num = erif;
+ return 0;
+}
+
+static void
+mlxsw_sp_mr_route_info_destroy(struct mlxsw_sp_mr_route_info *route_info)
+{
+ kfree(route_info->erif_indices);
+}
+
+static int mlxsw_sp_mr_route_write(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route *mr_route,
+ bool replace)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
+ struct mlxsw_sp_mr_route_info route_info;
+ struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
+ int err;
+
+ err = mlxsw_sp_mr_route_info_create(mr_table, mr_route, &route_info);
+ if (err)
+ return err;
+
+ if (!replace) {
+ struct mlxsw_sp_mr_route_params route_params;
+
+ mr_route->route_priv = kzalloc(mr->mr_ops->route_priv_size,
+ GFP_KERNEL);
+ if (!mr_route->route_priv) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ route_params.key = mr_route->key;
+ route_params.value = route_info;
+ route_params.prio = mlxsw_sp_mr_route_prio(mr_route);
+ err = mr->mr_ops->route_create(mlxsw_sp, mr->priv,
+ mr_route->route_priv,
+ &route_params);
+ if (err)
+ kfree(mr_route->route_priv);
+ } else {
+ err = mr->mr_ops->route_update(mlxsw_sp, mr_route->route_priv,
+ &route_info);
+ }
+out:
+ mlxsw_sp_mr_route_info_destroy(&route_info);
+ return err;
+}
+
+static void mlxsw_sp_mr_route_erase(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route *mr_route)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
+ struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
+
+ mr->mr_ops->route_destroy(mlxsw_sp, mr->priv, mr_route->route_priv);
+ kfree(mr_route->route_priv);
+}
+
+static struct mlxsw_sp_mr_route *
+mlxsw_sp_mr_route_create(struct mlxsw_sp_mr_table *mr_table,
+ struct mr_mfc *mfc)
+{
+ struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;
+ struct mlxsw_sp_mr_route *mr_route;
+ int err = 0;
+ int i;
+
+ /* Allocate and init a new route and fill it with parameters */
+ mr_route = kzalloc(sizeof(*mr_route), GFP_KERNEL);
+ if (!mr_route)
+ return ERR_PTR(-ENOMEM);
+ INIT_LIST_HEAD(&mr_route->evif_list);
+
+ /* Find min_mtu and link iVIF and eVIFs */
+ mr_route->min_mtu = ETH_MAX_MTU;
+ mr_cache_hold(mfc);
+ mr_route->mfc = mfc;
+ mr_table->ops->key_create(mr_table, &mr_route->key, mr_route->mfc);
+
+ mr_route->mr_table = mr_table;
+ for (i = 0; i < MAXVIFS; i++) {
+ if (mfc->mfc_un.res.ttls[i] != 255) {
+ err = mlxsw_sp_mr_route_evif_link(mr_route,
+ &mr_table->vifs[i]);
+ if (err)
+ goto err;
+ if (mr_table->vifs[i].dev &&
+ mr_table->vifs[i].dev->mtu < mr_route->min_mtu)
+ mr_route->min_mtu = mr_table->vifs[i].dev->mtu;
+ }
+ }
+ mlxsw_sp_mr_route_ivif_link(mr_route,
+ &mr_table->vifs[mfc->mfc_parent]);
+
+ mr_route->route_action = mlxsw_sp_mr_route_action(mr_route);
+ return mr_route;
+err:
+ mr_cache_put(mfc);
+ list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
+ mlxsw_sp_mr_route_evif_unlink(rve);
+ kfree(mr_route);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_mr_route_destroy(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route *mr_route)
+{
+ struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;
+
+ mlxsw_sp_mr_route_ivif_unlink(mr_route);
+ mr_cache_put(mr_route->mfc);
+ list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
+ mlxsw_sp_mr_route_evif_unlink(rve);
+ kfree(mr_route);
+}
+
+static void mlxsw_sp_mr_mfc_offload_set(struct mlxsw_sp_mr_route *mr_route,
+ bool offload)
+{
+ if (offload)
+ mr_route->mfc->mfc_flags |= MFC_OFFLOAD;
+ else
+ mr_route->mfc->mfc_flags &= ~MFC_OFFLOAD;
+}
+
+static void mlxsw_sp_mr_mfc_offload_update(struct mlxsw_sp_mr_route *mr_route)
+{
+ bool offload;
+
+ offload = mr_route->route_action != MLXSW_SP_MR_ROUTE_ACTION_TRAP;
+ mlxsw_sp_mr_mfc_offload_set(mr_route, offload);
+}
+
+static void __mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route *mr_route)
+{
+ WARN_ON_ONCE(!mutex_is_locked(&mr_table->route_list_lock));
+
+ mlxsw_sp_mr_mfc_offload_set(mr_route, false);
+ rhashtable_remove_fast(&mr_table->route_ht, &mr_route->ht_node,
+ mlxsw_sp_mr_route_ht_params);
+ list_del(&mr_route->node);
+ mlxsw_sp_mr_route_erase(mr_table, mr_route);
+ mlxsw_sp_mr_route_destroy(mr_table, mr_route);
+}
+
+int mlxsw_sp_mr_route_add(struct mlxsw_sp_mr_table *mr_table,
+ struct mr_mfc *mfc, bool replace)
+{
+ struct mlxsw_sp_mr_route *mr_orig_route = NULL;
+ struct mlxsw_sp_mr_route *mr_route;
+ int err;
+
+ if (!mr_table->ops->is_route_valid(mr_table, mfc))
+ return -EINVAL;
+
+ /* Create a new route */
+ mr_route = mlxsw_sp_mr_route_create(mr_table, mfc);
+ if (IS_ERR(mr_route))
+ return PTR_ERR(mr_route);
+
+ /* Find any route with a matching key */
+ mr_orig_route = rhashtable_lookup_fast(&mr_table->route_ht,
+ &mr_route->key,
+ mlxsw_sp_mr_route_ht_params);
+ if (replace) {
+ /* In the replace case, make the route point to the new
+ * route_priv.
+ */
+ if (WARN_ON(!mr_orig_route)) {
+ err = -ENOENT;
+ goto err_no_orig_route;
+ }
+ mr_route->route_priv = mr_orig_route->route_priv;
+ } else if (mr_orig_route) {
+ /* In the non-replace case, if another route with the same key
+ * was found, abort, as duplicate routes are used for proxy
+ * routes.
+ */
+ dev_warn(mr_table->mlxsw_sp->bus_info->dev,
+ "Offloading proxy routes is not supported.\n");
+ err = -EINVAL;
+ goto err_duplicate_route;
+ }
+
+ /* Write the route to the hardware */
+ err = mlxsw_sp_mr_route_write(mr_table, mr_route, replace);
+ if (err)
+ goto err_mr_route_write;
+
+ /* Put it in the table data-structures */
+ mutex_lock(&mr_table->route_list_lock);
+ list_add_tail(&mr_route->node, &mr_table->route_list);
+ mutex_unlock(&mr_table->route_list_lock);
+ err = rhashtable_insert_fast(&mr_table->route_ht,
+ &mr_route->ht_node,
+ mlxsw_sp_mr_route_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ /* Destroy the original route */
+ if (replace) {
+ rhashtable_remove_fast(&mr_table->route_ht,
+ &mr_orig_route->ht_node,
+ mlxsw_sp_mr_route_ht_params);
+ list_del(&mr_orig_route->node);
+ mlxsw_sp_mr_route_destroy(mr_table, mr_orig_route);
+ }
+
+ mlxsw_sp_mr_mfc_offload_update(mr_route);
+ return 0;
+
+err_rhashtable_insert:
+ mutex_lock(&mr_table->route_list_lock);
+ list_del(&mr_route->node);
+ mutex_unlock(&mr_table->route_list_lock);
+ mlxsw_sp_mr_route_erase(mr_table, mr_route);
+err_mr_route_write:
+err_no_orig_route:
+err_duplicate_route:
+ mlxsw_sp_mr_route_destroy(mr_table, mr_route);
+ return err;
+}
+
+void mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
+ struct mr_mfc *mfc)
+{
+ struct mlxsw_sp_mr_route *mr_route;
+ struct mlxsw_sp_mr_route_key key;
+
+ mr_table->ops->key_create(mr_table, &key, mfc);
+ mr_route = rhashtable_lookup_fast(&mr_table->route_ht, &key,
+ mlxsw_sp_mr_route_ht_params);
+ if (mr_route) {
+ mutex_lock(&mr_table->route_list_lock);
+ __mlxsw_sp_mr_route_del(mr_table, mr_route);
+ mutex_unlock(&mr_table->route_list_lock);
+ }
+}
+
+/* Should be called after the VIF struct is updated */
+static int
+mlxsw_sp_mr_route_ivif_resolve(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route_vif_entry *rve)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
+ enum mlxsw_sp_mr_route_action route_action;
+ struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
+ u16 irif_index;
+ int err;
+
+ route_action = mlxsw_sp_mr_route_action(rve->mr_route);
+ if (route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP)
+ return 0;
+
+ /* rve->mr_vif->rif is guaranteed to be valid at this stage */
+ irif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
+ err = mr->mr_ops->route_irif_update(mlxsw_sp, rve->mr_route->route_priv,
+ irif_index);
+ if (err)
+ return err;
+
+ err = mr->mr_ops->route_action_update(mlxsw_sp,
+ rve->mr_route->route_priv,
+ route_action);
+ if (err)
+ /* No need to roll back here because the iRIF change only takes
+ * place after the action has been updated.
+ */
+ return err;
+
+ rve->mr_route->route_action = route_action;
+ mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
+ return 0;
+}
+
+static void
+mlxsw_sp_mr_route_ivif_unresolve(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route_vif_entry *rve)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
+ struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
+
+ mr->mr_ops->route_action_update(mlxsw_sp, rve->mr_route->route_priv,
+ MLXSW_SP_MR_ROUTE_ACTION_TRAP);
+ rve->mr_route->route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP;
+ mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
+}
+
+/* Should be called after the RIF struct is updated */
+static int
+mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route_vif_entry *rve)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
+ enum mlxsw_sp_mr_route_action route_action;
+ struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
+ u16 erif_index = 0;
+ int err;
+
+ /* Add the eRIF */
+ if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
+ erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
+ err = mr->mr_ops->route_erif_add(mlxsw_sp,
+ rve->mr_route->route_priv,
+ erif_index);
+ if (err)
+ return err;
+ }
+
+ /* Update the route action, as the new eVIF can be a tunnel or a pimreg
+ * device which will require updating the action.
+ */
+ route_action = mlxsw_sp_mr_route_action(rve->mr_route);
+ if (route_action != rve->mr_route->route_action) {
+ err = mr->mr_ops->route_action_update(mlxsw_sp,
+ rve->mr_route->route_priv,
+ route_action);
+ if (err)
+ goto err_route_action_update;
+ }
+
+ /* Update the minimum MTU */
+ if (rve->mr_vif->dev->mtu < rve->mr_route->min_mtu) {
+ rve->mr_route->min_mtu = rve->mr_vif->dev->mtu;
+ err = mr->mr_ops->route_min_mtu_update(mlxsw_sp,
+ rve->mr_route->route_priv,
+ rve->mr_route->min_mtu);
+ if (err)
+ goto err_route_min_mtu_update;
+ }
+
+ rve->mr_route->route_action = route_action;
+ mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
+ return 0;
+
+err_route_min_mtu_update:
+ if (route_action != rve->mr_route->route_action)
+ mr->mr_ops->route_action_update(mlxsw_sp,
+ rve->mr_route->route_priv,
+ rve->mr_route->route_action);
+err_route_action_update:
+ if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
+ mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
+ erif_index);
+ return err;
+}
+
+/* Should be called before the RIF struct is updated */
+static void
+mlxsw_sp_mr_route_evif_unresolve(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route_vif_entry *rve)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
+ enum mlxsw_sp_mr_route_action route_action;
+ struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
+ u16 rifi;
+
+ /* If the unresolved RIF was not valid, no need to delete it */
+ if (!mlxsw_sp_mr_vif_valid(rve->mr_vif))
+ return;
+
+ /* Update the route action: if there is only one valid eVIF in the
+ * route, set the action to trap as the VIF deletion will lead to zero
+ * valid eVIFs. In any other case, use mlxsw_sp_mr_route_action() to
+ * determine the route action.
+ */
+ if (mlxsw_sp_mr_route_valid_evifs_num(rve->mr_route) == 1)
+ route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP;
+ else
+ route_action = mlxsw_sp_mr_route_action(rve->mr_route);
+ if (route_action != rve->mr_route->route_action)
+ mr->mr_ops->route_action_update(mlxsw_sp,
+ rve->mr_route->route_priv,
+ route_action);
+
+ /* Delete the erif from the route */
+ rifi = mlxsw_sp_rif_index(rve->mr_vif->rif);
+ mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv, rifi);
+ rve->mr_route->route_action = route_action;
+ mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
+}
+
+static int mlxsw_sp_mr_vif_resolve(struct mlxsw_sp_mr_table *mr_table,
+ struct net_device *dev,
+ struct mlxsw_sp_mr_vif *mr_vif,
+ unsigned long vif_flags,
+ const struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp_mr_route_vif_entry *irve, *erve;
+ int err;
+
+ /* Update the VIF */
+ mr_vif->dev = dev;
+ mr_vif->rif = rif;
+ mr_vif->vif_flags = vif_flags;
+
+ /* Update all routes where this VIF is used as an unresolved iRIF */
+ list_for_each_entry(irve, &mr_vif->route_ivif_list, vif_node) {
+ err = mlxsw_sp_mr_route_ivif_resolve(mr_table, irve);
+ if (err)
+ goto err_irif_unresolve;
+ }
+
+ /* Update all routes where this VIF is used as an unresolved eRIF */
+ list_for_each_entry(erve, &mr_vif->route_evif_list, vif_node) {
+ err = mlxsw_sp_mr_route_evif_resolve(mr_table, erve);
+ if (err)
+ goto err_erif_unresolve;
+ }
+ return 0;
+
+err_erif_unresolve:
+ list_for_each_entry_continue_reverse(erve, &mr_vif->route_evif_list,
+ vif_node)
+ mlxsw_sp_mr_route_evif_unresolve(mr_table, erve);
+err_irif_unresolve:
+ list_for_each_entry_continue_reverse(irve, &mr_vif->route_ivif_list,
+ vif_node)
+ mlxsw_sp_mr_route_ivif_unresolve(mr_table, irve);
+ mr_vif->rif = NULL;
+ return err;
+}
+
+static void mlxsw_sp_mr_vif_unresolve(struct mlxsw_sp_mr_table *mr_table,
+ struct net_device *dev,
+ struct mlxsw_sp_mr_vif *mr_vif)
+{
+ struct mlxsw_sp_mr_route_vif_entry *rve;
+
+ /* Update all routes where this VIF is used as an unresolved eRIF */
+ list_for_each_entry(rve, &mr_vif->route_evif_list, vif_node)
+ mlxsw_sp_mr_route_evif_unresolve(mr_table, rve);
+
+ /* Update all routes where this VIF is used as an unresolved iRIF */
+ list_for_each_entry(rve, &mr_vif->route_ivif_list, vif_node)
+ mlxsw_sp_mr_route_ivif_unresolve(mr_table, rve);
+
+ /* Update the VIF */
+ mr_vif->dev = dev;
+ mr_vif->rif = NULL;
+}
+
+int mlxsw_sp_mr_vif_add(struct mlxsw_sp_mr_table *mr_table,
+ struct net_device *dev, vifi_t vif_index,
+ unsigned long vif_flags, const struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp_mr_vif *mr_vif = &mr_table->vifs[vif_index];
+
+ if (WARN_ON(vif_index >= MAXVIFS))
+ return -EINVAL;
+ if (mr_vif->dev)
+ return -EEXIST;
+ return mlxsw_sp_mr_vif_resolve(mr_table, dev, mr_vif, vif_flags, rif);
+}
+
+void mlxsw_sp_mr_vif_del(struct mlxsw_sp_mr_table *mr_table, vifi_t vif_index)
+{
+ struct mlxsw_sp_mr_vif *mr_vif = &mr_table->vifs[vif_index];
+
+ if (WARN_ON(vif_index >= MAXVIFS))
+ return;
+ if (WARN_ON(!mr_vif->dev))
+ return;
+ mlxsw_sp_mr_vif_unresolve(mr_table, NULL, mr_vif);
+}
+
+static struct mlxsw_sp_mr_vif *
+mlxsw_sp_mr_dev_vif_lookup(struct mlxsw_sp_mr_table *mr_table,
+ const struct net_device *dev)
+{
+ vifi_t vif_index;
+
+ for (vif_index = 0; vif_index < MAXVIFS; vif_index++)
+ if (mr_table->vifs[vif_index].dev == dev)
+ return &mr_table->vifs[vif_index];
+ return NULL;
+}
+
+int mlxsw_sp_mr_rif_add(struct mlxsw_sp_mr_table *mr_table,
+ const struct mlxsw_sp_rif *rif)
+{
+ const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
+ struct mlxsw_sp_mr_vif *mr_vif;
+
+ if (!rif_dev)
+ return 0;
+
+ mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
+ if (!mr_vif)
+ return 0;
+ return mlxsw_sp_mr_vif_resolve(mr_table, mr_vif->dev, mr_vif,
+ mr_vif->vif_flags, rif);
+}
+
+void mlxsw_sp_mr_rif_del(struct mlxsw_sp_mr_table *mr_table,
+ const struct mlxsw_sp_rif *rif)
+{
+ const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
+ struct mlxsw_sp_mr_vif *mr_vif;
+
+ if (!rif_dev)
+ return;
+
+ mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
+ if (!mr_vif)
+ return;
+ mlxsw_sp_mr_vif_unresolve(mr_table, mr_vif->dev, mr_vif);
+}
+
+void mlxsw_sp_mr_rif_mtu_update(struct mlxsw_sp_mr_table *mr_table,
+ const struct mlxsw_sp_rif *rif, int mtu)
+{
+ const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
+ struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
+ struct mlxsw_sp_mr_route_vif_entry *rve;
+ struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
+ struct mlxsw_sp_mr_vif *mr_vif;
+
+ if (!rif_dev)
+ return;
+
+ /* Search for a VIF that uses that RIF */
+ mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
+ if (!mr_vif)
+ return;
+
+ /* Update all the routes that use that VIF as an eVIF */
+ list_for_each_entry(rve, &mr_vif->route_evif_list, vif_node) {
+ if (mtu < rve->mr_route->min_mtu) {
+ rve->mr_route->min_mtu = mtu;
+ mr->mr_ops->route_min_mtu_update(mlxsw_sp,
+ rve->mr_route->route_priv,
+ mtu);
+ }
+ }
+}
+
+/* Protocol specific functions */
+static bool
+mlxsw_sp_mr_route4_validate(const struct mlxsw_sp_mr_table *mr_table,
+ const struct mr_mfc *c)
+{
+ struct mfc_cache *mfc = (struct mfc_cache *) c;
+
+ /* If the route is a (*,*) route, abort, as such routes are used
+ * for proxy routes.
+ */
+ if (mfc->mfc_origin == htonl(INADDR_ANY) &&
+ mfc->mfc_mcastgrp == htonl(INADDR_ANY)) {
+ dev_warn(mr_table->mlxsw_sp->bus_info->dev,
+ "Offloading proxy routes is not supported.\n");
+ return false;
+ }
+ return true;
+}
+
+static void mlxsw_sp_mr_route4_key(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mr_mfc *c)
+{
+ const struct mfc_cache *mfc = (struct mfc_cache *) c;
+ bool starg;
+
+ starg = (mfc->mfc_origin == htonl(INADDR_ANY));
+
+ memset(key, 0, sizeof(*key));
+ key->vrid = mr_table->vr_id;
+ key->proto = MLXSW_SP_L3_PROTO_IPV4;
+ key->group.addr4 = mfc->mfc_mcastgrp;
+ key->group_mask.addr4 = htonl(0xffffffff);
+ key->source.addr4 = mfc->mfc_origin;
+ key->source_mask.addr4 = htonl(starg ? 0 : 0xffffffff);
+}
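
The (*,G) versus (S,G) distinction is carried entirely by the source mask: the group is always matched exactly, while a wildcard source gets an all-zero mask. For illustration, a hypothetical (*,G) key for group 239.1.1.1 in VRID 0 would be built like this:

	struct mlxsw_sp_mr_route_key key = {
		.vrid = 0,
		.proto = MLXSW_SP_L3_PROTO_IPV4,
		.group.addr4 = htonl(0xef010101),	/* 239.1.1.1 */
		.group_mask.addr4 = htonl(0xffffffff),	/* exact group match */
		.source.addr4 = htonl(INADDR_ANY),
		.source_mask.addr4 = htonl(0),		/* any source => (*,G) */
	};
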
+
+static bool mlxsw_sp_mr_route4_starg(const struct mlxsw_sp_mr_table *mr_table,
+ const struct mlxsw_sp_mr_route *mr_route)
+{
+ return mr_route->key.source_mask.addr4 == htonl(INADDR_ANY);
+}
+
+static bool mlxsw_sp_mr_vif4_is_regular(const struct mlxsw_sp_mr_vif *vif)
+{
+ return !(vif->vif_flags & (VIFF_TUNNEL | VIFF_REGISTER));
+}
+
+static bool
+mlxsw_sp_mr_route6_validate(const struct mlxsw_sp_mr_table *mr_table,
+ const struct mr_mfc *c)
+{
+ struct mfc6_cache *mfc = (struct mfc6_cache *) c;
+
+ /* If the route is a (*,*) route, abort, as such routes are used
+ * for proxy routes.
+ */
+ if (ipv6_addr_any(&mfc->mf6c_origin) &&
+ ipv6_addr_any(&mfc->mf6c_mcastgrp)) {
+ dev_warn(mr_table->mlxsw_sp->bus_info->dev,
+ "Offloading proxy routes is not supported.\n");
+ return false;
+ }
+ return true;
+}
+
+static void mlxsw_sp_mr_route6_key(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mr_mfc *c)
+{
+ const struct mfc6_cache *mfc = (struct mfc6_cache *) c;
+
+ memset(key, 0, sizeof(*key));
+ key->vrid = mr_table->vr_id;
+ key->proto = MLXSW_SP_L3_PROTO_IPV6;
+ key->group.addr6 = mfc->mf6c_mcastgrp;
+ memset(&key->group_mask.addr6, 0xff, sizeof(key->group_mask.addr6));
+ key->source.addr6 = mfc->mf6c_origin;
+ if (!ipv6_addr_any(&mfc->mf6c_origin))
+ memset(&key->source_mask.addr6, 0xff,
+ sizeof(key->source_mask.addr6));
+}
+
+static bool mlxsw_sp_mr_route6_starg(const struct mlxsw_sp_mr_table *mr_table,
+ const struct mlxsw_sp_mr_route *mr_route)
+{
+ return ipv6_addr_any(&mr_route->key.source_mask.addr6);
+}
+
+static bool mlxsw_sp_mr_vif6_is_regular(const struct mlxsw_sp_mr_vif *vif)
+{
+ return !(vif->vif_flags & MIFF_REGISTER);
+}
+
+static struct
+mlxsw_sp_mr_vif_ops mlxsw_sp_mr_vif_ops_arr[] = {
+ {
+ .is_regular = mlxsw_sp_mr_vif4_is_regular,
+ },
+ {
+ .is_regular = mlxsw_sp_mr_vif6_is_regular,
+ },
+};
+
+static struct
+mlxsw_sp_mr_table_ops mlxsw_sp_mr_table_ops_arr[] = {
+ {
+ .is_route_valid = mlxsw_sp_mr_route4_validate,
+ .key_create = mlxsw_sp_mr_route4_key,
+ .is_route_starg = mlxsw_sp_mr_route4_starg,
+ },
+ {
+ .is_route_valid = mlxsw_sp_mr_route6_validate,
+ .key_create = mlxsw_sp_mr_route6_key,
+ .is_route_starg = mlxsw_sp_mr_route6_starg,
+ },
+
+};
+
+struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
+ u32 vr_id,
+ enum mlxsw_sp_l3proto proto)
+{
+ struct mlxsw_sp_mr_route_params catchall_route_params = {
+ .prio = MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
+ .key = {
+ .vrid = vr_id,
+ .proto = proto,
+ },
+ .value = {
+ .route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP,
+ }
+ };
+ struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
+ struct mlxsw_sp_mr_table *mr_table;
+ int err;
+ int i;
+
+ mr_table = kzalloc(sizeof(*mr_table) + mr->mr_ops->route_priv_size,
+ GFP_KERNEL);
+ if (!mr_table)
+ return ERR_PTR(-ENOMEM);
+
+ mr_table->vr_id = vr_id;
+ mr_table->mlxsw_sp = mlxsw_sp;
+ mr_table->proto = proto;
+ mr_table->ops = &mlxsw_sp_mr_table_ops_arr[proto];
+ INIT_LIST_HEAD(&mr_table->route_list);
+ mutex_init(&mr_table->route_list_lock);
+
+ err = rhashtable_init(&mr_table->route_ht,
+ &mlxsw_sp_mr_route_ht_params);
+ if (err)
+ goto err_route_rhashtable_init;
+
+ for (i = 0; i < MAXVIFS; i++) {
+ INIT_LIST_HEAD(&mr_table->vifs[i].route_evif_list);
+ INIT_LIST_HEAD(&mr_table->vifs[i].route_ivif_list);
+ mr_table->vifs[i].ops = &mlxsw_sp_mr_vif_ops_arr[proto];
+ }
+
+ err = mr->mr_ops->route_create(mlxsw_sp, mr->priv,
+ mr_table->catchall_route_priv,
+ &catchall_route_params);
+ if (err)
+ goto err_ops_route_create;
+ mutex_lock(&mr->table_list_lock);
+ list_add_tail(&mr_table->node, &mr->table_list);
+ mutex_unlock(&mr->table_list_lock);
+ return mr_table;
+
+err_ops_route_create:
+ rhashtable_destroy(&mr_table->route_ht);
+err_route_rhashtable_init:
+ mutex_destroy(&mr_table->route_list_lock);
+ kfree(mr_table);
+ return ERR_PTR(err);
+}
+
+void mlxsw_sp_mr_table_destroy(struct mlxsw_sp_mr_table *mr_table)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
+ struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
+
+ WARN_ON(!mlxsw_sp_mr_table_empty(mr_table));
+ mutex_lock(&mr->table_list_lock);
+ list_del(&mr_table->node);
+ mutex_unlock(&mr->table_list_lock);
+ mr->mr_ops->route_destroy(mlxsw_sp, mr->priv,
+ &mr_table->catchall_route_priv);
+ rhashtable_destroy(&mr_table->route_ht);
+ mutex_destroy(&mr_table->route_list_lock);
+ kfree(mr_table);
+}
+
+void mlxsw_sp_mr_table_flush(struct mlxsw_sp_mr_table *mr_table)
+{
+ struct mlxsw_sp_mr_route *mr_route, *tmp;
+ int i;
+
+ mutex_lock(&mr_table->route_list_lock);
+ list_for_each_entry_safe(mr_route, tmp, &mr_table->route_list, node)
+ __mlxsw_sp_mr_route_del(mr_table, mr_route);
+ mutex_unlock(&mr_table->route_list_lock);
+
+ for (i = 0; i < MAXVIFS; i++) {
+ mr_table->vifs[i].dev = NULL;
+ mr_table->vifs[i].rif = NULL;
+ }
+}
+
+bool mlxsw_sp_mr_table_empty(const struct mlxsw_sp_mr_table *mr_table)
+{
+ int i;
+
+ for (i = 0; i < MAXVIFS; i++)
+ if (mr_table->vifs[i].dev)
+ return false;
+ return list_empty(&mr_table->route_list);
+}
+
+static void mlxsw_sp_mr_route_stats_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mr_route *mr_route)
+{
+ struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
+ u64 packets, bytes;
+
+ if (mr_route->route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP)
+ return;
+
+ mr->mr_ops->route_stats(mlxsw_sp, mr_route->route_priv, &packets,
+ &bytes);
+
+ if (mr_route->mfc->mfc_un.res.pkt != packets)
+ mr_route->mfc->mfc_un.res.lastuse = jiffies;
+ mr_route->mfc->mfc_un.res.pkt = packets;
+ mr_route->mfc->mfc_un.res.bytes = bytes;
+}
+
+static void mlxsw_sp_mr_stats_update(struct work_struct *work)
+{
+ struct mlxsw_sp_mr *mr = container_of(work, struct mlxsw_sp_mr,
+ stats_update_dw.work);
+ struct mlxsw_sp_mr_table *mr_table;
+ struct mlxsw_sp_mr_route *mr_route;
+ unsigned long interval;
+
+ mutex_lock(&mr->table_list_lock);
+ list_for_each_entry(mr_table, &mr->table_list, node) {
+ mutex_lock(&mr_table->route_list_lock);
+ list_for_each_entry(mr_route, &mr_table->route_list, node)
+ mlxsw_sp_mr_route_stats_update(mr_table->mlxsw_sp,
+ mr_route);
+ mutex_unlock(&mr_table->route_list_lock);
+ }
+ mutex_unlock(&mr->table_list_lock);
+
+ interval = msecs_to_jiffies(MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL);
+ mlxsw_core_schedule_dw(&mr->stats_update_dw, interval);
+}
+
+int mlxsw_sp_mr_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_mr_ops *mr_ops)
+{
+ struct mlxsw_sp_mr *mr;
+ unsigned long interval;
+ int err;
+
+ mr = kzalloc(sizeof(*mr) + mr_ops->priv_size, GFP_KERNEL);
+ if (!mr)
+ return -ENOMEM;
+ mr->mr_ops = mr_ops;
+ mlxsw_sp->mr = mr;
+ INIT_LIST_HEAD(&mr->table_list);
+ mutex_init(&mr->table_list_lock);
+
+ err = mr_ops->init(mlxsw_sp, mr->priv);
+ if (err)
+ goto err;
+
+ /* Create the delayed work for counter updates */
+ INIT_DELAYED_WORK(&mr->stats_update_dw, mlxsw_sp_mr_stats_update);
+ interval = msecs_to_jiffies(MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL);
+ mlxsw_core_schedule_dw(&mr->stats_update_dw, interval);
+ return 0;
+err:
+ mutex_destroy(&mr->table_list_lock);
+ kfree(mr);
+ return err;
+}
+
+void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
+
+ cancel_delayed_work_sync(&mr->stats_update_dw);
+ mr->mr_ops->fini(mlxsw_sp, mr->priv);
+ mutex_destroy(&mr->table_list_lock);
+ kfree(mr);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h
new file mode 100644
index 000000000..3cde3671f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_SPECTRUM_MCROUTER_H
+#define _MLXSW_SPECTRUM_MCROUTER_H
+
+#include <linux/mroute.h>
+#include <linux/mroute6.h>
+#include "spectrum_router.h"
+#include "spectrum.h"
+
+enum mlxsw_sp_mr_route_action {
+ MLXSW_SP_MR_ROUTE_ACTION_FORWARD,
+ MLXSW_SP_MR_ROUTE_ACTION_TRAP,
+ MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD,
+};
+
+struct mlxsw_sp_mr_route_key {
+ int vrid;
+ enum mlxsw_sp_l3proto proto;
+ union mlxsw_sp_l3addr group;
+ union mlxsw_sp_l3addr group_mask;
+ union mlxsw_sp_l3addr source;
+ union mlxsw_sp_l3addr source_mask;
+};
+
+struct mlxsw_sp_mr_route_info {
+ enum mlxsw_sp_mr_route_action route_action;
+ u16 irif_index;
+ u16 *erif_indices;
+ size_t erif_num;
+ u16 min_mtu;
+};
+
+struct mlxsw_sp_mr_route_params {
+ struct mlxsw_sp_mr_route_key key;
+ struct mlxsw_sp_mr_route_info value;
+ enum mlxsw_sp_mr_route_prio prio;
+};
+
+struct mlxsw_sp_mr_ops {
+ int priv_size;
+ int route_priv_size;
+ int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
+ int (*route_create)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_params *route_params);
+ int (*route_update)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
+ struct mlxsw_sp_mr_route_info *route_info);
+ int (*route_stats)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
+ u64 *packets, u64 *bytes);
+ int (*route_action_update)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
+ enum mlxsw_sp_mr_route_action route_action);
+ int (*route_min_mtu_update)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
+ u16 min_mtu);
+ int (*route_irif_update)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
+ u16 irif_index);
+ int (*route_erif_add)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
+ u16 erif_index);
+ int (*route_erif_del)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
+ u16 erif_index);
+ void (*route_destroy)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv);
+ void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
+};
+
+struct mlxsw_sp_mr;
+struct mlxsw_sp_mr_table;
+
+int mlxsw_sp_mr_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_mr_ops *mr_ops);
+void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_mr_route_add(struct mlxsw_sp_mr_table *mr_table,
+ struct mr_mfc *mfc, bool replace);
+void mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
+ struct mr_mfc *mfc);
+int mlxsw_sp_mr_vif_add(struct mlxsw_sp_mr_table *mr_table,
+ struct net_device *dev, vifi_t vif_index,
+ unsigned long vif_flags,
+ const struct mlxsw_sp_rif *rif);
+void mlxsw_sp_mr_vif_del(struct mlxsw_sp_mr_table *mr_table, vifi_t vif_index);
+int mlxsw_sp_mr_rif_add(struct mlxsw_sp_mr_table *mr_table,
+ const struct mlxsw_sp_rif *rif);
+void mlxsw_sp_mr_rif_del(struct mlxsw_sp_mr_table *mr_table,
+ const struct mlxsw_sp_rif *rif);
+void mlxsw_sp_mr_rif_mtu_update(struct mlxsw_sp_mr_table *mr_table,
+ const struct mlxsw_sp_rif *rif, int mtu);
+struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
+ u32 tb_id,
+ enum mlxsw_sp_l3proto proto);
+void mlxsw_sp_mr_table_destroy(struct mlxsw_sp_mr_table *mr_table);
+void mlxsw_sp_mr_table_flush(struct mlxsw_sp_mr_table *mr_table);
+bool mlxsw_sp_mr_table_empty(const struct mlxsw_sp_mr_table *mr_table);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
new file mode 100644
index 000000000..221aa6a47
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
@@ -0,0 +1,615 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+
+#include "spectrum_mr_tcam.h"
+#include "reg.h"
+#include "spectrum.h"
+#include "core_acl_flex_actions.h"
+#include "spectrum_mr.h"
+
+struct mlxsw_sp_mr_tcam {
+ void *priv;
+};
+
+/* This struct maps to one RIGR2 register entry */
+struct mlxsw_sp_mr_erif_sublist {
+ struct list_head list;
+ u32 rigr2_kvdl_index;
+ int num_erifs;
+ u16 erif_indices[MLXSW_REG_RIGR2_MAX_ERIFS];
+ bool synced;
+};
+
+struct mlxsw_sp_mr_tcam_erif_list {
+ struct list_head erif_sublists;
+ u32 kvdl_index;
+};
+
+static bool
+mlxsw_sp_mr_erif_sublist_full(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mr_erif_sublist *erif_sublist)
+{
+ int erif_list_entries = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ MC_ERIF_LIST_ENTRIES);
+
+ return erif_sublist->num_erifs == erif_list_entries;
+}
+
+static void
+mlxsw_sp_mr_erif_list_init(struct mlxsw_sp_mr_tcam_erif_list *erif_list)
+{
+ INIT_LIST_HEAD(&erif_list->erif_sublists);
+}
+
+static struct mlxsw_sp_mr_erif_sublist *
+mlxsw_sp_mr_erif_sublist_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mr_tcam_erif_list *erif_list)
+{
+ struct mlxsw_sp_mr_erif_sublist *erif_sublist;
+ int err;
+
+ erif_sublist = kzalloc(sizeof(*erif_sublist), GFP_KERNEL);
+ if (!erif_sublist)
+ return ERR_PTR(-ENOMEM);
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
+ 1, &erif_sublist->rigr2_kvdl_index);
+ if (err) {
+ kfree(erif_sublist);
+ return ERR_PTR(err);
+ }
+
+ list_add_tail(&erif_sublist->list, &erif_list->erif_sublists);
+ return erif_sublist;
+}
+
+static void
+mlxsw_sp_mr_erif_sublist_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mr_erif_sublist *erif_sublist)
+{
+ list_del(&erif_sublist->list);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
+ 1, erif_sublist->rigr2_kvdl_index);
+ kfree(erif_sublist);
+}
+
+static int
+mlxsw_sp_mr_erif_list_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mr_tcam_erif_list *erif_list,
+ u16 erif_index)
+{
+ struct mlxsw_sp_mr_erif_sublist *sublist;
+
+	/* If there is no eRIF sublist yet, or the last one is full, allocate
+	 * a new one.
+	 */
+ if (list_empty(&erif_list->erif_sublists)) {
+ sublist = mlxsw_sp_mr_erif_sublist_create(mlxsw_sp, erif_list);
+ if (IS_ERR(sublist))
+ return PTR_ERR(sublist);
+ erif_list->kvdl_index = sublist->rigr2_kvdl_index;
+ } else {
+ sublist = list_last_entry(&erif_list->erif_sublists,
+ struct mlxsw_sp_mr_erif_sublist,
+ list);
+ sublist->synced = false;
+ if (mlxsw_sp_mr_erif_sublist_full(mlxsw_sp, sublist)) {
+ sublist = mlxsw_sp_mr_erif_sublist_create(mlxsw_sp,
+ erif_list);
+ if (IS_ERR(sublist))
+ return PTR_ERR(sublist);
+ }
+ }
+
+	/* Append the eRIF at the end of the last sublist */
+ sublist->erif_indices[sublist->num_erifs++] = erif_index;
+ return 0;
+}
+
+static void
+mlxsw_sp_mr_erif_list_flush(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mr_tcam_erif_list *erif_list)
+{
+ struct mlxsw_sp_mr_erif_sublist *erif_sublist, *tmp;
+
+ list_for_each_entry_safe(erif_sublist, tmp, &erif_list->erif_sublists,
+ list)
+ mlxsw_sp_mr_erif_sublist_destroy(mlxsw_sp, erif_sublist);
+}
+
+static int
+mlxsw_sp_mr_erif_list_commit(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mr_tcam_erif_list *erif_list)
+{
+ struct mlxsw_sp_mr_erif_sublist *curr_sublist;
+ char rigr2_pl[MLXSW_REG_RIGR2_LEN];
+ int err;
+ int i;
+
+ list_for_each_entry(curr_sublist, &erif_list->erif_sublists, list) {
+ if (curr_sublist->synced)
+ continue;
+
+ /* If the sublist is not the last one, pack the next index */
+ if (list_is_last(&curr_sublist->list,
+ &erif_list->erif_sublists)) {
+ mlxsw_reg_rigr2_pack(rigr2_pl,
+ curr_sublist->rigr2_kvdl_index,
+ false, 0);
+ } else {
+ struct mlxsw_sp_mr_erif_sublist *next_sublist;
+
+ next_sublist = list_next_entry(curr_sublist, list);
+ mlxsw_reg_rigr2_pack(rigr2_pl,
+ curr_sublist->rigr2_kvdl_index,
+ true,
+ next_sublist->rigr2_kvdl_index);
+ }
+
+ /* Pack all the erifs */
+ for (i = 0; i < curr_sublist->num_erifs; i++) {
+ u16 erif_index = curr_sublist->erif_indices[i];
+
+ mlxsw_reg_rigr2_erif_entry_pack(rigr2_pl, i, true,
+ erif_index);
+ }
+
+ /* Write the entry */
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rigr2),
+ rigr2_pl);
+ if (err)
+			/* No need for a rollback here because this
+			 * hardware entry should not be pointed to yet.
+			 */
+ return err;
+ curr_sublist->synced = true;
+ }
+ return 0;
+}
+
+static void mlxsw_sp_mr_erif_list_move(struct mlxsw_sp_mr_tcam_erif_list *to,
+ struct mlxsw_sp_mr_tcam_erif_list *from)
+{
+ list_splice(&from->erif_sublists, &to->erif_sublists);
+ to->kvdl_index = from->kvdl_index;
+}
+
+struct mlxsw_sp_mr_tcam_route {
+ struct mlxsw_sp_mr_tcam_erif_list erif_list;
+ struct mlxsw_afa_block *afa_block;
+ u32 counter_index;
+ enum mlxsw_sp_mr_route_action action;
+ struct mlxsw_sp_mr_route_key key;
+ u16 irif_index;
+ u16 min_mtu;
+ void *priv;
+};
+
+static struct mlxsw_afa_block *
+mlxsw_sp_mr_tcam_afa_block_create(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_mr_route_action route_action,
+ u16 irif_index, u32 counter_index,
+ u16 min_mtu,
+ struct mlxsw_sp_mr_tcam_erif_list *erif_list)
+{
+ struct mlxsw_afa_block *afa_block;
+ int err;
+
+ afa_block = mlxsw_afa_block_create(mlxsw_sp->afa);
+ if (IS_ERR(afa_block))
+ return afa_block;
+
+ err = mlxsw_afa_block_append_allocated_counter(afa_block,
+ counter_index);
+ if (err)
+ goto err;
+
+ switch (route_action) {
+ case MLXSW_SP_MR_ROUTE_ACTION_TRAP:
+ err = mlxsw_afa_block_append_trap(afa_block,
+ MLXSW_TRAP_ID_ACL1);
+ if (err)
+ goto err;
+ break;
+ case MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD:
+ case MLXSW_SP_MR_ROUTE_ACTION_FORWARD:
+ /* If we are about to append a multicast router action, commit
+ * the erif_list.
+ */
+ err = mlxsw_sp_mr_erif_list_commit(mlxsw_sp, erif_list);
+ if (err)
+ goto err;
+
+ err = mlxsw_afa_block_append_mcrouter(afa_block, irif_index,
+ min_mtu, false,
+ erif_list->kvdl_index);
+ if (err)
+ goto err;
+
+ if (route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD) {
+ err = mlxsw_afa_block_append_trap_and_forward(afa_block,
+ MLXSW_TRAP_ID_ACL2);
+ if (err)
+ goto err;
+ }
+ break;
+ default:
+ err = -EINVAL;
+ goto err;
+ }
+
+ err = mlxsw_afa_block_commit(afa_block);
+ if (err)
+ goto err;
+ return afa_block;
+err:
+ mlxsw_afa_block_destroy(afa_block);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_mr_tcam_afa_block_destroy(struct mlxsw_afa_block *afa_block)
+{
+ mlxsw_afa_block_destroy(afa_block);
+}
+
+static int
+mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mr_tcam_erif_list *erif_list,
+ struct mlxsw_sp_mr_route_info *route_info)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < route_info->erif_num; i++) {
+ u16 erif_index = route_info->erif_indices[i];
+
+ err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, erif_list,
+ erif_index);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int
+mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_params *route_params)
+{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
+ struct mlxsw_sp_mr_tcam_route *route = route_priv;
+ struct mlxsw_sp_mr_tcam *mr_tcam = priv;
+ int err;
+
+ route->key = route_params->key;
+ route->irif_index = route_params->value.irif_index;
+ route->min_mtu = route_params->value.min_mtu;
+ route->action = route_params->value.route_action;
+
+ /* Create the egress RIFs list */
+ mlxsw_sp_mr_erif_list_init(&route->erif_list);
+ err = mlxsw_sp_mr_tcam_erif_populate(mlxsw_sp, &route->erif_list,
+ &route_params->value);
+ if (err)
+ goto err_erif_populate;
+
+ /* Create the flow counter */
+ err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &route->counter_index);
+ if (err)
+ goto err_counter_alloc;
+
+ /* Create the flexible action block */
+ route->afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
+ route->action,
+ route->irif_index,
+ route->counter_index,
+ route->min_mtu,
+ &route->erif_list);
+ if (IS_ERR(route->afa_block)) {
+ err = PTR_ERR(route->afa_block);
+ goto err_afa_block_create;
+ }
+
+ route->priv = kzalloc(ops->route_priv_size, GFP_KERNEL);
+ if (!route->priv) {
+ err = -ENOMEM;
+ goto err_route_priv_alloc;
+ }
+
+ /* Write the route to the TCAM */
+ err = ops->route_create(mlxsw_sp, mr_tcam->priv, route->priv,
+ &route->key, route->afa_block,
+ route_params->prio);
+ if (err)
+ goto err_route_create;
+ return 0;
+
+err_route_create:
+ kfree(route->priv);
+err_route_priv_alloc:
+ mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
+err_afa_block_create:
+ mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
+err_erif_populate:
+err_counter_alloc:
+ mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
+ return err;
+}
+
+static void mlxsw_sp_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp,
+ void *priv, void *route_priv)
+{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
+ struct mlxsw_sp_mr_tcam_route *route = route_priv;
+ struct mlxsw_sp_mr_tcam *mr_tcam = priv;
+
+ ops->route_destroy(mlxsw_sp, mr_tcam->priv, route->priv, &route->key);
+ kfree(route->priv);
+ mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
+ mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
+ mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
+}
+
+static int mlxsw_sp_mr_tcam_route_stats(struct mlxsw_sp *mlxsw_sp,
+ void *route_priv, u64 *packets,
+ u64 *bytes)
+{
+ struct mlxsw_sp_mr_tcam_route *route = route_priv;
+
+ return mlxsw_sp_flow_counter_get(mlxsw_sp, route->counter_index,
+ packets, bytes);
+}
+
+static int
+mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp *mlxsw_sp,
+ void *route_priv,
+ enum mlxsw_sp_mr_route_action route_action)
+{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
+ struct mlxsw_sp_mr_tcam_route *route = route_priv;
+ struct mlxsw_afa_block *afa_block;
+ int err;
+
+ /* Create a new flexible action block */
+ afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp, route_action,
+ route->irif_index,
+ route->counter_index,
+ route->min_mtu,
+ &route->erif_list);
+ if (IS_ERR(afa_block))
+ return PTR_ERR(afa_block);
+
+ /* Update the TCAM route entry */
+ err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
+ if (err)
+ goto err;
+
+ /* Delete the old one */
+ mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
+ route->afa_block = afa_block;
+ route->action = route_action;
+ return 0;
+err:
+ mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
+ return err;
+}
+
+static int mlxsw_sp_mr_tcam_route_min_mtu_update(struct mlxsw_sp *mlxsw_sp,
+ void *route_priv, u16 min_mtu)
+{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
+ struct mlxsw_sp_mr_tcam_route *route = route_priv;
+ struct mlxsw_afa_block *afa_block;
+ int err;
+
+ /* Create a new flexible action block */
+ afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
+ route->action,
+ route->irif_index,
+ route->counter_index,
+ min_mtu,
+ &route->erif_list);
+ if (IS_ERR(afa_block))
+ return PTR_ERR(afa_block);
+
+ /* Update the TCAM route entry */
+ err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
+ if (err)
+ goto err;
+
+ /* Delete the old one */
+ mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
+ route->afa_block = afa_block;
+ route->min_mtu = min_mtu;
+ return 0;
+err:
+ mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
+ return err;
+}
+
+static int mlxsw_sp_mr_tcam_route_irif_update(struct mlxsw_sp *mlxsw_sp,
+ void *route_priv, u16 irif_index)
+{
+ struct mlxsw_sp_mr_tcam_route *route = route_priv;
+
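+	/* For forwarding routes the iRIF is encoded in the flexible action
+	 * block; only trap routes can update it without a rewrite.
+	 */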
+ if (route->action != MLXSW_SP_MR_ROUTE_ACTION_TRAP)
+ return -EINVAL;
+ route->irif_index = irif_index;
+ return 0;
+}
+
+static int mlxsw_sp_mr_tcam_route_erif_add(struct mlxsw_sp *mlxsw_sp,
+ void *route_priv, u16 erif_index)
+{
+ struct mlxsw_sp_mr_tcam_route *route = route_priv;
+ int err;
+
+ err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, &route->erif_list,
+ erif_index);
+ if (err)
+ return err;
+
+ /* Commit the action only if the route action is not TRAP */
+ if (route->action != MLXSW_SP_MR_ROUTE_ACTION_TRAP)
+ return mlxsw_sp_mr_erif_list_commit(mlxsw_sp,
+ &route->erif_list);
+ return 0;
+}
+
+static int mlxsw_sp_mr_tcam_route_erif_del(struct mlxsw_sp *mlxsw_sp,
+ void *route_priv, u16 erif_index)
+{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
+ struct mlxsw_sp_mr_tcam_route *route = route_priv;
+ struct mlxsw_sp_mr_erif_sublist *erif_sublist;
+ struct mlxsw_sp_mr_tcam_erif_list erif_list;
+ struct mlxsw_afa_block *afa_block;
+ int err;
+ int i;
+
+ /* Create a copy of the original erif_list without the deleted entry */
+ mlxsw_sp_mr_erif_list_init(&erif_list);
+ list_for_each_entry(erif_sublist, &route->erif_list.erif_sublists, list) {
+ for (i = 0; i < erif_sublist->num_erifs; i++) {
+ u16 curr_erif = erif_sublist->erif_indices[i];
+
+ if (curr_erif == erif_index)
+ continue;
+ err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, &erif_list,
+ curr_erif);
+ if (err)
+ goto err_erif_list_add;
+ }
+ }
+
+ /* Create the flexible action block pointing to the new erif_list */
+ afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp, route->action,
+ route->irif_index,
+ route->counter_index,
+ route->min_mtu,
+ &erif_list);
+ if (IS_ERR(afa_block)) {
+ err = PTR_ERR(afa_block);
+ goto err_afa_block_create;
+ }
+
+ /* Update the TCAM route entry */
+ err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
+ if (err)
+ goto err_route_write;
+
+ mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
+ mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
+ route->afa_block = afa_block;
+ mlxsw_sp_mr_erif_list_move(&route->erif_list, &erif_list);
+ return 0;
+
+err_route_write:
+ mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
+err_afa_block_create:
+err_erif_list_add:
+ mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &erif_list);
+ return err;
+}
+
+static int
+mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv,
+ struct mlxsw_sp_mr_route_info *route_info)
+{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
+ struct mlxsw_sp_mr_tcam_route *route = route_priv;
+ struct mlxsw_sp_mr_tcam_erif_list erif_list;
+ struct mlxsw_afa_block *afa_block;
+ int err;
+
+ /* Create a new erif_list */
+ mlxsw_sp_mr_erif_list_init(&erif_list);
+ err = mlxsw_sp_mr_tcam_erif_populate(mlxsw_sp, &erif_list, route_info);
+ if (err)
+ goto err_erif_populate;
+
+ /* Create the flexible action block pointing to the new erif_list */
+ afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
+ route_info->route_action,
+ route_info->irif_index,
+ route->counter_index,
+ route_info->min_mtu,
+ &erif_list);
+ if (IS_ERR(afa_block)) {
+ err = PTR_ERR(afa_block);
+ goto err_afa_block_create;
+ }
+
+ /* Update the TCAM route entry */
+ err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
+ if (err)
+ goto err_route_write;
+
+ mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
+ mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
+ route->afa_block = afa_block;
+ mlxsw_sp_mr_erif_list_move(&route->erif_list, &erif_list);
+ route->action = route_info->route_action;
+ route->irif_index = route_info->irif_index;
+ route->min_mtu = route_info->min_mtu;
+ return 0;
+
+err_route_write:
+ mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
+err_afa_block_create:
+err_erif_populate:
+ mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &erif_list);
+ return err;
+}
+
+static int mlxsw_sp_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
+ struct mlxsw_sp_mr_tcam *mr_tcam = priv;
+ int err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES))
+ return -EIO;
+
+ mr_tcam->priv = kzalloc(ops->priv_size, GFP_KERNEL);
+ if (!mr_tcam->priv)
+ return -ENOMEM;
+
+ err = ops->init(mlxsw_sp, mr_tcam->priv);
+ if (err)
+ goto err_init;
+ return 0;
+
+err_init:
+ kfree(mr_tcam->priv);
+ return err;
+}
+
+static void mlxsw_sp_mr_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
+ struct mlxsw_sp_mr_tcam *mr_tcam = priv;
+
+ ops->fini(mr_tcam->priv);
+ kfree(mr_tcam->priv);
+}
+
+const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops = {
+ .priv_size = sizeof(struct mlxsw_sp_mr_tcam),
+ .route_priv_size = sizeof(struct mlxsw_sp_mr_tcam_route),
+ .init = mlxsw_sp_mr_tcam_init,
+ .route_create = mlxsw_sp_mr_tcam_route_create,
+ .route_update = mlxsw_sp_mr_tcam_route_update,
+ .route_stats = mlxsw_sp_mr_tcam_route_stats,
+ .route_action_update = mlxsw_sp_mr_tcam_route_action_update,
+ .route_min_mtu_update = mlxsw_sp_mr_tcam_route_min_mtu_update,
+ .route_irif_update = mlxsw_sp_mr_tcam_route_irif_update,
+ .route_erif_add = mlxsw_sp_mr_tcam_route_erif_add,
+ .route_erif_del = mlxsw_sp_mr_tcam_route_erif_del,
+ .route_destroy = mlxsw_sp_mr_tcam_route_destroy,
+ .fini = mlxsw_sp_mr_tcam_fini,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h
new file mode 100644
index 000000000..3c84151b4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_SPECTRUM_MCROUTER_TCAM_H
+#define _MLXSW_SPECTRUM_MCROUTER_TCAM_H
+
+#include "spectrum.h"
+#include "spectrum_mr.h"
+
+extern const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops;
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
new file mode 100644
index 000000000..d2b57a045
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -0,0 +1,1173 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+#include <net/inet_ecn.h>
+#include <net/ipv6.h>
+
+#include "reg.h"
+#include "spectrum.h"
+#include "spectrum_nve.h"
+
+const struct mlxsw_sp_nve_ops *mlxsw_sp1_nve_ops_arr[] = {
+ [MLXSW_SP_NVE_TYPE_VXLAN] = &mlxsw_sp1_nve_vxlan_ops,
+};
+
+const struct mlxsw_sp_nve_ops *mlxsw_sp2_nve_ops_arr[] = {
+ [MLXSW_SP_NVE_TYPE_VXLAN] = &mlxsw_sp2_nve_vxlan_ops,
+};
+
+struct mlxsw_sp_nve_mc_entry;
+struct mlxsw_sp_nve_mc_record;
+struct mlxsw_sp_nve_mc_list;
+
+struct mlxsw_sp_nve_mc_record_ops {
+ enum mlxsw_reg_tnumt_record_type type;
+ int (*entry_add)(struct mlxsw_sp_nve_mc_record *mc_record,
+ struct mlxsw_sp_nve_mc_entry *mc_entry,
+ const union mlxsw_sp_l3addr *addr);
+ void (*entry_del)(const struct mlxsw_sp_nve_mc_record *mc_record,
+ const struct mlxsw_sp_nve_mc_entry *mc_entry);
+ void (*entry_set)(const struct mlxsw_sp_nve_mc_record *mc_record,
+ const struct mlxsw_sp_nve_mc_entry *mc_entry,
+ char *tnumt_pl, unsigned int entry_index);
+ bool (*entry_compare)(const struct mlxsw_sp_nve_mc_record *mc_record,
+ const struct mlxsw_sp_nve_mc_entry *mc_entry,
+ const union mlxsw_sp_l3addr *addr);
+};
+
+struct mlxsw_sp_nve_mc_list_key {
+ u16 fid_index;
+};
+
+struct mlxsw_sp_nve_mc_ipv6_entry {
+ struct in6_addr addr6;
+ u32 addr6_kvdl_index;
+};
+
+struct mlxsw_sp_nve_mc_entry {
+ union {
+ __be32 addr4;
+ struct mlxsw_sp_nve_mc_ipv6_entry ipv6_entry;
+ };
+ u8 valid:1;
+};
+
+struct mlxsw_sp_nve_mc_record {
+ struct list_head list;
+ enum mlxsw_sp_l3proto proto;
+ unsigned int num_entries;
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_nve_mc_list *mc_list;
+ const struct mlxsw_sp_nve_mc_record_ops *ops;
+ u32 kvdl_index;
+ struct mlxsw_sp_nve_mc_entry entries[];
+};
+
+struct mlxsw_sp_nve_mc_list {
+ struct list_head records_list;
+ struct rhash_head ht_node;
+ struct mlxsw_sp_nve_mc_list_key key;
+};
+
+static const struct rhashtable_params mlxsw_sp_nve_mc_list_ht_params = {
+ .key_len = sizeof(struct mlxsw_sp_nve_mc_list_key),
+ .key_offset = offsetof(struct mlxsw_sp_nve_mc_list, key),
+ .head_offset = offsetof(struct mlxsw_sp_nve_mc_list, ht_node),
+};
+
+static int
+mlxsw_sp_nve_mc_record_ipv4_entry_add(struct mlxsw_sp_nve_mc_record *mc_record,
+ struct mlxsw_sp_nve_mc_entry *mc_entry,
+ const union mlxsw_sp_l3addr *addr)
+{
+ mc_entry->addr4 = addr->addr4;
+
+ return 0;
+}
+
+static void
+mlxsw_sp_nve_mc_record_ipv4_entry_del(const struct mlxsw_sp_nve_mc_record *mc_record,
+ const struct mlxsw_sp_nve_mc_entry *mc_entry)
+{
+}
+
+static void
+mlxsw_sp_nve_mc_record_ipv4_entry_set(const struct mlxsw_sp_nve_mc_record *mc_record,
+ const struct mlxsw_sp_nve_mc_entry *mc_entry,
+ char *tnumt_pl, unsigned int entry_index)
+{
+ u32 udip = be32_to_cpu(mc_entry->addr4);
+
+ mlxsw_reg_tnumt_udip_set(tnumt_pl, entry_index, udip);
+}
+
+static bool
+mlxsw_sp_nve_mc_record_ipv4_entry_compare(const struct mlxsw_sp_nve_mc_record *mc_record,
+ const struct mlxsw_sp_nve_mc_entry *mc_entry,
+ const union mlxsw_sp_l3addr *addr)
+{
+ return mc_entry->addr4 == addr->addr4;
+}
+
+static const struct mlxsw_sp_nve_mc_record_ops
+mlxsw_sp_nve_mc_record_ipv4_ops = {
+ .type = MLXSW_REG_TNUMT_RECORD_TYPE_IPV4,
+ .entry_add = &mlxsw_sp_nve_mc_record_ipv4_entry_add,
+ .entry_del = &mlxsw_sp_nve_mc_record_ipv4_entry_del,
+ .entry_set = &mlxsw_sp_nve_mc_record_ipv4_entry_set,
+ .entry_compare = &mlxsw_sp_nve_mc_record_ipv4_entry_compare,
+};
+
+static int
+mlxsw_sp_nve_mc_record_ipv6_entry_add(struct mlxsw_sp_nve_mc_record *mc_record,
+ struct mlxsw_sp_nve_mc_entry *mc_entry,
+ const union mlxsw_sp_l3addr *addr)
+{
+ u32 kvdl_index;
+ int err;
+
+ err = mlxsw_sp_ipv6_addr_kvdl_index_get(mc_record->mlxsw_sp,
+ &addr->addr6, &kvdl_index);
+ if (err)
+ return err;
+
+ mc_entry->ipv6_entry.addr6 = addr->addr6;
+ mc_entry->ipv6_entry.addr6_kvdl_index = kvdl_index;
+ return 0;
+}
+
+static void
+mlxsw_sp_nve_mc_record_ipv6_entry_del(const struct mlxsw_sp_nve_mc_record *mc_record,
+ const struct mlxsw_sp_nve_mc_entry *mc_entry)
+{
+ mlxsw_sp_ipv6_addr_put(mc_record->mlxsw_sp,
+ &mc_entry->ipv6_entry.addr6);
+}
+
+static void
+mlxsw_sp_nve_mc_record_ipv6_entry_set(const struct mlxsw_sp_nve_mc_record *mc_record,
+ const struct mlxsw_sp_nve_mc_entry *mc_entry,
+ char *tnumt_pl, unsigned int entry_index)
+{
+ u32 udip_ptr = mc_entry->ipv6_entry.addr6_kvdl_index;
+
+ mlxsw_reg_tnumt_udip_ptr_set(tnumt_pl, entry_index, udip_ptr);
+}
+
+static bool
+mlxsw_sp_nve_mc_record_ipv6_entry_compare(const struct mlxsw_sp_nve_mc_record *mc_record,
+ const struct mlxsw_sp_nve_mc_entry *mc_entry,
+ const union mlxsw_sp_l3addr *addr)
+{
+ return ipv6_addr_equal(&mc_entry->ipv6_entry.addr6, &addr->addr6);
+}
+
+static const struct mlxsw_sp_nve_mc_record_ops
+mlxsw_sp_nve_mc_record_ipv6_ops = {
+ .type = MLXSW_REG_TNUMT_RECORD_TYPE_IPV6,
+ .entry_add = &mlxsw_sp_nve_mc_record_ipv6_entry_add,
+ .entry_del = &mlxsw_sp_nve_mc_record_ipv6_entry_del,
+ .entry_set = &mlxsw_sp_nve_mc_record_ipv6_entry_set,
+ .entry_compare = &mlxsw_sp_nve_mc_record_ipv6_entry_compare,
+};
+
+static const struct mlxsw_sp_nve_mc_record_ops *
+mlxsw_sp_nve_mc_record_ops_arr[] = {
+ [MLXSW_SP_L3_PROTO_IPV4] = &mlxsw_sp_nve_mc_record_ipv4_ops,
+ [MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_nve_mc_record_ipv6_ops,
+};
+
+int mlxsw_sp_nve_learned_ip_resolve(struct mlxsw_sp *mlxsw_sp, u32 uip,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr)
+{
+ switch (proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ addr->addr4 = cpu_to_be32(uip);
+ return 0;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+}
+
+static struct mlxsw_sp_nve_mc_list *
+mlxsw_sp_nve_mc_list_find(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_nve_mc_list_key *key)
+{
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+
+ return rhashtable_lookup_fast(&nve->mc_list_ht, key,
+ mlxsw_sp_nve_mc_list_ht_params);
+}
+
+static struct mlxsw_sp_nve_mc_list *
+mlxsw_sp_nve_mc_list_create(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_nve_mc_list_key *key)
+{
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+ struct mlxsw_sp_nve_mc_list *mc_list;
+ int err;
+
+ mc_list = kmalloc(sizeof(*mc_list), GFP_KERNEL);
+ if (!mc_list)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&mc_list->records_list);
+ mc_list->key = *key;
+
+ err = rhashtable_insert_fast(&nve->mc_list_ht, &mc_list->ht_node,
+ mlxsw_sp_nve_mc_list_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ return mc_list;
+
+err_rhashtable_insert:
+ kfree(mc_list);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_nve_mc_list_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nve_mc_list *mc_list)
+{
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+
+ rhashtable_remove_fast(&nve->mc_list_ht, &mc_list->ht_node,
+ mlxsw_sp_nve_mc_list_ht_params);
+ WARN_ON(!list_empty(&mc_list->records_list));
+ kfree(mc_list);
+}
+
+static struct mlxsw_sp_nve_mc_list *
+mlxsw_sp_nve_mc_list_get(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_nve_mc_list_key *key)
+{
+ struct mlxsw_sp_nve_mc_list *mc_list;
+
+ mc_list = mlxsw_sp_nve_mc_list_find(mlxsw_sp, key);
+ if (mc_list)
+ return mc_list;
+
+ return mlxsw_sp_nve_mc_list_create(mlxsw_sp, key);
+}
+
+static void
+mlxsw_sp_nve_mc_list_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nve_mc_list *mc_list)
+{
+ if (!list_empty(&mc_list->records_list))
+ return;
+ mlxsw_sp_nve_mc_list_destroy(mlxsw_sp, mc_list);
+}
+
+static struct mlxsw_sp_nve_mc_record *
+mlxsw_sp_nve_mc_record_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nve_mc_list *mc_list,
+ enum mlxsw_sp_l3proto proto)
+{
+ unsigned int num_max_entries = mlxsw_sp->nve->num_max_mc_entries[proto];
+ struct mlxsw_sp_nve_mc_record *mc_record;
+ int err;
+
+ mc_record = kzalloc(struct_size(mc_record, entries, num_max_entries),
+ GFP_KERNEL);
+ if (!mc_record)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT, 1,
+ &mc_record->kvdl_index);
+ if (err)
+ goto err_kvdl_alloc;
+
+ mc_record->ops = mlxsw_sp_nve_mc_record_ops_arr[proto];
+ mc_record->mlxsw_sp = mlxsw_sp;
+ mc_record->mc_list = mc_list;
+ mc_record->proto = proto;
+ list_add_tail(&mc_record->list, &mc_list->records_list);
+
+ return mc_record;
+
+err_kvdl_alloc:
+ kfree(mc_record);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_nve_mc_record_destroy(struct mlxsw_sp_nve_mc_record *mc_record)
+{
+ struct mlxsw_sp *mlxsw_sp = mc_record->mlxsw_sp;
+
+ list_del(&mc_record->list);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT, 1,
+ mc_record->kvdl_index);
+ WARN_ON(mc_record->num_entries);
+ kfree(mc_record);
+}
+
+static struct mlxsw_sp_nve_mc_record *
+mlxsw_sp_nve_mc_record_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nve_mc_list *mc_list,
+ enum mlxsw_sp_l3proto proto)
+{
+ struct mlxsw_sp_nve_mc_record *mc_record;
+
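+	/* Reuse the newest record of this protocol that still has room,
+	 * otherwise create a new one.
+	 */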
+ list_for_each_entry_reverse(mc_record, &mc_list->records_list, list) {
+ unsigned int num_entries = mc_record->num_entries;
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+
+ if (mc_record->proto == proto &&
+ num_entries < nve->num_max_mc_entries[proto])
+ return mc_record;
+ }
+
+ return mlxsw_sp_nve_mc_record_create(mlxsw_sp, mc_list, proto);
+}
+
+static void
+mlxsw_sp_nve_mc_record_put(struct mlxsw_sp_nve_mc_record *mc_record)
+{
+ if (mc_record->num_entries != 0)
+ return;
+
+ mlxsw_sp_nve_mc_record_destroy(mc_record);
+}
+
+static struct mlxsw_sp_nve_mc_entry *
+mlxsw_sp_nve_mc_free_entry_find(struct mlxsw_sp_nve_mc_record *mc_record)
+{
+ struct mlxsw_sp_nve *nve = mc_record->mlxsw_sp->nve;
+ unsigned int num_max_entries;
+ int i;
+
+ num_max_entries = nve->num_max_mc_entries[mc_record->proto];
+ for (i = 0; i < num_max_entries; i++) {
+ if (mc_record->entries[i].valid)
+ continue;
+ return &mc_record->entries[i];
+ }
+
+ return NULL;
+}
+
+static int
+mlxsw_sp_nve_mc_record_refresh(struct mlxsw_sp_nve_mc_record *mc_record)
+{
+ enum mlxsw_reg_tnumt_record_type type = mc_record->ops->type;
+ struct mlxsw_sp_nve_mc_list *mc_list = mc_record->mc_list;
+ struct mlxsw_sp *mlxsw_sp = mc_record->mlxsw_sp;
+ char tnumt_pl[MLXSW_REG_TNUMT_LEN];
+ unsigned int num_max_entries;
+ unsigned int num_entries = 0;
+ u32 next_kvdl_index = 0;
+ bool next_valid = false;
+ int i;
+
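+	/* Records form a hardware linked list; point this record at the
+	 * next one, if any.
+	 */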
+ if (!list_is_last(&mc_record->list, &mc_list->records_list)) {
+ struct mlxsw_sp_nve_mc_record *next_record;
+
+ next_record = list_next_entry(mc_record, list);
+ next_kvdl_index = next_record->kvdl_index;
+ next_valid = true;
+ }
+
+ mlxsw_reg_tnumt_pack(tnumt_pl, type, MLXSW_REG_TUNNEL_PORT_NVE,
+ mc_record->kvdl_index, next_valid,
+ next_kvdl_index, mc_record->num_entries);
+
+ num_max_entries = mlxsw_sp->nve->num_max_mc_entries[mc_record->proto];
+ for (i = 0; i < num_max_entries; i++) {
+ struct mlxsw_sp_nve_mc_entry *mc_entry;
+
+ mc_entry = &mc_record->entries[i];
+ if (!mc_entry->valid)
+ continue;
+ mc_record->ops->entry_set(mc_record, mc_entry, tnumt_pl,
+ num_entries++);
+ }
+
+ WARN_ON(num_entries != mc_record->num_entries);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnumt), tnumt_pl);
+}
+
+static bool
+mlxsw_sp_nve_mc_record_is_first(struct mlxsw_sp_nve_mc_record *mc_record)
+{
+ struct mlxsw_sp_nve_mc_list *mc_list = mc_record->mc_list;
+ struct mlxsw_sp_nve_mc_record *first_record;
+
+ first_record = list_first_entry(&mc_list->records_list,
+ struct mlxsw_sp_nve_mc_record, list);
+
+ return mc_record == first_record;
+}
+
+static struct mlxsw_sp_nve_mc_entry *
+mlxsw_sp_nve_mc_entry_find(struct mlxsw_sp_nve_mc_record *mc_record,
+ union mlxsw_sp_l3addr *addr)
+{
+ struct mlxsw_sp_nve *nve = mc_record->mlxsw_sp->nve;
+ unsigned int num_max_entries;
+ int i;
+
+ num_max_entries = nve->num_max_mc_entries[mc_record->proto];
+ for (i = 0; i < num_max_entries; i++) {
+ struct mlxsw_sp_nve_mc_entry *mc_entry;
+
+ mc_entry = &mc_record->entries[i];
+ if (!mc_entry->valid)
+ continue;
+ if (mc_record->ops->entry_compare(mc_record, mc_entry, addr))
+ return mc_entry;
+ }
+
+ return NULL;
+}
+
+static int
+mlxsw_sp_nve_mc_record_ip_add(struct mlxsw_sp_nve_mc_record *mc_record,
+ union mlxsw_sp_l3addr *addr)
+{
+ struct mlxsw_sp_nve_mc_entry *mc_entry = NULL;
+ int err;
+
+ mc_entry = mlxsw_sp_nve_mc_free_entry_find(mc_record);
+ if (WARN_ON(!mc_entry))
+ return -EINVAL;
+
+ err = mc_record->ops->entry_add(mc_record, mc_entry, addr);
+ if (err)
+ return err;
+ mc_record->num_entries++;
+ mc_entry->valid = true;
+
+ err = mlxsw_sp_nve_mc_record_refresh(mc_record);
+ if (err)
+ goto err_record_refresh;
+
+ /* If this is a new record and not the first one, then we need to
+ * update the next pointer of the previous entry
+ */
+ if (mc_record->num_entries != 1 ||
+ mlxsw_sp_nve_mc_record_is_first(mc_record))
+ return 0;
+
+ err = mlxsw_sp_nve_mc_record_refresh(list_prev_entry(mc_record, list));
+ if (err)
+ goto err_prev_record_refresh;
+
+ return 0;
+
+err_prev_record_refresh:
+err_record_refresh:
+ mc_entry->valid = false;
+ mc_record->num_entries--;
+ mc_record->ops->entry_del(mc_record, mc_entry);
+ return err;
+}
+
+static void
+mlxsw_sp_nve_mc_record_entry_del(struct mlxsw_sp_nve_mc_record *mc_record,
+ struct mlxsw_sp_nve_mc_entry *mc_entry)
+{
+ struct mlxsw_sp_nve_mc_list *mc_list = mc_record->mc_list;
+
+ mc_entry->valid = false;
+ mc_record->num_entries--;
+
+ /* When the record continues to exist we only need to invalidate
+ * the requested entry
+ */
+ if (mc_record->num_entries != 0) {
+ mlxsw_sp_nve_mc_record_refresh(mc_record);
+ mc_record->ops->entry_del(mc_record, mc_entry);
+ return;
+ }
+
+	/* If the record needs to be deleted, but it is not the first,
+	 * then we need to make sure that the previous record no longer
+	 * points to it. Remove the deleted record from the list to reflect
+	 * that and then re-add it at the end, so that it can be
+	 * properly removed by the record destruction code.
+	 */
+ if (!mlxsw_sp_nve_mc_record_is_first(mc_record)) {
+ struct mlxsw_sp_nve_mc_record *prev_record;
+
+ prev_record = list_prev_entry(mc_record, list);
+ list_del(&mc_record->list);
+ mlxsw_sp_nve_mc_record_refresh(prev_record);
+ list_add_tail(&mc_record->list, &mc_list->records_list);
+ mc_record->ops->entry_del(mc_record, mc_entry);
+ return;
+ }
+
+ /* If the first record needs to be deleted, but the list is not
+ * singular, then the second record needs to be written in the
+ * first record's address, as this address is stored as a property
+ * of the FID
+ */
+ if (mlxsw_sp_nve_mc_record_is_first(mc_record) &&
+ !list_is_singular(&mc_list->records_list)) {
+ struct mlxsw_sp_nve_mc_record *next_record;
+
+ next_record = list_next_entry(mc_record, list);
+ swap(mc_record->kvdl_index, next_record->kvdl_index);
+ mlxsw_sp_nve_mc_record_refresh(next_record);
+ mc_record->ops->entry_del(mc_record, mc_entry);
+ return;
+ }
+
+	/* Otherwise, this is the last remaining record and it needs to
+	 * be deleted. Simply delete the entry.
+	 */
+ mc_record->ops->entry_del(mc_record, mc_entry);
+}
+
+static struct mlxsw_sp_nve_mc_record *
+mlxsw_sp_nve_mc_record_find(struct mlxsw_sp_nve_mc_list *mc_list,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr,
+ struct mlxsw_sp_nve_mc_entry **mc_entry)
+{
+ struct mlxsw_sp_nve_mc_record *mc_record;
+
+ list_for_each_entry(mc_record, &mc_list->records_list, list) {
+ if (mc_record->proto != proto)
+ continue;
+
+ *mc_entry = mlxsw_sp_nve_mc_entry_find(mc_record, addr);
+ if (*mc_entry)
+ return mc_record;
+ }
+
+ return NULL;
+}
+
+static int mlxsw_sp_nve_mc_list_ip_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nve_mc_list *mc_list,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr)
+{
+ struct mlxsw_sp_nve_mc_record *mc_record;
+ int err;
+
+ mc_record = mlxsw_sp_nve_mc_record_get(mlxsw_sp, mc_list, proto);
+ if (IS_ERR(mc_record))
+ return PTR_ERR(mc_record);
+
+ err = mlxsw_sp_nve_mc_record_ip_add(mc_record, addr);
+ if (err)
+ goto err_ip_add;
+
+ return 0;
+
+err_ip_add:
+ mlxsw_sp_nve_mc_record_put(mc_record);
+ return err;
+}
+
+static void mlxsw_sp_nve_mc_list_ip_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nve_mc_list *mc_list,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr)
+{
+ struct mlxsw_sp_nve_mc_record *mc_record;
+ struct mlxsw_sp_nve_mc_entry *mc_entry;
+
+ mc_record = mlxsw_sp_nve_mc_record_find(mc_list, proto, addr,
+ &mc_entry);
+ if (!mc_record)
+ return;
+
+ mlxsw_sp_nve_mc_record_entry_del(mc_record, mc_entry);
+ mlxsw_sp_nve_mc_record_put(mc_record);
+}
+
+static int
+mlxsw_sp_nve_fid_flood_index_set(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_nve_mc_list *mc_list)
+{
+ struct mlxsw_sp_nve_mc_record *mc_record;
+
+ /* The address of the first record in the list is a property of
+ * the FID and we never change it. It only needs to be set when
+ * a new list is created
+ */
+ if (mlxsw_sp_fid_nve_flood_index_is_set(fid))
+ return 0;
+
+ mc_record = list_first_entry(&mc_list->records_list,
+ struct mlxsw_sp_nve_mc_record, list);
+
+ return mlxsw_sp_fid_nve_flood_index_set(fid, mc_record->kvdl_index);
+}
+
+static void
+mlxsw_sp_nve_fid_flood_index_clear(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_nve_mc_list *mc_list)
+{
+ struct mlxsw_sp_nve_mc_record *mc_record;
+
+ /* The address of the first record needs to be invalidated only when
+ * the last record is about to be removed
+ */
+ if (!list_is_singular(&mc_list->records_list))
+ return;
+
+ mc_record = list_first_entry(&mc_list->records_list,
+ struct mlxsw_sp_nve_mc_record, list);
+ if (mc_record->num_entries != 1)
+ return;
+
+ return mlxsw_sp_fid_nve_flood_index_clear(fid);
+}
+
+int mlxsw_sp_nve_flood_ip_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fid *fid,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr)
+{
+ struct mlxsw_sp_nve_mc_list_key key = { 0 };
+ struct mlxsw_sp_nve_mc_list *mc_list;
+ int err;
+
+ key.fid_index = mlxsw_sp_fid_index(fid);
+ mc_list = mlxsw_sp_nve_mc_list_get(mlxsw_sp, &key);
+ if (IS_ERR(mc_list))
+ return PTR_ERR(mc_list);
+
+ err = mlxsw_sp_nve_mc_list_ip_add(mlxsw_sp, mc_list, proto, addr);
+ if (err)
+ goto err_add_ip;
+
+ err = mlxsw_sp_nve_fid_flood_index_set(fid, mc_list);
+ if (err)
+ goto err_fid_flood_index_set;
+
+ return 0;
+
+err_fid_flood_index_set:
+ mlxsw_sp_nve_mc_list_ip_del(mlxsw_sp, mc_list, proto, addr);
+err_add_ip:
+ mlxsw_sp_nve_mc_list_put(mlxsw_sp, mc_list);
+ return err;
+}
+
+void mlxsw_sp_nve_flood_ip_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fid *fid,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr)
+{
+ struct mlxsw_sp_nve_mc_list_key key = { 0 };
+ struct mlxsw_sp_nve_mc_list *mc_list;
+
+ key.fid_index = mlxsw_sp_fid_index(fid);
+ mc_list = mlxsw_sp_nve_mc_list_find(mlxsw_sp, &key);
+ if (!mc_list)
+ return;
+
+ mlxsw_sp_nve_fid_flood_index_clear(fid, mc_list);
+ mlxsw_sp_nve_mc_list_ip_del(mlxsw_sp, mc_list, proto, addr);
+ mlxsw_sp_nve_mc_list_put(mlxsw_sp, mc_list);
+}
+
+static void
+mlxsw_sp_nve_mc_record_delete(struct mlxsw_sp_nve_mc_record *mc_record)
+{
+ struct mlxsw_sp_nve *nve = mc_record->mlxsw_sp->nve;
+ unsigned int num_max_entries;
+ int i;
+
+ num_max_entries = nve->num_max_mc_entries[mc_record->proto];
+ for (i = 0; i < num_max_entries; i++) {
+ struct mlxsw_sp_nve_mc_entry *mc_entry = &mc_record->entries[i];
+
+ if (!mc_entry->valid)
+ continue;
+ mlxsw_sp_nve_mc_record_entry_del(mc_record, mc_entry);
+ }
+
+ WARN_ON(mc_record->num_entries);
+ mlxsw_sp_nve_mc_record_put(mc_record);
+}
+
+static void mlxsw_sp_nve_flood_ip_flush(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp_nve_mc_record *mc_record, *tmp;
+ struct mlxsw_sp_nve_mc_list_key key = { 0 };
+ struct mlxsw_sp_nve_mc_list *mc_list;
+
+ if (!mlxsw_sp_fid_nve_flood_index_is_set(fid))
+ return;
+
+ mlxsw_sp_fid_nve_flood_index_clear(fid);
+
+ key.fid_index = mlxsw_sp_fid_index(fid);
+ mc_list = mlxsw_sp_nve_mc_list_find(mlxsw_sp, &key);
+ if (WARN_ON(!mc_list))
+ return;
+
+ list_for_each_entry_safe(mc_record, tmp, &mc_list->records_list, list)
+ mlxsw_sp_nve_mc_record_delete(mc_record);
+
+ WARN_ON(!list_empty(&mc_list->records_list));
+ mlxsw_sp_nve_mc_list_put(mlxsw_sp, mc_list);
+}
+
+static int mlxsw_sp_nve_tunnel_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nve_config *config)
+{
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+ const struct mlxsw_sp_nve_ops *ops;
+ int err;
+
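+	/* A single underlay tunnel is shared by all FIDs; only initialize
+	 * it for the first user.
+	 */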
+ if (nve->num_nve_tunnels++ != 0)
+ return 0;
+
+ nve->config = *config;
+
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ &nve->tunnel_index);
+ if (err)
+ goto err_kvdl_alloc;
+
+ ops = nve->nve_ops_arr[config->type];
+ err = ops->init(nve, config);
+ if (err)
+ goto err_ops_init;
+
+ return 0;
+
+err_ops_init:
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ nve->tunnel_index);
+err_kvdl_alloc:
+ memset(&nve->config, 0, sizeof(nve->config));
+ nve->num_nve_tunnels--;
+ return err;
+}
+
+static void mlxsw_sp_nve_tunnel_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+ const struct mlxsw_sp_nve_ops *ops;
+
+ ops = nve->nve_ops_arr[nve->config.type];
+
+ if (mlxsw_sp->nve->num_nve_tunnels == 1) {
+ ops->fini(nve);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ nve->tunnel_index);
+ memset(&nve->config, 0, sizeof(nve->config));
+ }
+ nve->num_nve_tunnels--;
+}
+
+static void mlxsw_sp_nve_fdb_flush_by_fid(struct mlxsw_sp *mlxsw_sp,
+ u16 fid_index)
+{
+ char sfdf_pl[MLXSW_REG_SFDF_LEN];
+
+ mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_NVE_AND_FID);
+ mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
+}
+
+static void mlxsw_sp_nve_fdb_clear_offload(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_fid *fid,
+ const struct net_device *nve_dev,
+ __be32 vni)
+{
+ const struct mlxsw_sp_nve_ops *ops;
+ enum mlxsw_sp_nve_type type;
+
+ if (WARN_ON(mlxsw_sp_fid_nve_type(fid, &type)))
+ return;
+
+ ops = mlxsw_sp->nve->nve_ops_arr[type];
+ ops->fdb_clear_offload(nve_dev, vni);
+}
+
+struct mlxsw_sp_nve_ipv6_ht_key {
+ u8 mac[ETH_ALEN];
+ u16 fid_index;
+};
+
+struct mlxsw_sp_nve_ipv6_ht_node {
+ struct rhash_head ht_node;
+ struct list_head list;
+ struct mlxsw_sp_nve_ipv6_ht_key key;
+ struct in6_addr addr6;
+};
+
+static const struct rhashtable_params mlxsw_sp_nve_ipv6_ht_params = {
+ .key_len = sizeof(struct mlxsw_sp_nve_ipv6_ht_key),
+ .key_offset = offsetof(struct mlxsw_sp_nve_ipv6_ht_node, key),
+ .head_offset = offsetof(struct mlxsw_sp_nve_ipv6_ht_node, ht_node),
+};
+
+int mlxsw_sp_nve_ipv6_addr_kvdl_set(struct mlxsw_sp *mlxsw_sp,
+ const struct in6_addr *addr6,
+ u32 *p_kvdl_index)
+{
+ return mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp, addr6, p_kvdl_index);
+}
+
+void mlxsw_sp_nve_ipv6_addr_kvdl_unset(struct mlxsw_sp *mlxsw_sp,
+ const struct in6_addr *addr6)
+{
+ mlxsw_sp_ipv6_addr_put(mlxsw_sp, addr6);
+}
+
+static struct mlxsw_sp_nve_ipv6_ht_node *
+mlxsw_sp_nve_ipv6_ht_node_lookup(struct mlxsw_sp *mlxsw_sp, const char *mac,
+ u16 fid_index)
+{
+ struct mlxsw_sp_nve_ipv6_ht_key key = {};
+
+ ether_addr_copy(key.mac, mac);
+ key.fid_index = fid_index;
+ return rhashtable_lookup_fast(&mlxsw_sp->nve->ipv6_ht, &key,
+ mlxsw_sp_nve_ipv6_ht_params);
+}
+
+static int mlxsw_sp_nve_ipv6_ht_insert(struct mlxsw_sp *mlxsw_sp,
+ const char *mac, u16 fid_index,
+ const struct in6_addr *addr6)
+{
+ struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node;
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+ int err;
+
+ ipv6_ht_node = kzalloc(sizeof(*ipv6_ht_node), GFP_KERNEL);
+ if (!ipv6_ht_node)
+ return -ENOMEM;
+
+ ether_addr_copy(ipv6_ht_node->key.mac, mac);
+ ipv6_ht_node->key.fid_index = fid_index;
+ ipv6_ht_node->addr6 = *addr6;
+
+ err = rhashtable_insert_fast(&nve->ipv6_ht, &ipv6_ht_node->ht_node,
+ mlxsw_sp_nve_ipv6_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ list_add(&ipv6_ht_node->list, &nve->ipv6_addr_list);
+
+ return 0;
+
+err_rhashtable_insert:
+ kfree(ipv6_ht_node);
+ return err;
+}
+
+static void
+mlxsw_sp_nve_ipv6_ht_remove(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node)
+{
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+
+ list_del(&ipv6_ht_node->list);
+ rhashtable_remove_fast(&nve->ipv6_ht, &ipv6_ht_node->ht_node,
+ mlxsw_sp_nve_ipv6_ht_params);
+ kfree(ipv6_ht_node);
+}
+
+int
+mlxsw_sp_nve_ipv6_addr_map_replace(struct mlxsw_sp *mlxsw_sp, const char *mac,
+ u16 fid_index,
+ const struct in6_addr *new_addr6)
+{
+ struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node;
+
+ ASSERT_RTNL();
+
+ ipv6_ht_node = mlxsw_sp_nve_ipv6_ht_node_lookup(mlxsw_sp, mac,
+ fid_index);
+ if (!ipv6_ht_node)
+ return mlxsw_sp_nve_ipv6_ht_insert(mlxsw_sp, mac, fid_index,
+ new_addr6);
+
+ mlxsw_sp_ipv6_addr_put(mlxsw_sp, &ipv6_ht_node->addr6);
+ ipv6_ht_node->addr6 = *new_addr6;
+ return 0;
+}
+
+void mlxsw_sp_nve_ipv6_addr_map_del(struct mlxsw_sp *mlxsw_sp, const char *mac,
+ u16 fid_index)
+{
+ struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node;
+
+ ASSERT_RTNL();
+
+ ipv6_ht_node = mlxsw_sp_nve_ipv6_ht_node_lookup(mlxsw_sp, mac,
+ fid_index);
+ if (WARN_ON(!ipv6_ht_node))
+ return;
+
+ mlxsw_sp_nve_ipv6_ht_remove(mlxsw_sp, ipv6_ht_node);
+}
+
+static void mlxsw_sp_nve_ipv6_addr_flush_by_fid(struct mlxsw_sp *mlxsw_sp,
+ u16 fid_index)
+{
+ struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node, *tmp;
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+
+ list_for_each_entry_safe(ipv6_ht_node, tmp, &nve->ipv6_addr_list,
+ list) {
+ if (ipv6_ht_node->key.fid_index != fid_index)
+ continue;
+
+ mlxsw_sp_ipv6_addr_put(mlxsw_sp, &ipv6_ht_node->addr6);
+ mlxsw_sp_nve_ipv6_ht_remove(mlxsw_sp, ipv6_ht_node);
+ }
+}
+
+int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_nve_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+ const struct mlxsw_sp_nve_ops *ops;
+ struct mlxsw_sp_nve_config config;
+ int err;
+
+ ops = nve->nve_ops_arr[params->type];
+
+ if (!ops->can_offload(nve, params, extack))
+ return -EINVAL;
+
+ memset(&config, 0, sizeof(config));
+ ops->nve_config(nve, params, &config);
+ if (nve->num_nve_tunnels &&
+ memcmp(&config, &nve->config, sizeof(config))) {
+ NL_SET_ERR_MSG_MOD(extack, "Conflicting NVE tunnels configuration");
+ return -EINVAL;
+ }
+
+ err = mlxsw_sp_nve_tunnel_init(mlxsw_sp, &config);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to initialize NVE tunnel");
+ return err;
+ }
+
+ err = mlxsw_sp_fid_vni_set(fid, params->type, params->vni,
+ params->dev->ifindex);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set VNI on FID");
+ goto err_fid_vni_set;
+ }
+
+ err = ops->fdb_replay(params->dev, params->vni, extack);
+ if (err)
+ goto err_fdb_replay;
+
+ return 0;
+
+err_fdb_replay:
+ mlxsw_sp_fid_vni_clear(fid);
+err_fid_vni_set:
+ mlxsw_sp_nve_tunnel_fini(mlxsw_sp);
+ return err;
+}
+
+void mlxsw_sp_nve_fid_disable(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fid *fid)
+{
+ u16 fid_index = mlxsw_sp_fid_index(fid);
+ struct net_device *nve_dev;
+ int nve_ifindex;
+ __be32 vni;
+
+ mlxsw_sp_nve_flood_ip_flush(mlxsw_sp, fid);
+ mlxsw_sp_nve_fdb_flush_by_fid(mlxsw_sp, fid_index);
+ mlxsw_sp_nve_ipv6_addr_flush_by_fid(mlxsw_sp, fid_index);
+
+ if (WARN_ON(mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex) ||
+ mlxsw_sp_fid_vni(fid, &vni)))
+ goto out;
+
+ nve_dev = dev_get_by_index(mlxsw_sp_net(mlxsw_sp), nve_ifindex);
+ if (!nve_dev)
+ goto out;
+
+ mlxsw_sp_nve_fdb_clear_offload(mlxsw_sp, fid, nve_dev, vni);
+ mlxsw_sp_fid_fdb_clear_offload(fid, nve_dev);
+
+ dev_put(nve_dev);
+
+out:
+ mlxsw_sp_fid_vni_clear(fid);
+ mlxsw_sp_nve_tunnel_fini(mlxsw_sp);
+}
+
+int mlxsw_sp_port_nve_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char tnqdr_pl[MLXSW_REG_TNQDR_LEN];
+
+ mlxsw_reg_tnqdr_pack(tnqdr_pl, mlxsw_sp_port->local_port);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnqdr), tnqdr_pl);
+}
+
+void mlxsw_sp_port_nve_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+}
+
+static int mlxsw_sp_nve_qos_init(struct mlxsw_sp *mlxsw_sp)
+{
+ char tnqcr_pl[MLXSW_REG_TNQCR_LEN];
+
+ mlxsw_reg_tnqcr_pack(tnqcr_pl);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnqcr), tnqcr_pl);
+}
+
+static int mlxsw_sp_nve_ecn_encap_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+
+ /* Iterate over inner ECN values */
+ for (i = INET_ECN_NOT_ECT; i <= INET_ECN_CE; i++) {
+ u8 outer_ecn = INET_ECN_encapsulate(0, i);
+ char tneem_pl[MLXSW_REG_TNEEM_LEN];
+ int err;
+
+ mlxsw_reg_tneem_pack(tneem_pl, i, outer_ecn);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tneem),
+ tneem_pl);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int __mlxsw_sp_nve_ecn_decap_init(struct mlxsw_sp *mlxsw_sp,
+ u8 inner_ecn, u8 outer_ecn)
+{
+ char tndem_pl[MLXSW_REG_TNDEM_LEN];
+ u8 new_inner_ecn;
+ bool trap_en;
+
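+	/* Compute the new inner ECN for this (outer, inner) combination and
+	 * whether it must be trapped to the CPU.
+	 */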
+ new_inner_ecn = mlxsw_sp_tunnel_ecn_decap(outer_ecn, inner_ecn,
+ &trap_en);
+ mlxsw_reg_tndem_pack(tndem_pl, outer_ecn, inner_ecn, new_inner_ecn,
+ trap_en, trap_en ? MLXSW_TRAP_ID_DECAP_ECN0 : 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tndem), tndem_pl);
+}
+
+static int mlxsw_sp_nve_ecn_decap_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+
+ /* Iterate over inner ECN values */
+ for (i = INET_ECN_NOT_ECT; i <= INET_ECN_CE; i++) {
+ int j;
+
+ /* Iterate over outer ECN values */
+ for (j = INET_ECN_NOT_ECT; j <= INET_ECN_CE; j++) {
+ int err;
+
+ err = __mlxsw_sp_nve_ecn_decap_init(mlxsw_sp, i, j);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_nve_ecn_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ err = mlxsw_sp_nve_ecn_encap_init(mlxsw_sp);
+ if (err)
+ return err;
+
+ return mlxsw_sp_nve_ecn_decap_init(mlxsw_sp);
+}
+
+static int mlxsw_sp_nve_resources_query(struct mlxsw_sp *mlxsw_sp)
+{
+ unsigned int max;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_NVE_MC_ENTRIES_IPV4) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_NVE_MC_ENTRIES_IPV6))
+ return -EIO;
+ max = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_NVE_MC_ENTRIES_IPV4);
+ mlxsw_sp->nve->num_max_mc_entries[MLXSW_SP_L3_PROTO_IPV4] = max;
+ max = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_NVE_MC_ENTRIES_IPV6);
+ mlxsw_sp->nve->num_max_mc_entries[MLXSW_SP_L3_PROTO_IPV6] = max;
+
+ return 0;
+}
+
+int mlxsw_sp_nve_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_nve *nve;
+ int err;
+
+ nve = kzalloc(sizeof(*mlxsw_sp->nve), GFP_KERNEL);
+ if (!nve)
+ return -ENOMEM;
+ mlxsw_sp->nve = nve;
+ nve->mlxsw_sp = mlxsw_sp;
+ nve->nve_ops_arr = mlxsw_sp->nve_ops_arr;
+
+ err = rhashtable_init(&nve->mc_list_ht,
+ &mlxsw_sp_nve_mc_list_ht_params);
+ if (err)
+ goto err_mc_rhashtable_init;
+
+ err = rhashtable_init(&nve->ipv6_ht, &mlxsw_sp_nve_ipv6_ht_params);
+ if (err)
+ goto err_ipv6_rhashtable_init;
+
+ INIT_LIST_HEAD(&nve->ipv6_addr_list);
+
+ err = mlxsw_sp_nve_qos_init(mlxsw_sp);
+ if (err)
+ goto err_nve_qos_init;
+
+ err = mlxsw_sp_nve_ecn_init(mlxsw_sp);
+ if (err)
+ goto err_nve_ecn_init;
+
+ err = mlxsw_sp_nve_resources_query(mlxsw_sp);
+ if (err)
+ goto err_nve_resources_query;
+
+ return 0;
+
+err_nve_resources_query:
+err_nve_ecn_init:
+err_nve_qos_init:
+ rhashtable_destroy(&nve->ipv6_ht);
+err_ipv6_rhashtable_init:
+ rhashtable_destroy(&nve->mc_list_ht);
+err_mc_rhashtable_init:
+ mlxsw_sp->nve = NULL;
+ kfree(nve);
+ return err;
+}
+
+void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ WARN_ON(mlxsw_sp->nve->num_nve_tunnels);
+ WARN_ON(!list_empty(&mlxsw_sp->nve->ipv6_addr_list));
+ rhashtable_destroy(&mlxsw_sp->nve->ipv6_ht);
+ rhashtable_destroy(&mlxsw_sp->nve->mc_list_ht);
+ kfree(mlxsw_sp->nve);
+ mlxsw_sp->nve = NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
new file mode 100644
index 000000000..0d21de1d0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_SPECTRUM_NVE_H
+#define _MLXSW_SPECTRUM_NVE_H
+
+#include <linux/netlink.h>
+#include <linux/rhashtable.h>
+
+#include "spectrum.h"
+
+struct mlxsw_sp_nve_config {
+ enum mlxsw_sp_nve_type type;
+ u8 ttl;
+ u8 learning_en:1;
+ __be16 udp_dport;
+ __be32 flowlabel;
+ u32 ul_tb_id;
+ enum mlxsw_sp_l3proto ul_proto;
+ union mlxsw_sp_l3addr ul_sip;
+};
+
+struct mlxsw_sp_nve {
+ struct mlxsw_sp_nve_config config;
+ struct rhashtable mc_list_ht;
+ struct rhashtable ipv6_ht;
+ struct list_head ipv6_addr_list; /* Saves hash table nodes. */
+ struct mlxsw_sp *mlxsw_sp;
+ const struct mlxsw_sp_nve_ops **nve_ops_arr;
+ unsigned int num_nve_tunnels; /* Protected by RTNL */
+ unsigned int num_max_mc_entries[MLXSW_SP_L3_PROTO_MAX];
+ u32 tunnel_index;
+ u16 ul_rif_index; /* Reserved for Spectrum */
+};
+
+struct mlxsw_sp_nve_ops {
+ enum mlxsw_sp_nve_type type;
+ bool (*can_offload)(const struct mlxsw_sp_nve *nve,
+ const struct mlxsw_sp_nve_params *params,
+ struct netlink_ext_ack *extack);
+ void (*nve_config)(const struct mlxsw_sp_nve *nve,
+ const struct mlxsw_sp_nve_params *params,
+ struct mlxsw_sp_nve_config *config);
+ int (*init)(struct mlxsw_sp_nve *nve,
+ const struct mlxsw_sp_nve_config *config);
+ void (*fini)(struct mlxsw_sp_nve *nve);
+ int (*fdb_replay)(const struct net_device *nve_dev, __be32 vni,
+ struct netlink_ext_ack *extack);
+ void (*fdb_clear_offload)(const struct net_device *nve_dev, __be32 vni);
+};
+
+extern const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops;
+extern const struct mlxsw_sp_nve_ops mlxsw_sp2_nve_vxlan_ops;
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
new file mode 100644
index 000000000..cdd8818b4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
@@ -0,0 +1,473 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/netdevice.h>
+#include <linux/netlink.h>
+#include <linux/random.h>
+#include <net/vxlan.h>
+
+#include "reg.h"
+#include "spectrum.h"
+#include "spectrum_nve.h"
+
+#define MLXSW_SP_NVE_VXLAN_IPV4_SUPPORTED_FLAGS (VXLAN_F_UDP_ZERO_CSUM_TX | \
+ VXLAN_F_LEARN)
+#define MLXSW_SP_NVE_VXLAN_IPV6_SUPPORTED_FLAGS (VXLAN_F_IPV6 | \
+ VXLAN_F_UDP_ZERO_CSUM6_TX | \
+ VXLAN_F_UDP_ZERO_CSUM6_RX)
+
+static bool mlxsw_sp_nve_vxlan_ipv4_flags_check(const struct vxlan_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Zero UDP checksum must be allowed for TX");
+ return false;
+ }
+
+ if (cfg->flags & ~MLXSW_SP_NVE_VXLAN_IPV4_SUPPORTED_FLAGS) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Unsupported flag");
+ return false;
+ }
+
+ return true;
+}
+
+static bool mlxsw_sp_nve_vxlan_ipv6_flags_check(const struct vxlan_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Zero UDP checksum must be allowed for TX");
+ return false;
+ }
+
+ if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Zero UDP checksum must be allowed for RX");
+ return false;
+ }
+
+ if (cfg->flags & ~MLXSW_SP_NVE_VXLAN_IPV6_SUPPORTED_FLAGS) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Unsupported flag");
+ return false;
+ }
+
+ return true;
+}
+
+static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
+ const struct mlxsw_sp_nve_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct vxlan_dev *vxlan = netdev_priv(params->dev);
+ struct vxlan_config *cfg = &vxlan->cfg;
+
+ if (vxlan_addr_multicast(&cfg->remote_ip)) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Multicast destination IP is not supported");
+ return false;
+ }
+
+ if (vxlan_addr_any(&cfg->saddr)) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Source address must be specified");
+ return false;
+ }
+
+ if (cfg->remote_ifindex) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Local interface is not supported");
+ return false;
+ }
+
+ if (cfg->port_min || cfg->port_max) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Only default UDP source port range is supported");
+ return false;
+ }
+
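+	/* In the VxLAN driver, a tos value of 1 denotes "inherit". */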
+ if (cfg->tos != 1) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: TOS must be configured to inherit");
+ return false;
+ }
+
+ if (cfg->flags & VXLAN_F_TTL_INHERIT) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: TTL must not be configured to inherit");
+ return false;
+ }
+
+ switch (cfg->saddr.sa.sa_family) {
+ case AF_INET:
+ if (!mlxsw_sp_nve_vxlan_ipv4_flags_check(cfg, extack))
+ return false;
+ break;
+ case AF_INET6:
+ if (!mlxsw_sp_nve_vxlan_ipv6_flags_check(cfg, extack))
+ return false;
+ break;
+ }
+
+ if (cfg->ttl == 0) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: TTL must not be configured to 0");
+ return false;
+ }
+
+ if (cfg->label != 0) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Flow label must be configured to 0");
+ return false;
+ }
+
+ return true;
+}
+
+static bool mlxsw_sp1_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
+ const struct mlxsw_sp_nve_params *params,
+ struct netlink_ext_ack *extack)
+{
+ if (params->ethertype == ETH_P_8021AD) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: 802.1ad bridge is not supported with VxLAN");
+ return false;
+ }
+
+ return mlxsw_sp_nve_vxlan_can_offload(nve, params, extack);
+}
+
+static void
+mlxsw_sp_nve_vxlan_ul_proto_sip_config(const struct vxlan_config *cfg,
+ struct mlxsw_sp_nve_config *config)
+{
+ switch (cfg->saddr.sa.sa_family) {
+ case AF_INET:
+ config->ul_proto = MLXSW_SP_L3_PROTO_IPV4;
+ config->ul_sip.addr4 = cfg->saddr.sin.sin_addr.s_addr;
+ break;
+ case AF_INET6:
+ config->ul_proto = MLXSW_SP_L3_PROTO_IPV6;
+ config->ul_sip.addr6 = cfg->saddr.sin6.sin6_addr;
+ break;
+ }
+}
+
+static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve,
+ const struct mlxsw_sp_nve_params *params,
+ struct mlxsw_sp_nve_config *config)
+{
+ struct vxlan_dev *vxlan = netdev_priv(params->dev);
+ struct vxlan_config *cfg = &vxlan->cfg;
+
+ config->type = MLXSW_SP_NVE_TYPE_VXLAN;
+ config->ttl = cfg->ttl;
+ config->flowlabel = cfg->label;
+ config->learning_en = cfg->flags & VXLAN_F_LEARN ? 1 : 0;
+ config->ul_tb_id = RT_TABLE_MAIN;
+ mlxsw_sp_nve_vxlan_ul_proto_sip_config(cfg, config);
+ config->udp_dport = cfg->dst_port;
+}
+
+static void
+mlxsw_sp_nve_vxlan_config_prepare(char *tngcr_pl,
+ const struct mlxsw_sp_nve_config *config)
+{
+ struct in6_addr addr6;
+ u8 udp_sport;
+
+ mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, true,
+ config->ttl);
+ /* VxLAN driver's default UDP source port range is 32768 (0x8000)
+ * to 60999 (0xee47). Set the upper 8 bits of the UDP source port
+	 * to a random number between 0x80 and 0xee.
+ */
+ get_random_bytes(&udp_sport, sizeof(udp_sport));
+ udp_sport = (udp_sport % (0xee - 0x80 + 1)) + 0x80;
+ mlxsw_reg_tngcr_nve_udp_sport_prefix_set(tngcr_pl, udp_sport);
+
+ switch (config->ul_proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ mlxsw_reg_tngcr_usipv4_set(tngcr_pl,
+ be32_to_cpu(config->ul_sip.addr4));
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ addr6 = config->ul_sip.addr6;
+ mlxsw_reg_tngcr_usipv6_memcpy_to(tngcr_pl,
+ (const char *)&addr6);
+ break;
+ }
+}
+
+static int
+mlxsw_sp1_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_nve_config *config)
+{
+ char tngcr_pl[MLXSW_REG_TNGCR_LEN];
+ u16 ul_vr_id;
+ int err;
+
+ err = mlxsw_sp_router_tb_id_vr_id(mlxsw_sp, config->ul_tb_id,
+ &ul_vr_id);
+ if (err)
+ return err;
+
+ mlxsw_sp_nve_vxlan_config_prepare(tngcr_pl, config);
+ mlxsw_reg_tngcr_learn_enable_set(tngcr_pl, config->learning_en);
+ mlxsw_reg_tngcr_underlay_virtual_router_set(tngcr_pl, ul_vr_id);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
+}
+
+static void mlxsw_sp1_nve_vxlan_config_clear(struct mlxsw_sp *mlxsw_sp)
+{
+ char tngcr_pl[MLXSW_REG_TNGCR_LEN];
+
+ mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0);
+
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
+}
+
+static int mlxsw_sp1_nve_vxlan_rtdp_set(struct mlxsw_sp *mlxsw_sp,
+ unsigned int tunnel_index)
+{
+ char rtdp_pl[MLXSW_REG_RTDP_LEN];
+
+ mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_NVE, tunnel_index);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl);
+}
+
+static int mlxsw_sp1_nve_vxlan_init(struct mlxsw_sp_nve *nve,
+ const struct mlxsw_sp_nve_config *config)
+{
+ struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
+ int err;
+
+ err = mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, config->udp_dport);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_parsing_depth_inc(mlxsw_sp);
+ if (err)
+ goto err_parsing_depth_inc;
+
+ err = mlxsw_sp1_nve_vxlan_config_set(mlxsw_sp, config);
+ if (err)
+ goto err_config_set;
+
+ err = mlxsw_sp1_nve_vxlan_rtdp_set(mlxsw_sp, nve->tunnel_index);
+ if (err)
+ goto err_rtdp_set;
+
+ err = mlxsw_sp_router_nve_promote_decap(mlxsw_sp, config->ul_tb_id,
+ config->ul_proto,
+ &config->ul_sip,
+ nve->tunnel_index);
+ if (err)
+ goto err_promote_decap;
+
+ return 0;
+
+err_promote_decap:
+err_rtdp_set:
+ mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp);
+err_config_set:
+ mlxsw_sp_parsing_depth_dec(mlxsw_sp);
+err_parsing_depth_inc:
+ mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, 0);
+ return err;
+}
+
+static void mlxsw_sp1_nve_vxlan_fini(struct mlxsw_sp_nve *nve)
+{
+ struct mlxsw_sp_nve_config *config = &nve->config;
+ struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
+
+ mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id,
+ config->ul_proto, &config->ul_sip);
+ mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp);
+ mlxsw_sp_parsing_depth_dec(mlxsw_sp);
+ mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, 0);
+}
+
+static int
+mlxsw_sp_nve_vxlan_fdb_replay(const struct net_device *nve_dev, __be32 vni,
+ struct netlink_ext_ack *extack)
+{
+ if (WARN_ON(!netif_is_vxlan(nve_dev)))
+ return -EINVAL;
+ return vxlan_fdb_replay(nve_dev, vni, &mlxsw_sp_switchdev_notifier,
+ extack);
+}
+
+static void
+mlxsw_sp_nve_vxlan_clear_offload(const struct net_device *nve_dev, __be32 vni)
+{
+ if (WARN_ON(!netif_is_vxlan(nve_dev)))
+ return;
+ vxlan_fdb_clear_offload(nve_dev, vni);
+}
+
+const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = {
+ .type = MLXSW_SP_NVE_TYPE_VXLAN,
+ .can_offload = mlxsw_sp1_nve_vxlan_can_offload,
+ .nve_config = mlxsw_sp_nve_vxlan_config,
+ .init = mlxsw_sp1_nve_vxlan_init,
+ .fini = mlxsw_sp1_nve_vxlan_fini,
+ .fdb_replay = mlxsw_sp_nve_vxlan_fdb_replay,
+ .fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload,
+};
+
+static int mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp,
+ bool learning_en)
+{
+ char tnpc_pl[MLXSW_REG_TNPC_LEN];
+
+ mlxsw_reg_tnpc_pack(tnpc_pl, MLXSW_REG_TUNNEL_PORT_NVE,
+ learning_en);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnpc), tnpc_pl);
+}
+
+static int
+mlxsw_sp2_nve_decap_ethertype_set(struct mlxsw_sp *mlxsw_sp)
+{
+ char spvid_pl[MLXSW_REG_SPVID_LEN] = {};
+
+ mlxsw_reg_spvid_tport_set(spvid_pl, true);
+ mlxsw_reg_spvid_local_port_set(spvid_pl,
+ MLXSW_REG_TUNNEL_PORT_NVE);
+ mlxsw_reg_spvid_egr_et_set_set(spvid_pl, true);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
+}
+
+static int
+mlxsw_sp2_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_nve_config *config)
+{
+ char tngcr_pl[MLXSW_REG_TNGCR_LEN];
+ char spvtr_pl[MLXSW_REG_SPVTR_LEN];
+ u16 ul_rif_index;
+ int err;
+
+ err = mlxsw_sp_router_ul_rif_get(mlxsw_sp, config->ul_tb_id,
+ &ul_rif_index);
+ if (err)
+ return err;
+ mlxsw_sp->nve->ul_rif_index = ul_rif_index;
+
+ err = mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, config->learning_en);
+ if (err)
+ goto err_vxlan_learning_set;
+
+ mlxsw_sp_nve_vxlan_config_prepare(tngcr_pl, config);
+ mlxsw_reg_tngcr_underlay_rif_set(tngcr_pl, ul_rif_index);
+
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
+ if (err)
+ goto err_tngcr_write;
+
+ mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE,
+ MLXSW_REG_SPVTR_IPVID_MODE_ALWAYS_PUSH_VLAN);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl);
+ if (err)
+ goto err_spvtr_write;
+
+ err = mlxsw_sp2_nve_decap_ethertype_set(mlxsw_sp);
+ if (err)
+ goto err_decap_ethertype_set;
+
+ return 0;
+
+err_decap_ethertype_set:
+ mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE,
+ MLXSW_REG_SPVTR_IPVID_MODE_IEEE_COMPLIANT_PVID);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl);
+err_spvtr_write:
+ mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
+err_tngcr_write:
+ mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, false);
+err_vxlan_learning_set:
+ mlxsw_sp_router_ul_rif_put(mlxsw_sp, ul_rif_index);
+ return err;
+}
+
+static void mlxsw_sp2_nve_vxlan_config_clear(struct mlxsw_sp *mlxsw_sp)
+{
+ char spvtr_pl[MLXSW_REG_SPVTR_LEN];
+ char tngcr_pl[MLXSW_REG_TNGCR_LEN];
+
+ mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE,
+ MLXSW_REG_SPVTR_IPVID_MODE_IEEE_COMPLIANT_PVID);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl);
+ mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
+ mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, false);
+ mlxsw_sp_router_ul_rif_put(mlxsw_sp, mlxsw_sp->nve->ul_rif_index);
+}
+
+static int mlxsw_sp2_nve_vxlan_rtdp_set(struct mlxsw_sp *mlxsw_sp,
+ unsigned int tunnel_index,
+ u16 ul_rif_index)
+{
+ char rtdp_pl[MLXSW_REG_RTDP_LEN];
+
+ mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_NVE, tunnel_index);
+ mlxsw_reg_rtdp_egress_router_interface_set(rtdp_pl, ul_rif_index);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl);
+}
+
+static int mlxsw_sp2_nve_vxlan_init(struct mlxsw_sp_nve *nve,
+ const struct mlxsw_sp_nve_config *config)
+{
+ struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
+ int err;
+
+ err = mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, config->udp_dport);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_parsing_depth_inc(mlxsw_sp);
+ if (err)
+ goto err_parsing_depth_inc;
+
+ err = mlxsw_sp2_nve_vxlan_config_set(mlxsw_sp, config);
+ if (err)
+ goto err_config_set;
+
+ err = mlxsw_sp2_nve_vxlan_rtdp_set(mlxsw_sp, nve->tunnel_index,
+ nve->ul_rif_index);
+ if (err)
+ goto err_rtdp_set;
+
+ err = mlxsw_sp_router_nve_promote_decap(mlxsw_sp, config->ul_tb_id,
+ config->ul_proto,
+ &config->ul_sip,
+ nve->tunnel_index);
+ if (err)
+ goto err_promote_decap;
+
+ return 0;
+
+err_promote_decap:
+err_rtdp_set:
+ mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp);
+err_config_set:
+ mlxsw_sp_parsing_depth_dec(mlxsw_sp);
+err_parsing_depth_inc:
+ mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, 0);
+ return err;
+}
+
+static void mlxsw_sp2_nve_vxlan_fini(struct mlxsw_sp_nve *nve)
+{
+ struct mlxsw_sp_nve_config *config = &nve->config;
+ struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
+
+ mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id,
+ config->ul_proto, &config->ul_sip);
+ mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp);
+ mlxsw_sp_parsing_depth_dec(mlxsw_sp);
+ mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, 0);
+}
+
+const struct mlxsw_sp_nve_ops mlxsw_sp2_nve_vxlan_ops = {
+ .type = MLXSW_SP_NVE_TYPE_VXLAN,
+ .can_offload = mlxsw_sp_nve_vxlan_can_offload,
+ .nve_config = mlxsw_sp_nve_vxlan_config,
+ .init = mlxsw_sp2_nve_vxlan_init,
+ .fini = mlxsw_sp2_nve_vxlan_fini,
+ .fdb_replay = mlxsw_sp_nve_vxlan_fdb_replay,
+ .fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c
new file mode 100644
index 000000000..7dd3dba0f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/refcount.h>
+#include <linux/idr.h>
+
+#include "spectrum.h"
+#include "reg.h"
+
+struct mlxsw_sp_pgt {
+ struct idr pgt_idr;
+ u16 end_index; /* Exclusive. */
+ struct mutex lock; /* Protects PGT. */
+ bool smpe_index_valid;
+};
+
+struct mlxsw_sp_pgt_entry {
+ struct list_head ports_list;
+ u16 index;
+ u16 smpe_index;
+};
+
+struct mlxsw_sp_pgt_entry_port {
+ struct list_head list; /* Member of 'ports_list'. */
+ u16 local_port;
+};
+
+int mlxsw_sp_pgt_mid_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_mid)
+{
+ int index, err = 0;
+
+ mutex_lock(&mlxsw_sp->pgt->lock);
+ index = idr_alloc(&mlxsw_sp->pgt->pgt_idr, NULL, 0,
+ mlxsw_sp->pgt->end_index, GFP_KERNEL);
+
+ if (index < 0) {
+ err = index;
+ goto err_idr_alloc;
+ }
+
+ *p_mid = index;
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return 0;
+
+err_idr_alloc:
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return err;
+}
+
+void mlxsw_sp_pgt_mid_free(struct mlxsw_sp *mlxsw_sp, u16 mid_base)
+{
+ mutex_lock(&mlxsw_sp->pgt->lock);
+ WARN_ON(idr_remove(&mlxsw_sp->pgt->pgt_idr, mid_base));
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+}
+
+int
+mlxsw_sp_pgt_mid_alloc_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base, u16 count)
+{
+ unsigned int idr_cursor;
+ int i, err;
+
+ mutex_lock(&mlxsw_sp->pgt->lock);
+
+	/* This function is supposed to be called several times as part of
+	 * driver init, in a specific order. Verify that mid_base is the
+	 * first free index in the IDR, so that the allocated indexes can be
+	 * freed in case of error.
+	 */
+ idr_cursor = idr_get_cursor(&mlxsw_sp->pgt->pgt_idr);
+ if (WARN_ON(idr_cursor != mid_base)) {
+ err = -EINVAL;
+ goto err_idr_cursor;
+ }
+
+ for (i = 0; i < count; i++) {
+ err = idr_alloc_cyclic(&mlxsw_sp->pgt->pgt_idr, NULL,
+ mid_base, mid_base + count, GFP_KERNEL);
+ if (err < 0)
+ goto err_idr_alloc_cyclic;
+ }
+
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return 0;
+
+err_idr_alloc_cyclic:
+ for (i--; i >= 0; i--)
+ idr_remove(&mlxsw_sp->pgt->pgt_idr, mid_base + i);
+err_idr_cursor:
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return err;
+}
+
+void
+mlxsw_sp_pgt_mid_free_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base, u16 count)
+{
+ struct idr *pgt_idr = &mlxsw_sp->pgt->pgt_idr;
+ int i;
+
+ mutex_lock(&mlxsw_sp->pgt->lock);
+
+ for (i = 0; i < count; i++)
+ WARN_ON_ONCE(idr_remove(pgt_idr, mid_base + i));
+
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+}
+
+static struct mlxsw_sp_pgt_entry_port *
+mlxsw_sp_pgt_entry_port_lookup(struct mlxsw_sp_pgt_entry *pgt_entry,
+ u16 local_port)
+{
+ struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
+
+ list_for_each_entry(pgt_entry_port, &pgt_entry->ports_list, list) {
+ if (pgt_entry_port->local_port == local_port)
+ return pgt_entry_port;
+ }
+
+ return NULL;
+}
+
+static struct mlxsw_sp_pgt_entry *
+mlxsw_sp_pgt_entry_create(struct mlxsw_sp_pgt *pgt, u16 mid, u16 smpe)
+{
+ struct mlxsw_sp_pgt_entry *pgt_entry;
+ void *ret;
+ int err;
+
+ pgt_entry = kzalloc(sizeof(*pgt_entry), GFP_KERNEL);
+ if (!pgt_entry)
+ return ERR_PTR(-ENOMEM);
+
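+	/* The MID was reserved earlier with a NULL pointer by the MID
+	 * allocation helpers (see mlxsw_sp_pgt_mid_alloc()); attach the
+	 * actual entry to it now.
+	 */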
+ ret = idr_replace(&pgt->pgt_idr, pgt_entry, mid);
+ if (IS_ERR(ret)) {
+ err = PTR_ERR(ret);
+ goto err_idr_replace;
+ }
+
+ INIT_LIST_HEAD(&pgt_entry->ports_list);
+ pgt_entry->index = mid;
+ pgt_entry->smpe_index = smpe;
+ return pgt_entry;
+
+err_idr_replace:
+ kfree(pgt_entry);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_pgt_entry_destroy(struct mlxsw_sp_pgt *pgt,
+ struct mlxsw_sp_pgt_entry *pgt_entry)
+{
+ WARN_ON(!list_empty(&pgt_entry->ports_list));
+
+ pgt_entry = idr_replace(&pgt->pgt_idr, NULL, pgt_entry->index);
+ if (WARN_ON(IS_ERR(pgt_entry)))
+ return;
+
+ kfree(pgt_entry);
+}
+
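+/* Look up the PGT entry for the given MID, creating it on first use. The
+ * entry is destroyed again by mlxsw_sp_pgt_entry_put() once its port list
+ * becomes empty.
+ */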
+static struct mlxsw_sp_pgt_entry *
+mlxsw_sp_pgt_entry_get(struct mlxsw_sp_pgt *pgt, u16 mid, u16 smpe)
+{
+ struct mlxsw_sp_pgt_entry *pgt_entry;
+
+ pgt_entry = idr_find(&pgt->pgt_idr, mid);
+ if (pgt_entry)
+ return pgt_entry;
+
+ return mlxsw_sp_pgt_entry_create(pgt, mid, smpe);
+}
+
+static void mlxsw_sp_pgt_entry_put(struct mlxsw_sp_pgt *pgt, u16 mid)
+{
+ struct mlxsw_sp_pgt_entry *pgt_entry;
+
+ pgt_entry = idr_find(&pgt->pgt_idr, mid);
+ if (WARN_ON(!pgt_entry))
+ return;
+
+ if (list_empty(&pgt_entry->ports_list))
+ mlxsw_sp_pgt_entry_destroy(pgt, pgt_entry);
+}
+
+static void mlxsw_sp_pgt_smid2_port_set(char *smid2_pl, u16 local_port,
+ bool member)
+{
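+	/* Set both the port bit and its mask bit, so that the SMID2 write
+	 * only updates this port's membership and leaves the rest of the
+	 * PGT entry unchanged.
+	 */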
+ mlxsw_reg_smid2_port_set(smid2_pl, local_port, member);
+ mlxsw_reg_smid2_port_mask_set(smid2_pl, local_port, 1);
+}
+
+static int
+mlxsw_sp_pgt_entry_port_write(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_pgt_entry *pgt_entry,
+ u16 local_port, bool member)
+{
+ char *smid2_pl;
+ int err;
+
+ smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
+ if (!smid2_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_smid2_pack(smid2_pl, pgt_entry->index, 0, 0,
+ mlxsw_sp->pgt->smpe_index_valid,
+ pgt_entry->smpe_index);
+
+ mlxsw_sp_pgt_smid2_port_set(smid2_pl, local_port, member);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
+
+ kfree(smid2_pl);
+
+ return err;
+}
+
+static struct mlxsw_sp_pgt_entry_port *
+mlxsw_sp_pgt_entry_port_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_pgt_entry *pgt_entry,
+ u16 local_port)
+{
+ struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
+ int err;
+
+ pgt_entry_port = kzalloc(sizeof(*pgt_entry_port), GFP_KERNEL);
+ if (!pgt_entry_port)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlxsw_sp_pgt_entry_port_write(mlxsw_sp, pgt_entry, local_port,
+ true);
+ if (err)
+ goto err_pgt_entry_port_write;
+
+ pgt_entry_port->local_port = local_port;
+ list_add(&pgt_entry_port->list, &pgt_entry->ports_list);
+
+ return pgt_entry_port;
+
+err_pgt_entry_port_write:
+ kfree(pgt_entry_port);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_pgt_entry_port_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_pgt_entry *pgt_entry,
+ struct mlxsw_sp_pgt_entry_port *pgt_entry_port)
+
+{
+ list_del(&pgt_entry_port->list);
+ mlxsw_sp_pgt_entry_port_write(mlxsw_sp, pgt_entry,
+ pgt_entry_port->local_port, false);
+ kfree(pgt_entry_port);
+}
+
+static int mlxsw_sp_pgt_entry_port_add(struct mlxsw_sp *mlxsw_sp, u16 mid,
+ u16 smpe, u16 local_port)
+{
+ struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
+ struct mlxsw_sp_pgt_entry *pgt_entry;
+ int err;
+
+ mutex_lock(&mlxsw_sp->pgt->lock);
+
+ pgt_entry = mlxsw_sp_pgt_entry_get(mlxsw_sp->pgt, mid, smpe);
+ if (IS_ERR(pgt_entry)) {
+ err = PTR_ERR(pgt_entry);
+ goto err_pgt_entry_get;
+ }
+
+ pgt_entry_port = mlxsw_sp_pgt_entry_port_create(mlxsw_sp, pgt_entry,
+ local_port);
+ if (IS_ERR(pgt_entry_port)) {
+ err = PTR_ERR(pgt_entry_port);
+ goto err_pgt_entry_port_get;
+ }
+
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return 0;
+
+err_pgt_entry_port_get:
+ mlxsw_sp_pgt_entry_put(mlxsw_sp->pgt, mid);
+err_pgt_entry_get:
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return err;
+}
+
+static void mlxsw_sp_pgt_entry_port_del(struct mlxsw_sp *mlxsw_sp,
+ u16 mid, u16 smpe, u16 local_port)
+{
+ struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
+ struct mlxsw_sp_pgt_entry *pgt_entry;
+
+ mutex_lock(&mlxsw_sp->pgt->lock);
+
+ pgt_entry = idr_find(&mlxsw_sp->pgt->pgt_idr, mid);
+ if (!pgt_entry)
+ goto out;
+
+ pgt_entry_port = mlxsw_sp_pgt_entry_port_lookup(pgt_entry, local_port);
+ if (!pgt_entry_port)
+ goto out;
+
+ mlxsw_sp_pgt_entry_port_destroy(mlxsw_sp, pgt_entry, pgt_entry_port);
+ mlxsw_sp_pgt_entry_put(mlxsw_sp->pgt, mid);
+
+out:
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+}
+
+int mlxsw_sp_pgt_entry_port_set(struct mlxsw_sp *mlxsw_sp, u16 mid,
+ u16 smpe, u16 local_port, bool member)
+{
+ if (member)
+ return mlxsw_sp_pgt_entry_port_add(mlxsw_sp, mid, smpe,
+ local_port);
+
+ mlxsw_sp_pgt_entry_port_del(mlxsw_sp, mid, smpe, local_port);
+ return 0;
+}
+
+int mlxsw_sp_pgt_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_pgt *pgt;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, PGT_SIZE))
+ return -EIO;
+
+ pgt = kzalloc(sizeof(*mlxsw_sp->pgt), GFP_KERNEL);
+ if (!pgt)
+ return -ENOMEM;
+
+ idr_init(&pgt->pgt_idr);
+ pgt->end_index = MLXSW_CORE_RES_GET(mlxsw_sp->core, PGT_SIZE);
+ mutex_init(&pgt->lock);
+ pgt->smpe_index_valid = mlxsw_sp->pgt_smpe_index_valid;
+ mlxsw_sp->pgt = pgt;
+ return 0;
+}
+
+void mlxsw_sp_pgt_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mutex_destroy(&mlxsw_sp->pgt->lock);
+ WARN_ON(!idr_is_empty(&mlxsw_sp->pgt->pgt_idr));
+ idr_destroy(&mlxsw_sp->pgt->pgt_idr);
+ kfree(mlxsw_sp->pgt);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c
new file mode 100644
index 000000000..22ebb207c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c
@@ -0,0 +1,468 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2020 Mellanox Technologies. All rights reserved */
+
+#include <linux/idr.h>
+#include <linux/log2.h>
+#include <linux/mutex.h>
+#include <linux/netlink.h>
+#include <net/devlink.h>
+
+#include "spectrum.h"
+
+struct mlxsw_sp_policer_family {
+ enum mlxsw_sp_policer_type type;
+ enum mlxsw_reg_qpcr_g qpcr_type;
+ struct mlxsw_sp *mlxsw_sp;
+ u16 start_index; /* Inclusive */
+ u16 end_index; /* Exclusive */
+ struct idr policer_idr;
+ struct mutex lock; /* Protects policer_idr */
+ atomic_t policers_count;
+ const struct mlxsw_sp_policer_family_ops *ops;
+};
+
+struct mlxsw_sp_policer {
+ struct mlxsw_sp_policer_params params;
+ u16 index;
+};
+
+struct mlxsw_sp_policer_family_ops {
+ int (*init)(struct mlxsw_sp_policer_family *family);
+ void (*fini)(struct mlxsw_sp_policer_family *family);
+ int (*policer_index_alloc)(struct mlxsw_sp_policer_family *family,
+ struct mlxsw_sp_policer *policer);
+ struct mlxsw_sp_policer * (*policer_index_free)(struct mlxsw_sp_policer_family *family,
+ u16 policer_index);
+ int (*policer_init)(struct mlxsw_sp_policer_family *family,
+ const struct mlxsw_sp_policer *policer);
+ int (*policer_params_check)(const struct mlxsw_sp_policer_family *family,
+ const struct mlxsw_sp_policer_params *params,
+ struct netlink_ext_ack *extack);
+};
+
+struct mlxsw_sp_policer_core {
+ struct mlxsw_sp_policer_family *family_arr[MLXSW_SP_POLICER_TYPE_MAX + 1];
+ const struct mlxsw_sp_policer_core_ops *ops;
+ u8 lowest_bs_bits;
+ u8 highest_bs_bits;
+};
+
+struct mlxsw_sp_policer_core_ops {
+ int (*init)(struct mlxsw_sp_policer_core *policer_core);
+};
+
+static u64 mlxsw_sp_policer_rate_bytes_ps_kbps(u64 rate_bytes_ps)
+{
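+	/* Convert bytes per second to kilobits per second, e.g.
+	 * 125,000,000 Bps (1 Gbps) -> 125,000 * 8 = 1,000,000 kbps.
+	 */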
+ return div_u64(rate_bytes_ps, 1000) * BITS_PER_BYTE;
+}
+
+static u8 mlxsw_sp_policer_burst_bytes_hw_units(u64 burst_bytes)
+{
+ /* Provided burst size is in bytes. The ASIC burst size value is
+ * (2 ^ bs) * 512 bits. Convert the provided size to 512-bit units.
+ */
+ u64 bs512 = div_u64(burst_bytes, 64);
+
+ if (!bs512)
+ return 0;
+
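+	/* fls64() returns the 1-based position of the most significant set
+	 * bit, so fls64(bs512) - 1 is floor(log2(bs512)). For example, a
+	 * 2048-byte burst gives bs512 = 32 and a hardware value of 5, i.e.
+	 * (2 ^ 5) * 512 bits = 2048 bytes.
+	 */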
+ return fls64(bs512) - 1;
+}
+
+static u64 mlxsw_sp_policer_single_rate_occ_get(void *priv)
+{
+ struct mlxsw_sp_policer_family *family = priv;
+
+ return atomic_read(&family->policers_count);
+}
+
+static int
+mlxsw_sp_policer_single_rate_family_init(struct mlxsw_sp_policer_family *family)
+{
+ struct mlxsw_core *core = family->mlxsw_sp->core;
+ struct devlink *devlink;
+
+ /* CPU policers are allocated from the first N policers in the global
+ * range, so skip them.
+ */
+ if (!MLXSW_CORE_RES_VALID(core, MAX_GLOBAL_POLICERS) ||
+ !MLXSW_CORE_RES_VALID(core, MAX_CPU_POLICERS))
+ return -EIO;
+
+ family->start_index = MLXSW_CORE_RES_GET(core, MAX_CPU_POLICERS);
+ family->end_index = MLXSW_CORE_RES_GET(core, MAX_GLOBAL_POLICERS);
+
+ atomic_set(&family->policers_count, 0);
+ devlink = priv_to_devlink(core);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS,
+ mlxsw_sp_policer_single_rate_occ_get,
+ family);
+
+ return 0;
+}
+
+static void
+mlxsw_sp_policer_single_rate_family_fini(struct mlxsw_sp_policer_family *family)
+{
+ struct devlink *devlink = priv_to_devlink(family->mlxsw_sp->core);
+
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS);
+ WARN_ON(atomic_read(&family->policers_count) != 0);
+}
+
+static int
+mlxsw_sp_policer_single_rate_index_alloc(struct mlxsw_sp_policer_family *family,
+ struct mlxsw_sp_policer *policer)
+{
+ int id;
+
+ mutex_lock(&family->lock);
+ id = idr_alloc(&family->policer_idr, policer, family->start_index,
+ family->end_index, GFP_KERNEL);
+ mutex_unlock(&family->lock);
+
+ if (id < 0)
+ return id;
+
+ atomic_inc(&family->policers_count);
+ policer->index = id;
+
+ return 0;
+}
+
+static struct mlxsw_sp_policer *
+mlxsw_sp_policer_single_rate_index_free(struct mlxsw_sp_policer_family *family,
+ u16 policer_index)
+{
+ struct mlxsw_sp_policer *policer;
+
+ atomic_dec(&family->policers_count);
+
+ mutex_lock(&family->lock);
+ policer = idr_remove(&family->policer_idr, policer_index);
+ mutex_unlock(&family->lock);
+
+ WARN_ON(!policer);
+
+ return policer;
+}
+
+static int
+mlxsw_sp_policer_single_rate_init(struct mlxsw_sp_policer_family *family,
+ const struct mlxsw_sp_policer *policer)
+{
+ u64 rate_kbps = mlxsw_sp_policer_rate_bytes_ps_kbps(policer->params.rate);
+ u8 bs = mlxsw_sp_policer_burst_bytes_hw_units(policer->params.burst);
+ struct mlxsw_sp *mlxsw_sp = family->mlxsw_sp;
+ char qpcr_pl[MLXSW_REG_QPCR_LEN];
+
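+	/* Program the policer with the rate in kbps units and the burst
+	 * size exponent computed above, and clear its violate (drop)
+	 * counter as part of the same write.
+	 */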
+ mlxsw_reg_qpcr_pack(qpcr_pl, policer->index, MLXSW_REG_QPCR_IR_UNITS_K,
+ true, rate_kbps, bs);
+ mlxsw_reg_qpcr_clear_counter_set(qpcr_pl, true);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpcr), qpcr_pl);
+}
+
+static int
+mlxsw_sp_policer_single_rate_params_check(const struct mlxsw_sp_policer_family *family,
+ const struct mlxsw_sp_policer_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_policer_core *policer_core = family->mlxsw_sp->policer_core;
+ u64 rate_bps = params->rate * BITS_PER_BYTE;
+ u8 bs;
+
+ if (!params->bytes) {
+ NL_SET_ERR_MSG_MOD(extack, "Only bandwidth policing is currently supported by single rate policers");
+ return -EINVAL;
+ }
+
+ if (!is_power_of_2(params->burst)) {
+ NL_SET_ERR_MSG_MOD(extack, "Policer burst size is not power of two");
+ return -EINVAL;
+ }
+
+ bs = mlxsw_sp_policer_burst_bytes_hw_units(params->burst);
+
+ if (bs < policer_core->lowest_bs_bits) {
+ NL_SET_ERR_MSG_MOD(extack, "Policer burst size lower than limit");
+ return -EINVAL;
+ }
+
+ if (bs > policer_core->highest_bs_bits) {
+ NL_SET_ERR_MSG_MOD(extack, "Policer burst size higher than limit");
+ return -EINVAL;
+ }
+
+ if (rate_bps < MLXSW_REG_QPCR_LOWEST_CIR_BITS) {
+ NL_SET_ERR_MSG_MOD(extack, "Policer rate lower than limit");
+ return -EINVAL;
+ }
+
+ if (rate_bps > MLXSW_REG_QPCR_HIGHEST_CIR_BITS) {
+ NL_SET_ERR_MSG_MOD(extack, "Policer rate higher than limit");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct mlxsw_sp_policer_family_ops mlxsw_sp_policer_single_rate_ops = {
+ .init = mlxsw_sp_policer_single_rate_family_init,
+ .fini = mlxsw_sp_policer_single_rate_family_fini,
+ .policer_index_alloc = mlxsw_sp_policer_single_rate_index_alloc,
+ .policer_index_free = mlxsw_sp_policer_single_rate_index_free,
+ .policer_init = mlxsw_sp_policer_single_rate_init,
+ .policer_params_check = mlxsw_sp_policer_single_rate_params_check,
+};
+
+static const struct mlxsw_sp_policer_family mlxsw_sp_policer_single_rate_family = {
+ .type = MLXSW_SP_POLICER_TYPE_SINGLE_RATE,
+ .qpcr_type = MLXSW_REG_QPCR_G_GLOBAL,
+ .ops = &mlxsw_sp_policer_single_rate_ops,
+};
+
+static const struct mlxsw_sp_policer_family *mlxsw_sp_policer_family_arr[] = {
+ [MLXSW_SP_POLICER_TYPE_SINGLE_RATE] = &mlxsw_sp_policer_single_rate_family,
+};
+
+int mlxsw_sp_policer_add(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_policer_type type,
+ const struct mlxsw_sp_policer_params *params,
+ struct netlink_ext_ack *extack, u16 *p_policer_index)
+{
+ struct mlxsw_sp_policer_family *family;
+ struct mlxsw_sp_policer *policer;
+ int err;
+
+ family = mlxsw_sp->policer_core->family_arr[type];
+
+ err = family->ops->policer_params_check(family, params, extack);
+ if (err)
+ return err;
+
+ policer = kmalloc(sizeof(*policer), GFP_KERNEL);
+ if (!policer)
+ return -ENOMEM;
+ policer->params = *params;
+
+ err = family->ops->policer_index_alloc(family, policer);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to allocate policer index");
+ goto err_policer_index_alloc;
+ }
+
+ err = family->ops->policer_init(family, policer);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to initialize policer");
+ goto err_policer_init;
+ }
+
+ *p_policer_index = policer->index;
+
+ return 0;
+
+err_policer_init:
+ family->ops->policer_index_free(family, policer->index);
+err_policer_index_alloc:
+ kfree(policer);
+ return err;
+}
+
+void mlxsw_sp_policer_del(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_policer_type type, u16 policer_index)
+{
+ struct mlxsw_sp_policer_family *family;
+ struct mlxsw_sp_policer *policer;
+
+ family = mlxsw_sp->policer_core->family_arr[type];
+ policer = family->ops->policer_index_free(family, policer_index);
+ kfree(policer);
+}
+
+int mlxsw_sp_policer_drops_counter_get(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_policer_type type,
+ u16 policer_index, u64 *p_drops)
+{
+ struct mlxsw_sp_policer_family *family;
+ char qpcr_pl[MLXSW_REG_QPCR_LEN];
+ int err;
+
+ family = mlxsw_sp->policer_core->family_arr[type];
+
+ MLXSW_REG_ZERO(qpcr, qpcr_pl);
+ mlxsw_reg_qpcr_pid_set(qpcr_pl, policer_index);
+ mlxsw_reg_qpcr_g_set(qpcr_pl, family->qpcr_type);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(qpcr), qpcr_pl);
+ if (err)
+ return err;
+
+ *p_drops = mlxsw_reg_qpcr_violate_count_get(qpcr_pl);
+
+ return 0;
+}
+
+static int
+mlxsw_sp_policer_family_register(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_policer_family *tmpl)
+{
+ struct mlxsw_sp_policer_family *family;
+ int err;
+
+ family = kmemdup(tmpl, sizeof(*family), GFP_KERNEL);
+ if (!family)
+ return -ENOMEM;
+
+ family->mlxsw_sp = mlxsw_sp;
+ idr_init(&family->policer_idr);
+ mutex_init(&family->lock);
+
+ err = family->ops->init(family);
+ if (err)
+ goto err_family_init;
+
+ if (WARN_ON(family->start_index >= family->end_index)) {
+ err = -EINVAL;
+ goto err_index_check;
+ }
+
+ mlxsw_sp->policer_core->family_arr[tmpl->type] = family;
+
+ return 0;
+
+err_index_check:
+ family->ops->fini(family);
+err_family_init:
+ mutex_destroy(&family->lock);
+ idr_destroy(&family->policer_idr);
+ kfree(family);
+ return err;
+}
+
+static void
+mlxsw_sp_policer_family_unregister(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_policer_family *family)
+{
+ family->ops->fini(family);
+ mutex_destroy(&family->lock);
+ WARN_ON(!idr_is_empty(&family->policer_idr));
+ idr_destroy(&family->policer_idr);
+ kfree(family);
+}
+
+int mlxsw_sp_policers_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_policer_core *policer_core;
+ int i, err;
+
+ policer_core = kzalloc(sizeof(*policer_core), GFP_KERNEL);
+ if (!policer_core)
+ return -ENOMEM;
+ mlxsw_sp->policer_core = policer_core;
+ policer_core->ops = mlxsw_sp->policer_core_ops;
+
+ err = policer_core->ops->init(policer_core);
+ if (err)
+ goto err_init;
+
+ for (i = 0; i < MLXSW_SP_POLICER_TYPE_MAX + 1; i++) {
+ err = mlxsw_sp_policer_family_register(mlxsw_sp, mlxsw_sp_policer_family_arr[i]);
+ if (err)
+ goto err_family_register;
+ }
+
+ return 0;
+
+err_family_register:
+ for (i--; i >= 0; i--) {
+ struct mlxsw_sp_policer_family *family;
+
+ family = mlxsw_sp->policer_core->family_arr[i];
+ mlxsw_sp_policer_family_unregister(mlxsw_sp, family);
+ }
+err_init:
+ kfree(mlxsw_sp->policer_core);
+ return err;
+}
+
+void mlxsw_sp_policers_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+
+ for (i = MLXSW_SP_POLICER_TYPE_MAX; i >= 0; i--) {
+ struct mlxsw_sp_policer_family *family;
+
+ family = mlxsw_sp->policer_core->family_arr[i];
+ mlxsw_sp_policer_family_unregister(mlxsw_sp, family);
+ }
+
+ kfree(mlxsw_sp->policer_core);
+}
+
+int mlxsw_sp_policer_resources_register(struct mlxsw_core *mlxsw_core)
+{
+ u64 global_policers, cpu_policers, single_rate_policers;
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct devlink_resource_size_params size_params;
+ int err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_GLOBAL_POLICERS) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
+ return -EIO;
+
+ global_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_GLOBAL_POLICERS);
+ cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
+ single_rate_policers = global_policers - cpu_policers;
+
+ devlink_resource_size_params_init(&size_params, global_policers,
+ global_policers, 1,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ err = devl_resource_register(devlink, "global_policers",
+ global_policers,
+ MLXSW_SP_RESOURCE_GLOBAL_POLICERS,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
+ if (err)
+ return err;
+
+ devlink_resource_size_params_init(&size_params, single_rate_policers,
+ single_rate_policers, 1,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ err = devl_resource_register(devlink, "single_rate_policers",
+ single_rate_policers,
+ MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS,
+ MLXSW_SP_RESOURCE_GLOBAL_POLICERS,
+ &size_params);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int
+mlxsw_sp1_policer_core_init(struct mlxsw_sp_policer_core *policer_core)
+{
+ policer_core->lowest_bs_bits = MLXSW_REG_QPCR_LOWEST_CBS_BITS_SP1;
+ policer_core->highest_bs_bits = MLXSW_REG_QPCR_HIGHEST_CBS_BITS_SP1;
+
+ return 0;
+}
+
+const struct mlxsw_sp_policer_core_ops mlxsw_sp1_policer_core_ops = {
+ .init = mlxsw_sp1_policer_core_init,
+};
+
+static int
+mlxsw_sp2_policer_core_init(struct mlxsw_sp_policer_core *policer_core)
+{
+ policer_core->lowest_bs_bits = MLXSW_REG_QPCR_LOWEST_CBS_BITS_SP2;
+ policer_core->highest_bs_bits = MLXSW_REG_QPCR_HIGHEST_CBS_BITS_SP2;
+
+ return 0;
+}
+
+const struct mlxsw_sp_policer_core_ops mlxsw_sp2_policer_core_ops = {
+ .init = mlxsw_sp2_policer_core_init,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
new file mode 100644
index 000000000..7b01b9c20
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -0,0 +1,1726 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019 Mellanox Technologies. All rights reserved */
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/clocksource.h>
+#include <linux/timecounter.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/rhashtable.h>
+#include <linux/ptp_classify.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/net_tstamp.h>
+#include <linux/refcount.h>
+
+#include "spectrum.h"
+#include "spectrum_ptp.h"
+#include "core.h"
+
+#define MLXSW_SP1_PTP_CLOCK_CYCLES_SHIFT 29
+#define MLXSW_SP1_PTP_CLOCK_FREQ_KHZ 156257 /* 6.4nSec */
+#define MLXSW_SP1_PTP_CLOCK_MASK 64
+
+#define MLXSW_SP1_PTP_HT_GC_INTERVAL 500 /* ms */
+
+/* How long, approximately, should the unmatched entries stay in the hash table
+ * before they are collected. Should be evenly divisible by the GC interval.
+ */
+#define MLXSW_SP1_PTP_HT_GC_TIMEOUT 1000 /* ms */
+
+struct mlxsw_sp_ptp_state {
+ struct mlxsw_sp *mlxsw_sp;
+};
+
+struct mlxsw_sp1_ptp_state {
+ struct mlxsw_sp_ptp_state common;
+ struct rhltable unmatched_ht;
+ spinlock_t unmatched_lock; /* protects the HT */
+ struct delayed_work ht_gc_dw;
+ u32 gc_cycle;
+};
+
+struct mlxsw_sp2_ptp_state {
+ struct mlxsw_sp_ptp_state common;
+ refcount_t ptp_port_enabled_ref; /* Number of ports with time stamping
+ * enabled.
+ */
+ struct hwtstamp_config config;
+ struct mutex lock; /* Protects 'config' and HW configuration. */
+};
+
+struct mlxsw_sp1_ptp_key {
+ u16 local_port;
+ u8 message_type;
+ u16 sequence_id;
+ u8 domain_number;
+ bool ingress;
+};
+
+struct mlxsw_sp1_ptp_unmatched {
+ struct mlxsw_sp1_ptp_key key;
+ struct rhlist_head ht_node;
+ struct rcu_head rcu;
+ struct sk_buff *skb;
+ u64 timestamp;
+ u32 gc_cycle;
+};
+
+static const struct rhashtable_params mlxsw_sp1_ptp_unmatched_ht_params = {
+ .key_len = sizeof_field(struct mlxsw_sp1_ptp_unmatched, key),
+ .key_offset = offsetof(struct mlxsw_sp1_ptp_unmatched, key),
+ .head_offset = offsetof(struct mlxsw_sp1_ptp_unmatched, ht_node),
+};
+
+struct mlxsw_sp_ptp_clock {
+ struct mlxsw_core *core;
+ struct ptp_clock *ptp;
+ struct ptp_clock_info ptp_info;
+};
+
+struct mlxsw_sp1_ptp_clock {
+ struct mlxsw_sp_ptp_clock common;
+ spinlock_t lock; /* protect this structure */
+ struct cyclecounter cycles;
+ struct timecounter tc;
+ u32 nominal_c_mult;
+ unsigned long overflow_period;
+ struct delayed_work overflow_work;
+};
+
+static struct mlxsw_sp1_ptp_state *
+mlxsw_sp1_ptp_state(struct mlxsw_sp *mlxsw_sp)
+{
+ return container_of(mlxsw_sp->ptp_state, struct mlxsw_sp1_ptp_state,
+ common);
+}
+
+static struct mlxsw_sp2_ptp_state *
+mlxsw_sp2_ptp_state(struct mlxsw_sp *mlxsw_sp)
+{
+ return container_of(mlxsw_sp->ptp_state, struct mlxsw_sp2_ptp_state,
+ common);
+}
+
+static struct mlxsw_sp1_ptp_clock *
+mlxsw_sp1_ptp_clock(struct ptp_clock_info *ptp)
+{
+ return container_of(ptp, struct mlxsw_sp1_ptp_clock, common.ptp_info);
+}
+
+static u64 __mlxsw_sp1_ptp_read_frc(struct mlxsw_sp1_ptp_clock *clock,
+ struct ptp_system_timestamp *sts)
+{
+ struct mlxsw_core *mlxsw_core = clock->common.core;
+ u32 frc_h1, frc_h2, frc_l;
+
+ frc_h1 = mlxsw_core_read_frc_h(mlxsw_core);
+ ptp_read_system_prets(sts);
+ frc_l = mlxsw_core_read_frc_l(mlxsw_core);
+ ptp_read_system_postts(sts);
+ frc_h2 = mlxsw_core_read_frc_h(mlxsw_core);
+
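+	/* If the high word changed while the low word was being read, the
+	 * low word wrapped around; re-read it so it is consistent with the
+	 * second high-word sample.
+	 */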
+ if (frc_h1 != frc_h2) {
+ /* wrap around */
+ ptp_read_system_prets(sts);
+ frc_l = mlxsw_core_read_frc_l(mlxsw_core);
+ ptp_read_system_postts(sts);
+ }
+
+ return (u64) frc_l | (u64) frc_h2 << 32;
+}
+
+static u64 mlxsw_sp1_ptp_read_frc(const struct cyclecounter *cc)
+{
+ struct mlxsw_sp1_ptp_clock *clock =
+ container_of(cc, struct mlxsw_sp1_ptp_clock, cycles);
+
+ return __mlxsw_sp1_ptp_read_frc(clock, NULL) & cc->mask;
+}
+
+static int
+mlxsw_sp_ptp_phc_adjfreq(struct mlxsw_sp_ptp_clock *clock, int freq_adj)
+{
+ struct mlxsw_core *mlxsw_core = clock->core;
+ char mtutc_pl[MLXSW_REG_MTUTC_LEN];
+
+ mlxsw_reg_mtutc_pack(mtutc_pl, MLXSW_REG_MTUTC_OPERATION_ADJUST_FREQ,
+ freq_adj, 0, 0, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
+}
+
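+/* Invert the timecounter's cycles-to-nanoseconds conversion:
+ * cycles = nsec * (2 ^ shift) / mult.
+ */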
+static u64 mlxsw_sp1_ptp_ns2cycles(const struct timecounter *tc, u64 nsec)
+{
+ u64 cycles = (u64) nsec;
+
+ cycles <<= tc->cc->shift;
+ cycles = div_u64(cycles, tc->cc->mult);
+
+ return cycles;
+}
+
+static int
+mlxsw_sp1_ptp_phc_settime(struct mlxsw_sp1_ptp_clock *clock, u64 nsec)
+{
+ struct mlxsw_core *mlxsw_core = clock->common.core;
+ u64 next_sec, next_sec_in_nsec, cycles;
+ char mtutc_pl[MLXSW_REG_MTUTC_LEN];
+ char mtpps_pl[MLXSW_REG_MTPPS_LEN];
+ int err;
+
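+	/* The new time takes effect at the next whole second: convert that
+	 * point to a free-running counter value and program it via MTPPS,
+	 * then schedule the UTC update for the same second via MTUTC.
+	 */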
+ next_sec = div_u64(nsec, NSEC_PER_SEC) + 1;
+ next_sec_in_nsec = next_sec * NSEC_PER_SEC;
+
+ spin_lock_bh(&clock->lock);
+ cycles = mlxsw_sp1_ptp_ns2cycles(&clock->tc, next_sec_in_nsec);
+ spin_unlock_bh(&clock->lock);
+
+ mlxsw_reg_mtpps_vpin_pack(mtpps_pl, cycles);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtpps), mtpps_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mtutc_pack(mtutc_pl,
+ MLXSW_REG_MTUTC_OPERATION_SET_TIME_AT_NEXT_SEC,
+ 0, next_sec, 0, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
+}
+
+static int mlxsw_sp1_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct mlxsw_sp1_ptp_clock *clock = mlxsw_sp1_ptp_clock(ptp);
+ int neg_adj = 0;
+ u32 diff;
+ u64 adj;
+ s32 ppb;
+
+ ppb = scaled_ppm_to_ppb(scaled_ppm);
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+
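+	/* Scale the nominal multiplier by ppb / NSEC_PER_SEC to adjust the
+	 * software timecounter; the same adjustment is then mirrored to the
+	 * hardware clock below via MTUTC.
+	 */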
+ adj = clock->nominal_c_mult;
+ adj *= ppb;
+ diff = div_u64(adj, NSEC_PER_SEC);
+
+ spin_lock_bh(&clock->lock);
+ timecounter_read(&clock->tc);
+ clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
+ clock->nominal_c_mult + diff;
+ spin_unlock_bh(&clock->lock);
+
+ return mlxsw_sp_ptp_phc_adjfreq(&clock->common, neg_adj ? -ppb : ppb);
+}
+
+static int mlxsw_sp1_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct mlxsw_sp1_ptp_clock *clock = mlxsw_sp1_ptp_clock(ptp);
+ u64 nsec;
+
+ spin_lock_bh(&clock->lock);
+ timecounter_adjtime(&clock->tc, delta);
+ nsec = timecounter_read(&clock->tc);
+ spin_unlock_bh(&clock->lock);
+
+ return mlxsw_sp1_ptp_phc_settime(clock, nsec);
+}
+
+static int mlxsw_sp1_ptp_gettimex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct mlxsw_sp1_ptp_clock *clock = mlxsw_sp1_ptp_clock(ptp);
+ u64 cycles, nsec;
+
+ spin_lock_bh(&clock->lock);
+ cycles = __mlxsw_sp1_ptp_read_frc(clock, sts);
+ nsec = timecounter_cyc2time(&clock->tc, cycles);
+ spin_unlock_bh(&clock->lock);
+
+ *ts = ns_to_timespec64(nsec);
+
+ return 0;
+}
+
+static int mlxsw_sp1_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct mlxsw_sp1_ptp_clock *clock = mlxsw_sp1_ptp_clock(ptp);
+ u64 nsec = timespec64_to_ns(ts);
+
+ spin_lock_bh(&clock->lock);
+ timecounter_init(&clock->tc, &clock->cycles, nsec);
+ nsec = timecounter_read(&clock->tc);
+ spin_unlock_bh(&clock->lock);
+
+ return mlxsw_sp1_ptp_phc_settime(clock, nsec);
+}
+
+static const struct ptp_clock_info mlxsw_sp1_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "mlxsw_sp_clock",
+ .max_adj = 100000000,
+ .adjfine = mlxsw_sp1_ptp_adjfine,
+ .adjtime = mlxsw_sp1_ptp_adjtime,
+ .gettimex64 = mlxsw_sp1_ptp_gettimex,
+ .settime64 = mlxsw_sp1_ptp_settime,
+};
+
+static void mlxsw_sp1_ptp_clock_overflow(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct mlxsw_sp1_ptp_clock *clock;
+
+ clock = container_of(dwork, struct mlxsw_sp1_ptp_clock, overflow_work);
+
+ spin_lock_bh(&clock->lock);
+ timecounter_read(&clock->tc);
+ spin_unlock_bh(&clock->lock);
+ mlxsw_core_schedule_dw(&clock->overflow_work, clock->overflow_period);
+}
+
+struct mlxsw_sp_ptp_clock *
+mlxsw_sp1_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
+{
+ u64 overflow_cycles, nsec, frac = 0;
+ struct mlxsw_sp1_ptp_clock *clock;
+ int err;
+
+ clock = kzalloc(sizeof(*clock), GFP_KERNEL);
+ if (!clock)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&clock->lock);
+ clock->cycles.read = mlxsw_sp1_ptp_read_frc;
+ clock->cycles.shift = MLXSW_SP1_PTP_CLOCK_CYCLES_SHIFT;
+ clock->cycles.mult = clocksource_khz2mult(MLXSW_SP1_PTP_CLOCK_FREQ_KHZ,
+ clock->cycles.shift);
+ clock->nominal_c_mult = clock->cycles.mult;
+ clock->cycles.mask = CLOCKSOURCE_MASK(MLXSW_SP1_PTP_CLOCK_MASK);
+ clock->common.core = mlxsw_sp->core;
+
+ timecounter_init(&clock->tc, &clock->cycles, 0);
+
+	/* Calculate the period for the overflow watchdog, to make sure the
+	 * counter is read at least twice every wrap around.
+	 * The period is derived from the minimum between the maximum HW
+	 * cycle count (the clock source mask) and the maximum number of
+	 * cycles that can be multiplied by the clock multiplier without
+	 * exceeding 64 bits.
+ */
+ overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
+ overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3));
+
+ nsec = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles, 0, &frac);
+ clock->overflow_period = nsecs_to_jiffies(nsec);
+
+ INIT_DELAYED_WORK(&clock->overflow_work, mlxsw_sp1_ptp_clock_overflow);
+ mlxsw_core_schedule_dw(&clock->overflow_work, 0);
+
+ clock->common.ptp_info = mlxsw_sp1_ptp_clock_info;
+ clock->common.ptp = ptp_clock_register(&clock->common.ptp_info, dev);
+ if (IS_ERR(clock->common.ptp)) {
+ err = PTR_ERR(clock->common.ptp);
+ dev_err(dev, "ptp_clock_register failed %d\n", err);
+ goto err_ptp_clock_register;
+ }
+
+ return &clock->common;
+
+err_ptp_clock_register:
+ cancel_delayed_work_sync(&clock->overflow_work);
+ kfree(clock);
+ return ERR_PTR(err);
+}
+
+void mlxsw_sp1_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock_common)
+{
+ struct mlxsw_sp1_ptp_clock *clock =
+ container_of(clock_common, struct mlxsw_sp1_ptp_clock, common);
+
+ ptp_clock_unregister(clock_common->ptp);
+ cancel_delayed_work_sync(&clock->overflow_work);
+ kfree(clock);
+}
+
+static u64 mlxsw_sp2_ptp_read_utc(struct mlxsw_sp_ptp_clock *clock,
+ struct ptp_system_timestamp *sts)
+{
+ struct mlxsw_core *mlxsw_core = clock->core;
+ u32 utc_sec1, utc_sec2, utc_nsec;
+
+ utc_sec1 = mlxsw_core_read_utc_sec(mlxsw_core);
+ ptp_read_system_prets(sts);
+ utc_nsec = mlxsw_core_read_utc_nsec(mlxsw_core);
+ ptp_read_system_postts(sts);
+ utc_sec2 = mlxsw_core_read_utc_sec(mlxsw_core);
+
+ if (utc_sec1 != utc_sec2) {
+ /* Wrap around. */
+ ptp_read_system_prets(sts);
+ utc_nsec = mlxsw_core_read_utc_nsec(mlxsw_core);
+ ptp_read_system_postts(sts);
+ }
+
+ return (u64)utc_sec2 * NSEC_PER_SEC + utc_nsec;
+}
+
+static int
+mlxsw_sp2_ptp_phc_settime(struct mlxsw_sp_ptp_clock *clock, u64 nsec)
+{
+ struct mlxsw_core *mlxsw_core = clock->core;
+ char mtutc_pl[MLXSW_REG_MTUTC_LEN];
+ u32 sec, nsec_rem;
+
+ sec = div_u64_rem(nsec, NSEC_PER_SEC, &nsec_rem);
+ mlxsw_reg_mtutc_pack(mtutc_pl,
+ MLXSW_REG_MTUTC_OPERATION_SET_TIME_IMMEDIATE,
+ 0, sec, nsec_rem, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
+}
+
+static int mlxsw_sp2_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct mlxsw_sp_ptp_clock *clock =
+ container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
+
+ /* In Spectrum-2 and newer ASICs, the frequency adjustment in MTUTC is
+	 * reversed: positive values mean to decrease the frequency. Adjust the
+	 * sign of PPB to match this behavior.
+ */
+ return mlxsw_sp_ptp_phc_adjfreq(clock, -ppb);
+}
+
+static int mlxsw_sp2_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct mlxsw_sp_ptp_clock *clock =
+ container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ struct mlxsw_core *mlxsw_core = clock->core;
+ char mtutc_pl[MLXSW_REG_MTUTC_LEN];
+
+ /* HW time adjustment range is s16. If out of range, set time instead. */
+ if (delta < S16_MIN || delta > S16_MAX) {
+ u64 nsec;
+
+ nsec = mlxsw_sp2_ptp_read_utc(clock, NULL);
+ nsec += delta;
+
+ return mlxsw_sp2_ptp_phc_settime(clock, nsec);
+ }
+
+ mlxsw_reg_mtutc_pack(mtutc_pl,
+ MLXSW_REG_MTUTC_OPERATION_ADJUST_TIME,
+ 0, 0, 0, delta);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
+}
+
+static int mlxsw_sp2_ptp_gettimex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct mlxsw_sp_ptp_clock *clock =
+ container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ u64 nsec;
+
+ nsec = mlxsw_sp2_ptp_read_utc(clock, sts);
+ *ts = ns_to_timespec64(nsec);
+
+ return 0;
+}
+
+static int mlxsw_sp2_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct mlxsw_sp_ptp_clock *clock =
+ container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ u64 nsec = timespec64_to_ns(ts);
+
+ return mlxsw_sp2_ptp_phc_settime(clock, nsec);
+}
+
+static const struct ptp_clock_info mlxsw_sp2_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "mlxsw_sp_clock",
+ .max_adj = MLXSW_REG_MTUTC_MAX_FREQ_ADJ,
+ .adjfine = mlxsw_sp2_ptp_adjfine,
+ .adjtime = mlxsw_sp2_ptp_adjtime,
+ .gettimex64 = mlxsw_sp2_ptp_gettimex,
+ .settime64 = mlxsw_sp2_ptp_settime,
+};
+
+struct mlxsw_sp_ptp_clock *
+mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
+{
+ struct mlxsw_sp_ptp_clock *clock;
+ int err;
+
+ clock = kzalloc(sizeof(*clock), GFP_KERNEL);
+ if (!clock)
+ return ERR_PTR(-ENOMEM);
+
+ clock->core = mlxsw_sp->core;
+
+ clock->ptp_info = mlxsw_sp2_ptp_clock_info;
+
+ err = mlxsw_sp2_ptp_phc_settime(clock, 0);
+ if (err) {
+ dev_err(dev, "setting UTC time failed %d\n", err);
+ goto err_ptp_phc_settime;
+ }
+
+ clock->ptp = ptp_clock_register(&clock->ptp_info, dev);
+ if (IS_ERR(clock->ptp)) {
+ err = PTR_ERR(clock->ptp);
+ dev_err(dev, "ptp_clock_register failed %d\n", err);
+ goto err_ptp_clock_register;
+ }
+
+ return clock;
+
+err_ptp_clock_register:
+err_ptp_phc_settime:
+ kfree(clock);
+ return ERR_PTR(err);
+}
+
+void mlxsw_sp2_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock)
+{
+ ptp_clock_unregister(clock->ptp);
+ kfree(clock);
+}
+
+static int mlxsw_sp_ptp_parse(struct sk_buff *skb,
+ u8 *p_domain_number,
+ u8 *p_message_type,
+ u16 *p_sequence_id)
+{
+ unsigned int ptp_class;
+ struct ptp_header *hdr;
+
+ ptp_class = ptp_classify_raw(skb);
+
+ switch (ptp_class & PTP_CLASS_VMASK) {
+ case PTP_CLASS_V1:
+ case PTP_CLASS_V2:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
+ return -EINVAL;
+
+ *p_message_type = ptp_get_msgtype(hdr, ptp_class);
+ *p_domain_number = hdr->domain_number;
+ *p_sequence_id = be16_to_cpu(hdr->sequence_id);
+
+ return 0;
+}
+
+/* Returns 0 on successful insertion into the hash table, a negative errno
+ * otherwise (including -ENOMEM when the unmatched entry cannot be allocated).
+ */
+static int
+mlxsw_sp1_ptp_unmatched_save(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp1_ptp_key key,
+ struct sk_buff *skb,
+ u64 timestamp)
+{
+ int cycles = MLXSW_SP1_PTP_HT_GC_TIMEOUT / MLXSW_SP1_PTP_HT_GC_INTERVAL;
+ struct mlxsw_sp1_ptp_state *ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
+ struct mlxsw_sp1_ptp_unmatched *unmatched;
+ int err;
+
+ unmatched = kzalloc(sizeof(*unmatched), GFP_ATOMIC);
+ if (!unmatched)
+ return -ENOMEM;
+
+ unmatched->key = key;
+ unmatched->skb = skb;
+ unmatched->timestamp = timestamp;
+ unmatched->gc_cycle = ptp_state->gc_cycle + cycles;
+
+ err = rhltable_insert(&ptp_state->unmatched_ht, &unmatched->ht_node,
+ mlxsw_sp1_ptp_unmatched_ht_params);
+ if (err)
+ kfree(unmatched);
+
+ return err;
+}
+
+static struct mlxsw_sp1_ptp_unmatched *
+mlxsw_sp1_ptp_unmatched_lookup(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp1_ptp_key key, int *p_length)
+{
+ struct mlxsw_sp1_ptp_state *ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
+ struct mlxsw_sp1_ptp_unmatched *unmatched, *last = NULL;
+ struct rhlist_head *tmp, *list;
+ int length = 0;
+
+ list = rhltable_lookup(&ptp_state->unmatched_ht, &key,
+ mlxsw_sp1_ptp_unmatched_ht_params);
+ rhl_for_each_entry_rcu(unmatched, tmp, list, ht_node) {
+ last = unmatched;
+ length++;
+ }
+
+ *p_length = length;
+ return last;
+}
+
+static int
+mlxsw_sp1_ptp_unmatched_remove(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp1_ptp_unmatched *unmatched)
+{
+ struct mlxsw_sp1_ptp_state *ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
+
+ return rhltable_remove(&ptp_state->unmatched_ht,
+ &unmatched->ht_node,
+ mlxsw_sp1_ptp_unmatched_ht_params);
+}
+
+/* This function is called in the following scenarios:
+ *
+ * 1) When a packet is matched with its timestamp.
+ * 2) In several situations where it is necessary to immediately pass on
+ * an SKB without a timestamp.
+ * 3) From GC indirectly through mlxsw_sp1_ptp_unmatched_finish().
+ * This case is similar to 2) above.
+ */
+static void mlxsw_sp1_ptp_packet_finish(struct mlxsw_sp *mlxsw_sp,
+ struct sk_buff *skb, u16 local_port,
+ bool ingress,
+ struct skb_shared_hwtstamps *hwtstamps)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port;
+
+ /* Between capturing the packet and finishing it, there is a window of
+ * opportunity for the originating port to go away (e.g. due to a
+ * split). Also make sure the SKB device reference is still valid.
+ */
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ if (!(mlxsw_sp_port && (!skb->dev || skb->dev == mlxsw_sp_port->dev))) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ if (ingress) {
+ if (hwtstamps)
+ *skb_hwtstamps(skb) = *hwtstamps;
+ mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp);
+ } else {
+ /* skb_tstamp_tx() allows hwtstamps to be NULL. */
+ skb_tstamp_tx(skb, hwtstamps);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static void mlxsw_sp1_packet_timestamp(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp1_ptp_key key,
+ struct sk_buff *skb,
+ u64 timestamp)
+{
+ struct mlxsw_sp_ptp_clock *clock_common = mlxsw_sp->clock;
+ struct mlxsw_sp1_ptp_clock *clock =
+ container_of(clock_common, struct mlxsw_sp1_ptp_clock, common);
+
+ struct skb_shared_hwtstamps hwtstamps;
+ u64 nsec;
+
+ spin_lock_bh(&clock->lock);
+ nsec = timecounter_cyc2time(&clock->tc, timestamp);
+ spin_unlock_bh(&clock->lock);
+
+ hwtstamps.hwtstamp = ns_to_ktime(nsec);
+ mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
+ key.local_port, key.ingress, &hwtstamps);
+}
+
+static void
+mlxsw_sp1_ptp_unmatched_finish(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp1_ptp_unmatched *unmatched)
+{
+ if (unmatched->skb && unmatched->timestamp)
+ mlxsw_sp1_packet_timestamp(mlxsw_sp, unmatched->key,
+ unmatched->skb,
+ unmatched->timestamp);
+ else if (unmatched->skb)
+ mlxsw_sp1_ptp_packet_finish(mlxsw_sp, unmatched->skb,
+ unmatched->key.local_port,
+ unmatched->key.ingress, NULL);
+ kfree_rcu(unmatched, rcu);
+}
+
+static void mlxsw_sp1_ptp_unmatched_free_fn(void *ptr, void *arg)
+{
+ struct mlxsw_sp1_ptp_unmatched *unmatched = ptr;
+
+	/* This is invoked at a point where the ports are gone already. There
+	 * is nothing to do with whatever is left in the HT but to free it.
+ */
+ if (unmatched->skb)
+ dev_kfree_skb_any(unmatched->skb);
+ kfree_rcu(unmatched, rcu);
+}
+
+static void mlxsw_sp1_ptp_got_piece(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp1_ptp_key key,
+ struct sk_buff *skb, u64 timestamp)
+{
+ struct mlxsw_sp1_ptp_state *ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
+ struct mlxsw_sp1_ptp_unmatched *unmatched;
+ int length;
+ int err;
+
+ rcu_read_lock();
+
+ spin_lock(&ptp_state->unmatched_lock);
+
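+	/* A packet and its timestamp arrive as two independent events. If an
+	 * unmatched entry for this key already holds the missing half, pair
+	 * them up; otherwise save the half received now and wait for its
+	 * counterpart (or for the GC to eventually reclaim it).
+	 */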
+ unmatched = mlxsw_sp1_ptp_unmatched_lookup(mlxsw_sp, key, &length);
+ if (skb && unmatched && unmatched->timestamp) {
+ unmatched->skb = skb;
+ } else if (timestamp && unmatched && unmatched->skb) {
+ unmatched->timestamp = timestamp;
+ } else {
+		/* Either there is no entry to match, or the one that is there
+		 * is incompatible.
+ */
+ if (length < 100)
+ err = mlxsw_sp1_ptp_unmatched_save(mlxsw_sp, key,
+ skb, timestamp);
+ else
+ err = -E2BIG;
+ if (err && skb)
+ mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
+ key.local_port,
+ key.ingress, NULL);
+ unmatched = NULL;
+ }
+
+ if (unmatched) {
+ err = mlxsw_sp1_ptp_unmatched_remove(mlxsw_sp, unmatched);
+ WARN_ON_ONCE(err);
+ }
+
+ spin_unlock(&ptp_state->unmatched_lock);
+
+ if (unmatched)
+ mlxsw_sp1_ptp_unmatched_finish(mlxsw_sp, unmatched);
+
+ rcu_read_unlock();
+}
+
+static void mlxsw_sp1_ptp_got_packet(struct mlxsw_sp *mlxsw_sp,
+ struct sk_buff *skb, u16 local_port,
+ bool ingress)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp1_ptp_key key;
+ u8 types;
+ int err;
+
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ if (!mlxsw_sp_port)
+ goto immediate;
+
+ types = ingress ? mlxsw_sp_port->ptp.ing_types :
+ mlxsw_sp_port->ptp.egr_types;
+ if (!types)
+ goto immediate;
+
+ memset(&key, 0, sizeof(key));
+ key.local_port = local_port;
+ key.ingress = ingress;
+
+ err = mlxsw_sp_ptp_parse(skb, &key.domain_number, &key.message_type,
+ &key.sequence_id);
+ if (err)
+ goto immediate;
+
+ /* For packets whose timestamping was not enabled on this port, don't
+ * bother trying to match the timestamp.
+ */
+ if (!((1 << key.message_type) & types))
+ goto immediate;
+
+ mlxsw_sp1_ptp_got_piece(mlxsw_sp, key, skb, 0);
+ return;
+
+immediate:
+ mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb, local_port, ingress, NULL);
+}
+
+void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
+ u16 local_port, u8 message_type,
+ u8 domain_number, u16 sequence_id,
+ u64 timestamp)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp1_ptp_key key;
+ u8 types;
+
+ if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
+ return;
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ if (!mlxsw_sp_port)
+ return;
+
+ types = ingress ? mlxsw_sp_port->ptp.ing_types :
+ mlxsw_sp_port->ptp.egr_types;
+
+ /* For message types whose timestamping was not enabled on this port,
+ * don't bother with the timestamp.
+ */
+ if (!((1 << message_type) & types))
+ return;
+
+ memset(&key, 0, sizeof(key));
+ key.local_port = local_port;
+ key.domain_number = domain_number;
+ key.message_type = message_type;
+ key.sequence_id = sequence_id;
+ key.ingress = ingress;
+
+ mlxsw_sp1_ptp_got_piece(mlxsw_sp, key, NULL, timestamp);
+}
+
+void mlxsw_sp1_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u16 local_port)
+{
+ skb_reset_mac_header(skb);
+ mlxsw_sp1_ptp_got_packet(mlxsw_sp, skb, local_port, true);
+}
+
+void mlxsw_sp1_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
+ struct sk_buff *skb, u16 local_port)
+{
+ mlxsw_sp1_ptp_got_packet(mlxsw_sp, skb, local_port, false);
+}
+
+static void
+mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp1_ptp_state *ptp_state,
+ struct mlxsw_sp1_ptp_unmatched *unmatched)
+{
+ struct mlxsw_sp *mlxsw_sp = ptp_state->common.mlxsw_sp;
+ struct mlxsw_sp_ptp_port_dir_stats *stats;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ int err;
+
+ /* If an unmatched entry has an SKB, it has to be handed over to the
+ * networking stack. This is usually done from a trap handler, which is
+ * invoked in a softirq context. Here we are going to do it in process
+ * context. If that were to be interrupted by a softirq, it could cause
+ * a deadlock when an attempt is made to take an already-taken lock
+ * somewhere along the sending path. Disable softirqs to prevent this.
+ */
+ local_bh_disable();
+
+ spin_lock(&ptp_state->unmatched_lock);
+ err = rhltable_remove(&ptp_state->unmatched_ht, &unmatched->ht_node,
+ mlxsw_sp1_ptp_unmatched_ht_params);
+ spin_unlock(&ptp_state->unmatched_lock);
+
+ if (err)
+		/* The entry was matched with its counterpart during the walk. */
+ goto out;
+
+ mlxsw_sp_port = mlxsw_sp->ports[unmatched->key.local_port];
+ if (mlxsw_sp_port) {
+ stats = unmatched->key.ingress ?
+ &mlxsw_sp_port->ptp.stats.rx_gcd :
+ &mlxsw_sp_port->ptp.stats.tx_gcd;
+ if (unmatched->skb)
+ stats->packets++;
+ else
+ stats->timestamps++;
+ }
+
+ /* mlxsw_sp1_ptp_unmatched_finish() invokes netif_receive_skb(). While
+ * the comment at that function states that it can only be called in
+ * soft IRQ context, this pattern of local_bh_disable() +
+ * netif_receive_skb(), in process context, is seen elsewhere in the
+ * kernel, notably in pktgen.
+ */
+ mlxsw_sp1_ptp_unmatched_finish(mlxsw_sp, unmatched);
+
+out:
+ local_bh_enable();
+}
+
+static void mlxsw_sp1_ptp_ht_gc(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct mlxsw_sp1_ptp_unmatched *unmatched;
+ struct mlxsw_sp1_ptp_state *ptp_state;
+ struct rhashtable_iter iter;
+ u32 gc_cycle;
+ void *obj;
+
+ ptp_state = container_of(dwork, struct mlxsw_sp1_ptp_state, ht_gc_dw);
+ gc_cycle = ptp_state->gc_cycle++;
+
+ rhltable_walk_enter(&ptp_state->unmatched_ht, &iter);
+ rhashtable_walk_start(&iter);
+ while ((obj = rhashtable_walk_next(&iter))) {
+ if (IS_ERR(obj))
+ continue;
+
+ unmatched = obj;
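+		/* Entries are saved with a target GC cycle; only collect those
+		 * whose cycle has been reached, so that recently added entries
+		 * still get a chance to be matched.
+		 */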
+ if (unmatched->gc_cycle <= gc_cycle)
+ mlxsw_sp1_ptp_ht_gc_collect(ptp_state, unmatched);
+ }
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
+
+ mlxsw_core_schedule_dw(&ptp_state->ht_gc_dw,
+ MLXSW_SP1_PTP_HT_GC_INTERVAL);
+}
+
+static int mlxsw_sp_ptp_mtptpt_set(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_reg_mtptpt_trap_id trap_id,
+ u16 message_type)
+{
+ char mtptpt_pl[MLXSW_REG_MTPTPT_LEN];
+
+ mlxsw_reg_mtptpt_pack(mtptpt_pl, trap_id, message_type);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mtptpt), mtptpt_pl);
+}
+
+static int mlxsw_sp1_ptp_set_fifo_clr_on_trap(struct mlxsw_sp *mlxsw_sp,
+ bool clr)
+{
+ char mogcr_pl[MLXSW_REG_MOGCR_LEN] = {0};
+ int err;
+
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mogcr_ptp_iftc_set(mogcr_pl, clr);
+ mlxsw_reg_mogcr_ptp_eftc_set(mogcr_pl, clr);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
+}
+
+static int mlxsw_sp1_ptp_mtpppc_set(struct mlxsw_sp *mlxsw_sp,
+ u16 ing_types, u16 egr_types)
+{
+ char mtpppc_pl[MLXSW_REG_MTPPPC_LEN];
+
+ mlxsw_reg_mtpppc_pack(mtpppc_pl, ing_types, egr_types);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mtpppc), mtpppc_pl);
+}
+
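+/* Per-speed PTP shaper parameters and ingress/egress timestamp adjustments.
+ * Only speeds listed here have the PTP shaper enabled (see
+ * mlxsw_sp1_ptp_port_shaper_check()).
+ */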
+struct mlxsw_sp1_ptp_shaper_params {
+ u32 ethtool_speed;
+ enum mlxsw_reg_qpsc_port_speed port_speed;
+ u8 shaper_time_exp;
+ u8 shaper_time_mantissa;
+ u8 shaper_inc;
+ u8 shaper_bs;
+ u8 port_to_shaper_credits;
+ int ing_timestamp_inc;
+ int egr_timestamp_inc;
+};
+
+static const struct mlxsw_sp1_ptp_shaper_params
+mlxsw_sp1_ptp_shaper_params[] = {
+ {
+ .ethtool_speed = SPEED_100,
+ .port_speed = MLXSW_REG_QPSC_PORT_SPEED_100M,
+ .shaper_time_exp = 4,
+ .shaper_time_mantissa = 12,
+ .shaper_inc = 9,
+ .shaper_bs = 1,
+ .port_to_shaper_credits = 1,
+ .ing_timestamp_inc = -313,
+ .egr_timestamp_inc = 313,
+ },
+ {
+ .ethtool_speed = SPEED_1000,
+ .port_speed = MLXSW_REG_QPSC_PORT_SPEED_1G,
+ .shaper_time_exp = 0,
+ .shaper_time_mantissa = 12,
+ .shaper_inc = 6,
+ .shaper_bs = 0,
+ .port_to_shaper_credits = 1,
+ .ing_timestamp_inc = -35,
+ .egr_timestamp_inc = 35,
+ },
+ {
+ .ethtool_speed = SPEED_10000,
+ .port_speed = MLXSW_REG_QPSC_PORT_SPEED_10G,
+ .shaper_time_exp = 0,
+ .shaper_time_mantissa = 2,
+ .shaper_inc = 14,
+ .shaper_bs = 1,
+ .port_to_shaper_credits = 1,
+ .ing_timestamp_inc = -11,
+ .egr_timestamp_inc = 11,
+ },
+ {
+ .ethtool_speed = SPEED_25000,
+ .port_speed = MLXSW_REG_QPSC_PORT_SPEED_25G,
+ .shaper_time_exp = 0,
+ .shaper_time_mantissa = 0,
+ .shaper_inc = 11,
+ .shaper_bs = 1,
+ .port_to_shaper_credits = 1,
+ .ing_timestamp_inc = -14,
+ .egr_timestamp_inc = 14,
+ },
+};
+
+#define MLXSW_SP1_PTP_SHAPER_PARAMS_LEN ARRAY_SIZE(mlxsw_sp1_ptp_shaper_params)
+
+static int mlxsw_sp1_ptp_shaper_params_set(struct mlxsw_sp *mlxsw_sp)
+{
+ const struct mlxsw_sp1_ptp_shaper_params *params;
+ char qpsc_pl[MLXSW_REG_QPSC_LEN];
+ int i, err;
+
+ for (i = 0; i < MLXSW_SP1_PTP_SHAPER_PARAMS_LEN; i++) {
+ params = &mlxsw_sp1_ptp_shaper_params[i];
+ mlxsw_reg_qpsc_pack(qpsc_pl, params->port_speed,
+ params->shaper_time_exp,
+ params->shaper_time_mantissa,
+ params->shaper_inc, params->shaper_bs,
+ params->port_to_shaper_credits,
+ params->ing_timestamp_inc,
+ params->egr_timestamp_inc);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpsc), qpsc_pl);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_ptp_traps_set(struct mlxsw_sp *mlxsw_sp)
+{
+ u16 event_message_type;
+ int err;
+
+ /* Deliver these message types as PTP0. */
+ event_message_type = BIT(PTP_MSGTYPE_SYNC) |
+ BIT(PTP_MSGTYPE_DELAY_REQ) |
+ BIT(PTP_MSGTYPE_PDELAY_REQ) |
+ BIT(PTP_MSGTYPE_PDELAY_RESP);
+
+ err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0,
+ event_message_type);
+ if (err)
+ return err;
+
+ /* Everything else is PTP1. */
+ err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1,
+ ~event_message_type);
+ if (err)
+ goto err_mtptpt1_set;
+
+ return 0;
+
+err_mtptpt1_set:
+ mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
+ return err;
+}
+
+static void mlxsw_sp_ptp_traps_unset(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0);
+ mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
+}
+
+struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp1_ptp_state *ptp_state;
+ int err;
+
+ err = mlxsw_sp1_ptp_shaper_params_set(mlxsw_sp);
+ if (err)
+ return ERR_PTR(err);
+
+ ptp_state = kzalloc(sizeof(*ptp_state), GFP_KERNEL);
+ if (!ptp_state)
+ return ERR_PTR(-ENOMEM);
+ ptp_state->common.mlxsw_sp = mlxsw_sp;
+
+ spin_lock_init(&ptp_state->unmatched_lock);
+
+ err = rhltable_init(&ptp_state->unmatched_ht,
+ &mlxsw_sp1_ptp_unmatched_ht_params);
+ if (err)
+ goto err_hashtable_init;
+
+ err = mlxsw_sp_ptp_traps_set(mlxsw_sp);
+ if (err)
+ goto err_ptp_traps_set;
+
+ err = mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, true);
+ if (err)
+ goto err_fifo_clr;
+
+ INIT_DELAYED_WORK(&ptp_state->ht_gc_dw, mlxsw_sp1_ptp_ht_gc);
+ mlxsw_core_schedule_dw(&ptp_state->ht_gc_dw,
+ MLXSW_SP1_PTP_HT_GC_INTERVAL);
+ return &ptp_state->common;
+
+err_fifo_clr:
+ mlxsw_sp_ptp_traps_unset(mlxsw_sp);
+err_ptp_traps_set:
+ rhltable_destroy(&ptp_state->unmatched_ht);
+err_hashtable_init:
+ kfree(ptp_state);
+ return ERR_PTR(err);
+}
+
+void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state_common)
+{
+ struct mlxsw_sp *mlxsw_sp = ptp_state_common->mlxsw_sp;
+ struct mlxsw_sp1_ptp_state *ptp_state;
+
+ ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
+
+ cancel_delayed_work_sync(&ptp_state->ht_gc_dw);
+ mlxsw_sp1_ptp_mtpppc_set(mlxsw_sp, 0, 0);
+ mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, false);
+ mlxsw_sp_ptp_traps_unset(mlxsw_sp);
+ rhltable_free_and_destroy(&ptp_state->unmatched_ht,
+ &mlxsw_sp1_ptp_unmatched_free_fn, NULL);
+ kfree(ptp_state);
+}
+
+int mlxsw_sp1_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config)
+{
+ *config = mlxsw_sp_port->ptp.hwtstamp_config;
+ return 0;
+}
+
+static int
+mlxsw_sp1_ptp_get_message_types(const struct hwtstamp_config *config,
+ u16 *p_ing_types, u16 *p_egr_types,
+ enum hwtstamp_rx_filters *p_rx_filter)
+{
+ enum hwtstamp_rx_filters rx_filter = config->rx_filter;
+ enum hwtstamp_tx_types tx_type = config->tx_type;
+ u16 ing_types = 0x00;
+ u16 egr_types = 0x00;
+
+ switch (tx_type) {
+ case HWTSTAMP_TX_OFF:
+ egr_types = 0x00;
+ break;
+ case HWTSTAMP_TX_ON:
+ egr_types = 0xff;
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ case HWTSTAMP_TX_ONESTEP_P2P:
+ return -ERANGE;
+ default:
+ return -EINVAL;
+ }
+
+ switch (rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ ing_types = 0x00;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ ing_types = 0x01;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ ing_types = 0x02;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ ing_types = 0x0f;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ ing_types = 0xff;
+ break;
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ return -ERANGE;
+ default:
+ return -EINVAL;
+ }
+
+ *p_ing_types = ing_types;
+ *p_egr_types = egr_types;
+ *p_rx_filter = rx_filter;
+ return 0;
+}
+
+static int mlxsw_sp1_ptp_mtpppc_update(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 ing_types, u16 egr_types)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_port *tmp;
+ u16 orig_ing_types = 0;
+ u16 orig_egr_types = 0;
+ int err;
+ int i;
+
+ /* MTPPPC configures timestamping globally, not per port. Find the
+ * configuration that contains all configured timestamping requests.
+ */
+ for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
+ tmp = mlxsw_sp->ports[i];
+ if (tmp) {
+ orig_ing_types |= tmp->ptp.ing_types;
+ orig_egr_types |= tmp->ptp.egr_types;
+ }
+ if (tmp && tmp != mlxsw_sp_port) {
+ ing_types |= tmp->ptp.ing_types;
+ egr_types |= tmp->ptp.egr_types;
+ }
+ }
+
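+	/* Increase the parsing depth when the first port enables PTP
+	 * timestamping, and decrease it again when the last port disables it.
+	 */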
+ if ((ing_types || egr_types) && !(orig_ing_types || orig_egr_types)) {
+ err = mlxsw_sp_parsing_depth_inc(mlxsw_sp);
+ if (err) {
+			netdev_err(mlxsw_sp_port->dev, "Failed to increase parsing depth\n");
+ return err;
+ }
+ }
+ if (!(ing_types || egr_types) && (orig_ing_types || orig_egr_types))
+ mlxsw_sp_parsing_depth_dec(mlxsw_sp);
+
+ return mlxsw_sp1_ptp_mtpppc_set(mlxsw_sp_port->mlxsw_sp,
+ ing_types, egr_types);
+}
+
+static bool mlxsw_sp1_ptp_hwtstamp_enabled(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ return mlxsw_sp_port->ptp.ing_types || mlxsw_sp_port->ptp.egr_types;
+}
+
+static int
+mlxsw_sp1_ptp_port_shaper_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qeec_pl[MLXSW_REG_QEEC_LEN];
+
+ mlxsw_reg_qeec_ptps_pack(qeec_pl, mlxsw_sp_port->local_port, enable);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
+}
+
+static int mlxsw_sp1_ptp_port_shaper_check(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ bool ptps = false;
+ int err, i;
+ u32 speed;
+
+ if (!mlxsw_sp1_ptp_hwtstamp_enabled(mlxsw_sp_port))
+ return mlxsw_sp1_ptp_port_shaper_set(mlxsw_sp_port, false);
+
+ err = mlxsw_sp_port_speed_get(mlxsw_sp_port, &speed);
+ if (err)
+ return err;
+
+ for (i = 0; i < MLXSW_SP1_PTP_SHAPER_PARAMS_LEN; i++) {
+ if (mlxsw_sp1_ptp_shaper_params[i].ethtool_speed == speed) {
+ ptps = true;
+ break;
+ }
+ }
+
+ return mlxsw_sp1_ptp_port_shaper_set(mlxsw_sp_port, ptps);
+}
+
+void mlxsw_sp1_ptp_shaper_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ int err;
+
+ mlxsw_sp_port = container_of(dwork, struct mlxsw_sp_port,
+ ptp.shaper_dw);
+
+ if (!mlxsw_sp1_ptp_hwtstamp_enabled(mlxsw_sp_port))
+ return;
+
+ err = mlxsw_sp1_ptp_port_shaper_check(mlxsw_sp_port);
+ if (err)
+ netdev_err(mlxsw_sp_port->dev, "Failed to set up PTP shaper\n");
+}
+
+int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config)
+{
+ enum hwtstamp_rx_filters rx_filter;
+ u16 ing_types;
+ u16 egr_types;
+ int err;
+
+ err = mlxsw_sp1_ptp_get_message_types(config, &ing_types, &egr_types,
+ &rx_filter);
+ if (err)
+ return err;
+
+ err = mlxsw_sp1_ptp_mtpppc_update(mlxsw_sp_port, ing_types, egr_types);
+ if (err)
+ return err;
+
+ mlxsw_sp_port->ptp.hwtstamp_config = *config;
+ mlxsw_sp_port->ptp.ing_types = ing_types;
+ mlxsw_sp_port->ptp.egr_types = egr_types;
+
+ err = mlxsw_sp1_ptp_port_shaper_check(mlxsw_sp_port);
+ if (err)
+ return err;
+
+ /* Notify the ioctl caller what we are actually timestamping. */
+ config->rx_filter = rx_filter;
+
+ return 0;
+}
+
+int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
+ struct ethtool_ts_info *info)
+{
+ info->phc_index = ptp_clock_index(mlxsw_sp->clock->ptp);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+struct mlxsw_sp_ptp_port_stat {
+ char str[ETH_GSTRING_LEN];
+ ptrdiff_t offset;
+};
+
+#define MLXSW_SP_PTP_PORT_STAT(NAME, FIELD) \
+ { \
+ .str = NAME, \
+ .offset = offsetof(struct mlxsw_sp_ptp_port_stats, \
+ FIELD), \
+ }
+
+static const struct mlxsw_sp_ptp_port_stat mlxsw_sp_ptp_port_stats[] = {
+ MLXSW_SP_PTP_PORT_STAT("ptp_rx_gcd_packets", rx_gcd.packets),
+ MLXSW_SP_PTP_PORT_STAT("ptp_rx_gcd_timestamps", rx_gcd.timestamps),
+ MLXSW_SP_PTP_PORT_STAT("ptp_tx_gcd_packets", tx_gcd.packets),
+ MLXSW_SP_PTP_PORT_STAT("ptp_tx_gcd_timestamps", tx_gcd.timestamps),
+};
+
+#undef MLXSW_SP_PTP_PORT_STAT
+
+#define MLXSW_SP_PTP_PORT_STATS_LEN \
+ ARRAY_SIZE(mlxsw_sp_ptp_port_stats)
+
+int mlxsw_sp1_get_stats_count(void)
+{
+ return MLXSW_SP_PTP_PORT_STATS_LEN;
+}
+
+void mlxsw_sp1_get_stats_strings(u8 **p)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP_PTP_PORT_STATS_LEN; i++) {
+ memcpy(*p, mlxsw_sp_ptp_port_stats[i].str,
+ ETH_GSTRING_LEN);
+ *p += ETH_GSTRING_LEN;
+ }
+}
+
+void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ u64 *data, int data_index)
+{
+ void *stats = &mlxsw_sp_port->ptp.stats;
+ ptrdiff_t offset;
+ int i;
+
+ data += data_index;
+ for (i = 0; i < MLXSW_SP_PTP_PORT_STATS_LEN; i++) {
+ offset = mlxsw_sp_ptp_port_stats[i].offset;
+ *data++ = *(u64 *)(stats + offset);
+ }
+}
+
+struct mlxsw_sp_ptp_state *mlxsw_sp2_ptp_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state;
+ int err;
+
+ ptp_state = kzalloc(sizeof(*ptp_state), GFP_KERNEL);
+ if (!ptp_state)
+ return ERR_PTR(-ENOMEM);
+
+ ptp_state->common.mlxsw_sp = mlxsw_sp;
+
+ err = mlxsw_sp_ptp_traps_set(mlxsw_sp);
+ if (err)
+ goto err_ptp_traps_set;
+
+ refcount_set(&ptp_state->ptp_port_enabled_ref, 0);
+ mutex_init(&ptp_state->lock);
+ return &ptp_state->common;
+
+err_ptp_traps_set:
+ kfree(ptp_state);
+ return ERR_PTR(err);
+}
+
+void mlxsw_sp2_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state_common)
+{
+ struct mlxsw_sp *mlxsw_sp = ptp_state_common->mlxsw_sp;
+ struct mlxsw_sp2_ptp_state *ptp_state;
+
+ ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
+
+ mutex_destroy(&ptp_state->lock);
+ mlxsw_sp_ptp_traps_unset(mlxsw_sp);
+ kfree(ptp_state);
+}
+
+static u32 mlxsw_ptp_utc_time_stamp_sec_get(struct mlxsw_core *mlxsw_core,
+ u8 cqe_ts_sec)
+{
+ u32 utc_sec = mlxsw_core_read_utc_sec(mlxsw_core);
+
+ if (cqe_ts_sec > (utc_sec & 0xff))
+ /* Time stamp above the last bits of UTC (UTC & 0xff) means the
+ * latter has wrapped after the time stamp was collected.
+ */
+ utc_sec -= 256;
+
+ utc_sec &= ~0xff;
+ utc_sec |= cqe_ts_sec;
+
+ return utc_sec;
+}
+
+static void mlxsw_sp2_ptp_hwtstamp_fill(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_skb_cb *cb,
+ struct skb_shared_hwtstamps *hwtstamps)
+{
+ u64 ts_sec, ts_nsec, nsec;
+
+ WARN_ON_ONCE(!cb->cqe_ts.sec && !cb->cqe_ts.nsec);
+
+	/* The time stamp in the CQE is represented by 38 bits, which is a short
+	 * representation of UTC time. Software should create the full time
+	 * stamp using the global UTC clock. The seconds field has only 8 bits
+	 * in the CQE; to create the full time stamp, use the current UTC time
+	 * and fix the seconds according to the relation between UTC seconds
+	 * and CQE seconds.
+	 */
+ ts_sec = mlxsw_ptp_utc_time_stamp_sec_get(mlxsw_core, cb->cqe_ts.sec);
+ ts_nsec = cb->cqe_ts.nsec;
+
+ nsec = ts_sec * NSEC_PER_SEC + ts_nsec;
+
+ hwtstamps->hwtstamp = ns_to_ktime(nsec);
+}
+
+void mlxsw_sp2_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u16 local_port)
+{
+ struct skb_shared_hwtstamps hwtstamps;
+
+ mlxsw_sp2_ptp_hwtstamp_fill(mlxsw_sp->core, mlxsw_skb_cb(skb),
+ &hwtstamps);
+ *skb_hwtstamps(skb) = hwtstamps;
+ mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp);
+}
+
+void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
+ struct sk_buff *skb, u16 local_port)
+{
+ struct skb_shared_hwtstamps hwtstamps;
+
+ mlxsw_sp2_ptp_hwtstamp_fill(mlxsw_sp->core, mlxsw_skb_cb(skb),
+ &hwtstamps);
+ skb_tstamp_tx(skb, &hwtstamps);
+ dev_kfree_skb_any(skb);
+}
+
+int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state;
+
+ ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+
+ mutex_lock(&ptp_state->lock);
+ *config = ptp_state->config;
+ mutex_unlock(&ptp_state->lock);
+
+ return 0;
+}
+
+static int
+mlxsw_sp2_ptp_get_message_types(const struct hwtstamp_config *config,
+ u16 *p_ing_types, u16 *p_egr_types,
+ enum hwtstamp_rx_filters *p_rx_filter)
+{
+ enum hwtstamp_rx_filters rx_filter = config->rx_filter;
+ enum hwtstamp_tx_types tx_type = config->tx_type;
+ u16 ing_types = 0x00;
+ u16 egr_types = 0x00;
+
+ *p_rx_filter = rx_filter;
+
+ switch (rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ ing_types = 0x00;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+		/* In Spectrum-2 and above, all packets get a time stamp by
+		 * default and the driver fills in the time stamp only for
+		 * event packets. Return all event types even if only specific
+		 * types were requested.
+		 */
+ ing_types = 0x0f;
+ *p_rx_filter = HWTSTAMP_FILTER_SOME;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ return -ERANGE;
+ default:
+ return -EINVAL;
+ }
+
+ switch (tx_type) {
+ case HWTSTAMP_TX_OFF:
+ egr_types = 0x00;
+ break;
+ case HWTSTAMP_TX_ON:
+ egr_types = 0x0f;
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ case HWTSTAMP_TX_ONESTEP_P2P:
+ return -ERANGE;
+ default:
+ return -EINVAL;
+ }
+
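+	/* RX and TX timestamping can only be enabled or disabled together. */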
+ if ((ing_types && !egr_types) || (!ing_types && egr_types))
+ return -EINVAL;
+
+ *p_ing_types = ing_types;
+ *p_egr_types = egr_types;
+ return 0;
+}
+
+static int mlxsw_sp2_ptp_mtpcpc_set(struct mlxsw_sp *mlxsw_sp, bool ptp_trap_en,
+ u16 ing_types, u16 egr_types)
+{
+ char mtpcpc_pl[MLXSW_REG_MTPCPC_LEN];
+
+ mlxsw_reg_mtpcpc_pack(mtpcpc_pl, false, 0, ptp_trap_en, ing_types,
+ egr_types);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mtpcpc), mtpcpc_pl);
+}
+
+static int mlxsw_sp2_ptp_enable(struct mlxsw_sp *mlxsw_sp, u16 ing_types,
+ u16 egr_types,
+ struct hwtstamp_config new_config)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
+ int err;
+
+ err = mlxsw_sp2_ptp_mtpcpc_set(mlxsw_sp, true, ing_types, egr_types);
+ if (err)
+ return err;
+
+ ptp_state->config = new_config;
+ return 0;
+}
+
+static int mlxsw_sp2_ptp_disable(struct mlxsw_sp *mlxsw_sp,
+ struct hwtstamp_config new_config)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
+ int err;
+
+ err = mlxsw_sp2_ptp_mtpcpc_set(mlxsw_sp, false, 0, 0);
+ if (err)
+ return err;
+
+ ptp_state->config = new_config;
+ return 0;
+}
+
+static int mlxsw_sp2_ptp_configure_port(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 ing_types, u16 egr_types,
+ struct hwtstamp_config new_config)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state;
+ int err;
+
+ ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+
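+	/* Timestamping is configured once for the whole device; the first port
+	 * to request it programs the configuration, later ports only take a
+	 * reference.
+	 */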
+ if (refcount_inc_not_zero(&ptp_state->ptp_port_enabled_ref))
+ return 0;
+
+ err = mlxsw_sp2_ptp_enable(mlxsw_sp_port->mlxsw_sp, ing_types,
+ egr_types, new_config);
+ if (err)
+ return err;
+
+ refcount_set(&ptp_state->ptp_port_enabled_ref, 1);
+
+ return 0;
+}
+
+static int mlxsw_sp2_ptp_deconfigure_port(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config new_config)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state;
+ int err;
+
+ ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+
+ if (!refcount_dec_and_test(&ptp_state->ptp_port_enabled_ref))
+ return 0;
+
+ err = mlxsw_sp2_ptp_disable(mlxsw_sp_port->mlxsw_sp, new_config);
+ if (err)
+ goto err_ptp_disable;
+
+ return 0;
+
+err_ptp_disable:
+ refcount_set(&ptp_state->ptp_port_enabled_ref, 1);
+ return err;
+}
+
+int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state;
+ enum hwtstamp_rx_filters rx_filter;
+ struct hwtstamp_config new_config;
+ u16 new_ing_types, new_egr_types;
+ bool ptp_enabled;
+ int err;
+
+ ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+ mutex_lock(&ptp_state->lock);
+
+ err = mlxsw_sp2_ptp_get_message_types(config, &new_ing_types,
+ &new_egr_types, &rx_filter);
+ if (err)
+ goto err_get_message_types;
+
+ new_config.flags = config->flags;
+ new_config.tx_type = config->tx_type;
+ new_config.rx_filter = rx_filter;
+
+ ptp_enabled = mlxsw_sp_port->ptp.ing_types ||
+ mlxsw_sp_port->ptp.egr_types;
+
+ if ((new_ing_types || new_egr_types) && !ptp_enabled) {
+ err = mlxsw_sp2_ptp_configure_port(mlxsw_sp_port, new_ing_types,
+ new_egr_types, new_config);
+ if (err)
+ goto err_configure_port;
+ } else if (!new_ing_types && !new_egr_types && ptp_enabled) {
+ err = mlxsw_sp2_ptp_deconfigure_port(mlxsw_sp_port, new_config);
+ if (err)
+ goto err_deconfigure_port;
+ }
+
+ mlxsw_sp_port->ptp.ing_types = new_ing_types;
+ mlxsw_sp_port->ptp.egr_types = new_egr_types;
+
+ /* Notify the ioctl caller what we are actually timestamping. */
+ config->rx_filter = rx_filter;
+ mutex_unlock(&ptp_state->lock);
+
+ return 0;
+
+err_deconfigure_port:
+err_configure_port:
+err_get_message_types:
+ mutex_unlock(&ptp_state->lock);
+ return err;
+}
+
+int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
+ struct ethtool_ts_info *info)
+{
+ info->phc_index = ptp_clock_index(mlxsw_sp->clock->ptp);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+}
+
+int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ mlxsw_sp_txhdr_construct(skb, tx_info);
+ return 0;
+}
+
+int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ /* In Spectrum-2 and Spectrum-3, in order for PTP event packets to have
+ * their correction field correctly set on the egress port they must be
+ * transmitted as data packets. Such packets ingress the ASIC via the
+ * CPU port and must have a VLAN tag, as the CPU port is not configured
+ * with a PVID. Push the default VLAN (4095), which is configured as
+ * egress untagged on all the ports.
+ */
+ if (!skb_vlan_tagged(skb)) {
+ skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
+ MLXSW_SP_DEFAULT_VID);
+ if (!skb) {
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ return -ENOMEM;
+ }
+ }
+
+ return mlxsw_sp_txhdr_ptp_data_construct(mlxsw_core, mlxsw_sp_port, skb,
+ tx_info);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
new file mode 100644
index 000000000..a8b882309
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
@@ -0,0 +1,263 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_SPECTRUM_PTP_H
+#define _MLXSW_SPECTRUM_PTP_H
+
+#include <linux/device.h>
+#include <linux/rhashtable.h>
+
+struct mlxsw_sp;
+struct mlxsw_sp_port;
+struct mlxsw_sp_ptp_clock;
+
+static inline int mlxsw_sp_ptp_get_ts_info_noptp(struct ethtool_ts_info *info)
+{
+ info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ info->phc_index = -1;
+ return 0;
+}
+
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
+
+struct mlxsw_sp_ptp_clock *
+mlxsw_sp1_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev);
+
+void mlxsw_sp1_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock);
+
+struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp);
+
+void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state);
+
+void mlxsw_sp1_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u16 local_port);
+
+void mlxsw_sp1_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
+ struct sk_buff *skb, u16 local_port);
+
+void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
+ u16 local_port, u8 message_type,
+ u8 domain_number, u16 sequence_id,
+ u64 timestamp);
+
+int mlxsw_sp1_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config);
+
+int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config);
+
+void mlxsw_sp1_ptp_shaper_work(struct work_struct *work);
+
+int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
+ struct ethtool_ts_info *info);
+
+int mlxsw_sp1_get_stats_count(void);
+void mlxsw_sp1_get_stats_strings(u8 **p);
+void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ u64 *data, int data_index);
+
+int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+
+struct mlxsw_sp_ptp_clock *
+mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev);
+
+void mlxsw_sp2_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock);
+
+struct mlxsw_sp_ptp_state *mlxsw_sp2_ptp_init(struct mlxsw_sp *mlxsw_sp);
+
+void mlxsw_sp2_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state);
+
+void mlxsw_sp2_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u16 local_port);
+
+void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
+ struct sk_buff *skb, u16 local_port);
+
+int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config);
+
+int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config);
+
+int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
+ struct ethtool_ts_info *info);
+
+int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+
+#else
+
+static inline struct mlxsw_sp_ptp_clock *
+mlxsw_sp1_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
+{
+ return NULL;
+}
+
+static inline void mlxsw_sp1_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock)
+{
+}
+
+static inline struct mlxsw_sp_ptp_state *
+mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
+{
+ return NULL;
+}
+
+static inline void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state)
+{
+}
+
+static inline void mlxsw_sp1_ptp_receive(struct mlxsw_sp *mlxsw_sp,
+ struct sk_buff *skb, u16 local_port)
+{
+ mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp);
+}
+
+static inline void mlxsw_sp1_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
+ struct sk_buff *skb, u16 local_port)
+{
+ dev_kfree_skb_any(skb);
+}
+
+static inline void
+mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
+ u16 local_port, u8 message_type,
+ u8 domain_number,
+ u16 sequence_id, u64 timestamp)
+{
+}
+
+static inline int
+mlxsw_sp1_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void mlxsw_sp1_ptp_shaper_work(struct work_struct *work)
+{
+}
+
+static inline int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
+ struct ethtool_ts_info *info)
+{
+ return mlxsw_sp_ptp_get_ts_info_noptp(info);
+}
+
+static inline int mlxsw_sp1_get_stats_count(void)
+{
+ return 0;
+}
+
+static inline void mlxsw_sp1_get_stats_strings(u8 **p)
+{
+}
+
+static inline void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ u64 *data, int data_index)
+{
+}
+
+static inline int
+mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline struct mlxsw_sp_ptp_clock *
+mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
+{
+ return NULL;
+}
+
+static inline void mlxsw_sp2_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock)
+{
+}
+
+static inline struct mlxsw_sp_ptp_state *
+mlxsw_sp2_ptp_init(struct mlxsw_sp *mlxsw_sp)
+{
+ return NULL;
+}
+
+static inline void mlxsw_sp2_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state)
+{
+}
+
+static inline void mlxsw_sp2_ptp_receive(struct mlxsw_sp *mlxsw_sp,
+ struct sk_buff *skb, u16 local_port)
+{
+ mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp);
+}
+
+static inline void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
+ struct sk_buff *skb, u16 local_port)
+{
+ dev_kfree_skb_any(skb);
+}
+
+static inline int
+mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
+ struct ethtool_ts_info *info)
+{
+ return mlxsw_sp_ptp_get_ts_info_noptp(info);
+}
+
+static inline int
+mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+static inline void mlxsw_sp2_ptp_shaper_work(struct work_struct *work)
+{
+}
+
+static inline int mlxsw_sp2_get_stats_count(void)
+{
+ return 0;
+}
+
+static inline void mlxsw_sp2_get_stats_strings(u8 **p)
+{
+}
+
+static inline void mlxsw_sp2_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ u64 *data, int data_index)
+{
+}
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
new file mode 100644
index 000000000..4243d3b88
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -0,0 +1,2337 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <net/pkt_cls.h>
+#include <net/red.h>
+
+#include "spectrum.h"
+#include "spectrum_span.h"
+#include "reg.h"
+
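+/* PRIO/ETS band 0 maps to the last traffic class (IEEE_8021QAZ_MAX_TCS - 1),
+ * band 1 to the one before it, and so on. Child class IDs are 1-based.
+ */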
+#define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - band - 1)
+#define MLXSW_SP_PRIO_CHILD_TO_TCLASS(child) \
+ MLXSW_SP_PRIO_BAND_TO_TCLASS((child - 1))
+
+enum mlxsw_sp_qdisc_type {
+ MLXSW_SP_QDISC_NO_QDISC,
+ MLXSW_SP_QDISC_RED,
+ MLXSW_SP_QDISC_PRIO,
+ MLXSW_SP_QDISC_ETS,
+ MLXSW_SP_QDISC_TBF,
+ MLXSW_SP_QDISC_FIFO,
+};
+
+struct mlxsw_sp_qdisc;
+
+struct mlxsw_sp_qdisc_ops {
+ enum mlxsw_sp_qdisc_type type;
+ int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port,
+ void *params);
+ int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
+ int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
+ int (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_qopt_offload_stats *stats_ptr);
+ int (*get_xstats)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *xstats_ptr);
+ void (*clean_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
+ /* unoffload - to be used for a qdisc that stops being offloaded without
+ * being destroyed.
+ */
+ void (*unoffload)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
+ struct mlxsw_sp_qdisc *(*find_class)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ u32 parent);
+ unsigned int num_classes;
+
+ u8 (*get_prio_bitmap)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc *child);
+ int (*get_tclass_num)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc *child);
+};
+
+struct mlxsw_sp_qdisc_ets_band {
+ u8 prio_bitmap;
+ int tclass_num;
+};
+
+struct mlxsw_sp_qdisc_ets_data {
+ struct mlxsw_sp_qdisc_ets_band bands[IEEE_8021QAZ_MAX_TCS];
+};
+
+struct mlxsw_sp_qdisc {
+ u32 handle;
+ union {
+ struct red_stats red;
+ } xstats_base;
+ struct mlxsw_sp_qdisc_stats {
+ u64 tx_bytes;
+ u64 tx_packets;
+ u64 drops;
+ u64 overlimits;
+ u64 backlog;
+ } stats_base;
+
+ union {
+ struct mlxsw_sp_qdisc_ets_data *ets_data;
+ };
+
+ struct mlxsw_sp_qdisc_ops *ops;
+ struct mlxsw_sp_qdisc *parent;
+ struct mlxsw_sp_qdisc *qdiscs;
+ unsigned int num_classes;
+};
+
+struct mlxsw_sp_qdisc_state {
+ struct mlxsw_sp_qdisc root_qdisc;
+
+ /* When a PRIO or ETS are added, the invisible FIFOs in their bands are
+ * created first. When notifications for these FIFOs arrive, it is not
+ * known what qdisc their parent handle refers to. It could be a
+ * newly-created PRIO that will replace the currently-offloaded one, or
+ * it could be e.g. a RED that will be attached below it.
+ *
+ * As the notifications start to arrive, use them to note what the
+ * future parent handle is, and keep track of which child FIFOs were
+ * seen. Then when the parent is known, retroactively offload those
+ * FIFOs.
+ */
+ u32 future_handle;
+ bool future_fifos[IEEE_8021QAZ_MAX_TCS];
+ struct mutex lock; /* Protects qdisc state. */
+};
+
+static bool
+mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle)
+{
+ return mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->handle == handle;
+}
+
+static struct mlxsw_sp_qdisc *
+mlxsw_sp_qdisc_walk(struct mlxsw_sp_qdisc *qdisc,
+ struct mlxsw_sp_qdisc *(*pre)(struct mlxsw_sp_qdisc *,
+ void *),
+ void *data)
+{
+ struct mlxsw_sp_qdisc *tmp;
+ unsigned int i;
+
+ if (pre) {
+ tmp = pre(qdisc, data);
+ if (tmp)
+ return tmp;
+ }
+
+ if (qdisc->ops) {
+ for (i = 0; i < qdisc->num_classes; i++) {
+ tmp = &qdisc->qdiscs[i];
+ if (qdisc->ops) {
+ tmp = mlxsw_sp_qdisc_walk(tmp, pre, data);
+ if (tmp)
+ return tmp;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static struct mlxsw_sp_qdisc *
+mlxsw_sp_qdisc_walk_cb_find(struct mlxsw_sp_qdisc *qdisc, void *data)
+{
+ u32 parent = *(u32 *)data;
+
+ if (qdisc->ops && TC_H_MAJ(qdisc->handle) == TC_H_MAJ(parent)) {
+ if (qdisc->ops->find_class)
+ return qdisc->ops->find_class(qdisc, parent);
+ }
+
+ return NULL;
+}
+
+static struct mlxsw_sp_qdisc *
+mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent)
+{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+
+ if (!qdisc_state)
+ return NULL;
+ if (parent == TC_H_ROOT)
+ return &qdisc_state->root_qdisc;
+ return mlxsw_sp_qdisc_walk(&qdisc_state->root_qdisc,
+ mlxsw_sp_qdisc_walk_cb_find, &parent);
+}
+
+static struct mlxsw_sp_qdisc *
+mlxsw_sp_qdisc_walk_cb_find_by_handle(struct mlxsw_sp_qdisc *qdisc, void *data)
+{
+ u32 handle = *(u32 *)data;
+
+ if (qdisc->ops && qdisc->handle == handle)
+ return qdisc;
+ return NULL;
+}
+
+static struct mlxsw_sp_qdisc *
+mlxsw_sp_qdisc_find_by_handle(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle)
+{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+
+ if (!qdisc_state)
+ return NULL;
+ return mlxsw_sp_qdisc_walk(&qdisc_state->root_qdisc,
+ mlxsw_sp_qdisc_walk_cb_find_by_handle,
+ &handle);
+}
+
+static void
+mlxsw_sp_qdisc_reduce_parent_backlog(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ struct mlxsw_sp_qdisc *tmp;
+
+ for (tmp = mlxsw_sp_qdisc->parent; tmp; tmp = tmp->parent)
+ tmp->stats_base.backlog -= mlxsw_sp_qdisc->stats_base.backlog;
+}
+
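+/* Walk up the qdisc tree to find which priorities feed this qdisc; the root
+ * covers all eight priorities (0xff).
+ */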
+static u8 mlxsw_sp_qdisc_get_prio_bitmap(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ struct mlxsw_sp_qdisc *parent = mlxsw_sp_qdisc->parent;
+
+ if (!parent)
+ return 0xff;
+ if (!parent->ops->get_prio_bitmap)
+ return mlxsw_sp_qdisc_get_prio_bitmap(mlxsw_sp_port, parent);
+ return parent->ops->get_prio_bitmap(parent, mlxsw_sp_qdisc);
+}
+
+#define MLXSW_SP_PORT_DEFAULT_TCLASS 0
+
+static int mlxsw_sp_qdisc_get_tclass_num(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ struct mlxsw_sp_qdisc *parent = mlxsw_sp_qdisc->parent;
+
+ if (!parent)
+ return MLXSW_SP_PORT_DEFAULT_TCLASS;
+ if (!parent->ops->get_tclass_num)
+ return mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port, parent);
+ return parent->ops->get_tclass_num(parent, mlxsw_sp_qdisc);
+}
+
+static int
+mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
+ int err_hdroom = 0;
+ int err = 0;
+ int i;
+
+ if (!mlxsw_sp_qdisc)
+ return 0;
+
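+	/* Destroying the root qdisc hands the port headroom back to DCB mode. */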
+ if (root_qdisc == mlxsw_sp_qdisc) {
+ struct mlxsw_sp_hdroom hdroom = *mlxsw_sp_port->hdroom;
+
+ hdroom.mode = MLXSW_SP_HDROOM_MODE_DCB;
+ mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+ err_hdroom = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
+ }
+
+ if (!mlxsw_sp_qdisc->ops)
+ return 0;
+
+ for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++)
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
+ &mlxsw_sp_qdisc->qdiscs[i]);
+ mlxsw_sp_qdisc_reduce_parent_backlog(mlxsw_sp_qdisc);
+ if (mlxsw_sp_qdisc->ops->destroy)
+ err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+ if (mlxsw_sp_qdisc->ops->clean_stats)
+ mlxsw_sp_qdisc->ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
+
+ mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
+ mlxsw_sp_qdisc->ops = NULL;
+ mlxsw_sp_qdisc->num_classes = 0;
+ kfree(mlxsw_sp_qdisc->qdiscs);
+ mlxsw_sp_qdisc->qdiscs = NULL;
+ return err_hdroom ?: err;
+}
+
+struct mlxsw_sp_qdisc_tree_validate {
+ bool forbid_ets;
+ bool forbid_root_tbf;
+ bool forbid_tbf;
+ bool forbid_red;
+};
+
+static int
+__mlxsw_sp_qdisc_tree_validate(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc_tree_validate validate);
+
+static int
+mlxsw_sp_qdisc_tree_validate_children(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc_tree_validate validate)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++) {
+ err = __mlxsw_sp_qdisc_tree_validate(&mlxsw_sp_qdisc->qdiscs[i],
+ validate);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+__mlxsw_sp_qdisc_tree_validate(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc_tree_validate validate)
+{
+ if (!mlxsw_sp_qdisc->ops)
+ return 0;
+
+ switch (mlxsw_sp_qdisc->ops->type) {
+ case MLXSW_SP_QDISC_FIFO:
+ break;
+ case MLXSW_SP_QDISC_RED:
+ if (validate.forbid_red)
+ return -EINVAL;
+ validate.forbid_red = true;
+ validate.forbid_root_tbf = true;
+ validate.forbid_ets = true;
+ break;
+ case MLXSW_SP_QDISC_TBF:
+ if (validate.forbid_root_tbf) {
+ if (validate.forbid_tbf)
+ return -EINVAL;
+ /* This is a TC TBF. */
+ validate.forbid_tbf = true;
+ validate.forbid_ets = true;
+ } else {
+ /* This is root TBF. */
+ validate.forbid_root_tbf = true;
+ }
+ break;
+ case MLXSW_SP_QDISC_PRIO:
+ case MLXSW_SP_QDISC_ETS:
+ if (validate.forbid_ets)
+ return -EINVAL;
+ validate.forbid_root_tbf = true;
+ validate.forbid_ets = true;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ return mlxsw_sp_qdisc_tree_validate_children(mlxsw_sp_qdisc, validate);
+}
+
+static int mlxsw_sp_qdisc_tree_validate(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_qdisc_tree_validate validate = {};
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+
+ mlxsw_sp_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
+ return __mlxsw_sp_qdisc_tree_validate(mlxsw_sp_qdisc, validate);
+}
+
+static int mlxsw_sp_qdisc_create(struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 handle,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc_ops *ops, void *params)
+{
+ struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
+ struct mlxsw_sp_hdroom orig_hdroom;
+ unsigned int i;
+ int err;
+
+ err = ops->check_params(mlxsw_sp_port, params);
+ if (err)
+ return err;
+
+ if (ops->num_classes) {
+ mlxsw_sp_qdisc->qdiscs = kcalloc(ops->num_classes,
+ sizeof(*mlxsw_sp_qdisc->qdiscs),
+ GFP_KERNEL);
+ if (!mlxsw_sp_qdisc->qdiscs)
+ return -ENOMEM;
+
+ for (i = 0; i < ops->num_classes; i++)
+ mlxsw_sp_qdisc->qdiscs[i].parent = mlxsw_sp_qdisc;
+ }
+
+ orig_hdroom = *mlxsw_sp_port->hdroom;
+ if (root_qdisc == mlxsw_sp_qdisc) {
+ struct mlxsw_sp_hdroom hdroom = orig_hdroom;
+
+ hdroom.mode = MLXSW_SP_HDROOM_MODE_TC;
+ mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+
+ err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
+ if (err)
+ goto err_hdroom_configure;
+ }
+
+ mlxsw_sp_qdisc->num_classes = ops->num_classes;
+ mlxsw_sp_qdisc->ops = ops;
+ mlxsw_sp_qdisc->handle = handle;
+ err = mlxsw_sp_qdisc_tree_validate(mlxsw_sp_port);
+ if (err)
+ goto err_replace;
+
+ err = ops->replace(mlxsw_sp_port, handle, mlxsw_sp_qdisc, params);
+ if (err)
+ goto err_replace;
+
+ return 0;
+
+err_replace:
+ mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
+ mlxsw_sp_qdisc->ops = NULL;
+ mlxsw_sp_qdisc->num_classes = 0;
+ mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
+err_hdroom_configure:
+ kfree(mlxsw_sp_qdisc->qdiscs);
+ mlxsw_sp_qdisc->qdiscs = NULL;
+ return err;
+}
+
+static int
+mlxsw_sp_qdisc_change(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params)
+{
+ struct mlxsw_sp_qdisc_ops *ops = mlxsw_sp_qdisc->ops;
+ int err;
+
+ err = ops->check_params(mlxsw_sp_port, params);
+ if (err)
+ goto unoffload;
+
+ err = ops->replace(mlxsw_sp_port, handle, mlxsw_sp_qdisc, params);
+ if (err)
+ goto unoffload;
+
+ /* Check if the Qdisc changed. That includes a situation where an
+ * invisible Qdisc replaces another one, or is being added for the
+ * first time.
+ */
+ if (mlxsw_sp_qdisc->handle != handle) {
+ if (ops->clean_stats)
+ ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
+ }
+
+ mlxsw_sp_qdisc->handle = handle;
+ return 0;
+
+unoffload:
+ if (ops->unoffload)
+ ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params);
+
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+ return err;
+}
+
+static int
+mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc_ops *ops, void *params)
+{
+ if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
+		/* If this location already holds a qdisc of the same type, its
+		 * configuration can simply be overridden. Otherwise, the old
+		 * qdisc needs to be removed before the new one is set up.
+		 */
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+
+ if (!mlxsw_sp_qdisc->ops)
+ return mlxsw_sp_qdisc_create(mlxsw_sp_port, handle,
+ mlxsw_sp_qdisc, ops, params);
+ else
+ return mlxsw_sp_qdisc_change(mlxsw_sp_port, handle,
+ mlxsw_sp_qdisc, params);
+}
+
+static int
+mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_qopt_offload_stats *stats_ptr)
+{
+ if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
+ mlxsw_sp_qdisc->ops->get_stats)
+ return mlxsw_sp_qdisc->ops->get_stats(mlxsw_sp_port,
+ mlxsw_sp_qdisc,
+ stats_ptr);
+
+ return -EOPNOTSUPP;
+}
+
+static int
+mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *xstats_ptr)
+{
+ if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
+ mlxsw_sp_qdisc->ops->get_xstats)
+ return mlxsw_sp_qdisc->ops->get_xstats(mlxsw_sp_port,
+ mlxsw_sp_qdisc,
+ xstats_ptr);
+
+ return -EOPNOTSUPP;
+}
+
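+/* The per-TC xstats arrays hold unicast counters in entries 0..7 and their
+ * multicast counterparts at an offset of 8; sum both for the full picture.
+ */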
+static u64
+mlxsw_sp_xstats_backlog(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
+{
+ return xstats->backlog[tclass_num] +
+ xstats->backlog[tclass_num + 8];
+}
+
+static u64
+mlxsw_sp_xstats_tail_drop(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
+{
+ return xstats->tail_drop[tclass_num] +
+ xstats->tail_drop[tclass_num + 8];
+}
+
+static void
+mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats,
+ u8 prio_bitmap, u64 *tx_packets,
+ u64 *tx_bytes)
+{
+ int i;
+
+ *tx_packets = 0;
+ *tx_bytes = 0;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ if (prio_bitmap & BIT(i)) {
+ *tx_packets += xstats->tx_packets[i];
+ *tx_bytes += xstats->tx_bytes[i];
+ }
+ }
+}
+
+static void
+mlxsw_sp_qdisc_collect_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ u64 *p_tx_bytes, u64 *p_tx_packets,
+ u64 *p_drops, u64 *p_backlog)
+{
+ struct mlxsw_sp_port_xstats *xstats;
+ u64 tx_bytes, tx_packets;
+ u8 prio_bitmap;
+ int tclass_num;
+
+ prio_bitmap = mlxsw_sp_qdisc_get_prio_bitmap(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+ xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
+ mlxsw_sp_qdisc_bstats_per_priority_get(xstats, prio_bitmap,
+ &tx_packets, &tx_bytes);
+
+ *p_tx_packets += tx_packets;
+ *p_tx_bytes += tx_bytes;
+ *p_drops += xstats->wred_drop[tclass_num] +
+ mlxsw_sp_xstats_tail_drop(xstats, tclass_num);
+ *p_backlog += mlxsw_sp_xstats_backlog(xstats, tclass_num);
+}
+
+static void
+mlxsw_sp_qdisc_update_stats(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ u64 tx_bytes, u64 tx_packets,
+ u64 drops, u64 backlog,
+ struct tc_qopt_offload_stats *stats_ptr)
+{
+ struct mlxsw_sp_qdisc_stats *stats_base = &mlxsw_sp_qdisc->stats_base;
+
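+	/* The hardware counters are free-running; report only the delta since
+	 * the last readout and advance the software base accordingly.
+	 */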
+ tx_bytes -= stats_base->tx_bytes;
+ tx_packets -= stats_base->tx_packets;
+ drops -= stats_base->drops;
+ backlog -= stats_base->backlog;
+
+ _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
+ stats_ptr->qstats->drops += drops;
+ stats_ptr->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp, backlog);
+
+ stats_base->backlog += backlog;
+ stats_base->drops += drops;
+ stats_base->tx_bytes += tx_bytes;
+ stats_base->tx_packets += tx_packets;
+}
+
+static void
+mlxsw_sp_qdisc_get_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_qopt_offload_stats *stats_ptr)
+{
+ u64 tx_packets = 0;
+ u64 tx_bytes = 0;
+ u64 backlog = 0;
+ u64 drops = 0;
+
+ mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &tx_bytes, &tx_packets,
+ &drops, &backlog);
+ mlxsw_sp_qdisc_update_stats(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc,
+ tx_bytes, tx_packets, drops, backlog,
+ stats_ptr);
+}
+
+static int
+mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
+ int tclass_num, u32 min, u32 max,
+ u32 probability, bool is_wred, bool is_ecn)
+{
+ char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
+ char cwtp_cmd[MLXSW_REG_CWTP_LEN];
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ int err;
+
+ mlxsw_reg_cwtp_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num);
+ mlxsw_reg_cwtp_profile_pack(cwtp_cmd, MLXSW_REG_CWTP_DEFAULT_PROFILE,
+ roundup(min, MLXSW_REG_CWTP_MIN_VALUE),
+ roundup(max, MLXSW_REG_CWTP_MIN_VALUE),
+ probability);
+
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtp), cwtp_cmd);
+ if (err)
+ return err;
+
+ mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
+ MLXSW_REG_CWTP_DEFAULT_PROFILE, is_wred, is_ecn);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
+}
+
+static int
+mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port,
+ int tclass_num)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
+
+ mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
+ MLXSW_REG_CWTPM_RESET_PROFILE, false, false);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
+}
+
+static void
+mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ struct mlxsw_sp_qdisc_stats *stats_base;
+ struct mlxsw_sp_port_xstats *xstats;
+ struct red_stats *red_base;
+ u8 prio_bitmap;
+ int tclass_num;
+
+ prio_bitmap = mlxsw_sp_qdisc_get_prio_bitmap(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+ xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
+ stats_base = &mlxsw_sp_qdisc->stats_base;
+ red_base = &mlxsw_sp_qdisc->xstats_base.red;
+
+ mlxsw_sp_qdisc_bstats_per_priority_get(xstats, prio_bitmap,
+ &stats_base->tx_packets,
+ &stats_base->tx_bytes);
+ red_base->prob_mark = xstats->tc_ecn[tclass_num];
+ red_base->prob_drop = xstats->wred_drop[tclass_num];
+ red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num);
+
+ stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
+ stats_base->drops = red_base->prob_drop + red_base->pdrop;
+
+ stats_base->backlog = 0;
+}
+
+static int
+mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ int tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+
+ return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port, tclass_num);
+}
+
+static int
+mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+ void *params)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct tc_red_qopt_offload_params *p = params;
+
+ if (p->min > p->max) {
+ dev_err(mlxsw_sp->bus_info->dev,
+ "spectrum: RED: min %u is bigger then max %u\n", p->min,
+ p->max);
+ return -EINVAL;
+ }
+ if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ GUARANTEED_SHARED_BUFFER)) {
+ dev_err(mlxsw_sp->bus_info->dev,
+ "spectrum: RED: max value %u is too big\n", p->max);
+ return -EINVAL;
+ }
+ if (p->min == 0 || p->max == 0) {
+ dev_err(mlxsw_sp->bus_info->dev,
+ "spectrum: RED: 0 value is illegal for min and max\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_future_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 handle, unsigned int band,
+ struct mlxsw_sp_qdisc *child_qdisc);
+static void
+mlxsw_sp_qdisc_future_fifos_init(struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 handle);
+
+static int
+mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct tc_red_qopt_offload_params *p = params;
+ int tclass_num;
+ u32 min, max;
+ u64 prob;
+ int err;
+
+ err = mlxsw_sp_qdisc_future_fifo_replace(mlxsw_sp_port, handle, 0,
+ &mlxsw_sp_qdisc->qdiscs[0]);
+ if (err)
+ return err;
+ mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port, TC_H_UNSPEC);
+
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+
+	/* Convert the fixed-point probability (scaled by 2^32) to a percentage. */
+ prob = p->probability;
+ prob *= 100;
+ prob = DIV_ROUND_UP(prob, 1 << 16);
+ prob = DIV_ROUND_UP(prob, 1 << 16);
+ min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min);
+ max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max);
+ return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num,
+ min, max, prob,
+ !p->is_nodrop, p->is_ecn);
+}
+
+static void
+mlxsw_sp_qdisc_leaf_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct gnet_stats_queue *qstats)
+{
+ u64 backlog;
+
+ backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ mlxsw_sp_qdisc->stats_base.backlog);
+ qstats->backlog -= backlog;
+ mlxsw_sp_qdisc->stats_base.backlog = 0;
+}
+
+static void
+mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_red_qopt_offload_params *p = params;
+
+ mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);
+}
+
+static int
+mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *xstats_ptr)
+{
+ struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
+ struct mlxsw_sp_port_xstats *xstats;
+ struct red_stats *res = xstats_ptr;
+ int early_drops, marks, pdrops;
+ int tclass_num;
+
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+ xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
+
+ early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
+ marks = xstats->tc_ecn[tclass_num] - xstats_base->prob_mark;
+ pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) -
+ xstats_base->pdrop;
+
+ res->pdrop += pdrops;
+ res->prob_drop += early_drops;
+ res->prob_mark += marks;
+
+ xstats_base->pdrop += pdrops;
+ xstats_base->prob_drop += early_drops;
+ xstats_base->prob_mark += marks;
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_qopt_offload_stats *stats_ptr)
+{
+ struct mlxsw_sp_qdisc_stats *stats_base;
+ struct mlxsw_sp_port_xstats *xstats;
+ u64 overlimits;
+ int tclass_num;
+
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+ xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
+ stats_base = &mlxsw_sp_qdisc->stats_base;
+
+ mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc, stats_ptr);
+ overlimits = xstats->wred_drop[tclass_num] +
+ xstats->tc_ecn[tclass_num] - stats_base->overlimits;
+
+ stats_ptr->qstats->overlimits += overlimits;
+ stats_base->overlimits += overlimits;
+
+ return 0;
+}
+
+static struct mlxsw_sp_qdisc *
+mlxsw_sp_qdisc_leaf_find_class(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ u32 parent)
+{
+ /* RED and TBF are formally classful qdiscs, but all class references,
+ * including X:0, refer to the same, single class.
+ */
+ return &mlxsw_sp_qdisc->qdiscs[0];
+}
+
+static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
+ .type = MLXSW_SP_QDISC_RED,
+ .check_params = mlxsw_sp_qdisc_red_check_params,
+ .replace = mlxsw_sp_qdisc_red_replace,
+ .unoffload = mlxsw_sp_qdisc_red_unoffload,
+ .destroy = mlxsw_sp_qdisc_red_destroy,
+ .get_stats = mlxsw_sp_qdisc_get_red_stats,
+ .get_xstats = mlxsw_sp_qdisc_get_red_xstats,
+ .clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
+ .find_class = mlxsw_sp_qdisc_leaf_find_class,
+ .num_classes = 1,
+};
+
+static int mlxsw_sp_qdisc_graft(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ u8 band, u32 child_handle);
+
+static int __mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_red_qopt_offload *p)
+{
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
+ if (!mlxsw_sp_qdisc)
+ return -EOPNOTSUPP;
+
+ if (p->command == TC_RED_REPLACE)
+ return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
+ mlxsw_sp_qdisc,
+ &mlxsw_sp_qdisc_ops_red,
+ &p->set);
+
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
+ return -EOPNOTSUPP;
+
+ switch (p->command) {
+ case TC_RED_DESTROY:
+ return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+ case TC_RED_XSTATS:
+ return mlxsw_sp_qdisc_get_xstats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ p->xstats);
+ case TC_RED_STATS:
+ return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &p->stats);
+ case TC_RED_GRAFT:
+ return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc, 0,
+ p->child_handle);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_red_qopt_offload *p)
+{
+ int err;
+
+ mutex_lock(&mlxsw_sp_port->qdisc->lock);
+ err = __mlxsw_sp_setup_tc_red(mlxsw_sp_port, p);
+ mutex_unlock(&mlxsw_sp_port->qdisc->lock);
+
+ return err;
+}
+
+static void
+mlxsw_sp_setup_tc_qdisc_leaf_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ u64 backlog_cells = 0;
+ u64 tx_packets = 0;
+ u64 tx_bytes = 0;
+ u64 drops = 0;
+
+ mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &tx_bytes, &tx_packets,
+ &drops, &backlog_cells);
+
+ mlxsw_sp_qdisc->stats_base.tx_packets = tx_packets;
+ mlxsw_sp_qdisc->stats_base.tx_bytes = tx_bytes;
+ mlxsw_sp_qdisc->stats_base.drops = drops;
+ mlxsw_sp_qdisc->stats_base.backlog = 0;
+}
+
+static enum mlxsw_reg_qeec_hr
+mlxsw_sp_qdisc_tbf_hr(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ if (mlxsw_sp_qdisc == &mlxsw_sp_port->qdisc->root_qdisc)
+ return MLXSW_REG_QEEC_HR_PORT;
+
+ /* Configure a subgroup shaper, so that both UC and MC traffic is
+ * subject to shaping. This is unlike RED, where UC queue lengths differ
+ * from MC ones due to different pool and quota configurations, which
+ * makes a shared configuration inapplicable. For a shaper, on the other
+ * hand, subjecting the overall stream to the configured rate makes
+ * sense. Note that this is also what we do for ieee_setmaxrate().
+ */
+ return MLXSW_REG_QEEC_HR_SUBGROUP;
+}
+
+static int
+mlxsw_sp_qdisc_tbf_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ enum mlxsw_reg_qeec_hr hr = mlxsw_sp_qdisc_tbf_hr(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+ int tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+
+ return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, hr, tclass_num, 0,
+ MLXSW_REG_QEEC_MAS_DIS, 0);
+}
+
+static int
+mlxsw_sp_qdisc_tbf_bs(struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 max_size, u8 *p_burst_size)
+{
+ /* TBF burst size is configured in bytes. The ASIC burst size value is
+ * (2 ^ bs) * 512 bits. Convert the TBF bytes to 512-bit units.
+ */
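+ /* For example, max_size = 131072 bytes gives bs512 = 2048 and, after
+ * the fls() and power-of-two checks below, bs = 11, since
+ * (1 << 11) * 64 == 131072. A non-power-of-two size such as 100000
+ * bytes is rejected.
+ */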
+ u32 bs512 = max_size / 64;
+ u8 bs = fls(bs512);
+
+ if (!bs)
+ return -EINVAL;
+ --bs;
+
+ /* Demand a power of two. */
+ if ((1 << bs) != bs512)
+ return -EINVAL;
+
+ if (bs < mlxsw_sp_port->mlxsw_sp->lowest_shaper_bs ||
+ bs > MLXSW_REG_QEEC_HIGHEST_SHAPER_BS)
+ return -EINVAL;
+
+ *p_burst_size = bs;
+ return 0;
+}
+
+static u32
+mlxsw_sp_qdisc_tbf_max_size(u8 bs)
+{
+ return (1U << bs) * 64;
+}
+
+static u64
+mlxsw_sp_qdisc_tbf_rate_kbps(struct tc_tbf_qopt_offload_replace_params *p)
+{
+ /* TBF interface is in bytes/s, whereas Spectrum ASIC is configured in
+ * Kbits/s.
+ */
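+ /* For example, rate_bytes_ps = 125000000 (1 Gbit/s) yields
+ * 125000 * 8 = 1000000 Kbits/s. The integer division runs first, so
+ * any remainder below 1000 bytes/s of the requested rate is truncated.
+ */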
+ return div_u64(p->rate.rate_bytes_ps, 1000) * 8;
+}
+
+static int
+mlxsw_sp_qdisc_tbf_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+ void *params)
+{
+ struct tc_tbf_qopt_offload_replace_params *p = params;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
+ u8 burst_size;
+ int err;
+
+ if (rate_kbps >= MLXSW_REG_QEEC_MAS_DIS) {
+ dev_err(mlxsw_sp_port->mlxsw_sp->bus_info->dev,
+ "spectrum: TBF: rate of %lluKbps must be below %u\n",
+ rate_kbps, MLXSW_REG_QEEC_MAS_DIS);
+ return -EINVAL;
+ }
+
+ err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
+ if (err) {
+ u8 highest_shaper_bs = MLXSW_REG_QEEC_HIGHEST_SHAPER_BS;
+
+ dev_err(mlxsw_sp->bus_info->dev,
+ "spectrum: TBF: invalid burst size of %u, must be a power of two between %u and %u",
+ p->max_size,
+ mlxsw_sp_qdisc_tbf_max_size(mlxsw_sp->lowest_shaper_bs),
+ mlxsw_sp_qdisc_tbf_max_size(highest_shaper_bs));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_tbf_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ enum mlxsw_reg_qeec_hr hr = mlxsw_sp_qdisc_tbf_hr(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+ struct tc_tbf_qopt_offload_replace_params *p = params;
+ u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
+ int tclass_num;
+ u8 burst_size;
+ int err;
+
+ err = mlxsw_sp_qdisc_future_fifo_replace(mlxsw_sp_port, handle, 0,
+ &mlxsw_sp_qdisc->qdiscs[0]);
+ if (err)
+ return err;
+ mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port, TC_H_UNSPEC);
+
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+
+ err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
+ if (WARN_ON_ONCE(err))
+ /* check_params above was supposed to reject this value. */
+ return -EINVAL;
+
+ return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, hr, tclass_num, 0,
+ rate_kbps, burst_size);
+}
+
+static void
+mlxsw_sp_qdisc_tbf_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_tbf_qopt_offload_replace_params *p = params;
+
+ mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);
+}
+
+static int
+mlxsw_sp_qdisc_get_tbf_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_qopt_offload_stats *stats_ptr)
+{
+ mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ stats_ptr);
+ return 0;
+}
+
+static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_tbf = {
+ .type = MLXSW_SP_QDISC_TBF,
+ .check_params = mlxsw_sp_qdisc_tbf_check_params,
+ .replace = mlxsw_sp_qdisc_tbf_replace,
+ .unoffload = mlxsw_sp_qdisc_tbf_unoffload,
+ .destroy = mlxsw_sp_qdisc_tbf_destroy,
+ .get_stats = mlxsw_sp_qdisc_get_tbf_stats,
+ .clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
+ .find_class = mlxsw_sp_qdisc_leaf_find_class,
+ .num_classes = 1,
+};
+
+static int __mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_tbf_qopt_offload *p)
+{
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
+ if (!mlxsw_sp_qdisc)
+ return -EOPNOTSUPP;
+
+ if (p->command == TC_TBF_REPLACE)
+ return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
+ mlxsw_sp_qdisc,
+ &mlxsw_sp_qdisc_ops_tbf,
+ &p->replace_params);
+
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
+ return -EOPNOTSUPP;
+
+ switch (p->command) {
+ case TC_TBF_DESTROY:
+ return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+ case TC_TBF_STATS:
+ return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &p->stats);
+ case TC_TBF_GRAFT:
+ return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc, 0,
+ p->child_handle);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_tbf_qopt_offload *p)
+{
+ int err;
+
+ mutex_lock(&mlxsw_sp_port->qdisc->lock);
+ err = __mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, p);
+ mutex_unlock(&mlxsw_sp_port->qdisc->lock);
+
+ return err;
+}
+
+static int
+mlxsw_sp_qdisc_fifo_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+ void *params)
+{
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_get_fifo_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_qopt_offload_stats *stats_ptr)
+{
+ mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ stats_ptr);
+ return 0;
+}
+
+static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_fifo = {
+ .type = MLXSW_SP_QDISC_FIFO,
+ .check_params = mlxsw_sp_qdisc_fifo_check_params,
+ .replace = mlxsw_sp_qdisc_fifo_replace,
+ .get_stats = mlxsw_sp_qdisc_get_fifo_stats,
+ .clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
+};
+
+static int
+mlxsw_sp_qdisc_future_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 handle, unsigned int band,
+ struct mlxsw_sp_qdisc *child_qdisc)
+{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+
+ if (handle == qdisc_state->future_handle &&
+ qdisc_state->future_fifos[band])
+ return mlxsw_sp_qdisc_replace(mlxsw_sp_port, TC_H_UNSPEC,
+ child_qdisc,
+ &mlxsw_sp_qdisc_ops_fifo,
+ NULL);
+ return 0;
+}
+
+static void
+mlxsw_sp_qdisc_future_fifos_init(struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 handle)
+{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+
+ qdisc_state->future_handle = handle;
+ memset(qdisc_state->future_fifos, 0, sizeof(qdisc_state->future_fifos));
+}
+
+static int __mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_fifo_qopt_offload *p)
+{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+ unsigned int band;
+ u32 parent_handle;
+
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
+ if (!mlxsw_sp_qdisc && p->handle == TC_H_UNSPEC) {
+ parent_handle = TC_H_MAJ(p->parent);
+ if (parent_handle != qdisc_state->future_handle) {
+ /* This notification is for a different Qdisc than the
+ * previous ones. Wipe the future cache.
+ */
+ mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port,
+ parent_handle);
+ }
+
+ band = TC_H_MIN(p->parent) - 1;
+ if (band < IEEE_8021QAZ_MAX_TCS) {
+ if (p->command == TC_FIFO_REPLACE)
+ qdisc_state->future_fifos[band] = true;
+ else if (p->command == TC_FIFO_DESTROY)
+ qdisc_state->future_fifos[band] = false;
+ }
+ }
+ if (!mlxsw_sp_qdisc)
+ return -EOPNOTSUPP;
+
+ if (p->command == TC_FIFO_REPLACE) {
+ return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
+ mlxsw_sp_qdisc,
+ &mlxsw_sp_qdisc_ops_fifo, NULL);
+ }
+
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
+ return -EOPNOTSUPP;
+
+ switch (p->command) {
+ case TC_FIFO_DESTROY:
+ return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+ case TC_FIFO_STATS:
+ return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &p->stats);
+ case TC_FIFO_REPLACE: /* Handled above. */
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_fifo_qopt_offload *p)
+{
+ int err;
+
+ mutex_lock(&mlxsw_sp_port->qdisc->lock);
+ err = __mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, p);
+ mutex_unlock(&mlxsw_sp_port->qdisc->lock);
+
+ return err;
+}
+
+static int __mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ int i;
+
+ for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++) {
+ mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
+ MLXSW_SP_PORT_DEFAULT_TCLASS);
+ mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HR_SUBGROUP,
+ i, 0, false, 0);
+ }
+
+ kfree(mlxsw_sp_qdisc->ets_data);
+ mlxsw_sp_qdisc->ets_data = NULL;
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+}
+
+static int
+__mlxsw_sp_qdisc_ets_check_params(unsigned int nbands)
+{
+ if (nbands > IEEE_8021QAZ_MAX_TCS)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+ void *params)
+{
+ struct tc_prio_qopt_offload_params *p = params;
+
+ return __mlxsw_sp_qdisc_ets_check_params(p->bands);
+}
+
+static struct mlxsw_sp_qdisc *
+mlxsw_sp_qdisc_walk_cb_clean_stats(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *mlxsw_sp_port)
+{
+ u64 backlog;
+
+ if (mlxsw_sp_qdisc->ops) {
+ backlog = mlxsw_sp_qdisc->stats_base.backlog;
+ if (mlxsw_sp_qdisc->ops->clean_stats)
+ mlxsw_sp_qdisc->ops->clean_stats(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+ mlxsw_sp_qdisc->stats_base.backlog = backlog;
+ }
+
+ return NULL;
+}
+
+static void
+mlxsw_sp_qdisc_tree_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ mlxsw_sp_qdisc_walk(mlxsw_sp_qdisc, mlxsw_sp_qdisc_walk_cb_clean_stats,
+ mlxsw_sp_port);
+}
+
+static int
+__mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ u32 handle, unsigned int nbands,
+ const unsigned int *quanta,
+ const unsigned int *weights,
+ const u8 *priomap)
+{
+ struct mlxsw_sp_qdisc_ets_data *ets_data = mlxsw_sp_qdisc->ets_data;
+ struct mlxsw_sp_qdisc_ets_band *ets_band;
+ struct mlxsw_sp_qdisc *child_qdisc;
+ u8 old_priomap, new_priomap;
+ int i, band;
+ int err;
+
+ if (!ets_data) {
+ ets_data = kzalloc(sizeof(*ets_data), GFP_KERNEL);
+ if (!ets_data)
+ return -ENOMEM;
+ mlxsw_sp_qdisc->ets_data = ets_data;
+
+ for (band = 0; band < mlxsw_sp_qdisc->num_classes; band++) {
+ int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
+
+ ets_band = &ets_data->bands[band];
+ ets_band->tclass_num = tclass_num;
+ }
+ }
+
+ for (band = 0; band < nbands; band++) {
+ int tclass_num;
+
+ child_qdisc = &mlxsw_sp_qdisc->qdiscs[band];
+ ets_band = &ets_data->bands[band];
+
+ tclass_num = ets_band->tclass_num;
+ old_priomap = ets_band->prio_bitmap;
+ new_priomap = 0;
+
+ err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HR_SUBGROUP,
+ tclass_num, 0, !!quanta[band],
+ weights[band]);
+ if (err)
+ return err;
+
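+ /* priomap[] maps each of the 8 IEEE priorities to a band. For
+ * example, with priomap = {0, 0, 1, 1, 2, 2, 3, 3}, band 1 collects
+ * priorities 2 and 3, so its new_priomap below becomes
+ * BIT(2) | BIT(3) == 0x0c.
+ */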
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ if (priomap[i] == band) {
+ new_priomap |= BIT(i);
+ if (BIT(i) & old_priomap)
+ continue;
+ err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port,
+ i, tclass_num);
+ if (err)
+ return err;
+ }
+ }
+
+ ets_band->prio_bitmap = new_priomap;
+
+ if (old_priomap != new_priomap)
+ mlxsw_sp_qdisc_tree_clean_stats(mlxsw_sp_port,
+ child_qdisc);
+
+ err = mlxsw_sp_qdisc_future_fifo_replace(mlxsw_sp_port, handle,
+ band, child_qdisc);
+ if (err)
+ return err;
+ }
+ for (; band < IEEE_8021QAZ_MAX_TCS; band++) {
+ ets_band = &ets_data->bands[band];
+ ets_band->prio_bitmap = 0;
+
+ child_qdisc = &mlxsw_sp_qdisc->qdiscs[band];
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);
+
+ mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HR_SUBGROUP,
+ ets_band->tclass_num, 0, false, 0);
+ }
+
+ mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port, TC_H_UNSPEC);
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_prio_qopt_offload_params *p = params;
+ unsigned int zeroes[TCQ_ETS_MAX_BANDS] = {0};
+
+ return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, mlxsw_sp_qdisc,
+ handle, p->bands, zeroes,
+ zeroes, p->priomap);
+}
+
+static void
+__mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct gnet_stats_queue *qstats)
+{
+ u64 backlog;
+
+ backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ mlxsw_sp_qdisc->stats_base.backlog);
+ qstats->backlog -= backlog;
+}
+
+static void
+mlxsw_sp_qdisc_prio_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_prio_qopt_offload_params *p = params;
+
+ __mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc,
+ p->qstats);
+}
+
+static int
+mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_qopt_offload_stats *stats_ptr)
+{
+ struct mlxsw_sp_qdisc *tc_qdisc;
+ u64 tx_packets = 0;
+ u64 tx_bytes = 0;
+ u64 backlog = 0;
+ u64 drops = 0;
+ int i;
+
+ for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++) {
+ tc_qdisc = &mlxsw_sp_qdisc->qdiscs[i];
+ mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, tc_qdisc,
+ &tx_bytes, &tx_packets,
+ &drops, &backlog);
+ }
+
+ mlxsw_sp_qdisc_update_stats(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc,
+ tx_bytes, tx_packets, drops, backlog,
+ stats_ptr);
+ return 0;
+}
+
+static void
+mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ struct mlxsw_sp_qdisc_stats *stats_base;
+ struct mlxsw_sp_port_xstats *xstats;
+ struct rtnl_link_stats64 *stats;
+ int i;
+
+ xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
+ stats = &mlxsw_sp_port->periodic_hw_stats.stats;
+ stats_base = &mlxsw_sp_qdisc->stats_base;
+
+ stats_base->tx_packets = stats->tx_packets;
+ stats_base->tx_bytes = stats->tx_bytes;
+
+ stats_base->drops = 0;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ stats_base->drops += mlxsw_sp_xstats_tail_drop(xstats, i);
+ stats_base->drops += xstats->wred_drop[i];
+ }
+
+ mlxsw_sp_qdisc->stats_base.backlog = 0;
+}
+
+static struct mlxsw_sp_qdisc *
+mlxsw_sp_qdisc_prio_find_class(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ u32 parent)
+{
+ int child_index = TC_H_MIN(parent);
+ int band = child_index - 1;
+
+ if (band < 0 || band >= mlxsw_sp_qdisc->num_classes)
+ return NULL;
+ return &mlxsw_sp_qdisc->qdiscs[band];
+}
+
+static struct mlxsw_sp_qdisc_ets_band *
+mlxsw_sp_qdisc_ets_get_band(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc *child)
+{
+ unsigned int band = child - mlxsw_sp_qdisc->qdiscs;
+
+ if (WARN_ON(band >= IEEE_8021QAZ_MAX_TCS))
+ band = 0;
+ return &mlxsw_sp_qdisc->ets_data->bands[band];
+}
+
+static u8
+mlxsw_sp_qdisc_ets_get_prio_bitmap(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc *child)
+{
+ return mlxsw_sp_qdisc_ets_get_band(mlxsw_sp_qdisc, child)->prio_bitmap;
+}
+
+static int
+mlxsw_sp_qdisc_ets_get_tclass_num(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc *child)
+{
+ return mlxsw_sp_qdisc_ets_get_band(mlxsw_sp_qdisc, child)->tclass_num;
+}
+
+static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
+ .type = MLXSW_SP_QDISC_PRIO,
+ .check_params = mlxsw_sp_qdisc_prio_check_params,
+ .replace = mlxsw_sp_qdisc_prio_replace,
+ .unoffload = mlxsw_sp_qdisc_prio_unoffload,
+ .destroy = mlxsw_sp_qdisc_prio_destroy,
+ .get_stats = mlxsw_sp_qdisc_get_prio_stats,
+ .clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
+ .find_class = mlxsw_sp_qdisc_prio_find_class,
+ .num_classes = IEEE_8021QAZ_MAX_TCS,
+ .get_prio_bitmap = mlxsw_sp_qdisc_ets_get_prio_bitmap,
+ .get_tclass_num = mlxsw_sp_qdisc_ets_get_tclass_num,
+};
+
+static int
+mlxsw_sp_qdisc_ets_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+ void *params)
+{
+ struct tc_ets_qopt_offload_replace_params *p = params;
+
+ return __mlxsw_sp_qdisc_ets_check_params(p->bands);
+}
+
+static int
+mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_ets_qopt_offload_replace_params *p = params;
+
+ return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, mlxsw_sp_qdisc,
+ handle, p->bands, p->quanta,
+ p->weights, p->priomap);
+}
+
+static void
+mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_ets_qopt_offload_replace_params *p = params;
+
+ __mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc,
+ p->qstats);
+}
+
+static int
+mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+}
+
+static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_ets = {
+ .type = MLXSW_SP_QDISC_ETS,
+ .check_params = mlxsw_sp_qdisc_ets_check_params,
+ .replace = mlxsw_sp_qdisc_ets_replace,
+ .unoffload = mlxsw_sp_qdisc_ets_unoffload,
+ .destroy = mlxsw_sp_qdisc_ets_destroy,
+ .get_stats = mlxsw_sp_qdisc_get_prio_stats,
+ .clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
+ .find_class = mlxsw_sp_qdisc_prio_find_class,
+ .num_classes = IEEE_8021QAZ_MAX_TCS,
+ .get_prio_bitmap = mlxsw_sp_qdisc_ets_get_prio_bitmap,
+ .get_tclass_num = mlxsw_sp_qdisc_ets_get_tclass_num,
+};
+
+/* Linux allows linking a Qdisc to arbitrary classes (so long as the resulting
+ * graph is free of cycles). These operations do not change the Qdisc's parent
+ * handle though, which means the handle can be incomplete (if the Qdisc is
+ * grafted under more than one class) or outright wrong (if the Qdisc was
+ * linked to a different class and then removed from the original class).
+ *
+ * E.g. consider this sequence of operations:
+ *
+ * # tc qdisc add dev swp1 root handle 1: prio
+ * # tc qdisc add dev swp1 parent 1:3 handle 13: red limit 1000000 avpkt 10000
+ * RED: set bandwidth to 10Mbit
+ * # tc qdisc link dev swp1 handle 13: parent 1:2
+ *
+ * At this point, both 1:2 and 1:3 have the same RED Qdisc instance as their
+ * child. But RED will still only claim that 1:3 is its parent. If it's removed
+ * from that band, its only parent will be 1:2, but it will continue to claim
+ * that it is in fact 1:3.
+ *
+ * The notification for child Qdisc replace (e.g. TC_RED_REPLACE) comes before
+ * the notification for parent graft (e.g. TC_PRIO_GRAFT). We take the replace
+ * notification to offload the child Qdisc, based on its parent handle, and use
+ * the graft operation to validate that the class where the child is actually
+ * grafted corresponds to the parent handle. If the two don't match, we
+ * unoffload the child.
+ */
+static int mlxsw_sp_qdisc_graft(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ u8 band, u32 child_handle)
+{
+ struct mlxsw_sp_qdisc *old_qdisc;
+ u32 parent;
+
+ if (band < mlxsw_sp_qdisc->num_classes &&
+ mlxsw_sp_qdisc->qdiscs[band].handle == child_handle)
+ return 0;
+
+ if (!child_handle) {
+ /* This is an invisible FIFO replacing the original Qdisc.
+ * Ignore it--the original Qdisc's destroy will follow.
+ */
+ return 0;
+ }
+
+ /* See if the grafted qdisc is already offloaded on any tclass. If so,
+ * unoffload it.
+ */
+ old_qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port,
+ child_handle);
+ if (old_qdisc)
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);
+
+ parent = TC_H_MAKE(mlxsw_sp_qdisc->handle, band + 1);
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc->ops->find_class(mlxsw_sp_qdisc,
+ parent);
+ if (!WARN_ON(!mlxsw_sp_qdisc))
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+
+ return -EOPNOTSUPP;
+}
+
+static int __mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_prio_qopt_offload *p)
+{
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
+ if (!mlxsw_sp_qdisc)
+ return -EOPNOTSUPP;
+
+ if (p->command == TC_PRIO_REPLACE)
+ return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
+ mlxsw_sp_qdisc,
+ &mlxsw_sp_qdisc_ops_prio,
+ &p->replace_params);
+
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
+ return -EOPNOTSUPP;
+
+ switch (p->command) {
+ case TC_PRIO_DESTROY:
+ return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+ case TC_PRIO_STATS:
+ return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &p->stats);
+ case TC_PRIO_GRAFT:
+ return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
+ p->graft_params.band,
+ p->graft_params.child_handle);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_prio_qopt_offload *p)
+{
+ int err;
+
+ mutex_lock(&mlxsw_sp_port->qdisc->lock);
+ err = __mlxsw_sp_setup_tc_prio(mlxsw_sp_port, p);
+ mutex_unlock(&mlxsw_sp_port->qdisc->lock);
+
+ return err;
+}
+
+static int __mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_ets_qopt_offload *p)
+{
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
+ if (!mlxsw_sp_qdisc)
+ return -EOPNOTSUPP;
+
+ if (p->command == TC_ETS_REPLACE)
+ return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
+ mlxsw_sp_qdisc,
+ &mlxsw_sp_qdisc_ops_ets,
+ &p->replace_params);
+
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
+ return -EOPNOTSUPP;
+
+ switch (p->command) {
+ case TC_ETS_DESTROY:
+ return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+ case TC_ETS_STATS:
+ return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &p->stats);
+ case TC_ETS_GRAFT:
+ return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
+ p->graft_params.band,
+ p->graft_params.child_handle);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_ets_qopt_offload *p)
+{
+ int err;
+
+ mutex_lock(&mlxsw_sp_port->qdisc->lock);
+ err = __mlxsw_sp_setup_tc_ets(mlxsw_sp_port, p);
+ mutex_unlock(&mlxsw_sp_port->qdisc->lock);
+
+ return err;
+}
+
+struct mlxsw_sp_qevent_block {
+ struct list_head binding_list;
+ struct list_head mall_entry_list;
+ struct mlxsw_sp *mlxsw_sp;
+};
+
+struct mlxsw_sp_qevent_binding {
+ struct list_head list;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ u32 handle;
+ int tclass_num;
+ enum mlxsw_sp_span_trigger span_trigger;
+ unsigned int action_mask;
+};
+
+static LIST_HEAD(mlxsw_sp_qevent_block_cb_list);
+
+static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct mlxsw_sp_qevent_binding *qevent_binding,
+ const struct mlxsw_sp_span_agent_parms *agent_parms,
+ int *p_span_id)
+{
+ enum mlxsw_sp_span_trigger span_trigger = qevent_binding->span_trigger;
+ struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
+ struct mlxsw_sp_span_trigger_parms trigger_parms = {};
+ bool ingress;
+ int span_id;
+ int err;
+
+ err = mlxsw_sp_span_agent_get(mlxsw_sp, &span_id, agent_parms);
+ if (err)
+ return err;
+
+ ingress = mlxsw_sp_span_trigger_is_ingress(span_trigger);
+ err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, ingress);
+ if (err)
+ goto err_analyzed_port_get;
+
+ trigger_parms.span_id = span_id;
+ trigger_parms.probability_rate = 1;
+ err = mlxsw_sp_span_agent_bind(mlxsw_sp, span_trigger, mlxsw_sp_port,
+ &trigger_parms);
+ if (err)
+ goto err_agent_bind;
+
+ err = mlxsw_sp_span_trigger_enable(mlxsw_sp_port, span_trigger,
+ qevent_binding->tclass_num);
+ if (err)
+ goto err_trigger_enable;
+
+ *p_span_id = span_id;
+ return 0;
+
+err_trigger_enable:
+ mlxsw_sp_span_agent_unbind(mlxsw_sp, span_trigger, mlxsw_sp_port,
+ &trigger_parms);
+err_agent_bind:
+ mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress);
+err_analyzed_port_get:
+ mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
+ return err;
+}
+
+static void mlxsw_sp_qevent_span_deconfigure(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_qevent_binding *qevent_binding,
+ int span_id)
+{
+ enum mlxsw_sp_span_trigger span_trigger = qevent_binding->span_trigger;
+ struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
+ struct mlxsw_sp_span_trigger_parms trigger_parms = {
+ .span_id = span_id,
+ };
+ bool ingress;
+
+ ingress = mlxsw_sp_span_trigger_is_ingress(span_trigger);
+
+ mlxsw_sp_span_trigger_disable(mlxsw_sp_port, span_trigger,
+ qevent_binding->tclass_num);
+ mlxsw_sp_span_agent_unbind(mlxsw_sp, span_trigger, mlxsw_sp_port,
+ &trigger_parms);
+ mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress);
+ mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
+}
+
+static int mlxsw_sp_qevent_mirror_configure(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct mlxsw_sp_qevent_binding *qevent_binding)
+{
+ struct mlxsw_sp_span_agent_parms agent_parms = {
+ .to_dev = mall_entry->mirror.to_dev,
+ };
+
+ return mlxsw_sp_qevent_span_configure(mlxsw_sp, mall_entry, qevent_binding,
+ &agent_parms, &mall_entry->mirror.span_id);
+}
+
+static void mlxsw_sp_qevent_mirror_deconfigure(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct mlxsw_sp_qevent_binding *qevent_binding)
+{
+ mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->mirror.span_id);
+}
+
+static int mlxsw_sp_qevent_trap_configure(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct mlxsw_sp_qevent_binding *qevent_binding)
+{
+ struct mlxsw_sp_span_agent_parms agent_parms = {
+ .session_id = MLXSW_SP_SPAN_SESSION_ID_BUFFER,
+ };
+ int err;
+
+ err = mlxsw_sp_trap_group_policer_hw_id_get(mlxsw_sp,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_BUFFER_DROPS,
+ &agent_parms.policer_enable,
+ &agent_parms.policer_id);
+ if (err)
+ return err;
+
+ return mlxsw_sp_qevent_span_configure(mlxsw_sp, mall_entry, qevent_binding,
+ &agent_parms, &mall_entry->trap.span_id);
+}
+
+static void mlxsw_sp_qevent_trap_deconfigure(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct mlxsw_sp_qevent_binding *qevent_binding)
+{
+ mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->trap.span_id);
+}
+
+static int
+mlxsw_sp_qevent_entry_configure(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct mlxsw_sp_qevent_binding *qevent_binding,
+ struct netlink_ext_ack *extack)
+{
+ if (!(BIT(mall_entry->type) & qevent_binding->action_mask)) {
+ NL_SET_ERR_MSG(extack, "Action not supported at this qevent");
+ return -EOPNOTSUPP;
+ }
+
+ switch (mall_entry->type) {
+ case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
+ return mlxsw_sp_qevent_mirror_configure(mlxsw_sp, mall_entry, qevent_binding);
+ case MLXSW_SP_MALL_ACTION_TYPE_TRAP:
+ return mlxsw_sp_qevent_trap_configure(mlxsw_sp, mall_entry, qevent_binding);
+ default:
+ /* This should have been validated away. */
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
+}
+
+static void mlxsw_sp_qevent_entry_deconfigure(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct mlxsw_sp_qevent_binding *qevent_binding)
+{
+ switch (mall_entry->type) {
+ case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
+ return mlxsw_sp_qevent_mirror_deconfigure(mlxsw_sp, mall_entry, qevent_binding);
+ case MLXSW_SP_MALL_ACTION_TYPE_TRAP:
+ return mlxsw_sp_qevent_trap_deconfigure(mlxsw_sp, mall_entry, qevent_binding);
+ default:
+ WARN_ON(1);
+ return;
+ }
+}
+
+static int
+mlxsw_sp_qevent_binding_configure(struct mlxsw_sp_qevent_block *qevent_block,
+ struct mlxsw_sp_qevent_binding *qevent_binding,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_mall_entry *mall_entry;
+ int err;
+
+ list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list) {
+ err = mlxsw_sp_qevent_entry_configure(qevent_block->mlxsw_sp, mall_entry,
+ qevent_binding, extack);
+ if (err)
+ goto err_entry_configure;
+ }
+
+ return 0;
+
+err_entry_configure:
+ list_for_each_entry_continue_reverse(mall_entry, &qevent_block->mall_entry_list, list)
+ mlxsw_sp_qevent_entry_deconfigure(qevent_block->mlxsw_sp, mall_entry,
+ qevent_binding);
+ return err;
+}
+
+static void mlxsw_sp_qevent_binding_deconfigure(struct mlxsw_sp_qevent_block *qevent_block,
+ struct mlxsw_sp_qevent_binding *qevent_binding)
+{
+ struct mlxsw_sp_mall_entry *mall_entry;
+
+ list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list)
+ mlxsw_sp_qevent_entry_deconfigure(qevent_block->mlxsw_sp, mall_entry,
+ qevent_binding);
+}
+
+static int
+mlxsw_sp_qevent_block_configure(struct mlxsw_sp_qevent_block *qevent_block,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_qevent_binding *qevent_binding;
+ int err;
+
+ list_for_each_entry(qevent_binding, &qevent_block->binding_list, list) {
+ err = mlxsw_sp_qevent_binding_configure(qevent_block,
+ qevent_binding,
+ extack);
+ if (err)
+ goto err_binding_configure;
+ }
+
+ return 0;
+
+err_binding_configure:
+ list_for_each_entry_continue_reverse(qevent_binding, &qevent_block->binding_list, list)
+ mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
+ return err;
+}
+
+static void mlxsw_sp_qevent_block_deconfigure(struct mlxsw_sp_qevent_block *qevent_block)
+{
+ struct mlxsw_sp_qevent_binding *qevent_binding;
+
+ list_for_each_entry(qevent_binding, &qevent_block->binding_list, list)
+ mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
+}
+
+static struct mlxsw_sp_mall_entry *
+mlxsw_sp_qevent_mall_entry_find(struct mlxsw_sp_qevent_block *block, unsigned long cookie)
+{
+ struct mlxsw_sp_mall_entry *mall_entry;
+
+ list_for_each_entry(mall_entry, &block->mall_entry_list, list)
+ if (mall_entry->cookie == cookie)
+ return mall_entry;
+
+ return NULL;
+}
+
+static int mlxsw_sp_qevent_mall_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_qevent_block *qevent_block,
+ struct tc_cls_matchall_offload *f)
+{
+ struct mlxsw_sp_mall_entry *mall_entry;
+ struct flow_action_entry *act;
+ int err;
+
+ /* It should not currently be possible to replace a matchall rule. So
+ * this must be a new rule.
+ */
+ if (!list_empty(&qevent_block->mall_entry_list)) {
+ NL_SET_ERR_MSG(f->common.extack, "At most one filter supported");
+ return -EOPNOTSUPP;
+ }
+ if (f->rule->action.num_entries != 1) {
+ NL_SET_ERR_MSG(f->common.extack, "Only singular actions supported");
+ return -EOPNOTSUPP;
+ }
+ if (f->common.chain_index) {
+ NL_SET_ERR_MSG(f->common.extack, "Only chain 0 is supported");
+ return -EOPNOTSUPP;
+ }
+ if (f->common.protocol != htons(ETH_P_ALL)) {
+ NL_SET_ERR_MSG(f->common.extack, "Protocol matching not supported");
+ return -EOPNOTSUPP;
+ }
+
+ act = &f->rule->action.entries[0];
+ if (!(act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED)) {
+ NL_SET_ERR_MSG(f->common.extack, "HW counters not supported on qevents");
+ return -EOPNOTSUPP;
+ }
+
+ mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL);
+ if (!mall_entry)
+ return -ENOMEM;
+ mall_entry->cookie = f->cookie;
+
+ if (act->id == FLOW_ACTION_MIRRED) {
+ mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR;
+ mall_entry->mirror.to_dev = act->dev;
+ } else if (act->id == FLOW_ACTION_TRAP) {
+ mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_TRAP;
+ } else {
+ NL_SET_ERR_MSG(f->common.extack, "Unsupported action");
+ err = -EOPNOTSUPP;
+ goto err_unsupported_action;
+ }
+
+ list_add_tail(&mall_entry->list, &qevent_block->mall_entry_list);
+
+ err = mlxsw_sp_qevent_block_configure(qevent_block, f->common.extack);
+ if (err)
+ goto err_block_configure;
+
+ return 0;
+
+err_block_configure:
+ list_del(&mall_entry->list);
+err_unsupported_action:
+ kfree(mall_entry);
+ return err;
+}
+
+static void mlxsw_sp_qevent_mall_destroy(struct mlxsw_sp_qevent_block *qevent_block,
+ struct tc_cls_matchall_offload *f)
+{
+ struct mlxsw_sp_mall_entry *mall_entry;
+
+ mall_entry = mlxsw_sp_qevent_mall_entry_find(qevent_block, f->cookie);
+ if (!mall_entry)
+ return;
+
+ mlxsw_sp_qevent_block_deconfigure(qevent_block);
+
+ list_del(&mall_entry->list);
+ kfree(mall_entry);
+}
+
+static int mlxsw_sp_qevent_block_mall_cb(struct mlxsw_sp_qevent_block *qevent_block,
+ struct tc_cls_matchall_offload *f)
+{
+ struct mlxsw_sp *mlxsw_sp = qevent_block->mlxsw_sp;
+
+ switch (f->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return mlxsw_sp_qevent_mall_replace(mlxsw_sp, qevent_block, f);
+ case TC_CLSMATCHALL_DESTROY:
+ mlxsw_sp_qevent_mall_destroy(qevent_block, f);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlxsw_sp_qevent_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+ struct mlxsw_sp_qevent_block *qevent_block = cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSMATCHALL:
+ return mlxsw_sp_qevent_block_mall_cb(qevent_block, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static struct mlxsw_sp_qevent_block *mlxsw_sp_qevent_block_create(struct mlxsw_sp *mlxsw_sp,
+ struct net *net)
+{
+ struct mlxsw_sp_qevent_block *qevent_block;
+
+ qevent_block = kzalloc(sizeof(*qevent_block), GFP_KERNEL);
+ if (!qevent_block)
+ return NULL;
+
+ INIT_LIST_HEAD(&qevent_block->binding_list);
+ INIT_LIST_HEAD(&qevent_block->mall_entry_list);
+ qevent_block->mlxsw_sp = mlxsw_sp;
+ return qevent_block;
+}
+
+static void
+mlxsw_sp_qevent_block_destroy(struct mlxsw_sp_qevent_block *qevent_block)
+{
+ WARN_ON(!list_empty(&qevent_block->binding_list));
+ WARN_ON(!list_empty(&qevent_block->mall_entry_list));
+ kfree(qevent_block);
+}
+
+static void mlxsw_sp_qevent_block_release(void *cb_priv)
+{
+ struct mlxsw_sp_qevent_block *qevent_block = cb_priv;
+
+ mlxsw_sp_qevent_block_destroy(qevent_block);
+}
+
+static struct mlxsw_sp_qevent_binding *
+mlxsw_sp_qevent_binding_create(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, int tclass_num,
+ enum mlxsw_sp_span_trigger span_trigger,
+ unsigned int action_mask)
+{
+ struct mlxsw_sp_qevent_binding *binding;
+
+ binding = kzalloc(sizeof(*binding), GFP_KERNEL);
+ if (!binding)
+ return ERR_PTR(-ENOMEM);
+
+ binding->mlxsw_sp_port = mlxsw_sp_port;
+ binding->handle = handle;
+ binding->tclass_num = tclass_num;
+ binding->span_trigger = span_trigger;
+ binding->action_mask = action_mask;
+ return binding;
+}
+
+static void
+mlxsw_sp_qevent_binding_destroy(struct mlxsw_sp_qevent_binding *binding)
+{
+ kfree(binding);
+}
+
+static struct mlxsw_sp_qevent_binding *
+mlxsw_sp_qevent_binding_lookup(struct mlxsw_sp_qevent_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 handle,
+ enum mlxsw_sp_span_trigger span_trigger)
+{
+ struct mlxsw_sp_qevent_binding *qevent_binding;
+
+ list_for_each_entry(qevent_binding, &block->binding_list, list)
+ if (qevent_binding->mlxsw_sp_port == mlxsw_sp_port &&
+ qevent_binding->handle == handle &&
+ qevent_binding->span_trigger == span_trigger)
+ return qevent_binding;
+ return NULL;
+}
+
+static int
+mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f,
+ enum mlxsw_sp_span_trigger span_trigger,
+ unsigned int action_mask)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_qevent_binding *qevent_binding;
+ struct mlxsw_sp_qevent_block *qevent_block;
+ struct flow_block_cb *block_cb;
+ struct mlxsw_sp_qdisc *qdisc;
+ bool register_block = false;
+ int tclass_num;
+ int err;
+
+ block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp);
+ if (!block_cb) {
+ qevent_block = mlxsw_sp_qevent_block_create(mlxsw_sp, f->net);
+ if (!qevent_block)
+ return -ENOMEM;
+ block_cb = flow_block_cb_alloc(mlxsw_sp_qevent_block_cb, mlxsw_sp, qevent_block,
+ mlxsw_sp_qevent_block_release);
+ if (IS_ERR(block_cb)) {
+ mlxsw_sp_qevent_block_destroy(qevent_block);
+ return PTR_ERR(block_cb);
+ }
+ register_block = true;
+ } else {
+ qevent_block = flow_block_cb_priv(block_cb);
+ }
+ flow_block_cb_incref(block_cb);
+
+ qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port, f->sch->handle);
+ if (!qdisc) {
+ NL_SET_ERR_MSG(f->extack, "Qdisc not offloaded");
+ err = -ENOENT;
+ goto err_find_qdisc;
+ }
+
+ if (WARN_ON(mlxsw_sp_qevent_binding_lookup(qevent_block, mlxsw_sp_port, f->sch->handle,
+ span_trigger))) {
+ err = -EEXIST;
+ goto err_binding_exists;
+ }
+
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port, qdisc);
+ qevent_binding = mlxsw_sp_qevent_binding_create(mlxsw_sp_port,
+ f->sch->handle,
+ tclass_num,
+ span_trigger,
+ action_mask);
+ if (IS_ERR(qevent_binding)) {
+ err = PTR_ERR(qevent_binding);
+ goto err_binding_create;
+ }
+
+ err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding,
+ f->extack);
+ if (err)
+ goto err_binding_configure;
+
+ list_add(&qevent_binding->list, &qevent_block->binding_list);
+
+ if (register_block) {
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &mlxsw_sp_qevent_block_cb_list);
+ }
+
+ return 0;
+
+err_binding_configure:
+ mlxsw_sp_qevent_binding_destroy(qevent_binding);
+err_binding_create:
+err_binding_exists:
+err_find_qdisc:
+ if (!flow_block_cb_decref(block_cb))
+ flow_block_cb_free(block_cb);
+ return err;
+}
+
+static void mlxsw_sp_setup_tc_block_qevent_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f,
+ enum mlxsw_sp_span_trigger span_trigger)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_qevent_binding *qevent_binding;
+ struct mlxsw_sp_qevent_block *qevent_block;
+ struct flow_block_cb *block_cb;
+
+ block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp);
+ if (!block_cb)
+ return;
+ qevent_block = flow_block_cb_priv(block_cb);
+
+ qevent_binding = mlxsw_sp_qevent_binding_lookup(qevent_block, mlxsw_sp_port, f->sch->handle,
+ span_trigger);
+ if (!qevent_binding)
+ return;
+
+ list_del(&qevent_binding->list);
+ mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
+ mlxsw_sp_qevent_binding_destroy(qevent_binding);
+
+ if (!flow_block_cb_decref(block_cb)) {
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ }
+}
+
+static int
+mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f,
+ enum mlxsw_sp_span_trigger span_trigger,
+ unsigned int action_mask)
+{
+ f->driver_block_list = &mlxsw_sp_qevent_block_cb_list;
+
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ return mlxsw_sp_setup_tc_block_qevent_bind(mlxsw_sp_port, f,
+ span_trigger,
+ action_mask);
+ case FLOW_BLOCK_UNBIND:
+ mlxsw_sp_setup_tc_block_qevent_unbind(mlxsw_sp_port, f, span_trigger);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f)
+{
+ unsigned int action_mask = BIT(MLXSW_SP_MALL_ACTION_TYPE_MIRROR) |
+ BIT(MLXSW_SP_MALL_ACTION_TYPE_TRAP);
+
+ return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f,
+ MLXSW_SP_SPAN_TRIGGER_EARLY_DROP,
+ action_mask);
+}
+
+int mlxsw_sp_setup_tc_block_qevent_mark(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f)
+{
+ unsigned int action_mask = BIT(MLXSW_SP_MALL_ACTION_TYPE_MIRROR);
+
+ return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f,
+ MLXSW_SP_SPAN_TRIGGER_ECN,
+ action_mask);
+}
+
+int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_qdisc_state *qdisc_state;
+
+ qdisc_state = kzalloc(sizeof(*qdisc_state), GFP_KERNEL);
+ if (!qdisc_state)
+ return -ENOMEM;
+
+ mutex_init(&qdisc_state->lock);
+ mlxsw_sp_port->qdisc = qdisc_state;
+ return 0;
+}
+
+void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mutex_destroy(&mlxsw_sp_port->qdisc->lock);
+ kfree(mlxsw_sp_port->qdisc);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
new file mode 100644
index 000000000..ab0aa1a61
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -0,0 +1,10617 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/rhashtable.h>
+#include <linux/bitops.h>
+#include <linux/in6.h>
+#include <linux/notifier.h>
+#include <linux/inetdevice.h>
+#include <linux/netdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/socket.h>
+#include <linux/route.h>
+#include <linux/gcd.h>
+#include <linux/if_macvlan.h>
+#include <linux/refcount.h>
+#include <linux/jhash.h>
+#include <linux/net_namespace.h>
+#include <linux/mutex.h>
+#include <net/netevent.h>
+#include <net/neighbour.h>
+#include <net/arp.h>
+#include <net/inet_dscp.h>
+#include <net/ip_fib.h>
+#include <net/ip6_fib.h>
+#include <net/nexthop.h>
+#include <net/fib_rules.h>
+#include <net/ip_tunnels.h>
+#include <net/l3mdev.h>
+#include <net/addrconf.h>
+#include <net/ndisc.h>
+#include <net/ipv6.h>
+#include <net/fib_notifier.h>
+#include <net/switchdev.h>
+
+#include "spectrum.h"
+#include "core.h"
+#include "reg.h"
+#include "spectrum_cnt.h"
+#include "spectrum_dpipe.h"
+#include "spectrum_ipip.h"
+#include "spectrum_mr.h"
+#include "spectrum_mr_tcam.h"
+#include "spectrum_router.h"
+#include "spectrum_span.h"
+
+struct mlxsw_sp_fib;
+struct mlxsw_sp_vr;
+struct mlxsw_sp_lpm_tree;
+struct mlxsw_sp_rif_ops;
+
+struct mlxsw_sp_rif {
+ struct list_head nexthop_list;
+ struct list_head neigh_list;
+ struct net_device *dev; /* NULL for underlay RIF */
+ struct mlxsw_sp_fid *fid;
+ unsigned char addr[ETH_ALEN];
+ int mtu;
+ u16 rif_index;
+ u8 mac_profile_id;
+ u16 vr_id;
+ const struct mlxsw_sp_rif_ops *ops;
+ struct mlxsw_sp *mlxsw_sp;
+
+ unsigned int counter_ingress;
+ bool counter_ingress_valid;
+ unsigned int counter_egress;
+ bool counter_egress_valid;
+};
+
+struct mlxsw_sp_rif_params {
+ struct net_device *dev;
+ union {
+ u16 system_port;
+ u16 lag_id;
+ };
+ u16 vid;
+ bool lag;
+};
+
+struct mlxsw_sp_rif_subport {
+ struct mlxsw_sp_rif common;
+ refcount_t ref_count;
+ union {
+ u16 system_port;
+ u16 lag_id;
+ };
+ u16 vid;
+ bool lag;
+};
+
+struct mlxsw_sp_rif_ipip_lb {
+ struct mlxsw_sp_rif common;
+ struct mlxsw_sp_rif_ipip_lb_config lb_config;
+ u16 ul_vr_id; /* Reserved for Spectrum-2. */
+ u16 ul_rif_id; /* Reserved for Spectrum. */
+};
+
+struct mlxsw_sp_rif_params_ipip_lb {
+ struct mlxsw_sp_rif_params common;
+ struct mlxsw_sp_rif_ipip_lb_config lb_config;
+};
+
+struct mlxsw_sp_rif_ops {
+ enum mlxsw_sp_rif_type type;
+ size_t rif_size;
+
+ void (*setup)(struct mlxsw_sp_rif *rif,
+ const struct mlxsw_sp_rif_params *params);
+ int (*configure)(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack);
+ void (*deconfigure)(struct mlxsw_sp_rif *rif);
+ struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack);
+ void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
+};
+
+struct mlxsw_sp_rif_mac_profile {
+ unsigned char mac_prefix[ETH_ALEN];
+ refcount_t ref_count;
+ u8 id;
+};
+
+struct mlxsw_sp_router_ops {
+ int (*init)(struct mlxsw_sp *mlxsw_sp);
+ int (*ipips_init)(struct mlxsw_sp *mlxsw_sp);
+};
+
+static struct mlxsw_sp_rif *
+mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev);
+static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
+static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
+static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_lpm_tree *lpm_tree);
+static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_fib *fib,
+ u8 tree_id);
+static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_fib *fib);
+
+static unsigned int *
+mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
+ enum mlxsw_sp_rif_counter_dir dir)
+{
+ switch (dir) {
+ case MLXSW_SP_RIF_COUNTER_EGRESS:
+ return &rif->counter_egress;
+ case MLXSW_SP_RIF_COUNTER_INGRESS:
+ return &rif->counter_ingress;
+ }
+ return NULL;
+}
+
+static bool
+mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
+ enum mlxsw_sp_rif_counter_dir dir)
+{
+ switch (dir) {
+ case MLXSW_SP_RIF_COUNTER_EGRESS:
+ return rif->counter_egress_valid;
+ case MLXSW_SP_RIF_COUNTER_INGRESS:
+ return rif->counter_ingress_valid;
+ }
+ return false;
+}
+
+static void
+mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
+ enum mlxsw_sp_rif_counter_dir dir,
+ bool valid)
+{
+ switch (dir) {
+ case MLXSW_SP_RIF_COUNTER_EGRESS:
+ rif->counter_egress_valid = valid;
+ break;
+ case MLXSW_SP_RIF_COUNTER_INGRESS:
+ rif->counter_ingress_valid = valid;
+ break;
+ }
+}
+
+static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
+ unsigned int counter_index, bool enable,
+ enum mlxsw_sp_rif_counter_dir dir)
+{
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+ bool is_egress = false;
+ int err;
+
+ if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
+ is_egress = true;
+ mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
+ is_egress);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif,
+ enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
+{
+ char ricnt_pl[MLXSW_REG_RICNT_LEN];
+ unsigned int *p_counter_index;
+ bool valid;
+ int err;
+
+ valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
+ if (!valid)
+ return -EINVAL;
+
+ p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
+ if (!p_counter_index)
+ return -EINVAL;
+ mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
+ MLXSW_REG_RICNT_OPCODE_NOP);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
+ if (err)
+ return err;
+ *cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
+ return 0;
+}
+
+struct mlxsw_sp_rif_counter_set_basic {
+ u64 good_unicast_packets;
+ u64 good_multicast_packets;
+ u64 good_broadcast_packets;
+ u64 good_unicast_bytes;
+ u64 good_multicast_bytes;
+ u64 good_broadcast_bytes;
+ u64 error_packets;
+ u64 discard_packets;
+ u64 error_bytes;
+ u64 discard_bytes;
+};
+
+static int
+mlxsw_sp_rif_counter_fetch_clear(struct mlxsw_sp_rif *rif,
+ enum mlxsw_sp_rif_counter_dir dir,
+ struct mlxsw_sp_rif_counter_set_basic *set)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ char ricnt_pl[MLXSW_REG_RICNT_LEN];
+ unsigned int *p_counter_index;
+ int err;
+
+ if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
+ return -EINVAL;
+
+ p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
+ if (!p_counter_index)
+ return -EINVAL;
+
+ mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
+ MLXSW_REG_RICNT_OPCODE_CLEAR);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
+ if (err)
+ return err;
+
+ if (!set)
+ return 0;
+
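+/* Copy one named counter field out of the queried RICNT payload into the
+ * matching member of *set.
+ */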
+#define MLXSW_SP_RIF_COUNTER_EXTRACT(NAME) \
+ (set->NAME = mlxsw_reg_ricnt_ ## NAME ## _get(ricnt_pl))
+
+ MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_packets);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_packets);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_packets);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_bytes);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_bytes);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_bytes);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(error_packets);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(discard_packets);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(error_bytes);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(discard_bytes);
+
+#undef MLXSW_SP_RIF_COUNTER_EXTRACT
+
+ return 0;
+}
+
+static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
+ unsigned int counter_index)
+{
+ char ricnt_pl[MLXSW_REG_RICNT_LEN];
+
+ mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
+ MLXSW_REG_RICNT_OPCODE_CLEAR);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
+}
+
+int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp_rif *rif,
+ enum mlxsw_sp_rif_counter_dir dir)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ unsigned int *p_counter_index;
+ int err;
+
+ if (mlxsw_sp_rif_counter_valid_get(rif, dir))
+ return 0;
+
+ p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
+ if (!p_counter_index)
+ return -EINVAL;
+
+ err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
+ p_counter_index);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
+ if (err)
+ goto err_counter_clear;
+
+ err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
+ *p_counter_index, true, dir);
+ if (err)
+ goto err_counter_edit;
+ mlxsw_sp_rif_counter_valid_set(rif, dir, true);
+ return 0;
+
+err_counter_edit:
+err_counter_clear:
+ mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
+ *p_counter_index);
+ return err;
+}
+
+void mlxsw_sp_rif_counter_free(struct mlxsw_sp_rif *rif,
+ enum mlxsw_sp_rif_counter_dir dir)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ unsigned int *p_counter_index;
+
+ if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
+ return;
+
+ p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
+ if (WARN_ON(!p_counter_index))
+ return;
+ mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
+ *p_counter_index, false, dir);
+ mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
+ *p_counter_index);
+ mlxsw_sp_rif_counter_valid_set(rif, dir, false);
+}
+
+static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ struct devlink *devlink;
+
+ devlink = priv_to_devlink(mlxsw_sp->core);
+ if (!devlink_dpipe_table_counter_enabled(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
+ return;
+ mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+}
+
+static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
+{
+ mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+}
+
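+/* Number of possible prefix lengths: /0 through /128 (IPv6), i.e.
+ * 16 * 8 + 1 = 129.
+ */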
+#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)
+
+struct mlxsw_sp_prefix_usage {
+ DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
+};
+
+#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
+ for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
+
+static bool
+mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
+ struct mlxsw_sp_prefix_usage *prefix_usage2)
+{
+ return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
+}
+
+static void
+mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
+ struct mlxsw_sp_prefix_usage *prefix_usage2)
+{
+ memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
+}
+
+static void
+mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
+ unsigned char prefix_len)
+{
+ set_bit(prefix_len, prefix_usage->b);
+}
+
+static void
+mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
+ unsigned char prefix_len)
+{
+ clear_bit(prefix_len, prefix_usage->b);
+}
+
+struct mlxsw_sp_fib_key {
+ unsigned char addr[sizeof(struct in6_addr)];
+ unsigned char prefix_len;
+};
+
+enum mlxsw_sp_fib_entry_type {
+ MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
+ MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
+ MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
+ MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
+ MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE,
+
+ /* This is a special case of local delivery, where a packet should be
+ * decapsulated on reception. Note that there is no corresponding ENCAP,
+ * because that's a type of next hop, not of FIB entry. (There can be
+ * several next hops in a REMOTE entry, and some of them may be
+ * encapsulating entries.)
+ */
+ MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
+ MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
+};
+
+struct mlxsw_sp_nexthop_group_info;
+struct mlxsw_sp_nexthop_group;
+struct mlxsw_sp_fib_entry;
+
+struct mlxsw_sp_fib_node {
+ struct mlxsw_sp_fib_entry *fib_entry;
+ struct list_head list;
+ struct rhash_head ht_node;
+ struct mlxsw_sp_fib *fib;
+ struct mlxsw_sp_fib_key key;
+};
+
+struct mlxsw_sp_fib_entry_decap {
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+ u32 tunnel_index;
+};
+
+struct mlxsw_sp_fib_entry {
+ struct mlxsw_sp_fib_node *fib_node;
+ enum mlxsw_sp_fib_entry_type type;
+ struct list_head nexthop_group_node;
+ struct mlxsw_sp_nexthop_group *nh_group;
+ struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
+};
+
+struct mlxsw_sp_fib4_entry {
+ struct mlxsw_sp_fib_entry common;
+ struct fib_info *fi;
+ u32 tb_id;
+ dscp_t dscp;
+ u8 type;
+};
+
+struct mlxsw_sp_fib6_entry {
+ struct mlxsw_sp_fib_entry common;
+ struct list_head rt6_list;
+ unsigned int nrt6;
+};
+
+struct mlxsw_sp_rt6 {
+ struct list_head list;
+ struct fib6_info *rt;
+};
+
+struct mlxsw_sp_lpm_tree {
+ u8 id; /* tree ID */
+ unsigned int ref_count;
+ enum mlxsw_sp_l3proto proto;
+ unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
+ struct mlxsw_sp_prefix_usage prefix_usage;
+};
+
+struct mlxsw_sp_fib {
+ struct rhashtable ht;
+ struct list_head node_list;
+ struct mlxsw_sp_vr *vr;
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+ enum mlxsw_sp_l3proto proto;
+};
+
+struct mlxsw_sp_vr {
+ u16 id; /* virtual router ID */
+ u32 tb_id; /* kernel fib table id */
+ unsigned int rif_count;
+ struct mlxsw_sp_fib *fib4;
+ struct mlxsw_sp_fib *fib6;
+ struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
+ struct mlxsw_sp_rif *ul_rif;
+ refcount_t ul_rif_refcnt;
+};
+
+static const struct rhashtable_params mlxsw_sp_fib_ht_params;
+
+static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_vr *vr,
+ enum mlxsw_sp_l3proto proto)
+{
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+ struct mlxsw_sp_fib *fib;
+ int err;
+
+ lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
+ fib = kzalloc(sizeof(*fib), GFP_KERNEL);
+ if (!fib)
+ return ERR_PTR(-ENOMEM);
+ err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
+ if (err)
+ goto err_rhashtable_init;
+ INIT_LIST_HEAD(&fib->node_list);
+ fib->proto = proto;
+ fib->vr = vr;
+ fib->lpm_tree = lpm_tree;
+ mlxsw_sp_lpm_tree_hold(lpm_tree);
+ err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
+ if (err)
+ goto err_lpm_tree_bind;
+ return fib;
+
+err_lpm_tree_bind:
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+err_rhashtable_init:
+ kfree(fib);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib *fib)
+{
+ mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
+ WARN_ON(!list_empty(&fib->node_list));
+ rhashtable_destroy(&fib->ht);
+ kfree(fib);
+}
+
+static struct mlxsw_sp_lpm_tree *
+mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+ int i;
+
+ for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
+ lpm_tree = &mlxsw_sp->router->lpm.trees[i];
+ if (lpm_tree->ref_count == 0)
+ return lpm_tree;
+ }
+ return NULL;
+}
+
+static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_lpm_tree *lpm_tree)
+{
+ char ralta_pl[MLXSW_REG_RALTA_LEN];
+
+ mlxsw_reg_ralta_pack(ralta_pl, true,
+ (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+ lpm_tree->id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
+}
+
+static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_lpm_tree *lpm_tree)
+{
+ char ralta_pl[MLXSW_REG_RALTA_LEN];
+
+ mlxsw_reg_ralta_pack(ralta_pl, false,
+ (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+ lpm_tree->id);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
+}
+
+static int
+mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_prefix_usage *prefix_usage,
+ struct mlxsw_sp_lpm_tree *lpm_tree)
+{
+ char ralst_pl[MLXSW_REG_RALST_LEN];
+ u8 root_bin = 0;
+ u8 prefix;
+ u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
+
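+ /* for_each_set_bit() iterates from the lowest bit up, so root_bin
+ * ends up as the longest prefix length in use.
+ */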
+ mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
+ root_bin = prefix;
+
+ mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
+ mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
+ if (prefix == 0)
+ continue;
+ mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
+ MLXSW_REG_RALST_BIN_NO_CHILD);
+ last_prefix = prefix;
+ }
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
+}
+
+static struct mlxsw_sp_lpm_tree *
+mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_prefix_usage *prefix_usage,
+ enum mlxsw_sp_l3proto proto)
+{
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+ int err;
+
+ lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
+ if (!lpm_tree)
+ return ERR_PTR(-EBUSY);
+ lpm_tree->proto = proto;
+ err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
+ if (err)
+ return ERR_PTR(err);
+
+ err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
+ lpm_tree);
+ if (err)
+ goto err_left_struct_set;
+ memcpy(&lpm_tree->prefix_usage, prefix_usage,
+ sizeof(lpm_tree->prefix_usage));
+ memset(&lpm_tree->prefix_ref_count, 0,
+ sizeof(lpm_tree->prefix_ref_count));
+ lpm_tree->ref_count = 1;
+ return lpm_tree;
+
+err_left_struct_set:
+ mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_lpm_tree *lpm_tree)
+{
+ mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
+}
+
+static struct mlxsw_sp_lpm_tree *
+mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_prefix_usage *prefix_usage,
+ enum mlxsw_sp_l3proto proto)
+{
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+ int i;
+
+ for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
+ lpm_tree = &mlxsw_sp->router->lpm.trees[i];
+ if (lpm_tree->ref_count != 0 &&
+ lpm_tree->proto == proto &&
+ mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
+ prefix_usage)) {
+ mlxsw_sp_lpm_tree_hold(lpm_tree);
+ return lpm_tree;
+ }
+ }
+ return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
+}
+
+static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
+{
+ lpm_tree->ref_count++;
+}
+
+static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_lpm_tree *lpm_tree)
+{
+ if (--lpm_tree->ref_count == 0)
+ mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
+}
+
+#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
+
+static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+ u64 max_trees;
+ int err, i;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
+ return -EIO;
+
+ max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
+ mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
+ mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
+ sizeof(struct mlxsw_sp_lpm_tree),
+ GFP_KERNEL);
+ if (!mlxsw_sp->router->lpm.trees)
+ return -ENOMEM;
+
+ for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
+ lpm_tree = &mlxsw_sp->router->lpm.trees[i];
+ lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
+ }
+
+ lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
+ MLXSW_SP_L3_PROTO_IPV4);
+ if (IS_ERR(lpm_tree)) {
+ err = PTR_ERR(lpm_tree);
+ goto err_ipv4_tree_get;
+ }
+ mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;
+
+ lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
+ MLXSW_SP_L3_PROTO_IPV6);
+ if (IS_ERR(lpm_tree)) {
+ err = PTR_ERR(lpm_tree);
+ goto err_ipv6_tree_get;
+ }
+ mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;
+
+ return 0;
+
+err_ipv6_tree_get:
+ lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+err_ipv4_tree_get:
+ kfree(mlxsw_sp->router->lpm.trees);
+ return err;
+}
+
+static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+
+ lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+
+ lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+
+ kfree(mlxsw_sp->router->lpm.trees);
+}
+
+static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
+{
+ return !!vr->fib4 || !!vr->fib6 ||
+ !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
+ !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
+}
+
+static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_vr *vr;
+ int i;
+
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
+ vr = &mlxsw_sp->router->vrs[i];
+ if (!mlxsw_sp_vr_is_used(vr))
+ return vr;
+ }
+ return NULL;
+}
+
+static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_fib *fib, u8 tree_id)
+{
+ char raltb_pl[MLXSW_REG_RALTB_LEN];
+
+ mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto,
+ tree_id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
+}
+
+static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_fib *fib)
+{
+ char raltb_pl[MLXSW_REG_RALTB_LEN];
+
+ /* Bind to tree 0, which is the default */
+ mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
+}
+
+static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
+{
+ /* For our purpose, squash main, default and local tables into one */
+ if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
+ tb_id = RT_TABLE_MAIN;
+ return tb_id;
+}
+
+static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
+ u32 tb_id)
+{
+ struct mlxsw_sp_vr *vr;
+ int i;
+
+ tb_id = mlxsw_sp_fix_tb_id(tb_id);
+
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
+ vr = &mlxsw_sp->router->vrs[i];
+ if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
+ return vr;
+ }
+ return NULL;
+}
+
+int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
+ u16 *vr_id)
+{
+ struct mlxsw_sp_vr *vr;
+ int err = 0;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+ vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
+ if (!vr) {
+ err = -ESRCH;
+ goto out;
+ }
+ *vr_id = vr->id;
+out:
+ mutex_unlock(&mlxsw_sp->router->lock);
+ return err;
+}
+
+static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
+ enum mlxsw_sp_l3proto proto)
+{
+ switch (proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ return vr->fib4;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ return vr->fib6;
+ }
+ return NULL;
+}
+
+static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
+ u32 tb_id,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
+ struct mlxsw_sp_fib *fib4;
+ struct mlxsw_sp_fib *fib6;
+ struct mlxsw_sp_vr *vr;
+ int err;
+
+ vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
+ if (!vr) {
+ NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
+ return ERR_PTR(-EBUSY);
+ }
+ fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
+ if (IS_ERR(fib4))
+ return ERR_CAST(fib4);
+ fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
+ if (IS_ERR(fib6)) {
+ err = PTR_ERR(fib6);
+ goto err_fib6_create;
+ }
+ mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
+ MLXSW_SP_L3_PROTO_IPV4);
+ if (IS_ERR(mr4_table)) {
+ err = PTR_ERR(mr4_table);
+ goto err_mr4_table_create;
+ }
+ mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
+ MLXSW_SP_L3_PROTO_IPV6);
+ if (IS_ERR(mr6_table)) {
+ err = PTR_ERR(mr6_table);
+ goto err_mr6_table_create;
+ }
+
+ vr->fib4 = fib4;
+ vr->fib6 = fib6;
+ vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
+ vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
+ vr->tb_id = tb_id;
+ return vr;
+
+err_mr6_table_create:
+ mlxsw_sp_mr_table_destroy(mr4_table);
+err_mr4_table_create:
+ mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
+err_fib6_create:
+ mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_vr *vr)
+{
+ mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
+ vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
+ mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
+ vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
+ mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
+ vr->fib6 = NULL;
+ mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
+ vr->fib4 = NULL;
+}
+
+static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_vr *vr;
+
+ tb_id = mlxsw_sp_fix_tb_id(tb_id);
+ vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
+ if (!vr)
+ vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
+ return vr;
+}
+
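+/* Release a VR. It is destroyed once it has no RIFs, no unicast routes in
+ * either FIB and no multicast routes in either MR table.
+ */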
+static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
+{
+ if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
+ list_empty(&vr->fib6->node_list) &&
+ mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
+ mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
+ mlxsw_sp_vr_destroy(mlxsw_sp, vr);
+}
+
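+/* A VR needs its binding replaced if it is in use and its FIB for the given
+ * protocol is currently bound to the tree with this ID.
+ */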
+static bool
+mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
+ enum mlxsw_sp_l3proto proto, u8 tree_id)
+{
+ struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
+
+ if (!mlxsw_sp_vr_is_used(vr))
+ return false;
+ if (fib->lpm_tree->id == tree_id)
+ return true;
+ return false;
+}
+
+static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib *fib,
+ struct mlxsw_sp_lpm_tree *new_tree)
+{
+ struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
+ int err;
+
+ fib->lpm_tree = new_tree;
+ mlxsw_sp_lpm_tree_hold(new_tree);
+ err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
+ if (err)
+ goto err_tree_bind;
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
+ return 0;
+
+err_tree_bind:
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
+ fib->lpm_tree = old_tree;
+ return err;
+}
+
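+/* Rebind every VR that uses the current per-protocol tree to the new one and
+ * make the new tree the per-protocol default. On failure, roll the already
+ * rebound VRs back to the old tree.
+ */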
+static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib *fib,
+ struct mlxsw_sp_lpm_tree *new_tree)
+{
+ enum mlxsw_sp_l3proto proto = fib->proto;
+ struct mlxsw_sp_lpm_tree *old_tree;
+ u8 old_id, new_id = new_tree->id;
+ struct mlxsw_sp_vr *vr;
+ int i, err;
+
+ old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
+ old_id = old_tree->id;
+
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
+ vr = &mlxsw_sp->router->vrs[i];
+ if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
+ continue;
+ err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
+ mlxsw_sp_vr_fib(vr, proto),
+ new_tree);
+ if (err)
+ goto err_tree_replace;
+ }
+
+ memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
+ sizeof(new_tree->prefix_ref_count));
+ mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
+
+ return 0;
+
+err_tree_replace:
+ for (i--; i >= 0; i--) {
+ vr = &mlxsw_sp->router->vrs[i];
+ if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
+ continue;
+ mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
+ mlxsw_sp_vr_fib(vr, proto),
+ old_tree);
+ }
+ return err;
+}
+
+static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_vr *vr;
+ u64 max_vrs;
+ int i;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
+ return -EIO;
+
+ max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
+ mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
+ GFP_KERNEL);
+ if (!mlxsw_sp->router->vrs)
+ return -ENOMEM;
+
+ for (i = 0; i < max_vrs; i++) {
+ vr = &mlxsw_sp->router->vrs[i];
+ vr->id = i;
+ }
+
+ return 0;
+}
+
+static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
+
+static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ /* At this stage we're guaranteed not to have new incoming
+ * FIB notifications and the work queue is free from FIBs
+ * sitting on top of mlxsw netdevs. However, we can still
+ * have other FIBs queued. Flush the queue before flushing
+ * the device's tables. No need for locks, as we're the only
+ * writer.
+ */
+ mlxsw_core_flush_owq();
+ mlxsw_sp_router_fib_flush(mlxsw_sp);
+ kfree(mlxsw_sp->router->vrs);
+}
+
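+/* Return the kernel FIB table used by the underlay of the given overlay
+ * device, falling back to the main table when there is no L3 master device.
+ */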
+u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
+{
+ struct net_device *d;
+ u32 tb_id;
+
+ rcu_read_lock();
+ d = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ if (d)
+ tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
+ else
+ tb_id = RT_TABLE_MAIN;
+ rcu_read_unlock();
+
+ return tb_id;
+}
+
+static struct mlxsw_sp_rif *
+mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_rif_params *params,
+ struct netlink_ext_ack *extack);
+
+static struct mlxsw_sp_rif_ipip_lb *
+mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_ipip_type ipipt,
+ struct net_device *ol_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_rif_params_ipip_lb lb_params;
+ const struct mlxsw_sp_ipip_ops *ipip_ops;
+ struct mlxsw_sp_rif *rif;
+
+ ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
+ lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
+ .common.dev = ol_dev,
+ .common.lag = false,
+ .lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
+ };
+
+ rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
+ if (IS_ERR(rif))
+ return ERR_CAST(rif);
+ return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
+}
+
+static struct mlxsw_sp_ipip_entry *
+mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_ipip_type ipipt,
+ struct net_device *ol_dev)
+{
+ const struct mlxsw_sp_ipip_ops *ipip_ops;
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+ struct mlxsw_sp_ipip_entry *ret = NULL;
+ int err;
+
+ ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
+ ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
+ if (!ipip_entry)
+ return ERR_PTR(-ENOMEM);
+
+ ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
+ ol_dev, NULL);
+ if (IS_ERR(ipip_entry->ol_lb)) {
+ ret = ERR_CAST(ipip_entry->ol_lb);
+ goto err_ol_ipip_lb_create;
+ }
+
+ ipip_entry->ipipt = ipipt;
+ ipip_entry->ol_dev = ol_dev;
+ ipip_entry->parms = ipip_ops->parms_init(ol_dev);
+
+ err = ipip_ops->rem_ip_addr_set(mlxsw_sp, ipip_entry);
+ if (err) {
+ ret = ERR_PTR(err);
+ goto err_rem_ip_addr_set;
+ }
+
+ return ipip_entry;
+
+err_rem_ip_addr_set:
+ mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
+err_ol_ipip_lb_create:
+ kfree(ipip_entry);
+ return ret;
+}
+
+static void mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ const struct mlxsw_sp_ipip_ops *ipip_ops =
+ mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
+
+ ipip_ops->rem_ip_addr_unset(mlxsw_sp, ipip_entry);
+ mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
+ kfree(ipip_entry);
+}
+
+static bool
+mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
+ const enum mlxsw_sp_l3proto ul_proto,
+ union mlxsw_sp_l3addr saddr,
+ u32 ul_tb_id,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
+ enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
+ union mlxsw_sp_l3addr tun_saddr;
+
+ if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
+ return false;
+
+ tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
+ return tun_ul_tb_id == ul_tb_id &&
+ mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
+}
+
+static int mlxsw_sp_ipip_decap_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_ipip_type ipipt)
+{
+ const struct mlxsw_sp_ipip_ops *ipip_ops;
+
+ ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
+
+ /* Not all tunnels need to increase the default parsing depth
+ * (96 bytes).
+ */
+ if (ipip_ops->inc_parsing_depth)
+ return mlxsw_sp_parsing_depth_inc(mlxsw_sp);
+
+ return 0;
+}
+
+static void mlxsw_sp_ipip_decap_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_ipip_type ipipt)
+{
+ const struct mlxsw_sp_ipip_ops *ipip_ops =
+ mlxsw_sp->router->ipip_ops_arr[ipipt];
+
+ if (ipip_ops->inc_parsing_depth)
+ mlxsw_sp_parsing_depth_dec(mlxsw_sp);
+}
+
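+/* Set up a FIB entry as the decap entry of an IPIP tunnel: reserve a KVDL
+ * adjacency slot for the tunnel and bump the parsing depth if the tunnel
+ * type requires it.
+ */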
+static int
+mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ u32 tunnel_index;
+ int err;
+
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ 1, &tunnel_index);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_ipip_decap_parsing_depth_inc(mlxsw_sp,
+ ipip_entry->ipipt);
+ if (err)
+ goto err_parsing_depth_inc;
+
+ ipip_entry->decap_fib_entry = fib_entry;
+ fib_entry->decap.ipip_entry = ipip_entry;
+ fib_entry->decap.tunnel_index = tunnel_index;
+
+ return 0;
+
+err_parsing_depth_inc:
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ fib_entry->decap.tunnel_index);
+ return err;
+}
+
+static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ enum mlxsw_sp_ipip_type ipipt = fib_entry->decap.ipip_entry->ipipt;
+
+ /* Unlink this entry from the IPIP entry whose decap entry it is. */
+ fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
+ fib_entry->decap.ipip_entry = NULL;
+ mlxsw_sp_ipip_decap_parsing_depth_dec(mlxsw_sp, ipipt);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ 1, fib_entry->decap.tunnel_index);
+}
+
+static struct mlxsw_sp_fib_node *
+mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
+ size_t addr_len, unsigned char prefix_len);
+static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry);
+
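+/* Turn an IPIP decap route back into a plain trap entry, so matching packets
+ * are sent to the CPU instead of being decapsulated in hardware.
+ */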
+static void
+mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;
+
+ mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
+
+ mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
+}
+
+static void
+mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct mlxsw_sp_fib_entry *decap_fib_entry)
+{
+ if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
+ ipip_entry))
+ return;
+ decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
+
+ if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
+ mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
+}
+
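+/* Find the host route (/32 or /128) for the given address in the given
+ * table, provided its FIB entry has the requested type.
+ */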
+static struct mlxsw_sp_fib_entry *
+mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
+ enum mlxsw_sp_l3proto proto,
+ const union mlxsw_sp_l3addr *addr,
+ enum mlxsw_sp_fib_entry_type type)
+{
+ struct mlxsw_sp_fib_node *fib_node;
+ unsigned char addr_prefix_len;
+ struct mlxsw_sp_fib *fib;
+ struct mlxsw_sp_vr *vr;
+ const void *addrp;
+ size_t addr_len;
+ u32 addr4;
+
+ vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
+ if (!vr)
+ return NULL;
+ fib = mlxsw_sp_vr_fib(vr, proto);
+
+ switch (proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ addr4 = be32_to_cpu(addr->addr4);
+ addrp = &addr4;
+ addr_len = 4;
+ addr_prefix_len = 32;
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ addrp = &addr->addr6;
+ addr_len = 16;
+ addr_prefix_len = 128;
+ break;
+ default:
+ WARN_ON(1);
+ return NULL;
+ }
+
+ fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
+ addr_prefix_len);
+ if (!fib_node || fib_node->fib_entry->type != type)
+ return NULL;
+
+ return fib_node->fib_entry;
+}
+
+/* Given an IPIP entry, find the corresponding decap route. */
+static struct mlxsw_sp_fib_entry *
+mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ struct mlxsw_sp_fib_node *fib_node;
+ const struct mlxsw_sp_ipip_ops *ipip_ops;
+ unsigned char saddr_prefix_len;
+ union mlxsw_sp_l3addr saddr;
+ struct mlxsw_sp_fib *ul_fib;
+ struct mlxsw_sp_vr *ul_vr;
+ const void *saddrp;
+ size_t saddr_len;
+ u32 ul_tb_id;
+ u32 saddr4;
+
+ ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
+
+ ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
+ ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
+ if (!ul_vr)
+ return NULL;
+
+ ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
+ saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
+ ipip_entry->ol_dev);
+
+ switch (ipip_ops->ul_proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ saddr4 = be32_to_cpu(saddr.addr4);
+ saddrp = &saddr4;
+ saddr_len = 4;
+ saddr_prefix_len = 32;
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ saddrp = &saddr.addr6;
+ saddr_len = 16;
+ saddr_prefix_len = 128;
+ break;
+ default:
+ WARN_ON(1);
+ return NULL;
+ }
+
+ fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
+ saddr_prefix_len);
+ if (!fib_node ||
+ fib_node->fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
+ return NULL;
+
+ return fib_node->fib_entry;
+}
+
+static struct mlxsw_sp_ipip_entry *
+mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_ipip_type ipipt,
+ struct net_device *ol_dev)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+
+ ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
+ if (IS_ERR(ipip_entry))
+ return ipip_entry;
+
+ list_add_tail(&ipip_entry->ipip_list_node,
+ &mlxsw_sp->router->ipip_list);
+
+ return ipip_entry;
+}
+
+static void
+mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ list_del(&ipip_entry->ipip_list_node);
+ mlxsw_sp_ipip_entry_dealloc(mlxsw_sp, ipip_entry);
+}
+
+static bool
+mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ul_dev,
+ enum mlxsw_sp_l3proto ul_proto,
+ union mlxsw_sp_l3addr ul_dip,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
+ enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
+
+ if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
+ return false;
+
+ return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
+ ul_tb_id, ipip_entry);
+}
+
+/* Given decap parameters, find the corresponding IPIP entry. */
+static struct mlxsw_sp_ipip_entry *
+mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp, int ul_dev_ifindex,
+ enum mlxsw_sp_l3proto ul_proto,
+ union mlxsw_sp_l3addr ul_dip)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
+ struct net_device *ul_dev;
+
+ rcu_read_lock();
+
+ ul_dev = dev_get_by_index_rcu(mlxsw_sp_net(mlxsw_sp), ul_dev_ifindex);
+ if (!ul_dev)
+ goto out_unlock;
+
+ list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
+ ipip_list_node)
+ if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
+ ul_proto, ul_dip,
+ ipip_entry))
+ goto out_unlock;
+
+ rcu_read_unlock();
+
+ return NULL;
+
+out_unlock:
+ rcu_read_unlock();
+ return ipip_entry;
+}
+
+static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev,
+ enum mlxsw_sp_ipip_type *p_type)
+{
+ struct mlxsw_sp_router *router = mlxsw_sp->router;
+ const struct mlxsw_sp_ipip_ops *ipip_ops;
+ enum mlxsw_sp_ipip_type ipipt;
+
+ for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
+ ipip_ops = router->ipip_ops_arr[ipipt];
+ if (dev->type == ipip_ops->dev_type) {
+ if (p_type)
+ *p_type = ipipt;
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev)
+{
+ return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
+}
+
+static struct mlxsw_sp_ipip_entry *
+mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ol_dev)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+
+ list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
+ ipip_list_node)
+ if (ipip_entry->ol_dev == ol_dev)
+ return ipip_entry;
+
+ return NULL;
+}
+
+static struct mlxsw_sp_ipip_entry *
+mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ul_dev,
+ struct mlxsw_sp_ipip_entry *start)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+
+ ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
+ ipip_list_node);
+ list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
+ ipip_list_node) {
+ struct net_device *ol_dev = ipip_entry->ol_dev;
+ struct net_device *ipip_ul_dev;
+
+ rcu_read_lock();
+ ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ rcu_read_unlock();
+
+ if (ipip_ul_dev == ul_dev)
+ return ipip_entry;
+ }
+
+ return NULL;
+}
+
+static bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev)
+{
+ return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
+}
+
+static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ol_dev,
+ enum mlxsw_sp_ipip_type ipipt)
+{
+ const struct mlxsw_sp_ipip_ops *ops
+ = mlxsw_sp->router->ipip_ops_arr[ipipt];
+
+ return ops->can_offload(mlxsw_sp, ol_dev);
+}
+
+static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *ol_dev)
+{
+ enum mlxsw_sp_ipip_type ipipt = MLXSW_SP_IPIP_TYPE_MAX;
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+ enum mlxsw_sp_l3proto ul_proto;
+ union mlxsw_sp_l3addr saddr;
+ u32 ul_tb_id;
+
+ mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
+ if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
+ ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
+ ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
+ saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
+ if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
+ saddr, ul_tb_id,
+ NULL)) {
+ ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
+ ol_dev);
+ if (IS_ERR(ipip_entry))
+ return PTR_ERR(ipip_entry);
+ }
+ }
+
+ return 0;
+}
+
+static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *ol_dev)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+
+ ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
+ if (ipip_entry)
+ mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
+}
+
+static void
+mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ struct mlxsw_sp_fib_entry *decap_fib_entry;
+
+ decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
+ if (decap_fib_entry)
+ mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
+ decap_fib_entry);
+}
+
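+/* Program the loopback RIF backing an IPIP tunnel (or disable it when
+ * 'enable' is false), using the IPv4 or IPv6 RITR loopback variant according
+ * to the underlay protocol.
+ */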
+static int
+mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
+ u16 ul_rif_id, bool enable)
+{
+ struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
+ enum mlxsw_reg_ritr_loopback_ipip_options ipip_options;
+ struct mlxsw_sp_rif *rif = &lb_rif->common;
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+ struct in6_addr *saddr6;
+ u32 saddr4;
+
+ ipip_options = MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET;
+ switch (lb_cf.ul_protocol) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
+ mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
+ rif->rif_index, rif->vr_id, rif->dev->mtu);
+ mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
+ ipip_options, ul_vr_id,
+ ul_rif_id, saddr4,
+ lb_cf.okey);
+ break;
+
+ case MLXSW_SP_L3_PROTO_IPV6:
+ saddr6 = &lb_cf.saddr.addr6;
+ mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
+ rif->rif_index, rif->vr_id, rif->dev->mtu);
+ mlxsw_reg_ritr_loopback_ipip6_pack(ritr_pl, lb_cf.lb_ipipt,
+ ipip_options, ul_vr_id,
+ ul_rif_id, saddr6,
+ lb_cf.okey);
+ break;
+ }
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *ol_dev)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+ struct mlxsw_sp_rif_ipip_lb *lb_rif;
+ int err = 0;
+
+ ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
+ if (ipip_entry) {
+ lb_rif = ipip_entry->ol_lb;
+ err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
+ lb_rif->ul_rif_id, true);
+ if (err)
+ goto out;
+ lb_rif->common.mtu = ol_dev->mtu;
+ }
+
+out:
+ return err;
+}
+
+static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *ol_dev)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+
+ ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
+ if (ipip_entry)
+ mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
+}
+
+static void
+mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ if (ipip_entry->decap_fib_entry)
+ mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
+}
+
+static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *ol_dev)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+
+ ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
+ if (ipip_entry)
+ mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
+}
+
+static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *old_rif,
+ struct mlxsw_sp_rif *new_rif);
+static int
+mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ bool keep_encap,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
+ struct mlxsw_sp_rif_ipip_lb *new_lb_rif;
+
+ new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
+ ipip_entry->ipipt,
+ ipip_entry->ol_dev,
+ extack);
+ if (IS_ERR(new_lb_rif))
+ return PTR_ERR(new_lb_rif);
+ ipip_entry->ol_lb = new_lb_rif;
+
+ if (keep_encap)
+ mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
+ &new_lb_rif->common);
+
+ mlxsw_sp_rif_destroy(&old_lb_rif->common);
+
+ return 0;
+}
+
+static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif);
+
+/**
+ * __mlxsw_sp_ipip_entry_update_tunnel - Update offload related to IPIP entry.
+ * @mlxsw_sp: mlxsw_sp.
+ * @ipip_entry: IPIP entry.
+ * @recreate_loopback: Recreates the associated loopback RIF.
+ * @keep_encap: Updates next hops that use the tunnel netdevice. This is only
+ * relevant when recreate_loopback is true.
+ * @update_nexthops: Updates next hops, keeping the current loopback RIF. This
+ * is only relevant when recreate_loopback is false.
+ * @extack: extack.
+ *
+ * Return: Non-zero value on failure.
+ */
+int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ bool recreate_loopback,
+ bool keep_encap,
+ bool update_nexthops,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ /* RIFs can't be edited, so to update the loopback we need to destroy
+ * and recreate it. That opens a window during which RALUE and RATR
+ * registers still reference a RIF that is already gone. RATRs are
+ * handled in mlxsw_sp_ipip_entry_ol_lb_update(); to take care of
+ * RALUE, demote the decap route back to a trap entry first.
+ */
+ if (ipip_entry->decap_fib_entry)
+ mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
+
+ if (recreate_loopback) {
+ err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
+ keep_encap, extack);
+ if (err)
+ return err;
+ } else if (update_nexthops) {
+ mlxsw_sp_nexthop_rif_update(mlxsw_sp,
+ &ipip_entry->ol_lb->common);
+ }
+
+ if (ipip_entry->ol_dev->flags & IFF_UP)
+ mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
+
+ return 0;
+}
+
+static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *ol_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry =
+ mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
+
+ if (!ipip_entry)
+ return 0;
+
+ return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
+ true, false, false, extack);
+}
+
+static int
+mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct net_device *ul_dev,
+ bool *demote_this,
+ struct netlink_ext_ack *extack)
+{
+ u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
+ enum mlxsw_sp_l3proto ul_proto;
+ union mlxsw_sp_l3addr saddr;
+
+ /* Moving the underlay to a different VRF might cause a local address
+ * conflict, in which case the conflicting tunnels need to be demoted.
+ */
+ ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
+ saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
+ if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
+ saddr, ul_tb_id,
+ ipip_entry)) {
+ *demote_this = true;
+ return 0;
+ }
+
+ return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
+ true, true, false, extack);
+}
+
+static int
+mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct net_device *ul_dev)
+{
+ return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
+ false, false, true, NULL);
+}
+
+static int
+mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct net_device *ul_dev)
+{
+ /* A down underlay device causes encapsulated packets to not be
+ * forwarded, but decap still works. So refresh next hops without
+ * touching anything else.
+ */
+ return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
+ false, false, true, NULL);
+}
+
+static int
+mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *ol_dev,
+ struct netlink_ext_ack *extack)
+{
+ const struct mlxsw_sp_ipip_ops *ipip_ops;
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+ int err;
+
+ ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
+ if (!ipip_entry)
+ /* A change might make a tunnel eligible for offloading, but
+ * that is currently not implemented. What falls to slow path
+ * stays there.
+ */
+ return 0;
+
+ /* A change might make a tunnel not eligible for offloading. */
+ if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
+ ipip_entry->ipipt)) {
+ mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
+ return 0;
+ }
+
+ ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
+ err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
+ return err;
+}
+
+void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ struct net_device *ol_dev = ipip_entry->ol_dev;
+
+ if (ol_dev->flags & IFF_UP)
+ mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
+ mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
+}
+
+/* The configuration where several tunnels have the same local address in the
+ * same underlay table needs special treatment in the HW. That is currently not
+ * implemented in the driver. This function finds and demotes the first tunnel
+ * with a given source address, except the one passed in the argument
+ * `except'.
+ */
+bool
+mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_l3proto ul_proto,
+ union mlxsw_sp_l3addr saddr,
+ u32 ul_tb_id,
+ const struct mlxsw_sp_ipip_entry *except)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
+
+ list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
+ ipip_list_node) {
+ if (ipip_entry != except &&
+ mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
+ ul_tb_id, ipip_entry)) {
+ mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *ul_dev)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
+
+ list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
+ ipip_list_node) {
+ struct net_device *ol_dev = ipip_entry->ol_dev;
+ struct net_device *ipip_ul_dev;
+
+ rcu_read_lock();
+ ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ rcu_read_unlock();
+ if (ipip_ul_dev == ul_dev)
+ mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
+ }
+}
+
+static int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *ol_dev,
+ unsigned long event,
+ struct netdev_notifier_info *info)
+{
+ struct netdev_notifier_changeupper_info *chup;
+ struct netlink_ext_ack *extack;
+ int err = 0;
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ err = mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
+ break;
+ case NETDEV_UNREGISTER:
+ mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
+ break;
+ case NETDEV_UP:
+ mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
+ break;
+ case NETDEV_DOWN:
+ mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
+ break;
+ case NETDEV_CHANGEUPPER:
+ chup = container_of(info, typeof(*chup), info);
+ extack = info->extack;
+ if (netif_is_l3_master(chup->upper_dev))
+ err = mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
+ ol_dev,
+ extack);
+ break;
+ case NETDEV_CHANGE:
+ extack = info->extack;
+ err = mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
+ ol_dev, extack);
+ break;
+ case NETDEV_CHANGEMTU:
+ err = mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
+ break;
+ }
+ return err;
+}
+
+static int
+__mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct net_device *ul_dev,
+ bool *demote_this,
+ unsigned long event,
+ struct netdev_notifier_info *info)
+{
+ struct netdev_notifier_changeupper_info *chup;
+ struct netlink_ext_ack *extack;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ chup = container_of(info, typeof(*chup), info);
+ extack = info->extack;
+ if (netif_is_l3_master(chup->upper_dev))
+ return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
+ ipip_entry,
+ ul_dev,
+ demote_this,
+ extack);
+ break;
+
+ case NETDEV_UP:
+ return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
+ ul_dev);
+ case NETDEV_DOWN:
+ return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
+ ipip_entry,
+ ul_dev);
+ }
+ return 0;
+}
+
+static int
+mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *ul_dev,
+ unsigned long event,
+ struct netdev_notifier_info *info)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
+ int err;
+
+ while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
+ ul_dev,
+ ipip_entry))) {
+ struct mlxsw_sp_ipip_entry *prev;
+ bool demote_this = false;
+
+ err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
+ ul_dev, &demote_this,
+ event, info);
+ if (err) {
+ mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
+ ul_dev);
+ return err;
+ }
+
+ if (demote_this) {
+ if (list_is_first(&ipip_entry->ipip_list_node,
+ &mlxsw_sp->router->ipip_list))
+ prev = NULL;
+ else
+ /* This can't be cached from previous iteration,
+ * because that entry could be gone now.
+ */
+ prev = list_prev_entry(ipip_entry,
+ ipip_list_node);
+ mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
+ ipip_entry = prev;
+ }
+ }
+
+ return 0;
+}
+
+int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
+ enum mlxsw_sp_l3proto ul_proto,
+ const union mlxsw_sp_l3addr *ul_sip,
+ u32 tunnel_index)
+{
+ enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
+ struct mlxsw_sp_router *router = mlxsw_sp->router;
+ struct mlxsw_sp_fib_entry *fib_entry;
+ int err = 0;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+
+ if (WARN_ON_ONCE(router->nve_decap_config.valid)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ router->nve_decap_config.ul_tb_id = ul_tb_id;
+ router->nve_decap_config.tunnel_index = tunnel_index;
+ router->nve_decap_config.ul_proto = ul_proto;
+ router->nve_decap_config.ul_sip = *ul_sip;
+ router->nve_decap_config.valid = true;
+
+ /* It is valid to create a tunnel with a local IP and only later
+ * assign this IP address to a local interface.
+ */
+ fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
+ ul_proto, ul_sip,
+ type);
+ if (!fib_entry)
+ goto out;
+
+ fib_entry->decap.tunnel_index = tunnel_index;
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
+
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
+ if (err)
+ goto err_fib_entry_update;
+
+ goto out;
+
+err_fib_entry_update:
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
+ mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
+out:
+ mutex_unlock(&mlxsw_sp->router->lock);
+ return err;
+}
+
+void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
+ enum mlxsw_sp_l3proto ul_proto,
+ const union mlxsw_sp_l3addr *ul_sip)
+{
+ enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
+ struct mlxsw_sp_router *router = mlxsw_sp->router;
+ struct mlxsw_sp_fib_entry *fib_entry;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+
+ if (WARN_ON_ONCE(!router->nve_decap_config.valid))
+ goto out;
+
+ router->nve_decap_config.valid = false;
+
+ fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
+ ul_proto, ul_sip,
+ type);
+ if (!fib_entry)
+ goto out;
+
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
+ mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
+out:
+ mutex_unlock(&mlxsw_sp->router->lock);
+}
+
+static bool mlxsw_sp_router_nve_is_decap(struct mlxsw_sp *mlxsw_sp,
+ u32 ul_tb_id,
+ enum mlxsw_sp_l3proto ul_proto,
+ const union mlxsw_sp_l3addr *ul_sip)
+{
+ struct mlxsw_sp_router *router = mlxsw_sp->router;
+
+ return router->nve_decap_config.valid &&
+ router->nve_decap_config.ul_tb_id == ul_tb_id &&
+ router->nve_decap_config.ul_proto == ul_proto &&
+ !memcmp(&router->nve_decap_config.ul_sip, ul_sip,
+ sizeof(*ul_sip));
+}
+
+struct mlxsw_sp_neigh_key {
+ struct neighbour *n;
+};
+
+struct mlxsw_sp_neigh_entry {
+ struct list_head rif_list_node;
+ struct rhash_head ht_node;
+ struct mlxsw_sp_neigh_key key;
+ u16 rif;
+ bool connected;
+ unsigned char ha[ETH_ALEN];
+ struct list_head nexthop_list; /* list of nexthops using
+ * this neigh entry
+ */
+ struct list_head nexthop_neighs_list_node;
+ unsigned int counter_index;
+ bool counter_valid;
+};
+
+static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
+ .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
+ .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
+ .key_len = sizeof(struct mlxsw_sp_neigh_key),
+};
+
+struct mlxsw_sp_neigh_entry *
+mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
+ struct mlxsw_sp_neigh_entry *neigh_entry)
+{
+ if (!neigh_entry) {
+ if (list_empty(&rif->neigh_list))
+ return NULL;
+ else
+ return list_first_entry(&rif->neigh_list,
+ typeof(*neigh_entry),
+ rif_list_node);
+ }
+ if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
+ return NULL;
+ return list_next_entry(neigh_entry, rif_list_node);
+}
+
+int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
+{
+ return neigh_entry->key.n->tbl->family;
+}
+
+unsigned char *
+mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
+{
+ return neigh_entry->ha;
+}
+
+u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
+{
+ struct neighbour *n;
+
+ n = neigh_entry->key.n;
+ return ntohl(*((__be32 *) n->primary_key));
+}
+
+struct in6_addr *
+mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
+{
+ struct neighbour *n;
+
+ n = neigh_entry->key.n;
+ return (struct in6_addr *) &n->primary_key;
+}
+
+int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_neigh_entry *neigh_entry,
+ u64 *p_counter)
+{
+ if (!neigh_entry->counter_valid)
+ return -EINVAL;
+
+ return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
+ p_counter, NULL);
+}
+
+static struct mlxsw_sp_neigh_entry *
+mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
+ u16 rif)
+{
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+
+ neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
+ if (!neigh_entry)
+ return NULL;
+
+ neigh_entry->key.n = n;
+ neigh_entry->rif = rif;
+ INIT_LIST_HEAD(&neigh_entry->nexthop_list);
+
+ return neigh_entry;
+}
+
+static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
+{
+ kfree(neigh_entry);
+}
+
+static int
+mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_neigh_entry *neigh_entry)
+{
+ return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
+ &neigh_entry->ht_node,
+ mlxsw_sp_neigh_ht_params);
+}
+
+static void
+mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_neigh_entry *neigh_entry)
+{
+ rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
+ &neigh_entry->ht_node,
+ mlxsw_sp_neigh_ht_params);
+}
+
+static bool
+mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_neigh_entry *neigh_entry)
+{
+ struct devlink *devlink;
+ const char *table_name;
+
+ switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
+ case AF_INET:
+ table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
+ break;
+ case AF_INET6:
+ table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
+ break;
+ default:
+ WARN_ON(1);
+ return false;
+ }
+
+ devlink = priv_to_devlink(mlxsw_sp->core);
+ return devlink_dpipe_table_counter_enabled(devlink, table_name);
+}
+
+static void
+mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_neigh_entry *neigh_entry)
+{
+ if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
+ return;
+
+ if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
+ return;
+
+ neigh_entry->counter_valid = true;
+}
+
+static void
+mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_neigh_entry *neigh_entry)
+{
+ if (!neigh_entry->counter_valid)
+ return;
+ mlxsw_sp_flow_counter_free(mlxsw_sp,
+ neigh_entry->counter_index);
+ neigh_entry->counter_valid = false;
+}
+
+static struct mlxsw_sp_neigh_entry *
+mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
+{
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+ struct mlxsw_sp_rif *rif;
+ int err;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
+ if (!rif)
+ return ERR_PTR(-EINVAL);
+
+ neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
+ if (!neigh_entry)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
+ if (err)
+ goto err_neigh_entry_insert;
+
+ mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
+ atomic_inc(&mlxsw_sp->router->neighs_update.neigh_count);
+ list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
+
+ return neigh_entry;
+
+err_neigh_entry_insert:
+ mlxsw_sp_neigh_entry_free(neigh_entry);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_neigh_entry *neigh_entry)
+{
+ list_del(&neigh_entry->rif_list_node);
+ atomic_dec(&mlxsw_sp->router->neighs_update.neigh_count);
+ mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
+ mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
+ mlxsw_sp_neigh_entry_free(neigh_entry);
+}
+
+static struct mlxsw_sp_neigh_entry *
+mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
+{
+ struct mlxsw_sp_neigh_key key;
+
+ key.n = n;
+ return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
+ &key, mlxsw_sp_neigh_ht_params);
+}
+
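+/* Use the shorter of the ARP and ND delay-probe intervals for the periodic
+ * neighbour activity dump, so entries of both families are refreshed in
+ * time.
+ */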
+static void
+mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
+{
+ unsigned long interval;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ interval = min_t(unsigned long,
+ NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
+ NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
+#else
+ interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
+#endif
+ mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
+}
+
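+/* The device reported this neighbour as active: poke the corresponding
+ * kernel neighbour so it is not aged out while hardware keeps using it.
+ */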
+static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
+ char *rauhtd_pl,
+ int ent_index)
+{
+ u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
+ struct net_device *dev;
+ struct neighbour *n;
+ __be32 dipn;
+ u32 dip;
+ u16 rif;
+
+ mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
+
+ if (WARN_ON_ONCE(rif >= max_rifs))
+ return;
+ if (!mlxsw_sp->router->rifs[rif]) {
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
+ return;
+ }
+
+ dipn = htonl(dip);
+ dev = mlxsw_sp->router->rifs[rif]->dev;
+ n = neigh_lookup(&arp_tbl, &dipn, dev);
+ if (!n)
+ return;
+
+ netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
+ neigh_event_send(n, NULL);
+ neigh_release(n);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
+ char *rauhtd_pl,
+ int rec_index)
+{
+ struct net_device *dev;
+ struct neighbour *n;
+ struct in6_addr dip;
+ u16 rif;
+
+ mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
+ (char *) &dip);
+
+ if (!mlxsw_sp->router->rifs[rif]) {
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
+ return;
+ }
+
+ dev = mlxsw_sp->router->rifs[rif]->dev;
+ n = neigh_lookup(&nd_tbl, &dip, dev);
+ if (!n)
+ return;
+
+ netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
+ neigh_event_send(n, NULL);
+ neigh_release(n);
+}
+#else
+static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
+ char *rauhtd_pl,
+ int rec_index)
+{
+}
+#endif
+
+static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
+ char *rauhtd_pl,
+ int rec_index)
+{
+ u8 num_entries;
+ int i;
+
+ num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
+ rec_index);
+ /* Hardware starts counting at 0, so add 1. */
+ num_entries++;
+
+ /* Each record consists of several neighbour entries. */
+ for (i = 0; i < num_entries; i++) {
+ int ent_index;
+
+ ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
+ mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
+ ent_index);
+ }
+}
+
+static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
+ char *rauhtd_pl,
+ int rec_index)
+{
+ /* One record contains one entry. */
+ mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
+ rec_index);
+}
+
+static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
+ char *rauhtd_pl, int rec_index)
+{
+ switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
+ case MLXSW_REG_RAUHTD_TYPE_IPV4:
+ mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
+ rec_index);
+ break;
+ case MLXSW_REG_RAUHTD_TYPE_IPV6:
+ mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
+ rec_index);
+ break;
+ }
+}
+
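+/* The dump needs another iteration only if the response was completely
+ * filled: the maximum number of records was returned and the last record is
+ * either IPv6 (one entry per record) or an IPv4 record holding the maximum
+ * number of entries.
+ */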
+static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
+{
+ u8 num_rec, last_rec_index, num_entries;
+
+ num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
+ last_rec_index = num_rec - 1;
+
+ if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
+ return false;
+ if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
+ MLXSW_REG_RAUHTD_TYPE_IPV6)
+ return true;
+
+ num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
+ last_rec_index);
+ if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
+ return true;
+ return false;
+}
+
+static int
+__mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
+ char *rauhtd_pl,
+ enum mlxsw_reg_rauhtd_type type)
+{
+ int i, num_rec;
+ int err;
+
+ /* Ensure the RIF we read from the device does not change mid-dump. */
+ mutex_lock(&mlxsw_sp->router->lock);
+ do {
+ mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
+ rauhtd_pl);
+ if (err) {
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
+ break;
+ }
+ num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
+ for (i = 0; i < num_rec; i++)
+ mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
+ i);
+ } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
+ mutex_unlock(&mlxsw_sp->router->lock);
+
+ return err;
+}
+
+static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
+{
+ enum mlxsw_reg_rauhtd_type type;
+ char *rauhtd_pl;
+ int err;
+
+ if (!atomic_read(&mlxsw_sp->router->neighs_update.neigh_count))
+ return 0;
+
+ rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
+ if (!rauhtd_pl)
+ return -ENOMEM;
+
+ type = MLXSW_REG_RAUHTD_TYPE_IPV4;
+ err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
+ if (err)
+ goto out;
+
+ type = MLXSW_REG_RAUHTD_TYPE_IPV6;
+ err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
+out:
+ kfree(rauhtd_pl);
+ return err;
+}
+
+static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+ list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
+ nexthop_neighs_list_node)
+ /* If this neigh has nexthops, make the kernel think it is active
+ * regardless of traffic.
+ */
+ neigh_event_send(neigh_entry->key.n, NULL);
+ mutex_unlock(&mlxsw_sp->router->lock);
+}
+
+static void
+mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
+{
+ unsigned long interval = mlxsw_sp->router->neighs_update.interval;
+
+ mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
+ msecs_to_jiffies(interval));
+}
+
+static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
+{
+ struct mlxsw_sp_router *router;
+ int err;
+
+ router = container_of(work, struct mlxsw_sp_router,
+ neighs_update.dw.work);
+ err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
+ if (err)
+ dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity\n");
+
+ mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
+
+ mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
+}
+
+static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
+{
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+ struct mlxsw_sp_router *router;
+
+ router = container_of(work, struct mlxsw_sp_router,
+ nexthop_probe_dw.work);
+ /* Iterate over nexthop neighbours, find the unresolved ones and send
+ * ARP on them. This solves the chicken-and-egg problem in which a
+ * nexthop would not be offloaded until its neighbour is resolved, but
+ * the neighbour would never be resolved as long as traffic keeps
+ * flowing in hardware via a different nexthop.
+ */
+ mutex_lock(&router->lock);
+ list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
+ nexthop_neighs_list_node)
+ if (!neigh_entry->connected)
+ neigh_event_send(neigh_entry->key.n, NULL);
+ mutex_unlock(&router->lock);
+
+ mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
+ MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
+}
+
+static void
+mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_neigh_entry *neigh_entry,
+ bool removing, bool dead);
+
+static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
+{
+ return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
+ MLXSW_REG_RAUHT_OP_WRITE_DELETE;
+}
+
+static int
+mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_neigh_entry *neigh_entry,
+ enum mlxsw_reg_rauht_op op)
+{
+ struct neighbour *n = neigh_entry->key.n;
+ u32 dip = ntohl(*((__be32 *) n->primary_key));
+ char rauht_pl[MLXSW_REG_RAUHT_LEN];
+
+ mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
+ dip);
+ if (neigh_entry->counter_valid)
+ mlxsw_reg_rauht_pack_counter(rauht_pl,
+ neigh_entry->counter_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
+}
+
+static int
+mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_neigh_entry *neigh_entry,
+ enum mlxsw_reg_rauht_op op)
+{
+ struct neighbour *n = neigh_entry->key.n;
+ char rauht_pl[MLXSW_REG_RAUHT_LEN];
+ const char *dip = n->primary_key;
+
+ mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
+ dip);
+ if (neigh_entry->counter_valid)
+ mlxsw_reg_rauht_pack_counter(rauht_pl,
+ neigh_entry->counter_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
+}
+
+bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
+{
+ struct neighbour *n = neigh_entry->key.n;
+
+ /* Packets with a link-local destination address are trapped
+ * after LPM lookup and never reach the neighbour table, so
+ * there is no need to program such neighbours to the device.
+ */
+ if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
+ IPV6_ADDR_LINKLOCAL)
+ return true;
+ return false;
+}
+
+static void
+mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_neigh_entry *neigh_entry,
+ bool adding)
+{
+ enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding);
+ int err;
+
+ if (!adding && !neigh_entry->connected)
+ return;
+ neigh_entry->connected = adding;
+ if (neigh_entry->key.n->tbl->family == AF_INET) {
+ err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
+ op);
+ if (err)
+ return;
+ } else if (neigh_entry->key.n->tbl->family == AF_INET6) {
+ if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
+ return;
+ err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
+ op);
+ if (err)
+ return;
+ } else {
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ if (adding)
+ neigh_entry->key.n->flags |= NTF_OFFLOADED;
+ else
+ neigh_entry->key.n->flags &= ~NTF_OFFLOADED;
+}
+
+void
+mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_neigh_entry *neigh_entry,
+ bool adding)
+{
+ if (adding)
+ mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
+ else
+ mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
+ mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
+}
+
+struct mlxsw_sp_netevent_work {
+ struct work_struct work;
+ struct mlxsw_sp *mlxsw_sp;
+ struct neighbour *n;
+};
+
+static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
+{
+ struct mlxsw_sp_netevent_work *net_work =
+ container_of(work, struct mlxsw_sp_netevent_work, work);
+ struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+ struct neighbour *n = net_work->n;
+ unsigned char ha[ETH_ALEN];
+ bool entry_connected;
+ u8 nud_state, dead;
+
+ /* If these parameters are changed after we release the lock,
+ * then we are guaranteed to receive another event letting us
+ * know about it.
+ */
+ read_lock_bh(&n->lock);
+ memcpy(ha, n->ha, ETH_ALEN);
+ nud_state = n->nud_state;
+ dead = n->dead;
+ read_unlock_bh(&n->lock);
+
+ mutex_lock(&mlxsw_sp->router->lock);
+ mlxsw_sp_span_respin(mlxsw_sp);
+
+ entry_connected = nud_state & NUD_VALID && !dead;
+ neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
+ if (!entry_connected && !neigh_entry)
+ goto out;
+ if (!neigh_entry) {
+ neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
+ if (IS_ERR(neigh_entry))
+ goto out;
+ }
+
+ if (neigh_entry->connected && entry_connected &&
+ !memcmp(neigh_entry->ha, ha, ETH_ALEN))
+ goto out;
+
+ memcpy(neigh_entry->ha, ha, ETH_ALEN);
+ mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
+ mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected,
+ dead);
+
+ if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
+ mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
+
+out:
+ mutex_unlock(&mlxsw_sp->router->lock);
+ neigh_release(n);
+ kfree(net_work);
+}
+
+static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
+
+static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
+{
+ struct mlxsw_sp_netevent_work *net_work =
+ container_of(work, struct mlxsw_sp_netevent_work, work);
+ struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
+
+ mlxsw_sp_mp_hash_init(mlxsw_sp);
+ kfree(net_work);
+}
+
+static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
+
+static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
+{
+ struct mlxsw_sp_netevent_work *net_work =
+ container_of(work, struct mlxsw_sp_netevent_work, work);
+ struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
+
+ __mlxsw_sp_router_init(mlxsw_sp);
+ kfree(net_work);
+}
+
+static int mlxsw_sp_router_schedule_work(struct net *net,
+ struct notifier_block *nb,
+ void (*cb)(struct work_struct *))
+{
+ struct mlxsw_sp_netevent_work *net_work;
+ struct mlxsw_sp_router *router;
+
+ router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
+ if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp)))
+ return NOTIFY_DONE;
+
+ net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
+ if (!net_work)
+ return NOTIFY_BAD;
+
+ INIT_WORK(&net_work->work, cb);
+ net_work->mlxsw_sp = router->mlxsw_sp;
+ mlxsw_core_schedule_work(&net_work->work);
+ return NOTIFY_DONE;
+}
+
+static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct mlxsw_sp_netevent_work *net_work;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp;
+ unsigned long interval;
+ struct neigh_parms *p;
+ struct neighbour *n;
+
+ switch (event) {
+ case NETEVENT_DELAY_PROBE_TIME_UPDATE:
+ p = ptr;
+
+ /* We don't care about changes in the default table. */
+ if (!p->dev || (p->tbl->family != AF_INET &&
+ p->tbl->family != AF_INET6))
+ return NOTIFY_DONE;
+
+ /* We are in atomic context and can't take RTNL mutex,
+ * so use RCU variant to walk the device chain.
+ */
+ mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
+ if (!mlxsw_sp_port)
+ return NOTIFY_DONE;
+
+ mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
+ mlxsw_sp->router->neighs_update.interval = interval;
+
+ mlxsw_sp_port_dev_put(mlxsw_sp_port);
+ break;
+ case NETEVENT_NEIGH_UPDATE:
+ n = ptr;
+
+ if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
+ return NOTIFY_DONE;
+
+ mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
+ if (!mlxsw_sp_port)
+ return NOTIFY_DONE;
+
+ net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
+ if (!net_work) {
+ mlxsw_sp_port_dev_put(mlxsw_sp_port);
+ return NOTIFY_BAD;
+ }
+
+ INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
+ net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ net_work->n = n;
+
+ /* Take a reference to ensure the neighbour won't be
+ * destroyed until we drop the reference in the delayed
+ * work.
+ */
+ neigh_clone(n);
+ mlxsw_core_schedule_work(&net_work->work);
+ mlxsw_sp_port_dev_put(mlxsw_sp_port);
+ break;
+ case NETEVENT_IPV4_MPATH_HASH_UPDATE:
+ case NETEVENT_IPV6_MPATH_HASH_UPDATE:
+ return mlxsw_sp_router_schedule_work(ptr, nb,
+ mlxsw_sp_router_mp_hash_event_work);
+
+ case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
+ return mlxsw_sp_router_schedule_work(ptr, nb,
+ mlxsw_sp_router_update_priority_work);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
+ &mlxsw_sp_neigh_ht_params);
+ if (err)
+ return err;
+
+ /* Initialize the polling interval according to the default
+ * table.
+ */
+ mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
+
+ /* Create the delayed works for neighbour activity update and unresolved nexthop probing */
+ INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
+ mlxsw_sp_router_neighs_update_work);
+ INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
+ mlxsw_sp_router_probe_unresolved_nexthops);
+ atomic_set(&mlxsw_sp->router->neighs_update.neigh_count, 0);
+ mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
+ mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
+ return 0;
+}
+
+static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
+ cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
+ rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
+}
+
+static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
+
+ list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
+ rif_list_node) {
+ mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
+ mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
+ }
+}
+
+enum mlxsw_sp_nexthop_type {
+ MLXSW_SP_NEXTHOP_TYPE_ETH,
+ MLXSW_SP_NEXTHOP_TYPE_IPIP,
+};
+
+enum mlxsw_sp_nexthop_action {
+ /* Nexthop forwards packets to an egress RIF */
+ MLXSW_SP_NEXTHOP_ACTION_FORWARD,
+ /* Nexthop discards packets */
+ MLXSW_SP_NEXTHOP_ACTION_DISCARD,
+ /* Nexthop traps packets */
+ MLXSW_SP_NEXTHOP_ACTION_TRAP,
+};
+
+struct mlxsw_sp_nexthop_key {
+ struct fib_nh *fib_nh;
+};
+
+struct mlxsw_sp_nexthop {
+ struct list_head neigh_list_node; /* member of neigh entry list */
+ struct list_head rif_list_node;
+ struct list_head router_list_node;
+ struct mlxsw_sp_nexthop_group_info *nhgi; /* pointer back to the group
+ * this nexthop belongs to
+ */
+ struct rhash_head ht_node;
+ struct neigh_table *neigh_tbl;
+ struct mlxsw_sp_nexthop_key key;
+ unsigned char gw_addr[sizeof(struct in6_addr)];
+ int ifindex;
+ int nh_weight;
+ int norm_nh_weight;
+ int num_adj_entries;
+ struct mlxsw_sp_rif *rif;
+ u8 should_offload:1, /* set indicates this nexthop should be written
+ * to the adjacency table.
+ */
+ offloaded:1, /* set indicates this nexthop was written to the
+ * adjacency table.
+ */
+ update:1; /* set indicates this nexthop should be updated in the
+ * adjacency table (e.g., its MAC changed).
+ */
+ enum mlxsw_sp_nexthop_action action;
+ enum mlxsw_sp_nexthop_type type;
+ union {
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+ };
+ unsigned int counter_index;
+ bool counter_valid;
+};
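+ /* The should_offload, offloaded and update bits above drive
+ * mlxsw_sp_nexthop_group_refresh(): when a nexthop's neighbour becomes
+ * valid, should_offload and update are set; the refresh then writes the
+ * adjacency entry and sets offloaded. When the neighbour goes away,
+ * should_offload is cleared and the nexthop is skipped on the next
+ * refresh (or kept as a trap entry in resilient groups).
+ */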
+
+enum mlxsw_sp_nexthop_group_type {
+ MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4,
+ MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6,
+ MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ,
+};
+
+struct mlxsw_sp_nexthop_group_info {
+ struct mlxsw_sp_nexthop_group *nh_grp;
+ u32 adj_index;
+ u16 ecmp_size;
+ u16 count;
+ int sum_norm_weight;
+ u8 adj_index_valid:1,
+ gateway:1, /* routes using the group use a gateway */
+ is_resilient:1;
+ struct list_head list; /* member in nh_res_grp_list */
+ struct mlxsw_sp_nexthop nexthops[0];
+#define nh_rif nexthops[0].rif
+};
+
+struct mlxsw_sp_nexthop_group_vr_key {
+ u16 vr_id;
+ enum mlxsw_sp_l3proto proto;
+};
+
+struct mlxsw_sp_nexthop_group_vr_entry {
+ struct list_head list; /* member in vr_list */
+ struct rhash_head ht_node; /* member in vr_ht */
+ refcount_t ref_count;
+ struct mlxsw_sp_nexthop_group_vr_key key;
+};
+
+struct mlxsw_sp_nexthop_group {
+ struct rhash_head ht_node;
+ struct list_head fib_list; /* list of fib entries that use this group */
+ union {
+ struct {
+ struct fib_info *fi;
+ } ipv4;
+ struct {
+ u32 id;
+ } obj;
+ };
+ struct mlxsw_sp_nexthop_group_info *nhgi;
+ struct list_head vr_list;
+ struct rhashtable vr_ht;
+ enum mlxsw_sp_nexthop_group_type type;
+ bool can_destroy;
+};
+
+void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ struct devlink *devlink;
+
+ devlink = priv_to_devlink(mlxsw_sp->core);
+ if (!devlink_dpipe_table_counter_enabled(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
+ return;
+
+ if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
+ return;
+
+ nh->counter_valid = true;
+}
+
+void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ if (!nh->counter_valid)
+ return;
+ mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
+ nh->counter_valid = false;
+}
+
+int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh, u64 *p_counter)
+{
+ if (!nh->counter_valid)
+ return -EINVAL;
+
+ return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
+ p_counter, NULL);
+}
+
+struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
+ struct mlxsw_sp_nexthop *nh)
+{
+ if (!nh) {
+ if (list_empty(&router->nexthop_list))
+ return NULL;
+ else
+ return list_first_entry(&router->nexthop_list,
+ typeof(*nh), router_list_node);
+ }
+ if (list_is_last(&nh->router_list_node, &router->nexthop_list))
+ return NULL;
+ return list_next_entry(nh, router_list_node);
+}
+
+bool mlxsw_sp_nexthop_is_forward(const struct mlxsw_sp_nexthop *nh)
+{
+ return nh->offloaded && nh->action == MLXSW_SP_NEXTHOP_ACTION_FORWARD;
+}
+
+unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
+{
+ if (nh->type != MLXSW_SP_NEXTHOP_TYPE_ETH ||
+ !mlxsw_sp_nexthop_is_forward(nh))
+ return NULL;
+ return nh->neigh_entry->ha;
+}
+
+int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
+ u32 *p_adj_size, u32 *p_adj_hash_index)
+{
+ struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
+ u32 adj_hash_index = 0;
+ int i;
+
+ if (!nh->offloaded || !nhgi->adj_index_valid)
+ return -EINVAL;
+
+ *p_adj_index = nhgi->adj_index;
+ *p_adj_size = nhgi->ecmp_size;
+
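+ /* The adjacency hash index is the nexthop's offset from the group's
+ * base adjacency index: the sum of adjacency entries used by the
+ * offloaded nexthops that precede it. For example, if the first two
+ * offloaded nexthops use 1 and 3 entries, the third one gets hash
+ * index 4.
+ */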
+ for (i = 0; i < nhgi->count; i++) {
+ struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
+
+ if (nh_iter == nh)
+ break;
+ if (nh_iter->offloaded)
+ adj_hash_index += nh_iter->num_adj_entries;
+ }
+
+ *p_adj_hash_index = adj_hash_index;
+ return 0;
+}
+
+struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
+{
+ return nh->rif;
+}
+
+bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
+{
+ struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
+ int i;
+
+ for (i = 0; i < nhgi->count; i++) {
+ struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
+
+ if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
+ return true;
+ }
+ return false;
+}
+
+static const struct rhashtable_params mlxsw_sp_nexthop_group_vr_ht_params = {
+ .key_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, key),
+ .head_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, ht_node),
+ .key_len = sizeof(struct mlxsw_sp_nexthop_group_vr_key),
+ .automatic_shrinking = true,
+};
+
+static struct mlxsw_sp_nexthop_group_vr_entry *
+mlxsw_sp_nexthop_group_vr_entry_lookup(struct mlxsw_sp_nexthop_group *nh_grp,
+ const struct mlxsw_sp_fib *fib)
+{
+ struct mlxsw_sp_nexthop_group_vr_key key;
+
+ memset(&key, 0, sizeof(key));
+ key.vr_id = fib->vr->id;
+ key.proto = fib->proto;
+ return rhashtable_lookup_fast(&nh_grp->vr_ht, &key,
+ mlxsw_sp_nexthop_group_vr_ht_params);
+}
+
+static int
+mlxsw_sp_nexthop_group_vr_entry_create(struct mlxsw_sp_nexthop_group *nh_grp,
+ const struct mlxsw_sp_fib *fib)
+{
+ struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
+ int err;
+
+ vr_entry = kzalloc(sizeof(*vr_entry), GFP_KERNEL);
+ if (!vr_entry)
+ return -ENOMEM;
+
+ vr_entry->key.vr_id = fib->vr->id;
+ vr_entry->key.proto = fib->proto;
+ refcount_set(&vr_entry->ref_count, 1);
+
+ err = rhashtable_insert_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
+ mlxsw_sp_nexthop_group_vr_ht_params);
+ if (err)
+ goto err_hashtable_insert;
+
+ list_add(&vr_entry->list, &nh_grp->vr_list);
+
+ return 0;
+
+err_hashtable_insert:
+ kfree(vr_entry);
+ return err;
+}
+
+static void
+mlxsw_sp_nexthop_group_vr_entry_destroy(struct mlxsw_sp_nexthop_group *nh_grp,
+ struct mlxsw_sp_nexthop_group_vr_entry *vr_entry)
+{
+ list_del(&vr_entry->list);
+ rhashtable_remove_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
+ mlxsw_sp_nexthop_group_vr_ht_params);
+ kfree(vr_entry);
+}
+
+static int
+mlxsw_sp_nexthop_group_vr_link(struct mlxsw_sp_nexthop_group *nh_grp,
+ const struct mlxsw_sp_fib *fib)
+{
+ struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
+
+ vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
+ if (vr_entry) {
+ refcount_inc(&vr_entry->ref_count);
+ return 0;
+ }
+
+ return mlxsw_sp_nexthop_group_vr_entry_create(nh_grp, fib);
+}
+
+static void
+mlxsw_sp_nexthop_group_vr_unlink(struct mlxsw_sp_nexthop_group *nh_grp,
+ const struct mlxsw_sp_fib *fib)
+{
+ struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
+
+ vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
+ if (WARN_ON_ONCE(!vr_entry))
+ return;
+
+ if (!refcount_dec_and_test(&vr_entry->ref_count))
+ return;
+
+ mlxsw_sp_nexthop_group_vr_entry_destroy(nh_grp, vr_entry);
+}
+
+struct mlxsw_sp_nexthop_group_cmp_arg {
+ enum mlxsw_sp_nexthop_group_type type;
+ union {
+ struct fib_info *fi;
+ struct mlxsw_sp_fib6_entry *fib6_entry;
+ u32 id;
+ };
+};
+
+static bool
+mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
+ const struct in6_addr *gw, int ifindex,
+ int weight)
+{
+ int i;
+
+ for (i = 0; i < nh_grp->nhgi->count; i++) {
+ const struct mlxsw_sp_nexthop *nh;
+
+ nh = &nh_grp->nhgi->nexthops[i];
+ if (nh->ifindex == ifindex && nh->nh_weight == weight &&
+ ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
+ return true;
+ }
+
+ return false;
+}
+
+static bool
+mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
+ const struct mlxsw_sp_fib6_entry *fib6_entry)
+{
+ struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
+
+ if (nh_grp->nhgi->count != fib6_entry->nrt6)
+ return false;
+
+ list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
+ struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
+ struct in6_addr *gw;
+ int ifindex, weight;
+
+ ifindex = fib6_nh->fib_nh_dev->ifindex;
+ weight = fib6_nh->fib_nh_weight;
+ gw = &fib6_nh->fib_nh_gw6;
+ if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
+ weight))
+ return false;
+ }
+
+ return true;
+}
+
+static int
+mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
+{
+ const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
+ const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
+
+ if (nh_grp->type != cmp_arg->type)
+ return 1;
+
+ switch (cmp_arg->type) {
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
+ return cmp_arg->fi != nh_grp->ipv4.fi;
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
+ return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
+ cmp_arg->fib6_entry);
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
+ return cmp_arg->id != nh_grp->obj.id;
+ default:
+ WARN_ON(1);
+ return 1;
+ }
+}
+
+static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
+{
+ const struct mlxsw_sp_nexthop_group *nh_grp = data;
+ const struct mlxsw_sp_nexthop *nh;
+ struct fib_info *fi;
+ unsigned int val;
+ int i;
+
+ switch (nh_grp->type) {
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
+ fi = nh_grp->ipv4.fi;
+ return jhash(&fi, sizeof(fi), seed);
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
+ val = nh_grp->nhgi->count;
+ for (i = 0; i < nh_grp->nhgi->count; i++) {
+ nh = &nh_grp->nhgi->nexthops[i];
+ val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
+ val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed);
+ }
+ return jhash(&val, sizeof(val), seed);
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
+ return jhash(&nh_grp->obj.id, sizeof(nh_grp->obj.id), seed);
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+}
+
+static u32
+mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
+{
+ unsigned int val = fib6_entry->nrt6;
+ struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
+
+ list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
+ struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
+ struct net_device *dev = fib6_nh->fib_nh_dev;
+ struct in6_addr *gw = &fib6_nh->fib_nh_gw6;
+
+ val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
+ val ^= jhash(gw, sizeof(*gw), seed);
+ }
+
+ return jhash(&val, sizeof(val), seed);
+}
+
+static u32
+mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
+{
+ const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
+
+ switch (cmp_arg->type) {
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
+ return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
+ return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
+ return jhash(&cmp_arg->id, sizeof(cmp_arg->id), seed);
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+}
+
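+ /* Lookups are keyed by struct mlxsw_sp_nexthop_group_cmp_arg while the
+ * stored objects are nexthop groups, so the hashfn and obj_hashfn below
+ * must return the same value for a matching pair. For IPv4 both sides
+ * hash the same fib_info pointer; for IPv6 both XOR per-nexthop hashes
+ * of the ifindex and gateway, seeded with the nexthop count.
+ */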
+static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
+ .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
+ .hashfn = mlxsw_sp_nexthop_group_hash,
+ .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
+ .obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
+};
+
+static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
+ !nh_grp->nhgi->gateway)
+ return 0;
+
+ return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
+ &nh_grp->ht_node,
+ mlxsw_sp_nexthop_group_ht_params);
+}
+
+static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
+ !nh_grp->nhgi->gateway)
+ return;
+
+ rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
+ &nh_grp->ht_node,
+ mlxsw_sp_nexthop_group_ht_params);
+}
+
+static struct mlxsw_sp_nexthop_group *
+mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
+ struct fib_info *fi)
+{
+ struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
+
+ cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
+ cmp_arg.fi = fi;
+ return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
+ &cmp_arg,
+ mlxsw_sp_nexthop_group_ht_params);
+}
+
+static struct mlxsw_sp_nexthop_group *
+mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib6_entry *fib6_entry)
+{
+ struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
+
+ cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
+ cmp_arg.fib6_entry = fib6_entry;
+ return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
+ &cmp_arg,
+ mlxsw_sp_nexthop_group_ht_params);
+}
+
+static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
+ .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
+ .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
+ .key_len = sizeof(struct mlxsw_sp_nexthop_key),
+};
+
+static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
+ &nh->ht_node, mlxsw_sp_nexthop_ht_params);
+}
+
+static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
+ mlxsw_sp_nexthop_ht_params);
+}
+
+static struct mlxsw_sp_nexthop *
+mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_key key)
+{
+ return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
+ mlxsw_sp_nexthop_ht_params);
+}
+
+static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_l3proto proto,
+ u16 vr_id,
+ u32 adj_index, u16 ecmp_size,
+ u32 new_adj_index,
+ u16 new_ecmp_size)
+{
+ char raleu_pl[MLXSW_REG_RALEU_LEN];
+
+ mlxsw_reg_raleu_pack(raleu_pl,
+ (enum mlxsw_reg_ralxx_protocol) proto, vr_id,
+ adj_index, ecmp_size, new_adj_index,
+ new_ecmp_size);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
+}
+
+static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp,
+ u32 old_adj_index, u16 old_ecmp_size)
+{
+ struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
+ struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
+ int err;
+
+ list_for_each_entry(vr_entry, &nh_grp->vr_list, list) {
+ err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp,
+ vr_entry->key.proto,
+ vr_entry->key.vr_id,
+ old_adj_index,
+ old_ecmp_size,
+ nhgi->adj_index,
+ nhgi->ecmp_size);
+ if (err)
+ goto err_mass_update_vr;
+ }
+ return 0;
+
+err_mass_update_vr:
+ list_for_each_entry_continue_reverse(vr_entry, &nh_grp->vr_list, list)
+ mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr_entry->key.proto,
+ vr_entry->key.vr_id,
+ nhgi->adj_index,
+ nhgi->ecmp_size,
+ old_adj_index, old_ecmp_size);
+ return err;
+}
+
+static int __mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp,
+ u32 adj_index,
+ struct mlxsw_sp_nexthop *nh,
+ bool force, char *ratr_pl)
+{
+ struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
+ enum mlxsw_reg_ratr_op op;
+ u16 rif_index;
+
+ rif_index = nh->rif ? nh->rif->rif_index :
+ mlxsw_sp->router->lb_rif_index;
+ op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY :
+ MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY;
+ mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_ETHERNET,
+ adj_index, rif_index);
+ switch (nh->action) {
+ case MLXSW_SP_NEXTHOP_ACTION_FORWARD:
+ mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
+ break;
+ case MLXSW_SP_NEXTHOP_ACTION_DISCARD:
+ mlxsw_reg_ratr_trap_action_set(ratr_pl,
+ MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS);
+ break;
+ case MLXSW_SP_NEXTHOP_ACTION_TRAP:
+ mlxsw_reg_ratr_trap_action_set(ratr_pl,
+ MLXSW_REG_RATR_TRAP_ACTION_TRAP);
+ mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+ if (nh->counter_valid)
+ mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
+ else
+ mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
+}
+
+int mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
+ struct mlxsw_sp_nexthop *nh, bool force,
+ char *ratr_pl)
+{
+ int i;
+
+ for (i = 0; i < nh->num_adj_entries; i++) {
+ int err;
+
+ err = __mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index + i,
+ nh, force, ratr_pl);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
+ u32 adj_index,
+ struct mlxsw_sp_nexthop *nh,
+ bool force, char *ratr_pl)
+{
+ const struct mlxsw_sp_ipip_ops *ipip_ops;
+
+ ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
+ return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry,
+ force, ratr_pl);
+}
+
+static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
+ u32 adj_index,
+ struct mlxsw_sp_nexthop *nh, bool force,
+ char *ratr_pl)
+{
+ int i;
+
+ for (i = 0; i < nh->num_adj_entries; i++) {
+ int err;
+
+ err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
+ nh, force, ratr_pl);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
+ struct mlxsw_sp_nexthop *nh, bool force,
+ char *ratr_pl)
+{
+ /* When action is discard or trap, the nexthop must be
+ * programmed as an Ethernet nexthop.
+ */
+ if (nh->type == MLXSW_SP_NEXTHOP_TYPE_ETH ||
+ nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD ||
+ nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
+ return mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index, nh,
+ force, ratr_pl);
+ else
+ return mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index, nh,
+ force, ratr_pl);
+}
+
+static int
+mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group_info *nhgi,
+ bool reallocate)
+{
+ char ratr_pl[MLXSW_REG_RATR_LEN];
+ u32 adj_index = nhgi->adj_index; /* base */
+ struct mlxsw_sp_nexthop *nh;
+ int i;
+
+ for (i = 0; i < nhgi->count; i++) {
+ nh = &nhgi->nexthops[i];
+
+ if (!nh->should_offload) {
+ nh->offloaded = 0;
+ continue;
+ }
+
+ if (nh->update || reallocate) {
+ int err = 0;
+
+ err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh,
+ true, ratr_pl);
+ if (err)
+ return err;
+ nh->update = 0;
+ nh->offloaded = 1;
+ }
+ adj_index += nh->num_adj_entries;
+ }
+ return 0;
+}
+
+static int
+mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ struct mlxsw_sp_fib_entry *fib_entry;
+ int err;
+
+ list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+struct mlxsw_sp_adj_grp_size_range {
+ u16 start; /* Inclusive */
+ u16 end; /* Inclusive */
+};
+
+/* Ordered by range start value */
+static const struct mlxsw_sp_adj_grp_size_range
+mlxsw_sp1_adj_grp_size_ranges[] = {
+ { .start = 1, .end = 64 },
+ { .start = 512, .end = 512 },
+ { .start = 1024, .end = 1024 },
+ { .start = 2048, .end = 2048 },
+ { .start = 4096, .end = 4096 },
+};
+
+/* Ordered by range start value */
+static const struct mlxsw_sp_adj_grp_size_range
+mlxsw_sp2_adj_grp_size_ranges[] = {
+ { .start = 1, .end = 128 },
+ { .start = 256, .end = 256 },
+ { .start = 512, .end = 512 },
+ { .start = 1024, .end = 1024 },
+ { .start = 2048, .end = 2048 },
+ { .start = 4096, .end = 4096 },
+};
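+ /* These ranges are consumed by the two helpers below: a requested group
+ * size is first rounded up to the end of the first range that can hold
+ * it (e.g., on Spectrum-2 a request for 200 adjacency entries becomes
+ * 256), and after the KVDL allocation is queried it is rounded down to
+ * the largest range end that the actual allocation can cover.
+ */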
+
+static void mlxsw_sp_adj_grp_size_round_up(const struct mlxsw_sp *mlxsw_sp,
+ u16 *p_adj_grp_size)
+{
+ int i;
+
+ for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
+ const struct mlxsw_sp_adj_grp_size_range *size_range;
+
+ size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
+
+ if (*p_adj_grp_size >= size_range->start &&
+ *p_adj_grp_size <= size_range->end)
+ return;
+
+ if (*p_adj_grp_size <= size_range->end) {
+ *p_adj_grp_size = size_range->end;
+ return;
+ }
+ }
+}
+
+static void mlxsw_sp_adj_grp_size_round_down(const struct mlxsw_sp *mlxsw_sp,
+ u16 *p_adj_grp_size,
+ unsigned int alloc_size)
+{
+ int i;
+
+ for (i = mlxsw_sp->router->adj_grp_size_ranges_count - 1; i >= 0; i--) {
+ const struct mlxsw_sp_adj_grp_size_range *size_range;
+
+ size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
+
+ if (alloc_size >= size_range->end) {
+ *p_adj_grp_size = size_range->end;
+ return;
+ }
+ }
+}
+
+static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
+ u16 *p_adj_grp_size)
+{
+ unsigned int alloc_size;
+ int err;
+
+ /* Round up the requested group size to the next size supported
+ * by the device and make sure the request can be satisfied.
+ */
+ mlxsw_sp_adj_grp_size_round_up(mlxsw_sp, p_adj_grp_size);
+ err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
+ MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ *p_adj_grp_size, &alloc_size);
+ if (err)
+ return err;
+ /* It is possible the allocation results in more allocated
+ * entries than requested. Try to use as many of them as
+ * possible.
+ */
+ mlxsw_sp_adj_grp_size_round_down(mlxsw_sp, p_adj_grp_size, alloc_size);
+
+ return 0;
+}
+
+static void
+mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group_info *nhgi)
+{
+ int i, g = 0, sum_norm_weight = 0;
+ struct mlxsw_sp_nexthop *nh;
+
+ for (i = 0; i < nhgi->count; i++) {
+ nh = &nhgi->nexthops[i];
+
+ if (!nh->should_offload)
+ continue;
+ if (g > 0)
+ g = gcd(nh->nh_weight, g);
+ else
+ g = nh->nh_weight;
+ }
+
+ for (i = 0; i < nhgi->count; i++) {
+ nh = &nhgi->nexthops[i];
+
+ if (!nh->should_offload)
+ continue;
+ nh->norm_nh_weight = nh->nh_weight / g;
+ sum_norm_weight += nh->norm_nh_weight;
+ }
+
+ nhgi->sum_norm_weight = sum_norm_weight;
+}
+
+static void
+mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group_info *nhgi)
+{
+ int i, weight = 0, lower_bound = 0;
+ int total = nhgi->sum_norm_weight;
+ u16 ecmp_size = nhgi->ecmp_size;
+
+ for (i = 0; i < nhgi->count; i++) {
+ struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
+ int upper_bound;
+
+ if (!nh->should_offload)
+ continue;
+ weight += nh->norm_nh_weight;
+ upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
+ nh->num_adj_entries = upper_bound - lower_bound;
+ lower_bound = upper_bound;
+ }
+}
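As a rough illustration of how the two helpers above combine (a user-space
sketch, not driver code; the weights and ECMP size below are made up):

	#include <stdio.h>

	/* Positive-only version of the kernel's DIV_ROUND_CLOSEST(). */
	#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

	static unsigned int gcd_u(unsigned int a, unsigned int b)
	{
		while (b) {
			unsigned int t = a % b;

			a = b;
			b = t;
		}
		return a;
	}

	int main(void)
	{
		unsigned int weights[] = { 10, 20, 30 }; /* assumed user weights */
		unsigned int norm[3], g = 0, sum = 0, ecmp = 8;
		unsigned int lower = 0, acc = 0;
		int i;

		/* Normalize: divide each weight by the group GCD, as in
		 * mlxsw_sp_nexthop_group_normalize().
		 */
		for (i = 0; i < 3; i++)
			g = g ? gcd_u(weights[i], g) : weights[i];
		for (i = 0; i < 3; i++) {
			norm[i] = weights[i] / g;
			sum += norm[i];
		}

		/* Rebalance: split the ECMP size proportionally, as in
		 * mlxsw_sp_nexthop_group_rebalance().
		 */
		for (i = 0; i < 3; i++) {
			unsigned int upper;

			acc += norm[i];
			upper = DIV_ROUND_CLOSEST(ecmp * acc, sum);
			printf("nh%d: weight %u -> norm %u -> %u adjacency entries\n",
			       i, weights[i], norm[i], upper - lower);
			lower = upper;
		}
		return 0;
	}

With weights 10/20/30 this prints 1, 3 and 4 adjacency entries respectively,
summing to the ECMP size of 8.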
+
+static struct mlxsw_sp_nexthop *
+mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
+ const struct mlxsw_sp_rt6 *mlxsw_sp_rt6);
+
+static void
+mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ int i;
+
+ for (i = 0; i < nh_grp->nhgi->count; i++) {
+ struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
+
+ if (nh->offloaded)
+ nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
+ else
+ nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
+ }
+}
+
+static void
+__mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp_nexthop_group *nh_grp,
+ struct mlxsw_sp_fib6_entry *fib6_entry)
+{
+ struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
+
+ list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
+ struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
+ struct mlxsw_sp_nexthop *nh;
+
+ nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
+ if (nh && nh->offloaded)
+ fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
+ else
+ fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
+ }
+}
+
+static void
+mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ struct mlxsw_sp_fib6_entry *fib6_entry;
+
+ /* Unfortunately, in IPv6 the route and the nexthop are described by
+ * the same struct, so we need to iterate over all the routes using the
+ * nexthop group and set / clear the offload indication for them.
+ */
+ list_for_each_entry(fib6_entry, &nh_grp->fib_list,
+ common.nexthop_group_node)
+ __mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
+}
+
+static void
+mlxsw_sp_nexthop_bucket_offload_refresh(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_nexthop *nh,
+ u16 bucket_index)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp;
+ bool offload = false, trap = false;
+
+ if (nh->offloaded) {
+ if (nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
+ trap = true;
+ else
+ offload = true;
+ }
+ nexthop_bucket_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
+ bucket_index, offload, trap);
+}
+
+static void
+mlxsw_sp_nexthop_obj_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ int i;
+
+ /* Do not update the flags if the nexthop group is being destroyed
+ * since:
+ * 1. The nexthop object is being deleted, in which case the flags are
+ * irrelevant.
+ * 2. The nexthop group was replaced by a newer group, in which case
+ * the flags of the nexthop object were already updated based on the
+ * new group.
+ */
+ if (nh_grp->can_destroy)
+ return;
+
+ nexthop_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
+ nh_grp->nhgi->adj_index_valid, false);
+
+ /* Update flags of individual nexthop buckets in case of a resilient
+ * nexthop group.
+ */
+ if (!nh_grp->nhgi->is_resilient)
+ return;
+
+ for (i = 0; i < nh_grp->nhgi->count; i++) {
+ struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
+
+ mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, i);
+ }
+}
+
+static void
+mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ switch (nh_grp->type) {
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
+ mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp);
+ break;
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
+ mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
+ break;
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
+ mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, nh_grp);
+ break;
+ }
+}
+
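+ /* Refreshing a gateway nexthop group follows this flow: mark which
+ * nexthops changed their offload state; if none did, only rewrite the
+ * existing adjacency entries (e.g., for a MAC change); otherwise
+ * normalize the weights, pick a supported group size, allocate a new
+ * KVDL adjacency range, write the nexthops, point the routes (or, for
+ * an existing group, the virtual routers) at the new range and free the
+ * old one. Any failure falls back to trapping the group's traffic to
+ * the CPU.
+ */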
+static int
+mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
+ u16 ecmp_size, old_ecmp_size;
+ struct mlxsw_sp_nexthop *nh;
+ bool offload_change = false;
+ u32 adj_index;
+ bool old_adj_index_valid;
+ u32 old_adj_index;
+ int i, err2, err;
+
+ if (!nhgi->gateway)
+ return mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
+
+ for (i = 0; i < nhgi->count; i++) {
+ nh = &nhgi->nexthops[i];
+
+ if (nh->should_offload != nh->offloaded) {
+ offload_change = true;
+ if (nh->should_offload)
+ nh->update = 1;
+ }
+ }
+ if (!offload_change) {
+ /* Nothing was added or removed, so no need to reallocate. Just
+ * update MAC on existing adjacency indexes.
+ */
+ err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, false);
+ if (err) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
+ goto set_trap;
+ }
+ /* Flags of individual nexthop buckets might need to be
+ * updated.
+ */
+ mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
+ return 0;
+ }
+ mlxsw_sp_nexthop_group_normalize(nhgi);
+ if (!nhgi->sum_norm_weight) {
+ /* No neigh of this group is connected, so we just set
+ * the trap and let everything flow through the kernel.
+ */
+ err = 0;
+ goto set_trap;
+ }
+
+ ecmp_size = nhgi->sum_norm_weight;
+ err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
+ if (err)
+ /* No valid allocation size available. */
+ goto set_trap;
+
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ ecmp_size, &adj_index);
+ if (err) {
+ /* We ran out of KVD linear space, just set the
+ * trap and let everything flow through the kernel.
+ */
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
+ goto set_trap;
+ }
+ old_adj_index_valid = nhgi->adj_index_valid;
+ old_adj_index = nhgi->adj_index;
+ old_ecmp_size = nhgi->ecmp_size;
+ nhgi->adj_index_valid = 1;
+ nhgi->adj_index = adj_index;
+ nhgi->ecmp_size = ecmp_size;
+ mlxsw_sp_nexthop_group_rebalance(nhgi);
+ err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, true);
+ if (err) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
+ goto set_trap;
+ }
+
+ mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
+
+ if (!old_adj_index_valid) {
+ /* The trap was set for fib entries, so we have to call
+ * fib entry update to unset it and use the adjacency index.
+ */
+ err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
+ if (err) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
+ goto set_trap;
+ }
+ return 0;
+ }
+
+ err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
+ old_adj_index, old_ecmp_size);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ old_ecmp_size, old_adj_index);
+ if (err) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
+ goto set_trap;
+ }
+
+ return 0;
+
+set_trap:
+ old_adj_index_valid = nhgi->adj_index_valid;
+ nhgi->adj_index_valid = 0;
+ for (i = 0; i < nhgi->count; i++) {
+ nh = &nhgi->nexthops[i];
+ nh->offloaded = 0;
+ }
+ err2 = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
+ if (err2)
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
+ mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
+ if (old_adj_index_valid)
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ nhgi->ecmp_size, nhgi->adj_index);
+ return err;
+}
+
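+ /* A nexthop whose neighbour became valid is marked for forwarding and
+ * offload. When the neighbour is removed, a nexthop in a resilient
+ * group is kept in the adjacency table as a trap entry (its bucket must
+ * stay populated), while in a regular group it is simply no longer
+ * offloaded.
+ */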
+static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
+ bool removing)
+{
+ if (!removing) {
+ nh->action = MLXSW_SP_NEXTHOP_ACTION_FORWARD;
+ nh->should_offload = 1;
+ } else if (nh->nhgi->is_resilient) {
+ nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
+ nh->should_offload = 1;
+ } else {
+ nh->should_offload = 0;
+ }
+ nh->update = 1;
+}
+
+static int
+mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_neigh_entry *neigh_entry)
+{
+ struct neighbour *n, *old_n = neigh_entry->key.n;
+ struct mlxsw_sp_nexthop *nh;
+ bool entry_connected;
+ u8 nud_state, dead;
+ int err;
+
+ nh = list_first_entry(&neigh_entry->nexthop_list,
+ struct mlxsw_sp_nexthop, neigh_list_node);
+
+ n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
+ if (!n) {
+ n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
+ if (IS_ERR(n))
+ return PTR_ERR(n);
+ neigh_event_send(n, NULL);
+ }
+
+ mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
+ neigh_entry->key.n = n;
+ err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
+ if (err)
+ goto err_neigh_entry_insert;
+
+ read_lock_bh(&n->lock);
+ nud_state = n->nud_state;
+ dead = n->dead;
+ read_unlock_bh(&n->lock);
+ entry_connected = nud_state & NUD_VALID && !dead;
+
+ list_for_each_entry(nh, &neigh_entry->nexthop_list,
+ neigh_list_node) {
+ neigh_release(old_n);
+ neigh_clone(n);
+ __mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
+ }
+
+ neigh_release(n);
+
+ return 0;
+
+err_neigh_entry_insert:
+ neigh_entry->key.n = old_n;
+ mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
+ neigh_release(n);
+ return err;
+}
+
+static void
+mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_neigh_entry *neigh_entry,
+ bool removing, bool dead)
+{
+ struct mlxsw_sp_nexthop *nh;
+
+ if (list_empty(&neigh_entry->nexthop_list))
+ return;
+
+ if (dead) {
+ int err;
+
+ err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp,
+ neigh_entry);
+ if (err)
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n");
+ return;
+ }
+
+ list_for_each_entry(nh, &neigh_entry->nexthop_list,
+ neigh_list_node) {
+ __mlxsw_sp_nexthop_neigh_update(nh, removing);
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
+ }
+}
+
+static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
+ struct mlxsw_sp_rif *rif)
+{
+ if (nh->rif)
+ return;
+
+ nh->rif = rif;
+ list_add(&nh->rif_list_node, &rif->nexthop_list);
+}
+
+static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
+{
+ if (!nh->rif)
+ return;
+
+ list_del(&nh->rif_list_node);
+ nh->rif = NULL;
+}
+
+static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+ struct neighbour *n;
+ u8 nud_state, dead;
+ int err;
+
+ if (!nh->nhgi->gateway || nh->neigh_entry)
+ return 0;
+
+ /* Take a reference on the neigh here to ensure it is not
+ * destroyed before the nexthop entry is finished with it.
+ * The reference is taken either by neigh_lookup() or by
+ * neigh_create() in case n is not found.
+ */
+ n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
+ if (!n) {
+ n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
+ if (IS_ERR(n))
+ return PTR_ERR(n);
+ neigh_event_send(n, NULL);
+ }
+ neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
+ if (!neigh_entry) {
+ neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
+ if (IS_ERR(neigh_entry)) {
+ err = -EINVAL;
+ goto err_neigh_entry_create;
+ }
+ }
+
+ /* If that is the first nexthop connected to that neigh, add to
+ * nexthop_neighs_list
+ */
+ if (list_empty(&neigh_entry->nexthop_list))
+ list_add_tail(&neigh_entry->nexthop_neighs_list_node,
+ &mlxsw_sp->router->nexthop_neighs_list);
+
+ nh->neigh_entry = neigh_entry;
+ list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
+ read_lock_bh(&n->lock);
+ nud_state = n->nud_state;
+ dead = n->dead;
+ read_unlock_bh(&n->lock);
+ __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
+
+ return 0;
+
+err_neigh_entry_create:
+ neigh_release(n);
+ return err;
+}
+
+static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
+ struct neighbour *n;
+
+ if (!neigh_entry)
+ return;
+ n = neigh_entry->key.n;
+
+ __mlxsw_sp_nexthop_neigh_update(nh, true);
+ list_del(&nh->neigh_list_node);
+ nh->neigh_entry = NULL;
+
+ /* If that is the last nexthop connected to that neigh, remove from
+ * nexthop_neighs_list
+ */
+ if (list_empty(&neigh_entry->nexthop_list))
+ list_del(&neigh_entry->nexthop_neighs_list_node);
+
+ if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
+ mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
+
+ neigh_release(n);
+}
+
+static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
+{
+ struct net_device *ul_dev;
+ bool is_up;
+
+ rcu_read_lock();
+ ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
+ rcu_read_unlock();
+
+ return is_up;
+}
+
+static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ bool removing;
+
+ if (!nh->nhgi->gateway || nh->ipip_entry)
+ return;
+
+ nh->ipip_entry = ipip_entry;
+ removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
+ __mlxsw_sp_nexthop_neigh_update(nh, removing);
+ mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
+}
+
+static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
+
+ if (!ipip_entry)
+ return;
+
+ __mlxsw_sp_nexthop_neigh_update(nh, true);
+ nh->ipip_entry = NULL;
+}
+
+static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
+ const struct fib_nh *fib_nh,
+ enum mlxsw_sp_ipip_type *p_ipipt)
+{
+ struct net_device *dev = fib_nh->fib_nh_dev;
+
+ return dev &&
+ fib_nh->nh_parent->fib_type == RTN_UNICAST &&
+ mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
+}
+
+static int mlxsw_sp_nexthop_type_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh,
+ const struct net_device *dev)
+{
+ const struct mlxsw_sp_ipip_ops *ipip_ops;
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+ struct mlxsw_sp_rif *rif;
+ int err;
+
+ ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
+ if (ipip_entry) {
+ ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
+ if (ipip_ops->can_offload(mlxsw_sp, dev)) {
+ nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
+ mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
+ return 0;
+ }
+ }
+
+ nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!rif)
+ return 0;
+
+ mlxsw_sp_nexthop_rif_init(nh, rif);
+ err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
+ if (err)
+ goto err_neigh_init;
+
+ return 0;
+
+err_neigh_init:
+ mlxsw_sp_nexthop_rif_fini(nh);
+ return err;
+}
+
+static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ switch (nh->type) {
+ case MLXSW_SP_NEXTHOP_TYPE_ETH:
+ mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_rif_fini(nh);
+ break;
+ case MLXSW_SP_NEXTHOP_TYPE_IPIP:
+ mlxsw_sp_nexthop_rif_fini(nh);
+ mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
+ break;
+ }
+}
+
+static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp,
+ struct mlxsw_sp_nexthop *nh,
+ struct fib_nh *fib_nh)
+{
+ struct net_device *dev = fib_nh->fib_nh_dev;
+ struct in_device *in_dev;
+ int err;
+
+ nh->nhgi = nh_grp->nhgi;
+ nh->key.fib_nh = fib_nh;
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+ nh->nh_weight = fib_nh->fib_nh_weight;
+#else
+ nh->nh_weight = 1;
+#endif
+ memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
+ nh->neigh_tbl = &arp_tbl;
+ err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
+ if (err)
+ return err;
+
+ mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
+ list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
+
+ if (!dev)
+ return 0;
+ nh->ifindex = dev->ifindex;
+
+ rcu_read_lock();
+ in_dev = __in_dev_get_rcu(dev);
+ if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
+ fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
+ rcu_read_unlock();
+ return 0;
+ }
+ rcu_read_unlock();
+
+ err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
+ if (err)
+ goto err_nexthop_neigh_init;
+
+ return 0;
+
+err_nexthop_neigh_init:
+ list_del(&nh->router_list_node);
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
+ return err;
+}
+
+static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
+ list_del(&nh->router_list_node);
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
+}
+
+static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
+ unsigned long event, struct fib_nh *fib_nh)
+{
+ struct mlxsw_sp_nexthop_key key;
+ struct mlxsw_sp_nexthop *nh;
+
+ key.fib_nh = fib_nh;
+ nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
+ if (!nh)
+ return;
+
+ switch (event) {
+ case FIB_EVENT_NH_ADD:
+ mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, fib_nh->fib_nh_dev);
+ break;
+ case FIB_EVENT_NH_DEL:
+ mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
+ break;
+ }
+
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
+}
+
+static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp_nexthop *nh;
+ bool removing;
+
+ list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
+ switch (nh->type) {
+ case MLXSW_SP_NEXTHOP_TYPE_ETH:
+ removing = false;
+ break;
+ case MLXSW_SP_NEXTHOP_TYPE_IPIP:
+ removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
+ break;
+ default:
+ WARN_ON(1);
+ continue;
+ }
+
+ __mlxsw_sp_nexthop_neigh_update(nh, removing);
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
+ }
+}
+
+static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *old_rif,
+ struct mlxsw_sp_rif *new_rif)
+{
+ struct mlxsw_sp_nexthop *nh;
+
+ list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
+ list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
+ nh->rif = new_rif;
+ mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
+}
+
+static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp_nexthop *nh, *tmp;
+
+ list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
+ mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
+ }
+}
+
+static int mlxsw_sp_adj_trap_entry_init(struct mlxsw_sp *mlxsw_sp)
+{
+ enum mlxsw_reg_ratr_trap_action trap_action;
+ char ratr_pl[MLXSW_REG_RATR_LEN];
+ int err;
+
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ &mlxsw_sp->router->adj_trap_index);
+ if (err)
+ return err;
+
+ trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP;
+ mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
+ MLXSW_REG_RATR_TYPE_ETHERNET,
+ mlxsw_sp->router->adj_trap_index,
+ mlxsw_sp->router->lb_rif_index);
+ mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
+ mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
+ if (err)
+ goto err_ratr_write;
+
+ return 0;
+
+err_ratr_write:
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ mlxsw_sp->router->adj_trap_index);
+ return err;
+}
+
+static void mlxsw_sp_adj_trap_entry_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ mlxsw_sp->router->adj_trap_index);
+}
+
+static int mlxsw_sp_nexthop_group_inc(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ if (refcount_inc_not_zero(&mlxsw_sp->router->num_groups))
+ return 0;
+
+ err = mlxsw_sp_adj_trap_entry_init(mlxsw_sp);
+ if (err)
+ return err;
+
+ refcount_set(&mlxsw_sp->router->num_groups, 1);
+
+ return 0;
+}
+
+static void mlxsw_sp_nexthop_group_dec(struct mlxsw_sp *mlxsw_sp)
+{
+ if (!refcount_dec_and_test(&mlxsw_sp->router->num_groups))
+ return;
+
+ mlxsw_sp_adj_trap_entry_fini(mlxsw_sp);
+}
+
+static void
+mlxsw_sp_nh_grp_activity_get(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_nexthop_group *nh_grp,
+ unsigned long *activity)
+{
+ char *ratrad_pl;
+ int i, err;
+
+ ratrad_pl = kmalloc(MLXSW_REG_RATRAD_LEN, GFP_KERNEL);
+ if (!ratrad_pl)
+ return;
+
+ mlxsw_reg_ratrad_pack(ratrad_pl, nh_grp->nhgi->adj_index,
+ nh_grp->nhgi->count);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratrad), ratrad_pl);
+ if (err)
+ goto out;
+
+ for (i = 0; i < nh_grp->nhgi->count; i++) {
+ if (!mlxsw_reg_ratrad_activity_vector_get(ratrad_pl, i))
+ continue;
+ bitmap_set(activity, i, 1);
+ }
+
+out:
+ kfree(ratrad_pl);
+}
+
+#define MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL 1000 /* ms */
+
+static void
+mlxsw_sp_nh_grp_activity_update(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ unsigned long *activity;
+
+ activity = bitmap_zalloc(nh_grp->nhgi->count, GFP_KERNEL);
+ if (!activity)
+ return;
+
+ mlxsw_sp_nh_grp_activity_get(mlxsw_sp, nh_grp, activity);
+ nexthop_res_grp_activity_update(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
+ nh_grp->nhgi->count, activity);
+
+ bitmap_free(activity);
+}
+
+static void
+mlxsw_sp_nh_grp_activity_work_schedule(struct mlxsw_sp *mlxsw_sp)
+{
+ unsigned int interval = MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL;
+
+ mlxsw_core_schedule_dw(&mlxsw_sp->router->nh_grp_activity_dw,
+ msecs_to_jiffies(interval));
+}
+
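+ /* The work below runs every MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL
+ * milliseconds for as long as resilient groups exist: it queries the
+ * RATRAD register for each group's adjacency range and reports the
+ * per-bucket activity bitmap to the nexthop code, which uses it to
+ * track bucket idle time.
+ */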
+static void mlxsw_sp_nh_grp_activity_work(struct work_struct *work)
+{
+ struct mlxsw_sp_nexthop_group_info *nhgi;
+ struct mlxsw_sp_router *router;
+ bool reschedule = false;
+
+ router = container_of(work, struct mlxsw_sp_router,
+ nh_grp_activity_dw.work);
+
+ mutex_lock(&router->lock);
+
+ list_for_each_entry(nhgi, &router->nh_res_grp_list, list) {
+ mlxsw_sp_nh_grp_activity_update(router->mlxsw_sp, nhgi->nh_grp);
+ reschedule = true;
+ }
+
+ mutex_unlock(&router->lock);
+
+ if (!reschedule)
+ return;
+ mlxsw_sp_nh_grp_activity_work_schedule(router->mlxsw_sp);
+}
+
+static int
+mlxsw_sp_nexthop_obj_single_validate(struct mlxsw_sp *mlxsw_sp,
+ const struct nh_notifier_single_info *nh,
+ struct netlink_ext_ack *extack)
+{
+ int err = -EINVAL;
+
+ if (nh->is_fdb)
+ NL_SET_ERR_MSG_MOD(extack, "FDB nexthops are not supported");
+ else if (nh->has_encap)
+ NL_SET_ERR_MSG_MOD(extack, "Encapsulating nexthops are not supported");
+ else
+ err = 0;
+
+ return err;
+}
+
+static int
+mlxsw_sp_nexthop_obj_group_entry_validate(struct mlxsw_sp *mlxsw_sp,
+ const struct nh_notifier_single_info *nh,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, nh, extack);
+ if (err)
+ return err;
+
+ /* Device-only nexthops with an IPIP device are programmed as
+ * encapsulating adjacency entries.
+ */
+ if (!nh->gw_family && !nh->is_reject &&
+ !mlxsw_sp_netdev_ipip_type(mlxsw_sp, nh->dev, NULL)) {
+ NL_SET_ERR_MSG_MOD(extack, "Nexthop group entry does not have a gateway");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+mlxsw_sp_nexthop_obj_group_validate(struct mlxsw_sp *mlxsw_sp,
+ const struct nh_notifier_grp_info *nh_grp,
+ struct netlink_ext_ack *extack)
+{
+ int i;
+
+ if (nh_grp->is_fdb) {
+ NL_SET_ERR_MSG_MOD(extack, "FDB nexthop groups are not supported");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < nh_grp->num_nh; i++) {
+ const struct nh_notifier_single_info *nh;
+ int err;
+
+ nh = &nh_grp->nh_entries[i].nh;
+ err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
+ extack);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+mlxsw_sp_nexthop_obj_res_group_size_validate(struct mlxsw_sp *mlxsw_sp,
+ const struct nh_notifier_res_table_info *nh_res_table,
+ struct netlink_ext_ack *extack)
+{
+ unsigned int alloc_size;
+ bool valid_size = false;
+ int err, i;
+
+ if (nh_res_table->num_nh_buckets < 32) {
+ NL_SET_ERR_MSG_MOD(extack, "Minimum number of buckets is 32");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
+ const struct mlxsw_sp_adj_grp_size_range *size_range;
+
+ size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
+
+ if (nh_res_table->num_nh_buckets >= size_range->start &&
+ nh_res_table->num_nh_buckets <= size_range->end) {
+ valid_size = true;
+ break;
+ }
+ }
+
+ if (!valid_size) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid number of buckets");
+ return -EINVAL;
+ }
+
+ err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
+ MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ nh_res_table->num_nh_buckets,
+ &alloc_size);
+ if (err || nh_res_table->num_nh_buckets != alloc_size) {
+ NL_SET_ERR_MSG_MOD(extack, "Number of buckets does not fit allocation size of any KVDL partition");
+ return -EINVAL;
+ }
+
+ return 0;
+}
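+ /* For example, on Spectrum-2 a resilient group with 512 buckets passes
+ * the range check above, while 300 buckets is rejected because it does
+ * not fall within any supported adjacency group size range; the final
+ * check additionally requires the bucket count to exactly match a KVDL
+ * allocation size.
+ */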
+
+static int
+mlxsw_sp_nexthop_obj_res_group_validate(struct mlxsw_sp *mlxsw_sp,
+ const struct nh_notifier_res_table_info *nh_res_table,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+ u16 i;
+
+ err = mlxsw_sp_nexthop_obj_res_group_size_validate(mlxsw_sp,
+ nh_res_table,
+ extack);
+ if (err)
+ return err;
+
+ for (i = 0; i < nh_res_table->num_nh_buckets; i++) {
+ const struct nh_notifier_single_info *nh;
+ int err;
+
+ nh = &nh_res_table->nhs[i];
+ err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
+ extack);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp,
+ unsigned long event,
+ struct nh_notifier_info *info)
+{
+ struct nh_notifier_single_info *nh;
+
+ if (event != NEXTHOP_EVENT_REPLACE &&
+ event != NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE &&
+ event != NEXTHOP_EVENT_BUCKET_REPLACE)
+ return 0;
+
+ switch (info->type) {
+ case NH_NOTIFIER_INFO_TYPE_SINGLE:
+ return mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, info->nh,
+ info->extack);
+ case NH_NOTIFIER_INFO_TYPE_GRP:
+ return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp,
+ info->nh_grp,
+ info->extack);
+ case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
+ return mlxsw_sp_nexthop_obj_res_group_validate(mlxsw_sp,
+ info->nh_res_table,
+ info->extack);
+ case NH_NOTIFIER_INFO_TYPE_RES_BUCKET:
+ nh = &info->nh_res_bucket->new_nh;
+ return mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
+ info->extack);
+ default:
+ NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop type");
+ return -EOPNOTSUPP;
+ }
+}
+
+static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp,
+ const struct nh_notifier_info *info)
+{
+ const struct net_device *dev;
+
+ switch (info->type) {
+ case NH_NOTIFIER_INFO_TYPE_SINGLE:
+ dev = info->nh->dev;
+ return info->nh->gw_family || info->nh->is_reject ||
+ mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
+ case NH_NOTIFIER_INFO_TYPE_GRP:
+ case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
+ /* Already validated earlier. */
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void mlxsw_sp_nexthop_obj_blackhole_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ u16 lb_rif_index = mlxsw_sp->router->lb_rif_index;
+
+ nh->action = MLXSW_SP_NEXTHOP_ACTION_DISCARD;
+ nh->should_offload = 1;
+ /* While nexthops that discard packets do not forward packets
+ * via an egress RIF, they still need to be programmed using a
+ * valid RIF, so use the loopback RIF created during init.
+ */
+ nh->rif = mlxsw_sp->router->rifs[lb_rif_index];
+}
+
+static void mlxsw_sp_nexthop_obj_blackhole_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ nh->rif = NULL;
+ nh->should_offload = 0;
+}
+
+static int
+mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp,
+ struct mlxsw_sp_nexthop *nh,
+ struct nh_notifier_single_info *nh_obj, int weight)
+{
+ struct net_device *dev = nh_obj->dev;
+ int err;
+
+ nh->nhgi = nh_grp->nhgi;
+ nh->nh_weight = weight;
+
+ switch (nh_obj->gw_family) {
+ case AF_INET:
+ memcpy(&nh->gw_addr, &nh_obj->ipv4, sizeof(nh_obj->ipv4));
+ nh->neigh_tbl = &arp_tbl;
+ break;
+ case AF_INET6:
+ memcpy(&nh->gw_addr, &nh_obj->ipv6, sizeof(nh_obj->ipv6));
+#if IS_ENABLED(CONFIG_IPV6)
+ nh->neigh_tbl = &nd_tbl;
+#endif
+ break;
+ }
+
+ mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
+ list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
+ nh->ifindex = dev->ifindex;
+
+ err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
+ if (err)
+ goto err_type_init;
+
+ if (nh_obj->is_reject)
+ mlxsw_sp_nexthop_obj_blackhole_init(mlxsw_sp, nh);
+
+ /* In a resilient nexthop group, all the nexthops must be written to
+ * the adjacency table, even if they do not have a valid neighbour or
+ * RIF.
+ */
+ if (nh_grp->nhgi->is_resilient && !nh->should_offload) {
+ nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
+ nh->should_offload = 1;
+ }
+
+ return 0;
+
+err_type_init:
+ list_del(&nh->router_list_node);
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ return err;
+}
+
+static void mlxsw_sp_nexthop_obj_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ if (nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD)
+ mlxsw_sp_nexthop_obj_blackhole_fini(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
+ list_del(&nh->router_list_node);
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ nh->should_offload = 0;
+}
+
+static int
+mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp,
+ struct nh_notifier_info *info)
+{
+ struct mlxsw_sp_nexthop_group_info *nhgi;
+ struct mlxsw_sp_nexthop *nh;
+ bool is_resilient = false;
+ unsigned int nhs;
+ int err, i;
+
+ switch (info->type) {
+ case NH_NOTIFIER_INFO_TYPE_SINGLE:
+ nhs = 1;
+ break;
+ case NH_NOTIFIER_INFO_TYPE_GRP:
+ nhs = info->nh_grp->num_nh;
+ break;
+ case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
+ nhs = info->nh_res_table->num_nh_buckets;
+ is_resilient = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
+ if (!nhgi)
+ return -ENOMEM;
+ nh_grp->nhgi = nhgi;
+ nhgi->nh_grp = nh_grp;
+ nhgi->gateway = mlxsw_sp_nexthop_obj_is_gateway(mlxsw_sp, info);
+ nhgi->is_resilient = is_resilient;
+ nhgi->count = nhs;
+ for (i = 0; i < nhgi->count; i++) {
+ struct nh_notifier_single_info *nh_obj;
+ int weight;
+
+ nh = &nhgi->nexthops[i];
+ switch (info->type) {
+ case NH_NOTIFIER_INFO_TYPE_SINGLE:
+ nh_obj = info->nh;
+ weight = 1;
+ break;
+ case NH_NOTIFIER_INFO_TYPE_GRP:
+ nh_obj = &info->nh_grp->nh_entries[i].nh;
+ weight = info->nh_grp->nh_entries[i].weight;
+ break;
+ case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
+ nh_obj = &info->nh_res_table->nhs[i];
+ weight = 1;
+ break;
+ default:
+ err = -EINVAL;
+ goto err_nexthop_obj_init;
+ }
+ err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj,
+ weight);
+ if (err)
+ goto err_nexthop_obj_init;
+ }
+ err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
+ if (err)
+ goto err_group_inc;
+ err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(info->extack, "Failed to write adjacency entries to the device");
+ goto err_group_refresh;
+ }
+
+ /* Add resilient nexthop groups to a list so that the activity of their
+ * nexthop buckets will be periodically queried and cleared.
+ */
+ if (nhgi->is_resilient) {
+ if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
+ mlxsw_sp_nh_grp_activity_work_schedule(mlxsw_sp);
+ list_add(&nhgi->list, &mlxsw_sp->router->nh_res_grp_list);
+ }
+
+ return 0;
+
+err_group_refresh:
+ mlxsw_sp_nexthop_group_dec(mlxsw_sp);
+err_group_inc:
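+ /* When jumping here, all nexthops were successfully initialized, so
+ * unwind every one of them.
+ */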
+ i = nhgi->count;
+err_nexthop_obj_init:
+ for (i--; i >= 0; i--) {
+ nh = &nhgi->nexthops[i];
+ mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
+ }
+ kfree(nhgi);
+ return err;
+}
+
+static void
+mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
+ struct mlxsw_sp_router *router = mlxsw_sp->router;
+ int i;
+
+ if (nhgi->is_resilient) {
+ list_del(&nhgi->list);
+ if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
+ cancel_delayed_work(&router->nh_grp_activity_dw);
+ }
+
+ mlxsw_sp_nexthop_group_dec(mlxsw_sp);
+ for (i = nhgi->count - 1; i >= 0; i--) {
+ struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
+
+ mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
+ }
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+ WARN_ON_ONCE(nhgi->adj_index_valid);
+ kfree(nhgi);
+}
+
+static struct mlxsw_sp_nexthop_group *
+mlxsw_sp_nexthop_obj_group_create(struct mlxsw_sp *mlxsw_sp,
+ struct nh_notifier_info *info)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp;
+ int err;
+
+ nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
+ if (!nh_grp)
+ return ERR_PTR(-ENOMEM);
+ INIT_LIST_HEAD(&nh_grp->vr_list);
+ err = rhashtable_init(&nh_grp->vr_ht,
+ &mlxsw_sp_nexthop_group_vr_ht_params);
+ if (err)
+ goto err_nexthop_group_vr_ht_init;
+ INIT_LIST_HEAD(&nh_grp->fib_list);
+ nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
+ nh_grp->obj.id = info->id;
+
+ err = mlxsw_sp_nexthop_obj_group_info_init(mlxsw_sp, nh_grp, info);
+ if (err)
+ goto err_nexthop_group_info_init;
+
+ nh_grp->can_destroy = false;
+
+ return nh_grp;
+
+err_nexthop_group_info_init:
+ rhashtable_destroy(&nh_grp->vr_ht);
+err_nexthop_group_vr_ht_init:
+ kfree(nh_grp);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_nexthop_obj_group_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ if (!nh_grp->can_destroy)
+ return;
+ mlxsw_sp_nexthop_obj_group_info_fini(mlxsw_sp, nh_grp);
+ WARN_ON_ONCE(!list_empty(&nh_grp->fib_list));
+ WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
+ rhashtable_destroy(&nh_grp->vr_ht);
+ kfree(nh_grp);
+}
+
+static struct mlxsw_sp_nexthop_group *
+mlxsw_sp_nexthop_obj_group_lookup(struct mlxsw_sp *mlxsw_sp, u32 id)
+{
+ struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
+
+ cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
+ cmp_arg.id = id;
+ return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
+ &cmp_arg,
+ mlxsw_sp_nexthop_group_ht_params);
+}
+
+static int mlxsw_sp_nexthop_obj_group_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ return mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
+}
+
+static int
+mlxsw_sp_nexthop_obj_group_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp,
+ struct mlxsw_sp_nexthop_group *old_nh_grp,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_nexthop_group_info *old_nhgi = old_nh_grp->nhgi;
+ struct mlxsw_sp_nexthop_group_info *new_nhgi = nh_grp->nhgi;
+ int err;
+
+ old_nh_grp->nhgi = new_nhgi;
+ new_nhgi->nh_grp = old_nh_grp;
+ nh_grp->nhgi = old_nhgi;
+ old_nhgi->nh_grp = nh_grp;
+
+ if (old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
+ /* Both the old adjacency index and the new one are valid.
+ * Routes are currently using the old one. Tell the device to
+ * replace the old adjacency index with the new one.
+ */
+ err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, old_nh_grp,
+ old_nhgi->adj_index,
+ old_nhgi->ecmp_size);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to replace old adjacency index with new one");
+ goto err_out;
+ }
+ } else if (old_nhgi->adj_index_valid && !new_nhgi->adj_index_valid) {
+ /* The old adjacency index is valid, while the new one is not.
+ * Iterate over all the routes using the group and change them
+ * to trap packets to the CPU.
+ */
+ err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to trap packets");
+ goto err_out;
+ }
+ } else if (!old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
+ /* The old adjacency index is invalid, while the new one is.
+ * Iterate over all the routes using the group and change them
+ * to forward packets using the new valid index.
+ */
+ err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to forward packets");
+ goto err_out;
+ }
+ }
+
+ /* Make sure the flags are set / cleared based on the new nexthop group
+ * information.
+ */
+ mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, old_nh_grp);
+
+ /* At this point 'nh_grp' is just a shell that is not used by anyone
+ * and its nexthop group info is the old info that was just replaced
+ * with the new one. Remove it.
+ */
+ nh_grp->can_destroy = true;
+ mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
+
+ return 0;
+
+err_out:
+ old_nhgi->nh_grp = old_nh_grp;
+ nh_grp->nhgi = new_nhgi;
+ new_nhgi->nh_grp = nh_grp;
+ old_nh_grp->nhgi = old_nhgi;
+ return err;
+}
+
+static int mlxsw_sp_nexthop_obj_new(struct mlxsw_sp *mlxsw_sp,
+ struct nh_notifier_info *info)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp, *old_nh_grp;
+ struct netlink_ext_ack *extack = info->extack;
+ int err;
+
+ nh_grp = mlxsw_sp_nexthop_obj_group_create(mlxsw_sp, info);
+ if (IS_ERR(nh_grp))
+ return PTR_ERR(nh_grp);
+
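+ /* If a group with this ID already exists, replace its contents so that
+ * routes using it keep pointing at the same group. Otherwise, insert
+ * the newly created group.
+ */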
+ old_nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
+ if (!old_nh_grp)
+ err = mlxsw_sp_nexthop_obj_group_add(mlxsw_sp, nh_grp);
+ else
+ err = mlxsw_sp_nexthop_obj_group_replace(mlxsw_sp, nh_grp,
+ old_nh_grp, extack);
+
+ if (err) {
+ nh_grp->can_destroy = true;
+ mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
+ }
+
+ return err;
+}
+
+static void mlxsw_sp_nexthop_obj_del(struct mlxsw_sp *mlxsw_sp,
+ struct nh_notifier_info *info)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp;
+
+ nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
+ if (!nh_grp)
+ return;
+
+ nh_grp->can_destroy = true;
+ mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
+
+ /* If the group still has routes using it, then defer the delete
+ * operation until the last route using it is deleted.
+ */
+ if (!list_empty(&nh_grp->fib_list))
+ return;
+ mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
+}
+
+static int mlxsw_sp_nexthop_obj_bucket_query(struct mlxsw_sp *mlxsw_sp,
+ u32 adj_index, char *ratr_pl)
+{
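+ /* Read back the adjacency entry at 'adj_index' so it can later be
+ * compared against the requested replacement.
+ */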
+ MLXSW_REG_ZERO(ratr, ratr_pl);
+ mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
+ mlxsw_reg_ratr_adjacency_index_low_set(ratr_pl, adj_index);
+ mlxsw_reg_ratr_adjacency_index_high_set(ratr_pl, adj_index >> 16);
+
+ return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
+}
+
+static int mlxsw_sp_nexthop_obj_bucket_compare(char *ratr_pl, char *ratr_pl_new)
+{
+ /* Clear the opcode and activity on both the old and new payloads, as
+ * they are irrelevant for the comparison.
+ */
+ mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
+ mlxsw_reg_ratr_a_set(ratr_pl, 0);
+ mlxsw_reg_ratr_op_set(ratr_pl_new, MLXSW_REG_RATR_OP_QUERY_READ);
+ mlxsw_reg_ratr_a_set(ratr_pl_new, 0);
+
+ /* If the contents of the adjacency entry are consistent with the
+ * replacement request, then replacement was successful.
+ */
+ if (!memcmp(ratr_pl, ratr_pl_new, MLXSW_REG_RATR_LEN))
+ return 0;
+
+ return -EINVAL;
+}
+
+static int
+mlxsw_sp_nexthop_obj_bucket_adj_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh,
+ struct nh_notifier_info *info)
+{
+ u16 bucket_index = info->nh_res_bucket->bucket_index;
+ struct netlink_ext_ack *extack = info->extack;
+ bool force = info->nh_res_bucket->force;
+ char ratr_pl_new[MLXSW_REG_RATR_LEN];
+ char ratr_pl[MLXSW_REG_RATR_LEN];
+ u32 adj_index;
+ int err;
+
+ /* No point in trying an atomic replacement if the idle timer interval
+ * is smaller than the interval in which we query and clear activity.
+ */
+ if (!force && info->nh_res_bucket->idle_timer_ms <
+ MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL)
+ force = true;
+
+ adj_index = nh->nhgi->adj_index + bucket_index;
+ err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh, force, ratr_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to overwrite nexthop bucket");
+ return err;
+ }
+
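+ /* For a non-forced replacement, read the entry back and verify that it
+ * matches the request, since the device will not overwrite a bucket
+ * that was active during the replacement.
+ */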
+ if (!force) {
+ err = mlxsw_sp_nexthop_obj_bucket_query(mlxsw_sp, adj_index,
+ ratr_pl_new);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to query nexthop bucket state after replacement. State might be inconsistent");
+ return err;
+ }
+
+ err = mlxsw_sp_nexthop_obj_bucket_compare(ratr_pl, ratr_pl_new);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket was not replaced because it was active during replacement");
+ return err;
+ }
+ }
+
+ nh->update = 0;
+ nh->offloaded = 1;
+ mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, bucket_index);
+
+ return 0;
+}
+
+static int mlxsw_sp_nexthop_obj_bucket_replace(struct mlxsw_sp *mlxsw_sp,
+ struct nh_notifier_info *info)
+{
+ u16 bucket_index = info->nh_res_bucket->bucket_index;
+ struct netlink_ext_ack *extack = info->extack;
+ struct mlxsw_sp_nexthop_group_info *nhgi;
+ struct nh_notifier_single_info *nh_obj;
+ struct mlxsw_sp_nexthop_group *nh_grp;
+ struct mlxsw_sp_nexthop *nh;
+ int err;
+
+ nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
+ if (!nh_grp) {
+ NL_SET_ERR_MSG_MOD(extack, "Nexthop group was not found");
+ return -EINVAL;
+ }
+
+ nhgi = nh_grp->nhgi;
+
+ if (bucket_index >= nhgi->count) {
+ NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket index out of range");
+ return -EINVAL;
+ }
+
+ nh = &nhgi->nexthops[bucket_index];
+ mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
+
+ nh_obj = &info->nh_res_bucket->new_nh;
+ err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to initialize nexthop object for nexthop bucket replacement");
+ goto err_nexthop_obj_init;
+ }
+
+ err = mlxsw_sp_nexthop_obj_bucket_adj_update(mlxsw_sp, nh, info);
+ if (err)
+ goto err_nexthop_obj_bucket_adj_update;
+
+ return 0;
+
+err_nexthop_obj_bucket_adj_update:
+ mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
+err_nexthop_obj_init:
+ nh_obj = &info->nh_res_bucket->old_nh;
+ mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
+ /* The old adjacency entry was not overwritten */
+ nh->update = 0;
+ nh->offloaded = 1;
+ return err;
+}
+
+static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct nh_notifier_info *info = ptr;
+ struct mlxsw_sp_router *router;
+ int err = 0;
+
+ router = container_of(nb, struct mlxsw_sp_router, nexthop_nb);
+ err = mlxsw_sp_nexthop_obj_validate(router->mlxsw_sp, event, info);
+ if (err)
+ goto out;
+
+ mutex_lock(&router->lock);
+
+ switch (event) {
+ case NEXTHOP_EVENT_REPLACE:
+ err = mlxsw_sp_nexthop_obj_new(router->mlxsw_sp, info);
+ break;
+ case NEXTHOP_EVENT_DEL:
+ mlxsw_sp_nexthop_obj_del(router->mlxsw_sp, info);
+ break;
+ case NEXTHOP_EVENT_BUCKET_REPLACE:
+ err = mlxsw_sp_nexthop_obj_bucket_replace(router->mlxsw_sp,
+ info);
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&router->lock);
+
+out:
+ return notifier_from_errno(err);
+}
+
+static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
+ struct fib_info *fi)
+{
+ const struct fib_nh *nh = fib_info_nh(fi, 0);
+
+ return nh->fib_nh_gw_family ||
+ mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
+}
+
+static int
+mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ unsigned int nhs = fib_info_num_path(nh_grp->ipv4.fi);
+ struct mlxsw_sp_nexthop_group_info *nhgi;
+ struct mlxsw_sp_nexthop *nh;
+ int err, i;
+
+ nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
+ if (!nhgi)
+ return -ENOMEM;
+ nh_grp->nhgi = nhgi;
+ nhgi->nh_grp = nh_grp;
+ nhgi->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, nh_grp->ipv4.fi);
+ nhgi->count = nhs;
+ for (i = 0; i < nhgi->count; i++) {
+ struct fib_nh *fib_nh;
+
+ nh = &nhgi->nexthops[i];
+ fib_nh = fib_info_nh(nh_grp->ipv4.fi, i);
+ err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
+ if (err)
+ goto err_nexthop4_init;
+ }
+ err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
+ if (err)
+ goto err_group_inc;
+ err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+ if (err)
+ goto err_group_refresh;
+
+ return 0;
+
+err_group_refresh:
+ mlxsw_sp_nexthop_group_dec(mlxsw_sp);
+err_group_inc:
+ i = nhgi->count;
+err_nexthop4_init:
+ for (i--; i >= 0; i--) {
+ nh = &nhgi->nexthops[i];
+ mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
+ }
+ kfree(nhgi);
+ return err;
+}
+
+static void
+mlxsw_sp_nexthop4_group_info_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
+ int i;
+
+ mlxsw_sp_nexthop_group_dec(mlxsw_sp);
+ for (i = nhgi->count - 1; i >= 0; i--) {
+ struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
+
+ mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
+ }
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+ WARN_ON_ONCE(nhgi->adj_index_valid);
+ kfree(nhgi);
+}
+
+static struct mlxsw_sp_nexthop_group *
+mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp;
+ int err;
+
+ nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
+ if (!nh_grp)
+ return ERR_PTR(-ENOMEM);
+ INIT_LIST_HEAD(&nh_grp->vr_list);
+ err = rhashtable_init(&nh_grp->vr_ht,
+ &mlxsw_sp_nexthop_group_vr_ht_params);
+ if (err)
+ goto err_nexthop_group_vr_ht_init;
+ INIT_LIST_HEAD(&nh_grp->fib_list);
+ nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
+ nh_grp->ipv4.fi = fi;
+ fib_info_hold(fi);
+
+ err = mlxsw_sp_nexthop4_group_info_init(mlxsw_sp, nh_grp);
+ if (err)
+ goto err_nexthop_group_info_init;
+
+ err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
+ if (err)
+ goto err_nexthop_group_insert;
+
+ nh_grp->can_destroy = true;
+
+ return nh_grp;
+
+err_nexthop_group_insert:
+ mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
+err_nexthop_group_info_init:
+ fib_info_put(fi);
+ rhashtable_destroy(&nh_grp->vr_ht);
+err_nexthop_group_vr_ht_init:
+ kfree(nh_grp);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ if (!nh_grp->can_destroy)
+ return;
+ mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
+ mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
+ fib_info_put(nh_grp->ipv4.fi);
+ WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
+ rhashtable_destroy(&nh_grp->vr_ht);
+ kfree(nh_grp);
+}
+
+static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ struct fib_info *fi)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp;
+
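+ /* The route uses a nexthop object, so the nexthop group was already
+ * created in response to a nexthop notification. Simply look it up by
+ * its ID.
+ */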
+ if (fi->nh) {
+ nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
+ fi->nh->id);
+ if (WARN_ON_ONCE(!nh_grp))
+ return -EINVAL;
+ goto out;
+ }
+
+ nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
+ if (!nh_grp) {
+ nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
+ if (IS_ERR(nh_grp))
+ return PTR_ERR(nh_grp);
+ }
+out:
+ list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
+ fib_entry->nh_group = nh_grp;
+ return 0;
+}
+
+static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
+
+ list_del(&fib_entry->nexthop_group_node);
+ if (!list_empty(&nh_grp->fib_list))
+ return;
+
+ if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
+ mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
+ return;
+ }
+
+ mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
+}
+
+static bool
+mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
+{
+ struct mlxsw_sp_fib4_entry *fib4_entry;
+
+ fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
+ common);
+ return !fib4_entry->dscp;
+}
+
+static bool
+mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
+{
+ struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
+
+ switch (fib_entry->fib_node->fib->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
+ return false;
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ break;
+ }
+
+ switch (fib_entry->type) {
+ case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
+ return !!nh_group->nhgi->adj_index_valid;
+ case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
+ return !!nh_group->nhgi->nh_rif;
+ case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
+ case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
+ case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static struct mlxsw_sp_nexthop *
+mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
+ const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
+{
+ int i;
+
+ for (i = 0; i < nh_grp->nhgi->count; i++) {
+ struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
+ struct fib6_info *rt = mlxsw_sp_rt6->rt;
+
+ if (nh->rif && nh->rif->dev == rt->fib6_nh->fib_nh_dev &&
+ ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
+ &rt->fib6_nh->fib_nh_gw6))
+ return nh;
+ }
+
+ return NULL;
+}
+
+static void
+mlxsw_sp_fib4_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
+ struct fib_entry_notifier_info *fen_info)
+{
+ u32 *p_dst = (u32 *) &fen_info->dst;
+ struct fib_rt_info fri;
+
+ fri.fi = fen_info->fi;
+ fri.tb_id = fen_info->tb_id;
+ fri.dst = cpu_to_be32(*p_dst);
+ fri.dst_len = fen_info->dst_len;
+ fri.dscp = fen_info->dscp;
+ fri.type = fen_info->type;
+ fri.offload = false;
+ fri.trap = false;
+ fri.offload_failed = true;
+ fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
+}
+
+static void
+mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
+ int dst_len = fib_entry->fib_node->key.prefix_len;
+ struct mlxsw_sp_fib4_entry *fib4_entry;
+ struct fib_rt_info fri;
+ bool should_offload;
+
+ should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
+ fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
+ common);
+ fri.fi = fib4_entry->fi;
+ fri.tb_id = fib4_entry->tb_id;
+ fri.dst = cpu_to_be32(*p_dst);
+ fri.dst_len = dst_len;
+ fri.dscp = fib4_entry->dscp;
+ fri.type = fib4_entry->type;
+ fri.offload = should_offload;
+ fri.trap = !should_offload;
+ fri.offload_failed = false;
+ fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
+}
+
+static void
+mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
+ int dst_len = fib_entry->fib_node->key.prefix_len;
+ struct mlxsw_sp_fib4_entry *fib4_entry;
+ struct fib_rt_info fri;
+
+ fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
+ common);
+ fri.fi = fib4_entry->fi;
+ fri.tb_id = fib4_entry->tb_id;
+ fri.dst = cpu_to_be32(*p_dst);
+ fri.dst_len = dst_len;
+ fri.dscp = fib4_entry->dscp;
+ fri.type = fib4_entry->type;
+ fri.offload = false;
+ fri.trap = false;
+ fri.offload_failed = false;
+ fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void
+mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
+ struct fib6_info **rt_arr,
+ unsigned int nrt6)
+{
+ int i;
+
+ /* In IPv6 a multipath route is represented using multiple routes, so
+ * we need to set the flags on all of them.
+ */
+ for (i = 0; i < nrt6; i++)
+ fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), rt_arr[i],
+ false, false, true);
+}
+#else
+static void
+mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
+ struct fib6_info **rt_arr,
+ unsigned int nrt6)
+{
+}
+#endif
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void
+mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ struct mlxsw_sp_fib6_entry *fib6_entry;
+ struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
+ bool should_offload;
+
+ should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
+
+ /* In IPv6 a multipath route is represented using multiple routes, so
+ * we need to set the flags on all of them.
+ */
+ fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
+ common);
+ list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
+ fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
+ should_offload, !should_offload, false);
+}
+#else
+static void
+mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+}
+#endif
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void
+mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ struct mlxsw_sp_fib6_entry *fib6_entry;
+ struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
+
+ fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
+ common);
+ list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
+ fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
+ false, false, false);
+}
+#else
+static void
+mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+}
+#endif
+
+static void
+mlxsw_sp_fib_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ switch (fib_entry->fib_node->fib->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ mlxsw_sp_fib4_entry_hw_flags_set(mlxsw_sp, fib_entry);
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ mlxsw_sp_fib6_entry_hw_flags_set(mlxsw_sp, fib_entry);
+ break;
+ }
+}
+
+static void
+mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ switch (fib_entry->fib_node->fib->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ mlxsw_sp_fib4_entry_hw_flags_clear(mlxsw_sp, fib_entry);
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ mlxsw_sp_fib6_entry_hw_flags_clear(mlxsw_sp, fib_entry);
+ break;
+ }
+}
+
+static void
+mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
+{
+ switch (op) {
+ case MLXSW_REG_RALUE_OP_WRITE_WRITE:
+ mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
+ break;
+ case MLXSW_REG_RALUE_OP_WRITE_DELETE:
+ mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
+ const struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
+{
+ struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
+ enum mlxsw_reg_ralxx_protocol proto;
+ u32 *p_dip;
+
+ proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
+
+ switch (fib->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ p_dip = (u32 *) fib_entry->fib_node->key.addr;
+ mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
+ fib_entry->fib_node->key.prefix_len,
+ *p_dip);
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
+ fib_entry->fib_node->key.prefix_len,
+ fib_entry->fib_node->key.addr);
+ break;
+ }
+}
+
+static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
+{
+ struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
+ struct mlxsw_sp_nexthop_group_info *nhgi = nh_group->nhgi;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
+ enum mlxsw_reg_ralue_trap_action trap_action;
+ u16 trap_id = 0;
+ u32 adjacency_index = 0;
+ u16 ecmp_size = 0;
+
+ /* In case the nexthop group adjacency index is valid, use it
+ * with the provided ECMP size. Otherwise, set up a trap and pass
+ * traffic to the kernel.
+ */
+ if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
+ trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
+ adjacency_index = nhgi->adj_index;
+ ecmp_size = nhgi->ecmp_size;
+ } else if (!nhgi->adj_index_valid && nhgi->count && nhgi->nh_rif) {
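+ /* The group has nexthops with a RIF but no valid adjacency index
+ * yet, so point the route at the router's trap adjacency entry.
+ */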
+ trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
+ adjacency_index = mlxsw_sp->router->adj_trap_index;
+ ecmp_size = 1;
+ } else {
+ trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
+ trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
+ }
+
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
+ adjacency_index, ecmp_size);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+}
+
+static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
+{
+ struct mlxsw_sp_rif *rif = fib_entry->nh_group->nhgi->nh_rif;
+ enum mlxsw_reg_ralue_trap_action trap_action;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
+ u16 trap_id = 0;
+ u16 rif_index = 0;
+
+ if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
+ trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
+ rif_index = rif->rif_index;
+ } else {
+ trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
+ trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
+ }
+
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
+ rif_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+}
+
+static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
+{
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
+
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+}
+
+static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
+{
+ enum mlxsw_reg_ralue_trap_action trap_action;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
+
+ trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, 0, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+}
+
+static int
+mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
+{
+ enum mlxsw_reg_ralue_trap_action trap_action;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
+ u16 trap_id;
+
+ trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
+ trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
+
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+}
+
+static int
+mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
+ const struct mlxsw_sp_ipip_ops *ipip_ops;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
+ int err;
+
+ if (WARN_ON(!ipip_entry))
+ return -EINVAL;
+
+ ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
+ err = ipip_ops->decap_config(mlxsw_sp, ipip_entry,
+ fib_entry->decap.tunnel_index);
+ if (err)
+ return err;
+
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
+ fib_entry->decap.tunnel_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+}
+
+static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
+{
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
+
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
+ fib_entry->decap.tunnel_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+}
+
+static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
+{
+ switch (fib_entry->type) {
+ case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
+ return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
+ case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
+ return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
+ case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
+ return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
+ case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
+ return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op);
+ case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
+ return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, fib_entry,
+ op);
+ case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
+ return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
+ fib_entry, op);
+ case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
+ return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, fib_entry, op);
+ }
+ return -EINVAL;
+}
+
+static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
+{
+ int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
+
+ if (err)
+ return err;
+
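+ /* Reflect the new hardware state of the entry in the kernel's
+ * offload and trap indications.
+ */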
+ mlxsw_sp_fib_entry_hw_flags_refresh(mlxsw_sp, fib_entry, op);
+
+ return err;
+}
+
+static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
+ MLXSW_REG_RALUE_OP_WRITE_WRITE);
+}
+
+static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
+ MLXSW_REG_RALUE_OP_WRITE_DELETE);
+}
+
+static int
+mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
+ const struct fib_entry_notifier_info *fen_info,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
+ union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
+ struct mlxsw_sp_router *router = mlxsw_sp->router;
+ u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
+ int ifindex = nhgi->nexthops[0].ifindex;
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+
+ switch (fen_info->type) {
+ case RTN_LOCAL:
+ ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
+ MLXSW_SP_L3_PROTO_IPV4, dip);
+ if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
+ return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
+ fib_entry,
+ ipip_entry);
+ }
+ if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
+ MLXSW_SP_L3_PROTO_IPV4,
+ &dip)) {
+ u32 tunnel_index;
+
+ tunnel_index = router->nve_decap_config.tunnel_index;
+ fib_entry->decap.tunnel_index = tunnel_index;
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
+ return 0;
+ }
+ fallthrough;
+ case RTN_BROADCAST:
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
+ return 0;
+ case RTN_BLACKHOLE:
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
+ return 0;
+ case RTN_UNREACHABLE:
+ case RTN_PROHIBIT:
+ /* Packets hitting these routes need to be trapped, but can be
+ * trapped with a lower priority than packets directed at the
+ * host, so use action type local instead of trap.
+ */
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
+ return 0;
+ case RTN_UNICAST:
+ if (nhgi->gateway)
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
+ else
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void
+mlxsw_sp_fib_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ switch (fib_entry->type) {
+ case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
+ mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib4_entry *fib4_entry)
+{
+ mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib4_entry->common);
+}
+
+static struct mlxsw_sp_fib4_entry *
+mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_node *fib_node,
+ const struct fib_entry_notifier_info *fen_info)
+{
+ struct mlxsw_sp_fib4_entry *fib4_entry;
+ struct mlxsw_sp_fib_entry *fib_entry;
+ int err;
+
+ fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
+ if (!fib4_entry)
+ return ERR_PTR(-ENOMEM);
+ fib_entry = &fib4_entry->common;
+
+ err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
+ if (err)
+ goto err_nexthop4_group_get;
+
+ err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
+ fib_node->fib);
+ if (err)
+ goto err_nexthop_group_vr_link;
+
+ err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
+ if (err)
+ goto err_fib4_entry_type_set;
+
+ fib4_entry->fi = fen_info->fi;
+ fib_info_hold(fib4_entry->fi);
+ fib4_entry->tb_id = fen_info->tb_id;
+ fib4_entry->type = fen_info->type;
+ fib4_entry->dscp = fen_info->dscp;
+
+ fib_entry->fib_node = fib_node;
+
+ return fib4_entry;
+
+err_fib4_entry_type_set:
+ mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
+err_nexthop_group_vr_link:
+ mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
+err_nexthop4_group_get:
+ kfree(fib4_entry);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib4_entry *fib4_entry)
+{
+ struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
+
+ fib_info_put(fib4_entry->fi);
+ mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib4_entry);
+ mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group,
+ fib_node->fib);
+ mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
+ kfree(fib4_entry);
+}
+
+static struct mlxsw_sp_fib4_entry *
+mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
+ const struct fib_entry_notifier_info *fen_info)
+{
+ struct mlxsw_sp_fib4_entry *fib4_entry;
+ struct mlxsw_sp_fib_node *fib_node;
+ struct mlxsw_sp_fib *fib;
+ struct mlxsw_sp_vr *vr;
+
+ vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
+ if (!vr)
+ return NULL;
+ fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
+
+ fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
+ sizeof(fen_info->dst),
+ fen_info->dst_len);
+ if (!fib_node)
+ return NULL;
+
+ fib4_entry = container_of(fib_node->fib_entry,
+ struct mlxsw_sp_fib4_entry, common);
+ if (fib4_entry->tb_id == fen_info->tb_id &&
+ fib4_entry->dscp == fen_info->dscp &&
+ fib4_entry->type == fen_info->type &&
+ fib4_entry->fi == fen_info->fi)
+ return fib4_entry;
+
+ return NULL;
+}
+
+static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
+ .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
+ .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
+ .key_len = sizeof(struct mlxsw_sp_fib_key),
+ .automatic_shrinking = true,
+};
+
+static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
+ struct mlxsw_sp_fib_node *fib_node)
+{
+ return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
+ mlxsw_sp_fib_ht_params);
+}
+
+static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
+ struct mlxsw_sp_fib_node *fib_node)
+{
+ rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
+ mlxsw_sp_fib_ht_params);
+}
+
+static struct mlxsw_sp_fib_node *
+mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
+ size_t addr_len, unsigned char prefix_len)
+{
+ struct mlxsw_sp_fib_key key;
+
+ memset(&key, 0, sizeof(key));
+ memcpy(key.addr, addr, addr_len);
+ key.prefix_len = prefix_len;
+ return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
+}
+
+static struct mlxsw_sp_fib_node *
+mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
+ size_t addr_len, unsigned char prefix_len)
+{
+ struct mlxsw_sp_fib_node *fib_node;
+
+ fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
+ if (!fib_node)
+ return NULL;
+
+ list_add(&fib_node->list, &fib->node_list);
+ memcpy(fib_node->key.addr, addr, addr_len);
+ fib_node->key.prefix_len = prefix_len;
+
+ return fib_node;
+}
+
+static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
+{
+ list_del(&fib_node->list);
+ kfree(fib_node);
+}
+
+static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_node *fib_node)
+{
+ struct mlxsw_sp_prefix_usage req_prefix_usage;
+ struct mlxsw_sp_fib *fib = fib_node->fib;
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+ int err;
+
+ lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
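+ /* If the prefix length is already used by the tree bound to this
+ * protocol, only its reference count needs to be bumped.
+ */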
+ if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
+ goto out;
+
+ mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
+ mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
+ lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
+ fib->proto);
+ if (IS_ERR(lpm_tree))
+ return PTR_ERR(lpm_tree);
+
+ err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
+ if (err)
+ goto err_lpm_tree_replace;
+
+out:
+ lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
+ return 0;
+
+err_lpm_tree_replace:
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+ return err;
+}
+
+static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_node *fib_node)
+{
+ struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
+ struct mlxsw_sp_prefix_usage req_prefix_usage;
+ struct mlxsw_sp_fib *fib = fib_node->fib;
+ int err;
+
+ if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
+ return;
+ /* Try to construct a new LPM tree from the current prefix usage
+ * minus the unused one. If we fail, continue using the old one.
+ */
+ mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
+ mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
+ fib_node->key.prefix_len);
+ lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
+ fib->proto);
+ if (IS_ERR(lpm_tree))
+ return;
+
+ err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
+ if (err)
+ goto err_lpm_tree_replace;
+
+ return;
+
+err_lpm_tree_replace:
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+}
+
+static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_node *fib_node,
+ struct mlxsw_sp_fib *fib)
+{
+ int err;
+
+ err = mlxsw_sp_fib_node_insert(fib, fib_node);
+ if (err)
+ return err;
+ fib_node->fib = fib;
+
+ err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
+ if (err)
+ goto err_fib_lpm_tree_link;
+
+ return 0;
+
+err_fib_lpm_tree_link:
+ fib_node->fib = NULL;
+ mlxsw_sp_fib_node_remove(fib, fib_node);
+ return err;
+}
+
+static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_node *fib_node)
+{
+ struct mlxsw_sp_fib *fib = fib_node->fib;
+
+ mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
+ fib_node->fib = NULL;
+ mlxsw_sp_fib_node_remove(fib, fib_node);
+}
+
+static struct mlxsw_sp_fib_node *
+mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
+ size_t addr_len, unsigned char prefix_len,
+ enum mlxsw_sp_l3proto proto)
+{
+ struct mlxsw_sp_fib_node *fib_node;
+ struct mlxsw_sp_fib *fib;
+ struct mlxsw_sp_vr *vr;
+ int err;
+
+ vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
+ if (IS_ERR(vr))
+ return ERR_CAST(vr);
+ fib = mlxsw_sp_vr_fib(vr, proto);
+
+ fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
+ if (fib_node)
+ return fib_node;
+
+ fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
+ if (!fib_node) {
+ err = -ENOMEM;
+ goto err_fib_node_create;
+ }
+
+ err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
+ if (err)
+ goto err_fib_node_init;
+
+ return fib_node;
+
+err_fib_node_init:
+ mlxsw_sp_fib_node_destroy(fib_node);
+err_fib_node_create:
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_node *fib_node)
+{
+ struct mlxsw_sp_vr *vr = fib_node->fib->vr;
+
+ if (fib_node->fib_entry)
+ return;
+ mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
+ mlxsw_sp_fib_node_destroy(fib_node);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
+}
+
+static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
+ int err;
+
+ fib_node->fib_entry = fib_entry;
+
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
+ if (err)
+ goto err_fib_entry_update;
+
+ return 0;
+
+err_fib_entry_update:
+ fib_node->fib_entry = NULL;
+ return err;
+}
+
+static void
+mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
+
+ mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
+ fib_node->fib_entry = NULL;
+}
+
+static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
+{
+ struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
+ struct mlxsw_sp_fib4_entry *fib4_replaced;
+
+ if (!fib_node->fib_entry)
+ return true;
+
+ fib4_replaced = container_of(fib_node->fib_entry,
+ struct mlxsw_sp_fib4_entry, common);
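+ /* Do not let a route from the main table replace one from the local
+ * table, as local table routes take precedence.
+ */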
+ if (fib4_entry->tb_id == RT_TABLE_MAIN &&
+ fib4_replaced->tb_id == RT_TABLE_LOCAL)
+ return false;
+
+ return true;
+}
+
+static int
+mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
+ const struct fib_entry_notifier_info *fen_info)
+{
+ struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
+ struct mlxsw_sp_fib_entry *replaced;
+ struct mlxsw_sp_fib_node *fib_node;
+ int err;
+
+ if (fen_info->fi->nh &&
+ !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, fen_info->fi->nh->id))
+ return 0;
+
+ fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
+ &fen_info->dst, sizeof(fen_info->dst),
+ fen_info->dst_len,
+ MLXSW_SP_L3_PROTO_IPV4);
+ if (IS_ERR(fib_node)) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
+ return PTR_ERR(fib_node);
+ }
+
+ fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
+ if (IS_ERR(fib4_entry)) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
+ err = PTR_ERR(fib4_entry);
+ goto err_fib4_entry_create;
+ }
+
+ if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) {
+ mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
+ mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+ return 0;
+ }
+
+ replaced = fib_node->fib_entry;
+ err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib4_entry->common);
+ if (err) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
+ goto err_fib_node_entry_link;
+ }
+
+ /* Nothing to replace */
+ if (!replaced)
+ return 0;
+
+ mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
+ fib4_replaced = container_of(replaced, struct mlxsw_sp_fib4_entry,
+ common);
+ mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_replaced);
+
+ return 0;
+
+err_fib_node_entry_link:
+ fib_node->fib_entry = replaced;
+ mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
+err_fib4_entry_create:
+ mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+ return err;
+}
+
+static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
+ struct fib_entry_notifier_info *fen_info)
+{
+ struct mlxsw_sp_fib4_entry *fib4_entry;
+ struct mlxsw_sp_fib_node *fib_node;
+
+ fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
+ if (!fib4_entry)
+ return;
+ fib_node = fib4_entry->common.fib_node;
+
+ mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib4_entry->common);
+ mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
+ mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+}
+
+static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
+{
+ /* Multicast routes aren't supported, so ignore them. Neighbour
+ * Discovery packets are specifically trapped.
+ */
+ if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
+ return true;
+
+ /* Cloned routes are irrelevant in the forwarding path. */
+ if (rt->fib6_flags & RTF_CACHE)
+ return true;
+
+ return false;
+}
+
+static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
+{
+ struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
+
+ mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
+ if (!mlxsw_sp_rt6)
+ return ERR_PTR(-ENOMEM);
+
+ /* In case of a route replace, the replaced route is deleted with
+ * no notification. Take a reference to prevent accessing freed
+ * memory.
+ */
+ mlxsw_sp_rt6->rt = rt;
+ fib6_info_hold(rt);
+
+ return mlxsw_sp_rt6;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void mlxsw_sp_rt6_release(struct fib6_info *rt)
+{
+ fib6_info_release(rt);
+}
+#else
+static void mlxsw_sp_rt6_release(struct fib6_info *rt)
+{
+}
+#endif
+
+static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
+{
+ struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
+
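+ /* Routes using a nexthop object have their offload indication managed
+ * through the nexthop notifier, so only clear the per-route flag for
+ * legacy nexthops.
+ */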
+ if (!mlxsw_sp_rt6->rt->nh)
+ fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
+ mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
+ kfree(mlxsw_sp_rt6);
+}
+
+static struct fib6_info *
+mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
+{
+ return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
+ list)->rt;
+}
+
+static struct mlxsw_sp_rt6 *
+mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
+ const struct fib6_info *rt)
+{
+ struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
+
+ list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
+ if (mlxsw_sp_rt6->rt == rt)
+ return mlxsw_sp_rt6;
+ }
+
+ return NULL;
+}
+
+static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
+ const struct fib6_info *rt,
+ enum mlxsw_sp_ipip_type *ret)
+{
+ return rt->fib6_nh->fib_nh_dev &&
+ mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
+}
+
+static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp,
+ struct mlxsw_sp_nexthop *nh,
+ const struct fib6_info *rt)
+{
+ struct net_device *dev = rt->fib6_nh->fib_nh_dev;
+ int err;
+
+ nh->nhgi = nh_grp->nhgi;
+ nh->nh_weight = rt->fib6_nh->fib_nh_weight;
+ memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
+#if IS_ENABLED(CONFIG_IPV6)
+ nh->neigh_tbl = &nd_tbl;
+#endif
+ mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
+
+ list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
+
+ if (!dev)
+ return 0;
+ nh->ifindex = dev->ifindex;
+
+ err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
+ if (err)
+ goto err_nexthop_type_init;
+
+ return 0;
+
+err_nexthop_type_init:
+ list_del(&nh->router_list_node);
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ return err;
+}
+
+static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
+ list_del(&nh->router_list_node);
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+}
+
+static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
+ const struct fib6_info *rt)
+{
+ return rt->fib6_nh->fib_nh_gw_family ||
+ mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
+}
+
+static int
+mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp,
+ struct mlxsw_sp_fib6_entry *fib6_entry)
+{
+ struct mlxsw_sp_nexthop_group_info *nhgi;
+ struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
+ struct mlxsw_sp_nexthop *nh;
+ int err, i;
+
+ nhgi = kzalloc(struct_size(nhgi, nexthops, fib6_entry->nrt6),
+ GFP_KERNEL);
+ if (!nhgi)
+ return -ENOMEM;
+ nh_grp->nhgi = nhgi;
+ nhgi->nh_grp = nh_grp;
+ mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
+ struct mlxsw_sp_rt6, list);
+ nhgi->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
+ nhgi->count = fib6_entry->nrt6;
+ for (i = 0; i < nhgi->count; i++) {
+ struct fib6_info *rt = mlxsw_sp_rt6->rt;
+
+ nh = &nhgi->nexthops[i];
+ err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
+ if (err)
+ goto err_nexthop6_init;
+ mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
+ }
+ nh_grp->nhgi = nhgi;
+ err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
+ if (err)
+ goto err_group_inc;
+ err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+ if (err)
+ goto err_group_refresh;
+
+ return 0;
+
+err_group_refresh:
+ mlxsw_sp_nexthop_group_dec(mlxsw_sp);
+err_group_inc:
+ i = nhgi->count;
+err_nexthop6_init:
+ for (i--; i >= 0; i--) {
+ nh = &nhgi->nexthops[i];
+ mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
+ }
+ kfree(nhgi);
+ return err;
+}
+
+static void
+mlxsw_sp_nexthop6_group_info_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
+ int i;
+
+ mlxsw_sp_nexthop_group_dec(mlxsw_sp);
+ for (i = nhgi->count - 1; i >= 0; i--) {
+ struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
+
+ mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
+ }
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+ WARN_ON_ONCE(nhgi->adj_index_valid);
+ kfree(nhgi);
+}
+
+static struct mlxsw_sp_nexthop_group *
+mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib6_entry *fib6_entry)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp;
+ int err;
+
+ nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
+ if (!nh_grp)
+ return ERR_PTR(-ENOMEM);
+ INIT_LIST_HEAD(&nh_grp->vr_list);
+ err = rhashtable_init(&nh_grp->vr_ht,
+ &mlxsw_sp_nexthop_group_vr_ht_params);
+ if (err)
+ goto err_nexthop_group_vr_ht_init;
+ INIT_LIST_HEAD(&nh_grp->fib_list);
+ nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
+
+ err = mlxsw_sp_nexthop6_group_info_init(mlxsw_sp, nh_grp, fib6_entry);
+ if (err)
+ goto err_nexthop_group_info_init;
+
+ err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
+ if (err)
+ goto err_nexthop_group_insert;
+
+ nh_grp->can_destroy = true;
+
+ return nh_grp;
+
+err_nexthop_group_insert:
+ mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
+err_nexthop_group_info_init:
+ rhashtable_destroy(&nh_grp->vr_ht);
+err_nexthop_group_vr_ht_init:
+ kfree(nh_grp);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ if (!nh_grp->can_destroy)
+ return;
+ mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
+ mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
+ WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
+ rhashtable_destroy(&nh_grp->vr_ht);
+ kfree(nh_grp);
+}
+
+static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib6_entry *fib6_entry)
+{
+ struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
+ struct mlxsw_sp_nexthop_group *nh_grp;
+
+ if (rt->nh) {
+ nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
+ rt->nh->id);
+ if (WARN_ON_ONCE(!nh_grp))
+ return -EINVAL;
+ goto out;
+ }
+
+ nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
+ if (!nh_grp) {
+ nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
+ if (IS_ERR(nh_grp))
+ return PTR_ERR(nh_grp);
+ }
+
+ /* The route and the nexthop are described by the same struct, so we
+ * need to update the nexthop offload indication for the new route.
+ */
+ __mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
+
+out:
+ list_add_tail(&fib6_entry->common.nexthop_group_node,
+ &nh_grp->fib_list);
+ fib6_entry->common.nh_group = nh_grp;
+
+ return 0;
+}
+
+static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
+
+ list_del(&fib_entry->nexthop_group_node);
+ if (!list_empty(&nh_grp->fib_list))
+ return;
+
+ if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
+ mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
+ return;
+ }
+
+ mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
+}
+
+static int
+mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib6_entry *fib6_entry)
+{
+ struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
+ struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
+ int err;
+
+ mlxsw_sp_nexthop_group_vr_unlink(old_nh_grp, fib_node->fib);
+ fib6_entry->common.nh_group = NULL;
+ list_del(&fib6_entry->common.nexthop_group_node);
+
+ err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
+ if (err)
+ goto err_nexthop6_group_get;
+
+ err = mlxsw_sp_nexthop_group_vr_link(fib6_entry->common.nh_group,
+ fib_node->fib);
+ if (err)
+ goto err_nexthop_group_vr_link;
+
+ /* If this entry is offloaded, the adjacency index currently
+ * associated with it in the device's table is that of the old
+ * group. Start using the new one instead.
+ */
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp, &fib6_entry->common);
+ if (err)
+ goto err_fib_entry_update;
+
+ if (list_empty(&old_nh_grp->fib_list))
+ mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
+
+ return 0;
+
+err_fib_entry_update:
+ mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
+ fib_node->fib);
+err_nexthop_group_vr_link:
+ mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
+err_nexthop6_group_get:
+ list_add_tail(&fib6_entry->common.nexthop_group_node,
+ &old_nh_grp->fib_list);
+ fib6_entry->common.nh_group = old_nh_grp;
+ mlxsw_sp_nexthop_group_vr_link(old_nh_grp, fib_node->fib);
+ return err;
+}
+
+static int
+mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib6_entry *fib6_entry,
+ struct fib6_info **rt_arr, unsigned int nrt6)
+{
+ struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
+ int err, i;
+
+ for (i = 0; i < nrt6; i++) {
+ mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
+ if (IS_ERR(mlxsw_sp_rt6)) {
+ err = PTR_ERR(mlxsw_sp_rt6);
+ goto err_rt6_unwind;
+ }
+
+ list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
+ fib6_entry->nrt6++;
+ }
+
+ err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
+ if (err)
+ goto err_rt6_unwind;
+
+ return 0;
+
+err_rt6_unwind:
+ for (; i > 0; i--) {
+ fib6_entry->nrt6--;
+ mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
+ struct mlxsw_sp_rt6, list);
+ list_del(&mlxsw_sp_rt6->list);
+ mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
+ }
+ return err;
+}
+
+static void
+mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib6_entry *fib6_entry,
+ struct fib6_info **rt_arr, unsigned int nrt6)
+{
+ struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
+ int i;
+
+ for (i = 0; i < nrt6; i++) {
+ mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry,
+ rt_arr[i]);
+ if (WARN_ON_ONCE(!mlxsw_sp_rt6))
+ continue;
+
+ fib6_entry->nrt6--;
+ list_del(&mlxsw_sp_rt6->list);
+ mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
+ }
+
+ mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
+}
+
+static int
+mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ const struct fib6_info *rt)
+{
+ struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
+ union mlxsw_sp_l3addr dip = { .addr6 = rt->fib6_dst.addr };
+ u32 tb_id = mlxsw_sp_fix_tb_id(rt->fib6_table->tb6_id);
+ struct mlxsw_sp_router *router = mlxsw_sp->router;
+ int ifindex = nhgi->nexthops[0].ifindex;
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
+ ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
+ MLXSW_SP_L3_PROTO_IPV6,
+ dip);
+
+ if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
+ return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, fib_entry,
+ ipip_entry);
+ }
+ if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
+ MLXSW_SP_L3_PROTO_IPV6, &dip)) {
+ u32 tunnel_index;
+
+ tunnel_index = router->nve_decap_config.tunnel_index;
+ fib_entry->decap.tunnel_index = tunnel_index;
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ const struct fib6_info *rt)
+{
+ if (rt->fib6_flags & RTF_LOCAL)
+ return mlxsw_sp_fib6_entry_type_set_local(mlxsw_sp, fib_entry,
+ rt);
+ if (rt->fib6_flags & RTF_ANYCAST)
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
+ else if (rt->fib6_type == RTN_BLACKHOLE)
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
+ else if (rt->fib6_flags & RTF_REJECT)
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
+ else if (fib_entry->nh_group->nhgi->gateway)
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
+ else
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
+
+ return 0;
+}
+
+static void
+mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
+{
+ struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
+
+ list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
+ list) {
+ fib6_entry->nrt6--;
+ list_del(&mlxsw_sp_rt6->list);
+ mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
+ }
+}
+
+static struct mlxsw_sp_fib6_entry *
+mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_node *fib_node,
+ struct fib6_info **rt_arr, unsigned int nrt6)
+{
+ struct mlxsw_sp_fib6_entry *fib6_entry;
+ struct mlxsw_sp_fib_entry *fib_entry;
+ struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
+ int err, i;
+
+ fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
+ if (!fib6_entry)
+ return ERR_PTR(-ENOMEM);
+ fib_entry = &fib6_entry->common;
+
+ INIT_LIST_HEAD(&fib6_entry->rt6_list);
+
+ for (i = 0; i < nrt6; i++) {
+ mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
+ if (IS_ERR(mlxsw_sp_rt6)) {
+ err = PTR_ERR(mlxsw_sp_rt6);
+ goto err_rt6_unwind;
+ }
+ list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
+ fib6_entry->nrt6++;
+ }
+
+ err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
+ if (err)
+ goto err_rt6_unwind;
+
+ err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
+ fib_node->fib);
+ if (err)
+ goto err_nexthop_group_vr_link;
+
+ err = mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
+ if (err)
+ goto err_fib6_entry_type_set;
+
+ fib_entry->fib_node = fib_node;
+
+ return fib6_entry;
+
+err_fib6_entry_type_set:
+ mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
+err_nexthop_group_vr_link:
+ mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry);
+err_rt6_unwind:
+ for (; i > 0; i--) {
+ fib6_entry->nrt6--;
+ mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
+ struct mlxsw_sp_rt6, list);
+ list_del(&mlxsw_sp_rt6->list);
+ mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
+ }
+ kfree(fib6_entry);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_fib6_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib6_entry *fib6_entry)
+{
+ mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib6_entry->common);
+}
+
+static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib6_entry *fib6_entry)
+{
+ struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
+
+ mlxsw_sp_fib6_entry_type_unset(mlxsw_sp, fib6_entry);
+ mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
+ fib_node->fib);
+ mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
+ mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
+ WARN_ON(fib6_entry->nrt6);
+ kfree(fib6_entry);
+}
+
+static struct mlxsw_sp_fib6_entry *
+mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
+ const struct fib6_info *rt)
+{
+ struct mlxsw_sp_fib6_entry *fib6_entry;
+ struct mlxsw_sp_fib_node *fib_node;
+ struct mlxsw_sp_fib *fib;
+ struct fib6_info *cmp_rt;
+ struct mlxsw_sp_vr *vr;
+
+ vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
+ if (!vr)
+ return NULL;
+ fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
+
+ fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
+ sizeof(rt->fib6_dst.addr),
+ rt->fib6_dst.plen);
+ if (!fib_node)
+ return NULL;
+
+ fib6_entry = container_of(fib_node->fib_entry,
+ struct mlxsw_sp_fib6_entry, common);
+ cmp_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
+ if (rt->fib6_table->tb6_id == cmp_rt->fib6_table->tb6_id &&
+ rt->fib6_metric == cmp_rt->fib6_metric &&
+ mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
+ return fib6_entry;
+
+ return NULL;
+}
+
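+/* Do not let a route from the main table replace an offloaded route from
+ * the local table for the same prefix, as both tables are squashed into
+ * the same virtual router in the device.
+ */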
+static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
+{
+ struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
+ struct mlxsw_sp_fib6_entry *fib6_replaced;
+ struct fib6_info *rt, *rt_replaced;
+
+ if (!fib_node->fib_entry)
+ return true;
+
+ fib6_replaced = container_of(fib_node->fib_entry,
+ struct mlxsw_sp_fib6_entry,
+ common);
+ rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
+ rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced);
+ if (rt->fib6_table->tb6_id == RT_TABLE_MAIN &&
+ rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL)
+ return false;
+
+ return true;
+}
+
+static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
+ struct fib6_info **rt_arr,
+ unsigned int nrt6)
+{
+ struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
+ struct mlxsw_sp_fib_entry *replaced;
+ struct mlxsw_sp_fib_node *fib_node;
+ struct fib6_info *rt = rt_arr[0];
+ int err;
+
+ if (rt->fib6_src.plen)
+ return -EINVAL;
+
+ if (mlxsw_sp_fib6_rt_should_ignore(rt))
+ return 0;
+
+ if (rt->nh && !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, rt->nh->id))
+ return 0;
+
+ fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
+ &rt->fib6_dst.addr,
+ sizeof(rt->fib6_dst.addr),
+ rt->fib6_dst.plen,
+ MLXSW_SP_L3_PROTO_IPV6);
+ if (IS_ERR(fib_node))
+ return PTR_ERR(fib_node);
+
+ fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
+ nrt6);
+ if (IS_ERR(fib6_entry)) {
+ err = PTR_ERR(fib6_entry);
+ goto err_fib6_entry_create;
+ }
+
+ if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) {
+ mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
+ mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+ return 0;
+ }
+
+ replaced = fib_node->fib_entry;
+ err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib6_entry->common);
+ if (err)
+ goto err_fib_node_entry_link;
+
+ /* Nothing to replace */
+ if (!replaced)
+ return 0;
+
+ mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
+ fib6_replaced = container_of(replaced, struct mlxsw_sp_fib6_entry,
+ common);
+ mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_replaced);
+
+ return 0;
+
+err_fib_node_entry_link:
+ fib_node->fib_entry = replaced;
+ mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
+err_fib6_entry_create:
+ mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+ return err;
+}
+
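+/* Append additional sibling routes to an already-offloaded multipath IPv6
+ * entry and refresh its nexthop group accordingly.
+ */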
+static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
+ struct fib6_info **rt_arr,
+ unsigned int nrt6)
+{
+ struct mlxsw_sp_fib6_entry *fib6_entry;
+ struct mlxsw_sp_fib_node *fib_node;
+ struct fib6_info *rt = rt_arr[0];
+ int err;
+
+ if (rt->fib6_src.plen)
+ return -EINVAL;
+
+ if (mlxsw_sp_fib6_rt_should_ignore(rt))
+ return 0;
+
+ fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
+ &rt->fib6_dst.addr,
+ sizeof(rt->fib6_dst.addr),
+ rt->fib6_dst.plen,
+ MLXSW_SP_L3_PROTO_IPV6);
+ if (IS_ERR(fib_node))
+ return PTR_ERR(fib_node);
+
+ if (WARN_ON_ONCE(!fib_node->fib_entry)) {
+ mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+ return -EINVAL;
+ }
+
+ fib6_entry = container_of(fib_node->fib_entry,
+ struct mlxsw_sp_fib6_entry, common);
+ err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt_arr,
+ nrt6);
+ if (err)
+ goto err_fib6_entry_nexthop_add;
+
+ return 0;
+
+err_fib6_entry_nexthop_add:
+ mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+ return err;
+}
+
+static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
+ struct fib6_info **rt_arr,
+ unsigned int nrt6)
+{
+ struct mlxsw_sp_fib6_entry *fib6_entry;
+ struct mlxsw_sp_fib_node *fib_node;
+ struct fib6_info *rt = rt_arr[0];
+
+ if (mlxsw_sp_fib6_rt_should_ignore(rt))
+ return;
+
+ /* Multipath routes are first added to the FIB trie and only then
+ * notified. If we vetoed the addition, we will get a delete
+ * notification for a route we do not have. Therefore, do not warn if the
+ * route was not found.
+ */
+ fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
+ if (!fib6_entry)
+ return;
+
+ /* If not all the nexthops are deleted, then only reduce the nexthop
+ * group.
+ */
+ if (nrt6 != fib6_entry->nrt6) {
+ mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt_arr,
+ nrt6);
+ return;
+ }
+
+ fib_node = fib6_entry->common.fib_node;
+
+ mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib6_entry->common);
+ mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
+ mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+}
+
+static struct mlxsw_sp_mr_table *
+mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
+{
+ if (family == RTNL_FAMILY_IPMR)
+ return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
+ else
+ return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
+}
+
+static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
+ struct mfc_entry_notifier_info *men_info,
+ bool replace)
+{
+ struct mlxsw_sp_mr_table *mrt;
+ struct mlxsw_sp_vr *vr;
+
+ vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
+ if (IS_ERR(vr))
+ return PTR_ERR(vr);
+
+ mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
+ return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
+}
+
+static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
+ struct mfc_entry_notifier_info *men_info)
+{
+ struct mlxsw_sp_mr_table *mrt;
+ struct mlxsw_sp_vr *vr;
+
+ vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
+ if (WARN_ON(!vr))
+ return;
+
+ mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
+ mlxsw_sp_mr_route_del(mrt, men_info->mfc);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
+}
+
+static int
+mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
+ struct vif_entry_notifier_info *ven_info)
+{
+ struct mlxsw_sp_mr_table *mrt;
+ struct mlxsw_sp_rif *rif;
+ struct mlxsw_sp_vr *vr;
+
+ vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
+ if (IS_ERR(vr))
+ return PTR_ERR(vr);
+
+ mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
+ return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
+ ven_info->vif_index,
+ ven_info->vif_flags, rif);
+}
+
+static void
+mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
+ struct vif_entry_notifier_info *ven_info)
+{
+ struct mlxsw_sp_mr_table *mrt;
+ struct mlxsw_sp_vr *vr;
+
+ vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
+ if (WARN_ON(!vr))
+ return;
+
+ mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
+ mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
+}
+
+static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_node *fib_node)
+{
+ struct mlxsw_sp_fib4_entry *fib4_entry;
+
+ fib4_entry = container_of(fib_node->fib_entry,
+ struct mlxsw_sp_fib4_entry, common);
+ mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
+ mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
+ mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+}
+
+static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_node *fib_node)
+{
+ struct mlxsw_sp_fib6_entry *fib6_entry;
+
+ fib6_entry = container_of(fib_node->fib_entry,
+ struct mlxsw_sp_fib6_entry, common);
+ mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
+ mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
+ mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+}
+
+static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_node *fib_node)
+{
+ switch (fib_node->fib->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
+ break;
+ }
+}
+
+static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_vr *vr,
+ enum mlxsw_sp_l3proto proto)
+{
+ struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
+ struct mlxsw_sp_fib_node *fib_node, *tmp;
+
+ list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
+ bool do_break = &tmp->list == &fib->node_list;
+
+ mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
+ if (do_break)
+ break;
+ }
+}
+
+static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
+{
+ int i, j;
+
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
+ struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
+
+ if (!mlxsw_sp_vr_is_used(vr))
+ continue;
+
+ for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
+ mlxsw_sp_mr_table_flush(vr->mr_table[j]);
+ mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
+
+ /* If the virtual router was only used for IPv4, then it's no
+ * longer used.
+ */
+ if (!mlxsw_sp_vr_is_used(vr))
+ continue;
+ mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
+ }
+}
+
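+/* FIB notifications arrive in atomic context and are processed in
+ * deferred work. The structures below carry the notifier payload from
+ * the notifier callback to the work item.
+ */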
+struct mlxsw_sp_fib6_event_work {
+ struct fib6_info **rt_arr;
+ unsigned int nrt6;
+};
+
+struct mlxsw_sp_fib_event_work {
+ struct work_struct work;
+ union {
+ struct mlxsw_sp_fib6_event_work fib6_work;
+ struct fib_entry_notifier_info fen_info;
+ struct fib_rule_notifier_info fr_info;
+ struct fib_nh_notifier_info fnh_info;
+ struct mfc_entry_notifier_info men_info;
+ struct vif_entry_notifier_info ven_info;
+ };
+ struct mlxsw_sp *mlxsw_sp;
+ unsigned long event;
+};
+
+static int
+mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work,
+ struct fib6_entry_notifier_info *fen6_info)
+{
+ struct fib6_info *rt = fen6_info->rt;
+ struct fib6_info **rt_arr;
+ struct fib6_info *iter;
+ unsigned int nrt6;
+ int i = 0;
+
+ nrt6 = fen6_info->nsiblings + 1;
+
+ rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC);
+ if (!rt_arr)
+ return -ENOMEM;
+
+ fib6_work->rt_arr = rt_arr;
+ fib6_work->nrt6 = nrt6;
+
+ rt_arr[0] = rt;
+ fib6_info_hold(rt);
+
+ if (!fen6_info->nsiblings)
+ return 0;
+
+ list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
+ if (i == fen6_info->nsiblings)
+ break;
+
+ rt_arr[i + 1] = iter;
+ fib6_info_hold(iter);
+ i++;
+ }
+ WARN_ON_ONCE(i != fen6_info->nsiblings);
+
+ return 0;
+}
+
+static void
+mlxsw_sp_router_fib6_work_fini(struct mlxsw_sp_fib6_event_work *fib6_work)
+{
+ int i;
+
+ for (i = 0; i < fib6_work->nrt6; i++)
+ mlxsw_sp_rt6_release(fib6_work->rt_arr[i]);
+ kfree(fib6_work->rt_arr);
+}
+
+static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
+{
+ struct mlxsw_sp_fib_event_work *fib_work =
+ container_of(work, struct mlxsw_sp_fib_event_work, work);
+ struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
+ int err;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+ mlxsw_sp_span_respin(mlxsw_sp);
+
+ switch (fib_work->event) {
+ case FIB_EVENT_ENTRY_REPLACE:
+ err = mlxsw_sp_router_fib4_replace(mlxsw_sp,
+ &fib_work->fen_info);
+ if (err) {
+ dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
+ mlxsw_sp_fib4_offload_failed_flag_set(mlxsw_sp,
+ &fib_work->fen_info);
+ }
+ fib_info_put(fib_work->fen_info.fi);
+ break;
+ case FIB_EVENT_ENTRY_DEL:
+ mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
+ fib_info_put(fib_work->fen_info.fi);
+ break;
+ case FIB_EVENT_NH_ADD:
+ case FIB_EVENT_NH_DEL:
+ mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
+ fib_work->fnh_info.fib_nh);
+ fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
+ break;
+ }
+ mutex_unlock(&mlxsw_sp->router->lock);
+ kfree(fib_work);
+}
+
+static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
+{
+ struct mlxsw_sp_fib_event_work *fib_work =
+ container_of(work, struct mlxsw_sp_fib_event_work, work);
+ struct mlxsw_sp_fib6_event_work *fib6_work = &fib_work->fib6_work;
+ struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
+ int err;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+ mlxsw_sp_span_respin(mlxsw_sp);
+
+ switch (fib_work->event) {
+ case FIB_EVENT_ENTRY_REPLACE:
+ err = mlxsw_sp_router_fib6_replace(mlxsw_sp,
+ fib6_work->rt_arr,
+ fib6_work->nrt6);
+ if (err) {
+ dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
+ mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
+ fib6_work->rt_arr,
+ fib6_work->nrt6);
+ }
+ mlxsw_sp_router_fib6_work_fini(fib6_work);
+ break;
+ case FIB_EVENT_ENTRY_APPEND:
+ err = mlxsw_sp_router_fib6_append(mlxsw_sp,
+ fib6_work->rt_arr,
+ fib6_work->nrt6);
+ if (err) {
+ dev_warn(mlxsw_sp->bus_info->dev, "FIB append failed.\n");
+ mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
+ fib6_work->rt_arr,
+ fib6_work->nrt6);
+ }
+ mlxsw_sp_router_fib6_work_fini(fib6_work);
+ break;
+ case FIB_EVENT_ENTRY_DEL:
+ mlxsw_sp_router_fib6_del(mlxsw_sp,
+ fib6_work->rt_arr,
+ fib6_work->nrt6);
+ mlxsw_sp_router_fib6_work_fini(fib6_work);
+ break;
+ }
+ mutex_unlock(&mlxsw_sp->router->lock);
+ kfree(fib_work);
+}
+
+static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
+{
+ struct mlxsw_sp_fib_event_work *fib_work =
+ container_of(work, struct mlxsw_sp_fib_event_work, work);
+ struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
+ bool replace;
+ int err;
+
+ rtnl_lock();
+ mutex_lock(&mlxsw_sp->router->lock);
+ switch (fib_work->event) {
+ case FIB_EVENT_ENTRY_REPLACE:
+ case FIB_EVENT_ENTRY_ADD:
+ replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
+
+ err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
+ replace);
+ if (err)
+ dev_warn(mlxsw_sp->bus_info->dev, "MR entry add failed.\n");
+ mr_cache_put(fib_work->men_info.mfc);
+ break;
+ case FIB_EVENT_ENTRY_DEL:
+ mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
+ mr_cache_put(fib_work->men_info.mfc);
+ break;
+ case FIB_EVENT_VIF_ADD:
+ err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
+ &fib_work->ven_info);
+ if (err)
+ dev_warn(mlxsw_sp->bus_info->dev, "MR VIF add failed.\n");
+ dev_put(fib_work->ven_info.dev);
+ break;
+ case FIB_EVENT_VIF_DEL:
+ mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
+ &fib_work->ven_info);
+ dev_put(fib_work->ven_info.dev);
+ break;
+ }
+ mutex_unlock(&mlxsw_sp->router->lock);
+ rtnl_unlock();
+ kfree(fib_work);
+}
+
+static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
+ struct fib_notifier_info *info)
+{
+ struct fib_entry_notifier_info *fen_info;
+ struct fib_nh_notifier_info *fnh_info;
+
+ switch (fib_work->event) {
+ case FIB_EVENT_ENTRY_REPLACE:
+ case FIB_EVENT_ENTRY_DEL:
+ fen_info = container_of(info, struct fib_entry_notifier_info,
+ info);
+ fib_work->fen_info = *fen_info;
+ /* Take a reference on the fib_info to prevent it from being
+ * freed while the work is queued. Release it afterwards.
+ */
+ fib_info_hold(fib_work->fen_info.fi);
+ break;
+ case FIB_EVENT_NH_ADD:
+ case FIB_EVENT_NH_DEL:
+ fnh_info = container_of(info, struct fib_nh_notifier_info,
+ info);
+ fib_work->fnh_info = *fnh_info;
+ fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
+ break;
+ }
+}
+
+static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
+ struct fib_notifier_info *info)
+{
+ struct fib6_entry_notifier_info *fen6_info;
+ int err;
+
+ switch (fib_work->event) {
+ case FIB_EVENT_ENTRY_REPLACE:
+ case FIB_EVENT_ENTRY_APPEND:
+ case FIB_EVENT_ENTRY_DEL:
+ fen6_info = container_of(info, struct fib6_entry_notifier_info,
+ info);
+ err = mlxsw_sp_router_fib6_work_init(&fib_work->fib6_work,
+ fen6_info);
+ if (err)
+ return err;
+ break;
+ }
+
+ return 0;
+}
+
+static void
+mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
+ struct fib_notifier_info *info)
+{
+ switch (fib_work->event) {
+ case FIB_EVENT_ENTRY_REPLACE:
+ case FIB_EVENT_ENTRY_ADD:
+ case FIB_EVENT_ENTRY_DEL:
+ memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
+ mr_cache_hold(fib_work->men_info.mfc);
+ break;
+ case FIB_EVENT_VIF_ADD:
+ case FIB_EVENT_VIF_DEL:
+ memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
+ dev_hold(fib_work->ven_info.dev);
+ break;
+ }
+}
+
+static int mlxsw_sp_router_fib_rule_event(unsigned long event,
+ struct fib_notifier_info *info,
+ struct mlxsw_sp *mlxsw_sp)
+{
+ struct netlink_ext_ack *extack = info->extack;
+ struct fib_rule_notifier_info *fr_info;
+ struct fib_rule *rule;
+ int err = 0;
+
+ /* nothing to do at the moment */
+ if (event == FIB_EVENT_RULE_DEL)
+ return 0;
+
+ fr_info = container_of(info, struct fib_rule_notifier_info, info);
+ rule = fr_info->rule;
+
+ /* Rule only affects locally generated traffic */
+ if (rule->iifindex == mlxsw_sp_net(mlxsw_sp)->loopback_dev->ifindex)
+ return 0;
+
+ switch (info->family) {
+ case AF_INET:
+ if (!fib4_rule_default(rule) && !rule->l3mdev)
+ err = -EOPNOTSUPP;
+ break;
+ case AF_INET6:
+ if (!fib6_rule_default(rule) && !rule->l3mdev)
+ err = -EOPNOTSUPP;
+ break;
+ case RTNL_FAMILY_IPMR:
+ if (!ipmr_rule_default(rule) && !rule->l3mdev)
+ err = -EOPNOTSUPP;
+ break;
+ case RTNL_FAMILY_IP6MR:
+ if (!ip6mr_rule_default(rule) && !rule->l3mdev)
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ if (err < 0)
+ NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");
+
+ return err;
+}
+
+/* Called with rcu_read_lock() */
+static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct mlxsw_sp_fib_event_work *fib_work;
+ struct fib_notifier_info *info = ptr;
+ struct mlxsw_sp_router *router;
+ int err;
+
+ if ((info->family != AF_INET && info->family != AF_INET6 &&
+ info->family != RTNL_FAMILY_IPMR &&
+ info->family != RTNL_FAMILY_IP6MR))
+ return NOTIFY_DONE;
+
+ router = container_of(nb, struct mlxsw_sp_router, fib_nb);
+
+ switch (event) {
+ case FIB_EVENT_RULE_ADD:
+ case FIB_EVENT_RULE_DEL:
+ err = mlxsw_sp_router_fib_rule_event(event, info,
+ router->mlxsw_sp);
+ return notifier_from_errno(err);
+ case FIB_EVENT_ENTRY_ADD:
+ case FIB_EVENT_ENTRY_REPLACE:
+ case FIB_EVENT_ENTRY_APPEND:
+ if (info->family == AF_INET) {
+ struct fib_entry_notifier_info *fen_info = ptr;
+
+ if (fen_info->fi->fib_nh_is_v6) {
+ NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
+ return notifier_from_errno(-EINVAL);
+ }
+ }
+ break;
+ }
+
+ fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
+ if (!fib_work)
+ return NOTIFY_BAD;
+
+ fib_work->mlxsw_sp = router->mlxsw_sp;
+ fib_work->event = event;
+
+ switch (info->family) {
+ case AF_INET:
+ INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
+ mlxsw_sp_router_fib4_event(fib_work, info);
+ break;
+ case AF_INET6:
+ INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
+ err = mlxsw_sp_router_fib6_event(fib_work, info);
+ if (err)
+ goto err_fib_event;
+ break;
+ case RTNL_FAMILY_IP6MR:
+ case RTNL_FAMILY_IPMR:
+ INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
+ mlxsw_sp_router_fibmr_event(fib_work, info);
+ break;
+ }
+
+ mlxsw_core_schedule_work(&fib_work->work);
+
+ return NOTIFY_DONE;
+
+err_fib_event:
+ kfree(fib_work);
+ return NOTIFY_BAD;
+}
+
+static struct mlxsw_sp_rif *
+mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
+ if (mlxsw_sp->router->rifs[i] &&
+ mlxsw_sp->router->rifs[i]->dev == dev)
+ return mlxsw_sp->router->rifs[i];
+
+ return NULL;
+}
+
+bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev)
+{
+ struct mlxsw_sp_rif *rif;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ mutex_unlock(&mlxsw_sp->router->lock);
+
+ return rif;
+}
+
+u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
+{
+ struct mlxsw_sp_rif *rif;
+ u16 vid = 0;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!rif)
+ goto out;
+
+ /* We only return the VID for VLAN RIFs. Otherwise we return an
+ * invalid value (0).
+ */
+ if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
+ goto out;
+
+ vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+
+out:
+ mutex_unlock(&mlxsw_sp->router->lock);
+ return vid;
+}
+
+static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
+{
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+ int err;
+
+ mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_ritr_enable_set(ritr_pl, false);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif)
+{
+ mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
+ mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
+ mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
+}
+
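+/* Decide whether an address event should trigger RIF configuration:
+ * configure on NETDEV_UP only if the netdevice has no RIF yet, and
+ * deconfigure on NETDEV_DOWN only once it has neither IPv4 nor IPv6
+ * addresses left.
+ */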
+static bool
+mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
+ unsigned long event)
+{
+ struct inet6_dev *inet6_dev;
+ bool addr_list_empty = true;
+ struct in_device *idev;
+
+ switch (event) {
+ case NETDEV_UP:
+ return rif == NULL;
+ case NETDEV_DOWN:
+ rcu_read_lock();
+ idev = __in_dev_get_rcu(dev);
+ if (idev && idev->ifa_list)
+ addr_list_empty = false;
+
+ inet6_dev = __in6_dev_get(dev);
+ if (addr_list_empty && inet6_dev &&
+ !list_empty(&inet6_dev->addr_list))
+ addr_list_empty = false;
+ rcu_read_unlock();
+
+ /* macvlans do not have a RIF, but rather piggy back on the
+ * RIF of their lower device.
+ */
+ if (netif_is_macvlan(dev) && addr_list_empty)
+ return true;
+
+ if (rif && addr_list_empty &&
+ !netif_is_l3_slave(rif->dev))
+ return true;
+ /* It is possible we already removed the RIF ourselves
+ * if it was assigned to a netdev that is now a bridge
+ * or LAG slave.
+ */
+ return false;
+ }
+
+ return false;
+}
+
+static enum mlxsw_sp_rif_type
+mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev)
+{
+ enum mlxsw_sp_fid_type type;
+
+ if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
+ return MLXSW_SP_RIF_TYPE_IPIP_LB;
+
+ /* Otherwise RIF type is derived from the type of the underlying FID. */
+ if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
+ type = MLXSW_SP_FID_TYPE_8021Q;
+ else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
+ type = MLXSW_SP_FID_TYPE_8021Q;
+ else if (netif_is_bridge_master(dev))
+ type = MLXSW_SP_FID_TYPE_8021D;
+ else
+ type = MLXSW_SP_FID_TYPE_RFID;
+
+ return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
+}
+
+static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
+ if (!mlxsw_sp->router->rifs[i]) {
+ *p_rif_index = i;
+ return 0;
+ }
+ }
+
+ return -ENOBUFS;
+}
+
+static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
+ u16 vr_id,
+ struct net_device *l3_dev)
+{
+ struct mlxsw_sp_rif *rif;
+
+ rif = kzalloc(rif_size, GFP_KERNEL);
+ if (!rif)
+ return NULL;
+
+ INIT_LIST_HEAD(&rif->nexthop_list);
+ INIT_LIST_HEAD(&rif->neigh_list);
+ if (l3_dev) {
+ ether_addr_copy(rif->addr, l3_dev->dev_addr);
+ rif->mtu = l3_dev->mtu;
+ rif->dev = l3_dev;
+ }
+ rif->vr_id = vr_id;
+ rif->rif_index = rif_index;
+
+ return rif;
+}
+
+struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
+ u16 rif_index)
+{
+ return mlxsw_sp->router->rifs[rif_index];
+}
+
+u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
+{
+ return rif->rif_index;
+}
+
+u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
+{
+ return lb_rif->common.rif_index;
+}
+
+u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
+{
+ u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(lb_rif->common.dev);
+ struct mlxsw_sp_vr *ul_vr;
+
+ ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
+ if (WARN_ON(IS_ERR(ul_vr)))
+ return 0;
+
+ return ul_vr->id;
+}
+
+u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
+{
+ return lb_rif->ul_rif_id;
+}
+
+static bool
+mlxsw_sp_router_port_l3_stats_enabled(struct mlxsw_sp_rif *rif)
+{
+ return mlxsw_sp_rif_counter_valid_get(rif,
+ MLXSW_SP_RIF_COUNTER_EGRESS) &&
+ mlxsw_sp_rif_counter_valid_get(rif,
+ MLXSW_SP_RIF_COUNTER_INGRESS);
+}
+
+static int
+mlxsw_sp_router_port_l3_stats_enable(struct mlxsw_sp_rif *rif)
+{
+ int err;
+
+ err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
+ if (err)
+ return err;
+
+ /* Clear stale data. */
+ err = mlxsw_sp_rif_counter_fetch_clear(rif,
+ MLXSW_SP_RIF_COUNTER_INGRESS,
+ NULL);
+ if (err)
+ goto err_clear_ingress;
+
+ err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+ if (err)
+ goto err_alloc_egress;
+
+ /* Clear stale data. */
+ err = mlxsw_sp_rif_counter_fetch_clear(rif,
+ MLXSW_SP_RIF_COUNTER_EGRESS,
+ NULL);
+ if (err)
+ goto err_clear_egress;
+
+ return 0;
+
+err_clear_egress:
+ mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+err_alloc_egress:
+err_clear_ingress:
+ mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
+ return err;
+}
+
+static void
+mlxsw_sp_router_port_l3_stats_disable(struct mlxsw_sp_rif *rif)
+{
+ mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+ mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
+}
+
+static void
+mlxsw_sp_router_port_l3_stats_report_used(struct mlxsw_sp_rif *rif,
+ struct netdev_notifier_offload_xstats_info *info)
+{
+ if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
+ return;
+ netdev_offload_xstats_report_used(info->report_used);
+}
+
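+/* Translate the ingress and egress RIF counter sets into rtnl_hw_stats64:
+ * good unicast, multicast and broadcast frames are summed per direction,
+ * while error and discard counters map to the errors and dropped fields.
+ */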
+static int
+mlxsw_sp_router_port_l3_stats_fetch(struct mlxsw_sp_rif *rif,
+ struct rtnl_hw_stats64 *p_stats)
+{
+ struct mlxsw_sp_rif_counter_set_basic ingress;
+ struct mlxsw_sp_rif_counter_set_basic egress;
+ int err;
+
+ err = mlxsw_sp_rif_counter_fetch_clear(rif,
+ MLXSW_SP_RIF_COUNTER_INGRESS,
+ &ingress);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_rif_counter_fetch_clear(rif,
+ MLXSW_SP_RIF_COUNTER_EGRESS,
+ &egress);
+ if (err)
+ return err;
+
+#define MLXSW_SP_ROUTER_ALL_GOOD(SET, SFX) \
+ ((SET.good_unicast_ ## SFX) + \
+ (SET.good_multicast_ ## SFX) + \
+ (SET.good_broadcast_ ## SFX))
+
+ p_stats->rx_packets = MLXSW_SP_ROUTER_ALL_GOOD(ingress, packets);
+ p_stats->tx_packets = MLXSW_SP_ROUTER_ALL_GOOD(egress, packets);
+ p_stats->rx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(ingress, bytes);
+ p_stats->tx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(egress, bytes);
+ p_stats->rx_errors = ingress.error_packets;
+ p_stats->tx_errors = egress.error_packets;
+ p_stats->rx_dropped = ingress.discard_packets;
+ p_stats->tx_dropped = egress.discard_packets;
+ p_stats->multicast = ingress.good_multicast_packets +
+ ingress.good_broadcast_packets;
+
+#undef MLXSW_SP_ROUTER_ALL_GOOD
+
+ return 0;
+}
+
+static int
+mlxsw_sp_router_port_l3_stats_report_delta(struct mlxsw_sp_rif *rif,
+ struct netdev_notifier_offload_xstats_info *info)
+{
+ struct rtnl_hw_stats64 stats = {};
+ int err;
+
+ if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
+ return 0;
+
+ err = mlxsw_sp_router_port_l3_stats_fetch(rif, &stats);
+ if (err)
+ return err;
+
+ netdev_offload_xstats_report_delta(info->report_delta, &stats);
+ return 0;
+}
+
+struct mlxsw_sp_router_hwstats_notify_work {
+ struct work_struct work;
+ struct net_device *dev;
+};
+
+static void mlxsw_sp_router_hwstats_notify_work(struct work_struct *work)
+{
+ struct mlxsw_sp_router_hwstats_notify_work *hws_work =
+ container_of(work, struct mlxsw_sp_router_hwstats_notify_work,
+ work);
+
+ rtnl_lock();
+ rtnl_offload_xstats_notify(hws_work->dev);
+ rtnl_unlock();
+ dev_put(hws_work->dev);
+ kfree(hws_work);
+}
+
+static void
+mlxsw_sp_router_hwstats_notify_schedule(struct net_device *dev)
+{
+ struct mlxsw_sp_router_hwstats_notify_work *hws_work;
+
+ /* To collect notification payload, the core ends up sending another
+ * notifier block message, which would deadlock on the attempt to
+ * acquire the router lock again. Just postpone the notification until
+ * later.
+ */
+
+ hws_work = kzalloc(sizeof(*hws_work), GFP_KERNEL);
+ if (!hws_work)
+ return;
+
+ INIT_WORK(&hws_work->work, mlxsw_sp_router_hwstats_notify_work);
+ dev_hold(dev);
+ hws_work->dev = dev;
+ mlxsw_core_schedule_work(&hws_work->work);
+}
+
+int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
+{
+ return rif->dev->ifindex;
+}
+
+const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
+{
+ return rif->dev;
+}
+
+static void mlxsw_sp_rif_push_l3_stats(struct mlxsw_sp_rif *rif)
+{
+ struct rtnl_hw_stats64 stats = {};
+
+ if (!mlxsw_sp_router_port_l3_stats_fetch(rif, &stats))
+ netdev_offload_xstats_push_delta(rif->dev,
+ NETDEV_OFFLOAD_XSTATS_TYPE_L3,
+ &stats);
+}
+
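+/* Create a router interface (RIF) for a netdevice: bind it to a virtual
+ * router, allocate a free RIF index, take a reference on the backing FID
+ * if the RIF type has one, configure the RIF in the device and attach it
+ * to the multicast routing tables. L3 HW stats are enabled if the
+ * netdevice requested them.
+ */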
+static struct mlxsw_sp_rif *
+mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_rif_params *params,
+ struct netlink_ext_ack *extack)
+{
+ u32 tb_id = l3mdev_fib_table(params->dev);
+ const struct mlxsw_sp_rif_ops *ops;
+ struct mlxsw_sp_fid *fid = NULL;
+ enum mlxsw_sp_rif_type type;
+ struct mlxsw_sp_rif *rif;
+ struct mlxsw_sp_vr *vr;
+ u16 rif_index;
+ int i, err;
+
+ type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
+ ops = mlxsw_sp->router->rif_ops_arr[type];
+
+ vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
+ if (IS_ERR(vr))
+ return ERR_CAST(vr);
+ vr->rif_count++;
+
+ err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
+ goto err_rif_index_alloc;
+ }
+
+ rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
+ if (!rif) {
+ err = -ENOMEM;
+ goto err_rif_alloc;
+ }
+ dev_hold(rif->dev);
+ mlxsw_sp->router->rifs[rif_index] = rif;
+ rif->mlxsw_sp = mlxsw_sp;
+ rif->ops = ops;
+
+ if (ops->fid_get) {
+ fid = ops->fid_get(rif, extack);
+ if (IS_ERR(fid)) {
+ err = PTR_ERR(fid);
+ goto err_fid_get;
+ }
+ rif->fid = fid;
+ }
+
+ if (ops->setup)
+ ops->setup(rif, params);
+
+ err = ops->configure(rif, extack);
+ if (err)
+ goto err_configure;
+
+ for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
+ err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
+ if (err)
+ goto err_mr_rif_add;
+ }
+
+ if (netdev_offload_xstats_enabled(rif->dev,
+ NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
+ err = mlxsw_sp_router_port_l3_stats_enable(rif);
+ if (err)
+ goto err_stats_enable;
+ mlxsw_sp_router_hwstats_notify_schedule(rif->dev);
+ } else {
+ mlxsw_sp_rif_counters_alloc(rif);
+ }
+
+ atomic_inc(&mlxsw_sp->router->rifs_count);
+ return rif;
+
+err_stats_enable:
+err_mr_rif_add:
+ for (i--; i >= 0; i--)
+ mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
+ ops->deconfigure(rif);
+err_configure:
+ if (fid)
+ mlxsw_sp_fid_put(fid);
+err_fid_get:
+ mlxsw_sp->router->rifs[rif_index] = NULL;
+ dev_put(rif->dev);
+ kfree(rif);
+err_rif_alloc:
+err_rif_index_alloc:
+ vr->rif_count--;
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
+{
+ const struct mlxsw_sp_rif_ops *ops = rif->ops;
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ struct mlxsw_sp_fid *fid = rif->fid;
+ struct mlxsw_sp_vr *vr;
+ int i;
+
+ atomic_dec(&mlxsw_sp->router->rifs_count);
+ mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
+ vr = &mlxsw_sp->router->vrs[rif->vr_id];
+
+ if (netdev_offload_xstats_enabled(rif->dev,
+ NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
+ mlxsw_sp_rif_push_l3_stats(rif);
+ mlxsw_sp_router_port_l3_stats_disable(rif);
+ mlxsw_sp_router_hwstats_notify_schedule(rif->dev);
+ } else {
+ mlxsw_sp_rif_counters_free(rif);
+ }
+
+ for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
+ mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
+ ops->deconfigure(rif);
+ if (fid)
+ /* Loopback RIFs are not associated with a FID. */
+ mlxsw_sp_fid_put(fid);
+ mlxsw_sp->router->rifs[rif->rif_index] = NULL;
+ dev_put(rif->dev);
+ kfree(rif);
+ vr->rif_count--;
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
+}
+
+void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *dev)
+{
+ struct mlxsw_sp_rif *rif;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!rif)
+ goto out;
+ mlxsw_sp_rif_destroy(rif);
+out:
+ mutex_unlock(&mlxsw_sp->router->lock);
+}
+
+static void
+mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
+
+ params->vid = mlxsw_sp_port_vlan->vid;
+ params->lag = mlxsw_sp_port->lagged;
+ if (params->lag)
+ params->lag_id = mlxsw_sp_port->lag_id;
+ else
+ params->system_port = mlxsw_sp_port->local_port;
+}
+
+static struct mlxsw_sp_rif_subport *
+mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
+{
+ return container_of(rif, struct mlxsw_sp_rif_subport, common);
+}
+
+static struct mlxsw_sp_rif *
+mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_rif_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_rif_subport *rif_subport;
+ struct mlxsw_sp_rif *rif;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev);
+ if (!rif)
+ return mlxsw_sp_rif_create(mlxsw_sp, params, extack);
+
+ rif_subport = mlxsw_sp_rif_subport_rif(rif);
+ refcount_inc(&rif_subport->ref_count);
+ return rif;
+}
+
+static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp_rif_subport *rif_subport;
+
+ rif_subport = mlxsw_sp_rif_subport_rif(rif);
+ if (!refcount_dec_and_test(&rif_subport->ref_count))
+ return;
+
+ mlxsw_sp_rif_destroy(rif);
+}
+
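+/* RIF MAC profiles: the device supports a limited number of distinct MAC
+ * prefixes for router interfaces. Profiles are kept in an IDR, shared by
+ * RIFs whose MAC matches an existing prefix and reference counted.
+ */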
+static int mlxsw_sp_rif_mac_profile_index_alloc(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif_mac_profile *profile,
+ struct netlink_ext_ack *extack)
+{
+ u8 max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
+ struct mlxsw_sp_router *router = mlxsw_sp->router;
+ int id;
+
+ id = idr_alloc(&router->rif_mac_profiles_idr, profile, 0,
+ max_rif_mac_profiles, GFP_KERNEL);
+
+ if (id >= 0) {
+ profile->id = id;
+ return 0;
+ }
+
+ if (id == -ENOSPC)
+ NL_SET_ERR_MSG_MOD(extack,
+ "Exceeded number of supported router interface MAC profiles");
+
+ return id;
+}
+
+static struct mlxsw_sp_rif_mac_profile *
+mlxsw_sp_rif_mac_profile_index_free(struct mlxsw_sp *mlxsw_sp, u8 mac_profile)
+{
+ struct mlxsw_sp_rif_mac_profile *profile;
+
+ profile = idr_remove(&mlxsw_sp->router->rif_mac_profiles_idr,
+ mac_profile);
+ WARN_ON(!profile);
+ return profile;
+}
+
+static struct mlxsw_sp_rif_mac_profile *
+mlxsw_sp_rif_mac_profile_alloc(const char *mac)
+{
+ struct mlxsw_sp_rif_mac_profile *profile;
+
+ profile = kzalloc(sizeof(*profile), GFP_KERNEL);
+ if (!profile)
+ return NULL;
+
+ ether_addr_copy(profile->mac_prefix, mac);
+ refcount_set(&profile->ref_count, 1);
+ return profile;
+}
+
+static struct mlxsw_sp_rif_mac_profile *
+mlxsw_sp_rif_mac_profile_find(const struct mlxsw_sp *mlxsw_sp, const char *mac)
+{
+ struct mlxsw_sp_router *router = mlxsw_sp->router;
+ struct mlxsw_sp_rif_mac_profile *profile;
+ int id;
+
+ idr_for_each_entry(&router->rif_mac_profiles_idr, profile, id) {
+ if (ether_addr_equal_masked(profile->mac_prefix, mac,
+ mlxsw_sp->mac_mask))
+ return profile;
+ }
+
+ return NULL;
+}
+
+static u64 mlxsw_sp_rif_mac_profiles_occ_get(void *priv)
+{
+ const struct mlxsw_sp *mlxsw_sp = priv;
+
+ return atomic_read(&mlxsw_sp->router->rif_mac_profiles_count);
+}
+
+static u64 mlxsw_sp_rifs_occ_get(void *priv)
+{
+ const struct mlxsw_sp *mlxsw_sp = priv;
+
+ return atomic_read(&mlxsw_sp->router->rifs_count);
+}
+
+static struct mlxsw_sp_rif_mac_profile *
+mlxsw_sp_rif_mac_profile_create(struct mlxsw_sp *mlxsw_sp, const char *mac,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_rif_mac_profile *profile;
+ int err;
+
+ profile = mlxsw_sp_rif_mac_profile_alloc(mac);
+ if (!profile)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlxsw_sp_rif_mac_profile_index_alloc(mlxsw_sp, profile, extack);
+ if (err)
+ goto profile_index_alloc_err;
+
+ atomic_inc(&mlxsw_sp->router->rif_mac_profiles_count);
+ return profile;
+
+profile_index_alloc_err:
+ kfree(profile);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_rif_mac_profile_destroy(struct mlxsw_sp *mlxsw_sp,
+ u8 mac_profile)
+{
+ struct mlxsw_sp_rif_mac_profile *profile;
+
+ atomic_dec(&mlxsw_sp->router->rif_mac_profiles_count);
+ profile = mlxsw_sp_rif_mac_profile_index_free(mlxsw_sp, mac_profile);
+ kfree(profile);
+}
+
+static int mlxsw_sp_rif_mac_profile_get(struct mlxsw_sp *mlxsw_sp,
+ const char *mac, u8 *p_mac_profile,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_rif_mac_profile *profile;
+
+ profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, mac);
+ if (profile) {
+ refcount_inc(&profile->ref_count);
+ goto out;
+ }
+
+ profile = mlxsw_sp_rif_mac_profile_create(mlxsw_sp, mac, extack);
+ if (IS_ERR(profile))
+ return PTR_ERR(profile);
+
+out:
+ *p_mac_profile = profile->id;
+ return 0;
+}
+
+static void mlxsw_sp_rif_mac_profile_put(struct mlxsw_sp *mlxsw_sp,
+ u8 mac_profile)
+{
+ struct mlxsw_sp_rif_mac_profile *profile;
+
+ profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
+ mac_profile);
+ if (WARN_ON(!profile))
+ return;
+
+ if (!refcount_dec_and_test(&profile->ref_count))
+ return;
+
+ mlxsw_sp_rif_mac_profile_destroy(mlxsw_sp, mac_profile);
+}
+
+static bool mlxsw_sp_rif_mac_profile_is_shared(const struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ struct mlxsw_sp_rif_mac_profile *profile;
+
+ profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
+ rif->mac_profile_id);
+ if (WARN_ON(!profile))
+ return false;
+
+ return refcount_read(&profile->ref_count) > 1;
+}
+
+static int mlxsw_sp_rif_mac_profile_edit(struct mlxsw_sp_rif *rif,
+ const char *new_mac)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ struct mlxsw_sp_rif_mac_profile *profile;
+
+ profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
+ rif->mac_profile_id);
+ if (WARN_ON(!profile))
+ return -EINVAL;
+
+ ether_addr_copy(profile->mac_prefix, new_mac);
+ return 0;
+}
+
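+/* When the MAC of a RIF changes, edit its profile in place if the RIF is
+ * the only user and no other profile already matches the new address;
+ * otherwise, take a reference on a matching (or newly created) profile
+ * and release the old one.
+ */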
+static int
+mlxsw_sp_rif_mac_profile_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif,
+ const char *new_mac,
+ struct netlink_ext_ack *extack)
+{
+ u8 mac_profile;
+ int err;
+
+ if (!mlxsw_sp_rif_mac_profile_is_shared(rif) &&
+ !mlxsw_sp_rif_mac_profile_find(mlxsw_sp, new_mac))
+ return mlxsw_sp_rif_mac_profile_edit(rif, new_mac);
+
+ err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, new_mac,
+ &mac_profile, extack);
+ if (err)
+ return err;
+
+ mlxsw_sp_rif_mac_profile_put(mlxsw_sp, rif->mac_profile_id);
+ rif->mac_profile_id = mac_profile;
+ return 0;
+}
+
+static int
+__mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
+ struct net_device *l3_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_rif_params params = {
+ .dev = l3_dev,
+ };
+ u16 vid = mlxsw_sp_port_vlan->vid;
+ struct mlxsw_sp_rif *rif;
+ struct mlxsw_sp_fid *fid;
+ int err;
+
+ mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
+ rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);
+ if (IS_ERR(rif))
+ return PTR_ERR(rif);
+
+ /* FID was already created, just take a reference */
+ fid = rif->ops->fid_get(rif, extack);
+ err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
+ if (err)
+ goto err_fid_port_vid_map;
+
+ err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
+ if (err)
+ goto err_port_vid_learning_set;
+
+ err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
+ BR_STATE_FORWARDING);
+ if (err)
+ goto err_port_vid_stp_set;
+
+ mlxsw_sp_port_vlan->fid = fid;
+
+ return 0;
+
+err_port_vid_stp_set:
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
+err_port_vid_learning_set:
+ mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
+err_fid_port_vid_map:
+ mlxsw_sp_fid_put(fid);
+ mlxsw_sp_rif_subport_put(rif);
+ return err;
+}
+
+static void
+__mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
+ struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+ struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid);
+ u16 vid = mlxsw_sp_port_vlan->vid;
+
+ if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
+ return;
+
+ mlxsw_sp_port_vlan->fid = NULL;
+ mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
+ mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
+ mlxsw_sp_fid_put(fid);
+ mlxsw_sp_rif_subport_put(rif);
+}
+
+int
+mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
+ struct net_device *l3_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_rif *rif;
+ int err = 0;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
+ if (!rif)
+ goto out;
+
+ err = __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan, l3_dev,
+ extack);
+out:
+ mutex_unlock(&mlxsw_sp->router->lock);
+ return err;
+}
+
+void
+mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+ __mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
+ mutex_unlock(&mlxsw_sp->router->lock);
+}
+
+static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
+ struct net_device *port_dev,
+ unsigned long event, u16 vid,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
+ if (WARN_ON(!mlxsw_sp_port_vlan))
+ return -EINVAL;
+
+ switch (event) {
+ case NETDEV_UP:
+ return __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
+ l3_dev, extack);
+ case NETDEV_DOWN:
+ __mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
+ break;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
+ unsigned long event,
+ struct netlink_ext_ack *extack)
+{
+ if (netif_is_any_bridge_port(port_dev) || netif_is_lag_port(port_dev))
+ return 0;
+
+ return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
+ MLXSW_SP_DEFAULT_VID, extack);
+}
+
+static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
+ struct net_device *lag_dev,
+ unsigned long event, u16 vid,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *port_dev;
+ struct list_head *iter;
+ int err;
+
+ netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
+ if (mlxsw_sp_port_dev_check(port_dev)) {
+ err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
+ port_dev,
+ event, vid,
+ extack);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
+ unsigned long event,
+ struct netlink_ext_ack *extack)
+{
+ if (netif_is_bridge_port(lag_dev))
+ return 0;
+
+ return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
+ MLXSW_SP_DEFAULT_VID, extack);
+}
+
+static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *l3_dev,
+ unsigned long event,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_rif_params params = {
+ .dev = l3_dev,
+ };
+ struct mlxsw_sp_rif *rif;
+
+ switch (event) {
+ case NETDEV_UP:
+ if (netif_is_bridge_master(l3_dev) && br_vlan_enabled(l3_dev)) {
+ u16 proto;
+
+ br_vlan_get_proto(l3_dev, &proto);
+ if (proto == ETH_P_8021AD) {
+ NL_SET_ERR_MSG_MOD(extack, "Adding an IP address to 802.1ad bridge is not supported");
+ return -EOPNOTSUPP;
+ }
+ }
+ rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
+ if (IS_ERR(rif))
+ return PTR_ERR(rif);
+ break;
+ case NETDEV_DOWN:
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
+ mlxsw_sp_rif_destroy(rif);
+ break;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *vlan_dev,
+ unsigned long event,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
+ u16 vid = vlan_dev_vlan_id(vlan_dev);
+
+ if (netif_is_bridge_port(vlan_dev))
+ return 0;
+
+ if (mlxsw_sp_port_dev_check(real_dev))
+ return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
+ event, vid, extack);
+ else if (netif_is_lag_master(real_dev))
+ return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
+ vid, extack);
+ else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
+ return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev, event,
+ extack);
+
+ return 0;
+}
+
+static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
+{
+ u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
+ u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
+
+ return ether_addr_equal_masked(mac, vrrp4, mask);
+}
+
+static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
+{
+ u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
+ u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
+
+ return ether_addr_equal_masked(mac, vrrp6, mask);
+}
+
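+/* Program the VRRP virtual router ID on the RIF when a macvlan with a
+ * VRRP MAC (IPv4 or IPv6 variant) is added on top of it, and clear it
+ * when the macvlan is removed.
+ */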
+static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
+ const u8 *mac, bool adding)
+{
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+ u8 vrrp_id = adding ? mac[5] : 0;
+ int err;
+
+ if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
+ !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
+ return 0;
+
+ mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+ if (err)
+ return err;
+
+ if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
+ mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
+ else
+ mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *macvlan_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
+ struct mlxsw_sp_rif *rif;
+ int err;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
+ if (!rif) {
+ NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
+ return -EOPNOTSUPP;
+ }
+
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), true);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
+ macvlan_dev->dev_addr, true);
+ if (err)
+ goto err_rif_vrrp_add;
+
+ /* Make sure the bridge driver does not have this MAC pointing at
+ * some other port.
+ */
+ if (rif->ops->fdb_del)
+ rif->ops->fdb_del(rif, macvlan_dev->dev_addr);
+
+ return 0;
+
+err_rif_vrrp_add:
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
+ return err;
+}
+
+static void __mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *macvlan_dev)
+{
+ struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
+ struct mlxsw_sp_rif *rif;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
+ /* If we do not have a RIF, then we already took care of
+ * removing the macvlan's MAC during RIF deletion.
+ */
+ if (!rif)
+ return;
+ mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
+ false);
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
+}
+
+void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *macvlan_dev)
+{
+ mutex_lock(&mlxsw_sp->router->lock);
+ __mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
+ mutex_unlock(&mlxsw_sp->router->lock);
+}
+
+static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *macvlan_dev,
+ unsigned long event,
+ struct netlink_ext_ack *extack)
+{
+ switch (event) {
+ case NETDEV_UP:
+ return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
+ case NETDEV_DOWN:
+ __mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
+ break;
+ }
+
+ return 0;
+}
+
+static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *dev,
+ unsigned long event,
+ struct netlink_ext_ack *extack)
+{
+ if (mlxsw_sp_port_dev_check(dev))
+ return mlxsw_sp_inetaddr_port_event(dev, event, extack);
+ else if (netif_is_lag_master(dev))
+ return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
+ else if (netif_is_bridge_master(dev))
+ return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, event,
+ extack);
+ else if (is_vlan_dev(dev))
+ return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event,
+ extack);
+ else if (netif_is_macvlan(dev))
+ return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event,
+ extack);
+ else
+ return 0;
+}
+
+static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
+ struct net_device *dev = ifa->ifa_dev->dev;
+ struct mlxsw_sp_router *router;
+ struct mlxsw_sp_rif *rif;
+ int err = 0;
+
+ /* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
+ if (event == NETDEV_UP)
+ return NOTIFY_DONE;
+
+ router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb);
+ mutex_lock(&router->lock);
+ rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev);
+ if (!mlxsw_sp_rif_should_config(rif, dev, event))
+ goto out;
+
+ err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, NULL);
+out:
+ mutex_unlock(&router->lock);
+ return notifier_from_errno(err);
+}
+
+int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct in_validator_info *ivi = (struct in_validator_info *) ptr;
+ struct net_device *dev = ivi->ivi_dev->dev;
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_rif *rif;
+ int err = 0;
+
+ mlxsw_sp = mlxsw_sp_lower_get(dev);
+ if (!mlxsw_sp)
+ return NOTIFY_DONE;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!mlxsw_sp_rif_should_config(rif, dev, event))
+ goto out;
+
+ err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack);
+out:
+ mutex_unlock(&mlxsw_sp->router->lock);
+ return notifier_from_errno(err);
+}
+
+struct mlxsw_sp_inet6addr_event_work {
+ struct work_struct work;
+ struct mlxsw_sp *mlxsw_sp;
+ struct net_device *dev;
+ unsigned long event;
+};
+
+static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
+{
+ struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
+ container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
+ struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp;
+ struct net_device *dev = inet6addr_work->dev;
+ unsigned long event = inet6addr_work->event;
+ struct mlxsw_sp_rif *rif;
+
+ rtnl_lock();
+ mutex_lock(&mlxsw_sp->router->lock);
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!mlxsw_sp_rif_should_config(rif, dev, event))
+ goto out;
+
+ __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, NULL);
+out:
+ mutex_unlock(&mlxsw_sp->router->lock);
+ rtnl_unlock();
+ dev_put(dev);
+ kfree(inet6addr_work);
+}
+
+/* Called with rcu_read_lock() */
+static int mlxsw_sp_inet6addr_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
+ struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
+ struct net_device *dev = if6->idev->dev;
+ struct mlxsw_sp_router *router;
+
+ /* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
+ if (event == NETDEV_UP)
+ return NOTIFY_DONE;
+
+ inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
+ if (!inet6addr_work)
+ return NOTIFY_BAD;
+
+ router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb);
+ INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
+ inet6addr_work->mlxsw_sp = router->mlxsw_sp;
+ inet6addr_work->dev = dev;
+ inet6addr_work->event = event;
+ dev_hold(dev);
+ mlxsw_core_schedule_work(&inet6addr_work->work);
+
+ return NOTIFY_DONE;
+}
+
+int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
+ struct net_device *dev = i6vi->i6vi_dev->dev;
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_rif *rif;
+ int err = 0;
+
+ mlxsw_sp = mlxsw_sp_lower_get(dev);
+ if (!mlxsw_sp)
+ return NOTIFY_DONE;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!mlxsw_sp_rif_should_config(rif, dev, event))
+ goto out;
+
+ err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack);
+out:
+ mutex_unlock(&mlxsw_sp->router->lock);
+ return notifier_from_errno(err);
+}
+
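+/* Edit an existing RIF in place: query its current RITR configuration,
+ * overwrite the MTU, MAC and MAC profile, and write the register back.
+ */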
+static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
+ const char *mac, int mtu, u8 mac_profile)
+{
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+ int err;
+
+ mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
+ mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
+ mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, mac_profile);
+ mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static int
+mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *dev = rif->dev;
+ u8 old_mac_profile;
+ u16 fid_index;
+ int err;
+
+ fid_index = mlxsw_sp_fid_index(rif->fid);
+
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
+ if (err)
+ return err;
+
+ old_mac_profile = rif->mac_profile_id;
+ err = mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, dev->dev_addr,
+ extack);
+ if (err)
+ goto err_rif_mac_profile_replace;
+
+ err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
+ dev->mtu, rif->mac_profile_id);
+ if (err)
+ goto err_rif_edit;
+
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
+ if (err)
+ goto err_rif_fdb_op;
+
+ if (rif->mtu != dev->mtu) {
+ struct mlxsw_sp_vr *vr;
+ int i;
+
+ /* The RIF is relevant only to its own mr_table instance: unlike in
+ * unicast routing, in multicast routing a RIF cannot be shared between
+ * several multicast routing tables.
+ */
+ vr = &mlxsw_sp->router->vrs[rif->vr_id];
+ for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
+ mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
+ rif, dev->mtu);
+ }
+
+ ether_addr_copy(rif->addr, dev->dev_addr);
+ rif->mtu = dev->mtu;
+
+ netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
+
+ return 0;
+
+err_rif_fdb_op:
+ mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu,
+ old_mac_profile);
+err_rif_edit:
+ mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, rif->addr, extack);
+err_rif_mac_profile_replace:
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
+ return err;
+}
+
+static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
+ struct netdev_notifier_pre_changeaddr_info *info)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ struct mlxsw_sp_rif_mac_profile *profile;
+ struct netlink_ext_ack *extack;
+ u8 max_rif_mac_profiles;
+ u64 occ;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, info->dev_addr);
+ if (profile)
+ return 0;
+
+ max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
+ occ = mlxsw_sp_rif_mac_profiles_occ_get(mlxsw_sp);
+ if (occ < max_rif_mac_profiles)
+ return 0;
+
+ if (!mlxsw_sp_rif_mac_profile_is_shared(rif))
+ return 0;
+
+ NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interface MAC profiles");
+ return -ENOBUFS;
+}
+
+static bool mlxsw_sp_is_offload_xstats_event(unsigned long event)
+{
+ switch (event) {
+ case NETDEV_OFFLOAD_XSTATS_ENABLE:
+ case NETDEV_OFFLOAD_XSTATS_DISABLE:
+ case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
+ case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
+ return true;
+ }
+
+ return false;
+}
+
+static int
+mlxsw_sp_router_port_offload_xstats_cmd(struct mlxsw_sp_rif *rif,
+ unsigned long event,
+ struct netdev_notifier_offload_xstats_info *info)
+{
+ switch (info->type) {
+ case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
+ break;
+ default:
+ return 0;
+ }
+
+ switch (event) {
+ case NETDEV_OFFLOAD_XSTATS_ENABLE:
+ return mlxsw_sp_router_port_l3_stats_enable(rif);
+ case NETDEV_OFFLOAD_XSTATS_DISABLE:
+ mlxsw_sp_router_port_l3_stats_disable(rif);
+ return 0;
+ case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
+ mlxsw_sp_router_port_l3_stats_report_used(rif, info);
+ return 0;
+ case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
+ return mlxsw_sp_router_port_l3_stats_report_delta(rif, info);
+ }
+
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static int
+mlxsw_sp_netdevice_offload_xstats_cmd(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *dev,
+ unsigned long event,
+ struct netdev_notifier_offload_xstats_info *info)
+{
+ struct mlxsw_sp_rif *rif;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!rif)
+ return 0;
+
+ return mlxsw_sp_router_port_offload_xstats_cmd(rif, event, info);
+}
+
+static bool mlxsw_sp_is_router_event(unsigned long event)
+{
+ switch (event) {
+ case NETDEV_PRE_CHANGEADDR:
+ case NETDEV_CHANGEADDR:
+ case NETDEV_CHANGEMTU:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
+ unsigned long event, void *ptr)
+{
+ struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_rif *rif;
+
+ mlxsw_sp = mlxsw_sp_lower_get(dev);
+ if (!mlxsw_sp)
+ return 0;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!rif)
+ return 0;
+
+ switch (event) {
+ case NETDEV_CHANGEMTU:
+ case NETDEV_CHANGEADDR:
+ return mlxsw_sp_router_port_change_event(mlxsw_sp, rif, extack);
+ case NETDEV_PRE_CHANGEADDR:
+ return mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *l3_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_rif *rif;
+
+ /* If netdev is already associated with a RIF, then we need to
+ * destroy it and create a new one with the new virtual router ID.
+ */
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
+ if (rif)
+ __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN,
+ extack);
+
+ return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, extack);
+}
+
+static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *l3_dev)
+{
+ struct mlxsw_sp_rif *rif;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
+ if (!rif)
+ return;
+ __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, NULL);
+}
+
+static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
+{
+ struct netdev_notifier_changeupper_info *info = ptr;
+
+ if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
+ return false;
+ return netif_is_l3_master(info->upper_dev);
+}
+
+static int
+mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
+ int err = 0;
+
+ /* We do not create a RIF for a macvlan, but only use it to
+ * direct more MAC addresses to the router.
+ */
+ if (!mlxsw_sp || netif_is_macvlan(l3_dev))
+ return 0;
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ break;
+ case NETDEV_CHANGEUPPER:
+ if (info->linking) {
+ struct netlink_ext_ack *extack;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+ err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
+ } else {
+ mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
+ }
+ break;
+ }
+
+ return err;
+}
+
+static int mlxsw_sp_router_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct mlxsw_sp_router *router;
+ struct mlxsw_sp *mlxsw_sp;
+ int err = 0;
+
+ router = container_of(nb, struct mlxsw_sp_router, netdevice_nb);
+ mlxsw_sp = router->mlxsw_sp;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+
+ if (mlxsw_sp_is_offload_xstats_event(event))
+ err = mlxsw_sp_netdevice_offload_xstats_cmd(mlxsw_sp, dev,
+ event, ptr);
+ else if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
+ err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
+ event, ptr);
+ else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
+ err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
+ event, ptr);
+ else if (mlxsw_sp_is_router_event(event))
+ err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
+ else if (mlxsw_sp_is_vrf_event(event, ptr))
+ err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
+
+ mutex_unlock(&mlxsw_sp->router->lock);
+
+ return notifier_from_errno(err);
+}
+
+static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev,
+ struct netdev_nested_priv *priv)
+{
+ struct mlxsw_sp_rif *rif = (struct mlxsw_sp_rif *)priv->data;
+
+ if (!netif_is_macvlan(dev))
+ return 0;
+
+ return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
+}
+
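+/* When a RIF is destroyed, remove the FDB entries of all macvlan uppers so
+ * that their MAC addresses are no longer directed to the router.
+ */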
+static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
+{
+ struct netdev_nested_priv priv = {
+ .data = (void *)rif,
+ };
+
+ if (!netif_is_macvlan_port(rif->dev))
+ return 0;
+
+ netdev_warn(rif->dev, "Router interface is deleted. Upper macvlans will not work\n");
+ return netdev_walk_all_upper_dev_rcu(rif->dev,
+ __mlxsw_sp_rif_macvlan_flush, &priv);
+}
+
+static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
+ const struct mlxsw_sp_rif_params *params)
+{
+ struct mlxsw_sp_rif_subport *rif_subport;
+
+ rif_subport = mlxsw_sp_rif_subport_rif(rif);
+ refcount_set(&rif_subport->ref_count, 1);
+ rif_subport->vid = params->vid;
+ rif_subport->lag = params->lag;
+ if (params->lag)
+ rif_subport->lag_id = params->lag_id;
+ else
+ rif_subport->system_port = params->system_port;
+}
+
+static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ struct mlxsw_sp_rif_subport *rif_subport;
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+ u16 efid;
+
+ rif_subport = mlxsw_sp_rif_subport_rif(rif);
+ mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
+ rif->rif_index, rif->vr_id, rif->dev->mtu);
+ mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
+ mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
+ efid = mlxsw_sp_fid_index(rif->fid);
+ mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
+ rif_subport->lag ? rif_subport->lag_id :
+ rif_subport->system_port,
+ efid, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
+{
+ u8 mac_profile;
+ int err;
+
+ err = mlxsw_sp_rif_mac_profile_get(rif->mlxsw_sp, rif->addr,
+ &mac_profile, extack);
+ if (err)
+ return err;
+ rif->mac_profile_id = mac_profile;
+
+ err = mlxsw_sp_rif_subport_op(rif, true);
+ if (err)
+ goto err_rif_subport_op;
+
+ err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), true);
+ if (err)
+ goto err_rif_fdb_op;
+
+ err = mlxsw_sp_fid_rif_set(rif->fid, rif);
+ if (err)
+ goto err_fid_rif_set;
+
+ return 0;
+
+err_fid_rif_set:
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
+err_rif_fdb_op:
+ mlxsw_sp_rif_subport_op(rif, false);
+err_rif_subport_op:
+ mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, mac_profile);
+ return err;
+}
+
+static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp_fid *fid = rif->fid;
+
+ mlxsw_sp_fid_rif_unset(fid);
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(fid), false);
+ mlxsw_sp_rif_macvlan_flush(rif);
+ mlxsw_sp_rif_subport_op(rif, false);
+ mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
+}
+
+static struct mlxsw_sp_fid *
+mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
+{
+ return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
+}
+
+static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
+ .type = MLXSW_SP_RIF_TYPE_SUBPORT,
+ .rif_size = sizeof(struct mlxsw_sp_rif_subport),
+ .setup = mlxsw_sp_rif_subport_setup,
+ .configure = mlxsw_sp_rif_subport_configure,
+ .deconfigure = mlxsw_sp_rif_subport_deconfigure,
+ .fid_get = mlxsw_sp_rif_subport_fid_get,
+};
+
+static int mlxsw_sp_rif_fid_op(struct mlxsw_sp_rif *rif, u16 fid, bool enable)
+{
+ enum mlxsw_reg_ritr_if_type type = MLXSW_REG_RITR_FID_IF;
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+
+ mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
+ rif->dev->mtu);
+ mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
+ mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
+ mlxsw_reg_ritr_fid_if_fid_set(ritr_pl, fid);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
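+/* The "router port" is a virtual port addressed one beyond the switch's
+ * maximum port number. It is used below as a FID flood table member, so that
+ * multicast and broadcast traffic flooded in the FID also reaches the router.
+ */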
+u16 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
+{
+ return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
+}
+
+static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ u16 fid_index = mlxsw_sp_fid_index(rif->fid);
+ u8 mac_profile;
+ int err;
+
+ err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
+ &mac_profile, extack);
+ if (err)
+ return err;
+ rif->mac_profile_id = mac_profile;
+
+ err = mlxsw_sp_rif_fid_op(rif, fid_index, true);
+ if (err)
+ goto err_rif_fid_op;
+
+ err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
+ mlxsw_sp_router_port(mlxsw_sp), true);
+ if (err)
+ goto err_fid_mc_flood_set;
+
+ err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), true);
+ if (err)
+ goto err_fid_bc_flood_set;
+
+ err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), true);
+ if (err)
+ goto err_rif_fdb_op;
+
+ err = mlxsw_sp_fid_rif_set(rif->fid, rif);
+ if (err)
+ goto err_fid_rif_set;
+
+ return 0;
+
+err_fid_rif_set:
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
+err_rif_fdb_op:
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+err_fid_bc_flood_set:
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+err_fid_mc_flood_set:
+ mlxsw_sp_rif_fid_op(rif, fid_index, false);
+err_rif_fid_op:
+ mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
+ return err;
+}
+
+static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
+{
+ u16 fid_index = mlxsw_sp_fid_index(rif->fid);
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ struct mlxsw_sp_fid *fid = rif->fid;
+
+ mlxsw_sp_fid_rif_unset(fid);
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(fid), false);
+ mlxsw_sp_rif_macvlan_flush(rif);
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+ mlxsw_sp_rif_fid_op(rif, fid_index, false);
+ mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
+}
+
+static struct mlxsw_sp_fid *
+mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
+{
+ return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
+}
+
+static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
+{
+ struct switchdev_notifier_fdb_info info = {};
+ struct net_device *dev;
+
+ dev = br_fdb_find_port(rif->dev, mac, 0);
+ if (!dev)
+ return;
+
+ info.addr = mac;
+ info.vid = 0;
+ call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
+ NULL);
+}
+
+static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
+ .type = MLXSW_SP_RIF_TYPE_FID,
+ .rif_size = sizeof(struct mlxsw_sp_rif),
+ .configure = mlxsw_sp_rif_fid_configure,
+ .deconfigure = mlxsw_sp_rif_fid_deconfigure,
+ .fid_get = mlxsw_sp_rif_fid_fid_get,
+ .fdb_del = mlxsw_sp_rif_fid_fdb_del,
+};
+
+static struct mlxsw_sp_fid *
+mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *br_dev;
+ u16 vid;
+ int err;
+
+ if (is_vlan_dev(rif->dev)) {
+ vid = vlan_dev_vlan_id(rif->dev);
+ br_dev = vlan_dev_real_dev(rif->dev);
+ if (WARN_ON(!netif_is_bridge_master(br_dev)))
+ return ERR_PTR(-EINVAL);
+ } else {
+ err = br_vlan_get_pvid(rif->dev, &vid);
+ if (err < 0 || !vid) {
+ NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
+}
+
+static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
+{
+ struct switchdev_notifier_fdb_info info = {};
+ u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+ struct net_device *br_dev;
+ struct net_device *dev;
+
+ br_dev = is_vlan_dev(rif->dev) ? vlan_dev_real_dev(rif->dev) : rif->dev;
+ dev = br_fdb_find_port(br_dev, mac, vid);
+ if (!dev)
+ return;
+
+ info.addr = mac;
+ info.vid = vid;
+ call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
+ NULL);
+}
+
+static int mlxsw_sp_rif_vlan_op(struct mlxsw_sp_rif *rif, u16 vid, u16 efid,
+ bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+
+ mlxsw_reg_ritr_vlan_if_pack(ritr_pl, enable, rif->rif_index, rif->vr_id,
+ rif->dev->mtu, rif->dev->dev_addr,
+ rif->mac_profile_id, vid, efid);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif, u16 efid,
+ struct netlink_ext_ack *extack)
+{
+ u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ u8 mac_profile;
+ int err;
+
+ err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
+ &mac_profile, extack);
+ if (err)
+ return err;
+ rif->mac_profile_id = mac_profile;
+
+ err = mlxsw_sp_rif_vlan_op(rif, vid, efid, true);
+ if (err)
+ goto err_rif_vlan_fid_op;
+
+ err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
+ mlxsw_sp_router_port(mlxsw_sp), true);
+ if (err)
+ goto err_fid_mc_flood_set;
+
+ err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), true);
+ if (err)
+ goto err_fid_bc_flood_set;
+
+ err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), true);
+ if (err)
+ goto err_rif_fdb_op;
+
+ err = mlxsw_sp_fid_rif_set(rif->fid, rif);
+ if (err)
+ goto err_fid_rif_set;
+
+ return 0;
+
+err_fid_rif_set:
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
+err_rif_fdb_op:
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+err_fid_bc_flood_set:
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+err_fid_mc_flood_set:
+ mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
+err_rif_vlan_fid_op:
+ mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
+ return err;
+}
+
+static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
+{
+ u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+
+ mlxsw_sp_fid_rif_unset(rif->fid);
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
+ mlxsw_sp_rif_macvlan_flush(rif);
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+ mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
+ mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
+}
+
+static int mlxsw_sp1_rif_vlan_configure(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
+{
+ return mlxsw_sp_rif_vlan_configure(rif, 0, extack);
+}
+
+static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_vlan_ops = {
+ .type = MLXSW_SP_RIF_TYPE_VLAN,
+ .rif_size = sizeof(struct mlxsw_sp_rif),
+ .configure = mlxsw_sp1_rif_vlan_configure,
+ .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
+ .fid_get = mlxsw_sp_rif_vlan_fid_get,
+ .fdb_del = mlxsw_sp_rif_vlan_fdb_del,
+};
+
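+/* Unlike Spectrum-1, which passes zero, Spectrum-2 programs the RIF's FID as
+ * the egress FID (eFID) of the VLAN RIF.
+ */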
+static int mlxsw_sp2_rif_vlan_configure(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
+{
+ u16 efid = mlxsw_sp_fid_index(rif->fid);
+
+ return mlxsw_sp_rif_vlan_configure(rif, efid, extack);
+}
+
+static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_vlan_ops = {
+ .type = MLXSW_SP_RIF_TYPE_VLAN,
+ .rif_size = sizeof(struct mlxsw_sp_rif),
+ .configure = mlxsw_sp2_rif_vlan_configure,
+ .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
+ .fid_get = mlxsw_sp_rif_vlan_fid_get,
+ .fdb_del = mlxsw_sp_rif_vlan_fdb_del,
+};
+
+static struct mlxsw_sp_rif_ipip_lb *
+mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
+{
+ return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
+}
+
+static void
+mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
+ const struct mlxsw_sp_rif_params *params)
+{
+ struct mlxsw_sp_rif_params_ipip_lb *params_lb;
+ struct mlxsw_sp_rif_ipip_lb *rif_lb;
+
+ params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
+ common);
+ rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
+ rif_lb->lb_config = params_lb->lb_config;
+}
+
+static int
+mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
+ u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ struct mlxsw_sp_vr *ul_vr;
+ int err;
+
+ ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
+ if (IS_ERR(ul_vr))
+ return PTR_ERR(ul_vr);
+
+ err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true);
+ if (err)
+ goto err_loopback_op;
+
+ lb_rif->ul_vr_id = ul_vr->id;
+ lb_rif->ul_rif_id = 0;
+ ++ul_vr->rif_count;
+ return 0;
+
+err_loopback_op:
+ mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
+ return err;
+}
+
+static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ struct mlxsw_sp_vr *ul_vr;
+
+ ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
+ mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false);
+
+ --ul_vr->rif_count;
+ mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
+}
+
+static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
+ .type = MLXSW_SP_RIF_TYPE_IPIP_LB,
+ .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
+ .setup = mlxsw_sp_rif_ipip_lb_setup,
+ .configure = mlxsw_sp1_rif_ipip_lb_configure,
+ .deconfigure = mlxsw_sp1_rif_ipip_lb_deconfigure,
+};
+
+static const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
+ [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
+ [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp1_rif_vlan_ops,
+ [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
+ [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp1_rif_ipip_lb_ops,
+};
+
+static int
+mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+
+ mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
+ ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU);
+ mlxsw_reg_ritr_loopback_protocol_set(ritr_pl,
+ MLXSW_REG_RITR_LOOPBACK_GENERIC);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static struct mlxsw_sp_rif *
+mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_rif *ul_rif;
+ u16 rif_index;
+ int err;
+
+ err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
+ return ERR_PTR(err);
+ }
+
+ ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id, NULL);
+ if (!ul_rif)
+ return ERR_PTR(-ENOMEM);
+
+ mlxsw_sp->router->rifs[rif_index] = ul_rif;
+ ul_rif->mlxsw_sp = mlxsw_sp;
+ err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
+ if (err)
+ goto ul_rif_op_err;
+
+ atomic_inc(&mlxsw_sp->router->rifs_count);
+ return ul_rif;
+
+ul_rif_op_err:
+ mlxsw_sp->router->rifs[rif_index] = NULL;
+ kfree(ul_rif);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
+{
+ struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
+
+ atomic_dec(&mlxsw_sp->router->rifs_count);
+ mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
+ mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
+ kfree(ul_rif);
+}
+
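+/* An underlay RIF is shared by all users of the same underlay table. It is
+ * created on first use and reference counted, so it is only destroyed once
+ * the last user has released it.
+ */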
+static struct mlxsw_sp_rif *
+mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_vr *vr;
+ int err;
+
+ vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
+ if (IS_ERR(vr))
+ return ERR_CAST(vr);
+
+ if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
+ return vr->ul_rif;
+
+ vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, extack);
+ if (IS_ERR(vr->ul_rif)) {
+ err = PTR_ERR(vr->ul_rif);
+ goto err_ul_rif_create;
+ }
+
+ vr->rif_count++;
+ refcount_set(&vr->ul_rif_refcnt, 1);
+
+ return vr->ul_rif;
+
+err_ul_rif_create:
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
+{
+ struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
+ struct mlxsw_sp_vr *vr;
+
+ vr = &mlxsw_sp->router->vrs[ul_rif->vr_id];
+
+ if (!refcount_dec_and_test(&vr->ul_rif_refcnt))
+ return;
+
+ vr->rif_count--;
+ mlxsw_sp_ul_rif_destroy(ul_rif);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
+}
+
+int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
+ u16 *ul_rif_index)
+{
+ struct mlxsw_sp_rif *ul_rif;
+ int err = 0;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+ ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
+ if (IS_ERR(ul_rif)) {
+ err = PTR_ERR(ul_rif);
+ goto out;
+ }
+ *ul_rif_index = ul_rif->rif_index;
+out:
+ mutex_unlock(&mlxsw_sp->router->lock);
+ return err;
+}
+
+void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
+{
+ struct mlxsw_sp_rif *ul_rif;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+ ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
+ if (WARN_ON(!ul_rif))
+ goto out;
+
+ mlxsw_sp_ul_rif_put(ul_rif);
+out:
+ mutex_unlock(&mlxsw_sp->router->lock);
+}
+
+static int
+mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
+ u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ struct mlxsw_sp_rif *ul_rif;
+ int err;
+
+ ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
+ if (IS_ERR(ul_rif))
+ return PTR_ERR(ul_rif);
+
+ err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true);
+ if (err)
+ goto err_loopback_op;
+
+ lb_rif->ul_vr_id = 0;
+ lb_rif->ul_rif_id = ul_rif->rif_index;
+
+ return 0;
+
+err_loopback_op:
+ mlxsw_sp_ul_rif_put(ul_rif);
+ return err;
+}
+
+static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ struct mlxsw_sp_rif *ul_rif;
+
+ ul_rif = mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id);
+ mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false);
+ mlxsw_sp_ul_rif_put(ul_rif);
+}
+
+static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
+ .type = MLXSW_SP_RIF_TYPE_IPIP_LB,
+ .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
+ .setup = mlxsw_sp_rif_ipip_lb_setup,
+ .configure = mlxsw_sp2_rif_ipip_lb_configure,
+ .deconfigure = mlxsw_sp2_rif_ipip_lb_deconfigure,
+};
+
+static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
+ [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
+ [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp2_rif_vlan_ops,
+ [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
+ [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp2_rif_ipip_lb_ops,
+};
+
+static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
+{
+ u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_core *core = mlxsw_sp->core;
+
+ if (!MLXSW_CORE_RES_VALID(core, MAX_RIF_MAC_PROFILES))
+ return -EIO;
+ mlxsw_sp->router->max_rif_mac_profile =
+ MLXSW_CORE_RES_GET(core, MAX_RIF_MAC_PROFILES);
+
+ mlxsw_sp->router->rifs = kcalloc(max_rifs,
+ sizeof(struct mlxsw_sp_rif *),
+ GFP_KERNEL);
+ if (!mlxsw_sp->router->rifs)
+ return -ENOMEM;
+
+ idr_init(&mlxsw_sp->router->rif_mac_profiles_idr);
+ atomic_set(&mlxsw_sp->router->rif_mac_profiles_count, 0);
+ atomic_set(&mlxsw_sp->router->rifs_count, 0);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
+ mlxsw_sp_rif_mac_profiles_occ_get,
+ mlxsw_sp);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_RIFS,
+ mlxsw_sp_rifs_occ_get,
+ mlxsw_sp);
+
+ return 0;
+}
+
+static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ int i;
+
+ WARN_ON_ONCE(atomic_read(&mlxsw_sp->router->rifs_count));
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
+ WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
+
+ devl_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_RIFS);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_RIF_MAC_PROFILES);
+ WARN_ON(!idr_is_empty(&mlxsw_sp->router->rif_mac_profiles_idr));
+ idr_destroy(&mlxsw_sp->router->rif_mac_profiles_idr);
+ kfree(mlxsw_sp->router->rifs);
+}
+
+static int
+mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
+{
+ char tigcr_pl[MLXSW_REG_TIGCR_LEN];
+
+ mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
+}
+
+static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
+
+ err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
+ if (err)
+ return err;
+ err = mlxsw_sp_ipip_ecn_decap_init(mlxsw_sp);
+ if (err)
+ return err;
+
+ return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
+}
+
+static int mlxsw_sp1_ipips_init(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp->router->ipip_ops_arr = mlxsw_sp1_ipip_ops_arr;
+ return mlxsw_sp_ipips_init(mlxsw_sp);
+}
+
+static int mlxsw_sp2_ipips_init(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp->router->ipip_ops_arr = mlxsw_sp2_ipip_ops_arr;
+ return mlxsw_sp_ipips_init(mlxsw_sp);
+}
+
+static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
+}
+
+static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
+{
+ struct mlxsw_sp_router *router;
+
+ /* Flush pending FIB notifications and then flush the device's
+ * table before requesting another dump. The FIB notification
+ * block is unregistered, so no need to take RTNL.
+ */
+ mlxsw_core_flush_owq();
+ router = container_of(nb, struct mlxsw_sp_router, fib_nb);
+ mlxsw_sp_router_fib_flush(router->mlxsw_sp);
+}
+
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
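+/* Bitmaps of RECR2 header and field enables to program, for both the outer
+ * and the (optional) inner packet. inc_parsing_depth is set when hashing on
+ * inner headers is required.
+ */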
+struct mlxsw_sp_mp_hash_config {
+ DECLARE_BITMAP(headers, __MLXSW_REG_RECR2_HEADER_CNT);
+ DECLARE_BITMAP(fields, __MLXSW_REG_RECR2_FIELD_CNT);
+ DECLARE_BITMAP(inner_headers, __MLXSW_REG_RECR2_HEADER_CNT);
+ DECLARE_BITMAP(inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT);
+ bool inc_parsing_depth;
+};
+
+#define MLXSW_SP_MP_HASH_HEADER_SET(_headers, _header) \
+ bitmap_set(_headers, MLXSW_REG_RECR2_##_header, 1)
+
+#define MLXSW_SP_MP_HASH_FIELD_SET(_fields, _field) \
+ bitmap_set(_fields, MLXSW_REG_RECR2_##_field, 1)
+
+#define MLXSW_SP_MP_HASH_FIELD_RANGE_SET(_fields, _field, _nr) \
+ bitmap_set(_fields, MLXSW_REG_RECR2_##_field, _nr)
+
+static void mlxsw_sp_mp_hash_inner_l3(struct mlxsw_sp_mp_hash_config *config)
+{
+ unsigned long *inner_headers = config->inner_headers;
+ unsigned long *inner_fields = config->inner_fields;
+
+ /* IPv4 inner */
+ MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
+ MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
+ MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
+ MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
+ /* IPv6 inner */
+ MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
+ MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
+ MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
+ MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
+ MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
+ MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
+ MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
+ MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
+}
+
+static void mlxsw_sp_mp4_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
+{
+ unsigned long *headers = config->headers;
+ unsigned long *fields = config->fields;
+
+ MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
+ MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
+ MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
+ MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
+}
+
+static void
+mlxsw_sp_mp_hash_inner_custom(struct mlxsw_sp_mp_hash_config *config,
+ u32 hash_fields)
+{
+ unsigned long *inner_headers = config->inner_headers;
+ unsigned long *inner_fields = config->inner_fields;
+
+ /* IPv4 Inner */
+ MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
+ MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
+ MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
+ MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
+ MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV4_PROTOCOL);
+ /* IPv6 inner */
+ MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
+ MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP) {
+ MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
+ MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
+ }
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP) {
+ MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
+ MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
+ }
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
+ MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
+ MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
+ /* L4 inner */
+ MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV4);
+ MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV6);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
+ MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_SPORT);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
+ MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_DPORT);
+}
+
+static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mp_hash_config *config)
+{
+ struct net *net = mlxsw_sp_net(mlxsw_sp);
+ unsigned long *headers = config->headers;
+ unsigned long *fields = config->fields;
+ u32 hash_fields;
+
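+ /* Mirror the kernel's fib_multipath_hash_policy values: 0 - L3,
+ * 1 - L4, 2 - L3 or inner L3 if present, 3 - custom set of fields
+ * taken from fib_multipath_hash_fields.
+ */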
+ switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) {
+ case 0:
+ mlxsw_sp_mp4_hash_outer_addr(config);
+ break;
+ case 1:
+ mlxsw_sp_mp4_hash_outer_addr(config);
+ MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
+ break;
+ case 2:
+ /* Outer */
+ mlxsw_sp_mp4_hash_outer_addr(config);
+ /* Inner */
+ mlxsw_sp_mp_hash_inner_l3(config);
+ break;
+ case 3:
+ hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
+ /* Outer */
+ MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
+ MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
+ MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
+ MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
+ MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
+ /* Inner */
+ mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
+ break;
+ }
+}
+
+static void mlxsw_sp_mp6_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
+{
+ unsigned long *headers = config->headers;
+ unsigned long *fields = config->fields;
+
+ MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
+ MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
+ MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
+ MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
+}
+
+static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mp_hash_config *config)
+{
+ u32 hash_fields = ip6_multipath_hash_fields(mlxsw_sp_net(mlxsw_sp));
+ unsigned long *headers = config->headers;
+ unsigned long *fields = config->fields;
+
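+ /* Same policy values as in the IPv4 case, only read from the IPv6
+ * multipath hash sysctls.
+ */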
+ switch (ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp))) {
+ case 0:
+ mlxsw_sp_mp6_hash_outer_addr(config);
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
+ break;
+ case 1:
+ mlxsw_sp_mp6_hash_outer_addr(config);
+ MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
+ break;
+ case 2:
+ /* Outer */
+ mlxsw_sp_mp6_hash_outer_addr(config);
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
+ /* Inner */
+ mlxsw_sp_mp_hash_inner_l3(config);
+ config->inc_parsing_depth = true;
+ break;
+ case 3:
+ /* Outer */
+ MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
+ MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
+ MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP) {
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
+ MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
+ }
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP) {
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
+ MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
+ }
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
+ MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
+ /* Inner */
+ mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
+ if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK)
+ config->inc_parsing_depth = true;
+ break;
+ }
+}
+
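+/* Hashing on inner headers requires the device to parse deeper into the
+ * packet, so increase the parsing depth when such a configuration is applied
+ * and decrease it again once it is no longer needed.
+ */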
+static int mlxsw_sp_mp_hash_parsing_depth_adjust(struct mlxsw_sp *mlxsw_sp,
+ bool old_inc_parsing_depth,
+ bool new_inc_parsing_depth)
+{
+ int err;
+
+ if (!old_inc_parsing_depth && new_inc_parsing_depth) {
+ err = mlxsw_sp_parsing_depth_inc(mlxsw_sp);
+ if (err)
+ return err;
+ mlxsw_sp->router->inc_parsing_depth = true;
+ } else if (old_inc_parsing_depth && !new_inc_parsing_depth) {
+ mlxsw_sp_parsing_depth_dec(mlxsw_sp);
+ mlxsw_sp->router->inc_parsing_depth = false;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
+{
+ bool old_inc_parsing_depth, new_inc_parsing_depth;
+ struct mlxsw_sp_mp_hash_config config = {};
+ char recr2_pl[MLXSW_REG_RECR2_LEN];
+ unsigned long bit;
+ u32 seed;
+ int err;
+
+ seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
+ mlxsw_reg_recr2_pack(recr2_pl, seed);
+ mlxsw_sp_mp4_hash_init(mlxsw_sp, &config);
+ mlxsw_sp_mp6_hash_init(mlxsw_sp, &config);
+
+ old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;
+ new_inc_parsing_depth = config.inc_parsing_depth;
+ err = mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp,
+ old_inc_parsing_depth,
+ new_inc_parsing_depth);
+ if (err)
+ return err;
+
+ for_each_set_bit(bit, config.headers, __MLXSW_REG_RECR2_HEADER_CNT)
+ mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, bit, 1);
+ for_each_set_bit(bit, config.fields, __MLXSW_REG_RECR2_FIELD_CNT)
+ mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, bit, 1);
+ for_each_set_bit(bit, config.inner_headers, __MLXSW_REG_RECR2_HEADER_CNT)
+ mlxsw_reg_recr2_inner_header_enables_set(recr2_pl, bit, 1);
+ for_each_set_bit(bit, config.inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT)
+ mlxsw_reg_recr2_inner_header_fields_enable_set(recr2_pl, bit, 1);
+
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
+ if (err)
+ goto err_reg_write;
+
+ return 0;
+
+err_reg_write:
+ mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, new_inc_parsing_depth,
+ old_inc_parsing_depth);
+ return err;
+}
+
+static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ bool old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;
+
+ mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, old_inc_parsing_depth,
+ false);
+}
+#else
+static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
+{
+ return 0;
+}
+
+static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
+{
+}
+#endif
+
+static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
+{
+ char rdpm_pl[MLXSW_REG_RDPM_LEN];
+ unsigned int i;
+
+ MLXSW_REG_ZERO(rdpm, rdpm_pl);
+
+ /* The HW determines switch priority based on the DSCP bits, whereas the
+ * kernel still does so based on the full ToS byte. Since the bit layouts
+ * differ, translate each DSCP value to the ToS value it corresponds to by
+ * skipping the 2 least-significant ECN bits.
+ */
+ for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
+ mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
+}
+
+static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct net *net = mlxsw_sp_net(mlxsw_sp);
+ char rgcr_pl[MLXSW_REG_RGCR_LEN];
+ u64 max_rifs;
+ bool usp;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
+ return -EIO;
+ max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
+ usp = READ_ONCE(net->ipv4.sysctl_ip_fwd_update_priority);
+
+ mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
+ mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
+ mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
+}
+
+static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ char rgcr_pl[MLXSW_REG_RGCR_LEN];
+
+ mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
+}
+
+static int mlxsw_sp_lb_rif_init(struct mlxsw_sp *mlxsw_sp)
+{
+ u16 lb_rif_index;
+ int err;
+
+ /* Create a generic loopback RIF associated with the main table
+ * (default VRF). Any table can be used, but the main table exists
+ * anyway, so we do not waste resources.
+ */
+ err = mlxsw_sp_router_ul_rif_get(mlxsw_sp, RT_TABLE_MAIN,
+ &lb_rif_index);
+ if (err)
+ return err;
+
+ mlxsw_sp->router->lb_rif_index = lb_rif_index;
+
+ return 0;
+}
+
+static void mlxsw_sp_lb_rif_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp_router_ul_rif_put(mlxsw_sp, mlxsw_sp->router->lb_rif_index);
+}
+
+static int mlxsw_sp1_router_init(struct mlxsw_sp *mlxsw_sp)
+{
+ size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp1_adj_grp_size_ranges);
+
+ mlxsw_sp->router->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
+ mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp1_adj_grp_size_ranges;
+ mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;
+
+ return 0;
+}
+
+const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops = {
+ .init = mlxsw_sp1_router_init,
+ .ipips_init = mlxsw_sp1_ipips_init,
+};
+
+static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp)
+{
+ size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp2_adj_grp_size_ranges);
+
+ mlxsw_sp->router->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
+ mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp2_adj_grp_size_ranges;
+ mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;
+
+ return 0;
+}
+
+const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops = {
+ .init = mlxsw_sp2_router_init,
+ .ipips_init = mlxsw_sp2_ipips_init,
+};
+
+int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_router *router;
+ int err;
+
+ router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
+ if (!router)
+ return -ENOMEM;
+ mutex_init(&router->lock);
+ mlxsw_sp->router = router;
+ router->mlxsw_sp = mlxsw_sp;
+
+ err = mlxsw_sp->router_ops->init(mlxsw_sp);
+ if (err)
+ goto err_router_ops_init;
+
+ INIT_LIST_HEAD(&mlxsw_sp->router->nh_res_grp_list);
+ INIT_DELAYED_WORK(&mlxsw_sp->router->nh_grp_activity_dw,
+ mlxsw_sp_nh_grp_activity_work);
+ INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
+ err = __mlxsw_sp_router_init(mlxsw_sp);
+ if (err)
+ goto err_router_init;
+
+ err = mlxsw_sp_rifs_init(mlxsw_sp);
+ if (err)
+ goto err_rifs_init;
+
+ err = mlxsw_sp->router_ops->ipips_init(mlxsw_sp);
+ if (err)
+ goto err_ipips_init;
+
+ err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
+ &mlxsw_sp_nexthop_ht_params);
+ if (err)
+ goto err_nexthop_ht_init;
+
+ err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
+ &mlxsw_sp_nexthop_group_ht_params);
+ if (err)
+ goto err_nexthop_group_ht_init;
+
+ INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
+ err = mlxsw_sp_lpm_init(mlxsw_sp);
+ if (err)
+ goto err_lpm_init;
+
+ err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
+ if (err)
+ goto err_mr_init;
+
+ err = mlxsw_sp_vrs_init(mlxsw_sp);
+ if (err)
+ goto err_vrs_init;
+
+ err = mlxsw_sp_lb_rif_init(mlxsw_sp);
+ if (err)
+ goto err_lb_rif_init;
+
+ err = mlxsw_sp_neigh_init(mlxsw_sp);
+ if (err)
+ goto err_neigh_init;
+
+ err = mlxsw_sp_mp_hash_init(mlxsw_sp);
+ if (err)
+ goto err_mp_hash_init;
+
+ err = mlxsw_sp_dscp_init(mlxsw_sp);
+ if (err)
+ goto err_dscp_init;
+
+ router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
+ err = register_inetaddr_notifier(&router->inetaddr_nb);
+ if (err)
+ goto err_register_inetaddr_notifier;
+
+ router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
+ err = register_inet6addr_notifier(&router->inet6addr_nb);
+ if (err)
+ goto err_register_inet6addr_notifier;
+
+ mlxsw_sp->router->netevent_nb.notifier_call =
+ mlxsw_sp_router_netevent_event;
+ err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
+ if (err)
+ goto err_register_netevent_notifier;
+
+ mlxsw_sp->router->nexthop_nb.notifier_call =
+ mlxsw_sp_nexthop_obj_event;
+ err = register_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->router->nexthop_nb,
+ extack);
+ if (err)
+ goto err_register_nexthop_notifier;
+
+ mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
+ err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->router->fib_nb,
+ mlxsw_sp_router_fib_dump_flush, extack);
+ if (err)
+ goto err_register_fib_notifier;
+
+ mlxsw_sp->router->netdevice_nb.notifier_call =
+ mlxsw_sp_router_netdevice_event;
+ err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->router->netdevice_nb);
+ if (err)
+ goto err_register_netdev_notifier;
+
+ return 0;
+
+err_register_netdev_notifier:
+ unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->router->fib_nb);
+err_register_fib_notifier:
+ unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->router->nexthop_nb);
+err_register_nexthop_notifier:
+ unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
+err_register_netevent_notifier:
+ unregister_inet6addr_notifier(&router->inet6addr_nb);
+err_register_inet6addr_notifier:
+ unregister_inetaddr_notifier(&router->inetaddr_nb);
+err_register_inetaddr_notifier:
+ mlxsw_core_flush_owq();
+err_dscp_init:
+ mlxsw_sp_mp_hash_fini(mlxsw_sp);
+err_mp_hash_init:
+ mlxsw_sp_neigh_fini(mlxsw_sp);
+err_neigh_init:
+ mlxsw_sp_lb_rif_fini(mlxsw_sp);
+err_lb_rif_init:
+ mlxsw_sp_vrs_fini(mlxsw_sp);
+err_vrs_init:
+ mlxsw_sp_mr_fini(mlxsw_sp);
+err_mr_init:
+ mlxsw_sp_lpm_fini(mlxsw_sp);
+err_lpm_init:
+ rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
+err_nexthop_group_ht_init:
+ rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
+err_nexthop_ht_init:
+ mlxsw_sp_ipips_fini(mlxsw_sp);
+err_ipips_init:
+ mlxsw_sp_rifs_fini(mlxsw_sp);
+err_rifs_init:
+ __mlxsw_sp_router_fini(mlxsw_sp);
+err_router_init:
+ cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
+err_router_ops_init:
+ mutex_destroy(&mlxsw_sp->router->lock);
+ kfree(mlxsw_sp->router);
+ return err;
+}
+
+void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->router->netdevice_nb);
+ unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->router->fib_nb);
+ unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->router->nexthop_nb);
+ unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
+ unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
+ unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
+ mlxsw_core_flush_owq();
+ mlxsw_sp_mp_hash_fini(mlxsw_sp);
+ mlxsw_sp_neigh_fini(mlxsw_sp);
+ mlxsw_sp_lb_rif_fini(mlxsw_sp);
+ mlxsw_sp_vrs_fini(mlxsw_sp);
+ mlxsw_sp_mr_fini(mlxsw_sp);
+ mlxsw_sp_lpm_fini(mlxsw_sp);
+ rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
+ rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
+ mlxsw_sp_ipips_fini(mlxsw_sp);
+ mlxsw_sp_rifs_fini(mlxsw_sp);
+ __mlxsw_sp_router_fini(mlxsw_sp);
+ cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
+ mutex_destroy(&mlxsw_sp->router->lock);
+ kfree(mlxsw_sp->router);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
new file mode 100644
index 000000000..c5dfb972b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_ROUTER_H_
+#define _MLXSW_ROUTER_H_
+
+#include "spectrum.h"
+#include "reg.h"
+
+struct mlxsw_sp_router_nve_decap {
+ u32 ul_tb_id;
+ u32 tunnel_index;
+ enum mlxsw_sp_l3proto ul_proto;
+ union mlxsw_sp_l3addr ul_sip;
+ u8 valid:1;
+};
+
+struct mlxsw_sp_router {
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_rif **rifs;
+ struct idr rif_mac_profiles_idr;
+ atomic_t rif_mac_profiles_count;
+ atomic_t rifs_count;
+ u8 max_rif_mac_profile;
+ struct mlxsw_sp_vr *vrs;
+ struct rhashtable neigh_ht;
+ struct rhashtable nexthop_group_ht;
+ struct rhashtable nexthop_ht;
+ struct list_head nexthop_list;
+ struct {
+ /* One tree for each protocol: IPv4 and IPv6 */
+ struct mlxsw_sp_lpm_tree *proto_trees[2];
+ struct mlxsw_sp_lpm_tree *trees;
+ unsigned int tree_count;
+ } lpm;
+ struct {
+ struct delayed_work dw;
+ unsigned long interval; /* ms */
+ atomic_t neigh_count;
+ } neighs_update;
+ struct delayed_work nexthop_probe_dw;
+#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
+ struct list_head nexthop_neighs_list;
+ struct list_head ipip_list;
+ struct notifier_block nexthop_nb;
+ struct notifier_block fib_nb;
+ struct notifier_block netevent_nb;
+ struct notifier_block inetaddr_nb;
+ struct notifier_block inet6addr_nb;
+ struct notifier_block netdevice_nb;
+ const struct mlxsw_sp_rif_ops **rif_ops_arr;
+ const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
+ struct mlxsw_sp_router_nve_decap nve_decap_config;
+ struct mutex lock; /* Protects shared router resources */
+ struct mlxsw_sp_fib_entry_op_ctx *ll_op_ctx;
+ u16 lb_rif_index;
+ const struct mlxsw_sp_adj_grp_size_range *adj_grp_size_ranges;
+ size_t adj_grp_size_ranges_count;
+ struct delayed_work nh_grp_activity_dw;
+ struct list_head nh_res_grp_list;
+ bool inc_parsing_depth;
+ refcount_t num_groups;
+ u32 adj_trap_index;
+};
+
+struct mlxsw_sp_rif_ipip_lb;
+struct mlxsw_sp_rif_ipip_lb_config {
+ enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;
+ u32 okey;
+ enum mlxsw_sp_l3proto ul_protocol; /* Underlay. */
+ union mlxsw_sp_l3addr saddr;
+};
+
+enum mlxsw_sp_rif_counter_dir {
+ MLXSW_SP_RIF_COUNTER_INGRESS,
+ MLXSW_SP_RIF_COUNTER_EGRESS,
+};
+
+struct mlxsw_sp_neigh_entry;
+struct mlxsw_sp_nexthop;
+struct mlxsw_sp_ipip_entry;
+
+struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
+ u16 rif_index);
+u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif);
+u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif);
+u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif);
+u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev);
+int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif);
+const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif);
+int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif,
+ enum mlxsw_sp_rif_counter_dir dir,
+ u64 *cnt);
+void mlxsw_sp_rif_counter_free(struct mlxsw_sp_rif *rif,
+ enum mlxsw_sp_rif_counter_dir dir);
+int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp_rif *rif,
+ enum mlxsw_sp_rif_counter_dir dir);
+struct mlxsw_sp_neigh_entry *
+mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
+ struct mlxsw_sp_neigh_entry *neigh_entry);
+int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry);
+unsigned char *
+mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry);
+u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry);
+struct in6_addr *
+mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry);
+
+#define mlxsw_sp_rif_neigh_for_each(neigh_entry, rif) \
+ for (neigh_entry = mlxsw_sp_rif_neigh_next(rif, NULL); neigh_entry; \
+ neigh_entry = mlxsw_sp_rif_neigh_next(rif, neigh_entry))
+int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_neigh_entry *neigh_entry,
+ u64 *p_counter);
+void
+mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_neigh_entry *neigh_entry,
+ bool adding);
+bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry);
+int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ bool recreate_loopback,
+ bool keep_encap,
+ bool update_nexthops,
+ struct netlink_ext_ack *extack);
+void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry);
+bool
+mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_l3proto ul_proto,
+ union mlxsw_sp_l3addr saddr,
+ u32 ul_tb_id,
+ const struct mlxsw_sp_ipip_entry *except);
+struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
+ struct mlxsw_sp_nexthop *nh);
+bool mlxsw_sp_nexthop_is_forward(const struct mlxsw_sp_nexthop *nh);
+unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh);
+int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
+ u32 *p_adj_size, u32 *p_adj_hash_index);
+struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh);
+bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh);
+#define mlxsw_sp_nexthop_for_each(nh, router) \
+ for (nh = mlxsw_sp_nexthop_next(router, NULL); nh; \
+ nh = mlxsw_sp_nexthop_next(router, nh))
+int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh, u64 *p_counter);
+int mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
+ struct mlxsw_sp_nexthop *nh, bool force,
+ char *ratr_pl);
+void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh);
+void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh);
+
+static inline bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1,
+ const union mlxsw_sp_l3addr *addr2)
+{
+ return !memcmp(addr1, addr2, sizeof(*addr1));
+}
+
+int mlxsw_sp_ipip_ecn_encap_init(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_ipip_ecn_decap_init(struct mlxsw_sp *mlxsw_sp);
+struct net_device *
+mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev);
+
+#endif /* _MLXSW_ROUTER_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
new file mode 100644
index 000000000..b3472fb94
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -0,0 +1,1741 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/if_bridge.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/refcount.h>
+#include <linux/rtnetlink.h>
+#include <linux/workqueue.h>
+#include <net/arp.h>
+#include <net/gre.h>
+#include <net/lag.h>
+#include <net/ndisc.h>
+#include <net/ip6_tunnel.h>
+
+#include "spectrum.h"
+#include "spectrum_ipip.h"
+#include "spectrum_span.h"
+#include "spectrum_switchdev.h"
+
+struct mlxsw_sp_span {
+ struct work_struct work;
+ struct mlxsw_sp *mlxsw_sp;
+ const struct mlxsw_sp_span_trigger_ops **span_trigger_ops_arr;
+ const struct mlxsw_sp_span_entry_ops **span_entry_ops_arr;
+ size_t span_entry_ops_arr_size;
+ struct list_head analyzed_ports_list;
+ struct mutex analyzed_ports_lock; /* Protects analyzed_ports_list */
+ struct list_head trigger_entries_list;
+ u16 policer_id_base;
+ refcount_t policer_id_base_ref_count;
+ atomic_t active_entries_count;
+ int entries_count;
+ struct mlxsw_sp_span_entry entries[];
+};
+
+struct mlxsw_sp_span_analyzed_port {
+ struct list_head list; /* Member of analyzed_ports_list */
+ refcount_t ref_count;
+ u16 local_port;
+ bool ingress;
+};
+
+struct mlxsw_sp_span_trigger_entry {
+ struct list_head list; /* Member of trigger_entries_list */
+ struct mlxsw_sp_span *span;
+ const struct mlxsw_sp_span_trigger_ops *ops;
+ refcount_t ref_count;
+ u16 local_port;
+ enum mlxsw_sp_span_trigger trigger;
+ struct mlxsw_sp_span_trigger_parms parms;
+};
+
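+/* SPAN triggers fall into two classes: port triggers (ingress / egress
+ * mirroring bound to a specific local port) and global triggers (buffer
+ * events such as tail drop, early drop and ECN, enabled per traffic class).
+ */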
+enum mlxsw_sp_span_trigger_type {
+ MLXSW_SP_SPAN_TRIGGER_TYPE_PORT,
+ MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL,
+};
+
+struct mlxsw_sp_span_trigger_ops {
+ int (*bind)(struct mlxsw_sp_span_trigger_entry *trigger_entry);
+ void (*unbind)(struct mlxsw_sp_span_trigger_entry *trigger_entry);
+ bool (*matches)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
+ enum mlxsw_sp_span_trigger trigger,
+ struct mlxsw_sp_port *mlxsw_sp_port);
+ int (*enable)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
+ struct mlxsw_sp_port *mlxsw_sp_port, u8 tc);
+ void (*disable)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
+ struct mlxsw_sp_port *mlxsw_sp_port, u8 tc);
+};
+
+static void mlxsw_sp_span_respin_work(struct work_struct *work);
+
+static u64 mlxsw_sp_span_occ_get(void *priv)
+{
+ const struct mlxsw_sp *mlxsw_sp = priv;
+
+ return atomic_read(&mlxsw_sp->span->active_entries_count);
+}
+
+int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp_span *span;
+ int i, entries_count, err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
+ return -EIO;
+
+ entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_SPAN);
+ span = kzalloc(struct_size(span, entries, entries_count), GFP_KERNEL);
+ if (!span)
+ return -ENOMEM;
+ refcount_set(&span->policer_id_base_ref_count, 0);
+ span->entries_count = entries_count;
+ atomic_set(&span->active_entries_count, 0);
+ mutex_init(&span->analyzed_ports_lock);
+ INIT_LIST_HEAD(&span->analyzed_ports_list);
+ INIT_LIST_HEAD(&span->trigger_entries_list);
+ span->mlxsw_sp = mlxsw_sp;
+ mlxsw_sp->span = span;
+
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++)
+ mlxsw_sp->span->entries[i].id = i;
+
+ err = mlxsw_sp->span_ops->init(mlxsw_sp);
+ if (err)
+ goto err_init;
+
+ devl_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
+ mlxsw_sp_span_occ_get, mlxsw_sp);
+ INIT_WORK(&span->work, mlxsw_sp_span_respin_work);
+
+ return 0;
+
+err_init:
+ mutex_destroy(&mlxsw_sp->span->analyzed_ports_lock);
+ kfree(mlxsw_sp->span);
+ return err;
+}
+
+void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+ cancel_work_sync(&mlxsw_sp->span->work);
+ devl_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);
+
+ WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->trigger_entries_list));
+ WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->analyzed_ports_list));
+ mutex_destroy(&mlxsw_sp->span->analyzed_ports_lock);
+ kfree(mlxsw_sp->span);
+}
+
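+/* Mirroring to the CPU port is not supported on Spectrum-1. These stub
+ * operations match a NULL destination device but refuse to configure it.
+ */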
+static bool mlxsw_sp1_span_cpu_can_handle(const struct net_device *dev)
+{
+ return !dev;
+}
+
+static int mlxsw_sp1_span_entry_cpu_parms(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev,
+ struct mlxsw_sp_span_parms *sparmsp)
+{
+ return -EOPNOTSUPP;
+}
+
+static int
+mlxsw_sp1_span_entry_cpu_configure(struct mlxsw_sp_span_entry *span_entry,
+ struct mlxsw_sp_span_parms sparms)
+{
+ return -EOPNOTSUPP;
+}
+
+static void
+mlxsw_sp1_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
+{
+}
+
+static const
+struct mlxsw_sp_span_entry_ops mlxsw_sp1_span_entry_ops_cpu = {
+ .is_static = true,
+ .can_handle = mlxsw_sp1_span_cpu_can_handle,
+ .parms_set = mlxsw_sp1_span_entry_cpu_parms,
+ .configure = mlxsw_sp1_span_entry_cpu_configure,
+ .deconfigure = mlxsw_sp1_span_entry_cpu_deconfigure,
+};
+
+static int
+mlxsw_sp_span_entry_phys_parms(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev,
+ struct mlxsw_sp_span_parms *sparmsp)
+{
+ sparmsp->dest_port = netdev_priv(to_dev);
+ return 0;
+}
+
+static int
+mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry,
+ struct mlxsw_sp_span_parms sparms)
+{
+ struct mlxsw_sp_port *dest_port = sparms.dest_port;
+ struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
+ u16 local_port = dest_port->local_port;
+ char mpat_pl[MLXSW_REG_MPAT_LEN];
+ int pa_id = span_entry->id;
+
+ /* Create a new port analyzer entry for local_port. */
+ mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
+ MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
+ mlxsw_reg_mpat_session_id_set(mpat_pl, sparms.session_id);
+ mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
+ mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
+}
+
+static void
+mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry,
+ enum mlxsw_reg_mpat_span_type span_type)
+{
+ struct mlxsw_sp_port *dest_port = span_entry->parms.dest_port;
+ struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
+ u16 local_port = dest_port->local_port;
+ char mpat_pl[MLXSW_REG_MPAT_LEN];
+ int pa_id = span_entry->id;
+
+ mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false, span_type);
+ mlxsw_reg_mpat_session_id_set(mpat_pl, span_entry->parms.session_id);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
+}
+
+static void
+mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
+{
+ mlxsw_sp_span_entry_deconfigure_common(span_entry,
+ MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
+}
+
+static const
+struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
+ .is_static = true,
+ .can_handle = mlxsw_sp_port_dev_check,
+ .parms_set = mlxsw_sp_span_entry_phys_parms,
+ .configure = mlxsw_sp_span_entry_phys_configure,
+ .deconfigure = mlxsw_sp_span_entry_phys_deconfigure,
+};
+
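+/* Resolve the MAC address of the tunnel gateway / destination, triggering
+ * neighbour resolution if the entry does not exist yet. The result is used
+ * as the DMAC of the encapsulated mirrored packets.
+ */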
+static int mlxsw_sp_span_dmac(struct neigh_table *tbl,
+ const void *pkey,
+ struct net_device *dev,
+ unsigned char dmac[ETH_ALEN])
+{
+ struct neighbour *neigh = neigh_lookup(tbl, pkey, dev);
+ int err = 0;
+
+ if (!neigh) {
+ neigh = neigh_create(tbl, pkey, dev);
+ if (IS_ERR(neigh))
+ return PTR_ERR(neigh);
+ }
+
+ neigh_event_send(neigh, NULL);
+
+ read_lock_bh(&neigh->lock);
+ if ((neigh->nud_state & NUD_VALID) && !neigh->dead)
+ memcpy(dmac, neigh->ha, ETH_ALEN);
+ else
+ err = -ENOENT;
+ read_unlock_bh(&neigh->lock);
+
+ neigh_release(neigh);
+ return err;
+}
+
+static int
+mlxsw_sp_span_entry_unoffloadable(struct mlxsw_sp_span_parms *sparmsp)
+{
+ sparmsp->dest_port = NULL;
+ return 0;
+}
+
+static struct net_device *
+mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev,
+ unsigned char *dmac,
+ u16 *p_vid)
+{
+ struct bridge_vlan_info vinfo;
+ struct net_device *edev;
+ u16 vid = *p_vid;
+
+ if (!vid && WARN_ON(br_vlan_get_pvid(br_dev, &vid)))
+ return NULL;
+ if (!vid || br_vlan_get_info(br_dev, vid, &vinfo) ||
+ !(vinfo.flags & BRIDGE_VLAN_INFO_BRENTRY))
+ return NULL;
+
+ edev = br_fdb_find_port(br_dev, dmac, vid);
+ if (!edev)
+ return NULL;
+
+ if (br_vlan_get_info(edev, vid, &vinfo))
+ return NULL;
+ if (vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED)
+ *p_vid = 0;
+ else
+ *p_vid = vid;
+ return edev;
+}
+
+static struct net_device *
+mlxsw_sp_span_entry_bridge_8021d(const struct net_device *br_dev,
+ unsigned char *dmac)
+{
+ return br_fdb_find_port(br_dev, dmac, 0);
+}
+
+static struct net_device *
+mlxsw_sp_span_entry_bridge(const struct net_device *br_dev,
+ unsigned char dmac[ETH_ALEN],
+ u16 *p_vid)
+{
+ struct mlxsw_sp_bridge_port *bridge_port;
+ enum mlxsw_reg_spms_state spms_state;
+ struct net_device *dev = NULL;
+ struct mlxsw_sp_port *port;
+ u8 stp_state;
+
+ if (br_vlan_enabled(br_dev))
+ dev = mlxsw_sp_span_entry_bridge_8021q(br_dev, dmac, p_vid);
+ else if (!*p_vid)
+ dev = mlxsw_sp_span_entry_bridge_8021d(br_dev, dmac);
+ if (!dev)
+ return NULL;
+
+ port = mlxsw_sp_port_dev_lower_find(dev);
+ if (!port)
+ return NULL;
+
+ bridge_port = mlxsw_sp_bridge_port_find(port->mlxsw_sp->bridge, dev);
+ if (!bridge_port)
+ return NULL;
+
+ stp_state = mlxsw_sp_bridge_port_stp_state(bridge_port);
+ spms_state = mlxsw_sp_stp_spms_state(stp_state);
+ if (spms_state != MLXSW_REG_SPMS_STATE_FORWARDING)
+ return NULL;
+
+ return dev;
+}
+
+static struct net_device *
+mlxsw_sp_span_entry_vlan(const struct net_device *vlan_dev,
+ u16 *p_vid)
+{
+ *p_vid = vlan_dev_vlan_id(vlan_dev);
+ return vlan_dev_real_dev(vlan_dev);
+}
+
+static struct net_device *
+mlxsw_sp_span_entry_lag(struct net_device *lag_dev)
+{
+ struct net_device *dev;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(lag_dev, dev, iter)
+ if (netif_carrier_ok(dev) &&
+ net_lag_port_dev_txable(dev) &&
+ mlxsw_sp_port_dev_check(dev))
+ return dev;
+
+ return NULL;
+}
+
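+/* Resolve the egress device chain of a mirror-to-tunnel request (VLAN
+ * uppers, bridge, LAG) down to a physical mlxsw port and fill in the L2/L3
+ * encapsulation parameters. Destinations that cannot be resolved are marked
+ * as unoffloadable (dest_port == NULL) instead of returning an error.
+ */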
+static __maybe_unused int
+mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *edev,
+ union mlxsw_sp_l3addr saddr,
+ union mlxsw_sp_l3addr daddr,
+ union mlxsw_sp_l3addr gw,
+ __u8 ttl,
+ struct neigh_table *tbl,
+ struct mlxsw_sp_span_parms *sparmsp)
+{
+ unsigned char dmac[ETH_ALEN];
+ u16 vid = 0;
+
+ if (mlxsw_sp_l3addr_is_zero(gw))
+ gw = daddr;
+
+ if (!edev || mlxsw_sp_span_dmac(tbl, &gw, edev, dmac))
+ goto unoffloadable;
+
+ if (is_vlan_dev(edev))
+ edev = mlxsw_sp_span_entry_vlan(edev, &vid);
+
+ if (netif_is_bridge_master(edev)) {
+ edev = mlxsw_sp_span_entry_bridge(edev, dmac, &vid);
+ if (!edev)
+ goto unoffloadable;
+ }
+
+ if (is_vlan_dev(edev)) {
+ if (vid || !(edev->flags & IFF_UP))
+ goto unoffloadable;
+ edev = mlxsw_sp_span_entry_vlan(edev, &vid);
+ }
+
+ if (netif_is_lag_master(edev)) {
+ if (!(edev->flags & IFF_UP))
+ goto unoffloadable;
+ edev = mlxsw_sp_span_entry_lag(edev);
+ if (!edev)
+ goto unoffloadable;
+ }
+
+ if (!mlxsw_sp_port_dev_check(edev))
+ goto unoffloadable;
+
+ sparmsp->dest_port = netdev_priv(edev);
+ sparmsp->ttl = ttl;
+ memcpy(sparmsp->dmac, dmac, ETH_ALEN);
+ memcpy(sparmsp->smac, edev->dev_addr, ETH_ALEN);
+ sparmsp->saddr = saddr;
+ sparmsp->daddr = daddr;
+ sparmsp->vid = vid;
+ return 0;
+
+unoffloadable:
+ return mlxsw_sp_span_entry_unoffloadable(sparmsp);
+}
+
+#if IS_ENABLED(CONFIG_NET_IPGRE)
+static struct net_device *
+mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
+ __be32 *saddrp, __be32 *daddrp)
+{
+ struct ip_tunnel *tun = netdev_priv(to_dev);
+ struct net_device *dev = NULL;
+ struct ip_tunnel_parm parms;
+ struct rtable *rt = NULL;
+ struct flowi4 fl4;
+
+ /* We assume "dev" stays valid after rt is put. */
+ ASSERT_RTNL();
+
+ parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
+ ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
+ 0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0,
+ 0);
+
+ rt = ip_route_output_key(tun->net, &fl4);
+ if (IS_ERR(rt))
+ return NULL;
+
+ if (rt->rt_type != RTN_UNICAST)
+ goto out;
+
+ dev = rt->dst.dev;
+ *saddrp = fl4.saddr;
+ if (rt->rt_gw_family == AF_INET)
+ *daddrp = rt->rt_gw4;
+ /* cannot offload if route has an IPv6 gateway */
+ else if (rt->rt_gw_family == AF_INET6)
+ dev = NULL;
+
+out:
+ ip_rt_put(rt);
+ return dev;
+}
+
+static int
+mlxsw_sp_span_entry_gretap4_parms(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev,
+ struct mlxsw_sp_span_parms *sparmsp)
+{
+ struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
+ union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr };
+ union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr };
+ bool inherit_tos = tparm.iph.tos & 0x1;
+ bool inherit_ttl = !tparm.iph.ttl;
+ union mlxsw_sp_l3addr gw = daddr;
+ struct net_device *l3edev;
+
+ if (!(to_dev->flags & IFF_UP) ||
+ /* Reject tunnels with GRE keys, checksums, etc. */
+ tparm.i_flags || tparm.o_flags ||
+ /* Require a fixed TTL and a TOS copied from the mirrored packet. */
+ inherit_ttl || !inherit_tos ||
+ /* A destination address may not be "any". */
+ mlxsw_sp_l3addr_is_zero(daddr))
+ return mlxsw_sp_span_entry_unoffloadable(sparmsp);
+
+ l3edev = mlxsw_sp_span_gretap4_route(to_dev, &saddr.addr4, &gw.addr4);
+ return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
+ tparm.iph.ttl,
+ &arp_tbl, sparmsp);
+}
+
+static int
+mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry,
+ struct mlxsw_sp_span_parms sparms)
+{
+ struct mlxsw_sp_port *dest_port = sparms.dest_port;
+ struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
+ u16 local_port = dest_port->local_port;
+ char mpat_pl[MLXSW_REG_MPAT_LEN];
+ int pa_id = span_entry->id;
+
+ /* Create a new port analyzer entry for local_port. */
+ mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
+ MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
+ mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
+ mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
+ mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
+ mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
+ MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
+ sparms.dmac, !!sparms.vid);
+ mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl,
+ sparms.ttl, sparms.smac,
+ be32_to_cpu(sparms.saddr.addr4),
+ be32_to_cpu(sparms.daddr.addr4));
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
+}
+
+static void
+mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry)
+{
+ mlxsw_sp_span_entry_deconfigure_common(span_entry,
+ MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
+}
+
+static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = {
+ .can_handle = netif_is_gretap,
+ .parms_set = mlxsw_sp_span_entry_gretap4_parms,
+ .configure = mlxsw_sp_span_entry_gretap4_configure,
+ .deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure,
+};
+#endif
+
+#if IS_ENABLED(CONFIG_IPV6_GRE)
+static struct net_device *
+mlxsw_sp_span_gretap6_route(const struct net_device *to_dev,
+ struct in6_addr *saddrp,
+ struct in6_addr *daddrp)
+{
+ struct ip6_tnl *t = netdev_priv(to_dev);
+ struct flowi6 fl6 = t->fl.u.ip6;
+ struct net_device *dev = NULL;
+ struct dst_entry *dst;
+ struct rt6_info *rt6;
+
+ /* We assume "dev" stays valid after dst is released. */
+ ASSERT_RTNL();
+
+ fl6.flowi6_mark = t->parms.fwmark;
+ if (!ip6_tnl_xmit_ctl(t, &fl6.saddr, &fl6.daddr))
+ return NULL;
+
+ dst = ip6_route_output(t->net, NULL, &fl6);
+ if (!dst || dst->error)
+ goto out;
+
+ rt6 = container_of(dst, struct rt6_info, dst);
+
+ dev = dst->dev;
+ *saddrp = fl6.saddr;
+ *daddrp = rt6->rt6i_gateway;
+
+out:
+ dst_release(dst);
+ return dev;
+}
+
+static int
+mlxsw_sp_span_entry_gretap6_parms(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev,
+ struct mlxsw_sp_span_parms *sparmsp)
+{
+ struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev);
+ bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
+ union mlxsw_sp_l3addr saddr = { .addr6 = tparm.laddr };
+ union mlxsw_sp_l3addr daddr = { .addr6 = tparm.raddr };
+ bool inherit_ttl = !tparm.hop_limit;
+ union mlxsw_sp_l3addr gw = daddr;
+ struct net_device *l3edev;
+
+ if (!(to_dev->flags & IFF_UP) ||
+ /* Reject tunnels with GRE keys, checksums, etc. */
+ tparm.i_flags || tparm.o_flags ||
+ /* Require a fixed TTL and a TOS copied from the mirrored packet. */
+ inherit_ttl || !inherit_tos ||
+ /* A destination address may not be "any". */
+ mlxsw_sp_l3addr_is_zero(daddr))
+ return mlxsw_sp_span_entry_unoffloadable(sparmsp);
+
+ l3edev = mlxsw_sp_span_gretap6_route(to_dev, &saddr.addr6, &gw.addr6);
+ return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
+ tparm.hop_limit,
+ &nd_tbl, sparmsp);
+}
+
+static int
+mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
+ struct mlxsw_sp_span_parms sparms)
+{
+ struct mlxsw_sp_port *dest_port = sparms.dest_port;
+ struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
+ u16 local_port = dest_port->local_port;
+ char mpat_pl[MLXSW_REG_MPAT_LEN];
+ int pa_id = span_entry->id;
+
+ /* Create a new port analyzer entry for local_port. */
+ mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
+ MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
+ mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
+ mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
+ mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
+ mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
+ MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
+ sparms.dmac, !!sparms.vid);
+ mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac,
+ sparms.saddr.addr6,
+ sparms.daddr.addr6);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
+}
+
+static void
+mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry)
+{
+ mlxsw_sp_span_entry_deconfigure_common(span_entry,
+ MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
+}
+
+static const
+struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = {
+ .can_handle = netif_is_ip6gretap,
+ .parms_set = mlxsw_sp_span_entry_gretap6_parms,
+ .configure = mlxsw_sp_span_entry_gretap6_configure,
+ .deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure,
+};
+#endif
+
+static bool
+mlxsw_sp_span_vlan_can_handle(const struct net_device *dev)
+{
+ return is_vlan_dev(dev) &&
+ mlxsw_sp_port_dev_check(vlan_dev_real_dev(dev));
+}
+
+static int
+mlxsw_sp_span_entry_vlan_parms(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev,
+ struct mlxsw_sp_span_parms *sparmsp)
+{
+ struct net_device *real_dev;
+ u16 vid;
+
+ if (!(to_dev->flags & IFF_UP))
+ return mlxsw_sp_span_entry_unoffloadable(sparmsp);
+
+ real_dev = mlxsw_sp_span_entry_vlan(to_dev, &vid);
+ sparmsp->dest_port = netdev_priv(real_dev);
+ sparmsp->vid = vid;
+ return 0;
+}
+
+static int
+mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry,
+ struct mlxsw_sp_span_parms sparms)
+{
+ struct mlxsw_sp_port *dest_port = sparms.dest_port;
+ struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
+ u16 local_port = dest_port->local_port;
+ char mpat_pl[MLXSW_REG_MPAT_LEN];
+ int pa_id = span_entry->id;
+
+ mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
+ MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
+ mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
+ mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
+ mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
+}
+
+static void
+mlxsw_sp_span_entry_vlan_deconfigure(struct mlxsw_sp_span_entry *span_entry)
+{
+ mlxsw_sp_span_entry_deconfigure_common(span_entry,
+ MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
+}
+
+static const
+struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_vlan = {
+ .can_handle = mlxsw_sp_span_vlan_can_handle,
+ .parms_set = mlxsw_sp_span_entry_vlan_parms,
+ .configure = mlxsw_sp_span_entry_vlan_configure,
+ .deconfigure = mlxsw_sp_span_entry_vlan_deconfigure,
+};
+
+static const
+struct mlxsw_sp_span_entry_ops *mlxsw_sp1_span_entry_ops_arr[] = {
+ &mlxsw_sp1_span_entry_ops_cpu,
+ &mlxsw_sp_span_entry_ops_phys,
+#if IS_ENABLED(CONFIG_NET_IPGRE)
+ &mlxsw_sp_span_entry_ops_gretap4,
+#endif
+#if IS_ENABLED(CONFIG_IPV6_GRE)
+ &mlxsw_sp_span_entry_ops_gretap6,
+#endif
+ &mlxsw_sp_span_entry_ops_vlan,
+};
+
+static bool mlxsw_sp2_span_cpu_can_handle(const struct net_device *dev)
+{
+ return !dev;
+}
+
+static int mlxsw_sp2_span_entry_cpu_parms(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev,
+ struct mlxsw_sp_span_parms *sparmsp)
+{
+ sparmsp->dest_port = mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];
+ return 0;
+}
+
+static int
+mlxsw_sp2_span_entry_cpu_configure(struct mlxsw_sp_span_entry *span_entry,
+ struct mlxsw_sp_span_parms sparms)
+{
+ /* Mirroring to the CPU port is like mirroring to any other physical
+ * port; the CPU port's local port number is simply used instead.
+ */
+ return mlxsw_sp_span_entry_phys_configure(span_entry, sparms);
+}
+
+static void
+mlxsw_sp2_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
+{
+ enum mlxsw_reg_mpat_span_type span_type;
+
+ span_type = MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH;
+ mlxsw_sp_span_entry_deconfigure_common(span_entry, span_type);
+}
+
+static const
+struct mlxsw_sp_span_entry_ops mlxsw_sp2_span_entry_ops_cpu = {
+ .is_static = true,
+ .can_handle = mlxsw_sp2_span_cpu_can_handle,
+ .parms_set = mlxsw_sp2_span_entry_cpu_parms,
+ .configure = mlxsw_sp2_span_entry_cpu_configure,
+ .deconfigure = mlxsw_sp2_span_entry_cpu_deconfigure,
+};
+
+static const
+struct mlxsw_sp_span_entry_ops *mlxsw_sp2_span_entry_ops_arr[] = {
+ &mlxsw_sp2_span_entry_ops_cpu,
+ &mlxsw_sp_span_entry_ops_phys,
+#if IS_ENABLED(CONFIG_NET_IPGRE)
+ &mlxsw_sp_span_entry_ops_gretap4,
+#endif
+#if IS_ENABLED(CONFIG_IPV6_GRE)
+ &mlxsw_sp_span_entry_ops_gretap6,
+#endif
+ &mlxsw_sp_span_entry_ops_vlan,
+};
+
+static int
+mlxsw_sp_span_entry_nop_parms(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev,
+ struct mlxsw_sp_span_parms *sparmsp)
+{
+ return mlxsw_sp_span_entry_unoffloadable(sparmsp);
+}
+
+static int
+mlxsw_sp_span_entry_nop_configure(struct mlxsw_sp_span_entry *span_entry,
+ struct mlxsw_sp_span_parms sparms)
+{
+ return 0;
+}
+
+static void
+mlxsw_sp_span_entry_nop_deconfigure(struct mlxsw_sp_span_entry *span_entry)
+{
+}
+
+static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_nop = {
+ .parms_set = mlxsw_sp_span_entry_nop_parms,
+ .configure = mlxsw_sp_span_entry_nop_configure,
+ .deconfigure = mlxsw_sp_span_entry_nop_deconfigure,
+};
+
+static void
+mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_span_entry *span_entry,
+ struct mlxsw_sp_span_parms sparms)
+{
+ int err;
+
+ if (!sparms.dest_port)
+ goto set_parms;
+
+ if (sparms.dest_port->mlxsw_sp != mlxsw_sp) {
+ dev_err(mlxsw_sp->bus_info->dev,
+ "Cannot mirror to a port which belongs to a different mlxsw instance\n");
+ sparms.dest_port = NULL;
+ goto set_parms;
+ }
+
+ err = span_entry->ops->configure(span_entry, sparms);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to offload mirror\n");
+ sparms.dest_port = NULL;
+ goto set_parms;
+ }
+
+set_parms:
+ span_entry->parms = sparms;
+}
+
+static void
+mlxsw_sp_span_entry_deconfigure(struct mlxsw_sp_span_entry *span_entry)
+{
+ if (span_entry->parms.dest_port)
+ span_entry->ops->deconfigure(span_entry);
+}
+
+static int mlxsw_sp_span_policer_id_base_set(struct mlxsw_sp_span *span,
+ u16 policer_id)
+{
+ struct mlxsw_sp *mlxsw_sp = span->mlxsw_sp;
+ u16 policer_id_base;
+ int err;
+
+ /* Policers set on SPAN agents must be in the range of
+ * `policer_id_base .. policer_id_base + max_span_agents - 1`. If the
+ * base is set and the new policer is not within the range, then we
+ * must error out.
+ */
+ if (refcount_read(&span->policer_id_base_ref_count)) {
+ if (policer_id < span->policer_id_base ||
+ policer_id >= span->policer_id_base + span->entries_count)
+ return -EINVAL;
+
+ refcount_inc(&span->policer_id_base_ref_count);
+ return 0;
+ }
+
+ /* Base must be even. */
+ policer_id_base = policer_id % 2 == 0 ? policer_id : policer_id - 1;
+ err = mlxsw_sp->span_ops->policer_id_base_set(mlxsw_sp,
+ policer_id_base);
+ if (err)
+ return err;
+
+ span->policer_id_base = policer_id_base;
+ refcount_set(&span->policer_id_base_ref_count, 1);
+
+ return 0;
+}
+
+static void mlxsw_sp_span_policer_id_base_unset(struct mlxsw_sp_span *span)
+{
+ if (refcount_dec_and_test(&span->policer_id_base_ref_count))
+ span->policer_id_base = 0;
+}
+
+static struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev,
+ const struct mlxsw_sp_span_entry_ops *ops,
+ struct mlxsw_sp_span_parms sparms)
+{
+ struct mlxsw_sp_span_entry *span_entry = NULL;
+ int i;
+
+ /* find a free entry to use */
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+ if (!refcount_read(&mlxsw_sp->span->entries[i].ref_count)) {
+ span_entry = &mlxsw_sp->span->entries[i];
+ break;
+ }
+ }
+ if (!span_entry)
+ return NULL;
+
+ if (sparms.policer_enable) {
+ int err;
+
+ err = mlxsw_sp_span_policer_id_base_set(mlxsw_sp->span,
+ sparms.policer_id);
+ if (err)
+ return NULL;
+ }
+
+ atomic_inc(&mlxsw_sp->span->active_entries_count);
+ span_entry->ops = ops;
+ refcount_set(&span_entry->ref_count, 1);
+ span_entry->to_dev = to_dev;
+ mlxsw_sp_span_entry_configure(mlxsw_sp, span_entry, sparms);
+
+ return span_entry;
+}
+
+static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_span_entry *span_entry)
+{
+ mlxsw_sp_span_entry_deconfigure(span_entry);
+ atomic_dec(&mlxsw_sp->span->active_entries_count);
+ if (span_entry->parms.policer_enable)
+ mlxsw_sp_span_policer_id_base_unset(mlxsw_sp->span);
+}
+
+struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev)
+{
+ int i;
+
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
+
+ if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev)
+ return curr;
+ }
+ return NULL;
+}
+
+void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_span_entry *span_entry)
+{
+ mlxsw_sp_span_entry_deconfigure(span_entry);
+ span_entry->ops = &mlxsw_sp_span_entry_ops_nop;
+}
+
+static struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
+{
+ int i;
+
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
+
+ if (refcount_read(&curr->ref_count) && curr->id == span_id)
+ return curr;
+ }
+ return NULL;
+}
+
+static struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_find_by_parms(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev,
+ const struct mlxsw_sp_span_parms *sparms)
+{
+ int i;
+
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
+
+ if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev &&
+ curr->parms.policer_enable == sparms->policer_enable &&
+ curr->parms.policer_id == sparms->policer_id &&
+ curr->parms.session_id == sparms->session_id)
+ return curr;
+ }
+ return NULL;
+}
+
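+/* Return an existing SPAN entry with a matching destination and parameters,
+ * taking a reference, or create and configure a new one.
+ */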
+static struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev,
+ const struct mlxsw_sp_span_entry_ops *ops,
+ struct mlxsw_sp_span_parms sparms)
+{
+ struct mlxsw_sp_span_entry *span_entry;
+
+ span_entry = mlxsw_sp_span_entry_find_by_parms(mlxsw_sp, to_dev,
+ &sparms);
+ if (span_entry) {
+ /* Already exists, just take a reference */
+ refcount_inc(&span_entry->ref_count);
+ return span_entry;
+ }
+
+ return mlxsw_sp_span_entry_create(mlxsw_sp, to_dev, ops, sparms);
+}
+
+static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_span_entry *span_entry)
+{
+ if (refcount_dec_and_test(&span_entry->ref_count))
+ mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
+ return 0;
+}
+
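+/* Toggle the internal mirroring buffer in the port's headroom configuration.
+ * Extra headroom is needed on a port that is analyzed at egress.
+ */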
+static int mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
+{
+ struct mlxsw_sp_hdroom hdroom;
+
+ hdroom = *mlxsw_sp_port->hdroom;
+ hdroom.int_buf.enable = enable;
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+
+ return mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
+}
+
+static int
+mlxsw_sp_span_port_buffer_enable(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ return mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, true);
+}
+
+static void mlxsw_sp_span_port_buffer_disable(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, false);
+}
+
+static struct mlxsw_sp_span_analyzed_port *
+mlxsw_sp_span_analyzed_port_find(struct mlxsw_sp_span *span, u16 local_port,
+ bool ingress)
+{
+ struct mlxsw_sp_span_analyzed_port *analyzed_port;
+
+ list_for_each_entry(analyzed_port, &span->analyzed_ports_list, list) {
+ if (analyzed_port->local_port == local_port &&
+ analyzed_port->ingress == ingress)
+ return analyzed_port;
+ }
+
+ return NULL;
+}
+
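+/* Pick the SPAN entry operations that can offload mirroring to the given
+ * destination netdevice by probing each ops' can_handle() callback in order.
+ */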
+static const struct mlxsw_sp_span_entry_ops *
+mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev)
+{
+ struct mlxsw_sp_span *span = mlxsw_sp->span;
+ size_t i;
+
+ for (i = 0; i < span->span_entry_ops_arr_size; ++i)
+ if (span->span_entry_ops_arr[i]->can_handle(to_dev))
+ return span->span_entry_ops_arr[i];
+
+ return NULL;
+}
+
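+/* Scheduled by mlxsw_sp_span_respin(): re-resolve the parameters of every
+ * active, non-static SPAN entry and reconfigure the entries whose egress
+ * parameters no longer match what is programmed in hardware.
+ */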
+static void mlxsw_sp_span_respin_work(struct work_struct *work)
+{
+ struct mlxsw_sp_span *span;
+ struct mlxsw_sp *mlxsw_sp;
+ int i, err;
+
+ span = container_of(work, struct mlxsw_sp_span, work);
+ mlxsw_sp = span->mlxsw_sp;
+
+ rtnl_lock();
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
+ struct mlxsw_sp_span_parms sparms = {NULL};
+
+ if (!refcount_read(&curr->ref_count))
+ continue;
+
+ if (curr->ops->is_static)
+ continue;
+
+ err = curr->ops->parms_set(mlxsw_sp, curr->to_dev, &sparms);
+ if (err)
+ continue;
+
+ if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
+ mlxsw_sp_span_entry_deconfigure(curr);
+ mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
+ }
+ }
+ rtnl_unlock();
+}
+
+void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
+{
+ if (atomic_read(&mlxsw_sp->span->active_entries_count) == 0)
+ return;
+ mlxsw_core_schedule_work(&mlxsw_sp->span->work);
+}
+
+int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, int *p_span_id,
+ const struct mlxsw_sp_span_agent_parms *parms)
+{
+ const struct net_device *to_dev = parms->to_dev;
+ const struct mlxsw_sp_span_entry_ops *ops;
+ struct mlxsw_sp_span_entry *span_entry;
+ struct mlxsw_sp_span_parms sparms;
+ int err;
+
+ ASSERT_RTNL();
+
+ ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
+ if (!ops) {
+ dev_err(mlxsw_sp->bus_info->dev, "Cannot mirror to requested destination\n");
+ return -EOPNOTSUPP;
+ }
+
+ memset(&sparms, 0, sizeof(sparms));
+ err = ops->parms_set(mlxsw_sp, to_dev, &sparms);
+ if (err)
+ return err;
+
+ sparms.policer_id = parms->policer_id;
+ sparms.policer_enable = parms->policer_enable;
+ sparms.session_id = parms->session_id;
+ span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
+ if (!span_entry)
+ return -ENOBUFS;
+
+ *p_span_id = span_entry->id;
+
+ return 0;
+}
+
+void mlxsw_sp_span_agent_put(struct mlxsw_sp *mlxsw_sp, int span_id)
+{
+ struct mlxsw_sp_span_entry *span_entry;
+
+ ASSERT_RTNL();
+
+ span_entry = mlxsw_sp_span_entry_find_by_id(mlxsw_sp, span_id);
+ if (WARN_ON_ONCE(!span_entry))
+ return;
+
+ mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
+}
+
+static struct mlxsw_sp_span_analyzed_port *
+mlxsw_sp_span_analyzed_port_create(struct mlxsw_sp_span *span,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
+{
+ struct mlxsw_sp_span_analyzed_port *analyzed_port;
+ int err;
+
+ analyzed_port = kzalloc(sizeof(*analyzed_port), GFP_KERNEL);
+ if (!analyzed_port)
+ return ERR_PTR(-ENOMEM);
+
+ refcount_set(&analyzed_port->ref_count, 1);
+ analyzed_port->local_port = mlxsw_sp_port->local_port;
+ analyzed_port->ingress = ingress;
+ list_add_tail(&analyzed_port->list, &span->analyzed_ports_list);
+
+ /* An egress mirror buffer should be allocated on the egress port which
+ * does the mirroring.
+ */
+ if (!ingress) {
+ err = mlxsw_sp_span_port_buffer_enable(mlxsw_sp_port);
+ if (err)
+ goto err_buffer_update;
+ }
+
+ return analyzed_port;
+
+err_buffer_update:
+ list_del(&analyzed_port->list);
+ kfree(analyzed_port);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_span_analyzed_port_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_span_analyzed_port *
+ analyzed_port)
+{
+ /* Remove egress mirror buffer now that port is no longer analyzed
+ * at egress.
+ */
+ if (!analyzed_port->ingress)
+ mlxsw_sp_span_port_buffer_disable(mlxsw_sp_port);
+
+ list_del(&analyzed_port->list);
+ kfree(analyzed_port);
+}
+
+int mlxsw_sp_span_analyzed_port_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_span_analyzed_port *analyzed_port;
+ u16 local_port = mlxsw_sp_port->local_port;
+ int err = 0;
+
+ mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
+
+ analyzed_port = mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
+ local_port, ingress);
+ if (analyzed_port) {
+ refcount_inc(&analyzed_port->ref_count);
+ goto out_unlock;
+ }
+
+ analyzed_port = mlxsw_sp_span_analyzed_port_create(mlxsw_sp->span,
+ mlxsw_sp_port,
+ ingress);
+ if (IS_ERR(analyzed_port))
+ err = PTR_ERR(analyzed_port);
+
+out_unlock:
+ mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
+ return err;
+}
+
+void mlxsw_sp_span_analyzed_port_put(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_span_analyzed_port *analyzed_port;
+ u16 local_port = mlxsw_sp_port->local_port;
+
+ mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
+
+ analyzed_port = mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
+ local_port, ingress);
+ if (WARN_ON_ONCE(!analyzed_port))
+ goto out_unlock;
+
+ if (!refcount_dec_and_test(&analyzed_port->ref_count))
+ goto out_unlock;
+
+ mlxsw_sp_span_analyzed_port_destroy(mlxsw_sp_port, analyzed_port);
+
+out_unlock:
+ mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
+}
+
+static int
+__mlxsw_sp_span_trigger_port_bind(struct mlxsw_sp_span *span,
+ struct mlxsw_sp_span_trigger_entry *
+ trigger_entry, bool enable)
+{
+ char mpar_pl[MLXSW_REG_MPAR_LEN];
+ enum mlxsw_reg_mpar_i_e i_e;
+
+ switch (trigger_entry->trigger) {
+ case MLXSW_SP_SPAN_TRIGGER_INGRESS:
+ i_e = MLXSW_REG_MPAR_TYPE_INGRESS;
+ break;
+ case MLXSW_SP_SPAN_TRIGGER_EGRESS:
+ i_e = MLXSW_REG_MPAR_TYPE_EGRESS;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ if (trigger_entry->parms.probability_rate > MLXSW_REG_MPAR_RATE_MAX)
+ return -EINVAL;
+
+ mlxsw_reg_mpar_pack(mpar_pl, trigger_entry->local_port, i_e, enable,
+ trigger_entry->parms.span_id,
+ trigger_entry->parms.probability_rate);
+ return mlxsw_reg_write(span->mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
+}
+
+static int
+mlxsw_sp_span_trigger_port_bind(struct mlxsw_sp_span_trigger_entry *
+ trigger_entry)
+{
+ return __mlxsw_sp_span_trigger_port_bind(trigger_entry->span,
+ trigger_entry, true);
+}
+
+static void
+mlxsw_sp_span_trigger_port_unbind(struct mlxsw_sp_span_trigger_entry *
+ trigger_entry)
+{
+ __mlxsw_sp_span_trigger_port_bind(trigger_entry->span, trigger_entry,
+ false);
+}
+
+static bool
+mlxsw_sp_span_trigger_port_matches(struct mlxsw_sp_span_trigger_entry *
+ trigger_entry,
+ enum mlxsw_sp_span_trigger trigger,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ return trigger_entry->trigger == trigger &&
+ trigger_entry->local_port == mlxsw_sp_port->local_port;
+}
+
+static int
+mlxsw_sp_span_trigger_port_enable(struct mlxsw_sp_span_trigger_entry *
+ trigger_entry,
+ struct mlxsw_sp_port *mlxsw_sp_port, u8 tc)
+{
+ /* Port triggers are enabled during binding. */
+ return 0;
+}
+
+static void
+mlxsw_sp_span_trigger_port_disable(struct mlxsw_sp_span_trigger_entry *
+ trigger_entry,
+ struct mlxsw_sp_port *mlxsw_sp_port, u8 tc)
+{
+}
+
+static const struct mlxsw_sp_span_trigger_ops
+mlxsw_sp_span_trigger_port_ops = {
+ .bind = mlxsw_sp_span_trigger_port_bind,
+ .unbind = mlxsw_sp_span_trigger_port_unbind,
+ .matches = mlxsw_sp_span_trigger_port_matches,
+ .enable = mlxsw_sp_span_trigger_port_enable,
+ .disable = mlxsw_sp_span_trigger_port_disable,
+};
+
+static int
+mlxsw_sp1_span_trigger_global_bind(struct mlxsw_sp_span_trigger_entry *
+ trigger_entry)
+{
+ return -EOPNOTSUPP;
+}
+
+static void
+mlxsw_sp1_span_trigger_global_unbind(struct mlxsw_sp_span_trigger_entry *
+ trigger_entry)
+{
+}
+
+static bool
+mlxsw_sp1_span_trigger_global_matches(struct mlxsw_sp_span_trigger_entry *
+ trigger_entry,
+ enum mlxsw_sp_span_trigger trigger,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ WARN_ON_ONCE(1);
+ return false;
+}
+
+static int
+mlxsw_sp1_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
+ trigger_entry,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 tc)
+{
+ return -EOPNOTSUPP;
+}
+
+static void
+mlxsw_sp1_span_trigger_global_disable(struct mlxsw_sp_span_trigger_entry *
+ trigger_entry,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 tc)
+{
+}
+
+static const struct mlxsw_sp_span_trigger_ops
+mlxsw_sp1_span_trigger_global_ops = {
+ .bind = mlxsw_sp1_span_trigger_global_bind,
+ .unbind = mlxsw_sp1_span_trigger_global_unbind,
+ .matches = mlxsw_sp1_span_trigger_global_matches,
+ .enable = mlxsw_sp1_span_trigger_global_enable,
+ .disable = mlxsw_sp1_span_trigger_global_disable,
+};
+
+static const struct mlxsw_sp_span_trigger_ops *
+mlxsw_sp1_span_trigger_ops_arr[] = {
+ [MLXSW_SP_SPAN_TRIGGER_TYPE_PORT] = &mlxsw_sp_span_trigger_port_ops,
+ [MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL] =
+ &mlxsw_sp1_span_trigger_global_ops,
+};
+
+static int
+mlxsw_sp2_span_trigger_global_bind(struct mlxsw_sp_span_trigger_entry *
+ trigger_entry)
+{
+ struct mlxsw_sp *mlxsw_sp = trigger_entry->span->mlxsw_sp;
+ enum mlxsw_reg_mpagr_trigger trigger;
+ char mpagr_pl[MLXSW_REG_MPAGR_LEN];
+
+ switch (trigger_entry->trigger) {
+ case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
+ trigger = MLXSW_REG_MPAGR_TRIGGER_INGRESS_SHARED_BUFFER;
+ break;
+ case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
+ trigger = MLXSW_REG_MPAGR_TRIGGER_INGRESS_WRED;
+ break;
+ case MLXSW_SP_SPAN_TRIGGER_ECN:
+ trigger = MLXSW_REG_MPAGR_TRIGGER_EGRESS_ECN;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ if (trigger_entry->parms.probability_rate > MLXSW_REG_MPAGR_RATE_MAX)
+ return -EINVAL;
+
+ mlxsw_reg_mpagr_pack(mpagr_pl, trigger, trigger_entry->parms.span_id,
+ trigger_entry->parms.probability_rate);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpagr), mpagr_pl);
+}
+
+static void
+mlxsw_sp2_span_trigger_global_unbind(struct mlxsw_sp_span_trigger_entry *
+ trigger_entry)
+{
+ /* There is no unbinding for global triggers. The trigger should be
+ * disabled on all ports by now.
+ */
+}
+
+static bool
+mlxsw_sp2_span_trigger_global_matches(struct mlxsw_sp_span_trigger_entry *
+ trigger_entry,
+ enum mlxsw_sp_span_trigger trigger,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ return trigger_entry->trigger == trigger;
+}
+
+static int
+__mlxsw_sp2_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
+ trigger_entry,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 tc, bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = trigger_entry->span->mlxsw_sp;
+ char momte_pl[MLXSW_REG_MOMTE_LEN];
+ enum mlxsw_reg_momte_type type;
+ int err;
+
+ switch (trigger_entry->trigger) {
+ case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
+ type = MLXSW_REG_MOMTE_TYPE_SHARED_BUFFER_TCLASS;
+ break;
+ case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
+ type = MLXSW_REG_MOMTE_TYPE_WRED;
+ break;
+ case MLXSW_SP_SPAN_TRIGGER_ECN:
+ type = MLXSW_REG_MOMTE_TYPE_ECN;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ /* Query existing configuration in order to only change the state of
+ * the specified traffic class.
+ */
+ mlxsw_reg_momte_pack(momte_pl, mlxsw_sp_port->local_port, type);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(momte), momte_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_momte_tclass_en_set(momte_pl, tc, enable);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(momte), momte_pl);
+}
+
+static int
+mlxsw_sp2_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
+ trigger_entry,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 tc)
+{
+ return __mlxsw_sp2_span_trigger_global_enable(trigger_entry,
+ mlxsw_sp_port, tc, true);
+}
+
+static void
+mlxsw_sp2_span_trigger_global_disable(struct mlxsw_sp_span_trigger_entry *
+ trigger_entry,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 tc)
+{
+ __mlxsw_sp2_span_trigger_global_enable(trigger_entry, mlxsw_sp_port, tc,
+ false);
+}
+
+static const struct mlxsw_sp_span_trigger_ops
+mlxsw_sp2_span_trigger_global_ops = {
+ .bind = mlxsw_sp2_span_trigger_global_bind,
+ .unbind = mlxsw_sp2_span_trigger_global_unbind,
+ .matches = mlxsw_sp2_span_trigger_global_matches,
+ .enable = mlxsw_sp2_span_trigger_global_enable,
+ .disable = mlxsw_sp2_span_trigger_global_disable,
+};
+
+static const struct mlxsw_sp_span_trigger_ops *
+mlxsw_sp2_span_trigger_ops_arr[] = {
+ [MLXSW_SP_SPAN_TRIGGER_TYPE_PORT] = &mlxsw_sp_span_trigger_port_ops,
+ [MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL] =
+ &mlxsw_sp2_span_trigger_global_ops,
+};
+
+static void
+mlxsw_sp_span_trigger_ops_set(struct mlxsw_sp_span_trigger_entry *trigger_entry)
+{
+ struct mlxsw_sp_span *span = trigger_entry->span;
+ enum mlxsw_sp_span_trigger_type type;
+
+ switch (trigger_entry->trigger) {
+ case MLXSW_SP_SPAN_TRIGGER_INGRESS:
+ case MLXSW_SP_SPAN_TRIGGER_EGRESS:
+ type = MLXSW_SP_SPAN_TRIGGER_TYPE_PORT;
+ break;
+ case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
+ case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
+ case MLXSW_SP_SPAN_TRIGGER_ECN:
+ type = MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ trigger_entry->ops = span->span_trigger_ops_arr[type];
+}
+
+static struct mlxsw_sp_span_trigger_entry *
+mlxsw_sp_span_trigger_entry_create(struct mlxsw_sp_span *span,
+ enum mlxsw_sp_span_trigger trigger,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_span_trigger_parms
+ *parms)
+{
+ struct mlxsw_sp_span_trigger_entry *trigger_entry;
+ int err;
+
+ trigger_entry = kzalloc(sizeof(*trigger_entry), GFP_KERNEL);
+ if (!trigger_entry)
+ return ERR_PTR(-ENOMEM);
+
+ refcount_set(&trigger_entry->ref_count, 1);
+ trigger_entry->local_port = mlxsw_sp_port ? mlxsw_sp_port->local_port :
+ 0;
+ trigger_entry->trigger = trigger;
+ memcpy(&trigger_entry->parms, parms, sizeof(trigger_entry->parms));
+ trigger_entry->span = span;
+ mlxsw_sp_span_trigger_ops_set(trigger_entry);
+ list_add_tail(&trigger_entry->list, &span->trigger_entries_list);
+
+ err = trigger_entry->ops->bind(trigger_entry);
+ if (err)
+ goto err_trigger_entry_bind;
+
+ return trigger_entry;
+
+err_trigger_entry_bind:
+ list_del(&trigger_entry->list);
+ kfree(trigger_entry);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_span_trigger_entry_destroy(struct mlxsw_sp_span *span,
+ struct mlxsw_sp_span_trigger_entry *
+ trigger_entry)
+{
+ trigger_entry->ops->unbind(trigger_entry);
+ list_del(&trigger_entry->list);
+ kfree(trigger_entry);
+}
+
+static struct mlxsw_sp_span_trigger_entry *
+mlxsw_sp_span_trigger_entry_find(struct mlxsw_sp_span *span,
+ enum mlxsw_sp_span_trigger trigger,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_span_trigger_entry *trigger_entry;
+
+ list_for_each_entry(trigger_entry, &span->trigger_entries_list, list) {
+ if (trigger_entry->ops->matches(trigger_entry, trigger,
+ mlxsw_sp_port))
+ return trigger_entry;
+ }
+
+ return NULL;
+}
+
+int mlxsw_sp_span_agent_bind(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_span_trigger trigger,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_span_trigger_parms *parms)
+{
+ struct mlxsw_sp_span_trigger_entry *trigger_entry;
+ int err = 0;
+
+ ASSERT_RTNL();
+
+ if (!mlxsw_sp_span_entry_find_by_id(mlxsw_sp, parms->span_id))
+ return -EINVAL;
+
+ trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
+ trigger,
+ mlxsw_sp_port);
+ if (trigger_entry) {
+ if (trigger_entry->parms.span_id != parms->span_id ||
+ trigger_entry->parms.probability_rate !=
+ parms->probability_rate)
+ return -EINVAL;
+ refcount_inc(&trigger_entry->ref_count);
+ goto out;
+ }
+
+ trigger_entry = mlxsw_sp_span_trigger_entry_create(mlxsw_sp->span,
+ trigger,
+ mlxsw_sp_port,
+ parms);
+ if (IS_ERR(trigger_entry))
+ err = PTR_ERR(trigger_entry);
+
+out:
+ return err;
+}
+
+void mlxsw_sp_span_agent_unbind(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_span_trigger trigger,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_span_trigger_parms *parms)
+{
+ struct mlxsw_sp_span_trigger_entry *trigger_entry;
+
+ ASSERT_RTNL();
+
+ if (WARN_ON_ONCE(!mlxsw_sp_span_entry_find_by_id(mlxsw_sp,
+ parms->span_id)))
+ return;
+
+ trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
+ trigger,
+ mlxsw_sp_port);
+ if (WARN_ON_ONCE(!trigger_entry))
+ return;
+
+ if (!refcount_dec_and_test(&trigger_entry->ref_count))
+ return;
+
+ mlxsw_sp_span_trigger_entry_destroy(mlxsw_sp->span, trigger_entry);
+}
+
+int mlxsw_sp_span_trigger_enable(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_sp_span_trigger trigger, u8 tc)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_span_trigger_entry *trigger_entry;
+
+ ASSERT_RTNL();
+
+ trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
+ trigger,
+ mlxsw_sp_port);
+ if (WARN_ON_ONCE(!trigger_entry))
+ return -EINVAL;
+
+ return trigger_entry->ops->enable(trigger_entry, mlxsw_sp_port, tc);
+}
+
+void mlxsw_sp_span_trigger_disable(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_sp_span_trigger trigger, u8 tc)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_span_trigger_entry *trigger_entry;
+
+ ASSERT_RTNL();
+
+ trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
+ trigger,
+ mlxsw_sp_port);
+ if (WARN_ON_ONCE(!trigger_entry))
+ return;
+
+ return trigger_entry->ops->disable(trigger_entry, mlxsw_sp_port, tc);
+}
+
+bool mlxsw_sp_span_trigger_is_ingress(enum mlxsw_sp_span_trigger trigger)
+{
+ switch (trigger) {
+ case MLXSW_SP_SPAN_TRIGGER_INGRESS:
+ case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
+ case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
+ return true;
+ case MLXSW_SP_SPAN_TRIGGER_EGRESS:
+ case MLXSW_SP_SPAN_TRIGGER_ECN:
+ return false;
+ }
+
+ WARN_ON_ONCE(1);
+ return false;
+}
+
+static int mlxsw_sp1_span_init(struct mlxsw_sp *mlxsw_sp)
+{
+ size_t arr_size = ARRAY_SIZE(mlxsw_sp1_span_entry_ops_arr);
+
+ /* Must be first to avoid NULL pointer dereference by subsequent
+ * can_handle() callbacks.
+ */
+ if (WARN_ON(mlxsw_sp1_span_entry_ops_arr[0] !=
+ &mlxsw_sp1_span_entry_ops_cpu))
+ return -EINVAL;
+
+ mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp1_span_trigger_ops_arr;
+ mlxsw_sp->span->span_entry_ops_arr = mlxsw_sp1_span_entry_ops_arr;
+ mlxsw_sp->span->span_entry_ops_arr_size = arr_size;
+
+ return 0;
+}
+
+static int mlxsw_sp1_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
+ u16 policer_id_base)
+{
+ return -EOPNOTSUPP;
+}
+
+const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = {
+ .init = mlxsw_sp1_span_init,
+ .policer_id_base_set = mlxsw_sp1_span_policer_id_base_set,
+};
+
+static int mlxsw_sp2_span_init(struct mlxsw_sp *mlxsw_sp)
+{
+ size_t arr_size = ARRAY_SIZE(mlxsw_sp2_span_entry_ops_arr);
+
+ /* Must be first to avoid NULL pointer dereference by subsequent
+ * can_handle() callbacks.
+ */
+ if (WARN_ON(mlxsw_sp2_span_entry_ops_arr[0] !=
+ &mlxsw_sp2_span_entry_ops_cpu))
+ return -EINVAL;
+
+ mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp2_span_trigger_ops_arr;
+ mlxsw_sp->span->span_entry_ops_arr = mlxsw_sp2_span_entry_ops_arr;
+ mlxsw_sp->span->span_entry_ops_arr_size = arr_size;
+
+ return 0;
+}
+
+#define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
+#define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50
+
+static int mlxsw_sp2_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
+ u16 policer_id_base)
+{
+ char mogcr_pl[MLXSW_REG_MOGCR_LEN];
+ int err;
+
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mogcr_mirroring_pid_base_set(mogcr_pl, policer_id_base);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
+}
+
+const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = {
+ .init = mlxsw_sp2_span_init,
+ .policer_id_base_set = mlxsw_sp2_span_policer_id_base_set,
+};
+
+const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = {
+ .init = mlxsw_sp2_span_init,
+ .policer_id_base_set = mlxsw_sp2_span_policer_id_base_set,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
new file mode 100644
index 000000000..82e711afb
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_SPECTRUM_SPAN_H
+#define _MLXSW_SPECTRUM_SPAN_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <linux/refcount.h>
+
+#include "spectrum_router.h"
+
+struct mlxsw_sp;
+struct mlxsw_sp_port;
+
+/* SPAN session identifiers that correspond to MLXSW_TRAP_ID_MIRROR_SESSION<i>
+ * trap identifiers. The session identifier is an attribute of the SPAN agent,
+ * which determines the trap identifier of packets that are mirrored to the
+ * CPU. Packets that are trapped to the CPU for the same logical reason (e.g.,
+ * buffer drops) should use the same session identifier.
+ */
+enum mlxsw_sp_span_session_id {
+ MLXSW_SP_SPAN_SESSION_ID_BUFFER,
+ MLXSW_SP_SPAN_SESSION_ID_SAMPLING,
+
+ __MLXSW_SP_SPAN_SESSION_ID_MAX = 8,
+};
+
+struct mlxsw_sp_span_parms {
+ struct mlxsw_sp_port *dest_port; /* NULL for unoffloaded SPAN. */
+ unsigned int ttl;
+ unsigned char dmac[ETH_ALEN];
+ unsigned char smac[ETH_ALEN];
+ union mlxsw_sp_l3addr daddr;
+ union mlxsw_sp_l3addr saddr;
+ u16 vid;
+ u16 policer_id;
+ bool policer_enable;
+ enum mlxsw_sp_span_session_id session_id;
+};
+
+enum mlxsw_sp_span_trigger {
+ MLXSW_SP_SPAN_TRIGGER_INGRESS,
+ MLXSW_SP_SPAN_TRIGGER_EGRESS,
+ MLXSW_SP_SPAN_TRIGGER_TAIL_DROP,
+ MLXSW_SP_SPAN_TRIGGER_EARLY_DROP,
+ MLXSW_SP_SPAN_TRIGGER_ECN,
+};
+
+struct mlxsw_sp_span_trigger_parms {
+ int span_id;
+ u32 probability_rate;
+};
+
+struct mlxsw_sp_span_agent_parms {
+ const struct net_device *to_dev;
+ u16 policer_id;
+ bool policer_enable;
+ enum mlxsw_sp_span_session_id session_id;
+};
+
+struct mlxsw_sp_span_entry_ops;
+
+struct mlxsw_sp_span_ops {
+ int (*init)(struct mlxsw_sp *mlxsw_sp);
+ int (*policer_id_base_set)(struct mlxsw_sp *mlxsw_sp,
+ u16 policer_id_base);
+};
+
+struct mlxsw_sp_span_entry {
+ const struct net_device *to_dev;
+ const struct mlxsw_sp_span_entry_ops *ops;
+ struct mlxsw_sp_span_parms parms;
+ refcount_t ref_count;
+ int id;
+};
+
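+/* Per-destination-type operations. A separate set of ops exists for each
+ * supported mirror target: the CPU port, a physical port, GRE tunnels and
+ * VLAN uppers of physical ports.
+ */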
+struct mlxsw_sp_span_entry_ops {
+ bool is_static;
+ bool (*can_handle)(const struct net_device *to_dev);
+ int (*parms_set)(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev,
+ struct mlxsw_sp_span_parms *sparmsp);
+ int (*configure)(struct mlxsw_sp_span_entry *span_entry,
+ struct mlxsw_sp_span_parms sparms);
+ void (*deconfigure)(struct mlxsw_sp_span_entry *span_entry);
+};
+
+int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp);
+
+struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev);
+
+void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_span_entry *span_entry);
+
+int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu);
+void mlxsw_sp_span_speed_update_work(struct work_struct *work);
+
+int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, int *p_span_id,
+ const struct mlxsw_sp_span_agent_parms *parms);
+void mlxsw_sp_span_agent_put(struct mlxsw_sp *mlxsw_sp, int span_id);
+int mlxsw_sp_span_analyzed_port_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
+void mlxsw_sp_span_analyzed_port_put(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
+int mlxsw_sp_span_agent_bind(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_span_trigger trigger,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_span_trigger_parms *parms);
+void
+mlxsw_sp_span_agent_unbind(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_span_trigger trigger,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_span_trigger_parms *parms);
+int mlxsw_sp_span_trigger_enable(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_sp_span_trigger trigger, u8 tc);
+void mlxsw_sp_span_trigger_disable(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_sp_span_trigger trigger, u8 tc);
+bool mlxsw_sp_span_trigger_is_ingress(enum mlxsw_sp_span_trigger trigger);
+
+extern const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops;
+extern const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops;
+extern const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops;
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
new file mode 100644
index 000000000..1290b2d3e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -0,0 +1,4015 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <linux/rtnetlink.h>
+#include <linux/netlink.h>
+#include <net/switchdev.h>
+#include <net/vxlan.h>
+
+#include "spectrum_span.h"
+#include "spectrum_switchdev.h"
+#include "spectrum.h"
+#include "core.h"
+#include "reg.h"
+
+struct mlxsw_sp_bridge_ops;
+
+struct mlxsw_sp_bridge {
+ struct mlxsw_sp *mlxsw_sp;
+ struct {
+ struct delayed_work dw;
+#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
+ unsigned int interval; /* ms */
+ } fdb_notify;
+#define MLXSW_SP_MIN_AGEING_TIME 10
+#define MLXSW_SP_MAX_AGEING_TIME 1000000
+#define MLXSW_SP_DEFAULT_AGEING_TIME 300
+ u32 ageing_time;
+ bool vlan_enabled_exists;
+ struct list_head bridges_list;
+ DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
+ const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
+ const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
+ const struct mlxsw_sp_bridge_ops *bridge_8021ad_ops;
+};
+
+struct mlxsw_sp_bridge_device {
+ struct net_device *dev;
+ struct list_head list;
+ struct list_head ports_list;
+ struct list_head mdb_list;
+ struct rhashtable mdb_ht;
+ u8 vlan_enabled:1,
+ multicast_enabled:1,
+ mrouter:1;
+ const struct mlxsw_sp_bridge_ops *ops;
+};
+
+struct mlxsw_sp_bridge_port {
+ struct net_device *dev;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct list_head list;
+ struct list_head vlans_list;
+ unsigned int ref_count;
+ u8 stp_state;
+ unsigned long flags;
+ bool mrouter;
+ bool lagged;
+ union {
+ u16 lag_id;
+ u16 system_port;
+ };
+};
+
+struct mlxsw_sp_bridge_vlan {
+ struct list_head list;
+ struct list_head port_vlan_list;
+ u16 vid;
+};
+
+struct mlxsw_sp_bridge_ops {
+ int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netlink_ext_ack *extack);
+ void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port);
+ int (*vxlan_join)(struct mlxsw_sp_bridge_device *bridge_device,
+ const struct net_device *vxlan_dev, u16 vid,
+ struct netlink_ext_ack *extack);
+ struct mlxsw_sp_fid *
+ (*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
+ u16 vid, struct netlink_ext_ack *extack);
+ struct mlxsw_sp_fid *
+ (*fid_lookup)(struct mlxsw_sp_bridge_device *bridge_device,
+ u16 vid);
+ u16 (*fid_vid)(struct mlxsw_sp_bridge_device *bridge_device,
+ const struct mlxsw_sp_fid *fid);
+};
+
+struct mlxsw_sp_switchdev_ops {
+ void (*init)(struct mlxsw_sp *mlxsw_sp);
+};
+
+struct mlxsw_sp_mdb_entry_key {
+ unsigned char addr[ETH_ALEN];
+ u16 fid;
+};
+
+struct mlxsw_sp_mdb_entry {
+ struct list_head list;
+ struct rhash_head ht_node;
+ struct mlxsw_sp_mdb_entry_key key;
+ u16 mid;
+ struct list_head ports_list;
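+ /* Number of ports in 'ports_list' referenced by actual MDB
+ * memberships; mrouter-only references are not counted.
+ */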
+ u16 ports_count;
+};
+
+struct mlxsw_sp_mdb_entry_port {
+ struct list_head list; /* Member of 'ports_list'. */
+ u16 local_port;
+ refcount_t refcount;
+ bool mrouter;
+};
+
+static const struct rhashtable_params mlxsw_sp_mdb_ht_params = {
+ .key_offset = offsetof(struct mlxsw_sp_mdb_entry, key),
+ .head_offset = offsetof(struct mlxsw_sp_mdb_entry, ht_node),
+ .key_len = sizeof(struct mlxsw_sp_mdb_entry_key),
+};
+
+static int
+mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ u16 fid_index);
+
+static void
+mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ u16 fid_index);
+
+static int
+mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device
+ *bridge_device, bool mc_enabled);
+
+static void
+mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ bool add);
+
+static struct mlxsw_sp_bridge_device *
+mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
+ const struct net_device *br_dev)
+{
+ struct mlxsw_sp_bridge_device *bridge_device;
+
+ list_for_each_entry(bridge_device, &bridge->bridges_list, list)
+ if (bridge_device->dev == br_dev)
+ return bridge_device;
+
+ return NULL;
+}
+
+bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev)
+{
+ return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+}
+
+static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
+ struct netdev_nested_priv *priv)
+{
+ struct mlxsw_sp *mlxsw_sp = priv->data;
+
+ mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
+ return 0;
+}
+
+static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *dev)
+{
+ struct netdev_nested_priv priv = {
+ .data = (void *)mlxsw_sp,
+ };
+
+ mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
+ netdev_walk_all_upper_dev_rcu(dev,
+ mlxsw_sp_bridge_device_upper_rif_destroy,
+ &priv);
+}
+
+static int mlxsw_sp_bridge_device_vxlan_init(struct mlxsw_sp_bridge *bridge,
+ struct net_device *br_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *dev, *stop_dev;
+ struct list_head *iter;
+ int err;
+
+ netdev_for_each_lower_dev(br_dev, dev, iter) {
+ if (netif_is_vxlan(dev) && netif_running(dev)) {
+ err = mlxsw_sp_bridge_vxlan_join(bridge->mlxsw_sp,
+ br_dev, dev, 0,
+ extack);
+ if (err) {
+ stop_dev = dev;
+ goto err_vxlan_join;
+ }
+ }
+ }
+
+ return 0;
+
+err_vxlan_join:
+ netdev_for_each_lower_dev(br_dev, dev, iter) {
+ if (netif_is_vxlan(dev) && netif_running(dev)) {
+ if (stop_dev == dev)
+ break;
+ mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev);
+ }
+ }
+ return err;
+}
+
+static void mlxsw_sp_bridge_device_vxlan_fini(struct mlxsw_sp_bridge *bridge,
+ struct net_device *br_dev)
+{
+ struct net_device *dev;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(br_dev, dev, iter) {
+ if (netif_is_vxlan(dev) && netif_running(dev))
+ mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev);
+ }
+}
+
+static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp,
+ bool no_delay)
+{
+ struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
+ unsigned int interval = no_delay ? 0 : bridge->fdb_notify.interval;
+
+ mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
+ msecs_to_jiffies(interval));
+}
+
+static struct mlxsw_sp_bridge_device *
+mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
+ struct net_device *br_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct device *dev = bridge->mlxsw_sp->bus_info->dev;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ bool vlan_enabled = br_vlan_enabled(br_dev);
+ int err;
+
+ if (vlan_enabled && bridge->vlan_enabled_exists) {
+ dev_err(dev, "Only one VLAN-aware bridge is supported\n");
+ NL_SET_ERR_MSG_MOD(extack, "Only one VLAN-aware bridge is supported");
+ return ERR_PTR(-EINVAL);
+ }
+
+ bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
+ if (!bridge_device)
+ return ERR_PTR(-ENOMEM);
+
+ err = rhashtable_init(&bridge_device->mdb_ht, &mlxsw_sp_mdb_ht_params);
+ if (err)
+ goto err_mdb_rhashtable_init;
+
+ bridge_device->dev = br_dev;
+ bridge_device->vlan_enabled = vlan_enabled;
+ bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
+ bridge_device->mrouter = br_multicast_router(br_dev);
+ INIT_LIST_HEAD(&bridge_device->ports_list);
+ if (vlan_enabled) {
+ u16 proto;
+
+ bridge->vlan_enabled_exists = true;
+ br_vlan_get_proto(br_dev, &proto);
+ if (proto == ETH_P_8021AD)
+ bridge_device->ops = bridge->bridge_8021ad_ops;
+ else
+ bridge_device->ops = bridge->bridge_8021q_ops;
+ } else {
+ bridge_device->ops = bridge->bridge_8021d_ops;
+ }
+ INIT_LIST_HEAD(&bridge_device->mdb_list);
+
+ if (list_empty(&bridge->bridges_list))
+ mlxsw_sp_fdb_notify_work_schedule(bridge->mlxsw_sp, false);
+ list_add(&bridge_device->list, &bridge->bridges_list);
+
+ /* It is possible we already have VXLAN devices enslaved to the bridge,
+ * in which case we need to replay their configuration as if they were
+ * just now enslaved to the bridge.
+ */
+ err = mlxsw_sp_bridge_device_vxlan_init(bridge, br_dev, extack);
+ if (err)
+ goto err_vxlan_init;
+
+ return bridge_device;
+
+err_vxlan_init:
+ list_del(&bridge_device->list);
+ if (bridge_device->vlan_enabled)
+ bridge->vlan_enabled_exists = false;
+ rhashtable_destroy(&bridge_device->mdb_ht);
+err_mdb_rhashtable_init:
+ kfree(bridge_device);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
+ struct mlxsw_sp_bridge_device *bridge_device)
+{
+ mlxsw_sp_bridge_device_vxlan_fini(bridge, bridge_device->dev);
+ mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
+ bridge_device->dev);
+ list_del(&bridge_device->list);
+ if (list_empty(&bridge->bridges_list))
+ cancel_delayed_work(&bridge->fdb_notify.dw);
+ if (bridge_device->vlan_enabled)
+ bridge->vlan_enabled_exists = false;
+ WARN_ON(!list_empty(&bridge_device->ports_list));
+ WARN_ON(!list_empty(&bridge_device->mdb_list));
+ rhashtable_destroy(&bridge_device->mdb_ht);
+ kfree(bridge_device);
+}
+
+static struct mlxsw_sp_bridge_device *
+mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
+ struct net_device *br_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_bridge_device *bridge_device;
+
+ bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
+ if (bridge_device)
+ return bridge_device;
+
+ return mlxsw_sp_bridge_device_create(bridge, br_dev, extack);
+}
+
+static void
+mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
+ struct mlxsw_sp_bridge_device *bridge_device)
+{
+ if (list_empty(&bridge_device->ports_list))
+ mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
+}
+
+static struct mlxsw_sp_bridge_port *
+__mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
+ const struct net_device *brport_dev)
+{
+ struct mlxsw_sp_bridge_port *bridge_port;
+
+ list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
+ if (bridge_port->dev == brport_dev)
+ return bridge_port;
+ }
+
+ return NULL;
+}
+
+struct mlxsw_sp_bridge_port *
+mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
+ struct net_device *brport_dev)
+{
+ struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
+ struct mlxsw_sp_bridge_device *bridge_device;
+
+ if (!br_dev)
+ return NULL;
+
+ bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
+ if (!bridge_device)
+ return NULL;
+
+ return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
+}
+
+static struct mlxsw_sp_bridge_port *
+mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
+ struct net_device *brport_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_bridge_port *bridge_port;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ int err;
+
+ bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
+ if (!bridge_port)
+ return ERR_PTR(-ENOMEM);
+
+ mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
+ bridge_port->lagged = mlxsw_sp_port->lagged;
+ if (bridge_port->lagged)
+ bridge_port->lag_id = mlxsw_sp_port->lag_id;
+ else
+ bridge_port->system_port = mlxsw_sp_port->local_port;
+ bridge_port->dev = brport_dev;
+ bridge_port->bridge_device = bridge_device;
+ bridge_port->stp_state = BR_STATE_DISABLED;
+ bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
+ BR_MCAST_FLOOD;
+ INIT_LIST_HEAD(&bridge_port->vlans_list);
+ list_add(&bridge_port->list, &bridge_device->ports_list);
+ bridge_port->ref_count = 1;
+
+ err = switchdev_bridge_port_offload(brport_dev, mlxsw_sp_port->dev,
+ NULL, NULL, NULL, false, extack);
+ if (err)
+ goto err_switchdev_offload;
+
+ return bridge_port;
+
+err_switchdev_offload:
+ list_del(&bridge_port->list);
+ kfree(bridge_port);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
+{
+ switchdev_bridge_port_unoffload(bridge_port->dev, NULL, NULL, NULL);
+ list_del(&bridge_port->list);
+ WARN_ON(!list_empty(&bridge_port->vlans_list));
+ kfree(bridge_port);
+}
+
+static struct mlxsw_sp_bridge_port *
+mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
+ struct net_device *brport_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ int err;
+
+ bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
+ if (bridge_port) {
+ bridge_port->ref_count++;
+ return bridge_port;
+ }
+
+ bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev, extack);
+ if (IS_ERR(bridge_device))
+ return ERR_CAST(bridge_device);
+
+ bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev,
+ extack);
+ if (IS_ERR(bridge_port)) {
+ err = PTR_ERR(bridge_port);
+ goto err_bridge_port_create;
+ }
+
+ return bridge_port;
+
+err_bridge_port_create:
+ mlxsw_sp_bridge_device_put(bridge, bridge_device);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
+ struct mlxsw_sp_bridge_port *bridge_port)
+{
+ struct mlxsw_sp_bridge_device *bridge_device;
+
+ if (--bridge_port->ref_count != 0)
+ return;
+ bridge_device = bridge_port->bridge_device;
+ mlxsw_sp_bridge_port_destroy(bridge_port);
+ mlxsw_sp_bridge_device_put(bridge, bridge_device);
+}
+
+static struct mlxsw_sp_port_vlan *
+mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_bridge_device *
+ bridge_device,
+ u16 vid)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+
+ list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
+ list) {
+ if (!mlxsw_sp_port_vlan->bridge_port)
+ continue;
+ if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
+ bridge_device)
+ continue;
+ if (bridge_device->vlan_enabled &&
+ mlxsw_sp_port_vlan->vid != vid)
+ continue;
+ return mlxsw_sp_port_vlan;
+ }
+
+ return NULL;
+}
+
+static struct mlxsw_sp_port_vlan *
+mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 fid_index)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+
+ list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
+ list) {
+ struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+
+ if (fid && mlxsw_sp_fid_index(fid) == fid_index)
+ return mlxsw_sp_port_vlan;
+ }
+
+ return NULL;
+}
+
+static struct mlxsw_sp_bridge_vlan *
+mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
+ u16 vid)
+{
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
+
+ list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
+ if (bridge_vlan->vid == vid)
+ return bridge_vlan;
+ }
+
+ return NULL;
+}
+
+static struct mlxsw_sp_bridge_vlan *
+mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
+{
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
+
+ bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
+ if (!bridge_vlan)
+ return NULL;
+
+ INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
+ bridge_vlan->vid = vid;
+ list_add(&bridge_vlan->list, &bridge_port->vlans_list);
+
+ return bridge_vlan;
+}
+
+static void
+mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
+{
+ list_del(&bridge_vlan->list);
+ WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
+ kfree(bridge_vlan);
+}
+
+static struct mlxsw_sp_bridge_vlan *
+mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
+{
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
+
+ bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
+ if (bridge_vlan)
+ return bridge_vlan;
+
+ return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
+}
+
+static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
+{
+ if (list_empty(&bridge_vlan->port_vlan_list))
+ mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
+}
+
+static int
+mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_vlan *bridge_vlan,
+ u8 state)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+
+ list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
+ bridge_vlan_node) {
+ if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
+ continue;
+ return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
+ bridge_vlan->vid, state);
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *orig_dev,
+ u8 state)
+{
+ struct mlxsw_sp_bridge_port *bridge_port;
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
+ int err;
+
+ /* It's possible we failed to enslave the port, yet this
+ * operation is executed due to it being deferred.
+ */
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
+ orig_dev);
+ if (!bridge_port)
+ return 0;
+
+ list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
+ err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
+ bridge_vlan, state);
+ if (err)
+ goto err_port_bridge_vlan_stp_set;
+ }
+
+ bridge_port->stp_state = state;
+
+ return 0;
+
+err_port_bridge_vlan_stp_set:
+ list_for_each_entry_continue_reverse(bridge_vlan,
+ &bridge_port->vlans_list, list)
+ mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
+ bridge_port->stp_state);
+ return err;
+}
+
+static int
+mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_vlan *bridge_vlan,
+ enum mlxsw_sp_flood_type packet_type,
+ bool member)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+
+ list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
+ bridge_vlan_node) {
+ if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
+ continue;
+ return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
+ packet_type,
+ mlxsw_sp_port->local_port,
+ member);
+ }
+
+ return 0;
+}
+
+static int
+mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ enum mlxsw_sp_flood_type packet_type,
+ bool member)
+{
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
+ int err;
+
+ list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
+ err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
+ bridge_vlan,
+ packet_type,
+ member);
+ if (err)
+ goto err_port_bridge_vlan_flood_set;
+ }
+
+ return 0;
+
+err_port_bridge_vlan_flood_set:
+ list_for_each_entry_continue_reverse(bridge_vlan,
+ &bridge_port->vlans_list, list)
+ mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
+ packet_type, !member);
+ return err;
+}
+
+static int
+mlxsw_sp_bridge_vlans_flood_set(struct mlxsw_sp_bridge_vlan *bridge_vlan,
+ enum mlxsw_sp_flood_type packet_type,
+ bool member)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ int err;
+
+ list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
+ bridge_vlan_node) {
+ u16 local_port = mlxsw_sp_port_vlan->mlxsw_sp_port->local_port;
+
+ err = mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
+ packet_type, local_port, member);
+ if (err)
+ goto err_fid_flood_set;
+ }
+
+ return 0;
+
+err_fid_flood_set:
+ list_for_each_entry_continue_reverse(mlxsw_sp_port_vlan,
+ &bridge_vlan->port_vlan_list,
+ list) {
+ u16 local_port = mlxsw_sp_port_vlan->mlxsw_sp_port->local_port;
+
+ mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid, packet_type,
+ local_port, !member);
+ }
+
+ return err;
+}
+
+static int
+mlxsw_sp_bridge_ports_flood_table_set(struct mlxsw_sp_bridge_port *bridge_port,
+ enum mlxsw_sp_flood_type packet_type,
+ bool member)
+{
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
+ int err;
+
+ list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
+ err = mlxsw_sp_bridge_vlans_flood_set(bridge_vlan, packet_type,
+ member);
+ if (err)
+ goto err_bridge_vlans_flood_set;
+ }
+
+ return 0;
+
+err_bridge_vlans_flood_set:
+ list_for_each_entry_continue_reverse(bridge_vlan,
+ &bridge_port->vlans_list, list)
+ mlxsw_sp_bridge_vlans_flood_set(bridge_vlan, packet_type,
+ !member);
+ return err;
+}
+
+static int
+mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_vlan *bridge_vlan,
+ bool set)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ u16 vid = bridge_vlan->vid;
+
+ list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
+ bridge_vlan_node) {
+ if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
+ continue;
+ return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
+ }
+
+ return 0;
+}
+
+static int
+mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ bool set)
+{
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
+ int err;
+
+ list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
+ err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
+ bridge_vlan, set);
+ if (err)
+ goto err_port_bridge_vlan_learning_set;
+ }
+
+ return 0;
+
+err_port_bridge_vlan_learning_set:
+ list_for_each_entry_continue_reverse(bridge_vlan,
+ &bridge_port->vlans_list, list)
+ mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
+ bridge_vlan, !set);
+ return err;
+}
+
+static int
+mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_brport_flags flags)
+{
+ if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *orig_dev,
+ struct switchdev_brport_flags flags)
+{
+ struct mlxsw_sp_bridge_port *bridge_port;
+ int err;
+
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
+ orig_dev);
+ if (!bridge_port)
+ return 0;
+
+ if (flags.mask & BR_FLOOD) {
+ err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
+ bridge_port,
+ MLXSW_SP_FLOOD_TYPE_UC,
+ flags.val & BR_FLOOD);
+ if (err)
+ return err;
+ }
+
+ if (flags.mask & BR_LEARNING) {
+ err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port,
+ bridge_port,
+ flags.val & BR_LEARNING);
+ if (err)
+ return err;
+ }
+
+ if (bridge_port->bridge_device->multicast_enabled)
+ goto out;
+
+ if (flags.mask & BR_MCAST_FLOOD) {
+ err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
+ bridge_port,
+ MLXSW_SP_FLOOD_TYPE_MC,
+ flags.val & BR_MCAST_FLOOD);
+ if (err)
+ return err;
+ }
+
+out:
+ memcpy(&bridge_port->flags, &flags.val, sizeof(flags.val));
+ return 0;
+}
+
+static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
+{
+ char sfdat_pl[MLXSW_REG_SFDAT_LEN];
+ int err;
+
+ mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
+ if (err)
+ return err;
+ mlxsw_sp->bridge->ageing_time = ageing_time;
+ return 0;
+}
+
+static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ unsigned long ageing_clock_t)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
+ u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
+
+ if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
+ ageing_time > MLXSW_SP_MAX_AGEING_TIME)
+ return -ERANGE;
+
+ return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
+}
+
+static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *orig_dev,
+ bool vlan_enabled)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_bridge_device *bridge_device;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
+ if (WARN_ON(!bridge_device))
+ return -EINVAL;
+
+ if (bridge_device->vlan_enabled == vlan_enabled)
+ return 0;
+
+ netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
+ return -EINVAL;
+}
+
+static int mlxsw_sp_port_attr_br_vlan_proto_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *orig_dev,
+ u16 vlan_proto)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_bridge_device *bridge_device;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
+ if (WARN_ON(!bridge_device))
+ return -EINVAL;
+
+ netdev_err(bridge_device->dev, "VLAN protocol can't be changed on existing bridge\n");
+ return -EINVAL;
+}
+
+static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *orig_dev,
+ bool is_port_mrouter)
+{
+ struct mlxsw_sp_bridge_port *bridge_port;
+ int err;
+
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
+ orig_dev);
+ if (!bridge_port)
+ return 0;
+
+ mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
+ is_port_mrouter);
+
+ if (!bridge_port->bridge_device->multicast_enabled)
+ goto out;
+
+ err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
+ MLXSW_SP_FLOOD_TYPE_MC,
+ is_port_mrouter);
+ if (err)
+ return err;
+
+out:
+ bridge_port->mrouter = is_port_mrouter;
+ return 0;
+}
+
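+/* Determine whether unknown multicast should be flooded to the bridge port:
+ * with multicast snooping enabled only mrouter ports flood, otherwise the
+ * BR_MCAST_FLOOD flag decides.
+ */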
+static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
+{
+ const struct mlxsw_sp_bridge_device *bridge_device;
+
+ bridge_device = bridge_port->bridge_device;
+ return bridge_device->multicast_enabled ? bridge_port->mrouter :
+ bridge_port->flags & BR_MCAST_FLOOD;
+}
+
+static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *orig_dev,
+ bool mc_disabled)
+{
+ enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ int err;
+
+ /* It's possible we failed to enslave the port, yet this
+ * operation is executed due to it being deferred.
+ */
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
+ if (!bridge_device)
+ return 0;
+
+ if (bridge_device->multicast_enabled == !mc_disabled)
+ return 0;
+
+ bridge_device->multicast_enabled = !mc_disabled;
+ err = mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp, bridge_device,
+ !mc_disabled);
+ if (err)
+ goto err_mc_enable_sync;
+
+ list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
+ bool member = mlxsw_sp_mc_flood(bridge_port);
+
+ err = mlxsw_sp_bridge_ports_flood_table_set(bridge_port,
+ packet_type,
+ member);
+ if (err)
+ goto err_flood_table_set;
+ }
+
+ return 0;
+
+err_flood_table_set:
+ list_for_each_entry_continue_reverse(bridge_port,
+ &bridge_device->ports_list, list) {
+ bool member = mlxsw_sp_mc_flood(bridge_port);
+
+ mlxsw_sp_bridge_ports_flood_table_set(bridge_port, packet_type,
+ !member);
+ }
+ mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp, bridge_device,
+ mc_disabled);
+err_mc_enable_sync:
+ bridge_device->multicast_enabled = mc_disabled;
+ return err;
+}
+
+static struct mlxsw_sp_mdb_entry_port *
+mlxsw_sp_mdb_entry_port_lookup(struct mlxsw_sp_mdb_entry *mdb_entry,
+ u16 local_port)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+
+ list_for_each_entry(mdb_entry_port, &mdb_entry->ports_list, list) {
+ if (mdb_entry_port->local_port == local_port)
+ return mdb_entry_port;
+ }
+
+ return NULL;
+}
+
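+/* Take a reference on the port's membership in the MDB entry, adding the
+ * port to the underlying PGT entry on first use.
+ */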
+static struct mlxsw_sp_mdb_entry_port *
+mlxsw_sp_mdb_entry_port_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mdb_entry *mdb_entry,
+ u16 local_port)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+ int err;
+
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
+ if (mdb_entry_port) {
+ if (mdb_entry_port->mrouter &&
+ refcount_read(&mdb_entry_port->refcount) == 1)
+ mdb_entry->ports_count++;
+
+ refcount_inc(&mdb_entry_port->refcount);
+ return mdb_entry_port;
+ }
+
+ err = mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, true);
+ if (err)
+ return ERR_PTR(err);
+
+ mdb_entry_port = kzalloc(sizeof(*mdb_entry_port), GFP_KERNEL);
+ if (!mdb_entry_port) {
+ err = -ENOMEM;
+ goto err_mdb_entry_port_alloc;
+ }
+
+ mdb_entry_port->local_port = local_port;
+ refcount_set(&mdb_entry_port->refcount, 1);
+ list_add(&mdb_entry_port->list, &mdb_entry->ports_list);
+ mdb_entry->ports_count++;
+
+ return mdb_entry_port;
+
+err_mdb_entry_port_alloc:
+ mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, false);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_mdb_entry_port_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mdb_entry *mdb_entry,
+ u16 local_port, bool force)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
+ if (!mdb_entry_port)
+ return;
+
+ if (!force && !refcount_dec_and_test(&mdb_entry_port->refcount)) {
+ if (mdb_entry_port->mrouter &&
+ refcount_read(&mdb_entry_port->refcount) == 1)
+ mdb_entry->ports_count--;
+ return;
+ }
+
+ mdb_entry->ports_count--;
+ list_del(&mdb_entry_port->list);
+ kfree(mdb_entry_port);
+ mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, false);
+}
+
+static __always_unused struct mlxsw_sp_mdb_entry_port *
+mlxsw_sp_mdb_entry_mrouter_port_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mdb_entry *mdb_entry,
+ u16 local_port)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+ int err;
+
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
+ if (mdb_entry_port) {
+ if (!mdb_entry_port->mrouter)
+ refcount_inc(&mdb_entry_port->refcount);
+ return mdb_entry_port;
+ }
+
+ err = mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, true);
+ if (err)
+ return ERR_PTR(err);
+
+ mdb_entry_port = kzalloc(sizeof(*mdb_entry_port), GFP_KERNEL);
+ if (!mdb_entry_port) {
+ err = -ENOMEM;
+ goto err_mdb_entry_port_alloc;
+ }
+
+ mdb_entry_port->local_port = local_port;
+ refcount_set(&mdb_entry_port->refcount, 1);
+ mdb_entry_port->mrouter = true;
+ list_add(&mdb_entry_port->list, &mdb_entry->ports_list);
+
+ return mdb_entry_port;
+
+err_mdb_entry_port_alloc:
+ mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, false);
+ return ERR_PTR(err);
+}
+
+static __always_unused void
+mlxsw_sp_mdb_entry_mrouter_port_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mdb_entry *mdb_entry,
+ u16 local_port)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
+ if (!mdb_entry_port)
+ return;
+
+ if (!mdb_entry_port->mrouter)
+ return;
+
+ mdb_entry_port->mrouter = false;
+ if (!refcount_dec_and_test(&mdb_entry_port->refcount))
+ return;
+
+ list_del(&mdb_entry_port->list);
+ kfree(mdb_entry_port);
+ mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, false);
+}
+
+static void
+mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ bool add)
+{
+ u16 local_port = mlxsw_sp_router_port(mlxsw_sp);
+ struct mlxsw_sp_mdb_entry *mdb_entry;
+
+ list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
+ if (add)
+ mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp, mdb_entry,
+ local_port);
+ else
+ mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry,
+ local_port);
+ }
+}
+
+static int
+mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *orig_dev,
+ bool is_mrouter)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_bridge_device *bridge_device;
+
+ /* It's possible we failed to enslave the port, yet this
+ * operation is executed due to it being deferred.
+ */
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
+ if (!bridge_device)
+ return 0;
+
+ if (bridge_device->mrouter != is_mrouter)
+ mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device,
+ is_mrouter);
+ bridge_device->mrouter = is_mrouter;
+ return 0;
+}
+
+static int mlxsw_sp_port_attr_set(struct net_device *dev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port,
+ attr->orig_dev,
+ attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ err = mlxsw_sp_port_attr_br_pre_flags_set(mlxsw_sp_port,
+ attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port,
+ attr->orig_dev,
+ attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port,
+ attr->u.ageing_time);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port,
+ attr->orig_dev,
+ attr->u.vlan_filtering);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL:
+ err = mlxsw_sp_port_attr_br_vlan_proto_set(mlxsw_sp_port,
+ attr->orig_dev,
+ attr->u.vlan_protocol);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_MROUTER:
+ err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port,
+ attr->orig_dev,
+ attr->u.mrouter);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
+ err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port,
+ attr->orig_dev,
+ attr->u.mc_disabled);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
+ err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port,
+ attr->orig_dev,
+ attr->u.mrouter);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
+
+ return err;
+}
+
+static int
+mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ u16 local_port = mlxsw_sp_port->local_port;
+ u16 vid = mlxsw_sp_port_vlan->vid;
+ struct mlxsw_sp_fid *fid;
+ int err;
+
+ bridge_device = bridge_port->bridge_device;
+ fid = bridge_device->ops->fid_get(bridge_device, vid, extack);
+ if (IS_ERR(fid))
+ return PTR_ERR(fid);
+
+ err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
+ bridge_port->flags & BR_FLOOD);
+ if (err)
+ goto err_fid_uc_flood_set;
+
+ err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
+ mlxsw_sp_mc_flood(bridge_port));
+ if (err)
+ goto err_fid_mc_flood_set;
+
+ err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
+ true);
+ if (err)
+ goto err_fid_bc_flood_set;
+
+ err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
+ if (err)
+ goto err_fid_port_vid_map;
+
+ mlxsw_sp_port_vlan->fid = fid;
+
+ return 0;
+
+err_fid_port_vid_map:
+ mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
+err_fid_bc_flood_set:
+ mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
+err_fid_mc_flood_set:
+ mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
+err_fid_uc_flood_set:
+ mlxsw_sp_fid_put(fid);
+ return err;
+}
+
+static void
+mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
+ struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+ u16 local_port = mlxsw_sp_port->local_port;
+ u16 vid = mlxsw_sp_port_vlan->vid;
+
+ mlxsw_sp_port_vlan->fid = NULL;
+ mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
+ mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
+ mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
+ mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
+ mlxsw_sp_fid_put(fid);
+}
+
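+/* Compute the PVID the port should use after 'vid' is added: the new VID if
+ * it is flagged as PVID, no PVID if the current PVID is re-added as tagged,
+ * and the current PVID otherwise.
+ */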
+static u16
+mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid, bool is_pvid)
+{
+ if (is_pvid)
+ return vid;
+ else if (mlxsw_sp_port->pvid == vid)
+ return 0; /* Disallow untagged packets */
+ else
+ return mlxsw_sp_port->pvid;
+}
+
+static int
+mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
+ u16 vid = mlxsw_sp_port_vlan->vid;
+ int err;
+
+ /* No need to continue if only VLAN flags were changed */
+ if (mlxsw_sp_port_vlan->bridge_port)
+ return 0;
+
+ err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port,
+ extack);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
+ bridge_port->flags & BR_LEARNING);
+ if (err)
+ goto err_port_vid_learning_set;
+
+ err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
+ bridge_port->stp_state);
+ if (err)
+ goto err_port_vid_stp_set;
+
+ bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
+ if (!bridge_vlan) {
+ err = -ENOMEM;
+ goto err_bridge_vlan_get;
+ }
+
+ list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
+ &bridge_vlan->port_vlan_list);
+
+ mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
+ bridge_port->dev, extack);
+ mlxsw_sp_port_vlan->bridge_port = bridge_port;
+
+ return 0;
+
+err_bridge_vlan_get:
+ mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
+err_port_vid_stp_set:
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
+err_port_vid_learning_set:
+ mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
+ return err;
+}
+
+void
+mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
+ struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ u16 vid = mlxsw_sp_port_vlan->vid;
+ bool last_port;
+
+ if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
+ mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
+ return;
+
+ bridge_port = mlxsw_sp_port_vlan->bridge_port;
+ bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
+ last_port = list_is_singular(&bridge_vlan->port_vlan_list);
+
+ list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
+ mlxsw_sp_bridge_vlan_put(bridge_vlan);
+ mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
+ if (last_port)
+ mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
+ bridge_port,
+ mlxsw_sp_fid_index(fid));
+
+ mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port,
+ mlxsw_sp_fid_index(fid));
+
+ mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
+
+ mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
+ mlxsw_sp_port_vlan->bridge_port = NULL;
+}
+
+static int
+mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ u16 vid, bool is_untagged, bool is_pvid,
+ struct netlink_ext_ack *extack)
+{
+ u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ u16 old_pvid = mlxsw_sp_port->pvid;
+ u16 proto;
+ int err;
+
+ /* The only valid scenario in which a port-vlan already exists is if
+ * the VLAN flags were changed and the port-vlan is associated with the
+ * correct bridge port.
+ */
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
+ if (mlxsw_sp_port_vlan &&
+ mlxsw_sp_port_vlan->bridge_port != bridge_port)
+ return -EEXIST;
+
+ if (!mlxsw_sp_port_vlan) {
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
+ vid);
+ if (IS_ERR(mlxsw_sp_port_vlan))
+ return PTR_ERR(mlxsw_sp_port_vlan);
+ }
+
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
+ is_untagged);
+ if (err)
+ goto err_port_vlan_set;
+
+ br_vlan_get_proto(bridge_port->bridge_device->dev, &proto);
+ err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid, proto);
+ if (err)
+ goto err_port_pvid_set;
+
+ err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
+ extack);
+ if (err)
+ goto err_port_vlan_bridge_join;
+
+ return 0;
+
+err_port_vlan_bridge_join:
+ mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid, proto);
+err_port_pvid_set:
+ mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
+err_port_vlan_set:
+ mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
+ return err;
+}
+
+static int
+mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ u16 pvid;
+
+ pvid = mlxsw_sp_rif_vid(mlxsw_sp, br_dev);
+ if (!pvid)
+ return 0;
+
+ if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
+ if (vlan->vid != pvid) {
+ netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
+ return -EBUSY;
+ }
+ } else {
+ if (vlan->vid == pvid) {
+ netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct net_device *orig_dev = vlan->obj.orig_dev;
+ struct mlxsw_sp_bridge_port *bridge_port;
+
+ if (netif_is_bridge_master(orig_dev)) {
+ int err = 0;
+
+ if (br_vlan_enabled(orig_dev))
+ err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
+ orig_dev, vlan);
+ if (!err)
+ err = -EOPNOTSUPP;
+ return err;
+ }
+
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
+ if (WARN_ON(!bridge_port))
+ return -EINVAL;
+
+ if (!bridge_port->bridge_device->vlan_enabled)
+ return 0;
+
+ return mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
+ vlan->vid, flag_untagged,
+ flag_pvid, extack);
+}
+
+static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
+{
+ return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
+ MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
+}
+
+static int
+mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ u16 fid_index)
+{
+ bool lagged = bridge_port->lagged;
+ char sfdf_pl[MLXSW_REG_SFDF_LEN];
+ u16 system_port;
+
+ system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
+ mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
+ mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
+ mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
+}
+
+static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
+{
+ return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
+ MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG;
+}
+
+static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
+{
+ return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
+ MLXSW_REG_SFD_OP_WRITE_REMOVE;
+}
+
+static int
+mlxsw_sp_port_fdb_tun_uc_op4(struct mlxsw_sp *mlxsw_sp, bool dynamic,
+ const char *mac, u16 fid, __be32 addr, bool adding)
+{
+ char *sfd_pl;
+ u8 num_rec;
+ u32 uip;
+ int err;
+
+ sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
+ if (!sfd_pl)
+ return -ENOMEM;
+
+ uip = be32_to_cpu(addr);
+ mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
+ mlxsw_reg_sfd_uc_tunnel_pack4(sfd_pl, 0,
+ mlxsw_sp_sfd_rec_policy(dynamic), mac,
+ fid, MLXSW_REG_SFD_REC_ACTION_NOP, uip);
+ num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
+ if (err)
+ goto out;
+
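+ /* The device returns the number of records it actually processed;
+ * a mismatch means the entry was not committed.
+ */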
+ if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+ err = -EBUSY;
+
+out:
+ kfree(sfd_pl);
+ return err;
+}
+
+static int mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(struct mlxsw_sp *mlxsw_sp,
+ const char *mac, u16 fid,
+ u32 kvdl_index, bool adding)
+{
+ char *sfd_pl;
+ u8 num_rec;
+ int err;
+
+ sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
+ if (!sfd_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
+ mlxsw_reg_sfd_uc_tunnel_pack6(sfd_pl, 0, mac, fid,
+ MLXSW_REG_SFD_REC_ACTION_NOP, kvdl_index);
+ num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
+ if (err)
+ goto out;
+
+ if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+ err = -EBUSY;
+
+out:
+ kfree(sfd_pl);
+ return err;
+}
+
+static int mlxsw_sp_port_fdb_tun_uc_op6_add(struct mlxsw_sp *mlxsw_sp,
+ const char *mac, u16 fid,
+ const struct in6_addr *addr)
+{
+ u32 kvdl_index;
+ int err;
+
+ err = mlxsw_sp_nve_ipv6_addr_kvdl_set(mlxsw_sp, addr, &kvdl_index);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid,
+ kvdl_index, true);
+ if (err)
+ goto err_sfd_write;
+
+ err = mlxsw_sp_nve_ipv6_addr_map_replace(mlxsw_sp, mac, fid, addr);
+ if (err)
+ /* Replace can fail only when creating a new mapping, so removing
+ * the FDB entry in the error path is OK.
+ */
+ goto err_addr_replace;
+
+ return 0;
+
+err_addr_replace:
+ mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid, kvdl_index,
+ false);
+err_sfd_write:
+ mlxsw_sp_nve_ipv6_addr_kvdl_unset(mlxsw_sp, addr);
+ return err;
+}
+
+static void mlxsw_sp_port_fdb_tun_uc_op6_del(struct mlxsw_sp *mlxsw_sp,
+ const char *mac, u16 fid,
+ const struct in6_addr *addr)
+{
+ mlxsw_sp_nve_ipv6_addr_map_del(mlxsw_sp, mac, fid);
+ mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid, 0, false);
+ mlxsw_sp_nve_ipv6_addr_kvdl_unset(mlxsw_sp, addr);
+}
+
+static int
+mlxsw_sp_port_fdb_tun_uc_op6(struct mlxsw_sp *mlxsw_sp, const char *mac,
+ u16 fid, const struct in6_addr *addr, bool adding)
+{
+ if (adding)
+ return mlxsw_sp_port_fdb_tun_uc_op6_add(mlxsw_sp, mac, fid,
+ addr);
+
+ mlxsw_sp_port_fdb_tun_uc_op6_del(mlxsw_sp, mac, fid, addr);
+ return 0;
+}
+
+static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
+ const char *mac, u16 fid,
+ enum mlxsw_sp_l3proto proto,
+ const union mlxsw_sp_l3addr *addr,
+ bool adding, bool dynamic)
+{
+ switch (proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ return mlxsw_sp_port_fdb_tun_uc_op4(mlxsw_sp, dynamic, mac, fid,
+ addr->addr4, adding);
+ case MLXSW_SP_L3_PROTO_IPV6:
+ return mlxsw_sp_port_fdb_tun_uc_op6(mlxsw_sp, mac, fid,
+ &addr->addr6, adding);
+ default:
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
+}
+
+static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
+ const char *mac, u16 fid, u16 vid,
+ bool adding,
+ enum mlxsw_reg_sfd_rec_action action,
+ enum mlxsw_reg_sfd_rec_policy policy)
+{
+ char *sfd_pl;
+ u8 num_rec;
+ int err;
+
+ sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
+ if (!sfd_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
+ mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, vid, action,
+ local_port);
+ num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
+ if (err)
+ goto out;
+
+ if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+ err = -EBUSY;
+
+out:
+ kfree(sfd_pl);
+ return err;
+}
+
+static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
+ const char *mac, u16 fid, u16 vid,
+ bool adding, bool dynamic)
+{
+ return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, vid,
+ adding, MLXSW_REG_SFD_REC_ACTION_NOP,
+ mlxsw_sp_sfd_rec_policy(dynamic));
+}
+
+int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
+ bool adding)
+{
+ return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, 0, adding,
+ MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
+ MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
+}
+
+static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
+ const char *mac, u16 fid, u16 lag_vid,
+ bool adding, bool dynamic)
+{
+ char *sfd_pl;
+ u8 num_rec;
+ int err;
+
+ sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
+ if (!sfd_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
+ mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
+ mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
+ lag_vid, lag_id);
+ num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
+ if (err)
+ goto out;
+
+ if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+ err = -EBUSY;
+
+out:
+ kfree(sfd_pl);
+ return err;
+}
+
+static int
+mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_notifier_fdb_info *fdb_info, bool adding)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct net_device *orig_dev = fdb_info->info.dev;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ u16 fid_index, vid;
+
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
+ if (!bridge_port)
+ return -EINVAL;
+
+ bridge_device = bridge_port->bridge_device;
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
+ bridge_device,
+ fdb_info->vid);
+ if (!mlxsw_sp_port_vlan)
+ return 0;
+
+ fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
+ vid = mlxsw_sp_port_vlan->vid;
+
+ if (!bridge_port->lagged)
+ return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
+ bridge_port->system_port,
+ fdb_info->addr, fid_index, vid,
+ adding, false);
+ else
+ return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
+ bridge_port->lag_id,
+ fdb_info->addr, fid_index,
+ vid, adding, false);
+}
+
+static int mlxsw_sp_mdb_entry_write(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_mdb_entry *mdb_entry,
+ bool adding)
+{
+ char *sfd_pl;
+ u8 num_rec;
+ int err;
+
+ sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
+ if (!sfd_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
+ mlxsw_reg_sfd_mc_pack(sfd_pl, 0, mdb_entry->key.addr,
+ mdb_entry->key.fid, MLXSW_REG_SFD_REC_ACTION_NOP,
+ mdb_entry->mid);
+ num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
+ if (err)
+ goto out;
+
+ if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+ err = -EBUSY;
+
+out:
+ kfree(sfd_pl);
+ return err;
+}
+
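+/* Set the bits of all local ports represented by the bridge port - a single
+ * physical port, or every member of the LAG.
+ */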
+static void
+mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_ports_bitmap *ports_bm)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ u64 max_lag_members, i;
+ int lag_id;
+
+ if (!bridge_port->lagged) {
+ set_bit(bridge_port->system_port, ports_bm->bitmap);
+ } else {
+ max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ MAX_LAG_MEMBERS);
+ lag_id = bridge_port->lag_id;
+ for (i = 0; i < max_lag_members; i++) {
+ mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp,
+ lag_id, i);
+ if (mlxsw_sp_port)
+ set_bit(mlxsw_sp_port->local_port,
+ ports_bm->bitmap);
+ }
+ }
+}
+
+static void
+mlxsw_sp_mc_get_mrouters_bitmap(struct mlxsw_sp_ports_bitmap *flood_bm,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_bridge_port *bridge_port;
+
+ list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
+ if (bridge_port->mrouter) {
+ mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
+ bridge_port,
+ flood_bm);
+ }
+ }
+}
+
+static int mlxsw_sp_mc_mdb_mrouters_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ports_bitmap *ports_bm,
+ struct mlxsw_sp_mdb_entry *mdb_entry)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+ unsigned int nbits = ports_bm->nbits;
+ int i;
+
+ for_each_set_bit(i, ports_bm->bitmap, nbits) {
+ mdb_entry_port = mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp,
+ mdb_entry,
+ i);
+ if (IS_ERR(mdb_entry_port)) {
+ nbits = i;
+ goto err_mrouter_port_get;
+ }
+ }
+
+ return 0;
+
+err_mrouter_port_get:
+ for_each_set_bit(i, ports_bm->bitmap, nbits)
+ mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry, i);
+ return PTR_ERR(mdb_entry_port);
+}
+
+static void mlxsw_sp_mc_mdb_mrouters_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ports_bitmap *ports_bm,
+ struct mlxsw_sp_mdb_entry *mdb_entry)
+{
+ int i;
+
+ for_each_set_bit(i, ports_bm->bitmap, ports_bm->nbits)
+ mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry, i);
+}
+
+static int
+mlxsw_sp_mc_mdb_mrouters_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_mdb_entry *mdb_entry, bool add)
+{
+ struct mlxsw_sp_ports_bitmap ports_bm;
+ int err;
+
+ err = mlxsw_sp_port_bitmap_init(mlxsw_sp, &ports_bm);
+ if (err)
+ return err;
+
+ mlxsw_sp_mc_get_mrouters_bitmap(&ports_bm, bridge_device, mlxsw_sp);
+
+ if (add)
+ err = mlxsw_sp_mc_mdb_mrouters_add(mlxsw_sp, &ports_bm,
+ mdb_entry);
+ else
+ mlxsw_sp_mc_mdb_mrouters_del(mlxsw_sp, &ports_bm, mdb_entry);
+
+ mlxsw_sp_port_bitmap_fini(&ports_bm);
+ return err;
+}
+
+static struct mlxsw_sp_mdb_entry *
+mlxsw_sp_mc_mdb_entry_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ const unsigned char *addr, u16 fid, u16 local_port)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+ struct mlxsw_sp_mdb_entry *mdb_entry;
+ int err;
+
+ mdb_entry = kzalloc(sizeof(*mdb_entry), GFP_KERNEL);
+ if (!mdb_entry)
+ return ERR_PTR(-ENOMEM);
+
+ ether_addr_copy(mdb_entry->key.addr, addr);
+ mdb_entry->key.fid = fid;
+ err = mlxsw_sp_pgt_mid_alloc(mlxsw_sp, &mdb_entry->mid);
+ if (err)
+ goto err_pgt_mid_alloc;
+
+ INIT_LIST_HEAD(&mdb_entry->ports_list);
+
+ err = mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry,
+ true);
+ if (err)
+ goto err_mdb_mrouters_set;
+
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_get(mlxsw_sp, mdb_entry,
+ local_port);
+ if (IS_ERR(mdb_entry_port)) {
+ err = PTR_ERR(mdb_entry_port);
+ goto err_mdb_entry_port_get;
+ }
+
+ if (bridge_device->multicast_enabled) {
+ err = mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, true);
+ if (err)
+ goto err_mdb_entry_write;
+ }
+
+ err = rhashtable_insert_fast(&bridge_device->mdb_ht,
+ &mdb_entry->ht_node,
+ mlxsw_sp_mdb_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ list_add_tail(&mdb_entry->list, &bridge_device->mdb_list);
+
+ return mdb_entry;
+
+err_rhashtable_insert:
+ if (bridge_device->multicast_enabled)
+ mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, false);
+err_mdb_entry_write:
+ mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port, false);
+err_mdb_entry_port_get:
+ mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry, false);
+err_mdb_mrouters_set:
+ mlxsw_sp_pgt_mid_free(mlxsw_sp, mdb_entry->mid);
+err_pgt_mid_alloc:
+ kfree(mdb_entry);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_mc_mdb_entry_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mdb_entry *mdb_entry,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ u16 local_port, bool force)
+{
+ list_del(&mdb_entry->list);
+ rhashtable_remove_fast(&bridge_device->mdb_ht, &mdb_entry->ht_node,
+ mlxsw_sp_mdb_ht_params);
+ if (bridge_device->multicast_enabled)
+ mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, false);
+ mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port, force);
+ mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry, false);
+ WARN_ON(!list_empty(&mdb_entry->ports_list));
+ mlxsw_sp_pgt_mid_free(mlxsw_sp, mdb_entry->mid);
+ kfree(mdb_entry);
+}
+
+static struct mlxsw_sp_mdb_entry *
+mlxsw_sp_mc_mdb_entry_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ const unsigned char *addr, u16 fid, u16 local_port)
+{
+ struct mlxsw_sp_mdb_entry_key key = {};
+ struct mlxsw_sp_mdb_entry *mdb_entry;
+
+ ether_addr_copy(key.addr, addr);
+ key.fid = fid;
+ mdb_entry = rhashtable_lookup_fast(&bridge_device->mdb_ht, &key,
+ mlxsw_sp_mdb_ht_params);
+ if (mdb_entry) {
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_get(mlxsw_sp,
+ mdb_entry,
+ local_port);
+ if (IS_ERR(mdb_entry_port))
+ return ERR_CAST(mdb_entry_port);
+
+ return mdb_entry;
+ }
+
+ return mlxsw_sp_mc_mdb_entry_init(mlxsw_sp, bridge_device, addr, fid,
+ local_port);
+}
+
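+/* Return true if releasing 'removed_entry_port' would leave the MDB entry
+ * without any member port, in which case the whole entry should be freed.
+ * An mrouter port holds one extra reference that does not count as a
+ * membership.
+ */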
+static bool
+mlxsw_sp_mc_mdb_entry_remove(struct mlxsw_sp_mdb_entry *mdb_entry,
+ struct mlxsw_sp_mdb_entry_port *removed_entry_port,
+ bool force)
+{
+ if (mdb_entry->ports_count > 1)
+ return false;
+
+ if (force)
+ return true;
+
+ if (!removed_entry_port->mrouter &&
+ refcount_read(&removed_entry_port->refcount) > 1)
+ return false;
+
+ if (removed_entry_port->mrouter &&
+ refcount_read(&removed_entry_port->refcount) > 2)
+ return false;
+
+ return true;
+}
+
+static void
+mlxsw_sp_mc_mdb_entry_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_mdb_entry *mdb_entry, u16 local_port,
+ bool force)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
+ if (!mdb_entry_port)
+ return;
+
+ /* Avoid a temporary situation in which the MDB entry points to an empty
+ * PGT entry, as otherwise packets will be temporarily dropped instead
+ * of being flooded. Instead, in this situation, call
+ * mlxsw_sp_mc_mdb_entry_fini(), which first deletes the MDB entry and
+ * then releases the PGT entry.
+ */
+ if (mlxsw_sp_mc_mdb_entry_remove(mdb_entry, mdb_entry_port, force))
+ mlxsw_sp_mc_mdb_entry_fini(mlxsw_sp, mdb_entry, bridge_device,
+ local_port, force);
+ else
+ mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port,
+ force);
+}
+
+static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct net_device *orig_dev = mdb->obj.orig_dev;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ struct mlxsw_sp_mdb_entry *mdb_entry;
+ u16 fid_index;
+
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
+ if (!bridge_port)
+ return 0;
+
+ bridge_device = bridge_port->bridge_device;
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
+ bridge_device,
+ mdb->vid);
+ if (!mlxsw_sp_port_vlan)
+ return 0;
+
+ fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
+
+ mdb_entry = mlxsw_sp_mc_mdb_entry_get(mlxsw_sp, bridge_device,
+ mdb->addr, fid_index,
+ mlxsw_sp_port->local_port);
+ if (IS_ERR(mdb_entry))
+ return PTR_ERR(mdb_entry);
+
+ return 0;
+}
+
+static int
+mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ bool mc_enabled)
+{
+ struct mlxsw_sp_mdb_entry *mdb_entry;
+ int err;
+
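+ /* Re-write all MDB entries of the bridge to reflect the new multicast
+ * enabled state, rolling already written entries back on failure.
+ */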
+ list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
+ err = mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, mc_enabled);
+ if (err)
+ goto err_mdb_entry_write;
+ }
+ return 0;
+
+err_mdb_entry_write:
+ list_for_each_entry_continue_reverse(mdb_entry,
+ &bridge_device->mdb_list, list)
+ mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, !mc_enabled);
+ return err;
+}
+
+static void
+mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ bool add)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ u16 local_port = mlxsw_sp_port->local_port;
+ struct mlxsw_sp_mdb_entry *mdb_entry;
+
+ bridge_device = bridge_port->bridge_device;
+
+ list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
+ if (add)
+ mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp, mdb_entry,
+ local_port);
+ else
+ mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry,
+ local_port);
+ }
+}
+
+static int mlxsw_sp_port_obj_add(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ const struct switchdev_obj_port_vlan *vlan;
+ int err = 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+
+ err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, extack);
+
+ /* The event is emitted before the changes are actually
+ * applied to the bridge. Therefore schedule the respin
+ * call for later, so that the respin logic sees the
+ * updated bridge state.
+ */
+ mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
+ SWITCHDEV_OBJ_PORT_MDB(obj));
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static void
+mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
+{
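+ /* If the VLAN being deleted is the PVID, the port is left without a
+ * PVID; otherwise the current PVID is kept.
+ */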
+ u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ u16 proto;
+
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
+ if (WARN_ON(!mlxsw_sp_port_vlan))
+ return;
+
+ mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
+ br_vlan_get_proto(bridge_port->bridge_device->dev, &proto);
+ mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid, proto);
+ mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
+ mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
+}
+
+static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct net_device *orig_dev = vlan->obj.orig_dev;
+ struct mlxsw_sp_bridge_port *bridge_port;
+
+ if (netif_is_bridge_master(orig_dev))
+ return -EOPNOTSUPP;
+
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
+ if (WARN_ON(!bridge_port))
+ return -EINVAL;
+
+ if (!bridge_port->bridge_device->vlan_enabled)
+ return 0;
+
+ mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vlan->vid);
+
+ return 0;
+}
+
+static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct net_device *orig_dev = mdb->obj.orig_dev;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct net_device *dev = mlxsw_sp_port->dev;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ struct mlxsw_sp_mdb_entry_key key = {};
+ struct mlxsw_sp_mdb_entry *mdb_entry;
+ u16 fid_index;
+
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
+ if (!bridge_port)
+ return 0;
+
+ bridge_device = bridge_port->bridge_device;
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
+ bridge_device,
+ mdb->vid);
+ if (!mlxsw_sp_port_vlan)
+ return 0;
+
+ fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
+
+ ether_addr_copy(key.addr, mdb->addr);
+ key.fid = fid_index;
+ mdb_entry = rhashtable_lookup_fast(&bridge_device->mdb_ht, &key,
+ mlxsw_sp_mdb_ht_params);
+ if (!mdb_entry) {
+ netdev_err(dev, "Unable to remove port from MC DB\n");
+ return -EINVAL;
+ }
+
+ mlxsw_sp_mc_mdb_entry_put(mlxsw_sp, bridge_device, mdb_entry,
+ mlxsw_sp_port->local_port, false);
+ return 0;
+}
+
+static void
+mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ u16 fid_index)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_mdb_entry *mdb_entry, *tmp;
+ u16 local_port = mlxsw_sp_port->local_port;
+
+ bridge_device = bridge_port->bridge_device;
+
+ list_for_each_entry_safe(mdb_entry, tmp, &bridge_device->mdb_list,
+ list) {
+ if (mdb_entry->key.fid != fid_index)
+ continue;
+
+ if (bridge_port->mrouter)
+ mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp,
+ mdb_entry,
+ local_port);
+
+ mlxsw_sp_mc_mdb_entry_put(mlxsw_sp, bridge_device, mdb_entry,
+ local_port, true);
+ }
+}
+
+static int mlxsw_sp_port_obj_del(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err = 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
+ SWITCHDEV_OBJ_PORT_VLAN(obj));
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
+ SWITCHDEV_OBJ_PORT_MDB(obj));
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
+
+ return err;
+}
+
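+ /* Any member port of the LAG can act as its representor, so return the
+ * first one found.
+ */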
+static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
+ u16 lag_id)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ u64 max_lag_members;
+ int i;
+
+ max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ MAX_LAG_MEMBERS);
+ for (i = 0; i < max_lag_members; i++) {
+ mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
+ if (mlxsw_sp_port)
+ return mlxsw_sp_port;
+ }
+ return NULL;
+}
+
+static int
+mlxsw_sp_bridge_vlan_aware_port_join(struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netlink_ext_ack *extack)
+{
+ if (is_vlan_dev(bridge_port->dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
+ return -EINVAL;
+ }
+
+ /* Port is no longer usable as a router interface */
+ if (mlxsw_sp_port->default_vlan->fid)
+ mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
+
+ return 0;
+}
+
+static int
+mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netlink_ext_ack *extack)
+{
+ return mlxsw_sp_bridge_vlan_aware_port_join(bridge_port, mlxsw_sp_port,
+ extack);
+}
+
+static void
+mlxsw_sp_bridge_vlan_aware_port_leave(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ /* Make sure untagged frames are allowed to ingress */
+ mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
+ ETH_P_8021Q);
+}
+
+static void
+mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_bridge_vlan_aware_port_leave(mlxsw_sp_port);
+}
+
+static int
+mlxsw_sp_bridge_vlan_aware_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
+ const struct net_device *vxlan_dev,
+ u16 vid, u16 ethertype,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
+ struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
+ struct mlxsw_sp_nve_params params = {
+ .type = MLXSW_SP_NVE_TYPE_VXLAN,
+ .vni = vxlan->cfg.vni,
+ .dev = vxlan_dev,
+ .ethertype = ethertype,
+ };
+ struct mlxsw_sp_fid *fid;
+ int err;
+
+ /* If the VLAN is 0, we need to find the VLAN that is configured as
+ * PVID and egress untagged on the bridge port of the VxLAN device.
+ * It is possible that no such VLAN exists.
+ */
+ if (!vid) {
+ err = mlxsw_sp_vxlan_mapped_vid(vxlan_dev, &vid);
+ if (err || !vid)
+ return err;
+ }
+
+ fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
+ if (IS_ERR(fid)) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1Q FID");
+ return PTR_ERR(fid);
+ }
+
+ if (mlxsw_sp_fid_vni_is_set(fid)) {
+ NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
+ err = -EINVAL;
+ goto err_vni_exists;
+ }
+
+ err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
+ if (err)
+ goto err_nve_fid_enable;
+
+ return 0;
+
+err_nve_fid_enable:
+err_vni_exists:
+ mlxsw_sp_fid_put(fid);
+ return err;
+}
+
+static int
+mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
+ const struct net_device *vxlan_dev, u16 vid,
+ struct netlink_ext_ack *extack)
+{
+ return mlxsw_sp_bridge_vlan_aware_vxlan_join(bridge_device, vxlan_dev,
+ vid, ETH_P_8021Q, extack);
+}
+
+static struct net_device *
+mlxsw_sp_bridge_8021q_vxlan_dev_find(struct net_device *br_dev, u16 vid)
+{
+ struct net_device *dev;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(br_dev, dev, iter) {
+ u16 pvid;
+ int err;
+
+ if (!netif_is_vxlan(dev))
+ continue;
+
+ err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
+ if (err || pvid != vid)
+ continue;
+
+ return dev;
+ }
+
+ return NULL;
+}
+
+static struct mlxsw_sp_fid *
+mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
+ u16 vid, struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
+
+ return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
+}
+
+static struct mlxsw_sp_fid *
+mlxsw_sp_bridge_8021q_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
+
+ return mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid);
+}
+
+static u16
+mlxsw_sp_bridge_8021q_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
+ const struct mlxsw_sp_fid *fid)
+{
+ return mlxsw_sp_fid_8021q_vid(fid);
+}
+
+static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
+ .port_join = mlxsw_sp_bridge_8021q_port_join,
+ .port_leave = mlxsw_sp_bridge_8021q_port_leave,
+ .vxlan_join = mlxsw_sp_bridge_8021q_vxlan_join,
+ .fid_get = mlxsw_sp_bridge_8021q_fid_get,
+ .fid_lookup = mlxsw_sp_bridge_8021q_fid_lookup,
+ .fid_vid = mlxsw_sp_bridge_8021q_fid_vid,
+};
+
+static bool
+mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct net_device *br_dev)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+
+ list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
+ list) {
+ if (mlxsw_sp_port_vlan->bridge_port &&
+ mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
+ br_dev)
+ return true;
+ }
+
+ return false;
+}
+
+static int
+mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ struct net_device *dev = bridge_port->dev;
+ u16 vid;
+
+ vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
+ if (WARN_ON(!mlxsw_sp_port_vlan))
+ return -EINVAL;
+
+ if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port");
+ return -EINVAL;
+ }
+
+ /* Port is no longer usable as a router interface */
+ if (mlxsw_sp_port_vlan->fid)
+ mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
+
+ return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
+ extack);
+}
+
+static void
+mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ struct net_device *dev = bridge_port->dev;
+ u16 vid;
+
+ vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
+ if (!mlxsw_sp_port_vlan || !mlxsw_sp_port_vlan->bridge_port)
+ return;
+
+ mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
+}
+
+static int
+mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
+ const struct net_device *vxlan_dev, u16 vid,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
+ struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
+ struct mlxsw_sp_nve_params params = {
+ .type = MLXSW_SP_NVE_TYPE_VXLAN,
+ .vni = vxlan->cfg.vni,
+ .dev = vxlan_dev,
+ .ethertype = ETH_P_8021Q,
+ };
+ struct mlxsw_sp_fid *fid;
+ int err;
+
+ fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
+ if (IS_ERR(fid)) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1D FID");
+ return -EINVAL;
+ }
+
+ if (mlxsw_sp_fid_vni_is_set(fid)) {
+ NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
+ err = -EINVAL;
+ goto err_vni_exists;
+ }
+
+ err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
+ if (err)
+ goto err_nve_fid_enable;
+
+ return 0;
+
+err_nve_fid_enable:
+err_vni_exists:
+ mlxsw_sp_fid_put(fid);
+ return err;
+}
+
+static struct mlxsw_sp_fid *
+mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
+ u16 vid, struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
+
+ return mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
+}
+
+static struct mlxsw_sp_fid *
+mlxsw_sp_bridge_8021d_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
+
+ /* The only valid VLAN for a VLAN-unaware bridge is 0 */
+ if (vid)
+ return NULL;
+
+ return mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
+}
+
+static u16
+mlxsw_sp_bridge_8021d_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
+ const struct mlxsw_sp_fid *fid)
+{
+ return 0;
+}
+
+static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
+ .port_join = mlxsw_sp_bridge_8021d_port_join,
+ .port_leave = mlxsw_sp_bridge_8021d_port_leave,
+ .vxlan_join = mlxsw_sp_bridge_8021d_vxlan_join,
+ .fid_get = mlxsw_sp_bridge_8021d_fid_get,
+ .fid_lookup = mlxsw_sp_bridge_8021d_fid_lookup,
+ .fid_vid = mlxsw_sp_bridge_8021d_fid_vid,
+};
+
+static int
+mlxsw_sp_bridge_8021ad_port_join(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, false);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_bridge_vlan_aware_port_join(bridge_port, mlxsw_sp_port,
+ extack);
+ if (err)
+ goto err_bridge_vlan_aware_port_join;
+
+ return 0;
+
+err_bridge_vlan_aware_port_join:
+ mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
+ return err;
+}
+
+static void
+mlxsw_sp_bridge_8021ad_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_bridge_vlan_aware_port_leave(mlxsw_sp_port);
+ mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
+}
+
+static int
+mlxsw_sp_bridge_8021ad_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
+ const struct net_device *vxlan_dev, u16 vid,
+ struct netlink_ext_ack *extack)
+{
+ return mlxsw_sp_bridge_vlan_aware_vxlan_join(bridge_device, vxlan_dev,
+ vid, ETH_P_8021AD, extack);
+}
+
+static const struct mlxsw_sp_bridge_ops mlxsw_sp1_bridge_8021ad_ops = {
+ .port_join = mlxsw_sp_bridge_8021ad_port_join,
+ .port_leave = mlxsw_sp_bridge_8021ad_port_leave,
+ .vxlan_join = mlxsw_sp_bridge_8021ad_vxlan_join,
+ .fid_get = mlxsw_sp_bridge_8021q_fid_get,
+ .fid_lookup = mlxsw_sp_bridge_8021q_fid_lookup,
+ .fid_vid = mlxsw_sp_bridge_8021q_fid_vid,
+};
+
+static int
+mlxsw_sp2_bridge_8021ad_port_join(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ /* The EtherType of decapsulated packets is determined at the egress
+ * port to allow 802.1d and 802.1ad bridges with VXLAN devices to
+ * co-exist.
+ */
+ err = mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021AD);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_bridge_8021ad_port_join(bridge_device, bridge_port,
+ mlxsw_sp_port, extack);
+ if (err)
+ goto err_bridge_8021ad_port_join;
+
+ return 0;
+
+err_bridge_8021ad_port_join:
+ mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021Q);
+ return err;
+}
+
+static void
+mlxsw_sp2_bridge_8021ad_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_bridge_8021ad_port_leave(bridge_device, bridge_port,
+ mlxsw_sp_port);
+ mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021Q);
+}
+
+static const struct mlxsw_sp_bridge_ops mlxsw_sp2_bridge_8021ad_ops = {
+ .port_join = mlxsw_sp2_bridge_8021ad_port_join,
+ .port_leave = mlxsw_sp2_bridge_8021ad_port_leave,
+ .vxlan_join = mlxsw_sp_bridge_8021ad_vxlan_join,
+ .fid_get = mlxsw_sp_bridge_8021q_fid_get,
+ .fid_lookup = mlxsw_sp_bridge_8021q_fid_lookup,
+ .fid_vid = mlxsw_sp_bridge_8021q_fid_vid,
+};
+
+int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *brport_dev,
+ struct net_device *br_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ int err;
+
+ bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev,
+ extack);
+ if (IS_ERR(bridge_port))
+ return PTR_ERR(bridge_port);
+ bridge_device = bridge_port->bridge_device;
+
+ err = bridge_device->ops->port_join(bridge_device, bridge_port,
+ mlxsw_sp_port, extack);
+ if (err)
+ goto err_port_join;
+
+ return 0;
+
+err_port_join:
+ mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
+ return err;
+}
+
+void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *brport_dev,
+ struct net_device *br_dev)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_bridge_port *bridge_port;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (!bridge_device)
+ return;
+ bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
+ if (!bridge_port)
+ return;
+
+ bridge_device->ops->port_leave(bridge_device, bridge_port,
+ mlxsw_sp_port);
+ mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
+}
+
+int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev,
+ const struct net_device *vxlan_dev, u16 vid,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_bridge_device *bridge_device;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (WARN_ON(!bridge_device))
+ return -EINVAL;
+
+ return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid,
+ extack);
+}
+
+void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *vxlan_dev)
+{
+ struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
+ struct mlxsw_sp_fid *fid;
+
+ /* If the VxLAN device is down, then the FID does not have a VNI */
+ fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan->cfg.vni);
+ if (!fid)
+ return;
+
+ mlxsw_sp_nve_fid_disable(mlxsw_sp, fid);
+ /* Drop both the reference we just took during lookup and the reference
+ * the VXLAN device took.
+ */
+ mlxsw_sp_fid_put(fid);
+ mlxsw_sp_fid_put(fid);
+}
+
+static void
+mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
+ enum mlxsw_sp_l3proto *proto,
+ union mlxsw_sp_l3addr *addr)
+{
+ if (vxlan_addr->sa.sa_family == AF_INET) {
+ addr->addr4 = vxlan_addr->sin.sin_addr.s_addr;
+ *proto = MLXSW_SP_L3_PROTO_IPV4;
+ } else {
+ addr->addr6 = vxlan_addr->sin6.sin6_addr;
+ *proto = MLXSW_SP_L3_PROTO_IPV6;
+ }
+}
+
+static void
+mlxsw_sp_switchdev_addr_vxlan_convert(enum mlxsw_sp_l3proto proto,
+ const union mlxsw_sp_l3addr *addr,
+ union vxlan_addr *vxlan_addr)
+{
+ switch (proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ vxlan_addr->sa.sa_family = AF_INET;
+ vxlan_addr->sin.sin_addr.s_addr = addr->addr4;
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ vxlan_addr->sa.sa_family = AF_INET6;
+ vxlan_addr->sin6.sin6_addr = addr->addr6;
+ break;
+ }
+}
+
+static void mlxsw_sp_fdb_vxlan_call_notifiers(struct net_device *dev,
+ const char *mac,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr,
+ __be32 vni, bool adding)
+{
+ struct switchdev_notifier_vxlan_fdb_info info;
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ enum switchdev_notifier_type type;
+
+ type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE :
+ SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE;
+ mlxsw_sp_switchdev_addr_vxlan_convert(proto, addr, &info.remote_ip);
+ info.remote_port = vxlan->cfg.dst_port;
+ info.remote_vni = vni;
+ info.remote_ifindex = 0;
+ ether_addr_copy(info.eth_addr, mac);
+ info.vni = vni;
+ info.offloaded = adding;
+ call_switchdev_notifiers(type, dev, &info.info, NULL);
+}
+
+static void mlxsw_sp_fdb_nve_call_notifiers(struct net_device *dev,
+ const char *mac,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr,
+ __be32 vni,
+ bool adding)
+{
+ if (netif_is_vxlan(dev))
+ mlxsw_sp_fdb_vxlan_call_notifiers(dev, mac, proto, addr, vni,
+ adding);
+}
+
+static void
+mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
+ const char *mac, u16 vid,
+ struct net_device *dev, bool offloaded)
+{
+ struct switchdev_notifier_fdb_info info = {};
+
+ info.addr = mac;
+ info.vid = vid;
+ info.offloaded = offloaded;
+ call_switchdev_notifiers(type, dev, &info.info, NULL);
+}
+
+static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
+ char *sfn_pl, int rec_index,
+ bool adding)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ u16 local_port, vid, fid, evid = 0;
+ enum switchdev_notifier_type type;
+ char mac[ETH_ALEN];
+ bool do_notification = true;
+ int err;
+
+ mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
+
+ if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
+ return;
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ if (!mlxsw_sp_port) {
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
+ goto just_remove;
+ }
+
+ if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
+ goto just_remove;
+
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
+ if (!mlxsw_sp_port_vlan) {
+ netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
+ goto just_remove;
+ }
+
+ bridge_port = mlxsw_sp_port_vlan->bridge_port;
+ if (!bridge_port) {
+ netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
+ goto just_remove;
+ }
+
+ bridge_device = bridge_port->bridge_device;
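+ /* A VLAN-unaware bridge is notified with VID 0, while the egress VID
+ * (evid) programmed to the device is always the {Port, VID} VID.
+ */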
+ vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
+ evid = mlxsw_sp_port_vlan->vid;
+
+do_fdb_op:
+ err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, evid,
+ adding, true);
+ if (err) {
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
+ return;
+ }
+
+ if (!do_notification)
+ return;
+ type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
+ mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);
+
+ return;
+
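+ /* The record cannot be associated with a valid {Port, VID}. Remove the
+ * FDB entry from the device without notifying the bridge.
+ */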
+just_remove:
+ adding = false;
+ do_notification = false;
+ goto do_fdb_op;
+}
+
+static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
+ char *sfn_pl, int rec_index,
+ bool adding)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ enum switchdev_notifier_type type;
+ char mac[ETH_ALEN];
+ u16 lag_vid = 0;
+ u16 lag_id;
+ u16 vid, fid;
+ bool do_notification = true;
+ int err;
+
+ mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
+ mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
+ if (!mlxsw_sp_port) {
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
+ goto just_remove;
+ }
+
+ if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
+ goto just_remove;
+
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
+ if (!mlxsw_sp_port_vlan) {
+ netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
+ goto just_remove;
+ }
+
+ bridge_port = mlxsw_sp_port_vlan->bridge_port;
+ if (!bridge_port) {
+ netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
+ goto just_remove;
+ }
+
+ bridge_device = bridge_port->bridge_device;
+ vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
+ lag_vid = mlxsw_sp_port_vlan->vid;
+
+do_fdb_op:
+ err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
+ adding, true);
+ if (err) {
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
+ return;
+ }
+
+ if (!do_notification)
+ return;
+ type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
+ mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);
+
+ return;
+
+just_remove:
+ adding = false;
+ do_notification = false;
+ goto do_fdb_op;
+}
+
+static int
+__mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_fid *fid,
+ bool adding,
+ struct net_device **nve_dev,
+ u16 *p_vid, __be32 *p_vni)
+{
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct net_device *br_dev, *dev;
+ int nve_ifindex;
+ int err;
+
+ err = mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_fid_vni(fid, p_vni);
+ if (err)
+ return err;
+
+ dev = __dev_get_by_index(mlxsw_sp_net(mlxsw_sp), nve_ifindex);
+ if (!dev)
+ return -EINVAL;
+ *nve_dev = dev;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (adding && !br_port_flag_is_set(dev, BR_LEARNING))
+ return -EINVAL;
+
+ if (adding && netif_is_vxlan(dev)) {
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+
+ if (!(vxlan->cfg.flags & VXLAN_F_LEARN))
+ return -EINVAL;
+ }
+
+ br_dev = netdev_master_upper_dev_get(dev);
+ if (!br_dev)
+ return -EINVAL;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (!bridge_device)
+ return -EINVAL;
+
+ *p_vid = bridge_device->ops->fid_vid(bridge_device, fid);
+
+ return 0;
+}
+
+static void mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
+ char *sfn_pl,
+ int rec_index,
+ bool adding)
+{
+ enum mlxsw_reg_sfn_uc_tunnel_protocol sfn_proto;
+ enum switchdev_notifier_type type;
+ struct net_device *nve_dev;
+ union mlxsw_sp_l3addr addr;
+ struct mlxsw_sp_fid *fid;
+ char mac[ETH_ALEN];
+ u16 fid_index, vid;
+ __be32 vni;
+ u32 uip;
+ int err;
+
+ mlxsw_reg_sfn_uc_tunnel_unpack(sfn_pl, rec_index, mac, &fid_index,
+ &uip, &sfn_proto);
+
+ fid = mlxsw_sp_fid_lookup_by_index(mlxsw_sp, fid_index);
+ if (!fid)
+ goto err_fid_lookup;
+
+ err = mlxsw_sp_nve_learned_ip_resolve(mlxsw_sp, uip,
+ (enum mlxsw_sp_l3proto) sfn_proto,
+ &addr);
+ if (err)
+ goto err_ip_resolve;
+
+ err = __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, fid, adding,
+ &nve_dev, &vid, &vni);
+ if (err)
+ goto err_fdb_process;
+
+ err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
+ (enum mlxsw_sp_l3proto) sfn_proto,
+ &addr, adding, true);
+ if (err)
+ goto err_fdb_op;
+
+ mlxsw_sp_fdb_nve_call_notifiers(nve_dev, mac,
+ (enum mlxsw_sp_l3proto) sfn_proto,
+ &addr, vni, adding);
+
+ type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE :
+ SWITCHDEV_FDB_DEL_TO_BRIDGE;
+ mlxsw_sp_fdb_call_notifiers(type, mac, vid, nve_dev, adding);
+
+ mlxsw_sp_fid_put(fid);
+
+ return;
+
+err_fdb_op:
+err_fdb_process:
+err_ip_resolve:
+ mlxsw_sp_fid_put(fid);
+err_fid_lookup:
+ /* Remove an FDB entry in case we cannot process it. Otherwise the
+ * device will keep sending the same notification over and over again.
+ */
+ mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
+ (enum mlxsw_sp_l3proto) sfn_proto, &addr,
+ false, true);
+}
+
+static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
+ char *sfn_pl, int rec_index)
+{
+ switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
+ case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
+ mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
+ rec_index, true);
+ break;
+ case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
+ mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
+ rec_index, false);
+ break;
+ case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
+ mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
+ rec_index, true);
+ break;
+ case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
+ mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
+ rec_index, false);
+ break;
+ case MLXSW_REG_SFN_REC_TYPE_LEARNED_UNICAST_TUNNEL:
+ mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
+ rec_index, true);
+ break;
+ case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_UNICAST_TUNNEL:
+ mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
+ rec_index, false);
+ break;
+ }
+}
+
+#define MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION 10
+
+static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
+{
+ struct mlxsw_sp_bridge *bridge;
+ struct mlxsw_sp *mlxsw_sp;
+ bool reschedule = false;
+ char *sfn_pl;
+ int queries;
+ u8 num_rec;
+ int i;
+ int err;
+
+ sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
+ if (!sfn_pl)
+ return;
+
+ bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
+ mlxsw_sp = bridge->mlxsw_sp;
+
+ rtnl_lock();
+ if (list_empty(&bridge->bridges_list))
+ goto out;
+ reschedule = true;
+ queries = MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION;
+ while (queries > 0) {
+ mlxsw_reg_sfn_pack(sfn_pl);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
+ if (err) {
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
+ goto out;
+ }
+ num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
+ for (i = 0; i < num_rec; i++)
+ mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
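+ /* Fewer records than the maximum means there are no more
+ * notifications queued, so stop polling.
+ */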
+ if (num_rec != MLXSW_REG_SFN_REC_MAX_COUNT)
+ goto out;
+ queries--;
+ }
+
+out:
+ rtnl_unlock();
+ kfree(sfn_pl);
+ if (!reschedule)
+ return;
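+ /* Reschedule the work. 'queries == 0' means the per-session query
+ * budget was exhausted, so more records may still be pending.
+ */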
+ mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp, !queries);
+}
+
+struct mlxsw_sp_switchdev_event_work {
+ struct work_struct work;
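+ /* Which member of the union below is valid depends on 'event'. */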
+ union {
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
+ };
+ struct net_device *dev;
+ unsigned long event;
+};
+
+static void
+mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_switchdev_event_work *
+ switchdev_work,
+ struct mlxsw_sp_fid *fid, __be32 vni)
+{
+ struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct net_device *dev = switchdev_work->dev;
+ enum mlxsw_sp_l3proto proto;
+ union mlxsw_sp_l3addr addr;
+ int err;
+
+ fdb_info = &switchdev_work->fdb_info;
+ err = vxlan_fdb_find_uc(dev, fdb_info->addr, vni, &vxlan_fdb_info);
+ if (err)
+ return;
+
+ mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info.remote_ip,
+ &proto, &addr);
+
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
+ vxlan_fdb_info.eth_addr,
+ mlxsw_sp_fid_index(fid),
+ proto, &addr, true, false);
+ if (err)
+ return;
+ vxlan_fdb_info.offloaded = true;
+ call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
+ &vxlan_fdb_info.info, NULL);
+ mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
+ vxlan_fdb_info.eth_addr,
+ fdb_info->vid, dev, true);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
+ vxlan_fdb_info.eth_addr,
+ mlxsw_sp_fid_index(fid),
+ proto, &addr, false,
+ false);
+ vxlan_fdb_info.offloaded = false;
+ call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
+ &vxlan_fdb_info.info, NULL);
+ break;
+ }
+}
+
+static void
+mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work *
+ switchdev_work)
+{
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct net_device *dev = switchdev_work->dev;
+ struct net_device *br_dev;
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_fid *fid;
+ __be32 vni;
+ int err;
+
+ if (switchdev_work->event != SWITCHDEV_FDB_ADD_TO_DEVICE &&
+ switchdev_work->event != SWITCHDEV_FDB_DEL_TO_DEVICE)
+ return;
+
+ if (switchdev_work->event == SWITCHDEV_FDB_ADD_TO_DEVICE &&
+ (!switchdev_work->fdb_info.added_by_user ||
+ switchdev_work->fdb_info.is_local))
+ return;
+
+ if (!netif_running(dev))
+ return;
+ br_dev = netdev_master_upper_dev_get(dev);
+ if (!br_dev)
+ return;
+ if (!netif_is_bridge_master(br_dev))
+ return;
+ mlxsw_sp = mlxsw_sp_lower_get(br_dev);
+ if (!mlxsw_sp)
+ return;
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (!bridge_device)
+ return;
+
+ fid = bridge_device->ops->fid_lookup(bridge_device,
+ switchdev_work->fdb_info.vid);
+ if (!fid)
+ return;
+
+ err = mlxsw_sp_fid_vni(fid, &vni);
+ if (err)
+ goto out;
+
+ mlxsw_sp_switchdev_bridge_vxlan_fdb_event(mlxsw_sp, switchdev_work, fid,
+ vni);
+
+out:
+ mlxsw_sp_fid_put(fid);
+}
+
+static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
+{
+ struct mlxsw_sp_switchdev_event_work *switchdev_work =
+ container_of(work, struct mlxsw_sp_switchdev_event_work, work);
+ struct net_device *dev = switchdev_work->dev;
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ int err;
+
+ rtnl_lock();
+ if (netif_is_vxlan(dev)) {
+ mlxsw_sp_switchdev_bridge_nve_fdb_event(switchdev_work);
+ goto out;
+ }
+
+ mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
+ if (!mlxsw_sp_port)
+ goto out;
+
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ fdb_info = &switchdev_work->fdb_info;
+ if (!fdb_info->added_by_user || fdb_info->is_local)
+ break;
+ err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
+ if (err)
+ break;
+ mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
+ fdb_info->addr,
+ fdb_info->vid, dev, true);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb_info = &switchdev_work->fdb_info;
+ mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
+ break;
+ case SWITCHDEV_FDB_ADD_TO_BRIDGE:
+ case SWITCHDEV_FDB_DEL_TO_BRIDGE:
+ /* These events are only used to potentially update an existing
+ * SPAN mirror.
+ */
+ break;
+ }
+
+ mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
+
+out:
+ rtnl_unlock();
+ kfree(switchdev_work->fdb_info.addr);
+ kfree(switchdev_work);
+ dev_put(dev);
+}
+
+static void
+mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_switchdev_event_work *
+ switchdev_work)
+{
+ struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct net_device *dev = switchdev_work->dev;
+ u8 all_zeros_mac[ETH_ALEN] = { 0 };
+ enum mlxsw_sp_l3proto proto;
+ union mlxsw_sp_l3addr addr;
+ struct net_device *br_dev;
+ struct mlxsw_sp_fid *fid;
+ u16 vid;
+ int err;
+
+ vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
+ br_dev = netdev_master_upper_dev_get(dev);
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (!bridge_device)
+ return;
+
+ fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
+ if (!fid)
+ return;
+
+ mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
+ &proto, &addr);
+
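+ /* An all-zeros MAC is the VxLAN device's default destination entry,
+ * used for flooding. It is offloaded by adding the remote IP to the
+ * FID's flood list rather than as a unicast FDB entry.
+ */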
+ if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
+ err = mlxsw_sp_nve_flood_ip_add(mlxsw_sp, fid, proto, &addr);
+ if (err) {
+ mlxsw_sp_fid_put(fid);
+ return;
+ }
+ vxlan_fdb_info->offloaded = true;
+ call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
+ &vxlan_fdb_info->info, NULL);
+ mlxsw_sp_fid_put(fid);
+ return;
+ }
+
+ /* The device has a single FDB table, whereas Linux has two - one
+ * in the bridge driver and another in the VxLAN driver. We only
+ * program an entry to the device if the MAC points to the VxLAN
+ * device in the bridge's FDB table
+ */
+ vid = bridge_device->ops->fid_vid(bridge_device, fid);
+ if (br_fdb_find_port(br_dev, vxlan_fdb_info->eth_addr, vid) != dev)
+ goto err_br_fdb_find;
+
+ err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
+ mlxsw_sp_fid_index(fid), proto,
+ &addr, true, false);
+ if (err)
+ goto err_fdb_tunnel_uc_op;
+ vxlan_fdb_info->offloaded = true;
+ call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
+ &vxlan_fdb_info->info, NULL);
+ mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
+ vxlan_fdb_info->eth_addr, vid, dev, true);
+
+ mlxsw_sp_fid_put(fid);
+
+ return;
+
+err_fdb_tunnel_uc_op:
+err_br_fdb_find:
+ mlxsw_sp_fid_put(fid);
+}
+
+static void
+mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_switchdev_event_work *
+ switchdev_work)
+{
+ struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct net_device *dev = switchdev_work->dev;
+ struct net_device *br_dev = netdev_master_upper_dev_get(dev);
+ u8 all_zeros_mac[ETH_ALEN] = { 0 };
+ enum mlxsw_sp_l3proto proto;
+ union mlxsw_sp_l3addr addr;
+ struct mlxsw_sp_fid *fid;
+ u16 vid;
+
+ vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
+ if (!vxlan_fdb_info->offloaded)
+ return;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (!bridge_device)
+ return;
+
+ fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
+ if (!fid)
+ return;
+
+ mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
+ &proto, &addr);
+
+ if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
+ mlxsw_sp_nve_flood_ip_del(mlxsw_sp, fid, proto, &addr);
+ mlxsw_sp_fid_put(fid);
+ return;
+ }
+
+ mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
+ mlxsw_sp_fid_index(fid), proto, &addr,
+ false, false);
+ vid = bridge_device->ops->fid_vid(bridge_device, fid);
+ mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
+ vxlan_fdb_info->eth_addr, vid, dev, false);
+
+ mlxsw_sp_fid_put(fid);
+}
+
+static void mlxsw_sp_switchdev_vxlan_fdb_event_work(struct work_struct *work)
+{
+ struct mlxsw_sp_switchdev_event_work *switchdev_work =
+ container_of(work, struct mlxsw_sp_switchdev_event_work, work);
+ struct net_device *dev = switchdev_work->dev;
+ struct mlxsw_sp *mlxsw_sp;
+ struct net_device *br_dev;
+
+ rtnl_lock();
+
+ if (!netif_running(dev))
+ goto out;
+ br_dev = netdev_master_upper_dev_get(dev);
+ if (!br_dev)
+ goto out;
+ if (!netif_is_bridge_master(br_dev))
+ goto out;
+ mlxsw_sp = mlxsw_sp_lower_get(br_dev);
+ if (!mlxsw_sp)
+ goto out;
+
+ switch (switchdev_work->event) {
+ case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
+ mlxsw_sp_switchdev_vxlan_fdb_add(mlxsw_sp, switchdev_work);
+ break;
+ case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
+ mlxsw_sp_switchdev_vxlan_fdb_del(mlxsw_sp, switchdev_work);
+ break;
+ }
+
+out:
+ rtnl_unlock();
+ kfree(switchdev_work);
+ dev_put(dev);
+}
+
+static int
+mlxsw_sp_switchdev_vxlan_work_prepare(struct mlxsw_sp_switchdev_event_work *
+ switchdev_work,
+ struct switchdev_notifier_info *info)
+{
+ struct vxlan_dev *vxlan = netdev_priv(switchdev_work->dev);
+ struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
+ struct vxlan_config *cfg = &vxlan->cfg;
+ struct netlink_ext_ack *extack;
+
+ extack = switchdev_notifier_info_to_extack(info);
+ vxlan_fdb_info = container_of(info,
+ struct switchdev_notifier_vxlan_fdb_info,
+ info);
+
+ if (vxlan_fdb_info->remote_port != cfg->dst_port) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default remote port is not supported");
+ return -EOPNOTSUPP;
+ }
+ if (vxlan_fdb_info->remote_vni != cfg->vni ||
+ vxlan_fdb_info->vni != cfg->vni) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default VNI is not supported");
+ return -EOPNOTSUPP;
+ }
+ if (vxlan_fdb_info->remote_ifindex) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Local interface is not supported");
+ return -EOPNOTSUPP;
+ }
+ if (is_multicast_ether_addr(vxlan_fdb_info->eth_addr)) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast MAC addresses not supported");
+ return -EOPNOTSUPP;
+ }
+ if (vxlan_addr_multicast(&vxlan_fdb_info->remote_ip)) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast destination IP is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ switchdev_work->vxlan_fdb_info = *vxlan_fdb_info;
+
+ return 0;
+}
+
+/* Called under rcu_read_lock() */
+static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ struct mlxsw_sp_switchdev_event_work *switchdev_work;
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct switchdev_notifier_info *info = ptr;
+ struct net_device *br_dev;
+ int err;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ mlxsw_sp_port_dev_check,
+ mlxsw_sp_port_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ /* Tunnel devices are not our uppers, so check their master instead */
+ br_dev = netdev_master_upper_dev_get_rcu(dev);
+ if (!br_dev)
+ return NOTIFY_DONE;
+ if (!netif_is_bridge_master(br_dev))
+ return NOTIFY_DONE;
+ if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev))
+ return NOTIFY_DONE;
+
+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+ if (!switchdev_work)
+ return NOTIFY_BAD;
+
+ switchdev_work->dev = dev;
+ switchdev_work->event = event;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ case SWITCHDEV_FDB_ADD_TO_BRIDGE:
+ case SWITCHDEV_FDB_DEL_TO_BRIDGE:
+ fdb_info = container_of(info,
+ struct switchdev_notifier_fdb_info,
+ info);
+ INIT_WORK(&switchdev_work->work,
+ mlxsw_sp_switchdev_bridge_fdb_event_work);
+ memcpy(&switchdev_work->fdb_info, ptr,
+ sizeof(switchdev_work->fdb_info));
+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!switchdev_work->fdb_info.addr)
+ goto err_addr_alloc;
+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+ fdb_info->addr);
+ /* Take a reference on the device. This can be either an
+ * upper device containing a mlxsw_sp_port or the
+ * mlxsw_sp_port itself.
+ */
+ dev_hold(dev);
+ break;
+ case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
+ INIT_WORK(&switchdev_work->work,
+ mlxsw_sp_switchdev_vxlan_fdb_event_work);
+ err = mlxsw_sp_switchdev_vxlan_work_prepare(switchdev_work,
+ info);
+ if (err)
+ goto err_vxlan_work_prepare;
+ dev_hold(dev);
+ break;
+ default:
+ kfree(switchdev_work);
+ return NOTIFY_DONE;
+ }
+
+ mlxsw_core_schedule_work(&switchdev_work->work);
+
+ return NOTIFY_DONE;
+
+err_vxlan_work_prepare:
+err_addr_alloc:
+ kfree(switchdev_work);
+ return NOTIFY_BAD;
+}
+
+struct notifier_block mlxsw_sp_switchdev_notifier = {
+ .notifier_call = mlxsw_sp_switchdev_event,
+};
+
+static int
+mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ const struct net_device *vxlan_dev, u16 vid,
+ bool flag_untagged, bool flag_pvid,
+ struct netlink_ext_ack *extack)
+{
+ struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
+ __be32 vni = vxlan->cfg.vni;
+ struct mlxsw_sp_fid *fid;
+ u16 old_vid;
+ int err;
+
+ /* We cannot have the same VLAN as PVID and egress untagged on multiple
+ * VxLAN devices. Note that we get this notification before the VLAN is
+ * actually added to the bridge's database, so it is not possible for
+ * the lookup function to return 'vxlan_dev'
+ */
+ if (flag_untagged && flag_pvid &&
+ mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid)) {
+ NL_SET_ERR_MSG_MOD(extack, "VLAN already mapped to a different VNI");
+ return -EINVAL;
+ }
+
+ if (!netif_running(vxlan_dev))
+ return 0;
+
+ /* First case: FID is not associated with this VNI, but the new VLAN
+ * is both PVID and egress untagged. Need to enable NVE on the FID, if
+ * it exists
+ */
+ fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
+ if (!fid) {
+ if (!flag_untagged || !flag_pvid)
+ return 0;
+ return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev,
+ vid, extack);
+ }
+
+ /* Second case: FID is associated with the VNI and the VLAN associated
+ * with the FID is the same as the notified VLAN. This means the flags
+ * (PVID / egress untagged) were toggled and that NVE should be
+ * disabled on the FID
+ */
+ old_vid = mlxsw_sp_fid_8021q_vid(fid);
+ if (vid == old_vid) {
+ if (WARN_ON(flag_untagged && flag_pvid)) {
+ mlxsw_sp_fid_put(fid);
+ return -EINVAL;
+ }
+ mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+ mlxsw_sp_fid_put(fid);
+ return 0;
+ }
+
+ /* Third case: A new VLAN was configured on the VxLAN device, but this
+ * VLAN is not PVID, so there is nothing to do.
+ */
+ if (!flag_pvid) {
+ mlxsw_sp_fid_put(fid);
+ return 0;
+ }
+
+ /* Fourth case: The new VLAN is PVID, which means the VLAN currently
+ * mapped to the VNI should be unmapped
+ */
+ mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+ mlxsw_sp_fid_put(fid);
+
+ /* Fifth case: The new VLAN is also egress untagged, which means the
+ * VLAN needs to be mapped to the VNI
+ */
+ if (!flag_untagged)
+ return 0;
+
+ err = bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid, extack);
+ if (err)
+ goto err_vxlan_join;
+
+ return 0;
+
+err_vxlan_join:
+ bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, old_vid, NULL);
+ return err;
+}
+
+static void
+mlxsw_sp_switchdev_vxlan_vlan_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ const struct net_device *vxlan_dev, u16 vid)
+{
+ struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
+ __be32 vni = vxlan->cfg.vni;
+ struct mlxsw_sp_fid *fid;
+
+ if (!netif_running(vxlan_dev))
+ return;
+
+ fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
+ if (!fid)
+ return;
+
+ /* A different VLAN than the one mapped to the VNI is deleted */
+ if (mlxsw_sp_fid_8021q_vid(fid) != vid)
+ goto out;
+
+ mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+
+out:
+ mlxsw_sp_fid_put(fid);
+}
+
+static int
+mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
+ struct switchdev_notifier_port_obj_info *
+ port_obj_info)
+{
+ struct switchdev_obj_port_vlan *vlan =
+ SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
+ bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct netlink_ext_ack *extack;
+ struct mlxsw_sp *mlxsw_sp;
+ struct net_device *br_dev;
+
+ extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
+ br_dev = netdev_master_upper_dev_get(vxlan_dev);
+ if (!br_dev)
+ return 0;
+
+ mlxsw_sp = mlxsw_sp_lower_get(br_dev);
+ if (!mlxsw_sp)
+ return 0;
+
+ port_obj_info->handled = true;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (!bridge_device)
+ return -EINVAL;
+
+ if (!bridge_device->vlan_enabled)
+ return 0;
+
+ return mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
+ vxlan_dev, vlan->vid,
+ flag_untagged,
+ flag_pvid, extack);
+}
+
+static void
+mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device *vxlan_dev,
+ struct switchdev_notifier_port_obj_info *
+ port_obj_info)
+{
+ struct switchdev_obj_port_vlan *vlan =
+ SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp *mlxsw_sp;
+ struct net_device *br_dev;
+
+ br_dev = netdev_master_upper_dev_get(vxlan_dev);
+ if (!br_dev)
+ return;
+
+ mlxsw_sp = mlxsw_sp_lower_get(br_dev);
+ if (!mlxsw_sp)
+ return;
+
+ port_obj_info->handled = true;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (!bridge_device)
+ return;
+
+ if (!bridge_device->vlan_enabled)
+ return;
+
+ mlxsw_sp_switchdev_vxlan_vlan_del(mlxsw_sp, bridge_device, vxlan_dev,
+ vlan->vid);
+}
+
+static int
+mlxsw_sp_switchdev_handle_vxlan_obj_add(struct net_device *vxlan_dev,
+ struct switchdev_notifier_port_obj_info *
+ port_obj_info)
+{
+ int err = 0;
+
+ switch (port_obj_info->obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = mlxsw_sp_switchdev_vxlan_vlans_add(vxlan_dev,
+ port_obj_info);
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static void
+mlxsw_sp_switchdev_handle_vxlan_obj_del(struct net_device *vxlan_dev,
+ struct switchdev_notifier_port_obj_info *
+ port_obj_info)
+{
+ switch (port_obj_info->obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ mlxsw_sp_switchdev_vxlan_vlans_del(vxlan_dev, port_obj_info);
+ break;
+ default:
+ break;
+ }
+}
+
+static int mlxsw_sp_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err = 0;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ if (netif_is_vxlan(dev))
+ err = mlxsw_sp_switchdev_handle_vxlan_obj_add(dev, ptr);
+ else
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ mlxsw_sp_port_dev_check,
+ mlxsw_sp_port_obj_add);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_OBJ_DEL:
+ if (netif_is_vxlan(dev))
+ mlxsw_sp_switchdev_handle_vxlan_obj_del(dev, ptr);
+ else
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ mlxsw_sp_port_dev_check,
+ mlxsw_sp_port_obj_del);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ mlxsw_sp_port_dev_check,
+ mlxsw_sp_port_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block mlxsw_sp_switchdev_blocking_notifier = {
+ .notifier_call = mlxsw_sp_switchdev_blocking_event,
+};
+
+u8
+mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
+{
+ return bridge_port->stp_state;
+}
+
+static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
+ struct notifier_block *nb;
+ int err;
+
+ err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
+ return err;
+ }
+
+ err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n");
+ return err;
+ }
+
+ nb = &mlxsw_sp_switchdev_blocking_notifier;
+ err = register_switchdev_blocking_notifier(nb);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev blocking notifier\n");
+ goto err_register_switchdev_blocking_notifier;
+ }
+
+ INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
+ bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
+ return 0;
+
+err_register_switchdev_blocking_notifier:
+ unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
+ return err;
+}
+
+static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct notifier_block *nb;
+
+ cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
+
+ nb = &mlxsw_sp_switchdev_blocking_notifier;
+ unregister_switchdev_blocking_notifier(nb);
+
+ unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
+}
+
+static void mlxsw_sp1_switchdev_init(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp->bridge->bridge_8021ad_ops = &mlxsw_sp1_bridge_8021ad_ops;
+}
+
+const struct mlxsw_sp_switchdev_ops mlxsw_sp1_switchdev_ops = {
+ .init = mlxsw_sp1_switchdev_init,
+};
+
+static void mlxsw_sp2_switchdev_init(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp->bridge->bridge_8021ad_ops = &mlxsw_sp2_bridge_8021ad_ops;
+}
+
+const struct mlxsw_sp_switchdev_ops mlxsw_sp2_switchdev_ops = {
+ .init = mlxsw_sp2_switchdev_init,
+};
+
+int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_bridge *bridge;
+
+ bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL);
+ if (!bridge)
+ return -ENOMEM;
+ mlxsw_sp->bridge = bridge;
+ bridge->mlxsw_sp = mlxsw_sp;
+
+ INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
+
+ bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
+ bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
+
+ mlxsw_sp->switchdev_ops->init(mlxsw_sp);
+
+ return mlxsw_sp_fdb_init(mlxsw_sp);
+}
+
+void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp_fdb_fini(mlxsw_sp);
+ WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
+ kfree(mlxsw_sp->bridge);
+}
+
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h
new file mode 100644
index 000000000..c218e10bd
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/netdevice.h>
+
+struct mlxsw_sp_bridge;
+struct mlxsw_sp_bridge_port;
+
+struct mlxsw_sp_bridge_port *
+mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
+ struct net_device *brport_dev);
+
+u8 mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
new file mode 100644
index 000000000..f4bfdb6da
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -0,0 +1,1969 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019 Mellanox Technologies. All rights reserved */
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/netlink.h>
+#include <net/devlink.h>
+#include <uapi/linux/devlink.h>
+
+#include "core.h"
+#include "reg.h"
+#include "spectrum.h"
+#include "spectrum_trap.h"
+
+struct mlxsw_sp_trap_policer_item {
+ struct devlink_trap_policer policer;
+ u16 hw_id;
+};
+
+struct mlxsw_sp_trap_group_item {
+ struct devlink_trap_group group;
+ u16 hw_group_id;
+ u8 priority;
+ u8 fixed_policer:1; /* Whether policer binding can change */
+};
+
+#define MLXSW_SP_TRAP_LISTENERS_MAX 3
+
+struct mlxsw_sp_trap_item {
+ struct devlink_trap trap;
+ struct mlxsw_listener listeners_arr[MLXSW_SP_TRAP_LISTENERS_MAX];
+ u8 is_source:1;
+};
+
+/* All driver-specific traps must be documented in
+ * Documentation/networking/devlink/mlxsw.rst
+ */
+enum {
+ DEVLINK_MLXSW_TRAP_ID_BASE = DEVLINK_TRAP_GENERIC_ID_MAX,
+ DEVLINK_MLXSW_TRAP_ID_IRIF_DISABLED,
+ DEVLINK_MLXSW_TRAP_ID_ERIF_DISABLED,
+};
+
+#define DEVLINK_MLXSW_TRAP_NAME_IRIF_DISABLED \
+ "irif_disabled"
+#define DEVLINK_MLXSW_TRAP_NAME_ERIF_DISABLED \
+ "erif_disabled"
+
+#define MLXSW_SP_TRAP_METADATA DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT
+
+enum {
+ /* Packet was mirrored from ingress. */
+ MLXSW_SP_MIRROR_REASON_INGRESS = 1,
+ /* Packet was mirrored from policy engine. */
+ MLXSW_SP_MIRROR_REASON_POLICY_ENGINE = 2,
+ /* Packet was early dropped. */
+ MLXSW_SP_MIRROR_REASON_INGRESS_WRED = 9,
+ /* Packet was mirrored from egress. */
+ MLXSW_SP_MIRROR_REASON_EGRESS = 14,
+};
+
+static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u16 local_port,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
+
+ if (unlikely(!mlxsw_sp_port)) {
+ dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
+ local_port);
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ skb->dev = mlxsw_sp_port->dev;
+
+ pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->rx_packets++;
+ pcpu_stats->rx_bytes += skb->len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ return 0;
+}
+
+static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u16 local_port,
+ void *trap_ctx)
+{
+ struct devlink_port *in_devlink_port;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp;
+ struct devlink *devlink;
+ int err;
+
+ mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+
+ err = mlxsw_sp_rx_listener(mlxsw_sp, skb, local_port, mlxsw_sp_port);
+ if (err)
+ return;
+
+ devlink = priv_to_devlink(mlxsw_sp->core);
+ in_devlink_port = mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
+ local_port);
+ skb_push(skb, ETH_HLEN);
+ devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port, NULL);
+ consume_skb(skb);
+}
+
+static void mlxsw_sp_rx_acl_drop_listener(struct sk_buff *skb, u16 local_port,
+ void *trap_ctx)
+{
+ u32 cookie_index = mlxsw_skb_cb(skb)->rx_md_info.cookie_index;
+ const struct flow_action_cookie *fa_cookie;
+ struct devlink_port *in_devlink_port;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp;
+ struct devlink *devlink;
+ int err;
+
+ mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+
+ err = mlxsw_sp_rx_listener(mlxsw_sp, skb, local_port, mlxsw_sp_port);
+ if (err)
+ return;
+
+ devlink = priv_to_devlink(mlxsw_sp->core);
+ in_devlink_port = mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
+ local_port);
+ skb_push(skb, ETH_HLEN);
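+	/* The flow action cookie is RCU-protected, so keep the read-side
+	 * critical section open while the cookie is reported to devlink.
+	 */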
+ rcu_read_lock();
+ fa_cookie = mlxsw_sp_acl_act_cookie_lookup(mlxsw_sp, cookie_index);
+ devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port, fa_cookie);
+ rcu_read_unlock();
+ consume_skb(skb);
+}
+
+static int __mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u16 local_port,
+ void *trap_ctx)
+{
+ struct devlink_port *in_devlink_port;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp;
+ struct devlink *devlink;
+ int err;
+
+ mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+
+ err = mlxsw_sp_rx_listener(mlxsw_sp, skb, local_port, mlxsw_sp_port);
+ if (err)
+ return err;
+
+ devlink = priv_to_devlink(mlxsw_sp->core);
+ in_devlink_port = mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
+ local_port);
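+	/* devlink expects the reported packet to start at the MAC header, so
+	 * push it back before reporting and pull it again before the skb is
+	 * handed to the networking stack.
+	 */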
+ skb_push(skb, ETH_HLEN);
+ devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port, NULL);
+ skb_pull(skb, ETH_HLEN);
+
+ return 0;
+}
+
+static void mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u16 local_port,
+ void *trap_ctx)
+{
+ int err;
+
+ err = __mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx);
+ if (err)
+ return;
+
+ netif_receive_skb(skb);
+}
+
+static void mlxsw_sp_rx_mark_listener(struct sk_buff *skb, u16 local_port,
+ void *trap_ctx)
+{
+ skb->offload_fwd_mark = 1;
+ mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx);
+}
+
+static void mlxsw_sp_rx_l3_mark_listener(struct sk_buff *skb, u16 local_port,
+ void *trap_ctx)
+{
+ skb->offload_l3_fwd_mark = 1;
+ skb->offload_fwd_mark = 1;
+ mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx);
+}
+
+static void mlxsw_sp_rx_ptp_listener(struct sk_buff *skb, u16 local_port,
+ void *trap_ctx)
+{
+ struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
+ int err;
+
+ err = __mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx);
+ if (err)
+ return;
+
+ /* The PTP handler expects skb->data to point to the start of the
+ * Ethernet header.
+ */
+ skb_push(skb, ETH_HLEN);
+ mlxsw_sp_ptp_receive(mlxsw_sp, skb, local_port);
+}
+
+static struct mlxsw_sp_port *
+mlxsw_sp_sample_tx_port_get(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_rx_md_info *rx_md_info)
+{
+ u16 local_port;
+
+ if (!rx_md_info->tx_port_valid)
+ return NULL;
+
+ if (rx_md_info->tx_port_is_lag)
+ local_port = mlxsw_core_lag_mapping_get(mlxsw_sp->core,
+ rx_md_info->tx_lag_id,
+ rx_md_info->tx_lag_port_index);
+ else
+ local_port = rx_md_info->tx_sys_port;
+
+ if (local_port >= mlxsw_core_max_ports(mlxsw_sp->core))
+ return NULL;
+
+ return mlxsw_sp->ports[local_port];
+}
+
+/* The latency units are determined according to MOGCR.mirror_latency_units. It
+ * defaults to 64 nanoseconds.
+ */
+#define MLXSW_SP_MIRROR_LATENCY_SHIFT 6
+
+static void mlxsw_sp_psample_md_init(struct mlxsw_sp *mlxsw_sp,
+ struct psample_metadata *md,
+ struct sk_buff *skb, int in_ifindex,
+ bool truncate, u32 trunc_size)
+{
+ struct mlxsw_rx_md_info *rx_md_info = &mlxsw_skb_cb(skb)->rx_md_info;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+
+ md->trunc_size = truncate ? trunc_size : skb->len;
+ md->in_ifindex = in_ifindex;
+ mlxsw_sp_port = mlxsw_sp_sample_tx_port_get(mlxsw_sp, rx_md_info);
+ md->out_ifindex = mlxsw_sp_port && mlxsw_sp_port->dev ?
+ mlxsw_sp_port->dev->ifindex : 0;
+ md->out_tc_valid = rx_md_info->tx_tc_valid;
+ md->out_tc = rx_md_info->tx_tc;
+ md->out_tc_occ_valid = rx_md_info->tx_congestion_valid;
+ md->out_tc_occ = rx_md_info->tx_congestion;
+ md->latency_valid = rx_md_info->latency_valid;
+ md->latency = rx_md_info->latency;
+ md->latency <<= MLXSW_SP_MIRROR_LATENCY_SHIFT;
+}
+
+static void mlxsw_sp_rx_sample_listener(struct sk_buff *skb, u16 local_port,
+ void *trap_ctx)
+{
+ struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
+ struct mlxsw_sp_sample_trigger trigger;
+ struct mlxsw_sp_sample_params *params;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct psample_metadata md = {};
+ int err;
+
+ err = __mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx);
+ if (err)
+ return;
+
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ if (!mlxsw_sp_port)
+ goto out;
+
+ trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_INGRESS;
+ trigger.local_port = local_port;
+ params = mlxsw_sp_sample_trigger_params_lookup(mlxsw_sp, &trigger);
+ if (!params)
+ goto out;
+
+ /* The psample module expects skb->data to point to the start of the
+ * Ethernet header.
+ */
+ skb_push(skb, ETH_HLEN);
+ mlxsw_sp_psample_md_init(mlxsw_sp, &md, skb,
+ mlxsw_sp_port->dev->ifindex, params->truncate,
+ params->trunc_size);
+ psample_sample_packet(params->psample_group, skb, params->rate, &md);
+out:
+ consume_skb(skb);
+}
+
+static void mlxsw_sp_rx_sample_tx_listener(struct sk_buff *skb, u16 local_port,
+ void *trap_ctx)
+{
+ struct mlxsw_rx_md_info *rx_md_info = &mlxsw_skb_cb(skb)->rx_md_info;
+ struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
+ struct mlxsw_sp_port *mlxsw_sp_port, *mlxsw_sp_port_tx;
+ struct mlxsw_sp_sample_trigger trigger;
+ struct mlxsw_sp_sample_params *params;
+ struct psample_metadata md = {};
+ int err;
+
+ /* Locally generated packets are not reported from the policy engine
+	 * trigger, so do not report them from the egress trigger either.
+ */
+ if (local_port == MLXSW_PORT_CPU_PORT)
+ goto out;
+
+ err = __mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx);
+ if (err)
+ return;
+
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ if (!mlxsw_sp_port)
+ goto out;
+
+ /* Packet was sampled from Tx, so we need to retrieve the sample
+ * parameters based on the Tx port and not the Rx port.
+ */
+ mlxsw_sp_port_tx = mlxsw_sp_sample_tx_port_get(mlxsw_sp, rx_md_info);
+ if (!mlxsw_sp_port_tx)
+ goto out;
+
+ trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_EGRESS;
+ trigger.local_port = mlxsw_sp_port_tx->local_port;
+ params = mlxsw_sp_sample_trigger_params_lookup(mlxsw_sp, &trigger);
+ if (!params)
+ goto out;
+
+ /* The psample module expects skb->data to point to the start of the
+ * Ethernet header.
+ */
+ skb_push(skb, ETH_HLEN);
+ mlxsw_sp_psample_md_init(mlxsw_sp, &md, skb,
+ mlxsw_sp_port->dev->ifindex, params->truncate,
+ params->trunc_size);
+ psample_sample_packet(params->psample_group, skb, params->rate, &md);
+out:
+ consume_skb(skb);
+}
+
+static void mlxsw_sp_rx_sample_acl_listener(struct sk_buff *skb, u16 local_port,
+ void *trap_ctx)
+{
+ struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
+ struct mlxsw_sp_sample_trigger trigger = {
+ .type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_POLICY_ENGINE,
+ };
+ struct mlxsw_sp_sample_params *params;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct psample_metadata md = {};
+ int err;
+
+ err = __mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx);
+ if (err)
+ return;
+
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ if (!mlxsw_sp_port)
+ goto out;
+
+ params = mlxsw_sp_sample_trigger_params_lookup(mlxsw_sp, &trigger);
+ if (!params)
+ goto out;
+
+ /* The psample module expects skb->data to point to the start of the
+ * Ethernet header.
+ */
+ skb_push(skb, ETH_HLEN);
+ mlxsw_sp_psample_md_init(mlxsw_sp, &md, skb,
+ mlxsw_sp_port->dev->ifindex, params->truncate,
+ params->trunc_size);
+ psample_sample_packet(params->psample_group, skb, params->rate, &md);
+out:
+ consume_skb(skb);
+}
+
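+/* The macros below build the devlink trap, trap policer and Rx listener
+ * definitions used in the arrays that follow. The listener macros bind each
+ * hardware trap identifier to a CPU trap group and a reporting function.
+ */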
+#define MLXSW_SP_TRAP_DROP(_id, _group_id) \
+ DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ MLXSW_SP_TRAP_METADATA)
+
+#define MLXSW_SP_TRAP_DROP_EXT(_id, _group_id, _metadata) \
+ DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ MLXSW_SP_TRAP_METADATA | (_metadata))
+
+#define MLXSW_SP_TRAP_BUFFER_DROP(_id) \
+ DEVLINK_TRAP_GENERIC(DROP, TRAP, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_BUFFER_DROPS, \
+ MLXSW_SP_TRAP_METADATA)
+
+#define MLXSW_SP_TRAP_DRIVER_DROP(_id, _group_id) \
+ DEVLINK_TRAP_DRIVER(DROP, DROP, DEVLINK_MLXSW_TRAP_ID_##_id, \
+ DEVLINK_MLXSW_TRAP_NAME_##_id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ MLXSW_SP_TRAP_METADATA)
+
+#define MLXSW_SP_TRAP_EXCEPTION(_id, _group_id) \
+ DEVLINK_TRAP_GENERIC(EXCEPTION, TRAP, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ MLXSW_SP_TRAP_METADATA)
+
+#define MLXSW_SP_TRAP_CONTROL(_id, _group_id, _action) \
+ DEVLINK_TRAP_GENERIC(CONTROL, _action, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ MLXSW_SP_TRAP_METADATA)
+
+#define MLXSW_SP_RXL_DISCARD(_id, _group_id) \
+ MLXSW_RXL_DIS(mlxsw_sp_rx_drop_listener, DISCARD_##_id, \
+ TRAP_EXCEPTION_TO_CPU, false, SP_##_group_id, \
+ SET_FW_DEFAULT, SP_##_group_id)
+
+#define MLXSW_SP_RXL_ACL_DISCARD(_id, _en_group_id, _dis_group_id) \
+ MLXSW_RXL_DIS(mlxsw_sp_rx_acl_drop_listener, DISCARD_##_id, \
+ TRAP_EXCEPTION_TO_CPU, false, SP_##_en_group_id, \
+ SET_FW_DEFAULT, SP_##_dis_group_id)
+
+#define MLXSW_SP_RXL_BUFFER_DISCARD(_mirror_reason) \
+ MLXSW_RXL_MIRROR(mlxsw_sp_rx_drop_listener, 0, SP_BUFFER_DISCARDS, \
+ MLXSW_SP_MIRROR_REASON_##_mirror_reason)
+
+#define MLXSW_SP_RXL_EXCEPTION(_id, _group_id, _action) \
+ MLXSW_RXL(mlxsw_sp_rx_mark_listener, _id, \
+ _action, false, SP_##_group_id, SET_FW_DEFAULT)
+
+#define MLXSW_SP_RXL_NO_MARK(_id, _group_id, _action, _is_ctrl) \
+ MLXSW_RXL(mlxsw_sp_rx_no_mark_listener, _id, _action, \
+ _is_ctrl, SP_##_group_id, DISCARD)
+
+#define MLXSW_SP_RXL_MARK(_id, _group_id, _action, _is_ctrl) \
+ MLXSW_RXL(mlxsw_sp_rx_mark_listener, _id, _action, _is_ctrl, \
+ SP_##_group_id, DISCARD)
+
+#define MLXSW_SP_RXL_L3_MARK(_id, _group_id, _action, _is_ctrl) \
+ MLXSW_RXL(mlxsw_sp_rx_l3_mark_listener, _id, _action, _is_ctrl, \
+ SP_##_group_id, DISCARD)
+
+#define MLXSW_SP_TRAP_POLICER(_id, _rate, _burst) \
+ DEVLINK_TRAP_POLICER(_id, _rate, _burst, \
+ MLXSW_REG_QPCR_HIGHEST_CIR, \
+ MLXSW_REG_QPCR_LOWEST_CIR, \
+ 1 << MLXSW_REG_QPCR_HIGHEST_CBS, \
+ 1 << MLXSW_REG_QPCR_LOWEST_CBS)
+
+/* Ordered by policer identifier */
+static const struct mlxsw_sp_trap_policer_item
+mlxsw_sp_trap_policer_items_arr[] = {
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(1, 10 * 1024, 4096),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(2, 128, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(3, 128, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(4, 128, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(5, 16 * 1024, 8192),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(6, 128, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(7, 1024, 512),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(8, 20 * 1024, 8192),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(9, 128, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(10, 1024, 512),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(11, 256, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(12, 128, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(13, 128, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(14, 1024, 512),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(15, 1024, 512),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(16, 24 * 1024, 16384),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(17, 19 * 1024, 8192),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(18, 1024, 512),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(19, 1024, 512),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(20, 10240, 4096),
+ },
+};
+
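+/* The second argument of DEVLINK_TRAP_GROUP_GENERIC() is the identifier of
+ * the policer (from the array above) initially bound to the group; zero
+ * means the group is not bound to a policer.
+ */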
+static const struct mlxsw_sp_trap_group_item mlxsw_sp_trap_group_items_arr[] = {
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 1),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_L2_DISCARDS,
+ .priority = 0,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(L3_DROPS, 1),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_DISCARDS,
+ .priority = 0,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(L3_EXCEPTIONS, 1),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_EXCEPTIONS,
+ .priority = 2,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(TUNNEL_DROPS, 1),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_TUNNEL_DISCARDS,
+ .priority = 0,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(ACL_DROPS, 1),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_ACL_DISCARDS,
+ .priority = 0,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(STP, 2),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_STP,
+ .priority = 5,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(LACP, 3),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP,
+ .priority = 5,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(LLDP, 4),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP,
+ .priority = 5,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(MC_SNOOPING, 5),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_MC_SNOOPING,
+ .priority = 3,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(DHCP, 6),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP,
+ .priority = 2,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(NEIGH_DISCOVERY, 7),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_NEIGH_DISCOVERY,
+ .priority = 2,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(BFD, 8),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_BFD,
+ .priority = 5,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(OSPF, 9),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF,
+ .priority = 5,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(BGP, 10),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP,
+ .priority = 4,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(VRRP, 11),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP,
+ .priority = 5,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(PIM, 12),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM,
+ .priority = 5,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(UC_LB, 13),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR,
+ .priority = 0,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(LOCAL_DELIVERY, 14),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME,
+ .priority = 2,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(EXTERNAL_DELIVERY, 19),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_EXTERNAL_ROUTE,
+ .priority = 1,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(IPV6, 15),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6,
+ .priority = 2,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(PTP_EVENT, 16),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0,
+ .priority = 5,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(PTP_GENERAL, 17),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1,
+ .priority = 2,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(ACL_TRAP, 18),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_FLOW_LOGGING,
+ .priority = 4,
+ },
+};
+
+static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = {
+ {
+ .trap = MLXSW_SP_TRAP_DROP(SMAC_MC, L2_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ING_PACKET_SMAC_MC, L2_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(VLAN_TAG_MISMATCH, L2_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ING_SWITCH_VTAG_ALLOW,
+ L2_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(INGRESS_VLAN_FILTER, L2_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ING_SWITCH_VLAN, L2_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(INGRESS_STP_FILTER, L2_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ING_SWITCH_STP, L2_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(EMPTY_TX_LIST, L2_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_UC, L2_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_MC_NULL, L2_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(PORT_LOOPBACK_FILTER, L2_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_LB, L2_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(BLACKHOLE_ROUTE, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ROUTER2, L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(NON_IP_PACKET, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_NON_IP_PACKET,
+ L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(UC_DIP_MC_DMAC, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_UC_DIP_MC_DMAC,
+ L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(DIP_LB, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_DIP_LB, L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(SIP_MC, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_SIP_MC, L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(SIP_LB, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_SIP_LB, L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(CORRUPTED_IP_HDR, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_CORRUPTED_IP_HDR,
+ L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(IPV4_SIP_BC, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_IPV4_SIP_BC,
+ L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(IPV6_MC_DIP_RESERVED_SCOPE,
+ L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(IPV6_MC_DIP_RESERVED_SCOPE,
+ L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE,
+ L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE,
+ L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_EXCEPTION(MTU_ERROR, L3_EXCEPTIONS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_EXCEPTION(MTUERROR, L3_EXCEPTIONS,
+ TRAP_TO_CPU),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_EXCEPTION(TTL_ERROR, L3_EXCEPTIONS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_EXCEPTION(TTLERROR, L3_EXCEPTIONS,
+ TRAP_TO_CPU),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_EXCEPTION(RPF, L3_EXCEPTIONS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_EXCEPTION(RPF, L3_EXCEPTIONS, TRAP_TO_CPU),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_EXCEPTION(REJECT_ROUTE, L3_EXCEPTIONS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_EXCEPTION(RTR_INGRESS1, L3_EXCEPTIONS,
+ TRAP_TO_CPU),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_EXCEPTION(UNRESOLVED_NEIGH,
+ L3_EXCEPTIONS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_EXCEPTION(HOST_MISS_IPV4, L3_EXCEPTIONS,
+ TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(HOST_MISS_IPV6, L3_EXCEPTIONS,
+ TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(RTR_EGRESS0, L3_EXCEPTIONS,
+ TRAP_EXCEPTION_TO_CPU),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_EXCEPTION(IPV4_LPM_UNICAST_MISS,
+ L3_EXCEPTIONS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER_LPM4,
+ L3_EXCEPTIONS,
+ TRAP_EXCEPTION_TO_CPU),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_EXCEPTION(IPV6_LPM_UNICAST_MISS,
+ L3_EXCEPTIONS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER_LPM6,
+ L3_EXCEPTIONS,
+ TRAP_EXCEPTION_TO_CPU),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DRIVER_DROP(IRIF_DISABLED, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ROUTER_IRIF_EN, L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DRIVER_DROP(ERIF_DISABLED, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ROUTER_ERIF_EN, L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(NON_ROUTABLE, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(NON_ROUTABLE, L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_EXCEPTION(DECAP_ERROR, TUNNEL_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_EXCEPTION(DECAP_ECN0, TUNNEL_DISCARDS,
+ TRAP_EXCEPTION_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(IPIP_DECAP_ERROR,
+ TUNNEL_DISCARDS,
+ TRAP_EXCEPTION_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(DISCARD_DEC_PKT, TUNNEL_DISCARDS,
+ TRAP_EXCEPTION_TO_CPU),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(OVERLAY_SMAC_MC, TUNNEL_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(OVERLAY_SMAC_MC, TUNNEL_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP_EXT(INGRESS_FLOW_ACTION_DROP,
+ ACL_DROPS,
+ DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE),
+ .listeners_arr = {
+ MLXSW_SP_RXL_ACL_DISCARD(INGRESS_ACL, ACL_DISCARDS,
+ DUMMY),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP_EXT(EGRESS_FLOW_ACTION_DROP,
+ ACL_DROPS,
+ DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE),
+ .listeners_arr = {
+ MLXSW_SP_RXL_ACL_DISCARD(EGRESS_ACL, ACL_DISCARDS,
+ DUMMY),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(STP, STP, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(STP, STP, TRAP_TO_CPU, true),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(LACP, LACP, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(LACP, LACP, TRAP_TO_CPU, true),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(LLDP, LLDP, TRAP),
+ .listeners_arr = {
+ MLXSW_RXL(mlxsw_sp_rx_ptp_listener, LLDP, TRAP_TO_CPU,
+ true, SP_LLDP, DISCARD),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IGMP_QUERY, MC_SNOOPING, MIRROR),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IGMP_QUERY, MC_SNOOPING,
+ MIRROR_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IGMP_V1_REPORT, MC_SNOOPING,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, MC_SNOOPING,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IGMP_V2_REPORT, MC_SNOOPING,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, MC_SNOOPING,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IGMP_V3_REPORT, MC_SNOOPING,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, MC_SNOOPING,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IGMP_V2_LEAVE, MC_SNOOPING,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, MC_SNOOPING,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(MLD_QUERY, MC_SNOOPING, MIRROR),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY,
+ MC_SNOOPING, MIRROR_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(MLD_V1_REPORT, MC_SNOOPING,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT,
+ MC_SNOOPING, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(MLD_V2_REPORT, MC_SNOOPING,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT,
+ MC_SNOOPING, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(MLD_V1_DONE, MC_SNOOPING,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE,
+ MC_SNOOPING, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV4_DHCP, DHCP, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV4_DHCP, DHCP, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_DHCP, DHCP, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV6_DHCP, DHCP, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(ARP_REQUEST, NEIGH_DISCOVERY,
+ MIRROR),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(ROUTER_ARPBC, NEIGH_DISCOVERY,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(ARP_RESPONSE, NEIGH_DISCOVERY,
+ MIRROR),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(ROUTER_ARPUC, NEIGH_DISCOVERY,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(ARP_OVERLAY, NEIGH_DISCOVERY,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, NEIGH_DISCOVERY,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_NEIGH_SOLICIT,
+ NEIGH_DISCOVERY, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION,
+ NEIGH_DISCOVERY, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_NEIGH_ADVERT,
+ NEIGH_DISCOVERY, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISEMENT,
+ NEIGH_DISCOVERY, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV4_BFD, BFD, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV4_BFD, BFD, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_BFD, BFD, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV6_BFD, BFD, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV4_OSPF, OSPF, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV4_OSPF, OSPF, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_OSPF, OSPF, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV6_OSPF, OSPF, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV4_BGP, BGP, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV4_BGP, BGP, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_BGP, BGP, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV6_BGP, BGP, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV4_VRRP, VRRP, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV4_VRRP, VRRP, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_VRRP, VRRP, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV6_VRRP, VRRP, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV4_PIM, PIM, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV4_PIM, PIM, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_PIM, PIM, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV6_PIM, PIM, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(UC_LB, UC_LB, MIRROR),
+ .listeners_arr = {
+ MLXSW_SP_RXL_L3_MARK(LBERROR, LBERROR, MIRROR_TO_CPU,
+ false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(LOCAL_ROUTE, LOCAL_DELIVERY,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IP2ME, IP2ME, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(EXTERNAL_ROUTE, EXTERNAL_DELIVERY,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(RTR_INGRESS0, EXTERNAL_ROUTE,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_UC_DIP_LINK_LOCAL_SCOPE,
+ LOCAL_DELIVERY, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, IP2ME,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV4_ROUTER_ALERT, LOCAL_DELIVERY,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, IP2ME, TRAP_TO_CPU,
+ false),
+ },
+ },
+ {
+ /* IPV6_ROUTER_ALERT is defined in uAPI as 22, but it is not
+ * used in this file, so undefine it.
+ */
+ #undef IPV6_ROUTER_ALERT
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_ROUTER_ALERT, LOCAL_DELIVERY,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, IP2ME, TRAP_TO_CPU,
+ false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_DIP_ALL_NODES, IPV6, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, IPV6,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_DIP_ALL_ROUTERS, IPV6, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, IPV6,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_ROUTER_SOLICIT, IPV6, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, IPV6,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_ROUTER_ADVERT, IPV6, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISEMENT, IPV6,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_REDIRECT, IPV6, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, IPV6,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(PTP_EVENT, PTP_EVENT, TRAP),
+ .listeners_arr = {
+ MLXSW_RXL(mlxsw_sp_rx_ptp_listener, PTP0, TRAP_TO_CPU,
+ false, SP_PTP0, DISCARD),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(PTP_GENERAL, PTP_GENERAL, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(PTP1, PTP1, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(FLOW_ACTION_TRAP, ACL_TRAP, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(ACL0, FLOW_LOGGING, TRAP_TO_CPU,
+ false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(BLACKHOLE_NEXTHOP, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ROUTER3, L3_DISCARDS),
+ },
+ },
+};
+
+static struct mlxsw_sp_trap_policer_item *
+mlxsw_sp_trap_policer_item_lookup(struct mlxsw_sp *mlxsw_sp, u32 id)
+{
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ int i;
+
+ for (i = 0; i < trap->policers_count; i++) {
+ if (trap->policer_items_arr[i].policer.id == id)
+ return &trap->policer_items_arr[i];
+ }
+
+ return NULL;
+}
+
+static struct mlxsw_sp_trap_group_item *
+mlxsw_sp_trap_group_item_lookup(struct mlxsw_sp *mlxsw_sp, u16 id)
+{
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ int i;
+
+ for (i = 0; i < trap->groups_count; i++) {
+ if (trap->group_items_arr[i].group.id == id)
+ return &trap->group_items_arr[i];
+ }
+
+ return NULL;
+}
+
+static struct mlxsw_sp_trap_item *
+mlxsw_sp_trap_item_lookup(struct mlxsw_sp *mlxsw_sp, u16 id)
+{
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ int i;
+
+ for (i = 0; i < trap->traps_count; i++) {
+ if (trap->trap_items_arr[i].trap.id == id)
+ return &trap->trap_items_arr[i];
+ }
+
+ return NULL;
+}
+
+static int mlxsw_sp_trap_cpu_policers_set(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ char qpcr_pl[MLXSW_REG_QPCR_LEN];
+ u16 hw_id;
+
+	/* The purpose of the "thin" policer is to drop as many packets
+	 * as possible. The dummy group uses it.
+ */
+ hw_id = find_first_zero_bit(trap->policers_usage, trap->max_policers);
+ if (WARN_ON(hw_id == trap->max_policers))
+ return -ENOBUFS;
+
+ __set_bit(hw_id, trap->policers_usage);
+ trap->thin_policer_hw_id = hw_id;
+ mlxsw_reg_qpcr_pack(qpcr_pl, hw_id, MLXSW_REG_QPCR_IR_UNITS_M,
+ false, 1, 4);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpcr), qpcr_pl);
+}
+
+static int mlxsw_sp_trap_dummy_group_init(struct mlxsw_sp *mlxsw_sp)
+{
+ char htgt_pl[MLXSW_REG_HTGT_LEN];
+
+ mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_SP_DUMMY,
+ mlxsw_sp->trap->thin_policer_hw_id, 0, 1);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
+}
+
+static int mlxsw_sp_trap_policer_items_arr_init(struct mlxsw_sp *mlxsw_sp)
+{
+ size_t arr_size = ARRAY_SIZE(mlxsw_sp_trap_policer_items_arr);
+ size_t elem_size = sizeof(struct mlxsw_sp_trap_policer_item);
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ size_t free_policers = 0;
+ u32 last_id;
+ int i;
+
+ for_each_clear_bit(i, trap->policers_usage, trap->max_policers)
+ free_policers++;
+
+ if (arr_size > free_policers) {
+ dev_err(mlxsw_sp->bus_info->dev, "Exceeded number of supported packet trap policers\n");
+ return -ENOBUFS;
+ }
+
+ trap->policer_items_arr = kcalloc(free_policers, elem_size, GFP_KERNEL);
+ if (!trap->policer_items_arr)
+ return -ENOMEM;
+
+ trap->policers_count = free_policers;
+
+ /* Initialize policer items array with pre-defined policers. */
+ memcpy(trap->policer_items_arr, mlxsw_sp_trap_policer_items_arr,
+ elem_size * arr_size);
+
+ /* Initialize policer items array with the rest of the available
+ * policers.
+ */
+ last_id = mlxsw_sp_trap_policer_items_arr[arr_size - 1].policer.id;
+ for (i = arr_size; i < trap->policers_count; i++) {
+ const struct mlxsw_sp_trap_policer_item *policer_item;
+
+		/* Use the parameters of the first policer and override the
+		 * relevant ones.
+ */
+ policer_item = &mlxsw_sp_trap_policer_items_arr[0];
+ trap->policer_items_arr[i] = *policer_item;
+ trap->policer_items_arr[i].policer.id = ++last_id;
+ trap->policer_items_arr[i].policer.init_rate = 1;
+ trap->policer_items_arr[i].policer.init_burst = 16;
+ }
+
+ return 0;
+}
+
+static void mlxsw_sp_trap_policer_items_arr_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ kfree(mlxsw_sp->trap->policer_items_arr);
+}
+
+static int mlxsw_sp_trap_policers_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ const struct mlxsw_sp_trap_policer_item *policer_item;
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ int err, i;
+
+ err = mlxsw_sp_trap_policer_items_arr_init(mlxsw_sp);
+ if (err)
+ return err;
+
+ for (i = 0; i < trap->policers_count; i++) {
+ policer_item = &trap->policer_items_arr[i];
+ err = devl_trap_policers_register(devlink,
+ &policer_item->policer, 1);
+ if (err)
+ goto err_trap_policer_register;
+ }
+
+ return 0;
+
+err_trap_policer_register:
+ for (i--; i >= 0; i--) {
+ policer_item = &trap->policer_items_arr[i];
+ devl_trap_policers_unregister(devlink,
+ &policer_item->policer, 1);
+ }
+ mlxsw_sp_trap_policer_items_arr_fini(mlxsw_sp);
+ return err;
+}
+
+static void mlxsw_sp_trap_policers_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ const struct mlxsw_sp_trap_policer_item *policer_item;
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ int i;
+
+ for (i = trap->policers_count - 1; i >= 0; i--) {
+ policer_item = &trap->policer_items_arr[i];
+ devl_trap_policers_unregister(devlink,
+ &policer_item->policer, 1);
+ }
+ mlxsw_sp_trap_policer_items_arr_fini(mlxsw_sp);
+}
+
+static int mlxsw_sp_trap_group_items_arr_init(struct mlxsw_sp *mlxsw_sp)
+{
+ size_t common_groups_count = ARRAY_SIZE(mlxsw_sp_trap_group_items_arr);
+ const struct mlxsw_sp_trap_group_item *spec_group_items_arr;
+ size_t elem_size = sizeof(struct mlxsw_sp_trap_group_item);
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ size_t groups_count, spec_groups_count;
+ int err;
+
+ err = mlxsw_sp->trap_ops->groups_init(mlxsw_sp, &spec_group_items_arr,
+ &spec_groups_count);
+ if (err)
+ return err;
+
+ /* The group items array is created by concatenating the common trap
+ * group items and the ASIC-specific trap group items.
+ */
+ groups_count = common_groups_count + spec_groups_count;
+ trap->group_items_arr = kcalloc(groups_count, elem_size, GFP_KERNEL);
+ if (!trap->group_items_arr)
+ return -ENOMEM;
+
+ memcpy(trap->group_items_arr, mlxsw_sp_trap_group_items_arr,
+ elem_size * common_groups_count);
+ memcpy(trap->group_items_arr + common_groups_count,
+ spec_group_items_arr, elem_size * spec_groups_count);
+
+ trap->groups_count = groups_count;
+
+ return 0;
+}
+
+static void mlxsw_sp_trap_group_items_arr_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ kfree(mlxsw_sp->trap->group_items_arr);
+}
+
+static int mlxsw_sp_trap_groups_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ const struct mlxsw_sp_trap_group_item *group_item;
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ int err, i;
+
+ err = mlxsw_sp_trap_group_items_arr_init(mlxsw_sp);
+ if (err)
+ return err;
+
+ for (i = 0; i < trap->groups_count; i++) {
+ group_item = &trap->group_items_arr[i];
+ err = devl_trap_groups_register(devlink, &group_item->group, 1);
+ if (err)
+ goto err_trap_group_register;
+ }
+
+ return 0;
+
+err_trap_group_register:
+ for (i--; i >= 0; i--) {
+ group_item = &trap->group_items_arr[i];
+ devl_trap_groups_unregister(devlink, &group_item->group, 1);
+ }
+ mlxsw_sp_trap_group_items_arr_fini(mlxsw_sp);
+ return err;
+}
+
+static void mlxsw_sp_trap_groups_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ int i;
+
+ for (i = trap->groups_count - 1; i >= 0; i--) {
+ const struct mlxsw_sp_trap_group_item *group_item;
+
+ group_item = &trap->group_items_arr[i];
+ devl_trap_groups_unregister(devlink, &group_item->group, 1);
+ }
+ mlxsw_sp_trap_group_items_arr_fini(mlxsw_sp);
+}
+
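+/* Unused entries in listeners_arr are zero-initialized, so a zero trap
+ * identifier marks a listener slot as invalid.
+ */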
+static bool
+mlxsw_sp_trap_listener_is_valid(const struct mlxsw_listener *listener)
+{
+ return listener->trap_id != 0;
+}
+
+static int mlxsw_sp_trap_items_arr_init(struct mlxsw_sp *mlxsw_sp)
+{
+ size_t common_traps_count = ARRAY_SIZE(mlxsw_sp_trap_items_arr);
+ const struct mlxsw_sp_trap_item *spec_trap_items_arr;
+ size_t elem_size = sizeof(struct mlxsw_sp_trap_item);
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ size_t traps_count, spec_traps_count;
+ int err;
+
+ err = mlxsw_sp->trap_ops->traps_init(mlxsw_sp, &spec_trap_items_arr,
+ &spec_traps_count);
+ if (err)
+ return err;
+
+ /* The trap items array is created by concatenating the common trap
+ * items and the ASIC-specific trap items.
+ */
+ traps_count = common_traps_count + spec_traps_count;
+ trap->trap_items_arr = kcalloc(traps_count, elem_size, GFP_KERNEL);
+ if (!trap->trap_items_arr)
+ return -ENOMEM;
+
+ memcpy(trap->trap_items_arr, mlxsw_sp_trap_items_arr,
+ elem_size * common_traps_count);
+ memcpy(trap->trap_items_arr + common_traps_count,
+ spec_trap_items_arr, elem_size * spec_traps_count);
+
+ trap->traps_count = traps_count;
+
+ return 0;
+}
+
+static void mlxsw_sp_trap_items_arr_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ kfree(mlxsw_sp->trap->trap_items_arr);
+}
+
+static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ const struct mlxsw_sp_trap_item *trap_item;
+ int err, i;
+
+ err = mlxsw_sp_trap_items_arr_init(mlxsw_sp);
+ if (err)
+ return err;
+
+ for (i = 0; i < trap->traps_count; i++) {
+ trap_item = &trap->trap_items_arr[i];
+ err = devl_traps_register(devlink, &trap_item->trap, 1,
+ mlxsw_sp);
+ if (err)
+ goto err_trap_register;
+ }
+
+ return 0;
+
+err_trap_register:
+ for (i--; i >= 0; i--) {
+ trap_item = &trap->trap_items_arr[i];
+ devl_traps_unregister(devlink, &trap_item->trap, 1);
+ }
+ mlxsw_sp_trap_items_arr_fini(mlxsw_sp);
+ return err;
+}
+
+static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ int i;
+
+ for (i = trap->traps_count - 1; i >= 0; i--) {
+ const struct mlxsw_sp_trap_item *trap_item;
+
+ trap_item = &trap->trap_items_arr[i];
+ devl_traps_unregister(devlink, &trap_item->trap, 1);
+ }
+ mlxsw_sp_trap_items_arr_fini(mlxsw_sp);
+}
+
+int mlxsw_sp_devlink_traps_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ err = mlxsw_sp_trap_cpu_policers_set(mlxsw_sp);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_trap_dummy_group_init(mlxsw_sp);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_trap_policers_init(mlxsw_sp);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_trap_groups_init(mlxsw_sp);
+ if (err)
+ goto err_trap_groups_init;
+
+ err = mlxsw_sp_traps_init(mlxsw_sp);
+ if (err)
+ goto err_traps_init;
+
+ return 0;
+
+err_traps_init:
+ mlxsw_sp_trap_groups_fini(mlxsw_sp);
+err_trap_groups_init:
+ mlxsw_sp_trap_policers_fini(mlxsw_sp);
+ return err;
+}
+
+void mlxsw_sp_devlink_traps_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp_traps_fini(mlxsw_sp);
+ mlxsw_sp_trap_groups_fini(mlxsw_sp);
+ mlxsw_sp_trap_policers_fini(mlxsw_sp);
+}
+
+int mlxsw_sp_trap_init(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap, void *trap_ctx)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ const struct mlxsw_sp_trap_item *trap_item;
+ int i;
+
+ trap_item = mlxsw_sp_trap_item_lookup(mlxsw_sp, trap->id);
+ if (WARN_ON(!trap_item))
+ return -EINVAL;
+
+ for (i = 0; i < MLXSW_SP_TRAP_LISTENERS_MAX; i++) {
+ const struct mlxsw_listener *listener;
+ int err;
+
+ listener = &trap_item->listeners_arr[i];
+ if (!mlxsw_sp_trap_listener_is_valid(listener))
+ continue;
+ err = mlxsw_core_trap_register(mlxsw_core, listener, trap_ctx);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+void mlxsw_sp_trap_fini(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap, void *trap_ctx)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ const struct mlxsw_sp_trap_item *trap_item;
+ int i;
+
+ trap_item = mlxsw_sp_trap_item_lookup(mlxsw_sp, trap->id);
+ if (WARN_ON(!trap_item))
+ return;
+
+ for (i = MLXSW_SP_TRAP_LISTENERS_MAX - 1; i >= 0; i--) {
+ const struct mlxsw_listener *listener;
+
+ listener = &trap_item->listeners_arr[i];
+ if (!mlxsw_sp_trap_listener_is_valid(listener))
+ continue;
+ mlxsw_core_trap_unregister(mlxsw_core, listener, trap_ctx);
+ }
+}
+
+int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ const struct mlxsw_sp_trap_item *trap_item;
+ int i;
+
+ trap_item = mlxsw_sp_trap_item_lookup(mlxsw_sp, trap->id);
+ if (WARN_ON(!trap_item))
+ return -EINVAL;
+
+ if (trap_item->is_source) {
+ NL_SET_ERR_MSG_MOD(extack, "Changing the action of source traps is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ for (i = 0; i < MLXSW_SP_TRAP_LISTENERS_MAX; i++) {
+ const struct mlxsw_listener *listener;
+ bool enabled;
+ int err;
+
+ listener = &trap_item->listeners_arr[i];
+ if (!mlxsw_sp_trap_listener_is_valid(listener))
+ continue;
+
+ switch (action) {
+ case DEVLINK_TRAP_ACTION_DROP:
+ enabled = false;
+ break;
+ case DEVLINK_TRAP_ACTION_TRAP:
+ enabled = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ err = mlxsw_core_trap_state_set(mlxsw_core, listener, enabled);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+__mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_group *group,
+ u32 policer_id, struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ u16 hw_policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
+ const struct mlxsw_sp_trap_group_item *group_item;
+ char htgt_pl[MLXSW_REG_HTGT_LEN];
+
+ group_item = mlxsw_sp_trap_group_item_lookup(mlxsw_sp, group->id);
+ if (WARN_ON(!group_item))
+ return -EINVAL;
+
+ if (group_item->fixed_policer && policer_id != group->init_policer_id) {
+ NL_SET_ERR_MSG_MOD(extack, "Changing the policer binding of this group is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (policer_id) {
+ struct mlxsw_sp_trap_policer_item *policer_item;
+
+ policer_item = mlxsw_sp_trap_policer_item_lookup(mlxsw_sp,
+ policer_id);
+ if (WARN_ON(!policer_item))
+ return -EINVAL;
+ hw_policer_id = policer_item->hw_id;
+ }
+
+ mlxsw_reg_htgt_pack(htgt_pl, group_item->hw_group_id, hw_policer_id,
+ group_item->priority, group_item->priority);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
+}
+
+int mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_group *group)
+{
+ return __mlxsw_sp_trap_group_init(mlxsw_core, group,
+ group->init_policer_id, NULL);
+}
+
+int mlxsw_sp_trap_group_set(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_group *group,
+ const struct devlink_trap_policer *policer,
+ struct netlink_ext_ack *extack)
+{
+ u32 policer_id = policer ? policer->id : 0;
+
+ return __mlxsw_sp_trap_group_init(mlxsw_core, group, policer_id,
+ extack);
+}
+
+static int
+mlxsw_sp_trap_policer_item_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_trap_policer_item *policer_item)
+{
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ u16 hw_id;
+
+ /* We should be able to allocate a policer because the number of
+	 * policers we registered with devlink is in accordance with the number
+ * of available policers.
+ */
+ hw_id = find_first_zero_bit(trap->policers_usage, trap->max_policers);
+ if (WARN_ON(hw_id == trap->max_policers))
+ return -ENOBUFS;
+
+ __set_bit(hw_id, trap->policers_usage);
+ policer_item->hw_id = hw_id;
+
+ return 0;
+}
+
+static void
+mlxsw_sp_trap_policer_item_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_trap_policer_item *policer_item)
+{
+ __clear_bit(policer_item->hw_id, mlxsw_sp->trap->policers_usage);
+}
+
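+/* The QPCR register encodes the burst size as a log2 exponent, so the
+ * requested burst must be a power of two.
+ */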
+static int mlxsw_sp_trap_policer_bs(u64 burst, u8 *p_burst_size,
+ struct netlink_ext_ack *extack)
+{
+ int bs = fls64(burst) - 1;
+
+ if (burst != (BIT_ULL(bs))) {
+ NL_SET_ERR_MSG_MOD(extack, "Policer burst size is not power of two");
+ return -EINVAL;
+ }
+
+ *p_burst_size = bs;
+
+ return 0;
+}
+
+static int __mlxsw_sp_trap_policer_set(struct mlxsw_sp *mlxsw_sp, u16 hw_id,
+ u64 rate, u64 burst, bool clear_counter,
+ struct netlink_ext_ack *extack)
+{
+ char qpcr_pl[MLXSW_REG_QPCR_LEN];
+ u8 burst_size;
+ int err;
+
+ err = mlxsw_sp_trap_policer_bs(burst, &burst_size, extack);
+ if (err)
+ return err;
+
+ mlxsw_reg_qpcr_pack(qpcr_pl, hw_id, MLXSW_REG_QPCR_IR_UNITS_M, false,
+ rate, burst_size);
+ mlxsw_reg_qpcr_clear_counter_set(qpcr_pl, clear_counter);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpcr), qpcr_pl);
+}
+
+int mlxsw_sp_trap_policer_init(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_sp_trap_policer_item *policer_item;
+ int err;
+
+ policer_item = mlxsw_sp_trap_policer_item_lookup(mlxsw_sp, policer->id);
+ if (WARN_ON(!policer_item))
+ return -EINVAL;
+
+ err = mlxsw_sp_trap_policer_item_init(mlxsw_sp, policer_item);
+ if (err)
+ return err;
+
+ err = __mlxsw_sp_trap_policer_set(mlxsw_sp, policer_item->hw_id,
+ policer->init_rate,
+ policer->init_burst, true, NULL);
+ if (err)
+ goto err_trap_policer_set;
+
+ return 0;
+
+err_trap_policer_set:
+ mlxsw_sp_trap_policer_item_fini(mlxsw_sp, policer_item);
+ return err;
+}
+
+void mlxsw_sp_trap_policer_fini(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_sp_trap_policer_item *policer_item;
+
+ policer_item = mlxsw_sp_trap_policer_item_lookup(mlxsw_sp, policer->id);
+ if (WARN_ON(!policer_item))
+ return;
+
+ mlxsw_sp_trap_policer_item_fini(mlxsw_sp, policer_item);
+}
+
+int mlxsw_sp_trap_policer_set(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer,
+ u64 rate, u64 burst,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_sp_trap_policer_item *policer_item;
+
+ policer_item = mlxsw_sp_trap_policer_item_lookup(mlxsw_sp, policer->id);
+ if (WARN_ON(!policer_item))
+ return -EINVAL;
+
+ return __mlxsw_sp_trap_policer_set(mlxsw_sp, policer_item->hw_id,
+ rate, burst, false, extack);
+}
+
+int
+mlxsw_sp_trap_policer_counter_get(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer,
+ u64 *p_drops)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_sp_trap_policer_item *policer_item;
+ char qpcr_pl[MLXSW_REG_QPCR_LEN];
+ int err;
+
+ policer_item = mlxsw_sp_trap_policer_item_lookup(mlxsw_sp, policer->id);
+ if (WARN_ON(!policer_item))
+ return -EINVAL;
+
+ mlxsw_reg_qpcr_pack(qpcr_pl, policer_item->hw_id,
+ MLXSW_REG_QPCR_IR_UNITS_M, false, 0, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(qpcr), qpcr_pl);
+ if (err)
+ return err;
+
+ *p_drops = mlxsw_reg_qpcr_violate_count_get(qpcr_pl);
+
+ return 0;
+}
+
+int mlxsw_sp_trap_group_policer_hw_id_get(struct mlxsw_sp *mlxsw_sp, u16 id,
+ bool *p_enabled, u16 *p_hw_id)
+{
+ struct mlxsw_sp_trap_policer_item *pol_item;
+ struct mlxsw_sp_trap_group_item *gr_item;
+ u32 pol_id;
+
+ gr_item = mlxsw_sp_trap_group_item_lookup(mlxsw_sp, id);
+ if (!gr_item)
+ return -ENOENT;
+
+ pol_id = gr_item->group.init_policer_id;
+ if (!pol_id) {
+ *p_enabled = false;
+ return 0;
+ }
+
+ pol_item = mlxsw_sp_trap_policer_item_lookup(mlxsw_sp, pol_id);
+ if (WARN_ON(!pol_item))
+ return -ENOENT;
+
+ *p_enabled = true;
+ *p_hw_id = pol_item->hw_id;
+ return 0;
+}
+
+static const struct mlxsw_sp_trap_group_item
+mlxsw_sp1_trap_group_items_arr[] = {
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(ACL_SAMPLE, 0),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_PKT_SAMPLE,
+ .priority = 0,
+ },
+};
+
+static const struct mlxsw_sp_trap_item
+mlxsw_sp1_trap_items_arr[] = {
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(FLOW_ACTION_SAMPLE, ACL_SAMPLE,
+ MIRROR),
+ .listeners_arr = {
+ MLXSW_RXL(mlxsw_sp_rx_sample_listener, PKT_SAMPLE,
+ MIRROR_TO_CPU, false, SP_PKT_SAMPLE, DISCARD),
+ },
+ },
+};
+
+static int
+mlxsw_sp1_trap_groups_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_trap_group_item **arr,
+ size_t *p_groups_count)
+{
+ *arr = mlxsw_sp1_trap_group_items_arr;
+ *p_groups_count = ARRAY_SIZE(mlxsw_sp1_trap_group_items_arr);
+
+ return 0;
+}
+
+static int mlxsw_sp1_traps_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_trap_item **arr,
+ size_t *p_traps_count)
+{
+ *arr = mlxsw_sp1_trap_items_arr;
+ *p_traps_count = ARRAY_SIZE(mlxsw_sp1_trap_items_arr);
+
+ return 0;
+}
+
+const struct mlxsw_sp_trap_ops mlxsw_sp1_trap_ops = {
+ .groups_init = mlxsw_sp1_trap_groups_init,
+ .traps_init = mlxsw_sp1_traps_init,
+};
+
+static const struct mlxsw_sp_trap_group_item
+mlxsw_sp2_trap_group_items_arr[] = {
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(BUFFER_DROPS, 20),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_BUFFER_DISCARDS,
+ .priority = 0,
+ .fixed_policer = true,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(ACL_SAMPLE, 0),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_PKT_SAMPLE,
+ .priority = 0,
+ .fixed_policer = true,
+ },
+};
+
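+/* On Spectrum-2 and later ASICs, sampled and buffer-dropped packets reach
+ * the CPU via mirroring, so the listeners below match on mirror reasons
+ * rather than trap identifiers.
+ */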
+static const struct mlxsw_sp_trap_item
+mlxsw_sp2_trap_items_arr[] = {
+ {
+ .trap = MLXSW_SP_TRAP_BUFFER_DROP(EARLY_DROP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_BUFFER_DISCARD(INGRESS_WRED),
+ },
+ .is_source = true,
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(FLOW_ACTION_SAMPLE, ACL_SAMPLE,
+ MIRROR),
+ .listeners_arr = {
+ MLXSW_RXL_MIRROR(mlxsw_sp_rx_sample_listener, 1,
+ SP_PKT_SAMPLE,
+ MLXSW_SP_MIRROR_REASON_INGRESS),
+ MLXSW_RXL_MIRROR(mlxsw_sp_rx_sample_tx_listener, 1,
+ SP_PKT_SAMPLE,
+ MLXSW_SP_MIRROR_REASON_EGRESS),
+ MLXSW_RXL_MIRROR(mlxsw_sp_rx_sample_acl_listener, 1,
+ SP_PKT_SAMPLE,
+ MLXSW_SP_MIRROR_REASON_POLICY_ENGINE),
+ },
+ },
+};
+
+static int
+mlxsw_sp2_trap_groups_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_trap_group_item **arr,
+ size_t *p_groups_count)
+{
+ *arr = mlxsw_sp2_trap_group_items_arr;
+ *p_groups_count = ARRAY_SIZE(mlxsw_sp2_trap_group_items_arr);
+
+ return 0;
+}
+
+static int mlxsw_sp2_traps_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_trap_item **arr,
+ size_t *p_traps_count)
+{
+ *arr = mlxsw_sp2_trap_items_arr;
+ *p_traps_count = ARRAY_SIZE(mlxsw_sp2_trap_items_arr);
+
+ return 0;
+}
+
+const struct mlxsw_sp_trap_ops mlxsw_sp2_trap_ops = {
+ .groups_init = mlxsw_sp2_trap_groups_init,
+ .traps_init = mlxsw_sp2_traps_init,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h
new file mode 100644
index 000000000..b8df684be
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2020 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_SPECTRUM_TRAP_H
+#define _MLXSW_SPECTRUM_TRAP_H
+
+#include <linux/list.h>
+#include <net/devlink.h>
+
+struct mlxsw_sp_trap {
+ struct mlxsw_sp_trap_policer_item *policer_items_arr;
+ size_t policers_count; /* Number of registered policers */
+
+ struct mlxsw_sp_trap_group_item *group_items_arr;
+ size_t groups_count; /* Number of registered groups */
+
+ struct mlxsw_sp_trap_item *trap_items_arr;
+ size_t traps_count; /* Number of registered traps */
+
+ u16 thin_policer_hw_id;
+
+ u64 max_policers;
+ unsigned long policers_usage[]; /* Usage bitmap */
+};
+
+struct mlxsw_sp_trap_ops {
+ int (*groups_init)(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_trap_group_item **arr,
+ size_t *p_groups_count);
+ int (*traps_init)(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_trap_item **arr,
+ size_t *p_traps_count);
+};
+
+extern const struct mlxsw_sp_trap_ops mlxsw_sp1_trap_ops;
+extern const struct mlxsw_sp_trap_ops mlxsw_sp2_trap_ops;
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
new file mode 100644
index 000000000..8da169663
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_TRAP_H
+#define _MLXSW_TRAP_H
+
+enum {
+ /* Ethernet EMAD and FDB miss */
+ MLXSW_TRAP_ID_FDB_MC = 0x01,
+ MLXSW_TRAP_ID_ETHEMAD = 0x05,
+ /* L2 traps for specific packet types */
+ MLXSW_TRAP_ID_STP = 0x10,
+ MLXSW_TRAP_ID_LACP = 0x11,
+ MLXSW_TRAP_ID_EAPOL = 0x12,
+ MLXSW_TRAP_ID_LLDP = 0x13,
+ MLXSW_TRAP_ID_MMRP = 0x14,
+ MLXSW_TRAP_ID_MVRP = 0x15,
+ MLXSW_TRAP_ID_RPVST = 0x16,
+ MLXSW_TRAP_ID_DHCP = 0x19,
+ MLXSW_TRAP_ID_PTP0 = 0x28,
+ MLXSW_TRAP_ID_PTP1 = 0x29,
+ MLXSW_TRAP_ID_IGMP_QUERY = 0x30,
+ MLXSW_TRAP_ID_IGMP_V1_REPORT = 0x31,
+ MLXSW_TRAP_ID_IGMP_V2_REPORT = 0x32,
+ MLXSW_TRAP_ID_IGMP_V2_LEAVE = 0x33,
+ MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
+ MLXSW_TRAP_ID_PKT_SAMPLE = 0x38,
+ MLXSW_TRAP_ID_FID_MISS = 0x3D,
+ MLXSW_TRAP_ID_DECAP_ECN0 = 0x40,
+ MLXSW_TRAP_ID_MTUERROR = 0x52,
+ MLXSW_TRAP_ID_TTLERROR = 0x53,
+ MLXSW_TRAP_ID_LBERROR = 0x54,
+ MLXSW_TRAP_ID_IPV4_OSPF = 0x55,
+ MLXSW_TRAP_ID_IPV4_PIM = 0x58,
+ MLXSW_TRAP_ID_IPV4_VRRP = 0x59,
+ MLXSW_TRAP_ID_RPF = 0x5C,
+ MLXSW_TRAP_ID_IP2ME = 0x5F,
+ MLXSW_TRAP_ID_IPV6_UNSPECIFIED_ADDRESS = 0x60,
+ MLXSW_TRAP_ID_IPV6_LINK_LOCAL_DEST = 0x61,
+ MLXSW_TRAP_ID_IPV6_LINK_LOCAL_SRC = 0x62,
+ MLXSW_TRAP_ID_IPV6_ALL_NODES_LINK = 0x63,
+ MLXSW_TRAP_ID_IPV6_OSPF = 0x64,
+ MLXSW_TRAP_ID_IPV6_MLDV12_LISTENER_QUERY = 0x65,
+ MLXSW_TRAP_ID_IPV6_MLDV1_LISTENER_REPORT = 0x66,
+ MLXSW_TRAP_ID_IPV6_MLDV1_LISTENER_DONE = 0x67,
+ MLXSW_TRAP_ID_IPV6_MLDV2_LISTENER_REPORT = 0x68,
+ MLXSW_TRAP_ID_IPV6_DHCP = 0x69,
+ MLXSW_TRAP_ID_IPV6_ALL_ROUTERS_LINK = 0x6F,
+ MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70,
+ MLXSW_TRAP_ID_RTR_INGRESS1 = 0x71,
+ MLXSW_TRAP_ID_IPV6_PIM = 0x79,
+ MLXSW_TRAP_ID_IPV6_VRRP = 0x7A,
+ MLXSW_TRAP_ID_RTR_EGRESS0 = 0x80,
+ MLXSW_TRAP_ID_IPV4_BGP = 0x88,
+ MLXSW_TRAP_ID_IPV6_BGP = 0x89,
+ MLXSW_TRAP_ID_L3_IPV6_ROUTER_SOLICITATION = 0x8A,
+ MLXSW_TRAP_ID_L3_IPV6_ROUTER_ADVERTISEMENT = 0x8B,
+ MLXSW_TRAP_ID_L3_IPV6_NEIGHBOR_SOLICITATION = 0x8C,
+ MLXSW_TRAP_ID_L3_IPV6_NEIGHBOR_ADVERTISEMENT = 0x8D,
+ MLXSW_TRAP_ID_L3_IPV6_REDIRECTION = 0x8E,
+ MLXSW_TRAP_ID_IPV4_DHCP = 0x8F,
+ MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90,
+ MLXSW_TRAP_ID_IPV6_MC_LINK_LOCAL_DEST = 0x91,
+ MLXSW_TRAP_ID_HOST_MISS_IPV6 = 0x92,
+ MLXSW_TRAP_ID_IPIP_DECAP_ERROR = 0xB1,
+ MLXSW_TRAP_ID_NVE_DECAP_ARP = 0xB8,
+ MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD,
+ MLXSW_TRAP_ID_IPV4_BFD = 0xD0,
+ MLXSW_TRAP_ID_IPV6_BFD = 0xD1,
+ MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
+ MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,
+ MLXSW_TRAP_ID_ROUTER_ARPBC = 0xE0,
+ MLXSW_TRAP_ID_ROUTER_ARPUC = 0xE1,
+ MLXSW_TRAP_ID_DISCARD_NON_ROUTABLE = 0x11A,
+ MLXSW_TRAP_ID_DISCARD_ROUTER2 = 0x130,
+ MLXSW_TRAP_ID_DISCARD_ROUTER3 = 0x131,
+ MLXSW_TRAP_ID_DISCARD_ING_PACKET_SMAC_MC = 0x140,
+ MLXSW_TRAP_ID_DISCARD_ING_SWITCH_VTAG_ALLOW = 0x148,
+ MLXSW_TRAP_ID_DISCARD_ING_SWITCH_VLAN = 0x149,
+ MLXSW_TRAP_ID_DISCARD_ING_SWITCH_STP = 0x14A,
+ MLXSW_TRAP_ID_DISCARD_LOOKUP_SWITCH_UC = 0x150,
+ MLXSW_TRAP_ID_DISCARD_LOOKUP_SWITCH_MC_NULL = 0x151,
+ MLXSW_TRAP_ID_DISCARD_LOOKUP_SWITCH_LB = 0x152,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_NON_IP_PACKET = 0x160,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_UC_DIP_MC_DMAC = 0x161,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_DIP_LB = 0x162,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_MC = 0x163,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_CLASS_E = 0x164,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_LB = 0x165,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_CORRUPTED_IP_HDR = 0x167,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_MC_DMAC = 0x168,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_DIP = 0x169,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_SIP_BC = 0x16A,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_DIP_LOCAL_NET = 0x16B,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_DIP_LINK_LOCAL = 0x16C,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_IRIF_EN = 0x178,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_ERIF_EN = 0x179,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_LPM4 = 0x17B,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_LPM6 = 0x17C,
+ MLXSW_TRAP_ID_DISCARD_DEC_PKT = 0x188,
+ MLXSW_TRAP_ID_DISCARD_OVERLAY_SMAC_MC = 0x190,
+ MLXSW_TRAP_ID_DISCARD_IPV6_MC_DIP_RESERVED_SCOPE = 0x1B0,
+ MLXSW_TRAP_ID_DISCARD_IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE = 0x1B1,
+ MLXSW_TRAP_ID_ACL0 = 0x1C0,
+ /* Multicast trap used for routes with trap action */
+ MLXSW_TRAP_ID_ACL1 = 0x1C1,
+ /* Multicast trap used for routes with trap-and-forward action */
+ MLXSW_TRAP_ID_ACL2 = 0x1C2,
+ MLXSW_TRAP_ID_DISCARD_INGRESS_ACL = 0x1C3,
+ MLXSW_TRAP_ID_DISCARD_EGRESS_ACL = 0x1C4,
+ MLXSW_TRAP_ID_MIRROR_SESSION0 = 0x220,
+ MLXSW_TRAP_ID_MIRROR_SESSION1 = 0x221,
+ MLXSW_TRAP_ID_MIRROR_SESSION2 = 0x222,
+ MLXSW_TRAP_ID_MIRROR_SESSION3 = 0x223,
+ MLXSW_TRAP_ID_MIRROR_SESSION4 = 0x224,
+ MLXSW_TRAP_ID_MIRROR_SESSION5 = 0x225,
+ MLXSW_TRAP_ID_MIRROR_SESSION6 = 0x226,
+ MLXSW_TRAP_ID_MIRROR_SESSION7 = 0x227,
+
+ MLXSW_TRAP_ID_MAX = 0x3FF,
+};
+
+enum mlxsw_event_trap_id {
+ /* Fatal Event generated by FW */
+ MLXSW_TRAP_ID_MFDE = 0x3,
+ /* Port Up/Down event generated by hardware */
+ MLXSW_TRAP_ID_PUDE = 0x8,
+ /* Port Module Plug/Unplug Event generated by hardware */
+ MLXSW_TRAP_ID_PMPE = 0x9,
+ /* Temperature Warning event generated by hardware */
+ MLXSW_TRAP_ID_MTWE = 0xC,
+ /* PTP Ingress FIFO has a new entry */
+ MLXSW_TRAP_ID_PTP_ING_FIFO = 0x2D,
+ /* PTP Egress FIFO has a new entry */
+ MLXSW_TRAP_ID_PTP_EGR_FIFO = 0x2E,
+ /* Downstream Device Status Change */
+ MLXSW_TRAP_ID_DSDSC = 0x321,
+ /* Binary Code Transfer Operation Executed Event */
+ MLXSW_TRAP_ID_BCTOE = 0x322,
+ /* Port mapping change */
+ MLXSW_TRAP_ID_PMLPE = 0x32E,
+};
+
+#endif /* _MLXSW_TRAP_H */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/txheader.h b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
new file mode 100644
index 000000000..da51dd9d5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_TXHEADER_H
+#define _MLXSW_TXHEADER_H
+
+#define MLXSW_TXHDR_LEN 0x10
+#define MLXSW_TXHDR_VERSION_0 0
+#define MLXSW_TXHDR_VERSION_1 1
+
+enum {
+ MLXSW_TXHDR_ETH_CTL,
+ MLXSW_TXHDR_ETH_DATA,
+};
+
+#define MLXSW_TXHDR_PROTO_ETH 1
+
+enum {
+ MLXSW_TXHDR_ETCLASS_0,
+ MLXSW_TXHDR_ETCLASS_1,
+ MLXSW_TXHDR_ETCLASS_2,
+ MLXSW_TXHDR_ETCLASS_3,
+ MLXSW_TXHDR_ETCLASS_4,
+ MLXSW_TXHDR_ETCLASS_5,
+ MLXSW_TXHDR_ETCLASS_6,
+ MLXSW_TXHDR_ETCLASS_7,
+};
+
+enum {
+ MLXSW_TXHDR_RDQ_OTHER,
+ MLXSW_TXHDR_RDQ_EMAD = 0x1f,
+};
+
+#define MLXSW_TXHDR_CTCLASS3 0
+#define MLXSW_TXHDR_CPU_SIG 0
+#define MLXSW_TXHDR_SIG 0xE0E0
+#define MLXSW_TXHDR_STCLASS_NONE 0
+
+enum {
+ MLXSW_TXHDR_NOT_EMAD,
+ MLXSW_TXHDR_EMAD,
+};
+
+enum {
+ MLXSW_TXHDR_TYPE_DATA,
+ MLXSW_TXHDR_TYPE_CONTROL = 6,
+};
+
+#endif