Diffstat (limited to 'src/spdk/dpdk/drivers/net/fm10k')
-rw-r--r--  src/spdk/dpdk/drivers/net/fm10k/Makefile                       77
-rw-r--r--  src/spdk/dpdk/drivers/net/fm10k/base/fm10k_api.c              346
-rw-r--r--  src/spdk/dpdk/drivers/net/fm10k/base/fm10k_api.h               35
-rw-r--r--  src/spdk/dpdk/drivers/net/fm10k/base/fm10k_common.c           550
-rw-r--r--  src/spdk/dpdk/drivers/net/fm10k/base/fm10k_common.h            23
-rw-r--r--  src/spdk/dpdk/drivers/net/fm10k/base/fm10k_mbx.c             2225
-rw-r--r--  src/spdk/dpdk/drivers/net/fm10k/base/fm10k_mbx.h              297
-rw-r--r--  src/spdk/dpdk/drivers/net/fm10k/base/fm10k_osdep.h            139
-rw-r--r--  src/spdk/dpdk/drivers/net/fm10k/base/fm10k_pf.c              2099
-rw-r--r--  src/spdk/dpdk/drivers/net/fm10k/base/fm10k_pf.h               164
-rw-r--r--  src/spdk/dpdk/drivers/net/fm10k/base/fm10k_tlv.c              887
-rw-r--r--  src/spdk/dpdk/drivers/net/fm10k/base/fm10k_tlv.h              165
-rw-r--r--  src/spdk/dpdk/drivers/net/fm10k/base/fm10k_type.h             854
-rw-r--r--  src/spdk/dpdk/drivers/net/fm10k/base/fm10k_vf.c               646
-rw-r--r--  src/spdk/dpdk/drivers/net/fm10k/base/fm10k_vf.h                68
-rw-r--r--  src/spdk/dpdk/drivers/net/fm10k/base/meson.build               28
-rw-r--r--  src/spdk/dpdk/drivers/net/fm10k/fm10k.h                       356
-rw-r--r--  src/spdk/dpdk/drivers/net/fm10k/fm10k_ethdev.c               3348
-rw-r--r--  src/spdk/dpdk/drivers/net/fm10k/fm10k_logs.h                   52
-rw-r--r--  src/spdk/dpdk/drivers/net/fm10k/fm10k_rxtx.c                  728
-rw-r--r--  src/spdk/dpdk/drivers/net/fm10k/fm10k_rxtx_vec.c              892
-rw-r--r--  src/spdk/dpdk/drivers/net/fm10k/meson.build                    16
-rw-r--r--  src/spdk/dpdk/drivers/net/fm10k/rte_pmd_fm10k_version.map       3
23 files changed, 13998 insertions, 0 deletions
diff --git a/src/spdk/dpdk/drivers/net/fm10k/Makefile b/src/spdk/dpdk/drivers/net/fm10k/Makefile
new file mode 100644
index 000000000..d48638992
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/Makefile
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2013-2015 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_fm10k.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_pmd_fm10k_version.map
+
+ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+#
+# CFLAGS for icc
+#
+CFLAGS_BASE_DRIVER = -diag-disable 174 -diag-disable 593 -diag-disable 869
+CFLAGS_BASE_DRIVER += -diag-disable 981 -diag-disable 2259
+
+else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
+
+#
+# CFLAGS for clang
+#
+CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value
+CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args
+CFLAGS_BASE_DRIVER += -Wno-unused-variable
+
+else
+#
+# CFLAGS for gcc
+#
+CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value
+CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args
+CFLAGS_BASE_DRIVER += -Wno-unused-variable
+
+ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1)
+CFLAGS += -Wno-deprecated
+CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable
+ifeq ($(shell test $(GCC_VERSION) -ge 70 && echo 1), 1)
+CFLAGS_BASE_DRIVER += -Wno-implicit-fallthrough
+endif
+endif
+endif
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash
+LDLIBS += -lrte_bus_pci
+
+#
+# Add extra flags for base driver source files to disable warnings in them
+#
+BASE_DRIVER_OBJS=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))))
+$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
+
+VPATH += $(SRCDIR)/base
+
+#
+# all sources are stored in SRCS-y
+# base driver is based on the cid-fm10k.2017.01.24.tar.gz package
+#
+SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_rxtx.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_pf.c
+SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_tlv.c
+SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_mbx.c
+SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_vf.c
+SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_api.c
+ifeq ($(CONFIG_RTE_ARCH_X86), y)
+SRCS-$(CONFIG_RTE_LIBRTE_FM10K_INC_VECTOR) += fm10k_rxtx_vec.c
+endif
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_api.c b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_api.c
new file mode 100644
index 000000000..dfb50a10d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_api.c
@@ -0,0 +1,346 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013 - 2015 Intel Corporation
+ */
+
+#include "fm10k_api.h"
+#include "fm10k_common.h"
+
+/**
+ * fm10k_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+s32 fm10k_set_mac_type(struct fm10k_hw *hw)
+{
+ s32 ret_val = FM10K_SUCCESS;
+
+ DEBUGFUNC("fm10k_set_mac_type");
+
+ if (hw->vendor_id != FM10K_INTEL_VENDOR_ID) {
+ ERROR_REPORT2(FM10K_ERROR_UNSUPPORTED,
+ "Unsupported vendor id: %x\n", hw->vendor_id);
+ return FM10K_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ switch (hw->device_id) {
+ case FM10K_DEV_ID_PF:
+#ifdef BOULDER_RAPIDS_HW
+ case FM10K_DEV_ID_SDI_FM10420_QDA2:
+#endif /* BOULDER_RAPIDS_HW */
+#ifdef ATWOOD_CHANNEL_HW
+ case FM10K_DEV_ID_SDI_FM10420_DA2:
+#endif /* ATWOOD_CHANNEL_HW */
+ hw->mac.type = fm10k_mac_pf;
+ break;
+ case FM10K_DEV_ID_VF:
+ hw->mac.type = fm10k_mac_vf;
+ break;
+ default:
+ ret_val = FM10K_ERR_DEVICE_NOT_SUPPORTED;
+ ERROR_REPORT2(FM10K_ERROR_UNSUPPORTED,
+ "Unsupported device id: %x\n",
+ hw->device_id);
+ break;
+ }
+
+ DEBUGOUT2("fm10k_set_mac_type found mac: %d, returns: %d\n",
+ hw->mac.type, ret_val);
+
+ return ret_val;
+}
+
+/**
+ * fm10k_init_shared_code - Initialize the shared code
+ * @hw: pointer to hardware structure
+ *
+ * This will assign function pointers and assign the MAC type and PHY code.
+ * Does not touch the hardware. This function must be called prior to any
+ * other function in the shared code. The fm10k_hw structure should be
+ * memset to 0 prior to calling this function. The following fields in
+ * hw structure should be filled in prior to calling this function:
+ * hw_addr, back, device_id, vendor_id, subsystem_device_id,
+ * subsystem_vendor_id, and revision_id
+ **/
+s32 fm10k_init_shared_code(struct fm10k_hw *hw)
+{
+ s32 status;
+
+ DEBUGFUNC("fm10k_init_shared_code");
+
+ /* Set the mac type */
+ fm10k_set_mac_type(hw);
+
+ switch (hw->mac.type) {
+ case fm10k_mac_pf:
+ status = fm10k_init_ops_pf(hw);
+ break;
+ case fm10k_mac_vf:
+ status = fm10k_init_ops_vf(hw);
+ break;
+ default:
+ status = FM10K_ERR_DEVICE_NOT_SUPPORTED;
+ break;
+ }
+
+ return status;
+}
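
For reference, a minimal bring-up sketch based on the kernel-doc above; mmio_base and the PCI ID values are hypothetical placeholders that a real caller would read from PCI config space:

    struct fm10k_hw hw;

    memset(&hw, 0, sizeof(hw));           /* doc requires a zeroed structure */
    hw.hw_addr   = mmio_base;             /* mapped BAR, caller-provided */
    hw.vendor_id = FM10K_INTEL_VENDOR_ID;
    hw.device_id = FM10K_DEV_ID_PF;       /* or FM10K_DEV_ID_VF */
    /* subsystem and revision IDs also come from PCI config space */

    if (fm10k_init_shared_code(&hw) != FM10K_SUCCESS)
        return FM10K_ERR_DEVICE_NOT_SUPPORTED;
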
+
+#define fm10k_call_func(hw, func, params, error) \
+ ((func) ? (func params) : (error))
+
+/**
+ * fm10k_reset_hw - Reset the hardware to known good state
+ * @hw: pointer to hardware structure
+ *
+ * This function should return the hardware to a state similar to the
+ * one it is in after being powered on.
+ **/
+s32 fm10k_reset_hw(struct fm10k_hw *hw)
+{
+ return fm10k_call_func(hw, hw->mac.ops.reset_hw, (hw),
+ FM10K_NOT_IMPLEMENTED);
+}
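
Every wrapper below follows the same fm10k_call_func() pattern, so fm10k_reset_hw() above is equivalent to this hand-expanded form:

    s32 fm10k_reset_hw(struct fm10k_hw *hw)
    {
        /* guard against an unpopulated ops table */
        if (hw->mac.ops.reset_hw)
            return hw->mac.ops.reset_hw(hw);
        return FM10K_NOT_IMPLEMENTED;
    }
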
+
+/**
+ * fm10k_init_hw - Initialize the hardware
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the hardware by resetting and then starting the hardware
+ **/
+s32 fm10k_init_hw(struct fm10k_hw *hw)
+{
+ return fm10k_call_func(hw, hw->mac.ops.init_hw, (hw),
+ FM10K_NOT_IMPLEMENTED);
+}
+
+/**
+ * fm10k_stop_hw - Prepares hardware to shutdown Rx/Tx
+ * @hw: pointer to hardware structure
+ *
+ * Disables Rx/Tx queues and disables the DMA engine.
+ **/
+s32 fm10k_stop_hw(struct fm10k_hw *hw)
+{
+ return fm10k_call_func(hw, hw->mac.ops.stop_hw, (hw),
+ FM10K_NOT_IMPLEMENTED);
+}
+
+/**
+ * fm10k_start_hw - Prepares hardware for Rx/Tx
+ * @hw: pointer to hardware structure
+ *
+ * This function sets the flags indicating that the hardware is ready to
+ * begin operation.
+ **/
+s32 fm10k_start_hw(struct fm10k_hw *hw)
+{
+ return fm10k_call_func(hw, hw->mac.ops.start_hw, (hw),
+ FM10K_NOT_IMPLEMENTED);
+}
+
+/**
+ * fm10k_get_bus_info - Set PCI bus info
+ * @hw: pointer to hardware structure
+ *
+ * Sets the PCI bus info (speed, width, type) within the fm10k_hw structure
+ **/
+s32 fm10k_get_bus_info(struct fm10k_hw *hw)
+{
+ return fm10k_call_func(hw, hw->mac.ops.get_bus_info, (hw),
+ FM10K_NOT_IMPLEMENTED);
+}
+
+#ifndef NO_IS_SLOT_APPROPRIATE_CHECK
+/**
+ * fm10k_is_slot_appropriate - Indicate appropriate slot for this SKU
+ * @hw: pointer to hardware structure
+ *
+ * Looks at the PCIe bus info to confirm whether or not this slot can support
+ * the necessary bandwidth for this device.
+ **/
+bool fm10k_is_slot_appropriate(struct fm10k_hw *hw)
+{
+ if (hw->mac.ops.is_slot_appropriate)
+ return hw->mac.ops.is_slot_appropriate(hw);
+ return true;
+}
+
+#endif
+/**
+ * fm10k_update_vlan - Add or remove VLAN ID from the VLAN filter table
+ * @hw: pointer to hardware structure
+ * @vid: VLAN ID to add to table
+ * @idx: Index indicating VF ID or PF ID in table
+ * @set: Indicates if this is a set or clear operation
+ *
+ * This function adds or removes the corresponding VLAN ID from the VLAN
+ * filter table for the corresponding function.
+ **/
+s32 fm10k_update_vlan(struct fm10k_hw *hw, u32 vid, u8 idx, bool set)
+{
+ return fm10k_call_func(hw, hw->mac.ops.update_vlan, (hw, vid, idx, set),
+ FM10K_NOT_IMPLEMENTED);
+}
+
+/**
+ * fm10k_read_mac_addr - Reads MAC address
+ * @hw: pointer to hardware structure
+ *
+ * Reads the MAC address out of the interface and stores it in the HW
+ * structures.
+ **/
+s32 fm10k_read_mac_addr(struct fm10k_hw *hw)
+{
+ return fm10k_call_func(hw, hw->mac.ops.read_mac_addr, (hw),
+ FM10K_NOT_IMPLEMENTED);
+}
+
+/**
+ * fm10k_update_hw_stats - Update hw statistics
+ * @hw: pointer to hardware structure
+ * @stats: pointer to the stats structure to be updated
+ *
+ * This function updates statistics that are related to hardware.
+ **/
+void fm10k_update_hw_stats(struct fm10k_hw *hw, struct fm10k_hw_stats *stats)
+{
+ switch (hw->mac.type) {
+ case fm10k_mac_pf:
+ return fm10k_update_hw_stats_pf(hw, stats);
+ case fm10k_mac_vf:
+ return fm10k_update_hw_stats_vf(hw, stats);
+ default:
+ break;
+ }
+}
+
+/**
+ * fm10k_rebind_hw_stats - Reset base for hw statistics
+ * @hw: pointer to hardware structure
+ * @stats: pointer to the stats structure whose base is to be reset
+ *
+ * This function resets the base for statistics that are related to hardware.
+ **/
+void fm10k_rebind_hw_stats(struct fm10k_hw *hw, struct fm10k_hw_stats *stats)
+{
+ switch (hw->mac.type) {
+ case fm10k_mac_pf:
+ return fm10k_rebind_hw_stats_pf(hw, stats);
+ case fm10k_mac_vf:
+ return fm10k_rebind_hw_stats_vf(hw, stats);
+ default:
+ break;
+ }
+}
+
+/**
+ * fm10k_configure_dglort_map - Configures GLORT entry and queues
+ * @hw: pointer to hardware structure
+ * @dglort: pointer to dglort configuration structure
+ *
+ * Reads the configuration structure contained in dglort_cfg and uses
+ * that information to then populate a DGLORTMAP/DEC entry and the queues
+ * to which it has been assigned.
+ **/
+s32 fm10k_configure_dglort_map(struct fm10k_hw *hw,
+ struct fm10k_dglort_cfg *dglort)
+{
+ return fm10k_call_func(hw, hw->mac.ops.configure_dglort_map,
+ (hw, dglort), FM10K_NOT_IMPLEMENTED);
+}
+
+/**
+ * fm10k_set_dma_mask - Configures PhyAddrSpace to limit DMA to system
+ * @hw: pointer to hardware structure
+ * @dma_mask: 64 bit DMA mask required for platform
+ *
+ * This function configures the endpoint to limit the access to memory
+ * beyond what is physically in the system.
+ **/
+void fm10k_set_dma_mask(struct fm10k_hw *hw, u64 dma_mask)
+{
+ if (hw->mac.ops.set_dma_mask)
+ hw->mac.ops.set_dma_mask(hw, dma_mask);
+}
+
+/**
+ * fm10k_get_fault - Record a fault in one of the interface units
+ * @hw: pointer to hardware structure
+ * @type: pointer to fault type register offset
+ * @fault: pointer to memory location to record the fault
+ *
+ * Record the fault register contents to the fault data structure and
+ * clear the entry from the register.
+ *
+ * Returns ERR_PARAM if invalid register is specified or no error is present.
+ **/
+s32 fm10k_get_fault(struct fm10k_hw *hw, int type, struct fm10k_fault *fault)
+{
+ return fm10k_call_func(hw, hw->mac.ops.get_fault, (hw, type, fault),
+ FM10K_NOT_IMPLEMENTED);
+}
+
+/**
+ * fm10k_update_uc_addr - Update device unicast address
+ * @hw: pointer to the HW structure
+ * @lport: logical port ID to update - unused
+ * @mac: MAC address to add/remove from table
+ * @vid: VLAN ID to add/remove from table
+ * @add: Indicates if this is an add or remove operation
+ * @flags: flags field to indicate add and secure - unused
+ *
+ * This function is used to add or remove unicast MAC addresses
+ **/
+s32 fm10k_update_uc_addr(struct fm10k_hw *hw, u16 lport,
+ const u8 *mac, u16 vid, bool add, u8 flags)
+{
+ return fm10k_call_func(hw, hw->mac.ops.update_uc_addr,
+ (hw, lport, mac, vid, add, flags),
+ FM10K_NOT_IMPLEMENTED);
+}
+
+/**
+ * fm10k_update_mc_addr - Update device multicast address
+ * @hw: pointer to the HW structure
+ * @lport: logical port ID to update - unused
+ * @mac: MAC address to add/remove from table
+ * @vid: VLAN ID to add/remove from table
+ * @add: Indicates if this is an add or remove operation
+ *
+ * This function is used to add or remove multicast MAC addresses
+ **/
+s32 fm10k_update_mc_addr(struct fm10k_hw *hw, u16 lport,
+ const u8 *mac, u16 vid, bool add)
+{
+ return fm10k_call_func(hw, hw->mac.ops.update_mc_addr,
+ (hw, lport, mac, vid, add),
+ FM10K_NOT_IMPLEMENTED);
+}
+
+/**
+ * fm10k_adjust_systime - Adjust systime frequency
+ * @hw: pointer to hardware structure
+ * @ppb: adjustment rate in parts per billion
+ *
+ * This function is meant to update the frequency of the clock represented
+ * by the SYSTIME register.
+ **/
+s32 fm10k_adjust_systime(struct fm10k_hw *hw, s32 ppb)
+{
+ return fm10k_call_func(hw, hw->mac.ops.adjust_systime,
+ (hw, ppb), FM10K_NOT_IMPLEMENTED);
+}
+
+/**
+ * fm10k_notify_offset - Notify switch of change in PTP offset
+ * @hw: pointer to hardware structure
+ * @offset: 64-bit unsigned offset from hardware SYSTIME value
+ *
+ * This function is meant to notify switch of change in the PTP offset for
+ * the hardware SYSTIME registers.
+ **/
+s32 fm10k_notify_offset(struct fm10k_hw *hw, u64 offset)
+{
+ return fm10k_call_func(hw, hw->mac.ops.notify_offset,
+ (hw, offset), FM10K_NOT_IMPLEMENTED);
+}
diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_api.h b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_api.h
new file mode 100644
index 000000000..d9593bba0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_api.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013 - 2015 Intel Corporation
+ */
+
+#ifndef _FM10K_API_H_
+#define _FM10K_API_H_
+
+#include "fm10k_pf.h"
+#include "fm10k_vf.h"
+
+s32 fm10k_set_mac_type(struct fm10k_hw *hw);
+s32 fm10k_reset_hw(struct fm10k_hw *hw);
+s32 fm10k_init_hw(struct fm10k_hw *hw);
+s32 fm10k_stop_hw(struct fm10k_hw *hw);
+s32 fm10k_start_hw(struct fm10k_hw *hw);
+s32 fm10k_init_shared_code(struct fm10k_hw *hw);
+s32 fm10k_get_bus_info(struct fm10k_hw *hw);
+#ifndef NO_IS_SLOT_APPROPRIATE_CHECK
+bool fm10k_is_slot_appropriate(struct fm10k_hw *hw);
+#endif
+s32 fm10k_update_vlan(struct fm10k_hw *hw, u32 vid, u8 idx, bool set);
+s32 fm10k_read_mac_addr(struct fm10k_hw *hw);
+void fm10k_update_hw_stats(struct fm10k_hw *hw, struct fm10k_hw_stats *stats);
+void fm10k_rebind_hw_stats(struct fm10k_hw *hw, struct fm10k_hw_stats *stats);
+s32 fm10k_configure_dglort_map(struct fm10k_hw *hw,
+ struct fm10k_dglort_cfg *dglort);
+void fm10k_set_dma_mask(struct fm10k_hw *hw, u64 dma_mask);
+s32 fm10k_get_fault(struct fm10k_hw *hw, int type, struct fm10k_fault *fault);
+s32 fm10k_update_uc_addr(struct fm10k_hw *hw, u16 lport,
+ const u8 *mac, u16 vid, bool add, u8 flags);
+s32 fm10k_update_mc_addr(struct fm10k_hw *hw, u16 lport,
+ const u8 *mac, u16 vid, bool add);
+s32 fm10k_adjust_systime(struct fm10k_hw *hw, s32 ppb);
+s32 fm10k_notify_offset(struct fm10k_hw *hw, u64 offset);
+#endif /* _FM10K_API_H_ */
diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_common.c b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_common.c
new file mode 100644
index 000000000..b78d4b575
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_common.c
@@ -0,0 +1,550 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013 - 2015 Intel Corporation
+ */
+
+#include "fm10k_common.h"
+
+/**
+ * fm10k_get_bus_info_generic - Generic set PCI bus info
+ * @hw: pointer to hardware structure
+ *
+ * Gets the PCI bus info (speed, width, type) then calls helper function to
+ * store this data within the fm10k_hw structure.
+ **/
+STATIC s32 fm10k_get_bus_info_generic(struct fm10k_hw *hw)
+{
+ u16 link_cap, link_status, device_cap, device_control;
+
+ DEBUGFUNC("fm10k_get_bus_info_generic");
+
+ /* Get the maximum link width and speed from PCIe config space */
+ link_cap = FM10K_READ_PCI_WORD(hw, FM10K_PCIE_LINK_CAP);
+
+ switch (link_cap & FM10K_PCIE_LINK_WIDTH) {
+ case FM10K_PCIE_LINK_WIDTH_1:
+ hw->bus_caps.width = fm10k_bus_width_pcie_x1;
+ break;
+ case FM10K_PCIE_LINK_WIDTH_2:
+ hw->bus_caps.width = fm10k_bus_width_pcie_x2;
+ break;
+ case FM10K_PCIE_LINK_WIDTH_4:
+ hw->bus_caps.width = fm10k_bus_width_pcie_x4;
+ break;
+ case FM10K_PCIE_LINK_WIDTH_8:
+ hw->bus_caps.width = fm10k_bus_width_pcie_x8;
+ break;
+ default:
+ hw->bus_caps.width = fm10k_bus_width_unknown;
+ break;
+ }
+
+ switch (link_cap & FM10K_PCIE_LINK_SPEED) {
+ case FM10K_PCIE_LINK_SPEED_2500:
+ hw->bus_caps.speed = fm10k_bus_speed_2500;
+ break;
+ case FM10K_PCIE_LINK_SPEED_5000:
+ hw->bus_caps.speed = fm10k_bus_speed_5000;
+ break;
+ case FM10K_PCIE_LINK_SPEED_8000:
+ hw->bus_caps.speed = fm10k_bus_speed_8000;
+ break;
+ default:
+ hw->bus_caps.speed = fm10k_bus_speed_unknown;
+ break;
+ }
+
+ /* Get the PCIe maximum payload size for the PCIe function */
+ device_cap = FM10K_READ_PCI_WORD(hw, FM10K_PCIE_DEV_CAP);
+
+ switch (device_cap & FM10K_PCIE_DEV_CAP_PAYLOAD) {
+ case FM10K_PCIE_DEV_CAP_PAYLOAD_128:
+ hw->bus_caps.payload = fm10k_bus_payload_128;
+ break;
+ case FM10K_PCIE_DEV_CAP_PAYLOAD_256:
+ hw->bus_caps.payload = fm10k_bus_payload_256;
+ break;
+ case FM10K_PCIE_DEV_CAP_PAYLOAD_512:
+ hw->bus_caps.payload = fm10k_bus_payload_512;
+ break;
+ default:
+ hw->bus_caps.payload = fm10k_bus_payload_unknown;
+ break;
+ }
+
+ /* Get the negotiated link width and speed from PCIe config space */
+ link_status = FM10K_READ_PCI_WORD(hw, FM10K_PCIE_LINK_STATUS);
+
+ switch (link_status & FM10K_PCIE_LINK_WIDTH) {
+ case FM10K_PCIE_LINK_WIDTH_1:
+ hw->bus.width = fm10k_bus_width_pcie_x1;
+ break;
+ case FM10K_PCIE_LINK_WIDTH_2:
+ hw->bus.width = fm10k_bus_width_pcie_x2;
+ break;
+ case FM10K_PCIE_LINK_WIDTH_4:
+ hw->bus.width = fm10k_bus_width_pcie_x4;
+ break;
+ case FM10K_PCIE_LINK_WIDTH_8:
+ hw->bus.width = fm10k_bus_width_pcie_x8;
+ break;
+ default:
+ hw->bus.width = fm10k_bus_width_unknown;
+ break;
+ }
+
+ switch (link_status & FM10K_PCIE_LINK_SPEED) {
+ case FM10K_PCIE_LINK_SPEED_2500:
+ hw->bus.speed = fm10k_bus_speed_2500;
+ break;
+ case FM10K_PCIE_LINK_SPEED_5000:
+ hw->bus.speed = fm10k_bus_speed_5000;
+ break;
+ case FM10K_PCIE_LINK_SPEED_8000:
+ hw->bus.speed = fm10k_bus_speed_8000;
+ break;
+ default:
+ hw->bus.speed = fm10k_bus_speed_unknown;
+ break;
+ }
+
+ /* Get the negotiated PCIe maximum payload size for the PCIe function */
+ device_control = FM10K_READ_PCI_WORD(hw, FM10K_PCIE_DEV_CTRL);
+
+ switch (device_control & FM10K_PCIE_DEV_CTRL_PAYLOAD) {
+ case FM10K_PCIE_DEV_CTRL_PAYLOAD_128:
+ hw->bus.payload = fm10k_bus_payload_128;
+ break;
+ case FM10K_PCIE_DEV_CTRL_PAYLOAD_256:
+ hw->bus.payload = fm10k_bus_payload_256;
+ break;
+ case FM10K_PCIE_DEV_CTRL_PAYLOAD_512:
+ hw->bus.payload = fm10k_bus_payload_512;
+ break;
+ default:
+ hw->bus.payload = fm10k_bus_payload_unknown;
+ break;
+ }
+
+ return FM10K_SUCCESS;
+}
+
+u16 fm10k_get_pcie_msix_count_generic(struct fm10k_hw *hw)
+{
+ u16 msix_count;
+
+ DEBUGFUNC("fm10k_get_pcie_msix_count_generic");
+
+ /* read in value from MSI-X capability register */
+ msix_count = FM10K_READ_PCI_WORD(hw, FM10K_PCI_MSIX_MSG_CTRL);
+ msix_count &= FM10K_PCI_MSIX_MSG_CTRL_TBL_SZ_MASK;
+
+ /* MSI-X count is zero-based in HW */
+ msix_count++;
+
+ if (msix_count > FM10K_MAX_MSIX_VECTORS)
+ msix_count = FM10K_MAX_MSIX_VECTORS;
+
+ return msix_count;
+}
+
+/**
+ * fm10k_init_ops_generic - Inits function ptrs
+ * @hw: pointer to the hardware structure
+ *
+ * Initialize the function pointers.
+ **/
+s32 fm10k_init_ops_generic(struct fm10k_hw *hw)
+{
+ struct fm10k_mac_info *mac = &hw->mac;
+
+ DEBUGFUNC("fm10k_init_ops_generic");
+
+ /* MAC */
+ mac->ops.get_bus_info = &fm10k_get_bus_info_generic;
+
+ /* initialize GLORT state to avoid any false hits */
+ mac->dglort_map = FM10K_DGLORTMAP_NONE;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_start_hw_generic - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * This function sets the Tx ready flag to indicate that the Tx path has
+ * been initialized.
+ **/
+s32 fm10k_start_hw_generic(struct fm10k_hw *hw)
+{
+ DEBUGFUNC("fm10k_start_hw_generic");
+
+ /* set flag indicating we are beginning Tx */
+ hw->mac.tx_ready = true;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_disable_queues_generic - Stop Tx/Rx queues
+ * @hw: pointer to hardware structure
+ * @q_cnt: number of queues to be disabled
+ *
+ **/
+s32 fm10k_disable_queues_generic(struct fm10k_hw *hw, u16 q_cnt)
+{
+ u32 reg;
+ u16 i, time;
+
+ DEBUGFUNC("fm10k_disable_queues_generic");
+
+ /* clear tx_ready to prevent any false hits for reset */
+ hw->mac.tx_ready = false;
+
+ if (FM10K_REMOVED(hw->hw_addr))
+ return FM10K_SUCCESS;
+
+ /* clear the enable bit for all rings */
+ for (i = 0; i < q_cnt; i++) {
+ reg = FM10K_READ_REG(hw, FM10K_TXDCTL(i));
+ FM10K_WRITE_REG(hw, FM10K_TXDCTL(i),
+ reg & ~FM10K_TXDCTL_ENABLE);
+ reg = FM10K_READ_REG(hw, FM10K_RXQCTL(i));
+ FM10K_WRITE_REG(hw, FM10K_RXQCTL(i),
+ reg & ~FM10K_RXQCTL_ENABLE);
+ }
+
+ FM10K_WRITE_FLUSH(hw);
+ usec_delay(1);
+
+ /* loop through all queues to verify that they are all disabled */
+ for (i = 0, time = FM10K_QUEUE_DISABLE_TIMEOUT; time;) {
+ /* if we are at end of rings all rings are disabled */
+ if (i == q_cnt)
+ return FM10K_SUCCESS;
+
+ /* if queue enables cleared, then move to next ring pair */
+ reg = FM10K_READ_REG(hw, FM10K_TXDCTL(i));
+ if (!~reg || !(reg & FM10K_TXDCTL_ENABLE)) {
+ reg = FM10K_READ_REG(hw, FM10K_RXQCTL(i));
+ if (!~reg || !(reg & FM10K_RXQCTL_ENABLE)) {
+ i++;
+ continue;
+ }
+ }
+
+ /* decrement time and wait 1 usec */
+ time--;
+ if (time)
+ usec_delay(1);
+ }
+
+ return FM10K_ERR_REQUESTS_PENDING;
+}
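
The !~reg tests in the loop above double as a surprise-removal check: a PCIe function that has been hot-removed reads as all-ones on every MMIO access, so an inverted value of zero means the register contents cannot be trusted. The idiom in isolation, as an illustrative helper:

    /* A removed device returns 0xFFFFFFFF for every read, so ~reg == 0
     * flags removal rather than a real register value. */
    static bool reg_indicates_removal(u32 reg)
    {
        return !(~reg);
    }
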
+
+/**
+ * fm10k_stop_hw_generic - Stop Tx/Rx units
+ * @hw: pointer to hardware structure
+ *
+ **/
+s32 fm10k_stop_hw_generic(struct fm10k_hw *hw)
+{
+ DEBUGFUNC("fm10k_stop_hw_generic");
+
+ return fm10k_disable_queues_generic(hw, hw->mac.max_queues);
+}
+
+/**
+ * fm10k_read_hw_stats_32b - Reads value of 32-bit registers
+ * @hw: pointer to the hardware structure
+ * @addr: address of register containing a 32-bit value
+ * @stat: pointer to the hardware statistic structure for this counter
+ *
+ * Function reads the content of the register and returns the delta
+ * between the base and the current value.
+ **/
+u32 fm10k_read_hw_stats_32b(struct fm10k_hw *hw, u32 addr,
+ struct fm10k_hw_stat *stat)
+{
+ u32 delta = FM10K_READ_REG(hw, addr) - stat->base_l;
+
+ DEBUGFUNC("fm10k_read_hw_stats_32b");
+
+ if (FM10K_REMOVED(hw->hw_addr))
+ stat->base_h = 0;
+
+ return delta;
+}
+
+/**
+ * fm10k_read_hw_stats_48b - Reads value of 48-bit registers
+ * @hw: pointer to the hardware structure
+ * @addr: address of register containing the lower 32-bit value
+ * @stat: pointer to the hardware statistic structure for this counter
+ *
+ * Function reads the content of 2 registers, combined to represent a 48-bit
+ * statistical value. Extra processing is required to handle overflowing.
+ * Finally, a delta value is returned representing the difference between the
+ * values stored in registers and values stored in the statistic counters.
+ **/
+STATIC u64 fm10k_read_hw_stats_48b(struct fm10k_hw *hw, u32 addr,
+ struct fm10k_hw_stat *stat)
+{
+ u32 count_l;
+ u32 count_h;
+ u32 count_tmp;
+ u64 delta;
+
+ DEBUGFUNC("fm10k_read_hw_stats_48b");
+
+ count_h = FM10K_READ_REG(hw, addr + 1);
+
+ /* Check for overflow */
+ do {
+ count_tmp = count_h;
+ count_l = FM10K_READ_REG(hw, addr);
+ count_h = FM10K_READ_REG(hw, addr + 1);
+ } while (count_h != count_tmp);
+
+ delta = ((u64)(count_h - stat->base_h) << 32) + count_l;
+ delta -= stat->base_l;
+
+ return delta & FM10K_48_BIT_MASK;
+}
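
The high/low/high re-read loop above is the standard technique for snapshotting a counter wider than a single register: re-read the high word until it is stable across the low-word read, so a carry between the two reads can never be observed. A self-contained sketch, with rd standing in for the register accessor:

    static u64 read_split_counter(u32 (*rd)(u32 addr), u32 addr_lo)
    {
        u32 lo, hi, hi_prev;

        hi = rd(addr_lo + 1);             /* high word lives at addr_lo + 1 */
        do {
            hi_prev = hi;
            lo = rd(addr_lo);
            hi = rd(addr_lo + 1);         /* re-read until stable */
        } while (hi != hi_prev);

        return ((u64)hi << 32) | lo;
    }
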
+
+/**
+ * fm10k_update_hw_base_48b - Updates 48-bit statistic base value
+ * @stat: pointer to the hardware statistic structure
+ * @delta: value to be updated into the hardware statistic structure
+ *
+ * Function receives a value and determines if an update is required based on
+ * a delta calculation. Only the base value will be updated.
+ **/
+STATIC void fm10k_update_hw_base_48b(struct fm10k_hw_stat *stat, u64 delta)
+{
+ DEBUGFUNC("fm10k_update_hw_base_48b");
+
+ if (!delta)
+ return;
+
+ /* update lower 32 bits */
+ delta += stat->base_l;
+ stat->base_l = (u32)delta;
+
+ /* update upper 32 bits */
+ stat->base_h += (u32)(delta >> 32);
+}
+
+/**
+ * fm10k_update_hw_stats_tx_q - Updates TX queue statistics counters
+ * @hw: pointer to the hardware structure
+ * @q: pointer to the ring of hardware statistics queue
+ * @idx: index pointing to the start of the ring iteration
+ *
+ * Function updates the TX queue statistics counters that are related to the
+ * hardware.
+ **/
+STATIC void fm10k_update_hw_stats_tx_q(struct fm10k_hw *hw,
+ struct fm10k_hw_stats_q *q,
+ u32 idx)
+{
+ u32 id_tx, id_tx_prev, tx_packets;
+ u64 tx_bytes = 0;
+
+ DEBUGFUNC("fm10k_update_hw_stats_tx_q");
+
+ /* Retrieve TX Owner Data */
+ id_tx = FM10K_READ_REG(hw, FM10K_TXQCTL(idx));
+
+ /* Process TX Ring */
+ do {
+ tx_packets = fm10k_read_hw_stats_32b(hw, FM10K_QPTC(idx),
+ &q->tx_packets);
+
+ if (tx_packets)
+ tx_bytes = fm10k_read_hw_stats_48b(hw,
+ FM10K_QBTC_L(idx),
+ &q->tx_bytes);
+
+ /* Re-Check Owner Data */
+ id_tx_prev = id_tx;
+ id_tx = FM10K_READ_REG(hw, FM10K_TXQCTL(idx));
+ } while ((id_tx ^ id_tx_prev) & FM10K_TXQCTL_ID_MASK);
+
+ /* drop non-ID bits and set VALID ID bit */
+ id_tx &= FM10K_TXQCTL_ID_MASK;
+ id_tx |= FM10K_STAT_VALID;
+
+ /* update packet counts */
+ if (q->tx_stats_idx == id_tx) {
+ q->tx_packets.count += tx_packets;
+ q->tx_bytes.count += tx_bytes;
+ }
+
+ /* update bases and record ID */
+ fm10k_update_hw_base_32b(&q->tx_packets, tx_packets);
+ fm10k_update_hw_base_48b(&q->tx_bytes, tx_bytes);
+
+ q->tx_stats_idx = id_tx;
+}
+
+/**
+ * fm10k_update_hw_stats_rx_q - Updates RX queue statistics counters
+ * @hw: pointer to the hardware structure
+ * @q: pointer to the ring of hardware statistics queue
+ * @idx: index pointing to the start of the ring iteration
+ *
+ * Function updates the RX queue statistics counters that are related to the
+ * hardware.
+ **/
+STATIC void fm10k_update_hw_stats_rx_q(struct fm10k_hw *hw,
+ struct fm10k_hw_stats_q *q,
+ u32 idx)
+{
+ u32 id_rx, id_rx_prev, rx_packets, rx_drops;
+ u64 rx_bytes = 0;
+
+ DEBUGFUNC("fm10k_update_hw_stats_rx_q");
+
+ /* Retrieve RX Owner Data */
+ id_rx = FM10K_READ_REG(hw, FM10K_RXQCTL(idx));
+
+ /* Process RX Ring */
+ do {
+ rx_drops = fm10k_read_hw_stats_32b(hw, FM10K_QPRDC(idx),
+ &q->rx_drops);
+
+ rx_packets = fm10k_read_hw_stats_32b(hw, FM10K_QPRC(idx),
+ &q->rx_packets);
+
+ if (rx_packets)
+ rx_bytes = fm10k_read_hw_stats_48b(hw,
+ FM10K_QBRC_L(idx),
+ &q->rx_bytes);
+
+ /* Re-Check Owner Data */
+ id_rx_prev = id_rx;
+ id_rx = FM10K_READ_REG(hw, FM10K_RXQCTL(idx));
+ } while ((id_rx ^ id_rx_prev) & FM10K_RXQCTL_ID_MASK);
+
+ /* drop non-ID bits and set VALID ID bit */
+ id_rx &= FM10K_RXQCTL_ID_MASK;
+ id_rx |= FM10K_STAT_VALID;
+
+ /* update packet counts */
+ if (q->rx_stats_idx == id_rx) {
+ q->rx_drops.count += rx_drops;
+ q->rx_packets.count += rx_packets;
+ q->rx_bytes.count += rx_bytes;
+ }
+
+ /* update bases and record ID */
+ fm10k_update_hw_base_32b(&q->rx_drops, rx_drops);
+ fm10k_update_hw_base_32b(&q->rx_packets, rx_packets);
+ fm10k_update_hw_base_48b(&q->rx_bytes, rx_bytes);
+
+ q->rx_stats_idx = id_rx;
+}
+
+/**
+ * fm10k_update_hw_stats_q - Updates queue statistics counters
+ * @hw: pointer to the hardware structure
+ * @q: pointer to the ring of hardware statistics queue
+ * @idx: index pointing to the start of the ring iteration
+ * @count: number of queues to iterate over
+ *
+ * Function updates the queue statistics counters that are related to the
+ * hardware.
+ **/
+void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
+ u32 idx, u32 count)
+{
+ u32 i;
+
+ DEBUGFUNC("fm10k_update_hw_stats_q");
+
+ for (i = 0; i < count; i++, idx++, q++) {
+ fm10k_update_hw_stats_tx_q(hw, q, idx);
+ fm10k_update_hw_stats_rx_q(hw, q, idx);
+ }
+}
+
+/**
+ * fm10k_unbind_hw_stats_q - Unbind the queue counters from their queues
+ * @hw: pointer to the hardware structure
+ * @q: pointer to the ring of hardware statistics queue
+ * @idx: index pointing to the start of the ring iteration
+ * @count: number of queues to iterate over
+ *
+ * Function invalidates the index values for the queues so any updates that
+ * may have happened are ignored and the base for the queue stats is reset.
+ **/
+void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count)
+{
+ u32 i;
+
+ for (i = 0; i < count; i++, idx++, q++) {
+ q->rx_stats_idx = 0;
+ q->tx_stats_idx = 0;
+ }
+}
+
+/**
+ * fm10k_get_host_state_generic - Returns the state of the host
+ * @hw: pointer to hardware structure
+ * @host_ready: pointer to boolean value that will record host state
+ *
+ * This function will check the health of the mailbox and Tx queue 0
+ * in order to determine if we should report that the link is up or not.
+ **/
+s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ struct fm10k_mac_info *mac = &hw->mac;
+ s32 ret_val = FM10K_SUCCESS;
+ u32 txdctl = FM10K_READ_REG(hw, FM10K_TXDCTL(0));
+
+ DEBUGFUNC("fm10k_get_host_state_generic");
+
+ /* process upstream mailbox in case interrupts were disabled */
+ mbx->ops.process(hw, mbx);
+
+ /* If Tx is no longer enabled link should come down */
+ if (!(~txdctl) || !(txdctl & FM10K_TXDCTL_ENABLE))
+ mac->get_host_state = true;
+
+ /* exit if not checking for link, or link cannot be changed */
+ if (!mac->get_host_state || !(~txdctl))
+ goto out;
+
+ /* if we somehow dropped the Tx enable we should reset */
+ if (mac->tx_ready && !(txdctl & FM10K_TXDCTL_ENABLE)) {
+ ret_val = FM10K_ERR_RESET_REQUESTED;
+ goto out;
+ }
+
+ /* if Mailbox timed out we should request reset */
+ if (!mbx->timeout) {
+ ret_val = FM10K_ERR_RESET_REQUESTED;
+ goto out;
+ }
+
+ /* verify Mailbox is still valid */
+ if (!mbx->ops.tx_ready(mbx, FM10K_VFMBX_MSG_MTU))
+ goto out;
+
+ /* interface cannot receive traffic without logical ports */
+ if (mac->dglort_map == FM10K_DGLORTMAP_NONE) {
+ if (mac->ops.request_lport_map)
+ ret_val = mac->ops.request_lport_map(hw);
+
+ goto out;
+ }
+
+ /* if we passed all the tests above then the switch is ready and we no
+ * longer need to check for link
+ */
+ mac->get_host_state = false;
+
+out:
+ *host_ready = !mac->get_host_state;
+ return ret_val;
+}
diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_common.h b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_common.h
new file mode 100644
index 000000000..91304b072
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_common.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013 - 2015 Intel Corporation
+ */
+
+#ifndef _FM10K_COMMON_H_
+#define _FM10K_COMMON_H_
+
+#include "fm10k_type.h"
+
+u16 fm10k_get_pcie_msix_count_generic(struct fm10k_hw *hw);
+s32 fm10k_init_ops_generic(struct fm10k_hw *hw);
+s32 fm10k_disable_queues_generic(struct fm10k_hw *hw, u16 q_cnt);
+s32 fm10k_start_hw_generic(struct fm10k_hw *hw);
+s32 fm10k_stop_hw_generic(struct fm10k_hw *hw);
+u32 fm10k_read_hw_stats_32b(struct fm10k_hw *hw, u32 addr,
+ struct fm10k_hw_stat *stat);
+#define fm10k_update_hw_base_32b(stat, delta) ((stat)->base_l += (delta))
+void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
+ u32 idx, u32 count);
+#define fm10k_unbind_hw_stats_32b(s) ((s)->base_h = 0)
+void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count);
+s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready);
+#endif /* _FM10K_COMMON_H_ */
diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_mbx.c b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_mbx.c
new file mode 100644
index 000000000..2bb0d82ef
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_mbx.c
@@ -0,0 +1,2225 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013 - 2015 Intel Corporation
+ */
+
+#include "fm10k_common.h"
+
+/**
+ * fm10k_fifo_init - Initialize a message FIFO
+ * @fifo: pointer to FIFO
+ * @buffer: pointer to memory to be used to store FIFO
+ * @size: maximum message size to store in FIFO, must be a power of 2
+ **/
+STATIC void fm10k_fifo_init(struct fm10k_mbx_fifo *fifo, u32 *buffer, u16 size)
+{
+ fifo->buffer = buffer;
+ fifo->size = size;
+ fifo->head = 0;
+ fifo->tail = 0;
+}
+
+/**
+ * fm10k_fifo_used - Retrieve used space in FIFO
+ * @fifo: pointer to FIFO
+ *
+ * This function returns the number of DWORDs used in the FIFO
+ **/
+STATIC u16 fm10k_fifo_used(struct fm10k_mbx_fifo *fifo)
+{
+ return fifo->tail - fifo->head;
+}
+
+/**
+ * fm10k_fifo_unused - Retrieve unused space in FIFO
+ * @fifo: pointer to FIFO
+ *
+ * This function returns the number of unused DWORDs in the FIFO
+ **/
+STATIC u16 fm10k_fifo_unused(struct fm10k_mbx_fifo *fifo)
+{
+ return fifo->size + fifo->head - fifo->tail;
+}
+
+/**
+ * fm10k_fifo_empty - Test to verify if FIFO is empty
+ * @fifo: pointer to FIFO
+ *
+ * This function returns true if the FIFO is empty, else false
+ **/
+STATIC bool fm10k_fifo_empty(struct fm10k_mbx_fifo *fifo)
+{
+ return fifo->head == fifo->tail;
+}
+
+/**
+ * fm10k_fifo_head_offset - returns index of head with given offset
+ * @fifo: pointer to FIFO
+ * @offset: offset to add to head
+ *
+ * This function returns the index into the FIFO based on head + offset
+ **/
+STATIC u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset)
+{
+ return (fifo->head + offset) & (fifo->size - 1);
+}
+
+/**
+ * fm10k_fifo_tail_offset - returns index of tail with given offset
+ * @fifo: pointer to FIFO
+ * @offset: offset to add to tail
+ *
+ * This function returns the index into the FIFO based on tail + offset
+ **/
+STATIC u16 fm10k_fifo_tail_offset(struct fm10k_mbx_fifo *fifo, u16 offset)
+{
+ return (fifo->tail + offset) & (fifo->size - 1);
+}
+
+/**
+ * fm10k_fifo_head_len - Retrieve length of first message in FIFO
+ * @fifo: pointer to FIFO
+ *
+ * This function returns the size of the first message in the FIFO
+ **/
+STATIC u16 fm10k_fifo_head_len(struct fm10k_mbx_fifo *fifo)
+{
+ u32 *head = fifo->buffer + fm10k_fifo_head_offset(fifo, 0);
+
+ /* verify there is at least 1 DWORD in the fifo so *head is valid */
+ if (fm10k_fifo_empty(fifo))
+ return 0;
+
+ /* retrieve the message length */
+ return FM10K_TLV_DWORD_LEN(*head);
+}
+
+/**
+ * fm10k_fifo_head_drop - Drop the first message in FIFO
+ * @fifo: pointer to FIFO
+ *
+ * This function returns the size of the message dropped from the FIFO
+ **/
+STATIC u16 fm10k_fifo_head_drop(struct fm10k_mbx_fifo *fifo)
+{
+ u16 len = fm10k_fifo_head_len(fifo);
+
+ /* update head so it is at the start of next frame */
+ fifo->head += len;
+
+ return len;
+}
+
+/**
+ * fm10k_fifo_drop_all - Drop all messages in FIFO
+ * @fifo: pointer to FIFO
+ *
+ * This function resets the head pointer to drop all messages in the FIFO and
+ * ensure the FIFO is empty.
+ **/
+STATIC void fm10k_fifo_drop_all(struct fm10k_mbx_fifo *fifo)
+{
+ fifo->head = fifo->tail;
+}
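
The helpers above implement a free-running head/tail ring: indices only grow, used and unused space fall out of plain u16 subtraction, and the power-of-two mask is applied only when the buffer is actually touched. The same idiom in a standalone sketch:

    struct ring { u32 *buf; u16 size, head, tail; };  /* size: power of 2 */

    static u16 ring_used(const struct ring *r)   { return r->tail - r->head; }
    static u16 ring_unused(const struct ring *r) { return r->size - ring_used(r); }

    static void ring_push(struct ring *r, u32 v)
    {
        r->buf[r->tail & (r->size - 1)] = v;  /* mask only at access time */
        r->tail++;                            /* u16 wraparound is harmless */
    }
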
+
+/**
+ * fm10k_mbx_index_len - Convert a head/tail index into a length value
+ * @mbx: pointer to mailbox
+ * @head: head index
+ * @tail: tail index
+ *
+ * This function takes the head and tail index and determines the length
+ * of the data indicated by this pair.
+ **/
+STATIC u16 fm10k_mbx_index_len(struct fm10k_mbx_info *mbx, u16 head, u16 tail)
+{
+ u16 len = tail - head;
+
+ /* we wrapped so subtract 2, one for index 0, one for all 1s index */
+ if (len > tail)
+ len -= 2;
+
+ return len & ((mbx->mbmem_len << 1) - 1);
+}
+
+/**
+ * fm10k_mbx_tail_add - Determine new tail value with added offset
+ * @mbx: pointer to mailbox
+ * @offset: length to add to tail offset
+ *
+ * This function takes the local tail index and recomputes it for
+ * a given length added as an offset.
+ **/
+STATIC u16 fm10k_mbx_tail_add(struct fm10k_mbx_info *mbx, u16 offset)
+{
+ u16 tail = (mbx->tail + offset + 1) & ((mbx->mbmem_len << 1) - 1);
+
+ /* add/sub 1 because we cannot have offset 0 or all 1s */
+ return (tail > mbx->tail) ? --tail : ++tail;
+}
+
+/**
+ * fm10k_mbx_tail_sub - Determine new tail value with subtracted offset
+ * @mbx: pointer to mailbox
+ * @offset: length to subtract from tail offset
+ *
+ * This function takes the local tail index and recomputes it for
+ * a given length subtracted as an offset.
+ **/
+STATIC u16 fm10k_mbx_tail_sub(struct fm10k_mbx_info *mbx, u16 offset)
+{
+ u16 tail = (mbx->tail - offset - 1) & ((mbx->mbmem_len << 1) - 1);
+
+ /* sub/add 1 because we cannot have offset 0 or all 1s */
+ return (tail < mbx->tail) ? ++tail : --tail;
+}
+
+/**
+ * fm10k_mbx_head_add - Determine new head value with added offset
+ * @mbx: pointer to mailbox
+ * @offset: length to add to head offset
+ *
+ * This function takes the local head index and recomputes it for
+ * a given length added as an offset.
+ **/
+STATIC u16 fm10k_mbx_head_add(struct fm10k_mbx_info *mbx, u16 offset)
+{
+ u16 head = (mbx->head + offset + 1) & ((mbx->mbmem_len << 1) - 1);
+
+ /* add/sub 1 because we cannot have offset 0 or all 1s */
+ return (head > mbx->head) ? --head : ++head;
+}
+
+/**
+ * fm10k_mbx_head_sub - Determine new head value with subtracted offset
+ * @mbx: pointer to mailbox
+ * @offset: length to subtract from head offset
+ *
+ * This function takes the local head index and recomputes it for
+ * a given length subtracted as an offset.
+ **/
+STATIC u16 fm10k_mbx_head_sub(struct fm10k_mbx_info *mbx, u16 offset)
+{
+ u16 head = (mbx->head - offset - 1) & ((mbx->mbmem_len << 1) - 1);
+
+ /* sub/add 1 because we cannot have offset 0 or all 1s */
+ return (head < mbx->head) ? ++head : --head;
+}
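
The ±1 adjustments in the four helpers above exist because mbmem indices skip the reserved values 0 and all-ones. A worked example, assuming mbmem_len == 4 so indices run modulo 8 and the valid cycle is 1,2,3,4,5,6,1,...:

    /* tail == 6, advance by 3: (6 + 3 + 1) & 7 == 2, and since the wrapped
     * value 2 is not greater than the old tail 6, the helper returns
     * 2 + 1 == 3, i.e. the walk 6 -> 1 -> 2 -> 3. */
    fm10k_mbx_tail_add(mbx, 3);        /* -> 3 */

    /* distance from head 6 to tail 2 over the same cycle (6 -> 1 -> 2) */
    fm10k_mbx_index_len(mbx, 6, 2);    /* -> 2 */
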
+
+/**
+ * fm10k_mbx_pushed_tail_len - Retrieve the length of message being pushed
+ * @mbx: pointer to mailbox
+ *
+ * This function will return the length of the message currently being
+ * pushed onto the tail of the Rx queue.
+ **/
+STATIC u16 fm10k_mbx_pushed_tail_len(struct fm10k_mbx_info *mbx)
+{
+ u32 *tail = mbx->rx.buffer + fm10k_fifo_tail_offset(&mbx->rx, 0);
+
+ /* pushed tail is only valid if pushed is set */
+ if (!mbx->pushed)
+ return 0;
+
+ return FM10K_TLV_DWORD_LEN(*tail);
+}
+
+/**
+ * fm10k_fifo_write_copy - pulls data off of msg and places it in FIFO
+ * @fifo: pointer to FIFO
+ * @msg: message array to copy from
+ * @tail_offset: additional offset to add to tail pointer
+ * @len: number of DWORDs to copy into the FIFO
+ *
+ * This function will take a message and copy it into a section of the
+ * FIFO. In order to get something into a location other than just
+ * the tail you can use tail_offset to adjust the pointer.
+ **/
+STATIC void fm10k_fifo_write_copy(struct fm10k_mbx_fifo *fifo,
+ const u32 *msg, u16 tail_offset, u16 len)
+{
+ u16 end = fm10k_fifo_tail_offset(fifo, tail_offset);
+ u32 *tail = fifo->buffer + end;
+
+ /* track when we should cross the end of the FIFO */
+ end = fifo->size - end;
+
+ /* copy end of message before start of message */
+ if (end < len)
+ memcpy(fifo->buffer, msg + end, (len - end) << 2);
+ else
+ end = len;
+
+ /* Copy remaining message into Tx FIFO */
+ memcpy(tail, msg, end << 2);
+}
+
+/**
+ * fm10k_fifo_enqueue - Enqueues the message to the tail of the FIFO
+ * @fifo: pointer to FIFO
+ * @msg: message array to read
+ *
+ * This function enqueues a message up to the size specified by the length
+ * contained in the first DWORD of the message and will place at the tail
+ * of the FIFO. It will return 0 on success, or a negative value on error.
+ **/
+STATIC s32 fm10k_fifo_enqueue(struct fm10k_mbx_fifo *fifo, const u32 *msg)
+{
+ u16 len = FM10K_TLV_DWORD_LEN(*msg);
+
+ DEBUGFUNC("fm10k_fifo_enqueue");
+
+ /* verify parameters */
+ if (len > fifo->size)
+ return FM10K_MBX_ERR_SIZE;
+
+ /* verify there is room for the message */
+ if (len > fm10k_fifo_unused(fifo))
+ return FM10K_MBX_ERR_NO_SPACE;
+
+ /* Copy message into FIFO */
+ fm10k_fifo_write_copy(fifo, msg, 0, len);
+
+ /* memory barrier to guarantee FIFO is written before tail update */
+ FM10K_WMB();
+
+ /* Update Tx FIFO tail */
+ fifo->tail += len;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_mbx_validate_msg_size - Validate incoming message based on size
+ * @mbx: pointer to mailbox
+ * @len: length of data pushed onto buffer
+ *
+ * This function analyzes the frame and will return a non-zero value when
+ * the start of a message larger than the mailbox is detected.
+ **/
+STATIC u16 fm10k_mbx_validate_msg_size(struct fm10k_mbx_info *mbx, u16 len)
+{
+ struct fm10k_mbx_fifo *fifo = &mbx->rx;
+ u16 total_len = 0, msg_len;
+ u32 *msg;
+
+ DEBUGFUNC("fm10k_mbx_validate_msg_size");
+
+ /* length should include previous amounts pushed */
+ len += mbx->pushed;
+
+ /* offset in message is based off of current message size */
+ do {
+ msg = fifo->buffer + fm10k_fifo_tail_offset(fifo, total_len);
+ msg_len = FM10K_TLV_DWORD_LEN(*msg);
+ total_len += msg_len;
+ } while (total_len < len);
+
+ /* message extends out of pushed section, but fits in FIFO */
+ if ((len < total_len) && (msg_len <= mbx->max_size))
+ return 0;
+
+ /* return length of invalid section */
+ return (len < total_len) ? len : (len - total_len);
+}
+
+/**
+ * fm10k_mbx_write_copy - pulls data off of Tx FIFO and places it in mbmem
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will take a section of the Tx FIFO and copy it into the
+ * mailbox memory. The offset in mbmem is based on the lower bits of the
+ * tail and len determines the length to copy.
+ **/
+STATIC void fm10k_mbx_write_copy(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ struct fm10k_mbx_fifo *fifo = &mbx->tx;
+ u32 mbmem = mbx->mbmem_reg;
+ u32 *head = fifo->buffer;
+ u16 end, len, tail, mask;
+
+ DEBUGFUNC("fm10k_mbx_write_copy");
+
+ if (!mbx->tail_len)
+ return;
+
+ /* determine data length and mbmem tail index */
+ mask = mbx->mbmem_len - 1;
+ len = mbx->tail_len;
+ tail = fm10k_mbx_tail_sub(mbx, len);
+ if (tail > mask)
+ tail++;
+
+ /* determine offset in the ring */
+ end = fm10k_fifo_head_offset(fifo, mbx->pulled);
+ head += end;
+
+ /* memory barrier to guarantee data is ready to be read */
+ FM10K_RMB();
+
+ /* Copy message from Tx FIFO */
+ for (end = fifo->size - end; len; head = fifo->buffer) {
+ do {
+ /* adjust tail to match offset for FIFO */
+ tail &= mask;
+ if (!tail)
+ tail++;
+
+ mbx->tx_mbmem_pulled++;
+
+ /* write message to hardware FIFO */
+ FM10K_WRITE_MBX(hw, mbmem + tail++, *(head++));
+ } while (--len && --end);
+ }
+}
+
+/**
+ * fm10k_mbx_pull_head - Pulls data off of head of Tx FIFO
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ * @head: acknowledgement number last received
+ *
+ * This function will push the tail index forward based on the remote
+ * head index. It will then pull up to mbmem_len DWORDs off of the
+ * head of the FIFO and will place it in the MBMEM registers
+ * associated with the mailbox.
+ **/
+STATIC void fm10k_mbx_pull_head(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx, u16 head)
+{
+ u16 mbmem_len, len, ack = fm10k_mbx_index_len(mbx, head, mbx->tail);
+ struct fm10k_mbx_fifo *fifo = &mbx->tx;
+
+ /* update number of bytes pulled and update bytes in transit */
+ mbx->pulled += mbx->tail_len - ack;
+
+ /* determine length of data to pull, reserve space for mbmem header */
+ mbmem_len = mbx->mbmem_len - 1;
+ len = fm10k_fifo_used(fifo) - mbx->pulled;
+ if (len > mbmem_len)
+ len = mbmem_len;
+
+ /* update tail and record number of bytes in transit */
+ mbx->tail = fm10k_mbx_tail_add(mbx, len - ack);
+ mbx->tail_len = len;
+
+ /* drop pulled messages from the FIFO */
+ for (len = fm10k_fifo_head_len(fifo);
+ len && (mbx->pulled >= len);
+ len = fm10k_fifo_head_len(fifo)) {
+ mbx->pulled -= fm10k_fifo_head_drop(fifo);
+ mbx->tx_messages++;
+ mbx->tx_dwords += len;
+ }
+
+ /* Copy message out from the Tx FIFO */
+ fm10k_mbx_write_copy(hw, mbx);
+}
+
+/**
+ * fm10k_mbx_read_copy - pulls data off of mbmem and places it in Rx FIFO
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will take a section of the mailbox memory and copy it
+ * into the Rx FIFO. The offset is based on the lower bits of the
+ * head and len determines the length to copy.
+ **/
+STATIC void fm10k_mbx_read_copy(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ struct fm10k_mbx_fifo *fifo = &mbx->rx;
+ u32 mbmem = mbx->mbmem_reg ^ mbx->mbmem_len;
+ u32 *tail = fifo->buffer;
+ u16 end, len, head;
+
+ DEBUGFUNC("fm10k_mbx_read_copy");
+
+ /* determine data length and mbmem head index */
+ len = mbx->head_len;
+ head = fm10k_mbx_head_sub(mbx, len);
+ if (head >= mbx->mbmem_len)
+ head++;
+
+ /* determine offset in the ring */
+ end = fm10k_fifo_tail_offset(fifo, mbx->pushed);
+ tail += end;
+
+ /* Copy message into Rx FIFO */
+ for (end = fifo->size - end; len; tail = fifo->buffer) {
+ do {
+ /* adjust head to match offset for FIFO */
+ head &= mbx->mbmem_len - 1;
+ if (!head)
+ head++;
+
+ mbx->rx_mbmem_pushed++;
+
+ /* read message from hardware FIFO */
+ *(tail++) = FM10K_READ_MBX(hw, mbmem + head++);
+ } while (--len && --end);
+ }
+
+ /* memory barrier to guarantee FIFO is written before tail update */
+ FM10K_WMB();
+}
+
+/**
+ * fm10k_mbx_push_tail - Pushes up to 15 DWORDs onto the tail of the FIFO
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ * @tail: tail index of message
+ *
+ * This function will first validate the tail index and size for the
+ * incoming message. It then updates the acknowledgment number and
+ * copies the data into the FIFO. It will return 0 on success, or a
+ * negative value on error.
+ **/
+STATIC s32 fm10k_mbx_push_tail(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx,
+ u16 tail)
+{
+ struct fm10k_mbx_fifo *fifo = &mbx->rx;
+ u16 len, seq = fm10k_mbx_index_len(mbx, mbx->head, tail);
+
+ DEBUGFUNC("fm10k_mbx_push_tail");
+
+ /* determine length of data to push */
+ len = fm10k_fifo_unused(fifo) - mbx->pushed;
+ if (len > seq)
+ len = seq;
+
+ /* update head and record bytes received */
+ mbx->head = fm10k_mbx_head_add(mbx, len);
+ mbx->head_len = len;
+
+ /* nothing to do if there is no data */
+ if (!len)
+ return FM10K_SUCCESS;
+
+ /* Copy msg into Rx FIFO */
+ fm10k_mbx_read_copy(hw, mbx);
+
+ /* determine if there are any invalid lengths in message */
+ if (fm10k_mbx_validate_msg_size(mbx, len))
+ return FM10K_MBX_ERR_SIZE;
+
+ /* Update pushed */
+ mbx->pushed += len;
+
+ /* flush any completed messages */
+ for (len = fm10k_mbx_pushed_tail_len(mbx);
+ len && (mbx->pushed >= len);
+ len = fm10k_mbx_pushed_tail_len(mbx)) {
+ fifo->tail += len;
+ mbx->pushed -= len;
+ mbx->rx_messages++;
+ mbx->rx_dwords += len;
+ }
+
+ return FM10K_SUCCESS;
+}
+
+/* pre-generated data for generating the CRC based on the poly 0xAC9A. */
+static const u16 fm10k_crc_16b_table[256] = {
+ 0x0000, 0x7956, 0xF2AC, 0x8BFA, 0xBC6D, 0xC53B, 0x4EC1, 0x3797,
+ 0x21EF, 0x58B9, 0xD343, 0xAA15, 0x9D82, 0xE4D4, 0x6F2E, 0x1678,
+ 0x43DE, 0x3A88, 0xB172, 0xC824, 0xFFB3, 0x86E5, 0x0D1F, 0x7449,
+ 0x6231, 0x1B67, 0x909D, 0xE9CB, 0xDE5C, 0xA70A, 0x2CF0, 0x55A6,
+ 0x87BC, 0xFEEA, 0x7510, 0x0C46, 0x3BD1, 0x4287, 0xC97D, 0xB02B,
+ 0xA653, 0xDF05, 0x54FF, 0x2DA9, 0x1A3E, 0x6368, 0xE892, 0x91C4,
+ 0xC462, 0xBD34, 0x36CE, 0x4F98, 0x780F, 0x0159, 0x8AA3, 0xF3F5,
+ 0xE58D, 0x9CDB, 0x1721, 0x6E77, 0x59E0, 0x20B6, 0xAB4C, 0xD21A,
+ 0x564D, 0x2F1B, 0xA4E1, 0xDDB7, 0xEA20, 0x9376, 0x188C, 0x61DA,
+ 0x77A2, 0x0EF4, 0x850E, 0xFC58, 0xCBCF, 0xB299, 0x3963, 0x4035,
+ 0x1593, 0x6CC5, 0xE73F, 0x9E69, 0xA9FE, 0xD0A8, 0x5B52, 0x2204,
+ 0x347C, 0x4D2A, 0xC6D0, 0xBF86, 0x8811, 0xF147, 0x7ABD, 0x03EB,
+ 0xD1F1, 0xA8A7, 0x235D, 0x5A0B, 0x6D9C, 0x14CA, 0x9F30, 0xE666,
+ 0xF01E, 0x8948, 0x02B2, 0x7BE4, 0x4C73, 0x3525, 0xBEDF, 0xC789,
+ 0x922F, 0xEB79, 0x6083, 0x19D5, 0x2E42, 0x5714, 0xDCEE, 0xA5B8,
+ 0xB3C0, 0xCA96, 0x416C, 0x383A, 0x0FAD, 0x76FB, 0xFD01, 0x8457,
+ 0xAC9A, 0xD5CC, 0x5E36, 0x2760, 0x10F7, 0x69A1, 0xE25B, 0x9B0D,
+ 0x8D75, 0xF423, 0x7FD9, 0x068F, 0x3118, 0x484E, 0xC3B4, 0xBAE2,
+ 0xEF44, 0x9612, 0x1DE8, 0x64BE, 0x5329, 0x2A7F, 0xA185, 0xD8D3,
+ 0xCEAB, 0xB7FD, 0x3C07, 0x4551, 0x72C6, 0x0B90, 0x806A, 0xF93C,
+ 0x2B26, 0x5270, 0xD98A, 0xA0DC, 0x974B, 0xEE1D, 0x65E7, 0x1CB1,
+ 0x0AC9, 0x739F, 0xF865, 0x8133, 0xB6A4, 0xCFF2, 0x4408, 0x3D5E,
+ 0x68F8, 0x11AE, 0x9A54, 0xE302, 0xD495, 0xADC3, 0x2639, 0x5F6F,
+ 0x4917, 0x3041, 0xBBBB, 0xC2ED, 0xF57A, 0x8C2C, 0x07D6, 0x7E80,
+ 0xFAD7, 0x8381, 0x087B, 0x712D, 0x46BA, 0x3FEC, 0xB416, 0xCD40,
+ 0xDB38, 0xA26E, 0x2994, 0x50C2, 0x6755, 0x1E03, 0x95F9, 0xECAF,
+ 0xB909, 0xC05F, 0x4BA5, 0x32F3, 0x0564, 0x7C32, 0xF7C8, 0x8E9E,
+ 0x98E6, 0xE1B0, 0x6A4A, 0x131C, 0x248B, 0x5DDD, 0xD627, 0xAF71,
+ 0x7D6B, 0x043D, 0x8FC7, 0xF691, 0xC106, 0xB850, 0x33AA, 0x4AFC,
+ 0x5C84, 0x25D2, 0xAE28, 0xD77E, 0xE0E9, 0x99BF, 0x1245, 0x6B13,
+ 0x3EB5, 0x47E3, 0xCC19, 0xB54F, 0x82D8, 0xFB8E, 0x7074, 0x0922,
+ 0x1F5A, 0x660C, 0xEDF6, 0x94A0, 0xA337, 0xDA61, 0x519B, 0x28CD };
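
The table above is the standard reflected (LSB-first) lookup table for the polynomial 0xAC9A. A hypothetical generator sketch that reproduces it; for instance, entry 1 computes to 0x7956, matching the listing:

    static void fm10k_build_crc_table(u16 table[256])
    {
        u32 i;
        int bit;

        for (i = 0; i < 256; i++) {
            u16 r = (u16)i;

            /* shift out one bit at a time, folding in the polynomial
             * whenever the dropped bit was set */
            for (bit = 0; bit < 8; bit++)
                r = (r >> 1) ^ ((r & 1) ? 0xAC9A : 0);
            table[i] = r;
        }
    }
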
+
+/**
+ * fm10k_crc_16b - Generate a 16-bit CRC for a region of 16-bit data
+ * @data: pointer to data to process
+ * @seed: seed value for CRC
+ * @len: length measured in 16-bit words
+ *
+ * This function will generate a CRC based on the polynomial 0xAC9A and
+ * whatever value is stored in the seed variable. Note that this
+ * value inverts the local seed and the result in order to capture all
+ * leading and trailing zeros.
+ */
+STATIC u16 fm10k_crc_16b(const u32 *data, u16 seed, u16 len)
+{
+ u32 result = seed;
+
+ while (len--) {
+ result ^= *(data++);
+ result = (result >> 8) ^ fm10k_crc_16b_table[result & 0xFF];
+ result = (result >> 8) ^ fm10k_crc_16b_table[result & 0xFF];
+
+ if (!(len--))
+ break;
+
+ result = (result >> 8) ^ fm10k_crc_16b_table[result & 0xFF];
+ result = (result >> 8) ^ fm10k_crc_16b_table[result & 0xFF];
+ }
+
+ return (u16)result;
+}
+
+/**
+ * fm10k_fifo_crc - generate a CRC based off of FIFO data
+ * @fifo: pointer to FIFO
+ * @offset: offset point for start of FIFO
+ * @len: number of DWORDs to process
+ * @seed: seed value for CRC
+ *
+ * This function generates a CRC for some region of the FIFO
+ **/
+STATIC u16 fm10k_fifo_crc(struct fm10k_mbx_fifo *fifo, u16 offset,
+ u16 len, u16 seed)
+{
+ u32 *data = fifo->buffer + offset;
+
+ /* track when we should cross the end of the FIFO */
+ offset = fifo->size - offset;
+
+ /* if we are in 2 blocks process the end of the FIFO first */
+ if (offset < len) {
+ seed = fm10k_crc_16b(data, seed, offset * 2);
+ data = fifo->buffer;
+ len -= offset;
+ }
+
+ /* process any remaining bits */
+ return fm10k_crc_16b(data, seed, len * 2);
+}
+
+/**
+ * fm10k_mbx_update_local_crc - Update the local CRC for outgoing data
+ * @mbx: pointer to mailbox
+ * @head: head index provided by remote mailbox
+ *
+ * This function will generate the CRC for all data from the end of the
+ * last head update to the current one. It uses the result of the
+ * previous CRC as the seed for this update. The result is stored in
+ * mbx->local.
+ **/
+STATIC void fm10k_mbx_update_local_crc(struct fm10k_mbx_info *mbx, u16 head)
+{
+ u16 len = mbx->tail_len - fm10k_mbx_index_len(mbx, head, mbx->tail);
+
+ /* determine the offset for the start of the region to be pulled */
+ head = fm10k_fifo_head_offset(&mbx->tx, mbx->pulled);
+
+ /* update local CRC to include all of the pulled data */
+ mbx->local = fm10k_fifo_crc(&mbx->tx, head, len, mbx->local);
+}
+
+/**
+ * fm10k_mbx_verify_remote_crc - Verify the CRC is correct for current data
+ * @mbx: pointer to mailbox
+ *
+ * This function will take all data that has been provided from the remote
+ * end and generate a CRC for it. This is stored in mbx->remote. The
+ * CRC for the header is then computed and if the result is non-zero this
+ * is an error and we signal an error dropping all data and resetting the
+ * connection.
+ */
+STATIC s32 fm10k_mbx_verify_remote_crc(struct fm10k_mbx_info *mbx)
+{
+ struct fm10k_mbx_fifo *fifo = &mbx->rx;
+ u16 len = mbx->head_len;
+ u16 offset = fm10k_fifo_tail_offset(fifo, mbx->pushed) - len;
+ u16 crc;
+
+ /* update the remote CRC if new data has been received */
+ if (len)
+ mbx->remote = fm10k_fifo_crc(fifo, offset, len, mbx->remote);
+
+ /* process the full header as we have to validate the CRC */
+ crc = fm10k_crc_16b(&mbx->mbx_hdr, mbx->remote, 1);
+
+ /* notify other end if we have a problem */
+ return crc ? FM10K_MBX_ERR_CRC : FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_mbx_rx_ready - Indicates that a message is ready in the Rx FIFO
+ * @mbx: pointer to mailbox
+ *
+ * This function returns true if there is a message in the Rx FIFO to dequeue.
+ **/
+STATIC bool fm10k_mbx_rx_ready(struct fm10k_mbx_info *mbx)
+{
+ u16 msg_size = fm10k_fifo_head_len(&mbx->rx);
+
+ return msg_size && (fm10k_fifo_used(&mbx->rx) >= msg_size);
+}
+
+/**
+ * fm10k_mbx_tx_ready - Indicates that the mailbox is in state ready for Tx
+ * @mbx: pointer to mailbox
+ * @len: verify free space is >= this value
+ *
+ * This function returns true if the mailbox is in a state ready to transmit.
+ **/
+STATIC bool fm10k_mbx_tx_ready(struct fm10k_mbx_info *mbx, u16 len)
+{
+ u16 fifo_unused = fm10k_fifo_unused(&mbx->tx);
+
+ return (mbx->state == FM10K_STATE_OPEN) && (fifo_unused >= len);
+}
+
+/**
+ * fm10k_mbx_tx_complete - Indicates that the Tx FIFO has been emptied
+ * @mbx: pointer to mailbox
+ *
+ * This function returns true if the Tx FIFO is empty.
+ **/
+STATIC bool fm10k_mbx_tx_complete(struct fm10k_mbx_info *mbx)
+{
+ return fm10k_fifo_empty(&mbx->tx);
+}
+
+/**
+ * fm10k_mbx_dequeue_rx - Dequeues the message from the head in the Rx FIFO
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function dequeues messages and hands them off to the TLV parser.
+ * It will return the number of messages processed when called.
+ **/
+STATIC u16 fm10k_mbx_dequeue_rx(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ struct fm10k_mbx_fifo *fifo = &mbx->rx;
+ s32 err;
+ u16 cnt;
+
+ /* parse Rx messages out of the Rx FIFO to empty it */
+ for (cnt = 0; !fm10k_fifo_empty(fifo); cnt++) {
+ err = fm10k_tlv_msg_parse(hw, fifo->buffer + fifo->head,
+ mbx, mbx->msg_data);
+ if (err < 0)
+ mbx->rx_parse_err++;
+
+ fm10k_fifo_head_drop(fifo);
+ }
+
+ /* shift remaining bytes back to start of FIFO */
+ memmove(fifo->buffer, fifo->buffer + fifo->tail, mbx->pushed << 2);
+
+ /* shift head and tail based on the memory we moved */
+ fifo->tail -= fifo->head;
+ fifo->head = 0;
+
+ return cnt;
+}
+
+/**
+ * fm10k_mbx_enqueue_tx - Enqueues the message to the tail of the Tx FIFO
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ * @msg: message array to read
+ *
+ * This function enqueues a message up to the size specified by the length
+ * contained in the first DWORD of the message and will place at the tail
+ * of the FIFO. It will return 0 on success, or a negative value on error.
+ **/
+STATIC s32 fm10k_mbx_enqueue_tx(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx, const u32 *msg)
+{
+ u32 countdown = mbx->timeout;
+ s32 err;
+
+ switch (mbx->state) {
+ case FM10K_STATE_CLOSED:
+ case FM10K_STATE_DISCONNECT:
+ return FM10K_MBX_ERR_NO_MBX;
+ default:
+ break;
+ }
+
+ /* enqueue the message on the Tx FIFO */
+ err = fm10k_fifo_enqueue(&mbx->tx, msg);
+
+ /* if it failed give the FIFO a chance to drain */
+ while (err && countdown) {
+ countdown--;
+ usec_delay(mbx->usec_delay);
+ mbx->ops.process(hw, mbx);
+ err = fm10k_fifo_enqueue(&mbx->tx, msg);
+ }
+
+ /* if we failed treat the error */
+ if (err) {
+ mbx->timeout = 0;
+ mbx->tx_busy++;
+ }
+
+ /* begin processing message, ignore errors as this is just meant
+ * to start the mailbox flow so we are not concerned if there
+ * is a bad error, or the mailbox is already busy with a request
+ */
+ if (!mbx->tail_len)
+ mbx->ops.process(hw, mbx);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_mbx_read - Copies the mbmem to local message buffer
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function copies the message from the mbmem to the message array
+ **/
+STATIC s32 fm10k_mbx_read(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
+{
+ DEBUGFUNC("fm10k_mbx_read");
+
+ /* only allow one reader in here at a time */
+ if (mbx->mbx_hdr)
+ return FM10K_MBX_ERR_BUSY;
+
+ /* read to capture initial interrupt bits */
+ if (FM10K_READ_MBX(hw, mbx->mbx_reg) & FM10K_MBX_REQ_INTERRUPT)
+ mbx->mbx_lock = FM10K_MBX_ACK;
+
+ /* write back interrupt bits to clear */
+ FM10K_WRITE_MBX(hw, mbx->mbx_reg,
+ FM10K_MBX_REQ_INTERRUPT | FM10K_MBX_ACK_INTERRUPT);
+
+ /* read remote header */
+ mbx->mbx_hdr = FM10K_READ_MBX(hw, mbx->mbmem_reg ^ mbx->mbmem_len);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_mbx_write - Copies the local message buffer to mbmem
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function copies the message from the message array to mbmem
+ **/
+STATIC void fm10k_mbx_write(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
+{
+ u32 mbmem = mbx->mbmem_reg;
+
+ DEBUGFUNC("fm10k_mbx_write");
+
+ /* write new msg header to notify recipient of change */
+ FM10K_WRITE_MBX(hw, mbmem, mbx->mbx_hdr);
+
+ /* write mailbox to send interrupt */
+ if (mbx->mbx_lock)
+ FM10K_WRITE_MBX(hw, mbx->mbx_reg, mbx->mbx_lock);
+
+ /* we no longer are using the header so free it */
+ mbx->mbx_hdr = 0;
+ mbx->mbx_lock = 0;
+}
+
+/**
+ * fm10k_mbx_create_connect_hdr - Generate a connect mailbox header
+ * @mbx: pointer to mailbox
+ *
+ * This function returns a connection mailbox header
+ **/
+STATIC void fm10k_mbx_create_connect_hdr(struct fm10k_mbx_info *mbx)
+{
+ mbx->mbx_lock |= FM10K_MBX_REQ;
+
+ mbx->mbx_hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_CONNECT, TYPE) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->rx.size - 1, CONNECT_SIZE);
+}
+
+/**
+ * fm10k_mbx_create_data_hdr - Generate a data mailbox header
+ * @mbx: pointer to mailbox
+ *
+ * This function returns a data mailbox header
+ **/
+STATIC void fm10k_mbx_create_data_hdr(struct fm10k_mbx_info *mbx)
+{
+ u32 hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_DATA, TYPE) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->tail, TAIL) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD);
+ struct fm10k_mbx_fifo *fifo = &mbx->tx;
+ u16 crc;
+
+ if (mbx->tail_len)
+ mbx->mbx_lock |= FM10K_MBX_REQ;
+
+ /* generate CRC for data in flight and header */
+ crc = fm10k_fifo_crc(fifo, fm10k_fifo_head_offset(fifo, mbx->pulled),
+ mbx->tail_len, mbx->local);
+ crc = fm10k_crc_16b(&hdr, crc, 1);
+
+ /* load header to memory to be written */
+ mbx->mbx_hdr = hdr | FM10K_MSG_HDR_FIELD_SET(crc, CRC);
+}
+
+/**
+ * fm10k_mbx_create_disconnect_hdr - Generate a disconnect mailbox header
+ * @mbx: pointer to mailbox
+ *
+ * This function returns a disconnect mailbox header
+ **/
+STATIC void fm10k_mbx_create_disconnect_hdr(struct fm10k_mbx_info *mbx)
+{
+ u32 hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_DISCONNECT, TYPE) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->tail, TAIL) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD);
+ u16 crc = fm10k_crc_16b(&hdr, mbx->local, 1);
+
+ mbx->mbx_lock |= FM10K_MBX_ACK;
+
+ /* load header to memory to be written */
+ mbx->mbx_hdr = hdr | FM10K_MSG_HDR_FIELD_SET(crc, CRC);
+}
+
+/**
+ * fm10k_mbx_create_fake_disconnect_hdr - Generate a false disconnect mbox hdr
+ * @mbx: pointer to mailbox
+ *
+ * This function creates a fake disconnect header for loading into remote
+ * mailbox header. The primary purpose is to prevent errors on immediate
+ * start up after mbx->connect.
+ **/
+STATIC void fm10k_mbx_create_fake_disconnect_hdr(struct fm10k_mbx_info *mbx)
+{
+ u32 hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_DISCONNECT, TYPE) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->head, TAIL) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->tail, HEAD);
+ u16 crc = fm10k_crc_16b(&hdr, mbx->local, 1);
+
+ mbx->mbx_lock |= FM10K_MBX_ACK;
+
+ /* load header to memory to be written */
+ mbx->mbx_hdr = hdr | FM10K_MSG_HDR_FIELD_SET(crc, CRC);
+}
+
+/**
+ * fm10k_mbx_create_error_msg - Generate an error message
+ * @mbx: pointer to mailbox
+ * @err: local error encountered
+ *
+ * This function will interpret the error provided by err and, for the
+ * recognized error types, generate an error message header to be written
+ * back to the remote mailbox.
+ **/
+STATIC void fm10k_mbx_create_error_msg(struct fm10k_mbx_info *mbx, s32 err)
+{
+ /* only generate an error message for these types */
+ switch (err) {
+ case FM10K_MBX_ERR_TAIL:
+ case FM10K_MBX_ERR_HEAD:
+ case FM10K_MBX_ERR_TYPE:
+ case FM10K_MBX_ERR_SIZE:
+ case FM10K_MBX_ERR_RSVD0:
+ case FM10K_MBX_ERR_CRC:
+ break;
+ default:
+ return;
+ }
+
+ mbx->mbx_lock |= FM10K_MBX_REQ;
+
+ mbx->mbx_hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_ERROR, TYPE) |
+ FM10K_MSG_HDR_FIELD_SET(err, ERR_NO) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD);
+}
+
+/**
+ * fm10k_mbx_validate_msg_hdr - Validate common fields in the message header
+ * @mbx: pointer to mailbox
+ *
+ * This function will parse the fields in the mailbox header and return
+ * an error if the header contains any of a number of invalid configurations
+ * including unrecognized type, invalid route, or a malformed message.
+ **/
+STATIC s32 fm10k_mbx_validate_msg_hdr(struct fm10k_mbx_info *mbx)
+{
+ u16 type, rsvd0, head, tail, size;
+ const u32 *hdr = &mbx->mbx_hdr;
+
+ DEBUGFUNC("fm10k_mbx_validate_msg_hdr");
+
+ type = FM10K_MSG_HDR_FIELD_GET(*hdr, TYPE);
+ rsvd0 = FM10K_MSG_HDR_FIELD_GET(*hdr, RSVD0);
+ tail = FM10K_MSG_HDR_FIELD_GET(*hdr, TAIL);
+ head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD);
+ size = FM10K_MSG_HDR_FIELD_GET(*hdr, CONNECT_SIZE);
+
+ if (rsvd0)
+ return FM10K_MBX_ERR_RSVD0;
+
+ switch (type) {
+ case FM10K_MSG_DISCONNECT:
+ /* validate that all data has been received */
+ if (tail != mbx->head)
+ return FM10K_MBX_ERR_TAIL;
+
+ /* fall through */
+ case FM10K_MSG_DATA:
+ /* validate that head is moving correctly */
+ if (!head || (head == FM10K_MSG_HDR_MASK(HEAD)))
+ return FM10K_MBX_ERR_HEAD;
+ if (fm10k_mbx_index_len(mbx, head, mbx->tail) > mbx->tail_len)
+ return FM10K_MBX_ERR_HEAD;
+
+ /* validate that tail is moving correctly */
+ if (!tail || (tail == FM10K_MSG_HDR_MASK(TAIL)))
+ return FM10K_MBX_ERR_TAIL;
+ if (fm10k_mbx_index_len(mbx, mbx->head, tail) < mbx->mbmem_len)
+ break;
+
+ return FM10K_MBX_ERR_TAIL;
+ case FM10K_MSG_CONNECT:
+ /* validate size is in range and is power of 2 mask */
+ if ((size < FM10K_VFMBX_MSG_MTU) || (size & (size + 1)))
+ return FM10K_MBX_ERR_SIZE;
+
+ /* fall through */
+ case FM10K_MSG_ERROR:
+ if (!head || (head == FM10K_MSG_HDR_MASK(HEAD)))
+ return FM10K_MBX_ERR_HEAD;
+ /* neither create nor error include a tail offset */
+ if (tail)
+ return FM10K_MBX_ERR_TAIL;
+
+ break;
+ default:
+ return FM10K_MBX_ERR_TYPE;
+ }
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_mbx_create_reply - Generate reply based on state and remote head
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ * @head: acknowledgement number
+ *
+ * This function will generate an outgoing message based on the current
+ * mailbox state and the remote FIFO head. It always returns
+ * FM10K_SUCCESS.
+ **/
+STATIC s32 fm10k_mbx_create_reply(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx, u16 head)
+{
+ switch (mbx->state) {
+ case FM10K_STATE_OPEN:
+ case FM10K_STATE_DISCONNECT:
+ /* update our checksum for the outgoing data */
+ fm10k_mbx_update_local_crc(mbx, head);
+
+ /* as long as other end recognizes us keep sending data */
+ fm10k_mbx_pull_head(hw, mbx, head);
+
+ /* generate new header based on data */
+ if (mbx->tail_len || (mbx->state == FM10K_STATE_OPEN))
+ fm10k_mbx_create_data_hdr(mbx);
+ else
+ fm10k_mbx_create_disconnect_hdr(mbx);
+ break;
+ case FM10K_STATE_CONNECT:
+ /* send disconnect even if we aren't connected */
+ fm10k_mbx_create_connect_hdr(mbx);
+ break;
+ case FM10K_STATE_CLOSED:
+ /* generate new header based on data */
+ fm10k_mbx_create_disconnect_hdr(mbx);
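+		/* fall through */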
+ default:
+ break;
+ }
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_mbx_reset_work - Reset internal pointers for any pending work
+ * @mbx: pointer to mailbox
+ *
+ * This function will reset all internal pointers so any work in progress
+ * is dropped. This call should occur every time we transition from the
+ * open state to the connect state.
+ **/
+STATIC void fm10k_mbx_reset_work(struct fm10k_mbx_info *mbx)
+{
+ u16 len, head, ack;
+
+ /* reset our outgoing max size back to Rx limits */
+ mbx->max_size = mbx->rx.size - 1;
+
+ /* update mbx->pulled to account for tail_len and ack */
+ head = FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, HEAD);
+ ack = fm10k_mbx_index_len(mbx, head, mbx->tail);
+ mbx->pulled += mbx->tail_len - ack;
+
+ /* now drop any messages which have started or finished transmitting */
+ while (fm10k_fifo_head_len(&mbx->tx) && mbx->pulled) {
+ len = fm10k_fifo_head_drop(&mbx->tx);
+ mbx->tx_dropped++;
+ if (mbx->pulled >= len)
+ mbx->pulled -= len;
+ else
+ mbx->pulled = 0;
+ }
+
+	/* just do a quick resync to start of message */
+ mbx->pushed = 0;
+ mbx->pulled = 0;
+ mbx->tail_len = 0;
+ mbx->head_len = 0;
+ mbx->rx.tail = 0;
+ mbx->rx.head = 0;
+}
+
+/**
+ * fm10k_mbx_update_max_size - Update the max_size and drop any large messages
+ * @mbx: pointer to mailbox
+ * @size: new value for max_size
+ *
+ * This function updates the max_size value and drops any outgoing messages
+ * at the head of the Tx FIFO that are larger than max_size. It does not
+ * drop all oversized messages, as parsing and removing them from the middle
+ * of the FIFO would be too complex. Instead, it relies on later checks to
+ * ensure that messages larger than max_size are not pushed into the memory
+ * buffer.
+ **/
+STATIC void fm10k_mbx_update_max_size(struct fm10k_mbx_info *mbx, u16 size)
+{
+ u16 len;
+
+ DEBUGFUNC("fm10k_mbx_update_max_size");
+
+ mbx->max_size = size;
+
+ /* flush any oversized messages from the queue */
+ for (len = fm10k_fifo_head_len(&mbx->tx);
+ len > size;
+ len = fm10k_fifo_head_len(&mbx->tx)) {
+ fm10k_fifo_head_drop(&mbx->tx);
+ mbx->tx_dropped++;
+ }
+}
+
+/**
+ * fm10k_mbx_connect_reset - Reset following request for reset
+ * @mbx: pointer to mailbox
+ *
+ * This function resets the mailbox to either a disconnected state
+ * or a connect state depending on the current mailbox state
+ **/
+STATIC void fm10k_mbx_connect_reset(struct fm10k_mbx_info *mbx)
+{
+	/* just do a quick resync to start of frame */
+ fm10k_mbx_reset_work(mbx);
+
+ /* reset CRC seeds */
+ mbx->local = FM10K_MBX_CRC_SEED;
+ mbx->remote = FM10K_MBX_CRC_SEED;
+
+ /* we cannot exit connect until the size is good */
+ if (mbx->state == FM10K_STATE_OPEN)
+ mbx->state = FM10K_STATE_CONNECT;
+ else
+ mbx->state = FM10K_STATE_CLOSED;
+}
+
+/**
+ * fm10k_mbx_process_connect - Process connect header
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will read an incoming connect header and reply with the
+ * appropriate message. It will return FM10K_SUCCESS on success, or a
+ * negative value on failure.
+ **/
+STATIC s32 fm10k_mbx_process_connect(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ const enum fm10k_mbx_state state = mbx->state;
+ const u32 *hdr = &mbx->mbx_hdr;
+ u16 size, head;
+
+ /* we will need to pull all of the fields for verification */
+ size = FM10K_MSG_HDR_FIELD_GET(*hdr, CONNECT_SIZE);
+ head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD);
+
+ switch (state) {
+ case FM10K_STATE_DISCONNECT:
+ case FM10K_STATE_OPEN:
+ /* reset any in-progress work */
+ fm10k_mbx_connect_reset(mbx);
+ break;
+ case FM10K_STATE_CONNECT:
+ /* we cannot exit connect until the size is good */
+ if (size > mbx->rx.size) {
+ mbx->max_size = mbx->rx.size - 1;
+ } else {
+ /* record the remote system requesting connection */
+ mbx->state = FM10K_STATE_OPEN;
+
+ fm10k_mbx_update_max_size(mbx, size);
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* align our tail index to remote head index */
+ mbx->tail = head;
+
+ return fm10k_mbx_create_reply(hw, mbx, head);
+}
+
+/**
+ * fm10k_mbx_process_data - Process data header
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will read an incoming data header and reply with the
+ * appropriate message. It will return FM10K_SUCCESS on success, or a
+ * negative value on failure.
+ **/
+STATIC s32 fm10k_mbx_process_data(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ const u32 *hdr = &mbx->mbx_hdr;
+ u16 head, tail;
+ s32 err;
+
+ DEBUGFUNC("fm10k_mbx_process_data");
+
+ /* we will need to pull all of the fields for verification */
+ head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD);
+ tail = FM10K_MSG_HDR_FIELD_GET(*hdr, TAIL);
+
+ /* if we are in connect just update our data and go */
+ if (mbx->state == FM10K_STATE_CONNECT) {
+ mbx->tail = head;
+ mbx->state = FM10K_STATE_OPEN;
+ }
+
+ /* abort on message size errors */
+ err = fm10k_mbx_push_tail(hw, mbx, tail);
+ if (err < 0)
+ return err;
+
+ /* verify the checksum on the incoming data */
+ err = fm10k_mbx_verify_remote_crc(mbx);
+ if (err)
+ return err;
+
+ /* process messages if we have received any */
+ fm10k_mbx_dequeue_rx(hw, mbx);
+
+ return fm10k_mbx_create_reply(hw, mbx, head);
+}
+
+/**
+ * fm10k_mbx_process_disconnect - Process disconnect header
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will read an incoming disconnect header and reply with the
+ * appropriate message. It will return FM10K_SUCCESS on success, or a
+ * negative value on failure.
+ **/
+STATIC s32 fm10k_mbx_process_disconnect(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ const enum fm10k_mbx_state state = mbx->state;
+ const u32 *hdr = &mbx->mbx_hdr;
+ u16 head;
+ s32 err;
+
+ /* we will need to pull the header field for verification */
+ head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD);
+
+ /* We should not be receiving disconnect if Rx is incomplete */
+ if (mbx->pushed)
+ return FM10K_MBX_ERR_TAIL;
+
+ /* we have already verified mbx->head == tail so we know this is 0 */
+ mbx->head_len = 0;
+
+ /* verify the checksum on the incoming header is correct */
+ err = fm10k_mbx_verify_remote_crc(mbx);
+ if (err)
+ return err;
+
+ switch (state) {
+ case FM10K_STATE_DISCONNECT:
+ case FM10K_STATE_OPEN:
+ /* state doesn't change if we still have work to do */
+ if (!fm10k_mbx_tx_complete(mbx))
+ break;
+
+ /* verify the head indicates we completed all transmits */
+ if (head != mbx->tail)
+ return FM10K_MBX_ERR_HEAD;
+
+ /* reset any in-progress work */
+ fm10k_mbx_connect_reset(mbx);
+ break;
+ default:
+ break;
+ }
+
+ return fm10k_mbx_create_reply(hw, mbx, head);
+}
+
+/**
+ * fm10k_mbx_process_error - Process error header
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will read an incoming error header and reply with the
+ * appropriate message. It will return FM10K_SUCCESS on success, or a
+ * negative value on failure.
+ **/
+STATIC s32 fm10k_mbx_process_error(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ const u32 *hdr = &mbx->mbx_hdr;
+ u16 head;
+
+ /* we will need to pull all of the fields for verification */
+ head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD);
+
+ switch (mbx->state) {
+ case FM10K_STATE_OPEN:
+ case FM10K_STATE_DISCONNECT:
+ /* flush any uncompleted work */
+ fm10k_mbx_reset_work(mbx);
+
+ /* reset CRC seeds */
+ mbx->local = FM10K_MBX_CRC_SEED;
+ mbx->remote = FM10K_MBX_CRC_SEED;
+
+ /* reset tail index and size to prepare for reconnect */
+ mbx->tail = head;
+
+ /* if open then reset max_size and go back to connect */
+ if (mbx->state == FM10K_STATE_OPEN) {
+ mbx->state = FM10K_STATE_CONNECT;
+ break;
+ }
+
+ /* send a connect message to get data flowing again */
+ fm10k_mbx_create_connect_hdr(mbx);
+ return FM10K_SUCCESS;
+ default:
+ break;
+ }
+
+ return fm10k_mbx_create_reply(hw, mbx, mbx->tail);
+}
+
+/**
+ * fm10k_mbx_process - Process mailbox interrupt
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will process incoming mailbox events and generate mailbox
+ * replies. It will return FM10K_SUCCESS on success or a negative value
+ * on error.
+ **/
+STATIC s32 fm10k_mbx_process(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ s32 err;
+
+ DEBUGFUNC("fm10k_mbx_process");
+
+ /* we do not read mailbox if closed */
+ if (mbx->state == FM10K_STATE_CLOSED)
+ return FM10K_SUCCESS;
+
+ /* copy data from mailbox */
+ err = fm10k_mbx_read(hw, mbx);
+ if (err)
+ return err;
+
+ /* validate type, source, and destination */
+ err = fm10k_mbx_validate_msg_hdr(mbx);
+ if (err < 0)
+ goto msg_err;
+
+ switch (FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, TYPE)) {
+ case FM10K_MSG_CONNECT:
+ err = fm10k_mbx_process_connect(hw, mbx);
+ break;
+ case FM10K_MSG_DATA:
+ err = fm10k_mbx_process_data(hw, mbx);
+ break;
+ case FM10K_MSG_DISCONNECT:
+ err = fm10k_mbx_process_disconnect(hw, mbx);
+ break;
+ case FM10K_MSG_ERROR:
+ err = fm10k_mbx_process_error(hw, mbx);
+ break;
+ default:
+ err = FM10K_MBX_ERR_TYPE;
+ break;
+ }
+
+msg_err:
+ /* notify partner of errors on our end */
+ if (err < 0)
+ fm10k_mbx_create_error_msg(mbx, err);
+
+ /* copy data from mailbox */
+ fm10k_mbx_write(hw, mbx);
+
+ return err;
+}
+
+/**
+ * fm10k_mbx_disconnect - Shutdown mailbox connection
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will shut down the mailbox. It places the mailbox first
+ * in the disconnect state, it then allows up to a predefined timeout for
+ * the mailbox to transition to close on its own. If this does not occur
+ * then the mailbox will be forced into the closed state.
+ *
+ * Any mailbox transactions not completed before calling this function
+ * are not guaranteed to complete and may be dropped.
+ **/
+STATIC void fm10k_mbx_disconnect(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ int timeout = mbx->timeout ? FM10K_MBX_DISCONNECT_TIMEOUT : 0;
+
+ DEBUGFUNC("fm10k_mbx_disconnect");
+
+ /* Place mbx in ready to disconnect state */
+ mbx->state = FM10K_STATE_DISCONNECT;
+
+ /* trigger interrupt to start shutdown process */
+ FM10K_WRITE_MBX(hw, mbx->mbx_reg, FM10K_MBX_REQ |
+ FM10K_MBX_INTERRUPT_DISABLE);
+ do {
+ usec_delay(FM10K_MBX_POLL_DELAY);
+ mbx->ops.process(hw, mbx);
+ timeout -= FM10K_MBX_POLL_DELAY;
+ } while ((timeout > 0) && (mbx->state != FM10K_STATE_CLOSED));
+
+ /* in case we didn't close, just force the mailbox into shutdown and
+ * drop all left over messages in the FIFO.
+ */
+ fm10k_mbx_connect_reset(mbx);
+ fm10k_fifo_drop_all(&mbx->tx);
+
+ FM10K_WRITE_MBX(hw, mbx->mbmem_reg, 0);
+}
+
+/**
+ * fm10k_mbx_connect - Start mailbox connection
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will initiate a mailbox connection. It will populate the
+ * mailbox with a broadcast connect message and then initialize the lock.
+ * This is safe since the connect message is a single DWORD so the mailbox
+ * transaction is guaranteed to be atomic.
+ *
+ * This function will return an error if the mailbox has not been
+ * initialized or is currently in use.
+ **/
+STATIC s32 fm10k_mbx_connect(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
+{
+ DEBUGFUNC("fm10k_mbx_connect");
+
+ /* we cannot connect an uninitialized mailbox */
+ if (!mbx->rx.buffer)
+ return FM10K_MBX_ERR_NO_SPACE;
+
+ /* we cannot connect an already connected mailbox */
+ if (mbx->state != FM10K_STATE_CLOSED)
+ return FM10K_MBX_ERR_BUSY;
+
+ /* mailbox timeout can now become active */
+ mbx->timeout = FM10K_MBX_INIT_TIMEOUT;
+
+ /* Place mbx in ready to connect state */
+ mbx->state = FM10K_STATE_CONNECT;
+
+ fm10k_mbx_reset_work(mbx);
+
+ /* initialize header of remote mailbox */
+ fm10k_mbx_create_fake_disconnect_hdr(mbx);
+ FM10K_WRITE_MBX(hw, mbx->mbmem_reg ^ mbx->mbmem_len, mbx->mbx_hdr);
+
+ /* enable interrupt and notify other party of new message */
+ mbx->mbx_lock = FM10K_MBX_REQ_INTERRUPT | FM10K_MBX_ACK_INTERRUPT |
+ FM10K_MBX_INTERRUPT_ENABLE;
+
+ /* generate and load connect header into mailbox */
+ fm10k_mbx_create_connect_hdr(mbx);
+ fm10k_mbx_write(hw, mbx);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_mbx_validate_handlers - Validate layout of message parsing data
+ * @msg_data: handlers for mailbox events
+ *
+ * This function validates the layout of the message parsing data. This
+ * should be mostly static, but it is important to catch any errors that
+ * are made when constructing the parsers.
+ **/
+STATIC s32 fm10k_mbx_validate_handlers(const struct fm10k_msg_data *msg_data)
+{
+ const struct fm10k_tlv_attr *attr;
+ unsigned int id;
+
+ DEBUGFUNC("fm10k_mbx_validate_handlers");
+
+ /* Allow NULL mailboxes that transmit but don't receive */
+ if (!msg_data)
+ return FM10K_SUCCESS;
+
+ while (msg_data->id != FM10K_TLV_ERROR) {
+ /* all messages should have a function handler */
+ if (!msg_data->func)
+ return FM10K_ERR_PARAM;
+
+ /* parser is optional */
+ attr = msg_data->attr;
+ if (attr) {
+ while (attr->id != FM10K_TLV_ERROR) {
+ id = attr->id;
+ attr++;
+ /* ID should always be increasing */
+ if (id >= attr->id)
+ return FM10K_ERR_PARAM;
+ /* ID should fit in results array */
+ if (id >= FM10K_TLV_RESULTS_MAX)
+ return FM10K_ERR_PARAM;
+ }
+
+ /* verify terminator is in the list */
+ if (attr->id != FM10K_TLV_ERROR)
+ return FM10K_ERR_PARAM;
+ }
+
+ id = msg_data->id;
+ msg_data++;
+ /* ID should always be increasing */
+ if (id >= msg_data->id)
+ return FM10K_ERR_PARAM;
+ }
+
+ /* verify terminator is in the list */
+ if ((msg_data->id != FM10K_TLV_ERROR) || !msg_data->func)
+ return FM10K_ERR_PARAM;
+
+ return FM10K_SUCCESS;
+}
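+
+/* A handler table accepted by the checks above must keep IDs strictly
+ * increasing and terminate with an FM10K_TLV_ERROR entry, e.g. (sketch
+ * with hypothetical attribute list and handler functions):
+ *
+ *	static const struct fm10k_msg_data example_msg_data[] = {
+ *		{ 1, example_attr, example_func },
+ *		{ 4, NULL, other_func },
+ *		{ FM10K_TLV_ERROR, NULL, fm10k_tlv_msg_error },
+ *	};
+ */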
+
+/**
+ * fm10k_mbx_register_handlers - Register a set of handler ops for mailbox
+ * @mbx: pointer to mailbox
+ * @msg_data: handlers for mailbox events
+ *
+ * This function associates a set of message handling ops with a mailbox.
+ **/
+STATIC s32 fm10k_mbx_register_handlers(struct fm10k_mbx_info *mbx,
+ const struct fm10k_msg_data *msg_data)
+{
+ DEBUGFUNC("fm10k_mbx_register_handlers");
+
+ /* validate layout of handlers before assigning them */
+ if (fm10k_mbx_validate_handlers(msg_data))
+ return FM10K_ERR_PARAM;
+
+ /* initialize the message handlers */
+ mbx->msg_data = msg_data;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_pfvf_mbx_init - Initialize mailbox memory for PF/VF mailbox
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ * @msg_data: handlers for mailbox events
+ * @id: ID reference for PF as it supports up to 64 PF/VF mailboxes
+ *
+ * This function initializes the mailbox for use. It will split the
+ * buffer provided between the Tx and Rx FIFOs. In order to allow for easy
+ * masking of head/tail the FIFO sizes must be a power of 2 and are reported
+ * in DWORDs, not bytes. Any invalid value will cause the mailbox to return
+ * an error.
+ **/
+s32 fm10k_pfvf_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx,
+ const struct fm10k_msg_data *msg_data, u8 id)
+{
+ DEBUGFUNC("fm10k_pfvf_mbx_init");
+
+ /* initialize registers */
+ switch (hw->mac.type) {
+ case fm10k_mac_vf:
+ mbx->mbx_reg = FM10K_VFMBX;
+ mbx->mbmem_reg = FM10K_VFMBMEM(FM10K_VFMBMEM_VF_XOR);
+ break;
+ case fm10k_mac_pf:
+ /* there are only 64 VF <-> PF mailboxes */
+ if (id < 64) {
+ mbx->mbx_reg = FM10K_MBX(id);
+ mbx->mbmem_reg = FM10K_MBMEM_VF(id, 0);
+ break;
+ }
+		/* fall through */
+ default:
+ return FM10K_MBX_ERR_NO_MBX;
+ }
+
+ /* start out in closed state */
+ mbx->state = FM10K_STATE_CLOSED;
+
+ /* validate layout of handlers before assigning them */
+ if (fm10k_mbx_validate_handlers(msg_data))
+ return FM10K_ERR_PARAM;
+
+ /* initialize the message handlers */
+ mbx->msg_data = msg_data;
+
+ /* start mailbox as timed out and let the reset_hw call
+ * set the timeout value to begin communications
+ */
+ mbx->timeout = 0;
+ mbx->usec_delay = FM10K_MBX_INIT_DELAY;
+
+ /* initialize tail and head */
+ mbx->tail = 1;
+ mbx->head = 1;
+
+ /* initialize CRC seeds */
+ mbx->local = FM10K_MBX_CRC_SEED;
+ mbx->remote = FM10K_MBX_CRC_SEED;
+
+ /* Split buffer for use by Tx/Rx FIFOs */
+ mbx->max_size = FM10K_MBX_MSG_MAX_SIZE;
+ mbx->mbmem_len = FM10K_VFMBMEM_VF_XOR;
+
+ /* initialize the FIFOs, sizes are in 4 byte increments */
+ fm10k_fifo_init(&mbx->tx, mbx->buffer, FM10K_MBX_TX_BUFFER_SIZE);
+ fm10k_fifo_init(&mbx->rx, &mbx->buffer[FM10K_MBX_TX_BUFFER_SIZE],
+ FM10K_MBX_RX_BUFFER_SIZE);
+
+ /* initialize function pointers */
+ mbx->ops.connect = fm10k_mbx_connect;
+ mbx->ops.disconnect = fm10k_mbx_disconnect;
+ mbx->ops.rx_ready = fm10k_mbx_rx_ready;
+ mbx->ops.tx_ready = fm10k_mbx_tx_ready;
+ mbx->ops.tx_complete = fm10k_mbx_tx_complete;
+ mbx->ops.enqueue_tx = fm10k_mbx_enqueue_tx;
+ mbx->ops.process = fm10k_mbx_process;
+ mbx->ops.register_handlers = fm10k_mbx_register_handlers;
+
+ return FM10K_SUCCESS;
+}
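+
+/* Example bring-up sequence (illustrative sketch; the handler table name
+ * is hypothetical):
+ *
+ *	err = fm10k_pfvf_mbx_init(hw, &hw->mbx, example_msg_data, 0);
+ *	if (!err)
+ *		err = hw->mbx.ops.connect(hw, &hw->mbx);
+ */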
+
+/**
+ * fm10k_sm_mbx_create_data_hdr - Generate a mailbox header for local FIFO
+ * @mbx: pointer to mailbox
+ *
+ * This function returns a data mailbox header
+ **/
+STATIC void fm10k_sm_mbx_create_data_hdr(struct fm10k_mbx_info *mbx)
+{
+ if (mbx->tail_len)
+ mbx->mbx_lock |= FM10K_MBX_REQ;
+
+ mbx->mbx_hdr = FM10K_MSG_HDR_FIELD_SET(mbx->tail, SM_TAIL) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->remote, SM_VER) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->head, SM_HEAD);
+}
+
+/**
+ * fm10k_sm_mbx_create_connect_hdr - Generate a mailbox header for local FIFO
+ * @mbx: pointer to mailbox
+ * @err: error flags to report if any
+ *
+ * This function returns a connection mailbox header
+ **/
+STATIC void fm10k_sm_mbx_create_connect_hdr(struct fm10k_mbx_info *mbx, u8 err)
+{
+ if (mbx->local)
+ mbx->mbx_lock |= FM10K_MBX_REQ;
+
+ mbx->mbx_hdr = FM10K_MSG_HDR_FIELD_SET(mbx->tail, SM_TAIL) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->remote, SM_VER) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->head, SM_HEAD) |
+ FM10K_MSG_HDR_FIELD_SET(err, SM_ERR);
+}
+
+/**
+ * fm10k_sm_mbx_connect_reset - Reset following request for reset
+ * @mbx: pointer to mailbox
+ *
+ * This function resets the mailbox to a just connected state
+ **/
+STATIC void fm10k_sm_mbx_connect_reset(struct fm10k_mbx_info *mbx)
+{
+ /* flush any uncompleted work */
+ fm10k_mbx_reset_work(mbx);
+
+ /* set local version to max and remote version to 0 */
+ mbx->local = FM10K_SM_MBX_VERSION;
+ mbx->remote = 0;
+
+ /* initialize tail and head */
+ mbx->tail = 1;
+ mbx->head = 1;
+
+ /* reset state back to connect */
+ mbx->state = FM10K_STATE_CONNECT;
+}
+
+/**
+ * fm10k_sm_mbx_connect - Start switch manager mailbox connection
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will initiate a mailbox connection with the switch
+ * manager. To do this it will first disconnect the mailbox, and then
+ * reconnect it in order to complete a reset of the mailbox.
+ *
+ * This function will return an error if the mailbox has not been
+ * initialized or is currently in use.
+ **/
+STATIC s32 fm10k_sm_mbx_connect(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
+{
+ DEBUGFUNC("fm10k_sm_mbx_connect");
+
+ /* we cannot connect an uninitialized mailbox */
+ if (!mbx->rx.buffer)
+ return FM10K_MBX_ERR_NO_SPACE;
+
+ /* we cannot connect an already connected mailbox */
+ if (mbx->state != FM10K_STATE_CLOSED)
+ return FM10K_MBX_ERR_BUSY;
+
+ /* mailbox timeout can now become active */
+ mbx->timeout = FM10K_MBX_INIT_TIMEOUT;
+
+ /* Place mbx in ready to connect state */
+ mbx->state = FM10K_STATE_CONNECT;
+ mbx->max_size = FM10K_MBX_MSG_MAX_SIZE;
+
+ /* reset interface back to connect */
+ fm10k_sm_mbx_connect_reset(mbx);
+
+ /* enable interrupt and notify other party of new message */
+ mbx->mbx_lock = FM10K_MBX_REQ_INTERRUPT | FM10K_MBX_ACK_INTERRUPT |
+ FM10K_MBX_INTERRUPT_ENABLE;
+
+ /* generate and load connect header into mailbox */
+ fm10k_sm_mbx_create_connect_hdr(mbx, 0);
+ fm10k_mbx_write(hw, mbx);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_sm_mbx_disconnect - Shutdown mailbox connection
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will shut down the mailbox. It places the mailbox first
+ * in the disconnect state, it then allows up to a predefined timeout for
+ * the mailbox to transition to close on its own. If this does not occur
+ * then the mailbox will be forced into the closed state.
+ *
+ * Any mailbox transactions not completed before calling this function
+ * are not guaranteed to complete and may be dropped.
+ **/
+STATIC void fm10k_sm_mbx_disconnect(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ int timeout = mbx->timeout ? FM10K_MBX_DISCONNECT_TIMEOUT : 0;
+
+ DEBUGFUNC("fm10k_sm_mbx_disconnect");
+
+ /* Place mbx in ready to disconnect state */
+ mbx->state = FM10K_STATE_DISCONNECT;
+
+ /* trigger interrupt to start shutdown process */
+ FM10K_WRITE_REG(hw, mbx->mbx_reg, FM10K_MBX_REQ |
+ FM10K_MBX_INTERRUPT_DISABLE);
+ do {
+ usec_delay(FM10K_MBX_POLL_DELAY);
+ mbx->ops.process(hw, mbx);
+ timeout -= FM10K_MBX_POLL_DELAY;
+ } while ((timeout > 0) && (mbx->state != FM10K_STATE_CLOSED));
+
+ /* in case we didn't close just force the mailbox into shutdown */
+ mbx->state = FM10K_STATE_CLOSED;
+ mbx->remote = 0;
+ fm10k_mbx_reset_work(mbx);
+ fm10k_fifo_drop_all(&mbx->tx);
+
+ FM10K_WRITE_REG(hw, mbx->mbmem_reg, 0);
+}
+
+/**
+ * fm10k_sm_mbx_validate_fifo_hdr - Validate fields in the remote FIFO header
+ * @mbx: pointer to mailbox
+ *
+ * This function will parse the fields in the mailbox header and return
+ * an error if the header contains any of a number of invalid configurations
+ * including unrecognized offsets or version numbers.
+ **/
+STATIC s32 fm10k_sm_mbx_validate_fifo_hdr(struct fm10k_mbx_info *mbx)
+{
+ const u32 *hdr = &mbx->mbx_hdr;
+ u16 tail, head, ver;
+
+ DEBUGFUNC("fm10k_sm_mbx_validate_fifo_hdr");
+
+ tail = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_TAIL);
+ ver = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_VER);
+ head = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_HEAD);
+
+ switch (ver) {
+ case 0:
+ break;
+ case FM10K_SM_MBX_VERSION:
+ if (!head || head > FM10K_SM_MBX_FIFO_LEN)
+ return FM10K_MBX_ERR_HEAD;
+ if (!tail || tail > FM10K_SM_MBX_FIFO_LEN)
+ return FM10K_MBX_ERR_TAIL;
+ if (mbx->tail < head)
+ head += mbx->mbmem_len - 1;
+ if (tail < mbx->head)
+ tail += mbx->mbmem_len - 1;
+ if (fm10k_mbx_index_len(mbx, head, mbx->tail) > mbx->tail_len)
+ return FM10K_MBX_ERR_HEAD;
+ if (fm10k_mbx_index_len(mbx, mbx->head, tail) < mbx->mbmem_len)
+ break;
+ return FM10K_MBX_ERR_TAIL;
+ default:
+ return FM10K_MBX_ERR_SRC;
+ }
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_sm_mbx_process_error - Process header with error flag set
+ * @mbx: pointer to mailbox
+ *
+ * This function is meant to respond to a request where the error flag
+ * is set. As a result we will terminate a connection if one is present
+ * and fall back into the reset state with a connection header of version
+ * 0 (RESET).
+ **/
+STATIC void fm10k_sm_mbx_process_error(struct fm10k_mbx_info *mbx)
+{
+ const enum fm10k_mbx_state state = mbx->state;
+
+ switch (state) {
+ case FM10K_STATE_DISCONNECT:
+ /* if there is an error just disconnect */
+ mbx->remote = 0;
+ break;
+ case FM10K_STATE_OPEN:
+ /* flush any uncompleted work */
+ fm10k_sm_mbx_connect_reset(mbx);
+ break;
+ case FM10K_STATE_CONNECT:
+		/* try connecting at a lower version */
+ if (mbx->remote) {
+ while (mbx->local > 1)
+ mbx->local--;
+ mbx->remote = 0;
+ }
+ break;
+ default:
+ break;
+ }
+
+ fm10k_sm_mbx_create_connect_hdr(mbx, 0);
+}
+
+/**
+ * fm10k_sm_mbx_create_error_msg - Process an error in FIFO header
+ * @mbx: pointer to mailbox
+ * @err: local error encountered
+ *
+ * This function will interpret the error provided by err, and based on
+ * that it may set the error bit in the local message header
+ **/
+STATIC void fm10k_sm_mbx_create_error_msg(struct fm10k_mbx_info *mbx, s32 err)
+{
+ /* only generate an error message for these types */
+ switch (err) {
+ case FM10K_MBX_ERR_TAIL:
+ case FM10K_MBX_ERR_HEAD:
+ case FM10K_MBX_ERR_SRC:
+ case FM10K_MBX_ERR_SIZE:
+ case FM10K_MBX_ERR_RSVD0:
+ break;
+ default:
+ return;
+ }
+
+ /* process it as though we received an error, and send error reply */
+ fm10k_sm_mbx_process_error(mbx);
+ fm10k_sm_mbx_create_connect_hdr(mbx, 1);
+}
+
+/**
+ * fm10k_sm_mbx_receive - Take message from Rx mailbox FIFO and put it in Rx
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ * @tail: tail index of message
+ *
+ * This function will dequeue one message from the Rx switch manager mailbox
+ * FIFO and place it in the Rx mailbox FIFO for processing by software.
+ **/
+STATIC s32 fm10k_sm_mbx_receive(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx,
+ u16 tail)
+{
+ /* reduce length by 1 to convert to a mask */
+ u16 mbmem_len = mbx->mbmem_len - 1;
+ s32 err;
+
+ DEBUGFUNC("fm10k_sm_mbx_receive");
+
+ /* push tail in front of head */
+ if (tail < mbx->head)
+ tail += mbmem_len;
+
+ /* copy data to the Rx FIFO */
+ err = fm10k_mbx_push_tail(hw, mbx, tail);
+ if (err < 0)
+ return err;
+
+ /* process messages if we have received any */
+ fm10k_mbx_dequeue_rx(hw, mbx);
+
+ /* guarantee head aligns with the end of the last message */
+ mbx->head = fm10k_mbx_head_sub(mbx, mbx->pushed);
+ mbx->pushed = 0;
+
+ /* clear any extra bits left over since index adds 1 extra bit */
+ if (mbx->head > mbmem_len)
+ mbx->head -= mbmem_len;
+
+ return err;
+}
+
+/**
+ * fm10k_sm_mbx_transmit - Take message from Tx and put it in Tx mailbox FIFO
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ * @head: head index of message
+ *
+ * This function will dequeue one message from the Tx mailbox FIFO and place
+ * it in the Tx switch manager mailbox FIFO for processing by hardware.
+ **/
+STATIC void fm10k_sm_mbx_transmit(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx, u16 head)
+{
+ struct fm10k_mbx_fifo *fifo = &mbx->tx;
+ /* reduce length by 1 to convert to a mask */
+ u16 mbmem_len = mbx->mbmem_len - 1;
+ u16 tail_len, len = 0;
+ u32 *msg;
+
+ DEBUGFUNC("fm10k_sm_mbx_transmit");
+
+ /* push head behind tail */
+ if (mbx->tail < head)
+ head += mbmem_len;
+
+ fm10k_mbx_pull_head(hw, mbx, head);
+
+ /* determine msg aligned offset for end of buffer */
+ do {
+ msg = fifo->buffer + fm10k_fifo_head_offset(fifo, len);
+ tail_len = len;
+ len += FM10K_TLV_DWORD_LEN(*msg);
+ } while ((len <= mbx->tail_len) && (len < mbmem_len));
+
+ /* guarantee we stop on a message boundary */
+ if (mbx->tail_len > tail_len) {
+ mbx->tail = fm10k_mbx_tail_sub(mbx, mbx->tail_len - tail_len);
+ mbx->tail_len = tail_len;
+ }
+
+ /* clear any extra bits left over since index adds 1 extra bit */
+ if (mbx->tail > mbmem_len)
+ mbx->tail -= mbmem_len;
+}
+
+/**
+ * fm10k_sm_mbx_create_reply - Generate reply based on state and remote head
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ * @head: acknowledgement number
+ *
+ * This function will generate an outgoing message based on the current
+ * mailbox state and the remote FIFO head. The reply is loaded directly
+ * into the mailbox header; nothing is returned.
+ **/
+STATIC void fm10k_sm_mbx_create_reply(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx, u16 head)
+{
+ switch (mbx->state) {
+ case FM10K_STATE_OPEN:
+ case FM10K_STATE_DISCONNECT:
+ /* flush out Tx data */
+ fm10k_sm_mbx_transmit(hw, mbx, head);
+
+ /* generate new header based on data */
+ if (mbx->tail_len || (mbx->state == FM10K_STATE_OPEN)) {
+ fm10k_sm_mbx_create_data_hdr(mbx);
+ } else {
+ mbx->remote = 0;
+ fm10k_sm_mbx_create_connect_hdr(mbx, 0);
+ }
+ break;
+ case FM10K_STATE_CONNECT:
+ case FM10K_STATE_CLOSED:
+ fm10k_sm_mbx_create_connect_hdr(mbx, 0);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * fm10k_sm_mbx_process_reset - Process header with version == 0 (RESET)
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function is meant to respond to a request where the version data
+ * is set to 0. As such we will either terminate the connection or go
+ * into the connect state in order to re-establish the connection. This
+ * function can also be used to respond to an error, as resetting the
+ * connection is also a means of dealing with errors.
+ **/
+STATIC s32 fm10k_sm_mbx_process_reset(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ s32 err = FM10K_SUCCESS;
+ const enum fm10k_mbx_state state = mbx->state;
+
+ switch (state) {
+ case FM10K_STATE_DISCONNECT:
+ /* drop remote connections and disconnect */
+ mbx->state = FM10K_STATE_CLOSED;
+ mbx->remote = 0;
+ mbx->local = 0;
+ break;
+ case FM10K_STATE_OPEN:
+ /* flush any incomplete work */
+ fm10k_sm_mbx_connect_reset(mbx);
+ err = FM10K_ERR_RESET_REQUESTED;
+ break;
+ case FM10K_STATE_CONNECT:
+ /* Update remote value to match local value */
+ mbx->remote = mbx->local;
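+		/* fall through */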
+ default:
+ break;
+ }
+
+ fm10k_sm_mbx_create_reply(hw, mbx, mbx->tail);
+
+ return err;
+}
+
+/**
+ * fm10k_sm_mbx_process_version_1 - Process header with version == 1
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function is meant to process messages received when the remote
+ * mailbox is active.
+ **/
+STATIC s32 fm10k_sm_mbx_process_version_1(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ const u32 *hdr = &mbx->mbx_hdr;
+ u16 head, tail;
+ s32 len;
+
+ /* pull all fields needed for verification */
+ tail = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_TAIL);
+ head = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_HEAD);
+
+ /* if we are in connect and wanting version 1 then start up and go */
+ if (mbx->state == FM10K_STATE_CONNECT) {
+ if (!mbx->remote)
+ goto send_reply;
+ if (mbx->remote != 1)
+ return FM10K_MBX_ERR_SRC;
+
+ mbx->state = FM10K_STATE_OPEN;
+ }
+
+ do {
+ /* abort on message size errors */
+ len = fm10k_sm_mbx_receive(hw, mbx, tail);
+ if (len < 0)
+ return len;
+
+ /* continue until we have flushed the Rx FIFO */
+ } while (len);
+
+send_reply:
+ fm10k_sm_mbx_create_reply(hw, mbx, head);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_sm_mbx_process - Process switch manager mailbox interrupt
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will process incoming mailbox events and generate mailbox
+ * replies. It will return FM10K_SUCCESS on success or a negative value
+ * on error.
+ **/
+STATIC s32 fm10k_sm_mbx_process(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ s32 err;
+
+ DEBUGFUNC("fm10k_sm_mbx_process");
+
+ /* we do not read mailbox if closed */
+ if (mbx->state == FM10K_STATE_CLOSED)
+ return FM10K_SUCCESS;
+
+ /* retrieve data from switch manager */
+ err = fm10k_mbx_read(hw, mbx);
+ if (err)
+ return err;
+
+ err = fm10k_sm_mbx_validate_fifo_hdr(mbx);
+ if (err < 0)
+ goto fifo_err;
+
+ if (FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, SM_ERR)) {
+ fm10k_sm_mbx_process_error(mbx);
+ goto fifo_err;
+ }
+
+ switch (FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, SM_VER)) {
+ case 0:
+ err = fm10k_sm_mbx_process_reset(hw, mbx);
+ break;
+ case FM10K_SM_MBX_VERSION:
+ err = fm10k_sm_mbx_process_version_1(hw, mbx);
+ break;
+ }
+
+fifo_err:
+ if (err < 0)
+ fm10k_sm_mbx_create_error_msg(mbx, err);
+
+ /* report data to switch manager */
+ fm10k_mbx_write(hw, mbx);
+
+ return err;
+}
+
+/**
+ * fm10k_sm_mbx_init - Initialize mailbox memory for PF/SM mailbox
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ * @msg_data: handlers for mailbox events
+ *
+ * This function initializes the PF/SM mailbox for use. It will split the
+ * buffer provided between the Tx and Rx FIFOs. In order to allow for easy
+ * masking of head/tail the FIFO sizes must be a power of 2 and are reported
+ * in DWORDs, not bytes. Any invalid value will cause the mailbox to return
+ * an error.
+ **/
+s32 fm10k_sm_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx,
+ const struct fm10k_msg_data *msg_data)
+{
+ DEBUGFUNC("fm10k_sm_mbx_init");
+ UNREFERENCED_1PARAMETER(hw);
+
+ mbx->mbx_reg = FM10K_GMBX;
+ mbx->mbmem_reg = FM10K_MBMEM_PF(0);
+
+ /* start out in closed state */
+ mbx->state = FM10K_STATE_CLOSED;
+
+ /* validate layout of handlers before assigning them */
+ if (fm10k_mbx_validate_handlers(msg_data))
+ return FM10K_ERR_PARAM;
+
+ /* initialize the message handlers */
+ mbx->msg_data = msg_data;
+
+ /* start mailbox as timed out and let the reset_hw call
+ * set the timeout value to begin communications
+ */
+ mbx->timeout = 0;
+ mbx->usec_delay = FM10K_MBX_INIT_DELAY;
+
+ /* Split buffer for use by Tx/Rx FIFOs */
+ mbx->max_size = FM10K_MBX_MSG_MAX_SIZE;
+ mbx->mbmem_len = FM10K_MBMEM_PF_XOR;
+
+ /* initialize the FIFOs, sizes are in 4 byte increments */
+ fm10k_fifo_init(&mbx->tx, mbx->buffer, FM10K_MBX_TX_BUFFER_SIZE);
+ fm10k_fifo_init(&mbx->rx, &mbx->buffer[FM10K_MBX_TX_BUFFER_SIZE],
+ FM10K_MBX_RX_BUFFER_SIZE);
+
+ /* initialize function pointers */
+ mbx->ops.connect = fm10k_sm_mbx_connect;
+ mbx->ops.disconnect = fm10k_sm_mbx_disconnect;
+ mbx->ops.rx_ready = fm10k_mbx_rx_ready;
+ mbx->ops.tx_ready = fm10k_mbx_tx_ready;
+ mbx->ops.tx_complete = fm10k_mbx_tx_complete;
+ mbx->ops.enqueue_tx = fm10k_mbx_enqueue_tx;
+ mbx->ops.process = fm10k_sm_mbx_process;
+ mbx->ops.register_handlers = fm10k_mbx_register_handlers;
+
+ return FM10K_SUCCESS;
+}
diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_mbx.h b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_mbx.h
new file mode 100644
index 000000000..a4a8e5adb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_mbx.h
@@ -0,0 +1,297 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013 - 2015 Intel Corporation
+ */
+
+#ifndef _FM10K_MBX_H_
+#define _FM10K_MBX_H_
+
+/* forward declaration */
+struct fm10k_mbx_info;
+
+#include "fm10k_type.h"
+#include "fm10k_tlv.h"
+
+/* PF Mailbox Registers */
+#define FM10K_MBMEM(_n) ((_n) + 0x18000)
+#define FM10K_MBMEM_VF(_n, _m) (((_n) * 0x10) + (_m) + 0x18000)
+#define FM10K_MBMEM_SM(_n) ((_n) + 0x18400)
+#define FM10K_MBMEM_PF(_n) ((_n) + 0x18600)
+/* XOR provides means of switching from Tx to Rx FIFO */
+#define FM10K_MBMEM_PF_XOR (FM10K_MBMEM_SM(0) ^ FM10K_MBMEM_PF(0))
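+/* e.g. FM10K_MBMEM_SM(0) ^ FM10K_MBMEM_PF(0) = 0x200, so XORing mbmem_reg
+ * with mbmem_len flips between the local and remote halves of the mailbox
+ * memory, as done in fm10k_mbx_read()
+ */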
+#define FM10K_MBX(_n) ((_n) + 0x18800)
+#define FM10K_MBX_REQ 0x00000002
+#define FM10K_MBX_ACK 0x00000004
+#define FM10K_MBX_REQ_INTERRUPT 0x00000008
+#define FM10K_MBX_ACK_INTERRUPT 0x00000010
+#define FM10K_MBX_INTERRUPT_ENABLE 0x00000020
+#define FM10K_MBX_INTERRUPT_DISABLE 0x00000040
+#define FM10K_MBX_GLOBAL_REQ_INTERRUPT 0x00000200
+#define FM10K_MBX_GLOBAL_ACK_INTERRUPT 0x00000400
+#define FM10K_MBICR(_n) ((_n) + 0x18840)
+#define FM10K_GMBX 0x18842
+
+/* VF Mailbox Registers */
+#define FM10K_VFMBX 0x00010
+#define FM10K_VFMBMEM(_n) ((_n) + 0x00020)
+#define FM10K_VFMBMEM_LEN 16
+#define FM10K_VFMBMEM_VF_XOR (FM10K_VFMBMEM_LEN / 2)
+
+/* Delays/timeouts */
+#define FM10K_MBX_DISCONNECT_TIMEOUT 500
+#define FM10K_MBX_POLL_DELAY 19
+#define FM10K_MBX_INT_DELAY 20
+
+#define FM10K_WRITE_MBX(hw, reg, value) FM10K_WRITE_REG(hw, reg, value)
+
+/* PF/VF Mailbox state machine
+ *
+ * +----------+ connect() +----------+
+ * | CLOSED | --------------> | CONNECT |
+ * +----------+ +----------+
+ * ^ ^ |
+ * | rcv: rcv: | | rcv:
+ * | Connect Disconnect | | Connect
+ * | Disconnect Error | | Data
+ * | | |
+ * | | V
+ * +----------+ disconnect() +----------+
+ * |DISCONNECT| <-------------- | OPEN |
+ * +----------+ +----------+
+ *
+ * The diagram above describes the PF/VF mailbox state machine. There
+ * are four main states to this machine.
+ * Closed: This state represents a mailbox that is in a standby state
+ * with interrupts disabled. In this state the mailbox should not
+ * read the mailbox or write any data. The only means of exiting
+ * this state is for the system to make the connect() call for the
+ * mailbox, it will then transition to the connect state.
+ * Connect: In this state the mailbox is seeking a connection. It will
+ * post a connect message with no specified destination and will
+ * wait for a reply from the other side of the mailbox. This state
+ * is exited when either a connect with the local mailbox as the
+ * destination is received or when a data message is received with
+ * a valid sequence number.
+ * Open: In this state the mailbox is able to transfer data between the local
+ * entity and the remote. It will fall back to connect in the event of
+ * receiving either an error message, or a disconnect message. It will
+ * transition to disconnect on a call to disconnect();
+ * Disconnect: In this state the mailbox is attempting to gracefully terminate
+ * the connection. It will do so at the first point where it knows
+ * that the remote endpoint is either done sending, or when the
+ * remote endpoint has fallen back into connect.
+ */
+enum fm10k_mbx_state {
+ FM10K_STATE_CLOSED,
+ FM10K_STATE_CONNECT,
+ FM10K_STATE_OPEN,
+ FM10K_STATE_DISCONNECT,
+};
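+
+/* Typical lifecycle as driven through the ops table (illustrative sketch):
+ *
+ *	mbx->ops.connect(hw, mbx);		CLOSED -> CONNECT
+ *	mbx->ops.process(hw, mbx);		CONNECT -> OPEN on valid reply
+ *	mbx->ops.enqueue_tx(hw, mbx, msg);	transfer data while OPEN
+ *	mbx->ops.disconnect(hw, mbx);		OPEN -> DISCONNECT -> CLOSED
+ */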
+
+/* PF/VF Mailbox header format
+ * 3 2 1 0
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Size/Err_no/CRC | Rsvd0 | Head | Tail | Type |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * The layout above describes the format for the header used in the PF/VF
+ * mailbox. The header is broken out into the following fields:
+ * Type: There are 4 supported message types
+ * 0x8: Data header - used to transport message data
+ * 0xC: Connect header - used to establish connection
+ * 0xD: Disconnect header - used to tear down a connection
+ * 0xE: Error header - used to address message exceptions
+ * Tail: Tail index for local FIFO
+ * Tail index actually consists of two parts. The MSB of
+ * the head is a loop tracker, it is 0 on an even numbered
+ * loop through the FIFO, and 1 on the odd numbered loops.
+ * To get the actual mailbox offset based on the tail it
+ * is necessary to add bit 3 to bit 0 and clear bit 3. This
+ * gives us a valid range of 0x1 - 0xE.
+ * Head: Head index for remote FIFO
+ * Head index follows the same format as the tail index.
+ * Rsvd0: Reserved 0 portion of the mailbox header
+ * CRC: Running CRC for all data since connect plus current message header
+ * Size: Maximum message size - Applies only to connect headers
+ * The maximum message size is provided during connect to avoid
+ * jamming the mailbox with messages that do not fit.
+ * Err_no: Error number - Applies only to error headers
+ * The error number provides an indication of the type of error
+ * experienced.
+ */
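+
+/* Worked example of the index decode described above: a tail index of 0xA
+ * has bit 3 set, so the actual FIFO offset is (0xA & 0x7) + 1 = 0x3, while
+ * an index of 0x2 decodes to offset 0x2. Index values 0x0 and 0xF never
+ * occur, which is why fm10k_mbx_validate_msg_hdr() rejects them.
+ */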
+
+/* macros for retrieving and setting header values */
+#define FM10K_MSG_HDR_MASK(name) \
+ ((0x1u << FM10K_MSG_##name##_SIZE) - 1)
+#define FM10K_MSG_HDR_FIELD_SET(value, name) \
+ (((u32)(value) & FM10K_MSG_HDR_MASK(name)) << FM10K_MSG_##name##_SHIFT)
+#define FM10K_MSG_HDR_FIELD_GET(value, name) \
+ ((u16)((value) >> FM10K_MSG_##name##_SHIFT) & FM10K_MSG_HDR_MASK(name))
+
+/* offsets shared between all headers */
+#define FM10K_MSG_TYPE_SHIFT 0
+#define FM10K_MSG_TYPE_SIZE 4
+#define FM10K_MSG_TAIL_SHIFT 4
+#define FM10K_MSG_TAIL_SIZE 4
+#define FM10K_MSG_HEAD_SHIFT 8
+#define FM10K_MSG_HEAD_SIZE 4
+#define FM10K_MSG_RSVD0_SHIFT 12
+#define FM10K_MSG_RSVD0_SIZE 4
+
+/* offsets for data/disconnect headers */
+#define FM10K_MSG_CRC_SHIFT 16
+#define FM10K_MSG_CRC_SIZE 16
+
+/* offsets for connect headers */
+#define FM10K_MSG_CONNECT_SIZE_SHIFT 16
+#define FM10K_MSG_CONNECT_SIZE_SIZE 16
+
+/* offsets for error headers */
+#define FM10K_MSG_ERR_NO_SHIFT 16
+#define FM10K_MSG_ERR_NO_SIZE 16
+
+enum fm10k_msg_type {
+ FM10K_MSG_DATA = 0x8,
+ FM10K_MSG_CONNECT = 0xC,
+ FM10K_MSG_DISCONNECT = 0xD,
+ FM10K_MSG_ERROR = 0xE,
+};
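+
+/* Example of the field macros at work: fm10k_mbx_create_data_hdr() in
+ * fm10k_mbx.c builds a data header with tail 0x3 and head 0x5 as
+ *	FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_DATA, TYPE) |
+ *	FM10K_MSG_HDR_FIELD_SET(0x3, TAIL) |
+ *	FM10K_MSG_HDR_FIELD_SET(0x5, HEAD)
+ * which yields 0x538 before the CRC field is merged in, and
+ * FM10K_MSG_HDR_FIELD_GET(0x538, TAIL) recovers 0x3.
+ */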
+
+/* HNI/SM Mailbox FIFO format
+ * 3 2 1 0
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-------+-----------------------+-------+-----------------------+
+ * | Error | Remote Head |Version| Local Tail |
+ * +-------+-----------------------+-------+-----------------------+
+ * | |
+ * . Local FIFO Data .
+ * . .
+ * +-------+-----------------------+-------+-----------------------+
+ *
+ * The layout above describes the format for the FIFOs used by the host
+ * network interface and the switch manager to communicate messages back
+ * and forth. Both the HNI and the switch maintain one such FIFO. The
+ * layout in memory has the switch manager FIFO followed immediately by
+ * the HNI FIFO. For this reason only the pointer to the HNI FIFO is
+ * kept in the mailbox ops, as the offset between the two is fixed.
+ *
+ * The header for the FIFO is broken out into the following fields:
+ * Local Tail: Offset into FIFO region for next DWORD to write.
+ * Version: Version info for mailbox, only values of 0/1 are supported.
+ * Remote Head: Offset into remote FIFO to indicate how much we have read.
+ * Error: Error indication, values TBD.
+ */
+
+/* version number for switch manager mailboxes */
+#define FM10K_SM_MBX_VERSION 1
+#define FM10K_SM_MBX_FIFO_LEN (FM10K_MBMEM_PF_XOR - 1)
+
+/* offsets shared between all SM FIFO headers */
+#define FM10K_MSG_SM_TAIL_SHIFT 0
+#define FM10K_MSG_SM_TAIL_SIZE 12
+#define FM10K_MSG_SM_VER_SHIFT 12
+#define FM10K_MSG_SM_VER_SIZE 4
+#define FM10K_MSG_SM_HEAD_SHIFT 16
+#define FM10K_MSG_SM_HEAD_SIZE 12
+#define FM10K_MSG_SM_ERR_SHIFT 28
+#define FM10K_MSG_SM_ERR_SIZE 4
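+
+/* e.g. a version 1 header with local tail 0x010 and remote head 0x020
+ * encodes as 0x00201010 via the FM10K_MSG_HDR_FIELD_SET() macros above
+ */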
+
+/* All error messages returned by mailbox functions
+ * The value -511 is 0xFE01 as a 16-bit value. The idea is to order the errors
+ * from 0xFE01 - 0xFEFF so error codes are easily visible in the mailbox
+ * messages. This also helps to avoid error number collisions as Linux
+ * doesn't appear to use error numbers 256 - 511.
+ */
+#define FM10K_MBX_ERR(_n) ((_n) - 512)
+#define FM10K_MBX_ERR_NO_MBX FM10K_MBX_ERR(0x01)
+#define FM10K_MBX_ERR_NO_SPACE FM10K_MBX_ERR(0x03)
+#define FM10K_MBX_ERR_TAIL FM10K_MBX_ERR(0x05)
+#define FM10K_MBX_ERR_HEAD FM10K_MBX_ERR(0x06)
+#define FM10K_MBX_ERR_SRC FM10K_MBX_ERR(0x08)
+#define FM10K_MBX_ERR_TYPE FM10K_MBX_ERR(0x09)
+#define FM10K_MBX_ERR_SIZE FM10K_MBX_ERR(0x0B)
+#define FM10K_MBX_ERR_BUSY FM10K_MBX_ERR(0x0C)
+#define FM10K_MBX_ERR_RSVD0 FM10K_MBX_ERR(0x0E)
+#define FM10K_MBX_ERR_CRC FM10K_MBX_ERR(0x0F)
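+
+/* e.g. FM10K_MBX_ERR_TAIL is 0x05 - 512 = -507, which appears as 0xFE05 in
+ * the 16-bit Err_no field of an error header
+ */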
+
+#define FM10K_MBX_CRC_SEED 0xFFFF
+
+struct fm10k_mbx_ops {
+ s32 (*connect)(struct fm10k_hw *, struct fm10k_mbx_info *);
+ void (*disconnect)(struct fm10k_hw *, struct fm10k_mbx_info *);
+ bool (*rx_ready)(struct fm10k_mbx_info *);
+ bool (*tx_ready)(struct fm10k_mbx_info *, u16);
+ bool (*tx_complete)(struct fm10k_mbx_info *);
+ s32 (*enqueue_tx)(struct fm10k_hw *, struct fm10k_mbx_info *,
+ const u32 *);
+ s32 (*process)(struct fm10k_hw *, struct fm10k_mbx_info *);
+ s32 (*register_handlers)(struct fm10k_mbx_info *,
+ const struct fm10k_msg_data *);
+};
+
+struct fm10k_mbx_fifo {
+ u32 *buffer;
+ u16 head;
+ u16 tail;
+ u16 size;
+};
+
+/* size of buffer to be stored in mailbox for FIFOs */
+#define FM10K_MBX_TX_BUFFER_SIZE 512
+#define FM10K_MBX_RX_BUFFER_SIZE 128
+#define FM10K_MBX_BUFFER_SIZE \
+ (FM10K_MBX_TX_BUFFER_SIZE + FM10K_MBX_RX_BUFFER_SIZE)
+
+/* minimum and maximum message size in dwords */
+#define FM10K_MBX_MSG_MAX_SIZE \
+ ((FM10K_MBX_TX_BUFFER_SIZE - 1) & (FM10K_MBX_RX_BUFFER_SIZE - 1))
+#define FM10K_VFMBX_MSG_MTU ((FM10K_VFMBMEM_LEN / 2) - 1)
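+/* with the buffer sizes above these evaluate to 127 and 7 DWORDs
+ * respectively
+ */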
+
+#define FM10K_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define FM10K_MBX_INIT_DELAY 500 /* microseconds between retries */
+
+struct fm10k_mbx_info {
+ /* function pointers for mailbox operations */
+ struct fm10k_mbx_ops ops;
+ const struct fm10k_msg_data *msg_data;
+
+ /* message FIFOs */
+ struct fm10k_mbx_fifo rx;
+ struct fm10k_mbx_fifo tx;
+
+ /* delay for handling timeouts */
+ u32 timeout;
+ u32 usec_delay;
+
+ /* mailbox state info */
+ u32 mbx_reg, mbmem_reg, mbx_lock, mbx_hdr;
+ u16 max_size, mbmem_len;
+ u16 tail, tail_len, pulled;
+ u16 head, head_len, pushed;
+ u16 local, remote;
+ enum fm10k_mbx_state state;
+
+ /* result of last mailbox test */
+ s32 test_result;
+
+ /* statistics */
+ u64 tx_busy;
+ u64 tx_dropped;
+ u64 tx_messages;
+ u64 tx_dwords;
+ u64 tx_mbmem_pulled;
+ u64 rx_messages;
+ u64 rx_dwords;
+ u64 rx_mbmem_pushed;
+ u64 rx_parse_err;
+
+ /* Buffer to store messages */
+ u32 buffer[FM10K_MBX_BUFFER_SIZE];
+};
+
+s32 fm10k_pfvf_mbx_init(struct fm10k_hw *, struct fm10k_mbx_info *,
+ const struct fm10k_msg_data *, u8);
+s32 fm10k_sm_mbx_init(struct fm10k_hw *, struct fm10k_mbx_info *,
+ const struct fm10k_msg_data *);
+
+#endif /* _FM10K_MBX_H_ */
diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_osdep.h b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_osdep.h
new file mode 100644
index 000000000..019fba5e2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_osdep.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013 - 2015 Intel Corporation
+ */
+
+#ifndef _FM10K_OSDEP_H_
+#define _FM10K_OSDEP_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <string.h>
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_cycles.h>
+#include <rte_io.h>
+
+#include "../fm10k_logs.h"
+
+/* TODO: this does not look like it should be used... */
+#define ERROR_REPORT2(v1, v2, v3) do { } while (0)
+
+#ifndef BOULDER_RAPIDS_HW
+#define BOULDER_RAPIDS_HW
+#endif
+
+#define STATIC static
+#define DEBUGFUNC(F) DEBUGOUT(F "\n");
+#define DEBUGOUT(S, args...) PMD_DRV_LOG_RAW(DEBUG, S, ##args)
+#define DEBUGOUT1(S, args...) DEBUGOUT(S, ##args)
+#define DEBUGOUT2(S, args...) DEBUGOUT(S, ##args)
+#define DEBUGOUT3(S, args...) DEBUGOUT(S, ##args)
+#define DEBUGOUT6(S, args...) DEBUGOUT(S, ##args)
+#define DEBUGOUT7(S, args...) DEBUGOUT(S, ##args)
+
+#define FALSE 0
+#define TRUE 1
+
+typedef uint8_t u8;
+typedef int8_t s8;
+typedef uint16_t u16;
+typedef int16_t s16;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef int64_t s64;
+typedef uint64_t u64;
+
+#ifndef __le16
+#define __le16 u16
+#define __le32 u32
+#define __le64 u64
+#endif
+#ifndef __be16
+#define __be16 u16
+#define __be32 u32
+#define __be64 u64
+#endif
+
+/* offsets are WORD offsets, not BYTE offsets */
+#define FM10K_WRITE_REG(hw, reg, val) \
+ rte_write32((val), ((hw)->hw_addr + (reg)))
+
+#define FM10K_READ_REG(hw, reg) rte_read32(((hw)->hw_addr + (reg)))
+
+#define FM10K_WRITE_FLUSH(a) FM10K_READ_REG(a, FM10K_CTRL)
+
+#define FM10K_PCI_REG(reg) rte_read32(reg)
+
+#define FM10K_PCI_REG_WRITE(reg, value) rte_write32((value), (reg))
+
+/* not implemented */
+#define FM10K_READ_PCI_WORD(hw, reg) 0
+
+#define FM10K_WRITE_MBX(hw, reg, value) FM10K_WRITE_REG(hw, reg, value)
+#define FM10K_READ_MBX(hw, reg) FM10K_READ_REG(hw, reg)
+
+#define FM10K_LE16_TO_CPU rte_le_to_cpu_16
+#define FM10K_LE32_TO_CPU rte_le_to_cpu_32
+#define FM10K_CPU_TO_LE32 rte_cpu_to_le_32
+#define FM10K_CPU_TO_LE16 rte_cpu_to_le_16
+#define le16_to_cpu rte_le_to_cpu_16
+
+#define FM10K_RMB rte_rmb
+#define FM10K_WMB rte_wmb
+
+#define usec_delay rte_delay_us
+
+#define FM10K_REMOVED(hw_addr) (!(hw_addr))
+
+#ifndef FM10K_IS_ZERO_ETHER_ADDR
+/* make certain address is not 0 */
+#define FM10K_IS_ZERO_ETHER_ADDR(addr) \
+(!((addr)[0] | (addr)[1] | (addr)[2] | (addr)[3] | (addr)[4] | (addr)[5]))
+#endif
+
+#ifndef FM10K_IS_MULTICAST_ETHER_ADDR
+#define FM10K_IS_MULTICAST_ETHER_ADDR(addr) ((addr)[0] & 0x1)
+#endif
+
+#ifndef FM10K_IS_VALID_ETHER_ADDR
+/* make certain address is not multicast or 0 */
+#define FM10K_IS_VALID_ETHER_ADDR(addr) \
+(!FM10K_IS_MULTICAST_ETHER_ADDR(addr) && !FM10K_IS_ZERO_ETHER_ADDR(addr))
+#endif
+
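+/* simplified stand-in for the Linux kernel's do_div(); note that unlike
+ * the kernel macro it performs only the division and does not yield the
+ * remainder
+ */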
+#ifndef do_div
+#define do_div(n, base) ({\
+ (n) = (n) / (base);\
+})
+#endif /* do_div */
+
+/* DPDK can't access IOMEM directly */
+#ifndef FM10K_WRITE_SW_REG
+#define FM10K_WRITE_SW_REG(v1, v2, v3) do { } while (0)
+#endif
+
+#ifndef fm10k_read_reg
+#define fm10k_read_reg FM10K_READ_REG
+#endif
+
+#define FM10K_INTEL_VENDOR_ID 0x8086
+#define FM10K_DMA_CTRL_MINMSS_SHIFT 9
+#define FM10K_EICR_PCA_FAULT 0x00000001
+#define FM10K_EICR_THI_FAULT 0x00000004
+#define FM10K_EICR_FUM_FAULT 0x00000020
+#define FM10K_EICR_SRAMERROR 0x00000400
+#define FM10K_SRAM_IP 0x13003
+#define FM10K_RXINT_TIMER_SHIFT 8
+#define FM10K_TXINT_TIMER_SHIFT 8
+#define FM10K_RXD_PKTTYPE_MASK 0x03F0
+#define FM10K_RXD_PKTTYPE_SHIFT 4
+
+#define FM10K_RXD_STATUS_IPCS 0x0008 /* Indicates IPv4 csum */
+#define FM10K_RXD_STATUS_HBO 0x0400 /* header buffer overrun */
+
+#define FM10K_TSO_MINMSS \
+ (FM10K_DMA_CTRL_MINMSS_64 >> FM10K_DMA_CTRL_MINMSS_SHIFT)
+#define FM10K_TSO_MIN_HEADERLEN 54
+#define FM10K_TSO_MAX_HEADERLEN 192
+
+#endif /* _FM10K_OSDEP_H_ */
diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_pf.c b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_pf.c
new file mode 100644
index 000000000..439dd224d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_pf.c
@@ -0,0 +1,2099 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013 - 2015 Intel Corporation
+ */
+
+#include "fm10k_pf.h"
+#include "fm10k_vf.h"
+
+/**
+ * fm10k_reset_hw_pf - PF hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * This function should return the hardware to a state similar to the
+ * one it is in after being powered on.
+ **/
+STATIC s32 fm10k_reset_hw_pf(struct fm10k_hw *hw)
+{
+ s32 err;
+ u32 reg;
+ u16 i;
+
+ DEBUGFUNC("fm10k_reset_hw_pf");
+
+ /* Disable interrupts */
+ FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(ALL));
+
+ /* Lock ITR2 reg 0 into itself and disable interrupt moderation */
+ FM10K_WRITE_REG(hw, FM10K_ITR2(0), 0);
+ FM10K_WRITE_REG(hw, FM10K_INT_CTRL, 0);
+
+ /* We assume here Tx and Rx queue 0 are owned by the PF */
+
+ /* Shut off VF access to their queues forcing them to queue 0 */
+ for (i = 0; i < FM10K_TQMAP_TABLE_SIZE; i++) {
+ FM10K_WRITE_REG(hw, FM10K_TQMAP(i), 0);
+ FM10K_WRITE_REG(hw, FM10K_RQMAP(i), 0);
+ }
+
+ /* shut down all rings */
+ err = fm10k_disable_queues_generic(hw, FM10K_MAX_QUEUES);
+ if (err == FM10K_ERR_REQUESTS_PENDING) {
+ hw->mac.reset_while_pending++;
+ goto force_reset;
+ } else if (err) {
+ return err;
+ }
+
+ /* Verify that DMA is no longer active */
+ reg = FM10K_READ_REG(hw, FM10K_DMA_CTRL);
+ if (reg & (FM10K_DMA_CTRL_TX_ACTIVE | FM10K_DMA_CTRL_RX_ACTIVE))
+ return FM10K_ERR_DMA_PENDING;
+
+force_reset:
+	/* Initiate data path reset */
+ reg = FM10K_DMA_CTRL_DATAPATH_RESET;
+ FM10K_WRITE_REG(hw, FM10K_DMA_CTRL, reg);
+
+ /* Flush write and allow 100us for reset to complete */
+ FM10K_WRITE_FLUSH(hw);
+ usec_delay(FM10K_RESET_TIMEOUT);
+
+ /* Verify we made it out of reset */
+ reg = FM10K_READ_REG(hw, FM10K_IP);
+ if (!(reg & FM10K_IP_NOTINRESET))
+ return FM10K_ERR_RESET_FAILED;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_is_ari_hierarchy_pf - Indicate ARI hierarchy support
+ * @hw: pointer to hardware structure
+ *
+ * Looks at the ARI hierarchy bit to determine whether ARI is supported or not.
+ **/
+STATIC bool fm10k_is_ari_hierarchy_pf(struct fm10k_hw *hw)
+{
+ u16 sriov_ctrl = FM10K_READ_PCI_WORD(hw, FM10K_PCIE_SRIOV_CTRL);
+
+ DEBUGFUNC("fm10k_is_ari_hierarchy_pf");
+
+ return !!(sriov_ctrl & FM10K_PCIE_SRIOV_CTRL_VFARI);
+}
+
+/**
+ * fm10k_init_hw_pf - PF hardware initialization
+ * @hw: pointer to hardware structure
+ *
+ **/
+STATIC s32 fm10k_init_hw_pf(struct fm10k_hw *hw)
+{
+ u32 dma_ctrl, txqctl;
+ u16 i;
+
+ DEBUGFUNC("fm10k_init_hw_pf");
+
+ /* Establish default VSI as valid */
+ FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(fm10k_dglort_default), 0);
+ FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(fm10k_dglort_default),
+ FM10K_DGLORTMAP_ANY);
+
+ /* Invalidate all other GLORT entries */
+ for (i = 1; i < FM10K_DGLORT_COUNT; i++)
+ FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i), FM10K_DGLORTMAP_NONE);
+
+ /* reset ITR2(0) to point to itself */
+ FM10K_WRITE_REG(hw, FM10K_ITR2(0), 0);
+
+	/* reset VF ITR2(0) to point to 0 to avoid PF registers */
+ FM10K_WRITE_REG(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), 0);
+
+ /* loop through all PF ITR2 registers pointing them to the previous */
+ for (i = 1; i < FM10K_ITR_REG_COUNT_PF; i++)
+ FM10K_WRITE_REG(hw, FM10K_ITR2(i), i - 1);
+
+ /* Enable interrupt moderator if not already enabled */
+ FM10K_WRITE_REG(hw, FM10K_INT_CTRL, FM10K_INT_CTRL_ENABLEMODERATOR);
+
+ /* compute the default txqctl configuration */
+ txqctl = FM10K_TXQCTL_PF | FM10K_TXQCTL_UNLIMITED_BW |
+ (hw->mac.default_vid << FM10K_TXQCTL_VID_SHIFT);
+
+ for (i = 0; i < FM10K_MAX_QUEUES; i++) {
+ /* configure rings for 256 Queue / 32 Descriptor cache mode */
+ FM10K_WRITE_REG(hw, FM10K_TQDLOC(i),
+ (i * FM10K_TQDLOC_BASE_32_DESC) |
+ FM10K_TQDLOC_SIZE_32_DESC);
+ FM10K_WRITE_REG(hw, FM10K_TXQCTL(i), txqctl);
+
+ /* configure rings to provide TPH processing hints */
+ FM10K_WRITE_REG(hw, FM10K_TPH_TXCTRL(i),
+ FM10K_TPH_TXCTRL_DESC_TPHEN |
+ FM10K_TPH_TXCTRL_DESC_RROEN |
+ FM10K_TPH_TXCTRL_DESC_WROEN |
+ FM10K_TPH_TXCTRL_DATA_RROEN);
+ FM10K_WRITE_REG(hw, FM10K_TPH_RXCTRL(i),
+ FM10K_TPH_RXCTRL_DESC_TPHEN |
+ FM10K_TPH_RXCTRL_DESC_RROEN |
+ FM10K_TPH_RXCTRL_DATA_WROEN |
+ FM10K_TPH_RXCTRL_HDR_WROEN);
+ }
+
+ /* set max hold interval to align with 1.024 usec in all modes and
+ * store ITR scale
+ */
+ switch (hw->bus.speed) {
+ case fm10k_bus_speed_2500:
+ dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN1;
+ hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN1;
+ break;
+ case fm10k_bus_speed_5000:
+ dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN2;
+ hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN2;
+ break;
+ case fm10k_bus_speed_8000:
+ dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN3;
+ hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN3;
+ break;
+ default:
+ dma_ctrl = 0;
+ /* just in case, assume Gen3 ITR scale */
+ hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN3;
+ break;
+ }
+
+ /* Configure TSO flags */
+ FM10K_WRITE_REG(hw, FM10K_DTXTCPFLGL, FM10K_TSO_FLAGS_LOW);
+ FM10K_WRITE_REG(hw, FM10K_DTXTCPFLGH, FM10K_TSO_FLAGS_HI);
+
+ /* Enable DMA engine
+ * Set Rx Descriptor size to 32
+ * Set Minimum MSS to 64
+ * Set Maximum number of Rx queues to 256 / 32 Descriptor
+ */
+ dma_ctrl |= FM10K_DMA_CTRL_TX_ENABLE | FM10K_DMA_CTRL_RX_ENABLE |
+ FM10K_DMA_CTRL_RX_DESC_SIZE | FM10K_DMA_CTRL_MINMSS_64 |
+ FM10K_DMA_CTRL_32_DESC;
+
+ FM10K_WRITE_REG(hw, FM10K_DMA_CTRL, dma_ctrl);
+
+ /* record maximum queue count, we limit ourselves to 128 */
+ hw->mac.max_queues = FM10K_MAX_QUEUES_PF;
+
+ /* We support either 64 VFs or 7 VFs depending on if we have ARI */
+ hw->iov.total_vfs = fm10k_is_ari_hierarchy_pf(hw) ? 64 : 7;
+
+ return FM10K_SUCCESS;
+}
+
+#ifndef NO_IS_SLOT_APPROPRIATE_CHECK
+/**
+ * fm10k_is_slot_appropriate_pf - Indicate appropriate slot for this SKU
+ * @hw: pointer to hardware structure
+ *
+ * Looks at the PCIe bus info to confirm whether or not this slot can support
+ * the necessary bandwidth for this device.
+ **/
+STATIC bool fm10k_is_slot_appropriate_pf(struct fm10k_hw *hw)
+{
+ DEBUGFUNC("fm10k_is_slot_appropriate_pf");
+
+ return (hw->bus.speed == hw->bus_caps.speed) &&
+ (hw->bus.width == hw->bus_caps.width);
+}
+
+#endif
+/**
+ * fm10k_update_vlan_pf - Update status of VLAN ID in VLAN filter table
+ * @hw: pointer to hardware structure
+ * @vid: VLAN ID to add to table
+ * @vsi: Index indicating VF ID or PF ID in table
+ * @set: Indicates if this is a set or clear operation
+ *
+ * This function adds or removes the corresponding VLAN ID from the VLAN
+ * filter table for the corresponding function. In addition to the
+ * standard set/clear that supports one bit a multi-bit write is
+ * supported to set 64 bits at a time.
+ **/
+STATIC s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
+{
+ u32 vlan_table, reg, mask, bit, len;
+
+ /* verify the VSI index is valid */
+ if (vsi > FM10K_VLAN_TABLE_VSI_MAX)
+ return FM10K_ERR_PARAM;
+
+ /* VLAN multi-bit write:
+ * The multi-bit write has several parts to it.
+ * 24 16 8 0
+ * 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RSVD0 | Length |C|RSVD0| VLAN ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+	 * VLAN ID: VLAN starting value
+ * RSVD0: Reserved section, must be 0
+ * C: Flag field, 0 is set, 1 is clear (Used in VF VLAN message)
+ * Length: Number of times to repeat the bit being set
+ */
+ len = vid >> 16;
+ vid = (vid << 17) >> 17;
+
+ /* verify the reserved 0 fields are 0 */
+ if (len >= FM10K_VLAN_TABLE_VID_MAX || vid >= FM10K_VLAN_TABLE_VID_MAX)
+ return FM10K_ERR_PARAM;
+
+ /* Loop through the table updating all required VLANs */
+ for (reg = FM10K_VLAN_TABLE(vsi, vid / 32), bit = vid % 32;
+ len < FM10K_VLAN_TABLE_VID_MAX;
+ len -= 32 - bit, reg++, bit = 0) {
+ /* record the initial state of the register */
+ vlan_table = FM10K_READ_REG(hw, reg);
+
+ /* truncate mask if we are at the start or end of the run */
+ mask = (~(u32)0 >> ((len < 31) ? 31 - len : 0)) << bit;
+
+ /* make necessary modifications to the register */
+ mask &= set ? ~vlan_table : vlan_table;
+ if (mask)
+ FM10K_WRITE_REG(hw, reg, vlan_table ^ mask);
+ }
+
+ return FM10K_SUCCESS;
+}
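+
+/* Worked example, illustrative only: per the layout above, Length counts
+ * repeats beyond the first bit, so setting the 64 VLANs 100-163 for a VSI
+ * in one call packs Length = 63 above the starting VID:
+ *
+ *	u32 vid = (63 << 16) | 100;	// Length = 63, starting VID = 100
+ *	err = fm10k_update_vlan_pf(hw, vid, vsi, true);
+ *
+ * A plain VID with no length behaves as a single-bit update, since the
+ * first pass writes one bit and the unsigned underflow of len then ends
+ * the loop.
+ */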
+
+/**
+ * fm10k_read_mac_addr_pf - Read device MAC address
+ * @hw: pointer to the HW structure
+ *
+ * Reads the device MAC address from the SM_AREA and stores the value.
+ **/
+STATIC s32 fm10k_read_mac_addr_pf(struct fm10k_hw *hw)
+{
+ u8 perm_addr[ETH_ALEN];
+ u32 serial_num;
+
+ DEBUGFUNC("fm10k_read_mac_addr_pf");
+
+ serial_num = FM10K_READ_REG(hw, FM10K_SM_AREA(1));
+
+ /* last byte should be all 1's */
+ if ((~serial_num) << 24)
+ return FM10K_ERR_INVALID_MAC_ADDR;
+
+ perm_addr[0] = (u8)(serial_num >> 24);
+ perm_addr[1] = (u8)(serial_num >> 16);
+ perm_addr[2] = (u8)(serial_num >> 8);
+
+ serial_num = FM10K_READ_REG(hw, FM10K_SM_AREA(0));
+
+ /* first byte should be all 1's */
+ if ((~serial_num) >> 24)
+ return FM10K_ERR_INVALID_MAC_ADDR;
+
+ perm_addr[3] = (u8)(serial_num >> 16);
+ perm_addr[4] = (u8)(serial_num >> 8);
+ perm_addr[5] = (u8)(serial_num);
+
+ memcpy(hw->mac.perm_addr, perm_addr, ETH_ALEN);
+ memcpy(hw->mac.addr, perm_addr, ETH_ALEN);
+
+ return FM10K_SUCCESS;
+}
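+
+/* Worked example, illustrative only: the MAC is spread across the two
+ * SM_AREA words with the unused byte of each word forced to all 1's as a
+ * validity marker. For the address 11:22:33:44:55:66 the words would read
+ *
+ *	FM10K_SM_AREA(1) == 0x112233FF	// mac[0..2], marker in low byte
+ *	FM10K_SM_AREA(0) == 0xFF445566	// mac[3..5], marker in high byte
+ *
+ * and a missing marker in either word is reported as
+ * FM10K_ERR_INVALID_MAC_ADDR.
+ */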
+
+/**
+ * fm10k_glort_valid_pf - Validate that the provided glort is valid
+ * @hw: pointer to the HW structure
+ * @glort: base glort to be validated
+ *
+ * This function will return an error if the provided glort is invalid
+ **/
+bool fm10k_glort_valid_pf(struct fm10k_hw *hw, u16 glort)
+{
+ glort &= hw->mac.dglort_map >> FM10K_DGLORTMAP_MASK_SHIFT;
+
+ return glort == (hw->mac.dglort_map & FM10K_DGLORTMAP_NONE);
+}
+
+/**
+ * fm10k_update_xc_addr_pf - Update device addresses
+ * @hw: pointer to the HW structure
+ * @glort: base resource tag for this request
+ * @mac: MAC address to add/remove from table
+ * @vid: VLAN ID to add/remove from table
+ * @add: Indicates if this is an add or remove operation
+ * @flags: flags field to indicate add and secure
+ *
+ * This function generates a message to the Switch API requesting
+ * that the given logical port add/remove the given L2 MAC/VLAN address.
+ **/
+STATIC s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort,
+ const u8 *mac, u16 vid, bool add, u8 flags)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ struct fm10k_mac_update mac_update;
+ u32 msg[5];
+
+ DEBUGFUNC("fm10k_update_xc_addr_pf");
+
+ /* clear set bit from VLAN ID */
+ vid &= ~FM10K_VLAN_CLEAR;
+
+ /* if glort or VLAN are not valid return error */
+ if (!fm10k_glort_valid_pf(hw, glort) || vid >= FM10K_VLAN_TABLE_VID_MAX)
+ return FM10K_ERR_PARAM;
+
+ /* record fields */
+ mac_update.mac_lower = FM10K_CPU_TO_LE32(((u32)mac[2] << 24) |
+ ((u32)mac[3] << 16) |
+ ((u32)mac[4] << 8) |
+ ((u32)mac[5]));
+ mac_update.mac_upper = FM10K_CPU_TO_LE16(((u16)mac[0] << 8) |
+ ((u16)mac[1]));
+ mac_update.vlan = FM10K_CPU_TO_LE16(vid);
+ mac_update.glort = FM10K_CPU_TO_LE16(glort);
+ mac_update.action = add ? 0 : 1;
+ mac_update.flags = flags;
+
+ /* populate mac_update fields */
+ fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_UPDATE_MAC_FWD_RULE);
+ fm10k_tlv_attr_put_le_struct(msg, FM10K_PF_ATTR_ID_MAC_UPDATE,
+ &mac_update, sizeof(mac_update));
+
+ /* load onto outgoing mailbox */
+ return mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+/**
+ * fm10k_update_uc_addr_pf - Update device unicast addresses
+ * @hw: pointer to the HW structure
+ * @glort: base resource tag for this request
+ * @mac: MAC address to add/remove from table
+ * @vid: VLAN ID to add/remove from table
+ * @add: Indicates if this is an add or remove operation
+ * @flags: flags field to indicate add and secure
+ *
+ * This function is used to add or remove unicast addresses for
+ * the PF.
+ **/
+STATIC s32 fm10k_update_uc_addr_pf(struct fm10k_hw *hw, u16 glort,
+ const u8 *mac, u16 vid, bool add, u8 flags)
+{
+ DEBUGFUNC("fm10k_update_uc_addr_pf");
+
+ /* verify MAC address is valid */
+ if (!IS_VALID_ETHER_ADDR(mac))
+ return FM10K_ERR_PARAM;
+
+ return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, flags);
+}
+
+/**
+ * fm10k_update_mc_addr_pf - Update device multicast addresses
+ * @hw: pointer to the HW structure
+ * @glort: base resource tag for this request
+ * @mac: MAC address to add/remove from table
+ * @vid: VLAN ID to add/remove from table
+ * @add: Indicates if this is an add or remove operation
+ *
+ * This function is used to add or remove multicast MAC addresses for
+ * the PF.
+ **/
+STATIC s32 fm10k_update_mc_addr_pf(struct fm10k_hw *hw, u16 glort,
+ const u8 *mac, u16 vid, bool add)
+{
+ DEBUGFUNC("fm10k_update_mc_addr_pf");
+
+ /* verify multicast address is valid */
+ if (!IS_MULTICAST_ETHER_ADDR(mac))
+ return FM10K_ERR_PARAM;
+
+ return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, 0);
+}
+
+/**
+ * fm10k_update_xcast_mode_pf - Request update of multicast mode
+ * @hw: pointer to hardware structure
+ * @glort: base resource tag for this request
+ * @mode: integer value indicating mode being requested
+ *
+ * This function will attempt to request a higher mode for the port
+ * so that it can enable either multicast, multicast promiscuous, or
+ * promiscuous mode of operation.
+ **/
+STATIC s32 fm10k_update_xcast_mode_pf(struct fm10k_hw *hw, u16 glort, u8 mode)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ u32 msg[3], xcast_mode;
+
+ DEBUGFUNC("fm10k_update_xcast_mode_pf");
+
+ if (mode > FM10K_XCAST_MODE_NONE)
+ return FM10K_ERR_PARAM;
+
+ /* if glort is not valid return error */
+ if (!fm10k_glort_valid_pf(hw, glort))
+ return FM10K_ERR_PARAM;
+
+ /* write xcast mode as a single u32 value,
+ * lower 16 bits: glort
+ * upper 16 bits: mode
+ */
+ xcast_mode = ((u32)mode << 16) | glort;
+
+ /* generate message requesting to change xcast mode */
+ fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_XCAST_MODES);
+ fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_XCAST_MODE, xcast_mode);
+
+ /* load onto outgoing mailbox */
+ return mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+/**
+ * fm10k_update_int_moderator_pf - Update interrupt moderator linked list
+ * @hw: pointer to hardware structure
+ *
+ * This function walks through the MSI-X vector table to determine the
+ * number of active interrupts and based on that information updates the
+ * interrupt moderator linked list.
+ **/
+STATIC void fm10k_update_int_moderator_pf(struct fm10k_hw *hw)
+{
+ u32 i;
+
+ /* Disable interrupt moderator */
+ FM10K_WRITE_REG(hw, FM10K_INT_CTRL, 0);
+
+	/* loop through PF from last to first looking for enabled vectors */
+ for (i = FM10K_ITR_REG_COUNT_PF - 1; i; i--) {
+ if (!FM10K_READ_REG(hw, FM10K_MSIX_VECTOR_MASK(i)))
+ break;
+ }
+
+ /* always reset VFITR2[0] to point to last enabled PF vector */
+ FM10K_WRITE_REG(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), i);
+
+ /* reset ITR2[0] to point to last enabled PF vector */
+ if (!hw->iov.num_vfs)
+ FM10K_WRITE_REG(hw, FM10K_ITR2(0), i);
+
+ /* Enable interrupt moderator */
+ FM10K_WRITE_REG(hw, FM10K_INT_CTRL, FM10K_INT_CTRL_ENABLEMODERATOR);
+}
+
+/**
+ * fm10k_update_lport_state_pf - Notify the switch of a change in port state
+ * @hw: pointer to the HW structure
+ * @glort: base resource tag for this request
+ * @count: number of logical ports being updated
+ * @enable: boolean value indicating enable or disable
+ *
+ * This function is used to add/remove a logical port from the switch.
+ **/
+STATIC s32 fm10k_update_lport_state_pf(struct fm10k_hw *hw, u16 glort,
+ u16 count, bool enable)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ u32 msg[3], lport_msg;
+
+	DEBUGFUNC("fm10k_update_lport_state_pf");
+
+ /* do nothing if we are being asked to create or destroy 0 ports */
+ if (!count)
+ return FM10K_SUCCESS;
+
+ /* if glort is not valid return error */
+ if (!fm10k_glort_valid_pf(hw, glort))
+ return FM10K_ERR_PARAM;
+
+ /* reset multicast mode if deleting lport */
+ if (!enable)
+ fm10k_update_xcast_mode_pf(hw, glort, FM10K_XCAST_MODE_NONE);
+
+ /* construct the lport message from the 2 pieces of data we have */
+ lport_msg = ((u32)count << 16) | glort;
+
+ /* generate lport create/delete message */
+ fm10k_tlv_msg_init(msg, enable ? FM10K_PF_MSG_ID_LPORT_CREATE :
+ FM10K_PF_MSG_ID_LPORT_DELETE);
+ fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_PORT, lport_msg);
+
+ /* load onto outgoing mailbox */
+ return mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+/**
+ * fm10k_configure_dglort_map_pf - Configures GLORT entry and queues
+ * @hw: pointer to hardware structure
+ * @dglort: pointer to dglort configuration structure
+ *
+ * Reads the configuration structure contained in dglort_cfg and uses
+ * that information to then populate a DGLORTMAP/DEC entry and the queues
+ * to which it has been assigned.
+ **/
+STATIC s32 fm10k_configure_dglort_map_pf(struct fm10k_hw *hw,
+ struct fm10k_dglort_cfg *dglort)
+{
+ u16 glort, queue_count, vsi_count, pc_count;
+ u16 vsi, queue, pc, q_idx;
+ u32 txqctl, dglortdec, dglortmap;
+
+ /* verify the dglort pointer */
+ if (!dglort)
+ return FM10K_ERR_PARAM;
+
+ /* verify the dglort values */
+ if ((dglort->idx > 7) || (dglort->rss_l > 7) || (dglort->pc_l > 3) ||
+ (dglort->vsi_l > 6) || (dglort->vsi_b > 64) ||
+ (dglort->queue_l > 8) || (dglort->queue_b >= 256))
+ return FM10K_ERR_PARAM;
+
+ /* determine count of VSIs and queues */
+ queue_count = BIT(dglort->rss_l + dglort->pc_l);
+ vsi_count = BIT(dglort->vsi_l + dglort->queue_l);
+ glort = dglort->glort;
+ q_idx = dglort->queue_b;
+
+ /* configure SGLORT for queues */
+ for (vsi = 0; vsi < vsi_count; vsi++, glort++) {
+ for (queue = 0; queue < queue_count; queue++, q_idx++) {
+ if (q_idx >= FM10K_MAX_QUEUES)
+ break;
+
+ FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(q_idx), glort);
+ FM10K_WRITE_REG(hw, FM10K_RX_SGLORT(q_idx), glort);
+ }
+ }
+
+ /* determine count of PCs and queues */
+ queue_count = BIT(dglort->queue_l + dglort->rss_l + dglort->vsi_l);
+ pc_count = BIT(dglort->pc_l);
+
+ /* configure PC for Tx queues */
+ for (pc = 0; pc < pc_count; pc++) {
+ q_idx = pc + dglort->queue_b;
+ for (queue = 0; queue < queue_count; queue++) {
+ if (q_idx >= FM10K_MAX_QUEUES)
+ break;
+
+ txqctl = FM10K_READ_REG(hw, FM10K_TXQCTL(q_idx));
+ txqctl &= ~FM10K_TXQCTL_PC_MASK;
+ txqctl |= pc << FM10K_TXQCTL_PC_SHIFT;
+ FM10K_WRITE_REG(hw, FM10K_TXQCTL(q_idx), txqctl);
+
+ q_idx += pc_count;
+ }
+ }
+
+ /* configure DGLORTDEC */
+ dglortdec = ((u32)(dglort->rss_l) << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) |
+ ((u32)(dglort->queue_b) << FM10K_DGLORTDEC_QBASE_SHIFT) |
+ ((u32)(dglort->pc_l) << FM10K_DGLORTDEC_PCLENGTH_SHIFT) |
+ ((u32)(dglort->vsi_b) << FM10K_DGLORTDEC_VSIBASE_SHIFT) |
+ ((u32)(dglort->vsi_l) << FM10K_DGLORTDEC_VSILENGTH_SHIFT) |
+ ((u32)(dglort->queue_l));
+ if (dglort->inner_rss)
+ dglortdec |= FM10K_DGLORTDEC_INNERRSS_ENABLE;
+
+ /* configure DGLORTMAP */
+ dglortmap = (dglort->idx == fm10k_dglort_default) ?
+ FM10K_DGLORTMAP_ANY : FM10K_DGLORTMAP_ZERO;
+ dglortmap <<= dglort->vsi_l + dglort->queue_l + dglort->shared_l;
+ dglortmap |= dglort->glort;
+
+ /* write values to hardware */
+ FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(dglort->idx), dglortdec);
+ FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(dglort->idx), dglortmap);
+
+ return FM10K_SUCCESS;
+}
+
+u16 fm10k_queues_per_pool(struct fm10k_hw *hw)
+{
+ u16 num_pools = hw->iov.num_pools;
+
+ return (num_pools > 32) ? 2 : (num_pools > 16) ? 4 : (num_pools > 8) ?
+ 8 : FM10K_MAX_QUEUES_POOL;
+}
+
+u16 fm10k_vf_queue_index(struct fm10k_hw *hw, u16 vf_idx)
+{
+ u16 num_vfs = hw->iov.num_vfs;
+ u16 vf_q_idx = FM10K_MAX_QUEUES;
+
+ vf_q_idx -= fm10k_queues_per_pool(hw) * (num_vfs - vf_idx);
+
+ return vf_q_idx;
+}
+
+STATIC u16 fm10k_vectors_per_pool(struct fm10k_hw *hw)
+{
+ u16 num_pools = hw->iov.num_pools;
+
+ return (num_pools > 32) ? 8 : (num_pools > 16) ? 16 :
+ FM10K_MAX_VECTORS_POOL;
+}
+
+STATIC u16 fm10k_vf_vector_index(struct fm10k_hw *hw, u16 vf_idx)
+{
+ u16 vf_v_idx = FM10K_MAX_VECTORS_PF;
+
+ vf_v_idx += fm10k_vectors_per_pool(hw) * vf_idx;
+
+ return vf_v_idx;
+}
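+
+/* Illustrative sketch, assuming FM10K_MAX_QUEUES is 256: the sizing above
+ * gives 1-8 pools FM10K_MAX_QUEUES_POOL queues each, 9-16 pools 8 queues,
+ * 17-32 pools 4 and 33-64 pools 2. VF queues are carved from the top of
+ * the queue space downward, e.g. with 16 VFs and 16 pools:
+ *
+ *	fm10k_queues_per_pool(hw);	// 8 queues per pool
+ *	fm10k_vf_queue_index(hw, 0);	// 256 - 8 * 16 == 128
+ *	fm10k_vf_queue_index(hw, 15);	// 256 - 8 * 1  == 248
+ *
+ * so queues below 128 stay with the PF and each VF owns one contiguous
+ * block. The vector helpers follow the same scheme starting at
+ * FM10K_MAX_VECTORS_PF, but counting upward.
+ */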
+
+/**
+ * fm10k_iov_assign_resources_pf - Assign pool resources for virtualization
+ * @hw: pointer to the HW structure
+ * @num_vfs: number of VFs to be allocated
+ * @num_pools: number of virtualization pools to be allocated
+ *
+ * Allocates queues and traffic classes to virtualization entities to prepare
+ * the PF for SR-IOV and VMDq
+ **/
+STATIC s32 fm10k_iov_assign_resources_pf(struct fm10k_hw *hw, u16 num_vfs,
+ u16 num_pools)
+{
+ u16 qmap_stride, qpp, vpp, vf_q_idx, vf_q_idx0, qmap_idx;
+ u32 vid = hw->mac.default_vid << FM10K_TXQCTL_VID_SHIFT;
+ int i, j;
+
+ /* hardware only supports up to 64 pools */
+ if (num_pools > 64)
+ return FM10K_ERR_PARAM;
+
+ /* the number of VFs cannot exceed the number of pools */
+ if ((num_vfs > num_pools) || (num_vfs > hw->iov.total_vfs))
+ return FM10K_ERR_PARAM;
+
+ /* record number of virtualization entities */
+ hw->iov.num_vfs = num_vfs;
+ hw->iov.num_pools = num_pools;
+
+ /* determine qmap offsets and counts */
+ qmap_stride = (num_vfs > 8) ? 32 : 256;
+ qpp = fm10k_queues_per_pool(hw);
+ vpp = fm10k_vectors_per_pool(hw);
+
+ /* calculate starting index for queues */
+ vf_q_idx = fm10k_vf_queue_index(hw, 0);
+ qmap_idx = 0;
+
+ /* establish TCs with -1 credits and no quanta to prevent transmit */
+ for (i = 0; i < num_vfs; i++) {
+ FM10K_WRITE_REG(hw, FM10K_TC_MAXCREDIT(i), 0);
+ FM10K_WRITE_REG(hw, FM10K_TC_RATE(i), 0);
+ FM10K_WRITE_REG(hw, FM10K_TC_CREDIT(i),
+ FM10K_TC_CREDIT_CREDIT_MASK);
+ }
+
+ /* zero out all mbmem registers */
+ for (i = FM10K_VFMBMEM_LEN * num_vfs; i--;)
+ FM10K_WRITE_REG(hw, FM10K_MBMEM(i), 0);
+
+ /* clear event notification of VF FLR */
+ FM10K_WRITE_REG(hw, FM10K_PFVFLREC(0), ~0);
+ FM10K_WRITE_REG(hw, FM10K_PFVFLREC(1), ~0);
+
+ /* loop through unallocated rings assigning them back to PF */
+ for (i = FM10K_MAX_QUEUES_PF; i < vf_q_idx; i++) {
+ FM10K_WRITE_REG(hw, FM10K_TXDCTL(i), 0);
+ FM10K_WRITE_REG(hw, FM10K_TXQCTL(i), FM10K_TXQCTL_PF |
+ FM10K_TXQCTL_UNLIMITED_BW | vid);
+ FM10K_WRITE_REG(hw, FM10K_RXQCTL(i), FM10K_RXQCTL_PF);
+ }
+
+ /* PF should have already updated VFITR2[0] */
+
+ /* update all ITR registers to flow to VFITR2[0] */
+ for (i = FM10K_ITR_REG_COUNT_PF + 1; i < FM10K_ITR_REG_COUNT; i++) {
+ if (!(i & (vpp - 1)))
+ FM10K_WRITE_REG(hw, FM10K_ITR2(i), i - vpp);
+ else
+ FM10K_WRITE_REG(hw, FM10K_ITR2(i), i - 1);
+ }
+
+ /* update PF ITR2[0] to reference the last vector */
+ FM10K_WRITE_REG(hw, FM10K_ITR2(0),
+ fm10k_vf_vector_index(hw, num_vfs - 1));
+
+ /* loop through rings populating rings and TCs */
+ for (i = 0; i < num_vfs; i++) {
+ /* record index for VF queue 0 for use in end of loop */
+ vf_q_idx0 = vf_q_idx;
+
+ for (j = 0; j < qpp; j++, qmap_idx++, vf_q_idx++) {
+ /* assign VF and locked TC to queues */
+ FM10K_WRITE_REG(hw, FM10K_TXDCTL(vf_q_idx), 0);
+ FM10K_WRITE_REG(hw, FM10K_TXQCTL(vf_q_idx),
+ (i << FM10K_TXQCTL_TC_SHIFT) | i |
+ FM10K_TXQCTL_VF | vid);
+ FM10K_WRITE_REG(hw, FM10K_RXDCTL(vf_q_idx),
+ FM10K_RXDCTL_WRITE_BACK_MIN_DELAY |
+ FM10K_RXDCTL_DROP_ON_EMPTY);
+ FM10K_WRITE_REG(hw, FM10K_RXQCTL(vf_q_idx),
+ (i << FM10K_RXQCTL_VF_SHIFT) |
+ FM10K_RXQCTL_VF);
+
+ /* map queue pair to VF */
+ FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), vf_q_idx);
+ FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx), vf_q_idx);
+ }
+
+ /* repeat the first ring for all of the remaining VF rings */
+ for (; j < qmap_stride; j++, qmap_idx++) {
+ FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), vf_q_idx0);
+ FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx), vf_q_idx0);
+ }
+ }
+
+ /* loop through remaining indexes assigning all to queue 0 */
+ while (qmap_idx < FM10K_TQMAP_TABLE_SIZE) {
+ FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), 0);
+ FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx), 0);
+ qmap_idx++;
+ }
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_iov_configure_tc_pf - Configure the shaping group for VF
+ * @hw: pointer to the HW structure
+ * @vf_idx: index of VF receiving GLORT
+ * @rate: Rate indicated in Mb/s
+ *
+ * Configures the TC for a given VF to allow only up to a given number
+ * of Mb/s of outgoing Tx throughput.
+ **/
+STATIC s32 fm10k_iov_configure_tc_pf(struct fm10k_hw *hw, u16 vf_idx, int rate)
+{
+ /* configure defaults */
+ u32 interval = FM10K_TC_RATE_INTERVAL_4US_GEN3;
+ u32 tc_rate = FM10K_TC_RATE_QUANTA_MASK;
+
+ /* verify vf is in range */
+ if (vf_idx >= hw->iov.num_vfs)
+ return FM10K_ERR_PARAM;
+
+ /* set interval to align with 4.096 usec in all modes */
+ switch (hw->bus.speed) {
+ case fm10k_bus_speed_2500:
+ interval = FM10K_TC_RATE_INTERVAL_4US_GEN1;
+ break;
+ case fm10k_bus_speed_5000:
+ interval = FM10K_TC_RATE_INTERVAL_4US_GEN2;
+ break;
+ default:
+ break;
+ }
+
+ if (rate) {
+ if (rate > FM10K_VF_TC_MAX || rate < FM10K_VF_TC_MIN)
+ return FM10K_ERR_PARAM;
+
+ /* The quanta is measured in Bytes per 4.096 or 8.192 usec
+ * The rate is provided in Mbits per second
+	 * To translate from rate to quanta we need to multiply the
+ * rate by 8.192 usec and divide by 8 bits/byte. To avoid
+ * dealing with floating point we can round the values up
+ * to the nearest whole number ratio which gives us 128 / 125.
+ */
+ tc_rate = (rate * 128) / 125;
+
+ /* try to keep the rate limiting accurate by increasing
+ * the number of credits and interval for rates less than 4Gb/s
+ */
+ if (rate < 4000)
+ interval <<= 1;
+ else
+ tc_rate >>= 1;
+ }
+
+ /* update rate limiter with new values */
+ FM10K_WRITE_REG(hw, FM10K_TC_RATE(vf_idx), tc_rate | interval);
+ FM10K_WRITE_REG(hw, FM10K_TC_MAXCREDIT(vf_idx), FM10K_TC_MAXCREDIT_64K);
+ FM10K_WRITE_REG(hw, FM10K_TC_CREDIT(vf_idx), FM10K_TC_MAXCREDIT_64K);
+
+ return FM10K_SUCCESS;
+}
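+
+/* Worked example, illustrative only: a 1000 Mb/s cap yields
+ *
+ *	tc_rate = (1000 * 128) / 125;	// 1024 bytes per interval
+ *
+ * and because 1000 < 4000 the interval is doubled rather than the quanta
+ * halved, which keeps the limiter accurate at low rates.
+ */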
+
+/**
+ * fm10k_iov_assign_int_moderator_pf - Add VF interrupts to moderator list
+ * @hw: pointer to the HW structure
+ * @vf_idx: index of VF receiving GLORT
+ *
+ * Update the interrupt moderator linked list to include any MSI-X
+ * interrupts which the VF has enabled in the MSI-X vector table.
+ **/
+STATIC s32 fm10k_iov_assign_int_moderator_pf(struct fm10k_hw *hw, u16 vf_idx)
+{
+ u16 vf_v_idx, vf_v_limit, i;
+
+ /* verify vf is in range */
+ if (vf_idx >= hw->iov.num_vfs)
+ return FM10K_ERR_PARAM;
+
+ /* determine vector offset and count */
+ vf_v_idx = fm10k_vf_vector_index(hw, vf_idx);
+ vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw);
+
+ /* search for first vector that is not masked */
+ for (i = vf_v_limit - 1; i > vf_v_idx; i--) {
+ if (!FM10K_READ_REG(hw, FM10K_MSIX_VECTOR_MASK(i)))
+ break;
+ }
+
+ /* reset linked list so it now includes our active vectors */
+ if (vf_idx == (hw->iov.num_vfs - 1))
+ FM10K_WRITE_REG(hw, FM10K_ITR2(0), i);
+ else
+ FM10K_WRITE_REG(hw, FM10K_ITR2(vf_v_limit), i);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_iov_assign_default_mac_vlan_pf - Assign a MAC and VLAN to VF
+ * @hw: pointer to the HW structure
+ * @vf_info: pointer to VF information structure
+ *
+ * Assign a MAC address and default VLAN to a VF and notify it of the update
+ **/
+STATIC s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
+ struct fm10k_vf_info *vf_info)
+{
+ u16 qmap_stride, queues_per_pool, vf_q_idx, timeout, qmap_idx, i;
+ u32 msg[4], txdctl, txqctl, tdbal = 0, tdbah = 0;
+ s32 err = FM10K_SUCCESS;
+ u16 vf_idx, vf_vid;
+
+ /* verify vf is in range */
+ if (!vf_info || vf_info->vf_idx >= hw->iov.num_vfs)
+ return FM10K_ERR_PARAM;
+
+ /* determine qmap offsets and counts */
+ qmap_stride = (hw->iov.num_vfs > 8) ? 32 : 256;
+ queues_per_pool = fm10k_queues_per_pool(hw);
+
+ /* calculate starting index for queues */
+ vf_idx = vf_info->vf_idx;
+ vf_q_idx = fm10k_vf_queue_index(hw, vf_idx);
+ qmap_idx = qmap_stride * vf_idx;
+
+ /* Determine correct default VLAN ID. The FM10K_VLAN_OVERRIDE bit is
+ * used here to indicate to the VF that it will not have privilege to
+ * write VLAN_TABLE. All policy is enforced on the PF but this allows
+	 * the VF to correctly report errors to userspace requests.
+ */
+ if (vf_info->pf_vid)
+ vf_vid = vf_info->pf_vid | FM10K_VLAN_OVERRIDE;
+ else
+ vf_vid = vf_info->sw_vid;
+
+ /* generate MAC_ADDR request */
+ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN);
+ fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_DEFAULT_MAC,
+ vf_info->mac, vf_vid);
+
+ /* Configure Queue control register with new VLAN ID. The TXQCTL
+ * register is RO from the VF, so the PF must do this even in the
+ * case of notifying the VF of a new VID via the mailbox.
+ */
+ txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) &
+ FM10K_TXQCTL_VID_MASK;
+ txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
+ FM10K_TXQCTL_VF | vf_idx;
+
+ for (i = 0; i < queues_per_pool; i++)
+ FM10K_WRITE_REG(hw, FM10K_TXQCTL(vf_q_idx + i), txqctl);
+
+ /* try loading a message onto outgoing mailbox first */
+ if (vf_info->mbx.ops.enqueue_tx) {
+ err = vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
+ if (err != FM10K_MBX_ERR_NO_MBX)
+ return err;
+ err = FM10K_SUCCESS;
+ }
+
+ /* If we aren't connected to a mailbox, this is most likely because
+ * the VF driver is not running. It should thus be safe to re-map
+ * queues and use the registers to pass the MAC address so that the VF
+ * driver gets correct information during its initialization.
+ */
+
+ /* MAP Tx queue back to 0 temporarily, and disable it */
+ FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), 0);
+ FM10K_WRITE_REG(hw, FM10K_TXDCTL(vf_q_idx), 0);
+
+	/* verify the ring has been disabled before modifying base addresses */
+ txdctl = FM10K_READ_REG(hw, FM10K_TXDCTL(vf_q_idx));
+ for (timeout = 0; txdctl & FM10K_TXDCTL_ENABLE; timeout++) {
+ /* limit ourselves to a 1ms timeout */
+ if (timeout == 10) {
+ err = FM10K_ERR_DMA_PENDING;
+ goto err_out;
+ }
+
+ usec_delay(100);
+ txdctl = FM10K_READ_REG(hw, FM10K_TXDCTL(vf_q_idx));
+ }
+
+ /* Update base address registers to contain MAC address */
+ if (IS_VALID_ETHER_ADDR(vf_info->mac)) {
+ tdbal = (((u32)vf_info->mac[3]) << 24) |
+ (((u32)vf_info->mac[4]) << 16) |
+ (((u32)vf_info->mac[5]) << 8);
+
+ tdbah = (((u32)0xFF) << 24) |
+ (((u32)vf_info->mac[0]) << 16) |
+ (((u32)vf_info->mac[1]) << 8) |
+ ((u32)vf_info->mac[2]);
+ }
+
+ /* Record the base address into queue 0 */
+ FM10K_WRITE_REG(hw, FM10K_TDBAL(vf_q_idx), tdbal);
+ FM10K_WRITE_REG(hw, FM10K_TDBAH(vf_q_idx), tdbah);
+
+ /* Provide the VF the ITR scale, using software-defined fields in TDLEN
+ * to pass the information during VF initialization. See definition of
+ * FM10K_TDLEN_ITR_SCALE_SHIFT for more details.
+ */
+ FM10K_WRITE_REG(hw, FM10K_TDLEN(vf_q_idx), hw->mac.itr_scale <<
+ FM10K_TDLEN_ITR_SCALE_SHIFT);
+
+err_out:
+ /* restore the queue back to VF ownership */
+ FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), vf_q_idx);
+ return err;
+}
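+
+/* Illustrative example, not part of the base driver: for the VF MAC
+ * 11:22:33:44:55:66 the writes above leave
+ *
+ *	TDBAL == 0x44556600	// mac[3..5] shifted up one byte
+ *	TDBAH == 0xFF112233	// 0xFF marker plus mac[0..2]
+ *
+ * which the VF driver decodes during its init when no mailbox connection
+ * exists yet.
+ */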
+
+/**
+ * fm10k_iov_reset_resources_pf - Reassign queues and interrupts to a VF
+ * @hw: pointer to the HW structure
+ * @vf_info: pointer to VF information structure
+ *
+ * Reassign the interrupts and queues to a VF following an FLR
+ **/
+STATIC s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
+ struct fm10k_vf_info *vf_info)
+{
+ u16 qmap_stride, queues_per_pool, vf_q_idx, qmap_idx;
+ u32 tdbal = 0, tdbah = 0, txqctl, rxqctl;
+ u16 vf_v_idx, vf_v_limit, vf_vid;
+ u8 vf_idx = vf_info->vf_idx;
+ int i;
+
+ /* verify vf is in range */
+ if (vf_idx >= hw->iov.num_vfs)
+ return FM10K_ERR_PARAM;
+
+ /* clear event notification of VF FLR */
+ FM10K_WRITE_REG(hw, FM10K_PFVFLREC(vf_idx / 32), BIT(vf_idx % 32));
+
+ /* force timeout and then disconnect the mailbox */
+ vf_info->mbx.timeout = 0;
+ if (vf_info->mbx.ops.disconnect)
+ vf_info->mbx.ops.disconnect(hw, &vf_info->mbx);
+
+ /* determine vector offset and count */
+ vf_v_idx = fm10k_vf_vector_index(hw, vf_idx);
+ vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw);
+
+ /* determine qmap offsets and counts */
+ qmap_stride = (hw->iov.num_vfs > 8) ? 32 : 256;
+ queues_per_pool = fm10k_queues_per_pool(hw);
+ qmap_idx = qmap_stride * vf_idx;
+
+ /* make all the queues inaccessible to the VF */
+ for (i = qmap_idx; i < (qmap_idx + qmap_stride); i++) {
+ FM10K_WRITE_REG(hw, FM10K_TQMAP(i), 0);
+ FM10K_WRITE_REG(hw, FM10K_RQMAP(i), 0);
+ }
+
+ /* calculate starting index for queues */
+ vf_q_idx = fm10k_vf_queue_index(hw, vf_idx);
+
+ /* determine correct default VLAN ID */
+ if (vf_info->pf_vid)
+ vf_vid = vf_info->pf_vid;
+ else
+ vf_vid = vf_info->sw_vid;
+
+ /* configure Queue control register */
+ txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) |
+ (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
+ FM10K_TXQCTL_VF | vf_idx;
+ rxqctl = (vf_idx << FM10K_RXQCTL_VF_SHIFT) | FM10K_RXQCTL_VF;
+
+ /* stop further DMA and reset queue ownership back to VF */
+ for (i = vf_q_idx; i < (queues_per_pool + vf_q_idx); i++) {
+ FM10K_WRITE_REG(hw, FM10K_TXDCTL(i), 0);
+ FM10K_WRITE_REG(hw, FM10K_TXQCTL(i), txqctl);
+ FM10K_WRITE_REG(hw, FM10K_RXDCTL(i),
+ FM10K_RXDCTL_WRITE_BACK_MIN_DELAY |
+ FM10K_RXDCTL_DROP_ON_EMPTY);
+ FM10K_WRITE_REG(hw, FM10K_RXQCTL(i), rxqctl);
+ }
+
+ /* reset TC with -1 credits and no quanta to prevent transmit */
+ FM10K_WRITE_REG(hw, FM10K_TC_MAXCREDIT(vf_idx), 0);
+ FM10K_WRITE_REG(hw, FM10K_TC_RATE(vf_idx), 0);
+ FM10K_WRITE_REG(hw, FM10K_TC_CREDIT(vf_idx),
+ FM10K_TC_CREDIT_CREDIT_MASK);
+
+ /* update our first entry in the table based on previous VF */
+ if (!vf_idx)
+ hw->mac.ops.update_int_moderator(hw);
+ else
+ hw->iov.ops.assign_int_moderator(hw, vf_idx - 1);
+
+ /* reset linked list so it now includes our active vectors */
+ if (vf_idx == (hw->iov.num_vfs - 1))
+ FM10K_WRITE_REG(hw, FM10K_ITR2(0), vf_v_idx);
+ else
+ FM10K_WRITE_REG(hw, FM10K_ITR2(vf_v_limit), vf_v_idx);
+
+ /* link remaining vectors so that next points to previous */
+ for (vf_v_idx++; vf_v_idx < vf_v_limit; vf_v_idx++)
+ FM10K_WRITE_REG(hw, FM10K_ITR2(vf_v_idx), vf_v_idx - 1);
+
+ /* zero out MBMEM, VLAN_TABLE, RETA, RSSRK, and MRQC registers */
+ for (i = FM10K_VFMBMEM_LEN; i--;)
+ FM10K_WRITE_REG(hw, FM10K_MBMEM_VF(vf_idx, i), 0);
+ for (i = FM10K_VLAN_TABLE_SIZE; i--;)
+ FM10K_WRITE_REG(hw, FM10K_VLAN_TABLE(vf_info->vsi, i), 0);
+ for (i = FM10K_RETA_SIZE; i--;)
+ FM10K_WRITE_REG(hw, FM10K_RETA(vf_info->vsi, i), 0);
+ for (i = FM10K_RSSRK_SIZE; i--;)
+ FM10K_WRITE_REG(hw, FM10K_RSSRK(vf_info->vsi, i), 0);
+ FM10K_WRITE_REG(hw, FM10K_MRQC(vf_info->vsi), 0);
+
+ /* Update base address registers to contain MAC address */
+ if (IS_VALID_ETHER_ADDR(vf_info->mac)) {
+ tdbal = (((u32)vf_info->mac[3]) << 24) |
+ (((u32)vf_info->mac[4]) << 16) |
+ (((u32)vf_info->mac[5]) << 8);
+ tdbah = (((u32)0xFF) << 24) |
+ (((u32)vf_info->mac[0]) << 16) |
+ (((u32)vf_info->mac[1]) << 8) |
+ ((u32)vf_info->mac[2]);
+ }
+
+ /* map queue pairs back to VF from last to first */
+ for (i = queues_per_pool; i--;) {
+ FM10K_WRITE_REG(hw, FM10K_TDBAL(vf_q_idx + i), tdbal);
+ FM10K_WRITE_REG(hw, FM10K_TDBAH(vf_q_idx + i), tdbah);
+ /* See definition of FM10K_TDLEN_ITR_SCALE_SHIFT for an
+ * explanation of how TDLEN is used.
+ */
+ FM10K_WRITE_REG(hw, FM10K_TDLEN(vf_q_idx + i),
+ hw->mac.itr_scale <<
+ FM10K_TDLEN_ITR_SCALE_SHIFT);
+ FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx + i);
+ FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx + i);
+ }
+
+ /* repeat the first ring for all the remaining VF rings */
+ for (i = queues_per_pool; i < qmap_stride; i++) {
+ FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx);
+ FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx);
+ }
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_iov_set_lport_pf - Assign and enable a logical port for a given VF
+ * @hw: pointer to hardware structure
+ * @vf_info: pointer to VF information structure
+ * @lport_idx: Logical port offset from the hardware glort
+ * @flags: Set of capability flags to extend port beyond basic functionality
+ *
+ * This function allows enabling a VF port by assigning it a GLORT and
+ * setting the flags so that it can enable an Rx mode.
+ **/
+STATIC s32 fm10k_iov_set_lport_pf(struct fm10k_hw *hw,
+ struct fm10k_vf_info *vf_info,
+ u16 lport_idx, u8 flags)
+{
+ u16 glort = (hw->mac.dglort_map + lport_idx) & FM10K_DGLORTMAP_NONE;
+
+	DEBUGFUNC("fm10k_iov_set_lport_pf");
+
+ /* if glort is not valid return error */
+ if (!fm10k_glort_valid_pf(hw, glort))
+ return FM10K_ERR_PARAM;
+
+ vf_info->vf_flags = flags | FM10K_VF_FLAG_NONE_CAPABLE;
+ vf_info->glort = glort;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_iov_reset_lport_pf - Disable a logical port for a given VF
+ * @hw: pointer to hardware structure
+ * @vf_info: pointer to VF information structure
+ *
+ * This function disables a VF port by stripping it of a GLORT and
+ * setting the flags so that it cannot enable any Rx mode.
+ **/
+STATIC void fm10k_iov_reset_lport_pf(struct fm10k_hw *hw,
+ struct fm10k_vf_info *vf_info)
+{
+ u32 msg[1];
+
+	DEBUGFUNC("fm10k_iov_reset_lport_pf");
+
+ /* need to disable the port if it is already enabled */
+ if (FM10K_VF_FLAG_ENABLED(vf_info)) {
+ /* notify switch that this port has been disabled */
+ fm10k_update_lport_state_pf(hw, vf_info->glort, 1, false);
+
+ /* generate port state response to notify VF it is not ready */
+ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
+ vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
+ }
+
+ /* clear flags and glort if it exists */
+ vf_info->vf_flags = 0;
+ vf_info->glort = 0;
+}
+
+/**
+ * fm10k_iov_update_stats_pf - Updates hardware related statistics for VFs
+ * @hw: pointer to hardware structure
+ * @q: stats for all queues of a VF
+ * @vf_idx: index of VF
+ *
+ * This function collects queue stats for VFs.
+ **/
+STATIC void fm10k_iov_update_stats_pf(struct fm10k_hw *hw,
+ struct fm10k_hw_stats_q *q,
+ u16 vf_idx)
+{
+ u32 idx, qpp;
+
+ /* get stats for all of the queues */
+ qpp = fm10k_queues_per_pool(hw);
+ idx = fm10k_vf_queue_index(hw, vf_idx);
+ fm10k_update_hw_stats_q(hw, q, idx, qpp);
+}
+
+/**
+ * fm10k_iov_msg_msix_pf - Message handler for MSI-X request from VF
+ * @hw: Pointer to hardware structure
+ * @results: Pointer array to message, results[0] is pointer to message
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This function is a default handler for MSI-X requests from the VF. The
+ * assumption is that in this case it is acceptable to just directly
+ * hand off the message from the VF to the underlying shared code.
+ **/
+s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
+ u8 vf_idx = vf_info->vf_idx;
+
+ UNREFERENCED_1PARAMETER(results);
+ DEBUGFUNC("fm10k_iov_msg_msix_pf");
+
+ return hw->iov.ops.assign_int_moderator(hw, vf_idx);
+}
+
+/**
+ * fm10k_iov_select_vid - Select correct default VLAN ID
+ * @vf_info: Pointer to VF information structure
+ * @vid: VLAN ID to correct
+ *
+ * Will report an error if the VLAN ID is out of range. For VID = 0, it will
+ * return either the pf_vid or sw_vid depending on which one is set.
+ */
+STATIC s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid)
+{
+ if (!vid)
+ return vf_info->pf_vid ? vf_info->pf_vid : vf_info->sw_vid;
+ else if (vf_info->pf_vid && vid != vf_info->pf_vid)
+ return FM10K_ERR_PARAM;
+ else
+ return vid;
+}
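+
+/* Example semantics, illustrative only: with pf_vid == 0 and sw_vid == 7,
+ * fm10k_iov_select_vid(vf_info, 0) returns 7. With pf_vid == 5, a request
+ * for VID 6 returns FM10K_ERR_PARAM, while requests for 0 or 5 both
+ * resolve to 5.
+ */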
+
+/**
+ * fm10k_iov_msg_mac_vlan_pf - Message handler for MAC/VLAN request from VF
+ * @hw: Pointer to hardware structure
+ * @results: Pointer array to message, results[0] is pointer to message
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This function is a default handler for MAC/VLAN requests from the VF.
+ * The assumption is that in this case it is acceptable to just directly
+ * hand off the message from the VF to the underlying shared code.
+ **/
+s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
+ u8 mac[ETH_ALEN];
+ u32 *result;
+ int err = FM10K_SUCCESS;
+ bool set;
+ u16 vlan;
+ u32 vid;
+
+ DEBUGFUNC("fm10k_iov_msg_mac_vlan_pf");
+
+ /* we shouldn't be updating rules on a disabled interface */
+ if (!FM10K_VF_FLAG_ENABLED(vf_info))
+ err = FM10K_ERR_PARAM;
+
+ if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) {
+ result = results[FM10K_MAC_VLAN_MSG_VLAN];
+
+ /* record VLAN id requested */
+ err = fm10k_tlv_attr_get_u32(result, &vid);
+ if (err)
+ return err;
+
+ set = !(vid & FM10K_VLAN_CLEAR);
+ vid &= ~FM10K_VLAN_CLEAR;
+
+ /* if the length field has been set, this is a multi-bit
+ * update request. For multi-bit requests, simply disallow
+ * them when the pf_vid has been set. In this case, the PF
+ * should have already cleared the VLAN_TABLE, and if we
+ * allowed them, it could allow a rogue VF to receive traffic
+ * on a VLAN it was not assigned. In the single-bit case, we
+ * need to modify requests for VLAN 0 to use the default PF or
+ * SW vid when assigned.
+ */
+
+ if (vid >> 16) {
+ /* prevent multi-bit requests when PF has
+ * administratively set the VLAN for this VF
+ */
+ if (vf_info->pf_vid)
+ return FM10K_ERR_PARAM;
+ } else {
+ err = fm10k_iov_select_vid(vf_info, (u16)vid);
+ if (err < 0)
+ return err;
+
+ vid = err;
+ }
+
+ /* update VSI info for VF in regards to VLAN table */
+ err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
+ }
+
+ if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) {
+ result = results[FM10K_MAC_VLAN_MSG_MAC];
+
+ /* record unicast MAC address requested */
+ err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
+ if (err)
+ return err;
+
+ /* block attempts to set MAC for a locked device */
+ if (IS_VALID_ETHER_ADDR(vf_info->mac) &&
+ memcmp(mac, vf_info->mac, ETH_ALEN))
+ return FM10K_ERR_PARAM;
+
+ set = !(vlan & FM10K_VLAN_CLEAR);
+ vlan &= ~FM10K_VLAN_CLEAR;
+
+ err = fm10k_iov_select_vid(vf_info, vlan);
+ if (err < 0)
+ return err;
+
+ vlan = (u16)err;
+
+ /* notify switch of request for new unicast address */
+ err = hw->mac.ops.update_uc_addr(hw, vf_info->glort,
+ mac, vlan, set, 0);
+ }
+
+ if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) {
+ result = results[FM10K_MAC_VLAN_MSG_MULTICAST];
+
+ /* record multicast MAC address requested */
+ err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
+ if (err)
+ return err;
+
+ /* verify that the VF is allowed to request multicast */
+ if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED))
+ return FM10K_ERR_PARAM;
+
+ set = !(vlan & FM10K_VLAN_CLEAR);
+ vlan &= ~FM10K_VLAN_CLEAR;
+
+ err = fm10k_iov_select_vid(vf_info, vlan);
+ if (err < 0)
+ return err;
+
+ vlan = (u16)err;
+
+ /* notify switch of request for new multicast address */
+ err = hw->mac.ops.update_mc_addr(hw, vf_info->glort,
+ mac, vlan, set);
+ }
+
+ return err;
+}
+
+/**
+ * fm10k_iov_supported_xcast_mode_pf - Determine best match for xcast mode
+ * @vf_info: VF info structure containing capability flags
+ * @mode: Requested xcast mode
+ *
+ * This function outputs the mode that most closely matches the requested
+ * mode. If no modes match, it will request that we disable the port.
+ **/
+STATIC u8 fm10k_iov_supported_xcast_mode_pf(struct fm10k_vf_info *vf_info,
+ u8 mode)
+{
+ u8 vf_flags = vf_info->vf_flags;
+
+ /* match up mode to capabilities as best as possible */
+ switch (mode) {
+ case FM10K_XCAST_MODE_PROMISC:
+ if (vf_flags & FM10K_VF_FLAG_PROMISC_CAPABLE)
+ return FM10K_XCAST_MODE_PROMISC;
+		/* fallthrough */
+ case FM10K_XCAST_MODE_ALLMULTI:
+ if (vf_flags & FM10K_VF_FLAG_ALLMULTI_CAPABLE)
+ return FM10K_XCAST_MODE_ALLMULTI;
+		/* fallthrough */
+ case FM10K_XCAST_MODE_MULTI:
+ if (vf_flags & FM10K_VF_FLAG_MULTI_CAPABLE)
+ return FM10K_XCAST_MODE_MULTI;
+		/* fallthrough */
+ case FM10K_XCAST_MODE_NONE:
+ if (vf_flags & FM10K_VF_FLAG_NONE_CAPABLE)
+ return FM10K_XCAST_MODE_NONE;
+		/* fallthrough */
+ default:
+ break;
+ }
+
+ /* disable interface as it should not be able to request any */
+ return FM10K_XCAST_MODE_DISABLE;
+}
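+
+/* Illustrative example: a VF carrying only FM10K_VF_FLAG_MULTI_CAPABLE
+ * and FM10K_VF_FLAG_NONE_CAPABLE that requests FM10K_XCAST_MODE_PROMISC
+ * falls through the first two cases above and is demoted to
+ * FM10K_XCAST_MODE_MULTI, the closest mode it is allowed to use.
+ */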
+
+/**
+ * fm10k_iov_msg_lport_state_pf - Message handler for port state requests
+ * @hw: Pointer to hardware structure
+ * @results: Pointer array to message, results[0] is pointer to message
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This function is a default handler for port state requests. The port
+ * state requests for now are basic and consist of enabling or disabling
+ * the port.
+ **/
+s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
+ u32 *result;
+ s32 err = FM10K_SUCCESS;
+ u32 msg[2];
+ u8 mode = 0;
+
+ DEBUGFUNC("fm10k_iov_msg_lport_state_pf");
+
+ /* verify VF is allowed to enable even minimal mode */
+ if (!(vf_info->vf_flags & FM10K_VF_FLAG_NONE_CAPABLE))
+ return FM10K_ERR_PARAM;
+
+ if (!!results[FM10K_LPORT_STATE_MSG_XCAST_MODE]) {
+ result = results[FM10K_LPORT_STATE_MSG_XCAST_MODE];
+
+ /* XCAST mode update requested */
+ err = fm10k_tlv_attr_get_u8(result, &mode);
+ if (err)
+ return FM10K_ERR_PARAM;
+
+ /* prep for possible demotion depending on capabilities */
+ mode = fm10k_iov_supported_xcast_mode_pf(vf_info, mode);
+
+ /* if mode is not currently enabled, enable it */
+ if (!(FM10K_VF_FLAG_ENABLED(vf_info) & BIT(mode)))
+ fm10k_update_xcast_mode_pf(hw, vf_info->glort, mode);
+
+ /* swap mode back to a bit flag */
+ mode = FM10K_VF_FLAG_SET_MODE(mode);
+ } else if (!results[FM10K_LPORT_STATE_MSG_DISABLE]) {
+ /* need to disable the port if it is already enabled */
+ if (FM10K_VF_FLAG_ENABLED(vf_info))
+ err = fm10k_update_lport_state_pf(hw, vf_info->glort,
+ 1, false);
+
+ /* we need to clear VF_FLAG_ENABLED flags in order to ensure
+ * that we actually re-enable the LPORT state below. Note that
+ * this has no impact if the VF is already disabled, as the
+ * flags are already cleared.
+ */
+ if (!err)
+ vf_info->vf_flags = FM10K_VF_FLAG_CAPABLE(vf_info);
+
+ /* when enabling the port we should reset the rate limiters */
+ hw->iov.ops.configure_tc(hw, vf_info->vf_idx, vf_info->rate);
+
+ /* set mode for minimal functionality */
+ mode = FM10K_VF_FLAG_SET_MODE_NONE;
+
+ /* generate port state response to notify VF it is ready */
+ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
+ fm10k_tlv_attr_put_bool(msg, FM10K_LPORT_STATE_MSG_READY);
+ mbx->ops.enqueue_tx(hw, mbx, msg);
+ }
+
+ /* if enable state toggled note the update */
+ if (!err && (!FM10K_VF_FLAG_ENABLED(vf_info) != !mode))
+ err = fm10k_update_lport_state_pf(hw, vf_info->glort, 1,
+ !!mode);
+
+ /* if state change succeeded, then update our stored state */
+ mode |= FM10K_VF_FLAG_CAPABLE(vf_info);
+ if (!err)
+ vf_info->vf_flags = mode;
+
+ return err;
+}
+
+#ifndef NO_DEFAULT_SRIOV_MSG_HANDLERS
+const struct fm10k_msg_data fm10k_iov_msg_data_pf[] = {
+ FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
+ FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf),
+ FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_mac_vlan_pf),
+ FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf),
+ FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
+};
+
+#endif
+/**
+ * fm10k_update_hw_stats_pf - Updates hardware related statistics of PF
+ * @hw: pointer to hardware structure
+ * @stats: pointer to the stats structure to update
+ *
+ * This function collects and aggregates global and per queue hardware
+ * statistics.
+ **/
+void fm10k_update_hw_stats_pf(struct fm10k_hw *hw,
+ struct fm10k_hw_stats *stats)
+{
+ u32 timeout, ur, ca, um, xec, vlan_drop, loopback_drop, nodesc_drop;
+ u32 id, id_prev;
+
+ DEBUGFUNC("fm10k_update_hw_stats_pf");
+
+ /* Use Tx queue 0 as a canary to detect a reset */
+ id = FM10K_READ_REG(hw, FM10K_TXQCTL(0));
+
+ /* Read Global Statistics */
+ do {
+ timeout = fm10k_read_hw_stats_32b(hw, FM10K_STATS_TIMEOUT,
+ &stats->timeout);
+ ur = fm10k_read_hw_stats_32b(hw, FM10K_STATS_UR, &stats->ur);
+ ca = fm10k_read_hw_stats_32b(hw, FM10K_STATS_CA, &stats->ca);
+ um = fm10k_read_hw_stats_32b(hw, FM10K_STATS_UM, &stats->um);
+ xec = fm10k_read_hw_stats_32b(hw, FM10K_STATS_XEC, &stats->xec);
+ vlan_drop = fm10k_read_hw_stats_32b(hw, FM10K_STATS_VLAN_DROP,
+ &stats->vlan_drop);
+ loopback_drop =
+ fm10k_read_hw_stats_32b(hw,
+ FM10K_STATS_LOOPBACK_DROP,
+ &stats->loopback_drop);
+ nodesc_drop = fm10k_read_hw_stats_32b(hw,
+ FM10K_STATS_NODESC_DROP,
+ &stats->nodesc_drop);
+
+ /* if value has not changed then we have consistent data */
+ id_prev = id;
+ id = FM10K_READ_REG(hw, FM10K_TXQCTL(0));
+ } while ((id ^ id_prev) & FM10K_TXQCTL_ID_MASK);
+
+ /* drop non-ID bits and set VALID ID bit */
+ id &= FM10K_TXQCTL_ID_MASK;
+ id |= FM10K_STAT_VALID;
+
+ /* Update Global Statistics */
+ if (stats->stats_idx == id) {
+ stats->timeout.count += timeout;
+ stats->ur.count += ur;
+ stats->ca.count += ca;
+ stats->um.count += um;
+ stats->xec.count += xec;
+ stats->vlan_drop.count += vlan_drop;
+ stats->loopback_drop.count += loopback_drop;
+ stats->nodesc_drop.count += nodesc_drop;
+ }
+
+ /* Update bases and record current PF id */
+ fm10k_update_hw_base_32b(&stats->timeout, timeout);
+ fm10k_update_hw_base_32b(&stats->ur, ur);
+ fm10k_update_hw_base_32b(&stats->ca, ca);
+ fm10k_update_hw_base_32b(&stats->um, um);
+ fm10k_update_hw_base_32b(&stats->xec, xec);
+ fm10k_update_hw_base_32b(&stats->vlan_drop, vlan_drop);
+ fm10k_update_hw_base_32b(&stats->loopback_drop, loopback_drop);
+ fm10k_update_hw_base_32b(&stats->nodesc_drop, nodesc_drop);
+ stats->stats_idx = id;
+
+ /* Update Queue Statistics */
+ fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues);
+}
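+
+/* Note, illustrative: the id/id_prev loop above is a seqlock-style read.
+ * The assumption is that a datapath reset rewrites TXQCTL(0); if the ID
+ * field read after sampling differs from the one read before, the batch
+ * of counters may mix pre- and post-reset values and is re-read.
+ */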
+
+/**
+ * fm10k_rebind_hw_stats_pf - Resets base for hardware statistics of PF
+ * @hw: pointer to hardware structure
+ * @stats: pointer to the stats structure to update
+ *
+ * This function resets the base for global and per queue hardware
+ * statistics.
+ **/
+void fm10k_rebind_hw_stats_pf(struct fm10k_hw *hw,
+ struct fm10k_hw_stats *stats)
+{
+ DEBUGFUNC("fm10k_rebind_hw_stats_pf");
+
+ /* Unbind Global Statistics */
+ fm10k_unbind_hw_stats_32b(&stats->timeout);
+ fm10k_unbind_hw_stats_32b(&stats->ur);
+ fm10k_unbind_hw_stats_32b(&stats->ca);
+ fm10k_unbind_hw_stats_32b(&stats->um);
+ fm10k_unbind_hw_stats_32b(&stats->xec);
+ fm10k_unbind_hw_stats_32b(&stats->vlan_drop);
+ fm10k_unbind_hw_stats_32b(&stats->loopback_drop);
+ fm10k_unbind_hw_stats_32b(&stats->nodesc_drop);
+
+ /* Unbind Queue Statistics */
+ fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues);
+
+ /* Reinitialize bases for all stats */
+ fm10k_update_hw_stats_pf(hw, stats);
+}
+
+/**
+ * fm10k_set_dma_mask_pf - Configures PhyAddrSpace to limit DMA to system
+ * @hw: pointer to hardware structure
+ * @dma_mask: 64 bit DMA mask required for platform
+ *
+ * This function sets the PHYADDR.PhyAddrSpace bits for the endpoint in order
+ * to limit the access to memory beyond what is physically in the system.
+ **/
+STATIC void fm10k_set_dma_mask_pf(struct fm10k_hw *hw, u64 dma_mask)
+{
+ /* we need to write the upper 32 bits of DMA mask to PhyAddrSpace */
+ u32 phyaddr = (u32)(dma_mask >> 32);
+
+ DEBUGFUNC("fm10k_set_dma_mask_pf");
+
+ FM10K_WRITE_REG(hw, FM10K_PHYADDR, phyaddr);
+}
+
+/**
+ * fm10k_get_fault_pf - Record a fault in one of the interface units
+ * @hw: pointer to hardware structure
+ * @type: pointer to fault type register offset
+ * @fault: pointer to memory location to record the fault
+ *
+ * Record the fault register contents to the fault data structure and
+ * clear the entry from the register.
+ *
+ * Returns ERR_PARAM if invalid register is specified or no error is present.
+ **/
+STATIC s32 fm10k_get_fault_pf(struct fm10k_hw *hw, int type,
+ struct fm10k_fault *fault)
+{
+ u32 func;
+
+ DEBUGFUNC("fm10k_get_fault_pf");
+
+ /* verify the fault register is in range and is aligned */
+ switch (type) {
+ case FM10K_PCA_FAULT:
+ case FM10K_THI_FAULT:
+ case FM10K_FUM_FAULT:
+ break;
+ default:
+ return FM10K_ERR_PARAM;
+ }
+
+ /* only service faults that are valid */
+ func = FM10K_READ_REG(hw, type + FM10K_FAULT_FUNC);
+ if (!(func & FM10K_FAULT_FUNC_VALID))
+ return FM10K_ERR_PARAM;
+
+ /* read remaining fields */
+ fault->address = FM10K_READ_REG(hw, type + FM10K_FAULT_ADDR_HI);
+ fault->address <<= 32;
+	fault->address |= FM10K_READ_REG(hw, type + FM10K_FAULT_ADDR_LO);
+ fault->specinfo = FM10K_READ_REG(hw, type + FM10K_FAULT_SPECINFO);
+
+ /* clear valid bit to allow for next error */
+ FM10K_WRITE_REG(hw, type + FM10K_FAULT_FUNC, FM10K_FAULT_FUNC_VALID);
+
+ /* Record which function triggered the error */
+ if (func & FM10K_FAULT_FUNC_PF)
+ fault->func = 0;
+ else
+ fault->func = 1 + ((func & FM10K_FAULT_FUNC_VF_MASK) >>
+ FM10K_FAULT_FUNC_VF_SHIFT);
+
+ /* record fault type */
+ fault->type = func & FM10K_FAULT_FUNC_TYPE_MASK;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_request_lport_map_pf - Request LPORT map from the switch API
+ * @hw: pointer to hardware structure
+ *
+ **/
+STATIC s32 fm10k_request_lport_map_pf(struct fm10k_hw *hw)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ u32 msg[1];
+
+	DEBUGFUNC("fm10k_request_lport_map_pf");
+
+ /* issue request asking for LPORT map */
+ fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_LPORT_MAP);
+
+ /* load onto outgoing mailbox */
+ return mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+/**
+ * fm10k_get_host_state_pf - Returns the state of the switch and mailbox
+ * @hw: pointer to hardware structure
+ * @switch_ready: pointer to boolean value that will record switch state
+ *
+ * This function will check the DMA_CTRL2 register and mailbox in order
+ * to determine if the switch is ready for the PF to begin requesting
+ * addresses and mapping traffic to the local interface.
+ **/
+STATIC s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready)
+{
+ u32 dma_ctrl2;
+
+ DEBUGFUNC("fm10k_get_host_state_pf");
+
+ /* verify the switch is ready for interaction */
+ dma_ctrl2 = FM10K_READ_REG(hw, FM10K_DMA_CTRL2);
+ if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY))
+ return FM10K_SUCCESS;
+
+ /* retrieve generic host state info */
+ return fm10k_get_host_state_generic(hw, switch_ready);
+}
+
+/* This structure defines the attributes to be parsed below */
+const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[] = {
+ FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR,
+ sizeof(struct fm10k_swapi_error)),
+ FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_LPORT_MAP),
+ FM10K_TLV_ATTR_LAST
+};
+
+/**
+ * fm10k_msg_lport_map_pf - Message handler for lport_map message from SM
+ * @hw: Pointer to hardware structure
+ * @results: pointer array containing parsed data
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This handler configures the lport mapping based on the reply from the
+ * switch API.
+ **/
+s32 fm10k_msg_lport_map_pf(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ u16 glort, mask;
+ u32 dglort_map;
+ s32 err;
+
+ UNREFERENCED_1PARAMETER(mbx);
+ DEBUGFUNC("fm10k_msg_lport_map_pf");
+
+ err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_LPORT_MAP],
+ &dglort_map);
+ if (err)
+ return err;
+
+ /* extract values out of the header */
+ glort = FM10K_MSG_HDR_FIELD_GET(dglort_map, LPORT_MAP_GLORT);
+ mask = FM10K_MSG_HDR_FIELD_GET(dglort_map, LPORT_MAP_MASK);
+
+ /* verify mask is set and none of the masked bits in glort are set */
+ if (!mask || (glort & ~mask))
+ return FM10K_ERR_PARAM;
+
+ /* verify the mask is contiguous, and that it is 1's followed by 0's */
+ if (((~(mask - 1) & mask) + mask) & FM10K_DGLORTMAP_NONE)
+ return FM10K_ERR_PARAM;
+
+ /* record the glort, mask, and port count */
+ hw->mac.dglort_map = dglort_map;
+
+ return FM10K_SUCCESS;
+}
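+
+/* Worked example, illustrative only, assuming FM10K_DGLORTMAP_NONE covers
+ * the low 16 glort bits: for mask == 0x00ff the expression isolates the
+ * lowest set bit and adds the mask back,
+ *
+ *	(0x0001 + 0x00ff) == 0x0100	// carry ripples past the whole run
+ *
+ * leaving the glort bits clear, so the check passes. A gappy mask such as
+ * 0x00f1 gives 0x0001 + 0x00f1 == 0x00f2, which fails.
+ */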
+
+const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[] = {
+ FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_UPDATE_PVID),
+ FM10K_TLV_ATTR_LAST
+};
+
+/**
+ * fm10k_msg_update_pvid_pf - Message handler for port VLAN message from SM
+ * @hw: Pointer to hardware structure
+ * @results: pointer array containing parsed data
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This handler configures the default VLAN for the PF
+ **/
+static s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ u16 glort, pvid;
+ u32 pvid_update;
+ s32 err;
+
+ UNREFERENCED_1PARAMETER(mbx);
+ DEBUGFUNC("fm10k_msg_update_pvid_pf");
+
+ err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_UPDATE_PVID],
+ &pvid_update);
+ if (err)
+ return err;
+
+ /* extract values from the pvid update */
+ glort = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_GLORT);
+ pvid = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_PVID);
+
+ /* if glort is not valid return error */
+ if (!fm10k_glort_valid_pf(hw, glort))
+ return FM10K_ERR_PARAM;
+
+ /* verify VLAN ID is valid */
+ if (pvid >= FM10K_VLAN_TABLE_VID_MAX)
+ return FM10K_ERR_PARAM;
+
+ /* record the port VLAN ID value */
+ hw->mac.default_vid = pvid;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_record_global_table_data - Move global table data to swapi table info
+ * @from: pointer to source table data structure
+ * @to: pointer to destination table info structure
+ *
+ * This function will copy table_data to the table_info contained in
+ * the hw struct.
+ **/
+static void fm10k_record_global_table_data(struct fm10k_global_table_data *from,
+ struct fm10k_swapi_table_info *to)
+{
+ /* convert from le32 struct to CPU byte ordered values */
+ to->used = FM10K_LE32_TO_CPU(from->used);
+ to->avail = FM10K_LE32_TO_CPU(from->avail);
+}
+
+const struct fm10k_tlv_attr fm10k_err_msg_attr[] = {
+ FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR,
+ sizeof(struct fm10k_swapi_error)),
+ FM10K_TLV_ATTR_LAST
+};
+
+/**
+ * fm10k_msg_err_pf - Message handler for error reply
+ * @hw: Pointer to hardware structure
+ * @results: pointer array containing parsed data
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This handler will capture the data for any error replies to previous
+ * messages that the PF has sent.
+ **/
+s32 fm10k_msg_err_pf(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ struct fm10k_swapi_error err_msg;
+ s32 err;
+
+ UNREFERENCED_1PARAMETER(mbx);
+ DEBUGFUNC("fm10k_msg_err_pf");
+
+ /* extract structure from message */
+ err = fm10k_tlv_attr_get_le_struct(results[FM10K_PF_ATTR_ID_ERR],
+ &err_msg, sizeof(err_msg));
+ if (err)
+ return err;
+
+ /* record table status */
+ fm10k_record_global_table_data(&err_msg.mac, &hw->swapi.mac);
+ fm10k_record_global_table_data(&err_msg.nexthop, &hw->swapi.nexthop);
+ fm10k_record_global_table_data(&err_msg.ffu, &hw->swapi.ffu);
+
+ /* record SW API status value */
+ hw->swapi.status = FM10K_LE32_TO_CPU(err_msg.status);
+
+ return FM10K_SUCCESS;
+}
+
+/* currently there is no shared 1588 timestamp handler */
+
+const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[] = {
+ FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_1588_TIMESTAMP,
+ sizeof(struct fm10k_swapi_1588_timestamp)),
+ FM10K_TLV_ATTR_LAST
+};
+
+const struct fm10k_tlv_attr fm10k_1588_clock_owner_attr[] = {
+ FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_1588_CLOCK_OWNER,
+ sizeof(struct fm10k_swapi_1588_clock_owner)),
+ FM10K_TLV_ATTR_LAST
+};
+
+const struct fm10k_tlv_attr fm10k_master_clk_offset_attr[] = {
+ FM10K_TLV_ATTR_U64(FM10K_PF_ATTR_ID_MASTER_CLK_OFFSET),
+ FM10K_TLV_ATTR_LAST
+};
+
+/**
+ * fm10k_iov_notify_offset_pf - Notify VF of change in PTP offset
+ * @hw: pointer to hardware structure
+ * @vf_info: pointer to the vf info structure
+ * @offset: 64bit unsigned offset from hardware SYSTIME
+ *
+ * This function sends a message to a given VF to notify it of PTP offset
+ * changes.
+ **/
+STATIC void fm10k_iov_notify_offset_pf(struct fm10k_hw *hw,
+ struct fm10k_vf_info *vf_info,
+ u64 offset)
+{
+ u32 msg[4];
+
+ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_1588);
+ fm10k_tlv_attr_put_u64(msg, FM10K_1588_MSG_CLK_OFFSET, offset);
+
+ if (vf_info->mbx.ops.enqueue_tx)
+ vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
+}
+
+/**
+ * fm10k_msg_1588_clock_owner_pf - Message handler for clock ownership from SM
+ * @hw: pointer to hardware structure
+ * @results: pointer to array containing parsed data,
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This handler configures the FM10K_HW_FLAG_CLOCK_OWNER field for the PF
+ */
+s32 fm10k_msg_1588_clock_owner_pf(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ struct fm10k_swapi_1588_clock_owner msg;
+ u16 glort;
+ s32 err;
+
+ UNREFERENCED_1PARAMETER(mbx);
+ DEBUGFUNC("fm10k_msg_1588_clock_owner");
+
+ err = fm10k_tlv_attr_get_le_struct(
+ results[FM10K_PF_ATTR_ID_1588_CLOCK_OWNER],
+ &msg, sizeof(msg));
+ if (err)
+ return err;
+
+ /* We own the clock iff the glort matches us and the enabled field is
+ * true. Otherwise, the clock must belong to some other port.
+ */
+ glort = le16_to_cpu(msg.glort);
+ if (fm10k_glort_valid_pf(hw, glort) && msg.enabled)
+ hw->flags |= FM10K_HW_FLAG_CLOCK_OWNER;
+ else
+ hw->flags &= ~FM10K_HW_FLAG_CLOCK_OWNER;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_adjust_systime_pf - Adjust systime frequency
+ * @hw: pointer to hardware structure
+ * @ppb: adjustment rate in parts per billion
+ *
+ * This function will adjust the SYSTIME_CFG register contained in BAR 4
+ * if this function is supported for BAR 4 access. The adjustment amount
+ * is based on the parts per billion value provided and adjusted to a
+ * value based on parts per 2^48 clock cycles.
+ *
+ * If adjustment is not supported or the requested value is too large
+ * we will return an error.
+ **/
+STATIC s32 fm10k_adjust_systime_pf(struct fm10k_hw *hw, s32 ppb)
+{
+ u64 systime_adjust;
+
+ DEBUGFUNC("fm10k_adjust_systime_pf");
+
+ /* ensure that we control the clock */
+ if (!(hw->flags & FM10K_HW_FLAG_CLOCK_OWNER))
+ return FM10K_ERR_DEVICE_NOT_SUPPORTED;
+
+ /* if sw_addr is not set we don't have switch register access */
+ if (!hw->sw_addr)
+ return ppb ? FM10K_ERR_PARAM : FM10K_SUCCESS;
+
+ /* we must convert the value from parts per billion to parts per
+ * 2^48 cycles. In addition I have opted to only use the 30 most
+ * significant bits of the adjustment value as the 8 least
+ * significant bits are located in another register and represent
+ * a value significantly less than a part per billion, the result
+ * of dropping the 8 least significant bits is that the adjustment
+ * value is effectively multiplied by 2^8 when we write it.
+ *
+ * As a result of all this the math for this breaks down as follows:
+ * ppb / 10^9 == adjust * 2^8 / 2^48
+ * If we solve this for adjust, and simplify it comes out as:
+ * ppb * 2^31 / 5^9 == adjust
+ */
+ systime_adjust = (ppb < 0) ? -ppb : ppb;
+ systime_adjust <<= 31;
+ do_div(systime_adjust, 1953125);
+
+ /* verify the requested adjustment value is in range */
+ if (systime_adjust > FM10K_SW_SYSTIME_ADJUST_MASK)
+ return FM10K_ERR_PARAM;
+
+ if (ppb > 0)
+ systime_adjust |= FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE;
+
+ FM10K_WRITE_SW_REG(hw, FM10K_SW_SYSTIME_ADJUST, (u32)systime_adjust);
+
+ return FM10K_SUCCESS;
+}
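+
+/* A numeric example of the conversion above (the rate is chosen purely
+ * for illustration): for ppb = 1000 (a 1 ppm speed-up),
+ *
+ *   systime_adjust = 1000 * 2^31 / 5^9
+ *                  = 2147483648000 / 1953125
+ *                  = 1099511 (truncated by do_div)
+ *
+ * which is well within the adjustment mask, so the value is written to
+ * FM10K_SW_SYSTIME_ADJUST with the positive-direction bit set.
+ */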
+
+/**
+ * fm10k_notify_offset_pf - Notify switch of change in PTP offset
+ * @hw: pointer to hardware structure
+ * @offset: 64bit unsigned offset of SYSTIME
+ *
+ * This function sends a message to the switch to indicate a change in the
+ * offset of the hardware SYSTIME registers. The switch manager is
+ * responsible for transmitting this message to other hosts.
+ */
+STATIC s32 fm10k_notify_offset_pf(struct fm10k_hw *hw, u64 offset)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ u32 msg[4];
+
+ DEBUGFUNC("fm10k_notify_offset_pf");
+
+ /* ensure that we control the clock */
+ if (!(hw->flags & FM10K_HW_FLAG_CLOCK_OWNER))
+ return FM10K_ERR_DEVICE_NOT_SUPPORTED;
+
+ fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_MASTER_CLK_OFFSET);
+ fm10k_tlv_attr_put_u64(msg, FM10K_PF_ATTR_ID_MASTER_CLK_OFFSET, offset);
+
+ /* load onto outgoing mailbox */
+ return mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+/**
+ * fm10k_read_systime_pf - Reads value of systime registers
+ * @hw: pointer to the hardware structure
+ *
+ * Function reads the contents of 2 registers, combined to represent a 64 bit
+ * value measured in nanoseconds. In order to guarantee the value is accurate
+ * we check the 32 most significant bits both before and after reading the
+ * 32 least significant bits to verify they didn't change as we were reading
+ * the registers.
+ **/
+static u64 fm10k_read_systime_pf(struct fm10k_hw *hw)
+{
+ u32 systime_l, systime_h, systime_tmp;
+
+ systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1);
+
+ do {
+ systime_tmp = systime_h;
+ systime_l = fm10k_read_reg(hw, FM10K_SYSTIME);
+ systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1);
+ } while (systime_tmp != systime_h);
+
+ return ((u64)systime_h << 32) | systime_l;
+}
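+
+/* An example of the race the loop above guards against (values are made
+ * up): if SYSTIME rolls over from 0x00000001FFFFFFFF to
+ * 0x0000000200000000 between the two reads, a naive high-then-low read
+ * could return 0x0000000100000000 -- off by nearly 4.3 seconds.
+ * Re-reading the high dword until it is stable rules out such torn
+ * reads.
+ */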
+
+static const struct fm10k_msg_data fm10k_msg_data_pf[] = {
+ FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
+ FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
+ FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
+ FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
+ FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
+ FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
+ FM10K_PF_MSG_1588_CLOCK_OWNER_HANDLER(fm10k_msg_1588_clock_owner_pf),
+ FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
+};
+
+/**
+ * fm10k_init_ops_pf - Inits func ptrs and MAC type
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the function pointers and assign the MAC type for PF.
+ * Does not touch the hardware.
+ **/
+s32 fm10k_init_ops_pf(struct fm10k_hw *hw)
+{
+ struct fm10k_mac_info *mac = &hw->mac;
+ struct fm10k_iov_info *iov = &hw->iov;
+
+ DEBUGFUNC("fm10k_init_ops_pf");
+
+ fm10k_init_ops_generic(hw);
+
+ mac->ops.reset_hw = &fm10k_reset_hw_pf;
+ mac->ops.init_hw = &fm10k_init_hw_pf;
+ mac->ops.start_hw = &fm10k_start_hw_generic;
+ mac->ops.stop_hw = &fm10k_stop_hw_generic;
+#ifndef NO_IS_SLOT_APPROPRIATE_CHECK
+ mac->ops.is_slot_appropriate = &fm10k_is_slot_appropriate_pf;
+#endif
+ mac->ops.update_vlan = &fm10k_update_vlan_pf;
+ mac->ops.read_mac_addr = &fm10k_read_mac_addr_pf;
+ mac->ops.update_uc_addr = &fm10k_update_uc_addr_pf;
+ mac->ops.update_mc_addr = &fm10k_update_mc_addr_pf;
+ mac->ops.update_xcast_mode = &fm10k_update_xcast_mode_pf;
+ mac->ops.update_int_moderator = &fm10k_update_int_moderator_pf;
+ mac->ops.update_lport_state = &fm10k_update_lport_state_pf;
+ mac->ops.update_hw_stats = &fm10k_update_hw_stats_pf;
+ mac->ops.rebind_hw_stats = &fm10k_rebind_hw_stats_pf;
+ mac->ops.configure_dglort_map = &fm10k_configure_dglort_map_pf;
+ mac->ops.set_dma_mask = &fm10k_set_dma_mask_pf;
+ mac->ops.get_fault = &fm10k_get_fault_pf;
+ mac->ops.get_host_state = &fm10k_get_host_state_pf;
+ mac->ops.request_lport_map = &fm10k_request_lport_map_pf;
+ mac->ops.adjust_systime = &fm10k_adjust_systime_pf;
+ mac->ops.notify_offset = &fm10k_notify_offset_pf;
+ mac->ops.read_systime = &fm10k_read_systime_pf;
+
+ mac->max_msix_vectors = fm10k_get_pcie_msix_count_generic(hw);
+
+ iov->ops.assign_resources = &fm10k_iov_assign_resources_pf;
+ iov->ops.configure_tc = &fm10k_iov_configure_tc_pf;
+ iov->ops.assign_int_moderator = &fm10k_iov_assign_int_moderator_pf;
+ iov->ops.assign_default_mac_vlan = fm10k_iov_assign_default_mac_vlan_pf;
+ iov->ops.reset_resources = &fm10k_iov_reset_resources_pf;
+ iov->ops.set_lport = &fm10k_iov_set_lport_pf;
+ iov->ops.reset_lport = &fm10k_iov_reset_lport_pf;
+ iov->ops.update_stats = &fm10k_iov_update_stats_pf;
+ iov->ops.notify_offset = &fm10k_iov_notify_offset_pf;
+
+ return fm10k_sm_mbx_init(hw, &hw->mbx, fm10k_msg_data_pf);
+}
diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_pf.h b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_pf.h
new file mode 100644
index 000000000..1c2e9994d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_pf.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013 - 2015 Intel Corporation
+ */
+
+#ifndef _FM10K_PF_H_
+#define _FM10K_PF_H_
+
+#include "fm10k_type.h"
+#include "fm10k_common.h"
+
+bool fm10k_glort_valid_pf(struct fm10k_hw *hw, u16 glort);
+u16 fm10k_queues_per_pool(struct fm10k_hw *hw);
+u16 fm10k_vf_queue_index(struct fm10k_hw *hw, u16 vf_idx);
+
+enum fm10k_pf_tlv_msg_id_v1 {
+ FM10K_PF_MSG_ID_TEST = 0x000, /* msg ID reserved */
+ FM10K_PF_MSG_ID_XCAST_MODES = 0x001,
+ FM10K_PF_MSG_ID_UPDATE_MAC_FWD_RULE = 0x002,
+ FM10K_PF_MSG_ID_LPORT_MAP = 0x100,
+ FM10K_PF_MSG_ID_LPORT_CREATE = 0x200,
+ FM10K_PF_MSG_ID_LPORT_DELETE = 0x201,
+ FM10K_PF_MSG_ID_CONFIG = 0x300,
+ FM10K_PF_MSG_ID_UPDATE_PVID = 0x400,
+ FM10K_PF_MSG_ID_CREATE_FLOW_TABLE = 0x501,
+ FM10K_PF_MSG_ID_DELETE_FLOW_TABLE = 0x502,
+ FM10K_PF_MSG_ID_UPDATE_FLOW = 0x503,
+ FM10K_PF_MSG_ID_DELETE_FLOW = 0x504,
+ FM10K_PF_MSG_ID_SET_FLOW_STATE = 0x505,
+ FM10K_PF_MSG_ID_GET_1588_INFO = 0x506,
+ FM10K_PF_MSG_ID_1588_TIMESTAMP = 0x701,
+ FM10K_PF_MSG_ID_1588_CLOCK_OWNER = 0x702,
+ FM10K_PF_MSG_ID_MASTER_CLK_OFFSET = 0x703,
+};
+
+enum fm10k_pf_tlv_attr_id_v1 {
+ FM10K_PF_ATTR_ID_ERR = 0x00,
+ FM10K_PF_ATTR_ID_LPORT_MAP = 0x01,
+ FM10K_PF_ATTR_ID_XCAST_MODE = 0x02,
+ FM10K_PF_ATTR_ID_MAC_UPDATE = 0x03,
+ FM10K_PF_ATTR_ID_VLAN_UPDATE = 0x04,
+ FM10K_PF_ATTR_ID_CONFIG = 0x05,
+ FM10K_PF_ATTR_ID_CREATE_FLOW_TABLE = 0x06,
+ FM10K_PF_ATTR_ID_DELETE_FLOW_TABLE = 0x07,
+ FM10K_PF_ATTR_ID_UPDATE_FLOW = 0x08,
+ FM10K_PF_ATTR_ID_FLOW_STATE = 0x09,
+ FM10K_PF_ATTR_ID_FLOW_HANDLE = 0x0A,
+ FM10K_PF_ATTR_ID_DELETE_FLOW = 0x0B,
+ FM10K_PF_ATTR_ID_PORT = 0x0C,
+ FM10K_PF_ATTR_ID_UPDATE_PVID = 0x0D,
+ FM10K_PF_ATTR_ID_1588_TIMESTAMP = 0x10,
+ FM10K_PF_ATTR_ID_1588_CLOCK_OWNER = 0x12,
+ FM10K_PF_ATTR_ID_MASTER_CLK_OFFSET = 0x14,
+};
+
+#define FM10K_MSG_LPORT_MAP_GLORT_SHIFT 0
+#define FM10K_MSG_LPORT_MAP_GLORT_SIZE 16
+#define FM10K_MSG_LPORT_MAP_MASK_SHIFT 16
+#define FM10K_MSG_LPORT_MAP_MASK_SIZE 16
+
+#define FM10K_MSG_UPDATE_PVID_GLORT_SHIFT 0
+#define FM10K_MSG_UPDATE_PVID_GLORT_SIZE 16
+#define FM10K_MSG_UPDATE_PVID_PVID_SHIFT 16
+#define FM10K_MSG_UPDATE_PVID_PVID_SIZE 16
+
+#define FM10K_MSG_ERR_PEP_NOT_SCHEDULED 280
+
+/* The following data structures are overlayed directly onto TLV mailbox
+ * messages, and must not break 4 byte alignment. Ensure the structures line
+ * up correctly as per their TLV definition.
+ */
+#ifdef C99
+#pragma pack(push, 4)
+#else
+#pragma pack(4)
+#endif /* C99 */
+
+struct fm10k_mac_update {
+ __le32 mac_lower;
+ __le16 mac_upper;
+ __le16 vlan;
+ __le16 glort;
+ u8 flags;
+ u8 action;
+};
+
+struct fm10k_global_table_data {
+ __le32 used;
+ __le32 avail;
+};
+
+struct fm10k_swapi_error {
+ __le32 status;
+ struct fm10k_global_table_data mac;
+ struct fm10k_global_table_data nexthop;
+ struct fm10k_global_table_data ffu;
+};
+
+struct fm10k_swapi_1588_timestamp {
+ __le64 egress;
+ __le64 ingress;
+ __le16 dglort;
+ __le16 sglort;
+};
+
+struct fm10k_swapi_1588_clock_owner {
+ __le16 glort;
+ __le16 enabled;
+};
+
+#ifdef C99
+#pragma pack(pop)
+#else
+#pragma pack()
+#endif /* C99 */
+
+s32 fm10k_msg_lport_map_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
+extern const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[];
+#define FM10K_PF_MSG_LPORT_MAP_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_LPORT_MAP, \
+ fm10k_lport_map_msg_attr, func)
+extern const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[];
+#define FM10K_PF_MSG_UPDATE_PVID_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_UPDATE_PVID, \
+ fm10k_update_pvid_msg_attr, func)
+
+s32 fm10k_msg_err_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
+extern const struct fm10k_tlv_attr fm10k_err_msg_attr[];
+#define FM10K_PF_MSG_ERR_HANDLER(msg, func) \
+ FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_##msg, fm10k_err_msg_attr, func)
+
+extern const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[];
+#define FM10K_PF_MSG_1588_TIMESTAMP_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_1588_TIMESTAMP, \
+ fm10k_1588_timestamp_msg_attr, func)
+
+s32 fm10k_msg_1588_clock_owner_pf(struct fm10k_hw *, u32 **,
+ struct fm10k_mbx_info *);
+extern const struct fm10k_tlv_attr fm10k_1588_clock_owner_attr[];
+#define FM10K_PF_MSG_1588_CLOCK_OWNER_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_1588_CLOCK_OWNER, \
+ fm10k_1588_clock_owner_attr, func)
+
+extern const struct fm10k_tlv_attr fm10k_master_clk_offset_attr[];
+#define FM10K_PF_MSG_MASTER_CLK_OFFSET_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_MASTER_CLK_OFFSET, \
+ fm10k_master_clk_offset_attr, func)
+
+s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
+s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *, u32 **,
+ struct fm10k_mbx_info *);
+s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *, u32 **,
+ struct fm10k_mbx_info *);
+#ifndef NO_DEFAULT_SRIOV_MSG_HANDLERS
+extern const struct fm10k_msg_data fm10k_iov_msg_data_pf[];
+#endif
+
+s32 fm10k_init_ops_pf(struct fm10k_hw *hw);
+
+void fm10k_update_hw_stats_pf(struct fm10k_hw *hw,
+ struct fm10k_hw_stats *stats);
+
+void fm10k_rebind_hw_stats_pf(struct fm10k_hw *hw,
+ struct fm10k_hw_stats *stats);
+#endif /* _FM10K_PF_H_ */
diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_tlv.c b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_tlv.c
new file mode 100644
index 000000000..adffc1bce
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_tlv.c
@@ -0,0 +1,887 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013 - 2015 Intel Corporation
+ */
+
+#include "fm10k_tlv.h"
+
+/**
+ * fm10k_tlv_msg_init - Initialize message block for TLV data storage
+ * @msg: Pointer to message block
+ * @msg_id: Message ID indicating message type
+ *
+ * This function returns success if provided with a valid message pointer
+ **/
+s32 fm10k_tlv_msg_init(u32 *msg, u16 msg_id)
+{
+ DEBUGFUNC("fm10k_tlv_msg_init");
+
+ /* verify pointer is not NULL */
+ if (!msg)
+ return FM10K_ERR_PARAM;
+
+ *msg = (FM10K_TLV_FLAGS_MSG << FM10K_TLV_FLAGS_SHIFT) | msg_id;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_tlv_attr_put_null_string - Place null terminated string on message
+ * @msg: Pointer to message block
+ * @attr_id: Attribute ID
+ * @string: Pointer to string to be stored in attribute
+ *
+ * This function will reorder a string to be CPU endian and store it in
+ * the attribute buffer. It will return success if provided with valid
+ * pointers.
+ **/
+static s32 fm10k_tlv_attr_put_null_string(u32 *msg, u16 attr_id,
+ const unsigned char *string)
+{
+ u32 attr_data = 0, len = 0;
+ u32 *attr;
+
+ DEBUGFUNC("fm10k_tlv_attr_put_null_string");
+
+ /* verify pointers are not NULL */
+ if (!string || !msg)
+ return FM10K_ERR_PARAM;
+
+ attr = &msg[FM10K_TLV_DWORD_LEN(*msg)];
+
+ /* copy string into local variable and then write to msg */
+ do {
+ /* write data to message */
+ if (len && !(len % 4)) {
+ attr[len / 4] = attr_data;
+ attr_data = 0;
+ }
+
+ /* record character to offset location */
+ attr_data |= (u32)(*string) << (8 * (len % 4));
+ len++;
+
+ /* test for NULL and then increment */
+ } while (*(string++));
+
+ /* write last piece of data to message */
+ attr[(len + 3) / 4] = attr_data;
+
+ /* record attribute header, update message length */
+ len <<= FM10K_TLV_LEN_SHIFT;
+ attr[0] = len | attr_id;
+
+ /* add header length to length */
+ len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT;
+ *msg += FM10K_TLV_LEN_ALIGN(len);
+
+ return FM10K_SUCCESS;
+}
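+
+/* A worked example of the packing above for the string "fm10k":
+ *
+ *   attr[0] = (6 << FM10K_TLV_LEN_SHIFT) | attr_id  (6 bytes incl. NUL)
+ *   attr[1] = 0x30316D66  ('f' 'm' '1' '0', low byte first)
+ *   attr[2] = 0x0000006B  ('k' followed by the terminating NUL)
+ *
+ * The 6 byte length is then rounded up to a 4 byte multiple (8) before
+ * being added to the message length.
+ */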
+
+/**
+ * fm10k_tlv_attr_get_null_string - Get null terminated string from attribute
+ * @attr: Pointer to attribute
+ * @string: Pointer to location of destination string
+ *
+ * This function pulls the string back out of the attribute and will place
+ * it in the array pointed to by string. It will return success if provided
+ * with valid pointers.
+ **/
+static s32 fm10k_tlv_attr_get_null_string(u32 *attr, unsigned char *string)
+{
+ u32 len;
+
+ DEBUGFUNC("fm10k_tlv_attr_get_null_string");
+
+ /* verify pointers are not NULL */
+ if (!string || !attr)
+ return FM10K_ERR_PARAM;
+
+ len = *attr >> FM10K_TLV_LEN_SHIFT;
+ attr++;
+
+ while (len--)
+ string[len] = (u8)(attr[len / 4] >> (8 * (len % 4)));
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_tlv_attr_put_mac_vlan - Store MAC/VLAN attribute in message
+ * @msg: Pointer to message block
+ * @attr_id: Attribute ID
+ * @mac_addr: MAC address to be stored
+ *
+ * This function will reorder a MAC address to be CPU endian and store it
+ * in the attribute buffer. It will return success if provided with
+ * valid pointers.
+ **/
+s32 fm10k_tlv_attr_put_mac_vlan(u32 *msg, u16 attr_id,
+ const u8 *mac_addr, u16 vlan)
+{
+ u32 len = ETH_ALEN << FM10K_TLV_LEN_SHIFT;
+ u32 *attr;
+
+ DEBUGFUNC("fm10k_tlv_attr_put_mac_vlan");
+
+ /* verify pointers are not NULL */
+ if (!msg || !mac_addr)
+ return FM10K_ERR_PARAM;
+
+ attr = &msg[FM10K_TLV_DWORD_LEN(*msg)];
+
+ /* record attribute header, update message length */
+ attr[0] = len | attr_id;
+
+ /* copy value into local variable and then write to msg */
+ attr[1] = FM10K_LE32_TO_CPU(*(const __le32 *)&mac_addr[0]);
+ attr[2] = FM10K_LE16_TO_CPU(*(const __le16 *)&mac_addr[4]);
+ attr[2] |= (u32)vlan << 16;
+
+ /* add header length to length */
+ len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT;
+ *msg += FM10K_TLV_LEN_ALIGN(len);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_tlv_attr_get_mac_vlan - Get MAC/VLAN stored in attribute
+ * @attr: Pointer to attribute
+ * @attr_id: Attribute ID
+ * @mac_addr: location of buffer to store MAC address
+ *
+ * This function pulls the MAC address back out of the attribute and will
+ * place it in the array pointed to by mac_addr. It will return success
+ * if provided with valid pointers.
+ **/
+s32 fm10k_tlv_attr_get_mac_vlan(u32 *attr, u8 *mac_addr, u16 *vlan)
+{
+ DEBUGFUNC("fm10k_tlv_attr_get_mac_vlan");
+
+ /* verify pointers are not NULL */
+ if (!mac_addr || !attr)
+ return FM10K_ERR_PARAM;
+
+ *(__le32 *)&mac_addr[0] = FM10K_CPU_TO_LE32(attr[1]);
+ *(__le16 *)&mac_addr[4] = FM10K_CPU_TO_LE16((u16)(attr[2]));
+ *vlan = (u16)(attr[2] >> 16);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_tlv_attr_put_bool - Add header indicating value "true"
+ * @msg: Pointer to message block
+ * @attr_id: Attribute ID
+ *
+ * This function will simply add an attribute header; the presence of
+ * the header means the attribute value is true, and its absence means
+ * it is false. The function will return success if provided with a
+ * valid pointer.
+ **/
+s32 fm10k_tlv_attr_put_bool(u32 *msg, u16 attr_id)
+{
+ DEBUGFUNC("fm10k_tlv_attr_put_bool");
+
+ /* verify pointers are not NULL */
+ if (!msg)
+ return FM10K_ERR_PARAM;
+
+ /* record attribute header */
+ msg[FM10K_TLV_DWORD_LEN(*msg)] = attr_id;
+
+ /* add header length to length */
+ *msg += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_tlv_attr_put_value - Store integer value attribute in message
+ * @msg: Pointer to message block
+ * @attr_id: Attribute ID
+ * @value: Value to be written
+ * @len: Size of value
+ *
+ * This function will place an integer value of up to 8 bytes in size
+ * in a message attribute. The function will return success provided
+ * that msg is a valid pointer, and len is 1, 2, 4, or 8.
+ **/
+s32 fm10k_tlv_attr_put_value(u32 *msg, u16 attr_id, s64 value, u32 len)
+{
+ u32 *attr;
+
+ DEBUGFUNC("fm10k_tlv_attr_put_value");
+
+ /* verify non-null msg and len is 1, 2, 4, or 8 */
+ if (!msg || !len || len > 8 || (len & (len - 1)))
+ return FM10K_ERR_PARAM;
+
+ attr = &msg[FM10K_TLV_DWORD_LEN(*msg)];
+
+ if (len < 4) {
+ attr[1] = (u32)value & (BIT(8 * len) - 1);
+ } else {
+ attr[1] = (u32)value;
+ if (len > 4)
+ attr[2] = (u32)(value >> 32);
+ }
+
+ /* record attribute header, update message length */
+ len <<= FM10K_TLV_LEN_SHIFT;
+ attr[0] = len | attr_id;
+
+ /* add header length to length */
+ len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT;
+ *msg += FM10K_TLV_LEN_ALIGN(len);
+
+ return FM10K_SUCCESS;
+}
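+
+/* Resulting layout for, e.g., fm10k_tlv_attr_put_u16(msg, id, 0x8765)
+ * (see fm10k_tlv.h for the wrapper macros; the value is illustrative):
+ *
+ *   attr[0] = (2 << FM10K_TLV_LEN_SHIFT) | id
+ *   attr[1] = 0x00008765   (value masked to its declared width)
+ *
+ * For an 8 byte value the upper 32 bits land in attr[2].
+ */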
+
+/**
+ * fm10k_tlv_attr_get_value - Get integer value stored in attribute
+ * @attr: Pointer to attribute
+ * @value: Pointer to destination buffer
+ * @len: Size of value
+ *
+ * This function will place an integer value of up to 8 bytes in size
+ * at the location pointed to by value. The function will return success
+ * provided that pointers are valid and the len value matches the
+ * attribute length.
+ **/
+s32 fm10k_tlv_attr_get_value(u32 *attr, void *value, u32 len)
+{
+ DEBUGFUNC("fm10k_tlv_attr_get_value");
+
+ /* verify pointers are not NULL */
+ if (!attr || !value)
+ return FM10K_ERR_PARAM;
+
+ if ((*attr >> FM10K_TLV_LEN_SHIFT) != len)
+ return FM10K_ERR_PARAM;
+
+ if (len == 8)
+ *(u64 *)value = ((u64)attr[2] << 32) | attr[1];
+ else if (len == 4)
+ *(u32 *)value = attr[1];
+ else if (len == 2)
+ *(u16 *)value = (u16)attr[1];
+ else
+ *(u8 *)value = (u8)attr[1];
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_tlv_attr_put_le_struct - Store little endian structure in message
+ * @msg: Pointer to message block
+ * @attr_id: Attribute ID
+ * @le_struct: Pointer to structure to be written
+ * @len: Size of le_struct
+ *
+ * This function will place a little endian structure value in a message
+ * attribute. The function will return success provided that all pointers
+ * are valid and length is a non-zero multiple of 4.
+ **/
+s32 fm10k_tlv_attr_put_le_struct(u32 *msg, u16 attr_id,
+ const void *le_struct, u32 len)
+{
+ const __le32 *le32_ptr = (const __le32 *)le_struct;
+ u32 *attr;
+ u32 i;
+
+ DEBUGFUNC("fm10k_tlv_attr_put_le_struct");
+
+ /* verify non-null msg and len is in 32 bit words */
+ if (!msg || !len || (len % 4))
+ return FM10K_ERR_PARAM;
+
+ attr = &msg[FM10K_TLV_DWORD_LEN(*msg)];
+
+ /* copy le32 structure into host byte order at 32b boundaries */
+ for (i = 0; i < (len / 4); i++)
+ attr[i + 1] = FM10K_LE32_TO_CPU(le32_ptr[i]);
+
+ /* record attribute header, update message length */
+ len <<= FM10K_TLV_LEN_SHIFT;
+ attr[0] = len | attr_id;
+
+ /* add header length to length */
+ len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT;
+ *msg += FM10K_TLV_LEN_ALIGN(len);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_tlv_attr_get_le_struct - Get little endian struct from attribute
+ * @attr: Pointer to attribute
+ * @le_struct: Pointer to structure to be written
+ * @len: Size of structure
+ *
+ * This function will place a little endian structure in the buffer
+ * pointed to by le_struct. The function will return success
+ * provided that pointers are valid and the len value matches the
+ * attribute length.
+ **/
+s32 fm10k_tlv_attr_get_le_struct(u32 *attr, void *le_struct, u32 len)
+{
+ __le32 *le32_ptr = (__le32 *)le_struct;
+ u32 i;
+
+ DEBUGFUNC("fm10k_tlv_attr_get_le_struct");
+
+ /* verify pointers are not NULL */
+ if (!le_struct || !attr)
+ return FM10K_ERR_PARAM;
+
+ if ((*attr >> FM10K_TLV_LEN_SHIFT) != len)
+ return FM10K_ERR_PARAM;
+
+ attr++;
+
+ for (i = 0; len; i++, len -= 4)
+ le32_ptr[i] = FM10K_CPU_TO_LE32(attr[i]);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_tlv_attr_nest_start - Start a set of nested attributes
+ * @msg: Pointer to message block
+ * @attr_id: Attribute ID
+ *
+ * This function will mark off a new nested region for encapsulating
+ * a given set of attributes, allowing a secondary structure to be
+ * embedded within the message. The
+ * function will return NULL on failure, and a pointer to the start
+ * of the nested attributes on success.
+ **/
+static u32 *fm10k_tlv_attr_nest_start(u32 *msg, u16 attr_id)
+{
+ u32 *attr;
+
+ DEBUGFUNC("fm10k_tlv_attr_nest_start");
+
+ /* verify pointer is not NULL */
+ if (!msg)
+ return NULL;
+
+ attr = &msg[FM10K_TLV_DWORD_LEN(*msg)];
+
+ attr[0] = attr_id;
+
+ /* return pointer to nest header */
+ return attr;
+}
+
+/**
+ * fm10k_tlv_attr_nest_stop - Stop a set of nested attributes
+ * @msg: Pointer to message block
+ *
+ * This function closes off an existing set of nested attributes. The
+ * message pointer should be pointing to the parent of the nest. So in
+ * the case of a nest within the nest this would be the outer nest pointer.
+ * This function will return success provided all pointers are valid.
+ **/
+static s32 fm10k_tlv_attr_nest_stop(u32 *msg)
+{
+ u32 *attr;
+ u32 len;
+
+ DEBUGFUNC("fm10k_tlv_attr_nest_stop");
+
+ /* verify pointer is not NULL */
+ if (!msg)
+ return FM10K_ERR_PARAM;
+
+ /* locate the nested header and retrieve its length */
+ attr = &msg[FM10K_TLV_DWORD_LEN(*msg)];
+ len = (attr[0] >> FM10K_TLV_LEN_SHIFT) << FM10K_TLV_LEN_SHIFT;
+
+ /* only include nest if data was added to it */
+ if (len) {
+ len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT;
+ *msg += len;
+ }
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_tlv_attr_validate - Validate attribute metadata
+ * @attr: Pointer to attribute
+ * @tlv_attr: Type and length info for attribute
+ *
+ * This function does some basic validation of the input TLV. It
+ * verifies the length, and in the case of null terminated strings
+ * it verifies that the last byte is null. The function will
+ * return FM10K_ERR_PARAM if any attribute is malformed, otherwise
+ * it returns 0.
+ **/
+STATIC s32 fm10k_tlv_attr_validate(u32 *attr,
+ const struct fm10k_tlv_attr *tlv_attr)
+{
+ u32 attr_id = *attr & FM10K_TLV_ID_MASK;
+ u16 len = *attr >> FM10K_TLV_LEN_SHIFT;
+
+ DEBUGFUNC("fm10k_tlv_attr_validate");
+
+ /* verify this is an attribute and not a message */
+ if (*attr & (FM10K_TLV_FLAGS_MSG << FM10K_TLV_FLAGS_SHIFT))
+ return FM10K_ERR_PARAM;
+
+ /* search through the list of attributes to find a matching ID */
+ while (tlv_attr->id < attr_id)
+ tlv_attr++;
+
+ /* if we didn't find a match then we should exit */
+ if (tlv_attr->id != attr_id)
+ return FM10K_NOT_IMPLEMENTED;
+
+ /* move to start of attribute data */
+ attr++;
+
+ switch (tlv_attr->type) {
+ case FM10K_TLV_NULL_STRING:
+ if (!len ||
+ (attr[(len - 1) / 4] & (0xFF << (8 * ((len - 1) % 4)))))
+ return FM10K_ERR_PARAM;
+ if (len > tlv_attr->len)
+ return FM10K_ERR_PARAM;
+ break;
+ case FM10K_TLV_MAC_ADDR:
+ if (len != ETH_ALEN)
+ return FM10K_ERR_PARAM;
+ break;
+ case FM10K_TLV_BOOL:
+ if (len)
+ return FM10K_ERR_PARAM;
+ break;
+ case FM10K_TLV_UNSIGNED:
+ case FM10K_TLV_SIGNED:
+ if (len != tlv_attr->len)
+ return FM10K_ERR_PARAM;
+ break;
+ case FM10K_TLV_LE_STRUCT:
+ /* struct must be 4 byte aligned */
+ if ((len % 4) || len != tlv_attr->len)
+ return FM10K_ERR_PARAM;
+ break;
+ case FM10K_TLV_NESTED:
+ /* nested attributes must be 4 byte aligned */
+ if (len % 4)
+ return FM10K_ERR_PARAM;
+ break;
+ default:
+ /* attribute id is mapped to bad value */
+ return FM10K_ERR_PARAM;
+ }
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_tlv_attr_parse - Parses stream of attribute data
+ * @attr: Pointer to attribute list
+ * @results: Pointer array to store pointers to attributes
+ * @tlv_attr: Type and length info for attributes
+ *
+ * This function validates a stream of attributes and parses them
+ * up into an array of pointers stored in results. The function will
+ * return FM10K_ERR_PARAM on any input or message error,
+ * FM10K_NOT_IMPLEMENTED for any attribute that is outside of the array
+ * and 0 on success. Any attributes not found in tlv_attr will be silently
+ * ignored.
+ **/
+static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results,
+ const struct fm10k_tlv_attr *tlv_attr)
+{
+ u32 i, attr_id, offset = 0;
+ s32 err = 0;
+ u16 len;
+
+ DEBUGFUNC("fm10k_tlv_attr_parse");
+
+ /* verify pointers are not NULL */
+ if (!attr || !results)
+ return FM10K_ERR_PARAM;
+
+ /* initialize results to NULL */
+ for (i = 0; i < FM10K_TLV_RESULTS_MAX; i++)
+ results[i] = NULL;
+
+ /* pull length from the message header */
+ len = *attr >> FM10K_TLV_LEN_SHIFT;
+
+ /* no attributes to parse if there is no length */
+ if (!len)
+ return FM10K_SUCCESS;
+
+ /* no attributes to parse, just raw data, message becomes attribute */
+ if (!tlv_attr) {
+ results[0] = attr;
+ return FM10K_SUCCESS;
+ }
+
+ /* move to start of attribute data */
+ attr++;
+
+ /* run through list parsing all attributes */
+ while (offset < len) {
+ attr_id = *attr & FM10K_TLV_ID_MASK;
+
+ if (attr_id >= FM10K_TLV_RESULTS_MAX)
+ return FM10K_NOT_IMPLEMENTED;
+
+ err = fm10k_tlv_attr_validate(attr, tlv_attr);
+ if (err == FM10K_NOT_IMPLEMENTED)
+ ; /* silently ignore non-implemented attributes */
+ else if (err)
+ return err;
+ else
+ results[attr_id] = attr;
+
+ /* update offset */
+ offset += FM10K_TLV_DWORD_LEN(*attr) * 4;
+
+ /* move to next attribute */
+ attr = &attr[FM10K_TLV_DWORD_LEN(*attr)];
+ }
+
+ /* we should find ourselves at the end of the list */
+ if (offset != len)
+ return FM10K_ERR_PARAM;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_tlv_msg_parse - Parses message header and calls function handler
+ * @hw: Pointer to hardware structure
+ * @msg: Pointer to message
+ * @mbx: Pointer to mailbox information structure
+ * @func: Function array containing list of message handling functions
+ *
+ * This function should be the first function called upon receiving a
+ * message. The handler will identify the message type and call the correct
+ * handler for the given message. It will return the value from the function
+ * call on a recognized message type, otherwise it will return
+ * FM10K_NOT_IMPLEMENTED on an unrecognized type.
+ **/
+s32 fm10k_tlv_msg_parse(struct fm10k_hw *hw, u32 *msg,
+ struct fm10k_mbx_info *mbx,
+ const struct fm10k_msg_data *data)
+{
+ u32 *results[FM10K_TLV_RESULTS_MAX];
+ u32 msg_id;
+ s32 err;
+
+ DEBUGFUNC("fm10k_tlv_msg_parse");
+
+ /* verify pointer is not NULL */
+ if (!msg || !data)
+ return FM10K_ERR_PARAM;
+
+ /* verify this is a message and not an attribute */
+ if (!(*msg & (FM10K_TLV_FLAGS_MSG << FM10K_TLV_FLAGS_SHIFT)))
+ return FM10K_ERR_PARAM;
+
+ /* grab message ID */
+ msg_id = *msg & FM10K_TLV_ID_MASK;
+
+ while (data->id < msg_id)
+ data++;
+
+ /* if we didn't find it then pass it up as an error */
+ if (data->id != msg_id) {
+ while (data->id != FM10K_TLV_ERROR)
+ data++;
+ }
+
+ /* parse the attributes into the results list */
+ err = fm10k_tlv_attr_parse(msg, results, data->attr);
+ if (err < 0)
+ return err;
+
+ return data->func(hw, results, mbx);
+}
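+
+/* A minimal usage sketch of the dispatch above. The handler table here
+ * is hypothetical; real tables (e.g. fm10k_msg_data_pf in fm10k_pf.c)
+ * are built the same way: entries are scanned in ascending ID order and
+ * the table must contain an FM10K_TLV_ERROR entry so unknown message
+ * IDs fall through to an error handler:
+ *
+ *   static const struct fm10k_msg_data example_handlers[] = {
+ *           FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
+ *           FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
+ *   };
+ *
+ *   err = fm10k_tlv_msg_parse(hw, msg, mbx, example_handlers);
+ */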
+
+/**
+ * fm10k_tlv_msg_error - Default handler for unrecognized TLV message IDs
+ * @hw: Pointer to hardware structure
+ * @results: Pointer array to message, results[0] is pointer to message
+ * @mbx: Unused mailbox pointer
+ *
+ * This function is a default handler for unrecognized messages. At a
+ * minimum it just indicates that the message requested was
+ * unimplemented.
+ **/
+s32 fm10k_tlv_msg_error(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ UNREFERENCED_3PARAMETER(hw, results, mbx);
+ DEBUGOUT1("Unknown message ID %u\n", **results & FM10K_TLV_ID_MASK);
+ return FM10K_NOT_IMPLEMENTED;
+}
+
+STATIC const unsigned char test_str[] = "fm10k";
+STATIC const unsigned char test_mac[ETH_ALEN] = { 0x12, 0x34, 0x56,
+ 0x78, 0x9a, 0xbc };
+STATIC const u16 test_vlan = 0x0FED;
+STATIC const u64 test_u64 = 0xfedcba9876543210ull;
+STATIC const u32 test_u32 = 0x87654321;
+STATIC const u16 test_u16 = 0x8765;
+STATIC const u8 test_u8 = 0x87;
+STATIC const s64 test_s64 = -0x123456789abcdef0ll;
+STATIC const s32 test_s32 = -0x1235678;
+STATIC const s16 test_s16 = -0x1234;
+STATIC const s8 test_s8 = -0x12;
+STATIC const __le32 test_le[2] = { FM10K_CPU_TO_LE32(0x12345678),
+ FM10K_CPU_TO_LE32(0x9abcdef0)};
+
+/* The message below is meant to be used as a test message to demonstrate
+ * how to use the TLV interface and to test the types. Normally this code
+ * would be compiled out by stripping the code wrapped in FM10K_TLV_TEST_MSG
+ */
+const struct fm10k_tlv_attr fm10k_tlv_msg_test_attr[] = {
+ FM10K_TLV_ATTR_NULL_STRING(FM10K_TEST_MSG_STRING, 80),
+ FM10K_TLV_ATTR_MAC_ADDR(FM10K_TEST_MSG_MAC_ADDR),
+ FM10K_TLV_ATTR_U8(FM10K_TEST_MSG_U8),
+ FM10K_TLV_ATTR_U16(FM10K_TEST_MSG_U16),
+ FM10K_TLV_ATTR_U32(FM10K_TEST_MSG_U32),
+ FM10K_TLV_ATTR_U64(FM10K_TEST_MSG_U64),
+ FM10K_TLV_ATTR_S8(FM10K_TEST_MSG_S8),
+ FM10K_TLV_ATTR_S16(FM10K_TEST_MSG_S16),
+ FM10K_TLV_ATTR_S32(FM10K_TEST_MSG_S32),
+ FM10K_TLV_ATTR_S64(FM10K_TEST_MSG_S64),
+ FM10K_TLV_ATTR_LE_STRUCT(FM10K_TEST_MSG_LE_STRUCT, 8),
+ FM10K_TLV_ATTR_NESTED(FM10K_TEST_MSG_NESTED),
+ FM10K_TLV_ATTR_S32(FM10K_TEST_MSG_RESULT),
+ FM10K_TLV_ATTR_LAST
+};
+
+/**
+ * fm10k_tlv_msg_test_generate_data - Stuff message with data
+ * @msg: Pointer to message
+ * @attr_flags: List of flags indicating what attributes to add
+ *
+ * This function is meant to load a message buffer with attribute data
+ **/
+STATIC void fm10k_tlv_msg_test_generate_data(u32 *msg, u32 attr_flags)
+{
+ DEBUGFUNC("fm10k_tlv_msg_test_generate_data");
+
+ if (attr_flags & BIT(FM10K_TEST_MSG_STRING))
+ fm10k_tlv_attr_put_null_string(msg, FM10K_TEST_MSG_STRING,
+ test_str);
+ if (attr_flags & BIT(FM10K_TEST_MSG_MAC_ADDR))
+ fm10k_tlv_attr_put_mac_vlan(msg, FM10K_TEST_MSG_MAC_ADDR,
+ test_mac, test_vlan);
+ if (attr_flags & BIT(FM10K_TEST_MSG_U8))
+ fm10k_tlv_attr_put_u8(msg, FM10K_TEST_MSG_U8, test_u8);
+ if (attr_flags & BIT(FM10K_TEST_MSG_U16))
+ fm10k_tlv_attr_put_u16(msg, FM10K_TEST_MSG_U16, test_u16);
+ if (attr_flags & BIT(FM10K_TEST_MSG_U32))
+ fm10k_tlv_attr_put_u32(msg, FM10K_TEST_MSG_U32, test_u32);
+ if (attr_flags & BIT(FM10K_TEST_MSG_U64))
+ fm10k_tlv_attr_put_u64(msg, FM10K_TEST_MSG_U64, test_u64);
+ if (attr_flags & BIT(FM10K_TEST_MSG_S8))
+ fm10k_tlv_attr_put_s8(msg, FM10K_TEST_MSG_S8, test_s8);
+ if (attr_flags & BIT(FM10K_TEST_MSG_S16))
+ fm10k_tlv_attr_put_s16(msg, FM10K_TEST_MSG_S16, test_s16);
+ if (attr_flags & BIT(FM10K_TEST_MSG_S32))
+ fm10k_tlv_attr_put_s32(msg, FM10K_TEST_MSG_S32, test_s32);
+ if (attr_flags & BIT(FM10K_TEST_MSG_S64))
+ fm10k_tlv_attr_put_s64(msg, FM10K_TEST_MSG_S64, test_s64);
+ if (attr_flags & BIT(FM10K_TEST_MSG_LE_STRUCT))
+ fm10k_tlv_attr_put_le_struct(msg, FM10K_TEST_MSG_LE_STRUCT,
+ test_le, 8);
+}
+
+/**
+ * fm10k_tlv_msg_test_create - Create a test message testing all attributes
+ * @msg: Pointer to message
+ * @attr_flags: List of flags indicating what attributes to add
+ *
+ * This function is meant to load a message buffer with all attribute types
+ * including a nested attribute.
+ **/
+void fm10k_tlv_msg_test_create(u32 *msg, u32 attr_flags)
+{
+ u32 *nest = NULL;
+
+ DEBUGFUNC("fm10k_tlv_msg_test_create");
+
+ fm10k_tlv_msg_init(msg, FM10K_TLV_MSG_ID_TEST);
+
+ fm10k_tlv_msg_test_generate_data(msg, attr_flags);
+
+ /* check for nested attributes */
+ attr_flags >>= FM10K_TEST_MSG_NESTED;
+
+ if (attr_flags) {
+ nest = fm10k_tlv_attr_nest_start(msg, FM10K_TEST_MSG_NESTED);
+
+ fm10k_tlv_msg_test_generate_data(nest, attr_flags);
+
+ fm10k_tlv_attr_nest_stop(msg);
+ }
+}
+
+/**
+ * fm10k_tlv_msg_test - Validate all results on test message receive
+ * @hw: Pointer to hardware structure
+ * @results: Pointer array to attributes in the message
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This function does a check to verify all attributes match what the test
+ * message placed in the message buffer. It is the default handler
+ * for TLV test messages.
+ **/
+s32 fm10k_tlv_msg_test(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ u32 *nest_results[FM10K_TLV_RESULTS_MAX];
+ unsigned char result_str[80];
+ unsigned char result_mac[ETH_ALEN];
+ s32 err = FM10K_SUCCESS;
+ __le32 result_le[2];
+ u16 result_vlan;
+ u64 result_u64;
+ u32 result_u32;
+ u16 result_u16;
+ u8 result_u8;
+ s64 result_s64;
+ s32 result_s32;
+ s16 result_s16;
+ s8 result_s8;
+ u32 reply[3];
+
+ DEBUGFUNC("fm10k_tlv_msg_test");
+
+ /* retrieve results of a previous test */
+ if (!!results[FM10K_TEST_MSG_RESULT])
+ return fm10k_tlv_attr_get_s32(results[FM10K_TEST_MSG_RESULT],
+ &mbx->test_result);
+
+parse_nested:
+ if (!!results[FM10K_TEST_MSG_STRING]) {
+ err = fm10k_tlv_attr_get_null_string(
+ results[FM10K_TEST_MSG_STRING],
+ result_str);
+ if (!err && memcmp(test_str, result_str, sizeof(test_str)))
+ err = FM10K_ERR_INVALID_VALUE;
+ if (err)
+ goto report_result;
+ }
+ if (!!results[FM10K_TEST_MSG_MAC_ADDR]) {
+ err = fm10k_tlv_attr_get_mac_vlan(
+ results[FM10K_TEST_MSG_MAC_ADDR],
+ result_mac, &result_vlan);
+ if (!err && memcmp(test_mac, result_mac, ETH_ALEN))
+ err = FM10K_ERR_INVALID_VALUE;
+ if (!err && test_vlan != result_vlan)
+ err = FM10K_ERR_INVALID_VALUE;
+ if (err)
+ goto report_result;
+ }
+ if (!!results[FM10K_TEST_MSG_U8]) {
+ err = fm10k_tlv_attr_get_u8(results[FM10K_TEST_MSG_U8],
+ &result_u8);
+ if (!err && test_u8 != result_u8)
+ err = FM10K_ERR_INVALID_VALUE;
+ if (err)
+ goto report_result;
+ }
+ if (!!results[FM10K_TEST_MSG_U16]) {
+ err = fm10k_tlv_attr_get_u16(results[FM10K_TEST_MSG_U16],
+ &result_u16);
+ if (!err && test_u16 != result_u16)
+ err = FM10K_ERR_INVALID_VALUE;
+ if (err)
+ goto report_result;
+ }
+ if (!!results[FM10K_TEST_MSG_U32]) {
+ err = fm10k_tlv_attr_get_u32(results[FM10K_TEST_MSG_U32],
+ &result_u32);
+ if (!err && test_u32 != result_u32)
+ err = FM10K_ERR_INVALID_VALUE;
+ if (err)
+ goto report_result;
+ }
+ if (!!results[FM10K_TEST_MSG_U64]) {
+ err = fm10k_tlv_attr_get_u64(results[FM10K_TEST_MSG_U64],
+ &result_u64);
+ if (!err && test_u64 != result_u64)
+ err = FM10K_ERR_INVALID_VALUE;
+ if (err)
+ goto report_result;
+ }
+ if (!!results[FM10K_TEST_MSG_S8]) {
+ err = fm10k_tlv_attr_get_s8(results[FM10K_TEST_MSG_S8],
+ &result_s8);
+ if (!err && test_s8 != result_s8)
+ err = FM10K_ERR_INVALID_VALUE;
+ if (err)
+ goto report_result;
+ }
+ if (!!results[FM10K_TEST_MSG_S16]) {
+ err = fm10k_tlv_attr_get_s16(results[FM10K_TEST_MSG_S16],
+ &result_s16);
+ if (!err && test_s16 != result_s16)
+ err = FM10K_ERR_INVALID_VALUE;
+ if (err)
+ goto report_result;
+ }
+ if (!!results[FM10K_TEST_MSG_S32]) {
+ err = fm10k_tlv_attr_get_s32(results[FM10K_TEST_MSG_S32],
+ &result_s32);
+ if (!err && test_s32 != result_s32)
+ err = FM10K_ERR_INVALID_VALUE;
+ if (err)
+ goto report_result;
+ }
+ if (!!results[FM10K_TEST_MSG_S64]) {
+ err = fm10k_tlv_attr_get_s64(results[FM10K_TEST_MSG_S64],
+ &result_s64);
+ if (!err && test_s64 != result_s64)
+ err = FM10K_ERR_INVALID_VALUE;
+ if (err)
+ goto report_result;
+ }
+ if (!!results[FM10K_TEST_MSG_LE_STRUCT]) {
+ err = fm10k_tlv_attr_get_le_struct(
+ results[FM10K_TEST_MSG_LE_STRUCT],
+ result_le,
+ sizeof(result_le));
+ if (!err && memcmp(test_le, result_le, sizeof(test_le)))
+ err = FM10K_ERR_INVALID_VALUE;
+ if (err)
+ goto report_result;
+ }
+
+ if (!!results[FM10K_TEST_MSG_NESTED]) {
+ /* clear any pointers */
+ memset(nest_results, 0, sizeof(nest_results));
+
+ /* parse the nested attributes into the nest results list */
+ err = fm10k_tlv_attr_parse(results[FM10K_TEST_MSG_NESTED],
+ nest_results,
+ fm10k_tlv_msg_test_attr);
+ if (err)
+ goto report_result;
+
+ /* loop back through to the start */
+ results = nest_results;
+ goto parse_nested;
+ }
+
+report_result:
+ /* generate reply with test result */
+ fm10k_tlv_msg_init(reply, FM10K_TLV_MSG_ID_TEST);
+ fm10k_tlv_attr_put_s32(reply, FM10K_TEST_MSG_RESULT, err);
+
+ /* load onto outgoing mailbox */
+ return mbx->ops.enqueue_tx(hw, mbx, reply);
+}
diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_tlv.h b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_tlv.h
new file mode 100644
index 000000000..af2e4c76a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_tlv.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013 - 2015 Intel Corporation
+ */
+
+#ifndef _FM10K_TLV_H_
+#define _FM10K_TLV_H_
+
+/* forward declaration */
+struct fm10k_msg_data;
+
+#include "fm10k_type.h"
+
+/* Message / Argument header format
+ * 3 2 1 0
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Length | Flags | Type / ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * The message header format described here is used for messages that are
+ * passed between the PF and the VF. To allow for messages larger than the
+ * mailbox size, a message with the above header is segmented and
+ * transported through the mailbox to the other side, where
+ * it is reassembled. It contains the following fields:
+ * Length: Length of the message in bytes excluding the message header
+ * Flags: TBD
+ * Type/ID: These will be the message/argument types we pass
+ */
+/* message data header */
+#define FM10K_TLV_ID_SHIFT 0
+#define FM10K_TLV_ID_SIZE 16
+#define FM10K_TLV_ID_MASK ((1u << FM10K_TLV_ID_SIZE) - 1)
+#define FM10K_TLV_FLAGS_SHIFT 16
+#define FM10K_TLV_FLAGS_MSG 0x1
+#define FM10K_TLV_FLAGS_SIZE 4
+#define FM10K_TLV_LEN_SHIFT 20
+#define FM10K_TLV_LEN_SIZE 12
+
+#define FM10K_TLV_HDR_LEN 4ul
+#define FM10K_TLV_LEN_ALIGN_MASK \
+ ((FM10K_TLV_HDR_LEN - 1) << FM10K_TLV_LEN_SHIFT)
+#define FM10K_TLV_LEN_ALIGN(tlv) \
+ (((tlv) + FM10K_TLV_LEN_ALIGN_MASK) & ~FM10K_TLV_LEN_ALIGN_MASK)
+#define FM10K_TLV_DWORD_LEN(tlv) \
+ ((u16)((FM10K_TLV_LEN_ALIGN(tlv)) >> (FM10K_TLV_LEN_SHIFT + 2)) + 1)
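+
+/* For example, an attribute whose length field is 6 aligns to 8 bytes,
+ * so FM10K_TLV_DWORD_LEN yields 8 / 4 + 1 = 3 dwords: one header dword
+ * plus two dwords of data.
+ */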
+
+#define FM10K_TLV_RESULTS_MAX 32
+
+enum fm10k_tlv_type {
+ FM10K_TLV_NULL_STRING,
+ FM10K_TLV_MAC_ADDR,
+ FM10K_TLV_BOOL,
+ FM10K_TLV_UNSIGNED,
+ FM10K_TLV_SIGNED,
+ FM10K_TLV_LE_STRUCT,
+ FM10K_TLV_NESTED,
+ FM10K_TLV_MAX_TYPE
+};
+
+#define FM10K_TLV_ERROR (~0u)
+
+struct fm10k_tlv_attr {
+ unsigned int id;
+ enum fm10k_tlv_type type;
+ u16 len;
+};
+
+#define FM10K_TLV_ATTR_NULL_STRING(id, len) { id, FM10K_TLV_NULL_STRING, len }
+#define FM10K_TLV_ATTR_MAC_ADDR(id) { id, FM10K_TLV_MAC_ADDR, 6 }
+#define FM10K_TLV_ATTR_BOOL(id) { id, FM10K_TLV_BOOL, 0 }
+#define FM10K_TLV_ATTR_U8(id) { id, FM10K_TLV_UNSIGNED, 1 }
+#define FM10K_TLV_ATTR_U16(id) { id, FM10K_TLV_UNSIGNED, 2 }
+#define FM10K_TLV_ATTR_U32(id) { id, FM10K_TLV_UNSIGNED, 4 }
+#define FM10K_TLV_ATTR_U64(id) { id, FM10K_TLV_UNSIGNED, 8 }
+#define FM10K_TLV_ATTR_S8(id) { id, FM10K_TLV_SIGNED, 1 }
+#define FM10K_TLV_ATTR_S16(id) { id, FM10K_TLV_SIGNED, 2 }
+#define FM10K_TLV_ATTR_S32(id) { id, FM10K_TLV_SIGNED, 4 }
+#define FM10K_TLV_ATTR_S64(id) { id, FM10K_TLV_SIGNED, 8 }
+#define FM10K_TLV_ATTR_LE_STRUCT(id, len) { id, FM10K_TLV_LE_STRUCT, len }
+#define FM10K_TLV_ATTR_NESTED(id) { id, FM10K_TLV_NESTED }
+#define FM10K_TLV_ATTR_LAST { FM10K_TLV_ERROR }
+
+struct fm10k_msg_data {
+ unsigned int id;
+ const struct fm10k_tlv_attr *attr;
+ s32 (*func)(struct fm10k_hw *, u32 **,
+ struct fm10k_mbx_info *);
+};
+
+#define FM10K_MSG_HANDLER(id, attr, func) { id, attr, func }
+
+s32 fm10k_tlv_msg_init(u32 *, u16);
+s32 fm10k_tlv_attr_put_mac_vlan(u32 *, u16, const u8 *, u16);
+s32 fm10k_tlv_attr_get_mac_vlan(u32 *, u8 *, u16 *);
+s32 fm10k_tlv_attr_put_bool(u32 *, u16);
+s32 fm10k_tlv_attr_put_value(u32 *, u16, s64, u32);
+#define fm10k_tlv_attr_put_u8(msg, attr_id, val) \
+ fm10k_tlv_attr_put_value(msg, attr_id, val, 1)
+#define fm10k_tlv_attr_put_u16(msg, attr_id, val) \
+ fm10k_tlv_attr_put_value(msg, attr_id, val, 2)
+#define fm10k_tlv_attr_put_u32(msg, attr_id, val) \
+ fm10k_tlv_attr_put_value(msg, attr_id, val, 4)
+#define fm10k_tlv_attr_put_u64(msg, attr_id, val) \
+ fm10k_tlv_attr_put_value(msg, attr_id, val, 8)
+#define fm10k_tlv_attr_put_s8(msg, attr_id, val) \
+ fm10k_tlv_attr_put_value(msg, attr_id, val, 1)
+#define fm10k_tlv_attr_put_s16(msg, attr_id, val) \
+ fm10k_tlv_attr_put_value(msg, attr_id, val, 2)
+#define fm10k_tlv_attr_put_s32(msg, attr_id, val) \
+ fm10k_tlv_attr_put_value(msg, attr_id, val, 4)
+#define fm10k_tlv_attr_put_s64(msg, attr_id, val) \
+ fm10k_tlv_attr_put_value(msg, attr_id, val, 8)
+s32 fm10k_tlv_attr_get_value(u32 *, void *, u32);
+#define fm10k_tlv_attr_get_u8(attr, ptr) \
+ fm10k_tlv_attr_get_value(attr, ptr, sizeof(u8))
+#define fm10k_tlv_attr_get_u16(attr, ptr) \
+ fm10k_tlv_attr_get_value(attr, ptr, sizeof(u16))
+#define fm10k_tlv_attr_get_u32(attr, ptr) \
+ fm10k_tlv_attr_get_value(attr, ptr, sizeof(u32))
+#define fm10k_tlv_attr_get_u64(attr, ptr) \
+ fm10k_tlv_attr_get_value(attr, ptr, sizeof(u64))
+#define fm10k_tlv_attr_get_s8(attr, ptr) \
+ fm10k_tlv_attr_get_value(attr, ptr, sizeof(s8))
+#define fm10k_tlv_attr_get_s16(attr, ptr) \
+ fm10k_tlv_attr_get_value(attr, ptr, sizeof(s16))
+#define fm10k_tlv_attr_get_s32(attr, ptr) \
+ fm10k_tlv_attr_get_value(attr, ptr, sizeof(s32))
+#define fm10k_tlv_attr_get_s64(attr, ptr) \
+ fm10k_tlv_attr_get_value(attr, ptr, sizeof(s64))
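+
+/* Typical put/get pairing built on the helpers above (a sketch; buffer
+ * size and attribute choice are illustrative):
+ *
+ *   u32 msg[8];
+ *   u32 val;
+ *
+ *   fm10k_tlv_msg_init(msg, FM10K_TLV_MSG_ID_TEST);
+ *   fm10k_tlv_attr_put_u32(msg, FM10K_TEST_MSG_U32, 0x87654321);
+ *
+ * and, in a handler given the parsed results array:
+ *
+ *   fm10k_tlv_attr_get_u32(results[FM10K_TEST_MSG_U32], &val);
+ */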
+s32 fm10k_tlv_attr_put_le_struct(u32 *, u16, const void *, u32);
+s32 fm10k_tlv_attr_get_le_struct(u32 *, void *, u32);
+s32 fm10k_tlv_msg_parse(struct fm10k_hw *, u32 *, struct fm10k_mbx_info *,
+ const struct fm10k_msg_data *);
+s32 fm10k_tlv_msg_error(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *);
+
+#define FM10K_TLV_MSG_ID_TEST 0
+
+enum fm10k_tlv_test_attr_id {
+ FM10K_TEST_MSG_UNSET,
+ FM10K_TEST_MSG_STRING,
+ FM10K_TEST_MSG_MAC_ADDR,
+ FM10K_TEST_MSG_U8,
+ FM10K_TEST_MSG_U16,
+ FM10K_TEST_MSG_U32,
+ FM10K_TEST_MSG_U64,
+ FM10K_TEST_MSG_S8,
+ FM10K_TEST_MSG_S16,
+ FM10K_TEST_MSG_S32,
+ FM10K_TEST_MSG_S64,
+ FM10K_TEST_MSG_LE_STRUCT,
+ FM10K_TEST_MSG_NESTED,
+ FM10K_TEST_MSG_RESULT,
+ FM10K_TEST_MSG_MAX
+};
+
+extern const struct fm10k_tlv_attr fm10k_tlv_msg_test_attr[];
+void fm10k_tlv_msg_test_create(u32 *, u32);
+s32 fm10k_tlv_msg_test(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
+
+#define FM10K_TLV_MSG_TEST_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_TLV_MSG_ID_TEST, fm10k_tlv_msg_test_attr, func)
+#define FM10K_TLV_MSG_ERROR_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_TLV_ERROR, NULL, func)
+#endif /* _FM10K_TLV_H_ */
diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_type.h b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_type.h
new file mode 100644
index 000000000..84781ba9b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_type.h
@@ -0,0 +1,854 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013 - 2015 Intel Corporation
+ */
+
+#ifndef _FM10K_TYPE_H_
+#define _FM10K_TYPE_H_
+
+/* forward declaration */
+struct fm10k_hw;
+
+#include "fm10k_osdep.h"
+#include "fm10k_mbx.h"
+
+#define FM10K_INTEL_VENDOR_ID 0x8086
+#define FM10K_DEV_ID_PF 0x15A4
+#define FM10K_DEV_ID_VF 0x15A5
+#ifdef BOULDER_RAPIDS_HW
+#define FM10K_DEV_ID_SDI_FM10420_QDA2 0x15D0
+#endif /* BOULDER_RAPIDS_HW */
+#ifdef ATWOOD_CHANNEL_HW
+#define FM10K_DEV_ID_SDI_FM10420_DA2 0x15D5
+#endif /* ATWOOD_CHANNEL_HW */
+
+#ifndef LINUX_MACROS
+#ifndef BIT
+#define BIT(a) (1UL << (a))
+#endif
+#endif /* LINUX_MACROS */
+
+#define FM10K_MAX_QUEUES 256
+#define FM10K_MAX_QUEUES_PF 128
+#define FM10K_MAX_QUEUES_POOL 16
+
+#define FM10K_48_BIT_MASK 0x0000FFFFFFFFFFFFull
+#define FM10K_STAT_VALID 0x80000000
+
+/* PCI Bus Info */
+#define FM10K_PCIE_LINK_CAP 0x7C
+#define FM10K_PCIE_LINK_STATUS 0x82
+#define FM10K_PCIE_LINK_WIDTH 0x3F0
+#define FM10K_PCIE_LINK_WIDTH_1 0x10
+#define FM10K_PCIE_LINK_WIDTH_2 0x20
+#define FM10K_PCIE_LINK_WIDTH_4 0x40
+#define FM10K_PCIE_LINK_WIDTH_8 0x80
+#define FM10K_PCIE_LINK_SPEED 0xF
+#define FM10K_PCIE_LINK_SPEED_2500 0x1
+#define FM10K_PCIE_LINK_SPEED_5000 0x2
+#define FM10K_PCIE_LINK_SPEED_8000 0x3
+
+/* PCIe payload size */
+#define FM10K_PCIE_DEV_CAP 0x74
+#define FM10K_PCIE_DEV_CAP_PAYLOAD 0x07
+#define FM10K_PCIE_DEV_CAP_PAYLOAD_128 0x00
+#define FM10K_PCIE_DEV_CAP_PAYLOAD_256 0x01
+#define FM10K_PCIE_DEV_CAP_PAYLOAD_512 0x02
+#define FM10K_PCIE_DEV_CTRL 0x78
+#define FM10K_PCIE_DEV_CTRL_PAYLOAD 0xE0
+#define FM10K_PCIE_DEV_CTRL_PAYLOAD_128 0x00
+#define FM10K_PCIE_DEV_CTRL_PAYLOAD_256 0x20
+#define FM10K_PCIE_DEV_CTRL_PAYLOAD_512 0x40
+
+/* PCIe MSI-X Capability info */
+#define FM10K_PCI_MSIX_MSG_CTRL 0xB2
+#define FM10K_PCI_MSIX_MSG_CTRL_TBL_SZ_MASK 0x7FF
+#define FM10K_MAX_MSIX_VECTORS 256
+#define FM10K_MAX_VECTORS_PF 256
+#define FM10K_MAX_VECTORS_POOL 32
+
+/* PCIe SR-IOV Info */
+#define FM10K_PCIE_SRIOV_CTRL 0x190
+#define FM10K_PCIE_SRIOV_CTRL_VFARI 0x10
+
+#define FM10K_SUCCESS 0
+#define FM10K_ERR_DEVICE_NOT_SUPPORTED -1
+#define FM10K_ERR_PARAM -2
+#define FM10K_ERR_NO_RESOURCES -3
+#define FM10K_ERR_REQUESTS_PENDING -4
+#define FM10K_ERR_RESET_REQUESTED -5
+#define FM10K_ERR_DMA_PENDING -6
+#define FM10K_ERR_RESET_FAILED -7
+#define FM10K_ERR_INVALID_MAC_ADDR -8
+#define FM10K_ERR_INVALID_VALUE -9
+#define FM10K_NOT_IMPLEMENTED 0x7FFFFFFF
+
+#define UNREFERENCED_XPARAMETER
+#define UNREFERENCED_1PARAMETER(_p) (_p)
+#define UNREFERENCED_2PARAMETER(_p, _q) do { (_p); (_q); } while (0)
+#define UNREFERENCED_3PARAMETER(_p, _q, _r) do { (_p); (_q); (_r); } while (0)
+
+/* Start of PF registers */
+#define FM10K_CTRL 0x0000
+#define FM10K_CTRL_BAR4_ALLOWED 0x00000004
+
+#define FM10K_CTRL_EXT 0x0001
+#define FM10K_GCR 0x0003
+#define FM10K_GCR_EXT 0x0005
+
+/* Interrupt control registers */
+#define FM10K_EICR 0x0006
+#define FM10K_EICR_PCA_FAULT 0x00000001
+#define FM10K_EICR_THI_FAULT 0x00000004
+#define FM10K_EICR_FUM_FAULT 0x00000020
+#define FM10K_EICR_FAULT_MASK 0x0000003F
+#define FM10K_EICR_MAILBOX 0x00000040
+#define FM10K_EICR_SWITCHREADY 0x00000080
+#define FM10K_EICR_SWITCHNOTREADY 0x00000100
+#define FM10K_EICR_SWITCHINTERRUPT 0x00000200
+#define FM10K_EICR_SRAMERROR 0x00000400
+#define FM10K_EICR_VFLR 0x00000800
+#define FM10K_EICR_MAXHOLDTIME 0x00001000
+#define FM10K_EIMR 0x0007
+#define FM10K_EIMR_PCA_FAULT 0x00000001
+#define FM10K_EIMR_THI_FAULT 0x00000010
+#define FM10K_EIMR_FUM_FAULT 0x00000400
+#define FM10K_EIMR_MAILBOX 0x00001000
+#define FM10K_EIMR_SWITCHREADY 0x00004000
+#define FM10K_EIMR_SWITCHNOTREADY 0x00010000
+#define FM10K_EIMR_SWITCHINTERRUPT 0x00040000
+#define FM10K_EIMR_SRAMERROR 0x00100000
+#define FM10K_EIMR_VFLR 0x00400000
+#define FM10K_EIMR_MAXHOLDTIME 0x01000000
+#define FM10K_EIMR_ALL 0x55555555
+#define FM10K_EIMR_DISABLE(NAME) ((FM10K_EIMR_ ## NAME) << 0)
+#define FM10K_EIMR_ENABLE(NAME) ((FM10K_EIMR_ ## NAME) << 1)
+#define FM10K_FAULT_ADDR_LO 0x0
+#define FM10K_FAULT_ADDR_HI 0x1
+#define FM10K_FAULT_SPECINFO 0x2
+#define FM10K_FAULT_FUNC 0x3
+#define FM10K_FAULT_SIZE 0x4
+#define FM10K_FAULT_FUNC_VALID 0x00008000
+#define FM10K_FAULT_FUNC_PF 0x00004000
+#define FM10K_FAULT_FUNC_VF_MASK 0x00003F00
+#define FM10K_FAULT_FUNC_VF_SHIFT 8
+#define FM10K_FAULT_FUNC_TYPE_MASK 0x000000FF
+
+#define FM10K_PCA_FAULT 0x0008
+#define FM10K_THI_FAULT 0x0010
+#define FM10K_FUM_FAULT 0x001C
+
+/* Rx queue timeout indicator */
+#define FM10K_MAXHOLDQ(_n) ((_n) + 0x0020)
+
+/* Switch Manager info */
+#define FM10K_SM_AREA(_n) ((_n) + 0x0028)
+
+/* GLORT mapping registers */
+#define FM10K_DGLORTMAP(_n) ((_n) + 0x0030)
+#define FM10K_DGLORT_COUNT 8
+#define FM10K_DGLORTMAP_MASK_SHIFT 16
+#define FM10K_DGLORTMAP_ANY 0x00000000
+#define FM10K_DGLORTMAP_NONE 0x0000FFFF
+#define FM10K_DGLORTMAP_ZERO 0xFFFF0000
+#define FM10K_DGLORTDEC(_n) ((_n) + 0x0038)
+#define FM10K_DGLORTDEC_VSILENGTH_SHIFT 4
+#define FM10K_DGLORTDEC_VSIBASE_SHIFT 7
+#define FM10K_DGLORTDEC_PCLENGTH_SHIFT 14
+#define FM10K_DGLORTDEC_QBASE_SHIFT 16
+#define FM10K_DGLORTDEC_RSSLENGTH_SHIFT 24
+#define FM10K_DGLORTDEC_INNERRSS_ENABLE 0x08000000
+#define FM10K_TUNNEL_CFG 0x0040
+#define FM10K_TUNNEL_CFG_NVGRE_SHIFT 16
+#define FM10K_TUNNEL_CFG_GENEVE 0x0041
+#define FM10K_SWPRI_MAP(_n) ((_n) + 0x0050)
+#define FM10K_SWPRI_MAX 16
+#define FM10K_RSSRK(_n, _m) (((_n) * 0x10) + (_m) + 0x0800)
+#define FM10K_RSSRK_SIZE 10
+#define FM10K_RSSRK_ENTRIES_PER_REG 4
+#define FM10K_RETA(_n, _m) (((_n) * 0x20) + (_m) + 0x1000)
+#define FM10K_RETA_SIZE 32
+#define FM10K_RETA_ENTRIES_PER_REG 4
+#define FM10K_MAX_RSS_INDICES 128
+
+/* Rate limiting registers */
+#define FM10K_TC_CREDIT(_n) ((_n) + 0x2000)
+#define FM10K_TC_CREDIT_CREDIT_MASK 0x001FFFFF
+#define FM10K_TC_MAXCREDIT(_n) ((_n) + 0x2040)
+#define FM10K_TC_MAXCREDIT_64K 0x00010000
+#define FM10K_TC_RATE(_n) ((_n) + 0x2080)
+#define FM10K_TC_RATE_QUANTA_MASK 0x0000FFFF
+#define FM10K_TC_RATE_INTERVAL_4US_GEN1 0x00020000
+#define FM10K_TC_RATE_INTERVAL_4US_GEN2 0x00040000
+#define FM10K_TC_RATE_INTERVAL_4US_GEN3 0x00080000
+
+/* DMA control registers */
+#define FM10K_DMA_CTRL 0x20C3
+#define FM10K_DMA_CTRL_TX_ENABLE 0x00000001
+#define FM10K_DMA_CTRL_TX_ACTIVE 0x00000008
+#define FM10K_DMA_CTRL_RX_ENABLE 0x00000010
+#define FM10K_DMA_CTRL_RX_ACTIVE 0x00000080
+#define FM10K_DMA_CTRL_RX_DESC_SIZE 0x00000100
+#define FM10K_DMA_CTRL_MINMSS_SHIFT 9
+#define FM10K_DMA_CTRL_MINMSS_64 0x00008000
+#define FM10K_DMA_CTRL_MAX_HOLD_1US_GEN3 0x04800000
+#define FM10K_DMA_CTRL_MAX_HOLD_1US_GEN2 0x04000000
+#define FM10K_DMA_CTRL_MAX_HOLD_1US_GEN1 0x03800000
+#define FM10K_DMA_CTRL_DATAPATH_RESET 0x20000000
+#define FM10K_DMA_CTRL_32_DESC 0x00000000
+
+#define FM10K_DMA_CTRL2 0x20C4
+#define FM10K_DMA_CTRL2_SWITCH_READY 0x00002000
+
+/* TSO flags configuration
+ * First packet contains all flags except for fin and psh
+ * Middle packet contains only urg and ack
+ * Last packet contains urg, ack, fin, and psh
+ */
+#define FM10K_TSO_FLAGS_LOW 0x00300FF6
+#define FM10K_TSO_FLAGS_HI 0x00000039
+#define FM10K_DTXTCPFLGL 0x20C5
+#define FM10K_DTXTCPFLGH 0x20C6
+
+#define FM10K_TPH_CTRL 0x20C7
+#define FM10K_MRQC(_n) ((_n) + 0x2100)
+#define FM10K_MRQC_TCP_IPV4 0x00000001
+#define FM10K_MRQC_IPV4 0x00000002
+#define FM10K_MRQC_IPV6 0x00000010
+#define FM10K_MRQC_TCP_IPV6 0x00000020
+#define FM10K_MRQC_UDP_IPV4 0x00000040
+#define FM10K_MRQC_UDP_IPV6 0x00000080
+
+#define FM10K_TQMAP(_n) ((_n) + 0x2800)
+#define FM10K_TQMAP_TABLE_SIZE 2048
+#define FM10K_RQMAP(_n) ((_n) + 0x3000)
+
+/* Hardware Statistics */
+#define FM10K_STATS_TIMEOUT 0x3800
+#define FM10K_STATS_UR 0x3801
+#define FM10K_STATS_CA 0x3802
+#define FM10K_STATS_UM 0x3803
+#define FM10K_STATS_XEC 0x3804
+#define FM10K_STATS_VLAN_DROP 0x3805
+#define FM10K_STATS_LOOPBACK_DROP 0x3806
+#define FM10K_STATS_NODESC_DROP 0x3807
+
+/* Timesync registers */
+#define FM10K_SYSTIME 0x3814
+#define FM10K_SYSTIME_CFG 0x3818
+#define FM10K_SYSTIME_CFG_STEP_MASK 0x0000000F
+
+/* PCIe state registers */
+#define FM10K_PHYADDR 0x381C
+
+/* Rx ring registers */
+#define FM10K_RDBAL(_n) ((0x40 * (_n)) + 0x4000)
+#define FM10K_RDBAH(_n) ((0x40 * (_n)) + 0x4001)
+#define FM10K_RDLEN(_n) ((0x40 * (_n)) + 0x4002)
+#define FM10K_TPH_RXCTRL(_n) ((0x40 * (_n)) + 0x4003)
+#define FM10K_TPH_RXCTRL_DESC_TPHEN 0x00000020
+#define FM10K_TPH_RXCTRL_DESC_RROEN 0x00000200
+#define FM10K_TPH_RXCTRL_DATA_WROEN 0x00002000
+#define FM10K_TPH_RXCTRL_HDR_WROEN 0x00008000
+#define FM10K_RDH(_n) ((0x40 * (_n)) + 0x4004)
+#define FM10K_RDT(_n) ((0x40 * (_n)) + 0x4005)
+#define FM10K_RXQCTL(_n) ((0x40 * (_n)) + 0x4006)
+#define FM10K_RXQCTL_ENABLE 0x00000001
+#define FM10K_RXQCTL_PF 0x000000FC
+#define FM10K_RXQCTL_VF_SHIFT 2
+#define FM10K_RXQCTL_VF 0x00000100
+#define FM10K_RXQCTL_ID_MASK (FM10K_RXQCTL_PF | FM10K_RXQCTL_VF)
+#define FM10K_RXDCTL(_n) ((0x40 * (_n)) + 0x4007)
+#define FM10K_RXDCTL_WRITE_BACK_MIN_DELAY 0x00000001
+#define FM10K_RXDCTL_DROP_ON_EMPTY 0x00000200
+#define FM10K_RXINT(_n) ((0x40 * (_n)) + 0x4008)
+#define FM10K_RXINT_TIMER_SHIFT 8
+#define FM10K_SRRCTL(_n) ((0x40 * (_n)) + 0x4009)
+#define FM10K_SRRCTL_BSIZEPKT_SHIFT 8 /* shift _right_ */
+#define FM10K_SRRCTL_LOOPBACK_SUPPRESS 0x40000000
+#define FM10K_SRRCTL_BUFFER_CHAINING_EN 0x80000000
+
+/* Rx Statistics */
+#define FM10K_QPRC(_n) ((0x40 * (_n)) + 0x400A)
+#define FM10K_QPRDC(_n) ((0x40 * (_n)) + 0x400B)
+#define FM10K_QBRC_L(_n) ((0x40 * (_n)) + 0x400C)
+#define FM10K_QBRC_H(_n) ((0x40 * (_n)) + 0x400D)
+
+/* Rx GLORT register */
+#define FM10K_RX_SGLORT(_n) ((0x40 * (_n)) + 0x400E)
+
+/* Tx ring registers */
+#define FM10K_TDBAL(_n) ((0x40 * (_n)) + 0x8000)
+#define FM10K_TDBAH(_n) ((0x40 * (_n)) + 0x8001)
+#define FM10K_TDLEN(_n) ((0x40 * (_n)) + 0x8002)
+/* When first initialized, VFs need to know the Interrupt Throttle Rate (ITR)
+ * scale which is based on the PCIe speed but the speed information in the PCI
+ * configuration space may not be accurate. The PF already knows the ITR scale
+ * but there is no defined method to pass that information from the PF to the
+ * VF. This is accomplished during VF initialization by temporarily co-opting
+ * the yet-to-be-used TDLEN register to have the PF store the ITR shift for
+ * the VF to retrieve before the VF needs to use the TDLEN register for its
+ * intended purpose, i.e. before the Tx resources are allocated.
+ */
+#define FM10K_TDLEN_ITR_SCALE_SHIFT 9
+#define FM10K_TDLEN_ITR_SCALE_MASK 0x00000E00
+#define FM10K_TDLEN_ITR_SCALE_GEN1 2
+#define FM10K_TDLEN_ITR_SCALE_GEN2 1
+#define FM10K_TDLEN_ITR_SCALE_GEN3 0
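+/* For example (using only the definitions above): a PF on a Gen1 link
+ * stores FM10K_TDLEN_ITR_SCALE_GEN1 << FM10K_TDLEN_ITR_SCALE_SHIFT in
+ * TDLEN, and the VF recovers the scale during init with
+ * (TDLEN & FM10K_TDLEN_ITR_SCALE_MASK) >> FM10K_TDLEN_ITR_SCALE_SHIFT.
+ */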
+#define FM10K_TPH_TXCTRL(_n) ((0x40 * (_n)) + 0x8003)
+#define FM10K_TPH_TXCTRL_DESC_TPHEN 0x00000020
+#define FM10K_TPH_TXCTRL_DESC_RROEN 0x00000200
+#define FM10K_TPH_TXCTRL_DESC_WROEN 0x00000800
+#define FM10K_TPH_TXCTRL_DATA_RROEN 0x00002000
+#define FM10K_TDH(_n) ((0x40 * (_n)) + 0x8004)
+#define FM10K_TDT(_n) ((0x40 * (_n)) + 0x8005)
+#define FM10K_TXDCTL(_n) ((0x40 * (_n)) + 0x8006)
+#define FM10K_TXDCTL_ENABLE 0x00004000
+#define FM10K_TXDCTL_MAX_TIME_SHIFT 16
+#define FM10K_TXQCTL(_n) ((0x40 * (_n)) + 0x8007)
+#define FM10K_TXQCTL_PF 0x0000003F
+#define FM10K_TXQCTL_VF 0x00000040
+#define FM10K_TXQCTL_ID_MASK (FM10K_TXQCTL_PF | FM10K_TXQCTL_VF)
+#define FM10K_TXQCTL_PC_SHIFT 7
+#define FM10K_TXQCTL_PC_MASK 0x00000380
+#define FM10K_TXQCTL_TC_SHIFT 10
+#define FM10K_TXQCTL_VID_SHIFT 16
+#define FM10K_TXQCTL_VID_MASK 0x0FFF0000
+#define FM10K_TXQCTL_UNLIMITED_BW 0x10000000
+#define FM10K_TXINT(_n) ((0x40 * (_n)) + 0x8008)
+#define FM10K_TXINT_TIMER_SHIFT 8
+
+/* Tx Statistics */
+#define FM10K_QPTC(_n) ((0x40 * (_n)) + 0x8009)
+#define FM10K_QBTC_L(_n) ((0x40 * (_n)) + 0x800A)
+#define FM10K_QBTC_H(_n) ((0x40 * (_n)) + 0x800B)
+
+/* Tx Push registers */
+#define FM10K_TQDLOC(_n) ((0x40 * (_n)) + 0x800C)
+#define FM10K_TQDLOC_BASE_32_DESC 0x08
+#define FM10K_TQDLOC_SIZE_32_DESC 0x00050000
+
+/* Tx GLORT registers */
+#define FM10K_TX_SGLORT(_n) ((0x40 * (_n)) + 0x800D)
+#define FM10K_PFVTCTL(_n) ((0x40 * (_n)) + 0x800E)
+#define FM10K_PFVTCTL_FTAG_DESC_ENABLE 0x00000001
+
+/* Interrupt moderation and control registers */
+#define FM10K_INT_MAP(_n) ((_n) + 0x10080)
+#define FM10K_INT_MAP_TIMER0 0x00000000
+#define FM10K_INT_MAP_TIMER1 0x00000100
+#define FM10K_INT_MAP_IMMEDIATE 0x00000200
+#define FM10K_INT_MAP_DISABLE 0x00000300
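+/* Reading the values above: bits 9:8 of an INT_MAP entry select the
+ * moderation mode (timer0, timer1, immediate, or disabled); the remaining
+ * low bits are assumed to carry the MSI-X vector the source is steered to.
+ */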
+#define FM10K_MSIX_VECTOR_MASK(_n) ((0x4 * (_n)) + 0x11003)
+#define FM10K_INT_CTRL 0x12000
+#define FM10K_INT_CTRL_ENABLEMODERATOR 0x00000400
+#define FM10K_ITR(_n) ((_n) + 0x12400)
+#define FM10K_ITR_INTERVAL1_SHIFT 12
+#define FM10K_ITR_PENDING2 0x10000000
+#define FM10K_ITR_AUTOMASK 0x20000000
+#define FM10K_ITR_MASK_SET 0x40000000
+#define FM10K_ITR_MASK_CLEAR 0x80000000
+#define FM10K_ITR2(_n) ((0x2 * (_n)) + 0x12800)
+#define FM10K_ITR_REG_COUNT 768
+#define FM10K_ITR_REG_COUNT_PF 256
+
+/* Switch manager interrupt registers */
+#define FM10K_IP 0x13000
+#define FM10K_IP_NOTINRESET 0x00000100
+#define FM10K_SRAM_IP 0x13003
+
+/* VLAN registers */
+#define FM10K_VLAN_TABLE(_n, _m) ((0x80 * (_n)) + (_m) + 0x14000)
+#define FM10K_VLAN_TABLE_SIZE 128
+
+/* VLAN specific message offsets */
+#define FM10K_VLAN_TABLE_VID_MAX 4096
+#define FM10K_VLAN_TABLE_VSI_MAX 64
+#define FM10K_VLAN_LENGTH_SHIFT 16
+#define FM10K_VLAN_CLEAR BIT(15)
+#define FM10K_VLAN_OVERRIDE FM10K_VLAN_CLEAR
+#define FM10K_VLAN_ALL \
+ ((FM10K_VLAN_TABLE_VID_MAX - 1) << FM10K_VLAN_LENGTH_SHIFT)
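+/* Layout of the resulting VLAN message word: bits 0-11 carry the VLAN ID,
+ * bit 15 the clear/override flag, and bits 16-27 the VLAN range length;
+ * the remaining bits are reserved and must be zero.
+ */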
+
+/* VF FLR event notification registers */
+#define FM10K_PFVFLRE(_n) ((0x1 * (_n)) + 0x18844)
+#define FM10K_PFVFLREC(_n) ((0x1 * (_n)) + 0x18846)
+
+/* Defines for size of uncacheable and write-combining memories */
+#define FM10K_UC_ADDR_START 0x000000 /* start of standard regs */
+#define FM10K_WC_ADDR_START 0x100000 /* start of Tx Desc Cache */
+#define FM10K_DBI_ADDR_START 0x200000 /* start of debug registers */
+#define FM10K_UC_ADDR_SIZE (FM10K_WC_ADDR_START - FM10K_UC_ADDR_START)
+#define FM10K_WC_ADDR_SIZE (FM10K_DBI_ADDR_START - FM10K_WC_ADDR_START)
+
+/* Define timeouts for resets and disables */
+#define FM10K_QUEUE_DISABLE_TIMEOUT 100
+#define FM10K_RESET_TIMEOUT 150
+
+/* Maximum supported combined inner and outer header length for encapsulation */
+#define FM10K_TUNNEL_HEADER_LENGTH 184
+
+/* VF registers */
+#define FM10K_VFCTRL 0x00000
+#define FM10K_VFCTRL_RST 0x00000008
+#define FM10K_VFINT_MAP 0x00030
+#define FM10K_VFSYSTIME 0x00040
+#define FM10K_VFITR(_n) ((_n) + 0x00060)
+
+/* Registers contained in BAR 4 for Switch management */
+#define FM10K_SW_SYSTIME_ADJUST 0x0224D
+#define FM10K_SW_SYSTIME_ADJUST_MASK 0x3FFFFFFF
+#define FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE 0x80000000
+#define FM10K_SW_SYSTIME_PULSE(_n) ((_n) + 0x02252)
+
+#ifndef ETH_ALEN
+#define ETH_ALEN 6
+#endif /* ETH_ALEN */
+
+#ifndef IS_ZERO_ETHER_ADDR
+/* make certain address is not 0 */
+#define IS_ZERO_ETHER_ADDR(addr) \
+(!((addr)[0] | (addr)[1] | (addr)[2] | (addr)[3] | (addr)[4] | (addr)[5]))
+#endif
+
+#ifndef IS_MULTICAST_ETHER_ADDR
+#define IS_MULTICAST_ETHER_ADDR(addr) ((addr)[0] & 0x1)
+#endif
+
+#ifndef IS_VALID_ETHER_ADDR
+/* make certain address is not multicast or 0 */
+#define IS_VALID_ETHER_ADDR(addr) \
+(!IS_MULTICAST_ETHER_ADDR(addr) && !IS_ZERO_ETHER_ADDR(addr))
+#endif
+
+enum fm10k_int_source {
+ fm10k_int_mailbox = 0,
+ fm10k_int_pcie_fault = 1,
+ fm10k_int_switch_up_down = 2,
+ fm10k_int_switch_event = 3,
+ fm10k_int_sram = 4,
+ fm10k_int_vflr = 5,
+ fm10k_int_max_hold_time = 6,
+ fm10k_int_sources_max_pf
+};
+
+/* PCIe bus speeds */
+enum fm10k_bus_speed {
+ fm10k_bus_speed_unknown = 0,
+ fm10k_bus_speed_2500 = 2500,
+ fm10k_bus_speed_5000 = 5000,
+ fm10k_bus_speed_8000 = 8000,
+ fm10k_bus_speed_reserved
+};
+
+/* PCIe bus widths */
+enum fm10k_bus_width {
+ fm10k_bus_width_unknown = 0,
+ fm10k_bus_width_pcie_x1 = 1,
+ fm10k_bus_width_pcie_x2 = 2,
+ fm10k_bus_width_pcie_x4 = 4,
+ fm10k_bus_width_pcie_x8 = 8,
+ fm10k_bus_width_reserved
+};
+
+/* PCIe payload sizes */
+enum fm10k_bus_payload {
+ fm10k_bus_payload_unknown = 0,
+ fm10k_bus_payload_128 = 1,
+ fm10k_bus_payload_256 = 2,
+ fm10k_bus_payload_512 = 3,
+ fm10k_bus_payload_reserved
+};
+
+/* Bus parameters */
+struct fm10k_bus_info {
+ enum fm10k_bus_speed speed;
+ enum fm10k_bus_width width;
+ enum fm10k_bus_payload payload;
+};
+
+/* Statistics related declarations */
+struct fm10k_hw_stat {
+ u64 count;
+ u32 base_l;
+ u32 base_h;
+};
+
+struct fm10k_hw_stats_q {
+ struct fm10k_hw_stat tx_bytes;
+ struct fm10k_hw_stat tx_packets;
+#define tx_stats_idx tx_packets.base_h
+ struct fm10k_hw_stat rx_bytes;
+ struct fm10k_hw_stat rx_packets;
+#define rx_stats_idx rx_packets.base_h
+ struct fm10k_hw_stat rx_drops;
+};
+
+struct fm10k_hw_stats {
+ struct fm10k_hw_stat timeout;
+#define stats_idx timeout.base_h
+ struct fm10k_hw_stat ur;
+ struct fm10k_hw_stat ca;
+ struct fm10k_hw_stat um;
+ struct fm10k_hw_stat xec;
+ struct fm10k_hw_stat vlan_drop;
+ struct fm10k_hw_stat loopback_drop;
+ struct fm10k_hw_stat nodesc_drop;
+ struct fm10k_hw_stats_q q[FM10K_MAX_QUEUES_PF];
+};
+
+/* Establish DGLORT feature priority */
+enum fm10k_dglortdec_idx {
+ fm10k_dglort_default = 0,
+ fm10k_dglort_vf_rsvd0 = 1,
+ fm10k_dglort_vf_rss = 2,
+ fm10k_dglort_pf_rsvd0 = 3,
+ fm10k_dglort_pf_queue = 4,
+ fm10k_dglort_pf_vsi = 5,
+ fm10k_dglort_pf_rsvd1 = 6,
+ fm10k_dglort_pf_rss = 7
+};
+
+struct fm10k_dglort_cfg {
+ u16 glort; /* GLORT base */
+ u16 queue_b; /* Base value for queue */
+ u8 vsi_b; /* Base value for VSI */
+ u8 idx; /* index of DGLORTDEC entry */
+ u8 rss_l; /* RSS indices */
+ u8 pc_l; /* Priority Class indices */
+ u8 vsi_l; /* Number of bits from GLORT used to determine VSI */
+ u8 queue_l; /* Number of bits from GLORT used to determine queue */
+ u8 shared_l; /* Ignored bits from GLORT resulting in shared VSI */
+ u8 inner_rss; /* Boolean value if inner header is used for RSS */
+};
+
+enum fm10k_pca_fault {
+ PCA_NO_FAULT,
+ PCA_UNMAPPED_ADDR,
+ PCA_BAD_QACCESS_PF,
+ PCA_BAD_QACCESS_VF,
+ PCA_MALICIOUS_REQ,
+ PCA_POISONED_TLP,
+ PCA_TLP_ABORT,
+ __PCA_MAX
+};
+
+enum fm10k_thi_fault {
+ THI_NO_FAULT,
+ THI_MAL_DIS_Q_FAULT,
+ __THI_MAX
+};
+
+enum fm10k_fum_fault {
+ FUM_NO_FAULT,
+ FUM_UNMAPPED_ADDR,
+ FUM_POISONED_TLP,
+ FUM_BAD_VF_QACCESS,
+ FUM_ADD_DECODE_ERR,
+ FUM_RO_ERROR,
+ FUM_QPRC_CRC_ERROR,
+ FUM_CSR_TIMEOUT,
+ FUM_INVALID_TYPE,
+ FUM_INVALID_LENGTH,
+ FUM_INVALID_BE,
+ FUM_INVALID_ALIGN,
+ __FUM_MAX
+};
+
+struct fm10k_fault {
+ u64 address; /* Address at the time fault was detected */
+ u32 specinfo; /* Extra info on this fault (fault dependent) */
+ u8 type; /* Fault value dependent on subunit */
+ u8 func; /* Function number of the fault */
+};
+
+struct fm10k_mac_ops {
+ /* basic bring-up and tear-down */
+ s32 (*reset_hw)(struct fm10k_hw *);
+ s32 (*init_hw)(struct fm10k_hw *);
+ s32 (*start_hw)(struct fm10k_hw *);
+ s32 (*stop_hw)(struct fm10k_hw *);
+ s32 (*get_bus_info)(struct fm10k_hw *);
+ s32 (*get_host_state)(struct fm10k_hw *, bool *);
+ s32 (*request_lport_map)(struct fm10k_hw *);
+#ifndef NO_IS_SLOT_APPROPRIATE_CHECK
+ bool (*is_slot_appropriate)(struct fm10k_hw *);
+#endif
+ s32 (*update_vlan)(struct fm10k_hw *, u32, u8, bool);
+ s32 (*read_mac_addr)(struct fm10k_hw *);
+ s32 (*update_uc_addr)(struct fm10k_hw *, u16, const u8 *,
+ u16, bool, u8);
+ s32 (*update_mc_addr)(struct fm10k_hw *, u16, const u8 *, u16, bool);
+ s32 (*update_xcast_mode)(struct fm10k_hw *, u16, u8);
+ void (*update_int_moderator)(struct fm10k_hw *);
+ s32 (*update_lport_state)(struct fm10k_hw *, u16, u16, bool);
+ void (*update_hw_stats)(struct fm10k_hw *, struct fm10k_hw_stats *);
+ void (*rebind_hw_stats)(struct fm10k_hw *, struct fm10k_hw_stats *);
+ s32 (*configure_dglort_map)(struct fm10k_hw *,
+ struct fm10k_dglort_cfg *);
+ void (*set_dma_mask)(struct fm10k_hw *, u64);
+ s32 (*get_fault)(struct fm10k_hw *, int, struct fm10k_fault *);
+ s32 (*adjust_systime)(struct fm10k_hw *, s32 ppb);
+ s32 (*notify_offset)(struct fm10k_hw *, u64 offset);
+ u64 (*read_systime)(struct fm10k_hw *);
+};
+
+enum fm10k_mac_type {
+ fm10k_mac_unknown = 0,
+ fm10k_mac_pf,
+ fm10k_mac_vf,
+ fm10k_num_macs
+};
+
+struct fm10k_mac_info {
+ struct fm10k_mac_ops ops;
+ enum fm10k_mac_type type;
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+ u16 default_vid;
+ u16 max_msix_vectors;
+ u16 max_queues;
+ bool vlan_override;
+ bool get_host_state;
+ bool tx_ready;
+ u32 dglort_map;
+ u8 itr_scale;
+ u64 reset_while_pending;
+};
+
+struct fm10k_swapi_table_info {
+ u32 used;
+ u32 avail;
+};
+
+struct fm10k_swapi_info {
+ u32 status;
+ struct fm10k_swapi_table_info mac;
+ struct fm10k_swapi_table_info nexthop;
+ struct fm10k_swapi_table_info ffu;
+};
+
+enum fm10k_xcast_modes {
+ FM10K_XCAST_MODE_ALLMULTI = 0,
+ FM10K_XCAST_MODE_MULTI = 1,
+ FM10K_XCAST_MODE_PROMISC = 2,
+ FM10K_XCAST_MODE_NONE = 3,
+ FM10K_XCAST_MODE_DISABLE = 4
+};
+
+enum fm10k_timestamp_modes {
+ FM10K_TIMESTAMP_MODE_NONE = 0,
+ FM10K_TIMESTAMP_MODE_PEP_TO_PEP = 1,
+ FM10K_TIMESTAMP_MODE_PEP_TO_ANY = 2,
+};
+
+#define FM10K_VF_TC_MAX 100000 /* 100,000 Mb/s aka 100Gb/s */
+#define FM10K_VF_TC_MIN 1 /* 1 Mb/s is the slowest rate */
+
+struct fm10k_vf_info {
+	/* mbx must be the first field in this struct unless all default IOV
+	 * message handlers are redone, since those handlers assume that
+	 * vf_info starts at the same offset as the mailbox
+	 */
+ struct fm10k_mbx_info mbx; /* PF side of VF mailbox */
+ int rate; /* Tx BW cap as defined by OS */
+ u16 glort; /* resource tag for this VF */
+ u16 sw_vid; /* Switch API assigned VLAN */
+ u16 pf_vid; /* PF assigned Default VLAN */
+ u8 mac[ETH_ALEN]; /* PF Default MAC address */
+ u8 vsi; /* VSI identifier */
+ u8 vf_idx; /* which VF this is */
+ u8 vf_flags; /* flags indicating what modes
+ * are supported for the port
+ */
+#ifndef NO_FM10K_VF_TRUSTED_MODE
+ bool trusted; /* VF trust mode */
+#endif
+};
+
+#define FM10K_VF_FLAG_ALLMULTI_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_ALLMULTI))
+#define FM10K_VF_FLAG_MULTI_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_MULTI))
+#define FM10K_VF_FLAG_PROMISC_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_PROMISC))
+#define FM10K_VF_FLAG_NONE_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_NONE))
+#define FM10K_VF_FLAG_CAPABLE(vf_info) ((vf_info)->vf_flags & (u8)0xF)
+#define FM10K_VF_FLAG_ENABLED(vf_info) ((vf_info)->vf_flags >> 4)
+#define FM10K_VF_FLAG_SET_MODE(mode) ((u8)0x10 << (mode))
+#define FM10K_VF_FLAG_SET_MODE_NONE \
+ FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_NONE)
+#define FM10K_VF_FLAG_MULTI_ENABLED \
+ (FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_ALLMULTI) | \
+ FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_MULTI) | \
+ FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_PROMISC))
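+/* vf_flags layout implied by the macros above: the low nibble holds one
+ * capability bit per xcast mode, and the high nibble holds the matching
+ * "mode enabled" bits set via FM10K_VF_FLAG_SET_MODE().
+ */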
+
+struct fm10k_iov_ops {
+ /* IOV related bring-up and tear-down */
+ s32 (*assign_resources)(struct fm10k_hw *, u16, u16);
+ s32 (*configure_tc)(struct fm10k_hw *, u16, int);
+ s32 (*assign_int_moderator)(struct fm10k_hw *, u16);
+ s32 (*assign_default_mac_vlan)(struct fm10k_hw *,
+ struct fm10k_vf_info *);
+ s32 (*reset_resources)(struct fm10k_hw *,
+ struct fm10k_vf_info *);
+ s32 (*set_lport)(struct fm10k_hw *, struct fm10k_vf_info *, u16, u8);
+ void (*reset_lport)(struct fm10k_hw *, struct fm10k_vf_info *);
+ void (*update_stats)(struct fm10k_hw *, struct fm10k_hw_stats_q *, u16);
+ void (*notify_offset)(struct fm10k_hw *, struct fm10k_vf_info*, u64);
+};
+
+struct fm10k_iov_info {
+ struct fm10k_iov_ops ops;
+ u16 total_vfs;
+ u16 num_vfs;
+ u16 num_pools;
+};
+
+struct fm10k_hw {
+ u32 *hw_addr;
+ u32 *sw_addr;
+ void *back;
+ struct fm10k_mac_info mac;
+ struct fm10k_bus_info bus;
+ struct fm10k_bus_info bus_caps;
+ struct fm10k_iov_info iov;
+ struct fm10k_mbx_info mbx;
+ struct fm10k_swapi_info swapi;
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ u32 flags;
+#define FM10K_HW_FLAG_CLOCK_OWNER BIT(0)
+};
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define FM10K_REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define FM10K_REQ_RX_DESCRIPTOR_MULTIPLE 8
+
+/* Transmit Descriptor */
+struct fm10k_tx_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ __le16 buflen; /* Length of data to be DMAed */
+ __le16 vlan; /* VLAN_ID and VPRI to be inserted in FTAG */
+ __le16 mss; /* MSS for segmentation offload */
+ u8 hdrlen; /* Header size for segmentation offload */
+ u8 flags; /* Status and offload request flags */
+};
+
+/* Transmit Descriptor Cache Structure */
+struct fm10k_tx_desc_cache {
+ struct fm10k_tx_desc tx_desc[256];
+};
+
+#define FM10K_TXD_FLAG_INT 0x01
+#define FM10K_TXD_FLAG_TIME 0x02
+#define FM10K_TXD_FLAG_CSUM 0x04
+#define FM10K_TXD_FLAG_FTAG 0x10
+#define FM10K_TXD_FLAG_RS 0x20
+#define FM10K_TXD_FLAG_LAST 0x40
+#define FM10K_TXD_FLAG_DONE 0x80
+
+
+/* This value is meant to enable optimal placement of the RS and INT
+ * bits by pointing at the last descriptor in the cache for either the
+ * start of the packet or the end of the packet. If the index is actually
+ * at the start of the FIFO, it points at the offset of the last index
+ * in the FIFO to prevent an unnecessary write.
+ */
+#define FM10K_TXD_WB_FIFO_SIZE 4
+
+/* Receive Descriptor - 32B */
+union fm10k_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ __le64 reserved; /* Empty space, RSS hash */
+ __le64 timestamp;
+ } q; /* Read, Writeback, 64b quad-words */
+ struct {
+ __le32 data; /* RSS and header data */
+ __le32 rss; /* RSS Hash */
+ __le32 staterr;
+ __le32 vlan_len;
+ __le32 glort; /* sglort/dglort */
+ } d; /* Writeback, 32b double-words */
+ struct {
+ __le16 pkt_info; /* RSS, Pkt type */
+ __le16 hdr_info; /* Splithdr, hdrlen, xC */
+ __le16 rss_lower;
+ __le16 rss_upper;
+ __le16 status; /* status/error */
+ __le16 csum_err; /* checksum or extended error value */
+ __le16 length; /* Packet length */
+ __le16 vlan; /* VLAN tag */
+ __le16 dglort;
+ __le16 sglort;
+ } w; /* Writeback, 16b words */
+};
+
+#define FM10K_RXD_RSSTYPE_MASK 0x000F
+enum fm10k_rdesc_rss_type {
+ FM10K_RSSTYPE_NONE = 0x0,
+ FM10K_RSSTYPE_IPV4_TCP = 0x1,
+ FM10K_RSSTYPE_IPV4 = 0x2,
+ FM10K_RSSTYPE_IPV6_TCP = 0x3,
+ /* Reserved 0x4 */
+ FM10K_RSSTYPE_IPV6 = 0x5,
+ /* Reserved 0x6 */
+ FM10K_RSSTYPE_IPV4_UDP = 0x7,
+ FM10K_RSSTYPE_IPV6_UDP = 0x8
+ /* Reserved 0x9 - 0xF */
+};
+
+#define FM10K_RXD_PKTTYPE_MASK 0x03F0
+#define FM10K_RXD_PKTTYPE_SHIFT 4
+enum fm10k_rdesc_pkt_type {
+ /* L3 type */
+ FM10K_PKTTYPE_OTHER = 0x00,
+ FM10K_PKTTYPE_IPV4 = 0x01,
+ FM10K_PKTTYPE_IPV4_EX = 0x02,
+ FM10K_PKTTYPE_IPV6 = 0x03,
+ FM10K_PKTTYPE_IPV6_EX = 0x04,
+
+ /* L4 type */
+ FM10K_PKTTYPE_TCP = 0x08,
+ FM10K_PKTTYPE_UDP = 0x10,
+ FM10K_PKTTYPE_GRE = 0x18,
+ FM10K_PKTTYPE_VXLAN = 0x20,
+ FM10K_PKTTYPE_NVGRE = 0x28,
+ FM10K_PKTTYPE_GENEVE = 0x30
+};
+
+#define FM10K_RXD_HDR_INFO_XC_MASK 0x0006
+enum fm10k_rxdesc_xc {
+ FM10K_XC_UNICAST = 0x0,
+ FM10K_XC_MULTICAST = 0x4,
+ FM10K_XC_BROADCAST = 0x6
+};
+
+
+#define FM10K_RXD_STATUS_DD 0x0001 /* Descriptor done */
+#define FM10K_RXD_STATUS_EOP 0x0002 /* End of packet */
+#define FM10K_RXD_STATUS_IPCS 0x0008 /* Indicates IPv4 csum */
+#define FM10K_RXD_STATUS_L4CS 0x0010 /* Indicates an L4 csum */
+#define FM10K_RXD_STATUS_L4CS2 0x0040 /* Inner header L4 csum */
+#define FM10K_RXD_STATUS_L4E2 0x0800 /* Inner header L4 csum err */
+#define FM10K_RXD_STATUS_IPE2 0x1000 /* Inner header IPv4 csum err */
+#define FM10K_RXD_STATUS_RXE 0x2000 /* Generic Rx error */
+#define FM10K_RXD_STATUS_L4E 0x4000 /* L4 csum error */
+#define FM10K_RXD_STATUS_IPE 0x8000 /* IPv4 csum error */
+
+#define FM10K_RXD_ERR_SWITCH_ERROR 0x0001 /* Switch found bad packet */
+#define FM10K_RXD_ERR_NO_DESCRIPTOR 0x0002 /* No descriptor available */
+#define FM10K_RXD_ERR_PP_ERROR 0x0004 /* RAM error during processing */
+#define FM10K_RXD_ERR_SWITCH_READY 0x0008 /* Link transition mid-packet */
+#define FM10K_RXD_ERR_TOO_BIG 0x0010 /* Pkt too big for single buf */
+
+
+struct fm10k_ftag {
+ __be16 swpri_type_user;
+ __be16 vlan;
+ __be16 sglort;
+ __be16 dglort;
+};
+
+#endif /* _FM10K_TYPE_H_ */
diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_vf.c b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_vf.c
new file mode 100644
index 000000000..6809c3cfd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_vf.c
@@ -0,0 +1,646 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013 - 2015 Intel Corporation
+ */
+
+#include "fm10k_vf.h"
+
+/**
+ * fm10k_stop_hw_vf - Stop Tx/Rx units
+ * @hw: pointer to hardware structure
+ *
+ **/
+STATIC s32 fm10k_stop_hw_vf(struct fm10k_hw *hw)
+{
+ u8 *perm_addr = hw->mac.perm_addr;
+ u32 bal = 0, bah = 0, tdlen;
+ s32 err;
+ u16 i;
+
+ DEBUGFUNC("fm10k_stop_hw_vf");
+
+ /* we need to disable the queues before taking further steps */
+ err = fm10k_stop_hw_generic(hw);
+ if (err && err != FM10K_ERR_REQUESTS_PENDING)
+ return err;
+
+ /* If permanent address is set then we need to restore it */
+ if (IS_VALID_ETHER_ADDR(perm_addr)) {
+ bal = (((u32)perm_addr[3]) << 24) |
+ (((u32)perm_addr[4]) << 16) |
+ (((u32)perm_addr[5]) << 8);
+ bah = (((u32)0xFF) << 24) |
+ (((u32)perm_addr[0]) << 16) |
+ (((u32)perm_addr[1]) << 8) |
+ ((u32)perm_addr[2]);
+ }
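+	/* Note: this stashes the MAC in the ring base address registers
+	 * (bytes 3-5 in the low register, 0xFF plus bytes 0-2 in the high
+	 * one), the exact encoding fm10k_read_mac_addr_vf() decodes from
+	 * TDBAL(0)/TDBAH(0) after the next reset.
+	 */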
+
+ /* restore default itr_scale for next VF initialization */
+ tdlen = hw->mac.itr_scale << FM10K_TDLEN_ITR_SCALE_SHIFT;
+
+ /* The queues have already been disabled so we just need to
+ * update their base address registers
+ */
+ for (i = 0; i < hw->mac.max_queues; i++) {
+ FM10K_WRITE_REG(hw, FM10K_TDBAL(i), bal);
+ FM10K_WRITE_REG(hw, FM10K_TDBAH(i), bah);
+ FM10K_WRITE_REG(hw, FM10K_RDBAL(i), bal);
+ FM10K_WRITE_REG(hw, FM10K_RDBAH(i), bah);
+ /* Restore ITR scale in software-defined mechanism in TDLEN
+ * for next VF initialization. See definition of
+ * FM10K_TDLEN_ITR_SCALE_SHIFT for more details on the use of
+ * TDLEN here.
+ */
+ FM10K_WRITE_REG(hw, FM10K_TDLEN(i), tdlen);
+ }
+
+ return err;
+}
+
+/**
+ * fm10k_reset_hw_vf - VF hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * This function should return the hardware to a state similar to the
+ * one it is in after just being initialized.
+ **/
+STATIC s32 fm10k_reset_hw_vf(struct fm10k_hw *hw)
+{
+ s32 err;
+
+ DEBUGFUNC("fm10k_reset_hw_vf");
+
+ /* shut down queues we own and reset DMA configuration */
+ err = fm10k_stop_hw_vf(hw);
+ if (err == FM10K_ERR_REQUESTS_PENDING)
+ hw->mac.reset_while_pending++;
+ else if (err)
+ return err;
+
+	/* Initiate VF reset */
+ FM10K_WRITE_REG(hw, FM10K_VFCTRL, FM10K_VFCTRL_RST);
+
+ /* Flush write and allow 100us for reset to complete */
+ FM10K_WRITE_FLUSH(hw);
+ usec_delay(FM10K_RESET_TIMEOUT);
+
+ /* Clear reset bit and verify it was cleared */
+ FM10K_WRITE_REG(hw, FM10K_VFCTRL, 0);
+ if (FM10K_READ_REG(hw, FM10K_VFCTRL) & FM10K_VFCTRL_RST)
+ return FM10K_ERR_RESET_FAILED;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_init_hw_vf - VF hardware initialization
+ * @hw: pointer to hardware structure
+ *
+ **/
+STATIC s32 fm10k_init_hw_vf(struct fm10k_hw *hw)
+{
+ u32 tqdloc, tqdloc0 = ~FM10K_READ_REG(hw, FM10K_TQDLOC(0));
+ s32 err;
+ u16 i;
+
+ DEBUGFUNC("fm10k_init_hw_vf");
+
+ /* verify we have at least 1 queue */
+ if (!~FM10K_READ_REG(hw, FM10K_TXQCTL(0)) ||
+ !~FM10K_READ_REG(hw, FM10K_RXQCTL(0))) {
+ err = FM10K_ERR_NO_RESOURCES;
+ goto reset_max_queues;
+ }
+
+ /* determine how many queues we have */
+ for (i = 1; tqdloc0 && (i < FM10K_MAX_QUEUES_POOL); i++) {
+ /* verify the Descriptor cache offsets are increasing */
+ tqdloc = ~FM10K_READ_REG(hw, FM10K_TQDLOC(i));
+ if (!tqdloc || (tqdloc == tqdloc0))
+ break;
+
+ /* check to verify the PF doesn't own any of our queues */
+ if (!~FM10K_READ_REG(hw, FM10K_TXQCTL(i)) ||
+ !~FM10K_READ_REG(hw, FM10K_RXQCTL(i)))
+ break;
+ }
+
+ /* shut down queues we own and reset DMA configuration */
+ err = fm10k_disable_queues_generic(hw, i);
+ if (err)
+ goto reset_max_queues;
+
+ /* record maximum queue count */
+ hw->mac.max_queues = i;
+
+ /* fetch default VLAN and ITR scale */
+ hw->mac.default_vid = (FM10K_READ_REG(hw, FM10K_TXQCTL(0)) &
+ FM10K_TXQCTL_VID_MASK) >> FM10K_TXQCTL_VID_SHIFT;
+ /* Read the ITR scale from TDLEN. See the definition of
+ * FM10K_TDLEN_ITR_SCALE_SHIFT for more information about how TDLEN is
+ * used here.
+ */
+ hw->mac.itr_scale = (FM10K_READ_REG(hw, FM10K_TDLEN(0)) &
+ FM10K_TDLEN_ITR_SCALE_MASK) >>
+ FM10K_TDLEN_ITR_SCALE_SHIFT;
+
+ return FM10K_SUCCESS;
+
+reset_max_queues:
+ hw->mac.max_queues = 0;
+
+ return err;
+}
+
+#ifndef NO_IS_SLOT_APPROPRIATE_CHECK
+/**
+ * fm10k_is_slot_appropriate_vf - Indicate appropriate slot for this SKU
+ * @hw: pointer to hardware structure
+ *
+ * Looks at the PCIe bus info to confirm whether or not this slot can support
+ * the necessary bandwidth for this device. Since the VF has no control over
+ * the "slot" it is in, always indicate that the slot is appropriate.
+ **/
+STATIC bool fm10k_is_slot_appropriate_vf(struct fm10k_hw *hw)
+{
+ UNREFERENCED_1PARAMETER(hw);
+ DEBUGFUNC("fm10k_is_slot_appropriate_vf");
+
+ return TRUE;
+}
+
+#endif
+/* This structure defines the attributes to be parsed below */
+const struct fm10k_tlv_attr fm10k_mac_vlan_msg_attr[] = {
+ FM10K_TLV_ATTR_U32(FM10K_MAC_VLAN_MSG_VLAN),
+ FM10K_TLV_ATTR_BOOL(FM10K_MAC_VLAN_MSG_SET),
+ FM10K_TLV_ATTR_MAC_ADDR(FM10K_MAC_VLAN_MSG_MAC),
+ FM10K_TLV_ATTR_MAC_ADDR(FM10K_MAC_VLAN_MSG_DEFAULT_MAC),
+ FM10K_TLV_ATTR_MAC_ADDR(FM10K_MAC_VLAN_MSG_MULTICAST),
+ FM10K_TLV_ATTR_LAST
+};
+
+/**
+ * fm10k_update_vlan_vf - Update status of VLAN ID in VLAN filter table
+ * @hw: pointer to hardware structure
+ * @vid: VLAN ID to add to table
+ * @vsi: Reserved, should always be 0
+ * @set: Indicates if this is a set or clear operation
+ *
+ * This function adds or removes the corresponding VLAN ID from the VLAN
+ * filter table for this VF.
+ **/
+STATIC s32 fm10k_update_vlan_vf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ u32 msg[4];
+
+ /* verify the index is not set */
+ if (vsi)
+ return FM10K_ERR_PARAM;
+
+	/* the upper 16 bits of vid may carry a VLAN range length, so this
+	 * single shift checks the reserved bits (12-15 and 28-31) of both
+	 * the VLAN ID and the length at once
+	 */
+ if ((vid << 16 | vid) >> 28)
+ return FM10K_ERR_PARAM;
+
+ /* encode set bit into the VLAN ID */
+ if (!set)
+ vid |= FM10K_VLAN_CLEAR;
+
+ /* generate VLAN request */
+ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN);
+ fm10k_tlv_attr_put_u32(msg, FM10K_MAC_VLAN_MSG_VLAN, vid);
+
+ /* load onto outgoing mailbox */
+ return mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+/**
+ * fm10k_msg_mac_vlan_vf - Read device MAC address from mailbox message
+ * @hw: pointer to the HW structure
+ * @results: Attributes for message
+ * @mbx: unused mailbox data
+ *
+ * This function should determine the MAC address for the VF
+ **/
+s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ u8 perm_addr[ETH_ALEN];
+ u16 vid;
+ s32 err;
+
+ UNREFERENCED_1PARAMETER(mbx);
+ DEBUGFUNC("fm10k_msg_mac_vlan_vf");
+
+ /* record MAC address requested */
+ err = fm10k_tlv_attr_get_mac_vlan(
+ results[FM10K_MAC_VLAN_MSG_DEFAULT_MAC],
+ perm_addr, &vid);
+ if (err)
+ return err;
+
+ memcpy(hw->mac.perm_addr, perm_addr, ETH_ALEN);
+ hw->mac.default_vid = vid & (FM10K_VLAN_TABLE_VID_MAX - 1);
+ hw->mac.vlan_override = !!(vid & FM10K_VLAN_OVERRIDE);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_read_mac_addr_vf - Read device MAC address
+ * @hw: pointer to the HW structure
+ *
+ * This function should determine the MAC address for the VF
+ **/
+STATIC s32 fm10k_read_mac_addr_vf(struct fm10k_hw *hw)
+{
+ u8 perm_addr[ETH_ALEN];
+ u32 base_addr;
+
+ DEBUGFUNC("fm10k_read_mac_addr_vf");
+
+ base_addr = FM10K_READ_REG(hw, FM10K_TDBAL(0));
+
+ /* last byte should be 0 */
+ if (base_addr << 24)
+ return FM10K_ERR_INVALID_MAC_ADDR;
+
+ perm_addr[3] = (u8)(base_addr >> 24);
+ perm_addr[4] = (u8)(base_addr >> 16);
+ perm_addr[5] = (u8)(base_addr >> 8);
+
+ base_addr = FM10K_READ_REG(hw, FM10K_TDBAH(0));
+
+ /* first byte should be all 1's */
+ if ((~base_addr) >> 24)
+ return FM10K_ERR_INVALID_MAC_ADDR;
+
+ perm_addr[0] = (u8)(base_addr >> 16);
+ perm_addr[1] = (u8)(base_addr >> 8);
+ perm_addr[2] = (u8)(base_addr);
+
+ memcpy(hw->mac.perm_addr, perm_addr, ETH_ALEN);
+ memcpy(hw->mac.addr, perm_addr, ETH_ALEN);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_update_uc_addr_vf - Update device unicast addresses
+ * @hw: pointer to the HW structure
+ * @glort: unused
+ * @mac: MAC address to add/remove from table
+ * @vid: VLAN ID to add/remove from table
+ * @add: Indicates if this is an add or remove operation
+ * @flags: flags field to indicate add and secure - unused
+ *
+ * This function is used to add or remove unicast MAC addresses for
+ * the VF.
+ **/
+STATIC s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort,
+ const u8 *mac, u16 vid, bool add, u8 flags)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ u32 msg[7];
+
+ DEBUGFUNC("fm10k_update_uc_addr_vf");
+
+ UNREFERENCED_2PARAMETER(glort, flags);
+
+ /* verify VLAN ID is valid */
+ if (vid >= FM10K_VLAN_TABLE_VID_MAX)
+ return FM10K_ERR_PARAM;
+
+ /* verify MAC address is valid */
+ if (!IS_VALID_ETHER_ADDR(mac))
+ return FM10K_ERR_PARAM;
+
+ /* verify we are not locked down on the MAC address */
+ if (IS_VALID_ETHER_ADDR(hw->mac.perm_addr) &&
+ memcmp(hw->mac.perm_addr, mac, ETH_ALEN))
+ return FM10K_ERR_PARAM;
+
+ /* add bit to notify us if this is a set or clear operation */
+ if (!add)
+ vid |= FM10K_VLAN_CLEAR;
+
+ /* generate VLAN request */
+ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN);
+ fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_MAC, mac, vid);
+
+ /* load onto outgoing mailbox */
+ return mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+/**
+ * fm10k_update_mc_addr_vf - Update device multicast addresses
+ * @hw: pointer to the HW structure
+ * @glort: unused
+ * @mac: MAC address to add/remove from table
+ * @vid: VLAN ID to add/remove from table
+ * @add: Indicates if this is an add or remove operation
+ *
+ * This function is used to add or remove multicast MAC addresses for
+ * the VF.
+ **/
+STATIC s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw, u16 glort,
+ const u8 *mac, u16 vid, bool add)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ u32 msg[7];
+
+ DEBUGFUNC("fm10k_update_uc_addr_vf");
+
+ UNREFERENCED_1PARAMETER(glort);
+
+ /* verify VLAN ID is valid */
+ if (vid >= FM10K_VLAN_TABLE_VID_MAX)
+ return FM10K_ERR_PARAM;
+
+ /* verify multicast address is valid */
+ if (!IS_MULTICAST_ETHER_ADDR(mac))
+ return FM10K_ERR_PARAM;
+
+ /* add bit to notify us if this is a set or clear operation */
+ if (!add)
+ vid |= FM10K_VLAN_CLEAR;
+
+ /* generate VLAN request */
+ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN);
+ fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_MULTICAST,
+ mac, vid);
+
+ /* load onto outgoing mailbox */
+ return mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+/**
+ * fm10k_update_int_moderator_vf - Request update of interrupt moderator list
+ * @hw: pointer to hardware structure
+ *
+ * This function will issue a request to the PF to rescan our MSI-X table
+ * and to update the interrupt moderator linked list.
+ **/
+STATIC void fm10k_update_int_moderator_vf(struct fm10k_hw *hw)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ u32 msg[1];
+
+ /* generate MSI-X request */
+ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MSIX);
+
+ /* load onto outgoing mailbox */
+ mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+/* This structure defines the attributes to be parsed below */
+const struct fm10k_tlv_attr fm10k_lport_state_msg_attr[] = {
+ FM10K_TLV_ATTR_BOOL(FM10K_LPORT_STATE_MSG_DISABLE),
+ FM10K_TLV_ATTR_U8(FM10K_LPORT_STATE_MSG_XCAST_MODE),
+ FM10K_TLV_ATTR_BOOL(FM10K_LPORT_STATE_MSG_READY),
+ FM10K_TLV_ATTR_LAST
+};
+
+/**
+ * fm10k_msg_lport_state_vf - Message handler for lport_state message from PF
+ * @hw: Pointer to hardware structure
+ * @results: pointer array containing parsed data
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This handler is meant to capture the indication from the PF that we
+ * are ready to bring up the interface.
+ **/
+s32 fm10k_msg_lport_state_vf(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ UNREFERENCED_1PARAMETER(mbx);
+ DEBUGFUNC("fm10k_msg_lport_state_vf");
+
+ hw->mac.dglort_map = !results[FM10K_LPORT_STATE_MSG_READY] ?
+ FM10K_DGLORTMAP_NONE : FM10K_DGLORTMAP_ZERO;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_update_lport_state_vf - Update device state in lower device
+ * @hw: pointer to the HW structure
+ * @glort: unused
+ * @count: number of logical ports to enable - unused (always 1)
+ * @enable: boolean value indicating if this is an enable or disable request
+ *
+ * Notify the lower device of a state change. If the lower device is
+ * enabled we can add filters, if it is disabled all filters for this
+ * logical port are flushed.
+ **/
+STATIC s32 fm10k_update_lport_state_vf(struct fm10k_hw *hw, u16 glort,
+ u16 count, bool enable)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ u32 msg[2];
+
+ UNREFERENCED_2PARAMETER(glort, count);
+ DEBUGFUNC("fm10k_update_lport_state_vf");
+
+ /* reset glort mask 0 as we have to wait to be enabled */
+ hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;
+
+ /* generate port state request */
+ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
+ if (!enable)
+ fm10k_tlv_attr_put_bool(msg, FM10K_LPORT_STATE_MSG_DISABLE);
+
+ /* load onto outgoing mailbox */
+ return mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+/**
+ * fm10k_update_xcast_mode_vf - Request update of multicast mode
+ * @hw: pointer to hardware structure
+ * @glort: unused
+ * @mode: integer value indicating mode being requested
+ *
+ * This function will attempt to request a higher mode for the port
+ * so that it can enable either multicast, multicast promiscuous, or
+ * promiscuous mode of operation.
+ **/
+STATIC s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw, u16 glort, u8 mode)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ u32 msg[3];
+
+ UNREFERENCED_1PARAMETER(glort);
+ DEBUGFUNC("fm10k_update_xcast_mode_vf");
+
+ if (mode > FM10K_XCAST_MODE_NONE)
+ return FM10K_ERR_PARAM;
+
+ /* generate message requesting to change xcast mode */
+ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
+ fm10k_tlv_attr_put_u8(msg, FM10K_LPORT_STATE_MSG_XCAST_MODE, mode);
+
+ /* load onto outgoing mailbox */
+ return mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+const struct fm10k_tlv_attr fm10k_1588_msg_attr[] = {
+ FM10K_TLV_ATTR_U64(FM10K_1588_MSG_CLK_OFFSET),
+ FM10K_TLV_ATTR_LAST
+};
+
+/* currently there is no shared 1588 message handler */
+
+/**
+ * fm10k_update_hw_stats_vf - Updates hardware related statistics of VF
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ *
+ * This function collects and aggregates per queue hardware statistics.
+ **/
+void fm10k_update_hw_stats_vf(struct fm10k_hw *hw,
+ struct fm10k_hw_stats *stats)
+{
+ DEBUGFUNC("fm10k_update_hw_stats_vf");
+
+ fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues);
+}
+
+/**
+ * fm10k_rebind_hw_stats_vf - Resets base for hardware statistics of VF
+ * @hw: pointer to hardware structure
+ * @stats: pointer to the stats structure to update
+ *
+ * This function resets the base for queue hardware statistics.
+ **/
+void fm10k_rebind_hw_stats_vf(struct fm10k_hw *hw,
+ struct fm10k_hw_stats *stats)
+{
+ DEBUGFUNC("fm10k_rebind_hw_stats_vf");
+
+ /* Unbind Queue Statistics */
+ fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues);
+
+ /* Reinitialize bases for all stats */
+ fm10k_update_hw_stats_vf(hw, stats);
+}
+
+/**
+ * fm10k_configure_dglort_map_vf - Configures GLORT entry and queues
+ * @hw: pointer to hardware structure
+ * @dglort: pointer to dglort configuration structure
+ *
+ * Reads the configuration structure contained in dglort_cfg and uses
+ * that information to then populate a DGLORTMAP/DEC entry and the queues
+ * to which it has been assigned.
+ **/
+STATIC s32 fm10k_configure_dglort_map_vf(struct fm10k_hw *hw,
+ struct fm10k_dglort_cfg *dglort)
+{
+ UNREFERENCED_1PARAMETER(hw);
+ DEBUGFUNC("fm10k_configure_dglort_map_vf");
+
+ /* verify the dglort pointer */
+ if (!dglort)
+ return FM10K_ERR_PARAM;
+
+ /* stub for now until we determine correct message for this */
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_adjust_systime_vf - Adjust systime frequency
+ * @hw: pointer to hardware structure
+ * @ppb: adjustment rate in parts per billion
+ *
+ * This function takes an adjustment rate in parts per billion and will
+ * verify that this value is 0 as the VF cannot support adjusting the
+ * systime clock.
+ *
+ * If the ppb value is non-zero the return is ERR_PARAM else success
+ **/
+STATIC s32 fm10k_adjust_systime_vf(struct fm10k_hw *hw, s32 ppb)
+{
+ UNREFERENCED_1PARAMETER(hw);
+ DEBUGFUNC("fm10k_adjust_systime_vf");
+
+	/* The VF cannot adjust the clock frequency. However, it should
+	 * already have a clock syntonic with whichever host interface is
+	 * running as the master for the host interface clock domain, so
+	 * no frequency adjustment should be necessary.
+	 */
+ return ppb ? FM10K_ERR_PARAM : FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_read_systime_vf - Reads value of systime registers
+ * @hw: pointer to the hardware structure
+ *
+ * This function reads the content of 2 registers that combine to represent
+ * a 64 bit value measured in nanoseconds. In order to guarantee the value
+ * is accurate, we check the 32 most significant bits both before and after
+ * reading the 32 least significant bits to verify they didn't change as we
+ * were reading the registers.
+ **/
+static u64 fm10k_read_systime_vf(struct fm10k_hw *hw)
+{
+ u32 systime_l, systime_h, systime_tmp;
+
+ systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1);
+
+ do {
+ systime_tmp = systime_h;
+ systime_l = fm10k_read_reg(hw, FM10K_VFSYSTIME);
+ systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1);
+ } while (systime_tmp != systime_h);
+
+ return ((u64)systime_h << 32) | systime_l;
+}
+
+static const struct fm10k_msg_data fm10k_msg_data_vf[] = {
+ FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
+ FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
+ FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
+ FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
+};
+
+/**
+ * fm10k_init_ops_vf - Inits func ptrs and MAC type
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the function pointers and assign the MAC type for VF.
+ * Does not touch the hardware.
+ **/
+s32 fm10k_init_ops_vf(struct fm10k_hw *hw)
+{
+ struct fm10k_mac_info *mac = &hw->mac;
+
+ DEBUGFUNC("fm10k_init_ops_vf");
+
+ fm10k_init_ops_generic(hw);
+
+ mac->ops.reset_hw = &fm10k_reset_hw_vf;
+ mac->ops.init_hw = &fm10k_init_hw_vf;
+ mac->ops.start_hw = &fm10k_start_hw_generic;
+ mac->ops.stop_hw = &fm10k_stop_hw_vf;
+#ifndef NO_IS_SLOT_APPROPRIATE_CHECK
+ mac->ops.is_slot_appropriate = &fm10k_is_slot_appropriate_vf;
+#endif
+ mac->ops.update_vlan = &fm10k_update_vlan_vf;
+ mac->ops.read_mac_addr = &fm10k_read_mac_addr_vf;
+ mac->ops.update_uc_addr = &fm10k_update_uc_addr_vf;
+ mac->ops.update_mc_addr = &fm10k_update_mc_addr_vf;
+ mac->ops.update_xcast_mode = &fm10k_update_xcast_mode_vf;
+ mac->ops.update_int_moderator = &fm10k_update_int_moderator_vf;
+ mac->ops.update_lport_state = &fm10k_update_lport_state_vf;
+ mac->ops.update_hw_stats = &fm10k_update_hw_stats_vf;
+ mac->ops.rebind_hw_stats = &fm10k_rebind_hw_stats_vf;
+ mac->ops.configure_dglort_map = &fm10k_configure_dglort_map_vf;
+ mac->ops.get_host_state = &fm10k_get_host_state_generic;
+ mac->ops.adjust_systime = &fm10k_adjust_systime_vf;
+ mac->ops.read_systime = &fm10k_read_systime_vf;
+
+ mac->max_msix_vectors = fm10k_get_pcie_msix_count_generic(hw);
+
+ return fm10k_pfvf_mbx_init(hw, &hw->mbx, fm10k_msg_data_vf, 0);
+}
diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_vf.h b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_vf.h
new file mode 100644
index 000000000..c90880df1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_vf.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013 - 2015 Intel Corporation
+ */
+
+#ifndef _FM10K_VF_H_
+#define _FM10K_VF_H_
+
+#include "fm10k_type.h"
+#include "fm10k_common.h"
+
+enum fm10k_vf_tlv_msg_id {
+ FM10K_VF_MSG_ID_TEST = 0, /* msg ID reserved for testing */
+ FM10K_VF_MSG_ID_MSIX,
+ FM10K_VF_MSG_ID_MAC_VLAN,
+ FM10K_VF_MSG_ID_LPORT_STATE,
+ FM10K_VF_MSG_ID_1588,
+ FM10K_VF_MSG_ID_MAX,
+};
+
+enum fm10k_tlv_mac_vlan_attr_id {
+ FM10K_MAC_VLAN_MSG_VLAN,
+ FM10K_MAC_VLAN_MSG_SET,
+ FM10K_MAC_VLAN_MSG_MAC,
+ FM10K_MAC_VLAN_MSG_DEFAULT_MAC,
+ FM10K_MAC_VLAN_MSG_MULTICAST,
+ FM10K_MAC_VLAN_MSG_ID_MAX
+};
+
+enum fm10k_tlv_lport_state_attr_id {
+ FM10K_LPORT_STATE_MSG_DISABLE,
+ FM10K_LPORT_STATE_MSG_XCAST_MODE,
+ FM10K_LPORT_STATE_MSG_READY,
+ FM10K_LPORT_STATE_MSG_MAX
+};
+
+enum fm10k_tlv_1588_attr_id {
+ FM10K_1588_MSG_TIMESTAMP = 0, /* deprecated */
+ FM10K_1588_MSG_CLK_OFFSET,
+ FM10K_1588_MSG_MAX
+};
+
+#define FM10K_VF_MSG_MSIX_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_MSIX, NULL, func)
+
+s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
+extern const struct fm10k_tlv_attr fm10k_mac_vlan_msg_attr[];
+#define FM10K_VF_MSG_MAC_VLAN_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_MAC_VLAN, \
+ fm10k_mac_vlan_msg_attr, func)
+
+s32 fm10k_msg_lport_state_vf(struct fm10k_hw *, u32 **,
+ struct fm10k_mbx_info *);
+extern const struct fm10k_tlv_attr fm10k_lport_state_msg_attr[];
+#define FM10K_VF_MSG_LPORT_STATE_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_LPORT_STATE, \
+ fm10k_lport_state_msg_attr, func)
+
+extern const struct fm10k_tlv_attr fm10k_1588_msg_attr[];
+#define FM10K_VF_MSG_1588_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_1588, fm10k_1588_msg_attr, func)
+
+s32 fm10k_init_ops_vf(struct fm10k_hw *hw);
+
+void fm10k_update_hw_stats_vf(struct fm10k_hw *hw,
+ struct fm10k_hw_stats *stats);
+void fm10k_rebind_hw_stats_vf(struct fm10k_hw *hw,
+ struct fm10k_hw_stats *stats);
+#endif /* _FM10K_VF_H_ */
diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/meson.build b/src/spdk/dpdk/drivers/net/fm10k/base/meson.build
new file mode 100644
index 000000000..6ac11b201
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/base/meson.build
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+sources = [
+ 'fm10k_api.c',
+ 'fm10k_common.c',
+ 'fm10k_mbx.c',
+ 'fm10k_pf.c',
+ 'fm10k_tlv.c',
+ 'fm10k_vf.c'
+]
+
+error_cflags = ['-Wno-unused-parameter', '-Wno-unused-value',
+ '-Wno-strict-aliasing', '-Wno-format-extra-args',
+ '-Wno-unused-variable',
+ '-Wno-implicit-fallthrough'
+]
+c_args = cflags
+foreach flag: error_cflags
+ if cc.has_argument(flag)
+ c_args += flag
+ endif
+endforeach
+
+base_lib = static_library('fm10k_base', sources,
+ dependencies: static_rte_eal,
+ c_args: c_args)
+base_objs = base_lib.extract_all_objects()
diff --git a/src/spdk/dpdk/drivers/net/fm10k/fm10k.h b/src/spdk/dpdk/drivers/net/fm10k/fm10k.h
new file mode 100644
index 000000000..916b856ac
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/fm10k.h
@@ -0,0 +1,356 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013-2015 Intel Corporation
+ */
+
+#ifndef _FM10K_H_
+#define _FM10K_H_
+
+#include <stdint.h>
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_spinlock.h>
+#include "fm10k_logs.h"
+#include "base/fm10k_type.h"
+
+/* descriptor ring base addresses must be aligned to the following */
+#define FM10K_ALIGN_RX_DESC 128
+#define FM10K_ALIGN_TX_DESC 128
+
+/* The maximum packet size that FM10K supports */
+#define FM10K_MAX_PKT_SIZE (15 * 1024)
+
+/* Minimum RX buffer size that FM10K supports */
+#define FM10K_MIN_RX_BUF_SIZE 256
+
+/* Maximum number of SR-IOV VFs supported per port */
+#define FM10K_MAX_VF_NUM 64
+
+/* number of descriptors must be a multiple of the following */
+#define FM10K_MULT_RX_DESC FM10K_REQ_RX_DESCRIPTOR_MULTIPLE
+#define FM10K_MULT_TX_DESC FM10K_REQ_TX_DESCRIPTOR_MULTIPLE
+
+/* maximum size of descriptor rings */
+#define FM10K_MAX_RX_RING_SZ (512 * 1024)
+#define FM10K_MAX_TX_RING_SZ (512 * 1024)
+
+/* minimum and maximum number of descriptors in a ring */
+#define FM10K_MIN_RX_DESC 32
+#define FM10K_MIN_TX_DESC 32
+#define FM10K_MAX_RX_DESC (FM10K_MAX_RX_RING_SZ / sizeof(union fm10k_rx_desc))
+#define FM10K_MAX_TX_DESC (FM10K_MAX_TX_RING_SZ / sizeof(struct fm10k_tx_desc))
+
+#define FM10K_TX_MAX_SEG UINT8_MAX
+#define FM10K_TX_MAX_MTU_SEG UINT8_MAX
+
+/*
+ * byte alignment for HW RX data buffers
+ * The datasheet requires that RX buffer addresses either be 512-byte
+ * aligned or be 8-byte aligned without crossing host memory pages (4KB
+ * alignment boundaries). We satisfy the first option.
+ */
+#define FM10K_RX_DATABUF_ALIGN 512
+
+/*
+ * threshold default, min, max, and divisor constraints
+ * the configured values must satisfy the following:
+ * MIN <= value <= MAX
+ * DIV % value == 0
+ */
+#define FM10K_RX_FREE_THRESH_DEFAULT(rxq) 32
+#define FM10K_RX_FREE_THRESH_MIN(rxq) 1
+#define FM10K_RX_FREE_THRESH_MAX(rxq) ((rxq)->nb_desc - 1)
+#define FM10K_RX_FREE_THRESH_DIV(rxq) ((rxq)->nb_desc)
+
+#define FM10K_TX_FREE_THRESH_DEFAULT(txq) 32
+#define FM10K_TX_FREE_THRESH_MIN(txq) 1
+#define FM10K_TX_FREE_THRESH_MAX(txq) ((txq)->nb_desc - 3)
+#define FM10K_TX_FREE_THRESH_DIV(txq) 0
+
+#define FM10K_DEFAULT_RX_PTHRESH 8
+#define FM10K_DEFAULT_RX_HTHRESH 8
+#define FM10K_DEFAULT_RX_WTHRESH 0
+
+#define FM10K_DEFAULT_TX_PTHRESH 32
+#define FM10K_DEFAULT_TX_HTHRESH 0
+#define FM10K_DEFAULT_TX_WTHRESH 0
+
+#define FM10K_TX_RS_THRESH_DEFAULT(txq) 32
+#define FM10K_TX_RS_THRESH_MIN(txq) 1
+#define FM10K_TX_RS_THRESH_MAX(txq) \
+ RTE_MIN(((txq)->nb_desc - 2), (txq)->free_thresh)
+#define FM10K_TX_RS_THRESH_DIV(txq) ((txq)->nb_desc)
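+/* Example: a 512-descriptor Rx ring accepts the default free_thresh of 32,
+ * since 1 <= 32 <= 511 and 512 % 32 == 0.
+ */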
+
+#define FM10K_VLAN_TAG_SIZE 4
+
+/* Maximum number of MAC addresses per PF/VF */
+#define FM10K_MAX_MACADDR_NUM 64
+
+#define FM10K_UINT32_BIT_SIZE (CHAR_BIT * sizeof(uint32_t))
+#define FM10K_VFTA_SIZE (4096 / FM10K_UINT32_BIT_SIZE)
+
+/* vlan_id is a 12 bit number.
+ * The VFTA array is actually a 4096 bit array, i.e. 128 32-bit elements.
+ * 2^5 = 32, so the lower 5 bits select the bit within a 32-bit element
+ * and the upper 7 bits select the VFTA array index.
+ */
+#define FM10K_VFTA_BIT(vlan_id) (1 << ((vlan_id) & 0x1F))
+#define FM10K_VFTA_IDX(vlan_id) ((vlan_id) >> 5)
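+/* Example: vlan_id 100 lands in vfta[3] (FM10K_VFTA_IDX(100) == 100 >> 5)
+ * at bit 4 (FM10K_VFTA_BIT(100) == 1 << (100 & 0x1F) == 1 << 4).
+ */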
+
+#define RTE_FM10K_RXQ_REARM_THRESH 32
+#define RTE_FM10K_VPMD_TX_BURST 32
+#define RTE_FM10K_MAX_RX_BURST RTE_FM10K_RXQ_REARM_THRESH
+#define RTE_FM10K_TX_MAX_FREE_BUF_SZ 64
+#define RTE_FM10K_DESCS_PER_LOOP 4
+
+#define FM10K_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
+#define FM10K_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
+
+struct fm10k_macvlan_filter_info {
+ uint16_t vlan_num; /* Total VLAN number */
+ uint16_t mac_num; /* Total mac number */
+ uint16_t nb_queue_pools; /* Active queue pools number */
+ /* VMDQ ID for each MAC address */
+ uint8_t mac_vmdq_id[FM10K_MAX_MACADDR_NUM];
+ uint32_t vfta[FM10K_VFTA_SIZE]; /* VLAN bitmap */
+};
+
+struct fm10k_dev_info {
+ volatile uint32_t enable;
+ volatile uint32_t glort;
+ /* Protect the mailbox to avoid race condition */
+ rte_spinlock_t mbx_lock;
+ struct fm10k_macvlan_filter_info macvlan;
+	/* Flag to indicate if RX vector conditions are satisfied */
+ bool rx_vec_allowed;
+ bool sm_down;
+};
+
+/*
+ * Structure to store private data for each driver instance.
+ */
+struct fm10k_adapter {
+ struct fm10k_hw hw;
+ struct fm10k_hw_stats stats;
+ struct fm10k_dev_info info;
+};
+
+#define FM10K_DEV_PRIVATE_TO_HW(adapter) \
+ (&((struct fm10k_adapter *)adapter)->hw)
+
+#define FM10K_DEV_PRIVATE_TO_STATS(adapter) \
+ (&((struct fm10k_adapter *)adapter)->stats)
+
+#define FM10K_DEV_PRIVATE_TO_INFO(adapter) \
+ (&((struct fm10k_adapter *)adapter)->info)
+
+#define FM10K_DEV_PRIVATE_TO_MBXLOCK(adapter) \
+ (&(((struct fm10k_adapter *)adapter)->info.mbx_lock))
+
+#define FM10K_DEV_PRIVATE_TO_MACVLAN(adapter) \
+ (&(((struct fm10k_adapter *)adapter)->info.macvlan))
+
+struct fm10k_rx_queue {
+ struct rte_mempool *mp;
+ struct rte_mbuf **sw_ring;
+ volatile union fm10k_rx_desc *hw_ring;
+ struct rte_mbuf *pkt_first_seg; /* First segment of current packet. */
+ struct rte_mbuf *pkt_last_seg; /* Last segment of current packet. */
+ uint64_t hw_ring_phys_addr;
+ uint64_t mbuf_initializer; /* value to init mbufs */
+	/* need to alloc a dummy mbuf for wraparound when scanning the hw ring */
+ struct rte_mbuf fake_mbuf;
+ uint16_t next_dd;
+ uint16_t next_alloc;
+ uint16_t next_trigger;
+ uint16_t alloc_thresh;
+ volatile uint32_t *tail_ptr;
+ uint16_t nb_desc;
+	/* Number of fake descriptors added at the tail for the vector RX path */
+ uint16_t nb_fake_desc;
+ uint16_t queue_id;
+	/* The below 2 fields are only valid when vPMD is applied. */
+ uint16_t rxrearm_nb; /* number of remaining to be re-armed */
+ uint16_t rxrearm_start; /* the idx we start the re-arming from */
+ uint16_t rx_using_sse; /* indicates that vector RX is in use */
+ uint16_t port_id;
+ uint8_t drop_en;
+ uint8_t rx_deferred_start; /* don't start this queue in dev start. */
+ uint16_t rx_ftag_en; /* indicates FTAG RX supported */
+ uint64_t offloads; /* offloads of DEV_RX_OFFLOAD_* */
+};
+
+/*
+ * a FIFO is used to track which descriptors have their RS bit set for Tx
+ * queues which are configured to allow multiple descriptors per packet
+ */
+struct fifo {
+ uint16_t *list;
+ uint16_t *head;
+ uint16_t *tail;
+ uint16_t *endp;
+};
+
+struct fm10k_txq_ops;
+
+struct fm10k_tx_queue {
+ struct rte_mbuf **sw_ring;
+ struct fm10k_tx_desc *hw_ring;
+ uint64_t hw_ring_phys_addr;
+ struct fifo rs_tracker;
+ const struct fm10k_txq_ops *ops; /* txq ops */
+ uint16_t last_free;
+ uint16_t next_free;
+ uint16_t nb_free;
+ uint16_t nb_used;
+ uint16_t free_thresh;
+ uint16_t rs_thresh;
+	/* The below 2 fields are only valid when vPMD is applied. */
+ uint16_t next_rs; /* Next pos to set RS flag */
+ uint16_t next_dd; /* Next pos to check DD flag */
+ volatile uint32_t *tail_ptr;
+ uint64_t offloads; /* Offloads of DEV_TX_OFFLOAD_* */
+ uint16_t nb_desc;
+ uint16_t port_id;
+	uint8_t tx_deferred_start; /* don't start this queue in dev start. */
+ uint16_t queue_id;
+ uint16_t tx_ftag_en; /* indicates FTAG TX supported */
+};
+
+struct fm10k_txq_ops {
+ void (*reset)(struct fm10k_tx_queue *txq);
+};
+
+#define MBUF_DMA_ADDR(mb) \
+ ((uint64_t) ((mb)->buf_iova + (mb)->data_off))
+
+/* enforce 512B alignment on default Rx DMA addresses */
+#define MBUF_DMA_ADDR_DEFAULT(mb) \
+ ((uint64_t) RTE_ALIGN(((mb)->buf_iova + RTE_PKTMBUF_HEADROOM),\
+ FM10K_RX_DATABUF_ALIGN))
+
+static inline void fifo_reset(struct fifo *fifo, uint32_t len)
+{
+ fifo->head = fifo->tail = fifo->list;
+ fifo->endp = fifo->list + len;
+}
+
+static inline void fifo_insert(struct fifo *fifo, uint16_t val)
+{
+ *fifo->head = val;
+ if (++fifo->head == fifo->endp)
+ fifo->head = fifo->list;
+}
+
+/* we need not worry about the list being empty, since it is only checked
+ * once enough descriptors have been used to set the RS bit at least once */
+static inline uint16_t fifo_peek(struct fifo *fifo)
+{
+ return *fifo->tail;
+}
+
+static inline uint16_t fifo_remove(struct fifo *fifo)
+{
+ uint16_t val;
+ val = *fifo->tail;
+ if (++fifo->tail == fifo->endp)
+ fifo->tail = fifo->list;
+ return val;
+}
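+
+/* A sketch of the intended flow (per the rs_tracker comment above): the Tx
+ * path fifo_insert()s the index of each descriptor on which it sets the RS
+ * bit, and the cleanup path fifo_peek()s/fifo_remove()s those indices to
+ * know which descriptor to poll for DD before freeing the mbufs behind it.
+ */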
+
+static inline void
+fm10k_pktmbuf_reset(struct rte_mbuf *mb, uint16_t in_port)
+{
+ rte_mbuf_refcnt_set(mb, 1);
+ mb->next = NULL;
+ mb->nb_segs = 1;
+
+ /* enforce 512B alignment on default Rx virtual addresses */
+ mb->data_off = (uint16_t)(RTE_PTR_ALIGN((char *)mb->buf_addr +
+ RTE_PKTMBUF_HEADROOM, FM10K_RX_DATABUF_ALIGN)
+ - (char *)mb->buf_addr);
+ mb->port = in_port;
+}
+
+/*
+ * Verify Rx packet buffer alignment is valid.
+ *
+ * Hardware requires specific alignment for Rx packet buffers. At
+ * least one of the following two conditions must be satisfied.
+ * 1. Address is 512B aligned
+ * 2. Address is 8B aligned and buffer does not cross 4K boundary.
+ *
+ * Return 1 if buffer alignment satisfies at least one condition,
+ * otherwise return 0.
+ *
+ * Note: Alignment is checked by the driver when the Rx queue is reset. It
+ * is assumed that if an entire descriptor ring can be filled with
+ * buffers containing valid alignment, then all buffers in that mempool
+ * have valid address alignment. It is the responsibility of the user
+ * to ensure all buffers have valid alignment, as it is the user who
+ * creates the mempool.
+ * Note: It is assumed the buffer needs only to store a maximum size Ethernet
+ * frame.
+ */
+static inline int
+fm10k_addr_alignment_valid(struct rte_mbuf *mb)
+{
+ uint64_t addr = MBUF_DMA_ADDR_DEFAULT(mb);
+ uint64_t boundary1, boundary2;
+
+ /* 512B aligned? */
+ if (RTE_ALIGN(addr, FM10K_RX_DATABUF_ALIGN) == addr)
+ return 1;
+
+ /* 8B aligned, and max Ethernet frame would not cross a 4KB boundary? */
+ if (RTE_ALIGN(addr, 8) == addr) {
+ boundary1 = RTE_ALIGN_FLOOR(addr, 4096);
+ boundary2 = RTE_ALIGN_FLOOR(addr + RTE_ETHER_MAX_VLAN_FRAME_LEN,
+ 4096);
+ if (boundary1 == boundary2)
+ return 1;
+ }
+
+ PMD_INIT_LOG(ERR, "Error: Invalid buffer alignment!");
+
+ return 0;
+}
+
+/* Rx and Tx prototypes */
+uint16_t fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t fm10k_recv_scattered_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
+uint32_t
+fm10k_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
+int
+fm10k_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
+
+int
+fm10k_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
+
+int
+fm10k_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
+
+
+uint16_t fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t fm10k_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+int fm10k_rxq_vec_setup(struct fm10k_rx_queue *rxq);
+int fm10k_rx_vec_condition_check(struct rte_eth_dev *);
+void fm10k_rx_queue_release_mbufs_vec(struct fm10k_rx_queue *rxq);
+uint16_t fm10k_recv_pkts_vec(void *, struct rte_mbuf **, uint16_t);
+uint16_t fm10k_recv_scattered_pkts_vec(void *, struct rte_mbuf **,
+ uint16_t);
+uint16_t fm10k_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+void fm10k_txq_vec_setup(struct fm10k_tx_queue *txq);
+int fm10k_tx_vec_condition_check(struct fm10k_tx_queue *txq);
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/fm10k/fm10k_ethdev.c b/src/spdk/dpdk/drivers/net/fm10k/fm10k_ethdev.c
new file mode 100644
index 000000000..f537ab286
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/fm10k_ethdev.c
@@ -0,0 +1,3348 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013-2016 Intel Corporation
+ */
+
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_string_fns.h>
+#include <rte_dev.h>
+#include <rte_spinlock.h>
+#include <rte_kvargs.h>
+
+#include "fm10k.h"
+#include "base/fm10k_api.h"
+
+/* Default delay to acquire mailbox lock */
+#define FM10K_MBXLOCK_DELAY_US 20
+#define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL
+
+#define MAIN_VSI_POOL_NUMBER 0
+
+/* Max try times to acquire switch status */
+#define MAX_QUERY_SWITCH_STATE_TIMES 10
+/* Wait interval to get switch status */
+#define WAIT_SWITCH_MSG_US 100000
+/* A period of quiescence for switch */
+#define FM10K_SWITCH_QUIESCE_US 100000
+/* Number of chars per uint32 type */
+#define CHARS_PER_UINT32 (sizeof(uint32_t))
+#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
+
+/* default 1:1 map from queue ID to interrupt vector ID */
+#define Q2V(pci_dev, queue_id) ((pci_dev)->intr_handle.intr_vec[queue_id])
+
+/* First 64 Logical ports for PF/VMDQ, second 64 for Flow director */
+#define MAX_LPORT_NUM 128
+#define GLORT_FD_Q_BASE 0x40
+#define GLORT_PF_MASK 0xFFC0
+#define GLORT_FD_MASK GLORT_PF_MASK
+#define GLORT_FD_INDEX GLORT_FD_Q_BASE
+
+int fm10k_logtype_init;
+int fm10k_logtype_driver;
+
+#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
+int fm10k_logtype_rx;
+#endif
+#ifdef RTE_LIBRTE_FM10K_DEBUG_TX
+int fm10k_logtype_tx;
+#endif
+#ifdef RTE_LIBRTE_FM10K_DEBUG_TX_FREE
+int fm10k_logtype_tx_free;
+#endif
+
+static void fm10k_close_mbx_service(struct fm10k_hw *hw);
+static int fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static int fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static int fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static int fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static inline int fm10k_glort_valid(struct fm10k_hw *hw);
+static int
+fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
+static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
+ const u8 *mac, bool add, uint32_t pool);
+static void fm10k_tx_queue_release(void *queue);
+static void fm10k_rx_queue_release(void *queue);
+static void fm10k_set_rx_function(struct rte_eth_dev *dev);
+static void fm10k_set_tx_function(struct rte_eth_dev *dev);
+static int fm10k_check_ftag(struct rte_devargs *devargs);
+static int fm10k_link_update(struct rte_eth_dev *dev, int wait_to_complete);
+
+static int fm10k_dev_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);
+static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
+static uint64_t fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);
+static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
+
+struct fm10k_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ unsigned offset;
+};
+
+static const struct fm10k_xstats_name_off fm10k_hw_stats_strings[] = {
+ {"completion_timeout_count", offsetof(struct fm10k_hw_stats, timeout)},
+ {"unsupported_requests_count", offsetof(struct fm10k_hw_stats, ur)},
+ {"completer_abort_count", offsetof(struct fm10k_hw_stats, ca)},
+ {"unsupported_message_count", offsetof(struct fm10k_hw_stats, um)},
+ {"checksum_error_count", offsetof(struct fm10k_hw_stats, xec)},
+ {"vlan_dropped", offsetof(struct fm10k_hw_stats, vlan_drop)},
+ {"loopback_dropped", offsetof(struct fm10k_hw_stats, loopback_drop)},
+ {"rx_mbuf_allocation_errors", offsetof(struct fm10k_hw_stats,
+ nodesc_drop)},
+};
+
+#define FM10K_NB_HW_XSTATS (sizeof(fm10k_hw_stats_strings) / \
+ sizeof(fm10k_hw_stats_strings[0]))
+
+static const struct fm10k_xstats_name_off fm10k_hw_stats_rx_q_strings[] = {
+ {"packets", offsetof(struct fm10k_hw_stats_q, rx_packets)},
+ {"bytes", offsetof(struct fm10k_hw_stats_q, rx_bytes)},
+ {"dropped", offsetof(struct fm10k_hw_stats_q, rx_drops)},
+};
+
+#define FM10K_NB_RX_Q_XSTATS (sizeof(fm10k_hw_stats_rx_q_strings) / \
+ sizeof(fm10k_hw_stats_rx_q_strings[0]))
+
+static const struct fm10k_xstats_name_off fm10k_hw_stats_tx_q_strings[] = {
+ {"packets", offsetof(struct fm10k_hw_stats_q, tx_packets)},
+ {"bytes", offsetof(struct fm10k_hw_stats_q, tx_bytes)},
+};
+
+#define FM10K_NB_TX_Q_XSTATS (sizeof(fm10k_hw_stats_tx_q_strings) / \
+ sizeof(fm10k_hw_stats_tx_q_strings[0]))
+
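+/* Total xstats count: the global hw counters plus, for each PF queue, the
+ * per-queue Rx and Tx counters. fm10k_xstats_get() and
+ * fm10k_xstats_get_names() below emit them in this same fixed order.
+ */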
+#define FM10K_NB_XSTATS (FM10K_NB_HW_XSTATS + FM10K_MAX_QUEUES_PF * \
+ (FM10K_NB_RX_Q_XSTATS + FM10K_NB_TX_Q_XSTATS))
+static int
+fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
+
+static void
+fm10k_mbx_initlock(struct fm10k_hw *hw)
+{
+ rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
+}
+
+static void
+fm10k_mbx_lock(struct fm10k_hw *hw)
+{
+ while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
+ rte_delay_us(FM10K_MBXLOCK_DELAY_US);
+}
+
+static void
+fm10k_mbx_unlock(struct fm10k_hw *hw)
+{
+ rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
+}
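+
+/* All mailbox requests issued below (update_xcast_mode, update_vlan,
+ * update_uc_addr, ...) are wrapped in fm10k_mbx_lock()/fm10k_mbx_unlock()
+ * so that concurrent callers do not interleave mailbox messages.
+ */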
+
+/* Stubs needed for linkage when vPMD is disabled */
+__rte_weak int
+fm10k_rx_vec_condition_check(__rte_unused struct rte_eth_dev *dev)
+{
+ return -1;
+}
+
+__rte_weak uint16_t
+fm10k_recv_pkts_vec(
+ __rte_unused void *rx_queue,
+ __rte_unused struct rte_mbuf **rx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+__rte_weak uint16_t
+fm10k_recv_scattered_pkts_vec(
+ __rte_unused void *rx_queue,
+ __rte_unused struct rte_mbuf **rx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+__rte_weak int
+fm10k_rxq_vec_setup(__rte_unused struct fm10k_rx_queue *rxq)
+
+{
+ return -1;
+}
+
+__rte_weak void
+fm10k_rx_queue_release_mbufs_vec(
+ __rte_unused struct fm10k_rx_queue *rxq)
+{
+ return;
+}
+
+__rte_weak void
+fm10k_txq_vec_setup(__rte_unused struct fm10k_tx_queue *txq)
+{
+ return;
+}
+
+__rte_weak int
+fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq)
+{
+ return -1;
+}
+
+__rte_weak uint16_t
+fm10k_xmit_fixed_burst_vec(__rte_unused void *tx_queue,
+ __rte_unused struct rte_mbuf **tx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
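+
+/* When the vector PMD is compiled in (fm10k_rxtx_vec.c), its strong symbols
+ * override the weak stubs above; otherwise the -1 from the condition checks
+ * steers fm10k_set_rx_function()/fm10k_set_tx_function() to the scalar paths.
+ */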
+
+/*
+ * reset queue to initial state, allocate software buffers used when starting
+ * device.
+ * return 0 on success
+ * return -ENOMEM if buffers cannot be allocated
+ * return -EINVAL if buffers do not satisfy alignment condition
+ */
+static inline int
+rx_queue_reset(struct fm10k_rx_queue *q)
+{
+ static const union fm10k_rx_desc zero = {{0} };
+ uint64_t dma_addr;
+ int i, diag;
+ PMD_INIT_FUNC_TRACE();
+
+ diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
+ if (diag != 0)
+ return -ENOMEM;
+
+ for (i = 0; i < q->nb_desc; ++i) {
+ fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
+ if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
+ rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
+ q->nb_desc);
+ return -EINVAL;
+ }
+ dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
+ q->hw_ring[i].q.pkt_addr = dma_addr;
+ q->hw_ring[i].q.hdr_addr = dma_addr;
+ }
+
+ /* initialize extra software ring entries. Space for these extra
+ * entries is always allocated.
+ */
+ memset(&q->fake_mbuf, 0x0, sizeof(q->fake_mbuf));
+ for (i = 0; i < q->nb_fake_desc; ++i) {
+ q->sw_ring[q->nb_desc + i] = &q->fake_mbuf;
+ q->hw_ring[q->nb_desc + i] = zero;
+ }
+
+ q->next_dd = 0;
+ q->next_alloc = 0;
+ q->next_trigger = q->alloc_thresh - 1;
+ FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
+ q->rxrearm_start = 0;
+ q->rxrearm_nb = 0;
+
+ return 0;
+}
+
+/*
+ * clean queue, descriptor rings, free software buffers used when stopping
+ * device.
+ */
+static inline void
+rx_queue_clean(struct fm10k_rx_queue *q)
+{
+ union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
+ uint32_t i;
+ PMD_INIT_FUNC_TRACE();
+
+ /* zero descriptor rings */
+ for (i = 0; i < q->nb_desc; ++i)
+ q->hw_ring[i] = zero;
+
+	/* zero the fake descriptors */
+ for (i = 0; i < q->nb_fake_desc; ++i)
+ q->hw_ring[q->nb_desc + i] = zero;
+
+ /* vPMD driver has a different way of releasing mbufs. */
+ if (q->rx_using_sse) {
+ fm10k_rx_queue_release_mbufs_vec(q);
+ return;
+ }
+
+ /* free software buffers */
+ for (i = 0; i < q->nb_desc; ++i) {
+ if (q->sw_ring[i]) {
+ rte_pktmbuf_free_seg(q->sw_ring[i]);
+ q->sw_ring[i] = NULL;
+ }
+ }
+}
+
+/*
+ * free all queue memory used when releasing the queue (i.e. configure)
+ */
+static inline void
+rx_queue_free(struct fm10k_rx_queue *q)
+{
+ PMD_INIT_FUNC_TRACE();
+ if (q) {
+ PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
+ rx_queue_clean(q);
+ if (q->sw_ring) {
+ rte_free(q->sw_ring);
+ q->sw_ring = NULL;
+ }
+ rte_free(q);
+ q = NULL;
+ }
+}
+
+/*
+ * disable RX queue, wait until HW finishes the necessary flush operation
+ */
+static inline int
+rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
+{
+ uint32_t reg, i;
+
+ reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
+ FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
+ reg & ~FM10K_RXQCTL_ENABLE);
+
+ /* Wait 100us at most */
+ for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
+ rte_delay_us(1);
+ reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
+ if (!(reg & FM10K_RXQCTL_ENABLE))
+ break;
+ }
+
+ if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * reset queue to initial state, allocate software buffers used when starting
+ * device
+ */
+static inline void
+tx_queue_reset(struct fm10k_tx_queue *q)
+{
+ PMD_INIT_FUNC_TRACE();
+ q->last_free = 0;
+ q->next_free = 0;
+ q->nb_used = 0;
+ q->nb_free = q->nb_desc - 1;
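+	/* the RS bit tracker keeps one slot for every rs_thresh descriptors */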
+ fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
+ FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
+}
+
+/*
+ * clean queue, descriptor rings, free software buffers used when stopping
+ * device
+ */
+static inline void
+tx_queue_clean(struct fm10k_tx_queue *q)
+{
+ struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
+ uint32_t i;
+ PMD_INIT_FUNC_TRACE();
+
+ /* zero descriptor rings */
+ for (i = 0; i < q->nb_desc; ++i)
+ q->hw_ring[i] = zero;
+
+ /* free software buffers */
+ for (i = 0; i < q->nb_desc; ++i) {
+ if (q->sw_ring[i]) {
+ rte_pktmbuf_free_seg(q->sw_ring[i]);
+ q->sw_ring[i] = NULL;
+ }
+ }
+}
+
+/*
+ * free all queue memory used when releasing the queue (i.e. configure)
+ */
+static inline void
+tx_queue_free(struct fm10k_tx_queue *q)
+{
+ PMD_INIT_FUNC_TRACE();
+ if (q) {
+ PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
+ tx_queue_clean(q);
+ if (q->rs_tracker.list) {
+ rte_free(q->rs_tracker.list);
+ q->rs_tracker.list = NULL;
+ }
+ if (q->sw_ring) {
+ rte_free(q->sw_ring);
+ q->sw_ring = NULL;
+ }
+ rte_free(q);
+ q = NULL;
+ }
+}
+
+/*
+ * disable TX queue, wait until HW finishes the necessary flush operation
+ */
+static inline int
+tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
+{
+ uint32_t reg, i;
+
+ reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
+ FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
+ reg & ~FM10K_TXDCTL_ENABLE);
+
+ /* Wait 100us at most */
+ for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
+ rte_delay_us(1);
+ reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
+ if (!(reg & FM10K_TXDCTL_ENABLE))
+ break;
+ }
+
+ if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
+ return -1;
+
+ return 0;
+}
+
+static int
+fm10k_check_mq_mode(struct rte_eth_dev *dev)
+{
+ enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_vmdq_rx_conf *vmdq_conf;
+ uint16_t nb_rx_q = dev->data->nb_rx_queues;
+
+ vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
+
+ if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
+ PMD_INIT_LOG(ERR, "DCB mode is not supported.");
+ return -EINVAL;
+ }
+
+ if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
+ return 0;
+
+ if (hw->mac.type == fm10k_mac_vf) {
+ PMD_INIT_LOG(ERR, "VMDQ mode is not supported in VF.");
+ return -EINVAL;
+ }
+
+ /* Check VMDQ queue pool number */
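+	/* pool_map[0].pools is a 64-bit bitmap, so at most 64 pools can be
+	 * addressed, and there cannot be more pools than Rx queues.
+	 */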
+ if (vmdq_conf->nb_queue_pools >
+ sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT ||
+ vmdq_conf->nb_queue_pools > nb_rx_q) {
+		PMD_INIT_LOG(ERR, "Too many queue pools: %d",
+ vmdq_conf->nb_queue_pools);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct fm10k_txq_ops def_txq_ops = {
+ .reset = tx_queue_reset,
+};
+
+static int
+fm10k_dev_configure(struct rte_eth_dev *dev)
+{
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
+	/* multiple queue mode checking */
+ ret = fm10k_check_mq_mode(dev);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.",
+ ret);
+ return ret;
+ }
+
+ dev->data->scattered_rx = 0;
+
+ return 0;
+}
+
+static void
+fm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_vmdq_rx_conf *vmdq_conf;
+ uint32_t i;
+
+ vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
+
+ for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
+ if (!vmdq_conf->pool_map[i].pools)
+ continue;
+ fm10k_mbx_lock(hw);
+ fm10k_update_vlan(hw, vmdq_conf->pool_map[i].vlan_id, 0, true);
+ fm10k_mbx_unlock(hw);
+ }
+}
+
+static void
+fm10k_dev_pf_main_vsi_reset(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Add default mac address */
+ fm10k_MAC_filter_set(dev, hw->mac.addr, true,
+ MAIN_VSI_POOL_NUMBER);
+}
+
+static void
+fm10k_dev_rss_configure(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ uint32_t mrqc, *key, i, reta, j;
+ uint64_t hf;
+
+#define RSS_KEY_SIZE 40
+ static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
+ 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+ 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+ 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+ 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+ 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
+ };
+
+ if (dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
+ dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) {
+ FM10K_WRITE_REG(hw, FM10K_MRQC(0), 0);
+ return;
+ }
+
+	/* RSS key is rss_intel_key (default) or user-provided (rss_key) */
+ if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
+ key = (uint32_t *)rss_intel_key;
+ else
+ key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;
+
+ /* Now fill our hash function seeds, 4 bytes at a time */
+ for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
+ FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
+
+ /*
+ * Fill in redirection table
+ * The byte-swap is needed because NIC registers are in
+ * little-endian order.
+ */
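+	/* Each 32-bit RETA register packs four one-byte entries; after four
+	 * iterations reta holds (e0 << 24 | e1 << 16 | e2 << 8 | e3), and the
+	 * bswap stores it so that entry 0 ends up in the low byte.
+	 */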
+ reta = 0;
+ for (i = 0, j = 0; i < FM10K_MAX_RSS_INDICES; i++, j++) {
+ if (j == dev->data->nb_rx_queues)
+ j = 0;
+ reta = (reta << CHAR_BIT) | j;
+ if ((i & 3) == 3)
+ FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
+ rte_bswap32(reta));
+ }
+
+ /*
+ * Generate RSS hash based on packet types, TCP/UDP
+ * port numbers and/or IPv4/v6 src and dst addresses
+ */
+ hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
+ mrqc = 0;
+ mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
+ mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
+ mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
+ mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
+ mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
+ mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
+ mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
+ mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
+ mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
+
+ if (mrqc == 0) {
+		PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64" is not "
+				"supported", hf);
+ return;
+ }
+
+ FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
+}
+
+static void
+fm10k_dev_logic_port_update(struct rte_eth_dev *dev, uint16_t nb_lport_new)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t i;
+
+ for (i = 0; i < nb_lport_new; i++) {
+ /* Set unicast mode by default. App can change
+ * to other mode in other API func.
+ */
+ fm10k_mbx_lock(hw);
+ hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map + i,
+ FM10K_XCAST_MODE_NONE);
+ fm10k_mbx_unlock(hw);
+ }
+}
+
+static void
+fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_vmdq_rx_conf *vmdq_conf;
+ struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ struct fm10k_macvlan_filter_info *macvlan;
+ uint16_t nb_queue_pools = 0; /* pool number in configuration */
+ uint16_t nb_lport_new;
+
+ macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
+ vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
+
+ fm10k_dev_rss_configure(dev);
+
+ /* only PF supports VMDQ */
+ if (hw->mac.type != fm10k_mac_pf)
+ return;
+
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+ nb_queue_pools = vmdq_conf->nb_queue_pools;
+
+ /* no pool number change, no need to update logic port and VLAN/MAC */
+ if (macvlan->nb_queue_pools == nb_queue_pools)
+ return;
+
+ nb_lport_new = nb_queue_pools ? nb_queue_pools : 1;
+ fm10k_dev_logic_port_update(dev, nb_lport_new);
+
+ /* reset MAC/VLAN as it's based on VMDQ or PF main VSI */
+ memset(dev->data->mac_addrs, 0,
+ RTE_ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);
+ rte_ether_addr_copy((const struct rte_ether_addr *)hw->mac.addr,
+ &dev->data->mac_addrs[0]);
+ memset(macvlan, 0, sizeof(*macvlan));
+ macvlan->nb_queue_pools = nb_queue_pools;
+
+ if (nb_queue_pools)
+ fm10k_dev_vmdq_rx_configure(dev);
+ else
+ fm10k_dev_pf_main_vsi_reset(dev);
+}
+
+static int
+fm10k_dev_tx_init(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int i, ret;
+ struct fm10k_tx_queue *txq;
+ uint64_t base_addr;
+ uint32_t size;
+
+ /* Disable TXINT to avoid possible interrupt */
+ for (i = 0; i < hw->mac.max_queues; i++)
+ FM10K_WRITE_REG(hw, FM10K_TXINT(i),
+ 3 << FM10K_TXINT_TIMER_SHIFT);
+
+ /* Setup TX queue */
+ for (i = 0; i < dev->data->nb_tx_queues; ++i) {
+ txq = dev->data->tx_queues[i];
+ base_addr = txq->hw_ring_phys_addr;
+ size = txq->nb_desc * sizeof(struct fm10k_tx_desc);
+
+ /* disable queue to avoid issues while updating state */
+ ret = tx_queue_disable(hw, i);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
+ return -1;
+ }
+ /* Enable use of FTAG bit in TX descriptor, PFVTCTL
+ * register is read-only for VF.
+ */
+ if (fm10k_check_ftag(dev->device->devargs)) {
+ if (hw->mac.type == fm10k_mac_pf) {
+ FM10K_WRITE_REG(hw, FM10K_PFVTCTL(i),
+ FM10K_PFVTCTL_FTAG_DESC_ENABLE);
+ PMD_INIT_LOG(DEBUG, "FTAG mode is enabled");
+ } else {
+ PMD_INIT_LOG(ERR, "VF FTAG is not supported.");
+ return -ENOTSUP;
+ }
+ }
+
+ /* set location and size for descriptor ring */
+ FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
+ base_addr & UINT64_LOWER_32BITS_MASK);
+ FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
+ base_addr >> (CHAR_BIT * sizeof(uint32_t)));
+ FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
+
+ /* assign default SGLORT for each TX queue by PF */
+ if (hw->mac.type == fm10k_mac_pf)
+ FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(i), hw->mac.dglort_map);
+ }
+
+ /* set up vector or scalar TX function as appropriate */
+ fm10k_set_tx_function(dev);
+
+ return 0;
+}
+
+static int
+fm10k_dev_rx_init(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct fm10k_macvlan_filter_info *macvlan;
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pdev->intr_handle;
+ int i, ret;
+ struct fm10k_rx_queue *rxq;
+ uint64_t base_addr;
+ uint32_t size;
+ uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
+ uint32_t logic_port = hw->mac.dglort_map;
+ uint16_t buf_size;
+ uint16_t queue_stride = 0;
+
+ /* enable RXINT for interrupt mode */
+ i = 0;
+ if (rte_intr_dp_is_en(intr_handle)) {
+ for (; i < dev->data->nb_rx_queues; i++) {
+ FM10K_WRITE_REG(hw, FM10K_RXINT(i), Q2V(pdev, i));
+ if (hw->mac.type == fm10k_mac_pf)
+ FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
+ FM10K_ITR_AUTOMASK |
+ FM10K_ITR_MASK_CLEAR);
+ else
+ FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
+ FM10K_ITR_AUTOMASK |
+ FM10K_ITR_MASK_CLEAR);
+ }
+ }
+ /* Disable other RXINT to avoid possible interrupt */
+ for (; i < hw->mac.max_queues; i++)
+ FM10K_WRITE_REG(hw, FM10K_RXINT(i),
+ 3 << FM10K_RXINT_TIMER_SHIFT);
+
+ /* Setup RX queues */
+ for (i = 0; i < dev->data->nb_rx_queues; ++i) {
+ rxq = dev->data->rx_queues[i];
+ base_addr = rxq->hw_ring_phys_addr;
+ size = rxq->nb_desc * sizeof(union fm10k_rx_desc);
+
+ /* disable queue to avoid issues while updating state */
+ ret = rx_queue_disable(hw, i);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
+ return -1;
+ }
+
+ /* Setup the Base and Length of the Rx Descriptor Ring */
+ FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
+ base_addr & UINT64_LOWER_32BITS_MASK);
+ FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
+ base_addr >> (CHAR_BIT * sizeof(uint32_t)));
+ FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);
+
+ /* Configure the Rx buffer size for one buff without split */
+ buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
+ RTE_PKTMBUF_HEADROOM);
+		/* As the RX buffer is aligned to 512B within the mbuf, up to
+		 * 511B may be reserved for that alignment. But the SRR
+		 * register assumes all buffers have the same size, so we
+		 * assume the worst case of 512B reserved. Otherwise HW could
+		 * overwrite data into the next mbuf.
+		 */
+ buf_size -= FM10K_RX_DATABUF_ALIGN;
+
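+		/* SRRCTL takes the buffer size in units of
+		 * (1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) bytes.
+		 */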
+ FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
+ (buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT) |
+ FM10K_SRRCTL_LOOPBACK_SUPPRESS);
+
+ /* It adds dual VLAN length for supporting dual VLAN */
+ if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ 2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
+ rxq->offloads & DEV_RX_OFFLOAD_SCATTER) {
+ uint32_t reg;
+ dev->data->scattered_rx = 1;
+ reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
+ reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
+ FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
+ }
+
+ /* Enable drop on empty, it's RO for VF */
+ if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
+ rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
+
+ FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
+ FM10K_WRITE_FLUSH(hw);
+ }
+
+ /* Configure VMDQ/RSS if applicable */
+ fm10k_dev_mq_rx_configure(dev);
+
+ /* Decide the best RX function */
+ fm10k_set_rx_function(dev);
+
+	/* update RX_SGLORT for loopback suppression */
+ if (hw->mac.type != fm10k_mac_pf)
+ return 0;
+ macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
+ if (macvlan->nb_queue_pools)
+ queue_stride = dev->data->nb_rx_queues / macvlan->nb_queue_pools;
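+	/* e.g. 8 Rx queues over 4 pools gives a stride of 2: queues 0-1 use
+	 * logic_port, queues 2-3 logic_port + 1, and so on.
+	 */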
+ for (i = 0; i < dev->data->nb_rx_queues; ++i) {
+ if (i && queue_stride && !(i % queue_stride))
+ logic_port++;
+ FM10K_WRITE_REG(hw, FM10K_RX_SGLORT(i), logic_port);
+ }
+
+ return 0;
+}
+
+static int
+fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int err;
+ uint32_t reg;
+ struct fm10k_rx_queue *rxq;
+
+ PMD_INIT_FUNC_TRACE();
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+ err = rx_queue_reset(rxq);
+ if (err == -ENOMEM) {
+ PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
+ return err;
+ } else if (err == -EINVAL) {
+ PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
+ " %d", err);
+ return err;
+ }
+
+ /* Setup the HW Rx Head and Tail Descriptor Pointers
+ * Note: this must be done AFTER the queue is enabled on real
+ * hardware, but BEFORE the queue is enabled when using the
+ * emulation platform. Do it in both places for now and remove
+ * this comment and the following two register writes when the
+ * emulation platform is no longer being used.
+ */
+ FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
+ FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
+
+ /* Set PF ownership flag for PF devices */
+ reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
+ if (hw->mac.type == fm10k_mac_pf)
+ reg |= FM10K_RXQCTL_PF;
+ reg |= FM10K_RXQCTL_ENABLE;
+ /* enable RX queue */
+ FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
+ FM10K_WRITE_FLUSH(hw);
+
+ /* Setup the HW Rx Head and Tail Descriptor Pointers
+ * Note: this must be done AFTER the queue is enabled
+ */
+ FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
+ FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+static int
+fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Disable RX queue */
+ rx_queue_disable(hw, rx_queue_id);
+
+ /* Free mbuf and clean HW ring */
+ rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+static int
+fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ /** @todo - this should be defined in the shared code */
+#define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY 0x00010000
+ uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
+ struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];
+
+ PMD_INIT_FUNC_TRACE();
+
+ q->ops->reset(q);
+
+ /* reset head and tail pointers */
+ FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
+ FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
+
+ /* enable TX queue */
+ FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
+ FM10K_TXDCTL_ENABLE | txdctl);
+ FM10K_WRITE_FLUSH(hw);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+static int
+fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ tx_queue_disable(hw, tx_queue_id);
+ tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
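+/* The glort range is valid once the switch manager has assigned one, i.e.
+ * once dglort_map no longer carries the FM10K_DGLORTMAP_NONE marker.
+ */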
+static inline int fm10k_glort_valid(struct fm10k_hw *hw)
+{
+ return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
+ != FM10K_DGLORTMAP_NONE);
+}
+
+static int
+fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int status;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Return if it didn't acquire valid glort range */
+ if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
+ return 0;
+
+ fm10k_mbx_lock(hw);
+ status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
+ FM10K_XCAST_MODE_PROMISC);
+ fm10k_mbx_unlock(hw);
+
+ if (status != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static int
+fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint8_t mode;
+ int status;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Return if it didn't acquire valid glort range */
+ if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
+ return 0;
+
+ if (dev->data->all_multicast == 1)
+ mode = FM10K_XCAST_MODE_ALLMULTI;
+ else
+ mode = FM10K_XCAST_MODE_NONE;
+
+ fm10k_mbx_lock(hw);
+ status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
+ mode);
+ fm10k_mbx_unlock(hw);
+
+ if (status != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static int
+fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int status;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Return if it didn't acquire valid glort range */
+ if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
+ return 0;
+
+	/* If promiscuous mode is enabled, there is no point in enabling
+	 * allmulticast and disabling promiscuous, since fm10k can only
+	 * select one of the modes.
+	 */
+ if (dev->data->promiscuous) {
+		PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "\
+			"no need to enable allmulticast");
+ return 0;
+ }
+
+ fm10k_mbx_lock(hw);
+ status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
+ FM10K_XCAST_MODE_ALLMULTI);
+ fm10k_mbx_unlock(hw);
+
+ if (status != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static int
+fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int status;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Return if it didn't acquire valid glort range */
+ if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
+ return 0;
+
+ if (dev->data->promiscuous) {
+ PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "\
+ "since promisc mode is enabled");
+ return -EINVAL;
+ }
+
+ fm10k_mbx_lock(hw);
+ /* Change mode to unicast mode */
+ status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
+ FM10K_XCAST_MODE_NONE);
+ fm10k_mbx_unlock(hw);
+
+ if (status != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static void
+fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t dglortdec, pool_len, rss_len, i, dglortmask;
+ uint16_t nb_queue_pools;
+ struct fm10k_macvlan_filter_info *macvlan;
+
+ macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
+ nb_queue_pools = macvlan->nb_queue_pools;
+ pool_len = nb_queue_pools ? rte_fls_u32(nb_queue_pools - 1) : 0;
+ rss_len = rte_fls_u32(dev->data->nb_rx_queues - 1) - pool_len;
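+	/* e.g. 8 Rx queues split over 2 pools: pool_len = 1 (2^1 pools) and
+	 * rss_len = 2 (2^2 queues per pool).
+	 */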
+
+ /* GLORT 0x0-0x3F are used by PF and VMDQ, 0x40-0x7F used by FD */
+ dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len;
+ dglortmask = (GLORT_PF_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
+ hw->mac.dglort_map;
+ FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), dglortmask);
+ /* Configure VMDQ/RSS DGlort Decoder */
+ FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec);
+
+ /* Flow Director configurations, only queue number is valid. */
+ dglortdec = rte_fls_u32(dev->data->nb_rx_queues - 1);
+ dglortmask = (GLORT_FD_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
+ (hw->mac.dglort_map + GLORT_FD_Q_BASE);
+ FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(1), dglortmask);
+ FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(1), dglortdec);
+
+ /* Invalidate all other GLORT entries */
+ for (i = 2; i < FM10K_DGLORT_COUNT; i++)
+ FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
+ FM10K_DGLORTMAP_NONE);
+}
+
+#define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
+static int
+fm10k_dev_start(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int i, diag;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* stop, init, then start the hw */
+ diag = fm10k_stop_hw(hw);
+ if (diag != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
+ return -EIO;
+ }
+
+ diag = fm10k_init_hw(hw);
+ if (diag != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
+ return -EIO;
+ }
+
+ diag = fm10k_start_hw(hw);
+ if (diag != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
+ return -EIO;
+ }
+
+ diag = fm10k_dev_tx_init(dev);
+ if (diag) {
+ PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
+ return diag;
+ }
+
+ if (fm10k_dev_rxq_interrupt_setup(dev))
+ return -EIO;
+
+ diag = fm10k_dev_rx_init(dev);
+ if (diag) {
+ PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
+ return diag;
+ }
+
+ if (hw->mac.type == fm10k_mac_pf)
+ fm10k_dev_dglort_map_configure(dev);
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct fm10k_rx_queue *rxq;
+ rxq = dev->data->rx_queues[i];
+
+ if (rxq->rx_deferred_start)
+ continue;
+ diag = fm10k_dev_rx_queue_start(dev, i);
+ if (diag != 0) {
+ int j;
+ for (j = 0; j < i; ++j)
+ rx_queue_clean(dev->data->rx_queues[j]);
+ return diag;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct fm10k_tx_queue *txq;
+ txq = dev->data->tx_queues[i];
+
+ if (txq->tx_deferred_start)
+ continue;
+ diag = fm10k_dev_tx_queue_start(dev, i);
+ if (diag != 0) {
+ int j;
+ for (j = 0; j < i; ++j)
+ tx_queue_clean(dev->data->tx_queues[j]);
+ for (j = 0; j < dev->data->nb_rx_queues; ++j)
+ rx_queue_clean(dev->data->rx_queues[j]);
+ return diag;
+ }
+ }
+
+ /* Update default vlan when not in VMDQ mode */
+ if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
+ fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
+
+ fm10k_link_update(dev, 0);
+
+ return 0;
+}
+
+static void
+fm10k_dev_stop(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pdev->intr_handle;
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dev->data->tx_queues)
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ fm10k_dev_tx_queue_stop(dev, i);
+
+ if (dev->data->rx_queues)
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ fm10k_dev_rx_queue_stop(dev, i);
+
+ /* Disable datapath event */
+ if (rte_intr_dp_is_en(intr_handle)) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ FM10K_WRITE_REG(hw, FM10K_RXINT(i),
+ 3 << FM10K_RXINT_TIMER_SHIFT);
+ if (hw->mac.type == fm10k_mac_pf)
+ FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
+ FM10K_ITR_MASK_SET);
+ else
+ FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
+ FM10K_ITR_MASK_SET);
+ }
+ }
+ /* Clean datapath event and queue/vec mapping */
+ rte_intr_efd_disable(intr_handle);
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+}
+
+static void
+fm10k_dev_queue_release(struct rte_eth_dev *dev)
+{
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dev->data->tx_queues) {
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct fm10k_tx_queue *txq = dev->data->tx_queues[i];
+
+ tx_queue_free(txq);
+ }
+ }
+
+ if (dev->data->rx_queues) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ fm10k_rx_queue_release(dev->data->rx_queues[i]);
+ }
+}
+
+static int
+fm10k_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete)
+{
+ struct fm10k_dev_info *dev_info =
+ FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
+ PMD_INIT_FUNC_TRACE();
+
+ dev->data->dev_link.link_speed = ETH_SPEED_NUM_50G;
+ dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ dev->data->dev_link.link_status =
+ dev_info->sm_down ? ETH_LINK_DOWN : ETH_LINK_UP;
+ dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
+
+ return 0;
+}
+
+static int fm10k_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit)
+{
+ unsigned i, q;
+ unsigned count = 0;
+
+ if (xstats_names != NULL) {
+ /* Note: limit checked in rte_eth_xstats_names() */
+
+ /* Global stats */
+ for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s", fm10k_hw_stats_strings[count].name);
+ count++;
+ }
+
+ /* PF queue stats */
+ for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
+ for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "rx_q%u_%s", q,
+ fm10k_hw_stats_rx_q_strings[i].name);
+ count++;
+ }
+ for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "tx_q%u_%s", q,
+ fm10k_hw_stats_tx_q_strings[i].name);
+ count++;
+ }
+ }
+ }
+ return FM10K_NB_XSTATS;
+}
+
+static int
+fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned n)
+{
+ struct fm10k_hw_stats *hw_stats =
+ FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+ unsigned i, q, count = 0;
+
+ if (n < FM10K_NB_XSTATS)
+ return FM10K_NB_XSTATS;
+
+ /* Global stats */
+ for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
+ xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
+ fm10k_hw_stats_strings[count].offset);
+ xstats[count].id = count;
+ count++;
+ }
+
+ /* PF queue stats */
+ for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
+ for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
+ xstats[count].value =
+ *(uint64_t *)(((char *)&hw_stats->q[q]) +
+ fm10k_hw_stats_rx_q_strings[i].offset);
+ xstats[count].id = count;
+ count++;
+ }
+ for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
+ xstats[count].value =
+ *(uint64_t *)(((char *)&hw_stats->q[q]) +
+ fm10k_hw_stats_tx_q_strings[i].offset);
+ xstats[count].id = count;
+ count++;
+ }
+ }
+
+ return FM10K_NB_XSTATS;
+}
+
+static int
+fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ uint64_t ipackets, opackets, ibytes, obytes, imissed;
+ struct fm10k_hw *hw =
+ FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct fm10k_hw_stats *hw_stats =
+ FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ fm10k_update_hw_stats(hw, hw_stats);
+
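+	/* Note: the totals below only cover the first
+	 * RTE_ETHDEV_QUEUE_STAT_CNTRS queues, since rte_eth_stats has that
+	 * many per-queue slots.
+	 */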
+ ipackets = opackets = ibytes = obytes = imissed = 0;
+ for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
+ (i < hw->mac.max_queues); ++i) {
+ stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
+ stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
+ stats->q_ibytes[i] = hw_stats->q[i].rx_bytes.count;
+ stats->q_obytes[i] = hw_stats->q[i].tx_bytes.count;
+ stats->q_errors[i] = hw_stats->q[i].rx_drops.count;
+ ipackets += stats->q_ipackets[i];
+ opackets += stats->q_opackets[i];
+ ibytes += stats->q_ibytes[i];
+ obytes += stats->q_obytes[i];
+ imissed += stats->q_errors[i];
+ }
+ stats->ipackets = ipackets;
+ stats->opackets = opackets;
+ stats->ibytes = ibytes;
+ stats->obytes = obytes;
+ stats->imissed = imissed;
+ return 0;
+}
+
+static int
+fm10k_stats_reset(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct fm10k_hw_stats *hw_stats =
+ FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ memset(hw_stats, 0, sizeof(*hw_stats));
+ fm10k_rebind_hw_stats(hw, hw_stats);
+
+ return 0;
+}
+
+static int
+fm10k_dev_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE;
+ dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE;
+ dev_info->max_rx_queues = hw->mac.max_queues;
+ dev_info->max_tx_queues = hw->mac.max_queues;
+ dev_info->max_mac_addrs = FM10K_MAX_MACADDR_NUM;
+ dev_info->max_hash_mac_addrs = 0;
+ dev_info->max_vfs = pdev->max_vfs;
+ dev_info->vmdq_pool_base = 0;
+ dev_info->vmdq_queue_base = 0;
+ dev_info->max_vmdq_pools = ETH_32_POOLS;
+ dev_info->vmdq_queue_num = FM10K_MAX_QUEUES_PF;
+ dev_info->rx_queue_offload_capa = fm10k_get_rx_queue_offloads_capa(dev);
+ dev_info->rx_offload_capa = fm10k_get_rx_port_offloads_capa(dev) |
+ dev_info->rx_queue_offload_capa;
+ dev_info->tx_queue_offload_capa = fm10k_get_tx_queue_offloads_capa(dev);
+ dev_info->tx_offload_capa = fm10k_get_tx_port_offloads_capa(dev) |
+ dev_info->tx_queue_offload_capa;
+
+ dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
+ dev_info->reta_size = FM10K_MAX_RSS_INDICES;
+ dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 |
+ ETH_RSS_IPV6 |
+ ETH_RSS_IPV6_EX |
+ ETH_RSS_NONFRAG_IPV4_TCP |
+ ETH_RSS_NONFRAG_IPV6_TCP |
+ ETH_RSS_IPV6_TCP_EX |
+ ETH_RSS_NONFRAG_IPV4_UDP |
+ ETH_RSS_NONFRAG_IPV6_UDP |
+ ETH_RSS_IPV6_UDP_EX;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = FM10K_DEFAULT_RX_PTHRESH,
+ .hthresh = FM10K_DEFAULT_RX_HTHRESH,
+ .wthresh = FM10K_DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
+ .rx_drop_en = 0,
+ .offloads = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = FM10K_DEFAULT_TX_PTHRESH,
+ .hthresh = FM10K_DEFAULT_TX_HTHRESH,
+ .wthresh = FM10K_DEFAULT_TX_WTHRESH,
+ },
+ .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
+ .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
+ .offloads = 0,
+ };
+
+ dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = FM10K_MAX_RX_DESC,
+ .nb_min = FM10K_MIN_RX_DESC,
+ .nb_align = FM10K_MULT_RX_DESC,
+ };
+
+ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = FM10K_MAX_TX_DESC,
+ .nb_min = FM10K_MIN_TX_DESC,
+ .nb_align = FM10K_MULT_TX_DESC,
+ .nb_seg_max = FM10K_TX_MAX_SEG,
+ .nb_mtu_seg_max = FM10K_TX_MAX_MTU_SEG,
+ };
+
+ dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
+ ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
+ ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
+
+ return 0;
+}
+
+#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
+static const uint32_t *
+fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ if (dev->rx_pkt_burst == fm10k_recv_pkts ||
+ dev->rx_pkt_burst == fm10k_recv_scattered_pkts) {
+ static uint32_t ptypes[] = {
+ /* refers to rx_desc_to_ol_flags() */
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ return ptypes;
+ } else if (dev->rx_pkt_burst == fm10k_recv_pkts_vec ||
+ dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec) {
+ static uint32_t ptypes_vec[] = {
+ /* refers to fm10k_desc_to_pktype_v() */
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_TUNNEL_GENEVE,
+ RTE_PTYPE_TUNNEL_NVGRE,
+ RTE_PTYPE_TUNNEL_VXLAN,
+ RTE_PTYPE_TUNNEL_GRE,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ return ptypes_vec;
+ }
+
+ return NULL;
+}
+#else
+static const uint32_t *
+fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
+{
+ return NULL;
+}
+#endif
+
+static int
+fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ s32 result;
+ uint16_t mac_num = 0;
+ uint32_t vid_idx, vid_bit, mac_index;
+ struct fm10k_hw *hw;
+ struct fm10k_macvlan_filter_info *macvlan;
+ struct rte_eth_dev_data *data = dev->data;
+
+ hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
+
+ if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */
+ PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode");
+ return -EINVAL;
+ }
+
+ if (vlan_id > ETH_VLAN_ID_MAX) {
+ PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
+ return -EINVAL;
+ }
+
+ vid_idx = FM10K_VFTA_IDX(vlan_id);
+ vid_bit = FM10K_VFTA_BIT(vlan_id);
+ /* this VLAN ID is already in the VLAN filter table, return SUCCESS */
+ if (on && (macvlan->vfta[vid_idx] & vid_bit))
+ return 0;
+ /* this VLAN ID is NOT in the VLAN filter table, cannot remove */
+ if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
+		PMD_INIT_LOG(ERR, "Invalid vlan_id: not present "
+			"in the VLAN filter table");
+ return -EINVAL;
+ }
+
+ fm10k_mbx_lock(hw);
+ result = fm10k_update_vlan(hw, vlan_id, 0, on);
+ fm10k_mbx_unlock(hw);
+ if (result != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
+ return -EIO;
+ }
+
+ for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
+ (result == FM10K_SUCCESS); mac_index++) {
+ if (rte_is_zero_ether_addr(&data->mac_addrs[mac_index]))
+ continue;
+ if (mac_num > macvlan->mac_num - 1) {
+			PMD_INIT_LOG(ERR, "MAC address count "
+				"mismatch");
+ break;
+ }
+ fm10k_mbx_lock(hw);
+ result = fm10k_update_uc_addr(hw, hw->mac.dglort_map,
+ data->mac_addrs[mac_index].addr_bytes,
+ vlan_id, on, 0);
+ fm10k_mbx_unlock(hw);
+ mac_num++;
+ }
+ if (result != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
+ return -EIO;
+ }
+
+ if (on) {
+ macvlan->vlan_num++;
+ macvlan->vfta[vid_idx] |= vid_bit;
+ } else {
+ macvlan->vlan_num--;
+ macvlan->vfta[vid_idx] &= ~vid_bit;
+ }
+ return 0;
+}
+
+static int
+fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ if (!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_STRIP))
+ PMD_INIT_LOG(ERR, "VLAN stripping is "
+ "always on in fm10k");
+ }
+
+ if (mask & ETH_VLAN_EXTEND_MASK) {
+ if (dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND)
+ PMD_INIT_LOG(ERR, "VLAN QinQ is not "
+ "supported in fm10k");
+ }
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ if (!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_FILTER))
+ PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
+ }
+
+ return 0;
+}
+
+/* Add/Remove a MAC address, and update filters to main VSI */
+static void fm10k_MAC_filter_set_main_vsi(struct rte_eth_dev *dev,
+ const u8 *mac, bool add, uint32_t pool)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct fm10k_macvlan_filter_info *macvlan;
+ uint32_t i, j, k;
+
+ macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
+
+ if (pool != MAIN_VSI_POOL_NUMBER) {
+ PMD_DRV_LOG(ERR, "VMDQ not enabled, can't set "
+ "mac to pool %u", pool);
+ return;
+ }
+ for (i = 0, j = 0; j < FM10K_VFTA_SIZE; j++) {
+ if (!macvlan->vfta[j])
+ continue;
+ for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
+ if (!(macvlan->vfta[j] & (1 << k)))
+ continue;
+ if (i + 1 > macvlan->vlan_num) {
+				PMD_INIT_LOG(ERR, "VLAN count mismatch");
+ return;
+ }
+ fm10k_mbx_lock(hw);
+ fm10k_update_uc_addr(hw, hw->mac.dglort_map, mac,
+ j * FM10K_UINT32_BIT_SIZE + k, add, 0);
+ fm10k_mbx_unlock(hw);
+ i++;
+ }
+ }
+}
+
+/* Add/Remove a MAC address, and update filters to VMDQ */
+static void fm10k_MAC_filter_set_vmdq(struct rte_eth_dev *dev,
+ const u8 *mac, bool add, uint32_t pool)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct fm10k_macvlan_filter_info *macvlan;
+ struct rte_eth_vmdq_rx_conf *vmdq_conf;
+ uint32_t i;
+
+ macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
+ vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
+
+ if (pool > macvlan->nb_queue_pools) {
+ PMD_DRV_LOG(ERR, "Pool number %u invalid."
+ " Max pool is %u",
+ pool, macvlan->nb_queue_pools);
+ return;
+ }
+ for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
+ if (!(vmdq_conf->pool_map[i].pools & (1UL << pool)))
+ continue;
+ fm10k_mbx_lock(hw);
+ fm10k_update_uc_addr(hw, hw->mac.dglort_map + pool, mac,
+ vmdq_conf->pool_map[i].vlan_id, add, 0);
+ fm10k_mbx_unlock(hw);
+ }
+}
+
+/* Add/Remove a MAC address, and update filters */
+static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
+ const u8 *mac, bool add, uint32_t pool)
+{
+ struct fm10k_macvlan_filter_info *macvlan;
+
+ macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
+
+ if (macvlan->nb_queue_pools > 0) /* VMDQ mode */
+ fm10k_MAC_filter_set_vmdq(dev, mac, add, pool);
+ else
+ fm10k_MAC_filter_set_main_vsi(dev, mac, add, pool);
+
+ if (add)
+ macvlan->mac_num++;
+ else
+ macvlan->mac_num--;
+}
+
+/* Add a MAC address, and update filters */
+static int
+fm10k_macaddr_add(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mac_addr,
+ uint32_t index,
+ uint32_t pool)
+{
+ struct fm10k_macvlan_filter_info *macvlan;
+
+ macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
+ fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool);
+ macvlan->mac_vmdq_id[index] = pool;
+ return 0;
+}
+
+/* Remove a MAC address, and update filters */
+static void
+fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ struct fm10k_macvlan_filter_info *macvlan;
+
+ macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
+ fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes,
+ FALSE, macvlan->mac_vmdq_id[index]);
+ macvlan->mac_vmdq_id[index] = 0;
+}
+
+static inline int
+check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
+{
+ if ((request < min) || (request > max) || ((request % mult) != 0))
+ return -1;
+ else
+ return 0;
+}
+
+
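+/* check_thresh() accepts a request only if it lies within [min, max] and is
+ * a divisor of the queue-specific *_THRESH_DIV() value (see the log messages
+ * in handle_rxconf()/handle_txconf() below).
+ */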
+static inline int
+check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
+{
+ if ((request < min) || (request > max) || ((div % request) != 0))
+ return -1;
+ else
+ return 0;
+}
+
+static inline int
+handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
+{
+ uint16_t rx_free_thresh;
+
+ if (conf->rx_free_thresh == 0)
+ rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
+ else
+ rx_free_thresh = conf->rx_free_thresh;
+
+ /* make sure the requested threshold satisfies the constraints */
+ if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
+ FM10K_RX_FREE_THRESH_MAX(q),
+ FM10K_RX_FREE_THRESH_DIV(q),
+ rx_free_thresh)) {
+ PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
+ "less than or equal to %u, "
+ "greater than or equal to %u, "
+ "and a divisor of %u",
+ rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
+ FM10K_RX_FREE_THRESH_MIN(q),
+ FM10K_RX_FREE_THRESH_DIV(q));
+ return -EINVAL;
+ }
+
+ q->alloc_thresh = rx_free_thresh;
+ q->drop_en = conf->rx_drop_en;
+ q->rx_deferred_start = conf->rx_deferred_start;
+
+ return 0;
+}
+
+/*
+ * Hardware requires specific alignment for Rx packet buffers. At
+ * least one of the following two conditions must be satisfied.
+ * 1. Address is 512B aligned
+ * 2. Address is 8B aligned and buffer does not cross 4K boundary.
+ *
+ * As such, the driver may need to adjust the DMA address within the
+ * buffer by up to 512B.
+ *
+ * return 1 if the element size is valid, otherwise return 0.
+ */
+static int
+mempool_element_size_valid(struct rte_mempool *mp)
+{
+ uint32_t min_size;
+
+ /* elt_size includes mbuf header and headroom */
+ min_size = mp->elt_size - sizeof(struct rte_mbuf) -
+ RTE_PKTMBUF_HEADROOM;
+
+ /* account for up to 512B of alignment */
+ min_size -= FM10K_RX_DATABUF_ALIGN;
+
+	/* if elt_size was too small, the unsigned math above wrapped around */
+ if (min_size > mp->elt_size)
+ return 0;
+
+ /* size is valid */
+ return 1;
+}
+
+static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return (uint64_t)(DEV_RX_OFFLOAD_SCATTER);
+}
+
+static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return (uint64_t)(DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_HEADER_SPLIT |
+ DEV_RX_OFFLOAD_RSS_HASH);
+}
+
+static int
+fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct fm10k_dev_info *dev_info =
+ FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
+ struct fm10k_rx_queue *q;
+ const struct rte_memzone *mz;
+ uint64_t offloads;
+
+ PMD_INIT_FUNC_TRACE();
+
+ offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
+ /* make sure the mempool element size can account for alignment. */
+ if (!mempool_element_size_valid(mp)) {
+ PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
+ return -EINVAL;
+ }
+
+ /* make sure a valid number of descriptors have been requested */
+ if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
+ FM10K_MULT_RX_DESC, nb_desc)) {
+ PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
+ "less than or equal to %"PRIu32", "
+ "greater than or equal to %u, "
+ "and a multiple of %u",
+ nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
+ FM10K_MULT_RX_DESC);
+ return -EINVAL;
+ }
+
+ /*
+ * if this queue existed already, free the associated memory. The
+ * queue cannot be reused in case we need to allocate memory on
+ * different socket than was previously used.
+ */
+ if (dev->data->rx_queues[queue_id] != NULL) {
+ rx_queue_free(dev->data->rx_queues[queue_id]);
+ dev->data->rx_queues[queue_id] = NULL;
+ }
+
+ /* allocate memory for the queue structure */
+ q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (q == NULL) {
+ PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
+ return -ENOMEM;
+ }
+
+ /* setup queue */
+ q->mp = mp;
+ q->nb_desc = nb_desc;
+ q->nb_fake_desc = FM10K_MULT_RX_DESC;
+ q->port_id = dev->data->port_id;
+ q->queue_id = queue_id;
+ q->tail_ptr = (volatile uint32_t *)
+ &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
+ q->offloads = offloads;
+ if (handle_rxconf(q, conf))
+ return -EINVAL;
+
+ /* allocate memory for the software ring */
+ q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
+ (nb_desc + q->nb_fake_desc) * sizeof(struct rte_mbuf *),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (q->sw_ring == NULL) {
+ PMD_INIT_LOG(ERR, "Cannot allocate software ring");
+ rte_free(q);
+ return -ENOMEM;
+ }
+
+ /*
+ * allocate memory for the hardware descriptor ring. A memzone large
+ * enough to hold the maximum ring size is requested to allow for
+ * resizing in later calls to the queue setup function.
+ */
+ mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
+ FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC,
+ socket_id);
+ if (mz == NULL) {
+ PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
+ rte_free(q->sw_ring);
+ rte_free(q);
+ return -ENOMEM;
+ }
+ q->hw_ring = mz->addr;
+ q->hw_ring_phys_addr = mz->iova;
+
+ /* Check if number of descs satisfied Vector requirement */
+ if (!rte_is_power_of_2(nb_desc)) {
+ PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
+ "preconditions - canceling the feature for "
+ "the whole port[%d]",
+ q->queue_id, q->port_id);
+ dev_info->rx_vec_allowed = false;
+ } else
+ fm10k_rxq_vec_setup(q);
+
+ dev->data->rx_queues[queue_id] = q;
+ return 0;
+}
+
+static void
+fm10k_rx_queue_release(void *queue)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ rx_queue_free(queue);
+}
+
+static inline int
+handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
+{
+ uint16_t tx_free_thresh;
+ uint16_t tx_rs_thresh;
+
+	/* the constraint macros require that tx_free_thresh is configured
+	 * before tx_rs_thresh */
+ if (conf->tx_free_thresh == 0)
+ tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
+ else
+ tx_free_thresh = conf->tx_free_thresh;
+
+ /* make sure the requested threshold satisfies the constraints */
+ if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
+ FM10K_TX_FREE_THRESH_MAX(q),
+ FM10K_TX_FREE_THRESH_DIV(q),
+ tx_free_thresh)) {
+ PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
+ "less than or equal to %u, "
+ "greater than or equal to %u, "
+ "and a divisor of %u",
+ tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
+ FM10K_TX_FREE_THRESH_MIN(q),
+ FM10K_TX_FREE_THRESH_DIV(q));
+ return -EINVAL;
+ }
+
+ q->free_thresh = tx_free_thresh;
+
+ if (conf->tx_rs_thresh == 0)
+ tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
+ else
+ tx_rs_thresh = conf->tx_rs_thresh;
+
+ q->tx_deferred_start = conf->tx_deferred_start;
+
+ /* make sure the requested threshold satisfies the constraints */
+ if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
+ FM10K_TX_RS_THRESH_MAX(q),
+ FM10K_TX_RS_THRESH_DIV(q),
+ tx_rs_thresh)) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
+ "less than or equal to %u, "
+ "greater than or equal to %u, "
+ "and a divisor of %u",
+ tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
+ FM10K_TX_RS_THRESH_MIN(q),
+ FM10K_TX_RS_THRESH_DIV(q));
+ return -EINVAL;
+ }
+
+ q->rs_thresh = tx_rs_thresh;
+
+ return 0;
+}
+
+static uint64_t fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return 0;
+}
+
+static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return (uint64_t)(DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO);
+}
+
+static int
+fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *conf)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct fm10k_tx_queue *q;
+ const struct rte_memzone *mz;
+ uint64_t offloads;
+
+ PMD_INIT_FUNC_TRACE();
+
+ offloads = conf->offloads | dev->data->dev_conf.txmode.offloads;
+
+ /* make sure a valid number of descriptors have been requested */
+ if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
+ FM10K_MULT_TX_DESC, nb_desc)) {
+ PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
+ "less than or equal to %"PRIu32", "
+ "greater than or equal to %u, "
+ "and a multiple of %u",
+ nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
+ FM10K_MULT_TX_DESC);
+ return -EINVAL;
+ }
+
+ /*
+ * if this queue existed already, free the associated memory. The
+ * queue cannot be reused in case we need to allocate memory on
+ * different socket than was previously used.
+ */
+ if (dev->data->tx_queues[queue_id] != NULL) {
+ struct fm10k_tx_queue *txq = dev->data->tx_queues[queue_id];
+
+ tx_queue_free(txq);
+ dev->data->tx_queues[queue_id] = NULL;
+ }
+
+ /* allocate memory for the queue structure */
+ q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (q == NULL) {
+ PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
+ return -ENOMEM;
+ }
+
+ /* setup queue */
+ q->nb_desc = nb_desc;
+ q->port_id = dev->data->port_id;
+ q->queue_id = queue_id;
+ q->offloads = offloads;
+ q->ops = &def_txq_ops;
+ q->tail_ptr = (volatile uint32_t *)
+ &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
+ if (handle_txconf(q, conf))
+ return -EINVAL;
+
+ /* allocate memory for the software ring */
+ q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
+ nb_desc * sizeof(struct rte_mbuf *),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (q->sw_ring == NULL) {
+ PMD_INIT_LOG(ERR, "Cannot allocate software ring");
+ rte_free(q);
+ return -ENOMEM;
+ }
+
+ /*
+ * allocate memory for the hardware descriptor ring. A memzone large
+ * enough to hold the maximum ring size is requested to allow for
+ * resizing in later calls to the queue setup function.
+ */
+ mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
+ FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC,
+ socket_id);
+ if (mz == NULL) {
+ PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
+ rte_free(q->sw_ring);
+ rte_free(q);
+ return -ENOMEM;
+ }
+ q->hw_ring = mz->addr;
+ q->hw_ring_phys_addr = mz->iova;
+
+ /*
+ * allocate memory for the RS bit tracker. Enough slots to hold the
+ * descriptor index for each RS bit needing to be set are required.
+ */
+ q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
+ ((nb_desc + 1) / q->rs_thresh) *
+ sizeof(uint16_t),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (q->rs_tracker.list == NULL) {
+ PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
+ rte_free(q->sw_ring);
+ rte_free(q);
+ return -ENOMEM;
+ }
+
+ dev->data->tx_queues[queue_id] = q;
+ return 0;
+}
+
+static void
+fm10k_tx_queue_release(void *queue)
+{
+ struct fm10k_tx_queue *q = queue;
+ PMD_INIT_FUNC_TRACE();
+
+ tx_queue_free(q);
+}
+
+static int
+fm10k_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t i, j, idx, shift;
+ uint8_t mask;
+ uint32_t reta;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (reta_size > FM10K_MAX_RSS_INDICES) {
+		PMD_INIT_LOG(ERR, "The size of the configured hash lookup "
+			"table (%d) doesn't match what the hardware can "
+			"support (%d)", reta_size, FM10K_MAX_RSS_INDICES);
+ return -EINVAL;
+ }
+
+ /*
+ * Update Redirection Table RETA[n], n=0..31. The redirection table has
+ * 128-entries in 32 registers
+ */
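+	/* Each 32-bit RETA register holds four entries; reta_conf[].mask
+	 * selects which of the four to touch, and a partial mask triggers a
+	 * read-modify-write of the register.
+	 */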
+ for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+ BIT_MASK_PER_UINT32);
+ if (mask == 0)
+ continue;
+
+ reta = 0;
+ if (mask != BIT_MASK_PER_UINT32)
+ reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
+
+ for (j = 0; j < CHARS_PER_UINT32; j++) {
+ if (mask & (0x1 << j)) {
+ if (mask != 0xF)
+ reta &= ~(UINT8_MAX << CHAR_BIT * j);
+ reta |= reta_conf[idx].reta[shift + j] <<
+ (CHAR_BIT * j);
+ }
+ }
+ FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
+ }
+
+ return 0;
+}
+
+static int
+fm10k_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t i, j, idx, shift;
+ uint8_t mask;
+ uint32_t reta;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (reta_size < FM10K_MAX_RSS_INDICES) {
+		PMD_INIT_LOG(ERR, "The size of the configured hash lookup "
+			"table (%d) doesn't match the number of entries the "
+			"hardware supports (%d)", reta_size, FM10K_MAX_RSS_INDICES);
+ return -EINVAL;
+ }
+
+	/*
+	 * Read Redirection Table RETA[n], n=0..31. The redirection table
+	 * has 128 entries in 32 registers.
+	 */
+ for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+ BIT_MASK_PER_UINT32);
+ if (mask == 0)
+ continue;
+
+ reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
+ for (j = 0; j < CHARS_PER_UINT32; j++) {
+ if (mask & (0x1 << j))
+ reta_conf[idx].reta[shift + j] = ((reta >>
+ CHAR_BIT * j) & UINT8_MAX);
+ }
+ }
+
+ return 0;
+}
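+
+/*
+ * Illustrative sketch (not part of the driver): populating the 128-entry
+ * fm10k RETA through the generic API. With RTE_RETA_GROUP_SIZE == 64 the
+ * table spans two rte_eth_rss_reta_entry64 groups; nb_rxq is an assumed
+ * (non-zero) Rx queue count for the example.
+ */
+static int __rte_unused
+example_spread_reta(uint16_t port_id, uint16_t nb_rxq)
+{
+	struct rte_eth_rss_reta_entry64 conf[2];
+	uint16_t i;
+
+	memset(conf, 0, sizeof(conf));
+	for (i = 0; i < FM10K_MAX_RSS_INDICES; i++) {
+		/* mark the entry as valid for update */
+		conf[i / RTE_RETA_GROUP_SIZE].mask |=
+			1ULL << (i % RTE_RETA_GROUP_SIZE);
+		/* round-robin the redirection entries over the Rx queues */
+		conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
+			i % nb_rxq;
+	}
+	return rte_eth_dev_rss_reta_update(port_id, conf,
+			FM10K_MAX_RSS_INDICES);
+}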
+
+static int
+fm10k_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t *key = (uint32_t *)rss_conf->rss_key;
+ uint32_t mrqc;
+ uint64_t hf = rss_conf->rss_hf;
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
+ FM10K_RSSRK_ENTRIES_PER_REG))
+ return -EINVAL;
+
+ if (hf == 0)
+ return -EINVAL;
+
+ mrqc = 0;
+ mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
+ mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
+ mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
+ mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
+ mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
+ mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
+ mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
+ mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
+ mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
+
+	/* If the requested types map to no supported hash function, fail */
+ if (mrqc == 0)
+ return -EINVAL;
+
+ if (key != NULL)
+ for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
+ FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
+
+ FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
+
+ return 0;
+}
+
+static int
+fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t *key = (uint32_t *)rss_conf->rss_key;
+ uint32_t mrqc;
+ uint64_t hf;
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
+ FM10K_RSSRK_ENTRIES_PER_REG))
+ return -EINVAL;
+
+ if (key != NULL)
+ for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
+ key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));
+
+ mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
+ hf = 0;
+ hf |= (mrqc & FM10K_MRQC_IPV4) ? ETH_RSS_IPV4 : 0;
+ hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6 : 0;
+ hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6_EX : 0;
+ hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
+ hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
+ hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX : 0;
+ hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
+ hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
+ hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX : 0;
+
+ rss_conf->rss_hf = hf;
+
+ return 0;
+}
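+
+/*
+ * Illustrative sketch (not part of the driver): requesting a hash
+ * configuration that fm10k_rss_hash_update() accepts. The 40-byte key
+ * matches the FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG length check
+ * above; the all-zero key bytes are placeholders.
+ */
+static int __rte_unused
+example_enable_rss(uint16_t port_id)
+{
+	static uint8_t rss_key[FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG];
+	struct rte_eth_rss_conf conf = {
+		.rss_key = rss_key,
+		.rss_key_len = sizeof(rss_key),
+		/* only types with an FM10K_MRQC_* mapping above take effect */
+		.rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP |
+			ETH_RSS_NONFRAG_IPV4_UDP,
+	};
+
+	return rte_eth_dev_rss_hash_update(port_id, &conf);
+}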
+
+static void
+fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
+
+ /* Bind all local non-queue interrupt to vector 0 */
+ int_map |= FM10K_MISC_VEC_ID;
+
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);
+
+ /* Enable misc causes */
+ FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
+ FM10K_EIMR_ENABLE(THI_FAULT) |
+ FM10K_EIMR_ENABLE(FUM_FAULT) |
+ FM10K_EIMR_ENABLE(MAILBOX) |
+ FM10K_EIMR_ENABLE(SWITCHREADY) |
+ FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
+ FM10K_EIMR_ENABLE(SRAMERROR) |
+ FM10K_EIMR_ENABLE(VFLR));
+
+ /* Enable ITR 0 */
+ FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
+ FM10K_ITR_MASK_CLEAR);
+ FM10K_WRITE_FLUSH(hw);
+}
+
+static void
+fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t int_map = FM10K_INT_MAP_DISABLE;
+
+ int_map |= FM10K_MISC_VEC_ID;
+
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);
+
+ /* Disable misc causes */
+ FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) |
+ FM10K_EIMR_DISABLE(THI_FAULT) |
+ FM10K_EIMR_DISABLE(FUM_FAULT) |
+ FM10K_EIMR_DISABLE(MAILBOX) |
+ FM10K_EIMR_DISABLE(SWITCHREADY) |
+ FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
+ FM10K_EIMR_DISABLE(SRAMERROR) |
+ FM10K_EIMR_DISABLE(VFLR));
+
+ /* Disable ITR 0 */
+ FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_MASK_SET);
+ FM10K_WRITE_FLUSH(hw);
+}
+
+static void
+fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
+
+ /* Bind all local non-queue interrupt to vector 0 */
+ int_map |= FM10K_MISC_VEC_ID;
+
+	/* Only INT 0 is available; the other 15 are reserved. */
+ FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
+
+ /* Enable ITR 0 */
+ FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
+ FM10K_ITR_MASK_CLEAR);
+ FM10K_WRITE_FLUSH(hw);
+}
+
+static void
+fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t int_map = FM10K_INT_MAP_DISABLE;
+
+ int_map |= FM10K_MISC_VEC_ID;
+
+	/* Only INT 0 is available; the other 15 are reserved. */
+ FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
+
+ /* Disable ITR 0 */
+ FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_MASK_SET);
+ FM10K_WRITE_FLUSH(hw);
+}
+
+static int
+fm10k_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
+
+ /* Enable ITR */
+ if (hw->mac.type == fm10k_mac_pf)
+ FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
+ FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
+ else
+ FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
+ FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
+ rte_intr_ack(&pdev->intr_handle);
+ return 0;
+}
+
+static int
+fm10k_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
+
+ /* Disable ITR */
+ if (hw->mac.type == fm10k_mac_pf)
+ FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
+ FM10K_ITR_MASK_SET);
+ else
+ FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
+ FM10K_ITR_MASK_SET);
+ return 0;
+}
+
+static int
+fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pdev->intr_handle;
+ uint32_t intr_vector, vec;
+ uint16_t queue_id;
+ int result = 0;
+
+	/* fm10k needs one separate interrupt for the mailbox, so only
+	 * drivers which support multiple interrupt vectors (e.g. vfio-pci)
+	 * can work in fm10k interrupt mode.
+	 */
+ if (!rte_intr_cap_multiple(intr_handle) ||
+ dev->data->dev_conf.intr_conf.rxq == 0)
+ return result;
+
+ intr_vector = dev->data->nb_rx_queues;
+
+ /* disable interrupt first */
+ rte_intr_disable(intr_handle);
+ if (hw->mac.type == fm10k_mac_pf)
+ fm10k_dev_disable_intr_pf(dev);
+ else
+ fm10k_dev_disable_intr_vf(dev);
+
+ if (rte_intr_efd_enable(intr_handle, intr_vector)) {
+ PMD_INIT_LOG(ERR, "Failed to init event fd");
+ result = -EIO;
+ }
+
+ if (rte_intr_dp_is_en(intr_handle) && !result) {
+ intr_handle->intr_vec = rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int), 0);
+ if (intr_handle->intr_vec) {
+ for (queue_id = 0, vec = FM10K_RX_VEC_START;
+ queue_id < dev->data->nb_rx_queues;
+ queue_id++) {
+ intr_handle->intr_vec[queue_id] = vec;
+ if (vec < intr_handle->nb_efd - 1
+ + FM10K_RX_VEC_START)
+ vec++;
+ }
+ } else {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+ " intr_vec", dev->data->nb_rx_queues);
+ rte_intr_efd_disable(intr_handle);
+ result = -ENOMEM;
+ }
+ }
+
+ if (hw->mac.type == fm10k_mac_pf)
+ fm10k_dev_enable_intr_pf(dev);
+ else
+ fm10k_dev_enable_intr_vf(dev);
+ rte_intr_enable(intr_handle);
+ hw->mac.ops.update_int_moderator(hw);
+ return result;
+}
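+
+/*
+ * Illustrative sketch (not part of the driver): the application side of
+ * the setup above. dev_conf.intr_conf.rxq must be set to 1 before
+ * rte_eth_dev_configure() so fm10k_dev_rxq_interrupt_setup() allocates
+ * per-queue vectors; afterwards each queue's interrupt can be armed.
+ */
+static int __rte_unused
+example_arm_rxq_interrupt(uint16_t port_id, uint16_t queue_id)
+{
+	/* unmasks the queue's ITR via the intr_enable op above */
+	return rte_eth_dev_rx_intr_enable(port_id, queue_id);
+}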
+
+static int
+fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
+{
+ struct fm10k_fault fault;
+ int err;
+ const char *estr = "Unknown error";
+
+ /* Process PCA fault */
+ if (eicr & FM10K_EICR_PCA_FAULT) {
+ err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
+ if (err)
+ goto error;
+ switch (fault.type) {
+ case PCA_NO_FAULT:
+ estr = "PCA_NO_FAULT"; break;
+ case PCA_UNMAPPED_ADDR:
+ estr = "PCA_UNMAPPED_ADDR"; break;
+ case PCA_BAD_QACCESS_PF:
+ estr = "PCA_BAD_QACCESS_PF"; break;
+ case PCA_BAD_QACCESS_VF:
+ estr = "PCA_BAD_QACCESS_VF"; break;
+ case PCA_MALICIOUS_REQ:
+ estr = "PCA_MALICIOUS_REQ"; break;
+ case PCA_POISONED_TLP:
+ estr = "PCA_POISONED_TLP"; break;
+ case PCA_TLP_ABORT:
+ estr = "PCA_TLP_ABORT"; break;
+ default:
+ goto error;
+ }
+ PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
+ estr, fault.func ? "VF" : "PF", fault.func,
+ fault.address, fault.specinfo);
+ }
+
+ /* Process THI fault */
+ if (eicr & FM10K_EICR_THI_FAULT) {
+ err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
+ if (err)
+ goto error;
+ switch (fault.type) {
+ case THI_NO_FAULT:
+ estr = "THI_NO_FAULT"; break;
+ case THI_MAL_DIS_Q_FAULT:
+ estr = "THI_MAL_DIS_Q_FAULT"; break;
+ default:
+ goto error;
+ }
+ PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
+ estr, fault.func ? "VF" : "PF", fault.func,
+ fault.address, fault.specinfo);
+ }
+
+ /* Process FUM fault */
+ if (eicr & FM10K_EICR_FUM_FAULT) {
+ err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
+ if (err)
+ goto error;
+ switch (fault.type) {
+ case FUM_NO_FAULT:
+ estr = "FUM_NO_FAULT"; break;
+ case FUM_UNMAPPED_ADDR:
+ estr = "FUM_UNMAPPED_ADDR"; break;
+ case FUM_POISONED_TLP:
+ estr = "FUM_POISONED_TLP"; break;
+ case FUM_BAD_VF_QACCESS:
+ estr = "FUM_BAD_VF_QACCESS"; break;
+ case FUM_ADD_DECODE_ERR:
+ estr = "FUM_ADD_DECODE_ERR"; break;
+ case FUM_RO_ERROR:
+ estr = "FUM_RO_ERROR"; break;
+ case FUM_QPRC_CRC_ERROR:
+ estr = "FUM_QPRC_CRC_ERROR"; break;
+ case FUM_CSR_TIMEOUT:
+ estr = "FUM_CSR_TIMEOUT"; break;
+ case FUM_INVALID_TYPE:
+ estr = "FUM_INVALID_TYPE"; break;
+ case FUM_INVALID_LENGTH:
+ estr = "FUM_INVALID_LENGTH"; break;
+ case FUM_INVALID_BE:
+ estr = "FUM_INVALID_BE"; break;
+ case FUM_INVALID_ALIGN:
+ estr = "FUM_INVALID_ALIGN"; break;
+ default:
+ goto error;
+ }
+ PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
+ estr, fault.func ? "VF" : "PF", fault.func,
+ fault.address, fault.specinfo);
+ }
+
+ return 0;
+error:
+ PMD_INIT_LOG(ERR, "Failed to handle fault event.");
+ return err;
+}
+
+/**
+ * PF interrupt handler triggered by NIC for handling specific interrupt.
+ *
+ * @param param
+ *  The address of the parameter (struct rte_eth_dev *) registered before.
+ *
+ * @return
+ * void
+ */
+static void
+fm10k_dev_interrupt_handler_pf(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t cause, status;
+ struct fm10k_dev_info *dev_info =
+ FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
+ int status_mbx;
+ s32 err;
+
+ if (hw->mac.type != fm10k_mac_pf)
+ return;
+
+ cause = FM10K_READ_REG(hw, FM10K_EICR);
+
+ /* Handle PCI fault cases */
+ if (cause & FM10K_EICR_FAULT_MASK) {
+ PMD_INIT_LOG(ERR, "INT: find fault!");
+ fm10k_dev_handle_fault(hw, cause);
+ }
+
+ /* Handle switch up/down */
+ if (cause & FM10K_EICR_SWITCHNOTREADY)
+ PMD_INIT_LOG(ERR, "INT: Switch is not ready");
+
+ if (cause & FM10K_EICR_SWITCHREADY) {
+ PMD_INIT_LOG(INFO, "INT: Switch is ready");
+ if (dev_info->sm_down == 1) {
+ fm10k_mbx_lock(hw);
+
+ /* For recreating logical ports */
+ status_mbx = hw->mac.ops.update_lport_state(hw,
+ hw->mac.dglort_map, MAX_LPORT_NUM, 1);
+ if (status_mbx == FM10K_SUCCESS)
+ PMD_INIT_LOG(INFO,
+ "INT: Recreated Logical port");
+ else
+ PMD_INIT_LOG(INFO,
+ "INT: Logical ports weren't recreated");
+
+ status_mbx = hw->mac.ops.update_xcast_mode(hw,
+ hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
+ if (status_mbx != FM10K_SUCCESS)
+ PMD_INIT_LOG(ERR, "Failed to set XCAST mode");
+
+ fm10k_mbx_unlock(hw);
+
+ /* first clear the internal SW recording structure */
+ if (!(dev->data->dev_conf.rxmode.mq_mode &
+ ETH_MQ_RX_VMDQ_FLAG))
+ fm10k_vlan_filter_set(dev, hw->mac.default_vid,
+ false);
+
+ fm10k_MAC_filter_set(dev, hw->mac.addr, false,
+ MAIN_VSI_POOL_NUMBER);
+
+			/*
+			 * Add the default MAC address and VLAN for the
+			 * logical ports that have been created; it is left
+			 * to the application to fully recover Rx filtering.
+			 */
+ fm10k_MAC_filter_set(dev, hw->mac.addr, true,
+ MAIN_VSI_POOL_NUMBER);
+
+ if (!(dev->data->dev_conf.rxmode.mq_mode &
+ ETH_MQ_RX_VMDQ_FLAG))
+ fm10k_vlan_filter_set(dev, hw->mac.default_vid,
+ true);
+
+ dev_info->sm_down = 0;
+ _rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+ }
+ }
+
+ /* Handle mailbox message */
+ fm10k_mbx_lock(hw);
+ err = hw->mbx.ops.process(hw, &hw->mbx);
+ fm10k_mbx_unlock(hw);
+
+ if (err == FM10K_ERR_RESET_REQUESTED) {
+ PMD_INIT_LOG(INFO, "INT: Switch is down");
+ dev_info->sm_down = 1;
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+ }
+
+ /* Handle SRAM error */
+ if (cause & FM10K_EICR_SRAMERROR) {
+ PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");
+
+ status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
+ /* Write to clear pending bits */
+ FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);
+
+ /* Todo: print out error message after shared code updates */
+ }
+
+	/* Clear these 3 events if any are pending */
+ cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
+ FM10K_EICR_SWITCHREADY;
+ if (cause)
+ FM10K_WRITE_REG(hw, FM10K_EICR, cause);
+
+ /* Re-enable interrupt from device side */
+ FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
+ FM10K_ITR_MASK_CLEAR);
+ /* Re-enable interrupt from host side */
+ rte_intr_ack(dev->intr_handle);
+}
+
+/**
+ * VF interrupt handler triggered by NIC for handling specific interrupt.
+ *
+ * @param param
+ *  The address of the parameter (struct rte_eth_dev *) registered before.
+ *
+ * @return
+ * void
+ */
+static void
+fm10k_dev_interrupt_handler_vf(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ struct fm10k_dev_info *dev_info =
+ FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
+ const enum fm10k_mbx_state state = mbx->state;
+ int status_mbx;
+
+ if (hw->mac.type != fm10k_mac_vf)
+ return;
+
+ /* Handle mailbox message if lock is acquired */
+ fm10k_mbx_lock(hw);
+ hw->mbx.ops.process(hw, &hw->mbx);
+ fm10k_mbx_unlock(hw);
+
+ if (state == FM10K_STATE_OPEN && mbx->state == FM10K_STATE_CONNECT) {
+ PMD_INIT_LOG(INFO, "INT: Switch has gone down");
+
+ fm10k_mbx_lock(hw);
+ hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
+ MAX_LPORT_NUM, 1);
+ fm10k_mbx_unlock(hw);
+
+ /* Setting reset flag */
+ dev_info->sm_down = 1;
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+ }
+
+ if (dev_info->sm_down == 1 &&
+ hw->mac.dglort_map == FM10K_DGLORTMAP_ZERO) {
+ PMD_INIT_LOG(INFO, "INT: Switch has gone up");
+ fm10k_mbx_lock(hw);
+ status_mbx = hw->mac.ops.update_xcast_mode(hw,
+ hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
+ if (status_mbx != FM10K_SUCCESS)
+ PMD_INIT_LOG(ERR, "Failed to set XCAST mode");
+ fm10k_mbx_unlock(hw);
+
+ /* first clear the internal SW recording structure */
+ fm10k_vlan_filter_set(dev, hw->mac.default_vid, false);
+ fm10k_MAC_filter_set(dev, hw->mac.addr, false,
+ MAIN_VSI_POOL_NUMBER);
+
+		/*
+		 * Add the default MAC address and VLAN for the logical ports
+		 * that have been created; it is left to the application to
+		 * fully recover Rx filtering.
+		 */
+ fm10k_MAC_filter_set(dev, hw->mac.addr, true,
+ MAIN_VSI_POOL_NUMBER);
+ fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
+
+ dev_info->sm_down = 0;
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+ }
+
+ /* Re-enable interrupt from device side */
+ FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
+ FM10K_ITR_MASK_CLEAR);
+ /* Re-enable interrupt from host side */
+ rte_intr_ack(dev->intr_handle);
+}
+
+/* Mailbox message handler in VF */
+static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
+ FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
+ FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
+ FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
+ FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
+};
+
+static int
+fm10k_setup_mbx_service(struct fm10k_hw *hw)
+{
+ int err = 0;
+
+ /* Initialize mailbox lock */
+ fm10k_mbx_initlock(hw);
+
+	/* Replace the default message handlers with new ones */
+ if (hw->mac.type == fm10k_mac_vf)
+ err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);
+
+ if (err) {
+		PMD_INIT_LOG(ERR, "Failed to register mailbox handler, err: %d",
+				err);
+ return err;
+ }
+ /* Connect to SM for PF device or PF for VF device */
+ return hw->mbx.ops.connect(hw, &hw->mbx);
+}
+
+static void
+fm10k_close_mbx_service(struct fm10k_hw *hw)
+{
+ /* Disconnect from SM for PF device or PF for VF device */
+ hw->mbx.ops.disconnect(hw, &hw->mbx);
+}
+
+static void
+fm10k_dev_close(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pdev->intr_handle;
+
+ PMD_INIT_FUNC_TRACE();
+
+ fm10k_mbx_lock(hw);
+ hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
+ MAX_LPORT_NUM, false);
+ fm10k_mbx_unlock(hw);
+
+ /* allow 100ms for device to quiesce */
+ rte_delay_us(FM10K_SWITCH_QUIESCE_US);
+
+ /* Stop mailbox service first */
+ fm10k_close_mbx_service(hw);
+ fm10k_dev_stop(dev);
+ fm10k_dev_queue_release(dev);
+ fm10k_stop_hw(hw);
+
+ dev->dev_ops = NULL;
+ dev->rx_pkt_burst = NULL;
+ dev->tx_pkt_burst = NULL;
+
+ /* disable uio/vfio intr */
+ rte_intr_disable(intr_handle);
+
+	/* PF and VF have different interrupt handling mechanisms */
+ if (hw->mac.type == fm10k_mac_pf) {
+ /* disable interrupt */
+ fm10k_dev_disable_intr_pf(dev);
+
+ /* unregister callback func to eal lib */
+ rte_intr_callback_unregister(intr_handle,
+ fm10k_dev_interrupt_handler_pf, (void *)dev);
+ } else {
+ /* disable interrupt */
+ fm10k_dev_disable_intr_vf(dev);
+
+ rte_intr_callback_unregister(intr_handle,
+ fm10k_dev_interrupt_handler_vf, (void *)dev);
+ }
+}
+
+static const struct eth_dev_ops fm10k_eth_dev_ops = {
+ .dev_configure = fm10k_dev_configure,
+ .dev_start = fm10k_dev_start,
+ .dev_stop = fm10k_dev_stop,
+ .dev_close = fm10k_dev_close,
+ .promiscuous_enable = fm10k_dev_promiscuous_enable,
+ .promiscuous_disable = fm10k_dev_promiscuous_disable,
+ .allmulticast_enable = fm10k_dev_allmulticast_enable,
+ .allmulticast_disable = fm10k_dev_allmulticast_disable,
+ .stats_get = fm10k_stats_get,
+ .xstats_get = fm10k_xstats_get,
+ .xstats_get_names = fm10k_xstats_get_names,
+ .stats_reset = fm10k_stats_reset,
+ .xstats_reset = fm10k_stats_reset,
+ .link_update = fm10k_link_update,
+ .dev_infos_get = fm10k_dev_infos_get,
+ .dev_supported_ptypes_get = fm10k_dev_supported_ptypes_get,
+ .vlan_filter_set = fm10k_vlan_filter_set,
+ .vlan_offload_set = fm10k_vlan_offload_set,
+ .mac_addr_add = fm10k_macaddr_add,
+ .mac_addr_remove = fm10k_macaddr_remove,
+ .rx_queue_start = fm10k_dev_rx_queue_start,
+ .rx_queue_stop = fm10k_dev_rx_queue_stop,
+ .tx_queue_start = fm10k_dev_tx_queue_start,
+ .tx_queue_stop = fm10k_dev_tx_queue_stop,
+ .rx_queue_setup = fm10k_rx_queue_setup,
+ .rx_queue_release = fm10k_rx_queue_release,
+ .tx_queue_setup = fm10k_tx_queue_setup,
+ .tx_queue_release = fm10k_tx_queue_release,
+ .rx_queue_count = fm10k_dev_rx_queue_count,
+ .rx_descriptor_done = fm10k_dev_rx_descriptor_done,
+ .rx_descriptor_status = fm10k_dev_rx_descriptor_status,
+ .tx_descriptor_status = fm10k_dev_tx_descriptor_status,
+ .rx_queue_intr_enable = fm10k_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = fm10k_dev_rx_queue_intr_disable,
+ .reta_update = fm10k_reta_update,
+ .reta_query = fm10k_reta_query,
+ .rss_hash_update = fm10k_rss_hash_update,
+ .rss_hash_conf_get = fm10k_rss_hash_conf_get,
+};
+
+static int ftag_check_handler(__rte_unused const char *key,
+ const char *value, __rte_unused void *opaque)
+{
+ if (strcmp(value, "1"))
+ return -1;
+
+ return 0;
+}
+
+static int
+fm10k_check_ftag(struct rte_devargs *devargs)
+{
+ struct rte_kvargs *kvlist;
+ const char *ftag_key = "enable_ftag";
+
+ if (devargs == NULL)
+ return 0;
+
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (kvlist == NULL)
+ return 0;
+
+ if (!rte_kvargs_count(kvlist, ftag_key)) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+ /* FTAG is enabled when there's key-value pair: enable_ftag=1 */
+ if (rte_kvargs_process(kvlist, ftag_key,
+ ftag_check_handler, NULL) < 0) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+ rte_kvargs_free(kvlist);
+
+ return 1;
+}
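+
+/*
+ * FTAG is requested per device through EAL devargs. With the device
+ * whitelist syntax of this DPDK generation that looks like the following
+ * (the PCI address is a placeholder):
+ *
+ *   testpmd -w 0000:84:00.0,enable_ftag=1 -- -i
+ *
+ * Any value other than "1" makes ftag_check_handler() reject the option.
+ */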
+
+static uint16_t
+fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx = 0;
+ struct fm10k_tx_queue *txq = (struct fm10k_tx_queue *)tx_queue;
+
+ while (nb_pkts) {
+ uint16_t ret, num;
+
+ num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
+ ret = fm10k_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
+ num);
+ nb_tx += ret;
+ nb_pkts -= ret;
+ if (ret < num)
+ break;
+ }
+
+ return nb_tx;
+}
+
+static void __rte_cold
+fm10k_set_tx_function(struct rte_eth_dev *dev)
+{
+ struct fm10k_tx_queue *txq;
+ int i;
+ int use_sse = 1;
+ uint16_t tx_ftag_en = 0;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ /* primary process has set the ftag flag and offloads */
+ txq = dev->data->tx_queues[0];
+ if (fm10k_tx_vec_condition_check(txq)) {
+ dev->tx_pkt_burst = fm10k_xmit_pkts;
+ dev->tx_pkt_prepare = fm10k_prep_pkts;
+ PMD_INIT_LOG(DEBUG, "Use regular Tx func");
+ } else {
+ PMD_INIT_LOG(DEBUG, "Use vector Tx func");
+ dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
+ dev->tx_pkt_prepare = NULL;
+ }
+ return;
+ }
+
+ if (fm10k_check_ftag(dev->device->devargs))
+ tx_ftag_en = 1;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ txq->tx_ftag_en = tx_ftag_en;
+ /* Check if Vector Tx is satisfied */
+ if (fm10k_tx_vec_condition_check(txq))
+ use_sse = 0;
+ }
+
+ if (use_sse) {
+ PMD_INIT_LOG(DEBUG, "Use vector Tx func");
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ fm10k_txq_vec_setup(txq);
+ }
+ dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
+ dev->tx_pkt_prepare = NULL;
+ } else {
+ dev->tx_pkt_burst = fm10k_xmit_pkts;
+ dev->tx_pkt_prepare = fm10k_prep_pkts;
+ PMD_INIT_LOG(DEBUG, "Use regular Tx func");
+ }
+}
+
+static void __rte_cold
+fm10k_set_rx_function(struct rte_eth_dev *dev)
+{
+ struct fm10k_dev_info *dev_info =
+ FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
+ uint16_t i, rx_using_sse;
+ uint16_t rx_ftag_en = 0;
+
+ if (fm10k_check_ftag(dev->device->devargs))
+ rx_ftag_en = 1;
+
+ /* In order to allow Vector Rx there are a few configuration
+ * conditions to be met.
+ */
+ if (!fm10k_rx_vec_condition_check(dev) &&
+ dev_info->rx_vec_allowed && !rx_ftag_en) {
+ if (dev->data->scattered_rx)
+ dev->rx_pkt_burst = fm10k_recv_scattered_pkts_vec;
+ else
+ dev->rx_pkt_burst = fm10k_recv_pkts_vec;
+ } else if (dev->data->scattered_rx)
+ dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
+ else
+ dev->rx_pkt_burst = fm10k_recv_pkts;
+
+ rx_using_sse =
+ (dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec ||
+ dev->rx_pkt_burst == fm10k_recv_pkts_vec);
+
+ if (rx_using_sse)
+ PMD_INIT_LOG(DEBUG, "Use vector Rx func");
+ else
+ PMD_INIT_LOG(DEBUG, "Use regular Rx func");
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct fm10k_rx_queue *rxq = dev->data->rx_queues[i];
+
+ rxq->rx_using_sse = rx_using_sse;
+ rxq->rx_ftag_en = rx_ftag_en;
+ }
+}
+
+static void
+fm10k_params_init(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct fm10k_dev_info *info =
+ FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
+
+	/* Initialize bus info. Normally we would call fm10k_get_bus_info(),
+	 * but there is no way to get link status without reading BAR4. Until
+	 * this works, assume we have maximum bandwidth.
+	 * @todo - fix bus info
+	 */
+ hw->bus_caps.speed = fm10k_bus_speed_8000;
+ hw->bus_caps.width = fm10k_bus_width_pcie_x8;
+ hw->bus_caps.payload = fm10k_bus_payload_512;
+ hw->bus.speed = fm10k_bus_speed_8000;
+ hw->bus.width = fm10k_bus_width_pcie_x8;
+ hw->bus.payload = fm10k_bus_payload_256;
+
+ info->rx_vec_allowed = true;
+ info->sm_down = false;
+}
+
+static int
+eth_fm10k_dev_init(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pdev->intr_handle;
+ int diag, i;
+ struct fm10k_macvlan_filter_info *macvlan;
+
+ PMD_INIT_FUNC_TRACE();
+
+ dev->dev_ops = &fm10k_eth_dev_ops;
+ dev->rx_pkt_burst = &fm10k_recv_pkts;
+ dev->tx_pkt_burst = &fm10k_xmit_pkts;
+ dev->tx_pkt_prepare = &fm10k_prep_pkts;
+
+ /*
+ * Primary process does the whole initialization, for secondary
+ * processes, we just select the same Rx and Tx function as primary.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ fm10k_set_rx_function(dev);
+ fm10k_set_tx_function(dev);
+ return 0;
+ }
+
+ rte_eth_copy_pci_info(dev, pdev);
+
+ macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
+ memset(macvlan, 0, sizeof(*macvlan));
+ /* Vendor and Device ID need to be set before init of shared code */
+ memset(hw, 0, sizeof(*hw));
+ hw->device_id = pdev->id.device_id;
+ hw->vendor_id = pdev->id.vendor_id;
+ hw->subsystem_device_id = pdev->id.subsystem_device_id;
+ hw->subsystem_vendor_id = pdev->id.subsystem_vendor_id;
+ hw->revision_id = 0;
+ hw->hw_addr = (void *)pdev->mem_resource[0].addr;
+ if (hw->hw_addr == NULL) {
+ PMD_INIT_LOG(ERR, "Bad mem resource."
+ " Try to blacklist unused devices.");
+ return -EIO;
+ }
+
+ /* Store fm10k_adapter pointer */
+ hw->back = dev->data->dev_private;
+
+ /* Initialize the shared code */
+ diag = fm10k_init_shared_code(hw);
+ if (diag != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
+ return -EIO;
+ }
+
+ /* Initialize parameters */
+ fm10k_params_init(dev);
+
+ /* Initialize the hw */
+ diag = fm10k_init_hw(hw);
+ if (diag != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
+ return -EIO;
+ }
+
+ /* Initialize MAC address(es) */
+ dev->data->mac_addrs = rte_zmalloc("fm10k",
+ RTE_ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
+ if (dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
+ return -ENOMEM;
+ }
+
+ diag = fm10k_read_mac_addr(hw);
+
+ rte_ether_addr_copy((const struct rte_ether_addr *)hw->mac.addr,
+ &dev->data->mac_addrs[0]);
+
+ if (diag != FM10K_SUCCESS ||
+ !rte_is_valid_assigned_ether_addr(dev->data->mac_addrs)) {
+
+ /* Generate a random addr */
+ rte_eth_random_addr(hw->mac.addr);
+ memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
+ rte_ether_addr_copy((const struct rte_ether_addr *)hw->mac.addr,
+ &dev->data->mac_addrs[0]);
+ }
+
+ /* Pass the information to the rte_eth_dev_close() that it should also
+ * release the private port resources.
+ */
+ dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
+
+ /* Reset the hw statistics */
+ diag = fm10k_stats_reset(dev);
+ if (diag != 0) {
+ PMD_INIT_LOG(ERR, "Stats reset failed: %d", diag);
+ return diag;
+ }
+
+ /* Reset the hw */
+ diag = fm10k_reset_hw(hw);
+ if (diag != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
+ return -EIO;
+ }
+
+ /* Setup mailbox service */
+ diag = fm10k_setup_mbx_service(hw);
+ if (diag != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
+ return -EIO;
+ }
+
+	/* PF and VF have different interrupt handling mechanisms */
+ if (hw->mac.type == fm10k_mac_pf) {
+ /* register callback func to eal lib */
+ rte_intr_callback_register(intr_handle,
+ fm10k_dev_interrupt_handler_pf, (void *)dev);
+
+ /* enable MISC interrupt */
+ fm10k_dev_enable_intr_pf(dev);
+ } else { /* VF */
+ rte_intr_callback_register(intr_handle,
+ fm10k_dev_interrupt_handler_vf, (void *)dev);
+
+ fm10k_dev_enable_intr_vf(dev);
+ }
+
+ /* Enable intr after callback registered */
+ rte_intr_enable(intr_handle);
+
+ hw->mac.ops.update_int_moderator(hw);
+
+ /* Make sure Switch Manager is ready before going forward. */
+ if (hw->mac.type == fm10k_mac_pf) {
+ bool switch_ready = false;
+
+ for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
+ fm10k_mbx_lock(hw);
+ hw->mac.ops.get_host_state(hw, &switch_ready);
+ fm10k_mbx_unlock(hw);
+ if (switch_ready == true)
+ break;
+ /* Delay some time to acquire async LPORT_MAP info. */
+ rte_delay_us(WAIT_SWITCH_MSG_US);
+ }
+
+ if (switch_ready == false) {
+ PMD_INIT_LOG(ERR, "switch is not ready");
+ return -1;
+ }
+ }
+
+ /*
+ * Below function will trigger operations on mailbox, acquire lock to
+ * avoid race condition from interrupt handler. Operations on mailbox
+ * FIFO will trigger interrupt to PF/SM, in which interrupt handler
+ * will handle and generate an interrupt to our side. Then, FIFO in
+ * mailbox will be touched.
+ */
+ fm10k_mbx_lock(hw);
+ /* Enable port first */
+ hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
+ MAX_LPORT_NUM, 1);
+
+	/* Set unicast mode by default. The application can switch to another
+	 * mode through other API functions.
+	 */
+ hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
+ FM10K_XCAST_MODE_NONE);
+
+ fm10k_mbx_unlock(hw);
+
+ /* Make sure default VID is ready before going forward. */
+ if (hw->mac.type == fm10k_mac_pf) {
+ for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
+ if (hw->mac.default_vid)
+ break;
+ /* Delay some time to acquire async port VLAN info. */
+ rte_delay_us(WAIT_SWITCH_MSG_US);
+ }
+
+ if (!hw->mac.default_vid) {
+ PMD_INIT_LOG(ERR, "default VID is not ready");
+ return -1;
+ }
+ }
+
+ /* Add default mac address */
+ fm10k_MAC_filter_set(dev, hw->mac.addr, true,
+ MAIN_VSI_POOL_NUMBER);
+
+ return 0;
+}
+
+static int
+eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ /* only uninitialize in the primary process */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ /* safe to close dev here */
+ fm10k_dev_close(dev);
+
+ return 0;
+}
+
+static int eth_fm10k_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct fm10k_adapter), eth_fm10k_dev_init);
+}
+
+static int eth_fm10k_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_fm10k_dev_uninit);
+}
+
+/*
+ * The set of PCI devices this driver supports. This driver will enable both PF
+ * and SRIOV-VF devices.
+ */
+static const struct rte_pci_id pci_id_fm10k_map[] = {
+ { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_PF) },
+ { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_SDI_FM10420_QDA2) },
+ { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_VF) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+static struct rte_pci_driver rte_pmd_fm10k = {
+ .id_table = pci_id_fm10k_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = eth_fm10k_pci_probe,
+ .remove = eth_fm10k_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_fm10k, rte_pmd_fm10k);
+RTE_PMD_REGISTER_PCI_TABLE(net_fm10k, pci_id_fm10k_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_fm10k, "* igb_uio | uio_pci_generic | vfio-pci");
+
+RTE_INIT(fm10k_init_log)
+{
+ fm10k_logtype_init = rte_log_register("pmd.net.fm10k.init");
+ if (fm10k_logtype_init >= 0)
+ rte_log_set_level(fm10k_logtype_init, RTE_LOG_NOTICE);
+ fm10k_logtype_driver = rte_log_register("pmd.net.fm10k.driver");
+ if (fm10k_logtype_driver >= 0)
+ rte_log_set_level(fm10k_logtype_driver, RTE_LOG_NOTICE);
+
+#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
+ fm10k_logtype_rx = rte_log_register("pmd.net.fm10k.rx");
+ if (fm10k_logtype_rx >= 0)
+ rte_log_set_level(fm10k_logtype_rx, RTE_LOG_DEBUG);
+#endif
+
+#ifdef RTE_LIBRTE_FM10K_DEBUG_TX
+ fm10k_logtype_tx = rte_log_register("pmd.net.fm10k.tx");
+ if (fm10k_logtype_tx >= 0)
+ rte_log_set_level(fm10k_logtype_tx, RTE_LOG_DEBUG);
+#endif
+
+#ifdef RTE_LIBRTE_FM10K_DEBUG_TX_FREE
+ fm10k_logtype_tx_free = rte_log_register("pmd.net.fm10k.tx_free");
+ if (fm10k_logtype_tx_free >= 0)
+ rte_log_set_level(fm10k_logtype_tx_free, RTE_LOG_DEBUG);
+#endif
+}
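+
+/*
+ * The log types registered above can be tuned at startup without
+ * rebuilding, either from the EAL command line, e.g.:
+ *
+ *   testpmd --log-level=pmd.net.fm10k.init:debug ...
+ *
+ * or programmatically with
+ * rte_log_set_level_pattern("pmd.net.fm10k.*", RTE_LOG_DEBUG).
+ */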
diff --git a/src/spdk/dpdk/drivers/net/fm10k/fm10k_logs.h b/src/spdk/dpdk/drivers/net/fm10k/fm10k_logs.h
new file mode 100644
index 000000000..9ae743d80
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/fm10k_logs.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013-2015 Intel Corporation
+ */
+
+#ifndef _FM10K_LOGS_H_
+#define _FM10K_LOGS_H_
+
+#include <rte_log.h>
+
+extern int fm10k_logtype_init;
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, fm10k_logtype_init, \
+ "%s(): " fmt "\n", __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+
+#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
+extern int fm10k_logtype_rx;
+#define PMD_RX_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, fm10k_logtype_rx, \
+ "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_FM10K_DEBUG_TX
+extern int fm10k_logtype_tx;
+#define PMD_TX_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, fm10k_logtype_tx, \
+ "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_FM10K_DEBUG_TX_FREE
+extern int fm10k_logtype_tx_free;
+#define PMD_TX_FREE_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, fm10k_logtype_tx_free, \
+ "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+extern int fm10k_logtype_driver;
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, fm10k_logtype_driver, "%s(): " fmt, \
+ __func__, ## args)
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
+#endif /* _FM10K_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/fm10k/fm10k_rxtx.c b/src/spdk/dpdk/drivers/net/fm10k/fm10k_rxtx.c
new file mode 100644
index 000000000..4accaa2cd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/fm10k_rxtx.c
@@ -0,0 +1,728 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013-2016 Intel Corporation
+ */
+
+#include <inttypes.h>
+
+#include <rte_ethdev_driver.h>
+#include <rte_common.h>
+#include <rte_net.h>
+#include "fm10k.h"
+#include "base/fm10k_type.h"
+
+#ifdef RTE_PMD_PACKET_PREFETCH
+#define rte_packet_prefetch(p) rte_prefetch1(p)
+#else
+#define rte_packet_prefetch(p) do {} while (0)
+#endif
+
+#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
+static inline void dump_rxd(union fm10k_rx_desc *rxd)
+{
+ PMD_RX_LOG(DEBUG, "+----------------|----------------+");
+ PMD_RX_LOG(DEBUG, "| GLORT | PKT HDR & TYPE |");
+ PMD_RX_LOG(DEBUG, "| 0x%08x | 0x%08x |", rxd->d.glort,
+ rxd->d.data);
+ PMD_RX_LOG(DEBUG, "+----------------|----------------+");
+ PMD_RX_LOG(DEBUG, "| VLAN & LEN | STATUS |");
+ PMD_RX_LOG(DEBUG, "| 0x%08x | 0x%08x |", rxd->d.vlan_len,
+ rxd->d.staterr);
+ PMD_RX_LOG(DEBUG, "+----------------|----------------+");
+ PMD_RX_LOG(DEBUG, "| RESERVED | RSS_HASH |");
+ PMD_RX_LOG(DEBUG, "| 0x%08x | 0x%08x |", 0, rxd->d.rss);
+ PMD_RX_LOG(DEBUG, "+----------------|----------------+");
+ PMD_RX_LOG(DEBUG, "| TIME TAG |");
+ PMD_RX_LOG(DEBUG, "| 0x%016"PRIx64" |", rxd->q.timestamp);
+ PMD_RX_LOG(DEBUG, "+----------------|----------------+");
+}
+#endif
+
+#define FM10K_TX_OFFLOAD_MASK ( \
+ PKT_TX_VLAN_PKT | \
+ PKT_TX_IPV6 | \
+ PKT_TX_IPV4 | \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_TCP_SEG)
+
+#define FM10K_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ FM10K_TX_OFFLOAD_MASK)
+
+/* @note: When this function is changed, make corresponding change to
+ * fm10k_dev_supported_ptypes_get()
+ */
+static inline void
+rx_desc_to_ol_flags(struct rte_mbuf *m, const union fm10k_rx_desc *d)
+{
+ static const uint32_t
+ ptype_table[FM10K_RXD_PKTTYPE_MASK >> FM10K_RXD_PKTTYPE_SHIFT]
+ __rte_cache_aligned = {
+ [FM10K_PKTTYPE_OTHER] = RTE_PTYPE_L2_ETHER,
+ [FM10K_PKTTYPE_IPV4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
+ [FM10K_PKTTYPE_IPV4_EX] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT,
+ [FM10K_PKTTYPE_IPV6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
+ [FM10K_PKTTYPE_IPV6_EX] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT,
+ [FM10K_PKTTYPE_IPV4 | FM10K_PKTTYPE_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+ [FM10K_PKTTYPE_IPV6 | FM10K_PKTTYPE_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+ [FM10K_PKTTYPE_IPV4 | FM10K_PKTTYPE_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+ [FM10K_PKTTYPE_IPV6 | FM10K_PKTTYPE_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+ };
+
+ m->packet_type = ptype_table[(d->w.pkt_info & FM10K_RXD_PKTTYPE_MASK)
+ >> FM10K_RXD_PKTTYPE_SHIFT];
+
+ if (d->w.pkt_info & FM10K_RXD_RSSTYPE_MASK)
+ m->ol_flags |= PKT_RX_RSS_HASH;
+
+ if (unlikely((d->d.staterr &
+ (FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)) ==
+ (FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)))
+ m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ else
+ m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+
+ if (unlikely((d->d.staterr &
+ (FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)) ==
+ (FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)))
+ m->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ else
+ m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+}
+
+uint16_t
+fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct rte_mbuf *mbuf;
+ union fm10k_rx_desc desc;
+ struct fm10k_rx_queue *q = rx_queue;
+ uint16_t count = 0;
+ int alloc = 0;
+ uint16_t next_dd;
+ int ret;
+
+ next_dd = q->next_dd;
+
+ nb_pkts = RTE_MIN(nb_pkts, q->alloc_thresh);
+ for (count = 0; count < nb_pkts; ++count) {
+ if (!(q->hw_ring[next_dd].d.staterr & FM10K_RXD_STATUS_DD))
+ break;
+ mbuf = q->sw_ring[next_dd];
+ desc = q->hw_ring[next_dd];
+#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
+ dump_rxd(&desc);
+#endif
+ rte_pktmbuf_pkt_len(mbuf) = desc.w.length;
+ rte_pktmbuf_data_len(mbuf) = desc.w.length;
+
+ mbuf->ol_flags = 0;
+#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
+ rx_desc_to_ol_flags(mbuf, &desc);
+#endif
+
+ mbuf->hash.rss = desc.d.rss;
+		/**
+		 * Packets from the fm10k device always carry at least one
+		 * VLAN tag; packets arriving without one are given the port
+		 * default VLAN tag. So the PKT_RX_VLAN flag is always set
+		 * and vlan_tci is valid for each Rx packet's mbuf.
+		 */
+ mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ mbuf->vlan_tci = desc.w.vlan;
+		/**
+		 * mbuf->vlan_tci_outer is unused by the fm10k driver, so it
+		 * can be repurposed to store the sglort value.
+		 */
+ if (q->rx_ftag_en)
+ mbuf->vlan_tci_outer = rte_le_to_cpu_16(desc.w.sglort);
+
+ rx_pkts[count] = mbuf;
+ if (++next_dd == q->nb_desc) {
+ next_dd = 0;
+ alloc = 1;
+ }
+
+ /* Prefetch next mbuf while processing current one. */
+ rte_prefetch0(q->sw_ring[next_dd]);
+
+ /*
+ * When next RX descriptor is on a cache-line boundary,
+ * prefetch the next 4 RX descriptors and the next 8 pointers
+ * to mbufs.
+ */
+ if ((next_dd & 0x3) == 0) {
+ rte_prefetch0(&q->hw_ring[next_dd]);
+ rte_prefetch0(&q->sw_ring[next_dd]);
+ }
+ }
+
+ q->next_dd = next_dd;
+
+ if ((q->next_dd > q->next_trigger) || (alloc == 1)) {
+ ret = rte_mempool_get_bulk(q->mp,
+ (void **)&q->sw_ring[q->next_alloc],
+ q->alloc_thresh);
+
+ if (unlikely(ret != 0)) {
+ uint16_t port = q->port_id;
+ PMD_RX_LOG(ERR, "Failed to alloc mbuf");
+ /*
+ * Need to restore next_dd if we cannot allocate new
+ * buffers to replenish the old ones.
+ */
+ q->next_dd = (q->next_dd + q->nb_desc - count) %
+ q->nb_desc;
+ rte_eth_devices[port].data->rx_mbuf_alloc_failed++;
+ return 0;
+ }
+
+ for (; q->next_alloc <= q->next_trigger; ++q->next_alloc) {
+ mbuf = q->sw_ring[q->next_alloc];
+
+ /* setup static mbuf fields */
+ fm10k_pktmbuf_reset(mbuf, q->port_id);
+
+ /* write descriptor */
+ desc.q.pkt_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
+ desc.q.hdr_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
+ q->hw_ring[q->next_alloc] = desc;
+ }
+ FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_trigger);
+ q->next_trigger += q->alloc_thresh;
+ if (q->next_trigger >= q->nb_desc) {
+ q->next_trigger = q->alloc_thresh - 1;
+ q->next_alloc = 0;
+ }
+ }
+
+ return count;
+}
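+
+/*
+ * Illustrative sketch (not part of the driver): the receive path above is
+ * reached through rte_eth_rx_burst(). A minimal polling loop; the burst
+ * size of 32 is an assumed value and is capped internally by the queue's
+ * alloc_thresh, as done at the top of fm10k_recv_pkts().
+ */
+static void __rte_unused
+example_rx_poll(uint16_t port_id, uint16_t queue_id)
+{
+	struct rte_mbuf *pkts[32];
+	uint16_t i, nb_rx;
+
+	nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
+	for (i = 0; i < nb_rx; i++) {
+		/* vlan_tci is always valid here, per the comment above */
+		rte_pktmbuf_free(pkts[i]);
+	}
+}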
+
+uint16_t
+fm10k_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct rte_mbuf *mbuf;
+ union fm10k_rx_desc desc;
+ struct fm10k_rx_queue *q = rx_queue;
+ uint16_t count = 0;
+ uint16_t nb_rcv, nb_seg;
+ int alloc = 0;
+ uint16_t next_dd;
+ struct rte_mbuf *first_seg = q->pkt_first_seg;
+ struct rte_mbuf *last_seg = q->pkt_last_seg;
+ int ret;
+
+ next_dd = q->next_dd;
+ nb_rcv = 0;
+
+ nb_seg = RTE_MIN(nb_pkts, q->alloc_thresh);
+ for (count = 0; count < nb_seg; count++) {
+ if (!(q->hw_ring[next_dd].d.staterr & FM10K_RXD_STATUS_DD))
+ break;
+ mbuf = q->sw_ring[next_dd];
+ desc = q->hw_ring[next_dd];
+#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
+ dump_rxd(&desc);
+#endif
+
+ if (++next_dd == q->nb_desc) {
+ next_dd = 0;
+ alloc = 1;
+ }
+
+ /* Prefetch next mbuf while processing current one. */
+ rte_prefetch0(q->sw_ring[next_dd]);
+
+ /*
+ * When next RX descriptor is on a cache-line boundary,
+ * prefetch the next 4 RX descriptors and the next 8 pointers
+ * to mbufs.
+ */
+ if ((next_dd & 0x3) == 0) {
+ rte_prefetch0(&q->hw_ring[next_dd]);
+ rte_prefetch0(&q->sw_ring[next_dd]);
+ }
+
+ /* Fill data length */
+ rte_pktmbuf_data_len(mbuf) = desc.w.length;
+
+ /*
+ * If this is the first buffer of the received packet,
+ * set the pointer to the first mbuf of the packet and
+ * initialize its context.
+ * Otherwise, update the total length and the number of segments
+ * of the current scattered packet, and update the pointer to
+ * the last mbuf of the current packet.
+ */
+ if (!first_seg) {
+ first_seg = mbuf;
+ first_seg->pkt_len = desc.w.length;
+ } else {
+ first_seg->pkt_len =
+ (uint16_t)(first_seg->pkt_len +
+ rte_pktmbuf_data_len(mbuf));
+ first_seg->nb_segs++;
+ last_seg->next = mbuf;
+ }
+
+ /*
+ * If this is not the last buffer of the received packet,
+ * update the pointer to the last mbuf of the current scattered
+ * packet and continue to parse the RX ring.
+ */
+ if (!(desc.d.staterr & FM10K_RXD_STATUS_EOP)) {
+ last_seg = mbuf;
+ continue;
+ }
+
+ first_seg->ol_flags = 0;
+#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
+ rx_desc_to_ol_flags(first_seg, &desc);
+#endif
+ first_seg->hash.rss = desc.d.rss;
+		/**
+		 * Packets from the fm10k device always carry at least one
+		 * VLAN tag; packets arriving without one are given the port
+		 * default VLAN tag. So the PKT_RX_VLAN flag is always set
+		 * and vlan_tci is valid for each Rx packet's mbuf.
+		 */
+ first_seg->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ first_seg->vlan_tci = desc.w.vlan;
+		/**
+		 * mbuf->vlan_tci_outer is unused by the fm10k driver, so it
+		 * can be repurposed to store the sglort value.
+		 */
+ if (q->rx_ftag_en)
+ first_seg->vlan_tci_outer =
+ rte_le_to_cpu_16(desc.w.sglort);
+
+ /* Prefetch data of first segment, if configured to do so. */
+ rte_packet_prefetch((char *)first_seg->buf_addr +
+ first_seg->data_off);
+
+ /*
+ * Store the mbuf address into the next entry of the array
+ * of returned packets.
+ */
+ rx_pkts[nb_rcv++] = first_seg;
+
+ /*
+ * Setup receipt context for a new packet.
+ */
+ first_seg = NULL;
+ }
+
+ q->next_dd = next_dd;
+
+ if ((q->next_dd > q->next_trigger) || (alloc == 1)) {
+ ret = rte_mempool_get_bulk(q->mp,
+ (void **)&q->sw_ring[q->next_alloc],
+ q->alloc_thresh);
+
+ if (unlikely(ret != 0)) {
+ uint16_t port = q->port_id;
+ PMD_RX_LOG(ERR, "Failed to alloc mbuf");
+ /*
+ * Need to restore next_dd if we cannot allocate new
+ * buffers to replenish the old ones.
+ */
+ q->next_dd = (q->next_dd + q->nb_desc - count) %
+ q->nb_desc;
+ rte_eth_devices[port].data->rx_mbuf_alloc_failed++;
+ return 0;
+ }
+
+ for (; q->next_alloc <= q->next_trigger; ++q->next_alloc) {
+ mbuf = q->sw_ring[q->next_alloc];
+
+ /* setup static mbuf fields */
+ fm10k_pktmbuf_reset(mbuf, q->port_id);
+
+ /* write descriptor */
+ desc.q.pkt_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
+ desc.q.hdr_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
+ q->hw_ring[q->next_alloc] = desc;
+ }
+ FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_trigger);
+ q->next_trigger += q->alloc_thresh;
+ if (q->next_trigger >= q->nb_desc) {
+ q->next_trigger = q->alloc_thresh - 1;
+ q->next_alloc = 0;
+ }
+ }
+
+ q->pkt_first_seg = first_seg;
+ q->pkt_last_seg = last_seg;
+
+ return nb_rcv;
+}
+
+uint32_t
+fm10k_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+#define FM10K_RXQ_SCAN_INTERVAL 4
+ volatile union fm10k_rx_desc *rxdp;
+ struct fm10k_rx_queue *rxq;
+ uint16_t desc = 0;
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+ rxdp = &rxq->hw_ring[rxq->next_dd];
+ while ((desc < rxq->nb_desc) &&
+ rxdp->w.status & rte_cpu_to_le_16(FM10K_RXD_STATUS_DD)) {
+		/**
+		 * Check the DD bit of one Rx descriptor in each group of 4,
+		 * to avoid checking too frequently and degrading performance
+		 * too much.
+		 */
+ desc += FM10K_RXQ_SCAN_INTERVAL;
+ rxdp += FM10K_RXQ_SCAN_INTERVAL;
+ if (rxq->next_dd + desc >= rxq->nb_desc)
+ rxdp = &rxq->hw_ring[rxq->next_dd + desc -
+ rxq->nb_desc];
+ }
+
+ return desc;
+}
+
+int
+fm10k_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
+{
+ volatile union fm10k_rx_desc *rxdp;
+ struct fm10k_rx_queue *rxq = rx_queue;
+ uint16_t desc;
+ int ret;
+
+ if (unlikely(offset >= rxq->nb_desc)) {
+ PMD_DRV_LOG(ERR, "Invalid RX descriptor offset %u", offset);
+ return 0;
+ }
+
+ desc = rxq->next_dd + offset;
+ if (desc >= rxq->nb_desc)
+ desc -= rxq->nb_desc;
+
+ rxdp = &rxq->hw_ring[desc];
+
+ ret = !!(rxdp->w.status &
+ rte_cpu_to_le_16(FM10K_RXD_STATUS_DD));
+
+ return ret;
+}
+
+int
+fm10k_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ volatile union fm10k_rx_desc *rxdp;
+ struct fm10k_rx_queue *rxq = rx_queue;
+ uint16_t nb_hold, trigger_last;
+ uint16_t desc;
+ int ret;
+
+ if (unlikely(offset >= rxq->nb_desc)) {
+ PMD_DRV_LOG(ERR, "Invalid RX descriptor offset %u", offset);
+ return 0;
+ }
+
+ if (rxq->next_trigger < rxq->alloc_thresh)
+ trigger_last = rxq->next_trigger +
+ rxq->nb_desc - rxq->alloc_thresh;
+ else
+ trigger_last = rxq->next_trigger - rxq->alloc_thresh;
+
+ if (rxq->next_dd < trigger_last)
+ nb_hold = rxq->next_dd + rxq->nb_desc - trigger_last;
+ else
+ nb_hold = rxq->next_dd - trigger_last;
+
+ if (offset >= rxq->nb_desc - nb_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ desc = rxq->next_dd + offset;
+ if (desc >= rxq->nb_desc)
+ desc -= rxq->nb_desc;
+
+ rxdp = &rxq->hw_ring[desc];
+
+ ret = !!(rxdp->w.status &
+ rte_cpu_to_le_16(FM10K_RXD_STATUS_DD));
+
+ return ret;
+}
+
+int
+fm10k_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ volatile struct fm10k_tx_desc *txdp;
+ struct fm10k_tx_queue *txq = tx_queue;
+ uint16_t desc;
+ uint16_t next_rs = txq->nb_desc;
+ struct fifo rs_tracker = txq->rs_tracker;
+ struct fifo *r = &rs_tracker;
+
+ if (unlikely(offset >= txq->nb_desc))
+ return -EINVAL;
+
+ desc = txq->next_free + offset;
+ /* go to next desc that has the RS bit */
+ desc = (desc / txq->rs_thresh + 1) *
+ txq->rs_thresh - 1;
+
+ if (desc >= txq->nb_desc) {
+ desc -= txq->nb_desc;
+ if (desc >= txq->nb_desc)
+ desc -= txq->nb_desc;
+ }
+
+ r->head = r->list;
+ for ( ; r->head != r->endp; ) {
+ if (*r->head >= desc && *r->head < next_rs)
+ next_rs = *r->head;
+ ++r->head;
+ }
+
+ txdp = &txq->hw_ring[next_rs];
+ if (txdp->flags & FM10K_TXD_FLAG_DONE)
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
+
+/*
+ * Free multiple Tx mbufs at a time if they come from the same mempool.
+ *
+ * @txep: index into the software desc ring at which to start freeing
+ * @num: number of descriptors to free
+ */
+static inline void tx_free_bulk_mbuf(struct rte_mbuf **txep, int num)
+{
+ struct rte_mbuf *m, *free[RTE_FM10K_TX_MAX_FREE_BUF_SZ];
+ int i;
+ int nb_free = 0;
+
+ if (unlikely(num == 0))
+ return;
+
+ m = rte_pktmbuf_prefree_seg(txep[0]);
+ if (likely(m != NULL)) {
+ free[0] = m;
+ nb_free = 1;
+ for (i = 1; i < num; i++) {
+ m = rte_pktmbuf_prefree_seg(txep[i]);
+ if (likely(m != NULL)) {
+ if (likely(m->pool == free[0]->pool))
+ free[nb_free++] = m;
+ else {
+ rte_mempool_put_bulk(free[0]->pool,
+ (void *)free, nb_free);
+ free[0] = m;
+ nb_free = 1;
+ }
+ }
+ txep[i] = NULL;
+ }
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+ } else {
+ for (i = 1; i < num; i++) {
+ m = rte_pktmbuf_prefree_seg(txep[i]);
+ if (m != NULL)
+ rte_mempool_put(m->pool, m);
+ txep[i] = NULL;
+ }
+ }
+}
+
+static inline void tx_free_descriptors(struct fm10k_tx_queue *q)
+{
+ uint16_t next_rs, count = 0;
+
+ next_rs = fifo_peek(&q->rs_tracker);
+ if (!(q->hw_ring[next_rs].flags & FM10K_TXD_FLAG_DONE))
+ return;
+
+ /* the DONE flag is set on this descriptor so remove the ID
+ * from the RS bit tracker and free the buffers */
+ fifo_remove(&q->rs_tracker);
+
+ /* wrap around? if so, free buffers from last_free up to but NOT
+ * including nb_desc */
+ if (q->last_free > next_rs) {
+ count = q->nb_desc - q->last_free;
+ tx_free_bulk_mbuf(&q->sw_ring[q->last_free], count);
+ q->last_free = 0;
+ }
+
+ /* adjust free descriptor count before the next loop */
+ q->nb_free += count + (next_rs + 1 - q->last_free);
+
+ /* free buffers from last_free, up to and including next_rs */
+ if (q->last_free <= next_rs) {
+ count = next_rs - q->last_free + 1;
+ tx_free_bulk_mbuf(&q->sw_ring[q->last_free], count);
+ q->last_free += count;
+ }
+
+ if (q->last_free == q->nb_desc)
+ q->last_free = 0;
+}
+
+static inline void tx_xmit_pkt(struct fm10k_tx_queue *q, struct rte_mbuf *mb)
+{
+ uint16_t last_id;
+ uint8_t flags, hdrlen;
+
+ /* always set the LAST flag on the last descriptor used to
+ * transmit the packet */
+ flags = FM10K_TXD_FLAG_LAST;
+ last_id = q->next_free + mb->nb_segs - 1;
+ if (last_id >= q->nb_desc)
+ last_id = last_id - q->nb_desc;
+
+ /* but only set the RS flag on the last descriptor if rs_thresh
+ * descriptors will be used since the RS flag was last set */
+ if ((q->nb_used + mb->nb_segs) >= q->rs_thresh) {
+ flags |= FM10K_TXD_FLAG_RS;
+ fifo_insert(&q->rs_tracker, last_id);
+ q->nb_used = 0;
+ } else {
+ q->nb_used = q->nb_used + mb->nb_segs;
+ }
+
+ q->nb_free -= mb->nb_segs;
+
+ q->hw_ring[q->next_free].flags = 0;
+ if (q->tx_ftag_en)
+ q->hw_ring[q->next_free].flags |= FM10K_TXD_FLAG_FTAG;
+ /* set checksum flags on first descriptor of packet. SCTP checksum
+ * offload is not supported, but we do not explicitly check for this
+ * case in favor of greatly simplified processing. */
+ if (mb->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK | PKT_TX_TCP_SEG))
+ q->hw_ring[q->next_free].flags |= FM10K_TXD_FLAG_CSUM;
+
+ /* set vlan if requested */
+ if (mb->ol_flags & PKT_TX_VLAN_PKT)
+ q->hw_ring[q->next_free].vlan = mb->vlan_tci;
+ else
+ q->hw_ring[q->next_free].vlan = 0;
+
+ q->sw_ring[q->next_free] = mb;
+ q->hw_ring[q->next_free].buffer_addr =
+ rte_cpu_to_le_64(MBUF_DMA_ADDR(mb));
+ q->hw_ring[q->next_free].buflen =
+ rte_cpu_to_le_16(rte_pktmbuf_data_len(mb));
+
+ if (mb->ol_flags & PKT_TX_TCP_SEG) {
+ hdrlen = mb->l2_len + mb->l3_len + mb->l4_len;
+ hdrlen += (mb->ol_flags & PKT_TX_TUNNEL_MASK) ?
+ mb->outer_l2_len + mb->outer_l3_len : 0;
+ if (q->hw_ring[q->next_free].flags & FM10K_TXD_FLAG_FTAG)
+ hdrlen += sizeof(struct fm10k_ftag);
+
+ if (likely((hdrlen >= FM10K_TSO_MIN_HEADERLEN) &&
+ (hdrlen <= FM10K_TSO_MAX_HEADERLEN) &&
+ (mb->tso_segsz >= FM10K_TSO_MINMSS))) {
+ q->hw_ring[q->next_free].mss = mb->tso_segsz;
+ q->hw_ring[q->next_free].hdrlen = hdrlen;
+ }
+ }
+
+ if (++q->next_free == q->nb_desc)
+ q->next_free = 0;
+
+ /* fill up the rings */
+ for (mb = mb->next; mb != NULL; mb = mb->next) {
+ q->sw_ring[q->next_free] = mb;
+ q->hw_ring[q->next_free].buffer_addr =
+ rte_cpu_to_le_64(MBUF_DMA_ADDR(mb));
+ q->hw_ring[q->next_free].buflen =
+ rte_cpu_to_le_16(rte_pktmbuf_data_len(mb));
+ q->hw_ring[q->next_free].flags = 0;
+ if (++q->next_free == q->nb_desc)
+ q->next_free = 0;
+ }
+
+ q->hw_ring[last_id].flags |= flags;
+}
+
+uint16_t
+fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct fm10k_tx_queue *q = tx_queue;
+ struct rte_mbuf *mb;
+ uint16_t count;
+
+ for (count = 0; count < nb_pkts; ++count) {
+ mb = tx_pkts[count];
+
+ /* running low on descriptors? try to free some... */
+ if (q->nb_free < q->free_thresh)
+ tx_free_descriptors(q);
+
+ /* make sure there are enough free descriptors to transmit the
+ * entire packet before doing anything */
+ if (q->nb_free < mb->nb_segs)
+ break;
+
+ /* sanity check to make sure the mbuf is valid */
+ if ((mb->nb_segs == 0) ||
+ ((mb->nb_segs > 1) && (mb->next == NULL)))
+ break;
+
+ /* process the packet */
+ tx_xmit_pkt(q, mb);
+ }
+
+ /* update the tail pointer if any packets were processed */
+ if (likely(count > 0))
+ FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_free);
+
+ return count;
+}
+
+uint16_t
+fm10k_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int i, ret;
+ struct rte_mbuf *m;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+
+ if ((m->ol_flags & PKT_TX_TCP_SEG) &&
+ (m->tso_segsz < FM10K_TSO_MINMSS)) {
+ rte_errno = EINVAL;
+ return i;
+ }
+
+ if (m->ol_flags & FM10K_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+ }
+
+ return i;
+}
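+
+/*
+ * Illustrative sketch (not part of the driver): fm10k_prep_pkts() is
+ * reached through rte_eth_tx_prepare(), which applications should call
+ * before rte_eth_tx_burst() when using TSO or checksum offloads so the
+ * checks above run on every packet.
+ */
+static uint16_t __rte_unused
+example_tx_send(uint16_t port_id, uint16_t queue_id,
+		struct rte_mbuf **pkts, uint16_t nb_pkts)
+{
+	uint16_t nb_ok;
+
+	nb_ok = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
+	/* on failure rte_errno says why pkts[nb_ok] was rejected; only the
+	 * packets that passed preparation are handed to the hardware
+	 */
+	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_ok);
+}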
diff --git a/src/spdk/dpdk/drivers/net/fm10k/fm10k_rxtx_vec.c b/src/spdk/dpdk/drivers/net/fm10k/fm10k_rxtx_vec.c
new file mode 100644
index 000000000..eff3933b5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/fm10k_rxtx_vec.c
@@ -0,0 +1,892 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013-2015 Intel Corporation
+ */
+
+#include <inttypes.h>
+
+#include <rte_ethdev_driver.h>
+#include <rte_common.h>
+#include "fm10k.h"
+#include "base/fm10k_type.h"
+
+#include <tmmintrin.h>
+
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+static void
+fm10k_reset_tx_queue(struct fm10k_tx_queue *txq);
+
+/* Handling the offload flags (ol_flags) field takes computation
+ * time when receiving packets. Therefore we provide a flag to disable
+ * the processing of the ol_flags field when it is not needed. This
+ * gives improved performance, at the cost of losing the offload info
+ * in the received packet.
+ */
+#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
+
+/* Vlan present flag shift */
+#define VP_SHIFT (2)
+/* L3 type shift */
+#define L3TYPE_SHIFT (4)
+/* L4 type shift */
+#define L4TYPE_SHIFT (7)
+/* HBO flag shift */
+#define HBOFLAG_SHIFT (10)
+/* RXE flag shift */
+#define RXEFLAG_SHIFT (13)
+/* IPE/L4E flag shift */
+#define L3L4EFLAG_SHIFT (14)
+/* PKT_RX_L4_CKSUM_GOOD sits above bit 7; shift the checksum flags
+ * right by one bit so each combination fits in a shuffle-table byte
+ */
+#define CKSUM_SHIFT (1)
+
+static inline void
+fm10k_desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
+{
+ __m128i ptype0, ptype1, vtag0, vtag1, eflag0, eflag1, cksumflag;
+ union {
+ uint16_t e[4];
+ uint64_t dword;
+ } vol;
+
+ const __m128i pkttype_msk = _mm_set_epi16(
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+
+ /* mask everything except rss type */
+ const __m128i rsstype_msk = _mm_set_epi16(
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x000F, 0x000F, 0x000F, 0x000F);
+
+	/* mask for the HBO and RXE flags */
+ const __m128i rxe_msk = _mm_set_epi16(
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0001, 0x0001, 0x0001, 0x0001);
+
+ /* mask the lower byte of ol_flags */
+ const __m128i ol_flags_msk = _mm_set_epi16(
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x00FF, 0x00FF, 0x00FF, 0x00FF);
+
+ const __m128i l3l4cksum_flag = _mm_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD) >> CKSUM_SHIFT,
+ (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD) >> CKSUM_SHIFT,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> CKSUM_SHIFT,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> CKSUM_SHIFT);
+
+ const __m128i rxe_flag = _mm_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0);
+
+ /* map rss type to rss hash flag */
+ const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, PKT_RX_RSS_HASH,
+ PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, 0,
+ PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, 0);
+
+ /* Calculate RSS_hash and Vlan fields */
+ ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
+ ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
+ vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]);
+ vtag1 = _mm_unpackhi_epi16(descs[2], descs[3]);
+
+ ptype0 = _mm_unpacklo_epi32(ptype0, ptype1);
+ ptype0 = _mm_and_si128(ptype0, rsstype_msk);
+ ptype0 = _mm_shuffle_epi8(rss_flags, ptype0);
+
+ vtag1 = _mm_unpacklo_epi32(vtag0, vtag1);
+ eflag0 = vtag1;
+ cksumflag = vtag1;
+ vtag1 = _mm_srli_epi16(vtag1, VP_SHIFT);
+ vtag1 = _mm_and_si128(vtag1, pkttype_msk);
+
+ vtag1 = _mm_or_si128(ptype0, vtag1);
+
+	/* Process error flags: simply set the RECIP_ERR bit if HBO/RXE is set */
+ eflag1 = _mm_srli_epi16(eflag0, RXEFLAG_SHIFT);
+ eflag0 = _mm_srli_epi16(eflag0, HBOFLAG_SHIFT);
+ eflag0 = _mm_or_si128(eflag0, eflag1);
+ eflag0 = _mm_and_si128(eflag0, rxe_msk);
+ eflag0 = _mm_shuffle_epi8(rxe_flag, eflag0);
+
+ vtag1 = _mm_or_si128(eflag0, vtag1);
+
+ /* Process L4/L3 checksum error flags */
+ cksumflag = _mm_srli_epi16(cksumflag, L3L4EFLAG_SHIFT);
+ cksumflag = _mm_shuffle_epi8(l3l4cksum_flag, cksumflag);
+
+ /* clean the higher byte and shift back the flag bits */
+ cksumflag = _mm_and_si128(cksumflag, ol_flags_msk);
+ cksumflag = _mm_slli_epi16(cksumflag, CKSUM_SHIFT);
+ vtag1 = _mm_or_si128(cksumflag, vtag1);
+
+ vol.dword = _mm_cvtsi128_si64(vtag1);
+
+ rx_pkts[0]->ol_flags = vol.e[0];
+ rx_pkts[1]->ol_flags = vol.e[1];
+ rx_pkts[2]->ol_flags = vol.e[2];
+ rx_pkts[3]->ol_flags = vol.e[3];
+}
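+
+/* Implementation note: _mm_shuffle_epi8 doubles as a 16-entry byte
+ * lookup table above. Each masked descriptor nibble selects one byte
+ * of the constant (rss_flags, l3l4cksum_flag), translating four
+ * packets' fields to ol_flags bits in a single instruction:
+ *
+ *	out = _mm_shuffle_epi8(lut, idx);
+ *
+ * where each output byte is lut[idx & 0x0F], or zero when the index
+ * byte has its high bit set.
+ */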
+
+/* @note: When this function is changed, make corresponding change to
+ * fm10k_dev_supported_ptypes_get().
+ */
+static inline void
+fm10k_desc_to_pktype_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
+{
+ __m128i l3l4type0, l3l4type1, l3type, l4type;
+ union {
+ uint16_t e[4];
+ uint64_t dword;
+ } vol;
+
+ /* L3 pkt type mask Bit4 to Bit6 */
+ const __m128i l3type_msk = _mm_set_epi16(
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0070, 0x0070, 0x0070, 0x0070);
+
+ /* L4 pkt type mask Bit7 to Bit9 */
+ const __m128i l4type_msk = _mm_set_epi16(
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0380, 0x0380, 0x0380, 0x0380);
+
+ /* convert RRC l3 type to mbuf format */
+ const __m128i l3type_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L3_IPV6, RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV4, 0);
+
+	/* Convert RRC L4 type to mbuf format. The l4type_flags entries
+	 * are stored shifted right by 8 bits so each fits into one byte;
+	 * they are shifted back left after the lookup.
+	 */
+ const __m128i l4type_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0,
+ RTE_PTYPE_TUNNEL_GENEVE >> 8,
+ RTE_PTYPE_TUNNEL_NVGRE >> 8,
+ RTE_PTYPE_TUNNEL_VXLAN >> 8,
+ RTE_PTYPE_TUNNEL_GRE >> 8,
+ RTE_PTYPE_L4_UDP >> 8,
+ RTE_PTYPE_L4_TCP >> 8,
+ 0);
+
+ l3l4type0 = _mm_unpacklo_epi16(descs[0], descs[1]);
+ l3l4type1 = _mm_unpacklo_epi16(descs[2], descs[3]);
+ l3l4type0 = _mm_unpacklo_epi32(l3l4type0, l3l4type1);
+
+ l3type = _mm_and_si128(l3l4type0, l3type_msk);
+ l4type = _mm_and_si128(l3l4type0, l4type_msk);
+
+ l3type = _mm_srli_epi16(l3type, L3TYPE_SHIFT);
+ l4type = _mm_srli_epi16(l4type, L4TYPE_SHIFT);
+
+ l3type = _mm_shuffle_epi8(l3type_flags, l3type);
+	/* l4type_flags entries are stored >> 8, shift left to restore */
+ l4type = _mm_shuffle_epi8(l4type_flags, l4type);
+
+ l4type = _mm_slli_epi16(l4type, 8);
+ l3l4type0 = _mm_or_si128(l3type, l4type);
+ vol.dword = _mm_cvtsi128_si64(l3l4type0);
+
+ rx_pkts[0]->packet_type = vol.e[0];
+ rx_pkts[1]->packet_type = vol.e[1];
+ rx_pkts[2]->packet_type = vol.e[2];
+ rx_pkts[3]->packet_type = vol.e[3];
+}
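+
+/* Worked example of the shift trick above: RTE_PTYPE_L4_TCP is 0x0100
+ * and does not fit in a shuffle-table byte, so it is stored as 0x01
+ * and the final _mm_slli_epi16(l4type, 8) restores it to 0x0100
+ * before it is OR-ed with the L3 type.
+ */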
+#else
+#define fm10k_desc_to_olflags_v(desc, rx_pkts) do {} while (0)
+#define fm10k_desc_to_pktype_v(desc, rx_pkts) do {} while (0)
+#endif
+
+int __rte_cold
+fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
+{
+#ifndef RTE_LIBRTE_IEEE1588
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+
+#ifndef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
+	/* without Rx ol_flags, no VP flag report */
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ return -1;
+#endif
+
+ /* no fdir support */
+ if (fconf->mode != RTE_FDIR_MODE_NONE)
+ return -1;
+
+ /* no header split support */
+ if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
+ return -1;
+
+ return 0;
+#else
+ RTE_SET_USED(dev);
+ return -1;
+#endif
+}
+
+int __rte_cold
+fm10k_rxq_vec_setup(struct fm10k_rx_queue *rxq)
+{
+ uintptr_t p;
+ struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
+
+ mb_def.nb_segs = 1;
+	/* data_off will be adjusted after a new mbuf is allocated, to
+	 * satisfy the 512-byte alignment.
+	 */
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+ mb_def.port = rxq->port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+
+ /* prevent compiler reordering: rearm_data covers previous fields */
+ rte_compiler_barrier();
+ p = (uintptr_t)&mb_def.rearm_data;
+ rxq->mbuf_initializer = *(uint64_t *)p;
+ return 0;
+}
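+
+/* Implementation note: rearm_data is an 8-byte window over data_off,
+ * refcnt, nb_segs and port, so the rearm loop can reset all four mbuf
+ * fields with a single 64-bit store of this template:
+ *
+ *	*(uint64_t *)&mb->rearm_data = rxq->mbuf_initializer;
+ */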
+
+static inline void
+fm10k_rxq_rearm(struct fm10k_rx_queue *rxq)
+{
+ int i;
+ uint16_t rx_id;
+ volatile union fm10k_rx_desc *rxdp;
+ struct rte_mbuf **mb_alloc = &rxq->sw_ring[rxq->rxrearm_start];
+ struct rte_mbuf *mb0, *mb1;
+ __m128i head_off = _mm_set_epi64x(
+ RTE_PKTMBUF_HEADROOM + FM10K_RX_DATABUF_ALIGN - 1,
+ RTE_PKTMBUF_HEADROOM + FM10K_RX_DATABUF_ALIGN - 1);
+ __m128i dma_addr0, dma_addr1;
+	/* Rx buffers need to be aligned on a 512-byte boundary */
+ const __m128i hba_msk = _mm_set_epi64x(0,
+ UINT64_MAX - FM10K_RX_DATABUF_ALIGN + 1);
+
+ rxdp = rxq->hw_ring + rxq->rxrearm_start;
+
+ /* Pull 'n' more MBUFs into the software ring */
+ if (rte_mempool_get_bulk(rxq->mp,
+ (void *)mb_alloc,
+ RTE_FM10K_RXQ_REARM_THRESH) < 0) {
+ dma_addr0 = _mm_setzero_si128();
+ /* Clean up all the HW/SW ring content */
+ for (i = 0; i < RTE_FM10K_RXQ_REARM_THRESH; i++) {
+ mb_alloc[i] = &rxq->fake_mbuf;
+ _mm_store_si128((__m128i *)&rxdp[i].q,
+ dma_addr0);
+ }
+
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ RTE_FM10K_RXQ_REARM_THRESH;
+ return;
+ }
+
+ /* Initialize the mbufs in vector, process 2 mbufs in one loop */
+ for (i = 0; i < RTE_FM10K_RXQ_REARM_THRESH; i += 2, mb_alloc += 2) {
+ __m128i vaddr0, vaddr1;
+ uintptr_t p0, p1;
+
+ mb0 = mb_alloc[0];
+ mb1 = mb_alloc[1];
+
+		/* Flush mbuf with pkt template.
+		 * Data to be rearmed is 8 bytes long (data_off, refcnt,
+		 * nb_segs and port).
+		 */
+ p0 = (uintptr_t)&mb0->rearm_data;
+ *(uint64_t *)p0 = rxq->mbuf_initializer;
+ p1 = (uintptr_t)&mb1->rearm_data;
+ *(uint64_t *)p1 = rxq->mbuf_initializer;
+
+ /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
+ offsetof(struct rte_mbuf, buf_addr) + 8);
+ vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
+ vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
+
+ /* convert pa to dma_addr hdr/data */
+ dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
+ dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
+
+ /* add headroom to pa values */
+ dma_addr0 = _mm_add_epi64(dma_addr0, head_off);
+ dma_addr1 = _mm_add_epi64(dma_addr1, head_off);
+
+		/* Align to 512 bytes to satisfy the HW requirement and,
+		 * at the same time, set the Header Buffer Address to zero.
+		 */
+ dma_addr0 = _mm_and_si128(dma_addr0, hba_msk);
+ dma_addr1 = _mm_and_si128(dma_addr1, hba_msk);
+
+ /* flush desc with pa dma_addr */
+ _mm_store_si128((__m128i *)&rxdp++->q, dma_addr0);
+ _mm_store_si128((__m128i *)&rxdp++->q, dma_addr1);
+
+ /* enforce 512B alignment on default Rx virtual addresses */
+ mb0->data_off = (uint16_t)(RTE_PTR_ALIGN((char *)mb0->buf_addr
+ + RTE_PKTMBUF_HEADROOM, FM10K_RX_DATABUF_ALIGN)
+ - (char *)mb0->buf_addr);
+ mb1->data_off = (uint16_t)(RTE_PTR_ALIGN((char *)mb1->buf_addr
+ + RTE_PKTMBUF_HEADROOM, FM10K_RX_DATABUF_ALIGN)
+ - (char *)mb1->buf_addr);
+ }
+
+ rxq->rxrearm_start += RTE_FM10K_RXQ_REARM_THRESH;
+ if (rxq->rxrearm_start >= rxq->nb_desc)
+ rxq->rxrearm_start = 0;
+
+ rxq->rxrearm_nb -= RTE_FM10K_RXQ_REARM_THRESH;
+
+ rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+ (rxq->nb_desc - 1) : (rxq->rxrearm_start - 1));
+
+ /* Update the tail pointer on the NIC */
+ FM10K_PCI_REG_WRITE(rxq->tail_ptr, rx_id);
+}
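+
+/* Worked example of the alignment math above, assuming the default
+ * RTE_PKTMBUF_HEADROOM of 128 and FM10K_RX_DATABUF_ALIGN of 512: for
+ * buf_iova = 0x12345680,
+ *
+ *	0x12345680 + 128 + 511 = 0x123458ff
+ *	0x123458ff & ~511ULL   = 0x12345800
+ *
+ * i.e. the first 512-byte boundary at or after the headroom, while the
+ * all-zero high lane of hba_msk clears the Header Buffer Address.
+ */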
+
+void __rte_cold
+fm10k_rx_queue_release_mbufs_vec(struct fm10k_rx_queue *rxq)
+{
+ const unsigned mask = rxq->nb_desc - 1;
+ unsigned i;
+
+ if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_desc)
+ return;
+
+ /* free all mbufs that are valid in the ring */
+ if (rxq->rxrearm_nb == 0) {
+ for (i = 0; i < rxq->nb_desc; i++)
+ if (rxq->sw_ring[i] != NULL)
+ rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+ } else {
+ for (i = rxq->next_dd; i != rxq->rxrearm_start;
+ i = (i + 1) & mask)
+ rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+ }
+ rxq->rxrearm_nb = rxq->nb_desc;
+
+ /* set all entries to NULL */
+ memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_desc);
+}
+
+static inline uint16_t
+fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts, uint8_t *split_packet)
+{
+ volatile union fm10k_rx_desc *rxdp;
+ struct rte_mbuf **mbufp;
+ uint16_t nb_pkts_recd;
+ int pos;
+ struct fm10k_rx_queue *rxq = rx_queue;
+ uint64_t var;
+ __m128i shuf_msk;
+ __m128i dd_check, eop_check;
+ uint16_t next_dd;
+
+ next_dd = rxq->next_dd;
+
+ /* Just the act of getting into the function from the application is
+ * going to cost about 7 cycles
+ */
+ rxdp = rxq->hw_ring + next_dd;
+
+ rte_prefetch0(rxdp);
+
+ /* See if we need to rearm the RX queue - gives the prefetch a bit
+ * of time to act
+ */
+ if (rxq->rxrearm_nb > RTE_FM10K_RXQ_REARM_THRESH)
+ fm10k_rxq_rearm(rxq);
+
+ /* Before we start moving massive data around, check to see if
+ * there is actually a packet available
+ */
+ if (!(rxdp->d.staterr & FM10K_RXD_STATUS_DD))
+ return 0;
+
+	/* Vector RX processes 4 packets at a time; strip the unaligned
+	 * tail in case nb_pkts is not a multiple of 4.
+	 */
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_FM10K_DESCS_PER_LOOP);
+
+ /* 4 packets DD mask */
+ dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);
+
+ /* 4 packets EOP mask */
+ eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);
+
+ /* mask to shuffle from desc. to mbuf */
+ shuf_msk = _mm_set_epi8(
+ 7, 6, 5, 4, /* octet 4~7, 32bits rss */
+ 15, 14, /* octet 14~15, low 16 bits vlan_macip */
+ 13, 12, /* octet 12~13, 16 bits data_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+ 13, 12, /* octet 12~13, low 16 bits pkt_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_type */
+ 0xFF, 0xFF /* Skip pkt_type field in shuffle operation */
+ );
+ /*
+ * Compile-time verify the shuffle mask
+ * NOTE: some field positions already verified above, but duplicated
+ * here for completeness in case of future modifications.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
+
+ /* Cache is empty -> need to scan the buffer rings, but first move
+ * the next 'n' mbufs into the cache
+ */
+ mbufp = &rxq->sw_ring[next_dd];
+
+	/* A. load 4 packets in one loop
+	 * [A*. mask out 4 unused dirty fields in desc]
+	 * B. copy 4 mbuf pointers from sw_ring to rx_pkts
+	 * C. calc the number of DD bits among the 4 packets
+	 * [C*. extract the end-of-packet bit, if requested]
+	 * D. fill info. from desc to mbuf
+	 */
+ for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
+ pos += RTE_FM10K_DESCS_PER_LOOP,
+ rxdp += RTE_FM10K_DESCS_PER_LOOP) {
+ __m128i descs0[RTE_FM10K_DESCS_PER_LOOP];
+ __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+ __m128i zero, staterr, sterr_tmp1, sterr_tmp2;
+ __m128i mbp1;
+ /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
+#if defined(RTE_ARCH_X86_64)
+ __m128i mbp2;
+#endif
+
+		/* B.1 load 2 (64 bit) or 4 (32 bit) mbuf pointers */
+ mbp1 = _mm_loadu_si128((__m128i *)&mbufp[pos]);
+
+ /* Read desc statuses backwards to avoid race condition */
+ /* A.1 load 4 pkts desc */
+ descs0[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
+ rte_compiler_barrier();
+
+		/* B.2 copy 2 64 bit or 4 32 bit mbuf pointers into rx_pkts */
+ _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
+
+#if defined(RTE_ARCH_X86_64)
+		/* B.1 load 2 64 bit mbuf pointers */
+ mbp2 = _mm_loadu_si128((__m128i *)&mbufp[pos+2]);
+#endif
+
+ descs0[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
+ rte_compiler_barrier();
+		/* A.1 load remaining pkts desc */
+ descs0[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
+ rte_compiler_barrier();
+ descs0[0] = _mm_loadu_si128((__m128i *)(rxdp));
+
+#if defined(RTE_ARCH_X86_64)
+		/* B.2 copy 2 mbuf pointers into rx_pkts */
+ _mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+#endif
+
+ /* avoid compiler reorder optimization */
+ rte_compiler_barrier();
+
+ if (split_packet) {
+ rte_mbuf_prefetch_part2(rx_pkts[pos]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
+ }
+
+ /* D.1 pkt 3,4 convert format from desc to pktmbuf */
+ pkt_mb4 = _mm_shuffle_epi8(descs0[3], shuf_msk);
+ pkt_mb3 = _mm_shuffle_epi8(descs0[2], shuf_msk);
+
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp2 = _mm_unpackhi_epi32(descs0[3], descs0[2]);
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp1 = _mm_unpackhi_epi32(descs0[1], descs0[0]);
+
+ /* set ol_flags with vlan packet type */
+ fm10k_desc_to_olflags_v(descs0, &rx_pkts[pos]);
+
+ /* D.1 pkt 1,2 convert format from desc to pktmbuf */
+ pkt_mb2 = _mm_shuffle_epi8(descs0[1], shuf_msk);
+ pkt_mb1 = _mm_shuffle_epi8(descs0[0], shuf_msk);
+
+ /* C.2 get 4 pkts staterr value */
+ zero = _mm_xor_si128(dd_check, dd_check);
+ staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
+
+ /* D.3 copy final 3,4 data to rx_pkts */
+ _mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1,
+ pkt_mb4);
+ _mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1,
+ pkt_mb3);
+
+ /* C* extract and record EOP bit */
+ if (split_packet) {
+ __m128i eop_shuf_mask = _mm_set_epi8(
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x04, 0x0C, 0x00, 0x08
+ );
+
+ /* and with mask to extract bits, flipping 1-0 */
+ __m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
+			/* the staterr values are not in order; the count
+			 * of DD bits doesn't care, but end-of-packet
+			 * tracking does, so shuffle. This also compresses
+			 * the 32-bit values to 8-bit.
+			 */
+ eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
+ /* store the resulting 32-bit value */
+ *(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
+ split_packet += RTE_FM10K_DESCS_PER_LOOP;
+
+ /* zero-out next pointers */
+ rx_pkts[pos]->next = NULL;
+ rx_pkts[pos + 1]->next = NULL;
+ rx_pkts[pos + 2]->next = NULL;
+ rx_pkts[pos + 3]->next = NULL;
+ }
+
+ /* C.3 calc available number of desc */
+ staterr = _mm_and_si128(staterr, dd_check);
+ staterr = _mm_packs_epi32(staterr, zero);
+
+ /* D.3 copy final 1,2 data to rx_pkts */
+ _mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1,
+ pkt_mb2);
+ _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
+ pkt_mb1);
+
+ fm10k_desc_to_pktype_v(descs0, &rx_pkts[pos]);
+
+		/* C.4 count the number of completed (DD) descriptors */
+ var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
+ nb_pkts_recd += var;
+ if (likely(var != RTE_FM10K_DESCS_PER_LOOP))
+ break;
+ }
+
+ /* Update our internal tail pointer */
+ rxq->next_dd = (uint16_t)(rxq->next_dd + nb_pkts_recd);
+ rxq->next_dd = (uint16_t)(rxq->next_dd & (rxq->nb_desc - 1));
+ rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
+
+ return nb_pkts_recd;
+}
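+
+/* Implementation note: after the mask-and-pack in C.3 above, every
+ * completed descriptor contributes exactly one set bit to the low
+ * 64 bits of staterr, so __builtin_popcountll() yields the number of
+ * packets received per iteration, and any count below 4 stops the
+ * loop at the first not-yet-done descriptor.
+ */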
+
+/* vPMD receive routine
+ *
+ * Notice:
+ * - ol_flags for RSS and checksum errors are not supported
+ */
+uint16_t
+fm10k_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return fm10k_recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+static inline uint16_t
+fm10k_reassemble_packets(struct fm10k_rx_queue *rxq,
+ struct rte_mbuf **rx_bufs,
+ uint16_t nb_bufs, uint8_t *split_flags)
+{
+	struct rte_mbuf *pkts[RTE_FM10K_MAX_RX_BURST]; /* finished pkts */
+ struct rte_mbuf *start = rxq->pkt_first_seg;
+ struct rte_mbuf *end = rxq->pkt_last_seg;
+ unsigned pkt_idx, buf_idx;
+
+ for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
+ if (end != NULL) {
+ /* processing a split packet */
+ end->next = rx_bufs[buf_idx];
+ start->nb_segs++;
+ start->pkt_len += rx_bufs[buf_idx]->data_len;
+ end = end->next;
+
+ if (!split_flags[buf_idx]) {
+ /* it's the last packet of the set */
+#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
+ start->hash = end->hash;
+ start->ol_flags = end->ol_flags;
+ start->packet_type = end->packet_type;
+#endif
+ pkts[pkt_idx++] = start;
+ start = end = NULL;
+ }
+ } else {
+ /* not processing a split packet */
+ if (!split_flags[buf_idx]) {
+ /* not a split packet, save and skip */
+ pkts[pkt_idx++] = rx_bufs[buf_idx];
+ continue;
+ }
+ end = start = rx_bufs[buf_idx];
+ }
+ }
+
+ /* save the partial packet for next time */
+ rxq->pkt_first_seg = start;
+ rxq->pkt_last_seg = end;
+ memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
+ return pkt_idx;
+}
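+
+/* Implementation note: a packet whose last segment has not arrived in
+ * this burst is parked in rxq->pkt_first_seg/pkt_last_seg and finished
+ * on a later call, so reassembly state survives across bursts.
+ */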
+
+/*
+ * vPMD receive routine that reassembles scattered packets
+ *
+ * Notice:
+ * - ol_flags for RSS and checksum errors are not supported
+ * - if nb_pkts > RTE_FM10K_MAX_RX_BURST, only RTE_FM10K_MAX_RX_BURST
+ *   descriptors' DD bits are scanned
+ */
+uint16_t
+fm10k_recv_scattered_pkts_vec(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct fm10k_rx_queue *rxq = rx_queue;
+ uint8_t split_flags[RTE_FM10K_MAX_RX_BURST] = {0};
+ unsigned i = 0;
+
+	/* split_flags can only hold up to RTE_FM10K_MAX_RX_BURST entries */
+ nb_pkts = RTE_MIN(nb_pkts, RTE_FM10K_MAX_RX_BURST);
+ /* get some new buffers */
+ uint16_t nb_bufs = fm10k_recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
+ split_flags);
+ if (nb_bufs == 0)
+ return 0;
+
+ /* happy day case, full burst + no packets to be joined */
+ const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+ if (rxq->pkt_first_seg == NULL &&
+ split_fl64[0] == 0 && split_fl64[1] == 0 &&
+ split_fl64[2] == 0 && split_fl64[3] == 0)
+ return nb_bufs;
+
+	/* reassemble any packets that need reassembly */
+ if (rxq->pkt_first_seg == NULL) {
+		/* find the first split flag and reassemble from there */
+ while (i < nb_bufs && !split_flags[i])
+ i++;
+ if (i == nb_bufs)
+ return nb_bufs;
+ rxq->pkt_first_seg = rx_pkts[i];
+ }
+ return i + fm10k_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+ &split_flags[i]);
+}
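+
+/* Implementation note: viewing the 32-byte split_flags array through
+ * four uint64_t words (split_fl64) lets the happy-path check clear the
+ * whole array with four compares instead of a 32-iteration scan.
+ */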
+
+static const struct fm10k_txq_ops vec_txq_ops = {
+ .reset = fm10k_reset_tx_queue,
+};
+
+void __rte_cold
+fm10k_txq_vec_setup(struct fm10k_tx_queue *txq)
+{
+ txq->ops = &vec_txq_ops;
+}
+
+int __rte_cold
+fm10k_tx_vec_condition_check(struct fm10k_tx_queue *txq)
+{
+ /* Vector TX can't offload any features yet */
+ if (txq->offloads != 0)
+ return -1;
+
+ if (txq->tx_ftag_en)
+ return -1;
+
+ return 0;
+}
+
+static inline void
+vtx1(volatile struct fm10k_tx_desc *txdp,
+ struct rte_mbuf *pkt, uint64_t flags)
+{
+ __m128i descriptor = _mm_set_epi64x(flags << 56 |
+ (uint64_t)pkt->vlan_tci << 16 | (uint64_t)pkt->data_len,
+ MBUF_DMA_ADDR(pkt));
+ _mm_store_si128((__m128i *)txdp, descriptor);
+}
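+
+/* Layout sketch of the descriptor stored above: the low quadword
+ * carries the DMA address; in the high quadword, data_len occupies
+ * bits 0-15, vlan_tci bits 16-31 and the flags byte bits 56-63,
+ * matching flags << 56 | vlan_tci << 16 | data_len.
+ */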
+
+static inline void
+vtx(volatile struct fm10k_tx_desc *txdp,
+ struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
+{
+ int i;
+
+ for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+ vtx1(txdp, *pkt, flags);
+}
+
+static __rte_always_inline int
+fm10k_tx_free_bufs(struct fm10k_tx_queue *txq)
+{
+ struct rte_mbuf **txep;
+ uint8_t flags;
+ uint32_t n;
+ uint32_t i;
+ int nb_free = 0;
+ struct rte_mbuf *m, *free[RTE_FM10K_TX_MAX_FREE_BUF_SZ];
+
+ /* check DD bit on threshold descriptor */
+ flags = txq->hw_ring[txq->next_dd].flags;
+ if (!(flags & FM10K_TXD_FLAG_DONE))
+ return 0;
+
+ n = txq->rs_thresh;
+
+ /* First buffer to free from S/W ring is at index
+ * next_dd - (rs_thresh-1)
+ */
+ txep = &txq->sw_ring[txq->next_dd - (n - 1)];
+ m = rte_pktmbuf_prefree_seg(txep[0]);
+ if (likely(m != NULL)) {
+ free[0] = m;
+ nb_free = 1;
+ for (i = 1; i < n; i++) {
+ m = rte_pktmbuf_prefree_seg(txep[i]);
+ if (likely(m != NULL)) {
+ if (likely(m->pool == free[0]->pool))
+ free[nb_free++] = m;
+ else {
+ rte_mempool_put_bulk(free[0]->pool,
+ (void *)free, nb_free);
+ free[0] = m;
+ nb_free = 1;
+ }
+ }
+ }
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+ } else {
+ for (i = 1; i < n; i++) {
+ m = rte_pktmbuf_prefree_seg(txep[i]);
+ if (m != NULL)
+ rte_mempool_put(m->pool, m);
+ }
+ }
+
+ /* buffers were freed, update counters */
+ txq->nb_free = (uint16_t)(txq->nb_free + txq->rs_thresh);
+ txq->next_dd = (uint16_t)(txq->next_dd + txq->rs_thresh);
+ if (txq->next_dd >= txq->nb_desc)
+ txq->next_dd = (uint16_t)(txq->rs_thresh - 1);
+
+ return txq->rs_thresh;
+}
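+
+/* Implementation note: consecutive mbufs from the same mempool are
+ * batched into one rte_mempool_put_bulk() call; a pool change flushes
+ * the pending batch, so the common single-pool case costs exactly one
+ * bulk operation.
+ */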
+
+static __rte_always_inline void
+tx_backlog_entry(struct rte_mbuf **txep,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int i;
+
+ for (i = 0; i < (int)nb_pkts; ++i)
+ txep[i] = tx_pkts[i];
+}
+
+uint16_t
+fm10k_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct fm10k_tx_queue *txq = (struct fm10k_tx_queue *)tx_queue;
+ volatile struct fm10k_tx_desc *txdp;
+ struct rte_mbuf **txep;
+ uint16_t n, nb_commit, tx_id;
+ uint64_t flags = FM10K_TXD_FLAG_LAST;
+ uint64_t rs = FM10K_TXD_FLAG_RS | FM10K_TXD_FLAG_LAST;
+ int i;
+
+	/* crossing the rs_thresh boundary is not allowed */
+ nb_pkts = RTE_MIN(nb_pkts, txq->rs_thresh);
+
+ if (txq->nb_free < txq->free_thresh)
+ fm10k_tx_free_bufs(txq);
+
+ nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_free, nb_pkts);
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ tx_id = txq->next_free;
+ txdp = &txq->hw_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+
+ txq->nb_free = (uint16_t)(txq->nb_free - nb_pkts);
+
+ n = (uint16_t)(txq->nb_desc - tx_id);
+ if (nb_commit >= n) {
+ tx_backlog_entry(txep, tx_pkts, n);
+
+ for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+ vtx1(txdp, *tx_pkts, flags);
+
+ vtx1(txdp, *tx_pkts++, rs);
+
+ nb_commit = (uint16_t)(nb_commit - n);
+
+ tx_id = 0;
+ txq->next_rs = (uint16_t)(txq->rs_thresh - 1);
+
+		/* avoid reaching the end of the ring */
+ txdp = &(txq->hw_ring[tx_id]);
+ txep = &txq->sw_ring[tx_id];
+ }
+
+ tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+ vtx(txdp, tx_pkts, nb_commit, flags);
+
+ tx_id = (uint16_t)(tx_id + nb_commit);
+ if (tx_id > txq->next_rs) {
+ txq->hw_ring[txq->next_rs].flags |= FM10K_TXD_FLAG_RS;
+ txq->next_rs = (uint16_t)(txq->next_rs + txq->rs_thresh);
+ }
+
+ txq->next_free = tx_id;
+
+ FM10K_PCI_REG_WRITE(txq->tail_ptr, txq->next_free);
+
+ return nb_pkts;
+}
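+
+/* Implementation note: when a burst would run past the end of the
+ * ring, the first n entries are written up to the wrap point with an
+ * RS flag forced on the last one, and the remainder restarts from
+ * entry 0, so the hardware always sees contiguous descriptor writes.
+ */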
+
+static void __rte_cold
+fm10k_reset_tx_queue(struct fm10k_tx_queue *txq)
+{
+ static const struct fm10k_tx_desc zeroed_desc = {0};
+ struct rte_mbuf **txe = txq->sw_ring;
+ uint16_t i;
+
+ /* Zero out HW ring memory */
+ for (i = 0; i < txq->nb_desc; i++)
+ txq->hw_ring[i] = zeroed_desc;
+
+ /* Initialize SW ring entries */
+ for (i = 0; i < txq->nb_desc; i++)
+ txe[i] = NULL;
+
+ txq->next_dd = (uint16_t)(txq->rs_thresh - 1);
+ txq->next_rs = (uint16_t)(txq->rs_thresh - 1);
+
+ txq->next_free = 0;
+ txq->nb_used = 0;
+ /* Always allow 1 descriptor to be un-allocated to avoid
+ * a H/W race condition
+ */
+ txq->nb_free = (uint16_t)(txq->nb_desc - 1);
+ FM10K_PCI_REG_WRITE(txq->tail_ptr, 0);
+}
diff --git a/src/spdk/dpdk/drivers/net/fm10k/meson.build b/src/spdk/dpdk/drivers/net/fm10k/meson.build
new file mode 100644
index 000000000..2772ea4df
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/meson.build
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+subdir('base')
+objs = [base_objs]
+
+sources = files(
+ 'fm10k_ethdev.c',
+ 'fm10k_rxtx.c',
+)
+if arch_subdir == 'x86'
+ dpdk_conf.set('RTE_LIBRTE_FM10K_INC_VECTOR', 1)
+ sources += files('fm10k_rxtx_vec.c')
+endif
+
+includes += include_directories('base')
diff --git a/src/spdk/dpdk/drivers/net/fm10k/rte_pmd_fm10k_version.map b/src/spdk/dpdk/drivers/net/fm10k/rte_pmd_fm10k_version.map
new file mode 100644
index 000000000..f9f17e4f6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/fm10k/rte_pmd_fm10k_version.map
@@ -0,0 +1,3 @@
+DPDK_20.0 {
+ local: *;
+};