Diffstat (limited to 'src/seastar/dpdk/drivers/net/qede')
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/LICENSE.qede_pmd  28
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/Makefile  109
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/bcm_osal.c  221
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/bcm_osal.h  427
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/common_hsi.h  1638
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore.h  868
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_attn_values.h  13287
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_chain.h  765
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_cxt.c  2253
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_cxt.h  201
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_cxt_api.h  40
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_dcbx.c  1314
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_dcbx.h  57
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_dcbx_api.h  199
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_dev.c  4874
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_dev_api.h  584
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_gtt_reg_addr.h  52
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_gtt_values.h  33
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_hsi_common.h  2311
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_hsi_debug_tools.h  1114
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_hsi_eth.h  2400
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_hsi_init_func.h  132
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_hsi_init_tool.h  454
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_hw.c  933
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_hw.h  296
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_hw_defs.h  60
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_init_fw_funcs.c  1883
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_init_fw_funcs.h  471
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_init_ops.c  596
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_init_ops.h  111
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_int.c  2260
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_int.h  211
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_int_api.h  327
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_iov_api.h  717
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_iro.h  197
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_iro_values.h  113
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_l2.c  2104
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_l2.h  159
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_l2_api.h  439
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_mcp.c  3208
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_mcp.h  485
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_mcp_api.h  1126
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_mng_tlv.c  1535
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_proto_if.h  103
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_rt_defs.h  452
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_sp_api.h  64
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_sp_commands.c  568
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_sp_commands.h  146
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_spq.c  998
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_spq.h  282
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_sriov.c  4515
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_sriov.h  317
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_status.h  31
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_utils.h  37
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_vf.c  1705
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_vf.h  290
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_vf_api.h  167
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/ecore_vfpf_if.h  619
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/eth_common.h  679
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/mcp_public.h  1741
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/nvm_cfg.h  1937
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/base/reg_addr.h  1202
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/qede_eth_if.c  318
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/qede_eth_if.h  132
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/qede_ethdev.c  2470
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/qede_ethdev.h  251
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/qede_fdir.c  469
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/qede_if.h  213
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/qede_logs.h  82
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/qede_main.c  739
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/qede_rxtx.c  1952
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/qede_rxtx.h  270
-rw-r--r--  src/seastar/dpdk/drivers/net/qede/rte_pmd_qede_version.map  4
73 files changed, 73345 insertions, 0 deletions
diff --git a/src/seastar/dpdk/drivers/net/qede/LICENSE.qede_pmd b/src/seastar/dpdk/drivers/net/qede/LICENSE.qede_pmd
new file mode 100644
index 00000000..c7cbdccc
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/LICENSE.qede_pmd
@@ -0,0 +1,28 @@
+/*
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of QLogic Corporation nor the name of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written consent.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
diff --git a/src/seastar/dpdk/drivers/net/qede/Makefile b/src/seastar/dpdk/drivers/net/qede/Makefile
new file mode 100644
index 00000000..3323914c
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/Makefile
@@ -0,0 +1,109 @@
+# Copyright (c) 2016 QLogic Corporation.
+# All rights reserved.
+# www.qlogic.com
+#
+# See LICENSE.qede_pmd for copyright and licensing details.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_qede.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_pmd_qede_version.map
+
+LIBABIVER := 1
+
+#
+# OS
+#
+OS_TYPE := $(shell uname -s)
+
+#
+# CFLAGS
+#
+CFLAGS_BASE_DRIVER = -Wno-unused-parameter
+CFLAGS_BASE_DRIVER += -Wno-sign-compare
+CFLAGS_BASE_DRIVER += -Wno-missing-prototypes
+CFLAGS_BASE_DRIVER += -Wno-cast-qual
+CFLAGS_BASE_DRIVER += -Wno-unused-function
+CFLAGS_BASE_DRIVER += -Wno-unused-variable
+CFLAGS_BASE_DRIVER += -Wno-strict-aliasing
+CFLAGS_BASE_DRIVER += -Wno-missing-prototypes
+
+ifneq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+CFLAGS_BASE_DRIVER += -Wno-unused-value
+CFLAGS_BASE_DRIVER += -Wno-format-nonliteral
+ifeq ($(OS_TYPE),Linux)
+ifeq ($(shell clang -Wno-shift-negative-value -Werror -E - < /dev/null > /dev/null 2>&1; echo $$?),0)
+CFLAGS_BASE_DRIVER += -Wno-shift-negative-value
+endif
+endif
+endif
+
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1)
+CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable
+endif
+CFLAGS_BASE_DRIVER += -Wno-missing-declarations
+ifeq ($(shell test $(GCC_VERSION) -ge 46 && echo 1), 1)
+CFLAGS_BASE_DRIVER += -Wno-maybe-uninitialized
+endif
+CFLAGS_BASE_DRIVER += -Wno-strict-prototypes
+ifeq ($(shell test $(GCC_VERSION) -ge 60 && echo 1), 1)
+CFLAGS_BASE_DRIVER += -Wno-shift-negative-value
+ifeq ($(shell test $(GCC_VERSION) -ge 70 && echo 1), 1)
+CFLAGS_BASE_DRIVER += -Wno-implicit-fallthrough
+endif
+endif
+else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
+CFLAGS_BASE_DRIVER += -Wno-format-extra-args
+CFLAGS_BASE_DRIVER += -Wno-visibility
+CFLAGS_BASE_DRIVER += -Wno-empty-body
+CFLAGS_BASE_DRIVER += -Wno-invalid-source-encoding
+CFLAGS_BASE_DRIVER += -Wno-sometimes-uninitialized
+ifeq ($(shell clang -Wno-pointer-bool-conversion -Werror -E - < /dev/null > /dev/null 2>&1; echo $$?),0)
+CFLAGS_BASE_DRIVER += -Wno-pointer-bool-conversion
+endif
+else
+CFLAGS_BASE_DRIVER += -wd188 #188: enumerated type mixed with another type
+endif
+
+#
+# Add extra flags for base ecore driver files
+# to disable warnings in them
+#
+#
+BASE_DRIVER_OBJS=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))
+$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
+
+VPATH += $(SRCDIR)/base
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_dev.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_hw.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_cxt.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_l2.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_sp_commands.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_init_fw_funcs.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_spq.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_init_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_mcp.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_int.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_dcbx.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += bcm_osal.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_sriov.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_vf.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_eth_if.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_main.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_fdir.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/seastar/dpdk/drivers/net/qede/base/bcm_osal.c b/src/seastar/dpdk/drivers/net/qede/base/bcm_osal.c
new file mode 100644
index 00000000..3f895cd4
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/bcm_osal.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include <rte_memzone.h>
+#include <rte_errno.h>
+
+#include "bcm_osal.h"
+#include "ecore.h"
+#include "ecore_hw.h"
+#include "ecore_iov_api.h"
+#include "ecore_mcp_api.h"
+#include "ecore_l2_api.h"
+
+
+unsigned long qede_log2_align(unsigned long n)
+{
+ unsigned long ret = n ? 1 : 0;
+ unsigned long _n = n >> 1;
+
+ while (_n) {
+ _n >>= 1;
+ ret <<= 1;
+ }
+
+ if (ret < n)
+ ret <<= 1;
+
+ return ret;
+}
+
+u32 qede_osal_log2(u32 val)
+{
+ u32 log = 0;
+
+ while (val >>= 1)
+ log++;
+
+ return log;
+}
+
+inline void qede_set_bit(u32 nr, unsigned long *addr)
+{
+ __sync_fetch_and_or(addr, (1UL << nr));
+}
+
+inline void qede_clr_bit(u32 nr, unsigned long *addr)
+{
+ __sync_fetch_and_and(addr, ~(1UL << nr));
+}
+
+inline bool qede_test_bit(u32 nr, unsigned long *addr)
+{
+ bool res;
+
+ rte_mb();
+ res = ((*addr) & (1UL << nr)) != 0;
+ rte_mb();
+ return res;
+}
+
+static inline u32 qede_ffb(unsigned long word)
+{
+ unsigned long first_bit;
+
+ first_bit = __builtin_ffsl(word);
+ return first_bit ? (first_bit - 1) : OSAL_BITS_PER_UL;
+}
+
+inline u32 qede_find_first_bit(unsigned long *addr, u32 limit)
+{
+ u32 i;
+ u32 nwords = 0;
+ OSAL_BUILD_BUG_ON(!limit);
+ nwords = (limit - 1) / OSAL_BITS_PER_UL + 1;
+ for (i = 0; i < nwords; i++)
+ if (addr[i] != 0)
+ break;
+
+ return (i == nwords) ? limit : i * OSAL_BITS_PER_UL + qede_ffb(addr[i]);
+}
+
+static inline u32 qede_ffz(unsigned long word)
+{
+ unsigned long first_zero;
+
+ first_zero = __builtin_ffsl(~word);
+ return first_zero ? (first_zero - 1) : OSAL_BITS_PER_UL;
+}
+
+inline u32 qede_find_first_zero_bit(unsigned long *addr, u32 limit)
+{
+ u32 i;
+ u32 nwords = 0;
+ OSAL_BUILD_BUG_ON(!limit);
+ nwords = (limit - 1) / OSAL_BITS_PER_UL + 1;
+ for (i = 0; i < nwords && ~(addr[i]) == 0; i++);
+ return (i == nwords) ? limit : i * OSAL_BITS_PER_UL + qede_ffz(addr[i]);
+}
+
+void qede_vf_fill_driver_data(struct ecore_hwfn *hwfn,
+ __rte_unused struct vf_pf_resc_request *resc_req,
+ struct ecore_vf_acquire_sw_info *vf_sw_info)
+{
+ vf_sw_info->os_type = VFPF_ACQUIRE_OS_LINUX_USERSPACE;
+ vf_sw_info->override_fw_version = 1;
+}
+
+void *osal_dma_alloc_coherent(struct ecore_dev *p_dev,
+ dma_addr_t *phys, size_t size)
+{
+ const struct rte_memzone *mz;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ uint32_t core_id = rte_lcore_id();
+ unsigned int socket_id;
+
+ OSAL_MEM_ZERO(mz_name, sizeof(mz_name));
+ snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
+ (unsigned long)rte_get_timer_cycles());
+ if (core_id == (unsigned int)LCORE_ID_ANY)
+ core_id = 0;
+ socket_id = rte_lcore_to_socket_id(core_id);
+ mz = rte_memzone_reserve_aligned(mz_name, size,
+ socket_id, 0, RTE_CACHE_LINE_SIZE);
+ if (!mz) {
+ DP_ERR(p_dev, "Unable to allocate DMA memory "
+ "of size %zu bytes - %s\n",
+ size, rte_strerror(rte_errno));
+ *phys = 0;
+ return OSAL_NULL;
+ }
+ *phys = mz->phys_addr;
+ DP_VERBOSE(p_dev, ECORE_MSG_PROBE,
+ "size=%zu phys=0x%" PRIx64 " virt=%p on socket=%u\n",
+ mz->len, mz->phys_addr, mz->addr, socket_id);
+ return mz->addr;
+}
+
+void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev,
+ dma_addr_t *phys, size_t size, int align)
+{
+ const struct rte_memzone *mz;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ uint32_t core_id = rte_lcore_id();
+ unsigned int socket_id;
+
+ OSAL_MEM_ZERO(mz_name, sizeof(mz_name));
+ snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
+ (unsigned long)rte_get_timer_cycles());
+ if (core_id == (unsigned int)LCORE_ID_ANY)
+ core_id = 0;
+ socket_id = rte_lcore_to_socket_id(core_id);
+ mz = rte_memzone_reserve_aligned(mz_name, size, socket_id, 0, align);
+ if (!mz) {
+ DP_ERR(p_dev, "Unable to allocate DMA memory "
+ "of size %zu bytes - %s\n",
+ size, rte_strerror(rte_errno));
+ *phys = 0;
+ return OSAL_NULL;
+ }
+ *phys = mz->phys_addr;
+ DP_VERBOSE(p_dev, ECORE_MSG_PROBE,
+ "aligned memory size=%zu phys=0x%" PRIx64 " virt=%p core=%d\n",
+ mz->len, mz->phys_addr, mz->addr, core_id);
+ return mz->addr;
+}
+
+#ifdef CONFIG_ECORE_ZIPPED_FW
+u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
+ u8 *input_buf, u32 max_size, u8 *unzip_buf)
+{
+ int rc;
+
+ p_hwfn->stream->next_in = input_buf;
+ p_hwfn->stream->avail_in = input_len;
+ p_hwfn->stream->next_out = unzip_buf;
+ p_hwfn->stream->avail_out = max_size;
+
+ rc = inflateInit2(p_hwfn->stream, MAX_WBITS);
+
+ if (rc != Z_OK) {
+ DP_ERR(p_hwfn,
+ "zlib init failed, rc = %d\n", rc);
+ return 0;
+ }
+
+ rc = inflate(p_hwfn->stream, Z_FINISH);
+ inflateEnd(p_hwfn->stream);
+
+ if (rc != Z_OK && rc != Z_STREAM_END) {
+ DP_ERR(p_hwfn,
+ "FW unzip error: %s, rc=%d\n", p_hwfn->stream->msg,
+ rc);
+ return 0;
+ }
+
+ return p_hwfn->stream->total_out / 4;
+}
+#endif
+
+void
+qede_get_mcp_proto_stats(struct ecore_dev *edev,
+ enum ecore_mcp_protocol_type type,
+ union ecore_mcp_protocol_stats *stats)
+{
+ struct ecore_eth_stats lan_stats;
+
+ if (type == ECORE_MCP_LAN_STATS) {
+ ecore_get_vport_stats(edev, &lan_stats);
+ stats->lan_stats.ucast_rx_pkts = lan_stats.rx_ucast_pkts;
+ stats->lan_stats.ucast_tx_pkts = lan_stats.tx_ucast_pkts;
+ stats->lan_stats.fcs_err = -1;
+ } else {
+ DP_INFO(edev, "Statistics request type %d not supported\n",
+ type);
+ }
+}
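The helpers above back the OSAL_* macros that the ecore base code relies on: qede_log2_align() rounds a value up to the next power of two (it is wired to OSAL_ROUNDUP_POW_OF_TWO in bcm_osal.h), qede_osal_log2() is a floor log2, and the find-first-bit helpers scan a word-array bitmap and return either a bit index or the supplied limit when nothing matches. A minimal standalone sketch of the rounding behaviour, with the body copied from qede_log2_align() above purely for illustration (not part of the patch):

/* Illustrative sketch only: mirrors qede_log2_align() from bcm_osal.c. */
#include <assert.h>
#include <stdio.h>

static unsigned long log2_align(unsigned long n)
{
        unsigned long ret = n ? 1 : 0;
        unsigned long _n = n >> 1;

        while (_n) {
                _n >>= 1;
                ret <<= 1;
        }

        if (ret < n)
                ret <<= 1;

        return ret;
}

int main(void)
{
        assert(log2_align(0) == 0);     /* zero is left as zero */
        assert(log2_align(5) == 8);     /* rounded up to the next power of two */
        assert(log2_align(64) == 64);   /* powers of two pass through unchanged */
        printf("log2_align(5) = %lu\n", log2_align(5));
        return 0;
}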
diff --git a/src/seastar/dpdk/drivers/net/qede/base/bcm_osal.h b/src/seastar/dpdk/drivers/net/qede/base/bcm_osal.h
new file mode 100644
index 00000000..32c9b251
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/bcm_osal.h
@@ -0,0 +1,427 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __BCM_OSAL_H
+#define __BCM_OSAL_H
+
+#include <rte_byteorder.h>
+#include <rte_spinlock.h>
+#include <rte_malloc.h>
+#include <rte_atomic.h>
+#include <rte_memcpy.h>
+#include <rte_log.h>
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_io.h>
+
+/* Forward declaration */
+struct ecore_dev;
+struct ecore_hwfn;
+struct ecore_vf_acquire_sw_info;
+struct vf_pf_resc_request;
+enum ecore_mcp_protocol_type;
+union ecore_mcp_protocol_stats;
+
+void qed_link_update(struct ecore_hwfn *hwfn);
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+#undef __BIG_ENDIAN
+#ifndef __LITTLE_ENDIAN
+#define __LITTLE_ENDIAN
+#endif
+#else
+#undef __LITTLE_ENDIAN
+#ifndef __BIG_ENDIAN
+#define __BIG_ENDIAN
+#endif
+#endif
+
+#define OSAL_WARN(arg1, arg2, arg3, ...) (0)
+
+/* Memory Types */
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+
+typedef int16_t s16;
+typedef int32_t s32;
+
+typedef u16 __le16;
+typedef u32 __le32;
+typedef u32 OSAL_BE32;
+
+#define osal_uintptr_t uintptr_t
+
+typedef phys_addr_t dma_addr_t;
+
+typedef rte_spinlock_t osal_spinlock_t;
+
+typedef void *osal_dpc_t;
+
+typedef size_t osal_size_t;
+
+typedef intptr_t osal_int_ptr_t;
+
+typedef int bool;
+#define true 1
+#define false 0
+
+#define nothing do {} while (0)
+
+/* Delays */
+
+#define DELAY(x) rte_delay_us(x)
+#define usec_delay(x) DELAY(x)
+#define msec_delay(x) DELAY(1000 * (x))
+#define OSAL_UDELAY(time) usec_delay(time)
+#define OSAL_MSLEEP(time) msec_delay(time)
+
+/* Memory allocations and deallocations */
+
+#define OSAL_NULL ((void *)0)
+#define OSAL_ALLOC(dev, GFP, size) rte_malloc("qede", size, 0)
+#define OSAL_ZALLOC(dev, GFP, size) rte_zmalloc("qede", size, 0)
+#define OSAL_CALLOC(dev, GFP, num, size) rte_calloc("qede", num, size, 0)
+#define OSAL_VZALLOC(dev, size) rte_zmalloc("qede", size, 0)
+#define OSAL_FREE(dev, memory) \
+ do { \
+ rte_free((void *)memory); \
+ memory = OSAL_NULL; \
+ } while (0)
+#define OSAL_VFREE(dev, memory) OSAL_FREE(dev, memory)
+#define OSAL_MEM_ZERO(mem, size) bzero(mem, size)
+#define OSAL_MEMCPY(dst, src, size) rte_memcpy(dst, src, size)
+#define OSAL_MEMCMP(s1, s2, size) memcmp(s1, s2, size)
+#define OSAL_MEMSET(dst, val, length) \
+ memset(dst, val, length)
+
+void *osal_dma_alloc_coherent(struct ecore_dev *, dma_addr_t *, size_t);
+
+void *osal_dma_alloc_coherent_aligned(struct ecore_dev *, dma_addr_t *,
+ size_t, int);
+
+#define OSAL_DMA_ALLOC_COHERENT(dev, phys, size) \
+ osal_dma_alloc_coherent(dev, phys, size)
+
+#define OSAL_DMA_ALLOC_COHERENT_ALIGNED(dev, phys, size, align) \
+ osal_dma_alloc_coherent_aligned(dev, phys, size, align)
+
+/* TODO: */
+#define OSAL_DMA_FREE_COHERENT(dev, virt, phys, size) nothing
+
+/* HW reads/writes */
+
+#define DIRECT_REG_RD(_dev, _reg_addr) rte_read32(_reg_addr)
+
+#define REG_RD(_p_hwfn, _reg_offset) \
+ DIRECT_REG_RD(_p_hwfn, \
+ ((u8 *)(uintptr_t)(_p_hwfn->regview) + (_reg_offset)))
+
+#define DIRECT_REG_WR16(_reg_addr, _val) rte_write16((_val), (_reg_addr))
+
+#define DIRECT_REG_WR(_dev, _reg_addr, _val) rte_write32((_val), (_reg_addr))
+
+#define DIRECT_REG_WR_RELAXED(_dev, _reg_addr, _val) \
+ rte_write32_relaxed((_val), (_reg_addr))
+
+#define REG_WR(_p_hwfn, _reg_offset, _val) \
+ DIRECT_REG_WR(NULL, \
+ ((u8 *)((uintptr_t)(_p_hwfn->regview)) + (_reg_offset)), (u32)_val)
+
+#define REG_WR16(_p_hwfn, _reg_offset, _val) \
+ DIRECT_REG_WR16(((u8 *)(uintptr_t)(_p_hwfn->regview) + \
+ (_reg_offset)), (u16)_val)
+
+#define DOORBELL(_p_hwfn, _db_addr, _val) \
+ DIRECT_REG_WR_RELAXED((_p_hwfn), \
+ ((u8 *)(uintptr_t)(_p_hwfn->doorbells) + \
+ (_db_addr)), (u32)_val)
+
+/* Mutexes */
+
+typedef pthread_mutex_t osal_mutex_t;
+#define OSAL_MUTEX_RELEASE(lock) pthread_mutex_unlock(lock)
+#define OSAL_MUTEX_INIT(lock) pthread_mutex_init(lock, NULL)
+#define OSAL_MUTEX_ACQUIRE(lock) pthread_mutex_lock(lock)
+#define OSAL_MUTEX_ALLOC(hwfn, lock) nothing
+#define OSAL_MUTEX_DEALLOC(lock) nothing
+
+/* Spinlocks */
+
+#define OSAL_SPIN_LOCK_INIT(lock) rte_spinlock_init(lock)
+#define OSAL_SPIN_LOCK(lock) rte_spinlock_lock(lock)
+#define OSAL_SPIN_UNLOCK(lock) rte_spinlock_unlock(lock)
+#define OSAL_SPIN_LOCK_IRQSAVE(lock, flags) nothing
+#define OSAL_SPIN_UNLOCK_IRQSAVE(lock, flags) nothing
+#define OSAL_SPIN_LOCK_ALLOC(hwfn, lock) nothing
+#define OSAL_SPIN_LOCK_DEALLOC(lock) nothing
+
+/* DPC */
+
+#define OSAL_DPC_ALLOC(hwfn) OSAL_ALLOC(hwfn, GFP, sizeof(osal_dpc_t))
+#define OSAL_DPC_INIT(dpc, hwfn) nothing
+#define OSAL_POLL_MODE_DPC(hwfn) nothing
+#define OSAL_DPC_SYNC(hwfn) nothing
+
+/* Lists */
+
+#define OSAL_LIST_SPLICE_INIT(new_list, list) nothing
+#define OSAL_LIST_SPLICE_TAIL_INIT(new_list, list) nothing
+
+typedef struct _osal_list_entry_t {
+ struct _osal_list_entry_t *next, *prev;
+} osal_list_entry_t;
+
+typedef struct osal_list_t {
+ osal_list_entry_t *head, *tail;
+ unsigned long cnt;
+} osal_list_t;
+
+#define OSAL_LIST_INIT(list) \
+ do { \
+ (list)->head = NULL; \
+ (list)->tail = NULL; \
+ (list)->cnt = 0; \
+ } while (0)
+
+#define OSAL_LIST_PUSH_HEAD(entry, list) \
+ do { \
+ (entry)->prev = (osal_list_entry_t *)0; \
+ (entry)->next = (list)->head; \
+ if ((list)->tail == (osal_list_entry_t *)0) { \
+ (list)->tail = (entry); \
+ } else { \
+ (list)->head->prev = (entry); \
+ } \
+ (list)->head = (entry); \
+ (list)->cnt++; \
+ } while (0)
+
+#define OSAL_LIST_PUSH_TAIL(entry, list) \
+ do { \
+ (entry)->next = (osal_list_entry_t *)0; \
+ (entry)->prev = (list)->tail; \
+ if ((list)->tail) { \
+ (list)->tail->next = (entry); \
+ } else { \
+ (list)->head = (entry); \
+ } \
+ (list)->tail = (entry); \
+ (list)->cnt++; \
+ } while (0)
+
+#define OSAL_LIST_FIRST_ENTRY(list, type, field) \
+ (type *)((list)->head)
+
+#define OSAL_LIST_REMOVE_ENTRY(entry, list) \
+ do { \
+ if ((list)->head == (entry)) { \
+ if ((list)->head) { \
+ (list)->head = (list)->head->next; \
+ if ((list)->head) { \
+ (list)->head->prev = (osal_list_entry_t *)0;\
+ } else { \
+ (list)->tail = (osal_list_entry_t *)0; \
+ } \
+ (list)->cnt--; \
+ } \
+ } else if ((list)->tail == (entry)) { \
+ if ((list)->tail) { \
+ (list)->tail = (list)->tail->prev; \
+ if ((list)->tail) { \
+ (list)->tail->next = (osal_list_entry_t *)0;\
+ } else { \
+ (list)->head = (osal_list_entry_t *)0; \
+ } \
+ (list)->cnt--; \
+ } \
+ } else { \
+ (entry)->prev->next = (entry)->next; \
+ (entry)->next->prev = (entry)->prev; \
+ (list)->cnt--; \
+ } \
+ } while (0)
+
+#define OSAL_LIST_IS_EMPTY(list) \
+ ((list)->cnt == 0)
+
+#define OSAL_LIST_NEXT(entry, field, type) \
+ (type *)((&((entry)->field))->next)
+
+/* TODO: Check field, type order */
+
+#define OSAL_LIST_FOR_EACH_ENTRY(entry, list, field, type) \
+ for (entry = OSAL_LIST_FIRST_ENTRY(list, type, field); \
+ entry; \
+ entry = OSAL_LIST_NEXT(entry, field, type))
+
+#define OSAL_LIST_FOR_EACH_ENTRY_SAFE(entry, tmp_entry, list, field, type) \
+ for (entry = OSAL_LIST_FIRST_ENTRY(list, type, field), \
+ tmp_entry = (entry) ? OSAL_LIST_NEXT(entry, field, type) : NULL; \
+ entry != NULL; \
+ entry = (type *)tmp_entry, \
+ tmp_entry = (entry) ? OSAL_LIST_NEXT(entry, field, type) : NULL)
+
+/* TODO: OSAL_LIST_INSERT_ENTRY_AFTER */
+#define OSAL_LIST_INSERT_ENTRY_AFTER(new_entry, entry, list) \
+ OSAL_LIST_PUSH_HEAD(new_entry, list)
+
+/* PCI config space */
+
+#define OSAL_PCI_READ_CONFIG_BYTE(dev, address, dst) nothing
+#define OSAL_PCI_READ_CONFIG_WORD(dev, address, dst) nothing
+#define OSAL_PCI_READ_CONFIG_DWORD(dev, address, dst) nothing
+#define OSAL_PCI_FIND_EXT_CAPABILITY(dev, pcie_id) 0
+#define OSAL_PCI_FIND_CAPABILITY(dev, pcie_id) 0
+#define OSAL_PCI_WRITE_CONFIG_WORD(dev, address, val) nothing
+#define OSAL_BAR_SIZE(dev, bar_id) 0
+
+/* Barriers */
+
+#define OSAL_MMIOWB(dev) rte_wmb()
+#define OSAL_BARRIER(dev) rte_compiler_barrier()
+#define OSAL_SMP_RMB(dev) rte_rmb()
+#define OSAL_SMP_WMB(dev) rte_wmb()
+#define OSAL_RMB(dev) rte_rmb()
+#define OSAL_WMB(dev) rte_wmb()
+#define OSAL_DMA_SYNC(dev, addr, length, is_post) nothing
+
+#define OSAL_BIT(nr) (1UL << (nr))
+#define OSAL_BITS_PER_BYTE (8)
+#define OSAL_BITS_PER_UL (sizeof(unsigned long) * OSAL_BITS_PER_BYTE)
+#define OSAL_BITS_PER_UL_MASK (OSAL_BITS_PER_UL - 1)
+
+/* Bitops */
+void qede_set_bit(u32, unsigned long *);
+#define OSAL_SET_BIT(bit, bitmap) \
+ qede_set_bit(bit, bitmap)
+
+void qede_clr_bit(u32, unsigned long *);
+#define OSAL_CLEAR_BIT(bit, bitmap) \
+ qede_clr_bit(bit, bitmap)
+
+bool qede_test_bit(u32, unsigned long *);
+#define OSAL_TEST_BIT(bit, bitmap) \
+ qede_test_bit(bit, bitmap)
+
+u32 qede_find_first_bit(unsigned long *, u32);
+#define OSAL_FIND_FIRST_BIT(bitmap, length) \
+ qede_find_first_bit(bitmap, length)
+
+u32 qede_find_first_zero_bit(unsigned long *, u32);
+#define OSAL_FIND_FIRST_ZERO_BIT(bitmap, length) \
+ qede_find_first_zero_bit(bitmap, length)
+
+#define OSAL_BUILD_BUG_ON(cond) nothing
+#define ETH_ALEN ETHER_ADDR_LEN
+
+#define OSAL_BITMAP_WEIGHT(bitmap, count) 0
+
+#define OSAL_LINK_UPDATE(hwfn) qed_link_update(hwfn)
+#define OSAL_DCBX_AEN(hwfn, mib_type) nothing
+
+/* SR-IOV channel */
+
+#define OSAL_VF_FLR_UPDATE(hwfn) nothing
+#define OSAL_VF_SEND_MSG2PF(dev, done, msg, reply_addr, msg_size, reply_size) 0
+#define OSAL_VF_CQE_COMPLETION(_dev_p, _cqe, _protocol) (0)
+#define OSAL_PF_VF_MSG(hwfn, vfid) 0
+#define OSAL_PF_VF_MALICIOUS(hwfn, vfid) nothing
+#define OSAL_IOV_CHK_UCAST(hwfn, vfid, params) 0
+#define OSAL_IOV_POST_START_VPORT(hwfn, vf, vport_id, opaque_fid) nothing
+#define OSAL_IOV_VF_ACQUIRE(hwfn, vfid) 0
+#define OSAL_IOV_VF_CLEANUP(hwfn, vfid) nothing
+#define OSAL_IOV_VF_VPORT_UPDATE(hwfn, vfid, p_params, p_mask) 0
+#define OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(_dev_p, _resc_resp) 0
+#define OSAL_IOV_GET_OS_TYPE() 0
+#define OSAL_IOV_VF_MSG_TYPE(hwfn, vfid, vf_msg_type) 0
+#define OSAL_IOV_PF_RESP_TYPE(hwfn, vfid, pf_resp_type) 0
+
+u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
+ u8 *input_buf, u32 max_size, u8 *unzip_buf);
+void qede_vf_fill_driver_data(struct ecore_hwfn *, struct vf_pf_resc_request *,
+ struct ecore_vf_acquire_sw_info *);
+#define OSAL_VF_FILL_ACQUIRE_RESC_REQ(_dev_p, _resc_req, _os_info) \
+ qede_vf_fill_driver_data(_dev_p, _resc_req, _os_info)
+
+#define OSAL_UNZIP_DATA(p_hwfn, input_len, buf, max_size, unzip_buf) \
+ qede_unzip_data(p_hwfn, input_len, buf, max_size, unzip_buf)
+
+/* TODO: */
+#define OSAL_SCHEDULE_RECOVERY_HANDLER(hwfn) nothing
+#define OSAL_HW_ERROR_OCCURRED(hwfn, err_type) nothing
+
+#define OSAL_NVM_IS_ACCESS_ENABLED(hwfn) (1)
+#define OSAL_NUM_ACTIVE_CPU() 0
+
+/* Utility functions */
+
+#define RTE_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define DIV_ROUND_UP(size, to_what) RTE_DIV_ROUND_UP(size, to_what)
+#define RTE_ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
+#define ROUNDUP(value, to_what) RTE_ROUNDUP((value), (to_what))
+
+unsigned long qede_log2_align(unsigned long n);
+#define OSAL_ROUNDUP_POW_OF_TWO(val) \
+ qede_log2_align(val)
+
+u32 qede_osal_log2(u32);
+#define OSAL_LOG2(val) \
+ qede_osal_log2(val)
+
+#define PRINT(format, ...) printf
+#define PRINT_ERR(format, ...) PRINT
+
+#define OFFSETOF(str, field) __builtin_offsetof(str, field)
+#define OSAL_ASSERT(is_assert) assert(is_assert)
+#define OSAL_BEFORE_PF_START(file, engine) nothing
+#define OSAL_AFTER_PF_STOP(file, engine) nothing
+
+/* Endian macros */
+#define OSAL_CPU_TO_BE32(val) rte_cpu_to_be_32(val)
+#define OSAL_BE32_TO_CPU(val) rte_be_to_cpu_32(val)
+#define OSAL_CPU_TO_LE32(val) rte_cpu_to_le_32(val)
+#define OSAL_CPU_TO_LE16(val) rte_cpu_to_le_16(val)
+#define OSAL_LE32_TO_CPU(val) rte_le_to_cpu_32(val)
+#define OSAL_LE16_TO_CPU(val) rte_le_to_cpu_16(val)
+#define OSAL_CPU_TO_BE64(val) rte_cpu_to_be_64(val)
+
+#define OSAL_ARRAY_SIZE(arr) RTE_DIM(arr)
+#define OSAL_SPRINTF(name, pattern, ...) \
+ sprintf(name, pattern, ##__VA_ARGS__)
+#define OSAL_SNPRINTF(buf, size, format, ...) \
+ snprintf(buf, size, format, ##__VA_ARGS__)
+#define OSAL_STRLEN(string) strlen(string)
+#define OSAL_STRCPY(dst, string) strcpy(dst, string)
+#define OSAL_STRNCPY(dst, string, len) strncpy(dst, string, len)
+#define OSAL_STRCMP(str1, str2) strcmp(str1, str2)
+#define OSAL_STRTOUL(str, base, res) 0
+
+#define OSAL_INLINE inline
+#define OSAL_REG_ADDR(_p_hwfn, _offset) \
+ (void *)((u8 *)(uintptr_t)(_p_hwfn->regview) + (_offset))
+#define OSAL_PAGE_SIZE 4096
+#define OSAL_IOMEM volatile
+#define OSAL_UNLIKELY(x) __builtin_expect(!!(x), 0)
+#define OSAL_MIN_T(type, __min1, __min2) \
+ ((type)(__min1) < (type)(__min2) ? (type)(__min1) : (type)(__min2))
+#define OSAL_MAX_T(type, __max1, __max2) \
+ ((type)(__max1) > (type)(__max2) ? (type)(__max1) : (type)(__max2))
+
+void qede_get_mcp_proto_stats(struct ecore_dev *, enum ecore_mcp_protocol_type,
+ union ecore_mcp_protocol_stats *);
+#define OSAL_GET_PROTOCOL_STATS(dev, type, stats) \
+ qede_get_mcp_proto_stats(dev, type, stats)
+
+#define OSAL_SLOWPATH_IRQ_REQ(p_hwfn) (0)
+#define OSAL_MFW_TLV_REQ(p_hwfn) (0)
+#define OSAL_MFW_FILL_TLV_DATA(type, buf, data) (0)
+#define OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, mask, b_update, tunn) 0
+#endif /* __BCM_OSAL_H */
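One property of the list macros above is easy to miss: OSAL_LIST_FIRST_ENTRY and OSAL_LIST_NEXT cast the stored osal_list_entry_t pointer directly to the containing type, with no container_of()-style offset arithmetic. That only works when the osal_list_entry_t member sits at offset zero of any structure placed on such a list, which is how the ecore code lays out the structures it links this way. A hedged sketch of the intended usage; demo_elem and demo_sum are hypothetical names, not part of the driver:

/* Illustrative sketch only, assuming bcm_osal.h (and DPDK headers) are on the
 * include path.
 */
#include "bcm_osal.h"

struct demo_elem {
        osal_list_entry_t list_entry;   /* must be the first member */
        u32 payload;
};

static u32 demo_sum(osal_list_t *lst)
{
        struct demo_elem *ent;
        u32 sum = 0;

        /* The cast inside OSAL_LIST_NEXT() lands on the element itself only
         * because list_entry is at offset zero of struct demo_elem.
         */
        OSAL_LIST_FOR_EACH_ENTRY(ent, lst, list_entry, struct demo_elem)
                sum += ent->payload;

        return sum;
}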
diff --git a/src/seastar/dpdk/drivers/net/qede/base/common_hsi.h b/src/seastar/dpdk/drivers/net/qede/base/common_hsi.h
new file mode 100644
index 00000000..cbcde227
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/common_hsi.h
@@ -0,0 +1,1638 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __COMMON_HSI__
+#define __COMMON_HSI__
+/********************************/
+/* PROTOCOL COMMON FW CONSTANTS */
+/********************************/
+
+/* Temporarily here should be added to HSI automatically by resource allocation
+ * tool.
+ */
+#define T_TEST_AGG_INT_TEMP 6
+#define M_TEST_AGG_INT_TEMP 8
+#define U_TEST_AGG_INT_TEMP 6
+#define X_TEST_AGG_INT_TEMP 14
+#define Y_TEST_AGG_INT_TEMP 4
+#define P_TEST_AGG_INT_TEMP 4
+
+#define X_FINAL_CLEANUP_AGG_INT 1
+
+#define EVENT_RING_PAGE_SIZE_BYTES 4096
+
+#define NUM_OF_GLOBAL_QUEUES 128
+#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64
+
+#define ISCSI_CDU_TASK_SEG_TYPE 0
+#define FCOE_CDU_TASK_SEG_TYPE 0
+#define RDMA_CDU_TASK_SEG_TYPE 1
+
+#define FW_ASSERT_GENERAL_ATTN_IDX 32
+
+#define MAX_PINNED_CCFC 32
+
+#define EAGLE_ENG1_WORKAROUND_NIG_FLOWCTRL_MODE 3
+
+/* Queue Zone sizes in bytes */
+#define TSTORM_QZONE_SIZE 8 /*tstorm_scsi_queue_zone*/
+#define MSTORM_QZONE_SIZE 16 /*mstorm_eth_queue_zone. Used only for RX
+ *producer of VFs in backward compatibility
+ *mode.
+ */
+#define USTORM_QZONE_SIZE 8 /*ustorm_eth_queue_zone*/
+#define XSTORM_QZONE_SIZE 8 /*xstorm_eth_queue_zone*/
+#define YSTORM_QZONE_SIZE 0
+#define PSTORM_QZONE_SIZE 0
+
+/*Log of mstorm default VF zone size.*/
+#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7
+/*Maximum number of RX queues that can be allocated to VF by default*/
+#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT 16
+/*Maximum number of RX queues that can be allocated to VF with doubled VF zone
+ * size. Up to 96 VF supported in this mode
+ */
+#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE 48
+/*Maximum number of RX queues that can be allocated to VF with 4 VF zone size.
+ * Up to 48 VF supported in this mode
+ */
+#define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD 112
+
+
+/********************************/
+/* CORE (LIGHT L2) FW CONSTANTS */
+/********************************/
+
+#define CORE_LL2_MAX_RAMROD_PER_CON 8
+#define CORE_LL2_TX_BD_PAGE_SIZE_BYTES 4096
+#define CORE_LL2_RX_BD_PAGE_SIZE_BYTES 4096
+#define CORE_LL2_RX_CQE_PAGE_SIZE_BYTES 4096
+#define CORE_LL2_RX_NUM_NEXT_PAGE_BDS 1
+
+#define CORE_LL2_TX_MAX_BDS_PER_PACKET 12
+
+#define CORE_SPQE_PAGE_SIZE_BYTES 4096
+
+/*
+ * Usually LL2 queues are opened in pairs TX-RX.
+ * There is a hard restriction on number of RX queues (limited by Tstorm RAM)
+ * and TX counters (Pstorm RAM).
+ * Number of TX queues is almost unlimited.
+ * The constants are different so as to allow asymmetric LL2 connections
+ */
+
+#define MAX_NUM_LL2_RX_QUEUES 48
+#define MAX_NUM_LL2_TX_STATS_COUNTERS 48
+
+
+/****************************************************************************/
+/* Include firmware version number only - do not add constants here to avoid */
+/* redundant compilations */
+/****************************************************************************/
+
+
+#define FW_MAJOR_VERSION 8
+#define FW_MINOR_VERSION 18
+#define FW_REVISION_VERSION 9
+#define FW_ENGINEERING_VERSION 0
+
+/***********************/
+/* COMMON HW CONSTANTS */
+/***********************/
+
+/* PCI functions */
+#define MAX_NUM_PORTS_K2 (4)
+#define MAX_NUM_PORTS_BB (2)
+#define MAX_NUM_PORTS (MAX_NUM_PORTS_K2)
+
+#define MAX_NUM_PFS_K2 (16)
+#define MAX_NUM_PFS_BB (8)
+#define MAX_NUM_PFS (MAX_NUM_PFS_K2)
+#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
+
+#define MAX_NUM_VFS_BB (120)
+#define MAX_NUM_VFS_K2 (192)
+#define E4_MAX_NUM_VFS (MAX_NUM_VFS_K2)
+#define COMMON_MAX_NUM_VFS (240)
+
+#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
+#define MAX_NUM_FUNCTIONS_K2 (MAX_NUM_PFS_K2 + MAX_NUM_VFS_K2)
+#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + E4_MAX_NUM_VFS)
+
+/* In both BB and K2, VF numbering starts from 16, so arrays containing all
+ * possible PFs and VFs need a constant of this size.
+ */
+#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
+#define MAX_FUNCTION_NUMBER_K2 (MAX_NUM_PFS + MAX_NUM_VFS_K2)
+#define MAX_FUNCTION_NUMBER (MAX_NUM_PFS + E4_MAX_NUM_VFS)
+
+#define MAX_NUM_VPORTS_K2 (208)
+#define MAX_NUM_VPORTS_BB (160)
+#define MAX_NUM_VPORTS (MAX_NUM_VPORTS_K2)
+
+#define MAX_NUM_L2_QUEUES_K2 (320)
+#define MAX_NUM_L2_QUEUES_BB (256)
+#define MAX_NUM_L2_QUEUES (MAX_NUM_L2_QUEUES_K2)
+
+/* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */
+/* 4-Port K2. */
+#define NUM_PHYS_TCS_4PORT_K2 (4)
+#define NUM_OF_PHYS_TCS (8)
+
+#define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1)
+#define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1)
+
+#define LB_TC (NUM_OF_PHYS_TCS)
+
+/* Num of possible traffic priority values */
+#define NUM_OF_PRIO (8)
+
+#define MAX_NUM_VOQS_K2 (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2)
+#define MAX_NUM_VOQS_BB (NUM_OF_TCS * MAX_NUM_PORTS_BB)
+#define MAX_NUM_VOQS (MAX_NUM_VOQS_K2)
+#define MAX_PHYS_VOQS (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB)
+
+/* CIDs */
+#define E4_NUM_OF_CONNECTION_TYPES (8)
+#define NUM_OF_TASK_TYPES (8)
+#define NUM_OF_LCIDS (320)
+#define NUM_OF_LTIDS (320)
+
+/* Clock values */
+#define MASTER_CLK_FREQ_E4 (375e6)
+#define STORM_CLK_FREQ_E4 (1000e6)
+#define CLK25M_CLK_FREQ_E4 (25e6)
+
+/* Global PXP windows (GTT) */
+#define NUM_OF_GTT 19
+#define GTT_DWORD_SIZE_BITS 10
+#define GTT_BYTE_SIZE_BITS (GTT_DWORD_SIZE_BITS + 2)
+#define GTT_DWORD_SIZE (1 << GTT_DWORD_SIZE_BITS)
+
+/* Tools Version */
+#define TOOLS_VERSION 10
+/*****************/
+/* CDU CONSTANTS */
+/*****************/
+
+#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17)
+#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff)
+
+#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12)
+#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff)
+
+#define CDU_CONTEXT_VALIDATION_CFG_ENABLE_SHIFT (0)
+#define CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT (1)
+#define CDU_CONTEXT_VALIDATION_CFG_USE_TYPE (2)
+#define CDU_CONTEXT_VALIDATION_CFG_USE_REGION (3)
+#define CDU_CONTEXT_VALIDATION_CFG_USE_CID (4)
+#define CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE (5)
+
+
+/*****************/
+/* DQ CONSTANTS */
+/*****************/
+
+/* DEMS */
+#define DQ_DEMS_LEGACY 0
+#define DQ_DEMS_TOE_MORE_TO_SEND 3
+#define DQ_DEMS_TOE_LOCAL_ADV_WND 4
+#define DQ_DEMS_ROCE_CQ_CONS 7
+
+/* XCM agg val selection (HW) */
+#define DQ_XCM_AGG_VAL_SEL_WORD2 0
+#define DQ_XCM_AGG_VAL_SEL_WORD3 1
+#define DQ_XCM_AGG_VAL_SEL_WORD4 2
+#define DQ_XCM_AGG_VAL_SEL_WORD5 3
+#define DQ_XCM_AGG_VAL_SEL_REG3 4
+#define DQ_XCM_AGG_VAL_SEL_REG4 5
+#define DQ_XCM_AGG_VAL_SEL_REG5 6
+#define DQ_XCM_AGG_VAL_SEL_REG6 7
+
+/* XCM agg val selection (FW) */
+#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD \
+ DQ_XCM_AGG_VAL_SEL_WORD2
+#define DQ_XCM_ETH_TX_BD_CONS_CMD \
+ DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_CORE_TX_BD_CONS_CMD \
+ DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_ETH_TX_BD_PROD_CMD \
+ DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_CORE_TX_BD_PROD_CMD \
+ DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_CORE_SPQ_PROD_CMD \
+ DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5
+#define DQ_XCM_FCOE_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_FCOE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_FCOE_X_FERQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD5
+#define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
+#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6
+#define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
+#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4
+
+/* UCM agg val selection (HW) */
+#define DQ_UCM_AGG_VAL_SEL_WORD0 0
+#define DQ_UCM_AGG_VAL_SEL_WORD1 1
+#define DQ_UCM_AGG_VAL_SEL_WORD2 2
+#define DQ_UCM_AGG_VAL_SEL_WORD3 3
+#define DQ_UCM_AGG_VAL_SEL_REG0 4
+#define DQ_UCM_AGG_VAL_SEL_REG1 5
+#define DQ_UCM_AGG_VAL_SEL_REG2 6
+#define DQ_UCM_AGG_VAL_SEL_REG3 7
+
+/* UCM agg val selection (FW) */
+#define DQ_UCM_ETH_PMD_TX_CONS_CMD DQ_UCM_AGG_VAL_SEL_WORD2
+#define DQ_UCM_ETH_PMD_RX_CONS_CMD DQ_UCM_AGG_VAL_SEL_WORD3
+#define DQ_UCM_ROCE_CQ_CONS_CMD DQ_UCM_AGG_VAL_SEL_REG0
+#define DQ_UCM_ROCE_CQ_PROD_CMD DQ_UCM_AGG_VAL_SEL_REG2
+
+/* TCM agg val selection (HW) */
+#define DQ_TCM_AGG_VAL_SEL_WORD0 0
+#define DQ_TCM_AGG_VAL_SEL_WORD1 1
+#define DQ_TCM_AGG_VAL_SEL_WORD2 2
+#define DQ_TCM_AGG_VAL_SEL_WORD3 3
+#define DQ_TCM_AGG_VAL_SEL_REG1 4
+#define DQ_TCM_AGG_VAL_SEL_REG2 5
+#define DQ_TCM_AGG_VAL_SEL_REG6 6
+#define DQ_TCM_AGG_VAL_SEL_REG9 7
+
+/* TCM agg val selection (FW) */
+#define DQ_TCM_L2B_BD_PROD_CMD DQ_TCM_AGG_VAL_SEL_WORD1
+#define DQ_TCM_ROCE_RQ_PROD_CMD DQ_TCM_AGG_VAL_SEL_WORD0
+
+/* XCM agg counter flag selection (HW) */
+#define DQ_XCM_AGG_FLG_SHIFT_BIT14 0
+#define DQ_XCM_AGG_FLG_SHIFT_BIT15 1
+#define DQ_XCM_AGG_FLG_SHIFT_CF12 2
+#define DQ_XCM_AGG_FLG_SHIFT_CF13 3
+#define DQ_XCM_AGG_FLG_SHIFT_CF18 4
+#define DQ_XCM_AGG_FLG_SHIFT_CF19 5
+#define DQ_XCM_AGG_FLG_SHIFT_CF22 6
+#define DQ_XCM_AGG_FLG_SHIFT_CF23 7
+
+/* XCM agg counter flag selection (FW) */
+#define DQ_XCM_ETH_DQ_CF_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_CORE_DQ_CF_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_ETH_TERMINATE_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_CORE_TERMINATE_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ETH_SLOW_PATH_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_CORE_SLOW_PATH_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_TPH_EN_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_FCOE_SLOW_PATH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ISCSI_DQ_FLUSH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ISCSI_SLOW_PATH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_TOE_DQ_FLUSH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_TOE_SLOW_PATH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF22)
+
+/* UCM agg counter flag selection (HW) */
+#define DQ_UCM_AGG_FLG_SHIFT_CF0 0
+#define DQ_UCM_AGG_FLG_SHIFT_CF1 1
+#define DQ_UCM_AGG_FLG_SHIFT_CF3 2
+#define DQ_UCM_AGG_FLG_SHIFT_CF4 3
+#define DQ_UCM_AGG_FLG_SHIFT_CF5 4
+#define DQ_UCM_AGG_FLG_SHIFT_CF6 5
+#define DQ_UCM_AGG_FLG_SHIFT_RULE0EN 6
+#define DQ_UCM_AGG_FLG_SHIFT_RULE1EN 7
+
+/* UCM agg counter flag selection (FW) */
+#define DQ_UCM_ETH_PMD_TX_ARM_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF4)
+#define DQ_UCM_ETH_PMD_RX_ARM_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF5)
+#define DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF4)
+#define DQ_UCM_ROCE_CQ_ARM_CF_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF5)
+#define DQ_UCM_TOE_TIMER_STOP_ALL_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF3)
+#define DQ_UCM_TOE_SLOW_PATH_CF_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF4)
+#define DQ_UCM_TOE_DQ_CF_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF5)
+
+/* TCM agg counter flag selection (HW) */
+#define DQ_TCM_AGG_FLG_SHIFT_CF0 0
+#define DQ_TCM_AGG_FLG_SHIFT_CF1 1
+#define DQ_TCM_AGG_FLG_SHIFT_CF2 2
+#define DQ_TCM_AGG_FLG_SHIFT_CF3 3
+#define DQ_TCM_AGG_FLG_SHIFT_CF4 4
+#define DQ_TCM_AGG_FLG_SHIFT_CF5 5
+#define DQ_TCM_AGG_FLG_SHIFT_CF6 6
+#define DQ_TCM_AGG_FLG_SHIFT_CF7 7
+
+/* TCM agg counter flag selection (FW) */
+#define DQ_TCM_FCOE_FLUSH_Q0_CMD (1 << DQ_TCM_AGG_FLG_SHIFT_CF1)
+#define DQ_TCM_FCOE_DUMMY_TIMER_CMD (1 << DQ_TCM_AGG_FLG_SHIFT_CF2)
+#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD (1 << DQ_TCM_AGG_FLG_SHIFT_CF3)
+#define DQ_TCM_ISCSI_FLUSH_Q0_CMD (1 << DQ_TCM_AGG_FLG_SHIFT_CF1)
+#define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD (1 << DQ_TCM_AGG_FLG_SHIFT_CF3)
+#define DQ_TCM_TOE_FLUSH_Q0_CMD (1 << DQ_TCM_AGG_FLG_SHIFT_CF1)
+#define DQ_TCM_TOE_TIMER_STOP_ALL_CMD (1 << DQ_TCM_AGG_FLG_SHIFT_CF3)
+#define DQ_TCM_IWARP_POST_RQ_CF_CMD (1 << DQ_TCM_AGG_FLG_SHIFT_CF1)
+
+/* PWM address mapping */
+#define DQ_PWM_OFFSET_DPM_BASE 0x0
+#define DQ_PWM_OFFSET_DPM_END 0x27
+#define DQ_PWM_OFFSET_XCM16_BASE 0x40
+#define DQ_PWM_OFFSET_XCM32_BASE 0x44
+#define DQ_PWM_OFFSET_UCM16_BASE 0x48
+#define DQ_PWM_OFFSET_UCM32_BASE 0x4C
+#define DQ_PWM_OFFSET_UCM16_4 0x50
+#define DQ_PWM_OFFSET_TCM16_BASE 0x58
+#define DQ_PWM_OFFSET_TCM32_BASE 0x5C
+#define DQ_PWM_OFFSET_XCM_FLAGS 0x68
+#define DQ_PWM_OFFSET_UCM_FLAGS 0x69
+#define DQ_PWM_OFFSET_TCM_FLAGS 0x6B
+
+#define DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD (DQ_PWM_OFFSET_XCM16_BASE + 2)
+#define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT (DQ_PWM_OFFSET_UCM32_BASE)
+#define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_16BIT (DQ_PWM_OFFSET_UCM16_4)
+#define DQ_PWM_OFFSET_UCM_RDMA_INT_TIMEOUT (DQ_PWM_OFFSET_UCM16_BASE + 2)
+#define DQ_PWM_OFFSET_UCM_RDMA_ARM_FLAGS (DQ_PWM_OFFSET_UCM_FLAGS)
+#define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 1)
+#define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 3)
+
+#define DQ_REGION_SHIFT (12)
+
+/* DPM */
+#define DQ_DPM_WQE_BUFF_SIZE (320)
+
+/* Conn type ranges */
+#define DQ_CONN_TYPE_RANGE_SHIFT (4)
+
+/*****************/
+/* QM CONSTANTS */
+/*****************/
+
+/* number of TX queues in the QM */
+#define MAX_QM_TX_QUEUES_K2 512
+#define MAX_QM_TX_QUEUES_BB 448
+#define MAX_QM_TX_QUEUES MAX_QM_TX_QUEUES_K2
+
+/* number of Other queues in the QM */
+#define MAX_QM_OTHER_QUEUES_BB 64
+#define MAX_QM_OTHER_QUEUES_K2 128
+#define MAX_QM_OTHER_QUEUES MAX_QM_OTHER_QUEUES_K2
+
+/* number of queues in a PF queue group */
+#define QM_PF_QUEUE_GROUP_SIZE 8
+
+/* the size of a single queue element in bytes */
+#define QM_PQ_ELEMENT_SIZE 4
+
+/* base number of Tx PQs in the CM PQ representation.
+ * should be used when storing PQ IDs in CM PQ registers and context
+ */
+#define CM_TX_PQ_BASE 0x200
+
+/* number of global Vport/QCN rate limiters */
+#define MAX_QM_GLOBAL_RLS 256
+
+/* QM registers data */
+#define QM_LINE_CRD_REG_WIDTH 16
+#define QM_LINE_CRD_REG_SIGN_BIT (1 << (QM_LINE_CRD_REG_WIDTH - 1))
+#define QM_BYTE_CRD_REG_WIDTH 24
+#define QM_BYTE_CRD_REG_SIGN_BIT (1 << (QM_BYTE_CRD_REG_WIDTH - 1))
+#define QM_WFQ_CRD_REG_WIDTH 32
+#define QM_WFQ_CRD_REG_SIGN_BIT (1 << (QM_WFQ_CRD_REG_WIDTH - 1))
+#define QM_RL_CRD_REG_WIDTH 32
+#define QM_RL_CRD_REG_SIGN_BIT (1 << (QM_RL_CRD_REG_WIDTH - 1))
+
+/*****************/
+/* CAU CONSTANTS */
+/*****************/
+
+#define CAU_FSM_ETH_RX 0
+#define CAU_FSM_ETH_TX 1
+
+/* Number of Protocol Indices per Status Block */
+#define PIS_PER_SB 12
+
+/* fsm is stopped or not valid for this sb */
+#define CAU_HC_STOPPED_STATE 3
+/* fsm is working without interrupt coalescing for this sb*/
+#define CAU_HC_DISABLE_STATE 4
+/* fsm is working with interrupt coalescing for this sb*/
+#define CAU_HC_ENABLE_STATE 0
+
+
+/*****************/
+/* IGU CONSTANTS */
+/*****************/
+
+#define MAX_SB_PER_PATH_K2 (368)
+#define MAX_SB_PER_PATH_BB (288)
+#define MAX_TOT_SB_PER_PATH \
+ MAX_SB_PER_PATH_K2
+
+#define MAX_SB_PER_PF_MIMD 129
+#define MAX_SB_PER_PF_SIMD 64
+#define MAX_SB_PER_VF 64
+
+/* Memory addresses on the BAR for the IGU Sub Block */
+#define IGU_MEM_BASE 0x0000
+
+#define IGU_MEM_MSIX_BASE 0x0000
+#define IGU_MEM_MSIX_UPPER 0x0101
+#define IGU_MEM_MSIX_RESERVED_UPPER 0x01ff
+
+#define IGU_MEM_PBA_MSIX_BASE 0x0200
+#define IGU_MEM_PBA_MSIX_UPPER 0x0202
+#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff
+
+#define IGU_CMD_INT_ACK_BASE 0x0400
+#define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \
+ MAX_TOT_SB_PER_PATH - \
+ 1)
+#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff
+
+#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0
+#define IGU_CMD_ATTN_BIT_SET_UPPER 0x05f1
+#define IGU_CMD_ATTN_BIT_CLR_UPPER 0x05f2
+
+#define IGU_REG_SISR_MDPC_WMASK_UPPER 0x05f3
+#define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER 0x05f4
+#define IGU_REG_SISR_MDPC_WMASK_MSB_UPPER 0x05f5
+#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05f6
+
+#define IGU_CMD_PROD_UPD_BASE 0x0600
+#define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\
+ MAX_TOT_SB_PER_PATH - \
+ 1)
+#define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff
+
+/*****************/
+/* PXP CONSTANTS */
+/*****************/
+
+/* Bars for Blocks */
+#define PXP_BAR_GRC 0
+#define PXP_BAR_TSDM 0
+#define PXP_BAR_USDM 0
+#define PXP_BAR_XSDM 0
+#define PXP_BAR_MSDM 0
+#define PXP_BAR_YSDM 0
+#define PXP_BAR_PSDM 0
+#define PXP_BAR_IGU 0
+#define PXP_BAR_DQ 1
+
+/* PTT and GTT */
+#define PXP_PER_PF_ENTRY_SIZE 8
+#define PXP_NUM_GLOBAL_WINDOWS 243
+#define PXP_GLOBAL_ENTRY_SIZE 4
+#define PXP_ADMIN_WINDOW_ALLOWED_LENGTH 4
+#define PXP_PF_WINDOW_ADMIN_START 0
+#define PXP_PF_WINDOW_ADMIN_LENGTH 0x1000
+#define PXP_PF_WINDOW_ADMIN_END (PXP_PF_WINDOW_ADMIN_START + \
+ PXP_PF_WINDOW_ADMIN_LENGTH - 1)
+#define PXP_PF_WINDOW_ADMIN_PER_PF_START 0
+#define PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH (PXP_NUM_PF_WINDOWS * \
+ PXP_PER_PF_ENTRY_SIZE)
+#define PXP_PF_WINDOW_ADMIN_PER_PF_END (PXP_PF_WINDOW_ADMIN_PER_PF_START + \
+ PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH - 1)
+#define PXP_PF_WINDOW_ADMIN_GLOBAL_START 0x200
+#define PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH (PXP_NUM_GLOBAL_WINDOWS * \
+ PXP_GLOBAL_ENTRY_SIZE)
+#define PXP_PF_WINDOW_ADMIN_GLOBAL_END \
+ (PXP_PF_WINDOW_ADMIN_GLOBAL_START + \
+ PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH - 1)
+#define PXP_PF_GLOBAL_PRETEND_ADDR 0x1f0
+#define PXP_PF_ME_OPAQUE_MASK_ADDR 0xf4
+#define PXP_PF_ME_OPAQUE_ADDR 0x1f8
+#define PXP_PF_ME_CONCRETE_ADDR 0x1fc
+
+#define PXP_NUM_PF_WINDOWS 12
+
+#define PXP_EXTERNAL_BAR_PF_WINDOW_START 0x1000
+#define PXP_EXTERNAL_BAR_PF_WINDOW_NUM PXP_NUM_PF_WINDOWS
+#define PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE 0x1000
+#define PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH \
+ (PXP_EXTERNAL_BAR_PF_WINDOW_NUM * \
+ PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE)
+#define PXP_EXTERNAL_BAR_PF_WINDOW_END \
+ (PXP_EXTERNAL_BAR_PF_WINDOW_START + \
+ PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH - 1)
+
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START \
+ (PXP_EXTERNAL_BAR_PF_WINDOW_END + 1)
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM PXP_NUM_GLOBAL_WINDOWS
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE 0x1000
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH \
+ (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM * \
+ PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE)
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_END \
+ (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \
+ PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
+
+/* PF BAR */
+#define PXP_BAR0_START_GRC 0x0000
+#define PXP_BAR0_GRC_LENGTH 0x1C00000
+#define PXP_BAR0_END_GRC \
+ (PXP_BAR0_START_GRC + PXP_BAR0_GRC_LENGTH - 1)
+
+#define PXP_BAR0_START_IGU 0x1C00000
+#define PXP_BAR0_IGU_LENGTH 0x10000
+#define PXP_BAR0_END_IGU \
+ (PXP_BAR0_START_IGU + PXP_BAR0_IGU_LENGTH - 1)
+
+#define PXP_BAR0_START_TSDM 0x1C80000
+#define PXP_BAR0_SDM_LENGTH 0x40000
+#define PXP_BAR0_SDM_RESERVED_LENGTH 0x40000
+#define PXP_BAR0_END_TSDM \
+ (PXP_BAR0_START_TSDM + PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_MSDM 0x1D00000
+#define PXP_BAR0_END_MSDM \
+ (PXP_BAR0_START_MSDM + PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_USDM 0x1D80000
+#define PXP_BAR0_END_USDM \
+ (PXP_BAR0_START_USDM + PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_XSDM 0x1E00000
+#define PXP_BAR0_END_XSDM \
+ (PXP_BAR0_START_XSDM + PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_YSDM 0x1E80000
+#define PXP_BAR0_END_YSDM \
+ (PXP_BAR0_START_YSDM + PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_PSDM 0x1F00000
+#define PXP_BAR0_END_PSDM \
+ (PXP_BAR0_START_PSDM + PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_FIRST_INVALID_ADDRESS \
+ (PXP_BAR0_END_PSDM + 1)
+
+#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
+#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024
+
+/* ILT Records */
+#define PXP_NUM_ILT_RECORDS_BB 7600
+#define PXP_NUM_ILT_RECORDS_K2 11000
+#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
+
+
+/* Host Interface */
+#define PXP_QUEUES_ZONE_MAX_NUM 320
+
+
+
+
+/*****************/
+/* PRM CONSTANTS */
+/*****************/
+#define PRM_DMA_PAD_BYTES_NUM 2
+/*****************/
+/* SDMs CONSTANTS */
+/*****************/
+
+
+#define SDM_OP_GEN_TRIG_NONE 0
+#define SDM_OP_GEN_TRIG_WAKE_THREAD 1
+#define SDM_OP_GEN_TRIG_AGG_INT 2
+#define SDM_OP_GEN_TRIG_LOADER 4
+#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6
+#define SDM_OP_GEN_TRIG_INC_ORDER_CNT 9
+
+/***********************************************************/
+/* Completion types */
+/***********************************************************/
+
+#define SDM_COMP_TYPE_NONE 0
+#define SDM_COMP_TYPE_WAKE_THREAD 1
+#define SDM_COMP_TYPE_AGG_INT 2
+/* Send direct message to local CM and/or remote CMs. Destinations are defined
+ * by vector in CompParams.
+ */
+#define SDM_COMP_TYPE_CM 3
+#define SDM_COMP_TYPE_LOADER 4
+/* Send direct message to PXP (like "internal write" command) to write to remote
+ * Storm RAM via remote SDM
+ */
+#define SDM_COMP_TYPE_PXP 5
+/* Indicate error per thread */
+#define SDM_COMP_TYPE_INDICATE_ERROR 6
+#define SDM_COMP_TYPE_RELEASE_THREAD 7
+/* Write to local RAM as a completion */
+#define SDM_COMP_TYPE_RAM 8
+#define SDM_COMP_TYPE_INC_ORDER_CNT 9 /* Applicable only for E4 */
+
+
+/******************/
+/* PBF CONSTANTS */
+/******************/
+
+/* Number of PBF command queue lines. Each line is 32B. */
+#define PBF_MAX_CMD_LINES 3328
+
+/* Number of BTB blocks. Each block is 256B. */
+#define BTB_MAX_BLOCKS 1440
+
+/*****************/
+/* PRS CONSTANTS */
+/*****************/
+
+#define PRS_GFT_CAM_LINES_NO_MATCH 31
+/* Async data KCQ CQE */
+struct async_data {
+ /* Context ID of the connection */
+ __le32 cid;
+ /* Task Id of the task (for an error that happened on a task) */
+ __le16 itid;
+ /* error code - relevant only if the opcode indicates an error */
+ u8 error_code;
+ /* internal fw debug parameter */
+ u8 fw_debug_param;
+};
+
+/*
+ * Interrupt coalescing TimeSet
+ */
+struct coalescing_timeset {
+ u8 value;
+/* Interrupt coalescing TimeSet (timeout_ticks = TimeSet shl (TimerRes+1)) */
+#define COALESCING_TIMESET_TIMESET_MASK 0x7F
+#define COALESCING_TIMESET_TIMESET_SHIFT 0
+/* Only if this flag is set, timeset will take effect */
+#define COALESCING_TIMESET_VALID_MASK 0x1
+#define COALESCING_TIMESET_VALID_SHIFT 7
+};
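The comment above gives the coalescing formula: the effective timeout is the 7-bit TimeSet shifted left by (TimerRes + 1), where TimerRes is the per-direction timer resolution carried in the CAU SB entry (CAU_SB_ENTRY_TIMER_RES0/1 further down). A small illustrative sketch of that arithmetic; the values are examples only, not driver defaults:

/* Illustrative only: timeout_ticks = TimeSet << (TimerRes + 1). */
static inline unsigned int cau_timeout_ticks(unsigned int timeset,
                                             unsigned int timer_res)
{
        return timeset << (timer_res + 1);
}
/* e.g. cau_timeout_ticks(25, 1) == 100, cau_timeout_ticks(0x7F, 2) == 1016 */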
+
+struct common_queue_zone {
+ __le16 ring_drv_data_consumer;
+ __le16 reserved;
+};
+
+/*
+ * ETH Rx producers data
+ */
+struct eth_rx_prod_data {
+ __le16 bd_prod /* BD producer. */;
+ __le16 cqe_prod /* CQE producer. */;
+};
+
+struct regpair {
+ __le32 lo /* low word for reg-pair */;
+ __le32 hi /* high word for reg-pair */;
+};
+
+/*
+ * Event Ring VF-PF Channel data
+ */
+struct vf_pf_channel_eqe_data {
+ struct regpair msg_addr /* VF-PF message address */;
+};
+
+struct iscsi_eqe_data {
+ __le32 cid /* Context ID of the connection */;
+ /* Task Id of the task (for an error that happened on a task) */
+ __le16 conn_id;
+/* error code - relevant only if the opcode indicates an error */
+ u8 error_code;
+ u8 error_pdu_opcode_reserved;
+/* The opcode of the processed PDU on which the error occurred - updated for
+ * specific error codes, 0xFF by default
+ */
+#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F
+#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_SHIFT 0
+/* Indicates that the error_pdu_opcode field holds a valid value */
+#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_VALID_MASK 0x1
+#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_VALID_SHIFT 6
+#define ISCSI_EQE_DATA_RESERVED0_MASK 0x1
+#define ISCSI_EQE_DATA_RESERVED0_SHIFT 7
+};
+
+/*
+ * Event Ring malicious VF data
+ */
+struct malicious_vf_eqe_data {
+ u8 vfId /* Malicious VF ID */;
+ u8 errId /* Malicious VF error */;
+ __le16 reserved[3];
+};
+
+/*
+ * Event Ring initial cleanup data
+ */
+struct initial_cleanup_eqe_data {
+ u8 vfId /* VF ID */;
+ u8 reserved[7];
+};
+
+/*
+ * Event Data Union
+ */
+union event_ring_data {
+ u8 bytes[8] /* Byte Array */;
+ struct vf_pf_channel_eqe_data vf_pf_channel /* VF-PF Channel data */;
+ struct iscsi_eqe_data iscsi_info /* Dedicated fields to iscsi data */;
+ struct regpair roceHandle /* Dedicated field for RDMA data */;
+ struct malicious_vf_eqe_data malicious_vf /* Malicious VF data */;
+ struct initial_cleanup_eqe_data vf_init_cleanup
+ /* VF Initial Cleanup data */;
+};
+/* Event Ring Entry */
+struct event_ring_entry {
+ u8 protocol_id /* Event Protocol ID */;
+ u8 opcode /* Event Opcode */;
+ __le16 reserved0 /* Reserved */;
+ __le16 echo /* Echo value from ramrod data on the host */;
+ u8 fw_return_code /* FW return code for SP ramrods */;
+ u8 flags;
+/* 0: synchronous EQE - a completion of SP message. 1: asynchronous EQE */
+#define EVENT_RING_ENTRY_ASYNC_MASK 0x1
+#define EVENT_RING_ENTRY_ASYNC_SHIFT 0
+#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F
+#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
+ union event_ring_data data;
+};
+
+/* Multi function mode */
+enum mf_mode {
+ ERROR_MODE /* Unsupported mode */,
+ MF_OVLAN /* Multi function based on outer VLAN */,
+ MF_NPAR /* Multi function based on MAC address (NIC partitioning) */,
+ MAX_MF_MODE
+};
+
+/* Per-protocol connection types */
+enum protocol_type {
+ PROTOCOLID_ISCSI /* iSCSI */,
+ PROTOCOLID_FCOE /* FCoE */,
+ PROTOCOLID_ROCE /* RoCE */,
+ PROTOCOLID_CORE /* Core (light L2, slow path core) */,
+ PROTOCOLID_ETH /* Ethernet */,
+ PROTOCOLID_IWARP /* iWARP */,
+ PROTOCOLID_TOE /* TOE */,
+ PROTOCOLID_PREROCE /* Pre (tapeout) RoCE */,
+ PROTOCOLID_COMMON /* ProtocolCommon */,
+ PROTOCOLID_TCP /* TCP */,
+ MAX_PROTOCOL_TYPE
+};
+
+
+
+/*
+ * Ustorm Queue Zone
+ */
+struct ustorm_eth_queue_zone {
+/* Rx interrupt coalescing TimeSet */
+ struct coalescing_timeset int_coalescing_timeset;
+ u8 reserved[3];
+};
+
+
+struct ustorm_queue_zone {
+ struct ustorm_eth_queue_zone eth;
+ struct common_queue_zone common;
+};
+
+/* status block structure */
+struct cau_pi_entry {
+ __le32 prod;
+/* A per-protocol index PROD value. */
+#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF
+#define CAU_PI_ENTRY_PROD_VAL_SHIFT 0
+/* This value determines the TimeSet that the PI is associated with */
+#define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F
+#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16
+/* Select the FSM within the SB */
+#define CAU_PI_ENTRY_FSM_SEL_MASK 0x1
+#define CAU_PI_ENTRY_FSM_SEL_SHIFT 23
+/* Reserved */
+#define CAU_PI_ENTRY_RESERVED_MASK 0xFF
+#define CAU_PI_ENTRY_RESERVED_SHIFT 24
+};
+
+/* status block structure */
+struct cau_sb_entry {
+ __le32 data;
+/* The SB PROD index which is sent to the IGU. */
+#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF
+#define CAU_SB_ENTRY_SB_PROD_SHIFT 0
+#define CAU_SB_ENTRY_STATE0_MASK 0xF /* RX state */
+#define CAU_SB_ENTRY_STATE0_SHIFT 24
+#define CAU_SB_ENTRY_STATE1_MASK 0xF /* TX state */
+#define CAU_SB_ENTRY_STATE1_SHIFT 28
+ __le32 params;
+/* Indicates the RX TimeSet that this SB is associated with. */
+#define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F
+#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0
+/* Indicates the TX TimeSet that this SB is associated with. */
+#define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F
+#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7
+/* This value will determine the RX FSM timer resolution in ticks */
+#define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3
+#define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14
+/* This value will determine the TX FSM timer resolution in ticks */
+#define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3
+#define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16
+#define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF
+#define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18
+#define CAU_SB_ENTRY_VF_VALID_MASK 0x1
+#define CAU_SB_ENTRY_VF_VALID_SHIFT 26
+#define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF
+#define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27
+/* If set then indicates that the TPH STAG is equal to the SB number. Otherwise
+ * the STAG will be equal to all ones.
+ */
+#define CAU_SB_ENTRY_TPH_MASK 0x1
+#define CAU_SB_ENTRY_TPH_SHIFT 31
+};
+
+/* core doorbell data */
+struct core_db_data {
+ u8 params;
+/* destination of doorbell (use enum db_dest) */
+#define CORE_DB_DATA_DEST_MASK 0x3
+#define CORE_DB_DATA_DEST_SHIFT 0
+/* aggregative command to CM (use enum db_agg_cmd_sel) */
+#define CORE_DB_DATA_AGG_CMD_MASK 0x3
+#define CORE_DB_DATA_AGG_CMD_SHIFT 2
+#define CORE_DB_DATA_BYPASS_EN_MASK 0x1 /* enable QM bypass */
+#define CORE_DB_DATA_BYPASS_EN_SHIFT 4
+#define CORE_DB_DATA_RESERVED_MASK 0x1
+#define CORE_DB_DATA_RESERVED_SHIFT 5
+/* aggregative value selection */
+#define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6
+/* bit for every DQ counter flags in CM context that DQ can increment */
+ u8 agg_flags;
+ __le16 spq_prod;
+};
+
+/* Enum of doorbell aggregative command selection */
+enum db_agg_cmd_sel {
+ DB_AGG_CMD_NOP /* No operation */,
+ DB_AGG_CMD_SET /* Set the value */,
+ DB_AGG_CMD_ADD /* Add the value */,
+ DB_AGG_CMD_MAX /* Set max of current and new value */,
+ MAX_DB_AGG_CMD_SEL
+};
+
+/* Enum of doorbell destination */
+enum db_dest {
+ DB_DEST_XCM /* TX doorbell to XCM */,
+ DB_DEST_UCM /* RX doorbell to UCM */,
+ DB_DEST_TCM /* RX doorbell to TCM */,
+ DB_NUM_DESTINATIONS,
+ MAX_DB_DEST
+};
+
+
+/*
+ * Enum of doorbell DPM types
+ */
+enum db_dpm_type {
+ DPM_LEGACY /* Legacy DPM- to Xstorm RAM */,
+ DPM_RDMA /* RDMA DPM (only RoCE in E4) - to NIG */,
+/* L2 DPM inline- to PBF, with packet data on doorbell */
+ DPM_L2_INLINE,
+ DPM_L2_BD /* L2 DPM with BD- to PBF, with TX BD data on doorbell */,
+ MAX_DB_DPM_TYPE
+};
+
+/*
+ * Structure for doorbell data, in L2 DPM mode, for the first doorbell in a DPM
+ * burst
+ */
+struct db_l2_dpm_data {
+ __le16 icid /* internal CID */;
+ __le16 bd_prod /* bd producer value to update */;
+ __le32 params;
+/* Size in QWORD-s of the DPM burst */
+#define DB_L2_DPM_DATA_SIZE_MASK 0x3F
+#define DB_L2_DPM_DATA_SIZE_SHIFT 0
+/* Type of DPM transaction (DPM_L2_INLINE or DPM_L2_BD) (use enum db_dpm_type)
+ */
+#define DB_L2_DPM_DATA_DPM_TYPE_MASK 0x3
+#define DB_L2_DPM_DATA_DPM_TYPE_SHIFT 6
+#define DB_L2_DPM_DATA_NUM_BDS_MASK 0xFF /* number of BD-s */
+#define DB_L2_DPM_DATA_NUM_BDS_SHIFT 8
+/* size of the packet to be transmitted in bytes */
+#define DB_L2_DPM_DATA_PKT_SIZE_MASK 0x7FF
+#define DB_L2_DPM_DATA_PKT_SIZE_SHIFT 16
+#define DB_L2_DPM_DATA_RESERVED0_MASK 0x1
+#define DB_L2_DPM_DATA_RESERVED0_SHIFT 27
+/* In DPM_L2_BD mode: the number of SGE-s */
+#define DB_L2_DPM_DATA_SGE_NUM_MASK 0x7
+#define DB_L2_DPM_DATA_SGE_NUM_SHIFT 28
+#define DB_L2_DPM_DATA_RESERVED1_MASK 0x1
+#define DB_L2_DPM_DATA_RESERVED1_SHIFT 31
+};
+
+/*
+ * Structure for SGE in a DPM doorbell of type DPM_L2_BD
+ */
+struct db_l2_dpm_sge {
+ struct regpair addr /* Single continuous buffer */;
+ __le16 nbytes /* Number of bytes in this BD. */;
+ __le16 bitfields;
+/* The TPH STAG index value */
+#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK 0x1FF
+#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0
+#define DB_L2_DPM_SGE_RESERVED0_MASK 0x3
+#define DB_L2_DPM_SGE_RESERVED0_SHIFT 9
+/* Indicate if ST hint is requested or not */
+#define DB_L2_DPM_SGE_ST_VALID_MASK 0x1
+#define DB_L2_DPM_SGE_ST_VALID_SHIFT 11
+#define DB_L2_DPM_SGE_RESERVED1_MASK 0xF
+#define DB_L2_DPM_SGE_RESERVED1_SHIFT 12
+ __le32 reserved2;
+};
+
+/* Structure for doorbell address, in legacy mode */
+struct db_legacy_addr {
+ __le32 addr;
+#define DB_LEGACY_ADDR_RESERVED0_MASK 0x3
+#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0
+/* doorbell extraction mode specifier- 0 if not used */
+#define DB_LEGACY_ADDR_DEMS_MASK 0x7
+#define DB_LEGACY_ADDR_DEMS_SHIFT 2
+#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF /* internal CID */
+#define DB_LEGACY_ADDR_ICID_SHIFT 5
+};
+
+/*
+ * Structure for doorbell address, in PWM mode
+ */
+struct db_pwm_addr {
+ __le32 addr;
+#define DB_PWM_ADDR_RESERVED0_MASK 0x7
+#define DB_PWM_ADDR_RESERVED0_SHIFT 0
+/* Offset in PWM address space */
+#define DB_PWM_ADDR_OFFSET_MASK 0x7F
+#define DB_PWM_ADDR_OFFSET_SHIFT 3
+#define DB_PWM_ADDR_WID_MASK 0x3 /* Window ID */
+#define DB_PWM_ADDR_WID_SHIFT 10
+#define DB_PWM_ADDR_DPI_MASK 0xFFFF /* Doorbell page ID */
+#define DB_PWM_ADDR_DPI_SHIFT 12
+#define DB_PWM_ADDR_RESERVED1_MASK 0xF
+#define DB_PWM_ADDR_RESERVED1_SHIFT 28
+};
+
+/*
+ * Parameters to RDMA firmware, passed in EDPM doorbell
+ */
+struct db_rdma_dpm_params {
+ __le32 params;
+/* Size in QWORD-s of the DPM burst */
+#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F
+#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0
+/* Type of DPM transaction (DPM_RDMA) (use enum db_dpm_type) */
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6
+/* opcode for RDMA operation */
+#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF
+#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8
+/* the size of the WQE payload in bytes */
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16
+#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27
+/* RoCE completion flag */
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
+#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 /* RoCE S flag */
+#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29
+#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x3
+#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30
+};
+
+/*
+ * Structure for doorbell data, in RDMA DPM mode, for the first doorbell in a
+ * DPM burst
+ */
+struct db_rdma_dpm_data {
+ __le16 icid /* internal CID */;
+ __le16 prod_val /* aggregated value to update */;
+/* parameters passed to RDMA firmware */
+ struct db_rdma_dpm_params params;
+};
+
+/* Igu interrupt command */
+enum igu_int_cmd {
+ IGU_INT_ENABLE = 0,
+ IGU_INT_DISABLE = 1,
+ IGU_INT_NOP = 2,
+ IGU_INT_NOP2 = 3,
+ MAX_IGU_INT_CMD
+};
+
+/* IGU producer or consumer update command */
+struct igu_prod_cons_update {
+ __le32 sb_id_and_flags;
+#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF
+#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0
+#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1
+#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24
+/* interrupt enable/disable/nop (use enum igu_int_cmd) */
+#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3
+#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25
+/* (use enum igu_seg_access) */
+#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK 0x1
+#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27
+#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK 0x1
+#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT 28
+#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK 0x3
+#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29
+/* must always be set cleared (use enum command_type_bit) */
+#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1
+#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31
+ __le32 reserved1;
+};
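The sb_id_and_flags word is composed from the mask/shift pairs above. A minimal sketch (not part of the patch; the helper name is hypothetical, and doing the packing in host order before a final CPU-to-LE conversion by the caller is an assumption):

/* Hypothetical helper: compose the host-order command word for an interrupt
 * ack that updates the SB index and enables/disables the interrupt.  The
 * caller is assumed to convert the result to little-endian before writing.
 */
static inline u32 example_igu_cmd_word(u32 sb_index, enum igu_int_cmd int_cmd)
{
	u32 cmd = 0;

	cmd |= (sb_index & IGU_PROD_CONS_UPDATE_SB_INDEX_MASK) <<
	       IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT;
	cmd |= (1 & IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK) <<
	       IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT;
	cmd |= ((u32)int_cmd & IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK) <<
	       IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT;

	return cmd;
}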
+
+/* Igu segments access for default status block only */
+enum igu_seg_access {
+ IGU_SEG_ACCESS_REG = 0,
+ IGU_SEG_ACCESS_ATTN = 1,
+ MAX_IGU_SEG_ACCESS
+};
+
+
+/*
+ * Enumeration for the L3 type field of parsing_and_err_flags_union. L3Type:
+ * 0 - unknown (not IP), 1 - IPv4, 2 - IPv6 (this field can be filled according
+ * to the last ethertype)
+ */
+enum l3_type {
+ e_l3Type_unknown,
+ e_l3Type_ipv4,
+ e_l3Type_ipv6,
+ MAX_L3_TYPE
+};
+
+
+/*
+ * Enumeration for the l4Protocol field of parsing_and_err_flags_union.
+ * L4-protocol: 0 - none, 1 - TCP, 2 - UDP. If the packet is an IPv4 fragment
+ * and it is not the first fragment, the protocol type should be set to none.
+ */
+enum l4_protocol {
+ e_l4Protocol_none,
+ e_l4Protocol_tcp,
+ e_l4Protocol_udp,
+ MAX_L4_PROTOCOL
+};
+
+
+/*
+ * Parsing and error flags field.
+ */
+struct parsing_and_err_flags {
+ __le16 flags;
+/* L3Type: 0 - unknown (not IP), 1 - IPv4, 2 - IPv6 (this field can be filled
+ * according to the last ethertype) (use enum l3_type)
+ */
+#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3
+#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0
+/* L4-protocol: 0 - none, 1 - TCP, 2 - UDP. If the packet is an IPv4 fragment
+ * and it is not the first fragment, the protocol type should be set to none.
+ * (use enum l4_protocol)
+ */
+#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3
+#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2
+/* Set if the packet is IPv4 fragment. */
+#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4
+/* Set if VLAN tag exists. Invalid if tunnel type is IP GRE or IP GENEVE. */
+#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5
+/* Set if L4 checksum was calculated. */
+#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6
+/* Set for PTP packet. */
+#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7
+/* Set if PTP timestamp recorded. */
+#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8
+/* Set if either version-mismatch or hdr-len-error or ipv4-cksm is set or ipv6
+ * ver mismatch
+ */
+#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9
+/* Set if L4 checksum validation failed. Valid only if L4 checksum was
+ * calculated.
+ */
+#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10
+/* Set if GRE/VXLAN/GENEVE tunnel detected. */
+#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11
+/* Set if VLAN tag exists in tunnel header. */
+#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12
+/* Set if either tunnel-ipv4-version-mismatch or tunnel-ipv4-hdr-len-error or
+ * tunnel-ipv4-cksm is set or tunneling ipv6 ver mismatch
+ */
+#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13
+/* Set if GRE or VXLAN/GENEVE UDP checksum was calculated. */
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14
+/* Set if tunnel L4 checksum validation failed. Valid only if tunnel L4 checksum
+ * was calculated.
+ */
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15
+};
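The mask/shift pairs above are intended to be used together when classifying a received packet. A minimal sketch (not part of the patch; the helper names are hypothetical and the __le16 flags value is assumed to have already been converted to host byte order):

/* Hypothetical helpers operating on a host-order copy of the flags word. */
static inline enum l3_type example_get_l3_type(u16 flags)
{
	return (enum l3_type)((flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
			      PARSING_AND_ERR_FLAGS_L3TYPE_MASK);
}

static inline int example_l4_csum_ok(u16 flags)
{
	int calculated = (flags >>
			  PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) &
			 PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK;
	int error = (flags >> PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT) &
		    PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK;

	/* The error bit is only valid when the checksum was calculated. */
	return calculated && !error;
}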
+
+
+/*
+ * Parsing error flags bitmap.
+ */
+struct parsing_err_flags {
+ __le16 flags;
+/* MAC error indication */
+#define PARSING_ERR_FLAGS_MAC_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_MAC_ERROR_SHIFT 0
+/* truncation error indication */
+#define PARSING_ERR_FLAGS_TRUNC_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_TRUNC_ERROR_SHIFT 1
+/* packet too small indication */
+#define PARSING_ERR_FLAGS_PKT_TOO_SMALL_MASK 0x1
+#define PARSING_ERR_FLAGS_PKT_TOO_SMALL_SHIFT 2
+/* Header Missing Tag */
+#define PARSING_ERR_FLAGS_ANY_HDR_MISSING_TAG_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_MISSING_TAG_SHIFT 3
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_VER_MISMTCH_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_VER_MISMTCH_SHIFT 4
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_V4_HDR_LEN_TOO_SMALL_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_V4_HDR_LEN_TOO_SMALL_SHIFT 5
+/* Set this error if: 1. the total-len is smaller than hdr-len, 2. the
+ * total-ip-len indicates a number that is bigger than the real packet length,
+ * 3. tunneling: the total-ip-length of the outer header points to an offset
+ * that is smaller than the one pointed to by the total-ip-len of the inner
+ * header.
+ */
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_BAD_TOTAL_LEN_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_BAD_TOTAL_LEN_SHIFT 6
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_SHIFT 7
+/* from frame cracker output. for either TCP or UDP */
+#define PARSING_ERR_FLAGS_ANY_HDR_L4_IP_LEN_MISMTCH_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_L4_IP_LEN_MISMTCH_SHIFT 8
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_ZERO_UDP_IP_V6_CHKSM_MASK 0x1
+#define PARSING_ERR_FLAGS_ZERO_UDP_IP_V6_CHKSM_SHIFT 9
+/* Checksum was calculated and the value isn't 0xffff, or the L4 checksum was
+ * not calculated for any reason (e.g. the UDP/IPv4 checksum is 0).
+ */
+#define PARSING_ERR_FLAGS_INNER_L4_CHKSM_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_INNER_L4_CHKSM_ERROR_SHIFT 10
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_ANY_HDR_ZERO_TTL_OR_HOP_LIM_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_ZERO_TTL_OR_HOP_LIM_SHIFT 11
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_NON_8021Q_TAG_EXISTS_IN_BOTH_HDRS_MASK 0x1
+#define PARSING_ERR_FLAGS_NON_8021Q_TAG_EXISTS_IN_BOTH_HDRS_SHIFT 12
+/* set if the GENEVE option size was over 32 bytes */
+#define PARSING_ERR_FLAGS_GENEVE_OPTION_OVERSIZED_MASK 0x1
+#define PARSING_ERR_FLAGS_GENEVE_OPTION_OVERSIZED_SHIFT 13
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_TUNNEL_IP_V4_CHKSM_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_TUNNEL_IP_V4_CHKSM_ERROR_SHIFT 14
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_SHIFT 15
+};
+
+
+/*
+ * Pb context
+ */
+struct pb_context {
+ __le32 crc[4];
+};
+
+/* Concrete Function ID. */
+struct pxp_concrete_fid {
+ __le16 fid;
+#define PXP_CONCRETE_FID_PFID_MASK 0xF /* Parent PFID */
+#define PXP_CONCRETE_FID_PFID_SHIFT 0
+#define PXP_CONCRETE_FID_PORT_MASK 0x3 /* port number */
+#define PXP_CONCRETE_FID_PORT_SHIFT 4
+#define PXP_CONCRETE_FID_PATH_MASK 0x1 /* path number */
+#define PXP_CONCRETE_FID_PATH_SHIFT 6
+#define PXP_CONCRETE_FID_VFVALID_MASK 0x1
+#define PXP_CONCRETE_FID_VFVALID_SHIFT 7
+#define PXP_CONCRETE_FID_VFID_MASK 0xFF
+#define PXP_CONCRETE_FID_VFID_SHIFT 8
+};
+
+struct pxp_pretend_concrete_fid {
+ __le16 fid;
+#define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF
+#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT 0
+#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7
+#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4
+#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK 0x1
+#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7
+#define PXP_PRETEND_CONCRETE_FID_VFID_MASK 0xFF
+#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8
+};
+
+union pxp_pretend_fid {
+ struct pxp_pretend_concrete_fid concrete_fid;
+ __le16 opaque_fid;
+};
+
+/* Pxp Pretend Command Register. */
+struct pxp_pretend_cmd {
+ union pxp_pretend_fid fid;
+ __le16 control;
+#define PXP_PRETEND_CMD_PATH_MASK 0x1
+#define PXP_PRETEND_CMD_PATH_SHIFT 0
+#define PXP_PRETEND_CMD_USE_PORT_MASK 0x1
+#define PXP_PRETEND_CMD_USE_PORT_SHIFT 1
+#define PXP_PRETEND_CMD_PORT_MASK 0x3
+#define PXP_PRETEND_CMD_PORT_SHIFT 2
+#define PXP_PRETEND_CMD_RESERVED0_MASK 0xF
+#define PXP_PRETEND_CMD_RESERVED0_SHIFT 4
+#define PXP_PRETEND_CMD_RESERVED1_MASK 0xF
+#define PXP_PRETEND_CMD_RESERVED1_SHIFT 8
+#define PXP_PRETEND_CMD_PRETEND_PATH_MASK 0x1
+#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT 12
+#define PXP_PRETEND_CMD_PRETEND_PORT_MASK 0x1
+#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT 13
+#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK 0x1
+#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14
+#define PXP_PRETEND_CMD_IS_CONCRETE_MASK 0x1
+#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15
+};
+
+/* PTT Record in PXP Admin Window. */
+struct pxp_ptt_entry {
+ __le32 offset;
+#define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF
+#define PXP_PTT_ENTRY_OFFSET_SHIFT 0
+#define PXP_PTT_ENTRY_RESERVED0_MASK 0x1FF
+#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23
+ struct pxp_pretend_cmd pretend;
+};
+
+
+/*
+ * VF Zone A Permission Register.
+ */
+struct pxp_vf_zone_a_permission {
+ __le32 control;
+#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK 0xFF
+#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT 0
+#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK 0x1
+#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT 8
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK 0x7F
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK 0xFFFF
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16
+};
+
+
+/*
+ * Rdif context
+ */
+struct rdif_task_context {
+ __le32 initialRefTag;
+ __le16 appTagValue;
+ __le16 appTagMask;
+ u8 flags0;
+#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0
+#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1
+#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1
+/* 0 = IP checksum, 1 = CRC */
+#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1
+#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2
+#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1
+#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3
+/* 1/2/3 - Protection Type */
+#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3
+#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4
+/* 0=0x0000, 1=0xffff */
+#define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
+#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
+/* Keep reference tag constant */
+#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1
+#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 7
+ u8 partialDifData[7];
+ __le16 partialCrcValue;
+ __le16 partialChecksumValue;
+ __le32 offsetInIO;
+ __le16 flags1;
+#define RDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1
+#define RDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0
+#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1
+#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2
+#define RDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3
+#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4
+#define RDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5
+/* 0=512B, 1=1KB, 2=2KB, 3=4KB, 4=8KB */
+#define RDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7
+#define RDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6
+/* 0=None, 1=DIF, 2=DIX */
+#define RDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3
+#define RDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9
+/* DIF tag right at the beginning of DIF interval */
+#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1
+#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11
+#define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1
+#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12
+/* 0=None, 1=DIF */
+#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1
+#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13
+/* Forward application tag with mask */
+#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 14
+/* Forward reference tag with mask */
+#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 15
+ __le16 state;
+#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_MASK 0xF
+#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_SHIFT 0
+#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_MASK 0xF
+#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_SHIFT 4
+#define RDIF_TASK_CONTEXT_ERRORINIO_MASK 0x1
+#define RDIF_TASK_CONTEXT_ERRORINIO_SHIFT 8
+#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1
+#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9
+/* mask for reference tag handling */
+#define RDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF
+#define RDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 10
+#define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3
+#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14
+ __le32 reserved2;
+};
+
+/* RSS hash type */
+enum rss_hash_type {
+ RSS_HASH_TYPE_DEFAULT = 0,
+ RSS_HASH_TYPE_IPV4 = 1,
+ RSS_HASH_TYPE_TCP_IPV4 = 2,
+ RSS_HASH_TYPE_IPV6 = 3,
+ RSS_HASH_TYPE_TCP_IPV6 = 4,
+ RSS_HASH_TYPE_UDP_IPV4 = 5,
+ RSS_HASH_TYPE_UDP_IPV6 = 6,
+ MAX_RSS_HASH_TYPE
+};
+
+/* status block structure */
+struct status_block {
+ __le16 pi_array[PIS_PER_SB];
+ __le32 sb_num;
+#define STATUS_BLOCK_SB_NUM_MASK 0x1FF
+#define STATUS_BLOCK_SB_NUM_SHIFT 0
+#define STATUS_BLOCK_ZERO_PAD_MASK 0x7F
+#define STATUS_BLOCK_ZERO_PAD_SHIFT 9
+#define STATUS_BLOCK_ZERO_PAD2_MASK 0xFFFF
+#define STATUS_BLOCK_ZERO_PAD2_SHIFT 16
+ __le32 prod_index;
+#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF
+#define STATUS_BLOCK_PROD_INDEX_SHIFT 0
+#define STATUS_BLOCK_ZERO_PAD3_MASK 0xFF
+#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24
+};
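A minimal sketch (not part of the patch; the helper name is hypothetical and the __le32-to-host-order conversion is assumed to be done elsewhere) of reading the 24-bit producer index out of a DMA'd status block:

static inline u32 example_sb_prod_index(const struct status_block *p_sb)
{
	/* prod_index is little-endian in memory; a host-order copy is
	 * assumed here for brevity.
	 */
	u32 prod = (u32)p_sb->prod_index;

	return (prod >> STATUS_BLOCK_PROD_INDEX_SHIFT) &
		STATUS_BLOCK_PROD_INDEX_MASK;
}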
+
+
+/* VF BAR */
+#define PXP_VF_BAR0 0
+
+#define PXP_VF_BAR0_START_GRC 0x3E00
+#define PXP_VF_BAR0_GRC_LENGTH 0x200
+#define PXP_VF_BAR0_END_GRC \
+(PXP_VF_BAR0_START_GRC + PXP_VF_BAR0_GRC_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_IGU 0
+#define PXP_VF_BAR0_IGU_LENGTH 0x3000
+#define PXP_VF_BAR0_END_IGU \
+(PXP_VF_BAR0_START_IGU + PXP_VF_BAR0_IGU_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_DQ 0x3000
+#define PXP_VF_BAR0_DQ_LENGTH 0x200
+#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0
+#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS \
+(PXP_VF_BAR0_START_DQ + PXP_VF_BAR0_DQ_OPAQUE_OFFSET)
+#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS \
+(PXP_VF_BAR0_ME_OPAQUE_ADDRESS + 4)
+#define PXP_VF_BAR0_END_DQ \
+(PXP_VF_BAR0_START_DQ + PXP_VF_BAR0_DQ_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200
+#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200
+#define PXP_VF_BAR0_END_TSDM_ZONE_B \
+(PXP_VF_BAR0_START_TSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400
+#define PXP_VF_BAR0_END_MSDM_ZONE_B \
+(PXP_VF_BAR0_START_MSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600
+#define PXP_VF_BAR0_END_USDM_ZONE_B \
+(PXP_VF_BAR0_START_USDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800
+#define PXP_VF_BAR0_END_XSDM_ZONE_B \
+(PXP_VF_BAR0_START_XSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00
+#define PXP_VF_BAR0_END_YSDM_ZONE_B \
+(PXP_VF_BAR0_START_YSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00
+#define PXP_VF_BAR0_END_PSDM_ZONE_B \
+(PXP_VF_BAR0_START_PSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000
+#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000
+
+#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32
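Taken together, the defines above describe the VF BAR0 layout: IGU at 0x0000, DQ at 0x3000, the per-storm zone-B windows from 0x3200 through 0x3DFF, GRC at 0x3E00, and SDM zone A from 0x4000 up to 0x10000. A minimal sketch (not part of the patch; the helper name is hypothetical) of a range check built from these constants:

static inline int example_vf_bar0_offset_in_grc(u32 offset)
{
	/* True if the VF BAR0 offset falls inside the GRC window. */
	return offset >= PXP_VF_BAR0_START_GRC &&
	       offset <= PXP_VF_BAR0_END_GRC;
}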
+
+/*
+ * Tdif context
+ */
+struct tdif_task_context {
+ __le32 initialRefTag;
+ __le16 appTagValue;
+ __le16 appTagMask;
+ __le16 partialCrcValueB;
+ __le16 partialChecksumValueB;
+ __le16 stateB;
+#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_MASK 0xF
+#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_SHIFT 0
+#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_MASK 0xF
+#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_SHIFT 4
+#define TDIF_TASK_CONTEXT_ERRORINIOB_MASK 0x1
+#define TDIF_TASK_CONTEXT_ERRORINIOB_SHIFT 8
+#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1
+#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9
+#define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F
+#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10
+ u8 reserved1;
+ u8 flags0;
+#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0
+#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1
+#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1
+/* 0 = IP checksum, 1 = CRC */
+#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1
+#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2
+#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1
+#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3
+/* 1/2/3 - Protection Type */
+#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3
+#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4
+/* 0=0x0000, 1=0xffff */
+#define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
+#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
+#define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1
+#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7
+ __le32 flags1;
+#define TDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1
+#define TDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0
+#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1
+#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2
+#define TDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3
+#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4
+#define TDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5
+/* 0=512B, 1=1KB, 2=2KB, 3=4KB, 4=8KB */
+#define TDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7
+#define TDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6
+/* 0=None, 1=DIF, 2=DIX */
+#define TDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3
+#define TDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9
+/* DIF tag right at the beginning of DIF interval */
+#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1
+#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11
+/* reserved */
+#define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1
+#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12
+/* 0=None, 1=DIF */
+#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1
+#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13
+#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_MASK 0xF
+#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_SHIFT 14
+#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_MASK 0xF
+#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_SHIFT 18
+#define TDIF_TASK_CONTEXT_ERRORINIOA_MASK 0x1
+#define TDIF_TASK_CONTEXT_ERRORINIOA_SHIFT 22
+#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_MASK 0x1
+#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_SHIFT 23
+/* mask for reference tag handling */
+#define TDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF
+#define TDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 24
+/* Forward application tag with mask */
+#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 28
+/* Forward reference tag with mask */
+#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 29
+/* Keep reference tag constant */
+#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1
+#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 30
+#define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1
+#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31
+ __le32 offsetInIOB;
+ __le16 partialCrcValueA;
+ __le16 partialChecksumValueA;
+ __le32 offsetInIOA;
+ u8 partialDifDataA[8];
+ u8 partialDifDataB[8];
+};
+
+
+/*
+ * Timers context
+ */
+struct timers_context {
+ __le32 logical_client_0;
+/* Expiration time of logical client 0 */
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0x7FFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0
+#define TIMERS_CONTEXT_RESERVED0_MASK 0x1
+#define TIMERS_CONTEXT_RESERVED0_SHIFT 27
+/* Valid bit of logical client 0 */
+#define TIMERS_CONTEXT_VALIDLC0_MASK 0x1
+#define TIMERS_CONTEXT_VALIDLC0_SHIFT 28
+/* Active bit of logical client 0 */
+#define TIMERS_CONTEXT_ACTIVELC0_MASK 0x1
+#define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29
+#define TIMERS_CONTEXT_RESERVED1_MASK 0x3
+#define TIMERS_CONTEXT_RESERVED1_SHIFT 30
+ __le32 logical_client_1;
+/* Expiration time of logical client 1 */
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0x7FFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0
+#define TIMERS_CONTEXT_RESERVED2_MASK 0x1
+#define TIMERS_CONTEXT_RESERVED2_SHIFT 27
+/* Valid bit of logical client 1 */
+#define TIMERS_CONTEXT_VALIDLC1_MASK 0x1
+#define TIMERS_CONTEXT_VALIDLC1_SHIFT 28
+/* Active bit of logical client 1 */
+#define TIMERS_CONTEXT_ACTIVELC1_MASK 0x1
+#define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29
+#define TIMERS_CONTEXT_RESERVED3_MASK 0x3
+#define TIMERS_CONTEXT_RESERVED3_SHIFT 30
+ __le32 logical_client_2;
+/* Expiration time of logical client 2 */
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0x7FFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0
+#define TIMERS_CONTEXT_RESERVED4_MASK 0x1
+#define TIMERS_CONTEXT_RESERVED4_SHIFT 27
+/* Valid bit of logical client 2 */
+#define TIMERS_CONTEXT_VALIDLC2_MASK 0x1
+#define TIMERS_CONTEXT_VALIDLC2_SHIFT 28
+/* Active bit of logical client 2 */
+#define TIMERS_CONTEXT_ACTIVELC2_MASK 0x1
+#define TIMERS_CONTEXT_ACTIVELC2_SHIFT 29
+#define TIMERS_CONTEXT_RESERVED5_MASK 0x3
+#define TIMERS_CONTEXT_RESERVED5_SHIFT 30
+ __le32 host_expiration_fields;
+/* Expiration time on host (closest one) */
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_MASK 0x7FFFFFF
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_SHIFT 0
+#define TIMERS_CONTEXT_RESERVED6_MASK 0x1
+#define TIMERS_CONTEXT_RESERVED6_SHIFT 27
+/* Valid bit of host expiration */
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_MASK 0x1
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_SHIFT 28
+#define TIMERS_CONTEXT_RESERVED7_MASK 0x7
+#define TIMERS_CONTEXT_RESERVED7_SHIFT 29
+};
+
+
+/*
+ * Enum for next_protocol field of tunnel_parsing_flags
+ */
+enum tunnel_next_protocol {
+ e_unknown = 0,
+ e_l2 = 1,
+ e_ipv4 = 2,
+ e_ipv6 = 3,
+ MAX_TUNNEL_NEXT_PROTOCOL
+};
+
+#endif /* __COMMON_HSI__ */
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore.h b/src/seastar/dpdk/drivers/net/qede/base/ecore.h
new file mode 100644
index 00000000..80b11a4c
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore.h
@@ -0,0 +1,868 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_H
+#define __ECORE_H
+
+/* @DPDK */
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#define CONFIG_ECORE_BINARY_FW
+#undef CONFIG_ECORE_ZIPPED_FW
+
+#ifdef CONFIG_ECORE_ZIPPED_FW
+#include <zlib.h>
+#endif
+
+#include "ecore_hsi_common.h"
+#include "ecore_hsi_debug_tools.h"
+#include "ecore_hsi_init_func.h"
+#include "ecore_hsi_init_tool.h"
+#include "ecore_proto_if.h"
+#include "mcp_public.h"
+
+#define ECORE_MAJOR_VERSION 8
+#define ECORE_MINOR_VERSION 18
+#define ECORE_REVISION_VERSION 7
+#define ECORE_ENGINEERING_VERSION 0
+
+#define ECORE_VERSION \
+ ((ECORE_MAJOR_VERSION << 24) | (ECORE_MINOR_VERSION << 16) | \
+ (ECORE_REVISION_VERSION << 8) | ECORE_ENGINEERING_VERSION)
+
+#define STORM_FW_VERSION \
+ ((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \
+ (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION)
+
+#define MAX_HWFNS_PER_DEVICE 2
+#define NAME_SIZE 128 /* @DPDK */
+#define ECORE_WFQ_UNIT 100
+#include "../qede_logs.h" /* @DPDK */
+
+#define ISCSI_BDQ_ID(_port_id) (_port_id)
+#define FCOE_BDQ_ID(_port_id) (_port_id + 2)
+/* Constants */
+#define ECORE_WID_SIZE (1024)
+
+/* Configurable */
+#define ECORE_PF_DEMS_SIZE (4)
+
+/* cau states */
+enum ecore_coalescing_mode {
+ ECORE_COAL_MODE_DISABLE,
+ ECORE_COAL_MODE_ENABLE
+};
+
+enum ecore_nvm_cmd {
+ ECORE_PUT_FILE_BEGIN = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN,
+ ECORE_PUT_FILE_DATA = DRV_MSG_CODE_NVM_PUT_FILE_DATA,
+ ECORE_NVM_READ_NVRAM = DRV_MSG_CODE_NVM_READ_NVRAM,
+ ECORE_NVM_WRITE_NVRAM = DRV_MSG_CODE_NVM_WRITE_NVRAM,
+ ECORE_NVM_DEL_FILE = DRV_MSG_CODE_NVM_DEL_FILE,
+ ECORE_NVM_SET_SECURE_MODE = DRV_MSG_CODE_SET_SECURE_MODE,
+ ECORE_PHY_RAW_READ = DRV_MSG_CODE_PHY_RAW_READ,
+ ECORE_PHY_RAW_WRITE = DRV_MSG_CODE_PHY_RAW_WRITE,
+ ECORE_PHY_CORE_READ = DRV_MSG_CODE_PHY_CORE_READ,
+ ECORE_PHY_CORE_WRITE = DRV_MSG_CODE_PHY_CORE_WRITE,
+ ECORE_GET_MCP_NVM_RESP = 0xFFFFFF00
+};
+
+#ifndef LINUX_REMOVE
+#if !defined(CONFIG_ECORE_L2)
+#define CONFIG_ECORE_L2
+#define CONFIG_ECORE_SRIOV
+#endif
+#endif
+
+/* helpers */
+#ifndef __EXTRACT__LINUX__
+#define MASK_FIELD(_name, _value) \
+ ((_value) &= (_name##_MASK))
+
+#define FIELD_VALUE(_name, _value) \
+ ((_value & _name##_MASK) << _name##_SHIFT)
+
+#define SET_FIELD(value, name, flag) \
+do { \
+ (value) &= ~(name##_MASK << name##_SHIFT); \
+ (value) |= ((((u64)flag) & (u64)name##_MASK) << (name##_SHIFT));\
+} while (0)
+
+#define GET_FIELD(value, name) \
+ (((value) >> (name##_SHIFT)) & name##_MASK)
+#endif
+
+#define ECORE_MFW_GET_FIELD(name, field) \
+ (((name) & (field ## _MASK)) >> (field ## _SHIFT))
+
+#define ECORE_MFW_SET_FIELD(name, field, value) \
+do { \
+ (name) &= ~(field ## _MASK); \
+ (name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK)); \
+} while (0)
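The SET_FIELD/GET_FIELD helpers above pair with the *_MASK/*_SHIFT defines from the HSI headers (note that SET_FIELD expects an unshifted mask, whereas ECORE_MFW_SET_FIELD expects a pre-shifted one). A minimal sketch, not part of the patch and using a hypothetical helper name:

/* Hypothetical example: pack a legacy doorbell address using the
 * DB_LEGACY_ADDR_* fields defined in common_hsi.h.
 */
static OSAL_INLINE u32 example_pack_legacy_db_addr(u32 icid, u32 dems)
{
	u32 addr = 0;

	SET_FIELD(addr, DB_LEGACY_ADDR_DEMS, dems);
	SET_FIELD(addr, DB_LEGACY_ADDR_ICID, icid);

	return addr;	/* GET_FIELD(addr, DB_LEGACY_ADDR_ICID) == icid */
}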
+
+static OSAL_INLINE u32 DB_ADDR(u32 cid, u32 DEMS)
+{
+ u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
+ (cid * ECORE_PF_DEMS_SIZE);
+
+ return db_addr;
+}
+
+static OSAL_INLINE u32 DB_ADDR_VF(u32 cid, u32 DEMS)
+{
+ u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
+ FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);
+
+ return db_addr;
+}
+
+#define ALIGNED_TYPE_SIZE(type_name, p_hwfn) \
+ ((sizeof(type_name) + (u32)(1 << (p_hwfn->p_dev->cache_shift)) - 1) & \
+ ~((1 << (p_hwfn->p_dev->cache_shift)) - 1))
+
+#ifndef LINUX_REMOVE
+#ifndef U64_HI
+#define U64_HI(val) ((u32)(((u64)(val)) >> 32))
+#endif
+
+#ifndef U64_LO
+#define U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff))
+#endif
+#endif
+
+#ifndef __EXTRACT__LINUX__
+enum DP_LEVEL {
+ ECORE_LEVEL_VERBOSE = 0x0,
+ ECORE_LEVEL_INFO = 0x1,
+ ECORE_LEVEL_NOTICE = 0x2,
+ ECORE_LEVEL_ERR = 0x3,
+};
+
+#define ECORE_LOG_LEVEL_SHIFT (30)
+#define ECORE_LOG_VERBOSE_MASK (0x3fffffff)
+#define ECORE_LOG_INFO_MASK (0x40000000)
+#define ECORE_LOG_NOTICE_MASK (0x80000000)
+
+enum DP_MODULE {
+#ifndef LINUX_REMOVE
+ ECORE_MSG_DRV = 0x0001,
+ ECORE_MSG_PROBE = 0x0002,
+ ECORE_MSG_LINK = 0x0004,
+ ECORE_MSG_TIMER = 0x0008,
+ ECORE_MSG_IFDOWN = 0x0010,
+ ECORE_MSG_IFUP = 0x0020,
+ ECORE_MSG_RX_ERR = 0x0040,
+ ECORE_MSG_TX_ERR = 0x0080,
+ ECORE_MSG_TX_QUEUED = 0x0100,
+ ECORE_MSG_INTR = 0x0200,
+ ECORE_MSG_TX_DONE = 0x0400,
+ ECORE_MSG_RX_STATUS = 0x0800,
+ ECORE_MSG_PKTDATA = 0x1000,
+ ECORE_MSG_HW = 0x2000,
+ ECORE_MSG_WOL = 0x4000,
+#endif
+ ECORE_MSG_SPQ = 0x10000,
+ ECORE_MSG_STATS = 0x20000,
+ ECORE_MSG_DCB = 0x40000,
+ ECORE_MSG_IOV = 0x80000,
+ ECORE_MSG_SP = 0x100000,
+ ECORE_MSG_STORAGE = 0x200000,
+ ECORE_MSG_OOO = 0x200000,
+ ECORE_MSG_CXT = 0x800000,
+ ECORE_MSG_LL2 = 0x1000000,
+ ECORE_MSG_ILT = 0x2000000,
+ ECORE_MSG_RDMA = 0x4000000,
+ ECORE_MSG_DEBUG = 0x8000000,
+ /* to be added...up to 0x8000000 */
+};
+#endif
+
+#define for_each_hwfn(p_dev, i) for (i = 0; i < p_dev->num_hwfns; i++)
+
+#define D_TRINE(val, cond1, cond2, true1, true2, def) \
+ (val == (cond1) ? true1 : \
+ (val == (cond2) ? true2 : def))
+
+/* forward */
+struct ecore_ptt_pool;
+struct ecore_spq;
+struct ecore_sb_info;
+struct ecore_sb_attn_info;
+struct ecore_cxt_mngr;
+struct ecore_dma_mem;
+struct ecore_sb_sp_info;
+struct ecore_ll2_info;
+struct ecore_l2_info;
+struct ecore_igu_info;
+struct ecore_mcp_info;
+struct ecore_dcbx_info;
+
+struct ecore_rt_data {
+ u32 *init_val;
+ bool *b_valid;
+};
+
+enum ecore_tunn_mode {
+ ECORE_MODE_L2GENEVE_TUNN,
+ ECORE_MODE_IPGENEVE_TUNN,
+ ECORE_MODE_L2GRE_TUNN,
+ ECORE_MODE_IPGRE_TUNN,
+ ECORE_MODE_VXLAN_TUNN,
+};
+
+enum ecore_tunn_clss {
+ ECORE_TUNN_CLSS_MAC_VLAN,
+ ECORE_TUNN_CLSS_MAC_VNI,
+ ECORE_TUNN_CLSS_INNER_MAC_VLAN,
+ ECORE_TUNN_CLSS_INNER_MAC_VNI,
+ ECORE_TUNN_CLSS_MAC_VLAN_DUAL_STAGE,
+ MAX_ECORE_TUNN_CLSS,
+};
+
+struct ecore_tunn_update_type {
+ bool b_update_mode;
+ bool b_mode_enabled;
+ enum ecore_tunn_clss tun_cls;
+};
+
+struct ecore_tunn_update_udp_port {
+ bool b_update_port;
+ u16 port;
+};
+
+struct ecore_tunnel_info {
+ struct ecore_tunn_update_type vxlan;
+ struct ecore_tunn_update_type l2_geneve;
+ struct ecore_tunn_update_type ip_geneve;
+ struct ecore_tunn_update_type l2_gre;
+ struct ecore_tunn_update_type ip_gre;
+
+ struct ecore_tunn_update_udp_port vxlan_port;
+ struct ecore_tunn_update_udp_port geneve_port;
+
+ bool b_update_rx_cls;
+ bool b_update_tx_cls;
+};
+
+/* The PCI personality is not quite synonymous with the protocol ID:
+ * 1. All personalities need CORE connections
+ * 2. The Ethernet personality may also support the RoCE/iWARP protocols
+ */
+enum ecore_pci_personality {
+ ECORE_PCI_ETH,
+ ECORE_PCI_FCOE,
+ ECORE_PCI_ISCSI,
+ ECORE_PCI_ETH_ROCE,
+ ECORE_PCI_ETH_IWARP,
+ ECORE_PCI_ETH_RDMA,
+ ECORE_PCI_DEFAULT /* default in shmem */
+};
+
+/* All VFs are symmetric, all counters are PF + all VFs */
+struct ecore_qm_iids {
+ u32 cids;
+ u32 vf_cids;
+ u32 tids;
+};
+
+#define MAX_PF_PER_PORT 8
+
+/* HW / FW resources; the outputs of the resource allocation driven by the
+ * features below. Most of this information is received from the MFW.
+ */
+enum ecore_resources {
+ ECORE_SB,
+ ECORE_L2_QUEUE,
+ ECORE_VPORT,
+ ECORE_RSS_ENG,
+ ECORE_PQ,
+ ECORE_RL,
+ ECORE_MAC,
+ ECORE_VLAN,
+ ECORE_RDMA_CNQ_RAM,
+ ECORE_ILT,
+ ECORE_LL2_QUEUE,
+ ECORE_CMDQS_CQS,
+ ECORE_RDMA_STATS_QUEUE,
+ ECORE_BDQ,
+ ECORE_MAX_RESC, /* must be last */
+};
+
+/* Features that require resources; these are given as input to the resource
+ * management algorithm, whose outputs are the resources above.
+ */
+enum ecore_feature {
+ ECORE_PF_L2_QUE,
+ ECORE_PF_TC,
+ ECORE_VF,
+ ECORE_EXTRA_VF_QUE,
+ ECORE_VMQ,
+ ECORE_RDMA_CNQ,
+ ECORE_ISCSI_CQ,
+ ECORE_FCOE_CQ,
+ ECORE_VF_L2_QUE,
+ ECORE_MAX_FEATURES,
+};
+
+enum ecore_port_mode {
+ ECORE_PORT_MODE_DE_2X40G,
+ ECORE_PORT_MODE_DE_2X50G,
+ ECORE_PORT_MODE_DE_1X100G,
+ ECORE_PORT_MODE_DE_4X10G_F,
+ ECORE_PORT_MODE_DE_4X10G_E,
+ ECORE_PORT_MODE_DE_4X20G,
+ ECORE_PORT_MODE_DE_1X40G,
+ ECORE_PORT_MODE_DE_2X25G,
+ ECORE_PORT_MODE_DE_1X25G,
+ ECORE_PORT_MODE_DE_4X25G,
+ ECORE_PORT_MODE_DE_2X10G,
+};
+
+enum ecore_dev_cap {
+ ECORE_DEV_CAP_ETH,
+ ECORE_DEV_CAP_FCOE,
+ ECORE_DEV_CAP_ISCSI,
+ ECORE_DEV_CAP_ROCE,
+ ECORE_DEV_CAP_IWARP
+};
+
+#ifndef __EXTRACT__LINUX__
+enum ecore_hw_err_type {
+ ECORE_HW_ERR_FAN_FAIL,
+ ECORE_HW_ERR_MFW_RESP_FAIL,
+ ECORE_HW_ERR_HW_ATTN,
+ ECORE_HW_ERR_DMAE_FAIL,
+ ECORE_HW_ERR_RAMROD_FAIL,
+ ECORE_HW_ERR_FW_ASSERT,
+};
+#endif
+
+struct ecore_hw_info {
+ /* PCI personality */
+ enum ecore_pci_personality personality;
+#define ECORE_IS_RDMA_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == ECORE_PCI_ETH_ROCE || \
+ (dev)->hw_info.personality == ECORE_PCI_ETH_IWARP || \
+ (dev)->hw_info.personality == ECORE_PCI_ETH_RDMA)
+#define ECORE_IS_ROCE_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == ECORE_PCI_ETH_ROCE || \
+ (dev)->hw_info.personality == ECORE_PCI_ETH_RDMA)
+#define ECORE_IS_IWARP_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == ECORE_PCI_ETH_IWARP || \
+ (dev)->hw_info.personality == ECORE_PCI_ETH_RDMA)
+#define ECORE_IS_L2_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == ECORE_PCI_ETH || \
+ ECORE_IS_RDMA_PERSONALITY(dev))
+
+ /* Resource Allocation scheme results */
+ u32 resc_start[ECORE_MAX_RESC];
+ u32 resc_num[ECORE_MAX_RESC];
+ u32 feat_num[ECORE_MAX_FEATURES];
+
+ #define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
+ #define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
+ #define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \
+ RESC_NUM(_p_hwfn, resc))
+ #define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
+
+ /* Number of traffic classes the HW supports */
+ u8 num_hw_tc;
+
+/* Number of TCs which should be active according to DCBx or the upper-layer
+ * driver configuration
+ */
+ u8 num_active_tc;
+
+ /* The traffic class used by the PF for its offloaded protocol */
+ u8 offload_tc;
+
+ u32 concrete_fid;
+ u16 opaque_fid;
+ u16 ovlan;
+ u32 part_num[4];
+
+ unsigned char hw_mac_addr[ETH_ALEN];
+ u64 node_wwn; /* For FCoE only */
+ u64 port_wwn; /* For FCoE only */
+
+ u16 num_iscsi_conns;
+ u16 num_fcoe_conns;
+
+ struct ecore_igu_info *p_igu_info;
+ /* Sriov */
+ u8 max_chains_per_vf;
+
+ u32 port_mode;
+ u32 hw_mode;
+ unsigned long device_capabilities;
+
+ /* Default DCBX mode */
+ u8 dcbx_mode;
+
+ u16 mtu;
+};
+
+/* maximum size of read/write commands (HW limit) */
+#define DMAE_MAX_RW_SIZE 0x2000
+
+struct ecore_dmae_info {
+ /* Mutex for synchronizing access to functions */
+ osal_mutex_t mutex;
+
+ u8 channel;
+
+ dma_addr_t completion_word_phys_addr;
+
+ /* The memory location where the DMAE writes the completion
+ * value when an operation is finished on this context.
+ */
+ u32 *p_completion_word;
+
+ dma_addr_t intermediate_buffer_phys_addr;
+
+ /* An intermediate buffer for DMAE operations that use virtual
+ * addresses - data is DMA'd to/from this buffer and then
+ * memcpy'd to/from the virtual address
+ */
+ u32 *p_intermediate_buffer;
+
+ dma_addr_t dmae_cmd_phys_addr;
+ struct dmae_cmd *p_dmae_cmd;
+};
+
+struct ecore_wfq_data {
+ u32 default_min_speed; /* When wfq feature is not configured */
+ u32 min_speed; /* when feature is configured for any 1 vport */
+ bool configured;
+};
+
+struct ecore_qm_info {
+ struct init_qm_pq_params *qm_pq_params;
+ struct init_qm_vport_params *qm_vport_params;
+ struct init_qm_port_params *qm_port_params;
+ u16 start_pq;
+ u8 start_vport;
+ u16 pure_lb_pq;
+ u16 offload_pq;
+ u16 pure_ack_pq;
+ u16 ooo_pq;
+ u16 first_vf_pq;
+ u16 first_mcos_pq;
+ u16 first_rl_pq;
+ u16 num_pqs;
+ u16 num_vf_pqs;
+ u8 num_vports;
+ u8 max_phys_tcs_per_port;
+ u8 ooo_tc;
+ bool pf_rl_en;
+ bool pf_wfq_en;
+ bool vport_rl_en;
+ bool vport_wfq_en;
+ u8 pf_wfq;
+ u32 pf_rl;
+ struct ecore_wfq_data *wfq_data;
+ u8 num_pf_rls;
+};
+
+struct storm_stats {
+ u32 address;
+ u32 len;
+};
+
+struct ecore_fw_data {
+#ifdef CONFIG_ECORE_BINARY_FW
+ struct fw_ver_info *fw_ver_info;
+#endif
+ const u8 *modes_tree_buf;
+ union init_op *init_ops;
+ const u32 *arr_data;
+ u32 init_ops_size;
+};
+
+struct ecore_hwfn {
+ struct ecore_dev *p_dev;
+ u8 my_id; /* ID inside the PF */
+#define IS_LEAD_HWFN(edev) (!((edev)->my_id))
+ u8 rel_pf_id; /* Relative to engine*/
+ u8 abs_pf_id;
+ #define ECORE_PATH_ID(_p_hwfn) \
+ (ECORE_IS_K2((_p_hwfn)->p_dev) ? 0 : ((_p_hwfn)->abs_pf_id & 1))
+ u8 port_id;
+ bool b_active;
+
+ u32 dp_module;
+ u8 dp_level;
+ char name[NAME_SIZE];
+ void *dp_ctx;
+
+ bool first_on_engine;
+ bool hw_init_done;
+
+ u8 num_funcs_on_engine;
+ u8 enabled_func_idx;
+
+ /* BAR access */
+ void OSAL_IOMEM *regview;
+ void OSAL_IOMEM *doorbells;
+ u64 db_phys_addr;
+ unsigned long db_size;
+
+ /* PTT pool */
+ struct ecore_ptt_pool *p_ptt_pool;
+
+ /* HW info */
+ struct ecore_hw_info hw_info;
+
+ /* rt_array (for init-tool) */
+ struct ecore_rt_data rt_data;
+
+ /* SPQ */
+ struct ecore_spq *p_spq;
+
+ /* EQ */
+ struct ecore_eq *p_eq;
+
+ /* Consolidate Q */
+ struct ecore_consq *p_consq;
+
+ /* Slow-Path definitions */
+ osal_dpc_t sp_dpc;
+ bool b_sp_dpc_enabled;
+
+ struct ecore_ptt *p_main_ptt;
+ struct ecore_ptt *p_dpc_ptt;
+
+ struct ecore_sb_sp_info *p_sp_sb;
+ struct ecore_sb_attn_info *p_sb_attn;
+
+ /* Protocol related */
+ bool using_ll2;
+ struct ecore_ll2_info *p_ll2_info;
+ struct ecore_ooo_info *p_ooo_info;
+ struct ecore_iscsi_info *p_iscsi_info;
+ struct ecore_fcoe_info *p_fcoe_info;
+ struct ecore_rdma_info *p_rdma_info;
+ struct ecore_pf_params pf_params;
+
+ bool b_rdma_enabled_in_prs;
+ u32 rdma_prs_search_reg;
+
+ /* Array of sb_info of all status blocks */
+ struct ecore_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
+ u16 num_sbs;
+
+ struct ecore_cxt_mngr *p_cxt_mngr;
+
+ /* Flag indicating whether interrupts are enabled or not */
+ bool b_int_enabled;
+ bool b_int_requested;
+
+ /* True if the driver has requested the link */
+ bool b_drv_link_init;
+
+ struct ecore_vf_iov *vf_iov_info;
+ struct ecore_pf_iov *pf_iov_info;
+ struct ecore_mcp_info *mcp_info;
+ struct ecore_dcbx_info *p_dcbx_info;
+
+ struct ecore_dmae_info dmae_info;
+
+ /* QM init */
+ struct ecore_qm_info qm_info;
+
+#ifdef CONFIG_ECORE_ZIPPED_FW
+ /* Buffer for unzipping firmware data */
+ void *unzip_buf;
+#endif
+
+ struct dbg_tools_data dbg_info;
+
+ struct z_stream_s *stream;
+
+ /* PWM region specific data */
+ u32 dpi_size;
+ u32 dpi_count;
+ u32 dpi_start_offset; /* used to calculate the doorbell address */
+
+ /* If one of the following is set then EDPM shouldn't be used */
+ u8 dcbx_no_edpm;
+ u8 db_bar_no_edpm;
+
+ /* L2-related */
+ struct ecore_l2_info *p_l2_info;
+
+ /* @DPDK */
+ struct ecore_ptt *p_arfs_ptt;
+};
+
+#ifndef __EXTRACT__LINUX__
+enum ecore_mf_mode {
+ ECORE_MF_DEFAULT,
+ ECORE_MF_OVLAN,
+ ECORE_MF_NPAR,
+};
+#endif
+
+/* @DPDK */
+struct ecore_dbg_feature {
+ u8 *dump_buf;
+ u32 buf_size;
+ u32 dumped_dwords;
+};
+
+enum qed_dbg_features {
+ DBG_FEATURE_BUS,
+ DBG_FEATURE_GRC,
+ DBG_FEATURE_IDLE_CHK,
+ DBG_FEATURE_MCP_TRACE,
+ DBG_FEATURE_REG_FIFO,
+ DBG_FEATURE_PROTECTION_OVERRIDE,
+ DBG_FEATURE_NUM
+};
+
+struct ecore_dev {
+ u32 dp_module;
+ u8 dp_level;
+ char name[NAME_SIZE];
+ void *dp_ctx;
+
+ u8 type;
+#define ECORE_DEV_TYPE_BB (0 << 0)
+#define ECORE_DEV_TYPE_AH (1 << 0)
+/* Translate type/revision combo into the proper conditions */
+#define ECORE_IS_BB(dev) ((dev)->type == ECORE_DEV_TYPE_BB)
+#define ECORE_IS_BB_A0(dev) (ECORE_IS_BB(dev) && CHIP_REV_IS_A0(dev))
+#ifndef ASIC_ONLY
+#define ECORE_IS_BB_B0(dev) ((ECORE_IS_BB(dev) && CHIP_REV_IS_B0(dev)) || \
+ (CHIP_REV_IS_TEDIBEAR(dev)))
+#else
+#define ECORE_IS_BB_B0(dev) (ECORE_IS_BB(dev) && CHIP_REV_IS_B0(dev))
+#endif
+#define ECORE_IS_AH(dev) ((dev)->type == ECORE_DEV_TYPE_AH)
+#define ECORE_IS_K2(dev) ECORE_IS_AH(dev)
+
+#define ECORE_DEV_ID_MASK 0xff00
+#define ECORE_DEV_ID_MASK_BB 0x1600
+#define ECORE_DEV_ID_MASK_AH 0x8000
+
+ u16 vendor_id;
+ u16 device_id;
+
+ u16 chip_num;
+ #define CHIP_NUM_MASK 0xffff
+ #define CHIP_NUM_SHIFT 16
+
+ u16 chip_rev;
+ #define CHIP_REV_MASK 0xf
+ #define CHIP_REV_SHIFT 12
+#ifndef ASIC_ONLY
+ #define CHIP_REV_IS_TEDIBEAR(_p_dev) ((_p_dev)->chip_rev == 0x5)
+ #define CHIP_REV_IS_EMUL_A0(_p_dev) ((_p_dev)->chip_rev == 0xe)
+ #define CHIP_REV_IS_EMUL_B0(_p_dev) ((_p_dev)->chip_rev == 0xc)
+ #define CHIP_REV_IS_EMUL(_p_dev) (CHIP_REV_IS_EMUL_A0(_p_dev) || \
+ CHIP_REV_IS_EMUL_B0(_p_dev))
+ #define CHIP_REV_IS_FPGA_A0(_p_dev) ((_p_dev)->chip_rev == 0xf)
+ #define CHIP_REV_IS_FPGA_B0(_p_dev) ((_p_dev)->chip_rev == 0xd)
+ #define CHIP_REV_IS_FPGA(_p_dev) (CHIP_REV_IS_FPGA_A0(_p_dev) || \
+ CHIP_REV_IS_FPGA_B0(_p_dev))
+ #define CHIP_REV_IS_SLOW(_p_dev) \
+ (CHIP_REV_IS_EMUL(_p_dev) || CHIP_REV_IS_FPGA(_p_dev))
+ #define CHIP_REV_IS_A0(_p_dev) \
+ (CHIP_REV_IS_EMUL_A0(_p_dev) || \
+ CHIP_REV_IS_FPGA_A0(_p_dev) || \
+ !(_p_dev)->chip_rev)
+ #define CHIP_REV_IS_B0(_p_dev) \
+ (CHIP_REV_IS_EMUL_B0(_p_dev) || \
+ CHIP_REV_IS_FPGA_B0(_p_dev) || \
+ (_p_dev)->chip_rev == 1)
+ #define CHIP_REV_IS_ASIC(_p_dev) !CHIP_REV_IS_SLOW(_p_dev)
+#else
+ #define CHIP_REV_IS_A0(_p_dev) (!(_p_dev)->chip_rev)
+ #define CHIP_REV_IS_B0(_p_dev) ((_p_dev)->chip_rev == 1)
+#endif
+
+ u16 chip_metal;
+ #define CHIP_METAL_MASK 0xff
+ #define CHIP_METAL_SHIFT 4
+
+ u16 chip_bond_id;
+ #define CHIP_BOND_ID_MASK 0xf
+ #define CHIP_BOND_ID_SHIFT 0
+
+ u8 num_engines;
+ u8 num_ports_in_engines;
+ u8 num_funcs_in_port;
+
+ u8 path_id;
+ enum ecore_mf_mode mf_mode;
+ #define IS_MF_DEFAULT(_p_hwfn) \
+ (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_DEFAULT)
+ #define IS_MF_SI(_p_hwfn) \
+ (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_NPAR)
+ #define IS_MF_SD(_p_hwfn) \
+ (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_OVLAN)
+
+ int pcie_width;
+ int pcie_speed;
+
+ /* Add MF related configuration */
+ u8 mcp_rev;
+ u8 boot_mode;
+
+ u8 wol;
+
+ u32 int_mode;
+ enum ecore_coalescing_mode int_coalescing_mode;
+ u16 rx_coalesce_usecs;
+ u16 tx_coalesce_usecs;
+
+ /* Start Bar offset of first hwfn */
+ void OSAL_IOMEM *regview;
+ void OSAL_IOMEM *doorbells;
+ u64 db_phys_addr;
+ unsigned long db_size;
+
+ /* PCI */
+ u8 cache_shift;
+
+ /* Init */
+ const struct iro *iro_arr;
+ #define IRO (p_hwfn->p_dev->iro_arr)
+
+ /* HW functions */
+ u8 num_hwfns;
+ struct ecore_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
+
+ /* SRIOV */
+ struct ecore_hw_sriov_info *p_iov_info;
+#define IS_ECORE_SRIOV(p_dev) (!!(p_dev)->p_iov_info)
+ struct ecore_tunnel_info tunnel;
+ bool b_is_vf;
+
+ u32 drv_type;
+
+ u32 rdma_max_sge;
+ u32 rdma_max_inline;
+ u32 rdma_max_srq_sge;
+
+ struct ecore_eth_stats *reset_stats;
+ struct ecore_fw_data *fw_data;
+
+ u32 mcp_nvm_resp;
+
+ /* Recovery */
+ bool recov_in_prog;
+
+/* Indicates whether attentions should be prevented from being reasserted */
+ bool attn_clr_en;
+
+ /* Indicates whether the MFW is allowed to collect a crash dump */
+ bool mdump_en;
+
+ /* Indicates if the reg_fifo is checked after any register access */
+ bool chk_reg_fifo;
+
+#ifndef ASIC_ONLY
+ bool b_is_emul_full;
+#endif
+
+#ifdef CONFIG_ECORE_BINARY_FW /* @DPDK */
+ void *firmware;
+ u64 fw_len;
+#endif
+
+ /* @DPDK */
+ struct ecore_dbg_feature dbg_features[DBG_FEATURE_NUM];
+ u8 engine_for_debug;
+};
+
+#define NUM_OF_VFS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_VFS_BB \
+ : MAX_NUM_VFS_K2)
+#define NUM_OF_L2_QUEUES(dev) (ECORE_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \
+ : MAX_NUM_L2_QUEUES_K2)
+#define NUM_OF_PORTS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_PORTS_BB \
+ : MAX_NUM_PORTS_K2)
+#define NUM_OF_SBS(dev) (ECORE_IS_BB(dev) ? MAX_SB_PER_PATH_BB \
+ : MAX_SB_PER_PATH_K2)
+#define NUM_OF_ENG_PFS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_PFS_BB \
+ : MAX_NUM_PFS_K2)
+
+/**
+ * @brief ecore_concrete_to_sw_fid - get the sw function id from
+ * the concrete value.
+ *
+ * @param concrete_fid
+ *
+ * @return OSAL_INLINE u8
+ */
+static OSAL_INLINE u8
+ecore_concrete_to_sw_fid(__rte_unused struct ecore_dev *p_dev, u32 concrete_fid)
+{
+ u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
+ u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
+ u8 vf_valid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID);
+ u8 sw_fid;
+
+ if (vf_valid)
+ sw_fid = vfid + MAX_NUM_PFS;
+ else
+ sw_fid = pfid;
+
+ return sw_fid;
+}
+
+#define PURE_LB_TC 8
+#define PKT_LB_TC 9
+
+int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate);
+void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
+ u32 min_pf_rate);
+
+int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw);
+int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw);
+void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+int ecore_device_num_engines(struct ecore_dev *p_dev);
+int ecore_device_num_ports(struct ecore_dev *p_dev);
+void ecore_set_fw_mac_addr(__le16 *fw_msb, __le16 *fw_mid, __le16 *fw_lsb,
+ u8 *mac);
+
+/* Flags for indication of required queues */
+#define PQ_FLAGS_RLS (1 << 0)
+#define PQ_FLAGS_MCOS (1 << 1)
+#define PQ_FLAGS_LB (1 << 2)
+#define PQ_FLAGS_OOO (1 << 3)
+#define PQ_FLAGS_ACK (1 << 4)
+#define PQ_FLAGS_OFLD (1 << 5)
+#define PQ_FLAGS_VFS (1 << 6)
+
+/* physical queue index for CM context initialization */
+u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags);
+u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc);
+u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf);
+u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 qpid);
+
+/* amount of resources used in qm init */
+u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn);
+u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn);
+u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn);
+u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn);
+u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn);
+
+#define ECORE_LEADING_HWFN(dev) (&dev->hwfns[0])
+
+const char *ecore_hw_get_resc_name(enum ecore_resources res_id);
+
+#endif /* __ECORE_H */
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_attn_values.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_attn_values.h
new file mode 100644
index 00000000..d8951bcc
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_attn_values.h
@@ -0,0 +1,13287 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ATTN_VALUES_H__
+#define __ATTN_VALUES_H__
+
+#ifndef __PREVENT_INT_ATTN__
+
+/* HW Attention register */
+struct attn_hw_reg {
+ u16 reg_idx; /* Index of this register in its block */
+ u16 num_of_bits; /* number of valid attention bits */
+ const u16 *bit_attn_idx; /* attention index per valid bit */
+ u32 sts_addr; /* Address of the STS register */
+ u32 sts_clr_addr; /* Address of the STS_CLR register */
+ u32 sts_wr_addr; /* Address of the STS_WR register */
+ u32 mask_addr; /* Address of the MASK register */
+};
+
+/* HW block attention registers */
+struct attn_hw_regs {
+ u16 num_of_int_regs; /* Number of interrupt regs */
+ u16 num_of_prty_regs; /* Number of parity regs */
+ struct attn_hw_reg **int_regs; /* interrupt regs */
+ struct attn_hw_reg **prty_regs; /* parity regs */
+};
+
+/* HW block attention registers */
+struct attn_hw_block {
+ const char *name; /* Block name */
+ const char **int_desc; /* Array of interrupt attention descriptions */
+ const char **prty_desc; /* Array of parity attention descriptions */
+ struct attn_hw_regs chip_regs[3]; /* attention regs per chip.*/
+};
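The three structures above nest: a block holds per-chip register sets, and each set points at arrays of interrupt and parity registers. A minimal sketch (not part of the patch; the helper name is hypothetical, and treating the chip_regs[3] index as a chip selector matching the *_bb_a0/_bb_b0/_k2 register tables below is an assumption):

static u32 example_count_int_attn_bits(const struct attn_hw_block *p_block,
				       u8 chip_idx)
{
	const struct attn_hw_regs *p_regs = &p_block->chip_regs[chip_idx];
	u32 num_bits = 0;
	u16 i;

	/* Sum the valid attention bits over all interrupt registers of the
	 * block for the selected chip.
	 */
	for (i = 0; i < p_regs->num_of_int_regs; i++)
		num_bits += p_regs->int_regs[i]->num_of_bits;

	return num_bits;
}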
+
+#ifdef ATTN_DESC
+static const char *grc_int_attn_desc[5] = {
+ "grc_address_error",
+ "grc_timeout_event",
+ "grc_global_reserved_address",
+ "grc_path_isolation_error",
+ "grc_trace_fifo_valid_data",
+};
+#else
+#define grc_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 grc_int0_bb_a0_attn_idx[4] = {
+ 0, 1, 2, 3,
+};
+
+static struct attn_hw_reg grc_int0_bb_a0 = {
+ 0, 4, grc_int0_bb_a0_attn_idx, 0x50180, 0x5018c, 0x50188, 0x50184
+};
+
+static struct attn_hw_reg *grc_int_bb_a0_regs[1] = {
+ &grc_int0_bb_a0,
+};
+
+static const u16 grc_int0_bb_b0_attn_idx[4] = {
+ 0, 1, 2, 3,
+};
+
+static struct attn_hw_reg grc_int0_bb_b0 = {
+ 0, 4, grc_int0_bb_b0_attn_idx, 0x50180, 0x5018c, 0x50188, 0x50184
+};
+
+static struct attn_hw_reg *grc_int_bb_b0_regs[1] = {
+ &grc_int0_bb_b0,
+};
+
+static const u16 grc_int0_k2_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg grc_int0_k2 = {
+ 0, 5, grc_int0_k2_attn_idx, 0x50180, 0x5018c, 0x50188, 0x50184
+};
+
+static struct attn_hw_reg *grc_int_k2_regs[1] = {
+ &grc_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *grc_prty_attn_desc[3] = {
+ "grc_mem003_i_mem_prty",
+ "grc_mem002_i_mem_prty",
+ "grc_mem001_i_mem_prty",
+};
+#else
+#define grc_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 grc_prty1_bb_a0_attn_idx[2] = {
+ 1, 2,
+};
+
+static struct attn_hw_reg grc_prty1_bb_a0 = {
+ 0, 2, grc_prty1_bb_a0_attn_idx, 0x50200, 0x5020c, 0x50208, 0x50204
+};
+
+static struct attn_hw_reg *grc_prty_bb_a0_regs[1] = {
+ &grc_prty1_bb_a0,
+};
+
+static const u16 grc_prty1_bb_b0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg grc_prty1_bb_b0 = {
+ 0, 2, grc_prty1_bb_b0_attn_idx, 0x50200, 0x5020c, 0x50208, 0x50204
+};
+
+static struct attn_hw_reg *grc_prty_bb_b0_regs[1] = {
+ &grc_prty1_bb_b0,
+};
+
+static const u16 grc_prty1_k2_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg grc_prty1_k2 = {
+ 0, 2, grc_prty1_k2_attn_idx, 0x50200, 0x5020c, 0x50208, 0x50204
+};
+
+static struct attn_hw_reg *grc_prty_k2_regs[1] = {
+ &grc_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *miscs_int_attn_desc[14] = {
+ "miscs_address_error",
+ "miscs_generic_sw",
+ "miscs_cnig_interrupt",
+ "miscs_opte_dorq_fifo_err_eng1",
+ "miscs_opte_dorq_fifo_err_eng0",
+ "miscs_opte_dbg_fifo_err_eng1",
+ "miscs_opte_dbg_fifo_err_eng0",
+ "miscs_opte_btb_if1_fifo_err_eng1",
+ "miscs_opte_btb_if1_fifo_err_eng0",
+ "miscs_opte_btb_if0_fifo_err_eng1",
+ "miscs_opte_btb_if0_fifo_err_eng0",
+ "miscs_opte_btb_sop_fifo_err_eng1",
+ "miscs_opte_btb_sop_fifo_err_eng0",
+ "miscs_opte_storm_fifo_err_eng0",
+};
+#else
+#define miscs_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 miscs_int0_bb_a0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg miscs_int0_bb_a0 = {
+ 0, 2, miscs_int0_bb_a0_attn_idx, 0x9180, 0x918c, 0x9188, 0x9184
+};
+
+static const u16 miscs_int1_bb_a0_attn_idx[11] = {
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+};
+
+static struct attn_hw_reg miscs_int1_bb_a0 = {
+ 1, 11, miscs_int1_bb_a0_attn_idx, 0x9190, 0x919c, 0x9198, 0x9194
+};
+
+static struct attn_hw_reg *miscs_int_bb_a0_regs[2] = {
+ &miscs_int0_bb_a0, &miscs_int1_bb_a0,
+};
+
+static const u16 miscs_int0_bb_b0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg miscs_int0_bb_b0 = {
+ 0, 3, miscs_int0_bb_b0_attn_idx, 0x9180, 0x918c, 0x9188, 0x9184
+};
+
+static const u16 miscs_int1_bb_b0_attn_idx[11] = {
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+};
+
+static struct attn_hw_reg miscs_int1_bb_b0 = {
+ 1, 11, miscs_int1_bb_b0_attn_idx, 0x9190, 0x919c, 0x9198, 0x9194
+};
+
+static struct attn_hw_reg *miscs_int_bb_b0_regs[2] = {
+ &miscs_int0_bb_b0, &miscs_int1_bb_b0,
+};
+
+static const u16 miscs_int0_k2_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg miscs_int0_k2 = {
+ 0, 3, miscs_int0_k2_attn_idx, 0x9180, 0x918c, 0x9188, 0x9184
+};
+
+static struct attn_hw_reg *miscs_int_k2_regs[1] = {
+ &miscs_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *miscs_prty_attn_desc[1] = {
+ "miscs_cnig_parity",
+};
+#else
+#define miscs_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 miscs_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg miscs_prty0_bb_b0 = {
+ 0, 1, miscs_prty0_bb_b0_attn_idx, 0x91a0, 0x91ac, 0x91a8, 0x91a4
+};
+
+static struct attn_hw_reg *miscs_prty_bb_b0_regs[1] = {
+ &miscs_prty0_bb_b0,
+};
+
+static const u16 miscs_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg miscs_prty0_k2 = {
+ 0, 1, miscs_prty0_k2_attn_idx, 0x91a0, 0x91ac, 0x91a8, 0x91a4
+};
+
+static struct attn_hw_reg *miscs_prty_k2_regs[1] = {
+ &miscs_prty0_k2,
+};
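+
+/* Illustrative sketch only (not a function from this file): code consuming
+ * one of the *_regs arrays above would typically walk it, read each block's
+ * STS register and map set bits back to their names through the
+ * per-register attention-index table, e.g.:
+ *
+ *	for (i = 0; i < num_regs; i++) {
+ *		const struct attn_hw_reg *r = regs[i];
+ *		u32 sts = ecore_rd(p_hwfn, p_ptt, r->sts_addr);
+ *
+ *		for (j = 0; j < r->num_of_attn; j++)
+ *			if (sts & (1 << j))
+ *				report_bit(desc[r->attn_idx[j]]);
+ *	}
+ *
+ * The field names (sts_addr, num_of_attn, attn_idx) are assumptions based
+ * on the initializer layout, and report_bit() is hypothetical; the driver's
+ * actual attention handling lives in ecore_int.c.
+ */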
+
+#ifdef ATTN_DESC
+static const char *misc_int_attn_desc[1] = {
+ "misc_address_error",
+};
+#else
+#define misc_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 misc_int0_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg misc_int0_bb_a0 = {
+ 0, 1, misc_int0_bb_a0_attn_idx, 0x8180, 0x818c, 0x8188, 0x8184
+};
+
+static struct attn_hw_reg *misc_int_bb_a0_regs[1] = {
+ &misc_int0_bb_a0,
+};
+
+static const u16 misc_int0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg misc_int0_bb_b0 = {
+ 0, 1, misc_int0_bb_b0_attn_idx, 0x8180, 0x818c, 0x8188, 0x8184
+};
+
+static struct attn_hw_reg *misc_int_bb_b0_regs[1] = {
+ &misc_int0_bb_b0,
+};
+
+static const u16 misc_int0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg misc_int0_k2 = {
+ 0, 1, misc_int0_k2_attn_idx, 0x8180, 0x818c, 0x8188, 0x8184
+};
+
+static struct attn_hw_reg *misc_int_k2_regs[1] = {
+ &misc_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pglue_b_int_attn_desc[24] = {
+ "pglue_b_address_error",
+ "pglue_b_incorrect_rcv_behavior",
+ "pglue_b_was_error_attn",
+ "pglue_b_vf_length_violation_attn",
+ "pglue_b_vf_grc_space_violation_attn",
+ "pglue_b_tcpl_error_attn",
+ "pglue_b_tcpl_in_two_rcbs_attn",
+ "pglue_b_cssnoop_fifo_overflow",
+ "pglue_b_tcpl_translation_size_different",
+ "pglue_b_pcie_rx_l0s_timeout",
+ "pglue_b_master_zlr_attn",
+ "pglue_b_admin_window_violation_attn",
+ "pglue_b_out_of_range_function_in_pretend",
+ "pglue_b_illegal_address",
+ "pglue_b_pgl_cpl_err",
+ "pglue_b_pgl_txw_of",
+ "pglue_b_pgl_cpl_aft",
+ "pglue_b_pgl_cpl_of",
+ "pglue_b_pgl_cpl_ecrc",
+ "pglue_b_pgl_pcie_attn",
+ "pglue_b_pgl_read_blocked",
+ "pglue_b_pgl_write_blocked",
+ "pglue_b_vf_ilt_err",
+ "pglue_b_rxobffexception_attn",
+};
+#else
+#define pglue_b_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pglue_b_int0_bb_a0_attn_idx[23] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22,
+};
+
+static struct attn_hw_reg pglue_b_int0_bb_a0 = {
+ 0, 23, pglue_b_int0_bb_a0_attn_idx, 0x2a8180, 0x2a818c, 0x2a8188,
+ 0x2a8184
+};
+
+static struct attn_hw_reg *pglue_b_int_bb_a0_regs[1] = {
+ &pglue_b_int0_bb_a0,
+};
+
+static const u16 pglue_b_int0_bb_b0_attn_idx[23] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22,
+};
+
+static struct attn_hw_reg pglue_b_int0_bb_b0 = {
+ 0, 23, pglue_b_int0_bb_b0_attn_idx, 0x2a8180, 0x2a818c, 0x2a8188,
+ 0x2a8184
+};
+
+static struct attn_hw_reg *pglue_b_int_bb_b0_regs[1] = {
+ &pglue_b_int0_bb_b0,
+};
+
+static const u16 pglue_b_int0_k2_attn_idx[24] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23,
+};
+
+static struct attn_hw_reg pglue_b_int0_k2 = {
+ 0, 24, pglue_b_int0_k2_attn_idx, 0x2a8180, 0x2a818c, 0x2a8188, 0x2a8184
+};
+
+static struct attn_hw_reg *pglue_b_int_k2_regs[1] = {
+ &pglue_b_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pglue_b_prty_attn_desc[35] = {
+ "pglue_b_datapath_registers",
+ "pglue_b_mem027_i_mem_prty",
+ "pglue_b_mem007_i_mem_prty",
+ "pglue_b_mem009_i_mem_prty",
+ "pglue_b_mem010_i_mem_prty",
+ "pglue_b_mem008_i_mem_prty",
+ "pglue_b_mem022_i_mem_prty",
+ "pglue_b_mem023_i_mem_prty",
+ "pglue_b_mem024_i_mem_prty",
+ "pglue_b_mem025_i_mem_prty",
+ "pglue_b_mem004_i_mem_prty",
+ "pglue_b_mem005_i_mem_prty",
+ "pglue_b_mem011_i_mem_prty",
+ "pglue_b_mem016_i_mem_prty",
+ "pglue_b_mem017_i_mem_prty",
+ "pglue_b_mem012_i_mem_prty",
+ "pglue_b_mem013_i_mem_prty",
+ "pglue_b_mem014_i_mem_prty",
+ "pglue_b_mem015_i_mem_prty",
+ "pglue_b_mem018_i_mem_prty",
+ "pglue_b_mem020_i_mem_prty",
+ "pglue_b_mem021_i_mem_prty",
+ "pglue_b_mem019_i_mem_prty",
+ "pglue_b_mem026_i_mem_prty",
+ "pglue_b_mem006_i_mem_prty",
+ "pglue_b_mem003_i_mem_prty",
+ "pglue_b_mem002_i_mem_prty_0",
+ "pglue_b_mem002_i_mem_prty_1",
+ "pglue_b_mem002_i_mem_prty_2",
+ "pglue_b_mem002_i_mem_prty_3",
+ "pglue_b_mem002_i_mem_prty_4",
+ "pglue_b_mem002_i_mem_prty_5",
+ "pglue_b_mem002_i_mem_prty_6",
+ "pglue_b_mem002_i_mem_prty_7",
+ "pglue_b_mem001_i_mem_prty",
+};
+#else
+#define pglue_b_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pglue_b_prty1_bb_a0_attn_idx[22] = {
+ 2, 3, 4, 5, 10, 11, 12, 15, 16, 17, 18, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34,
+};
+
+static struct attn_hw_reg pglue_b_prty1_bb_a0 = {
+ 0, 22, pglue_b_prty1_bb_a0_attn_idx, 0x2a8200, 0x2a820c, 0x2a8208,
+ 0x2a8204
+};
+
+static struct attn_hw_reg *pglue_b_prty_bb_a0_regs[1] = {
+ &pglue_b_prty1_bb_a0,
+};
+
+static const u16 pglue_b_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pglue_b_prty0_bb_b0 = {
+ 0, 1, pglue_b_prty0_bb_b0_attn_idx, 0x2a8190, 0x2a819c, 0x2a8198,
+ 0x2a8194
+};
+
+static const u16 pglue_b_prty1_bb_b0_attn_idx[22] = {
+ 2, 3, 4, 5, 10, 11, 12, 15, 16, 17, 18, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34,
+};
+
+static struct attn_hw_reg pglue_b_prty1_bb_b0 = {
+ 1, 22, pglue_b_prty1_bb_b0_attn_idx, 0x2a8200, 0x2a820c, 0x2a8208,
+ 0x2a8204
+};
+
+static struct attn_hw_reg *pglue_b_prty_bb_b0_regs[2] = {
+ &pglue_b_prty0_bb_b0, &pglue_b_prty1_bb_b0,
+};
+
+static const u16 pglue_b_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pglue_b_prty0_k2 = {
+ 0, 1, pglue_b_prty0_k2_attn_idx, 0x2a8190, 0x2a819c, 0x2a8198, 0x2a8194
+};
+
+static const u16 pglue_b_prty1_k2_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pglue_b_prty1_k2 = {
+ 1, 31, pglue_b_prty1_k2_attn_idx, 0x2a8200, 0x2a820c, 0x2a8208,
+ 0x2a8204
+};
+
+static const u16 pglue_b_prty2_k2_attn_idx[3] = {
+ 32, 33, 34,
+};
+
+static struct attn_hw_reg pglue_b_prty2_k2 = {
+ 2, 3, pglue_b_prty2_k2_attn_idx, 0x2a8210, 0x2a821c, 0x2a8218, 0x2a8214
+};
+
+static struct attn_hw_reg *pglue_b_prty_k2_regs[3] = {
+ &pglue_b_prty0_k2, &pglue_b_prty1_k2, &pglue_b_prty2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *cnig_int_attn_desc[10] = {
+ "cnig_address_error",
+ "cnig_tx_illegal_sop_port0",
+ "cnig_tx_illegal_sop_port1",
+ "cnig_tx_illegal_sop_port2",
+ "cnig_tx_illegal_sop_port3",
+ "cnig_tdm_lane_0_bandwidth_exceed",
+ "cnig_tdm_lane_1_bandwidth_exceed",
+ "cnig_pmeg_intr",
+ "cnig_pmfc_intr",
+ "cnig_fifo_error",
+};
+#else
+#define cnig_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 cnig_int0_bb_a0_attn_idx[4] = {
+ 0, 7, 8, 9,
+};
+
+static struct attn_hw_reg cnig_int0_bb_a0 = {
+ 0, 4, cnig_int0_bb_a0_attn_idx, 0x2182e8, 0x2182f4, 0x2182f0, 0x2182ec
+};
+
+static struct attn_hw_reg *cnig_int_bb_a0_regs[1] = {
+ &cnig_int0_bb_a0,
+};
+
+static const u16 cnig_int0_bb_b0_attn_idx[6] = {
+ 0, 1, 3, 7, 8, 9,
+};
+
+static struct attn_hw_reg cnig_int0_bb_b0 = {
+ 0, 6, cnig_int0_bb_b0_attn_idx, 0x2182e8, 0x2182f4, 0x2182f0, 0x2182ec
+};
+
+static struct attn_hw_reg *cnig_int_bb_b0_regs[1] = {
+ &cnig_int0_bb_b0,
+};
+
+static const u16 cnig_int0_k2_attn_idx[7] = {
+ 0, 1, 2, 3, 4, 5, 6,
+};
+
+static struct attn_hw_reg cnig_int0_k2 = {
+ 0, 7, cnig_int0_k2_attn_idx, 0x218218, 0x218224, 0x218220, 0x21821c
+};
+
+static struct attn_hw_reg *cnig_int_k2_regs[1] = {
+ &cnig_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *cnig_prty_attn_desc[3] = {
+ "cnig_unused_0",
+ "cnig_datapath_tx",
+ "cnig_datapath_rx",
+};
+#else
+#define cnig_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 cnig_prty0_bb_b0_attn_idx[2] = {
+ 1, 2,
+};
+
+static struct attn_hw_reg cnig_prty0_bb_b0 = {
+ 0, 2, cnig_prty0_bb_b0_attn_idx, 0x218348, 0x218354, 0x218350, 0x21834c
+};
+
+static struct attn_hw_reg *cnig_prty_bb_b0_regs[1] = {
+ &cnig_prty0_bb_b0,
+};
+
+static const u16 cnig_prty0_k2_attn_idx[1] = {
+ 1,
+};
+
+static struct attn_hw_reg cnig_prty0_k2 = {
+ 0, 1, cnig_prty0_k2_attn_idx, 0x21822c, 0x218238, 0x218234, 0x218230
+};
+
+static struct attn_hw_reg *cnig_prty_k2_regs[1] = {
+ &cnig_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *cpmu_int_attn_desc[1] = {
+ "cpmu_address_error",
+};
+#else
+#define cpmu_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 cpmu_int0_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg cpmu_int0_bb_a0 = {
+ 0, 1, cpmu_int0_bb_a0_attn_idx, 0x303e0, 0x303ec, 0x303e8, 0x303e4
+};
+
+static struct attn_hw_reg *cpmu_int_bb_a0_regs[1] = {
+ &cpmu_int0_bb_a0,
+};
+
+static const u16 cpmu_int0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg cpmu_int0_bb_b0 = {
+ 0, 1, cpmu_int0_bb_b0_attn_idx, 0x303e0, 0x303ec, 0x303e8, 0x303e4
+};
+
+static struct attn_hw_reg *cpmu_int_bb_b0_regs[1] = {
+ &cpmu_int0_bb_b0,
+};
+
+static const u16 cpmu_int0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg cpmu_int0_k2 = {
+ 0, 1, cpmu_int0_k2_attn_idx, 0x303e0, 0x303ec, 0x303e8, 0x303e4
+};
+
+static struct attn_hw_reg *cpmu_int_k2_regs[1] = {
+ &cpmu_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ncsi_int_attn_desc[1] = {
+ "ncsi_address_error",
+};
+#else
+#define ncsi_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ncsi_int0_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg ncsi_int0_bb_a0 = {
+ 0, 1, ncsi_int0_bb_a0_attn_idx, 0x404cc, 0x404d8, 0x404d4, 0x404d0
+};
+
+static struct attn_hw_reg *ncsi_int_bb_a0_regs[1] = {
+ &ncsi_int0_bb_a0,
+};
+
+static const u16 ncsi_int0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg ncsi_int0_bb_b0 = {
+ 0, 1, ncsi_int0_bb_b0_attn_idx, 0x404cc, 0x404d8, 0x404d4, 0x404d0
+};
+
+static struct attn_hw_reg *ncsi_int_bb_b0_regs[1] = {
+ &ncsi_int0_bb_b0,
+};
+
+static const u16 ncsi_int0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg ncsi_int0_k2 = {
+ 0, 1, ncsi_int0_k2_attn_idx, 0x404cc, 0x404d8, 0x404d4, 0x404d0
+};
+
+static struct attn_hw_reg *ncsi_int_k2_regs[1] = {
+ &ncsi_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ncsi_prty_attn_desc[1] = {
+ "ncsi_mem002_i_mem_prty",
+};
+#else
+#define ncsi_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 ncsi_prty1_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg ncsi_prty1_bb_a0 = {
+ 0, 1, ncsi_prty1_bb_a0_attn_idx, 0x40000, 0x4000c, 0x40008, 0x40004
+};
+
+static struct attn_hw_reg *ncsi_prty_bb_a0_regs[1] = {
+ &ncsi_prty1_bb_a0,
+};
+
+static const u16 ncsi_prty1_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg ncsi_prty1_bb_b0 = {
+ 0, 1, ncsi_prty1_bb_b0_attn_idx, 0x40000, 0x4000c, 0x40008, 0x40004
+};
+
+static struct attn_hw_reg *ncsi_prty_bb_b0_regs[1] = {
+ &ncsi_prty1_bb_b0,
+};
+
+static const u16 ncsi_prty1_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg ncsi_prty1_k2 = {
+ 0, 1, ncsi_prty1_k2_attn_idx, 0x40000, 0x4000c, 0x40008, 0x40004
+};
+
+static struct attn_hw_reg *ncsi_prty_k2_regs[1] = {
+ &ncsi_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *opte_prty_attn_desc[12] = {
+ "opte_mem009_i_mem_prty",
+ "opte_mem010_i_mem_prty",
+ "opte_mem005_i_mem_prty",
+ "opte_mem006_i_mem_prty",
+ "opte_mem007_i_mem_prty",
+ "opte_mem008_i_mem_prty",
+ "opte_mem001_i_mem_prty",
+ "opte_mem002_i_mem_prty",
+ "opte_mem003_i_mem_prty",
+ "opte_mem004_i_mem_prty",
+ "opte_mem011_i_mem_prty",
+ "opte_datapath_parity_error",
+};
+#else
+#define opte_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 opte_prty1_bb_a0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg opte_prty1_bb_a0 = {
+ 0, 11, opte_prty1_bb_a0_attn_idx, 0x53000, 0x5300c, 0x53008, 0x53004
+};
+
+static struct attn_hw_reg *opte_prty_bb_a0_regs[1] = {
+ &opte_prty1_bb_a0,
+};
+
+static const u16 opte_prty1_bb_b0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg opte_prty1_bb_b0 = {
+ 0, 11, opte_prty1_bb_b0_attn_idx, 0x53000, 0x5300c, 0x53008, 0x53004
+};
+
+static const u16 opte_prty0_bb_b0_attn_idx[1] = {
+ 11,
+};
+
+static struct attn_hw_reg opte_prty0_bb_b0 = {
+ 1, 1, opte_prty0_bb_b0_attn_idx, 0x53208, 0x53214, 0x53210, 0x5320c
+};
+
+static struct attn_hw_reg *opte_prty_bb_b0_regs[2] = {
+ &opte_prty1_bb_b0, &opte_prty0_bb_b0,
+};
+
+static const u16 opte_prty1_k2_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg opte_prty1_k2 = {
+ 0, 11, opte_prty1_k2_attn_idx, 0x53000, 0x5300c, 0x53008, 0x53004
+};
+
+static const u16 opte_prty0_k2_attn_idx[1] = {
+ 11,
+};
+
+static struct attn_hw_reg opte_prty0_k2 = {
+ 1, 1, opte_prty0_k2_attn_idx, 0x53208, 0x53214, 0x53210, 0x5320c
+};
+
+static struct attn_hw_reg *opte_prty_k2_regs[2] = {
+ &opte_prty1_k2, &opte_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *bmb_int_attn_desc[297] = {
+ "bmb_address_error",
+ "bmb_rc_pkt0_rls_error",
+ "bmb_unused_0",
+ "bmb_rc_pkt0_protocol_error",
+ "bmb_rc_pkt1_rls_error",
+ "bmb_unused_1",
+ "bmb_rc_pkt1_protocol_error",
+ "bmb_rc_pkt2_rls_error",
+ "bmb_unused_2",
+ "bmb_rc_pkt2_protocol_error",
+ "bmb_rc_pkt3_rls_error",
+ "bmb_unused_3",
+ "bmb_rc_pkt3_protocol_error",
+ "bmb_rc_sop_req_tc_port_error",
+ "bmb_unused_4",
+ "bmb_wc0_protocol_error",
+ "bmb_wc1_protocol_error",
+ "bmb_wc2_protocol_error",
+ "bmb_wc3_protocol_error",
+ "bmb_unused_5",
+ "bmb_ll_blk_error",
+ "bmb_unused_6",
+ "bmb_mac0_fc_cnt_error",
+ "bmb_ll_arb_calc_error",
+ "bmb_wc0_inp_fifo_error",
+ "bmb_wc0_sop_fifo_error",
+ "bmb_wc0_len_fifo_error",
+ "bmb_wc0_queue_fifo_error",
+ "bmb_wc0_free_point_fifo_error",
+ "bmb_wc0_next_point_fifo_error",
+ "bmb_wc0_strt_fifo_error",
+ "bmb_wc0_second_dscr_fifo_error",
+ "bmb_wc0_pkt_avail_fifo_error",
+ "bmb_wc0_cos_cnt_fifo_error",
+ "bmb_wc0_notify_fifo_error",
+ "bmb_wc0_ll_req_fifo_error",
+ "bmb_wc0_ll_pa_cnt_error",
+ "bmb_wc0_bb_pa_cnt_error",
+ "bmb_wc1_inp_fifo_error",
+ "bmb_wc1_sop_fifo_error",
+ "bmb_wc1_queue_fifo_error",
+ "bmb_wc1_free_point_fifo_error",
+ "bmb_wc1_next_point_fifo_error",
+ "bmb_wc1_strt_fifo_error",
+ "bmb_wc1_second_dscr_fifo_error",
+ "bmb_wc1_pkt_avail_fifo_error",
+ "bmb_wc1_cos_cnt_fifo_error",
+ "bmb_wc1_notify_fifo_error",
+ "bmb_wc1_ll_req_fifo_error",
+ "bmb_wc1_ll_pa_cnt_error",
+ "bmb_wc1_bb_pa_cnt_error",
+ "bmb_wc2_inp_fifo_error",
+ "bmb_wc2_sop_fifo_error",
+ "bmb_wc2_queue_fifo_error",
+ "bmb_wc2_free_point_fifo_error",
+ "bmb_wc2_next_point_fifo_error",
+ "bmb_wc2_strt_fifo_error",
+ "bmb_wc2_second_dscr_fifo_error",
+ "bmb_wc2_pkt_avail_fifo_error",
+ "bmb_wc2_cos_cnt_fifo_error",
+ "bmb_wc2_notify_fifo_error",
+ "bmb_wc2_ll_req_fifo_error",
+ "bmb_wc2_ll_pa_cnt_error",
+ "bmb_wc2_bb_pa_cnt_error",
+ "bmb_wc3_inp_fifo_error",
+ "bmb_wc3_sop_fifo_error",
+ "bmb_wc3_queue_fifo_error",
+ "bmb_wc3_free_point_fifo_error",
+ "bmb_wc3_next_point_fifo_error",
+ "bmb_wc3_strt_fifo_error",
+ "bmb_wc3_second_dscr_fifo_error",
+ "bmb_wc3_pkt_avail_fifo_error",
+ "bmb_wc3_cos_cnt_fifo_error",
+ "bmb_wc3_notify_fifo_error",
+ "bmb_wc3_ll_req_fifo_error",
+ "bmb_wc3_ll_pa_cnt_error",
+ "bmb_wc3_bb_pa_cnt_error",
+ "bmb_rc_pkt0_side_fifo_error",
+ "bmb_rc_pkt0_req_fifo_error",
+ "bmb_rc_pkt0_blk_fifo_error",
+ "bmb_rc_pkt0_rls_left_fifo_error",
+ "bmb_rc_pkt0_strt_ptr_fifo_error",
+ "bmb_rc_pkt0_second_ptr_fifo_error",
+ "bmb_rc_pkt0_rsp_fifo_error",
+ "bmb_rc_pkt0_dscr_fifo_error",
+ "bmb_rc_pkt1_side_fifo_error",
+ "bmb_rc_pkt1_req_fifo_error",
+ "bmb_rc_pkt1_blk_fifo_error",
+ "bmb_rc_pkt1_rls_left_fifo_error",
+ "bmb_rc_pkt1_strt_ptr_fifo_error",
+ "bmb_rc_pkt1_second_ptr_fifo_error",
+ "bmb_rc_pkt1_rsp_fifo_error",
+ "bmb_rc_pkt1_dscr_fifo_error",
+ "bmb_rc_pkt2_side_fifo_error",
+ "bmb_rc_pkt2_req_fifo_error",
+ "bmb_rc_pkt2_blk_fifo_error",
+ "bmb_rc_pkt2_rls_left_fifo_error",
+ "bmb_rc_pkt2_strt_ptr_fifo_error",
+ "bmb_rc_pkt2_second_ptr_fifo_error",
+ "bmb_rc_pkt2_rsp_fifo_error",
+ "bmb_rc_pkt2_dscr_fifo_error",
+ "bmb_rc_pkt3_side_fifo_error",
+ "bmb_rc_pkt3_req_fifo_error",
+ "bmb_rc_pkt3_blk_fifo_error",
+ "bmb_rc_pkt3_rls_left_fifo_error",
+ "bmb_rc_pkt3_strt_ptr_fifo_error",
+ "bmb_rc_pkt3_second_ptr_fifo_error",
+ "bmb_rc_pkt3_rsp_fifo_error",
+ "bmb_rc_pkt3_dscr_fifo_error",
+ "bmb_rc_sop_strt_fifo_error",
+ "bmb_rc_sop_req_fifo_error",
+ "bmb_rc_sop_dscr_fifo_error",
+ "bmb_rc_sop_queue_fifo_error",
+ "bmb_ll_arb_rls_fifo_error",
+ "bmb_ll_arb_prefetch_fifo_error",
+ "bmb_rc_pkt0_rls_fifo_error",
+ "bmb_rc_pkt1_rls_fifo_error",
+ "bmb_rc_pkt2_rls_fifo_error",
+ "bmb_rc_pkt3_rls_fifo_error",
+ "bmb_rc_pkt4_rls_fifo_error",
+ "bmb_rc_pkt5_rls_fifo_error",
+ "bmb_rc_pkt6_rls_fifo_error",
+ "bmb_rc_pkt7_rls_fifo_error",
+ "bmb_rc_pkt8_rls_fifo_error",
+ "bmb_rc_pkt9_rls_fifo_error",
+ "bmb_rc_pkt4_rls_error",
+ "bmb_rc_pkt4_protocol_error",
+ "bmb_rc_pkt4_side_fifo_error",
+ "bmb_rc_pkt4_req_fifo_error",
+ "bmb_rc_pkt4_blk_fifo_error",
+ "bmb_rc_pkt4_rls_left_fifo_error",
+ "bmb_rc_pkt4_strt_ptr_fifo_error",
+ "bmb_rc_pkt4_second_ptr_fifo_error",
+ "bmb_rc_pkt4_rsp_fifo_error",
+ "bmb_rc_pkt4_dscr_fifo_error",
+ "bmb_rc_pkt5_rls_error",
+ "bmb_rc_pkt5_protocol_error",
+ "bmb_rc_pkt5_side_fifo_error",
+ "bmb_rc_pkt5_req_fifo_error",
+ "bmb_rc_pkt5_blk_fifo_error",
+ "bmb_rc_pkt5_rls_left_fifo_error",
+ "bmb_rc_pkt5_strt_ptr_fifo_error",
+ "bmb_rc_pkt5_second_ptr_fifo_error",
+ "bmb_rc_pkt5_rsp_fifo_error",
+ "bmb_rc_pkt5_dscr_fifo_error",
+ "bmb_rc_pkt6_rls_error",
+ "bmb_rc_pkt6_protocol_error",
+ "bmb_rc_pkt6_side_fifo_error",
+ "bmb_rc_pkt6_req_fifo_error",
+ "bmb_rc_pkt6_blk_fifo_error",
+ "bmb_rc_pkt6_rls_left_fifo_error",
+ "bmb_rc_pkt6_strt_ptr_fifo_error",
+ "bmb_rc_pkt6_second_ptr_fifo_error",
+ "bmb_rc_pkt6_rsp_fifo_error",
+ "bmb_rc_pkt6_dscr_fifo_error",
+ "bmb_rc_pkt7_rls_error",
+ "bmb_rc_pkt7_protocol_error",
+ "bmb_rc_pkt7_side_fifo_error",
+ "bmb_rc_pkt7_req_fifo_error",
+ "bmb_rc_pkt7_blk_fifo_error",
+ "bmb_rc_pkt7_rls_left_fifo_error",
+ "bmb_rc_pkt7_strt_ptr_fifo_error",
+ "bmb_rc_pkt7_second_ptr_fifo_error",
+ "bmb_rc_pkt7_rsp_fifo_error",
+ "bmb_packet_available_sync_fifo_push_error",
+ "bmb_rc_pkt8_rls_error",
+ "bmb_rc_pkt8_protocol_error",
+ "bmb_rc_pkt8_side_fifo_error",
+ "bmb_rc_pkt8_req_fifo_error",
+ "bmb_rc_pkt8_blk_fifo_error",
+ "bmb_rc_pkt8_rls_left_fifo_error",
+ "bmb_rc_pkt8_strt_ptr_fifo_error",
+ "bmb_rc_pkt8_second_ptr_fifo_error",
+ "bmb_rc_pkt8_rsp_fifo_error",
+ "bmb_rc_pkt8_dscr_fifo_error",
+ "bmb_rc_pkt9_rls_error",
+ "bmb_rc_pkt9_protocol_error",
+ "bmb_rc_pkt9_side_fifo_error",
+ "bmb_rc_pkt9_req_fifo_error",
+ "bmb_rc_pkt9_blk_fifo_error",
+ "bmb_rc_pkt9_rls_left_fifo_error",
+ "bmb_rc_pkt9_strt_ptr_fifo_error",
+ "bmb_rc_pkt9_second_ptr_fifo_error",
+ "bmb_rc_pkt9_rsp_fifo_error",
+ "bmb_rc_pkt9_dscr_fifo_error",
+ "bmb_wc4_protocol_error",
+ "bmb_wc5_protocol_error",
+ "bmb_wc6_protocol_error",
+ "bmb_wc7_protocol_error",
+ "bmb_wc8_protocol_error",
+ "bmb_wc9_protocol_error",
+ "bmb_wc4_inp_fifo_error",
+ "bmb_wc4_sop_fifo_error",
+ "bmb_wc4_queue_fifo_error",
+ "bmb_wc4_free_point_fifo_error",
+ "bmb_wc4_next_point_fifo_error",
+ "bmb_wc4_strt_fifo_error",
+ "bmb_wc4_second_dscr_fifo_error",
+ "bmb_wc4_pkt_avail_fifo_error",
+ "bmb_wc4_cos_cnt_fifo_error",
+ "bmb_wc4_notify_fifo_error",
+ "bmb_wc4_ll_req_fifo_error",
+ "bmb_wc4_ll_pa_cnt_error",
+ "bmb_wc4_bb_pa_cnt_error",
+ "bmb_wc5_inp_fifo_error",
+ "bmb_wc5_sop_fifo_error",
+ "bmb_wc5_queue_fifo_error",
+ "bmb_wc5_free_point_fifo_error",
+ "bmb_wc5_next_point_fifo_error",
+ "bmb_wc5_strt_fifo_error",
+ "bmb_wc5_second_dscr_fifo_error",
+ "bmb_wc5_pkt_avail_fifo_error",
+ "bmb_wc5_cos_cnt_fifo_error",
+ "bmb_wc5_notify_fifo_error",
+ "bmb_wc5_ll_req_fifo_error",
+ "bmb_wc5_ll_pa_cnt_error",
+ "bmb_wc5_bb_pa_cnt_error",
+ "bmb_wc6_inp_fifo_error",
+ "bmb_wc6_sop_fifo_error",
+ "bmb_wc6_queue_fifo_error",
+ "bmb_wc6_free_point_fifo_error",
+ "bmb_wc6_next_point_fifo_error",
+ "bmb_wc6_strt_fifo_error",
+ "bmb_wc6_second_dscr_fifo_error",
+ "bmb_wc6_pkt_avail_fifo_error",
+ "bmb_wc6_cos_cnt_fifo_error",
+ "bmb_wc6_notify_fifo_error",
+ "bmb_wc6_ll_req_fifo_error",
+ "bmb_wc6_ll_pa_cnt_error",
+ "bmb_wc6_bb_pa_cnt_error",
+ "bmb_wc7_inp_fifo_error",
+ "bmb_wc7_sop_fifo_error",
+ "bmb_wc7_queue_fifo_error",
+ "bmb_wc7_free_point_fifo_error",
+ "bmb_wc7_next_point_fifo_error",
+ "bmb_wc7_strt_fifo_error",
+ "bmb_wc7_second_dscr_fifo_error",
+ "bmb_wc7_pkt_avail_fifo_error",
+ "bmb_wc7_cos_cnt_fifo_error",
+ "bmb_wc7_notify_fifo_error",
+ "bmb_wc7_ll_req_fifo_error",
+ "bmb_wc7_ll_pa_cnt_error",
+ "bmb_wc7_bb_pa_cnt_error",
+ "bmb_wc8_inp_fifo_error",
+ "bmb_wc8_sop_fifo_error",
+ "bmb_wc8_queue_fifo_error",
+ "bmb_wc8_free_point_fifo_error",
+ "bmb_wc8_next_point_fifo_error",
+ "bmb_wc8_strt_fifo_error",
+ "bmb_wc8_second_dscr_fifo_error",
+ "bmb_wc8_pkt_avail_fifo_error",
+ "bmb_wc8_cos_cnt_fifo_error",
+ "bmb_wc8_notify_fifo_error",
+ "bmb_wc8_ll_req_fifo_error",
+ "bmb_wc8_ll_pa_cnt_error",
+ "bmb_wc8_bb_pa_cnt_error",
+ "bmb_wc9_inp_fifo_error",
+ "bmb_wc9_sop_fifo_error",
+ "bmb_wc9_queue_fifo_error",
+ "bmb_wc9_free_point_fifo_error",
+ "bmb_wc9_next_point_fifo_error",
+ "bmb_wc9_strt_fifo_error",
+ "bmb_wc9_second_dscr_fifo_error",
+ "bmb_wc9_pkt_avail_fifo_error",
+ "bmb_wc9_cos_cnt_fifo_error",
+ "bmb_wc9_notify_fifo_error",
+ "bmb_wc9_ll_req_fifo_error",
+ "bmb_wc9_ll_pa_cnt_error",
+ "bmb_wc9_bb_pa_cnt_error",
+ "bmb_rc9_sop_rc_out_sync_fifo_error",
+ "bmb_rc9_sop_out_sync_fifo_push_error",
+ "bmb_rc0_sop_pend_fifo_error",
+ "bmb_rc1_sop_pend_fifo_error",
+ "bmb_rc2_sop_pend_fifo_error",
+ "bmb_rc3_sop_pend_fifo_error",
+ "bmb_rc4_sop_pend_fifo_error",
+ "bmb_rc5_sop_pend_fifo_error",
+ "bmb_rc6_sop_pend_fifo_error",
+ "bmb_rc7_sop_pend_fifo_error",
+ "bmb_rc0_dscr_pend_fifo_error",
+ "bmb_rc1_dscr_pend_fifo_error",
+ "bmb_rc2_dscr_pend_fifo_error",
+ "bmb_rc3_dscr_pend_fifo_error",
+ "bmb_rc4_dscr_pend_fifo_error",
+ "bmb_rc5_dscr_pend_fifo_error",
+ "bmb_rc6_dscr_pend_fifo_error",
+ "bmb_rc7_dscr_pend_fifo_error",
+ "bmb_rc8_sop_inp_sync_fifo_push_error",
+ "bmb_rc9_sop_inp_sync_fifo_push_error",
+ "bmb_rc8_sop_out_sync_fifo_push_error",
+ "bmb_rc_gnt_pend_fifo_error",
+ "bmb_rc8_out_sync_fifo_push_error",
+ "bmb_rc9_out_sync_fifo_push_error",
+ "bmb_wc8_sync_fifo_push_error",
+ "bmb_wc9_sync_fifo_push_error",
+ "bmb_rc8_sop_rc_out_sync_fifo_error",
+ "bmb_rc_pkt7_dscr_fifo_error",
+};
+#else
+#define bmb_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 bmb_int0_bb_a0_attn_idx[16] = {
+ 0, 1, 3, 4, 6, 7, 9, 10, 12, 13, 15, 16, 17, 18, 20, 22,
+};
+
+static struct attn_hw_reg bmb_int0_bb_a0 = {
+ 0, 16, bmb_int0_bb_a0_attn_idx, 0x5400c0, 0x5400cc, 0x5400c8, 0x5400c4
+};
+
+static const u16 bmb_int1_bb_a0_attn_idx[28] = {
+ 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+};
+
+static struct attn_hw_reg bmb_int1_bb_a0 = {
+ 1, 28, bmb_int1_bb_a0_attn_idx, 0x5400d8, 0x5400e4, 0x5400e0, 0x5400dc
+};
+
+static const u16 bmb_int2_bb_a0_attn_idx[26] = {
+ 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
+ 69, 70, 71, 72, 73, 74, 75, 76,
+};
+
+static struct attn_hw_reg bmb_int2_bb_a0 = {
+ 2, 26, bmb_int2_bb_a0_attn_idx, 0x5400f0, 0x5400fc, 0x5400f8, 0x5400f4
+};
+
+static const u16 bmb_int3_bb_a0_attn_idx[31] = {
+ 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+};
+
+static struct attn_hw_reg bmb_int3_bb_a0 = {
+ 3, 31, bmb_int3_bb_a0_attn_idx, 0x540108, 0x540114, 0x540110, 0x54010c
+};
+
+static const u16 bmb_int4_bb_a0_attn_idx[27] = {
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
+ 122,
+ 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+};
+
+static struct attn_hw_reg bmb_int4_bb_a0 = {
+ 4, 27, bmb_int4_bb_a0_attn_idx, 0x540120, 0x54012c, 0x540128, 0x540124
+};
+
+static const u16 bmb_int5_bb_a0_attn_idx[29] = {
+ 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148,
+ 149,
+ 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
+};
+
+static struct attn_hw_reg bmb_int5_bb_a0 = {
+ 5, 29, bmb_int5_bb_a0_attn_idx, 0x540138, 0x540144, 0x540140, 0x54013c
+};
+
+static const u16 bmb_int6_bb_a0_attn_idx[30] = {
+ 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
+ 178,
+ 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192,
+ 193,
+};
+
+static struct attn_hw_reg bmb_int6_bb_a0 = {
+ 6, 30, bmb_int6_bb_a0_attn_idx, 0x540150, 0x54015c, 0x540158, 0x540154
+};
+
+static const u16 bmb_int7_bb_a0_attn_idx[32] = {
+ 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207,
+ 208,
+ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+ 223, 224,
+ 225,
+};
+
+static struct attn_hw_reg bmb_int7_bb_a0 = {
+ 7, 32, bmb_int7_bb_a0_attn_idx, 0x540168, 0x540174, 0x540170, 0x54016c
+};
+
+static const u16 bmb_int8_bb_a0_attn_idx[32] = {
+ 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
+ 240,
+ 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
+ 255, 256,
+ 257,
+};
+
+static struct attn_hw_reg bmb_int8_bb_a0 = {
+ 8, 32, bmb_int8_bb_a0_attn_idx, 0x540184, 0x540190, 0x54018c, 0x540188
+};
+
+static const u16 bmb_int9_bb_a0_attn_idx[32] = {
+ 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271,
+ 272,
+ 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
+ 287, 288,
+ 289,
+};
+
+static struct attn_hw_reg bmb_int9_bb_a0 = {
+ 9, 32, bmb_int9_bb_a0_attn_idx, 0x54019c, 0x5401a8, 0x5401a4, 0x5401a0
+};
+
+static const u16 bmb_int10_bb_a0_attn_idx[3] = {
+ 290, 291, 292,
+};
+
+static struct attn_hw_reg bmb_int10_bb_a0 = {
+ 10, 3, bmb_int10_bb_a0_attn_idx, 0x5401b4, 0x5401c0, 0x5401bc, 0x5401b8
+};
+
+static const u16 bmb_int11_bb_a0_attn_idx[4] = {
+ 293, 294, 295, 296,
+};
+
+static struct attn_hw_reg bmb_int11_bb_a0 = {
+ 11, 4, bmb_int11_bb_a0_attn_idx, 0x5401cc, 0x5401d8, 0x5401d4, 0x5401d0
+};
+
+static struct attn_hw_reg *bmb_int_bb_a0_regs[12] = {
+ &bmb_int0_bb_a0, &bmb_int1_bb_a0, &bmb_int2_bb_a0, &bmb_int3_bb_a0,
+ &bmb_int4_bb_a0, &bmb_int5_bb_a0, &bmb_int6_bb_a0, &bmb_int7_bb_a0,
+ &bmb_int8_bb_a0, &bmb_int9_bb_a0,
+ &bmb_int10_bb_a0, &bmb_int11_bb_a0,
+};
+
+static const u16 bmb_int0_bb_b0_attn_idx[16] = {
+ 0, 1, 3, 4, 6, 7, 9, 10, 12, 13, 15, 16, 17, 18, 20, 22,
+};
+
+static struct attn_hw_reg bmb_int0_bb_b0 = {
+ 0, 16, bmb_int0_bb_b0_attn_idx, 0x5400c0, 0x5400cc, 0x5400c8, 0x5400c4
+};
+
+static const u16 bmb_int1_bb_b0_attn_idx[28] = {
+ 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+};
+
+static struct attn_hw_reg bmb_int1_bb_b0 = {
+ 1, 28, bmb_int1_bb_b0_attn_idx, 0x5400d8, 0x5400e4, 0x5400e0, 0x5400dc
+};
+
+static const u16 bmb_int2_bb_b0_attn_idx[26] = {
+ 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
+ 69, 70, 71, 72, 73, 74, 75, 76,
+};
+
+static struct attn_hw_reg bmb_int2_bb_b0 = {
+ 2, 26, bmb_int2_bb_b0_attn_idx, 0x5400f0, 0x5400fc, 0x5400f8, 0x5400f4
+};
+
+static const u16 bmb_int3_bb_b0_attn_idx[31] = {
+ 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+};
+
+static struct attn_hw_reg bmb_int3_bb_b0 = {
+ 3, 31, bmb_int3_bb_b0_attn_idx, 0x540108, 0x540114, 0x540110, 0x54010c
+};
+
+static const u16 bmb_int4_bb_b0_attn_idx[27] = {
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
+ 122,
+ 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+};
+
+static struct attn_hw_reg bmb_int4_bb_b0 = {
+ 4, 27, bmb_int4_bb_b0_attn_idx, 0x540120, 0x54012c, 0x540128, 0x540124
+};
+
+static const u16 bmb_int5_bb_b0_attn_idx[29] = {
+ 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148,
+ 149,
+ 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
+};
+
+static struct attn_hw_reg bmb_int5_bb_b0 = {
+ 5, 29, bmb_int5_bb_b0_attn_idx, 0x540138, 0x540144, 0x540140, 0x54013c
+};
+
+static const u16 bmb_int6_bb_b0_attn_idx[30] = {
+ 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
+ 178,
+ 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192,
+ 193,
+};
+
+static struct attn_hw_reg bmb_int6_bb_b0 = {
+ 6, 30, bmb_int6_bb_b0_attn_idx, 0x540150, 0x54015c, 0x540158, 0x540154
+};
+
+static const u16 bmb_int7_bb_b0_attn_idx[32] = {
+ 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207,
+ 208,
+ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+ 223, 224,
+ 225,
+};
+
+static struct attn_hw_reg bmb_int7_bb_b0 = {
+ 7, 32, bmb_int7_bb_b0_attn_idx, 0x540168, 0x540174, 0x540170, 0x54016c
+};
+
+static const u16 bmb_int8_bb_b0_attn_idx[32] = {
+ 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
+ 240,
+ 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
+ 255, 256,
+ 257,
+};
+
+static struct attn_hw_reg bmb_int8_bb_b0 = {
+ 8, 32, bmb_int8_bb_b0_attn_idx, 0x540184, 0x540190, 0x54018c, 0x540188
+};
+
+static const u16 bmb_int9_bb_b0_attn_idx[32] = {
+ 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271,
+ 272,
+ 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
+ 287, 288,
+ 289,
+};
+
+static struct attn_hw_reg bmb_int9_bb_b0 = {
+ 9, 32, bmb_int9_bb_b0_attn_idx, 0x54019c, 0x5401a8, 0x5401a4, 0x5401a0
+};
+
+static const u16 bmb_int10_bb_b0_attn_idx[3] = {
+ 290, 291, 292,
+};
+
+static struct attn_hw_reg bmb_int10_bb_b0 = {
+ 10, 3, bmb_int10_bb_b0_attn_idx, 0x5401b4, 0x5401c0, 0x5401bc, 0x5401b8
+};
+
+static const u16 bmb_int11_bb_b0_attn_idx[4] = {
+ 293, 294, 295, 296,
+};
+
+static struct attn_hw_reg bmb_int11_bb_b0 = {
+ 11, 4, bmb_int11_bb_b0_attn_idx, 0x5401cc, 0x5401d8, 0x5401d4, 0x5401d0
+};
+
+static struct attn_hw_reg *bmb_int_bb_b0_regs[12] = {
+ &bmb_int0_bb_b0, &bmb_int1_bb_b0, &bmb_int2_bb_b0, &bmb_int3_bb_b0,
+ &bmb_int4_bb_b0, &bmb_int5_bb_b0, &bmb_int6_bb_b0, &bmb_int7_bb_b0,
+ &bmb_int8_bb_b0, &bmb_int9_bb_b0,
+ &bmb_int10_bb_b0, &bmb_int11_bb_b0,
+};
+
+static const u16 bmb_int0_k2_attn_idx[16] = {
+ 0, 1, 3, 4, 6, 7, 9, 10, 12, 13, 15, 16, 17, 18, 20, 22,
+};
+
+static struct attn_hw_reg bmb_int0_k2 = {
+ 0, 16, bmb_int0_k2_attn_idx, 0x5400c0, 0x5400cc, 0x5400c8, 0x5400c4
+};
+
+static const u16 bmb_int1_k2_attn_idx[28] = {
+ 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+};
+
+static struct attn_hw_reg bmb_int1_k2 = {
+ 1, 28, bmb_int1_k2_attn_idx, 0x5400d8, 0x5400e4, 0x5400e0, 0x5400dc
+};
+
+static const u16 bmb_int2_k2_attn_idx[26] = {
+ 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
+ 69, 70, 71, 72, 73, 74, 75, 76,
+};
+
+static struct attn_hw_reg bmb_int2_k2 = {
+ 2, 26, bmb_int2_k2_attn_idx, 0x5400f0, 0x5400fc, 0x5400f8, 0x5400f4
+};
+
+static const u16 bmb_int3_k2_attn_idx[31] = {
+ 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+};
+
+static struct attn_hw_reg bmb_int3_k2 = {
+ 3, 31, bmb_int3_k2_attn_idx, 0x540108, 0x540114, 0x540110, 0x54010c
+};
+
+static const u16 bmb_int4_k2_attn_idx[27] = {
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
+ 122,
+ 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+};
+
+static struct attn_hw_reg bmb_int4_k2 = {
+ 4, 27, bmb_int4_k2_attn_idx, 0x540120, 0x54012c, 0x540128, 0x540124
+};
+
+static const u16 bmb_int5_k2_attn_idx[29] = {
+ 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148,
+ 149,
+ 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
+};
+
+static struct attn_hw_reg bmb_int5_k2 = {
+ 5, 29, bmb_int5_k2_attn_idx, 0x540138, 0x540144, 0x540140, 0x54013c
+};
+
+static const u16 bmb_int6_k2_attn_idx[30] = {
+ 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
+ 178,
+ 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192,
+ 193,
+};
+
+static struct attn_hw_reg bmb_int6_k2 = {
+ 6, 30, bmb_int6_k2_attn_idx, 0x540150, 0x54015c, 0x540158, 0x540154
+};
+
+static const u16 bmb_int7_k2_attn_idx[32] = {
+ 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207,
+ 208,
+ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+ 223, 224,
+ 225,
+};
+
+static struct attn_hw_reg bmb_int7_k2 = {
+ 7, 32, bmb_int7_k2_attn_idx, 0x540168, 0x540174, 0x540170, 0x54016c
+};
+
+static const u16 bmb_int8_k2_attn_idx[32] = {
+ 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
+ 240,
+ 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
+ 255, 256,
+ 257,
+};
+
+static struct attn_hw_reg bmb_int8_k2 = {
+ 8, 32, bmb_int8_k2_attn_idx, 0x540184, 0x540190, 0x54018c, 0x540188
+};
+
+static const u16 bmb_int9_k2_attn_idx[32] = {
+ 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271,
+ 272,
+ 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
+ 287, 288,
+ 289,
+};
+
+static struct attn_hw_reg bmb_int9_k2 = {
+ 9, 32, bmb_int9_k2_attn_idx, 0x54019c, 0x5401a8, 0x5401a4, 0x5401a0
+};
+
+static const u16 bmb_int10_k2_attn_idx[3] = {
+ 290, 291, 292,
+};
+
+static struct attn_hw_reg bmb_int10_k2 = {
+ 10, 3, bmb_int10_k2_attn_idx, 0x5401b4, 0x5401c0, 0x5401bc, 0x5401b8
+};
+
+static const u16 bmb_int11_k2_attn_idx[4] = {
+ 293, 294, 295, 296,
+};
+
+static struct attn_hw_reg bmb_int11_k2 = {
+ 11, 4, bmb_int11_k2_attn_idx, 0x5401cc, 0x5401d8, 0x5401d4, 0x5401d0
+};
+
+static struct attn_hw_reg *bmb_int_k2_regs[12] = {
+ &bmb_int0_k2, &bmb_int1_k2, &bmb_int2_k2, &bmb_int3_k2, &bmb_int4_k2,
+ &bmb_int5_k2, &bmb_int6_k2, &bmb_int7_k2, &bmb_int8_k2, &bmb_int9_k2,
+ &bmb_int10_k2, &bmb_int11_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *bmb_prty_attn_desc[61] = {
+ "bmb_ll_bank0_mem_prty",
+ "bmb_ll_bank1_mem_prty",
+ "bmb_ll_bank2_mem_prty",
+ "bmb_ll_bank3_mem_prty",
+ "bmb_datapath_registers",
+ "bmb_mem001_i_ecc_rf_int",
+ "bmb_mem008_i_ecc_rf_int",
+ "bmb_mem009_i_ecc_rf_int",
+ "bmb_mem010_i_ecc_rf_int",
+ "bmb_mem011_i_ecc_rf_int",
+ "bmb_mem012_i_ecc_rf_int",
+ "bmb_mem013_i_ecc_rf_int",
+ "bmb_mem014_i_ecc_rf_int",
+ "bmb_mem015_i_ecc_rf_int",
+ "bmb_mem016_i_ecc_rf_int",
+ "bmb_mem002_i_ecc_rf_int",
+ "bmb_mem003_i_ecc_rf_int",
+ "bmb_mem004_i_ecc_rf_int",
+ "bmb_mem005_i_ecc_rf_int",
+ "bmb_mem006_i_ecc_rf_int",
+ "bmb_mem007_i_ecc_rf_int",
+ "bmb_mem059_i_mem_prty",
+ "bmb_mem060_i_mem_prty",
+ "bmb_mem037_i_mem_prty",
+ "bmb_mem038_i_mem_prty",
+ "bmb_mem039_i_mem_prty",
+ "bmb_mem040_i_mem_prty",
+ "bmb_mem041_i_mem_prty",
+ "bmb_mem042_i_mem_prty",
+ "bmb_mem043_i_mem_prty",
+ "bmb_mem044_i_mem_prty",
+ "bmb_mem045_i_mem_prty",
+ "bmb_mem046_i_mem_prty",
+ "bmb_mem047_i_mem_prty",
+ "bmb_mem048_i_mem_prty",
+ "bmb_mem049_i_mem_prty",
+ "bmb_mem050_i_mem_prty",
+ "bmb_mem051_i_mem_prty",
+ "bmb_mem052_i_mem_prty",
+ "bmb_mem053_i_mem_prty",
+ "bmb_mem054_i_mem_prty",
+ "bmb_mem055_i_mem_prty",
+ "bmb_mem056_i_mem_prty",
+ "bmb_mem057_i_mem_prty",
+ "bmb_mem058_i_mem_prty",
+ "bmb_mem033_i_mem_prty",
+ "bmb_mem034_i_mem_prty",
+ "bmb_mem035_i_mem_prty",
+ "bmb_mem036_i_mem_prty",
+ "bmb_mem021_i_mem_prty",
+ "bmb_mem022_i_mem_prty",
+ "bmb_mem023_i_mem_prty",
+ "bmb_mem024_i_mem_prty",
+ "bmb_mem025_i_mem_prty",
+ "bmb_mem026_i_mem_prty",
+ "bmb_mem027_i_mem_prty",
+ "bmb_mem028_i_mem_prty",
+ "bmb_mem029_i_mem_prty",
+ "bmb_mem030_i_mem_prty",
+ "bmb_mem031_i_mem_prty",
+ "bmb_mem032_i_mem_prty",
+};
+#else
+#define bmb_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 bmb_prty1_bb_a0_attn_idx[31] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+ 24,
+ 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg bmb_prty1_bb_a0 = {
+ 0, 31, bmb_prty1_bb_a0_attn_idx, 0x540400, 0x54040c, 0x540408, 0x540404
+};
+
+static const u16 bmb_prty2_bb_a0_attn_idx[25] = {
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+ 54, 55, 56, 57, 58, 59, 60,
+};
+
+static struct attn_hw_reg bmb_prty2_bb_a0 = {
+ 1, 25, bmb_prty2_bb_a0_attn_idx, 0x540410, 0x54041c, 0x540418, 0x540414
+};
+
+static struct attn_hw_reg *bmb_prty_bb_a0_regs[2] = {
+ &bmb_prty1_bb_a0, &bmb_prty2_bb_a0,
+};
+
+static const u16 bmb_prty0_bb_b0_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg bmb_prty0_bb_b0 = {
+ 0, 5, bmb_prty0_bb_b0_attn_idx, 0x5401dc, 0x5401e8, 0x5401e4, 0x5401e0
+};
+
+static const u16 bmb_prty1_bb_b0_attn_idx[31] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+ 24,
+ 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg bmb_prty1_bb_b0 = {
+ 1, 31, bmb_prty1_bb_b0_attn_idx, 0x540400, 0x54040c, 0x540408, 0x540404
+};
+
+static const u16 bmb_prty2_bb_b0_attn_idx[15] = {
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+};
+
+static struct attn_hw_reg bmb_prty2_bb_b0 = {
+ 2, 15, bmb_prty2_bb_b0_attn_idx, 0x540410, 0x54041c, 0x540418, 0x540414
+};
+
+static struct attn_hw_reg *bmb_prty_bb_b0_regs[3] = {
+ &bmb_prty0_bb_b0, &bmb_prty1_bb_b0, &bmb_prty2_bb_b0,
+};
+
+static const u16 bmb_prty0_k2_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg bmb_prty0_k2 = {
+ 0, 5, bmb_prty0_k2_attn_idx, 0x5401dc, 0x5401e8, 0x5401e4, 0x5401e0
+};
+
+static const u16 bmb_prty1_k2_attn_idx[31] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+ 24,
+ 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg bmb_prty1_k2 = {
+ 1, 31, bmb_prty1_k2_attn_idx, 0x540400, 0x54040c, 0x540408, 0x540404
+};
+
+static const u16 bmb_prty2_k2_attn_idx[15] = {
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+};
+
+static struct attn_hw_reg bmb_prty2_k2 = {
+ 2, 15, bmb_prty2_k2_attn_idx, 0x540410, 0x54041c, 0x540418, 0x540414
+};
+
+static struct attn_hw_reg *bmb_prty_k2_regs[3] = {
+ &bmb_prty0_k2, &bmb_prty1_k2, &bmb_prty2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pcie_int_attn_desc[17] = {
+ "pcie_address_error",
+ "pcie_link_down_detect",
+ "pcie_link_up_detect",
+ "pcie_cfg_link_eq_req_int",
+ "pcie_pcie_bandwidth_change_detect",
+ "pcie_early_hot_reset_detect",
+ "pcie_hot_reset_detect",
+ "pcie_l1_entry_detect",
+ "pcie_l1_exit_detect",
+ "pcie_ltssm_state_match_detect",
+ "pcie_fc_timeout_detect",
+ "pcie_pme_turnoff_message_detect",
+ "pcie_cfg_send_cor_err",
+ "pcie_cfg_send_nf_err",
+ "pcie_cfg_send_f_err",
+ "pcie_qoverflow_detect",
+ "pcie_vdm_detect",
+};
+#else
+#define pcie_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pcie_int0_k2_attn_idx[17] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg pcie_int0_k2 = {
+ 0, 17, pcie_int0_k2_attn_idx, 0x547a0, 0x547ac, 0x547a8, 0x547a4
+};
+
+static struct attn_hw_reg *pcie_int_k2_regs[1] = {
+ &pcie_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pcie_prty_attn_desc[24] = {
+ "pcie_mem003_i_ecc_rf_int",
+ "pcie_mem004_i_ecc_rf_int",
+ "pcie_mem008_i_mem_prty",
+ "pcie_mem007_i_mem_prty",
+ "pcie_mem005_i_mem_prty",
+ "pcie_mem006_i_mem_prty",
+ "pcie_mem001_i_mem_prty",
+ "pcie_mem002_i_mem_prty",
+ "pcie_mem001_i_ecc_rf_int",
+ "pcie_mem005_i_ecc_rf_int",
+ "pcie_mem010_i_ecc_rf_int",
+ "pcie_mem009_i_ecc_rf_int",
+ "pcie_mem007_i_ecc_rf_int",
+ "pcie_mem004_i_mem_prty_0",
+ "pcie_mem004_i_mem_prty_1",
+ "pcie_mem004_i_mem_prty_2",
+ "pcie_mem004_i_mem_prty_3",
+ "pcie_mem011_i_mem_prty_1",
+ "pcie_mem011_i_mem_prty_2",
+ "pcie_mem012_i_mem_prty_1",
+ "pcie_mem012_i_mem_prty_2",
+ "pcie_app_parity_errs_0",
+ "pcie_app_parity_errs_1",
+ "pcie_app_parity_errs_2",
+};
+#else
+#define pcie_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pcie_prty1_bb_a0_attn_idx[17] = {
+ 0, 2, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+};
+
+static struct attn_hw_reg pcie_prty1_bb_a0 = {
+ 0, 17, pcie_prty1_bb_a0_attn_idx, 0x54000, 0x5400c, 0x54008, 0x54004
+};
+
+static struct attn_hw_reg *pcie_prty_bb_a0_regs[1] = {
+ &pcie_prty1_bb_a0,
+};
+
+static const u16 pcie_prty1_bb_b0_attn_idx[17] = {
+ 0, 2, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+};
+
+static struct attn_hw_reg pcie_prty1_bb_b0 = {
+ 0, 17, pcie_prty1_bb_b0_attn_idx, 0x54000, 0x5400c, 0x54008, 0x54004
+};
+
+static struct attn_hw_reg *pcie_prty_bb_b0_regs[1] = {
+ &pcie_prty1_bb_b0,
+};
+
+static const u16 pcie_prty1_k2_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg pcie_prty1_k2 = {
+ 0, 8, pcie_prty1_k2_attn_idx, 0x54000, 0x5400c, 0x54008, 0x54004
+};
+
+static const u16 pcie_prty0_k2_attn_idx[3] = {
+ 21, 22, 23,
+};
+
+static struct attn_hw_reg pcie_prty0_k2 = {
+ 1, 3, pcie_prty0_k2_attn_idx, 0x547b0, 0x547bc, 0x547b8, 0x547b4
+};
+
+static struct attn_hw_reg *pcie_prty_k2_regs[2] = {
+ &pcie_prty1_k2, &pcie_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *mcp2_prty_attn_desc[13] = {
+ "mcp2_rom_parity",
+ "mcp2_mem001_i_ecc_rf_int",
+ "mcp2_mem006_i_ecc_0_rf_int",
+ "mcp2_mem006_i_ecc_1_rf_int",
+ "mcp2_mem006_i_ecc_2_rf_int",
+ "mcp2_mem006_i_ecc_3_rf_int",
+ "mcp2_mem007_i_ecc_rf_int",
+ "mcp2_mem004_i_mem_prty",
+ "mcp2_mem003_i_mem_prty",
+ "mcp2_mem002_i_mem_prty",
+ "mcp2_mem009_i_mem_prty",
+ "mcp2_mem008_i_mem_prty",
+ "mcp2_mem005_i_mem_prty",
+};
+#else
+#define mcp2_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 mcp2_prty0_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg mcp2_prty0_bb_a0 = {
+ 0, 1, mcp2_prty0_bb_a0_attn_idx, 0x52040, 0x5204c, 0x52048, 0x52044
+};
+
+static const u16 mcp2_prty1_bb_a0_attn_idx[12] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg mcp2_prty1_bb_a0 = {
+ 1, 12, mcp2_prty1_bb_a0_attn_idx, 0x52204, 0x52210, 0x5220c, 0x52208
+};
+
+static struct attn_hw_reg *mcp2_prty_bb_a0_regs[2] = {
+ &mcp2_prty0_bb_a0, &mcp2_prty1_bb_a0,
+};
+
+static const u16 mcp2_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg mcp2_prty0_bb_b0 = {
+ 0, 1, mcp2_prty0_bb_b0_attn_idx, 0x52040, 0x5204c, 0x52048, 0x52044
+};
+
+static const u16 mcp2_prty1_bb_b0_attn_idx[12] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg mcp2_prty1_bb_b0 = {
+ 1, 12, mcp2_prty1_bb_b0_attn_idx, 0x52204, 0x52210, 0x5220c, 0x52208
+};
+
+static struct attn_hw_reg *mcp2_prty_bb_b0_regs[2] = {
+ &mcp2_prty0_bb_b0, &mcp2_prty1_bb_b0,
+};
+
+static const u16 mcp2_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg mcp2_prty0_k2 = {
+ 0, 1, mcp2_prty0_k2_attn_idx, 0x52040, 0x5204c, 0x52048, 0x52044
+};
+
+static const u16 mcp2_prty1_k2_attn_idx[12] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg mcp2_prty1_k2 = {
+ 1, 12, mcp2_prty1_k2_attn_idx, 0x52204, 0x52210, 0x5220c, 0x52208
+};
+
+static struct attn_hw_reg *mcp2_prty_k2_regs[2] = {
+ &mcp2_prty0_k2, &mcp2_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswhst_int_attn_desc[18] = {
+ "pswhst_address_error",
+ "pswhst_hst_src_fifo1_err",
+ "pswhst_hst_src_fifo2_err",
+ "pswhst_hst_src_fifo3_err",
+ "pswhst_hst_src_fifo4_err",
+ "pswhst_hst_src_fifo5_err",
+ "pswhst_hst_hdr_sync_fifo_err",
+ "pswhst_hst_data_sync_fifo_err",
+ "pswhst_hst_cpl_sync_fifo_err",
+ "pswhst_hst_vf_disabled_access",
+ "pswhst_hst_permission_violation",
+ "pswhst_hst_incorrect_access",
+ "pswhst_hst_src_fifo6_err",
+ "pswhst_hst_src_fifo7_err",
+ "pswhst_hst_src_fifo8_err",
+ "pswhst_hst_src_fifo9_err",
+ "pswhst_hst_source_credit_violation",
+ "pswhst_hst_timeout",
+};
+#else
+#define pswhst_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswhst_int0_bb_a0_attn_idx[18] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg pswhst_int0_bb_a0 = {
+ 0, 18, pswhst_int0_bb_a0_attn_idx, 0x2a0180, 0x2a018c, 0x2a0188,
+ 0x2a0184
+};
+
+static struct attn_hw_reg *pswhst_int_bb_a0_regs[1] = {
+ &pswhst_int0_bb_a0,
+};
+
+static const u16 pswhst_int0_bb_b0_attn_idx[18] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg pswhst_int0_bb_b0 = {
+ 0, 18, pswhst_int0_bb_b0_attn_idx, 0x2a0180, 0x2a018c, 0x2a0188,
+ 0x2a0184
+};
+
+static struct attn_hw_reg *pswhst_int_bb_b0_regs[1] = {
+ &pswhst_int0_bb_b0,
+};
+
+static const u16 pswhst_int0_k2_attn_idx[18] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg pswhst_int0_k2 = {
+ 0, 18, pswhst_int0_k2_attn_idx, 0x2a0180, 0x2a018c, 0x2a0188, 0x2a0184
+};
+
+static struct attn_hw_reg *pswhst_int_k2_regs[1] = {
+ &pswhst_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswhst_prty_attn_desc[18] = {
+ "pswhst_datapath_registers",
+ "pswhst_mem006_i_mem_prty",
+ "pswhst_mem007_i_mem_prty",
+ "pswhst_mem005_i_mem_prty",
+ "pswhst_mem002_i_mem_prty",
+ "pswhst_mem003_i_mem_prty",
+ "pswhst_mem001_i_mem_prty",
+ "pswhst_mem008_i_mem_prty",
+ "pswhst_mem004_i_mem_prty",
+ "pswhst_mem009_i_mem_prty",
+ "pswhst_mem010_i_mem_prty",
+ "pswhst_mem016_i_mem_prty",
+ "pswhst_mem012_i_mem_prty",
+ "pswhst_mem013_i_mem_prty",
+ "pswhst_mem014_i_mem_prty",
+ "pswhst_mem015_i_mem_prty",
+ "pswhst_mem011_i_mem_prty",
+ "pswhst_mem017_i_mem_prty",
+};
+#else
+#define pswhst_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswhst_prty1_bb_a0_attn_idx[17] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg pswhst_prty1_bb_a0 = {
+ 0, 17, pswhst_prty1_bb_a0_attn_idx, 0x2a0200, 0x2a020c, 0x2a0208,
+ 0x2a0204
+};
+
+static struct attn_hw_reg *pswhst_prty_bb_a0_regs[1] = {
+ &pswhst_prty1_bb_a0,
+};
+
+static const u16 pswhst_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswhst_prty0_bb_b0 = {
+ 0, 1, pswhst_prty0_bb_b0_attn_idx, 0x2a0190, 0x2a019c, 0x2a0198,
+ 0x2a0194
+};
+
+static const u16 pswhst_prty1_bb_b0_attn_idx[17] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg pswhst_prty1_bb_b0 = {
+ 1, 17, pswhst_prty1_bb_b0_attn_idx, 0x2a0200, 0x2a020c, 0x2a0208,
+ 0x2a0204
+};
+
+static struct attn_hw_reg *pswhst_prty_bb_b0_regs[2] = {
+ &pswhst_prty0_bb_b0, &pswhst_prty1_bb_b0,
+};
+
+static const u16 pswhst_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswhst_prty0_k2 = {
+ 0, 1, pswhst_prty0_k2_attn_idx, 0x2a0190, 0x2a019c, 0x2a0198, 0x2a0194
+};
+
+static const u16 pswhst_prty1_k2_attn_idx[17] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg pswhst_prty1_k2 = {
+ 1, 17, pswhst_prty1_k2_attn_idx, 0x2a0200, 0x2a020c, 0x2a0208, 0x2a0204
+};
+
+static struct attn_hw_reg *pswhst_prty_k2_regs[2] = {
+ &pswhst_prty0_k2, &pswhst_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswhst2_int_attn_desc[5] = {
+ "pswhst2_address_error",
+ "pswhst2_hst_header_fifo_err",
+ "pswhst2_hst_data_fifo_err",
+ "pswhst2_hst_cpl_fifo_err",
+ "pswhst2_hst_ireq_fifo_err",
+};
+#else
+#define pswhst2_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswhst2_int0_bb_a0_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pswhst2_int0_bb_a0 = {
+ 0, 5, pswhst2_int0_bb_a0_attn_idx, 0x29e180, 0x29e18c, 0x29e188,
+ 0x29e184
+};
+
+static struct attn_hw_reg *pswhst2_int_bb_a0_regs[1] = {
+ &pswhst2_int0_bb_a0,
+};
+
+static const u16 pswhst2_int0_bb_b0_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pswhst2_int0_bb_b0 = {
+ 0, 5, pswhst2_int0_bb_b0_attn_idx, 0x29e180, 0x29e18c, 0x29e188,
+ 0x29e184
+};
+
+static struct attn_hw_reg *pswhst2_int_bb_b0_regs[1] = {
+ &pswhst2_int0_bb_b0,
+};
+
+static const u16 pswhst2_int0_k2_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pswhst2_int0_k2 = {
+ 0, 5, pswhst2_int0_k2_attn_idx, 0x29e180, 0x29e18c, 0x29e188, 0x29e184
+};
+
+static struct attn_hw_reg *pswhst2_int_k2_regs[1] = {
+ &pswhst2_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswhst2_prty_attn_desc[1] = {
+ "pswhst2_datapath_registers",
+};
+#else
+#define pswhst2_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswhst2_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswhst2_prty0_bb_b0 = {
+ 0, 1, pswhst2_prty0_bb_b0_attn_idx, 0x29e190, 0x29e19c, 0x29e198,
+ 0x29e194
+};
+
+static struct attn_hw_reg *pswhst2_prty_bb_b0_regs[1] = {
+ &pswhst2_prty0_bb_b0,
+};
+
+static const u16 pswhst2_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswhst2_prty0_k2 = {
+ 0, 1, pswhst2_prty0_k2_attn_idx, 0x29e190, 0x29e19c, 0x29e198, 0x29e194
+};
+
+static struct attn_hw_reg *pswhst2_prty_k2_regs[1] = {
+ &pswhst2_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswrd_int_attn_desc[3] = {
+ "pswrd_address_error",
+ "pswrd_pop_error",
+ "pswrd_pop_pbf_error",
+};
+#else
+#define pswrd_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswrd_int0_bb_a0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg pswrd_int0_bb_a0 = {
+ 0, 3, pswrd_int0_bb_a0_attn_idx, 0x29c180, 0x29c18c, 0x29c188, 0x29c184
+};
+
+static struct attn_hw_reg *pswrd_int_bb_a0_regs[1] = {
+ &pswrd_int0_bb_a0,
+};
+
+static const u16 pswrd_int0_bb_b0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg pswrd_int0_bb_b0 = {
+ 0, 3, pswrd_int0_bb_b0_attn_idx, 0x29c180, 0x29c18c, 0x29c188, 0x29c184
+};
+
+static struct attn_hw_reg *pswrd_int_bb_b0_regs[1] = {
+ &pswrd_int0_bb_b0,
+};
+
+static const u16 pswrd_int0_k2_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg pswrd_int0_k2 = {
+ 0, 3, pswrd_int0_k2_attn_idx, 0x29c180, 0x29c18c, 0x29c188, 0x29c184
+};
+
+static struct attn_hw_reg *pswrd_int_k2_regs[1] = {
+ &pswrd_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswrd_prty_attn_desc[1] = {
+ "pswrd_datapath_registers",
+};
+#else
+#define pswrd_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswrd_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswrd_prty0_bb_b0 = {
+ 0, 1, pswrd_prty0_bb_b0_attn_idx, 0x29c190, 0x29c19c, 0x29c198,
+ 0x29c194
+};
+
+static struct attn_hw_reg *pswrd_prty_bb_b0_regs[1] = {
+ &pswrd_prty0_bb_b0,
+};
+
+static const u16 pswrd_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswrd_prty0_k2 = {
+ 0, 1, pswrd_prty0_k2_attn_idx, 0x29c190, 0x29c19c, 0x29c198, 0x29c194
+};
+
+static struct attn_hw_reg *pswrd_prty_k2_regs[1] = {
+ &pswrd_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswrd2_int_attn_desc[5] = {
+ "pswrd2_address_error",
+ "pswrd2_sr_fifo_error",
+ "pswrd2_blk_fifo_error",
+ "pswrd2_push_error",
+ "pswrd2_push_pbf_error",
+};
+#else
+#define pswrd2_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswrd2_int0_bb_a0_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pswrd2_int0_bb_a0 = {
+ 0, 5, pswrd2_int0_bb_a0_attn_idx, 0x29d180, 0x29d18c, 0x29d188,
+ 0x29d184
+};
+
+static struct attn_hw_reg *pswrd2_int_bb_a0_regs[1] = {
+ &pswrd2_int0_bb_a0,
+};
+
+static const u16 pswrd2_int0_bb_b0_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pswrd2_int0_bb_b0 = {
+ 0, 5, pswrd2_int0_bb_b0_attn_idx, 0x29d180, 0x29d18c, 0x29d188,
+ 0x29d184
+};
+
+static struct attn_hw_reg *pswrd2_int_bb_b0_regs[1] = {
+ &pswrd2_int0_bb_b0,
+};
+
+static const u16 pswrd2_int0_k2_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pswrd2_int0_k2 = {
+ 0, 5, pswrd2_int0_k2_attn_idx, 0x29d180, 0x29d18c, 0x29d188, 0x29d184
+};
+
+static struct attn_hw_reg *pswrd2_int_k2_regs[1] = {
+ &pswrd2_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswrd2_prty_attn_desc[36] = {
+ "pswrd2_datapath_registers",
+ "pswrd2_mem017_i_ecc_rf_int",
+ "pswrd2_mem018_i_ecc_rf_int",
+ "pswrd2_mem019_i_ecc_rf_int",
+ "pswrd2_mem020_i_ecc_rf_int",
+ "pswrd2_mem021_i_ecc_rf_int",
+ "pswrd2_mem022_i_ecc_rf_int",
+ "pswrd2_mem023_i_ecc_rf_int",
+ "pswrd2_mem024_i_ecc_rf_int",
+ "pswrd2_mem025_i_ecc_rf_int",
+ "pswrd2_mem015_i_ecc_rf_int",
+ "pswrd2_mem034_i_mem_prty",
+ "pswrd2_mem032_i_mem_prty",
+ "pswrd2_mem028_i_mem_prty",
+ "pswrd2_mem033_i_mem_prty",
+ "pswrd2_mem030_i_mem_prty",
+ "pswrd2_mem029_i_mem_prty",
+ "pswrd2_mem031_i_mem_prty",
+ "pswrd2_mem027_i_mem_prty",
+ "pswrd2_mem026_i_mem_prty",
+ "pswrd2_mem001_i_mem_prty",
+ "pswrd2_mem007_i_mem_prty",
+ "pswrd2_mem008_i_mem_prty",
+ "pswrd2_mem009_i_mem_prty",
+ "pswrd2_mem010_i_mem_prty",
+ "pswrd2_mem011_i_mem_prty",
+ "pswrd2_mem012_i_mem_prty",
+ "pswrd2_mem013_i_mem_prty",
+ "pswrd2_mem014_i_mem_prty",
+ "pswrd2_mem002_i_mem_prty",
+ "pswrd2_mem003_i_mem_prty",
+ "pswrd2_mem004_i_mem_prty",
+ "pswrd2_mem005_i_mem_prty",
+ "pswrd2_mem006_i_mem_prty",
+ "pswrd2_mem016_i_mem_prty",
+ "pswrd2_mem015_i_mem_prty",
+};
+#else
+#define pswrd2_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswrd2_prty1_bb_a0_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 22,
+ 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+};
+
+static struct attn_hw_reg pswrd2_prty1_bb_a0 = {
+ 0, 31, pswrd2_prty1_bb_a0_attn_idx, 0x29d200, 0x29d20c, 0x29d208,
+ 0x29d204
+};
+
+static const u16 pswrd2_prty2_bb_a0_attn_idx[3] = {
+ 33, 34, 35,
+};
+
+static struct attn_hw_reg pswrd2_prty2_bb_a0 = {
+ 1, 3, pswrd2_prty2_bb_a0_attn_idx, 0x29d210, 0x29d21c, 0x29d218,
+ 0x29d214
+};
+
+static struct attn_hw_reg *pswrd2_prty_bb_a0_regs[2] = {
+ &pswrd2_prty1_bb_a0, &pswrd2_prty2_bb_a0,
+};
+
+static const u16 pswrd2_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswrd2_prty0_bb_b0 = {
+ 0, 1, pswrd2_prty0_bb_b0_attn_idx, 0x29d190, 0x29d19c, 0x29d198,
+ 0x29d194
+};
+
+static const u16 pswrd2_prty1_bb_b0_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pswrd2_prty1_bb_b0 = {
+ 1, 31, pswrd2_prty1_bb_b0_attn_idx, 0x29d200, 0x29d20c, 0x29d208,
+ 0x29d204
+};
+
+static const u16 pswrd2_prty2_bb_b0_attn_idx[3] = {
+ 32, 33, 34,
+};
+
+static struct attn_hw_reg pswrd2_prty2_bb_b0 = {
+ 2, 3, pswrd2_prty2_bb_b0_attn_idx, 0x29d210, 0x29d21c, 0x29d218,
+ 0x29d214
+};
+
+static struct attn_hw_reg *pswrd2_prty_bb_b0_regs[3] = {
+ &pswrd2_prty0_bb_b0, &pswrd2_prty1_bb_b0, &pswrd2_prty2_bb_b0,
+};
+
+static const u16 pswrd2_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswrd2_prty0_k2 = {
+ 0, 1, pswrd2_prty0_k2_attn_idx, 0x29d190, 0x29d19c, 0x29d198, 0x29d194
+};
+
+static const u16 pswrd2_prty1_k2_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pswrd2_prty1_k2 = {
+ 1, 31, pswrd2_prty1_k2_attn_idx, 0x29d200, 0x29d20c, 0x29d208, 0x29d204
+};
+
+static const u16 pswrd2_prty2_k2_attn_idx[3] = {
+ 32, 33, 34,
+};
+
+static struct attn_hw_reg pswrd2_prty2_k2 = {
+ 2, 3, pswrd2_prty2_k2_attn_idx, 0x29d210, 0x29d21c, 0x29d218, 0x29d214
+};
+
+static struct attn_hw_reg *pswrd2_prty_k2_regs[3] = {
+ &pswrd2_prty0_k2, &pswrd2_prty1_k2, &pswrd2_prty2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswwr_int_attn_desc[16] = {
+ "pswwr_address_error",
+ "pswwr_src_fifo_overflow",
+ "pswwr_qm_fifo_overflow",
+ "pswwr_tm_fifo_overflow",
+ "pswwr_usdm_fifo_overflow",
+ "pswwr_usdmdp_fifo_overflow",
+ "pswwr_xsdm_fifo_overflow",
+ "pswwr_tsdm_fifo_overflow",
+ "pswwr_cduwr_fifo_overflow",
+ "pswwr_dbg_fifo_overflow",
+ "pswwr_dmae_fifo_overflow",
+ "pswwr_hc_fifo_overflow",
+ "pswwr_msdm_fifo_overflow",
+ "pswwr_ysdm_fifo_overflow",
+ "pswwr_psdm_fifo_overflow",
+ "pswwr_m2p_fifo_overflow",
+};
+#else
+#define pswwr_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswwr_int0_bb_a0_attn_idx[16] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg pswwr_int0_bb_a0 = {
+ 0, 16, pswwr_int0_bb_a0_attn_idx, 0x29a180, 0x29a18c, 0x29a188,
+ 0x29a184
+};
+
+static struct attn_hw_reg *pswwr_int_bb_a0_regs[1] = {
+ &pswwr_int0_bb_a0,
+};
+
+static const u16 pswwr_int0_bb_b0_attn_idx[16] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg pswwr_int0_bb_b0 = {
+ 0, 16, pswwr_int0_bb_b0_attn_idx, 0x29a180, 0x29a18c, 0x29a188,
+ 0x29a184
+};
+
+static struct attn_hw_reg *pswwr_int_bb_b0_regs[1] = {
+ &pswwr_int0_bb_b0,
+};
+
+static const u16 pswwr_int0_k2_attn_idx[16] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg pswwr_int0_k2 = {
+ 0, 16, pswwr_int0_k2_attn_idx, 0x29a180, 0x29a18c, 0x29a188, 0x29a184
+};
+
+static struct attn_hw_reg *pswwr_int_k2_regs[1] = {
+ &pswwr_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswwr_prty_attn_desc[1] = {
+ "pswwr_datapath_registers",
+};
+#else
+#define pswwr_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswwr_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswwr_prty0_bb_b0 = {
+ 0, 1, pswwr_prty0_bb_b0_attn_idx, 0x29a190, 0x29a19c, 0x29a198,
+ 0x29a194
+};
+
+static struct attn_hw_reg *pswwr_prty_bb_b0_regs[1] = {
+ &pswwr_prty0_bb_b0,
+};
+
+static const u16 pswwr_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswwr_prty0_k2 = {
+ 0, 1, pswwr_prty0_k2_attn_idx, 0x29a190, 0x29a19c, 0x29a198, 0x29a194
+};
+
+static struct attn_hw_reg *pswwr_prty_k2_regs[1] = {
+ &pswwr_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswwr2_int_attn_desc[19] = {
+ "pswwr2_address_error",
+ "pswwr2_pglue_eop_error",
+ "pswwr2_pglue_lsr_error",
+ "pswwr2_tm_underflow",
+ "pswwr2_qm_underflow",
+ "pswwr2_src_underflow",
+ "pswwr2_usdm_underflow",
+ "pswwr2_tsdm_underflow",
+ "pswwr2_xsdm_underflow",
+ "pswwr2_usdmdp_underflow",
+ "pswwr2_cdu_underflow",
+ "pswwr2_dbg_underflow",
+ "pswwr2_dmae_underflow",
+ "pswwr2_hc_underflow",
+ "pswwr2_msdm_underflow",
+ "pswwr2_ysdm_underflow",
+ "pswwr2_psdm_underflow",
+ "pswwr2_m2p_underflow",
+ "pswwr2_pglue_eop_error_in_line",
+};
+#else
+#define pswwr2_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswwr2_int0_bb_a0_attn_idx[19] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+};
+
+static struct attn_hw_reg pswwr2_int0_bb_a0 = {
+ 0, 19, pswwr2_int0_bb_a0_attn_idx, 0x29b180, 0x29b18c, 0x29b188,
+ 0x29b184
+};
+
+static struct attn_hw_reg *pswwr2_int_bb_a0_regs[1] = {
+ &pswwr2_int0_bb_a0,
+};
+
+static const u16 pswwr2_int0_bb_b0_attn_idx[19] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+};
+
+static struct attn_hw_reg pswwr2_int0_bb_b0 = {
+ 0, 19, pswwr2_int0_bb_b0_attn_idx, 0x29b180, 0x29b18c, 0x29b188,
+ 0x29b184
+};
+
+static struct attn_hw_reg *pswwr2_int_bb_b0_regs[1] = {
+ &pswwr2_int0_bb_b0,
+};
+
+static const u16 pswwr2_int0_k2_attn_idx[19] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+};
+
+static struct attn_hw_reg pswwr2_int0_k2 = {
+ 0, 19, pswwr2_int0_k2_attn_idx, 0x29b180, 0x29b18c, 0x29b188, 0x29b184
+};
+
+static struct attn_hw_reg *pswwr2_int_k2_regs[1] = {
+ &pswwr2_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswwr2_prty_attn_desc[114] = {
+ "pswwr2_datapath_registers",
+ "pswwr2_mem008_i_ecc_rf_int",
+ "pswwr2_mem001_i_mem_prty",
+ "pswwr2_mem014_i_mem_prty_0",
+ "pswwr2_mem014_i_mem_prty_1",
+ "pswwr2_mem014_i_mem_prty_2",
+ "pswwr2_mem014_i_mem_prty_3",
+ "pswwr2_mem014_i_mem_prty_4",
+ "pswwr2_mem014_i_mem_prty_5",
+ "pswwr2_mem014_i_mem_prty_6",
+ "pswwr2_mem014_i_mem_prty_7",
+ "pswwr2_mem014_i_mem_prty_8",
+ "pswwr2_mem016_i_mem_prty_0",
+ "pswwr2_mem016_i_mem_prty_1",
+ "pswwr2_mem016_i_mem_prty_2",
+ "pswwr2_mem016_i_mem_prty_3",
+ "pswwr2_mem016_i_mem_prty_4",
+ "pswwr2_mem016_i_mem_prty_5",
+ "pswwr2_mem016_i_mem_prty_6",
+ "pswwr2_mem016_i_mem_prty_7",
+ "pswwr2_mem016_i_mem_prty_8",
+ "pswwr2_mem007_i_mem_prty_0",
+ "pswwr2_mem007_i_mem_prty_1",
+ "pswwr2_mem007_i_mem_prty_2",
+ "pswwr2_mem007_i_mem_prty_3",
+ "pswwr2_mem007_i_mem_prty_4",
+ "pswwr2_mem007_i_mem_prty_5",
+ "pswwr2_mem007_i_mem_prty_6",
+ "pswwr2_mem007_i_mem_prty_7",
+ "pswwr2_mem007_i_mem_prty_8",
+ "pswwr2_mem017_i_mem_prty_0",
+ "pswwr2_mem017_i_mem_prty_1",
+ "pswwr2_mem017_i_mem_prty_2",
+ "pswwr2_mem017_i_mem_prty_3",
+ "pswwr2_mem017_i_mem_prty_4",
+ "pswwr2_mem017_i_mem_prty_5",
+ "pswwr2_mem017_i_mem_prty_6",
+ "pswwr2_mem017_i_mem_prty_7",
+ "pswwr2_mem017_i_mem_prty_8",
+ "pswwr2_mem009_i_mem_prty_0",
+ "pswwr2_mem009_i_mem_prty_1",
+ "pswwr2_mem009_i_mem_prty_2",
+ "pswwr2_mem009_i_mem_prty_3",
+ "pswwr2_mem009_i_mem_prty_4",
+ "pswwr2_mem009_i_mem_prty_5",
+ "pswwr2_mem009_i_mem_prty_6",
+ "pswwr2_mem009_i_mem_prty_7",
+ "pswwr2_mem009_i_mem_prty_8",
+ "pswwr2_mem013_i_mem_prty_0",
+ "pswwr2_mem013_i_mem_prty_1",
+ "pswwr2_mem013_i_mem_prty_2",
+ "pswwr2_mem013_i_mem_prty_3",
+ "pswwr2_mem013_i_mem_prty_4",
+ "pswwr2_mem013_i_mem_prty_5",
+ "pswwr2_mem013_i_mem_prty_6",
+ "pswwr2_mem013_i_mem_prty_7",
+ "pswwr2_mem013_i_mem_prty_8",
+ "pswwr2_mem006_i_mem_prty_0",
+ "pswwr2_mem006_i_mem_prty_1",
+ "pswwr2_mem006_i_mem_prty_2",
+ "pswwr2_mem006_i_mem_prty_3",
+ "pswwr2_mem006_i_mem_prty_4",
+ "pswwr2_mem006_i_mem_prty_5",
+ "pswwr2_mem006_i_mem_prty_6",
+ "pswwr2_mem006_i_mem_prty_7",
+ "pswwr2_mem006_i_mem_prty_8",
+ "pswwr2_mem010_i_mem_prty_0",
+ "pswwr2_mem010_i_mem_prty_1",
+ "pswwr2_mem010_i_mem_prty_2",
+ "pswwr2_mem010_i_mem_prty_3",
+ "pswwr2_mem010_i_mem_prty_4",
+ "pswwr2_mem010_i_mem_prty_5",
+ "pswwr2_mem010_i_mem_prty_6",
+ "pswwr2_mem010_i_mem_prty_7",
+ "pswwr2_mem010_i_mem_prty_8",
+ "pswwr2_mem012_i_mem_prty",
+ "pswwr2_mem011_i_mem_prty_0",
+ "pswwr2_mem011_i_mem_prty_1",
+ "pswwr2_mem011_i_mem_prty_2",
+ "pswwr2_mem011_i_mem_prty_3",
+ "pswwr2_mem011_i_mem_prty_4",
+ "pswwr2_mem011_i_mem_prty_5",
+ "pswwr2_mem011_i_mem_prty_6",
+ "pswwr2_mem011_i_mem_prty_7",
+ "pswwr2_mem011_i_mem_prty_8",
+ "pswwr2_mem004_i_mem_prty_0",
+ "pswwr2_mem004_i_mem_prty_1",
+ "pswwr2_mem004_i_mem_prty_2",
+ "pswwr2_mem004_i_mem_prty_3",
+ "pswwr2_mem004_i_mem_prty_4",
+ "pswwr2_mem004_i_mem_prty_5",
+ "pswwr2_mem004_i_mem_prty_6",
+ "pswwr2_mem004_i_mem_prty_7",
+ "pswwr2_mem004_i_mem_prty_8",
+ "pswwr2_mem015_i_mem_prty_0",
+ "pswwr2_mem015_i_mem_prty_1",
+ "pswwr2_mem015_i_mem_prty_2",
+ "pswwr2_mem005_i_mem_prty_0",
+ "pswwr2_mem005_i_mem_prty_1",
+ "pswwr2_mem005_i_mem_prty_2",
+ "pswwr2_mem005_i_mem_prty_3",
+ "pswwr2_mem005_i_mem_prty_4",
+ "pswwr2_mem005_i_mem_prty_5",
+ "pswwr2_mem005_i_mem_prty_6",
+ "pswwr2_mem005_i_mem_prty_7",
+ "pswwr2_mem005_i_mem_prty_8",
+ "pswwr2_mem002_i_mem_prty_0",
+ "pswwr2_mem002_i_mem_prty_1",
+ "pswwr2_mem002_i_mem_prty_2",
+ "pswwr2_mem002_i_mem_prty_3",
+ "pswwr2_mem002_i_mem_prty_4",
+ "pswwr2_mem003_i_mem_prty_0",
+ "pswwr2_mem003_i_mem_prty_1",
+ "pswwr2_mem003_i_mem_prty_2",
+};
+#else
+#define pswwr2_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswwr2_prty1_bb_a0_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pswwr2_prty1_bb_a0 = {
+ 0, 31, pswwr2_prty1_bb_a0_attn_idx, 0x29b200, 0x29b20c, 0x29b208,
+ 0x29b204
+};
+
+static const u16 pswwr2_prty2_bb_a0_attn_idx[31] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+};
+
+static struct attn_hw_reg pswwr2_prty2_bb_a0 = {
+ 1, 31, pswwr2_prty2_bb_a0_attn_idx, 0x29b210, 0x29b21c, 0x29b218,
+ 0x29b214
+};
+
+static const u16 pswwr2_prty3_bb_a0_attn_idx[31] = {
+ 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93,
+};
+
+static struct attn_hw_reg pswwr2_prty3_bb_a0 = {
+ 2, 31, pswwr2_prty3_bb_a0_attn_idx, 0x29b220, 0x29b22c, 0x29b228,
+ 0x29b224
+};
+
+static const u16 pswwr2_prty4_bb_a0_attn_idx[20] = {
+ 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
+ 109,
+ 110, 111, 112, 113,
+};
+
+static struct attn_hw_reg pswwr2_prty4_bb_a0 = {
+ 3, 20, pswwr2_prty4_bb_a0_attn_idx, 0x29b230, 0x29b23c, 0x29b238,
+ 0x29b234
+};
+
+static struct attn_hw_reg *pswwr2_prty_bb_a0_regs[4] = {
+ &pswwr2_prty1_bb_a0, &pswwr2_prty2_bb_a0, &pswwr2_prty3_bb_a0,
+ &pswwr2_prty4_bb_a0,
+};
+
+static const u16 pswwr2_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswwr2_prty0_bb_b0 = {
+ 0, 1, pswwr2_prty0_bb_b0_attn_idx, 0x29b190, 0x29b19c, 0x29b198,
+ 0x29b194
+};
+
+static const u16 pswwr2_prty1_bb_b0_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pswwr2_prty1_bb_b0 = {
+ 1, 31, pswwr2_prty1_bb_b0_attn_idx, 0x29b200, 0x29b20c, 0x29b208,
+ 0x29b204
+};
+
+static const u16 pswwr2_prty2_bb_b0_attn_idx[31] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+};
+
+static struct attn_hw_reg pswwr2_prty2_bb_b0 = {
+ 2, 31, pswwr2_prty2_bb_b0_attn_idx, 0x29b210, 0x29b21c, 0x29b218,
+ 0x29b214
+};
+
+static const u16 pswwr2_prty3_bb_b0_attn_idx[31] = {
+ 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93,
+};
+
+static struct attn_hw_reg pswwr2_prty3_bb_b0 = {
+ 3, 31, pswwr2_prty3_bb_b0_attn_idx, 0x29b220, 0x29b22c, 0x29b228,
+ 0x29b224
+};
+
+static const u16 pswwr2_prty4_bb_b0_attn_idx[20] = {
+ 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
+ 109,
+ 110, 111, 112, 113,
+};
+
+static struct attn_hw_reg pswwr2_prty4_bb_b0 = {
+ 4, 20, pswwr2_prty4_bb_b0_attn_idx, 0x29b230, 0x29b23c, 0x29b238,
+ 0x29b234
+};
+
+static struct attn_hw_reg *pswwr2_prty_bb_b0_regs[5] = {
+ &pswwr2_prty0_bb_b0, &pswwr2_prty1_bb_b0, &pswwr2_prty2_bb_b0,
+ &pswwr2_prty3_bb_b0, &pswwr2_prty4_bb_b0,
+};
+
+static const u16 pswwr2_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswwr2_prty0_k2 = {
+ 0, 1, pswwr2_prty0_k2_attn_idx, 0x29b190, 0x29b19c, 0x29b198, 0x29b194
+};
+
+static const u16 pswwr2_prty1_k2_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pswwr2_prty1_k2 = {
+ 1, 31, pswwr2_prty1_k2_attn_idx, 0x29b200, 0x29b20c, 0x29b208, 0x29b204
+};
+
+static const u16 pswwr2_prty2_k2_attn_idx[31] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+};
+
+static struct attn_hw_reg pswwr2_prty2_k2 = {
+ 2, 31, pswwr2_prty2_k2_attn_idx, 0x29b210, 0x29b21c, 0x29b218, 0x29b214
+};
+
+static const u16 pswwr2_prty3_k2_attn_idx[31] = {
+ 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93,
+};
+
+static struct attn_hw_reg pswwr2_prty3_k2 = {
+ 3, 31, pswwr2_prty3_k2_attn_idx, 0x29b220, 0x29b22c, 0x29b228, 0x29b224
+};
+
+static const u16 pswwr2_prty4_k2_attn_idx[20] = {
+ 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
+ 109,
+ 110, 111, 112, 113,
+};
+
+static struct attn_hw_reg pswwr2_prty4_k2 = {
+ 4, 20, pswwr2_prty4_k2_attn_idx, 0x29b230, 0x29b23c, 0x29b238, 0x29b234
+};
+
+static struct attn_hw_reg *pswwr2_prty_k2_regs[5] = {
+ &pswwr2_prty0_k2, &pswwr2_prty1_k2, &pswwr2_prty2_k2, &pswwr2_prty3_k2,
+ &pswwr2_prty4_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswrq_int_attn_desc[21] = {
+ "pswrq_address_error",
+ "pswrq_pbf_fifo_overflow",
+ "pswrq_src_fifo_overflow",
+ "pswrq_qm_fifo_overflow",
+ "pswrq_tm_fifo_overflow",
+ "pswrq_usdm_fifo_overflow",
+ "pswrq_m2p_fifo_overflow",
+ "pswrq_xsdm_fifo_overflow",
+ "pswrq_tsdm_fifo_overflow",
+ "pswrq_ptu_fifo_overflow",
+ "pswrq_cduwr_fifo_overflow",
+ "pswrq_cdurd_fifo_overflow",
+ "pswrq_dmae_fifo_overflow",
+ "pswrq_hc_fifo_overflow",
+ "pswrq_dbg_fifo_overflow",
+ "pswrq_msdm_fifo_overflow",
+ "pswrq_ysdm_fifo_overflow",
+ "pswrq_psdm_fifo_overflow",
+ "pswrq_prm_fifo_overflow",
+ "pswrq_muld_fifo_overflow",
+ "pswrq_xyld_fifo_overflow",
+};
+#else
+#define pswrq_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswrq_int0_bb_a0_attn_idx[21] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+};
+
+static struct attn_hw_reg pswrq_int0_bb_a0 = {
+ 0, 21, pswrq_int0_bb_a0_attn_idx, 0x280180, 0x28018c, 0x280188,
+ 0x280184
+};
+
+static struct attn_hw_reg *pswrq_int_bb_a0_regs[1] = {
+ &pswrq_int0_bb_a0,
+};
+
+static const u16 pswrq_int0_bb_b0_attn_idx[21] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+};
+
+static struct attn_hw_reg pswrq_int0_bb_b0 = {
+ 0, 21, pswrq_int0_bb_b0_attn_idx, 0x280180, 0x28018c, 0x280188,
+ 0x280184
+};
+
+static struct attn_hw_reg *pswrq_int_bb_b0_regs[1] = {
+ &pswrq_int0_bb_b0,
+};
+
+static const u16 pswrq_int0_k2_attn_idx[21] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+};
+
+static struct attn_hw_reg pswrq_int0_k2 = {
+ 0, 21, pswrq_int0_k2_attn_idx, 0x280180, 0x28018c, 0x280188, 0x280184
+};
+
+static struct attn_hw_reg *pswrq_int_k2_regs[1] = {
+ &pswrq_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswrq_prty_attn_desc[1] = {
+ "pswrq_pxp_busip_parity",
+};
+#else
+#define pswrq_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswrq_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswrq_prty0_bb_b0 = {
+ 0, 1, pswrq_prty0_bb_b0_attn_idx, 0x280190, 0x28019c, 0x280198,
+ 0x280194
+};
+
+static struct attn_hw_reg *pswrq_prty_bb_b0_regs[1] = {
+ &pswrq_prty0_bb_b0,
+};
+
+static const u16 pswrq_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswrq_prty0_k2 = {
+ 0, 1, pswrq_prty0_k2_attn_idx, 0x280190, 0x28019c, 0x280198, 0x280194
+};
+
+static struct attn_hw_reg *pswrq_prty_k2_regs[1] = {
+ &pswrq_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswrq2_int_attn_desc[15] = {
+ "pswrq2_address_error",
+ "pswrq2_l2p_fifo_overflow",
+ "pswrq2_wdfifo_overflow",
+ "pswrq2_phyaddr_fifo_of",
+ "pswrq2_l2p_violation_1",
+ "pswrq2_l2p_violation_2",
+ "pswrq2_free_list_empty",
+ "pswrq2_elt_addr",
+ "pswrq2_l2p_vf_err",
+ "pswrq2_core_wdone_overflow",
+ "pswrq2_treq_fifo_underflow",
+ "pswrq2_treq_fifo_overflow",
+ "pswrq2_icpl_fifo_underflow",
+ "pswrq2_icpl_fifo_overflow",
+ "pswrq2_back2back_atc_response",
+};
+#else
+#define pswrq2_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswrq2_int0_bb_a0_attn_idx[15] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+};
+
+static struct attn_hw_reg pswrq2_int0_bb_a0 = {
+ 0, 15, pswrq2_int0_bb_a0_attn_idx, 0x240180, 0x24018c, 0x240188,
+ 0x240184
+};
+
+static struct attn_hw_reg *pswrq2_int_bb_a0_regs[1] = {
+ &pswrq2_int0_bb_a0,
+};
+
+static const u16 pswrq2_int0_bb_b0_attn_idx[15] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+};
+
+static struct attn_hw_reg pswrq2_int0_bb_b0 = {
+ 0, 15, pswrq2_int0_bb_b0_attn_idx, 0x240180, 0x24018c, 0x240188,
+ 0x240184
+};
+
+static struct attn_hw_reg *pswrq2_int_bb_b0_regs[1] = {
+ &pswrq2_int0_bb_b0,
+};
+
+static const u16 pswrq2_int0_k2_attn_idx[15] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+};
+
+static struct attn_hw_reg pswrq2_int0_k2 = {
+ 0, 15, pswrq2_int0_k2_attn_idx, 0x240180, 0x24018c, 0x240188, 0x240184
+};
+
+static struct attn_hw_reg *pswrq2_int_k2_regs[1] = {
+ &pswrq2_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswrq2_prty_attn_desc[11] = {
+ "pswrq2_mem004_i_ecc_rf_int",
+ "pswrq2_mem005_i_ecc_rf_int",
+ "pswrq2_mem001_i_ecc_rf_int",
+ "pswrq2_mem006_i_mem_prty",
+ "pswrq2_mem008_i_mem_prty",
+ "pswrq2_mem009_i_mem_prty",
+ "pswrq2_mem003_i_mem_prty",
+ "pswrq2_mem002_i_mem_prty",
+ "pswrq2_mem010_i_mem_prty",
+ "pswrq2_mem007_i_mem_prty",
+ "pswrq2_mem005_i_mem_prty",
+};
+#else
+#define pswrq2_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswrq2_prty1_bb_a0_attn_idx[9] = {
+ 0, 2, 3, 4, 5, 6, 7, 9, 10,
+};
+
+static struct attn_hw_reg pswrq2_prty1_bb_a0 = {
+ 0, 9, pswrq2_prty1_bb_a0_attn_idx, 0x240200, 0x24020c, 0x240208,
+ 0x240204
+};
+
+static struct attn_hw_reg *pswrq2_prty_bb_a0_regs[1] = {
+ &pswrq2_prty1_bb_a0,
+};
+
+static const u16 pswrq2_prty1_bb_b0_attn_idx[9] = {
+ 0, 2, 3, 4, 5, 6, 7, 9, 10,
+};
+
+static struct attn_hw_reg pswrq2_prty1_bb_b0 = {
+ 0, 9, pswrq2_prty1_bb_b0_attn_idx, 0x240200, 0x24020c, 0x240208,
+ 0x240204
+};
+
+static struct attn_hw_reg *pswrq2_prty_bb_b0_regs[1] = {
+ &pswrq2_prty1_bb_b0,
+};
+
+static const u16 pswrq2_prty1_k2_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg pswrq2_prty1_k2 = {
+ 0, 10, pswrq2_prty1_k2_attn_idx, 0x240200, 0x24020c, 0x240208, 0x240204
+};
+
+static struct attn_hw_reg *pswrq2_prty_k2_regs[1] = {
+ &pswrq2_prty1_k2,
+};
+
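+/*
+ * The per-revision index arrays above select entries from a shared
+ * description list rather than encoding a fixed bit layout: pswrq2 BB
+ * (A0 and B0) maps nine parity bits and skips descriptions 1 and 8, while
+ * K2 maps ten contiguous ones, and the A0 variants of pswrd2 and pswwr2
+ * have no prty0 register at all.
+ */
+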
+#ifdef ATTN_DESC
+static const char *pglcs_int_attn_desc[2] = {
+ "pglcs_address_error",
+ "pglcs_rasdp_error",
+};
+#else
+#define pglcs_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pglcs_int0_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pglcs_int0_bb_a0 = {
+ 0, 1, pglcs_int0_bb_a0_attn_idx, 0x1d00, 0x1d0c, 0x1d08, 0x1d04
+};
+
+static struct attn_hw_reg *pglcs_int_bb_a0_regs[1] = {
+ &pglcs_int0_bb_a0,
+};
+
+static const u16 pglcs_int0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pglcs_int0_bb_b0 = {
+ 0, 1, pglcs_int0_bb_b0_attn_idx, 0x1d00, 0x1d0c, 0x1d08, 0x1d04
+};
+
+static struct attn_hw_reg *pglcs_int_bb_b0_regs[1] = {
+ &pglcs_int0_bb_b0,
+};
+
+static const u16 pglcs_int0_k2_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg pglcs_int0_k2 = {
+ 0, 2, pglcs_int0_k2_attn_idx, 0x1d00, 0x1d0c, 0x1d08, 0x1d04
+};
+
+static struct attn_hw_reg *pglcs_int_k2_regs[1] = {
+ &pglcs_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *dmae_int_attn_desc[2] = {
+ "dmae_address_error",
+ "dmae_pci_rd_buf_err",
+};
+#else
+#define dmae_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 dmae_int0_bb_a0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg dmae_int0_bb_a0 = {
+ 0, 2, dmae_int0_bb_a0_attn_idx, 0xc180, 0xc18c, 0xc188, 0xc184
+};
+
+static struct attn_hw_reg *dmae_int_bb_a0_regs[1] = {
+ &dmae_int0_bb_a0,
+};
+
+static const u16 dmae_int0_bb_b0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg dmae_int0_bb_b0 = {
+ 0, 2, dmae_int0_bb_b0_attn_idx, 0xc180, 0xc18c, 0xc188, 0xc184
+};
+
+static struct attn_hw_reg *dmae_int_bb_b0_regs[1] = {
+ &dmae_int0_bb_b0,
+};
+
+static const u16 dmae_int0_k2_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg dmae_int0_k2 = {
+ 0, 2, dmae_int0_k2_attn_idx, 0xc180, 0xc18c, 0xc188, 0xc184
+};
+
+static struct attn_hw_reg *dmae_int_k2_regs[1] = {
+ &dmae_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *dmae_prty_attn_desc[3] = {
+ "dmae_mem002_i_mem_prty",
+ "dmae_mem001_i_mem_prty",
+ "dmae_mem003_i_mem_prty",
+};
+#else
+#define dmae_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 dmae_prty1_bb_a0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg dmae_prty1_bb_a0 = {
+ 0, 3, dmae_prty1_bb_a0_attn_idx, 0xc200, 0xc20c, 0xc208, 0xc204
+};
+
+static struct attn_hw_reg *dmae_prty_bb_a0_regs[1] = {
+ &dmae_prty1_bb_a0,
+};
+
+static const u16 dmae_prty1_bb_b0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg dmae_prty1_bb_b0 = {
+ 0, 3, dmae_prty1_bb_b0_attn_idx, 0xc200, 0xc20c, 0xc208, 0xc204
+};
+
+static struct attn_hw_reg *dmae_prty_bb_b0_regs[1] = {
+ &dmae_prty1_bb_b0,
+};
+
+static const u16 dmae_prty1_k2_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg dmae_prty1_k2 = {
+ 0, 3, dmae_prty1_k2_attn_idx, 0xc200, 0xc20c, 0xc208, 0xc204
+};
+
+static struct attn_hw_reg *dmae_prty_k2_regs[1] = {
+ &dmae_prty1_k2,
+};
+
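+/*
+ * Illustrative sketch only -- not part of the generated tables and not
+ * necessarily how the driver consumes them: decode a latched attention
+ * status set such as dmae_prty_k2_regs[] above against its description
+ * array.  Field names are assumed from the attn_hw_reg definition earlier
+ * in this file; ecore_rd(), DP_NOTICE() and OSAL_NULL are the usual
+ * base-driver helpers.
+ */
+static void example_dump_attn(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, struct attn_hw_reg **regs, int num_regs,
+ const char **desc)
+{
+ int i, bit;
+
+ for (i = 0; i < num_regs; i++) {
+  /* Latched status bits of this attention register */
+  u32 sts = ecore_rd(p_hwfn, p_ptt, regs[i]->sts_addr);
+
+  for (bit = 0; bit < regs[i]->num_of_bits; bit++)
+   if ((sts & (1U << bit)) && desc != OSAL_NULL)
+    DP_NOTICE(p_hwfn, false, "attn set: %s\n",
+     desc[regs[i]->bit_attn_idx[bit]]);
+ }
+}
+
+/* e.g.: example_dump_attn(p_hwfn, p_ptt, dmae_prty_k2_regs, 1,
+ *       dmae_prty_attn_desc);
+ */
+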
+#ifdef ATTN_DESC
+static const char *ptu_int_attn_desc[8] = {
+ "ptu_address_error",
+ "ptu_atc_tcpl_to_not_pend",
+ "ptu_atc_gpa_multiple_hits",
+ "ptu_atc_rcpl_to_empty_cnt",
+ "ptu_atc_tcpl_error",
+ "ptu_atc_inv_halt",
+ "ptu_atc_reuse_transpend",
+ "ptu_atc_ireq_less_than_stu",
+};
+#else
+#define ptu_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ptu_int0_bb_a0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg ptu_int0_bb_a0 = {
+ 0, 8, ptu_int0_bb_a0_attn_idx, 0x560180, 0x56018c, 0x560188, 0x560184
+};
+
+static struct attn_hw_reg *ptu_int_bb_a0_regs[1] = {
+ &ptu_int0_bb_a0,
+};
+
+static const u16 ptu_int0_bb_b0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg ptu_int0_bb_b0 = {
+ 0, 8, ptu_int0_bb_b0_attn_idx, 0x560180, 0x56018c, 0x560188, 0x560184
+};
+
+static struct attn_hw_reg *ptu_int_bb_b0_regs[1] = {
+ &ptu_int0_bb_b0,
+};
+
+static const u16 ptu_int0_k2_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg ptu_int0_k2 = {
+ 0, 8, ptu_int0_k2_attn_idx, 0x560180, 0x56018c, 0x560188, 0x560184
+};
+
+static struct attn_hw_reg *ptu_int_k2_regs[1] = {
+ &ptu_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ptu_prty_attn_desc[18] = {
+ "ptu_mem017_i_ecc_rf_int",
+ "ptu_mem018_i_mem_prty",
+ "ptu_mem006_i_mem_prty",
+ "ptu_mem001_i_mem_prty",
+ "ptu_mem002_i_mem_prty",
+ "ptu_mem003_i_mem_prty",
+ "ptu_mem004_i_mem_prty",
+ "ptu_mem005_i_mem_prty",
+ "ptu_mem009_i_mem_prty",
+ "ptu_mem010_i_mem_prty",
+ "ptu_mem016_i_mem_prty",
+ "ptu_mem007_i_mem_prty",
+ "ptu_mem015_i_mem_prty",
+ "ptu_mem013_i_mem_prty",
+ "ptu_mem012_i_mem_prty",
+ "ptu_mem014_i_mem_prty",
+ "ptu_mem011_i_mem_prty",
+ "ptu_mem008_i_mem_prty",
+};
+#else
+#define ptu_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 ptu_prty1_bb_a0_attn_idx[18] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg ptu_prty1_bb_a0 = {
+ 0, 18, ptu_prty1_bb_a0_attn_idx, 0x560200, 0x56020c, 0x560208, 0x560204
+};
+
+static struct attn_hw_reg *ptu_prty_bb_a0_regs[1] = {
+ &ptu_prty1_bb_a0,
+};
+
+static const u16 ptu_prty1_bb_b0_attn_idx[18] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg ptu_prty1_bb_b0 = {
+ 0, 18, ptu_prty1_bb_b0_attn_idx, 0x560200, 0x56020c, 0x560208, 0x560204
+};
+
+static struct attn_hw_reg *ptu_prty_bb_b0_regs[1] = {
+ &ptu_prty1_bb_b0,
+};
+
+static const u16 ptu_prty1_k2_attn_idx[18] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg ptu_prty1_k2 = {
+ 0, 18, ptu_prty1_k2_attn_idx, 0x560200, 0x56020c, 0x560208, 0x560204
+};
+
+static struct attn_hw_reg *ptu_prty_k2_regs[1] = {
+ &ptu_prty1_k2,
+};
+
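+/*
+ * The connection-manager blocks that follow (tcm, mcm, ucm, xcm, ycm, pcm)
+ * spread their interrupt attentions across up to three status registers
+ * (int0/int1/int2); the *_regs[] arrays collect them in ascending register
+ * index so they can be walked uniformly.
+ */
+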
+#ifdef ATTN_DESC
+static const char *tcm_int_attn_desc[41] = {
+ "tcm_address_error",
+ "tcm_is_storm_ovfl_err",
+ "tcm_is_storm_under_err",
+ "tcm_is_tsdm_ovfl_err",
+ "tcm_is_tsdm_under_err",
+ "tcm_is_msem_ovfl_err",
+ "tcm_is_msem_under_err",
+ "tcm_is_ysem_ovfl_err",
+ "tcm_is_ysem_under_err",
+ "tcm_is_dorq_ovfl_err",
+ "tcm_is_dorq_under_err",
+ "tcm_is_pbf_ovfl_err",
+ "tcm_is_pbf_under_err",
+ "tcm_is_prs_ovfl_err",
+ "tcm_is_prs_under_err",
+ "tcm_is_tm_ovfl_err",
+ "tcm_is_tm_under_err",
+ "tcm_is_qm_p_ovfl_err",
+ "tcm_is_qm_p_under_err",
+ "tcm_is_qm_s_ovfl_err",
+ "tcm_is_qm_s_under_err",
+ "tcm_is_grc_ovfl_err0",
+ "tcm_is_grc_under_err0",
+ "tcm_is_grc_ovfl_err1",
+ "tcm_is_grc_under_err1",
+ "tcm_is_grc_ovfl_err2",
+ "tcm_is_grc_under_err2",
+ "tcm_is_grc_ovfl_err3",
+ "tcm_is_grc_under_err3",
+ "tcm_in_prcs_tbl_ovfl",
+ "tcm_agg_con_data_buf_ovfl",
+ "tcm_agg_con_cmd_buf_ovfl",
+ "tcm_sm_con_data_buf_ovfl",
+ "tcm_sm_con_cmd_buf_ovfl",
+ "tcm_agg_task_data_buf_ovfl",
+ "tcm_agg_task_cmd_buf_ovfl",
+ "tcm_sm_task_data_buf_ovfl",
+ "tcm_sm_task_cmd_buf_ovfl",
+ "tcm_fi_desc_input_violate",
+ "tcm_se_desc_input_violate",
+ "tcm_qmreg_more4",
+};
+#else
+#define tcm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 tcm_int0_bb_a0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg tcm_int0_bb_a0 = {
+ 0, 8, tcm_int0_bb_a0_attn_idx, 0x1180180, 0x118018c, 0x1180188,
+ 0x1180184
+};
+
+static const u16 tcm_int1_bb_a0_attn_idx[32] = {
+ 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+ 26,
+ 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+};
+
+static struct attn_hw_reg tcm_int1_bb_a0 = {
+ 1, 32, tcm_int1_bb_a0_attn_idx, 0x1180190, 0x118019c, 0x1180198,
+ 0x1180194
+};
+
+static const u16 tcm_int2_bb_a0_attn_idx[1] = {
+ 40,
+};
+
+static struct attn_hw_reg tcm_int2_bb_a0 = {
+ 2, 1, tcm_int2_bb_a0_attn_idx, 0x11801a0, 0x11801ac, 0x11801a8,
+ 0x11801a4
+};
+
+static struct attn_hw_reg *tcm_int_bb_a0_regs[3] = {
+ &tcm_int0_bb_a0, &tcm_int1_bb_a0, &tcm_int2_bb_a0,
+};
+
+static const u16 tcm_int0_bb_b0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg tcm_int0_bb_b0 = {
+ 0, 8, tcm_int0_bb_b0_attn_idx, 0x1180180, 0x118018c, 0x1180188,
+ 0x1180184
+};
+
+static const u16 tcm_int1_bb_b0_attn_idx[32] = {
+ 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+ 26,
+ 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+};
+
+static struct attn_hw_reg tcm_int1_bb_b0 = {
+ 1, 32, tcm_int1_bb_b0_attn_idx, 0x1180190, 0x118019c, 0x1180198,
+ 0x1180194
+};
+
+static const u16 tcm_int2_bb_b0_attn_idx[1] = {
+ 40,
+};
+
+static struct attn_hw_reg tcm_int2_bb_b0 = {
+ 2, 1, tcm_int2_bb_b0_attn_idx, 0x11801a0, 0x11801ac, 0x11801a8,
+ 0x11801a4
+};
+
+static struct attn_hw_reg *tcm_int_bb_b0_regs[3] = {
+ &tcm_int0_bb_b0, &tcm_int1_bb_b0, &tcm_int2_bb_b0,
+};
+
+static const u16 tcm_int0_k2_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg tcm_int0_k2 = {
+ 0, 8, tcm_int0_k2_attn_idx, 0x1180180, 0x118018c, 0x1180188, 0x1180184
+};
+
+static const u16 tcm_int1_k2_attn_idx[32] = {
+ 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+ 26,
+ 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+};
+
+static struct attn_hw_reg tcm_int1_k2 = {
+ 1, 32, tcm_int1_k2_attn_idx, 0x1180190, 0x118019c, 0x1180198, 0x1180194
+};
+
+static const u16 tcm_int2_k2_attn_idx[1] = {
+ 40,
+};
+
+static struct attn_hw_reg tcm_int2_k2 = {
+ 2, 1, tcm_int2_k2_attn_idx, 0x11801a0, 0x11801ac, 0x11801a8, 0x11801a4
+};
+
+static struct attn_hw_reg *tcm_int_k2_regs[3] = {
+ &tcm_int0_k2, &tcm_int1_k2, &tcm_int2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tcm_prty_attn_desc[51] = {
+ "tcm_mem026_i_ecc_rf_int",
+ "tcm_mem003_i_ecc_0_rf_int",
+ "tcm_mem003_i_ecc_1_rf_int",
+ "tcm_mem022_i_ecc_0_rf_int",
+ "tcm_mem022_i_ecc_1_rf_int",
+ "tcm_mem005_i_ecc_0_rf_int",
+ "tcm_mem005_i_ecc_1_rf_int",
+ "tcm_mem024_i_ecc_0_rf_int",
+ "tcm_mem024_i_ecc_1_rf_int",
+ "tcm_mem018_i_mem_prty",
+ "tcm_mem019_i_mem_prty",
+ "tcm_mem015_i_mem_prty",
+ "tcm_mem016_i_mem_prty",
+ "tcm_mem017_i_mem_prty",
+ "tcm_mem010_i_mem_prty",
+ "tcm_mem020_i_mem_prty",
+ "tcm_mem011_i_mem_prty",
+ "tcm_mem012_i_mem_prty",
+ "tcm_mem013_i_mem_prty",
+ "tcm_mem014_i_mem_prty",
+ "tcm_mem029_i_mem_prty",
+ "tcm_mem028_i_mem_prty",
+ "tcm_mem027_i_mem_prty",
+ "tcm_mem004_i_mem_prty",
+ "tcm_mem023_i_mem_prty",
+ "tcm_mem006_i_mem_prty",
+ "tcm_mem025_i_mem_prty",
+ "tcm_mem021_i_mem_prty",
+ "tcm_mem007_i_mem_prty_0",
+ "tcm_mem007_i_mem_prty_1",
+ "tcm_mem008_i_mem_prty",
+ "tcm_mem025_i_ecc_rf_int",
+ "tcm_mem021_i_ecc_0_rf_int",
+ "tcm_mem021_i_ecc_1_rf_int",
+ "tcm_mem023_i_ecc_0_rf_int",
+ "tcm_mem023_i_ecc_1_rf_int",
+ "tcm_mem026_i_mem_prty",
+ "tcm_mem022_i_mem_prty",
+ "tcm_mem024_i_mem_prty",
+ "tcm_mem009_i_mem_prty",
+ "tcm_mem024_i_ecc_rf_int",
+ "tcm_mem001_i_ecc_0_rf_int",
+ "tcm_mem001_i_ecc_1_rf_int",
+ "tcm_mem019_i_ecc_0_rf_int",
+ "tcm_mem019_i_ecc_1_rf_int",
+ "tcm_mem022_i_ecc_rf_int",
+ "tcm_mem002_i_mem_prty",
+ "tcm_mem005_i_mem_prty_0",
+ "tcm_mem005_i_mem_prty_1",
+ "tcm_mem001_i_mem_prty",
+ "tcm_mem007_i_mem_prty",
+};
+#else
+#define tcm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 tcm_prty1_bb_a0_attn_idx[31] = {
+ 1, 2, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 22, 23, 24, 25, 26, 30, 32,
+ 33, 36, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+};
+
+static struct attn_hw_reg tcm_prty1_bb_a0 = {
+ 0, 31, tcm_prty1_bb_a0_attn_idx, 0x1180200, 0x118020c, 0x1180208,
+ 0x1180204
+};
+
+static const u16 tcm_prty2_bb_a0_attn_idx[3] = {
+ 50, 21, 20,
+};
+
+static struct attn_hw_reg tcm_prty2_bb_a0 = {
+ 1, 3, tcm_prty2_bb_a0_attn_idx, 0x1180210, 0x118021c, 0x1180218,
+ 0x1180214
+};
+
+static struct attn_hw_reg *tcm_prty_bb_a0_regs[2] = {
+ &tcm_prty1_bb_a0, &tcm_prty2_bb_a0,
+};
+
+static const u16 tcm_prty1_bb_b0_attn_idx[31] = {
+ 1, 2, 5, 6, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 25,
+ 28,
+ 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+};
+
+static struct attn_hw_reg tcm_prty1_bb_b0 = {
+ 0, 31, tcm_prty1_bb_b0_attn_idx, 0x1180200, 0x118020c, 0x1180208,
+ 0x1180204
+};
+
+static const u16 tcm_prty2_bb_b0_attn_idx[2] = {
+ 49, 46,
+};
+
+static struct attn_hw_reg tcm_prty2_bb_b0 = {
+ 1, 2, tcm_prty2_bb_b0_attn_idx, 0x1180210, 0x118021c, 0x1180218,
+ 0x1180214
+};
+
+static struct attn_hw_reg *tcm_prty_bb_b0_regs[2] = {
+ &tcm_prty1_bb_b0, &tcm_prty2_bb_b0,
+};
+
+static const u16 tcm_prty1_k2_attn_idx[31] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+};
+
+static struct attn_hw_reg tcm_prty1_k2 = {
+ 0, 31, tcm_prty1_k2_attn_idx, 0x1180200, 0x118020c, 0x1180208,
+ 0x1180204
+};
+
+static const u16 tcm_prty2_k2_attn_idx[3] = {
+ 39, 49, 46,
+};
+
+static struct attn_hw_reg tcm_prty2_k2 = {
+ 1, 3, tcm_prty2_k2_attn_idx, 0x1180210, 0x118021c, 0x1180218, 0x1180214
+};
+
+static struct attn_hw_reg *tcm_prty_k2_regs[2] = {
+ &tcm_prty1_k2, &tcm_prty2_k2,
+};
+
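+/*
+ * As tcm_prty2_bb_a0 above shows ({50, 21, 20}), a prty2 index array need
+ * not be contiguous or ascending: each entry only names which description
+ * string the corresponding status bit maps to, and that mapping differs
+ * between the A0, B0 and K2 revisions.
+ */
+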
+#ifdef ATTN_DESC
+static const char *mcm_int_attn_desc[41] = {
+ "mcm_address_error",
+ "mcm_is_storm_ovfl_err",
+ "mcm_is_storm_under_err",
+ "mcm_is_msdm_ovfl_err",
+ "mcm_is_msdm_under_err",
+ "mcm_is_ysdm_ovfl_err",
+ "mcm_is_ysdm_under_err",
+ "mcm_is_usdm_ovfl_err",
+ "mcm_is_usdm_under_err",
+ "mcm_is_tmld_ovfl_err",
+ "mcm_is_tmld_under_err",
+ "mcm_is_usem_ovfl_err",
+ "mcm_is_usem_under_err",
+ "mcm_is_ysem_ovfl_err",
+ "mcm_is_ysem_under_err",
+ "mcm_is_pbf_ovfl_err",
+ "mcm_is_pbf_under_err",
+ "mcm_is_qm_p_ovfl_err",
+ "mcm_is_qm_p_under_err",
+ "mcm_is_qm_s_ovfl_err",
+ "mcm_is_qm_s_under_err",
+ "mcm_is_grc_ovfl_err0",
+ "mcm_is_grc_under_err0",
+ "mcm_is_grc_ovfl_err1",
+ "mcm_is_grc_under_err1",
+ "mcm_is_grc_ovfl_err2",
+ "mcm_is_grc_under_err2",
+ "mcm_is_grc_ovfl_err3",
+ "mcm_is_grc_under_err3",
+ "mcm_in_prcs_tbl_ovfl",
+ "mcm_agg_con_data_buf_ovfl",
+ "mcm_agg_con_cmd_buf_ovfl",
+ "mcm_sm_con_data_buf_ovfl",
+ "mcm_sm_con_cmd_buf_ovfl",
+ "mcm_agg_task_data_buf_ovfl",
+ "mcm_agg_task_cmd_buf_ovfl",
+ "mcm_sm_task_data_buf_ovfl",
+ "mcm_sm_task_cmd_buf_ovfl",
+ "mcm_fi_desc_input_violate",
+ "mcm_se_desc_input_violate",
+ "mcm_qmreg_more4",
+};
+#else
+#define mcm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 mcm_int0_bb_a0_attn_idx[14] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+};
+
+static struct attn_hw_reg mcm_int0_bb_a0 = {
+ 0, 14, mcm_int0_bb_a0_attn_idx, 0x1200180, 0x120018c, 0x1200188,
+ 0x1200184
+};
+
+static const u16 mcm_int1_bb_a0_attn_idx[26] = {
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+};
+
+static struct attn_hw_reg mcm_int1_bb_a0 = {
+ 1, 26, mcm_int1_bb_a0_attn_idx, 0x1200190, 0x120019c, 0x1200198,
+ 0x1200194
+};
+
+static const u16 mcm_int2_bb_a0_attn_idx[1] = {
+ 40,
+};
+
+static struct attn_hw_reg mcm_int2_bb_a0 = {
+ 2, 1, mcm_int2_bb_a0_attn_idx, 0x12001a0, 0x12001ac, 0x12001a8,
+ 0x12001a4
+};
+
+static struct attn_hw_reg *mcm_int_bb_a0_regs[3] = {
+ &mcm_int0_bb_a0, &mcm_int1_bb_a0, &mcm_int2_bb_a0,
+};
+
+static const u16 mcm_int0_bb_b0_attn_idx[14] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+};
+
+static struct attn_hw_reg mcm_int0_bb_b0 = {
+ 0, 14, mcm_int0_bb_b0_attn_idx, 0x1200180, 0x120018c, 0x1200188,
+ 0x1200184
+};
+
+static const u16 mcm_int1_bb_b0_attn_idx[26] = {
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+};
+
+static struct attn_hw_reg mcm_int1_bb_b0 = {
+ 1, 26, mcm_int1_bb_b0_attn_idx, 0x1200190, 0x120019c, 0x1200198,
+ 0x1200194
+};
+
+static const u16 mcm_int2_bb_b0_attn_idx[1] = {
+ 40,
+};
+
+static struct attn_hw_reg mcm_int2_bb_b0 = {
+ 2, 1, mcm_int2_bb_b0_attn_idx, 0x12001a0, 0x12001ac, 0x12001a8,
+ 0x12001a4
+};
+
+static struct attn_hw_reg *mcm_int_bb_b0_regs[3] = {
+ &mcm_int0_bb_b0, &mcm_int1_bb_b0, &mcm_int2_bb_b0,
+};
+
+static const u16 mcm_int0_k2_attn_idx[14] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+};
+
+static struct attn_hw_reg mcm_int0_k2 = {
+ 0, 14, mcm_int0_k2_attn_idx, 0x1200180, 0x120018c, 0x1200188, 0x1200184
+};
+
+static const u16 mcm_int1_k2_attn_idx[26] = {
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+};
+
+static struct attn_hw_reg mcm_int1_k2 = {
+ 1, 26, mcm_int1_k2_attn_idx, 0x1200190, 0x120019c, 0x1200198, 0x1200194
+};
+
+static const u16 mcm_int2_k2_attn_idx[1] = {
+ 40,
+};
+
+static struct attn_hw_reg mcm_int2_k2 = {
+ 2, 1, mcm_int2_k2_attn_idx, 0x12001a0, 0x12001ac, 0x12001a8, 0x12001a4
+};
+
+static struct attn_hw_reg *mcm_int_k2_regs[3] = {
+ &mcm_int0_k2, &mcm_int1_k2, &mcm_int2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *mcm_prty_attn_desc[46] = {
+ "mcm_mem028_i_ecc_rf_int",
+ "mcm_mem003_i_ecc_rf_int",
+ "mcm_mem023_i_ecc_0_rf_int",
+ "mcm_mem023_i_ecc_1_rf_int",
+ "mcm_mem005_i_ecc_0_rf_int",
+ "mcm_mem005_i_ecc_1_rf_int",
+ "mcm_mem025_i_ecc_0_rf_int",
+ "mcm_mem025_i_ecc_1_rf_int",
+ "mcm_mem026_i_ecc_rf_int",
+ "mcm_mem017_i_mem_prty",
+ "mcm_mem019_i_mem_prty",
+ "mcm_mem016_i_mem_prty",
+ "mcm_mem015_i_mem_prty",
+ "mcm_mem020_i_mem_prty",
+ "mcm_mem021_i_mem_prty",
+ "mcm_mem018_i_mem_prty",
+ "mcm_mem011_i_mem_prty",
+ "mcm_mem012_i_mem_prty",
+ "mcm_mem013_i_mem_prty",
+ "mcm_mem014_i_mem_prty",
+ "mcm_mem031_i_mem_prty",
+ "mcm_mem030_i_mem_prty",
+ "mcm_mem029_i_mem_prty",
+ "mcm_mem004_i_mem_prty",
+ "mcm_mem024_i_mem_prty",
+ "mcm_mem006_i_mem_prty",
+ "mcm_mem027_i_mem_prty",
+ "mcm_mem022_i_mem_prty",
+ "mcm_mem007_i_mem_prty_0",
+ "mcm_mem007_i_mem_prty_1",
+ "mcm_mem008_i_mem_prty",
+ "mcm_mem001_i_ecc_rf_int",
+ "mcm_mem021_i_ecc_0_rf_int",
+ "mcm_mem021_i_ecc_1_rf_int",
+ "mcm_mem003_i_ecc_0_rf_int",
+ "mcm_mem003_i_ecc_1_rf_int",
+ "mcm_mem024_i_ecc_rf_int",
+ "mcm_mem009_i_mem_prty",
+ "mcm_mem010_i_mem_prty",
+ "mcm_mem028_i_mem_prty",
+ "mcm_mem002_i_mem_prty",
+ "mcm_mem025_i_mem_prty",
+ "mcm_mem005_i_mem_prty_0",
+ "mcm_mem005_i_mem_prty_1",
+ "mcm_mem001_i_mem_prty",
+ "mcm_mem007_i_mem_prty",
+};
+#else
+#define mcm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 mcm_prty1_bb_a0_attn_idx[31] = {
+ 2, 3, 8, 9, 10, 11, 12, 13, 15, 16, 17, 18, 19, 22, 23, 25, 26, 27, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+};
+
+static struct attn_hw_reg mcm_prty1_bb_a0 = {
+ 0, 31, mcm_prty1_bb_a0_attn_idx, 0x1200200, 0x120020c, 0x1200208,
+ 0x1200204
+};
+
+static const u16 mcm_prty2_bb_a0_attn_idx[4] = {
+ 45, 30, 21, 20,
+};
+
+static struct attn_hw_reg mcm_prty2_bb_a0 = {
+ 1, 4, mcm_prty2_bb_a0_attn_idx, 0x1200210, 0x120021c, 0x1200218,
+ 0x1200214
+};
+
+static struct attn_hw_reg *mcm_prty_bb_a0_regs[2] = {
+ &mcm_prty1_bb_a0, &mcm_prty2_bb_a0,
+};
+
+static const u16 mcm_prty1_bb_b0_attn_idx[31] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+};
+
+static struct attn_hw_reg mcm_prty1_bb_b0 = {
+ 0, 31, mcm_prty1_bb_b0_attn_idx, 0x1200200, 0x120020c, 0x1200208,
+ 0x1200204
+};
+
+static const u16 mcm_prty2_bb_b0_attn_idx[4] = {
+ 37, 38, 44, 40,
+};
+
+static struct attn_hw_reg mcm_prty2_bb_b0 = {
+ 1, 4, mcm_prty2_bb_b0_attn_idx, 0x1200210, 0x120021c, 0x1200218,
+ 0x1200214
+};
+
+static struct attn_hw_reg *mcm_prty_bb_b0_regs[2] = {
+ &mcm_prty1_bb_b0, &mcm_prty2_bb_b0,
+};
+
+static const u16 mcm_prty1_k2_attn_idx[31] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+};
+
+static struct attn_hw_reg mcm_prty1_k2 = {
+ 0, 31, mcm_prty1_k2_attn_idx, 0x1200200, 0x120020c, 0x1200208,
+ 0x1200204
+};
+
+static const u16 mcm_prty2_k2_attn_idx[4] = {
+ 37, 38, 44, 40,
+};
+
+static struct attn_hw_reg mcm_prty2_k2 = {
+ 1, 4, mcm_prty2_k2_attn_idx, 0x1200210, 0x120021c, 0x1200218, 0x1200214
+};
+
+static struct attn_hw_reg *mcm_prty_k2_regs[2] = {
+ &mcm_prty1_k2, &mcm_prty2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ucm_int_attn_desc[47] = {
+ "ucm_address_error",
+ "ucm_is_storm_ovfl_err",
+ "ucm_is_storm_under_err",
+ "ucm_is_xsdm_ovfl_err",
+ "ucm_is_xsdm_under_err",
+ "ucm_is_ysdm_ovfl_err",
+ "ucm_is_ysdm_under_err",
+ "ucm_is_usdm_ovfl_err",
+ "ucm_is_usdm_under_err",
+ "ucm_is_rdif_ovfl_err",
+ "ucm_is_rdif_under_err",
+ "ucm_is_tdif_ovfl_err",
+ "ucm_is_tdif_under_err",
+ "ucm_is_muld_ovfl_err",
+ "ucm_is_muld_under_err",
+ "ucm_is_yuld_ovfl_err",
+ "ucm_is_yuld_under_err",
+ "ucm_is_dorq_ovfl_err",
+ "ucm_is_dorq_under_err",
+ "ucm_is_pbf_ovfl_err",
+ "ucm_is_pbf_under_err",
+ "ucm_is_tm_ovfl_err",
+ "ucm_is_tm_under_err",
+ "ucm_is_qm_p_ovfl_err",
+ "ucm_is_qm_p_under_err",
+ "ucm_is_qm_s_ovfl_err",
+ "ucm_is_qm_s_under_err",
+ "ucm_is_grc_ovfl_err0",
+ "ucm_is_grc_under_err0",
+ "ucm_is_grc_ovfl_err1",
+ "ucm_is_grc_under_err1",
+ "ucm_is_grc_ovfl_err2",
+ "ucm_is_grc_under_err2",
+ "ucm_is_grc_ovfl_err3",
+ "ucm_is_grc_under_err3",
+ "ucm_in_prcs_tbl_ovfl",
+ "ucm_agg_con_data_buf_ovfl",
+ "ucm_agg_con_cmd_buf_ovfl",
+ "ucm_sm_con_data_buf_ovfl",
+ "ucm_sm_con_cmd_buf_ovfl",
+ "ucm_agg_task_data_buf_ovfl",
+ "ucm_agg_task_cmd_buf_ovfl",
+ "ucm_sm_task_data_buf_ovfl",
+ "ucm_sm_task_cmd_buf_ovfl",
+ "ucm_fi_desc_input_violate",
+ "ucm_se_desc_input_violate",
+ "ucm_qmreg_more4",
+};
+#else
+#define ucm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ucm_int0_bb_a0_attn_idx[17] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg ucm_int0_bb_a0 = {
+ 0, 17, ucm_int0_bb_a0_attn_idx, 0x1280180, 0x128018c, 0x1280188,
+ 0x1280184
+};
+
+static const u16 ucm_int1_bb_a0_attn_idx[29] = {
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+};
+
+static struct attn_hw_reg ucm_int1_bb_a0 = {
+ 1, 29, ucm_int1_bb_a0_attn_idx, 0x1280190, 0x128019c, 0x1280198,
+ 0x1280194
+};
+
+static const u16 ucm_int2_bb_a0_attn_idx[1] = {
+ 46,
+};
+
+static struct attn_hw_reg ucm_int2_bb_a0 = {
+ 2, 1, ucm_int2_bb_a0_attn_idx, 0x12801a0, 0x12801ac, 0x12801a8,
+ 0x12801a4
+};
+
+static struct attn_hw_reg *ucm_int_bb_a0_regs[3] = {
+ &ucm_int0_bb_a0, &ucm_int1_bb_a0, &ucm_int2_bb_a0,
+};
+
+static const u16 ucm_int0_bb_b0_attn_idx[17] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg ucm_int0_bb_b0 = {
+ 0, 17, ucm_int0_bb_b0_attn_idx, 0x1280180, 0x128018c, 0x1280188,
+ 0x1280184
+};
+
+static const u16 ucm_int1_bb_b0_attn_idx[29] = {
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+};
+
+static struct attn_hw_reg ucm_int1_bb_b0 = {
+ 1, 29, ucm_int1_bb_b0_attn_idx, 0x1280190, 0x128019c, 0x1280198,
+ 0x1280194
+};
+
+static const u16 ucm_int2_bb_b0_attn_idx[1] = {
+ 46,
+};
+
+static struct attn_hw_reg ucm_int2_bb_b0 = {
+ 2, 1, ucm_int2_bb_b0_attn_idx, 0x12801a0, 0x12801ac, 0x12801a8,
+ 0x12801a4
+};
+
+static struct attn_hw_reg *ucm_int_bb_b0_regs[3] = {
+ &ucm_int0_bb_b0, &ucm_int1_bb_b0, &ucm_int2_bb_b0,
+};
+
+static const u16 ucm_int0_k2_attn_idx[17] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg ucm_int0_k2 = {
+ 0, 17, ucm_int0_k2_attn_idx, 0x1280180, 0x128018c, 0x1280188, 0x1280184
+};
+
+static const u16 ucm_int1_k2_attn_idx[29] = {
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+};
+
+static struct attn_hw_reg ucm_int1_k2 = {
+ 1, 29, ucm_int1_k2_attn_idx, 0x1280190, 0x128019c, 0x1280198, 0x1280194
+};
+
+static const u16 ucm_int2_k2_attn_idx[1] = {
+ 46,
+};
+
+static struct attn_hw_reg ucm_int2_k2 = {
+ 2, 1, ucm_int2_k2_attn_idx, 0x12801a0, 0x12801ac, 0x12801a8, 0x12801a4
+};
+
+static struct attn_hw_reg *ucm_int_k2_regs[3] = {
+ &ucm_int0_k2, &ucm_int1_k2, &ucm_int2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ucm_prty_attn_desc[54] = {
+ "ucm_mem030_i_ecc_rf_int",
+ "ucm_mem005_i_ecc_0_rf_int",
+ "ucm_mem005_i_ecc_1_rf_int",
+ "ucm_mem024_i_ecc_0_rf_int",
+ "ucm_mem024_i_ecc_1_rf_int",
+ "ucm_mem025_i_ecc_rf_int",
+ "ucm_mem007_i_ecc_0_rf_int",
+ "ucm_mem007_i_ecc_1_rf_int",
+ "ucm_mem008_i_ecc_rf_int",
+ "ucm_mem027_i_ecc_0_rf_int",
+ "ucm_mem027_i_ecc_1_rf_int",
+ "ucm_mem028_i_ecc_rf_int",
+ "ucm_mem020_i_mem_prty",
+ "ucm_mem021_i_mem_prty",
+ "ucm_mem019_i_mem_prty",
+ "ucm_mem013_i_mem_prty",
+ "ucm_mem018_i_mem_prty",
+ "ucm_mem022_i_mem_prty",
+ "ucm_mem014_i_mem_prty",
+ "ucm_mem015_i_mem_prty",
+ "ucm_mem016_i_mem_prty",
+ "ucm_mem017_i_mem_prty",
+ "ucm_mem033_i_mem_prty",
+ "ucm_mem032_i_mem_prty",
+ "ucm_mem031_i_mem_prty",
+ "ucm_mem006_i_mem_prty",
+ "ucm_mem026_i_mem_prty",
+ "ucm_mem009_i_mem_prty",
+ "ucm_mem029_i_mem_prty",
+ "ucm_mem023_i_mem_prty",
+ "ucm_mem010_i_mem_prty_0",
+ "ucm_mem003_i_ecc_0_rf_int",
+ "ucm_mem003_i_ecc_1_rf_int",
+ "ucm_mem022_i_ecc_0_rf_int",
+ "ucm_mem022_i_ecc_1_rf_int",
+ "ucm_mem023_i_ecc_rf_int",
+ "ucm_mem006_i_ecc_rf_int",
+ "ucm_mem025_i_ecc_0_rf_int",
+ "ucm_mem025_i_ecc_1_rf_int",
+ "ucm_mem026_i_ecc_rf_int",
+ "ucm_mem011_i_mem_prty",
+ "ucm_mem012_i_mem_prty",
+ "ucm_mem030_i_mem_prty",
+ "ucm_mem004_i_mem_prty",
+ "ucm_mem024_i_mem_prty",
+ "ucm_mem007_i_mem_prty",
+ "ucm_mem027_i_mem_prty",
+ "ucm_mem008_i_mem_prty_0",
+ "ucm_mem010_i_mem_prty_1",
+ "ucm_mem003_i_mem_prty",
+ "ucm_mem001_i_mem_prty",
+ "ucm_mem002_i_mem_prty",
+ "ucm_mem008_i_mem_prty_1",
+ "ucm_mem010_i_mem_prty",
+};
+#else
+#define ucm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 ucm_prty1_bb_a0_attn_idx[31] = {
+ 1, 2, 11, 12, 13, 14, 15, 16, 18, 19, 20, 21, 24, 28, 31, 32, 33, 34,
+ 35,
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+};
+
+static struct attn_hw_reg ucm_prty1_bb_a0 = {
+ 0, 31, ucm_prty1_bb_a0_attn_idx, 0x1280200, 0x128020c, 0x1280208,
+ 0x1280204
+};
+
+static const u16 ucm_prty2_bb_a0_attn_idx[7] = {
+ 50, 51, 52, 27, 53, 23, 22,
+};
+
+static struct attn_hw_reg ucm_prty2_bb_a0 = {
+ 1, 7, ucm_prty2_bb_a0_attn_idx, 0x1280210, 0x128021c, 0x1280218,
+ 0x1280214
+};
+
+static struct attn_hw_reg *ucm_prty_bb_a0_regs[2] = {
+ &ucm_prty1_bb_a0, &ucm_prty2_bb_a0,
+};
+
+static const u16 ucm_prty1_bb_b0_attn_idx[31] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+};
+
+static struct attn_hw_reg ucm_prty1_bb_b0 = {
+ 0, 31, ucm_prty1_bb_b0_attn_idx, 0x1280200, 0x128020c, 0x1280208,
+ 0x1280204
+};
+
+static const u16 ucm_prty2_bb_b0_attn_idx[7] = {
+ 48, 40, 41, 49, 43, 50, 51,
+};
+
+static struct attn_hw_reg ucm_prty2_bb_b0 = {
+ 1, 7, ucm_prty2_bb_b0_attn_idx, 0x1280210, 0x128021c, 0x1280218,
+ 0x1280214
+};
+
+static struct attn_hw_reg *ucm_prty_bb_b0_regs[2] = {
+ &ucm_prty1_bb_b0, &ucm_prty2_bb_b0,
+};
+
+static const u16 ucm_prty1_k2_attn_idx[31] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+};
+
+static struct attn_hw_reg ucm_prty1_k2 = {
+ 0, 31, ucm_prty1_k2_attn_idx, 0x1280200, 0x128020c, 0x1280208,
+ 0x1280204
+};
+
+static const u16 ucm_prty2_k2_attn_idx[7] = {
+ 48, 40, 41, 49, 43, 50, 51,
+};
+
+static struct attn_hw_reg ucm_prty2_k2 = {
+ 1, 7, ucm_prty2_k2_attn_idx, 0x1280210, 0x128021c, 0x1280218, 0x1280214
+};
+
+static struct attn_hw_reg *ucm_prty_k2_regs[2] = {
+ &ucm_prty1_k2, &ucm_prty2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *xcm_int_attn_desc[49] = {
+ "xcm_address_error",
+ "xcm_is_storm_ovfl_err",
+ "xcm_is_storm_under_err",
+ "xcm_is_msdm_ovfl_err",
+ "xcm_is_msdm_under_err",
+ "xcm_is_xsdm_ovfl_err",
+ "xcm_is_xsdm_under_err",
+ "xcm_is_ysdm_ovfl_err",
+ "xcm_is_ysdm_under_err",
+ "xcm_is_usdm_ovfl_err",
+ "xcm_is_usdm_under_err",
+ "xcm_is_msem_ovfl_err",
+ "xcm_is_msem_under_err",
+ "xcm_is_usem_ovfl_err",
+ "xcm_is_usem_under_err",
+ "xcm_is_ysem_ovfl_err",
+ "xcm_is_ysem_under_err",
+ "xcm_is_dorq_ovfl_err",
+ "xcm_is_dorq_under_err",
+ "xcm_is_pbf_ovfl_err",
+ "xcm_is_pbf_under_err",
+ "xcm_is_tm_ovfl_err",
+ "xcm_is_tm_under_err",
+ "xcm_is_qm_p_ovfl_err",
+ "xcm_is_qm_p_under_err",
+ "xcm_is_qm_s_ovfl_err",
+ "xcm_is_qm_s_under_err",
+ "xcm_is_grc_ovfl_err0",
+ "xcm_is_grc_under_err0",
+ "xcm_is_grc_ovfl_err1",
+ "xcm_is_grc_under_err1",
+ "xcm_is_grc_ovfl_err2",
+ "xcm_is_grc_under_err2",
+ "xcm_is_grc_ovfl_err3",
+ "xcm_is_grc_under_err3",
+ "xcm_in_prcs_tbl_ovfl",
+ "xcm_agg_con_data_buf_ovfl",
+ "xcm_agg_con_cmd_buf_ovfl",
+ "xcm_sm_con_data_buf_ovfl",
+ "xcm_sm_con_cmd_buf_ovfl",
+ "xcm_fi_desc_input_violate",
+ "xcm_qm_act_st_cnt_msg_prcs_under",
+ "xcm_qm_act_st_cnt_msg_prcs_ovfl",
+ "xcm_qm_act_st_cnt_ext_ld_under",
+ "xcm_qm_act_st_cnt_ext_ld_ovfl",
+ "xcm_qm_act_st_cnt_rbc_under",
+ "xcm_qm_act_st_cnt_rbc_ovfl",
+ "xcm_qm_act_st_cnt_drop_under",
+ "xcm_qm_act_st_cnt_illeg_pqnum",
+};
+#else
+#define xcm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 xcm_int0_bb_a0_attn_idx[16] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg xcm_int0_bb_a0 = {
+ 0, 16, xcm_int0_bb_a0_attn_idx, 0x1000180, 0x100018c, 0x1000188,
+ 0x1000184
+};
+
+static const u16 xcm_int1_bb_a0_attn_idx[25] = {
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+ 34, 35, 36, 37, 38, 39, 40,
+};
+
+static struct attn_hw_reg xcm_int1_bb_a0 = {
+ 1, 25, xcm_int1_bb_a0_attn_idx, 0x1000190, 0x100019c, 0x1000198,
+ 0x1000194
+};
+
+static const u16 xcm_int2_bb_a0_attn_idx[8] = {
+ 41, 42, 43, 44, 45, 46, 47, 48,
+};
+
+static struct attn_hw_reg xcm_int2_bb_a0 = {
+ 2, 8, xcm_int2_bb_a0_attn_idx, 0x10001a0, 0x10001ac, 0x10001a8,
+ 0x10001a4
+};
+
+static struct attn_hw_reg *xcm_int_bb_a0_regs[3] = {
+ &xcm_int0_bb_a0, &xcm_int1_bb_a0, &xcm_int2_bb_a0,
+};
+
+static const u16 xcm_int0_bb_b0_attn_idx[16] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg xcm_int0_bb_b0 = {
+ 0, 16, xcm_int0_bb_b0_attn_idx, 0x1000180, 0x100018c, 0x1000188,
+ 0x1000184
+};
+
+static const u16 xcm_int1_bb_b0_attn_idx[25] = {
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+ 34, 35, 36, 37, 38, 39, 40,
+};
+
+static struct attn_hw_reg xcm_int1_bb_b0 = {
+ 1, 25, xcm_int1_bb_b0_attn_idx, 0x1000190, 0x100019c, 0x1000198,
+ 0x1000194
+};
+
+static const u16 xcm_int2_bb_b0_attn_idx[8] = {
+ 41, 42, 43, 44, 45, 46, 47, 48,
+};
+
+static struct attn_hw_reg xcm_int2_bb_b0 = {
+ 2, 8, xcm_int2_bb_b0_attn_idx, 0x10001a0, 0x10001ac, 0x10001a8,
+ 0x10001a4
+};
+
+static struct attn_hw_reg *xcm_int_bb_b0_regs[3] = {
+ &xcm_int0_bb_b0, &xcm_int1_bb_b0, &xcm_int2_bb_b0,
+};
+
+static const u16 xcm_int0_k2_attn_idx[16] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg xcm_int0_k2 = {
+ 0, 16, xcm_int0_k2_attn_idx, 0x1000180, 0x100018c, 0x1000188, 0x1000184
+};
+
+static const u16 xcm_int1_k2_attn_idx[25] = {
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+ 34, 35, 36, 37, 38, 39, 40,
+};
+
+static struct attn_hw_reg xcm_int1_k2 = {
+ 1, 25, xcm_int1_k2_attn_idx, 0x1000190, 0x100019c, 0x1000198, 0x1000194
+};
+
+static const u16 xcm_int2_k2_attn_idx[8] = {
+ 41, 42, 43, 44, 45, 46, 47, 48,
+};
+
+static struct attn_hw_reg xcm_int2_k2 = {
+ 2, 8, xcm_int2_k2_attn_idx, 0x10001a0, 0x10001ac, 0x10001a8, 0x10001a4
+};
+
+static struct attn_hw_reg *xcm_int_k2_regs[3] = {
+ &xcm_int0_k2, &xcm_int1_k2, &xcm_int2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *xcm_prty_attn_desc[59] = {
+ "xcm_mem036_i_ecc_rf_int",
+ "xcm_mem003_i_ecc_0_rf_int",
+ "xcm_mem003_i_ecc_1_rf_int",
+ "xcm_mem003_i_ecc_2_rf_int",
+ "xcm_mem003_i_ecc_3_rf_int",
+ "xcm_mem004_i_ecc_rf_int",
+ "xcm_mem033_i_ecc_0_rf_int",
+ "xcm_mem033_i_ecc_1_rf_int",
+ "xcm_mem034_i_ecc_rf_int",
+ "xcm_mem026_i_mem_prty",
+ "xcm_mem025_i_mem_prty",
+ "xcm_mem022_i_mem_prty",
+ "xcm_mem029_i_mem_prty",
+ "xcm_mem023_i_mem_prty",
+ "xcm_mem028_i_mem_prty",
+ "xcm_mem030_i_mem_prty",
+ "xcm_mem017_i_mem_prty",
+ "xcm_mem024_i_mem_prty",
+ "xcm_mem027_i_mem_prty",
+ "xcm_mem018_i_mem_prty",
+ "xcm_mem019_i_mem_prty",
+ "xcm_mem020_i_mem_prty",
+ "xcm_mem021_i_mem_prty",
+ "xcm_mem039_i_mem_prty",
+ "xcm_mem038_i_mem_prty",
+ "xcm_mem037_i_mem_prty",
+ "xcm_mem005_i_mem_prty",
+ "xcm_mem035_i_mem_prty",
+ "xcm_mem031_i_mem_prty",
+ "xcm_mem006_i_mem_prty",
+ "xcm_mem015_i_mem_prty",
+ "xcm_mem035_i_ecc_rf_int",
+ "xcm_mem032_i_ecc_0_rf_int",
+ "xcm_mem032_i_ecc_1_rf_int",
+ "xcm_mem033_i_ecc_rf_int",
+ "xcm_mem036_i_mem_prty",
+ "xcm_mem034_i_mem_prty",
+ "xcm_mem016_i_mem_prty",
+ "xcm_mem002_i_ecc_0_rf_int",
+ "xcm_mem002_i_ecc_1_rf_int",
+ "xcm_mem002_i_ecc_2_rf_int",
+ "xcm_mem002_i_ecc_3_rf_int",
+ "xcm_mem003_i_ecc_rf_int",
+ "xcm_mem031_i_ecc_0_rf_int",
+ "xcm_mem031_i_ecc_1_rf_int",
+ "xcm_mem032_i_ecc_rf_int",
+ "xcm_mem004_i_mem_prty",
+ "xcm_mem033_i_mem_prty",
+ "xcm_mem014_i_mem_prty",
+ "xcm_mem032_i_mem_prty",
+ "xcm_mem007_i_mem_prty",
+ "xcm_mem008_i_mem_prty",
+ "xcm_mem009_i_mem_prty",
+ "xcm_mem010_i_mem_prty",
+ "xcm_mem011_i_mem_prty",
+ "xcm_mem012_i_mem_prty",
+ "xcm_mem013_i_mem_prty",
+ "xcm_mem001_i_mem_prty",
+ "xcm_mem002_i_mem_prty",
+};
+#else
+#define xcm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 xcm_prty1_bb_a0_attn_idx[31] = {
+ 8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 19, 20, 21, 22, 25, 26, 27, 30,
+ 35,
+ 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+};
+
+static struct attn_hw_reg xcm_prty1_bb_a0 = {
+ 0, 31, xcm_prty1_bb_a0_attn_idx, 0x1000200, 0x100020c, 0x1000208,
+ 0x1000204
+};
+
+static const u16 xcm_prty2_bb_a0_attn_idx[11] = {
+ 50, 51, 52, 53, 54, 55, 56, 57, 15, 29, 24,
+};
+
+static struct attn_hw_reg xcm_prty2_bb_a0 = {
+ 1, 11, xcm_prty2_bb_a0_attn_idx, 0x1000210, 0x100021c, 0x1000218,
+ 0x1000214
+};
+
+static struct attn_hw_reg *xcm_prty_bb_a0_regs[2] = {
+ &xcm_prty1_bb_a0, &xcm_prty2_bb_a0,
+};
+
+static const u16 xcm_prty1_bb_b0_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
+ 24,
+ 25, 26, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+};
+
+static struct attn_hw_reg xcm_prty1_bb_b0 = {
+ 0, 31, xcm_prty1_bb_b0_attn_idx, 0x1000200, 0x100020c, 0x1000208,
+ 0x1000204
+};
+
+static const u16 xcm_prty2_bb_b0_attn_idx[11] = {
+ 50, 51, 52, 53, 54, 55, 56, 48, 57, 58, 28,
+};
+
+static struct attn_hw_reg xcm_prty2_bb_b0 = {
+ 1, 11, xcm_prty2_bb_b0_attn_idx, 0x1000210, 0x100021c, 0x1000218,
+ 0x1000214
+};
+
+static struct attn_hw_reg *xcm_prty_bb_b0_regs[2] = {
+ &xcm_prty1_bb_b0, &xcm_prty2_bb_b0,
+};
+
+static const u16 xcm_prty1_k2_attn_idx[31] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+};
+
+static struct attn_hw_reg xcm_prty1_k2 = {
+ 0, 31, xcm_prty1_k2_attn_idx, 0x1000200, 0x100020c, 0x1000208,
+ 0x1000204
+};
+
+static const u16 xcm_prty2_k2_attn_idx[12] = {
+ 37, 49, 50, 51, 52, 53, 54, 55, 56, 48, 57, 58,
+};
+
+static struct attn_hw_reg xcm_prty2_k2 = {
+ 1, 12, xcm_prty2_k2_attn_idx, 0x1000210, 0x100021c, 0x1000218,
+ 0x1000214
+};
+
+static struct attn_hw_reg *xcm_prty_k2_regs[2] = {
+ &xcm_prty1_k2, &xcm_prty2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ycm_int_attn_desc[37] = {
+ "ycm_address_error",
+ "ycm_is_storm_ovfl_err",
+ "ycm_is_storm_under_err",
+ "ycm_is_msdm_ovfl_err",
+ "ycm_is_msdm_under_err",
+ "ycm_is_ysdm_ovfl_err",
+ "ycm_is_ysdm_under_err",
+ "ycm_is_xyld_ovfl_err",
+ "ycm_is_xyld_under_err",
+ "ycm_is_msem_ovfl_err",
+ "ycm_is_msem_under_err",
+ "ycm_is_usem_ovfl_err",
+ "ycm_is_usem_under_err",
+ "ycm_is_pbf_ovfl_err",
+ "ycm_is_pbf_under_err",
+ "ycm_is_qm_p_ovfl_err",
+ "ycm_is_qm_p_under_err",
+ "ycm_is_qm_s_ovfl_err",
+ "ycm_is_qm_s_under_err",
+ "ycm_is_grc_ovfl_err0",
+ "ycm_is_grc_under_err0",
+ "ycm_is_grc_ovfl_err1",
+ "ycm_is_grc_under_err1",
+ "ycm_is_grc_ovfl_err2",
+ "ycm_is_grc_under_err2",
+ "ycm_is_grc_ovfl_err3",
+ "ycm_is_grc_under_err3",
+ "ycm_in_prcs_tbl_ovfl",
+ "ycm_sm_con_data_buf_ovfl",
+ "ycm_sm_con_cmd_buf_ovfl",
+ "ycm_agg_task_data_buf_ovfl",
+ "ycm_agg_task_cmd_buf_ovfl",
+ "ycm_sm_task_data_buf_ovfl",
+ "ycm_sm_task_cmd_buf_ovfl",
+ "ycm_fi_desc_input_violate",
+ "ycm_se_desc_input_violate",
+ "ycm_qmreg_more4",
+};
+#else
+#define ycm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ycm_int0_bb_a0_attn_idx[13] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg ycm_int0_bb_a0 = {
+ 0, 13, ycm_int0_bb_a0_attn_idx, 0x1080180, 0x108018c, 0x1080188,
+ 0x1080184
+};
+
+static const u16 ycm_int1_bb_a0_attn_idx[23] = {
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg ycm_int1_bb_a0 = {
+ 1, 23, ycm_int1_bb_a0_attn_idx, 0x1080190, 0x108019c, 0x1080198,
+ 0x1080194
+};
+
+static const u16 ycm_int2_bb_a0_attn_idx[1] = {
+ 36,
+};
+
+static struct attn_hw_reg ycm_int2_bb_a0 = {
+ 2, 1, ycm_int2_bb_a0_attn_idx, 0x10801a0, 0x10801ac, 0x10801a8,
+ 0x10801a4
+};
+
+static struct attn_hw_reg *ycm_int_bb_a0_regs[3] = {
+ &ycm_int0_bb_a0, &ycm_int1_bb_a0, &ycm_int2_bb_a0,
+};
+
+static const u16 ycm_int0_bb_b0_attn_idx[13] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg ycm_int0_bb_b0 = {
+ 0, 13, ycm_int0_bb_b0_attn_idx, 0x1080180, 0x108018c, 0x1080188,
+ 0x1080184
+};
+
+static const u16 ycm_int1_bb_b0_attn_idx[23] = {
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg ycm_int1_bb_b0 = {
+ 1, 23, ycm_int1_bb_b0_attn_idx, 0x1080190, 0x108019c, 0x1080198,
+ 0x1080194
+};
+
+static const u16 ycm_int2_bb_b0_attn_idx[1] = {
+ 36,
+};
+
+static struct attn_hw_reg ycm_int2_bb_b0 = {
+ 2, 1, ycm_int2_bb_b0_attn_idx, 0x10801a0, 0x10801ac, 0x10801a8,
+ 0x10801a4
+};
+
+static struct attn_hw_reg *ycm_int_bb_b0_regs[3] = {
+ &ycm_int0_bb_b0, &ycm_int1_bb_b0, &ycm_int2_bb_b0,
+};
+
+static const u16 ycm_int0_k2_attn_idx[13] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg ycm_int0_k2 = {
+ 0, 13, ycm_int0_k2_attn_idx, 0x1080180, 0x108018c, 0x1080188, 0x1080184
+};
+
+static const u16 ycm_int1_k2_attn_idx[23] = {
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg ycm_int1_k2 = {
+ 1, 23, ycm_int1_k2_attn_idx, 0x1080190, 0x108019c, 0x1080198, 0x1080194
+};
+
+static const u16 ycm_int2_k2_attn_idx[1] = {
+ 36,
+};
+
+static struct attn_hw_reg ycm_int2_k2 = {
+ 2, 1, ycm_int2_k2_attn_idx, 0x10801a0, 0x10801ac, 0x10801a8, 0x10801a4
+};
+
+static struct attn_hw_reg *ycm_int_k2_regs[3] = {
+ &ycm_int0_k2, &ycm_int1_k2, &ycm_int2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ycm_prty_attn_desc[44] = {
+ "ycm_mem027_i_ecc_rf_int",
+ "ycm_mem003_i_ecc_0_rf_int",
+ "ycm_mem003_i_ecc_1_rf_int",
+ "ycm_mem022_i_ecc_0_rf_int",
+ "ycm_mem022_i_ecc_1_rf_int",
+ "ycm_mem023_i_ecc_rf_int",
+ "ycm_mem005_i_ecc_0_rf_int",
+ "ycm_mem005_i_ecc_1_rf_int",
+ "ycm_mem025_i_ecc_0_rf_int",
+ "ycm_mem025_i_ecc_1_rf_int",
+ "ycm_mem018_i_mem_prty",
+ "ycm_mem020_i_mem_prty",
+ "ycm_mem017_i_mem_prty",
+ "ycm_mem016_i_mem_prty",
+ "ycm_mem019_i_mem_prty",
+ "ycm_mem015_i_mem_prty",
+ "ycm_mem011_i_mem_prty",
+ "ycm_mem012_i_mem_prty",
+ "ycm_mem013_i_mem_prty",
+ "ycm_mem014_i_mem_prty",
+ "ycm_mem030_i_mem_prty",
+ "ycm_mem029_i_mem_prty",
+ "ycm_mem028_i_mem_prty",
+ "ycm_mem004_i_mem_prty",
+ "ycm_mem024_i_mem_prty",
+ "ycm_mem006_i_mem_prty",
+ "ycm_mem026_i_mem_prty",
+ "ycm_mem021_i_mem_prty",
+ "ycm_mem007_i_mem_prty_0",
+ "ycm_mem007_i_mem_prty_1",
+ "ycm_mem008_i_mem_prty",
+ "ycm_mem026_i_ecc_rf_int",
+ "ycm_mem021_i_ecc_0_rf_int",
+ "ycm_mem021_i_ecc_1_rf_int",
+ "ycm_mem022_i_ecc_rf_int",
+ "ycm_mem024_i_ecc_0_rf_int",
+ "ycm_mem024_i_ecc_1_rf_int",
+ "ycm_mem027_i_mem_prty",
+ "ycm_mem023_i_mem_prty",
+ "ycm_mem025_i_mem_prty",
+ "ycm_mem009_i_mem_prty",
+ "ycm_mem010_i_mem_prty",
+ "ycm_mem001_i_mem_prty",
+ "ycm_mem002_i_mem_prty",
+};
+#else
+#define ycm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 ycm_prty1_bb_a0_attn_idx[31] = {
+ 1, 2, 6, 7, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 25, 28,
+ 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+};
+
+static struct attn_hw_reg ycm_prty1_bb_a0 = {
+ 0, 31, ycm_prty1_bb_a0_attn_idx, 0x1080200, 0x108020c, 0x1080208,
+ 0x1080204
+};
+
+static const u16 ycm_prty2_bb_a0_attn_idx[3] = {
+ 41, 42, 43,
+};
+
+static struct attn_hw_reg ycm_prty2_bb_a0 = {
+ 1, 3, ycm_prty2_bb_a0_attn_idx, 0x1080210, 0x108021c, 0x1080218,
+ 0x1080214
+};
+
+static struct attn_hw_reg *ycm_prty_bb_a0_regs[2] = {
+ &ycm_prty1_bb_a0, &ycm_prty2_bb_a0,
+};
+
+static const u16 ycm_prty1_bb_b0_attn_idx[31] = {
+ 1, 2, 6, 7, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 25, 28,
+ 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+};
+
+static struct attn_hw_reg ycm_prty1_bb_b0 = {
+ 0, 31, ycm_prty1_bb_b0_attn_idx, 0x1080200, 0x108020c, 0x1080208,
+ 0x1080204
+};
+
+static const u16 ycm_prty2_bb_b0_attn_idx[3] = {
+ 41, 42, 43,
+};
+
+static struct attn_hw_reg ycm_prty2_bb_b0 = {
+ 1, 3, ycm_prty2_bb_b0_attn_idx, 0x1080210, 0x108021c, 0x1080218,
+ 0x1080214
+};
+
+static struct attn_hw_reg *ycm_prty_bb_b0_regs[2] = {
+ &ycm_prty1_bb_b0, &ycm_prty2_bb_b0,
+};
+
+static const u16 ycm_prty1_k2_attn_idx[31] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+};
+
+static struct attn_hw_reg ycm_prty1_k2 = {
+ 0, 31, ycm_prty1_k2_attn_idx, 0x1080200, 0x108020c, 0x1080208,
+ 0x1080204
+};
+
+static const u16 ycm_prty2_k2_attn_idx[4] = {
+ 40, 41, 42, 43,
+};
+
+static struct attn_hw_reg ycm_prty2_k2 = {
+ 1, 4, ycm_prty2_k2_attn_idx, 0x1080210, 0x108021c, 0x1080218, 0x1080214
+};
+
+static struct attn_hw_reg *ycm_prty_k2_regs[2] = {
+ &ycm_prty1_k2, &ycm_prty2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pcm_int_attn_desc[20] = {
+ "pcm_address_error",
+ "pcm_is_storm_ovfl_err",
+ "pcm_is_storm_under_err",
+ "pcm_is_psdm_ovfl_err",
+ "pcm_is_psdm_under_err",
+ "pcm_is_pbf_ovfl_err",
+ "pcm_is_pbf_under_err",
+ "pcm_is_grc_ovfl_err0",
+ "pcm_is_grc_under_err0",
+ "pcm_is_grc_ovfl_err1",
+ "pcm_is_grc_under_err1",
+ "pcm_is_grc_ovfl_err2",
+ "pcm_is_grc_under_err2",
+ "pcm_is_grc_ovfl_err3",
+ "pcm_is_grc_under_err3",
+ "pcm_in_prcs_tbl_ovfl",
+ "pcm_sm_con_data_buf_ovfl",
+ "pcm_sm_con_cmd_buf_ovfl",
+ "pcm_fi_desc_input_violate",
+ "pcm_qmreg_more4",
+};
+#else
+#define pcm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pcm_int0_bb_a0_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pcm_int0_bb_a0 = {
+ 0, 5, pcm_int0_bb_a0_attn_idx, 0x1100180, 0x110018c, 0x1100188,
+ 0x1100184
+};
+
+static const u16 pcm_int1_bb_a0_attn_idx[14] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+};
+
+static struct attn_hw_reg pcm_int1_bb_a0 = {
+ 1, 14, pcm_int1_bb_a0_attn_idx, 0x1100190, 0x110019c, 0x1100198,
+ 0x1100194
+};
+
+static const u16 pcm_int2_bb_a0_attn_idx[1] = {
+ 19,
+};
+
+static struct attn_hw_reg pcm_int2_bb_a0 = {
+ 2, 1, pcm_int2_bb_a0_attn_idx, 0x11001a0, 0x11001ac, 0x11001a8,
+ 0x11001a4
+};
+
+static struct attn_hw_reg *pcm_int_bb_a0_regs[3] = {
+ &pcm_int0_bb_a0, &pcm_int1_bb_a0, &pcm_int2_bb_a0,
+};
+
+static const u16 pcm_int0_bb_b0_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pcm_int0_bb_b0 = {
+ 0, 5, pcm_int0_bb_b0_attn_idx, 0x1100180, 0x110018c, 0x1100188,
+ 0x1100184
+};
+
+static const u16 pcm_int1_bb_b0_attn_idx[14] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+};
+
+static struct attn_hw_reg pcm_int1_bb_b0 = {
+ 1, 14, pcm_int1_bb_b0_attn_idx, 0x1100190, 0x110019c, 0x1100198,
+ 0x1100194
+};
+
+static const u16 pcm_int2_bb_b0_attn_idx[1] = {
+ 19,
+};
+
+static struct attn_hw_reg pcm_int2_bb_b0 = {
+ 2, 1, pcm_int2_bb_b0_attn_idx, 0x11001a0, 0x11001ac, 0x11001a8,
+ 0x11001a4
+};
+
+static struct attn_hw_reg *pcm_int_bb_b0_regs[3] = {
+ &pcm_int0_bb_b0, &pcm_int1_bb_b0, &pcm_int2_bb_b0,
+};
+
+static const u16 pcm_int0_k2_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pcm_int0_k2 = {
+ 0, 5, pcm_int0_k2_attn_idx, 0x1100180, 0x110018c, 0x1100188, 0x1100184
+};
+
+static const u16 pcm_int1_k2_attn_idx[14] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+};
+
+static struct attn_hw_reg pcm_int1_k2 = {
+ 1, 14, pcm_int1_k2_attn_idx, 0x1100190, 0x110019c, 0x1100198, 0x1100194
+};
+
+static const u16 pcm_int2_k2_attn_idx[1] = {
+ 19,
+};
+
+static struct attn_hw_reg pcm_int2_k2 = {
+ 2, 1, pcm_int2_k2_attn_idx, 0x11001a0, 0x11001ac, 0x11001a8, 0x11001a4
+};
+
+static struct attn_hw_reg *pcm_int_k2_regs[3] = {
+ &pcm_int0_k2, &pcm_int1_k2, &pcm_int2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pcm_prty_attn_desc[18] = {
+ "pcm_mem012_i_ecc_rf_int",
+ "pcm_mem010_i_ecc_0_rf_int",
+ "pcm_mem010_i_ecc_1_rf_int",
+ "pcm_mem008_i_mem_prty",
+ "pcm_mem007_i_mem_prty",
+ "pcm_mem006_i_mem_prty",
+ "pcm_mem002_i_mem_prty",
+ "pcm_mem003_i_mem_prty",
+ "pcm_mem004_i_mem_prty",
+ "pcm_mem005_i_mem_prty",
+ "pcm_mem011_i_mem_prty",
+ "pcm_mem001_i_mem_prty",
+ "pcm_mem011_i_ecc_rf_int",
+ "pcm_mem009_i_ecc_0_rf_int",
+ "pcm_mem009_i_ecc_1_rf_int",
+ "pcm_mem010_i_mem_prty",
+ "pcm_mem013_i_mem_prty",
+ "pcm_mem012_i_mem_prty",
+};
+#else
+#define pcm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pcm_prty1_bb_a0_attn_idx[14] = {
+ 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg pcm_prty1_bb_a0 = {
+ 0, 14, pcm_prty1_bb_a0_attn_idx, 0x1100200, 0x110020c, 0x1100208,
+ 0x1100204
+};
+
+static struct attn_hw_reg *pcm_prty_bb_a0_regs[1] = {
+ &pcm_prty1_bb_a0,
+};
+
+static const u16 pcm_prty1_bb_b0_attn_idx[11] = {
+ 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg pcm_prty1_bb_b0 = {
+ 0, 11, pcm_prty1_bb_b0_attn_idx, 0x1100200, 0x110020c, 0x1100208,
+ 0x1100204
+};
+
+static struct attn_hw_reg *pcm_prty_bb_b0_regs[1] = {
+ &pcm_prty1_bb_b0,
+};
+
+static const u16 pcm_prty1_k2_attn_idx[12] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+};
+
+static struct attn_hw_reg pcm_prty1_k2 = {
+ 0, 12, pcm_prty1_k2_attn_idx, 0x1100200, 0x110020c, 0x1100208,
+ 0x1100204
+};
+
+static struct attn_hw_reg *pcm_prty_k2_regs[1] = {
+ &pcm_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *qm_int_attn_desc[22] = {
+ "qm_address_error",
+ "qm_ovf_err_tx",
+ "qm_ovf_err_other",
+ "qm_pf_usg_cnt_err",
+ "qm_vf_usg_cnt_err",
+ "qm_voq_crd_inc_err",
+ "qm_voq_crd_dec_err",
+ "qm_byte_crd_inc_err",
+ "qm_byte_crd_dec_err",
+ "qm_err_incdec_rlglblcrd",
+ "qm_err_incdec_rlpfcrd",
+ "qm_err_incdec_wfqpfcrd",
+ "qm_err_incdec_wfqvpcrd",
+ "qm_err_incdec_voqlinecrd",
+ "qm_err_incdec_voqbytecrd",
+ "qm_fifos_error",
+ "qm_qm_rl_dc_exp_pf_controller_pop_error",
+ "qm_qm_rl_dc_exp_pf_controller_push_error",
+ "qm_qm_rl_dc_rf_req_controller_pop_error",
+ "qm_qm_rl_dc_rf_req_controller_push_error",
+ "qm_qm_rl_dc_rf_res_controller_pop_error",
+ "qm_qm_rl_dc_rf_res_controller_push_error",
+};
+#else
+#define qm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 qm_int0_bb_a0_attn_idx[16] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg qm_int0_bb_a0 = {
+ 0, 16, qm_int0_bb_a0_attn_idx, 0x2f0180, 0x2f018c, 0x2f0188, 0x2f0184
+};
+
+static struct attn_hw_reg *qm_int_bb_a0_regs[1] = {
+ &qm_int0_bb_a0,
+};
+
+static const u16 qm_int0_bb_b0_attn_idx[22] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21,
+};
+
+static struct attn_hw_reg qm_int0_bb_b0 = {
+ 0, 22, qm_int0_bb_b0_attn_idx, 0x2f0180, 0x2f018c, 0x2f0188, 0x2f0184
+};
+
+static struct attn_hw_reg *qm_int_bb_b0_regs[1] = {
+ &qm_int0_bb_b0,
+};
+
+static const u16 qm_int0_k2_attn_idx[22] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21,
+};
+
+static struct attn_hw_reg qm_int0_k2 = {
+ 0, 22, qm_int0_k2_attn_idx, 0x2f0180, 0x2f018c, 0x2f0188, 0x2f0184
+};
+
+static struct attn_hw_reg *qm_int_k2_regs[1] = {
+ &qm_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *qm_prty_attn_desc[109] = {
+ "qm_xcm_wrc_fifo",
+ "qm_ucm_wrc_fifo",
+ "qm_tcm_wrc_fifo",
+ "qm_ccm_wrc_fifo",
+ "qm_bigramhigh",
+ "qm_bigramlow",
+ "qm_base_address",
+ "qm_wrbuff",
+ "qm_bigramhigh_ext_a",
+ "qm_bigramlow_ext_a",
+ "qm_base_address_ext_a",
+ "qm_mem006_i_ecc_0_rf_int",
+ "qm_mem006_i_ecc_1_rf_int",
+ "qm_mem005_i_ecc_0_rf_int",
+ "qm_mem005_i_ecc_1_rf_int",
+ "qm_mem012_i_ecc_rf_int",
+ "qm_mem037_i_mem_prty",
+ "qm_mem036_i_mem_prty",
+ "qm_mem039_i_mem_prty",
+ "qm_mem038_i_mem_prty",
+ "qm_mem040_i_mem_prty",
+ "qm_mem042_i_mem_prty",
+ "qm_mem041_i_mem_prty",
+ "qm_mem056_i_mem_prty",
+ "qm_mem055_i_mem_prty",
+ "qm_mem053_i_mem_prty",
+ "qm_mem054_i_mem_prty",
+ "qm_mem057_i_mem_prty",
+ "qm_mem058_i_mem_prty",
+ "qm_mem062_i_mem_prty",
+ "qm_mem061_i_mem_prty",
+ "qm_mem059_i_mem_prty",
+ "qm_mem060_i_mem_prty",
+ "qm_mem063_i_mem_prty",
+ "qm_mem064_i_mem_prty",
+ "qm_mem033_i_mem_prty",
+ "qm_mem032_i_mem_prty",
+ "qm_mem030_i_mem_prty",
+ "qm_mem031_i_mem_prty",
+ "qm_mem034_i_mem_prty",
+ "qm_mem035_i_mem_prty",
+ "qm_mem051_i_mem_prty",
+ "qm_mem042_i_ecc_0_rf_int",
+ "qm_mem042_i_ecc_1_rf_int",
+ "qm_mem041_i_ecc_0_rf_int",
+ "qm_mem041_i_ecc_1_rf_int",
+ "qm_mem048_i_ecc_rf_int",
+ "qm_mem009_i_mem_prty",
+ "qm_mem008_i_mem_prty",
+ "qm_mem011_i_mem_prty",
+ "qm_mem010_i_mem_prty",
+ "qm_mem012_i_mem_prty",
+ "qm_mem014_i_mem_prty",
+ "qm_mem013_i_mem_prty",
+ "qm_mem028_i_mem_prty",
+ "qm_mem027_i_mem_prty",
+ "qm_mem025_i_mem_prty",
+ "qm_mem026_i_mem_prty",
+ "qm_mem029_i_mem_prty",
+ "qm_mem005_i_mem_prty",
+ "qm_mem004_i_mem_prty",
+ "qm_mem002_i_mem_prty",
+ "qm_mem003_i_mem_prty",
+ "qm_mem006_i_mem_prty",
+ "qm_mem007_i_mem_prty",
+ "qm_mem023_i_mem_prty",
+ "qm_mem047_i_mem_prty",
+ "qm_mem049_i_mem_prty",
+ "qm_mem048_i_mem_prty",
+ "qm_mem052_i_mem_prty",
+ "qm_mem050_i_mem_prty",
+ "qm_mem045_i_mem_prty",
+ "qm_mem046_i_mem_prty",
+ "qm_mem043_i_mem_prty",
+ "qm_mem044_i_mem_prty",
+ "qm_mem017_i_mem_prty",
+ "qm_mem016_i_mem_prty",
+ "qm_mem021_i_mem_prty",
+ "qm_mem024_i_mem_prty",
+ "qm_mem019_i_mem_prty",
+ "qm_mem018_i_mem_prty",
+ "qm_mem015_i_mem_prty",
+ "qm_mem022_i_mem_prty",
+ "qm_mem020_i_mem_prty",
+ "qm_mem007_i_mem_prty_0",
+ "qm_mem007_i_mem_prty_1",
+ "qm_mem007_i_mem_prty_2",
+ "qm_mem001_i_mem_prty",
+ "qm_mem043_i_mem_prty_0",
+ "qm_mem043_i_mem_prty_1",
+ "qm_mem043_i_mem_prty_2",
+ "qm_mem007_i_mem_prty_3",
+ "qm_mem007_i_mem_prty_4",
+ "qm_mem007_i_mem_prty_5",
+ "qm_mem007_i_mem_prty_6",
+ "qm_mem007_i_mem_prty_7",
+ "qm_mem007_i_mem_prty_8",
+ "qm_mem007_i_mem_prty_9",
+ "qm_mem007_i_mem_prty_10",
+ "qm_mem007_i_mem_prty_11",
+ "qm_mem007_i_mem_prty_12",
+ "qm_mem007_i_mem_prty_13",
+ "qm_mem007_i_mem_prty_14",
+ "qm_mem007_i_mem_prty_15",
+ "qm_mem043_i_mem_prty_3",
+ "qm_mem043_i_mem_prty_4",
+ "qm_mem043_i_mem_prty_5",
+ "qm_mem043_i_mem_prty_6",
+ "qm_mem043_i_mem_prty_7",
+};
+#else
+#define qm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 qm_prty0_bb_a0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg qm_prty0_bb_a0 = {
+ 0, 11, qm_prty0_bb_a0_attn_idx, 0x2f0190, 0x2f019c, 0x2f0198, 0x2f0194
+};
+
+static const u16 qm_prty1_bb_a0_attn_idx[31] = {
+ 17, 35, 36, 37, 38, 39, 40, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65,
+};
+
+static struct attn_hw_reg qm_prty1_bb_a0 = {
+ 1, 31, qm_prty1_bb_a0_attn_idx, 0x2f0200, 0x2f020c, 0x2f0208, 0x2f0204
+};
+
+static const u16 qm_prty2_bb_a0_attn_idx[31] = {
+ 66, 67, 69, 70, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 87, 20, 18, 25,
+ 27, 32, 24, 26, 41, 31, 29, 28, 30, 23, 88, 89, 90,
+};
+
+static struct attn_hw_reg qm_prty2_bb_a0 = {
+ 2, 31, qm_prty2_bb_a0_attn_idx, 0x2f0210, 0x2f021c, 0x2f0218, 0x2f0214
+};
+
+static const u16 qm_prty3_bb_a0_attn_idx[11] = {
+ 104, 105, 106, 107, 108, 33, 16, 34, 19, 72, 71,
+};
+
+static struct attn_hw_reg qm_prty3_bb_a0 = {
+ 3, 11, qm_prty3_bb_a0_attn_idx, 0x2f0220, 0x2f022c, 0x2f0228, 0x2f0224
+};
+
+static struct attn_hw_reg *qm_prty_bb_a0_regs[4] = {
+ &qm_prty0_bb_a0, &qm_prty1_bb_a0, &qm_prty2_bb_a0, &qm_prty3_bb_a0,
+};
+
+static const u16 qm_prty0_bb_b0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg qm_prty0_bb_b0 = {
+ 0, 11, qm_prty0_bb_b0_attn_idx, 0x2f0190, 0x2f019c, 0x2f0198, 0x2f0194
+};
+
+static const u16 qm_prty1_bb_b0_attn_idx[31] = {
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
+ 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+};
+
+static struct attn_hw_reg qm_prty1_bb_b0 = {
+ 1, 31, qm_prty1_bb_b0_attn_idx, 0x2f0200, 0x2f020c, 0x2f0208, 0x2f0204
+};
+
+static const u16 qm_prty2_bb_b0_attn_idx[31] = {
+ 66, 67, 68, 69, 70, 71, 72, 73, 74, 58, 60, 62, 49, 75, 76, 53, 77, 78,
+ 79, 80, 81, 52, 65, 57, 82, 56, 83, 48, 84, 85, 86,
+};
+
+static struct attn_hw_reg qm_prty2_bb_b0 = {
+ 2, 31, qm_prty2_bb_b0_attn_idx, 0x2f0210, 0x2f021c, 0x2f0218, 0x2f0214
+};
+
+static const u16 qm_prty3_bb_b0_attn_idx[11] = {
+ 91, 92, 93, 94, 95, 55, 87, 54, 61, 50, 47,
+};
+
+static struct attn_hw_reg qm_prty3_bb_b0 = {
+ 3, 11, qm_prty3_bb_b0_attn_idx, 0x2f0220, 0x2f022c, 0x2f0228, 0x2f0224
+};
+
+static struct attn_hw_reg *qm_prty_bb_b0_regs[4] = {
+ &qm_prty0_bb_b0, &qm_prty1_bb_b0, &qm_prty2_bb_b0, &qm_prty3_bb_b0,
+};
+
+static const u16 qm_prty0_k2_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg qm_prty0_k2 = {
+ 0, 11, qm_prty0_k2_attn_idx, 0x2f0190, 0x2f019c, 0x2f0198, 0x2f0194
+};
+
+static const u16 qm_prty1_k2_attn_idx[31] = {
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
+ 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+};
+
+static struct attn_hw_reg qm_prty1_k2 = {
+ 1, 31, qm_prty1_k2_attn_idx, 0x2f0200, 0x2f020c, 0x2f0208, 0x2f0204
+};
+
+static const u16 qm_prty2_k2_attn_idx[31] = {
+ 66, 67, 68, 69, 70, 71, 72, 73, 74, 58, 60, 62, 49, 75, 76, 53, 77, 78,
+ 79, 80, 81, 52, 65, 57, 82, 56, 83, 48, 84, 85, 86,
+};
+
+static struct attn_hw_reg qm_prty2_k2 = {
+ 2, 31, qm_prty2_k2_attn_idx, 0x2f0210, 0x2f021c, 0x2f0218, 0x2f0214
+};
+
+static const u16 qm_prty3_k2_attn_idx[19] = {
+ 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 55, 87, 54, 61,
+ 50, 47,
+};
+
+static struct attn_hw_reg qm_prty3_k2 = {
+ 3, 19, qm_prty3_k2_attn_idx, 0x2f0220, 0x2f022c, 0x2f0228, 0x2f0224
+};
+
+static struct attn_hw_reg *qm_prty_k2_regs[4] = {
+ &qm_prty0_k2, &qm_prty1_k2, &qm_prty2_k2, &qm_prty3_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tm_int_attn_desc[43] = {
+ "tm_address_error",
+ "tm_pxp_read_data_fifo_ov",
+ "tm_pxp_read_data_fifo_un",
+ "tm_pxp_read_ctrl_fifo_ov",
+ "tm_pxp_read_ctrl_fifo_un",
+ "tm_cfc_load_command_fifo_ov",
+ "tm_cfc_load_command_fifo_un",
+ "tm_cfc_load_echo_fifo_ov",
+ "tm_cfc_load_echo_fifo_un",
+ "tm_client_out_fifo_ov",
+ "tm_client_out_fifo_un",
+ "tm_ac_command_fifo_ov",
+ "tm_ac_command_fifo_un",
+ "tm_client_in_pbf_fifo_ov",
+ "tm_client_in_pbf_fifo_un",
+ "tm_client_in_ucm_fifo_ov",
+ "tm_client_in_ucm_fifo_un",
+ "tm_client_in_tcm_fifo_ov",
+ "tm_client_in_tcm_fifo_un",
+ "tm_client_in_xcm_fifo_ov",
+ "tm_client_in_xcm_fifo_un",
+ "tm_expiration_cmd_fifo_ov",
+ "tm_expiration_cmd_fifo_un",
+ "tm_stop_all_lc_invalid",
+ "tm_command_lc_invalid_0",
+ "tm_command_lc_invalid_1",
+ "tm_init_command_lc_valid",
+ "tm_stop_all_exp_lc_valid",
+ "tm_command_cid_invalid_0",
+ "tm_reserved_command",
+ "tm_command_cid_invalid_1",
+ "tm_cload_res_loaderr_conn",
+ "tm_cload_res_loadcancel_conn",
+ "tm_cload_res_validerr_conn",
+ "tm_context_rd_last",
+ "tm_context_wr_last",
+ "tm_pxp_rd_data_eop_bvalid",
+ "tm_pend_conn_scan",
+ "tm_pend_task_scan",
+ "tm_pxp_rd_data_eop_error",
+ "tm_cload_res_loaderr_task",
+ "tm_cload_res_loadcancel_task",
+ "tm_cload_res_validerr_task",
+};
+#else
+#define tm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 tm_int0_bb_a0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg tm_int0_bb_a0 = {
+ 0, 32, tm_int0_bb_a0_attn_idx, 0x2c0180, 0x2c018c, 0x2c0188, 0x2c0184
+};
+
+static const u16 tm_int1_bb_a0_attn_idx[11] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+};
+
+static struct attn_hw_reg tm_int1_bb_a0 = {
+ 1, 11, tm_int1_bb_a0_attn_idx, 0x2c0190, 0x2c019c, 0x2c0198, 0x2c0194
+};
+
+static struct attn_hw_reg *tm_int_bb_a0_regs[2] = {
+ &tm_int0_bb_a0, &tm_int1_bb_a0,
+};
+
+static const u16 tm_int0_bb_b0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg tm_int0_bb_b0 = {
+ 0, 32, tm_int0_bb_b0_attn_idx, 0x2c0180, 0x2c018c, 0x2c0188, 0x2c0184
+};
+
+static const u16 tm_int1_bb_b0_attn_idx[11] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+};
+
+static struct attn_hw_reg tm_int1_bb_b0 = {
+ 1, 11, tm_int1_bb_b0_attn_idx, 0x2c0190, 0x2c019c, 0x2c0198, 0x2c0194
+};
+
+static struct attn_hw_reg *tm_int_bb_b0_regs[2] = {
+ &tm_int0_bb_b0, &tm_int1_bb_b0,
+};
+
+static const u16 tm_int0_k2_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg tm_int0_k2 = {
+ 0, 32, tm_int0_k2_attn_idx, 0x2c0180, 0x2c018c, 0x2c0188, 0x2c0184
+};
+
+static const u16 tm_int1_k2_attn_idx[11] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+};
+
+static struct attn_hw_reg tm_int1_k2 = {
+ 1, 11, tm_int1_k2_attn_idx, 0x2c0190, 0x2c019c, 0x2c0198, 0x2c0194
+};
+
+static struct attn_hw_reg *tm_int_k2_regs[2] = {
+ &tm_int0_k2, &tm_int1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tm_prty_attn_desc[17] = {
+ "tm_mem012_i_ecc_0_rf_int",
+ "tm_mem012_i_ecc_1_rf_int",
+ "tm_mem003_i_ecc_rf_int",
+ "tm_mem016_i_mem_prty",
+ "tm_mem007_i_mem_prty",
+ "tm_mem010_i_mem_prty",
+ "tm_mem008_i_mem_prty",
+ "tm_mem009_i_mem_prty",
+ "tm_mem013_i_mem_prty",
+ "tm_mem015_i_mem_prty",
+ "tm_mem014_i_mem_prty",
+ "tm_mem004_i_mem_prty",
+ "tm_mem005_i_mem_prty",
+ "tm_mem006_i_mem_prty",
+ "tm_mem011_i_mem_prty",
+ "tm_mem001_i_mem_prty",
+ "tm_mem002_i_mem_prty",
+};
+#else
+#define tm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 tm_prty1_bb_a0_attn_idx[17] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg tm_prty1_bb_a0 = {
+ 0, 17, tm_prty1_bb_a0_attn_idx, 0x2c0200, 0x2c020c, 0x2c0208, 0x2c0204
+};
+
+static struct attn_hw_reg *tm_prty_bb_a0_regs[1] = {
+ &tm_prty1_bb_a0,
+};
+
+static const u16 tm_prty1_bb_b0_attn_idx[17] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg tm_prty1_bb_b0 = {
+ 0, 17, tm_prty1_bb_b0_attn_idx, 0x2c0200, 0x2c020c, 0x2c0208, 0x2c0204
+};
+
+static struct attn_hw_reg *tm_prty_bb_b0_regs[1] = {
+ &tm_prty1_bb_b0,
+};
+
+static const u16 tm_prty1_k2_attn_idx[17] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg tm_prty1_k2 = {
+ 0, 17, tm_prty1_k2_attn_idx, 0x2c0200, 0x2c020c, 0x2c0208, 0x2c0204
+};
+
+static struct attn_hw_reg *tm_prty_k2_regs[1] = {
+ &tm_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *dorq_int_attn_desc[9] = {
+ "dorq_address_error",
+ "dorq_db_drop",
+ "dorq_dorq_fifo_ovfl_err",
+ "dorq_dorq_fifo_afull",
+ "dorq_cfc_byp_validation_err",
+ "dorq_cfc_ld_resp_err",
+ "dorq_xcm_done_cnt_err",
+ "dorq_cfc_ld_req_fifo_ovfl_err",
+ "dorq_cfc_ld_req_fifo_under_err",
+};
+#else
+#define dorq_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 dorq_int0_bb_a0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg dorq_int0_bb_a0 = {
+ 0, 9, dorq_int0_bb_a0_attn_idx, 0x100180, 0x10018c, 0x100188, 0x100184
+};
+
+static struct attn_hw_reg *dorq_int_bb_a0_regs[1] = {
+ &dorq_int0_bb_a0,
+};
+
+static const u16 dorq_int0_bb_b0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg dorq_int0_bb_b0 = {
+ 0, 9, dorq_int0_bb_b0_attn_idx, 0x100180, 0x10018c, 0x100188, 0x100184
+};
+
+static struct attn_hw_reg *dorq_int_bb_b0_regs[1] = {
+ &dorq_int0_bb_b0,
+};
+
+static const u16 dorq_int0_k2_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg dorq_int0_k2 = {
+ 0, 9, dorq_int0_k2_attn_idx, 0x100180, 0x10018c, 0x100188, 0x100184
+};
+
+static struct attn_hw_reg *dorq_int_k2_regs[1] = {
+ &dorq_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *dorq_prty_attn_desc[7] = {
+ "dorq_datapath_registers",
+ "dorq_mem002_i_ecc_rf_int",
+ "dorq_mem001_i_mem_prty",
+ "dorq_mem003_i_mem_prty",
+ "dorq_mem004_i_mem_prty",
+ "dorq_mem005_i_mem_prty",
+ "dorq_mem006_i_mem_prty",
+};
+#else
+#define dorq_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 dorq_prty1_bb_a0_attn_idx[6] = {
+ 1, 2, 3, 4, 5, 6,
+};
+
+static struct attn_hw_reg dorq_prty1_bb_a0 = {
+ 0, 6, dorq_prty1_bb_a0_attn_idx, 0x100200, 0x10020c, 0x100208, 0x100204
+};
+
+static struct attn_hw_reg *dorq_prty_bb_a0_regs[1] = {
+ &dorq_prty1_bb_a0,
+};
+
+static const u16 dorq_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg dorq_prty0_bb_b0 = {
+ 0, 1, dorq_prty0_bb_b0_attn_idx, 0x100190, 0x10019c, 0x100198, 0x100194
+};
+
+static const u16 dorq_prty1_bb_b0_attn_idx[6] = {
+ 1, 2, 3, 4, 5, 6,
+};
+
+static struct attn_hw_reg dorq_prty1_bb_b0 = {
+ 1, 6, dorq_prty1_bb_b0_attn_idx, 0x100200, 0x10020c, 0x100208, 0x100204
+};
+
+static struct attn_hw_reg *dorq_prty_bb_b0_regs[2] = {
+ &dorq_prty0_bb_b0, &dorq_prty1_bb_b0,
+};
+
+static const u16 dorq_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg dorq_prty0_k2 = {
+ 0, 1, dorq_prty0_k2_attn_idx, 0x100190, 0x10019c, 0x100198, 0x100194
+};
+
+static const u16 dorq_prty1_k2_attn_idx[6] = {
+ 1, 2, 3, 4, 5, 6,
+};
+
+static struct attn_hw_reg dorq_prty1_k2 = {
+ 1, 6, dorq_prty1_k2_attn_idx, 0x100200, 0x10020c, 0x100208, 0x100204
+};
+
+static struct attn_hw_reg *dorq_prty_k2_regs[2] = {
+ &dorq_prty0_k2, &dorq_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *brb_int_attn_desc[237] = {
+ "brb_address_error",
+ "brb_rc_pkt0_rls_error",
+ "brb_rc_pkt0_1st_error",
+ "brb_rc_pkt0_len_error",
+ "brb_rc_pkt0_middle_error",
+ "brb_rc_pkt0_protocol_error",
+ "brb_rc_pkt1_rls_error",
+ "brb_rc_pkt1_1st_error",
+ "brb_rc_pkt1_len_error",
+ "brb_rc_pkt1_middle_error",
+ "brb_rc_pkt1_protocol_error",
+ "brb_rc_pkt2_rls_error",
+ "brb_rc_pkt2_1st_error",
+ "brb_rc_pkt2_len_error",
+ "brb_rc_pkt2_middle_error",
+ "brb_rc_pkt2_protocol_error",
+ "brb_rc_pkt3_rls_error",
+ "brb_rc_pkt3_1st_error",
+ "brb_rc_pkt3_len_error",
+ "brb_rc_pkt3_middle_error",
+ "brb_rc_pkt3_protocol_error",
+ "brb_rc_sop_req_tc_port_error",
+ "brb_uncomplient_lossless_error",
+ "brb_wc0_protocol_error",
+ "brb_wc1_protocol_error",
+ "brb_wc2_protocol_error",
+ "brb_wc3_protocol_error",
+ "brb_ll_arb_prefetch_sop_error",
+ "brb_ll_blk_error",
+ "brb_packet_counter_error",
+ "brb_byte_counter_error",
+ "brb_mac0_fc_cnt_error",
+ "brb_mac1_fc_cnt_error",
+ "brb_ll_arb_calc_error",
+ "brb_unused_0",
+ "brb_wc0_inp_fifo_error",
+ "brb_wc0_sop_fifo_error",
+ "brb_unused_1",
+ "brb_wc0_eop_fifo_error",
+ "brb_wc0_queue_fifo_error",
+ "brb_wc0_free_point_fifo_error",
+ "brb_wc0_next_point_fifo_error",
+ "brb_wc0_strt_fifo_error",
+ "brb_wc0_second_dscr_fifo_error",
+ "brb_wc0_pkt_avail_fifo_error",
+ "brb_wc0_cos_cnt_fifo_error",
+ "brb_wc0_notify_fifo_error",
+ "brb_wc0_ll_req_fifo_error",
+ "brb_wc0_ll_pa_cnt_error",
+ "brb_wc0_bb_pa_cnt_error",
+ "brb_wc1_inp_fifo_error",
+ "brb_wc1_sop_fifo_error",
+ "brb_wc1_eop_fifo_error",
+ "brb_wc1_queue_fifo_error",
+ "brb_wc1_free_point_fifo_error",
+ "brb_wc1_next_point_fifo_error",
+ "brb_wc1_strt_fifo_error",
+ "brb_wc1_second_dscr_fifo_error",
+ "brb_wc1_pkt_avail_fifo_error",
+ "brb_wc1_cos_cnt_fifo_error",
+ "brb_wc1_notify_fifo_error",
+ "brb_wc1_ll_req_fifo_error",
+ "brb_wc1_ll_pa_cnt_error",
+ "brb_wc1_bb_pa_cnt_error",
+ "brb_wc2_inp_fifo_error",
+ "brb_wc2_sop_fifo_error",
+ "brb_wc2_eop_fifo_error",
+ "brb_wc2_queue_fifo_error",
+ "brb_wc2_free_point_fifo_error",
+ "brb_wc2_next_point_fifo_error",
+ "brb_wc2_strt_fifo_error",
+ "brb_wc2_second_dscr_fifo_error",
+ "brb_wc2_pkt_avail_fifo_error",
+ "brb_wc2_cos_cnt_fifo_error",
+ "brb_wc2_notify_fifo_error",
+ "brb_wc2_ll_req_fifo_error",
+ "brb_wc2_ll_pa_cnt_error",
+ "brb_wc2_bb_pa_cnt_error",
+ "brb_wc3_inp_fifo_error",
+ "brb_wc3_sop_fifo_error",
+ "brb_wc3_eop_fifo_error",
+ "brb_wc3_queue_fifo_error",
+ "brb_wc3_free_point_fifo_error",
+ "brb_wc3_next_point_fifo_error",
+ "brb_wc3_strt_fifo_error",
+ "brb_wc3_second_dscr_fifo_error",
+ "brb_wc3_pkt_avail_fifo_error",
+ "brb_wc3_cos_cnt_fifo_error",
+ "brb_wc3_notify_fifo_error",
+ "brb_wc3_ll_req_fifo_error",
+ "brb_wc3_ll_pa_cnt_error",
+ "brb_wc3_bb_pa_cnt_error",
+ "brb_rc_pkt0_side_fifo_error",
+ "brb_rc_pkt0_req_fifo_error",
+ "brb_rc_pkt0_blk_fifo_error",
+ "brb_rc_pkt0_rls_left_fifo_error",
+ "brb_rc_pkt0_strt_ptr_fifo_error",
+ "brb_rc_pkt0_second_ptr_fifo_error",
+ "brb_rc_pkt0_rsp_fifo_error",
+ "brb_rc_pkt0_dscr_fifo_error",
+ "brb_rc_pkt1_side_fifo_error",
+ "brb_rc_pkt1_req_fifo_error",
+ "brb_rc_pkt1_blk_fifo_error",
+ "brb_rc_pkt1_rls_left_fifo_error",
+ "brb_rc_pkt1_strt_ptr_fifo_error",
+ "brb_rc_pkt1_second_ptr_fifo_error",
+ "brb_rc_pkt1_rsp_fifo_error",
+ "brb_rc_pkt1_dscr_fifo_error",
+ "brb_rc_pkt2_side_fifo_error",
+ "brb_rc_pkt2_req_fifo_error",
+ "brb_rc_pkt2_blk_fifo_error",
+ "brb_rc_pkt2_rls_left_fifo_error",
+ "brb_rc_pkt2_strt_ptr_fifo_error",
+ "brb_rc_pkt2_second_ptr_fifo_error",
+ "brb_rc_pkt2_rsp_fifo_error",
+ "brb_rc_pkt2_dscr_fifo_error",
+ "brb_rc_pkt3_side_fifo_error",
+ "brb_rc_pkt3_req_fifo_error",
+ "brb_rc_pkt3_blk_fifo_error",
+ "brb_rc_pkt3_rls_left_fifo_error",
+ "brb_rc_pkt3_strt_ptr_fifo_error",
+ "brb_rc_pkt3_second_ptr_fifo_error",
+ "brb_rc_pkt3_rsp_fifo_error",
+ "brb_rc_pkt3_dscr_fifo_error",
+ "brb_rc_sop_strt_fifo_error",
+ "brb_rc_sop_req_fifo_error",
+ "brb_rc_sop_dscr_fifo_error",
+ "brb_rc_sop_queue_fifo_error",
+ "brb_rc0_eop_error",
+ "brb_rc1_eop_error",
+ "brb_ll_arb_rls_fifo_error",
+ "brb_ll_arb_prefetch_fifo_error",
+ "brb_rc_pkt0_rls_fifo_error",
+ "brb_rc_pkt1_rls_fifo_error",
+ "brb_rc_pkt2_rls_fifo_error",
+ "brb_rc_pkt3_rls_fifo_error",
+ "brb_rc_pkt4_rls_fifo_error",
+ "brb_rc_pkt4_rls_error",
+ "brb_rc_pkt4_1st_error",
+ "brb_rc_pkt4_len_error",
+ "brb_rc_pkt4_middle_error",
+ "brb_rc_pkt4_protocol_error",
+ "brb_rc_pkt4_side_fifo_error",
+ "brb_rc_pkt4_req_fifo_error",
+ "brb_rc_pkt4_blk_fifo_error",
+ "brb_rc_pkt4_rls_left_fifo_error",
+ "brb_rc_pkt4_strt_ptr_fifo_error",
+ "brb_rc_pkt4_second_ptr_fifo_error",
+ "brb_rc_pkt4_rsp_fifo_error",
+ "brb_rc_pkt4_dscr_fifo_error",
+ "brb_rc_pkt5_rls_error",
+ "brb_packet_available_sync_fifo_push_error",
+ "brb_wc4_protocol_error",
+ "brb_wc5_protocol_error",
+ "brb_wc6_protocol_error",
+ "brb_wc7_protocol_error",
+ "brb_wc4_inp_fifo_error",
+ "brb_wc4_sop_fifo_error",
+ "brb_wc4_queue_fifo_error",
+ "brb_wc4_free_point_fifo_error",
+ "brb_wc4_next_point_fifo_error",
+ "brb_wc4_strt_fifo_error",
+ "brb_wc4_second_dscr_fifo_error",
+ "brb_wc4_pkt_avail_fifo_error",
+ "brb_wc4_cos_cnt_fifo_error",
+ "brb_wc4_notify_fifo_error",
+ "brb_wc4_ll_req_fifo_error",
+ "brb_wc4_ll_pa_cnt_error",
+ "brb_wc4_bb_pa_cnt_error",
+ "brb_wc5_inp_fifo_error",
+ "brb_wc5_sop_fifo_error",
+ "brb_wc5_queue_fifo_error",
+ "brb_wc5_free_point_fifo_error",
+ "brb_wc5_next_point_fifo_error",
+ "brb_wc5_strt_fifo_error",
+ "brb_wc5_second_dscr_fifo_error",
+ "brb_wc5_pkt_avail_fifo_error",
+ "brb_wc5_cos_cnt_fifo_error",
+ "brb_wc5_notify_fifo_error",
+ "brb_wc5_ll_req_fifo_error",
+ "brb_wc5_ll_pa_cnt_error",
+ "brb_wc5_bb_pa_cnt_error",
+ "brb_wc6_inp_fifo_error",
+ "brb_wc6_sop_fifo_error",
+ "brb_wc6_queue_fifo_error",
+ "brb_wc6_free_point_fifo_error",
+ "brb_wc6_next_point_fifo_error",
+ "brb_wc6_strt_fifo_error",
+ "brb_wc6_second_dscr_fifo_error",
+ "brb_wc6_pkt_avail_fifo_error",
+ "brb_wc6_cos_cnt_fifo_error",
+ "brb_wc6_notify_fifo_error",
+ "brb_wc6_ll_req_fifo_error",
+ "brb_wc6_ll_pa_cnt_error",
+ "brb_wc6_bb_pa_cnt_error",
+ "brb_wc7_inp_fifo_error",
+ "brb_wc7_sop_fifo_error",
+ "brb_wc7_queue_fifo_error",
+ "brb_wc7_free_point_fifo_error",
+ "brb_wc7_next_point_fifo_error",
+ "brb_wc7_strt_fifo_error",
+ "brb_wc7_second_dscr_fifo_error",
+ "brb_wc7_pkt_avail_fifo_error",
+ "brb_wc7_cos_cnt_fifo_error",
+ "brb_wc7_notify_fifo_error",
+ "brb_wc7_ll_req_fifo_error",
+ "brb_wc7_ll_pa_cnt_error",
+ "brb_wc7_bb_pa_cnt_error",
+ "brb_wc9_queue_fifo_error",
+ "brb_rc_sop_inp_sync_fifo_push_error",
+ "brb_rc0_inp_sync_fifo_push_error",
+ "brb_rc1_inp_sync_fifo_push_error",
+ "brb_rc2_inp_sync_fifo_push_error",
+ "brb_rc3_inp_sync_fifo_push_error",
+ "brb_rc0_out_sync_fifo_push_error",
+ "brb_rc1_out_sync_fifo_push_error",
+ "brb_rc2_out_sync_fifo_push_error",
+ "brb_rc3_out_sync_fifo_push_error",
+ "brb_rc4_out_sync_fifo_push_error",
+ "brb_unused_2",
+ "brb_rc0_eop_inp_sync_fifo_push_error",
+ "brb_rc1_eop_inp_sync_fifo_push_error",
+ "brb_rc2_eop_inp_sync_fifo_push_error",
+ "brb_rc3_eop_inp_sync_fifo_push_error",
+ "brb_rc0_eop_out_sync_fifo_push_error",
+ "brb_rc1_eop_out_sync_fifo_push_error",
+ "brb_rc2_eop_out_sync_fifo_push_error",
+ "brb_rc3_eop_out_sync_fifo_push_error",
+ "brb_unused_3",
+ "brb_rc2_eop_error",
+ "brb_rc3_eop_error",
+ "brb_mac2_fc_cnt_error",
+ "brb_mac3_fc_cnt_error",
+ "brb_wc4_eop_fifo_error",
+ "brb_wc5_eop_fifo_error",
+ "brb_wc6_eop_fifo_error",
+ "brb_wc7_eop_fifo_error",
+};
+#else
+#define brb_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 brb_int0_bb_a0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg brb_int0_bb_a0 = {
+ 0, 32, brb_int0_bb_a0_attn_idx, 0x3400c0, 0x3400cc, 0x3400c8, 0x3400c4
+};
+
+static const u16 brb_int1_bb_a0_attn_idx[30] = {
+ 32, 33, 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+};
+
+static struct attn_hw_reg brb_int1_bb_a0 = {
+ 1, 30, brb_int1_bb_a0_attn_idx, 0x3400d8, 0x3400e4, 0x3400e0, 0x3400dc
+};
+
+static const u16 brb_int2_bb_a0_attn_idx[28] = {
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+ 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
+};
+
+static struct attn_hw_reg brb_int2_bb_a0 = {
+ 2, 28, brb_int2_bb_a0_attn_idx, 0x3400f0, 0x3400fc, 0x3400f8, 0x3400f4
+};
+
+static const u16 brb_int3_bb_a0_attn_idx[31] = {
+ 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
+ 122,
+};
+
+static struct attn_hw_reg brb_int3_bb_a0 = {
+ 3, 31, brb_int3_bb_a0_attn_idx, 0x340108, 0x340114, 0x340110, 0x34010c
+};
+
+static const u16 brb_int4_bb_a0_attn_idx[27] = {
+ 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136,
+ 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+};
+
+static struct attn_hw_reg brb_int4_bb_a0 = {
+ 4, 27, brb_int4_bb_a0_attn_idx, 0x340120, 0x34012c, 0x340128, 0x340124
+};
+
+static const u16 brb_int5_bb_a0_attn_idx[1] = {
+ 150,
+};
+
+static struct attn_hw_reg brb_int5_bb_a0 = {
+ 5, 1, brb_int5_bb_a0_attn_idx, 0x340138, 0x340144, 0x340140, 0x34013c
+};
+
+static const u16 brb_int6_bb_a0_attn_idx[8] = {
+ 151, 152, 153, 154, 155, 156, 157, 158,
+};
+
+static struct attn_hw_reg brb_int6_bb_a0 = {
+ 6, 8, brb_int6_bb_a0_attn_idx, 0x340150, 0x34015c, 0x340158, 0x340154
+};
+
+static const u16 brb_int7_bb_a0_attn_idx[32] = {
+ 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+ 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186,
+ 187, 188, 189, 190,
+};
+
+static struct attn_hw_reg brb_int7_bb_a0 = {
+ 7, 32, brb_int7_bb_a0_attn_idx, 0x340168, 0x340174, 0x340170, 0x34016c
+};
+
+static const u16 brb_int8_bb_a0_attn_idx[17] = {
+ 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204,
+ 205, 206, 207,
+};
+
+static struct attn_hw_reg brb_int8_bb_a0 = {
+ 8, 17, brb_int8_bb_a0_attn_idx, 0x340184, 0x340190, 0x34018c, 0x340188
+};
+
+static const u16 brb_int9_bb_a0_attn_idx[1] = {
+ 208,
+};
+
+static struct attn_hw_reg brb_int9_bb_a0 = {
+ 9, 1, brb_int9_bb_a0_attn_idx, 0x34019c, 0x3401a8, 0x3401a4, 0x3401a0
+};
+
+static const u16 brb_int10_bb_a0_attn_idx[14] = {
+ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 220, 221, 224, 225,
+};
+
+static struct attn_hw_reg brb_int10_bb_a0 = {
+ 10, 14, brb_int10_bb_a0_attn_idx, 0x3401b4, 0x3401c0, 0x3401bc,
+ 0x3401b8
+};
+
+static const u16 brb_int11_bb_a0_attn_idx[8] = {
+ 229, 230, 231, 232, 233, 234, 235, 236,
+};
+
+static struct attn_hw_reg brb_int11_bb_a0 = {
+ 11, 8, brb_int11_bb_a0_attn_idx, 0x3401cc, 0x3401d8, 0x3401d4, 0x3401d0
+};
+
+static struct attn_hw_reg *brb_int_bb_a0_regs[12] = {
+ &brb_int0_bb_a0, &brb_int1_bb_a0, &brb_int2_bb_a0, &brb_int3_bb_a0,
+ &brb_int4_bb_a0, &brb_int5_bb_a0, &brb_int6_bb_a0, &brb_int7_bb_a0,
+ &brb_int8_bb_a0, &brb_int9_bb_a0, &brb_int10_bb_a0, &brb_int11_bb_a0,
+};
+
+static const u16 brb_int0_bb_b0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg brb_int0_bb_b0 = {
+ 0, 32, brb_int0_bb_b0_attn_idx, 0x3400c0, 0x3400cc, 0x3400c8, 0x3400c4
+};
+
+static const u16 brb_int1_bb_b0_attn_idx[30] = {
+ 32, 33, 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+};
+
+static struct attn_hw_reg brb_int1_bb_b0 = {
+ 1, 30, brb_int1_bb_b0_attn_idx, 0x3400d8, 0x3400e4, 0x3400e0, 0x3400dc
+};
+
+static const u16 brb_int2_bb_b0_attn_idx[28] = {
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+ 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
+};
+
+static struct attn_hw_reg brb_int2_bb_b0 = {
+ 2, 28, brb_int2_bb_b0_attn_idx, 0x3400f0, 0x3400fc, 0x3400f8, 0x3400f4
+};
+
+static const u16 brb_int3_bb_b0_attn_idx[31] = {
+ 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
+ 122,
+};
+
+static struct attn_hw_reg brb_int3_bb_b0 = {
+ 3, 31, brb_int3_bb_b0_attn_idx, 0x340108, 0x340114, 0x340110, 0x34010c
+};
+
+static const u16 brb_int4_bb_b0_attn_idx[27] = {
+ 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136,
+ 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+};
+
+static struct attn_hw_reg brb_int4_bb_b0 = {
+ 4, 27, brb_int4_bb_b0_attn_idx, 0x340120, 0x34012c, 0x340128, 0x340124
+};
+
+static const u16 brb_int5_bb_b0_attn_idx[1] = {
+ 150,
+};
+
+static struct attn_hw_reg brb_int5_bb_b0 = {
+ 5, 1, brb_int5_bb_b0_attn_idx, 0x340138, 0x340144, 0x340140, 0x34013c
+};
+
+static const u16 brb_int6_bb_b0_attn_idx[8] = {
+ 151, 152, 153, 154, 155, 156, 157, 158,
+};
+
+static struct attn_hw_reg brb_int6_bb_b0 = {
+ 6, 8, brb_int6_bb_b0_attn_idx, 0x340150, 0x34015c, 0x340158, 0x340154
+};
+
+static const u16 brb_int7_bb_b0_attn_idx[32] = {
+ 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+ 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186,
+ 187, 188, 189, 190,
+};
+
+static struct attn_hw_reg brb_int7_bb_b0 = {
+ 7, 32, brb_int7_bb_b0_attn_idx, 0x340168, 0x340174, 0x340170, 0x34016c
+};
+
+static const u16 brb_int8_bb_b0_attn_idx[17] = {
+ 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204,
+ 205, 206, 207,
+};
+
+static struct attn_hw_reg brb_int8_bb_b0 = {
+ 8, 17, brb_int8_bb_b0_attn_idx, 0x340184, 0x340190, 0x34018c, 0x340188
+};
+
+static const u16 brb_int9_bb_b0_attn_idx[1] = {
+ 208,
+};
+
+static struct attn_hw_reg brb_int9_bb_b0 = {
+ 9, 1, brb_int9_bb_b0_attn_idx, 0x34019c, 0x3401a8, 0x3401a4, 0x3401a0
+};
+
+static const u16 brb_int10_bb_b0_attn_idx[14] = {
+ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 220, 221, 224, 225,
+};
+
+static struct attn_hw_reg brb_int10_bb_b0 = {
+ 10, 14, brb_int10_bb_b0_attn_idx, 0x3401b4, 0x3401c0, 0x3401bc,
+ 0x3401b8
+};
+
+static const u16 brb_int11_bb_b0_attn_idx[8] = {
+ 229, 230, 231, 232, 233, 234, 235, 236,
+};
+
+static struct attn_hw_reg brb_int11_bb_b0 = {
+ 11, 8, brb_int11_bb_b0_attn_idx, 0x3401cc, 0x3401d8, 0x3401d4, 0x3401d0
+};
+
+static struct attn_hw_reg *brb_int_bb_b0_regs[12] = {
+ &brb_int0_bb_b0, &brb_int1_bb_b0, &brb_int2_bb_b0, &brb_int3_bb_b0,
+ &brb_int4_bb_b0, &brb_int5_bb_b0, &brb_int6_bb_b0, &brb_int7_bb_b0,
+ &brb_int8_bb_b0, &brb_int9_bb_b0, &brb_int10_bb_b0, &brb_int11_bb_b0,
+};
+
+static const u16 brb_int0_k2_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg brb_int0_k2 = {
+ 0, 32, brb_int0_k2_attn_idx, 0x3400c0, 0x3400cc, 0x3400c8, 0x3400c4
+};
+
+static const u16 brb_int1_k2_attn_idx[30] = {
+ 32, 33, 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+};
+
+static struct attn_hw_reg brb_int1_k2 = {
+ 1, 30, brb_int1_k2_attn_idx, 0x3400d8, 0x3400e4, 0x3400e0, 0x3400dc
+};
+
+static const u16 brb_int2_k2_attn_idx[28] = {
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+ 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
+};
+
+static struct attn_hw_reg brb_int2_k2 = {
+ 2, 28, brb_int2_k2_attn_idx, 0x3400f0, 0x3400fc, 0x3400f8, 0x3400f4
+};
+
+static const u16 brb_int3_k2_attn_idx[31] = {
+ 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
+ 122,
+};
+
+static struct attn_hw_reg brb_int3_k2 = {
+ 3, 31, brb_int3_k2_attn_idx, 0x340108, 0x340114, 0x340110, 0x34010c
+};
+
+static const u16 brb_int4_k2_attn_idx[27] = {
+ 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136,
+ 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+};
+
+static struct attn_hw_reg brb_int4_k2 = {
+ 4, 27, brb_int4_k2_attn_idx, 0x340120, 0x34012c, 0x340128, 0x340124
+};
+
+static const u16 brb_int5_k2_attn_idx[1] = {
+ 150,
+};
+
+static struct attn_hw_reg brb_int5_k2 = {
+ 5, 1, brb_int5_k2_attn_idx, 0x340138, 0x340144, 0x340140, 0x34013c
+};
+
+static const u16 brb_int6_k2_attn_idx[8] = {
+ 151, 152, 153, 154, 155, 156, 157, 158,
+};
+
+static struct attn_hw_reg brb_int6_k2 = {
+ 6, 8, brb_int6_k2_attn_idx, 0x340150, 0x34015c, 0x340158, 0x340154
+};
+
+static const u16 brb_int7_k2_attn_idx[32] = {
+ 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+ 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186,
+ 187, 188, 189, 190,
+};
+
+static struct attn_hw_reg brb_int7_k2 = {
+ 7, 32, brb_int7_k2_attn_idx, 0x340168, 0x340174, 0x340170, 0x34016c
+};
+
+static const u16 brb_int8_k2_attn_idx[17] = {
+ 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204,
+ 205, 206, 207,
+};
+
+static struct attn_hw_reg brb_int8_k2 = {
+ 8, 17, brb_int8_k2_attn_idx, 0x340184, 0x340190, 0x34018c, 0x340188
+};
+
+static const u16 brb_int9_k2_attn_idx[1] = {
+ 208,
+};
+
+static struct attn_hw_reg brb_int9_k2 = {
+ 9, 1, brb_int9_k2_attn_idx, 0x34019c, 0x3401a8, 0x3401a4, 0x3401a0
+};
+
+static const u16 brb_int10_k2_attn_idx[18] = {
+ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 220, 221, 222, 223,
+ 224, 225, 226, 227,
+};
+
+static struct attn_hw_reg brb_int10_k2 = {
+ 10, 18, brb_int10_k2_attn_idx, 0x3401b4, 0x3401c0, 0x3401bc, 0x3401b8
+};
+
+static const u16 brb_int11_k2_attn_idx[8] = {
+ 229, 230, 231, 232, 233, 234, 235, 236,
+};
+
+static struct attn_hw_reg brb_int11_k2 = {
+ 11, 8, brb_int11_k2_attn_idx, 0x3401cc, 0x3401d8, 0x3401d4, 0x3401d0
+};
+
+static struct attn_hw_reg *brb_int_k2_regs[12] = {
+ &brb_int0_k2, &brb_int1_k2, &brb_int2_k2, &brb_int3_k2, &brb_int4_k2,
+ &brb_int5_k2, &brb_int6_k2, &brb_int7_k2, &brb_int8_k2, &brb_int9_k2,
+ &brb_int10_k2, &brb_int11_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *brb_prty_attn_desc[75] = {
+ "brb_ll_bank0_mem_prty",
+ "brb_ll_bank1_mem_prty",
+ "brb_ll_bank2_mem_prty",
+ "brb_ll_bank3_mem_prty",
+ "brb_datapath_registers",
+ "brb_mem001_i_ecc_rf_int",
+ "brb_mem008_i_ecc_rf_int",
+ "brb_mem009_i_ecc_rf_int",
+ "brb_mem010_i_ecc_rf_int",
+ "brb_mem011_i_ecc_rf_int",
+ "brb_mem012_i_ecc_rf_int",
+ "brb_mem013_i_ecc_rf_int",
+ "brb_mem014_i_ecc_rf_int",
+ "brb_mem015_i_ecc_rf_int",
+ "brb_mem016_i_ecc_rf_int",
+ "brb_mem002_i_ecc_rf_int",
+ "brb_mem003_i_ecc_rf_int",
+ "brb_mem004_i_ecc_rf_int",
+ "brb_mem005_i_ecc_rf_int",
+ "brb_mem006_i_ecc_rf_int",
+ "brb_mem007_i_ecc_rf_int",
+ "brb_mem070_i_mem_prty",
+ "brb_mem069_i_mem_prty",
+ "brb_mem053_i_mem_prty",
+ "brb_mem054_i_mem_prty",
+ "brb_mem055_i_mem_prty",
+ "brb_mem056_i_mem_prty",
+ "brb_mem057_i_mem_prty",
+ "brb_mem058_i_mem_prty",
+ "brb_mem059_i_mem_prty",
+ "brb_mem060_i_mem_prty",
+ "brb_mem061_i_mem_prty",
+ "brb_mem062_i_mem_prty",
+ "brb_mem063_i_mem_prty",
+ "brb_mem064_i_mem_prty",
+ "brb_mem065_i_mem_prty",
+ "brb_mem045_i_mem_prty",
+ "brb_mem046_i_mem_prty",
+ "brb_mem047_i_mem_prty",
+ "brb_mem048_i_mem_prty",
+ "brb_mem049_i_mem_prty",
+ "brb_mem050_i_mem_prty",
+ "brb_mem051_i_mem_prty",
+ "brb_mem052_i_mem_prty",
+ "brb_mem041_i_mem_prty",
+ "brb_mem042_i_mem_prty",
+ "brb_mem043_i_mem_prty",
+ "brb_mem044_i_mem_prty",
+ "brb_mem040_i_mem_prty",
+ "brb_mem035_i_mem_prty",
+ "brb_mem066_i_mem_prty",
+ "brb_mem067_i_mem_prty",
+ "brb_mem068_i_mem_prty",
+ "brb_mem030_i_mem_prty",
+ "brb_mem031_i_mem_prty",
+ "brb_mem032_i_mem_prty",
+ "brb_mem033_i_mem_prty",
+ "brb_mem037_i_mem_prty",
+ "brb_mem038_i_mem_prty",
+ "brb_mem034_i_mem_prty",
+ "brb_mem036_i_mem_prty",
+ "brb_mem017_i_mem_prty",
+ "brb_mem018_i_mem_prty",
+ "brb_mem019_i_mem_prty",
+ "brb_mem020_i_mem_prty",
+ "brb_mem021_i_mem_prty",
+ "brb_mem022_i_mem_prty",
+ "brb_mem023_i_mem_prty",
+ "brb_mem024_i_mem_prty",
+ "brb_mem029_i_mem_prty",
+ "brb_mem026_i_mem_prty",
+ "brb_mem027_i_mem_prty",
+ "brb_mem028_i_mem_prty",
+ "brb_mem025_i_mem_prty",
+ "brb_mem039_i_mem_prty",
+};
+#else
+#define brb_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 brb_prty1_bb_a0_attn_idx[31] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 23, 24, 36,
+ 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 49,
+};
+
+static struct attn_hw_reg brb_prty1_bb_a0 = {
+ 0, 31, brb_prty1_bb_a0_attn_idx, 0x340400, 0x34040c, 0x340408, 0x340404
+};
+
+static const u16 brb_prty2_bb_a0_attn_idx[19] = {
+ 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 69, 70, 71, 72, 73, 74,
+ 48,
+};
+
+static struct attn_hw_reg brb_prty2_bb_a0 = {
+ 1, 19, brb_prty2_bb_a0_attn_idx, 0x340410, 0x34041c, 0x340418, 0x340414
+};
+
+static struct attn_hw_reg *brb_prty_bb_a0_regs[2] = {
+ &brb_prty1_bb_a0, &brb_prty2_bb_a0,
+};
+
+static const u16 brb_prty0_bb_b0_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg brb_prty0_bb_b0 = {
+ 0, 5, brb_prty0_bb_b0_attn_idx, 0x3401dc, 0x3401e8, 0x3401e4, 0x3401e0
+};
+
+static const u16 brb_prty1_bb_b0_attn_idx[31] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 23, 24, 36,
+ 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+};
+
+static struct attn_hw_reg brb_prty1_bb_b0 = {
+ 1, 31, brb_prty1_bb_b0_attn_idx, 0x340400, 0x34040c, 0x340408, 0x340404
+};
+
+static const u16 brb_prty2_bb_b0_attn_idx[14] = {
+ 53, 54, 55, 56, 59, 61, 62, 63, 64, 69, 70, 71, 72, 73,
+};
+
+static struct attn_hw_reg brb_prty2_bb_b0 = {
+ 2, 14, brb_prty2_bb_b0_attn_idx, 0x340410, 0x34041c, 0x340418, 0x340414
+};
+
+static struct attn_hw_reg *brb_prty_bb_b0_regs[3] = {
+ &brb_prty0_bb_b0, &brb_prty1_bb_b0, &brb_prty2_bb_b0,
+};
+
+static const u16 brb_prty0_k2_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg brb_prty0_k2 = {
+ 0, 5, brb_prty0_k2_attn_idx, 0x3401dc, 0x3401e8, 0x3401e4, 0x3401e0
+};
+
+static const u16 brb_prty1_k2_attn_idx[31] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg brb_prty1_k2 = {
+ 1, 31, brb_prty1_k2_attn_idx, 0x340400, 0x34040c, 0x340408, 0x340404
+};
+
+static const u16 brb_prty2_k2_attn_idx[30] = {
+ 50, 51, 52, 36, 37, 38, 39, 40, 41, 42, 43, 47, 53, 54, 55, 56, 57, 58,
+ 59, 49, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+};
+
+static struct attn_hw_reg brb_prty2_k2 = {
+ 2, 30, brb_prty2_k2_attn_idx, 0x340410, 0x34041c, 0x340418, 0x340414
+};
+
+static struct attn_hw_reg *brb_prty_k2_regs[3] = {
+ &brb_prty0_k2, &brb_prty1_k2, &brb_prty2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *src_int_attn_desc[1] = {
+ "src_address_error",
+};
+#else
+#define src_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 src_int0_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg src_int0_bb_a0 = {
+ 0, 1, src_int0_bb_a0_attn_idx, 0x2381d8, 0x2381dc, 0x2381e0, 0x2381e4
+};
+
+static struct attn_hw_reg *src_int_bb_a0_regs[1] = {
+ &src_int0_bb_a0,
+};
+
+static const u16 src_int0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg src_int0_bb_b0 = {
+ 0, 1, src_int0_bb_b0_attn_idx, 0x2381d8, 0x2381dc, 0x2381e0, 0x2381e4
+};
+
+static struct attn_hw_reg *src_int_bb_b0_regs[1] = {
+ &src_int0_bb_b0,
+};
+
+static const u16 src_int0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg src_int0_k2 = {
+ 0, 1, src_int0_k2_attn_idx, 0x2381d8, 0x2381dc, 0x2381e0, 0x2381e4
+};
+
+static struct attn_hw_reg *src_int_k2_regs[1] = {
+ &src_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *prs_int_attn_desc[2] = {
+ "prs_address_error",
+ "prs_lcid_validation_err",
+};
+#else
+#define prs_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 prs_int0_bb_a0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg prs_int0_bb_a0 = {
+ 0, 2, prs_int0_bb_a0_attn_idx, 0x1f0040, 0x1f004c, 0x1f0048, 0x1f0044
+};
+
+static struct attn_hw_reg *prs_int_bb_a0_regs[1] = {
+ &prs_int0_bb_a0,
+};
+
+static const u16 prs_int0_bb_b0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg prs_int0_bb_b0 = {
+ 0, 2, prs_int0_bb_b0_attn_idx, 0x1f0040, 0x1f004c, 0x1f0048, 0x1f0044
+};
+
+static struct attn_hw_reg *prs_int_bb_b0_regs[1] = {
+ &prs_int0_bb_b0,
+};
+
+static const u16 prs_int0_k2_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg prs_int0_k2 = {
+ 0, 2, prs_int0_k2_attn_idx, 0x1f0040, 0x1f004c, 0x1f0048, 0x1f0044
+};
+
+static struct attn_hw_reg *prs_int_k2_regs[1] = {
+ &prs_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *prs_prty_attn_desc[75] = {
+ "prs_cam_parity",
+ "prs_gft_cam_parity",
+ "prs_mem011_i_ecc_rf_int",
+ "prs_mem012_i_ecc_rf_int",
+ "prs_mem016_i_ecc_rf_int",
+ "prs_mem017_i_ecc_rf_int",
+ "prs_mem021_i_ecc_rf_int",
+ "prs_mem022_i_ecc_rf_int",
+ "prs_mem026_i_ecc_rf_int",
+ "prs_mem027_i_ecc_rf_int",
+ "prs_mem064_i_mem_prty",
+ "prs_mem044_i_mem_prty",
+ "prs_mem043_i_mem_prty",
+ "prs_mem037_i_mem_prty",
+ "prs_mem033_i_mem_prty",
+ "prs_mem034_i_mem_prty",
+ "prs_mem035_i_mem_prty",
+ "prs_mem036_i_mem_prty",
+ "prs_mem029_i_mem_prty",
+ "prs_mem030_i_mem_prty",
+ "prs_mem031_i_mem_prty",
+ "prs_mem032_i_mem_prty",
+ "prs_mem007_i_mem_prty",
+ "prs_mem028_i_mem_prty",
+ "prs_mem039_i_mem_prty",
+ "prs_mem040_i_mem_prty",
+ "prs_mem058_i_mem_prty",
+ "prs_mem059_i_mem_prty",
+ "prs_mem041_i_mem_prty",
+ "prs_mem042_i_mem_prty",
+ "prs_mem060_i_mem_prty",
+ "prs_mem061_i_mem_prty",
+ "prs_mem009_i_mem_prty",
+ "prs_mem009_i_ecc_rf_int",
+ "prs_mem010_i_ecc_rf_int",
+ "prs_mem014_i_ecc_rf_int",
+ "prs_mem015_i_ecc_rf_int",
+ "prs_mem026_i_mem_prty",
+ "prs_mem025_i_mem_prty",
+ "prs_mem021_i_mem_prty",
+ "prs_mem019_i_mem_prty",
+ "prs_mem020_i_mem_prty",
+ "prs_mem017_i_mem_prty",
+ "prs_mem018_i_mem_prty",
+ "prs_mem005_i_mem_prty",
+ "prs_mem016_i_mem_prty",
+ "prs_mem023_i_mem_prty",
+ "prs_mem024_i_mem_prty",
+ "prs_mem008_i_mem_prty",
+ "prs_mem012_i_mem_prty",
+ "prs_mem013_i_mem_prty",
+ "prs_mem006_i_mem_prty",
+ "prs_mem011_i_mem_prty",
+ "prs_mem003_i_mem_prty",
+ "prs_mem004_i_mem_prty",
+ "prs_mem027_i_mem_prty",
+ "prs_mem010_i_mem_prty",
+ "prs_mem014_i_mem_prty",
+ "prs_mem015_i_mem_prty",
+ "prs_mem054_i_mem_prty",
+ "prs_mem055_i_mem_prty",
+ "prs_mem056_i_mem_prty",
+ "prs_mem057_i_mem_prty",
+ "prs_mem046_i_mem_prty",
+ "prs_mem047_i_mem_prty",
+ "prs_mem048_i_mem_prty",
+ "prs_mem049_i_mem_prty",
+ "prs_mem050_i_mem_prty",
+ "prs_mem051_i_mem_prty",
+ "prs_mem052_i_mem_prty",
+ "prs_mem053_i_mem_prty",
+ "prs_mem062_i_mem_prty",
+ "prs_mem045_i_mem_prty",
+ "prs_mem002_i_mem_prty",
+ "prs_mem001_i_mem_prty",
+};
+#else
+#define prs_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 prs_prty0_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg prs_prty0_bb_a0 = {
+ 0, 1, prs_prty0_bb_a0_attn_idx, 0x1f0050, 0x1f005c, 0x1f0058, 0x1f0054
+};
+
+static const u16 prs_prty1_bb_a0_attn_idx[31] = {
+ 13, 14, 15, 16, 18, 21, 22, 23, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
+};
+
+static struct attn_hw_reg prs_prty1_bb_a0 = {
+ 1, 31, prs_prty1_bb_a0_attn_idx, 0x1f0204, 0x1f0210, 0x1f020c, 0x1f0208
+};
+
+static const u16 prs_prty2_bb_a0_attn_idx[5] = {
+ 73, 74, 20, 17, 19,
+};
+
+static struct attn_hw_reg prs_prty2_bb_a0 = {
+ 2, 5, prs_prty2_bb_a0_attn_idx, 0x1f0214, 0x1f0220, 0x1f021c, 0x1f0218
+};
+
+static struct attn_hw_reg *prs_prty_bb_a0_regs[3] = {
+ &prs_prty0_bb_a0, &prs_prty1_bb_a0, &prs_prty2_bb_a0,
+};
+
+static const u16 prs_prty0_bb_b0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg prs_prty0_bb_b0 = {
+ 0, 2, prs_prty0_bb_b0_attn_idx, 0x1f0050, 0x1f005c, 0x1f0058, 0x1f0054
+};
+
+static const u16 prs_prty1_bb_b0_attn_idx[31] = {
+ 13, 14, 15, 16, 18, 19, 21, 22, 23, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+ 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+};
+
+static struct attn_hw_reg prs_prty1_bb_b0 = {
+ 1, 31, prs_prty1_bb_b0_attn_idx, 0x1f0204, 0x1f0210, 0x1f020c, 0x1f0208
+};
+
+static const u16 prs_prty2_bb_b0_attn_idx[5] = {
+ 73, 74, 20, 17, 55,
+};
+
+static struct attn_hw_reg prs_prty2_bb_b0 = {
+ 2, 5, prs_prty2_bb_b0_attn_idx, 0x1f0214, 0x1f0220, 0x1f021c, 0x1f0218
+};
+
+static struct attn_hw_reg *prs_prty_bb_b0_regs[3] = {
+ &prs_prty0_bb_b0, &prs_prty1_bb_b0, &prs_prty2_bb_b0,
+};
+
+static const u16 prs_prty0_k2_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg prs_prty0_k2 = {
+ 0, 2, prs_prty0_k2_attn_idx, 0x1f0050, 0x1f005c, 0x1f0058, 0x1f0054
+};
+
+static const u16 prs_prty1_k2_attn_idx[31] = {
+ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+};
+
+static struct attn_hw_reg prs_prty1_k2 = {
+ 1, 31, prs_prty1_k2_attn_idx, 0x1f0204, 0x1f0210, 0x1f020c, 0x1f0208
+};
+
+static const u16 prs_prty2_k2_attn_idx[31] = {
+ 56, 57, 58, 40, 41, 47, 38, 48, 50, 43, 46, 59, 60, 61, 62, 53, 54, 44,
+ 51, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
+};
+
+static struct attn_hw_reg prs_prty2_k2 = {
+ 2, 31, prs_prty2_k2_attn_idx, 0x1f0214, 0x1f0220, 0x1f021c, 0x1f0218
+};
+
+static struct attn_hw_reg *prs_prty_k2_regs[3] = {
+ &prs_prty0_k2, &prs_prty1_k2, &prs_prty2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tsdm_int_attn_desc[28] = {
+ "tsdm_address_error",
+ "tsdm_inp_queue_error",
+ "tsdm_delay_fifo_error",
+ "tsdm_async_host_error",
+ "tsdm_prm_fifo_error",
+ "tsdm_ccfc_load_pend_error",
+ "tsdm_tcfc_load_pend_error",
+ "tsdm_dst_int_ram_wait_error",
+ "tsdm_dst_pas_buf_wait_error",
+ "tsdm_dst_pxp_immed_error",
+ "tsdm_dst_pxp_dst_pend_error",
+ "tsdm_dst_brb_src_pend_error",
+ "tsdm_dst_brb_src_addr_error",
+ "tsdm_rsp_brb_pend_error",
+ "tsdm_rsp_int_ram_pend_error",
+ "tsdm_rsp_brb_rd_data_error",
+ "tsdm_rsp_int_ram_rd_data_error",
+ "tsdm_rsp_pxp_rd_data_error",
+ "tsdm_cm_delay_error",
+ "tsdm_sh_delay_error",
+ "tsdm_cmpl_pend_error",
+ "tsdm_cprm_pend_error",
+ "tsdm_timer_addr_error",
+ "tsdm_timer_pend_error",
+ "tsdm_dorq_dpm_error",
+ "tsdm_dst_pxp_done_error",
+ "tsdm_xcm_rmt_buffer_error",
+ "tsdm_ycm_rmt_buffer_error",
+};
+#else
+#define tsdm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 tsdm_int0_bb_a0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg tsdm_int0_bb_a0 = {
+ 0, 26, tsdm_int0_bb_a0_attn_idx, 0xfb0040, 0xfb004c, 0xfb0048, 0xfb0044
+};
+
+static struct attn_hw_reg *tsdm_int_bb_a0_regs[1] = {
+ &tsdm_int0_bb_a0,
+};
+
+static const u16 tsdm_int0_bb_b0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg tsdm_int0_bb_b0 = {
+ 0, 26, tsdm_int0_bb_b0_attn_idx, 0xfb0040, 0xfb004c, 0xfb0048, 0xfb0044
+};
+
+static struct attn_hw_reg *tsdm_int_bb_b0_regs[1] = {
+ &tsdm_int0_bb_b0,
+};
+
+static const u16 tsdm_int0_k2_attn_idx[28] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27,
+};
+
+static struct attn_hw_reg tsdm_int0_k2 = {
+ 0, 28, tsdm_int0_k2_attn_idx, 0xfb0040, 0xfb004c, 0xfb0048, 0xfb0044
+};
+
+static struct attn_hw_reg *tsdm_int_k2_regs[1] = {
+ &tsdm_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tsdm_prty_attn_desc[10] = {
+ "tsdm_mem009_i_mem_prty",
+ "tsdm_mem008_i_mem_prty",
+ "tsdm_mem007_i_mem_prty",
+ "tsdm_mem006_i_mem_prty",
+ "tsdm_mem005_i_mem_prty",
+ "tsdm_mem002_i_mem_prty",
+ "tsdm_mem010_i_mem_prty",
+ "tsdm_mem001_i_mem_prty",
+ "tsdm_mem003_i_mem_prty",
+ "tsdm_mem004_i_mem_prty",
+};
+#else
+#define tsdm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 tsdm_prty1_bb_a0_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg tsdm_prty1_bb_a0 = {
+ 0, 10, tsdm_prty1_bb_a0_attn_idx, 0xfb0200, 0xfb020c, 0xfb0208,
+ 0xfb0204
+};
+
+static struct attn_hw_reg *tsdm_prty_bb_a0_regs[1] = {
+ &tsdm_prty1_bb_a0,
+};
+
+static const u16 tsdm_prty1_bb_b0_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg tsdm_prty1_bb_b0 = {
+ 0, 10, tsdm_prty1_bb_b0_attn_idx, 0xfb0200, 0xfb020c, 0xfb0208,
+ 0xfb0204
+};
+
+static struct attn_hw_reg *tsdm_prty_bb_b0_regs[1] = {
+ &tsdm_prty1_bb_b0,
+};
+
+static const u16 tsdm_prty1_k2_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg tsdm_prty1_k2 = {
+ 0, 10, tsdm_prty1_k2_attn_idx, 0xfb0200, 0xfb020c, 0xfb0208, 0xfb0204
+};
+
+static struct attn_hw_reg *tsdm_prty_k2_regs[1] = {
+ &tsdm_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *msdm_int_attn_desc[28] = {
+ "msdm_address_error",
+ "msdm_inp_queue_error",
+ "msdm_delay_fifo_error",
+ "msdm_async_host_error",
+ "msdm_prm_fifo_error",
+ "msdm_ccfc_load_pend_error",
+ "msdm_tcfc_load_pend_error",
+ "msdm_dst_int_ram_wait_error",
+ "msdm_dst_pas_buf_wait_error",
+ "msdm_dst_pxp_immed_error",
+ "msdm_dst_pxp_dst_pend_error",
+ "msdm_dst_brb_src_pend_error",
+ "msdm_dst_brb_src_addr_error",
+ "msdm_rsp_brb_pend_error",
+ "msdm_rsp_int_ram_pend_error",
+ "msdm_rsp_brb_rd_data_error",
+ "msdm_rsp_int_ram_rd_data_error",
+ "msdm_rsp_pxp_rd_data_error",
+ "msdm_cm_delay_error",
+ "msdm_sh_delay_error",
+ "msdm_cmpl_pend_error",
+ "msdm_cprm_pend_error",
+ "msdm_timer_addr_error",
+ "msdm_timer_pend_error",
+ "msdm_dorq_dpm_error",
+ "msdm_dst_pxp_done_error",
+ "msdm_xcm_rmt_buffer_error",
+ "msdm_ycm_rmt_buffer_error",
+};
+#else
+#define msdm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 msdm_int0_bb_a0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg msdm_int0_bb_a0 = {
+ 0, 26, msdm_int0_bb_a0_attn_idx, 0xfc0040, 0xfc004c, 0xfc0048, 0xfc0044
+};
+
+static struct attn_hw_reg *msdm_int_bb_a0_regs[1] = {
+ &msdm_int0_bb_a0,
+};
+
+static const u16 msdm_int0_bb_b0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg msdm_int0_bb_b0 = {
+ 0, 26, msdm_int0_bb_b0_attn_idx, 0xfc0040, 0xfc004c, 0xfc0048, 0xfc0044
+};
+
+static struct attn_hw_reg *msdm_int_bb_b0_regs[1] = {
+ &msdm_int0_bb_b0,
+};
+
+static const u16 msdm_int0_k2_attn_idx[28] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27,
+};
+
+static struct attn_hw_reg msdm_int0_k2 = {
+ 0, 28, msdm_int0_k2_attn_idx, 0xfc0040, 0xfc004c, 0xfc0048, 0xfc0044
+};
+
+static struct attn_hw_reg *msdm_int_k2_regs[1] = {
+ &msdm_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *msdm_prty_attn_desc[11] = {
+ "msdm_mem009_i_mem_prty",
+ "msdm_mem008_i_mem_prty",
+ "msdm_mem007_i_mem_prty",
+ "msdm_mem006_i_mem_prty",
+ "msdm_mem005_i_mem_prty",
+ "msdm_mem002_i_mem_prty",
+ "msdm_mem011_i_mem_prty",
+ "msdm_mem001_i_mem_prty",
+ "msdm_mem003_i_mem_prty",
+ "msdm_mem004_i_mem_prty",
+ "msdm_mem010_i_mem_prty",
+};
+#else
+#define msdm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 msdm_prty1_bb_a0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg msdm_prty1_bb_a0 = {
+ 0, 11, msdm_prty1_bb_a0_attn_idx, 0xfc0200, 0xfc020c, 0xfc0208,
+ 0xfc0204
+};
+
+static struct attn_hw_reg *msdm_prty_bb_a0_regs[1] = {
+ &msdm_prty1_bb_a0,
+};
+
+static const u16 msdm_prty1_bb_b0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg msdm_prty1_bb_b0 = {
+ 0, 11, msdm_prty1_bb_b0_attn_idx, 0xfc0200, 0xfc020c, 0xfc0208,
+ 0xfc0204
+};
+
+static struct attn_hw_reg *msdm_prty_bb_b0_regs[1] = {
+ &msdm_prty1_bb_b0,
+};
+
+static const u16 msdm_prty1_k2_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg msdm_prty1_k2 = {
+ 0, 11, msdm_prty1_k2_attn_idx, 0xfc0200, 0xfc020c, 0xfc0208, 0xfc0204
+};
+
+static struct attn_hw_reg *msdm_prty_k2_regs[1] = {
+ &msdm_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *usdm_int_attn_desc[28] = {
+ "usdm_address_error",
+ "usdm_inp_queue_error",
+ "usdm_delay_fifo_error",
+ "usdm_async_host_error",
+ "usdm_prm_fifo_error",
+ "usdm_ccfc_load_pend_error",
+ "usdm_tcfc_load_pend_error",
+ "usdm_dst_int_ram_wait_error",
+ "usdm_dst_pas_buf_wait_error",
+ "usdm_dst_pxp_immed_error",
+ "usdm_dst_pxp_dst_pend_error",
+ "usdm_dst_brb_src_pend_error",
+ "usdm_dst_brb_src_addr_error",
+ "usdm_rsp_brb_pend_error",
+ "usdm_rsp_int_ram_pend_error",
+ "usdm_rsp_brb_rd_data_error",
+ "usdm_rsp_int_ram_rd_data_error",
+ "usdm_rsp_pxp_rd_data_error",
+ "usdm_cm_delay_error",
+ "usdm_sh_delay_error",
+ "usdm_cmpl_pend_error",
+ "usdm_cprm_pend_error",
+ "usdm_timer_addr_error",
+ "usdm_timer_pend_error",
+ "usdm_dorq_dpm_error",
+ "usdm_dst_pxp_done_error",
+ "usdm_xcm_rmt_buffer_error",
+ "usdm_ycm_rmt_buffer_error",
+};
+#else
+#define usdm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 usdm_int0_bb_a0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg usdm_int0_bb_a0 = {
+ 0, 26, usdm_int0_bb_a0_attn_idx, 0xfd0040, 0xfd004c, 0xfd0048, 0xfd0044
+};
+
+static struct attn_hw_reg *usdm_int_bb_a0_regs[1] = {
+ &usdm_int0_bb_a0,
+};
+
+static const u16 usdm_int0_bb_b0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg usdm_int0_bb_b0 = {
+ 0, 26, usdm_int0_bb_b0_attn_idx, 0xfd0040, 0xfd004c, 0xfd0048, 0xfd0044
+};
+
+static struct attn_hw_reg *usdm_int_bb_b0_regs[1] = {
+ &usdm_int0_bb_b0,
+};
+
+static const u16 usdm_int0_k2_attn_idx[28] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27,
+};
+
+static struct attn_hw_reg usdm_int0_k2 = {
+ 0, 28, usdm_int0_k2_attn_idx, 0xfd0040, 0xfd004c, 0xfd0048, 0xfd0044
+};
+
+static struct attn_hw_reg *usdm_int_k2_regs[1] = {
+ &usdm_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *usdm_prty_attn_desc[10] = {
+ "usdm_mem008_i_mem_prty",
+ "usdm_mem007_i_mem_prty",
+ "usdm_mem006_i_mem_prty",
+ "usdm_mem005_i_mem_prty",
+ "usdm_mem002_i_mem_prty",
+ "usdm_mem010_i_mem_prty",
+ "usdm_mem001_i_mem_prty",
+ "usdm_mem003_i_mem_prty",
+ "usdm_mem004_i_mem_prty",
+ "usdm_mem009_i_mem_prty",
+};
+#else
+#define usdm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 usdm_prty1_bb_a0_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg usdm_prty1_bb_a0 = {
+ 0, 10, usdm_prty1_bb_a0_attn_idx, 0xfd0200, 0xfd020c, 0xfd0208,
+ 0xfd0204
+};
+
+static struct attn_hw_reg *usdm_prty_bb_a0_regs[1] = {
+ &usdm_prty1_bb_a0,
+};
+
+static const u16 usdm_prty1_bb_b0_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg usdm_prty1_bb_b0 = {
+ 0, 10, usdm_prty1_bb_b0_attn_idx, 0xfd0200, 0xfd020c, 0xfd0208,
+ 0xfd0204
+};
+
+static struct attn_hw_reg *usdm_prty_bb_b0_regs[1] = {
+ &usdm_prty1_bb_b0,
+};
+
+static const u16 usdm_prty1_k2_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg usdm_prty1_k2 = {
+ 0, 10, usdm_prty1_k2_attn_idx, 0xfd0200, 0xfd020c, 0xfd0208, 0xfd0204
+};
+
+static struct attn_hw_reg *usdm_prty_k2_regs[1] = {
+ &usdm_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *xsdm_int_attn_desc[28] = {
+ "xsdm_address_error",
+ "xsdm_inp_queue_error",
+ "xsdm_delay_fifo_error",
+ "xsdm_async_host_error",
+ "xsdm_prm_fifo_error",
+ "xsdm_ccfc_load_pend_error",
+ "xsdm_tcfc_load_pend_error",
+ "xsdm_dst_int_ram_wait_error",
+ "xsdm_dst_pas_buf_wait_error",
+ "xsdm_dst_pxp_immed_error",
+ "xsdm_dst_pxp_dst_pend_error",
+ "xsdm_dst_brb_src_pend_error",
+ "xsdm_dst_brb_src_addr_error",
+ "xsdm_rsp_brb_pend_error",
+ "xsdm_rsp_int_ram_pend_error",
+ "xsdm_rsp_brb_rd_data_error",
+ "xsdm_rsp_int_ram_rd_data_error",
+ "xsdm_rsp_pxp_rd_data_error",
+ "xsdm_cm_delay_error",
+ "xsdm_sh_delay_error",
+ "xsdm_cmpl_pend_error",
+ "xsdm_cprm_pend_error",
+ "xsdm_timer_addr_error",
+ "xsdm_timer_pend_error",
+ "xsdm_dorq_dpm_error",
+ "xsdm_dst_pxp_done_error",
+ "xsdm_xcm_rmt_buffer_error",
+ "xsdm_ycm_rmt_buffer_error",
+};
+#else
+#define xsdm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 xsdm_int0_bb_a0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg xsdm_int0_bb_a0 = {
+ 0, 26, xsdm_int0_bb_a0_attn_idx, 0xf80040, 0xf8004c, 0xf80048, 0xf80044
+};
+
+static struct attn_hw_reg *xsdm_int_bb_a0_regs[1] = {
+ &xsdm_int0_bb_a0,
+};
+
+static const u16 xsdm_int0_bb_b0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg xsdm_int0_bb_b0 = {
+ 0, 26, xsdm_int0_bb_b0_attn_idx, 0xf80040, 0xf8004c, 0xf80048, 0xf80044
+};
+
+static struct attn_hw_reg *xsdm_int_bb_b0_regs[1] = {
+ &xsdm_int0_bb_b0,
+};
+
+static const u16 xsdm_int0_k2_attn_idx[28] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27,
+};
+
+static struct attn_hw_reg xsdm_int0_k2 = {
+ 0, 28, xsdm_int0_k2_attn_idx, 0xf80040, 0xf8004c, 0xf80048, 0xf80044
+};
+
+static struct attn_hw_reg *xsdm_int_k2_regs[1] = {
+ &xsdm_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *xsdm_prty_attn_desc[10] = {
+ "xsdm_mem009_i_mem_prty",
+ "xsdm_mem008_i_mem_prty",
+ "xsdm_mem007_i_mem_prty",
+ "xsdm_mem006_i_mem_prty",
+ "xsdm_mem003_i_mem_prty",
+ "xsdm_mem010_i_mem_prty",
+ "xsdm_mem002_i_mem_prty",
+ "xsdm_mem004_i_mem_prty",
+ "xsdm_mem005_i_mem_prty",
+ "xsdm_mem001_i_mem_prty",
+};
+#else
+#define xsdm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 xsdm_prty1_bb_a0_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg xsdm_prty1_bb_a0 = {
+ 0, 10, xsdm_prty1_bb_a0_attn_idx, 0xf80200, 0xf8020c, 0xf80208,
+ 0xf80204
+};
+
+static struct attn_hw_reg *xsdm_prty_bb_a0_regs[1] = {
+ &xsdm_prty1_bb_a0,
+};
+
+static const u16 xsdm_prty1_bb_b0_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg xsdm_prty1_bb_b0 = {
+ 0, 10, xsdm_prty1_bb_b0_attn_idx, 0xf80200, 0xf8020c, 0xf80208,
+ 0xf80204
+};
+
+static struct attn_hw_reg *xsdm_prty_bb_b0_regs[1] = {
+ &xsdm_prty1_bb_b0,
+};
+
+static const u16 xsdm_prty1_k2_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg xsdm_prty1_k2 = {
+ 0, 10, xsdm_prty1_k2_attn_idx, 0xf80200, 0xf8020c, 0xf80208, 0xf80204
+};
+
+static struct attn_hw_reg *xsdm_prty_k2_regs[1] = {
+ &xsdm_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ysdm_int_attn_desc[28] = {
+ "ysdm_address_error",
+ "ysdm_inp_queue_error",
+ "ysdm_delay_fifo_error",
+ "ysdm_async_host_error",
+ "ysdm_prm_fifo_error",
+ "ysdm_ccfc_load_pend_error",
+ "ysdm_tcfc_load_pend_error",
+ "ysdm_dst_int_ram_wait_error",
+ "ysdm_dst_pas_buf_wait_error",
+ "ysdm_dst_pxp_immed_error",
+ "ysdm_dst_pxp_dst_pend_error",
+ "ysdm_dst_brb_src_pend_error",
+ "ysdm_dst_brb_src_addr_error",
+ "ysdm_rsp_brb_pend_error",
+ "ysdm_rsp_int_ram_pend_error",
+ "ysdm_rsp_brb_rd_data_error",
+ "ysdm_rsp_int_ram_rd_data_error",
+ "ysdm_rsp_pxp_rd_data_error",
+ "ysdm_cm_delay_error",
+ "ysdm_sh_delay_error",
+ "ysdm_cmpl_pend_error",
+ "ysdm_cprm_pend_error",
+ "ysdm_timer_addr_error",
+ "ysdm_timer_pend_error",
+ "ysdm_dorq_dpm_error",
+ "ysdm_dst_pxp_done_error",
+ "ysdm_xcm_rmt_buffer_error",
+ "ysdm_ycm_rmt_buffer_error",
+};
+#else
+#define ysdm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ysdm_int0_bb_a0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg ysdm_int0_bb_a0 = {
+ 0, 26, ysdm_int0_bb_a0_attn_idx, 0xf90040, 0xf9004c, 0xf90048, 0xf90044
+};
+
+static struct attn_hw_reg *ysdm_int_bb_a0_regs[1] = {
+ &ysdm_int0_bb_a0,
+};
+
+static const u16 ysdm_int0_bb_b0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg ysdm_int0_bb_b0 = {
+ 0, 26, ysdm_int0_bb_b0_attn_idx, 0xf90040, 0xf9004c, 0xf90048, 0xf90044
+};
+
+static struct attn_hw_reg *ysdm_int_bb_b0_regs[1] = {
+ &ysdm_int0_bb_b0,
+};
+
+static const u16 ysdm_int0_k2_attn_idx[28] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27,
+};
+
+static struct attn_hw_reg ysdm_int0_k2 = {
+ 0, 28, ysdm_int0_k2_attn_idx, 0xf90040, 0xf9004c, 0xf90048, 0xf90044
+};
+
+static struct attn_hw_reg *ysdm_int_k2_regs[1] = {
+ &ysdm_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ysdm_prty_attn_desc[9] = {
+ "ysdm_mem008_i_mem_prty",
+ "ysdm_mem007_i_mem_prty",
+ "ysdm_mem006_i_mem_prty",
+ "ysdm_mem005_i_mem_prty",
+ "ysdm_mem002_i_mem_prty",
+ "ysdm_mem009_i_mem_prty",
+ "ysdm_mem001_i_mem_prty",
+ "ysdm_mem003_i_mem_prty",
+ "ysdm_mem004_i_mem_prty",
+};
+#else
+#define ysdm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 ysdm_prty1_bb_a0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg ysdm_prty1_bb_a0 = {
+ 0, 9, ysdm_prty1_bb_a0_attn_idx, 0xf90200, 0xf9020c, 0xf90208, 0xf90204
+};
+
+static struct attn_hw_reg *ysdm_prty_bb_a0_regs[1] = {
+ &ysdm_prty1_bb_a0,
+};
+
+static const u16 ysdm_prty1_bb_b0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg ysdm_prty1_bb_b0 = {
+ 0, 9, ysdm_prty1_bb_b0_attn_idx, 0xf90200, 0xf9020c, 0xf90208, 0xf90204
+};
+
+static struct attn_hw_reg *ysdm_prty_bb_b0_regs[1] = {
+ &ysdm_prty1_bb_b0,
+};
+
+static const u16 ysdm_prty1_k2_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg ysdm_prty1_k2 = {
+ 0, 9, ysdm_prty1_k2_attn_idx, 0xf90200, 0xf9020c, 0xf90208, 0xf90204
+};
+
+static struct attn_hw_reg *ysdm_prty_k2_regs[1] = {
+ &ysdm_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *psdm_int_attn_desc[28] = {
+ "psdm_address_error",
+ "psdm_inp_queue_error",
+ "psdm_delay_fifo_error",
+ "psdm_async_host_error",
+ "psdm_prm_fifo_error",
+ "psdm_ccfc_load_pend_error",
+ "psdm_tcfc_load_pend_error",
+ "psdm_dst_int_ram_wait_error",
+ "psdm_dst_pas_buf_wait_error",
+ "psdm_dst_pxp_immed_error",
+ "psdm_dst_pxp_dst_pend_error",
+ "psdm_dst_brb_src_pend_error",
+ "psdm_dst_brb_src_addr_error",
+ "psdm_rsp_brb_pend_error",
+ "psdm_rsp_int_ram_pend_error",
+ "psdm_rsp_brb_rd_data_error",
+ "psdm_rsp_int_ram_rd_data_error",
+ "psdm_rsp_pxp_rd_data_error",
+ "psdm_cm_delay_error",
+ "psdm_sh_delay_error",
+ "psdm_cmpl_pend_error",
+ "psdm_cprm_pend_error",
+ "psdm_timer_addr_error",
+ "psdm_timer_pend_error",
+ "psdm_dorq_dpm_error",
+ "psdm_dst_pxp_done_error",
+ "psdm_xcm_rmt_buffer_error",
+ "psdm_ycm_rmt_buffer_error",
+};
+#else
+#define psdm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 psdm_int0_bb_a0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg psdm_int0_bb_a0 = {
+ 0, 26, psdm_int0_bb_a0_attn_idx, 0xfa0040, 0xfa004c, 0xfa0048, 0xfa0044
+};
+
+static struct attn_hw_reg *psdm_int_bb_a0_regs[1] = {
+ &psdm_int0_bb_a0,
+};
+
+static const u16 psdm_int0_bb_b0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg psdm_int0_bb_b0 = {
+ 0, 26, psdm_int0_bb_b0_attn_idx, 0xfa0040, 0xfa004c, 0xfa0048, 0xfa0044
+};
+
+static struct attn_hw_reg *psdm_int_bb_b0_regs[1] = {
+ &psdm_int0_bb_b0,
+};
+
+static const u16 psdm_int0_k2_attn_idx[28] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27,
+};
+
+static struct attn_hw_reg psdm_int0_k2 = {
+ 0, 28, psdm_int0_k2_attn_idx, 0xfa0040, 0xfa004c, 0xfa0048, 0xfa0044
+};
+
+static struct attn_hw_reg *psdm_int_k2_regs[1] = {
+ &psdm_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *psdm_prty_attn_desc[9] = {
+ "psdm_mem008_i_mem_prty",
+ "psdm_mem007_i_mem_prty",
+ "psdm_mem006_i_mem_prty",
+ "psdm_mem005_i_mem_prty",
+ "psdm_mem002_i_mem_prty",
+ "psdm_mem009_i_mem_prty",
+ "psdm_mem001_i_mem_prty",
+ "psdm_mem003_i_mem_prty",
+ "psdm_mem004_i_mem_prty",
+};
+#else
+#define psdm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 psdm_prty1_bb_a0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg psdm_prty1_bb_a0 = {
+ 0, 9, psdm_prty1_bb_a0_attn_idx, 0xfa0200, 0xfa020c, 0xfa0208, 0xfa0204
+};
+
+static struct attn_hw_reg *psdm_prty_bb_a0_regs[1] = {
+ &psdm_prty1_bb_a0,
+};
+
+static const u16 psdm_prty1_bb_b0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg psdm_prty1_bb_b0 = {
+ 0, 9, psdm_prty1_bb_b0_attn_idx, 0xfa0200, 0xfa020c, 0xfa0208, 0xfa0204
+};
+
+static struct attn_hw_reg *psdm_prty_bb_b0_regs[1] = {
+ &psdm_prty1_bb_b0,
+};
+
+static const u16 psdm_prty1_k2_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg psdm_prty1_k2 = {
+ 0, 9, psdm_prty1_k2_attn_idx, 0xfa0200, 0xfa020c, 0xfa0208, 0xfa0204
+};
+
+static struct attn_hw_reg *psdm_prty_k2_regs[1] = {
+ &psdm_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tsem_int_attn_desc[46] = {
+ "tsem_address_error",
+ "tsem_fic_last_error",
+ "tsem_fic_length_error",
+ "tsem_fic_fifo_error",
+ "tsem_pas_buf_fifo_error",
+ "tsem_sync_fin_pop_error",
+ "tsem_sync_dra_wr_push_error",
+ "tsem_sync_dra_wr_pop_error",
+ "tsem_sync_dra_rd_push_error",
+ "tsem_sync_dra_rd_pop_error",
+ "tsem_sync_fin_push_error",
+ "tsem_sem_fast_address_error",
+ "tsem_cam_lsb_inp_fifo",
+ "tsem_cam_msb_inp_fifo",
+ "tsem_cam_out_fifo",
+ "tsem_fin_fifo",
+ "tsem_thread_fifo_error",
+ "tsem_thread_overrun",
+ "tsem_sync_ext_store_push_error",
+ "tsem_sync_ext_store_pop_error",
+ "tsem_sync_ext_load_push_error",
+ "tsem_sync_ext_load_pop_error",
+ "tsem_sync_ram_rd_push_error",
+ "tsem_sync_ram_rd_pop_error",
+ "tsem_sync_ram_wr_pop_error",
+ "tsem_sync_ram_wr_push_error",
+ "tsem_sync_dbg_push_error",
+ "tsem_sync_dbg_pop_error",
+ "tsem_dbg_fifo_error",
+ "tsem_cam_msb2_inp_fifo",
+ "tsem_vfc_interrupt",
+ "tsem_vfc_out_fifo_error",
+ "tsem_storm_stack_uf_attn",
+ "tsem_storm_stack_of_attn",
+ "tsem_storm_runtime_error",
+ "tsem_ext_load_pend_wr_error",
+ "tsem_thread_rls_orun_error",
+ "tsem_thread_rls_aloc_error",
+ "tsem_thread_rls_vld_error",
+ "tsem_ext_thread_oor_error",
+ "tsem_ord_id_fifo_error",
+ "tsem_invld_foc_error",
+ "tsem_ext_ld_len_error",
+ "tsem_thrd_ord_fifo_error",
+ "tsem_invld_thrd_ord_error",
+ "tsem_fast_memory_address_error",
+};
+#else
+#define tsem_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 tsem_int0_bb_a0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg tsem_int0_bb_a0 = {
+ 0, 32, tsem_int0_bb_a0_attn_idx, 0x1700040, 0x170004c, 0x1700048,
+ 0x1700044
+};
+
+static const u16 tsem_int1_bb_a0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg tsem_int1_bb_a0 = {
+ 1, 13, tsem_int1_bb_a0_attn_idx, 0x1700050, 0x170005c, 0x1700058,
+ 0x1700054
+};
+
+static const u16 tsem_fast_memory_int0_bb_a0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg tsem_fast_memory_int0_bb_a0 = {
+ 2, 1, tsem_fast_memory_int0_bb_a0_attn_idx, 0x1740040, 0x174004c,
+ 0x1740048, 0x1740044
+};
+
+static struct attn_hw_reg *tsem_int_bb_a0_regs[3] = {
+ &tsem_int0_bb_a0, &tsem_int1_bb_a0, &tsem_fast_memory_int0_bb_a0,
+};
+
+static const u16 tsem_int0_bb_b0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg tsem_int0_bb_b0 = {
+ 0, 32, tsem_int0_bb_b0_attn_idx, 0x1700040, 0x170004c, 0x1700048,
+ 0x1700044
+};
+
+static const u16 tsem_int1_bb_b0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg tsem_int1_bb_b0 = {
+ 1, 13, tsem_int1_bb_b0_attn_idx, 0x1700050, 0x170005c, 0x1700058,
+ 0x1700054
+};
+
+static const u16 tsem_fast_memory_int0_bb_b0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg tsem_fast_memory_int0_bb_b0 = {
+ 2, 1, tsem_fast_memory_int0_bb_b0_attn_idx, 0x1740040, 0x174004c,
+ 0x1740048, 0x1740044
+};
+
+static struct attn_hw_reg *tsem_int_bb_b0_regs[3] = {
+ &tsem_int0_bb_b0, &tsem_int1_bb_b0, &tsem_fast_memory_int0_bb_b0,
+};
+
+static const u16 tsem_int0_k2_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg tsem_int0_k2 = {
+ 0, 32, tsem_int0_k2_attn_idx, 0x1700040, 0x170004c, 0x1700048,
+ 0x1700044
+};
+
+static const u16 tsem_int1_k2_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg tsem_int1_k2 = {
+ 1, 13, tsem_int1_k2_attn_idx, 0x1700050, 0x170005c, 0x1700058,
+ 0x1700054
+};
+
+static const u16 tsem_fast_memory_int0_k2_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg tsem_fast_memory_int0_k2 = {
+ 2, 1, tsem_fast_memory_int0_k2_attn_idx, 0x1740040, 0x174004c,
+ 0x1740048,
+ 0x1740044
+};
+
+static struct attn_hw_reg *tsem_int_k2_regs[3] = {
+ &tsem_int0_k2, &tsem_int1_k2, &tsem_fast_memory_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tsem_prty_attn_desc[23] = {
+ "tsem_vfc_rbc_parity_error",
+ "tsem_storm_rf_parity_error",
+ "tsem_reg_gen_parity_error",
+ "tsem_mem005_i_ecc_0_rf_int",
+ "tsem_mem005_i_ecc_1_rf_int",
+ "tsem_mem004_i_mem_prty",
+ "tsem_mem002_i_mem_prty",
+ "tsem_mem003_i_mem_prty",
+ "tsem_mem001_i_mem_prty",
+ "tsem_fast_memory_mem024_i_mem_prty",
+ "tsem_fast_memory_mem023_i_mem_prty",
+ "tsem_fast_memory_mem022_i_mem_prty",
+ "tsem_fast_memory_mem021_i_mem_prty",
+ "tsem_fast_memory_mem020_i_mem_prty",
+ "tsem_fast_memory_mem019_i_mem_prty",
+ "tsem_fast_memory_mem018_i_mem_prty",
+ "tsem_fast_memory_vfc_config_mem005_i_ecc_rf_int",
+ "tsem_fast_memory_vfc_config_mem002_i_ecc_rf_int",
+ "tsem_fast_memory_vfc_config_mem006_i_mem_prty",
+ "tsem_fast_memory_vfc_config_mem001_i_mem_prty",
+ "tsem_fast_memory_vfc_config_mem004_i_mem_prty",
+ "tsem_fast_memory_vfc_config_mem003_i_mem_prty",
+ "tsem_fast_memory_vfc_config_mem007_i_mem_prty",
+};
+#else
+#define tsem_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 tsem_prty0_bb_a0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg tsem_prty0_bb_a0 = {
+ 0, 3, tsem_prty0_bb_a0_attn_idx, 0x17000c8, 0x17000d4, 0x17000d0,
+ 0x17000cc
+};
+
+static const u16 tsem_prty1_bb_a0_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg tsem_prty1_bb_a0 = {
+ 1, 6, tsem_prty1_bb_a0_attn_idx, 0x1700200, 0x170020c, 0x1700208,
+ 0x1700204
+};
+
+static const u16 tsem_fast_memory_vfc_config_prty1_bb_a0_attn_idx[6] = {
+ 16, 17, 19, 20, 21, 22,
+};
+
+static struct attn_hw_reg tsem_fast_memory_vfc_config_prty1_bb_a0 = {
+ 2, 6, tsem_fast_memory_vfc_config_prty1_bb_a0_attn_idx, 0x174a200,
+ 0x174a20c, 0x174a208, 0x174a204
+};
+
+static struct attn_hw_reg *tsem_prty_bb_a0_regs[3] = {
+ &tsem_prty0_bb_a0, &tsem_prty1_bb_a0,
+ &tsem_fast_memory_vfc_config_prty1_bb_a0,
+};
+
+static const u16 tsem_prty0_bb_b0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg tsem_prty0_bb_b0 = {
+ 0, 3, tsem_prty0_bb_b0_attn_idx, 0x17000c8, 0x17000d4, 0x17000d0,
+ 0x17000cc
+};
+
+static const u16 tsem_prty1_bb_b0_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg tsem_prty1_bb_b0 = {
+ 1, 6, tsem_prty1_bb_b0_attn_idx, 0x1700200, 0x170020c, 0x1700208,
+ 0x1700204
+};
+
+static const u16 tsem_fast_memory_vfc_config_prty1_bb_b0_attn_idx[6] = {
+ 16, 17, 19, 20, 21, 22,
+};
+
+static struct attn_hw_reg tsem_fast_memory_vfc_config_prty1_bb_b0 = {
+ 2, 6, tsem_fast_memory_vfc_config_prty1_bb_b0_attn_idx, 0x174a200,
+ 0x174a20c, 0x174a208, 0x174a204
+};
+
+static struct attn_hw_reg *tsem_prty_bb_b0_regs[3] = {
+ &tsem_prty0_bb_b0, &tsem_prty1_bb_b0,
+ &tsem_fast_memory_vfc_config_prty1_bb_b0,
+};
+
+static const u16 tsem_prty0_k2_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg tsem_prty0_k2 = {
+ 0, 3, tsem_prty0_k2_attn_idx, 0x17000c8, 0x17000d4, 0x17000d0,
+ 0x17000cc
+};
+
+static const u16 tsem_prty1_k2_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg tsem_prty1_k2 = {
+ 1, 6, tsem_prty1_k2_attn_idx, 0x1700200, 0x170020c, 0x1700208,
+ 0x1700204
+};
+
+static const u16 tsem_fast_memory_prty1_k2_attn_idx[7] = {
+ 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg tsem_fast_memory_prty1_k2 = {
+ 2, 7, tsem_fast_memory_prty1_k2_attn_idx, 0x1740200, 0x174020c,
+ 0x1740208,
+ 0x1740204
+};
+
+static const u16 tsem_fast_memory_vfc_config_prty1_k2_attn_idx[6] = {
+ 16, 17, 18, 19, 20, 21,
+};
+
+static struct attn_hw_reg tsem_fast_memory_vfc_config_prty1_k2 = {
+ 3, 6, tsem_fast_memory_vfc_config_prty1_k2_attn_idx, 0x174a200,
+ 0x174a20c,
+ 0x174a208, 0x174a204
+};
+
+static struct attn_hw_reg *tsem_prty_k2_regs[4] = {
+ &tsem_prty0_k2, &tsem_prty1_k2, &tsem_fast_memory_prty1_k2,
+ &tsem_fast_memory_vfc_config_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *msem_int_attn_desc[46] = {
+ "msem_address_error",
+ "msem_fic_last_error",
+ "msem_fic_length_error",
+ "msem_fic_fifo_error",
+ "msem_pas_buf_fifo_error",
+ "msem_sync_fin_pop_error",
+ "msem_sync_dra_wr_push_error",
+ "msem_sync_dra_wr_pop_error",
+ "msem_sync_dra_rd_push_error",
+ "msem_sync_dra_rd_pop_error",
+ "msem_sync_fin_push_error",
+ "msem_sem_fast_address_error",
+ "msem_cam_lsb_inp_fifo",
+ "msem_cam_msb_inp_fifo",
+ "msem_cam_out_fifo",
+ "msem_fin_fifo",
+ "msem_thread_fifo_error",
+ "msem_thread_overrun",
+ "msem_sync_ext_store_push_error",
+ "msem_sync_ext_store_pop_error",
+ "msem_sync_ext_load_push_error",
+ "msem_sync_ext_load_pop_error",
+ "msem_sync_ram_rd_push_error",
+ "msem_sync_ram_rd_pop_error",
+ "msem_sync_ram_wr_pop_error",
+ "msem_sync_ram_wr_push_error",
+ "msem_sync_dbg_push_error",
+ "msem_sync_dbg_pop_error",
+ "msem_dbg_fifo_error",
+ "msem_cam_msb2_inp_fifo",
+ "msem_vfc_interrupt",
+ "msem_vfc_out_fifo_error",
+ "msem_storm_stack_uf_attn",
+ "msem_storm_stack_of_attn",
+ "msem_storm_runtime_error",
+ "msem_ext_load_pend_wr_error",
+ "msem_thread_rls_orun_error",
+ "msem_thread_rls_aloc_error",
+ "msem_thread_rls_vld_error",
+ "msem_ext_thread_oor_error",
+ "msem_ord_id_fifo_error",
+ "msem_invld_foc_error",
+ "msem_ext_ld_len_error",
+ "msem_thrd_ord_fifo_error",
+ "msem_invld_thrd_ord_error",
+ "msem_fast_memory_address_error",
+};
+#else
+#define msem_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 msem_int0_bb_a0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg msem_int0_bb_a0 = {
+ 0, 32, msem_int0_bb_a0_attn_idx, 0x1800040, 0x180004c, 0x1800048,
+ 0x1800044
+};
+
+static const u16 msem_int1_bb_a0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg msem_int1_bb_a0 = {
+ 1, 13, msem_int1_bb_a0_attn_idx, 0x1800050, 0x180005c, 0x1800058,
+ 0x1800054
+};
+
+static const u16 msem_fast_memory_int0_bb_a0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg msem_fast_memory_int0_bb_a0 = {
+ 2, 1, msem_fast_memory_int0_bb_a0_attn_idx, 0x1840040, 0x184004c,
+ 0x1840048, 0x1840044
+};
+
+static struct attn_hw_reg *msem_int_bb_a0_regs[3] = {
+ &msem_int0_bb_a0, &msem_int1_bb_a0, &msem_fast_memory_int0_bb_a0,
+};
+
+static const u16 msem_int0_bb_b0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg msem_int0_bb_b0 = {
+ 0, 32, msem_int0_bb_b0_attn_idx, 0x1800040, 0x180004c, 0x1800048,
+ 0x1800044
+};
+
+static const u16 msem_int1_bb_b0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg msem_int1_bb_b0 = {
+ 1, 13, msem_int1_bb_b0_attn_idx, 0x1800050, 0x180005c, 0x1800058,
+ 0x1800054
+};
+
+static const u16 msem_fast_memory_int0_bb_b0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg msem_fast_memory_int0_bb_b0 = {
+ 2, 1, msem_fast_memory_int0_bb_b0_attn_idx, 0x1840040, 0x184004c,
+ 0x1840048, 0x1840044
+};
+
+static struct attn_hw_reg *msem_int_bb_b0_regs[3] = {
+ &msem_int0_bb_b0, &msem_int1_bb_b0, &msem_fast_memory_int0_bb_b0,
+};
+
+static const u16 msem_int0_k2_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg msem_int0_k2 = {
+ 0, 32, msem_int0_k2_attn_idx, 0x1800040, 0x180004c, 0x1800048,
+ 0x1800044
+};
+
+static const u16 msem_int1_k2_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg msem_int1_k2 = {
+ 1, 13, msem_int1_k2_attn_idx, 0x1800050, 0x180005c, 0x1800058,
+ 0x1800054
+};
+
+static const u16 msem_fast_memory_int0_k2_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg msem_fast_memory_int0_k2 = {
+ 2, 1, msem_fast_memory_int0_k2_attn_idx, 0x1840040, 0x184004c,
+ 0x1840048,
+ 0x1840044
+};
+
+static struct attn_hw_reg *msem_int_k2_regs[3] = {
+ &msem_int0_k2, &msem_int1_k2, &msem_fast_memory_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *msem_prty_attn_desc[23] = {
+ "msem_vfc_rbc_parity_error",
+ "msem_storm_rf_parity_error",
+ "msem_reg_gen_parity_error",
+ "msem_mem005_i_ecc_0_rf_int",
+ "msem_mem005_i_ecc_1_rf_int",
+ "msem_mem004_i_mem_prty",
+ "msem_mem002_i_mem_prty",
+ "msem_mem003_i_mem_prty",
+ "msem_mem001_i_mem_prty",
+ "msem_fast_memory_mem024_i_mem_prty",
+ "msem_fast_memory_mem023_i_mem_prty",
+ "msem_fast_memory_mem022_i_mem_prty",
+ "msem_fast_memory_mem021_i_mem_prty",
+ "msem_fast_memory_mem020_i_mem_prty",
+ "msem_fast_memory_mem019_i_mem_prty",
+ "msem_fast_memory_mem018_i_mem_prty",
+ "msem_fast_memory_vfc_config_mem005_i_ecc_rf_int",
+ "msem_fast_memory_vfc_config_mem002_i_ecc_rf_int",
+ "msem_fast_memory_vfc_config_mem006_i_mem_prty",
+ "msem_fast_memory_vfc_config_mem001_i_mem_prty",
+ "msem_fast_memory_vfc_config_mem004_i_mem_prty",
+ "msem_fast_memory_vfc_config_mem003_i_mem_prty",
+ "msem_fast_memory_vfc_config_mem007_i_mem_prty",
+};
+#else
+#define msem_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 msem_prty0_bb_a0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg msem_prty0_bb_a0 = {
+ 0, 3, msem_prty0_bb_a0_attn_idx, 0x18000c8, 0x18000d4, 0x18000d0,
+ 0x18000cc
+};
+
+static const u16 msem_prty1_bb_a0_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg msem_prty1_bb_a0 = {
+ 1, 6, msem_prty1_bb_a0_attn_idx, 0x1800200, 0x180020c, 0x1800208,
+ 0x1800204
+};
+
+static struct attn_hw_reg *msem_prty_bb_a0_regs[2] = {
+ &msem_prty0_bb_a0, &msem_prty1_bb_a0,
+};
+
+static const u16 msem_prty0_bb_b0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg msem_prty0_bb_b0 = {
+ 0, 3, msem_prty0_bb_b0_attn_idx, 0x18000c8, 0x18000d4, 0x18000d0,
+ 0x18000cc
+};
+
+static const u16 msem_prty1_bb_b0_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg msem_prty1_bb_b0 = {
+ 1, 6, msem_prty1_bb_b0_attn_idx, 0x1800200, 0x180020c, 0x1800208,
+ 0x1800204
+};
+
+static struct attn_hw_reg *msem_prty_bb_b0_regs[2] = {
+ &msem_prty0_bb_b0, &msem_prty1_bb_b0,
+};
+
+static const u16 msem_prty0_k2_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg msem_prty0_k2 = {
+ 0, 3, msem_prty0_k2_attn_idx, 0x18000c8, 0x18000d4, 0x18000d0,
+ 0x18000cc
+};
+
+static const u16 msem_prty1_k2_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg msem_prty1_k2 = {
+ 1, 6, msem_prty1_k2_attn_idx, 0x1800200, 0x180020c, 0x1800208,
+ 0x1800204
+};
+
+static const u16 msem_fast_memory_prty1_k2_attn_idx[7] = {
+ 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg msem_fast_memory_prty1_k2 = {
+ 2, 7, msem_fast_memory_prty1_k2_attn_idx, 0x1840200, 0x184020c,
+ 0x1840208,
+ 0x1840204
+};
+
+static struct attn_hw_reg *msem_prty_k2_regs[3] = {
+ &msem_prty0_k2, &msem_prty1_k2, &msem_fast_memory_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *usem_int_attn_desc[46] = {
+ "usem_address_error",
+ "usem_fic_last_error",
+ "usem_fic_length_error",
+ "usem_fic_fifo_error",
+ "usem_pas_buf_fifo_error",
+ "usem_sync_fin_pop_error",
+ "usem_sync_dra_wr_push_error",
+ "usem_sync_dra_wr_pop_error",
+ "usem_sync_dra_rd_push_error",
+ "usem_sync_dra_rd_pop_error",
+ "usem_sync_fin_push_error",
+ "usem_sem_fast_address_error",
+ "usem_cam_lsb_inp_fifo",
+ "usem_cam_msb_inp_fifo",
+ "usem_cam_out_fifo",
+ "usem_fin_fifo",
+ "usem_thread_fifo_error",
+ "usem_thread_overrun",
+ "usem_sync_ext_store_push_error",
+ "usem_sync_ext_store_pop_error",
+ "usem_sync_ext_load_push_error",
+ "usem_sync_ext_load_pop_error",
+ "usem_sync_ram_rd_push_error",
+ "usem_sync_ram_rd_pop_error",
+ "usem_sync_ram_wr_pop_error",
+ "usem_sync_ram_wr_push_error",
+ "usem_sync_dbg_push_error",
+ "usem_sync_dbg_pop_error",
+ "usem_dbg_fifo_error",
+ "usem_cam_msb2_inp_fifo",
+ "usem_vfc_interrupt",
+ "usem_vfc_out_fifo_error",
+ "usem_storm_stack_uf_attn",
+ "usem_storm_stack_of_attn",
+ "usem_storm_runtime_error",
+ "usem_ext_load_pend_wr_error",
+ "usem_thread_rls_orun_error",
+ "usem_thread_rls_aloc_error",
+ "usem_thread_rls_vld_error",
+ "usem_ext_thread_oor_error",
+ "usem_ord_id_fifo_error",
+ "usem_invld_foc_error",
+ "usem_ext_ld_len_error",
+ "usem_thrd_ord_fifo_error",
+ "usem_invld_thrd_ord_error",
+ "usem_fast_memory_address_error",
+};
+#else
+#define usem_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 usem_int0_bb_a0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg usem_int0_bb_a0 = {
+ 0, 32, usem_int0_bb_a0_attn_idx, 0x1900040, 0x190004c, 0x1900048,
+ 0x1900044
+};
+
+static const u16 usem_int1_bb_a0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg usem_int1_bb_a0 = {
+ 1, 13, usem_int1_bb_a0_attn_idx, 0x1900050, 0x190005c, 0x1900058,
+ 0x1900054
+};
+
+static const u16 usem_fast_memory_int0_bb_a0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg usem_fast_memory_int0_bb_a0 = {
+ 2, 1, usem_fast_memory_int0_bb_a0_attn_idx, 0x1940040, 0x194004c,
+ 0x1940048, 0x1940044
+};
+
+static struct attn_hw_reg *usem_int_bb_a0_regs[3] = {
+ &usem_int0_bb_a0, &usem_int1_bb_a0, &usem_fast_memory_int0_bb_a0,
+};
+
+static const u16 usem_int0_bb_b0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg usem_int0_bb_b0 = {
+ 0, 32, usem_int0_bb_b0_attn_idx, 0x1900040, 0x190004c, 0x1900048,
+ 0x1900044
+};
+
+static const u16 usem_int1_bb_b0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg usem_int1_bb_b0 = {
+ 1, 13, usem_int1_bb_b0_attn_idx, 0x1900050, 0x190005c, 0x1900058,
+ 0x1900054
+};
+
+static const u16 usem_fast_memory_int0_bb_b0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg usem_fast_memory_int0_bb_b0 = {
+ 2, 1, usem_fast_memory_int0_bb_b0_attn_idx, 0x1940040, 0x194004c,
+ 0x1940048, 0x1940044
+};
+
+static struct attn_hw_reg *usem_int_bb_b0_regs[3] = {
+ &usem_int0_bb_b0, &usem_int1_bb_b0, &usem_fast_memory_int0_bb_b0,
+};
+
+static const u16 usem_int0_k2_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg usem_int0_k2 = {
+ 0, 32, usem_int0_k2_attn_idx, 0x1900040, 0x190004c, 0x1900048,
+ 0x1900044
+};
+
+static const u16 usem_int1_k2_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg usem_int1_k2 = {
+ 1, 13, usem_int1_k2_attn_idx, 0x1900050, 0x190005c, 0x1900058,
+ 0x1900054
+};
+
+static const u16 usem_fast_memory_int0_k2_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg usem_fast_memory_int0_k2 = {
+ 2, 1, usem_fast_memory_int0_k2_attn_idx, 0x1940040, 0x194004c,
+ 0x1940048,
+ 0x1940044
+};
+
+static struct attn_hw_reg *usem_int_k2_regs[3] = {
+ &usem_int0_k2, &usem_int1_k2, &usem_fast_memory_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *usem_prty_attn_desc[23] = {
+ "usem_vfc_rbc_parity_error",
+ "usem_storm_rf_parity_error",
+ "usem_reg_gen_parity_error",
+ "usem_mem005_i_ecc_0_rf_int",
+ "usem_mem005_i_ecc_1_rf_int",
+ "usem_mem004_i_mem_prty",
+ "usem_mem002_i_mem_prty",
+ "usem_mem003_i_mem_prty",
+ "usem_mem001_i_mem_prty",
+ "usem_fast_memory_mem024_i_mem_prty",
+ "usem_fast_memory_mem023_i_mem_prty",
+ "usem_fast_memory_mem022_i_mem_prty",
+ "usem_fast_memory_mem021_i_mem_prty",
+ "usem_fast_memory_mem020_i_mem_prty",
+ "usem_fast_memory_mem019_i_mem_prty",
+ "usem_fast_memory_mem018_i_mem_prty",
+ "usem_fast_memory_vfc_config_mem005_i_ecc_rf_int",
+ "usem_fast_memory_vfc_config_mem002_i_ecc_rf_int",
+ "usem_fast_memory_vfc_config_mem006_i_mem_prty",
+ "usem_fast_memory_vfc_config_mem001_i_mem_prty",
+ "usem_fast_memory_vfc_config_mem004_i_mem_prty",
+ "usem_fast_memory_vfc_config_mem003_i_mem_prty",
+ "usem_fast_memory_vfc_config_mem007_i_mem_prty",
+};
+#else
+#define usem_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 usem_prty0_bb_a0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg usem_prty0_bb_a0 = {
+ 0, 3, usem_prty0_bb_a0_attn_idx, 0x19000c8, 0x19000d4, 0x19000d0,
+ 0x19000cc
+};
+
+static const u16 usem_prty1_bb_a0_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg usem_prty1_bb_a0 = {
+ 1, 6, usem_prty1_bb_a0_attn_idx, 0x1900200, 0x190020c, 0x1900208,
+ 0x1900204
+};
+
+static struct attn_hw_reg *usem_prty_bb_a0_regs[2] = {
+ &usem_prty0_bb_a0, &usem_prty1_bb_a0,
+};
+
+static const u16 usem_prty0_bb_b0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg usem_prty0_bb_b0 = {
+ 0, 3, usem_prty0_bb_b0_attn_idx, 0x19000c8, 0x19000d4, 0x19000d0,
+ 0x19000cc
+};
+
+static const u16 usem_prty1_bb_b0_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg usem_prty1_bb_b0 = {
+ 1, 6, usem_prty1_bb_b0_attn_idx, 0x1900200, 0x190020c, 0x1900208,
+ 0x1900204
+};
+
+static struct attn_hw_reg *usem_prty_bb_b0_regs[2] = {
+ &usem_prty0_bb_b0, &usem_prty1_bb_b0,
+};
+
+static const u16 usem_prty0_k2_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg usem_prty0_k2 = {
+ 0, 3, usem_prty0_k2_attn_idx, 0x19000c8, 0x19000d4, 0x19000d0,
+ 0x19000cc
+};
+
+static const u16 usem_prty1_k2_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg usem_prty1_k2 = {
+ 1, 6, usem_prty1_k2_attn_idx, 0x1900200, 0x190020c, 0x1900208,
+ 0x1900204
+};
+
+static const u16 usem_fast_memory_prty1_k2_attn_idx[7] = {
+ 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg usem_fast_memory_prty1_k2 = {
+ 2, 7, usem_fast_memory_prty1_k2_attn_idx, 0x1940200, 0x194020c,
+ 0x1940208,
+ 0x1940204
+};
+
+static struct attn_hw_reg *usem_prty_k2_regs[3] = {
+ &usem_prty0_k2, &usem_prty1_k2, &usem_fast_memory_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *xsem_int_attn_desc[46] = {
+ "xsem_address_error",
+ "xsem_fic_last_error",
+ "xsem_fic_length_error",
+ "xsem_fic_fifo_error",
+ "xsem_pas_buf_fifo_error",
+ "xsem_sync_fin_pop_error",
+ "xsem_sync_dra_wr_push_error",
+ "xsem_sync_dra_wr_pop_error",
+ "xsem_sync_dra_rd_push_error",
+ "xsem_sync_dra_rd_pop_error",
+ "xsem_sync_fin_push_error",
+ "xsem_sem_fast_address_error",
+ "xsem_cam_lsb_inp_fifo",
+ "xsem_cam_msb_inp_fifo",
+ "xsem_cam_out_fifo",
+ "xsem_fin_fifo",
+ "xsem_thread_fifo_error",
+ "xsem_thread_overrun",
+ "xsem_sync_ext_store_push_error",
+ "xsem_sync_ext_store_pop_error",
+ "xsem_sync_ext_load_push_error",
+ "xsem_sync_ext_load_pop_error",
+ "xsem_sync_ram_rd_push_error",
+ "xsem_sync_ram_rd_pop_error",
+ "xsem_sync_ram_wr_pop_error",
+ "xsem_sync_ram_wr_push_error",
+ "xsem_sync_dbg_push_error",
+ "xsem_sync_dbg_pop_error",
+ "xsem_dbg_fifo_error",
+ "xsem_cam_msb2_inp_fifo",
+ "xsem_vfc_interrupt",
+ "xsem_vfc_out_fifo_error",
+ "xsem_storm_stack_uf_attn",
+ "xsem_storm_stack_of_attn",
+ "xsem_storm_runtime_error",
+ "xsem_ext_load_pend_wr_error",
+ "xsem_thread_rls_orun_error",
+ "xsem_thread_rls_aloc_error",
+ "xsem_thread_rls_vld_error",
+ "xsem_ext_thread_oor_error",
+ "xsem_ord_id_fifo_error",
+ "xsem_invld_foc_error",
+ "xsem_ext_ld_len_error",
+ "xsem_thrd_ord_fifo_error",
+ "xsem_invld_thrd_ord_error",
+ "xsem_fast_memory_address_error",
+};
+#else
+#define xsem_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 xsem_int0_bb_a0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg xsem_int0_bb_a0 = {
+ 0, 32, xsem_int0_bb_a0_attn_idx, 0x1400040, 0x140004c, 0x1400048,
+ 0x1400044
+};
+
+static const u16 xsem_int1_bb_a0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg xsem_int1_bb_a0 = {
+ 1, 13, xsem_int1_bb_a0_attn_idx, 0x1400050, 0x140005c, 0x1400058,
+ 0x1400054
+};
+
+static const u16 xsem_fast_memory_int0_bb_a0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg xsem_fast_memory_int0_bb_a0 = {
+ 2, 1, xsem_fast_memory_int0_bb_a0_attn_idx, 0x1440040, 0x144004c,
+ 0x1440048, 0x1440044
+};
+
+static struct attn_hw_reg *xsem_int_bb_a0_regs[3] = {
+ &xsem_int0_bb_a0, &xsem_int1_bb_a0, &xsem_fast_memory_int0_bb_a0,
+};
+
+static const u16 xsem_int0_bb_b0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg xsem_int0_bb_b0 = {
+ 0, 32, xsem_int0_bb_b0_attn_idx, 0x1400040, 0x140004c, 0x1400048,
+ 0x1400044
+};
+
+static const u16 xsem_int1_bb_b0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg xsem_int1_bb_b0 = {
+ 1, 13, xsem_int1_bb_b0_attn_idx, 0x1400050, 0x140005c, 0x1400058,
+ 0x1400054
+};
+
+static const u16 xsem_fast_memory_int0_bb_b0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg xsem_fast_memory_int0_bb_b0 = {
+ 2, 1, xsem_fast_memory_int0_bb_b0_attn_idx, 0x1440040, 0x144004c,
+ 0x1440048, 0x1440044
+};
+
+static struct attn_hw_reg *xsem_int_bb_b0_regs[3] = {
+ &xsem_int0_bb_b0, &xsem_int1_bb_b0, &xsem_fast_memory_int0_bb_b0,
+};
+
+static const u16 xsem_int0_k2_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg xsem_int0_k2 = {
+ 0, 32, xsem_int0_k2_attn_idx, 0x1400040, 0x140004c, 0x1400048,
+ 0x1400044
+};
+
+static const u16 xsem_int1_k2_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg xsem_int1_k2 = {
+ 1, 13, xsem_int1_k2_attn_idx, 0x1400050, 0x140005c, 0x1400058,
+ 0x1400054
+};
+
+static const u16 xsem_fast_memory_int0_k2_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg xsem_fast_memory_int0_k2 = {
+ 2, 1, xsem_fast_memory_int0_k2_attn_idx, 0x1440040, 0x144004c,
+ 0x1440048,
+ 0x1440044
+};
+
+static struct attn_hw_reg *xsem_int_k2_regs[3] = {
+ &xsem_int0_k2, &xsem_int1_k2, &xsem_fast_memory_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *xsem_prty_attn_desc[24] = {
+ "xsem_vfc_rbc_parity_error",
+ "xsem_storm_rf_parity_error",
+ "xsem_reg_gen_parity_error",
+ "xsem_mem006_i_ecc_0_rf_int",
+ "xsem_mem006_i_ecc_1_rf_int",
+ "xsem_mem005_i_mem_prty",
+ "xsem_mem002_i_mem_prty",
+ "xsem_mem004_i_mem_prty",
+ "xsem_mem003_i_mem_prty",
+ "xsem_mem001_i_mem_prty",
+ "xsem_fast_memory_mem024_i_mem_prty",
+ "xsem_fast_memory_mem023_i_mem_prty",
+ "xsem_fast_memory_mem022_i_mem_prty",
+ "xsem_fast_memory_mem021_i_mem_prty",
+ "xsem_fast_memory_mem020_i_mem_prty",
+ "xsem_fast_memory_mem019_i_mem_prty",
+ "xsem_fast_memory_mem018_i_mem_prty",
+ "xsem_fast_memory_vfc_config_mem005_i_ecc_rf_int",
+ "xsem_fast_memory_vfc_config_mem002_i_ecc_rf_int",
+ "xsem_fast_memory_vfc_config_mem006_i_mem_prty",
+ "xsem_fast_memory_vfc_config_mem001_i_mem_prty",
+ "xsem_fast_memory_vfc_config_mem004_i_mem_prty",
+ "xsem_fast_memory_vfc_config_mem003_i_mem_prty",
+ "xsem_fast_memory_vfc_config_mem007_i_mem_prty",
+};
+#else
+#define xsem_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 xsem_prty0_bb_a0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg xsem_prty0_bb_a0 = {
+ 0, 3, xsem_prty0_bb_a0_attn_idx, 0x14000c8, 0x14000d4, 0x14000d0,
+ 0x14000cc
+};
+
+static const u16 xsem_prty1_bb_a0_attn_idx[7] = {
+ 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg xsem_prty1_bb_a0 = {
+ 1, 7, xsem_prty1_bb_a0_attn_idx, 0x1400200, 0x140020c, 0x1400208,
+ 0x1400204
+};
+
+static struct attn_hw_reg *xsem_prty_bb_a0_regs[2] = {
+ &xsem_prty0_bb_a0, &xsem_prty1_bb_a0,
+};
+
+static const u16 xsem_prty0_bb_b0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg xsem_prty0_bb_b0 = {
+ 0, 3, xsem_prty0_bb_b0_attn_idx, 0x14000c8, 0x14000d4, 0x14000d0,
+ 0x14000cc
+};
+
+static const u16 xsem_prty1_bb_b0_attn_idx[7] = {
+ 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg xsem_prty1_bb_b0 = {
+ 1, 7, xsem_prty1_bb_b0_attn_idx, 0x1400200, 0x140020c, 0x1400208,
+ 0x1400204
+};
+
+static struct attn_hw_reg *xsem_prty_bb_b0_regs[2] = {
+ &xsem_prty0_bb_b0, &xsem_prty1_bb_b0,
+};
+
+static const u16 xsem_prty0_k2_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg xsem_prty0_k2 = {
+ 0, 3, xsem_prty0_k2_attn_idx, 0x14000c8, 0x14000d4, 0x14000d0,
+ 0x14000cc
+};
+
+static const u16 xsem_prty1_k2_attn_idx[7] = {
+ 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg xsem_prty1_k2 = {
+ 1, 7, xsem_prty1_k2_attn_idx, 0x1400200, 0x140020c, 0x1400208,
+ 0x1400204
+};
+
+static const u16 xsem_fast_memory_prty1_k2_attn_idx[7] = {
+ 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg xsem_fast_memory_prty1_k2 = {
+ 2, 7, xsem_fast_memory_prty1_k2_attn_idx, 0x1440200, 0x144020c,
+ 0x1440208,
+ 0x1440204
+};
+
+static struct attn_hw_reg *xsem_prty_k2_regs[3] = {
+ &xsem_prty0_k2, &xsem_prty1_k2, &xsem_fast_memory_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ysem_int_attn_desc[46] = {
+ "ysem_address_error",
+ "ysem_fic_last_error",
+ "ysem_fic_length_error",
+ "ysem_fic_fifo_error",
+ "ysem_pas_buf_fifo_error",
+ "ysem_sync_fin_pop_error",
+ "ysem_sync_dra_wr_push_error",
+ "ysem_sync_dra_wr_pop_error",
+ "ysem_sync_dra_rd_push_error",
+ "ysem_sync_dra_rd_pop_error",
+ "ysem_sync_fin_push_error",
+ "ysem_sem_fast_address_error",
+ "ysem_cam_lsb_inp_fifo",
+ "ysem_cam_msb_inp_fifo",
+ "ysem_cam_out_fifo",
+ "ysem_fin_fifo",
+ "ysem_thread_fifo_error",
+ "ysem_thread_overrun",
+ "ysem_sync_ext_store_push_error",
+ "ysem_sync_ext_store_pop_error",
+ "ysem_sync_ext_load_push_error",
+ "ysem_sync_ext_load_pop_error",
+ "ysem_sync_ram_rd_push_error",
+ "ysem_sync_ram_rd_pop_error",
+ "ysem_sync_ram_wr_pop_error",
+ "ysem_sync_ram_wr_push_error",
+ "ysem_sync_dbg_push_error",
+ "ysem_sync_dbg_pop_error",
+ "ysem_dbg_fifo_error",
+ "ysem_cam_msb2_inp_fifo",
+ "ysem_vfc_interrupt",
+ "ysem_vfc_out_fifo_error",
+ "ysem_storm_stack_uf_attn",
+ "ysem_storm_stack_of_attn",
+ "ysem_storm_runtime_error",
+ "ysem_ext_load_pend_wr_error",
+ "ysem_thread_rls_orun_error",
+ "ysem_thread_rls_aloc_error",
+ "ysem_thread_rls_vld_error",
+ "ysem_ext_thread_oor_error",
+ "ysem_ord_id_fifo_error",
+ "ysem_invld_foc_error",
+ "ysem_ext_ld_len_error",
+ "ysem_thrd_ord_fifo_error",
+ "ysem_invld_thrd_ord_error",
+ "ysem_fast_memory_address_error",
+};
+#else
+#define ysem_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ysem_int0_bb_a0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg ysem_int0_bb_a0 = {
+ 0, 32, ysem_int0_bb_a0_attn_idx, 0x1500040, 0x150004c, 0x1500048,
+ 0x1500044
+};
+
+static const u16 ysem_int1_bb_a0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg ysem_int1_bb_a0 = {
+ 1, 13, ysem_int1_bb_a0_attn_idx, 0x1500050, 0x150005c, 0x1500058,
+ 0x1500054
+};
+
+static const u16 ysem_fast_memory_int0_bb_a0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg ysem_fast_memory_int0_bb_a0 = {
+ 2, 1, ysem_fast_memory_int0_bb_a0_attn_idx, 0x1540040, 0x154004c,
+ 0x1540048, 0x1540044
+};
+
+static struct attn_hw_reg *ysem_int_bb_a0_regs[3] = {
+ &ysem_int0_bb_a0, &ysem_int1_bb_a0, &ysem_fast_memory_int0_bb_a0,
+};
+
+static const u16 ysem_int0_bb_b0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg ysem_int0_bb_b0 = {
+ 0, 32, ysem_int0_bb_b0_attn_idx, 0x1500040, 0x150004c, 0x1500048,
+ 0x1500044
+};
+
+static const u16 ysem_int1_bb_b0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg ysem_int1_bb_b0 = {
+ 1, 13, ysem_int1_bb_b0_attn_idx, 0x1500050, 0x150005c, 0x1500058,
+ 0x1500054
+};
+
+static const u16 ysem_fast_memory_int0_bb_b0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg ysem_fast_memory_int0_bb_b0 = {
+ 2, 1, ysem_fast_memory_int0_bb_b0_attn_idx, 0x1540040, 0x154004c,
+ 0x1540048, 0x1540044
+};
+
+static struct attn_hw_reg *ysem_int_bb_b0_regs[3] = {
+ &ysem_int0_bb_b0, &ysem_int1_bb_b0, &ysem_fast_memory_int0_bb_b0,
+};
+
+static const u16 ysem_int0_k2_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg ysem_int0_k2 = {
+ 0, 32, ysem_int0_k2_attn_idx, 0x1500040, 0x150004c, 0x1500048,
+ 0x1500044
+};
+
+static const u16 ysem_int1_k2_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg ysem_int1_k2 = {
+ 1, 13, ysem_int1_k2_attn_idx, 0x1500050, 0x150005c, 0x1500058,
+ 0x1500054
+};
+
+static const u16 ysem_fast_memory_int0_k2_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg ysem_fast_memory_int0_k2 = {
+ 2, 1, ysem_fast_memory_int0_k2_attn_idx, 0x1540040, 0x154004c,
+ 0x1540048,
+ 0x1540044
+};
+
+static struct attn_hw_reg *ysem_int_k2_regs[3] = {
+ &ysem_int0_k2, &ysem_int1_k2, &ysem_fast_memory_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ysem_prty_attn_desc[24] = {
+ "ysem_vfc_rbc_parity_error",
+ "ysem_storm_rf_parity_error",
+ "ysem_reg_gen_parity_error",
+ "ysem_mem006_i_ecc_0_rf_int",
+ "ysem_mem006_i_ecc_1_rf_int",
+ "ysem_mem005_i_mem_prty",
+ "ysem_mem002_i_mem_prty",
+ "ysem_mem004_i_mem_prty",
+ "ysem_mem003_i_mem_prty",
+ "ysem_mem001_i_mem_prty",
+ "ysem_fast_memory_mem024_i_mem_prty",
+ "ysem_fast_memory_mem023_i_mem_prty",
+ "ysem_fast_memory_mem022_i_mem_prty",
+ "ysem_fast_memory_mem021_i_mem_prty",
+ "ysem_fast_memory_mem020_i_mem_prty",
+ "ysem_fast_memory_mem019_i_mem_prty",
+ "ysem_fast_memory_mem018_i_mem_prty",
+ "ysem_fast_memory_vfc_config_mem005_i_ecc_rf_int",
+ "ysem_fast_memory_vfc_config_mem002_i_ecc_rf_int",
+ "ysem_fast_memory_vfc_config_mem006_i_mem_prty",
+ "ysem_fast_memory_vfc_config_mem001_i_mem_prty",
+ "ysem_fast_memory_vfc_config_mem004_i_mem_prty",
+ "ysem_fast_memory_vfc_config_mem003_i_mem_prty",
+ "ysem_fast_memory_vfc_config_mem007_i_mem_prty",
+};
+#else
+#define ysem_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 ysem_prty0_bb_a0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg ysem_prty0_bb_a0 = {
+ 0, 3, ysem_prty0_bb_a0_attn_idx, 0x15000c8, 0x15000d4, 0x15000d0,
+ 0x15000cc
+};
+
+static const u16 ysem_prty1_bb_a0_attn_idx[7] = {
+ 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg ysem_prty1_bb_a0 = {
+ 1, 7, ysem_prty1_bb_a0_attn_idx, 0x1500200, 0x150020c, 0x1500208,
+ 0x1500204
+};
+
+static struct attn_hw_reg *ysem_prty_bb_a0_regs[2] = {
+ &ysem_prty0_bb_a0, &ysem_prty1_bb_a0,
+};
+
+static const u16 ysem_prty0_bb_b0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg ysem_prty0_bb_b0 = {
+ 0, 3, ysem_prty0_bb_b0_attn_idx, 0x15000c8, 0x15000d4, 0x15000d0,
+ 0x15000cc
+};
+
+static const u16 ysem_prty1_bb_b0_attn_idx[7] = {
+ 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg ysem_prty1_bb_b0 = {
+ 1, 7, ysem_prty1_bb_b0_attn_idx, 0x1500200, 0x150020c, 0x1500208,
+ 0x1500204
+};
+
+static struct attn_hw_reg *ysem_prty_bb_b0_regs[2] = {
+ &ysem_prty0_bb_b0, &ysem_prty1_bb_b0,
+};
+
+static const u16 ysem_prty0_k2_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg ysem_prty0_k2 = {
+ 0, 3, ysem_prty0_k2_attn_idx, 0x15000c8, 0x15000d4, 0x15000d0,
+ 0x15000cc
+};
+
+static const u16 ysem_prty1_k2_attn_idx[7] = {
+ 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg ysem_prty1_k2 = {
+ 1, 7, ysem_prty1_k2_attn_idx, 0x1500200, 0x150020c, 0x1500208,
+ 0x1500204
+};
+
+static const u16 ysem_fast_memory_prty1_k2_attn_idx[7] = {
+ 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg ysem_fast_memory_prty1_k2 = {
+ 2, 7, ysem_fast_memory_prty1_k2_attn_idx, 0x1540200, 0x154020c,
+ 0x1540208,
+ 0x1540204
+};
+
+static struct attn_hw_reg *ysem_prty_k2_regs[3] = {
+ &ysem_prty0_k2, &ysem_prty1_k2, &ysem_fast_memory_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *psem_int_attn_desc[46] = {
+ "psem_address_error",
+ "psem_fic_last_error",
+ "psem_fic_length_error",
+ "psem_fic_fifo_error",
+ "psem_pas_buf_fifo_error",
+ "psem_sync_fin_pop_error",
+ "psem_sync_dra_wr_push_error",
+ "psem_sync_dra_wr_pop_error",
+ "psem_sync_dra_rd_push_error",
+ "psem_sync_dra_rd_pop_error",
+ "psem_sync_fin_push_error",
+ "psem_sem_fast_address_error",
+ "psem_cam_lsb_inp_fifo",
+ "psem_cam_msb_inp_fifo",
+ "psem_cam_out_fifo",
+ "psem_fin_fifo",
+ "psem_thread_fifo_error",
+ "psem_thread_overrun",
+ "psem_sync_ext_store_push_error",
+ "psem_sync_ext_store_pop_error",
+ "psem_sync_ext_load_push_error",
+ "psem_sync_ext_load_pop_error",
+ "psem_sync_ram_rd_push_error",
+ "psem_sync_ram_rd_pop_error",
+ "psem_sync_ram_wr_pop_error",
+ "psem_sync_ram_wr_push_error",
+ "psem_sync_dbg_push_error",
+ "psem_sync_dbg_pop_error",
+ "psem_dbg_fifo_error",
+ "psem_cam_msb2_inp_fifo",
+ "psem_vfc_interrupt",
+ "psem_vfc_out_fifo_error",
+ "psem_storm_stack_uf_attn",
+ "psem_storm_stack_of_attn",
+ "psem_storm_runtime_error",
+ "psem_ext_load_pend_wr_error",
+ "psem_thread_rls_orun_error",
+ "psem_thread_rls_aloc_error",
+ "psem_thread_rls_vld_error",
+ "psem_ext_thread_oor_error",
+ "psem_ord_id_fifo_error",
+ "psem_invld_foc_error",
+ "psem_ext_ld_len_error",
+ "psem_thrd_ord_fifo_error",
+ "psem_invld_thrd_ord_error",
+ "psem_fast_memory_address_error",
+};
+#else
+#define psem_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 psem_int0_bb_a0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg psem_int0_bb_a0 = {
+ 0, 32, psem_int0_bb_a0_attn_idx, 0x1600040, 0x160004c, 0x1600048,
+ 0x1600044
+};
+
+static const u16 psem_int1_bb_a0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg psem_int1_bb_a0 = {
+ 1, 13, psem_int1_bb_a0_attn_idx, 0x1600050, 0x160005c, 0x1600058,
+ 0x1600054
+};
+
+static const u16 psem_fast_memory_int0_bb_a0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg psem_fast_memory_int0_bb_a0 = {
+ 2, 1, psem_fast_memory_int0_bb_a0_attn_idx, 0x1640040, 0x164004c,
+ 0x1640048, 0x1640044
+};
+
+static struct attn_hw_reg *psem_int_bb_a0_regs[3] = {
+ &psem_int0_bb_a0, &psem_int1_bb_a0, &psem_fast_memory_int0_bb_a0,
+};
+
+static const u16 psem_int0_bb_b0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg psem_int0_bb_b0 = {
+ 0, 32, psem_int0_bb_b0_attn_idx, 0x1600040, 0x160004c, 0x1600048,
+ 0x1600044
+};
+
+static const u16 psem_int1_bb_b0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg psem_int1_bb_b0 = {
+ 1, 13, psem_int1_bb_b0_attn_idx, 0x1600050, 0x160005c, 0x1600058,
+ 0x1600054
+};
+
+static const u16 psem_fast_memory_int0_bb_b0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg psem_fast_memory_int0_bb_b0 = {
+ 2, 1, psem_fast_memory_int0_bb_b0_attn_idx, 0x1640040, 0x164004c,
+ 0x1640048, 0x1640044
+};
+
+static struct attn_hw_reg *psem_int_bb_b0_regs[3] = {
+ &psem_int0_bb_b0, &psem_int1_bb_b0, &psem_fast_memory_int0_bb_b0,
+};
+
+static const u16 psem_int0_k2_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg psem_int0_k2 = {
+ 0, 32, psem_int0_k2_attn_idx, 0x1600040, 0x160004c, 0x1600048,
+ 0x1600044
+};
+
+static const u16 psem_int1_k2_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg psem_int1_k2 = {
+ 1, 13, psem_int1_k2_attn_idx, 0x1600050, 0x160005c, 0x1600058,
+ 0x1600054
+};
+
+static const u16 psem_fast_memory_int0_k2_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg psem_fast_memory_int0_k2 = {
+ 2, 1, psem_fast_memory_int0_k2_attn_idx, 0x1640040, 0x164004c,
+ 0x1640048,
+ 0x1640044
+};
+
+static struct attn_hw_reg *psem_int_k2_regs[3] = {
+ &psem_int0_k2, &psem_int1_k2, &psem_fast_memory_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *psem_prty_attn_desc[23] = {
+ "psem_vfc_rbc_parity_error",
+ "psem_storm_rf_parity_error",
+ "psem_reg_gen_parity_error",
+ "psem_mem005_i_ecc_0_rf_int",
+ "psem_mem005_i_ecc_1_rf_int",
+ "psem_mem004_i_mem_prty",
+ "psem_mem002_i_mem_prty",
+ "psem_mem003_i_mem_prty",
+ "psem_mem001_i_mem_prty",
+ "psem_fast_memory_mem024_i_mem_prty",
+ "psem_fast_memory_mem023_i_mem_prty",
+ "psem_fast_memory_mem022_i_mem_prty",
+ "psem_fast_memory_mem021_i_mem_prty",
+ "psem_fast_memory_mem020_i_mem_prty",
+ "psem_fast_memory_mem019_i_mem_prty",
+ "psem_fast_memory_mem018_i_mem_prty",
+ "psem_fast_memory_vfc_config_mem005_i_ecc_rf_int",
+ "psem_fast_memory_vfc_config_mem002_i_ecc_rf_int",
+ "psem_fast_memory_vfc_config_mem006_i_mem_prty",
+ "psem_fast_memory_vfc_config_mem001_i_mem_prty",
+ "psem_fast_memory_vfc_config_mem004_i_mem_prty",
+ "psem_fast_memory_vfc_config_mem003_i_mem_prty",
+ "psem_fast_memory_vfc_config_mem007_i_mem_prty",
+};
+#else
+#define psem_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 psem_prty0_bb_a0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg psem_prty0_bb_a0 = {
+ 0, 3, psem_prty0_bb_a0_attn_idx, 0x16000c8, 0x16000d4, 0x16000d0,
+ 0x16000cc
+};
+
+static const u16 psem_prty1_bb_a0_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg psem_prty1_bb_a0 = {
+ 1, 6, psem_prty1_bb_a0_attn_idx, 0x1600200, 0x160020c, 0x1600208,
+ 0x1600204
+};
+
+static const u16 psem_fast_memory_vfc_config_prty1_bb_a0_attn_idx[6] = {
+ 16, 17, 19, 20, 21, 22,
+};
+
+static struct attn_hw_reg psem_fast_memory_vfc_config_prty1_bb_a0 = {
+ 2, 6, psem_fast_memory_vfc_config_prty1_bb_a0_attn_idx, 0x164a200,
+ 0x164a20c, 0x164a208, 0x164a204
+};
+
+static struct attn_hw_reg *psem_prty_bb_a0_regs[3] = {
+ &psem_prty0_bb_a0, &psem_prty1_bb_a0,
+ &psem_fast_memory_vfc_config_prty1_bb_a0,
+};
+
+static const u16 psem_prty0_bb_b0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg psem_prty0_bb_b0 = {
+ 0, 3, psem_prty0_bb_b0_attn_idx, 0x16000c8, 0x16000d4, 0x16000d0,
+ 0x16000cc
+};
+
+static const u16 psem_prty1_bb_b0_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg psem_prty1_bb_b0 = {
+ 1, 6, psem_prty1_bb_b0_attn_idx, 0x1600200, 0x160020c, 0x1600208,
+ 0x1600204
+};
+
+static const u16 psem_fast_memory_vfc_config_prty1_bb_b0_attn_idx[6] = {
+ 16, 17, 19, 20, 21, 22,
+};
+
+static struct attn_hw_reg psem_fast_memory_vfc_config_prty1_bb_b0 = {
+ 2, 6, psem_fast_memory_vfc_config_prty1_bb_b0_attn_idx, 0x164a200,
+ 0x164a20c, 0x164a208, 0x164a204
+};
+
+static struct attn_hw_reg *psem_prty_bb_b0_regs[3] = {
+ &psem_prty0_bb_b0, &psem_prty1_bb_b0,
+ &psem_fast_memory_vfc_config_prty1_bb_b0,
+};
+
+static const u16 psem_prty0_k2_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg psem_prty0_k2 = {
+ 0, 3, psem_prty0_k2_attn_idx, 0x16000c8, 0x16000d4, 0x16000d0,
+ 0x16000cc
+};
+
+static const u16 psem_prty1_k2_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg psem_prty1_k2 = {
+ 1, 6, psem_prty1_k2_attn_idx, 0x1600200, 0x160020c, 0x1600208,
+ 0x1600204
+};
+
+static const u16 psem_fast_memory_prty1_k2_attn_idx[7] = {
+ 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg psem_fast_memory_prty1_k2 = {
+ 2, 7, psem_fast_memory_prty1_k2_attn_idx, 0x1640200, 0x164020c,
+ 0x1640208,
+ 0x1640204
+};
+
+static const u16 psem_fast_memory_vfc_config_prty1_k2_attn_idx[6] = {
+ 16, 17, 18, 19, 20, 21,
+};
+
+static struct attn_hw_reg psem_fast_memory_vfc_config_prty1_k2 = {
+ 3, 6, psem_fast_memory_vfc_config_prty1_k2_attn_idx, 0x164a200,
+ 0x164a20c,
+ 0x164a208, 0x164a204
+};
+
+static struct attn_hw_reg *psem_prty_k2_regs[4] = {
+ &psem_prty0_k2, &psem_prty1_k2, &psem_fast_memory_prty1_k2,
+ &psem_fast_memory_vfc_config_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *rss_int_attn_desc[12] = {
+ "rss_address_error",
+ "rss_msg_inp_cnt_error",
+ "rss_msg_out_cnt_error",
+ "rss_inp_state_error",
+ "rss_out_state_error",
+ "rss_main_state_error",
+ "rss_calc_state_error",
+ "rss_inp_fifo_error",
+ "rss_cmd_fifo_error",
+ "rss_msg_fifo_error",
+ "rss_rsp_fifo_error",
+ "rss_hdr_fifo_error",
+};
+#else
+#define rss_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 rss_int0_bb_a0_attn_idx[12] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+};
+
+static struct attn_hw_reg rss_int0_bb_a0 = {
+ 0, 12, rss_int0_bb_a0_attn_idx, 0x238980, 0x23898c, 0x238988, 0x238984
+};
+
+static struct attn_hw_reg *rss_int_bb_a0_regs[1] = {
+ &rss_int0_bb_a0,
+};
+
+static const u16 rss_int0_bb_b0_attn_idx[12] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+};
+
+static struct attn_hw_reg rss_int0_bb_b0 = {
+ 0, 12, rss_int0_bb_b0_attn_idx, 0x238980, 0x23898c, 0x238988, 0x238984
+};
+
+static struct attn_hw_reg *rss_int_bb_b0_regs[1] = {
+ &rss_int0_bb_b0,
+};
+
+static const u16 rss_int0_k2_attn_idx[12] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+};
+
+static struct attn_hw_reg rss_int0_k2 = {
+ 0, 12, rss_int0_k2_attn_idx, 0x238980, 0x23898c, 0x238988, 0x238984
+};
+
+static struct attn_hw_reg *rss_int_k2_regs[1] = {
+ &rss_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *rss_prty_attn_desc[4] = {
+ "rss_mem002_i_ecc_rf_int",
+ "rss_mem001_i_ecc_rf_int",
+ "rss_mem003_i_mem_prty",
+ "rss_mem004_i_mem_prty",
+};
+#else
+#define rss_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 rss_prty1_bb_a0_attn_idx[4] = {
+ 0, 1, 2, 3,
+};
+
+static struct attn_hw_reg rss_prty1_bb_a0 = {
+ 0, 4, rss_prty1_bb_a0_attn_idx, 0x238a00, 0x238a0c, 0x238a08, 0x238a04
+};
+
+static struct attn_hw_reg *rss_prty_bb_a0_regs[1] = {
+ &rss_prty1_bb_a0,
+};
+
+static const u16 rss_prty1_bb_b0_attn_idx[4] = {
+ 0, 1, 2, 3,
+};
+
+static struct attn_hw_reg rss_prty1_bb_b0 = {
+ 0, 4, rss_prty1_bb_b0_attn_idx, 0x238a00, 0x238a0c, 0x238a08, 0x238a04
+};
+
+static struct attn_hw_reg *rss_prty_bb_b0_regs[1] = {
+ &rss_prty1_bb_b0,
+};
+
+static const u16 rss_prty1_k2_attn_idx[4] = {
+ 0, 1, 2, 3,
+};
+
+static struct attn_hw_reg rss_prty1_k2 = {
+ 0, 4, rss_prty1_k2_attn_idx, 0x238a00, 0x238a0c, 0x238a08, 0x238a04
+};
+
+static struct attn_hw_reg *rss_prty_k2_regs[1] = {
+ &rss_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tmld_int_attn_desc[6] = {
+ "tmld_address_error",
+ "tmld_ld_hdr_err",
+ "tmld_ld_seg_msg_err",
+ "tmld_ld_tid_mini_cache_err",
+ "tmld_ld_cid_mini_cache_err",
+ "tmld_ld_long_message",
+};
+#else
+#define tmld_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 tmld_int0_bb_a0_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg tmld_int0_bb_a0 = {
+ 0, 6, tmld_int0_bb_a0_attn_idx, 0x4d0180, 0x4d018c, 0x4d0188, 0x4d0184
+};
+
+static struct attn_hw_reg *tmld_int_bb_a0_regs[1] = {
+ &tmld_int0_bb_a0,
+};
+
+static const u16 tmld_int0_bb_b0_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg tmld_int0_bb_b0 = {
+ 0, 6, tmld_int0_bb_b0_attn_idx, 0x4d0180, 0x4d018c, 0x4d0188, 0x4d0184
+};
+
+static struct attn_hw_reg *tmld_int_bb_b0_regs[1] = {
+ &tmld_int0_bb_b0,
+};
+
+static const u16 tmld_int0_k2_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg tmld_int0_k2 = {
+ 0, 6, tmld_int0_k2_attn_idx, 0x4d0180, 0x4d018c, 0x4d0188, 0x4d0184
+};
+
+static struct attn_hw_reg *tmld_int_k2_regs[1] = {
+ &tmld_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tmld_prty_attn_desc[8] = {
+ "tmld_mem006_i_ecc_rf_int",
+ "tmld_mem002_i_ecc_rf_int",
+ "tmld_mem003_i_mem_prty",
+ "tmld_mem004_i_mem_prty",
+ "tmld_mem007_i_mem_prty",
+ "tmld_mem008_i_mem_prty",
+ "tmld_mem005_i_mem_prty",
+ "tmld_mem001_i_mem_prty",
+};
+#else
+#define tmld_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 tmld_prty1_bb_a0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg tmld_prty1_bb_a0 = {
+ 0, 8, tmld_prty1_bb_a0_attn_idx, 0x4d0200, 0x4d020c, 0x4d0208, 0x4d0204
+};
+
+static struct attn_hw_reg *tmld_prty_bb_a0_regs[1] = {
+ &tmld_prty1_bb_a0,
+};
+
+static const u16 tmld_prty1_bb_b0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg tmld_prty1_bb_b0 = {
+ 0, 8, tmld_prty1_bb_b0_attn_idx, 0x4d0200, 0x4d020c, 0x4d0208, 0x4d0204
+};
+
+static struct attn_hw_reg *tmld_prty_bb_b0_regs[1] = {
+ &tmld_prty1_bb_b0,
+};
+
+static const u16 tmld_prty1_k2_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg tmld_prty1_k2 = {
+ 0, 8, tmld_prty1_k2_attn_idx, 0x4d0200, 0x4d020c, 0x4d0208, 0x4d0204
+};
+
+static struct attn_hw_reg *tmld_prty_k2_regs[1] = {
+ &tmld_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *muld_int_attn_desc[6] = {
+ "muld_address_error",
+ "muld_ld_hdr_err",
+ "muld_ld_seg_msg_err",
+ "muld_ld_tid_mini_cache_err",
+ "muld_ld_cid_mini_cache_err",
+ "muld_ld_long_message",
+};
+#else
+#define muld_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 muld_int0_bb_a0_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg muld_int0_bb_a0 = {
+ 0, 6, muld_int0_bb_a0_attn_idx, 0x4e0180, 0x4e018c, 0x4e0188, 0x4e0184
+};
+
+static struct attn_hw_reg *muld_int_bb_a0_regs[1] = {
+ &muld_int0_bb_a0,
+};
+
+static const u16 muld_int0_bb_b0_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg muld_int0_bb_b0 = {
+ 0, 6, muld_int0_bb_b0_attn_idx, 0x4e0180, 0x4e018c, 0x4e0188, 0x4e0184
+};
+
+static struct attn_hw_reg *muld_int_bb_b0_regs[1] = {
+ &muld_int0_bb_b0,
+};
+
+static const u16 muld_int0_k2_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg muld_int0_k2 = {
+ 0, 6, muld_int0_k2_attn_idx, 0x4e0180, 0x4e018c, 0x4e0188, 0x4e0184
+};
+
+static struct attn_hw_reg *muld_int_k2_regs[1] = {
+ &muld_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *muld_prty_attn_desc[10] = {
+ "muld_mem005_i_ecc_rf_int",
+ "muld_mem001_i_ecc_rf_int",
+ "muld_mem008_i_ecc_rf_int",
+ "muld_mem007_i_ecc_rf_int",
+ "muld_mem002_i_mem_prty",
+ "muld_mem003_i_mem_prty",
+ "muld_mem009_i_mem_prty",
+ "muld_mem010_i_mem_prty",
+ "muld_mem004_i_mem_prty",
+ "muld_mem006_i_mem_prty",
+};
+#else
+#define muld_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 muld_prty1_bb_a0_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg muld_prty1_bb_a0 = {
+ 0, 10, muld_prty1_bb_a0_attn_idx, 0x4e0200, 0x4e020c, 0x4e0208,
+ 0x4e0204
+};
+
+static struct attn_hw_reg *muld_prty_bb_a0_regs[1] = {
+ &muld_prty1_bb_a0,
+};
+
+static const u16 muld_prty1_bb_b0_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg muld_prty1_bb_b0 = {
+ 0, 10, muld_prty1_bb_b0_attn_idx, 0x4e0200, 0x4e020c, 0x4e0208,
+ 0x4e0204
+};
+
+static struct attn_hw_reg *muld_prty_bb_b0_regs[1] = {
+ &muld_prty1_bb_b0,
+};
+
+static const u16 muld_prty1_k2_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg muld_prty1_k2 = {
+ 0, 10, muld_prty1_k2_attn_idx, 0x4e0200, 0x4e020c, 0x4e0208, 0x4e0204
+};
+
+static struct attn_hw_reg *muld_prty_k2_regs[1] = {
+ &muld_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *yuld_int_attn_desc[6] = {
+ "yuld_address_error",
+ "yuld_ld_hdr_err",
+ "yuld_ld_seg_msg_err",
+ "yuld_ld_tid_mini_cache_err",
+ "yuld_ld_cid_mini_cache_err",
+ "yuld_ld_long_message",
+};
+#else
+#define yuld_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 yuld_int0_bb_a0_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg yuld_int0_bb_a0 = {
+ 0, 6, yuld_int0_bb_a0_attn_idx, 0x4c8180, 0x4c818c, 0x4c8188, 0x4c8184
+};
+
+static struct attn_hw_reg *yuld_int_bb_a0_regs[1] = {
+ &yuld_int0_bb_a0,
+};
+
+static const u16 yuld_int0_bb_b0_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg yuld_int0_bb_b0 = {
+ 0, 6, yuld_int0_bb_b0_attn_idx, 0x4c8180, 0x4c818c, 0x4c8188, 0x4c8184
+};
+
+static struct attn_hw_reg *yuld_int_bb_b0_regs[1] = {
+ &yuld_int0_bb_b0,
+};
+
+static const u16 yuld_int0_k2_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg yuld_int0_k2 = {
+ 0, 6, yuld_int0_k2_attn_idx, 0x4c8180, 0x4c818c, 0x4c8188, 0x4c8184
+};
+
+static struct attn_hw_reg *yuld_int_k2_regs[1] = {
+ &yuld_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *yuld_prty_attn_desc[6] = {
+ "yuld_mem001_i_mem_prty",
+ "yuld_mem002_i_mem_prty",
+ "yuld_mem005_i_mem_prty",
+ "yuld_mem006_i_mem_prty",
+ "yuld_mem004_i_mem_prty",
+ "yuld_mem003_i_mem_prty",
+};
+#else
+#define yuld_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 yuld_prty1_bb_a0_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg yuld_prty1_bb_a0 = {
+ 0, 6, yuld_prty1_bb_a0_attn_idx, 0x4c8200, 0x4c820c, 0x4c8208, 0x4c8204
+};
+
+static struct attn_hw_reg *yuld_prty_bb_a0_regs[1] = {
+ &yuld_prty1_bb_a0,
+};
+
+static const u16 yuld_prty1_bb_b0_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg yuld_prty1_bb_b0 = {
+ 0, 6, yuld_prty1_bb_b0_attn_idx, 0x4c8200, 0x4c820c, 0x4c8208, 0x4c8204
+};
+
+static struct attn_hw_reg *yuld_prty_bb_b0_regs[1] = {
+ &yuld_prty1_bb_b0,
+};
+
+static const u16 yuld_prty1_k2_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg yuld_prty1_k2 = {
+ 0, 6, yuld_prty1_k2_attn_idx, 0x4c8200, 0x4c820c, 0x4c8208, 0x4c8204
+};
+
+static struct attn_hw_reg *yuld_prty_k2_regs[1] = {
+ &yuld_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *xyld_int_attn_desc[6] = {
+ "xyld_address_error",
+ "xyld_ld_hdr_err",
+ "xyld_ld_seg_msg_err",
+ "xyld_ld_tid_mini_cache_err",
+ "xyld_ld_cid_mini_cache_err",
+ "xyld_ld_long_message",
+};
+#else
+#define xyld_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 xyld_int0_bb_a0_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg xyld_int0_bb_a0 = {
+ 0, 6, xyld_int0_bb_a0_attn_idx, 0x4c0180, 0x4c018c, 0x4c0188, 0x4c0184
+};
+
+static struct attn_hw_reg *xyld_int_bb_a0_regs[1] = {
+ &xyld_int0_bb_a0,
+};
+
+static const u16 xyld_int0_bb_b0_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg xyld_int0_bb_b0 = {
+ 0, 6, xyld_int0_bb_b0_attn_idx, 0x4c0180, 0x4c018c, 0x4c0188, 0x4c0184
+};
+
+static struct attn_hw_reg *xyld_int_bb_b0_regs[1] = {
+ &xyld_int0_bb_b0,
+};
+
+static const u16 xyld_int0_k2_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg xyld_int0_k2 = {
+ 0, 6, xyld_int0_k2_attn_idx, 0x4c0180, 0x4c018c, 0x4c0188, 0x4c0184
+};
+
+static struct attn_hw_reg *xyld_int_k2_regs[1] = {
+ &xyld_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *xyld_prty_attn_desc[9] = {
+ "xyld_mem004_i_ecc_rf_int",
+ "xyld_mem006_i_ecc_rf_int",
+ "xyld_mem001_i_mem_prty",
+ "xyld_mem002_i_mem_prty",
+ "xyld_mem008_i_mem_prty",
+ "xyld_mem009_i_mem_prty",
+ "xyld_mem003_i_mem_prty",
+ "xyld_mem005_i_mem_prty",
+ "xyld_mem007_i_mem_prty",
+};
+#else
+#define xyld_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 xyld_prty1_bb_a0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg xyld_prty1_bb_a0 = {
+ 0, 9, xyld_prty1_bb_a0_attn_idx, 0x4c0200, 0x4c020c, 0x4c0208, 0x4c0204
+};
+
+static struct attn_hw_reg *xyld_prty_bb_a0_regs[1] = {
+ &xyld_prty1_bb_a0,
+};
+
+static const u16 xyld_prty1_bb_b0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg xyld_prty1_bb_b0 = {
+ 0, 9, xyld_prty1_bb_b0_attn_idx, 0x4c0200, 0x4c020c, 0x4c0208, 0x4c0204
+};
+
+static struct attn_hw_reg *xyld_prty_bb_b0_regs[1] = {
+ &xyld_prty1_bb_b0,
+};
+
+static const u16 xyld_prty1_k2_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg xyld_prty1_k2 = {
+ 0, 9, xyld_prty1_k2_attn_idx, 0x4c0200, 0x4c020c, 0x4c0208, 0x4c0204
+};
+
+static struct attn_hw_reg *xyld_prty_k2_regs[1] = {
+ &xyld_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *prm_int_attn_desc[11] = {
+ "prm_address_error",
+ "prm_ififo_error",
+ "prm_immed_fifo_error",
+ "prm_ofst_pend_error",
+ "prm_pad_pend_error",
+ "prm_pbinp_pend_error",
+ "prm_tag_pend_error",
+ "prm_mstorm_eop_err",
+ "prm_ustorm_eop_err",
+ "prm_mstorm_que_err",
+ "prm_ustorm_que_err",
+};
+#else
+#define prm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 prm_int0_bb_a0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg prm_int0_bb_a0 = {
+ 0, 11, prm_int0_bb_a0_attn_idx, 0x230040, 0x23004c, 0x230048, 0x230044
+};
+
+static struct attn_hw_reg *prm_int_bb_a0_regs[1] = {
+ &prm_int0_bb_a0,
+};
+
+static const u16 prm_int0_bb_b0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg prm_int0_bb_b0 = {
+ 0, 11, prm_int0_bb_b0_attn_idx, 0x230040, 0x23004c, 0x230048, 0x230044
+};
+
+static struct attn_hw_reg *prm_int_bb_b0_regs[1] = {
+ &prm_int0_bb_b0,
+};
+
+static const u16 prm_int0_k2_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg prm_int0_k2 = {
+ 0, 11, prm_int0_k2_attn_idx, 0x230040, 0x23004c, 0x230048, 0x230044
+};
+
+static struct attn_hw_reg *prm_int_k2_regs[1] = {
+ &prm_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *prm_prty_attn_desc[30] = {
+ "prm_datapath_registers",
+ "prm_mem012_i_ecc_rf_int",
+ "prm_mem013_i_ecc_rf_int",
+ "prm_mem014_i_ecc_rf_int",
+ "prm_mem020_i_ecc_rf_int",
+ "prm_mem004_i_mem_prty",
+ "prm_mem024_i_mem_prty",
+ "prm_mem016_i_mem_prty",
+ "prm_mem017_i_mem_prty",
+ "prm_mem008_i_mem_prty",
+ "prm_mem009_i_mem_prty",
+ "prm_mem010_i_mem_prty",
+ "prm_mem015_i_mem_prty",
+ "prm_mem011_i_mem_prty",
+ "prm_mem003_i_mem_prty",
+ "prm_mem002_i_mem_prty",
+ "prm_mem005_i_mem_prty",
+ "prm_mem023_i_mem_prty",
+ "prm_mem006_i_mem_prty",
+ "prm_mem007_i_mem_prty",
+ "prm_mem001_i_mem_prty",
+ "prm_mem022_i_mem_prty",
+ "prm_mem021_i_mem_prty",
+ "prm_mem019_i_mem_prty",
+ "prm_mem015_i_ecc_rf_int",
+ "prm_mem021_i_ecc_rf_int",
+ "prm_mem025_i_mem_prty",
+ "prm_mem018_i_mem_prty",
+ "prm_mem012_i_mem_prty",
+ "prm_mem020_i_mem_prty",
+};
+#else
+#define prm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 prm_prty1_bb_a0_attn_idx[25] = {
+ 2, 3, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 24,
+ 25, 26, 27, 28, 29,
+};
+
+static struct attn_hw_reg prm_prty1_bb_a0 = {
+ 0, 25, prm_prty1_bb_a0_attn_idx, 0x230200, 0x23020c, 0x230208, 0x230204
+};
+
+static struct attn_hw_reg *prm_prty_bb_a0_regs[1] = {
+ &prm_prty1_bb_a0,
+};
+
+static const u16 prm_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg prm_prty0_bb_b0 = {
+ 0, 1, prm_prty0_bb_b0_attn_idx, 0x230050, 0x23005c, 0x230058, 0x230054
+};
+
+static const u16 prm_prty1_bb_b0_attn_idx[24] = {
+ 2, 3, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 24, 25,
+ 26, 27, 28, 29,
+};
+
+static struct attn_hw_reg prm_prty1_bb_b0 = {
+ 1, 24, prm_prty1_bb_b0_attn_idx, 0x230200, 0x23020c, 0x230208, 0x230204
+};
+
+static struct attn_hw_reg *prm_prty_bb_b0_regs[2] = {
+ &prm_prty0_bb_b0, &prm_prty1_bb_b0,
+};
+
+static const u16 prm_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg prm_prty0_k2 = {
+ 0, 1, prm_prty0_k2_attn_idx, 0x230050, 0x23005c, 0x230058, 0x230054
+};
+
+static const u16 prm_prty1_k2_attn_idx[23] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21,
+ 22, 23,
+};
+
+static struct attn_hw_reg prm_prty1_k2 = {
+ 1, 23, prm_prty1_k2_attn_idx, 0x230200, 0x23020c, 0x230208, 0x230204
+};
+
+static struct attn_hw_reg *prm_prty_k2_regs[2] = {
+ &prm_prty0_k2, &prm_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pbf_pb1_int_attn_desc[9] = {
+ "pbf_pb1_address_error",
+ "pbf_pb1_eop_error",
+ "pbf_pb1_ififo_error",
+ "pbf_pb1_pfifo_error",
+ "pbf_pb1_db_buf_error",
+ "pbf_pb1_th_exec_error",
+ "pbf_pb1_tq_error_wr",
+ "pbf_pb1_tq_error_rd_th",
+ "pbf_pb1_tq_error_rd_ih",
+};
+#else
+#define pbf_pb1_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pbf_pb1_int0_bb_a0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg pbf_pb1_int0_bb_a0 = {
+ 0, 9, pbf_pb1_int0_bb_a0_attn_idx, 0xda0040, 0xda004c, 0xda0048,
+ 0xda0044
+};
+
+static struct attn_hw_reg *pbf_pb1_int_bb_a0_regs[1] = {
+ &pbf_pb1_int0_bb_a0,
+};
+
+static const u16 pbf_pb1_int0_bb_b0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg pbf_pb1_int0_bb_b0 = {
+ 0, 9, pbf_pb1_int0_bb_b0_attn_idx, 0xda0040, 0xda004c, 0xda0048,
+ 0xda0044
+};
+
+static struct attn_hw_reg *pbf_pb1_int_bb_b0_regs[1] = {
+ &pbf_pb1_int0_bb_b0,
+};
+
+static const u16 pbf_pb1_int0_k2_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg pbf_pb1_int0_k2 = {
+ 0, 9, pbf_pb1_int0_k2_attn_idx, 0xda0040, 0xda004c, 0xda0048, 0xda0044
+};
+
+static struct attn_hw_reg *pbf_pb1_int_k2_regs[1] = {
+ &pbf_pb1_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pbf_pb1_prty_attn_desc[1] = {
+ "pbf_pb1_datapath_registers",
+};
+#else
+#define pbf_pb1_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pbf_pb1_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pbf_pb1_prty0_bb_b0 = {
+ 0, 1, pbf_pb1_prty0_bb_b0_attn_idx, 0xda0050, 0xda005c, 0xda0058,
+ 0xda0054
+};
+
+static struct attn_hw_reg *pbf_pb1_prty_bb_b0_regs[1] = {
+ &pbf_pb1_prty0_bb_b0,
+};
+
+static const u16 pbf_pb1_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pbf_pb1_prty0_k2 = {
+ 0, 1, pbf_pb1_prty0_k2_attn_idx, 0xda0050, 0xda005c, 0xda0058, 0xda0054
+};
+
+static struct attn_hw_reg *pbf_pb1_prty_k2_regs[1] = {
+ &pbf_pb1_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pbf_pb2_int_attn_desc[9] = {
+ "pbf_pb2_address_error",
+ "pbf_pb2_eop_error",
+ "pbf_pb2_ififo_error",
+ "pbf_pb2_pfifo_error",
+ "pbf_pb2_db_buf_error",
+ "pbf_pb2_th_exec_error",
+ "pbf_pb2_tq_error_wr",
+ "pbf_pb2_tq_error_rd_th",
+ "pbf_pb2_tq_error_rd_ih",
+};
+#else
+#define pbf_pb2_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pbf_pb2_int0_bb_a0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg pbf_pb2_int0_bb_a0 = {
+ 0, 9, pbf_pb2_int0_bb_a0_attn_idx, 0xda4040, 0xda404c, 0xda4048,
+ 0xda4044
+};
+
+static struct attn_hw_reg *pbf_pb2_int_bb_a0_regs[1] = {
+ &pbf_pb2_int0_bb_a0,
+};
+
+static const u16 pbf_pb2_int0_bb_b0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg pbf_pb2_int0_bb_b0 = {
+ 0, 9, pbf_pb2_int0_bb_b0_attn_idx, 0xda4040, 0xda404c, 0xda4048,
+ 0xda4044
+};
+
+static struct attn_hw_reg *pbf_pb2_int_bb_b0_regs[1] = {
+ &pbf_pb2_int0_bb_b0,
+};
+
+static const u16 pbf_pb2_int0_k2_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg pbf_pb2_int0_k2 = {
+ 0, 9, pbf_pb2_int0_k2_attn_idx, 0xda4040, 0xda404c, 0xda4048, 0xda4044
+};
+
+static struct attn_hw_reg *pbf_pb2_int_k2_regs[1] = {
+ &pbf_pb2_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pbf_pb2_prty_attn_desc[1] = {
+ "pbf_pb2_datapath_registers",
+};
+#else
+#define pbf_pb2_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pbf_pb2_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pbf_pb2_prty0_bb_b0 = {
+ 0, 1, pbf_pb2_prty0_bb_b0_attn_idx, 0xda4050, 0xda405c, 0xda4058,
+ 0xda4054
+};
+
+static struct attn_hw_reg *pbf_pb2_prty_bb_b0_regs[1] = {
+ &pbf_pb2_prty0_bb_b0,
+};
+
+static const u16 pbf_pb2_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pbf_pb2_prty0_k2 = {
+ 0, 1, pbf_pb2_prty0_k2_attn_idx, 0xda4050, 0xda405c, 0xda4058, 0xda4054
+};
+
+static struct attn_hw_reg *pbf_pb2_prty_k2_regs[1] = {
+ &pbf_pb2_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *rpb_int_attn_desc[9] = {
+ "rpb_address_error",
+ "rpb_eop_error",
+ "rpb_ififo_error",
+ "rpb_pfifo_error",
+ "rpb_db_buf_error",
+ "rpb_th_exec_error",
+ "rpb_tq_error_wr",
+ "rpb_tq_error_rd_th",
+ "rpb_tq_error_rd_ih",
+};
+#else
+#define rpb_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 rpb_int0_bb_a0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg rpb_int0_bb_a0 = {
+ 0, 9, rpb_int0_bb_a0_attn_idx, 0x23c040, 0x23c04c, 0x23c048, 0x23c044
+};
+
+static struct attn_hw_reg *rpb_int_bb_a0_regs[1] = {
+ &rpb_int0_bb_a0,
+};
+
+static const u16 rpb_int0_bb_b0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg rpb_int0_bb_b0 = {
+ 0, 9, rpb_int0_bb_b0_attn_idx, 0x23c040, 0x23c04c, 0x23c048, 0x23c044
+};
+
+static struct attn_hw_reg *rpb_int_bb_b0_regs[1] = {
+ &rpb_int0_bb_b0,
+};
+
+static const u16 rpb_int0_k2_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg rpb_int0_k2 = {
+ 0, 9, rpb_int0_k2_attn_idx, 0x23c040, 0x23c04c, 0x23c048, 0x23c044
+};
+
+static struct attn_hw_reg *rpb_int_k2_regs[1] = {
+ &rpb_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *rpb_prty_attn_desc[1] = {
+ "rpb_datapath_registers",
+};
+#else
+#define rpb_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 rpb_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg rpb_prty0_bb_b0 = {
+ 0, 1, rpb_prty0_bb_b0_attn_idx, 0x23c050, 0x23c05c, 0x23c058, 0x23c054
+};
+
+static struct attn_hw_reg *rpb_prty_bb_b0_regs[1] = {
+ &rpb_prty0_bb_b0,
+};
+
+static const u16 rpb_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg rpb_prty0_k2 = {
+ 0, 1, rpb_prty0_k2_attn_idx, 0x23c050, 0x23c05c, 0x23c058, 0x23c054
+};
+
+static struct attn_hw_reg *rpb_prty_k2_regs[1] = {
+ &rpb_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *btb_int_attn_desc[139] = {
+ "btb_address_error",
+ "btb_rc_pkt0_rls_error",
+ "btb_unused_0",
+ "btb_rc_pkt0_len_error",
+ "btb_unused_1",
+ "btb_rc_pkt0_protocol_error",
+ "btb_rc_pkt1_rls_error",
+ "btb_unused_2",
+ "btb_rc_pkt1_len_error",
+ "btb_unused_3",
+ "btb_rc_pkt1_protocol_error",
+ "btb_rc_pkt2_rls_error",
+ "btb_unused_4",
+ "btb_rc_pkt2_len_error",
+ "btb_unused_5",
+ "btb_rc_pkt2_protocol_error",
+ "btb_rc_pkt3_rls_error",
+ "btb_unused_6",
+ "btb_rc_pkt3_len_error",
+ "btb_unused_7",
+ "btb_rc_pkt3_protocol_error",
+ "btb_rc_sop_req_tc_port_error",
+ "btb_unused_8",
+ "btb_wc0_protocol_error",
+ "btb_unused_9",
+ "btb_ll_blk_error",
+ "btb_ll_arb_calc_error",
+ "btb_fc_alm_calc_error",
+ "btb_wc0_inp_fifo_error",
+ "btb_wc0_sop_fifo_error",
+ "btb_wc0_len_fifo_error",
+ "btb_wc0_eop_fifo_error",
+ "btb_wc0_queue_fifo_error",
+ "btb_wc0_free_point_fifo_error",
+ "btb_wc0_next_point_fifo_error",
+ "btb_wc0_strt_fifo_error",
+ "btb_wc0_second_dscr_fifo_error",
+ "btb_wc0_pkt_avail_fifo_error",
+ "btb_wc0_notify_fifo_error",
+ "btb_wc0_ll_req_fifo_error",
+ "btb_wc0_ll_pa_cnt_error",
+ "btb_wc0_bb_pa_cnt_error",
+ "btb_wc_dup_upd_data_fifo_error",
+ "btb_wc_dup_rsp_dscr_fifo_error",
+ "btb_wc_dup_upd_point_fifo_error",
+ "btb_wc_dup_pkt_avail_fifo_error",
+ "btb_wc_dup_pkt_avail_cnt_error",
+ "btb_rc_pkt0_side_fifo_error",
+ "btb_rc_pkt0_req_fifo_error",
+ "btb_rc_pkt0_blk_fifo_error",
+ "btb_rc_pkt0_rls_left_fifo_error",
+ "btb_rc_pkt0_strt_ptr_fifo_error",
+ "btb_rc_pkt0_second_ptr_fifo_error",
+ "btb_rc_pkt0_rsp_fifo_error",
+ "btb_rc_pkt0_dscr_fifo_error",
+ "btb_rc_pkt1_side_fifo_error",
+ "btb_rc_pkt1_req_fifo_error",
+ "btb_rc_pkt1_blk_fifo_error",
+ "btb_rc_pkt1_rls_left_fifo_error",
+ "btb_rc_pkt1_strt_ptr_fifo_error",
+ "btb_rc_pkt1_second_ptr_fifo_error",
+ "btb_rc_pkt1_rsp_fifo_error",
+ "btb_rc_pkt1_dscr_fifo_error",
+ "btb_rc_pkt2_side_fifo_error",
+ "btb_rc_pkt2_req_fifo_error",
+ "btb_rc_pkt2_blk_fifo_error",
+ "btb_rc_pkt2_rls_left_fifo_error",
+ "btb_rc_pkt2_strt_ptr_fifo_error",
+ "btb_rc_pkt2_second_ptr_fifo_error",
+ "btb_rc_pkt2_rsp_fifo_error",
+ "btb_rc_pkt2_dscr_fifo_error",
+ "btb_rc_pkt3_side_fifo_error",
+ "btb_rc_pkt3_req_fifo_error",
+ "btb_rc_pkt3_blk_fifo_error",
+ "btb_rc_pkt3_rls_left_fifo_error",
+ "btb_rc_pkt3_strt_ptr_fifo_error",
+ "btb_rc_pkt3_second_ptr_fifo_error",
+ "btb_rc_pkt3_rsp_fifo_error",
+ "btb_rc_pkt3_dscr_fifo_error",
+ "btb_rc_sop_queue_fifo_error",
+ "btb_ll_arb_rls_fifo_error",
+ "btb_ll_arb_prefetch_fifo_error",
+ "btb_rc_pkt0_rls_fifo_error",
+ "btb_rc_pkt1_rls_fifo_error",
+ "btb_rc_pkt2_rls_fifo_error",
+ "btb_rc_pkt3_rls_fifo_error",
+ "btb_rc_pkt4_rls_fifo_error",
+ "btb_rc_pkt5_rls_fifo_error",
+ "btb_rc_pkt6_rls_fifo_error",
+ "btb_rc_pkt7_rls_fifo_error",
+ "btb_rc_pkt4_rls_error",
+ "btb_rc_pkt4_len_error",
+ "btb_rc_pkt4_protocol_error",
+ "btb_rc_pkt4_side_fifo_error",
+ "btb_rc_pkt4_req_fifo_error",
+ "btb_rc_pkt4_blk_fifo_error",
+ "btb_rc_pkt4_rls_left_fifo_error",
+ "btb_rc_pkt4_strt_ptr_fifo_error",
+ "btb_rc_pkt4_second_ptr_fifo_error",
+ "btb_rc_pkt4_rsp_fifo_error",
+ "btb_rc_pkt4_dscr_fifo_error",
+ "btb_rc_pkt5_rls_error",
+ "btb_rc_pkt5_len_error",
+ "btb_rc_pkt5_protocol_error",
+ "btb_rc_pkt5_side_fifo_error",
+ "btb_rc_pkt5_req_fifo_error",
+ "btb_rc_pkt5_blk_fifo_error",
+ "btb_rc_pkt5_rls_left_fifo_error",
+ "btb_rc_pkt5_strt_ptr_fifo_error",
+ "btb_rc_pkt5_second_ptr_fifo_error",
+ "btb_rc_pkt5_rsp_fifo_error",
+ "btb_rc_pkt5_dscr_fifo_error",
+ "btb_rc_pkt6_rls_error",
+ "btb_rc_pkt6_len_error",
+ "btb_rc_pkt6_protocol_error",
+ "btb_rc_pkt6_side_fifo_error",
+ "btb_rc_pkt6_req_fifo_error",
+ "btb_rc_pkt6_blk_fifo_error",
+ "btb_rc_pkt6_rls_left_fifo_error",
+ "btb_rc_pkt6_strt_ptr_fifo_error",
+ "btb_rc_pkt6_second_ptr_fifo_error",
+ "btb_rc_pkt6_rsp_fifo_error",
+ "btb_rc_pkt6_dscr_fifo_error",
+ "btb_rc_pkt7_rls_error",
+ "btb_rc_pkt7_len_error",
+ "btb_rc_pkt7_protocol_error",
+ "btb_rc_pkt7_side_fifo_error",
+ "btb_rc_pkt7_req_fifo_error",
+ "btb_rc_pkt7_blk_fifo_error",
+ "btb_rc_pkt7_rls_left_fifo_error",
+ "btb_rc_pkt7_strt_ptr_fifo_error",
+ "btb_rc_pkt7_second_ptr_fifo_error",
+ "btb_rc_pkt7_rsp_fifo_error",
+ "btb_packet_available_sync_fifo_push_error",
+ "btb_wc6_notify_fifo_error",
+ "btb_wc9_queue_fifo_error",
+ "btb_wc0_sync_fifo_push_error",
+ "btb_rls_sync_fifo_push_error",
+ "btb_rc_pkt7_dscr_fifo_error",
+};
+#else
+#define btb_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 btb_int0_bb_a0_attn_idx[16] = {
+ 0, 1, 3, 5, 6, 8, 10, 11, 13, 15, 16, 18, 20, 21, 23, 25,
+};
+
+static struct attn_hw_reg btb_int0_bb_a0 = {
+ 0, 16, btb_int0_bb_a0_attn_idx, 0xdb00c0, 0xdb00cc, 0xdb00c8, 0xdb00c4
+};
+
+static const u16 btb_int1_bb_a0_attn_idx[16] = {
+ 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+};
+
+static struct attn_hw_reg btb_int1_bb_a0 = {
+ 1, 16, btb_int1_bb_a0_attn_idx, 0xdb00d8, 0xdb00e4, 0xdb00e0, 0xdb00dc
+};
+
+static const u16 btb_int2_bb_a0_attn_idx[4] = {
+ 42, 43, 44, 45,
+};
+
+static struct attn_hw_reg btb_int2_bb_a0 = {
+ 2, 4, btb_int2_bb_a0_attn_idx, 0xdb00f0, 0xdb00fc, 0xdb00f8, 0xdb00f4
+};
+
+static const u16 btb_int3_bb_a0_attn_idx[32] = {
+ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
+};
+
+static struct attn_hw_reg btb_int3_bb_a0 = {
+ 3, 32, btb_int3_bb_a0_attn_idx, 0xdb0108, 0xdb0114, 0xdb0110, 0xdb010c
+};
+
+static const u16 btb_int4_bb_a0_attn_idx[23] = {
+ 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100,
+};
+
+static struct attn_hw_reg btb_int4_bb_a0 = {
+ 4, 23, btb_int4_bb_a0_attn_idx, 0xdb0120, 0xdb012c, 0xdb0128, 0xdb0124
+};
+
+static const u16 btb_int5_bb_a0_attn_idx[32] = {
+ 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114,
+ 115,
+ 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
+ 130, 131,
+ 132,
+};
+
+static struct attn_hw_reg btb_int5_bb_a0 = {
+ 5, 32, btb_int5_bb_a0_attn_idx, 0xdb0138, 0xdb0144, 0xdb0140, 0xdb013c
+};
+
+static const u16 btb_int6_bb_a0_attn_idx[1] = {
+ 133,
+};
+
+static struct attn_hw_reg btb_int6_bb_a0 = {
+ 6, 1, btb_int6_bb_a0_attn_idx, 0xdb0150, 0xdb015c, 0xdb0158, 0xdb0154
+};
+
+static const u16 btb_int8_bb_a0_attn_idx[1] = {
+ 134,
+};
+
+static struct attn_hw_reg btb_int8_bb_a0 = {
+ 7, 1, btb_int8_bb_a0_attn_idx, 0xdb0184, 0xdb0190, 0xdb018c, 0xdb0188
+};
+
+static const u16 btb_int9_bb_a0_attn_idx[1] = {
+ 135,
+};
+
+static struct attn_hw_reg btb_int9_bb_a0 = {
+ 8, 1, btb_int9_bb_a0_attn_idx, 0xdb019c, 0xdb01a8, 0xdb01a4, 0xdb01a0
+};
+
+static const u16 btb_int10_bb_a0_attn_idx[1] = {
+ 136,
+};
+
+static struct attn_hw_reg btb_int10_bb_a0 = {
+ 9, 1, btb_int10_bb_a0_attn_idx, 0xdb01b4, 0xdb01c0, 0xdb01bc, 0xdb01b8
+};
+
+static const u16 btb_int11_bb_a0_attn_idx[2] = {
+ 137, 138,
+};
+
+static struct attn_hw_reg btb_int11_bb_a0 = {
+ 10, 2, btb_int11_bb_a0_attn_idx, 0xdb01cc, 0xdb01d8, 0xdb01d4, 0xdb01d0
+};
+
+static struct attn_hw_reg *btb_int_bb_a0_regs[11] = {
+ &btb_int0_bb_a0, &btb_int1_bb_a0, &btb_int2_bb_a0, &btb_int3_bb_a0,
+ &btb_int4_bb_a0, &btb_int5_bb_a0, &btb_int6_bb_a0, &btb_int8_bb_a0,
+ &btb_int9_bb_a0, &btb_int10_bb_a0,
+ &btb_int11_bb_a0,
+};
+
+static const u16 btb_int0_bb_b0_attn_idx[16] = {
+ 0, 1, 3, 5, 6, 8, 10, 11, 13, 15, 16, 18, 20, 21, 23, 25,
+};
+
+static struct attn_hw_reg btb_int0_bb_b0 = {
+ 0, 16, btb_int0_bb_b0_attn_idx, 0xdb00c0, 0xdb00cc, 0xdb00c8, 0xdb00c4
+};
+
+static const u16 btb_int1_bb_b0_attn_idx[16] = {
+ 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+};
+
+static struct attn_hw_reg btb_int1_bb_b0 = {
+ 1, 16, btb_int1_bb_b0_attn_idx, 0xdb00d8, 0xdb00e4, 0xdb00e0, 0xdb00dc
+};
+
+static const u16 btb_int2_bb_b0_attn_idx[4] = {
+ 42, 43, 44, 45,
+};
+
+static struct attn_hw_reg btb_int2_bb_b0 = {
+ 2, 4, btb_int2_bb_b0_attn_idx, 0xdb00f0, 0xdb00fc, 0xdb00f8, 0xdb00f4
+};
+
+static const u16 btb_int3_bb_b0_attn_idx[32] = {
+ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
+};
+
+static struct attn_hw_reg btb_int3_bb_b0 = {
+ 3, 32, btb_int3_bb_b0_attn_idx, 0xdb0108, 0xdb0114, 0xdb0110, 0xdb010c
+};
+
+static const u16 btb_int4_bb_b0_attn_idx[23] = {
+ 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100,
+};
+
+static struct attn_hw_reg btb_int4_bb_b0 = {
+ 4, 23, btb_int4_bb_b0_attn_idx, 0xdb0120, 0xdb012c, 0xdb0128, 0xdb0124
+};
+
+static const u16 btb_int5_bb_b0_attn_idx[32] = {
+ 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114,
+ 115,
+ 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
+ 130, 131,
+ 132,
+};
+
+static struct attn_hw_reg btb_int5_bb_b0 = {
+ 5, 32, btb_int5_bb_b0_attn_idx, 0xdb0138, 0xdb0144, 0xdb0140, 0xdb013c
+};
+
+static const u16 btb_int6_bb_b0_attn_idx[1] = {
+ 133,
+};
+
+static struct attn_hw_reg btb_int6_bb_b0 = {
+ 6, 1, btb_int6_bb_b0_attn_idx, 0xdb0150, 0xdb015c, 0xdb0158, 0xdb0154
+};
+
+static const u16 btb_int8_bb_b0_attn_idx[1] = {
+ 134,
+};
+
+static struct attn_hw_reg btb_int8_bb_b0 = {
+ 7, 1, btb_int8_bb_b0_attn_idx, 0xdb0184, 0xdb0190, 0xdb018c, 0xdb0188
+};
+
+static const u16 btb_int9_bb_b0_attn_idx[1] = {
+ 135,
+};
+
+static struct attn_hw_reg btb_int9_bb_b0 = {
+ 8, 1, btb_int9_bb_b0_attn_idx, 0xdb019c, 0xdb01a8, 0xdb01a4, 0xdb01a0
+};
+
+static const u16 btb_int10_bb_b0_attn_idx[1] = {
+ 136,
+};
+
+static struct attn_hw_reg btb_int10_bb_b0 = {
+ 9, 1, btb_int10_bb_b0_attn_idx, 0xdb01b4, 0xdb01c0, 0xdb01bc, 0xdb01b8
+};
+
+static const u16 btb_int11_bb_b0_attn_idx[2] = {
+ 137, 138,
+};
+
+static struct attn_hw_reg btb_int11_bb_b0 = {
+ 10, 2, btb_int11_bb_b0_attn_idx, 0xdb01cc, 0xdb01d8, 0xdb01d4, 0xdb01d0
+};
+
+static struct attn_hw_reg *btb_int_bb_b0_regs[11] = {
+ &btb_int0_bb_b0, &btb_int1_bb_b0, &btb_int2_bb_b0, &btb_int3_bb_b0,
+ &btb_int4_bb_b0, &btb_int5_bb_b0, &btb_int6_bb_b0, &btb_int8_bb_b0,
+ &btb_int9_bb_b0, &btb_int10_bb_b0,
+ &btb_int11_bb_b0,
+};
+
+static const u16 btb_int0_k2_attn_idx[16] = {
+ 0, 1, 3, 5, 6, 8, 10, 11, 13, 15, 16, 18, 20, 21, 23, 25,
+};
+
+static struct attn_hw_reg btb_int0_k2 = {
+ 0, 16, btb_int0_k2_attn_idx, 0xdb00c0, 0xdb00cc, 0xdb00c8, 0xdb00c4
+};
+
+static const u16 btb_int1_k2_attn_idx[16] = {
+ 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+};
+
+static struct attn_hw_reg btb_int1_k2 = {
+ 1, 16, btb_int1_k2_attn_idx, 0xdb00d8, 0xdb00e4, 0xdb00e0, 0xdb00dc
+};
+
+static const u16 btb_int2_k2_attn_idx[4] = {
+ 42, 43, 44, 45,
+};
+
+static struct attn_hw_reg btb_int2_k2 = {
+ 2, 4, btb_int2_k2_attn_idx, 0xdb00f0, 0xdb00fc, 0xdb00f8, 0xdb00f4
+};
+
+static const u16 btb_int3_k2_attn_idx[32] = {
+ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
+};
+
+static struct attn_hw_reg btb_int3_k2 = {
+ 3, 32, btb_int3_k2_attn_idx, 0xdb0108, 0xdb0114, 0xdb0110, 0xdb010c
+};
+
+static const u16 btb_int4_k2_attn_idx[23] = {
+ 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100,
+};
+
+static struct attn_hw_reg btb_int4_k2 = {
+ 4, 23, btb_int4_k2_attn_idx, 0xdb0120, 0xdb012c, 0xdb0128, 0xdb0124
+};
+
+static const u16 btb_int5_k2_attn_idx[32] = {
+ 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114,
+ 115,
+ 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
+ 130, 131,
+ 132,
+};
+
+static struct attn_hw_reg btb_int5_k2 = {
+ 5, 32, btb_int5_k2_attn_idx, 0xdb0138, 0xdb0144, 0xdb0140, 0xdb013c
+};
+
+static const u16 btb_int6_k2_attn_idx[1] = {
+ 133,
+};
+
+static struct attn_hw_reg btb_int6_k2 = {
+ 6, 1, btb_int6_k2_attn_idx, 0xdb0150, 0xdb015c, 0xdb0158, 0xdb0154
+};
+
+static const u16 btb_int8_k2_attn_idx[1] = {
+ 134,
+};
+
+static struct attn_hw_reg btb_int8_k2 = {
+ 7, 1, btb_int8_k2_attn_idx, 0xdb0184, 0xdb0190, 0xdb018c, 0xdb0188
+};
+
+static const u16 btb_int9_k2_attn_idx[1] = {
+ 135,
+};
+
+static struct attn_hw_reg btb_int9_k2 = {
+ 8, 1, btb_int9_k2_attn_idx, 0xdb019c, 0xdb01a8, 0xdb01a4, 0xdb01a0
+};
+
+static const u16 btb_int10_k2_attn_idx[1] = {
+ 136,
+};
+
+static struct attn_hw_reg btb_int10_k2 = {
+ 9, 1, btb_int10_k2_attn_idx, 0xdb01b4, 0xdb01c0, 0xdb01bc, 0xdb01b8
+};
+
+static const u16 btb_int11_k2_attn_idx[2] = {
+ 137, 138,
+};
+
+static struct attn_hw_reg btb_int11_k2 = {
+ 10, 2, btb_int11_k2_attn_idx, 0xdb01cc, 0xdb01d8, 0xdb01d4, 0xdb01d0
+};
+
+static struct attn_hw_reg *btb_int_k2_regs[11] = {
+ &btb_int0_k2, &btb_int1_k2, &btb_int2_k2, &btb_int3_k2, &btb_int4_k2,
+ &btb_int5_k2, &btb_int6_k2, &btb_int8_k2, &btb_int9_k2, &btb_int10_k2,
+ &btb_int11_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *btb_prty_attn_desc[36] = {
+ "btb_ll_bank0_mem_prty",
+ "btb_ll_bank1_mem_prty",
+ "btb_ll_bank2_mem_prty",
+ "btb_ll_bank3_mem_prty",
+ "btb_datapath_registers",
+ "btb_mem001_i_ecc_rf_int",
+ "btb_mem008_i_ecc_rf_int",
+ "btb_mem009_i_ecc_rf_int",
+ "btb_mem010_i_ecc_rf_int",
+ "btb_mem011_i_ecc_rf_int",
+ "btb_mem012_i_ecc_rf_int",
+ "btb_mem013_i_ecc_rf_int",
+ "btb_mem014_i_ecc_rf_int",
+ "btb_mem015_i_ecc_rf_int",
+ "btb_mem016_i_ecc_rf_int",
+ "btb_mem002_i_ecc_rf_int",
+ "btb_mem003_i_ecc_rf_int",
+ "btb_mem004_i_ecc_rf_int",
+ "btb_mem005_i_ecc_rf_int",
+ "btb_mem006_i_ecc_rf_int",
+ "btb_mem007_i_ecc_rf_int",
+ "btb_mem033_i_mem_prty",
+ "btb_mem035_i_mem_prty",
+ "btb_mem034_i_mem_prty",
+ "btb_mem032_i_mem_prty",
+ "btb_mem031_i_mem_prty",
+ "btb_mem021_i_mem_prty",
+ "btb_mem022_i_mem_prty",
+ "btb_mem023_i_mem_prty",
+ "btb_mem024_i_mem_prty",
+ "btb_mem025_i_mem_prty",
+ "btb_mem026_i_mem_prty",
+ "btb_mem027_i_mem_prty",
+ "btb_mem028_i_mem_prty",
+ "btb_mem030_i_mem_prty",
+ "btb_mem029_i_mem_prty",
+};
+#else
+#define btb_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 btb_prty1_bb_a0_attn_idx[27] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 25, 26, 27,
+ 28,
+ 29, 30, 31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg btb_prty1_bb_a0 = {
+ 0, 27, btb_prty1_bb_a0_attn_idx, 0xdb0400, 0xdb040c, 0xdb0408, 0xdb0404
+};
+
+static struct attn_hw_reg *btb_prty_bb_a0_regs[1] = {
+ &btb_prty1_bb_a0,
+};
+
+static const u16 btb_prty0_bb_b0_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg btb_prty0_bb_b0 = {
+ 0, 5, btb_prty0_bb_b0_attn_idx, 0xdb01dc, 0xdb01e8, 0xdb01e4, 0xdb01e0
+};
+
+static const u16 btb_prty1_bb_b0_attn_idx[23] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 25, 30, 31,
+ 32,
+ 33, 34, 35,
+};
+
+static struct attn_hw_reg btb_prty1_bb_b0 = {
+ 1, 23, btb_prty1_bb_b0_attn_idx, 0xdb0400, 0xdb040c, 0xdb0408, 0xdb0404
+};
+
+static struct attn_hw_reg *btb_prty_bb_b0_regs[2] = {
+ &btb_prty0_bb_b0, &btb_prty1_bb_b0,
+};
+
+static const u16 btb_prty0_k2_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg btb_prty0_k2 = {
+ 0, 5, btb_prty0_k2_attn_idx, 0xdb01dc, 0xdb01e8, 0xdb01e4, 0xdb01e0
+};
+
+static const u16 btb_prty1_k2_attn_idx[31] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+ 24,
+ 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg btb_prty1_k2 = {
+ 1, 31, btb_prty1_k2_attn_idx, 0xdb0400, 0xdb040c, 0xdb0408, 0xdb0404
+};
+
+static struct attn_hw_reg *btb_prty_k2_regs[2] = {
+ &btb_prty0_k2, &btb_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pbf_int_attn_desc[1] = {
+ "pbf_address_error",
+};
+#else
+#define pbf_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pbf_int0_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pbf_int0_bb_a0 = {
+ 0, 1, pbf_int0_bb_a0_attn_idx, 0xd80180, 0xd8018c, 0xd80188, 0xd80184
+};
+
+static struct attn_hw_reg *pbf_int_bb_a0_regs[1] = {
+ &pbf_int0_bb_a0,
+};
+
+static const u16 pbf_int0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pbf_int0_bb_b0 = {
+ 0, 1, pbf_int0_bb_b0_attn_idx, 0xd80180, 0xd8018c, 0xd80188, 0xd80184
+};
+
+static struct attn_hw_reg *pbf_int_bb_b0_regs[1] = {
+ &pbf_int0_bb_b0,
+};
+
+static const u16 pbf_int0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pbf_int0_k2 = {
+ 0, 1, pbf_int0_k2_attn_idx, 0xd80180, 0xd8018c, 0xd80188, 0xd80184
+};
+
+static struct attn_hw_reg *pbf_int_k2_regs[1] = {
+ &pbf_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pbf_prty_attn_desc[59] = {
+ "pbf_datapath_registers",
+ "pbf_mem041_i_ecc_rf_int",
+ "pbf_mem042_i_ecc_rf_int",
+ "pbf_mem033_i_ecc_rf_int",
+ "pbf_mem003_i_ecc_rf_int",
+ "pbf_mem018_i_ecc_rf_int",
+ "pbf_mem009_i_ecc_0_rf_int",
+ "pbf_mem009_i_ecc_1_rf_int",
+ "pbf_mem012_i_ecc_0_rf_int",
+ "pbf_mem012_i_ecc_1_rf_int",
+ "pbf_mem012_i_ecc_2_rf_int",
+ "pbf_mem012_i_ecc_3_rf_int",
+ "pbf_mem012_i_ecc_4_rf_int",
+ "pbf_mem012_i_ecc_5_rf_int",
+ "pbf_mem012_i_ecc_6_rf_int",
+ "pbf_mem012_i_ecc_7_rf_int",
+ "pbf_mem012_i_ecc_8_rf_int",
+ "pbf_mem012_i_ecc_9_rf_int",
+ "pbf_mem012_i_ecc_10_rf_int",
+ "pbf_mem012_i_ecc_11_rf_int",
+ "pbf_mem012_i_ecc_12_rf_int",
+ "pbf_mem012_i_ecc_13_rf_int",
+ "pbf_mem012_i_ecc_14_rf_int",
+ "pbf_mem012_i_ecc_15_rf_int",
+ "pbf_mem040_i_mem_prty",
+ "pbf_mem039_i_mem_prty",
+ "pbf_mem038_i_mem_prty",
+ "pbf_mem034_i_mem_prty",
+ "pbf_mem032_i_mem_prty",
+ "pbf_mem031_i_mem_prty",
+ "pbf_mem030_i_mem_prty",
+ "pbf_mem029_i_mem_prty",
+ "pbf_mem022_i_mem_prty",
+ "pbf_mem023_i_mem_prty",
+ "pbf_mem021_i_mem_prty",
+ "pbf_mem020_i_mem_prty",
+ "pbf_mem001_i_mem_prty",
+ "pbf_mem002_i_mem_prty",
+ "pbf_mem006_i_mem_prty",
+ "pbf_mem007_i_mem_prty",
+ "pbf_mem005_i_mem_prty",
+ "pbf_mem004_i_mem_prty",
+ "pbf_mem028_i_mem_prty",
+ "pbf_mem026_i_mem_prty",
+ "pbf_mem027_i_mem_prty",
+ "pbf_mem019_i_mem_prty",
+ "pbf_mem016_i_mem_prty",
+ "pbf_mem017_i_mem_prty",
+ "pbf_mem008_i_mem_prty",
+ "pbf_mem011_i_mem_prty",
+ "pbf_mem010_i_mem_prty",
+ "pbf_mem024_i_mem_prty",
+ "pbf_mem025_i_mem_prty",
+ "pbf_mem037_i_mem_prty",
+ "pbf_mem036_i_mem_prty",
+ "pbf_mem035_i_mem_prty",
+ "pbf_mem014_i_mem_prty",
+ "pbf_mem015_i_mem_prty",
+ "pbf_mem013_i_mem_prty",
+};
+#else
+#define pbf_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pbf_prty1_bb_a0_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pbf_prty1_bb_a0 = {
+ 0, 31, pbf_prty1_bb_a0_attn_idx, 0xd80200, 0xd8020c, 0xd80208, 0xd80204
+};
+
+static const u16 pbf_prty2_bb_a0_attn_idx[27] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58,
+};
+
+static struct attn_hw_reg pbf_prty2_bb_a0 = {
+ 1, 27, pbf_prty2_bb_a0_attn_idx, 0xd80210, 0xd8021c, 0xd80218, 0xd80214
+};
+
+static struct attn_hw_reg *pbf_prty_bb_a0_regs[2] = {
+ &pbf_prty1_bb_a0, &pbf_prty2_bb_a0,
+};
+
+static const u16 pbf_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pbf_prty0_bb_b0 = {
+ 0, 1, pbf_prty0_bb_b0_attn_idx, 0xd80190, 0xd8019c, 0xd80198, 0xd80194
+};
+
+static const u16 pbf_prty1_bb_b0_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pbf_prty1_bb_b0 = {
+ 1, 31, pbf_prty1_bb_b0_attn_idx, 0xd80200, 0xd8020c, 0xd80208, 0xd80204
+};
+
+static const u16 pbf_prty2_bb_b0_attn_idx[27] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58,
+};
+
+static struct attn_hw_reg pbf_prty2_bb_b0 = {
+ 2, 27, pbf_prty2_bb_b0_attn_idx, 0xd80210, 0xd8021c, 0xd80218, 0xd80214
+};
+
+static struct attn_hw_reg *pbf_prty_bb_b0_regs[3] = {
+ &pbf_prty0_bb_b0, &pbf_prty1_bb_b0, &pbf_prty2_bb_b0,
+};
+
+static const u16 pbf_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pbf_prty0_k2 = {
+ 0, 1, pbf_prty0_k2_attn_idx, 0xd80190, 0xd8019c, 0xd80198, 0xd80194
+};
+
+static const u16 pbf_prty1_k2_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pbf_prty1_k2 = {
+ 1, 31, pbf_prty1_k2_attn_idx, 0xd80200, 0xd8020c, 0xd80208, 0xd80204
+};
+
+static const u16 pbf_prty2_k2_attn_idx[27] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58,
+};
+
+static struct attn_hw_reg pbf_prty2_k2 = {
+ 2, 27, pbf_prty2_k2_attn_idx, 0xd80210, 0xd8021c, 0xd80218, 0xd80214
+};
+
+static struct attn_hw_reg *pbf_prty_k2_regs[3] = {
+ &pbf_prty0_k2, &pbf_prty1_k2, &pbf_prty2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *rdif_int_attn_desc[9] = {
+ "rdif_address_error",
+ "rdif_fatal_dix_err",
+ "rdif_fatal_config_err",
+ "rdif_cmd_fifo_err",
+ "rdif_order_fifo_err",
+ "rdif_rdata_fifo_err",
+ "rdif_dif_stop_err",
+ "rdif_partial_dif_w_eob",
+ "rdif_l1_dirty_bit",
+};
+#else
+#define rdif_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 rdif_int0_bb_a0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg rdif_int0_bb_a0 = {
+ 0, 8, rdif_int0_bb_a0_attn_idx, 0x300180, 0x30018c, 0x300188, 0x300184
+};
+
+static struct attn_hw_reg *rdif_int_bb_a0_regs[1] = {
+ &rdif_int0_bb_a0,
+};
+
+static const u16 rdif_int0_bb_b0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg rdif_int0_bb_b0 = {
+ 0, 8, rdif_int0_bb_b0_attn_idx, 0x300180, 0x30018c, 0x300188, 0x300184
+};
+
+static struct attn_hw_reg *rdif_int_bb_b0_regs[1] = {
+ &rdif_int0_bb_b0,
+};
+
+static const u16 rdif_int0_k2_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg rdif_int0_k2 = {
+ 0, 9, rdif_int0_k2_attn_idx, 0x300180, 0x30018c, 0x300188, 0x300184
+};
+
+static struct attn_hw_reg *rdif_int_k2_regs[1] = {
+ &rdif_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *rdif_prty_attn_desc[2] = {
+ "rdif_unused_0",
+ "rdif_datapath_registers",
+};
+#else
+#define rdif_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 rdif_prty0_bb_b0_attn_idx[1] = {
+ 1,
+};
+
+static struct attn_hw_reg rdif_prty0_bb_b0 = {
+ 0, 1, rdif_prty0_bb_b0_attn_idx, 0x300190, 0x30019c, 0x300198, 0x300194
+};
+
+static struct attn_hw_reg *rdif_prty_bb_b0_regs[1] = {
+ &rdif_prty0_bb_b0,
+};
+
+static const u16 rdif_prty0_k2_attn_idx[1] = {
+ 1,
+};
+
+static struct attn_hw_reg rdif_prty0_k2 = {
+ 0, 1, rdif_prty0_k2_attn_idx, 0x300190, 0x30019c, 0x300198, 0x300194
+};
+
+static struct attn_hw_reg *rdif_prty_k2_regs[1] = {
+ &rdif_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tdif_int_attn_desc[9] = {
+ "tdif_address_error",
+ "tdif_fatal_dix_err",
+ "tdif_fatal_config_err",
+ "tdif_cmd_fifo_err",
+ "tdif_order_fifo_err",
+ "tdif_rdata_fifo_err",
+ "tdif_dif_stop_err",
+ "tdif_partial_dif_w_eob",
+ "tdif_l1_dirty_bit",
+};
+#else
+#define tdif_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 tdif_int0_bb_a0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg tdif_int0_bb_a0 = {
+ 0, 8, tdif_int0_bb_a0_attn_idx, 0x310180, 0x31018c, 0x310188, 0x310184
+};
+
+static struct attn_hw_reg *tdif_int_bb_a0_regs[1] = {
+ &tdif_int0_bb_a0,
+};
+
+static const u16 tdif_int0_bb_b0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg tdif_int0_bb_b0 = {
+ 0, 8, tdif_int0_bb_b0_attn_idx, 0x310180, 0x31018c, 0x310188, 0x310184
+};
+
+static struct attn_hw_reg *tdif_int_bb_b0_regs[1] = {
+ &tdif_int0_bb_b0,
+};
+
+static const u16 tdif_int0_k2_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg tdif_int0_k2 = {
+ 0, 9, tdif_int0_k2_attn_idx, 0x310180, 0x31018c, 0x310188, 0x310184
+};
+
+static struct attn_hw_reg *tdif_int_k2_regs[1] = {
+ &tdif_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tdif_prty_attn_desc[13] = {
+ "tdif_unused_0",
+ "tdif_datapath_registers",
+ "tdif_mem005_i_ecc_rf_int",
+ "tdif_mem009_i_ecc_rf_int",
+ "tdif_mem010_i_ecc_rf_int",
+ "tdif_mem011_i_ecc_rf_int",
+ "tdif_mem001_i_mem_prty",
+ "tdif_mem003_i_mem_prty",
+ "tdif_mem002_i_mem_prty",
+ "tdif_mem006_i_mem_prty",
+ "tdif_mem007_i_mem_prty",
+ "tdif_mem008_i_mem_prty",
+ "tdif_mem004_i_mem_prty",
+};
+#else
+#define tdif_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 tdif_prty1_bb_a0_attn_idx[11] = {
+ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg tdif_prty1_bb_a0 = {
+ 0, 11, tdif_prty1_bb_a0_attn_idx, 0x310200, 0x31020c, 0x310208,
+ 0x310204
+};
+
+static struct attn_hw_reg *tdif_prty_bb_a0_regs[1] = {
+ &tdif_prty1_bb_a0,
+};
+
+static const u16 tdif_prty0_bb_b0_attn_idx[1] = {
+ 1,
+};
+
+static struct attn_hw_reg tdif_prty0_bb_b0 = {
+ 0, 1, tdif_prty0_bb_b0_attn_idx, 0x310190, 0x31019c, 0x310198, 0x310194
+};
+
+static const u16 tdif_prty1_bb_b0_attn_idx[11] = {
+ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg tdif_prty1_bb_b0 = {
+ 1, 11, tdif_prty1_bb_b0_attn_idx, 0x310200, 0x31020c, 0x310208,
+ 0x310204
+};
+
+static struct attn_hw_reg *tdif_prty_bb_b0_regs[2] = {
+ &tdif_prty0_bb_b0, &tdif_prty1_bb_b0,
+};
+
+static const u16 tdif_prty0_k2_attn_idx[1] = {
+ 1,
+};
+
+static struct attn_hw_reg tdif_prty0_k2 = {
+ 0, 1, tdif_prty0_k2_attn_idx, 0x310190, 0x31019c, 0x310198, 0x310194
+};
+
+static const u16 tdif_prty1_k2_attn_idx[11] = {
+ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg tdif_prty1_k2 = {
+ 1, 11, tdif_prty1_k2_attn_idx, 0x310200, 0x31020c, 0x310208, 0x310204
+};
+
+static struct attn_hw_reg *tdif_prty_k2_regs[2] = {
+ &tdif_prty0_k2, &tdif_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *cdu_int_attn_desc[8] = {
+ "cdu_address_error",
+ "cdu_ccfc_ld_l1_num_error",
+ "cdu_tcfc_ld_l1_num_error",
+ "cdu_ccfc_wb_l1_num_error",
+ "cdu_tcfc_wb_l1_num_error",
+ "cdu_ccfc_cvld_error",
+ "cdu_tcfc_cvld_error",
+ "cdu_bvalid_error",
+};
+#else
+#define cdu_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 cdu_int0_bb_a0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg cdu_int0_bb_a0 = {
+ 0, 8, cdu_int0_bb_a0_attn_idx, 0x5801c0, 0x5801c4, 0x5801c8, 0x5801cc
+};
+
+static struct attn_hw_reg *cdu_int_bb_a0_regs[1] = {
+ &cdu_int0_bb_a0,
+};
+
+static const u16 cdu_int0_bb_b0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg cdu_int0_bb_b0 = {
+ 0, 8, cdu_int0_bb_b0_attn_idx, 0x5801c0, 0x5801c4, 0x5801c8, 0x5801cc
+};
+
+static struct attn_hw_reg *cdu_int_bb_b0_regs[1] = {
+ &cdu_int0_bb_b0,
+};
+
+static const u16 cdu_int0_k2_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg cdu_int0_k2 = {
+ 0, 8, cdu_int0_k2_attn_idx, 0x5801c0, 0x5801c4, 0x5801c8, 0x5801cc
+};
+
+static struct attn_hw_reg *cdu_int_k2_regs[1] = {
+ &cdu_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *cdu_prty_attn_desc[5] = {
+ "cdu_mem001_i_mem_prty",
+ "cdu_mem004_i_mem_prty",
+ "cdu_mem002_i_mem_prty",
+ "cdu_mem005_i_mem_prty",
+ "cdu_mem003_i_mem_prty",
+};
+#else
+#define cdu_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 cdu_prty1_bb_a0_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg cdu_prty1_bb_a0 = {
+ 0, 5, cdu_prty1_bb_a0_attn_idx, 0x580200, 0x58020c, 0x580208, 0x580204
+};
+
+static struct attn_hw_reg *cdu_prty_bb_a0_regs[1] = {
+ &cdu_prty1_bb_a0,
+};
+
+static const u16 cdu_prty1_bb_b0_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg cdu_prty1_bb_b0 = {
+ 0, 5, cdu_prty1_bb_b0_attn_idx, 0x580200, 0x58020c, 0x580208, 0x580204
+};
+
+static struct attn_hw_reg *cdu_prty_bb_b0_regs[1] = {
+ &cdu_prty1_bb_b0,
+};
+
+static const u16 cdu_prty1_k2_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg cdu_prty1_k2 = {
+ 0, 5, cdu_prty1_k2_attn_idx, 0x580200, 0x58020c, 0x580208, 0x580204
+};
+
+static struct attn_hw_reg *cdu_prty_k2_regs[1] = {
+ &cdu_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ccfc_int_attn_desc[2] = {
+ "ccfc_address_error",
+ "ccfc_exe_error",
+};
+#else
+#define ccfc_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ccfc_int0_bb_a0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg ccfc_int0_bb_a0 = {
+ 0, 2, ccfc_int0_bb_a0_attn_idx, 0x2e0180, 0x2e018c, 0x2e0188, 0x2e0184
+};
+
+static struct attn_hw_reg *ccfc_int_bb_a0_regs[1] = {
+ &ccfc_int0_bb_a0,
+};
+
+static const u16 ccfc_int0_bb_b0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg ccfc_int0_bb_b0 = {
+ 0, 2, ccfc_int0_bb_b0_attn_idx, 0x2e0180, 0x2e018c, 0x2e0188, 0x2e0184
+};
+
+static struct attn_hw_reg *ccfc_int_bb_b0_regs[1] = {
+ &ccfc_int0_bb_b0,
+};
+
+static const u16 ccfc_int0_k2_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg ccfc_int0_k2 = {
+ 0, 2, ccfc_int0_k2_attn_idx, 0x2e0180, 0x2e018c, 0x2e0188, 0x2e0184
+};
+
+static struct attn_hw_reg *ccfc_int_k2_regs[1] = {
+ &ccfc_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ccfc_prty_attn_desc[10] = {
+ "ccfc_mem001_i_ecc_rf_int",
+ "ccfc_mem003_i_mem_prty",
+ "ccfc_mem007_i_mem_prty",
+ "ccfc_mem006_i_mem_prty",
+ "ccfc_ccam_par_err",
+ "ccfc_scam_par_err",
+ "ccfc_lc_que_ram_porta_lsb_par_err",
+ "ccfc_lc_que_ram_porta_msb_par_err",
+ "ccfc_lc_que_ram_portb_lsb_par_err",
+ "ccfc_lc_que_ram_portb_msb_par_err",
+};
+#else
+#define ccfc_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 ccfc_prty1_bb_a0_attn_idx[4] = {
+ 0, 1, 2, 3,
+};
+
+static struct attn_hw_reg ccfc_prty1_bb_a0 = {
+ 0, 4, ccfc_prty1_bb_a0_attn_idx, 0x2e0200, 0x2e020c, 0x2e0208, 0x2e0204
+};
+
+static const u16 ccfc_prty0_bb_a0_attn_idx[2] = {
+ 4, 5,
+};
+
+static struct attn_hw_reg ccfc_prty0_bb_a0 = {
+ 1, 2, ccfc_prty0_bb_a0_attn_idx, 0x2e05e4, 0x2e05f0, 0x2e05ec, 0x2e05e8
+};
+
+static struct attn_hw_reg *ccfc_prty_bb_a0_regs[2] = {
+ &ccfc_prty1_bb_a0, &ccfc_prty0_bb_a0,
+};
+
+static const u16 ccfc_prty1_bb_b0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg ccfc_prty1_bb_b0 = {
+ 0, 2, ccfc_prty1_bb_b0_attn_idx, 0x2e0200, 0x2e020c, 0x2e0208, 0x2e0204
+};
+
+static const u16 ccfc_prty0_bb_b0_attn_idx[6] = {
+ 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg ccfc_prty0_bb_b0 = {
+ 1, 6, ccfc_prty0_bb_b0_attn_idx, 0x2e05e4, 0x2e05f0, 0x2e05ec, 0x2e05e8
+};
+
+static struct attn_hw_reg *ccfc_prty_bb_b0_regs[2] = {
+ &ccfc_prty1_bb_b0, &ccfc_prty0_bb_b0,
+};
+
+static const u16 ccfc_prty1_k2_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg ccfc_prty1_k2 = {
+ 0, 2, ccfc_prty1_k2_attn_idx, 0x2e0200, 0x2e020c, 0x2e0208, 0x2e0204
+};
+
+static const u16 ccfc_prty0_k2_attn_idx[6] = {
+ 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg ccfc_prty0_k2 = {
+ 1, 6, ccfc_prty0_k2_attn_idx, 0x2e05e4, 0x2e05f0, 0x2e05ec, 0x2e05e8
+};
+
+static struct attn_hw_reg *ccfc_prty_k2_regs[2] = {
+ &ccfc_prty1_k2, &ccfc_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tcfc_int_attn_desc[2] = {
+ "tcfc_address_error",
+ "tcfc_exe_error",
+};
+#else
+#define tcfc_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 tcfc_int0_bb_a0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg tcfc_int0_bb_a0 = {
+ 0, 2, tcfc_int0_bb_a0_attn_idx, 0x2d0180, 0x2d018c, 0x2d0188, 0x2d0184
+};
+
+static struct attn_hw_reg *tcfc_int_bb_a0_regs[1] = {
+ &tcfc_int0_bb_a0,
+};
+
+static const u16 tcfc_int0_bb_b0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg tcfc_int0_bb_b0 = {
+ 0, 2, tcfc_int0_bb_b0_attn_idx, 0x2d0180, 0x2d018c, 0x2d0188, 0x2d0184
+};
+
+static struct attn_hw_reg *tcfc_int_bb_b0_regs[1] = {
+ &tcfc_int0_bb_b0,
+};
+
+static const u16 tcfc_int0_k2_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg tcfc_int0_k2 = {
+ 0, 2, tcfc_int0_k2_attn_idx, 0x2d0180, 0x2d018c, 0x2d0188, 0x2d0184
+};
+
+static struct attn_hw_reg *tcfc_int_k2_regs[1] = {
+ &tcfc_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tcfc_prty_attn_desc[10] = {
+ "tcfc_mem002_i_mem_prty",
+ "tcfc_mem001_i_mem_prty",
+ "tcfc_mem006_i_mem_prty",
+ "tcfc_mem005_i_mem_prty",
+ "tcfc_ccam_par_err",
+ "tcfc_scam_par_err",
+ "tcfc_lc_que_ram_porta_lsb_par_err",
+ "tcfc_lc_que_ram_porta_msb_par_err",
+ "tcfc_lc_que_ram_portb_lsb_par_err",
+ "tcfc_lc_que_ram_portb_msb_par_err",
+};
+#else
+#define tcfc_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 tcfc_prty1_bb_a0_attn_idx[4] = {
+ 0, 1, 2, 3,
+};
+
+static struct attn_hw_reg tcfc_prty1_bb_a0 = {
+ 0, 4, tcfc_prty1_bb_a0_attn_idx, 0x2d0200, 0x2d020c, 0x2d0208, 0x2d0204
+};
+
+static const u16 tcfc_prty0_bb_a0_attn_idx[2] = {
+ 4, 5,
+};
+
+static struct attn_hw_reg tcfc_prty0_bb_a0 = {
+ 1, 2, tcfc_prty0_bb_a0_attn_idx, 0x2d05e4, 0x2d05f0, 0x2d05ec, 0x2d05e8
+};
+
+static struct attn_hw_reg *tcfc_prty_bb_a0_regs[2] = {
+ &tcfc_prty1_bb_a0, &tcfc_prty0_bb_a0,
+};
+
+static const u16 tcfc_prty1_bb_b0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg tcfc_prty1_bb_b0 = {
+ 0, 2, tcfc_prty1_bb_b0_attn_idx, 0x2d0200, 0x2d020c, 0x2d0208, 0x2d0204
+};
+
+static const u16 tcfc_prty0_bb_b0_attn_idx[6] = {
+ 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg tcfc_prty0_bb_b0 = {
+ 1, 6, tcfc_prty0_bb_b0_attn_idx, 0x2d05e4, 0x2d05f0, 0x2d05ec, 0x2d05e8
+};
+
+static struct attn_hw_reg *tcfc_prty_bb_b0_regs[2] = {
+ &tcfc_prty1_bb_b0, &tcfc_prty0_bb_b0,
+};
+
+static const u16 tcfc_prty1_k2_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg tcfc_prty1_k2 = {
+ 0, 2, tcfc_prty1_k2_attn_idx, 0x2d0200, 0x2d020c, 0x2d0208, 0x2d0204
+};
+
+static const u16 tcfc_prty0_k2_attn_idx[6] = {
+ 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg tcfc_prty0_k2 = {
+ 1, 6, tcfc_prty0_k2_attn_idx, 0x2d05e4, 0x2d05f0, 0x2d05ec, 0x2d05e8
+};
+
+static struct attn_hw_reg *tcfc_prty_k2_regs[2] = {
+ &tcfc_prty1_k2, &tcfc_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *igu_int_attn_desc[11] = {
+ "igu_address_error",
+ "igu_ctrl_fifo_error_err",
+ "igu_pxp_req_length_too_big",
+ "igu_host_tries2access_prod_upd",
+ "igu_vf_tries2acc_attn_cmd",
+ "igu_mme_bigger_then_5",
+ "igu_sb_index_is_not_valid",
+ "igu_durin_int_read_with_simd_dis",
+ "igu_cmd_fid_not_match",
+ "igu_segment_access_invalid",
+ "igu_attn_prod_acc",
+};
+#else
+#define igu_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 igu_int0_bb_a0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg igu_int0_bb_a0 = {
+ 0, 11, igu_int0_bb_a0_attn_idx, 0x180180, 0x18018c, 0x180188, 0x180184
+};
+
+static struct attn_hw_reg *igu_int_bb_a0_regs[1] = {
+ &igu_int0_bb_a0,
+};
+
+static const u16 igu_int0_bb_b0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg igu_int0_bb_b0 = {
+ 0, 11, igu_int0_bb_b0_attn_idx, 0x180180, 0x18018c, 0x180188, 0x180184
+};
+
+static struct attn_hw_reg *igu_int_bb_b0_regs[1] = {
+ &igu_int0_bb_b0,
+};
+
+static const u16 igu_int0_k2_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg igu_int0_k2 = {
+ 0, 11, igu_int0_k2_attn_idx, 0x180180, 0x18018c, 0x180188, 0x180184
+};
+
+static struct attn_hw_reg *igu_int_k2_regs[1] = {
+ &igu_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *igu_prty_attn_desc[42] = {
+ "igu_cam_parity",
+ "igu_mem009_i_ecc_rf_int",
+ "igu_mem015_i_mem_prty",
+ "igu_mem016_i_mem_prty",
+ "igu_mem017_i_mem_prty",
+ "igu_mem018_i_mem_prty",
+ "igu_mem019_i_mem_prty",
+ "igu_mem001_i_mem_prty",
+ "igu_mem002_i_mem_prty_0",
+ "igu_mem002_i_mem_prty_1",
+ "igu_mem004_i_mem_prty_0",
+ "igu_mem004_i_mem_prty_1",
+ "igu_mem004_i_mem_prty_2",
+ "igu_mem003_i_mem_prty",
+ "igu_mem005_i_mem_prty",
+ "igu_mem006_i_mem_prty_0",
+ "igu_mem006_i_mem_prty_1",
+ "igu_mem008_i_mem_prty_0",
+ "igu_mem008_i_mem_prty_1",
+ "igu_mem008_i_mem_prty_2",
+ "igu_mem007_i_mem_prty",
+ "igu_mem010_i_mem_prty_0",
+ "igu_mem010_i_mem_prty_1",
+ "igu_mem012_i_mem_prty_0",
+ "igu_mem012_i_mem_prty_1",
+ "igu_mem012_i_mem_prty_2",
+ "igu_mem011_i_mem_prty",
+ "igu_mem013_i_mem_prty",
+ "igu_mem014_i_mem_prty",
+ "igu_mem020_i_mem_prty",
+ "igu_mem003_i_mem_prty_0",
+ "igu_mem003_i_mem_prty_1",
+ "igu_mem003_i_mem_prty_2",
+ "igu_mem002_i_mem_prty",
+ "igu_mem007_i_mem_prty_0",
+ "igu_mem007_i_mem_prty_1",
+ "igu_mem007_i_mem_prty_2",
+ "igu_mem006_i_mem_prty",
+ "igu_mem010_i_mem_prty_2",
+ "igu_mem010_i_mem_prty_3",
+ "igu_mem013_i_mem_prty_0",
+ "igu_mem013_i_mem_prty_1",
+};
+#else
+#define igu_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 igu_prty0_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg igu_prty0_bb_a0 = {
+ 0, 1, igu_prty0_bb_a0_attn_idx, 0x180190, 0x18019c, 0x180198, 0x180194
+};
+
+static const u16 igu_prty1_bb_a0_attn_idx[31] = {
+ 1, 3, 4, 5, 6, 7, 10, 11, 14, 17, 18, 21, 22, 23, 24, 25, 26, 28, 29,
+ 30,
+ 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+};
+
+static struct attn_hw_reg igu_prty1_bb_a0 = {
+ 1, 31, igu_prty1_bb_a0_attn_idx, 0x180200, 0x18020c, 0x180208, 0x180204
+};
+
+static const u16 igu_prty2_bb_a0_attn_idx[1] = {
+ 2,
+};
+
+static struct attn_hw_reg igu_prty2_bb_a0 = {
+ 2, 1, igu_prty2_bb_a0_attn_idx, 0x180210, 0x18021c, 0x180218, 0x180214
+};
+
+static struct attn_hw_reg *igu_prty_bb_a0_regs[3] = {
+ &igu_prty0_bb_a0, &igu_prty1_bb_a0, &igu_prty2_bb_a0,
+};
+
+static const u16 igu_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg igu_prty0_bb_b0 = {
+ 0, 1, igu_prty0_bb_b0_attn_idx, 0x180190, 0x18019c, 0x180198, 0x180194
+};
+
+static const u16 igu_prty1_bb_b0_attn_idx[31] = {
+ 1, 3, 4, 5, 6, 7, 10, 11, 14, 17, 18, 21, 22, 23, 24, 25, 26, 28, 29,
+ 30,
+ 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+};
+
+static struct attn_hw_reg igu_prty1_bb_b0 = {
+ 1, 31, igu_prty1_bb_b0_attn_idx, 0x180200, 0x18020c, 0x180208, 0x180204
+};
+
+static const u16 igu_prty2_bb_b0_attn_idx[1] = {
+ 2,
+};
+
+static struct attn_hw_reg igu_prty2_bb_b0 = {
+ 2, 1, igu_prty2_bb_b0_attn_idx, 0x180210, 0x18021c, 0x180218, 0x180214
+};
+
+static struct attn_hw_reg *igu_prty_bb_b0_regs[3] = {
+ &igu_prty0_bb_b0, &igu_prty1_bb_b0, &igu_prty2_bb_b0,
+};
+
+static const u16 igu_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg igu_prty0_k2 = {
+ 0, 1, igu_prty0_k2_attn_idx, 0x180190, 0x18019c, 0x180198, 0x180194
+};
+
+static const u16 igu_prty1_k2_attn_idx[28] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21,
+ 22, 23, 24, 25, 26, 27, 28,
+};
+
+static struct attn_hw_reg igu_prty1_k2 = {
+ 1, 28, igu_prty1_k2_attn_idx, 0x180200, 0x18020c, 0x180208, 0x180204
+};
+
+static struct attn_hw_reg *igu_prty_k2_regs[2] = {
+ &igu_prty0_k2, &igu_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *cau_int_attn_desc[11] = {
+ "cau_address_error",
+ "cau_unauthorized_pxp_rd_cmd",
+ "cau_unauthorized_pxp_length_cmd",
+ "cau_pxp_sb_address_error",
+ "cau_pxp_pi_number_error",
+ "cau_cleanup_reg_sb_idx_error",
+ "cau_fsm_invalid_line",
+ "cau_cqe_fifo_err",
+ "cau_igu_wdata_fifo_err",
+ "cau_igu_req_fifo_err",
+ "cau_igu_cmd_fifo_err",
+};
+#else
+#define cau_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 cau_int0_bb_a0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg cau_int0_bb_a0 = {
+ 0, 11, cau_int0_bb_a0_attn_idx, 0x1c00d4, 0x1c00d8, 0x1c00dc, 0x1c00e0
+};
+
+static struct attn_hw_reg *cau_int_bb_a0_regs[1] = {
+ &cau_int0_bb_a0,
+};
+
+static const u16 cau_int0_bb_b0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg cau_int0_bb_b0 = {
+ 0, 11, cau_int0_bb_b0_attn_idx, 0x1c00d4, 0x1c00d8, 0x1c00dc, 0x1c00e0
+};
+
+static struct attn_hw_reg *cau_int_bb_b0_regs[1] = {
+ &cau_int0_bb_b0,
+};
+
+static const u16 cau_int0_k2_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg cau_int0_k2 = {
+ 0, 11, cau_int0_k2_attn_idx, 0x1c00d4, 0x1c00d8, 0x1c00dc, 0x1c00e0
+};
+
+static struct attn_hw_reg *cau_int_k2_regs[1] = {
+ &cau_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *cau_prty_attn_desc[15] = {
+ "cau_mem006_i_ecc_rf_int",
+ "cau_mem001_i_ecc_0_rf_int",
+ "cau_mem001_i_ecc_1_rf_int",
+ "cau_mem002_i_ecc_rf_int",
+ "cau_mem004_i_ecc_rf_int",
+ "cau_mem005_i_mem_prty",
+ "cau_mem007_i_mem_prty",
+ "cau_mem008_i_mem_prty",
+ "cau_mem009_i_mem_prty",
+ "cau_mem010_i_mem_prty",
+ "cau_mem011_i_mem_prty",
+ "cau_mem003_i_mem_prty_0",
+ "cau_mem003_i_mem_prty_1",
+ "cau_mem002_i_mem_prty",
+ "cau_mem004_i_mem_prty",
+};
+#else
+#define cau_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 cau_prty1_bb_a0_attn_idx[13] = {
+ 0, 1, 2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+};
+
+static struct attn_hw_reg cau_prty1_bb_a0 = {
+ 0, 13, cau_prty1_bb_a0_attn_idx, 0x1c0200, 0x1c020c, 0x1c0208, 0x1c0204
+};
+
+static struct attn_hw_reg *cau_prty_bb_a0_regs[1] = {
+ &cau_prty1_bb_a0,
+};
+
+static const u16 cau_prty1_bb_b0_attn_idx[13] = {
+ 0, 1, 2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+};
+
+static struct attn_hw_reg cau_prty1_bb_b0 = {
+ 0, 13, cau_prty1_bb_b0_attn_idx, 0x1c0200, 0x1c020c, 0x1c0208, 0x1c0204
+};
+
+static struct attn_hw_reg *cau_prty_bb_b0_regs[1] = {
+ &cau_prty1_bb_b0,
+};
+
+static const u16 cau_prty1_k2_attn_idx[13] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg cau_prty1_k2 = {
+ 0, 13, cau_prty1_k2_attn_idx, 0x1c0200, 0x1c020c, 0x1c0208, 0x1c0204
+};
+
+static struct attn_hw_reg *cau_prty_k2_regs[1] = {
+ &cau_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *umac_int_attn_desc[2] = {
+ "umac_address_error",
+ "umac_tx_overflow",
+};
+#else
+#define umac_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 umac_int0_k2_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg umac_int0_k2 = {
+ 0, 2, umac_int0_k2_attn_idx, 0x51180, 0x5118c, 0x51188, 0x51184
+};
+
+static struct attn_hw_reg *umac_int_k2_regs[1] = {
+ &umac_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *dbg_int_attn_desc[1] = {
+ "dbg_address_error",
+};
+#else
+#define dbg_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 dbg_int0_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg dbg_int0_bb_a0 = {
+ 0, 1, dbg_int0_bb_a0_attn_idx, 0x10180, 0x1018c, 0x10188, 0x10184
+};
+
+static struct attn_hw_reg *dbg_int_bb_a0_regs[1] = {
+ &dbg_int0_bb_a0,
+};
+
+static const u16 dbg_int0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg dbg_int0_bb_b0 = {
+ 0, 1, dbg_int0_bb_b0_attn_idx, 0x10180, 0x1018c, 0x10188, 0x10184
+};
+
+static struct attn_hw_reg *dbg_int_bb_b0_regs[1] = {
+ &dbg_int0_bb_b0,
+};
+
+static const u16 dbg_int0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg dbg_int0_k2 = {
+ 0, 1, dbg_int0_k2_attn_idx, 0x10180, 0x1018c, 0x10188, 0x10184
+};
+
+static struct attn_hw_reg *dbg_int_k2_regs[1] = {
+ &dbg_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *dbg_prty_attn_desc[1] = {
+ "dbg_mem001_i_mem_prty",
+};
+#else
+#define dbg_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 dbg_prty1_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg dbg_prty1_bb_a0 = {
+ 0, 1, dbg_prty1_bb_a0_attn_idx, 0x10200, 0x1020c, 0x10208, 0x10204
+};
+
+static struct attn_hw_reg *dbg_prty_bb_a0_regs[1] = {
+ &dbg_prty1_bb_a0,
+};
+
+static const u16 dbg_prty1_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg dbg_prty1_bb_b0 = {
+ 0, 1, dbg_prty1_bb_b0_attn_idx, 0x10200, 0x1020c, 0x10208, 0x10204
+};
+
+static struct attn_hw_reg *dbg_prty_bb_b0_regs[1] = {
+ &dbg_prty1_bb_b0,
+};
+
+static const u16 dbg_prty1_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg dbg_prty1_k2 = {
+ 0, 1, dbg_prty1_k2_attn_idx, 0x10200, 0x1020c, 0x10208, 0x10204
+};
+
+static struct attn_hw_reg *dbg_prty_k2_regs[1] = {
+ &dbg_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *nig_int_attn_desc[196] = {
+ "nig_address_error",
+ "nig_debug_fifo_error",
+ "nig_dorq_fifo_error",
+ "nig_dbg_syncfifo_error_wr",
+ "nig_dorq_syncfifo_error_wr",
+ "nig_storm_syncfifo_error_wr",
+ "nig_dbgmux_syncfifo_error_wr",
+ "nig_msdm_syncfifo_error_wr",
+ "nig_tsdm_syncfifo_error_wr",
+ "nig_usdm_syncfifo_error_wr",
+ "nig_xsdm_syncfifo_error_wr",
+ "nig_ysdm_syncfifo_error_wr",
+ "nig_tx_sopq0_error",
+ "nig_tx_sopq1_error",
+ "nig_tx_sopq2_error",
+ "nig_tx_sopq3_error",
+ "nig_tx_sopq4_error",
+ "nig_tx_sopq5_error",
+ "nig_tx_sopq6_error",
+ "nig_tx_sopq7_error",
+ "nig_tx_sopq8_error",
+ "nig_tx_sopq9_error",
+ "nig_tx_sopq10_error",
+ "nig_tx_sopq11_error",
+ "nig_tx_sopq12_error",
+ "nig_tx_sopq13_error",
+ "nig_tx_sopq14_error",
+ "nig_tx_sopq15_error",
+ "nig_lb_sopq0_error",
+ "nig_lb_sopq1_error",
+ "nig_lb_sopq2_error",
+ "nig_lb_sopq3_error",
+ "nig_lb_sopq4_error",
+ "nig_lb_sopq5_error",
+ "nig_lb_sopq6_error",
+ "nig_lb_sopq7_error",
+ "nig_lb_sopq8_error",
+ "nig_lb_sopq9_error",
+ "nig_lb_sopq10_error",
+ "nig_lb_sopq11_error",
+ "nig_lb_sopq12_error",
+ "nig_lb_sopq13_error",
+ "nig_lb_sopq14_error",
+ "nig_lb_sopq15_error",
+ "nig_p0_purelb_sopq_error",
+ "nig_p0_rx_macfifo_error",
+ "nig_p0_tx_macfifo_error",
+ "nig_p0_tx_bmb_fifo_error",
+ "nig_p0_lb_bmb_fifo_error",
+ "nig_p0_tx_btb_fifo_error",
+ "nig_p0_lb_btb_fifo_error",
+ "nig_p0_rx_llh_dfifo_error",
+ "nig_p0_tx_llh_dfifo_error",
+ "nig_p0_lb_llh_dfifo_error",
+ "nig_p0_rx_llh_hfifo_error",
+ "nig_p0_tx_llh_hfifo_error",
+ "nig_p0_lb_llh_hfifo_error",
+ "nig_p0_rx_llh_rfifo_error",
+ "nig_p0_tx_llh_rfifo_error",
+ "nig_p0_lb_llh_rfifo_error",
+ "nig_p0_storm_fifo_error",
+ "nig_p0_storm_dscr_fifo_error",
+ "nig_p0_tx_gnt_fifo_error",
+ "nig_p0_lb_gnt_fifo_error",
+ "nig_p0_tx_pause_too_long_int",
+ "nig_p0_tc0_pause_too_long_int",
+ "nig_p0_tc1_pause_too_long_int",
+ "nig_p0_tc2_pause_too_long_int",
+ "nig_p0_tc3_pause_too_long_int",
+ "nig_p0_tc4_pause_too_long_int",
+ "nig_p0_tc5_pause_too_long_int",
+ "nig_p0_tc6_pause_too_long_int",
+ "nig_p0_tc7_pause_too_long_int",
+ "nig_p0_lb_tc0_pause_too_long_int",
+ "nig_p0_lb_tc1_pause_too_long_int",
+ "nig_p0_lb_tc2_pause_too_long_int",
+ "nig_p0_lb_tc3_pause_too_long_int",
+ "nig_p0_lb_tc4_pause_too_long_int",
+ "nig_p0_lb_tc5_pause_too_long_int",
+ "nig_p0_lb_tc6_pause_too_long_int",
+ "nig_p0_lb_tc7_pause_too_long_int",
+ "nig_p0_lb_tc8_pause_too_long_int",
+ "nig_p1_purelb_sopq_error",
+ "nig_p1_rx_macfifo_error",
+ "nig_p1_tx_macfifo_error",
+ "nig_p1_tx_bmb_fifo_error",
+ "nig_p1_lb_bmb_fifo_error",
+ "nig_p1_tx_btb_fifo_error",
+ "nig_p1_lb_btb_fifo_error",
+ "nig_p1_rx_llh_dfifo_error",
+ "nig_p1_tx_llh_dfifo_error",
+ "nig_p1_lb_llh_dfifo_error",
+ "nig_p1_rx_llh_hfifo_error",
+ "nig_p1_tx_llh_hfifo_error",
+ "nig_p1_lb_llh_hfifo_error",
+ "nig_p1_rx_llh_rfifo_error",
+ "nig_p1_tx_llh_rfifo_error",
+ "nig_p1_lb_llh_rfifo_error",
+ "nig_p1_storm_fifo_error",
+ "nig_p1_storm_dscr_fifo_error",
+ "nig_p1_tx_gnt_fifo_error",
+ "nig_p1_lb_gnt_fifo_error",
+ "nig_p1_tx_pause_too_long_int",
+ "nig_p1_tc0_pause_too_long_int",
+ "nig_p1_tc1_pause_too_long_int",
+ "nig_p1_tc2_pause_too_long_int",
+ "nig_p1_tc3_pause_too_long_int",
+ "nig_p1_tc4_pause_too_long_int",
+ "nig_p1_tc5_pause_too_long_int",
+ "nig_p1_tc6_pause_too_long_int",
+ "nig_p1_tc7_pause_too_long_int",
+ "nig_p1_lb_tc0_pause_too_long_int",
+ "nig_p1_lb_tc1_pause_too_long_int",
+ "nig_p1_lb_tc2_pause_too_long_int",
+ "nig_p1_lb_tc3_pause_too_long_int",
+ "nig_p1_lb_tc4_pause_too_long_int",
+ "nig_p1_lb_tc5_pause_too_long_int",
+ "nig_p1_lb_tc6_pause_too_long_int",
+ "nig_p1_lb_tc7_pause_too_long_int",
+ "nig_p1_lb_tc8_pause_too_long_int",
+ "nig_p2_purelb_sopq_error",
+ "nig_p2_rx_macfifo_error",
+ "nig_p2_tx_macfifo_error",
+ "nig_p2_tx_bmb_fifo_error",
+ "nig_p2_lb_bmb_fifo_error",
+ "nig_p2_tx_btb_fifo_error",
+ "nig_p2_lb_btb_fifo_error",
+ "nig_p2_rx_llh_dfifo_error",
+ "nig_p2_tx_llh_dfifo_error",
+ "nig_p2_lb_llh_dfifo_error",
+ "nig_p2_rx_llh_hfifo_error",
+ "nig_p2_tx_llh_hfifo_error",
+ "nig_p2_lb_llh_hfifo_error",
+ "nig_p2_rx_llh_rfifo_error",
+ "nig_p2_tx_llh_rfifo_error",
+ "nig_p2_lb_llh_rfifo_error",
+ "nig_p2_storm_fifo_error",
+ "nig_p2_storm_dscr_fifo_error",
+ "nig_p2_tx_gnt_fifo_error",
+ "nig_p2_lb_gnt_fifo_error",
+ "nig_p2_tx_pause_too_long_int",
+ "nig_p2_tc0_pause_too_long_int",
+ "nig_p2_tc1_pause_too_long_int",
+ "nig_p2_tc2_pause_too_long_int",
+ "nig_p2_tc3_pause_too_long_int",
+ "nig_p2_tc4_pause_too_long_int",
+ "nig_p2_tc5_pause_too_long_int",
+ "nig_p2_tc6_pause_too_long_int",
+ "nig_p2_tc7_pause_too_long_int",
+ "nig_p2_lb_tc0_pause_too_long_int",
+ "nig_p2_lb_tc1_pause_too_long_int",
+ "nig_p2_lb_tc2_pause_too_long_int",
+ "nig_p2_lb_tc3_pause_too_long_int",
+ "nig_p2_lb_tc4_pause_too_long_int",
+ "nig_p2_lb_tc5_pause_too_long_int",
+ "nig_p2_lb_tc6_pause_too_long_int",
+ "nig_p2_lb_tc7_pause_too_long_int",
+ "nig_p2_lb_tc8_pause_too_long_int",
+ "nig_p3_purelb_sopq_error",
+ "nig_p3_rx_macfifo_error",
+ "nig_p3_tx_macfifo_error",
+ "nig_p3_tx_bmb_fifo_error",
+ "nig_p3_lb_bmb_fifo_error",
+ "nig_p3_tx_btb_fifo_error",
+ "nig_p3_lb_btb_fifo_error",
+ "nig_p3_rx_llh_dfifo_error",
+ "nig_p3_tx_llh_dfifo_error",
+ "nig_p3_lb_llh_dfifo_error",
+ "nig_p3_rx_llh_hfifo_error",
+ "nig_p3_tx_llh_hfifo_error",
+ "nig_p3_lb_llh_hfifo_error",
+ "nig_p3_rx_llh_rfifo_error",
+ "nig_p3_tx_llh_rfifo_error",
+ "nig_p3_lb_llh_rfifo_error",
+ "nig_p3_storm_fifo_error",
+ "nig_p3_storm_dscr_fifo_error",
+ "nig_p3_tx_gnt_fifo_error",
+ "nig_p3_lb_gnt_fifo_error",
+ "nig_p3_tx_pause_too_long_int",
+ "nig_p3_tc0_pause_too_long_int",
+ "nig_p3_tc1_pause_too_long_int",
+ "nig_p3_tc2_pause_too_long_int",
+ "nig_p3_tc3_pause_too_long_int",
+ "nig_p3_tc4_pause_too_long_int",
+ "nig_p3_tc5_pause_too_long_int",
+ "nig_p3_tc6_pause_too_long_int",
+ "nig_p3_tc7_pause_too_long_int",
+ "nig_p3_lb_tc0_pause_too_long_int",
+ "nig_p3_lb_tc1_pause_too_long_int",
+ "nig_p3_lb_tc2_pause_too_long_int",
+ "nig_p3_lb_tc3_pause_too_long_int",
+ "nig_p3_lb_tc4_pause_too_long_int",
+ "nig_p3_lb_tc5_pause_too_long_int",
+ "nig_p3_lb_tc6_pause_too_long_int",
+ "nig_p3_lb_tc7_pause_too_long_int",
+ "nig_p3_lb_tc8_pause_too_long_int",
+};
+#else
+#define nig_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 nig_int0_bb_a0_attn_idx[12] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+};
+
+static struct attn_hw_reg nig_int0_bb_a0 = {
+ 0, 12, nig_int0_bb_a0_attn_idx, 0x500040, 0x50004c, 0x500048, 0x500044
+};
+
+static const u16 nig_int1_bb_a0_attn_idx[32] = {
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+};
+
+static struct attn_hw_reg nig_int1_bb_a0 = {
+ 1, 32, nig_int1_bb_a0_attn_idx, 0x500050, 0x50005c, 0x500058, 0x500054
+};
+
+static const u16 nig_int2_bb_a0_attn_idx[20] = {
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+ 62, 63,
+};
+
+static struct attn_hw_reg nig_int2_bb_a0 = {
+ 2, 20, nig_int2_bb_a0_attn_idx, 0x500060, 0x50006c, 0x500068, 0x500064
+};
+
+static const u16 nig_int3_bb_a0_attn_idx[18] = {
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+};
+
+static struct attn_hw_reg nig_int3_bb_a0 = {
+ 3, 18, nig_int3_bb_a0_attn_idx, 0x500070, 0x50007c, 0x500078, 0x500074
+};
+
+static const u16 nig_int4_bb_a0_attn_idx[20] = {
+ 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+ 100, 101,
+};
+
+static struct attn_hw_reg nig_int4_bb_a0 = {
+ 4, 20, nig_int4_bb_a0_attn_idx, 0x500080, 0x50008c, 0x500088, 0x500084
+};
+
+static const u16 nig_int5_bb_a0_attn_idx[18] = {
+ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
+ 116,
+ 117, 118, 119,
+};
+
+static struct attn_hw_reg nig_int5_bb_a0 = {
+ 5, 18, nig_int5_bb_a0_attn_idx, 0x500090, 0x50009c, 0x500098, 0x500094
+};
+
+static struct attn_hw_reg *nig_int_bb_a0_regs[6] = {
+ &nig_int0_bb_a0, &nig_int1_bb_a0, &nig_int2_bb_a0, &nig_int3_bb_a0,
+ &nig_int4_bb_a0, &nig_int5_bb_a0,
+};
+
+static const u16 nig_int0_bb_b0_attn_idx[12] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+};
+
+static struct attn_hw_reg nig_int0_bb_b0 = {
+ 0, 12, nig_int0_bb_b0_attn_idx, 0x500040, 0x50004c, 0x500048, 0x500044
+};
+
+static const u16 nig_int1_bb_b0_attn_idx[32] = {
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+};
+
+static struct attn_hw_reg nig_int1_bb_b0 = {
+ 1, 32, nig_int1_bb_b0_attn_idx, 0x500050, 0x50005c, 0x500058, 0x500054
+};
+
+static const u16 nig_int2_bb_b0_attn_idx[20] = {
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+ 62, 63,
+};
+
+static struct attn_hw_reg nig_int2_bb_b0 = {
+ 2, 20, nig_int2_bb_b0_attn_idx, 0x500060, 0x50006c, 0x500068, 0x500064
+};
+
+static const u16 nig_int3_bb_b0_attn_idx[18] = {
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+};
+
+static struct attn_hw_reg nig_int3_bb_b0 = {
+ 3, 18, nig_int3_bb_b0_attn_idx, 0x500070, 0x50007c, 0x500078, 0x500074
+};
+
+static const u16 nig_int4_bb_b0_attn_idx[20] = {
+ 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+ 100, 101,
+};
+
+static struct attn_hw_reg nig_int4_bb_b0 = {
+ 4, 20, nig_int4_bb_b0_attn_idx, 0x500080, 0x50008c, 0x500088, 0x500084
+};
+
+static const u16 nig_int5_bb_b0_attn_idx[18] = {
+ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
+ 116,
+ 117, 118, 119,
+};
+
+static struct attn_hw_reg nig_int5_bb_b0 = {
+ 5, 18, nig_int5_bb_b0_attn_idx, 0x500090, 0x50009c, 0x500098, 0x500094
+};
+
+static struct attn_hw_reg *nig_int_bb_b0_regs[6] = {
+ &nig_int0_bb_b0, &nig_int1_bb_b0, &nig_int2_bb_b0, &nig_int3_bb_b0,
+ &nig_int4_bb_b0, &nig_int5_bb_b0,
+};
+
+static const u16 nig_int0_k2_attn_idx[12] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+};
+
+static struct attn_hw_reg nig_int0_k2 = {
+ 0, 12, nig_int0_k2_attn_idx, 0x500040, 0x50004c, 0x500048, 0x500044
+};
+
+static const u16 nig_int1_k2_attn_idx[32] = {
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+};
+
+static struct attn_hw_reg nig_int1_k2 = {
+ 1, 32, nig_int1_k2_attn_idx, 0x500050, 0x50005c, 0x500058, 0x500054
+};
+
+static const u16 nig_int2_k2_attn_idx[20] = {
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+ 62, 63,
+};
+
+static struct attn_hw_reg nig_int2_k2 = {
+ 2, 20, nig_int2_k2_attn_idx, 0x500060, 0x50006c, 0x500068, 0x500064
+};
+
+static const u16 nig_int3_k2_attn_idx[18] = {
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+};
+
+static struct attn_hw_reg nig_int3_k2 = {
+ 3, 18, nig_int3_k2_attn_idx, 0x500070, 0x50007c, 0x500078, 0x500074
+};
+
+static const u16 nig_int4_k2_attn_idx[20] = {
+ 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+ 100, 101,
+};
+
+static struct attn_hw_reg nig_int4_k2 = {
+ 4, 20, nig_int4_k2_attn_idx, 0x500080, 0x50008c, 0x500088, 0x500084
+};
+
+static const u16 nig_int5_k2_attn_idx[18] = {
+ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
+ 116,
+ 117, 118, 119,
+};
+
+static struct attn_hw_reg nig_int5_k2 = {
+ 5, 18, nig_int5_k2_attn_idx, 0x500090, 0x50009c, 0x500098, 0x500094
+};
+
+static const u16 nig_int6_k2_attn_idx[20] = {
+ 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133,
+ 134,
+ 135, 136, 137, 138, 139,
+};
+
+static struct attn_hw_reg nig_int6_k2 = {
+ 6, 20, nig_int6_k2_attn_idx, 0x5000a0, 0x5000ac, 0x5000a8, 0x5000a4
+};
+
+static const u16 nig_int7_k2_attn_idx[18] = {
+ 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153,
+ 154,
+ 155, 156, 157,
+};
+
+static struct attn_hw_reg nig_int7_k2 = {
+ 7, 18, nig_int7_k2_attn_idx, 0x5000b0, 0x5000bc, 0x5000b8, 0x5000b4
+};
+
+static const u16 nig_int8_k2_attn_idx[20] = {
+ 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171,
+ 172,
+ 173, 174, 175, 176, 177,
+};
+
+static struct attn_hw_reg nig_int8_k2 = {
+ 8, 20, nig_int8_k2_attn_idx, 0x5000c0, 0x5000cc, 0x5000c8, 0x5000c4
+};
+
+static const u16 nig_int9_k2_attn_idx[18] = {
+ 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
+ 192,
+ 193, 194, 195,
+};
+
+static struct attn_hw_reg nig_int9_k2 = {
+ 9, 18, nig_int9_k2_attn_idx, 0x5000d0, 0x5000dc, 0x5000d8, 0x5000d4
+};
+
+static struct attn_hw_reg *nig_int_k2_regs[10] = {
+ &nig_int0_k2, &nig_int1_k2, &nig_int2_k2, &nig_int3_k2, &nig_int4_k2,
+ &nig_int5_k2, &nig_int6_k2, &nig_int7_k2, &nig_int8_k2, &nig_int9_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *nig_prty_attn_desc[113] = {
+ "nig_datapath_parity_error",
+ "nig_mem107_i_mem_prty",
+ "nig_mem103_i_mem_prty",
+ "nig_mem104_i_mem_prty",
+ "nig_mem105_i_mem_prty",
+ "nig_mem106_i_mem_prty",
+ "nig_mem072_i_mem_prty",
+ "nig_mem071_i_mem_prty",
+ "nig_mem074_i_mem_prty",
+ "nig_mem073_i_mem_prty",
+ "nig_mem076_i_mem_prty",
+ "nig_mem075_i_mem_prty",
+ "nig_mem078_i_mem_prty",
+ "nig_mem077_i_mem_prty",
+ "nig_mem055_i_mem_prty",
+ "nig_mem062_i_mem_prty",
+ "nig_mem063_i_mem_prty",
+ "nig_mem064_i_mem_prty",
+ "nig_mem065_i_mem_prty",
+ "nig_mem066_i_mem_prty",
+ "nig_mem067_i_mem_prty",
+ "nig_mem068_i_mem_prty",
+ "nig_mem069_i_mem_prty",
+ "nig_mem070_i_mem_prty",
+ "nig_mem056_i_mem_prty",
+ "nig_mem057_i_mem_prty",
+ "nig_mem058_i_mem_prty",
+ "nig_mem059_i_mem_prty",
+ "nig_mem060_i_mem_prty",
+ "nig_mem061_i_mem_prty",
+ "nig_mem035_i_mem_prty",
+ "nig_mem046_i_mem_prty",
+ "nig_mem051_i_mem_prty",
+ "nig_mem052_i_mem_prty",
+ "nig_mem090_i_mem_prty",
+ "nig_mem089_i_mem_prty",
+ "nig_mem092_i_mem_prty",
+ "nig_mem091_i_mem_prty",
+ "nig_mem109_i_mem_prty",
+ "nig_mem110_i_mem_prty",
+ "nig_mem001_i_mem_prty",
+ "nig_mem008_i_mem_prty",
+ "nig_mem009_i_mem_prty",
+ "nig_mem010_i_mem_prty",
+ "nig_mem011_i_mem_prty",
+ "nig_mem012_i_mem_prty",
+ "nig_mem013_i_mem_prty",
+ "nig_mem014_i_mem_prty",
+ "nig_mem015_i_mem_prty",
+ "nig_mem016_i_mem_prty",
+ "nig_mem002_i_mem_prty",
+ "nig_mem003_i_mem_prty",
+ "nig_mem004_i_mem_prty",
+ "nig_mem005_i_mem_prty",
+ "nig_mem006_i_mem_prty",
+ "nig_mem007_i_mem_prty",
+ "nig_mem080_i_mem_prty",
+ "nig_mem081_i_mem_prty",
+ "nig_mem082_i_mem_prty",
+ "nig_mem083_i_mem_prty",
+ "nig_mem048_i_mem_prty",
+ "nig_mem049_i_mem_prty",
+ "nig_mem102_i_mem_prty",
+ "nig_mem087_i_mem_prty",
+ "nig_mem086_i_mem_prty",
+ "nig_mem088_i_mem_prty",
+ "nig_mem079_i_mem_prty",
+ "nig_mem047_i_mem_prty",
+ "nig_mem050_i_mem_prty",
+ "nig_mem053_i_mem_prty",
+ "nig_mem054_i_mem_prty",
+ "nig_mem036_i_mem_prty",
+ "nig_mem037_i_mem_prty",
+ "nig_mem038_i_mem_prty",
+ "nig_mem039_i_mem_prty",
+ "nig_mem040_i_mem_prty",
+ "nig_mem041_i_mem_prty",
+ "nig_mem042_i_mem_prty",
+ "nig_mem043_i_mem_prty",
+ "nig_mem044_i_mem_prty",
+ "nig_mem045_i_mem_prty",
+ "nig_mem093_i_mem_prty",
+ "nig_mem094_i_mem_prty",
+ "nig_mem027_i_mem_prty",
+ "nig_mem028_i_mem_prty",
+ "nig_mem029_i_mem_prty",
+ "nig_mem030_i_mem_prty",
+ "nig_mem017_i_mem_prty",
+ "nig_mem018_i_mem_prty",
+ "nig_mem095_i_mem_prty",
+ "nig_mem084_i_mem_prty",
+ "nig_mem085_i_mem_prty",
+ "nig_mem099_i_mem_prty",
+ "nig_mem100_i_mem_prty",
+ "nig_mem096_i_mem_prty",
+ "nig_mem097_i_mem_prty",
+ "nig_mem098_i_mem_prty",
+ "nig_mem031_i_mem_prty",
+ "nig_mem032_i_mem_prty",
+ "nig_mem033_i_mem_prty",
+ "nig_mem034_i_mem_prty",
+ "nig_mem019_i_mem_prty",
+ "nig_mem020_i_mem_prty",
+ "nig_mem021_i_mem_prty",
+ "nig_mem022_i_mem_prty",
+ "nig_mem101_i_mem_prty",
+ "nig_mem023_i_mem_prty",
+ "nig_mem024_i_mem_prty",
+ "nig_mem025_i_mem_prty",
+ "nig_mem026_i_mem_prty",
+ "nig_mem108_i_mem_prty",
+ "nig_mem031_ext_i_mem_prty",
+ "nig_mem034_ext_i_mem_prty",
+};
+#else
+#define nig_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 nig_prty1_bb_a0_attn_idx[31] = {
+ 1, 2, 5, 12, 13, 23, 35, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 60, 61, 62, 63, 64, 65, 66,
+};
+
+static struct attn_hw_reg nig_prty1_bb_a0 = {
+ 0, 31, nig_prty1_bb_a0_attn_idx, 0x500200, 0x50020c, 0x500208, 0x500204
+};
+
+static const u16 nig_prty2_bb_a0_attn_idx[31] = {
+ 33, 69, 70, 90, 91, 8, 11, 10, 14, 17, 18, 19, 20, 21, 22, 7, 6, 24, 25,
+ 26, 27, 28, 29, 15, 16, 57, 58, 59, 9, 94, 95,
+};
+
+static struct attn_hw_reg nig_prty2_bb_a0 = {
+ 1, 31, nig_prty2_bb_a0_attn_idx, 0x500210, 0x50021c, 0x500218, 0x500214
+};
+
+static const u16 nig_prty3_bb_a0_attn_idx[31] = {
+ 96, 97, 98, 103, 104, 92, 93, 105, 106, 107, 108, 109, 80, 31, 67, 83,
+ 84,
+ 3, 68, 85, 86, 89, 77, 78, 79, 4, 32, 36, 81, 82, 87,
+};
+
+static struct attn_hw_reg nig_prty3_bb_a0 = {
+ 2, 31, nig_prty3_bb_a0_attn_idx, 0x500220, 0x50022c, 0x500228, 0x500224
+};
+
+static const u16 nig_prty4_bb_a0_attn_idx[14] = {
+ 88, 101, 102, 75, 71, 74, 76, 73, 72, 34, 37, 99, 30, 100,
+};
+
+static struct attn_hw_reg nig_prty4_bb_a0 = {
+ 3, 14, nig_prty4_bb_a0_attn_idx, 0x500230, 0x50023c, 0x500238, 0x500234
+};
+
+static struct attn_hw_reg *nig_prty_bb_a0_regs[4] = {
+ &nig_prty1_bb_a0, &nig_prty2_bb_a0, &nig_prty3_bb_a0, &nig_prty4_bb_a0,
+};
+
+static const u16 nig_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg nig_prty0_bb_b0 = {
+ 0, 1, nig_prty0_bb_b0_attn_idx, 0x5000a0, 0x5000ac, 0x5000a8, 0x5000a4
+};
+
+static const u16 nig_prty1_bb_b0_attn_idx[31] = {
+ 4, 5, 9, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+};
+
+static struct attn_hw_reg nig_prty1_bb_b0 = {
+ 1, 31, nig_prty1_bb_b0_attn_idx, 0x500200, 0x50020c, 0x500208, 0x500204
+};
+
+static const u16 nig_prty2_bb_b0_attn_idx[31] = {
+ 90, 91, 64, 63, 65, 8, 11, 10, 13, 12, 66, 14, 17, 18, 19, 20, 21, 22,
+ 23,
+ 7, 6, 24, 25, 26, 27, 28, 29, 15, 16, 92, 93,
+};
+
+static struct attn_hw_reg nig_prty2_bb_b0 = {
+ 2, 31, nig_prty2_bb_b0_attn_idx, 0x500210, 0x50021c, 0x500218, 0x500214
+};
+
+static const u16 nig_prty3_bb_b0_attn_idx[31] = {
+ 94, 95, 96, 97, 99, 100, 103, 104, 105, 62, 108, 109, 80, 31, 1, 67, 60,
+ 69, 83, 84, 2, 3, 110, 61, 68, 70, 85, 86, 111, 112, 89,
+};
+
+static struct attn_hw_reg nig_prty3_bb_b0 = {
+ 3, 31, nig_prty3_bb_b0_attn_idx, 0x500220, 0x50022c, 0x500228, 0x500224
+};
+
+static const u16 nig_prty4_bb_b0_attn_idx[17] = {
+ 106, 107, 87, 88, 81, 82, 101, 102, 75, 71, 74, 76, 77, 78, 79, 73, 72,
+};
+
+static struct attn_hw_reg nig_prty4_bb_b0 = {
+ 4, 17, nig_prty4_bb_b0_attn_idx, 0x500230, 0x50023c, 0x500238, 0x500234
+};
+
+static struct attn_hw_reg *nig_prty_bb_b0_regs[5] = {
+ &nig_prty0_bb_b0, &nig_prty1_bb_b0, &nig_prty2_bb_b0, &nig_prty3_bb_b0,
+ &nig_prty4_bb_b0,
+};
+
+static const u16 nig_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg nig_prty0_k2 = {
+ 0, 1, nig_prty0_k2_attn_idx, 0x5000e0, 0x5000ec, 0x5000e8, 0x5000e4
+};
+
+static const u16 nig_prty1_k2_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg nig_prty1_k2 = {
+ 1, 31, nig_prty1_k2_attn_idx, 0x500200, 0x50020c, 0x500208, 0x500204
+};
+
+static const u16 nig_prty2_k2_attn_idx[31] = {
+ 67, 60, 61, 68, 32, 33, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+ 37, 36, 81, 82, 83, 84, 85, 86, 48, 49, 87, 88, 89,
+};
+
+static struct attn_hw_reg nig_prty2_k2 = {
+ 2, 31, nig_prty2_k2_attn_idx, 0x500210, 0x50021c, 0x500218, 0x500214
+};
+
+static const u16 nig_prty3_k2_attn_idx[31] = {
+ 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 92, 93, 105, 62, 106,
+ 107, 108, 109, 59, 90, 91, 64, 55, 41, 42, 43, 63, 65, 35, 34,
+};
+
+static struct attn_hw_reg nig_prty3_k2 = {
+ 3, 31, nig_prty3_k2_attn_idx, 0x500220, 0x50022c, 0x500228, 0x500224
+};
+
+static const u16 nig_prty4_k2_attn_idx[14] = {
+ 44, 45, 46, 47, 40, 50, 66, 56, 57, 58, 51, 52, 53, 54,
+};
+
+static struct attn_hw_reg nig_prty4_k2 = {
+ 4, 14, nig_prty4_k2_attn_idx, 0x500230, 0x50023c, 0x500238, 0x500234
+};
+
+static struct attn_hw_reg *nig_prty_k2_regs[5] = {
+ &nig_prty0_k2, &nig_prty1_k2, &nig_prty2_k2, &nig_prty3_k2,
+ &nig_prty4_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *wol_int_attn_desc[1] = {
+ "wol_address_error",
+};
+#else
+#define wol_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 wol_int0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg wol_int0_k2 = {
+ 0, 1, wol_int0_k2_attn_idx, 0x600040, 0x60004c, 0x600048, 0x600044
+};
+
+static struct attn_hw_reg *wol_int_k2_regs[1] = {
+ &wol_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *wol_prty_attn_desc[24] = {
+ "wol_mem017_i_mem_prty",
+ "wol_mem018_i_mem_prty",
+ "wol_mem019_i_mem_prty",
+ "wol_mem020_i_mem_prty",
+ "wol_mem021_i_mem_prty",
+ "wol_mem022_i_mem_prty",
+ "wol_mem023_i_mem_prty",
+ "wol_mem024_i_mem_prty",
+ "wol_mem001_i_mem_prty",
+ "wol_mem008_i_mem_prty",
+ "wol_mem009_i_mem_prty",
+ "wol_mem010_i_mem_prty",
+ "wol_mem011_i_mem_prty",
+ "wol_mem012_i_mem_prty",
+ "wol_mem013_i_mem_prty",
+ "wol_mem014_i_mem_prty",
+ "wol_mem015_i_mem_prty",
+ "wol_mem016_i_mem_prty",
+ "wol_mem002_i_mem_prty",
+ "wol_mem003_i_mem_prty",
+ "wol_mem004_i_mem_prty",
+ "wol_mem005_i_mem_prty",
+ "wol_mem006_i_mem_prty",
+ "wol_mem007_i_mem_prty",
+};
+#else
+#define wol_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 wol_prty1_k2_attn_idx[24] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23,
+};
+
+static struct attn_hw_reg wol_prty1_k2 = {
+ 0, 24, wol_prty1_k2_attn_idx, 0x600200, 0x60020c, 0x600208, 0x600204
+};
+
+static struct attn_hw_reg *wol_prty_k2_regs[1] = {
+ &wol_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *bmbn_int_attn_desc[1] = {
+ "bmbn_address_error",
+};
+#else
+#define bmbn_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 bmbn_int0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg bmbn_int0_k2 = {
+ 0, 1, bmbn_int0_k2_attn_idx, 0x610040, 0x61004c, 0x610048, 0x610044
+};
+
+static struct attn_hw_reg *bmbn_int_k2_regs[1] = {
+ &bmbn_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ipc_int_attn_desc[14] = {
+ "ipc_address_error",
+ "ipc_unused_0",
+ "ipc_vmain_por_assert",
+ "ipc_vmain_por_deassert",
+ "ipc_perst_assert",
+ "ipc_perst_deassert",
+ "ipc_otp_ecc_ded_0",
+ "ipc_otp_ecc_ded_1",
+ "ipc_otp_ecc_ded_2",
+ "ipc_otp_ecc_ded_3",
+ "ipc_otp_ecc_ded_4",
+ "ipc_otp_ecc_ded_5",
+ "ipc_otp_ecc_ded_6",
+ "ipc_otp_ecc_ded_7",
+};
+#else
+#define ipc_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ipc_int0_bb_a0_attn_idx[5] = {
+ 0, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg ipc_int0_bb_a0 = {
+ 0, 5, ipc_int0_bb_a0_attn_idx, 0x2050c, 0x20518, 0x20514, 0x20510
+};
+
+static struct attn_hw_reg *ipc_int_bb_a0_regs[1] = {
+ &ipc_int0_bb_a0,
+};
+
+static const u16 ipc_int0_bb_b0_attn_idx[13] = {
+ 0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+};
+
+static struct attn_hw_reg ipc_int0_bb_b0 = {
+ 0, 13, ipc_int0_bb_b0_attn_idx, 0x2050c, 0x20518, 0x20514, 0x20510
+};
+
+static struct attn_hw_reg *ipc_int_bb_b0_regs[1] = {
+ &ipc_int0_bb_b0,
+};
+
+static const u16 ipc_int0_k2_attn_idx[5] = {
+ 0, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg ipc_int0_k2 = {
+ 0, 5, ipc_int0_k2_attn_idx, 0x202dc, 0x202e8, 0x202e4, 0x202e0
+};
+
+static struct attn_hw_reg *ipc_int_k2_regs[1] = {
+ &ipc_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ipc_prty_attn_desc[1] = {
+ "ipc_fake_par_err",
+};
+#else
+#define ipc_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 ipc_prty0_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg ipc_prty0_bb_a0 = {
+ 0, 1, ipc_prty0_bb_a0_attn_idx, 0x2051c, 0x20528, 0x20524, 0x20520
+};
+
+static struct attn_hw_reg *ipc_prty_bb_a0_regs[1] = {
+ &ipc_prty0_bb_a0,
+};
+
+static const u16 ipc_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg ipc_prty0_bb_b0 = {
+ 0, 1, ipc_prty0_bb_b0_attn_idx, 0x2051c, 0x20528, 0x20524, 0x20520
+};
+
+static struct attn_hw_reg *ipc_prty_bb_b0_regs[1] = {
+ &ipc_prty0_bb_b0,
+};
+
+#ifdef ATTN_DESC
+static const char *nwm_int_attn_desc[18] = {
+ "nwm_address_error",
+ "nwm_tx_overflow_0",
+ "nwm_tx_underflow_0",
+ "nwm_tx_overflow_1",
+ "nwm_tx_underflow_1",
+ "nwm_tx_overflow_2",
+ "nwm_tx_underflow_2",
+ "nwm_tx_overflow_3",
+ "nwm_tx_underflow_3",
+ "nwm_unused_0",
+ "nwm_ln0_at_10M",
+ "nwm_ln0_at_100M",
+ "nwm_ln1_at_10M",
+ "nwm_ln1_at_100M",
+ "nwm_ln2_at_10M",
+ "nwm_ln2_at_100M",
+ "nwm_ln3_at_10M",
+ "nwm_ln3_at_100M",
+};
+#else
+#define nwm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 nwm_int0_k2_attn_idx[17] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg nwm_int0_k2 = {
+ 0, 17, nwm_int0_k2_attn_idx, 0x800004, 0x800010, 0x80000c, 0x800008
+};
+
+static struct attn_hw_reg *nwm_int_k2_regs[1] = {
+ &nwm_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *nwm_prty_attn_desc[72] = {
+ "nwm_mem020_i_mem_prty",
+ "nwm_mem028_i_mem_prty",
+ "nwm_mem036_i_mem_prty",
+ "nwm_mem044_i_mem_prty",
+ "nwm_mem023_i_mem_prty",
+ "nwm_mem031_i_mem_prty",
+ "nwm_mem039_i_mem_prty",
+ "nwm_mem047_i_mem_prty",
+ "nwm_mem024_i_mem_prty",
+ "nwm_mem032_i_mem_prty",
+ "nwm_mem040_i_mem_prty",
+ "nwm_mem048_i_mem_prty",
+ "nwm_mem018_i_mem_prty",
+ "nwm_mem026_i_mem_prty",
+ "nwm_mem034_i_mem_prty",
+ "nwm_mem042_i_mem_prty",
+ "nwm_mem017_i_mem_prty",
+ "nwm_mem025_i_mem_prty",
+ "nwm_mem033_i_mem_prty",
+ "nwm_mem041_i_mem_prty",
+ "nwm_mem021_i_mem_prty",
+ "nwm_mem029_i_mem_prty",
+ "nwm_mem037_i_mem_prty",
+ "nwm_mem045_i_mem_prty",
+ "nwm_mem019_i_mem_prty",
+ "nwm_mem027_i_mem_prty",
+ "nwm_mem035_i_mem_prty",
+ "nwm_mem043_i_mem_prty",
+ "nwm_mem022_i_mem_prty",
+ "nwm_mem030_i_mem_prty",
+ "nwm_mem038_i_mem_prty",
+ "nwm_mem046_i_mem_prty",
+ "nwm_mem057_i_mem_prty",
+ "nwm_mem059_i_mem_prty",
+ "nwm_mem061_i_mem_prty",
+ "nwm_mem063_i_mem_prty",
+ "nwm_mem058_i_mem_prty",
+ "nwm_mem060_i_mem_prty",
+ "nwm_mem062_i_mem_prty",
+ "nwm_mem064_i_mem_prty",
+ "nwm_mem009_i_mem_prty",
+ "nwm_mem010_i_mem_prty",
+ "nwm_mem011_i_mem_prty",
+ "nwm_mem012_i_mem_prty",
+ "nwm_mem013_i_mem_prty",
+ "nwm_mem014_i_mem_prty",
+ "nwm_mem015_i_mem_prty",
+ "nwm_mem016_i_mem_prty",
+ "nwm_mem001_i_mem_prty",
+ "nwm_mem002_i_mem_prty",
+ "nwm_mem003_i_mem_prty",
+ "nwm_mem004_i_mem_prty",
+ "nwm_mem005_i_mem_prty",
+ "nwm_mem006_i_mem_prty",
+ "nwm_mem007_i_mem_prty",
+ "nwm_mem008_i_mem_prty",
+ "nwm_mem049_i_mem_prty",
+ "nwm_mem053_i_mem_prty",
+ "nwm_mem050_i_mem_prty",
+ "nwm_mem054_i_mem_prty",
+ "nwm_mem051_i_mem_prty",
+ "nwm_mem055_i_mem_prty",
+ "nwm_mem052_i_mem_prty",
+ "nwm_mem056_i_mem_prty",
+ "nwm_mem066_i_mem_prty",
+ "nwm_mem068_i_mem_prty",
+ "nwm_mem070_i_mem_prty",
+ "nwm_mem072_i_mem_prty",
+ "nwm_mem065_i_mem_prty",
+ "nwm_mem067_i_mem_prty",
+ "nwm_mem069_i_mem_prty",
+ "nwm_mem071_i_mem_prty",
+};
+#else
+#define nwm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 nwm_prty1_k2_attn_idx[31] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+};
+
+static struct attn_hw_reg nwm_prty1_k2 = {
+ 0, 31, nwm_prty1_k2_attn_idx, 0x800200, 0x80020c, 0x800208, 0x800204
+};
+
+static const u16 nwm_prty2_k2_attn_idx[31] = {
+ 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+};
+
+static struct attn_hw_reg nwm_prty2_k2 = {
+ 1, 31, nwm_prty2_k2_attn_idx, 0x800210, 0x80021c, 0x800218, 0x800214
+};
+
+static const u16 nwm_prty3_k2_attn_idx[10] = {
+ 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
+};
+
+static struct attn_hw_reg nwm_prty3_k2 = {
+ 2, 10, nwm_prty3_k2_attn_idx, 0x800220, 0x80022c, 0x800228, 0x800224
+};
+
+static struct attn_hw_reg *nwm_prty_k2_regs[3] = {
+ &nwm_prty1_k2, &nwm_prty2_k2, &nwm_prty3_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *nws_int_attn_desc[38] = {
+ "nws_address_error",
+ "nws_ln0_an_resolve_50g_cr2",
+ "nws_ln0_an_resolve_50g_kr2",
+ "nws_ln0_an_resolve_40g_cr4",
+ "nws_ln0_an_resolve_40g_kr4",
+ "nws_ln0_an_resolve_25g_gr",
+ "nws_ln0_an_resolve_25g_cr",
+ "nws_ln0_an_resolve_25g_kr",
+ "nws_ln0_an_resolve_10g_kr",
+ "nws_ln0_an_resolve_1g_kx",
+ "nws_unused_0",
+ "nws_ln1_an_resolve_50g_cr2",
+ "nws_ln1_an_resolve_50g_kr2",
+ "nws_ln1_an_resolve_40g_cr4",
+ "nws_ln1_an_resolve_40g_kr4",
+ "nws_ln1_an_resolve_25g_gr",
+ "nws_ln1_an_resolve_25g_cr",
+ "nws_ln1_an_resolve_25g_kr",
+ "nws_ln1_an_resolve_10g_kr",
+ "nws_ln1_an_resolve_1g_kx",
+ "nws_ln2_an_resolve_50g_cr2",
+ "nws_ln2_an_resolve_50g_kr2",
+ "nws_ln2_an_resolve_40g_cr4",
+ "nws_ln2_an_resolve_40g_kr4",
+ "nws_ln2_an_resolve_25g_gr",
+ "nws_ln2_an_resolve_25g_cr",
+ "nws_ln2_an_resolve_25g_kr",
+ "nws_ln2_an_resolve_10g_kr",
+ "nws_ln2_an_resolve_1g_kx",
+ "nws_ln3_an_resolve_50g_cr2",
+ "nws_ln3_an_resolve_50g_kr2",
+ "nws_ln3_an_resolve_40g_cr4",
+ "nws_ln3_an_resolve_40g_kr4",
+ "nws_ln3_an_resolve_25g_gr",
+ "nws_ln3_an_resolve_25g_cr",
+ "nws_ln3_an_resolve_25g_kr",
+ "nws_ln3_an_resolve_10g_kr",
+ "nws_ln3_an_resolve_1g_kx",
+};
+#else
+#define nws_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 nws_int0_k2_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg nws_int0_k2 = {
+ 0, 10, nws_int0_k2_attn_idx, 0x700180, 0x70018c, 0x700188, 0x700184
+};
+
+static const u16 nws_int1_k2_attn_idx[9] = {
+ 11, 12, 13, 14, 15, 16, 17, 18, 19,
+};
+
+static struct attn_hw_reg nws_int1_k2 = {
+ 1, 9, nws_int1_k2_attn_idx, 0x700190, 0x70019c, 0x700198, 0x700194
+};
+
+static const u16 nws_int2_k2_attn_idx[9] = {
+ 20, 21, 22, 23, 24, 25, 26, 27, 28,
+};
+
+static struct attn_hw_reg nws_int2_k2 = {
+ 2, 9, nws_int2_k2_attn_idx, 0x7001a0, 0x7001ac, 0x7001a8, 0x7001a4
+};
+
+static const u16 nws_int3_k2_attn_idx[9] = {
+ 29, 30, 31, 32, 33, 34, 35, 36, 37,
+};
+
+static struct attn_hw_reg nws_int3_k2 = {
+ 3, 9, nws_int3_k2_attn_idx, 0x7001b0, 0x7001bc, 0x7001b8, 0x7001b4
+};
+
+static struct attn_hw_reg *nws_int_k2_regs[4] = {
+ &nws_int0_k2, &nws_int1_k2, &nws_int2_k2, &nws_int3_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *nws_prty_attn_desc[4] = {
+ "nws_mem003_i_mem_prty",
+ "nws_mem001_i_mem_prty",
+ "nws_mem004_i_mem_prty",
+ "nws_mem002_i_mem_prty",
+};
+#else
+#define nws_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 nws_prty1_k2_attn_idx[4] = {
+ 0, 1, 2, 3,
+};
+
+static struct attn_hw_reg nws_prty1_k2 = {
+ 0, 4, nws_prty1_k2_attn_idx, 0x700200, 0x70020c, 0x700208, 0x700204
+};
+
+static struct attn_hw_reg *nws_prty_k2_regs[1] = {
+ &nws_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ms_int_attn_desc[1] = {
+ "ms_address_error",
+};
+#else
+#define ms_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ms_int0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg ms_int0_k2 = {
+ 0, 1, ms_int0_k2_attn_idx, 0x6a0180, 0x6a018c, 0x6a0188, 0x6a0184
+};
+
+static struct attn_hw_reg *ms_int_k2_regs[1] = {
+ &ms_int0_k2,
+};
+
+static struct attn_hw_block attn_blocks[] = {
+ {"grc", grc_int_attn_desc, grc_prty_attn_desc, {
+ {1, 1,
+ grc_int_bb_a0_regs,
+ grc_prty_bb_a0_regs},
+ {1, 1,
+ grc_int_bb_b0_regs,
+ grc_prty_bb_b0_regs},
+ {1, 1, grc_int_k2_regs,
+ grc_prty_k2_regs} } },
+ {"miscs", miscs_int_attn_desc, miscs_prty_attn_desc, {
+ {2, 0,
+
+ miscs_int_bb_a0_regs,
+ OSAL_NULL},
+ {2, 1,
+
+ miscs_int_bb_b0_regs,
+
+ miscs_prty_bb_b0_regs},
+ {1, 1,
+
+ miscs_int_k2_regs,
+
+ miscs_prty_k2_regs } } },
+ {"misc", misc_int_attn_desc, OSAL_NULL, {
+ {1, 0, misc_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 0, misc_int_bb_b0_regs,
+ OSAL_NULL},
+ {1, 0, misc_int_k2_regs,
+ OSAL_NULL } } },
+ {"dbu", OSAL_NULL, OSAL_NULL, {
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL } } },
+ {"pglue_b", pglue_b_int_attn_desc, pglue_b_prty_attn_desc, {
+ {1, 1,
+
+ pglue_b_int_bb_a0_regs,
+
+ pglue_b_prty_bb_a0_regs},
+ {1, 2,
+
+ pglue_b_int_bb_b0_regs,
+
+ pglue_b_prty_bb_b0_regs},
+ {1, 3,
+
+ pglue_b_int_k2_regs,
+
+ pglue_b_prty_k2_regs } } },
+ {"cnig", cnig_int_attn_desc, cnig_prty_attn_desc, {
+ {1, 0,
+ cnig_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 1,
+ cnig_int_bb_b0_regs,
+
+ cnig_prty_bb_b0_regs},
+ {1, 1,
+ cnig_int_k2_regs,
+
+ cnig_prty_k2_regs } } },
+ {"cpmu", cpmu_int_attn_desc, OSAL_NULL, {
+ {1, 0, cpmu_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 0, cpmu_int_bb_b0_regs,
+ OSAL_NULL},
+ {1, 0, cpmu_int_k2_regs,
+ OSAL_NULL } } },
+ {"ncsi", ncsi_int_attn_desc, ncsi_prty_attn_desc, {
+ {1, 1,
+ ncsi_int_bb_a0_regs,
+
+ ncsi_prty_bb_a0_regs},
+ {1, 1,
+ ncsi_int_bb_b0_regs,
+
+ ncsi_prty_bb_b0_regs},
+ {1, 1,
+ ncsi_int_k2_regs,
+
+ ncsi_prty_k2_regs } } },
+ {"opte", OSAL_NULL, opte_prty_attn_desc, {
+ {0, 1, OSAL_NULL,
+ opte_prty_bb_a0_regs},
+ {0, 2, OSAL_NULL,
+ opte_prty_bb_b0_regs},
+ {0, 2, OSAL_NULL,
+ opte_prty_k2_regs } } },
+ {"bmb", bmb_int_attn_desc, bmb_prty_attn_desc, {
+ {12, 2,
+ bmb_int_bb_a0_regs,
+ bmb_prty_bb_a0_regs},
+ {12, 3,
+ bmb_int_bb_b0_regs,
+ bmb_prty_bb_b0_regs},
+ {12, 3, bmb_int_k2_regs,
+ bmb_prty_k2_regs } } },
+ {"pcie", pcie_int_attn_desc, pcie_prty_attn_desc, {
+ {0, 1, OSAL_NULL,
+
+ pcie_prty_bb_a0_regs},
+ {0, 1, OSAL_NULL,
+
+ pcie_prty_bb_b0_regs},
+ {1, 2,
+ pcie_int_k2_regs,
+
+ pcie_prty_k2_regs } } },
+ {"mcp", OSAL_NULL, OSAL_NULL, {
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL } } },
+ {"mcp2", OSAL_NULL, mcp2_prty_attn_desc, {
+ {0, 2, OSAL_NULL,
+ mcp2_prty_bb_a0_regs},
+ {0, 2, OSAL_NULL,
+ mcp2_prty_bb_b0_regs},
+ {0, 2, OSAL_NULL,
+ mcp2_prty_k2_regs } } },
+ {"pswhst", pswhst_int_attn_desc, pswhst_prty_attn_desc, {
+ {1, 1,
+
+ pswhst_int_bb_a0_regs,
+
+ pswhst_prty_bb_a0_regs},
+ {1, 2,
+
+ pswhst_int_bb_b0_regs,
+
+ pswhst_prty_bb_b0_regs},
+ {1, 2,
+
+ pswhst_int_k2_regs,
+
+ pswhst_prty_k2_regs } } },
+ {"pswhst2", pswhst2_int_attn_desc, pswhst2_prty_attn_desc, {
+ {1, 0,
+
+ pswhst2_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 1,
+
+ pswhst2_int_bb_b0_regs,
+
+ pswhst2_prty_bb_b0_regs},
+ {1, 1,
+
+ pswhst2_int_k2_regs,
+
+ pswhst2_prty_k2_regs } } },
+ {"pswrd", pswrd_int_attn_desc, pswrd_prty_attn_desc, {
+ {1, 0,
+
+ pswrd_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 1,
+
+ pswrd_int_bb_b0_regs,
+
+ pswrd_prty_bb_b0_regs},
+ {1, 1,
+
+ pswrd_int_k2_regs,
+
+ pswrd_prty_k2_regs } } },
+ {"pswrd2", pswrd2_int_attn_desc, pswrd2_prty_attn_desc, {
+ {1, 2,
+
+ pswrd2_int_bb_a0_regs,
+
+ pswrd2_prty_bb_a0_regs},
+ {1, 3,
+
+ pswrd2_int_bb_b0_regs,
+
+ pswrd2_prty_bb_b0_regs},
+ {1, 3,
+
+ pswrd2_int_k2_regs,
+
+ pswrd2_prty_k2_regs } } },
+ {"pswwr", pswwr_int_attn_desc, pswwr_prty_attn_desc, {
+ {1, 0,
+
+ pswwr_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 1,
+
+ pswwr_int_bb_b0_regs,
+
+ pswwr_prty_bb_b0_regs},
+ {1, 1,
+
+ pswwr_int_k2_regs,
+
+ pswwr_prty_k2_regs } } },
+ {"pswwr2", pswwr2_int_attn_desc, pswwr2_prty_attn_desc, {
+ {1, 4,
+
+ pswwr2_int_bb_a0_regs,
+
+ pswwr2_prty_bb_a0_regs},
+ {1, 5,
+
+ pswwr2_int_bb_b0_regs,
+
+ pswwr2_prty_bb_b0_regs},
+ {1, 5,
+
+ pswwr2_int_k2_regs,
+
+ pswwr2_prty_k2_regs } } },
+ {"pswrq", pswrq_int_attn_desc, pswrq_prty_attn_desc, {
+ {1, 0,
+
+ pswrq_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 1,
+
+ pswrq_int_bb_b0_regs,
+
+ pswrq_prty_bb_b0_regs},
+ {1, 1,
+
+ pswrq_int_k2_regs,
+
+ pswrq_prty_k2_regs } } },
+ {"pswrq2", pswrq2_int_attn_desc, pswrq2_prty_attn_desc, {
+ {1, 1,
+
+ pswrq2_int_bb_a0_regs,
+
+ pswrq2_prty_bb_a0_regs},
+ {1, 1,
+
+ pswrq2_int_bb_b0_regs,
+
+ pswrq2_prty_bb_b0_regs},
+ {1, 1,
+
+ pswrq2_int_k2_regs,
+
+ pswrq2_prty_k2_regs } } },
+ {"pglcs", pglcs_int_attn_desc, OSAL_NULL, {
+ {1, 0, pglcs_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 0, pglcs_int_bb_b0_regs,
+ OSAL_NULL},
+ {1, 0, pglcs_int_k2_regs,
+ OSAL_NULL } } },
+ {"dmae", dmae_int_attn_desc, dmae_prty_attn_desc, {
+ {1, 1,
+ dmae_int_bb_a0_regs,
+
+ dmae_prty_bb_a0_regs},
+ {1, 1,
+ dmae_int_bb_b0_regs,
+
+ dmae_prty_bb_b0_regs},
+ {1, 1,
+ dmae_int_k2_regs,
+
+ dmae_prty_k2_regs } } },
+ {"ptu", ptu_int_attn_desc, ptu_prty_attn_desc, {
+ {1, 1,
+ ptu_int_bb_a0_regs,
+ ptu_prty_bb_a0_regs},
+ {1, 1,
+ ptu_int_bb_b0_regs,
+ ptu_prty_bb_b0_regs},
+ {1, 1, ptu_int_k2_regs,
+ ptu_prty_k2_regs } } },
+ {"tcm", tcm_int_attn_desc, tcm_prty_attn_desc, {
+ {3, 2,
+ tcm_int_bb_a0_regs,
+ tcm_prty_bb_a0_regs},
+ {3, 2,
+ tcm_int_bb_b0_regs,
+ tcm_prty_bb_b0_regs},
+ {3, 2, tcm_int_k2_regs,
+ tcm_prty_k2_regs } } },
+ {"mcm", mcm_int_attn_desc, mcm_prty_attn_desc, {
+ {3, 2,
+ mcm_int_bb_a0_regs,
+ mcm_prty_bb_a0_regs},
+ {3, 2,
+ mcm_int_bb_b0_regs,
+ mcm_prty_bb_b0_regs},
+ {3, 2, mcm_int_k2_regs,
+ mcm_prty_k2_regs } } },
+ {"ucm", ucm_int_attn_desc, ucm_prty_attn_desc, {
+ {3, 2,
+ ucm_int_bb_a0_regs,
+ ucm_prty_bb_a0_regs},
+ {3, 2,
+ ucm_int_bb_b0_regs,
+ ucm_prty_bb_b0_regs},
+ {3, 2, ucm_int_k2_regs,
+ ucm_prty_k2_regs } } },
+ {"xcm", xcm_int_attn_desc, xcm_prty_attn_desc, {
+ {3, 2,
+ xcm_int_bb_a0_regs,
+ xcm_prty_bb_a0_regs},
+ {3, 2,
+ xcm_int_bb_b0_regs,
+ xcm_prty_bb_b0_regs},
+ {3, 2, xcm_int_k2_regs,
+ xcm_prty_k2_regs } } },
+ {"ycm", ycm_int_attn_desc, ycm_prty_attn_desc, {
+ {3, 2,
+ ycm_int_bb_a0_regs,
+ ycm_prty_bb_a0_regs},
+ {3, 2,
+ ycm_int_bb_b0_regs,
+ ycm_prty_bb_b0_regs},
+ {3, 2, ycm_int_k2_regs,
+ ycm_prty_k2_regs } } },
+ {"pcm", pcm_int_attn_desc, pcm_prty_attn_desc, {
+ {3, 1,
+ pcm_int_bb_a0_regs,
+ pcm_prty_bb_a0_regs},
+ {3, 1,
+ pcm_int_bb_b0_regs,
+ pcm_prty_bb_b0_regs},
+ {3, 1, pcm_int_k2_regs,
+ pcm_prty_k2_regs } } },
+ {"qm", qm_int_attn_desc, qm_prty_attn_desc, {
+ {1, 4, qm_int_bb_a0_regs,
+ qm_prty_bb_a0_regs},
+ {1, 4, qm_int_bb_b0_regs,
+ qm_prty_bb_b0_regs},
+ {1, 4, qm_int_k2_regs,
+ qm_prty_k2_regs } } },
+ {"tm", tm_int_attn_desc, tm_prty_attn_desc, {
+ {2, 1, tm_int_bb_a0_regs,
+ tm_prty_bb_a0_regs},
+ {2, 1, tm_int_bb_b0_regs,
+ tm_prty_bb_b0_regs},
+ {2, 1, tm_int_k2_regs,
+ tm_prty_k2_regs } } },
+ {"dorq", dorq_int_attn_desc, dorq_prty_attn_desc, {
+ {1, 1,
+ dorq_int_bb_a0_regs,
+
+ dorq_prty_bb_a0_regs},
+ {1, 2,
+ dorq_int_bb_b0_regs,
+
+ dorq_prty_bb_b0_regs},
+ {1, 2,
+ dorq_int_k2_regs,
+
+ dorq_prty_k2_regs } } },
+ {"brb", brb_int_attn_desc, brb_prty_attn_desc, {
+ {12, 2,
+ brb_int_bb_a0_regs,
+ brb_prty_bb_a0_regs},
+ {12, 3,
+ brb_int_bb_b0_regs,
+ brb_prty_bb_b0_regs},
+ {12, 3, brb_int_k2_regs,
+ brb_prty_k2_regs } } },
+ {"src", src_int_attn_desc, OSAL_NULL, {
+ {1, 0, src_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 0, src_int_bb_b0_regs,
+ OSAL_NULL},
+ {1, 0, src_int_k2_regs,
+ OSAL_NULL } } },
+ {"prs", prs_int_attn_desc, prs_prty_attn_desc, {
+ {1, 3,
+ prs_int_bb_a0_regs,
+ prs_prty_bb_a0_regs},
+ {1, 3,
+ prs_int_bb_b0_regs,
+ prs_prty_bb_b0_regs},
+ {1, 3, prs_int_k2_regs,
+ prs_prty_k2_regs } } },
+ {"tsdm", tsdm_int_attn_desc, tsdm_prty_attn_desc, {
+ {1, 1,
+ tsdm_int_bb_a0_regs,
+
+ tsdm_prty_bb_a0_regs},
+ {1, 1,
+ tsdm_int_bb_b0_regs,
+
+ tsdm_prty_bb_b0_regs},
+ {1, 1,
+ tsdm_int_k2_regs,
+
+ tsdm_prty_k2_regs } } },
+ {"msdm", msdm_int_attn_desc, msdm_prty_attn_desc, {
+ {1, 1,
+ msdm_int_bb_a0_regs,
+
+ msdm_prty_bb_a0_regs},
+ {1, 1,
+ msdm_int_bb_b0_regs,
+
+ msdm_prty_bb_b0_regs},
+ {1, 1,
+ msdm_int_k2_regs,
+
+ msdm_prty_k2_regs } } },
+ {"usdm", usdm_int_attn_desc, usdm_prty_attn_desc, {
+ {1, 1,
+ usdm_int_bb_a0_regs,
+
+ usdm_prty_bb_a0_regs},
+ {1, 1,
+ usdm_int_bb_b0_regs,
+
+ usdm_prty_bb_b0_regs},
+ {1, 1,
+ usdm_int_k2_regs,
+
+ usdm_prty_k2_regs } } },
+ {"xsdm", xsdm_int_attn_desc, xsdm_prty_attn_desc, {
+ {1, 1,
+ xsdm_int_bb_a0_regs,
+
+ xsdm_prty_bb_a0_regs},
+ {1, 1,
+ xsdm_int_bb_b0_regs,
+
+ xsdm_prty_bb_b0_regs},
+ {1, 1,
+ xsdm_int_k2_regs,
+
+ xsdm_prty_k2_regs } } },
+ {"ysdm", ysdm_int_attn_desc, ysdm_prty_attn_desc, {
+ {1, 1,
+ ysdm_int_bb_a0_regs,
+
+ ysdm_prty_bb_a0_regs},
+ {1, 1,
+ ysdm_int_bb_b0_regs,
+
+ ysdm_prty_bb_b0_regs},
+ {1, 1,
+ ysdm_int_k2_regs,
+
+ ysdm_prty_k2_regs } } },
+ {"psdm", psdm_int_attn_desc, psdm_prty_attn_desc, {
+ {1, 1,
+ psdm_int_bb_a0_regs,
+
+ psdm_prty_bb_a0_regs},
+ {1, 1,
+ psdm_int_bb_b0_regs,
+
+ psdm_prty_bb_b0_regs},
+ {1, 1,
+ psdm_int_k2_regs,
+
+ psdm_prty_k2_regs } } },
+ {"tsem", tsem_int_attn_desc, tsem_prty_attn_desc, {
+ {3, 3,
+ tsem_int_bb_a0_regs,
+
+ tsem_prty_bb_a0_regs},
+ {3, 3,
+ tsem_int_bb_b0_regs,
+
+ tsem_prty_bb_b0_regs},
+ {3, 4,
+ tsem_int_k2_regs,
+
+ tsem_prty_k2_regs } } },
+ {"msem", msem_int_attn_desc, msem_prty_attn_desc, {
+ {3, 2,
+ msem_int_bb_a0_regs,
+
+ msem_prty_bb_a0_regs},
+ {3, 2,
+ msem_int_bb_b0_regs,
+
+ msem_prty_bb_b0_regs},
+ {3, 3,
+ msem_int_k2_regs,
+
+ msem_prty_k2_regs } } },
+ {"usem", usem_int_attn_desc, usem_prty_attn_desc, {
+ {3, 2,
+ usem_int_bb_a0_regs,
+
+ usem_prty_bb_a0_regs},
+ {3, 2,
+ usem_int_bb_b0_regs,
+
+ usem_prty_bb_b0_regs},
+ {3, 3,
+ usem_int_k2_regs,
+
+ usem_prty_k2_regs } } },
+ {"xsem", xsem_int_attn_desc, xsem_prty_attn_desc, {
+ {3, 2,
+ xsem_int_bb_a0_regs,
+
+ xsem_prty_bb_a0_regs},
+ {3, 2,
+ xsem_int_bb_b0_regs,
+
+ xsem_prty_bb_b0_regs},
+ {3, 3,
+ xsem_int_k2_regs,
+
+ xsem_prty_k2_regs } } },
+ {"ysem", ysem_int_attn_desc, ysem_prty_attn_desc, {
+ {3, 2,
+ ysem_int_bb_a0_regs,
+
+ ysem_prty_bb_a0_regs},
+ {3, 2,
+ ysem_int_bb_b0_regs,
+
+ ysem_prty_bb_b0_regs},
+ {3, 3,
+ ysem_int_k2_regs,
+
+ ysem_prty_k2_regs } } },
+ {"psem", psem_int_attn_desc, psem_prty_attn_desc, {
+ {3, 3,
+ psem_int_bb_a0_regs,
+
+ psem_prty_bb_a0_regs},
+ {3, 3,
+ psem_int_bb_b0_regs,
+
+ psem_prty_bb_b0_regs},
+ {3, 4,
+ psem_int_k2_regs,
+
+ psem_prty_k2_regs } } },
+ {"rss", rss_int_attn_desc, rss_prty_attn_desc, {
+ {1, 1,
+ rss_int_bb_a0_regs,
+ rss_prty_bb_a0_regs},
+ {1, 1,
+ rss_int_bb_b0_regs,
+ rss_prty_bb_b0_regs},
+ {1, 1, rss_int_k2_regs,
+ rss_prty_k2_regs } } },
+ {"tmld", tmld_int_attn_desc, tmld_prty_attn_desc, {
+ {1, 1,
+ tmld_int_bb_a0_regs,
+
+ tmld_prty_bb_a0_regs},
+ {1, 1,
+ tmld_int_bb_b0_regs,
+
+ tmld_prty_bb_b0_regs},
+ {1, 1,
+ tmld_int_k2_regs,
+
+ tmld_prty_k2_regs } } },
+ {"muld", muld_int_attn_desc, muld_prty_attn_desc, {
+ {1, 1,
+ muld_int_bb_a0_regs,
+
+ muld_prty_bb_a0_regs},
+ {1, 1,
+ muld_int_bb_b0_regs,
+
+ muld_prty_bb_b0_regs},
+ {1, 1,
+ muld_int_k2_regs,
+
+ muld_prty_k2_regs } } },
+ {"yuld", yuld_int_attn_desc, yuld_prty_attn_desc, {
+ {1, 1,
+ yuld_int_bb_a0_regs,
+
+ yuld_prty_bb_a0_regs},
+ {1, 1,
+ yuld_int_bb_b0_regs,
+
+ yuld_prty_bb_b0_regs},
+ {1, 1,
+ yuld_int_k2_regs,
+
+ yuld_prty_k2_regs } } },
+ {"xyld", xyld_int_attn_desc, xyld_prty_attn_desc, {
+ {1, 1,
+ xyld_int_bb_a0_regs,
+
+ xyld_prty_bb_a0_regs},
+ {1, 1,
+ xyld_int_bb_b0_regs,
+
+ xyld_prty_bb_b0_regs},
+ {1, 1,
+ xyld_int_k2_regs,
+
+ xyld_prty_k2_regs } } },
+ {"prm", prm_int_attn_desc, prm_prty_attn_desc, {
+ {1, 1,
+ prm_int_bb_a0_regs,
+ prm_prty_bb_a0_regs},
+ {1, 2,
+ prm_int_bb_b0_regs,
+ prm_prty_bb_b0_regs},
+ {1, 2, prm_int_k2_regs,
+ prm_prty_k2_regs } } },
+ {"pbf_pb1", pbf_pb1_int_attn_desc, pbf_pb1_prty_attn_desc, {
+ {1, 0,
+
+ pbf_pb1_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 1,
+
+ pbf_pb1_int_bb_b0_regs,
+
+ pbf_pb1_prty_bb_b0_regs},
+ {1, 1,
+
+ pbf_pb1_int_k2_regs,
+
+ pbf_pb1_prty_k2_regs } } },
+ {"pbf_pb2", pbf_pb2_int_attn_desc, pbf_pb2_prty_attn_desc, {
+ {1, 0,
+
+ pbf_pb2_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 1,
+
+ pbf_pb2_int_bb_b0_regs,
+
+ pbf_pb2_prty_bb_b0_regs},
+ {1, 1,
+
+ pbf_pb2_int_k2_regs,
+
+ pbf_pb2_prty_k2_regs } } },
+ {"rpb", rpb_int_attn_desc, rpb_prty_attn_desc, {
+ {1, 0,
+ rpb_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 1,
+ rpb_int_bb_b0_regs,
+ rpb_prty_bb_b0_regs},
+ {1, 1, rpb_int_k2_regs,
+ rpb_prty_k2_regs } } },
+ {"btb", btb_int_attn_desc, btb_prty_attn_desc, {
+ {11, 1,
+ btb_int_bb_a0_regs,
+ btb_prty_bb_a0_regs},
+ {11, 2,
+ btb_int_bb_b0_regs,
+ btb_prty_bb_b0_regs},
+ {11, 2, btb_int_k2_regs,
+ btb_prty_k2_regs } } },
+ {"pbf", pbf_int_attn_desc, pbf_prty_attn_desc, {
+ {1, 2,
+ pbf_int_bb_a0_regs,
+ pbf_prty_bb_a0_regs},
+ {1, 3,
+ pbf_int_bb_b0_regs,
+ pbf_prty_bb_b0_regs},
+ {1, 3, pbf_int_k2_regs,
+ pbf_prty_k2_regs } } },
+ {"rdif", rdif_int_attn_desc, rdif_prty_attn_desc, {
+ {1, 0,
+ rdif_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 1,
+ rdif_int_bb_b0_regs,
+
+ rdif_prty_bb_b0_regs},
+ {1, 1,
+ rdif_int_k2_regs,
+
+ rdif_prty_k2_regs } } },
+ {"tdif", tdif_int_attn_desc, tdif_prty_attn_desc, {
+ {1, 1,
+ tdif_int_bb_a0_regs,
+
+ tdif_prty_bb_a0_regs},
+ {1, 2,
+ tdif_int_bb_b0_regs,
+
+ tdif_prty_bb_b0_regs},
+ {1, 2,
+ tdif_int_k2_regs,
+
+ tdif_prty_k2_regs } } },
+ {"cdu", cdu_int_attn_desc, cdu_prty_attn_desc, {
+ {1, 1,
+ cdu_int_bb_a0_regs,
+ cdu_prty_bb_a0_regs},
+ {1, 1,
+ cdu_int_bb_b0_regs,
+ cdu_prty_bb_b0_regs},
+ {1, 1, cdu_int_k2_regs,
+ cdu_prty_k2_regs } } },
+ {"ccfc", ccfc_int_attn_desc, ccfc_prty_attn_desc, {
+ {1, 2,
+ ccfc_int_bb_a0_regs,
+
+ ccfc_prty_bb_a0_regs},
+ {1, 2,
+ ccfc_int_bb_b0_regs,
+
+ ccfc_prty_bb_b0_regs},
+ {1, 2,
+ ccfc_int_k2_regs,
+
+ ccfc_prty_k2_regs } } },
+ {"tcfc", tcfc_int_attn_desc, tcfc_prty_attn_desc, {
+ {1, 2,
+ tcfc_int_bb_a0_regs,
+
+ tcfc_prty_bb_a0_regs},
+ {1, 2,
+ tcfc_int_bb_b0_regs,
+
+ tcfc_prty_bb_b0_regs},
+ {1, 2,
+ tcfc_int_k2_regs,
+
+ tcfc_prty_k2_regs } } },
+ {"igu", igu_int_attn_desc, igu_prty_attn_desc, {
+ {1, 3,
+ igu_int_bb_a0_regs,
+ igu_prty_bb_a0_regs},
+ {1, 3,
+ igu_int_bb_b0_regs,
+ igu_prty_bb_b0_regs},
+ {1, 2, igu_int_k2_regs,
+ igu_prty_k2_regs } } },
+ {"cau", cau_int_attn_desc, cau_prty_attn_desc, {
+ {1, 1,
+ cau_int_bb_a0_regs,
+ cau_prty_bb_a0_regs},
+ {1, 1,
+ cau_int_bb_b0_regs,
+ cau_prty_bb_b0_regs},
+ {1, 1, cau_int_k2_regs,
+ cau_prty_k2_regs } } },
+ {"umac", umac_int_attn_desc, OSAL_NULL, {
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {1, 0, umac_int_k2_regs,
+ OSAL_NULL } } },
+ {"xmac", OSAL_NULL, OSAL_NULL, {
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL } } },
+ {"dbg", dbg_int_attn_desc, dbg_prty_attn_desc, {
+ {1, 1,
+ dbg_int_bb_a0_regs,
+ dbg_prty_bb_a0_regs},
+ {1, 1,
+ dbg_int_bb_b0_regs,
+ dbg_prty_bb_b0_regs},
+ {1, 1, dbg_int_k2_regs,
+ dbg_prty_k2_regs } } },
+ {"nig", nig_int_attn_desc, nig_prty_attn_desc, {
+ {6, 4,
+ nig_int_bb_a0_regs,
+ nig_prty_bb_a0_regs},
+ {6, 5,
+ nig_int_bb_b0_regs,
+ nig_prty_bb_b0_regs},
+ {10, 5, nig_int_k2_regs,
+ nig_prty_k2_regs } } },
+ {"wol", wol_int_attn_desc, wol_prty_attn_desc, {
+ {0, 0, OSAL_NULL,
+ OSAL_NULL},
+ {0, 0, OSAL_NULL,
+ OSAL_NULL},
+ {1, 1, wol_int_k2_regs,
+ wol_prty_k2_regs } } },
+ {"bmbn", bmbn_int_attn_desc, OSAL_NULL, {
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {1, 0, bmbn_int_k2_regs,
+ OSAL_NULL } } },
+ {"ipc", ipc_int_attn_desc, ipc_prty_attn_desc, {
+ {1, 1,
+ ipc_int_bb_a0_regs,
+ ipc_prty_bb_a0_regs},
+ {1, 1,
+ ipc_int_bb_b0_regs,
+ ipc_prty_bb_b0_regs},
+ {1, 0, ipc_int_k2_regs,
+ OSAL_NULL } } },
+ {"nwm", nwm_int_attn_desc, nwm_prty_attn_desc, {
+ {0, 0, OSAL_NULL,
+ OSAL_NULL},
+ {0, 0, OSAL_NULL,
+ OSAL_NULL},
+ {1, 3, nwm_int_k2_regs,
+ nwm_prty_k2_regs } } },
+ {"nws", nws_int_attn_desc, nws_prty_attn_desc, {
+ {0, 0, OSAL_NULL,
+ OSAL_NULL},
+ {0, 0, OSAL_NULL,
+ OSAL_NULL},
+ {4, 1, nws_int_k2_regs,
+ nws_prty_k2_regs } } },
+ {"ms", ms_int_attn_desc, OSAL_NULL, {
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {1, 0, ms_int_k2_regs,
+ OSAL_NULL } } },
+ {"phy_pcie", OSAL_NULL, OSAL_NULL, {
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL } } },
+ {"misc_aeu", OSAL_NULL, OSAL_NULL, {
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL } } },
+ {"bar0_map", OSAL_NULL, OSAL_NULL, {
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL } } },
+};
+
+#define NUM_INT_REGS 423
+#define NUM_PRTY_REGS 378
+
+#endif /* __PREVENT_INT_ATTN__ */
+
+#endif /* __ATTN_VALUES_H__ */
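The tables above exist to turn a raised attention bit into a human-readable name: bit i of a given status register maps through that register's *_attn_idx[] array into the owning block's *_attn_desc[] array (which compiles out to OSAL_NULL unless ATTN_DESC is defined). A minimal sketch of that lookup for the igu_prty1_bb_b0 register, assuming the attn_idx entries index the matching description array as the table sizes suggest; the helper itself is hypothetical and not part of the driver:

static const char *igu_prty1_bb_b0_bit_name(unsigned int bit)
{
	/* igu_prty1_bb_b0 covers 31 attention bits (see its attn_idx table) */
	if (bit >= 31)
		return "unknown";
#ifdef ATTN_DESC
	/* bit position -> index into the 42-entry igu_prty_attn_desc array */
	return igu_prty_attn_desc[igu_prty1_bb_b0_attn_idx[bit]];
#else
	return "n/a";	/* description strings compiled out */
#endif
}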
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_chain.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_chain.h
new file mode 100644
index 00000000..ba272a91
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_chain.h
@@ -0,0 +1,765 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_CHAIN_H__
+#define __ECORE_CHAIN_H__
+
+#include <assert.h> /* @DPDK */
+
+#include "common_hsi.h"
+#include "ecore_utils.h"
+
+enum ecore_chain_mode {
+ /* Each Page contains a next pointer at its end */
+ ECORE_CHAIN_MODE_NEXT_PTR,
+
+	/* Chain is a single page; a next pointer is not required */
+ ECORE_CHAIN_MODE_SINGLE,
+
+ /* Page pointers are located in a side list */
+ ECORE_CHAIN_MODE_PBL,
+};
+
+enum ecore_chain_use_mode {
+ ECORE_CHAIN_USE_TO_PRODUCE, /* Chain starts empty */
+ ECORE_CHAIN_USE_TO_CONSUME, /* Chain starts full */
+ ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */
+};
+
+enum ecore_chain_cnt_type {
+ /* The chain's size/prod/cons are kept in 16-bit variables */
+ ECORE_CHAIN_CNT_TYPE_U16,
+
+ /* The chain's size/prod/cons are kept in 32-bit variables */
+ ECORE_CHAIN_CNT_TYPE_U32,
+};
+
+struct ecore_chain_next {
+ struct regpair next_phys;
+ void *next_virt;
+};
+
+struct ecore_chain_pbl_u16 {
+ u16 prod_page_idx;
+ u16 cons_page_idx;
+};
+
+struct ecore_chain_pbl_u32 {
+ u32 prod_page_idx;
+ u32 cons_page_idx;
+};
+
+struct ecore_chain_ext_pbl {
+ dma_addr_t p_pbl_phys;
+ void *p_pbl_virt;
+};
+
+struct ecore_chain_u16 {
+	/* Cyclic index of next element to produce/consume */
+ u16 prod_idx;
+ u16 cons_idx;
+};
+
+struct ecore_chain_u32 {
+	/* Cyclic index of next element to produce/consume */
+ u32 prod_idx;
+ u32 cons_idx;
+};
+
+struct ecore_chain {
+ /* fastpath portion of the chain - required for commands such
+ * as produce / consume.
+ */
+ /* Point to next element to produce/consume */
+ void *p_prod_elem;
+ void *p_cons_elem;
+
+ /* Fastpath portions of the PBL [if exists] */
+
+ struct {
+		/* Table holding the virtual addresses of the chain pages,
+		 * corresponding to the physical addresses in the pbl table.
+ */
+ void **pp_virt_addr_tbl;
+
+ union {
+ struct ecore_chain_pbl_u16 u16;
+ struct ecore_chain_pbl_u32 u32;
+ } c;
+ } pbl;
+
+ union {
+ struct ecore_chain_u16 chain16;
+ struct ecore_chain_u32 chain32;
+ } u;
+
+ /* Capacity counts only usable elements */
+ u32 capacity;
+ u32 page_cnt;
+
+	/* A u8 would suffice for mode, but keeping the enum saves a lot of
+	 * headaches with casts and defaults.
+ */
+ enum ecore_chain_mode mode;
+
+ /* Elements information for fast calculations */
+ u16 elem_per_page;
+ u16 elem_per_page_mask;
+ u16 elem_size;
+ u16 next_page_mask;
+ u16 usable_per_page;
+ u8 elem_unusable;
+
+ u8 cnt_type;
+
+ /* Slowpath of the chain - required for initialization and destruction,
+ * but isn't involved in regular functionality.
+ */
+
+ /* Base address of a pre-allocated buffer for pbl */
+ struct {
+ dma_addr_t p_phys_table;
+ void *p_virt_table;
+ } pbl_sp;
+
+ /* Address of first page of the chain - the address is required
+	 * for fastpath operation [consume/produce] but only for the SINGLE
+ * flavour which isn't considered fastpath [== SPQ].
+ */
+ void *p_virt_addr;
+ dma_addr_t p_phys_addr;
+
+ /* Total number of elements [for entire chain] */
+ u32 size;
+
+ u8 intended_use;
+
+ /* TBD - do we really need this? Couldn't find usage for it */
+ bool b_external_pbl;
+
+ void *dp_ctx;
+};
+
+#define ECORE_CHAIN_PBL_ENTRY_SIZE (8)
+#define ECORE_CHAIN_PAGE_SIZE (0x1000)
+#define ELEMS_PER_PAGE(elem_size) (ECORE_CHAIN_PAGE_SIZE / (elem_size))
+
+#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \
+ ((mode == ECORE_CHAIN_MODE_NEXT_PTR) ? \
+ (u8)(1 + ((sizeof(struct ecore_chain_next) - 1) / \
+ (elem_size))) : 0)
+
+#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
+ ((u32)(ELEMS_PER_PAGE(elem_size) - \
+ UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))
+
+#define ECORE_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
+ DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))
+
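+/* Worked example (illustrative numbers): with elem_size == 8 in
+ * ECORE_CHAIN_MODE_NEXT_PTR, ELEMS_PER_PAGE(8) == 0x1000 / 8 == 512.
+ * Assuming sizeof(struct ecore_chain_next) == 16 on a 64-bit build,
+ * UNUSABLE_ELEMS_PER_PAGE(8, NEXT_PTR) == 1 + (16 - 1) / 8 == 2, so
+ * USABLE_ELEMS_PER_PAGE(8, NEXT_PTR) == 510 and
+ * ECORE_CHAIN_PAGE_CNT(1024, 8, NEXT_PTR) == DIV_ROUND_UP(1024, 510) == 3.
+ */
+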
+#define is_chain_u16(p) ((p)->cnt_type == ECORE_CHAIN_CNT_TYPE_U16)
+#define is_chain_u32(p) ((p)->cnt_type == ECORE_CHAIN_CNT_TYPE_U32)
+
+/* Accessors */
+static OSAL_INLINE u16 ecore_chain_get_prod_idx(struct ecore_chain *p_chain)
+{
+ OSAL_ASSERT(is_chain_u16(p_chain));
+ return p_chain->u.chain16.prod_idx;
+}
+
+static OSAL_INLINE u32 ecore_chain_get_prod_idx_u32(struct ecore_chain *p_chain)
+{
+ OSAL_ASSERT(is_chain_u32(p_chain));
+ return p_chain->u.chain32.prod_idx;
+}
+
+static OSAL_INLINE u16 ecore_chain_get_cons_idx(struct ecore_chain *p_chain)
+{
+ OSAL_ASSERT(is_chain_u16(p_chain));
+ return p_chain->u.chain16.cons_idx;
+}
+
+static OSAL_INLINE u32 ecore_chain_get_cons_idx_u32(struct ecore_chain *p_chain)
+{
+ OSAL_ASSERT(is_chain_u32(p_chain));
+ return p_chain->u.chain32.cons_idx;
+}
+
+/* FIXME:
+ * Should create OSALs for the below definitions.
+ * For Linux, replace them with the existing U16_MAX and U32_MAX, and handle
+ * kernel versions that lack them.
+ */
+#define ECORE_U16_MAX ((u16)~0U)
+#define ECORE_U32_MAX ((u32)~0U)
+
+static OSAL_INLINE u16 ecore_chain_get_elem_left(struct ecore_chain *p_chain)
+{
+ u16 used;
+
+ OSAL_ASSERT(is_chain_u16(p_chain));
+
+ used = (u16)(((u32)ECORE_U16_MAX + 1 +
+ (u32)(p_chain->u.chain16.prod_idx)) -
+ (u32)p_chain->u.chain16.cons_idx);
+ if (p_chain->mode == ECORE_CHAIN_MODE_NEXT_PTR)
+ used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
+ p_chain->u.chain16.cons_idx / p_chain->elem_per_page;
+
+ return (u16)(p_chain->capacity - used);
+}
+
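+/* Worked example (illustrative numbers): for a u16 chain with capacity 1020
+ * in ECORE_CHAIN_MODE_SINGLE, prod_idx == 5 after a wrap-around and
+ * cons_idx == 65533 give used == (u16)((65536 + 5) - 65533) == 8, so
+ * ecore_chain_get_elem_left() reports 1020 - 8 == 1012 free elements.
+ */
+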
+static OSAL_INLINE u32
+ecore_chain_get_elem_left_u32(struct ecore_chain *p_chain)
+{
+ u32 used;
+
+ OSAL_ASSERT(is_chain_u32(p_chain));
+
+ used = (u32)(((u64)ECORE_U32_MAX + 1 +
+ (u64)(p_chain->u.chain32.prod_idx)) -
+ (u64)p_chain->u.chain32.cons_idx);
+ if (p_chain->mode == ECORE_CHAIN_MODE_NEXT_PTR)
+ used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
+ p_chain->u.chain32.cons_idx / p_chain->elem_per_page;
+
+ return p_chain->capacity - used;
+}
+
+static OSAL_INLINE u8 ecore_chain_is_full(struct ecore_chain *p_chain)
+{
+ if (is_chain_u16(p_chain))
+ return (ecore_chain_get_elem_left(p_chain) ==
+ p_chain->capacity);
+ else
+ return (ecore_chain_get_elem_left_u32(p_chain) ==
+ p_chain->capacity);
+}
+
+static OSAL_INLINE u8 ecore_chain_is_empty(struct ecore_chain *p_chain)
+{
+ if (is_chain_u16(p_chain))
+ return (ecore_chain_get_elem_left(p_chain) == 0);
+ else
+ return (ecore_chain_get_elem_left_u32(p_chain) == 0);
+}
+
+static OSAL_INLINE
+u16 ecore_chain_get_elem_per_page(struct ecore_chain *p_chain)
+{
+ return p_chain->elem_per_page;
+}
+
+static OSAL_INLINE
+u16 ecore_chain_get_usable_per_page(struct ecore_chain *p_chain)
+{
+ return p_chain->usable_per_page;
+}
+
+static OSAL_INLINE
+u8 ecore_chain_get_unusable_per_page(struct ecore_chain *p_chain)
+{
+ return p_chain->elem_unusable;
+}
+
+static OSAL_INLINE u32 ecore_chain_get_size(struct ecore_chain *p_chain)
+{
+ return p_chain->size;
+}
+
+static OSAL_INLINE u32 ecore_chain_get_page_cnt(struct ecore_chain *p_chain)
+{
+ return p_chain->page_cnt;
+}
+
+static OSAL_INLINE
+dma_addr_t ecore_chain_get_pbl_phys(struct ecore_chain *p_chain)
+{
+ return p_chain->pbl_sp.p_phys_table;
+}
+
+/**
+ * @brief ecore_chain_advance_page -
+ *
+ * Advance the next element across pages for a linked chain
+ *
+ * @param p_chain
+ * @param p_next_elem
+ * @param idx_to_inc
+ * @param page_to_inc
+ */
+static OSAL_INLINE void
+ecore_chain_advance_page(struct ecore_chain *p_chain, void **p_next_elem,
+ void *idx_to_inc, void *page_to_inc)
+{
+ struct ecore_chain_next *p_next = OSAL_NULL;
+ u32 page_index = 0;
+
+ switch (p_chain->mode) {
+ case ECORE_CHAIN_MODE_NEXT_PTR:
+ p_next = (struct ecore_chain_next *)(*p_next_elem);
+ *p_next_elem = p_next->next_virt;
+ if (is_chain_u16(p_chain))
+ *(u16 *)idx_to_inc += (u16)p_chain->elem_unusable;
+ else
+ *(u32 *)idx_to_inc += (u16)p_chain->elem_unusable;
+ break;
+ case ECORE_CHAIN_MODE_SINGLE:
+ *p_next_elem = p_chain->p_virt_addr;
+ break;
+ case ECORE_CHAIN_MODE_PBL:
+ if (is_chain_u16(p_chain)) {
+ if (++(*(u16 *)page_to_inc) == p_chain->page_cnt)
+ *(u16 *)page_to_inc = 0;
+ page_index = *(u16 *)page_to_inc;
+ } else {
+ if (++(*(u32 *)page_to_inc) == p_chain->page_cnt)
+ *(u32 *)page_to_inc = 0;
+ page_index = *(u32 *)page_to_inc;
+ }
+ *p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index];
+ }
+}
+
+#define is_unusable_idx(p, idx) \
+ (((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
+
+#define is_unusable_idx_u32(p, idx) \
+ (((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
+
+#define is_unusable_next_idx(p, idx) \
+ ((((p)->u.chain16.idx + 1) & \
+ (p)->elem_per_page_mask) == (p)->usable_per_page)
+
+#define is_unusable_next_idx_u32(p, idx) \
+ ((((p)->u.chain32.idx + 1) & \
+ (p)->elem_per_page_mask) == (p)->usable_per_page)
+
+#define test_and_skip(p, idx) \
+ do { \
+ if (is_chain_u16(p)) { \
+ if (is_unusable_idx(p, idx)) \
+ (p)->u.chain16.idx += \
+ (p)->elem_unusable; \
+ } else { \
+ if (is_unusable_idx_u32(p, idx)) \
+ (p)->u.chain32.idx += \
+ (p)->elem_unusable; \
+ } \
+ } while (0)
+
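+/* Worked example (same illustrative numbers as above): with
+ * usable_per_page == 510, elem_unusable == 2 and elem_per_page_mask == 511,
+ * test_and_skip() fires when an index's in-page offset reaches 510 (the
+ * next-pointer area) and bumps the index by 2, landing on the first element
+ * of the following page.
+ */
+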
+/**
+ * @brief ecore_chain_return_multi_produced -
+ *
+ * A chain in which the driver "Produces" elements should use this API
+ * to indicate that previously produced elements are now consumed.
+ *
+ * @param p_chain
+ * @param num
+ */
+static OSAL_INLINE
+void ecore_chain_return_multi_produced(struct ecore_chain *p_chain, u32 num)
+{
+ if (is_chain_u16(p_chain))
+ p_chain->u.chain16.cons_idx += (u16)num;
+ else
+ p_chain->u.chain32.cons_idx += num;
+ test_and_skip(p_chain, cons_idx);
+}
+
+/**
+ * @brief ecore_chain_return_produced -
+ *
+ * A chain in which the driver "Produces" elements should use this API
+ * to indicate that previously produced elements are now consumed.
+ *
+ * @param p_chain
+ */
+static OSAL_INLINE void ecore_chain_return_produced(struct ecore_chain *p_chain)
+{
+ if (is_chain_u16(p_chain))
+ p_chain->u.chain16.cons_idx++;
+ else
+ p_chain->u.chain32.cons_idx++;
+ test_and_skip(p_chain, cons_idx);
+}
+
+/**
+ * @brief ecore_chain_produce -
+ *
+ * A chain in which the driver "Produces" elements should use this to get
+ * a pointer to the next element which can be "Produced". It is the driver's
+ * responsibility to validate that the chain has room for a new element.
+ *
+ * @param p_chain
+ *
+ * @return void*, a pointer to next element
+ */
+static OSAL_INLINE void *ecore_chain_produce(struct ecore_chain *p_chain)
+{
+ void *p_ret = OSAL_NULL, *p_prod_idx, *p_prod_page_idx;
+
+ if (is_chain_u16(p_chain)) {
+ if ((p_chain->u.chain16.prod_idx &
+ p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+ p_prod_idx = &p_chain->u.chain16.prod_idx;
+ p_prod_page_idx = &p_chain->pbl.c.u16.prod_page_idx;
+ ecore_chain_advance_page(p_chain, &p_chain->p_prod_elem,
+ p_prod_idx, p_prod_page_idx);
+ }
+ p_chain->u.chain16.prod_idx++;
+ } else {
+ if ((p_chain->u.chain32.prod_idx &
+ p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+ p_prod_idx = &p_chain->u.chain32.prod_idx;
+ p_prod_page_idx = &p_chain->pbl.c.u32.prod_page_idx;
+ ecore_chain_advance_page(p_chain, &p_chain->p_prod_elem,
+ p_prod_idx, p_prod_page_idx);
+ }
+ p_chain->u.chain32.prod_idx++;
+ }
+
+ p_ret = p_chain->p_prod_elem;
+ p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
+ p_chain->elem_size);
+
+ return p_ret;
+}
+
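+/* Usage sketch (hypothetical caller; 'struct my_elem' and my_fill_elem()
+ * are placeholders, not part of this API).  Once the peer has consumed the
+ * produced elements, ecore_chain_return_produced() releases them:
+ *
+ *	if (ecore_chain_get_elem_left(p_chain)) {
+ *		struct my_elem *p_elem = ecore_chain_produce(p_chain);
+ *
+ *		my_fill_elem(p_elem);
+ *	}
+ *	...
+ *	ecore_chain_return_produced(p_chain);
+ */
+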
+/**
+ * @brief ecore_chain_get_capacity -
+ *
+ * Get the maximum number of BDs in chain
+ *
+ * @param p_chain
+ *
+ * @return u32, the maximum number of usable elements (BDs) in the chain
+ */
+static OSAL_INLINE u32 ecore_chain_get_capacity(struct ecore_chain *p_chain)
+{
+ return p_chain->capacity;
+}
+
+/**
+ * @brief ecore_chain_recycle_consumed -
+ *
+ * Returns an element which was previously consumed;
+ * Increments producers so they could be written to FW.
+ *
+ * @param p_chain
+ */
+static OSAL_INLINE
+void ecore_chain_recycle_consumed(struct ecore_chain *p_chain)
+{
+ test_and_skip(p_chain, prod_idx);
+ if (is_chain_u16(p_chain))
+ p_chain->u.chain16.prod_idx++;
+ else
+ p_chain->u.chain32.prod_idx++;
+}
+
+/**
+ * @brief ecore_chain_consume -
+ *
+ * A Chain in which the driver utilizes data written by a different source
+ * (i.e., FW) should use this to access passed buffers.
+ *
+ * @param p_chain
+ *
+ * @return void*, a pointer to the next buffer written
+ */
+static OSAL_INLINE void *ecore_chain_consume(struct ecore_chain *p_chain)
+{
+ void *p_ret = OSAL_NULL, *p_cons_idx, *p_cons_page_idx;
+
+ if (is_chain_u16(p_chain)) {
+ if ((p_chain->u.chain16.cons_idx &
+ p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+ p_cons_idx = &p_chain->u.chain16.cons_idx;
+ p_cons_page_idx = &p_chain->pbl.c.u16.cons_page_idx;
+ ecore_chain_advance_page(p_chain, &p_chain->p_cons_elem,
+ p_cons_idx, p_cons_page_idx);
+ }
+ p_chain->u.chain16.cons_idx++;
+ } else {
+ if ((p_chain->u.chain32.cons_idx &
+ p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+ p_cons_idx = &p_chain->u.chain32.cons_idx;
+ p_cons_page_idx = &p_chain->pbl.c.u32.cons_page_idx;
+ ecore_chain_advance_page(p_chain, &p_chain->p_cons_elem,
+ p_cons_idx, p_cons_page_idx);
+ }
+ p_chain->u.chain32.cons_idx++;
+ }
+
+ p_ret = p_chain->p_cons_elem;
+ p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
+ p_chain->elem_size);
+
+ return p_ret;
+}
+
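+/* Usage sketch (hypothetical caller; my_handle_cqe() is a placeholder).
+ * For each element the FW has indicated as written - e.g. by comparing
+ * against a hardware consumer index tracked elsewhere - consume the buffer
+ * and then recycle the slot so the producer side can reuse it:
+ *
+ *	void *p_cqe = ecore_chain_consume(p_chain);
+ *
+ *	my_handle_cqe(p_cqe);
+ *	ecore_chain_recycle_consumed(p_chain);
+ */
+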
+/**
+ * @brief ecore_chain_reset -
+ *
+ * Resets the chain to its start state
+ *
+ * @param p_chain pointer to a previously allocated chain
+ */
+static OSAL_INLINE void ecore_chain_reset(struct ecore_chain *p_chain)
+{
+ u32 i;
+
+ if (is_chain_u16(p_chain)) {
+ p_chain->u.chain16.prod_idx = 0;
+ p_chain->u.chain16.cons_idx = 0;
+ } else {
+ p_chain->u.chain32.prod_idx = 0;
+ p_chain->u.chain32.cons_idx = 0;
+ }
+ p_chain->p_cons_elem = p_chain->p_virt_addr;
+ p_chain->p_prod_elem = p_chain->p_virt_addr;
+
+ if (p_chain->mode == ECORE_CHAIN_MODE_PBL) {
+ /* Use (page_cnt - 1) as a reset value for the prod/cons page's
+ * indices, to avoid unnecessary page advancing on the first
+ * call to ecore_chain_produce/consume. Instead, the indices
+ * will be advanced to page_cnt and then will be wrapped to 0.
+ */
+ u32 reset_val = p_chain->page_cnt - 1;
+
+ if (is_chain_u16(p_chain)) {
+ p_chain->pbl.c.u16.prod_page_idx = (u16)reset_val;
+ p_chain->pbl.c.u16.cons_page_idx = (u16)reset_val;
+ } else {
+ p_chain->pbl.c.u32.prod_page_idx = reset_val;
+ p_chain->pbl.c.u32.cons_page_idx = reset_val;
+ }
+ }
+
+ switch (p_chain->intended_use) {
+ case ECORE_CHAIN_USE_TO_CONSUME:
+ /* produce empty elements */
+ for (i = 0; i < p_chain->capacity; i++)
+ ecore_chain_recycle_consumed(p_chain);
+ break;
+
+ case ECORE_CHAIN_USE_TO_CONSUME_PRODUCE:
+ case ECORE_CHAIN_USE_TO_PRODUCE:
+ default:
+ /* Do nothing */
+ break;
+ }
+}
+
+/**
+ * @brief ecore_chain_init_params -
+ *
+ * Initializes a basic chain struct
+ *
+ * @param p_chain
+ * @param page_cnt number of pages in the allocated buffer
+ * @param elem_size size of each element in the chain
+ * @param intended_use
+ * @param mode
+ * @param cnt_type
+ * @param dp_ctx
+ */
+static OSAL_INLINE void
+ecore_chain_init_params(struct ecore_chain *p_chain, u32 page_cnt, u8 elem_size,
+ enum ecore_chain_use_mode intended_use,
+ enum ecore_chain_mode mode,
+ enum ecore_chain_cnt_type cnt_type, void *dp_ctx)
+{
+ /* chain fixed parameters */
+ p_chain->p_virt_addr = OSAL_NULL;
+ p_chain->p_phys_addr = 0;
+ p_chain->elem_size = elem_size;
+ p_chain->intended_use = (u8)intended_use;
+ p_chain->mode = mode;
+ p_chain->cnt_type = (u8)cnt_type;
+
+ p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
+ p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
+ p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
+ p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
+ p_chain->next_page_mask = (p_chain->usable_per_page &
+ p_chain->elem_per_page_mask);
+
+ p_chain->page_cnt = page_cnt;
+ p_chain->capacity = p_chain->usable_per_page * page_cnt;
+ p_chain->size = p_chain->elem_per_page * page_cnt;
+ p_chain->b_external_pbl = false;
+ p_chain->pbl_sp.p_phys_table = 0;
+ p_chain->pbl_sp.p_virt_table = OSAL_NULL;
+ p_chain->pbl.pp_virt_addr_tbl = OSAL_NULL;
+
+ p_chain->dp_ctx = dp_ctx;
+}
+
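+/* Usage sketch (hypothetical values; p_virt/p_phys stand for a buffer
+ * allocated and DMA-mapped elsewhere): a single-page chain of 8-byte
+ * elements with 16-bit indices, reset to its start state before use:
+ *
+ *	ecore_chain_init_params(p_chain, 1, 8,
+ *				ECORE_CHAIN_USE_TO_PRODUCE,
+ *				ECORE_CHAIN_MODE_SINGLE,
+ *				ECORE_CHAIN_CNT_TYPE_U16, OSAL_NULL);
+ *	ecore_chain_init_mem(p_chain, p_virt, p_phys);
+ *	ecore_chain_reset(p_chain);
+ */
+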
+/**
+ * @brief ecore_chain_init_mem -
+ *
+ * Initializes a basic chain struct with its chain buffers
+ *
+ * @param p_chain
+ * @param p_virt_addr virtual address of allocated buffer's beginning
+ * @param p_phys_addr physical address of allocated buffer's beginning
+ *
+ */
+static OSAL_INLINE void ecore_chain_init_mem(struct ecore_chain *p_chain,
+ void *p_virt_addr,
+ dma_addr_t p_phys_addr)
+{
+ p_chain->p_virt_addr = p_virt_addr;
+ p_chain->p_phys_addr = p_phys_addr;
+}
+
+/**
+ * @brief ecore_chain_init_pbl_mem -
+ *
+ * Initializes a basic chain struct with its pbl buffers
+ *
+ * @param p_chain
+ * @param p_virt_pbl virtual address of a pre-allocated side table (the pbl)
+ * which holds the physical addresses of the chain pages.
+ * @param p_phys_pbl physical (DMA) address of that same side table.
+ * @param pp_virt_addr_tbl
+ * pointer to a pre-allocated side table which will hold
+ * the virtual addresses of the chain pages.
+ *
+ */
+static OSAL_INLINE void ecore_chain_init_pbl_mem(struct ecore_chain *p_chain,
+ void *p_virt_pbl,
+ dma_addr_t p_phys_pbl,
+ void **pp_virt_addr_tbl)
+{
+ p_chain->pbl_sp.p_phys_table = p_phys_pbl;
+ p_chain->pbl_sp.p_virt_table = p_virt_pbl;
+ p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
+}
+
+/**
+ * @brief ecore_chain_init_next_ptr_elem -
+ *
+ * Initializes a next pointer element
+ *
+ * @param p_chain
+ * @param p_virt_curr virtual address of a chain page of which the next
+ * pointer element is initialized
+ * @param p_virt_next virtual address of the next chain page
+ * @param p_phys_next physical address of the next chain page
+ *
+ */
+static OSAL_INLINE void
+ecore_chain_init_next_ptr_elem(struct ecore_chain *p_chain, void *p_virt_curr,
+ void *p_virt_next, dma_addr_t p_phys_next)
+{
+ struct ecore_chain_next *p_next;
+ u32 size;
+
+ size = p_chain->elem_size * p_chain->usable_per_page;
+ p_next = (struct ecore_chain_next *)((u8 *)p_virt_curr + size);
+
+ DMA_REGPAIR_LE(p_next->next_phys, p_phys_next);
+
+ p_next->next_virt = p_virt_next;
+}
+
+/**
+ * @brief ecore_chain_get_last_elem -
+ *
+ * Returns a pointer to the last element of the chain
+ *
+ * @param p_chain
+ *
+ * @return void*
+ */
+static OSAL_INLINE void *ecore_chain_get_last_elem(struct ecore_chain *p_chain)
+{
+ struct ecore_chain_next *p_next = OSAL_NULL;
+ void *p_virt_addr = OSAL_NULL;
+ u32 size, last_page_idx;
+
+ if (!p_chain->p_virt_addr)
+ goto out;
+
+ switch (p_chain->mode) {
+ case ECORE_CHAIN_MODE_NEXT_PTR:
+ size = p_chain->elem_size * p_chain->usable_per_page;
+ p_virt_addr = p_chain->p_virt_addr;
+ p_next = (struct ecore_chain_next *)((u8 *)p_virt_addr + size);
+ while (p_next->next_virt != p_chain->p_virt_addr) {
+ p_virt_addr = p_next->next_virt;
+ p_next =
+ (struct ecore_chain_next *)((u8 *)p_virt_addr +
+ size);
+ }
+ break;
+ case ECORE_CHAIN_MODE_SINGLE:
+ p_virt_addr = p_chain->p_virt_addr;
+ break;
+ case ECORE_CHAIN_MODE_PBL:
+ last_page_idx = p_chain->page_cnt - 1;
+ p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx];
+ break;
+ }
+ /* p_virt_addr points at this stage to the last page of the chain */
+ size = p_chain->elem_size * (p_chain->usable_per_page - 1);
+ p_virt_addr = ((u8 *)p_virt_addr + size);
+out:
+ return p_virt_addr;
+}
+
+/**
+ * @brief ecore_chain_set_prod - sets the prod to the given value
+ *
+ * @param prod_idx
+ * @param p_prod_elem
+ */
+static OSAL_INLINE void ecore_chain_set_prod(struct ecore_chain *p_chain,
+ u32 prod_idx, void *p_prod_elem)
+{
+ if (is_chain_u16(p_chain))
+ p_chain->u.chain16.prod_idx = (u16)prod_idx;
+ else
+ p_chain->u.chain32.prod_idx = prod_idx;
+ p_chain->p_prod_elem = p_prod_elem;
+}
+
+/**
+ * @brief ecore_chain_pbl_zero_mem - set chain memory to 0
+ *
+ * @param p_chain
+ */
+static OSAL_INLINE void ecore_chain_pbl_zero_mem(struct ecore_chain *p_chain)
+{
+ u32 i, page_cnt;
+
+ if (p_chain->mode != ECORE_CHAIN_MODE_PBL)
+ return;
+
+ page_cnt = ecore_chain_get_page_cnt(p_chain);
+
+ for (i = 0; i < page_cnt; i++)
+ OSAL_MEM_ZERO(p_chain->pbl.pp_virt_addr_tbl[i],
+ ECORE_CHAIN_PAGE_SIZE);
+}
+
+int ecore_chain_print(struct ecore_chain *p_chain, char *buffer,
+ u32 buffer_size, u32 *element_indx, u32 stop_indx,
+ bool print_metadata,
+ int (*func_ptr_print_element)(struct ecore_chain *p_chain,
+ void *p_element,
+ char *buffer),
+ int (*func_ptr_print_metadata)(struct ecore_chain
+ *p_chain,
+ char *buffer));
+
+#endif /* __ECORE_CHAIN_H__ */
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_cxt.c b/src/seastar/dpdk/drivers/net/qede/base/ecore_cxt.c
new file mode 100644
index 00000000..688118bb
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_cxt.c
@@ -0,0 +1,2253 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "reg_addr.h"
+#include "common_hsi.h"
+#include "ecore_hsi_common.h"
+#include "ecore_hsi_eth.h"
+#include "ecore_rt_defs.h"
+#include "ecore_status.h"
+#include "ecore.h"
+#include "ecore_init_ops.h"
+#include "ecore_init_fw_funcs.h"
+#include "ecore_cxt.h"
+#include "ecore_hw.h"
+#include "ecore_dev_api.h"
+#include "ecore_sriov.h"
+#include "ecore_mcp.h"
+
+/* Max number of connection types in HW (DQ/CDU etc.) */
+#define MAX_CONN_TYPES PROTOCOLID_COMMON
+#define NUM_TASK_TYPES 2
+#define NUM_TASK_PF_SEGMENTS 4
+#define NUM_TASK_VF_SEGMENTS 1
+
+/* Doorbell-Queue constants */
+#define DQ_RANGE_SHIFT 4
+#define DQ_RANGE_ALIGN (1 << DQ_RANGE_SHIFT)
+
+/* Searcher constants */
+#define SRC_MIN_NUM_ELEMS 256
+
+/* Timers constants */
+#define TM_SHIFT 7
+#define TM_ALIGN (1 << TM_SHIFT)
+#define TM_ELEM_SIZE 4
+
+/* ILT constants */
+/* If for some reason the HW P size is modified to be less than 32K,
+ * special handling is needed for CDU initialization
+ */
+#define ILT_DEFAULT_HW_P_SIZE 3
+
+#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
+#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_##cli##_##reg##_RT_OFFSET
+
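+/* With the default ILT_DEFAULT_HW_P_SIZE of 3, ILT_PAGE_IN_BYTES(3) is
+ * 1 << (3 + 12) == 32K - the "default ILT page size for all clients"
+ * programmed into every client in ecore_cxt_mngr_alloc() below.
+ */
+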
+/* ILT entry structure */
+#define ILT_ENTRY_PHY_ADDR_MASK 0x000FFFFFFFFFFFULL
+#define ILT_ENTRY_PHY_ADDR_SHIFT 0
+#define ILT_ENTRY_VALID_MASK 0x1ULL
+#define ILT_ENTRY_VALID_SHIFT 52
+#define ILT_ENTRY_IN_REGS 2
+#define ILT_REG_SIZE_IN_BYTES 4
+
+/* connection context union */
+union conn_context {
+ struct core_conn_context core_ctx;
+ struct eth_conn_context eth_ctx;
+};
+
+/* TYPE-0 task context - iSCSI, FCOE */
+union type0_task_context {
+};
+
+/* TYPE-1 task context - ROCE */
+union type1_task_context {
+};
+
+struct src_ent {
+ u8 opaque[56];
+ u64 next;
+};
+
+#define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */
+#define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12))
+
+#define CONN_CXT_SIZE(p_hwfn) \
+ ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
+
+#define SRQ_CXT_SIZE (sizeof(struct regpair) * 8) /* @DPDK */
+
+#define TYPE0_TASK_CXT_SIZE(p_hwfn) \
+ ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)
+
+/* Alignment is inherent to the type1_task_context structure */
+#define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)
+
+/* PF per-protocol configuration object */
+#define TASK_SEGMENTS (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
+#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
+
+struct ecore_tid_seg {
+ u32 count;
+ u8 type;
+ bool has_fl_mem;
+};
+
+struct ecore_conn_type_cfg {
+ u32 cid_count;
+ u32 cids_per_vf;
+ struct ecore_tid_seg tid_seg[TASK_SEGMENTS];
+};
+
+/* ILT Client configuration:
+ * Per connection type (protocol) resources (cids, tis, vf cids etc.)
+ * 1 block for connection context (CDUC), and for each task context two
+ * values - one for the regular task context and one for the force-load memory
+ */
+#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
+#define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2)
+#define CDUC_BLK (0)
+#define SRQ_BLK (0)
+#define CDUT_SEG_BLK(n) (1 + (u8)(n))
+#define CDUT_FL_SEG_BLK(n, X) (1 + (n) + NUM_TASK_##X##_SEGMENTS)
+
+enum ilt_clients {
+ ILT_CLI_CDUC,
+ ILT_CLI_CDUT,
+ ILT_CLI_QM,
+ ILT_CLI_TM,
+ ILT_CLI_SRC,
+ ILT_CLI_TSDM,
+ ILT_CLI_MAX
+};
+
+struct ilt_cfg_pair {
+ u32 reg;
+ u32 val;
+};
+
+struct ecore_ilt_cli_blk {
+ u32 total_size; /* 0 means not active */
+ u32 real_size_in_page;
+ u32 start_line;
+ u32 dynamic_line_cnt;
+};
+
+struct ecore_ilt_client_cfg {
+ bool active;
+
+ /* ILT boundaries */
+ struct ilt_cfg_pair first;
+ struct ilt_cfg_pair last;
+ struct ilt_cfg_pair p_size;
+
+ /* ILT client blocks for PF */
+ struct ecore_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
+ u32 pf_total_lines;
+
+ /* ILT client blocks for VFs */
+ struct ecore_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
+ u32 vf_total_lines;
+};
+
+/* Per Path -
+ * ILT shadow table
+ * Protocol acquired CID lists
+ * PF start line in ILT
+ */
+struct ecore_dma_mem {
+ dma_addr_t p_phys;
+ void *p_virt;
+ osal_size_t size;
+};
+
+#define MAP_WORD_SIZE sizeof(unsigned long)
+#define BITS_PER_MAP_WORD (MAP_WORD_SIZE * 8)
+
+struct ecore_cid_acquired_map {
+ u32 start_cid;
+ u32 max_count;
+ unsigned long *cid_map;
+};
+
+struct ecore_cxt_mngr {
+	/* Per-protocol configuration */
+ struct ecore_conn_type_cfg conn_cfg[MAX_CONN_TYPES];
+
+ /* computed ILT structure */
+ struct ecore_ilt_client_cfg clients[ILT_CLI_MAX];
+
+ /* Task type sizes */
+ u32 task_type_size[NUM_TASK_TYPES];
+
+ /* total number of VFs for this hwfn -
+ * ALL VFs are symmetric in terms of HW resources
+ */
+ u32 vf_count;
+
+ /* Acquired CIDs */
+ struct ecore_cid_acquired_map acquired[MAX_CONN_TYPES];
+ /* TBD - do we want this allocated to reserve space? */
+ struct ecore_cid_acquired_map
+ acquired_vf[MAX_CONN_TYPES][COMMON_MAX_NUM_VFS];
+
+ /* ILT shadow table */
+ struct ecore_dma_mem *ilt_shadow;
+ u32 pf_start_line;
+
+ /* Mutex for a dynamic ILT allocation */
+ osal_mutex_t mutex;
+
+ /* SRC T2 */
+ struct ecore_dma_mem *t2;
+ u32 t2_num_pages;
+ u64 first_free;
+ u64 last_free;
+
+ /* The infrastructure originally was very generic and context/task
+ * oriented - per connection-type we would set how many of those
+	 * are needed, and later, when determining how much memory we need
+	 * for a given block, we'd iterate over all the relevant
+	 * connection-types.
+	 * But since then we've had some additional resources, some of which
+	 * require memory which is independent of the general context/task
+ * scheme. We add those here explicitly per-feature.
+ */
+
+ /* total number of SRQ's for this hwfn */
+ u32 srq_count;
+
+ /* Maximal number of L2 steering filters */
+ u32 arfs_count;
+
+ /* TODO - VF arfs filters ? */
+};
+
+/* check if resources/configuration is required according to protocol type */
+static OSAL_INLINE bool src_proto(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type)
+{
+ return type == PROTOCOLID_TOE;
+}
+
+static OSAL_INLINE bool tm_cid_proto(enum protocol_type type)
+{
+ return type == PROTOCOLID_TOE;
+}
+
+static bool tm_tid_proto(enum protocol_type type)
+{
+ return type == PROTOCOLID_FCOE;
+}
+
+/* counts the iids for the CDU/CDUC ILT client configuration */
+struct ecore_cdu_iids {
+ u32 pf_cids;
+ u32 per_vf_cids;
+};
+
+static void ecore_cxt_cdu_iids(struct ecore_cxt_mngr *p_mngr,
+ struct ecore_cdu_iids *iids)
+{
+ u32 type;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ iids->pf_cids += p_mngr->conn_cfg[type].cid_count;
+ iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
+ }
+}
+
+/* counts the iids for the Searcher block configuration */
+struct ecore_src_iids {
+ u32 pf_cids;
+ u32 per_vf_cids;
+};
+
+static OSAL_INLINE void ecore_cxt_src_iids(struct ecore_hwfn *p_hwfn,
+ struct ecore_cxt_mngr *p_mngr,
+ struct ecore_src_iids *iids)
+{
+ u32 i;
+
+ for (i = 0; i < MAX_CONN_TYPES; i++) {
+ if (!src_proto(p_hwfn, i))
+ continue;
+
+ iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
+ iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
+ }
+
+ /* Add L2 filtering filters in addition */
+ iids->pf_cids += p_mngr->arfs_count;
+}
+
+/* counts the iids for the Timers block configuration */
+struct ecore_tm_iids {
+ u32 pf_cids;
+ u32 pf_tids[NUM_TASK_PF_SEGMENTS]; /* per segment */
+ u32 pf_tids_total;
+ u32 per_vf_cids;
+ u32 per_vf_tids;
+};
+
+static OSAL_INLINE void ecore_cxt_tm_iids(struct ecore_cxt_mngr *p_mngr,
+ struct ecore_tm_iids *iids)
+{
+ bool tm_vf_required = false;
+ bool tm_required = false;
+ u32 i, j;
+
+ for (i = 0; i < MAX_CONN_TYPES; i++) {
+ struct ecore_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];
+
+ if (tm_cid_proto(i) || tm_required) {
+ if (p_cfg->cid_count)
+ tm_required = true;
+
+ iids->pf_cids += p_cfg->cid_count;
+ }
+
+ if (tm_cid_proto(i) || tm_vf_required) {
+ if (p_cfg->cids_per_vf)
+ tm_vf_required = true;
+
+ }
+
+ if (tm_tid_proto(i)) {
+ struct ecore_tid_seg *segs = p_cfg->tid_seg;
+
+ /* for each segment there is at most one
+ * protocol for which count is not 0.
+ */
+ for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
+ iids->pf_tids[j] += segs[j].count;
+
+			/* The last array element is for the VFs. As for PF
+ * segments there can be only one protocol for
+ * which this value is not 0.
+ */
+ iids->per_vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
+ }
+ }
+
+ iids->pf_cids = ROUNDUP(iids->pf_cids, TM_ALIGN);
+ iids->per_vf_cids = ROUNDUP(iids->per_vf_cids, TM_ALIGN);
+ iids->per_vf_tids = ROUNDUP(iids->per_vf_tids, TM_ALIGN);
+
+ for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) {
+ iids->pf_tids[j] = ROUNDUP(iids->pf_tids[j], TM_ALIGN);
+ iids->pf_tids_total += iids->pf_tids[j];
+ }
+}
+
+static void ecore_cxt_qm_iids(struct ecore_hwfn *p_hwfn,
+ struct ecore_qm_iids *iids)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_tid_seg *segs;
+ u32 vf_cids = 0, type, j;
+ u32 vf_tids = 0;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ iids->cids += p_mngr->conn_cfg[type].cid_count;
+ vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
+
+ segs = p_mngr->conn_cfg[type].tid_seg;
+ /* for each segment there is at most one
+ * protocol for which count is not 0.
+ */
+ for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
+ iids->tids += segs[j].count;
+
+		/* The last array element is for the VFs. As for PF
+ * segments there can be only one protocol for
+ * which this value is not 0.
+ */
+ vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
+ }
+
+ iids->vf_cids += vf_cids * p_mngr->vf_count;
+ iids->tids += vf_tids * p_mngr->vf_count;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+ "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n",
+ iids->cids, iids->vf_cids, iids->tids, vf_tids);
+}
+
+static struct ecore_tid_seg *ecore_cxt_tid_seg_info(struct ecore_hwfn *p_hwfn,
+ u32 seg)
+{
+ struct ecore_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr;
+ u32 i;
+
+ /* Find the protocol with tid count > 0 for this segment.
+ * Note: there can only be one and this is already validated.
+ */
+ for (i = 0; i < MAX_CONN_TYPES; i++) {
+ if (p_cfg->conn_cfg[i].tid_seg[seg].count)
+ return &p_cfg->conn_cfg[i].tid_seg[seg];
+ }
+ return OSAL_NULL;
+}
+
+/* set the iids (cid/tid) count per protocol */
+static void ecore_cxt_set_proto_cid_count(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 cid_count, u32 vf_cid_cnt)
+{
+ struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+ struct ecore_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];
+
+ p_conn->cid_count = ROUNDUP(cid_count, DQ_RANGE_ALIGN);
+ p_conn->cids_per_vf = ROUNDUP(vf_cid_cnt, DQ_RANGE_ALIGN);
+}
+
+u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type, u32 *vf_cid)
+{
+ if (vf_cid)
+ *vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;
+
+ return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
+}
+
+u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type)
+{
+ return p_hwfn->p_cxt_mngr->acquired[type].start_cid;
+}
+
+u32 ecore_cxt_get_proto_tid_count(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type)
+{
+ u32 cnt = 0;
+ int i;
+
+ for (i = 0; i < TASK_SEGMENTS; i++)
+ cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count;
+
+ return cnt;
+}
+
+static OSAL_INLINE void
+ecore_cxt_set_proto_tid_count(struct ecore_hwfn *p_hwfn,
+ enum protocol_type proto,
+ u8 seg, u8 seg_type, u32 count, bool has_fl)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];
+
+ p_seg->count = count;
+ p_seg->has_fl_mem = has_fl;
+ p_seg->type = seg_type;
+}
+
+/* the *p_line parameter must be either 0 for the first invocation or the
+ * value returned in the previous invocation.
+ */
+static void ecore_ilt_cli_blk_fill(struct ecore_ilt_client_cfg *p_cli,
+ struct ecore_ilt_cli_blk *p_blk,
+ u32 start_line,
+ u32 total_size, u32 elem_size)
+{
+ u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
+
+ /* verify that it's called once for each block */
+ if (p_blk->total_size)
+ return;
+
+ p_blk->total_size = total_size;
+ p_blk->real_size_in_page = 0;
+ if (elem_size)
+ p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
+ p_blk->start_line = start_line;
+}
+
+static void ecore_ilt_cli_adv_line(struct ecore_hwfn *p_hwfn,
+ struct ecore_ilt_client_cfg *p_cli,
+ struct ecore_ilt_cli_blk *p_blk,
+ u32 *p_line, enum ilt_clients client_id)
+{
+ if (!p_blk->total_size)
+ return;
+
+ if (!p_cli->active)
+ p_cli->first.val = *p_line;
+
+ p_cli->active = true;
+ *p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
+ p_cli->last.val = *p_line - 1;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+ "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x"
+ " [Real %08x] Start line %d\n",
+ client_id, p_cli->first.val, p_cli->last.val,
+ p_blk->total_size, p_blk->real_size_in_page,
+ p_blk->start_line);
+}
+
+static u32 ecore_ilt_get_dynamic_line_cnt(struct ecore_hwfn *p_hwfn,
+ enum ilt_clients ilt_client)
+{
+ u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count;
+ struct ecore_ilt_client_cfg *p_cli;
+ u32 lines_to_skip = 0;
+ u32 cxts_per_p;
+
+ /* TBD MK: ILT code should be simplified once PROTO enum is changed */
+
+ if (ilt_client == ILT_CLI_CDUC) {
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+
+ cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /
+ (u32)CONN_CXT_SIZE(p_hwfn);
+
+ lines_to_skip = cid_count / cxts_per_p;
+ }
+
+ return lines_to_skip;
+}
+
+enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 curr_line, total, i, task_size, line;
+ struct ecore_ilt_client_cfg *p_cli;
+ struct ecore_ilt_cli_blk *p_blk;
+ struct ecore_cdu_iids cdu_iids;
+ struct ecore_src_iids src_iids;
+ struct ecore_qm_iids qm_iids;
+ struct ecore_tm_iids tm_iids;
+ struct ecore_tid_seg *p_seg;
+
+ OSAL_MEM_ZERO(&qm_iids, sizeof(qm_iids));
+ OSAL_MEM_ZERO(&cdu_iids, sizeof(cdu_iids));
+ OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
+ OSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids));
+
+ p_mngr->pf_start_line = RESC_START(p_hwfn, ECORE_ILT);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+ "hwfn [%d] - Set context mngr starting line to be 0x%08x\n",
+ p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
+
+ /* CDUC */
+ p_cli = &p_mngr->clients[ILT_CLI_CDUC];
+
+ curr_line = p_mngr->pf_start_line;
+
+ /* CDUC PF */
+ p_cli->pf_total_lines = 0;
+
+	/* get the counters for the CDUC and QM clients */
+ ecore_cxt_cdu_iids(p_mngr, &cdu_iids);
+
+ p_blk = &p_cli->pf_blks[CDUC_BLK];
+
+ total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);
+
+ ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total, CONN_CXT_SIZE(p_hwfn));
+
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+
+ p_blk->dynamic_line_cnt = ecore_ilt_get_dynamic_line_cnt(p_hwfn,
+ ILT_CLI_CDUC);
+
+ /* CDUC VF */
+ p_blk = &p_cli->vf_blks[CDUC_BLK];
+ total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);
+
+ ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total, CONN_CXT_SIZE(p_hwfn));
+
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
+ p_cli->vf_total_lines = curr_line - p_blk->start_line;
+
+ for (i = 1; i < p_mngr->vf_count; i++)
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUC);
+
+ /* CDUT PF */
+ p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+ p_cli->first.val = curr_line;
+
+ /* first the 'working' task memory */
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ p_seg = ecore_cxt_tid_seg_info(p_hwfn, i);
+ if (!p_seg || p_seg->count == 0)
+ continue;
+
+ p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
+ total = p_seg->count * p_mngr->task_type_size[p_seg->type];
+ ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
+ p_mngr->task_type_size[p_seg->type]);
+
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+ }
+
+ /* next the 'init' task memory (forced load memory) */
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ p_seg = ecore_cxt_tid_seg_info(p_hwfn, i);
+ if (!p_seg || p_seg->count == 0)
+ continue;
+
+ p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
+
+ if (!p_seg->has_fl_mem) {
+			/* The segment is active (total size of 'working'
+			 * memory is > 0) but has no FL (forced-load, Init)
+			 * memory. Thus:
+			 *
+			 * 1. The total-size in the corresponding FL block of
+			 *    the ILT client is set to 0 - no ILT lines are
+			 *    provisioned and no ILT memory allocated.
+			 *
+			 * 2. The start-line of said block is set to the
+			 *    start line of the matching working memory
+			 *    block in the ILT client. This is later used to
+			 *    configure the CDU segment offset registers, so
+			 *    that an FL command for TIDs of this segment
+			 *    behaves as a regular load command (loading TIDs
+			 *    from the working memory).
+ */
+ line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line;
+
+ ecore_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
+ continue;
+ }
+ total = p_seg->count * p_mngr->task_type_size[p_seg->type];
+
+ ecore_ilt_cli_blk_fill(p_cli, p_blk,
+ curr_line, total,
+ p_mngr->task_type_size[p_seg->type]);
+
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+ }
+ p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line;
+
+ /* CDUT VF */
+ p_seg = ecore_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);
+ if (p_seg && p_seg->count) {
+		/* Strictly speaking we need to iterate over all VF
+ * task segment types, but a VF has only 1 segment
+ */
+
+ /* 'working' memory */
+ total = p_seg->count * p_mngr->task_type_size[p_seg->type];
+
+ p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
+ ecore_ilt_cli_blk_fill(p_cli, p_blk,
+ curr_line, total,
+ p_mngr->task_type_size[p_seg->type]);
+
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+
+ /* 'init' memory */
+ p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
+ if (!p_seg->has_fl_mem) {
+ /* see comment above */
+ line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
+ ecore_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
+ } else {
+ task_size = p_mngr->task_type_size[p_seg->type];
+ ecore_ilt_cli_blk_fill(p_cli, p_blk,
+ curr_line, total, task_size);
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+ }
+ p_cli->vf_total_lines = curr_line -
+ p_cli->vf_blks[0].start_line;
+
+ /* Now for the rest of the VFs */
+ for (i = 1; i < p_mngr->vf_count; i++) {
+ p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+
+ p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+ }
+ }
+
+ /* QM */
+ p_cli = &p_mngr->clients[ILT_CLI_QM];
+ p_blk = &p_cli->pf_blks[0];
+
+ ecore_cxt_qm_iids(p_hwfn, &qm_iids);
+ total = ecore_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
+ qm_iids.vf_cids, qm_iids.tids,
+ p_hwfn->qm_info.num_pqs,
+ p_hwfn->qm_info.num_vf_pqs);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+ "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d,"
+ " num_vf_pqs=%d, memory_size=%d)\n",
+ qm_iids.cids, qm_iids.vf_cids, qm_iids.tids,
+ p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);
+
+ ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total * 0x1000,
+ QM_PQ_ELEMENT_SIZE);
+
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+
+ /* SRC */
+ p_cli = &p_mngr->clients[ILT_CLI_SRC];
+ ecore_cxt_src_iids(p_hwfn, p_mngr, &src_iids);
+
+ /* Both the PF and VFs searcher connections are stored in the per PF
+ * database. Thus sum the PF searcher cids and all the VFs searcher
+ * cids.
+ */
+ total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
+ if (total) {
+ u32 local_max = OSAL_MAX_T(u32, total,
+ SRC_MIN_NUM_ELEMS);
+
+ total = OSAL_ROUNDUP_POW_OF_TWO(local_max);
+
+ p_blk = &p_cli->pf_blks[0];
+ ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total * sizeof(struct src_ent),
+ sizeof(struct src_ent));
+
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_SRC);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ }
+
+ /* TM PF */
+ p_cli = &p_mngr->clients[ILT_CLI_TM];
+ ecore_cxt_tm_iids(p_mngr, &tm_iids);
+ total = tm_iids.pf_cids + tm_iids.pf_tids_total;
+ if (total) {
+ p_blk = &p_cli->pf_blks[0];
+ ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total * TM_ELEM_SIZE, TM_ELEM_SIZE);
+
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_TM);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ }
+
+ /* TM VF */
+ total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
+ if (total) {
+ p_blk = &p_cli->vf_blks[0];
+ ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total * TM_ELEM_SIZE, TM_ELEM_SIZE);
+
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_TM);
+
+ p_cli->vf_total_lines = curr_line - p_blk->start_line;
+ for (i = 1; i < p_mngr->vf_count; i++) {
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_TM);
+ }
+ }
+
+ /* TSDM (SRQ CONTEXT) */
+ total = ecore_cxt_get_srq_count(p_hwfn);
+
+ if (total) {
+ p_cli = &p_mngr->clients[ILT_CLI_TSDM];
+ p_blk = &p_cli->pf_blks[SRQ_BLK];
+ ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);
+
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_TSDM);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ }
+
+ if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
+ RESC_NUM(p_hwfn, ECORE_ILT)) {
+ DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
+ curr_line - p_hwfn->p_cxt_mngr->pf_start_line);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static void ecore_cxt_src_t2_free(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 i;
+
+ if (!p_mngr->t2)
+ return;
+
+ for (i = 0; i < p_mngr->t2_num_pages; i++)
+ if (p_mngr->t2[i].p_virt)
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_mngr->t2[i].p_virt,
+ p_mngr->t2[i].p_phys,
+ p_mngr->t2[i].size);
+
+ OSAL_FREE(p_hwfn->p_dev, p_mngr->t2);
+}
+
+static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 conn_num, total_size, ent_per_page, psz, i;
+ struct ecore_ilt_client_cfg *p_src;
+ struct ecore_src_iids src_iids;
+ struct ecore_dma_mem *p_t2;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
+
+	/* if the SRC ILT client is inactive - there are no connections
+	 * requiring the searcher; leave.
+ */
+ p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC];
+ if (!p_src->active)
+ return ECORE_SUCCESS;
+
+ ecore_cxt_src_iids(p_hwfn, p_mngr, &src_iids);
+ conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
+ total_size = conn_num * sizeof(struct src_ent);
+
+ /* use the same page size as the SRC ILT client */
+ psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
+ p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);
+
+ /* allocate t2 */
+ p_mngr->t2 = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ p_mngr->t2_num_pages *
+ sizeof(struct ecore_dma_mem));
+ if (!p_mngr->t2) {
+ DP_NOTICE(p_hwfn, true, "Failed to allocate t2 table\n");
+ rc = ECORE_NOMEM;
+ goto t2_fail;
+ }
+
+ /* allocate t2 pages */
+ for (i = 0; i < p_mngr->t2_num_pages; i++) {
+ u32 size = OSAL_MIN_T(u32, total_size, psz);
+ void **p_virt = &p_mngr->t2[i].p_virt;
+
+ *p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+ &p_mngr->t2[i].p_phys, size);
+ if (!p_mngr->t2[i].p_virt) {
+ rc = ECORE_NOMEM;
+ goto t2_fail;
+ }
+ OSAL_MEM_ZERO(*p_virt, size);
+ p_mngr->t2[i].size = size;
+ total_size -= size;
+ }
+
+ /* Set the t2 pointers */
+
+ /* entries per page - must be a power of two */
+ ent_per_page = psz / sizeof(struct src_ent);
+
+ p_mngr->first_free = (u64)p_mngr->t2[0].p_phys;
+
+ p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page];
+ p_mngr->last_free = (u64)p_t2->p_phys +
+ ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);
+
+ for (i = 0; i < p_mngr->t2_num_pages; i++) {
+ u32 ent_num = OSAL_MIN_T(u32, ent_per_page, conn_num);
+ struct src_ent *entries = p_mngr->t2[i].p_virt;
+ u64 p_ent_phys = (u64)p_mngr->t2[i].p_phys, val;
+ u32 j;
+
+ for (j = 0; j < ent_num - 1; j++) {
+ val = p_ent_phys + (j + 1) * sizeof(struct src_ent);
+ entries[j].next = OSAL_CPU_TO_BE64(val);
+ }
+
+ if (i < p_mngr->t2_num_pages - 1)
+ val = (u64)p_mngr->t2[i + 1].p_phys;
+ else
+ val = 0;
+ entries[j].next = OSAL_CPU_TO_BE64(val);
+
+ conn_num -= ent_num;
+ }
+
+ return ECORE_SUCCESS;
+
+t2_fail:
+ ecore_cxt_src_t2_free(p_hwfn);
+ return rc;
+}
+
+#define for_each_ilt_valid_client(pos, clients) \
+ for (pos = 0; pos < ILT_CLI_MAX; pos++) \
+ if (!clients[pos].active) { \
+ continue; \
+ } else \
+
+
+/* Total number of ILT lines used by this PF */
+static u32 ecore_cxt_ilt_shadow_size(struct ecore_ilt_client_cfg *ilt_clients)
+{
+ u32 size = 0;
+ u32 i;
+
+ for_each_ilt_valid_client(i, ilt_clients)
+ size += (ilt_clients[i].last.val -
+ ilt_clients[i].first.val + 1);
+
+ return size;
+}
+
+static void ecore_ilt_shadow_free(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 ilt_size, i;
+
+ ilt_size = ecore_cxt_ilt_shadow_size(p_cli);
+
+ for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
+ struct ecore_dma_mem *p_dma = &p_mngr->ilt_shadow[i];
+
+ if (p_dma->p_virt)
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_dma->p_virt,
+ p_dma->p_phys, p_dma->size);
+ p_dma->p_virt = OSAL_NULL;
+ }
+ OSAL_FREE(p_hwfn->p_dev, p_mngr->ilt_shadow);
+}
+
+static enum _ecore_status_t
+ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ilt_cli_blk *p_blk,
+ enum ilt_clients ilt_client, u32 start_line_offset)
+{
+ struct ecore_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
+ u32 lines, line, sz_left, lines_to_skip = 0;
+
+ /* Special handling for RoCE that supports dynamic allocation */
+ if (ilt_client == ILT_CLI_CDUT || ilt_client == ILT_CLI_TSDM)
+ return ECORE_SUCCESS;
+
+ lines_to_skip = p_blk->dynamic_line_cnt;
+
+ if (!p_blk->total_size)
+ return ECORE_SUCCESS;
+
+ sz_left = p_blk->total_size;
+ lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip;
+ line = p_blk->start_line + start_line_offset -
+ p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip;
+
+ for (; lines; lines--) {
+ dma_addr_t p_phys;
+ void *p_virt;
+ u32 size;
+
+ size = OSAL_MIN_T(u32, sz_left, p_blk->real_size_in_page);
+
+/* @DPDK */
+#define ILT_BLOCK_ALIGN_SIZE 0x1000
+ p_virt = OSAL_DMA_ALLOC_COHERENT_ALIGNED(p_hwfn->p_dev,
+ &p_phys, size,
+ ILT_BLOCK_ALIGN_SIZE);
+ if (!p_virt)
+ return ECORE_NOMEM;
+ OSAL_MEM_ZERO(p_virt, size);
+
+ ilt_shadow[line].p_phys = p_phys;
+ ilt_shadow[line].p_virt = p_virt;
+ ilt_shadow[line].size = size;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+ "ILT shadow: Line [%d] Physical 0x%lx"
+ " Virtual %p Size %d\n",
+ line, (unsigned long)p_phys, p_virt, size);
+
+ sz_left -= size;
+ line++;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_ilt_client_cfg *clients = p_mngr->clients;
+ struct ecore_ilt_cli_blk *p_blk;
+ u32 size, i, j, k;
+ enum _ecore_status_t rc;
+
+ size = ecore_cxt_ilt_shadow_size(clients);
+ p_mngr->ilt_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ size * sizeof(struct ecore_dma_mem));
+
+ if (!p_mngr->ilt_shadow) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate ilt shadow table\n");
+ rc = ECORE_NOMEM;
+ goto ilt_shadow_fail;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+ "Allocated 0x%x bytes for ilt shadow\n",
+ (u32)(size * sizeof(struct ecore_dma_mem)));
+
+ for_each_ilt_valid_client(i, clients) {
+ for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
+ p_blk = &clients[i].pf_blks[j];
+ rc = ecore_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
+ if (rc != ECORE_SUCCESS)
+ goto ilt_shadow_fail;
+ }
+ for (k = 0; k < p_mngr->vf_count; k++) {
+ for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
+ u32 lines = clients[i].vf_total_lines * k;
+
+ p_blk = &clients[i].vf_blks[j];
+ rc = ecore_ilt_blk_alloc(p_hwfn, p_blk,
+ i, lines);
+ if (rc != ECORE_SUCCESS)
+ goto ilt_shadow_fail;
+ }
+ }
+ }
+
+ return ECORE_SUCCESS;
+
+ilt_shadow_fail:
+ ecore_ilt_shadow_free(p_hwfn);
+ return rc;
+}
+
+static void ecore_cid_map_free(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 type, vf;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ OSAL_FREE(p_hwfn->p_dev, p_mngr->acquired[type].cid_map);
+ p_mngr->acquired[type].max_count = 0;
+ p_mngr->acquired[type].start_cid = 0;
+
+ for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
+ OSAL_FREE(p_hwfn->p_dev,
+ p_mngr->acquired_vf[type][vf].cid_map);
+ p_mngr->acquired_vf[type][vf].max_count = 0;
+ p_mngr->acquired_vf[type][vf].start_cid = 0;
+ }
+ }
+}
+
+static enum _ecore_status_t
+ecore_cid_map_alloc_single(struct ecore_hwfn *p_hwfn, u32 type,
+ u32 cid_start, u32 cid_count,
+ struct ecore_cid_acquired_map *p_map)
+{
+ u32 size;
+
+ if (!cid_count)
+ return ECORE_SUCCESS;
+
+ size = MAP_WORD_SIZE * DIV_ROUND_UP(cid_count, BITS_PER_MAP_WORD);
+ p_map->cid_map = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
+ if (p_map->cid_map == OSAL_NULL)
+ return ECORE_NOMEM;
+
+ p_map->max_count = cid_count;
+ p_map->start_cid = cid_start;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
+ "Type %08x start: %08x count %08x\n",
+ type, p_map->start_cid, p_map->max_count);
+
+ return ECORE_SUCCESS;
+}
+
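+/* Worked example (illustrative numbers): on an LP64 build MAP_WORD_SIZE ==
+ * 8 and BITS_PER_MAP_WORD == 64, so a protocol with cid_count == 100 gets a
+ * bitmap of 8 * DIV_ROUND_UP(100, 64) == 16 bytes, of which the first 100
+ * bits track acquired CIDs.
+ */
+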
+static enum _ecore_status_t ecore_cid_map_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 start_cid = 0, vf_start_cid = 0;
+ u32 type, vf;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ struct ecore_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[type];
+ struct ecore_cid_acquired_map *p_map;
+
+ /* Handle PF maps */
+ p_map = &p_mngr->acquired[type];
+ if (ecore_cid_map_alloc_single(p_hwfn, type, start_cid,
+ p_cfg->cid_count, p_map))
+ goto cid_map_fail;
+
+ /* Handle VF maps */
+ for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
+ p_map = &p_mngr->acquired_vf[type][vf];
+ if (ecore_cid_map_alloc_single(p_hwfn, type,
+ vf_start_cid,
+ p_cfg->cids_per_vf,
+ p_map))
+ goto cid_map_fail;
+ }
+
+ start_cid += p_cfg->cid_count;
+ vf_start_cid += p_cfg->cids_per_vf;
+ }
+
+ return ECORE_SUCCESS;
+
+cid_map_fail:
+ ecore_cid_map_free(p_hwfn);
+ return ECORE_NOMEM;
+}
+
+enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ilt_client_cfg *clients;
+ struct ecore_cxt_mngr *p_mngr;
+ u32 i;
+
+ p_mngr = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_mngr));
+ if (!p_mngr) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate `struct ecore_cxt_mngr'\n");
+ return ECORE_NOMEM;
+ }
+
+ /* Initialize ILT client registers */
+ clients = p_mngr->clients;
+ clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
+ clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
+ clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
+
+ clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
+ clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
+ clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);
+
+ clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT);
+ clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT);
+ clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE);
+
+ clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT);
+ clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT);
+ clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE);
+
+ clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT);
+ clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT);
+ clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE);
+
+ clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
+ clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
+ clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
+
+ /* default ILT page size for all clients is 32K */
+ for (i = 0; i < ILT_CLI_MAX; i++)
+ p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
+
+ /* due to removal of ISCSI/FCoE files union type0_task_context
+ * task_type_size will be 0. So hardcoded for now.
+ */
+ p_mngr->task_type_size[0] = 512; /* @DPDK */
+ p_mngr->task_type_size[1] = 128; /* @DPDK */
+
+ if (p_hwfn->p_dev->p_iov_info)
+ p_mngr->vf_count = p_hwfn->p_dev->p_iov_info->total_vfs;
+
+ /* Initialize the dynamic ILT allocation mutex */
+ OSAL_MUTEX_ALLOC(p_hwfn, &p_mngr->mutex);
+ OSAL_MUTEX_INIT(&p_mngr->mutex);
+
+	/* Set the cxt manager pointer prior to further allocations */
+ p_hwfn->p_cxt_mngr = p_mngr;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn)
+{
+ enum _ecore_status_t rc;
+
+ /* Allocate the ILT shadow table */
+ rc = ecore_ilt_shadow_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true, "Failed to allocate ilt memory\n");
+ goto tables_alloc_fail;
+ }
+
+ /* Allocate the T2 table */
+ rc = ecore_cxt_src_t2_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true, "Failed to allocate T2 memory\n");
+ goto tables_alloc_fail;
+ }
+
+ /* Allocate and initialize the acquired cids bitmaps */
+ rc = ecore_cid_map_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true, "Failed to allocate cid maps\n");
+ goto tables_alloc_fail;
+ }
+
+ return ECORE_SUCCESS;
+
+tables_alloc_fail:
+ ecore_cxt_mngr_free(p_hwfn);
+ return rc;
+}
+
+void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn)
+{
+ if (!p_hwfn->p_cxt_mngr)
+ return;
+
+ ecore_cid_map_free(p_hwfn);
+ ecore_cxt_src_t2_free(p_hwfn);
+ ecore_ilt_shadow_free(p_hwfn);
+ OSAL_MUTEX_DEALLOC(&p_hwfn->p_cxt_mngr->mutex);
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_cxt_mngr);
+}
+
+void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_cid_acquired_map *p_map;
+ struct ecore_conn_type_cfg *p_cfg;
+ int type;
+ u32 len;
+
+ /* Reset acquired cids */
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ u32 vf;
+
+ p_cfg = &p_mngr->conn_cfg[type];
+ if (p_cfg->cid_count) {
+ p_map = &p_mngr->acquired[type];
+ len = DIV_ROUND_UP(p_map->max_count,
+ BITS_PER_MAP_WORD) *
+ MAP_WORD_SIZE;
+ OSAL_MEM_ZERO(p_map->cid_map, len);
+ }
+
+ if (!p_cfg->cids_per_vf)
+ continue;
+
+ for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
+ p_map = &p_mngr->acquired_vf[type][vf];
+ len = DIV_ROUND_UP(p_map->max_count,
+ BITS_PER_MAP_WORD) *
+ MAP_WORD_SIZE;
+ OSAL_MEM_ZERO(p_map->cid_map, len);
+ }
+ }
+}
+
+/* HW initialization helper (per Block, per phase) */
+
+/* CDU Common */
+#define CDUC_CXT_SIZE_SHIFT \
+ CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT
+
+#define CDUC_CXT_SIZE_MASK \
+ (CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)
+
+#define CDUC_BLOCK_WASTE_SHIFT \
+ CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT
+
+#define CDUC_BLOCK_WASTE_MASK \
+ (CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)
+
+#define CDUC_NCIB_SHIFT \
+ CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT
+
+#define CDUC_NCIB_MASK \
+ (CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)
+
+#define CDUT_TYPE0_CXT_SIZE_SHIFT \
+ CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT
+
+#define CDUT_TYPE0_CXT_SIZE_MASK \
+ (CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >> \
+ CDUT_TYPE0_CXT_SIZE_SHIFT)
+
+#define CDUT_TYPE0_BLOCK_WASTE_SHIFT \
+ CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT
+
+#define CDUT_TYPE0_BLOCK_WASTE_MASK \
+ (CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >> \
+ CDUT_TYPE0_BLOCK_WASTE_SHIFT)
+
+#define CDUT_TYPE0_NCIB_SHIFT \
+ CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT
+
+#define CDUT_TYPE0_NCIB_MASK \
+ (CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >> \
+ CDUT_TYPE0_NCIB_SHIFT)
+
+#define CDUT_TYPE1_CXT_SIZE_SHIFT \
+ CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT
+
+#define CDUT_TYPE1_CXT_SIZE_MASK \
+ (CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >> \
+ CDUT_TYPE1_CXT_SIZE_SHIFT)
+
+#define CDUT_TYPE1_BLOCK_WASTE_SHIFT \
+ CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT
+
+#define CDUT_TYPE1_BLOCK_WASTE_MASK \
+ (CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >> \
+ CDUT_TYPE1_BLOCK_WASTE_SHIFT)
+
+#define CDUT_TYPE1_NCIB_SHIFT \
+ CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT
+
+#define CDUT_TYPE1_NCIB_MASK \
+ (CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >> \
+ CDUT_TYPE1_NCIB_SHIFT)
+
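+/* Program the common-phase CDU runtime registers: for connection contexts
+ * (CDUC) and for both CDUT task types, store the element size, the number
+ * of elements per ILT page and the resulting block waste.
+ */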
+static void ecore_cdu_init_common(struct ecore_hwfn *p_hwfn)
+{
+ u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;
+
+ /* CDUC - connection configuration */
+ page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
+ cxt_size = CONN_CXT_SIZE(p_hwfn);
+ elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+ block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+ SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
+ SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
+ SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
+ STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);
+
+ /* CDUT - type-0 tasks configuration */
+ page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val;
+ cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0];
+ elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+ block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+ /* cxt size and block-waste are multiples of 8 */
+ cdu_params = 0;
+ SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3));
+ SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3));
+ SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page);
+ STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params);
+
+ /* CDUT - type-1 tasks configuration */
+ cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1];
+ elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+ block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+ /* cxt size and block-waste are multiples of 8 */
+ cdu_params = 0;
+ SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3));
+ SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3));
+ SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page);
+ STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params);
+}
+
+/* CDU PF */
+#define CDU_SEG_REG_TYPE_SHIFT CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT
+#define CDU_SEG_REG_TYPE_MASK 0x1
+#define CDU_SEG_REG_OFFSET_SHIFT 0
+#define CDU_SEG_REG_OFFSET_MASK CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK
+
+static void ecore_cdu_init_pf(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ilt_client_cfg *p_cli;
+ struct ecore_tid_seg *p_seg;
+ u32 cdu_seg_params, offset;
+ int i;
+
+ static const u32 rt_type_offset_arr[] = {
+ CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET
+ };
+
+ static const u32 rt_type_offset_fl_arr[] = {
+ CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET
+ };
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+
+ /* Only the CDUT client requires initialization during the PF phase */
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ /* Segment 0 */
+ p_seg = ecore_cxt_tid_seg_info(p_hwfn, i);
+ if (!p_seg)
+ continue;
+
+ /* Note: start_line is already adjusted for the CDU
+ * segment register granularity, so we just need to
+ * divide. Adjustment is implicit as we assume ILT
+ * Page size is larger than 32K!
+ */
+ offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
+ (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line -
+ p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
+
+ cdu_seg_params = 0;
+ SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
+ SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
+ STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params);
+
+ offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
+ (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line -
+ p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
+
+ cdu_seg_params = 0;
+ SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
+ SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
+ STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params);
+ }
+}
+
+void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ struct ecore_qm_iids iids;
+
+ OSAL_MEM_ZERO(&iids, sizeof(iids));
+ ecore_cxt_qm_iids(p_hwfn, &iids);
+
+ ecore_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, p_hwfn->port_id,
+ p_hwfn->rel_pf_id, qm_info->max_phys_tcs_per_port,
+ p_hwfn->first_on_engine,
+ iids.cids, iids.vf_cids, iids.tids,
+ qm_info->start_pq,
+ qm_info->num_pqs - qm_info->num_vf_pqs,
+ qm_info->num_vf_pqs,
+ qm_info->start_vport,
+ qm_info->num_vports, qm_info->pf_wfq,
+ qm_info->pf_rl, p_hwfn->qm_info.qm_pq_params,
+ p_hwfn->qm_info.qm_vport_params);
+}
+
+/* CM PF */
+void ecore_cm_init_pf(struct ecore_hwfn *p_hwfn)
+{
+ STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
+ ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB));
+}
+
+/* DQ PF */
+static void ecore_dq_init_pf(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;
+
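+ /* The DORQ max-ICID registers hold cumulative limits: each entry is
+ * the running total of CIDs (right-shifted by DQ_RANGE_SHIFT) of
+ * connection types 0..N, for the PF and the VFs respectively.
+ */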
+ dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
+
+ dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
+
+ dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
+
+ dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
+
+ dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
+
+ dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
+
+ dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);
+
+ /* Connection types 6 & 7 are not in use, yet they must be configured
+ * as the highest possible connection. Not configuring them means the
+ * defaults will be used, and with a large number of cids a bug may
+ * occur, if the defaults will be smaller than dq_pf_max_cid /
+ * dq_vf_max_cid.
+ */
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);
+
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
+}
+
+static void ecore_ilt_bounds_init(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ilt_client_cfg *ilt_clients;
+ int i;
+
+ ilt_clients = p_hwfn->p_cxt_mngr->clients;
+ for_each_ilt_valid_client(i, ilt_clients) {
+ STORE_RT_REG(p_hwfn,
+ ilt_clients[i].first.reg,
+ ilt_clients[i].first.val);
+ STORE_RT_REG(p_hwfn,
+ ilt_clients[i].last.reg, ilt_clients[i].last.val);
+ STORE_RT_REG(p_hwfn,
+ ilt_clients[i].p_size.reg,
+ ilt_clients[i].p_size.val);
+ }
+}
+
+static void ecore_ilt_vf_bounds_init(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ilt_client_cfg *p_cli;
+ u32 blk_factor;
+
+ /* For simplicity we set the 'block' to be an ILT page */
+ if (p_hwfn->p_dev->p_iov_info) {
+ struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
+
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_VF_BASE_RT_OFFSET,
+ p_iov->first_vf_in_pf);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
+ p_iov->first_vf_in_pf + p_iov->total_vfs);
+ }
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+ blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
+ if (p_cli->active) {
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET,
+ blk_factor);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
+ p_cli->pf_total_lines);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
+ p_cli->vf_total_lines);
+ }
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
+ if (p_cli->active) {
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET,
+ blk_factor);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
+ p_cli->pf_total_lines);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET,
+ p_cli->vf_total_lines);
+ }
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM];
+ blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
+ if (p_cli->active) {
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
+ p_cli->pf_total_lines);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET,
+ p_cli->vf_total_lines);
+ }
+}
+
+/* ILT (PSWRQ2) PF */
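+/* Fill the PSWRQ2 ILT runtime array: every shadow line that has backing
+ * memory gets a valid entry carrying its physical address; lines without
+ * backing memory are written as zero (invalid) entries.
+ */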
+static void ecore_ilt_init_pf(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ilt_client_cfg *clients;
+ struct ecore_cxt_mngr *p_mngr;
+ struct ecore_dma_mem *p_shdw;
+ u32 line, rt_offst, i;
+
+ ecore_ilt_bounds_init(p_hwfn);
+ ecore_ilt_vf_bounds_init(p_hwfn);
+
+ p_mngr = p_hwfn->p_cxt_mngr;
+ p_shdw = p_mngr->ilt_shadow;
+ clients = p_hwfn->p_cxt_mngr->clients;
+
+ for_each_ilt_valid_client(i, clients) {
+ /* The client's first val and the RT array are absolute; the ILT
+ * shadow lines are relative.
+ */
+ line = clients[i].first.val - p_mngr->pf_start_line;
+ rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
+ clients[i].first.val * ILT_ENTRY_IN_REGS;
+
+ for (; line <= clients[i].last.val - p_mngr->pf_start_line;
+ line++, rt_offst += ILT_ENTRY_IN_REGS) {
+ u64 ilt_hw_entry = 0;
+
+ /* p_virt could be OSAL_NULL in case of dynamic
+ * allocation
+ */
+ if (p_shdw[line].p_virt != OSAL_NULL) {
+ SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
+ SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
+ (p_shdw[line].p_phys >> 12));
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+ "Setting RT[0x%08x] from"
+ " ILT[0x%08x] [Client is %d] to"
+ " Physical addr: 0x%lx\n",
+ rt_offst, line, i,
+ (unsigned long)(p_shdw[line].
+ p_phys >> 12));
+ }
+
+ STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
+ }
+ }
+}
+
+/* SRC (Searcher) PF */
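+/* Program the searcher free-list counters and hash size; the number of hash
+ * bits is log2 of the connection count rounded up to a power of two, with a
+ * lower bound of SRC_MIN_NUM_ELEMS.
+ */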
+static void ecore_src_init_pf(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 rounded_conn_num, conn_num, conn_max;
+ struct ecore_src_iids src_iids;
+
+ OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
+ ecore_cxt_src_iids(p_hwfn, p_mngr, &src_iids);
+ conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
+ if (!conn_num)
+ return;
+
+ conn_max = OSAL_MAX_T(u32, conn_num, SRC_MIN_NUM_ELEMS);
+ rounded_conn_num = OSAL_ROUNDUP_POW_OF_TWO(conn_max);
+
+ STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, conn_num);
+ STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET,
+ OSAL_LOG2(rounded_conn_num));
+
+ STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
+ p_hwfn->p_cxt_mngr->first_free);
+ STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
+ p_hwfn->p_cxt_mngr->last_free);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+ "Configured SEARCHER for 0x%08x connections\n",
+ conn_num);
+}
+
+/* Timers PF */
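+/* Layout of a 64-bit timers config word: connection entries use NUM_IDS,
+ * PRE_SCAN_OFFSET, PARENT_PF and CID_PRE_SCAN_ROWS; task entries reuse the
+ * upper bits for TID_OFFSET and TID_PRE_SCAN_ROWS instead.
+ */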
+#define TM_CFG_NUM_IDS_SHIFT 0
+#define TM_CFG_NUM_IDS_MASK 0xFFFFULL
+#define TM_CFG_PRE_SCAN_OFFSET_SHIFT 16
+#define TM_CFG_PRE_SCAN_OFFSET_MASK 0x1FFULL
+#define TM_CFG_PARENT_PF_SHIFT 25
+#define TM_CFG_PARENT_PF_MASK 0x7ULL
+
+#define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT 30
+#define TM_CFG_CID_PRE_SCAN_ROWS_MASK 0x1FFULL
+
+#define TM_CFG_TID_OFFSET_SHIFT 30
+#define TM_CFG_TID_OFFSET_MASK 0x7FFFFULL
+#define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT 49
+#define TM_CFG_TID_PRE_SCAN_ROWS_MASK 0x1FFULL
+
+static void ecore_tm_init_pf(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 active_seg_mask = 0, tm_offset, rt_reg;
+ struct ecore_tm_iids tm_iids;
+ u64 cfg_word;
+ u8 i;
+
+ OSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids));
+ ecore_cxt_tm_iids(p_mngr, &tm_iids);
+
+ /* @@@TBD No pre-scan for now */
+
+ /* Note: We assume consecutive VFs for a PF */
+ for (i = 0; i < p_mngr->vf_count; i++) {
+ cfg_word = 0;
+ SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
+ SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+ SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
+ SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */
+
+ rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
+ (sizeof(cfg_word) / sizeof(u32)) *
+ (p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
+ STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+ }
+
+ cfg_word = 0;
+ SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids);
+ SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+ SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0); /* n/a for PF */
+ SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */
+
+ rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
+ (sizeof(cfg_word) / sizeof(u32)) *
+ (NUM_OF_VFS(p_hwfn->p_dev) + p_hwfn->rel_pf_id);
+ STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+
+ /* enable scan */
+ STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
+ tm_iids.pf_cids ? 0x1 : 0x0);
+
+ /* @@@TBD how to enable the scan for the VFs */
+
+ tm_offset = tm_iids.per_vf_cids;
+
+ /* Note: We assume consecutive VFs for a PF */
+ for (i = 0; i < p_mngr->vf_count; i++) {
+ cfg_word = 0;
+ SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids);
+ SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+ SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
+ SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
+ SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64)0);
+
+ rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
+ (sizeof(cfg_word) / sizeof(u32)) *
+ (p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
+
+ STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+ }
+
+ tm_offset = tm_iids.pf_cids;
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ cfg_word = 0;
+ SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]);
+ SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+ SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);
+ SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
+ SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64)0);
+
+ rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
+ (sizeof(cfg_word) / sizeof(u32)) *
+ (NUM_OF_VFS(p_hwfn->p_dev) +
+ p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
+
+ STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+ active_seg_mask |= (tm_iids.pf_tids[i] ? (1 << i) : 0);
+
+ tm_offset += tm_iids.pf_tids[i];
+ }
+
+ STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);
+
+ /* @@@TBD how to enable the scan for the VFs */
+}
+
+static void ecore_prs_init_pf(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_conn_type_cfg *p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE];
+ struct ecore_tid_seg *p_tid;
+
+ /* If FCoE is active set the MAX OX_ID (tid) in the Parser */
+ if (!p_fcoe->cid_count)
+ return;
+
+ p_tid = &p_fcoe->tid_seg[ECORE_CXT_FCOE_TID_SEG];
+ STORE_RT_REG_AGG(p_hwfn,
+ PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET,
+ p_tid->count);
+}
+
+void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn)
+{
+ /* CDU configuration */
+ ecore_cdu_init_common(p_hwfn);
+}
+
+void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn)
+{
+ ecore_qm_init_pf(p_hwfn);
+ ecore_cm_init_pf(p_hwfn);
+ ecore_dq_init_pf(p_hwfn);
+ ecore_cdu_init_pf(p_hwfn);
+ ecore_ilt_init_pf(p_hwfn);
+ ecore_src_init_pf(p_hwfn);
+ ecore_tm_init_pf(p_hwfn);
+ ecore_prs_init_pf(p_hwfn);
+}
+
+enum _ecore_status_t _ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *p_cid, u8 vfid)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_cid_acquired_map *p_map;
+ u32 rel_cid;
+
+ if (type >= MAX_CONN_TYPES) {
+ DP_NOTICE(p_hwfn, true, "Invalid protocol type %d", type);
+ return ECORE_INVAL;
+ }
+
+ if (vfid >= COMMON_MAX_NUM_VFS && vfid != ECORE_CXT_PF_CID) {
+ DP_NOTICE(p_hwfn, true, "VF [%02x] is out of range\n", vfid);
+ return ECORE_INVAL;
+ }
+
+ /* Determine the right map to take this CID from */
+ if (vfid == ECORE_CXT_PF_CID)
+ p_map = &p_mngr->acquired[type];
+ else
+ p_map = &p_mngr->acquired_vf[type][vfid];
+
+ if (p_map->cid_map == OSAL_NULL) {
+ DP_NOTICE(p_hwfn, true, "Invalid protocol type %d", type);
+ return ECORE_INVAL;
+ }
+
+ rel_cid = OSAL_FIND_FIRST_ZERO_BIT(p_map->cid_map,
+ p_map->max_count);
+
+ if (rel_cid >= p_map->max_count) {
+ DP_NOTICE(p_hwfn, false, "no CID available for protocol %d\n",
+ type);
+ return ECORE_NORESOURCES;
+ }
+
+ OSAL_SET_BIT(rel_cid, p_map->cid_map);
+
+ *p_cid = rel_cid + p_map->start_cid;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
+ "Acquired cid 0x%08x [rel. %08x] vfid %02x type %d\n",
+ *p_cid, rel_cid, vfid, type);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *p_cid)
+{
+ return _ecore_cxt_acquire_cid(p_hwfn, type, p_cid, ECORE_CXT_PF_CID);
+}
+
+static bool ecore_cxt_test_cid_acquired(struct ecore_hwfn *p_hwfn,
+ u32 cid, u8 vfid,
+ enum protocol_type *p_type,
+ struct ecore_cid_acquired_map **pp_map)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 rel_cid;
+
+ /* Iterate over protocols and find matching cid range */
+ for (*p_type = 0; *p_type < MAX_CONN_TYPES; (*p_type)++) {
+ if (vfid == ECORE_CXT_PF_CID)
+ *pp_map = &p_mngr->acquired[*p_type];
+ else
+ *pp_map = &p_mngr->acquired_vf[*p_type][vfid];
+
+ if (!((*pp_map)->cid_map))
+ continue;
+ if (cid >= (*pp_map)->start_cid &&
+ cid < (*pp_map)->start_cid + (*pp_map)->max_count) {
+ break;
+ }
+ }
+ if (*p_type == MAX_CONN_TYPES) {
+ DP_NOTICE(p_hwfn, true, "Invalid CID %d vfid %02x", cid, vfid);
+ goto fail;
+ }
+
+ rel_cid = cid - (*pp_map)->start_cid;
+ if (!OSAL_TEST_BIT(rel_cid, (*pp_map)->cid_map)) {
+ DP_NOTICE(p_hwfn, true,
+ "CID %d [vifd %02x] not acquired", cid, vfid);
+ goto fail;
+ }
+
+ return true;
+fail:
+ *p_type = MAX_CONN_TYPES;
+ *pp_map = OSAL_NULL;
+ return false;
+}
+
+void _ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid, u8 vfid)
+{
+ struct ecore_cid_acquired_map *p_map = OSAL_NULL;
+ enum protocol_type type;
+ bool b_acquired;
+ u32 rel_cid;
+
+ if (vfid != ECORE_CXT_PF_CID && vfid > COMMON_MAX_NUM_VFS) {
+ DP_NOTICE(p_hwfn, true,
+ "Trying to return incorrect CID belonging to VF %02x\n",
+ vfid);
+ return;
+ }
+
+ /* Test acquired and find matching per-protocol map */
+ b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, cid, vfid,
+ &type, &p_map);
+
+ if (!b_acquired)
+ return;
+
+ rel_cid = cid - p_map->start_cid;
+ OSAL_CLEAR_BIT(rel_cid, p_map->cid_map);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
+ "Released CID 0x%08x [rel. %08x] vfid %02x type %d\n",
+ cid, rel_cid, vfid, type);
+}
+
+void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid)
+{
+ _ecore_cxt_release_cid(p_hwfn, cid, ECORE_CXT_PF_CID);
+}
+
+enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_cxt_info *p_info)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_cid_acquired_map *p_map = OSAL_NULL;
+ u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
+ enum protocol_type type;
+ bool b_acquired;
+
+ /* Test acquired and find matching per-protocol map */
+ b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, p_info->iid,
+ ECORE_CXT_PF_CID,
+ &type, &p_map);
+
+ if (!b_acquired)
+ return ECORE_INVAL;
+
+ /* set the protocol type */
+ p_info->type = type;
+
+ /* compute context virtual pointer */
+ hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
+
+ conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
+ cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
+ line = p_info->iid / cxts_per_p;
+
+ /* Make sure context is allocated (dynamic allocation) */
+ if (!p_mngr->ilt_shadow[line].p_virt)
+ return ECORE_INVAL;
+
+ p_info->p_cxt = (u8 *)p_mngr->ilt_shadow[line].p_virt +
+ p_info->iid % cxts_per_p * conn_cxt_size;
+
+ DP_VERBOSE(p_hwfn, (ECORE_MSG_ILT | ECORE_MSG_CXT),
+ "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
+ (p_info->iid / cxts_per_p), p_info->p_cxt, p_info->iid);
+
+ return ECORE_SUCCESS;
+}
+
+static void ecore_cxt_set_srq_count(struct ecore_hwfn *p_hwfn, u32 num_srqs)
+{
+ struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+
+ p_mgr->srq_count = num_srqs;
+}
+
+u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+
+ return p_mgr->srq_count;
+}
+
+enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
+{
+ /* Set the number of required CORE connections */
+ u32 core_cids = 1; /* SPQ */
+
+ ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
+
+ switch (p_hwfn->hw_info.personality) {
+ case ECORE_PCI_ETH:
+ {
+ struct ecore_eth_pf_params *p_params =
+ &p_hwfn->pf_params.eth_pf_params;
+
+ /* TODO - we probably want to add the VF number to the PF
+ * params; as of now, 16 * 2 CIDs are allocated per VF [to
+ * retain regular functionality].
+ */
+ ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
+ p_params->num_cons, 32);
+ p_hwfn->p_cxt_mngr->arfs_count =
+ p_params->num_arfs_filters;
+ break;
+ }
+ default:
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+/* This function is very RoCE oriented; if another protocol wants this
+ * feature in the future, the function will need to be made more generic.
+ */
+enum _ecore_status_t
+ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
+ enum ecore_cxt_elem_type elem_type,
+ u32 iid)
+{
+ u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line;
+ struct ecore_ilt_client_cfg *p_cli;
+ struct ecore_ilt_cli_blk *p_blk;
+ struct ecore_ptt *p_ptt;
+ dma_addr_t p_phys;
+ u64 ilt_hw_entry;
+ void *p_virt;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ switch (elem_type) {
+ case ECORE_ELEM_CXT:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+ elem_size = CONN_CXT_SIZE(p_hwfn);
+ p_blk = &p_cli->pf_blks[CDUC_BLK];
+ break;
+ case ECORE_ELEM_SRQ:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
+ elem_size = SRQ_CXT_SIZE;
+ p_blk = &p_cli->pf_blks[SRQ_BLK];
+ break;
+ case ECORE_ELEM_TASK:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
+ p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(ECORE_CXT_ROCE_TID_SEG)];
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false,
+ "ECORE_INVALID elem type = %d", elem_type);
+ return ECORE_INVAL;
+ }
+
+ /* Calculate line in ilt */
+ hw_p_size = p_cli->p_size.val;
+ elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
+ line = p_blk->start_line + (iid / elems_per_p);
+ shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line;
+
+ /* If the line is already allocated, do nothing; otherwise, allocate it
+ * and write it to the PSWRQ2 registers.
+ * This section can be run in parallel from different contexts, so
+ * mutex protection is needed.
+ */
+
+ OSAL_MUTEX_ACQUIRE(&p_hwfn->p_cxt_mngr->mutex);
+
+ if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt)
+ goto out0;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_NOTICE(p_hwfn, false,
+ "ECORE_TIME_OUT on ptt acquire - dynamic allocation");
+ rc = ECORE_TIMEOUT;
+ goto out0;
+ }
+
+ p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+ &p_phys,
+ p_blk->real_size_in_page);
+ if (!p_virt) {
+ rc = ECORE_NOMEM;
+ goto out1;
+ }
+ OSAL_MEM_ZERO(p_virt, p_blk->real_size_in_page);
+
+ p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt = p_virt;
+ p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys = p_phys;
+ p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
+ p_blk->real_size_in_page;
+
+ /* compute absolute offset */
+ reg_offset = PSWRQ2_REG_ILT_MEMORY +
+ (line * ILT_REG_SIZE_IN_BYTES * ILT_ENTRY_IN_REGS);
+
+ ilt_hw_entry = 0;
+ SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
+ SET_FIELD(ilt_hw_entry,
+ ILT_ENTRY_PHY_ADDR,
+ (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys >> 12));
+
+/* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
+
+ ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&ilt_hw_entry,
+ reg_offset, sizeof(ilt_hw_entry) / sizeof(u32),
+ 0 /* no flags */);
+
+ if (elem_type == ECORE_ELEM_CXT) {
+ u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
+ elems_per_p;
+
+ /* Update the relevant register in the parser */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF,
+ last_cid_allocated - 1);
+
+ if (!p_hwfn->b_rdma_enabled_in_prs) {
+ /* Enable RoCE search */
+ ecore_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
+ p_hwfn->b_rdma_enabled_in_prs = true;
+ }
+ }
+
+out1:
+ ecore_ptt_release(p_hwfn, p_ptt);
+out0:
+ OSAL_MUTEX_RELEASE(&p_hwfn->p_cxt_mngr->mutex);
+
+ return rc;
+}
+
+/* This function is very RoCE oriented; if another protocol wants this
+ * feature in the future, the function will need to be made more generic.
+ */
+static enum _ecore_status_t
+ecore_cxt_free_ilt_range(struct ecore_hwfn *p_hwfn,
+ enum ecore_cxt_elem_type elem_type,
+ u32 start_iid, u32 count)
+{
+ u32 start_line, end_line, shadow_start_line, shadow_end_line;
+ u32 reg_offset, elem_size, hw_p_size, elems_per_p;
+ struct ecore_ilt_client_cfg *p_cli;
+ struct ecore_ilt_cli_blk *p_blk;
+ u32 end_iid = start_iid + count;
+ struct ecore_ptt *p_ptt;
+ u64 ilt_hw_entry = 0;
+ u32 i;
+
+ switch (elem_type) {
+ case ECORE_ELEM_CXT:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+ elem_size = CONN_CXT_SIZE(p_hwfn);
+ p_blk = &p_cli->pf_blks[CDUC_BLK];
+ break;
+ case ECORE_ELEM_SRQ:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
+ elem_size = SRQ_CXT_SIZE;
+ p_blk = &p_cli->pf_blks[SRQ_BLK];
+ break;
+ case ECORE_ELEM_TASK:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
+ p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(ECORE_CXT_ROCE_TID_SEG)];
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false,
+ "ECORE_INVALID elem type = %d", elem_type);
+ return ECORE_INVAL;
+ }
+
+ /* Calculate line in ilt */
+ hw_p_size = p_cli->p_size.val;
+ elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
+ start_line = p_blk->start_line + (start_iid / elems_per_p);
+ end_line = p_blk->start_line + (end_iid / elems_per_p);
+ if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p))
+ end_line--;
+
+ shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line;
+ shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_NOTICE(p_hwfn, false,
+ "ECORE_TIME_OUT on ptt acquire - dynamic allocation");
+ return ECORE_TIMEOUT;
+ }
+
+ for (i = shadow_start_line; i < shadow_end_line; i++) {
+ if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt)
+ continue;
+
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt,
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys,
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].size);
+
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = OSAL_NULL;
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0;
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
+
+ /* compute absolute offset */
+ reg_offset = PSWRQ2_REG_ILT_MEMORY +
+ ((start_line++) * ILT_REG_SIZE_IN_BYTES *
+ ILT_ENTRY_IN_REGS);
+
+ /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a
+ * wide-bus.
+ */
+ ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(osal_uintptr_t)&ilt_hw_entry,
+ reg_offset,
+ sizeof(ilt_hw_entry) / sizeof(u32),
+ 0 /* no flags */);
+ }
+
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn,
+ enum protocol_type proto)
+{
+ enum _ecore_status_t rc;
+ u32 cid;
+
+ /* Free Connection CXT */
+ rc = ecore_cxt_free_ilt_range(p_hwfn, ECORE_ELEM_CXT,
+ ecore_cxt_get_proto_cid_start(p_hwfn,
+ proto),
+ ecore_cxt_get_proto_cid_count(p_hwfn,
+ proto,
+ &cid));
+
+ if (rc)
+ return rc;
+
+ /* Free Task CXT */
+ rc = ecore_cxt_free_ilt_range(p_hwfn, ECORE_ELEM_TASK, 0,
+ ecore_cxt_get_proto_tid_count(p_hwfn,
+ proto));
+ if (rc)
+ return rc;
+
+ /* Free TSDM CXT */
+ rc = ecore_cxt_free_ilt_range(p_hwfn, ECORE_ELEM_SRQ, 0,
+ ecore_cxt_get_srq_count(p_hwfn));
+
+ return rc;
+}
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_cxt.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_cxt.h
new file mode 100644
index 00000000..6ff823a5
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_cxt.h
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef _ECORE_CID_
+#define _ECORE_CID_
+
+#include "ecore_hsi_common.h"
+#include "ecore_proto_if.h"
+#include "ecore_cxt_api.h"
+
+/* Tasks segments definitions */
+#define ECORE_CXT_ISCSI_TID_SEG PROTOCOLID_ISCSI /* 0 */
+#define ECORE_CXT_FCOE_TID_SEG PROTOCOLID_FCOE /* 1 */
+#define ECORE_CXT_ROCE_TID_SEG PROTOCOLID_ROCE /* 2 */
+
+enum ecore_cxt_elem_type {
+ ECORE_ELEM_CXT,
+ ECORE_ELEM_SRQ,
+ ECORE_ELEM_TASK
+};
+
+u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *vf_cid);
+
+u32 ecore_cxt_get_proto_tid_count(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type);
+
+u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type);
+u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_set_pf_params - Set the PF params for cxt init
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_cfg_ilt_compute - compute ILT init parameters
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_mngr_alloc - Allocate and init the context manager struct
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_mngr_free
+ *
+ * @param p_hwfn
+ */
+void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired
+ * map
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_mngr_setup - Reset the acquired CIDs
+ *
+ * @param p_hwfn
+ */
+void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_hw_init_common - Initialize ILT and DQ, common phase, per
+ * path.
+ *
+ * @param p_hwfn
+ */
+void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_hw_init_pf - Initialize ILT and DQ, PF phase, per path.
+ *
+ * @param p_hwfn
+ */
+void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_qm_init_pf - Initialize the QM PF phase, per path
+ *
+ * @param p_hwfn
+ */
+void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn);
+
+ /**
+ * @brief Reconfigures the QM PF on the fly
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
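+/* vfid value indicating that a CID belongs to the PF rather than a VF */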
+#define ECORE_CXT_PF_CID (0xff)
+
+/**
+ * @brief ecore_cxt_release_cid - Release a cid
+ *
+ * @param p_hwfn
+ * @param cid
+ */
+void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid);
+
+/**
+ * @brief _ecore_cxt_release_cid - Release a cid belonging to a vf-queue
+ *
+ * @param p_hwfn
+ * @param cid
+ * @param vfid - engine relative index. ECORE_CXT_PF_CID if belongs to PF
+ */
+void _ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn,
+ u32 cid, u8 vfid);
+
+/**
+ * @brief ecore_cxt_acquire_cid - Acquire a new cid of a specific protocol type
+ *
+ * @param p_hwfn
+ * @param type
+ * @param p_cid
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *p_cid);
+
+/**
+ * @brief _ecore_cxt_acquire_cid - Acquire a new cid of a specific protocol
+ * type for a vf-queue
+ *
+ * @param p_hwfn
+ * @param type
+ * @param p_cid
+ * @param vfid - engine relative index. ECORE_CXT_PF_CID if belongs to PF
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t _ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *p_cid, u8 vfid);
+
+/**
+ * @brief ecore_cxt_dynamic_ilt_alloc - checks whether the ILT page
+ * containing the given iid is already allocated and, if not,
+ * allocates the page.
+ *
+ * @param p_hwfn
+ * @param elem_type
+ * @param iid
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
+ enum ecore_cxt_elem_type elem_type,
+ u32 iid);
+
+/**
+ * @brief ecore_cxt_free_proto_ilt - frees the ilt pages
+ * associated with the given protocol.
+ *
+ * @param p_hwfn
+ * @param proto
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn,
+ enum protocol_type proto);
+
+#define ECORE_CTX_WORKING_MEM 0
+#define ECORE_CTX_FL_MEM 1
+
+#endif /* _ECORE_CID_ */
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_cxt_api.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_cxt_api.h
new file mode 100644
index 00000000..6d87620d
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_cxt_api.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_CXT_API_H__
+#define __ECORE_CXT_API_H__
+
+struct ecore_hwfn;
+
+struct ecore_cxt_info {
+ void *p_cxt;
+ u32 iid;
+ enum protocol_type type;
+};
+
+#define MAX_TID_BLOCKS 512
+struct ecore_tid_mem {
+ u32 tid_size;
+ u32 num_tids_per_block;
+ u32 waste;
+ u8 *blocks[MAX_TID_BLOCKS]; /* 4K */
+};
+
+/**
+* @brief ecore_cxt_get_cid_info - Returns the context info for a specific cid
+*
+*
+* @param p_hwfn
+* @param p_info in/out
+*
+* @return enum _ecore_status_t
+*/
+enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_cxt_info *p_info);
+
+#endif
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_dcbx.c b/src/seastar/dpdk/drivers/net/qede/base/ecore_dcbx.c
new file mode 100644
index 00000000..4f1b0698
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_dcbx.c
@@ -0,0 +1,1314 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "ecore.h"
+#include "ecore_sp_commands.h"
+#include "ecore_dcbx.h"
+#include "ecore_cxt.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore_iro.h"
+#include "ecore_iov_api.h"
+
+#define ECORE_DCBX_MAX_MIB_READ_TRY (100)
+#define ECORE_ETH_TYPE_DEFAULT (0)
+
+#define ECORE_DCBX_INVALID_PRIORITY 0xFF
+
+/* Get the Traffic Class from the priority-to-TC table; 4 bits represent
+ * the traffic class corresponding to each priority.
+ */
+#define ECORE_DCBX_PRIO2TC(prio_tc_tbl, prio) \
+ ((u32)(prio_tc_tbl >> ((7 - prio) * 4)) & 0x7)
+
+static bool ecore_dcbx_app_ethtype(u32 app_info_bitmap)
+{
+ return !!(ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
+ DCBX_APP_SF_ETHTYPE);
+}
+
+static bool ecore_dcbx_ieee_app_ethtype(u32 app_info_bitmap)
+{
+ u8 mfw_val = ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
+
+ /* Old MFW */
+ if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
+ return ecore_dcbx_app_ethtype(app_info_bitmap);
+
+ return !!(mfw_val == DCBX_APP_SF_IEEE_ETHTYPE);
+}
+
+static bool ecore_dcbx_app_port(u32 app_info_bitmap)
+{
+ return !!(ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
+ DCBX_APP_SF_PORT);
+}
+
+static bool ecore_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type)
+{
+ u8 mfw_val = ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
+
+ /* Old MFW */
+ if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
+ return ecore_dcbx_app_port(app_info_bitmap);
+
+ return !!(mfw_val == type || mfw_val == DCBX_APP_SF_IEEE_TCP_UDP_PORT);
+}
+
+static bool ecore_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
+{
+ bool ethtype;
+
+ if (ieee)
+ ethtype = ecore_dcbx_ieee_app_ethtype(app_info_bitmap);
+ else
+ ethtype = ecore_dcbx_app_ethtype(app_info_bitmap);
+
+ return !!(ethtype && (proto_id == ECORE_ETH_TYPE_DEFAULT));
+}
+
+static bool ecore_dcbx_iwarp_tlv(struct ecore_hwfn *p_hwfn, u32 app_info_bitmap,
+ u16 proto_id, bool ieee)
+{
+ bool port;
+
+ if (!p_hwfn->p_dcbx_info->iwarp_port)
+ return false;
+
+ if (ieee)
+ port = ecore_dcbx_ieee_app_port(app_info_bitmap,
+ DCBX_APP_SF_IEEE_TCP_PORT);
+ else
+ port = ecore_dcbx_app_port(app_info_bitmap);
+
+ return !!(port && (proto_id == p_hwfn->p_dcbx_info->iwarp_port));
+}
+
+static void
+ecore_dcbx_dp_protocol(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_results *p_data)
+{
+ enum dcbx_protocol_type id;
+ int i;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "DCBX negotiated: %d\n",
+ p_data->dcbx_enabled);
+
+ for (i = 0; i < OSAL_ARRAY_SIZE(ecore_dcbx_app_update); i++) {
+ id = ecore_dcbx_app_update[i].id;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "%s info: update %d, enable %d, prio %d, tc %d,"
+ " num_active_tc %d dscp_enable = %d dscp_val = %d\n",
+ ecore_dcbx_app_update[i].name,
+ p_data->arr[id].update,
+ p_data->arr[id].enable, p_data->arr[id].priority,
+ p_data->arr[id].tc, p_hwfn->hw_info.num_active_tc,
+ p_data->arr[id].dscp_enable,
+ p_data->arr[id].dscp_val);
+ }
+}
+
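+/* Fill the per-protocol PF-update ramrod data (enable/priority/tc/update
+ * flags), translate the priority to a DSCP value when DSCP is enabled, and
+ * record the offload TC for a later QM reconfiguration.
+ */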
+static void
+ecore_dcbx_set_params(struct ecore_dcbx_results *p_data,
+ struct ecore_hwfn *p_hwfn,
+ bool enable, u8 prio, u8 tc,
+ enum dcbx_protocol_type type,
+ enum ecore_pci_personality personality)
+{
+ struct ecore_dcbx_dscp_params *dscp = &p_hwfn->p_dcbx_info->get.dscp;
+
+ /* PF update ramrod data */
+ p_data->arr[type].enable = enable;
+ p_data->arr[type].priority = prio;
+ p_data->arr[type].tc = tc;
+ p_data->arr[type].dscp_enable = dscp->enabled;
+ if (p_data->arr[type].dscp_enable) {
+ u8 i;
+
+ for (i = 0; i < ECORE_DCBX_DSCP_SIZE; i++)
+ if (prio == dscp->dscp_pri_map[i]) {
+ p_data->arr[type].dscp_val = i;
+ break;
+ }
+ }
+
+ if (enable && p_data->arr[type].dscp_enable)
+ p_data->arr[type].update = UPDATE_DCB_DSCP;
+ else if (enable)
+ p_data->arr[type].update = UPDATE_DCB;
+ else
+ p_data->arr[type].update = DONT_UPDATE_DCB_DSCP;
+
+ /* QM reconf data */
+ if (p_hwfn->hw_info.personality == personality)
+ p_hwfn->hw_info.offload_tc = tc;
+}
+
+/* Update app protocol data and hw_info fields with the TLV info */
+static void
+ecore_dcbx_update_app_info(struct ecore_dcbx_results *p_data,
+ struct ecore_hwfn *p_hwfn,
+ bool enable, u8 prio, u8 tc,
+ enum dcbx_protocol_type type)
+{
+ enum ecore_pci_personality personality;
+ enum dcbx_protocol_type id;
+ const char *name; /* @DPDK */
+ int i;
+
+ for (i = 0; i < OSAL_ARRAY_SIZE(ecore_dcbx_app_update); i++) {
+ id = ecore_dcbx_app_update[i].id;
+
+ if (type != id)
+ continue;
+
+ personality = ecore_dcbx_app_update[i].personality;
+ name = ecore_dcbx_app_update[i].name;
+
+ ecore_dcbx_set_params(p_data, p_hwfn, enable,
+ prio, tc, type, personality);
+ }
+}
+
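+/* Translate an APP TLV priority bitmap into a single priority: a bitmap of 1
+ * maps to priority 0, otherwise the highest set bit wins; an empty bitmap is
+ * reported as ECORE_INVAL.
+ */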
+static enum _ecore_status_t
+ecore_dcbx_get_app_priority(u8 pri_bitmap, u8 *priority)
+{
+ u32 pri_mask, pri = ECORE_MAX_PFC_PRIORITIES;
+ u32 index = ECORE_MAX_PFC_PRIORITIES - 1;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* Bitmap 1 corresponds to priority 0, return priority 0 */
+ if (pri_bitmap == 1) {
+ *priority = 0;
+ return rc;
+ }
+
+ /* Choose the highest priority */
+ while ((pri == ECORE_MAX_PFC_PRIORITIES) && index) {
+ pri_mask = 1 << index;
+ if (pri_bitmap & pri_mask)
+ pri = index;
+ index--;
+ }
+
+ if (pri < ECORE_MAX_PFC_PRIORITIES)
+ *priority = (u8)pri;
+ else
+ rc = ECORE_INVAL;
+
+ return rc;
+}
+
+static bool
+ecore_dcbx_get_app_protocol_type(struct ecore_hwfn *p_hwfn,
+ u32 app_prio_bitmap, u16 id,
+ enum dcbx_protocol_type *type, bool ieee)
+{
+ if (ecore_dcbx_default_tlv(app_prio_bitmap, id, ieee)) {
+ *type = DCBX_PROTOCOL_ETH;
+ } else {
+ *type = DCBX_MAX_PROTOCOL_TYPE;
+ DP_ERR(p_hwfn,
+ "No action required, App TLV id = 0x%x"
+ " app_prio_bitmap = 0x%x\n",
+ id, app_prio_bitmap);
+ return false;
+ }
+
+ return true;
+}
+
+/* Parse app TLV's to update TC information in hw_info structure for
+ * reconfiguring QM. Get protocol specific data for PF update ramrod command.
+ */
+static enum _ecore_status_t
+ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_results *p_data,
+ struct dcbx_app_priority_entry *p_tbl, u32 pri_tc_tbl,
+ int count, u8 dcbx_version)
+{
+ enum dcbx_protocol_type type;
+ u8 tc, priority_map;
+ bool enable, ieee;
+ u16 protocol_id;
+ u8 priority;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ int i;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "Num APP entries = %d pri_tc_tbl = 0x%x dcbx_version = %u\n",
+ count, pri_tc_tbl, dcbx_version);
+
+ ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE);
+ /* Parse APP TLV */
+ for (i = 0; i < count; i++) {
+ protocol_id = ECORE_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_PROTOCOL_ID);
+ priority_map = ECORE_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_PRI_MAP);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "Id = 0x%x pri_map = %u\n",
+ protocol_id, priority_map);
+ rc = ecore_dcbx_get_app_priority(priority_map, &priority);
+ if (rc == ECORE_INVAL) {
+ DP_ERR(p_hwfn, "Invalid priority\n");
+ return ECORE_INVAL;
+ }
+
+ tc = ECORE_DCBX_PRIO2TC(pri_tc_tbl, priority);
+ if (ecore_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
+ protocol_id, &type,
+ ieee)) {
+ /* ETH always has the enable bit reset, as it gets
+ * vlan information per packet. For other protocols,
+ * it should be set according to the dcbx_enabled
+ * indication, but we only got here if there was an
+ * app tlv for the protocol, so dcbx must be enabled.
+ */
+ enable = !(type == DCBX_PROTOCOL_ETH);
+
+ ecore_dcbx_update_app_info(p_data, p_hwfn, enable,
+ priority, tc, type);
+ }
+ }
+ /* Update ramrod protocol data and hw_info fields
+ * with default info when corresponding APP TLV's are not detected.
+ * The enabled field has a different logic for ethernet: only for
+ * ethernet should DCB be disabled by default, as the information
+ * arrives from the OS (unless an explicit app tlv was present).
+ */
+ tc = p_data->arr[DCBX_PROTOCOL_ETH].tc;
+ priority = p_data->arr[DCBX_PROTOCOL_ETH].priority;
+ for (type = 0; type < DCBX_MAX_PROTOCOL_TYPE; type++) {
+ if (p_data->arr[type].update)
+ continue;
+
+ enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version;
+ ecore_dcbx_update_app_info(p_data, p_hwfn, enable,
+ priority, tc, type);
+ }
+
+ return ECORE_SUCCESS;
+}
+
+/* Parse app TLV's to update TC information in hw_info structure for
+ * reconfiguring QM. Get protocol specific data for PF update ramrod command.
+ */
+static enum _ecore_status_t
+ecore_dcbx_process_mib_info(struct ecore_hwfn *p_hwfn)
+{
+ struct dcbx_app_priority_feature *p_app;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct ecore_dcbx_results data = { 0 };
+ struct dcbx_app_priority_entry *p_tbl;
+ struct dcbx_ets_feature *p_ets;
+ struct ecore_hw_info *p_info;
+ u32 pri_tc_tbl, flags;
+ u8 dcbx_version;
+ int num_entries;
+
+ flags = p_hwfn->p_dcbx_info->operational.flags;
+ dcbx_version = ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION);
+
+ p_app = &p_hwfn->p_dcbx_info->operational.features.app;
+ p_tbl = p_app->app_pri_tbl;
+
+ p_ets = &p_hwfn->p_dcbx_info->operational.features.ets;
+ pri_tc_tbl = p_ets->pri_tc_tbl[0];
+
+ p_info = &p_hwfn->hw_info;
+ num_entries = ECORE_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
+
+ rc = ecore_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl,
+ num_entries, dcbx_version);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_info->num_active_tc = ECORE_MFW_GET_FIELD(p_ets->flags,
+ DCBX_ETS_MAX_TCS);
+ p_hwfn->qm_info.ooo_tc = ECORE_MFW_GET_FIELD(p_ets->flags, DCBX_OOO_TC);
+ data.pf_id = p_hwfn->rel_pf_id;
+ data.dcbx_enabled = !!dcbx_version;
+
+ ecore_dcbx_dp_protocol(p_hwfn, &data);
+
+ OSAL_MEMCPY(&p_hwfn->p_dcbx_info->results, &data,
+ sizeof(struct ecore_dcbx_results));
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_dcbx_copy_mib(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_dcbx_mib_meta_data *p_data,
+ enum ecore_mib_read_type type)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 prefix_seq_num, suffix_seq_num;
+ int read_count = 0;
+
+ /* The data is considered to be valid only if both sequence numbers are
+ * the same.
+ */
+ do {
+ if (type == ECORE_DCBX_REMOTE_LLDP_MIB) {
+ ecore_memcpy_from(p_hwfn, p_ptt, p_data->lldp_remote,
+ p_data->addr, p_data->size);
+ prefix_seq_num = p_data->lldp_remote->prefix_seq_num;
+ suffix_seq_num = p_data->lldp_remote->suffix_seq_num;
+ } else {
+ ecore_memcpy_from(p_hwfn, p_ptt, p_data->mib,
+ p_data->addr, p_data->size);
+ prefix_seq_num = p_data->mib->prefix_seq_num;
+ suffix_seq_num = p_data->mib->suffix_seq_num;
+ }
+ read_count++;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "mib type = %d, try count = %d prefix seq num ="
+ " %d suffix seq num = %d\n",
+ type, read_count, prefix_seq_num, suffix_seq_num);
+ } while ((prefix_seq_num != suffix_seq_num) &&
+ (read_count < ECORE_DCBX_MAX_MIB_READ_TRY));
+
+ if (read_count >= ECORE_DCBX_MAX_MIB_READ_TRY) {
+ DP_ERR(p_hwfn,
+ "MIB read err, mib type = %d, try count ="
+ " %d prefix seq num = %d suffix seq num = %d\n",
+ type, read_count, prefix_seq_num, suffix_seq_num);
+ rc = ECORE_IO;
+ }
+
+ return rc;
+}
+
+static void
+ecore_dcbx_get_priority_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_app_prio *p_prio,
+ struct ecore_dcbx_results *p_results)
+{
+ u8 val;
+
+ if (p_results->arr[DCBX_PROTOCOL_ETH].update &&
+ p_results->arr[DCBX_PROTOCOL_ETH].enable)
+ p_prio->eth = p_results->arr[DCBX_PROTOCOL_ETH].priority;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "Priorities: eth %d\n",
+ p_prio->eth);
+}
+
+static void
+ecore_dcbx_get_app_data(struct ecore_hwfn *p_hwfn,
+ struct dcbx_app_priority_feature *p_app,
+ struct dcbx_app_priority_entry *p_tbl,
+ struct ecore_dcbx_params *p_params, bool ieee)
+{
+ struct ecore_app_entry *entry;
+ u8 pri_map;
+ int i;
+
+ p_params->app_willing = ECORE_MFW_GET_FIELD(p_app->flags,
+ DCBX_APP_WILLING);
+ p_params->app_valid = ECORE_MFW_GET_FIELD(p_app->flags,
+ DCBX_APP_ENABLED);
+ p_params->app_error = ECORE_MFW_GET_FIELD(p_app->flags, DCBX_APP_ERROR);
+ p_params->num_app_entries = ECORE_MFW_GET_FIELD(p_app->flags,
+ DCBX_APP_NUM_ENTRIES);
+ for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &p_params->app_entry[i];
+ if (ieee) {
+ u8 sf_ieee;
+ u32 val;
+
+ sf_ieee = ECORE_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_SF_IEEE);
+ switch (sf_ieee) {
+ case DCBX_APP_SF_IEEE_RESERVED:
+ /* Old MFW */
+ val = ECORE_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_SF);
+ entry->sf_ieee = val ?
+ ECORE_DCBX_SF_IEEE_TCP_UDP_PORT :
+ ECORE_DCBX_SF_IEEE_ETHTYPE;
+ break;
+ case DCBX_APP_SF_IEEE_ETHTYPE:
+ entry->sf_ieee = ECORE_DCBX_SF_IEEE_ETHTYPE;
+ break;
+ case DCBX_APP_SF_IEEE_TCP_PORT:
+ entry->sf_ieee = ECORE_DCBX_SF_IEEE_TCP_PORT;
+ break;
+ case DCBX_APP_SF_IEEE_UDP_PORT:
+ entry->sf_ieee = ECORE_DCBX_SF_IEEE_UDP_PORT;
+ break;
+ case DCBX_APP_SF_IEEE_TCP_UDP_PORT:
+ entry->sf_ieee =
+ ECORE_DCBX_SF_IEEE_TCP_UDP_PORT;
+ break;
+ }
+ } else {
+ entry->ethtype = !(ECORE_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_SF));
+ }
+
+ pri_map = ECORE_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP);
+ ecore_dcbx_get_app_priority(pri_map, &entry->prio);
+ entry->proto_id = ECORE_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_PROTOCOL_ID);
+ ecore_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
+ entry->proto_id,
+ &entry->proto_type, ieee);
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "APP params: willing %d, valid %d error = %d\n",
+ p_params->app_willing, p_params->app_valid,
+ p_params->app_error);
+}
+
+static void
+ecore_dcbx_get_pfc_data(struct ecore_hwfn *p_hwfn,
+ u32 pfc, struct ecore_dcbx_params *p_params)
+{
+ u8 pfc_map;
+
+ p_params->pfc.willing = ECORE_MFW_GET_FIELD(pfc, DCBX_PFC_WILLING);
+ p_params->pfc.max_tc = ECORE_MFW_GET_FIELD(pfc, DCBX_PFC_CAPS);
+ p_params->pfc.enabled = ECORE_MFW_GET_FIELD(pfc, DCBX_PFC_ENABLED);
+ pfc_map = ECORE_MFW_GET_FIELD(pfc, DCBX_PFC_PRI_EN_BITMAP);
+ p_params->pfc.prio[0] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_0);
+ p_params->pfc.prio[1] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_1);
+ p_params->pfc.prio[2] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_2);
+ p_params->pfc.prio[3] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_3);
+ p_params->pfc.prio[4] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_4);
+ p_params->pfc.prio[5] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_5);
+ p_params->pfc.prio[6] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_6);
+ p_params->pfc.prio[7] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_7);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "PFC params: willing %d, pfc_bitmap %u max_tc = %u enabled = %d\n",
+ p_params->pfc.willing, pfc_map, p_params->pfc.max_tc,
+ p_params->pfc.enabled);
+}
+
+static void
+ecore_dcbx_get_ets_data(struct ecore_hwfn *p_hwfn,
+ struct dcbx_ets_feature *p_ets,
+ struct ecore_dcbx_params *p_params)
+{
+ u32 bw_map[2], tsa_map[2], pri_map;
+ int i;
+
+ p_params->ets_willing = ECORE_MFW_GET_FIELD(p_ets->flags,
+ DCBX_ETS_WILLING);
+ p_params->ets_enabled = ECORE_MFW_GET_FIELD(p_ets->flags,
+ DCBX_ETS_ENABLED);
+ p_params->ets_cbs = ECORE_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_CBS);
+ p_params->max_ets_tc = ECORE_MFW_GET_FIELD(p_ets->flags,
+ DCBX_ETS_MAX_TCS);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "ETS params: willing %d, enabled = %d ets_cbs %d pri_tc_tbl_0 %x max_ets_tc %d\n",
+ p_params->ets_willing, p_params->ets_enabled,
+ p_params->ets_cbs, p_ets->pri_tc_tbl[0],
+ p_params->max_ets_tc);
+
+ /* The 8-bit tsa and bw values corresponding to each of the 8 TCs are
+ * encoded in a u32 array of size 2.
+ */
+ bw_map[0] = OSAL_BE32_TO_CPU(p_ets->tc_bw_tbl[0]);
+ bw_map[1] = OSAL_BE32_TO_CPU(p_ets->tc_bw_tbl[1]);
+ tsa_map[0] = OSAL_BE32_TO_CPU(p_ets->tc_tsa_tbl[0]);
+ tsa_map[1] = OSAL_BE32_TO_CPU(p_ets->tc_tsa_tbl[1]);
+ pri_map = p_ets->pri_tc_tbl[0];
+ for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++) {
+ p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i];
+ p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i];
+ p_params->ets_pri_tc_tbl[i] = ECORE_DCBX_PRIO2TC(pri_map, i);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "elem %d bw_tbl %x tsa_tbl %x\n",
+ i, p_params->ets_tc_bw_tbl[i],
+ p_params->ets_tc_tsa_tbl[i]);
+ }
+}
+
+static void
+ecore_dcbx_get_common_params(struct ecore_hwfn *p_hwfn,
+ struct dcbx_app_priority_feature *p_app,
+ struct dcbx_app_priority_entry *p_tbl,
+ struct dcbx_ets_feature *p_ets,
+ u32 pfc, struct ecore_dcbx_params *p_params,
+ bool ieee)
+{
+ ecore_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params, ieee);
+ ecore_dcbx_get_ets_data(p_hwfn, p_ets, p_params);
+ ecore_dcbx_get_pfc_data(p_hwfn, pfc, p_params);
+}
+
+static void
+ecore_dcbx_get_local_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_dcbx_get *params)
+{
+ struct dcbx_features *p_feat;
+
+ p_feat = &p_hwfn->p_dcbx_info->local_admin.features;
+ ecore_dcbx_get_common_params(p_hwfn, &p_feat->app,
+ p_feat->app.app_pri_tbl, &p_feat->ets,
+ p_feat->pfc, &params->local.params, false);
+ params->local.valid = true;
+}
+
+static void
+ecore_dcbx_get_remote_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_dcbx_get *params)
+{
+ struct dcbx_features *p_feat;
+
+ p_feat = &p_hwfn->p_dcbx_info->remote.features;
+ ecore_dcbx_get_common_params(p_hwfn, &p_feat->app,
+ p_feat->app.app_pri_tbl, &p_feat->ets,
+ p_feat->pfc, &params->remote.params,
+ false);
+ params->remote.valid = true;
+}
+
+static enum _ecore_status_t
+ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_dcbx_get *params)
+{
+ struct ecore_dcbx_operational_params *p_operational;
+ struct ecore_dcbx_results *p_results;
+ struct dcbx_features *p_feat;
+ bool enabled, err;
+ u32 flags;
+ bool val;
+
+ flags = p_hwfn->p_dcbx_info->operational.flags;
+
+ /* If the DCBx version is non-zero, then negotiation
+ * was successfully performed
+ */
+ p_operational = &params->operational;
+ enabled = !!(ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) !=
+ DCBX_CONFIG_VERSION_DISABLED);
+ if (!enabled) {
+ p_operational->enabled = enabled;
+ p_operational->valid = false;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "Dcbx is disabled\n");
+ return ECORE_INVAL;
+ }
+
+ p_feat = &p_hwfn->p_dcbx_info->operational.features;
+ p_results = &p_hwfn->p_dcbx_info->results;
+
+ val = !!(ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_IEEE);
+ p_operational->ieee = val;
+
+ val = !!(ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_CEE);
+ p_operational->cee = val;
+
+ val = !!(ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_STATIC);
+ p_operational->local = val;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "Version support: ieee %d, cee %d, static %d\n",
+ p_operational->ieee, p_operational->cee,
+ p_operational->local);
+
+ ecore_dcbx_get_common_params(p_hwfn, &p_feat->app,
+ p_feat->app.app_pri_tbl, &p_feat->ets,
+ p_feat->pfc, &params->operational.params,
+ p_operational->ieee);
+ ecore_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio,
+ p_results);
+ err = ECORE_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR);
+ p_operational->err = err;
+ p_operational->enabled = enabled;
+ p_operational->valid = true;
+
+ return ECORE_SUCCESS;
+}
+
+static void
+ecore_dcbx_get_dscp_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_dcbx_get *params)
+{
+ struct ecore_dcbx_dscp_params *p_dscp;
+ struct dcb_dscp_map *p_dscp_map;
+ int i, j, entry;
+ u32 pri_map;
+
+ p_dscp = &params->dscp;
+ p_dscp_map = &p_hwfn->p_dcbx_info->dscp_map;
+ p_dscp->enabled = ECORE_MFW_GET_FIELD(p_dscp_map->flags,
+ DCB_DSCP_ENABLE);
+ /* MFW encodes 64 dscp entries into an 8-element array of u32 entries,
+ * where each entry holds the 4-bit priority map for 8 dscp entries.
+ */
+ for (i = 0, entry = 0; i < ECORE_DCBX_DSCP_SIZE / 8; i++) {
+ pri_map = OSAL_BE32_TO_CPU(p_dscp_map->dscp_pri_map[i]);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "elem %d pri_map 0x%x\n",
+ entry, pri_map);
+ for (j = 0; j < ECORE_DCBX_DSCP_SIZE / 8; j++, entry++)
+ p_dscp->dscp_pri_map[entry] = (u32)(pri_map >>
+ (j * 4)) & 0xf;
+ }
+}
+
+static void
+ecore_dcbx_get_local_lldp_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_dcbx_get *params)
+{
+ struct lldp_config_params_s *p_local;
+
+ p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE];
+
+ OSAL_MEMCPY(params->lldp_local.local_chassis_id,
+ p_local->local_chassis_id,
+ OSAL_ARRAY_SIZE(p_local->local_chassis_id));
+ OSAL_MEMCPY(params->lldp_local.local_port_id, p_local->local_port_id,
+ OSAL_ARRAY_SIZE(p_local->local_port_id));
+}
+
+static void
+ecore_dcbx_get_remote_lldp_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_dcbx_get *params)
+{
+ struct lldp_status_params_s *p_remote;
+
+ p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE];
+
+ OSAL_MEMCPY(params->lldp_remote.peer_chassis_id,
+ p_remote->peer_chassis_id,
+ OSAL_ARRAY_SIZE(p_remote->peer_chassis_id));
+ OSAL_MEMCPY(params->lldp_remote.peer_port_id, p_remote->peer_port_id,
+ OSAL_ARRAY_SIZE(p_remote->peer_port_id));
+}
+
+static enum _ecore_status_t
+ecore_dcbx_get_params(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_dcbx_get *p_params,
+ enum ecore_mib_read_type type)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ switch (type) {
+ case ECORE_DCBX_REMOTE_MIB:
+ ecore_dcbx_get_remote_params(p_hwfn, p_ptt, p_params);
+ break;
+ case ECORE_DCBX_LOCAL_MIB:
+ ecore_dcbx_get_local_params(p_hwfn, p_ptt, p_params);
+ break;
+ case ECORE_DCBX_OPERATIONAL_MIB:
+ ecore_dcbx_get_operational_params(p_hwfn, p_ptt, p_params);
+ break;
+ case ECORE_DCBX_REMOTE_LLDP_MIB:
+ ecore_dcbx_get_remote_lldp_params(p_hwfn, p_ptt, p_params);
+ break;
+ case ECORE_DCBX_LOCAL_LLDP_MIB:
+ ecore_dcbx_get_local_lldp_params(p_hwfn, p_ptt, p_params);
+ break;
+ default:
+ DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
+ return ECORE_INVAL;
+ }
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_dcbx_read_local_lldp_mib(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_dcbx_mib_meta_data data;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ OSAL_MEM_ZERO(&data, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port,
+ lldp_config_params);
+ data.lldp_local = p_hwfn->p_dcbx_info->lldp_local;
+ data.size = sizeof(struct lldp_config_params_s);
+ ecore_memcpy_from(p_hwfn, p_ptt, data.lldp_local, data.addr, data.size);
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_dcbx_read_remote_lldp_mib(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_mib_read_type type)
+{
+ struct ecore_dcbx_mib_meta_data data;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ OSAL_MEM_ZERO(&data, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port,
+ lldp_status_params);
+ data.lldp_remote = p_hwfn->p_dcbx_info->lldp_remote;
+ data.size = sizeof(struct lldp_status_params_s);
+ rc = ecore_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_dcbx_read_operational_mib(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_mib_read_type type)
+{
+ struct ecore_dcbx_mib_meta_data data;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ OSAL_MEM_ZERO(&data, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, operational_dcbx_mib);
+ data.mib = &p_hwfn->p_dcbx_info->operational;
+ data.size = sizeof(struct dcbx_mib);
+ rc = ecore_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_dcbx_read_remote_mib(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_mib_read_type type)
+{
+ struct ecore_dcbx_mib_meta_data data;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ OSAL_MEM_ZERO(&data, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, remote_dcbx_mib);
+ data.mib = &p_hwfn->p_dcbx_info->remote;
+ data.size = sizeof(struct dcbx_mib);
+ rc = ecore_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_dcbx_read_local_mib(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ struct ecore_dcbx_mib_meta_data data;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ OSAL_MEM_ZERO(&data, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, local_admin_dcbx_mib);
+ data.local_admin = &p_hwfn->p_dcbx_info->local_admin;
+ data.size = sizeof(struct dcbx_local_params);
+ ecore_memcpy_from(p_hwfn, p_ptt, data.local_admin,
+ data.addr, data.size);
+
+ return rc;
+}
+
+static void
+ecore_dcbx_read_dscp_mib(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ struct ecore_dcbx_mib_meta_data data;
+
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, dcb_dscp_map);
+ data.dscp_map = &p_hwfn->p_dcbx_info->dscp_map;
+ data.size = sizeof(struct dcb_dscp_map);
+ ecore_memcpy_from(p_hwfn, p_ptt, data.dscp_map, data.addr, data.size);
+}
+
+static enum _ecore_status_t ecore_dcbx_read_mib(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_mib_read_type type)
+{
+ enum _ecore_status_t rc = ECORE_INVAL;
+
+ switch (type) {
+ case ECORE_DCBX_OPERATIONAL_MIB:
+ ecore_dcbx_read_dscp_mib(p_hwfn, p_ptt);
+ rc = ecore_dcbx_read_operational_mib(p_hwfn, p_ptt, type);
+ break;
+ case ECORE_DCBX_REMOTE_MIB:
+ rc = ecore_dcbx_read_remote_mib(p_hwfn, p_ptt, type);
+ break;
+ case ECORE_DCBX_LOCAL_MIB:
+ rc = ecore_dcbx_read_local_mib(p_hwfn, p_ptt);
+ break;
+ case ECORE_DCBX_REMOTE_LLDP_MIB:
+ rc = ecore_dcbx_read_remote_lldp_mib(p_hwfn, p_ptt, type);
+ break;
+ case ECORE_DCBX_LOCAL_LLDP_MIB:
+ rc = ecore_dcbx_read_local_lldp_mib(p_hwfn, p_ptt);
+ break;
+ default:
+ DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
+ }
+
+ return rc;
+}
+
+/*
+ * Read updated MIB.
+ * Reconfigure QM and invoke PF update ramrod command if operational MIB
+ * change is detected.
+ */
+enum _ecore_status_t
+ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_mib_read_type type)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ rc = ecore_dcbx_read_mib(p_hwfn, p_ptt, type);
+ if (rc)
+ return rc;
+
+ if (type == ECORE_DCBX_OPERATIONAL_MIB) {
+ ecore_dcbx_get_dscp_params(p_hwfn, p_ptt,
+ &p_hwfn->p_dcbx_info->get);
+
+ rc = ecore_dcbx_process_mib_info(p_hwfn);
+ if (!rc) {
+ bool enabled;
+
+ /* reconfigure tcs of QM queues according
+ * to negotiation results
+ */
+ ecore_qm_reconf(p_hwfn, p_ptt);
+
+ /* update storm FW with negotiation results */
+ ecore_sp_pf_update(p_hwfn);
+
+			/* set eagle engine 1 flow control workaround
+ * according to negotiation results
+ */
+ enabled = p_hwfn->p_dcbx_info->results.dcbx_enabled;
+ }
+ }
+ ecore_dcbx_get_params(p_hwfn, p_ptt, &p_hwfn->p_dcbx_info->get, type);
+
+ /* Update the DSCP to TC mapping bit if required */
+ if ((type == ECORE_DCBX_OPERATIONAL_MIB) &&
+ p_hwfn->p_dcbx_info->dscp_nig_update) {
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_DSCP_TO_TC_MAP_ENABLE, 0x1);
+ p_hwfn->p_dcbx_info->dscp_nig_update = false;
+ }
+
+ OSAL_DCBX_AEN(p_hwfn, type);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn)
+{
+ p_hwfn->p_dcbx_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(*p_hwfn->p_dcbx_info));
+ if (!p_hwfn->p_dcbx_info) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate `struct ecore_dcbx_info'");
+ return ECORE_NOMEM;
+ }
+
+ p_hwfn->p_dcbx_info->iwarp_port =
+ p_hwfn->pf_params.rdma_pf_params.iwarp_port;
+
+ return ECORE_SUCCESS;
+}
+
+void ecore_dcbx_info_free(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_info *p_dcbx_info)
+{
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_dcbx_info);
+}
+
+static void ecore_dcbx_update_protocol_data(struct protocol_dcb_data *p_data,
+ struct ecore_dcbx_results *p_src,
+ enum dcbx_protocol_type type)
+{
+ p_data->dcb_enable_flag = p_src->arr[type].enable;
+ p_data->dcb_priority = p_src->arr[type].priority;
+ p_data->dcb_tc = p_src->arr[type].tc;
+ p_data->dscp_enable_flag = p_src->arr[type].dscp_enable;
+ p_data->dscp_val = p_src->arr[type].dscp_val;
+}
+
+/* Set pf update ramrod command params */
+void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src,
+ struct pf_update_ramrod_data *p_dest)
+{
+ struct protocol_dcb_data *p_dcb_data;
+ u8 update_flag;
+
+ p_dest->pf_id = p_src->pf_id;
+
+ update_flag = p_src->arr[DCBX_PROTOCOL_ETH].update;
+ p_dest->update_eth_dcb_data_mode = update_flag;
+ update_flag = p_src->arr[DCBX_PROTOCOL_IWARP].update;
+ p_dest->update_iwarp_dcb_data_mode = update_flag;
+
+ p_dcb_data = &p_dest->eth_dcb_data;
+ ecore_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ETH);
+ p_dcb_data = &p_dest->iwarp_dcb_data;
+ ecore_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_IWARP);
+}
+
+enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_get *p_get,
+ enum ecore_mib_read_type type)
+{
+ struct ecore_ptt *p_ptt;
+ enum _ecore_status_t rc;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return ECORE_INVAL;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ rc = ECORE_TIMEOUT;
+ DP_ERR(p_hwfn, "rc = %d\n", rc);
+ return rc;
+ }
+
+ rc = ecore_dcbx_read_mib(p_hwfn, p_ptt, type);
+ if (rc != ECORE_SUCCESS)
+ goto out;
+
+ rc = ecore_dcbx_get_params(p_hwfn, p_ptt, p_get, type);
+
+out:
+ ecore_ptt_release(p_hwfn, p_ptt);
+ return rc;
+}
+
+static void
+ecore_dcbx_set_pfc_data(struct ecore_hwfn *p_hwfn,
+ u32 *pfc, struct ecore_dcbx_params *p_params)
+{
+ u8 pfc_map = 0;
+ int i;
+
+ if (p_params->pfc.willing)
+ *pfc |= DCBX_PFC_WILLING_MASK;
+ else
+ *pfc &= ~DCBX_PFC_WILLING_MASK;
+
+ if (p_params->pfc.enabled)
+ *pfc |= DCBX_PFC_ENABLED_MASK;
+ else
+ *pfc &= ~DCBX_PFC_ENABLED_MASK;
+
+ *pfc &= ~DCBX_PFC_CAPS_MASK;
+ *pfc |= (u32)p_params->pfc.max_tc << DCBX_PFC_CAPS_SHIFT;
+
+ for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++)
+ if (p_params->pfc.prio[i])
+ pfc_map |= (1 << i);
+ *pfc &= ~DCBX_PFC_PRI_EN_BITMAP_MASK;
+ *pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_SHIFT);
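+	/* Illustration only: enabling PFC on priorities 3 and 4 yields
+	 * pfc_map = 0x18 before it is shifted into the bitmap field.
+	 */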
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "pfc = 0x%x\n", *pfc);
+}
+
+static void
+ecore_dcbx_set_ets_data(struct ecore_hwfn *p_hwfn,
+ struct dcbx_ets_feature *p_ets,
+ struct ecore_dcbx_params *p_params)
+{
+ u8 *bw_map, *tsa_map;
+ u32 val;
+ int i;
+
+ if (p_params->ets_willing)
+ p_ets->flags |= DCBX_ETS_WILLING_MASK;
+ else
+ p_ets->flags &= ~DCBX_ETS_WILLING_MASK;
+
+ if (p_params->ets_cbs)
+ p_ets->flags |= DCBX_ETS_CBS_MASK;
+ else
+ p_ets->flags &= ~DCBX_ETS_CBS_MASK;
+
+ if (p_params->ets_enabled)
+ p_ets->flags |= DCBX_ETS_ENABLED_MASK;
+ else
+ p_ets->flags &= ~DCBX_ETS_ENABLED_MASK;
+
+ p_ets->flags &= ~DCBX_ETS_MAX_TCS_MASK;
+ p_ets->flags |= (u32)p_params->max_ets_tc << DCBX_ETS_MAX_TCS_SHIFT;
+
+ bw_map = (u8 *)&p_ets->tc_bw_tbl[0];
+ tsa_map = (u8 *)&p_ets->tc_tsa_tbl[0];
+ p_ets->pri_tc_tbl[0] = 0;
+ for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++) {
+ bw_map[i] = p_params->ets_tc_bw_tbl[i];
+ tsa_map[i] = p_params->ets_tc_tsa_tbl[i];
+ /* Copy the priority value to the corresponding 4 bits in the
+ * traffic class table.
+ */
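+		/* For instance, index 0 lands in bits 31:28 of pri_tc_tbl[0]
+		 * and index 7 in bits 3:0 (eight 4-bit fields per u32,
+		 * assumed here).
+		 */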
+ val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4));
+ p_ets->pri_tc_tbl[0] |= val;
+ }
+ for (i = 0; i < 2; i++) {
+ p_ets->tc_bw_tbl[i] = OSAL_CPU_TO_BE32(p_ets->tc_bw_tbl[i]);
+ p_ets->tc_tsa_tbl[i] = OSAL_CPU_TO_BE32(p_ets->tc_tsa_tbl[i]);
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "flags = 0x%x pri_tc = 0x%x tc_bwl[] = {0x%x, 0x%x} tc_tsa = {0x%x, 0x%x}\n",
+ p_ets->flags, p_ets->pri_tc_tbl[0], p_ets->tc_bw_tbl[0],
+ p_ets->tc_bw_tbl[1], p_ets->tc_tsa_tbl[0],
+ p_ets->tc_tsa_tbl[1]);
+}
+
+static void
+ecore_dcbx_set_app_data(struct ecore_hwfn *p_hwfn,
+ struct dcbx_app_priority_feature *p_app,
+ struct ecore_dcbx_params *p_params, bool ieee)
+{
+ u32 *entry;
+ int i;
+
+ if (p_params->app_willing)
+ p_app->flags |= DCBX_APP_WILLING_MASK;
+ else
+ p_app->flags &= ~DCBX_APP_WILLING_MASK;
+
+ if (p_params->app_valid)
+ p_app->flags |= DCBX_APP_ENABLED_MASK;
+ else
+ p_app->flags &= ~DCBX_APP_ENABLED_MASK;
+
+ p_app->flags &= ~DCBX_APP_NUM_ENTRIES_MASK;
+ p_app->flags |= (u32)p_params->num_app_entries <<
+ DCBX_APP_NUM_ENTRIES_SHIFT;
+
+ for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &p_app->app_pri_tbl[i].entry;
+ *entry = 0;
+ if (ieee) {
+ *entry &= ~(DCBX_APP_SF_IEEE_MASK | DCBX_APP_SF_MASK);
+ switch (p_params->app_entry[i].sf_ieee) {
+ case ECORE_DCBX_SF_IEEE_ETHTYPE:
+ *entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE <<
+ DCBX_APP_SF_IEEE_SHIFT);
+ *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
+ DCBX_APP_SF_SHIFT);
+ break;
+ case ECORE_DCBX_SF_IEEE_TCP_PORT:
+ *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT <<
+ DCBX_APP_SF_IEEE_SHIFT);
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
+ break;
+ case ECORE_DCBX_SF_IEEE_UDP_PORT:
+ *entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT <<
+ DCBX_APP_SF_IEEE_SHIFT);
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
+ break;
+ case ECORE_DCBX_SF_IEEE_TCP_UDP_PORT:
+ *entry |= (u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT <<
+ DCBX_APP_SF_IEEE_SHIFT;
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
+ break;
+ }
+ } else {
+ *entry &= ~DCBX_APP_SF_MASK;
+ if (p_params->app_entry[i].ethtype)
+ *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
+ DCBX_APP_SF_SHIFT);
+ else
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
+ }
+ *entry &= ~DCBX_APP_PROTOCOL_ID_MASK;
+ *entry |= ((u32)p_params->app_entry[i].proto_id <<
+ DCBX_APP_PROTOCOL_ID_SHIFT);
+ *entry &= ~DCBX_APP_PRI_MAP_MASK;
+ *entry |= ((u32)(p_params->app_entry[i].prio) <<
+ DCBX_APP_PRI_MAP_SHIFT);
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "flags = 0x%x\n", p_app->flags);
+}
+
+static enum _ecore_status_t
+ecore_dcbx_set_local_params(struct ecore_hwfn *p_hwfn,
+ struct dcbx_local_params *local_admin,
+ struct ecore_dcbx_set *params)
+{
+ bool ieee = false;
+
+ local_admin->flags = 0;
+ OSAL_MEMCPY(&local_admin->features,
+ &p_hwfn->p_dcbx_info->operational.features,
+ sizeof(local_admin->features));
+
+ if (params->enabled) {
+ local_admin->config = params->ver_num;
+ ieee = !!(params->ver_num & DCBX_CONFIG_VERSION_IEEE);
+ } else {
+ local_admin->config = DCBX_CONFIG_VERSION_DISABLED;
+ }
+
+ if (params->override_flags & ECORE_DCBX_OVERRIDE_PFC_CFG)
+ ecore_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc,
+ &params->config.params);
+
+ if (params->override_flags & ECORE_DCBX_OVERRIDE_ETS_CFG)
+ ecore_dcbx_set_ets_data(p_hwfn, &local_admin->features.ets,
+ &params->config.params);
+
+ if (params->override_flags & ECORE_DCBX_OVERRIDE_APP_CFG)
+ ecore_dcbx_set_app_data(p_hwfn, &local_admin->features.app,
+ &params->config.params, ieee);
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_dcbx_set_dscp_params(struct ecore_hwfn *p_hwfn,
+ struct dcb_dscp_map *p_dscp_map,
+ struct ecore_dcbx_set *p_params)
+{
+ int entry, i, j;
+ u32 val;
+
+ OSAL_MEMCPY(p_dscp_map, &p_hwfn->p_dcbx_info->dscp_map,
+ sizeof(*p_dscp_map));
+
+ p_dscp_map->flags &= ~DCB_DSCP_ENABLE_MASK;
+ if (p_params->dscp.enabled)
+ p_dscp_map->flags |= DCB_DSCP_ENABLE_MASK;
+
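+	/* Pack the 64-entry dscp-to-priority map back into 8 big-endian u32
+	 * words, the inverse of the unpacking in ecore_dcbx_get_dscp_params().
+	 */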
+ for (i = 0, entry = 0; i < 8; i++) {
+ val = 0;
+ for (j = 0; j < 8; j++, entry++)
+ val |= (((u32)p_params->dscp.dscp_pri_map[entry]) <<
+ (j * 4));
+
+ p_dscp_map->dscp_pri_map[i] = OSAL_CPU_TO_BE32(val);
+ }
+
+ p_hwfn->p_dcbx_info->dscp_nig_update = true;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "flags = 0x%x\n", p_dscp_map->flags);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_dcbx_set *params,
+ bool hw_commit)
+{
+ struct dcbx_local_params local_admin;
+ struct ecore_dcbx_mib_meta_data data;
+ struct dcb_dscp_map dscp_map;
+ u32 resp = 0, param = 0;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ if (!hw_commit) {
+ OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set, params,
+ sizeof(p_hwfn->p_dcbx_info->set));
+ return ECORE_SUCCESS;
+ }
+
+	/* clear set-params cache */
+ OSAL_MEMSET(&p_hwfn->p_dcbx_info->set, 0,
+ sizeof(struct ecore_dcbx_set));
+
+ OSAL_MEMSET(&local_admin, 0, sizeof(local_admin));
+ ecore_dcbx_set_local_params(p_hwfn, &local_admin, params);
+
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, local_admin_dcbx_mib);
+ data.local_admin = &local_admin;
+ data.size = sizeof(struct dcbx_local_params);
+ ecore_memcpy_to(p_hwfn, p_ptt, data.addr, data.local_admin, data.size);
+
+ if (params->override_flags & ECORE_DCBX_OVERRIDE_DSCP_CFG) {
+ OSAL_MEMSET(&dscp_map, 0, sizeof(dscp_map));
+ ecore_dcbx_set_dscp_params(p_hwfn, &dscp_map, params);
+
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, dcb_dscp_map);
+ data.dscp_map = &dscp_map;
+ data.size = sizeof(struct dcb_dscp_map);
+ ecore_memcpy_to(p_hwfn, p_ptt, data.addr, data.dscp_map,
+ data.size);
+ }
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_DCBX,
+ 1 << DRV_MB_PARAM_LLDP_SEND_SHIFT, &resp, &param);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send DCBX update request\n");
+ return rc;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_set *params)
+{
+ struct ecore_dcbx_get *dcbx_info;
+ int rc;
+
+ if (p_hwfn->p_dcbx_info->set.config.valid) {
+ OSAL_MEMCPY(params, &p_hwfn->p_dcbx_info->set,
+ sizeof(struct ecore_dcbx_set));
+ return ECORE_SUCCESS;
+ }
+
+ dcbx_info = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(*dcbx_info));
+ if (!dcbx_info) {
+		DP_ERR(p_hwfn, "Failed to allocate struct ecore_dcbx_get\n");
+ return ECORE_NOMEM;
+ }
+
+ OSAL_MEMSET(dcbx_info, 0, sizeof(*dcbx_info));
+ rc = ecore_dcbx_query_params(p_hwfn, dcbx_info,
+ ECORE_DCBX_OPERATIONAL_MIB);
+ if (rc) {
+ OSAL_FREE(p_hwfn->p_dev, dcbx_info);
+ return rc;
+ }
+ p_hwfn->p_dcbx_info->set.override_flags = 0;
+
+ p_hwfn->p_dcbx_info->set.ver_num = DCBX_CONFIG_VERSION_DISABLED;
+ if (dcbx_info->operational.cee)
+ p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_CEE;
+ if (dcbx_info->operational.ieee)
+ p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_IEEE;
+ if (dcbx_info->operational.local)
+ p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_STATIC;
+
+ p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
+ OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set.config.params,
+ &dcbx_info->operational.params,
+ sizeof(struct ecore_dcbx_admin_params));
+ p_hwfn->p_dcbx_info->set.config.valid = true;
+
+ OSAL_MEMCPY(params, &p_hwfn->p_dcbx_info->set,
+ sizeof(struct ecore_dcbx_set));
+
+ OSAL_FREE(p_hwfn->p_dev, dcbx_info);
+
+ return ECORE_SUCCESS;
+}
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_dcbx.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_dcbx.h
new file mode 100644
index 00000000..eba2d91b
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_dcbx.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_DCBX_H__
+#define __ECORE_DCBX_H__
+
+#include "ecore.h"
+#include "ecore_mcp.h"
+#include "mcp_public.h"
+#include "reg_addr.h"
+#include "ecore_hw.h"
+#include "ecore_hsi_common.h"
+#include "ecore_dcbx_api.h"
+
+struct ecore_dcbx_info {
+ struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS];
+ struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS];
+ struct dcbx_local_params local_admin;
+ struct ecore_dcbx_results results;
+ struct dcb_dscp_map dscp_map;
+ bool dscp_nig_update;
+ struct dcbx_mib operational;
+ struct dcbx_mib remote;
+ struct ecore_dcbx_set set;
+ struct ecore_dcbx_get get;
+ u8 dcbx_cap;
+ u16 iwarp_port;
+};
+
+struct ecore_dcbx_mib_meta_data {
+ struct lldp_config_params_s *lldp_local;
+ struct lldp_status_params_s *lldp_remote;
+ struct dcbx_local_params *local_admin;
+ struct dcb_dscp_map *dscp_map;
+ struct dcbx_mib *mib;
+ osal_size_t size;
+ u32 addr;
+};
+
+/* ECORE local interface routines */
+enum _ecore_status_t
+ecore_dcbx_mib_update_event(struct ecore_hwfn *, struct ecore_ptt *,
+ enum ecore_mib_read_type);
+
+enum _ecore_status_t ecore_dcbx_read_lldp_params(struct ecore_hwfn *,
+ struct ecore_ptt *);
+enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn);
+void ecore_dcbx_info_free(struct ecore_hwfn *, struct ecore_dcbx_info *);
+void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src,
+ struct pf_update_ramrod_data *p_dest);
+
+#endif /* __ECORE_DCBX_H__ */
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_dcbx_api.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_dcbx_api.h
new file mode 100644
index 00000000..2dc76796
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_dcbx_api.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_DCBX_API_H__
+#define __ECORE_DCBX_API_H__
+
+#include "ecore_status.h"
+
+#define DCBX_CONFIG_MAX_APP_PROTOCOL 4
+
+enum ecore_mib_read_type {
+ ECORE_DCBX_OPERATIONAL_MIB,
+ ECORE_DCBX_REMOTE_MIB,
+ ECORE_DCBX_LOCAL_MIB,
+ ECORE_DCBX_REMOTE_LLDP_MIB,
+ ECORE_DCBX_LOCAL_LLDP_MIB
+};
+
+struct ecore_dcbx_app_data {
+ bool enable; /* DCB enabled */
+ u8 update; /* Update indication */
+ u8 priority; /* Priority */
+ u8 tc; /* Traffic Class */
+ bool dscp_enable; /* DSCP enabled */
+ u8 dscp_val; /* DSCP value */
+};
+
+#ifndef __EXTRACT__LINUX__
+enum dcbx_protocol_type {
+ DCBX_PROTOCOL_ISCSI,
+ DCBX_PROTOCOL_FCOE,
+ DCBX_PROTOCOL_ROCE,
+ DCBX_PROTOCOL_ROCE_V2,
+ DCBX_PROTOCOL_ETH,
+ DCBX_PROTOCOL_IWARP,
+ DCBX_MAX_PROTOCOL_TYPE
+};
+
+#define ECORE_LLDP_CHASSIS_ID_STAT_LEN 4
+#define ECORE_LLDP_PORT_ID_STAT_LEN 4
+#define ECORE_DCBX_MAX_APP_PROTOCOL 32
+#define ECORE_MAX_PFC_PRIORITIES 8
+#define ECORE_DCBX_DSCP_SIZE 64
+
+struct ecore_dcbx_lldp_remote {
+ u32 peer_chassis_id[ECORE_LLDP_CHASSIS_ID_STAT_LEN];
+ u32 peer_port_id[ECORE_LLDP_PORT_ID_STAT_LEN];
+ bool enable_rx;
+ bool enable_tx;
+ u32 tx_interval;
+ u32 max_credit;
+};
+
+struct ecore_dcbx_lldp_local {
+ u32 local_chassis_id[ECORE_LLDP_CHASSIS_ID_STAT_LEN];
+ u32 local_port_id[ECORE_LLDP_PORT_ID_STAT_LEN];
+};
+
+struct ecore_dcbx_app_prio {
+ u8 roce;
+ u8 roce_v2;
+ u8 fcoe;
+ u8 iscsi;
+ u8 eth;
+};
+
+struct ecore_dbcx_pfc_params {
+ bool willing;
+ bool enabled;
+ u8 prio[ECORE_MAX_PFC_PRIORITIES];
+ u8 max_tc;
+};
+
+enum ecore_dcbx_sf_ieee_type {
+ ECORE_DCBX_SF_IEEE_ETHTYPE,
+ ECORE_DCBX_SF_IEEE_TCP_PORT,
+ ECORE_DCBX_SF_IEEE_UDP_PORT,
+ ECORE_DCBX_SF_IEEE_TCP_UDP_PORT
+};
+
+struct ecore_app_entry {
+ bool ethtype;
+ enum ecore_dcbx_sf_ieee_type sf_ieee;
+ bool enabled;
+ u8 prio;
+ u16 proto_id;
+ enum dcbx_protocol_type proto_type;
+};
+
+struct ecore_dcbx_params {
+ struct ecore_app_entry app_entry[ECORE_DCBX_MAX_APP_PROTOCOL];
+ u16 num_app_entries;
+ bool app_willing;
+ bool app_valid;
+ bool app_error;
+ bool ets_willing;
+ bool ets_enabled;
+ bool ets_cbs;
+ bool valid; /* Indicate validity of params */
+ u8 ets_pri_tc_tbl[ECORE_MAX_PFC_PRIORITIES];
+ u8 ets_tc_bw_tbl[ECORE_MAX_PFC_PRIORITIES];
+ u8 ets_tc_tsa_tbl[ECORE_MAX_PFC_PRIORITIES];
+ struct ecore_dbcx_pfc_params pfc;
+ u8 max_ets_tc;
+};
+
+struct ecore_dcbx_admin_params {
+ struct ecore_dcbx_params params;
+ bool valid; /* Indicate validity of params */
+};
+
+struct ecore_dcbx_remote_params {
+ struct ecore_dcbx_params params;
+ bool valid; /* Indicate validity of params */
+};
+
+struct ecore_dcbx_operational_params {
+ struct ecore_dcbx_app_prio app_prio;
+ struct ecore_dcbx_params params;
+ bool valid; /* Indicate validity of params */
+ bool enabled;
+ bool ieee;
+ bool cee;
+ bool local;
+ u32 err;
+};
+
+struct ecore_dcbx_dscp_params {
+ bool enabled;
+ u8 dscp_pri_map[ECORE_DCBX_DSCP_SIZE];
+};
+
+struct ecore_dcbx_get {
+ struct ecore_dcbx_operational_params operational;
+ struct ecore_dcbx_lldp_remote lldp_remote;
+ struct ecore_dcbx_lldp_local lldp_local;
+ struct ecore_dcbx_remote_params remote;
+ struct ecore_dcbx_admin_params local;
+ struct ecore_dcbx_dscp_params dscp;
+};
+#endif
+
+#define ECORE_DCBX_VERSION_DISABLED 0
+#define ECORE_DCBX_VERSION_IEEE 1
+#define ECORE_DCBX_VERSION_CEE 2
+#define ECORE_DCBX_VERSION_DYNAMIC 3
+
+struct ecore_dcbx_set {
+#define ECORE_DCBX_OVERRIDE_STATE (1 << 0)
+#define ECORE_DCBX_OVERRIDE_PFC_CFG (1 << 1)
+#define ECORE_DCBX_OVERRIDE_ETS_CFG (1 << 2)
+#define ECORE_DCBX_OVERRIDE_APP_CFG (1 << 3)
+#define ECORE_DCBX_OVERRIDE_DSCP_CFG (1 << 4)
+ u32 override_flags;
+ bool enabled;
+ struct ecore_dcbx_admin_params config;
+ u32 ver_num;
+ struct ecore_dcbx_dscp_params dscp;
+};
+
+struct ecore_dcbx_results {
+ bool dcbx_enabled;
+ u8 pf_id;
+ struct ecore_dcbx_app_data arr[DCBX_MAX_PROTOCOL_TYPE];
+};
+
+struct ecore_dcbx_app_metadata {
+ enum dcbx_protocol_type id;
+ const char *name; /* @DPDK */
+ enum ecore_pci_personality personality;
+};
+
+enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *,
+ struct ecore_dcbx_get *,
+ enum ecore_mib_read_type);
+
+enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *,
+ struct ecore_dcbx_set *);
+
+enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *,
+ struct ecore_ptt *,
+ struct ecore_dcbx_set *,
+ bool);
+
+static const struct ecore_dcbx_app_metadata ecore_dcbx_app_update[] = {
+ {DCBX_PROTOCOL_ISCSI, "ISCSI", ECORE_PCI_ISCSI},
+ {DCBX_PROTOCOL_FCOE, "FCOE", ECORE_PCI_FCOE},
+ {DCBX_PROTOCOL_ROCE, "ROCE", ECORE_PCI_ETH_ROCE},
+ {DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", ECORE_PCI_ETH_ROCE},
+ {DCBX_PROTOCOL_ETH, "ETH", ECORE_PCI_ETH},
+ {DCBX_PROTOCOL_IWARP, "IWARP", ECORE_PCI_ETH_IWARP}
+};
+
+#endif /* __ECORE_DCBX_API_H__ */
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_dev.c b/src/seastar/dpdk/drivers/net/qede/base/ecore_dev.c
new file mode 100644
index 00000000..865103c6
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_dev.c
@@ -0,0 +1,4874 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "reg_addr.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore.h"
+#include "ecore_chain.h"
+#include "ecore_status.h"
+#include "ecore_hw.h"
+#include "ecore_rt_defs.h"
+#include "ecore_init_ops.h"
+#include "ecore_int.h"
+#include "ecore_cxt.h"
+#include "ecore_spq.h"
+#include "ecore_init_fw_funcs.h"
+#include "ecore_sp_commands.h"
+#include "ecore_dev_api.h"
+#include "ecore_sriov.h"
+#include "ecore_vf.h"
+#include "ecore_mcp.h"
+#include "ecore_hw_defs.h"
+#include "mcp_public.h"
+#include "ecore_iro.h"
+#include "nvm_cfg.h"
+#include "ecore_dev_api.h"
+#include "ecore_dcbx.h"
+#include "ecore_l2.h"
+
+/* TODO - there's a bug in DCBx re-configuration flows in MF, as the QM
+ * registers involved are not split and thus configuration is a race where
+ * some of the PFs configuration might be lost.
+ * Eventually, this needs to move into a MFW-covered HW-lock as arbitration
+ * mechanism as this doesn't cover some cases [E.g., PDA or scenarios where
+ * there's more than a single compiled ecore component in system].
+ */
+static osal_spinlock_t qm_lock;
+static bool qm_lock_init;
+
+/* Configurable */
+#define ECORE_MIN_DPIS (4) /* The minimal num of DPIs required to
+ * load the driver. The number was
+ * arbitrarily set.
+ */
+
+/* Derived */
+#define ECORE_MIN_PWM_REGION ((ECORE_WID_SIZE) * (ECORE_MIN_DPIS))
+
+enum BAR_ID {
+ BAR_ID_0, /* used for GRC */
+ BAR_ID_1 /* Used for doorbells */
+};
+
+static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn, enum BAR_ID bar_id)
+{
+ u32 bar_reg = (bar_id == BAR_ID_0 ?
+ PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
+ u32 val;
+
+ if (IS_VF(p_hwfn->p_dev)) {
+ /* TODO - assume each VF hwfn has 64Kb for Bar0; Bar1 can be
+ * read from actual register, but we're currently not using
+ * it for actual doorbelling.
+ */
+ return 1 << 17;
+ }
+
+ val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
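+	/* The register encodes the BAR size as a power of two, e.g. a value
+	 * of 1 corresponds to 64kB and 4 to 512kB (1 << (val + 15)).
+	 */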
+ if (val)
+ return 1 << (val + 15);
+
+ /* The above registers were updated in the past only in CMT mode. Since
+ * they were found to be useful MFW started updating them from 8.7.7.0.
+ * In older MFW versions they are set to 0 which means disabled.
+ */
+ if (p_hwfn->p_dev->num_hwfns > 1) {
+ DP_NOTICE(p_hwfn, false,
+ "BAR size not configured. Assuming BAR size of 256kB"
+ " for GRC and 512kB for DB\n");
+		val = (bar_id == BAR_ID_0) ? 256 * 1024 : 512 * 1024;
+ } else {
+ DP_NOTICE(p_hwfn, false,
+ "BAR size not configured. Assuming BAR size of 512kB"
+ " for GRC and 512kB for DB\n");
+ val = 512 * 1024;
+ }
+
+ return val;
+}
+
+void ecore_init_dp(struct ecore_dev *p_dev,
+ u32 dp_module, u8 dp_level, void *dp_ctx)
+{
+ u32 i;
+
+ p_dev->dp_level = dp_level;
+ p_dev->dp_module = dp_module;
+ p_dev->dp_ctx = dp_ctx;
+ for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ p_hwfn->dp_level = dp_level;
+ p_hwfn->dp_module = dp_module;
+ p_hwfn->dp_ctx = dp_ctx;
+ }
+}
+
+void ecore_init_struct(struct ecore_dev *p_dev)
+{
+ u8 i;
+
+ for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ p_hwfn->p_dev = p_dev;
+ p_hwfn->my_id = i;
+ p_hwfn->b_active = false;
+
+ OSAL_MUTEX_ALLOC(p_hwfn, &p_hwfn->dmae_info.mutex);
+ OSAL_MUTEX_INIT(&p_hwfn->dmae_info.mutex);
+ }
+
+ /* hwfn 0 is always active */
+ p_dev->hwfns[0].b_active = true;
+
+ /* set the default cache alignment to 128 (may be overridden later) */
+ p_dev->cache_shift = 7;
+}
+
+static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ OSAL_FREE(p_hwfn->p_dev, qm_info->qm_pq_params);
+ OSAL_FREE(p_hwfn->p_dev, qm_info->qm_vport_params);
+ OSAL_FREE(p_hwfn->p_dev, qm_info->qm_port_params);
+ OSAL_FREE(p_hwfn->p_dev, qm_info->wfq_data);
+}
+
+void ecore_resc_free(struct ecore_dev *p_dev)
+{
+ int i;
+
+ if (IS_VF(p_dev)) {
+ for_each_hwfn(p_dev, i)
+ ecore_l2_free(&p_dev->hwfns[i]);
+ return;
+ }
+
+ OSAL_FREE(p_dev, p_dev->fw_data);
+
+ OSAL_FREE(p_dev, p_dev->reset_stats);
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ ecore_cxt_mngr_free(p_hwfn);
+ ecore_qm_info_free(p_hwfn);
+ ecore_spq_free(p_hwfn);
+ ecore_eq_free(p_hwfn);
+ ecore_consq_free(p_hwfn);
+ ecore_int_free(p_hwfn);
+ ecore_iov_free(p_hwfn);
+ ecore_l2_free(p_hwfn);
+ ecore_dmae_info_free(p_hwfn);
+ ecore_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
+ /* @@@TBD Flush work-queue ? */
+ }
+}
+
+/******************** QM initialization *******************/
+
+/* bitmaps for indicating active traffic classes.
+ * Special case for Arrowhead 4 port
+ */
+/* 0..3 actually used, 4 serves OOO, 7 serves high priority stuff (e.g. DCQCN) */
+#define ACTIVE_TCS_BMAP 0x9f
+/* 0..3 actually used, OOO and high priority stuff all use 3 */
+#define ACTIVE_TCS_BMAP_4PORT_K2 0xf
+
+/* determines the physical queue flags for a given PF. */
+static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn)
+{
+ u32 flags;
+
+ /* common flags */
+ flags = PQ_FLAGS_LB;
+
+ /* feature flags */
+ if (IS_ECORE_SRIOV(p_hwfn->p_dev))
+ flags |= PQ_FLAGS_VFS;
+
+ /* protocol flags */
+ switch (p_hwfn->hw_info.personality) {
+ case ECORE_PCI_ETH:
+ flags |= PQ_FLAGS_MCOS;
+ break;
+ case ECORE_PCI_FCOE:
+ flags |= PQ_FLAGS_OFLD;
+ break;
+ case ECORE_PCI_ISCSI:
+ flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
+ break;
+ case ECORE_PCI_ETH_ROCE:
+ flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD;
+ break;
+ case ECORE_PCI_ETH_IWARP:
+ flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO |
+ PQ_FLAGS_OFLD;
+ break;
+ default:
+ DP_ERR(p_hwfn, "unknown personality %d\n",
+ p_hwfn->hw_info.personality);
+ return 0;
+ }
+ return flags;
+}
+
+/* Getters for resource amounts necessary for qm initialization */
+u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn)
+{
+ return p_hwfn->hw_info.num_hw_tc;
+}
+
+u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn)
+{
+ return IS_ECORE_SRIOV(p_hwfn->p_dev) ?
+ p_hwfn->p_dev->p_iov_info->total_vfs : 0;
+}
+
+#define NUM_DEFAULT_RLS 1
+
+u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn)
+{
+ u16 num_pf_rls, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn);
+
+ /* @DPDK */
+ /* num RLs can't exceed resource amount of rls or vports or the
+ * dcqcn qps
+ */
+ num_pf_rls = (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_RL),
+ (u16)RESC_NUM(p_hwfn, ECORE_VPORT));
+
+ /* make sure after we reserve the default and VF rls we'll have
+ * something left
+ */
+ if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) {
+ DP_NOTICE(p_hwfn, false,
+ "no rate limiters left for PF rate limiting"
+ " [num_pf_rls %d num_vfs %d]\n", num_pf_rls, num_vfs);
+ return 0;
+ }
+
+ /* subtract rls necessary for VFs and one default one for the PF */
+ num_pf_rls -= num_vfs + NUM_DEFAULT_RLS;
+
+ return num_pf_rls;
+}
+
+u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn)
+{
+ u32 pq_flags = ecore_get_pq_flags(p_hwfn);
+
+ /* all pqs share the same vport (hence the 1 below), except for vfs
+ * and pf_rl pqs
+ */
+ return (!!(PQ_FLAGS_RLS & pq_flags)) *
+ ecore_init_qm_get_num_pf_rls(p_hwfn) +
+ (!!(PQ_FLAGS_VFS & pq_flags)) *
+ ecore_init_qm_get_num_vfs(p_hwfn) + 1;
+}
+
+/* calc amount of PQs according to the requested flags */
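+/* For illustration (not exhaustive): an ETH-only PF without SR-IOV sets
+ * PQ_FLAGS_LB | PQ_FLAGS_MCOS, so this returns one PQ per TC plus one
+ * pure-loopback PQ.
+ */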
+u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn)
+{
+ u32 pq_flags = ecore_get_pq_flags(p_hwfn);
+
+ return (!!(PQ_FLAGS_RLS & pq_flags)) *
+ ecore_init_qm_get_num_pf_rls(p_hwfn) +
+ (!!(PQ_FLAGS_MCOS & pq_flags)) *
+ ecore_init_qm_get_num_tcs(p_hwfn) +
+ (!!(PQ_FLAGS_LB & pq_flags)) +
+ (!!(PQ_FLAGS_OOO & pq_flags)) +
+ (!!(PQ_FLAGS_ACK & pq_flags)) +
+ (!!(PQ_FLAGS_OFLD & pq_flags)) +
+ (!!(PQ_FLAGS_VFS & pq_flags)) *
+ ecore_init_qm_get_num_vfs(p_hwfn);
+}
+
+/* initialize the top level QM params */
+static void ecore_init_qm_params(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ bool four_port;
+
+ /* pq and vport bases for this PF */
+ qm_info->start_pq = (u16)RESC_START(p_hwfn, ECORE_PQ);
+ qm_info->start_vport = (u8)RESC_START(p_hwfn, ECORE_VPORT);
+
+ /* rate limiting and weighted fair queueing are always enabled */
+ qm_info->vport_rl_en = 1;
+ qm_info->vport_wfq_en = 1;
+
+ /* TC config is different for AH 4 port */
+ four_port = p_hwfn->p_dev->num_ports_in_engines == MAX_NUM_PORTS_K2;
+
+ /* in AH 4 port we have fewer TCs per port */
+ qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 :
+ NUM_OF_PHYS_TCS;
+
+ /* unless MFW indicated otherwise, ooo_tc should be 3 for AH 4 port and
+ * 4 otherwise
+ */
+ if (!qm_info->ooo_tc)
+ qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC :
+ DCBX_TCP_OOO_TC;
+}
+
+/* initialize qm vport params */
+static void ecore_init_qm_vport_params(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ u8 i;
+
+ /* all vports participate in weighted fair queueing */
+ for (i = 0; i < ecore_init_qm_get_num_vports(p_hwfn); i++)
+ qm_info->qm_vport_params[i].vport_wfq = 1;
+}
+
+/* initialize qm port params */
+static void ecore_init_qm_port_params(struct ecore_hwfn *p_hwfn)
+{
+ /* Initialize qm port parameters */
+ u8 i, active_phys_tcs, num_ports = p_hwfn->p_dev->num_ports_in_engines;
+
+ /* indicate how ooo and high pri traffic is dealt with */
+ active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
+ ACTIVE_TCS_BMAP_4PORT_K2 : ACTIVE_TCS_BMAP;
+
+ for (i = 0; i < num_ports; i++) {
+ struct init_qm_port_params *p_qm_port =
+ &p_hwfn->qm_info.qm_port_params[i];
+
+ p_qm_port->active = 1;
+ p_qm_port->active_phys_tcs = active_phys_tcs;
+ p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
+ p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
+ }
+}
+
+/* Reset the params which must be reset for qm init. QM init may be called as
+ * a result of flows other than driver load (e.g. dcbx renegotiation). Other
+ * params may be affected by the init but would simply recalculate to the same
+ * values. The allocations made for QM init, ports, vports, pqs and vfqs are not
+ * affected as these amounts stay the same.
+ */
+static void ecore_init_qm_reset_params(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ qm_info->num_pqs = 0;
+ qm_info->num_vports = 0;
+ qm_info->num_pf_rls = 0;
+ qm_info->num_vf_pqs = 0;
+ qm_info->first_vf_pq = 0;
+ qm_info->first_mcos_pq = 0;
+ qm_info->first_rl_pq = 0;
+}
+
+static void ecore_init_qm_advance_vport(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ qm_info->num_vports++;
+
+ if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn))
+ DP_ERR(p_hwfn,
+ "vport overflow! qm_info->num_vports %d,"
+ " qm_init_get_num_vports() %d\n",
+ qm_info->num_vports,
+ ecore_init_qm_get_num_vports(p_hwfn));
+}
+
+/* initialize a single pq and manage qm_info resources accounting.
+ * The pq_init_flags param determines whether the PQ is rate limited
+ * (for VF or PF)
+ * and whether a new vport is allocated to the pq or not (i.e. vport will be
+ * shared)
+ */
+
+/* flags for pq init */
+#define PQ_INIT_SHARE_VPORT (1 << 0)
+#define PQ_INIT_PF_RL (1 << 1)
+#define PQ_INIT_VF_RL (1 << 2)
+
+/* defines for pq init */
+#define PQ_INIT_DEFAULT_WRR_GROUP 1
+#define PQ_INIT_DEFAULT_TC 0
+#define PQ_INIT_OFLD_TC (p_hwfn->hw_info.offload_tc)
+
+static void ecore_init_qm_pq(struct ecore_hwfn *p_hwfn,
+ struct ecore_qm_info *qm_info,
+ u8 tc, u32 pq_init_flags)
+{
+ u16 pq_idx = qm_info->num_pqs, max_pq =
+ ecore_init_qm_get_num_pqs(p_hwfn);
+
+ if (pq_idx > max_pq)
+ DP_ERR(p_hwfn,
+ "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq);
+
+ /* init pq params */
+ qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport +
+ qm_info->num_vports;
+ qm_info->qm_pq_params[pq_idx].tc_id = tc;
+ qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP;
+ qm_info->qm_pq_params[pq_idx].rl_valid =
+ (pq_init_flags & PQ_INIT_PF_RL ||
+ pq_init_flags & PQ_INIT_VF_RL);
+
+ /* qm params accounting */
+ qm_info->num_pqs++;
+ if (!(pq_init_flags & PQ_INIT_SHARE_VPORT))
+ qm_info->num_vports++;
+
+ if (pq_init_flags & PQ_INIT_PF_RL)
+ qm_info->num_pf_rls++;
+
+ if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn))
+ DP_ERR(p_hwfn,
+ "vport overflow! qm_info->num_vports %d,"
+ " qm_init_get_num_vports() %d\n",
+ qm_info->num_vports,
+ ecore_init_qm_get_num_vports(p_hwfn));
+
+ if (qm_info->num_pf_rls > ecore_init_qm_get_num_pf_rls(p_hwfn))
+ DP_ERR(p_hwfn, "rl overflow! qm_info->num_pf_rls %d,"
+ " qm_init_get_num_pf_rls() %d\n",
+ qm_info->num_pf_rls,
+ ecore_init_qm_get_num_pf_rls(p_hwfn));
+}
+
+/* get pq index according to PQ_FLAGS */
+static u16 *ecore_init_qm_get_idx_from_flags(struct ecore_hwfn *p_hwfn,
+ u32 pq_flags)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ /* Can't have multiple flags set here */
+ if (OSAL_BITMAP_WEIGHT((unsigned long *)&pq_flags,
+ sizeof(pq_flags)) > 1)
+ goto err;
+
+ switch (pq_flags) {
+ case PQ_FLAGS_RLS:
+ return &qm_info->first_rl_pq;
+ case PQ_FLAGS_MCOS:
+ return &qm_info->first_mcos_pq;
+ case PQ_FLAGS_LB:
+ return &qm_info->pure_lb_pq;
+ case PQ_FLAGS_OOO:
+ return &qm_info->ooo_pq;
+ case PQ_FLAGS_ACK:
+ return &qm_info->pure_ack_pq;
+ case PQ_FLAGS_OFLD:
+ return &qm_info->offload_pq;
+ case PQ_FLAGS_VFS:
+ return &qm_info->first_vf_pq;
+ default:
+ goto err;
+ }
+
+err:
+ DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags);
+ return OSAL_NULL;
+}
+
+/* save pq index in qm info */
+static void ecore_init_qm_set_idx(struct ecore_hwfn *p_hwfn,
+ u32 pq_flags, u16 pq_val)
+{
+ u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
+
+ *base_pq_idx = p_hwfn->qm_info.start_pq + pq_val;
+}
+
+/* get tx pq index, with the PQ TX base already set (ready for context init) */
+u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags)
+{
+ u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
+
+ return *base_pq_idx + CM_TX_PQ_BASE;
+}
+
+u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc)
+{
+ u8 max_tc = ecore_init_qm_get_num_tcs(p_hwfn);
+
+ if (tc > max_tc)
+ DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);
+
+ return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc;
+}
+
+u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf)
+{
+ u16 max_vf = ecore_init_qm_get_num_vfs(p_hwfn);
+
+ if (vf > max_vf)
+ DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);
+
+ return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
+}
+
+u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 rl)
+{
+ u16 max_rl = ecore_init_qm_get_num_pf_rls(p_hwfn);
+
+ if (rl > max_rl)
+ DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl);
+
+ return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl;
+}
+
+/* Functions for creating specific types of pqs */
+static void ecore_init_qm_lb_pq(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_LB))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs);
+ ecore_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void ecore_init_qm_ooo_pq(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs);
+ ecore_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT);
+}
+
+static void ecore_init_qm_pure_ack_pq(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs);
+ ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void ecore_init_qm_offload_pq(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs);
+ ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void ecore_init_qm_mcos_pqs(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ u8 tc_idx;
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs);
+ for (tc_idx = 0; tc_idx < ecore_init_qm_get_num_tcs(p_hwfn); tc_idx++)
+ ecore_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT);
+}
+
+static void ecore_init_qm_vf_pqs(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ u16 vf_idx, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn);
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs);
+
+ qm_info->num_vf_pqs = num_vfs;
+ for (vf_idx = 0; vf_idx < num_vfs; vf_idx++)
+ ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_DEFAULT_TC,
+ PQ_INIT_VF_RL);
+}
+
+static void ecore_init_qm_rl_pqs(struct ecore_hwfn *p_hwfn)
+{
+ u16 pf_rls_idx, num_pf_rls = ecore_init_qm_get_num_pf_rls(p_hwfn);
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs);
+ for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++)
+ ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC,
+ PQ_INIT_PF_RL);
+}
+
+static void ecore_init_qm_pq_params(struct ecore_hwfn *p_hwfn)
+{
+ /* rate limited pqs, must come first (FW assumption) */
+ ecore_init_qm_rl_pqs(p_hwfn);
+
+ /* pqs for multi cos */
+ ecore_init_qm_mcos_pqs(p_hwfn);
+
+ /* pure loopback pq */
+ ecore_init_qm_lb_pq(p_hwfn);
+
+ /* out of order pq */
+ ecore_init_qm_ooo_pq(p_hwfn);
+
+ /* pure ack pq */
+ ecore_init_qm_pure_ack_pq(p_hwfn);
+
+ /* pq for offloaded protocol */
+ ecore_init_qm_offload_pq(p_hwfn);
+
+ /* done sharing vports */
+ ecore_init_qm_advance_vport(p_hwfn);
+
+ /* pqs for vfs */
+ ecore_init_qm_vf_pqs(p_hwfn);
+}
+
+/* compare values of getters against resources amounts */
+static enum _ecore_status_t ecore_init_qm_sanity(struct ecore_hwfn *p_hwfn)
+{
+ if (ecore_init_qm_get_num_vports(p_hwfn) >
+ RESC_NUM(p_hwfn, ECORE_VPORT)) {
+ DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n");
+ return ECORE_INVAL;
+ }
+
+ if (ecore_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, ECORE_PQ)) {
+ DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n");
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+/*
+ * Function for verbose printing of the qm initialization results
+ */
+static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ struct init_qm_vport_params *vport;
+ struct init_qm_port_params *port;
+ struct init_qm_pq_params *pq;
+ int i, tc;
+
+ /* top level params */
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "qm init top level params: start_pq %d, start_vport %d,"
+ " pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n",
+ qm_info->start_pq, qm_info->start_vport, qm_info->pure_lb_pq,
+ qm_info->offload_pq, qm_info->pure_ack_pq);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d,"
+ " num_vports %d, max_phys_tcs_per_port %d\n",
+ qm_info->ooo_pq, qm_info->first_vf_pq, qm_info->num_pqs,
+ qm_info->num_vf_pqs, qm_info->num_vports,
+ qm_info->max_phys_tcs_per_port);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d,"
+ " pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n",
+ qm_info->pf_rl_en, qm_info->pf_wfq_en, qm_info->vport_rl_en,
+ qm_info->vport_wfq_en, qm_info->pf_wfq, qm_info->pf_rl,
+ qm_info->num_pf_rls, ecore_get_pq_flags(p_hwfn));
+
+ /* port table */
+ for (i = 0; i < p_hwfn->p_dev->num_ports_in_engines; i++) {
+ port = &qm_info->qm_port_params[i];
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "port idx %d, active %d, active_phys_tcs %d,"
+ " num_pbf_cmd_lines %d, num_btb_blocks %d,"
+ " reserved %d\n",
+ i, port->active, port->active_phys_tcs,
+ port->num_pbf_cmd_lines, port->num_btb_blocks,
+ port->reserved);
+ }
+
+ /* vport table */
+ for (i = 0; i < qm_info->num_vports; i++) {
+ vport = &qm_info->qm_vport_params[i];
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "vport idx %d, vport_rl %d, wfq %d,"
+ " first_tx_pq_id [ ",
+ qm_info->start_vport + i, vport->vport_rl,
+ vport->vport_wfq);
+ for (tc = 0; tc < NUM_OF_TCS; tc++)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "%d ",
+ vport->first_tx_pq_id[tc]);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "]\n");
+ }
+
+ /* pq table */
+ for (i = 0; i < qm_info->num_pqs; i++) {
+ pq = &qm_info->qm_pq_params[i];
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "pq idx %d, vport_id %d, tc %d, wrr_grp %d,"
+ " rl_valid %d\n",
+ qm_info->start_pq + i, pq->vport_id, pq->tc_id,
+ pq->wrr_group, pq->rl_valid);
+ }
+}
+
+static void ecore_init_qm_info(struct ecore_hwfn *p_hwfn)
+{
+ /* reset params required for init run */
+ ecore_init_qm_reset_params(p_hwfn);
+
+ /* init QM top level params */
+ ecore_init_qm_params(p_hwfn);
+
+ /* init QM port params */
+ ecore_init_qm_port_params(p_hwfn);
+
+ /* init QM vport params */
+ ecore_init_qm_vport_params(p_hwfn);
+
+ /* init QM physical queue params */
+ ecore_init_qm_pq_params(p_hwfn);
+
+ /* display all that init */
+ ecore_dp_init_qm_params(p_hwfn);
+}
+
+/* This function reconfigures the QM pf on the fly.
+ * For this purpose we:
+ * 1. reconfigure the QM database
+ * 2. set new values to runtime array
+ * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
+ * 4. activate init tool in QM_PF stage
+ * 5. send an sdm_qm_cmd through rbc interface to release the QM
+ */
+enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ bool b_rc;
+ enum _ecore_status_t rc;
+
+ /* initialize ecore's qm data structure */
+ ecore_init_qm_info(p_hwfn);
+
+ /* stop PF's qm queues */
+ OSAL_SPIN_LOCK(&qm_lock);
+ b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
+ qm_info->start_pq, qm_info->num_pqs);
+ OSAL_SPIN_UNLOCK(&qm_lock);
+ if (!b_rc)
+ return ECORE_INVAL;
+
+ /* clear the QM_PF runtime phase leftovers from previous init */
+ ecore_init_clear_rt_data(p_hwfn);
+
+ /* prepare QM portion of runtime array */
+ ecore_qm_init_pf(p_hwfn);
+
+ /* activate init tool on runtime array */
+ rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
+ p_hwfn->hw_info.hw_mode);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* start PF's qm queues */
+ OSAL_SPIN_LOCK(&qm_lock);
+ b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
+ qm_info->start_pq, qm_info->num_pqs);
+ OSAL_SPIN_UNLOCK(&qm_lock);
+ if (!b_rc)
+ return ECORE_INVAL;
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_alloc_qm_data(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ enum _ecore_status_t rc;
+
+ rc = ecore_init_qm_sanity(p_hwfn);
+ if (rc != ECORE_SUCCESS)
+ goto alloc_err;
+
+ qm_info->qm_pq_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(struct init_qm_pq_params) *
+ ecore_init_qm_get_num_pqs(p_hwfn));
+ if (!qm_info->qm_pq_params)
+ goto alloc_err;
+
+ qm_info->qm_vport_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(struct init_qm_vport_params) *
+ ecore_init_qm_get_num_vports(p_hwfn));
+ if (!qm_info->qm_vport_params)
+ goto alloc_err;
+
+ qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(struct init_qm_port_params) *
+ p_hwfn->p_dev->num_ports_in_engines);
+ if (!qm_info->qm_port_params)
+ goto alloc_err;
+
+ qm_info->wfq_data = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(struct ecore_wfq_data) *
+ ecore_init_qm_get_num_vports(p_hwfn));
+ if (!qm_info->wfq_data)
+ goto alloc_err;
+
+ return ECORE_SUCCESS;
+
+alloc_err:
+ DP_NOTICE(p_hwfn, false, "Failed to allocate memory for QM params\n");
+ ecore_qm_info_free(p_hwfn);
+ return ECORE_NOMEM;
+}
+/******************** End QM initialization ***************/
+
+enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ int i;
+
+ if (IS_VF(p_dev)) {
+ for_each_hwfn(p_dev, i) {
+ rc = ecore_l2_alloc(&p_dev->hwfns[i]);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
+ return rc;
+ }
+
+ p_dev->fw_data = OSAL_ZALLOC(p_dev, GFP_KERNEL,
+ sizeof(*p_dev->fw_data));
+ if (!p_dev->fw_data)
+ return ECORE_NOMEM;
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+ u32 n_eqes, num_cons;
+
+ /* First allocate the context manager structure */
+ rc = ecore_cxt_mngr_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+		/* Set the HW cid/tid numbers (in the context manager)
+ * Must be done prior to any further computations.
+ */
+ rc = ecore_cxt_set_pf_params(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ rc = ecore_alloc_qm_data(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* init qm info */
+ ecore_init_qm_info(p_hwfn);
+
+ /* Compute the ILT client partition */
+ rc = ecore_cxt_cfg_ilt_compute(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* CID map / ILT shadow table / T2
+		 * The table sizes are determined by the computations above
+ */
+ rc = ecore_cxt_tables_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* SPQ, must follow ILT because initializes SPQ context */
+ rc = ecore_spq_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* SP status block allocation */
+ p_hwfn->p_dpc_ptt = ecore_get_reserved_ptt(p_hwfn,
+ RESERVED_PTT_DPC);
+
+ rc = ecore_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc)
+ goto alloc_err;
+
+ rc = ecore_iov_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* EQ */
+ n_eqes = ecore_chain_get_capacity(&p_hwfn->p_spq->chain);
+ if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) {
+ /* Calculate the EQ size
+ * ---------------------
+ * Each ICID may generate up to one event at a time i.e.
+ * the event must be handled/cleared before a new one
+ * can be generated. We calculate the sum of events per
+ * protocol and create an EQ deep enough to handle the
+ * worst case:
+ * - Core - according to SPQ.
+ * - RoCE - per QP there are a couple of ICIDs, one
+ * responder and one requester, each can
+ * generate an EQE => n_eqes_qp = 2 * n_qp.
+ * Each CQ can generate an EQE. There are 2 CQs
+ * per QP => n_eqes_cq = 2 * n_qp.
+ * Hence the RoCE total is 4 * n_qp or
+ * 2 * num_cons.
+ * - ENet - There can be up to two events per VF. One
+ * for VF-PF channel and another for VF FLR
+ * initial cleanup. The number of VFs is
+ * bounded by MAX_NUM_VFS_BB, and is much
+ * smaller than RoCE's so we avoid exact
+ * calculation.
+ */
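+			/* Illustration only: with 8K RoCE CIDs (i.e. ~4K QPs)
+			 * the RoCE term alone adds 16K EQEs on top of the
+			 * SPQ-derived base and the 2 * MAX_NUM_VFS_BB ENet
+			 * term.
+			 */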
+ if (ECORE_IS_ROCE_PERSONALITY(p_hwfn)) {
+ num_cons =
+ ecore_cxt_get_proto_cid_count(
+ p_hwfn,
+ PROTOCOLID_ROCE,
+ OSAL_NULL);
+ num_cons *= 2;
+ } else {
+ num_cons = ecore_cxt_get_proto_cid_count(
+ p_hwfn,
+ PROTOCOLID_IWARP,
+ OSAL_NULL);
+ }
+ n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
+ } else if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) {
+ num_cons =
+ ecore_cxt_get_proto_cid_count(p_hwfn,
+ PROTOCOLID_ISCSI,
+ OSAL_NULL);
+ n_eqes += 2 * num_cons;
+ }
+
+ if (n_eqes > 0xFFFF) {
+			DP_ERR(p_hwfn, "Cannot allocate 0x%x EQ elements. "
+				"The maximum of a u16 chain is 0x%x\n",
+ n_eqes, 0xFFFF);
+ goto alloc_no_mem;
+ }
+
+ rc = ecore_eq_alloc(p_hwfn, (u16)n_eqes);
+ if (rc)
+ goto alloc_err;
+
+ rc = ecore_consq_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ rc = ecore_l2_alloc(p_hwfn);
+ if (rc != ECORE_SUCCESS)
+ goto alloc_err;
+
+ /* DMA info initialization */
+ rc = ecore_dmae_info_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate memory for dmae_info"
+ " structure\n");
+ goto alloc_err;
+ }
+
+ /* DCBX initialization */
+ rc = ecore_dcbx_info_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate memory for dcbx structure\n");
+ goto alloc_err;
+ }
+ }
+
+ p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL,
+ sizeof(*p_dev->reset_stats));
+ if (!p_dev->reset_stats) {
+ DP_NOTICE(p_dev, true, "Failed to allocate reset statistics\n");
+ goto alloc_no_mem;
+ }
+
+ return ECORE_SUCCESS;
+
+alloc_no_mem:
+ rc = ECORE_NOMEM;
+alloc_err:
+ ecore_resc_free(p_dev);
+ return rc;
+}
+
+void ecore_resc_setup(struct ecore_dev *p_dev)
+{
+ int i;
+
+ if (IS_VF(p_dev)) {
+ for_each_hwfn(p_dev, i)
+ ecore_l2_setup(&p_dev->hwfns[i]);
+ return;
+ }
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ ecore_cxt_mngr_setup(p_hwfn);
+ ecore_spq_setup(p_hwfn);
+ ecore_eq_setup(p_hwfn);
+ ecore_consq_setup(p_hwfn);
+
+ /* Read shadow of current MFW mailbox */
+ ecore_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
+ OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow,
+ p_hwfn->mcp_info->mfw_mb_cur,
+ p_hwfn->mcp_info->mfw_mb_length);
+
+ ecore_int_setup(p_hwfn, p_hwfn->p_main_ptt);
+
+ ecore_l2_setup(p_hwfn);
+ ecore_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
+ }
+}
+
+#define FINAL_CLEANUP_POLL_CNT (100)
+#define FINAL_CLEANUP_POLL_TIME (10)
+enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 id, bool is_vf)
+{
+ u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
+ enum _ecore_status_t rc = ECORE_TIMEOUT;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev) ||
+ CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+ DP_INFO(p_hwfn, "Skipping final cleanup for non-ASIC\n");
+ return ECORE_SUCCESS;
+ }
+#endif
+
+ addr = GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
+
+ if (is_vf)
+ id += 0x10;
+
+ command |= X_FINAL_CLEANUP_AGG_INT <<
+ SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
+ command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
+ command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
+ command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;
+
+/* Make sure notification is not set before initiating final cleanup */
+
+ if (REG_RD(p_hwfn, addr)) {
+ DP_NOTICE(p_hwfn, false,
+ "Unexpected; Found final cleanup notification");
+ DP_NOTICE(p_hwfn, false,
+ " before initiating final cleanup\n");
+ REG_WR(p_hwfn, addr, 0);
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+		   "Sending final cleanup for PFVF[%d] [Command %08x]\n",
+ id, command);
+
+ ecore_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
+
+ /* Poll until completion */
+ while (!REG_RD(p_hwfn, addr) && count--)
+ OSAL_MSLEEP(FINAL_CLEANUP_POLL_TIME);
+
+ if (REG_RD(p_hwfn, addr))
+ rc = ECORE_SUCCESS;
+ else
+ DP_NOTICE(p_hwfn, true,
+ "Failed to receive FW final cleanup notification\n");
+
+ /* Cleanup afterwards */
+ REG_WR(p_hwfn, addr, 0);
+
+ return rc;
+}
+
+static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
+{
+ int hw_mode = 0;
+
+ if (ECORE_IS_BB_B0(p_hwfn->p_dev)) {
+ hw_mode |= 1 << MODE_BB;
+ } else if (ECORE_IS_AH(p_hwfn->p_dev)) {
+ hw_mode |= 1 << MODE_K2;
+ } else {
+ DP_NOTICE(p_hwfn, true, "Unknown chip type %#x\n",
+ p_hwfn->p_dev->type);
+ return ECORE_INVAL;
+ }
+
+ /* Ports per engine is based on the values in CNIG_REG_NW_PORT_MODE */
+ switch (p_hwfn->p_dev->num_ports_in_engines) {
+ case 1:
+ hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
+ break;
+ case 2:
+ hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
+ break;
+ case 4:
+ hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true,
+ "num_ports_in_engine = %d not supported\n",
+ p_hwfn->p_dev->num_ports_in_engines);
+ return ECORE_INVAL;
+ }
+
+ switch (p_hwfn->p_dev->mf_mode) {
+ case ECORE_MF_DEFAULT:
+ case ECORE_MF_NPAR:
+ hw_mode |= 1 << MODE_MF_SI;
+ break;
+ case ECORE_MF_OVLAN:
+ hw_mode |= 1 << MODE_MF_SD;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true,
+ "Unsupported MF mode, init as DEFAULT\n");
+ hw_mode |= 1 << MODE_MF_SI;
+ }
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+ hw_mode |= 1 << MODE_FPGA;
+ } else {
+ if (p_hwfn->p_dev->b_is_emul_full)
+ hw_mode |= 1 << MODE_EMUL_FULL;
+ else
+ hw_mode |= 1 << MODE_EMUL_REDUCED;
+ }
+ } else
+#endif
+ hw_mode |= 1 << MODE_ASIC;
+
+ if (p_hwfn->p_dev->num_hwfns > 1)
+ hw_mode |= 1 << MODE_100G;
+
+ p_hwfn->hw_info.hw_mode = hw_mode;
+
+ DP_VERBOSE(p_hwfn, (ECORE_MSG_PROBE | ECORE_MSG_IFUP),
+ "Configuring function for hw_mode: 0x%08x\n",
+ p_hwfn->hw_info.hw_mode);
+
+ return ECORE_SUCCESS;
+}
+
+#ifndef ASIC_ONLY
+/* MFW-replacement initializations for non-ASIC */
+static enum _ecore_status_t ecore_hw_init_chip(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ u32 pl_hv = 1;
+ int i;
+
+ if (CHIP_REV_IS_EMUL(p_dev)) {
+ if (ECORE_IS_AH(p_dev))
+ pl_hv |= 0x600;
+ }
+
+ ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV + 4, pl_hv);
+
+ if (CHIP_REV_IS_EMUL(p_dev) &&
+ (ECORE_IS_AH(p_dev)))
+ ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2_K2_E5,
+ 0x3ffffff);
+
+ /* initialize port mode to 4x10G_E (10G with 4x10 SERDES) */
+ /* CNIG_REG_NW_PORT_MODE is same for A0 and B0 */
+ if (!CHIP_REV_IS_EMUL(p_dev) || ECORE_IS_BB(p_dev))
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB, 4);
+
+ if (CHIP_REV_IS_EMUL(p_dev)) {
+ if (ECORE_IS_AH(p_dev)) {
+ /* 2 for 4-port, 1 for 2-port, 0 for 1-port */
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE,
+ (p_dev->num_ports_in_engines >> 1));
+
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN,
+ p_dev->num_ports_in_engines == 4 ? 0 : 3);
+ }
+ }
+
+ /* Poll on RBC */
+ ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_RBC_DONE, 1);
+ for (i = 0; i < 100; i++) {
+ OSAL_UDELAY(50);
+ if (ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_CFG_DONE) == 1)
+ break;
+ }
+ if (i == 100)
+ DP_NOTICE(p_hwfn, true,
+ "RBC done failed to complete in PSWRQ2\n");
+
+ return ECORE_SUCCESS;
+}
+#endif
+
+/* Init run time data for all PFs and their VFs on an engine.
+ * TBD for VFs - once parent-PF info for each VF is available in shmem,
+ * since the CAU requires knowledge of the parent PF for each VF.
+ */
+static void ecore_init_cau_rt_data(struct ecore_dev *p_dev)
+{
+ u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
+ int i, sb_id;
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+ struct ecore_igu_info *p_igu_info;
+ struct ecore_igu_block *p_block;
+ struct cau_sb_entry sb_entry;
+
+ p_igu_info = p_hwfn->hw_info.p_igu_info;
+
+ for (sb_id = 0; sb_id < ECORE_MAPPING_MEMORY_SIZE(p_dev);
+ sb_id++) {
+ p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
+
+ if (!p_block->is_pf)
+ continue;
+
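+ /* Build the CAU SB entry for this PF-owned status block and store it
+ * in the runtime array; each entry spans two 32-bit runtime registers,
+ * hence the stride of 2.
+ */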
+ ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
+ p_block->function_id, 0, 0);
+ STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2, sb_entry);
+ }
+ }
+}
+
+static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ int hw_mode)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ u8 vf_id, max_num_vfs;
+ u16 num_pfs, pf_id;
+ u32 concrete_fid;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ ecore_init_cau_rt_data(p_dev);
+
+ /* Program GTT windows */
+ ecore_gtt_init(p_hwfn);
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_dev)) {
+ rc = ecore_hw_init_chip(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
+#endif
+
+ if (p_hwfn->mcp_info) {
+ if (p_hwfn->mcp_info->func_info.bandwidth_max)
+ qm_info->pf_rl_en = 1;
+ if (p_hwfn->mcp_info->func_info.bandwidth_min)
+ qm_info->pf_wfq_en = 1;
+ }
+
+ ecore_qm_common_rt_init(p_hwfn,
+ p_dev->num_ports_in_engines,
+ qm_info->max_phys_tcs_per_port,
+ qm_info->pf_rl_en, qm_info->pf_wfq_en,
+ qm_info->vport_rl_en, qm_info->vport_wfq_en,
+ qm_info->qm_port_params);
+
+ ecore_cxt_hw_init_common(p_hwfn);
+
+ rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* @@TBD MichalK - should add VALIDATE_VFID to init tool...
+ * need to decide with which value, maybe runtime
+ */
+ ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
+ ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
+
+ if (ECORE_IS_BB(p_dev)) {
+ /* Workaround: clear the ROCE search for all functions to prevent
+ * involving a non-initialized function in processing ROCE packets.
+ */
+ num_pfs = NUM_OF_ENG_PFS(p_dev);
+ for (pf_id = 0; pf_id < num_pfs; pf_id++) {
+ ecore_fid_pretend(p_hwfn, p_ptt, pf_id);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+ }
+ /* pretend to original PF */
+ ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+ }
+
+ /* Workaround for avoiding CCFC execution error when getting packets
+ * with CRC errors, and allowing instead the invoking of the FW error
+ * handler.
+ * This is not done inside the init tool since it currently can't
+ * pretend to VFs.
+ */
+ max_num_vfs = ECORE_IS_AH(p_dev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
+ for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
+ concrete_fid = ecore_vfid_to_concrete(p_hwfn, vf_id);
+ ecore_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid);
+ ecore_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
+ ecore_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0);
+ ecore_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1);
+ ecore_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0);
+ }
+ /* pretend to original PF */
+ ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+
+ return rc;
+}
+
+#ifndef ASIC_ONLY
+#define MISC_REG_RESET_REG_2_XMAC_BIT (1 << 4)
+#define MISC_REG_RESET_REG_2_XMAC_SOFT_BIT (1 << 5)
+
+#define PMEG_IF_BYTE_COUNT 8
+
+static void ecore_wr_nw_port(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 addr, u64 data, u8 reg_type, u8 port)
+{
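+ /* Write a 64-bit value to a network-port register through the PMEG
+ * interface: program the command and address registers, then the two
+ * 32-bit halves of the data.
+ */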
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "CMD: %08x, ADDR: 0x%08x, DATA: %08x:%08x\n",
+ ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) |
+ (8 << PMEG_IF_BYTE_COUNT),
+ (reg_type << 25) | (addr << 8) | port,
+ (u32)((data >> 32) & 0xffffffff),
+ (u32)(data & 0xffffffff));
+
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB,
+ (ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) &
+ 0xffff00fe) | (8 << PMEG_IF_BYTE_COUNT));
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_ADDR_BB,
+ (reg_type << 25) | (addr << 8) | port);
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB, data & 0xffffffff);
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB,
+ (data >> 32) & 0xffffffff);
+}
+
+#define XLPORT_MODE_REG (0x20a)
+#define XLPORT_MAC_CONTROL (0x210)
+#define XLPORT_FLOW_CONTROL_CONFIG (0x207)
+#define XLPORT_ENABLE_REG (0x20b)
+
+#define XLMAC_CTRL (0x600)
+#define XLMAC_MODE (0x601)
+#define XLMAC_RX_MAX_SIZE (0x608)
+#define XLMAC_TX_CTRL (0x604)
+#define XLMAC_PAUSE_CTRL (0x60d)
+#define XLMAC_PFC_CTRL (0x60e)
+
+static void ecore_emul_link_init_bb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u8 loopback = 0, port = p_hwfn->port_id * 2;
+
+ DP_INFO(p_hwfn->p_dev, "Configurating Emulation Link %02x\n", port);
+
+ /* XLPORT MAC MODE *//* 0 Quad, 4 Single... */
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MODE_REG, (0x4 << 4) | 0x4, 1,
+ port);
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MAC_CONTROL, 0, 1, port);
+ /* XLMAC: SOFT RESET */
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x40, 0, port);
+ /* XLMAC: Port Speed >= 10Gbps */
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_MODE, 0x40, 0, port);
+ /* XLMAC: Max Size */
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_RX_MAX_SIZE, 0x3fff, 0, port);
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_TX_CTRL,
+ 0x01000000800ULL | (0xa << 12) | ((u64)1 << 38),
+ 0, port);
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PAUSE_CTRL, 0x7c000, 0, port);
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PFC_CTRL,
+ 0x30ffffc000ULL, 0, port);
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x3 | (loopback << 2), 0,
+ port); /* XLMAC: TX_EN, RX_EN */
+ /* XLMAC: TX_EN, RX_EN, SW_LINK_STATUS */
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL,
+ 0x1003 | (loopback << 2), 0, port);
+ /* Enable Parallel PFC interface */
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_FLOW_CONTROL_CONFIG, 1, 0, port);
+
+ /* XLPORT port enable */
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_ENABLE_REG, 0xf, 1, port);
+}
+
+static void ecore_emul_link_init_ah_e5(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u8 port = p_hwfn->port_id;
+ u32 mac_base = NWM_REG_MAC0_K2_E5 + (port << 2) * NWM_REG_MAC0_SIZE;
+
+ DP_INFO(p_hwfn->p_dev, "Configurating Emulation Link %02x\n", port);
+
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_NIG_PORT0_CONF_K2_E5 + (port << 2),
+ (1 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_K2_E5_SHIFT) |
+ (port <<
+ CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_K2_E5_SHIFT) |
+ (0 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_K2_E5_SHIFT));
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_XIF_MODE_K2_E5,
+ 1 << ETH_MAC_REG_XIF_MODE_XGMII_K2_E5_SHIFT);
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_FRM_LENGTH_K2_E5,
+ 9018 << ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_K2_E5_SHIFT);
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_IPG_LENGTH_K2_E5,
+ 0xc << ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_K2_E5_SHIFT);
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_RX_FIFO_SECTIONS_K2_E5,
+ 8 << ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_K2_E5_SHIFT);
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_FIFO_SECTIONS_K2_E5,
+ (0xA <<
+ ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_K2_E5_SHIFT) |
+ (8 <<
+ ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_K2_E5_SHIFT));
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_COMMAND_CONFIG_K2_E5,
+ 0xa853);
+}
+
+static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ if (ECORE_IS_AH(p_hwfn->p_dev))
+ ecore_emul_link_init_ah_e5(p_hwfn, p_ptt);
+ else /* BB */
+ ecore_emul_link_init_bb(p_hwfn, p_ptt);
+}
+
+static void ecore_link_init_bb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 port)
+{
+ int port_offset = port ? 0x800 : 0;
+ u32 xmac_rxctrl = 0;
+
+ /* Reset of XMAC */
+ /* FIXME: move to common start */
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32),
+ MISC_REG_RESET_REG_2_XMAC_BIT); /* Clear */
+ OSAL_MSLEEP(1);
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32),
+ MISC_REG_RESET_REG_2_XMAC_BIT); /* Set */
+
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_CORE_PORT_MODE_BB, 1);
+
+ /* Set the number of ports on the Warp Core to 10G */
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_PHY_PORT_MODE_BB, 3);
+
+ /* Soft reset of XMAC */
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32),
+ MISC_REG_RESET_REG_2_XMAC_SOFT_BIT);
+ OSAL_MSLEEP(1);
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32),
+ MISC_REG_RESET_REG_2_XMAC_SOFT_BIT);
+
+ /* FIXME: move to common end */
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
+ ecore_wr(p_hwfn, p_ptt, XMAC_REG_MODE_BB + port_offset, 0x20);
+
+ /* Set max packet size: initialize the XMAC block register for this port */
+ ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_MAX_SIZE_BB + port_offset, 0x2710);
+
+ /* CRC append for Tx packets: init the XMAC block register for this port */
+ ecore_wr(p_hwfn, p_ptt, XMAC_REG_TX_CTRL_LO_BB + port_offset, 0xC800);
+
+ /* Enable TX and RX: initialize the XMAC block register for this port */
+ ecore_wr(p_hwfn, p_ptt, XMAC_REG_CTRL_BB + port_offset,
+ XMAC_REG_CTRL_TX_EN_BB | XMAC_REG_CTRL_RX_EN_BB);
+ xmac_rxctrl = ecore_rd(p_hwfn, p_ptt,
+ XMAC_REG_RX_CTRL_BB + port_offset);
+ xmac_rxctrl |= XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE_BB;
+ ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_CTRL_BB + port_offset, xmac_rxctrl);
+}
+#endif
+
+static enum _ecore_status_t
+ecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
+{
+ u32 dpi_page_size_1, dpi_page_size_2, dpi_page_size;
+ u32 dpi_bit_shift, dpi_count;
+ u32 min_dpis;
+
+ /* Calculate DPI size
+ * ------------------
+ * The PWM region contains Doorbell Pages. The first is reserved for
+ * the kernel, e.g. for L2. The others are free to be used by non-
+ * trusted applications, typically from user space. Each page, called a
+ * doorbell page is sectioned into windows that allow doorbells to be
+ * issued in parallel by the kernel/application. The size of such a
+ * window (a.k.a. WID) is 1kB.
+ * Summary:
+ * 1kB WID x N WIDS = DPI page size
+ * DPI page size x N DPIs = PWM region size
+ * Notes:
+ * The size of the DPI page size must be in multiples of OSAL_PAGE_SIZE
+ * in order to ensure that two applications won't share the same page.
+ * It also must contain at least one WID per CPU to allow parallelism.
+ * It also must be a power of 2, since it is stored as a bit shift.
+ *
+ * The DPI page size is stored in a register as 'dpi_bit_shift' so that
+ * 0 is 4kB, 1 is 8kB, etc. Hence the minimum size is 4,096 bytes,
+ * containing 4 WIDs.
+ */
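+ /* Illustrative example (assuming a 1kB ECORE_WID_SIZE and a 4kB
+ * OSAL_PAGE_SIZE): with n_cpus = 6, dpi_page_size_1 = 6kB, which is
+ * rounded up to an 8kB DPI page, i.e. dpi_bit_shift = 1 and 8 WIDs
+ * per DPI.
+ */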
+ dpi_page_size_1 = ECORE_WID_SIZE * n_cpus;
+ dpi_page_size_2 = OSAL_MAX_T(u32, ECORE_WID_SIZE, OSAL_PAGE_SIZE);
+ dpi_page_size = OSAL_MAX_T(u32, dpi_page_size_1, dpi_page_size_2);
+ dpi_page_size = OSAL_ROUNDUP_POW_OF_TWO(dpi_page_size);
+ dpi_bit_shift = OSAL_LOG2(dpi_page_size / 4096);
+
+ dpi_count = pwm_region_size / dpi_page_size;
+
+ min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis;
+ min_dpis = OSAL_MAX_T(u32, ECORE_MIN_DPIS, min_dpis);
+
+ /* Update hwfn */
+ p_hwfn->dpi_size = dpi_page_size;
+ p_hwfn->dpi_count = dpi_count;
+
+ /* Update registers */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift);
+
+ if (dpi_count < min_dpis)
+ return ECORE_NORESOURCES;
+
+ return ECORE_SUCCESS;
+}
+
+enum ECORE_ROCE_EDPM_MODE {
+ ECORE_ROCE_EDPM_MODE_ENABLE = 0,
+ ECORE_ROCE_EDPM_MODE_FORCE_ON = 1,
+ ECORE_ROCE_EDPM_MODE_DISABLE = 2,
+};
+
+static enum _ecore_status_t
+ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 pwm_regsize, norm_regsize;
+ u32 non_pwm_conn, min_addr_reg1;
+ u32 db_bar_size, n_cpus;
+ u32 roce_edpm_mode;
+ u32 pf_dems_shift;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u8 cond;
+
+ db_bar_size = ecore_hw_bar_size(p_hwfn, BAR_ID_1);
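+ /* In CMT the doorbell BAR is shared between the two hw-functions, so
+ * each gets half of it.
+ */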
+ if (p_hwfn->p_dev->num_hwfns > 1)
+ db_bar_size /= 2;
+
+ /* Calculate doorbell regions
+ * -----------------------------------
+ * The doorbell BAR is made of two regions. The first is called normal
+ * region and the second is called PWM region. In the normal region
+ * each ICID has its own set of addresses so that writing to that
+ * specific address identifies the ICID. In the Process Window Mode
+ * region the ICID is given in the data written to the doorbell. The
+ * per-PF register DORQ_REG_PF_MIN_ADDR_REG1 denotes the offset in the
+ * doorbell BAR at which the PWM region begins.
+ * The normal region has ECORE_PF_DEMS_SIZE bytes per ICID, that is per
+ * non-PWM connection. The calculation below computes the total non-PWM
+ * connections. The DORQ_REG_PF_MIN_ADDR_REG1 register is
+ * in units of 4,096 bytes.
+ */
+ non_pwm_conn = ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +
+ ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
+ OSAL_NULL) +
+ ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, OSAL_NULL);
+ norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * non_pwm_conn, 4096);
+ min_addr_reg1 = norm_regsize / 4096;
+ pwm_regsize = db_bar_size - norm_regsize;
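+ /* Illustrative example (assuming ECORE_PF_DEMS_SIZE of 4 bytes): with
+ * 1,024 non-PWM connections, norm_regsize = ROUNDUP(4 * 1024, 4096) =
+ * 4096, min_addr_reg1 = 1, and the PWM region covers the remainder of
+ * the doorbell BAR.
+ */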
+
+ /* Check that the normal and PWM sizes are valid */
+ if (db_bar_size < norm_regsize) {
+ DP_ERR(p_hwfn->p_dev,
+ "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n",
+ db_bar_size, norm_regsize);
+ return ECORE_NORESOURCES;
+ }
+ if (pwm_regsize < ECORE_MIN_PWM_REGION) {
+ DP_ERR(p_hwfn->p_dev,
+ "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n",
+ pwm_regsize, ECORE_MIN_PWM_REGION, db_bar_size,
+ norm_regsize);
+ return ECORE_NORESOURCES;
+ }
+
+ /* Calculate number of DPIs */
+ roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode;
+ if ((roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE) ||
+ (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_FORCE_ON)) {
+ /* Either EDPM is mandatory, or we are attempting to allocate a
+ * WID per CPU.
+ */
+ n_cpus = OSAL_NUM_ACTIVE_CPU();
+ rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
+ }
+
+ cond = ((rc != ECORE_SUCCESS) &&
+ (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE)) ||
+ (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_DISABLE);
+ if (cond || p_hwfn->dcbx_no_edpm) {
+ /* Either EDPM is disabled from user configuration, or it is
+ * disabled via DCBx, or it is not mandatory and we failed to
+ * allocate a WID per CPU.
+ */
+ n_cpus = 1;
+ rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
+
+ /* If we entered this flow due to DCBX then the DPM register is
+ * already configured.
+ */
+ }
+
+ DP_INFO(p_hwfn,
+ "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n",
+ norm_regsize, pwm_regsize, p_hwfn->dpi_size, p_hwfn->dpi_count,
+ ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ?
+ "disabled" : "enabled");
+
+ /* Check return codes from above calls */
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn,
+ "Failed to allocate enough DPIs\n");
+ return ECORE_NORESOURCES;
+ }
+
+ /* Update hwfn */
+ p_hwfn->dpi_start_offset = norm_regsize;
+
+ /* Update registers */
+ /* DEMS size is configured log2 of DWORDs, hence the division by 4 */
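+ /* e.g. a DEMS size of 4 bytes (1 DWORD) yields a shift of 0 */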
+ pf_dems_shift = OSAL_LOG2(ECORE_PF_DEMS_SIZE / 4);
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift);
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1);
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ int hw_mode)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
+ hw_mode);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_ASIC(p_hwfn->p_dev))
+ return ECORE_SUCCESS;
+
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+ if (ECORE_IS_AH(p_hwfn->p_dev))
+ return ECORE_SUCCESS;
+ else if (ECORE_IS_BB(p_hwfn->p_dev))
+ ecore_link_init_bb(p_hwfn, p_ptt, p_hwfn->port_id);
+ } else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ if (p_hwfn->p_dev->num_hwfns > 1) {
+ /* Activate OPTE in CMT */
+ u32 val;
+
+ val = ecore_rd(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV);
+ val |= 0x10;
+ ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV, val);
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_CLK_100G_MODE, 1);
+ ecore_wr(p_hwfn, p_ptt, MISCS_REG_CLK_100G_MODE, 1);
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_OPTE_MODE, 1);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH, 1);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_ENG_CLS_ENG_ID_TBL, 0x55555555);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 0x4,
+ 0x55555555);
+ }
+
+ ecore_emul_link_init(p_hwfn, p_ptt);
+ } else {
+ DP_INFO(p_hwfn->p_dev, "link is not being configured\n");
+ }
+#endif
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_tunnel_info *p_tunn,
+ int hw_mode,
+ bool b_hw_start,
+ enum ecore_int_mode int_mode, bool allow_npar_tx_switch)
+{
+ u8 rel_pf_id = p_hwfn->rel_pf_id;
+ u32 prs_reg;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u16 ctrl;
+ int pos;
+
+ if (p_hwfn->mcp_info) {
+ struct ecore_mcp_function_info *p_info;
+
+ p_info = &p_hwfn->mcp_info->func_info;
+ if (p_info->bandwidth_min)
+ p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
+
+ /* Update rate limit once we'll actually have a link */
+ p_hwfn->qm_info.pf_rl = 100000;
+ }
+ ecore_cxt_hw_init_pf(p_hwfn);
+
+ ecore_int_igu_init_rt(p_hwfn);
+
+ /* Set VLAN in NIG if needed */
+ if (hw_mode & (1 << MODE_MF_SD)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Configuring LLH_FUNC_TAG\n");
+ STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
+ STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
+ p_hwfn->hw_info.ovlan);
+ }
+
+ /* Enable classification by MAC if needed */
+ if (hw_mode & (1 << MODE_MF_SI)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "Configuring TAGMAC_CLS_TYPE\n");
+ STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET,
+ 1);
+ }
+
+ /* Protocol Configuration - @@@TBD - should we set 0 otherwise? */
+ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
+ (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) ? 1 : 0);
+ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET,
+ (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) ? 1 : 0);
+ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
+
+ /* perform debug configuration when chip is out of reset */
+ OSAL_BEFORE_PF_START((void *)p_hwfn->p_dev, p_hwfn->my_id);
+
+ /* Clean the chip from previous driver remains, if any exist */
+ rc = ecore_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
+ if (rc != ECORE_SUCCESS) {
+ ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);
+ return rc;
+ }
+
+ /* PF Init sequence */
+ rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
+ if (rc)
+ return rc;
+
+ /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
+ rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
+ if (rc)
+ return rc;
+
+ /* Pure runtime initializations - directly to the HW */
+ ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
+
+ /* PCI relaxed ordering causes a decrease in the performance on some
+ * systems. Till a root cause is found, disable this attribute in the
+ * PCI config space.
+ */
+ /* Not in use @DPDK
+ * pos = OSAL_PCI_FIND_CAPABILITY(p_hwfn->p_dev, PCI_CAP_ID_EXP);
+ * if (!pos) {
+ * DP_NOTICE(p_hwfn, true,
+ * "Failed to find the PCIe Cap\n");
+ * return ECORE_IO;
+ * }
+ * OSAL_PCI_READ_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, &ctrl);
+ * ctrl &= ~PCI_EXP_DEVCTL_RELAX_EN;
+ * OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, ctrl);
+ */
+
+ rc = ecore_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
+ if (b_hw_start) {
+ /* enable interrupts */
+ rc = ecore_int_igu_enable(p_hwfn, p_ptt, int_mode);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* send function start command */
+ rc = ecore_sp_pf_start(p_hwfn, p_tunn, p_hwfn->p_dev->mf_mode,
+ allow_npar_tx_switch);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true,
+ "Function start ramrod failed\n");
+ } else {
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
+
+ if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) {
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1,
+ (1 << 2));
+ ecore_wr(p_hwfn, p_ptt,
+ PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST,
+ 0x100);
+ }
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH registers after start PFn\n");
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_TCP: %x\n", prs_reg);
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_UDP: %x\n", prs_reg);
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_FCOE: %x\n", prs_reg);
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_ROCE: %x\n", prs_reg);
+ prs_reg = ecore_rd(p_hwfn, p_ptt,
+ PRS_REG_SEARCH_TCP_FIRST_FRAG);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n",
+ prs_reg);
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
+ }
+ }
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_change_pci_hwfn(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 enable)
+{
+ u32 delay_idx = 0, val, set_val = enable ? 1 : 0;
+
+ /* Change PF in PXP */
+ ecore_wr(p_hwfn, p_ptt,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
+
+ /* Wait until the value is set - poll every 50us for up to 1 second (20,000 iterations) */
+ for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
+ val = ecore_rd(p_hwfn, p_ptt,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
+ if (val == set_val)
+ break;
+
+ OSAL_UDELAY(50);
+ }
+
+ if (val != set_val) {
+ DP_NOTICE(p_hwfn, true,
+ "PFID_ENABLE_MASTER wasn't changed after a second\n");
+ return ECORE_UNKNOWN_ERROR;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_main_ptt)
+{
+ /* Read shadow of current MFW mailbox */
+ ecore_mcp_read_mb(p_hwfn, p_main_ptt);
+ OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow,
+ p_hwfn->mcp_info->mfw_mb_cur,
+ p_hwfn->mcp_info->mfw_mb_length);
+}
+
+enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_hw_init_params *p_params)
+{
+ if (p_params->p_tunn) {
+ ecore_vf_set_vf_start_tunn_update_param(p_params->p_tunn);
+ ecore_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn);
+ }
+
+ p_hwfn->b_int_enabled = 1;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
+ struct ecore_hw_init_params *p_params)
+{
+ struct ecore_load_req_params load_req_params;
+ u32 load_code, param, drv_mb_param;
+ bool b_default_mtu = true;
+ struct ecore_hwfn *p_hwfn;
+ enum _ecore_status_t rc = ECORE_SUCCESS, mfw_rc;
+ int i;
+
+ if ((p_params->int_mode == ECORE_INT_MODE_MSI) &&
+ (p_dev->num_hwfns > 1)) {
+ DP_NOTICE(p_dev, false,
+ "MSI mode is not supported for CMT devices\n");
+ return ECORE_INVAL;
+ }
+
+ if (IS_PF(p_dev)) {
+ rc = ecore_init_fw_data(p_dev, p_params->bin_fw_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ /* If management didn't provide a default, set one of our own */
+ if (!p_hwfn->hw_info.mtu) {
+ p_hwfn->hw_info.mtu = 1500;
+ b_default_mtu = false;
+ }
+
+ if (IS_VF(p_dev)) {
+ ecore_vf_start(p_hwfn, p_params);
+ continue;
+ }
+
+ /* Enable DMAE in PXP */
+ rc = ecore_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ rc = ecore_calc_hw_mode(p_hwfn);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ OSAL_MEM_ZERO(&load_req_params, sizeof(load_req_params));
+ load_req_params.drv_role = p_params->is_crash_kernel ?
+ ECORE_DRV_ROLE_KDUMP :
+ ECORE_DRV_ROLE_OS;
+ load_req_params.timeout_val = p_params->mfw_timeout_val;
+ load_req_params.avoid_eng_reset = p_params->avoid_eng_reset;
+ rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
+ &load_req_params);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed sending a LOAD_REQ command\n");
+ return rc;
+ }
+
+ load_code = load_req_params.load_code;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Load request was sent. Load code: 0x%x\n",
+ load_code);
+
+ /* CQ75580:
+ * When coming back from hibernation, the registers from
+ * which the shadow is initially read are not initialized. It
+ * turns out that these registers get initialized during the
+ * ecore_mcp_load_req request, so we need to re-read them here
+ * to get the proper shadow register values.
+ * Note: This is a workaround for the missing MFW
+ * initialization. It may be removed once the implementation
+ * is done.
+ */
+ ecore_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
+
+ /* Only relevant for recovery:
+ * Clear the indication after the LOAD_REQ command is responded
+ * by the MFW.
+ */
+ p_dev->recov_in_prog = false;
+
+ p_hwfn->first_on_engine = (load_code ==
+ FW_MSG_CODE_DRV_LOAD_ENGINE);
+
+ if (!qm_lock_init) {
+ OSAL_SPIN_LOCK_INIT(&qm_lock);
+ qm_lock_init = true;
+ }
+
+ switch (load_code) {
+ case FW_MSG_CODE_DRV_LOAD_ENGINE:
+ rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->hw_info.hw_mode);
+ if (rc != ECORE_SUCCESS)
+ break;
+ /* Fall into */
+ case FW_MSG_CODE_DRV_LOAD_PORT:
+ rc = ecore_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->hw_info.hw_mode);
+ if (rc != ECORE_SUCCESS)
+ break;
+ /* Fall into */
+ case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+ rc = ecore_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
+ p_params->p_tunn,
+ p_hwfn->hw_info.hw_mode,
+ p_params->b_hw_start,
+ p_params->int_mode,
+ p_params->allow_npar_tx_switch);
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false,
+ "Unexpected load code [0x%08x]", load_code);
+ rc = ECORE_NOTIMPL;
+ break;
+ }
+
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, true,
+ "init phase failed for loadcode 0x%x (rc %d)\n",
+ load_code, rc);
+
+ /* ACK mfw regardless of success or failure of initialization */
+ mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_LOAD_DONE,
+ 0, &load_code, &param);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (mfw_rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed sending a LOAD_DONE command\n");
+ return mfw_rc;
+ }
+
+ /* send DCBX attention request command */
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "sending phony dcbx set command to trigger DCBx attention handling\n");
+ mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_SET_DCBX,
+ 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
+ &load_code, &param);
+ if (mfw_rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to send DCBX attention request\n");
+ return mfw_rc;
+ }
+
+ p_hwfn->hw_init_done = true;
+ }
+
+ if (IS_PF(p_dev)) {
+ p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ drv_mb_param = STORM_FW_VERSION;
+ rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
+ drv_mb_param, &load_code, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn, "Failed to update firmware version\n");
+
+ if (!b_default_mtu)
+ rc = ecore_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->hw_info.mtu);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn, "Failed to update default mtu\n");
+
+ rc = ecore_mcp_ov_update_driver_state(p_hwfn,
+ p_hwfn->p_main_ptt,
+ ECORE_OV_DRIVER_STATE_DISABLED);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn, "Failed to update driver state\n");
+ }
+
+ return rc;
+}
+
+#define ECORE_HW_STOP_RETRY_LIMIT (10)
+static void ecore_hw_timers_stop(struct ecore_dev *p_dev,
+ struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ int i;
+
+ /* close timers */
+ ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
+ ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
+ for (i = 0; i < ECORE_HW_STOP_RETRY_LIMIT && !p_dev->recov_in_prog;
+ i++) {
+ if ((!ecore_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_CONN)) &&
+ (!ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)))
+ break;
+
+ /* Dependent on number of connection/tasks, possibly
+ * 1ms sleep is required between polls
+ */
+ OSAL_MSLEEP(1);
+ }
+
+ if (i < ECORE_HW_STOP_RETRY_LIMIT)
+ return;
+
+ DP_NOTICE(p_hwfn, true, "Timers linear scans are not over"
+ " [Connection %02x Tasks %02x]\n",
+ (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
+ (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
+}
+
+void ecore_hw_timers_stop_all(struct ecore_dev *p_dev)
+{
+ int j;
+
+ for_each_hwfn(p_dev, j) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
+ struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+ ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt);
+ }
+}
+
+static enum _ecore_status_t ecore_verify_reg_val(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 addr, u32 expected_val)
+{
+ u32 val = ecore_rd(p_hwfn, p_ptt, addr);
+
+ if (val != expected_val) {
+ DP_NOTICE(p_hwfn, true,
+ "Value at address 0x%08x is 0x%08x while the expected value is 0x%08x\n",
+ addr, val, expected_val);
+ return ECORE_UNKNOWN_ERROR;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
+{
+ struct ecore_hwfn *p_hwfn;
+ struct ecore_ptt *p_ptt;
+ enum _ecore_status_t rc, rc2 = ECORE_SUCCESS;
+ int j;
+
+ for_each_hwfn(p_dev, j) {
+ p_hwfn = &p_dev->hwfns[j];
+ p_ptt = p_hwfn->p_main_ptt;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Stopping hw/fw\n");
+
+ if (IS_VF(p_dev)) {
+ ecore_vf_pf_int_cleanup(p_hwfn);
+ rc = ecore_vf_pf_reset(p_hwfn);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "ecore_vf_pf_reset failed. rc = %d.\n",
+ rc);
+ rc2 = ECORE_UNKNOWN_ERROR;
+ }
+ continue;
+ }
+
+ /* mark the hw as uninitialized... */
+ p_hwfn->hw_init_done = false;
+
+ /* Send unload command to MCP */
+ if (!p_dev->recov_in_prog) {
+ rc = ecore_mcp_unload_req(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed sending a UNLOAD_REQ command. rc = %d.\n",
+ rc);
+ rc2 = ECORE_UNKNOWN_ERROR;
+ }
+ }
+
+ OSAL_DPC_SYNC(p_hwfn);
+
+ /* After this point no MFW attentions are expected, e.g. prevent
+ * race between pf stop and dcbx pf update.
+ */
+
+ rc = ecore_sp_pf_stop(p_hwfn);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n",
+ rc);
+ rc2 = ECORE_UNKNOWN_ERROR;
+ }
+
+ /* perform debug action after PF stop was sent */
+ OSAL_AFTER_PF_STOP((void *)p_dev, p_hwfn->my_id);
+
+ /* close NIG to BRB gate */
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
+
+ /* close parser */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
+
+ /* @@@TBD - clean transmission queues (5.b) */
+ /* @@@TBD - clean BTB (5.c) */
+
+ ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt);
+
+ /* @@@TBD - verify DMAE requests are done (8) */
+
+ /* Disable Attention Generation */
+ ecore_int_igu_disable_int(p_hwfn, p_ptt);
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
+ ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
+ /* Need to wait 1ms to guarantee SBs are cleared */
+ OSAL_MSLEEP(1);
+
+ if (!p_dev->recov_in_prog) {
+ ecore_verify_reg_val(p_hwfn, p_ptt,
+ QM_REG_USG_CNT_PF_TX, 0);
+ ecore_verify_reg_val(p_hwfn, p_ptt,
+ QM_REG_USG_CNT_PF_OTHER, 0);
+ /* @@@TBD - assert on incorrect xCFC values (10.b) */
+ }
+
+ /* Disable PF in HW blocks */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
+ ecore_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);
+
+ if (!p_dev->recov_in_prog) {
+ rc = ecore_mcp_unload_done(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed sending a UNLOAD_DONE command. rc = %d.\n",
+ rc);
+ rc2 = ECORE_UNKNOWN_ERROR;
+ }
+ }
+ } /* hwfn loop */
+
+ if (IS_PF(p_dev)) {
+ p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ p_ptt = ECORE_LEADING_HWFN(p_dev)->p_main_ptt;
+
+ /* Disable DMAE in PXP - in CMT, this should only be done for
+ * first hw-function, and only after all transactions have
+ * stopped for all active hw-functions.
+ */
+ rc = ecore_change_pci_hwfn(p_hwfn, p_ptt, false);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "ecore_change_pci_hwfn failed. rc = %d.\n",
+ rc);
+ rc2 = ECORE_UNKNOWN_ERROR;
+ }
+ }
+
+ return rc2;
+}
+
+void ecore_hw_stop_fastpath(struct ecore_dev *p_dev)
+{
+ int j;
+
+ for_each_hwfn(p_dev, j) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
+ struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+ if (IS_VF(p_dev)) {
+ ecore_vf_pf_int_cleanup(p_hwfn);
+ continue;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN,
+ "Shutting down the fastpath\n");
+
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
+
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
+
+ /* @@@TBD - clean transmission queues (5.b) */
+ /* @@@TBD - clean BTB (5.c) */
+
+ /* @@@TBD - verify DMAE requests are done (8) */
+
+ ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
+ /* Need to wait 1ms to guarantee SBs are cleared */
+ OSAL_MSLEEP(1);
+ }
+}
+
+void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return;
+
+ /* If RoCE info is allocated, RoCE is initialized and should
+ * be enabled in the searcher.
+ */
+ if (p_hwfn->p_rdma_info) {
+ if (p_hwfn->b_rdma_enabled_in_prs)
+ ecore_wr(p_hwfn, p_ptt,
+ p_hwfn->rdma_prs_search_reg, 0x1);
+ ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x1);
+ }
+
+ /* Re-open incoming traffic */
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
+}
+
+/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
+static void ecore_hw_hwfn_free(struct ecore_hwfn *p_hwfn)
+{
+ ecore_ptt_pool_free(p_hwfn);
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->hw_info.p_igu_info);
+}
+
+/* Setup bar access */
+static void ecore_hw_hwfn_prepare(struct ecore_hwfn *p_hwfn)
+{
+ /* clear indirect access */
+ if (ECORE_IS_AH(p_hwfn->p_dev)) {
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_E8_F0_K2_E5, 0);
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_EC_F0_K2_E5, 0);
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_F0_F0_K2_E5, 0);
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_F4_F0_K2_E5, 0);
+ } else {
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0);
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0);
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0);
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
+ }
+
+ /* Clean Previous errors if such exist */
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id);
+
+ /* enable internal target-read */
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
+}
+
+static void get_function_id(struct ecore_hwfn *p_hwfn)
+{
+ /* ME Register */
+ p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn,
+ PXP_PF_ME_OPAQUE_ADDR);
+
+ p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
+
+ /* Bits 16-19 from the ME registers are the pf_num */
+ p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
+ p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
+ PXP_CONCRETE_FID_PFID);
+ p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
+ PXP_CONCRETE_FID_PORT);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
+ "Read ME register: Concrete 0x%08x Opaque 0x%04x\n",
+ p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid);
+}
+
+static void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn)
+{
+ u32 *feat_num = p_hwfn->hw_info.feat_num;
+ struct ecore_sb_cnt_info sb_cnt_info;
+ int num_features = 1;
+
+ /* Each L2 queue requires one status block and one L2-queue resource */
+ feat_num[ECORE_PF_L2_QUE] =
+ OSAL_MIN_T(u32,
+ RESC_NUM(p_hwfn, ECORE_SB) / num_features,
+ RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
+
+ OSAL_MEM_ZERO(&sb_cnt_info, sizeof(sb_cnt_info));
+ ecore_int_get_num_sbs(p_hwfn, &sb_cnt_info);
+ feat_num[ECORE_VF_L2_QUE] =
+ OSAL_MIN_T(u32,
+ RESC_NUM(p_hwfn, ECORE_L2_QUEUE) -
+ FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE),
+ sb_cnt_info.sb_iov_cnt);
+
+ feat_num[ECORE_FCOE_CQ] = OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_SB),
+ RESC_NUM(p_hwfn, ECORE_CMDQS_CQS));
+ feat_num[ECORE_ISCSI_CQ] = OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_SB),
+ RESC_NUM(p_hwfn, ECORE_CMDQS_CQS));
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
+ "#PF_L2_QUEUE=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #FCOE_CQ=%d #ISCSI_CQ=%d #SB=%d\n",
+ (int)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE),
+ (int)FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE),
+ (int)FEAT_NUM(p_hwfn, ECORE_RDMA_CNQ),
+ (int)FEAT_NUM(p_hwfn, ECORE_FCOE_CQ),
+ (int)FEAT_NUM(p_hwfn, ECORE_ISCSI_CQ),
+ RESC_NUM(p_hwfn, ECORE_SB));
+}
+
+const char *ecore_hw_get_resc_name(enum ecore_resources res_id)
+{
+ switch (res_id) {
+ case ECORE_SB:
+ return "SB";
+ case ECORE_L2_QUEUE:
+ return "L2_QUEUE";
+ case ECORE_VPORT:
+ return "VPORT";
+ case ECORE_RSS_ENG:
+ return "RSS_ENG";
+ case ECORE_PQ:
+ return "PQ";
+ case ECORE_RL:
+ return "RL";
+ case ECORE_MAC:
+ return "MAC";
+ case ECORE_VLAN:
+ return "VLAN";
+ case ECORE_RDMA_CNQ_RAM:
+ return "RDMA_CNQ_RAM";
+ case ECORE_ILT:
+ return "ILT";
+ case ECORE_LL2_QUEUE:
+ return "LL2_QUEUE";
+ case ECORE_CMDQS_CQS:
+ return "CMDQS_CQS";
+ case ECORE_RDMA_STATS_QUEUE:
+ return "RDMA_STATS_QUEUE";
+ case ECORE_BDQ:
+ return "BDQ";
+ default:
+ return "UNKNOWN_RESOURCE";
+ }
+}
+
+static enum _ecore_status_t
+__ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,
+ enum ecore_resources res_id, u32 resc_max_val,
+ u32 *p_mcp_resp)
+{
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_set_resc_max_val(p_hwfn, p_hwfn->p_main_ptt, res_id,
+ resc_max_val, p_mcp_resp);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "MFW response failure for a max value setting of resource %d [%s]\n",
+ res_id, ecore_hw_get_resc_name(res_id));
+ return rc;
+ }
+
+ if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK)
+ DP_INFO(p_hwfn,
+ "Failed to set the max value of resource %d [%s]. mcp_resp = 0x%08x.\n",
+ res_id, ecore_hw_get_resc_name(res_id), *p_mcp_resp);
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn)
+{
+ bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
+ u32 resc_max_val, mcp_resp;
+ u8 res_id;
+ enum _ecore_status_t rc;
+
+ for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) {
+ /* @DPDK */
+ switch (res_id) {
+ case ECORE_LL2_QUEUE:
+ case ECORE_RDMA_CNQ_RAM:
+ case ECORE_RDMA_STATS_QUEUE:
+ case ECORE_BDQ:
+ resc_max_val = 0;
+ break;
+ default:
+ continue;
+ }
+
+ rc = __ecore_hw_set_soft_resc_size(p_hwfn, res_id,
+ resc_max_val, &mcp_resp);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* There's no point in continuing to the next resource if the
+ * command is not supported by the MFW.
+ * We do continue if the command is supported but the resource
+ * is unknown to the MFW. Such a resource will be later
+ * configured with the default allocation values.
+ */
+ if (mcp_resp == FW_MSG_CODE_UNSUPPORTED)
+ return ECORE_NOTIMPL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static
+enum _ecore_status_t ecore_hw_get_dflt_resc(struct ecore_hwfn *p_hwfn,
+ enum ecore_resources res_id,
+ u32 *p_resc_num, u32 *p_resc_start)
+{
+ u8 num_funcs = p_hwfn->num_funcs_on_engine;
+ bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
+ struct ecore_sb_cnt_info sb_cnt_info;
+
+ switch (res_id) {
+ case ECORE_SB:
+ OSAL_MEM_ZERO(&sb_cnt_info, sizeof(sb_cnt_info));
+ ecore_int_get_num_sbs(p_hwfn, &sb_cnt_info);
+ *p_resc_num = sb_cnt_info.sb_cnt;
+ break;
+ case ECORE_L2_QUEUE:
+ *p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
+ MAX_NUM_L2_QUEUES_BB) / num_funcs;
+ break;
+ case ECORE_VPORT:
+ *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
+ MAX_NUM_VPORTS_BB) / num_funcs;
+ break;
+ case ECORE_RSS_ENG:
+ *p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
+ ETH_RSS_ENGINE_NUM_BB) / num_funcs;
+ break;
+ case ECORE_PQ:
+ *p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 :
+ MAX_QM_TX_QUEUES_BB) / num_funcs;
+ break;
+ case ECORE_RL:
+ *p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
+ break;
+ case ECORE_MAC:
+ case ECORE_VLAN:
+ /* Each VFC resource can accommodate both a MAC and a VLAN */
+ *p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
+ break;
+ case ECORE_ILT:
+ *p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
+ PXP_NUM_ILT_RECORDS_BB) / num_funcs;
+ break;
+ case ECORE_LL2_QUEUE:
+ *p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
+ break;
+ case ECORE_RDMA_CNQ_RAM:
+ case ECORE_CMDQS_CQS:
+ /* CNQ/CMDQS are the same resource */
+ /* @DPDK */
+ *p_resc_num = (NUM_OF_GLOBAL_QUEUES / 2) / num_funcs;
+ break;
+ case ECORE_RDMA_STATS_QUEUE:
+ /* @DPDK */
+ *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
+ MAX_NUM_VPORTS_BB) / num_funcs;
+ break;
+ case ECORE_BDQ:
+ /* @DPDK */
+ *p_resc_num = 0;
+ break;
+ default:
+ break;
+ }
+
+
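+ /* By default each enabled PF receives an equal, contiguous share of a
+ * resource: the start offset below is the per-PF amount multiplied by
+ * this PF's index among the enabled functions.
+ */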
+ switch (res_id) {
+ case ECORE_BDQ:
+ if (!*p_resc_num)
+ *p_resc_start = 0;
+ break;
+ default:
+ *p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx;
+ break;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+__ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, enum ecore_resources res_id,
+ bool drv_resc_alloc)
+{
+ u32 dflt_resc_num = 0, dflt_resc_start = 0;
+ u32 mcp_resp, *p_resc_num, *p_resc_start;
+ enum _ecore_status_t rc;
+
+ p_resc_num = &RESC_NUM(p_hwfn, res_id);
+ p_resc_start = &RESC_START(p_hwfn, res_id);
+
+ rc = ecore_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num,
+ &dflt_resc_start);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn,
+ "Failed to get default amount for resource %d [%s]\n",
+ res_id, ecore_hw_get_resc_name(res_id));
+ return rc;
+ }
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+ *p_resc_num = dflt_resc_num;
+ *p_resc_start = dflt_resc_start;
+ goto out;
+ }
+#endif
+
+ rc = ecore_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id,
+ &mcp_resp, p_resc_num, p_resc_start);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "MFW response failure for an allocation request for"
+ " resource %d [%s]\n",
+ res_id, ecore_hw_get_resc_name(res_id));
+ return rc;
+ }
+
+ /* Default driver values are applied in the following cases:
+ * - The resource allocation MB command is not supported by the MFW
+ * - There is an internal error in the MFW while processing the request
+ * - The resource ID is unknown to the MFW
+ */
+ if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) {
+ DP_INFO(p_hwfn,
+ "Failed to receive allocation info for resource %d [%s]."
+ " mcp_resp = 0x%x. Applying default values"
+ " [%d,%d].\n",
+ res_id, ecore_hw_get_resc_name(res_id), mcp_resp,
+ dflt_resc_num, dflt_resc_start);
+
+ *p_resc_num = dflt_resc_num;
+ *p_resc_start = dflt_resc_start;
+ goto out;
+ }
+
+ /* TBD - remove this when revising the handling of the SB resource */
+ if (res_id == ECORE_SB) {
+ /* Excluding the slowpath SB */
+ *p_resc_num -= 1;
+ *p_resc_start -= p_hwfn->enabled_func_idx;
+ }
+
+ if (*p_resc_num != dflt_resc_num || *p_resc_start != dflt_resc_start) {
+ DP_INFO(p_hwfn,
+ "MFW allocation for resource %d [%s] differs from default values [%d,%d vs. %d,%d]%s\n",
+ res_id, ecore_hw_get_resc_name(res_id), *p_resc_num,
+ *p_resc_start, dflt_resc_num, dflt_resc_start,
+ drv_resc_alloc ? " - Applying default values" : "");
+ if (drv_resc_alloc) {
+ *p_resc_num = dflt_resc_num;
+ *p_resc_start = dflt_resc_start;
+ }
+ }
+out:
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn,
+ bool drv_resc_alloc)
+{
+ enum _ecore_status_t rc;
+ u8 res_id;
+
+ for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) {
+ rc = __ecore_hw_set_resc_info(p_hwfn, res_id, drv_resc_alloc);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+#define ECORE_RESC_ALLOC_LOCK_RETRY_CNT 10
+#define ECORE_RESC_ALLOC_LOCK_RETRY_INTVL_US 10000 /* 10 msec */
+
+static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
+ bool drv_resc_alloc)
+{
+ struct ecore_resc_unlock_params resc_unlock_params;
+ struct ecore_resc_lock_params resc_lock_params;
+ bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
+ u8 res_id;
+ enum _ecore_status_t rc;
+#ifndef ASIC_ONLY
+ u32 *resc_start = p_hwfn->hw_info.resc_start;
+ u32 *resc_num = p_hwfn->hw_info.resc_num;
+ /* For AH, an equal share of the ILT lines between the maximal number of
+ * PFs is not enough for RoCE. This would be solved by the future
+ * resource allocation scheme, but isn't currently present for
+ * FPGA/emulation. For now we keep a number that is sufficient for RoCE
+ * to work - the BB number of ILT lines divided by its max PFs number.
+ */
+ u32 roce_min_ilt_lines = PXP_NUM_ILT_RECORDS_BB / MAX_NUM_PFS_BB;
+#endif
+
+ /* Setting the max values of the soft resources and the following
+ * resources allocation queries should be atomic. Since several PFs can
+ * run in parallel - a resource lock is needed.
+ * If either the resource lock or resource set value commands are not
+ * supported - skip the max values setting, release the lock if
+ * needed, and proceed to the queries. Other failures, including a
+ * failure to acquire the lock, will cause this function to fail.
+ * Old drivers that don't acquire the lock can run in parallel, and
+ * their allocation values won't be affected by the updated max values.
+ */
+ OSAL_MEM_ZERO(&resc_lock_params, sizeof(resc_lock_params));
+ resc_lock_params.resource = ECORE_RESC_LOCK_RESC_ALLOC;
+ resc_lock_params.retry_num = ECORE_RESC_ALLOC_LOCK_RETRY_CNT;
+ resc_lock_params.retry_interval = ECORE_RESC_ALLOC_LOCK_RETRY_INTVL_US;
+ resc_lock_params.sleep_b4_retry = true;
+ OSAL_MEM_ZERO(&resc_unlock_params, sizeof(resc_unlock_params));
+ resc_unlock_params.resource = ECORE_RESC_LOCK_RESC_ALLOC;
+
+ rc = ecore_mcp_resc_lock(p_hwfn, p_hwfn->p_main_ptt, &resc_lock_params);
+ if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
+ return rc;
+ } else if (rc == ECORE_NOTIMPL) {
+ DP_INFO(p_hwfn,
+ "Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n");
+ } else if (rc == ECORE_SUCCESS && !resc_lock_params.b_granted) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to acquire the resource lock for the resource allocation commands\n");
+ rc = ECORE_BUSY;
+ goto unlock_and_exit;
+ } else {
+ rc = ecore_hw_set_soft_resc_size(p_hwfn);
+ if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to set the max values of the soft resources\n");
+ goto unlock_and_exit;
+ } else if (rc == ECORE_NOTIMPL) {
+ DP_INFO(p_hwfn,
+ "Skip the max values setting of the soft resources since it is not supported by the MFW\n");
+ rc = ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt,
+ &resc_unlock_params);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn,
+ "Failed to release the resource lock for the resource allocation commands\n");
+ }
+ }
+
+ rc = ecore_hw_set_resc_info(p_hwfn, drv_resc_alloc);
+ if (rc != ECORE_SUCCESS)
+ goto unlock_and_exit;
+
+ if (resc_lock_params.b_granted && !resc_unlock_params.b_released) {
+ rc = ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt,
+ &resc_unlock_params);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn,
+ "Failed to release the resource lock for the resource allocation commands\n");
+ }
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+ /* Reduced build contains less PQs */
+ if (!(p_hwfn->p_dev->b_is_emul_full)) {
+ resc_num[ECORE_PQ] = 32;
+ resc_start[ECORE_PQ] = resc_num[ECORE_PQ] *
+ p_hwfn->enabled_func_idx;
+ }
+
+ /* For AH emulation, since we have a possible maximal number of
+ * 16 enabled PFs, in case there are not enough ILT lines -
+ * allocate only first PF as RoCE and have all the other ETH
+ * only with less ILT lines.
+ */
+ if (!p_hwfn->rel_pf_id && p_hwfn->p_dev->b_is_emul_full)
+ resc_num[ECORE_ILT] = OSAL_MAX_T(u32,
+ resc_num[ECORE_ILT],
+ roce_min_ilt_lines);
+ }
+
+ /* Correct the common ILT calculation if PF0 has more */
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev) &&
+ p_hwfn->p_dev->b_is_emul_full &&
+ p_hwfn->rel_pf_id && resc_num[ECORE_ILT] < roce_min_ilt_lines)
+ resc_start[ECORE_ILT] += roce_min_ilt_lines -
+ resc_num[ECORE_ILT];
+#endif
+
+ /* Sanity for ILT */
+ if ((b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
+ (!b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
+ DP_NOTICE(p_hwfn, true,
+ "Can't assign ILT pages [%08x,...,%08x]\n",
+ RESC_START(p_hwfn, ECORE_ILT), RESC_END(p_hwfn,
+ ECORE_ILT) -
+ 1);
+ return ECORE_INVAL;
+ }
+
+ ecore_hw_set_feat(p_hwfn);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
+ "The numbers for each resource are:\n");
+ for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, "%s = %d start = %d\n",
+ ecore_hw_get_resc_name(res_id),
+ RESC_NUM(p_hwfn, res_id),
+ RESC_START(p_hwfn, res_id));
+
+ return ECORE_SUCCESS;
+
+unlock_and_exit:
+ ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt, &resc_unlock_params);
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_hw_prepare_params *p_params)
+{
+ u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg, dcbx_mode;
+ u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
+ struct ecore_mcp_link_params *link;
+ enum _ecore_status_t rc;
+
+ /* Read global nvm_cfg address */
+ nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
+
+ /* Verify MCP has initialized it */
+ if (!nvm_cfg_addr) {
+ DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n");
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_NVM;
+ return ECORE_INVAL;
+ }
+
+ /* Read nvm_cfg1 (notice this is just the offset, and not the offsize (TBD)) */
+ nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
+
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ OFFSETOF(struct nvm_cfg1, glob) + OFFSETOF(struct nvm_cfg1_glob,
+ core_cfg);
+
+ core_cfg = ecore_rd(p_hwfn, p_ptt, addr);
+
+ switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
+ NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X40G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X50G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X100G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_F;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_E;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X20G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X40G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X25G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X10G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X25G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X25G;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true, "Unknown port mode in 0x%08x\n",
+ core_cfg);
+ break;
+ }
+
+ /* Read DCBX configuration */
+ port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
+ dcbx_mode = ecore_rd(p_hwfn, p_ptt,
+ port_cfg_addr +
+ OFFSETOF(struct nvm_cfg1_port, generic_cont0));
+ dcbx_mode = (dcbx_mode & NVM_CFG1_PORT_DCBX_MODE_MASK)
+ >> NVM_CFG1_PORT_DCBX_MODE_OFFSET;
+ switch (dcbx_mode) {
+ case NVM_CFG1_PORT_DCBX_MODE_DYNAMIC:
+ p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DYNAMIC;
+ break;
+ case NVM_CFG1_PORT_DCBX_MODE_CEE:
+ p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_CEE;
+ break;
+ case NVM_CFG1_PORT_DCBX_MODE_IEEE:
+ p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_IEEE;
+ break;
+ default:
+ p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DISABLED;
+ }
+
+ /* Read default link configuration */
+ link = &p_hwfn->mcp_info->link_input;
+ port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
+ link_temp = ecore_rd(p_hwfn, p_ptt,
+ port_cfg_addr +
+ OFFSETOF(struct nvm_cfg1_port, speed_cap_mask));
+ link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
+ link->speed.advertised_speeds = link_temp;
+
+ link_temp = link->speed.advertised_speeds;
+ p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp;
+
+ link_temp = ecore_rd(p_hwfn, p_ptt,
+ port_cfg_addr +
+ OFFSETOF(struct nvm_cfg1_port, link_settings));
+ switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
+ NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
+ link->speed.autoneg = true;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
+ link->speed.forced_speed = 1000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
+ link->speed.forced_speed = 10000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
+ link->speed.forced_speed = 25000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
+ link->speed.forced_speed = 40000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
+ link->speed.forced_speed = 50000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G:
+ link->speed.forced_speed = 100000;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true, "Unknown Speed in 0x%08x\n", link_temp);
+ }
+
+ p_hwfn->mcp_info->link_capabilities.default_speed =
+ link->speed.forced_speed;
+ p_hwfn->mcp_info->link_capabilities.default_speed_autoneg =
+ link->speed.autoneg;
+
+ link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
+ link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
+ link->pause.autoneg = !!(link_temp &
+ NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
+ link->pause.forced_rx = !!(link_temp &
+ NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
+ link->pause.forced_tx = !!(link_temp &
+ NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
+ link->loopback_mode = 0;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
+ link->speed.forced_speed, link->speed.advertised_speeds,
+ link->speed.autoneg, link->pause.autoneg);
+
+ /* Read Multi-function information from shmem */
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ OFFSETOF(struct nvm_cfg1, glob) +
+ OFFSETOF(struct nvm_cfg1_glob, generic_cont0);
+
+ generic_cont0 = ecore_rd(p_hwfn, p_ptt, addr);
+
+ mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
+ NVM_CFG1_GLOB_MF_MODE_OFFSET;
+
+ switch (mf_mode) {
+ case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
+ p_hwfn->p_dev->mf_mode = ECORE_MF_OVLAN;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
+ p_hwfn->p_dev->mf_mode = ECORE_MF_NPAR;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
+ p_hwfn->p_dev->mf_mode = ECORE_MF_DEFAULT;
+ break;
+ }
+ DP_INFO(p_hwfn, "Multi function mode is %08x\n",
+ p_hwfn->p_dev->mf_mode);
+
+ /* Read Multi-function information from shmem */
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ OFFSETOF(struct nvm_cfg1, glob) +
+ OFFSETOF(struct nvm_cfg1_glob, device_capabilities);
+
+ device_capabilities = ecore_rd(p_hwfn, p_ptt, addr);
+ if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
+ OSAL_SET_BIT(ECORE_DEV_CAP_ETH,
+ &p_hwfn->hw_info.device_capabilities);
+ if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE)
+ OSAL_SET_BIT(ECORE_DEV_CAP_FCOE,
+ &p_hwfn->hw_info.device_capabilities);
+ if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI)
+ OSAL_SET_BIT(ECORE_DEV_CAP_ISCSI,
+ &p_hwfn->hw_info.device_capabilities);
+ if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE)
+ OSAL_SET_BIT(ECORE_DEV_CAP_ROCE,
+ &p_hwfn->hw_info.device_capabilities);
+ if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP)
+ OSAL_SET_BIT(ECORE_DEV_CAP_IWARP,
+ &p_hwfn->hw_info.device_capabilities);
+
+ rc = ecore_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) {
+ rc = ECORE_SUCCESS;
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP;
+ }
+
+ return rc;
+}
+
+static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id;
+ u32 reg_function_hide, tmp, eng_mask, low_pfs_mask;
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+
+ num_funcs = ECORE_IS_AH(p_dev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB;
+
+ /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
+ * in the other bits are selected.
+ * Bits 1-15 are for functions 1-15, respectively, and their value is
+	 * '0' only for enabled functions (function 0 always exists and is
+	 * enabled).
+ * In case of CMT in BB, only the "even" functions are enabled, and thus
+ * the number of functions for both hwfns is learnt from the same bits.
+ */
+ if (ECORE_IS_BB(p_dev) || ECORE_IS_AH(p_dev)) {
+ reg_function_hide = ecore_rd(p_hwfn, p_ptt,
+ MISCS_REG_FUNCTION_HIDE_BB_K2);
+ } else { /* E5 */
+ reg_function_hide = 0;
+ }
+
+ if (reg_function_hide & 0x1) {
+ if (ECORE_IS_BB(p_dev)) {
+ if (ECORE_PATH_ID(p_hwfn) && p_dev->num_hwfns == 1) {
+ num_funcs = 0;
+ eng_mask = 0xaaaa;
+ } else {
+ num_funcs = 1;
+ eng_mask = 0x5554;
+ }
+ } else {
+ num_funcs = 1;
+ eng_mask = 0xfffe;
+ }
+
+ /* Get the number of the enabled functions on the engine */
+ tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
+ while (tmp) {
+ if (tmp & 0x1)
+ num_funcs++;
+ tmp >>= 0x1;
+ }
+
+ /* Get the PF index within the enabled functions */
+ low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1;
+ tmp = reg_function_hide & eng_mask & low_pfs_mask;
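+		/* Each hidden ('1') function below this PF lowers its index
+		 * within the set of enabled functions by one.
+		 */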
+ while (tmp) {
+ if (tmp & 0x1)
+ enabled_func_idx--;
+ tmp >>= 0x1;
+ }
+ }
+
+ p_hwfn->num_funcs_on_engine = num_funcs;
+ p_hwfn->enabled_func_idx = enabled_func_idx;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_FPGA(p_dev)) {
+ DP_NOTICE(p_hwfn, false,
+ "FPGA: Limit number of PFs to 4 [would affect resource allocation, needed for IOV]\n");
+ p_hwfn->num_funcs_on_engine = 4;
+ }
+#endif
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
+ "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n",
+ p_hwfn->rel_pf_id, p_hwfn->abs_pf_id,
+ p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
+}
+
+static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 port_mode;
+
+#ifndef ASIC_ONLY
+ /* Read the port mode */
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
+ port_mode = 4;
+ else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) &&
+ (p_hwfn->p_dev->num_hwfns > 1))
+ /* In CMT on emulation, assume 1 port */
+ port_mode = 1;
+ else
+#endif
+ port_mode = ecore_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB);
+
+ if (port_mode < 3) {
+ p_hwfn->p_dev->num_ports_in_engines = 1;
+ } else if (port_mode <= 5) {
+ p_hwfn->p_dev->num_ports_in_engines = 2;
+ } else {
+		DP_NOTICE(p_hwfn, true, "PORT MODE: %d not supported\n",
+			  port_mode);
+
+ /* Default num_ports_in_engines to something */
+ p_hwfn->p_dev->num_ports_in_engines = 1;
+ }
+}
+
+static void ecore_hw_info_port_num_ah_e5(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 port;
+ int i;
+
+ p_hwfn->p_dev->num_ports_in_engines = 0;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ port = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED);
+ switch ((port & 0xf000) >> 12) {
+ case 1:
+ p_hwfn->p_dev->num_ports_in_engines = 1;
+ break;
+ case 3:
+ p_hwfn->p_dev->num_ports_in_engines = 2;
+ break;
+ case 0xf:
+ p_hwfn->p_dev->num_ports_in_engines = 4;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false,
+ "Unknown port mode in ECO_RESERVED %08x\n",
+ port);
+ }
+ } else
+#endif
+ for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
+ port = ecore_rd(p_hwfn, p_ptt,
+ CNIG_REG_NIG_PORT0_CONF_K2_E5 +
+ (i * 4));
+ if (port & 1)
+ p_hwfn->p_dev->num_ports_in_engines++;
+ }
+}
+
+static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ if (ECORE_IS_BB(p_hwfn->p_dev))
+ ecore_hw_info_port_num_bb(p_hwfn, p_ptt);
+ else
+ ecore_hw_info_port_num_ah_e5(p_hwfn, p_ptt);
+}
+
+static enum _ecore_status_t
+ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_pci_personality personality,
+ struct ecore_hw_prepare_params *p_params)
+{
+ bool drv_resc_alloc = p_params->drv_resc_alloc;
+ enum _ecore_status_t rc;
+
+ /* Since all information is common, only first hwfns should do this */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ rc = ecore_iov_hw_info(p_hwfn);
+ if (rc != ECORE_SUCCESS) {
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res =
+ ECORE_HW_PREPARE_BAD_IOV;
+ else
+ return rc;
+ }
+ }
+
+	/* TODO In get_hw_info, amongst others:
+	 * Get MCP FW revision and determine according to it the supported
+	 * features (e.g. DCB)
+	 * Get boot mode
+	 * ecore_get_pcie_width_speed, WOL capability.
+	 * Number of global CQs (for storage)
+	 */
+ ecore_hw_info_port_num(p_hwfn, p_ptt);
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) {
+#endif
+ rc = ecore_hw_get_nvm_info(p_hwfn, p_ptt, p_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+#ifndef ASIC_ONLY
+ }
+#endif
+
+ rc = ecore_int_igu_read_cam(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS) {
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_IGU;
+ else
+ return rc;
+ }
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_ASIC(p_hwfn->p_dev) && ecore_mcp_is_init(p_hwfn)) {
+#endif
+ OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr,
+ p_hwfn->mcp_info->func_info.mac, ETH_ALEN);
+#ifndef ASIC_ONLY
+ } else {
+ static u8 mcp_hw_mac[6] = { 0, 2, 3, 4, 5, 6 };
+
+ OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr, mcp_hw_mac, ETH_ALEN);
+ p_hwfn->hw_info.hw_mac_addr[5] = p_hwfn->abs_pf_id;
+ }
+#endif
+
+ if (ecore_mcp_is_init(p_hwfn)) {
+ if (p_hwfn->mcp_info->func_info.ovlan != ECORE_MCP_VLAN_UNSET)
+ p_hwfn->hw_info.ovlan =
+ p_hwfn->mcp_info->func_info.ovlan;
+
+ ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
+ }
+
+ if (personality != ECORE_PCI_DEFAULT) {
+ p_hwfn->hw_info.personality = personality;
+ } else if (ecore_mcp_is_init(p_hwfn)) {
+ enum ecore_pci_personality protocol;
+
+ protocol = p_hwfn->mcp_info->func_info.protocol;
+ p_hwfn->hw_info.personality = protocol;
+ }
+
+#ifndef ASIC_ONLY
+	/* To overcome the ILT shortage on emulation, at least until we have
+	 * a definite answer from the system about it, allow only PF0 to be
+	 * RoCE.
+	 */
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev)) {
+ if (!p_hwfn->rel_pf_id)
+ p_hwfn->hw_info.personality = ECORE_PCI_ETH_ROCE;
+ else
+ p_hwfn->hw_info.personality = ECORE_PCI_ETH;
+ }
+#endif
+
+	/* Although on BB some configurations may support more than 4 TCs,
+	 * that can result in a performance penalty in some cases. 4
+	 * represents a good tradeoff between performance and flexibility.
+	 */
+ p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
+
+	/* Start out with a single active TC. This can be increased either
+	 * by DCBX negotiation or by the upper-layer driver.
+	 */
+ p_hwfn->hw_info.num_active_tc = 1;
+
+ ecore_get_num_funcs(p_hwfn, p_ptt);
+
+ if (ecore_mcp_is_init(p_hwfn))
+ p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu;
+
+ /* In case of forcing the driver's default resource allocation, calling
+ * ecore_hw_get_resc() should come after initializing the personality
+ * and after getting the number of functions, since the calculation of
+ * the resources/features depends on them.
+ * This order is not harmful if not forcing.
+ */
+ rc = ecore_hw_get_resc(p_hwfn, drv_resc_alloc);
+ if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) {
+ rc = ECORE_SUCCESS;
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP;
+ }
+
+ return rc;
+}
+
+static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ u32 tmp;
+
+ /* Read Vendor Id / Device Id */
+ OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_VENDOR_ID_OFFSET,
+ &p_dev->vendor_id);
+ OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_DEVICE_ID_OFFSET,
+ &p_dev->device_id);
+
+ /* Determine type */
+ if ((p_dev->device_id & ECORE_DEV_ID_MASK) == ECORE_DEV_ID_MASK_AH)
+ p_dev->type = ECORE_DEV_TYPE_AH;
+ else
+ p_dev->type = ECORE_DEV_TYPE_BB;
+
+ p_dev->chip_num = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
+ MISCS_REG_CHIP_NUM);
+ p_dev->chip_rev = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
+ MISCS_REG_CHIP_REV);
+
+ MASK_FIELD(CHIP_REV, p_dev->chip_rev);
+
+ /* Learn number of HW-functions */
+ tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
+ MISCS_REG_CMT_ENABLED_FOR_PAIR);
+
+ if (tmp & (1 << p_hwfn->rel_pf_id)) {
+ DP_NOTICE(p_dev->hwfns, false, "device in CMT mode\n");
+ p_dev->num_hwfns = 2;
+ } else {
+ p_dev->num_hwfns = 1;
+ }
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_dev)) {
+ /* For some reason we have problems with this register
+ * in B0 emulation; Simply assume no CMT
+ */
+ DP_NOTICE(p_dev->hwfns, false,
+ "device on emul - assume no CMT\n");
+ p_dev->num_hwfns = 1;
+ }
+#endif
+
+ p_dev->chip_bond_id = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
+ MISCS_REG_CHIP_TEST_REG) >> 4;
+ MASK_FIELD(CHIP_BOND_ID, p_dev->chip_bond_id);
+ p_dev->chip_metal = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
+ MISCS_REG_CHIP_METAL);
+ MASK_FIELD(CHIP_METAL, p_dev->chip_metal);
+ DP_INFO(p_dev->hwfns,
+ "Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
+ ECORE_IS_BB(p_dev) ? "BB" : "AH",
+ 'A' + p_dev->chip_rev, (int)p_dev->chip_metal,
+ p_dev->chip_num, p_dev->chip_rev, p_dev->chip_bond_id,
+ p_dev->chip_metal);
+
+ if (ECORE_IS_BB(p_dev) && CHIP_REV_IS_A0(p_dev)) {
+ DP_NOTICE(p_dev->hwfns, false,
+ "The chip type/rev (BB A0) is not supported!\n");
+ return ECORE_ABORTED;
+ }
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_AH(p_dev))
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ MISCS_REG_PLL_MAIN_CTRL_4, 0x1);
+
+ if (CHIP_REV_IS_EMUL(p_dev)) {
+ tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
+ MISCS_REG_ECO_RESERVED);
+ if (tmp & (1 << 29)) {
+ DP_NOTICE(p_hwfn, false,
+ "Emulation: Running on a FULL build\n");
+ p_dev->b_is_emul_full = true;
+ } else {
+ DP_NOTICE(p_hwfn, false,
+ "Emulation: Running on a REDUCED build\n");
+ }
+ }
+#endif
+
+ return ECORE_SUCCESS;
+}
+
+#ifndef LINUX_REMOVE
+void ecore_prepare_hibernate(struct ecore_dev *p_dev)
+{
+ int j;
+
+ if (IS_VF(p_dev))
+ return;
+
+ for_each_hwfn(p_dev, j) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN,
+ "Mark hw/fw uninitialized\n");
+
+ p_hwfn->hw_init_done = false;
+ p_hwfn->first_on_engine = false;
+
+ ecore_ptt_invalidate(p_hwfn);
+ }
+}
+#endif
+
+static enum _ecore_status_t
+ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
+ void OSAL_IOMEM * p_regview,
+ void OSAL_IOMEM * p_doorbells,
+ struct ecore_hw_prepare_params *p_params)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ struct ecore_mdump_info mdump_info;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* Split PCI bars evenly between hwfns */
+ p_hwfn->regview = p_regview;
+ p_hwfn->doorbells = p_doorbells;
+
+ if (IS_VF(p_dev))
+ return ecore_vf_hw_prepare(p_hwfn);
+
+ /* Validate that chip access is feasible */
+ if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
+ DP_ERR(p_hwfn,
+ "Reading the ME register returns all Fs; Preventing further chip access\n");
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_ME;
+ return ECORE_INVAL;
+ }
+
+ get_function_id(p_hwfn);
+
+ /* Allocate PTT pool */
+ rc = ecore_ptt_pool_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true, "Failed to prepare hwfn's hw\n");
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
+ goto err0;
+ }
+
+ /* Allocate the main PTT */
+ p_hwfn->p_main_ptt = ecore_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
+
+ /* First hwfn learns basic information, e.g., number of hwfns */
+ if (!p_hwfn->my_id) {
+ rc = ecore_get_dev_info(p_dev);
+ if (rc != ECORE_SUCCESS) {
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res =
+ ECORE_HW_PREPARE_FAILED_DEV;
+ goto err1;
+ }
+ }
+
+ ecore_hw_hwfn_prepare(p_hwfn);
+
+ /* Initialize MCP structure */
+ rc = ecore_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true, "Failed initializing mcp command\n");
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
+ goto err1;
+ }
+
+ /* Read the device configuration information from the HW and SHMEM */
+ rc = ecore_get_hw_info(p_hwfn, p_hwfn->p_main_ptt,
+ p_params->personality, p_params);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true, "Failed to get HW information\n");
+ goto err2;
+ }
+
+	/* Sending a mailbox to the MFW should be done after ecore_get_hw_info()
+	 * is called, since among other things it sets the number of ports in
+	 * an engine.
+	 */
+ if (p_params->initiate_pf_flr && p_hwfn == ECORE_LEADING_HWFN(p_dev) &&
+ !p_dev->recov_in_prog) {
+ rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n");
+ }
+
+ /* Check if mdump logs are present and update the epoch value */
+ if (p_hwfn == ECORE_LEADING_HWFN(p_hwfn->p_dev)) {
+ rc = ecore_mcp_mdump_get_info(p_hwfn, p_hwfn->p_main_ptt,
+ &mdump_info);
+ if (rc == ECORE_SUCCESS && mdump_info.num_of_logs > 0) {
+ DP_NOTICE(p_hwfn, false,
+ "* * * IMPORTANT - HW ERROR register dump captured by device * * *\n");
+ }
+
+ ecore_mcp_mdump_set_values(p_hwfn, p_hwfn->p_main_ptt,
+ p_params->epoch);
+ }
+
+ /* Allocate the init RT array and initialize the init-ops engine */
+ rc = ecore_init_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true, "Failed to allocate the init array\n");
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
+ goto err2;
+ }
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_FPGA(p_dev)) {
+ DP_NOTICE(p_hwfn, false,
+ "FPGA: workaround; Prevent DMAE parities\n");
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PCIE_REG_PRTY_MASK_K2_E5,
+ 7);
+
+ DP_NOTICE(p_hwfn, false,
+ "FPGA: workaround: Set VF bar0 size\n");
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_VF_BAR0_SIZE_K2_E5, 4);
+ }
+#endif
+
+ return rc;
+err2:
+ if (IS_LEAD_HWFN(p_hwfn))
+ ecore_iov_free_hw_info(p_dev);
+ ecore_mcp_free(p_hwfn);
+err1:
+ ecore_hw_hwfn_free(p_hwfn);
+err0:
+ return rc;
+}
+
+enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
+ struct ecore_hw_prepare_params *p_params)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ enum _ecore_status_t rc;
+
+ p_dev->chk_reg_fifo = p_params->chk_reg_fifo;
+
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS;
+
+ /* Store the precompiled init data ptrs */
+ if (IS_PF(p_dev))
+ ecore_init_iro_array(p_dev);
+
+ /* Initialize the first hwfn - will learn number of hwfns */
+ rc = ecore_hw_prepare_single(p_hwfn,
+ p_dev->regview,
+ p_dev->doorbells, p_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_params->personality = p_hwfn->hw_info.personality;
+
+	/* Initialize the 2nd hwfn if necessary */
+ if (p_dev->num_hwfns > 1) {
+ void OSAL_IOMEM *p_regview, *p_doorbell;
+ u8 OSAL_IOMEM *addr;
+
+ /* adjust bar offset for second engine */
+ addr = (u8 OSAL_IOMEM *)p_dev->regview +
+ ecore_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
+ p_regview = (void OSAL_IOMEM *)addr;
+
+ addr = (u8 OSAL_IOMEM *)p_dev->doorbells +
+ ecore_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
+ p_doorbell = (void OSAL_IOMEM *)addr;
+
+ /* prepare second hw function */
+ rc = ecore_hw_prepare_single(&p_dev->hwfns[1], p_regview,
+ p_doorbell, p_params);
+
+		/* In case of an error, we need to free the previously
+		 * initialized hwfn 0.
+		 */
+ if (rc != ECORE_SUCCESS) {
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res =
+ ECORE_HW_PREPARE_FAILED_ENG2;
+
+ if (IS_PF(p_dev)) {
+ ecore_init_free(p_hwfn);
+ ecore_mcp_free(p_hwfn);
+ ecore_hw_hwfn_free(p_hwfn);
+ } else {
+ DP_NOTICE(p_dev, true,
+ "What do we need to free when VF hwfn1 init fails\n");
+ }
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+void ecore_hw_remove(struct ecore_dev *p_dev)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ int i;
+
+ if (IS_PF(p_dev))
+ ecore_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt,
+ ECORE_OV_DRIVER_STATE_NOT_LOADED);
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ if (IS_VF(p_dev)) {
+ ecore_vf_pf_release(p_hwfn);
+ continue;
+ }
+
+ ecore_init_free(p_hwfn);
+ ecore_hw_hwfn_free(p_hwfn);
+ ecore_mcp_free(p_hwfn);
+
+ OSAL_MUTEX_DEALLOC(&p_hwfn->dmae_info.mutex);
+ }
+
+ ecore_iov_free_hw_info(p_dev);
+}
+
+static void ecore_chain_free_next_ptr(struct ecore_dev *p_dev,
+ struct ecore_chain *p_chain)
+{
+ void *p_virt = p_chain->p_virt_addr, *p_virt_next = OSAL_NULL;
+ dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0;
+ struct ecore_chain_next *p_next;
+ u32 size, i;
+
+ if (!p_virt)
+ return;
+
+ size = p_chain->elem_size * p_chain->usable_per_page;
+
+ for (i = 0; i < p_chain->page_cnt; i++) {
+ if (!p_virt)
+ break;
+
+ p_next = (struct ecore_chain_next *)((u8 *)p_virt + size);
+ p_virt_next = p_next->next_virt;
+ p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys);
+
+ OSAL_DMA_FREE_COHERENT(p_dev, p_virt, p_phys,
+ ECORE_CHAIN_PAGE_SIZE);
+
+ p_virt = p_virt_next;
+ p_phys = p_phys_next;
+ }
+}
+
+static void ecore_chain_free_single(struct ecore_dev *p_dev,
+ struct ecore_chain *p_chain)
+{
+ if (!p_chain->p_virt_addr)
+ return;
+
+ OSAL_DMA_FREE_COHERENT(p_dev, p_chain->p_virt_addr,
+ p_chain->p_phys_addr, ECORE_CHAIN_PAGE_SIZE);
+}
+
+static void ecore_chain_free_pbl(struct ecore_dev *p_dev,
+ struct ecore_chain *p_chain)
+{
+ void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
+ u8 *p_pbl_virt = (u8 *)p_chain->pbl_sp.p_virt_table;
+ u32 page_cnt = p_chain->page_cnt, i, pbl_size;
+
+ if (!pp_virt_addr_tbl)
+ return;
+
+ if (!p_pbl_virt)
+ goto out;
+
+ for (i = 0; i < page_cnt; i++) {
+ if (!pp_virt_addr_tbl[i])
+ break;
+
+ OSAL_DMA_FREE_COHERENT(p_dev, pp_virt_addr_tbl[i],
+ *(dma_addr_t *)p_pbl_virt,
+ ECORE_CHAIN_PAGE_SIZE);
+
+ p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE;
+ }
+
+ pbl_size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;
+
+ if (!p_chain->b_external_pbl)
+ OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl_sp.p_virt_table,
+ p_chain->pbl_sp.p_phys_table, pbl_size);
+ out:
+ OSAL_VFREE(p_dev, p_chain->pbl.pp_virt_addr_tbl);
+}
+
+void ecore_chain_free(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
+{
+ switch (p_chain->mode) {
+ case ECORE_CHAIN_MODE_NEXT_PTR:
+ ecore_chain_free_next_ptr(p_dev, p_chain);
+ break;
+ case ECORE_CHAIN_MODE_SINGLE:
+ ecore_chain_free_single(p_dev, p_chain);
+ break;
+ case ECORE_CHAIN_MODE_PBL:
+ ecore_chain_free_pbl(p_dev, p_chain);
+ break;
+ }
+}
+
+static enum _ecore_status_t
+ecore_chain_alloc_sanity_check(struct ecore_dev *p_dev,
+ enum ecore_chain_cnt_type cnt_type,
+ osal_size_t elem_size, u32 page_cnt)
+{
+ u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;
+
+	/* The actual chain size can be larger than the maximal possible value
+	 * after rounding up the requested elements number to pages, and after
+	 * taking into account the unusable elements (next-ptr elements).
+	 * The size of a "u16" chain can be (U16_MAX + 1) since the chain
+	 * size/capacity fields are of a u32 type.
+	 */
+ if ((cnt_type == ECORE_CHAIN_CNT_TYPE_U16 &&
+ chain_size > ((u32)ECORE_U16_MAX + 1)) ||
+ (cnt_type == ECORE_CHAIN_CNT_TYPE_U32 &&
+ chain_size > ECORE_U32_MAX)) {
+ DP_NOTICE(p_dev, true,
+ "The actual chain size (0x%lx) is larger than the maximal possible value\n",
+ (unsigned long)chain_size);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_chain_alloc_next_ptr(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
+{
+ void *p_virt = OSAL_NULL, *p_virt_prev = OSAL_NULL;
+ dma_addr_t p_phys = 0;
+ u32 i;
+
+ for (i = 0; i < p_chain->page_cnt; i++) {
+ p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
+ ECORE_CHAIN_PAGE_SIZE);
+ if (!p_virt) {
+ DP_NOTICE(p_dev, true,
+ "Failed to allocate chain memory\n");
+ return ECORE_NOMEM;
+ }
+
+ if (i == 0) {
+ ecore_chain_init_mem(p_chain, p_virt, p_phys);
+ ecore_chain_reset(p_chain);
+ } else {
+ ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev,
+ p_virt, p_phys);
+ }
+
+ p_virt_prev = p_virt;
+ }
+ /* Last page's next element should point to the beginning of the
+ * chain.
+ */
+ ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev,
+ p_chain->p_virt_addr,
+ p_chain->p_phys_addr);
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
+{
+ dma_addr_t p_phys = 0;
+ void *p_virt = OSAL_NULL;
+
+ p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE);
+ if (!p_virt) {
+ DP_NOTICE(p_dev, true, "Failed to allocate chain memory\n");
+ return ECORE_NOMEM;
+ }
+
+ ecore_chain_init_mem(p_chain, p_virt, p_phys);
+ ecore_chain_reset(p_chain);
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
+ struct ecore_chain *p_chain,
+ struct ecore_chain_ext_pbl *ext_pbl)
+{
+ void *p_virt = OSAL_NULL;
+ u8 *p_pbl_virt = OSAL_NULL;
+ void **pp_virt_addr_tbl = OSAL_NULL;
+ dma_addr_t p_phys = 0, p_pbl_phys = 0;
+ u32 page_cnt = p_chain->page_cnt, size, i;
+
+ size = page_cnt * sizeof(*pp_virt_addr_tbl);
+ pp_virt_addr_tbl = (void **)OSAL_VZALLOC(p_dev, size);
+ if (!pp_virt_addr_tbl) {
+ DP_NOTICE(p_dev, true,
+ "Failed to allocate memory for the chain virtual addresses table\n");
+ return ECORE_NOMEM;
+ }
+
+	/* The allocation of the PBL table is done with its full size, since
+	 * it is expected to be contiguous.
+	 * ecore_chain_init_pbl_mem() is called even in a case of an allocation
+	 * failure, since pp_virt_addr_tbl was previously allocated, and it
+	 * should be saved to allow its freeing during the error flow.
+	 */
+ size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;
+
+ if (ext_pbl == OSAL_NULL) {
+ p_pbl_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_pbl_phys, size);
+ } else {
+ p_pbl_virt = ext_pbl->p_pbl_virt;
+ p_pbl_phys = ext_pbl->p_pbl_phys;
+ p_chain->b_external_pbl = true;
+ }
+
+ ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
+ pp_virt_addr_tbl);
+ if (!p_pbl_virt) {
+ DP_NOTICE(p_dev, true, "Failed to allocate chain pbl memory\n");
+ return ECORE_NOMEM;
+ }
+
+ for (i = 0; i < page_cnt; i++) {
+ p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
+ ECORE_CHAIN_PAGE_SIZE);
+ if (!p_virt) {
+ DP_NOTICE(p_dev, true,
+ "Failed to allocate chain memory\n");
+ return ECORE_NOMEM;
+ }
+
+ if (i == 0) {
+ ecore_chain_init_mem(p_chain, p_virt, p_phys);
+ ecore_chain_reset(p_chain);
+ }
+
+ /* Fill the PBL table with the physical address of the page */
+ *(dma_addr_t *)p_pbl_virt = p_phys;
+ /* Keep the virtual address of the page */
+ p_chain->pbl.pp_virt_addr_tbl[i] = p_virt;
+
+ p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev,
+ enum ecore_chain_use_mode intended_use,
+ enum ecore_chain_mode mode,
+ enum ecore_chain_cnt_type cnt_type,
+ u32 num_elems, osal_size_t elem_size,
+ struct ecore_chain *p_chain,
+ struct ecore_chain_ext_pbl *ext_pbl)
+{
+ u32 page_cnt;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ if (mode == ECORE_CHAIN_MODE_SINGLE)
+ page_cnt = 1;
+ else
+ page_cnt = ECORE_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
+
+ rc = ecore_chain_alloc_sanity_check(p_dev, cnt_type, elem_size,
+ page_cnt);
+ if (rc) {
+ DP_NOTICE(p_dev, true,
+ "Cannot allocate a chain with the given arguments:\n"
+ "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
+ intended_use, mode, cnt_type, num_elems, elem_size);
+ return rc;
+ }
+
+ ecore_chain_init_params(p_chain, page_cnt, (u8)elem_size, intended_use,
+ mode, cnt_type, p_dev->dp_ctx);
+
+ switch (mode) {
+ case ECORE_CHAIN_MODE_NEXT_PTR:
+ rc = ecore_chain_alloc_next_ptr(p_dev, p_chain);
+ break;
+ case ECORE_CHAIN_MODE_SINGLE:
+ rc = ecore_chain_alloc_single(p_dev, p_chain);
+ break;
+ case ECORE_CHAIN_MODE_PBL:
+ rc = ecore_chain_alloc_pbl(p_dev, p_chain, ext_pbl);
+ break;
+ }
+ if (rc)
+ goto nomem;
+
+ return ECORE_SUCCESS;
+
+nomem:
+ ecore_chain_free(p_dev, p_chain);
+ return rc;
+}
+
+enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
+ u16 src_id, u16 *dst_id)
+{
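+	/* Translate the hwfn-relative queue index into the engine-absolute
+	 * index by adding this PF's L2-queue resource base.
+	 */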
+ if (src_id >= RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
+ u16 min, max;
+
+ min = (u16)RESC_START(p_hwfn, ECORE_L2_QUEUE);
+ max = min + RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
+ DP_NOTICE(p_hwfn, true,
+ "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
+ src_id, min, max);
+
+ return ECORE_INVAL;
+ }
+
+ *dst_id = RESC_START(p_hwfn, ECORE_L2_QUEUE) + src_id;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
+ u8 src_id, u8 *dst_id)
+{
+ if (src_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) {
+ u8 min, max;
+
+ min = (u8)RESC_START(p_hwfn, ECORE_VPORT);
+ max = min + RESC_NUM(p_hwfn, ECORE_VPORT);
+ DP_NOTICE(p_hwfn, true,
+ "vport id [%d] is not valid, available indices [%d - %d]\n",
+ src_id, min, max);
+
+ return ECORE_INVAL;
+ }
+
+ *dst_id = RESC_START(p_hwfn, ECORE_VPORT) + src_id;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
+ u8 src_id, u8 *dst_id)
+{
+ if (src_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG)) {
+ u8 min, max;
+
+ min = (u8)RESC_START(p_hwfn, ECORE_RSS_ENG);
+ max = min + RESC_NUM(p_hwfn, ECORE_RSS_ENG);
+ DP_NOTICE(p_hwfn, true,
+ "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
+ src_id, min, max);
+
+ return ECORE_INVAL;
+ }
+
+ *dst_id = RESC_START(p_hwfn, ECORE_RSS_ENG) + src_id;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 *p_filter)
+{
+ u32 high, low, en;
+ int i;
+
+ if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ return ECORE_SUCCESS;
+
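+	/* Pack the 6-byte MAC into the two 32-bit filter value registers;
+	 * e.g. aa:bb:cc:dd:ee:ff yields high = 0x0000aabb, low = 0xccddeeff.
+	 */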
+ high = p_filter[1] | (p_filter[0] << 8);
+ low = p_filter[5] | (p_filter[4] << 8) |
+ (p_filter[3] << 16) | (p_filter[2] << 24);
+
+ /* Find a free entry and utilize it */
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ en = ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
+ if (en)
+ continue;
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ 2 * i * sizeof(u32), low);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ (2 * i + 1) * sizeof(u32), high);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
+ i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
+ break;
+ }
+ if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to find an empty LLH filter to utilize\n");
+ return ECORE_INVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "MAC: %x:%x:%x:%x:%x:%x is added at %d\n",
+ p_filter[0], p_filter[1], p_filter[2],
+ p_filter[3], p_filter[4], p_filter[5], i);
+
+ return ECORE_SUCCESS;
+}
+
+void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 *p_filter)
+{
+ u32 high, low;
+ int i;
+
+ if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ return;
+
+ high = p_filter[1] | (p_filter[0] << 8);
+ low = p_filter[5] | (p_filter[4] << 8) |
+ (p_filter[3] << 16) | (p_filter[2] << 24);
+
+ /* Find the entry and clean it */
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ if (ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ 2 * i * sizeof(u32)) != low)
+ continue;
+ if (ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ (2 * i + 1) * sizeof(u32)) != high)
+ continue;
+
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ 2 * i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ (2 * i + 1) * sizeof(u32), 0);
+ break;
+ }
+ if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
+ DP_NOTICE(p_hwfn, false,
+ "Tried to remove a non-configured filter\n");
+}
+
+enum _ecore_status_t
+ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 source_port_or_eth_type,
+ u16 dest_port,
+ enum ecore_llh_port_filter_type_t type)
+{
+ u32 high, low, en;
+ int i;
+
+ if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ return ECORE_SUCCESS;
+
+ high = 0;
+ low = 0;
+ switch (type) {
+ case ECORE_LLH_FILTER_ETHERTYPE:
+ high = source_port_or_eth_type;
+ break;
+ case ECORE_LLH_FILTER_TCP_SRC_PORT:
+ case ECORE_LLH_FILTER_UDP_SRC_PORT:
+ low = source_port_or_eth_type << 16;
+ break;
+ case ECORE_LLH_FILTER_TCP_DEST_PORT:
+ case ECORE_LLH_FILTER_UDP_DEST_PORT:
+ low = dest_port;
+ break;
+ case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
+ case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
+ low = (source_port_or_eth_type << 16) | dest_port;
+ break;
+ default:
+		DP_NOTICE(p_hwfn, true,
+			  "Invalid LLH protocol filter type %d\n", type);
+ return ECORE_INVAL;
+ }
+ /* Find a free entry and utilize it */
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ en = ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
+ if (en)
+ continue;
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ 2 * i * sizeof(u32), low);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ (2 * i + 1) * sizeof(u32), high);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 1);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
+ i * sizeof(u32), 1 << type);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
+ break;
+ }
+ if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to find an empty LLH filter to utilize\n");
+ return ECORE_NORESOURCES;
+ }
+ switch (type) {
+ case ECORE_LLH_FILTER_ETHERTYPE:
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "ETH type %x is added at %d\n",
+ source_port_or_eth_type, i);
+ break;
+ case ECORE_LLH_FILTER_TCP_SRC_PORT:
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "TCP src port %x is added at %d\n",
+ source_port_or_eth_type, i);
+ break;
+ case ECORE_LLH_FILTER_UDP_SRC_PORT:
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "UDP src port %x is added at %d\n",
+ source_port_or_eth_type, i);
+ break;
+ case ECORE_LLH_FILTER_TCP_DEST_PORT:
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "TCP dst port %x is added at %d\n", dest_port, i);
+ break;
+ case ECORE_LLH_FILTER_UDP_DEST_PORT:
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "UDP dst port %x is added at %d\n", dest_port, i);
+ break;
+ case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "TCP src/dst ports %x/%x are added at %d\n",
+ source_port_or_eth_type, dest_port, i);
+ break;
+ case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "UDP src/dst ports %x/%x are added at %d\n",
+ source_port_or_eth_type, dest_port, i);
+ break;
+ }
+ return ECORE_SUCCESS;
+}
+
+void
+ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 source_port_or_eth_type,
+ u16 dest_port,
+ enum ecore_llh_port_filter_type_t type)
+{
+ u32 high, low;
+ int i;
+
+ if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ return;
+
+ high = 0;
+ low = 0;
+ switch (type) {
+ case ECORE_LLH_FILTER_ETHERTYPE:
+ high = source_port_or_eth_type;
+ break;
+ case ECORE_LLH_FILTER_TCP_SRC_PORT:
+ case ECORE_LLH_FILTER_UDP_SRC_PORT:
+ low = source_port_or_eth_type << 16;
+ break;
+ case ECORE_LLH_FILTER_TCP_DEST_PORT:
+ case ECORE_LLH_FILTER_UDP_DEST_PORT:
+ low = dest_port;
+ break;
+ case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
+ case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
+ low = (source_port_or_eth_type << 16) | dest_port;
+ break;
+ default:
+		DP_NOTICE(p_hwfn, true,
+			  "Invalid LLH protocol filter type %d\n", type);
+ return;
+ }
+
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ if (!ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32)))
+ continue;
+ if (!ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32)))
+ continue;
+ if (!(ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
+ i * sizeof(u32)) & (1 << type)))
+ continue;
+ if (ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ 2 * i * sizeof(u32)) != low)
+ continue;
+ if (ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ (2 * i + 1) * sizeof(u32)) != high)
+ continue;
+
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
+ i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ 2 * i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ (2 * i + 1) * sizeof(u32), 0);
+ break;
+ }
+
+ if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
+ DP_NOTICE(p_hwfn, false,
+ "Tried to remove a non-configured filter\n");
+}
+
+void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ int i;
+
+ if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ return;
+
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ 2 * i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ (2 * i + 1) * sizeof(u32), 0);
+ }
+}
+
+enum _ecore_status_t
+ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ if (IS_MF_DEFAULT(p_hwfn) && ECORE_IS_BB(p_hwfn->p_dev)) {
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR,
+ 1 << p_hwfn->abs_pf_id / 2);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, 0);
+ return ECORE_SUCCESS;
+ }
+
+ DP_NOTICE(p_hwfn, false,
+ "This function can't be set as default\n");
+ return ECORE_INVAL;
+}
+
+static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 hw_addr, void *p_eth_qzone,
+ osal_size_t eth_qzone_size,
+ u8 timeset)
+{
+ struct coalescing_timeset *p_coal_timeset;
+
+ if (p_hwfn->p_dev->int_coalescing_mode != ECORE_COAL_MODE_ENABLE) {
+ DP_NOTICE(p_hwfn, true,
+ "Coalescing configuration not enabled\n");
+ return ECORE_INVAL;
+ }
+
+ p_coal_timeset = p_eth_qzone;
+ OSAL_MEMSET(p_eth_qzone, 0, eth_qzone_size);
+ SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset);
+ SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1);
+ ecore_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn,
+ u16 rx_coal, u16 tx_coal,
+ void *p_handle)
+{
+ struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct ecore_ptt *p_ptt;
+
+	/* TODO - Configuring a single queue's coalescing, but
+	 * claiming all queues abide by the same configuration
+	 * for both PF and VF.
+	 */
+
+ if (IS_VF(p_hwfn->p_dev))
+ return ecore_vf_pf_set_coalesce(p_hwfn, rx_coal,
+ tx_coal, p_cid);
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_AGAIN;
+
+ if (rx_coal) {
+ rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
+ if (rc)
+ goto out;
+ p_hwfn->p_dev->rx_coalesce_usecs = rx_coal;
+ }
+
+ if (tx_coal) {
+ rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid);
+ if (rc)
+ goto out;
+ p_hwfn->p_dev->tx_coalesce_usecs = tx_coal;
+ }
+out:
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 coalesce,
+ struct ecore_queue_cid *p_cid)
+{
+ struct ustorm_eth_queue_zone eth_qzone;
+ u8 timeset, timer_res;
+ u32 address;
+ enum _ecore_status_t rc;
+
+ /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
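+	/* E.g. coalesce = 0x18C (396) falls in the <= 0x1FF bucket, so
+	 * timer_res = 2 and timeset = 396 >> 2 = 99.
+	 */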
+ if (coalesce <= 0x7F) {
+ timer_res = 0;
+ } else if (coalesce <= 0xFF) {
+ timer_res = 1;
+ } else if (coalesce <= 0x1FF) {
+ timer_res = 2;
+ } else {
+ DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
+ return ECORE_INVAL;
+ }
+ timeset = (u8)(coalesce >> timer_res);
+
+ rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res,
+ p_cid->abs.sb_idx, false);
+ if (rc != ECORE_SUCCESS)
+ goto out;
+
+ address = BAR0_MAP_REG_USDM_RAM +
+ USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
+
+ rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
+ sizeof(struct ustorm_eth_queue_zone), timeset);
+ if (rc != ECORE_SUCCESS)
+ goto out;
+
+ out:
+ return rc;
+}
+
+enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 coalesce,
+ struct ecore_queue_cid *p_cid)
+{
+ struct xstorm_eth_queue_zone eth_qzone;
+ u8 timeset, timer_res;
+ u32 address;
+ enum _ecore_status_t rc;
+
+ /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
+ if (coalesce <= 0x7F) {
+ timer_res = 0;
+ } else if (coalesce <= 0xFF) {
+ timer_res = 1;
+ } else if (coalesce <= 0x1FF) {
+ timer_res = 2;
+ } else {
+ DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
+ return ECORE_INVAL;
+ }
+
+ timeset = (u8)(coalesce >> timer_res);
+
+ rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res,
+ p_cid->abs.sb_idx, true);
+ if (rc != ECORE_SUCCESS)
+ goto out;
+
+ address = BAR0_MAP_REG_XSDM_RAM +
+ XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
+
+ rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
+ sizeof(struct xstorm_eth_queue_zone), timeset);
+ out:
+ return rc;
+}
+
+/* Calculate final WFQ values for all vports and configure them.
+ * After this configuration each vport must have
+ * approx min rate = vport_wfq * min_pf_rate / ECORE_WFQ_UNIT
+ */
+static void ecore_configure_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 min_pf_rate)
+{
+ struct init_qm_vport_params *vport_params;
+ int i;
+
+ vport_params = p_hwfn->qm_info.qm_vport_params;
+
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+ u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
+
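+		/* vport_wfq expresses the vport's share of min_pf_rate in
+		 * ECORE_WFQ_UNIT units; e.g. a vport whose min_speed is a
+		 * quarter of min_pf_rate gets ECORE_WFQ_UNIT / 4.
+		 */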
+ vport_params[i].vport_wfq = (wfq_speed * ECORE_WFQ_UNIT) /
+ min_pf_rate;
+ ecore_init_vport_wfq(p_hwfn, p_ptt,
+ vport_params[i].first_tx_pq_id,
+ vport_params[i].vport_wfq);
+ }
+}
+
+static void
+ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn, u32 min_pf_rate)
+{
+ int i;
+
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
+ p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
+}
+
+static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 min_pf_rate)
+{
+ struct init_qm_vport_params *vport_params;
+ int i;
+
+ vport_params = p_hwfn->qm_info.qm_vport_params;
+
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+ ecore_init_wfq_default_param(p_hwfn, min_pf_rate);
+ ecore_init_vport_wfq(p_hwfn, p_ptt,
+ vport_params[i].first_tx_pq_id,
+ vport_params[i].vport_wfq);
+ }
+}
+
+/* This function performs several validations for WFQ
+ * configuration and required min rate for a given vport
+ * 1. req_rate must be greater than one percent of min_pf_rate.
+ * 2. req_rate should not cause other vports [not configured for WFQ explicitly]
+ * rates to get less than one percent of min_pf_rate.
+ * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
+ */
+static enum _ecore_status_t ecore_init_wfq_param(struct ecore_hwfn *p_hwfn,
+ u16 vport_id, u32 req_rate,
+ u32 min_pf_rate)
+{
+ u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
+ int non_requested_count = 0, req_count = 0, i, num_vports;
+
+ num_vports = p_hwfn->qm_info.num_vports;
+
+	/* Accounting for the vports which are configured for WFQ explicitly */
+
+ for (i = 0; i < num_vports; i++) {
+ u32 tmp_speed;
+
+ if ((i != vport_id) && p_hwfn->qm_info.wfq_data[i].configured) {
+ req_count++;
+ tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
+ total_req_min_rate += tmp_speed;
+ }
+ }
+
+ /* Include current vport data as well */
+ req_count++;
+ total_req_min_rate += req_rate;
+ non_requested_count = num_vports - req_count;
+
+ /* validate possible error cases */
+ if (req_rate > min_pf_rate) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "Vport [%d] - Requested rate[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
+ vport_id, req_rate, min_pf_rate);
+ return ECORE_INVAL;
+ }
+
+ if (req_rate < min_pf_rate / ECORE_WFQ_UNIT) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
+ vport_id, req_rate, min_pf_rate);
+ return ECORE_INVAL;
+ }
+
+ /* TBD - for number of vports greater than 100 */
+ if (num_vports > ECORE_WFQ_UNIT) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "Number of vports is greater than %d\n",
+ ECORE_WFQ_UNIT);
+ return ECORE_INVAL;
+ }
+
+ if (total_req_min_rate > min_pf_rate) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
+ total_req_min_rate, min_pf_rate);
+ return ECORE_INVAL;
+ }
+
+ /* Data left for non requested vports */
+ total_left_rate = min_pf_rate - total_req_min_rate;
+ left_rate_per_vp = total_left_rate / non_requested_count;
+
+ /* validate if non requested get < 1% of min bw */
+ if (left_rate_per_vp < min_pf_rate / ECORE_WFQ_UNIT) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
+ left_rate_per_vp, min_pf_rate);
+ return ECORE_INVAL;
+ }
+
+ /* now req_rate for given vport passes all scenarios.
+ * assign final wfq rates to all vports.
+ */
+ p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
+ p_hwfn->qm_info.wfq_data[vport_id].configured = true;
+
+ for (i = 0; i < num_vports; i++) {
+ if (p_hwfn->qm_info.wfq_data[i].configured)
+ continue;
+
+ p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static int __ecore_configure_vport_wfq(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 vp_id, u32 rate)
+{
+ struct ecore_mcp_link_state *p_link;
+ int rc = ECORE_SUCCESS;
+
+ p_link = &p_hwfn->p_dev->hwfns[0].mcp_info->link_output;
+
+ if (!p_link->min_pf_rate) {
+ p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
+ p_hwfn->qm_info.wfq_data[vp_id].configured = true;
+ return rc;
+ }
+
+ rc = ecore_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);
+
+ if (rc == ECORE_SUCCESS)
+ ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt,
+ p_link->min_pf_rate);
+ else
+ DP_NOTICE(p_hwfn, false,
+ "Validation failed while configuring min rate\n");
+
+ return rc;
+}
+
+static int __ecore_configure_vp_wfq_on_link_change(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 min_pf_rate)
+{
+ bool use_wfq = false;
+ int rc = ECORE_SUCCESS;
+ u16 i;
+
+ /* Validate all pre configured vports for wfq */
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+ u32 rate;
+
+ if (!p_hwfn->qm_info.wfq_data[i].configured)
+ continue;
+
+ rate = p_hwfn->qm_info.wfq_data[i].min_speed;
+ use_wfq = true;
+
+ rc = ecore_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "WFQ validation failed while configuring min rate\n");
+ break;
+ }
+ }
+
+ if (rc == ECORE_SUCCESS && use_wfq)
+ ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+ else
+ ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+
+ return rc;
+}
+
+/* Main API for ecore clients to configure vport min rate.
+ * vp_id - vport id in the PF, range [0 - (total_num_vports_per_pf - 1)]
+ * rate - speed in Mbps to be assigned to the given vport.
+ */
+int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate)
+{
+ int i, rc = ECORE_INVAL;
+
+ /* TBD - for multiple hardware functions - that is 100 gig */
+ if (p_dev->num_hwfns > 1) {
+ DP_NOTICE(p_dev, false,
+ "WFQ configuration is not supported for this device\n");
+ return rc;
+ }
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+ struct ecore_ptt *p_ptt;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_TIMEOUT;
+
+ rc = __ecore_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);
+
+ if (rc != ECORE_SUCCESS) {
+ ecore_ptt_release(p_hwfn, p_ptt);
+ return rc;
+ }
+
+ ecore_ptt_release(p_hwfn, p_ptt);
+ }
+
+ return rc;
+}
+
+/* API to configure WFQ from mcp link change */
+void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
+ u32 min_pf_rate)
+{
+ int i;
+
+ /* TBD - for multiple hardware functions - that is 100 gig */
+ if (p_dev->num_hwfns > 1) {
+ DP_VERBOSE(p_dev, ECORE_MSG_LINK,
+ "WFQ configuration is not supported for this device\n");
+ return;
+ }
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ __ecore_configure_vp_wfq_on_link_change(p_hwfn,
+ p_hwfn->p_dpc_ptt,
+ min_pf_rate);
+ }
+}
+
+int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mcp_link_state *p_link,
+ u8 max_bw)
+{
+ int rc = ECORE_SUCCESS;
+
+ p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;
+
+ if (!p_link->line_speed && (max_bw != 100))
+ return rc;
+
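+	/* max_bw is a percentage of line speed; e.g. a 25000 Mbps link with
+	 * max_bw = 40 is rate-limited to 10000 Mbps.
+	 */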
+ p_link->speed = (p_link->line_speed * max_bw) / 100;
+ p_hwfn->qm_info.pf_rl = p_link->speed;
+
+	/* Since the limiter also affects Tx-switched traffic, we don't want
+	 * it to limit such traffic in case there's no actual limit.
+	 * In that case, set the limit to an arbitrarily high value.
+	 */
+ if (max_bw == 100)
+ p_hwfn->qm_info.pf_rl = 100000;
+
+ rc = ecore_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+ p_hwfn->qm_info.pf_rl);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "Configured MAX bandwidth to be %08x Mb/sec\n",
+ p_link->speed);
+
+ return rc;
+}
+
+/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
+int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw)
+{
+ int i, rc = ECORE_INVAL;
+
+ if (max_bw < 1 || max_bw > 100) {
+ DP_NOTICE(p_dev, false, "PF max bw valid range is [1-100]\n");
+ return rc;
+ }
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+ struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_mcp_link_state *p_link;
+ struct ecore_ptt *p_ptt;
+
+ p_link = &p_lead->mcp_info->link_output;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_TIMEOUT;
+
+ rc = __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
+ p_link, max_bw);
+
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ if (rc != ECORE_SUCCESS)
+ break;
+ }
+
+ return rc;
+}
+
+int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mcp_link_state *p_link,
+ u8 min_bw)
+{
+ int rc = ECORE_SUCCESS;
+
+ p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
+ p_hwfn->qm_info.pf_wfq = min_bw;
+
+ if (!p_link->line_speed)
+ return rc;
+
+ p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;
+
+ rc = ecore_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "Configured MIN bandwidth to be %d Mb/sec\n",
+ p_link->min_pf_rate);
+
+ return rc;
+}
+
+/* Main API to configure PF min bandwidth where bw range is [1-100] */
+int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw)
+{
+ int i, rc = ECORE_INVAL;
+
+ if (min_bw < 1 || min_bw > 100) {
+ DP_NOTICE(p_dev, false, "PF min bw valid range is [1-100]\n");
+ return rc;
+ }
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+ struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_mcp_link_state *p_link;
+ struct ecore_ptt *p_ptt;
+
+ p_link = &p_lead->mcp_info->link_output;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_TIMEOUT;
+
+ rc = __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
+ p_link, min_bw);
+ if (rc != ECORE_SUCCESS) {
+ ecore_ptt_release(p_hwfn, p_ptt);
+ return rc;
+ }
+
+ if (p_link->min_pf_rate) {
+ u32 min_rate = p_link->min_pf_rate;
+
+ rc = __ecore_configure_vp_wfq_on_link_change(p_hwfn,
+ p_ptt,
+ min_rate);
+ }
+
+ ecore_ptt_release(p_hwfn, p_ptt);
+ }
+
+ return rc;
+}
+
+void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ struct ecore_mcp_link_state *p_link;
+
+ p_link = &p_hwfn->mcp_info->link_output;
+
+ if (p_link->min_pf_rate)
+ ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt,
+ p_link->min_pf_rate);
+
+ OSAL_MEMSET(p_hwfn->qm_info.wfq_data, 0,
+ sizeof(*p_hwfn->qm_info.wfq_data) *
+ p_hwfn->qm_info.num_vports);
+}
+
+int ecore_device_num_engines(struct ecore_dev *p_dev)
+{
+ return ECORE_IS_BB(p_dev) ? 2 : 1;
+}
+
+int ecore_device_num_ports(struct ecore_dev *p_dev)
+{
+ /* in CMT always only one port */
+ if (p_dev->num_hwfns > 1)
+ return 1;
+
+ return p_dev->num_ports_in_engines * ecore_device_num_engines(p_dev);
+}
+
+void ecore_set_fw_mac_addr(__le16 *fw_msb,
+ __le16 *fw_mid,
+ __le16 *fw_lsb,
+ u8 *mac)
+{
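+	/* Each 16-bit FW field holds a byte-swapped pair of the MAC; e.g. for
+	 * aa:bb:cc:dd:ee:ff the bytes become fw_msb = {bb,aa},
+	 * fw_mid = {dd,cc}, fw_lsb = {ff,ee}.
+	 */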
+ ((u8 *)fw_msb)[0] = mac[1];
+ ((u8 *)fw_msb)[1] = mac[0];
+ ((u8 *)fw_mid)[0] = mac[3];
+ ((u8 *)fw_mid)[1] = mac[2];
+ ((u8 *)fw_lsb)[0] = mac[5];
+ ((u8 *)fw_lsb)[1] = mac[4];
+}
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_dev_api.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_dev_api.h
new file mode 100644
index 00000000..e64a768d
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_dev_api.h
@@ -0,0 +1,584 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_DEV_API_H__
+#define __ECORE_DEV_API_H__
+
+#include "ecore_status.h"
+#include "ecore_chain.h"
+#include "ecore_int_api.h"
+
+/**
+ * @brief ecore_init_dp - initialize the debug level
+ *
+ * @param p_dev
+ * @param dp_module
+ * @param dp_level
+ * @param dp_ctx
+ */
+void ecore_init_dp(struct ecore_dev *p_dev,
+ u32 dp_module,
+ u8 dp_level,
+ void *dp_ctx);
+
+/**
+ * @brief ecore_init_struct - initialize the device structure to
+ * its defaults
+ *
+ * @param p_dev
+ */
+void ecore_init_struct(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_resc_free -
+ *
+ * @param p_dev
+ */
+void ecore_resc_free(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_resc_alloc -
+ *
+ * @param p_dev
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_resc_setup -
+ *
+ * @param p_dev
+ */
+void ecore_resc_setup(struct ecore_dev *p_dev);
+
+struct ecore_hw_init_params {
+ /* Tunnelling parameters */
+ struct ecore_tunnel_info *p_tunn;
+
+ bool b_hw_start;
+
+ /* Interrupt mode [msix, inta, etc.] to use */
+ enum ecore_int_mode int_mode;
+
+ /* NPAR tx switching to be used for vports configured for tx-switching
+ */
+ bool allow_npar_tx_switch;
+
+ /* Binary fw data pointer in binary fw file */
+ const u8 *bin_fw_data;
+
+ /* Indicates whether the driver is running over a crash kernel.
+ * As part of the load request, this will be used for providing the
+ * driver role to the MFW.
+ * In case of a crash kernel over PDA - this should be set to false.
+ */
+ bool is_crash_kernel;
+
+ /* The timeout value that the MFW should use when locking the engine for
+ * the driver load process.
+ * A value of '0' means the default value, and '255' means no timeout.
+ */
+ u8 mfw_timeout_val;
+#define ECORE_LOAD_REQ_LOCK_TO_DEFAULT 0
+#define ECORE_LOAD_REQ_LOCK_TO_NONE 255
+
+ /* Avoid engine reset when first PF loads on it */
+ bool avoid_eng_reset;
+};
+
+/**
+ * @brief ecore_hw_init -
+ *
+ * @param p_dev
+ * @param p_params
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
+ struct ecore_hw_init_params *p_params);
+
+/**
+ * @brief ecore_hw_timers_stop_all -
+ *
+ * @param p_dev
+ *
+ * @return void
+ */
+void ecore_hw_timers_stop_all(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_hw_stop -
+ *
+ * @param p_dev
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_hw_stop_fastpath - should be called in case
+ *        slowpath is still required for the device,
+ *        but fastpath is not.
+ *
+ * @param p_dev
+ *
+ */
+void ecore_hw_stop_fastpath(struct ecore_dev *p_dev);
+
+#ifndef LINUX_REMOVE
+/**
+ * @brief ecore_prepare_hibernate - should be called when
+ * the system is going into the hibernate state
+ *
+ * @param p_dev
+ *
+ */
+void ecore_prepare_hibernate(struct ecore_dev *p_dev);
+#endif
+
+/**
+ * @brief ecore_hw_start_fastpath - restart fastpath traffic,
+ *        only if hw_stop_fastpath was called
+ *
+ * @param p_hwfn
+ *
+ */
+void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn);
+
+enum ecore_hw_prepare_result {
+ ECORE_HW_PREPARE_SUCCESS,
+
+ /* FAILED results indicate probe has failed & cleaned up */
+ ECORE_HW_PREPARE_FAILED_ENG2,
+ ECORE_HW_PREPARE_FAILED_ME,
+ ECORE_HW_PREPARE_FAILED_MEM,
+ ECORE_HW_PREPARE_FAILED_DEV,
+ ECORE_HW_PREPARE_FAILED_NVM,
+
+	/* BAD results indicate the probe has passed even though something
+	 * went wrong; trying to actually use the device [i.e., hw_init()]
+	 * might have dire repercussions.
+	 */
+ ECORE_HW_PREPARE_BAD_IOV,
+ ECORE_HW_PREPARE_BAD_MCP,
+ ECORE_HW_PREPARE_BAD_IGU,
+};
+
+struct ecore_hw_prepare_params {
+ /* Personality to initialize */
+ int personality;
+
+ /* Force the driver's default resource allocation */
+ bool drv_resc_alloc;
+
+ /* Check the reg_fifo after any register access */
+ bool chk_reg_fifo;
+
+ /* Request the MFW to initiate PF FLR */
+ bool initiate_pf_flr;
+
+ /* The OS Epoch time in seconds */
+ u32 epoch;
+
+	/* Allow prepare to pass even if some initializations fail.
+	 * If set, the `p_relaxed_res' field is set with the result, which
+	 * might allow the probe to pass even if there are certain issues.
+	 */
+ bool b_relaxed_probe;
+ enum ecore_hw_prepare_result p_relaxed_res;
+};
+
+/**
+ * @brief ecore_hw_prepare -
+ *
+ * @param p_dev
+ * @param p_params
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
+ struct ecore_hw_prepare_params *p_params);
+
+/**
+ * @brief ecore_hw_remove -
+ *
+ * @param p_dev
+ */
+void ecore_hw_remove(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_ptt_acquire - Allocate a PTT window
+ *
+ * Should be called at the entry point to the driver (at the beginning of an
+ * exported function)
+ *
+ * @param p_hwfn
+ *
+ * @return struct ecore_ptt
+ */
+struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_ptt_release - Release PTT Window
+ *
+ * Should be called at the end of a flow - at the end of the function that
+ * acquired the PTT.
+ *
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void ecore_ptt_release(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+#ifndef __EXTRACT__LINUX__
+struct ecore_eth_stats {
+ u64 no_buff_discards;
+ u64 packet_too_big_discard;
+ u64 ttl0_discard;
+ u64 rx_ucast_bytes;
+ u64 rx_mcast_bytes;
+ u64 rx_bcast_bytes;
+ u64 rx_ucast_pkts;
+ u64 rx_mcast_pkts;
+ u64 rx_bcast_pkts;
+ u64 mftag_filter_discards;
+ u64 mac_filter_discards;
+ u64 tx_ucast_bytes;
+ u64 tx_mcast_bytes;
+ u64 tx_bcast_bytes;
+ u64 tx_ucast_pkts;
+ u64 tx_mcast_pkts;
+ u64 tx_bcast_pkts;
+ u64 tx_err_drop_pkts;
+ u64 tpa_coalesced_pkts;
+ u64 tpa_coalesced_events;
+ u64 tpa_aborts_num;
+ u64 tpa_not_coalesced_pkts;
+ u64 tpa_coalesced_bytes;
+
+ /* port */
+ u64 rx_64_byte_packets;
+ u64 rx_65_to_127_byte_packets;
+ u64 rx_128_to_255_byte_packets;
+ u64 rx_256_to_511_byte_packets;
+ u64 rx_512_to_1023_byte_packets;
+ u64 rx_1024_to_1518_byte_packets;
+ u64 rx_1519_to_1522_byte_packets;
+ u64 rx_1519_to_2047_byte_packets;
+ u64 rx_2048_to_4095_byte_packets;
+ u64 rx_4096_to_9216_byte_packets;
+ u64 rx_9217_to_16383_byte_packets;
+ u64 rx_crc_errors;
+ u64 rx_mac_crtl_frames;
+ u64 rx_pause_frames;
+ u64 rx_pfc_frames;
+ u64 rx_align_errors;
+ u64 rx_carrier_errors;
+ u64 rx_oversize_packets;
+ u64 rx_jabbers;
+ u64 rx_undersize_packets;
+ u64 rx_fragments;
+ u64 tx_64_byte_packets;
+ u64 tx_65_to_127_byte_packets;
+ u64 tx_128_to_255_byte_packets;
+ u64 tx_256_to_511_byte_packets;
+ u64 tx_512_to_1023_byte_packets;
+ u64 tx_1024_to_1518_byte_packets;
+ u64 tx_1519_to_2047_byte_packets;
+ u64 tx_2048_to_4095_byte_packets;
+ u64 tx_4096_to_9216_byte_packets;
+ u64 tx_9217_to_16383_byte_packets;
+ u64 tx_pause_frames;
+ u64 tx_pfc_frames;
+ u64 tx_lpi_entry_count;
+ u64 tx_total_collisions;
+ u64 brb_truncates;
+ u64 brb_discards;
+ u64 rx_mac_bytes;
+ u64 rx_mac_uc_packets;
+ u64 rx_mac_mc_packets;
+ u64 rx_mac_bc_packets;
+ u64 rx_mac_frames_ok;
+ u64 tx_mac_bytes;
+ u64 tx_mac_uc_packets;
+ u64 tx_mac_mc_packets;
+ u64 tx_mac_bc_packets;
+ u64 tx_mac_ctrl_frames;
+};
+#endif
+
+enum ecore_dmae_address_type_t {
+ ECORE_DMAE_ADDRESS_HOST_VIRT,
+ ECORE_DMAE_ADDRESS_HOST_PHYS,
+ ECORE_DMAE_ADDRESS_GRC
+};
+
+/* Values for the flags field. If the ECORE_DMAE_FLAG_RW_REPL_SRC flag is
+ * set, the source is a block of length DMAE_MAX_RW_SIZE and the
+ * destination is larger, the source block will be duplicated as
+ * many times as required to fill the destination block. This is
+ * used mostly to write a zeroed buffer to a destination address
+ * using DMA.
+ */
+#define ECORE_DMAE_FLAG_RW_REPL_SRC 0x00000001
+#define ECORE_DMAE_FLAG_VF_SRC 0x00000002
+#define ECORE_DMAE_FLAG_VF_DST 0x00000004
+#define ECORE_DMAE_FLAG_COMPLETION_DST 0x00000008
+
+struct ecore_dmae_params {
+ u32 flags; /* consists of ECORE_DMAE_FLAG_* values */
+ u8 src_vfid;
+ u8 dst_vfid;
+};
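
As a sketch of the replication flag described above (illustrative only; the helper name is hypothetical, and the buffer address and GRC offset are placeholders supplied by the caller):

static enum _ecore_status_t example_zero_grc(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u64 zero_buf_phys,
					     u32 grc_addr, u32 size_in_dwords)
{
	/* zero_buf_phys points to a DMAE_MAX_RW_SIZE block of zeros; with
	 * ECORE_DMAE_FLAG_RW_REPL_SRC it is replicated over the whole
	 * destination range.
	 */
	return ecore_dmae_host2grc(p_hwfn, p_ptt, zero_buf_phys, grc_addr,
				   size_in_dwords,
				   ECORE_DMAE_FLAG_RW_REPL_SRC);
}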
+
+/**
+ * @brief ecore_dmae_host2grc - copy data from source addr to
+ * dmae registers using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param source_addr
+ * @param grc_addr (dmae_data_offset)
+ * @param size_in_dwords
+ * @param flags (one of the flags defined above)
+ */
+enum _ecore_status_t
+ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u64 source_addr,
+ u32 grc_addr,
+ u32 size_in_dwords,
+ u32 flags);
+
+/**
+ * @brief ecore_dmae_grc2host - Read data from the dmae data offset
+ * to the destination address using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param grc_addr (dmae_data_offset)
+ * @param dest_addr
+ * @param size_in_dwords
+ * @param flags - one of the flags defined above
+ */
+enum _ecore_status_t
+ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 grc_addr,
+ dma_addr_t dest_addr,
+ u32 size_in_dwords,
+ u32 flags);
+
+/**
+ * @brief ecore_dmae_host2host - copy data from a source address
+ * to a destination address (for SRIOV) using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param source_addr
+ * @param dest_addr
+ * @param size_in_dwords
+ * @param params
+ */
+enum _ecore_status_t
+ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ dma_addr_t source_addr,
+ dma_addr_t dest_addr,
+ u32 size_in_dwords,
+ struct ecore_dmae_params *p_params);
+
+/**
+ * @brief ecore_chain_alloc - Allocate and initialize a chain
+ *
+ * @param p_dev
+ * @param intended_use
+ * @param mode
+ * @param cnt_type
+ * @param num_elems
+ * @param elem_size
+ * @param p_chain
+ * @param ext_pbl
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_chain_alloc(struct ecore_dev *p_dev,
+ enum ecore_chain_use_mode intended_use,
+ enum ecore_chain_mode mode,
+ enum ecore_chain_cnt_type cnt_type,
+ u32 num_elems,
+ osal_size_t elem_size,
+ struct ecore_chain *p_chain,
+ struct ecore_chain_ext_pbl *ext_pbl);
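
For illustration (not part of this patch), a sketch of allocating and freeing a PBL-based, 16-bit-counter chain; the enum values are those declared in ecore_chain.h, and the element count and size are placeholders.

static enum _ecore_status_t example_chain(struct ecore_dev *p_dev,
					  struct ecore_chain *p_chain)
{
	enum _ecore_status_t rc;

	rc = ecore_chain_alloc(p_dev,
			       ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
			       ECORE_CHAIN_MODE_PBL,
			       ECORE_CHAIN_CNT_TYPE_U16,
			       256,			/* num_elems */
			       sizeof(u64),		/* elem_size */
			       p_chain,
			       OSAL_NULL);		/* no external PBL */
	if (rc != ECORE_SUCCESS)
		return rc;

	/* ... produce/consume elements ... */

	ecore_chain_free(p_dev, p_chain);
	return ECORE_SUCCESS;
}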
+
+/**
+ * @brief ecore_chain_free - Free chain DMA memory
+ *
+ * @param p_dev
+ * @param p_chain
+ */
+void ecore_chain_free(struct ecore_dev *p_dev,
+ struct ecore_chain *p_chain);
+
+/**
+ * @brief ecore_fw_l2_queue - Get absolute L2 queue ID
+ *
+ * @param p_hwfn
+ * @param src_id - relative to p_hwfn
+ * @param dst_id - absolute per engine
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
+ u16 src_id,
+ u16 *dst_id);
+
+/**
+ * @brief ecore_fw_vport - Get absolute vport ID
+ *
+ * @param p_hwfn
+ * @param src_id - relative to p_hwfn
+ * @param dst_id - absolute per engine
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
+ u8 src_id,
+ u8 *dst_id);
+
+/**
+ * @brief ecore_fw_rss_eng - Get absolute RSS engine ID
+ *
+ * @param p_hwfn
+ * @param src_id - relative to p_hwfn
+ * @param dst_id - absolute per engine
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
+ u8 src_id,
+ u8 *dst_id);
+
+/**
+ * @brief ecore_llh_add_mac_filter - configures a MAC filter in llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_filter - MAC to add
+ */
+enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 *p_filter);
+
+/**
+ * @brief ecore_llh_remove_mac_filter - removes a MAC filter from llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_filter - MAC to remove
+ */
+void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 *p_filter);
+
+enum ecore_llh_port_filter_type_t {
+ ECORE_LLH_FILTER_ETHERTYPE,
+ ECORE_LLH_FILTER_TCP_SRC_PORT,
+ ECORE_LLH_FILTER_TCP_DEST_PORT,
+ ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT,
+ ECORE_LLH_FILTER_UDP_SRC_PORT,
+ ECORE_LLH_FILTER_UDP_DEST_PORT,
+ ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT
+};
+
+/**
+ * @brief ecore_llh_add_protocol_filter - configures a protocol filter in llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param source_port_or_eth_type - source port or ethertype to add
+ * @param dest_port - destination port to add
+ * @param type - type of filters and comparing
+ */
+enum _ecore_status_t
+ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 source_port_or_eth_type,
+ u16 dest_port,
+ enum ecore_llh_port_filter_type_t type);
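
A hedged usage sketch (not part of this patch): steer traffic by TCP destination port. The port number is an arbitrary illustration, and the source-port argument is assumed to be ignored for the DEST_PORT filter type.

static enum _ecore_status_t example_llh_tcp_dport(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt)
{
	return ecore_llh_add_protocol_filter(p_hwfn, p_ptt,
					     0,	   /* source port: not used for
						    * this type (assumption)
						    */
					     3260, /* e.g. iSCSI dest port */
					     ECORE_LLH_FILTER_TCP_DEST_PORT);
}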
+
+/**
+ * @brief ecore_llh_remove_protocol_filter - removes a protocol filter from llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param source_port_or_eth_type - source port or ethertype to remove
+ * @param dest_port - destination port to remove
+ * @param type - type of filters and comparing
+ */
+void
+ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 source_port_or_eth_type,
+ u16 dest_port,
+ enum ecore_llh_port_filter_type_t type);
+
+/**
+ * @brief ecore_llh_clear_all_filters - removes all MAC filters from llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_llh_set_function_as_default - set function as default per port
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+enum _ecore_status_t
+ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ *@brief Cleanup of previous driver remains prior to load
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param id - For PF, engine-relative. For VF, PF-relative.
+ * @param is_vf - true iff cleanup is made for a VF.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 id,
+ bool is_vf);
+/**
+ * @brief ecore_set_queue_coalesce - Configure coalesce parameters for Rx and
+ * Tx queue. Coalescing can be configured up to 511 microseconds, but with
+ * decreasing accuracy [the bigger the value the less accurate], up to an
+ * error of 3usec for the highest values.
+ * While the API allows setting coalescing per-qid, all queues sharing a SB
+ * should be in the same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff],
+ * otherwise the configuration would break.
+ *
+ * @param p_hwfn
+ * @param rx_coal - Rx Coalesce value in micro seconds.
+ * @param tx_coal - TX Coalesce value in micro seconds.
+ * @param p_handle
+ *
+ * @return enum _ecore_status_t
+ **/
+enum _ecore_status_t
+ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal,
+ u16 tx_coal, void *p_handle);
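
Illustrative only (not part of this patch): 24usec Rx / 48usec Tx coalescing for a queue handle owned by the caller. Both values fall within the same 0-0x7f range, satisfying the shared-SB constraint above; the wrapper name is hypothetical.

static enum _ecore_status_t example_coalesce(struct ecore_hwfn *p_hwfn,
					     void *p_queue_handle)
{
	return ecore_set_queue_coalesce(p_hwfn, 24 /* rx usec */,
					48 /* tx usec */, p_queue_handle);
}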
+
+#endif
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_gtt_reg_addr.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_gtt_reg_addr.h
new file mode 100644
index 00000000..2acd864d
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_gtt_reg_addr.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef GTT_REG_ADDR_H
+#define GTT_REG_ADDR_H
+
+/* Win 2 */
+/* Access:RW DataWidth:0x20 */
+#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL
+
+/* Win 3 */
+/* Access:RW DataWidth:0x20 */
+#define GTT_BAR0_MAP_REG_TSDM_RAM 0x010000UL
+
+/* Win 4 */
+/* Access:RW DataWidth:0x20 */
+#define GTT_BAR0_MAP_REG_MSDM_RAM 0x011000UL
+
+/* Win 5 */
+/* Access:RW DataWidth:0x20 */
+#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 0x012000UL
+
+/* Win 6 */
+/* Access:RW DataWidth:0x20 */
+#define GTT_BAR0_MAP_REG_USDM_RAM 0x013000UL
+
+/* Win 7 */
+/* Access:RW DataWidth:0x20 */
+#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x014000UL
+
+/* Win 8 */
+/* Access:RW DataWidth:0x20 */
+#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x015000UL
+
+/* Win 9 */
+/* Access:RW DataWidth:0x20 */
+#define GTT_BAR0_MAP_REG_XSDM_RAM 0x016000UL
+
+/* Win 10 */
+/* Access:RW DataWidth:0x20 */
+#define GTT_BAR0_MAP_REG_YSDM_RAM 0x017000UL
+
+/* Win 11 */
+/* Access:RW DataWidth:0x20 */
+#define GTT_BAR0_MAP_REG_PSDM_RAM 0x018000UL
+
+#endif
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_gtt_values.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_gtt_values.h
new file mode 100644
index 00000000..2ddc5f19
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_gtt_values.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __PREVENT_PXP_GLOBAL_WIN__
+
+static u32 pxp_global_win[] = {
+ 0,
+ 0,
+ 0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */
+ 0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
+ 0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
+ 0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
+ 0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
+ 0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
+ 0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
+ 0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
+ 0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
+ 0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+};
+
+#endif /* __PREVENT_PXP_GLOBAL_WIN__ */
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_hsi_common.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_hsi_common.h
new file mode 100644
index 00000000..3042ed55
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_hsi_common.h
@@ -0,0 +1,2311 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_HSI_COMMON__
+#define __ECORE_HSI_COMMON__
+/********************************/
+/* Add include to common target */
+/********************************/
+#include "common_hsi.h"
+
+
+/*
+ * opcodes for the event ring
+ */
+enum common_event_opcode {
+ COMMON_EVENT_PF_START,
+ COMMON_EVENT_PF_STOP,
+ COMMON_EVENT_VF_START,
+ COMMON_EVENT_VF_STOP,
+ COMMON_EVENT_VF_PF_CHANNEL,
+ COMMON_EVENT_VF_FLR,
+ COMMON_EVENT_PF_UPDATE,
+ COMMON_EVENT_MALICIOUS_VF,
+ COMMON_EVENT_RL_UPDATE,
+ COMMON_EVENT_EMPTY,
+ MAX_COMMON_EVENT_OPCODE
+};
+
+
+/*
+ * Common Ramrod Command IDs
+ */
+enum common_ramrod_cmd_id {
+ COMMON_RAMROD_UNUSED,
+ COMMON_RAMROD_PF_START /* PF Function Start Ramrod */,
+ COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
+ COMMON_RAMROD_VF_START /* VF Function Start */,
+ COMMON_RAMROD_VF_STOP /* VF Function Stop Ramrod */,
+ COMMON_RAMROD_PF_UPDATE /* PF update Ramrod */,
+ COMMON_RAMROD_RL_UPDATE /* QCN/DCQCN RL update Ramrod */,
+ COMMON_RAMROD_EMPTY /* Empty Ramrod */,
+ MAX_COMMON_RAMROD_CMD_ID
+};
+
+
+/*
+ * The core storm context for the Ystorm
+ */
+struct ystorm_core_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/*
+ * The core storm context for the Pstorm
+ */
+struct pstorm_core_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/*
+ * Core Slowpath Connection storm context of Xstorm
+ */
+struct xstorm_core_conn_st_ctx {
+ __le32 spq_base_lo /* SPQ Ring Base Address low dword */;
+ __le32 spq_base_hi /* SPQ Ring Base Address high dword */;
+/* Consolidation Ring Base Address */
+ struct regpair consolid_base_addr;
+ __le16 spq_cons /* SPQ Ring Consumer */;
+ __le16 consolid_cons /* Consolidation Ring Consumer */;
+ __le32 reserved0[55] /* Pad to 15 cycles */;
+};
+
+struct e4_xstorm_core_conn_ag_ctx {
+ u8 reserved0 /* cdu_validation */;
+ u8 core_state /* state */;
+ u8 flags0;
+/* exist_in_qm0 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+/* exist_in_qm1 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
+/* exist_in_qm2 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
+/* exist_in_qm3 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+/* bit4 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
+/* cf_array_active */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
+/* bit6 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
+/* bit7 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+/* bit8 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
+/* bit9 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
+/* bit10 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
+/* bit11 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
+/* bit12 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
+/* bit13 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
+/* bit14 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+/* bit15 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+/* timer0cf */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
+/* timer1cf */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
+/* timer2cf */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
+/* timer_stop_all */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
+/* cf10 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
+/* cf11 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+/* cf12 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
+/* cf13 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
+/* cf14 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
+/* cf15 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+/* cf16 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
+/* cf_array_cf */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
+/* cf18 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
+/* cf19 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+/* cf20 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+/* cf21 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
+/* cf22 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+/* cf0en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
+/* cf1en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+/* cf2en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
+/* cf3en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
+/* cf4en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
+/* cf5en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
+/* cf6en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
+/* cf7en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
+/* cf8en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
+/* cf9en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+/* cf10en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
+/* cf11en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
+/* cf12en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
+/* cf13en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
+/* cf14en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
+/* cf15en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
+/* cf16en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
+/* cf_array_cf_en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+/* cf18en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+/* cf19en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+/* cf20en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+/* cf21en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
+/* cf22en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+/* cf23en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
+/* rule0en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
+/* rule1en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+/* rule2en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
+/* rule3en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
+/* rule4en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+/* rule5en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
+/* rule6en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
+/* rule7en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
+/* rule8en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+/* rule9en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+/* rule10en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
+/* rule11en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
+/* rule12en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+/* rule13en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+/* rule14en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
+/* rule15en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
+/* rule16en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
+/* rule17en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+/* rule18en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
+/* rule19en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
+/* rule20en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+/* rule21en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+/* rule22en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+/* rule23en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+/* rule24en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+/* rule25en */
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+/* bit16 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
+/* bit17 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
+/* bit18 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
+/* bit19 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
+/* bit20 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
+/* bit21 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
+/* cf23 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2 /* byte2 */;
+ __le16 physical_q0 /* physical_q0 */;
+ __le16 consolid_prod /* physical_q1 */;
+ __le16 reserved16 /* physical_q2 */;
+ __le16 tx_bd_cons /* word3 */;
+ __le16 tx_bd_or_spq_prod /* word4 */;
+ __le16 word5 /* word5 */;
+ __le16 conn_dpi /* conn_dpi */;
+ u8 byte3 /* byte3 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ u8 byte6 /* byte6 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+ __le32 reg5 /* cf_array0 */;
+ __le32 reg6 /* cf_array1 */;
+ __le16 word7 /* word7 */;
+ __le16 word8 /* word8 */;
+ __le16 word9 /* word9 */;
+ __le16 word10 /* word10 */;
+ __le32 reg7 /* reg7 */;
+ __le32 reg8 /* reg8 */;
+ __le32 reg9 /* reg9 */;
+ u8 byte7 /* byte7 */;
+ u8 byte8 /* byte8 */;
+ u8 byte9 /* byte9 */;
+ u8 byte10 /* byte10 */;
+ u8 byte11 /* byte11 */;
+ u8 byte12 /* byte12 */;
+ u8 byte13 /* byte13 */;
+ u8 byte14 /* byte14 */;
+ u8 byte15 /* byte15 */;
+ u8 e5_reserved /* e5_reserved */;
+ __le16 word11 /* word11 */;
+ __le32 reg10 /* reg10 */;
+ __le32 reg11 /* reg11 */;
+ __le32 reg12 /* reg12 */;
+ __le32 reg13 /* reg13 */;
+ __le32 reg14 /* reg14 */;
+ __le32 reg15 /* reg15 */;
+ __le32 reg16 /* reg16 */;
+ __le32 reg17 /* reg17 */;
+ __le32 reg18 /* reg18 */;
+ __le32 reg19 /* reg19 */;
+ __le16 word12 /* word12 */;
+ __le16 word13 /* word13 */;
+ __le16 word14 /* word14 */;
+ __le16 word15 /* word15 */;
+};
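
The aggregative context structures above pack many sub-byte fields into flags bytes using paired _MASK/_SHIFT defines. A self-contained sketch of that encode/decode convention (the helper names are illustrative; the driver itself uses its own SET_FIELD/GET_FIELD-style macros):

#include <stdint.h>

static inline uint8_t ctx_field_set(uint8_t flags, uint8_t mask,
				    uint8_t shift, uint8_t val)
{
	flags &= (uint8_t)~(mask << shift);		/* clear the field */
	flags |= (uint8_t)((val & mask) << shift);	/* place the value */
	return flags;
}

static inline uint8_t ctx_field_get(uint8_t flags, uint8_t mask, uint8_t shift)
{
	return (flags >> shift) & mask;
}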
+
+struct e4_tstorm_core_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
+ u8 flags4;
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+ __le32 reg5 /* reg5 */;
+ __le32 reg6 /* reg6 */;
+ __le32 reg7 /* reg7 */;
+ __le32 reg8 /* reg8 */;
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* word0 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ __le16 word1 /* word1 */;
+ __le16 word2 /* conn_dpi */;
+ __le16 word3 /* word3 */;
+ __le32 reg9 /* reg9 */;
+ __le32 reg10 /* reg10 */;
+};
+
+struct e4_ustorm_core_conn_ag_ctx {
+ u8 reserved /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* conn_dpi */;
+ __le16 word1 /* word1 */;
+ __le32 rx_producers /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le16 word2 /* word2 */;
+ __le16 word3 /* word3 */;
+};
+
+/*
+ * The core storm context for the Mstorm
+ */
+struct mstorm_core_conn_st_ctx {
+ __le32 reserved[24];
+};
+
+/*
+ * The core storm context for the Ustorm
+ */
+struct ustorm_core_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/*
+ * core connection context
+ */
+struct core_conn_context {
+/* ystorm storm context */
+ struct ystorm_core_conn_st_ctx ystorm_st_context;
+ struct regpair ystorm_st_padding[2] /* padding */;
+/* pstorm storm context */
+ struct pstorm_core_conn_st_ctx pstorm_st_context;
+ struct regpair pstorm_st_padding[2] /* padding */;
+/* xstorm storm context */
+ struct xstorm_core_conn_st_ctx xstorm_st_context;
+/* xstorm aggregative context */
+ struct e4_xstorm_core_conn_ag_ctx xstorm_ag_context;
+/* tstorm aggregative context */
+ struct e4_tstorm_core_conn_ag_ctx tstorm_ag_context;
+/* ustorm aggregative context */
+ struct e4_ustorm_core_conn_ag_ctx ustorm_ag_context;
+/* mstorm storm context */
+ struct mstorm_core_conn_st_ctx mstorm_st_context;
+/* ustorm storm context */
+ struct ustorm_core_conn_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2] /* padding */;
+};
+
+
+/*
+ * How ll2 should deal with packet upon errors
+ */
+enum core_error_handle {
+ LL2_DROP_PACKET /* If error occurs drop packet */,
+ LL2_DO_NOTHING /* If error occurs do nothing */,
+ LL2_ASSERT /* If error occurs assert */,
+ MAX_CORE_ERROR_HANDLE
+};
+
+
+/*
+ * opcodes for the event ring
+ */
+enum core_event_opcode {
+ CORE_EVENT_TX_QUEUE_START,
+ CORE_EVENT_TX_QUEUE_STOP,
+ CORE_EVENT_RX_QUEUE_START,
+ CORE_EVENT_RX_QUEUE_STOP,
+ CORE_EVENT_RX_QUEUE_FLUSH,
+ MAX_CORE_EVENT_OPCODE
+};
+
+
+/*
+ * The L4 pseudo checksum mode for Core
+ */
+enum core_l4_pseudo_checksum_mode {
+/* Pseudo Checksum on packet is calculated with the correct packet length. */
+ CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH,
+/* Pseudo Checksum on packet is calculated with zero length. */
+ CORE_L4_PSEUDO_CSUM_ZERO_LENGTH,
+ MAX_CORE_L4_PSEUDO_CHECKSUM_MODE
+};
+
+
+/*
+ * Light-L2 per-port statistics
+ */
+struct core_ll2_port_stats {
+ struct regpair gsi_invalid_hdr;
+ struct regpair gsi_invalid_pkt_length;
+ struct regpair gsi_unsupported_pkt_typ;
+ struct regpair gsi_crcchksm_error;
+};
+
+
+/*
+ * Ethernet TX Per Queue Stats
+ */
+struct core_ll2_pstorm_per_queue_stat {
+/* number of total bytes sent without errors */
+ struct regpair sent_ucast_bytes;
+/* number of total bytes sent without errors */
+ struct regpair sent_mcast_bytes;
+/* number of total bytes sent without errors */
+ struct regpair sent_bcast_bytes;
+/* number of total packets sent without errors */
+ struct regpair sent_ucast_pkts;
+/* number of total packets sent without errors */
+ struct regpair sent_mcast_pkts;
+/* number of total packets sent without errors */
+ struct regpair sent_bcast_pkts;
+};
+
+
+/*
+ * Light-L2 RX Producers in Tstorm RAM
+ */
+struct core_ll2_rx_prod {
+ __le16 bd_prod /* BD Producer */;
+ __le16 cqe_prod /* CQE Producer */;
+ __le32 reserved;
+};
+
+
+struct core_ll2_tstorm_per_queue_stat {
+/* Number of packets discarded because they are bigger than MTU */
+ struct regpair packet_too_big_discard;
+/* Number of packets discarded due to lack of host buffers */
+ struct regpair no_buff_discard;
+};
+
+
+struct core_ll2_ustorm_per_queue_stat {
+ struct regpair rcv_ucast_bytes;
+ struct regpair rcv_mcast_bytes;
+ struct regpair rcv_bcast_bytes;
+ struct regpair rcv_ucast_pkts;
+ struct regpair rcv_mcast_pkts;
+ struct regpair rcv_bcast_pkts;
+};
+
+
+/*
+ * Core Ramrod Command IDs (light L2)
+ */
+enum core_ramrod_cmd_id {
+ CORE_RAMROD_UNUSED,
+ CORE_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */,
+ CORE_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */,
+ CORE_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
+ CORE_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
+ CORE_RAMROD_RX_QUEUE_FLUSH /* RX Flush queue Ramrod */,
+ MAX_CORE_RAMROD_CMD_ID
+};
+
+
+/*
+ * Core RoCE flavor type
+ */
+enum core_roce_flavor_type {
+ CORE_ROCE,
+ CORE_RROCE,
+ MAX_CORE_ROCE_FLAVOR_TYPE
+};
+
+
+/*
+ * Specifies how ll2 should deal with packet errors: packet_too_big and no_buff
+ */
+struct core_rx_action_on_error {
+ u8 error_type;
+/* ll2 how to handle error packet_too_big (use enum core_error_handle) */
+#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK 0x3
+#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0
+/* ll2 how to handle error with no_buff (use enum core_error_handle) */
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK 0x3
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT 2
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK 0xF
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT 4
+};
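
A sketch (illustrative, not part of this patch) of filling error_type so that both error classes drop the packet, using the masks above and enum core_error_handle:

static inline u8 example_action_on_error(void)
{
	u8 error_type = 0;

	/* handle packet_too_big errors by dropping */
	error_type |= (LL2_DROP_PACKET &
		       CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK) <<
		      CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT;
	/* handle no_buff errors by dropping */
	error_type |= (LL2_DROP_PACKET &
		       CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK) <<
		      CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT;

	return error_type;
}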
+
+
+/*
+ * Core RX BD for Light L2
+ */
+struct core_rx_bd {
+ struct regpair addr;
+ __le16 reserved[4];
+};
+
+
+/*
+ * Core RX CM offload BD for Light L2
+ */
+struct core_rx_bd_with_buff_len {
+ struct regpair addr;
+ __le16 buff_length;
+ __le16 reserved[3];
+};
+
+/*
+ * Core RX CM offload BD for Light L2
+ */
+union core_rx_bd_union {
+ struct core_rx_bd rx_bd /* Core Rx Bd static buffer size */;
+/* Core Rx Bd with dynamic buffer length */
+ struct core_rx_bd_with_buff_len rx_bd_with_len;
+};
+
+
+
+/*
+ * Opaque Data for Light L2 RX CQE .
+ */
+struct core_rx_cqe_opaque_data {
+ __le32 data[2] /* Opaque CQE Data */;
+};
+
+
+/*
+ * Core RX CQE Type for Light L2
+ */
+enum core_rx_cqe_type {
+ CORE_RX_CQE_ILLIGAL_TYPE /* Bad RX Cqe type */,
+ CORE_RX_CQE_TYPE_REGULAR /* Regular Core RX CQE */,
+ CORE_RX_CQE_TYPE_GSI_OFFLOAD /* Fp Gsi offload RX CQE */,
+ CORE_RX_CQE_TYPE_SLOW_PATH /* Slow path Core RX CQE */,
+ MAX_CORE_RX_CQE_TYPE
+};
+
+
+/*
+ * Core RX CQE for Light L2 .
+ */
+struct core_rx_fast_path_cqe {
+ u8 type /* CQE type */;
+/* Offset (in bytes) of the packet from start of the buffer */
+ u8 placement_offset;
+/* Parsing and error flags from the parser */
+ struct parsing_and_err_flags parse_flags;
+ __le16 packet_length /* Total packet length (from the parser) */;
+ __le16 vlan /* 802.1q VLAN tag */;
+ struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
+/* bit-map: each bit represents a specific error. Error indications are
+ * provided by the cracker; see spec for a detailed description
+ */
+ struct parsing_err_flags err_flags;
+ __le16 reserved0;
+ __le32 reserved1[3];
+};
+
+/*
+ * Core Rx CM offload CQE .
+ */
+struct core_rx_gsi_offload_cqe {
+ u8 type /* CQE type */;
+ u8 data_length_error /* set if gsi data is bigger than buff */;
+/* Parsing and error flags from the parser */
+ struct parsing_and_err_flags parse_flags;
+ __le16 data_length /* Total packet length (from the parser) */;
+ __le16 vlan /* 802.1q VLAN tag */;
+ __le32 src_mac_addrhi /* hi 4 bytes source mac address */;
+ __le16 src_mac_addrlo /* lo 2 bytes of source mac address */;
+ u8 reserved1[2];
+ __le32 gid_dst[4] /* Gid destination address */;
+};
+
+/*
+ * Core RX CQE for Light L2 .
+ */
+struct core_rx_slow_path_cqe {
+ u8 type /* CQE type */;
+ u8 ramrod_cmd_id;
+ __le16 echo;
+ struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
+ __le32 reserved1[5];
+};
+
+/*
+ * Core RX CQE union for Light L2
+ */
+union core_rx_cqe_union {
+ struct core_rx_fast_path_cqe rx_cqe_fp /* Fast path CQE */;
+ struct core_rx_gsi_offload_cqe rx_cqe_gsi /* GSI offload CQE */;
+ struct core_rx_slow_path_cqe rx_cqe_sp /* Slow path CQE */;
+};
+
+
+
+
+
+/*
+ * Ramrod data for rx queue start ramrod
+ */
+struct core_rx_start_ramrod_data {
+ struct regpair bd_base /* bd address of the first bd page */;
+ struct regpair cqe_pbl_addr /* Base address on host of CQE PBL */;
+ __le16 mtu /* Maximum transmission unit */;
+ __le16 sb_id /* Status block ID */;
+ u8 sb_index /* index of the protocol index */;
+ u8 complete_cqe_flg /* post completion to the CQE ring if set */;
+ u8 complete_event_flg /* post completion to the event ring if set */;
+ u8 drop_ttl0_flg /* drop packet with ttl0 if set */;
+ __le16 num_of_pbl_pages /* Num of pages in CQE PBL */;
+/* if set, 802.1q tags will be removed and copied to CQE */
+ u8 inner_vlan_removal_en;
+ u8 queue_id /* Light L2 RX Queue ID */;
+ u8 main_func_queue /* Is this the main queue for the PF */;
+/* Duplicate broadcast packets to LL2 main queue in mf_si mode. Valid if
+ * main_func_queue is set.
+ */
+ u8 mf_si_bcast_accept_all;
+/* Duplicate multicast packets to LL2 main queue in mf_si mode. Valid if
+ * main_func_queue is set.
+ */
+ u8 mf_si_mcast_accept_all;
+/* Specifies how ll2 should deal with packet errors: packet_too_big and
+ * no_buff
+ */
+ struct core_rx_action_on_error action_on_error;
+/* set when in GSI offload mode on ROCE connection */
+ u8 gsi_offload_flag;
+ u8 reserved[7];
+};
+
+
+/*
+ * Ramrod data for rx queue stop ramrod
+ */
+struct core_rx_stop_ramrod_data {
+ u8 complete_cqe_flg /* post completion to the CQE ring if set */;
+ u8 complete_event_flg /* post completion to the event ring if set */;
+ u8 queue_id /* Light L2 RX Queue ID */;
+ u8 reserved1;
+ __le16 reserved2[2];
+};
+
+
+/*
+ * Flags for Core TX BD
+ */
+struct core_tx_bd_data {
+ __le16 as_bitfield;
+/* Do not allow additional VLAN manipulations on this packet (DCB) */
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0
+/* Insert VLAN into packet */
+#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1
+#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1
+/* This is the first BD of the packet (for debug) */
+#define CORE_TX_BD_DATA_START_BD_MASK 0x1
+#define CORE_TX_BD_DATA_START_BD_SHIFT 2
+/* Calculate the IP checksum for the packet */
+#define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1
+#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3
+/* Calculate the L4 checksum for the packet */
+#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1
+#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4
+/* Packet is IPv6 with extensions */
+#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1
+#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5
+/* If IPv6+ext, and if l4_csum is 1, then this field indicates L4 protocol:
+ * 0-TCP, 1-UDP
+ */
+#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1
+#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6
+/* The pseudo checksum mode to place in the L4 checksum field. Required only
+ * when IPv6+ext and l4_csum is set. (use enum core_l4_pseudo_checksum_mode)
+ */
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK 0x1
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
+/* Number of BDs that make up one packet - width wide enough to present
+ * CORE_LL2_TX_MAX_BDS_PER_PACKET
+ */
+#define CORE_TX_BD_DATA_NBDS_MASK 0xF
+#define CORE_TX_BD_DATA_NBDS_SHIFT 8
+/* Use roce_flavor enum - differentiating between RoCE flavors is valid only
+ * when connType is ROCE (use enum core_roce_flavor_type)
+ */
+#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1
+#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12
+/* Calculate ip length */
+#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1
+#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13
+#define CORE_TX_BD_DATA_RESERVED0_MASK 0x3
+#define CORE_TX_BD_DATA_RESERVED0_SHIFT 14
+};
+
+/*
+ * Core TX BD for Light L2
+ */
+struct core_tx_bd {
+ struct regpair addr /* Buffer Address */;
+ __le16 nbytes /* Number of Bytes in Buffer */;
+/* Network packets: VLAN to insert to packet (if insertion flag set) LoopBack
+ * packets: echo data to pass to Rx
+ */
+ __le16 nw_vlan_or_lb_echo;
+ struct core_tx_bd_data bd_data /* BD Flags */;
+ __le16 bitfield1;
+/* L4 Header Offset from start of packet (in Words). This is needed if both
+ * l4_csum and ipv6_ext are set
+ */
+#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
+#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
+/* Packet destination - Network, LB (use enum core_tx_dest) */
+#define CORE_TX_BD_TX_DST_MASK 0x1
+#define CORE_TX_BD_TX_DST_SHIFT 14
+#define CORE_TX_BD_RESERVED_MASK 0x1
+#define CORE_TX_BD_RESERVED_SHIFT 15
+};
+
+
+
+/*
+ * Light L2 TX Destination
+ */
+enum core_tx_dest {
+ CORE_TX_DEST_NW /* Light L2 TX Destination to the Network */,
+ CORE_TX_DEST_LB /* Light L2 TX Destination to the Loopback */,
+ MAX_CORE_TX_DEST
+};
+
+
+/*
+ * Ramrod data for tx queue start ramrod
+ */
+struct core_tx_start_ramrod_data {
+ struct regpair pbl_base_addr /* Address of the pbl page */;
+ __le16 mtu /* Maximum transmission unit */;
+ __le16 sb_id /* Status block ID */;
+ u8 sb_index /* Status block protocol index */;
+ u8 stats_en /* Statistics Enable */;
+ u8 stats_id /* Statistics Counter ID */;
+ u8 conn_type /* connection type that loaded ll2 */;
+ __le16 pbl_size /* Number of BD pages pointed by PBL */;
+ __le16 qm_pq_id /* QM PQ ID */;
+/* set when in GSI offload mode on ROCE connection */
+ u8 gsi_offload_flag;
+ u8 resrved[3];
+};
+
+
+/*
+ * Ramrod data for tx queue stop ramrod
+ */
+struct core_tx_stop_ramrod_data {
+ __le32 reserved0[2];
+};
+
+
+/*
+ * Enum flag for what type of dcb data to update
+ */
+enum dcb_dscp_update_mode {
+/* use when no change should be done to dcb data */
+ DONT_UPDATE_DCB_DSCP,
+ UPDATE_DCB /* use to update only l2 (vlan) priority */,
+ UPDATE_DSCP /* use to update only l3 dscp */,
+ UPDATE_DCB_DSCP /* update vlan pri and dscp */,
+ MAX_DCB_DSCP_UPDATE_FLAG
+};
+
+
+struct eth_mstorm_per_pf_stat {
+ struct regpair gre_discard_pkts /* Dropped GRE RX packets */;
+ struct regpair vxlan_discard_pkts /* Dropped VXLAN RX packets */;
+ struct regpair geneve_discard_pkts /* Dropped GENEVE RX packets */;
+ struct regpair lb_discard_pkts /* Dropped Tx switched packets */;
+};
+
+
+struct eth_mstorm_per_queue_stat {
+/* Number of packets discarded because TTL=0 (in IPv4) or hopLimit=0 (IPv6) */
+ struct regpair ttl0_discard;
+/* Number of packets discarded because they are bigger than MTU */
+ struct regpair packet_too_big_discard;
+/* Number of packets discarded due to lack of host buffers (BDs/SGEs/CQEs) */
+ struct regpair no_buff_discard;
+/* Number of packets discarded because of no active Rx connection */
+ struct regpair not_active_discard;
+/* number of coalesced packets in all TPA aggregations */
+ struct regpair tpa_coalesced_pkts;
+/* total number of TPA aggregations */
+ struct regpair tpa_coalesced_events;
+/* number of aggregations, which abnormally ended */
+ struct regpair tpa_aborts_num;
+/* total TCP payload length in all TPA aggregations */
+ struct regpair tpa_coalesced_bytes;
+};
+
+
+/*
+ * Ethernet TX Per PF
+ */
+struct eth_pstorm_per_pf_stat {
+/* number of total ucast bytes sent on loopback port without errors */
+ struct regpair sent_lb_ucast_bytes;
+/* number of total mcast bytes sent on loopback port without errors */
+ struct regpair sent_lb_mcast_bytes;
+/* number of total bcast bytes sent on loopback port without errors */
+ struct regpair sent_lb_bcast_bytes;
+/* number of total ucast packets sent on loopback port without errors */
+ struct regpair sent_lb_ucast_pkts;
+/* number of total mcast packets sent on loopback port without errors */
+ struct regpair sent_lb_mcast_pkts;
+/* number of total bcast packets sent on loopback port without errors */
+ struct regpair sent_lb_bcast_pkts;
+ struct regpair sent_gre_bytes /* Sent GRE bytes */;
+ struct regpair sent_vxlan_bytes /* Sent VXLAN bytes */;
+ struct regpair sent_geneve_bytes /* Sent GENEVE bytes */;
+ struct regpair sent_gre_pkts /* Sent GRE packets */;
+ struct regpair sent_vxlan_pkts /* Sent VXLAN packets */;
+ struct regpair sent_geneve_pkts /* Sent GENEVE packets */;
+ struct regpair gre_drop_pkts /* Dropped GRE TX packets */;
+ struct regpair vxlan_drop_pkts /* Dropped VXLAN TX packets */;
+ struct regpair geneve_drop_pkts /* Dropped GENEVE TX packets */;
+};
+
+
+/*
+ * Ethernet TX Per Queue Stats
+ */
+struct eth_pstorm_per_queue_stat {
+/* number of total bytes sent without errors */
+ struct regpair sent_ucast_bytes;
+/* number of total bytes sent without errors */
+ struct regpair sent_mcast_bytes;
+/* number of total bytes sent without errors */
+ struct regpair sent_bcast_bytes;
+/* number of total packets sent without errors */
+ struct regpair sent_ucast_pkts;
+/* number of total packets sent without errors */
+ struct regpair sent_mcast_pkts;
+/* number of total packets sent without errors */
+ struct regpair sent_bcast_pkts;
+/* number of total packets dropped due to errors */
+ struct regpair error_drop_pkts;
+};
+
+
+/*
+ * ETH Rx producers data
+ */
+struct eth_rx_rate_limit {
+/* Rate Limit Multiplier - (Storm Clock (MHz) * 8 / Desired Bandwidth (MB/s)) */
+ __le16 mult;
+/* Constant term to add (or subtract from number of cycles) */
+ __le16 cnst;
+ u8 add_sub_cnst /* Add (1) or subtract (0) constant term */;
+ u8 reserved0;
+ __le16 reserved1;
+};
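
Worked illustration of the multiplier formula in the comment above (clock and bandwidth figures are placeholders, not hardware values): with a 1000 MHz storm clock and a 4000 MB/s target, mult = 1000 * 8 / 4000 = 2. A hypothetical helper:

static inline u16 example_rate_limit_mult(u32 storm_clk_mhz, u32 bw_mb_s)
{
	/* Rate Limit Multiplier = Storm Clock (MHz) * 8 / Bandwidth (MB/s) */
	return (u16)(storm_clk_mhz * 8 / bw_mb_s);
}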
+
+
+struct eth_ustorm_per_pf_stat {
+/* number of total ucast bytes received on loopback port without errors */
+ struct regpair rcv_lb_ucast_bytes;
+/* number of total mcast bytes received on loopback port without errors */
+ struct regpair rcv_lb_mcast_bytes;
+/* number of total bcast bytes received on loopback port without errors */
+ struct regpair rcv_lb_bcast_bytes;
+/* number of total ucast packets received on loopback port without errors */
+ struct regpair rcv_lb_ucast_pkts;
+/* number of total mcast packets received on loopback port without errors */
+ struct regpair rcv_lb_mcast_pkts;
+/* number of total bcast packets received on loopback port without errors */
+ struct regpair rcv_lb_bcast_pkts;
+ struct regpair rcv_gre_bytes /* Received GRE bytes */;
+ struct regpair rcv_vxlan_bytes /* Received VXLAN bytes */;
+ struct regpair rcv_geneve_bytes /* Received GENEVE bytes */;
+ struct regpair rcv_gre_pkts /* Received GRE packets */;
+ struct regpair rcv_vxlan_pkts /* Received VXLAN packets */;
+ struct regpair rcv_geneve_pkts /* Received GENEVE packets */;
+};
+
+
+struct eth_ustorm_per_queue_stat {
+ struct regpair rcv_ucast_bytes;
+ struct regpair rcv_mcast_bytes;
+ struct regpair rcv_bcast_bytes;
+ struct regpair rcv_ucast_pkts;
+ struct regpair rcv_mcast_pkts;
+ struct regpair rcv_bcast_pkts;
+};
+
+
+/*
+ * Event Ring Next Page Address
+ */
+struct event_ring_next_addr {
+ struct regpair addr /* Next Page Address */;
+ __le32 reserved[2] /* Reserved */;
+};
+
+/*
+ * Event Ring Element
+ */
+union event_ring_element {
+ struct event_ring_entry entry /* Event Ring Entry */;
+/* Event Ring Next Page Address */
+ struct event_ring_next_addr next_addr;
+};
+
+
+
+/*
+ * Ports mode
+ */
+enum fw_flow_ctrl_mode {
+ flow_ctrl_pause,
+ flow_ctrl_pfc,
+ MAX_FW_FLOW_CTRL_MODE
+};
+
+
+/*
+ * Major and Minor hsi Versions
+ */
+struct hsi_fp_ver_struct {
+ u8 minor_ver_arr[2] /* Minor Version of hsi loading pf */;
+ u8 major_ver_arr[2] /* Major Version of driver loading pf */;
+};
+
+
+/*
+ * Integration Phase
+ */
+enum integ_phase {
+ INTEG_PHASE_BB_A0_LATEST = 3 /* BB A0 latest integration phase */,
+ INTEG_PHASE_BB_B0_NO_MCP = 10 /* BB B0 without MCP */,
+ INTEG_PHASE_BB_B0_WITH_MCP = 11 /* BB B0 with MCP */,
+ MAX_INTEG_PHASE
+};
+
+
+/*
+ * Ports mode
+ */
+enum iwarp_ll2_tx_queues {
+/* LL2 queue for OOO packets sent in-order by the driver */
+ IWARP_LL2_IN_ORDER_TX_QUEUE = 1,
+/* LL2 queue for unaligned packets sent aligned by the driver */
+ IWARP_LL2_ALIGNED_TX_QUEUE,
+/* LL2 queue for unaligned packets sent aligned and was right-trimmed by the
+ * driver
+ */
+ IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE,
+ IWARP_LL2_ERROR /* Error indication */,
+ MAX_IWARP_LL2_TX_QUEUES
+};
+
+
+/*
+ * Malicious VF error ID
+ */
+enum malicious_vf_error_id {
+ MALICIOUS_VF_NO_ERROR /* Zero placeholder value */,
+/* Writing to VF/PF channel when it is not ready */
+ VF_PF_CHANNEL_NOT_READY,
+ VF_ZONE_MSG_NOT_VALID /* VF channel message is not valid */,
+ VF_ZONE_FUNC_NOT_ENABLED /* Parent PF of VF channel is not active */,
+/* TX packet is shorter than reported on BDs or than the minimal size */
+ ETH_PACKET_TOO_SMALL,
+/* Tx packet marked for VLAN insertion when that is illegal */
+ ETH_ILLEGAL_VLAN_MODE,
+ ETH_MTU_VIOLATION /* TX packet is greater than MTU */,
+/* TX packet has illegal inband tags marked */
+ ETH_ILLEGAL_INBAND_TAGS,
+/* VLAN can't be added to an inband tag */
+ ETH_VLAN_INSERT_AND_INBAND_VLAN,
+/* indicated number of BDs for the packet is illegal */
+ ETH_ILLEGAL_NBDS,
+ ETH_FIRST_BD_WO_SOP /* 1st BD must have start_bd flag set */,
+/* There are not enough BDs for transmission of even one packet */
+ ETH_INSUFFICIENT_BDS,
+ ETH_ILLEGAL_LSO_HDR_NBDS /* Header NBDs value is illegal */,
+ ETH_ILLEGAL_LSO_MSS /* LSO MSS value is more than allowed */,
+/* an empty BD (which does not contain control flags) is illegal */
+ ETH_ZERO_SIZE_BD,
+ ETH_ILLEGAL_LSO_HDR_LEN /* LSO header size is above the limit */,
+/* In LSO it is expected that on the local BD ring there will be at least MSS
+ * bytes of data
+ */
+ ETH_INSUFFICIENT_PAYLOAD,
+ ETH_EDPM_OUT_OF_SYNC /* Valid BDs on local ring after EDPM L2 sync */,
+/* Tunneled packet with IPv6+Ext without a proper number of BDs */
+ ETH_TUNN_IPV6_EXT_NBD_ERR,
+ ETH_CONTROL_PACKET_VIOLATION /* VF sent control frame such as PFC */,
+ ETH_ANTI_SPOOFING_ERR /* Anti-Spoofing verification failure */,
+ MAX_MALICIOUS_VF_ERROR_ID
+};
+
+
+
+/*
+ * Mstorm non-triggering VF zone
+ */
+struct mstorm_non_trigger_vf_zone {
+/* VF statistic bucket */
+ struct eth_mstorm_per_queue_stat eth_queue_stat;
+/* VF RX queues producers */
+ struct eth_rx_prod_data
+ eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD];
+};
+
+
+/*
+ * Mstorm VF zone
+ */
+struct mstorm_vf_zone {
+/* non-interrupt-triggering zone */
+ struct mstorm_non_trigger_vf_zone non_trigger;
+};
+
+
+/*
+ * personality per PF
+ */
+enum personality_type {
+ BAD_PERSONALITY_TYP,
+ PERSONALITY_ISCSI /* iSCSI and LL2 */,
+ PERSONALITY_FCOE /* Fcoe and LL2 */,
+ PERSONALITY_RDMA_AND_ETH /* Roce or Iwarp, Eth and LL2 */,
+ PERSONALITY_RDMA /* Roce and LL2 */,
+ PERSONALITY_CORE /* CORE(LL2) */,
+ PERSONALITY_ETH /* Ethernet */,
+ PERSONALITY_TOE /* Toe and LL2 */,
+ MAX_PERSONALITY_TYPE
+};
+
+
+/*
+ * tunnel configuration
+ */
+struct pf_start_tunnel_config {
+/* Set VXLAN tunnel UDP destination port to vxlan_udp_port. If not set -
+ * FW will use a default port
+ */
+ u8 set_vxlan_udp_port_flg;
+/* Set GENEVE tunnel UDP destination port to geneve_udp_port. If not set -
+ * FW will use a default port
+ */
+ u8 set_geneve_udp_port_flg;
+ u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
+/* If set, enable l2 GENEVE tunnel in TX path. */
+ u8 tx_enable_l2geneve;
+/* If set, enable IP GENEVE tunnel in TX path. */
+ u8 tx_enable_ipgeneve;
+ u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */;
+ u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */;
+ u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
+/* Classification scheme for l2 GENEVE tunnel. */
+ u8 tunnel_clss_l2geneve;
+/* Classification scheme for ip GENEVE tunnel. */
+ u8 tunnel_clss_ipgeneve;
+ u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. */;
+ u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */;
+/* VXLAN tunnel UDP destination port. Valid if set_vxlan_udp_port_flg=1 */
+ __le16 vxlan_udp_port;
+/* GENEVE tunnel UDP destination port. Valid if set_geneve_udp_port_flg=1 */
+ __le16 geneve_udp_port;
+};
+
+/*
+ * Ramrod data for PF start ramrod
+ */
+struct pf_start_ramrod_data {
+ struct regpair event_ring_pbl_addr /* Address of event ring PBL */;
+/* PBL address of consolidation queue */
+ struct regpair consolid_q_pbl_addr;
+/* tunnel configuration. */
+ struct pf_start_tunnel_config tunnel_config;
+ __le16 event_ring_sb_id /* Status block ID */;
+/* All VF IDs owned by the PF range from base_vf_id to base_vf_id + num_vfs */
+ u8 base_vf_id;
+ u8 num_vfs /* Amount of vfs owned by PF */;
+ u8 event_ring_num_pages /* Number of PBL pages in event ring */;
+ u8 event_ring_sb_index /* Status block index */;
+ u8 path_id /* HW path ID (engine ID) */;
+ u8 warning_as_error /* In FW asserts, treat warning as error */;
+/* If not set - throw a warning for each ramrod (for debug) */
+ u8 dont_log_ramrods;
+ u8 personality /* defines the personality type of the new PF */;
+/* Log type mask. Each bit set enables a corresponding event type logging.
+ * Event types are defined as ASSERT_LOG_TYPE_xxx
+ */
+ __le16 log_type_mask;
+ u8 mf_mode /* Multi function mode */;
+ u8 integ_phase /* Integration phase */;
+/* If set, inter-pf tx switching is allowed in Switch Independent func mode */
+ u8 allow_npar_tx_switching;
+/* Map from inner to outer priority. Set pri_map_valid when initializing the map */
+ u8 inner_to_outer_pri_map[8];
+/* If inner_to_outer_pri_map is initialized, set pri_map_valid */
+ u8 pri_map_valid;
+/* In case mf_mode is MF_OVLAN, this field specifies the outer vlan
+ * (lower 16 bits) and ethType to use (higher 16 bits)
+ */
+ __le32 outer_tag;
+/* FP HSI version to be used by FW */
+ struct hsi_fp_ver_struct hsi_fp_ver;
+};
+
+
+
+/*
+ * Data for port update ramrod
+ */
+struct protocol_dcb_data {
+ u8 dcb_enable_flag /* dcbEnable flag value */;
+ u8 dscp_enable_flag /* If set use dscp value */;
+ u8 dcb_priority /* dcbPri flag value */;
+ u8 dcb_tc /* dcb TC value */;
+ u8 dscp_val /* dscp value to write if dscp_enable_flag is set */;
+ u8 reserved0;
+};
+
+/*
+ * Update tunnel configuration
+ */
+struct pf_update_tunnel_config {
+/* Update RX per PF tunnel classification scheme. */
+ u8 update_rx_pf_clss;
+/* Update per PORT default tunnel RX classification scheme for traffic with
+ * unknown unicast outer MAC in NPAR mode.
+ */
+ u8 update_rx_def_ucast_clss;
+/* Update per PORT default tunnel RX classification scheme for traffic with non
+ * unicast outer MAC in NPAR mode.
+ */
+ u8 update_rx_def_non_ucast_clss;
+/* Update TX per PF tunnel classification scheme. used by pf update. */
+ u8 update_tx_pf_clss;
+/* Update VXLAN tunnel UDP destination port. */
+ u8 set_vxlan_udp_port_flg;
+/* Update GENEVE tunnel UDP destination port. */
+ u8 set_geneve_udp_port_flg;
+ u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
+/* If set, enable l2 GENEVE tunnel in TX path. */
+ u8 tx_enable_l2geneve;
+/* If set, enable IP GENEVE tunnel in TX path. */
+ u8 tx_enable_ipgeneve;
+ u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */;
+ u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */;
+ u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
+/* Classification scheme for l2 GENEVE tunnel. */
+ u8 tunnel_clss_l2geneve;
+/* Classification scheme for ip GENEVE tunnel. */
+ u8 tunnel_clss_ipgeneve;
+ u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. */;
+ u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */;
+ __le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
+ __le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
+ __le16 reserved[2];
+};
+
+/*
+ * Data for port update ramrod
+ */
+struct pf_update_ramrod_data {
+ u8 pf_id;
+ u8 update_eth_dcb_data_mode /* Update Eth DCB data indication */;
+ u8 update_fcoe_dcb_data_mode /* Update FCOE DCB data indication */;
+ u8 update_iscsi_dcb_data_mode /* Update iSCSI DCB data indication */;
+ u8 update_roce_dcb_data_mode /* Update ROCE DCB data indication */;
+/* Update RROCE (RoceV2) DCB data indication */
+ u8 update_rroce_dcb_data_mode;
+ u8 update_iwarp_dcb_data_mode /* Update IWARP DCB data indication */;
+ u8 update_mf_vlan_flag /* Update MF outer vlan Id */;
+ struct protocol_dcb_data eth_dcb_data /* core eth related fields */;
+ struct protocol_dcb_data fcoe_dcb_data /* core fcoe related fields */;
+/* core iscsi related fields */
+ struct protocol_dcb_data iscsi_dcb_data;
+ struct protocol_dcb_data roce_dcb_data /* core roce related fields */;
+/* core rroce related fields */
+ struct protocol_dcb_data rroce_dcb_data;
+/* core iwarp related fields */
+ struct protocol_dcb_data iwarp_dcb_data;
+ __le16 mf_vlan /* new outer vlan id value */;
+ __le16 reserved;
+/* tunnel configuration. */
+ struct pf_update_tunnel_config tunnel_config;
+};
+
+
+
+/*
+ * Ports mode
+ */
+enum ports_mode {
+ ENGX2_PORTX1 /* 2 engines x 1 port */,
+ ENGX2_PORTX2 /* 2 engines x 2 ports */,
+ ENGX1_PORTX1 /* 1 engine x 1 port */,
+ ENGX1_PORTX2 /* 1 engine x 2 ports */,
+ ENGX1_PORTX4 /* 1 engine x 4 ports */,
+ MAX_PORTS_MODE
+};
+
+
+
+/*
+ * use to index in hsi_fp_[major|minor]_ver_arr per protocol
+ */
+enum protocol_version_array_key {
+ ETH_VER_KEY = 0,
+ ROCE_VER_KEY,
+ MAX_PROTOCOL_VERSION_ARRAY_KEY
+};
+
+
+
+/*
+ * RDMA TX Stats
+ */
+struct rdma_sent_stats {
+ struct regpair sent_bytes /* number of total RDMA bytes sent */;
+ struct regpair sent_pkts /* number of total RDMA packets sent */;
+};
+
+/*
+ * Pstorm non-triggering VF zone
+ */
+struct pstorm_non_trigger_vf_zone {
+/* VF statistic bucket */
+ struct eth_pstorm_per_queue_stat eth_queue_stat;
+ struct rdma_sent_stats rdma_stats /* RoCE sent statistics */;
+};
+
+
+/*
+ * Pstorm VF zone
+ */
+struct pstorm_vf_zone {
+/* non-interrupt-triggering zone */
+ struct pstorm_non_trigger_vf_zone non_trigger;
+ struct regpair reserved[7] /* vf_zone size must be a power of 2 */;
+};
+
+
+/*
+ * Ramrod Header of SPQE
+ */
+struct ramrod_header {
+ __le32 cid /* Slowpath Connection CID */;
+ u8 cmd_id /* Ramrod Cmd (Per Protocol Type) */;
+ u8 protocol_id /* Ramrod Protocol ID */;
+ __le16 echo /* Ramrod echo */;
+};
+
+
+/*
+ * RDMA RX Stats
+ */
+struct rdma_rcv_stats {
+ struct regpair rcv_bytes /* number of total RDMA bytes received */;
+ struct regpair rcv_pkts /* number of total RDMA packets received */;
+};
+
+
+
+/*
+ * Data for update QCN/DCQCN RL ramrod
+ */
+struct rl_update_ramrod_data {
+ u8 qcn_update_param_flg /* Update QCN global params: timeout. */;
+/* Update DCQCN global params: timeout, g, k. */
+ u8 dcqcn_update_param_flg;
+ u8 rl_init_flg /* Init RL parameters, when RL disabled. */;
+ u8 rl_start_flg /* Start RL in IDLE state. Set rate to maximum. */;
+ u8 rl_stop_flg /* Stop RL. */;
+ u8 rl_id_first /* ID of the first or single RL to be updated. */;
+/* ID of the last RL to be updated. If clear, a single RL will be updated. */
+ u8 rl_id_last;
+ u8 rl_dc_qcn_flg /* If set, the RL will be used for DCQCN. */;
+ __le32 rl_bc_rate /* Byte Counter Limit. */;
+ __le16 rl_max_rate /* Maximum rate in 1.6 Mbps resolution. */;
+ __le16 rl_r_ai /* Active increase rate. */;
+ __le16 rl_r_hai /* Hyper active increase rate. */;
+ __le16 dcqcn_g /* DCQCN Alpha update gain in 1/64K resolution. */;
+ __le32 dcqcn_k_us /* DCQCN Alpha update interval. */;
+ __le32 dcqcn_timeuot_us /* DCQCN timeout. */;
+ __le32 qcn_timeuot_us /* QCN timeout. */;
+ __le32 reserved[2];
+};
+
+
+/*
+ * Slowpath Element (SPQE)
+ */
+struct slow_path_element {
+ struct ramrod_header hdr /* Ramrod Header */;
+ struct regpair data_ptr /* Pointer to the Ramrod Data on the Host */;
+};
+
+
+/*
+ * Tstorm non-triggering VF zone
+ */
+struct tstorm_non_trigger_vf_zone {
+ struct rdma_rcv_stats rdma_stats /* RoCE received statistics */;
+};
+
+
+struct tstorm_per_port_stat {
+/* packet is dropped because it was truncated in NIG */
+ struct regpair trunc_error_discard;
+/* packet is dropped because of Ethernet FCS error */
+ struct regpair mac_error_discard;
+/* packet is dropped because classification was unsuccessful */
+ struct regpair mftag_filter_discard;
+/* packet was passed to Ethernet and dropped because of no mac filter match */
+ struct regpair eth_mac_filter_discard;
+/* packet passed to Light L2 and dropped because Light L2 is not configured for
+ * this PF
+ */
+ struct regpair ll2_mac_filter_discard;
+/* packet passed to Light L2 and dropped because the Light L2 connection is
+ * disabled
+ */
+ struct regpair ll2_conn_disabled_discard;
+/* packet is an ISCSI irregular packet */
+ struct regpair iscsi_irregular_pkt;
+/* packet is an FCOE irregular packet */
+ struct regpair fcoe_irregular_pkt;
+/* packet is a ROCE irregular packet */
+ struct regpair roce_irregular_pkt;
+/* packet is an IWARP irregular packet */
+ struct regpair iwarp_irregular_pkt;
+/* packet is an ETH irregular packet */
+ struct regpair eth_irregular_pkt;
+/* packet is a TOE irregular packet */
+ struct regpair toe_irregular_pkt;
+/* packet is a PREROCE irregular packet */
+ struct regpair preroce_irregular_pkt;
+ struct regpair eth_gre_tunn_filter_discard /* GRE dropped packets */;
+/* VXLAN dropped packets */
+ struct regpair eth_vxlan_tunn_filter_discard;
+/* GENEVE dropped packets */
+ struct regpair eth_geneve_tunn_filter_discard;
+};
+
+
+/*
+ * Tstorm VF zone
+ */
+struct tstorm_vf_zone {
+/* non-interrupt-triggering zone */
+ struct tstorm_non_trigger_vf_zone non_trigger;
+};
+
+
+/*
+ * Tunnel classification scheme
+ */
+enum tunnel_clss {
+/* Use MAC and VLAN from first L2 header for vport classification. */
+ TUNNEL_CLSS_MAC_VLAN = 0,
+/* Use MAC from first L2 header and VNI from tunnel header for vport
+ * classification
+ */
+ TUNNEL_CLSS_MAC_VNI,
+/* Use MAC and VLAN from last L2 header for vport classification */
+ TUNNEL_CLSS_INNER_MAC_VLAN,
+/* Use MAC from last L2 header and VNI from tunnel header for vport
+ * classification
+ */
+ TUNNEL_CLSS_INNER_MAC_VNI,
+/* Use MAC and VLAN from last L2 header for vport classification. If no exact
+ * match, use MAC and VLAN from first L2 header for classification.
+ */
+ TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE,
+ MAX_TUNNEL_CLSS
+};
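The tunnel_clss_* fields in pf_start_tunnel_config and pf_update_tunnel_config above take their values from this enum. A minimal illustrative sketch of filling the PF-start tunnel configuration for VXLAN follows; the function name and port handling are assumptions, not part of the HSI, and the cpu-to-little-endian conversion of the port is left to the caller.

/* Illustrative only: request a non-default VXLAN UDP port and MAC+VLAN
 * classification in the PF start ramrod tunnel configuration.
 */
static inline void
example_fill_pf_start_vxlan(struct pf_start_tunnel_config *cfg, u16 udp_port)
{
	cfg->set_vxlan_udp_port_flg = 1;  /* use udp_port, not the FW default */
	cfg->vxlan_udp_port = udp_port;   /* cpu-to-le16 conversion left to caller */
	cfg->tx_enable_vxlan = 1;         /* enable VXLAN on the TX path */
	cfg->tunnel_clss_vxlan = TUNNEL_CLSS_MAC_VLAN;
}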
+
+
+
+/*
+ * Ustorm non-triggering VF zone
+ */
+struct ustorm_non_trigger_vf_zone {
+/* VF statistic bucket */
+ struct eth_ustorm_per_queue_stat eth_queue_stat;
+ struct regpair vf_pf_msg_addr /* VF-PF message address */;
+};
+
+
+/*
+ * Ustorm triggering VF zone
+ */
+struct ustorm_trigger_vf_zone {
+ u8 vf_pf_msg_valid /* VF-PF message valid flag */;
+ u8 reserved[7];
+};
+
+
+/*
+ * Ustorm VF zone
+ */
+struct ustorm_vf_zone {
+/* non-interrupt-triggering zone */
+ struct ustorm_non_trigger_vf_zone non_trigger;
+ struct ustorm_trigger_vf_zone trigger /* interrupt triggering zone */;
+};
+
+
+/*
+ * VF-PF channel data
+ */
+struct vf_pf_channel_data {
+/* 0: VF-PF Channel NOT ready. Waiting for ack from PF driver. 1: VF-PF Channel
+ * is ready for a new transaction.
+ */
+ __le32 ready;
+/* 0: VF-PF Channel is invalid because of malicious VF. 1: VF-PF Channel is
+ * valid.
+ */
+ u8 valid;
+ u8 reserved0;
+ __le16 reserved1;
+};
+
+
+/*
+ * Ramrod data for VF start ramrod
+ */
+struct vf_start_ramrod_data {
+ u8 vf_id /* VF ID */;
+/* If set, initial cleanup ack will be sent to parent PF SP event queue */
+ u8 enable_flr_ack;
+ __le16 opaque_fid /* VF opaque FID */;
+ u8 personality /* defines the personality type of the new VF */;
+ u8 reserved[7];
+/* FP HSI version to be used by FW */
+ struct hsi_fp_ver_struct hsi_fp_ver;
+};
+
+
+/*
+ * Ramrod data for VF stop ramrod
+ */
+struct vf_stop_ramrod_data {
+ u8 vf_id /* VF ID */;
+ u8 reserved0;
+ __le16 reserved1;
+ __le32 reserved2;
+};
+
+
+/*
+ * VF zone size mode.
+ */
+enum vf_zone_size_mode {
+/* Default VF zone size. Up to 192 VFs supported. */
+ VF_ZONE_SIZE_MODE_DEFAULT,
+/* Doubled VF zone size. Up to 96 VFs supported. */
+ VF_ZONE_SIZE_MODE_DOUBLE,
+/* Quad VF zone size. Up to 48 VFs supported. */
+ VF_ZONE_SIZE_MODE_QUAD,
+ MAX_VF_ZONE_SIZE_MODE
+};
+
+
+
+
+/*
+ * Attentions status block
+ */
+struct atten_status_block {
+ __le32 atten_bits;
+ __le32 atten_ack;
+ __le16 reserved0;
+ __le16 sb_index /* status block running index */;
+ __le32 reserved1;
+};
+
+
+/*
+ * IGU cleanup bit values to distinguish between a cleanup and a producer/consumer
+ * update.
+ */
+enum command_type_bit {
+ IGU_COMMAND_TYPE_NOP = 0,
+ IGU_COMMAND_TYPE_SET = 1,
+ MAX_COMMAND_TYPE_BIT
+};
+
+
+/*
+ * DMAE command
+ */
+struct dmae_cmd {
+ __le32 opcode;
+/* DMA Source. 0 - PCIe, 1 - GRC (use enum dmae_cmd_src_enum) */
+#define DMAE_CMD_SRC_MASK 0x1
+#define DMAE_CMD_SRC_SHIFT 0
+/* DMA destination. 0 - None, 1 - PCIe, 2 - GRC, 3 - None
+ * (use enum dmae_cmd_dst_enum)
+ */
+#define DMAE_CMD_DST_MASK 0x3
+#define DMAE_CMD_DST_SHIFT 1
+/* Completion destination. 0 - PCIe, 1 - GRC (use enum dmae_cmd_c_dst_enum) */
+#define DMAE_CMD_C_DST_MASK 0x1
+#define DMAE_CMD_C_DST_SHIFT 3
+/* Reset the CRC result (do not use the previous result as the seed) */
+#define DMAE_CMD_CRC_RESET_MASK 0x1
+#define DMAE_CMD_CRC_RESET_SHIFT 4
+/* Reset the source address in the next go to the same source address of the
+ * previous go
+ */
+#define DMAE_CMD_SRC_ADDR_RESET_MASK 0x1
+#define DMAE_CMD_SRC_ADDR_RESET_SHIFT 5
+/* Reset the destination address in the next go to the same destination address
+ * of the previous go
+ */
+#define DMAE_CMD_DST_ADDR_RESET_MASK 0x1
+#define DMAE_CMD_DST_ADDR_RESET_SHIFT 6
+/* 0 - completion function is the same as src function, 1 - completion
+ * function is the same as dst function (use enum dmae_cmd_comp_func_enum)
+ */
+#define DMAE_CMD_COMP_FUNC_MASK 0x1
+#define DMAE_CMD_COMP_FUNC_SHIFT 7
+/* 0 - Do not write a completion word, 1 - Write a completion word
+ * (use enum dmae_cmd_comp_word_en_enum)
+ */
+#define DMAE_CMD_COMP_WORD_EN_MASK 0x1
+#define DMAE_CMD_COMP_WORD_EN_SHIFT 8
+/* 0 - Do not write a CRC word, 1 - Write a CRC word
+ * (use enum dmae_cmd_comp_crc_en_enum)
+ */
+#define DMAE_CMD_COMP_CRC_EN_MASK 0x1
+#define DMAE_CMD_COMP_CRC_EN_SHIFT 9
+/* The CRC word should be taken from the DMAE address space from address 9+X,
+ * where X is the value in these bits.
+ */
+#define DMAE_CMD_COMP_CRC_OFFSET_MASK 0x7
+#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
+#define DMAE_CMD_RESERVED1_MASK 0x1
+#define DMAE_CMD_RESERVED1_SHIFT 13
+#define DMAE_CMD_ENDIANITY_MODE_MASK 0x3
+#define DMAE_CMD_ENDIANITY_MODE_SHIFT 14
+/* The field specifies how the completion word is affected by a PCIe read error.
+ * 0 - Send a regular completion, 1 - Send a completion with an error indication,
+ * 2 - Do not send a completion (use enum dmae_cmd_error_handling_enum)
+ */
+#define DMAE_CMD_ERR_HANDLING_MASK 0x3
+#define DMAE_CMD_ERR_HANDLING_SHIFT 16
+/* The port ID to be placed on the RF FID field of the GRC bus. this field is
+ * used both when GRC is the destination and when it is the source of the DMAE
+ * transaction.
+ */
+#define DMAE_CMD_PORT_ID_MASK 0x3
+#define DMAE_CMD_PORT_ID_SHIFT 18
+/* Source PCI function number [3:0] */
+#define DMAE_CMD_SRC_PF_ID_MASK 0xF
+#define DMAE_CMD_SRC_PF_ID_SHIFT 20
+/* Destination PCI function number [3:0] */
+#define DMAE_CMD_DST_PF_ID_MASK 0xF
+#define DMAE_CMD_DST_PF_ID_SHIFT 24
+#define DMAE_CMD_SRC_VF_ID_VALID_MASK 0x1 /* Source VFID valid */
+#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
+#define DMAE_CMD_DST_VF_ID_VALID_MASK 0x1 /* Destination VFID valid */
+#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
+#define DMAE_CMD_RESERVED2_MASK 0x3
+#define DMAE_CMD_RESERVED2_SHIFT 30
+/* PCIe source address low in bytes or GRC source address in DW */
+ __le32 src_addr_lo;
+/* PCIe source address high in bytes or reserved (if source is GRC) */
+ __le32 src_addr_hi;
+/* PCIe destination address low in bytes or GRC destination address in DW */
+ __le32 dst_addr_lo;
+/* PCIe destination address high in bytes or reserved (if destination is GRC) */
+ __le32 dst_addr_hi;
+ __le16 length_dw /* Length in DW */;
+ __le16 opcode_b;
+#define DMAE_CMD_SRC_VF_ID_MASK 0xFF /* Source VF id */
+#define DMAE_CMD_SRC_VF_ID_SHIFT 0
+#define DMAE_CMD_DST_VF_ID_MASK 0xFF /* Destination VF id */
+#define DMAE_CMD_DST_VF_ID_SHIFT 8
+/* PCIe completion address low in bytes or GRC completion address in DW */
+ __le32 comp_addr_lo;
+/* PCIe completion address high in bytes or reserved (if completion address is
+ * GRC)
+ */
+ __le32 comp_addr_hi;
+ __le32 comp_val /* Value to write to completion address */;
+ __le32 crc32 /* crc32 result */;
+ __le32 crc_32_c /* crc32_c result */;
+ __le16 crc16 /* crc16 result */;
+ __le16 crc16_c /* crc16_c result */;
+ __le16 crc10 /* crc_t10 result */;
+ __le16 reserved;
+ __le16 xsum16 /* checksum16 result */;
+ __le16 xsum8 /* checksum8 result */;
+};
+
+
+enum dmae_cmd_comp_crc_en_enum {
+ dmae_cmd_comp_crc_disabled /* Do not write a CRC word */,
+ dmae_cmd_comp_crc_enabled /* Write a CRC word */,
+ MAX_DMAE_CMD_COMP_CRC_EN_ENUM
+};
+
+
+enum dmae_cmd_comp_func_enum {
+/* completion word and/or CRC will be sent to SRC-PCI function/SRC VFID */
+ dmae_cmd_comp_func_to_src,
+/* completion word and/or CRC will be sent to DST-PCI function/DST VFID */
+ dmae_cmd_comp_func_to_dst,
+ MAX_DMAE_CMD_COMP_FUNC_ENUM
+};
+
+
+enum dmae_cmd_comp_word_en_enum {
+ dmae_cmd_comp_word_disabled /* Do not write a completion word */,
+ dmae_cmd_comp_word_enabled /* Write the completion word */,
+ MAX_DMAE_CMD_COMP_WORD_EN_ENUM
+};
+
+
+enum dmae_cmd_c_dst_enum {
+ dmae_cmd_c_dst_pcie,
+ dmae_cmd_c_dst_grc,
+ MAX_DMAE_CMD_C_DST_ENUM
+};
+
+
+enum dmae_cmd_dst_enum {
+ dmae_cmd_dst_none_0,
+ dmae_cmd_dst_pcie,
+ dmae_cmd_dst_grc,
+ dmae_cmd_dst_none_3,
+ MAX_DMAE_CMD_DST_ENUM
+};
+
+
+enum dmae_cmd_error_handling_enum {
+/* Send a regular completion (with no error indication) */
+ dmae_cmd_error_handling_send_regular_comp,
+/* Send a completion with an error indication (i.e. set bit 31 of the completion
+ * word)
+ */
+ dmae_cmd_error_handling_send_comp_with_err,
+ dmae_cmd_error_handling_dont_send_comp /* Do not send a completion */,
+ MAX_DMAE_CMD_ERROR_HANDLING_ENUM
+};
+
+
+enum dmae_cmd_src_enum {
+ dmae_cmd_src_pcie /* The source is the PCIe */,
+ dmae_cmd_src_grc /* The source is the GRC */,
+ MAX_DMAE_CMD_SRC_ENUM
+};
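The opcode word of struct dmae_cmd is composed from the *_MASK/*_SHIFT pairs above together with these enums. A minimal sketch follows, assuming the OSAL u32 typedef used throughout these headers; the helper name is illustrative and the little-endian conversion before storing into cmd->opcode is left to the caller.

/* Illustrative only: build a DMAE opcode for a PCIe-to-GRC copy that writes
 * a completion word to the source function.
 */
static inline u32 example_dmae_opcode(void)
{
	u32 opcode = 0;

	opcode |= (u32)dmae_cmd_src_pcie << DMAE_CMD_SRC_SHIFT;
	opcode |= (u32)dmae_cmd_dst_grc << DMAE_CMD_DST_SHIFT;
	opcode |= (u32)dmae_cmd_comp_func_to_src << DMAE_CMD_COMP_FUNC_SHIFT;
	opcode |= (u32)dmae_cmd_comp_word_enabled << DMAE_CMD_COMP_WORD_EN_SHIFT;
	return opcode; /* caller converts to little endian before writing cmd->opcode */
}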
+
+
+struct e4_mstorm_core_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0 /* word0 */;
+ __le16 word1 /* word1 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+};
+
+
+
+
+
+struct e4_ystorm_core_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* word0 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le16 word1 /* word1 */;
+ __le16 word2 /* word2 */;
+ __le16 word3 /* word3 */;
+ __le16 word4 /* word4 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+};
+
+
+/*
+ * IGU cleanup command
+ */
+struct igu_cleanup {
+ __le32 sb_id_and_flags;
+#define IGU_CLEANUP_RESERVED0_MASK 0x7FFFFFF
+#define IGU_CLEANUP_RESERVED0_SHIFT 0
+/* cleanup clear - 0, set - 1 */
+#define IGU_CLEANUP_CLEANUP_SET_MASK 0x1
+#define IGU_CLEANUP_CLEANUP_SET_SHIFT 27
+#define IGU_CLEANUP_CLEANUP_TYPE_MASK 0x7
+#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
+/* must always be set (use enum command_type_bit) */
+#define IGU_CLEANUP_COMMAND_TYPE_MASK 0x1
+#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
+ __le32 reserved1;
+};
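A minimal sketch of composing sb_id_and_flags for a cleanup command, using the masks above and the mandatory COMMAND_TYPE bit from enum command_type_bit; the function name is illustrative and endianness conversion is left to the caller.

/* Illustrative only: set the cleanup bit, a cleanup type, and the always-set
 * command type bit of an IGU cleanup command.
 */
static inline u32 example_igu_cleanup_flags(u8 cleanup_type)
{
	u32 flags = 0;

	flags |= 1u << IGU_CLEANUP_CLEANUP_SET_SHIFT;	/* perform a cleanup */
	flags |= ((u32)cleanup_type & IGU_CLEANUP_CLEANUP_TYPE_MASK) <<
		 IGU_CLEANUP_CLEANUP_TYPE_SHIFT;
	flags |= (u32)IGU_COMMAND_TYPE_SET << IGU_CLEANUP_COMMAND_TYPE_SHIFT;
	return flags;
}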
+
+
+/*
+ * IGU firmware driver command
+ */
+union igu_command {
+ struct igu_prod_cons_update prod_cons_update;
+ struct igu_cleanup cleanup;
+};
+
+
+/*
+ * IGU firmware driver command
+ */
+struct igu_command_reg_ctrl {
+ __le16 opaque_fid;
+ __le16 igu_command_reg_ctrl_fields;
+#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK 0xFFF
+#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0
+#define IGU_COMMAND_REG_CTRL_RESERVED_MASK 0x7
+#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT 12
+/* command type: 0 - read, 1 - write */
+#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK 0x1
+#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
+};
+
+
+/*
+ * IGU mapping line structure
+ */
+struct igu_mapping_line {
+ __le32 igu_mapping_line_fields;
+#define IGU_MAPPING_LINE_VALID_MASK 0x1
+#define IGU_MAPPING_LINE_VALID_SHIFT 0
+#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK 0xFF
+#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT 1
+/* In BB: VF-0-120, PF-0-7; In K2: VF-0-191, PF-0-15 */
+#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK 0xFF
+#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9
+#define IGU_MAPPING_LINE_PF_VALID_MASK 0x1 /* PF-1, VF-0 */
+#define IGU_MAPPING_LINE_PF_VALID_SHIFT 17
+#define IGU_MAPPING_LINE_IPS_GROUP_MASK 0x3F
+#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT 18
+#define IGU_MAPPING_LINE_RESERVED_MASK 0xFF
+#define IGU_MAPPING_LINE_RESERVED_SHIFT 24
+};
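A hedged sketch of unpacking one mapping line with the masks above; the function name and output parameters are illustrative, and the input is assumed to be already converted to host endianness.

/* Illustrative only: extract the function number and PF/VF flag from an IGU
 * mapping (CAM) line.
 */
static inline void example_igu_line_decode(u32 line, u8 *func_num, u8 *is_pf)
{
	*func_num = (line >> IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT) &
		    IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK;
	*is_pf = (line >> IGU_MAPPING_LINE_PF_VALID_SHIFT) &
		 IGU_MAPPING_LINE_PF_VALID_MASK;
}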
+
+
+/*
+ * IGU MSIX line structure
+ */
+struct igu_msix_vector {
+ struct regpair address;
+ __le32 data;
+ __le32 msix_vector_fields;
+#define IGU_MSIX_VECTOR_MASK_BIT_MASK 0x1
+#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT 0
+#define IGU_MSIX_VECTOR_RESERVED0_MASK 0x7FFF
+#define IGU_MSIX_VECTOR_RESERVED0_SHIFT 1
+#define IGU_MSIX_VECTOR_STEERING_TAG_MASK 0xFF
+#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16
+#define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF
+#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24
+};
+
+
+/*
+ * per encapsulation type enabling flags
+ */
+struct prs_reg_encapsulation_type_en {
+ u8 flags;
+/* Enable bit for Ethernet-over-GRE (L2 GRE) encapsulation. */
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT 0
+/* Enable bit for IP-over-GRE (IP GRE) encapsulation. */
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT 1
+/* Enable bit for VXLAN encapsulation. */
+#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT 2
+/* Enable bit for T-Tag encapsulation. */
+#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT 3
+/* Enable bit for Ethernet-over-GENEVE (L2 GENEVE) encapsulation. */
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4
+/* Enable bit for IP-over-GENEVE (IP GENEVE) encapsulation. */
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT 5
+#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK 0x3
+#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT 6
+};
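A minimal sketch of building the flags byte, enabling VXLAN and Ethernet-over-GENEVE recognition as an example; which encapsulations to enable is configuration-dependent and the function name is illustrative.

/* Illustrative only: enable VXLAN and Ethernet-over-GENEVE parsing. */
static inline u8 example_prs_encap_flags(void)
{
	u8 flags = 0;

	flags |= 1 << PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
	flags |= 1 << PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
	return flags;
}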
+
+
+enum pxp_tph_st_hint {
+ TPH_ST_HINT_BIDIR /* Read/Write access by Host and Device */,
+ TPH_ST_HINT_REQUESTER /* Read/Write access by Device */,
+/* Device Write and Host Read, or Host Write and Device Read */
+ TPH_ST_HINT_TARGET,
+/* Device Write and Host Read, or Host Write and Device Read - with temporal
+ * reuse
+ */
+ TPH_ST_HINT_TARGET_PRIO,
+ MAX_PXP_TPH_ST_HINT
+};
+
+
+/*
+ * QM hardware structure of enable bypass credit mask
+ */
+struct qm_rf_bypass_mask {
+ u8 flags;
+#define QM_RF_BYPASS_MASK_LINEVOQ_MASK 0x1
+#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT 0
+#define QM_RF_BYPASS_MASK_RESERVED0_MASK 0x1
+#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1
+#define QM_RF_BYPASS_MASK_PFWFQ_MASK 0x1
+#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT 2
+#define QM_RF_BYPASS_MASK_VPWFQ_MASK 0x1
+#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT 3
+#define QM_RF_BYPASS_MASK_PFRL_MASK 0x1
+#define QM_RF_BYPASS_MASK_PFRL_SHIFT 4
+#define QM_RF_BYPASS_MASK_VPQCNRL_MASK 0x1
+#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT 5
+#define QM_RF_BYPASS_MASK_FWPAUSE_MASK 0x1
+#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT 6
+#define QM_RF_BYPASS_MASK_RESERVED1_MASK 0x1
+#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7
+};
+
+
+/*
+ * QM hardware structure of opportunistic credit mask
+ */
+struct qm_rf_opportunistic_mask {
+ __le16 flags;
+#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT 0
+#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT 1
+#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT 2
+#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT 3
+#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT 4
+#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT 5
+#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT 6
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT 7
+#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK 0x7F
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT 9
+};
+
+
+/*
+ * QM hardware structure of QM map memory
+ */
+struct qm_rf_pq_map {
+ __le32 reg;
+#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1 /* PQ active */
+#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0
+#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF /* RL ID */
+#define QM_RF_PQ_MAP_RL_ID_SHIFT 1
+/* the first PQ associated with the VPORT and VOQ of this PQ */
+#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF
+#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9
+#define QM_RF_PQ_MAP_VOQ_MASK 0x1F /* VOQ */
+#define QM_RF_PQ_MAP_VOQ_SHIFT 18
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3 /* WRR weight */
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
+#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1 /* RL active */
+#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25
+#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F
+#define QM_RF_PQ_MAP_RESERVED_SHIFT 26
+};
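A minimal sketch of packing one PQ map entry from the fields above; the parameter values are placeholders and the little-endian conversion is left to the caller.

/* Illustrative only: mark the PQ valid and encode its first VPORT PQ, VOQ and
 * WRR weight group.
 */
static inline u32 example_qm_pq_map(u16 vp_pq_id, u8 voq, u8 wrr_group)
{
	u32 reg = 0;

	reg |= 1u << QM_RF_PQ_MAP_PQ_VALID_SHIFT;
	reg |= ((u32)vp_pq_id & QM_RF_PQ_MAP_VP_PQ_ID_MASK) <<
	       QM_RF_PQ_MAP_VP_PQ_ID_SHIFT;
	reg |= ((u32)voq & QM_RF_PQ_MAP_VOQ_MASK) << QM_RF_PQ_MAP_VOQ_SHIFT;
	reg |= ((u32)wrr_group & QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK) <<
	       QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT;
	return reg;
}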
+
+
+/*
+ * Completion params for aggregated interrupt completion
+ */
+struct sdm_agg_int_comp_params {
+ __le16 params;
+/* the aggregated interrupt index, 0-31 */
+#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK 0x3F
+#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT 0
+/* 1 - set a bit in the aggregated vector, 0 - do not set */
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK 0x1
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6
+/* Bit number in the aggregated vector, 0-279 (TBD) */
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK 0x1FF
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT 7
+};
+
+
+/*
+ * SDM operation gen command (generate aggregative interrupt)
+ */
+struct sdm_op_gen {
+ __le32 command;
+/* completion parameters 0-15 */
+#define SDM_OP_GEN_COMP_PARAM_MASK 0xFFFF
+#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
+#define SDM_OP_GEN_COMP_TYPE_MASK 0xF /* completion type 16-19 */
+#define SDM_OP_GEN_COMP_TYPE_SHIFT 16
+#define SDM_OP_GEN_RESERVED_MASK 0xFFF /* reserved 20-31 */
+#define SDM_OP_GEN_RESERVED_SHIFT 20
+};
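A minimal sketch of building the command word, with the completion parameters packed per sdm_agg_int_comp_params above; the function name and the comp_type argument are illustrative, not values defined by the HSI.

/* Illustrative only: generate an aggregated interrupt that sets one bit of
 * the aggregated vector.
 */
static inline u32 example_sdm_op_gen(u8 agg_int_index, u16 vector_bit,
				     u8 comp_type)
{
	u32 params = 0, command = 0;

	params |= ((u32)agg_int_index &
		   SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK) <<
		  SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
	params |= 1u << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
	params |= ((u32)vector_bit &
		   SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK) <<
		  SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;

	command |= (params & SDM_OP_GEN_COMP_PARAM_MASK) <<
		   SDM_OP_GEN_COMP_PARAM_SHIFT;
	command |= ((u32)comp_type & SDM_OP_GEN_COMP_TYPE_MASK) <<
		   SDM_OP_GEN_COMP_TYPE_SHIFT;
	return command;
}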
+
+struct ystorm_core_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
+#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
+#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* word0 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le16 word1 /* word1 */;
+ __le16 word2 /* word2 */;
+ __le16 word3 /* word3 */;
+ __le16 word4 /* word4 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+};
+
+#endif /* __ECORE_HSI_COMMON__ */
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_hsi_debug_tools.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_hsi_debug_tools.h
new file mode 100644
index 00000000..917e8f4c
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_hsi_debug_tools.h
@@ -0,0 +1,1114 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_HSI_DEBUG_TOOLS__
+#define __ECORE_HSI_DEBUG_TOOLS__
+/****************************************/
+/* Debug Tools HSI constants and macros */
+/****************************************/
+
+
+enum block_addr {
+ GRCBASE_GRC = 0x50000,
+ GRCBASE_MISCS = 0x9000,
+ GRCBASE_MISC = 0x8000,
+ GRCBASE_DBU = 0xa000,
+ GRCBASE_PGLUE_B = 0x2a8000,
+ GRCBASE_CNIG = 0x218000,
+ GRCBASE_CPMU = 0x30000,
+ GRCBASE_NCSI = 0x40000,
+ GRCBASE_OPTE = 0x53000,
+ GRCBASE_BMB = 0x540000,
+ GRCBASE_PCIE = 0x54000,
+ GRCBASE_MCP = 0xe00000,
+ GRCBASE_MCP2 = 0x52000,
+ GRCBASE_PSWHST = 0x2a0000,
+ GRCBASE_PSWHST2 = 0x29e000,
+ GRCBASE_PSWRD = 0x29c000,
+ GRCBASE_PSWRD2 = 0x29d000,
+ GRCBASE_PSWWR = 0x29a000,
+ GRCBASE_PSWWR2 = 0x29b000,
+ GRCBASE_PSWRQ = 0x280000,
+ GRCBASE_PSWRQ2 = 0x240000,
+ GRCBASE_PGLCS = 0x0,
+ GRCBASE_DMAE = 0xc000,
+ GRCBASE_PTU = 0x560000,
+ GRCBASE_TCM = 0x1180000,
+ GRCBASE_MCM = 0x1200000,
+ GRCBASE_UCM = 0x1280000,
+ GRCBASE_XCM = 0x1000000,
+ GRCBASE_YCM = 0x1080000,
+ GRCBASE_PCM = 0x1100000,
+ GRCBASE_QM = 0x2f0000,
+ GRCBASE_TM = 0x2c0000,
+ GRCBASE_DORQ = 0x100000,
+ GRCBASE_BRB = 0x340000,
+ GRCBASE_SRC = 0x238000,
+ GRCBASE_PRS = 0x1f0000,
+ GRCBASE_TSDM = 0xfb0000,
+ GRCBASE_MSDM = 0xfc0000,
+ GRCBASE_USDM = 0xfd0000,
+ GRCBASE_XSDM = 0xf80000,
+ GRCBASE_YSDM = 0xf90000,
+ GRCBASE_PSDM = 0xfa0000,
+ GRCBASE_TSEM = 0x1700000,
+ GRCBASE_MSEM = 0x1800000,
+ GRCBASE_USEM = 0x1900000,
+ GRCBASE_XSEM = 0x1400000,
+ GRCBASE_YSEM = 0x1500000,
+ GRCBASE_PSEM = 0x1600000,
+ GRCBASE_RSS = 0x238800,
+ GRCBASE_TMLD = 0x4d0000,
+ GRCBASE_MULD = 0x4e0000,
+ GRCBASE_YULD = 0x4c8000,
+ GRCBASE_XYLD = 0x4c0000,
+ GRCBASE_PRM = 0x230000,
+ GRCBASE_PBF_PB1 = 0xda0000,
+ GRCBASE_PBF_PB2 = 0xda4000,
+ GRCBASE_RPB = 0x23c000,
+ GRCBASE_BTB = 0xdb0000,
+ GRCBASE_PBF = 0xd80000,
+ GRCBASE_RDIF = 0x300000,
+ GRCBASE_TDIF = 0x310000,
+ GRCBASE_CDU = 0x580000,
+ GRCBASE_CCFC = 0x2e0000,
+ GRCBASE_TCFC = 0x2d0000,
+ GRCBASE_IGU = 0x180000,
+ GRCBASE_CAU = 0x1c0000,
+ GRCBASE_UMAC = 0x51000,
+ GRCBASE_XMAC = 0x210000,
+ GRCBASE_DBG = 0x10000,
+ GRCBASE_NIG = 0x500000,
+ GRCBASE_WOL = 0x600000,
+ GRCBASE_BMBN = 0x610000,
+ GRCBASE_IPC = 0x20000,
+ GRCBASE_NWM = 0x800000,
+ GRCBASE_NWS = 0x700000,
+ GRCBASE_MS = 0x6a0000,
+ GRCBASE_PHY_PCIE = 0x620000,
+ GRCBASE_LED = 0x6b8000,
+ GRCBASE_AVS_WRAP = 0x6b0000,
+ GRCBASE_RGFS = 0x1fa0000,
+ GRCBASE_RGSRC = 0x1fa8000,
+ GRCBASE_TGFS = 0x1fb0000,
+ GRCBASE_TGSRC = 0x1fb8000,
+ GRCBASE_PTLD = 0x1fc0000,
+ GRCBASE_YPLD = 0x1fe0000,
+ GRCBASE_MISC_AEU = 0x8000,
+ GRCBASE_BAR0_MAP = 0x1c00000,
+ MAX_BLOCK_ADDR
+};
+
+
+enum block_id {
+ BLOCK_GRC,
+ BLOCK_MISCS,
+ BLOCK_MISC,
+ BLOCK_DBU,
+ BLOCK_PGLUE_B,
+ BLOCK_CNIG,
+ BLOCK_CPMU,
+ BLOCK_NCSI,
+ BLOCK_OPTE,
+ BLOCK_BMB,
+ BLOCK_PCIE,
+ BLOCK_MCP,
+ BLOCK_MCP2,
+ BLOCK_PSWHST,
+ BLOCK_PSWHST2,
+ BLOCK_PSWRD,
+ BLOCK_PSWRD2,
+ BLOCK_PSWWR,
+ BLOCK_PSWWR2,
+ BLOCK_PSWRQ,
+ BLOCK_PSWRQ2,
+ BLOCK_PGLCS,
+ BLOCK_DMAE,
+ BLOCK_PTU,
+ BLOCK_TCM,
+ BLOCK_MCM,
+ BLOCK_UCM,
+ BLOCK_XCM,
+ BLOCK_YCM,
+ BLOCK_PCM,
+ BLOCK_QM,
+ BLOCK_TM,
+ BLOCK_DORQ,
+ BLOCK_BRB,
+ BLOCK_SRC,
+ BLOCK_PRS,
+ BLOCK_TSDM,
+ BLOCK_MSDM,
+ BLOCK_USDM,
+ BLOCK_XSDM,
+ BLOCK_YSDM,
+ BLOCK_PSDM,
+ BLOCK_TSEM,
+ BLOCK_MSEM,
+ BLOCK_USEM,
+ BLOCK_XSEM,
+ BLOCK_YSEM,
+ BLOCK_PSEM,
+ BLOCK_RSS,
+ BLOCK_TMLD,
+ BLOCK_MULD,
+ BLOCK_YULD,
+ BLOCK_XYLD,
+ BLOCK_PRM,
+ BLOCK_PBF_PB1,
+ BLOCK_PBF_PB2,
+ BLOCK_RPB,
+ BLOCK_BTB,
+ BLOCK_PBF,
+ BLOCK_RDIF,
+ BLOCK_TDIF,
+ BLOCK_CDU,
+ BLOCK_CCFC,
+ BLOCK_TCFC,
+ BLOCK_IGU,
+ BLOCK_CAU,
+ BLOCK_UMAC,
+ BLOCK_XMAC,
+ BLOCK_DBG,
+ BLOCK_NIG,
+ BLOCK_WOL,
+ BLOCK_BMBN,
+ BLOCK_IPC,
+ BLOCK_NWM,
+ BLOCK_NWS,
+ BLOCK_MS,
+ BLOCK_PHY_PCIE,
+ BLOCK_LED,
+ BLOCK_AVS_WRAP,
+ BLOCK_RGFS,
+ BLOCK_RGSRC,
+ BLOCK_TGFS,
+ BLOCK_TGSRC,
+ BLOCK_PTLD,
+ BLOCK_YPLD,
+ BLOCK_MISC_AEU,
+ BLOCK_BAR0_MAP,
+ MAX_BLOCK_ID
+};
+
+
+/*
+ * binary debug buffer types
+ */
+enum bin_dbg_buffer_type {
+ BIN_BUF_DBG_MODE_TREE /* init modes tree */,
+ BIN_BUF_DBG_DUMP_REG /* GRC Dump registers */,
+ BIN_BUF_DBG_DUMP_MEM /* GRC Dump memories */,
+ BIN_BUF_DBG_IDLE_CHK_REGS /* Idle Check registers */,
+ BIN_BUF_DBG_IDLE_CHK_IMMS /* Idle Check immediates */,
+ BIN_BUF_DBG_IDLE_CHK_RULES /* Idle Check rules */,
+ BIN_BUF_DBG_IDLE_CHK_PARSING_DATA /* Idle Check parsing data */,
+ BIN_BUF_DBG_ATTN_BLOCKS /* Attention blocks */,
+ BIN_BUF_DBG_ATTN_REGS /* Attention registers */,
+ BIN_BUF_DBG_ATTN_INDEXES /* Attention indexes */,
+ BIN_BUF_DBG_ATTN_NAME_OFFSETS /* Attention name offsets */,
+ BIN_BUF_DBG_BUS_BLOCKS /* Debug Bus blocks */,
+ BIN_BUF_DBG_BUS_LINES /* Debug Bus lines */,
+ BIN_BUF_DBG_BUS_BLOCKS_USER_DATA /* Debug Bus blocks user data */,
+ BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS /* Debug Bus line name offsets */,
+ BIN_BUF_DBG_PARSING_STRINGS /* Debug Tools parsing strings */,
+ MAX_BIN_DBG_BUFFER_TYPE
+};
+
+
+/*
+ * Attention bit mapping
+ */
+struct dbg_attn_bit_mapping {
+ __le16 data;
+/* The index of an attention in the block's attentions list
+ * (if is_unused_bit_cnt=0), or a number of consecutive unused attention bits
+ * (if is_unused_bit_cnt=1)
+ */
+#define DBG_ATTN_BIT_MAPPING_VAL_MASK 0x7FFF
+#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT 0
+/* if set, the val field indicates the number of consecutive unused attention
+ * bits
+ */
+#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK 0x1
+#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT 15
+};
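A hedged sketch of interpreting one mapping entry per the comment above; the entry is assumed to be already in host endianness and the names are illustrative.

/* Illustrative only: return how many attention bits this entry spans, and
 * whether they are real attentions or an unused gap.
 */
static inline u16 example_attn_map_span(u16 data, u8 *is_unused)
{
	u16 val = (data >> DBG_ATTN_BIT_MAPPING_VAL_SHIFT) &
		  DBG_ATTN_BIT_MAPPING_VAL_MASK;

	*is_unused = (data >> DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT) &
		     DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK;
	/* unused: val is the number of consecutive unused bits; otherwise the
	 * entry covers one bit and val is its index in the block's attentions
	 * list.
	 */
	return *is_unused ? val : 1;
}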
+
+
+/*
+ * Attention block per-type data
+ */
+struct dbg_attn_block_type_data {
+/* Offset of this block's attention names in the debug attention name offsets
+ * array
+ */
+ __le16 names_offset;
+ __le16 reserved1;
+ u8 num_regs /* Number of attention registers in this block */;
+ u8 reserved2;
+/* Offset of this block's attention registers in the attention registers array
+ * (in dbg_attn_reg units)
+ */
+ __le16 regs_offset;
+};
+
+/*
+ * Block attentions
+ */
+struct dbg_attn_block {
+/* attention block per-type data. Count must match the number of elements in
+ * dbg_attn_type.
+ */
+ struct dbg_attn_block_type_data per_type_data[2];
+};
+
+
+/*
+ * Attention register result
+ */
+struct dbg_attn_reg_result {
+ __le32 data;
+/* STS attention register GRC address (in dwords) */
+#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF
+#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0
+/* Number of attention indexes in this register */
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24
+/* The offset of this register's attentions within the block's attentions
+ * list (a value in the range 0..number of block attentions-1)
+ */
+ __le16 attn_idx_offset;
+ __le16 reserved;
+ __le32 sts_val /* Value read from the STS attention register */;
+ __le32 mask_val /* Value read from the MASK attention register */;
+};
+
+/*
+ * Attention block result
+ */
+struct dbg_attn_block_result {
+ u8 block_id /* Registers block ID */;
+ u8 data;
+/* Value from dbg_attn_type enum */
+#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK 0x3
+#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0
+/* Number of registers in block in which at least one attention bit is set */
+#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK 0x3F
+#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT 2
+/* Offset of this register block's attention names in the attention name offsets
+ * array
+ */
+ __le16 names_offset;
+/* result data for each register in the block in which at least one attention
+ * bit is set
+ */
+ struct dbg_attn_reg_result reg_results[15];
+};
+
+
+
+/*
+ * mode header
+ */
+struct dbg_mode_hdr {
+ __le16 data;
+/* indicates if a mode expression should be evaluated (0/1) */
+#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1
+#define DBG_MODE_HDR_EVAL_MODE_SHIFT 0
+/* offset (in bytes) in modes expression buffer. valid only if eval_mode is
+ * set.
+ */
+#define DBG_MODE_HDR_MODES_BUF_OFFSET_MASK 0x7FFF
+#define DBG_MODE_HDR_MODES_BUF_OFFSET_SHIFT 1
+};
+
+/*
+ * Attention register
+ */
+struct dbg_attn_reg {
+ struct dbg_mode_hdr mode /* Mode header */;
+/* The offset of this register's attentions within the block's attentions
+ * list (a value in the range 0..number of block attentions-1)
+ */
+ __le16 attn_idx_offset;
+ __le32 data;
+/* STS attention register GRC address (in dwords) */
+#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF
+#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0
+/* Number of attentions in this register */
+#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF
+#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24
+/* STS_CLR attention register GRC address (in dwords) */
+ __le32 sts_clr_address;
+/* MASK attention register GRC address (in dwords) */
+ __le32 mask_address;
+};
+
+
+
+/*
+ * attention types
+ */
+enum dbg_attn_type {
+ ATTN_TYPE_INTERRUPT,
+ ATTN_TYPE_PARITY,
+ MAX_DBG_ATTN_TYPE
+};
+
+
+/*
+ * Debug Bus block data
+ */
+struct dbg_bus_block {
+/* Number of debug lines in this block (excluding signature & latency events) */
+ u8 num_of_lines;
+/* Indicates if this block has a latency events debug line (0/1). */
+ u8 has_latency_events;
+/* Offset of this block's lines in the Debug Bus lines array. */
+ __le16 lines_offset;
+};
+
+
+/*
+ * Debug Bus block user data
+ */
+struct dbg_bus_block_user_data {
+/* Number of debug lines in this block (excluding signature & latency events) */
+ u8 num_of_lines;
+/* Indicates if this block has a latency events debug line (0/1). */
+ u8 has_latency_events;
+/* Offset of this block's lines in the debug bus line name offsets array. */
+ __le16 names_offset;
+};
+
+
+/*
+ * Block Debug line data
+ */
+struct dbg_bus_line {
+ u8 data;
+/* Number of groups in the line (0-3) */
+#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF
+#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0
+/* Indicates if this is a 128b line (0) or a 256b line (1). */
+#define DBG_BUS_LINE_IS_256B_MASK 0x1
+#define DBG_BUS_LINE_IS_256B_SHIFT 4
+#define DBG_BUS_LINE_RESERVED_MASK 0x7
+#define DBG_BUS_LINE_RESERVED_SHIFT 5
+/* Four 2-bit values, indicating the size of each group minus 1 (i.e.
+ * value=0 means size=1, value=1 means size=2, etc), starting from lsb.
+ * The sizes are in dwords (if is_256b=0) or in qwords (if is_256b=1).
+ */
+ u8 group_sizes;
+};
+
+
+/*
+ * condition header for registers dump
+ */
+struct dbg_dump_cond_hdr {
+ struct dbg_mode_hdr mode /* Mode header */;
+ u8 block_id /* block ID */;
+ u8 data_size /* size in dwords of the data following this header */;
+};
+
+
+/*
+ * memory data for registers dump
+ */
+struct dbg_dump_mem {
+ __le32 dword0;
+/* register address (in dwords) */
+#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF
+#define DBG_DUMP_MEM_ADDRESS_SHIFT 0
+#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF /* memory group ID */
+#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24
+ __le32 dword1;
+/* register size (in dwords) */
+#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF
+#define DBG_DUMP_MEM_LENGTH_SHIFT 0
+/* indicates if the register is wide-bus */
+#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1
+#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24
+#define DBG_DUMP_MEM_RESERVED_MASK 0x7F
+#define DBG_DUMP_MEM_RESERVED_SHIFT 25
+};
+
+
+/*
+ * register data for registers dump
+ */
+struct dbg_dump_reg {
+ __le32 data;
+/* register address (in dwords) */
+#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF /* register address (in dwords) */
+#define DBG_DUMP_REG_ADDRESS_SHIFT 0
+/* indicates if the register is wide-bus */
+#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1
+#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23
+#define DBG_DUMP_REG_LENGTH_MASK 0xFF /* register size (in dwords) */
+#define DBG_DUMP_REG_LENGTH_SHIFT 24
+};
+
+
+/*
+ * split header for registers dump
+ */
+struct dbg_dump_split_hdr {
+ __le32 hdr;
+/* size in dwords of the data following this header */
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF /* split type ID */
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24
+};
+
+
+/*
+ * condition header for idle check
+ */
+struct dbg_idle_chk_cond_hdr {
+ struct dbg_mode_hdr mode /* Mode header */;
+/* size in dwords of the data following this header */
+ __le16 data_size;
+};
+
+
+/*
+ * Idle Check condition register
+ */
+struct dbg_idle_chk_cond_reg {
+ __le32 data;
+/* Register GRC address (in dwords) */
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0
+/* indicates if the register is wide-bus */
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23
+/* value from block_id enum */
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
+ __le16 num_entries /* number of registers entries to check */;
+ u8 entry_size /* size of registers entry (in dwords) */;
+ u8 start_entry /* index of the first entry to check */;
+};
+
+
+/*
+ * Idle Check info register
+ */
+struct dbg_idle_chk_info_reg {
+ __le32 data;
+/* Register GRC address (in dwords) */
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0
+/* indicates if the register is wide-bus */
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23
+/* value from block_id enum */
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
+ __le16 size /* register size in dwords */;
+ struct dbg_mode_hdr mode /* Mode header */;
+};
+
+
+/*
+ * Idle Check register
+ */
+union dbg_idle_chk_reg {
+ struct dbg_idle_chk_cond_reg cond_reg /* condition register */;
+ struct dbg_idle_chk_info_reg info_reg /* info register */;
+};
+
+
+/*
+ * Idle Check result header
+ */
+struct dbg_idle_chk_result_hdr {
+ __le16 rule_id /* Failing rule index */;
+ __le16 mem_entry_id /* Failing memory entry index */;
+ u8 num_dumped_cond_regs /* number of dumped condition registers */;
+ u8 num_dumped_info_regs /* number of dumped info registers */;
+ u8 severity /* from dbg_idle_chk_severity_types enum */;
+ u8 reserved;
+};
+
+
+/*
+ * Idle Check result register header
+ */
+struct dbg_idle_chk_result_reg_hdr {
+ u8 data;
+/* indicates if this register is a memory */
+#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_MASK 0x1
+#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_SHIFT 0
+/* register index within the failing rule */
+#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK 0x7F
+#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1
+ u8 start_entry /* index of the first checked entry */;
+ __le16 size /* register size in dwords */;
+};
+
+
+/*
+ * Idle Check rule
+ */
+struct dbg_idle_chk_rule {
+ __le16 rule_id /* Idle Check rule ID */;
+ u8 severity /* value from dbg_idle_chk_severity_types enum */;
+ u8 cond_id /* Condition ID */;
+ u8 num_cond_regs /* number of condition registers */;
+ u8 num_info_regs /* number of info registers */;
+ u8 num_imms /* number of immediates in the condition */;
+ u8 reserved1;
+/* offset of this rule's registers in the idle check register array
+ * (in dbg_idle_chk_reg units)
+ */
+ __le16 reg_offset;
+/* offset of this rule's immediate values in the immediate values array
+ * (in dwords)
+ */
+ __le16 imm_offset;
+};
+
+
+/*
+ * Idle Check rule parsing data
+ */
+struct dbg_idle_chk_rule_parsing_data {
+ __le32 data;
+/* indicates if this register has a FW message */
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0
+/* Offset of this rule's strings in the debug strings array (in bytes) */
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1
+};
+
+
+/*
+ * idle check severity types
+ */
+enum dbg_idle_chk_severity_types {
+/* idle check failure should cause an error */
+ IDLE_CHK_SEVERITY_ERROR,
+/* idle check failure should cause an error only if there is no traffic */
+ IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC,
+/* idle check failure should cause a warning */
+ IDLE_CHK_SEVERITY_WARNING,
+ MAX_DBG_IDLE_CHK_SEVERITY_TYPES
+};
+
+
+
+/*
+ * Debug Bus block data
+ */
+struct dbg_bus_block_data {
+ __le16 data;
+/* 4-bit value: bit i set -> dword/qword i is enabled. */
+#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT 0
+/* Number of dwords/qwords to shift right the debug data (0-3) */
+#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_SHIFT 4
+/* 4-bit value: bit i set -> dword/qword i is forced valid. */
+#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_SHIFT 8
+/* 4-bit value: bit i set -> dword/qword i frame bit is forced. */
+#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_SHIFT 12
+ u8 line_num /* Debug line number to select */;
+ u8 hw_id /* HW ID associated with the block */;
+};
+
+
+/*
+ * Debug Bus Clients
+ */
+enum dbg_bus_clients {
+ DBG_BUS_CLIENT_RBCN,
+ DBG_BUS_CLIENT_RBCP,
+ DBG_BUS_CLIENT_RBCR,
+ DBG_BUS_CLIENT_RBCT,
+ DBG_BUS_CLIENT_RBCU,
+ DBG_BUS_CLIENT_RBCF,
+ DBG_BUS_CLIENT_RBCX,
+ DBG_BUS_CLIENT_RBCS,
+ DBG_BUS_CLIENT_RBCH,
+ DBG_BUS_CLIENT_RBCZ,
+ DBG_BUS_CLIENT_OTHER_ENGINE,
+ DBG_BUS_CLIENT_TIMESTAMP,
+ DBG_BUS_CLIENT_CPU,
+ DBG_BUS_CLIENT_RBCY,
+ DBG_BUS_CLIENT_RBCQ,
+ DBG_BUS_CLIENT_RBCM,
+ DBG_BUS_CLIENT_RBCB,
+ DBG_BUS_CLIENT_RBCW,
+ DBG_BUS_CLIENT_RBCV,
+ MAX_DBG_BUS_CLIENTS
+};
+
+
+/*
+ * Debug Bus constraint operation types
+ */
+enum dbg_bus_constraint_ops {
+ DBG_BUS_CONSTRAINT_OP_EQ /* equal */,
+ DBG_BUS_CONSTRAINT_OP_NE /* not equal */,
+ DBG_BUS_CONSTRAINT_OP_LT /* less than */,
+ DBG_BUS_CONSTRAINT_OP_LTC /* less than (cyclic) */,
+ DBG_BUS_CONSTRAINT_OP_LE /* less than or equal */,
+ DBG_BUS_CONSTRAINT_OP_LEC /* less than or equal (cyclic) */,
+ DBG_BUS_CONSTRAINT_OP_GT /* greater than */,
+ DBG_BUS_CONSTRAINT_OP_GTC /* greater than (cyclic) */,
+ DBG_BUS_CONSTRAINT_OP_GE /* greater than or equal */,
+ DBG_BUS_CONSTRAINT_OP_GEC /* greater than or equal (cyclic) */,
+ MAX_DBG_BUS_CONSTRAINT_OPS
+};
+
+
+/*
+ * Debug Bus trigger state data
+ */
+struct dbg_bus_trigger_state_data {
+ u8 data;
+/* 4-bit value: bit i set -> dword i of the trigger state block
+ * (after right shift) is enabled.
+ */
+#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_MASK 0xF
+#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_SHIFT 0
+/* 4-bit value: bit i set -> dword i is compared by a constraint */
+#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_MASK 0xF
+#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_SHIFT 4
+};
+
+/*
+ * Debug Bus memory address
+ */
+struct dbg_bus_mem_addr {
+ __le32 lo;
+ __le32 hi;
+};
+
+/*
+ * Debug Bus PCI buffer data
+ */
+struct dbg_bus_pci_buf_data {
+ struct dbg_bus_mem_addr phys_addr /* PCI buffer physical address */;
+ struct dbg_bus_mem_addr virt_addr /* PCI buffer virtual address */;
+ __le32 size /* PCI buffer size in bytes */;
+};
+
+/*
+ * Debug Bus Storm EID range filter params
+ */
+struct dbg_bus_storm_eid_range_params {
+ u8 min /* Minimal event ID to filter on */;
+ u8 max /* Maximal event ID to filter on */;
+};
+
+/*
+ * Debug Bus Storm EID mask filter params
+ */
+struct dbg_bus_storm_eid_mask_params {
+ u8 val /* Event ID value */;
+ u8 mask /* Event ID mask. 1s in the mask = don't-care bits. */;
+};
+
+/*
+ * Debug Bus Storm EID filter params
+ */
+union dbg_bus_storm_eid_params {
+/* EID range filter params */
+ struct dbg_bus_storm_eid_range_params range;
+/* EID mask filter params */
+ struct dbg_bus_storm_eid_mask_params mask;
+};
+
+/*
+ * Debug Bus Storm data
+ */
+struct dbg_bus_storm_data {
+ u8 enabled /* indicates if the Storm is enabled for recording */;
+ u8 mode /* Storm debug mode, valid only if the Storm is enabled */;
+ u8 hw_id /* HW ID associated with the Storm */;
+ u8 eid_filter_en /* Indicates if EID filtering is performed (0/1) */;
+/* 1 = EID range filter, 0 = EID mask filter. Valid only if eid_filter_en is
+ * set.
+ */
+ u8 eid_range_not_mask;
+ u8 cid_filter_en /* Indicates if CID filtering is performed (0/1) */;
+/* EID filter params to filter on. Valid only if eid_filter_en is set. */
+ union dbg_bus_storm_eid_params eid_filter_params;
+/* CID to filter on. Valid only if cid_filter_en is set. */
+ __le32 cid;
+};
+
+/*
+ * Debug Bus data
+ */
+struct dbg_bus_data {
+ __le32 app_version /* The tools version number of the application */;
+ u8 state /* The current debug bus state */;
+ u8 hw_dwords /* HW dwords per cycle */;
+/* The HW IDs of the recorded HW blocks, where bits i*3..i*3+2 contain the
+ * HW ID of dword/qword i
+ */
+ __le16 hw_id_mask;
+ u8 num_enabled_blocks /* Number of blocks enabled for recording */;
+ u8 num_enabled_storms /* Number of Storms enabled for recording */;
+ u8 target /* Output target */;
+ u8 one_shot_en /* Indicates if one-shot mode is enabled (0/1) */;
+ u8 grc_input_en /* Indicates if GRC recording is enabled (0/1) */;
+/* Indicates if timestamp recording is enabled (0/1) */
+ u8 timestamp_input_en;
+ u8 filter_en /* Indicates if the recording filter is enabled (0/1) */;
+/* If true, the next added constraint belongs to the filter. Otherwise,
+ * it belongs to the last added trigger state. Valid only if either filter or
+ * triggers are enabled.
+ */
+ u8 adding_filter;
+/* Indicates if the recording filter should be applied before the trigger.
+ * Valid only if both filter and trigger are enabled (0/1)
+ */
+ u8 filter_pre_trigger;
+/* Indicates if the recording filter should be applied after the trigger.
+ * Valid only if both filter and trigger are enabled (0/1)
+ */
+ u8 filter_post_trigger;
+ __le16 reserved;
+/* Indicates if the recording trigger is enabled (0/1) */
+ u8 trigger_en;
+/* trigger states data */
+ struct dbg_bus_trigger_state_data trigger_states[3];
+ u8 next_trigger_state /* ID of next trigger state to be added */;
+/* ID of next filter/trigger constraint to be added */
+ u8 next_constraint_id;
+/* If true, all inputs are associated with HW ID 0. Otherwise, each input is
+ * assigned a different HW ID (0/1)
+ */
+ u8 unify_inputs;
+/* Indicates if the other engine sends its NW recording to this engine (0/1) */
+ u8 rcv_from_other_engine;
+/* Debug Bus PCI buffer data. Valid only when the target is
+ * DBG_BUS_TARGET_ID_PCI.
+ */
+ struct dbg_bus_pci_buf_data pci_buf;
+/* Debug Bus data for each block */
+ struct dbg_bus_block_data blocks[88];
+/* Debug Bus data for each Storm */
+ struct dbg_bus_storm_data storms[6];
+};
+
+
+/*
+ * Debug bus filter types
+ */
+enum dbg_bus_filter_types {
+ DBG_BUS_FILTER_TYPE_OFF /* filter always off */,
+ DBG_BUS_FILTER_TYPE_PRE /* filter before trigger only */,
+ DBG_BUS_FILTER_TYPE_POST /* filter after trigger only */,
+ DBG_BUS_FILTER_TYPE_ON /* filter always on */,
+ MAX_DBG_BUS_FILTER_TYPES
+};
+
+
+/*
+ * Debug bus frame modes
+ */
+enum dbg_bus_frame_modes {
+ DBG_BUS_FRAME_MODE_0HW_4ST = 0 /* 0 HW dwords, 4 Storm dwords */,
+ DBG_BUS_FRAME_MODE_4HW_0ST = 3 /* 4 HW dwords, 0 Storm dwords */,
+ DBG_BUS_FRAME_MODE_8HW_0ST = 4 /* 8 HW dwords, 0 Storm dwords */,
+ MAX_DBG_BUS_FRAME_MODES
+};
+
+
+/*
+ * Debug bus other engine mode
+ */
+enum dbg_bus_other_engine_modes {
+ DBG_BUS_OTHER_ENGINE_MODE_NONE,
+ DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
+ DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX,
+ DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX,
+ DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX,
+ MAX_DBG_BUS_OTHER_ENGINE_MODES
+};
+
+
+
+/*
+ * Debug bus post-trigger recording types
+ */
+enum dbg_bus_post_trigger_types {
+ DBG_BUS_POST_TRIGGER_RECORD /* start recording after trigger */,
+ DBG_BUS_POST_TRIGGER_DROP /* drop data after trigger */,
+ MAX_DBG_BUS_POST_TRIGGER_TYPES
+};
+
+
+/*
+ * Debug bus pre-trigger recording types
+ */
+enum dbg_bus_pre_trigger_types {
+ DBG_BUS_PRE_TRIGGER_START_FROM_ZERO /* start recording from time 0 */,
+/* start recording some chunks before trigger */
+ DBG_BUS_PRE_TRIGGER_NUM_CHUNKS,
+ DBG_BUS_PRE_TRIGGER_DROP /* drop data before trigger */,
+ MAX_DBG_BUS_PRE_TRIGGER_TYPES
+};
+
+
+/*
+ * Debug bus SEMI frame modes
+ */
+enum dbg_bus_semi_frame_modes {
+/* 0 slow dwords, 4 fast dwords */
+ DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0,
+/* 4 slow dwords, 0 fast dwords */
+ DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3,
+ MAX_DBG_BUS_SEMI_FRAME_MODES
+};
+
+
+/*
+ * Debug bus states
+ */
+enum dbg_bus_states {
+ DBG_BUS_STATE_IDLE /* debug bus idle state (not recording) */,
+/* debug bus is ready for configuration and recording */
+ DBG_BUS_STATE_READY,
+ DBG_BUS_STATE_RECORDING /* debug bus is currently recording */,
+ DBG_BUS_STATE_STOPPED /* debug bus recording has stopped */,
+ MAX_DBG_BUS_STATES
+};
+
+
+
+
+
+
+/*
+ * Debug Bus Storm modes
+ */
+enum dbg_bus_storm_modes {
+ DBG_BUS_STORM_MODE_PRINTF /* store data (fast debug) */,
+ DBG_BUS_STORM_MODE_PRAM_ADDR /* pram address (fast debug) */,
+ DBG_BUS_STORM_MODE_DRA_RW /* DRA read/write data (fast debug) */,
+ DBG_BUS_STORM_MODE_DRA_W /* DRA write data (fast debug) */,
+ DBG_BUS_STORM_MODE_LD_ST_ADDR /* load/store address (fast debug) */,
+ DBG_BUS_STORM_MODE_DRA_FSM /* DRA state machines (fast debug) */,
+ DBG_BUS_STORM_MODE_RH /* recording handlers (fast debug) */,
+ DBG_BUS_STORM_MODE_FOC /* FOC: FIN + DRA Rd (slow debug) */,
+ DBG_BUS_STORM_MODE_EXT_STORE /* FOC: External Store (slow) */,
+ MAX_DBG_BUS_STORM_MODES
+};
+
+
+/*
+ * Debug bus target IDs
+ */
+enum dbg_bus_targets {
+/* records debug bus to DBG block internal buffer */
+ DBG_BUS_TARGET_ID_INT_BUF,
+ DBG_BUS_TARGET_ID_NIG /* records debug bus to the NW */,
+ DBG_BUS_TARGET_ID_PCI /* records debug bus to a PCI buffer */,
+ MAX_DBG_BUS_TARGETS
+};
+
+
+
+/*
+ * GRC Dump data
+ */
+struct dbg_grc_data {
+/* Indicates if the GRC parameters were initialized */
+ u8 params_initialized;
+ u8 reserved1;
+ __le16 reserved2;
+/* Value of each GRC parameter. Array size must match enum dbg_grc_params. */
+ __le32 param_val[48];
+};
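As a rough sketch of how this structure is consumed: param_val is indexed directly by the enum dbg_grc_params values defined next, so toggling a memory class in the dump amounts to writing 0/1 at the matching index. The helper below is illustrative only (the function name is not part of the upstream API, it assumes this header and the common HSI typedefs are already included, and endianness conversion of the __le32 entries is omitted):

/* Illustrative helper: set one GRC dump parameter by its enum index. */
static void dbg_grc_set_param(struct dbg_grc_data *grc,
			      enum dbg_grc_params param, u32 val)
{
	grc->param_val[param] = val;     /* array indexed by enum dbg_grc_params */
	grc->params_initialized = 1;     /* mark the GRC parameters as set */
}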
+
+
+/*
+ * Debug GRC params
+ */
+enum dbg_grc_params {
+ DBG_GRC_PARAM_DUMP_TSTORM /* dump Tstorm memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_MSTORM /* dump Mstorm memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_USTORM /* dump Ustorm memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_XSTORM /* dump Xstorm memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_YSTORM /* dump Ystorm memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_PSTORM /* dump Pstorm memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_REGS /* dump non-memory registers (0/1) */,
+ DBG_GRC_PARAM_DUMP_RAM /* dump Storm internal RAMs (0/1) */,
+ DBG_GRC_PARAM_DUMP_PBUF /* dump Storm passive buffer (0/1) */,
+ DBG_GRC_PARAM_DUMP_IOR /* dump Storm IORs (0/1) */,
+ DBG_GRC_PARAM_DUMP_VFC /* dump VFC memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_CM_CTX /* dump CM contexts (0/1) */,
+ DBG_GRC_PARAM_DUMP_PXP /* dump PXP memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_RSS /* dump RSS memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_CAU /* dump CAU memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_QM /* dump QM memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_MCP /* dump MCP memories (0/1) */,
+ DBG_GRC_PARAM_RESERVED /* reserved */,
+ DBG_GRC_PARAM_DUMP_CFC /* dump CFC memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_IGU /* dump IGU memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_BRB /* dump BRB memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_BTB /* dump BTB memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_BMB /* dump BMB memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_NIG /* dump NIG memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_MULD /* dump MULD memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_PRS /* dump PRS memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_DMAE /* dump DMAE memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_TM /* dump TM (timers) memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_SDM /* dump SDM memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_DIF /* dump DIF memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_STATIC /* dump static debug data (0/1) */,
+ DBG_GRC_PARAM_UNSTALL /* un-stall Storms after dump (0/1) */,
+ DBG_GRC_PARAM_NUM_LCIDS /* number of LCIDs (0..320) */,
+ DBG_GRC_PARAM_NUM_LTIDS /* number of LTIDs (0..320) */,
+/* preset: exclude all memories from dump (1 only) */
+ DBG_GRC_PARAM_EXCLUDE_ALL,
+/* preset: include memories for crash dump (1 only) */
+ DBG_GRC_PARAM_CRASH,
+/* perform dump only if MFW is responding (0/1) */
+ DBG_GRC_PARAM_PARITY_SAFE,
+ DBG_GRC_PARAM_DUMP_CM /* dump CM memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_PHY /* dump PHY memories (0/1) */,
+ DBG_GRC_PARAM_NO_MCP /* don't perform MCP commands (0/1) */,
+ DBG_GRC_PARAM_NO_FW_VER /* don't read FW/MFW version (0/1) */,
+ MAX_DBG_GRC_PARAMS
+};
+
+
+/*
+ * Debug reset registers
+ */
+enum dbg_reset_regs {
+ DBG_RESET_REG_MISCS_PL_UA,
+ DBG_RESET_REG_MISCS_PL_HV,
+ DBG_RESET_REG_MISCS_PL_HV_2,
+ DBG_RESET_REG_MISC_PL_UA,
+ DBG_RESET_REG_MISC_PL_HV,
+ DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
+ DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
+ DBG_RESET_REG_MISC_PL_PDA_VAUX,
+ MAX_DBG_RESET_REGS
+};
+
+
+/*
+ * Debug status codes
+ */
+enum dbg_status {
+ DBG_STATUS_OK,
+ DBG_STATUS_APP_VERSION_NOT_SET,
+ DBG_STATUS_UNSUPPORTED_APP_VERSION,
+ DBG_STATUS_DBG_BLOCK_NOT_RESET,
+ DBG_STATUS_INVALID_ARGS,
+ DBG_STATUS_OUTPUT_ALREADY_SET,
+ DBG_STATUS_INVALID_PCI_BUF_SIZE,
+ DBG_STATUS_PCI_BUF_ALLOC_FAILED,
+ DBG_STATUS_PCI_BUF_NOT_ALLOCATED,
+ DBG_STATUS_TOO_MANY_INPUTS,
+ DBG_STATUS_INPUT_OVERLAP,
+ DBG_STATUS_HW_ONLY_RECORDING,
+ DBG_STATUS_STORM_ALREADY_ENABLED,
+ DBG_STATUS_STORM_NOT_ENABLED,
+ DBG_STATUS_BLOCK_ALREADY_ENABLED,
+ DBG_STATUS_BLOCK_NOT_ENABLED,
+ DBG_STATUS_NO_INPUT_ENABLED,
+ DBG_STATUS_NO_FILTER_TRIGGER_64B,
+ DBG_STATUS_FILTER_ALREADY_ENABLED,
+ DBG_STATUS_TRIGGER_ALREADY_ENABLED,
+ DBG_STATUS_TRIGGER_NOT_ENABLED,
+ DBG_STATUS_CANT_ADD_CONSTRAINT,
+ DBG_STATUS_TOO_MANY_TRIGGER_STATES,
+ DBG_STATUS_TOO_MANY_CONSTRAINTS,
+ DBG_STATUS_RECORDING_NOT_STARTED,
+ DBG_STATUS_DATA_DIDNT_TRIGGER,
+ DBG_STATUS_NO_DATA_RECORDED,
+ DBG_STATUS_DUMP_BUF_TOO_SMALL,
+ DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED,
+ DBG_STATUS_UNKNOWN_CHIP,
+ DBG_STATUS_VIRT_MEM_ALLOC_FAILED,
+ DBG_STATUS_BLOCK_IN_RESET,
+ DBG_STATUS_INVALID_TRACE_SIGNATURE,
+ DBG_STATUS_INVALID_NVRAM_BUNDLE,
+ DBG_STATUS_NVRAM_GET_IMAGE_FAILED,
+ DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE,
+ DBG_STATUS_NVRAM_READ_FAILED,
+ DBG_STATUS_IDLE_CHK_PARSE_FAILED,
+ DBG_STATUS_MCP_TRACE_BAD_DATA,
+ DBG_STATUS_MCP_TRACE_NO_META,
+ DBG_STATUS_MCP_COULD_NOT_HALT,
+ DBG_STATUS_MCP_COULD_NOT_RESUME,
+ DBG_STATUS_DMAE_FAILED,
+ DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
+ DBG_STATUS_IGU_FIFO_BAD_DATA,
+ DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
+ DBG_STATUS_FW_ASSERTS_PARSE_FAILED,
+ DBG_STATUS_REG_FIFO_BAD_DATA,
+ DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
+ DBG_STATUS_DBG_ARRAY_NOT_SET,
+ DBG_STATUS_FILTER_BUG,
+ DBG_STATUS_NON_MATCHING_LINES,
+ DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET,
+ DBG_STATUS_DBG_BUS_IN_USE,
+ MAX_DBG_STATUS
+};
+
+
+/*
+ * Debug Storms IDs
+ */
+enum dbg_storms {
+ DBG_TSTORM_ID,
+ DBG_MSTORM_ID,
+ DBG_USTORM_ID,
+ DBG_XSTORM_ID,
+ DBG_YSTORM_ID,
+ DBG_PSTORM_ID,
+ MAX_DBG_STORMS
+};
+
+
+/*
+ * Idle Check data
+ */
+struct idle_chk_data {
+ __le32 buf_size /* Idle check buffer size in dwords */;
+/* Indicates if the idle check buffer size was set (0/1) */
+ u8 buf_size_set;
+ u8 reserved1;
+ __le16 reserved2;
+};
+
+/*
+ * Debug Tools data (per HW function)
+ */
+struct dbg_tools_data {
+ struct dbg_grc_data grc /* GRC Dump data */;
+ struct dbg_bus_data bus /* Debug Bus data */;
+ struct idle_chk_data idle_chk /* Idle Check data */;
+ u8 mode_enable[40] /* Indicates if a mode is enabled (0/1) */;
+/* Indicates if a block is in reset state (0/1) */
+ u8 block_in_reset[88];
+ u8 chip_id /* Chip ID (from enum chip_ids) */;
+ u8 platform_id /* Platform ID */;
+ u8 initialized /* Indicates if the data was initialized */;
+ u8 reserved;
+};
+
+
+#endif /* __ECORE_HSI_DEBUG_TOOLS__ */
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_hsi_eth.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_hsi_eth.h
new file mode 100644
index 00000000..397c408d
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_hsi_eth.h
@@ -0,0 +1,2400 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_HSI_ETH__
+#define __ECORE_HSI_ETH__
+/************************************************************************/
+/* Add include to common eth target for both eCore and protocol driver */
+/************************************************************************/
+#include "eth_common.h"
+
+/*
+ * The eth storm context for the Tstorm
+ */
+struct tstorm_eth_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/*
+ * The eth storm context for the Pstorm
+ */
+struct pstorm_eth_conn_st_ctx {
+ __le32 reserved[8];
+};
+
+/*
+ * The eth storm context for the Xstorm
+ */
+struct xstorm_eth_conn_st_ctx {
+ __le32 reserved[60];
+};
+
+struct e4_xstorm_eth_conn_ag_ctx {
+ u8 reserved0 /* cdu_validation */;
+ u8 eth_state /* state */;
+ u8 flags0;
+/* exist_in_qm0 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+/* exist_in_qm1 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
+/* exist_in_qm2 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
+/* exist_in_qm3 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+/* bit4 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
+/* cf_array_active */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
+/* bit6 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
+/* bit7 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+/* bit8 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
+/* bit9 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
+/* bit10 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
+/* bit11 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
+/* bit12 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT 4
+/* bit13 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT13_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT 5
+/* bit14 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+/* bit15 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+/* timer0cf */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
+/* timer1cf */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
+/* timer2cf */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
+/* timer_stop_all */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+/* cf4 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
+/* cf5 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
+/* cf6 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
+/* cf7 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+/* cf8 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
+/* cf9 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
+/* cf10 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
+/* cf11 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+/* cf12 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
+/* cf13 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
+/* cf14 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
+/* cf15 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+/* cf16 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+/* cf_array_cf */
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+/* cf18 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
+/* cf19 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+/* cf20 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+/* cf21 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
+/* cf22 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+/* cf0en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
+/* cf1en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+/* cf2en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
+/* cf3en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
+/* cf4en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
+/* cf5en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
+/* cf6en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
+/* cf7en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
+/* cf8en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
+/* cf9en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+/* cf10en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
+/* cf11en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
+/* cf12en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
+/* cf13en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
+/* cf14en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
+/* cf15en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
+/* cf16en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+/* cf_array_cf_en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+/* cf18en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+/* cf19en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+/* cf20en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+/* cf21en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
+/* cf22en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+/* cf23en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+/* rule0en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
+/* rule1en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+/* rule2en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
+/* rule3en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
+/* rule4en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+/* rule5en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
+/* rule6en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
+/* rule7en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
+/* rule8en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+/* rule9en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+/* rule10en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
+/* rule11en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
+/* rule12en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+/* rule13en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+/* rule14en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
+/* rule15en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
+/* rule16en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
+/* rule17en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+/* rule18en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
+/* rule19en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
+/* rule20en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+/* rule21en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+/* rule22en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+/* rule23en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+/* rule24en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+/* rule25en */
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+/* bit16 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+/* bit17 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+/* bit18 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+/* bit19 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+/* bit20 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+/* bit21 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+/* cf23 */
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id /* byte2 */;
+ __le16 physical_q0 /* physical_q0 */;
+ __le16 e5_reserved1 /* physical_q1 */;
+ __le16 edpm_num_bds /* physical_q2 */;
+ __le16 tx_bd_cons /* word3 */;
+ __le16 tx_bd_prod /* word4 */;
+ __le16 tx_class /* word5 */;
+ __le16 conn_dpi /* conn_dpi */;
+ u8 byte3 /* byte3 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ u8 byte6 /* byte6 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+ __le32 reg5 /* cf_array0 */;
+ __le32 reg6 /* cf_array1 */;
+ __le16 word7 /* word7 */;
+ __le16 word8 /* word8 */;
+ __le16 word9 /* word9 */;
+ __le16 word10 /* word10 */;
+ __le32 reg7 /* reg7 */;
+ __le32 reg8 /* reg8 */;
+ __le32 reg9 /* reg9 */;
+ u8 byte7 /* byte7 */;
+ u8 byte8 /* byte8 */;
+ u8 byte9 /* byte9 */;
+ u8 byte10 /* byte10 */;
+ u8 byte11 /* byte11 */;
+ u8 byte12 /* byte12 */;
+ u8 byte13 /* byte13 */;
+ u8 byte14 /* byte14 */;
+ u8 byte15 /* byte15 */;
+ u8 e5_reserved /* e5_reserved */;
+ __le16 word11 /* word11 */;
+ __le32 reg10 /* reg10 */;
+ __le32 reg11 /* reg11 */;
+ __le32 reg12 /* reg12 */;
+ __le32 reg13 /* reg13 */;
+ __le32 reg14 /* reg14 */;
+ __le32 reg15 /* reg15 */;
+ __le32 reg16 /* reg16 */;
+ __le32 reg17 /* reg17 */;
+ __le32 reg18 /* reg18 */;
+ __le32 reg19 /* reg19 */;
+ __le16 word12 /* word12 */;
+ __le16 word13 /* word13 */;
+ __le16 word14 /* word14 */;
+ __le16 word15 /* word15 */;
+};
+
+/*
+ * The eth storm context for the Ystorm
+ */
+struct ystorm_eth_conn_st_ctx {
+ __le32 reserved[8];
+};
+
+struct e4_ystorm_eth_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 state /* state */;
+ u8 flags0;
+/* exist_in_qm0 */
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+/* exist_in_qm1 */
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf0 */
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 /* cf1 */
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+/* cf0en */
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
+/* cf1en */
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
+/* cf2en */
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+/* rule0en */
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+/* rule1en */
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+/* rule2en */
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+/* rule3en */
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+/* rule4en */
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 tx_q0_int_coallecing_timeset /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* word0 */;
+ __le32 terminate_spqe /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le16 tx_bd_cons_upd /* word1 */;
+ __le16 word2 /* word2 */;
+ __le16 word3 /* word3 */;
+ __le16 word4 /* word4 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+};
+
+struct e4_tstorm_eth_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
+ u8 flags4;
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 /* rule6en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+ __le32 reg5 /* reg5 */;
+ __le32 reg6 /* reg6 */;
+ __le32 reg7 /* reg7 */;
+ __le32 reg8 /* reg8 */;
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 rx_bd_cons /* word0 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ __le16 rx_bd_prod /* word1 */;
+ __le16 word2 /* conn_dpi */;
+ __le16 word3 /* word3 */;
+ __le32 reg9 /* reg9 */;
+ __le32 reg10 /* reg10 */;
+};
+
+struct e4_ustorm_eth_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+/* exist_in_qm0 */
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+/* exist_in_qm1 */
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+/* timer0cf */
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
+/* timer1cf */
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
+/* timer2cf */
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+/* timer_stop_all */
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
+/* cf4 */
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
+/* cf5 */
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
+/* cf6 */
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
+ u8 flags2;
+/* cf0en */
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
+/* cf1en */
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
+/* cf2en */
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+/* cf3en */
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
+/* cf4en */
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
+/* cf5en */
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
+/* cf6en */
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
+/* rule0en */
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+/* rule1en */
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+/* rule2en */
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+/* rule3en */
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+/* rule4en */
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+/* rule5en */
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+/* rule6en */
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
+/* rule7en */
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+/* rule8en */
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* conn_dpi */;
+ __le16 tx_bd_cons /* word1 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 tx_int_coallecing_timeset /* reg3 */;
+ __le16 tx_drv_bd_cons /* word2 */;
+ __le16 rx_drv_cqe_cons /* word3 */;
+};
+
+/*
+ * The eth storm context for the Ustorm
+ */
+struct ustorm_eth_conn_st_ctx {
+ __le32 reserved[40];
+};
+
+/*
+ * The eth storm context for the Mstorm
+ */
+struct mstorm_eth_conn_st_ctx {
+ __le32 reserved[8];
+};
+
+/*
+ * eth connection context
+ */
+struct eth_conn_context {
+/* tstorm storm context */
+ struct tstorm_eth_conn_st_ctx tstorm_st_context;
+ struct regpair tstorm_st_padding[2] /* padding */;
+/* pstorm storm context */
+ struct pstorm_eth_conn_st_ctx pstorm_st_context;
+/* xstorm storm context */
+ struct xstorm_eth_conn_st_ctx xstorm_st_context;
+/* xstorm aggregative context */
+ struct e4_xstorm_eth_conn_ag_ctx xstorm_ag_context;
+/* ystorm storm context */
+ struct ystorm_eth_conn_st_ctx ystorm_st_context;
+/* ystorm aggregative context */
+ struct e4_ystorm_eth_conn_ag_ctx ystorm_ag_context;
+/* tstorm aggregative context */
+ struct e4_tstorm_eth_conn_ag_ctx tstorm_ag_context;
+/* ustorm aggregative context */
+ struct e4_ustorm_eth_conn_ag_ctx ustorm_ag_context;
+/* ustorm storm context */
+ struct ustorm_eth_conn_st_ctx ustorm_st_context;
+/* mstorm storm context */
+ struct mstorm_eth_conn_st_ctx mstorm_st_context;
+};
+
+
+/*
+ * Ethernet error codes returned from eth SP ramrods
+ */
+enum eth_error_code {
+ ETH_OK = 0x00 /* command succeeded */,
+/* mac add filters command failed due to cam full state */
+ ETH_FILTERS_MAC_ADD_FAIL_FULL,
+/* mac add filters command failed due to mtt2 full state */
+ ETH_FILTERS_MAC_ADD_FAIL_FULL_MTT2,
+/* mac add filters command failed due to duplicate mac address */
+ ETH_FILTERS_MAC_ADD_FAIL_DUP_MTT2,
+/* mac add filters command failed due to duplicate mac address */
+ ETH_FILTERS_MAC_ADD_FAIL_DUP_STT2,
+/* mac delete filters command failed due to not found state */
+ ETH_FILTERS_MAC_DEL_FAIL_NOF,
+/* mac delete filters command failed due to not found state */
+ ETH_FILTERS_MAC_DEL_FAIL_NOF_MTT2,
+/* mac delete filters command failed due to not found state */
+ ETH_FILTERS_MAC_DEL_FAIL_NOF_STT2,
+/* mac add filters command failed due to MAC Address of 00:00:00:00:00:00 */
+ ETH_FILTERS_MAC_ADD_FAIL_ZERO_MAC,
+/* vlan add filters command failed due to cam full state */
+ ETH_FILTERS_VLAN_ADD_FAIL_FULL,
+/* vlan add filters command failed due to duplicate VLAN filter */
+ ETH_FILTERS_VLAN_ADD_FAIL_DUP,
+/* vlan delete filters command failed due to not found state */
+ ETH_FILTERS_VLAN_DEL_FAIL_NOF,
+/* vlan delete filters command failed due to not found state */
+ ETH_FILTERS_VLAN_DEL_FAIL_NOF_TT1,
+/* pair add filters command failed due to duplicate request */
+ ETH_FILTERS_PAIR_ADD_FAIL_DUP,
+/* pair add filters command failed due to full state */
+ ETH_FILTERS_PAIR_ADD_FAIL_FULL,
+/* pair add filters command failed due to full state */
+ ETH_FILTERS_PAIR_ADD_FAIL_FULL_MAC,
+/* pair delete filters command failed due to not found state */
+ ETH_FILTERS_PAIR_DEL_FAIL_NOF,
+/* pair delete filters command failed due to not found state */
+ ETH_FILTERS_PAIR_DEL_FAIL_NOF_TT1,
+/* pair add filters command failed due to MAC Address of 00:00:00:00:00:00 */
+ ETH_FILTERS_PAIR_ADD_FAIL_ZERO_MAC,
+/* vni add filters command failed due to cam full state */
+ ETH_FILTERS_VNI_ADD_FAIL_FULL,
+/* vni add filters command failed due to duplicate VNI filter */
+ ETH_FILTERS_VNI_ADD_FAIL_DUP,
+ ETH_FILTERS_GFT_UPDATE_FAIL /* Failed to update GFT filter. */,
+ MAX_ETH_ERROR_CODE
+};
+
+
+/*
+ * opcodes for the event ring
+ */
+enum eth_event_opcode {
+ ETH_EVENT_UNUSED,
+ ETH_EVENT_VPORT_START,
+ ETH_EVENT_VPORT_UPDATE,
+ ETH_EVENT_VPORT_STOP,
+ ETH_EVENT_TX_QUEUE_START,
+ ETH_EVENT_TX_QUEUE_STOP,
+ ETH_EVENT_RX_QUEUE_START,
+ ETH_EVENT_RX_QUEUE_UPDATE,
+ ETH_EVENT_RX_QUEUE_STOP,
+ ETH_EVENT_FILTERS_UPDATE,
+ ETH_EVENT_RX_ADD_OPENFLOW_FILTER,
+ ETH_EVENT_RX_DELETE_OPENFLOW_FILTER,
+ ETH_EVENT_RX_CREATE_OPENFLOW_ACTION,
+ ETH_EVENT_RX_ADD_UDP_FILTER,
+ ETH_EVENT_RX_DELETE_UDP_FILTER,
+ ETH_EVENT_RX_CREATE_GFT_ACTION,
+ ETH_EVENT_RX_GFT_UPDATE_FILTER,
+ MAX_ETH_EVENT_OPCODE
+};
+
+
+/*
+ * Ethernet filter actions
+ */
+enum eth_filter_action {
+ ETH_FILTER_ACTION_UNUSED,
+ ETH_FILTER_ACTION_REMOVE,
+ ETH_FILTER_ACTION_ADD,
+/* Remove all filters of given type and vport ID. */
+ ETH_FILTER_ACTION_REMOVE_ALL,
+ MAX_ETH_FILTER_ACTION
+};
+
+
+/*
+ * Command for adding/removing a classification rule $$KEEP_ENDIANNESS$$
+ */
+struct eth_filter_cmd {
+ u8 type /* Filter Type (MAC/VLAN/Pair/VNI) */;
+ u8 vport_id /* the vport id */;
+ u8 action /* filter command action: add/remove/replace */;
+ u8 reserved0;
+ __le32 vni;
+ __le16 mac_lsb;
+ __le16 mac_mid;
+ __le16 mac_msb;
+ __le16 vlan_id;
+};
+
+
+/*
+ * $$KEEP_ENDIANNESS$$
+ */
+struct eth_filter_cmd_header {
+ u8 rx /* If set, apply these commands to the RX path */;
+ u8 tx /* If set, apply these commands to the TX path */;
+ u8 cmd_cnt /* Number of filter commands */;
+/* 0 - don't assert in case of filter configuration error. Just return an error
+ * code. 1 - assert in case of filter configuration error.
+ */
+ u8 assert_on_error;
+ u8 reserved1[4];
+};
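Taken together, eth_filter_cmd_header and eth_filter_cmd describe one classification update: the header says which path(s) the commands apply to and how many commands follow, and each command carries the filter type, action and match values. A hedged sketch of filling an "add MAC filter on RX" request follows; the helper name, the byte packing of the three MAC halves and the omitted endianness conversion are assumptions for illustration, not the upstream helper:

#include <string.h>   /* memset */

/* Illustrative only - assumes ecore_hsi_eth.h is included. */
static void fill_mac_add_filter(struct eth_filter_cmd_header *hdr,
				struct eth_filter_cmd *cmd,
				const u8 mac[6], u8 vport_id)
{
	memset(hdr, 0, sizeof(*hdr));
	hdr->rx = 1;              /* apply the commands to the RX path */
	hdr->cmd_cnt = 1;         /* one filter command follows */

	memset(cmd, 0, sizeof(*cmd));
	cmd->type = ETH_FILTER_TYPE_MAC;
	cmd->action = ETH_FILTER_ACTION_ADD;
	cmd->vport_id = vport_id;
	/* MAC carried as three 16-bit halves; packing shown here is an
	 * assumption and endianness conversion is omitted.
	 */
	cmd->mac_msb = (u16)((mac[0] << 8) | mac[1]);
	cmd->mac_mid = (u16)((mac[2] << 8) | mac[3]);
	cmd->mac_lsb = (u16)((mac[4] << 8) | mac[5]);
}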
+
+
+/*
+ * Ethernet filter types: mac/vlan/pair
+ */
+enum eth_filter_type {
+ ETH_FILTER_TYPE_UNUSED,
+ ETH_FILTER_TYPE_MAC /* Add/remove a MAC address */,
+ ETH_FILTER_TYPE_VLAN /* Add/remove a VLAN */,
+ ETH_FILTER_TYPE_PAIR /* Add/remove a MAC-VLAN pair */,
+ ETH_FILTER_TYPE_INNER_MAC /* Add/remove an inner MAC address */,
+ ETH_FILTER_TYPE_INNER_VLAN /* Add/remove an inner VLAN */,
+ ETH_FILTER_TYPE_INNER_PAIR /* Add/remove an inner MAC-VLAN pair */,
+/* Add/remove an inner MAC-VNI pair */
+ ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR,
+ ETH_FILTER_TYPE_MAC_VNI_PAIR /* Add/remove a MAC-VNI pair */,
+ ETH_FILTER_TYPE_VNI /* Add/remove a VNI */,
+ MAX_ETH_FILTER_TYPE
+};
+
+
+/*
+ * eth IPv4 Fragment Type
+ */
+enum eth_ipv4_frag_type {
+ ETH_IPV4_NOT_FRAG /* IPV4 Packet Not Fragmented */,
+/* First Fragment of IPv4 Packet (contains headers) */
+ ETH_IPV4_FIRST_FRAG,
+/* Non-First Fragment of IPv4 Packet (does not contain headers) */
+ ETH_IPV4_NON_FIRST_FRAG,
+ MAX_ETH_IPV4_FRAG_TYPE
+};
+
+
+/*
+ * eth IP type
+ */
+enum eth_ip_type {
+ ETH_IPV4 /* IPv4 */,
+ ETH_IPV6 /* IPv6 */,
+ MAX_ETH_IP_TYPE
+};
+
+
+/*
+ * Ethernet Ramrod Command IDs
+ */
+enum eth_ramrod_cmd_id {
+ ETH_RAMROD_UNUSED,
+ ETH_RAMROD_VPORT_START /* VPort Start Ramrod */,
+ ETH_RAMROD_VPORT_UPDATE /* VPort Update Ramrod */,
+ ETH_RAMROD_VPORT_STOP /* VPort Stop Ramrod */,
+ ETH_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */,
+ ETH_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
+ ETH_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */,
+ ETH_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
+ ETH_RAMROD_FILTERS_UPDATE /* Add or Remove Mac/Vlan/Pair filters */,
+ ETH_RAMROD_RX_QUEUE_UPDATE /* RX Queue Update Ramrod */,
+/* RX - Create an Openflow Action */
+ ETH_RAMROD_RX_CREATE_OPENFLOW_ACTION,
+/* RX - Add an Openflow Filter to the Searcher */
+ ETH_RAMROD_RX_ADD_OPENFLOW_FILTER,
+/* RX - Delete an Openflow Filter from the Searcher */
+ ETH_RAMROD_RX_DELETE_OPENFLOW_FILTER,
+/* RX - Add a UDP Filter to the Searcher */
+ ETH_RAMROD_RX_ADD_UDP_FILTER,
+/* RX - Delete a UDP Filter from the Searcher */
+ ETH_RAMROD_RX_DELETE_UDP_FILTER,
+ ETH_RAMROD_RX_CREATE_GFT_ACTION /* RX - Create a Gft Action */,
+/* RX - Add/Delete a GFT Filter to the Searcher */
+ ETH_RAMROD_GFT_UPDATE_FILTER,
+ MAX_ETH_RAMROD_CMD_ID
+};
+
+
+/*
+ * return code from eth sp ramrods
+ */
+struct eth_return_code {
+ u8 value;
+/* error code (use enum eth_error_code) */
+#define ETH_RETURN_CODE_ERR_CODE_MASK 0x1F
+#define ETH_RETURN_CODE_ERR_CODE_SHIFT 0
+#define ETH_RETURN_CODE_RESERVED_MASK 0x3
+#define ETH_RETURN_CODE_RESERVED_SHIFT 5
+/* rx path - 0, tx path - 1 */
+#define ETH_RETURN_CODE_RX_TX_MASK 0x1
+#define ETH_RETURN_CODE_RX_TX_SHIFT 7
+};
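The mask/shift pairs above follow the convention used throughout these headers: a field is read as (value >> SHIFT) & MASK and written back as value |= (field & MASK) << SHIFT. A minimal sketch of decoding the return byte (the helper names are illustrative, not part of the upstream API):

/* Illustrative only - extract the error code and the rx/tx indication. */
static enum eth_error_code eth_return_code_err(const struct eth_return_code *rc)
{
	return (enum eth_error_code)
		((rc->value >> ETH_RETURN_CODE_ERR_CODE_SHIFT) &
		 ETH_RETURN_CODE_ERR_CODE_MASK);
}

static int eth_return_code_is_tx(const struct eth_return_code *rc)
{
	/* 0 - rx path, 1 - tx path */
	return (rc->value >> ETH_RETURN_CODE_RX_TX_SHIFT) &
	       ETH_RETURN_CODE_RX_TX_MASK;
}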
+
+
+/*
+ * What to do in case an error occurs
+ */
+enum eth_tx_err {
+ ETH_TX_ERR_DROP /* Drop erroneous packet. */,
+/* Assert an interrupt for PF, declare as malicious for VF */
+ ETH_TX_ERR_ASSERT_MALICIOUS,
+ MAX_ETH_TX_ERR
+};
+
+
+/*
+ * Array of the different error type behaviors
+ */
+struct eth_tx_err_vals {
+ __le16 values;
+/* Wrong VLAN insertion mode (use enum eth_tx_err) */
+#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_SHIFT 0
+/* Packet is below minimal size (use enum eth_tx_err) */
+#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_MASK 0x1
+#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_SHIFT 1
+/* Vport has sent spoofed packet (use enum eth_tx_err) */
+#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_MASK 0x1
+#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_SHIFT 2
+/* Packet with illegal type of inband tag (use enum eth_tx_err) */
+#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_SHIFT 3
+/* Packet marked for VLAN insertion when inband tag is present
+ * (use enum eth_tx_err)
+ */
+#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_MASK 0x1
+#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_SHIFT 4
+/* Non LSO packet larger than MTU (use enum eth_tx_err) */
+#define ETH_TX_ERR_VALS_MTU_VIOLATION_MASK 0x1
+#define ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT 5
+/* VF/PF has sent LLDP/PFC or any other type of control packet which it is not
+ * allowed to send (use enum eth_tx_err)
+ */
+#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_SHIFT 6
+#define ETH_TX_ERR_VALS_RESERVED_MASK 0x1FF
+#define ETH_TX_ERR_VALS_RESERVED_SHIFT 7
+};
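Each bit in values selects one behaviour from enum eth_tx_err for the corresponding error type. A hedged sketch follows; the chosen behaviours are placeholders and endianness conversion of the __le16 field is omitted:

/* Illustrative only - drop on MTU violation, assert/malicious on spoofing. */
static void set_tx_err_behaviour(struct eth_tx_err_vals *vals)
{
	u16 v = 0;

	v |= (u16)ETH_TX_ERR_DROP << ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT;
	v |= (u16)ETH_TX_ERR_ASSERT_MALICIOUS <<
	     ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_SHIFT;
	vals->values = v;
}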
+
+
+/*
+ * vport rss configuration data
+ */
+struct eth_vport_rss_config {
+ __le16 capabilities;
+/* configuration of the IpV4 2-tuple capability */
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT 0
+/* configuration of the IpV6 2-tuple capability */
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_SHIFT 1
+/* configuration of the IpV4 4-tuple capability for TCP */
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT 2
+/* configuration of the IpV6 4-tuple capability for TCP */
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT 3
+/* configuration of the IpV4 4-tuple capability for UDP */
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_SHIFT 4
+/* configuration of the IpV6 4-tuple capability for UDP */
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT 5
+/* configuration of the 5-tuple capability */
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6
+/* reserved */
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x1FF
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 7
+/* The RSS engine ID. Must be allocated to each vport with RSS enabled.
+ * Total number of RSS engines is ETH_RSS_ENGINE_NUM_ , according to chip type.
+ */
+ u8 rss_id;
+ u8 rss_mode /* The RSS mode for this function */;
+ u8 update_rss_key /* if set update the rss key */;
+/* if set update the indirection table values */
+ u8 update_rss_ind_table;
+/* if set update the capabilities and indirection table size. */
+ u8 update_rss_capabilities;
+ u8 tbl_size /* rss mask (Tbl size) */;
+ __le32 reserved2[2];
+/* RSS indirection table */
+ __le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM];
+/* RSS key supplied to us by OS */
+ __le32 rss_key[ETH_RSS_KEY_SIZE_REGS];
+ __le32 reserved3[2];
+};
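A hedged sketch of filling this structure for 4-tuple TCP RSS over IPv4/IPv6; the queue spreading, key words and tbl_size value are placeholders chosen for illustration only, and endianness conversion of the __le fields is omitted:

/* Illustrative only - assumes ecore_hsi_eth.h / eth_common.h are included. */
static void fill_rss_config(struct eth_vport_rss_config *cfg,
			    u8 rss_id, u16 num_rx_queues)
{
	u16 i;

	cfg->rss_id = rss_id;
	cfg->rss_mode = ETH_VPORT_RSS_MODE_REGULAR;
	cfg->update_rss_key = 1;
	cfg->update_rss_ind_table = 1;
	cfg->update_rss_capabilities = 1;
	cfg->tbl_size = 7;                     /* placeholder table-size value */
	cfg->capabilities =
		(1 << ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT) |
		(1 << ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT);

	for (i = 0; i < ETH_RSS_IND_TABLE_ENTRIES_NUM; i++)
		cfg->indirection_table[i] = i % num_rx_queues;  /* spread queues */
	for (i = 0; i < ETH_RSS_KEY_SIZE_REGS; i++)
		cfg->rss_key[i] = 0x6d5a6d5a;                   /* placeholder key */
}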
+
+
+/*
+ * eth vport RSS mode
+ */
+enum eth_vport_rss_mode {
+ ETH_VPORT_RSS_MODE_DISABLED /* RSS Disabled */,
+ ETH_VPORT_RSS_MODE_REGULAR /* Regular (ndis-like) RSS */,
+ MAX_ETH_VPORT_RSS_MODE
+};
+
+
+/*
+ * Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$
+ */
+struct eth_vport_rx_mode {
+ __le16 state;
+/* drop all unicast packets */
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_SHIFT 0
+/* accept all unicast packets (subject to vlan) */
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
+/* accept all unmatched unicast packets */
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+/* drop all multicast packets */
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_SHIFT 3
+/* accept all multicast packets (subject to vlan) */
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT 4
+/* accept all broadcast packets (subject to vlan) */
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5
+#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF
+#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6
+ __le16 reserved2[3];
+};
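As an example of how these bits compose, a "promiscuous" RX classification accepts all unicast, multicast and broadcast traffic on the vport. Sketch only; endianness conversion of the __le16 state field is omitted:

/* Illustrative only - accept-everything RX mode for a vport. */
static void set_rx_mode_promisc(struct eth_vport_rx_mode *rx_mode)
{
	rx_mode->state =
		(1 << ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT) |
		(1 << ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT) |
		(1 << ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT) |
		(1 << ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT);
}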
+
+
+/*
+ * Command for setting tpa parameters
+ */
+struct eth_vport_tpa_param {
+ u8 tpa_ipv4_en_flg /* Enable TPA for IPv4 packets */;
+ u8 tpa_ipv6_en_flg /* Enable TPA for IPv6 packets */;
+ u8 tpa_ipv4_tunn_en_flg /* Enable TPA for IPv4 over tunnel */;
+ u8 tpa_ipv6_tunn_en_flg /* Enable TPA for IPv6 over tunnel */;
+/* If set, start each tpa segment on new SGE (GRO mode). One SGE per segment
+ * allowed
+ */
+ u8 tpa_pkt_split_flg;
+/* If set, put header of first TPA segment on bd and data on SGE */
+ u8 tpa_hdr_data_split_flg;
+/* If set, GRO data consistency will be checked for TPA continuation */
+ u8 tpa_gro_consistent_flg;
+/* maximum number of opened aggregations per v-port */
+ u8 tpa_max_aggs_num;
+ __le16 tpa_max_size /* maximal size for the aggregated TPA packets */;
+/* minimum TCP payload size for a packet to start aggregation */
+ __le16 tpa_min_size_to_start;
+/* minimum TCP payload size for a packet to continue aggregation */
+ __le16 tpa_min_size_to_cont;
+/* maximal number of buffers that can be used for one aggregation */
+ u8 max_buff_num;
+ u8 reserved;
+};
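A hedged sketch of a TPA (LRO) configuration with header/data split; every numeric limit below is a placeholder picked for illustration, not a recommended or validated value, and endianness conversion is omitted:

/* Illustrative only. */
static void fill_tpa_param(struct eth_vport_tpa_param *tpa, u16 mtu)
{
	tpa->tpa_ipv4_en_flg = 1;
	tpa->tpa_ipv6_en_flg = 1;
	tpa->tpa_hdr_data_split_flg = 1;   /* header on BD, data on SGE */
	tpa->tpa_max_aggs_num = 4;         /* open aggregations per vport */
	tpa->tpa_max_size = 0xffff;        /* cap on an aggregated packet */
	tpa->tpa_min_size_to_start = mtu / 2;
	tpa->tpa_min_size_to_cont = mtu / 2;
	tpa->max_buff_num = 8;             /* buffers per aggregation */
}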
+
+
+/*
+ * Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$
+ */
+struct eth_vport_tx_mode {
+ __le16 state;
+/* drop all unicast packets */
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_SHIFT 0
+/* accept all unicast packets (subject to vlan) */
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
+/* drop all multicast packets */
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_SHIFT 2
+/* accept all multicast packets (subject to vlan) */
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_SHIFT 3
+/* accept all broadcast packets (subject to vlan) */
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF
+#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5
+ __le16 reserved2[3];
+};
+
+
+/*
+ * GFT filter update action
+ */
+enum gft_filter_update_action {
+ GFT_ADD_FILTER,
+ GFT_DELETE_FILTER,
+ MAX_GFT_FILTER_UPDATE_ACTION
+};
+
+
+/*
+ * Ramrod data for rx create gft action
+ */
+enum gft_logic_filter_type {
+ GFT_FILTER_TYPE /* flow FW is GFT-logic as well */,
+ RFS_FILTER_TYPE /* flow FW is A-RFS-logic */,
+ MAX_GFT_LOGIC_FILTER_TYPE
+};
+
+
+
+
+/*
+ * Ramrod data for rx add openflow filter
+ */
+struct rx_add_openflow_filter_data {
+ __le16 action_icid /* CID of Action to run for this filter */;
+ u8 priority /* Searcher String - Packet priority */;
+ u8 reserved0;
+ __le32 tenant_id /* Searcher String - Tenant ID */;
+/* Searcher String - Destination Mac Bytes 0 to 1 */
+ __le16 dst_mac_hi;
+/* Searcher String - Destination Mac Bytes 2 to 3 */
+ __le16 dst_mac_mid;
+/* Searcher String - Destination Mac Bytes 4 to 5 */
+ __le16 dst_mac_lo;
+ __le16 src_mac_hi /* Searcher String - Source Mac 0 to 1 */;
+ __le16 src_mac_mid /* Searcher String - Source Mac 2 to 3 */;
+ __le16 src_mac_lo /* Searcher String - Source Mac 4 to 5 */;
+ __le16 vlan_id /* Searcher String - Vlan ID */;
+ __le16 l2_eth_type /* Searcher String - Last L2 Ethertype */;
+ u8 ipv4_dscp /* Searcher String - IPv4 6 MSBs of the TOS Field */;
+ u8 ipv4_frag_type /* Searcher String - IPv4 Fragmentation Type */;
+ u8 ipv4_over_ip /* Searcher String - IPv4 Over IP Type */;
+ u8 tenant_id_exists /* Searcher String - Tenant ID Exists */;
+ __le32 ipv4_dst_addr /* Searcher String - IPv4 Destination Address */;
+ __le32 ipv4_src_addr /* Searcher String - IPv4 Source Address */;
+ __le16 l4_dst_port /* Searcher String - TCP/UDP Destination Port */;
+ __le16 l4_src_port /* Searcher String - TCP/UDP Source Port */;
+};
+
+
+/*
+ * Ramrod data for rx create gft action
+ */
+struct rx_create_gft_action_data {
+ u8 vport_id /* Vport Id of GFT Action */;
+ u8 reserved[7];
+};
+
+
+/*
+ * Ramrod data for rx create openflow action
+ */
+struct rx_create_openflow_action_data {
+ u8 vport_id /* Vport Id of Openflow Action */;
+ u8 reserved[7];
+};
+
+
+/*
+ * Ramrod data for rx queue start ramrod
+ */
+struct rx_queue_start_ramrod_data {
+ __le16 rx_queue_id /* ID of RX queue */;
+ __le16 num_of_pbl_pages /* Num of pages in CQE PBL */;
+ __le16 bd_max_bytes /* maximal bytes that can be placed on the bd */;
+ __le16 sb_id /* Status block ID */;
+ u8 sb_index /* Status block protocol index */;
+ u8 vport_id /* ID of virtual port */;
+ u8 default_rss_queue_flg /* set queue as default rss queue if set */;
+ u8 complete_cqe_flg /* post completion to the CQE ring if set */;
+ u8 complete_event_flg /* post completion to the event ring if set */;
+ u8 stats_counter_id /* Statistics counter ID */;
+ u8 pin_context /* Pin context in CCFC to improve performance */;
+ u8 pxp_tph_valid_bd /* PXP command TPH Valid - for BD/SGE fetch */;
+/* PXP command TPH Valid - for packet placement */
+ u8 pxp_tph_valid_pkt;
+/* PXP command Steering tag hint. Use enum pxp_tph_st_hint */
+ u8 pxp_st_hint;
+ __le16 pxp_st_index /* PXP command Steering tag index */;
+/* Indicates that current queue belongs to poll-mode driver */
+ u8 pmd_mode;
+/* Indicates that the current queue is using the TX notification queue
+ * mechanism - should be set only for PMD queue
+ */
+ u8 notify_en;
+/* Initial value for the toggle valid bit - used in PMD mode */
+ u8 toggle_val;
+/* Index of RX producers in VF zone. Used for VF only. */
+ u8 vf_rx_prod_index;
+/* Backward compatibility mode. If set, unprotected mStorm queue zone will be
+ * used for VF RX producers instead of VF zone.
+ */
+ u8 vf_rx_prod_use_zone_a;
+ u8 reserved[5];
+ __le16 reserved1 /* FW reserved. */;
+ struct regpair cqe_pbl_addr /* Base address on host of CQE PBL */;
+ struct regpair bd_base /* bd address of the first bd page */;
+ struct regpair reserved2 /* FW reserved. */;
+};
+
+
+/*
+ * Ramrod data for rx queue stop ramrod
+ */
+struct rx_queue_stop_ramrod_data {
+ __le16 rx_queue_id /* ID of RX queue */;
+ u8 complete_cqe_flg /* post completion to the CQE ring if set */;
+ u8 complete_event_flg /* post completion to the event ring if set */;
+ u8 vport_id /* ID of virtual port */;
+ u8 reserved[3];
+};
+
+
+/*
+ * Ramrod data for rx queue update ramrod
+ */
+struct rx_queue_update_ramrod_data {
+ __le16 rx_queue_id /* ID of RX queue */;
+ u8 complete_cqe_flg /* post completion to the CQE ring if set */;
+ u8 complete_event_flg /* post completion to the event ring if set */;
+ u8 vport_id /* ID of virtual port */;
+ u8 reserved[4];
+ u8 reserved1 /* FW reserved. */;
+ u8 reserved2 /* FW reserved. */;
+ u8 reserved3 /* FW reserved. */;
+ __le16 reserved4 /* FW reserved. */;
+ __le16 reserved5 /* FW reserved. */;
+ struct regpair reserved6 /* FW reserved. */;
+};
+
+
+/*
+ * Ramrod data for rx Add UDP Filter
+ */
+struct rx_udp_filter_data {
+ __le16 action_icid /* CID of Action to run for this filter */;
+ __le16 vlan_id /* Searcher String - Vlan ID */;
+ u8 ip_type /* Searcher String - IP Type */;
+ u8 tenant_id_exists /* Searcher String - Tenant ID Exists */;
+ __le16 reserved1;
+/* Searcher String - IP Destination Address, for IPv4 use ip_dst_addr[0] only */
+ __le32 ip_dst_addr[4];
+/* Searcher String - IP Source Address, for IPv4 use ip_src_addr[0] only */
+ __le32 ip_src_addr[4];
+ __le16 udp_dst_port /* Searcher String - UDP Destination Port */;
+ __le16 udp_src_port /* Searcher String - UDP Source Port */;
+ __le32 tenant_id /* Searcher String - Tenant ID */;
+};
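A sketch of filling this ramrod data for an IPv4 UDP filter keyed on destination port; per the field comments above, IPv4 addresses occupy index 0 of the address arrays. The helper name is illustrative and endianness conversion is omitted:

#include <string.h>   /* memset */

/* Illustrative only. */
static void fill_ipv4_udp_filter(struct rx_udp_filter_data *f,
				 u16 action_icid, u32 dst_ip, u16 dst_port)
{
	memset(f, 0, sizeof(*f));
	f->action_icid = action_icid;    /* CID of the action to run */
	f->ip_type = ETH_IPV4;           /* from enum eth_ip_type */
	f->ip_dst_addr[0] = dst_ip;      /* IPv4 uses entry 0 only */
	f->udp_dst_port = dst_port;
}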
+
+
+/*
+ * Ramrod to add a filter - the filter is the packet header of the packet type
+ * that should be passed to a certain FW flow
+ */
+struct rx_update_gft_filter_data {
+/* Pointer to Packet Header That Defines GFT Filter */
+ struct regpair pkt_hdr_addr;
+ __le16 pkt_hdr_length /* Packet Header Length */;
+/* If the is_rfs flag is set: queue ID to associate the filter with;
+ * otherwise: action icid
+ */
+ __le16 rx_qid_or_action_icid;
+/* Used only if the is_rfs flag is set: vport ID to associate the filter with */
+ u8 vport_id;
+/* Use enum gft_logic_filter_type to set the type of flow using the GFT HW
+ * logic blocks
+ */
+ u8 filter_type;
+ u8 filter_action /* Use to set type of action on filter */;
+/* 0 - don't assert in case of error. Just return an error code. 1 - assert in
+ * case of error.
+ */
+ u8 assert_on_error;
+};
+
+
+
+/*
+ * Ramrod data for tx queue start ramrod
+ */
+struct tx_queue_start_ramrod_data {
+ __le16 sb_id /* Status block ID */;
+ u8 sb_index /* Status block protocol index */;
+ u8 vport_id /* VPort ID */;
+ u8 reserved0 /* FW reserved. (qcn_rl_en) */;
+ u8 stats_counter_id /* Statistics counter ID to use */;
+ __le16 qm_pq_id /* QM PQ ID */;
+ u8 flags;
+/* 0: Enable QM opportunistic flow. 1: Disable QM opportunistic flow */
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0
+/* If set, Test Mode - packets will be duplicated by Xstorm handler */
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1
+/* If set, Test Mode - packets destination will be determined by dest_port_mode
+ * field from Tx BD
+ */
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2
+/* Indicates that current queue belongs to poll-mode driver */
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT 3
+/* Indicates that the current queue is using the TX notification queue
+ * mechanism - should be set only for PMD queue
+ */
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT 4
+/* Pin context in CCFC to improve performance */
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 5
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x3
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT 6
+ u8 pxp_st_hint /* PXP command Steering tag hint */;
+ u8 pxp_tph_valid_bd /* PXP command TPH Valid - for BD fetch */;
+ u8 pxp_tph_valid_pkt /* PXP command TPH Valid - for packet fetch */;
+ __le16 pxp_st_index /* PXP command Steering tag index */;
+/* TX completion min agg size - for PMD queues */
+ __le16 comp_agg_size;
+ __le16 queue_zone_id /* queue zone ID to use */;
+ __le16 reserved2 /* FW reserved. (test_dup_count) */;
+ __le16 pbl_size /* Number of BD pages pointed by PBL */;
+/* unique Queue ID - currently used only by PMD flow */
+ __le16 tx_queue_id;
+/* Unique Same-As-Last Resource ID - improves performance for same-as-last
+ * packets per connection (range 0..ETH_TX_NUM_SAME_AS_LAST_ENTRIES-1 IDs
+ * available)
+ */
+ __le16 same_as_last_id;
+ __le16 reserved[3];
+ struct regpair pbl_base_addr /* address of the pbl page */;
+/* BD consumer address in host - for PMD queues */
+ struct regpair bd_cons_address;
+};
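The flags byte uses the same mask/shift convention as the other bitfields in this file; for a poll-mode (PMD) TX queue the PMD_MODE and NOTIFY_EN bits are the ones of interest. Sketch only, with an illustrative helper name:

/* Illustrative only - mark a TX queue as a PMD queue with notification. */
static void set_txq_pmd_flags(struct tx_queue_start_ramrod_data *txq)
{
	txq->flags |= (1 << TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT) |
		      (1 << TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT);
}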
+
+
+/*
+ * Ramrod data for tx queue stop ramrod
+ */
+struct tx_queue_stop_ramrod_data {
+ __le16 reserved[4];
+};
+
+
+
+/*
+ * Ramrod data for vport update ramrod
+ */
+struct vport_filter_update_ramrod_data {
+/* Header for Filter Commands (RX/TX, Add/Remove/Replace, etc) */
+ struct eth_filter_cmd_header filter_cmd_hdr;
+/* Filter Commands */
+ struct eth_filter_cmd filter_cmds[ETH_FILTER_RULES_COUNT];
+};
+
+
+/*
+ * Ramrod data for vport start ramrod
+ */
+struct vport_start_ramrod_data {
+ u8 vport_id;
+ u8 sw_fid;
+ __le16 mtu;
+ u8 drop_ttl0_en /* if set, drop packet with ttl=0 */;
+ u8 inner_vlan_removal_en;
+ struct eth_vport_rx_mode rx_mode /* Rx filter data */;
+ struct eth_vport_tx_mode tx_mode /* Tx filter data */;
+/* TPA configuration parameters */
+ struct eth_vport_tpa_param tpa_param;
+ __le16 default_vlan /* Default Vlan value to be forced by FW */;
+ u8 tx_switching_en /* Tx switching is enabled for current Vport */;
+/* Anti-spoofing verification is set for current Vport */
+ u8 anti_spoofing_en;
+/* If set, the default Vlan value is forced by the FW */
+ u8 default_vlan_en;
+/* If set, the vport handles PTP Timesync Packets */
+ u8 handle_ptp_pkts;
+/* If enabled, the inner VLAN will be stripped and not written to the CQE */
+ u8 silent_vlan_removal_en;
+/* If set, an untagged filter (vlan 0) is added to the current Vport; otherwise
+ * the vport is marked as any-vlan
+ */
+ u8 untagged;
+/* Desired behavior per TX error type */
+ struct eth_tx_err_vals tx_err_behav;
+/* If set, ETH header padding will not be inserted and placement_offset will be
+ * zero.
+ */
+ u8 zero_placement_offset;
+/* If set, control frames will be filtered according to MAC check. */
+ u8 ctl_frame_mac_check_en;
+/* If set, control frames will be filtered according to ethtype check. */
+ u8 ctl_frame_ethtype_check_en;
+ u8 reserved[5];
+};
+
+
+/*
+ * Ramrod data for vport stop ramrod
+ */
+struct vport_stop_ramrod_data {
+ u8 vport_id;
+ u8 reserved[7];
+};
+
+
+/*
+ * Ramrod data for vport update ramrod
+ */
+struct vport_update_ramrod_data_cmn {
+ u8 vport_id;
+ u8 update_rx_active_flg /* set if rx active flag should be handled */;
+ u8 rx_active_flg /* rx active flag value */;
+ u8 update_tx_active_flg /* set if tx active flag should be handled */;
+ u8 tx_active_flg /* tx active flag value */;
+ u8 update_rx_mode_flg /* set if rx state data should be handled */;
+ u8 update_tx_mode_flg /* set if tx state data should be handled */;
+/* set if approx. mcast data should be handled */
+ u8 update_approx_mcast_flg;
+ u8 update_rss_flg /* set if rss data should be handled */;
+/* set if inner_vlan_removal_en should be handled */
+ u8 update_inner_vlan_removal_en_flg;
+ u8 inner_vlan_removal_en;
+/* set if tpa parameters should be handled; TPA must be disabled beforehand */
+ u8 update_tpa_param_flg;
+ u8 update_tpa_en_flg /* set if tpa enable changes */;
+/* set if tx switching en flag should be handled */
+ u8 update_tx_switching_en_flg;
+ u8 tx_switching_en /* tx switching en value */;
+/* set if anti spoofing flag should be handled */
+ u8 update_anti_spoofing_en_flg;
+ u8 anti_spoofing_en /* Anti-spoofing verification en value */;
+/* set if handle_ptp_pkts should be handled. */
+ u8 update_handle_ptp_pkts;
+/* If set, the vport handles PTP Timesync Packets */
+ u8 handle_ptp_pkts;
+/* If set, the default Vlan enable flag is updated */
+ u8 update_default_vlan_en_flg;
+/* If set, the default Vlan value is forced by the FW */
+ u8 default_vlan_en;
+/* If set, the default Vlan value is updated */
+ u8 update_default_vlan_flg;
+ __le16 default_vlan /* Default Vlan value to be forced by FW */;
+/* set if accept_any_vlan should be handled */
+ u8 update_accept_any_vlan_flg;
+ u8 accept_any_vlan /* accept_any_vlan updated value */;
+/* Set to remove the vlan silently; update_inner_vlan_removal_en_flg must be
+ * enabled as well. If Rx is in noSgl mode, send rx_queue_update_ramrod_data.
+ */
+ u8 silent_vlan_removal_en;
+/* If set, MTU will be updated. The vport must not be active. */
+ u8 update_mtu_flg;
+ __le16 mtu /* New MTU value. Used if update_mtu_flg is set */;
+/* If set, ctl_frame_mac_check_en and ctl_frame_ethtype_check_en will be
+ * updated
+ */
+ u8 update_ctl_frame_checks_en_flg;
+/* If set, control frames will be filtered according to MAC check. */
+ u8 ctl_frame_mac_check_en;
+/* If set, control frames will be filtered according to ethtype check. */
+ u8 ctl_frame_ethtype_check_en;
+ u8 reserved[15];
+};
+
+struct vport_update_ramrod_mcast {
+ __le32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS] /* multicast bins */;
+};
+
+/*
+ * Ramrod data for vport update ramrod
+ */
+struct vport_update_ramrod_data {
+/* Common data for all vport update ramrods */
+ struct vport_update_ramrod_data_cmn common;
+ struct eth_vport_rx_mode rx_mode /* vport rx mode bitmap */;
+ struct eth_vport_tx_mode tx_mode /* vport tx mode bitmap */;
+/* TPA configuration parameters */
+ struct eth_vport_tpa_param tpa_param;
+ struct vport_update_ramrod_mcast approx_mcast;
+ struct eth_vport_rss_config rss_config /* rss config data */;
+};
+
+
+
+
+
+
+struct E4XstormEthConnAgCtxDqExtLdPart {
+ u8 reserved0 /* cdu_validation */;
+ u8 eth_state /* state */;
+ u8 flags0;
+/* exist_in_qm0 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
+/* exist_in_qm1 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT 1
+/* exist_in_qm2 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT 2
+/* exist_in_qm3 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
+/* bit4 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT 4
+/* cf_array_active */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT 5
+/* bit6 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT 6
+/* bit7 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT 7
+ u8 flags1;
+/* bit8 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT 0
+/* bit9 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT 1
+/* bit10 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT 2
+/* bit11 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
+/* bit12 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
+/* bit13 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT 5
+/* bit14 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT 6
+/* bit15 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+/* timer0cf */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT 0
+/* timer1cf */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT 2
+/* timer2cf */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT 4
+/* timer_stop_all */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT 6
+ u8 flags3;
+/* cf4 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT 0
+/* cf5 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT 2
+/* cf6 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT 4
+/* cf7 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT 6
+ u8 flags4;
+/* cf8 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT 0
+/* cf9 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT 2
+/* cf10 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT 4
+/* cf11 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT 6
+ u8 flags5;
+/* cf12 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT 0
+/* cf13 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT 2
+/* cf14 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT 4
+/* cf15 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT 6
+ u8 flags6;
+/* cf16 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT 0
+/* cf_array_cf */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT 2
+/* cf18 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT 4
+/* cf19 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+/* cf20 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT 0
+/* cf21 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT 2
+/* cf22 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
+/* cf0en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
+/* cf1en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
+ u8 flags8;
+/* cf2en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
+/* cf3en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
+/* cf4en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
+/* cf5en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
+/* cf6en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
+/* cf7en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT 5
+/* cf8en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
+/* cf9en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
+ u8 flags9;
+/* cf10en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
+/* cf11en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
+/* cf12en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
+/* cf13en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
+/* cf14en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
+/* cf15en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
+/* cf16en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT 6
+/* cf_array_cf_en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+/* cf18en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT 0
+/* cf19en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT 1
+/* cf20en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT 2
+/* cf21en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT 3
+/* cf22en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
+/* cf23en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5
+/* rule0en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT 6
+/* rule1en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT 7
+ u8 flags11;
+/* rule2en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT 0
+/* rule3en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT 1
+/* rule4en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT 2
+/* rule5en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
+/* rule6en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
+/* rule7en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
+/* rule8en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
+/* rule9en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
+ u8 flags12;
+/* rule10en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
+/* rule11en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
+/* rule12en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
+/* rule13en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
+/* rule14en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
+/* rule15en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
+/* rule16en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
+/* rule17en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
+ u8 flags13;
+/* rule18en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
+/* rule19en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
+/* rule20en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
+/* rule21en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
+/* rule22en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
+/* rule23en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
+/* rule24en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
+/* rule25en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+/* bit16 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT 0
+/* bit17 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT 1
+/* bit18 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT 2
+/* bit19 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+/* bit20 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT 4
+/* bit21 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
+/* cf23 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id /* byte2 */;
+ __le16 physical_q0 /* physical_q0 */;
+ __le16 e5_reserved1 /* physical_q1 */;
+ __le16 edpm_num_bds /* physical_q2 */;
+ __le16 tx_bd_cons /* word3 */;
+ __le16 tx_bd_prod /* word4 */;
+ __le16 tx_class /* word5 */;
+ __le16 conn_dpi /* conn_dpi */;
+ u8 byte3 /* byte3 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ u8 byte6 /* byte6 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+};
+
+
+struct e4_mstorm_eth_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
+#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0 /* word0 */;
+ __le16 word1 /* word1 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+};
+
+
+
+
+
+struct e4_xstorm_eth_hw_conn_ag_ctx {
+ u8 reserved0 /* cdu_validation */;
+ u8 eth_state /* state */;
+ u8 flags0;
+/* exist_in_qm0 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+/* exist_in_qm1 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
+/* exist_in_qm2 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
+/* exist_in_qm3 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+/* bit4 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
+/* cf_array_active */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1 /* bit6 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1 /* bit7 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1 /* bit8 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1 /* bit9 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
+/* bit10 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
+/* bit11 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
+/* bit12 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT 4
+/* bit13 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT 5
+/* bit14 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+/* bit15 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+/* timer0cf */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
+/* timer1cf */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
+/* timer2cf */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
+/* timer_stop_all */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3 /* cf11 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3 /* cf12 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3 /* cf13 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3 /* cf14 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3 /* cf15 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 /* cf16 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+/* cf_array_cf */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3 /* cf18 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 /* cf19 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf20 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3 /* cf21 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3 /* cf22 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+/* cf0en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
+/* cf1en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+/* cf2en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
+/* cf3en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
+/* cf4en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
+/* cf5en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
+/* cf6en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
+/* cf7en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
+/* cf8en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
+/* cf9en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+/* cf10en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
+/* cf11en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
+/* cf12en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
+/* cf13en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
+/* cf14en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
+/* cf15en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
+/* cf16en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+/* cf_array_cf_en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+/* cf18en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+/* cf19en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+/* cf20en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+/* cf21en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
+/* cf22en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+/* cf23en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+/* rule0en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
+/* rule1en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+/* rule2en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
+/* rule3en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
+/* rule4en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+/* rule5en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
+/* rule6en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
+/* rule7en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
+/* rule8en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+/* rule9en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+/* rule10en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
+/* rule11en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
+/* rule12en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+/* rule13en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+/* rule14en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
+/* rule15en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
+/* rule16en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
+/* rule17en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+/* rule18en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
+/* rule19en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
+/* rule20en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+/* rule21en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+/* rule22en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+/* rule23en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+/* rule24en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+/* rule25en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+/* bit16 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+/* bit17 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+/* bit18 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+/* bit19 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+/* bit20 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+/* bit21 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 /* cf23 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id /* byte2 */;
+ __le16 physical_q0 /* physical_q0 */;
+ __le16 e5_reserved1 /* physical_q1 */;
+ __le16 edpm_num_bds /* physical_q2 */;
+ __le16 tx_bd_cons /* word3 */;
+ __le16 tx_bd_prod /* word4 */;
+ __le16 tx_class /* word5 */;
+ __le16 conn_dpi /* conn_dpi */;
+};
+
+
+
+/*
+ * GFT CAM line struct
+ */
+struct gft_cam_line {
+ __le32 camline;
+/* Indication if the line is valid. */
+#define GFT_CAM_LINE_VALID_MASK 0x1
+#define GFT_CAM_LINE_VALID_SHIFT 0
+/* Data bits, the word that is compared with the profile key */
+#define GFT_CAM_LINE_DATA_MASK 0x3FFF
+#define GFT_CAM_LINE_DATA_SHIFT 1
+/* Mask bits, indicating the bits in the data that are don't-care */
+#define GFT_CAM_LINE_MASK_BITS_MASK 0x3FFF
+#define GFT_CAM_LINE_MASK_BITS_SHIFT 15
+#define GFT_CAM_LINE_RESERVED1_MASK 0x7
+#define GFT_CAM_LINE_RESERVED1_SHIFT 29
+};
+
+
+/*
+ * GFT CAM line struct (for driversim use)
+ */
+struct gft_cam_line_mapped {
+ __le32 camline;
+/* Indication if the line is valid. */
+#define GFT_CAM_LINE_MAPPED_VALID_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_VALID_SHIFT 0
+/* use enum gft_profile_ip_version */
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_SHIFT 1
+/* use enum gft_profile_ip_version */
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_SHIFT 2
+/* use enum gft_profile_upper_protocol_type */
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_SHIFT 3
+/* use enum gft_profile_tunnel_type */
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_SHIFT 7
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_PF_ID_SHIFT 11
+/* use enum gft_profile_ip_version */
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_SHIFT 15
+/* use enum gft_profile_ip_version */
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_SHIFT 16
+/* use enum gft_profile_upper_protocol_type */
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_SHIFT 17
+/* use enum gft_profile_tunnel_type */
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_SHIFT 21
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_SHIFT 25
+#define GFT_CAM_LINE_MAPPED_RESERVED1_MASK 0x7
+#define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29
+};
+
+
+union gft_cam_line_union {
+ struct gft_cam_line cam_line;
+ struct gft_cam_line_mapped cam_line_mapped;
+};
+
+
+/*
+ * Used in gft_profile_key: Indication for ip version
+ */
+enum gft_profile_ip_version {
+ GFT_PROFILE_IPV4 = 0,
+ GFT_PROFILE_IPV6 = 1,
+ MAX_GFT_PROFILE_IP_VERSION
+};
+
+
+/*
+ * Profile key struct for GFT logic in PRS
+ */
+struct gft_profile_key {
+ __le16 profile_key;
+/* use enum gft_profile_ip_version */
+#define GFT_PROFILE_KEY_IP_VERSION_MASK 0x1
+#define GFT_PROFILE_KEY_IP_VERSION_SHIFT 0
+/* use enum gft_profile_ip_version */
+#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_MASK 0x1
+#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_SHIFT 1
+/* use enum gft_profile_upper_protocol_type */
+#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK 0xF
+#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT 2
+/* use enum gft_profile_tunnel_type */
+#define GFT_PROFILE_KEY_TUNNEL_TYPE_MASK 0xF
+#define GFT_PROFILE_KEY_TUNNEL_TYPE_SHIFT 6
+#define GFT_PROFILE_KEY_PF_ID_MASK 0xF
+#define GFT_PROFILE_KEY_PF_ID_SHIFT 10
+#define GFT_PROFILE_KEY_RESERVED0_MASK 0x3
+#define GFT_PROFILE_KEY_RESERVED0_SHIFT 14
+};
+
+
+/*
+ * Used in gft_profile_key: Indication for tunnel type
+ */
+enum gft_profile_tunnel_type {
+ GFT_PROFILE_NO_TUNNEL = 0,
+ GFT_PROFILE_VXLAN_TUNNEL = 1,
+ GFT_PROFILE_GRE_MAC_OR_NVGRE_TUNNEL = 2,
+ GFT_PROFILE_GRE_IP_TUNNEL = 3,
+ GFT_PROFILE_GENEVE_MAC_TUNNEL = 4,
+ GFT_PROFILE_GENEVE_IP_TUNNEL = 5,
+ MAX_GFT_PROFILE_TUNNEL_TYPE
+};
+
+
+/*
+ * Used in gft_profile_key: Indication for protocol type
+ */
+enum gft_profile_upper_protocol_type {
+ GFT_PROFILE_ROCE_PROTOCOL = 0,
+ GFT_PROFILE_RROCE_PROTOCOL = 1,
+ GFT_PROFILE_FCOE_PROTOCOL = 2,
+ GFT_PROFILE_ICMP_PROTOCOL = 3,
+ GFT_PROFILE_ARP_PROTOCOL = 4,
+ GFT_PROFILE_USER_TCP_SRC_PORT_1_INNER = 5,
+ GFT_PROFILE_USER_TCP_DST_PORT_1_INNER = 6,
+ GFT_PROFILE_TCP_PROTOCOL = 7,
+ GFT_PROFILE_USER_UDP_DST_PORT_1_INNER = 8,
+ GFT_PROFILE_USER_UDP_DST_PORT_2_OUTER = 9,
+ GFT_PROFILE_UDP_PROTOCOL = 10,
+ GFT_PROFILE_USER_IP_1_INNER = 11,
+ GFT_PROFILE_USER_IP_2_OUTER = 12,
+ GFT_PROFILE_USER_ETH_1_INNER = 13,
+ GFT_PROFILE_USER_ETH_2_OUTER = 14,
+ GFT_PROFILE_RAW = 15,
+ MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE
+};
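+
+/* Illustrative sketch (not part of the original HSI): packing a mapped GFT
+ * CAM line for a non-tunneled IPv4/TCP profile using the MASK/SHIFT pairs and
+ * enums above. The helper is hypothetical; whether the corresponding *_MASK_*
+ * (don't-care) bits must also be set depends on firmware usage not shown here.
+ */
+#if 0	/* example only - not compiled */
+static u32 example_gft_cam_line_ipv4_tcp(u8 pf_id)
+{
+	u32 line = 0;
+
+	line |= 1 << GFT_CAM_LINE_MAPPED_VALID_SHIFT;
+	line |= GFT_PROFILE_IPV4 << GFT_CAM_LINE_MAPPED_IP_VERSION_SHIFT;
+	line |= GFT_PROFILE_TCP_PROTOCOL <<
+		GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_SHIFT;
+	line |= GFT_PROFILE_NO_TUNNEL << GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_SHIFT;
+	line |= (u32)(pf_id & GFT_CAM_LINE_MAPPED_PF_ID_MASK) <<
+		GFT_CAM_LINE_MAPPED_PF_ID_SHIFT;
+
+	return line;
+}
+#endif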
+
+
+/*
+ * GFT RAM line struct
+ */
+struct gft_ram_line {
+ __le32 lo;
+#define GFT_RAM_LINE_VLAN_SELECT_MASK 0x3
+#define GFT_RAM_LINE_VLAN_SELECT_SHIFT 0
+#define GFT_RAM_LINE_TUNNEL_ENTROPHY_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_ENTROPHY_SHIFT 2
+#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_SHIFT 3
+#define GFT_RAM_LINE_TUNNEL_TTL_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_TTL_SHIFT 4
+#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_SHIFT 5
+#define GFT_RAM_LINE_TUNNEL_DST_PORT_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DST_PORT_SHIFT 6
+#define GFT_RAM_LINE_TUNNEL_SRC_PORT_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_PORT_SHIFT 7
+#define GFT_RAM_LINE_TUNNEL_DSCP_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DSCP_SHIFT 8
+#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_SHIFT 9
+#define GFT_RAM_LINE_TUNNEL_DST_IP_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DST_IP_SHIFT 10
+#define GFT_RAM_LINE_TUNNEL_SRC_IP_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_IP_SHIFT 11
+#define GFT_RAM_LINE_TUNNEL_PRIORITY_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_PRIORITY_SHIFT 12
+#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_SHIFT 13
+#define GFT_RAM_LINE_TUNNEL_VLAN_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_VLAN_SHIFT 14
+#define GFT_RAM_LINE_TUNNEL_DST_MAC_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DST_MAC_SHIFT 15
+#define GFT_RAM_LINE_TUNNEL_SRC_MAC_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_MAC_SHIFT 16
+#define GFT_RAM_LINE_TTL_EQUAL_ONE_MASK 0x1
+#define GFT_RAM_LINE_TTL_EQUAL_ONE_SHIFT 17
+#define GFT_RAM_LINE_TTL_MASK 0x1
+#define GFT_RAM_LINE_TTL_SHIFT 18
+#define GFT_RAM_LINE_ETHERTYPE_MASK 0x1
+#define GFT_RAM_LINE_ETHERTYPE_SHIFT 19
+#define GFT_RAM_LINE_RESERVED0_MASK 0x1
+#define GFT_RAM_LINE_RESERVED0_SHIFT 20
+#define GFT_RAM_LINE_TCP_FLAG_FIN_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_FIN_SHIFT 21
+#define GFT_RAM_LINE_TCP_FLAG_SYN_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_SYN_SHIFT 22
+#define GFT_RAM_LINE_TCP_FLAG_RST_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_RST_SHIFT 23
+#define GFT_RAM_LINE_TCP_FLAG_PSH_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_PSH_SHIFT 24
+#define GFT_RAM_LINE_TCP_FLAG_ACK_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_ACK_SHIFT 25
+#define GFT_RAM_LINE_TCP_FLAG_URG_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_URG_SHIFT 26
+#define GFT_RAM_LINE_TCP_FLAG_ECE_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_ECE_SHIFT 27
+#define GFT_RAM_LINE_TCP_FLAG_CWR_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_CWR_SHIFT 28
+#define GFT_RAM_LINE_TCP_FLAG_NS_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_NS_SHIFT 29
+#define GFT_RAM_LINE_DST_PORT_MASK 0x1
+#define GFT_RAM_LINE_DST_PORT_SHIFT 30
+#define GFT_RAM_LINE_SRC_PORT_MASK 0x1
+#define GFT_RAM_LINE_SRC_PORT_SHIFT 31
+ __le32 hi;
+#define GFT_RAM_LINE_DSCP_MASK 0x1
+#define GFT_RAM_LINE_DSCP_SHIFT 0
+#define GFT_RAM_LINE_OVER_IP_PROTOCOL_MASK 0x1
+#define GFT_RAM_LINE_OVER_IP_PROTOCOL_SHIFT 1
+#define GFT_RAM_LINE_DST_IP_MASK 0x1
+#define GFT_RAM_LINE_DST_IP_SHIFT 2
+#define GFT_RAM_LINE_SRC_IP_MASK 0x1
+#define GFT_RAM_LINE_SRC_IP_SHIFT 3
+#define GFT_RAM_LINE_PRIORITY_MASK 0x1
+#define GFT_RAM_LINE_PRIORITY_SHIFT 4
+#define GFT_RAM_LINE_PROVIDER_VLAN_MASK 0x1
+#define GFT_RAM_LINE_PROVIDER_VLAN_SHIFT 5
+#define GFT_RAM_LINE_VLAN_MASK 0x1
+#define GFT_RAM_LINE_VLAN_SHIFT 6
+#define GFT_RAM_LINE_DST_MAC_MASK 0x1
+#define GFT_RAM_LINE_DST_MAC_SHIFT 7
+#define GFT_RAM_LINE_SRC_MAC_MASK 0x1
+#define GFT_RAM_LINE_SRC_MAC_SHIFT 8
+#define GFT_RAM_LINE_TENANT_ID_MASK 0x1
+#define GFT_RAM_LINE_TENANT_ID_SHIFT 9
+#define GFT_RAM_LINE_RESERVED1_MASK 0x3FFFFF
+#define GFT_RAM_LINE_RESERVED1_SHIFT 10
+};
+
+
+/*
+ * Used in the first 2 bits of gft_ram_line: indication for VLAN selection
+ */
+enum gft_vlan_select {
+ INNER_PROVIDER_VLAN = 0,
+ INNER_VLAN = 1,
+ OUTER_PROVIDER_VLAN = 2,
+ OUTER_VLAN = 3,
+ MAX_GFT_VLAN_SELECT
+};
+
+
+#endif /* __ECORE_HSI_ETH__ */
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_hsi_init_func.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_hsi_init_func.h
new file mode 100644
index 00000000..fca74791
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_hsi_init_func.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_HSI_INIT_FUNC__
+#define __ECORE_HSI_INIT_FUNC__
+/********************************/
+/* HSI Init Functions constants */
+/********************************/
+
+/* Number of VLAN priorities */
+#define NUM_OF_VLAN_PRIORITIES 8
+
+
+/*
+ * BRB RAM init requirements
+ */
+struct init_brb_ram_req {
+ __le32 guranteed_per_tc /* guaranteed size per TC, in bytes */;
+ __le32 headroom_per_tc /* headroom size per TC, in bytes */;
+ __le32 min_pkt_size /* min packet size, in bytes */;
+ __le32 max_ports_per_engine /* maximum number of ports per engine */;
+ u8 num_active_tcs[MAX_NUM_PORTS] /* number of active TCs per port */;
+};
+
+
+/*
+ * ETS per-TC init requirements
+ */
+struct init_ets_tc_req {
+/* if set, this TC participates in the arbitration with a strict priority
+ * (the priority is equal to the TC ID)
+ */
+ u8 use_sp;
+/* if set, this TC participates in the arbitration with a WFQ weight
+ * (indicated by the weight field)
+ */
+ u8 use_wfq;
+/* An arbitration weight. Valid only if use_wfq is set. */
+ __le16 weight;
+};
+
+/*
+ * ETS init requirements
+ */
+struct init_ets_req {
+ __le32 mtu /* Max packet size (in bytes) */;
+/* ETS initialization requirements per TC. */
+ struct init_ets_tc_req tc_req[NUM_OF_TCS];
+};
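+
+/* Illustrative sketch (not part of the original HSI): one possible ETS request
+ * with TC0 in strict priority and TC1/TC2 sharing bandwidth 2:1 via WFQ. The
+ * values are examples only; the __le16/__le32 fields would additionally need
+ * cpu-to-LE conversion on big-endian hosts.
+ */
+#if 0	/* example only - not compiled */
+static const struct init_ets_req example_ets_req = {
+	.mtu = 9600,				/* max packet size in bytes */
+	.tc_req[0] = { .use_sp = 1 },		/* TC0: strict priority */
+	.tc_req[1] = { .use_wfq = 1, .weight = 2 },
+	.tc_req[2] = { .use_wfq = 1, .weight = 1 },
+};
+#endif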
+
+
+
+/*
+ * NIG LB RL init requirements
+ */
+struct init_nig_lb_rl_req {
+/* Global MAC+LB RL rate (in Mbps). If set to 0, the RL will be disabled. */
+ __le16 lb_mac_rate;
+/* Global LB RL rate (in Mbps). If set to 0, the RL will be disabled. */
+ __le16 lb_rate;
+ __le32 mtu /* Max packet size (in bytes) */;
+/* RL rate per physical TC (in Mbps). If set to 0, the RL will be disabled. */
+ __le16 tc_rate[NUM_OF_PHYS_TCS];
+};
+
+
+/*
+ * NIG TC mapping for each priority
+ */
+struct init_nig_pri_tc_map_entry {
+ u8 tc_id /* the mapped TC ID */;
+ u8 valid /* indicates if the mapping entry is valid */;
+};
+
+
+/*
+ * NIG priority to TC map init requirements
+ */
+struct init_nig_pri_tc_map_req {
+ struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES];
+};
+
+
+/*
+ * QM per-port init parameters
+ */
+struct init_qm_port_params {
+ u8 active /* Indicates if this port is active */;
+/* Vector of valid bits for active TCs used by this port */
+ u8 active_phys_tcs;
+/* number of PBF command lines that can be used by this port */
+ __le16 num_pbf_cmd_lines;
+/* number of BTB blocks that can be used by this port */
+ __le16 num_btb_blocks;
+ __le16 reserved;
+};
+
+
+/*
+ * QM per-PQ init parameters
+ */
+struct init_qm_pq_params {
+ u8 vport_id /* VPORT ID */;
+ u8 tc_id /* TC ID */;
+ u8 wrr_group /* WRR group */;
+/* Indicates if a rate limiter should be allocated for the PQ (0/1) */
+ u8 rl_valid;
+};
+
+
+/*
+ * QM per-vport init parameters
+ */
+struct init_qm_vport_params {
+/* Rate limit in Mb/sec units. A value of 0 means don't configure. Ignored if
+ * VPORT RL is globally disabled.
+ */
+ __le32 vport_rl;
+/* WFQ weight. A value of 0 means don't configure. Ignored if VPORT WFQ is
+ * globally disabled.
+ */
+ __le16 vport_wfq;
+/* the first Tx PQ ID associated with this VPORT for each TC. */
+ __le16 first_tx_pq_id[NUM_OF_TCS];
+};
+
+#endif /* __ECORE_HSI_INIT_FUNC__ */
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_hsi_init_tool.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_hsi_init_tool.h
new file mode 100644
index 00000000..1f57e9b2
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_hsi_init_tool.h
@@ -0,0 +1,454 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_HSI_INIT_TOOL__
+#define __ECORE_HSI_INIT_TOOL__
+/**************************************/
+/* Init Tool HSI constants and macros */
+/**************************************/
+
+/* Width of GRC address in bits (addresses are specified in dwords) */
+#define GRC_ADDR_BITS 23
+#define MAX_GRC_ADDR ((1 << GRC_ADDR_BITS) - 1)
+
+/* indicates an init that should be applied to any phase ID */
+#define ANY_PHASE_ID 0xffff
+
+/* Max size in dwords of a zipped array */
+#define MAX_ZIPPED_SIZE 8192
+
+enum chip_ids {
+ CHIP_BB,
+ CHIP_K2,
+ CHIP_E5,
+ MAX_CHIP_IDS
+};
+
+
+struct fw_asserts_ram_section {
+/* The offset of the section in the RAM in RAM lines (64-bit units) */
+ __le16 section_ram_line_offset;
+/* The size of the section in RAM lines (64-bit units) */
+ __le16 section_ram_line_size;
+/* The offset of the asserts list within the section in dwords */
+ u8 list_dword_offset;
+/* The size of an assert list element in dwords */
+ u8 list_element_dword_size;
+ u8 list_num_elements /* The number of elements in the asserts list */;
+/* The offset of the next list index field within the section in dwords */
+ u8 list_next_index_dword_offset;
+};
+
+
+struct fw_ver_num {
+ u8 major /* Firmware major version number */;
+ u8 minor /* Firmware minor version number */;
+ u8 rev /* Firmware revision version number */;
+/* Firmware engineering version number (for bootleg versions) */
+ u8 eng;
+};
+
+struct fw_ver_info {
+ __le16 tools_ver /* Tools version number */;
+ u8 image_id /* FW image ID (e.g. main, l2b, kuku) */;
+ u8 reserved1;
+ struct fw_ver_num num /* FW version number */;
+ __le32 timestamp /* FW Timestamp in unix time (sec. since 1970) */;
+ __le32 reserved2;
+};
+
+struct fw_info {
+ struct fw_ver_info ver /* FW version information */;
+/* Info regarding the FW asserts section in the Storm RAM */
+ struct fw_asserts_ram_section fw_asserts_section;
+};
+
+
+struct fw_info_location {
+/* GRC address where the fw_info struct is located. */
+ __le32 grc_addr;
+/* Size of the fw_info structure (that's located at the grc_addr). */
+ __le32 size;
+};
+
+/*
+ * Binary buffer header
+ */
+struct bin_buffer_hdr {
+/* buffer offset in bytes from the beginning of the binary file */
+ __le32 offset;
+ __le32 length /* buffer length in bytes */;
+};
+
+
+/*
+ * binary init buffer types
+ */
+enum bin_init_buffer_type {
+ BIN_BUF_INIT_FW_VER_INFO /* fw_ver_info struct */,
+ BIN_BUF_INIT_CMD /* init commands */,
+ BIN_BUF_INIT_VAL /* init data */,
+ BIN_BUF_INIT_MODE_TREE /* init modes tree */,
+ BIN_BUF_INIT_IRO /* internal RAM offsets */,
+ MAX_BIN_INIT_BUFFER_TYPE
+};
+
+
+/*
+ * init array header: raw
+ */
+struct init_array_raw_hdr {
+ __le32 data;
+/* Init array type, from init_array_types enum */
+#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0
+/* init array params */
+#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF
+#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4
+};
+
+/*
+ * init array header: standard
+ */
+struct init_array_standard_hdr {
+ __le32 data;
+/* Init array type, from init_array_types enum */
+#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0
+/* Init array size (in dwords) */
+#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF
+#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4
+};
+
+/*
+ * init array header: zipped
+ */
+struct init_array_zipped_hdr {
+ __le32 data;
+/* Init array type, from init_array_types enum */
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0
+/* Init array zipped size (in bytes) */
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4
+};
+
+/*
+ * init array header: pattern
+ */
+struct init_array_pattern_hdr {
+ __le32 data;
+/* Init array type, from init_array_types enum */
+#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0
+/* pattern size in dwords */
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT 4
+/* pattern repetitions */
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK 0xFFFFFF
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT 8
+};
+
+/*
+ * init array header union
+ */
+union init_array_hdr {
+ struct init_array_raw_hdr raw /* raw init array header */;
+/* standard init array header */
+ struct init_array_standard_hdr standard;
+ struct init_array_zipped_hdr zipped /* zipped init array header */;
+ struct init_array_pattern_hdr pattern /* pattern init array header */;
+};
+
+
+enum init_modes {
+ MODE_BB_A0_DEPRECATED,
+ MODE_BB,
+ MODE_K2,
+ MODE_ASIC,
+ MODE_EMUL_REDUCED,
+ MODE_EMUL_FULL,
+ MODE_FPGA,
+ MODE_CHIPSIM,
+ MODE_SF,
+ MODE_MF_SD,
+ MODE_MF_SI,
+ MODE_PORTS_PER_ENG_1,
+ MODE_PORTS_PER_ENG_2,
+ MODE_PORTS_PER_ENG_4,
+ MODE_100G,
+ MODE_E5,
+ MAX_INIT_MODES
+};
+
+
+enum init_phases {
+ PHASE_ENGINE,
+ PHASE_PORT,
+ PHASE_PF,
+ PHASE_VF,
+ PHASE_QM_PF,
+ MAX_INIT_PHASES
+};
+
+
+enum init_split_types {
+ SPLIT_TYPE_NONE,
+ SPLIT_TYPE_PORT,
+ SPLIT_TYPE_PF,
+ SPLIT_TYPE_PORT_PF,
+ SPLIT_TYPE_VF,
+ MAX_INIT_SPLIT_TYPES
+};
+
+
+/*
+ * init array types
+ */
+enum init_array_types {
+ INIT_ARR_STANDARD /* standard init array */,
+ INIT_ARR_ZIPPED /* zipped init array */,
+ INIT_ARR_PATTERN /* a repeated pattern */,
+ MAX_INIT_ARRAY_TYPES
+};
+
+
+
+/*
+ * init operation: callback
+ */
+struct init_callback_op {
+ __le32 op_data;
+/* Init operation, from init_op_types enum */
+#define INIT_CALLBACK_OP_OP_MASK 0xF
+#define INIT_CALLBACK_OP_OP_SHIFT 0
+#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF
+#define INIT_CALLBACK_OP_RESERVED_SHIFT 4
+ __le16 callback_id /* Callback ID */;
+ __le16 block_id /* Blocks ID */;
+};
+
+
+/*
+ * init operation: delay
+ */
+struct init_delay_op {
+ __le32 op_data;
+/* Init operation, from init_op_types enum */
+#define INIT_DELAY_OP_OP_MASK 0xF
+#define INIT_DELAY_OP_OP_SHIFT 0
+#define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF
+#define INIT_DELAY_OP_RESERVED_SHIFT 4
+ __le32 delay /* delay in us */;
+};
+
+
+/*
+ * init operation: if_mode
+ */
+struct init_if_mode_op {
+ __le32 op_data;
+/* Init operation, from init_op_types enum */
+#define INIT_IF_MODE_OP_OP_MASK 0xF
+#define INIT_IF_MODE_OP_OP_SHIFT 0
+#define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF
+#define INIT_IF_MODE_OP_RESERVED1_SHIFT 4
+/* Commands to skip if the modes don't match */
+#define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF
+#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16
+ __le16 reserved2;
+/* offset (in bytes) in modes expression buffer */
+ __le16 modes_buf_offset;
+};
+
+
+/*
+ * init operation: if_phase
+ */
+struct init_if_phase_op {
+ __le32 op_data;
+/* Init operation, from init_op_types enum */
+#define INIT_IF_PHASE_OP_OP_MASK 0xF
+#define INIT_IF_PHASE_OP_OP_SHIFT 0
+/* Indicates if DMAE is enabled in this phase */
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK 0x1
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4
+#define INIT_IF_PHASE_OP_RESERVED1_MASK 0x7FF
+#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 5
+/* Commands to skip if the phases don't match */
+#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF
+#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16
+ __le32 phase_data;
+#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF /* Init phase */
+#define INIT_IF_PHASE_OP_PHASE_SHIFT 0
+#define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF
+#define INIT_IF_PHASE_OP_RESERVED2_SHIFT 8
+#define INIT_IF_PHASE_OP_PHASE_ID_MASK 0xFFFF /* Init phase ID */
+#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT 16
+};
+
+
+/*
+ * init mode operators
+ */
+enum init_mode_ops {
+ INIT_MODE_OP_NOT /* init mode not operator */,
+ INIT_MODE_OP_OR /* init mode or operator */,
+ INIT_MODE_OP_AND /* init mode and operator */,
+ MAX_INIT_MODE_OPS
+};
+
+
+/*
+ * init operation: raw
+ */
+struct init_raw_op {
+ __le32 op_data;
+/* Init operation, from init_op_types enum */
+#define INIT_RAW_OP_OP_MASK 0xF
+#define INIT_RAW_OP_OP_SHIFT 0
+#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF /* init param 1 */
+#define INIT_RAW_OP_PARAM1_SHIFT 4
+ __le32 param2 /* Init param 2 */;
+};
+
+/*
+ * init array params
+ */
+struct init_op_array_params {
+ __le16 size /* array size in dwords */;
+ __le16 offset /* array start offset in dwords */;
+};
+
+/*
+ * Write init operation arguments
+ */
+union init_write_args {
+/* value to write, used when init source is INIT_SRC_INLINE */
+ __le32 inline_val;
+/* number of zeros to write, used when init source is INIT_SRC_ZEROS */
+ __le32 zeros_count;
+/* array offset to write, used when init source is INIT_SRC_ARRAY */
+ __le32 array_offset;
+/* runtime array params to write, used when init source is INIT_SRC_RUNTIME */
+ struct init_op_array_params runtime;
+};
+
+/*
+ * init operation: write
+ */
+struct init_write_op {
+ __le32 data;
+/* init operation, from init_op_types enum */
+#define INIT_WRITE_OP_OP_MASK 0xF
+#define INIT_WRITE_OP_OP_SHIFT 0
+/* init source type, taken from init_source_types enum */
+#define INIT_WRITE_OP_SOURCE_MASK 0x7
+#define INIT_WRITE_OP_SOURCE_SHIFT 4
+#define INIT_WRITE_OP_RESERVED_MASK 0x1
+#define INIT_WRITE_OP_RESERVED_SHIFT 7
+/* indicates if the register is wide-bus */
+#define INIT_WRITE_OP_WIDE_BUS_MASK 0x1
+#define INIT_WRITE_OP_WIDE_BUS_SHIFT 8
+/* internal (absolute) GRC address, in dwords */
+#define INIT_WRITE_OP_ADDRESS_MASK 0x7FFFFF
+#define INIT_WRITE_OP_ADDRESS_SHIFT 9
+ union init_write_args args /* Write init operation arguments */;
+};
+
+/*
+ * init operation: read
+ */
+struct init_read_op {
+ __le32 op_data;
+/* init operation, from init_op_types enum */
+#define INIT_READ_OP_OP_MASK 0xF
+#define INIT_READ_OP_OP_SHIFT 0
+/* polling type, from init_poll_types enum */
+#define INIT_READ_OP_POLL_TYPE_MASK 0xF
+#define INIT_READ_OP_POLL_TYPE_SHIFT 4
+#define INIT_READ_OP_RESERVED_MASK 0x1
+#define INIT_READ_OP_RESERVED_SHIFT 8
+/* internal (absolute) GRC address, in dwords */
+#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF
+#define INIT_READ_OP_ADDRESS_SHIFT 9
+/* expected polling value, used only when polling is done */
+ __le32 expected_val;
+};
+
+/*
+ * Init operations union
+ */
+union init_op {
+ struct init_raw_op raw /* raw init operation */;
+ struct init_write_op write /* write init operation */;
+ struct init_read_op read /* read init operation */;
+ struct init_if_mode_op if_mode /* if_mode init operation */;
+ struct init_if_phase_op if_phase /* if_phase init operation */;
+ struct init_callback_op callback /* callback init operation */;
+ struct init_delay_op delay /* delay init operation */;
+};
+
+
+
+/*
+ * Init command operation types
+ */
+enum init_op_types {
+ INIT_OP_READ /* GRC read init command */,
+ INIT_OP_WRITE /* GRC write init command */,
+/* Skip init commands if the init modes expression doesn't match */
+ INIT_OP_IF_MODE,
+/* Skip init commands if the init phase doesn't match */
+ INIT_OP_IF_PHASE,
+ INIT_OP_DELAY /* delay init command */,
+ INIT_OP_CALLBACK /* callback init command */,
+ MAX_INIT_OP_TYPES
+};
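+
+/* Illustrative sketch (not part of the original HSI): extracting the operation
+ * type from a raw init command. The helper is hypothetical; it only shows that
+ * the 4 low bits of op_data carry the init_op_types value.
+ */
+#if 0	/* example only - not compiled */
+static enum init_op_types example_init_op_type(const union init_op *op)
+{
+	u32 data = OSAL_LE32_TO_CPU(op->raw.op_data);
+
+	return (enum init_op_types)((data >> INIT_RAW_OP_OP_SHIFT) &
+				    INIT_RAW_OP_OP_MASK);
+}
+#endif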
+
+
+/*
+ * init polling types
+ */
+enum init_poll_types {
+ INIT_POLL_NONE /* No polling */,
+ INIT_POLL_EQ /* poll until the read value equals the expected value */,
+ INIT_POLL_OR /* poll until (read value | expected value) is non-zero */,
+ INIT_POLL_AND /* poll until (read value & expected value) == expected value */,
+ MAX_INIT_POLL_TYPES
+};
+
+
+
+
+/*
+ * init source types
+ */
+enum init_source_types {
+ INIT_SRC_INLINE /* init value is included in the init command */,
+ INIT_SRC_ZEROS /* init value is all zeros */,
+ INIT_SRC_ARRAY /* init value is an array of values */,
+ INIT_SRC_RUNTIME /* init value is provided during runtime */,
+ MAX_INIT_SOURCE_TYPES
+};
+
+
+
+
+/*
+ * Internal RAM Offsets macro data
+ */
+struct iro {
+ __le32 base /* RAM field offset */;
+ __le16 m1 /* multiplier 1 */;
+ __le16 m2 /* multiplier 2 */;
+ __le16 m3 /* multiplier 3 */;
+ __le16 size /* RAM field size */;
+};
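+
+/* Illustrative sketch (not part of the original HSI): how the IRO multipliers
+ * are typically combined to compute a RAM field offset for a given set of
+ * runtime parameters (e.g. queue or stat counter IDs). The helper is an
+ * assumption for illustration; the generated IRO offset macros elsewhere in
+ * the driver are the real consumers of this struct.
+ */
+#if 0	/* example only - not compiled */
+static u32 example_iro_offset(const struct iro *p_iro, u16 p1, u16 p2, u16 p3)
+{
+	return OSAL_LE32_TO_CPU(p_iro->base) +
+	       (u32)OSAL_LE16_TO_CPU(p_iro->m1) * p1 +
+	       (u32)OSAL_LE16_TO_CPU(p_iro->m2) * p2 +
+	       (u32)OSAL_LE16_TO_CPU(p_iro->m3) * p3;
+}
+#endif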
+
+#endif /* __ECORE_HSI_INIT_TOOL__ */
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_hw.c b/src/seastar/dpdk/drivers/net/qede/base/ecore_hw.c
new file mode 100644
index 00000000..2bcc32d3
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_hw.c
@@ -0,0 +1,933 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "ecore_hsi_common.h"
+#include "ecore_status.h"
+#include "ecore.h"
+#include "ecore_hw.h"
+#include "reg_addr.h"
+#include "ecore_utils.h"
+#include "ecore_iov_api.h"
+
+#ifndef ASIC_ONLY
+#define ECORE_EMUL_FACTOR 2000
+#define ECORE_FPGA_FACTOR 200
+#endif
+
+#define ECORE_BAR_ACQUIRE_TIMEOUT 1000
+
+/* Invalid values */
+#define ECORE_BAR_INVALID_OFFSET (OSAL_CPU_TO_LE32(-1))
+
+struct ecore_ptt {
+ osal_list_entry_t list_entry;
+ unsigned int idx;
+ struct pxp_ptt_entry pxp;
+ u8 hwfn_id;
+};
+
+struct ecore_ptt_pool {
+ osal_list_t free_list;
+ osal_spinlock_t lock; /* ptt synchronized access */
+ struct ecore_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
+};
+
+enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ptt_pool *p_pool = OSAL_ALLOC(p_hwfn->p_dev,
+ GFP_KERNEL,
+ sizeof(*p_pool));
+ int i;
+
+ if (!p_pool)
+ return ECORE_NOMEM;
+
+ OSAL_LIST_INIT(&p_pool->free_list);
+ for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
+ p_pool->ptts[i].idx = i;
+ p_pool->ptts[i].pxp.offset = ECORE_BAR_INVALID_OFFSET;
+ p_pool->ptts[i].pxp.pretend.control = 0;
+ p_pool->ptts[i].hwfn_id = p_hwfn->my_id;
+
+ /* There are special PTT entries that are taken only by design.
+ * The rest are added to the list for general usage.
+ */
+ if (i >= RESERVED_PTT_MAX)
+ OSAL_LIST_PUSH_HEAD(&p_pool->ptts[i].list_entry,
+ &p_pool->free_list);
+ }
+
+ p_hwfn->p_ptt_pool = p_pool;
+ OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock);
+ OSAL_SPIN_LOCK_INIT(&p_pool->lock);
+
+ return ECORE_SUCCESS;
+}
+
+void ecore_ptt_invalidate(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ptt *p_ptt;
+ int i;
+
+ for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
+ p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
+ p_ptt->pxp.offset = ECORE_BAR_INVALID_OFFSET;
+ }
+}
+
+void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
+{
+ if (p_hwfn->p_ptt_pool)
+ OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ptt_pool->lock);
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool);
+}
+
+struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ptt *p_ptt;
+ unsigned int i;
+
+ /* Take the free PTT from the list */
+ for (i = 0; i < ECORE_BAR_ACQUIRE_TIMEOUT; i++) {
+ OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);
+ if (!OSAL_LIST_IS_EMPTY(&p_hwfn->p_ptt_pool->free_list)) {
+ p_ptt = OSAL_LIST_FIRST_ENTRY(
+ &p_hwfn->p_ptt_pool->free_list,
+ struct ecore_ptt, list_entry);
+ OSAL_LIST_REMOVE_ENTRY(&p_ptt->list_entry,
+ &p_hwfn->p_ptt_pool->free_list);
+
+ OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "allocated ptt %d\n", p_ptt->idx);
+
+ return p_ptt;
+ }
+
+ OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
+ OSAL_MSLEEP(1);
+ }
+
+ DP_NOTICE(p_hwfn, true,
+ "PTT acquire timeout - failed to allocate PTT\n");
+ return OSAL_NULL;
+}
+
+void ecore_ptt_release(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ /* This PTT should not be set to pretend if it is being released */
+ /* TODO - add some pretend sanity checks, to make sure pretend
+ * isn't set on this ptt
+ */
+
+ OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);
+ OSAL_LIST_PUSH_HEAD(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
+ OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
+}
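
For orientation, callers of this PTT API pair ecore_ptt_acquire() with
ecore_ptt_release() around BAR accesses. A minimal sketch under that
assumption; the function name and the error code chosen on failure are
illustrative, not part of the driver:

	/* Acquire a PTT window, read one register through it, release it.
	 * ecore_ptt_acquire() can return OSAL_NULL on timeout, so the
	 * result must be checked before use.
	 */
	static enum _ecore_status_t example_read_reg(struct ecore_hwfn *p_hwfn,
						     u32 hw_addr, u32 *p_val)
	{
		struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);

		if (!p_ptt)
			return ECORE_TIMEOUT;

		*p_val = ecore_rd(p_hwfn, p_ptt, hw_addr);
		ecore_ptt_release(p_hwfn, p_ptt);

		return ECORE_SUCCESS;
	}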
+
+u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ /* The HW is using DWORDS and we need to translate it to Bytes */
+ return OSAL_LE32_TO_CPU(p_ptt->pxp.offset) << 2;
+}
+
+static u32 ecore_ptt_config_addr(struct ecore_ptt *p_ptt)
+{
+ return PXP_PF_WINDOW_ADMIN_PER_PF_START +
+ p_ptt->idx * sizeof(struct pxp_ptt_entry);
+}
+
+u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt)
+{
+ return PXP_EXTERNAL_BAR_PF_WINDOW_START +
+ p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
+}
+
+void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 new_hw_addr)
+{
+ u32 prev_hw_addr;
+
+ prev_hw_addr = ecore_ptt_get_hw_addr(p_hwfn, p_ptt);
+
+ if (new_hw_addr == prev_hw_addr)
+ return;
+
+ /* Update PTT entry in admin window */
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "Updating PTT entry %d to offset 0x%x\n",
+ p_ptt->idx, new_hw_addr);
+
+ /* The HW is using DWORDS and the address is in Bytes */
+ p_ptt->pxp.offset = OSAL_CPU_TO_LE32(new_hw_addr >> 2);
+
+ REG_WR(p_hwfn,
+ ecore_ptt_config_addr(p_ptt) +
+ OFFSETOF(struct pxp_ptt_entry, offset),
+ OSAL_LE32_TO_CPU(p_ptt->pxp.offset));
+}
+
+static u32 ecore_set_ptt(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 hw_addr)
+{
+ u32 win_hw_addr = ecore_ptt_get_hw_addr(p_hwfn, p_ptt);
+ u32 offset;
+
+ offset = hw_addr - win_hw_addr;
+
+ if (p_ptt->hwfn_id != p_hwfn->my_id)
+ DP_NOTICE(p_hwfn, true,
+ "ptt[%d] of hwfn[%02x] is used by hwfn[%02x]!\n",
+ p_ptt->idx, p_ptt->hwfn_id, p_hwfn->my_id);
+
+ /* Verify the address is within the window */
+ if (hw_addr < win_hw_addr ||
+ offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
+ ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr);
+ offset = 0;
+ }
+
+ return ecore_ptt_get_bar_addr(p_ptt) + offset;
+}
+
+struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn,
+ enum reserved_ptts ptt_idx)
+{
+ if (ptt_idx >= RESERVED_PTT_MAX) {
+ DP_NOTICE(p_hwfn, true,
+ "Requested PTT %d is out of range\n", ptt_idx);
+ return OSAL_NULL;
+ }
+
+ return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
+}
+
+static bool ecore_is_reg_fifo_empty(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ bool is_empty = true;
+ u32 bar_addr;
+
+ if (!p_hwfn->p_dev->chk_reg_fifo)
+ goto out;
+
+ /* ecore_rd() cannot be used here since it calls this function */
+ bar_addr = ecore_set_ptt(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA);
+ is_empty = REG_RD(p_hwfn, bar_addr) == 0;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
+ OSAL_UDELAY(100);
+#endif
+
+out:
+ return is_empty;
+}
+
+void ecore_wr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 hw_addr, u32 val)
+{
+ bool prev_fifo_err;
+ u32 bar_addr;
+
+ prev_fifo_err = !ecore_is_reg_fifo_empty(p_hwfn, p_ptt);
+
+ bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
+ REG_WR(p_hwfn, bar_addr, val);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
+ bar_addr, hw_addr, val);
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
+ OSAL_UDELAY(100);
+#endif
+
+ OSAL_WARN(!prev_fifo_err && !ecore_is_reg_fifo_empty(p_hwfn, p_ptt),
+ "reg_fifo err was caused by a call to ecore_wr(0x%x, 0x%x)\n",
+ hw_addr, val);
+}
+
+u32 ecore_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr)
+{
+ bool prev_fifo_err;
+ u32 bar_addr, val;
+
+ prev_fifo_err = !ecore_is_reg_fifo_empty(p_hwfn, p_ptt);
+
+ bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
+ val = REG_RD(p_hwfn, bar_addr);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
+ bar_addr, hw_addr, val);
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
+ OSAL_UDELAY(100);
+#endif
+
+ OSAL_WARN(!prev_fifo_err && !ecore_is_reg_fifo_empty(p_hwfn, p_ptt),
+ "reg_fifo error was caused by a call to ecore_rd(0x%x)\n",
+ hw_addr);
+
+ return val;
+}
+
+static void ecore_memcpy_hw(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ void *addr,
+ u32 hw_addr, osal_size_t n, bool to_device)
+{
+ u32 dw_count, *host_addr, hw_offset;
+ osal_size_t quota, done = 0;
+ u32 OSAL_IOMEM *reg_addr;
+
+ while (done < n) {
+ quota = OSAL_MIN_T(osal_size_t, n - done,
+ PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);
+
+ if (IS_PF(p_hwfn->p_dev)) {
+ ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
+ hw_offset = ecore_ptt_get_bar_addr(p_ptt);
+ } else {
+ hw_offset = hw_addr + done;
+ }
+
+ dw_count = quota / 4;
+ host_addr = (u32 *)((u8 *)addr + done);
+ reg_addr = (u32 OSAL_IOMEM *)OSAL_REG_ADDR(p_hwfn, hw_offset);
+
+ if (to_device)
+ while (dw_count--)
+ DIRECT_REG_WR(p_hwfn, reg_addr++, *host_addr++);
+ else
+ while (dw_count--)
+ *host_addr++ = DIRECT_REG_RD(p_hwfn,
+ reg_addr++);
+
+ done += quota;
+ }
+}
+
+void ecore_memcpy_from(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ void *dest, u32 hw_addr, osal_size_t n)
+{
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n",
+ hw_addr, dest, hw_addr, (unsigned long)n);
+
+ ecore_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
+}
+
+void ecore_memcpy_to(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 hw_addr, void *src, osal_size_t n)
+{
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n",
+ hw_addr, hw_addr, src, (unsigned long)n);
+
+ ecore_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
+}
+
+void ecore_fid_pretend(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u16 fid)
+{
+ u16 control = 0;
+
+ SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);
+
+/* Every pretend undos prev pretends, including previous port pretend */
+
+ SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
+ SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+
+ if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
+ fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);
+
+ p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
+ p_ptt->pxp.pretend.fid.concrete_fid.fid = OSAL_CPU_TO_LE16(fid);
+
+ REG_WR(p_hwfn,
+ ecore_ptt_config_addr(p_ptt) +
+ OFFSETOF(struct pxp_ptt_entry, pretend),
+ *(u32 *)&p_ptt->pxp.pretend);
+}
+
+void ecore_port_pretend(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 port_id)
+{
+ u16 control = 0;
+
+ SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
+ SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+ p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
+
+ REG_WR(p_hwfn,
+ ecore_ptt_config_addr(p_ptt) +
+ OFFSETOF(struct pxp_ptt_entry, pretend),
+ *(u32 *)&p_ptt->pxp.pretend);
+}
+
+void ecore_port_unpretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ u16 control = 0;
+
+ SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
+ SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+
+ p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
+
+ REG_WR(p_hwfn,
+ ecore_ptt_config_addr(p_ptt) +
+ OFFSETOF(struct pxp_ptt_entry, pretend),
+ *(u32 *)&p_ptt->pxp.pretend);
+}
+
+u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid)
+{
+ u32 concrete_fid = 0;
+
+ SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
+ SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
+ SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);
+
+ return concrete_fid;
+}
+
+/* Not in use @DPDK
+ * Ecore HW lock
+ * =============
+ * Although the implementation is ready, today we don't have any flow that
+ * utilizes said locks - and we want to keep it this way.
+ * If this changes, this needs to be revisited.
+ */
+
+/* Ecore DMAE
+ * =============
+ */
+static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
+ const u8 is_src_type_grc,
+ const u8 is_dst_type_grc,
+ struct ecore_dmae_params *p_params)
+{
+ u16 opcode_b = 0;
+ u32 opcode = 0;
+
+ /* Whether the source is the PCIe or the GRC.
+ * 0- The source is the PCIe
+ * 1- The source is the GRC.
+ */
+ opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
+ : DMAE_CMD_SRC_MASK_PCIE) << DMAE_CMD_SRC_SHIFT;
+ opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
+ DMAE_CMD_SRC_PF_ID_SHIFT;
+
+ /* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
+ opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
+ : DMAE_CMD_DST_MASK_PCIE) << DMAE_CMD_DST_SHIFT;
+ opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
+ DMAE_CMD_DST_PF_ID_SHIFT;
+
+ /* DMAE_E4_TODO need to check which value to specify here. */
+ /* opcode |= (!b_complete_to_host)<< DMAE_CMD_C_DST_SHIFT; */
+
+ /* Whether to write a completion word to the completion destination:
+ * 0-Do not write a completion word
+ * 1-Write the completion word
+ */
+ opcode |= DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT;
+ opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK << DMAE_CMD_SRC_ADDR_RESET_SHIFT;
+
+ if (p_params->flags & ECORE_DMAE_FLAG_COMPLETION_DST)
+ opcode |= 1 << DMAE_CMD_COMP_FUNC_SHIFT;
+
+ /* swapping mode 3 - big endian. There should be a define ifdef'ed in
+ * the HSI somewhere; for now the mode is hard-coded here.
+ */
+ opcode |= DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT;
+
+ opcode |= p_hwfn->port_id << DMAE_CMD_PORT_ID_SHIFT;
+
+ /* reset source address in next go */
+ opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK << DMAE_CMD_SRC_ADDR_RESET_SHIFT;
+
+ /* reset dest address in next go */
+ opcode |= DMAE_CMD_DST_ADDR_RESET_MASK << DMAE_CMD_DST_ADDR_RESET_SHIFT;
+
+ /* SRC/DST VFID: all 1's - pf, otherwise VF id */
+ if (p_params->flags & ECORE_DMAE_FLAG_VF_SRC) {
+ opcode |= (1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT);
+ opcode_b |= (p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT);
+ } else {
+ opcode_b |= (DMAE_CMD_SRC_VF_ID_MASK <<
+ DMAE_CMD_SRC_VF_ID_SHIFT);
+ }
+ if (p_params->flags & ECORE_DMAE_FLAG_VF_DST) {
+ opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
+ opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
+ } else {
+ opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
+ }
+
+ p_hwfn->dmae_info.p_dmae_cmd->opcode = OSAL_CPU_TO_LE32(opcode);
+ p_hwfn->dmae_info.p_dmae_cmd->opcode_b = OSAL_CPU_TO_LE16(opcode_b);
+}
+
+static u32 ecore_dmae_idx_to_go_cmd(u8 idx)
+{
+ OSAL_BUILD_BUG_ON((DMAE_REG_GO_C31 - DMAE_REG_GO_C0) != 31 * 4);
+
+ /* All the DMAE 'go' registers form an array in internal memory */
+ return DMAE_REG_GO_C0 + (idx << 2);
+}
+
+static enum _ecore_status_t ecore_dmae_post_command(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
+ u8 idx_cmd = p_hwfn->dmae_info.channel, i;
+ enum _ecore_status_t ecore_status = ECORE_SUCCESS;
+
+ /* verify address is not OSAL_NULL */
+ if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) ||
+ ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) {
+ DP_NOTICE(p_hwfn, true,
+ "source or destination address 0 idx_cmd=%d\n"
+ "opcode = [0x%08x,0x%04x] len=0x%x"
+ " src=0x%x:%x dst=0x%x:%x\n",
+ idx_cmd,
+ OSAL_LE32_TO_CPU(p_command->opcode),
+ OSAL_LE16_TO_CPU(p_command->opcode_b),
+ OSAL_LE16_TO_CPU(p_command->length_dw),
+ OSAL_LE32_TO_CPU(p_command->src_addr_hi),
+ OSAL_LE32_TO_CPU(p_command->src_addr_lo),
+ OSAL_LE32_TO_CPU(p_command->dst_addr_hi),
+ OSAL_LE32_TO_CPU(p_command->dst_addr_lo));
+
+ return ECORE_INVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x]"
+ "len=0x%x src=0x%x:%x dst=0x%x:%x\n",
+ idx_cmd,
+ OSAL_LE32_TO_CPU(p_command->opcode),
+ OSAL_LE16_TO_CPU(p_command->opcode_b),
+ OSAL_LE16_TO_CPU(p_command->length_dw),
+ OSAL_LE32_TO_CPU(p_command->src_addr_hi),
+ OSAL_LE32_TO_CPU(p_command->src_addr_lo),
+ OSAL_LE32_TO_CPU(p_command->dst_addr_hi),
+ OSAL_LE32_TO_CPU(p_command->dst_addr_lo));
+
+ /* Copy the command to DMAE - this needs to be done before every call,
+ * since the source/dest addresses are reset between commands.
+ * The number of commands has been increased to 16 (previously 14).
+ * The first 9 DWs are the command registers, the 10th DW is the
+ * GO register, and the rest are result registers (which are read
+ * only by the client).
+ */
+ for (i = 0; i < DMAE_CMD_SIZE; i++) {
+ u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
+ *(((u32 *)p_command) + i) : 0;
+
+ ecore_wr(p_hwfn, p_ptt,
+ DMAE_REG_CMD_MEM +
+ (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
+ (i * sizeof(u32)), data);
+ }
+
+ ecore_wr(p_hwfn, p_ptt,
+ ecore_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE);
+
+ return ecore_status;
+}
+
+enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
+{
+ dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
+ struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
+ u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
+ u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;
+
+ *p_comp = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, sizeof(u32));
+ if (*p_comp == OSAL_NULL) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate `p_completion_word'\n");
+ goto err;
+ }
+
+ p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
+ *p_cmd = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
+ sizeof(struct dmae_cmd));
+ if (*p_cmd == OSAL_NULL) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate `struct dmae_cmd'\n");
+ goto err;
+ }
+
+ p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+ *p_buff = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
+ sizeof(u32) * DMAE_MAX_RW_SIZE);
+ if (*p_buff == OSAL_NULL) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate `intermediate_buffer'\n");
+ goto err;
+ }
+
+ p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
+
+ return ECORE_SUCCESS;
+err:
+ ecore_dmae_info_free(p_hwfn);
+ return ECORE_NOMEM;
+}
+
+void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn)
+{
+ dma_addr_t p_phys;
+
+ /* Just make sure no one is in the middle */
+ OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
+
+ if (p_hwfn->dmae_info.p_completion_word != OSAL_NULL) {
+ p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_hwfn->dmae_info.p_completion_word,
+ p_phys, sizeof(u32));
+ p_hwfn->dmae_info.p_completion_word = OSAL_NULL;
+ }
+
+ if (p_hwfn->dmae_info.p_dmae_cmd != OSAL_NULL) {
+ p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_hwfn->dmae_info.p_dmae_cmd,
+ p_phys, sizeof(struct dmae_cmd));
+ p_hwfn->dmae_info.p_dmae_cmd = OSAL_NULL;
+ }
+
+ if (p_hwfn->dmae_info.p_intermediate_buffer != OSAL_NULL) {
+ p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_hwfn->dmae_info.p_intermediate_buffer,
+ p_phys, sizeof(u32) * DMAE_MAX_RW_SIZE);
+ p_hwfn->dmae_info.p_intermediate_buffer = OSAL_NULL;
+ }
+
+ OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
+}
+
+static enum _ecore_status_t ecore_dmae_operation_wait(struct ecore_hwfn *p_hwfn)
+{
+ u32 wait_cnt_limit = 10000, wait_cnt = 0;
+ enum _ecore_status_t ecore_status = ECORE_SUCCESS;
+
+#ifndef ASIC_ONLY
+ u32 factor = (CHIP_REV_IS_EMUL(p_hwfn->p_dev) ?
+ ECORE_EMUL_FACTOR :
+ (CHIP_REV_IS_FPGA(p_hwfn->p_dev) ?
+ ECORE_FPGA_FACTOR : 1));
+
+ wait_cnt_limit *= factor;
+#endif
+
+ /* DMAE_E4_TODO : TODO check if we have to call any function other
+ * than BARRIER to sync the completion_word, since we are not
+ * using the volatile keyword for it
+ */
+ OSAL_BARRIER(p_hwfn->p_dev);
+ while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
+ OSAL_UDELAY(DMAE_MIN_WAIT_TIME);
+ if (++wait_cnt > wait_cnt_limit) {
+ DP_NOTICE(p_hwfn->p_dev, ECORE_MSG_HW,
+ "Timed-out waiting for operation to"
+ " complete. Completion word is 0x%08x"
+ " expected 0x%08x.\n",
+ *p_hwfn->dmae_info.p_completion_word,
+ DMAE_COMPLETION_VAL);
+ ecore_status = ECORE_TIMEOUT;
+ break;
+ }
+ /* to sync the completion_word since we are not
+ * using the volatile keyword for p_completion_word
+ */
+ OSAL_BARRIER(p_hwfn->p_dev);
+ }
+
+ if (ecore_status == ECORE_SUCCESS)
+ *p_hwfn->dmae_info.p_completion_word = 0;
+
+ return ecore_status;
+}
+
+static enum _ecore_status_t
+ecore_dmae_execute_sub_operation(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u64 src_addr,
+ u64 dst_addr,
+ u8 src_type, u8 dst_type, u32 length_dw)
+{
+ dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+ struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
+ enum _ecore_status_t ecore_status = ECORE_SUCCESS;
+
+ switch (src_type) {
+ case ECORE_DMAE_ADDRESS_GRC:
+ case ECORE_DMAE_ADDRESS_HOST_PHYS:
+ cmd->src_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(src_addr));
+ cmd->src_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(src_addr));
+ break;
+ /* for virt source addresses we use the intermediate buffer. */
+ case ECORE_DMAE_ADDRESS_HOST_VIRT:
+ cmd->src_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
+ cmd->src_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
+ OSAL_MEMCPY(&p_hwfn->dmae_info.p_intermediate_buffer[0],
+ (void *)(osal_uintptr_t)src_addr,
+ length_dw * sizeof(u32));
+ break;
+ default:
+ return ECORE_INVAL;
+ }
+
+ switch (dst_type) {
+ case ECORE_DMAE_ADDRESS_GRC:
+ case ECORE_DMAE_ADDRESS_HOST_PHYS:
+ cmd->dst_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(dst_addr));
+ cmd->dst_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(dst_addr));
+ break;
+ /* for virt destination address we use the intermediate buff. */
+ case ECORE_DMAE_ADDRESS_HOST_VIRT:
+ cmd->dst_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
+ cmd->dst_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
+ break;
+ default:
+ return ECORE_INVAL;
+ }
+
+ cmd->length_dw = OSAL_CPU_TO_LE16((u16)length_dw);
+
+ if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
+ src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
+ OSAL_DMA_SYNC(p_hwfn->p_dev,
+ (void *)HILO_U64(cmd->src_addr_hi,
+ cmd->src_addr_lo),
+ length_dw * sizeof(u32), false);
+
+ ecore_dmae_post_command(p_hwfn, p_ptt);
+
+ ecore_status = ecore_dmae_operation_wait(p_hwfn);
+
+ /* TODO - is it true ? */
+ if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
+ src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
+ OSAL_DMA_SYNC(p_hwfn->p_dev,
+ (void *)HILO_U64(cmd->src_addr_hi,
+ cmd->src_addr_lo),
+ length_dw * sizeof(u32), true);
+
+ if (ecore_status != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, ECORE_MSG_HW,
+ "ecore_dmae_host2grc: Wait Failed. source_addr"
+ " 0x%lx, grc_addr 0x%lx, size_in_dwords 0x%x\n",
+ (unsigned long)src_addr, (unsigned long)dst_addr,
+ length_dw);
+ return ecore_status;
+ }
+
+ if (dst_type == ECORE_DMAE_ADDRESS_HOST_VIRT)
+ OSAL_MEMCPY((void *)(osal_uintptr_t)(dst_addr),
+ &p_hwfn->dmae_info.p_intermediate_buffer[0],
+ length_dw * sizeof(u32));
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u64 src_addr,
+ u64 dst_addr,
+ u8 src_type,
+ u8 dst_type,
+ u32 size_in_dwords,
+ struct ecore_dmae_params *p_params)
+{
+ dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
+ u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
+ struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
+ u64 src_addr_split = 0, dst_addr_split = 0;
+ u16 length_limit = DMAE_MAX_RW_SIZE;
+ enum _ecore_status_t ecore_status = ECORE_SUCCESS;
+ u32 offset = 0;
+
+ if (p_hwfn->p_dev->recov_in_prog) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "Recovery is in progress. Avoid DMAE transaction [{src: addr 0x%lx, type %d}, {dst: addr 0x%lx, type %d}, size %d].\n",
+ (unsigned long)src_addr, src_type,
+ (unsigned long)dst_addr, dst_type,
+ size_in_dwords);
+ /* Return success to let the flow complete successfully
+ * w/o any error handling.
+ */
+ return ECORE_SUCCESS;
+ }
+
+ ecore_dmae_opcode(p_hwfn,
+ (src_type == ECORE_DMAE_ADDRESS_GRC),
+ (dst_type == ECORE_DMAE_ADDRESS_GRC), p_params);
+
+ cmd->comp_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
+ cmd->comp_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
+ cmd->comp_val = OSAL_CPU_TO_LE32(DMAE_COMPLETION_VAL);
+
+ /* Check if the grc_addr is valid like < MAX_GRC_OFFSET */
+ cnt_split = size_in_dwords / length_limit;
+ length_mod = size_in_dwords % length_limit;
+
+ src_addr_split = src_addr;
+ dst_addr_split = dst_addr;
+
+ for (i = 0; i <= cnt_split; i++) {
+ offset = length_limit * i;
+
+ if (!(p_params->flags & ECORE_DMAE_FLAG_RW_REPL_SRC)) {
+ if (src_type == ECORE_DMAE_ADDRESS_GRC)
+ src_addr_split = src_addr + offset;
+ else
+ src_addr_split = src_addr + (offset * 4);
+ }
+
+ if (dst_type == ECORE_DMAE_ADDRESS_GRC)
+ dst_addr_split = dst_addr + offset;
+ else
+ dst_addr_split = dst_addr + (offset * 4);
+
+ length_cur = (cnt_split == i) ? length_mod : length_limit;
+
+ /* might be zero on last iteration */
+ if (!length_cur)
+ continue;
+
+ ecore_status = ecore_dmae_execute_sub_operation(p_hwfn,
+ p_ptt,
+ src_addr_split,
+ dst_addr_split,
+ src_type,
+ dst_type,
+ length_cur);
+ if (ecore_status != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "ecore_dmae_execute_sub_operation Failed"
+ " with error 0x%x. source_addr 0x%lx,"
+ " dest addr 0x%lx, size_in_dwords 0x%x\n",
+ ecore_status, (unsigned long)src_addr,
+ (unsigned long)dst_addr, length_cur);
+
+ ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_DMAE_FAIL);
+ break;
+ }
+ }
+
+ return ecore_status;
+}
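
The splitting arithmetic above (cnt_split full chunks plus a possibly-empty
remainder) can be checked in isolation. A standalone sketch with a
hypothetical per-command limit of 0x800 dwords and a hypothetical transfer
size; the real limit is DMAE_MAX_RW_SIZE, whose value is not shown here:

	#include <stdio.h>

	int main(void)
	{
		const unsigned int limit = 0x800; /* hypothetical chunk size, in dwords */
		unsigned int n = 0x1900;	  /* hypothetical transfer size, in dwords */
		unsigned int cnt_split = n / limit;
		unsigned int length_mod = n % limit;
		unsigned int i;

		for (i = 0; i <= cnt_split; i++) {
			unsigned int length_cur = (i == cnt_split) ? length_mod : limit;

			if (!length_cur) /* possible only on the last iteration */
				continue;
			printf("chunk %u: offset 0x%x dwords, %u dwords\n",
			       i, i * limit, length_cur);
		}
		return 0;
	}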
+
+enum _ecore_status_t
+ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u64 source_addr,
+ u32 grc_addr, u32 size_in_dwords, u32 flags)
+{
+ u32 grc_addr_in_dw = grc_addr / sizeof(u32);
+ struct ecore_dmae_params params;
+ enum _ecore_status_t rc;
+
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
+ params.flags = flags;
+
+ OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
+
+ rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
+ grc_addr_in_dw,
+ ECORE_DMAE_ADDRESS_HOST_VIRT,
+ ECORE_DMAE_ADDRESS_GRC,
+ size_in_dwords, &params);
+
+ OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 grc_addr,
+ dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
+{
+ u32 grc_addr_in_dw = grc_addr / sizeof(u32);
+ struct ecore_dmae_params params;
+ enum _ecore_status_t rc;
+
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
+ params.flags = flags;
+
+ OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
+
+ rc = ecore_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
+ dest_addr, ECORE_DMAE_ADDRESS_GRC,
+ ECORE_DMAE_ADDRESS_HOST_VIRT,
+ size_in_dwords, &params);
+
+ OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ dma_addr_t source_addr,
+ dma_addr_t dest_addr,
+ u32 size_in_dwords, struct ecore_dmae_params *p_params)
+{
+ enum _ecore_status_t rc;
+
+ OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
+
+ rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
+ dest_addr,
+ ECORE_DMAE_ADDRESS_HOST_PHYS,
+ ECORE_DMAE_ADDRESS_HOST_PHYS,
+ size_in_dwords, p_params);
+
+ OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
+
+ return rc;
+}
+
+void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
+ enum ecore_hw_err_type err_type)
+{
+ /* Fan failure cannot be masked by handling of another HW error */
+ if (p_hwfn->p_dev->recov_in_prog && err_type != ECORE_HW_ERR_FAN_FAIL) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DRV,
+ "Recovery is in progress."
+ "Avoid notifying about HW error %d.\n",
+ err_type);
+ return;
+ }
+
+ OSAL_HW_ERROR_OCCURRED(p_hwfn, err_type);
+}
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_hw.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_hw.h
new file mode 100644
index 00000000..0750b2ed
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_hw.h
@@ -0,0 +1,296 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_HW_H__
+#define __ECORE_HW_H__
+
+#include "ecore.h"
+#include "ecore_dev_api.h"
+
+/* Forward declaration */
+struct ecore_ptt;
+
+enum reserved_ptts {
+ RESERVED_PTT_EDIAG,
+ RESERVED_PTT_USER_SPACE,
+ RESERVED_PTT_MAIN,
+ RESERVED_PTT_DPC,
+ RESERVED_PTT_MAX
+};
+
+/* @@@TMP - in earlier versions of the emulation, the HW lock started from 1
+ * instead of 0, this should be fixed in later HW versions.
+ */
+#ifndef MISC_REG_DRIVER_CONTROL_0
+#define MISC_REG_DRIVER_CONTROL_0 MISC_REG_DRIVER_CONTROL_1
+#endif
+#ifndef MISC_REG_DRIVER_CONTROL_0_SIZE
+#define MISC_REG_DRIVER_CONTROL_0_SIZE MISC_REG_DRIVER_CONTROL_1_SIZE
+#endif
+
+enum _dmae_cmd_dst_mask {
+ DMAE_CMD_DST_MASK_NONE = 0,
+ DMAE_CMD_DST_MASK_PCIE = 1,
+ DMAE_CMD_DST_MASK_GRC = 2
+};
+
+enum _dmae_cmd_src_mask {
+ DMAE_CMD_SRC_MASK_PCIE = 0,
+ DMAE_CMD_SRC_MASK_GRC = 1
+};
+
+enum _dmae_cmd_crc_mask {
+ DMAE_CMD_COMP_CRC_EN_MASK_NONE = 0,
+ DMAE_CMD_COMP_CRC_EN_MASK_SET = 1
+};
+
+/* definitions for DMA constants */
+#define DMAE_GO_VALUE 0x1
+
+#ifdef __BIG_ENDIAN
+#define DMAE_COMPLETION_VAL 0xAED10000
+#define DMAE_CMD_ENDIANITY 0x3
+#else
+#define DMAE_COMPLETION_VAL 0xD1AE
+#define DMAE_CMD_ENDIANITY 0x2
+#endif
+
+#define DMAE_CMD_SIZE 14
+/* size of DMAE command structure to fill: DMAE_CMD_SIZE - 5 */
+#define DMAE_CMD_SIZE_TO_FILL (DMAE_CMD_SIZE - 5)
+/* Minimum wait for dmae operation to complete: 2 milliseconds */
+#define DMAE_MIN_WAIT_TIME 0x2
+#define DMAE_MAX_CLIENTS 32
+
+/**
+* @brief ecore_gtt_init - Initialize GTT windows
+*
+* @param p_hwfn
+*/
+void ecore_gtt_init(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_ptt_invalidate - Forces all ptt entries to be re-configured
+ *
+ * @param p_hwfn
+ */
+void ecore_ptt_invalidate(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_ptt_pool_alloc - Allocate and initialize PTT pool
+ *
+ * @param p_hwfn
+ *
+ * @return _ecore_status_t - success (0), negative - error.
+ */
+enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_ptt_pool_free - Free the PTT pool
+ *
+ * @param p_hwfn
+ */
+void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_ptt_get_hw_addr - Get PTT's GRC/HW address
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return u32
+ */
+u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_ptt_get_bar_addr - Get PTT's external BAR address
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return u32
+ */
+u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_ptt_set_win - Set PTT Window's GRC BAR address
+ *
+ * @param p_hwfn
+ * @param new_hw_addr
+ * @param p_ptt
+ */
+void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 new_hw_addr);
+
+/**
+ * @brief ecore_get_reserved_ptt - Get a specific reserved PTT
+ *
+ * @param p_hwfn
+ * @param ptt_idx
+ *
+ * @return struct ecore_ptt *
+ */
+struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn,
+ enum reserved_ptts ptt_idx);
+
+/**
+ * @brief ecore_wr - Write value to BAR using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param hw_addr
+ * @param val
+ */
+void ecore_wr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 hw_addr,
+ u32 val);
+
+/**
+ * @brief ecore_rd - Read value from BAR using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param hw_addr
+ */
+u32 ecore_rd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 hw_addr);
+
+/**
+ * @brief ecore_memcpy_from - copy n bytes from BAR using the given
+ * ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param dest
+ * @param hw_addr
+ * @param n
+ */
+void ecore_memcpy_from(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ void *dest,
+ u32 hw_addr,
+ osal_size_t n);
+
+/**
+ * @brief ecore_memcpy_to - copy n bytes to BAR using the given
+ * ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param hw_addr
+ * @param src
+ * @param n
+ */
+void ecore_memcpy_to(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 hw_addr,
+ void *src,
+ osal_size_t n);
+/**
+ * @brief ecore_fid_pretend - pretend to another function when
+ * accessing the ptt window. There is no way to unpretend
+ * a function. The only way to cancel a pretend is to
+ * pretend back to the original function.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param fid - fid field of pxp_pretend structure. Can contain
+ * either pf / vf, port/path fields are don't care.
+ */
+void ecore_fid_pretend(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 fid);
+
+/**
+ * @brief ecore_port_pretend - pretend to another port when
+ * accessing the ptt window
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param port_id - the port to pretend to
+ */
+void ecore_port_pretend(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 port_id);
+
+/**
+ * @brief ecore_port_unpretend - cancel any previously set port
+ * pretend
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void ecore_port_unpretend(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_vfid_to_concrete - build a concrete FID for a
+ * given VF ID
+ *
+ * @param p_hwfn
+ * @param vfid
+ */
+u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid);
+
+/**
+* @brief ecore_dmae_info_alloc - Init the dmae_info structure
+* which is part of p_hwfn.
+* @param p_hwfn
+*/
+enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+* @brief ecore_dmae_info_free - Free the dmae_info structure
+* which is part of p_hwfn
+*
+* @param p_hwfn
+*/
+void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn);
+
+union ecore_qm_pq_params {
+ struct {
+ u8 q_idx;
+ } iscsi;
+
+ struct {
+ u8 tc;
+ } core;
+
+ struct {
+ u8 is_vf;
+ u8 vf_id;
+ u8 tc;
+ } eth;
+
+ struct {
+ u8 dcqcn;
+ u8 qpid; /* roce relative */
+ } roce;
+
+ struct {
+ u8 qidx;
+ } iwarp;
+};
+
+u16 ecore_get_qm_pq(struct ecore_hwfn *p_hwfn,
+ enum protocol_type proto,
+ union ecore_qm_pq_params *params);
+
+enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
+ const u8 *fw_data);
+
+void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
+ enum ecore_hw_err_type err_type);
+
+#endif /* __ECORE_HW_H__ */
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_hw_defs.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_hw_defs.h
new file mode 100644
index 00000000..4456af43
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_hw_defs.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef _ECORE_IGU_DEF_H_
+#define _ECORE_IGU_DEF_H_
+
+/* Fields of IGU PF CONFIGURATION REGISTER */
+/* function enable */
+#define IGU_PF_CONF_FUNC_EN (0x1 << 0)
+/* MSI/MSIX enable */
+#define IGU_PF_CONF_MSI_MSIX_EN (0x1 << 1)
+/* INT enable */
+#define IGU_PF_CONF_INT_LINE_EN (0x1 << 2)
+/* attention enable */
+#define IGU_PF_CONF_ATTN_BIT_EN (0x1 << 3)
+/* single ISR mode enable */
+#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4)
+/* simd all ones mode */
+#define IGU_PF_CONF_SIMD_MODE (0x1 << 5)
+
+/* Fields of IGU VF CONFIGURATION REGISTER */
+/* function enable */
+#define IGU_VF_CONF_FUNC_EN (0x1 << 0)
+/* MSI/MSIX enable */
+#define IGU_VF_CONF_MSI_MSIX_EN (0x1 << 1)
+/* single ISR mode enable */
+#define IGU_VF_CONF_SINGLE_ISR_EN (0x1 << 4)
+/* Parent PF */
+#define IGU_VF_CONF_PARENT_MASK (0xF)
+/* Parent PF */
+#define IGU_VF_CONF_PARENT_SHIFT 5
+
+/* IGU control commands
+ */
+enum igu_ctrl_cmd {
+ IGU_CTRL_CMD_TYPE_RD,
+ IGU_CTRL_CMD_TYPE_WR,
+ MAX_IGU_CTRL_CMD
+};
+
+/* Control register for the IGU command register
+ */
+struct igu_ctrl_reg {
+ u32 ctrl_data;
+#define IGU_CTRL_REG_FID_MASK 0xFFFF /* Opaque_FID */
+#define IGU_CTRL_REG_FID_SHIFT 0
+#define IGU_CTRL_REG_PXP_ADDR_MASK 0xFFF /* Command address */
+#define IGU_CTRL_REG_PXP_ADDR_SHIFT 16
+#define IGU_CTRL_REG_RESERVED_MASK 0x1
+#define IGU_CTRL_REG_RESERVED_SHIFT 28
+#define IGU_CTRL_REG_TYPE_MASK 0x1 /* use enum igu_ctrl_cmd */
+#define IGU_CTRL_REG_TYPE_SHIFT 31
+};
+
+#endif
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_init_fw_funcs.c b/src/seastar/dpdk/drivers/net/qede/base/ecore_init_fw_funcs.c
new file mode 100644
index 00000000..004ab351
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_init_fw_funcs.c
@@ -0,0 +1,1883 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "ecore_hw.h"
+#include "ecore_init_ops.h"
+#include "reg_addr.h"
+#include "ecore_rt_defs.h"
+#include "ecore_hsi_common.h"
+#include "ecore_hsi_init_func.h"
+#include "ecore_hsi_eth.h"
+#include "ecore_hsi_init_tool.h"
+#include "ecore_iro.h"
+#include "ecore_init_fw_funcs.h"
+
+#define CDU_VALIDATION_DEFAULT_CFG 61
+
+static u16 con_region_offsets[3][E4_NUM_OF_CONNECTION_TYPES] = {
+ { 400, 336, 352, 304, 304, 384, 416, 352}, /* region 3 offsets */
+ { 528, 496, 416, 448, 448, 512, 544, 480}, /* region 4 offsets */
+ { 608, 544, 496, 512, 576, 592, 624, 560} /* region 5 offsets */
+};
+static u16 task_region_offsets[1][E4_NUM_OF_CONNECTION_TYPES] = {
+ { 240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */
+};
+
+/* General constants */
+#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
+ QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
+#define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : \
+ 0)
+#define QM_INVALID_PQ_ID 0xffff
+
+/* Feature enable */
+#define QM_BYPASS_EN 1
+#define QM_BYTE_CRD_EN 1
+
+/* Other PQ constants */
+#define QM_OTHER_PQS_PER_PF 4
+
+/* WFQ constants: */
+
+/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
+#define QM_WFQ_UPPER_BOUND 62500000
+
+/* Bit of VOQ in WFQ VP PQ map */
+#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
+
+/* Bit of PF in WFQ VP PQ map */
+#define QM_WFQ_VP_PQ_PF_SHIFT 5
+
+/* 0x9000 = 4*9*1024 */
+#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
+
+/* 0.7 * upper bound (62500000) */
+#define QM_WFQ_MAX_INC_VAL 43750000
+
+/* RL constants: */
+
+/* Upper bound is set to 10 * burst size of 1ms in 50Gbps */
+#define QM_RL_UPPER_BOUND 62500000
+
+/* Period in us */
+#define QM_RL_PERIOD 5
+
+/* Period in 25MHz cycles */
+#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
+
+/* 0.7 * upper bound (62500000) */
+#define QM_RL_MAX_INC_VAL 43750000
+
+/* RL increment value - rate is specified in Mbps. The factor of 1.01 was
+ * added after seeing only a 99% rate reached in a 25Gbps port with the DPDK
+ * RFC 2544 test. In this scenario the PF RL was reducing the line rate to
+ * 99%, although the credit increment value was correct and the FW calculated
+ * correct packet sizes. The reason for the RL inaccuracy is unknown at this
+ * point.
+ */
+#define QM_RL_INC_VAL(rate) OSAL_MAX_T(u32, (u32)(((rate ? rate : 1000000) * \
+ QM_RL_PERIOD * 101) / (8 * 100)), 1)
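
To make the scaling concrete, the increment for a 25 Gbps port works out to
(25000 * 5 * 101) / 800 = 15781. A standalone re-derivation of the macro
above (rate in Mbps, QM_RL_PERIOD of 5, 1.01 correction factor); the 25000
input is just an illustrative value:

	#include <stdio.h>

	/* Mirrors QM_RL_INC_VAL: max(((rate ? rate : 1000000) * 5 * 101) / 800, 1) */
	static unsigned int rl_inc_val(unsigned int rate_mbps)
	{
		unsigned int rate = rate_mbps ? rate_mbps : 1000000;
		unsigned int inc = (rate * 5 * 101) / (8 * 100);

		return inc > 1 ? inc : 1;
	}

	int main(void)
	{
		printf("%u\n", rl_inc_val(25000)); /* prints 15781 */
		return 0;
	}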
+
+/* AFullOprtnstcCrdMask constants */
+#define QM_OPPOR_LINE_VOQ_DEF 1
+#define QM_OPPOR_FW_STOP_DEF 0
+#define QM_OPPOR_PQ_EMPTY_DEF 1
+
+/* Command Queue constants: */
+
+/* Pure LB CmdQ lines (+spare) */
+#define PBF_CMDQ_PURE_LB_LINES 150
+
+#define PBF_CMDQ_LINES_RT_OFFSET(voq) \
+ (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \
+ (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
+ PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
+
+#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) \
+ (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
+ (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
+ PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
+
+#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
+((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
+
+/* BTB: blocks constants (block size = 256B) */
+
+/* 256B blocks in 9700B packet */
+#define BTB_JUMBO_PKT_BLOCKS 38
+
+/* Headroom per-port */
+#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
+#define BTB_PURE_LB_FACTOR 10
+
+/* Factored (hence really 0.7) */
+#define BTB_PURE_LB_RATIO 7
+
+/* QM stop command constants */
+#define QM_STOP_PQ_MASK_WIDTH 32
+#define QM_STOP_CMD_ADDR 2
+#define QM_STOP_CMD_STRUCT_SIZE 2
+#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
+#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
+#define QM_STOP_CMD_PAUSE_MASK_MASK 0xffffffff /* @DPDK */
+#define QM_STOP_CMD_GROUP_ID_OFFSET 1
+#define QM_STOP_CMD_GROUP_ID_SHIFT 16
+#define QM_STOP_CMD_GROUP_ID_MASK 15
+#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
+#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
+#define QM_STOP_CMD_PQ_TYPE_MASK 1
+#define QM_STOP_CMD_MAX_POLL_COUNT 100
+#define QM_STOP_CMD_POLL_PERIOD_US 500
+
+/* QM command macros */
+#define QM_CMD_STRUCT_SIZE(cmd) cmd##_STRUCT_SIZE
+#define QM_CMD_SET_FIELD(var, cmd, field, value) \
+ SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)
+
+/* QM: VOQ macros */
+#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) \
+ ((port) * (max_phys_tcs_per_port) + (tc))
+#define LB_VOQ(port) (MAX_PHYS_VOQS + (port))
+#define VOQ(port, tc, max_phys_tcs_per_port) \
+ ((tc) < LB_TC ? PHYS_VOQ(port, tc, max_phys_tcs_per_port) : \
+ LB_VOQ(port))
+
+
+/******************** INTERNAL IMPLEMENTATION *********************/
+
+/* Prepare PF RL enable/disable runtime init values */
+static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
+{
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
+ if (pf_rl_en) {
+ /* Enable RLs for all VOQs */
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
+ (1 << MAX_NUM_VOQS) - 1);
+
+ /* Write RL period */
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET,
+ QM_RL_PERIOD_CLK_25M);
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET,
+ QM_RL_PERIOD_CLK_25M);
+
+ /* Set credit threshold for QM bypass flow */
+ if (QM_BYPASS_EN)
+ STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
+ QM_RL_UPPER_BOUND);
+ }
+}
+
+/* Prepare PF WFQ enable/disable runtime init values */
+static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn, bool pf_wfq_en)
+{
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
+
+ /* Set credit threshold for QM bypass flow */
+ if (pf_wfq_en && QM_BYPASS_EN)
+ STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
+ QM_WFQ_UPPER_BOUND);
+}
+
+/* Prepare VPORT RL enable/disable runtime init values */
+static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn, bool vport_rl_en)
+{
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
+ vport_rl_en ? 1 : 0);
+ if (vport_rl_en) {
+ /* Write RL period (use timer 0 only) */
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
+ QM_RL_PERIOD_CLK_25M);
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
+ QM_RL_PERIOD_CLK_25M);
+
+ /* Set credit threshold for QM bypass flow */
+ if (QM_BYPASS_EN)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
+ QM_RL_UPPER_BOUND);
+ }
+}
+
+/* Prepare VPORT WFQ enable/disable runtime init values */
+static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn, bool vport_wfq_en)
+{
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
+ vport_wfq_en ? 1 : 0);
+
+ /* Set credit threshold for QM bypass flow */
+ if (vport_wfq_en && QM_BYPASS_EN)
+ STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
+ QM_WFQ_UPPER_BOUND);
+}
+
+/* Prepare runtime init values to allocate PBF command queue lines for
+ * the specified VOQ
+ */
+static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
+ u8 voq, u16 cmdq_lines)
+{
+ u32 qm_line_crd;
+
+ qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
+
+ OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
+ (u32)cmdq_lines);
+ STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
+ STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
+ qm_line_crd);
+}
+
+/* Prepare runtime init values to allocate PBF command queue lines. */
+static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
+ u8 max_ports_per_engine,
+ u8 max_phys_tcs_per_port,
+ struct init_qm_port_params
+ port_params[MAX_NUM_PORTS])
+{
+ u8 tc, voq, port_id, num_tcs_in_port;
+
+ /* Clear PBF lines for all VOQs */
+ for (voq = 0; voq < MAX_NUM_VOQS; voq++)
+ STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
+
+ for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+ u16 phys_lines, phys_lines_per_tc;
+
+ if (!port_params[port_id].active)
+ continue;
+
+ /* Find #lines to divide between the active physical TCs */
+ phys_lines = port_params[port_id].num_pbf_cmd_lines -
+ PBF_CMDQ_PURE_LB_LINES;
+
+ /* Find #lines per active physical TC */
+ num_tcs_in_port = 0;
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
+ if (((port_params[port_id].active_phys_tcs >> tc) &
+ 0x1) == 1)
+ num_tcs_in_port++;
+ phys_lines_per_tc = phys_lines / num_tcs_in_port;
+
+ /* Init registers per active TC */
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+ if (((port_params[port_id].active_phys_tcs >> tc) &
+ 0x1) == 1) {
+ voq = PHYS_VOQ(port_id, tc,
+ max_phys_tcs_per_port);
+ ecore_cmdq_lines_voq_rt_init(p_hwfn, voq,
+ phys_lines_per_tc);
+ }
+ }
+
+ /* Init registers for pure LB TC */
+ ecore_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
+ PBF_CMDQ_PURE_LB_LINES);
+ }
+}
+
+/*
+ * Prepare runtime init values to allocate guaranteed BTB blocks for the
+ * specified port. The guaranteed BTB space is divided between the TCs as
+ * follows (shared space is currently not used):
+ * 1. Parameters:
+ *     B - BTB blocks for this port
+ *     C - Number of physical TCs for this port
+ * 2. Calculation:
+ *    a. 38 blocks (9700B jumbo frame) are allocated for global per port
+ *       headroom
+ *    b. B = B - 38 (remainder after global headroom allocation)
+ *    c. MAX(38, B/(C+0.7)) blocks are allocated for the pure LB VOQ.
+ *    d. B = B - MAX(38, B/(C+0.7)) (remainder after pure LB allocation).
+ * e. B/C blocks are allocated for each physical TC.
+ * Assumptions:
+ * - MTU is up to 9700 bytes (38 blocks)
+ * - All TCs are considered symmetrical (same rate and packet size)
+ * - No optimization for lossy TC (all are considered lossless). Shared space is
+ * not enabled and allocated for each TC.
+ */
+static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
+ u8 max_ports_per_engine,
+ u8 max_phys_tcs_per_port,
+ struct init_qm_port_params
+ port_params[MAX_NUM_PORTS])
+{
+ u32 usable_blocks, pure_lb_blocks, phys_blocks;
+ u8 tc, voq, port_id, num_tcs_in_port;
+
+ for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+ if (!port_params[port_id].active)
+ continue;
+
+ /* Subtract headroom blocks */
+ usable_blocks = port_params[port_id].num_btb_blocks -
+ BTB_HEADROOM_BLOCKS;
+
+ /* Find blocks per physical TC. Use a factor to avoid floating
+ * point arithmetic.
+ */
+ num_tcs_in_port = 0;
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
+ if (((port_params[port_id].active_phys_tcs >> tc) &
+ 0x1) == 1)
+ num_tcs_in_port++;
+
+ pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
+ (num_tcs_in_port * BTB_PURE_LB_FACTOR +
+ BTB_PURE_LB_RATIO);
+ pure_lb_blocks = OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS,
+ pure_lb_blocks /
+ BTB_PURE_LB_FACTOR);
+ phys_blocks = (usable_blocks - pure_lb_blocks) /
+ num_tcs_in_port;
+
+ /* Init physical TCs */
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+ if (((port_params[port_id].active_phys_tcs >> tc) &
+ 0x1) == 1) {
+ voq = PHYS_VOQ(port_id, tc,
+ max_phys_tcs_per_port);
+ STORE_RT_REG(p_hwfn,
+ PBF_BTB_GUARANTEED_RT_OFFSET(voq),
+ phys_blocks);
+ }
+ }
+
+ /* Init pure LB TC */
+ STORE_RT_REG(p_hwfn,
+ PBF_BTB_GUARANTEED_RT_OFFSET(LB_VOQ(port_id)),
+ pure_lb_blocks);
+ }
+}
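
A worked example of the split above, with hypothetical inputs of B = 4800
guaranteed BTB blocks and C = 4 active physical TCs; the constants mirror
BTB_HEADROOM_BLOCKS and BTB_PURE_LB_FACTOR/RATIO defined earlier:

	#include <stdio.h>

	int main(void)
	{
		const unsigned int headroom = 38, factor = 10, ratio = 7;
		unsigned int btb_blocks = 4800, num_tcs = 4; /* hypothetical inputs */
		unsigned int usable, pure_lb, phys;

		usable = btb_blocks - headroom;			/* 4762 */
		pure_lb = (usable * factor) / (num_tcs * factor + ratio);
		pure_lb /= factor;				/* 101 */
		if (pure_lb < 38)				/* keep >= 1 jumbo packet */
			pure_lb = 38;
		phys = (usable - pure_lb) / num_tcs;		/* 1165 per TC */

		printf("pure LB %u blocks, %u blocks per physical TC\n",
		       pure_lb, phys);
		return 0;
	}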
+
+/* Prepare Tx PQ mapping runtime init values for the specified PF */
+static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 port_id,
+ u8 pf_id,
+ u8 max_phys_tcs_per_port,
+ bool is_first_pf,
+ u32 num_pf_cids,
+ u32 num_vf_cids,
+ u16 start_pq,
+ u16 num_pf_pqs,
+ u16 num_vf_pqs,
+ u8 start_vport,
+ u32 base_mem_addr_4kb,
+ struct init_qm_pq_params *pq_params,
+ struct init_qm_vport_params *vport_params)
+{
+ /* A bit per Tx PQ indicating if the PQ is associated with a VF */
+ u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
+ u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
+ u16 num_pqs, first_pq_group, last_pq_group, i, pq_id, pq_group;
+ u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;
+
+ num_pqs = num_pf_pqs + num_vf_pqs;
+
+ first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
+ last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;
+
+ pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
+ vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
+ mem_addr_4kb = base_mem_addr_4kb;
+
+ /* Set mapping from PQ group to PF */
+ for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
+ STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
+ (u32)(pf_id));
+
+ /* Set PQ sizes */
+ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
+ QM_PQ_SIZE_256B(num_pf_cids));
+ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
+ QM_PQ_SIZE_256B(num_vf_cids));
+
+ /* Go over all Tx PQs */
+ for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
+ u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
+ struct qm_rf_pq_map tx_pq_map;
+ bool is_vf_pq, rl_valid;
+ u8 voq, vport_id_in_pf;
+ u16 first_tx_pq_id;
+
+ voq = VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
+ is_vf_pq = (i >= num_pf_pqs);
+ rl_valid = pq_params[i].rl_valid && pq_params[i].vport_id <
+ max_qm_global_rls;
+
+ /* Update first Tx PQ of VPORT/TC */
+ vport_id_in_pf = pq_params[i].vport_id - start_vport;
+ first_tx_pq_id =
+ vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id];
+ if (first_tx_pq_id == QM_INVALID_PQ_ID) {
+ /* Create new VP PQ */
+ vport_params[vport_id_in_pf].
+ first_tx_pq_id[pq_params[i].tc_id] = pq_id;
+ first_tx_pq_id = pq_id;
+
+ /* Map VP PQ to VOQ and PF */
+ STORE_RT_REG(p_hwfn,
+ QM_REG_WFQVPMAP_RT_OFFSET + first_tx_pq_id,
+ (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) | (pf_id <<
+ QM_WFQ_VP_PQ_PF_SHIFT));
+ }
+
+ /* Check RL ID */
+ if (pq_params[i].rl_valid && pq_params[i].vport_id >=
+ max_qm_global_rls)
+ DP_NOTICE(p_hwfn, true,
+ "Invalid VPORT ID for rate limiter config\n");
+
+ /* Fill PQ map entry */
+ OSAL_MEMSET(&tx_pq_map, 0, sizeof(tx_pq_map));
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
+ rl_valid ? 1 : 0);
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
+ rl_valid ? pq_params[i].vport_id : 0);
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
+ pq_params[i].wrr_group);
+
+ /* Write PQ map entry to CAM */
+ STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
+ *((u32 *)&tx_pq_map));
+
+ /* Set base address */
+ STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
+ mem_addr_4kb);
+
+ /* If VF PQ, add indication to PQ VF mask */
+ if (is_vf_pq) {
+ tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |=
+ (1 << (pq_id % QM_PF_QUEUE_GROUP_SIZE));
+ mem_addr_4kb += vport_pq_mem_4kb;
+ } else {
+ mem_addr_4kb += pq_mem_4kb;
+ }
+ }
+
+ /* Store Tx PQ VF mask to size select register */
+ for (i = 0; i < num_tx_pq_vf_masks; i++)
+ if (tx_pq_vf_mask[i])
+ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
+ i, tx_pq_vf_mask[i]);
+}
+
+/* Prepare Other PQ mapping runtime init values for the specified PF */
+static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
+ u8 port_id,
+ u8 pf_id,
+ u32 num_pf_cids,
+ u32 num_tids, u32 base_mem_addr_4kb)
+{
+ u32 pq_size, pq_mem_4kb, mem_addr_4kb;
+ u16 i, pq_id, pq_group;
+
+ /* A single other PQ group is used in each PF, where PQ group i is used
+ * in PF i.
+ */
+ pq_group = pf_id;
+ pq_size = num_pf_cids + num_tids;
+ pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
+ mem_addr_4kb = base_mem_addr_4kb;
+
+ /* Map PQ group to PF */
+ STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
+ (u32)(pf_id));
+
+ /* Set PQ sizes */
+ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
+ QM_PQ_SIZE_256B(pq_size));
+
+ /* Set base address */
+ for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
+ i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
+ STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
+ mem_addr_4kb);
+ mem_addr_4kb += pq_mem_4kb;
+ }
+}
+
+/* Prepare PF WFQ runtime init values for the specified PF.
+ * Return -1 on error.
+ */
+static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
+ u8 port_id,
+ u8 pf_id,
+ u16 pf_wfq,
+ u8 max_phys_tcs_per_port,
+ u16 num_tx_pqs,
+ struct init_qm_pq_params *pq_params)
+{
+ u32 inc_val, crd_reg_offset;
+ u8 voq;
+ u16 i;
+
+ crd_reg_offset = (pf_id < MAX_NUM_PFS_BB ? QM_REG_WFQPFCRD_RT_OFFSET :
+ QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
+ (pf_id % MAX_NUM_PFS_BB);
+
+ inc_val = QM_WFQ_INC_VAL(pf_wfq);
+ if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid PF WFQ weight configuration\n");
+ return -1;
+ }
+
+ for (i = 0; i < num_tx_pqs; i++) {
+ voq = VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
+ OVERWRITE_RT_REG(p_hwfn, crd_reg_offset + voq * MAX_NUM_PFS_BB,
+ (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ }
+
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id,
+ QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);
+ return 0;
+}
+
+/* Prepare PF RL runtime init values for the specified PF.
+ * Return -1 on error.
+ */
+static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
+{
+ u32 inc_val;
+
+ inc_val = QM_RL_INC_VAL(pf_rl);
+ if (inc_val > QM_RL_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid PF rate limit configuration\n");
+ return -1;
+ }
+
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
+ QM_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
+
+ return 0;
+}
+
+/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
+ * Return -1 on error.
+ */
+static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
+ u8 num_vports,
+ struct init_qm_vport_params *vport_params)
+{
+ u16 vport_pq_id;
+ u32 inc_val;
+ u8 tc, i;
+
+ /* Go over all PF VPORTs */
+ for (i = 0; i < num_vports; i++) {
+ if (!vport_params[i].vport_wfq)
+ continue;
+
+ inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
+ if (inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid VPORT WFQ weight configuration\n");
+ return -1;
+ }
+
+ /* Each VPORT can have several VPORT PQ IDs for various TCs */
+ for (tc = 0; tc < NUM_OF_TCS; tc++) {
+ vport_pq_id = vport_params[i].first_tx_pq_id[tc];
+ if (vport_pq_id != QM_INVALID_PQ_ID) {
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET +
+ vport_pq_id,
+ (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_WFQVPWEIGHT_RT_OFFSET +
+ vport_pq_id, inc_val);
+ }
+ }
+ }
+ return 0;
+}
+
+/* Prepare VPORT RL runtime init values for the specified VPORTs.
+ * Return -1 on error.
+ */
+static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
+ u8 start_vport,
+ u8 num_vports,
+ struct init_qm_vport_params *vport_params)
+{
+ u8 i, vport_id;
+ u32 inc_val;
+
+ if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid VPORT ID for rate limiter configuration\n");
+ return -1;
+ }
+
+ /* Go over all PF VPORTs */
+ for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
+ u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
+ if (inc_val > QM_RL_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid VPORT rate-limit configuration\n");
+ return -1;
+ }
+
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
+ QM_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
+ inc_val);
+ }
+
+ return 0;
+}
+
+static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 reg_val, i;
+
+ for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val;
+ i++) {
+ OSAL_UDELAY(QM_STOP_CMD_POLL_PERIOD_US);
+ reg_val = ecore_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
+ }
+
+ /* Check if timeout while waiting for SDM command ready */
+ if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG,
+ "Timeout waiting for QM SDM cmd ready signal\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
+{
+ if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
+ return false;
+
+ ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
+ ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
+ ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
+ ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
+ ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);
+
+ return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
+}
+
+
+/******************** INTERFACE IMPLEMENTATION *********************/
+
+u32 ecore_qm_pf_mem_size(u8 pf_id,
+ u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
+{
+ return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
+ QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
+ QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
+}
+
+int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
+ u8 max_ports_per_engine,
+ u8 max_phys_tcs_per_port,
+ bool pf_rl_en,
+ bool pf_wfq_en,
+ bool vport_rl_en,
+ bool vport_wfq_en,
+ struct init_qm_port_params
+ port_params[MAX_NUM_PORTS])
+{
+ u32 mask;
+
+ /* Init AFullOprtnstcCrdMask */
+ mask = (QM_OPPOR_LINE_VOQ_DEF <<
+ QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
+ (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
+ (pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
+ (vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
+ (pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
+ (vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
+ (QM_OPPOR_FW_STOP_DEF <<
+ QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
+ (QM_OPPOR_PQ_EMPTY_DEF <<
+ QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
+ STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
+
+ /* Enable/disable PF RL */
+ ecore_enable_pf_rl(p_hwfn, pf_rl_en);
+
+ /* Enable/disable PF WFQ */
+ ecore_enable_pf_wfq(p_hwfn, pf_wfq_en);
+
+ /* Enable/disable VPORT RL */
+ ecore_enable_vport_rl(p_hwfn, vport_rl_en);
+
+ /* Enable/disable VPORT WFQ */
+ ecore_enable_vport_wfq(p_hwfn, vport_wfq_en);
+
+ /* Init PBF CMDQ line credit */
+ ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine,
+ max_phys_tcs_per_port, port_params);
+
+ /* Init BTB blocks in PBF */
+ ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine,
+ max_phys_tcs_per_port, port_params);
+
+ return 0;
+}
+
+int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 port_id,
+ u8 pf_id,
+ u8 max_phys_tcs_per_port,
+ bool is_first_pf,
+ u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids,
+ u16 start_pq,
+ u16 num_pf_pqs,
+ u16 num_vf_pqs,
+ u8 start_vport,
+ u8 num_vports,
+ u16 pf_wfq,
+ u32 pf_rl,
+ struct init_qm_pq_params *pq_params,
+ struct init_qm_vport_params *vport_params)
+{
+ u32 other_mem_size_4kb;
+ u8 tc, i;
+
+ other_mem_size_4kb = QM_PQ_MEM_4KB(num_pf_cids + num_tids) *
+ QM_OTHER_PQS_PER_PF;
+
+ /* Clear first Tx PQ ID array for each VPORT */
+ for (i = 0; i < num_vports; i++)
+ for (tc = 0; tc < NUM_OF_TCS; tc++)
+ vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
+
+ /* Map Other PQs (if any) */
+#if QM_OTHER_PQS_PER_PF > 0
+ ecore_other_pq_map_rt_init(p_hwfn, port_id, pf_id, num_pf_cids,
+ num_tids, 0);
+#endif
+
+ /* Map Tx PQs */
+ ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id,
+ max_phys_tcs_per_port, is_first_pf, num_pf_cids,
+ num_vf_cids, start_pq, num_pf_pqs, num_vf_pqs,
+ start_vport, other_mem_size_4kb, pq_params,
+ vport_params);
+
+ /* Init PF WFQ */
+ if (pf_wfq)
+ if (ecore_pf_wfq_rt_init
+ (p_hwfn, port_id, pf_id, pf_wfq, max_phys_tcs_per_port,
+ num_pf_pqs + num_vf_pqs, pq_params))
+ return -1;
+
+ /* Init PF RL */
+ if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl))
+ return -1;
+
+ /* Set VPORT WFQ */
+ if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params))
+ return -1;
+
+ /* Set VPORT RL */
+ if (ecore_vport_rl_rt_init
+ (p_hwfn, start_vport, num_vports, vport_params))
+ return -1;
+
+ return 0;
+}
+
+int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
+{
+ u32 inc_val;
+
+ inc_val = QM_WFQ_INC_VAL(pf_wfq);
+ if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid PF WFQ weight configuration\n");
+ return -1;
+ }
+
+ ecore_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
+
+ return 0;
+}
+
+int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl)
+{
+ u32 inc_val;
+
+ inc_val = QM_RL_INC_VAL(pf_rl);
+ if (inc_val > QM_RL_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid PF rate limit configuration\n");
+ return -1;
+ }
+
+ ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4,
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
+ ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
+
+ return 0;
+}
+
+int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
+{
+ u16 vport_pq_id;
+ u32 inc_val;
+ u8 tc;
+
+ inc_val = QM_WFQ_INC_VAL(vport_wfq);
+ if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid VPORT WFQ weight configuration\n");
+ return -1;
+ }
+
+ for (tc = 0; tc < NUM_OF_TCS; tc++) {
+ vport_pq_id = first_tx_pq_id[tc];
+ if (vport_pq_id != QM_INVALID_PQ_ID) {
+ ecore_wr(p_hwfn, p_ptt,
+ QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
+ }
+ }
+
+ return 0;
+}
+
+int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 vport_id, u32 vport_rl)
+{
+ u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
+
+ if (vport_id >= max_qm_global_rls) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid VPORT ID for rate limiter configuration\n");
+ return -1;
+ }
+
+ inc_val = QM_RL_INC_VAL(vport_rl);
+ if (inc_val > QM_RL_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid VPORT rate-limit configuration\n");
+ return -1;
+ }
+
+ ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4,
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
+ ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
+
+ return 0;
+}
+
+bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool is_release_cmd,
+ bool is_tx_pq, u16 start_pq, u16 num_pqs)
+{
+ u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
+ u32 pq_mask = 0, last_pq, pq_id;
+
+ last_pq = start_pq + num_pqs - 1;
+
+ /* Set command's PQ type */
+ QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
+
+ /* Go over requested PQs */
+ for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
+ /* Set PQ bit in mask (stop command only) */
+ if (!is_release_cmd)
+ pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
+
+ /* If last PQ or end of PQ mask, write command */
+ if ((pq_id == last_pq) ||
+ (pq_id % QM_STOP_PQ_MASK_WIDTH ==
+ (QM_STOP_PQ_MASK_WIDTH - 1))) {
+ QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PAUSE_MASK,
+ pq_mask);
+ QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID,
+ pq_id / QM_STOP_PQ_MASK_WIDTH);
+ if (!ecore_send_qm_cmd
+ (p_hwfn, p_ptt, QM_STOP_CMD_ADDR, cmd_arr[0],
+ cmd_arr[1]))
+ return false;
+ pq_mask = 0;
+ }
+ }
+
+ return true;
+}
+
+
+/* NIG: ETS configuration constants */
+#define NIG_TX_ETS_CLIENT_OFFSET 4
+#define NIG_LB_ETS_CLIENT_OFFSET 1
+#define NIG_ETS_MIN_WFQ_BYTES 1600
+
+/* NIG: ETS constants */
+#define NIG_ETS_UP_BOUND(weight, mtu) \
+ (2 * ((weight) > (mtu) ? (weight) : (mtu)))
+
+/* NIG: RL constants */
+
+/* Byte base type value */
+#define NIG_RL_BASE_TYPE 1
+
+/* Period in us */
+#define NIG_RL_PERIOD 1
+
+/* Period in 25MHz cycles */
+#define NIG_RL_PERIOD_CLK_25M (25 * NIG_RL_PERIOD)
+
+/* Rate in mbps */
+#define NIG_RL_INC_VAL(rate) (((rate) * NIG_RL_PERIOD) / 8)
+
+#define NIG_RL_MAX_VAL(inc_val, mtu) \
+ (2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))
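+
+/* With NIG_RL_PERIOD in microseconds and the rate in Mbps, rate * period is
+ * the number of bits accumulated per period, so NIG_RL_INC_VAL yields the
+ * bytes credited per RL period. NIG_RL_MAX_VAL is the programmed RL maximum:
+ * twice the larger of that increment and the MTU.
+ */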
+
+/* NIG: packet priority configuration constants */
+#define NIG_PRIORITY_MAP_TC_BITS 4
+
+
+void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_ets_req *req, bool is_lb)
+{
+ u32 min_weight, tc_weight_base_addr, tc_weight_addr_diff;
+ u32 tc_bound_base_addr, tc_bound_addr_diff;
+ u8 sp_tc_map = 0, wfq_tc_map = 0;
+ u8 tc, num_tc, tc_client_offset;
+
+ num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
+ tc_client_offset = is_lb ? NIG_LB_ETS_CLIENT_OFFSET :
+ NIG_TX_ETS_CLIENT_OFFSET;
+ min_weight = 0xffffffff;
+ tc_weight_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
+ NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
+ tc_weight_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 -
+ NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
+ NIG_REG_TX_ARB_CREDIT_WEIGHT_1 -
+ NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
+ tc_bound_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
+ NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
+ tc_bound_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 -
+ NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
+ NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 -
+ NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
+
+ for (tc = 0; tc < num_tc; tc++) {
+ struct init_ets_tc_req *tc_req = &req->tc_req[tc];
+
+ /* Update SP map */
+ if (tc_req->use_sp)
+ sp_tc_map |= (1 << tc);
+
+ if (!tc_req->use_wfq)
+ continue;
+
+ /* Update WFQ map */
+ wfq_tc_map |= (1 << tc);
+
+ /* Find minimal weight */
+ if (tc_req->weight < min_weight)
+ min_weight = tc_req->weight;
+ }
+
+ /* Write SP map */
+ ecore_wr(p_hwfn, p_ptt,
+ is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT :
+ NIG_REG_TX_ARB_CLIENT_IS_STRICT,
+ (sp_tc_map << tc_client_offset));
+
+ /* Write WFQ map */
+ ecore_wr(p_hwfn, p_ptt,
+ is_lb ? NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ :
+ NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
+ (wfq_tc_map << tc_client_offset));
+ /* write WFQ weights */
+ for (tc = 0; tc < num_tc; tc++, tc_client_offset++) {
+ struct init_ets_tc_req *tc_req = &req->tc_req[tc];
+ u32 byte_weight;
+
+ if (!tc_req->use_wfq)
+ continue;
+
+ /* Translate weight to bytes */
+ byte_weight = (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) /
+ min_weight;
+
+ /* Write WFQ weight */
+ ecore_wr(p_hwfn, p_ptt, tc_weight_base_addr +
+ tc_weight_addr_diff * tc_client_offset, byte_weight);
+
+ /* Write WFQ upper bound */
+ ecore_wr(p_hwfn, p_ptt, tc_bound_base_addr +
+ tc_bound_addr_diff * tc_client_offset,
+ NIG_ETS_UP_BOUND(byte_weight, req->mtu));
+ }
+}
+
+void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_nig_lb_rl_req *req)
+{
+ u32 ctrl, inc_val, reg_offset;
+ u8 tc;
+
+ /* Disable global MAC+LB RL */
+ ctrl =
+ NIG_RL_BASE_TYPE <<
+ NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT;
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
+
+ /* Configure and enable global MAC+LB RL */
+ if (req->lb_mac_rate) {
+ /* Configure */
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD,
+ NIG_RL_PERIOD_CLK_25M);
+ inc_val = NIG_RL_INC_VAL(req->lb_mac_rate);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE,
+ inc_val);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE,
+ NIG_RL_MAX_VAL(inc_val, req->mtu));
+
+ /* Enable */
+ ctrl |=
+ 1 <<
+ NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT;
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
+ }
+
+ /* Disable global LB-only RL */
+ ctrl =
+ NIG_RL_BASE_TYPE <<
+ NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT;
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
+
+ /* Configure and enable global LB-only RL */
+ if (req->lb_rate) {
+ /* Configure */
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD,
+ NIG_RL_PERIOD_CLK_25M);
+ inc_val = NIG_RL_INC_VAL(req->lb_rate);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_VALUE,
+ inc_val);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_MAX_VALUE,
+ NIG_RL_MAX_VAL(inc_val, req->mtu));
+
+ /* Enable */
+ ctrl |=
+ 1 << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT;
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
+ }
+
+ /* Per-TC RLs */
+ for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS;
+ tc++, reg_offset += 4) {
+ /* Disable TC RL */
+ ctrl =
+ NIG_RL_BASE_TYPE <<
+ NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT;
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);
+
+ /* Configure and enable TC RL */
+ if (!req->tc_rate[tc])
+ continue;
+
+ /* Configure */
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 +
+ reg_offset, NIG_RL_PERIOD_CLK_25M);
+ inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 +
+ reg_offset, inc_val);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 +
+ reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu));
+
+ /* Enable */
+ ctrl |= 1 <<
+ NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_CTRL_0 +
+ reg_offset, ctrl);
+ }
+}
+
+void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_nig_pri_tc_map_req *req)
+{
+ u8 tc_pri_mask[NUM_OF_PHYS_TCS] = { 0 };
+ u32 pri_tc_mask = 0;
+ u8 pri, tc;
+
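+ /* Each priority occupies a NIG_PRIORITY_MAP_TC_BITS-wide nibble of
+ * pri_tc_mask; e.g. priority 0 -> TC 2 and priority 1 -> TC 5 packs as
+ * 0x52. tc_pri_mask[] holds the reverse map, one bit per priority.
+ */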
+ for (pri = 0; pri < NUM_OF_VLAN_PRIORITIES; pri++) {
+ if (!req->pri[pri].valid)
+ continue;
+
+ pri_tc_mask |= (req->pri[pri].tc_id <<
+ (pri * NIG_PRIORITY_MAP_TC_BITS));
+ tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri);
+ }
+
+ /* Write priority -> TC mask */
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_PKT_PRIORITY_TO_TC, pri_tc_mask);
+
+ /* Write TC -> priority mask */
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4,
+ tc_pri_mask[tc]);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_TC0_PRIORITY_MASK + tc * 4,
+ tc_pri_mask[tc]);
+ }
+}
+
+
+/* PRS: ETS configuration constants */
+#define PRS_ETS_MIN_WFQ_BYTES 1600
+#define PRS_ETS_UP_BOUND(weight, mtu) \
+ (2 * ((weight) > (mtu) ? (weight) : (mtu)))
+
+
+void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, struct init_ets_req *req)
+{
+ u32 tc_weight_addr_diff, tc_bound_addr_diff, min_weight = 0xffffffff;
+ u8 tc, sp_tc_map = 0, wfq_tc_map = 0;
+
+ tc_weight_addr_diff = PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 -
+ PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
+ tc_bound_addr_diff = PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 -
+ PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0;
+
+ for (tc = 0; tc < NUM_OF_TCS; tc++) {
+ struct init_ets_tc_req *tc_req = &req->tc_req[tc];
+
+ /* Update SP map */
+ if (tc_req->use_sp)
+ sp_tc_map |= (1 << tc);
+
+ if (!tc_req->use_wfq)
+ continue;
+
+ /* Update WFQ map */
+ wfq_tc_map |= (1 << tc);
+
+ /* Find minimal weight */
+ if (tc_req->weight < min_weight)
+ min_weight = tc_req->weight;
+ }
+
+ /* write SP map */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_STRICT, sp_tc_map);
+
+ /* write WFQ map */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ,
+ wfq_tc_map);
+
+ /* write WFQ weights */
+ for (tc = 0; tc < NUM_OF_TCS; tc++) {
+ struct init_ets_tc_req *tc_req = &req->tc_req[tc];
+ u32 byte_weight;
+
+ if (!tc_req->use_wfq)
+ continue;
+
+ /* Translate weight to bytes */
+ byte_weight = (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) /
+ min_weight;
+
+ /* Write WFQ weight */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 + tc *
+ tc_weight_addr_diff, byte_weight);
+
+ /* Write WFQ upper bound */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 +
+ tc * tc_bound_addr_diff, PRS_ETS_UP_BOUND(byte_weight,
+ req->mtu));
+ }
+}
+
+
+/* BRB: RAM configuration constants */
+#define BRB_TOTAL_RAM_BLOCKS_BB 4800
+#define BRB_TOTAL_RAM_BLOCKS_K2 5632
+#define BRB_BLOCK_SIZE 128
+#define BRB_MIN_BLOCKS_PER_TC 9
+#define BRB_HYST_BYTES 10240
+#define BRB_HYST_BLOCKS (BRB_HYST_BYTES / BRB_BLOCK_SIZE)
+
+/* Temporary big RAM allocation - should be updated */
+void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, struct init_brb_ram_req *req)
+{
+ u32 tc_headroom_blocks, min_pkt_size_blocks, total_blocks;
+ u32 active_port_blocks, reg_offset = 0;
+ u8 port, active_ports = 0;
+
+ tc_headroom_blocks = (u32)DIV_ROUND_UP(req->headroom_per_tc,
+ BRB_BLOCK_SIZE);
+ min_pkt_size_blocks = (u32)DIV_ROUND_UP(req->min_pkt_size,
+ BRB_BLOCK_SIZE);
+ total_blocks = ECORE_IS_K2(p_hwfn->p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 :
+ BRB_TOTAL_RAM_BLOCKS_BB;
+
+ /* Find number of active ports */
+ for (port = 0; port < MAX_NUM_PORTS; port++)
+ if (req->num_active_tcs[port])
+ active_ports++;
+
+ active_port_blocks = (u32)(total_blocks / active_ports);
+
+ for (port = 0; port < req->max_ports_per_engine; port++) {
+ u32 port_blocks, port_shared_blocks, port_guaranteed_blocks;
+ u32 full_xoff_th, full_xon_th, pause_xoff_th, pause_xon_th;
+ u32 tc_guaranteed_blocks;
+ u8 tc;
+
+ /* Calculate per-port sizes */
+ tc_guaranteed_blocks = (u32)DIV_ROUND_UP(req->guranteed_per_tc,
+ BRB_BLOCK_SIZE);
+ port_blocks = req->num_active_tcs[port] ? active_port_blocks :
+ 0;
+ port_guaranteed_blocks = req->num_active_tcs[port] *
+ tc_guaranteed_blocks;
+ port_shared_blocks = port_blocks - port_guaranteed_blocks;
+ full_xoff_th = req->num_active_tcs[port] *
+ BRB_MIN_BLOCKS_PER_TC;
+ full_xon_th = full_xoff_th + min_pkt_size_blocks;
+ pause_xoff_th = tc_headroom_blocks;
+ pause_xon_th = pause_xoff_th + min_pkt_size_blocks;
+
+ /* Init total size per port */
+ ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4,
+ port_blocks);
+
+ /* Init shared size per port */
+ ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4,
+ port_shared_blocks);
+
+ for (tc = 0; tc < NUM_OF_TCS; tc++, reg_offset += 4) {
+ /* Clear init values for non-active TCs */
+ if (tc == req->num_active_tcs[port]) {
+ tc_guaranteed_blocks = 0;
+ full_xoff_th = 0;
+ full_xon_th = 0;
+ pause_xoff_th = 0;
+ pause_xon_th = 0;
+ }
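+ /* The zeroed values then remain in effect for the rest of this
+ * port's TCs, since they are only recomputed per port.
+ */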
+
+ /* Init guaranteed size per TC */
+ ecore_wr(p_hwfn, p_ptt,
+ BRB_REG_TC_GUARANTIED_0 + reg_offset,
+ tc_guaranteed_blocks);
+ ecore_wr(p_hwfn, p_ptt,
+ BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset,
+ BRB_HYST_BLOCKS);
+
+ /* Init pause/full thresholds per physical TC - for
+ * loopback traffic.
+ */
+ ecore_wr(p_hwfn, p_ptt,
+ BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 +
+ reg_offset, full_xoff_th);
+ ecore_wr(p_hwfn, p_ptt,
+ BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 +
+ reg_offset, full_xon_th);
+ ecore_wr(p_hwfn, p_ptt,
+ BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 +
+ reg_offset, pause_xoff_th);
+ ecore_wr(p_hwfn, p_ptt,
+ BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 +
+ reg_offset, pause_xon_th);
+
+ /* Init pause/full thresholds per physical TC - for
+ * main traffic.
+ */
+ ecore_wr(p_hwfn, p_ptt,
+ BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 +
+ reg_offset, full_xoff_th);
+ ecore_wr(p_hwfn, p_ptt,
+ BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 +
+ reg_offset, full_xon_th);
+ ecore_wr(p_hwfn, p_ptt,
+ BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 +
+ reg_offset, pause_xoff_th);
+ ecore_wr(p_hwfn, p_ptt,
+ BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 +
+ reg_offset, pause_xon_th);
+ }
+ }
+}
+
+/* In MF mode, should be called once per engine to set the EtherType of the OuterTag */
+void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 ethType)
+{
+ /* Update PRS register */
+ STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
+
+ /* Update NIG register */
+ STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
+
+ /* Update PBF register */
+ STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
+}
+
+/* In MF mode, should be called once per port to set the EtherType of the OuterTag */
+void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 ethType)
+{
+ /* Update DORQ register */
+ STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);
+}
+
+#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
+(var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0))
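+/* The macro clears bit <offset> in var and sets it again only when enable is
+ * true, e.g. SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, 5, false) clears bit 5 of
+ * reg_val.
+ */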
+#define PRS_ETH_TUNN_FIC_FORMAT -188897008
+void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u16 dest_port)
+{
+ /* Update PRS register */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
+
+ /* Update NIG register */
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
+
+ /* Update PBF register */
+ ecore_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
+}
+
+void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, bool vxlan_enable)
+{
+ u32 reg_val;
+
+ /* Update PRS register */
+ reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT,
+ vxlan_enable);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+ if (reg_val) {
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_TUNN_FIC_FORMAT);
+ }
+
+ /* Update NIG register */
+ reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+ NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT,
+ vxlan_enable);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
+
+ /* Update DORQ register */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
+ vxlan_enable ? 1 : 0);
+}
+
+void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool eth_gre_enable, bool ip_gre_enable)
+{
+ u32 reg_val;
+
+ /* Update PRS register */
+ reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT,
+ eth_gre_enable);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT,
+ ip_gre_enable);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+ if (reg_val) {
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_TUNN_FIC_FORMAT);
+ }
+
+ /* Update NIG register */
+ reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+ NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT,
+ eth_gre_enable);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+ NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT,
+ ip_gre_enable);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
+
+ /* Update DORQ registers */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
+ eth_gre_enable ? 1 : 0);
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
+ ip_gre_enable ? 1 : 0);
+}
+
+void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u16 dest_port)
+{
+ /* Update PRS register */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
+
+ /* Update NIG register */
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
+
+ /* Update PBF register */
+ ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
+}
+
+void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool eth_geneve_enable, bool ip_geneve_enable)
+{
+ u32 reg_val;
+
+ /* Update PRS register */
+ reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT,
+ eth_geneve_enable);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT,
+ ip_geneve_enable);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+ if (reg_val) {
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_TUNN_FIC_FORMAT);
+ }
+
+ /* Update NIG register */
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
+ eth_geneve_enable ? 1 : 0);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE,
+ ip_geneve_enable ? 1 : 0);
+
+ /* EDPM with geneve tunnel not supported in BB */
+ if (ECORE_IS_BB_B0(p_hwfn->p_dev))
+ return;
+
+ /* Update DORQ registers */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5,
+ eth_geneve_enable ? 1 : 0);
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5,
+ ip_geneve_enable ? 1 : 0);
+}
+
+
+#define T_ETH_PACKET_ACTION_GFT_EVENTID 23
+#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272
+#define T_ETH_PACKET_MATCH_RFS_EVENTID 25
+#define PARSER_ETH_CONN_CM_HDR 0
+#define CAM_LINE_SIZE sizeof(u32)
+#define RAM_LINE_SIZE sizeof(u64)
+#define REG_SIZE sizeof(u32)
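+
+/* A GFT profile RAM line is 64 bits wide and is programmed as
+ * RAM_LINE_SIZE / REG_SIZE (i.e. two) consecutive 32-bit register writes.
+ */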
+
+void ecore_set_rfs_mode_disable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 pf_id)
+{
+ union gft_cam_line_union cam_line;
+ struct gft_ram_line ram_line;
+ u32 i, *ram_line_ptr;
+
+ ram_line_ptr = (u32 *)&ram_line;
+
+ /* Stop using gft logic, disable gft search */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0);
+
+ /* Clean ram & cam for next rfs/gft session */
+
+ /* Zero camline */
+ OSAL_MEMSET(&cam_line, 0, sizeof(cam_line));
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
+ cam_line.cam_line_mapped.camline);
+
+ /* Zero ramline */
+ OSAL_MEMSET(&ram_line, 0, sizeof(ram_line));
+
+ /* Each iteration write to reg */
+ for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
+ RAM_LINE_SIZE * pf_id +
+ i * REG_SIZE, *(ram_line_ptr + i));
+}
+
+
+void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 rfs_cm_hdr_event_id;
+
+ /* Set RFS event ID to be awakened in Tstorm by PRS */
+ rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
+ rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
+ PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+ rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
+ PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
+}
+
+void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 pf_id,
+ bool tcp,
+ bool udp,
+ bool ipv4,
+ bool ipv6)
+{
+ u32 rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
+ union gft_cam_line_union camLine;
+ struct gft_ram_line ramLine;
+ u32 *ramLinePointer = (u32 *)&ramLine;
+ int i;
+
+ if (!ipv6 && !ipv4)
+ DP_NOTICE(p_hwfn, true,
+ "set_rfs_mode_enable: must accept at "
+ "least on of - ipv4 or ipv6");
+
+ if (!tcp && !udp)
+ DP_NOTICE(p_hwfn, true,
+ "set_rfs_mode_enable: must accept at "
+ "least on of - udp or tcp");
+
+ /* Set RFS event ID to be awakened in Tstorm by PRS */
+ rfs_cm_hdr_event_id |= T_ETH_PACKET_MATCH_RFS_EVENTID <<
+ PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+ rfs_cm_hdr_event_id |= PARSER_ETH_CONN_CM_HDR <<
+ PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
+
+ /* Configure Registers for RFS mode */
+
+ /* Enable gft search */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0); /* do not load
+ * context, only cid,
+ * in PRS on match
+ */
+ camLine.cam_line_mapped.camline = 0;
+
+ /* Cam line is now valid!! */
+ SET_FIELD(camLine.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_VALID, 1);
+
+ /* Filters are per PF!! */
+ SET_FIELD(camLine.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_PF_ID_MASK, 1);
+ SET_FIELD(camLine.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
+
+ if (!(tcp && udp)) {
+ SET_FIELD(camLine.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
+ if (tcp)
+ SET_FIELD(camLine.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
+ GFT_PROFILE_TCP_PROTOCOL);
+ else
+ SET_FIELD(camLine.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
+ GFT_PROFILE_UDP_PROTOCOL);
+ }
+
+ if (!(ipv4 && ipv6)) {
+ SET_FIELD(camLine.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
+ if (ipv4)
+ SET_FIELD(camLine.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_IP_VERSION,
+ GFT_PROFILE_IPV4);
+ else
+ SET_FIELD(camLine.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_IP_VERSION,
+ GFT_PROFILE_IPV6);
+ }
+
+ /* Write characteristics to cam */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
+ camLine.cam_line_mapped.camline);
+ camLine.cam_line_mapped.camline =
+ ecore_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
+
+ /* Write line to RAM - compare to filter 4 tuple */
+ ramLine.lo = 0;
+ ramLine.hi = 0;
+ SET_FIELD(ramLine.hi, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(ramLine.hi, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(ramLine.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ramLine.lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ramLine.lo, GFT_RAM_LINE_SRC_PORT, 1);
+ SET_FIELD(ramLine.lo, GFT_RAM_LINE_DST_PORT, 1);
+
+ /* Each iteration write to reg */
+ for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
+ RAM_LINE_SIZE * pf_id +
+ i * REG_SIZE, *(ramLinePointer + i));
+
+ /* Set default profile so that no filter match will happen */
+ ramLine.lo = 0xffff;
+ ramLine.hi = 0xffff;
+ for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
+ RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH +
+ i * REG_SIZE, *(ramLinePointer + i));
+}
+
+/* Configure VF zone size mode */
+void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u16 mode,
+ bool runtime_init)
+{
+ u32 msdm_vf_size_log = MSTORM_VF_ZONE_DEFAULT_SIZE_LOG;
+ u32 msdm_vf_offset_mask;
+
+ if (mode == VF_ZONE_SIZE_MODE_DOUBLE)
+ msdm_vf_size_log += 1;
+ else if (mode == VF_ZONE_SIZE_MODE_QUAD)
+ msdm_vf_size_log += 2;
+
+ msdm_vf_offset_mask = (1 << msdm_vf_size_log) - 1;
+
+ if (runtime_init) {
+ STORE_RT_REG(p_hwfn,
+ PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET,
+ msdm_vf_size_log);
+ STORE_RT_REG(p_hwfn,
+ PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET,
+ msdm_vf_offset_mask);
+ } else {
+ ecore_wr(p_hwfn, p_ptt,
+ PGLUE_B_REG_MSDM_VF_SHIFT_B, msdm_vf_size_log);
+ ecore_wr(p_hwfn, p_ptt,
+ PGLUE_B_REG_MSDM_OFFSET_MASK_B, msdm_vf_offset_mask);
+ }
+}
+
+/* Get mstorm statistics for offset by VF zone size mode */
+u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
+ u16 stat_cnt_id,
+ u16 vf_zone_size_mode)
+{
+ u32 offset = MSTORM_QUEUE_STAT_OFFSET(stat_cnt_id);
+
+ if ((vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) &&
+ (stat_cnt_id > MAX_NUM_PFS)) {
+ if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
+ offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
+ (stat_cnt_id - MAX_NUM_PFS);
+ else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
+ offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
+ (stat_cnt_id - MAX_NUM_PFS);
+ }
+
+ return offset;
+}
+
+/* Get mstorm VF producer offset by VF zone size mode */
+u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
+ u8 vf_id,
+ u8 vf_queue_id,
+ u16 vf_zone_size_mode)
+{
+ u32 offset = MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id);
+
+ if (vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) {
+ if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
+ offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
+ vf_id;
+ else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
+ offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
+ vf_id;
+ }
+
+ return offset;
+}
+
+/* Calculate CRC8 of first 4 bytes in buf */
+static u8 ecore_calc_crc8(const u8 *buf)
+{
+ u32 i, j, crc = 0xff << 8;
+
+ /* CRC-8 polynomial */
+ #define POLY 0x1070
+
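+ /* MSB-first CRC-8 over the 4 input bytes: the CRC value is kept in the
+ * upper byte of the 16-bit working register (initialized to 0xFF), and
+ * POLY is the 0x07 generator pre-shifted so that, together with the
+ * extra << 3 below, its top bit lines up with bit 15.
+ */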
+ for (j = 0; j < 4; j++, buf++) {
+ crc ^= (*buf << 8);
+ for (i = 0; i < 8; i++) {
+ if (crc & 0x8000)
+ crc ^= (POLY << 3);
+
+ crc <<= 1;
+ }
+ }
+
+ return (u8)(crc >> 8);
+}
+
+/* Calculate and return CDU validation byte per connection type / region /
+ * cid
+ */
+static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region,
+ u32 cid)
+{
+ const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
+ u8 crc, validation_byte = 0;
+ u32 validation_string = 0;
+ const u8 *data_to_crc_rev;
+ u8 data_to_crc[4];
+
+ data_to_crc_rev = (const u8 *)&validation_string;
+
+ /*
+ * The CRC is calculated on the String-to-compress:
+ * [31:8] = {CID[31:20],CID[11:0]}
+ * [7:4] = Region
+ * [3:0] = Type
+ */
+ if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
+ validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);
+
+ if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
+ validation_string |= ((region & 0xF) << 4);
+
+ if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
+ validation_string |= (conn_type & 0xF);
+
+ /* Convert to big-endian (ntoh()) */
+ data_to_crc[0] = data_to_crc_rev[3];
+ data_to_crc[1] = data_to_crc_rev[2];
+ data_to_crc[2] = data_to_crc_rev[1];
+ data_to_crc[3] = data_to_crc_rev[0];
+
+ crc = ecore_calc_crc8(data_to_crc);
+
+ validation_byte |= ((validation_cfg >>
+ CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;
+
+ if ((validation_cfg >>
+ CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
+ validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
+ else
+ validation_byte |= crc & 0x7F;
+
+ return validation_byte;
+}
+
+/* Calculate and set validation bytes for session context */
+void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
+ u8 ctx_type, u32 cid)
+{
+ u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
+
+ p_ctx = (u8 *)p_ctx_mem;
+ x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
+ t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
+ u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
+
+ OSAL_MEMSET(p_ctx, 0, ctx_size);
+
+ *x_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 3, cid);
+ *t_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 4, cid);
+ *u_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 5, cid);
+}
+
+/* Calculate and set validation bytes for task context */
+void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size,
+ u8 ctx_type, u32 tid)
+{
+ u8 *p_ctx, *region1_val_ptr;
+
+ p_ctx = (u8 *)p_ctx_mem;
+ region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
+
+ OSAL_MEMSET(p_ctx, 0, ctx_size);
+
+ *region1_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 1, tid);
+}
+
+/* Memset session context to 0 while preserving validation bytes */
+void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
+{
+ u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
+ u8 x_val, t_val, u_val;
+
+ p_ctx = (u8 *)p_ctx_mem;
+ x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
+ t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
+ u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
+
+ x_val = *x_val_ptr;
+ t_val = *t_val_ptr;
+ u_val = *u_val_ptr;
+
+ OSAL_MEMSET(p_ctx, 0, ctx_size);
+
+ *x_val_ptr = x_val;
+ *t_val_ptr = t_val;
+ *u_val_ptr = u_val;
+}
+
+/* Memset task context to 0 while preserving validation bytes */
+void ecore_memset_task_ctx(void *p_ctx_mem, const u32 ctx_size,
+ const u8 ctx_type)
+{
+ u8 *p_ctx, *region1_val_ptr;
+ u8 region1_val;
+
+ p_ctx = (u8 *)p_ctx_mem;
+ region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
+
+ region1_val = *region1_val_ptr;
+
+ OSAL_MEMSET(p_ctx, 0, ctx_size);
+
+ *region1_val_ptr = region1_val;
+}
+
+/* Enable and configure context validation */
+void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 ctx_validation;
+
+ /* Enable validation for connection region 3 - bits [31:24] */
+ ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
+ ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);
+
+ /* Enable validation for connection region 5 - bits [15: 8] */
+ ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
+ ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);
+
+ /* Enable validation for connection region 1 - bits [15: 8] */
+ ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
+ ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
+}
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_init_fw_funcs.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_init_fw_funcs.h
new file mode 100644
index 00000000..4da3fc29
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_init_fw_funcs.h
@@ -0,0 +1,471 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef _INIT_FW_FUNCS_H
+#define _INIT_FW_FUNCS_H
+/* Forward declarations */
+
+struct init_qm_pq_params;
+
+/**
+ * @brief ecore_qm_pf_mem_size - Prepare QM ILT sizes
+ *
+ * Returns the required host memory size in 4KB units.
+ * Must be called before all QM init HSI functions.
+ *
+ * @param pf_id - physical function ID
+ * @param num_pf_cids - number of connections used by this PF
+ * @param num_vf_cids - number of connections used by VFs of this PF
+ * @param num_tids - number of tasks used by this PF
+ * @param num_pf_pqs - number of PQs used by this PF
+ * @param num_vf_pqs - number of PQs used by VFs of this PF
+ *
+ * @return The required host memory size in 4KB units.
+ */
+u32 ecore_qm_pf_mem_size(u8 pf_id,
+ u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids,
+ u16 num_pf_pqs,
+ u16 num_vf_pqs);
+
+/**
+ * @brief ecore_qm_common_rt_init - Prepare QM runtime init values for engine
+ * phase
+ *
+ * @param p_hwfn
+ * @param max_ports_per_engine - max number of ports per engine in HW
+ * @param max_phys_tcs_per_port - max number of physical TCs per port in HW
+ * @param pf_rl_en - enable per-PF rate limiters
+ * @param pf_wfq_en - enable per-PF WFQ
+ * @param vport_rl_en - enable per-VPORT rate limiters
+ * @param vport_wfq_en - enable per-VPORT WFQ
+ * @param port_params - array of size MAX_NUM_PORTS with params for each port
+ *
+ * @return 0 on success, -1 on error.
+ */
+int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
+ u8 max_ports_per_engine,
+ u8 max_phys_tcs_per_port,
+ bool pf_rl_en,
+ bool pf_wfq_en,
+ bool vport_rl_en,
+ bool vport_wfq_en,
+ struct init_qm_port_params port_params[MAX_NUM_PORTS]);
+
+/**
+ * @brief ecore_qm_pf_rt_init Prepare QM runtime init values for the PF phase
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param port_id - port ID
+ * @param pf_id - PF ID
+ * @param max_phys_tcs_per_port - max number of physical TCs per port in HW
+ * @param is_first_pf - 1 = first PF in engine, 0 = otherwise
+ * @param num_pf_cids - number of connections used by this PF
+ * @param num_vf_cids - number of connections used by VFs of this PF
+ * @param num_tids - number of tasks used by this PF
+ * @param start_pq - first Tx PQ ID associated with this PF
+ * @param num_pf_pqs - number of Tx PQs associated with this PF
+ * (non-VF)
+ * @param num_vf_pqs - number of Tx PQs associated with a VF
+ * @param start_vport - first VPORT ID associated with this PF
+ * @param num_vports - number of VPORTs associated with this PF
+ * @param pf_wfq - WFQ weight. if PF WFQ is globally disabled, the weight must
+ * be 0. otherwise, the weight must be non-zero.
+ * @param pf_rl - rate limit in Mb/sec units. a value of 0 means don't
+ * configure. ignored if PF RL is globally disabled.
+ * @param pq_params - array of size (num_pf_pqs+num_vf_pqs) with parameters for
+ * each Tx PQ associated with the specified PF.
+ * @param vport_params - array of size num_vports with parameters for each
+ * associated VPORT.
+ *
+ * @return 0 on success, -1 on error.
+ */
+int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 port_id,
+ u8 pf_id,
+ u8 max_phys_tcs_per_port,
+ bool is_first_pf,
+ u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids,
+ u16 start_pq,
+ u16 num_pf_pqs,
+ u16 num_vf_pqs,
+ u8 start_vport,
+ u8 num_vports,
+ u16 pf_wfq,
+ u32 pf_rl,
+ struct init_qm_pq_params *pq_params,
+ struct init_qm_vport_params *vport_params);
+
+/**
+ * @brief ecore_init_pf_wfq Initializes the WFQ weight of the specified PF
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param pf_id - PF ID
+ * @param pf_wfq - WFQ weight. Must be non-zero.
+ *
+ * @return 0 on success, -1 on error.
+ */
+int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 pf_id,
+ u16 pf_wfq);
+
+/**
+ * @brief ecore_init_pf_rl - Initializes the rate limit of the specified PF
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param pf_id - PF ID
+ * @param pf_rl - rate limit in Mb/sec units
+ *
+ * @return 0 on success, -1 on error.
+ */
+int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 pf_id,
+ u32 pf_rl);
+
+/**
+ * @brief ecore_init_vport_wfq Initializes the WFQ weight of specified VPORT
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param first_tx_pq_id - An array containing the first Tx PQ ID associated
+ * with the VPORT for each TC. This array is filled by
+ * ecore_qm_pf_rt_init
+ * @param vport_wfq - WFQ weight. Must be non-zero.
+ *
+ * @return 0 on success, -1 on error.
+ */
+int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 first_tx_pq_id[NUM_OF_TCS],
+ u16 vport_wfq);
+
+/**
+ * @brief ecore_init_vport_rl - Initializes the rate limit of the specified
+ * VPORT.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - ptt window used for writing the registers
+ * @param vport_id - VPORT ID
+ * @param vport_rl - rate limit in Mb/sec units
+ *
+ * @return 0 on success, -1 on error.
+ */
+int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 vport_id,
+ u32 vport_rl);
+
+/**
+ * @brief ecore_send_qm_stop_cmd Sends a stop command to the QM
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param is_release_cmd - true for release, false for stop.
+ * @param is_tx_pq - true for Tx PQs, false for Other PQs.
+ * @param start_pq - first PQ ID to stop
+ * @param num_pqs - Number of PQs to stop, starting from start_pq.
+ *
+ * @return bool, true if successful, false if timeout occurred while waiting
+ * for QM command done.
+ */
+bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool is_release_cmd,
+ bool is_tx_pq,
+ u16 start_pq,
+ u16 num_pqs);
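+
+/* Illustrative usage sketch (the PQ range values are placeholders): pause a
+ * range of Tx PQs and later release them:
+ *   ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, start_pq, num_pqs);
+ *   ...
+ *   ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, start_pq, num_pqs);
+ */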
+#ifndef UNUSED_HSI_FUNC
+
+/**
+ * @brief ecore_init_nig_ets - initializes the NIG ETS arbiter
+ *
+ * Based on weight/priority requirements per-TC.
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param req - the NIG ETS initialization requirements.
+ * @param is_lb - if set, the loopback port arbiter is initialized, otherwise
+ * the physical port arbiter is initialized. The pure-LB TC
+ * requirements are ignored when is_lb is cleared.
+ */
+void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_ets_req *req,
+ bool is_lb);
+
+/**
+ * @brief ecore_init_nig_lb_rl - initializes the NIG LB RLs
+ *
+ * Based on global and per-TC rate requirements
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param req - the NIG LB RLs initialization requirements.
+ */
+void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_nig_lb_rl_req *req);
+#endif /* UNUSED_HSI_FUNC */
+
+/**
+ * @brief ecore_init_nig_pri_tc_map - initializes the NIG priority to TC map.
+ *
+ * Assumes valid arguments.
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param req - required mapping from priorities to TCs.
+ */
+void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_nig_pri_tc_map_req *req);
+
+#ifndef UNUSED_HSI_FUNC
+/**
+ * @brief ecore_init_prs_ets - initializes the PRS Rx ETS arbiter
+ *
+ * Based on weight/priority requirements per-TC.
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param req - the PRS ETS initialization requirements.
+ */
+void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_ets_req *req);
+#endif /* UNUSED_HSI_FUNC */
+
+#ifndef UNUSED_HSI_FUNC
+/**
+ * @brief ecore_init_brb_ram - initializes BRB RAM sizes per TC
+ *
+ * Based on weight/priority requirements per-TC.
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param req - the BRB RAM initialization requirements.
+ */
+void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_brb_ram_req *req);
+#endif /* UNUSED_HSI_FUNC */
+
+#ifndef UNUSED_HSI_FUNC
+/**
+ * @brief ecore_set_engine_mf_ovlan_eth_type - initializes NIG, PRS, PBF and
+ * LLH ethType registers to the input
+ * ethType. Should be called once per
+ * engine if the engine is in BD mode.
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param ethType - etherType to configure
+ */
+void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 ethType);
+
+/**
+ * @brief ecore_set_port_mf_ovlan_eth_type - initializes DORQ ethType registers
+ * to the input ethType. Should be
+ * called once per port.
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param ethType - etherType to configure
+ */
+void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 ethType);
+#endif /* UNUSED_HSI_FUNC */
+
+/**
+ * @brief ecore_set_vxlan_dest_port - initializes vxlan tunnel destination udp
+ * port
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param dest_port - vxlan destination udp port.
+ */
+void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 dest_port);
+
+/**
+ * @brief ecore_set_vxlan_enable - enable or disable VXLAN tunnel in HW
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param vxlan_enable - vxlan enable flag.
+ */
+void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool vxlan_enable);
+
+/**
+ * @brief ecore_set_gre_enable - enable or disable GRE tunnel in HW
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param eth_gre_enable - eth GRE enable flag.
+ * @param ip_gre_enable - IP GRE enable flag.
+ */
+void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool eth_gre_enable,
+ bool ip_gre_enable);
+
+/**
+ * @brief ecore_set_geneve_dest_port - initializes geneve tunnel destination
+ * udp port
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param dest_port - geneve destination udp port.
+ */
+void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 dest_port);
+
+/**
+ * @brief ecore_set_geneve_enable - enable or disable GENEVE tunnel in HW
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param eth_geneve_enable - eth GENEVE enable flag.
+ * @param ip_geneve_enable - IP GENEVE enable flag.
+ */
+void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool eth_geneve_enable,
+ bool ip_geneve_enable);
+#ifndef UNUSED_HSI_FUNC
+
+/**
+* @brief ecore_set_gft_event_id_cm_hdr - configure GFT event id and cm header
+*
+* @param p_ptt - ptt window used for writing the registers.
+*/
+void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_set_rfs_mode_disable - Disable and configure HW for RFS
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param pf_id - pf on which to disable RFS.
+ */
+void ecore_set_rfs_mode_disable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 pf_id);
+
+/**
+* @brief ecore_set_rfs_mode_enable - enable and configure HW for RFS
+*
+* @param p_ptt - ptt window used for writing the registers.
+* @param pf_id - pf on which to enable RFS.
+* @param tcp - set profile tcp packets.
+* @param udp - set profile udp packet.
+* @param ipv4 - set profile ipv4 packet.
+* @param ipv6 - set profile ipv6 packet.
+*/
+void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 pf_id,
+ bool tcp,
+ bool udp,
+ bool ipv4,
+ bool ipv6);
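+
+/* Illustrative call (the PF ID is a placeholder): enable RFS classification of
+ * TCP over IPv4 only:
+ *   ecore_set_rfs_mode_enable(p_hwfn, p_ptt, 0, true, false, true, false);
+ */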
+#endif /* UNUSED_HSI_FUNC */
+
+/**
+* @brief ecore_config_vf_zone_size_mode - Configure VF zone size mode. Must be
+* used before the first ETH queue is started.
+*
+*
+* @param p_ptt - ptt window used for writing the registers. Don't care
+* if runtime_init used
+* @param mode - VF zone size mode. Use enum vf_zone_size_mode.
+* @param runtime_init - Set 1 to init runtime registers in engine phase. Set 0
+* if VF zone size mode configured after engine phase.
+*/
+void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn, struct ecore_ptt
+ *p_ptt, u16 mode, bool runtime_init);
+
+/**
+ * @brief ecore_get_mstorm_queue_stat_offset - Get mstorm statistics offset by
+ * VF zone size mode.
+*
+* @param stat_cnt_id - statistic counter id
+* @param vf_zone_size_mode - VF zone size mode. Use enum vf_zone_size_mode.
+*/
+u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
+ u16 stat_cnt_id, u16 vf_zone_size_mode);
+
+/**
+ * @brief ecore_get_mstorm_eth_vf_prods_offset - Get VF producer offset by VF
+ * zone size mode.
+*
+* @param vf_id - vf id.
+* @param vf_queue_id - per VF rx queue id.
+* @param vf_zone_size_mode - vf zone size mode. Use enum vf_zone_size_mode.
+*/
+u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn, u8 vf_id, u8
+ vf_queue_id, u16 vf_zone_size_mode);
+/**
+ * @brief ecore_enable_context_validation - Enable and configure context
+ * validation.
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ */
+void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+/**
+ * @brief ecore_calc_session_ctx_validation - Calculate validation byte for
+ * session context.
+ *
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - context size.
+ * @param ctx_type - context type.
+ * @param cid - context cid.
+ */
+void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
+ u8 ctx_type, u32 cid);
+/**
+ * @brief ecore_calc_task_ctx_validation - Calculate validation byte for task
+ * context.
+ *
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - context size.
+ * @param ctx_type - context type.
+ * @param tid - context tid.
+ */
+void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size,
+ u8 ctx_type, u32 tid);
+/**
+ * @brief ecore_memset_session_ctx - Memset session context to 0 while
+ * preserving validation bytes.
+ *
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - size to initialize.
+ * @param ctx_type - context type.
+ */
+void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size,
+ u8 ctx_type);
+/**
+ * @brief ecore_memset_task_ctx - Memset task context to 0 while preserving
+ * validation bytes.
+ *
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - size to initialize.
+ * @param ctx_type - context type.
+ */
+void ecore_memset_task_ctx(void *p_ctx_mem, u32 ctx_size,
+ u8 ctx_type);
+#endif
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_init_ops.c b/src/seastar/dpdk/drivers/net/qede/base/ecore_init_ops.c
new file mode 100644
index 00000000..b907a95e
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_init_ops.c
@@ -0,0 +1,596 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+/* include the precompiled configuration values - only once */
+#include "bcm_osal.h"
+#include "ecore_hsi_common.h"
+#include "ecore.h"
+#include "ecore_hw.h"
+#include "ecore_status.h"
+#include "ecore_rt_defs.h"
+#include "ecore_init_fw_funcs.h"
+
+#include "ecore_iro_values.h"
+#include "ecore_sriov.h"
+#include "ecore_gtt_values.h"
+#include "reg_addr.h"
+#include "ecore_init_ops.h"
+
+#define ECORE_INIT_MAX_POLL_COUNT 100
+#define ECORE_INIT_POLL_PERIOD_US 500
+
+void ecore_init_iro_array(struct ecore_dev *p_dev)
+{
+ p_dev->iro_arr = iro_arr;
+}
+
+/* Runtime configuration helpers */
+void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn)
+{
+ int i;
+
+ for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
+ p_hwfn->rt_data.b_valid[i] = false;
+}
+
+void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn, u32 rt_offset, u32 val)
+{
+ p_hwfn->rt_data.init_val[rt_offset] = val;
+ p_hwfn->rt_data.b_valid[rt_offset] = true;
+}
+
+void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
+ u32 rt_offset, u32 *p_val, osal_size_t size)
+{
+ osal_size_t i;
+
+ for (i = 0; i < size / sizeof(u32); i++) {
+ p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
+ p_hwfn->rt_data.b_valid[rt_offset + i] = true;
+ }
+}
+
+static enum _ecore_status_t ecore_init_rt(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 addr,
+ u16 rt_offset,
+ u16 size, bool b_must_dmae)
+{
+ u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
+ bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
+ u16 i, segment;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* Since not all RT entries are initialized, go over the RT and
+ * for each segment of initialized values use DMA.
+ */
+ for (i = 0; i < size; i++) {
+ if (!p_valid[i])
+ continue;
+
+ /* In case there isn't any wide-bus configuration here,
+ * simply write the data instead of using dmae.
+ */
+ if (!b_must_dmae) {
+ ecore_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
+ continue;
+ }
+
+ /* Start of a new segment */
+ for (segment = 1; i + segment < size; segment++)
+ if (!p_valid[i + segment])
+ break;
+
+ rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (osal_uintptr_t)(p_init_val + i),
+ addr + (i << 2), segment, 0);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Jump over the entire segment, including invalid entry */
+ i += segment;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_rt_data *rt_data = &p_hwfn->rt_data;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return ECORE_SUCCESS;
+
+ rt_data->b_valid = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(bool) * RUNTIME_ARRAY_SIZE);
+ if (!rt_data->b_valid)
+ return ECORE_NOMEM;
+
+ rt_data->init_val = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(u32) * RUNTIME_ARRAY_SIZE);
+ if (!rt_data->init_val) {
+ OSAL_FREE(p_hwfn->p_dev, rt_data->b_valid);
+ return ECORE_NOMEM;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+void ecore_init_free(struct ecore_hwfn *p_hwfn)
+{
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.init_val);
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.b_valid);
+}
+
+static enum _ecore_status_t ecore_init_array_dmae(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 addr,
+ u32 dmae_data_offset,
+ u32 size, const u32 *p_buf,
+ bool b_must_dmae,
+ bool b_can_dmae)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* Perform DMAE only for lengthy enough sections or for wide-bus */
+#ifndef ASIC_ONLY
+ if ((CHIP_REV_IS_SLOW(p_hwfn->p_dev) && (size < 16)) ||
+ !b_can_dmae || (!b_must_dmae && (size < 16))) {
+#else
+ if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
+#endif
+ const u32 *data = p_buf + dmae_data_offset;
+ u32 i;
+
+ for (i = 0; i < size; i++)
+ ecore_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
+ } else {
+ rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (osal_uintptr_t)(p_buf +
+ dmae_data_offset),
+ addr, size, 0);
+ }
+
+ return rc;
+}
+
+static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 addr, u32 fill,
+ u32 fill_count)
+{
+ static u32 zero_buffer[DMAE_MAX_RW_SIZE];
+
+ OSAL_MEMSET(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);
+
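+ /* The RW_REPL_SRC flag makes the DMAE engine replicate the (zeroed)
+ * source buffer instead of advancing through it, so the same static
+ * zero buffer is reused for every chunk of the fill range.
+ */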
+ return ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (osal_uintptr_t)&zero_buffer[0],
+ addr, fill_count,
+ ECORE_DMAE_FLAG_RW_REPL_SRC);
+}
+
+static void ecore_init_fill(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 addr, u32 fill, u32 fill_count)
+{
+ u32 i;
+
+ for (i = 0; i < fill_count; i++, addr += sizeof(u32))
+ ecore_wr(p_hwfn, p_ptt, addr, fill);
+}
+
+static enum _ecore_status_t ecore_init_cmd_array(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_write_op *cmd,
+ bool b_must_dmae,
+ bool b_can_dmae)
+{
+ u32 dmae_array_offset = OSAL_LE32_TO_CPU(cmd->args.array_offset);
+ u32 data = OSAL_LE32_TO_CPU(cmd->data);
+ u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+#ifdef CONFIG_ECORE_ZIPPED_FW
+ u32 offset, output_len, input_len, max_size;
+#endif
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ union init_array_hdr *hdr;
+ const u32 *array_data;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 size;
+
+ array_data = p_dev->fw_data->arr_data;
+
+ hdr = (union init_array_hdr *)
+ (uintptr_t)(array_data + dmae_array_offset);
+ data = OSAL_LE32_TO_CPU(hdr->raw.data);
+ switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
+ case INIT_ARR_ZIPPED:
+#ifdef CONFIG_ECORE_ZIPPED_FW
+ offset = dmae_array_offset + 1;
+ input_len = GET_FIELD(data, INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
+ max_size = MAX_ZIPPED_SIZE * 4;
+ OSAL_MEMSET(p_hwfn->unzip_buf, 0, max_size);
+
+ output_len = OSAL_UNZIP_DATA(p_hwfn, input_len,
+ (u8 *)(uintptr_t)&array_data[offset],
+ max_size,
+ (u8 *)p_hwfn->unzip_buf);
+ if (output_len) {
+ rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr, 0,
+ output_len,
+ p_hwfn->unzip_buf,
+ b_must_dmae, b_can_dmae);
+ } else {
+ DP_NOTICE(p_hwfn, true, "Failed to unzip dmae data\n");
+ rc = ECORE_INVAL;
+ }
+#else
+ DP_NOTICE(p_hwfn, true,
+ "Using zipped firmware without config enabled\n");
+ rc = ECORE_INVAL;
+#endif
+ break;
+ case INIT_ARR_PATTERN:
+ {
+ u32 repeats = GET_FIELD(data,
+ INIT_ARRAY_PATTERN_HDR_REPETITIONS);
+ u32 i;
+
+ size = GET_FIELD(data,
+ INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);
+
+ for (i = 0; i < repeats; i++, addr += size << 2) {
+ rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
+ dmae_array_offset +
+ 1, size, array_data,
+ b_must_dmae,
+ b_can_dmae);
+ if (rc)
+ break;
+ }
+ break;
+ }
+ case INIT_ARR_STANDARD:
+ size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
+ rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
+ dmae_array_offset + 1,
+ size, array_data,
+ b_must_dmae, b_can_dmae);
+ break;
+ }
+
+ return rc;
+}
+
+/* init_ops write command */
+static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_write_op *p_cmd,
+ bool b_can_dmae)
+{
+ u32 data = OSAL_LE32_TO_CPU(p_cmd->data);
+ bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
+ u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* Sanitize */
+ if (b_must_dmae && !b_can_dmae) {
+ DP_NOTICE(p_hwfn, true,
+ "Need to write to %08x for Wide-bus but DMAE isn't"
+ " allowed\n",
+ addr);
+ return ECORE_INVAL;
+ }
+
+ switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
+ case INIT_SRC_INLINE:
+ data = OSAL_LE32_TO_CPU(p_cmd->args.inline_val);
+ ecore_wr(p_hwfn, p_ptt, addr, data);
+ break;
+ case INIT_SRC_ZEROS:
+ data = OSAL_LE32_TO_CPU(p_cmd->args.zeros_count);
+ if (b_must_dmae || (b_can_dmae && (data >= 64)))
+ rc = ecore_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data);
+ else
+ ecore_init_fill(p_hwfn, p_ptt, addr, 0, data);
+ break;
+ case INIT_SRC_ARRAY:
+ rc = ecore_init_cmd_array(p_hwfn, p_ptt, p_cmd,
+ b_must_dmae, b_can_dmae);
+ break;
+ case INIT_SRC_RUNTIME:
+ ecore_init_rt(p_hwfn, p_ptt, addr,
+ OSAL_LE16_TO_CPU(p_cmd->args.runtime.offset),
+ OSAL_LE16_TO_CPU(p_cmd->args.runtime.size),
+ b_must_dmae);
+ break;
+ }
+
+ return rc;
+}
+
+static OSAL_INLINE bool comp_eq(u32 val, u32 expected_val)
+{
+ return (val == expected_val);
+}
+
+static OSAL_INLINE bool comp_and(u32 val, u32 expected_val)
+{
+ return (val & expected_val) == expected_val;
+}
+
+static OSAL_INLINE bool comp_or(u32 val, u32 expected_val)
+{
+ return (val | expected_val) > 0;
+}
+
+/* init_ops read/poll commands */
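+/* Read a register and, for the EQ/OR/AND poll types, keep re-reading until the
+ * comparison against 'expected_val' passes or ECORE_INIT_MAX_POLL_COUNT is
+ * reached.
+ */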
+static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, struct init_read_op *cmd)
+{
+ bool (*comp_check)(u32 val, u32 expected_val);
+ u32 delay = ECORE_INIT_POLL_PERIOD_US, val;
+ u32 data, addr, poll;
+ int i;
+
+ data = OSAL_LE32_TO_CPU(cmd->op_data);
+ addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
+ poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+ delay *= 100;
+#endif
+
+ val = ecore_rd(p_hwfn, p_ptt, addr);
+
+ if (poll == INIT_POLL_NONE)
+ return;
+
+ switch (poll) {
+ case INIT_POLL_EQ:
+ comp_check = comp_eq;
+ break;
+ case INIT_POLL_OR:
+ comp_check = comp_or;
+ break;
+ case INIT_POLL_AND:
+ comp_check = comp_and;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
+ cmd->op_data);
+ return;
+ }
+
+ data = OSAL_LE32_TO_CPU(cmd->expected_val);
+ for (i = 0;
+ i < ECORE_INIT_MAX_POLL_COUNT && !comp_check(val, data); i++) {
+ OSAL_UDELAY(delay);
+ val = ecore_rd(p_hwfn, p_ptt, addr);
+ }
+
+ if (i == ECORE_INIT_MAX_POLL_COUNT)
+ DP_ERR(p_hwfn,
+ "Timeout when polling reg: 0x%08x [ Waiting-for: %08x"
+ " Got: %08x (comparsion %08x)]\n",
+ addr, OSAL_LE32_TO_CPU(cmd->expected_val), val,
+ OSAL_LE32_TO_CPU(cmd->op_data));
+}
+
+/* init_ops callbacks entry point */
+static void ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_callback_op *p_cmd)
+{
+ DP_NOTICE(p_hwfn, true,
+ "Currently init values have no need of callbacks\n");
+}
+
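+/* Recursively evaluate the firmware modes tree, stored in prefix notation:
+ * NOT/OR/AND operators are followed by their operand sub-trees, and any other
+ * value selects the mode bit (value - MAX_INIT_MODE_OPS) to test in 'modes'.
+ */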
+static u8 ecore_init_cmd_mode_match(struct ecore_hwfn *p_hwfn,
+ u16 *p_offset, int modes)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ const u8 *modes_tree_buf;
+ u8 arg1, arg2, tree_val;
+
+ modes_tree_buf = p_dev->fw_data->modes_tree_buf;
+ tree_val = modes_tree_buf[(*p_offset)++];
+ switch (tree_val) {
+ case INIT_MODE_OP_NOT:
+ return ecore_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
+ case INIT_MODE_OP_OR:
+ arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
+ arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
+ return arg1 | arg2;
+ case INIT_MODE_OP_AND:
+ arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
+ arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
+ return arg1 & arg2;
+ default:
+ tree_val -= MAX_INIT_MODE_OPS;
+ return (modes & (1 << tree_val)) ? 1 : 0;
+ }
+}
+
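+/* Returns 0 if the current modes match, otherwise the number of init commands
+ * to skip as encoded in the IF_MODE operation.
+ */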
+static u32 ecore_init_cmd_mode(struct ecore_hwfn *p_hwfn,
+ struct init_if_mode_op *p_cmd, int modes)
+{
+ u16 offset = OSAL_LE16_TO_CPU(p_cmd->modes_buf_offset);
+
+ if (ecore_init_cmd_mode_match(p_hwfn, &offset, modes))
+ return 0;
+ else
+ return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
+ INIT_IF_MODE_OP_CMD_OFFSET);
+}
+
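+/* Returns 0 if the op matches the current phase/phase_id, otherwise the number
+ * of init commands to skip as encoded in the IF_PHASE operation.
+ */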
+static u32 ecore_init_cmd_phase(struct ecore_hwfn *p_hwfn,
+ struct init_if_phase_op *p_cmd,
+ u32 phase, u32 phase_id)
+{
+ u32 data = OSAL_LE32_TO_CPU(p_cmd->phase_data);
+
+ if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
+ (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
+ GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
+ return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
+ INIT_IF_PHASE_OP_CMD_OFFSET);
+ else
+ return 0;
+}
+
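+/* Walk the firmware init_ops array for the given phase, executing write and
+ * read/poll commands and honouring the skips requested by IF_MODE/IF_PHASE
+ * operations.
+ */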
+enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ int phase, int phase_id, int modes)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ u32 cmd_num, num_init_ops;
+ union init_op *init_ops;
+ bool b_dmae = false;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ num_init_ops = p_dev->fw_data->init_ops_size;
+ init_ops = p_dev->fw_data->init_ops;
+
+#ifdef CONFIG_ECORE_ZIPPED_FW
+ p_hwfn->unzip_buf = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
+ MAX_ZIPPED_SIZE * 4);
+ if (!p_hwfn->unzip_buf) {
+ DP_NOTICE(p_hwfn, true, "Failed to allocate unzip buffer\n");
+ return ECORE_NOMEM;
+ }
+#endif
+
+ for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
+ union init_op *cmd = &init_ops[cmd_num];
+ u32 data = OSAL_LE32_TO_CPU(cmd->raw.op_data);
+
+ switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
+ case INIT_OP_WRITE:
+ rc = ecore_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
+ b_dmae);
+ break;
+
+ case INIT_OP_READ:
+ ecore_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
+ break;
+
+ case INIT_OP_IF_MODE:
+ cmd_num += ecore_init_cmd_mode(p_hwfn, &cmd->if_mode,
+ modes);
+ break;
+ case INIT_OP_IF_PHASE:
+ cmd_num += ecore_init_cmd_phase(p_hwfn, &cmd->if_phase,
+ phase, phase_id);
+ b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
+ break;
+ case INIT_OP_DELAY:
+ /* ecore_init_run is always invoked from
+ * sleep-able context
+ */
+ OSAL_UDELAY(cmd->delay.delay);
+ break;
+
+ case INIT_OP_CALLBACK:
+ ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
+ break;
+ }
+
+ if (rc)
+ break;
+ }
+#ifdef CONFIG_ECORE_ZIPPED_FW
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->unzip_buf);
+#endif
+ return rc;
+}
+
+void ecore_gtt_init(struct ecore_hwfn *p_hwfn)
+{
+ u32 gtt_base;
+ u32 i;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+ /* This is done by MFW on ASIC; regardless, this should only
+ * be done once per chip [i.e., common]. Implementation is
+ * not too bright, but it should work on the simple FPGA/EMUL
+ * scenarios.
+ */
+ static bool initialized;
+ int poll_cnt = 500;
+ u32 val;
+
+ /* initialize PTT/GTT (poll for completion) */
+ if (!initialized) {
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_START_INIT_PTT_GTT, 1);
+ initialized = true;
+ }
+
+ do {
+			/* ptt might be overridden by HW until this is done */
+ OSAL_UDELAY(10);
+ ecore_ptt_invalidate(p_hwfn);
+ val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_INIT_DONE_PTT_GTT);
+ } while ((val != 1) && --poll_cnt);
+
+ if (!poll_cnt)
+ DP_ERR(p_hwfn,
+ "PGLUE_B_REG_INIT_DONE didn't complete\n");
+ }
+#endif
+
+ /* Set the global windows */
+ gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;
+
+ for (i = 0; i < OSAL_ARRAY_SIZE(pxp_global_win); i++)
+ if (pxp_global_win[i])
+ REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
+ pxp_global_win[i]);
+}
+
+enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
+ const u8 *data)
+{
+ struct ecore_fw_data *fw = p_dev->fw_data;
+
+#ifdef CONFIG_ECORE_BINARY_FW
+ struct bin_buffer_hdr *buf_hdr;
+ u32 offset, len;
+
+ if (!data) {
+ DP_NOTICE(p_dev, true, "Invalid fw data\n");
+ return ECORE_INVAL;
+ }
+
+ buf_hdr = (struct bin_buffer_hdr *)(uintptr_t)data;
+
+ offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
+ fw->fw_ver_info = (struct fw_ver_info *)((uintptr_t)(data + offset));
+
+ offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
+ fw->init_ops = (union init_op *)((uintptr_t)(data + offset));
+
+ offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
+ fw->arr_data = (u32 *)((uintptr_t)(data + offset));
+
+ offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
+ fw->modes_tree_buf = (u8 *)((uintptr_t)(data + offset));
+ len = buf_hdr[BIN_BUF_INIT_CMD].length;
+ fw->init_ops_size = len / sizeof(struct init_raw_op);
+#else
+ fw->init_ops = (union init_op *)init_ops;
+ fw->arr_data = (u32 *)init_val;
+ fw->modes_tree_buf = (u8 *)modes_tree_buf;
+ fw->init_ops_size = init_ops_size;
+#endif
+
+ return ECORE_SUCCESS;
+}
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_init_ops.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_init_ops.h
new file mode 100644
index 00000000..d58c7d6a
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_init_ops.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_INIT_OPS__
+#define __ECORE_INIT_OPS__
+
+#include "ecore.h"
+
+/**
+ * @brief ecore_init_iro_array - init iro_arr.
+ *
+ *
+ * @param p_dev
+ */
+void ecore_init_iro_array(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_init_run - Run the init-sequence.
+ *
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param phase
+ * @param phase_id
+ * @param modes
+ * @return _ecore_status_t
+ */
+enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ int phase,
+ int phase_id,
+ int modes);
+
+/**
+ * @brief ecore_init_alloc - Allocate the RT array and store 'values' ptrs.
+ *
+ *
+ * @param p_hwfn
+ *
+ * @return _ecore_status_t
+ */
+enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_init_free - Free resources allocated by ecore_init_alloc.
+ *
+ *
+ * @param p_hwfn
+ */
+void ecore_init_free(struct ecore_hwfn *p_hwfn);
+
+
+/**
+ * @brief ecore_init_clear_rt_data - Clears the runtime init array.
+ *
+ *
+ * @param p_hwfn
+ */
+void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_init_store_rt_reg - Store a configuration value in the RT array.
+ *
+ *
+ * @param p_hwfn
+ * @param rt_offset
+ * @param val
+ */
+void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn,
+ u32 rt_offset,
+ u32 val);
+
+#define STORE_RT_REG(hwfn, offset, val) \
+ ecore_init_store_rt_reg(hwfn, offset, val)
+
+#define OVERWRITE_RT_REG(hwfn, offset, val) \
+ ecore_init_store_rt_reg(hwfn, offset, val)
+
+/**
+ * @brief ecore_init_store_rt_agg - Store an aggregate of values in the RT array.
+ *
+ * @param p_hwfn
+ * @param rt_offset
+ * @param val
+ * @param size
+ */
+
+void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
+ u32 rt_offset,
+ u32 *val,
+ osal_size_t size);
+
+#define STORE_RT_REG_AGG(hwfn, offset, val) \
+ ecore_init_store_rt_agg(hwfn, offset, (u32 *)&val, sizeof(val))
+
+
+/**
+ * @brief
+ * Initialize GTT global windows and set admin window
+ * related params of GTT/PTT to default values.
+ *
+ * @param p_hwfn
+ */
+void ecore_gtt_init(struct ecore_hwfn *p_hwfn);
+#endif /* __ECORE_INIT_OPS__ */
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_int.c b/src/seastar/dpdk/drivers/net/qede/base/ecore_int.c
new file mode 100644
index 00000000..8dc4d150
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_int.c
@@ -0,0 +1,2260 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "ecore.h"
+#include "ecore_spq.h"
+#include "reg_addr.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore_init_ops.h"
+#include "ecore_rt_defs.h"
+#include "ecore_int.h"
+#include "reg_addr.h"
+#include "ecore_hw.h"
+#include "ecore_sriov.h"
+#include "ecore_vf.h"
+#include "ecore_hw_defs.h"
+#include "ecore_hsi_common.h"
+#include "ecore_mcp.h"
+
+struct ecore_pi_info {
+ ecore_int_comp_cb_t comp_cb;
+ void *cookie; /* Will be sent to the compl cb function */
+};
+
+struct ecore_sb_sp_info {
+ struct ecore_sb_info sb_info;
+ /* per protocol index data */
+ struct ecore_pi_info pi_info_arr[PIS_PER_SB];
+};
+
+enum ecore_attention_type {
+ ECORE_ATTN_TYPE_ATTN,
+ ECORE_ATTN_TYPE_PARITY,
+};
+
+#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
+ ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
+
+struct aeu_invert_reg_bit {
+ char bit_name[30];
+
+#define ATTENTION_PARITY (1 << 0)
+
+#define ATTENTION_LENGTH_MASK (0x00000ff0)
+#define ATTENTION_LENGTH_SHIFT (4)
+#define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \
+ ATTENTION_LENGTH_SHIFT)
+#define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT)
+#define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY)
+#define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \
+ ATTENTION_PARITY)
+
+/* Multiple bits start with this offset */
+#define ATTENTION_OFFSET_MASK (0x000ff000)
+#define ATTENTION_OFFSET_SHIFT (12)
+
+#define ATTENTION_BB_MASK (0x00700000)
+#define ATTENTION_BB_SHIFT (20)
+#define ATTENTION_BB(value) ((value) << ATTENTION_BB_SHIFT)
+#define ATTENTION_BB_DIFFERENT (1 << 23)
+
+#define ATTENTION_CLEAR_ENABLE (1 << 28)
+ unsigned int flags;
+
+ /* Callback to call if attention will be triggered */
+ enum _ecore_status_t (*cb)(struct ecore_hwfn *p_hwfn);
+
+ enum block_id block_index;
+};
+
+struct aeu_invert_reg {
+ struct aeu_invert_reg_bit bits[32];
+};
+
+#define MAX_ATTN_GRPS (8)
+#define NUM_ATTN_REGS (9)
+
+static enum _ecore_status_t ecore_mcp_attn_cb(struct ecore_hwfn *p_hwfn)
+{
+ u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);
+
+ DP_INFO(p_hwfn->p_dev, "MCP_REG_CPU_STATE: %08x - Masking...\n", tmp);
+ ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, 0xffffffff);
+
+ return ECORE_SUCCESS;
+}
+
+#define ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK (0x3c000)
+#define ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT (14)
+#define ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK (0x03fc0)
+#define ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT (6)
+#define ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK (0x00020)
+#define ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT (5)
+#define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK (0x0001e)
+#define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT (1)
+#define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK (0x1)
+#define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT	(0)
+#define ECORE_PSWHST_ATTENTION_VF_DISABLED (0x1)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0x1e)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x20)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0x3fc0)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0x3c000)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0x3fc0000)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18)
+static enum _ecore_status_t ecore_pswhst_attn_cb(struct ecore_hwfn *p_hwfn)
+{
+ u32 tmp =
+ ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_VF_DISABLED_ERROR_VALID);
+
+ /* Disabled VF access */
+ if (tmp & ECORE_PSWHST_ATTENTION_VF_DISABLED) {
+ u32 addr, data;
+
+ addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_VF_DISABLED_ERROR_ADDRESS);
+ data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_VF_DISABLED_ERROR_DATA);
+ DP_INFO(p_hwfn->p_dev,
+ "PF[0x%02x] VF [0x%02x] [Valid 0x%02x] Client [0x%02x]"
+ " Write [0x%02x] Addr [0x%08x]\n",
+ (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK)
+ >> ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT),
+ (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK)
+ >> ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT),
+ (u8)((data &
+ ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK) >>
+ ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT),
+ (u8)((data &
+ ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK) >>
+ ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT),
+ (u8)((data &
+ ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK) >>
+			ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT),
+ addr);
+ }
+
+ tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_INCORRECT_ACCESS_VALID);
+ if (tmp & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS) {
+ u32 addr, data, length;
+
+ addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
+ data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_INCORRECT_ACCESS_DATA);
+ length = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_INCORRECT_ACCESS_LENGTH);
+
+ DP_INFO(p_hwfn->p_dev,
+ "Incorrect access to %08x of length %08x - PF [%02x]"
+ " VF [%04x] [valid %02x] client [%02x] write [%02x]"
+ " Byte-Enable [%04x] [%08x]\n",
+ addr, length,
+ (u8)((data &
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK) >>
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT),
+ (u8)((data &
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK) >>
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT),
+ (u8)((data &
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK) >>
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT),
+ (u8)((data &
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK) >>
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT),
+ (u8)((data &
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK) >>
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT),
+ (u8)((data &
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK) >>
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT),
+ data);
+ }
+
+ /* TODO - We know 'some' of these are legal due to virtualization,
+ * but is it true for all of them?
+ */
+ return ECORE_SUCCESS;
+}
+
+#define ECORE_GRC_ATTENTION_VALID_BIT (1 << 0)
+#define ECORE_GRC_ATTENTION_ADDRESS_MASK (0x7fffff << 0)
+#define ECORE_GRC_ATTENTION_RDWR_BIT (1 << 23)
+#define ECORE_GRC_ATTENTION_MASTER_MASK (0xf << 24)
+#define ECORE_GRC_ATTENTION_MASTER_SHIFT (24)
+#define ECORE_GRC_ATTENTION_PF_MASK (0xf)
+#define ECORE_GRC_ATTENTION_VF_MASK (0xff << 4)
+#define ECORE_GRC_ATTENTION_VF_SHIFT (4)
+#define ECORE_GRC_ATTENTION_PRIV_MASK (0x3 << 14)
+#define ECORE_GRC_ATTENTION_PRIV_SHIFT (14)
+#define ECORE_GRC_ATTENTION_PRIV_VF (0)
+static const char *grc_timeout_attn_master_to_str(u8 master)
+{
+ switch (master) {
+ case 1:
+ return "PXP";
+ case 2:
+ return "MCP";
+ case 3:
+ return "MSDM";
+ case 4:
+ return "PSDM";
+ case 5:
+ return "YSDM";
+ case 6:
+ return "USDM";
+ case 7:
+ return "TSDM";
+ case 8:
+ return "XSDM";
+ case 9:
+ return "DBU";
+ case 10:
+ return "DMAE";
+ default:
+ return "Unknown";
+ }
+}
+
+static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
+{
+ u32 tmp, tmp2;
+
+ /* We've already cleared the timeout interrupt register, so we learn
+ * of interrupts via the validity register
+ */
+ tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
+ if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT))
+ goto out;
+
+ /* Read the GRC timeout information */
+ tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
+ tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);
+
+ DP_INFO(p_hwfn->p_dev,
+ "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s]"
+ " [PF: %02x %s %02x]\n",
+ tmp2, tmp,
+ (tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
+ (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2,
+ grc_timeout_attn_master_to_str((tmp &
+ ECORE_GRC_ATTENTION_MASTER_MASK) >>
+ ECORE_GRC_ATTENTION_MASTER_SHIFT),
+ (tmp2 & ECORE_GRC_ATTENTION_PF_MASK),
+ (((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >>
+ ECORE_GRC_ATTENTION_PRIV_SHIFT) ==
+ ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)",
+ (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
+ ECORE_GRC_ATTENTION_VF_SHIFT);
+
+out:
+	/* Regardless of anything else, clean the validity bit */
+ ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
+ GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
+ return ECORE_SUCCESS;
+}
+
+#define ECORE_PGLUE_ATTENTION_VALID (1 << 29)
+#define ECORE_PGLUE_ATTENTION_RD_VALID (1 << 26)
+#define ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf << 20)
+#define ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20)
+#define ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID (1 << 19)
+#define ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff << 24)
+#define ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24)
+#define ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR (1 << 21)
+#define ECORE_PGLUE_ATTENTION_DETAILS2_BME (1 << 22)
+#define ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN (1 << 23)
+#define ECORE_PGLUE_ATTENTION_ICPL_VALID (1 << 23)
+#define ECORE_PGLUE_ATTENTION_ZLR_VALID (1 << 25)
+#define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23)
+static enum _ecore_status_t ecore_pglub_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
+{
+ u32 tmp;
+
+ tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_WR_DETAILS2);
+ if (tmp & ECORE_PGLUE_ATTENTION_VALID) {
+ u32 addr_lo, addr_hi, details;
+
+ addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
+ addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
+ details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_WR_DETAILS);
+
+ DP_INFO(p_hwfn,
+ "Illegal write by chip to [%08x:%08x] blocked."
+ "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]"
+ " Details2 %08x [Was_error %02x BME deassert %02x"
+ " FID_enable deassert %02x]\n",
+ addr_hi, addr_lo, details,
+ (u8)((details &
+ ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
+ ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
+ (u8)((details &
+ ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
+ ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
+ (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID)
+ ? 1 : 0), tmp,
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
+ : 0),
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 :
+ 0),
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
+ : 0));
+ }
+
+ tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_RD_DETAILS2);
+ if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) {
+ u32 addr_lo, addr_hi, details;
+
+ addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
+ addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
+ details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_RD_DETAILS);
+
+ DP_INFO(p_hwfn,
+ "Illegal read by chip from [%08x:%08x] blocked."
+ " Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]"
+ " Details2 %08x [Was_error %02x BME deassert %02x"
+ " FID_enable deassert %02x]\n",
+ addr_hi, addr_lo, details,
+ (u8)((details &
+ ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
+ ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
+ (u8)((details &
+ ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
+ ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
+ (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID)
+ ? 1 : 0), tmp,
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
+ : 0),
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 :
+ 0),
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
+ : 0));
+ }
+
+ tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
+ if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID)
+ DP_INFO(p_hwfn, "ICPL error - %08x\n", tmp);
+
+ tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
+ if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) {
+ u32 addr_hi, addr_lo;
+
+ addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
+ addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);
+
+ DP_INFO(p_hwfn, "ICPL error - %08x [Address %08x:%08x]\n",
+ tmp, addr_hi, addr_lo);
+ }
+
+ tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
+ if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) {
+ u32 addr_hi, addr_lo, details;
+
+ addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
+ addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
+ details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_VF_ILT_ERR_DETAILS);
+
+ DP_INFO(p_hwfn,
+ "ILT error - Details %08x Details2 %08x"
+ " [Address %08x:%08x]\n",
+ details, tmp, addr_hi, addr_lo);
+ }
+
+ /* Clear the indications */
+ ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn)
+{
+ DP_NOTICE(p_hwfn, false, "FW assertion!\n");
+
+ ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FW_ASSERT);
+
+ return ECORE_INVAL;
+}
+
+static enum _ecore_status_t
+ecore_general_attention_35(struct ecore_hwfn *p_hwfn)
+{
+ DP_INFO(p_hwfn, "General attention 35!\n");
+
+ return ECORE_SUCCESS;
+}
+
+#define ECORE_DORQ_ATTENTION_REASON_MASK (0xfffff)
+#define ECORE_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
+#define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f0000)
+#define ECORE_DORQ_ATTENTION_SIZE_SHIFT (16)
+
+static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
+{
+ u32 reason;
+
+ reason = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) &
+ ECORE_DORQ_ATTENTION_REASON_MASK;
+ if (reason) {
+ u32 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ DORQ_REG_DB_DROP_DETAILS);
+
+ DP_INFO(p_hwfn->p_dev,
+ "DORQ db_drop: address 0x%08x Opaque FID 0x%04x"
+ " Size [bytes] 0x%08x Reason: 0x%08x\n",
+ ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ DORQ_REG_DB_DROP_DETAILS_ADDRESS),
+ (u16)(details & ECORE_DORQ_ATTENTION_OPAQUE_MASK),
+ ((details & ECORE_DORQ_ATTENTION_SIZE_MASK) >>
+ ECORE_DORQ_ATTENTION_SIZE_SHIFT) * 4, reason);
+ }
+
+ return ECORE_INVAL;
+}
+
+static enum _ecore_status_t ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn)
+{
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev)) {
+ u32 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ TM_REG_INT_STS_1);
+
+ if (val & ~(TM_REG_INT_STS_1_PEND_TASK_SCAN |
+ TM_REG_INT_STS_1_PEND_CONN_SCAN))
+ return ECORE_INVAL;
+
+ if (val & (TM_REG_INT_STS_1_PEND_TASK_SCAN |
+ TM_REG_INT_STS_1_PEND_CONN_SCAN))
+ DP_INFO(p_hwfn,
+ "TM attention on emulation - most likely"
+ " results of clock-ratios\n");
+ val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1);
+ val |= TM_REG_INT_MASK_1_PEND_CONN_SCAN |
+ TM_REG_INT_MASK_1_PEND_TASK_SCAN;
+ ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1, val);
+
+ return ECORE_SUCCESS;
+ }
+#endif
+
+ return ECORE_INVAL;
+}
+
+/* Instead of major changes to the data-structure, we have some 'special'
+ * identifiers for sources that changed meaning between adapters.
+ */
+enum aeu_invert_reg_special_type {
+ AEU_INVERT_REG_SPECIAL_CNIG_0,
+ AEU_INVERT_REG_SPECIAL_CNIG_1,
+ AEU_INVERT_REG_SPECIAL_CNIG_2,
+ AEU_INVERT_REG_SPECIAL_CNIG_3,
+ AEU_INVERT_REG_SPECIAL_MAX,
+};
+
+static struct aeu_invert_reg_bit
+aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
+ {"CNIG port 0", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
+ {"CNIG port 1", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
+ {"CNIG port 2", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
+ {"CNIG port 3", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
+};
+
+/* Notice aeu_invert_reg must be defined in the same order of bits as HW; */
+static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
+ {
+ { /* After Invert 1 */
+ {"GPIO0 function%d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
+ MAX_BLOCK_ID},
+ }
+ },
+
+ {
+ { /* After Invert 2 */
+ {"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglub_rbc_attn_cb,
+ BLOCK_PGLUE_B},
+ {"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"SMB event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"Main Power", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"SW timers #%d",
+ (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT),
+ OSAL_NULL, MAX_BLOCK_ID},
+ {"PCIE glue/PXP VPD %d", (16 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
+ BLOCK_PGLCS},
+ }
+ },
+
+ {
+ { /* After Invert 3 */
+ {"General Attention %d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
+ MAX_BLOCK_ID},
+ }
+ },
+
+ {
+ { /* After Invert 4 */
+ {"General Attention 32", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
+ ecore_fw_assertion, MAX_BLOCK_ID},
+ {"General Attention %d",
+ (2 << ATTENTION_LENGTH_SHIFT) | (33 << ATTENTION_OFFSET_SHIFT),
+ OSAL_NULL, MAX_BLOCK_ID},
+ {"General Attention 35", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
+ ecore_general_attention_35, MAX_BLOCK_ID},
+ {"NWS Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0),
+ OSAL_NULL, BLOCK_NWS},
+ {"NWS Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1),
+ OSAL_NULL, BLOCK_NWS},
+ {"NWM Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2),
+ OSAL_NULL, BLOCK_NWM},
+ {"NWM Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3),
+ OSAL_NULL, BLOCK_NWM},
+ {"MCP CPU", ATTENTION_SINGLE, ecore_mcp_attn_cb, MAX_BLOCK_ID},
+ {"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
+ {"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
+ {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
+ MAX_BLOCK_ID},
+ {"NIG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NIG},
+ {"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB},
+ {"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB},
+ {"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB},
+ {"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS},
+ }
+ },
+
+ {
+ { /* After Invert 5 */
+ {"SRC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_SRC},
+ {"PB Client1", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB1},
+ {"PB Client2", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB2},
+ {"RPB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RPB},
+ {"PBF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF},
+ {"QM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_QM},
+ {"TM", ATTENTION_PAR_INT, ecore_tm_attn_cb, BLOCK_TM},
+ {"MCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MCM},
+ {"MSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSDM},
+ {"MSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSEM},
+ {"PCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PCM},
+ {"PSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSDM},
+ {"PSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSEM},
+ {"TCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCM},
+ {"TSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSDM},
+ {"TSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSEM},
+ }
+ },
+
+ {
+ { /* After Invert 6 */
+ {"UCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_UCM},
+ {"USDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USDM},
+ {"USEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USEM},
+ {"XCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XCM},
+ {"XSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSDM},
+ {"XSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSEM},
+ {"YCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YCM},
+ {"YSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSDM},
+ {"YSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSEM},
+ {"XYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XYLD},
+ {"TMLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TMLD},
+ {"MYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MULD},
+ {"YULD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YULD},
+ {"DORQ", ATTENTION_PAR_INT, ecore_dorq_attn_cb, BLOCK_DORQ},
+ {"DBG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DBG},
+ {"IPC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IPC},
+ }
+ },
+
+ {
+ { /* After Invert 7 */
+ {"CCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CCFC},
+ {"CDU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CDU},
+ {"DMAE", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DMAE},
+ {"IGU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IGU},
+ {"ATC", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
+ {"CAU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CAU},
+ {"PTU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PTU},
+ {"PRM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRM},
+ {"TCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCFC},
+ {"RDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RDIF},
+ {"TDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TDIF},
+ {"RSS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RSS},
+ {"MISC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISC},
+ {"MISCS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISCS},
+ {"PCIE", ATTENTION_PAR, OSAL_NULL, BLOCK_PCIE},
+ {"Vaux PCI core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
+ {"PSWRQ", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ},
+ }
+ },
+
+ {
+ { /* After Invert 8 */
+ {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ2},
+ {"PSWWR", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR},
+ {"PSWWR (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR2},
+ {"PSWRD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD},
+ {"PSWRD (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD2},
+ {"PSWHST", ATTENTION_PAR_INT, ecore_pswhst_attn_cb, BLOCK_PSWHST},
+ {"PSWHST (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWHST2},
+ {"GRC", ATTENTION_PAR_INT, ecore_grc_attn_cb, BLOCK_GRC},
+ {"CPMU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CPMU},
+ {"NCSI", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NCSI},
+ {"MSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"PSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"TSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"USEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"XSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"YSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"pxp_misc_mps", ATTENTION_PAR, OSAL_NULL, BLOCK_PGLCS},
+ {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
+ {"PERST_B assertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"PERST_B deassertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
+ MAX_BLOCK_ID},
+ }
+ },
+
+ {
+ { /* After Invert 9 */
+ {"MCP Latched memory", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"MCP Latched scratchpad cache", ATTENTION_SINGLE, OSAL_NULL,
+ MAX_BLOCK_ID},
+ {"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
+ MAX_BLOCK_ID},
+ }
+ },
+
+};
+
+static struct aeu_invert_reg_bit *
+ecore_int_aeu_translate(struct ecore_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_bit)
+{
+ if (!ECORE_IS_BB(p_hwfn->p_dev))
+ return p_bit;
+
+ if (!(p_bit->flags & ATTENTION_BB_DIFFERENT))
+ return p_bit;
+
+ return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >>
+ ATTENTION_BB_SHIFT];
+}
+
+static bool ecore_int_is_parity_flag(struct ecore_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_bit)
+{
+ return !!(ecore_int_aeu_translate(p_hwfn, p_bit)->flags &
+ ATTENTION_PARITY);
+}
+
+#define ATTN_STATE_BITS (0xfff)
+#define ATTN_BITS_MASKABLE (0x3ff)
+struct ecore_sb_attn_info {
+ /* Virtual & Physical address of the SB */
+ struct atten_status_block *sb_attn;
+ dma_addr_t sb_phys;
+
+ /* Last seen running index */
+ u16 index;
+
+ /* A mask of the AEU bits resulting in a parity error */
+ u32 parity_mask[NUM_ATTN_REGS];
+
+ /* A pointer to the attention description structure */
+ struct aeu_invert_reg *p_aeu_desc;
+
+ /* Previously asserted attentions, which are still unasserted */
+ u16 known_attn;
+
+ /* Cleanup address for the link's general hw attention */
+ u32 mfw_attn_addr;
+};
+
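+/* Sample the attention status block running index; returns ECORE_SB_ATT_IDX
+ * if it advanced since the last time it was read, 0 otherwise.
+ */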
+static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn,
+ struct ecore_sb_attn_info *p_sb_desc)
+{
+ u16 rc = 0, index;
+
+ OSAL_MMIOWB(p_hwfn->p_dev);
+
+ index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index);
+ if (p_sb_desc->index != index) {
+ p_sb_desc->index = index;
+ rc = ECORE_SB_ATT_IDX;
+ }
+
+ OSAL_MMIOWB(p_hwfn->p_dev);
+
+ return rc;
+}
+
+/**
+ * @brief ecore_int_assertion - handles asserted attention bits
+ *
+ * @param p_hwfn
+ * @param asserted_bits newly asserted bits
+ * @return enum _ecore_status_t
+ */
+static enum _ecore_status_t ecore_int_assertion(struct ecore_hwfn *p_hwfn,
+ u16 asserted_bits)
+{
+ struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
+ u32 igu_mask;
+
+ /* Mask the source of the attention in the IGU */
+ igu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ IGU_REG_ATTENTION_ENABLE);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
+ igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
+ igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
+ ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "inner known ATTN state: 0x%04x --> 0x%04x\n",
+ sb_attn_sw->known_attn,
+ sb_attn_sw->known_attn | asserted_bits);
+ sb_attn_sw->known_attn |= asserted_bits;
+
+ /* Handle MCP events */
+ if (asserted_bits & 0x100) {
+ ecore_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
+ /* Clean the MCP attention */
+ ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
+ sb_attn_sw->mfw_attn_addr, 0);
+ }
+
+ /* FIXME - this will change once we'll have GOOD gtt definitions */
+ DIRECT_REG_WR(p_hwfn,
+ (u8 OSAL_IOMEM *) p_hwfn->regview +
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ ((IGU_CMD_ATTN_BIT_SET_UPPER -
+ IGU_CMD_INT_ACK_BASE) << 3), (u32)asserted_bits);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "set cmd IGU: 0x%04x\n",
+ asserted_bits);
+
+ return ECORE_SUCCESS;
+}
+
+static void ecore_int_attn_print(struct ecore_hwfn *p_hwfn,
+ enum block_id id, enum dbg_attn_type type,
+ bool b_clear)
+{
+ /* @DPDK */
+ DP_NOTICE(p_hwfn->p_dev, false, "[block_id %d type %d]\n", id, type);
+}
+
+/**
+ * @brief ecore_int_deassertion_aeu_bit - handles the effects of a single
+ * cause of the attention
+ *
+ * @param p_hwfn
+ * @param p_aeu - descriptor of an AEU bit which caused the attention
+ * @param aeu_en_reg - register offset of the AEU enable reg. which configured
+ * this bit to this group.
+ * @param bit_index - index of this bit in the aeu_en_reg
+ *
+ * @return enum _ecore_status_t
+ */
+static enum _ecore_status_t
+ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_aeu,
+ u32 aeu_en_reg,
+ const char *p_bit_name,
+ u32 bitmask)
+{
+ enum _ecore_status_t rc = ECORE_INVAL;
+ bool b_fatal = false;
+
+ DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
+ p_bit_name, bitmask);
+
+ /* Call callback before clearing the interrupt status */
+ if (p_aeu->cb) {
+ DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
+ p_bit_name);
+ rc = p_aeu->cb(p_hwfn);
+ }
+
+ if (rc != ECORE_SUCCESS)
+ b_fatal = true;
+
+ /* Print HW block interrupt registers */
+ if (p_aeu->block_index != MAX_BLOCK_ID) {
+ ecore_int_attn_print(p_hwfn, p_aeu->block_index,
+ ATTN_TYPE_INTERRUPT, !b_fatal);
+	}
+
+ /* Reach assertion if attention is fatal */
+ if (b_fatal) {
+ DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n",
+ p_bit_name);
+
+ ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
+ }
+
+ /* Prevent this Attention from being asserted in the future */
+ if (p_aeu->flags & ATTENTION_CLEAR_ENABLE ||
+ p_hwfn->p_dev->attn_clr_en) {
+ u32 val;
+ u32 mask = ~bitmask;
+ val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
+ ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask));
+ DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
+ p_bit_name);
+ }
+
+ return rc;
+}
+
+/**
+ * @brief ecore_int_deassertion_parity - handle a single parity AEU source
+ *
+ * @param p_hwfn
+ * @param p_aeu - descriptor of an AEU bit which caused the
+ * parity
+ * @param bit_index
+ */
+static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_aeu,
+ u8 bit_index)
+{
+ u32 block_id = p_aeu->block_index;
+
+ DP_INFO(p_hwfn->p_dev, "%s[%d] parity attention is set\n",
+ p_aeu->bit_name, bit_index);
+
+ if (block_id == MAX_BLOCK_ID)
+ return;
+
+ ecore_int_attn_print(p_hwfn, block_id,
+ ATTN_TYPE_PARITY, false);
+
+ /* In A0, there's a single parity bit for several blocks */
+ if (block_id == BLOCK_BTB) {
+ ecore_int_attn_print(p_hwfn, BLOCK_OPTE,
+ ATTN_TYPE_PARITY, false);
+ ecore_int_attn_print(p_hwfn, BLOCK_MCP,
+ ATTN_TYPE_PARITY, false);
+ }
+}
+
+/**
+ * @brief - handles deassertion of previously asserted attentions.
+ *
+ * @param p_hwfn
+ * @param deasserted_bits - newly deasserted bits
+ * @return enum _ecore_status_t
+ *
+ */
+static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
+ u16 deasserted_bits)
+{
+ struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
+ u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask;
+ bool b_parity = false;
+ u8 i, j, k, bit_idx;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* Read the attention registers in the AEU */
+ for (i = 0; i < NUM_ATTN_REGS; i++) {
+ aeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ MISC_REG_AEU_AFTER_INVERT_1_IGU +
+ i * 0x4);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "Deasserted bits [%d]: %08x\n", i, aeu_inv_arr[i]);
+ }
+
+ /* Handle parity attentions first */
+ for (i = 0; i < NUM_ATTN_REGS; i++) {
+ struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
+ u32 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
+ i * sizeof(u32));
+
+ u32 parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
+
+ /* Skip register in which no parity bit is currently set */
+ if (!parities)
+ continue;
+
+ for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
+ struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
+
+ if (ecore_int_is_parity_flag(p_hwfn, p_bit) &&
+ !!(parities & (1 << bit_idx))) {
+ ecore_int_deassertion_parity(p_hwfn, p_bit,
+ bit_idx);
+ b_parity = true;
+ }
+
+ bit_idx += ATTENTION_LENGTH(p_bit->flags);
+ }
+ }
+
+ /* Find non-parity cause for attention and act */
+ for (k = 0; k < MAX_ATTN_GRPS; k++) {
+ struct aeu_invert_reg_bit *p_aeu;
+
+ /* Handle only groups whose attention is currently deasserted */
+ if (!(deasserted_bits & (1 << k)))
+ continue;
+
+ for (i = 0; i < NUM_ATTN_REGS; i++) {
+ u32 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
+ i * sizeof(u32) + k * sizeof(u32) * NUM_ATTN_REGS;
+ u32 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
+ u32 bits = aeu_inv_arr[i] & en;
+
+ /* Skip if no bit from this group is currently set */
+ if (!bits)
+ continue;
+
+ /* Find all set bits from current register which belong
+ * to current group, making them responsible for the
+ * previous assertion.
+ */
+ for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
+ unsigned long int bitmask;
+ u8 bit, bit_len;
+
+ /* Need to account bits with changed meaning */
+ p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
+
+ bit = bit_idx;
+ bit_len = ATTENTION_LENGTH(p_aeu->flags);
+ if (ecore_int_is_parity_flag(p_hwfn, p_aeu)) {
+ /* Skip Parity */
+ bit++;
+ bit_len--;
+ }
+
+ /* Find the bits relating to HW-block, then
+ * shift so they'll become LSB.
+ */
+ bitmask = bits & (((1 << bit_len) - 1) << bit);
+ bitmask >>= bit;
+
+ if (bitmask) {
+ u32 flags = p_aeu->flags;
+ char bit_name[30];
+ u8 num;
+
+ num = (u8)OSAL_FIND_FIRST_BIT(&bitmask,
+ bit_len);
+
+				/* Some bits represent more than
+ * a single interrupt. Correctly print
+ * their name.
+ */
+ if (ATTENTION_LENGTH(flags) > 2 ||
+ ((flags & ATTENTION_PAR_INT) &&
+ ATTENTION_LENGTH(flags) > 1))
+ OSAL_SNPRINTF(bit_name, 30,
+ p_aeu->bit_name,
+ num);
+ else
+ OSAL_STRNCPY(bit_name,
+ p_aeu->bit_name,
+ 30);
+
+ /* We now need to pass bitmask in its
+ * correct position.
+ */
+ bitmask <<= bit;
+
+ /* Handle source of the attention */
+ ecore_int_deassertion_aeu_bit(p_hwfn,
+ p_aeu,
+ aeu_en,
+ bit_name,
+ bitmask);
+ }
+
+ bit_idx += ATTENTION_LENGTH(p_aeu->flags);
+ }
+ }
+ }
+
+ /* Clear IGU indication for the deasserted bits */
+ /* FIXME - this will change once we'll have GOOD gtt definitions */
+ DIRECT_REG_WR(p_hwfn,
+ (u8 OSAL_IOMEM *) p_hwfn->regview +
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ ((IGU_CMD_ATTN_BIT_CLR_UPPER -
+ IGU_CMD_INT_ACK_BASE) << 3), ~((u32)deasserted_bits));
+
+ /* Unmask deasserted attentions in IGU */
+ aeu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ IGU_REG_ATTENTION_ENABLE);
+ aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
+ ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
+
+ /* Clear deassertion from inner state */
+ sb_attn_sw->known_attn &= ~deasserted_bits;
+
+ return rc;
+}
+
+static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
+ struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
+ u16 index = 0, asserted_bits, deasserted_bits;
+ u32 attn_bits = 0, attn_acks = 0;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* Read current attention bits/acks - safeguard against attentions
+	 * by guaranteeing work on a synchronized timeframe
+ */
+ do {
+ index = OSAL_LE16_TO_CPU(p_sb_attn->sb_index);
+ attn_bits = OSAL_LE32_TO_CPU(p_sb_attn->atten_bits);
+ attn_acks = OSAL_LE32_TO_CPU(p_sb_attn->atten_ack);
+ } while (index != OSAL_LE16_TO_CPU(p_sb_attn->sb_index));
+ p_sb_attn->sb_index = index;
+
+ /* Attention / Deassertion are meaningful (and in correct state)
+	 * only when they differ and are consistent with the known state - deassertion
+ * when previous attention & current ack, and assertion when current
+ * attention with no previous attention
+ */
+ asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
+ ~p_sb_attn_sw->known_attn;
+ deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
+ p_sb_attn_sw->known_attn;
+
+ if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100))
+ DP_INFO(p_hwfn,
+ "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
+ index, attn_bits, attn_acks, asserted_bits,
+ deasserted_bits, p_sb_attn_sw->known_attn);
+ else if (asserted_bits == 0x100)
+ DP_INFO(p_hwfn, "MFW indication via attention\n");
+ else
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "MFW indication [deassertion]\n");
+
+ if (asserted_bits) {
+ rc = ecore_int_assertion(p_hwfn, asserted_bits);
+ if (rc)
+ return rc;
+ }
+
+ if (deasserted_bits)
+ rc = ecore_int_deassertion(p_hwfn, deasserted_bits);
+
+ return rc;
+}
+
+static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn,
+ void OSAL_IOMEM *igu_addr, u32 ack_cons)
+{
+ struct igu_prod_cons_update igu_ack = { 0 };
+
+ igu_ack.sb_id_and_flags =
+ ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
+ (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
+ (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
+ (IGU_SEG_ACCESS_ATTN <<
+ IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
+
+ DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags);
+
+	/* Both segments (interrupts & acks) are written to the same address;
+ * Need to guarantee all commands will be received (in-order) by HW.
+ */
+ OSAL_MMIOWB(p_hwfn->p_dev);
+ OSAL_BARRIER(p_hwfn->p_dev);
+}
+
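+/* Slowpath DPC: disables the default SB ack, samples the interrupt and
+ * attention indices, handles attentions and per-protocol-index callbacks, and
+ * finally re-enables interrupts on the default SB.
+ */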
+void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
+{
+ struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie;
+ struct ecore_pi_info *pi_info = OSAL_NULL;
+ struct ecore_sb_attn_info *sb_attn;
+ struct ecore_sb_info *sb_info;
+ int arr_size;
+ u16 rc = 0;
+
+ if (!p_hwfn)
+ return;
+
+ if (!p_hwfn->p_sp_sb) {
+ DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n");
+ return;
+ }
+
+ sb_info = &p_hwfn->p_sp_sb->sb_info;
+ arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
+ if (!sb_info) {
+ DP_ERR(p_hwfn->p_dev,
+ "Status block is NULL - cannot ack interrupts\n");
+ return;
+ }
+
+ if (!p_hwfn->p_sb_attn) {
+ DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn");
+ return;
+ }
+ sb_attn = p_hwfn->p_sb_attn;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
+ p_hwfn, p_hwfn->my_id);
+
+	/* Disable ack for def status block. Required both for msix and
+	 * inta in non-mask mode; with inta it does no harm.
+ */
+ ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0);
+
+ /* Gather Interrupts/Attentions information */
+ if (!sb_info->sb_virt) {
+ DP_ERR(p_hwfn->p_dev,
+ "Interrupt Status block is NULL -"
+ " cannot check for new interrupts!\n");
+ } else {
+ u32 tmp_index = sb_info->sb_ack;
+ rc = ecore_sb_update_sb_idx(sb_info);
+ DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
+ "Interrupt indices: 0x%08x --> 0x%08x\n",
+ tmp_index, sb_info->sb_ack);
+ }
+
+ if (!sb_attn || !sb_attn->sb_attn) {
+ DP_ERR(p_hwfn->p_dev,
+ "Attentions Status block is NULL -"
+ " cannot check for new attentions!\n");
+ } else {
+ u16 tmp_index = sb_attn->index;
+
+ rc |= ecore_attn_update_idx(p_hwfn, sb_attn);
+ DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
+ "Attention indices: 0x%08x --> 0x%08x\n",
+ tmp_index, sb_attn->index);
+ }
+
+	/* Check if we expect interrupts at this time. If not, just ack them */
+ if (!(rc & ECORE_SB_EVENT_MASK)) {
+ ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+ return;
+ }
+
+	/* Check the validity of the DPC ptt. If invalid, ack interrupts and return */
+
+ if (!p_hwfn->p_dpc_ptt) {
+ DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n");
+ ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+ return;
+ }
+
+ if (rc & ECORE_SB_ATT_IDX)
+ ecore_int_attentions(p_hwfn);
+
+ if (rc & ECORE_SB_IDX) {
+ int pi;
+
+ /* Since we only looked at the SB index, it's possible more
+ * than a single protocol-index on the SB incremented.
+ * Iterate over all configured protocol indices and check
+ * whether something happened for each.
+ */
+ for (pi = 0; pi < arr_size; pi++) {
+ pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
+ if (pi_info->comp_cb != OSAL_NULL)
+ pi_info->comp_cb(p_hwfn, pi_info->cookie);
+ }
+ }
+
+ if (sb_attn && (rc & ECORE_SB_ATT_IDX)) {
+ /* This should be done before the interrupts are enabled,
+ * since otherwise a new attention will be generated.
+ */
+ ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
+ }
+
+ ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+}
+
+static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
+
+ if (!p_sb)
+ return;
+
+ if (p_sb->sb_attn) {
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn,
+ p_sb->sb_phys,
+ SB_ATTN_ALIGNED_SIZE(p_hwfn));
+ }
+ OSAL_FREE(p_hwfn->p_dev, p_sb);
+}
+
+static void ecore_int_sb_attn_setup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
+
+ OSAL_MEMSET(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));
+
+ sb_info->index = 0;
+ sb_info->known_attn = 0;
+
+ /* Configure Attention Status Block in IGU */
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
+ DMA_LO(p_hwfn->p_sb_attn->sb_phys));
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
+ DMA_HI(p_hwfn->p_sb_attn->sb_phys));
+}
+
+static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ void *sb_virt_addr, dma_addr_t sb_phy_addr)
+{
+ struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
+ int i, j, k;
+
+ sb_info->sb_attn = sb_virt_addr;
+ sb_info->sb_phys = sb_phy_addr;
+
+ /* Set the pointer to the AEU descriptors */
+ sb_info->p_aeu_desc = aeu_descs;
+
+ /* Calculate Parity Masks */
+ OSAL_MEMSET(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
+ for (i = 0; i < NUM_ATTN_REGS; i++) {
+ /* j is array index, k is bit index */
+ for (j = 0, k = 0; k < 32; j++) {
+ struct aeu_invert_reg_bit *p_aeu;
+
+ p_aeu = &aeu_descs[i].bits[j];
+ if (ecore_int_is_parity_flag(p_hwfn, p_aeu))
+ sb_info->parity_mask[i] |= 1 << k;
+
+ k += ATTENTION_LENGTH(p_aeu->flags);
+ }
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "Attn Mask [Reg %d]: 0x%08x\n",
+ i, sb_info->parity_mask[i]);
+ }
+
+ /* Set the address of cleanup for the mcp attention */
+ sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
+ MISC_REG_AEU_GENERAL_ATTN_0;
+
+ ecore_int_sb_attn_setup(p_hwfn, p_ptt);
+}
+
+static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ struct ecore_sb_attn_info *p_sb;
+ dma_addr_t p_phys = 0;
+ void *p_virt;
+
+ /* SB struct */
+ p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb));
+ if (!p_sb) {
+ DP_NOTICE(p_dev, true,
+ "Failed to allocate `struct ecore_sb_attn_info'\n");
+ return ECORE_NOMEM;
+ }
+
+ /* SB ring */
+ p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
+ SB_ATTN_ALIGNED_SIZE(p_hwfn));
+ if (!p_virt) {
+ DP_NOTICE(p_dev, true,
+ "Failed to allocate status block (attentions)\n");
+ OSAL_FREE(p_dev, p_sb);
+ return ECORE_NOMEM;
+ }
+
+ /* Attention setup */
+ p_hwfn->p_sb_attn = p_sb;
+ ecore_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);
+
+ return ECORE_SUCCESS;
+}
+
+/* coalescing timeout = timeset << (timer_res + 1) */
+#define ECORE_CAU_DEF_RX_USECS 24
+#define ECORE_CAU_DEF_TX_USECS 48
+
+void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
+ struct cau_sb_entry *p_sb_entry,
+ u8 pf_id, u16 vf_number, u8 vf_valid)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ u32 cau_state;
+ u8 timer_res;
+
+ OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry));
+
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
+
+ cau_state = CAU_HC_DISABLE_STATE;
+
+ if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
+ cau_state = CAU_HC_ENABLE_STATE;
+ if (!p_dev->rx_coalesce_usecs)
+ p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS;
+ if (!p_dev->tx_coalesce_usecs)
+ p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS;
+ }
+
+ /* Coalesce = (timeset << timer-res), timeset is 7bit wide */
+ if (p_dev->rx_coalesce_usecs <= 0x7F)
+ timer_res = 0;
+ else if (p_dev->rx_coalesce_usecs <= 0xFF)
+ timer_res = 1;
+ else
+ timer_res = 2;
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
+
+ if (p_dev->tx_coalesce_usecs <= 0x7F)
+ timer_res = 0;
+ else if (p_dev->tx_coalesce_usecs <= 0xFF)
+ timer_res = 1;
+ else
+ timer_res = 2;
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
+
+ SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
+ SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
+}
+
+void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ dma_addr_t sb_phys, u16 igu_sb_id,
+ u16 vf_number, u8 vf_valid)
+{
+ struct cau_sb_entry sb_entry;
+
+ ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
+ vf_number, vf_valid);
+
+ if (p_hwfn->hw_init_done) {
+ /* Wide-bus, initialize via DMAE */
+ u64 phys_addr = (u64)sb_phys;
+
+ ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(osal_uintptr_t)&phys_addr,
+ CAU_REG_SB_ADDR_MEMORY +
+ igu_sb_id * sizeof(u64), 2, 0);
+ ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(osal_uintptr_t)&sb_entry,
+ CAU_REG_SB_VAR_MEMORY +
+ igu_sb_id * sizeof(u64), 2, 0);
+ } else {
+ /* Initialize Status Block Address */
+ STORE_RT_REG_AGG(p_hwfn,
+ CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
+ igu_sb_id * 2, sb_phys);
+
+ STORE_RT_REG_AGG(p_hwfn,
+ CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
+ igu_sb_id * 2, sb_entry);
+ }
+
+ /* Configure pi coalescing if set */
+ if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
+ /* eth will open queues for all tcs, so configure all of them
+ * properly, rather than just the active ones
+ */
+ u8 num_tc = p_hwfn->hw_info.num_hw_tc;
+
+ u8 timeset, timer_res;
+ u8 i;
+
+ /* timeset = (coalesce >> timer-res), timeset is 7bit wide */
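+		/* e.g. the default 24us Rx value is <= 0x7F, so timer_res is 0
+		 * and timeset becomes 24
+		 */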
+ if (p_hwfn->p_dev->rx_coalesce_usecs <= 0x7F)
+ timer_res = 0;
+ else if (p_hwfn->p_dev->rx_coalesce_usecs <= 0xFF)
+ timer_res = 1;
+ else
+ timer_res = 2;
+ timeset = (u8)(p_hwfn->p_dev->rx_coalesce_usecs >> timer_res);
+ ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
+ ECORE_COAL_RX_STATE_MACHINE, timeset);
+
+ if (p_hwfn->p_dev->tx_coalesce_usecs <= 0x7F)
+ timer_res = 0;
+ else if (p_hwfn->p_dev->tx_coalesce_usecs <= 0xFF)
+ timer_res = 1;
+ else
+ timer_res = 2;
+ timeset = (u8)(p_hwfn->p_dev->tx_coalesce_usecs >> timer_res);
+ for (i = 0; i < num_tc; i++) {
+ ecore_int_cau_conf_pi(p_hwfn, p_ptt,
+ igu_sb_id, TX_PI(i),
+ ECORE_COAL_TX_STATE_MACHINE,
+ timeset);
+ }
+ }
+}
+
+void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 igu_sb_id, u32 pi_index,
+ enum ecore_coalescing_fsm coalescing_fsm, u8 timeset)
+{
+ struct cau_pi_entry pi_entry;
+ u32 sb_offset, pi_offset;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return; /* @@@TBD MichalK- VF CAU... */
+
+ sb_offset = igu_sb_id * PIS_PER_SB;
+ OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));
+
+ SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
+ if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE)
+ SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
+ else
+ SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
+
+ pi_offset = sb_offset + pi_index;
+ if (p_hwfn->hw_init_done) {
+ ecore_wr(p_hwfn, p_ptt,
+ CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
+ *((u32 *)&(pi_entry)));
+ } else {
+ STORE_RT_REG(p_hwfn,
+ CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
+ *((u32 *)&(pi_entry)));
+ }
+}
+
+void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info)
+{
+ /* zero status block and ack counter */
+ sb_info->sb_ack = 0;
+ OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
+
+ if (IS_PF(p_hwfn->p_dev))
+ ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
+ sb_info->igu_sb_id, 0, 0);
+}
+
+/**
+ * @brief ecore_get_igu_sb_id - given a sw sb_id return the
+ * igu_sb_id
+ *
+ * @param p_hwfn
+ * @param sb_id
+ *
+ * @return u16
+ */
+static u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
+{
+ u16 igu_sb_id;
+
+ /* Assuming continuous set of IGU SBs dedicated for given PF */
+ if (sb_id == ECORE_SP_SB_ID)
+ igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
+ else if (IS_PF(p_hwfn->p_dev))
+ igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
+ else
+ igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id);
+
+ if (sb_id == ECORE_SP_SB_ID)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
+ else
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
+
+ return igu_sb_id;
+}
+
+enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_sb_info *sb_info,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr, u16 sb_id)
+{
+ sb_info->sb_virt = sb_virt_addr;
+ sb_info->sb_phys = sb_phy_addr;
+
+ sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);
+
+ if (sb_id != ECORE_SP_SB_ID) {
+ p_hwfn->sbs_info[sb_id] = sb_info;
+ p_hwfn->num_sbs++;
+ }
+#ifdef ECORE_CONFIG_DIRECT_HWFN
+ sb_info->p_hwfn = p_hwfn;
+#endif
+ sb_info->p_dev = p_hwfn->p_dev;
+
+ /* The igu address will hold the absolute address that needs to be
+ * written to for a specific status block
+ */
+ if (IS_PF(p_hwfn->p_dev)) {
+ sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_IGU_CMD + (sb_info->igu_sb_id << 3);
+
+ } else {
+ sb_info->igu_addr =
+ (u8 OSAL_IOMEM *)p_hwfn->regview +
+ PXP_VF_BAR0_START_IGU +
+ ((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3);
+ }
+
+ sb_info->flags |= ECORE_SB_INFO_INIT;
+
+ ecore_int_sb_setup(p_hwfn, p_ptt, sb_info);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
+ struct ecore_sb_info *sb_info,
+ u16 sb_id)
+{
+ if (sb_id == ECORE_SP_SB_ID) {
+ DP_ERR(p_hwfn, "Do Not free sp sb using this function");
+ return ECORE_INVAL;
+ }
+
+ /* zero status block and ack counter */
+ sb_info->sb_ack = 0;
+ OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
+
+ if (p_hwfn->sbs_info[sb_id] != OSAL_NULL) {
+ p_hwfn->sbs_info[sb_id] = OSAL_NULL;
+ p_hwfn->num_sbs--;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
+
+ if (!p_sb)
+ return;
+
+ if (p_sb->sb_info.sb_virt) {
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_sb->sb_info.sb_virt,
+ p_sb->sb_info.sb_phys,
+ SB_ALIGNED_SIZE(p_hwfn));
+ }
+
+ OSAL_FREE(p_hwfn->p_dev, p_sb);
+}
+
+static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_sb_sp_info *p_sb;
+ dma_addr_t p_phys = 0;
+ void *p_virt;
+
+ /* SB struct */
+ p_sb =
+ OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(*p_sb));
+ if (!p_sb) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate `struct ecore_sb_info'\n");
+ return ECORE_NOMEM;
+ }
+
+ /* SB ring */
+ p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+ &p_phys, SB_ALIGNED_SIZE(p_hwfn));
+ if (!p_virt) {
+ DP_NOTICE(p_hwfn, true, "Failed to allocate status block\n");
+ OSAL_FREE(p_hwfn->p_dev, p_sb);
+ return ECORE_NOMEM;
+ }
+
+ /* Status Block setup */
+ p_hwfn->p_sp_sb = p_sb;
+ ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info,
+ p_virt, p_phys, ECORE_SP_SB_ID);
+
+ OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
+ ecore_int_comp_cb_t comp_cb,
+ void *cookie,
+ u8 *sb_idx, __le16 **p_fw_cons)
+{
+ struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
+ enum _ecore_status_t rc = ECORE_NOMEM;
+ u8 pi;
+
+ /* Look for a free index */
+ for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
+ if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL)
+ continue;
+
+ p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
+ p_sp_sb->pi_info_arr[pi].cookie = cookie;
+ *sb_idx = pi;
+ *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
+ rc = ECORE_SUCCESS;
+ break;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi)
+{
+ struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
+
+ if (p_sp_sb->pi_info_arr[pi].comp_cb == OSAL_NULL)
+ return ECORE_NOMEM;
+
+ p_sp_sb->pi_info_arr[pi].comp_cb = OSAL_NULL;
+ p_sp_sb->pi_info_arr[pi].cookie = OSAL_NULL;
+ return ECORE_SUCCESS;
+}
+
+u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn)
+{
+ return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
+}
+
+void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_int_mode int_mode)
+{
+ u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+ DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n");
+ igu_pf_conf &= ~IGU_PF_CONF_ATTN_BIT_EN;
+ }
+#endif
+
+ p_hwfn->p_dev->int_mode = int_mode;
+ switch (p_hwfn->p_dev->int_mode) {
+ case ECORE_INT_MODE_INTA:
+ igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
+ igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+ break;
+
+ case ECORE_INT_MODE_MSI:
+ igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
+ igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+ break;
+
+ case ECORE_INT_MODE_MSIX:
+ igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
+ break;
+ case ECORE_INT_MODE_POLL:
+ break;
+ }
+
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
+}
+
+static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+ DP_INFO(p_hwfn,
+ "FPGA - Don't enable Attentions in IGU and MISC\n");
+ return;
+ }
+#endif
+
+ /* Configure AEU signal change to produce attentions */
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
+
+ /* Flush the writes to IGU */
+ OSAL_MMIOWB(p_hwfn->p_dev);
+
+ /* Unmask AEU signals toward IGU */
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
+}
+
+enum _ecore_status_t
+ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_int_mode int_mode)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 tmp;
+
+ /* @@@tmp - Starting with MFW 8.2.1.0 we've started hitting AVS stop
+ * attentions. Since we're waiting for a BRCM answer regarding this
+ * attention, we simply mask it in the meantime.
+ */
+ tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0);
+ tmp &= ~0x800;
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp);
+
+ ecore_int_igu_enable_attn(p_hwfn, p_ptt);
+
+ if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
+ rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "Slowpath IRQ request failed\n");
+ return ECORE_NORESOURCES;
+ }
+ p_hwfn->b_int_requested = true;
+ }
+
+ /* Enable interrupt Generation */
+ ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
+
+ p_hwfn->b_int_enabled = 1;
+
+ return rc;
+}
+
+void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ p_hwfn->b_int_enabled = 0;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return;
+
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
+}
+
+#define IGU_CLEANUP_SLEEP_LENGTH (1000)
+static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 sb_id, bool cleanup_set, u16 opaque_fid)
+{
+ u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
+ u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
+ u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
+ u8 type = 0; /* FIXME MichalS type??? */
+
+ OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 -
+ IGU_REG_CLEANUP_STATUS_0) != 0x200);
+
+ /* Use the Control Command Register to perform the cleanup. There is an
+ * option to do this via the IGU BAR, but then it can't be used for VFs.
+ */
+
+ /* Set the data field */
+ SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
+ SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type);
+ SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
+
+ /* Set the control register */
+ SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
+ SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
+ SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
+
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);
+
+ OSAL_BARRIER(p_hwfn->p_dev);
+
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
+
+ /* Flush the write to IGU */
+ OSAL_MMIOWB(p_hwfn->p_dev);
+
+ /* calculate where to read the status bit from */
+ sb_bit = 1 << (sb_id % 32);
+ sb_bit_addr = sb_id / 32 * sizeof(u32);
+
+ sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type);
+
+ /* Now wait for the command to complete */
+ while (--sleep_cnt) {
+ val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr);
+ if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
+ break;
+ OSAL_MSLEEP(5);
+ }
+
+ if (!sleep_cnt)
+ DP_NOTICE(p_hwfn, true,
+ "Timeout waiting for clear status 0x%08x [for sb %d]\n",
+ val, sb_id);
+}
+
+void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 sb_id, u16 opaque, bool b_set)
+{
+ int pi, i;
+
+ /* Set */
+ if (b_set)
+ ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque);
+
+ /* Clear */
+ ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);
+
+ /* Wait for the IGU SB to cleanup */
+ for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
+ u32 val;
+
+ val = ecore_rd(p_hwfn, p_ptt,
+ IGU_REG_WRITE_DONE_PENDING +
+ ((sb_id / 32) * 4));
+ if (val & (1 << (sb_id % 32)))
+ OSAL_UDELAY(10);
+ else
+ break;
+ }
+ if (i == IGU_CLEANUP_SLEEP_LENGTH)
+ DP_NOTICE(p_hwfn, true,
+ "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
+ sb_id);
+
+ /* Clear the CAU for the SB */
+ for (pi = 0; pi < 12; pi++)
+ ecore_wr(p_hwfn, p_ptt,
+ CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0);
+}
+
+void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool b_set, bool b_slowpath)
+{
+ u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
+ u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
+ u32 sb_id = 0, val = 0;
+
+ /* @@@TBD MichalK temporary... should be moved to init-tool... */
+ val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
+ val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
+ val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
+ /* end temporary */
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "IGU cleaning SBs [%d,...,%d]\n",
+ igu_base_sb, igu_base_sb + igu_sb_cnt - 1);
+
+ for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++)
+ ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
+ p_hwfn->hw_info.opaque_fid,
+ b_set);
+
+ if (!b_slowpath)
+ return;
+
+ sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "IGU cleaning slowpath SB [%d]\n", sb_id);
+ ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
+ p_hwfn->hw_info.opaque_fid, b_set);
+}
+
+static u32 ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u16 sb_id)
+{
+ u32 val = ecore_rd(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
+ struct ecore_igu_block *p_block;
+
+ p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
+
+ /* stop scanning when the first invalid PF entry is hit */
+ if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
+ GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
+ goto out;
+
+ /* Fill the block information */
+ p_block->status = ECORE_IGU_STATUS_VALID;
+ p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
+ p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
+ p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "IGU_BLOCK: [SB 0x%04x, Value in CAM 0x%08x] func_id = %d"
+ " is_pf = %d vector_num = 0x%x\n",
+ sb_id, val, p_block->function_id, p_block->is_pf,
+ p_block->vector_number);
+
+out:
+ return val;
+}
+
+enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_igu_info *p_igu_info;
+ struct ecore_igu_block *p_block;
+ u32 min_vf = 0, max_vf = 0, val;
+ u16 sb_id, last_iov_sb_id = 0;
+ u16 prev_sb_id = 0xFF;
+
+ p_hwfn->hw_info.p_igu_info = OSAL_ALLOC(p_hwfn->p_dev,
+ GFP_KERNEL,
+ sizeof(*p_igu_info));
+ if (!p_hwfn->hw_info.p_igu_info)
+ return ECORE_NOMEM;
+
+ OSAL_MEMSET(p_hwfn->hw_info.p_igu_info, 0, sizeof(*p_igu_info));
+
+ p_igu_info = p_hwfn->hw_info.p_igu_info;
+
+ /* Initialize base sb / sb cnt for PFs and VFs */
+ p_igu_info->igu_base_sb = 0xffff;
+ p_igu_info->igu_sb_cnt = 0;
+ p_igu_info->igu_dsb_id = 0xffff;
+ p_igu_info->igu_base_sb_iov = 0xffff;
+
+ if (p_hwfn->p_dev->p_iov_info) {
+ struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
+
+ min_vf = p_iov->first_vf_in_pf;
+ max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
+ }
+ for (sb_id = 0;
+ sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
+ sb_id++) {
+ p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
+ val = ecore_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id);
+ if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
+ GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
+ break;
+
+ if (p_block->is_pf) {
+ if (p_block->function_id == p_hwfn->rel_pf_id) {
+ p_block->status |= ECORE_IGU_STATUS_PF;
+
+ if (p_block->vector_number == 0) {
+ if (p_igu_info->igu_dsb_id == 0xffff)
+ p_igu_info->igu_dsb_id = sb_id;
+ } else {
+ if (p_igu_info->igu_base_sb == 0xffff) {
+ p_igu_info->igu_base_sb = sb_id;
+ } else if (prev_sb_id != sb_id - 1) {
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "consecutive igu"
+ " vectors for HWFN"
+ " %x broken",
+ p_hwfn->rel_pf_id);
+ break;
+ }
+ prev_sb_id = sb_id;
+ /* we don't count the default */
+ (p_igu_info->igu_sb_cnt)++;
+ }
+ }
+ } else {
+ if ((p_block->function_id >= min_vf) &&
+ (p_block->function_id < max_vf)) {
+ /* Available for VFs of this PF */
+ if (p_igu_info->igu_base_sb_iov == 0xffff) {
+ p_igu_info->igu_base_sb_iov = sb_id;
+ } else if (last_iov_sb_id != sb_id - 1) {
+ if (!val)
+ DP_VERBOSE(p_hwfn->p_dev,
+ ECORE_MSG_INTR,
+ "First uninited IGU"
+ " CAM entry at"
+ " index 0x%04x\n",
+ sb_id);
+ else
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "Consecutive igu"
+ " vectors for HWFN"
+ " %x vfs is broken"
+ " [jumps from %04x"
+ " to %04x]\n",
+ p_hwfn->rel_pf_id,
+ last_iov_sb_id,
+ sb_id);
+ break;
+ }
+ p_block->status |= ECORE_IGU_STATUS_FREE;
+ p_hwfn->hw_info.p_igu_info->free_blks++;
+ last_iov_sb_id = sb_id;
+ }
+ }
+ }
+
+ /* There's a possibility the igu_sb_cnt_iov doesn't properly reflect
+ * the number of VF SBs [especially for the first VF on the engine, as we
+ * can't differentiate between empty entries and its own entries].
+ * Since we don't really support more SBs than VFs today, prevent any
+ * such configuration by sanitizing the number of SBs to equal the
+ * number of VFs.
+ */
+ if (IS_PF_SRIOV(p_hwfn)) {
+ u16 total_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
+
+ if (total_vfs < p_igu_info->free_blks) {
+ DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV),
+ "Limiting number of SBs for IOV - %04x --> %04x\n",
+ p_igu_info->free_blks,
+ p_hwfn->p_dev->p_iov_info->total_vfs);
+ p_igu_info->free_blks = total_vfs;
+ } else if (total_vfs > p_igu_info->free_blks) {
+ DP_NOTICE(p_hwfn, true,
+ "IGU has only %04x SBs for VFs while the device has %04x VFs\n",
+ p_igu_info->free_blks, total_vfs);
+ return ECORE_INVAL;
+ }
+ }
+
+ p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] "
+ "igu_dsb_id=0x%x\n",
+ p_igu_info->igu_base_sb, p_igu_info->igu_base_sb_iov,
+ p_igu_info->igu_sb_cnt, p_igu_info->igu_sb_cnt_iov,
+ p_igu_info->igu_dsb_id);
+
+ if (p_igu_info->igu_base_sb == 0xffff ||
+ p_igu_info->igu_dsb_id == 0xffff || p_igu_info->igu_sb_cnt == 0) {
+ DP_NOTICE(p_hwfn, true,
+ "IGU CAM returned invalid values igu_base_sb=0x%x "
+ "igu_sb_cnt=%d igu_dsb_id=0x%x\n",
+ p_igu_info->igu_base_sb, p_igu_info->igu_sb_cnt,
+ p_igu_info->igu_dsb_id);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+/**
+ * @brief Initialize igu runtime registers
+ *
+ * @param p_hwfn
+ */
+void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn)
+{
+ u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
+
+ STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
+}
+
+#define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \
+ IGU_CMD_INT_ACK_BASE)
+#define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \
+ IGU_CMD_INT_ACK_BASE)
+u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn)
+{
+ u32 intr_status_hi = 0, intr_status_lo = 0;
+ u64 intr_status = 0;
+
+ intr_status_lo = REG_RD(p_hwfn,
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ LSB_IGU_CMD_ADDR * 8);
+ intr_status_hi = REG_RD(p_hwfn,
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ MSB_IGU_CMD_ADDR * 8);
+ intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;
+
+ return intr_status;
+}
+
+static void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn)
+{
+ OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn);
+ p_hwfn->b_sp_dpc_enabled = true;
+}
+
+static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn)
+{
+ p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn);
+ if (!p_hwfn->sp_dpc)
+ return ECORE_NOMEM;
+
+ return ECORE_SUCCESS;
+}
+
+static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn)
+{
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc);
+}
+
+enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ rc = ecore_int_sp_dpc_alloc(p_hwfn);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc mem\n");
+ return rc;
+ }
+
+ rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n");
+ return rc;
+ }
+
+ rc = ecore_int_sb_attn_alloc(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn->p_dev, "Failed to allocate sb attn mem\n");
+
+ return rc;
+}
+
+void ecore_int_free(struct ecore_hwfn *p_hwfn)
+{
+ ecore_int_sp_sb_free(p_hwfn);
+ ecore_int_sb_attn_free(p_hwfn);
+ ecore_int_sp_dpc_free(p_hwfn);
+}
+
+void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn)
+ return;
+
+ ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
+ ecore_int_sb_attn_setup(p_hwfn, p_ptt);
+ ecore_int_sp_dpc_setup(p_hwfn);
+}
+
+void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
+ struct ecore_sb_cnt_info *p_sb_cnt_info)
+{
+ struct ecore_igu_info *info = p_hwfn->hw_info.p_igu_info;
+
+ if (!info || !p_sb_cnt_info)
+ return;
+
+ p_sb_cnt_info->sb_cnt = info->igu_sb_cnt;
+ p_sb_cnt_info->sb_iov_cnt = info->igu_sb_cnt_iov;
+ p_sb_cnt_info->sb_free_blk = info->free_blks;
+}
+
+void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
+{
+ int i;
+
+ for_each_hwfn(p_dev, i)
+ p_dev->hwfns[i].b_int_requested = false;
+}
+
+void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable)
+{
+ p_dev->attn_clr_en = clr_enable;
+}
+
+enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 timer_res, u16 sb_id, bool tx)
+{
+ struct cau_sb_entry sb_entry;
+ enum _ecore_status_t rc;
+
+ if (!p_hwfn->hw_init_done) {
+ DP_ERR(p_hwfn, "hardware not initialized yet\n");
+ return ECORE_INVAL;
+ }
+
+ rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
+ sb_id * sizeof(u64),
+ (u64)(osal_uintptr_t)&sb_entry, 2, 0);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
+ return rc;
+ }
+
+ if (tx)
+ SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
+ else
+ SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
+
+ rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(osal_uintptr_t)&sb_entry,
+ CAU_REG_SB_VAR_MEMORY +
+ sb_id * sizeof(u64), 2, 0);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_sb_info *p_sb,
+ struct ecore_sb_info_dbg *p_info)
+{
+ u16 sbid = p_sb->igu_sb_id;
+ int i;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return ECORE_INVAL;
+
+ if (sbid > NUM_OF_SBS(p_hwfn->p_dev))
+ return ECORE_INVAL;
+
+ p_info->igu_prod = ecore_rd(p_hwfn, p_ptt,
+ IGU_REG_PRODUCER_MEMORY + sbid * 4);
+ p_info->igu_cons = ecore_rd(p_hwfn, p_ptt,
+ IGU_REG_CONSUMER_MEM + sbid * 4);
+
+ for (i = 0; i < PIS_PER_SB; i++)
+ p_info->pi[i] = (u16)ecore_rd(p_hwfn, p_ptt,
+ CAU_REG_PI_MEMORY +
+ sbid * 4 * PIS_PER_SB + i * 4);
+
+ return ECORE_SUCCESS;
+}
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_int.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_int.h
new file mode 100644
index 00000000..0c8929e3
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_int.h
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_INT_H__
+#define __ECORE_INT_H__
+
+#include "ecore.h"
+#include "ecore_int_api.h"
+
+#define ECORE_CAU_DEF_RX_TIMER_RES 0
+#define ECORE_CAU_DEF_TX_TIMER_RES 0
+
+#define ECORE_SB_ATT_IDX 0x0001
+#define ECORE_SB_EVENT_MASK 0x0003
+
+#define SB_ALIGNED_SIZE(p_hwfn) \
+ ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
+
+struct ecore_igu_block {
+ u8 status;
+#define ECORE_IGU_STATUS_FREE 0x01
+#define ECORE_IGU_STATUS_VALID 0x02
+#define ECORE_IGU_STATUS_PF 0x04
+
+ u8 vector_number;
+ u8 function_id;
+ u8 is_pf;
+};
+
+struct ecore_igu_map {
+ struct ecore_igu_block igu_blocks[MAX_TOT_SB_PER_PATH];
+};
+
+struct ecore_igu_info {
+ struct ecore_igu_map igu_map;
+ u16 igu_dsb_id;
+ u16 igu_base_sb;
+ u16 igu_base_sb_iov;
+ u16 igu_sb_cnt;
+ u16 igu_sb_cnt_iov;
+ u16 free_blks;
+};
+
+/* TODO Names of function may change... */
+void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool b_set, bool b_slowpath);
+
+void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_int_igu_read_cam - Reads the IGU CAM.
+ * This function needs to be called during hardware
+ * prepare. It reads the info from igu cam to know which
+ * status block is the default / base status block etc.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+typedef enum _ecore_status_t (*ecore_int_comp_cb_t) (struct ecore_hwfn *p_hwfn,
+ void *cookie);
+/**
+ * @brief ecore_int_register_cb - Register a callback function for the
+ * slowhwfn status block.
+ *
+ * Every protocol that uses the slowhwfn status block
+ * should register a callback function that will be called
+ * once there is an update of the sp status block.
+ *
+ * @param p_hwfn
+ * @param comp_cb - function to be called when there is an
+ * interrupt on the sp sb
+ *
+ * @param cookie - passed to the callback function
+ * @param sb_idx - OUT parameter which gives the chosen index
+ * for this protocol.
+ * @param p_fw_cons - pointer to the actual address of the
+ * consumer for this protocol.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
+ ecore_int_comp_cb_t comp_cb,
+ void *cookie,
+ u8 *sb_idx, __le16 **p_fw_cons);
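+/* Illustrative usage sketch (caller-side names such as my_proto_cb and
+ * p_my_cookie are assumptions, not part of the original sources):
+ *
+ *   static enum _ecore_status_t my_proto_cb(struct ecore_hwfn *p_hwfn,
+ *                                           void *cookie)
+ *   {
+ *           return ECORE_SUCCESS;
+ *   }
+ *
+ *   u8 sb_idx;
+ *   __le16 *p_fw_cons;
+ *
+ *   rc = ecore_int_register_cb(p_hwfn, my_proto_cb, p_my_cookie,
+ *                              &sb_idx, &p_fw_cons);
+ */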
+/**
+ * @brief ecore_int_unregister_cb - Unregisters callback
+ * function from sp sb.
+ * Partner of ecore_int_register_cb -> should be called
+ * when no longer required.
+ *
+ * @param p_hwfn
+ * @param pi
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi);
+
+/**
+ * @brief ecore_int_get_sp_sb_id - Get the slowhwfn sb id.
+ *
+ * @param p_hwfn
+ *
+ * @return u16
+ */
+u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief Status block cleanup. Should be called for each status
+ * block that will be used -> both PF / VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_id - igu status block id
+ * @param opaque - opaque fid of the sb owner.
+ * @param cleanup_set - set(1) / clear(0)
+ */
+void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 sb_id, u16 opaque, bool b_set);
+
+/**
+ * @brief ecore_int_cau_conf - configure cau for a given status
+ * block
+ *
+ * @param p_hwfn
+ * @param ptt
+ * @param sb_phys
+ * @param igu_sb_id
+ * @param vf_number
+ * @param vf_valid
+ */
+void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ dma_addr_t sb_phys,
+ u16 igu_sb_id, u16 vf_number, u8 vf_valid);
+
+/**
+ * @brief ecore_int_alloc
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_int_free
+ *
+ * @param p_hwfn
+ */
+void ecore_int_free(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_int_setup
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - Enable Interrupt & Attention for hw function
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param int_mode
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_igu_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_int_mode int_mode);
+
+/**
+ * @brief - Initialize CAU status block entry
+ *
+ * @param p_hwfn
+ * @param p_sb_entry
+ * @param pf_id
+ * @param vf_number
+ * @param vf_valid
+ */
+void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
+ struct cau_sb_entry *p_sb_entry, u8 pf_id,
+ u16 vf_number, u8 vf_valid);
+
+enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 timer_res, u16 sb_id, bool tx);
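+/* Illustrative note (not from the original sources): for a coalescing value
+ * of 200us the matching resolution would be timer_res = 1, following the same
+ * 0x7F/0xFF thresholds used by ecore_int_cau_conf_sb() in ecore_int.c.
+ */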
+#ifndef ASIC_ONLY
+#define ECORE_MAPPING_MEMORY_SIZE(dev) \
+ ((CHIP_REV_IS_SLOW(dev) && (!(dev)->b_is_emul_full)) ? \
+ 136 : NUM_OF_SBS(dev))
+#else
+#define ECORE_MAPPING_MEMORY_SIZE(dev) NUM_OF_SBS(dev)
+#endif
+
+#endif /* __ECORE_INT_H__ */
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_int_api.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_int_api.h
new file mode 100644
index 00000000..799fbe82
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_int_api.h
@@ -0,0 +1,327 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_INT_API_H__
+#define __ECORE_INT_API_H__
+
+#ifndef __EXTRACT__LINUX__
+#define ECORE_SB_IDX 0x0002
+
+#define RX_PI 0
+#define TX_PI(tc) (RX_PI + 1 + tc)
+
+#ifndef ECORE_INT_MODE
+#define ECORE_INT_MODE
+enum ecore_int_mode {
+ ECORE_INT_MODE_INTA,
+ ECORE_INT_MODE_MSIX,
+ ECORE_INT_MODE_MSI,
+ ECORE_INT_MODE_POLL,
+};
+#endif
+
+struct ecore_sb_info {
+ struct status_block *sb_virt;
+ dma_addr_t sb_phys;
+ u32 sb_ack; /* Last given ack */
+ u16 igu_sb_id;
+ void OSAL_IOMEM *igu_addr;
+ u8 flags;
+#define ECORE_SB_INFO_INIT 0x1
+#define ECORE_SB_INFO_SETUP 0x2
+
+#ifdef ECORE_CONFIG_DIRECT_HWFN
+ struct ecore_hwfn *p_hwfn;
+#endif
+ struct ecore_dev *p_dev;
+};
+
+struct ecore_sb_info_dbg {
+ u32 igu_prod;
+ u32 igu_cons;
+ u16 pi[PIS_PER_SB];
+};
+
+struct ecore_sb_cnt_info {
+ int sb_cnt;
+ int sb_iov_cnt;
+ int sb_free_blk;
+};
+
+static OSAL_INLINE u16 ecore_sb_update_sb_idx(struct ecore_sb_info *sb_info)
+{
+ u32 prod = 0;
+ u16 rc = 0;
+
+ /* barrier(); status block is written to by the chip */
+ /* FIXME: need some sort of barrier. */
+ prod = OSAL_LE32_TO_CPU(sb_info->sb_virt->prod_index) &
+ STATUS_BLOCK_PROD_INDEX_MASK;
+ if (sb_info->sb_ack != prod) {
+ sb_info->sb_ack = prod;
+ rc |= ECORE_SB_IDX;
+ }
+
+ OSAL_MMIOWB(sb_info->p_dev);
+ return rc;
+}
+
+/**
+ *
+ * @brief This function creates an update command for interrupts that is
+ * written to the IGU.
+ *
+ * @param sb_info - This is the structure allocated and
+ * initialized per status block. Assumption is
+ * that it was initialized using ecore_sb_init
+ * @param int_cmd - Enable/Disable/Nop
+ * @param upd_flg - whether igu consumer should be
+ * updated.
+ *
+ * @return OSAL_INLINE void
+ */
+static OSAL_INLINE void ecore_sb_ack(struct ecore_sb_info *sb_info,
+ enum igu_int_cmd int_cmd, u8 upd_flg)
+{
+ struct igu_prod_cons_update igu_ack = { 0 };
+
+ igu_ack.sb_id_and_flags =
+ ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
+ (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
+ (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
+ (IGU_SEG_ACCESS_REG << IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
+
+#ifdef ECORE_CONFIG_DIRECT_HWFN
+ DIRECT_REG_WR(sb_info->p_hwfn, sb_info->igu_addr,
+ igu_ack.sb_id_and_flags);
+#else
+ DIRECT_REG_WR(OSAL_NULL, sb_info->igu_addr, igu_ack.sb_id_and_flags);
+#endif
+ /* Both segments (interrupts & acks) are written to the same address;
+ * Need to guarantee all commands will be received (in-order) by HW.
+ */
+ OSAL_MMIOWB(sb_info->p_dev);
+ OSAL_BARRIER(sb_info->p_dev);
+}
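+/* Typical completion-path sketch (illustrative; IGU_INT_ENABLE is taken from
+ * the HSI igu_int_cmd enum, the rest are assumed caller-side names):
+ *
+ *   if (ecore_sb_update_sb_idx(sb_info) & ECORE_SB_IDX) {
+ *           ... process the updated status block ...
+ *   }
+ *   ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+ */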
+
+#ifdef ECORE_CONFIG_DIRECT_HWFN
+static OSAL_INLINE void __internal_ram_wr(struct ecore_hwfn *p_hwfn,
+ void OSAL_IOMEM *addr,
+ int size, u32 *data)
+#else
+static OSAL_INLINE void __internal_ram_wr(__rte_unused void *p_hwfn,
+ void OSAL_IOMEM *addr,
+ int size, u32 *data)
+#endif
+{
+ unsigned int i;
+
+ for (i = 0; i < size / sizeof(*data); i++)
+ DIRECT_REG_WR(p_hwfn, &((u32 OSAL_IOMEM *)addr)[i], data[i]);
+}
+
+#ifdef ECORE_CONFIG_DIRECT_HWFN
+static OSAL_INLINE void __internal_ram_wr_relaxed(struct ecore_hwfn *p_hwfn,
+ void OSAL_IOMEM * addr,
+ int size, u32 *data)
+#else
+static OSAL_INLINE void __internal_ram_wr_relaxed(__rte_unused void *p_hwfn,
+ void OSAL_IOMEM * addr,
+ int size, u32 *data)
+#endif
+{
+ unsigned int i;
+
+ for (i = 0; i < size / sizeof(*data); i++)
+ DIRECT_REG_WR_RELAXED(p_hwfn, &((u32 OSAL_IOMEM *)addr)[i],
+ data[i]);
+}
+
+#ifdef ECORE_CONFIG_DIRECT_HWFN
+static OSAL_INLINE void internal_ram_wr(struct ecore_hwfn *p_hwfn,
+ void OSAL_IOMEM * addr,
+ int size, u32 *data)
+{
+ __internal_ram_wr_relaxed(p_hwfn, addr, size, data);
+}
+#else
+static OSAL_INLINE void internal_ram_wr(void OSAL_IOMEM *addr,
+ int size, u32 *data)
+{
+ __internal_ram_wr_relaxed(OSAL_NULL, addr, size, data);
+}
+#endif
+
+#endif
+
+struct ecore_hwfn;
+struct ecore_ptt;
+
+enum ecore_coalescing_fsm {
+ ECORE_COAL_RX_STATE_MACHINE,
+ ECORE_COAL_TX_STATE_MACHINE
+};
+
+/**
+ * @brief ecore_int_cau_conf_pi - configure cau for a given
+ * status block
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param igu_sb_id
+ * @param pi_index
+ * @param state
+ * @param timeset
+ */
+void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 igu_sb_id,
+ u32 pi_index,
+ enum ecore_coalescing_fsm coalescing_fsm,
+ u8 timeset);
+
+/**
+ *
+ * @brief ecore_int_igu_enable_int - enable device interrupts
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param int_mode - interrupt mode to use
+ */
+void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_int_mode int_mode);
+
+/**
+ *
+ * @brief ecore_int_igu_disable_int - disable device interrupts
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ *
+ * @brief ecore_int_igu_read_sisr_reg - Reads the single isr multiple dpc
+ * register from igu.
+ *
+ * @param p_hwfn
+ *
+ * @return u64
+ */
+u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn);
+
+#define ECORE_SP_SB_ID 0xffff
+/**
+ * @brief ecore_int_sb_init - Initializes the sb_info structure.
+ *
+ * Once the structure is initialized it can be passed to SB-related functions.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_info points to an uninitialized (but
+ * allocated) sb_info structure
+ * @param sb_virt_addr
+ * @param sb_phy_addr
+ * @param sb_id the sb_id to be used (zero based in driver)
+ * should use ECORE_SP_SB_ID for SP Status block
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_sb_info *sb_info,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr, u16 sb_id);
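+/* Usage sketch (illustrative; mirrors ecore_int_sp_sb_alloc() in ecore_int.c,
+ * variable names are assumptions):
+ *
+ *   void *sb_virt;
+ *   dma_addr_t sb_phys = 0;
+ *
+ *   sb_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &sb_phys,
+ *                                     SB_ALIGNED_SIZE(p_hwfn));
+ *   if (sb_virt)
+ *           ecore_int_sb_init(p_hwfn, p_ptt, &sb_info, sb_virt,
+ *                             sb_phys, sb_id);
+ */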
+/**
+ * @brief ecore_int_sb_setup - Setup the sb.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_info initialized sb_info structure
+ */
+void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info);
+
+/**
+ * @brief ecore_int_sb_release - releases the sb_info structure.
+ *
+ * Once the structure is released, its memory can be freed.
+ *
+ * @param p_hwfn
+ * @param sb_info points to an allocated sb_info structure
+ * @param sb_id the sb_id to be used (zero based in driver)
+ * should never be equal to ECORE_SP_SB_ID
+ * (SP Status block)
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
+ struct ecore_sb_info *sb_info,
+ u16 sb_id);
+
+/**
+ * @brief ecore_int_sp_dpc - To be called when an interrupt is received on the
+ * default status block.
+ *
+ * @param p_hwfn - pointer to hwfn
+ *
+ */
+void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie);
+
+/**
+ * @brief ecore_int_get_num_sbs - get the number of status
+ * blocks configured for this function in the igu.
+ *
+ * @param p_hwfn
+ * @param p_sb_cnt_info
+ *
+ * @return
+ */
+void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
+ struct ecore_sb_cnt_info *p_sb_cnt_info);
+
+/**
+ * @brief ecore_int_disable_post_isr_release - performs the cleanup post ISR
+ * release. The API needs to be called after releasing all slowpath IRQs
+ * of the device.
+ *
+ * @param p_dev
+ *
+ */
+void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_int_attn_clr_enable - sets whether the general behavior is
+ * preventing attentions from being reasserted, or following the
+ * attributes of the specific attention.
+ *
+ * @param p_dev
+ * @param clr_enable
+ *
+ */
+void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable);
+
+/**
+ * @brief Read debug information regarding a given SB.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_sb - point to Status block for which we want to get info.
+ * @param p_info - pointer to struct to fill with information regarding SB.
+ *
+ * @return ECORE_SUCCESS if pointer is filled; failure otherwise.
+ */
+enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_sb_info *p_sb,
+ struct ecore_sb_info_dbg *p_info);
+
+#endif
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_iov_api.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_iov_api.h
new file mode 100644
index 00000000..50cb3f2b
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_iov_api.h
@@ -0,0 +1,717 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_SRIOV_API_H__
+#define __ECORE_SRIOV_API_H__
+
+#include "common_hsi.h"
+#include "ecore_status.h"
+
+#define ECORE_ETH_VF_NUM_MAC_FILTERS 1
+#define ECORE_ETH_VF_NUM_VLAN_FILTERS 2
+#define ECORE_VF_ARRAY_LENGTH (3)
+
+#define IS_VF(p_dev) ((p_dev)->b_is_vf)
+#define IS_PF(p_dev) (!((p_dev)->b_is_vf))
+#ifdef CONFIG_ECORE_SRIOV
+#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->p_dev->p_iov_info))
+#else
+#define IS_PF_SRIOV(p_hwfn) (0)
+#endif
+#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info))
+#define IS_PF_PDA(p_hwfn) 0 /* @@TBD Michalk */
+
+/* @@@ TBD MichalK - what should this number be*/
+#define ECORE_MAX_VF_CHAINS_PER_PF 16
+
+/* vport update extended feature tlvs flags */
+enum ecore_iov_vport_update_flag {
+ ECORE_IOV_VP_UPDATE_ACTIVATE = 0,
+ ECORE_IOV_VP_UPDATE_VLAN_STRIP = 1,
+ ECORE_IOV_VP_UPDATE_TX_SWITCH = 2,
+ ECORE_IOV_VP_UPDATE_MCAST = 3,
+ ECORE_IOV_VP_UPDATE_ACCEPT_PARAM = 4,
+ ECORE_IOV_VP_UPDATE_RSS = 5,
+ ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN = 6,
+ ECORE_IOV_VP_UPDATE_SGE_TPA = 7,
+ ECORE_IOV_VP_UPDATE_MAX = 8,
+};
+
+/* PF to VF STATUS is part of vfpf-channel API
+ * and must be forward compatible
+ */
+enum ecore_iov_pf_to_vf_status {
+ PFVF_STATUS_WAITING = 0,
+ PFVF_STATUS_SUCCESS,
+ PFVF_STATUS_FAILURE,
+ PFVF_STATUS_NOT_SUPPORTED,
+ PFVF_STATUS_NO_RESOURCE,
+ PFVF_STATUS_FORCED,
+ PFVF_STATUS_MALICIOUS,
+};
+
+struct ecore_mcp_link_params;
+struct ecore_mcp_link_state;
+struct ecore_mcp_link_capabilities;
+
+/* These defines are used by the hw-channel; should never change order */
+#define VFPF_ACQUIRE_OS_LINUX (0)
+#define VFPF_ACQUIRE_OS_WINDOWS (1)
+#define VFPF_ACQUIRE_OS_ESX (2)
+#define VFPF_ACQUIRE_OS_SOLARIS (3)
+#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4)
+
+struct ecore_vf_acquire_sw_info {
+ u32 driver_version;
+ u8 os_type;
+
+ /* We have several close releases that all use ~same FW with different
+ * versions [making it incompatible as the versioning scheme is still
+ * tied directly to the FW version], so allow the check to be overridden. Only
+ * those versions would actually support this feature [so it would not
+ * break forward compatibility with newer HV drivers that are no longer
+ * suited].
+ */
+ bool override_fw_version;
+};
+
+struct ecore_public_vf_info {
+ /* These copies will later be reflected in the bulletin board,
+ * but this copy should be newer.
+ */
+ u8 forced_mac[ETH_ALEN];
+ u16 forced_vlan;
+};
+
+struct ecore_iov_vf_init_params {
+ u16 rel_vf_id;
+
+ /* Number of requested queues; currently we don't support a different
+ * number of Rx/Tx queues.
+ */
+ /* TODO - remove this limitation */
+ u16 num_queues;
+
+ /* Allow the client to choose which qzones to use for Rx/Tx,
+ * and which queue_base to use for Tx queues on a per-queue basis.
+ * Notice values should be relative to the PF resources.
+ */
+ u16 req_rx_queue[ECORE_MAX_VF_CHAINS_PER_PF];
+ u16 req_tx_queue[ECORE_MAX_VF_CHAINS_PER_PF];
+
+ u8 vport_id;
+
+ /* Should be set in case RSS is going to be used for VF */
+ u8 rss_eng_id;
+};
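+/* Illustrative initialization sketch (all values are assumptions, not taken
+ * from the original sources):
+ *
+ *   struct ecore_iov_vf_init_params params = { 0 };
+ *
+ *   params.rel_vf_id = 0;
+ *   params.num_queues = 1;
+ *   params.vport_id = 1;
+ *   rc = ecore_iov_init_hw_for_vf(p_hwfn, p_ptt, &params);
+ */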
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+/* This is SW channel related only... */
+enum mbx_state {
+ VF_PF_UNKNOWN_STATE = 0,
+ VF_PF_WAIT_FOR_START_REQUEST = 1,
+ VF_PF_WAIT_FOR_NEXT_CHUNK_OF_REQUEST = 2,
+ VF_PF_REQUEST_IN_PROCESSING = 3,
+ VF_PF_RESPONSE_READY = 4,
+};
+
+struct ecore_iov_sw_mbx {
+ enum mbx_state mbx_state;
+
+ u32 request_size;
+ u32 request_offset;
+
+ u32 response_size;
+ u32 response_offset;
+};
+
+/**
+ * @brief Get the vf sw mailbox params
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return struct ecore_iov_sw_mbx*
+ */
+struct ecore_iov_sw_mbx*
+ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+#endif
+
+/* This struct is part of ecore_dev and contains data relevant to all hwfns;
+ * Initialized only if the SR-IOV capability is exposed in the PCIe config space.
+ */
+struct ecore_hw_sriov_info {
+ /* standard SRIOV capability fields, mostly for debugging */
+ int pos; /* capability position */
+ int nres; /* number of resources */
+ u32 cap; /* SR-IOV Capabilities */
+ u16 ctrl; /* SR-IOV Control */
+ u16 total_vfs; /* total VFs associated with the PF */
+ u16 num_vfs; /* number of vfs that have been started */
+ u16 initial_vfs; /* initial VFs associated with the PF */
+ u16 nr_virtfn; /* number of VFs available */
+ u16 offset; /* first VF Routing ID offset */
+ u16 stride; /* following VF stride */
+ u16 vf_device_id; /* VF device id */
+ u32 pgsz; /* page size for BAR alignment */
+ u8 link; /* Function Dependency Link */
+
+ u32 first_vf_in_pf;
+};
+
+#ifdef CONFIG_ECORE_SRIOV
+#ifndef LINUX_REMOVE
+/**
+ * @brief mark/clear all VFs before/after an incoming PCIe sriov
+ * disable.
+ *
+ * @param p_dev
+ * @param to_disable
+ */
+void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev,
+ u8 to_disable);
+
+/**
+ * @brief mark/clear chosen VF before/after an incoming PCIe
+ * sriov disable.
+ *
+ * @param p_dev
+ * @param rel_vf_id
+ * @param to_disable
+ */
+void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
+ u16 rel_vf_id,
+ u8 to_disable);
+
+/**
+ * @brief ecore_iov_init_hw_for_vf - initialize the HW for
+ * enabling access of a VF. Also includes preparing the
+ * IGU for VF access. This needs to be called AFTER hw is
+ * initialized and BEFORE VF is loaded inside the VM.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_params
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_iov_vf_init_params
+ *p_params);
+
+/**
+ * @brief ecore_iov_process_mbx_req - process a request received
+ * from the VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vfid
+ */
+void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ int vfid);
+
+/**
+ * @brief ecore_iov_release_hw_for_vf - called once the upper layer
+ * knows the VF is done with it; any resources allocated for the VF
+ * can be released at this point. This must be done once we know the
+ * VF is no longer loaded in the VM.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param rel_vf_id
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 rel_vf_id);
+
+/**
+ * @brief ecore_iov_set_vf_ctx - set a context for a given VF
+ *
+ * @param p_hwfn
+ * @param vf_id
+ * @param ctx
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
+ u16 vf_id,
+ void *ctx);
+
+/**
+ * @brief FLR cleanup for all VFs
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief FLR cleanup for single VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param rel_vf_id
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 rel_vf_id);
+
+/**
+ * @brief Update the bulletin with link information. Notice this does NOT
+ * send a bulletin update, only updates the PF's bulletin.
+ *
+ * @param p_hwfn
+ * @param p_vf
+ * @param params - the link params to use for the VF link configuration
+ * @param link - the link output to use for the VF link configuration
+ * @param p_caps - the link default capabilities.
+ */
+void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
+ u16 vfid,
+ struct ecore_mcp_link_params *params,
+ struct ecore_mcp_link_state *link,
+ struct ecore_mcp_link_capabilities *p_caps);
+
+/**
+ * @brief Returns link information as perceived by VF.
+ *
+ * @param p_hwfn
+ * @param p_vf
+ * @param p_params - the link params visible to vf.
+ * @param p_link - the link state visible to vf.
+ * @param p_caps - the link default capabilities visible to vf.
+ */
+void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
+ u16 vfid,
+ struct ecore_mcp_link_params *params,
+ struct ecore_mcp_link_state *link,
+ struct ecore_mcp_link_capabilities *p_caps);
+
+/**
+ * @brief return if the VF is pending FLR
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return bool
+ */
+bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
+ * @brief Check if given VF ID @vfid is valid
+ * w.r.t. @b_enabled_only value
+ * if b_enabled_only = true - only enabled VF id is valid
+ * else any VF id less than max_vfs is valid
+ *
+ * @param p_hwfn
+ * @param rel_vf_id - Relative VF ID
+ * @param b_enabled_only - consider only enabled VF
+ * @param b_non_malicious - true iff we want to validate vf isn't malicious.
+ *
+ * @return bool - true for valid VF ID
+ */
+bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn,
+ int rel_vf_id,
+ bool b_enabled_only, bool b_non_malicious);
+
+/**
+ * @brief Get VF's public info structure
+ *
+ * @param p_hwfn
+ * @param vfid - Relative VF ID
+ * @param b_enabled_only - false if want to access even if vf is disabled
+ *
+ * @return struct ecore_public_vf_info *
+ */
+struct ecore_public_vf_info*
+ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
+ u16 vfid, bool b_enabled_only);
+
+/**
+ * @brief Set pending events bitmap for given @vfid
+ *
+ * @param p_hwfn
+ * @param vfid
+ */
+void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid);
+
+/**
+ * @brief Copy the pending events bitmap into @events and clear
+ * the original copy of the events
+ *
+ * @param p_hwfn
+ */
+void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
+ u64 *events);
+
+/**
+ * @brief Copy VF's message to PF's buffer
+ *
+ * @param p_hwfn
+ * @param ptt
+ * @param vfid
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *ptt,
+ int vfid);
+/**
+ * @brief Set forced MAC address in PFs copy of bulletin board
+ * and configures FW/HW to support the configuration.
+ *
+ * @param p_hwfn
+ * @param mac
+ * @param vfid
+ */
+void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
+ u8 *mac, int vfid);
+
+/**
+ * @brief Set MAC address in PFs copy of bulletin board without
+ * configuring FW/HW.
+ *
+ * @param p_hwfn
+ * @param mac
+ * @param vfid
+ */
+enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
+ u8 *mac, int vfid);
+
+/**
+ * @brief Set the default behaviour of a VF when no VLANs are configured for
+ * it: whether to accept only untagged traffic or all traffic.
+ * Must be called prior to the VF vport-start.
+ *
+ * @param p_hwfn
+ * @param b_untagged_only
+ * @param vfid
+ *
+ * @return ECORE_SUCCESS if configuration would stick.
+ */
+enum _ecore_status_t
+ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
+ bool b_untagged_only,
+ int vfid);
+
+/**
+ * @brief Get VFs opaque fid.
+ *
+ * @param p_hwfn
+ * @param vfid
+ * @param opaque_fid
+ */
+void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
+ u16 *opaque_fid);
+
+/**
+ * @brief Set forced VLAN [pvid] in PFs copy of bulletin board
+ * and configures FW/HW to support the configuration.
+ * Setting of pvid 0 would clear the feature.
+ * @param p_hwfn
+ * @param pvid
+ * @param vfid
+ */
+void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
+ u16 pvid, int vfid);
+
+/**
+ * @brief Check if VF has VPORT instance. This can be used
+ * to check if VPORT is active.
+ *
+ * @param p_hwfn
+ */
+bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid);
+
+/**
+ * @brief PF posts the bulletin to the VF
+ *
+ * @param p_hwfn
+ * @param p_vf
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
+ int vfid,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Check if given VF (@vfid) is marked as stopped
+ *
+ * @param p_hwfn
+ * @param vfid
+ *
+ * @return bool : true if stopped
+ */
+bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid);
+
+/**
+ * @brief Configure VF anti spoofing
+ *
+ * @param p_hwfn
+ * @param vfid
+ * @param val - spoofchk value - true/false
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
+ int vfid, bool val);
+
+/**
+ * @brief Get VF's configured spoof value.
+ *
+ * @param p_hwfn
+ * @param vfid
+ *
+ * @return bool - spoofchk value - true/false
+ */
+bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid);
+
+/**
+ * @brief Check for SRIOV sanity by PF.
+ *
+ * @param p_hwfn
+ * @param vfid
+ *
+ * @return bool - true if the sanity checks pass, else false
+ */
+bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid);
+
+/**
+ * @brief Get the num of VF chains.
+ *
+ * @param p_hwfn
+ *
+ * @return u8
+ */
+u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief Get vf request mailbox params
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ * @param pp_req_virt_addr
+ * @param p_req_virt_size
+ */
+void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id,
+ void **pp_req_virt_addr,
+ u16 *p_req_virt_size);
+
+/**
+ * @brief Get vf mailbox params
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ * @param pp_reply_virt_addr
+ * @param p_reply_virt_size
+ */
+void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id,
+ void **pp_reply_virt_addr,
+ u16 *p_reply_virt_size);
+
+/**
+ * @brief Validate if the given length is a valid vfpf message
+ * length
+ *
+ * @param length
+ *
+ * @return bool
+ */
+bool ecore_iov_is_valid_vfpf_msg_length(u32 length);
+
+/**
+ * @brief Return the max pfvf message length
+ *
+ * @return u32
+ */
+u32 ecore_iov_pfvf_msg_length(void);
+
+/**
+ * @brief Returns forced MAC address if one is configured
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return OSAL_NULL if mac isn't forced; otherwise, returns the MAC.
+ */
+u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
+ * @brief Returns pvid if one is configured
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return 0 if no pvid is configured, otherwise the pvid.
+ */
+u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+/**
+ * @brief Configure a VF's Tx rate
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vfid
+ * @param val - tx rate value in Mb/sec.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ int vfid, int val);
+
+/**
+ * @brief - Retrieves the statistics associated with a VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vfid
+ * @param p_stats - this will be filled with the VF statistics
+ *
+ * @return ECORE_SUCCESS iff statistics were retrieved. Error otherwise.
+ */
+enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ int vfid,
+ struct ecore_eth_stats *p_stats);
+
+/**
+ * @brief - Retrieves num of rxqs chains
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return num of rxqs chains.
+ */
+u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
+ * @brief - Retrieves num of active rxqs chains
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
+ * @brief - Retrieves ctx pointer
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
+ * @brief - Retrieves VF`s num sbs
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
+ * @brief - Return true if VF is waiting for acquire
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
+ * @brief - Return true if VF is acquired but not initialized
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
+ * @brief - Return true if VF is acquired and initialized
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
+ * @brief - Return true if VF has started in FW
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
+ * @brief - Get VF's vport min rate configured.
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return - rate in Mbps
+ */
+int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid);
+#endif
+
+/**
+ * @brief - Given a VF index, return the index of the next active VF [including the given index].
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return E4_MAX_NUM_VFS in case no further active VFs, otherwise index.
+ */
+u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+
+void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn, int vfid,
+ u16 vxlan_port, u16 geneve_port);
+#endif /* CONFIG_ECORE_SRIOV */
+
+#define ecore_for_each_vf(_p_hwfn, _i) \
+ for (_i = ecore_iov_get_next_active_vf(_p_hwfn, 0); \
+ _i < E4_MAX_NUM_VFS; \
+ _i = ecore_iov_get_next_active_vf(_p_hwfn, _i + 1))
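+/* Illustrative iteration sketch (assumed caller context):
+ *
+ *   u16 vf_id;
+ *
+ *   ecore_for_each_vf(p_hwfn, vf_id) {
+ *           ... vf_id is the index of an active VF ...
+ *   }
+ */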
+
+#endif
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_iro.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_iro.h
new file mode 100644
index 00000000..b4bfe89f
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_iro.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __IRO_H__
+#define __IRO_H__
+
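+/* Note (illustrative, not part of the original sources): each *_OFFSET macro
+ * below resolves a storm-RAM address from the firmware-provided IRO table,
+ * e.g. TSTORM_PORT_STAT_OFFSET(1) expands to IRO[1].base + 1 * IRO[1].m1.
+ */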
+/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
+#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
+#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
+/* Tstorm port statistics */
+#define TSTORM_PORT_STAT_OFFSET(port_id) (IRO[1].base + ((port_id) * IRO[1].m1))
+#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
+/* Tstorm ll2 port statistics */
+#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) (IRO[2].base + \
+ ((port_id) * IRO[2].m1))
+#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size)
+/* Ustorm VF-PF Channel ready flag */
+#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) (IRO[3].base + \
+ ((vf_id) * IRO[3].m1))
+#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
+/* Ustorm Final flr cleanup ack */
+#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) (IRO[4].base + ((pf_id) * IRO[4].m1))
+#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size)
+/* Ustorm Event ring consumer */
+#define USTORM_EQE_CONS_OFFSET(pf_id) (IRO[5].base + ((pf_id) * IRO[5].m1))
+#define USTORM_EQE_CONS_SIZE (IRO[5].size)
+/* Ustorm eth queue zone */
+#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) (IRO[6].base + \
+ ((queue_zone_id) * IRO[6].m1))
+#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[6].size)
+/* Ustorm Common Queue ring consumer */
+#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) (IRO[7].base + \
+ ((queue_zone_id) * IRO[7].m1))
+#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size)
+/* Xstorm Integration Test Data */
+#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base)
+#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size)
+/* Ystorm Integration Test Data */
+#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
+#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
+/* Pstorm Integration Test Data */
+#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
+#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
+/* Tstorm Integration Test Data */
+#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
+#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
+/* Mstorm Integration Test Data */
+#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base)
+#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[12].size)
+/* Ustorm Integration Test Data */
+#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[13].base)
+#define USTORM_INTEG_TEST_DATA_SIZE (IRO[13].size)
+/* Tstorm producers */
+#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) (IRO[14].base + \
+ ((core_rx_queue_id) * IRO[14].m1))
+#define TSTORM_LL2_RX_PRODS_SIZE (IRO[14].size)
+/* Tstorm LightL2 queue statistics */
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+ (IRO[15].base + ((core_rx_queue_id) * IRO[15].m1))
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[15].size)
+/* Ustorm LiteL2 queue statistics */
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+ (IRO[16].base + ((core_rx_queue_id) * IRO[16].m1))
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[16].size)
+/* Pstorm LiteL2 queue statistics */
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
+ (IRO[17].base + ((core_tx_stats_id) * IRO[17].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[17].size)
+/* Mstorm queue statistics */
+#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[18].base + \
+ ((stat_counter_id) * IRO[18].m1))
+#define MSTORM_QUEUE_STAT_SIZE (IRO[18].size)
+/* Mstorm ETH PF queues producers */
+#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) (IRO[19].base + \
+ ((queue_id) * IRO[19].m1))
+#define MSTORM_ETH_PF_PRODS_SIZE (IRO[19].size)
+/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone size
+ * mode.
+ */
+#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) (IRO[20].base + \
+ ((vf_id) * IRO[20].m1) + ((vf_queue_id) * IRO[20].m2))
+#define MSTORM_ETH_VF_PRODS_SIZE (IRO[20].size)
+/* TPA aggregation timeout in us resolution (on ASIC) */
+#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[21].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[21].size)
+/* Mstorm pf statistics */
+#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) (IRO[22].base + ((pf_id) * IRO[22].m1))
+#define MSTORM_ETH_PF_STAT_SIZE (IRO[22].size)
+/* Ustorm queue statistics */
+#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[23].base + \
+ ((stat_counter_id) * IRO[23].m1))
+#define USTORM_QUEUE_STAT_SIZE (IRO[23].size)
+/* Ustorm pf statistics */
+#define USTORM_ETH_PF_STAT_OFFSET(pf_id) (IRO[24].base + ((pf_id) * IRO[24].m1))
+#define USTORM_ETH_PF_STAT_SIZE (IRO[24].size)
+/* Pstorm queue statistics */
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[25].base + \
+ ((stat_counter_id) * IRO[25].m1))
+#define PSTORM_QUEUE_STAT_SIZE (IRO[25].size)
+/* Pstorm pf statistics */
+#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) (IRO[26].base + ((pf_id) * IRO[26].m1))
+#define PSTORM_ETH_PF_STAT_SIZE (IRO[26].size)
+/* Control frame's EthType configuration for TX control frame security */
+#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethType_id) (IRO[27].base + \
+ ((ethType_id) * IRO[27].m1))
+#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[27].size)
+/* Tstorm last parser message */
+#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[28].base)
+#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[28].size)
+/* Tstorm Eth limit Rx rate */
+#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) (IRO[29].base + ((pf_id) * IRO[29].m1))
+#define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size)
+/* Xstorm queue zone */
+#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) (IRO[30].base + \
+ ((queue_id) * IRO[30].m1))
+#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[30].size)
+/* Ystorm cqe producer */
+#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[31].base + \
+ ((rss_id) * IRO[31].m1))
+#define YSTORM_TOE_CQ_PROD_SIZE (IRO[31].size)
+/* Ustorm cqe producer */
+#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[32].base + \
+ ((rss_id) * IRO[32].m1))
+#define USTORM_TOE_CQ_PROD_SIZE (IRO[32].size)
+/* Ustorm grq producer */
+#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) (IRO[33].base + \
+ ((pf_id) * IRO[33].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE (IRO[33].size)
+/* Tstorm cmdq-cons of given command queue-id */
+#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) (IRO[34].base + \
+ ((cmdq_queue_id) * IRO[34].m1))
+#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[34].size)
+/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID,
+ * BDqueue-id
+ */
+#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) (IRO[35].base + \
+ ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2))
+#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[35].size)
+/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */
+#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) (IRO[36].base + \
+ ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2))
+#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size)
+/* Tstorm iSCSI RX stats */
+#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) (IRO[37].base + \
+ ((pf_id) * IRO[37].m1))
+#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[37].size)
+/* Mstorm iSCSI RX stats */
+#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) (IRO[38].base + \
+ ((pf_id) * IRO[38].m1))
+#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size)
+/* Ustorm iSCSI RX stats */
+#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) (IRO[39].base + \
+ ((pf_id) * IRO[39].m1))
+#define USTORM_ISCSI_RX_STATS_SIZE (IRO[39].size)
+/* Xstorm iSCSI TX stats */
+#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) (IRO[40].base + \
+ ((pf_id) * IRO[40].m1))
+#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[40].size)
+/* Ystorm iSCSI TX stats */
+#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) (IRO[41].base + \
+ ((pf_id) * IRO[41].m1))
+#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size)
+/* Pstorm iSCSI TX stats */
+#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) (IRO[42].base + \
+ ((pf_id) * IRO[42].m1))
+#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size)
+/* Tstorm FCoE RX stats */
+#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) (IRO[43].base + \
+ ((pf_id) * IRO[43].m1))
+#define TSTORM_FCOE_RX_STATS_SIZE (IRO[43].size)
+/* Pstorm FCoE TX stats */
+#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) (IRO[44].base + \
+ ((pf_id) * IRO[44].m1))
+#define PSTORM_FCOE_TX_STATS_SIZE (IRO[44].size)
+/* Pstorm RDMA queue statistics */
+#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+ (IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1))
+#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[45].size)
+/* Tstorm RDMA queue statistics */
+#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) (IRO[46].base + \
+ ((rdma_stat_counter_id) * IRO[46].m1))
+#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
+/* Xstorm iWARP rxmit stats */
+#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) (IRO[47].base + \
+ ((pf_id) * IRO[47].m1))
+#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[47].size)
+/* Tstorm RoCE Event Statistics */
+#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) (IRO[48].base + \
+ ((roce_pf_id) * IRO[48].m1))
+#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[48].size)
+
+#endif /* __IRO_H__ */
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_iro_values.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_iro_values.h
new file mode 100644
index 00000000..6764bfa6
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_iro_values.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __IRO_VALUES_H__
+#define __IRO_VALUES_H__
+
+static const struct iro iro_arr[49] = {
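+/* Each entry is { base, m1, m2, m3, size }: 'base' is the storm RAM offset,
+ * 'm1'..'m3' are the per-index strides and 'size' is the element size, as
+ * consumed by the *_OFFSET()/*_SIZE accessors in ecore_iro.h.
+ */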
+/* YSTORM_FLOW_CONTROL_MODE_OFFSET */
+ { 0x0, 0x0, 0x0, 0x0, 0x8},
+/* TSTORM_PORT_STAT_OFFSET(port_id) */
+ { 0x4cb0, 0x80, 0x0, 0x0, 0x80},
+/* TSTORM_LL2_PORT_STAT_OFFSET(port_id) */
+ { 0x6518, 0x20, 0x0, 0x0, 0x20},
+/* USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) */
+ { 0xb00, 0x8, 0x0, 0x0, 0x4},
+/* USTORM_FLR_FINAL_ACK_OFFSET(pf_id) */
+ { 0xa80, 0x8, 0x0, 0x0, 0x4},
+/* USTORM_EQE_CONS_OFFSET(pf_id) */
+ { 0x0, 0x8, 0x0, 0x0, 0x2},
+/* USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) */
+ { 0x80, 0x8, 0x0, 0x0, 0x4},
+/* USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) */
+ { 0x84, 0x8, 0x0, 0x0, 0x2},
+/* XSTORM_INTEG_TEST_DATA_OFFSET */
+ { 0x4bc0, 0x0, 0x0, 0x0, 0x78},
+/* YSTORM_INTEG_TEST_DATA_OFFSET */
+ { 0x3df0, 0x0, 0x0, 0x0, 0x78},
+/* PSTORM_INTEG_TEST_DATA_OFFSET */
+ { 0x29b0, 0x0, 0x0, 0x0, 0x78},
+/* TSTORM_INTEG_TEST_DATA_OFFSET */
+ { 0x4c38, 0x0, 0x0, 0x0, 0x78},
+/* MSTORM_INTEG_TEST_DATA_OFFSET */
+ { 0x4990, 0x0, 0x0, 0x0, 0x78},
+/* USTORM_INTEG_TEST_DATA_OFFSET */
+ { 0x7e48, 0x0, 0x0, 0x0, 0x78},
+/* TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) */
+ { 0xa28, 0x8, 0x0, 0x0, 0x8},
+/* CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
+ { 0x61f8, 0x10, 0x0, 0x0, 0x10},
+/* CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
+ { 0xb820, 0x30, 0x0, 0x0, 0x30},
+/* CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) */
+ { 0x95b8, 0x30, 0x0, 0x0, 0x30},
+/* MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
+ { 0x4b60, 0x80, 0x0, 0x0, 0x40},
+/* MSTORM_ETH_PF_PRODS_OFFSET(queue_id) */
+ { 0x1f8, 0x4, 0x0, 0x0, 0x4},
+/* MSTORM_ETH_VF_PRODS_OFFSET(vf_id,vf_queue_id) */
+ { 0x53a0, 0x80, 0x4, 0x0, 0x4},
+/* MSTORM_TPA_TIMEOUT_US_OFFSET */
+ { 0xc7c8, 0x0, 0x0, 0x0, 0x4},
+/* MSTORM_ETH_PF_STAT_OFFSET(pf_id) */
+ { 0x4ba0, 0x80, 0x0, 0x0, 0x20},
+/* USTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
+ { 0x8050, 0x40, 0x0, 0x0, 0x30},
+/* USTORM_ETH_PF_STAT_OFFSET(pf_id) */
+ { 0xe770, 0x60, 0x0, 0x0, 0x60},
+/* PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
+ { 0x2b48, 0x80, 0x0, 0x0, 0x38},
+/* PSTORM_ETH_PF_STAT_OFFSET(pf_id) */
+ { 0xf1b0, 0x78, 0x0, 0x0, 0x78},
+/* PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethType_id) */
+ { 0x1f8, 0x4, 0x0, 0x0, 0x4},
+/* TSTORM_ETH_PRS_INPUT_OFFSET */
+ { 0xaef8, 0x0, 0x0, 0x0, 0xf0},
+/* ETH_RX_RATE_LIMIT_OFFSET(pf_id) */
+ { 0xafe8, 0x8, 0x0, 0x0, 0x8},
+/* XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) */
+ { 0x1f8, 0x8, 0x0, 0x0, 0x8},
+/* YSTORM_TOE_CQ_PROD_OFFSET(rss_id) */
+ { 0xac0, 0x8, 0x0, 0x0, 0x8},
+/* USTORM_TOE_CQ_PROD_OFFSET(rss_id) */
+ { 0x2578, 0x8, 0x0, 0x0, 0x8},
+/* USTORM_TOE_GRQ_PROD_OFFSET(pf_id) */
+ { 0x24f8, 0x8, 0x0, 0x0, 0x8},
+/* TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) */
+ { 0x0, 0x8, 0x0, 0x0, 0x8},
+/* TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */
+ { 0x200, 0x10, 0x8, 0x0, 0x8},
+/* MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */
+ { 0xb78, 0x10, 0x8, 0x0, 0x2},
+/* TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
+ { 0xd9a8, 0x38, 0x0, 0x0, 0x24},
+/* MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
+ { 0x12988, 0x10, 0x0, 0x0, 0x8},
+/* USTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
+ { 0x11aa0, 0x38, 0x0, 0x0, 0x18},
+/* XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
+ { 0xa8c0, 0x38, 0x0, 0x0, 0x10},
+/* YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
+ { 0x86f8, 0x30, 0x0, 0x0, 0x18},
+/* PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
+ { 0x101f8, 0x10, 0x0, 0x0, 0x10},
+/* TSTORM_FCOE_RX_STATS_OFFSET(pf_id) */
+ { 0xde28, 0x48, 0x0, 0x0, 0x38},
+/* PSTORM_FCOE_TX_STATS_OFFSET(pf_id) */
+ { 0x10660, 0x20, 0x0, 0x0, 0x20},
+/* PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
+ { 0x2b80, 0x80, 0x0, 0x0, 0x10},
+/* TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
+ { 0x5020, 0x10, 0x0, 0x0, 0x10},
+/* XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) */
+ { 0xc9b0, 0x30, 0x0, 0x0, 0x10},
+/* TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) */
+ { 0xeec0, 0x10, 0x0, 0x0, 0x10},
+};
+
+#endif /* __IRO_VALUES_H__ */
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_l2.c b/src/seastar/dpdk/drivers/net/qede/base/ecore_l2.c
new file mode 100644
index 00000000..4ab8fd5f
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_l2.c
@@ -0,0 +1,2104 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+
+#include "ecore.h"
+#include "ecore_status.h"
+#include "ecore_hsi_eth.h"
+#include "ecore_chain.h"
+#include "ecore_spq.h"
+#include "ecore_init_fw_funcs.h"
+#include "ecore_cxt.h"
+#include "ecore_l2.h"
+#include "ecore_sp_commands.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore_iro.h"
+#include "reg_addr.h"
+#include "ecore_int.h"
+#include "ecore_hw.h"
+#include "ecore_vf.h"
+#include "ecore_sriov.h"
+#include "ecore_mcp.h"
+
+#define ECORE_MAX_SGES_NUM 16
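+/* CRC-32C (Castagnoli) polynomial, used below when hashing MAC addresses
+ * into approximate-multicast bins.
+ */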
+#define CRC32_POLY 0x1edc6f41
+
+struct ecore_l2_info {
+ u32 queues;
+ unsigned long **pp_qid_usage;
+
+ /* The lock is meant to synchronize access to the qid usage */
+ osal_mutex_t lock;
+};
+
+enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_l2_info *p_l2_info;
+ unsigned long **pp_qids;
+ u32 i;
+
+ if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
+ return ECORE_SUCCESS;
+
+ p_l2_info = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_l2_info));
+ if (!p_l2_info)
+ return ECORE_NOMEM;
+ p_hwfn->p_l2_info = p_l2_info;
+
+ if (IS_PF(p_hwfn->p_dev)) {
+ p_l2_info->queues = RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
+ } else {
+ u8 rx = 0, tx = 0;
+
+ ecore_vf_get_num_rxqs(p_hwfn, &rx);
+ ecore_vf_get_num_txqs(p_hwfn, &tx);
+
+ p_l2_info->queues = (u32)OSAL_MAX_T(u8, rx, tx);
+ }
+
+ pp_qids = OSAL_VZALLOC(p_hwfn->p_dev,
+ sizeof(unsigned long *) *
+ p_l2_info->queues);
+ if (pp_qids == OSAL_NULL)
+ return ECORE_NOMEM;
+ p_l2_info->pp_qid_usage = pp_qids;
+
+ for (i = 0; i < p_l2_info->queues; i++) {
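+ /* One bit per potential queue-cid within the queue-zone */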
+ pp_qids[i] = OSAL_VZALLOC(p_hwfn->p_dev,
+ MAX_QUEUES_PER_QZONE / 8);
+ if (pp_qids[i] == OSAL_NULL)
+ return ECORE_NOMEM;
+ }
+
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock);
+#endif
+
+ return ECORE_SUCCESS;
+}
+
+void ecore_l2_setup(struct ecore_hwfn *p_hwfn)
+{
+ if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
+ return;
+
+ OSAL_MUTEX_INIT(&p_hwfn->p_l2_info->lock);
+}
+
+void ecore_l2_free(struct ecore_hwfn *p_hwfn)
+{
+ u32 i;
+
+ if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
+ return;
+
+ if (p_hwfn->p_l2_info == OSAL_NULL)
+ return;
+
+ if (p_hwfn->p_l2_info->pp_qid_usage == OSAL_NULL)
+ goto out_l2_info;
+
+ /* Free until we hit the first uninitialized entry */
+ for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
+ if (p_hwfn->p_l2_info->pp_qid_usage[i] == OSAL_NULL)
+ break;
+ OSAL_VFREE(p_hwfn->p_dev,
+ p_hwfn->p_l2_info->pp_qid_usage[i]);
+ }
+
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ /* The lock was allocated last, and only if everything else succeeded */
+ if (i == p_hwfn->p_l2_info->queues)
+ OSAL_MUTEX_DEALLOC(&p_hwfn->p_l2_info->lock);
+#endif
+
+ OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info->pp_qid_usage);
+
+out_l2_info:
+ OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info);
+ p_hwfn->p_l2_info = OSAL_NULL;
+}
+
+/* TODO - we'll need locking around these... */
+static bool ecore_eth_queue_qid_usage_add(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid)
+{
+ struct ecore_l2_info *p_l2_info = p_hwfn->p_l2_info;
+ u16 queue_id = p_cid->rel.queue_id;
+ bool b_rc = true;
+ u8 first;
+
+ OSAL_MUTEX_ACQUIRE(&p_l2_info->lock);
+
+ if (queue_id > p_l2_info->queues) {
+ DP_NOTICE(p_hwfn, true,
+ "Requested to increase usage for qzone %04x out of %08x\n",
+ queue_id, p_l2_info->queues);
+ b_rc = false;
+ goto out;
+ }
+
+ first = (u8)OSAL_FIND_FIRST_ZERO_BIT(p_l2_info->pp_qid_usage[queue_id],
+ MAX_QUEUES_PER_QZONE);
+ if (first >= MAX_QUEUES_PER_QZONE) {
+ b_rc = false;
+ goto out;
+ }
+
+ OSAL_SET_BIT(first, p_l2_info->pp_qid_usage[queue_id]);
+ p_cid->qid_usage_idx = first;
+
+out:
+ OSAL_MUTEX_RELEASE(&p_l2_info->lock);
+ return b_rc;
+}
+
+static void ecore_eth_queue_qid_usage_del(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid)
+{
+ OSAL_MUTEX_ACQUIRE(&p_hwfn->p_l2_info->lock);
+
+ OSAL_CLEAR_BIT(p_cid->qid_usage_idx,
+ p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);
+
+ OSAL_MUTEX_RELEASE(&p_hwfn->p_l2_info->lock);
+}
+
+void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid)
+{
+ /* Releasing a VF-queue is a bit more involved:
+ * - VFs always maintain the qid_usage on their own.
+ * - In legacy mode, they also maintain their CIDs.
+ */
+
+ /* VFs' CIDs are 0-based in PF-view, and uninitialized on VF */
+ if (IS_PF(p_hwfn->p_dev) && !p_cid->b_legacy_vf)
+ _ecore_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);
+ if (!p_cid->b_legacy_vf)
+ ecore_eth_queue_qid_usage_del(p_hwfn, p_cid);
+ OSAL_VFREE(p_hwfn->p_dev, p_cid);
+}
+
+/* This internal version is only meant to be called directly by PFs
+ * initializing CIDs for their VFs.
+ */
+static struct ecore_queue_cid *
+_ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid, u32 cid,
+ struct ecore_queue_start_common_params *p_params,
+ struct ecore_queue_cid_vf_params *p_vf_params)
+{
+ struct ecore_queue_cid *p_cid;
+ enum _ecore_status_t rc;
+
+ p_cid = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_cid));
+ if (p_cid == OSAL_NULL)
+ return OSAL_NULL;
+
+ p_cid->opaque_fid = opaque_fid;
+ p_cid->cid = cid;
+ p_cid->rel = *p_params;
+ p_cid->p_owner = p_hwfn;
+
+ /* Fill-in bits related to VFs' queues if information was provided */
+ if (p_vf_params != OSAL_NULL) {
+ p_cid->vfid = p_vf_params->vfid;
+ p_cid->vf_qid = p_vf_params->vf_qid;
+ p_cid->b_legacy_vf = p_vf_params->b_legacy;
+ } else {
+ p_cid->vfid = ECORE_QUEUE_CID_PF;
+ }
+
+ /* Don't try calculating the absolute indices for VFs */
+ if (IS_VF(p_hwfn->p_dev)) {
+ p_cid->abs = p_cid->rel;
+
+ goto out;
+ }
+
+ /* Calculate the engine-absolute indices of the resources.
+ * This would guarantee they're valid later on.
+ * In some cases [SBs] we already have the right values.
+ */
+ rc = ecore_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
+ if (rc != ECORE_SUCCESS)
+ goto fail;
+
+ rc = ecore_fw_l2_queue(p_hwfn, p_cid->rel.queue_id,
+ &p_cid->abs.queue_id);
+ if (rc != ECORE_SUCCESS)
+ goto fail;
+
+ /* In case of a PF configuring its VF's queues, the stats-id is already
+ * absolute [since there's a single index that's suitable per-VF].
+ */
+ if (p_cid->vfid == ECORE_QUEUE_CID_PF) {
+ rc = ecore_fw_vport(p_hwfn, p_cid->rel.stats_id,
+ &p_cid->abs.stats_id);
+ if (rc != ECORE_SUCCESS)
+ goto fail;
+ } else {
+ p_cid->abs.stats_id = p_cid->rel.stats_id;
+ }
+
+ /* SBs relevant information was already provided as absolute */
+ p_cid->abs.sb = p_cid->rel.sb;
+ p_cid->abs.sb_idx = p_cid->rel.sb_idx;
+
+out:
+ /* VF-images have provided the qid_usage_idx on their own.
+ * Otherwise, we need to allocate a unique one.
+ */
+ if (!p_vf_params) {
+ if (!ecore_eth_queue_qid_usage_add(p_hwfn, p_cid))
+ goto fail;
+ } else {
+ p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
+ p_cid->opaque_fid, p_cid->cid,
+ p_cid->rel.vport_id, p_cid->abs.vport_id,
+ p_cid->rel.queue_id, p_cid->qid_usage_idx,
+ p_cid->abs.queue_id,
+ p_cid->rel.stats_id, p_cid->abs.stats_id,
+ p_cid->abs.sb, p_cid->abs.sb_idx);
+
+ return p_cid;
+
+fail:
+ OSAL_VFREE(p_hwfn->p_dev, p_cid);
+ return OSAL_NULL;
+}
+
+struct ecore_queue_cid *
+ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ struct ecore_queue_cid_vf_params *p_vf_params)
+{
+ struct ecore_queue_cid *p_cid;
+ u8 vfid = ECORE_CXT_PF_CID;
+ bool b_legacy_vf = false;
+ u32 cid = 0;
+
+ /* In case of legacy VFs, the CID can be derived from the additional
+ * VF parameters - the VF assumes queue X uses CID X, so we can simply
+ * use the vf_qid for this purpose as well.
+ */
+ if (p_vf_params) {
+ vfid = p_vf_params->vfid;
+
+ if (p_vf_params->b_legacy) {
+ b_legacy_vf = true;
+ cid = p_vf_params->vf_qid;
+ }
+ }
+
+ /* Get a unique firmware CID for this queue, in case it's a PF.
+ * VFs don't need a CID, as the queue configuration will be done
+ * by the PF.
+ */
+ if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf) {
+ if (_ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
+ &cid, vfid) != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
+ return OSAL_NULL;
+ }
+ }
+
+ p_cid = _ecore_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
+ p_params, p_vf_params);
+ if ((p_cid == OSAL_NULL) && IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
+ _ecore_cxt_release_cid(p_hwfn, cid, vfid);
+
+ return p_cid;
+}
+
+static struct ecore_queue_cid *
+ecore_eth_queue_to_cid_pf(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params)
+{
+ return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, OSAL_NULL);
+}
+
+enum _ecore_status_t
+ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_start_params *p_params)
+{
+ struct vport_start_ramrod_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ u16 rx_mode = 0, tx_err = 0;
+ u8 abs_vport_id = 0;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+ rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_params->opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_VPORT_START,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vport_start;
+ p_ramrod->vport_id = abs_vport_id;
+
+ p_ramrod->mtu = OSAL_CPU_TO_LE16(p_params->mtu);
+ p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
+ p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
+ p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
+ p_ramrod->untagged = p_params->only_untagged;
+ p_ramrod->zero_placement_offset = p_params->zero_placement_offset;
+
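+ /* Start the vport with all unicast/multicast Rx dropped; the actual Rx
+ * mode is applied later through a vport-update ramrod.
+ */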
+ SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
+ SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
+
+ p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(rx_mode);
+
+ /* Handle requests for strict behavior on transmission errors */
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE,
+ p_params->b_err_illegal_vlan_mode ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_PACKET_TOO_SMALL,
+ p_params->b_err_small_pkt ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR,
+ p_params->b_err_anti_spoof ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS,
+ p_params->b_err_illegal_inband_mode ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG,
+ p_params->b_err_vlan_insert_with_inband ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_MTU_VIOLATION,
+ p_params->b_err_big_pkt ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME,
+ p_params->b_err_ctrl_frame ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ p_ramrod->tx_err_behav.values = OSAL_CPU_TO_LE16(tx_err);
+
+ /* TPA related fields */
+ OSAL_MEMSET(&p_ramrod->tpa_param, 0,
+ sizeof(struct eth_vport_tpa_param));
+ p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;
+
+ switch (p_params->tpa_mode) {
+ case ECORE_TPA_MODE_GRO:
+ p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
+ p_ramrod->tpa_param.tpa_max_size = (u16)-1;
+ p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
+ p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
+ p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
+ p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
+ p_ramrod->tpa_param.tpa_ipv4_tunn_en_flg = 1;
+ p_ramrod->tpa_param.tpa_ipv6_tunn_en_flg = 1;
+ p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
+ p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
+ break;
+ default:
+ break;
+ }
+
+ p_ramrod->tx_switching_en = p_params->tx_switching;
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
+ p_ramrod->tx_switching_en = 0;
+#endif
+
+ p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
+ p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;
+
+ /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
+ p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_hwfn->p_dev,
+ p_params->concrete_fid);
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+enum _ecore_status_t
+ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_start_params *p_params)
+{
+ if (IS_VF(p_hwfn->p_dev))
+ return ecore_vf_pf_vport_start(p_hwfn, p_params->vport_id,
+ p_params->mtu,
+ p_params->remove_inner_vlan,
+ p_params->tpa_mode,
+ p_params->max_buffers_per_cqe,
+ p_params->only_untagged);
+
+ return ecore_sp_eth_vport_start(p_hwfn, p_params);
+}
+
+static enum _ecore_status_t
+ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
+ struct vport_update_ramrod_data *p_ramrod,
+ struct ecore_rss_params *p_rss)
+{
+ struct eth_vport_rss_config *p_config;
+ int i, table_size;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ if (!p_rss) {
+ p_ramrod->common.update_rss_flg = 0;
+ return rc;
+ }
+ p_config = &p_ramrod->rss_config;
+
+ OSAL_BUILD_BUG_ON(ECORE_RSS_IND_TABLE_SIZE !=
+ ETH_RSS_IND_TABLE_ENTRIES_NUM);
+
+ rc = ecore_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
+ p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
+ p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
+ p_config->update_rss_key = p_rss->update_rss_key;
+
+ p_config->rss_mode = p_rss->rss_enable ?
+ ETH_VPORT_RSS_MODE_REGULAR : ETH_VPORT_RSS_MODE_DISABLED;
+
+ p_config->capabilities = 0;
+
+ SET_FIELD(p_config->capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
+ !!(p_rss->rss_caps & ECORE_RSS_IPV4));
+ SET_FIELD(p_config->capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
+ !!(p_rss->rss_caps & ECORE_RSS_IPV6));
+ SET_FIELD(p_config->capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
+ !!(p_rss->rss_caps & ECORE_RSS_IPV4_TCP));
+ SET_FIELD(p_config->capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
+ !!(p_rss->rss_caps & ECORE_RSS_IPV6_TCP));
+ SET_FIELD(p_config->capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
+ !!(p_rss->rss_caps & ECORE_RSS_IPV4_UDP));
+ SET_FIELD(p_config->capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
+ !!(p_rss->rss_caps & ECORE_RSS_IPV6_UDP));
+ p_config->tbl_size = p_rss->rss_table_size_log;
+ p_config->capabilities = OSAL_CPU_TO_LE16(p_config->capabilities);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
+ "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
+ p_ramrod->common.update_rss_flg,
+ p_config->rss_mode,
+ p_config->update_rss_capabilities,
+ p_config->capabilities,
+ p_config->update_rss_ind_table, p_config->update_rss_key);
+
+ table_size = OSAL_MIN_T(int, ECORE_RSS_IND_TABLE_SIZE,
+ 1 << p_config->tbl_size);
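+ /* The FW table holds 2^tbl_size entries; cap at the SW table size */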
+ for (i = 0; i < table_size; i++) {
+ struct ecore_queue_cid *p_queue = p_rss->rss_ind_table[i];
+
+ if (!p_queue)
+ return ECORE_INVAL;
+
+ p_config->indirection_table[i] =
+ OSAL_CPU_TO_LE16(p_queue->abs.queue_id);
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
+ "Configured RSS indirection table [%d entries]:\n",
+ table_size);
+ for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i += 0x10) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
+ "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 1]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 2]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 3]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 4]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 5]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 6]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 7]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 8]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 9]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 10]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 11]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 12]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 13]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 14]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 15]));
+ }
+
+ for (i = 0; i < 10; i++)
+ p_config->rss_key[i] = OSAL_CPU_TO_LE32(p_rss->rss_key[i]);
+
+ return rc;
+}
+
+static void
+ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn,
+ struct vport_update_ramrod_data *p_ramrod,
+ struct ecore_filter_accept_flags accept_flags)
+{
+ p_ramrod->common.update_rx_mode_flg =
+ accept_flags.update_rx_mode_config;
+ p_ramrod->common.update_tx_mode_flg =
+ accept_flags.update_tx_mode_config;
+
+#ifndef ASIC_ONLY
+ /* On B0 emulation we cannot enable Tx, since this would cause writes
+ * to the PVFC HW block, which isn't implemented in emulation.
+ */
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Non-Asic - prevent Tx mode in vport update\n");
+ p_ramrod->common.update_tx_mode_flg = 0;
+ }
+#endif
+
+ /* Set Rx mode accept flags */
+ if (p_ramrod->common.update_rx_mode_flg) {
+ u8 accept_filter = accept_flags.rx_accept_filter;
+ u16 state = 0;
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
+ !(!!(accept_filter & ECORE_ACCEPT_UCAST_MATCHED) ||
+ !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED)));
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
+ !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED));
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
+ !(!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) ||
+ !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
+ (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
+ !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
+ !!(accept_filter & ECORE_ACCEPT_BCAST));
+
+ p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(state);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "vport[%02x] p_ramrod->rx_mode.state = 0x%x\n",
+ p_ramrod->common.vport_id, state);
+ }
+
+ /* Set Tx mode accept flags */
+ if (p_ramrod->common.update_tx_mode_flg) {
+ u8 accept_filter = accept_flags.tx_accept_filter;
+ u16 state = 0;
+
+ SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
+ !!(accept_filter & ECORE_ACCEPT_NONE));
+
+ SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
+ !!(accept_filter & ECORE_ACCEPT_NONE));
+
+ SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
+ (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
+ !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));
+
+ SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
+ !!(accept_filter & ECORE_ACCEPT_BCAST));
+
+ p_ramrod->tx_mode.state = OSAL_CPU_TO_LE16(state);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "vport[%02x] p_ramrod->tx_mode.state = 0x%x\n",
+ p_ramrod->common.vport_id, state);
+ }
+}
+
+static void
+ecore_sp_vport_update_sge_tpa(struct ecore_hwfn *p_hwfn,
+ struct vport_update_ramrod_data *p_ramrod,
+ struct ecore_sge_tpa_params *p_params)
+{
+ struct eth_vport_tpa_param *p_tpa;
+
+ if (!p_params) {
+ p_ramrod->common.update_tpa_param_flg = 0;
+ p_ramrod->common.update_tpa_en_flg = 0;
+ return;
+ }
+
+ p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
+ p_tpa = &p_ramrod->tpa_param;
+ p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
+ p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
+ p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
+ p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;
+
+ p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
+ p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
+ p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
+ p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
+ p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
+ p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
+ p_tpa->tpa_max_size = p_params->tpa_max_size;
+ p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
+ p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
+}
+
+static void
+ecore_sp_update_mcast_bin(struct ecore_hwfn *p_hwfn,
+ struct vport_update_ramrod_data *p_ramrod,
+ struct ecore_sp_vport_update_params *p_params)
+{
+ int i;
+
+ OSAL_MEMSET(&p_ramrod->approx_mcast.bins, 0,
+ sizeof(p_ramrod->approx_mcast.bins));
+
+ if (!p_params->update_approx_mcast_flg)
+ return;
+
+ p_ramrod->common.update_approx_mcast_flg = 1;
+ for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+ u32 *p_bins = (u32 *)p_params->bins;
+
+ p_ramrod->approx_mcast.bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
+ }
+}
+
+enum _ecore_status_t
+ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_params,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data)
+{
+ struct ecore_rss_params *p_rss_params = p_params->rss_params;
+ struct vport_update_ramrod_data_cmn *p_cmn;
+ struct ecore_sp_init_data init_data;
+ struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ u8 abs_vport_id = 0, val;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+ if (IS_VF(p_hwfn->p_dev)) {
+ rc = ecore_vf_pf_vport_update(p_hwfn, p_params);
+ return rc;
+ }
+
+ rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_params->opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_VPORT_UPDATE,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Copy input params to ramrod according to FW struct */
+ p_ramrod = &p_ent->ramrod.vport_update;
+ p_cmn = &p_ramrod->common;
+
+ p_cmn->vport_id = abs_vport_id;
+
+ p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
+ p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
+ p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
+ p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
+
+ p_cmn->accept_any_vlan = p_params->accept_any_vlan;
+ val = p_params->update_accept_any_vlan_flg;
+ p_cmn->update_accept_any_vlan_flg = val;
+
+ p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
+ val = p_params->update_inner_vlan_removal_flg;
+ p_cmn->update_inner_vlan_removal_en_flg = val;
+
+ p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
+ val = p_params->update_default_vlan_enable_flg;
+ p_cmn->update_default_vlan_en_flg = val;
+
+ p_cmn->default_vlan = OSAL_CPU_TO_LE16(p_params->default_vlan);
+ p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;
+
+ p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;
+
+ p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
+ if (p_ramrod->common.tx_switching_en ||
+ p_ramrod->common.update_tx_switching_en_flg) {
+ DP_NOTICE(p_hwfn, false,
+ "FPGA - why are we seeing tx-switching? Overriding it\n");
+ p_ramrod->common.tx_switching_en = 0;
+ p_ramrod->common.update_tx_switching_en_flg = 1;
+ }
+#endif
+ p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;
+
+ p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
+ val = p_params->update_anti_spoofing_en_flg;
+ p_ramrod->common.update_anti_spoofing_en_flg = val;
+
+ rc = ecore_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
+ if (rc != ECORE_SUCCESS) {
+ /* Return spq entry which is taken in ecore_sp_init_request()*/
+ ecore_spq_return_entry(p_hwfn, p_ent);
+ return rc;
+ }
+
+ /* Update mcast bins for VFs, PF doesn't use this functionality */
+ ecore_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
+
+ ecore_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
+ ecore_sp_vport_update_sge_tpa(p_hwfn, p_ramrod,
+ p_params->sge_tpa_params);
+ if (p_params->mtu) {
+ p_ramrod->common.update_mtu_flg = 1;
+ p_ramrod->common.mtu = OSAL_CPU_TO_LE16(p_params->mtu);
+ }
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid, u8 vport_id)
+{
+ struct vport_stop_ramrod_data *p_ramrod;
+ struct ecore_sp_init_data init_data;
+ struct ecore_spq_entry *p_ent;
+ u8 abs_vport_id = 0;
+ enum _ecore_status_t rc;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return ecore_vf_pf_vport_stop(p_hwfn);
+
+ rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_VPORT_STOP,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vport_stop;
+ p_ramrod->vport_id = abs_vport_id;
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+static enum _ecore_status_t
+ecore_vf_pf_accept_flags(struct ecore_hwfn *p_hwfn,
+ struct ecore_filter_accept_flags *p_accept_flags)
+{
+ struct ecore_sp_vport_update_params s_params;
+
+ OSAL_MEMSET(&s_params, 0, sizeof(s_params));
+ OSAL_MEMCPY(&s_params.accept_flags, p_accept_flags,
+ sizeof(struct ecore_filter_accept_flags));
+
+ return ecore_vf_pf_vport_update(p_hwfn, &s_params);
+}
+
+enum _ecore_status_t
+ecore_filter_accept_cmd(struct ecore_dev *p_dev,
+ u8 vport,
+ struct ecore_filter_accept_flags accept_flags,
+ u8 update_accept_any_vlan,
+ u8 accept_any_vlan,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data)
+{
+ struct ecore_sp_vport_update_params vport_update_params;
+ enum _ecore_status_t rc;
+ int i;
+
+ /* Prepare and send the vport rx_mode change */
+ OSAL_MEMSET(&vport_update_params, 0, sizeof(vport_update_params));
+ vport_update_params.vport_id = vport;
+ vport_update_params.accept_flags = accept_flags;
+ vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
+ vport_update_params.accept_any_vlan = accept_any_vlan;
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ if (IS_VF(p_dev)) {
+ rc = ecore_vf_pf_accept_flags(p_hwfn, &accept_flags);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ continue;
+ }
+
+ rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
+ comp_mode, p_comp_data);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_dev, "Update rx_mode failed %d\n", rc);
+ return rc;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
+ accept_flags.rx_accept_filter,
+ accept_flags.tx_accept_filter);
+
+ if (update_accept_any_vlan)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "accept_any_vlan=%d configured\n",
+ accept_any_vlan);
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size)
+{
+ struct rx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
+ p_cid->opaque_fid, p_cid->cid, p_cid->abs.queue_id,
+ p_cid->abs.vport_id, p_cid->abs.sb);
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_RX_QUEUE_START,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rx_queue_start;
+
+ p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->abs.sb);
+ p_ramrod->sb_index = p_cid->abs.sb_idx;
+ p_ramrod->vport_id = p_cid->abs.vport_id;
+ p_ramrod->stats_counter_id = p_cid->abs.stats_id;
+ p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
+ p_ramrod->complete_cqe_flg = 0;
+ p_ramrod->complete_event_flg = 1;
+
+ p_ramrod->bd_max_bytes = OSAL_CPU_TO_LE16(bd_max_bytes);
+ DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);
+
+ p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size);
+ DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
+
+ if (p_cid->vfid != ECORE_QUEUE_CID_PF) {
+ p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Queue%s is meant for VF rxq[%02x]\n",
+ !!p_cid->b_legacy_vf ? " [legacy]" : "",
+ p_cid->vf_qid);
+ p_ramrod->vf_rx_prod_use_zone_a = !!p_cid->b_legacy_vf;
+ }
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+static enum _ecore_status_t
+ecore_eth_pf_rx_queue_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ void OSAL_IOMEM * *pp_prod)
+{
+ u32 init_prod_val = 0;
+
+ *pp_prod = (u8 OSAL_IOMEM *)
+ p_hwfn->regview +
+ GTT_BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);
+
+ /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
+ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
+ (u32 *)(&init_prod_val));
+
+ return ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr, cqe_pbl_size);
+}
+
+enum _ecore_status_t
+ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ struct ecore_rxq_start_ret_params *p_ret_params)
+{
+ struct ecore_queue_cid *p_cid;
+ enum _ecore_status_t rc;
+
+ /* Allocate a CID for the queue */
+ p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params);
+ if (p_cid == OSAL_NULL)
+ return ECORE_NOMEM;
+
+ if (IS_PF(p_hwfn->p_dev))
+ rc = ecore_eth_pf_rx_queue_start(p_hwfn, p_cid,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr, cqe_pbl_size,
+ &p_ret_params->p_prod);
+ else
+ rc = ecore_vf_pf_rxq_start(p_hwfn, p_cid,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr,
+ cqe_pbl_size,
+ &p_ret_params->p_prod);
+
+ /* Provide the caller with the queue CID as an opaque handle */
+ if (rc != ECORE_SUCCESS)
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
+ else
+ p_ret_params->p_handle = (void *)p_cid;
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
+ void **pp_rxq_handles,
+ u8 num_rxqs,
+ u8 complete_cqe_flg,
+ u8 complete_event_flg,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data)
+{
+ struct rx_queue_update_ramrod_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ struct ecore_queue_cid *p_cid;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+ u8 i;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return ecore_vf_pf_rxqs_update(p_hwfn,
+ (struct ecore_queue_cid **)
+ pp_rxq_handles,
+ num_rxqs,
+ complete_cqe_flg,
+ complete_event_flg);
+
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
+
+ for (i = 0; i < num_rxqs; i++) {
+ p_cid = ((struct ecore_queue_cid **)pp_rxq_handles)[i];
+
+ /* Get SPQ entry */
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_RX_QUEUE_UPDATE,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rx_queue_update;
+ p_ramrod->vport_id = p_cid->abs.vport_id;
+
+ p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
+ p_ramrod->complete_cqe_flg = complete_cqe_flg;
+ p_ramrod->complete_event_flg = complete_event_flg;
+
+ rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_eth_pf_rx_queue_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ bool b_eq_completion_only,
+ bool b_cqe_completion)
+{
+ struct rx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc;
+
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_RX_QUEUE_STOP,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rx_queue_stop;
+ p_ramrod->vport_id = p_cid->abs.vport_id;
+ p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
+
+ /* Cleaning the queue requires the completion to arrive there.
+ * In addition, VFs require the answer to come as eqe to PF.
+ */
+ p_ramrod->complete_cqe_flg = ((p_cid->vfid == ECORE_QUEUE_CID_PF) &&
+ !b_eq_completion_only) ||
+ b_cqe_completion;
+ p_ramrod->complete_event_flg = (p_cid->vfid != ECORE_QUEUE_CID_PF) ||
+ b_eq_completion_only;
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+enum _ecore_status_t ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
+ void *p_rxq,
+ bool eq_completion_only,
+ bool cqe_completion)
+{
+ struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_rxq;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+ if (IS_PF(p_hwfn->p_dev))
+ rc = ecore_eth_pf_rx_queue_stop(p_hwfn, p_cid,
+ eq_completion_only,
+ cqe_completion);
+ else
+ rc = ecore_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);
+
+ if (rc == ECORE_SUCCESS)
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ dma_addr_t pbl_addr, u16 pbl_size,
+ u16 pq_id)
+{
+ struct tx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_TX_QUEUE_START,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.tx_queue_start;
+ p_ramrod->vport_id = p_cid->abs.vport_id;
+
+ p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->abs.sb);
+ p_ramrod->sb_index = p_cid->abs.sb_idx;
+ p_ramrod->stats_counter_id = p_cid->abs.stats_id;
+
+ p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
+ p_ramrod->same_as_last_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
+
+ p_ramrod->pbl_size = OSAL_CPU_TO_LE16(pbl_size);
+ DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
+
+ p_ramrod->qm_pq_id = OSAL_CPU_TO_LE16(pq_id);
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+static enum _ecore_status_t
+ecore_eth_pf_tx_queue_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ u8 tc,
+ dma_addr_t pbl_addr, u16 pbl_size,
+ void OSAL_IOMEM * *pp_doorbell)
+{
+ enum _ecore_status_t rc;
+
+ /* TODO - set tc in the pq_params for multi-cos */
+ rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
+ pbl_addr, pbl_size,
+ ecore_get_cm_pq_idx_mcos(p_hwfn, tc));
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Provide the caller with the necessary return values */
+ *pp_doorbell = (u8 OSAL_IOMEM *)
+ p_hwfn->doorbells +
+ DB_ADDR(p_cid->cid, DQ_DEMS_LEGACY);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ u8 tc,
+ dma_addr_t pbl_addr, u16 pbl_size,
+ struct ecore_txq_start_ret_params *p_ret_params)
+{
+ struct ecore_queue_cid *p_cid;
+ enum _ecore_status_t rc;
+
+ p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params);
+ if (p_cid == OSAL_NULL)
+ return ECORE_INVAL;
+
+ if (IS_PF(p_hwfn->p_dev))
+ rc = ecore_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
+ pbl_addr, pbl_size,
+ &p_ret_params->p_doorbell);
+ else
+ rc = ecore_vf_pf_txq_start(p_hwfn, p_cid,
+ pbl_addr, pbl_size,
+ &p_ret_params->p_doorbell);
+
+ if (rc != ECORE_SUCCESS)
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
+ else
+ p_ret_params->p_handle = (void *)p_cid;
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_eth_pf_tx_queue_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid)
+{
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc;
+
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_TX_QUEUE_STOP,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
+ void *p_handle)
+{
+ struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle;
+ enum _ecore_status_t rc;
+
+ if (IS_PF(p_hwfn->p_dev))
+ rc = ecore_eth_pf_tx_queue_stop(p_hwfn, p_cid);
+ else
+ rc = ecore_vf_pf_txq_stop(p_hwfn, p_cid);
+
+ if (rc == ECORE_SUCCESS)
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
+ return rc;
+}
+
+static enum eth_filter_action
+ecore_filter_action(enum ecore_filter_opcode opcode)
+{
+ enum eth_filter_action action = MAX_ETH_FILTER_ACTION;
+
+ switch (opcode) {
+ case ECORE_FILTER_ADD:
+ action = ETH_FILTER_ACTION_ADD;
+ break;
+ case ECORE_FILTER_REMOVE:
+ action = ETH_FILTER_ACTION_REMOVE;
+ break;
+ case ECORE_FILTER_FLUSH:
+ action = ETH_FILTER_ACTION_REMOVE_ALL;
+ break;
+ default:
+ action = MAX_ETH_FILTER_ACTION;
+ }
+
+ return action;
+}
+
+static enum _ecore_status_t
+ecore_filter_ucast_common(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_filter_ucast *p_filter_cmd,
+ struct vport_filter_update_ramrod_data **pp_ramrod,
+ struct ecore_spq_entry **pp_ent,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data)
+{
+ u8 vport_to_add_to = 0, vport_to_remove_from = 0;
+ struct vport_filter_update_ramrod_data *p_ramrod;
+ struct eth_filter_cmd *p_first_filter;
+ struct eth_filter_cmd *p_second_filter;
+ struct ecore_sp_init_data init_data;
+ enum eth_filter_action action;
+ enum _ecore_status_t rc;
+
+ rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
+ &vport_to_remove_from);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
+ &vport_to_add_to);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
+
+ rc = ecore_sp_init_request(p_hwfn, pp_ent,
+ ETH_RAMROD_FILTERS_UPDATE,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
+ p_ramrod = *pp_ramrod;
+ p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
+ p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Non-Asic - prevent Tx filters\n");
+ p_ramrod->filter_cmd_hdr.tx = 0;
+ }
+#endif
+
+ switch (p_filter_cmd->opcode) {
+ case ECORE_FILTER_REPLACE:
+ case ECORE_FILTER_MOVE:
+ p_ramrod->filter_cmd_hdr.cmd_cnt = 2;
+ break;
+ default:
+ p_ramrod->filter_cmd_hdr.cmd_cnt = 1;
+ break;
+ }
+
+ p_first_filter = &p_ramrod->filter_cmds[0];
+ p_second_filter = &p_ramrod->filter_cmds[1];
+
+ switch (p_filter_cmd->type) {
+ case ECORE_FILTER_MAC:
+ p_first_filter->type = ETH_FILTER_TYPE_MAC;
+ break;
+ case ECORE_FILTER_VLAN:
+ p_first_filter->type = ETH_FILTER_TYPE_VLAN;
+ break;
+ case ECORE_FILTER_MAC_VLAN:
+ p_first_filter->type = ETH_FILTER_TYPE_PAIR;
+ break;
+ case ECORE_FILTER_INNER_MAC:
+ p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC;
+ break;
+ case ECORE_FILTER_INNER_VLAN:
+ p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN;
+ break;
+ case ECORE_FILTER_INNER_PAIR:
+ p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR;
+ break;
+ case ECORE_FILTER_INNER_MAC_VNI_PAIR:
+ p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
+ break;
+ case ECORE_FILTER_MAC_VNI_PAIR:
+ p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR;
+ break;
+ case ECORE_FILTER_VNI:
+ p_first_filter->type = ETH_FILTER_TYPE_VNI;
+ break;
+ case ECORE_FILTER_UNUSED: /* @DPDK */
+ p_first_filter->type = MAX_ETH_FILTER_TYPE;
+ break;
+ }
+
+ if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR))
+ ecore_set_fw_mac_addr(&p_first_filter->mac_msb,
+ &p_first_filter->mac_mid,
+ &p_first_filter->mac_lsb,
+ (u8 *)p_filter_cmd->mac);
+
+ if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
+ p_first_filter->vlan_id = OSAL_CPU_TO_LE16(p_filter_cmd->vlan);
+
+ if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_VNI))
+ p_first_filter->vni = OSAL_CPU_TO_LE32(p_filter_cmd->vni);
+
+ if (p_filter_cmd->opcode == ECORE_FILTER_MOVE) {
+ p_second_filter->type = p_first_filter->type;
+ p_second_filter->mac_msb = p_first_filter->mac_msb;
+ p_second_filter->mac_mid = p_first_filter->mac_mid;
+ p_second_filter->mac_lsb = p_first_filter->mac_lsb;
+ p_second_filter->vlan_id = p_first_filter->vlan_id;
+ p_second_filter->vni = p_first_filter->vni;
+
+ p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
+
+ p_first_filter->vport_id = vport_to_remove_from;
+
+ p_second_filter->action = ETH_FILTER_ACTION_ADD;
+ p_second_filter->vport_id = vport_to_add_to;
+ } else if (p_filter_cmd->opcode == ECORE_FILTER_REPLACE) {
+ p_first_filter->vport_id = vport_to_add_to;
+ OSAL_MEMCPY(p_second_filter, p_first_filter,
+ sizeof(*p_second_filter));
+ p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
+ p_second_filter->action = ETH_FILTER_ACTION_ADD;
+ } else {
+ action = ecore_filter_action(p_filter_cmd->opcode);
+
+ if (action == MAX_ETH_FILTER_ACTION) {
+ DP_NOTICE(p_hwfn, true,
+ "%d is not supported yet\n",
+ p_filter_cmd->opcode);
+ return ECORE_NOTIMPL;
+ }
+
+ p_first_filter->action = action;
+ p_first_filter->vport_id =
+ (p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
+ vport_to_remove_from : vport_to_add_to;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_filter_ucast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data)
+{
+ struct vport_filter_update_ramrod_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct eth_filter_cmd_header *p_header;
+ enum _ecore_status_t rc;
+
+ rc = ecore_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
+ &p_ramrod, &p_ent,
+ comp_mode, p_comp_data);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
+ return rc;
+ }
+ p_header = &p_ramrod->filter_cmd_hdr;
+ p_header->assert_on_error = p_filter_cmd->assert_on_error;
+
+ rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc);
+ return rc;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
+ (p_filter_cmd->opcode == ECORE_FILTER_ADD) ? "ADD" :
+ ((p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
+ "REMOVE" :
+ ((p_filter_cmd->opcode == ECORE_FILTER_MOVE) ?
+ "MOVE" : "REPLACE")),
+ (p_filter_cmd->type == ECORE_FILTER_MAC) ? "MAC" :
+ ((p_filter_cmd->type == ECORE_FILTER_VLAN) ?
+ "VLAN" : "MAC & VLAN"),
+ p_ramrod->filter_cmd_hdr.cmd_cnt,
+ p_filter_cmd->is_rx_filter, p_filter_cmd->is_tx_filter);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
+ p_filter_cmd->vport_to_add_to,
+ p_filter_cmd->vport_to_remove_from,
+ p_filter_cmd->mac[0], p_filter_cmd->mac[1],
+ p_filter_cmd->mac[2], p_filter_cmd->mac[3],
+ p_filter_cmd->mac[4], p_filter_cmd->mac[5],
+ p_filter_cmd->vlan);
+
+ return ECORE_SUCCESS;
+}
+
+/*******************************************************************************
+ * Description:
+ *	Calculates a CRC32 over a buffer
+ *	Note: crc32_length MUST be a multiple of 8
+ * Return:
+ *	The computed CRC32, or the unmodified seed if the input is invalid
+ ******************************************************************************/
+static u32 ecore_calc_crc32c(u8 *crc32_packet,
+ u32 crc32_length, u32 crc32_seed, u8 complement)
+{
+ u32 byte = 0, bit = 0, crc32_result = crc32_seed;
+ u8 msb = 0, current_byte = 0;
+
+ if ((crc32_packet == OSAL_NULL) ||
+ (crc32_length == 0) || ((crc32_length % 8) != 0)) {
+ return crc32_result;
+ }
+
+ for (byte = 0; byte < crc32_length; byte++) {
+ current_byte = crc32_packet[byte];
+ for (bit = 0; bit < 8; bit++) {
+ msb = (u8)(crc32_result >> 31);
+ crc32_result = crc32_result << 1;
+ if (msb != (0x1 & (current_byte >> bit))) {
+ crc32_result = crc32_result ^ CRC32_POLY;
+ crc32_result |= 1;
+ }
+ }
+ }
+
+ return crc32_result;
+}
+
+static u32 ecore_crc32c_le(u32 seed, u8 *mac, u32 len)
+{
+ u32 packet_buf[2] = { 0 };
+
+ OSAL_MEMCPY((u8 *)(&packet_buf[0]), &mac[0], 6);
+ return ecore_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
+}
+
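+/* Map a MAC address to one of 256 approximate-multicast bins by taking the
+ * low byte of its CRC-32C; the matching bit is later set in the vport's
+ * approx_mcast bin vector.
+ */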
+u8 ecore_mcast_bin_from_mac(u8 *mac)
+{
+ u32 crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
+ mac, ETH_ALEN);
+
+ return crc & 0xff;
+}
+
+static enum _ecore_status_t
+ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_filter_mcast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data)
+{
+ unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
+ struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ u8 abs_vport_id = 0;
+ enum _ecore_status_t rc;
+ int i;
+
+ if (p_filter_cmd->opcode == ECORE_FILTER_ADD)
+ rc = ecore_fw_vport(p_hwfn,
+ p_filter_cmd->vport_to_add_to,
+ &abs_vport_id);
+ else
+ rc = ecore_fw_vport(p_hwfn,
+ p_filter_cmd->vport_to_remove_from,
+ &abs_vport_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_VPORT_UPDATE,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
+ return rc;
+ }
+
+ p_ramrod = &p_ent->ramrod.vport_update;
+ p_ramrod->common.update_approx_mcast_flg = 1;
+
+ /* explicitly clear out the entire vector */
+ OSAL_MEMSET(&p_ramrod->approx_mcast.bins,
+ 0, sizeof(p_ramrod->approx_mcast.bins));
+ OSAL_MEMSET(bins, 0, sizeof(unsigned long) *
+ ETH_MULTICAST_MAC_BINS_IN_REGS);
+ /* The filter ADD op is an explicit set op and removes
+ * any existing filters for the vport.
+ */
+ if (p_filter_cmd->opcode == ECORE_FILTER_ADD) {
+ for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
+ u32 bit;
+
+ bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
+ OSAL_SET_BIT(bit, bins);
+ }
+
+ /* Convert to correct endianness */
+ for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+ struct vport_update_ramrod_mcast *p_ramrod_bins;
+ u32 *p_bins = (u32 *)bins;
+
+ p_ramrod_bins = &p_ramrod->approx_mcast;
+ p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
+ }
+ }
+
+ p_ramrod->common.vport_id = abs_vport_id;
+
+ rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn, "Multicast filter command failed %d\n", rc);
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
+ struct ecore_filter_mcast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ int i;
+
+ /* only ADD and REMOVE operations are supported for multi-cast */
+ if ((p_filter_cmd->opcode != ECORE_FILTER_ADD &&
+ (p_filter_cmd->opcode != ECORE_FILTER_REMOVE)) ||
+ (p_filter_cmd->num_mc_addrs > ECORE_MAX_MC_ADDRS)) {
+ return ECORE_INVAL;
+ }
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+ u16 opaque_fid;
+
+ if (IS_VF(p_dev)) {
+ ecore_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
+ continue;
+ }
+
+ opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_eth_filter_mcast(p_hwfn,
+ opaque_fid,
+ p_filter_cmd,
+ comp_mode, p_comp_data);
+ if (rc != ECORE_SUCCESS)
+ break;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
+ struct ecore_filter_ucast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ int i;
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+ u16 opaque_fid;
+
+ if (IS_VF(p_dev)) {
+ rc = ecore_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
+ continue;
+ }
+
+ opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_eth_filter_ucast(p_hwfn,
+ opaque_fid,
+ p_filter_cmd,
+ comp_mode, p_comp_data);
+ if (rc != ECORE_SUCCESS)
+ break;
+ }
+
+ return rc;
+}
+
+/* Statistics related code */
+static void __ecore_get_vport_pstats_addrlen(struct ecore_hwfn *p_hwfn,
+ u32 *p_addr, u32 *p_len,
+ u16 statistics_bin)
+{
+ if (IS_PF(p_hwfn->p_dev)) {
+ *p_addr = BAR0_MAP_REG_PSDM_RAM +
+ PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
+ *p_len = sizeof(struct eth_pstorm_per_queue_stat);
+ } else {
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+ *p_addr = p_resp->pfdev_info.stats_info.pstats.address;
+ *p_len = p_resp->pfdev_info.stats_info.pstats.len;
+ }
+}
+
+static void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_eth_stats *p_stats,
+ u16 statistics_bin)
+{
+ struct eth_pstorm_per_queue_stat pstats;
+ u32 pstats_addr = 0, pstats_len = 0;
+
+ __ecore_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
+ statistics_bin);
+
+ OSAL_MEMSET(&pstats, 0, sizeof(pstats));
+ ecore_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
+
+ p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+ p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+ p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+ p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+ p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+ p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+ p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
+}
+
+static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_eth_stats *p_stats,
+ u16 statistics_bin)
+{
+ struct tstorm_per_port_stat tstats;
+ u32 tstats_addr, tstats_len;
+
+ if (IS_PF(p_hwfn->p_dev)) {
+ tstats_addr = BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
+ tstats_len = sizeof(struct tstorm_per_port_stat);
+ } else {
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+ tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
+ tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
+ }
+
+ OSAL_MEMSET(&tstats, 0, sizeof(tstats));
+ ecore_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
+
+ p_stats->mftag_filter_discards +=
+ HILO_64_REGPAIR(tstats.mftag_filter_discard);
+ p_stats->mac_filter_discards +=
+ HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
+}
+
+static void __ecore_get_vport_ustats_addrlen(struct ecore_hwfn *p_hwfn,
+ u32 *p_addr, u32 *p_len,
+ u16 statistics_bin)
+{
+ if (IS_PF(p_hwfn->p_dev)) {
+ *p_addr = BAR0_MAP_REG_USDM_RAM +
+ USTORM_QUEUE_STAT_OFFSET(statistics_bin);
+ *p_len = sizeof(struct eth_ustorm_per_queue_stat);
+ } else {
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+ *p_addr = p_resp->pfdev_info.stats_info.ustats.address;
+ *p_len = p_resp->pfdev_info.stats_info.ustats.len;
+ }
+}
+
+static void __ecore_get_vport_ustats(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_eth_stats *p_stats,
+ u16 statistics_bin)
+{
+ struct eth_ustorm_per_queue_stat ustats;
+ u32 ustats_addr = 0, ustats_len = 0;
+
+ __ecore_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
+ statistics_bin);
+
+ OSAL_MEMSET(&ustats, 0, sizeof(ustats));
+ ecore_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
+
+ p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+ p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+ p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+ p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+ p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+ p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+}
+
+static void __ecore_get_vport_mstats_addrlen(struct ecore_hwfn *p_hwfn,
+ u32 *p_addr, u32 *p_len,
+ u16 statistics_bin)
+{
+ if (IS_PF(p_hwfn->p_dev)) {
+ *p_addr = BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
+ *p_len = sizeof(struct eth_mstorm_per_queue_stat);
+ } else {
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+ *p_addr = p_resp->pfdev_info.stats_info.mstats.address;
+ *p_len = p_resp->pfdev_info.stats_info.mstats.len;
+ }
+}
+
+static void __ecore_get_vport_mstats(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_eth_stats *p_stats,
+ u16 statistics_bin)
+{
+ struct eth_mstorm_per_queue_stat mstats;
+ u32 mstats_addr = 0, mstats_len = 0;
+
+ __ecore_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
+ statistics_bin);
+
+ OSAL_MEMSET(&mstats, 0, sizeof(mstats));
+ ecore_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
+
+ p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
+ p_stats->packet_too_big_discard +=
+ HILO_64_REGPAIR(mstats.packet_too_big_discard);
+ p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
+ p_stats->tpa_coalesced_pkts +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
+ p_stats->tpa_coalesced_events +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_events);
+ p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
+ p_stats->tpa_coalesced_bytes +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
+}
+
+static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_eth_stats *p_stats)
+{
+ struct port_stats port_stats;
+ int j;
+
+ OSAL_MEMSET(&port_stats, 0, sizeof(port_stats));
+
+ ecore_memcpy_from(p_hwfn, p_ptt, &port_stats,
+ p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port, stats),
+ sizeof(port_stats));
+
+ p_stats->rx_64_byte_packets += port_stats.eth.r64;
+ p_stats->rx_65_to_127_byte_packets += port_stats.eth.r127;
+ p_stats->rx_128_to_255_byte_packets += port_stats.eth.r255;
+ p_stats->rx_256_to_511_byte_packets += port_stats.eth.r511;
+ p_stats->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
+ p_stats->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
+ p_stats->rx_1519_to_1522_byte_packets += port_stats.eth.r1522;
+ p_stats->rx_1519_to_2047_byte_packets += port_stats.eth.r2047;
+ p_stats->rx_2048_to_4095_byte_packets += port_stats.eth.r4095;
+ p_stats->rx_4096_to_9216_byte_packets += port_stats.eth.r9216;
+ p_stats->rx_9217_to_16383_byte_packets += port_stats.eth.r16383;
+ p_stats->rx_crc_errors += port_stats.eth.rfcs;
+ p_stats->rx_mac_crtl_frames += port_stats.eth.rxcf;
+ p_stats->rx_pause_frames += port_stats.eth.rxpf;
+ p_stats->rx_pfc_frames += port_stats.eth.rxpp;
+ p_stats->rx_align_errors += port_stats.eth.raln;
+ p_stats->rx_carrier_errors += port_stats.eth.rfcr;
+ p_stats->rx_oversize_packets += port_stats.eth.rovr;
+ p_stats->rx_jabbers += port_stats.eth.rjbr;
+ p_stats->rx_undersize_packets += port_stats.eth.rund;
+ p_stats->rx_fragments += port_stats.eth.rfrg;
+ p_stats->tx_64_byte_packets += port_stats.eth.t64;
+ p_stats->tx_65_to_127_byte_packets += port_stats.eth.t127;
+ p_stats->tx_128_to_255_byte_packets += port_stats.eth.t255;
+ p_stats->tx_256_to_511_byte_packets += port_stats.eth.t511;
+ p_stats->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
+ p_stats->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
+ p_stats->tx_1519_to_2047_byte_packets += port_stats.eth.t2047;
+ p_stats->tx_2048_to_4095_byte_packets += port_stats.eth.t4095;
+ p_stats->tx_4096_to_9216_byte_packets += port_stats.eth.t9216;
+ p_stats->tx_9217_to_16383_byte_packets += port_stats.eth.t16383;
+ p_stats->tx_pause_frames += port_stats.eth.txpf;
+ p_stats->tx_pfc_frames += port_stats.eth.txpp;
+ p_stats->tx_lpi_entry_count += port_stats.eth.tlpiec;
+ p_stats->tx_total_collisions += port_stats.eth.tncl;
+ p_stats->rx_mac_bytes += port_stats.eth.rbyte;
+ p_stats->rx_mac_uc_packets += port_stats.eth.rxuca;
+ p_stats->rx_mac_mc_packets += port_stats.eth.rxmca;
+ p_stats->rx_mac_bc_packets += port_stats.eth.rxbca;
+ p_stats->rx_mac_frames_ok += port_stats.eth.rxpok;
+ p_stats->tx_mac_bytes += port_stats.eth.tbyte;
+ p_stats->tx_mac_uc_packets += port_stats.eth.txuca;
+ p_stats->tx_mac_mc_packets += port_stats.eth.txmca;
+ p_stats->tx_mac_bc_packets += port_stats.eth.txbca;
+ p_stats->tx_mac_ctrl_frames += port_stats.eth.txcf;
+ for (j = 0; j < 8; j++) {
+ p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
+ p_stats->brb_discards += port_stats.brb.brb_discard[j];
+ }
+}
+
+void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_eth_stats *stats,
+ u16 statistics_bin, bool b_get_port_stats)
+{
+ __ecore_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
+ __ecore_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
+ __ecore_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
+ __ecore_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
+
+#ifndef ASIC_ONLY
+ /* Avoid getting PORT stats for emulation. */
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+ return;
+#endif
+
+ if (b_get_port_stats && p_hwfn->mcp_info)
+ __ecore_get_vport_port_stats(p_hwfn, p_ptt, stats);
+}
+
+static void _ecore_get_vport_stats(struct ecore_dev *p_dev,
+ struct ecore_eth_stats *stats)
+{
+ u8 fw_vport = 0;
+ int i;
+
+ OSAL_MEMSET(stats, 0, sizeof(*stats));
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+ struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
+ ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
+
+ if (IS_PF(p_dev)) {
+ /* The main vport index is relative first */
+ if (ecore_fw_vport(p_hwfn, 0, &fw_vport)) {
+ DP_ERR(p_hwfn, "No vport available!\n");
+ goto out;
+ }
+ }
+
+ if (IS_PF(p_dev) && !p_ptt) {
+ DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+ continue;
+ }
+
+ __ecore_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
+ IS_PF(p_dev) ? true : false);
+
+out:
+ if (IS_PF(p_dev) && p_ptt)
+ ecore_ptt_release(p_hwfn, p_ptt);
+ }
+}
+
+void ecore_get_vport_stats(struct ecore_dev *p_dev,
+ struct ecore_eth_stats *stats)
+{
+ u32 i;
+
+ if (!p_dev) {
+ OSAL_MEMSET(stats, 0, sizeof(*stats));
+ return;
+ }
+
+ _ecore_get_vport_stats(p_dev, stats);
+
+ if (!p_dev->reset_stats)
+ return;
+
+ /* Reduce the statistics baseline */
+ for (i = 0; i < sizeof(struct ecore_eth_stats) / sizeof(u64); i++)
+ ((u64 *)stats)[i] -= ((u64 *)p_dev->reset_stats)[i];
+}
+
+/* zeroes V-PORT specific portion of stats (Port stats remain untouched) */
+void ecore_reset_vport_stats(struct ecore_dev *p_dev)
+{
+ int i;
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+ struct eth_mstorm_per_queue_stat mstats;
+ struct eth_ustorm_per_queue_stat ustats;
+ struct eth_pstorm_per_queue_stat pstats;
+ struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
+ ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
+ u32 addr = 0, len = 0;
+
+ if (IS_PF(p_dev) && !p_ptt) {
+ DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+ continue;
+ }
+
+ OSAL_MEMSET(&mstats, 0, sizeof(mstats));
+ __ecore_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
+ ecore_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);
+
+ OSAL_MEMSET(&ustats, 0, sizeof(ustats));
+ __ecore_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
+ ecore_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);
+
+ OSAL_MEMSET(&pstats, 0, sizeof(pstats));
+ __ecore_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
+ ecore_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
+
+ if (IS_PF(p_dev))
+ ecore_ptt_release(p_hwfn, p_ptt);
+ }
+
+ /* PORT statistics are not necessarily reset, so we need to
+ * read and create a baseline for future statistics.
+ */
+ if (!p_dev->reset_stats)
+ DP_INFO(p_dev, "Reset stats not allocated\n");
+ else
+ _ecore_get_vport_stats(p_dev, p_dev->reset_stats);
+}
+
+void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_arfs_config_params *p_cfg_params)
+{
+ if (p_cfg_params->arfs_enable) {
+ ecore_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+ p_cfg_params->tcp,
+ p_cfg_params->udp,
+ p_cfg_params->ipv4,
+ p_cfg_params->ipv6);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n",
+ p_cfg_params->tcp ? "Enable" : "Disable",
+ p_cfg_params->udp ? "Enable" : "Disable",
+ p_cfg_params->ipv4 ? "Enable" : "Disable",
+ p_cfg_params->ipv6 ? "Enable" : "Disable");
+ } else {
+ ecore_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+ }
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Configured ARFS mode : %s\n",
+ p_cfg_params->arfs_enable ? "Enable" : "Disable");
+}
+
+enum _ecore_status_t
+ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_spq_comp_cb *p_cb,
+ dma_addr_t p_addr, u16 length,
+ u16 qid, u8 vport_id,
+ bool b_is_add)
+{
+ struct rx_update_gft_filter_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ u16 abs_rx_q_id = 0;
+ u8 abs_vport_id = 0;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+ rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ rc = ecore_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ if (p_cb) {
+ init_data.comp_mode = ECORE_SPQ_MODE_CB;
+ init_data.p_comp_data = p_cb;
+ } else {
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+ }
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_GFT_UPDATE_FILTER,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rx_update_gft;
+
+ DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
+ p_ramrod->pkt_hdr_length = OSAL_CPU_TO_LE16(length);
+ p_ramrod->rx_qid_or_action_icid = OSAL_CPU_TO_LE16(abs_rx_q_id);
+ p_ramrod->vport_id = abs_vport_id;
+ p_ramrod->filter_type = RFS_FILTER_TYPE;
+ p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER
+ : GFT_DELETE_FILTER;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "V[%0x], Q[%04x] - %s filter from 0x%lx [length %04xb]\n",
+ abs_vport_id, abs_rx_q_id,
+ b_is_add ? "Adding" : "Removing",
+ (unsigned long)p_addr, length);
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_l2.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_l2.h
new file mode 100644
index 00000000..7fe4cbcb
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_l2.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_L2_H__
+#define __ECORE_L2_H__
+
+
+#include "ecore.h"
+#include "ecore_hw.h"
+#include "ecore_spq.h"
+#include "ecore_l2_api.h"
+
+#define MAX_QUEUES_PER_QZONE (sizeof(unsigned long) * 8)
+#define ECORE_QUEUE_CID_PF (0xff)
+
+/* Additional parameters required for initialization of the queue_cid;
+ * these are relevant only for a PF initializing one for its VFs.
+ */
+struct ecore_queue_cid_vf_params {
+ /* Should match the VF's relative index */
+ u8 vfid;
+
+ /* 0-based queue index. Should reflect the relative qzone the
+ * VF thinks is associated with it [in its range].
+ */
+ u8 vf_qid;
+
+ /* Indicates a VF is legacy, making it differ in several things:
+ * - Producers would be placed in a different place.
+ * - Makes assumptions regarding the CIDs.
+ */
+ bool b_legacy;
+
+ /* For VFs, this index arrives via TLV to differentiate between
+ * different queues opened on the same qzone, and is passed
+ * [where the PF would have allocated it internally for its own].
+ */
+ u8 qid_usage_idx;
+};
+
+struct ecore_queue_cid {
+ /* 'Relative' is a relative term ;-). Usually the indices [not counting
+ * SBs] would be PF-relative, but there are some cases where that isn't
+ * the case - specifically for a PF configuring its VF indices it's
+ * possible some fields [E.g., stats-id] in 'rel' would already be abs.
+ */
+ struct ecore_queue_start_common_params rel;
+ struct ecore_queue_start_common_params abs;
+ u32 cid;
+ u16 opaque_fid;
+
+ /* VFs queues are mapped differently, so we need to know the
+ * relative queue associated with them [0-based].
+ * Notice this is relevant on the *PF* queue-cid of its VF's queues,
+ * and not on the VF itself.
+ */
+ u8 vfid;
+ u8 vf_qid;
+
+ /* We need an additional index to differentiate between queues opened
+ * for the same queue-zone, as VFs would have to communicate the info
+ * to the PF [otherwise the PF has no way to differentiate].
+ */
+ u8 qid_usage_idx;
+
+ /* Legacy VFs might have Rx producer located elsewhere */
+ bool b_legacy_vf;
+
+ struct ecore_hwfn *p_owner;
+};
+
+enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn);
+void ecore_l2_setup(struct ecore_hwfn *p_hwfn);
+void ecore_l2_free(struct ecore_hwfn *p_hwfn);
+
+void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid);
+
+struct ecore_queue_cid *
+ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ struct ecore_queue_cid_vf_params *p_vf_params);
+
+enum _ecore_status_t
+ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_start_params *p_params);
+
+/**
+ * @brief - Starts an Rx queue, when queue_cid is already prepared
+ *
+ * @param p_hwfn
+ * @param p_cid
+ * @param bd_max_bytes
+ * @param bd_chain_phys_addr
+ * @param cqe_pbl_addr
+ * @param cqe_pbl_size
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size);
+
+/**
+ * @brief - Starts a Tx queue, where queue_cid is already prepared
+ *
+ * @param p_hwfn
+ * @param p_cid
+ * @param pbl_addr
+ * @param pbl_size
+ * @param p_pq_params - parameters for choosing the PQ for this Tx queue
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ dma_addr_t pbl_addr, u16 pbl_size,
+ u16 pq_id);
+
+u8 ecore_mcast_bin_from_mac(u8 *mac);
+
+/**
+ * @brief - ecore_configure_rfs_ntuple_filter
+ *
+ * This ramrod should be used to add or remove an aRFS HW filter.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_cb	Used for ECORE_SPQ_MODE_CB, where the client initializes it
+ *		with a cookie and a callback function address; when not using
+ *		this mode the client must pass NULL.
+ * @param p_addr	The actual packet header to be filtered. It has to be
+ *			IO-mapped for reading prior to calling this
+ *			[contains the 4 tuples - src ip, dest ip,
+ *			src port, dest port].
+ * @param length	Length of the p_addr header up to past the transport
+ *			header.
+ * @param qid	Receive queue to which matching packets will be directed.
+ * @param vport_id
+ * @param b_is_add	Flag to add or remove the filter.
+ *
+ */
+enum _ecore_status_t
+ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_spq_comp_cb *p_cb,
+ dma_addr_t p_addr, u16 length,
+ u16 qid, u8 vport_id,
+ bool b_is_add);
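+
+/* Usage sketch (illustrative only, not taken from the driver): the caller is
+ * assumed to have DMA-mapped a packet-header template at 'hdr_phys' of
+ * 'hdr_len' bytes; 'hdr_phys', 'hdr_len', 'rxq_id' and 'vport' are
+ * hypothetical names. Passing OSAL_NULL for p_cb selects
+ * ECORE_SPQ_MODE_EBLOCK internally.
+ *
+ *	rc = ecore_configure_rfs_ntuple_filter(p_hwfn, p_ptt, OSAL_NULL,
+ *					       hdr_phys, hdr_len,
+ *					       rxq_id, vport, true);
+ */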
+#endif
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_l2_api.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_l2_api.h
new file mode 100644
index 00000000..d09f3c4a
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_l2_api.h
@@ -0,0 +1,439 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_L2_API_H__
+#define __ECORE_L2_API_H__
+
+#include "ecore_status.h"
+#include "ecore_sp_api.h"
+
+#ifndef __EXTRACT__LINUX__
+enum ecore_rss_caps {
+ ECORE_RSS_IPV4 = 0x1,
+ ECORE_RSS_IPV6 = 0x2,
+ ECORE_RSS_IPV4_TCP = 0x4,
+ ECORE_RSS_IPV6_TCP = 0x8,
+ ECORE_RSS_IPV4_UDP = 0x10,
+ ECORE_RSS_IPV6_UDP = 0x20,
+};
+
+/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
+#define ECORE_RSS_IND_TABLE_SIZE 128
+#define ECORE_RSS_KEY_SIZE 10 /* size in 32b chunks */
+#endif
+
+struct ecore_queue_start_common_params {
+ /* Should always be relative to entity sending this. */
+ u8 vport_id;
+ u16 queue_id;
+
+ /* Relative, but relevant only for PFs */
+ u8 stats_id;
+
+ /* These are always absolute */
+ u16 sb;
+ u8 sb_idx;
+};
+
+struct ecore_rxq_start_ret_params {
+ void OSAL_IOMEM *p_prod;
+ void *p_handle;
+};
+
+struct ecore_txq_start_ret_params {
+ void OSAL_IOMEM *p_doorbell;
+ void *p_handle;
+};
+
+struct ecore_rss_params {
+ u8 update_rss_config;
+ u8 rss_enable;
+ u8 rss_eng_id;
+ u8 update_rss_capabilities;
+ u8 update_rss_ind_table;
+ u8 update_rss_key;
+ u8 rss_caps;
+ u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */
+
+ /* Indirection table consist of rx queue handles */
+ void *rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
+ u32 rss_key[ECORE_RSS_KEY_SIZE];
+};
+
+struct ecore_sge_tpa_params {
+ u8 max_buffers_per_cqe;
+
+ u8 update_tpa_en_flg;
+ u8 tpa_ipv4_en_flg;
+ u8 tpa_ipv6_en_flg;
+ u8 tpa_ipv4_tunn_en_flg;
+ u8 tpa_ipv6_tunn_en_flg;
+
+ u8 update_tpa_param_flg;
+ u8 tpa_pkt_split_flg;
+ u8 tpa_hdr_data_split_flg;
+ u8 tpa_gro_consistent_flg;
+ u8 tpa_max_aggs_num;
+ u16 tpa_max_size;
+ u16 tpa_min_size_to_start;
+ u16 tpa_min_size_to_cont;
+};
+
+enum ecore_filter_opcode {
+ ECORE_FILTER_ADD,
+ ECORE_FILTER_REMOVE,
+ ECORE_FILTER_MOVE,
+ ECORE_FILTER_REPLACE, /* Delete all MACs and add new one instead */
+ ECORE_FILTER_FLUSH, /* Removes all filters */
+};
+
+enum ecore_filter_ucast_type {
+ ECORE_FILTER_MAC,
+ ECORE_FILTER_VLAN,
+ ECORE_FILTER_MAC_VLAN,
+ ECORE_FILTER_INNER_MAC,
+ ECORE_FILTER_INNER_VLAN,
+ ECORE_FILTER_INNER_PAIR,
+ ECORE_FILTER_INNER_MAC_VNI_PAIR,
+ ECORE_FILTER_MAC_VNI_PAIR,
+ ECORE_FILTER_VNI,
+ ECORE_FILTER_UNUSED, /* @DPDK */
+};
+
+struct ecore_filter_ucast {
+ enum ecore_filter_opcode opcode;
+ enum ecore_filter_ucast_type type;
+ u8 is_rx_filter;
+ u8 is_tx_filter;
+ u8 vport_to_add_to;
+ u8 vport_to_remove_from;
+ unsigned char mac[ETH_ALEN];
+ u8 assert_on_error;
+ u16 vlan;
+ u32 vni;
+};
+
+struct ecore_filter_mcast {
+ /* MOVE is not supported for multicast */
+ enum ecore_filter_opcode opcode;
+ u8 vport_to_add_to;
+ u8 vport_to_remove_from;
+ u8 num_mc_addrs;
+#define ECORE_MAX_MC_ADDRS 64
+ unsigned char mac[ECORE_MAX_MC_ADDRS][ETH_ALEN];
+};
+
+struct ecore_filter_accept_flags {
+ u8 update_rx_mode_config;
+ u8 update_tx_mode_config;
+ u8 rx_accept_filter;
+ u8 tx_accept_filter;
+#define ECORE_ACCEPT_NONE 0x01
+#define ECORE_ACCEPT_UCAST_MATCHED 0x02
+#define ECORE_ACCEPT_UCAST_UNMATCHED 0x04
+#define ECORE_ACCEPT_MCAST_MATCHED 0x08
+#define ECORE_ACCEPT_MCAST_UNMATCHED 0x10
+#define ECORE_ACCEPT_BCAST 0x20
+};
+
+struct ecore_arfs_config_params {
+ bool tcp;
+ bool udp;
+ bool ipv4;
+ bool ipv6;
+ bool arfs_enable; /* Enable or disable arfs mode */
+};
+
+/* Add / remove / move / remove-all unicast MAC-VLAN filters.
+ * FW will assert in the following cases, so driver should take care...:
+ * 1. Adding a filter to a full table.
+ * 2. Adding a filter which already exists on that vport.
+ * 3. Removing a filter which doesn't exist.
+ */
+
+enum _ecore_status_t
+ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
+ struct ecore_filter_ucast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data);
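+
+/* Usage sketch (illustrative, not taken from the driver): add a unicast MAC
+ * filter on vport 0 for both Rx and Tx; 'mac_addr' is a hypothetical 6-byte
+ * array.
+ *
+ *	struct ecore_filter_ucast ucast;
+ *
+ *	OSAL_MEMSET(&ucast, 0, sizeof(ucast));
+ *	ucast.opcode = ECORE_FILTER_ADD;
+ *	ucast.type = ECORE_FILTER_MAC;
+ *	ucast.is_rx_filter = 1;
+ *	ucast.is_tx_filter = 1;
+ *	ucast.vport_to_add_to = 0;
+ *	OSAL_MEMCPY(ucast.mac, mac_addr, ETH_ALEN);
+ *
+ *	rc = ecore_filter_ucast_cmd(p_dev, &ucast, ECORE_SPQ_MODE_EBLOCK,
+ *				    OSAL_NULL);
+ */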
+
+/* Add / remove / move multicast MAC filters. */
+enum _ecore_status_t
+ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
+ struct ecore_filter_mcast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data);
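+
+/* Usage sketch (illustrative): program a fresh multicast list. Note that an
+ * ADD command replaces any existing multicast filters on the vport (see
+ * ecore_sp_eth_filter_mcast() in ecore_l2.c); 'mc_macs' and 'num_macs' are
+ * hypothetical, with num_macs <= ECORE_MAX_MC_ADDRS.
+ *
+ *	struct ecore_filter_mcast mcast;
+ *
+ *	OSAL_MEMSET(&mcast, 0, sizeof(mcast));
+ *	mcast.opcode = ECORE_FILTER_ADD;
+ *	mcast.vport_to_add_to = 0;
+ *	mcast.num_mc_addrs = num_macs;
+ *	OSAL_MEMCPY(mcast.mac, mc_macs, num_macs * ETH_ALEN);
+ *
+ *	rc = ecore_filter_mcast_cmd(p_dev, &mcast, ECORE_SPQ_MODE_EBLOCK,
+ *				    OSAL_NULL);
+ */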
+
+/* Set "accept" filters */
+enum _ecore_status_t
+ecore_filter_accept_cmd(
+ struct ecore_dev *p_dev,
+ u8 vport,
+ struct ecore_filter_accept_flags accept_flags,
+ u8 update_accept_any_vlan,
+ u8 accept_any_vlan,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data);
+
+/**
+ * @brief ecore_eth_rx_queue_start - RX Queue Start Ramrod
+ *
+ * This ramrod initializes an RX Queue for a VPort. An Assert is generated if
+ * the VPort ID is not currently initialized.
+ *
+ * @param p_hwfn
+ * @param opaque_fid
+ * @param p_params Inputs; Relative for PF [SB being an exception]
+ * @param bd_max_bytes Maximum bytes that can be placed on a BD
+ * @param bd_chain_phys_addr Physical address of BDs for receive.
+ * @param cqe_pbl_addr Physical address of the CQE PBL Table.
+ * @param cqe_pbl_size Size of the CQE PBL Table
+ * @param p_ret_params Pointed struct to be filled with outputs.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ struct ecore_rxq_start_ret_params *p_ret_params);
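+
+/* Usage sketch (illustrative, with hypothetical DMA addresses and sizes
+ * 'bd_phys', 'cqe_pbl_phys', 'cqe_pbl_size', 'bd_max_bytes' and 'sb_id'):
+ *
+ *	struct ecore_queue_start_common_params params;
+ *	struct ecore_rxq_start_ret_params ret;
+ *
+ *	OSAL_MEMSET(&params, 0, sizeof(params));
+ *	params.vport_id = 0;
+ *	params.queue_id = 0;
+ *	params.stats_id = 0;
+ *	params.sb = sb_id;
+ *	params.sb_idx = 0;
+ *
+ *	rc = ecore_eth_rx_queue_start(p_hwfn, p_hwfn->hw_info.opaque_fid,
+ *				      &params, bd_max_bytes, bd_phys,
+ *				      cqe_pbl_phys, cqe_pbl_size, &ret);
+ *
+ * On success, ret.p_prod holds the Rx producer address (per the field name)
+ * and ret.p_handle is presumably the handle later passed to
+ * ecore_eth_rx_queue_stop().
+ */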
+
+/**
+ * @brief ecore_eth_rx_queue_stop - This ramrod closes an Rx queue
+ *
+ * @param p_hwfn
+ * @param p_rxq Handler of queue to close
+ * @param eq_completion_only	If true, completion will be on the EQe.
+ *				If false, completion will be on the EQe if
+ *				the p_hwfn opaque differs from the RXQ
+ *				opaque, otherwise on the CQe.
+ * @param cqe_completion	If true, completion will be received on
+ *				the CQe.
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
+ void *p_rxq,
+ bool eq_completion_only,
+ bool cqe_completion);
+
+/**
+ * @brief - TX Queue Start Ramrod
+ *
+ * This ramrod initializes a TX Queue for a VPort. An Assert is generated if
+ * the VPort is not currently initialized.
+ *
+ * @param p_hwfn
+ * @param opaque_fid
+ * @param p_params
+ * @param tc traffic class to use with this L2 txq
+ * @param pbl_addr address of the pbl array
+ * @param pbl_size number of entries in pbl
+ * @param p_ret_params Pointer to fill the return parameters in.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ u8 tc,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ struct ecore_txq_start_ret_params *p_ret_params);
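+
+/* Usage sketch (illustrative): mirrors the Rx case above; 'txq_params',
+ * 'pbl_phys' and 'pbl_entries' are hypothetical, and traffic class 0 is used.
+ *
+ *	rc = ecore_eth_tx_queue_start(p_hwfn, p_hwfn->hw_info.opaque_fid,
+ *				      &txq_params, 0, pbl_phys, pbl_entries,
+ *				      &tx_ret);
+ *
+ * tx_ret.p_doorbell would then be used for ringing the Tx doorbell of this
+ * queue (per the field name), and tx_ret.p_handle for a later
+ * ecore_eth_tx_queue_stop().
+ */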
+
+/**
+ * @brief ecore_eth_tx_queue_stop - closes a Tx queue
+ *
+ * @param p_hwfn
+ * @param p_txq - handle to Tx queue needed to be closed
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
+ void *p_txq);
+
+enum ecore_tpa_mode {
+ ECORE_TPA_MODE_NONE,
+ ECORE_TPA_MODE_RSC,
+ ECORE_TPA_MODE_GRO,
+ ECORE_TPA_MODE_MAX
+};
+
+struct ecore_sp_vport_start_params {
+ enum ecore_tpa_mode tpa_mode;
+ bool remove_inner_vlan; /* Inner VLAN removal is enabled */
+ bool tx_switching; /* Vport supports tx-switching */
+ bool handle_ptp_pkts; /* Handle PTP packets */
+ bool only_untagged; /* Untagged pkt control */
+ bool drop_ttl0; /* Drop packets with TTL = 0 */
+ u8 max_buffers_per_cqe;
+ u32 concrete_fid;
+ u16 opaque_fid;
+ u8 vport_id; /* VPORT ID */
+ u16 mtu; /* VPORT MTU */
+ bool zero_placement_offset;
+ bool check_mac;
+ bool check_ethtype;
+
+ /* Strict behavior on transmission errors */
+ bool b_err_illegal_vlan_mode;
+ bool b_err_illegal_inband_mode;
+ bool b_err_vlan_insert_with_inband;
+ bool b_err_small_pkt;
+ bool b_err_big_pkt;
+ bool b_err_anti_spoof;
+ bool b_err_ctrl_frame;
+};
+
+/**
+ * @brief ecore_sp_vport_start -
+ *
+ * This ramrod initializes a VPort. An Assert is generated if the Function ID
+ * of the VPort is not enabled.
+ *
+ * @param p_hwfn
+ * @param p_params VPORT start params
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_start_params *p_params);
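+
+/* Usage sketch (illustrative): start a vport with TPA disabled. All values
+ * besides the structure/function names are examples only.
+ *
+ *	struct ecore_sp_vport_start_params start;
+ *
+ *	OSAL_MEMSET(&start, 0, sizeof(start));
+ *	start.tpa_mode = ECORE_TPA_MODE_NONE;
+ *	start.vport_id = 0;
+ *	start.mtu = 1500;
+ *	start.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ *	start.drop_ttl0 = false;
+ *	start.max_buffers_per_cqe = 1;
+ *
+ *	rc = ecore_sp_vport_start(p_hwfn, &start);
+ */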
+
+struct ecore_sp_vport_update_params {
+ u16 opaque_fid;
+ u8 vport_id;
+ u8 update_vport_active_rx_flg;
+ u8 vport_active_rx_flg;
+ u8 update_vport_active_tx_flg;
+ u8 vport_active_tx_flg;
+ u8 update_inner_vlan_removal_flg;
+ u8 inner_vlan_removal_flg;
+ u8 silent_vlan_removal_flg;
+ u8 update_default_vlan_enable_flg;
+ u8 default_vlan_enable_flg;
+ u8 update_default_vlan_flg;
+ u16 default_vlan;
+ u8 update_tx_switching_flg;
+ u8 tx_switching_flg;
+ u8 update_approx_mcast_flg;
+ u8 update_anti_spoofing_en_flg;
+ u8 anti_spoofing_en;
+ u8 update_accept_any_vlan_flg;
+ u8 accept_any_vlan;
+ unsigned long bins[8];
+ struct ecore_rss_params *rss_params;
+ struct ecore_filter_accept_flags accept_flags;
+ struct ecore_sge_tpa_params *sge_tpa_params;
+ /* MTU change - notice this requires the vport to be disabled.
+ * If non-zero, the value will be used.
+ */
+ u16 mtu;
+};
+
+/**
+ * @brief ecore_sp_vport_update -
+ *
+ * This ramrod updates the parameters of the VPort. Every field can be updated
+ * independently, according to flags.
+ *
+ * This ramrod is also used to set the VPort state to active after creation.
+ * An Assert is generated if the VPort does not contain an RX queue.
+ *
+ * @param p_hwfn
+ * @param p_params
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_params,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data);
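+
+/* Usage sketch (illustrative): activate a vport right after starting it.
+ *
+ *	struct ecore_sp_vport_update_params update;
+ *
+ *	OSAL_MEMSET(&update, 0, sizeof(update));
+ *	update.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ *	update.vport_id = 0;
+ *	update.update_vport_active_rx_flg = 1;
+ *	update.vport_active_rx_flg = 1;
+ *	update.update_vport_active_tx_flg = 1;
+ *	update.vport_active_tx_flg = 1;
+ *
+ *	rc = ecore_sp_vport_update(p_hwfn, &update, ECORE_SPQ_MODE_EBLOCK,
+ *				   OSAL_NULL);
+ */
+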
+/**
+ * @brief ecore_sp_vport_stop -
+ *
+ * This ramrod closes a VPort after all its RX and TX queues are terminated.
+ * An Assert is generated if any queues are left open.
+ *
+ * @param p_hwfn
+ * @param opaque_fid
+ * @param vport_id VPort ID
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u8 vport_id);
+
+enum _ecore_status_t
+ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_filter_ucast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data);
+
+/**
+ * @brief ecore_sp_eth_rx_queues_update -
+ *
+ * This ramrod updates an RX queue. It is used for setting the active state
+ * of the queue and updating the TPA and SGE parameters.
+ *
+ * @note Final phase API.
+ *
+ * @param p_hwfn
+ * @param pp_rxq_handlers An array of queue handlers to be updated.
+ * @param num_rxqs number of queues to update.
+ * @param complete_cqe_flg Post completion to the CQE Ring if set
+ * @param complete_event_flg Post completion to the Event Ring if set
+ * @param comp_mode
+ * @param p_comp_data
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t
+ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
+ void **pp_rxq_handlers,
+ u8 num_rxqs,
+ u8 complete_cqe_flg,
+ u8 complete_event_flg,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data);
+
+void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_eth_stats *stats,
+ u16 statistics_bin, bool b_get_port_stats);
+
+void ecore_get_vport_stats(struct ecore_dev *p_dev,
+ struct ecore_eth_stats *stats);
+
+void ecore_reset_vport_stats(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_arfs_mode_configure -
+ *
+ * Enable or disable RFS mode. To enable it, at least one of tcp/udp and
+ * at least one of ipv4/ipv6 must be set to true.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_cfg_params	aRFS mode configuration parameters.
+ *
+ */
+void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_arfs_config_params *p_cfg_params);
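+
+/* Usage sketch (illustrative): enable aRFS for TCP over IPv4 only.
+ *
+ *	struct ecore_arfs_config_params arfs;
+ *
+ *	OSAL_MEMSET(&arfs, 0, sizeof(arfs));
+ *	arfs.arfs_enable = true;
+ *	arfs.tcp = true;
+ *	arfs.ipv4 = true;
+ *
+ *	ecore_arfs_mode_configure(p_hwfn, p_ptt, &arfs);
+ */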
+#endif
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_mcp.c b/src/seastar/dpdk/drivers/net/qede/base/ecore_mcp.c
new file mode 100644
index 00000000..a834ac74
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_mcp.c
@@ -0,0 +1,3208 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "ecore.h"
+#include "ecore_status.h"
+#include "ecore_mcp.h"
+#include "mcp_public.h"
+#include "reg_addr.h"
+#include "ecore_hw.h"
+#include "ecore_init_fw_funcs.h"
+#include "ecore_sriov.h"
+#include "ecore_vf.h"
+#include "ecore_iov_api.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore_iro.h"
+#include "ecore_dcbx.h"
+
+#define CHIP_MCP_RESP_ITER_US 10
+#define EMUL_MCP_RESP_ITER_US (1000 * 1000)
+
+#define ECORE_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
+#define ECORE_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
+
+#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
+ ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
+ _val)
+
+#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
+ ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
+
+#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
+ DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
+ OFFSETOF(struct public_drv_mb, _field), _val)
+
+#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
+ DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
+ OFFSETOF(struct public_drv_mb, _field))
+
+#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
+ DRV_ID_PDA_COMP_VER_SHIFT)
+
+#define MCP_BYTES_PER_MBIT_SHIFT 17
+
+#ifndef ASIC_ONLY
+static int loaded;
+static int loaded_port[MAX_NUM_PORTS] = { 0 };
+#endif
+
+bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
+{
+ if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
+ return false;
+ return true;
+}
+
+void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_PORT);
+ u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+
+ p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
+ MFW_PORT(p_hwfn));
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "port_addr = 0x%x, port_id 0x%02x\n",
+ p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
+}
+
+void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
+ OSAL_BE32 tmp;
+ u32 i;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
+ return;
+#endif
+
+ if (!p_hwfn->mcp_info->public_base)
+ return;
+
+ for (i = 0; i < length; i++) {
+ tmp = ecore_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->mfw_mb_addr +
+ (i << 2) + sizeof(u32));
+
+ ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
+ OSAL_BE32_TO_CPU(tmp);
+ }
+}
+
+enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
+{
+ if (p_hwfn->mcp_info) {
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
+ OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->lock);
+ }
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
+ u32 drv_mb_offsize, mfw_mb_offsize;
+ u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
+ p_info->public_base = 0;
+ return ECORE_INVAL;
+ }
+#endif
+
+ p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
+ if (!p_info->public_base)
+ return ECORE_INVAL;
+
+ p_info->public_base |= GRCBASE_MCP;
+
+ /* Calculate the driver and MFW mailbox address */
+ drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
+ SECTION_OFFSIZE_ADDR(p_info->public_base,
+ PUBLIC_DRV_MB));
+ p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x"
+ " mcp_pf_id = 0x%x\n",
+ drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
+
+ /* Set the MFW MB address */
+ mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
+ SECTION_OFFSIZE_ADDR(p_info->public_base,
+ PUBLIC_MFW_MB));
+ p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
+ p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
+ p_info->mfw_mb_addr);
+
+ /* Get the current driver mailbox sequence before sending
+ * the first command
+ */
+ p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK;
+
+ /* Get current FW pulse sequence */
+ p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
+ DRV_PULSE_SEQ_MASK;
+
+ p_info->mcp_hist = (u16)ecore_rd(p_hwfn, p_ptt,
+ MISCS_REG_GENERIC_POR_0);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_mcp_info *p_info;
+ u32 size;
+
+ /* Allocate mcp_info structure */
+ p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(*p_hwfn->mcp_info));
+ if (!p_hwfn->mcp_info)
+ goto err;
+ p_info = p_hwfn->mcp_info;
+
+ if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
+ /* Do not free mcp_info here, since public_base indicates that
+ * the MCP is not initialized
+ */
+ return ECORE_SUCCESS;
+ }
+
+ size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
+ p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
+ p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
+ if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
+ goto err;
+
+ /* Initialize the MFW spinlock */
+ OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->lock);
+ OSAL_SPIN_LOCK_INIT(&p_info->lock);
+
+ return ECORE_SUCCESS;
+
+err:
+ DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
+ ecore_mcp_free(p_hwfn);
+ return ECORE_NOMEM;
+}
+
+/* Locks the MFW mailbox of a PF to ensure a single access.
+ * The lock is achieved in most cases by holding a spinlock, causing other
+ * threads to wait till a previous access is done.
+ * In some cases (currently when [UN]LOAD_REQ commands are sent), the single
+ * access is achieved by setting a blocking flag, which causes other
+ * competing contexts' mailbox sends to fail.
+ */
+static enum _ecore_status_t ecore_mcp_mb_lock(struct ecore_hwfn *p_hwfn,
+ u32 cmd)
+{
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);
+
+ /* The spinlock shouldn't be acquired when the mailbox command is
+ * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
+ * pending [UN]LOAD_REQ command of another PF together with a spinlock
+ * (i.e. interrupts are disabled) - can lead to a deadlock.
+ * It is assumed that for a single PF, no other mailbox commands can be
+ * sent from another context while sending LOAD_REQ, and that any
+ * parallel commands to UNLOAD_REQ can be cancelled.
+ */
+ if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
+ p_hwfn->mcp_info->block_mb_sending = false;
+
+ if (p_hwfn->mcp_info->block_mb_sending) {
+ DP_NOTICE(p_hwfn, false,
+ "Trying to send a MFW mailbox command [0x%x]"
+ " in parallel to [UN]LOAD_REQ. Aborting.\n",
+ cmd);
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
+ return ECORE_BUSY;
+ }
+
+ if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
+ p_hwfn->mcp_info->block_mb_sending = true;
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static void ecore_mcp_mb_unlock(struct ecore_hwfn *p_hwfn, u32 cmd)
+{
+ if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
+}
+
+enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
+ u32 delay = CHIP_MCP_RESP_ITER_US;
+ u32 org_mcp_reset_seq, cnt = 0;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+ delay = EMUL_MCP_RESP_ITER_US;
+#endif
+
+ /* Ensure that only a single thread is accessing the mailbox at any
+ * given time.
+ */
+ rc = ecore_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Set drv command along with the updated sequence */
+ org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
+
+ do {
+ /* Wait for MFW response */
+ OSAL_UDELAY(delay);
+ /* Give the FW up to 500 msec (50*1000*10usec) */
+ } while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
+ MISCS_REG_GENERIC_POR_0)) &&
+ (cnt++ < ECORE_MCP_RESET_RETRIES));
+
+ if (org_mcp_reset_seq !=
+ ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "MCP was reset after %d usec\n", cnt * delay);
+ } else {
+ DP_ERR(p_hwfn, "Failed to reset MCP\n");
+ rc = ECORE_AGAIN;
+ }
+
+ ecore_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
+
+ return rc;
+}
+
+static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 cmd, u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param)
+{
+ u32 delay = CHIP_MCP_RESP_ITER_US;
+ u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
+ u32 seq, cnt = 1, actual_mb_seq;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+ delay = EMUL_MCP_RESP_ITER_US;
+ /* There is a built-in delay of 100usec in each MFW response read */
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
+ max_retries /= 10;
+#endif
+
+ /* Get actual driver mailbox sequence */
+ actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK;
+
+ /* Use MCP history register to check if MCP reset occurred between
+ * init time and now.
+ */
+ if (p_hwfn->mcp_info->mcp_hist !=
+ ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Rereading MCP offsets\n");
+ ecore_load_mcp_offsets(p_hwfn, p_ptt);
+ ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
+ }
+ seq = ++p_hwfn->mcp_info->drv_mb_seq;
+
+ /* Set drv param */
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
+
+ /* Set drv command along with the updated sequence */
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));
+
+ do {
+ /* Wait for MFW response */
+ OSAL_UDELAY(delay);
+ *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+
+ /* Give the FW up to 5 seconds (500*1000*10usec) */
+ } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
+ (cnt++ < max_retries));
+
+ /* Is this a reply to our command? */
+ if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
+ *o_mcp_resp &= FW_MSG_CODE_MASK;
+ /* Get the MCP param */
+ *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
+ } else {
+ /* FW BUG! */
+ DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
+ cmd, param);
+ *o_mcp_resp = 0;
+ rc = ECORE_AGAIN;
+ ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
+ }
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mcp_mb_params *p_mb_params)
+{
+ union drv_union_data union_data;
+ u32 union_data_addr;
+ enum _ecore_status_t rc;
+
+ /* MCP not initialized */
+ if (!ecore_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
+ return ECORE_BUSY;
+ }
+
+ if (p_mb_params->data_src_size > sizeof(union_data) ||
+ p_mb_params->data_dst_size > sizeof(union_data)) {
+ DP_ERR(p_hwfn,
+ "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
+ p_mb_params->data_src_size, p_mb_params->data_dst_size,
+ sizeof(union_data));
+ return ECORE_INVAL;
+ }
+
+ union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+ OFFSETOF(struct public_drv_mb, union_data);
+
+ /* Ensure that only a single thread is accessing the mailbox at any
+ * given time.
+ */
+ rc = ecore_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ OSAL_MEM_ZERO(&union_data, sizeof(union_data));
+ if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
+ OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
+ p_mb_params->data_src_size);
+ ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
+ sizeof(union_data));
+
+ rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
+ p_mb_params->param, &p_mb_params->mcp_resp,
+ &p_mb_params->mcp_param);
+
+ if (p_mb_params->p_data_dst != OSAL_NULL &&
+ p_mb_params->data_dst_size)
+ ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
+ union_data_addr, p_mb_params->data_dst_size);
+
+ ecore_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 cmd, u32 param,
+ u32 *o_mcp_resp, u32 *o_mcp_param)
+{
+ struct ecore_mcp_mb_params mb_params;
+ enum _ecore_status_t rc;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
+ loaded--;
+ loaded_port[p_hwfn->port_id]--;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
+ loaded);
+ }
+ return ECORE_SUCCESS;
+ }
+#endif
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = cmd;
+ mb_params.param = param;
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *o_mcp_resp = mb_params.mcp_resp;
+ *o_mcp_param = mb_params.mcp_param;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param,
+ u32 i_txn_size, u32 *i_buf)
+{
+ struct ecore_mcp_mb_params mb_params;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = cmd;
+ mb_params.param = param;
+ mb_params.p_data_src = i_buf;
+ mb_params.data_src_size = (u8)i_txn_size;
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *o_mcp_resp = mb_params.mcp_resp;
+ *o_mcp_param = mb_params.mcp_param;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param,
+ u32 *o_txn_size, u32 *o_buf)
+{
+ struct ecore_mcp_mb_params mb_params;
+ u8 raw_data[MCP_DRV_NVM_BUF_LEN];
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = cmd;
+ mb_params.param = param;
+ mb_params.p_data_dst = raw_data;
+
+ /* Use the maximal value since the actual one is part of the response */
+ mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
+
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *o_mcp_resp = mb_params.mcp_resp;
+ *o_mcp_param = mb_params.mcp_param;
+
+ *o_txn_size = *o_mcp_param;
+ /* @DPDK */
+ OSAL_MEMCPY(o_buf, raw_data, RTE_MIN(*o_txn_size, MCP_DRV_NVM_BUF_LEN));
+
+ return ECORE_SUCCESS;
+}
+
+#ifndef ASIC_ONLY
+static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
+ u32 *p_load_code)
+{
+ static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
+
+ if (!loaded)
+ load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
+ else if (!loaded_port[p_hwfn->port_id])
+ load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
+ else
+ load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
+
+ /* On CMT, always tell that it's engine */
+ if (p_hwfn->p_dev->num_hwfns > 1)
+ load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
+
+ *p_load_code = load_phase;
+ loaded++;
+ loaded_port[p_hwfn->port_id]++;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
+ *p_load_code, loaded, p_hwfn->port_id,
+ loaded_port[p_hwfn->port_id]);
+}
+#endif
+
+static bool ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role)
+{
+ return (drv_role == DRV_ROLE_OS &&
+ exist_drv_role == DRV_ROLE_PREBOOT) ||
+ (drv_role == DRV_ROLE_KDUMP && exist_drv_role == DRV_ROLE_OS);
+}
+
+static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 resp = 0, param = 0;
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
+ &resp, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send cancel load request, rc = %d\n", rc);
+
+ return rc;
+}
+
+#define CONFIG_ECORE_L2_BITMAP_IDX (0x1 << 0)
+#define CONFIG_ECORE_SRIOV_BITMAP_IDX (0x1 << 1)
+#define CONFIG_ECORE_ROCE_BITMAP_IDX (0x1 << 2)
+#define CONFIG_ECORE_IWARP_BITMAP_IDX (0x1 << 3)
+#define CONFIG_ECORE_FCOE_BITMAP_IDX (0x1 << 4)
+#define CONFIG_ECORE_ISCSI_BITMAP_IDX (0x1 << 5)
+#define CONFIG_ECORE_LL2_BITMAP_IDX (0x1 << 6)
+
+static u32 ecore_get_config_bitmap(void)
+{
+ u32 config_bitmap = 0x0;
+
+#ifdef CONFIG_ECORE_L2
+ config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_SRIOV
+ config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_ROCE
+ config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_IWARP
+ config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_FCOE
+ config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_ISCSI
+ config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_LL2
+ config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
+#endif
+
+ return config_bitmap;
+}
+
+struct ecore_load_req_in_params {
+ u8 hsi_ver;
+#define ECORE_LOAD_REQ_HSI_VER_DEFAULT 0
+#define ECORE_LOAD_REQ_HSI_VER_1 1
+ u32 drv_ver_0;
+ u32 drv_ver_1;
+ u32 fw_ver;
+ u8 drv_role;
+ u8 timeout_val;
+ u8 force_cmd;
+ bool avoid_eng_reset;
+};
+
+struct ecore_load_req_out_params {
+ u32 load_code;
+ u32 exist_drv_ver_0;
+ u32 exist_drv_ver_1;
+ u32 exist_fw_ver;
+ u8 exist_drv_role;
+ u8 mfw_hsi_ver;
+ bool drv_exists;
+};
+
+static enum _ecore_status_t
+__ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_load_req_in_params *p_in_params,
+ struct ecore_load_req_out_params *p_out_params)
+{
+ struct ecore_mcp_mb_params mb_params;
+ struct load_req_stc load_req;
+ struct load_rsp_stc load_rsp;
+ u32 hsi_ver;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&load_req, sizeof(load_req));
+ load_req.drv_ver_0 = p_in_params->drv_ver_0;
+ load_req.drv_ver_1 = p_in_params->drv_ver_1;
+ load_req.fw_ver = p_in_params->fw_ver;
+ ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE,
+ p_in_params->drv_role);
+ ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
+ p_in_params->timeout_val);
+ ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
+ p_in_params->force_cmd);
+ ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
+ p_in_params->avoid_eng_reset);
+
+ hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
+ DRV_ID_MCP_HSI_VER_CURRENT :
+ (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
+ mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
+ mb_params.p_data_src = &load_req;
+ mb_params.data_src_size = sizeof(load_req);
+ mb_params.p_data_dst = &load_rsp;
+ mb_params.data_dst_size = sizeof(load_rsp);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
+ mb_params.param,
+ ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
+ ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
+ ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
+ ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
+
+ if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
+ load_req.drv_ver_0, load_req.drv_ver_1,
+ load_req.fw_ver, load_req.misc0,
+ ECORE_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
+ ECORE_MFW_GET_FIELD(load_req.misc0,
+ LOAD_REQ_LOCK_TO),
+ ECORE_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
+ ECORE_MFW_GET_FIELD(load_req.misc0,
+ LOAD_REQ_FLAGS0));
+
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send load request, rc = %d\n", rc);
+ return rc;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
+ p_out_params->load_code = mb_params.mcp_resp;
+
+ if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
+ p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
+ load_rsp.drv_ver_0, load_rsp.drv_ver_1,
+ load_rsp.fw_ver, load_rsp.misc0,
+ ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
+ ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
+ ECORE_MFW_GET_FIELD(load_rsp.misc0,
+ LOAD_RSP_FLAGS0));
+
+ p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
+ p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
+ p_out_params->exist_fw_ver = load_rsp.fw_ver;
+ p_out_params->exist_drv_role =
+ ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
+ p_out_params->mfw_hsi_ver =
+ ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
+ p_out_params->drv_exists =
+ ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
+ LOAD_RSP_FLAGS0_DRV_EXISTS;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t eocre_get_mfw_drv_role(struct ecore_hwfn *p_hwfn,
+ enum ecore_drv_role drv_role,
+ u8 *p_mfw_drv_role)
+{
+ switch (drv_role) {
+ case ECORE_DRV_ROLE_OS:
+ *p_mfw_drv_role = DRV_ROLE_OS;
+ break;
+ case ECORE_DRV_ROLE_KDUMP:
+ *p_mfw_drv_role = DRV_ROLE_KDUMP;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum ecore_load_req_force {
+ ECORE_LOAD_REQ_FORCE_NONE,
+ ECORE_LOAD_REQ_FORCE_PF,
+ ECORE_LOAD_REQ_FORCE_ALL,
+};
+
+static enum _ecore_status_t
+ecore_get_mfw_force_cmd(struct ecore_hwfn *p_hwfn,
+ enum ecore_load_req_force force_cmd,
+ u8 *p_mfw_force_cmd)
+{
+ switch (force_cmd) {
+ case ECORE_LOAD_REQ_FORCE_NONE:
+ *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
+ break;
+ case ECORE_LOAD_REQ_FORCE_PF:
+ *p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
+ break;
+ case ECORE_LOAD_REQ_FORCE_ALL:
+ *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Unexpected force value %d\n", force_cmd);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
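+/* Load request flow implemented below:
+ * 1) Send a LOAD_REQ with the default HSI version and no force command.
+ * 2) If the MFW answers LOAD_REFUSED_HSI_1, resend the request using HSI
+ *    version 1.
+ * 3) If it answers LOAD_REFUSED_REQUIRES_FORCE, resend with FORCE_ALL when
+ *    the driver role allows it, otherwise cancel the request and fail.
+ */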
+enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_load_req_params *p_params)
+{
+ struct ecore_load_req_out_params out_params;
+ struct ecore_load_req_in_params in_params;
+ u8 mfw_drv_role, mfw_force_cmd;
+ enum _ecore_status_t rc;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
+ return ECORE_SUCCESS;
+ }
+#endif
+
+ OSAL_MEM_ZERO(&in_params, sizeof(in_params));
+ in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
+ in_params.drv_ver_0 = ECORE_VERSION;
+ in_params.drv_ver_1 = ecore_get_config_bitmap();
+ in_params.fw_ver = STORM_FW_VERSION;
+ rc = ecore_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ in_params.drv_role = mfw_drv_role;
+ in_params.timeout_val = p_params->timeout_val;
+ rc = ecore_get_mfw_force_cmd(p_hwfn, ECORE_LOAD_REQ_FORCE_NONE,
+ &mfw_force_cmd);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ in_params.force_cmd = mfw_force_cmd;
+ in_params.avoid_eng_reset = p_params->avoid_eng_reset;
+
+ OSAL_MEM_ZERO(&out_params, sizeof(out_params));
+ rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* First handle cases where another load request should/might be sent:
+ * - MFW expects the old interface [HSI version = 1]
+ * - MFW responds that a force load request is required
+ */
+ if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
+ DP_INFO(p_hwfn,
+ "MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");
+
+ /* The previous load request left the mailbox blocked for sending */
+ p_hwfn->mcp_info->block_mb_sending = false;
+
+ in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
+ OSAL_MEM_ZERO(&out_params, sizeof(out_params));
+ rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
+ &out_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ } else if (out_params.load_code ==
+ FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
+ /* The previous load request left the mailbox blocked for sending */
+ p_hwfn->mcp_info->block_mb_sending = false;
+
+ if (ecore_mcp_can_force_load(in_params.drv_role,
+ out_params.exist_drv_role)) {
+ DP_INFO(p_hwfn,
+ "A force load is required [existing: role %d, fw_ver 0x%08x, drv_ver 0x%08x_0x%08x]. Sending a force load request.\n",
+ out_params.exist_drv_role,
+ out_params.exist_fw_ver,
+ out_params.exist_drv_ver_0,
+ out_params.exist_drv_ver_1);
+
+ rc = ecore_get_mfw_force_cmd(p_hwfn,
+ ECORE_LOAD_REQ_FORCE_ALL,
+ &mfw_force_cmd);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ in_params.force_cmd = mfw_force_cmd;
+ OSAL_MEM_ZERO(&out_params, sizeof(out_params));
+ rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
+ &out_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ } else {
+ DP_NOTICE(p_hwfn, false,
+ "A force load is required [existing: role %d, fw_ver 0x%08x, drv_ver 0x%08x_0x%08x]. Avoiding to prevent disruption of active PFs.\n",
+ out_params.exist_drv_role,
+ out_params.exist_fw_ver,
+ out_params.exist_drv_ver_0,
+ out_params.exist_drv_ver_1);
+
+ ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
+ return ECORE_BUSY;
+ }
+ }
+
+ /* Now handle the other types of responses.
+ * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
+ * expected here after the additional revised load requests were sent.
+ */
+ switch (out_params.load_code) {
+ case FW_MSG_CODE_DRV_LOAD_ENGINE:
+ case FW_MSG_CODE_DRV_LOAD_PORT:
+ case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+ if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
+ out_params.drv_exists) {
+ /* The role and fw/driver version match, but the PF is
+ * already loaded and has not been unloaded gracefully.
+ * This is unexpected since a quasi-FLR request was
+ * previously sent as part of ecore_hw_prepare().
+ */
+ DP_NOTICE(p_hwfn, false,
+ "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
+ return ECORE_INVAL;
+ }
+ break;
+ case FW_MSG_CODE_DRV_LOAD_REFUSED_PDA:
+ case FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG:
+ case FW_MSG_CODE_DRV_LOAD_REFUSED_HSI:
+ case FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT:
+ DP_NOTICE(p_hwfn, false,
+ "MFW refused a load request [resp 0x%08x]. Aborting.\n",
+ out_params.load_code);
+ return ECORE_BUSY;
+ default:
+ DP_NOTICE(p_hwfn, false,
+ "Unexpected response to load request [resp 0x%08x]. Aborting.\n",
+ out_params.load_code);
+ return ECORE_BUSY;
+ }
+
+ p_params->load_code = out_params.load_code;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 wol_param, mcp_resp, mcp_param;
+
+ /* @DPDK */
+ wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
+
+ return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
+ &mcp_resp, &mcp_param);
+}
+
+enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_mcp_mb_params mb_params;
+ struct mcp_mac wol_mac;
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;
+
+ return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+}
+
+static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_PATH);
+ u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+ u32 path_addr = SECTION_ADDR(mfw_path_offsize,
+ ECORE_PATH_ID(p_hwfn));
+ u32 disabled_vfs[VF_MAX_STATIC / 32];
+ int i;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Reading Disabled VF information from [offset %08x],"
+ " path_addr %08x\n",
+ mfw_path_offsize, path_addr);
+
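+ /* mcp_vf_disabled is a bitmap of VF_MAX_STATIC bits, 32 VFs per dword;
+ * read it in full from the path section of the MFW public data.
+ */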
+ for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
+ disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
+ path_addr +
+ OFFSETOF(struct public_path,
+ mcp_vf_disabled) +
+ sizeof(u32) * i);
+ DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
+ "FLR-ed VFs [%08x,...,%08x] - %08x\n",
+ i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
+ }
+
+ if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
+ OSAL_VF_FLR_UPDATE(p_hwfn);
+}
+
+enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *vfs_to_ack)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_FUNC);
+ u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+ u32 func_addr = SECTION_ADDR(mfw_func_offsize,
+ MCP_PF_ID(p_hwfn));
+ struct ecore_mcp_mb_params mb_params;
+ enum _ecore_status_t rc;
+ int i;
+
+ for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+ DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
+ "Acking VFs [%08x,...,%08x] - %08x\n",
+ i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
+
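+ /* The ack is the same bitmap, VF_MAX_STATIC / 8 bytes, carried in the
+ * mailbox union data of the VF_DISABLED_DONE command.
+ */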
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
+ mb_params.p_data_src = vfs_to_ack;
+ mb_params.data_src_size = VF_MAX_STATIC / 8;
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
+ &mb_params);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to pass ACK for VF flr to MFW\n");
+ return ECORE_TIMEOUT;
+ }
+
+ /* TMP - clear the ACK bits; should be done by MFW */
+ for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+ ecore_wr(p_hwfn, p_ptt,
+ func_addr +
+ OFFSETOF(struct public_func, drv_ack_vf_disabled) +
+ i * sizeof(u32), 0);
+
+ return rc;
+}
+
+static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 transceiver_state;
+
+ transceiver_state = ecore_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port,
+ transceiver_data));
+
+ DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
+ "Received transceiver state update [0x%08x] from mfw"
+ " [Addr 0x%x]\n",
+ transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port,
+ transceiver_data)));
+
+ transceiver_state = GET_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE);
+
+ if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
+ DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
+ else
+ DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
+}
+
+static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool b_reset)
+{
+ struct ecore_mcp_link_state *p_link;
+ u8 max_bw, min_bw;
+ u32 status = 0;
+
+ p_link = &p_hwfn->mcp_info->link_output;
+ OSAL_MEMSET(p_link, 0, sizeof(*p_link));
+ if (!b_reset) {
+ status = ecore_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port, link_status));
+ DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
+ "Received link update [0x%08x] from mfw"
+ " [Addr 0x%x]\n",
+ status, (u32)(p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port,
+ link_status)));
+ } else {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "Resetting link indications\n");
+ return;
+ }
+
+ if (p_hwfn->b_drv_link_init)
+ p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
+ else
+ p_link->link_up = false;
+
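+ /* Translate the MFW speed/duplex encoding into a speed in Mb/s; only the
+ * 1000THD case reports half duplex.
+ */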
+ p_link->full_duplex = true;
+ switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
+ case LINK_STATUS_SPEED_AND_DUPLEX_100G:
+ p_link->speed = 100000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_50G:
+ p_link->speed = 50000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_40G:
+ p_link->speed = 40000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_25G:
+ p_link->speed = 25000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_20G:
+ p_link->speed = 20000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_10G:
+ p_link->speed = 10000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
+ p_link->full_duplex = false;
+ /* Fall-through */
+ case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
+ p_link->speed = 1000;
+ break;
+ default:
+ p_link->speed = 0;
+ }
+
+ /* p_link->speed never holds the total line speed, since it is later
+ * adjusted according to the bandwidth allocation; keep the physical line
+ * speed in p_link->line_speed instead.
+ */
+ if (p_link->link_up && p_link->speed)
+ p_link->line_speed = p_link->speed;
+ else
+ p_link->line_speed = 0;
+
+ max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
+ min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
+
+ /* Max bandwidth configuration */
+ __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
+ p_link, max_bw);
+
+ /* Min bandwidth configuration */
+ __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
+ p_link, min_bw);
+ ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev,
+ p_link->min_pf_rate);
+
+ p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
+ p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
+ p_link->parallel_detection = !!(status &
+ LINK_STATUS_PARALLEL_DETECTION_USED);
+ p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
+
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
+ ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
+ ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
+ ECORE_LINK_PARTNER_SPEED_10G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
+ ECORE_LINK_PARTNER_SPEED_20G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
+ ECORE_LINK_PARTNER_SPEED_25G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
+ ECORE_LINK_PARTNER_SPEED_40G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
+ ECORE_LINK_PARTNER_SPEED_50G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
+ ECORE_LINK_PARTNER_SPEED_100G : 0;
+
+ p_link->partner_tx_flow_ctrl_en =
+ !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
+ p_link->partner_rx_flow_ctrl_en =
+ !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
+
+ switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
+ case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
+ p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
+ break;
+ case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
+ p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
+ break;
+ case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
+ p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
+ break;
+ default:
+ p_link->partner_adv_pause = 0;
+ }
+
+ p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
+
+ OSAL_LINK_UPDATE(p_hwfn);
+}
+
+enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, bool b_up)
+{
+ struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
+ struct ecore_mcp_mb_params mb_params;
+ struct eth_phy_cfg phy_cfg;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 cmd;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+ return ECORE_SUCCESS;
+#endif
+
+ /* Set the shmem configuration according to params */
+ OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
+ cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
+ if (!params->speed.autoneg)
+ phy_cfg.speed = params->speed.forced_speed;
+ phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
+ phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
+ phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
+ phy_cfg.adv_speed = params->speed.advertised_speeds;
+ phy_cfg.loopback_mode = params->loopback_mode;
+ p_hwfn->b_drv_link_init = b_up;
+
+ if (b_up)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "Configuring Link: Speed 0x%08x, Pause 0x%08x,"
+ " adv_speed 0x%08x, loopback 0x%08x\n",
+ phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
+ phy_cfg.loopback_mode);
+ else
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = cmd;
+ mb_params.p_data_src = &phy_cfg;
+ mb_params.data_src_size = sizeof(phy_cfg);
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+
+ /* If the MCP fails to respond, we must abort */
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ return rc;
+ }
+
+ /* Reset the link status if needed */
+ if (!b_up)
+ ecore_mcp_handle_link_change(p_hwfn, p_ptt, true);
+
+ return rc;
+}
+
+u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
+
+ /* TODO - Add support for VFs */
+ if (IS_VF(p_hwfn->p_dev))
+ return ECORE_INVAL;
+
+ path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_PATH);
+ path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
+ path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));
+
+ proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
+ path_addr +
+ OFFSETOF(struct public_path, process_kill)) &
+ PROCESS_KILL_COUNTER_MASK;
+
+ return proc_kill_cnt;
+}
+
+static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ u32 proc_kill_cnt;
+
+ /* Prevent possible attentions/interrupts during the recovery handling
+ * and until its load phase, during which they will be re-enabled.
+ */
+ ecore_int_igu_disable_int(p_hwfn, p_ptt);
+
+ DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");
+
+ /* The following operations should be done once, and thus in CMT mode
+ * are carried out by only the first HW function.
+ */
+ if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
+ return;
+
+ if (p_dev->recov_in_prog) {
+ DP_NOTICE(p_hwfn, false,
+ "Ignoring the indication since a recovery"
+ " process is already in progress\n");
+ return;
+ }
+
+ p_dev->recov_in_prog = true;
+
+ proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
+ DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);
+
+ OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
+}
+
+static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum MFW_DRV_MSG_TYPE type)
+{
+ enum ecore_mcp_protocol_type stats_type;
+ union ecore_mcp_protocol_stats stats;
+ struct ecore_mcp_mb_params mb_params;
+ u32 hsi_param;
+ enum _ecore_status_t rc;
+
+ switch (type) {
+ case MFW_DRV_MSG_GET_LAN_STATS:
+ stats_type = ECORE_MCP_LAN_STATS;
+ hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
+ break;
+ default:
+ DP_INFO(p_hwfn, "Invalid protocol type %d\n", type);
+ return;
+ }
+
+ OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_GET_STATS;
+ mb_params.param = hsi_param;
+ mb_params.p_data_src = &stats;
+ mb_params.data_src_size = sizeof(stats);
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
+}
+
+static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
+ struct public_func *p_shmem_info)
+{
+ struct ecore_mcp_function_info *p_info;
+
+ p_info = &p_hwfn->mcp_info->func_info;
+
+ /* TODO - bandwidth min/max should have valid values of 1-100,
+ * as well as some indication that the feature is disabled.
+ * Until MFW/qlediag enforce those limitations, assume a limit is always
+ * present and clamp the value to 1 (min) or 100 (max) when it is out of
+ * range.
+ */
+ p_info->bandwidth_min = (p_shmem_info->config &
+ FUNC_MF_CFG_MIN_BW_MASK) >>
+ FUNC_MF_CFG_MIN_BW_SHIFT;
+ if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth minimum out of bounds [%02x]. Set to 1\n",
+ p_info->bandwidth_min);
+ p_info->bandwidth_min = 1;
+ }
+
+ p_info->bandwidth_max = (p_shmem_info->config &
+ FUNC_MF_CFG_MAX_BW_MASK) >>
+ FUNC_MF_CFG_MAX_BW_SHIFT;
+ if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth maximum out of bounds [%02x]. Set to 100\n",
+ p_info->bandwidth_max);
+ p_info->bandwidth_max = 100;
+ }
+}
+
+static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct public_func *p_data,
+ int pfid)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_FUNC);
+ u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+ u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
+ u32 i, size;
+
+ OSAL_MEM_ZERO(p_data, sizeof(*p_data));
+
+ size = OSAL_MIN_T(u32, sizeof(*p_data),
+ SECTION_SIZE(mfw_path_offsize));
+ for (i = 0; i < size / sizeof(u32); i++)
+ ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
+ func_addr + (i << 2));
+
+ return size;
+}
+
+static void
+ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ struct ecore_mcp_function_info *p_info;
+ struct public_func shmem_info;
+ u32 resp = 0, param = 0;
+
+ ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
+
+ ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
+
+ p_info = &p_hwfn->mcp_info->func_info;
+
+ ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);
+
+ ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);
+
+ /* Acknowledge the MFW */
+ ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
+ &param);
+}
+
+static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ /* A single notification should be sent to the upper driver in CMT mode */
+ if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
+ return;
+
+ DP_NOTICE(p_hwfn, false,
+ "Fan failure was detected on the network interface card"
+ " and it's going to be shut down.\n");
+
+ ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
+}
+
+struct ecore_mdump_cmd_params {
+ u32 cmd;
+ void *p_data_src;
+ u8 data_src_size;
+ void *p_data_dst;
+ u8 data_dst_size;
+ u32 mcp_resp;
+};
+
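+/* All mdump operations below go through DRV_MSG_CODE_MDUMP_CMD; the specific
+ * sub-command is carried in the mailbox param and any payload or response in
+ * the mailbox union data.
+ */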
+static enum _ecore_status_t
+ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mdump_cmd_params *p_mdump_cmd_params)
+{
+ struct ecore_mcp_mb_params mb_params;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
+ mb_params.param = p_mdump_cmd_params->cmd;
+ mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
+ mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
+ mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
+ mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
+ if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
+ DP_NOTICE(p_hwfn, false,
+ "MFW claims that the mdump command is illegal [mdump_cmd 0x%x]\n",
+ p_mdump_cmd_params->cmd);
+ rc = ECORE_INVAL;
+ }
+
+ return rc;
+}
+
+static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_mdump_cmd_params mdump_cmd_params;
+
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
+
+ return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
+}
+
+enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 epoch)
+{
+ struct ecore_mdump_cmd_params mdump_cmd_params;
+
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES;
+ mdump_cmd_params.p_data_src = &epoch;
+ mdump_cmd_params.data_src_size = sizeof(epoch);
+
+ return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
+}
+
+enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_mdump_cmd_params mdump_cmd_params;
+
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;
+
+ return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
+}
+
+static enum _ecore_status_t
+ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct mdump_config_stc *p_mdump_config)
+{
+ struct ecore_mdump_cmd_params mdump_cmd_params;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG;
+ mdump_cmd_params.p_data_dst = p_mdump_config;
+ mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config);
+
+ rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (mdump_cmd_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The mdump command is not supported by the MFW\n");
+ return ECORE_NOTIMPL;
+ }
+
+ if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
+ mdump_cmd_params.mcp_resp);
+ rc = ECORE_UNKNOWN_ERROR;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mdump_info *p_mdump_info)
+{
+ u32 addr, global_offsize, global_addr;
+ struct mdump_config_stc mdump_config;
+ enum _ecore_status_t rc;
+
+ OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));
+
+ addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_GLOBAL);
+ global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+ global_addr = SECTION_ADDR(global_offsize, 0);
+ p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
+ global_addr +
+ OFFSETOF(struct public_global,
+ mdump_reason));
+
+ if (p_mdump_info->reason) {
+ rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_mdump_info->version = mdump_config.version;
+ p_mdump_info->config = mdump_config.config;
+ p_mdump_info->epoch = mdump_config.epoc;
+ p_mdump_info->num_of_logs = mdump_config.num_of_logs;
+ p_mdump_info->valid_logs = mdump_config.valid_logs;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
+ p_mdump_info->reason, p_mdump_info->version,
+ p_mdump_info->config, p_mdump_info->epoch,
+ p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
+ } else {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "MFW mdump info: reason %d\n", p_mdump_info->reason);
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_mdump_cmd_params mdump_cmd_params;
+
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS;
+
+ return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
+}
+
+static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ /* In CMT mode - no need for more than a single acknowledgment to the
+ * MFW, and no more than a single notification to the upper driver.
+ */
+ if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
+ return;
+
+ DP_NOTICE(p_hwfn, false,
+ "Received a critical error notification from the MFW!\n");
+
+ if (p_hwfn->p_dev->mdump_en) {
+ DP_NOTICE(p_hwfn, false,
+ "Not acknowledging the notification to allow the MFW crash dump\n");
+ p_hwfn->p_dev->mdump_en = false;
+ return;
+ }
+
+ ecore_mcp_mdump_ack(p_hwfn, p_ptt);
+ ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
+}
+
+enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_mcp_info *info = p_hwfn->mcp_info;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ bool found = false;
+ u16 i;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");
+
+ /* Read Messages from MFW */
+ ecore_mcp_read_mb(p_hwfn, p_ptt);
+
+ /* Compare current messages to old ones */
+ for (i = 0; i < info->mfw_mb_length; i++) {
+ if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
+ continue;
+
+ found = true;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
+ i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
+
+ switch (i) {
+ case MFW_DRV_MSG_LINK_CHANGE:
+ ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
+ break;
+ case MFW_DRV_MSG_VF_DISABLED:
+ ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_LLDP_DATA_UPDATED:
+ ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
+ ECORE_DCBX_REMOTE_LLDP_MIB);
+ break;
+ case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
+ ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
+ ECORE_DCBX_REMOTE_MIB);
+ break;
+ case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
+ ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
+ ECORE_DCBX_OPERATIONAL_MIB);
+ break;
+ case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
+ ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_ERROR_RECOVERY:
+ ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_GET_LAN_STATS:
+ case MFW_DRV_MSG_GET_FCOE_STATS:
+ case MFW_DRV_MSG_GET_ISCSI_STATS:
+ case MFW_DRV_MSG_GET_RDMA_STATS:
+ ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
+ break;
+ case MFW_DRV_MSG_BW_UPDATE:
+ ecore_mcp_update_bw(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_FAILURE_DETECTED:
+ ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
+ ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
+ break;
+ default:
+ DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
+ rc = ECORE_INVAL;
+ }
+ }
+
+ /* ACK everything */
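+ /* The ack dwords sit after the current-message dwords in the MFW mailbox
+ * (a length dword, then the message dwords, then the ack dwords), which is
+ * what the address arithmetic below computes.
+ */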
+ for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
+ OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);
+
+ /* The MFW expects the answer in BE, so force the write in that format */
+ ecore_wr(p_hwfn, p_ptt,
+ info->mfw_mb_addr + sizeof(u32) +
+ MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
+ sizeof(u32) + i * sizeof(u32), val);
+ }
+
+ if (!found) {
+ DP_NOTICE(p_hwfn, false,
+ "Received an MFW message indication but no"
+ " new message!\n");
+ rc = ECORE_INVAL;
+ }
+
+ /* Copy the new mfw messages into the shadow */
+ OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_mfw_ver,
+ u32 *p_running_bundle_id)
+{
+ u32 global_offsize;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
+ return ECORE_SUCCESS;
+ }
+#endif
+
+ if (IS_VF(p_hwfn->p_dev)) {
+ if (p_hwfn->vf_iov_info) {
+ struct pfvf_acquire_resp_tlv *p_resp;
+
+ p_resp = &p_hwfn->vf_iov_info->acquire_resp;
+ *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
+ return ECORE_SUCCESS;
+ } else {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF requested MFW version prior to ACQUIRE\n");
+ return ECORE_INVAL;
+ }
+ }
+
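+ /* For a PF, the MFW version and the running bundle id are read from the
+ * global section of the MFW public data.
+ */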
+ global_offsize = ecore_rd(p_hwfn, p_ptt,
+ SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
+ public_base,
+ PUBLIC_GLOBAL));
+ *p_mfw_ver =
+ ecore_rd(p_hwfn, p_ptt,
+ SECTION_ADDR(global_offsize,
+ 0) + OFFSETOF(struct public_global, mfw_ver));
+
+ if (p_running_bundle_id != OSAL_NULL) {
+ *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
+ SECTION_ADDR(global_offsize,
+ 0) +
+ OFFSETOF(struct public_global,
+ running_bundle_id));
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
+ u32 *p_media_type)
+{
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
+ struct ecore_ptt *p_ptt;
+
+ /* TODO - Add support for VFs */
+ if (IS_VF(p_dev))
+ return ECORE_INVAL;
+
+ if (!ecore_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
+ return ECORE_BUSY;
+ }
+
+ *p_media_type = MEDIA_UNSPECIFIED;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_BUSY;
+
+ *p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port, media_type));
+
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return ECORE_SUCCESS;
+}
+
+/* @DPDK */
+/* Old MFW has a global configuration for all PFs regarding RDMA support */
+static void
+ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
+ enum ecore_pci_personality *p_proto)
+{
+ *p_proto = ECORE_PCI_ETH;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
+ "According to Legacy capabilities, L2 personality is %08x\n",
+ (u32)*p_proto);
+}
+
+/* @DPDK */
+static enum _ecore_status_t
+ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_pci_personality *p_proto)
+{
+ u32 resp = 0, param = 0;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
+ "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
+ (u32)*p_proto, resp, param);
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
+ struct public_func *p_info,
+ struct ecore_ptt *p_ptt,
+ enum ecore_pci_personality *p_proto)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
+ case FUNC_MF_CFG_PROTOCOL_ETHERNET:
+ if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
+ ECORE_SUCCESS)
+ ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
+ break;
+ default:
+ rc = ECORE_INVAL;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_mcp_function_info *info;
+ struct public_func shmem_info;
+
+ ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
+ info = &p_hwfn->mcp_info->func_info;
+
+ info->pause_on_host = (shmem_info.config &
+ FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
+
+ if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
+ &info->protocol)) {
+ DP_ERR(p_hwfn, "Unknown personality %08x\n",
+ (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
+ return ECORE_INVAL;
+ }
+
+ ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
+
+ if (shmem_info.mac_upper || shmem_info.mac_lower) {
+ info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
+ info->mac[1] = (u8)(shmem_info.mac_upper);
+ info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
+ info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
+ info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
+ info->mac[5] = (u8)(shmem_info.mac_lower);
+ } else {
+ /* TODO - are there protocols for which there's no MAC? */
+ DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
+ }
+
+ /* TODO - are these calculations true for a BE machine? */
+ info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
+ (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
+ info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
+ (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);
+
+ info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
+
+ info->mtu = (u16)shmem_info.mtu_size;
+
+ if (info->mtu == 0)
+ info->mtu = 1500;
+
+ DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
+ "Read configuration from shmem: pause_on_host %02x"
+ " protocol %02x BW [%02x - %02x]"
+ " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %lx"
+ " node %lx ovlan %04x\n",
+ info->pause_on_host, info->protocol,
+ info->bandwidth_min, info->bandwidth_max,
+ info->mac[0], info->mac[1], info->mac[2],
+ info->mac[3], info->mac[4], info->mac[5],
+ (unsigned long)info->wwn_port,
+ (unsigned long)info->wwn_node, info->ovlan);
+
+ return ECORE_SUCCESS;
+}
+
+struct ecore_mcp_link_params
+*ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
+{
+ if (!p_hwfn || !p_hwfn->mcp_info)
+ return OSAL_NULL;
+ return &p_hwfn->mcp_info->link_input;
+}
+
+struct ecore_mcp_link_state
+*ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
+{
+ if (!p_hwfn || !p_hwfn->mcp_info)
+ return OSAL_NULL;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+ DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
+ p_hwfn->mcp_info->link_output.link_up = true;
+ }
+#endif
+
+ return &p_hwfn->mcp_info->link_output;
+}
+
+struct ecore_mcp_link_capabilities
+*ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
+{
+ if (!p_hwfn || !p_hwfn->mcp_info)
+ return OSAL_NULL;
+ return &p_hwfn->mcp_info->link_capabilities;
+}
+
+enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 resp = 0, param = 0;
+ enum _ecore_status_t rc;
+
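+ /* The mailbox param is the requested drain period (assumption: the value
+ * is in ms, matching the slightly longer sleep below).
+ */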
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
+
+ /* Wait for the drain to complete before returning */
+ OSAL_MSLEEP(1020);
+
+ return rc;
+}
+
+const struct ecore_mcp_function_info
+*ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
+{
+ if (!p_hwfn || !p_hwfn->mcp_info)
+ return OSAL_NULL;
+ return &p_hwfn->mcp_info->func_info;
+}
+
+enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mcp_nvm_params *params)
+{
+ enum _ecore_status_t rc;
+
+ switch (params->type) {
+ case ECORE_MCP_NVM_RD:
+ rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
+ params->nvm_common.offset,
+ &params->nvm_common.resp,
+ &params->nvm_common.param,
+ params->nvm_rd.buf_size,
+ params->nvm_rd.buf);
+ break;
+ case ECORE_MCP_CMD:
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
+ params->nvm_common.offset,
+ &params->nvm_common.resp,
+ &params->nvm_common.param);
+ break;
+ case ECORE_MCP_NVM_WR:
+ rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
+ params->nvm_common.offset,
+ &params->nvm_common.resp,
+ &params->nvm_common.param,
+ params->nvm_wr.buf_size,
+ params->nvm_wr.buf);
+ break;
+ default:
+ rc = ECORE_NOTIMPL;
+ break;
+ }
+ return rc;
+}
+
+int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 personalities)
+{
+ enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
+ struct public_func shmem_info;
+ int i, count = 0, num_pfs;
+
+ num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
+
+ for (i = 0; i < num_pfs; i++) {
+ ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+ MCP_PF_ID_BY_REL(p_hwfn, i));
+ if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
+ continue;
+
+ if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
+ &protocol) !=
+ ECORE_SUCCESS)
+ continue;
+
+ if ((1 << ((u32)protocol)) & personalities)
+ count++;
+ }
+
+ return count;
+}
+
+enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_flash_size)
+{
+ u32 flash_size;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
+ return ECORE_INVAL;
+ }
+#endif
+
+ if (IS_VF(p_hwfn->p_dev))
+ return ECORE_INVAL;
+
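+ /* NVM_CFG4 encodes the flash size as a power-of-two exponent in Mbit
+ * units; shifting by MCP_BYTES_PER_MBIT_SHIFT converts it to bytes.
+ */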
+ flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
+ flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
+ MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
+ flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
+
+ *p_flash_size = flash_size;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+
+ if (p_dev->recov_in_prog) {
+ DP_NOTICE(p_hwfn, false,
+ "Avoid triggering a recovery since such a process"
+ " is already in progress\n");
+ return ECORE_AGAIN;
+ }
+
+ DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 vf_id, u8 num)
+{
+ u32 resp = 0, param = 0, rc_param = 0;
+ enum _ecore_status_t rc;
+
+ /* Only the leading hwfn can configure MSI-X, and it must take CMT into account */
+
+ if (!IS_LEAD_HWFN(p_hwfn))
+ return ECORE_SUCCESS;
+ num *= p_hwfn->p_dev->num_hwfns;
+
+ param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
+ DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
+ param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
+ DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
+ &resp, &rc_param);
+
+ if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
+ DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
+ vf_id);
+ rc = ECORE_INVAL;
+ } else {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
+ num, vf_id);
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mcp_drv_version *p_ver)
+{
+ struct ecore_mcp_mb_params mb_params;
+ struct drv_version_stc drv_version;
+ u32 num_words, i;
+ void *p_name;
+ OSAL_BE32 val;
+ enum _ecore_status_t rc;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
+ return ECORE_SUCCESS;
+#endif
+
+ OSAL_MEM_ZERO(&drv_version, sizeof(drv_version));
+ drv_version.version = p_ver->version;
+ num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
+ for (i = 0; i < num_words; i++) {
+ /* The driver name is expected to be in a big-endian format */
+ p_name = &p_ver->name[i * sizeof(u32)];
+ val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
+ *(u32 *)&drv_version.name[i * sizeof(u32)] = val;
+ }
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
+ mb_params.p_data_src = &drv_version;
+ mb_params.data_src_size = sizeof(drv_version);
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ enum _ecore_status_t rc;
+ u32 resp = 0, param = 0;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
+ &param);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 value, cpu_mode;
+
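+ /* Ack the CPU state bits (assumption: write-1-to-clear), then clear the
+ * soft-halt bit in CPU_MODE and read it back; a still-set soft-halt bit
+ * means the resume failed.
+ */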
+ ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
+
+ value = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+ value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
+ ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
+ cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+
+ return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -1 : 0;
+}
+
+enum _ecore_status_t
+ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_ov_client client)
+{
+ enum _ecore_status_t rc;
+ u32 resp = 0, param = 0;
+ u32 drv_mb_param;
+
+ switch (client) {
+ case ECORE_OV_CLIENT_DRV:
+ drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
+ break;
+ case ECORE_OV_CLIENT_USER:
+ drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
+ break;
+ case ECORE_OV_CLIENT_VENDOR_SPEC:
+ drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client);
+ return ECORE_INVAL;
+ }
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
+ drv_mb_param, &resp, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_ov_driver_state drv_state)
+{
+ enum _ecore_status_t rc;
+ u32 resp = 0, param = 0;
+ u32 drv_mb_param;
+
+ switch (drv_state) {
+ case ECORE_OV_DRIVER_STATE_NOT_LOADED:
+ drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
+ break;
+ case ECORE_OV_DRIVER_STATE_DISABLED:
+ drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
+ break;
+ case ECORE_OV_DRIVER_STATE_ACTIVE:
+ drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state);
+ return ECORE_INVAL;
+ }
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
+ drv_mb_param, &resp, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn, "Failed to send driver state\n");
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_fc_npiv_tbl *p_table)
+{
+ return 0;
+}
+
+enum _ecore_status_t
+ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u16 mtu)
+{
+ return 0;
+}
+
+enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_led_mode mode)
+{
+ u32 resp = 0, param = 0, drv_mb_param;
+ enum _ecore_status_t rc;
+
+ switch (mode) {
+ case ECORE_LED_MODE_ON:
+ drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
+ break;
+ case ECORE_LED_MODE_OFF:
+ drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
+ break;
+ case ECORE_LED_MODE_RESTORE:
+ drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
+ return ECORE_INVAL;
+ }
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
+ drv_mb_param, &resp, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 mask_parities)
+{
+ enum _ecore_status_t rc;
+ u32 resp = 0, param = 0;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
+ mask_parities, &resp, &param);
+
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn,
+ "MCP response failure for mask parities, aborting\n");
+ } else if (resp != FW_MSG_CODE_OK) {
+ DP_ERR(p_hwfn,
+ "MCP did not ack mask parity request. Old MFW?\n");
+ rc = ECORE_INVAL;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
+ u8 *p_buf, u32 len)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ u32 bytes_left, offset, bytes_to_copy, buf_size;
+ struct ecore_mcp_nvm_params params;
+ struct ecore_ptt *p_ptt;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_BUSY;
+
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
+ bytes_left = len;
+ offset = 0;
+ params.type = ECORE_MCP_NVM_RD;
+ params.nvm_rd.buf_size = &buf_size;
+ params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM;
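+ /* Read in chunks of up to MCP_DRV_NVM_BUF_LEN bytes; the mailbox offset
+ * packs the NVRAM address in its low bits and the chunk length above
+ * DRV_MB_PARAM_NVM_LEN_SHIFT.
+ */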
+ while (bytes_left > 0) {
+ bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
+ MCP_DRV_NVM_BUF_LEN);
+ params.nvm_common.offset = (addr + offset) |
+ (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_SHIFT);
+ params.nvm_rd.buf = (u32 *)(p_buf + offset);
+ rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+ if (rc != ECORE_SUCCESS || (params.nvm_common.resp !=
+ FW_MSG_CODE_NVM_OK)) {
+ DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
+ break;
+ }
+
+ /* This can be a lengthy process, and the scheduler may not be
+ * preemptible. Sleep a bit to avoid hogging the CPU.
+ */
+ if (bytes_left % 0x1000 <
+ (bytes_left - *params.nvm_rd.buf_size) % 0x1000)
+ OSAL_MSLEEP(1);
+
+ offset += *params.nvm_rd.buf_size;
+ bytes_left -= *params.nvm_rd.buf_size;
+ }
+
+ p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
+ u32 addr, u8 *p_buf, u32 len)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_mcp_nvm_params params;
+ struct ecore_ptt *p_ptt;
+ enum _ecore_status_t rc;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_BUSY;
+
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
+ params.type = ECORE_MCP_NVM_RD;
+ params.nvm_rd.buf_size = &len;
+ params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_READ) ?
+ DRV_MSG_CODE_PHY_CORE_READ : DRV_MSG_CODE_PHY_RAW_READ;
+ params.nvm_common.offset = addr;
+ params.nvm_rd.buf = (u32 *)p_buf;
+ rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
+
+ p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_mcp_nvm_params params;
+ struct ecore_ptt *p_ptt;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_BUSY;
+
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
+ OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_mcp_nvm_params params;
+ struct ecore_ptt *p_ptt;
+ enum _ecore_status_t rc;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_BUSY;
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
+ params.type = ECORE_MCP_CMD;
+ params.nvm_common.cmd = DRV_MSG_CODE_NVM_DEL_FILE;
+ params.nvm_common.offset = addr;
+ rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+ p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
+ u32 addr)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_mcp_nvm_params params;
+ struct ecore_ptt *p_ptt;
+ enum _ecore_status_t rc;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_BUSY;
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
+ params.type = ECORE_MCP_CMD;
+ params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
+ params.nvm_common.offset = addr;
+ rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+ p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+/* rc is initialized to ECORE_INVAL because the while loop below is never
+ * entered when len is 0.
+ */
+enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
+ u32 addr, u8 *p_buf, u32 len)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ enum _ecore_status_t rc = ECORE_INVAL;
+ struct ecore_mcp_nvm_params params;
+ struct ecore_ptt *p_ptt;
+ u32 buf_idx, buf_size;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_BUSY;
+
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
+ params.type = ECORE_MCP_NVM_WR;
+ if (cmd == ECORE_PUT_FILE_DATA)
+ params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
+ else
+ params.nvm_common.cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
+ buf_idx = 0;
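+ /* Write in chunks of up to MCP_DRV_NVM_BUF_LEN bytes, packing the chunk
+ * length and destination address into the mailbox offset as in the read
+ * path.
+ */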
+ while (buf_idx < len) {
+ buf_size = OSAL_MIN_T(u32, (len - buf_idx),
+ MCP_DRV_NVM_BUF_LEN);
+ params.nvm_common.offset = ((buf_size <<
+ DRV_MB_PARAM_NVM_LEN_SHIFT)
+ | addr) + buf_idx;
+ params.nvm_wr.buf_size = buf_size;
+ params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
+ rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+ if (rc != ECORE_SUCCESS ||
+ ((params.nvm_common.resp != FW_MSG_CODE_NVM_OK) &&
+ (params.nvm_common.resp !=
+ FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK)))
+ DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
+
+ /* This can be a lengthy process, and the scheduler may not be
+ * preemptible. Sleep a bit to avoid hogging the CPU.
+ */
+ if (buf_idx % 0x1000 >
+ (buf_idx + buf_size) % 0x1000)
+ OSAL_MSLEEP(1);
+
+ buf_idx += buf_size;
+ }
+
+ p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
+ u32 addr, u8 *p_buf, u32 len)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_mcp_nvm_params params;
+ struct ecore_ptt *p_ptt;
+ enum _ecore_status_t rc;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_BUSY;
+
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
+ params.type = ECORE_MCP_NVM_WR;
+ params.nvm_wr.buf_size = len;
+ params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_WRITE) ?
+ DRV_MSG_CODE_PHY_CORE_WRITE : DRV_MSG_CODE_PHY_RAW_WRITE;
+ params.nvm_common.offset = addr;
+ params.nvm_wr.buf = (u32 *)p_buf;
+ rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
+ p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
+ u32 addr)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_mcp_nvm_params params;
+ struct ecore_ptt *p_ptt;
+ enum _ecore_status_t rc;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_BUSY;
+
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
+ params.type = ECORE_MCP_CMD;
+ params.nvm_common.cmd = DRV_MSG_CODE_SET_SECURE_MODE;
+ params.nvm_common.offset = addr;
+ rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+ p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 port, u32 addr, u32 offset,
+ u32 len, u8 *p_buf)
+{
+ struct ecore_mcp_nvm_params params;
+ enum _ecore_status_t rc;
+ u32 bytes_left, bytes_to_copy, buf_size;
+
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
+ params.nvm_common.offset =
+ (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
+ (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
+ addr = offset;
+ offset = 0;
+ bytes_left = len;
+ params.type = ECORE_MCP_NVM_RD;
+ params.nvm_rd.buf_size = &buf_size;
+ params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_READ;
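+ /* The mailbox offset packs the port, the I2C address, the byte offset
+ * within the transceiver and the transfer size; reads are chunked to at
+ * most MAX_I2C_TRANSACTION_SIZE bytes.
+ */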
+ while (bytes_left > 0) {
+ bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
+ MAX_I2C_TRANSACTION_SIZE);
+ params.nvm_rd.buf = (u32 *)(p_buf + offset);
+ params.nvm_common.offset &=
+ (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
+ DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
+ params.nvm_common.offset |=
+ ((addr + offset) <<
+ DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
+ params.nvm_common.offset |=
+ (bytes_to_copy << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
+ rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+ if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
+ FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
+ return ECORE_NODEV;
+ } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
+ FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
+ return ECORE_UNKNOWN_ERROR;
+
+ offset += *params.nvm_rd.buf_size;
+ bytes_left -= *params.nvm_rd.buf_size;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 port, u32 addr, u32 offset,
+ u32 len, u8 *p_buf)
+{
+ struct ecore_mcp_nvm_params params;
+ enum _ecore_status_t rc;
+ u32 buf_idx, buf_size;
+
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
+ params.nvm_common.offset =
+ (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
+ (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
+ params.type = ECORE_MCP_NVM_WR;
+ params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_WRITE;
+ buf_idx = 0;
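+ /* Same offset packing as the SFP read path; writes are chunked to at most
+ * MAX_I2C_TRANSACTION_SIZE bytes.
+ */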
+ while (buf_idx < len) {
+ buf_size = OSAL_MIN_T(u32, (len - buf_idx),
+ MAX_I2C_TRANSACTION_SIZE);
+ params.nvm_common.offset &=
+ (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
+ DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
+ params.nvm_common.offset |=
+ ((offset + buf_idx) <<
+ DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
+ params.nvm_common.offset |=
+ (buf_size << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
+ params.nvm_wr.buf_size = buf_size;
+ params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
+ rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+ if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
+ FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
+ return ECORE_NODEV;
+ } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
+ FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
+ return ECORE_UNKNOWN_ERROR;
+
+ buf_idx += buf_size;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 gpio, u32 *gpio_val)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 drv_mb_param = 0, rsp;
+
+ drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT);
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
+ drv_mb_param, &rsp, gpio_val);
+
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
+ return ECORE_UNKNOWN_ERROR;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 gpio, u16 gpio_val)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 drv_mb_param = 0, param, rsp;
+
+ drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT) |
+ (gpio_val << DRV_MB_PARAM_GPIO_VALUE_SHIFT);
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
+ drv_mb_param, &rsp, &param);
+
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
+ return ECORE_UNKNOWN_ERROR;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 gpio, u32 *gpio_direction,
+ u32 *gpio_ctrl)
+{
+ u32 drv_mb_param = 0, rsp, val = 0;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
+ drv_mb_param, &rsp, &val);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
+ DRV_MB_PARAM_GPIO_DIRECTION_SHIFT;
+ *gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
+ DRV_MB_PARAM_GPIO_CTRL_SHIFT;
+
+ if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
+ return ECORE_UNKNOWN_ERROR;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 drv_mb_param = 0, rsp, param;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
+ DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+ drv_mb_param, &rsp, &param);
+
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
+ (param != DRV_MB_PARAM_BIST_RC_PASSED))
+ rc = ECORE_UNKNOWN_ERROR;
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 drv_mb_param, rsp, param;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
+ DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+ drv_mb_param, &rsp, &param);
+
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
+ (param != DRV_MB_PARAM_BIST_RC_PASSED))
+ rc = ECORE_UNKNOWN_ERROR;
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
+ struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
+{
+ u32 drv_mb_param = 0, rsp;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
+ DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+ drv_mb_param, &rsp, num_images);
+
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
+ rc = ECORE_UNKNOWN_ERROR;
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
+ struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct bist_nvm_image_att *p_image_att, u32 image_index)
+{
+ struct ecore_mcp_nvm_params params;
+ enum _ecore_status_t rc;
+ u32 buf_size;
+
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
+ params.nvm_common.offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
+ DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+ params.nvm_common.offset |= (image_index <<
+ DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT);
+
+ params.type = ECORE_MCP_NVM_RD;
+ params.nvm_rd.buf_size = &buf_size;
+ params.nvm_common.cmd = DRV_MSG_CODE_BIST_TEST;
+ params.nvm_rd.buf = (u32 *)p_image_att;
+
+ rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (((params.nvm_common.resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
+ (p_image_att->return_code != 1))
+ rc = ECORE_UNKNOWN_ERROR;
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_temperature_info *p_temp_info)
+{
+ struct ecore_temperature_sensor *p_temp_sensor;
+ struct temperature_status_stc mfw_temp_info;
+ struct ecore_mcp_mb_params mb_params;
+ u32 val;
+ enum _ecore_status_t rc;
+ u8 i;
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
+ mb_params.p_data_dst = &mfw_temp_info;
+ mb_params.data_dst_size = sizeof(mfw_temp_info);
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
+ p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors,
+ ECORE_MAX_NUM_OF_SENSORS);
+ for (i = 0; i < p_temp_info->num_sensors; i++) {
+ val = mfw_temp_info.sensor[i];
+ p_temp_sensor = &p_temp_info->sensors[i];
+ p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
+ SENSOR_LOCATION_SHIFT;
+ p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
+ THRESHOLD_HIGH_SHIFT;
+ p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
+ CRITICAL_TEMPERATURE_SHIFT;
+ p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
+ CURRENT_TEMP_SHIFT;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_get_mba_versions(
+ struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mba_vers *p_mba_vers)
+{
+ struct ecore_mcp_nvm_params params;
+ enum _ecore_status_t rc;
+ u32 buf_size;
+
+ OSAL_MEM_ZERO(&params, sizeof(params));
+ params.type = ECORE_MCP_NVM_RD;
+ params.nvm_common.cmd = DRV_MSG_CODE_GET_MBA_VERSION;
+ params.nvm_common.offset = 0;
+ params.nvm_rd.buf = &p_mba_vers->mba_vers[0];
+ params.nvm_rd.buf_size = &buf_size;
+ rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
+ FW_MSG_CODE_NVM_OK)
+ rc = ECORE_UNKNOWN_ERROR;
+
+ if (buf_size != MCP_DRV_NVM_BUF_LEN)
+ rc = ECORE_UNKNOWN_ERROR;
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u64 *num_events)
+{
+ u32 rsp;
+
+ return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
+ 0, &rsp, (u32 *)num_events);
+}
+
+static enum resource_id_enum
+ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
+{
+ enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
+
+ switch (res_id) {
+ case ECORE_SB:
+ mfw_res_id = RESOURCE_NUM_SB_E;
+ break;
+ case ECORE_L2_QUEUE:
+ mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
+ break;
+ case ECORE_VPORT:
+ mfw_res_id = RESOURCE_NUM_VPORT_E;
+ break;
+ case ECORE_RSS_ENG:
+ mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
+ break;
+ case ECORE_PQ:
+ mfw_res_id = RESOURCE_NUM_PQ_E;
+ break;
+ case ECORE_RL:
+ mfw_res_id = RESOURCE_NUM_RL_E;
+ break;
+ case ECORE_MAC:
+ case ECORE_VLAN:
+ /* Each VFC resource can accommodate both a MAC and a VLAN */
+ mfw_res_id = RESOURCE_VFC_FILTER_E;
+ break;
+ case ECORE_ILT:
+ mfw_res_id = RESOURCE_ILT_E;
+ break;
+ case ECORE_LL2_QUEUE:
+ mfw_res_id = RESOURCE_LL2_QUEUE_E;
+ break;
+ case ECORE_RDMA_CNQ_RAM:
+ case ECORE_CMDQS_CQS:
+ /* CNQ/CMDQS are the same resource */
+ mfw_res_id = RESOURCE_CQS_E;
+ break;
+ case ECORE_RDMA_STATS_QUEUE:
+ mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
+ break;
+ case ECORE_BDQ:
+ mfw_res_id = RESOURCE_BDQ_E;
+ break;
+ default:
+ break;
+ }
+
+ return mfw_res_id;
+}
+
+#define ECORE_RESC_ALLOC_VERSION_MAJOR 2
+#define ECORE_RESC_ALLOC_VERSION_MINOR 0
+#define ECORE_RESC_ALLOC_VERSION \
+ ((ECORE_RESC_ALLOC_VERSION_MAJOR << \
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
+ (ECORE_RESC_ALLOC_VERSION_MINOR << \
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
+
+struct ecore_resc_alloc_in_params {
+ u32 cmd;
+ enum ecore_resources res_id;
+ u32 resc_max_val;
+};
+
+struct ecore_resc_alloc_out_params {
+ u32 mcp_resp;
+ u32 mcp_param;
+ u32 resc_num;
+ u32 resc_start;
+ u32 vf_resc_num;
+ u32 vf_resc_start;
+ u32 flags;
+};
+
+static enum _ecore_status_t
+ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_resc_alloc_in_params *p_in_params,
+ struct ecore_resc_alloc_out_params *p_out_params)
+{
+ struct ecore_mcp_mb_params mb_params;
+ struct resource_info mfw_resc_info;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info));
+
+ mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id);
+ if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
+ DP_ERR(p_hwfn,
+ "Failed to match resource %d [%s] with the MFW resources\n",
+ p_in_params->res_id,
+ ecore_hw_get_resc_name(p_in_params->res_id));
+ return ECORE_INVAL;
+ }
+
+ switch (p_in_params->cmd) {
+ case DRV_MSG_SET_RESOURCE_VALUE_MSG:
+ mfw_resc_info.size = p_in_params->resc_max_val;
+ /* Fallthrough */
+ case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
+ break;
+ default:
+ DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
+ p_in_params->cmd);
+ return ECORE_INVAL;
+ }
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = p_in_params->cmd;
+ mb_params.param = ECORE_RESC_ALLOC_VERSION;
+ mb_params.p_data_src = &mfw_resc_info;
+ mb_params.data_src_size = sizeof(mfw_resc_info);
+ mb_params.p_data_dst = mb_params.p_data_src;
+ mb_params.data_dst_size = mb_params.data_src_size;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
+ p_in_params->cmd, p_in_params->res_id,
+ ecore_hw_get_resc_name(p_in_params->res_id),
+ ECORE_MFW_GET_FIELD(mb_params.param,
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
+ ECORE_MFW_GET_FIELD(mb_params.param,
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
+ p_in_params->resc_max_val);
+
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_out_params->mcp_resp = mb_params.mcp_resp;
+ p_out_params->mcp_param = mb_params.mcp_param;
+ p_out_params->resc_num = mfw_resc_info.size;
+ p_out_params->resc_start = mfw_resc_info.offset;
+ p_out_params->vf_resc_num = mfw_resc_info.vf_size;
+ p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
+ p_out_params->flags = mfw_resc_info.flags;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
+ ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
+ FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
+ ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
+ FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
+ p_out_params->resc_num, p_out_params->resc_start,
+ p_out_params->vf_resc_num, p_out_params->vf_resc_start,
+ p_out_params->flags);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_resources res_id, u32 resc_max_val,
+ u32 *p_mcp_resp)
+{
+ struct ecore_resc_alloc_out_params out_params;
+ struct ecore_resc_alloc_in_params in_params;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&in_params, sizeof(in_params));
+ in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
+ in_params.res_id = res_id;
+ in_params.resc_max_val = resc_max_val;
+ OSAL_MEM_ZERO(&out_params, sizeof(out_params));
+ rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
+ &out_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *p_mcp_resp = out_params.mcp_resp;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_resources res_id, u32 *p_mcp_resp,
+ u32 *p_resc_num, u32 *p_resc_start)
+{
+ struct ecore_resc_alloc_out_params out_params;
+ struct ecore_resc_alloc_in_params in_params;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&in_params, sizeof(in_params));
+ in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
+ in_params.res_id = res_id;
+ OSAL_MEM_ZERO(&out_params, sizeof(out_params));
+ rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
+ &out_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *p_mcp_resp = out_params.mcp_resp;
+
+ if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
+ *p_resc_num = out_params.resc_num;
+ *p_resc_start = out_params.resc_start;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 mcp_resp, mcp_param;
+
+ return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
+ &mcp_resp, &mcp_param);
+}
+
+static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 param, u32 *p_mcp_resp,
+ u32 *p_mcp_param)
+{
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
+ p_mcp_resp, p_mcp_param);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The resource command is unsupported by the MFW\n");
+ return ECORE_NOTIMPL;
+ }
+
+ if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
+ u8 opcode = ECORE_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
+
+ DP_NOTICE(p_hwfn, false,
+ "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
+ param, opcode);
+ return ECORE_INVAL;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t
+__ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_resc_lock_params *p_params)
+{
+ u32 param = 0, mcp_resp, mcp_param;
+ u8 opcode;
+ enum _ecore_status_t rc;
+
+ switch (p_params->timeout) {
+ case ECORE_MCP_RESC_LOCK_TO_DEFAULT:
+ opcode = RESOURCE_OPCODE_REQ;
+ p_params->timeout = 0;
+ break;
+ case ECORE_MCP_RESC_LOCK_TO_NONE:
+ opcode = RESOURCE_OPCODE_REQ_WO_AGING;
+ p_params->timeout = 0;
+ break;
+ default:
+ opcode = RESOURCE_OPCODE_REQ_W_AGING;
+ break;
+ }
+
+ ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
+ ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
+ ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
+ param, p_params->timeout, opcode, p_params->resource);
+
+ /* Attempt to acquire the resource */
+ rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
+ &mcp_param);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Analyze the response */
+ p_params->owner = ECORE_MFW_GET_FIELD(mcp_param,
+ RESOURCE_CMD_RSP_OWNER);
+ opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
+ mcp_param, opcode, p_params->owner);
+
+ switch (opcode) {
+ case RESOURCE_OPCODE_GNT:
+ p_params->b_granted = true;
+ break;
+ case RESOURCE_OPCODE_BUSY:
+ p_params->b_granted = false;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false,
+ "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
+ mcp_param, opcode);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_resc_lock_params *p_params)
+{
+ u32 retry_cnt = 0;
+ enum _ecore_status_t rc;
+
+ do {
+ /* No need for an interval before the first iteration */
+ if (retry_cnt) {
+ if (p_params->sleep_b4_retry) {
+ u16 retry_interval_in_ms =
+ DIV_ROUND_UP(p_params->retry_interval,
+ 1000);
+
+ OSAL_MSLEEP(retry_interval_in_ms);
+ } else {
+ OSAL_UDELAY(p_params->retry_interval);
+ }
+ }
+
+ rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (p_params->b_granted)
+ break;
+ } while (retry_cnt++ < p_params->retry_num);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_resc_unlock_params *p_params)
+{
+ u32 param = 0, mcp_resp, mcp_param;
+ u8 opcode;
+ enum _ecore_status_t rc;
+
+ opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
+ : RESOURCE_OPCODE_RELEASE;
+ ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
+ ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
+ param, opcode, p_params->resource);
+
+ /* Attempt to release the resource */
+ rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
+ &mcp_param);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Analyze the response */
+ opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
+ mcp_param, opcode);
+
+ switch (opcode) {
+ case RESOURCE_OPCODE_RELEASED_PREVIOUS:
+ DP_INFO(p_hwfn,
+ "Resource unlock request for an already released resource [%d]\n",
+ p_params->resource);
+ /* Fallthrough */
+ case RESOURCE_OPCODE_RELEASED:
+ p_params->b_released = true;
+ break;
+ case RESOURCE_OPCODE_WRONG_OWNER:
+ p_params->b_released = false;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false,
+ "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
+ mcp_param, opcode);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_mcp.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_mcp.h
new file mode 100644
index 00000000..37d1835f
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_mcp.h
@@ -0,0 +1,485 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_MCP_H__
+#define __ECORE_MCP_H__
+
+#include "bcm_osal.h"
+#include "mcp_public.h"
+#include "ecore.h"
+#include "ecore_mcp_api.h"
+
+/* Using the hwfn number (and not pf_num) is required since in CMT mode,
+ * the same pf_num may be used by two different hwfns.
+ * TODO - this shouldn't really be in a .h file, but until all fields
+ * required during hw-init are placed in their correct place in shmem,
+ * we need it in ecore_dev.c [for reading the nvram reflection in shmem].
+ */
+#define MCP_PF_ID_BY_REL(p_hwfn, rel_pfid) (ECORE_IS_BB((p_hwfn)->p_dev) ? \
+ ((rel_pfid) | \
+ ((p_hwfn)->abs_pf_id & 1) << 3) : \
+ rel_pfid)
+#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
+
+#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
+ ((_p_hwfn)->p_dev->num_ports_in_engines * \
+ ecore_device_num_engines((_p_hwfn)->p_dev)))
+
+struct ecore_mcp_info {
+ /* Spinlock used for protecting the access to the MFW mailbox */
+ osal_spinlock_t lock;
+ /* Flag to indicate whether sending a MFW mailbox is forbidden */
+ bool block_mb_sending;
+
+ /* Address of the MCP public area */
+ u32 public_base;
+ /* Address of the driver mailbox */
+ u32 drv_mb_addr;
+ /* Address of the MFW mailbox */
+ u32 mfw_mb_addr;
+ /* Address of the port configuration (link) */
+ u32 port_addr;
+
+ /* Current driver mailbox sequence */
+ u16 drv_mb_seq;
+ /* Current driver pulse sequence */
+ u16 drv_pulse_seq;
+
+ struct ecore_mcp_link_params link_input;
+ struct ecore_mcp_link_state link_output;
+ struct ecore_mcp_link_capabilities link_capabilities;
+
+ struct ecore_mcp_function_info func_info;
+
+ u8 *mfw_mb_cur;
+ u8 *mfw_mb_shadow;
+ u16 mfw_mb_length;
+ u16 mcp_hist;
+};
+
+struct ecore_mcp_mb_params {
+ u32 cmd;
+ u32 param;
+ void *p_data_src;
+ u8 data_src_size;
+ void *p_data_dst;
+ u8 data_dst_size;
+ u32 mcp_resp;
+ u32 mcp_param;
+};
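+
+/* Illustrative sketch (not part of the API): the mailbox helpers in
+ * ecore_mcp.c consume this structure roughly as follows, mirroring the
+ * DRV_MSG_CODE_GET_TEMPERATURE flow in this patch; the send routine named
+ * below is the internal helper used by ecore_mcp.c.
+ *
+ *	struct ecore_mcp_mb_params mb_params;
+ *
+ *	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ *	mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
+ *	mb_params.p_data_dst = &mfw_temp_info;
+ *	mb_params.data_dst_size = sizeof(mfw_temp_info);
+ *	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ *	if (rc == ECORE_SUCCESS)
+ *		... // mb_params.mcp_resp / mcp_param are filled by the MFW
+ */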
+
+struct ecore_drv_tlv_hdr {
+ u8 tlv_type; /* According to the enum below */
+ u8 tlv_length; /* In dwords - not including this header */
+ u8 tlv_reserved;
+#define ECORE_DRV_TLV_FLAGS_CHANGED 0x01
+ u8 tlv_flags;
+};
+
+/**
+ * @brief Initialize the interface with the MCP
+ *
+ * @param p_hwfn - HW func
+ * @param p_ptt - PTT required for register access
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Initialize the port interface with the MCP
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * Can only be called after `num_ports_in_engines' is set
+ */
+void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+/**
+ * @brief Releases resources allocated during the init process.
+ *
+ * @param p_hwfn - HW func
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief This function is called from the DPC context. After
+ * pointing PTT to the mfw mb, check for events sent by the MCP
+ * to the driver and ack them. In case a critical event is
+ * detected, it will be handled here; otherwise the work will be
+ * queued to a sleepable work-queue.
+ *
+ * @param p_hwfn - HW function
+ * @param p_ptt - PTT required for register access
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation
+ * was successful.
+ */
+enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief When the MFW doesn't get a driver pulse for a couple of seconds,
+ * at some threshold before the timeout expires, it will generate an
+ * interrupt through a dedicated status block (DPSB - Driver Pulse Status
+ * Block), to which the driver should respond immediately, by
+ * providing a keepalive indication after setting the PTT to the
+ * driver-MFW mailbox. This function is called directly from the
+ * DPC upon receiving the DPSB attention.
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation
+ * was successful.
+ */
+enum _ecore_status_t ecore_issue_pulse(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+enum ecore_drv_role {
+ ECORE_DRV_ROLE_OS,
+ ECORE_DRV_ROLE_KDUMP,
+};
+
+struct ecore_load_req_params {
+ enum ecore_drv_role drv_role;
+ u8 timeout_val; /* 1..254, '0' - default value, '255' - no timeout */
+ bool avoid_eng_reset;
+ u32 load_code;
+};
+
+/**
+ * @brief Sends a LOAD_REQ to the MFW, and in case the operation succeeds,
+ * returns whether this PF is the first on the engine/port or function.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_params
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_load_req_params *p_params);
+
+/**
+ * @brief Sends an UNLOAD_REQ message to the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Sends an UNLOAD_DONE message to the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Read the MFW mailbox into Current buffer.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Ack to mfw that driver finished FLR process for VFs
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vfs_to_ack - bit mask of all engine VFs for which the PF acks.
+ *
+ * @param return enum _ecore_status_t - ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *vfs_to_ack);
+
+/**
+ * @brief - called during init to read the shmem of all function-related info.
+ *
+ * @param p_hwfn
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - Reset the MCP using mailbox command.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - Sends an NVM write command request to the MFW with
+ * payload.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param cmd - Command: Either DRV_MSG_CODE_NVM_WRITE_NVRAM or
+ * DRV_MSG_CODE_NVM_PUT_FILE_DATA
+ * @param param - [0:23] - Offset [24:31] - Size
+ * @param o_mcp_resp - MCP response
+ * @param o_mcp_param - MCP response param
+ * @param i_txn_size - Buffer size
+ * @param i_buf - Pointer to the buffer
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param,
+ u32 i_txn_size,
+ u32 *i_buf);
+
+/**
+ * @brief - Sends an NVM read command request to the MFW to get
+ * a buffer.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param cmd - Command: DRV_MSG_CODE_NVM_GET_FILE_DATA or
+ * DRV_MSG_CODE_NVM_READ_NVRAM commands
+ * @param param - [0:23] - Offset [24:31] - Size
+ * @param o_mcp_resp - MCP response
+ * @param o_mcp_param - MCP response param
+ * @param o_txn_size - Buffer size output
+ * @param o_buf - Pointer to the buffer returned by the MFW.
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param,
+ u32 *o_txn_size,
+ u32 *o_buf);
+
+/**
+ * @brief indicates whether the MFW objects [under mcp_info] are accessible
+ *
+ * @param p_hwfn
+ *
+ * @return true iff MFW is running and mcp_info is initialized
+ */
+bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief request MFW to configure MSI-X for a VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vf_id - absolute inside engine
+ * @param num - number of SB entries to request
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 vf_id, u8 num);
+
+/**
+ * @brief - Halt the MCP.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - Wake up the MCP.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mcp_link_state *p_link,
+ u8 max_bw);
+int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mcp_link_state *p_link,
+ u8 min_bw);
+enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 mask_parities);
+/**
+ * @brief - Sends crash mdump related info to the MFW.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 epoch);
+
+/**
+ * @brief - Triggers a MFW crash dump procedure.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - Sets the MFW's max value for the given resource
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param res_id
+ * @param resc_max_val
+ * @param p_mcp_resp
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_resources res_id, u32 resc_max_val,
+ u32 *p_mcp_resp);
+
+/**
+ * @brief - Gets the MFW allocation info for the given resource
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param res_id
+ * @param p_mcp_resp
+ * @param p_resc_num
+ * @param p_resc_start
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_resources res_id, u32 *p_mcp_resp,
+ u32 *p_resc_num, u32 *p_resc_start);
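+
+/* Illustrative sketch (assumption: a caller querying the MFW view of one
+ * resource, e.g. ECORE_VPORT, during init):
+ *
+ *	u32 mcp_resp, resc_num, resc_start;
+ *
+ *	rc = ecore_mcp_get_resc_info(p_hwfn, p_ptt, ECORE_VPORT, &mcp_resp,
+ *				     &resc_num, &resc_start);
+ *	if (rc == ECORE_SUCCESS && mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK)
+ *		... // resc_num/resc_start hold this PF's allocation
+ */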
+
+/**
+ * @brief - Initiates PF FLR
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+#define ECORE_MCP_RESC_LOCK_MIN_VAL RESOURCE_DUMP /* 0 */
+#define ECORE_MCP_RESC_LOCK_MAX_VAL 31
+
+enum ecore_resc_lock {
+ ECORE_RESC_LOCK_DBG_DUMP = ECORE_MCP_RESC_LOCK_MIN_VAL,
+ /* Locks that the MFW is aware of should be added here downwards */
+
+ /* Ecore only locks should be added here upwards */
+ ECORE_RESC_LOCK_RESC_ALLOC = ECORE_MCP_RESC_LOCK_MAX_VAL
+};
+
+struct ecore_resc_lock_params {
+ /* Resource number [valid values are 0..31] */
+ u8 resource;
+
+ /* Lock timeout value in seconds [default, none or 1..254] */
+ u8 timeout;
+#define ECORE_MCP_RESC_LOCK_TO_DEFAULT 0
+#define ECORE_MCP_RESC_LOCK_TO_NONE 255
+
+ /* Number of times to retry locking */
+ u8 retry_num;
+
+ /* The interval in usec between retries */
+ u16 retry_interval;
+
+ /* Use sleep or delay between retries */
+ bool sleep_b4_retry;
+
+ /* Will be set as true if the resource is free and granted */
+ bool b_granted;
+
+ /* Will be filled with the resource owner.
+ * [0..15 = PF0-15, 16 = MFW, 17 = diag over serial]
+ */
+ u8 owner;
+};
+
+/**
+ * @brief Acquires MFW generic resource lock
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_params
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_resc_lock_params *p_params);
+
+struct ecore_resc_unlock_params {
+ /* Resource number [valid values are 0..31] */
+ u8 resource;
+
+ /* Allow to release a resource even if belongs to another PF */
+ bool b_force;
+
+ /* Will be set as true if the resource is released */
+ bool b_released;
+};
+
+/**
+ * @brief Releases MFW generic resource lock
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_params
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_resc_unlock_params *p_params);
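+
+/* Illustrative sketch (assumption: a caller protecting a shared resource,
+ * e.g. ECORE_RESC_LOCK_RESC_ALLOC, with the default timeout; the retry
+ * values below are arbitrary placeholders):
+ *
+ *	struct ecore_resc_lock_params lock_params;
+ *	struct ecore_resc_unlock_params unlock_params;
+ *
+ *	OSAL_MEM_ZERO(&lock_params, sizeof(lock_params));
+ *	lock_params.resource = ECORE_RESC_LOCK_RESC_ALLOC;
+ *	lock_params.timeout = ECORE_MCP_RESC_LOCK_TO_DEFAULT;
+ *	lock_params.retry_num = 10;
+ *	lock_params.retry_interval = 10000;	// usec between retries
+ *	lock_params.sleep_b4_retry = true;
+ *	rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &lock_params);
+ *	if (rc == ECORE_SUCCESS && lock_params.b_granted) {
+ *		... // critical section
+ *		OSAL_MEM_ZERO(&unlock_params, sizeof(unlock_params));
+ *		unlock_params.resource = ECORE_RESC_LOCK_RESC_ALLOC;
+ *		rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);
+ *	}
+ */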
+
+#endif /* __ECORE_MCP_H__ */
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_mcp_api.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_mcp_api.h
new file mode 100644
index 00000000..190c1352
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_mcp_api.h
@@ -0,0 +1,1126 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_MCP_API_H__
+#define __ECORE_MCP_API_H__
+
+#include "ecore_status.h"
+
+struct ecore_mcp_link_speed_params {
+ bool autoneg;
+ u32 advertised_speeds; /* bitmask of DRV_SPEED_CAPABILITY */
+ u32 forced_speed; /* In Mb/s */
+};
+
+struct ecore_mcp_link_pause_params {
+ bool autoneg;
+ bool forced_rx;
+ bool forced_tx;
+};
+
+struct ecore_mcp_link_params {
+ struct ecore_mcp_link_speed_params speed;
+ struct ecore_mcp_link_pause_params pause;
+ u32 loopback_mode; /* in PMM_LOOPBACK values */
+};
+
+struct ecore_mcp_link_capabilities {
+ u32 speed_capabilities;
+ bool default_speed_autoneg;
+ u32 default_speed; /* In Mb/s */
+};
+
+struct ecore_mcp_link_state {
+ bool link_up;
+
+ u32 line_speed; /* In Mb/s */
+ u32 min_pf_rate; /* In Mb/s */
+ u32 speed; /* In Mb/s */
+ bool full_duplex;
+
+ bool an;
+ bool an_complete;
+ bool parallel_detection;
+ bool pfc_enabled;
+
+#define ECORE_LINK_PARTNER_SPEED_1G_HD (1 << 0)
+#define ECORE_LINK_PARTNER_SPEED_1G_FD (1 << 1)
+#define ECORE_LINK_PARTNER_SPEED_10G (1 << 2)
+#define ECORE_LINK_PARTNER_SPEED_20G (1 << 3)
+#define ECORE_LINK_PARTNER_SPEED_25G (1 << 4)
+#define ECORE_LINK_PARTNER_SPEED_40G (1 << 5)
+#define ECORE_LINK_PARTNER_SPEED_50G (1 << 6)
+#define ECORE_LINK_PARTNER_SPEED_100G (1 << 7)
+ u32 partner_adv_speed;
+
+ bool partner_tx_flow_ctrl_en;
+ bool partner_rx_flow_ctrl_en;
+
+#define ECORE_LINK_PARTNER_SYMMETRIC_PAUSE (1)
+#define ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE (2)
+#define ECORE_LINK_PARTNER_BOTH_PAUSE (3)
+ u8 partner_adv_pause;
+
+ bool sfp_tx_fault;
+};
+
+struct ecore_mcp_function_info {
+ u8 pause_on_host;
+
+ enum ecore_pci_personality protocol;
+
+ u8 bandwidth_min;
+ u8 bandwidth_max;
+
+ u8 mac[ETH_ALEN];
+
+ u64 wwn_port;
+ u64 wwn_node;
+
+#define ECORE_MCP_VLAN_UNSET (0xffff)
+ u16 ovlan;
+
+ u16 mtu;
+};
+
+struct ecore_mcp_nvm_common {
+ u32 offset;
+ u32 param;
+ u32 resp;
+ u32 cmd;
+};
+
+struct ecore_mcp_nvm_rd {
+ u32 *buf_size;
+ u32 *buf;
+};
+
+struct ecore_mcp_nvm_wr {
+ u32 buf_size;
+ u32 *buf;
+};
+
+struct ecore_mcp_nvm_params {
+#define ECORE_MCP_CMD (1 << 0)
+#define ECORE_MCP_NVM_RD (1 << 1)
+#define ECORE_MCP_NVM_WR (1 << 2)
+ u8 type;
+
+ struct ecore_mcp_nvm_common nvm_common;
+
+ union {
+ struct ecore_mcp_nvm_rd nvm_rd;
+ struct ecore_mcp_nvm_wr nvm_wr;
+ };
+};
+
+#ifndef __EXTRACT__LINUX__
+enum ecore_nvm_images {
+ ECORE_NVM_IMAGE_ISCSI_CFG,
+ ECORE_NVM_IMAGE_FCOE_CFG,
+};
+#endif
+
+struct ecore_mcp_drv_version {
+ u32 version;
+ u8 name[MCP_DRV_VER_STR_SIZE - 4];
+};
+
+struct ecore_mcp_lan_stats {
+ u64 ucast_rx_pkts;
+ u64 ucast_tx_pkts;
+ u32 fcs_err;
+};
+
+#ifndef ECORE_PROTO_STATS
+#define ECORE_PROTO_STATS
+struct ecore_mcp_fcoe_stats {
+ u64 rx_pkts;
+ u64 tx_pkts;
+ u32 fcs_err;
+ u32 login_failure;
+};
+
+struct ecore_mcp_iscsi_stats {
+ u64 rx_pdus;
+ u64 tx_pdus;
+ u64 rx_bytes;
+ u64 tx_bytes;
+};
+
+struct ecore_mcp_rdma_stats {
+ u64 rx_pkts;
+ u64 tx_pkts;
+ u64 rx_bytes;
+ u64 tx_byts;
+};
+
+enum ecore_mcp_protocol_type {
+ ECORE_MCP_LAN_STATS,
+ ECORE_MCP_FCOE_STATS,
+ ECORE_MCP_ISCSI_STATS,
+ ECORE_MCP_RDMA_STATS
+};
+
+union ecore_mcp_protocol_stats {
+ struct ecore_mcp_lan_stats lan_stats;
+ struct ecore_mcp_fcoe_stats fcoe_stats;
+ struct ecore_mcp_iscsi_stats iscsi_stats;
+ struct ecore_mcp_rdma_stats rdma_stats;
+};
+#endif
+
+enum ecore_ov_client {
+ ECORE_OV_CLIENT_DRV,
+ ECORE_OV_CLIENT_USER,
+ ECORE_OV_CLIENT_VENDOR_SPEC
+};
+
+enum ecore_ov_driver_state {
+ ECORE_OV_DRIVER_STATE_NOT_LOADED,
+ ECORE_OV_DRIVER_STATE_DISABLED,
+ ECORE_OV_DRIVER_STATE_ACTIVE
+};
+
+#define ECORE_MAX_NPIV_ENTRIES 128
+#define ECORE_WWN_SIZE 8
+struct ecore_fc_npiv_tbl {
+ u32 count;
+ u8 wwpn[ECORE_MAX_NPIV_ENTRIES][ECORE_WWN_SIZE];
+ u8 wwnn[ECORE_MAX_NPIV_ENTRIES][ECORE_WWN_SIZE];
+};
+
+#ifndef __EXTRACT__LINUX__
+enum ecore_led_mode {
+ ECORE_LED_MODE_OFF,
+ ECORE_LED_MODE_ON,
+ ECORE_LED_MODE_RESTORE
+};
+#endif
+
+struct ecore_temperature_sensor {
+ u8 sensor_location;
+ u8 threshold_high;
+ u8 critical;
+ u8 current_temp;
+};
+
+#define ECORE_MAX_NUM_OF_SENSORS 7
+struct ecore_temperature_info {
+ u32 num_sensors;
+ struct ecore_temperature_sensor sensors[ECORE_MAX_NUM_OF_SENSORS];
+};
+
+enum ecore_mba_img_idx {
+ ECORE_MBA_LEGACY_IDX,
+ ECORE_MBA_PCI3CLP_IDX,
+ ECORE_MBA_PCI3_IDX,
+ ECORE_MBA_FCODE_IDX,
+ ECORE_EFI_X86_IDX,
+ ECORE_EFI_IPF_IDX,
+ ECORE_EFI_EBC_IDX,
+ ECORE_EFI_X64_IDX,
+ ECORE_MAX_NUM_OF_ROMIMG
+};
+
+struct ecore_mba_vers {
+ u32 mba_vers[ECORE_MAX_NUM_OF_ROMIMG];
+};
+
+enum ecore_mfw_tlv_type {
+ ECORE_MFW_TLV_GENERIC = 0x1, /* Core driver TLVs */
+ ECORE_MFW_TLV_ETH = 0x2, /* L2 driver TLVs */
+ ECORE_MFW_TLV_FCOE = 0x4, /* FCoE protocol TLVs */
+ ECORE_MFW_TLV_ISCSI = 0x8, /* SCSI protocol TLVs */
+ ECORE_MFW_TLV_MAX = 0x16,
+};
+
+struct ecore_mfw_tlv_generic {
+ u16 feat_flags;
+ bool feat_flags_set;
+ u64 local_mac;
+ bool local_mac_set;
+ u64 additional_mac1;
+ bool additional_mac1_set;
+ u64 additional_mac2;
+ bool additional_mac2_set;
+ u8 drv_state;
+ bool drv_state_set;
+ u8 pxe_progress;
+ bool pxe_progress_set;
+ u64 rx_frames;
+ bool rx_frames_set;
+ u64 rx_bytes;
+ bool rx_bytes_set;
+ u64 tx_frames;
+ bool tx_frames_set;
+ u64 tx_bytes;
+ bool tx_bytes_set;
+};
+
+struct ecore_mfw_tlv_eth {
+ u16 lso_maxoff_size;
+ bool lso_maxoff_size_set;
+ u16 lso_minseg_size;
+ bool lso_minseg_size_set;
+ u8 prom_mode;
+ bool prom_mode_set;
+ u16 tx_descr_size;
+ bool tx_descr_size_set;
+ u16 rx_descr_size;
+ bool rx_descr_size_set;
+ u16 netq_count;
+ bool netq_count_set;
+ u32 tcp4_offloads;
+ bool tcp4_offloads_set;
+ u32 tcp6_offloads;
+ bool tcp6_offloads_set;
+ u16 tx_descr_qdepth;
+ bool tx_descr_qdepth_set;
+ u16 rx_descr_qdepth;
+ bool rx_descr_qdepth_set;
+ u8 iov_offload;
+ bool iov_offload_set;
+ u8 txqs_empty;
+ bool txqs_empty_set;
+ u8 rxqs_empty;
+ bool rxqs_empty_set;
+ u8 num_txqs_full;
+ bool num_txqs_full_set;
+ u8 num_rxqs_full;
+ bool num_rxqs_full_set;
+};
+
+struct ecore_mfw_tlv_fcoe {
+ u8 scsi_timeout;
+ bool scsi_timeout_set;
+ u32 rt_tov;
+ bool rt_tov_set;
+ u32 ra_tov;
+ bool ra_tov_set;
+ u32 ed_tov;
+ bool ed_tov_set;
+ u32 cr_tov;
+ bool cr_tov_set;
+ u8 boot_type;
+ bool boot_type_set;
+ u8 npiv_state;
+ bool npiv_state_set;
+ u32 num_npiv_ids;
+ bool num_npiv_ids_set;
+ u8 switch_name[8];
+ bool switch_name_set;
+ u16 switch_portnum;
+ bool switch_portnum_set;
+ u8 switch_portid[3];
+ bool switch_portid_set;
+ u8 vendor_name[8];
+ bool vendor_name_set;
+ u8 switch_model[8];
+ bool switch_model_set;
+ u8 switch_fw_version[8];
+ bool switch_fw_version_set;
+ u8 qos_pri;
+ bool qos_pri_set;
+ u8 port_alias[3];
+ bool port_alias_set;
+ u8 port_state;
+ bool port_state_set;
+ u16 fip_tx_descr_size;
+ bool fip_tx_descr_size_set;
+ u16 fip_rx_descr_size;
+ bool fip_rx_descr_size_set;
+ u16 link_failures;
+ bool link_failures_set;
+ u8 fcoe_boot_progress;
+ bool fcoe_boot_progress_set;
+ u64 rx_bcast;
+ bool rx_bcast_set;
+ u64 tx_bcast;
+ bool tx_bcast_set;
+ u16 fcoe_txq_depth;
+ bool fcoe_txq_depth_set;
+ u16 fcoe_rxq_depth;
+ bool fcoe_rxq_depth_set;
+ u64 fcoe_rx_frames;
+ bool fcoe_rx_frames_set;
+ u64 fcoe_rx_bytes;
+ bool fcoe_rx_bytes_set;
+ u64 fcoe_tx_frames;
+ bool fcoe_tx_frames_set;
+ u64 fcoe_tx_bytes;
+ bool fcoe_tx_bytes_set;
+ u16 crc_count;
+ bool crc_count_set;
+ u32 crc_err_src_fcid[5];
+ bool crc_err_src_fcid_set[5];
+ u8 crc_err_tstamp[5][14];
+ bool crc_err_tstamp_set[5];
+ u16 losync_err;
+ bool losync_err_set;
+ u16 losig_err;
+ bool losig_err_set;
+ u16 primtive_err;
+ bool primtive_err_set;
+ u16 disparity_err;
+ bool disparity_err_set;
+ u16 code_violation_err;
+ bool code_violation_err_set;
+ u32 flogi_param[4];
+ bool flogi_param_set[4];
+ u8 flogi_tstamp[14];
+ bool flogi_tstamp_set;
+ u32 flogi_acc_param[4];
+ bool flogi_acc_param_set[4];
+ u8 flogi_acc_tstamp[14];
+ bool flogi_acc_tstamp_set;
+ u32 flogi_rjt;
+ bool flogi_rjt_set;
+ u8 flogi_rjt_tstamp[14];
+ bool flogi_rjt_tstamp_set;
+ u32 fdiscs;
+ bool fdiscs_set;
+ u8 fdisc_acc;
+ bool fdisc_acc_set;
+ u8 fdisc_rjt;
+ bool fdisc_rjt_set;
+ u8 plogi;
+ bool plogi_set;
+ u8 plogi_acc;
+ bool plogi_acc_set;
+ u8 plogi_rjt;
+ bool plogi_rjt_set;
+ u32 plogi_dst_fcid[5];
+ bool plogi_dst_fcid_set[5];
+ u8 plogi_tstamp[5][14];
+ bool plogi_tstamp_set[5];
+ u32 plogi_acc_src_fcid[5];
+ bool plogi_acc_src_fcid_set[5];
+ u8 plogi_acc_tstamp[5][14];
+ bool plogi_acc_tstamp_set[5];
+ u8 tx_plogos;
+ bool tx_plogos_set;
+ u8 plogo_acc;
+ bool plogo_acc_set;
+ u8 plogo_rjt;
+ bool plogo_rjt_set;
+ u32 plogo_src_fcid[5];
+ bool plogo_src_fcid_set[5];
+ u8 plogo_tstamp[5][14];
+ bool plogo_tstamp_set[5];
+ u8 rx_logos;
+ bool rx_logos_set;
+ u8 tx_accs;
+ bool tx_accs_set;
+ u8 tx_prlis;
+ bool tx_prlis_set;
+ u8 rx_accs;
+ bool rx_accs_set;
+ u8 tx_abts;
+ bool tx_abts_set;
+ u8 rx_abts_acc;
+ bool rx_abts_acc_set;
+ u8 rx_abts_rjt;
+ bool rx_abts_rjt_set;
+ u32 abts_dst_fcid[5];
+ bool abts_dst_fcid_set[5];
+ u8 abts_tstamp[5][14];
+ bool abts_tstamp_set[5];
+ u8 rx_rscn;
+ bool rx_rscn_set;
+ u32 rx_rscn_nport[4];
+ bool rx_rscn_nport_set[4];
+ u8 tx_lun_rst;
+ bool tx_lun_rst_set;
+ u8 abort_task_sets;
+ bool abort_task_sets_set;
+ u8 tx_tprlos;
+ bool tx_tprlos_set;
+ u8 tx_nos;
+ bool tx_nos_set;
+ u8 rx_nos;
+ bool rx_nos_set;
+ u8 ols;
+ bool ols_set;
+ u8 lr;
+ bool lr_set;
+ u8 lrr;
+ bool lrr_set;
+ u8 tx_lip;
+ bool tx_lip_set;
+ u8 rx_lip;
+ bool rx_lip_set;
+ u8 eofa;
+ bool eofa_set;
+ u8 eofni;
+ bool eofni_set;
+ u8 scsi_chks;
+ bool scsi_chks_set;
+ u8 scsi_cond_met;
+ bool scsi_cond_met_set;
+ u8 scsi_busy;
+ bool scsi_busy_set;
+ u8 scsi_inter;
+ bool scsi_inter_set;
+ u8 scsi_inter_cond_met;
+ bool scsi_inter_cond_met_set;
+ u8 scsi_rsv_conflicts;
+ bool scsi_rsv_conflicts_set;
+ u8 scsi_tsk_full;
+ bool scsi_tsk_full_set;
+ u8 scsi_aca_active;
+ bool scsi_aca_active_set;
+ u8 scsi_tsk_abort;
+ bool scsi_tsk_abort_set;
+ u32 scsi_rx_chk[5];
+ bool scsi_rx_chk_set[5];
+ u8 scsi_chk_tstamp[5][14];
+ bool scsi_chk_tstamp_set[5];
+};
+
+struct ecore_mfw_tlv_iscsi {
+ u8 target_llmnr;
+ bool target_llmnr_set;
+ u8 header_digest;
+ bool header_digest_set;
+ u8 data_digest;
+ bool data_digest_set;
+ u8 auth_method;
+ bool auth_method_set;
+ u16 boot_taget_portal;
+ bool boot_taget_portal_set;
+ u16 frame_size;
+ bool frame_size_set;
+ u16 tx_desc_size;
+ bool tx_desc_size_set;
+ u16 rx_desc_size;
+ bool rx_desc_size_set;
+ u8 boot_progress;
+ bool boot_progress_set;
+ u16 tx_desc_qdepth;
+ bool tx_desc_qdepth_set;
+ u16 rx_desc_qdepth;
+ bool rx_desc_qdepth_set;
+ u64 rx_frames;
+ bool rx_frames_set;
+ u64 rx_bytes;
+ bool rx_bytes_set;
+ u64 tx_frames;
+ bool tx_frames_set;
+ u64 tx_bytes;
+ bool tx_bytes_set;
+};
+
+union ecore_mfw_tlv_data {
+ struct ecore_mfw_tlv_generic generic;
+ struct ecore_mfw_tlv_eth eth;
+ struct ecore_mfw_tlv_fcoe fcoe;
+ struct ecore_mfw_tlv_iscsi iscsi;
+};
+
+/**
+ * @brief - returns the link params of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link params
+ */
+struct ecore_mcp_link_params *ecore_mcp_get_link_params(struct ecore_hwfn *);
+
+/**
+ * @brief - return the link state of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link state
+ */
+struct ecore_mcp_link_state *ecore_mcp_get_link_state(struct ecore_hwfn *);
+
+/**
+ * @brief - return the link capabilities of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link capabilities
+ */
+struct ecore_mcp_link_capabilities
+*ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief Request the MFW to set the link according to 'link_input'.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param b_up - raise link if `true'. Reset link if `false'.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool b_up);
+
+/**
+ * @brief Get the management firmware version value
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_mfw_ver - mfw version value
+ * @param p_running_bundle_id - image id in nvram; Optional.
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_mfw_ver,
+ u32 *p_running_bundle_id);
+
+/**
+ * @brief Get media type value of the port.
+ *
+ * @param p_dev - ecore dev pointer
+ * @param media_type - media type value
+ *
+ * @return enum _ecore_status_t -
+ * ECORE_SUCCESS - Operation was successful.
+ * ECORE_BUSY - Operation failed
+ */
+enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
+ u32 *media_type);
+
+/**
+ * @brief - Sends a command to the MCP mailbox.
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param cmd - command to be sent to the MCP
+ * @param param - optional param
+ * @param o_mcp_resp - the MCP response code (excluding sequence)
+ * @param o_mcp_param - optional parameter provided by the MCP response
+ *
+ * @return enum _ecore_status_t -
+ * ECORE_SUCCESS - operation was successful
+ * ECORE_BUSY - operation failed
+ */
+enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 cmd, u32 param,
+ u32 *o_mcp_resp, u32 *o_mcp_param);
+
+/**
+ * @brief - drains the nig, allowing completion to pass in case of pauses.
+ * (Should be called only from sleepable context)
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+#ifndef LINUX_REMOVE
+/**
+ * @brief - return the mcp function info of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to mcp function info
+ */
+const struct ecore_mcp_function_info
+*ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn);
+#endif
+
+/**
+ * @brief - Function for reading/manipulating the nvram. The following
+ *          functionalities are supported:
+ * 1. Read: Read the specified nvram offset.
+ * input values:
+ * type - ECORE_MCP_NVM_RD
+ * cmd - command code (e.g. DRV_MSG_CODE_NVM_READ_NVRAM)
+ * offset - nvm offset
+ *
+ * output values:
+ * buf - buffer
+ * buf_size - buffer size
+ *
+ * 2. Write: Write the data at the specified nvram offset
+ * input values:
+ * type - ECORE_MCP_NVM_WR
+ * cmd - command code (e.g. DRV_MSG_CODE_NVM_WRITE_NVRAM)
+ * offset - nvm offset
+ * buf - buffer
+ * buf_size - buffer size
+ *
+ * 3. Command: Send the NVM command to MCP.
+ * input values:
+ * type - ECORE_MCP_CMD
+ * cmd - command code (e.g. DRV_MSG_CODE_NVM_DEL_FILE)
+ * offset - nvm offset
+ *
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param params
+ *
+ * @return ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mcp_nvm_params *params);
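+
+/* Illustrative read sketch (mirrors ecore_mcp_get_mba_versions() in
+ * ecore_mcp.c; the command and the caller-provided u32 buffer 'buf' are
+ * placeholders chosen for the example):
+ *
+ *	struct ecore_mcp_nvm_params params;
+ *	u32 buf_size;
+ *
+ *	OSAL_MEM_ZERO(&params, sizeof(params));
+ *	params.type = ECORE_MCP_NVM_RD;
+ *	params.nvm_common.cmd = DRV_MSG_CODE_GET_MBA_VERSION;
+ *	params.nvm_common.offset = 0;
+ *	params.nvm_rd.buf = &buf[0];
+ *	params.nvm_rd.buf_size = &buf_size;
+ *	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+ *	if (rc == ECORE_SUCCESS &&
+ *	    (params.nvm_common.resp & FW_MSG_CODE_MASK) == FW_MSG_CODE_NVM_OK)
+ *		... // buf holds the data, buf_size its length
+ */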
+
+#ifndef LINUX_REMOVE
+/**
+ * @brief - count the number of functions with a matching personality on the
+ *          engine.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param personalities - a bitmask of ecore_pci_personality values
+ *
+ * @returns the count of all functions on the engine whose personality
+ *          matches one of the given bitmasks.
+ */
+int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 personalities);
+#endif
+
+/**
+ * @brief Get the flash size value
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_flash_size - flash size in bytes to be filled.
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_flash_size);
+
+/**
+ * @brief Send driver version to MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_ver - driver version value and protocol driver name
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mcp_drv_version *p_ver);
+
+/**
+ * @brief Read the MFW process kill counter
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return u32
+ */
+u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Trigger a recovery process
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Notify MFW about the change in base device properties
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param client - ecore client type
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_ov_client client);
+
+/**
+ * @brief Notify MFW about the driver state
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param drv_state - Driver state
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_ov_driver_state drv_state);
+
+/**
+ * @brief Read NPIV settings from the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_table - Array to hold the FC NPIV data. The client needs to
+ *                  allocate the required buffer. The field 'count' specifies
+ *                  the number of NPIV entries. A value of 0 means the table
+ *                  was not populated.
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_fc_npiv_tbl *p_table);
+
+/**
+ * @brief Send MTU size to MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param mtu - MTU size
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u16 mtu);
+
+/**
+ * @brief Set LED status
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param mode - LED mode
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_led_mode mode);
+
+/**
+ * @brief Set secure mode
+ *
+ * @param p_dev
+ * @param addr - nvm offset
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
+ u32 addr);
+
+/**
+ * @brief Write to phy
+ *
+ * @param p_dev
+ * @param addr - nvm offset
+ * @param cmd - nvm command
+ * @param p_buf - nvm write buffer
+ * @param len - buffer len
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
+ u32 addr, u8 *p_buf, u32 len);
+
+/**
+ * @brief Write to nvm
+ *
+ * @param p_dev
+ * @param addr - nvm offset
+ * @param cmd - nvm command
+ * @param p_buf - nvm write buffer
+ * @param len - buffer len
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
+ u32 addr, u8 *p_buf, u32 len);
+
+/**
+ * @brief Put file begin
+ *
+ * @param p_dev
+ * @param addr - nvm offset
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
+ u32 addr);
+
+/**
+ * @brief Delete file
+ *
+ * @param p_dev
+ * @param addr - nvm offset
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev,
+ u32 addr);
+
+/**
+ * @brief Check latest response
+ *
+ * @param p_dev
+ * @param p_buf - buffer to be filled with the latest NVM command response
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf);
+
+/**
+ * @brief Read from phy
+ *
+ * @param p_dev
+ * @param addr - nvm offset
+ * @param cmd - nvm command
+ * @param p_buf - buffer to read into
+ * @param len - buffer len
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
+ u32 addr, u8 *p_buf, u32 len);
+
+/**
+ * @brief Read from nvm
+ *
+ * @param p_dev
+ * @param addr - nvm offset
+ * @param p_buf - buffer to read into
+ * @param len - buffer len
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
+ u8 *p_buf, u32 len);
+
+/**
+ * @brief Read from sfp
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param port - transceiver port
+ * @param addr - I2C address
+ * @param offset - offset in sfp
+ * @param len - buffer length
+ * @param p_buf - buffer to read into
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 port, u32 addr, u32 offset,
+ u32 len, u8 *p_buf);
+
+/**
+ * @brief Write to sfp
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param port - transceiver port
+ * @param addr - I2C address
+ * @param offset - offset in sfp
+ * @param len - buffer length
+ * @param p_buf - buffer to write from
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 port, u32 addr, u32 offset,
+ u32 len, u8 *p_buf);
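+
+/* Illustrative sketch (the port, I2C address, offset and length below are
+ * assumptions made for the example; 0xA0 is merely the conventional SFP
+ * EEPROM address):
+ *
+ *	u8 buf[8];
+ *
+ *	rc = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt, MFW_PORT(p_hwfn),
+ *				    0xA0, 0, sizeof(buf), buf);
+ *	if (rc == ECORE_SUCCESS)
+ *		... // buf holds the first bytes of the module EEPROM
+ */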
+
+/**
+ * @brief Gpio read
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param gpio - gpio number
+ * @param gpio_val - value read from gpio
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 gpio, u32 *gpio_val);
+
+/**
+ * @brief Gpio write
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param gpio - gpio number
+ * @param gpio_val - value to write to gpio
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 gpio, u16 gpio_val);
+
+/**
+ * @brief Gpio get information
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param gpio - gpio number
+ * @param gpio_direction - gpio is output (0) or input (1)
+ * @param gpio_ctrl - gpio control is uninitialized (0),
+ * path 0 (1), path 1 (2) or shared(3)
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 gpio, u32 *gpio_direction,
+ u32 *gpio_ctrl);
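+
+/* Illustrative sketch (the GPIO number is an arbitrary placeholder):
+ *
+ *	u32 val;
+ *	u16 gpio_num = 0;
+ *
+ *	rc = ecore_mcp_gpio_read(p_hwfn, p_ptt, gpio_num, &val);
+ *	if (rc == ECORE_SUCCESS)
+ *		rc = ecore_mcp_gpio_write(p_hwfn, p_ptt, gpio_num,
+ *					  (u16)(val ^ 1));	// toggle bit 0
+ */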
+
+/**
+ * @brief Bist register test
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Bist clock test
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Bist nvm test - get number of images
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param num_images - number of images if operation was
+ * successful. 0 if not.
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
+ struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *num_images);
+
+/**
+ * @brief Bist nvm test - get image attributes by index
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param p_image_att - Attributes of image
+ * @param image_index - Index of image to get information for
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
+ struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct bist_nvm_image_att *p_image_att,
+ u32 image_index);
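+
+/* Illustrative sketch iterating over the NVM images reported by the MFW
+ * (mirrors the pairing of the two calls above):
+ *
+ *	struct bist_nvm_image_att image_att;
+ *	u32 num_images, i;
+ *
+ *	rc = ecore_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt,
+ *						    &num_images);
+ *	for (i = 0; rc == ECORE_SUCCESS && i < num_images; i++)
+ *		rc = ecore_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt,
+ *							   &image_att, i);
+ */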
+
+/**
+ * @brief ecore_mcp_get_temperature_info - get the status of the temperature
+ * sensors
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param p_temp_info - A pointer to an ecore_temperature_info structure to
+ * be filled with the temperature data
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_temperature_info *p_temp_info);
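+
+/* Illustrative sketch (mirrors the per-sensor fields filled by the
+ * implementation in ecore_mcp.c):
+ *
+ *	struct ecore_temperature_info temp_info;
+ *	u32 i;
+ *
+ *	rc = ecore_mcp_get_temperature_info(p_hwfn, p_ptt, &temp_info);
+ *	for (i = 0; rc == ECORE_SUCCESS && i < temp_info.num_sensors; i++)
+ *		... // temp_info.sensors[i].current_temp, .threshold_high, etc.
+ */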
+
+/**
+ * @brief Get MBA versions - get MBA sub images versions
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param p_mba_vers - MBA versions array to fill
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_get_mba_versions(
+ struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mba_vers *p_mba_vers);
+
+/**
+ * @brief Count memory ecc events
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param num_events - number of memory ecc events
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u64 *num_events);
+
+struct ecore_mdump_info {
+ u32 reason;
+ u32 version;
+ u32 config;
+ u32 epoch;
+ u32 num_of_logs;
+ u32 valid_logs;
+};
+
+/**
+ * @brief - Gets the MFW crash dump configuration and logs info.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_mdump_info
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t
+ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mdump_info *p_mdump_info);
+
+/**
+ * @brief - Clears the MFW crash dump logs.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - Processes the TLV request from the MFW, i.e., gets the required
+ * TLV info from the ecore client and sends it to the MFW.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mfw_process_tlv_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+#endif
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_mng_tlv.c b/src/seastar/dpdk/drivers/net/qede/base/ecore_mng_tlv.c
new file mode 100644
index 00000000..0bf1be88
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_mng_tlv.c
@@ -0,0 +1,1535 @@
+#include "bcm_osal.h"
+#include "ecore.h"
+#include "ecore_status.h"
+#include "ecore_mcp.h"
+#include "ecore_hw.h"
+#include "reg_addr.h"
+
+#define TLV_TYPE(p) (p[0])
+#define TLV_LENGTH(p) (p[1])
+#define TLV_FLAGS(p) (p[3])
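+
+/* These accessors assume 'p' points at a raw struct ecore_drv_tlv_hdr as
+ * laid out in ecore_mcp.h: byte 0 - type, byte 1 - length (in dwords,
+ * excluding the header), byte 2 - reserved, byte 3 - flags.
+ */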
+
+static enum _ecore_status_t
+ecore_mfw_get_tlv_group(u8 tlv_type, u8 *tlv_group)
+{
+ switch (tlv_type) {
+ case DRV_TLV_FEATURE_FLAGS:
+ case DRV_TLV_LOCAL_ADMIN_ADDR:
+ case DRV_TLV_ADDITIONAL_MAC_ADDR_1:
+ case DRV_TLV_ADDITIONAL_MAC_ADDR_2:
+ case DRV_TLV_OS_DRIVER_STATES:
+ case DRV_TLV_PXE_BOOT_PROGRESS:
+ case DRV_TLV_RX_FRAMES_RECEIVED:
+ case DRV_TLV_RX_BYTES_RECEIVED:
+ case DRV_TLV_TX_FRAMES_SENT:
+ case DRV_TLV_TX_BYTES_SENT:
+ *tlv_group |= ECORE_MFW_TLV_GENERIC;
+ break;
+ case DRV_TLV_LSO_MAX_OFFLOAD_SIZE:
+ case DRV_TLV_LSO_MIN_SEGMENT_COUNT:
+ case DRV_TLV_PROMISCUOUS_MODE:
+ case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG:
+ case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4:
+ case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6:
+ case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ case DRV_TLV_IOV_OFFLOAD:
+ case DRV_TLV_TX_QUEUES_EMPTY:
+ case DRV_TLV_RX_QUEUES_EMPTY:
+ case DRV_TLV_TX_QUEUES_FULL:
+ case DRV_TLV_RX_QUEUES_FULL:
+ *tlv_group |= ECORE_MFW_TLV_ETH;
+ break;
+ case DRV_TLV_SCSI_TO:
+ case DRV_TLV_R_T_TOV:
+ case DRV_TLV_R_A_TOV:
+ case DRV_TLV_E_D_TOV:
+ case DRV_TLV_CR_TOV:
+ case DRV_TLV_BOOT_TYPE:
+ case DRV_TLV_NPIV_STATE:
+ case DRV_TLV_NUM_OF_NPIV_IDS:
+ case DRV_TLV_SWITCH_NAME:
+ case DRV_TLV_SWITCH_PORT_NUM:
+ case DRV_TLV_SWITCH_PORT_ID:
+ case DRV_TLV_VENDOR_NAME:
+ case DRV_TLV_SWITCH_MODEL:
+ case DRV_TLV_SWITCH_FW_VER:
+ case DRV_TLV_QOS_PRIORITY_PER_802_1P:
+ case DRV_TLV_PORT_ALIAS:
+ case DRV_TLV_PORT_STATE:
+ case DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_LINK_FAILURE_COUNT:
+ case DRV_TLV_FCOE_BOOT_PROGRESS:
+ case DRV_TLV_RX_BROADCAST_PACKETS:
+ case DRV_TLV_TX_BROADCAST_PACKETS:
+ case DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ case DRV_TLV_FCOE_RX_FRAMES_RECEIVED:
+ case DRV_TLV_FCOE_RX_BYTES_RECEIVED:
+ case DRV_TLV_FCOE_TX_FRAMES_SENT:
+ case DRV_TLV_FCOE_TX_BYTES_SENT:
+ case DRV_TLV_CRC_ERROR_COUNT:
+ case DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_1_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_2_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_3_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_4_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_5_TIMESTAMP:
+ case DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT:
+ case DRV_TLV_LOSS_OF_SIGNAL_ERRORS:
+ case DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT:
+ case DRV_TLV_DISPARITY_ERROR_COUNT:
+ case DRV_TLV_CODE_VIOLATION_ERROR_COUNT:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4:
+ case DRV_TLV_LAST_FLOGI_TIMESTAMP:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4:
+ case DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP:
+ case DRV_TLV_LAST_FLOGI_RJT:
+ case DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP:
+ case DRV_TLV_FDISCS_SENT_COUNT:
+ case DRV_TLV_FDISC_ACCS_RECEIVED:
+ case DRV_TLV_FDISC_RJTS_RECEIVED:
+ case DRV_TLV_PLOGI_SENT_COUNT:
+ case DRV_TLV_PLOGI_ACCS_RECEIVED:
+ case DRV_TLV_PLOGI_RJTS_RECEIVED:
+ case DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_1_TIMESTAMP:
+ case DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_2_TIMESTAMP:
+ case DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_3_TIMESTAMP:
+ case DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_4_TIMESTAMP:
+ case DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_5_TIMESTAMP:
+ case DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_1_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_2_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_3_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_4_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_5_ACC_TIMESTAMP:
+ case DRV_TLV_LOGOS_ISSUED:
+ case DRV_TLV_LOGO_ACCS_RECEIVED:
+ case DRV_TLV_LOGO_RJTS_RECEIVED:
+ case DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_1_TIMESTAMP:
+ case DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_2_TIMESTAMP:
+ case DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_3_TIMESTAMP:
+ case DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_4_TIMESTAMP:
+ case DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_5_TIMESTAMP:
+ case DRV_TLV_LOGOS_RECEIVED:
+ case DRV_TLV_ACCS_ISSUED:
+ case DRV_TLV_PRLIS_ISSUED:
+ case DRV_TLV_ACCS_RECEIVED:
+ case DRV_TLV_ABTS_SENT_COUNT:
+ case DRV_TLV_ABTS_ACCS_RECEIVED:
+ case DRV_TLV_ABTS_RJTS_RECEIVED:
+ case DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_1_TIMESTAMP:
+ case DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_2_TIMESTAMP:
+ case DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_3_TIMESTAMP:
+ case DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_4_TIMESTAMP:
+ case DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_5_TIMESTAMP:
+ case DRV_TLV_RSCNS_RECEIVED:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4:
+ case DRV_TLV_LUN_RESETS_ISSUED:
+ case DRV_TLV_ABORT_TASK_SETS_ISSUED:
+ case DRV_TLV_TPRLOS_SENT:
+ case DRV_TLV_NOS_SENT_COUNT:
+ case DRV_TLV_NOS_RECEIVED_COUNT:
+ case DRV_TLV_OLS_COUNT:
+ case DRV_TLV_LR_COUNT:
+ case DRV_TLV_LRR_COUNT:
+ case DRV_TLV_LIP_SENT_COUNT:
+ case DRV_TLV_LIP_RECEIVED_COUNT:
+ case DRV_TLV_EOFA_COUNT:
+ case DRV_TLV_EOFNI_COUNT:
+ case DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT:
+ case DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT:
+ case DRV_TLV_SCSI_STATUS_BUSY_COUNT:
+ case DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT:
+ case DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT:
+ case DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT:
+ case DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT:
+ case DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT:
+ case DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT:
+ case DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_1_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_2_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_3_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_4_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_5_TIMESTAMP:
+		*tlv_group |= ECORE_MFW_TLV_FCOE;
+ break;
+ case DRV_TLV_TARGET_LLMNR_ENABLED:
+ case DRV_TLV_HEADER_DIGEST_FLAG_ENABLED:
+ case DRV_TLV_DATA_DIGEST_FLAG_ENABLED:
+ case DRV_TLV_AUTHENTICATION_METHOD:
+ case DRV_TLV_ISCSI_BOOT_TARGET_PORTAL:
+ case DRV_TLV_MAX_FRAME_SIZE:
+ case DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_ISCSI_BOOT_PROGRESS:
+ case DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ case DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED:
+ case DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED:
+ case DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT:
+ case DRV_TLV_ISCSI_PDU_TX_BYTES_SENT:
+ *tlv_group |= ECORE_MFW_TLV_ISCSI;
+ break;
+ default:
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static int
+ecore_mfw_get_gen_tlv_value(struct ecore_drv_tlv_hdr *p_tlv,
+ struct ecore_mfw_tlv_generic *p_drv_buf,
+ u8 **p_tlv_buf)
+{
+ switch (p_tlv->tlv_type) {
+ case DRV_TLV_FEATURE_FLAGS:
+ if (p_drv_buf->feat_flags_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->feat_flags;
+ return sizeof(p_drv_buf->feat_flags);
+ }
+ break;
+ case DRV_TLV_LOCAL_ADMIN_ADDR:
+ if (p_drv_buf->local_mac_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->local_mac;
+ return sizeof(p_drv_buf->local_mac);
+ }
+ break;
+ case DRV_TLV_ADDITIONAL_MAC_ADDR_1:
+ if (p_drv_buf->additional_mac1_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->additional_mac1;
+ return sizeof(p_drv_buf->additional_mac1);
+ }
+ break;
+ case DRV_TLV_ADDITIONAL_MAC_ADDR_2:
+ if (p_drv_buf->additional_mac2_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->additional_mac2;
+ return sizeof(p_drv_buf->additional_mac2);
+ }
+ break;
+ case DRV_TLV_OS_DRIVER_STATES:
+ if (p_drv_buf->drv_state_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->drv_state;
+ return sizeof(p_drv_buf->drv_state);
+ }
+ break;
+ case DRV_TLV_PXE_BOOT_PROGRESS:
+ if (p_drv_buf->pxe_progress_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->pxe_progress;
+ return sizeof(p_drv_buf->pxe_progress);
+ }
+ break;
+ case DRV_TLV_RX_FRAMES_RECEIVED:
+ if (p_drv_buf->rx_frames_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_frames;
+ return sizeof(p_drv_buf->rx_frames);
+ }
+ break;
+ case DRV_TLV_RX_BYTES_RECEIVED:
+ if (p_drv_buf->rx_bytes_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_bytes;
+ return sizeof(p_drv_buf->rx_bytes);
+ }
+ break;
+ case DRV_TLV_TX_FRAMES_SENT:
+ if (p_drv_buf->tx_frames_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_frames;
+ return sizeof(p_drv_buf->tx_frames);
+ }
+ break;
+ case DRV_TLV_TX_BYTES_SENT:
+ if (p_drv_buf->tx_bytes_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_bytes;
+ return sizeof(p_drv_buf->tx_bytes);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+static int
+ecore_mfw_get_eth_tlv_value(struct ecore_drv_tlv_hdr *p_tlv,
+ struct ecore_mfw_tlv_eth *p_drv_buf,
+ u8 **p_tlv_buf)
+{
+ switch (p_tlv->tlv_type) {
+ case DRV_TLV_LSO_MAX_OFFLOAD_SIZE:
+ if (p_drv_buf->lso_maxoff_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->lso_maxoff_size;
+ return sizeof(p_drv_buf->lso_maxoff_size);
+ }
+ break;
+ case DRV_TLV_LSO_MIN_SEGMENT_COUNT:
+ if (p_drv_buf->lso_minseg_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->lso_minseg_size;
+ return sizeof(p_drv_buf->lso_minseg_size);
+ }
+ break;
+ case DRV_TLV_PROMISCUOUS_MODE:
+ if (p_drv_buf->prom_mode_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->prom_mode;
+ return sizeof(p_drv_buf->prom_mode);
+ }
+ break;
+ case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->tx_descr_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_descr_size;
+ return sizeof(p_drv_buf->tx_descr_size);
+ }
+ break;
+ case DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->rx_descr_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_descr_size;
+ return sizeof(p_drv_buf->rx_descr_size);
+ }
+ break;
+ case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG:
+ if (p_drv_buf->netq_count_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->netq_count;
+ return sizeof(p_drv_buf->netq_count);
+ }
+ break;
+ case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4:
+ if (p_drv_buf->tcp4_offloads_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tcp4_offloads;
+ return sizeof(p_drv_buf->tcp4_offloads);
+ }
+ break;
+ case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6:
+ if (p_drv_buf->tcp6_offloads_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tcp6_offloads;
+ return sizeof(p_drv_buf->tcp6_offloads);
+ }
+ break;
+ case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->tx_descr_qdepth_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_descr_qdepth;
+ return sizeof(p_drv_buf->tx_descr_qdepth);
+ }
+ break;
+ case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->rx_descr_qdepth_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_descr_qdepth;
+ return sizeof(p_drv_buf->rx_descr_qdepth);
+ }
+ break;
+ case DRV_TLV_IOV_OFFLOAD:
+ if (p_drv_buf->iov_offload_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->iov_offload;
+ return sizeof(p_drv_buf->iov_offload);
+ }
+ break;
+ case DRV_TLV_TX_QUEUES_EMPTY:
+ if (p_drv_buf->txqs_empty_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->txqs_empty;
+ return sizeof(p_drv_buf->txqs_empty);
+ }
+ break;
+ case DRV_TLV_RX_QUEUES_EMPTY:
+ if (p_drv_buf->rxqs_empty_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rxqs_empty;
+ return sizeof(p_drv_buf->rxqs_empty);
+ }
+ break;
+ case DRV_TLV_TX_QUEUES_FULL:
+ if (p_drv_buf->num_txqs_full_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->num_txqs_full;
+ return sizeof(p_drv_buf->num_txqs_full);
+ }
+ break;
+ case DRV_TLV_RX_QUEUES_FULL:
+ if (p_drv_buf->num_rxqs_full_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->num_rxqs_full;
+ return sizeof(p_drv_buf->num_rxqs_full);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+static int
+ecore_mfw_get_fcoe_tlv_value(struct ecore_drv_tlv_hdr *p_tlv,
+ struct ecore_mfw_tlv_fcoe *p_drv_buf,
+ u8 **p_tlv_buf)
+{
+ switch (p_tlv->tlv_type) {
+ case DRV_TLV_SCSI_TO:
+ if (p_drv_buf->scsi_timeout_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_timeout;
+ return sizeof(p_drv_buf->scsi_timeout);
+ }
+ break;
+ case DRV_TLV_R_T_TOV:
+ if (p_drv_buf->rt_tov_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rt_tov;
+ return sizeof(p_drv_buf->rt_tov);
+ }
+ break;
+ case DRV_TLV_R_A_TOV:
+ if (p_drv_buf->ra_tov_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->ra_tov;
+ return sizeof(p_drv_buf->ra_tov);
+ }
+ break;
+ case DRV_TLV_E_D_TOV:
+ if (p_drv_buf->ed_tov_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->ed_tov;
+ return sizeof(p_drv_buf->ed_tov);
+ }
+ break;
+ case DRV_TLV_CR_TOV:
+ if (p_drv_buf->cr_tov_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->cr_tov;
+ return sizeof(p_drv_buf->cr_tov);
+ }
+ break;
+ case DRV_TLV_BOOT_TYPE:
+ if (p_drv_buf->boot_type_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->boot_type;
+ return sizeof(p_drv_buf->boot_type);
+ }
+ break;
+ case DRV_TLV_NPIV_STATE:
+ if (p_drv_buf->npiv_state_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->npiv_state;
+ return sizeof(p_drv_buf->npiv_state);
+ }
+ break;
+ case DRV_TLV_NUM_OF_NPIV_IDS:
+ if (p_drv_buf->num_npiv_ids_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->num_npiv_ids;
+ return sizeof(p_drv_buf->num_npiv_ids);
+ }
+ break;
+ case DRV_TLV_SWITCH_NAME:
+ if (p_drv_buf->switch_name_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->switch_name;
+ return sizeof(p_drv_buf->switch_name);
+ }
+ break;
+ case DRV_TLV_SWITCH_PORT_NUM:
+ if (p_drv_buf->switch_portnum_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->switch_portnum;
+ return sizeof(p_drv_buf->switch_portnum);
+ }
+ break;
+ case DRV_TLV_SWITCH_PORT_ID:
+ if (p_drv_buf->switch_portid_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->switch_portid;
+ return sizeof(p_drv_buf->switch_portid);
+ }
+ break;
+ case DRV_TLV_VENDOR_NAME:
+ if (p_drv_buf->vendor_name_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->vendor_name;
+ return sizeof(p_drv_buf->vendor_name);
+ }
+ break;
+ case DRV_TLV_SWITCH_MODEL:
+ if (p_drv_buf->switch_model_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->switch_model;
+ return sizeof(p_drv_buf->switch_model);
+ }
+ break;
+ case DRV_TLV_SWITCH_FW_VER:
+ if (p_drv_buf->switch_fw_version_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->switch_fw_version;
+ return sizeof(p_drv_buf->switch_fw_version);
+ }
+ break;
+ case DRV_TLV_QOS_PRIORITY_PER_802_1P:
+ if (p_drv_buf->qos_pri_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->qos_pri;
+ return sizeof(p_drv_buf->qos_pri);
+ }
+ break;
+ case DRV_TLV_PORT_ALIAS:
+ if (p_drv_buf->port_alias_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->port_alias;
+ return sizeof(p_drv_buf->port_alias);
+ }
+ break;
+ case DRV_TLV_PORT_STATE:
+ if (p_drv_buf->port_state_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->port_state;
+ return sizeof(p_drv_buf->port_state);
+ }
+ break;
+ case DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->fip_tx_descr_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fip_tx_descr_size;
+ return sizeof(p_drv_buf->fip_tx_descr_size);
+ }
+ break;
+ case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->fip_rx_descr_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fip_rx_descr_size;
+ return sizeof(p_drv_buf->fip_rx_descr_size);
+ }
+ break;
+ case DRV_TLV_LINK_FAILURE_COUNT:
+ if (p_drv_buf->link_failures_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->link_failures;
+ return sizeof(p_drv_buf->link_failures);
+ }
+ break;
+ case DRV_TLV_FCOE_BOOT_PROGRESS:
+ if (p_drv_buf->fcoe_boot_progress_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_boot_progress;
+ return sizeof(p_drv_buf->fcoe_boot_progress);
+ }
+ break;
+ case DRV_TLV_RX_BROADCAST_PACKETS:
+ if (p_drv_buf->rx_bcast_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_bcast;
+ return sizeof(p_drv_buf->rx_bcast);
+ }
+ break;
+ case DRV_TLV_TX_BROADCAST_PACKETS:
+ if (p_drv_buf->tx_bcast_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_bcast;
+ return sizeof(p_drv_buf->tx_bcast);
+ }
+ break;
+ case DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->fcoe_txq_depth_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_txq_depth;
+ return sizeof(p_drv_buf->fcoe_txq_depth);
+ }
+ break;
+ case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->fcoe_rxq_depth_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_rxq_depth;
+ return sizeof(p_drv_buf->fcoe_rxq_depth);
+ }
+ break;
+ case DRV_TLV_FCOE_RX_FRAMES_RECEIVED:
+ if (p_drv_buf->fcoe_rx_frames_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_rx_frames;
+ return sizeof(p_drv_buf->fcoe_rx_frames);
+ }
+ break;
+ case DRV_TLV_FCOE_RX_BYTES_RECEIVED:
+ if (p_drv_buf->fcoe_rx_bytes_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_rx_bytes;
+ return sizeof(p_drv_buf->fcoe_rx_bytes);
+ }
+ break;
+ case DRV_TLV_FCOE_TX_FRAMES_SENT:
+ if (p_drv_buf->fcoe_tx_frames_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_tx_frames;
+ return sizeof(p_drv_buf->fcoe_tx_frames);
+ }
+ break;
+ case DRV_TLV_FCOE_TX_BYTES_SENT:
+ if (p_drv_buf->fcoe_tx_bytes_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_tx_bytes;
+ return sizeof(p_drv_buf->fcoe_tx_bytes);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_COUNT:
+ if (p_drv_buf->crc_count_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_count;
+ return sizeof(p_drv_buf->crc_count);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->crc_err_src_fcid_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_src_fcid[0];
+ return sizeof(p_drv_buf->crc_err_src_fcid[0]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->crc_err_src_fcid_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_src_fcid[1];
+ return sizeof(p_drv_buf->crc_err_src_fcid[1]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->crc_err_src_fcid_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_src_fcid[2];
+ return sizeof(p_drv_buf->crc_err_src_fcid[2]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->crc_err_src_fcid_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_src_fcid[3];
+ return sizeof(p_drv_buf->crc_err_src_fcid[3]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->crc_err_src_fcid_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_src_fcid[4];
+ return sizeof(p_drv_buf->crc_err_src_fcid[4]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_1_TIMESTAMP:
+ if (p_drv_buf->crc_err_tstamp_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_tstamp[0];
+ return sizeof(p_drv_buf->crc_err_tstamp[0]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_2_TIMESTAMP:
+ if (p_drv_buf->crc_err_tstamp_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_tstamp[1];
+ return sizeof(p_drv_buf->crc_err_tstamp[1]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_3_TIMESTAMP:
+ if (p_drv_buf->crc_err_tstamp_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_tstamp[2];
+ return sizeof(p_drv_buf->crc_err_tstamp[2]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_4_TIMESTAMP:
+ if (p_drv_buf->crc_err_tstamp_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_tstamp[3];
+ return sizeof(p_drv_buf->crc_err_tstamp[3]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_5_TIMESTAMP:
+ if (p_drv_buf->crc_err_tstamp_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_tstamp[4];
+ return sizeof(p_drv_buf->crc_err_tstamp[4]);
+ }
+ break;
+ case DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT:
+ if (p_drv_buf->losync_err_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->losync_err;
+ return sizeof(p_drv_buf->losync_err);
+ }
+ break;
+ case DRV_TLV_LOSS_OF_SIGNAL_ERRORS:
+ if (p_drv_buf->losig_err_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->losig_err;
+ return sizeof(p_drv_buf->losig_err);
+ }
+ break;
+ case DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT:
+ if (p_drv_buf->primtive_err_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->primtive_err;
+ return sizeof(p_drv_buf->primtive_err);
+ }
+ break;
+ case DRV_TLV_DISPARITY_ERROR_COUNT:
+ if (p_drv_buf->disparity_err_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->disparity_err;
+ return sizeof(p_drv_buf->disparity_err);
+ }
+ break;
+ case DRV_TLV_CODE_VIOLATION_ERROR_COUNT:
+ if (p_drv_buf->code_violation_err_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->code_violation_err;
+ return sizeof(p_drv_buf->code_violation_err);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1:
+ if (p_drv_buf->flogi_param_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_param[0];
+ return sizeof(p_drv_buf->flogi_param[0]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2:
+ if (p_drv_buf->flogi_param_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_param[1];
+ return sizeof(p_drv_buf->flogi_param[1]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3:
+ if (p_drv_buf->flogi_param_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_param[2];
+ return sizeof(p_drv_buf->flogi_param[2]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4:
+ if (p_drv_buf->flogi_param_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_param[3];
+ return sizeof(p_drv_buf->flogi_param[3]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_TIMESTAMP:
+ if (p_drv_buf->flogi_tstamp_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_tstamp;
+ return sizeof(p_drv_buf->flogi_tstamp);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1:
+ if (p_drv_buf->flogi_acc_param_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_acc_param[0];
+ return sizeof(p_drv_buf->flogi_acc_param[0]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2:
+ if (p_drv_buf->flogi_acc_param_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_acc_param[1];
+ return sizeof(p_drv_buf->flogi_acc_param[1]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3:
+ if (p_drv_buf->flogi_acc_param_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_acc_param[2];
+ return sizeof(p_drv_buf->flogi_acc_param[2]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4:
+ if (p_drv_buf->flogi_acc_param_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_acc_param[3];
+ return sizeof(p_drv_buf->flogi_acc_param[3]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP:
+ if (p_drv_buf->flogi_acc_tstamp_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_acc_tstamp;
+ return sizeof(p_drv_buf->flogi_acc_tstamp);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_RJT:
+ if (p_drv_buf->flogi_rjt_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_rjt;
+ return sizeof(p_drv_buf->flogi_rjt);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP:
+ if (p_drv_buf->flogi_rjt_tstamp_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_rjt_tstamp;
+ return sizeof(p_drv_buf->flogi_rjt_tstamp);
+ }
+ break;
+ case DRV_TLV_FDISCS_SENT_COUNT:
+ if (p_drv_buf->fdiscs_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fdiscs;
+ return sizeof(p_drv_buf->fdiscs);
+ }
+ break;
+ case DRV_TLV_FDISC_ACCS_RECEIVED:
+ if (p_drv_buf->fdisc_acc_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fdisc_acc;
+ return sizeof(p_drv_buf->fdisc_acc);
+ }
+ break;
+ case DRV_TLV_FDISC_RJTS_RECEIVED:
+ if (p_drv_buf->fdisc_rjt_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fdisc_rjt;
+ return sizeof(p_drv_buf->fdisc_rjt);
+ }
+ break;
+ case DRV_TLV_PLOGI_SENT_COUNT:
+ if (p_drv_buf->plogi_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi;
+ return sizeof(p_drv_buf->plogi);
+ }
+ break;
+ case DRV_TLV_PLOGI_ACCS_RECEIVED:
+ if (p_drv_buf->plogi_acc_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc;
+ return sizeof(p_drv_buf->plogi_acc);
+ }
+ break;
+ case DRV_TLV_PLOGI_RJTS_RECEIVED:
+ if (p_drv_buf->plogi_rjt_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_rjt;
+ return sizeof(p_drv_buf->plogi_rjt);
+ }
+ break;
+ case DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->plogi_dst_fcid_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_dst_fcid[0];
+ return sizeof(p_drv_buf->plogi_dst_fcid[0]);
+ }
+ break;
+ case DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->plogi_dst_fcid_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_dst_fcid[1];
+ return sizeof(p_drv_buf->plogi_dst_fcid[1]);
+ }
+ break;
+ case DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->plogi_dst_fcid_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_dst_fcid[2];
+ return sizeof(p_drv_buf->plogi_dst_fcid[2]);
+ }
+ break;
+ case DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->plogi_dst_fcid_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_dst_fcid[3];
+ return sizeof(p_drv_buf->plogi_dst_fcid[3]);
+ }
+ break;
+ case DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->plogi_dst_fcid_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_dst_fcid[4];
+ return sizeof(p_drv_buf->plogi_dst_fcid[4]);
+ }
+ break;
+ case DRV_TLV_PLOGI_1_TIMESTAMP:
+ if (p_drv_buf->plogi_tstamp_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_tstamp[0];
+ return sizeof(p_drv_buf->plogi_tstamp[0]);
+ }
+ break;
+ case DRV_TLV_PLOGI_2_TIMESTAMP:
+ if (p_drv_buf->plogi_tstamp_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_tstamp[1];
+ return sizeof(p_drv_buf->plogi_tstamp[1]);
+ }
+ break;
+ case DRV_TLV_PLOGI_3_TIMESTAMP:
+ if (p_drv_buf->plogi_tstamp_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_tstamp[2];
+ return sizeof(p_drv_buf->plogi_tstamp[2]);
+ }
+ break;
+ case DRV_TLV_PLOGI_4_TIMESTAMP:
+ if (p_drv_buf->plogi_tstamp_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_tstamp[3];
+ return sizeof(p_drv_buf->plogi_tstamp[3]);
+ }
+ break;
+ case DRV_TLV_PLOGI_5_TIMESTAMP:
+ if (p_drv_buf->plogi_tstamp_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_tstamp[4];
+ return sizeof(p_drv_buf->plogi_tstamp[4]);
+ }
+ break;
+ case DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogi_acc_src_fcid_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_src_fcid[0];
+ return sizeof(p_drv_buf->plogi_acc_src_fcid[0]);
+ }
+ break;
+ case DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogi_acc_src_fcid_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_src_fcid[1];
+ return sizeof(p_drv_buf->plogi_acc_src_fcid[1]);
+ }
+ break;
+ case DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogi_acc_src_fcid_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_src_fcid[2];
+ return sizeof(p_drv_buf->plogi_acc_src_fcid[2]);
+ }
+ break;
+ case DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogi_acc_src_fcid_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_src_fcid[3];
+ return sizeof(p_drv_buf->plogi_acc_src_fcid[3]);
+ }
+ break;
+ case DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogi_acc_src_fcid_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_src_fcid[4];
+ return sizeof(p_drv_buf->plogi_acc_src_fcid[4]);
+ }
+ break;
+ case DRV_TLV_PLOGI_1_ACC_TIMESTAMP:
+ if (p_drv_buf->plogi_acc_tstamp_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_tstamp[0];
+ return sizeof(p_drv_buf->plogi_acc_tstamp[0]);
+ }
+ break;
+ case DRV_TLV_PLOGI_2_ACC_TIMESTAMP:
+ if (p_drv_buf->plogi_acc_tstamp_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_tstamp[1];
+ return sizeof(p_drv_buf->plogi_acc_tstamp[1]);
+ }
+ break;
+ case DRV_TLV_PLOGI_3_ACC_TIMESTAMP:
+ if (p_drv_buf->plogi_acc_tstamp_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_tstamp[2];
+ return sizeof(p_drv_buf->plogi_acc_tstamp[2]);
+ }
+ break;
+ case DRV_TLV_PLOGI_4_ACC_TIMESTAMP:
+ if (p_drv_buf->plogi_acc_tstamp_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_tstamp[3];
+ return sizeof(p_drv_buf->plogi_acc_tstamp[3]);
+ }
+ break;
+ case DRV_TLV_PLOGI_5_ACC_TIMESTAMP:
+ if (p_drv_buf->plogi_acc_tstamp_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_tstamp[4];
+ return sizeof(p_drv_buf->plogi_acc_tstamp[4]);
+ }
+ break;
+ case DRV_TLV_LOGOS_ISSUED:
+ if (p_drv_buf->tx_plogos_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_plogos;
+ return sizeof(p_drv_buf->tx_plogos);
+ }
+ break;
+ case DRV_TLV_LOGO_ACCS_RECEIVED:
+ if (p_drv_buf->plogo_acc_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_acc;
+ return sizeof(p_drv_buf->plogo_acc);
+ }
+ break;
+ case DRV_TLV_LOGO_RJTS_RECEIVED:
+ if (p_drv_buf->plogo_rjt_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_rjt;
+ return sizeof(p_drv_buf->plogo_rjt);
+ }
+ break;
+ case DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogo_src_fcid_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_src_fcid[0];
+ return sizeof(p_drv_buf->plogo_src_fcid[0]);
+ }
+ break;
+ case DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogo_src_fcid_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_src_fcid[1];
+ return sizeof(p_drv_buf->plogo_src_fcid[1]);
+ }
+ break;
+ case DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogo_src_fcid_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_src_fcid[2];
+ return sizeof(p_drv_buf->plogo_src_fcid[2]);
+ }
+ break;
+ case DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogo_src_fcid_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_src_fcid[3];
+ return sizeof(p_drv_buf->plogo_src_fcid[3]);
+ }
+ break;
+ case DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogo_src_fcid_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_src_fcid[4];
+ return sizeof(p_drv_buf->plogo_src_fcid[4]);
+ }
+ break;
+ case DRV_TLV_LOGO_1_TIMESTAMP:
+ if (p_drv_buf->plogo_tstamp_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_tstamp[0];
+ return sizeof(p_drv_buf->plogo_tstamp[0]);
+ }
+ break;
+ case DRV_TLV_LOGO_2_TIMESTAMP:
+ if (p_drv_buf->plogo_tstamp_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_tstamp[1];
+ return sizeof(p_drv_buf->plogo_tstamp[1]);
+ }
+ break;
+ case DRV_TLV_LOGO_3_TIMESTAMP:
+ if (p_drv_buf->plogo_tstamp_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_tstamp[2];
+ return sizeof(p_drv_buf->plogo_tstamp[2]);
+ }
+ break;
+ case DRV_TLV_LOGO_4_TIMESTAMP:
+ if (p_drv_buf->plogo_tstamp_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_tstamp[3];
+ return sizeof(p_drv_buf->plogo_tstamp[3]);
+ }
+ break;
+ case DRV_TLV_LOGO_5_TIMESTAMP:
+ if (p_drv_buf->plogo_tstamp_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_tstamp[4];
+ return sizeof(p_drv_buf->plogo_tstamp[4]);
+ }
+ break;
+ case DRV_TLV_LOGOS_RECEIVED:
+ if (p_drv_buf->rx_logos_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_logos;
+ return sizeof(p_drv_buf->rx_logos);
+ }
+ break;
+ case DRV_TLV_ACCS_ISSUED:
+ if (p_drv_buf->tx_accs_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_accs;
+ return sizeof(p_drv_buf->tx_accs);
+ }
+ break;
+ case DRV_TLV_PRLIS_ISSUED:
+ if (p_drv_buf->tx_prlis_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_prlis;
+ return sizeof(p_drv_buf->tx_prlis);
+ }
+ break;
+ case DRV_TLV_ACCS_RECEIVED:
+ if (p_drv_buf->rx_accs_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_accs;
+ return sizeof(p_drv_buf->rx_accs);
+ }
+ break;
+ case DRV_TLV_ABTS_SENT_COUNT:
+ if (p_drv_buf->tx_abts_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_abts;
+ return sizeof(p_drv_buf->tx_abts);
+ }
+ break;
+ case DRV_TLV_ABTS_ACCS_RECEIVED:
+ if (p_drv_buf->rx_abts_acc_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_abts_acc;
+ return sizeof(p_drv_buf->rx_abts_acc);
+ }
+ break;
+ case DRV_TLV_ABTS_RJTS_RECEIVED:
+ if (p_drv_buf->rx_abts_rjt_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_abts_rjt;
+ return sizeof(p_drv_buf->rx_abts_rjt);
+ }
+ break;
+ case DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->abts_dst_fcid_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_dst_fcid[0];
+ return sizeof(p_drv_buf->abts_dst_fcid[0]);
+ }
+ break;
+ case DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->abts_dst_fcid_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_dst_fcid[1];
+ return sizeof(p_drv_buf->abts_dst_fcid[1]);
+ }
+ break;
+ case DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->abts_dst_fcid_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_dst_fcid[2];
+ return sizeof(p_drv_buf->abts_dst_fcid[2]);
+ }
+ break;
+ case DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->abts_dst_fcid_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_dst_fcid[3];
+ return sizeof(p_drv_buf->abts_dst_fcid[3]);
+ }
+ break;
+ case DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->abts_dst_fcid_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_dst_fcid[4];
+ return sizeof(p_drv_buf->abts_dst_fcid[4]);
+ }
+ break;
+ case DRV_TLV_ABTS_1_TIMESTAMP:
+ if (p_drv_buf->abts_tstamp_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_tstamp[0];
+ return sizeof(p_drv_buf->abts_tstamp[0]);
+ }
+ break;
+ case DRV_TLV_ABTS_2_TIMESTAMP:
+ if (p_drv_buf->abts_tstamp_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_tstamp[1];
+ return sizeof(p_drv_buf->abts_tstamp[1]);
+ }
+ break;
+ case DRV_TLV_ABTS_3_TIMESTAMP:
+ if (p_drv_buf->abts_tstamp_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_tstamp[2];
+ return sizeof(p_drv_buf->abts_tstamp[2]);
+ }
+ break;
+ case DRV_TLV_ABTS_4_TIMESTAMP:
+ if (p_drv_buf->abts_tstamp_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_tstamp[3];
+ return sizeof(p_drv_buf->abts_tstamp[3]);
+ }
+ break;
+ case DRV_TLV_ABTS_5_TIMESTAMP:
+ if (p_drv_buf->abts_tstamp_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_tstamp[4];
+ return sizeof(p_drv_buf->abts_tstamp[4]);
+ }
+ break;
+ case DRV_TLV_RSCNS_RECEIVED:
+ if (p_drv_buf->rx_rscn_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_rscn;
+ return sizeof(p_drv_buf->rx_rscn);
+ }
+ break;
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1:
+ if (p_drv_buf->rx_rscn_nport_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_rscn_nport[0];
+ return sizeof(p_drv_buf->rx_rscn_nport[0]);
+ }
+ break;
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2:
+ if (p_drv_buf->rx_rscn_nport_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_rscn_nport[1];
+ return sizeof(p_drv_buf->rx_rscn_nport[1]);
+ }
+ break;
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3:
+ if (p_drv_buf->rx_rscn_nport_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_rscn_nport[2];
+ return sizeof(p_drv_buf->rx_rscn_nport[2]);
+ }
+ break;
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4:
+ if (p_drv_buf->rx_rscn_nport_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_rscn_nport[3];
+ return sizeof(p_drv_buf->rx_rscn_nport[3]);
+ }
+ break;
+ case DRV_TLV_LUN_RESETS_ISSUED:
+ if (p_drv_buf->tx_lun_rst_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_lun_rst;
+ return sizeof(p_drv_buf->tx_lun_rst);
+ }
+ break;
+ case DRV_TLV_ABORT_TASK_SETS_ISSUED:
+ if (p_drv_buf->abort_task_sets_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abort_task_sets;
+ return sizeof(p_drv_buf->abort_task_sets);
+ }
+ break;
+ case DRV_TLV_TPRLOS_SENT:
+ if (p_drv_buf->tx_tprlos_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_tprlos;
+ return sizeof(p_drv_buf->tx_tprlos);
+ }
+ break;
+ case DRV_TLV_NOS_SENT_COUNT:
+ if (p_drv_buf->tx_nos_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_nos;
+ return sizeof(p_drv_buf->tx_nos);
+ }
+ break;
+ case DRV_TLV_NOS_RECEIVED_COUNT:
+ if (p_drv_buf->rx_nos_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_nos;
+ return sizeof(p_drv_buf->rx_nos);
+ }
+ break;
+ case DRV_TLV_OLS_COUNT:
+ if (p_drv_buf->ols_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->ols;
+ return sizeof(p_drv_buf->ols);
+ }
+ break;
+ case DRV_TLV_LR_COUNT:
+ if (p_drv_buf->lr_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->lr;
+ return sizeof(p_drv_buf->lr);
+ }
+ break;
+ case DRV_TLV_LRR_COUNT:
+ if (p_drv_buf->lrr_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->lrr;
+ return sizeof(p_drv_buf->lrr);
+ }
+ break;
+ case DRV_TLV_LIP_SENT_COUNT:
+ if (p_drv_buf->tx_lip_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_lip;
+ return sizeof(p_drv_buf->tx_lip);
+ }
+ break;
+ case DRV_TLV_LIP_RECEIVED_COUNT:
+ if (p_drv_buf->rx_lip_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_lip;
+ return sizeof(p_drv_buf->rx_lip);
+ }
+ break;
+ case DRV_TLV_EOFA_COUNT:
+ if (p_drv_buf->eofa_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->eofa;
+ return sizeof(p_drv_buf->eofa);
+ }
+ break;
+ case DRV_TLV_EOFNI_COUNT:
+ if (p_drv_buf->eofni_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->eofni;
+ return sizeof(p_drv_buf->eofni);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT:
+ if (p_drv_buf->scsi_chks_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chks;
+ return sizeof(p_drv_buf->scsi_chks);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT:
+ if (p_drv_buf->scsi_cond_met_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_cond_met;
+ return sizeof(p_drv_buf->scsi_cond_met);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_BUSY_COUNT:
+ if (p_drv_buf->scsi_busy_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_busy;
+ return sizeof(p_drv_buf->scsi_busy);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT:
+ if (p_drv_buf->scsi_inter_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_inter;
+ return sizeof(p_drv_buf->scsi_inter);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT:
+ if (p_drv_buf->scsi_inter_cond_met_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_inter_cond_met;
+ return sizeof(p_drv_buf->scsi_inter_cond_met);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT:
+ if (p_drv_buf->scsi_rsv_conflicts_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rsv_conflicts;
+ return sizeof(p_drv_buf->scsi_rsv_conflicts);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT:
+ if (p_drv_buf->scsi_tsk_full_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_tsk_full;
+ return sizeof(p_drv_buf->scsi_tsk_full);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT:
+ if (p_drv_buf->scsi_aca_active_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_aca_active;
+ return sizeof(p_drv_buf->scsi_aca_active);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT:
+ if (p_drv_buf->scsi_tsk_abort_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_tsk_abort;
+ return sizeof(p_drv_buf->scsi_tsk_abort);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ:
+ if (p_drv_buf->scsi_rx_chk_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rx_chk[0];
+ return sizeof(p_drv_buf->scsi_rx_chk[0]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ:
+ if (p_drv_buf->scsi_rx_chk_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rx_chk[1];
+ return sizeof(p_drv_buf->scsi_rx_chk[1]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ:
+ if (p_drv_buf->scsi_rx_chk_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rx_chk[2];
+ return sizeof(p_drv_buf->scsi_rx_chk[2]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ:
+ if (p_drv_buf->scsi_rx_chk_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rx_chk[3];
+			return sizeof(p_drv_buf->scsi_rx_chk[3]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ:
+ if (p_drv_buf->scsi_rx_chk_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rx_chk[4];
+ return sizeof(p_drv_buf->scsi_rx_chk[4]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_1_TIMESTAMP:
+ if (p_drv_buf->scsi_chk_tstamp_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chk_tstamp[0];
+ return sizeof(p_drv_buf->scsi_chk_tstamp[0]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_2_TIMESTAMP:
+ if (p_drv_buf->scsi_chk_tstamp_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chk_tstamp[1];
+ return sizeof(p_drv_buf->scsi_chk_tstamp[1]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_3_TIMESTAMP:
+ if (p_drv_buf->scsi_chk_tstamp_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chk_tstamp[2];
+ return sizeof(p_drv_buf->scsi_chk_tstamp[2]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_4_TIMESTAMP:
+ if (p_drv_buf->scsi_chk_tstamp_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chk_tstamp[3];
+ return sizeof(p_drv_buf->scsi_chk_tstamp[3]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_5_TIMESTAMP:
+ if (p_drv_buf->scsi_chk_tstamp_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chk_tstamp[4];
+ return sizeof(p_drv_buf->scsi_chk_tstamp[4]);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+static int
+ecore_mfw_get_iscsi_tlv_value(struct ecore_drv_tlv_hdr *p_tlv,
+ struct ecore_mfw_tlv_iscsi *p_drv_buf,
+ u8 **p_tlv_buf)
+{
+ switch (p_tlv->tlv_type) {
+ case DRV_TLV_TARGET_LLMNR_ENABLED:
+ if (p_drv_buf->target_llmnr_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->target_llmnr;
+ return sizeof(p_drv_buf->target_llmnr);
+ }
+ break;
+ case DRV_TLV_HEADER_DIGEST_FLAG_ENABLED:
+ if (p_drv_buf->header_digest_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->header_digest;
+ return sizeof(p_drv_buf->header_digest);
+ }
+ break;
+ case DRV_TLV_DATA_DIGEST_FLAG_ENABLED:
+ if (p_drv_buf->data_digest_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->data_digest;
+ return sizeof(p_drv_buf->data_digest);
+ }
+ break;
+ case DRV_TLV_AUTHENTICATION_METHOD:
+ if (p_drv_buf->auth_method_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->auth_method;
+ return sizeof(p_drv_buf->auth_method);
+ }
+ break;
+ case DRV_TLV_ISCSI_BOOT_TARGET_PORTAL:
+ if (p_drv_buf->boot_taget_portal_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->boot_taget_portal;
+ return sizeof(p_drv_buf->boot_taget_portal);
+ }
+ break;
+ case DRV_TLV_MAX_FRAME_SIZE:
+ if (p_drv_buf->frame_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->frame_size;
+ return sizeof(p_drv_buf->frame_size);
+ }
+ break;
+ case DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->tx_desc_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_desc_size;
+ return sizeof(p_drv_buf->tx_desc_size);
+ }
+ break;
+ case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->rx_desc_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_desc_size;
+ return sizeof(p_drv_buf->rx_desc_size);
+ }
+ break;
+ case DRV_TLV_ISCSI_BOOT_PROGRESS:
+ if (p_drv_buf->boot_progress_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->boot_progress;
+ return sizeof(p_drv_buf->boot_progress);
+ }
+ break;
+ case DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->tx_desc_qdepth_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_desc_qdepth;
+ return sizeof(p_drv_buf->tx_desc_qdepth);
+ }
+ break;
+ case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->rx_desc_qdepth_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_desc_qdepth;
+ return sizeof(p_drv_buf->rx_desc_qdepth);
+ }
+ break;
+ case DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED:
+ if (p_drv_buf->rx_frames_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_frames;
+ return sizeof(p_drv_buf->rx_frames);
+ }
+ break;
+ case DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED:
+ if (p_drv_buf->rx_bytes_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_bytes;
+ return sizeof(p_drv_buf->rx_bytes);
+ }
+ break;
+ case DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT:
+ if (p_drv_buf->tx_frames_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_frames;
+ return sizeof(p_drv_buf->tx_frames);
+ }
+ break;
+ case DRV_TLV_ISCSI_PDU_TX_BYTES_SENT:
+ if (p_drv_buf->tx_bytes_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_bytes;
+ return sizeof(p_drv_buf->tx_bytes);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+static enum _ecore_status_t
+ecore_mfw_update_tlvs(u8 tlv_group, struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 *p_mfw_buf, u32 size)
+{
+ union ecore_mfw_tlv_data *p_tlv_data;
+ struct ecore_drv_tlv_hdr tlv;
+ u8 *p_tlv_ptr = OSAL_NULL, *p_temp;
+ u32 offset;
+ int len;
+
+ p_tlv_data = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_tlv_data));
+ if (!p_tlv_data)
+ return ECORE_NOMEM;
+
+ if (OSAL_MFW_FILL_TLV_DATA(p_hwfn, tlv_group, p_tlv_data)) {
+ OSAL_VFREE(p_hwfn->p_dev, p_tlv_data);
+ return ECORE_INVAL;
+ }
+
+ offset = 0;
+ OSAL_MEMSET(&tlv, 0, sizeof(tlv));
+ while (offset < size) {
+ p_temp = &p_mfw_buf[offset];
+ tlv.tlv_type = TLV_TYPE(p_temp);
+ tlv.tlv_length = TLV_LENGTH(p_temp);
+ tlv.tlv_flags = TLV_FLAGS(p_temp);
+ DP_INFO(p_hwfn, "Type %d length = %d flags = 0x%x\n",
+ tlv.tlv_type, tlv.tlv_length, tlv.tlv_flags);
+
+ offset += sizeof(tlv);
+ if (tlv_group == ECORE_MFW_TLV_GENERIC)
+ len = ecore_mfw_get_gen_tlv_value(&tlv,
+ &p_tlv_data->generic, &p_tlv_ptr);
+ else if (tlv_group == ECORE_MFW_TLV_ETH)
+ len = ecore_mfw_get_eth_tlv_value(&tlv,
+ &p_tlv_data->eth, &p_tlv_ptr);
+ else if (tlv_group == ECORE_MFW_TLV_FCOE)
+ len = ecore_mfw_get_fcoe_tlv_value(&tlv,
+ &p_tlv_data->fcoe, &p_tlv_ptr);
+ else
+ len = ecore_mfw_get_iscsi_tlv_value(&tlv,
+ &p_tlv_data->iscsi, &p_tlv_ptr);
+
+ if (len > 0) {
+ OSAL_WARN(len > 4 * tlv.tlv_length,
+ "Incorrect MFW TLV length");
+ len = OSAL_MIN_T(int, len, 4 * tlv.tlv_length);
+ tlv.tlv_flags |= ECORE_DRV_TLV_FLAGS_CHANGED;
+ /* TODO: Endianness handling? */
+			OSAL_MEMCPY(p_mfw_buf + offset - sizeof(tlv), &tlv, sizeof(tlv));
+ OSAL_MEMCPY(p_mfw_buf + offset, p_tlv_ptr, len);
+ }
+
+ offset += sizeof(u32) * tlv.tlv_length;
+ }
+
+ OSAL_VFREE(p_hwfn->p_dev, p_tlv_data);
+
+ return ECORE_SUCCESS;
+}
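
The values consumed by the loop above come from OSAL_MFW_FILL_TLV_DATA(), which the ecore client implements to populate union ecore_mfw_tlv_data for the requested group. Below is a minimal sketch of such a fill routine for the generic group; the signature and the statistics source are assumptions, only the field and flag names (rx_frames/rx_frames_set, tx_frames/tx_frames_set) are taken from the getters above.

/* Hypothetical client fill-routine sketch: set only the values the driver
 * tracks and raise the matching *_set flag so the getter above returns a
 * non-negative length for that TLV.
 */
static int example_fill_tlv_data(struct ecore_hwfn *p_hwfn, u8 tlv_group,
				 union ecore_mfw_tlv_data *p_data)
{
	if (tlv_group & ECORE_MFW_TLV_GENERIC) {
		p_data->generic.rx_frames = 0;	/* e.g. from PMD statistics */
		p_data->generic.rx_frames_set = 1;
		p_data->generic.tx_frames = 0;
		p_data->generic.tx_frames_set = 1;
	}

	return 0;	/* non-zero is treated as failure by the caller above */
}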
+
+enum _ecore_status_t
+ecore_mfw_process_tlv_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ u32 addr, size, offset, resp, param, val;
+ u8 tlv_group = 0, id, *p_mfw_buf = OSAL_NULL, *p_temp;
+ u32 global_offsize, global_addr;
+ enum _ecore_status_t rc;
+ struct ecore_drv_tlv_hdr tlv;
+
+ addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_GLOBAL);
+ global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+ global_addr = SECTION_ADDR(global_offsize, 0);
+ addr = global_addr + OFFSETOF(struct public_global, data_ptr);
+ size = ecore_rd(p_hwfn, p_ptt, global_addr +
+ OFFSETOF(struct public_global, data_size));
+
+ if (!size) {
+ DP_NOTICE(p_hwfn, false, "Invalid TLV req size = %d\n", size);
+ goto drv_done;
+ }
+
+ p_mfw_buf = (void *)OSAL_VZALLOC(p_hwfn->p_dev, size);
+ if (!p_mfw_buf) {
+		DP_NOTICE(p_hwfn, false,
+			  "Failed to allocate memory for p_mfw_buf\n");
+ goto drv_done;
+ }
+
+ /* Read the TLV request to local buffer */
+ for (offset = 0; offset < size; offset += sizeof(u32)) {
+ val = ecore_rd(p_hwfn, p_ptt, addr + offset);
+ OSAL_MEMCPY(&p_mfw_buf[offset], &val, sizeof(u32));
+ }
+
+ /* Parse the headers to enumerate the requested TLV groups */
+ for (offset = 0; offset < size;
+ offset += sizeof(tlv) + sizeof(u32) * tlv.tlv_length) {
+ p_temp = &p_mfw_buf[offset];
+ tlv.tlv_type = TLV_TYPE(p_temp);
+ tlv.tlv_length = TLV_LENGTH(p_temp);
+ if (ecore_mfw_get_tlv_group(tlv.tlv_type, &tlv_group))
+ goto drv_done;
+ }
+
+ /* Update the TLV values in the local buffer */
+ for (id = ECORE_MFW_TLV_GENERIC; id < ECORE_MFW_TLV_MAX; id <<= 1) {
+ if (tlv_group & id) {
+ if (ecore_mfw_update_tlvs(id, p_hwfn, p_ptt, p_mfw_buf,
+ size))
+ goto drv_done;
+ }
+ }
+
+ /* Write the TLV data to shared memory */
+ for (offset = 0; offset < size; offset += sizeof(u32)) {
+		OSAL_MEMCPY(&val, &p_mfw_buf[offset], sizeof(u32));
+		ecore_wr(p_hwfn, p_ptt, addr + offset, val);
+ }
+
+drv_done:
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_TLV_DONE, 0, &resp,
+ &param);
+
+ OSAL_VFREE(p_hwfn->p_dev, p_mfw_buf);
+
+ return rc;
+}
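
Putting the pieces together, a hypothetical two-TLV request would move through the function above as follows (TLV types and lengths chosen for illustration only):

/* Hypothetical request contents, using the one-dword header layout
 * described earlier:
 *
 *   dword 0:     type = DRV_TLV_RX_FRAMES_RECEIVED, length = 2, flags = 0
 *   dwords 1..2: value placeholder
 *   dword 3:     type = DRV_TLV_TX_FRAMES_SENT,     length = 2, flags = 0
 *   dwords 4..5: value placeholder
 *
 * Pass 1 classifies both types into ECORE_MFW_TLV_GENERIC, pass 2 copies the
 * OSAL-provided rx_frames/tx_frames values (when their *_set flags are
 * raised) and marks each header with ECORE_DRV_TLV_FLAGS_CHANGED, and the
 * final loop writes the updated buffer back to shared memory before acking
 * the MFW with DRV_MSG_CODE_GET_TLV_DONE.
 */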
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_proto_if.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_proto_if.h
new file mode 100644
index 00000000..226e3d2a
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_proto_if.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_PROTO_IF_H__
+#define __ECORE_PROTO_IF_H__
+
+/*
+ * PF parameters (according to personality/protocol)
+ */
+
+#define ECORE_ROCE_PROTOCOL_INDEX (3)
+
+struct ecore_eth_pf_params {
+	/* The following parameters are used during HW-init and need to be
+	 * passed as arguments to the update_pf_params routine invoked
+	 * before slowpath start.
+	 */
+ u16 num_cons;
+
+	/* To enable aRFS, a positive number needs to be set prior to HW-init
+	 * [as filters require allocated searcher ILT memory].
+	 * This sets the maximal number of configured steering filters.
+	 */
+ u32 num_arfs_filters;
+};
+
+/* Most of the the parameters below are described in the FW iSCSI / TCP HSI */
+struct ecore_iscsi_pf_params {
+ u64 glbl_q_params_addr;
+ u64 bdq_pbl_base_addr[2];
+ u16 cq_num_entries;
+ u16 cmdq_num_entries;
+ u32 two_msl_timer;
+ u16 tx_sws_timer;
+	/* The following parameters are used during HW-init and need to be
+	 * passed as arguments to the update_pf_params routine invoked
+	 * before slowpath start.
+	 */
+ u16 num_cons;
+ u16 num_tasks;
+
+ /* The following parameters are used during protocol-init */
+ u16 half_way_close_timeout;
+ u16 bdq_xoff_threshold[2];
+ u16 bdq_xon_threshold[2];
+ u16 cmdq_xoff_threshold;
+ u16 cmdq_xon_threshold;
+ u16 rq_buffer_size;
+
+ u8 num_sq_pages_in_ring;
+ u8 num_r2tq_pages_in_ring;
+ u8 num_uhq_pages_in_ring;
+ u8 num_queues;
+ u8 log_page_size;
+ u8 rqe_log_size;
+ u8 max_fin_rt;
+ u8 gl_rq_pi;
+ u8 gl_cmd_pi;
+ u8 debug_mode;
+ u8 ll2_ooo_queue_id;
+ u8 ooo_enable;
+
+ u8 is_target;
+ u8 bdq_pbl_num_entries[2];
+};
+
+enum ecore_rdma_protocol {
+ ECORE_RDMA_PROTOCOL_DEFAULT,
+ ECORE_RDMA_PROTOCOL_ROCE,
+ ECORE_RDMA_PROTOCOL_IWARP,
+};
+
+struct ecore_rdma_pf_params {
+ /* Supplied to ECORE during resource allocation (may affect the ILT and
+ * the doorbell BAR).
+ */
+ u32 min_dpis; /* number of requested DPIs */
+	u32 num_mrs; /* number of requested memory regions */
+	u32 num_qps; /* number of requested Queue Pairs */
+	u32 num_srqs; /* number of requested SRQs */
+ u8 roce_edpm_mode; /* see QED_ROCE_EDPM_MODE_ENABLE */
+ u8 gl_pi; /* protocol index */
+
+ /* Will allocate rate limiters to be used with QPs */
+ u8 enable_dcqcn;
+
+	/* TCP port number used for the iWARP traffic */
+ u16 iwarp_port;
+ enum ecore_rdma_protocol rdma_protocol;
+};
+
+struct ecore_pf_params {
+ struct ecore_eth_pf_params eth_pf_params;
+ struct ecore_iscsi_pf_params iscsi_pf_params;
+ struct ecore_rdma_pf_params rdma_pf_params;
+};
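
A short sketch of how a client might fill this structure before HW init, in line with the comments above (num_cons and num_arfs_filters must be chosen before slowpath start); the values and the wrapping routine are hypothetical:

/* Hypothetical setup sketch: pick PF resources before slowpath start. */
static void example_set_pf_params(struct ecore_pf_params *p_params)
{
	OSAL_MEMSET(p_params, 0, sizeof(*p_params));

	/* L2 connections for the Ethernet personality */
	p_params->eth_pf_params.num_cons = 64;

	/* Non-zero only when aRFS filters are needed (requires searcher ILT) */
	p_params->eth_pf_params.num_arfs_filters = 0;
}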
+
+#endif
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_rt_defs.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_rt_defs.h
new file mode 100644
index 00000000..846dc6d1
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_rt_defs.h
@@ -0,0 +1,452 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __RT_DEFS_H__
+#define __RT_DEFS_H__
+
+/* Runtime array offsets */
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
+#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
+#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
+#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
+#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
+#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
+#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
+#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
+#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
+#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
+#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
+#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
+#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
+#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
+#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
+#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
+#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
+#define CAU_REG_PI_MEMORY_RT_OFFSET 2233
+#define CAU_REG_PI_MEMORY_RT_SIZE 4416
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653
+#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662
+#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664
+#define SRC_REG_FIRSTFREE_RT_OFFSET 6665
+#define SRC_REG_FIRSTFREE_RT_SIZE 2
+#define SRC_REG_LASTFREE_RT_OFFSET 6667
+#define SRC_REG_LASTFREE_RT_SIZE 2
+#define SRC_REG_COUNTFREE_RT_OFFSET 6669
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675
+#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699
+#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700
+#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704
+#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 28705
+#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 28706
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28707
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28708
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28709
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28710
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28711
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28712
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28713
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28714
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28715
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28716
+#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29132
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29740
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29741
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29742
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29743
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29744
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29745
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29746
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29747
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29748
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29749
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29750
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29751
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29752
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29753
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29754
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29755
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29756
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29757
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29758
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29759
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29760
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29761
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29762
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29763
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29764
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29765
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29766
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29767
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29768
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29769
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29770
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29771
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29772
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29773
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29774
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29775
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29776
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29777
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29778
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29779
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29780
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29781
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29782
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29783
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29784
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29785
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29786
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29787
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29788
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29789
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29790
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29791
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29792
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29793
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29794
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29795
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29796
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29797
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29798
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29799
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29800
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29801
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29802
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29803
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29804
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29805
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29806
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29807
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29935
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29936
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29937
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29938
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29939
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29940
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29941
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29942
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29943
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29944
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29945
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29946
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29947
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29948
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29949
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29950
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29951
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29952
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29953
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29954
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29955
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29956
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29957
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29958
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29959
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29960
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29961
+#define QM_REG_PQTX2PF_0_RT_OFFSET 29962
+#define QM_REG_PQTX2PF_1_RT_OFFSET 29963
+#define QM_REG_PQTX2PF_2_RT_OFFSET 29964
+#define QM_REG_PQTX2PF_3_RT_OFFSET 29965
+#define QM_REG_PQTX2PF_4_RT_OFFSET 29966
+#define QM_REG_PQTX2PF_5_RT_OFFSET 29967
+#define QM_REG_PQTX2PF_6_RT_OFFSET 29968
+#define QM_REG_PQTX2PF_7_RT_OFFSET 29969
+#define QM_REG_PQTX2PF_8_RT_OFFSET 29970
+#define QM_REG_PQTX2PF_9_RT_OFFSET 29971
+#define QM_REG_PQTX2PF_10_RT_OFFSET 29972
+#define QM_REG_PQTX2PF_11_RT_OFFSET 29973
+#define QM_REG_PQTX2PF_12_RT_OFFSET 29974
+#define QM_REG_PQTX2PF_13_RT_OFFSET 29975
+#define QM_REG_PQTX2PF_14_RT_OFFSET 29976
+#define QM_REG_PQTX2PF_15_RT_OFFSET 29977
+#define QM_REG_PQTX2PF_16_RT_OFFSET 29978
+#define QM_REG_PQTX2PF_17_RT_OFFSET 29979
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29980
+#define QM_REG_PQTX2PF_19_RT_OFFSET 29981
+#define QM_REG_PQTX2PF_20_RT_OFFSET 29982
+#define QM_REG_PQTX2PF_21_RT_OFFSET 29983
+#define QM_REG_PQTX2PF_22_RT_OFFSET 29984
+#define QM_REG_PQTX2PF_23_RT_OFFSET 29985
+#define QM_REG_PQTX2PF_24_RT_OFFSET 29986
+#define QM_REG_PQTX2PF_25_RT_OFFSET 29987
+#define QM_REG_PQTX2PF_26_RT_OFFSET 29988
+#define QM_REG_PQTX2PF_27_RT_OFFSET 29989
+#define QM_REG_PQTX2PF_28_RT_OFFSET 29990
+#define QM_REG_PQTX2PF_29_RT_OFFSET 29991
+#define QM_REG_PQTX2PF_30_RT_OFFSET 29992
+#define QM_REG_PQTX2PF_31_RT_OFFSET 29993
+#define QM_REG_PQTX2PF_32_RT_OFFSET 29994
+#define QM_REG_PQTX2PF_33_RT_OFFSET 29995
+#define QM_REG_PQTX2PF_34_RT_OFFSET 29996
+#define QM_REG_PQTX2PF_35_RT_OFFSET 29997
+#define QM_REG_PQTX2PF_36_RT_OFFSET 29998
+#define QM_REG_PQTX2PF_37_RT_OFFSET 29999
+#define QM_REG_PQTX2PF_38_RT_OFFSET 30000
+#define QM_REG_PQTX2PF_39_RT_OFFSET 30001
+#define QM_REG_PQTX2PF_40_RT_OFFSET 30002
+#define QM_REG_PQTX2PF_41_RT_OFFSET 30003
+#define QM_REG_PQTX2PF_42_RT_OFFSET 30004
+#define QM_REG_PQTX2PF_43_RT_OFFSET 30005
+#define QM_REG_PQTX2PF_44_RT_OFFSET 30006
+#define QM_REG_PQTX2PF_45_RT_OFFSET 30007
+#define QM_REG_PQTX2PF_46_RT_OFFSET 30008
+#define QM_REG_PQTX2PF_47_RT_OFFSET 30009
+#define QM_REG_PQTX2PF_48_RT_OFFSET 30010
+#define QM_REG_PQTX2PF_49_RT_OFFSET 30011
+#define QM_REG_PQTX2PF_50_RT_OFFSET 30012
+#define QM_REG_PQTX2PF_51_RT_OFFSET 30013
+#define QM_REG_PQTX2PF_52_RT_OFFSET 30014
+#define QM_REG_PQTX2PF_53_RT_OFFSET 30015
+#define QM_REG_PQTX2PF_54_RT_OFFSET 30016
+#define QM_REG_PQTX2PF_55_RT_OFFSET 30017
+#define QM_REG_PQTX2PF_56_RT_OFFSET 30018
+#define QM_REG_PQTX2PF_57_RT_OFFSET 30019
+#define QM_REG_PQTX2PF_58_RT_OFFSET 30020
+#define QM_REG_PQTX2PF_59_RT_OFFSET 30021
+#define QM_REG_PQTX2PF_60_RT_OFFSET 30022
+#define QM_REG_PQTX2PF_61_RT_OFFSET 30023
+#define QM_REG_PQTX2PF_62_RT_OFFSET 30024
+#define QM_REG_PQTX2PF_63_RT_OFFSET 30025
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 30026
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 30027
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 30028
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 30029
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 30030
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 30031
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 30032
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 30033
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 30034
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 30035
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 30036
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 30037
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 30038
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 30039
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 30040
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 30041
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 30042
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 30043
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 30044
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 30045
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 30046
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 30047
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 30048
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 30049
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 30050
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 30051
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 30052
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 30053
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 30054
+#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30310
+#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
+#define QM_REG_RLGLBLCRD_RT_OFFSET 30566
+#define QM_REG_RLGLBLCRD_RT_SIZE 256
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 30822
+#define QM_REG_RLPFPERIOD_RT_OFFSET 30823
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30824
+#define QM_REG_RLPFINCVAL_RT_OFFSET 30825
+#define QM_REG_RLPFINCVAL_RT_SIZE 16
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30841
+#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_RLPFCRD_RT_OFFSET 30857
+#define QM_REG_RLPFCRD_RT_SIZE 16
+#define QM_REG_RLPFENABLE_RT_OFFSET 30873
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30874
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30875
+#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30891
+#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_WFQPFCRD_RT_OFFSET 30907
+#define QM_REG_WFQPFCRD_RT_SIZE 256
+#define QM_REG_WFQPFENABLE_RT_OFFSET 31163
+#define QM_REG_WFQVPENABLE_RT_OFFSET 31164
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31165
+#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
+#define QM_REG_TXPQMAP_RT_OFFSET 31677
+#define QM_REG_TXPQMAP_RT_SIZE 512
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32189
+#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
+#define QM_REG_WFQVPCRD_RT_OFFSET 32701
+#define QM_REG_WFQVPCRD_RT_SIZE 512
+#define QM_REG_WFQVPMAP_RT_OFFSET 33213
+#define QM_REG_WFQVPMAP_RT_SIZE 512
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33725
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320
+#define QM_REG_VOQCRDLINE_RT_OFFSET 34045
+#define QM_REG_VOQCRDLINE_RT_SIZE 36
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 34081
+#define QM_REG_VOQINITCRDLINE_RT_SIZE 36
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34117
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34118
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34119
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34120
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34121
+#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34122
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34123
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34124
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34128
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34132
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34136
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34137
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34169
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34185
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34201
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34217
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34233
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 34234
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34235
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34236
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34237
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34238
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34239
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34240
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34241
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34242
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34243
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34244
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34245
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34246
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34247
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34248
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34249
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34250
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34251
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34252
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34253
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34254
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34255
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34256
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34257
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34258
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34259
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34260
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34261
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34262
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34263
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34264
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34265
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34266
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34267
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34268
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34269
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34270
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34271
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34272
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34273
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34274
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34275
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34276
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34277
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34278
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34279
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34280
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34281
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34282
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34283
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34284
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34285
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34286
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34287
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34288
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34289
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34290
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34291
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34292
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34293
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34294
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34295
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34296
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34297
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34298
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34299
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34300
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34301
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34302
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34303
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34304
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34305
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34306
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34307
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34308
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34309
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34310
+
+#define RUNTIME_ARRAY_SIZE 34311
+
+#endif /* __RT_DEFS_H__ */
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_sp_api.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_sp_api.h
new file mode 100644
index 00000000..c8e564f9
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_sp_api.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_SP_API_H__
+#define __ECORE_SP_API_H__
+
+#include "ecore_status.h"
+
+enum spq_mode {
+ ECORE_SPQ_MODE_BLOCK, /* Client will poll a designated mem. address */
+ ECORE_SPQ_MODE_CB, /* Client supplies a callback */
+ ECORE_SPQ_MODE_EBLOCK, /* ECORE should block until completion */
+};
+
+struct ecore_hwfn;
+union event_ring_data;
+struct eth_slow_path_rx_cqe;
+
+struct ecore_spq_comp_cb {
+ void (*function)(struct ecore_hwfn *,
+ void *,
+ union event_ring_data *,
+ u8 fw_return_code);
+ void *cookie;
+};
+
+
+/**
+ * @brief ecore_eth_cqe_completion - handles the completion of a
+ * ramrod on the cqe ring
+ *
+ * @param p_hwfn
+ * @param cqe
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
+ struct eth_slow_path_rx_cqe *cqe);
+/**
+ * @brief ecore_sp_pf_update_tunn_cfg - PF Function Tunnel configuration
+ * update Ramrod
+ *
+ * This ramrod is sent to update a tunneling configuration
+ * for a physical function (PF).
+ *
+ * @param p_hwfn
+ * @param p_tunn - pf update tunneling parameters
+ * @param comp_mode - completion mode
+ * @param p_comp_data - completion callback data
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t
+ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunnel_info *p_tunn,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data);
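+
+/* Illustrative usage (not taken from the driver sources): a hedged sketch of
+ * how a caller might request a VxLAN UDP-port update once the PF is running.
+ * The local variable name and the chosen port number are assumptions made
+ * purely for illustration.
+ *
+ *   struct ecore_tunnel_info tunn;
+ *
+ *   OSAL_MEMSET(&tunn, 0, sizeof(tunn));
+ *   tunn.vxlan_port.b_update_port = true;
+ *   tunn.vxlan_port.port = 4789;
+ *   rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+ *                                    ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
+ */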
+#endif
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_sp_commands.c b/src/seastar/dpdk/drivers/net/qede/base/ecore_sp_commands.c
new file mode 100644
index 00000000..8fd64d7a
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_sp_commands.c
@@ -0,0 +1,568 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+
+#include "ecore.h"
+#include "ecore_status.h"
+#include "ecore_chain.h"
+#include "ecore_spq.h"
+#include "ecore_init_fw_funcs.h"
+#include "ecore_cxt.h"
+#include "ecore_sp_commands.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore_iro.h"
+#include "reg_addr.h"
+#include "ecore_int.h"
+#include "ecore_hw.h"
+#include "ecore_dcbx.h"
+#include "ecore_sriov.h"
+#include "ecore_vf.h"
+
+enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry **pp_ent,
+ u8 cmd,
+ u8 protocol,
+ struct ecore_sp_init_data *p_data)
+{
+ u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ enum _ecore_status_t rc;
+
+ if (!pp_ent)
+ return ECORE_INVAL;
+
+ /* Get an SPQ entry */
+ rc = ecore_spq_get_entry(p_hwfn, pp_ent);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Fill the SPQ entry */
+ p_ent = *pp_ent;
+ p_ent->elem.hdr.cid = OSAL_CPU_TO_LE32(opaque_cid);
+ p_ent->elem.hdr.cmd_id = cmd;
+ p_ent->elem.hdr.protocol_id = protocol;
+ p_ent->priority = ECORE_SPQ_PRIORITY_NORMAL;
+ p_ent->comp_mode = p_data->comp_mode;
+ p_ent->comp_done.done = 0;
+
+ switch (p_ent->comp_mode) {
+ case ECORE_SPQ_MODE_EBLOCK:
+ p_ent->comp_cb.cookie = &p_ent->comp_done;
+ break;
+
+ case ECORE_SPQ_MODE_BLOCK:
+ if (!p_data->p_comp_data)
+ return ECORE_INVAL;
+
+ p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
+ break;
+
+ case ECORE_SPQ_MODE_CB:
+ if (!p_data->p_comp_data)
+ p_ent->comp_cb.function = OSAL_NULL;
+ else
+ p_ent->comp_cb = *p_data->p_comp_data;
+ break;
+
+ default:
+ DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
+ p_ent->comp_mode);
+ return ECORE_INVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
+ opaque_cid, cmd, protocol,
+ (unsigned long)&p_ent->ramrod,
+ D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
+ ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
+ "MODE_CB"));
+
+ OSAL_MEMSET(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));
+
+ return ECORE_SUCCESS;
+}
+
+static enum tunnel_clss ecore_tunn_clss_to_fw_clss(u8 type)
+{
+ switch (type) {
+ case ECORE_TUNN_CLSS_MAC_VLAN:
+ return TUNNEL_CLSS_MAC_VLAN;
+ case ECORE_TUNN_CLSS_MAC_VNI:
+ return TUNNEL_CLSS_MAC_VNI;
+ case ECORE_TUNN_CLSS_INNER_MAC_VLAN:
+ return TUNNEL_CLSS_INNER_MAC_VLAN;
+ case ECORE_TUNN_CLSS_INNER_MAC_VNI:
+ return TUNNEL_CLSS_INNER_MAC_VNI;
+ case ECORE_TUNN_CLSS_MAC_VLAN_DUAL_STAGE:
+ return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE;
+ default:
+ return TUNNEL_CLSS_MAC_VLAN;
+ }
+}
+
+static void
+ecore_set_pf_update_tunn_mode(struct ecore_tunnel_info *p_tun,
+ struct ecore_tunnel_info *p_src,
+ bool b_pf_start)
+{
+ if (p_src->vxlan.b_update_mode || b_pf_start)
+ p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled;
+
+ if (p_src->l2_gre.b_update_mode || b_pf_start)
+ p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled;
+
+ if (p_src->ip_gre.b_update_mode || b_pf_start)
+ p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled;
+
+ if (p_src->l2_geneve.b_update_mode || b_pf_start)
+ p_tun->l2_geneve.b_mode_enabled =
+ p_src->l2_geneve.b_mode_enabled;
+
+ if (p_src->ip_geneve.b_update_mode || b_pf_start)
+ p_tun->ip_geneve.b_mode_enabled =
+ p_src->ip_geneve.b_mode_enabled;
+}
+
+static void ecore_set_tunn_cls_info(struct ecore_tunnel_info *p_tun,
+ struct ecore_tunnel_info *p_src)
+{
+ enum tunnel_clss type;
+
+ p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
+ p_tun->b_update_tx_cls = p_src->b_update_tx_cls;
+
+	/* @DPDK - typecast tunnel class */
+ type = ecore_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls);
+ p_tun->vxlan.tun_cls = (enum ecore_tunn_clss)type;
+ type = ecore_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls);
+ p_tun->l2_gre.tun_cls = (enum ecore_tunn_clss)type;
+ type = ecore_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls);
+ p_tun->ip_gre.tun_cls = (enum ecore_tunn_clss)type;
+ type = ecore_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls);
+ p_tun->l2_geneve.tun_cls = (enum ecore_tunn_clss)type;
+ type = ecore_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls);
+ p_tun->ip_geneve.tun_cls = (enum ecore_tunn_clss)type;
+}
+
+static void ecore_set_tunn_ports(struct ecore_tunnel_info *p_tun,
+ struct ecore_tunnel_info *p_src)
+{
+ p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port;
+ p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port;
+
+ if (p_src->geneve_port.b_update_port)
+ p_tun->geneve_port.port = p_src->geneve_port.port;
+
+ if (p_src->vxlan_port.b_update_port)
+ p_tun->vxlan_port.port = p_src->vxlan_port.port;
+}
+
+static void
+__ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
+ struct ecore_tunn_update_type *tun_type)
+{
+ *p_tunn_cls = tun_type->tun_cls;
+
+ if (tun_type->b_mode_enabled)
+ *p_enable_tx_clas = 1;
+}
+
+static void
+ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
+ struct ecore_tunn_update_type *tun_type,
+ u8 *p_update_port, __le16 *p_port,
+ struct ecore_tunn_update_udp_port *p_udp_port)
+{
+ __ecore_set_ramrod_tunnel_param(p_tunn_cls, p_enable_tx_clas,
+ tun_type);
+ if (p_udp_port->b_update_port) {
+ *p_update_port = 1;
+ *p_port = OSAL_CPU_TO_LE16(p_udp_port->port);
+ }
+}
+
+static void
+ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunnel_info *p_src,
+ struct pf_update_tunnel_config *p_tunn_cfg)
+{
+ struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
+
+ ecore_set_pf_update_tunn_mode(p_tun, p_src, false);
+ ecore_set_tunn_cls_info(p_tun, p_src);
+ ecore_set_tunn_ports(p_tun, p_src);
+
+ ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
+ &p_tunn_cfg->tx_enable_vxlan,
+ &p_tun->vxlan,
+ &p_tunn_cfg->set_vxlan_udp_port_flg,
+ &p_tunn_cfg->vxlan_udp_port,
+ &p_tun->vxlan_port);
+
+ ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
+ &p_tunn_cfg->tx_enable_l2geneve,
+ &p_tun->l2_geneve,
+ &p_tunn_cfg->set_geneve_udp_port_flg,
+ &p_tunn_cfg->geneve_udp_port,
+ &p_tun->geneve_port);
+
+ __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
+ &p_tunn_cfg->tx_enable_ipgeneve,
+ &p_tun->ip_geneve);
+
+ __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
+ &p_tunn_cfg->tx_enable_l2gre,
+ &p_tun->l2_gre);
+
+ __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
+ &p_tunn_cfg->tx_enable_ipgre,
+ &p_tun->ip_gre);
+
+ p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
+ p_tunn_cfg->update_tx_pf_clss = p_tun->b_update_tx_cls;
+}
+
+static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_tunnel_info *p_tun)
+{
+ ecore_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled,
+ p_tun->ip_gre.b_mode_enabled);
+ ecore_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled);
+
+ ecore_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled,
+ p_tun->ip_geneve.b_mode_enabled);
+}
+
+static void ecore_set_hw_tunn_mode_port(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunnel_info *p_tunn)
+{
+ if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
+ DP_NOTICE(p_hwfn, true,
+ "A0 chip: tunnel hw config is not supported\n");
+ return;
+ }
+
+ if (p_tunn->vxlan_port.b_update_port)
+ ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_tunn->vxlan_port.port);
+
+ if (p_tunn->geneve_port.b_update_port)
+ ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_tunn->geneve_port.port);
+
+ ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn);
+}
+
+static void
+ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunnel_info *p_src,
+ struct pf_start_tunnel_config *p_tunn_cfg)
+{
+ struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
+
+ if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
+ DP_NOTICE(p_hwfn, true,
+ "A0 chip: tunnel pf start config is not supported\n");
+ return;
+ }
+
+ if (!p_src)
+ return;
+
+ ecore_set_pf_update_tunn_mode(p_tun, p_src, true);
+ ecore_set_tunn_cls_info(p_tun, p_src);
+ ecore_set_tunn_ports(p_tun, p_src);
+
+ ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
+ &p_tunn_cfg->tx_enable_vxlan,
+ &p_tun->vxlan,
+ &p_tunn_cfg->set_vxlan_udp_port_flg,
+ &p_tunn_cfg->vxlan_udp_port,
+ &p_tun->vxlan_port);
+
+ ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
+ &p_tunn_cfg->tx_enable_l2geneve,
+ &p_tun->l2_geneve,
+ &p_tunn_cfg->set_geneve_udp_port_flg,
+ &p_tunn_cfg->geneve_udp_port,
+ &p_tun->geneve_port);
+
+ __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
+ &p_tunn_cfg->tx_enable_ipgeneve,
+ &p_tun->ip_geneve);
+
+ __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
+ &p_tunn_cfg->tx_enable_l2gre,
+ &p_tun->l2_gre);
+
+ __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
+ &p_tunn_cfg->tx_enable_ipgre,
+ &p_tun->ip_gre);
+}
+
+enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunnel_info *p_tunn,
+ enum ecore_mf_mode mode,
+ bool allow_npar_tx_switch)
+{
+ struct pf_start_ramrod_data *p_ramrod = OSAL_NULL;
+ u16 sb = ecore_int_get_sp_sb_id(p_hwfn);
+ u8 sb_index = p_hwfn->p_eq->eq_sb_index;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+ u8 page_cnt;
+
+ /* update initial eq producer */
+ ecore_eq_prod_update(p_hwfn,
+ ecore_chain_get_prod_idx(&p_hwfn->p_eq->chain));
+
+ /* Initialize the SPQ entry for the ramrod */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_START,
+ PROTOCOLID_COMMON, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Fill the ramrod data */
+ p_ramrod = &p_ent->ramrod.pf_start;
+ p_ramrod->event_ring_sb_id = OSAL_CPU_TO_LE16(sb);
+ p_ramrod->event_ring_sb_index = sb_index;
+ p_ramrod->path_id = ECORE_PATH_ID(p_hwfn);
+
+ /* For easier debugging */
+ p_ramrod->dont_log_ramrods = 0;
+ p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0xf);
+
+ switch (mode) {
+ case ECORE_MF_DEFAULT:
+ case ECORE_MF_NPAR:
+ p_ramrod->mf_mode = MF_NPAR;
+ break;
+ case ECORE_MF_OVLAN:
+ p_ramrod->mf_mode = MF_OVLAN;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true,
+ "Unsupported MF mode, init as DEFAULT\n");
+ p_ramrod->mf_mode = MF_NPAR;
+ }
+ p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
+
+ /* Place EQ address in RAMROD */
+ DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
+ p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
+ page_cnt = (u8)ecore_chain_get_page_cnt(&p_hwfn->p_eq->chain);
+ p_ramrod->event_ring_num_pages = page_cnt;
+ DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
+ p_hwfn->p_consq->chain.pbl_sp.p_phys_table);
+
+ ecore_tunn_set_pf_start_params(p_hwfn, p_tunn,
+ &p_ramrod->tunnel_config);
+
+ if (IS_MF_SI(p_hwfn))
+ p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
+
+ switch (p_hwfn->hw_info.personality) {
+ case ECORE_PCI_ETH:
+ p_ramrod->personality = PERSONALITY_ETH;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true, "Unknown personality %d\n",
+ p_hwfn->hw_info.personality);
+ p_ramrod->personality = PERSONALITY_ETH;
+ }
+
+ if (p_hwfn->p_dev->p_iov_info) {
+ struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
+
+ p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf;
+ p_ramrod->num_vfs = (u8)p_iov->total_vfs;
+ }
+ /* @@@TBD - update also the "ROCE_VER_KEY" entries when the FW RoCE HSI
+ * version is available.
+ */
+ p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
+ p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
+ sb, sb_index, p_ramrod->outer_tag);
+
+ rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+
+ if (p_tunn)
+ ecore_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->p_dev->tunnel);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_CB;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ ecore_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
+ &p_ent->ramrod.pf_update);
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_rl_update_params *params)
+{
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+ struct rl_update_ramrod_data *rl_update;
+ struct ecore_sp_init_data init_data;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_RL_UPDATE, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ rl_update = &p_ent->ramrod.rl_update;
+
+ rl_update->qcn_update_param_flg = params->qcn_update_param_flg;
+ rl_update->dcqcn_update_param_flg = params->dcqcn_update_param_flg;
+ rl_update->rl_init_flg = params->rl_init_flg;
+ rl_update->rl_start_flg = params->rl_start_flg;
+ rl_update->rl_stop_flg = params->rl_stop_flg;
+ rl_update->rl_id_first = params->rl_id_first;
+ rl_update->rl_id_last = params->rl_id_last;
+ rl_update->rl_dc_qcn_flg = params->rl_dc_qcn_flg;
+ rl_update->rl_bc_rate = OSAL_CPU_TO_LE32(params->rl_bc_rate);
+ rl_update->rl_max_rate = OSAL_CPU_TO_LE16(params->rl_max_rate);
+ rl_update->rl_r_ai = OSAL_CPU_TO_LE16(params->rl_r_ai);
+ rl_update->rl_r_hai = OSAL_CPU_TO_LE16(params->rl_r_hai);
+ rl_update->dcqcn_g = OSAL_CPU_TO_LE16(params->dcqcn_g);
+ rl_update->dcqcn_k_us = OSAL_CPU_TO_LE32(params->dcqcn_k_us);
+ rl_update->dcqcn_timeuot_us = OSAL_CPU_TO_LE32(
+ params->dcqcn_timeuot_us);
+ rl_update->qcn_timeuot_us = OSAL_CPU_TO_LE32(params->qcn_timeuot_us);
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+/* Set pf update ramrod command params */
+enum _ecore_status_t
+ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunnel_info *p_tunn,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data)
+{
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return ecore_vf_pf_tunnel_param_update(p_hwfn, p_tunn);
+
+ if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
+ DP_NOTICE(p_hwfn, true,
+ "A0 chip: tunnel pf update config is not supported\n");
+ return rc;
+ }
+
+ if (!p_tunn)
+ return ECORE_INVAL;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ ecore_tunn_set_pf_update_params(p_hwfn, p_tunn,
+ &p_ent->ramrod.pf_update.tunnel_config);
+
+ rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ ecore_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->p_dev->tunnel);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_sp_commands.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_sp_commands.h
new file mode 100644
index 00000000..33e31e42
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_sp_commands.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_SP_COMMANDS_H__
+#define __ECORE_SP_COMMANDS_H__
+
+#include "ecore.h"
+#include "ecore_spq.h"
+#include "ecore_sp_api.h"
+
+#define ECORE_SP_EQ_COMPLETION 0x01
+#define ECORE_SP_CQE_COMPLETION 0x02
+
+struct ecore_sp_init_data {
+ /* The CID and FID aren't necessarily derived from hwfn,
+	 * e.g., in IOV scenarios. CID might differ between SPQ and
+ * other elements.
+ */
+ u32 cid;
+ u16 opaque_fid;
+
+ /* Information regarding operation upon sending & completion */
+ enum spq_mode comp_mode;
+ struct ecore_spq_comp_cb *p_comp_data;
+
+};
+
+/**
+ * @brief Acquire and initialize an SPQ entry for a given ramrod.
+ *
+ * @param p_hwfn
+ * @param pp_ent - will be filled with a pointer to an entry upon success
+ * @param cmd - dependent upon protocol
+ * @param protocol
+ * @param p_data - various configuration required for ramrod
+ *
+ * @return ECORE_SUCCESS upon success, otherwise failure.
+ */
+enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry **pp_ent,
+ u8 cmd,
+ u8 protocol,
+ struct ecore_sp_init_data *p_data);
+
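+/* Illustrative flow (mirrors ecore_sp_pf_update() in ecore_sp_commands.c):
+ * every ramrod follows the same pattern - prepare ecore_sp_init_data, acquire
+ * an SPQ entry via ecore_sp_init_request(), fill the ramrod union and post it
+ * with ecore_spq_post(). The command/protocol values below are taken from
+ * that function; everything else is a sketch, not driver code.
+ *
+ *   struct ecore_sp_init_data init_data;
+ *   struct ecore_spq_entry *p_ent = OSAL_NULL;
+ *
+ *   OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ *   init_data.cid = ecore_spq_get_cid(p_hwfn);
+ *   init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ *   init_data.comp_mode = ECORE_SPQ_MODE_CB;
+ *
+ *   rc = ecore_sp_init_request(p_hwfn, &p_ent, COMMON_RAMROD_PF_UPDATE,
+ *                              PROTOCOLID_COMMON, &init_data);
+ *   if (rc == ECORE_SUCCESS)
+ *           rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+ */
+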
+/**
+ * @brief ecore_sp_pf_start - PF Function Start Ramrod
+ *
+ * This ramrod is sent to initialize a physical function (PF). It will
+ * configure the function related parameters and write its completion to the
+ * event ring specified in the parameters.
+ *
+ * Ramrods complete on the common event ring for the PF. This ring is
+ * allocated by the driver on host memory and its parameters are written
+ * to the internal RAM of the UStorm by the Function Start Ramrod.
+ *
+ * @param p_hwfn
+ * @param p_tunn - pf start tunneling configuration
+ * @param mode
+ * @param allow_npar_tx_switch - npar tx switching to be used
+ * for vports configured for tx-switching.
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunnel_info *p_tunn,
+ enum ecore_mf_mode mode,
+ bool allow_npar_tx_switch);
+
+/**
+ * @brief ecore_sp_pf_update - PF Function Update Ramrod
+ *
+ * This ramrod updates function-related parameters. Every parameter can be
+ * updated independently, according to configuration flags.
+ *
+ * @note Final phase API.
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_sp_pf_stop - PF Function Stop Ramrod
+ *
+ * This ramrod is sent to close a Physical Function (PF). It is the last ramrod
+ * sent and the last completion written to the PF's Event Ring. This ramrod
+ * deletes the context for the Slowhwfn connection on this PF.
+ *
+ * @note Not required for first packet.
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_sp_heartbeat_ramrod - Send empty Ramrod
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn);
+
+struct ecore_rl_update_params {
+ u8 qcn_update_param_flg;
+ u8 dcqcn_update_param_flg;
+ u8 rl_init_flg;
+ u8 rl_start_flg;
+ u8 rl_stop_flg;
+ u8 rl_id_first;
+ u8 rl_id_last;
+	u8 rl_dc_qcn_flg; /* If set, RL will be used for DCQCN */
+ u32 rl_bc_rate; /* Byte Counter Limit */
+ u16 rl_max_rate; /* Maximum rate in 1.6 Mbps resolution */
+ u16 rl_r_ai; /* Active increase rate */
+ u16 rl_r_hai; /* Hyper active increase rate */
+ u16 dcqcn_g; /* DCQCN Alpha update gain in 1/64K resolution */
+ u32 dcqcn_k_us; /* DCQCN Alpha update interval */
+ u32 dcqcn_timeuot_us;
+ u32 qcn_timeuot_us;
+};
+
+/**
+ * @brief ecore_sp_rl_update - Update rate limiters
+ *
+ * @param p_hwfn
+ * @param params
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_rl_update_params *params);
+
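+/* Illustrative sketch (not taken from the driver): programming and starting a
+ * single rate limiter. The field values here are placeholders chosen only for
+ * the example; the units follow the field comments above.
+ *
+ *   struct ecore_rl_update_params params;
+ *
+ *   OSAL_MEMSET(&params, 0, sizeof(params));
+ *   params.rl_init_flg = 1;
+ *   params.rl_start_flg = 1;
+ *   params.rl_id_first = 0;
+ *   params.rl_id_last = 0;
+ *   params.rl_max_rate = 100;  /* in 1.6 Mbps resolution, per the field doc */
+ *   rc = ecore_sp_rl_update(p_hwfn, &params);
+ */
+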
+#endif /*__ECORE_SP_COMMANDS_H__*/
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_spq.c b/src/seastar/dpdk/drivers/net/qede/base/ecore_spq.c
new file mode 100644
index 00000000..3c1d05b3
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_spq.c
@@ -0,0 +1,998 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "reg_addr.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore_hsi_common.h"
+#include "ecore.h"
+#include "ecore_sp_api.h"
+#include "ecore_spq.h"
+#include "ecore_iro.h"
+#include "ecore_init_fw_funcs.h"
+#include "ecore_cxt.h"
+#include "ecore_int.h"
+#include "ecore_dev_api.h"
+#include "ecore_mcp.h"
+#include "ecore_hw.h"
+#include "ecore_sriov.h"
+
+/***************************************************************************
+ * Structures & Definitions
+ ***************************************************************************/
+
+#define SPQ_HIGH_PRI_RESERVE_DEFAULT (1)
+
+#define SPQ_BLOCK_DELAY_MAX_ITER (10)
+#define SPQ_BLOCK_DELAY_US (10)
+#define SPQ_BLOCK_SLEEP_MAX_ITER (1000)
+#define SPQ_BLOCK_SLEEP_MS (5)
+
+/***************************************************************************
+ * Blocking Imp. (BLOCK/EBLOCK mode)
+ ***************************************************************************/
+static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn,
+ void *cookie,
+ union event_ring_data *data,
+ u8 fw_return_code)
+{
+ struct ecore_spq_comp_done *comp_done;
+
+ comp_done = (struct ecore_spq_comp_done *)cookie;
+
+ comp_done->done = 0x1;
+ comp_done->fw_return_code = fw_return_code;
+
+ /* make update visible to waiting thread */
+ OSAL_SMP_WMB(p_hwfn->p_dev);
+}
+
+static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry *p_ent,
+ u8 *p_fw_ret,
+ bool sleep_between_iter)
+{
+ struct ecore_spq_comp_done *comp_done;
+ u32 iter_cnt;
+
+ comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
+ iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
+ : SPQ_BLOCK_DELAY_MAX_ITER;
+
+ while (iter_cnt--) {
+ OSAL_POLL_MODE_DPC(p_hwfn);
+ OSAL_SMP_RMB(p_hwfn->p_dev);
+ if (comp_done->done == 1) {
+ if (p_fw_ret)
+ *p_fw_ret = comp_done->fw_return_code;
+ return ECORE_SUCCESS;
+ }
+
+ if (sleep_between_iter)
+ OSAL_MSLEEP(SPQ_BLOCK_SLEEP_MS);
+ else
+ OSAL_UDELAY(SPQ_BLOCK_DELAY_US);
+ }
+
+ return ECORE_TIMEOUT;
+}
+
+static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry *p_ent,
+ u8 *p_fw_ret, bool skip_quick_poll)
+{
+ struct ecore_spq_comp_done *comp_done;
+ enum _ecore_status_t rc;
+
+ /* A relatively short polling period w/o sleeping, to allow the FW to
+ * complete the ramrod and thus possibly to avoid the following sleeps.
+ */
+ if (!skip_quick_poll) {
+ rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, false);
+ if (rc == ECORE_SUCCESS)
+ return ECORE_SUCCESS;
+ }
+
+ /* Move to polling with a sleeping period between iterations */
+ rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
+ if (rc == ECORE_SUCCESS)
+ return ECORE_SUCCESS;
+
+ DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
+ rc = ecore_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
+ goto err;
+ }
+
+ /* Retry after drain */
+ rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
+ if (rc == ECORE_SUCCESS)
+ return ECORE_SUCCESS;
+
+ comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
+ if (comp_done->done == 1) {
+ if (p_fw_ret)
+ *p_fw_ret = comp_done->fw_return_code;
+ return ECORE_SUCCESS;
+ }
+err:
+ DP_NOTICE(p_hwfn, true,
+ "Ramrod is stuck [CID %08x cmd %02x proto %02x echo %04x]\n",
+ OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
+ p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id,
+ OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
+
+ ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);
+
+ return ECORE_BUSY;
+}
+
+/***************************************************************************
+ * SPQ entries inner API
+ ***************************************************************************/
+static enum _ecore_status_t
+ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
+{
+ p_ent->flags = 0;
+
+ switch (p_ent->comp_mode) {
+ case ECORE_SPQ_MODE_EBLOCK:
+ case ECORE_SPQ_MODE_BLOCK:
+ p_ent->comp_cb.function = ecore_spq_blocking_cb;
+ break;
+ case ECORE_SPQ_MODE_CB:
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
+ p_ent->comp_mode);
+ return ECORE_INVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x]"
+ " Data pointer: [%08x:%08x] Completion Mode: %s\n",
+ p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id,
+ p_ent->elem.hdr.protocol_id,
+ p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
+ D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
+ ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
+ "MODE_CB"));
+
+ return ECORE_SUCCESS;
+}
+
+/***************************************************************************
+ * HSI access
+ ***************************************************************************/
+static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq *p_spq)
+{
+ struct ecore_cxt_info cxt_info;
+ struct core_conn_context *p_cxt;
+ enum _ecore_status_t rc;
+ u16 physical_q;
+
+ cxt_info.iid = p_spq->cid;
+
+ rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);
+
+ if (rc < 0) {
+ DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n",
+ p_spq->cid);
+ return;
+ }
+
+ p_cxt = cxt_info.p_cxt;
+
+ /* @@@TBD we zero the context until we have ilt_reset implemented. */
+ OSAL_MEM_ZERO(p_cxt, sizeof(*p_cxt));
+
+ if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) {
+ SET_FIELD(p_cxt->xstorm_ag_context.flags10,
+ E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
+ SET_FIELD(p_cxt->xstorm_ag_context.flags1,
+ E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
+ /* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
+ * E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
+ */
+ SET_FIELD(p_cxt->xstorm_ag_context.flags9,
+ E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+ }
+
+ /* CDU validation - FIXME currently disabled */
+
+ /* QM physical queue */
+ physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
+ p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(physical_q);
+
+ p_cxt->xstorm_st_context.spq_base_lo =
+ DMA_LO_LE(p_spq->chain.p_phys_addr);
+ p_cxt->xstorm_st_context.spq_base_hi =
+ DMA_HI_LE(p_spq->chain.p_phys_addr);
+
+ DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
+ p_hwfn->p_consq->chain.p_phys_addr);
+}
+
+static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq *p_spq,
+ struct ecore_spq_entry *p_ent)
+{
+ struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
+ u16 echo = ecore_chain_get_prod_idx(p_chain);
+ struct slow_path_element *elem;
+ struct core_db_data db;
+
+ p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
+ elem = ecore_chain_produce(p_chain);
+ if (!elem) {
+ DP_NOTICE(p_hwfn, true, "Failed to produce from SPQ chain\n");
+ return ECORE_INVAL;
+ }
+
+ *elem = p_ent->elem; /* struct assignment */
+
+ /* send a doorbell on the slow hwfn session */
+ OSAL_MEMSET(&db, 0, sizeof(db));
+ SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
+ SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
+ SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_CORE_SPQ_PROD_CMD);
+ db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
+ db.spq_prod = OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));
+
+ /* make sure the SPQE is updated before the doorbell */
+ OSAL_WMB(p_hwfn->p_dev);
+
+ DOORBELL(p_hwfn, DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY),
+ *(u32 *)&db);
+
+	/* make sure the doorbell is rung */
+ OSAL_WMB(p_hwfn->p_dev);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x"
+ " agg_params: %02x, prod: %04x\n",
+ DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), p_spq->cid, db.params,
+ db.agg_flags, ecore_chain_get_prod_idx(p_chain));
+
+ return ECORE_SUCCESS;
+}
+
+/***************************************************************************
+ * Asynchronous events
+ ***************************************************************************/
+
+static enum _ecore_status_t
+ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
+ struct event_ring_entry *p_eqe)
+{
+ switch (p_eqe->protocol_id) {
+ case PROTOCOLID_COMMON:
+ return ecore_sriov_eqe_event(p_hwfn,
+ p_eqe->opcode,
+ p_eqe->echo, &p_eqe->data);
+ default:
+ DP_NOTICE(p_hwfn,
+ true, "Unknown Async completion for protocol: %d\n",
+ p_eqe->protocol_id);
+ return ECORE_INVAL;
+ }
+}
+
+/***************************************************************************
+ * EQ API
+ ***************************************************************************/
+void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod)
+{
+ u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
+
+ REG_WR16(p_hwfn, addr, prod);
+
+ /* keep prod updates ordered */
+ OSAL_MMIOWB(p_hwfn->p_dev);
+}
+
+enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
+ void *cookie)
+{
+ struct ecore_eq *p_eq = cookie;
+ struct ecore_chain *p_chain = &p_eq->chain;
+ enum _ecore_status_t rc = 0;
+
+ /* take a snapshot of the FW consumer */
+ u16 fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
+
+	/* Need to guarantee the fw_cons index we use points to a usable
+ * element (to comply with our chain), so our macros would comply
+ */
+ if ((fw_cons_idx & ecore_chain_get_usable_per_page(p_chain)) ==
+ ecore_chain_get_usable_per_page(p_chain)) {
+ fw_cons_idx += ecore_chain_get_unusable_per_page(p_chain);
+ }
+
+ /* Complete current segment of eq entries */
+ while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
+ struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
+ if (!p_eqe) {
+ rc = ECORE_INVAL;
+ break;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
+ p_eqe->opcode, /* Event Opcode */
+ p_eqe->protocol_id, /* Event Protocol ID */
+ p_eqe->reserved0, /* Reserved */
+ /* Echo value from ramrod data on the host */
+ OSAL_LE16_TO_CPU(p_eqe->echo),
+ p_eqe->fw_return_code, /* FW return code for SP
+ * ramrods
+ */
+ p_eqe->flags);
+
+ if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
+ if (ecore_async_event_completion(p_hwfn, p_eqe))
+ rc = ECORE_INVAL;
+ } else if (ecore_spq_completion(p_hwfn,
+ p_eqe->echo,
+ p_eqe->fw_return_code,
+ &p_eqe->data)) {
+ rc = ECORE_INVAL;
+ }
+
+ ecore_chain_recycle_consumed(p_chain);
+ }
+
+ ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain));
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
+{
+ struct ecore_eq *p_eq;
+
+ /* Allocate EQ struct */
+ p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq));
+ if (!p_eq) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate `struct ecore_eq'\n");
+ return ECORE_NOMEM;
+ }
+
+ /* Allocate and initialize EQ chain*/
+ if (ecore_chain_alloc(p_hwfn->p_dev,
+ ECORE_CHAIN_USE_TO_PRODUCE,
+ ECORE_CHAIN_MODE_PBL,
+ ECORE_CHAIN_CNT_TYPE_U16,
+ num_elem,
+ sizeof(union event_ring_element),
+ &p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain\n");
+ goto eq_allocate_fail;
+ }
+
+ /* register EQ completion on the SP SB */
+ ecore_int_register_cb(p_hwfn, ecore_eq_completion,
+ p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
+
+ p_hwfn->p_eq = p_eq;
+ return ECORE_SUCCESS;
+
+eq_allocate_fail:
+ OSAL_FREE(p_hwfn->p_dev, p_eq);
+ return ECORE_NOMEM;
+}
+
+void ecore_eq_setup(struct ecore_hwfn *p_hwfn)
+{
+ ecore_chain_reset(&p_hwfn->p_eq->chain);
+}
+
+void ecore_eq_free(struct ecore_hwfn *p_hwfn)
+{
+ if (!p_hwfn->p_eq)
+ return;
+
+ ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_eq->chain);
+
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_eq);
+ p_hwfn->p_eq = OSAL_NULL;
+}
+
+/***************************************************************************
+* CQE API - manipulate EQ functionality
+***************************************************************************/
+static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,
+ struct eth_slow_path_rx_cqe
+ *cqe,
+ enum protocol_type protocol)
+{
+ if (IS_VF(p_hwfn->p_dev))
+ return OSAL_VF_CQE_COMPLETION(p_hwfn, cqe, protocol);
+
+ /* @@@tmp - it's possible we'll eventually want to handle some
+ * actual commands that can arrive here, but for now this is only
+ * used to complete the ramrod using the echo value on the cqe
+ */
+ return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL);
+}
+
+enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
+ struct eth_slow_path_rx_cqe *cqe)
+{
+ enum _ecore_status_t rc;
+
+ rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to handle RXQ CQE [cmd 0x%02x]\n",
+ cqe->ramrod_cmd_id);
+ }
+
+ return rc;
+}
+
+/***************************************************************************
+ * Slow hwfn Queue (spq)
+ ***************************************************************************/
+void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_spq *p_spq = p_hwfn->p_spq;
+ struct ecore_spq_entry *p_virt = OSAL_NULL;
+ dma_addr_t p_phys = 0;
+ u32 i, capacity;
+
+ OSAL_LIST_INIT(&p_spq->pending);
+ OSAL_LIST_INIT(&p_spq->completion_pending);
+ OSAL_LIST_INIT(&p_spq->free_pool);
+ OSAL_LIST_INIT(&p_spq->unlimited_pending);
+ OSAL_SPIN_LOCK_INIT(&p_spq->lock);
+
+ /* SPQ empty pool */
+ p_phys = p_spq->p_phys + OFFSETOF(struct ecore_spq_entry, ramrod);
+ p_virt = p_spq->p_virt;
+
+ capacity = ecore_chain_get_capacity(&p_spq->chain);
+ for (i = 0; i < capacity; i++) {
+ DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);
+
+ OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);
+
+ p_virt++;
+ p_phys += sizeof(struct ecore_spq_entry);
+ }
+
+ /* Statistics */
+ p_spq->normal_count = 0;
+ p_spq->comp_count = 0;
+ p_spq->comp_sent_count = 0;
+ p_spq->unlimited_pending_count = 0;
+
+ OSAL_MEM_ZERO(p_spq->p_comp_bitmap,
+ SPQ_COMP_BMAP_SIZE * sizeof(unsigned long));
+ p_spq->comp_bitmap_idx = 0;
+
+ /* SPQ cid, cannot fail */
+ ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
+ ecore_spq_hw_initialize(p_hwfn, p_spq);
+
+ /* reset the chain itself */
+ ecore_chain_reset(&p_spq->chain);
+}
+
+enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_spq_entry *p_virt = OSAL_NULL;
+ struct ecore_spq *p_spq = OSAL_NULL;
+ dma_addr_t p_phys = 0;
+ u32 capacity;
+
+ /* SPQ struct */
+ p_spq =
+ OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
+ if (!p_spq) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate `struct ecore_spq'\n");
+ return ECORE_NOMEM;
+ }
+
+ /* SPQ ring */
+ if (ecore_chain_alloc(p_hwfn->p_dev,
+ ECORE_CHAIN_USE_TO_PRODUCE,
+ ECORE_CHAIN_MODE_SINGLE,
+ ECORE_CHAIN_CNT_TYPE_U16,
+ 0, /* N/A when the mode is SINGLE */
+ sizeof(struct slow_path_element),
+ &p_spq->chain, OSAL_NULL)) {
+ DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain\n");
+ goto spq_allocate_fail;
+ }
+
+ /* allocate and fill the SPQ elements (incl. ramrod data list) */
+ capacity = ecore_chain_get_capacity(&p_spq->chain);
+ p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
+ capacity *
+ sizeof(struct ecore_spq_entry));
+ if (!p_virt)
+ goto spq_allocate_fail;
+
+ p_spq->p_virt = p_virt;
+ p_spq->p_phys = p_phys;
+
+ OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock);
+
+ p_hwfn->p_spq = p_spq;
+ return ECORE_SUCCESS;
+
+spq_allocate_fail:
+ ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
+ OSAL_FREE(p_hwfn->p_dev, p_spq);
+ return ECORE_NOMEM;
+}
+
+void ecore_spq_free(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_spq *p_spq = p_hwfn->p_spq;
+ u32 capacity;
+
+ if (!p_spq)
+ return;
+
+ if (p_spq->p_virt) {
+ capacity = ecore_chain_get_capacity(&p_spq->chain);
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_spq->p_virt,
+ p_spq->p_phys,
+ capacity *
+ sizeof(struct ecore_spq_entry));
+ }
+
+ ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
+ OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
+ OSAL_FREE(p_hwfn->p_dev, p_spq);
+}
+
+enum _ecore_status_t
+ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
+{
+ struct ecore_spq *p_spq = p_hwfn->p_spq;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ OSAL_SPIN_LOCK(&p_spq->lock);
+
+ if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
+ p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent));
+ if (!p_ent) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate an SPQ entry for a pending"
+ " ramrod\n");
+ rc = ECORE_NOMEM;
+ goto out_unlock;
+ }
+ p_ent->queue = &p_spq->unlimited_pending;
+ } else {
+ p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
+ struct ecore_spq_entry, list);
+ OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool);
+ p_ent->queue = &p_spq->pending;
+ }
+
+ *pp_ent = p_ent;
+
+out_unlock:
+ OSAL_SPIN_UNLOCK(&p_spq->lock);
+ return rc;
+}
+
+/* Locked variant; Should be called while the SPQ lock is taken */
+static void __ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry *p_ent)
+{
+ OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool);
+}
+
+void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry *p_ent)
+{
+ OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
+ __ecore_spq_return_entry(p_hwfn, p_ent);
+ OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);
+}
+
+/**
+ * @brief ecore_spq_add_entry - adds a new entry to the pending
+ * list. Should be used while lock is being held.
+ *
+ * Adds an entry to the pending list if there is room (an empty
+ * element is available in the free_pool); otherwise places the
+ * entry in the unlimited_pending pool.
+ *
+ * @param p_hwfn
+ * @param p_ent
+ * @param priority
+ *
+ * @return enum _ecore_status_t
+ */
+static enum _ecore_status_t
+ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry *p_ent, enum spq_priority priority)
+{
+ struct ecore_spq *p_spq = p_hwfn->p_spq;
+
+ if (p_ent->queue == &p_spq->unlimited_pending) {
+ if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
+ OSAL_LIST_PUSH_TAIL(&p_ent->list,
+ &p_spq->unlimited_pending);
+ p_spq->unlimited_pending_count++;
+
+ return ECORE_SUCCESS;
+
+ } else {
+ struct ecore_spq_entry *p_en2;
+
+ p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
+ struct ecore_spq_entry,
+ list);
+ OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);
+
+ /* Copy the ring element physical pointer to the new
+			 * entry, since we are about to overwrite the entire ring
+ * entry and don't want to lose the pointer.
+ */
+ p_ent->elem.data_ptr = p_en2->elem.data_ptr;
+
+ *p_en2 = *p_ent;
+
+			/* EBLOCK is responsible for freeing the allocated p_ent */
+ if (p_ent->comp_mode != ECORE_SPQ_MODE_EBLOCK)
+ OSAL_FREE(p_hwfn->p_dev, p_ent);
+
+ p_ent = p_en2;
+ }
+ }
+
+ /* entry is to be placed in 'pending' queue */
+ switch (priority) {
+ case ECORE_SPQ_PRIORITY_NORMAL:
+ OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending);
+ p_spq->normal_count++;
+ break;
+ case ECORE_SPQ_PRIORITY_HIGH:
+ OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending);
+ p_spq->high_count++;
+ break;
+ default:
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+/***************************************************************************
+ * Accessor
+ ***************************************************************************/
+
+u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn)
+{
+ if (!p_hwfn->p_spq)
+ return 0xffffffff; /* illegal */
+ return p_hwfn->p_spq->cid;
+}
+
+/***************************************************************************
+ * Posting new Ramrods
+ ***************************************************************************/
+
+static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
+ osal_list_t *head,
+ u32 keep_reserve)
+{
+ struct ecore_spq *p_spq = p_hwfn->p_spq;
+ enum _ecore_status_t rc;
+
+ /* TODO - implementation might be wasteful; will always keep room
+ * for an additional high priority ramrod (even if one is already
+ * pending in the FW)
+ */
+ while (ecore_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
+ !OSAL_LIST_IS_EMPTY(head)) {
+ struct ecore_spq_entry *p_ent =
+ OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
+ if (p_ent != OSAL_NULL) {
+#if defined(_NTDDK_)
+#pragma warning(suppress : 6011 28182)
+#endif
+ OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
+ OSAL_LIST_PUSH_TAIL(&p_ent->list,
+ &p_spq->completion_pending);
+ p_spq->comp_sent_count++;
+
+ rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
+ if (rc) {
+ OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
+ &p_spq->completion_pending);
+ __ecore_spq_return_entry(p_hwfn, p_ent);
+ return rc;
+ }
+ }
+ }
+
+ return ECORE_SUCCESS;
+}
+
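+/* Move entries from 'unlimited_pending' into 'pending' for as long as the
+ * free pool has entries to back them, then post whatever fits on the chain
+ * while keeping the high-priority reserve.
+ */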
+static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_spq *p_spq = p_hwfn->p_spq;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+
+ while (!OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
+ if (OSAL_LIST_IS_EMPTY(&p_spq->unlimited_pending))
+ break;
+
+ p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending,
+ struct ecore_spq_entry, list);
+ if (!p_ent)
+ return ECORE_INVAL;
+
+#if defined(_NTDDK_)
+#pragma warning(suppress : 6011)
+#endif
+ OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);
+
+ ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
+ }
+
+ return ecore_spq_post_list(p_hwfn,
+ &p_spq->pending, SPQ_HIGH_PRI_RESERVE_DEFAULT);
+}
+
+enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry *p_ent,
+ u8 *fw_return_code)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct ecore_spq *p_spq = p_hwfn ? p_hwfn->p_spq : OSAL_NULL;
+ bool b_ret_ent = true;
+
+ if (!p_hwfn)
+ return ECORE_INVAL;
+
+ if (!p_ent) {
+ DP_NOTICE(p_hwfn, true, "Got a NULL pointer\n");
+ return ECORE_INVAL;
+ }
+
+ if (p_hwfn->p_dev->recov_in_prog) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Recovery is in progress -> skip spq post"
+ " [cmd %02x protocol %02x]\n",
+ p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
+ /* Return success to let the flows complete successfully
+ * w/o any error handling.
+ */
+ return ECORE_SUCCESS;
+ }
+
+ OSAL_SPIN_LOCK(&p_spq->lock);
+
+ /* Complete the entry */
+ rc = ecore_spq_fill_entry(p_hwfn, p_ent);
+
+ /* Check return value after LOCK is taken for cleaner error flow */
+ if (rc)
+ goto spq_post_fail;
+
+ /* Add the request to the pending queue */
+ rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
+ if (rc)
+ goto spq_post_fail;
+
+ rc = ecore_spq_pend_post(p_hwfn);
+ if (rc) {
+ /* Since it's possible that pending failed for a different
+ * entry [although unlikely], the failed entry was already
+ * dealt with; No need to return it here.
+ */
+ b_ret_ent = false;
+ goto spq_post_fail;
+ }
+
+ OSAL_SPIN_UNLOCK(&p_spq->lock);
+
+ if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) {
+ /* For entries in ECORE BLOCK mode, the completion code cannot
+ * perform the necessary cleanup - if it did, we couldn't
+ * access p_ent here to see whether it's successful or not.
+ * Thus, after gaining the answer perform the cleanup here.
+ */
+ rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code,
+ p_ent->queue == &p_spq->unlimited_pending);
+
+ if (p_ent->queue == &p_spq->unlimited_pending) {
+ /* This is an allocated p_ent which does not need to
+ * return to pool.
+ */
+ OSAL_FREE(p_hwfn->p_dev, p_ent);
+
+ /* TBD: handle error flow and remove p_ent from
+ * completion pending
+ */
+ return rc;
+ }
+
+ if (rc)
+ goto spq_post_fail2;
+
+ /* return to pool */
+ ecore_spq_return_entry(p_hwfn, p_ent);
+ }
+ return rc;
+
+spq_post_fail2:
+ OSAL_SPIN_LOCK(&p_spq->lock);
+ OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending);
+ ecore_chain_return_produced(&p_spq->chain);
+
+spq_post_fail:
+ /* return to the free pool */
+ if (b_ret_ent)
+ __ecore_spq_return_entry(p_hwfn, p_ent);
+ OSAL_SPIN_UNLOCK(&p_spq->lock);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
+ __le16 echo,
+ u8 fw_return_code,
+ union event_ring_data *p_data)
+{
+ struct ecore_spq *p_spq;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_spq_entry *tmp;
+ struct ecore_spq_entry *found = OSAL_NULL;
+ enum _ecore_status_t rc;
+
+ if (!p_hwfn)
+ return ECORE_INVAL;
+
+ p_spq = p_hwfn->p_spq;
+ if (!p_spq)
+ return ECORE_INVAL;
+
+ OSAL_SPIN_LOCK(&p_spq->lock);
+ OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
+ tmp,
+ &p_spq->completion_pending,
+ list, struct ecore_spq_entry) {
+ if (p_ent->elem.hdr.echo == echo) {
+ OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
+ &p_spq->completion_pending);
+
+ /* Avoid overwriting SPQ entries when getting
+ * out-of-order completions, by marking the completions
+ * in a bitmap and increasing the chain consumer only
+ * for the first successive completed entries.
+ */
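+ /* Example: if completions arrive as echo 2, 0, 1, only bit 2
+ * is set at first and the chain consumer is not advanced; once
+ * echoes 0 and 1 complete the run, all three elements are
+ * returned in order.
+ */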
+ SPQ_COMP_BMAP_SET_BIT(p_spq, echo);
+ while (SPQ_COMP_BMAP_TEST_BIT(p_spq,
+ p_spq->comp_bitmap_idx)) {
+ SPQ_COMP_BMAP_CLEAR_BIT(p_spq,
+ p_spq->comp_bitmap_idx);
+ p_spq->comp_bitmap_idx++;
+ ecore_chain_return_produced(&p_spq->chain);
+ }
+
+ p_spq->comp_count++;
+ found = p_ent;
+ break;
+ }
+
+ /* This is debug and should be relatively uncommon - depends
+ * on scenarios which have multiple per-PF sent ramrods.
+ */
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Got completion for echo %04x - doesn't match"
+ " echo %04x in completion pending list\n",
+ OSAL_LE16_TO_CPU(echo),
+ OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
+ }
+
+ /* Release lock before callback, as callback may post
+ * an additional ramrod.
+ */
+ OSAL_SPIN_UNLOCK(&p_spq->lock);
+
+ if (!found) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to find an entry this"
+ " EQE [echo %04x] completes\n",
+ OSAL_LE16_TO_CPU(echo));
+ return ECORE_EXISTS;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Complete EQE [echo %04x]: func %p cookie %p)\n",
+ OSAL_LE16_TO_CPU(echo),
+ p_ent->comp_cb.function, p_ent->comp_cb.cookie);
+ if (found->comp_cb.function)
+ found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
+ fw_return_code);
+ else
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Got a completion without a callback function\n");
+
+ if ((found->comp_mode != ECORE_SPQ_MODE_EBLOCK) ||
+ (found->queue == &p_spq->unlimited_pending))
+ /* EBLOCK is responsible for returning its own entry into the
+ * free list, unless it originally added the entry into the
+ * unlimited pending list.
+ */
+ ecore_spq_return_entry(p_hwfn, found);
+
+ /* Attempt to post pending requests */
+ OSAL_SPIN_LOCK(&p_spq->lock);
+ rc = ecore_spq_pend_post(p_hwfn);
+ OSAL_SPIN_UNLOCK(&p_spq->lock);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_consq *p_consq;
+
+ /* Allocate ConsQ struct */
+ p_consq =
+ OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq));
+ if (!p_consq) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate `struct ecore_consq'\n");
+ return ECORE_NOMEM;
+ }
+
+ /* Allocate and initialize ConsQ chain */
+ if (ecore_chain_alloc(p_hwfn->p_dev,
+ ECORE_CHAIN_USE_TO_PRODUCE,
+ ECORE_CHAIN_MODE_PBL,
+ ECORE_CHAIN_CNT_TYPE_U16,
+ ECORE_CHAIN_PAGE_SIZE / 0x80,
+ 0x80,
+ &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true, "Failed to allocate consq chain");
+ goto consq_allocate_fail;
+ }
+
+ p_hwfn->p_consq = p_consq;
+ return ECORE_SUCCESS;
+
+consq_allocate_fail:
+ OSAL_FREE(p_hwfn->p_dev, p_consq);
+ return ECORE_NOMEM;
+}
+
+void ecore_consq_setup(struct ecore_hwfn *p_hwfn)
+{
+ ecore_chain_reset(&p_hwfn->p_consq->chain);
+}
+
+void ecore_consq_free(struct ecore_hwfn *p_hwfn)
+{
+ if (!p_hwfn->p_consq)
+ return;
+
+ ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_consq->chain);
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_consq);
+}
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_spq.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_spq.h
new file mode 100644
index 00000000..e530f834
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_spq.h
@@ -0,0 +1,282 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_SPQ_H__
+#define __ECORE_SPQ_H__
+
+#include "ecore_hsi_common.h"
+#include "ecore_status.h"
+#include "ecore_hsi_eth.h"
+#include "ecore_chain.h"
+#include "ecore_sp_api.h"
+
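+/* Every SPQ entry embeds one of these; the union sizes the per-entry ramrod
+ * payload to the largest ramrod data structure.
+ */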
+union ramrod_data {
+ struct pf_start_ramrod_data pf_start;
+ struct pf_update_ramrod_data pf_update;
+ struct rl_update_ramrod_data rl_update;
+ struct rx_queue_start_ramrod_data rx_queue_start;
+ struct rx_queue_update_ramrod_data rx_queue_update;
+ struct rx_queue_stop_ramrod_data rx_queue_stop;
+ struct tx_queue_start_ramrod_data tx_queue_start;
+ struct tx_queue_stop_ramrod_data tx_queue_stop;
+ struct vport_start_ramrod_data vport_start;
+ struct vport_stop_ramrod_data vport_stop;
+ struct rx_update_gft_filter_data rx_update_gft;
+ struct vport_update_ramrod_data vport_update;
+ struct core_rx_start_ramrod_data core_rx_queue_start;
+ struct core_rx_stop_ramrod_data core_rx_queue_stop;
+ struct core_tx_start_ramrod_data core_tx_queue_start;
+ struct core_tx_stop_ramrod_data core_tx_queue_stop;
+ struct vport_filter_update_ramrod_data vport_filter_update;
+
+ struct vf_start_ramrod_data vf_start;
+ struct vf_stop_ramrod_data vf_stop;
+};
+
+#define EQ_MAX_CREDIT 0xffffffff
+
+enum spq_priority {
+ ECORE_SPQ_PRIORITY_NORMAL,
+ ECORE_SPQ_PRIORITY_HIGH,
+};
+
+union ecore_spq_req_comp {
+ struct ecore_spq_comp_cb cb;
+ u64 *done_addr;
+};
+
+/* SPQ_MODE_EBLOCK */
+struct ecore_spq_comp_done {
+ u64 done;
+ u8 fw_return_code;
+};
+
+struct ecore_spq_entry {
+ osal_list_entry_t list;
+
+ u8 flags;
+
+ /* HSI slow path element */
+ struct slow_path_element elem;
+
+ union ramrod_data ramrod;
+
+ enum spq_priority priority;
+
+ /* pending queue for this entry */
+ osal_list_t *queue;
+
+ enum spq_mode comp_mode;
+ struct ecore_spq_comp_cb comp_cb;
+ struct ecore_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */
+};
+
+struct ecore_eq {
+ struct ecore_chain chain;
+ u8 eq_sb_index; /* index within the SB */
+ __le16 *p_fw_cons; /* ptr to index value */
+};
+
+struct ecore_consq {
+ struct ecore_chain chain;
+};
+
+struct ecore_spq {
+ osal_spinlock_t lock;
+
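+ /* Entry lifecycle: free_pool -> pending (or unlimited_pending when
+ * the free pool is exhausted) -> completion_pending -> free_pool.
+ */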
+ osal_list_t unlimited_pending;
+ osal_list_t pending;
+ osal_list_t completion_pending;
+ osal_list_t free_pool;
+
+ struct ecore_chain chain;
+
+ /* allocated dma-able memory for spq entries (+ramrod data) */
+ dma_addr_t p_phys;
+ struct ecore_spq_entry *p_virt;
+
+ /* Bitmap for handling out-of-order completions */
+#define SPQ_RING_SIZE \
+ (CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element))
+/* BITS_PER_LONG */
+#define SPQ_COMP_BMAP_SIZE (SPQ_RING_SIZE / (sizeof(unsigned long) * 8))
+ unsigned long p_comp_bitmap[SPQ_COMP_BMAP_SIZE];
+ u8 comp_bitmap_idx;
+#define SPQ_COMP_BMAP_SET_BIT(p_spq, idx) \
+ (OSAL_SET_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap))
+
+#define SPQ_COMP_BMAP_CLEAR_BIT(p_spq, idx) \
+ (OSAL_CLEAR_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap))
+
+#define SPQ_COMP_BMAP_TEST_BIT(p_spq, idx) \
+ (OSAL_TEST_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap))
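+/* The bitmap is indexed by the ramrod echo modulo SPQ_RING_SIZE, giving one
+ * bit per ring element for tracking out-of-order completions.
+ */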
+
+ /* Statistics */
+ u32 unlimited_pending_count;
+ u32 normal_count;
+ u32 high_count;
+ u32 comp_sent_count;
+ u32 comp_count;
+
+ u32 cid;
+};
+
+struct ecore_port;
+struct ecore_hwfn;
+
+/**
+ * @brief ecore_spq_post - Posts a slowpath request to the FW, or queues it
+ *        on the pending list if it cannot be posted immediately.
+ *
+ * @param p_hwfn
+ * @param p_ent
+ * @param fw_return_code
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry *p_ent,
+ u8 *fw_return_code);
+
+/**
+ * @brief ecore_spq_alloc - Allocates & initializes the SPQ.
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_spq_setup - Reset the SPQ to its start state.
+ *
+ * @param p_hwfn
+ */
+void ecore_spq_setup(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_spq_free - Deallocates the given SPQ struct.
+ *
+ * @param p_hwfn
+ */
+void ecore_spq_free(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_spq_get_entry - Obtain an entry from the spq
+ *        free pool list.
+ *
+ * @param p_hwfn
+ * @param pp_ent
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_spq_get_entry(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry **pp_ent);
+
+/**
+ * @brief ecore_spq_return_entry - Return an entry to spq free
+ * pool list
+ *
+ * @param p_hwfn
+ * @param p_ent
+ */
+void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry *p_ent);
+/**
+ * @brief ecore_eq_alloc - Allocates & initializes an EQ struct
+ *
+ * @param p_hwfn
+ * @param num_elem number of elements in the eq
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem);
+
+/**
+ * @brief ecore_eq_setup - Reset the EQ to its start state.
+ *
+ * @param p_hwfn
+ */
+void ecore_eq_setup(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_eq_free - deallocates the given EQ struct.
+ *
+ * @param p_hwfn
+ */
+void ecore_eq_free(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_eq_prod_update - update the FW with default EQ producer
+ *
+ * @param p_hwfn
+ * @param prod
+ */
+void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn,
+ u16 prod);
+
+/**
+ * @brief ecore_eq_completion - Completes currently pending EQ elements
+ *
+ * @param p_hwfn
+ * @param cookie
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
+ void *cookie);
+
+/**
+ * @brief ecore_spq_completion - Completes a single event
+ *
+ * @param p_hwfn
+ * @param echo - echo value from cookie (used for determining completion)
+ * @param fw_return_code - FW return code passed to the completion callback
+ * @param p_data - data from cookie (used in callback function if applicable)
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
+ __le16 echo,
+ u8 fw_return_code,
+ union event_ring_data *p_data);
+
+/**
+ * @brief ecore_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ
+ *
+ * @param p_hwfn
+ *
+ * @return u32 - SPQ CID
+ */
+u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_consq_alloc - Allocates & initializes a ConsQ struct
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_consq_setup - Reset the ConsQ to its start state.
+ *
+ * @param p_hwfn
+ */
+void ecore_consq_setup(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_consq_free - deallocates the given ConsQ struct.
+ *
+ * @param p_hwfn
+ */
+void ecore_consq_free(struct ecore_hwfn *p_hwfn);
+
+#endif /* __ECORE_SPQ_H__ */
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_sriov.c b/src/seastar/dpdk/drivers/net/qede/base/ecore_sriov.c
new file mode 100644
index 00000000..db2873e7
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_sriov.c
@@ -0,0 +1,4515 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "ecore.h"
+#include "reg_addr.h"
+#include "ecore_sriov.h"
+#include "ecore_status.h"
+#include "ecore_hw.h"
+#include "ecore_hw_defs.h"
+#include "ecore_int.h"
+#include "ecore_hsi_eth.h"
+#include "ecore_l2.h"
+#include "ecore_vfpf_if.h"
+#include "ecore_rt_defs.h"
+#include "ecore_init_ops.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore_iro.h"
+#include "ecore_mcp.h"
+#include "ecore_cxt.h"
+#include "ecore_vf.h"
+#include "ecore_init_fw_funcs.h"
+#include "ecore_sp_commands.h"
+
+const char *ecore_channel_tlvs_string[] = {
+ "CHANNEL_TLV_NONE", /* ends tlv sequence */
+ "CHANNEL_TLV_ACQUIRE",
+ "CHANNEL_TLV_VPORT_START",
+ "CHANNEL_TLV_VPORT_UPDATE",
+ "CHANNEL_TLV_VPORT_TEARDOWN",
+ "CHANNEL_TLV_START_RXQ",
+ "CHANNEL_TLV_START_TXQ",
+ "CHANNEL_TLV_STOP_RXQ",
+ "CHANNEL_TLV_STOP_TXQ",
+ "CHANNEL_TLV_UPDATE_RXQ",
+ "CHANNEL_TLV_INT_CLEANUP",
+ "CHANNEL_TLV_CLOSE",
+ "CHANNEL_TLV_RELEASE",
+ "CHANNEL_TLV_LIST_END",
+ "CHANNEL_TLV_UCAST_FILTER",
+ "CHANNEL_TLV_VPORT_UPDATE_ACTIVATE",
+ "CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH",
+ "CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP",
+ "CHANNEL_TLV_VPORT_UPDATE_MCAST",
+ "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM",
+ "CHANNEL_TLV_VPORT_UPDATE_RSS",
+ "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN",
+ "CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
+ "CHANNEL_TLV_UPDATE_TUNN_PARAM",
+ "CHANNEL_TLV_COALESCE_UPDATE",
+ "CHANNEL_TLV_MAX"
+};
+
+/* IOV ramrods */
+static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf)
+{
+ struct vf_start_ramrod_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+ u8 fp_minor;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_vf->opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_VF_START,
+ PROTOCOLID_COMMON, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vf_start;
+
+ p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
+ p_ramrod->opaque_fid = OSAL_CPU_TO_LE16(p_vf->opaque_fid);
+
+ switch (p_hwfn->hw_info.personality) {
+ case ECORE_PCI_ETH:
+ p_ramrod->personality = PERSONALITY_ETH;
+ break;
+ case ECORE_PCI_ETH_ROCE:
+ case ECORE_PCI_ETH_IWARP:
+ p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true, "Unknown VF personality %d\n",
+ p_hwfn->hw_info.personality);
+ return ECORE_INVAL;
+ }
+
+ fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
+ if (fp_minor > ETH_HSI_VER_MINOR &&
+ fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF [%d] - Requested fp hsi %02x.%02x which is"
+ " slightly newer than PF's %02x.%02x; Configuring"
+ " PFs version\n",
+ p_vf->abs_vf_id,
+ ETH_HSI_VER_MAJOR, fp_minor,
+ ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
+ fp_minor = ETH_HSI_VER_MINOR;
+ }
+
+ p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
+ p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] - Starting using HSI %02x.%02x\n",
+ p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+static enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
+ u32 concrete_vfid,
+ u16 opaque_vfid)
+{
+ struct vf_stop_ramrod_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = opaque_vfid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_VF_STOP,
+ PROTOCOLID_COMMON, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vf_stop;
+
+ p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
+ bool b_enabled_only, bool b_non_malicious)
+{
+ if (!p_hwfn->pf_iov_info) {
+ DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
+ return false;
+ }
+
+ if ((rel_vf_id >= p_hwfn->p_dev->p_iov_info->total_vfs) ||
+ (rel_vf_id < 0))
+ return false;
+
+ if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
+ b_enabled_only)
+ return false;
+
+ if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
+ b_non_malicious)
+ return false;
+
+ return true;
+}
+
+struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
+ u16 relative_vf_id,
+ bool b_enabled_only)
+{
+ struct ecore_vf_info *vf = OSAL_NULL;
+
+ if (!p_hwfn->pf_iov_info) {
+ DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
+ return OSAL_NULL;
+ }
+
+ if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id,
+ b_enabled_only, false))
+ vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
+ else
+ DP_ERR(p_hwfn, "ecore_iov_get_vf_info: VF[%d] is not enabled\n",
+ relative_vf_id);
+
+ return vf;
+}
+
+static struct ecore_queue_cid *
+ecore_iov_get_vf_rx_queue_cid(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ struct ecore_vf_queue *p_queue)
+{
+ int i;
+
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ if (p_queue->cids[i].p_cid &&
+ !p_queue->cids[i].b_is_tx)
+ return p_queue->cids[i].p_cid;
+ }
+
+ return OSAL_NULL;
+}
+
+enum ecore_iov_validate_q_mode {
+ ECORE_IOV_VALIDATE_Q_NA,
+ ECORE_IOV_VALIDATE_Q_ENABLE,
+ ECORE_IOV_VALIDATE_Q_DISABLE,
+};
+
+static bool ecore_iov_validate_queue_mode(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ u16 qid,
+ enum ecore_iov_validate_q_mode mode,
+ bool b_is_tx)
+{
+ int i;
+
+ if (mode == ECORE_IOV_VALIDATE_Q_NA)
+ return true;
+
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ struct ecore_vf_queue_cid *p_qcid;
+
+ p_qcid = &p_vf->vf_queues[qid].cids[i];
+
+ if (p_qcid->p_cid == OSAL_NULL)
+ continue;
+
+ if (p_qcid->b_is_tx != b_is_tx)
+ continue;
+
+ /* Found. It's enabled. */
+ return (mode == ECORE_IOV_VALIDATE_Q_ENABLE);
+ }
+
+ /* In case we haven't found any valid cid, then it's disabled */
+ return (mode == ECORE_IOV_VALIDATE_Q_DISABLE);
+}
+
+static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ u16 rx_qid,
+ enum ecore_iov_validate_q_mode mode)
+{
+ if (rx_qid >= p_vf->num_rxqs) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[0x%02x] - can't touch Rx queue[%04x];"
+ " Only 0x%04x are allocated\n",
+ p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
+ return false;
+ }
+
+ return ecore_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid,
+ mode, false);
+}
+
+static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ u16 tx_qid,
+ enum ecore_iov_validate_q_mode mode)
+{
+ if (tx_qid >= p_vf->num_txqs) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[0x%02x] - can't touch Tx queue[%04x];"
+ " Only 0x%04x are allocated\n",
+ p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
+ return false;
+ }
+
+ return ecore_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid,
+ mode, true);
+}
+
+static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ u16 sb_idx)
+{
+ int i;
+
+ for (i = 0; i < p_vf->num_sbs; i++)
+ if (p_vf->igu_sbs[i] == sb_idx)
+ return true;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[0%02x] - tried using sb_idx %04x which doesn't exist as"
+ " one of its 0x%02x SBs\n",
+ p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);
+
+ return false;
+}
+
+/* Is there at least 1 queue open? */
+static bool ecore_iov_validate_active_rxq(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf)
+{
+ u8 i;
+
+ for (i = 0; i < p_vf->num_rxqs; i++)
+ if (ecore_iov_validate_queue_mode(p_hwfn, p_vf, i,
+ ECORE_IOV_VALIDATE_Q_ENABLE,
+ false))
+ return true;
+
+ return false;
+}
+
+static bool ecore_iov_validate_active_txq(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf)
+{
+ u8 i;
+
+ for (i = 0; i < p_vf->num_txqs; i++)
+ if (ecore_iov_validate_queue_mode(p_hwfn, p_vf, i,
+ ECORE_IOV_VALIDATE_Q_ENABLE,
+ true))
+ return true;
+
+ return false;
+}
+
+/* TODO - this is linux crc32; Need a way to ifdef it out for linux */
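+/* Bitwise (reflected) CRC-32 with polynomial 0xedb88320; used below to
+ * protect the VF bulletin board contents.
+ */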
+u32 ecore_crc32(u32 crc, u8 *ptr, u32 length)
+{
+ int i;
+
+ while (length--) {
+ crc ^= *ptr++;
+ for (i = 0; i < 8; i++)
+ crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
+ }
+ return crc;
+}
+
+enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
+ int vfid,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_bulletin_content *p_bulletin;
+ int crc_size = sizeof(p_bulletin->crc);
+ struct ecore_dmae_params params;
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!p_vf)
+ return ECORE_INVAL;
+
+ /* TODO - check VF is in a state where it can accept message */
+ if (!p_vf->vf_bulletin)
+ return ECORE_INVAL;
+
+ p_bulletin = p_vf->bulletin.p_virt;
+
+ /* Increment bulletin board version and compute crc */
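+ /* The CRC covers everything in the bulletin that follows the crc
+ * field itself, hence the crc_size offset below.
+ */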
+ p_bulletin->version++;
+ p_bulletin->crc = ecore_crc32(0, (u8 *)p_bulletin + crc_size,
+ p_vf->bulletin.size - crc_size);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
+ p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);
+
+ /* propagate bulletin board via dmae to vm memory */
+ OSAL_MEMSET(&params, 0, sizeof(params));
+ params.flags = ECORE_DMAE_FLAG_VF_DST;
+ params.dst_vfid = p_vf->abs_vf_id;
+ return ecore_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
+ p_vf->vf_bulletin, p_vf->bulletin.size / 4,
+ &params);
+}
+
+static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
+{
+ struct ecore_hw_sriov_info *iov = p_dev->p_iov_info;
+ int pos = iov->pos;
+
+ DP_VERBOSE(p_dev, ECORE_MSG_IOV, "sriov ext pos %d\n", pos);
+ OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
+
+ OSAL_PCI_READ_CONFIG_WORD(p_dev,
+ pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
+ OSAL_PCI_READ_CONFIG_WORD(p_dev,
+ pos + PCI_SRIOV_INITIAL_VF,
+ &iov->initial_vfs);
+
+ OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
+ if (iov->num_vfs) {
+ /* @@@TODO - in future we might want to add an OSAL here to
+ * allow each OS to decide on its own how to act.
+ */
+ DP_VERBOSE(p_dev, ECORE_MSG_IOV,
+ "Number of VFs are already set to non-zero value."
+ " Ignoring PCI configuration value\n");
+ iov->num_vfs = 0;
+ }
+
+ OSAL_PCI_READ_CONFIG_WORD(p_dev,
+ pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
+
+ OSAL_PCI_READ_CONFIG_WORD(p_dev,
+ pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
+
+ OSAL_PCI_READ_CONFIG_WORD(p_dev,
+ pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);
+
+ OSAL_PCI_READ_CONFIG_DWORD(p_dev,
+ pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
+
+ OSAL_PCI_READ_CONFIG_DWORD(p_dev, pos + PCI_SRIOV_CAP, &iov->cap);
+
+ OSAL_PCI_READ_CONFIG_BYTE(p_dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
+
+ DP_VERBOSE(p_dev, ECORE_MSG_IOV, "IOV info: nres %d, cap 0x%x,"
+ "ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d,"
+ " stride %d, page size 0x%x\n",
+ iov->nres, iov->cap, iov->ctrl,
+ iov->total_vfs, iov->initial_vfs, iov->nr_virtfn,
+ iov->offset, iov->stride, iov->pgsz);
+
+ /* Some sanity checks */
+ if (iov->num_vfs > NUM_OF_VFS(p_dev) ||
+ iov->total_vfs > NUM_OF_VFS(p_dev)) {
+ /* This can happen only due to a bug. In this case we set
+ * num_vfs to zero to avoid memory corruption in the code that
+ * assumes max number of vfs
+ */
+ DP_NOTICE(p_dev, false,
+ "IOV: Unexpected number of vfs set: %d"
+ " setting num_vf to zero\n",
+ iov->num_vfs);
+
+ iov->num_vfs = 0;
+ iov->total_vfs = 0;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static void ecore_iov_clear_vf_igu_blocks(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_igu_block *p_sb;
+ u16 sb_id;
+ u32 val;
+
+ if (!p_hwfn->hw_info.p_igu_info) {
+ DP_ERR(p_hwfn,
+ "ecore_iov_clear_vf_igu_blocks IGU Info not inited\n");
+ return;
+ }
+
+ for (sb_id = 0;
+ sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); sb_id++) {
+ p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
+ if ((p_sb->status & ECORE_IGU_STATUS_FREE) &&
+ !(p_sb->status & ECORE_IGU_STATUS_PF)) {
+ val = ecore_rd(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + sb_id * 4);
+ SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
+ ecore_wr(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
+ }
+ }
+}
+
+static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
+ struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
+ struct ecore_bulletin_content *p_bulletin_virt;
+ dma_addr_t req_p, rply_p, bulletin_p;
+ union pfvf_tlvs *p_reply_virt_addr;
+ union vfpf_tlvs *p_req_virt_addr;
+ u8 idx = 0;
+
+ OSAL_MEMSET(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));
+
+ p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
+ req_p = p_iov_info->mbx_msg_phys_addr;
+ p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
+ rply_p = p_iov_info->mbx_reply_phys_addr;
+ p_bulletin_virt = p_iov_info->p_bulletins;
+ bulletin_p = p_iov_info->bulletins_phys;
+ if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
+ DP_ERR(p_hwfn,
+ "ecore_iov_setup_vfdb called without alloc mem first\n");
+ return;
+ }
+
+ for (idx = 0; idx < p_iov->total_vfs; idx++) {
+ struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx];
+ u32 concrete;
+
+ vf->vf_mbx.req_virt = p_req_virt_addr + idx;
+ vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
+ vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
+ vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+ vf->vf_mbx.sw_mbx.request_size = sizeof(union vfpf_tlvs);
+ vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
+#endif
+ vf->state = VF_STOPPED;
+ vf->b_init = false;
+
+ vf->bulletin.phys = idx *
+ sizeof(struct ecore_bulletin_content) + bulletin_p;
+ vf->bulletin.p_virt = p_bulletin_virt + idx;
+ vf->bulletin.size = sizeof(struct ecore_bulletin_content);
+
+ vf->relative_vf_id = idx;
+ vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
+ concrete = ecore_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
+ vf->concrete_fid = concrete;
+ /* TODO - need to devise a better way of getting opaque */
+ vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
+ (vf->abs_vf_id << 8);
+
+ vf->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
+ vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
+ }
+}
+
+static enum _ecore_status_t ecore_iov_allocate_vfdb(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
+ void **p_v_addr;
+ u16 num_vfs = 0;
+
+ num_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "ecore_iov_allocate_vfdb for %d VFs\n", num_vfs);
+
+ /* Allocate PF Mailbox buffer (per-VF) */
+ p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
+ p_v_addr = &p_iov_info->mbx_msg_virt_addr;
+ *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+ &p_iov_info->mbx_msg_phys_addr,
+ p_iov_info->mbx_msg_size);
+ if (!*p_v_addr)
+ return ECORE_NOMEM;
+
+ /* Allocate PF Mailbox Reply buffer (per-VF) */
+ p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
+ p_v_addr = &p_iov_info->mbx_reply_virt_addr;
+ *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+ &p_iov_info->mbx_reply_phys_addr,
+ p_iov_info->mbx_reply_size);
+ if (!*p_v_addr)
+ return ECORE_NOMEM;
+
+ p_iov_info->bulletins_size = sizeof(struct ecore_bulletin_content) *
+ num_vfs;
+ p_v_addr = &p_iov_info->p_bulletins;
+ *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+ &p_iov_info->bulletins_phys,
+ p_iov_info->bulletins_size);
+ if (!*p_v_addr)
+ return ECORE_NOMEM;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "PF's Requests mailbox [%p virt 0x%lx phys], "
+ "Response mailbox [%p virt 0x%lx phys] Bulletinsi"
+ " [%p virt 0x%lx phys]\n",
+ p_iov_info->mbx_msg_virt_addr,
+ (unsigned long)p_iov_info->mbx_msg_phys_addr,
+ p_iov_info->mbx_reply_virt_addr,
+ (unsigned long)p_iov_info->mbx_reply_phys_addr,
+ p_iov_info->p_bulletins,
+ (unsigned long)p_iov_info->bulletins_phys);
+
+ return ECORE_SUCCESS;
+}
+
+static void ecore_iov_free_vfdb(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
+
+ if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_iov_info->mbx_msg_virt_addr,
+ p_iov_info->mbx_msg_phys_addr,
+ p_iov_info->mbx_msg_size);
+
+ if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_iov_info->mbx_reply_virt_addr,
+ p_iov_info->mbx_reply_phys_addr,
+ p_iov_info->mbx_reply_size);
+
+ if (p_iov_info->p_bulletins)
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_iov_info->p_bulletins,
+ p_iov_info->bulletins_phys,
+ p_iov_info->bulletins_size);
+}
+
+enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_pf_iov *p_sriov;
+
+ if (!IS_PF_SRIOV(p_hwfn)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "No SR-IOV - no need for IOV db\n");
+ return ECORE_SUCCESS;
+ }
+
+ p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
+ if (!p_sriov) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate `struct ecore_sriov'\n");
+ return ECORE_NOMEM;
+ }
+
+ p_hwfn->pf_iov_info = p_sriov;
+
+ return ecore_iov_allocate_vfdb(p_hwfn);
+}
+
+void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
+ return;
+
+ ecore_iov_setup_vfdb(p_hwfn);
+ ecore_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
+}
+
+void ecore_iov_free(struct ecore_hwfn *p_hwfn)
+{
+ if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
+ ecore_iov_free_vfdb(p_hwfn);
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);
+ }
+}
+
+void ecore_iov_free_hw_info(struct ecore_dev *p_dev)
+{
+ OSAL_FREE(p_dev, p_dev->p_iov_info);
+}
+
+enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ int pos;
+ enum _ecore_status_t rc;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return ECORE_SUCCESS;
+
+ /* Learn the PCI configuration */
+ pos = OSAL_PCI_FIND_EXT_CAPABILITY(p_hwfn->p_dev,
+ PCI_EXT_CAP_ID_SRIOV);
+ if (!pos) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "No PCIe IOV support\n");
+ return ECORE_SUCCESS;
+ }
+
+ /* Allocate a new struct for IOV information */
+ /* TODO - can change to VALLOC when its available */
+ p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL,
+ sizeof(*p_dev->p_iov_info));
+ if (!p_dev->p_iov_info) {
+ DP_NOTICE(p_hwfn, true,
+ "Can't support IOV due to lack of memory\n");
+ return ECORE_NOMEM;
+ }
+ p_dev->p_iov_info->pos = pos;
+
+ rc = ecore_iov_pci_cfg_info(p_dev);
+ if (rc)
+ return rc;
+
+ /* We want PF IOV to be synonymous with the existence of p_iov_info;
+ * In case the capability is published but there are no VFs, simply
+ * de-allocate the struct.
+ */
+ if (!p_dev->p_iov_info->total_vfs) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "IOV capabilities, but no VFs are published\n");
+ OSAL_FREE(p_dev, p_dev->p_iov_info);
+ return ECORE_SUCCESS;
+ }
+
+ /* First VF index based on offset is tricky:
+ * - If ARI is supported [likely], offset - (16 - pf_id) would
+ * provide the number for eng0. 2nd engine VFs would begin
+ * after the first engine's VFs.
+ * - If !ARI, VFs would start on next device.
+ * so offset - (256 - pf_id) would provide the number.
+ * Utilize the fact that (256 - pf_id) is reached only by the latter
+ * to differentiate between the two.
+ */
+
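+ /* For example, with ARI and a VF offset of 16, PF0's first VF maps to
+ * 16 + 0 - 16 = 0; on the second engine MAX_NUM_VFS_BB is subtracted
+ * so that numbering restarts per engine.
+ */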
+ if (p_hwfn->p_dev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
+ u32 first = p_hwfn->p_dev->p_iov_info->offset +
+ p_hwfn->abs_pf_id - 16;
+
+ p_dev->p_iov_info->first_vf_in_pf = first;
+
+ if (ECORE_PATH_ID(p_hwfn))
+ p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
+ } else {
+ u32 first = p_hwfn->p_dev->p_iov_info->offset +
+ p_hwfn->abs_pf_id - 256;
+
+ p_dev->p_iov_info->first_vf_in_pf = first;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "First VF in hwfn 0x%08x\n",
+ p_dev->p_iov_info->first_vf_in_pf);
+
+ return ECORE_SUCCESS;
+}
+
+static bool _ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid,
+ bool b_fail_malicious)
+{
+ /* Check PF supports sriov */
+ if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
+ !IS_PF_SRIOV_ALLOC(p_hwfn))
+ return false;
+
+ /* Check VF validity */
+ if (!ecore_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
+ return false;
+
+ return true;
+}
+
+bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
+{
+ return _ecore_iov_pf_sanity_check(p_hwfn, vfid, true);
+}
+
+void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
+ u16 rel_vf_id, u8 to_disable)
+{
+ struct ecore_vf_info *vf;
+ int i;
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+ if (!vf)
+ continue;
+
+ vf->to_disable = to_disable;
+ }
+}
+
+void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev,
+ u8 to_disable)
+{
+ u16 i;
+
+ if (!IS_ECORE_SRIOV(p_dev))
+ return;
+
+ for (i = 0; i < p_dev->p_iov_info->total_vfs; i++)
+ ecore_iov_set_vf_to_disable(p_dev, i, to_disable);
+}
+
+#ifndef LINUX_REMOVE
+/* @@@TBD Consider taking outside of ecore... */
+enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
+ u16 vf_id,
+ void *ctx)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct ecore_vf_info *vf = ecore_iov_get_vf_info(p_hwfn, vf_id, true);
+
+ if (vf != OSAL_NULL) {
+ vf->ctx = ctx;
+#ifdef CONFIG_ECORE_SW_CHANNEL
+ vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
+#endif
+ } else {
+ rc = ECORE_UNKNOWN_ERROR;
+ }
+ return rc;
+}
+#endif
+
+static void ecore_iov_vf_pglue_clear_err(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 abs_vfid)
+{
+ ecore_wr(p_hwfn, p_ptt,
+ PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
+ 1 << (abs_vfid & 0x1f));
+}
+
+static void ecore_iov_vf_igu_reset(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ int i;
+
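+ /* Register accesses below are done while "pretending" to be the VF's
+ * concrete FID, and reverted by the unpretend further down.
+ */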
+ /* Set VF masks and configuration - pretend */
+ ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
+
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
+
+ /* unpretend */
+ ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
+
+ /* iterate over all queues, clear sb consumer */
+ for (i = 0; i < vf->num_sbs; i++)
+ ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
+ vf->igu_sbs[i],
+ vf->opaque_fid, true);
+}
+
+static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf, bool enable)
+{
+ u32 igu_vf_conf;
+
+ ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
+
+ igu_vf_conf = ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
+
+ if (enable)
+ igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
+ else
+ igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;
+
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
+
+ /* unpretend */
+ ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
+}
+
+static enum _ecore_status_t
+ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, struct ecore_vf_info *vf)
+{
+ u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
+ enum _ecore_status_t rc;
+
+ if (vf->to_disable)
+ return ECORE_SUCCESS;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Enable internal access for vf %x [abs %x]\n", vf->abs_vf_id,
+ ECORE_VF_ABS_ID(p_hwfn, vf));
+
+ ecore_iov_vf_pglue_clear_err(p_hwfn, p_ptt,
+ ECORE_VF_ABS_ID(p_hwfn, vf));
+
+ ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
+
+ /* It's possible VF was previously considered malicious */
+ vf->b_malicious = false;
+
+ rc = ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
+ vf->abs_vf_id, vf->num_sbs);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
+
+ SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
+ STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
+
+ ecore_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
+ p_hwfn->hw_info.hw_mode);
+
+ /* unpretend */
+ ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
+
+ vf->state = VF_FREE;
+
+ return rc;
+}
+
+/**
+ *
+ * @brief ecore_iov_config_perm_table - configure the permission
+ * zone table.
+ * In E4, queue zone permission table size is 320x9. There
+ * are 320 VF queues for single engine device (256 for dual
+ * engine device), and each entry has the following format:
+ * {Valid, VF[7:0]}
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vf
+ * @param enable
+ */
+static void ecore_iov_config_perm_table(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf, u8 enable)
+{
+ u32 reg_addr, val;
+ u16 qzone_id = 0;
+ int qid;
+
+ for (qid = 0; qid < vf->num_rxqs; qid++) {
+ ecore_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
+ &qzone_id);
+
+ reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
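+ /* Entry format is {Valid, VF[7:0]}: bit 8 validates the entry
+ * and bits 7:0 carry the absolute VF id.
+ */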
+ val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
+ ecore_wr(p_hwfn, p_ptt, reg_addr, val);
+ }
+}
+
+static void ecore_iov_enable_vf_traffic(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ /* Reset vf in IGU - interrupts are still disabled */
+ ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
+
+ ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);
+
+ /* Permission Table */
+ ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
+}
+
+static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf,
+ u16 num_rx_queues)
+{
+ struct ecore_igu_block *igu_blocks;
+ int qid = 0, igu_id = 0;
+ u32 val = 0;
+
+ igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;
+
+ if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
+ num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
+
+ p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;
+
+ SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
+ SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
+ SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
+
+ while ((qid < num_rx_queues) &&
+ (igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev))) {
+ if (igu_blocks[igu_id].status & ECORE_IGU_STATUS_FREE) {
+ struct cau_sb_entry sb_entry;
+
+ vf->igu_sbs[qid] = (u16)igu_id;
+ igu_blocks[igu_id].status &= ~ECORE_IGU_STATUS_FREE;
+
+ SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
+
+ ecore_wr(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
+ val);
+
+ /* Configure igu sb in CAU which were marked valid */
+ ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
+ p_hwfn->rel_pf_id,
+ vf->abs_vf_id, 1);
+ ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(osal_uintptr_t)&sb_entry,
+ CAU_REG_SB_VAR_MEMORY +
+ igu_id * sizeof(u64), 2, 0);
+ qid++;
+ }
+ igu_id++;
+ }
+
+ vf->num_sbs = (u8)num_rx_queues;
+
+ return vf->num_sbs;
+}
+
+/**
+ *
+ * @brief The function invalidates all the VF entries,
+ * technically this isn't required, but added for
+ * cleanliness and ease of debugging in case a VF attempts to
+ * produce an interrupt after it has been taken down.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vf
+ */
+static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+ int idx, igu_id;
+ u32 addr, val;
+
+ /* Invalidate igu CAM lines and mark them as free */
+ for (idx = 0; idx < vf->num_sbs; idx++) {
+ igu_id = vf->igu_sbs[idx];
+ addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;
+
+ val = ecore_rd(p_hwfn, p_ptt, addr);
+ SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
+ ecore_wr(p_hwfn, p_ptt, addr, val);
+
+ p_info->igu_map.igu_blocks[igu_id].status |=
+ ECORE_IGU_STATUS_FREE;
+
+ p_hwfn->hw_info.p_igu_info->free_blks++;
+ }
+
+ vf->num_sbs = 0;
+}
+
+void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
+ u16 vfid,
+ struct ecore_mcp_link_params *params,
+ struct ecore_mcp_link_state *link,
+ struct ecore_mcp_link_capabilities *p_caps)
+{
+ struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
+ struct ecore_bulletin_content *p_bulletin;
+
+ if (!p_vf)
+ return;
+
+ p_bulletin = p_vf->bulletin.p_virt;
+ p_bulletin->req_autoneg = params->speed.autoneg;
+ p_bulletin->req_adv_speed = params->speed.advertised_speeds;
+ p_bulletin->req_forced_speed = params->speed.forced_speed;
+ p_bulletin->req_autoneg_pause = params->pause.autoneg;
+ p_bulletin->req_forced_rx = params->pause.forced_rx;
+ p_bulletin->req_forced_tx = params->pause.forced_tx;
+ p_bulletin->req_loopback = params->loopback_mode;
+
+ p_bulletin->link_up = link->link_up;
+ p_bulletin->speed = link->speed;
+ p_bulletin->full_duplex = link->full_duplex;
+ p_bulletin->autoneg = link->an;
+ p_bulletin->autoneg_complete = link->an_complete;
+ p_bulletin->parallel_detection = link->parallel_detection;
+ p_bulletin->pfc_enabled = link->pfc_enabled;
+ p_bulletin->partner_adv_speed = link->partner_adv_speed;
+ p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
+ p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
+ p_bulletin->partner_adv_pause = link->partner_adv_pause;
+ p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
+
+ p_bulletin->capability_speed = p_caps->speed_capabilities;
+}
+
+enum _ecore_status_t
+ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_iov_vf_init_params *p_params)
+{
+ struct ecore_mcp_link_capabilities link_caps;
+ struct ecore_mcp_link_params link_params;
+ struct ecore_mcp_link_state link_state;
+ u8 num_of_vf_available_chains = 0;
+ struct ecore_vf_info *vf = OSAL_NULL;
+ u16 qid, num_irqs;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 cids;
+ u8 i;
+
+ vf = ecore_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
+ if (!vf) {
+ DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
+ return ECORE_UNKNOWN_ERROR;
+ }
+
+ if (vf->b_init) {
+ DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
+ p_params->rel_vf_id);
+ return ECORE_INVAL;
+ }
+
+ /* Perform sanity checking on the requested vport/rss */
+ if (p_params->vport_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) {
+ DP_NOTICE(p_hwfn, true, "VF[%d] - can't use VPORT %02x\n",
+ p_params->rel_vf_id, p_params->vport_id);
+ return ECORE_INVAL;
+ }
+
+ if ((p_params->num_queues > 1) &&
+ (p_params->rss_eng_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG))) {
+ DP_NOTICE(p_hwfn, true, "VF[%d] - can't use RSS_ENG %02x\n",
+ p_params->rel_vf_id, p_params->rss_eng_id);
+ return ECORE_INVAL;
+ }
+
+ /* TODO - remove this once we get confidence in the change */
+ if (!p_params->vport_id) {
+ DP_NOTICE(p_hwfn, false,
+ "VF[%d] - Unlikely that VF uses vport0. Forgotten?\n",
+ p_params->rel_vf_id);
+ }
+ if ((!p_params->rss_eng_id) && (p_params->num_queues > 1)) {
+ DP_NOTICE(p_hwfn, false,
+ "VF[%d] - Unlikely that VF uses RSS_eng0. Forgotten?\n",
+ p_params->rel_vf_id);
+ }
+ vf->vport_id = p_params->vport_id;
+ vf->rss_eng_id = p_params->rss_eng_id;
+
+ /* Perform sanity checking on the requested queue_id */
+ for (i = 0; i < p_params->num_queues; i++) {
+ u16 min_vf_qzone = (u16)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE);
+ u16 max_vf_qzone = min_vf_qzone +
+ FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE) - 1;
+
+ qid = p_params->req_rx_queue[i];
+ if (qid < min_vf_qzone || qid > max_vf_qzone) {
+ DP_NOTICE(p_hwfn, true,
+ "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
+ qid, p_params->rel_vf_id,
+ min_vf_qzone, max_vf_qzone);
+ return ECORE_INVAL;
+ }
+
+ qid = p_params->req_tx_queue[i];
+ if (qid > max_vf_qzone) {
+ DP_NOTICE(p_hwfn, true,
+ "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
+ qid, p_params->rel_vf_id, max_vf_qzone);
+ return ECORE_INVAL;
+ }
+
+ /* If client *really* wants, Tx qid can be shared with PF */
+ if (qid < min_vf_qzone)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
+ p_params->rel_vf_id, qid, i);
+ }
+
+ /* Limit number of queues according to number of CIDs */
+ ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] - requesting to initialize for 0x%04x queues"
+ " [0x%04x CIDs available]\n",
+ vf->relative_vf_id, p_params->num_queues, (u16)cids);
+ num_irqs = OSAL_MIN_T(u16, p_params->num_queues, ((u16)cids));
+
+ num_of_vf_available_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,
+ p_ptt,
+ vf,
+ num_irqs);
+ if (num_of_vf_available_chains == 0) {
+ DP_ERR(p_hwfn, "no available igu sbs\n");
+ return ECORE_NOMEM;
+ }
+
+ /* Choose queue number and index ranges */
+ vf->num_rxqs = num_of_vf_available_chains;
+ vf->num_txqs = num_of_vf_available_chains;
+
+ for (i = 0; i < vf->num_rxqs; i++) {
+ struct ecore_vf_queue *p_queue = &vf->vf_queues[i];
+
+ p_queue->fw_rx_qid = p_params->req_rx_queue[i];
+ p_queue->fw_tx_qid = p_params->req_tx_queue[i];
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
+ vf->relative_vf_id, i, vf->igu_sbs[i],
+ p_queue->fw_rx_qid, p_queue->fw_tx_qid);
+ }
+
+ /* Update the link configuration in the bulletin. */
+ OSAL_MEMCPY(&link_params, ecore_mcp_get_link_params(p_hwfn),
+ sizeof(link_params));
+ OSAL_MEMCPY(&link_state, ecore_mcp_get_link_state(p_hwfn),
+ sizeof(link_state));
+ OSAL_MEMCPY(&link_caps, ecore_mcp_get_link_capabilities(p_hwfn),
+ sizeof(link_caps));
+ ecore_iov_set_link(p_hwfn, p_params->rel_vf_id,
+ &link_params, &link_state, &link_caps);
+
+ rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);
+
+ if (rc == ECORE_SUCCESS) {
+ vf->b_init = true;
+ p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=
+ (1ULL << (vf->relative_vf_id % 64));
+
+ if (IS_LEAD_HWFN(p_hwfn))
+ p_hwfn->p_dev->p_iov_info->num_vfs++;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 rel_vf_id)
+{
+ struct ecore_mcp_link_capabilities caps;
+ struct ecore_mcp_link_params params;
+ struct ecore_mcp_link_state link;
+ struct ecore_vf_info *vf = OSAL_NULL;
+
+ vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!vf) {
+ DP_ERR(p_hwfn, "ecore_iov_release_hw_for_vf : vf is NULL\n");
+ return ECORE_UNKNOWN_ERROR;
+ }
+
+ if (vf->bulletin.p_virt)
+ OSAL_MEMSET(vf->bulletin.p_virt, 0,
+ sizeof(*vf->bulletin.p_virt));
+
+ OSAL_MEMSET(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));
+
+ /* Get the link configuration back in bulletin so
+ * that when VFs are re-enabled they get the actual
+ * link configuration.
+ */
+ OSAL_MEMCPY(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));
+ OSAL_MEMCPY(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
+ OSAL_MEMCPY(&caps, ecore_mcp_get_link_capabilities(p_hwfn),
+ sizeof(caps));
+ ecore_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);
+
+ /* Forget the VF's acquisition message */
+ OSAL_MEMSET(&vf->acquire, 0, sizeof(vf->acquire));
+
+ /* disabling interrupts and resetting permission table was done during
+ * vf-close, however, we could get here without going through vf_close
+ */
+ /* Disable Interrupts for VF */
+ ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
+
+ /* Reset Permission table */
+ ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
+
+ vf->num_rxqs = 0;
+ vf->num_txqs = 0;
+ ecore_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);
+
+ if (vf->b_init) {
+ vf->b_init = false;
+ p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] &=
+ ~(1ULL << (vf->relative_vf_id % 64));
+
+ if (IS_LEAD_HWFN(p_hwfn))
+ p_hwfn->p_dev->p_iov_info->num_vfs--;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static bool ecore_iov_tlv_supported(u16 tlvtype)
+{
+ return tlvtype > CHANNEL_TLV_NONE && tlvtype < CHANNEL_TLV_MAX;
+}
+
+static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *vf, u16 tlv)
+{
+ /* lock the channel */
+ /* mutex_lock(&vf->op_mutex); @@@TBD MichalK - add lock... */
+
+ /* record the locking op */
+ /* vf->op_current = tlv; @@@TBD MichalK */
+
+ /* log the lock */
+ if (ecore_iov_tlv_supported(tlv))
+ DP_VERBOSE(p_hwfn,
+ ECORE_MSG_IOV,
+ "VF[%d]: vf pf channel locked by %s\n",
+ vf->abs_vf_id,
+ ecore_channel_tlvs_string[tlv]);
+ else
+ DP_VERBOSE(p_hwfn,
+ ECORE_MSG_IOV,
+ "VF[%d]: vf pf channel locked by %04x\n",
+ vf->abs_vf_id, tlv);
+}
+
+static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *vf,
+ u16 expected_tlv)
+{
+ /* log the unlock */
+ if (ecore_iov_tlv_supported(expected_tlv))
+ DP_VERBOSE(p_hwfn,
+ ECORE_MSG_IOV,
+ "VF[%d]: vf pf channel unlocked by %s\n",
+ vf->abs_vf_id,
+ ecore_channel_tlvs_string[expected_tlv]);
+ else
+ DP_VERBOSE(p_hwfn,
+ ECORE_MSG_IOV,
+ "VF[%d]: vf pf channel unlocked by %04x\n",
+ vf->abs_vf_id, expected_tlv);
+
+ /* record the locking op */
+ /* vf->op_current = CHANNEL_TLV_NONE; */
+}
+
+/* place a given tlv on the tlv buffer, continuing current tlv list */
+void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
+ u8 **offset, u16 type, u16 length)
+{
+ struct channel_tlv *tl = (struct channel_tlv *)*offset;
+
+ tl->type = type;
+ tl->length = length;
+
+ /* Offset should keep pointing to next TLV (the end of the last) */
+ *offset += length;
+
+ /* Return a pointer to the start of the added tlv */
+ return *offset - length;
+}
+
+/* list the types and lengths of the tlvs on the buffer */
+void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
+{
+ u16 i = 1, total_length = 0;
+ struct channel_tlv *tlv;
+
+ do {
+ /* cast current tlv list entry to channel tlv header */
+ tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);
+
+ /* output tlv */
+ if (ecore_iov_tlv_supported(tlv->type))
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "TLV number %d: type %s, length %d\n",
+ i, ecore_channel_tlvs_string[tlv->type],
+ tlv->length);
+ else
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "TLV number %d: type %d, length %d\n",
+ i, tlv->type, tlv->length);
+
+ if (tlv->type == CHANNEL_TLV_LIST_END)
+ return;
+
+ /* Validate entry - protect against malicious VFs */
+ if (!tlv->length) {
+ DP_NOTICE(p_hwfn, false, "TLV of length 0 found\n");
+ return;
+ }
+ total_length += tlv->length;
+ if (total_length >= sizeof(struct tlv_buffer_size)) {
+ DP_NOTICE(p_hwfn, false, "TLV ==> Buffer overflow\n");
+ return;
+ }
+
+ i++;
+ } while (1);
+}
+
+static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf,
+ u16 length, u8 status)
+{
+ struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct ecore_dmae_params params;
+ u8 eng_vf_id;
+
+ mbx->reply_virt->default_resp.hdr.status = status;
+
+ ecore_dp_tlv_list(p_hwfn, mbx->reply_virt);
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+ mbx->sw_mbx.response_size =
+ length + sizeof(struct channel_list_end_tlv);
+
+ if (!p_hwfn->p_dev->b_hw_channel)
+ return;
+#endif
+
+ eng_vf_id = p_vf->abs_vf_id;
+
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
+ params.flags = ECORE_DMAE_FLAG_VF_DST;
+ params.dst_vfid = eng_vf_id;
+
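+ /* DMA the reply body first and the leading 8 bytes (which hold the
+ * status header) last, so the VF should not observe the status before
+ * the rest of the response is in place.
+ */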
+ ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
+ mbx->req_virt->first_tlv.reply_address +
+ sizeof(u64),
+ (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
+ &params);
+
+ ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
+ mbx->req_virt->first_tlv.reply_address,
+ sizeof(u64) / 4, &params);
+
+ REG_WR(p_hwfn,
+ GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
+}
+
+static u16 ecore_iov_vport_to_tlv(struct ecore_hwfn *p_hwfn,
+ enum ecore_iov_vport_update_flag flag)
+{
+ switch (flag) {
+ case ECORE_IOV_VP_UPDATE_ACTIVATE:
+ return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+ case ECORE_IOV_VP_UPDATE_VLAN_STRIP:
+ return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
+ case ECORE_IOV_VP_UPDATE_TX_SWITCH:
+ return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+ case ECORE_IOV_VP_UPDATE_MCAST:
+ return CHANNEL_TLV_VPORT_UPDATE_MCAST;
+ case ECORE_IOV_VP_UPDATE_ACCEPT_PARAM:
+ return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+ case ECORE_IOV_VP_UPDATE_RSS:
+ return CHANNEL_TLV_VPORT_UPDATE_RSS;
+ case ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
+ return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
+ case ECORE_IOV_VP_UPDATE_SGE_TPA:
+ return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
+ default:
+ return 0;
+ }
+}
+
+static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ struct ecore_iov_vf_mbx *p_mbx,
+ u8 status, u16 tlvs_mask,
+ u16 tlvs_accepted)
+{
+ struct pfvf_def_resp_tlv *resp;
+ u16 size, total_len, i;
+
+ OSAL_MEMSET(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
+ p_mbx->offset = (u8 *)p_mbx->reply_virt;
+ size = sizeof(struct pfvf_def_resp_tlv);
+ total_len = size;
+
+ ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
+
+ /* Prepare response for all extended tlvs if they are found by PF */
+ for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
+ if (!(tlvs_mask & (1 << i)))
+ continue;
+
+ resp = ecore_add_tlv(p_hwfn, &p_mbx->offset,
+ ecore_iov_vport_to_tlv(p_hwfn, i), size);
+
+ if (tlvs_accepted & (1 << i))
+ resp->hdr.status = status;
+ else
+ resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] - vport_update resp: TLV %d, status %02x\n",
+ p_vf->relative_vf_id,
+ ecore_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);
+
+ total_len += size;
+ }
+
+ ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ return total_len;
+}
+
+static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf_info,
+ u16 type, u16 length, u8 status)
+{
+ struct ecore_iov_vf_mbx *mbx = &vf_info->vf_mbx;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+
+ ecore_add_tlv(p_hwfn, &mbx->offset, type, length);
+ ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
+
+ OSAL_IOV_PF_RESP_TYPE(p_hwfn, vf_info->relative_vf_id, status);
+}
+
+struct ecore_public_vf_info
+*ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
+ u16 relative_vf_id,
+ bool b_enabled_only)
+{
+ struct ecore_vf_info *vf = OSAL_NULL;
+
+ vf = ecore_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
+ if (!vf)
+ return OSAL_NULL;
+
+ return &vf->p_vf_info;
+}
+
+static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf)
+{
+ u32 i, j;
+ p_vf->vf_bulletin = 0;
+ p_vf->vport_instance = 0;
+ p_vf->configured_features = 0;
+
+	/* If VF previously requested fewer resources, go back to default */
+ p_vf->num_rxqs = p_vf->num_sbs;
+ p_vf->num_txqs = p_vf->num_sbs;
+
+ p_vf->num_active_rxqs = 0;
+
+ for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
+ struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
+
+ for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
+ if (!p_queue->cids[j].p_cid)
+ continue;
+
+ ecore_eth_queue_cid_release(p_hwfn,
+ p_queue->cids[j].p_cid);
+ p_queue->cids[j].p_cid = OSAL_NULL;
+ }
+ }
+
+ OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
+ OSAL_MEMSET(&p_vf->acquire, 0, sizeof(p_vf->acquire));
+ OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
+}
+
+static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf,
+ struct vf_pf_resc_request *p_req,
+ struct pf_vf_resc *p_resp)
+{
+ u8 i;
+
+ /* Queue related information */
+ p_resp->num_rxqs = p_vf->num_rxqs;
+ p_resp->num_txqs = p_vf->num_txqs;
+ p_resp->num_sbs = p_vf->num_sbs;
+
+ for (i = 0; i < p_resp->num_sbs; i++) {
+ p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
+ /* TODO - what's this sb_qid field? Is it deprecated?
+ * or is there an ecore_client that looks at this?
+ */
+ p_resp->hw_sbs[i].sb_qid = 0;
+ }
+
+ /* These fields are filled for backward compatibility.
+ * Unused by modern vfs.
+ */
+ for (i = 0; i < p_resp->num_rxqs; i++) {
+ ecore_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
+ (u16 *)&p_resp->hw_qid[i]);
+ p_resp->cid[i] = i;
+ }
+
+ /* Filter related information */
+ p_resp->num_mac_filters = OSAL_MIN_T(u8, p_vf->num_mac_filters,
+ p_req->num_mac_filters);
+ p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
+ p_req->num_vlan_filters);
+
+ /* This isn't really needed/enforced, but some legacy VFs might depend
+ * on the correct filling of this field.
+ */
+ p_resp->num_mc_filters = ECORE_MAX_MC_ADDRS;
+
+ /* Validate sufficient resources for VF */
+ if (p_resp->num_rxqs < p_req->num_rxqs ||
+ p_resp->num_txqs < p_req->num_txqs ||
+ p_resp->num_sbs < p_req->num_sbs ||
+ p_resp->num_mac_filters < p_req->num_mac_filters ||
+ p_resp->num_vlan_filters < p_req->num_vlan_filters ||
+ p_resp->num_mc_filters < p_req->num_mc_filters) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] - Insufficient resources: rxq [%02x/%02x]"
+ " txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x]"
+ " vlan [%02x/%02x] mc [%02x/%02x]\n",
+ p_vf->abs_vf_id,
+ p_req->num_rxqs, p_resp->num_rxqs,
+		   p_req->num_txqs, p_resp->num_txqs,
+ p_req->num_sbs, p_resp->num_sbs,
+ p_req->num_mac_filters, p_resp->num_mac_filters,
+ p_req->num_vlan_filters, p_resp->num_vlan_filters,
+ p_req->num_mc_filters, p_resp->num_mc_filters);
+
+ /* Some legacy OSes are incapable of correctly handling this
+ * failure.
+ */
+ if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+ ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
+ (p_vf->acquire.vfdev_info.os_type ==
+ VFPF_ACQUIRE_OS_WINDOWS))
+ return PFVF_STATUS_SUCCESS;
+
+ return PFVF_STATUS_NO_RESOURCE;
+ }
+
+ return PFVF_STATUS_SUCCESS;
+}
+
+static void ecore_iov_vf_mbx_acquire_stats(struct ecore_hwfn *p_hwfn,
+ struct pfvf_stats_info *p_stats)
+{
+ p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
+ OFFSETOF(struct mstorm_vf_zone,
+ non_trigger.eth_queue_stat);
+ p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
+ p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
+ OFFSETOF(struct ustorm_vf_zone,
+ non_trigger.eth_queue_stat);
+ p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
+ p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
+ OFFSETOF(struct pstorm_vf_zone,
+ non_trigger.eth_queue_stat);
+ p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
+ p_stats->tstats.address = 0;
+ p_stats->tstats.len = 0;
+}
+
+static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
+ struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
+ struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
+ u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
+ struct pf_vf_resc *resc = &resp->resc;
+ enum _ecore_status_t rc;
+
+ OSAL_MEMSET(resp, 0, sizeof(*resp));
+
+	/* Write the PF version so the VF knows which version is supported -
+	 * it might be overridden later. This guarantees the VF can recognize
+	 * a legacy PF based on the lack of versions in the reply.
+ */
+ pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
+ pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
+
+ /* TODO - not doing anything is bad since we'll assert, but this isn't
+ * necessarily the right behavior - perhaps we should have allowed some
+ * versatility here.
+ */
+ if (vf->state != VF_FREE &&
+ vf->state != VF_STOPPED) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
+ vf->abs_vf_id, vf->state);
+ goto out;
+ }
+
+ /* Validate FW compatibility */
+ if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
+ if (req->vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
+ struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;
+
+ /* This legacy support would need to be removed once
+ * the major has changed.
+ */
+ OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] is pre-fastpath HSI\n",
+ vf->abs_vf_id);
+ p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
+ p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
+ } else {
+ DP_INFO(p_hwfn,
+ "VF[%d] needs fastpath HSI %02x.%02x, which is"
+			" incompatible with loaded FW's fastpath"
+ " HSI %02x.%02x\n",
+ vf->abs_vf_id,
+ req->vfdev_info.eth_fp_hsi_major,
+ req->vfdev_info.eth_fp_hsi_minor,
+ ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
+
+ goto out;
+ }
+ }
+
+ /* On 100g PFs, prevent old VFs from loading */
+ if ((p_hwfn->p_dev->num_hwfns > 1) &&
+ !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
+ DP_INFO(p_hwfn,
+ "VF[%d] is running an old driver that doesn't support"
+ " 100g\n",
+ vf->abs_vf_id);
+ goto out;
+ }
+
+#ifndef __EXTRACT__LINUX__
+ if (OSAL_IOV_VF_ACQUIRE(p_hwfn, vf->relative_vf_id) != ECORE_SUCCESS) {
+ vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+#endif
+
+ /* Store the acquire message */
+ OSAL_MEMCPY(&vf->acquire, req, sizeof(vf->acquire));
+
+ vf->opaque_fid = req->vfdev_info.opaque_fid;
+
+ vf->vf_bulletin = req->bulletin_addr;
+ vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
+ vf->bulletin.size : req->bulletin_size;
+
+ /* fill in pfdev info */
+ pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
+ pfdev_info->db_size = 0; /* @@@ TBD MichalK Vf Doorbells */
+ pfdev_info->indices_per_sb = PIS_PER_SB;
+
+ pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
+ PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
+ if (p_hwfn->p_dev->num_hwfns > 1)
+ pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
+
+ ecore_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
+
+ OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
+ ETH_ALEN);
+
+ pfdev_info->fw_major = FW_MAJOR_VERSION;
+ pfdev_info->fw_minor = FW_MINOR_VERSION;
+ pfdev_info->fw_rev = FW_REVISION_VERSION;
+ pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
+
+ /* Incorrect when legacy, but doesn't matter as legacy isn't reading
+ * this field.
+ */
+ pfdev_info->minor_fp_hsi = OSAL_MIN_T(u8, ETH_HSI_VER_MINOR,
+ req->vfdev_info.eth_fp_hsi_minor);
+ pfdev_info->os_type = OSAL_IOV_GET_OS_TYPE();
+ ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver,
+ OSAL_NULL);
+
+ pfdev_info->dev_type = p_hwfn->p_dev->type;
+ pfdev_info->chip_rev = p_hwfn->p_dev->chip_rev;
+
+ /* Fill resources available to VF; Make sure there are enough to
+ * satisfy the VF's request.
+ */
+ vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
+ &req->resc_request, resc);
+ if (vfpf_status != PFVF_STATUS_SUCCESS)
+ goto out;
+
+ /* Start the VF in FW */
+ rc = ecore_sp_vf_start(p_hwfn, vf);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true, "Failed to start VF[%02x]\n",
+ vf->abs_vf_id);
+ vfpf_status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+ /* Fill agreed size of bulletin board in response, and post
+ * an initial image to the bulletin board.
+ */
+ resp->bulletin_size = vf->bulletin.size;
+ ecore_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x,"
+ " db_size=%d, idx_per_sb=%d, pf_cap=0x%lx\n"
+ "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d,"
+ " n_vlans-%d\n",
+ vf->abs_vf_id, resp->pfdev_info.chip_num,
+ resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb,
+ (unsigned long)resp->pfdev_info.capabilities, resc->num_rxqs,
+ resc->num_txqs, resc->num_sbs, resc->num_mac_filters,
+ resc->num_vlan_filters);
+
+ vf->state = VF_ACQUIRED;
+
+out:
+ /* Prepare Response */
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
+ sizeof(struct pfvf_acquire_resp_tlv),
+ vfpf_status);
+}
+
+static enum _ecore_status_t
+__ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf, bool val)
+{
+ struct ecore_sp_vport_update_params params;
+ enum _ecore_status_t rc;
+
+ if (val == p_vf->spoof_chk) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Spoofchk value[%d] is already configured\n", val);
+ return ECORE_SUCCESS;
+ }
+
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+ params.opaque_fid = p_vf->opaque_fid;
+ params.vport_id = p_vf->vport_id;
+ params.update_anti_spoofing_en_flg = 1;
+ params.anti_spoofing_en = val;
+
+ rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
+ OSAL_NULL);
+ if (rc == ECORE_SUCCESS) {
+ p_vf->spoof_chk = val;
+ p_vf->req_spoofchk_val = p_vf->spoof_chk;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Spoofchk val[%d] configured\n", val);
+ } else {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Spoofchk configuration[val:%d] failed for VF[%d]\n",
+ val, p_vf->relative_vf_id);
+ }
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf)
+{
+ struct ecore_filter_ucast filter;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ int i;
+
+ OSAL_MEMSET(&filter, 0, sizeof(filter));
+ filter.is_rx_filter = 1;
+ filter.is_tx_filter = 1;
+ filter.vport_to_add_to = p_vf->vport_id;
+ filter.opcode = ECORE_FILTER_ADD;
+
+ /* Reconfigure vlans */
+ for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
+ if (!p_vf->shadow_config.vlans[i].used)
+ continue;
+
+ filter.type = ECORE_FILTER_VLAN;
+ filter.vlan = p_vf->shadow_config.vlans[i].vid;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
+ filter.vlan, p_vf->relative_vf_id);
+ rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+ &filter, ECORE_SPQ_MODE_CB,
+ OSAL_NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to configure VLAN [%04x]"
+ " to VF [%04x]\n",
+ filter.vlan, p_vf->relative_vf_id);
+ break;
+ }
+ }
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf, u64 events)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /*TODO - what about MACs? */
+
+ if ((events & (1 << VLAN_ADDR_FORCED)) &&
+ !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
+ rc = ecore_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ u64 events)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct ecore_filter_ucast filter;
+
+ if (!p_vf->vport_instance)
+ return ECORE_INVAL;
+
+ if (events & (1 << MAC_ADDR_FORCED)) {
+ /* Since there's no way [currently] of removing the MAC,
+ * we can always assume this means we need to force it.
+ */
+ OSAL_MEMSET(&filter, 0, sizeof(filter));
+ filter.type = ECORE_FILTER_MAC;
+ filter.opcode = ECORE_FILTER_REPLACE;
+ filter.is_rx_filter = 1;
+ filter.is_tx_filter = 1;
+ filter.vport_to_add_to = p_vf->vport_id;
+ OSAL_MEMCPY(filter.mac, p_vf->bulletin.p_virt->mac, ETH_ALEN);
+
+ rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+ &filter,
+ ECORE_SPQ_MODE_CB, OSAL_NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true,
+ "PF failed to configure MAC for VF\n");
+ return rc;
+ }
+
+ p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
+ }
+
+ if (events & (1 << VLAN_ADDR_FORCED)) {
+ struct ecore_sp_vport_update_params vport_update;
+ u8 removal;
+ int i;
+
+ OSAL_MEMSET(&filter, 0, sizeof(filter));
+ filter.type = ECORE_FILTER_VLAN;
+ filter.is_rx_filter = 1;
+ filter.is_tx_filter = 1;
+ filter.vport_to_add_to = p_vf->vport_id;
+ filter.vlan = p_vf->bulletin.p_virt->pvid;
+ filter.opcode = filter.vlan ? ECORE_FILTER_REPLACE :
+ ECORE_FILTER_FLUSH;
+
+ /* Send the ramrod */
+ rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+ &filter,
+ ECORE_SPQ_MODE_CB, OSAL_NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true,
+ "PF failed to configure VLAN for VF\n");
+ return rc;
+ }
+
+ /* Update the default-vlan & silent vlan stripping */
+ OSAL_MEMSET(&vport_update, 0, sizeof(vport_update));
+ vport_update.opaque_fid = p_vf->opaque_fid;
+ vport_update.vport_id = p_vf->vport_id;
+ vport_update.update_default_vlan_enable_flg = 1;
+ vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
+ vport_update.update_default_vlan_flg = 1;
+ vport_update.default_vlan = filter.vlan;
+
+ vport_update.update_inner_vlan_removal_flg = 1;
+ removal = filter.vlan ?
+ 1 : p_vf->shadow_config.inner_vlan_removal;
+ vport_update.inner_vlan_removal_flg = removal;
+ vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
+ rc = ecore_sp_vport_update(p_hwfn, &vport_update,
+ ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true,
+ "PF failed to configure VF vport for vlan\n");
+ return rc;
+ }
+
+ /* Update all the Rx queues */
+ for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
+ struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
+ struct ecore_queue_cid *p_cid = OSAL_NULL;
+
+ /* There can be at most 1 Rx queue on qzone. Find it */
+ p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, p_vf,
+ p_queue);
+ if (p_cid == OSAL_NULL)
+ continue;
+
+ rc = ecore_sp_eth_rx_queues_update(p_hwfn,
+ (void **)&p_cid,
+ 1, 0, 1,
+ ECORE_SPQ_MODE_EBLOCK,
+ OSAL_NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to send Rx update"
+					  " for queue[0x%04x]\n",
+ p_cid->rel.queue_id);
+ return rc;
+ }
+ }
+
+ if (filter.vlan)
+ p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
+ else
+ p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
+ }
+
+ /* If forced features are terminated, we need to configure the shadow
+ * configuration back again.
+ */
+ if (events)
+ ecore_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
+
+ return rc;
+}
+
+static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ struct ecore_sp_vport_start_params params = { 0 };
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_vport_start_tlv *start;
+ u8 status = PFVF_STATUS_SUCCESS;
+ struct ecore_vf_info *vf_info;
+ u64 *p_bitmap;
+ int sb_id;
+ enum _ecore_status_t rc;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->p_dev, true,
+ "Failed to get VF info, invalid vfid [%d]\n",
+ vf->relative_vf_id);
+ return;
+ }
+
+ vf->state = VF_ENABLED;
+ start = &mbx->req_virt->start_vport;
+
+ ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
+
+ /* Initialize Status block in CAU */
+ for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
+ if (!start->sb_addr[sb_id]) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] did not fill the address of SB %d\n",
+ vf->relative_vf_id, sb_id);
+ break;
+ }
+
+ ecore_int_cau_conf_sb(p_hwfn, p_ptt,
+ start->sb_addr[sb_id],
+ vf->igu_sbs[sb_id],
+ vf->abs_vf_id, 1);
+ }
+
+ vf->mtu = start->mtu;
+ vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
+
+ /* Take into consideration configuration forced by hypervisor;
+ * If none is configured, use the supplied VF values [for old
+ * vfs that would still be fine, since they passed '0' as padding].
+ */
+ p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
+ if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
+ u8 vf_req = start->only_untagged;
+
+ vf_info->bulletin.p_virt->default_only_untagged = vf_req;
+ *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
+ }
+
+ params.tpa_mode = start->tpa_mode;
+ params.remove_inner_vlan = start->inner_vlan_removal;
+ params.tx_switching = true;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+ DP_NOTICE(p_hwfn, false,
+ "FPGA: Don't config VF for Tx-switching [no pVFC]\n");
+ params.tx_switching = false;
+ }
+#endif
+
+ params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
+ params.drop_ttl0 = false;
+ params.concrete_fid = vf->concrete_fid;
+ params.opaque_fid = vf->opaque_fid;
+ params.vport_id = vf->vport_id;
+ params.max_buffers_per_cqe = start->max_buffers_per_cqe;
+ params.mtu = vf->mtu;
+ params.check_mac = true;
+
+ rc = ecore_sp_eth_vport_start(p_hwfn, &params);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn,
+ "ecore_iov_vf_mbx_start_vport returned error %d\n", rc);
+ status = PFVF_STATUS_FAILURE;
+ } else {
+ vf->vport_instance++;
+
+ /* Force configuration if needed on the newly opened vport */
+ ecore_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
+ OSAL_IOV_POST_START_VPORT(p_hwfn, vf->relative_vf_id,
+ vf->vport_id, vf->opaque_fid);
+ __ecore_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
+ }
+
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
+ sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ u8 status = PFVF_STATUS_SUCCESS;
+ enum _ecore_status_t rc;
+
+ vf->vport_instance--;
+ vf->spoof_chk = false;
+
+ if ((ecore_iov_validate_active_rxq(p_hwfn, vf)) ||
+ (ecore_iov_validate_active_txq(p_hwfn, vf))) {
+ vf->b_malicious = true;
+ DP_NOTICE(p_hwfn, false,
+ "VF [%02x] - considered malicious;"
+			  " Unable to stop RX/TX queues\n",
+ vf->abs_vf_id);
+ }
+
+ rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn,
+ "ecore_iov_vf_mbx_stop_vport returned error %d\n", rc);
+ status = PFVF_STATUS_FAILURE;
+ }
+
+ /* Forget the configuration on the vport */
+ vf->configured_features = 0;
+ OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));
+
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
+ sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf,
+ u8 status, bool b_legacy)
+{
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct pfvf_start_queue_resp_tlv *p_tlv;
+ struct vfpf_start_rxq_tlv *req;
+ u16 length;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+
+ /* Taking a bigger struct instead of adding a TLV to list was a
+ * mistake, but one which we're now stuck with, as some older
+ * clients assume the size of the previous response.
+ */
+ if (!b_legacy)
+ length = sizeof(*p_tlv);
+ else
+ length = sizeof(struct pfvf_def_resp_tlv);
+
+ p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
+ length);
+ ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* Update the TLV with the response */
+ if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
+ req = &mbx->req_virt->start_rxq;
+ p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
+ OFFSETOF(struct mstorm_vf_zone,
+ non_trigger.eth_rx_queue_producers) +
+ sizeof(struct eth_rx_prod_data) * req->rx_qid;
+ }
+
+ ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
+}
+
+static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ struct ecore_queue_start_common_params params;
+ struct ecore_queue_cid_vf_params vf_params;
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ u8 status = PFVF_STATUS_NO_RESOURCE;
+ struct ecore_vf_queue *p_queue;
+ struct vfpf_start_rxq_tlv *req;
+ struct ecore_queue_cid *p_cid;
+ bool b_legacy_vf = false;
+ u8 qid_usage_idx;
+ enum _ecore_status_t rc;
+
+ req = &mbx->req_virt->start_rxq;
+
+ if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
+ ECORE_IOV_VALIDATE_Q_DISABLE) ||
+ !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
+ goto out;
+
+ /* Legacy VFs made assumptions on the CID their queues connected to,
+ * assuming queue X used CID X.
+ * TODO - need to validate that there was no official release post
+ * the current legacy scheme that still made that assumption.
+ */
+ if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+ ETH_HSI_VER_NO_PKT_LEN_TUNN)
+ b_legacy_vf = true;
+
+ /* Acquire a new queue-cid */
+ p_queue = &vf->vf_queues[req->rx_qid];
+
+ OSAL_MEMSET(&params, 0, sizeof(params));
+ params.queue_id = (u8)p_queue->fw_rx_qid;
+ params.vport_id = vf->vport_id;
+ params.stats_id = vf->abs_vf_id + 0x10;
+ params.sb = req->hw_sb;
+ params.sb_idx = req->sb_index;
+
+ /* TODO - set qid_usage_idx according to extended TLV. For now, use
+ * '0' for Rx.
+ */
+ qid_usage_idx = 0;
+
+ OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
+ vf_params.vfid = vf->relative_vf_id;
+ vf_params.vf_qid = (u8)req->rx_qid;
+ vf_params.b_legacy = b_legacy_vf;
+ vf_params.qid_usage_idx = qid_usage_idx;
+
+ p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
+ &params, &vf_params);
+ if (p_cid == OSAL_NULL)
+ goto out;
+
+ /* Legacy VFs have their Producers in a different location, which they
+	 * calculate on their own and clean prior to this.
+ */
+ if (!b_legacy_vf)
+ REG_WR(p_hwfn,
+ GTT_BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
+ 0);
+
+ rc = ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
+ req->bd_max_bytes,
+ req->rxq_addr,
+ req->cqe_pbl_addr,
+ req->cqe_pbl_size);
+ if (rc != ECORE_SUCCESS) {
+ status = PFVF_STATUS_FAILURE;
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
+ } else {
+ p_queue->cids[qid_usage_idx].p_cid = p_cid;
+ p_queue->cids[qid_usage_idx].b_is_tx = false;
+ status = PFVF_STATUS_SUCCESS;
+ vf->num_active_rxqs++;
+ }
+
+out:
+ ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
+ b_legacy_vf);
+}
+
+static void
+ecore_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
+ struct ecore_tunnel_info *p_tun,
+ u16 tunn_feature_mask)
+{
+ p_resp->tunn_feature_mask = tunn_feature_mask;
+ p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
+ p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
+ p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
+ p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
+	p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled;
+ p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
+ p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
+ p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
+ p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
+ p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
+ p_resp->geneve_udp_port = p_tun->geneve_port.port;
+ p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
+}
+
+static void
+__ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
+ struct ecore_tunn_update_type *p_tun,
+ enum ecore_tunn_mode mask, u8 tun_cls)
+{
+ if (p_req->tun_mode_update_mask & (1 << mask)) {
+ p_tun->b_update_mode = true;
+
+ if (p_req->tunn_mode & (1 << mask))
+ p_tun->b_mode_enabled = true;
+ }
+
+ p_tun->tun_cls = tun_cls;
+}
+
+static void
+ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
+ struct ecore_tunn_update_type *p_tun,
+ struct ecore_tunn_update_udp_port *p_port,
+ enum ecore_tunn_mode mask,
+ u8 tun_cls, u8 update_port, u16 port)
+{
+ if (update_port) {
+ p_port->b_update_port = true;
+ p_port->port = port;
+ }
+
+ __ecore_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
+}
+
+static bool
+ecore_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
+{
+ bool b_update_requested = false;
+
+ if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
+ p_req->update_geneve_port || p_req->update_vxlan_port)
+ b_update_requested = true;
+
+ return b_update_requested;
+}
+
+static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf)
+{
+ struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
+ struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct pfvf_update_tunn_param_tlv *p_resp;
+ struct vfpf_update_tunn_param_tlv *p_req;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u8 status = PFVF_STATUS_SUCCESS;
+ bool b_update_required = false;
+ struct ecore_tunnel_info tunn;
+ u16 tunn_feature_mask = 0;
+ int i;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+
+ OSAL_MEM_ZERO(&tunn, sizeof(tunn));
+ p_req = &mbx->req_virt->tunn_param_update;
+
+ if (!ecore_iov_pf_validate_tunn_param(p_req)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "No tunnel update requested by VF\n");
+ status = PFVF_STATUS_FAILURE;
+ goto send_resp;
+ }
+
+ tunn.b_update_rx_cls = p_req->update_tun_cls;
+ tunn.b_update_tx_cls = p_req->update_tun_cls;
+
+ ecore_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
+ ECORE_MODE_VXLAN_TUNN, p_req->vxlan_clss,
+ p_req->update_vxlan_port,
+ p_req->vxlan_port);
+ ecore_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
+ ECORE_MODE_L2GENEVE_TUNN,
+ p_req->l2geneve_clss,
+ p_req->update_geneve_port,
+ p_req->geneve_port);
+ __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
+ ECORE_MODE_IPGENEVE_TUNN,
+ p_req->ipgeneve_clss);
+ __ecore_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
+ ECORE_MODE_L2GRE_TUNN,
+ p_req->l2gre_clss);
+ __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
+ ECORE_MODE_IPGRE_TUNN,
+ p_req->ipgre_clss);
+
+	/* If the PF modifies the VF's request, it should still return an
+	 * error for a partial or modified configuration, as opposed to the
+	 * one requested.
+ */
+ rc = OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, &tunn_feature_mask,
+ &b_update_required, &tunn);
+
+ if (rc != ECORE_SUCCESS)
+ status = PFVF_STATUS_FAILURE;
+
+	/* Check whether the ECORE client is willing to update anything */
+ if (b_update_required) {
+ u16 geneve_port;
+
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+ ECORE_SPQ_MODE_EBLOCK,
+ OSAL_NULL);
+ if (rc != ECORE_SUCCESS)
+ status = PFVF_STATUS_FAILURE;
+
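+	/* Copy the reply body first and the leading 8-byte header last, so
+	 * the VF only observes a valid status once the full response has
+	 * landed; the channel-ready write below then signals completion.
+	 */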
+ geneve_port = p_tun->geneve_port.port;
+ ecore_for_each_vf(p_hwfn, i) {
+ ecore_iov_bulletin_set_udp_ports(p_hwfn, i,
+ p_tun->vxlan_port.port,
+ geneve_port);
+ }
+ }
+
+send_resp:
+ p_resp = ecore_add_tlv(p_hwfn, &mbx->offset,
+ CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
+
+ ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
+ ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
+}
+
+static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf,
+ u32 cid,
+ u8 status)
+{
+ struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct pfvf_start_queue_resp_tlv *p_tlv;
+ bool b_legacy = false;
+ u16 length;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+
+ /* Taking a bigger struct instead of adding a TLV to list was a
+ * mistake, but one which we're now stuck with, as some older
+ * clients assume the size of the previous response.
+ */
+ if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+ ETH_HSI_VER_NO_PKT_LEN_TUNN)
+ b_legacy = true;
+
+ if (!b_legacy)
+ length = sizeof(*p_tlv);
+ else
+ length = sizeof(struct pfvf_def_resp_tlv);
+
+ p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
+ length);
+ ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* Update the TLV with the response */
+ if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
+ p_tlv->offset = DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
+
+ ecore_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
+}
+
+static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ struct ecore_queue_start_common_params params;
+ struct ecore_queue_cid_vf_params vf_params;
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ u8 status = PFVF_STATUS_NO_RESOURCE;
+ struct ecore_vf_queue *p_queue;
+ struct vfpf_start_txq_tlv *req;
+ struct ecore_queue_cid *p_cid;
+ bool b_legacy_vf = false;
+ u8 qid_usage_idx;
+ u32 cid = 0;
+ enum _ecore_status_t rc;
+ u16 pq;
+
+ OSAL_MEMSET(&params, 0, sizeof(params));
+ req = &mbx->req_virt->start_txq;
+
+ if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid,
+ ECORE_IOV_VALIDATE_Q_NA) ||
+ !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
+ goto out;
+
+	/* In case this is a legacy VF - we need to know so as to use the right CIDs.
+ * TODO - need to validate that there was no official release post
+ * the current legacy scheme that still made that assumption.
+ */
+ if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+ ETH_HSI_VER_NO_PKT_LEN_TUNN)
+ b_legacy_vf = true;
+
+ /* Acquire a new queue-cid */
+ p_queue = &vf->vf_queues[req->tx_qid];
+
+ params.queue_id = p_queue->fw_tx_qid;
+ params.vport_id = vf->vport_id;
+ params.stats_id = vf->abs_vf_id + 0x10;
+ params.sb = req->hw_sb;
+ params.sb_idx = req->sb_index;
+
+ /* TODO - set qid_usage_idx according to extended TLV. For now, use
+ * '1' for Tx.
+ */
+ qid_usage_idx = 1;
+
+ if (p_queue->cids[qid_usage_idx].p_cid)
+ goto out;
+
+ OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
+ vf_params.vfid = vf->relative_vf_id;
+ vf_params.vf_qid = (u8)req->tx_qid;
+ vf_params.b_legacy = b_legacy_vf;
+ vf_params.qid_usage_idx = qid_usage_idx;
+
+ p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
+ &params, &vf_params);
+ if (p_cid == OSAL_NULL)
+ goto out;
+
+ pq = ecore_get_cm_pq_idx_vf(p_hwfn,
+ vf->relative_vf_id);
+ rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
+ req->pbl_addr, req->pbl_size, pq);
+ if (rc != ECORE_SUCCESS) {
+ status = PFVF_STATUS_FAILURE;
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
+ } else {
+ status = PFVF_STATUS_SUCCESS;
+ p_queue->cids[qid_usage_idx].p_cid = p_cid;
+ p_queue->cids[qid_usage_idx].b_is_tx = true;
+ cid = p_cid->cid;
+ }
+
+out:
+ ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf,
+ cid, status);
+}
+
+static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *vf,
+ u16 rxq_id,
+ u8 num_rxqs,
+ bool cqe_completion)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ int qid, i;
+
+ /* TODO - improve validation [wrap around] */
+ if (rxq_id + num_rxqs > OSAL_ARRAY_SIZE(vf->vf_queues))
+ return ECORE_INVAL;
+
+ for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
+ struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
+ struct ecore_queue_cid **pp_cid = OSAL_NULL;
+
+ /* There can be at most a single Rx per qzone. Find it */
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ if (p_queue->cids[i].p_cid &&
+ !p_queue->cids[i].b_is_tx) {
+ pp_cid = &p_queue->cids[i].p_cid;
+ break;
+ }
+ }
+ if (pp_cid == OSAL_NULL) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Ignoring VF[%02x] request of closing Rx queue %04x - closed\n",
+ vf->relative_vf_id, qid);
+ continue;
+ }
+
+ rc = ecore_eth_rx_queue_stop(p_hwfn, *pp_cid,
+ false, cqe_completion);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *pp_cid = OSAL_NULL;
+ vf->num_active_rxqs--;
+ }
+
+ return rc;
+}
+
+static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *vf,
+ u16 txq_id, u8 num_txqs)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct ecore_vf_queue *p_queue;
+ int qid, j;
+
+ if (!ecore_iov_validate_txq(p_hwfn, vf, txq_id,
+ ECORE_IOV_VALIDATE_Q_NA) ||
+ !ecore_iov_validate_txq(p_hwfn, vf, txq_id + num_txqs,
+ ECORE_IOV_VALIDATE_Q_NA))
+ return ECORE_INVAL;
+
+ for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
+ p_queue = &vf->vf_queues[qid];
+ for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
+ if (p_queue->cids[j].p_cid == OSAL_NULL)
+ continue;
+
+ if (!p_queue->cids[j].b_is_tx)
+ continue;
+
+ rc = ecore_eth_tx_queue_stop(p_hwfn,
+ p_queue->cids[j].p_cid);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_queue->cids[j].p_cid = OSAL_NULL;
+ }
+ }
+
+ return rc;
+}
+
+static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ u8 status = PFVF_STATUS_SUCCESS;
+ struct vfpf_stop_rxqs_tlv *req;
+ enum _ecore_status_t rc;
+
+	/* We allow starting from a qid != 0, so we need to make sure that
+	 * qid + num_qs doesn't exceed the actual number of queues that
+	 * exist.
+ */
+ req = &mbx->req_virt->stop_rxqs;
+ rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
+ req->num_rxqs, req->cqe_completion);
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
+ length, status);
+}
+
+static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ u8 status = PFVF_STATUS_SUCCESS;
+ struct vfpf_stop_txqs_tlv *req;
+ enum _ecore_status_t rc;
+
+	/* We allow starting from a qid != 0, so we need to make sure that
+	 * qid + num_qs doesn't exceed the actual number of queues that
+	 * exist.
+ */
+ req = &mbx->req_virt->stop_txqs;
+ rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
+ length, status);
+}
+
+static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ struct ecore_queue_cid *handlers[ECORE_MAX_VF_CHAINS_PER_PF];
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_update_rxq_tlv *req;
+ u8 status = PFVF_STATUS_FAILURE;
+ u8 complete_event_flg;
+ u8 complete_cqe_flg;
+ enum _ecore_status_t rc;
+ u16 i;
+
+ req = &mbx->req_virt->update_rxq;
+ complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
+ complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
+
+ /* Validate inputs */
+ for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
+ if (!ecore_iov_validate_rxq(p_hwfn, vf, i,
+ ECORE_IOV_VALIDATE_Q_ENABLE)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
+ vf->relative_vf_id, req->rx_qid,
+ req->num_rxqs);
+ goto out;
+ }
+ }
+
+ for (i = 0; i < req->num_rxqs; i++) {
+ struct ecore_vf_queue *p_queue;
+ u16 qid = req->rx_qid + i;
+
+ p_queue = &vf->vf_queues[qid];
+ handlers[i] = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
+ p_queue);
+ }
+
+ rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
+ req->num_rxqs,
+ complete_cqe_flg,
+ complete_event_flg,
+ ECORE_SPQ_MODE_EBLOCK,
+ OSAL_NULL);
+ if (rc)
+ goto out;
+
+ status = PFVF_STATUS_SUCCESS;
+out:
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
+ length, status);
+}
+
+void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
+ void *p_tlvs_list, u16 req_type)
+{
+ struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
+ int len = 0;
+
+ do {
+ if (!p_tlv->length) {
+ DP_NOTICE(p_hwfn, true, "Zero length TLV found\n");
+ return OSAL_NULL;
+ }
+
+ if (p_tlv->type == req_type) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Extended tlv type %s, length %d found\n",
+ ecore_channel_tlvs_string[p_tlv->type],
+ p_tlv->length);
+ return p_tlv;
+ }
+
+ len += p_tlv->length;
+ p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
+
+ if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
+ DP_NOTICE(p_hwfn, true,
+				  "TLVs have overrun the buffer size\n");
+ return OSAL_NULL;
+ }
+ } while (p_tlv->type != CHANNEL_TLV_LIST_END);
+
+ return OSAL_NULL;
+}
+
+static void
+ecore_iov_vp_update_act_param(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_data,
+ struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_activate_tlv *p_act_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+
+ p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_act_tlv)
+ return;
+
+ p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
+ p_data->vport_active_rx_flg = p_act_tlv->active_rx;
+ p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
+ p_data->vport_active_tx_flg = p_act_tlv->active_tx;
+ *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE;
+}
+
+static void
+ecore_iov_vp_update_vlan_param(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_data,
+ struct ecore_vf_info *p_vf,
+ struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
+
+ p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_vlan_tlv)
+ return;
+
+ p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
+
+ /* Ignore the VF request if we're forcing a vlan */
+ if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
+ p_data->update_inner_vlan_removal_flg = 1;
+ p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
+ }
+
+ *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_VLAN_STRIP;
+}
+
+static void
+ecore_iov_vp_update_tx_switch(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_data,
+ struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+
+ p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_tx_switch_tlv)
+ return;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+ DP_NOTICE(p_hwfn, false,
+ "FPGA: Ignore tx-switching configuration originating"
+ " from VFs\n");
+ return;
+ }
+#endif
+
+ p_data->update_tx_switching_flg = 1;
+ p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
+ *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH;
+}
+
+static void
+ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_data,
+ struct ecore_iov_vf_mbx *p_mbx,
+ u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
+
+ p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_mcast_tlv)
+ return;
+
+ p_data->update_approx_mcast_flg = 1;
+ OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins,
+ sizeof(unsigned long) *
+ ETH_MULTICAST_MAC_BINS_IN_REGS);
+ *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
+}
+
+static void
+ecore_iov_vp_update_accept_flag(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_data,
+ struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct ecore_filter_accept_flags *p_flags = &p_data->accept_flags;
+ struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+
+ p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_accept_tlv)
+ return;
+
+ p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
+ p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
+ p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
+ p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
+ *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM;
+}
+
+static void
+ecore_iov_vp_update_accept_any_vlan(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_data,
+ struct ecore_iov_vf_mbx *p_mbx,
+ u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
+
+ p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_accept_any_vlan)
+ return;
+
+ p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
+ p_data->update_accept_any_vlan_flg =
+ p_accept_any_vlan->update_accept_any_vlan_flg;
+ *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
+}
+
+static void
+ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *vf,
+ struct ecore_sp_vport_update_params *p_data,
+ struct ecore_rss_params *p_rss,
+ struct ecore_iov_vf_mbx *p_mbx,
+ u16 *tlvs_mask, u16 *tlvs_accepted)
+{
+ struct vfpf_vport_update_rss_tlv *p_rss_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
+ bool b_reject = false;
+ u16 table_size;
+ u16 i, q_idx;
+
+ p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_rss_tlv) {
+ p_data->rss_params = OSAL_NULL;
+ return;
+ }
+
+ OSAL_MEMSET(p_rss, 0, sizeof(struct ecore_rss_params));
+
+ p_rss->update_rss_config =
+ !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_CONFIG_FLAG);
+ p_rss->update_rss_capabilities =
+ !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_CAPS_FLAG);
+ p_rss->update_rss_ind_table =
+ !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_IND_TABLE_FLAG);
+ p_rss->update_rss_key =
+ !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_KEY_FLAG);
+
+ p_rss->rss_enable = p_rss_tlv->rss_enable;
+ p_rss->rss_eng_id = vf->rss_eng_id;
+ p_rss->rss_caps = p_rss_tlv->rss_caps;
+ p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
+ OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
+ sizeof(p_rss->rss_key));
+
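+	/* Clamp the indirection table to the smaller of the PF's table size
+	 * and the 2^rss_table_size_log entries requested by the VF.
+	 */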
+ table_size = OSAL_MIN_T(u16, OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
+ (1 << p_rss_tlv->rss_table_size_log));
+
+ for (i = 0; i < table_size; i++) {
+ struct ecore_queue_cid *p_cid;
+
+ q_idx = p_rss_tlv->rss_ind_table[i];
+ if (!ecore_iov_validate_rxq(p_hwfn, vf, q_idx,
+ ECORE_IOV_VALIDATE_Q_ENABLE)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Omitting RSS due to wrong queue %04x\n",
+ vf->relative_vf_id, q_idx);
+ b_reject = true;
+ goto out;
+ }
+
+ p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
+ &vf->vf_queues[q_idx]);
+ p_rss->rss_ind_table[i] = p_cid;
+ }
+
+ p_data->rss_params = p_rss;
+out:
+ *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
+ if (!b_reject)
+ *tlvs_accepted |= 1 << ECORE_IOV_VP_UPDATE_RSS;
+}
+
+static void
+ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *vf,
+ struct ecore_sp_vport_update_params *p_data,
+ struct ecore_sge_tpa_params *p_sge_tpa,
+ struct ecore_iov_vf_mbx *p_mbx,
+ u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
+
+ p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+
+ if (!p_sge_tpa_tlv) {
+ p_data->sge_tpa_params = OSAL_NULL;
+ return;
+ }
+
+ OSAL_MEMSET(p_sge_tpa, 0, sizeof(struct ecore_sge_tpa_params));
+
+ p_sge_tpa->update_tpa_en_flg =
+ !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
+ p_sge_tpa->update_tpa_param_flg =
+ !!(p_sge_tpa_tlv->update_sge_tpa_flags &
+ VFPF_UPDATE_TPA_PARAM_FLAG);
+
+ p_sge_tpa->tpa_ipv4_en_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
+ p_sge_tpa->tpa_ipv6_en_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
+ p_sge_tpa->tpa_pkt_split_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
+ p_sge_tpa->tpa_hdr_data_split_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
+ p_sge_tpa->tpa_gro_consistent_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
+
+ p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
+ p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
+ p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
+ p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
+ p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
+
+ p_data->sge_tpa_params = p_sge_tpa;
+
+ *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_SGE_TPA;
+}
+
+static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ struct ecore_rss_params *p_rss_params = OSAL_NULL;
+ struct ecore_sp_vport_update_params params;
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct ecore_sge_tpa_params sge_tpa_params;
+ u16 tlvs_mask = 0, tlvs_accepted = 0;
+ u8 status = PFVF_STATUS_SUCCESS;
+ u16 length;
+ enum _ecore_status_t rc;
+
+	/* Validate PF can send such a request */
+ if (!vf->vport_instance) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "No VPORT instance available for VF[%d],"
+ " failing vport update\n",
+ vf->abs_vf_id);
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+ p_rss_params = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_rss_params));
+ if (p_rss_params == OSAL_NULL) {
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+ OSAL_MEMSET(&params, 0, sizeof(params));
+ params.opaque_fid = vf->opaque_fid;
+ params.vport_id = vf->vport_id;
+ params.rss_params = OSAL_NULL;
+
+ /* Search for extended tlvs list and update values
+ * from VF in struct ecore_sp_vport_update_params.
+ */
+ ecore_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
+ ecore_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
+ ecore_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
+ ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
+ ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
+ ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
+ ecore_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
+ &sge_tpa_params, mbx, &tlvs_mask);
+
+ tlvs_accepted = tlvs_mask;
+
+	/* Some of the extended TLVs need to be validated first; in that case
+	 * they can update the mask without updating 'accepted' [so that the
+	 * PF can communicate to the VF that it has rejected the request].
+ */
+ ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
+ mbx, &tlvs_mask, &tlvs_accepted);
+
+	/* Just log a message if there isn't a single extended TLV in the
+	 * buffer. Once the VF requests all vport-update ramrod features as
+	 * extended TLVs, an error can be returned in the response when no
+	 * extended TLV is present in the buffer.
+ */
+ if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
+ &params, &tlvs_accepted) !=
+ ECORE_SUCCESS) {
+ tlvs_accepted = 0;
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+
+ if (!tlvs_accepted) {
+ if (tlvs_mask)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+				   "Upper-layer prevents the requested VF"
+ " configuration\n");
+ else
+ DP_NOTICE(p_hwfn, true,
+ "No feature tlvs found for vport update\n");
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+
+ rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
+ OSAL_NULL);
+
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+out:
+ OSAL_VFREE(p_hwfn->p_dev, p_rss_params);
+ length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
+ tlvs_mask, tlvs_accepted);
+ ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
+}
+
+static enum _ecore_status_t
+ecore_iov_vf_update_vlan_shadow(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ struct ecore_filter_ucast *p_params)
+{
+ int i;
+
+ /* First remove entries and then add new ones */
+ if (p_params->opcode == ECORE_FILTER_REMOVE) {
+ for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
+ if (p_vf->shadow_config.vlans[i].used &&
+ p_vf->shadow_config.vlans[i].vid ==
+ p_params->vlan) {
+ p_vf->shadow_config.vlans[i].used = false;
+ break;
+ }
+ if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF [%d] - Tries to remove a non-existing"
+ " vlan\n",
+ p_vf->relative_vf_id);
+ return ECORE_INVAL;
+ }
+ } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
+ p_params->opcode == ECORE_FILTER_FLUSH) {
+ for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
+ p_vf->shadow_config.vlans[i].used = false;
+ }
+
+ /* In forced mode, we're willing to remove entries - but we don't add
+ * new ones.
+ */
+ if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
+ return ECORE_SUCCESS;
+
+ if (p_params->opcode == ECORE_FILTER_ADD ||
+ p_params->opcode == ECORE_FILTER_REPLACE) {
+ for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
+ if (p_vf->shadow_config.vlans[i].used)
+ continue;
+
+ p_vf->shadow_config.vlans[i].used = true;
+ p_vf->shadow_config.vlans[i].vid = p_params->vlan;
+ break;
+ }
+
+ if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF [%d] - Tries to configure more than %d"
+ " vlan filters\n",
+ p_vf->relative_vf_id,
+ ECORE_ETH_VF_NUM_VLAN_FILTERS + 1);
+ return ECORE_INVAL;
+ }
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_iov_vf_update_mac_shadow(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ struct ecore_filter_ucast *p_params)
+{
+ char empty_mac[ETH_ALEN];
+ int i;
+
+ OSAL_MEM_ZERO(empty_mac, ETH_ALEN);
+
+ /* If we're in forced-mode, we don't allow any change */
+ /* TODO - this would change if we were ever to implement logic for
+ * removing a forced MAC altogether [in which case, like for vlans,
+	 * we should be able to re-trace the previous configuration].
+ */
+ if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
+ return ECORE_SUCCESS;
+
+ /* First remove entries and then add new ones */
+ if (p_params->opcode == ECORE_FILTER_REMOVE) {
+ for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
+ if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
+ p_params->mac, ETH_ALEN)) {
+ OSAL_MEM_ZERO(p_vf->shadow_config.macs[i],
+ ETH_ALEN);
+ break;
+ }
+ }
+
+ if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "MAC isn't configured\n");
+ return ECORE_INVAL;
+ }
+ } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
+ p_params->opcode == ECORE_FILTER_FLUSH) {
+ for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++)
+ OSAL_MEM_ZERO(p_vf->shadow_config.macs[i], ETH_ALEN);
+ }
+
+ /* List the new MAC address */
+ if (p_params->opcode != ECORE_FILTER_ADD &&
+ p_params->opcode != ECORE_FILTER_REPLACE)
+ return ECORE_SUCCESS;
+
+ for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
+ if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
+ empty_mac, ETH_ALEN)) {
+ OSAL_MEMCPY(p_vf->shadow_config.macs[i],
+ p_params->mac, ETH_ALEN);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Added MAC at %d entry in shadow\n", i);
+ break;
+ }
+ }
+
+ if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "No available place for MAC\n");
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ struct ecore_filter_ucast *p_params)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ if (p_params->type == ECORE_FILTER_MAC) {
+ rc = ecore_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
+
+ if (p_params->type == ECORE_FILTER_VLAN)
+ rc = ecore_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
+
+ return rc;
+}
+
+static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ struct ecore_bulletin_content *p_bulletin = vf->bulletin.p_virt;
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_ucast_filter_tlv *req;
+ u8 status = PFVF_STATUS_SUCCESS;
+ struct ecore_filter_ucast params;
+ enum _ecore_status_t rc;
+
+ /* Prepare the unicast filter params */
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_filter_ucast));
+ req = &mbx->req_virt->ucast_filter;
+ params.opcode = (enum ecore_filter_opcode)req->opcode;
+ params.type = (enum ecore_filter_ucast_type)req->type;
+
+ /* @@@TBD - We might need logic on HV side in determining this */
+ params.is_rx_filter = 1;
+ params.is_tx_filter = 1;
+ params.vport_to_remove_from = vf->vport_id;
+ params.vport_to_add_to = vf->vport_id;
+ OSAL_MEMCPY(params.mac, req->mac, ETH_ALEN);
+ params.vlan = req->vlan;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x]"
+ " MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
+ vf->abs_vf_id, params.opcode, params.type,
+ params.is_rx_filter ? "RX" : "",
+ params.is_tx_filter ? "TX" : "",
+ params.vport_to_add_to,
+ params.mac[0], params.mac[1], params.mac[2],
+ params.mac[3], params.mac[4], params.mac[5], params.vlan);
+
+ if (!vf->vport_instance) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "No VPORT instance available for VF[%d],"
+ " failing ucast MAC configuration\n",
+ vf->abs_vf_id);
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+ /* Update shadow copy of the VF configuration */
+ if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, &params) !=
+ ECORE_SUCCESS) {
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+	/* Determine if the unicast filtering is acceptable to the PF */
+ if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
+ (params.type == ECORE_FILTER_VLAN ||
+ params.type == ECORE_FILTER_MAC_VLAN)) {
+ /* Once VLAN is forced or PVID is set, do not allow
+ * to add/replace any further VLANs.
+ */
+ if (params.opcode == ECORE_FILTER_ADD ||
+ params.opcode == ECORE_FILTER_REPLACE)
+ status = PFVF_STATUS_FORCED;
+ goto out;
+ }
+
+ if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
+ (params.type == ECORE_FILTER_MAC ||
+ params.type == ECORE_FILTER_MAC_VLAN)) {
+ if (OSAL_MEMCMP(p_bulletin->mac, params.mac, ETH_ALEN) ||
+ (params.opcode != ECORE_FILTER_ADD &&
+ params.opcode != ECORE_FILTER_REPLACE))
+ status = PFVF_STATUS_FORCED;
+ goto out;
+ }
+
+ rc = OSAL_IOV_CHK_UCAST(p_hwfn, vf->relative_vf_id, &params);
+ if (rc == ECORE_EXISTS) {
+ goto out;
+ } else if (rc == ECORE_INVAL) {
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+ rc = ecore_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
+ ECORE_SPQ_MODE_CB, OSAL_NULL);
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+out:
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
+ sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static void ecore_iov_vf_mbx_int_cleanup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ int i;
+
+ /* Reset the SBs */
+ for (i = 0; i < vf->num_sbs; i++)
+ ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
+ vf->igu_sbs[i],
+ vf->opaque_fid, false);
+
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
+ sizeof(struct pfvf_def_resp_tlv),
+ PFVF_STATUS_SUCCESS);
+}
+
+static void ecore_iov_vf_mbx_close(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ u8 status = PFVF_STATUS_SUCCESS;
+
+ /* Disable Interrupts for VF */
+ ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
+
+ /* Reset Permission table */
+ ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
+
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
+ length, status);
+}
+
+static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ u8 status = PFVF_STATUS_SUCCESS;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ ecore_iov_vf_cleanup(p_hwfn, p_vf);
+
+ if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
+ /* Stopping the VF */
+ rc = ecore_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
+ p_vf->opaque_fid);
+
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn, "ecore_sp_vf_stop returned error %d\n",
+ rc);
+ status = PFVF_STATUS_FAILURE;
+ }
+
+ p_vf->state = VF_STOPPED;
+ }
+
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
+ length, status);
+}
+
+static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct vfpf_update_coalesce *req;
+ u8 status = PFVF_STATUS_FAILURE;
+ struct ecore_queue_cid *p_cid;
+ u16 rx_coal, tx_coal;
+ u16 qid;
+ int i;
+
+ req = &mbx->req_virt->update_coalesce;
+
+ rx_coal = req->rx_coal;
+ tx_coal = req->tx_coal;
+ qid = req->qid;
+
+ if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
+ ECORE_IOV_VALIDATE_Q_ENABLE) &&
+ rx_coal) {
+ DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
+ vf->abs_vf_id, qid);
+ goto out;
+ }
+
+ if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
+ ECORE_IOV_VALIDATE_Q_ENABLE) &&
+ tx_coal) {
+ DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
+ vf->abs_vf_id, qid);
+ goto out;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
+ vf->abs_vf_id, rx_coal, tx_coal, qid);
+
+ if (rx_coal) {
+ p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
+ &vf->vf_queues[qid]);
+
+ rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
+ if (rc != ECORE_SUCCESS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Unable to set rx queue = %d coalesce\n",
+ vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
+ goto out;
+ }
+ }
+
+ /* TODO - in future, it might be possible to pass this in a per-cid
+ * granularity. For now, do this for all Tx queues.
+ */
+ if (tx_coal) {
+ struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
+
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ if (p_queue->cids[i].p_cid == OSAL_NULL)
+ continue;
+
+ if (!p_queue->cids[i].b_is_tx)
+ continue;
+
+ rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
+ p_queue->cids[i].p_cid);
+ if (rc != ECORE_SUCCESS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Unable to set tx queue coalesce\n",
+ vf->abs_vf_id);
+ goto out;
+ }
+ }
+ }
+
+ status = PFVF_STATUS_SUCCESS;
+out:
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
+ sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static enum _ecore_status_t
+ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
+{
+ int cnt;
+ u32 val;
+
+ ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid);
+
+ for (cnt = 0; cnt < 50; cnt++) {
+ val = ecore_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
+ if (!val)
+ break;
+ OSAL_MSLEEP(20);
+ }
+ ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
+
+ if (cnt == 50) {
+ DP_ERR(p_hwfn,
+ "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
+ p_vf->abs_vf_id, val);
+ return ECORE_TIMEOUT;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
+{
+ u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
+ int i, cnt;
+
+ /* Read initial consumers & producers */
+ for (i = 0; i < MAX_NUM_VOQS; i++) {
+ u32 prod;
+
+ cons[i] = ecore_rd(p_hwfn, p_ptt,
+ PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
+ i * 0x40);
+ prod = ecore_rd(p_hwfn, p_ptt,
+ PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
+ i * 0x40);
+ distance[i] = prod - cons[i];
+ }
+
+ /* Wait for consumers to pass the producers */
+ i = 0;
+ for (cnt = 0; cnt < 50; cnt++) {
+ for (; i < MAX_NUM_VOQS; i++) {
+ u32 tmp;
+
+ tmp = ecore_rd(p_hwfn, p_ptt,
+ PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
+ i * 0x40);
+ if (distance[i] > tmp - cons[i])
+ break;
+ }
+
+ if (i == MAX_NUM_VOQS)
+ break;
+
+ OSAL_MSLEEP(20);
+ }
+
+ if (cnt == 50) {
+ DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
+ p_vf->abs_vf_id, i);
+ return ECORE_TIMEOUT;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_iov_vf_flr_poll(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ struct ecore_ptt *p_ptt)
+{
+ enum _ecore_status_t rc;
+
+ /* TODO - add SRC and TM polling once we add storage IOV */
+
+ rc = ecore_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
+ if (rc)
+ return rc;
+
+ rc = ecore_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
+ if (rc)
+ return rc;
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 rel_vf_id, u32 *ack_vfs)
+{
+ struct ecore_vf_info *p_vf;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+ if (!p_vf)
+ return ECORE_SUCCESS;
+
+ if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
+ (1ULL << (rel_vf_id % 64))) {
+ u16 vfid = p_vf->abs_vf_id;
+
+ /* TODO - should we lock channel? */
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] - Handling FLR\n", vfid);
+
+ ecore_iov_vf_cleanup(p_hwfn, p_vf);
+
+ /* If VF isn't active, no need for anything but SW */
+ if (!p_vf->b_init)
+ goto cleanup;
+
+ /* TODO - what to do in case of failure? */
+ rc = ecore_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
+ if (rc != ECORE_SUCCESS)
+ goto cleanup;
+
+ rc = ecore_final_cleanup(p_hwfn, p_ptt, vfid, true);
+ if (rc) {
+ /* TODO - what's now? What a mess.... */
+ DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid);
+ return rc;
+ }
+
+ /* Workaround to make VF-PF channel ready, as FW
+ * doesn't do that as a part of FLR.
+ */
+ REG_WR(p_hwfn,
+ GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
+
+ /* VF_STOPPED has to be set only after final cleanup
+ * but prior to re-enabling the VF.
+ */
+ p_vf->state = VF_STOPPED;
+
+ rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
+ if (rc) {
+ /* TODO - again, a mess... */
+ DP_ERR(p_hwfn, "Failed to re-enable VF[%d] acces\n",
+ vfid);
+ return rc;
+ }
+cleanup:
+ /* Mark VF for ack and clean pending state */
+ if (p_vf->state == VF_RESET)
+ p_vf->state = VF_STOPPED;
+ ack_vfs[vfid / 32] |= (1 << (vfid % 32));
+ p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
+ ~(1ULL << (rel_vf_id % 64));
+ p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
+ ~(1ULL << (rel_vf_id % 64));
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 ack_vfs[VF_MAX_STATIC / 32];
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u16 i;
+
+ OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
+
+ /* Since BRB <-> PRS interface can't be tested as part of the flr
+ * polling due to HW limitations, simply sleep a bit. And since
+ * there's no need to wait per-vf, do it before looping.
+ */
+ OSAL_MSLEEP(100);
+
+ for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++)
+ ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
+
+ rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u16 rel_vf_id)
+{
+ u32 ack_vfs[VF_MAX_STATIC / 32];
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
+
+ /* Wait instead of polling the BRB <-> PRS interface */
+ OSAL_MSLEEP(100);
+
+ ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, rel_vf_id, ack_vfs);
+
+ rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
+ return rc;
+}
+
+bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
+{
+ bool found = false;
+ u16 i;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");
+ for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "[%08x,...,%08x]: %08x\n",
+ i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
+
+ if (!p_hwfn->p_dev->p_iov_info) {
+ DP_NOTICE(p_hwfn, true, "VF flr but no IOV\n");
+ return false;
+ }
+
+ /* Mark VFs */
+ for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
+ struct ecore_vf_info *p_vf;
+ u8 vfid;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, i, false);
+ if (!p_vf)
+ continue;
+
+ vfid = p_vf->abs_vf_id;
+ if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
+ u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
+ u16 rel_vf_id = p_vf->relative_vf_id;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] [rel %d] got FLR-ed\n",
+ vfid, rel_vf_id);
+
+ p_vf->state = VF_RESET;
+
+			/* No need to lock here, since pending_flr should
+			 * only change here and before ACKing the MFW. Since
+			 * the MFW will not trigger an additional attention
+			 * for VF FLR until we ACK, we're safe.
+ */
+ p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
+ found = true;
+ }
+ }
+
+ return found;
+}
+
+void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
+ u16 vfid,
+ struct ecore_mcp_link_params *p_params,
+ struct ecore_mcp_link_state *p_link,
+ struct ecore_mcp_link_capabilities *p_caps)
+{
+ struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
+ struct ecore_bulletin_content *p_bulletin;
+
+ if (!p_vf)
+ return;
+
+ p_bulletin = p_vf->bulletin.p_virt;
+
+ if (p_params)
+ __ecore_vf_get_link_params(p_hwfn, p_params, p_bulletin);
+ if (p_link)
+ __ecore_vf_get_link_state(p_hwfn, p_link, p_bulletin);
+ if (p_caps)
+ __ecore_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
+}
+
+void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, int vfid)
+{
+ struct ecore_iov_vf_mbx *mbx;
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!p_vf)
+ return;
+
+ mbx = &p_vf->vf_mbx;
+
+ /* ecore_iov_process_mbx_request */
+ DP_VERBOSE(p_hwfn,
+ ECORE_MSG_IOV,
+ "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id);
+
+ mbx->first_tlv = mbx->req_virt->first_tlv;
+
+ OSAL_IOV_VF_MSG_TYPE(p_hwfn,
+ p_vf->relative_vf_id,
+ mbx->first_tlv.tl.type);
+
+ /* Lock the per vf op mutex and note the locker's identity.
+ * The unlock will take place in mbx response.
+ */
+ ecore_iov_lock_vf_pf_channel(p_hwfn,
+ p_vf, mbx->first_tlv.tl.type);
+
+ /* check if tlv type is known */
+ if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type) &&
+ !p_vf->b_malicious) {
+ /* switch on the opcode */
+ switch (mbx->first_tlv.tl.type) {
+ case CHANNEL_TLV_ACQUIRE:
+ ecore_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_VPORT_START:
+ ecore_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_VPORT_TEARDOWN:
+ ecore_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_START_RXQ:
+ ecore_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_START_TXQ:
+ ecore_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_STOP_RXQS:
+ ecore_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_STOP_TXQS:
+ ecore_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_UPDATE_RXQ:
+ ecore_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_VPORT_UPDATE:
+ ecore_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_UCAST_FILTER:
+ ecore_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_CLOSE:
+ ecore_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_INT_CLEANUP:
+ ecore_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_RELEASE:
+ ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_UPDATE_TUNN_PARAM:
+ ecore_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_COALESCE_UPDATE:
+ ecore_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
+ break;
+ }
+ } else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
+		/* If we've received a message from a VF we consider
+		 * malicious, we ignore the message unless it's a RELEASE,
+		 * in which case we give it the benefit of the doubt,
+		 * allowing the next loaded driver to start again.
+		 */
+ if (mbx->first_tlv.tl.type == CHANNEL_TLV_RELEASE) {
+ /* TODO - initiate FLR, remove malicious indication */
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF [%02x] - considered malicious, but wanted to RELEASE. TODO\n",
+ p_vf->abs_vf_id);
+ } else {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
+ p_vf->abs_vf_id, mbx->first_tlv.tl.type);
+ }
+
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
+ mbx->first_tlv.tl.type,
+ sizeof(struct pfvf_def_resp_tlv),
+ PFVF_STATUS_MALICIOUS);
+ } else {
+ /* unknown TLV - this may belong to a VF driver from the future
+ * - a version written after this PF driver was written, which
+ * supports features unknown as of yet. Too bad since we don't
+ * support them. Or this may be because someone wrote a crappy
+ * VF driver and is sending garbage over the channel.
+ */
+ DP_NOTICE(p_hwfn, false,
+ "VF[%02x]: unknown TLV. type %04x length %04x"
+ " padding %08x reply address %lu\n",
+ p_vf->abs_vf_id,
+ mbx->first_tlv.tl.type,
+ mbx->first_tlv.tl.length,
+ mbx->first_tlv.padding,
+ (unsigned long)mbx->first_tlv.reply_address);
+
+ /* Try replying in case reply address matches the acquisition's
+ * posted address.
+ */
+ if (p_vf->acquire.first_tlv.reply_address &&
+ (mbx->first_tlv.reply_address ==
+ p_vf->acquire.first_tlv.reply_address))
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
+ mbx->first_tlv.tl.type,
+ sizeof(struct pfvf_def_resp_tlv),
+ PFVF_STATUS_NOT_SUPPORTED);
+ else
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%02x]: Can't respond to TLV -"
+ " no valid reply address\n",
+ p_vf->abs_vf_id);
+ }
+
+ ecore_iov_unlock_vf_pf_channel(p_hwfn, p_vf,
+ mbx->first_tlv.tl.type);
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+ mbx->sw_mbx.mbx_state = VF_PF_RESPONSE_READY;
+ mbx->sw_mbx.response_offset = 0;
+#endif
+}
+
+void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid)
+{
+ u64 add_bit = 1ULL << (vfid % 64);
+
+ /* TODO - add locking mechanisms [no atomics in ecore, so we can't
+ * add the lock inside the ecore_pf_iov struct].
+ */
+ p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
+}
+
+void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
+ u64 *events)
+{
+ u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
+
+ /* TODO - Take a lock */
+ OSAL_MEMCPY(events, p_pending_events,
+ sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
+ OSAL_MEMSET(p_pending_events, 0,
+ sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
+}
+
+static struct ecore_vf_info *
+ecore_sriov_get_vf_from_absid(struct ecore_hwfn *p_hwfn, u16 abs_vfid)
+{
+ u8 min = (u8)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;
+
+ if (!_ecore_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Got indication for VF [abs 0x%08x] that cannot be"
+ " handled by PF\n",
+ abs_vfid);
+ return OSAL_NULL;
+ }
+
+ return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
+}
+
+static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
+ u16 abs_vfid,
+ struct regpair *vf_msg)
+{
+ struct ecore_vf_info *p_vf = ecore_sriov_get_vf_from_absid(p_hwfn,
+ abs_vfid);
+
+ if (!p_vf)
+ return ECORE_SUCCESS;
+
+ /* List the physical address of the request so that handler
+ * could later on copy the message from it.
+ */
+ p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
+
+ return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
+}
+
+static void ecore_sriov_vfpf_malicious(struct ecore_hwfn *p_hwfn,
+ struct malicious_vf_eqe_data *p_data)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vfId);
+
+ if (!p_vf)
+ return;
+
+ DP_INFO(p_hwfn,
+ "VF [%d] - Malicious behavior [%02x]\n",
+ p_vf->abs_vf_id, p_data->errId);
+
+ p_vf->b_malicious = true;
+
+ OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id);
+}
+
+enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo,
+ union event_ring_data *data)
+{
+ switch (opcode) {
+ case COMMON_EVENT_VF_PF_CHANNEL:
+ return ecore_sriov_vfpf_msg(p_hwfn, OSAL_LE16_TO_CPU(echo),
+ &data->vf_pf_channel.msg_addr);
+ case COMMON_EVENT_VF_FLR:
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF-FLR is still not supported\n");
+ return ECORE_SUCCESS;
+ case COMMON_EVENT_MALICIOUS_VF:
+ ecore_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
+ return ECORE_SUCCESS;
+ default:
+ DP_INFO(p_hwfn->p_dev, "Unknown sriov eqe event 0x%02x\n",
+ opcode);
+ return ECORE_INVAL;
+ }
+}
+
+bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ return !!(p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
+ (1ULL << (rel_vf_id % 64)));
+}
+
+u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
+ u16 i;
+
+ if (!p_iov)
+ goto out;
+
+ for (i = rel_vf_id; i < p_iov->total_vfs; i++)
+		if (ecore_iov_is_valid_vfid(p_hwfn, i, true, false))
+ return i;
+
+out:
+ return E4_MAX_NUM_VFS;
+}
+
+enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *ptt, int vfid)
+{
+ struct ecore_dmae_params params;
+ struct ecore_vf_info *vf_info;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info)
+ return ECORE_INVAL;
+
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
+ params.flags = ECORE_DMAE_FLAG_VF_SRC | ECORE_DMAE_FLAG_COMPLETION_DST;
+ params.src_vfid = vf_info->abs_vf_id;
+
+ if (ecore_dmae_host2host(p_hwfn, ptt,
+ vf_info->vf_mbx.pending_req,
+ vf_info->vf_mbx.req_phys,
+ sizeof(union vfpf_tlvs) / 4, &params)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Failed to copy message from VF 0x%02x\n", vfid);
+
+ return ECORE_IO;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
+ u8 *mac, int vfid)
+{
+ struct ecore_vf_info *vf_info;
+ u64 feature;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->p_dev, true,
+ "Can not set forced MAC, invalid vfid [%d]\n", vfid);
+ return;
+ }
+ if (vf_info->b_malicious) {
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "Can't set forced MAC to malicious VF [%d]\n",
+ vfid);
+ return;
+ }
+
+ feature = 1 << MAC_ADDR_FORCED;
+ OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
+
+ vf_info->bulletin.p_virt->valid_bitmap |= feature;
+ /* Forced MAC will disable MAC_ADDR */
+ vf_info->bulletin.p_virt->valid_bitmap &=
+ ~(1 << VFPF_BULLETIN_MAC_ADDR);
+
+ ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+}
+
+enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
+ u8 *mac, int vfid)
+{
+ struct ecore_vf_info *vf_info;
+ u64 feature;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->p_dev, true,
+ "Can not set MAC, invalid vfid [%d]\n", vfid);
+ return ECORE_INVAL;
+ }
+ if (vf_info->b_malicious) {
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "Can't set MAC to malicious VF [%d]\n",
+ vfid);
+ return ECORE_INVAL;
+ }
+
+ if (vf_info->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Can not set MAC, Forced MAC is configured\n");
+ return ECORE_INVAL;
+ }
+
+ feature = 1 << VFPF_BULLETIN_MAC_ADDR;
+ OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
+
+ vf_info->bulletin.p_virt->valid_bitmap |= feature;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
+ bool b_untagged_only, int vfid)
+{
+ struct ecore_vf_info *vf_info;
+ u64 feature;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->p_dev, true,
+ "Can not set untagged default, invalid vfid [%d]\n",
+ vfid);
+ return ECORE_INVAL;
+ }
+ if (vf_info->b_malicious) {
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "Can't set untagged default to malicious VF [%d]\n",
+ vfid);
+ return ECORE_INVAL;
+ }
+
+ /* Since this is configurable only during vport-start, don't take it
+ * if we're past that point.
+ */
+ if (vf_info->state == VF_ENABLED) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Can't support untagged change for vfid[%d] -"
+ " VF is already active\n",
+ vfid);
+ return ECORE_INVAL;
+ }
+
+ /* Set configuration; This will later be taken into account during the
+ * VF initialization.
+ */
+ feature = (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT) |
+ (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED);
+ vf_info->bulletin.p_virt->valid_bitmap |= feature;
+
+ vf_info->bulletin.p_virt->default_only_untagged = b_untagged_only ? 1
+ : 0;
+
+ return ECORE_SUCCESS;
+}
+
+void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
+ u16 *opaque_fid)
+{
+ struct ecore_vf_info *vf_info;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info)
+ return;
+
+ *opaque_fid = vf_info->opaque_fid;
+}
+
+void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
+ u16 pvid, int vfid)
+{
+ struct ecore_vf_info *vf_info;
+ u64 feature;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->p_dev, true,
+ "Can not set forced MAC, invalid vfid [%d]\n",
+ vfid);
+ return;
+ }
+ if (vf_info->b_malicious) {
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "Can't set forced vlan to malicious VF [%d]\n",
+ vfid);
+ return;
+ }
+
+ feature = 1 << VLAN_ADDR_FORCED;
+ vf_info->bulletin.p_virt->pvid = pvid;
+ if (pvid)
+ vf_info->bulletin.p_virt->valid_bitmap |= feature;
+ else
+ vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
+
+ ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+}
+
+void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn,
+ int vfid, u16 vxlan_port, u16 geneve_port)
+{
+ struct ecore_vf_info *vf_info;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->p_dev, true,
+ "Can not set udp ports, invalid vfid [%d]\n", vfid);
+ return;
+ }
+
+ if (vf_info->b_malicious) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Can not set udp ports to malicious VF [%d]\n",
+ vfid);
+ return;
+ }
+
+ vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
+ vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
+}
+
+bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid)
+{
+ struct ecore_vf_info *p_vf_info;
+
+ p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!p_vf_info)
+ return false;
+
+ return !!p_vf_info->vport_instance;
+}
+
+bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid)
+{
+ struct ecore_vf_info *p_vf_info;
+
+ p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!p_vf_info)
+ return true;
+
+ return p_vf_info->state == VF_STOPPED;
+}
+
+bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid)
+{
+ struct ecore_vf_info *vf_info;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info)
+ return false;
+
+ return vf_info->spoof_chk;
+}
+
+enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
+ int vfid, bool val)
+{
+ struct ecore_vf_info *vf;
+ enum _ecore_status_t rc = ECORE_INVAL;
+
+ if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
+ DP_NOTICE(p_hwfn, true,
+ "SR-IOV sanity check failed, can't set spoofchk\n");
+ goto out;
+ }
+
+ vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf)
+ goto out;
+
+ if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
+ /* After VF VPORT start PF will configure spoof check */
+ vf->req_spoofchk_val = val;
+ rc = ECORE_SUCCESS;
+ goto out;
+ }
+
+ rc = __ecore_iov_spoofchk_set(p_hwfn, vf, val);
+
+out:
+ return rc;
+}
+
+u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn)
+{
+ u8 max_chains_per_vf = p_hwfn->hw_info.max_chains_per_vf;
+
+ max_chains_per_vf = (max_chains_per_vf) ? max_chains_per_vf
+ : ECORE_MAX_VF_CHAINS_PER_PF;
+
+ return max_chains_per_vf;
+}
+
+void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id,
+ void **pp_req_virt_addr,
+ u16 *p_req_virt_size)
+{
+ struct ecore_vf_info *vf_info =
+ ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+
+ if (!vf_info)
+ return;
+
+ if (pp_req_virt_addr)
+ *pp_req_virt_addr = vf_info->vf_mbx.req_virt;
+
+ if (p_req_virt_size)
+ *p_req_virt_size = sizeof(*vf_info->vf_mbx.req_virt);
+}
+
+void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id,
+ void **pp_reply_virt_addr,
+ u16 *p_reply_virt_size)
+{
+ struct ecore_vf_info *vf_info =
+ ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+
+ if (!vf_info)
+ return;
+
+ if (pp_reply_virt_addr)
+ *pp_reply_virt_addr = vf_info->vf_mbx.reply_virt;
+
+ if (p_reply_virt_size)
+ *p_reply_virt_size = sizeof(*vf_info->vf_mbx.reply_virt);
+}
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+struct ecore_iov_sw_mbx *ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ struct ecore_vf_info *vf_info =
+ ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+
+ if (!vf_info)
+ return OSAL_NULL;
+
+ return &vf_info->vf_mbx.sw_mbx;
+}
+#endif
+
+bool ecore_iov_is_valid_vfpf_msg_length(u32 length)
+{
+ return (length >= sizeof(struct vfpf_first_tlv) &&
+ (length <= sizeof(union vfpf_tlvs)));
+}
+
+u32 ecore_iov_pfvf_msg_length(void)
+{
+ return sizeof(union pfvf_tlvs);
+}
+
+u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf || !p_vf->bulletin.p_virt)
+ return OSAL_NULL;
+
+ if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
+ return OSAL_NULL;
+
+ return p_vf->bulletin.p_virt->mac;
+}
+
+u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf || !p_vf->bulletin.p_virt)
+ return 0;
+
+ if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
+ return 0;
+
+ return p_vf->bulletin.p_virt->pvid;
+}
+
+enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ int vfid, int val)
+{
+ struct ecore_vf_info *vf;
+ u8 abs_vp_id = 0;
+ enum _ecore_status_t rc;
+
+ vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+
+ if (!vf)
+ return ECORE_INVAL;
+
+ rc = ecore_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
+}
+
+enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ int vfid,
+ struct ecore_eth_stats *p_stats)
+{
+ struct ecore_vf_info *vf;
+
+ vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf)
+ return ECORE_INVAL;
+
+ if (vf->state != VF_ENABLED)
+ return ECORE_INVAL;
+
+ __ecore_get_vport_stats(p_hwfn, p_ptt, p_stats,
+ vf->abs_vf_id + 0x10, false);
+
+ return ECORE_SUCCESS;
+}
+
+u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf)
+ return 0;
+
+ return p_vf->num_rxqs;
+}
+
+u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf)
+ return 0;
+
+ return p_vf->num_active_rxqs;
+}
+
+void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf)
+ return OSAL_NULL;
+
+ return p_vf->ctx;
+}
+
+u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf)
+ return 0;
+
+ return p_vf->num_sbs;
+}
+
+bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf)
+ return false;
+
+ return (p_vf->state == VF_FREE);
+}
+
+bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf)
+ return false;
+
+ return (p_vf->state == VF_ACQUIRED);
+}
+
+bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf)
+ return false;
+
+ return (p_vf->state == VF_ENABLED);
+}
+
+bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf)
+ return false;
+
+ return (p_vf->state != VF_FREE && p_vf->state != VF_STOPPED);
+}
+
+enum _ecore_status_t
+ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
+{
+ struct ecore_wfq_data *vf_vp_wfq;
+ struct ecore_vf_info *vf_info;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info)
+ return 0;
+
+ vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
+
+ if (vf_vp_wfq->configured)
+ return vf_vp_wfq->min_speed;
+ else
+ return 0;
+}
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_sriov.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_sriov.h
new file mode 100644
index 00000000..3c2f58bd
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_sriov.h
@@ -0,0 +1,317 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_SRIOV_H__
+#define __ECORE_SRIOV_H__
+
+#include "ecore_status.h"
+#include "ecore_vfpf_if.h"
+#include "ecore_iov_api.h"
+#include "ecore_hsi_common.h"
+#include "ecore_l2.h"
+
+#define ECORE_ETH_MAX_VF_NUM_VLAN_FILTERS \
+ (E4_MAX_NUM_VFS * ECORE_ETH_VF_NUM_VLAN_FILTERS)
+
+/* Represents a full message: both the request filled by the VF
+ * and the response filled by the PF. The VF needs one copy of this
+ * message; it fills the request part and sends it to the PF.
+ * The PF copies its response into the response part for the VF to
+ * later read. The PF also holds one such message per VF: the request
+ * received from the VF is copied into the request part, and the
+ * response is filled by the PF before it is sent back to the VF.
+ */
+struct ecore_vf_mbx_msg {
+ union vfpf_tlvs req;
+ union pfvf_tlvs resp;
+};
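+
+/* A rough sketch of how one such message travels (illustrative only; the
+ * helpers named here are the ones defined elsewhere in this series):
+ *   1. The VF fills msg->req in its own copy and posts its physical address
+ *      over the HW channel (ecore_send_msg2pf() in ecore_vf.c).
+ *   2. The PF DMAs the request into its per-VF copy
+ *      (ecore_iov_copy_vf_msg()), handles it, and fills msg->resp.
+ *   3. The PF copies the response back to the reply address the VF posted,
+ *      and the VF then reads msg->resp.
+ */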
+
+/* This mailbox is maintained per VF in its PF, and contains all the
+ * information required for sending / receiving a message.
+ */
+struct ecore_iov_vf_mbx {
+ union vfpf_tlvs *req_virt;
+ dma_addr_t req_phys;
+ union pfvf_tlvs *reply_virt;
+ dma_addr_t reply_phys;
+
+ /* Address in VF where a pending message is located */
+ dma_addr_t pending_req;
+
+ u8 *offset;
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+ struct ecore_iov_sw_mbx sw_mbx;
+#endif
+
+ /* VF GPA address */
+ u32 vf_addr_lo;
+ u32 vf_addr_hi;
+
+ struct vfpf_first_tlv first_tlv; /* saved VF request header */
+
+ u8 flags;
+#define VF_MSG_INPROCESS 0x1 /* failsafe - the FW should prevent
+					 * more than one pending msg
+ */
+};
+
+struct ecore_vf_queue_cid {
+ bool b_is_tx;
+ struct ecore_queue_cid *p_cid;
+};
+
+/* Describes a qzone associated with the VF */
+struct ecore_vf_queue {
+	/* Input from upper-layer, mapping relative queue to queue-zone */
+ u16 fw_rx_qid;
+ u16 fw_tx_qid;
+
+ struct ecore_vf_queue_cid cids[MAX_QUEUES_PER_QZONE];
+};
+
+enum vf_state {
+	VF_FREE = 0,	/* VF ready to be acquired; holds no resources */
+	VF_ACQUIRED = 1, /* VF acquired, but not initialized */
+ VF_ENABLED = 2, /* VF, Enabled */
+ VF_RESET = 3, /* VF, FLR'd, pending cleanup */
+ VF_STOPPED = 4 /* VF, Stopped */
+};
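+
+/* Typical life cycle, a sketch inferred from the handlers in ecore_sriov.c
+ * (not an exhaustive transition table):
+ *
+ *	VF_FREE -> VF_ACQUIRED (acquire) -> VF_ENABLED (vport start)
+ *	VF_ENABLED -> VF_RESET (FLR) -> VF_STOPPED (FLR cleanup)
+ *	any state -> VF_STOPPED (release / stop)
+ */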
+
+struct ecore_vf_vlan_shadow {
+ bool used;
+ u16 vid;
+};
+
+struct ecore_vf_shadow_config {
+ /* Shadow copy of all guest vlans */
+ struct ecore_vf_vlan_shadow vlans[ECORE_ETH_VF_NUM_VLAN_FILTERS + 1];
+
+ /* Shadow copy of all configured MACs; Empty if forcing MACs */
+ u8 macs[ECORE_ETH_VF_NUM_MAC_FILTERS][ETH_ALEN];
+ u8 inner_vlan_removal;
+};
+
+/* PFs maintain an array of this structure, per VF */
+struct ecore_vf_info {
+ struct ecore_iov_vf_mbx vf_mbx;
+ enum vf_state state;
+ bool b_init;
+ bool b_malicious;
+ u8 to_disable;
+
+ struct ecore_bulletin bulletin;
+ dma_addr_t vf_bulletin;
+
+ /* PF saves a copy of the last VF acquire message */
+ struct vfpf_acquire_tlv acquire;
+
+ u32 concrete_fid;
+ u16 opaque_fid;
+ u16 mtu;
+
+ u8 vport_id;
+ u8 rss_eng_id;
+ u8 relative_vf_id;
+ u8 abs_vf_id;
+#define ECORE_VF_ABS_ID(p_hwfn, p_vf) (ECORE_PATH_ID(p_hwfn) ? \
+ (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
+ (p_vf)->abs_vf_id)
+
+ u8 vport_instance; /* Number of active vports */
+ u8 num_rxqs;
+ u8 num_txqs;
+
+ u8 num_sbs;
+
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+
+ struct ecore_vf_queue vf_queues[ECORE_MAX_VF_CHAINS_PER_PF];
+ u16 igu_sbs[ECORE_MAX_VF_CHAINS_PER_PF];
+
+ /* TODO - Only windows is using it - should be removed */
+ u8 was_malicious;
+ u8 num_active_rxqs;
+ void *ctx;
+ struct ecore_public_vf_info p_vf_info;
+ bool spoof_chk; /* Current configured on HW */
+ bool req_spoofchk_val; /* Requested value */
+
+ /* Stores the configuration requested by VF */
+ struct ecore_vf_shadow_config shadow_config;
+
+ /* A bitfield using bulletin's valid-map bits, used to indicate
+ * which of the bulletin board features have been configured.
+ */
+ u64 configured_features;
+#define ECORE_IOV_CONFIGURED_FEATURES_MASK ((1 << MAC_ADDR_FORCED) | \
+ (1 << VLAN_ADDR_FORCED))
+};
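+
+/* Sketch (not part of the driver): configured_features is tested with the
+ * same bit positions as the bulletin's valid_bitmap; `mac_forced' below is a
+ * hypothetical local used only for illustration:
+ *
+ *	bool mac_forced = !!(p_vf->configured_features &
+ *			     (1 << MAC_ADDR_FORCED));
+ */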
+
+/* This structure is part of ecore_hwfn and used only for PFs that have sriov
+ * capability enabled.
+ */
+struct ecore_pf_iov {
+ struct ecore_vf_info vfs_array[E4_MAX_NUM_VFS];
+ u64 pending_events[ECORE_VF_ARRAY_LENGTH];
+ u64 pending_flr[ECORE_VF_ARRAY_LENGTH];
+
+#ifndef REMOVE_DBG
+ /* This doesn't serve anything functionally, but it makes windows
+ * debugging of IOV related issues easier.
+ */
+ u64 active_vfs[ECORE_VF_ARRAY_LENGTH];
+#endif
+
+	/* Allocate message addresses contiguously and split them among the VFs */
+ void *mbx_msg_virt_addr;
+ dma_addr_t mbx_msg_phys_addr;
+ u32 mbx_msg_size;
+ void *mbx_reply_virt_addr;
+ dma_addr_t mbx_reply_phys_addr;
+ u32 mbx_reply_size;
+ void *p_bulletins;
+ dma_addr_t bulletins_phys;
+ u32 bulletins_size;
+};
+
+#ifdef CONFIG_ECORE_SRIOV
+/**
+ * @brief Read SR-IOV related information and allocate resources;
+ *        reads from configuration space, shmem, etc.
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_add_tlv - place a given tlv on the tlv buffer at next offset
+ *
+ * @param p_hwfn
+ * @param offset
+ * @param type
+ * @param length
+ *
+ * @return pointer to the newly placed tlv
+ */
+void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
+ u8 **offset,
+ u16 type,
+ u16 length);
+
+/**
+ * @brief list the types and lengths of the tlvs on the buffer
+ *
+ * @param p_hwfn
+ * @param tlvs_list
+ */
+void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn,
+ void *tlvs_list);
+
+/**
+ * @brief ecore_iov_alloc - allocate sriov related resources
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_iov_setup - setup sriov related resources
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void ecore_iov_setup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_iov_free - free sriov related resources
+ *
+ * @param p_hwfn
+ */
+void ecore_iov_free(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief free sriov related memory that was allocated during hw_prepare
+ *
+ * @param p_dev
+ */
+void ecore_iov_free_hw_info(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_sriov_eqe_event - handle async sriov event arrived on eqe.
+ *
+ * @param p_hwfn
+ * @param opcode
+ * @param echo
+ * @param data
+ */
+enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo,
+ union event_ring_data *data);
+
+/**
+ * @brief calculate CRC for bulletin board validation
+ *
+ * @param crc - basic crc seed
+ * @param ptr - pointer to the beginning of the buffer
+ * @param length - length of the buffer in bytes
+ *
+ * @return calculated crc over buffer [with respect to seed].
+ */
+u32 ecore_crc32(u32 crc,
+ u8 *ptr,
+ u32 length);
+
+/**
+ * @brief Mark structs of vfs that have been FLR-ed.
+ *
+ * @param p_hwfn
+ * @param disabled_vfs - bitmask of all VFs on path that were FLRed
+ *
+ * @return true iff at least one of the PF's VFs got FLR-ed; false otherwise.
+ */
+bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn,
+ u32 *disabled_vfs);
+
+/**
+ * @brief Search extended TLVs in request/reply buffer.
+ *
+ * @param p_hwfn
+ * @param p_tlvs_list - Pointer to tlvs list
+ * @param req_type - Type of TLV
+ *
+ * @return pointer to tlv type if found, otherwise returns NULL.
+ */
+void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
+ void *p_tlvs_list, u16 req_type);
+
+/**
+ * @brief ecore_iov_get_vf_info - return the database of a
+ * specific VF
+ *
+ * @param p_hwfn
+ * @param relative_vf_id - relative id of the VF for which info
+ * is requested
+ * @param b_enabled_only - false if the VF should be returned even when it is disabled
+ *
+ * @return struct ecore_vf_info*
+ */
+struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
+ u16 relative_vf_id,
+ bool b_enabled_only);
+#endif
+#endif /* __ECORE_SRIOV_H__ */
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_status.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_status.h
new file mode 100644
index 00000000..c77ec260
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_status.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_STATUS_H__
+#define __ECORE_STATUS_H__
+
+enum _ecore_status_t {
+ ECORE_CONN_RESET = -13,
+ ECORE_UNKNOWN_ERROR = -12,
+ ECORE_NORESOURCES = -11,
+ ECORE_NODEV = -10,
+ ECORE_ABORTED = -9,
+ ECORE_AGAIN = -8,
+ ECORE_NOTIMPL = -7,
+ ECORE_EXISTS = -6,
+ ECORE_IO = -5,
+ ECORE_TIMEOUT = -4,
+ ECORE_INVAL = -3,
+ ECORE_BUSY = -2,
+ ECORE_NOMEM = -1,
+ ECORE_SUCCESS = 0,
+ /* PENDING is not an error and should be positive */
+ ECORE_PENDING = 1,
+};
+
+#endif /* __ECORE_STATUS_H__ */
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_utils.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_utils.h
new file mode 100644
index 00000000..034cf1eb
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_utils.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_UTILS_H__
+#define __ECORE_UTILS_H__
+
+/* dma_addr_t manip */
+/* Suppress "right shift count >= width of type" warning when that quantity is
+ * 32-bits rquires the >> 16) >> 16)
+ */
+#define PTR_LO(x) ((u32)(((osal_uintptr_t)(x)) & 0xffffffff))
+#define PTR_HI(x) ((u32)((((osal_uintptr_t)(x)) >> 16) >> 16))
+
+#define DMA_LO(x) ((u32)(((dma_addr_t)(x)) & 0xffffffff))
+#define DMA_HI(x) ((u32)(((dma_addr_t)(x)) >> 32))
+
+#define DMA_LO_LE(x) OSAL_CPU_TO_LE32(DMA_LO(x))
+#define DMA_HI_LE(x) OSAL_CPU_TO_LE32(DMA_HI(x))
+
+/* It's assumed that whoever includes this has previously included an hsi
+ * file defining the regpair.
+ */
+#define DMA_REGPAIR_LE(x, val) (x).hi = DMA_HI_LE((val)); \
+ (x).lo = DMA_LO_LE((val))
+
+#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
+#define HILO_DMA(hi, lo) HILO_GEN(hi, lo, dma_addr_t)
+#define HILO_64(hi, lo) HILO_GEN(hi, lo, u64)
+#define HILO_DMA_REGPAIR(regpair) (HILO_DMA(regpair.hi, regpair.lo))
+#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo))
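+
+/* Illustrative use of the helpers above (a sketch, not driver code; `addr'
+ * and `pair' are hypothetical locals, and the round trip shown assumes a
+ * little-endian host since the regpair is stored in LE byte order):
+ *
+ *	dma_addr_t addr = 0x123456789abcULL;
+ *	struct regpair pair;
+ *
+ *	DMA_REGPAIR_LE(pair, addr);	pair.hi = 0x1234, pair.lo = 0x56789abc
+ *	addr = HILO_DMA_REGPAIR(pair);	recombines hi/lo into the address
+ */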
+
+#endif
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_vf.c b/src/seastar/dpdk/drivers/net/qede/base/ecore_vf.c
new file mode 100644
index 00000000..f4d331cf
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_vf.c
@@ -0,0 +1,1705 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "ecore.h"
+#include "ecore_hsi_eth.h"
+#include "ecore_sriov.h"
+#include "ecore_l2_api.h"
+#include "ecore_vf.h"
+#include "ecore_vfpf_if.h"
+#include "ecore_status.h"
+#include "reg_addr.h"
+#include "ecore_int.h"
+#include "ecore_l2.h"
+#include "ecore_mcp_api.h"
+#include "ecore_vf_api.h"
+
+static void *ecore_vf_pf_prep(struct ecore_hwfn *p_hwfn, u16 type, u16 length)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ void *p_tlv;
+
+	/* This lock is released in ecore_vf_pf_req_end(), after the PF's
+	 * response has been handled. So ecore_vf_pf_prep() and
+	 * ecore_vf_pf_req_end() must always come in sequence.
+ */
+ OSAL_MUTEX_ACQUIRE(&p_iov->mutex);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "preparing to send %s tlv over vf pf channel\n",
+ ecore_channel_tlvs_string[type]);
+
+ /* Reset Request offset */
+ p_iov->offset = (u8 *)(p_iov->vf2pf_request);
+
+ /* Clear mailbox - both request and reply */
+ OSAL_MEMSET(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
+ OSAL_MEMSET(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
+
+ /* Init type and length */
+ p_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, type, length);
+
+ /* Init first tlv header */
+ ((struct vfpf_first_tlv *)p_tlv)->reply_address =
+ (u64)p_iov->pf2vf_reply_phys;
+
+ return p_tlv;
+}
+
+static void ecore_vf_pf_req_end(struct ecore_hwfn *p_hwfn,
+ enum _ecore_status_t req_status)
+{
+ union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF request status = 0x%x, PF reply status = 0x%x\n",
+ req_status, resp->default_resp.hdr.status);
+
+ OSAL_MUTEX_RELEASE(&p_hwfn->vf_iov_info->mutex);
+}
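+
+/* The two helpers above (plus ecore_send_msg2pf() below) compose into a
+ * fixed calling pattern; this is a condensed sketch of what e.g.
+ * ecore_vf_pf_acquire() does, where `resp' is whichever reply TLV the
+ * request expects:
+ *
+ *	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
+ *	... fill *req, then terminate the TLV chain ...
+ *	ecore_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
+ *		      sizeof(struct channel_list_end_tlv));
+ *	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ *	... process the reply ...
+ *	ecore_vf_pf_req_end(p_hwfn, rc);	releases the mutex taken in prep
+ */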
+
+static enum _ecore_status_t
+ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
+ u8 *done, u32 resp_size)
+{
+ union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
+ struct ustorm_trigger_vf_zone trigger;
+ struct ustorm_vf_zone *zone_data;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ int time = 100;
+
+ zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;
+
+ /* output tlvs list */
+ ecore_dp_tlv_list(p_hwfn, p_req);
+
+ /* need to add the END TLV to the message size */
+ resp_size += sizeof(struct channel_list_end_tlv);
+
+ /* Send TLVs over HW channel */
+ OSAL_MEMSET(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
+ trigger.vf_pf_msg_valid = 1;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF -> PF [%02x] message: [%08x, %08x] --> %p,"
+ " %08x --> %p\n",
+ GET_FIELD(p_hwfn->hw_info.concrete_fid,
+ PXP_CONCRETE_FID_PFID),
+ U64_HI(p_hwfn->vf_iov_info->vf2pf_request_phys),
+ U64_LO(p_hwfn->vf_iov_info->vf2pf_request_phys),
+ &zone_data->non_trigger.vf_pf_msg_addr,
+ *((u32 *)&trigger), &zone_data->trigger);
+
+ REG_WR(p_hwfn,
+ (osal_uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
+ U64_LO(p_hwfn->vf_iov_info->vf2pf_request_phys));
+
+ REG_WR(p_hwfn,
+ (osal_uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
+ U64_HI(p_hwfn->vf_iov_info->vf2pf_request_phys));
+
+ /* The message data must be written first, to prevent trigger before
+ * data is written.
+ */
+ OSAL_WMB(p_hwfn->p_dev);
+
+ REG_WR(p_hwfn, (osal_uintptr_t)&zone_data->trigger,
+ *((u32 *)&trigger));
+
+	/* When the PF is done with the response, it writes back to the
+	 * `done' address. Poll until then.
+ */
+ while ((!*done) && time) {
+ OSAL_MSLEEP(25);
+ time--;
+ }
+
+ if (!*done) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF <-- PF Timeout [Type %d]\n",
+ p_req->first_tlv.tl.type);
+ rc = ECORE_TIMEOUT;
+ } else {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "PF response: %d [Type %d]\n",
+ *done, p_req->first_tlv.tl.type);
+ }
+
+ return rc;
+}
+
+#define VF_ACQUIRE_THRESH 3
+static void ecore_vf_pf_acquire_reduce_resc(struct ecore_hwfn *p_hwfn,
+ struct vf_pf_resc_request *p_req,
+ struct pf_vf_resc *p_resp)
+{
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "PF unwilling to fullill resource request: rxq [%02x/%02x]"
+ " txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x]"
+ " vlan [%02x/%02x] mc [%02x/%02x]."
+ " Try PF recommended amount\n",
+ p_req->num_rxqs, p_resp->num_rxqs,
+		   p_req->num_txqs, p_resp->num_txqs,
+ p_req->num_sbs, p_resp->num_sbs,
+ p_req->num_mac_filters, p_resp->num_mac_filters,
+ p_req->num_vlan_filters, p_resp->num_vlan_filters,
+ p_req->num_mc_filters, p_resp->num_mc_filters);
+
+ /* humble our request */
+ p_req->num_txqs = p_resp->num_txqs;
+ p_req->num_rxqs = p_resp->num_rxqs;
+ p_req->num_sbs = p_resp->num_sbs;
+ p_req->num_mac_filters = p_resp->num_mac_filters;
+ p_req->num_vlan_filters = p_resp->num_vlan_filters;
+ p_req->num_mc_filters = p_resp->num_mc_filters;
+}
+
+static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
+ struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
+ struct ecore_vf_acquire_sw_info vf_sw_info;
+ struct vf_pf_resc_request *p_resc;
+ bool resources_acquired = false;
+ struct vfpf_acquire_tlv *req;
+ int attempts = 0;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* clear mailbox and prep first tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
+ p_resc = &req->resc_request;
+
+ /* @@@ TBD: PF may not be ready bnx2x_get_vf_id... */
+ req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ p_resc->num_rxqs = ECORE_MAX_VF_CHAINS_PER_PF;
+ p_resc->num_txqs = ECORE_MAX_VF_CHAINS_PER_PF;
+ p_resc->num_sbs = ECORE_MAX_VF_CHAINS_PER_PF;
+ p_resc->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
+ p_resc->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
+
+ OSAL_MEMSET(&vf_sw_info, 0, sizeof(vf_sw_info));
+ OSAL_VF_FILL_ACQUIRE_RESC_REQ(p_hwfn, &req->resc_request, &vf_sw_info);
+
+ req->vfdev_info.os_type = vf_sw_info.os_type;
+ req->vfdev_info.driver_version = vf_sw_info.driver_version;
+ req->vfdev_info.fw_major = FW_MAJOR_VERSION;
+ req->vfdev_info.fw_minor = FW_MINOR_VERSION;
+ req->vfdev_info.fw_revision = FW_REVISION_VERSION;
+ req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
+ req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
+ req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;
+
+ /* Fill capability field with any non-deprecated config we support */
+ req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;
+
+ /* pf 2 vf bulletin board address */
+ req->bulletin_addr = p_iov->bulletin.phys;
+ req->bulletin_size = p_iov->bulletin.size;
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ while (!resources_acquired) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "attempting to acquire resources\n");
+
+ /* Clear response buffer, as this might be a re-send */
+ OSAL_MEMSET(p_iov->pf2vf_reply, 0,
+ sizeof(union pfvf_tlvs));
+
+ /* send acquire request */
+ rc = ecore_send_msg2pf(p_hwfn,
+ &resp->hdr.status, sizeof(*resp));
+
+ /* PF timeout */
+ if (rc)
+ return rc;
+
+ /* copy acquire response from buffer to p_hwfn */
+ OSAL_MEMCPY(&p_iov->acquire_resp,
+ resp, sizeof(p_iov->acquire_resp));
+
+ attempts++;
+
+ if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
+ /* PF agrees to allocate our resources */
+ if (!(resp->pfdev_info.capabilities &
+ PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
+				/* It's possible that a legacy PF mistakenly
+				 * accepted; but we don't care - simply mark
+				 * it as legacy and continue.
+ */
+ req->vfdev_info.capabilities |=
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI;
+ }
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "resources acquired\n");
+ resources_acquired = true;
+		} else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
+			   attempts < VF_ACQUIRE_THRESH) {
+			/* PF refuses to allocate our resources */
+			ecore_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
+							&resp->resc);
+
+ } else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) {
+ if (pfdev_info->major_fp_hsi &&
+ (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
+ DP_NOTICE(p_hwfn, false,
+ "PF uses an incompatible fastpath HSI"
+ " %02x.%02x [VF requires %02x.%02x]."
+ " Please change to a VF driver using"
+ " %02x.xx.\n",
+ pfdev_info->major_fp_hsi,
+ pfdev_info->minor_fp_hsi,
+ ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR,
+ pfdev_info->major_fp_hsi);
+ rc = ECORE_INVAL;
+ goto exit;
+ }
+
+ if (!pfdev_info->major_fp_hsi) {
+ if (req->vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
+ DP_NOTICE(p_hwfn, false,
+ "PF uses very old drivers."
+ " Please change to a VF"
+ " driver using no later than"
+ " 8.8.x.x.\n");
+ rc = ECORE_INVAL;
+ goto exit;
+ } else {
+ DP_INFO(p_hwfn,
+ "PF is old - try re-acquire to"
+ " see if it supports FW-version"
+ " override\n");
+ req->vfdev_info.capabilities |=
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI;
+ continue;
+ }
+ }
+
+			/* If the PF/VF are using the same major, the PF must
+			 * have had its reasons. Simply fail.
+ */
+ DP_NOTICE(p_hwfn, false,
+ "PF rejected acquisition by VF\n");
+ rc = ECORE_INVAL;
+ goto exit;
+ } else {
+ DP_ERR(p_hwfn,
+ "PF returned err %d to VF acquisition request\n",
+ resp->hdr.status);
+ rc = ECORE_AGAIN;
+ goto exit;
+ }
+ }
+
+ /* Mark the PF as legacy, if needed */
+ if (req->vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI)
+ p_iov->b_pre_fp_hsi = true;
+
+ rc = OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(p_hwfn, &resp->resc);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true,
+ "VF_UPDATE_ACQUIRE_RESC_RESP Failed:"
+ " status = 0x%x.\n",
+ rc);
+ rc = ECORE_AGAIN;
+ goto exit;
+ }
+
+ /* Update bulletin board size with response from PF */
+ p_iov->bulletin.size = resp->bulletin_size;
+
+ /* get HW info */
+ p_hwfn->p_dev->type = resp->pfdev_info.dev_type;
+ p_hwfn->p_dev->chip_rev = resp->pfdev_info.chip_rev;
+
+ DP_INFO(p_hwfn, "Chip details - %s%d\n",
+ ECORE_IS_BB(p_hwfn->p_dev) ? "BB" : "AH",
+ CHIP_REV_IS_A0(p_hwfn->p_dev) ? 0 : 1);
+
+ p_hwfn->p_dev->chip_num = pfdev_info->chip_num & 0xffff;
+
+ /* Learn of the possibility of CMT */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
+ DP_INFO(p_hwfn, "100g VF\n");
+ p_hwfn->p_dev->num_hwfns = 2;
+ }
+ }
+
+ /* @DPDK */
+ if ((~p_iov->b_pre_fp_hsi &
+ ETH_HSI_VER_MINOR) &&
+ (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR))
+ DP_INFO(p_hwfn,
+ "PF is using older fastpath HSI;"
+ " %02x.%02x is configured\n",
+ ETH_HSI_VER_MAJOR,
+ resp->pfdev_info.minor_fp_hsi);
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_vf_iov *p_iov;
+ u32 reg;
+
+ /* Set number of hwfns - might be overridden once leading hwfn learns
+ * actual configuration from PF.
+ */
+ if (IS_LEAD_HWFN(p_hwfn))
+ p_hwfn->p_dev->num_hwfns = 1;
+
+ /* Set the doorbell bar. Assumption: regview is set */
+ p_hwfn->doorbells = (u8 OSAL_IOMEM *)p_hwfn->regview +
+ PXP_VF_BAR0_START_DQ;
+
+ reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
+ p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);
+
+ reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
+ p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);
+
+ /* Allocate vf sriov info */
+ p_iov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_iov));
+ if (!p_iov) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate `struct ecore_sriov'\n");
+ return ECORE_NOMEM;
+ }
+
+ /* Allocate vf2pf msg */
+ p_iov->vf2pf_request = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+ &p_iov->
+ vf2pf_request_phys,
+ sizeof(union
+ vfpf_tlvs));
+ if (!p_iov->vf2pf_request) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate `vf2pf_request' DMA memory\n");
+ goto free_p_iov;
+ }
+
+ p_iov->pf2vf_reply = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+ &p_iov->
+ pf2vf_reply_phys,
+ sizeof(union pfvf_tlvs));
+ if (!p_iov->pf2vf_reply) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate `pf2vf_reply' DMA memory\n");
+ goto free_vf2pf_request;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF's Request mailbox [%p virt 0x%lx phys], "
+ "Response mailbox [%p virt 0x%lx phys]\n",
+ p_iov->vf2pf_request,
+ (unsigned long)p_iov->vf2pf_request_phys,
+ p_iov->pf2vf_reply,
+ (unsigned long)p_iov->pf2vf_reply_phys);
+
+ /* Allocate Bulletin board */
+ p_iov->bulletin.size = sizeof(struct ecore_bulletin_content);
+ p_iov->bulletin.p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+ &p_iov->bulletin.
+ phys,
+ p_iov->bulletin.
+ size);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF's bulletin Board [%p virt 0x%lx phys 0x%08x bytes]\n",
+ p_iov->bulletin.p_virt, (unsigned long)p_iov->bulletin.phys,
+ p_iov->bulletin.size);
+
+ OSAL_MUTEX_ALLOC(p_hwfn, &p_iov->mutex);
+ OSAL_MUTEX_INIT(&p_iov->mutex);
+
+ p_hwfn->vf_iov_info = p_iov;
+
+ p_hwfn->hw_info.personality = ECORE_PCI_ETH;
+
+ return ecore_vf_pf_acquire(p_hwfn);
+
+free_vf2pf_request:
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_iov->vf2pf_request,
+ p_iov->vf2pf_request_phys,
+ sizeof(union vfpf_tlvs));
+free_p_iov:
+ OSAL_FREE(p_hwfn->p_dev, p_iov);
+
+ return ECORE_NOMEM;
+}
+
+#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A
+#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
+ (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
+
+/* @DPDK - changed enum ecore_tunn_clss to enum ecore_tunn_mode */
+static void
+__ecore_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
+ struct ecore_tunn_update_type *p_src,
+ enum ecore_tunn_mode mask, u8 *p_cls)
+{
+ if (p_src->b_update_mode) {
+ p_req->tun_mode_update_mask |= (1 << mask);
+
+ if (p_src->b_mode_enabled)
+ p_req->tunn_mode |= (1 << mask);
+ }
+
+ *p_cls = p_src->tun_cls;
+}
+
+/* @DPDK - changed enum ecore_tunn_clss to enum ecore_tunn_mode */
+static void
+ecore_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
+ struct ecore_tunn_update_type *p_src,
+ enum ecore_tunn_mode mask, u8 *p_cls,
+ struct ecore_tunn_update_udp_port *p_port,
+ u8 *p_update_port, u16 *p_udp_port)
+{
+ if (p_port->b_update_port) {
+ *p_update_port = 1;
+ *p_udp_port = p_port->port;
+ }
+
+ __ecore_vf_prep_tunn_req_tlv(p_req, p_src, mask, p_cls);
+}
+
+void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun)
+{
+ if (p_tun->vxlan.b_mode_enabled)
+ p_tun->vxlan.b_update_mode = true;
+ if (p_tun->l2_geneve.b_mode_enabled)
+ p_tun->l2_geneve.b_update_mode = true;
+ if (p_tun->ip_geneve.b_mode_enabled)
+ p_tun->ip_geneve.b_update_mode = true;
+ if (p_tun->l2_gre.b_mode_enabled)
+ p_tun->l2_gre.b_update_mode = true;
+ if (p_tun->ip_gre.b_mode_enabled)
+ p_tun->ip_gre.b_update_mode = true;
+
+ p_tun->b_update_rx_cls = true;
+ p_tun->b_update_tx_cls = true;
+}
+
+static void
+__ecore_vf_update_tunn_param(struct ecore_tunn_update_type *p_tun,
+ u16 feature_mask, u8 tunn_mode, u8 tunn_cls,
+ enum ecore_tunn_mode val)
+{
+ if (feature_mask & (1 << val)) {
+ p_tun->b_mode_enabled = tunn_mode;
+ p_tun->tun_cls = tunn_cls;
+ } else {
+ p_tun->b_mode_enabled = false;
+ }
+}
+
+static void
+ecore_vf_update_tunn_param(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunnel_info *p_tun,
+ struct pfvf_update_tunn_param_tlv *p_resp)
+{
+ /* Update mode and classes provided by PF */
+ u16 feat_mask = p_resp->tunn_feature_mask;
+
+ __ecore_vf_update_tunn_param(&p_tun->vxlan, feat_mask,
+ p_resp->vxlan_mode, p_resp->vxlan_clss,
+ ECORE_MODE_VXLAN_TUNN);
+ __ecore_vf_update_tunn_param(&p_tun->l2_geneve, feat_mask,
+ p_resp->l2geneve_mode,
+ p_resp->l2geneve_clss,
+ ECORE_MODE_L2GENEVE_TUNN);
+ __ecore_vf_update_tunn_param(&p_tun->ip_geneve, feat_mask,
+ p_resp->ipgeneve_mode,
+ p_resp->ipgeneve_clss,
+ ECORE_MODE_IPGENEVE_TUNN);
+ __ecore_vf_update_tunn_param(&p_tun->l2_gre, feat_mask,
+ p_resp->l2gre_mode, p_resp->l2gre_clss,
+ ECORE_MODE_L2GRE_TUNN);
+ __ecore_vf_update_tunn_param(&p_tun->ip_gre, feat_mask,
+ p_resp->ipgre_mode, p_resp->ipgre_clss,
+ ECORE_MODE_IPGRE_TUNN);
+ p_tun->geneve_port.port = p_resp->geneve_udp_port;
+ p_tun->vxlan_port.port = p_resp->vxlan_udp_port;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "tunn mode: vxlan=0x%x, l2geneve=0x%x, ipgeneve=0x%x, l2gre=0x%x, ipgre=0x%x",
+ p_tun->vxlan.b_mode_enabled, p_tun->l2_geneve.b_mode_enabled,
+ p_tun->ip_geneve.b_mode_enabled,
+ p_tun->l2_gre.b_mode_enabled,
+ p_tun->ip_gre.b_mode_enabled);
+}
+
+enum _ecore_status_t
+ecore_vf_pf_tunnel_param_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunnel_info *p_src)
+{
+ struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_update_tunn_param_tlv *p_resp;
+ struct vfpf_update_tunn_param_tlv *p_req;
+ enum _ecore_status_t rc;
+
+ p_req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_TUNN_PARAM,
+ sizeof(*p_req));
+
+ if (p_src->b_update_rx_cls && p_src->b_update_tx_cls)
+ p_req->update_tun_cls = 1;
+
+ ecore_vf_prep_tunn_req_tlv(p_req, &p_src->vxlan, ECORE_MODE_VXLAN_TUNN,
+ &p_req->vxlan_clss, &p_src->vxlan_port,
+ &p_req->update_vxlan_port,
+ &p_req->vxlan_port);
+ ecore_vf_prep_tunn_req_tlv(p_req, &p_src->l2_geneve,
+ ECORE_MODE_L2GENEVE_TUNN,
+ &p_req->l2geneve_clss, &p_src->geneve_port,
+ &p_req->update_geneve_port,
+ &p_req->geneve_port);
+ __ecore_vf_prep_tunn_req_tlv(p_req, &p_src->ip_geneve,
+ ECORE_MODE_IPGENEVE_TUNN,
+ &p_req->ipgeneve_clss);
+ __ecore_vf_prep_tunn_req_tlv(p_req, &p_src->l2_gre,
+ ECORE_MODE_L2GRE_TUNN, &p_req->l2gre_clss);
+ __ecore_vf_prep_tunn_req_tlv(p_req, &p_src->ip_gre,
+ ECORE_MODE_IPGRE_TUNN, &p_req->ipgre_clss);
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ p_resp = &p_iov->pf2vf_reply->tunn_param_resp;
+ rc = ecore_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
+
+ if (rc)
+ goto exit;
+
+ if (p_resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Failed to update tunnel parameters\n");
+ rc = ECORE_INVAL;
+ }
+
+ ecore_vf_update_tunn_param(p_hwfn, p_tun, p_resp);
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+ return rc;
+}
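+
+/* Caller sketch (illustrative only, not part of the original flow): a VF that
+ * wants to re-apply its currently enabled tunnel modes can mark them all for
+ * update and then send the request, e.g.:
+ *
+ *	struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
+ *
+ *	ecore_vf_set_vf_start_tunn_update_param(p_tun);
+ *	rc = ecore_vf_pf_tunnel_param_update(p_hwfn, p_tun);
+ */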
+
+enum _ecore_status_t
+ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ void OSAL_IOMEM **pp_prod)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_start_queue_resp_tlv *resp;
+ struct vfpf_start_rxq_tlv *req;
+ u16 rx_qid = p_cid->rel.queue_id;
+ enum _ecore_status_t rc;
+
+ /* clear mailbox and prep first tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));
+
+ req->rx_qid = rx_qid;
+ req->cqe_pbl_addr = cqe_pbl_addr;
+ req->cqe_pbl_size = cqe_pbl_size;
+ req->rxq_addr = bd_chain_phys_addr;
+ req->hw_sb = p_cid->rel.sb;
+ req->sb_index = p_cid->rel.sb_idx;
+ req->bd_max_bytes = bd_max_bytes;
+ req->stat_id = -1; /* Keep initialized, for future compatibility */
+
+ /* If PF is legacy, we'll need to calculate producers ourselves
+ * as well as clean them.
+ */
+ if (p_iov->b_pre_fp_hsi) {
+ u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
+ u32 init_prod_val = 0;
+
+ *pp_prod = (u8 OSAL_IOMEM *)
+ p_hwfn->regview +
+ MSTORM_QZONE_START(p_hwfn->p_dev) +
+ (hw_qid) * MSTORM_QZONE_SIZE;
+
+ /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
+ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
+ (u32 *)(&init_prod_val));
+ }
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->queue_start;
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = ECORE_INVAL;
+ goto exit;
+ }
+
+ /* Learn the address of the producer from the response */
+ if (!p_iov->b_pre_fp_hsi) {
+ u32 init_prod_val = 0;
+
+ *pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview + resp->offset;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
+ rx_qid, *pp_prod, resp->offset);
+
+ /* Init the rcq, rx bd and rx sge (if valid) producers to 0.
+ * It was actually the PF's responsibility, but since some
+ * old PFs might fail to do so, we do this as well.
+ */
+ OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);
+ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
+ (u32 *)&init_prod_val);
+ }
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ bool cqe_completion)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_stop_rxqs_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ enum _ecore_status_t rc;
+
+ /* clear mailbox and prep first tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));
+
+ req->rx_qid = p_cid->rel.queue_id;
+ req->num_rxqs = 1;
+ req->cqe_completion = cqe_completion;
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = ECORE_INVAL;
+ goto exit;
+ }
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ dma_addr_t pbl_addr, u16 pbl_size,
+ void OSAL_IOMEM **pp_doorbell)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_start_queue_resp_tlv *resp;
+ struct vfpf_start_txq_tlv *req;
+ u16 qid = p_cid->rel.queue_id;
+ enum _ecore_status_t rc;
+
+ /* clear mailbox and prep first tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));
+
+ req->tx_qid = qid;
+
+ /* Tx */
+ req->pbl_addr = pbl_addr;
+ req->pbl_size = pbl_size;
+ req->hw_sb = p_cid->rel.sb;
+ req->sb_index = p_cid->rel.sb_idx;
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->queue_start;
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = ECORE_INVAL;
+ goto exit;
+ }
+
+ /* Modern PFs provide the actual offsets, while legacy
+ * provided only the queue id.
+ */
+ if (!p_iov->b_pre_fp_hsi) {
+ *pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
+ resp->offset;
+ } else {
+ u8 cid = p_iov->acquire_resp.resc.cid[qid];
+
+ *pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
+ DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
+ qid, *pp_doorbell, resp->offset);
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_stop_txqs_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ enum _ecore_status_t rc;
+
+ /* clear mailbox and prep first tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));
+
+ req->tx_qid = p_cid->rel.queue_id;
+ req->num_txqs = 1;
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = ECORE_INVAL;
+ goto exit;
+ }
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid **pp_cid,
+ u8 num_rxqs,
+ u8 comp_cqe_flg,
+ u8 comp_event_flg)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+ struct vfpf_update_rxq_tlv *req;
+ enum _ecore_status_t rc;
+
+	/* TODO - API is limited to assuming contiguous regions of queues,
+	 * but VF queues might not fulfill this requirement.
+	 * Need to consider whether we need new TLVs for this, or whether
+	 * simply doing it iteratively is good enough.
+	 */
+ if (!num_rxqs)
+ return ECORE_INVAL;
+
+again:
+ /* clear mailbox and prep first tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_RXQ, sizeof(*req));
+
+	/* Find the length of the current contiguous range of queues, beginning
+	 * at the first queue's index.
+	 */
+ req->rx_qid = (*pp_cid)->rel.queue_id;
+ for (req->num_rxqs = 1; req->num_rxqs < num_rxqs; req->num_rxqs++)
+ if (pp_cid[req->num_rxqs]->rel.queue_id !=
+ req->rx_qid + req->num_rxqs)
+ break;
+
+ if (comp_cqe_flg)
+ req->flags |= VFPF_RXQ_UPD_COMPLETE_CQE_FLAG;
+ if (comp_event_flg)
+ req->flags |= VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG;
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = ECORE_INVAL;
+ goto exit;
+ }
+
+ /* Make sure we're done with all the queues */
+ if (req->num_rxqs < num_rxqs) {
+ num_rxqs -= req->num_rxqs;
+ pp_cid += req->num_rxqs;
+ /* TODO - should we give a non-locked variant instead? */
+ ecore_vf_pf_req_end(p_hwfn, rc);
+ goto again;
+ }
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+ return rc;
+}
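+
+/* Example (sketch): for relative queue ids {2, 3, 7} the loop above sends two
+ * CHANNEL_TLV_UPDATE_RXQ requests - one covering the contiguous range [2, 3]
+ * and a second one covering [7].
+ */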
+
+enum _ecore_status_t
+ecore_vf_pf_vport_start(struct ecore_hwfn *p_hwfn, u8 vport_id,
+ u16 mtu, u8 inner_vlan_removal,
+ enum ecore_tpa_mode tpa_mode, u8 max_buffers_per_cqe,
+ u8 only_untagged)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_vport_start_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ enum _ecore_status_t rc;
+ int i;
+
+ /* clear mailbox and prep first tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));
+
+ req->mtu = mtu;
+ req->vport_id = vport_id;
+ req->inner_vlan_removal = inner_vlan_removal;
+ req->tpa_mode = tpa_mode;
+ req->max_buffers_per_cqe = max_buffers_per_cqe;
+ req->only_untagged = only_untagged;
+
+ /* status blocks */
+ for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++)
+ if (p_hwfn->sbs_info[i])
+ req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = ECORE_INVAL;
+ goto exit;
+ }
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+ enum _ecore_status_t rc;
+
+ /* clear mailbox and prep first tlv */
+ ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
+ sizeof(struct vfpf_first_tlv));
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = ECORE_INVAL;
+ goto exit;
+ }
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+static bool
+ecore_vf_handle_vp_update_is_needed(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_data,
+ u16 tlv)
+{
+ switch (tlv) {
+ case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
+ return !!(p_data->update_vport_active_rx_flg ||
+ p_data->update_vport_active_tx_flg);
+ case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
+#ifndef ASIC_ONLY
+ /* FPGA doesn't have PVFC and so can't support tx-switching */
+ return !!(p_data->update_tx_switching_flg &&
+ !CHIP_REV_IS_FPGA(p_hwfn->p_dev));
+#else
+ return !!p_data->update_tx_switching_flg;
+#endif
+ case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
+ return !!p_data->update_inner_vlan_removal_flg;
+ case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
+ return !!p_data->update_accept_any_vlan_flg;
+ case CHANNEL_TLV_VPORT_UPDATE_MCAST:
+ return !!p_data->update_approx_mcast_flg;
+ case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
+ return !!(p_data->accept_flags.update_rx_mode_config ||
+ p_data->accept_flags.update_tx_mode_config);
+ case CHANNEL_TLV_VPORT_UPDATE_RSS:
+ return !!p_data->rss_params;
+ case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
+ return !!p_data->sge_tpa_params;
+ default:
+ DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d] %s\n",
+ tlv, ecore_channel_tlvs_string[tlv]);
+ return false;
+ }
+}
+
+static void
+ecore_vf_handle_vp_update_tlvs_resp(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_data)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *p_resp;
+ u16 tlv;
+
+ for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+ tlv < CHANNEL_TLV_VPORT_UPDATE_MAX;
+ tlv++) {
+ if (!ecore_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
+ continue;
+
+ p_resp = (struct pfvf_def_resp_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);
+ if (p_resp && p_resp->hdr.status)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "TLV[%d] type %s Configuration %s\n",
+ tlv, ecore_channel_tlvs_string[tlv],
+ (p_resp && p_resp->hdr.status) ? "succeeded"
+ : "failed");
+ }
+}
+
+enum _ecore_status_t
+ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_params)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_vport_update_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ u8 update_rx, update_tx;
+ u32 resp_size = 0;
+ u16 size, tlv;
+ enum _ecore_status_t rc;
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ resp_size = sizeof(*resp);
+
+ update_rx = p_params->update_vport_active_rx_flg;
+ update_tx = p_params->update_vport_active_tx_flg;
+
+ /* clear mailbox and prep header tlv */
+ ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));
+
+ /* Prepare extended tlvs */
+ if (update_rx || update_tx) {
+ struct vfpf_vport_update_activate_tlv *p_act_tlv;
+
+ size = sizeof(struct vfpf_vport_update_activate_tlv);
+ p_act_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
+ size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ if (update_rx) {
+ p_act_tlv->update_rx = update_rx;
+ p_act_tlv->active_rx = p_params->vport_active_rx_flg;
+ }
+
+ if (update_tx) {
+ p_act_tlv->update_tx = update_tx;
+ p_act_tlv->active_tx = p_params->vport_active_tx_flg;
+ }
+ }
+
+ if (p_params->update_inner_vlan_removal_flg) {
+ struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
+
+ size = sizeof(struct vfpf_vport_update_vlan_strip_tlv);
+ p_vlan_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
+ size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ p_vlan_tlv->remove_vlan = p_params->inner_vlan_removal_flg;
+ }
+
+ if (p_params->update_tx_switching_flg) {
+ struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
+
+ size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
+ tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+ p_tx_switch_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+ tlv, size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
+ }
+
+ if (p_params->update_approx_mcast_flg) {
+ struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
+
+ size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
+ p_mcast_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_VPORT_UPDATE_MCAST,
+ size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ OSAL_MEMCPY(p_mcast_tlv->bins, p_params->bins,
+ sizeof(unsigned long) *
+ ETH_MULTICAST_MAC_BINS_IN_REGS);
+ }
+
+ update_rx = p_params->accept_flags.update_rx_mode_config;
+ update_tx = p_params->accept_flags.update_tx_mode_config;
+
+ if (update_rx || update_tx) {
+ struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
+
+ tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+ size = sizeof(struct vfpf_vport_update_accept_param_tlv);
+ p_accept_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ if (update_rx) {
+ p_accept_tlv->update_rx_mode = update_rx;
+ p_accept_tlv->rx_accept_filter =
+ p_params->accept_flags.rx_accept_filter;
+ }
+
+ if (update_tx) {
+ p_accept_tlv->update_tx_mode = update_tx;
+ p_accept_tlv->tx_accept_filter =
+ p_params->accept_flags.tx_accept_filter;
+ }
+ }
+
+ if (p_params->rss_params) {
+ struct ecore_rss_params *rss_params = p_params->rss_params;
+ struct vfpf_vport_update_rss_tlv *p_rss_tlv;
+ int i, table_size;
+
+ size = sizeof(struct vfpf_vport_update_rss_tlv);
+ p_rss_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_VPORT_UPDATE_RSS, size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ if (rss_params->update_rss_config)
+ p_rss_tlv->update_rss_flags |=
+ VFPF_UPDATE_RSS_CONFIG_FLAG;
+ if (rss_params->update_rss_capabilities)
+ p_rss_tlv->update_rss_flags |=
+ VFPF_UPDATE_RSS_CAPS_FLAG;
+ if (rss_params->update_rss_ind_table)
+ p_rss_tlv->update_rss_flags |=
+ VFPF_UPDATE_RSS_IND_TABLE_FLAG;
+ if (rss_params->update_rss_key)
+ p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;
+
+ p_rss_tlv->rss_enable = rss_params->rss_enable;
+ p_rss_tlv->rss_caps = rss_params->rss_caps;
+ p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
+
+ table_size = OSAL_MIN_T(int, T_ETH_INDIRECTION_TABLE_SIZE,
+ 1 << p_rss_tlv->rss_table_size_log);
+ for (i = 0; i < table_size; i++) {
+ struct ecore_queue_cid *p_queue;
+
+ p_queue = rss_params->rss_ind_table[i];
+ p_rss_tlv->rss_ind_table[i] = p_queue->rel.queue_id;
+ }
+
+ OSAL_MEMCPY(p_rss_tlv->rss_key, rss_params->rss_key,
+ sizeof(rss_params->rss_key));
+ }
+
+ if (p_params->update_accept_any_vlan_flg) {
+ struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;
+
+ size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
+ tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
+ p_any_vlan_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+ tlv, size);
+
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+ p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
+ p_any_vlan_tlv->update_accept_any_vlan_flg =
+ p_params->update_accept_any_vlan_flg;
+ }
+
+ if (p_params->sge_tpa_params) {
+ struct ecore_sge_tpa_params *sge_tpa_params;
+ struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
+
+ sge_tpa_params = p_params->sge_tpa_params;
+ size = sizeof(struct vfpf_vport_update_sge_tpa_tlv);
+ p_sge_tpa_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
+ size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ if (sge_tpa_params->update_tpa_en_flg)
+ p_sge_tpa_tlv->update_sge_tpa_flags |=
+ VFPF_UPDATE_TPA_EN_FLAG;
+ if (sge_tpa_params->update_tpa_param_flg)
+ p_sge_tpa_tlv->update_sge_tpa_flags |=
+ VFPF_UPDATE_TPA_PARAM_FLAG;
+
+ if (sge_tpa_params->tpa_ipv4_en_flg)
+ p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_IPV4_EN_FLAG;
+ if (sge_tpa_params->tpa_ipv6_en_flg)
+ p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_IPV6_EN_FLAG;
+ if (sge_tpa_params->tpa_pkt_split_flg)
+ p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_PKT_SPLIT_FLAG;
+ if (sge_tpa_params->tpa_hdr_data_split_flg)
+ p_sge_tpa_tlv->sge_tpa_flags |=
+ VFPF_TPA_HDR_DATA_SPLIT_FLAG;
+ if (sge_tpa_params->tpa_gro_consistent_flg)
+ p_sge_tpa_tlv->sge_tpa_flags |=
+ VFPF_TPA_GRO_CONSIST_FLAG;
+
+ p_sge_tpa_tlv->tpa_max_aggs_num =
+ sge_tpa_params->tpa_max_aggs_num;
+ p_sge_tpa_tlv->tpa_max_size = sge_tpa_params->tpa_max_size;
+ p_sge_tpa_tlv->tpa_min_size_to_start =
+ sge_tpa_params->tpa_min_size_to_start;
+ p_sge_tpa_tlv->tpa_min_size_to_cont =
+ sge_tpa_params->tpa_min_size_to_cont;
+
+ p_sge_tpa_tlv->max_buffers_per_cqe =
+ sge_tpa_params->max_buffers_per_cqe;
+ }
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = ECORE_INVAL;
+ goto exit;
+ }
+
+ ecore_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp;
+ struct vfpf_first_tlv *req;
+ enum _ecore_status_t rc;
+
+ /* clear mailbox and prep first tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = ECORE_AGAIN;
+ goto exit;
+ }
+
+ p_hwfn->b_int_enabled = 0;
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp;
+ struct vfpf_first_tlv *req;
+ u32 size;
+ enum _ecore_status_t rc;
+
+ /* clear mailbox and prep first tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+ if (rc == ECORE_SUCCESS && resp->hdr.status != PFVF_STATUS_SUCCESS)
+ rc = ECORE_AGAIN;
+
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ p_hwfn->b_int_enabled = 0;
+
+ if (p_iov->vf2pf_request)
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_iov->vf2pf_request,
+ p_iov->vf2pf_request_phys,
+ sizeof(union vfpf_tlvs));
+ if (p_iov->pf2vf_reply)
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_iov->pf2vf_reply,
+ p_iov->pf2vf_reply_phys,
+ sizeof(union pfvf_tlvs));
+
+ if (p_iov->bulletin.p_virt) {
+ size = sizeof(struct ecore_bulletin_content);
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_iov->bulletin.p_virt,
+ p_iov->bulletin.phys, size);
+ }
+
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->vf_iov_info);
+
+ return rc;
+}
+
+void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn,
+ struct ecore_filter_mcast *p_filter_cmd)
+{
+ struct ecore_sp_vport_update_params sp_params;
+ int i;
+
+ OSAL_MEMSET(&sp_params, 0, sizeof(sp_params));
+ sp_params.update_approx_mcast_flg = 1;
+
+ if (p_filter_cmd->opcode == ECORE_FILTER_ADD) {
+ for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
+ u32 bit;
+
+ bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
+ OSAL_SET_BIT(bit, sp_params.bins);
+ }
+ }
+
+ ecore_vf_pf_vport_update(p_hwfn, &sp_params);
+}
+
+enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn *p_hwfn,
+ struct ecore_filter_ucast
+ *p_ucast)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_ucast_filter_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ enum _ecore_status_t rc;
+
+ /* Sanitize */
+ if (p_ucast->opcode == ECORE_FILTER_MOVE) {
+ DP_NOTICE(p_hwfn, true,
+ "VFs don't support Moving of filters\n");
+ return ECORE_INVAL;
+ }
+
+ /* clear mailbox and prep first tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
+ req->opcode = (u8)p_ucast->opcode;
+ req->type = (u8)p_ucast->type;
+ OSAL_MEMCPY(req->mac, p_ucast->mac, ETH_ALEN);
+ req->vlan = p_ucast->vlan;
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = ECORE_AGAIN;
+ goto exit;
+ }
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+ enum _ecore_status_t rc;
+
+ /* clear mailbox and prep first tlv */
+ ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
+ sizeof(struct vfpf_first_tlv));
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = ECORE_INVAL;
+ goto exit;
+ }
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal, u16 tx_coal,
+ struct ecore_queue_cid *p_cid)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_update_coalesce *req;
+ struct pfvf_def_resp_tlv *resp;
+ enum _ecore_status_t rc;
+
+ /* clear mailbox and prep header tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_UPDATE,
+ sizeof(*req));
+
+ req->rx_coal = rx_coal;
+ req->tx_coal = tx_coal;
+ req->qid = p_cid->rel.queue_id;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Setting coalesce rx_coal = %d, tx_coal = %d at queue = %d\n",
+ rx_coal, tx_coal, req->qid);
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+ if (rc != ECORE_SUCCESS)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ goto exit;
+
+ p_hwfn->p_dev->rx_coalesce_usecs = rx_coal;
+ p_hwfn->p_dev->tx_coalesce_usecs = tx_coal;
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+ return rc;
+}
+
+u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn,
+ u16 sb_id)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+
+ if (!p_iov) {
+		DP_NOTICE(p_hwfn, true, "vf_iov_info isn't initialized\n");
+ return 0;
+ }
+
+ return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
+}
+
+enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn,
+ u8 *p_change)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct ecore_bulletin_content shadow;
+ u32 crc, crc_size;
+
+ crc_size = sizeof(p_iov->bulletin.p_virt->crc);
+ *p_change = 0;
+
+ /* Need to guarantee PF is not in the middle of writing it */
+ OSAL_MEMCPY(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);
+
+ /* If version did not update, no need to do anything */
+ if (shadow.version == p_iov->bulletin_shadow.version)
+ return ECORE_SUCCESS;
+
+ /* Verify the bulletin we see is valid */
+ crc = ecore_crc32(0, (u8 *)&shadow + crc_size,
+ p_iov->bulletin.size - crc_size);
+ if (crc != shadow.crc)
+ return ECORE_AGAIN;
+
+ /* Set the shadow bulletin and process it */
+ OSAL_MEMCPY(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Read a bulletin update %08x\n", shadow.version);
+
+ *p_change = 1;
+
+ return ECORE_SUCCESS;
+}
+
+void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_link_params *p_params,
+ struct ecore_bulletin_content *p_bulletin)
+{
+ OSAL_MEMSET(p_params, 0, sizeof(*p_params));
+
+ p_params->speed.autoneg = p_bulletin->req_autoneg;
+ p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
+ p_params->speed.forced_speed = p_bulletin->req_forced_speed;
+ p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
+ p_params->pause.forced_rx = p_bulletin->req_forced_rx;
+ p_params->pause.forced_tx = p_bulletin->req_forced_tx;
+ p_params->loopback_mode = p_bulletin->req_loopback;
+}
+
+void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_link_params *params)
+{
+ __ecore_vf_get_link_params(p_hwfn, params,
+ &p_hwfn->vf_iov_info->bulletin_shadow);
+}
+
+void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_link_state *p_link,
+ struct ecore_bulletin_content *p_bulletin)
+{
+ OSAL_MEMSET(p_link, 0, sizeof(*p_link));
+
+ p_link->link_up = p_bulletin->link_up;
+ p_link->speed = p_bulletin->speed;
+ p_link->full_duplex = p_bulletin->full_duplex;
+ p_link->an = p_bulletin->autoneg;
+ p_link->an_complete = p_bulletin->autoneg_complete;
+ p_link->parallel_detection = p_bulletin->parallel_detection;
+ p_link->pfc_enabled = p_bulletin->pfc_enabled;
+ p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
+ p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
+ p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
+ p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
+ p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
+}
+
+void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_link_state *link)
+{
+ __ecore_vf_get_link_state(p_hwfn, link,
+ &p_hwfn->vf_iov_info->bulletin_shadow);
+}
+
+void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_link_capabilities *p_link_caps,
+ struct ecore_bulletin_content *p_bulletin)
+{
+ OSAL_MEMSET(p_link_caps, 0, sizeof(*p_link_caps));
+ p_link_caps->speed_capabilities = p_bulletin->capability_speed;
+}
+
+void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_link_capabilities *p_link_caps)
+{
+ __ecore_vf_get_link_caps(p_hwfn, p_link_caps,
+ &p_hwfn->vf_iov_info->bulletin_shadow);
+}
+
+void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn, u8 *num_rxqs)
+{
+ *num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
+}
+
+void ecore_vf_get_num_txqs(struct ecore_hwfn *p_hwfn,
+ u8 *num_txqs)
+{
+ *num_txqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_txqs;
+}
+
+void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn, u8 *port_mac)
+{
+ OSAL_MEMCPY(port_mac,
+ p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac,
+ ETH_ALEN);
+}
+
+void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn,
+ u8 *num_vlan_filters)
+{
+ struct ecore_vf_iov *p_vf;
+
+ p_vf = p_hwfn->vf_iov_info;
+ *num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
+}
+
+void ecore_vf_get_num_sbs(struct ecore_hwfn *p_hwfn,
+ u32 *num_sbs)
+{
+ struct ecore_vf_iov *p_vf;
+
+ p_vf = p_hwfn->vf_iov_info;
+ *num_sbs = (u32)p_vf->acquire_resp.resc.num_sbs;
+}
+
+void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn,
+ u32 *num_mac_filters)
+{
+ struct ecore_vf_iov *p_vf = p_hwfn->vf_iov_info;
+
+ *num_mac_filters = p_vf->acquire_resp.resc.num_mac_filters;
+}
+
+bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac)
+{
+ struct ecore_bulletin_content *bulletin;
+
+ bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
+ if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
+ return true;
+
+ /* Forbid VF from changing a MAC enforced by PF */
+ if (OSAL_MEMCMP(bulletin->mac, mac, ETH_ALEN))
+ return false;
+
+ return false;
+}
+
+bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac,
+ u8 *p_is_forced)
+{
+ struct ecore_bulletin_content *bulletin;
+
+ bulletin = &hwfn->vf_iov_info->bulletin_shadow;
+
+ if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
+ if (p_is_forced)
+ *p_is_forced = 1;
+ } else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
+ if (p_is_forced)
+ *p_is_forced = 0;
+ } else {
+ return false;
+ }
+
+ OSAL_MEMCPY(dst_mac, bulletin->mac, ETH_ALEN);
+
+ return true;
+}
+
+void ecore_vf_bulletin_get_udp_ports(struct ecore_hwfn *p_hwfn,
+ u16 *p_vxlan_port,
+ u16 *p_geneve_port)
+{
+ struct ecore_bulletin_content *p_bulletin;
+
+ p_bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
+
+ *p_vxlan_port = p_bulletin->vxlan_udp_port;
+ *p_geneve_port = p_bulletin->geneve_udp_port;
+}
+
+bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid)
+{
+ struct ecore_bulletin_content *bulletin;
+
+ bulletin = &hwfn->vf_iov_info->bulletin_shadow;
+
+ if (!(bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
+ return false;
+
+ if (dst_pvid)
+ *dst_pvid = bulletin->pvid;
+
+ return true;
+}
+
+bool ecore_vf_get_pre_fp_hsi(struct ecore_hwfn *p_hwfn)
+{
+ return p_hwfn->vf_iov_info->b_pre_fp_hsi;
+}
+
+void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,
+ u16 *fw_major, u16 *fw_minor, u16 *fw_rev,
+ u16 *fw_eng)
+{
+ struct pf_vf_pfdev_info *info;
+
+ info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;
+
+ *fw_major = info->fw_major;
+ *fw_minor = info->fw_minor;
+ *fw_rev = info->fw_rev;
+ *fw_eng = info->fw_eng;
+}
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_vf.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_vf.h
new file mode 100644
index 00000000..f4713884
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_vf.h
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_VF_H__
+#define __ECORE_VF_H__
+
+#include "ecore_status.h"
+#include "ecore_vf_api.h"
+#include "ecore_l2_api.h"
+#include "ecore_vfpf_if.h"
+
+/* This data is held in the ecore_hwfn structure for VFs only. */
+struct ecore_vf_iov {
+ union vfpf_tlvs *vf2pf_request;
+ dma_addr_t vf2pf_request_phys;
+ union pfvf_tlvs *pf2vf_reply;
+ dma_addr_t pf2vf_reply_phys;
+
+ /* Should be taken whenever the mailbox buffers are accessed */
+ osal_mutex_t mutex;
+ u8 *offset;
+
+ /* Bulletin Board */
+ struct ecore_bulletin bulletin;
+ struct ecore_bulletin_content bulletin_shadow;
+
+ /* we set aside a copy of the acquire response */
+ struct pfvf_acquire_resp_tlv acquire_resp;
+
+ /* In case PF originates prior to the fp-hsi version comparison,
+ * this has to be propagated as it affects the fastpath.
+ */
+ bool b_pre_fp_hsi;
+};
+
+
+enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 coalesce,
+ struct ecore_queue_cid *p_cid);
+enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 coalesce,
+ struct ecore_queue_cid *p_cid);
+/**
+ * @brief VF - Set Rx/Tx coalesce per VF's relative queue.
+ * Coalesce value '0' will omit the configuration.
+ *
+ * @param p_hwfn
+ * @param rx_coal - coalesce value in micro second for rx queue
+ * @param tx_coal - coalesce value in micro second for tx queue
+ * @param queue_cid
+ *
+ **/
+enum _ecore_status_t ecore_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
+ u16 rx_coal, u16 tx_coal,
+ struct ecore_queue_cid *p_cid);
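+
+/* Usage sketch (illustrative only; how p_cid is obtained is driver-specific -
+ * it is the queue-cid returned when the Rx/Tx queue was started):
+ *
+ *	rc = ecore_vf_pf_set_coalesce(p_hwfn, 24, 48, p_cid);
+ *	if (rc != ECORE_SUCCESS)
+ *		DP_NOTICE(p_hwfn, false, "Coalesce configuration failed\n");
+ */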
+
+#ifdef CONFIG_ECORE_SRIOV
+/**
+ * @brief hw preparation for VF
+ * sends ACQUIRE message
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief VF - start the RX Queue by sending a message to the PF
+ *
+ * @param p_hwfn
+ * @param p_cid - Only relative fields are relevant
+ * @param bd_max_bytes - maximum number of bytes per bd
+ * @param bd_chain_phys_addr - physical address of bd chain
+ * @param cqe_pbl_addr - physical address of pbl
+ * @param cqe_pbl_size - pbl size
+ * @param pp_prod - pointer to the producer to be
+ *	    used in fastpath
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ void OSAL_IOMEM **pp_prod);
+
+/**
+ * @brief VF - start the TX queue by sending a message to the
+ * PF.
+ *
+ * @param p_hwfn
+ * @param p_cid
+ * @param pbl_addr - physical address of the Tx PBL
+ * @param pbl_size - PBL size
+ * @param pp_doorbell - pointer to address to which to
+ *			write the doorbell to
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ dma_addr_t pbl_addr, u16 pbl_size,
+ void OSAL_IOMEM **pp_doorbell);
+
+/**
+ * @brief VF - stop the RX queue by sending a message to the PF
+ *
+ * @param p_hwfn
+ * @param p_cid
+ * @param cqe_completion
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ bool cqe_completion);
+
+/**
+ * @brief VF - stop the TX queue by sending a message to the PF
+ *
+ * @param p_hwfn
+ * @param p_cid
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid);
+
+/* TODO - fix all the !SRIOV prototypes */
+
+#ifndef LINUX_REMOVE
+/**
+ * @brief VF - update the RX queue by sending a message to the
+ * PF
+ *
+ * @param p_hwfn
+ * @param pp_cid - list of queue-cids which we want to update
+ * @param num_rxqs
+ * @param comp_cqe_flg
+ * @param comp_event_flg
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid **pp_cid,
+ u8 num_rxqs,
+ u8 comp_cqe_flg,
+ u8 comp_event_flg);
+#endif
+
+/**
+ * @brief VF - send a vport update command
+ *
+ * @param p_hwfn
+ * @param params
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_params);
+
+/**
+ * @brief VF - send a close message to PF
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status
+ */
+enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief VF - free the VF's memories
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status
+ */
+enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_vf_get_igu_sb_id - Get the IGU SB ID for a given
+ * sb_id. For VFs igu sbs don't have to be contiguous
+ *
+ * @param p_hwfn
+ * @param sb_id
+ *
+ * @return INLINE u16
+ */
+u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn,
+ u16 sb_id);
+
+
+/**
+ * @brief ecore_vf_pf_vport_start - perform vport start for VF.
+ *
+ * @param p_hwfn
+ * @param vport_id
+ * @param mtu
+ * @param inner_vlan_removal
+ * @param tpa_mode
+ * @param max_buffers_per_cqe,
+ * @param only_untagged - default behavior regarding vlan acceptance
+ *
+ * @return enum _ecore_status
+ */
+enum _ecore_status_t ecore_vf_pf_vport_start(
+ struct ecore_hwfn *p_hwfn,
+ u8 vport_id,
+ u16 mtu,
+ u8 inner_vlan_removal,
+ enum ecore_tpa_mode tpa_mode,
+ u8 max_buffers_per_cqe,
+ u8 only_untagged);
+
+/**
+ * @brief ecore_vf_pf_vport_stop - stop the VF's vport
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status
+ */
+enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn);
+
+enum _ecore_status_t ecore_vf_pf_filter_ucast(
+ struct ecore_hwfn *p_hwfn,
+ struct ecore_filter_ucast *p_param);
+
+void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn,
+ struct ecore_filter_mcast *p_filter_cmd);
+
+/**
+ * @brief ecore_vf_pf_int_cleanup - clean the SB of the VF
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status
+ */
+enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief - return the link params in a given bulletin board
+ *
+ * @param p_hwfn
+ * @param p_params - pointer to a struct to fill with link params
+ * @param p_bulletin
+ */
+void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_link_params *p_params,
+ struct ecore_bulletin_content *p_bulletin);
+
+/**
+ * @brief - return the link state in a given bulletin board
+ *
+ * @param p_hwfn
+ * @param p_link - pointer to a struct to fill with link state
+ * @param p_bulletin
+ */
+void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_link_state *p_link,
+ struct ecore_bulletin_content *p_bulletin);
+
+/**
+ * @brief - return the link capabilities in a given bulletin board
+ *
+ * @param p_hwfn
+ * @param p_link - pointer to a struct to fill with link capabilities
+ * @param p_bulletin
+ */
+void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_link_capabilities *p_link_caps,
+ struct ecore_bulletin_content *p_bulletin);
+
+enum _ecore_status_t
+ecore_vf_pf_tunnel_param_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunnel_info *p_tunn);
+
+void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun);
+#endif
+#endif /* __ECORE_VF_H__ */
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_vf_api.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_vf_api.h
new file mode 100644
index 00000000..be3a326b
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_vf_api.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_VF_API_H__
+#define __ECORE_VF_API_H__
+
+#include "ecore_sp_api.h"
+#include "ecore_mcp_api.h"
+
+#ifdef CONFIG_ECORE_SRIOV
+/**
+ * @brief Read the VF bulletin and act on it if needed
+ *
+ * @param p_hwfn
+ * @param p_change - ecore fills 1 iff bulletin board has changed, 0 otherwise.
+ *
+ * @return enum _ecore_status
+ */
+enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn,
+ u8 *p_change);
+
+/**
+ * @brief Get link parameters for VF from ecore
+ *
+ * @param p_hwfn
+ * @param params - the link params structure to be filled for the VF
+ */
+void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_link_params *params);
+
+/**
+ * @brief Get link state for VF from ecore
+ *
+ * @param p_hwfn
+ * @param link - the link state structure to be filled for the VF
+ */
+void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_link_state *link);
+
+/**
+ * @brief Get link capabilities for VF from ecore
+ *
+ * @param p_hwfn
+ * @param p_link_caps - the link capabilities structure to be filled for the VF
+ */
+void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_link_capabilities *p_link_caps);
+
+/**
+ * @brief Get number of Rx queues allocated for VF by ecore
+ *
+ * @param p_hwfn
+ * @param num_rxqs - allocated RX queues
+ */
+void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn,
+ u8 *num_rxqs);
+
+/**
+ * @brief Get number of Tx queues allocated for VF by ecore
+ *
+ * @param p_hwfn
+ * @param num_txqs - allocated TX queues
+ */
+void ecore_vf_get_num_txqs(struct ecore_hwfn *p_hwfn,
+ u8 *num_txqs);
+
+/**
+ * @brief Get port mac address for VF
+ *
+ * @param p_hwfn
+ * @param port_mac - destination location for port mac
+ */
+void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn,
+ u8 *port_mac);
+
+/**
+ * @brief Get number of VLAN filters allocated for VF by ecore
+ *
+ * @param p_hwfn
+ * @param num_vlan_filters - allocated VLAN filters
+ */
+void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn,
+ u8 *num_vlan_filters);
+
+void ecore_vf_get_num_sbs(struct ecore_hwfn *p_hwfn,
+ u32 *num_sbs);
+
+/**
+ * @brief Get number of MAC filters allocated for VF by ecore
+ *
+ * @param p_hwfn
+ * @param num_mac_filters - allocated MAC filters
+ */
+void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn,
+ u32 *num_mac_filters);
+
+/**
+ * @brief Check if VF can set a MAC address
+ *
+ * @param p_hwfn
+ * @param mac
+ *
+ * @return bool
+ */
+bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac);
+
+#ifndef LINUX_REMOVE
+/**
+ * @brief Copy forced MAC address from bulletin board
+ *
+ * @param hwfn
+ * @param dst_mac
+ * @param p_is_forced - out param indicating whether the MAC,
+ *			if it exists, is forced or not.
+ *
+ * @return bool - true if a MAC exists, false otherwise.
+ */
+bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac,
+ u8 *p_is_forced);
+
+/**
+ * @brief Check if a forced VLAN is set and, if so, copy it
+ *	  from the bulletin board
+ *
+ * @param hwfn
+ * @param dst_pvid
+ * @return bool
+ */
+bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid);
+
+/**
+ * @brief Check if the VF runs over a PF whose driver is a pre-fp-hsi version;
+ *	  this affects the driver's fastpath implementation.
+ *
+ * @param p_hwfn
+ *
+ * @return bool - true iff PF is pre-fp-hsi version.
+ */
+bool ecore_vf_get_pre_fp_hsi(struct ecore_hwfn *p_hwfn);
+
+#endif
+
+/**
+ * @brief Retrieve the firmware version information from the VF's acquire
+ *  response tlv
+ *
+ * @param p_hwfn
+ * @param fw_major
+ * @param fw_minor
+ * @param fw_rev
+ * @param fw_eng
+ */
+void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,
+ u16 *fw_major,
+ u16 *fw_minor,
+ u16 *fw_rev,
+ u16 *fw_eng);
+void ecore_vf_bulletin_get_udp_ports(struct ecore_hwfn *p_hwfn,
+ u16 *p_vxlan_port, u16 *p_geneve_port);
+#endif
+#endif
diff --git a/src/seastar/dpdk/drivers/net/qede/base/ecore_vfpf_if.h b/src/seastar/dpdk/drivers/net/qede/base/ecore_vfpf_if.h
new file mode 100644
index 00000000..66184421
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/ecore_vfpf_if.h
@@ -0,0 +1,619 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_VF_PF_IF_H__
+#define __ECORE_VF_PF_IF_H__
+
+/* @@@ TBD MichalK this should be HSI? */
+#define T_ETH_INDIRECTION_TABLE_SIZE 128
+#define T_ETH_RSS_KEY_SIZE 10 /* @@@ TBD this should be HSI? */
+
+/***********************************************
+ *
+ * Common definitions for all HVs
+ *
+ **/
+struct vf_pf_resc_request {
+ u8 num_rxqs;
+ u8 num_txqs;
+ u8 num_sbs;
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ u8 num_mc_filters; /* No limit so superfluous */
+ u16 padding;
+};
+
+struct hw_sb_info {
+ u16 hw_sb_id; /* aka absolute igu id, used to ack the sb */
+ u8 sb_qid; /* used to update DHC for sb */
+ u8 padding[5];
+};
+
+/***********************************************
+ *
+ * HW VF-PF channel definitions
+ *
+ * A.K.A VF-PF mailbox
+ *
+ **/
+#define TLV_BUFFER_SIZE 1024
+
+/* vf pf channel tlvs */
+/* general tlv header (used for both vf->pf request and pf->vf response) */
+struct channel_tlv {
+ u16 type;
+ u16 length;
+};
+
+/* header of first vf->pf tlv carries the offset used to calculate response
+ * buffer address
+ */
+struct vfpf_first_tlv {
+ struct channel_tlv tl;
+ u32 padding;
+ u64 reply_address;
+};
+
+/* header of pf->vf tlvs, carries the status of handling the request */
+struct pfvf_tlv {
+ struct channel_tlv tl;
+ u8 status;
+ u8 padding[3];
+};
+
+/* response tlv used for most tlvs */
+struct pfvf_def_resp_tlv {
+ struct pfvf_tlv hdr;
+};
+
+/* used to terminate and pad a tlv list */
+struct channel_list_end_tlv {
+ struct channel_tlv tl;
+ u8 padding[4];
+};
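+
+/* Request layout sketch (illustrative): a vf->pf message is built as a chain
+ * of TLVs in the request buffer, e.g.
+ *
+ *	[vfpf_first_tlv][extended tlvs, optional ...][channel_list_end_tlv]
+ *
+ * Every TLV starts with struct channel_tlv, so the receiver can walk the
+ * chain by advancing 'length' bytes until it hits CHANNEL_TLV_LIST_END.
+ */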
+
+/* Acquire */
+struct vfpf_acquire_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ struct vf_pf_vfdev_info {
+#ifndef LINUX_REMOVE
+	/* First bit was used on 8.7.x and 8.8.x versions, which used different
+	 * FWs but the same fastpath HSI. As this was prior to the fastpath
+	 * versioning, we wanted the ability to override fw matching and allow
+	 * them to interact.
+	 */
+#endif
+/* VF pre-FP hsi version */
+#define VFPF_ACQUIRE_CAP_PRE_FP_HSI (1 << 0)
+#define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */
+ u64 capabilities;
+ u8 fw_major;
+ u8 fw_minor;
+ u8 fw_revision;
+ u8 fw_engineering;
+ u32 driver_version;
+ u16 opaque_fid; /* ME register value */
+ u8 os_type; /* VFPF_ACQUIRE_OS_* value */
+ u8 eth_fp_hsi_major;
+ u8 eth_fp_hsi_minor;
+ u8 padding[3];
+ } vfdev_info;
+
+ struct vf_pf_resc_request resc_request;
+
+ u64 bulletin_addr;
+ u32 bulletin_size;
+ u32 padding;
+};
+
+/* receive side scaling tlv */
+struct vfpf_vport_update_rss_tlv {
+ struct channel_tlv tl;
+
+ u8 update_rss_flags;
+ #define VFPF_UPDATE_RSS_CONFIG_FLAG (1 << 0)
+ #define VFPF_UPDATE_RSS_CAPS_FLAG (1 << 1)
+ #define VFPF_UPDATE_RSS_IND_TABLE_FLAG (1 << 2)
+ #define VFPF_UPDATE_RSS_KEY_FLAG (1 << 3)
+
+ u8 rss_enable;
+ u8 rss_caps;
+ u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */
+ u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+ u32 rss_key[T_ETH_RSS_KEY_SIZE];
+};
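+
+/* Example (sketch): a full 128-entry indirection table is encoded with
+ * rss_table_size_log = 7, since (1 << 7) == T_ETH_INDIRECTION_TABLE_SIZE.
+ */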
+
+struct pfvf_storm_stats {
+ u32 address;
+ u32 len;
+};
+
+struct pfvf_stats_info {
+ struct pfvf_storm_stats mstats;
+ struct pfvf_storm_stats pstats;
+ struct pfvf_storm_stats tstats;
+ struct pfvf_storm_stats ustats;
+};
+
+/* acquire response tlv - carries the allocated resources */
+struct pfvf_acquire_resp_tlv {
+ struct pfvf_tlv hdr;
+
+ struct pf_vf_pfdev_info {
+ u32 chip_num;
+ u32 mfw_ver;
+
+ u16 fw_major;
+ u16 fw_minor;
+ u16 fw_rev;
+ u16 fw_eng;
+
+ u64 capabilities;
+#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED (1 << 0)
+#define PFVF_ACQUIRE_CAP_100G (1 << 1) /* If set, 100g PF */
+/* There are old PF versions where the PF might mistakenly override the sanity
+ * mechanism [version-based] and allow a VF that can't be supported to pass
+ * the acquisition phase.
+ * To overcome this, PFs now indicate that they're past that point and the new
+ * VFs would fail probe on the older PFs that fail to do so.
+ */
+#ifndef LINUX_REMOVE
+/* Said bug was in quest/serpens; Can't be certain no official release included
+ * the bug since the fix arrived very late in the programs.
+ */
+#endif
+#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE (1 << 2)
+
+ u16 db_size;
+ u8 indices_per_sb;
+ u8 os_type;
+
+ /* These should match the PF's ecore_dev values */
+ u16 chip_rev;
+ u8 dev_type;
+
+ u8 padding;
+
+ struct pfvf_stats_info stats_info;
+
+ u8 port_mac[ETH_ALEN];
+
+ /* It's possible PF had to configure an older fastpath HSI
+ * [in case VF is newer than PF]. This is communicated back
+ * to the VF. It can also be used in case of error due to
+ * non-matching versions to shed light in VF about failure.
+ */
+ u8 major_fp_hsi;
+ u8 minor_fp_hsi;
+ } pfdev_info;
+
+ struct pf_vf_resc {
+ /* in case of status NO_RESOURCE in message hdr, pf will fill
+ * this struct with suggested amount of resources for next
+ * acquire request
+ */
+ #define PFVF_MAX_QUEUES_PER_VF 16
+ #define PFVF_MAX_SBS_PER_VF 16
+ struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
+ u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
+ u8 cid[PFVF_MAX_QUEUES_PER_VF];
+
+ u8 num_rxqs;
+ u8 num_txqs;
+ u8 num_sbs;
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ u8 num_mc_filters;
+ u8 padding[2];
+ } resc;
+
+ u32 bulletin_size;
+ u32 padding;
+};
+
+struct pfvf_start_queue_resp_tlv {
+ struct pfvf_tlv hdr;
+ u32 offset; /* offset to consumer/producer of queue */
+ u8 padding[4];
+};
+
+/* Setup Queue */
+struct vfpf_start_rxq_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ /* physical addresses */
+ u64 rxq_addr;
+ u64 deprecated_sge_addr;
+ u64 cqe_pbl_addr;
+
+ u16 cqe_pbl_size;
+ u16 hw_sb;
+ u16 rx_qid;
+ u16 hc_rate; /* desired interrupts per sec. */
+
+ u16 bd_max_bytes;
+ u16 stat_id;
+ u8 sb_index;
+ u8 padding[3];
+
+};
+
+struct vfpf_start_txq_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ /* physical addresses */
+ u64 pbl_addr;
+ u16 pbl_size;
+ u16 stat_id;
+ u16 tx_qid;
+ u16 hw_sb;
+
+ u32 flags; /* VFPF_QUEUE_FLG_X flags */
+ u16 hc_rate; /* desired interrupts per sec. */
+ u8 sb_index;
+ u8 padding[3];
+};
+
+/* Stop RX Queue */
+struct vfpf_stop_rxqs_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u16 rx_qid;
+ u8 num_rxqs;
+ u8 cqe_completion;
+ u8 padding[4];
+};
+
+/* Stop TX Queues */
+struct vfpf_stop_txqs_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u16 tx_qid;
+ u8 num_txqs;
+ u8 padding[5];
+};
+
+struct vfpf_update_rxq_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF];
+
+ u16 rx_qid;
+ u8 num_rxqs;
+ u8 flags;
+ #define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG (1 << 0)
+ #define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG (1 << 1)
+ #define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG (1 << 2)
+
+ u8 padding[4];
+};
+
+/* Set Queue Filters */
+struct vfpf_q_mac_vlan_filter {
+ u32 flags;
+ #define VFPF_Q_FILTER_DEST_MAC_VALID 0x01
+ #define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02
+ #define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */
+
+ u8 mac[ETH_ALEN];
+ u16 vlan_tag;
+
+ u8 padding[4];
+};
+
+/* Start a vport */
+struct vfpf_vport_start_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u64 sb_addr[PFVF_MAX_SBS_PER_VF];
+
+ u32 tpa_mode;
+ u16 dep1;
+ u16 mtu;
+
+ u8 vport_id;
+ u8 inner_vlan_removal;
+
+ u8 only_untagged;
+ u8 max_buffers_per_cqe;
+
+ u8 padding[4];
+};
+
+/* Extended tlvs - need to add rss, mcast, accept mode tlvs */
+struct vfpf_vport_update_activate_tlv {
+ struct channel_tlv tl;
+ u8 update_rx;
+ u8 update_tx;
+ u8 active_rx;
+ u8 active_tx;
+};
+
+struct vfpf_vport_update_tx_switch_tlv {
+ struct channel_tlv tl;
+ u8 tx_switching;
+ u8 padding[3];
+};
+
+struct vfpf_vport_update_vlan_strip_tlv {
+ struct channel_tlv tl;
+ u8 remove_vlan;
+ u8 padding[3];
+};
+
+struct vfpf_vport_update_mcast_bin_tlv {
+ struct channel_tlv tl;
+ u8 padding[4];
+
+ u64 bins[8];
+};
+
+struct vfpf_vport_update_accept_param_tlv {
+ struct channel_tlv tl;
+ u8 update_rx_mode;
+ u8 update_tx_mode;
+ u8 rx_accept_filter;
+ u8 tx_accept_filter;
+};
+
+struct vfpf_vport_update_accept_any_vlan_tlv {
+ struct channel_tlv tl;
+ u8 update_accept_any_vlan_flg;
+ u8 accept_any_vlan;
+
+ u8 padding[2];
+};
+
+struct vfpf_vport_update_sge_tpa_tlv {
+ struct channel_tlv tl;
+
+ u16 sge_tpa_flags;
+ #define VFPF_TPA_IPV4_EN_FLAG (1 << 0)
+ #define VFPF_TPA_IPV6_EN_FLAG (1 << 1)
+ #define VFPF_TPA_PKT_SPLIT_FLAG (1 << 2)
+ #define VFPF_TPA_HDR_DATA_SPLIT_FLAG (1 << 3)
+ #define VFPF_TPA_GRO_CONSIST_FLAG (1 << 4)
+
+ u8 update_sge_tpa_flags;
+ #define VFPF_UPDATE_SGE_DEPRECATED_FLAG (1 << 0)
+ #define VFPF_UPDATE_TPA_EN_FLAG (1 << 1)
+ #define VFPF_UPDATE_TPA_PARAM_FLAG (1 << 2)
+
+ u8 max_buffers_per_cqe;
+
+ u16 deprecated_sge_buff_size;
+ u16 tpa_max_size;
+ u16 tpa_min_size_to_start;
+ u16 tpa_min_size_to_cont;
+
+ u8 tpa_max_aggs_num;
+ u8 padding[7];
+
+};
+
+/* Primary tlv as a header for various extended tlvs for
+ * various functionalities in vport update ramrod.
+ */
+struct vfpf_vport_update_tlv {
+ struct vfpf_first_tlv first_tlv;
+};
+
+struct vfpf_ucast_filter_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u8 opcode;
+ u8 type;
+
+ u8 mac[ETH_ALEN];
+
+ u16 vlan;
+ u16 padding[3];
+};
+
+/* tunnel update param tlv */
+struct vfpf_update_tunn_param_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u8 tun_mode_update_mask;
+ u8 tunn_mode;
+ u8 update_tun_cls;
+ u8 vxlan_clss;
+ u8 l2gre_clss;
+ u8 ipgre_clss;
+ u8 l2geneve_clss;
+ u8 ipgeneve_clss;
+ u8 update_geneve_port;
+ u8 update_vxlan_port;
+ u16 geneve_port;
+ u16 vxlan_port;
+ u8 padding[2];
+};
+
+struct pfvf_update_tunn_param_tlv {
+ struct pfvf_tlv hdr;
+
+ u16 tunn_feature_mask;
+ u8 vxlan_mode;
+ u8 l2geneve_mode;
+ u8 ipgeneve_mode;
+ u8 l2gre_mode;
+ u8 ipgre_mode;
+ u8 vxlan_clss;
+ u8 l2gre_clss;
+ u8 ipgre_clss;
+ u8 l2geneve_clss;
+ u8 ipgeneve_clss;
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+};
+
+struct tlv_buffer_size {
+ u8 tlv_buffer[TLV_BUFFER_SIZE];
+};
+
+struct vfpf_update_coalesce {
+ struct vfpf_first_tlv first_tlv;
+ u16 rx_coal;
+ u16 tx_coal;
+ u16 qid;
+ u8 padding[2];
+};
+
+union vfpf_tlvs {
+ struct vfpf_first_tlv first_tlv;
+ struct vfpf_acquire_tlv acquire;
+ struct vfpf_start_rxq_tlv start_rxq;
+ struct vfpf_start_txq_tlv start_txq;
+ struct vfpf_stop_rxqs_tlv stop_rxqs;
+ struct vfpf_stop_txqs_tlv stop_txqs;
+ struct vfpf_update_rxq_tlv update_rxq;
+ struct vfpf_vport_start_tlv start_vport;
+ struct vfpf_vport_update_tlv vport_update;
+ struct vfpf_ucast_filter_tlv ucast_filter;
+ struct vfpf_update_tunn_param_tlv tunn_param_update;
+ struct vfpf_update_coalesce update_coalesce;
+ struct tlv_buffer_size tlv_buf_size;
+};
+
+union pfvf_tlvs {
+ struct pfvf_def_resp_tlv default_resp;
+ struct pfvf_acquire_resp_tlv acquire_resp;
+ struct tlv_buffer_size tlv_buf_size;
+ struct pfvf_start_queue_resp_tlv queue_start;
+ struct pfvf_update_tunn_param_tlv tunn_param_resp;
+};
+
+/* This is a structure which is allocated in the VF, which the PF may update
+ * when it deems it necessary to do so. The bulletin board is sampled
+ * periodically by the VF. A copy per VF is maintained in the PF (to prevent
+ * loss of data upon multiple updates (or the need for read modify write)).
+ */
+enum ecore_bulletin_bit {
+ /* Alert the VF that a forced MAC was set by the PF */
+ MAC_ADDR_FORCED = 0,
+
+ /* The VF should not access the vfpf channel */
+ VFPF_CHANNEL_INVALID = 1,
+
+ /* Alert the VF that a forced VLAN was set by the PF */
+ VLAN_ADDR_FORCED = 2,
+
+ /* Indicate that `default_only_untagged' contains actual data */
+ VFPF_BULLETIN_UNTAGGED_DEFAULT = 3,
+ VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4,
+
+ /* Alert the VF that suggested mac was sent by the PF.
+ * MAC_ADDR will be disabled in case MAC_ADDR_FORCED is set
+ */
+ VFPF_BULLETIN_MAC_ADDR = 5
+};
+
+struct ecore_bulletin_content {
+ /* crc of structure to ensure is not in mid-update */
+ u32 crc;
+
+ u32 version;
+
+ /* bitmap indicating which fields hold valid values */
+ u64 valid_bitmap;
+
+ /* used for MAC_ADDR or MAC_ADDR_FORCED */
+ u8 mac[ETH_ALEN];
+
+ /* If valid, 1 => only untagged Rx if no vlan is configured */
+ u8 default_only_untagged;
+ u8 padding;
+
+	/* The following is a 'copy' of ecore_mcp_link_state,
+	 * ecore_mcp_link_params and ecore_mcp_link_capabilities. Since these
+	 * structs may grow further down the road, we cannot embed them here;
+	 * instead we carry all of their fields.
+	 */
+ u8 req_autoneg;
+ u8 req_autoneg_pause;
+ u8 req_forced_rx;
+ u8 req_forced_tx;
+ u8 padding2[4];
+
+ u32 req_adv_speed;
+ u32 req_forced_speed;
+ u32 req_loopback;
+ u32 padding3;
+
+ u8 link_up;
+ u8 full_duplex;
+ u8 autoneg;
+ u8 autoneg_complete;
+ u8 parallel_detection;
+ u8 pfc_enabled;
+ u8 partner_tx_flow_ctrl_en;
+ u8 partner_rx_flow_ctrl_en;
+
+ u8 partner_adv_pause;
+ u8 sfp_tx_fault;
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+ u8 padding4[2];
+
+ u32 speed;
+ u32 partner_adv_speed;
+
+ u32 capability_speed;
+
+ /* Forced vlan */
+ u16 pvid;
+ u16 padding5;
+};
+
+struct ecore_bulletin {
+ dma_addr_t phys;
+ struct ecore_bulletin_content *p_virt;
+ u32 size;
+};
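/* Illustrative sketch of the periodic bulletin sampling described above: the
 * VF snapshots the PF-written copy, checks the version for news and the crc
 * for a torn (mid-update) write, and only then adopts it. The crc32() helper
 * and the exact byte range it covers (everything after the crc field) are
 * assumptions, not taken from this header.
 */
#include <stdint.h>
#include <string.h>

extern uint32_t crc32(uint32_t seed, const uint8_t *buf, size_t len); /* assumed helper */

static int vf_sample_bulletin(const struct ecore_bulletin_content *shadow,
			      struct ecore_bulletin_content *local)
{
	struct ecore_bulletin_content tmp;
	uint32_t crc;

	memcpy(&tmp, shadow, sizeof(tmp));	/* snapshot the PF-written copy */
	if (tmp.version == local->version)
		return 0;			/* nothing new since last sample */

	crc = crc32(0, (const uint8_t *)&tmp + sizeof(tmp.crc),
		    sizeof(tmp) - sizeof(tmp.crc));
	if (crc != tmp.crc)
		return 0;			/* PF was mid-update; try again later */

	memcpy(local, &tmp, sizeof(tmp));	/* accept the new content */
	return 1;
}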
+
+enum {
+/*!!!!! Make sure to update STRINGS structure accordingly !!!!!*/
+
+ CHANNEL_TLV_NONE, /* ends tlv sequence */
+ CHANNEL_TLV_ACQUIRE,
+ CHANNEL_TLV_VPORT_START,
+ CHANNEL_TLV_VPORT_UPDATE,
+ CHANNEL_TLV_VPORT_TEARDOWN,
+ CHANNEL_TLV_START_RXQ,
+ CHANNEL_TLV_START_TXQ,
+ CHANNEL_TLV_STOP_RXQS,
+ CHANNEL_TLV_STOP_TXQS,
+ CHANNEL_TLV_UPDATE_RXQ,
+ CHANNEL_TLV_INT_CLEANUP,
+ CHANNEL_TLV_CLOSE,
+ CHANNEL_TLV_RELEASE,
+ CHANNEL_TLV_LIST_END,
+ CHANNEL_TLV_UCAST_FILTER,
+ CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
+ CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,
+ CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
+ CHANNEL_TLV_VPORT_UPDATE_MCAST,
+ CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,
+ CHANNEL_TLV_VPORT_UPDATE_RSS,
+ CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
+ CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
+ CHANNEL_TLV_UPDATE_TUNN_PARAM,
+ CHANNEL_TLV_COALESCE_UPDATE,
+ CHANNEL_TLV_MAX,
+
+ /* Required for iterating over vport-update tlvs.
+	 * Will break in case of non-sequential vport-update tlvs.
+ */
+ CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1,
+
+/*!!!!! Make sure to update STRINGS structure accordingly !!!!!*/
+};
+extern const char *ecore_channel_tlvs_string[];
+
+#endif /* __ECORE_VF_PF_IF_H__ */
diff --git a/src/seastar/dpdk/drivers/net/qede/base/eth_common.h b/src/seastar/dpdk/drivers/net/qede/base/eth_common.h
new file mode 100644
index 00000000..6dc969b0
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/eth_common.h
@@ -0,0 +1,679 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ETH_COMMON__
+#define __ETH_COMMON__
+/********************/
+/* ETH FW CONSTANTS */
+/********************/
+
+/* FP HSI version. FP HSI is compatible if (fwVer.major == drvVer.major &&
+ * fwVer.minor >= drvVer.minor)
+ */
+/* ETH FP HSI Major version */
+#define ETH_HSI_VER_MAJOR 3
+/* ETH FP HSI Minor version */
+#define ETH_HSI_VER_MINOR 10
+
+/* Alias for 8.7.x.x/8.8.x.x ETH FP HSI MINOR version. In this version the
+ * driver is not required to set the pkt_len field in the eth_tx_1st_bd
+ * struct, and tunneling offload is not supported.
+ */
+#define ETH_HSI_VER_NO_PKT_LEN_TUNN 5
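/* Illustrative sketch of the compatibility rule stated above: majors must
 * match and the firmware minor must be at least the driver minor. The
 * fw_major/fw_minor inputs are hypothetical (e.g. read from the loaded
 * firmware image).
 */
static inline int eth_fp_hsi_compatible(unsigned int fw_major,
					unsigned int fw_minor)
{
	return fw_major == ETH_HSI_VER_MAJOR && fw_minor >= ETH_HSI_VER_MINOR;
}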
+
+#define ETH_CACHE_LINE_SIZE 64
+#define ETH_RX_CQE_GAP 32
+#define ETH_MAX_RAMROD_PER_CON 8
+#define ETH_TX_BD_PAGE_SIZE_BYTES 4096
+#define ETH_RX_BD_PAGE_SIZE_BYTES 4096
+#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096
+#define ETH_RX_NUM_NEXT_PAGE_BDS 2
+
+/* Limitation for Tunneled LSO Packets on the offset (in bytes) of the inner IP
+ * header (relevant to LSO for tunneled packet):
+ */
+/* Offset is limited to 253 bytes (inclusive). */
+#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET 253
+/* Offset is limited to 251 bytes (inclusive). */
+#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET 251
+
+#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
+#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
+#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255
+#define ETH_TX_MAX_LSO_HDR_NBD 4
+#define ETH_TX_MIN_BDS_PER_LSO_PKT 3
+#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3
+#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2
+#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2
+/* (QM_REG_TASKBYTECRDCOST_0, QM_VOQ_BYTE_CRD_TASK_COST) -
+ * (VLAN-TAG + CRC + IPG + PREAMBLE)
+ */
+#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8))
+#define ETH_TX_MAX_LSO_HDR_BYTES 510
+/* Number of BDs to consider for LSO sliding window restriction is
+ * (ETH_TX_LSO_WINDOW_BDS_NUM - hdr_nbd)
+ */
+#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1)
+/* Minimum data length (in bytes) in LSO sliding window */
+#define ETH_TX_LSO_WINDOW_MIN_LEN 9700
+/* Maximum LSO packet TCP payload length (in bytes) */
+#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFE000
+/* Number of same-as-last resources in tx switching */
+#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES 320
+/* Value for a connection for which same as last feature is disabled */
+#define ETH_TX_INACTIVE_SAME_AS_LAST 0xFFFF
+
+/* Maximum number of statistics counters */
+#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
+/* Maximum number of statistics counters when doubled VF zone used */
+#define ETH_NUM_STATISTIC_COUNTERS_DOUBLE_VF_ZONE \
+ (ETH_NUM_STATISTIC_COUNTERS - MAX_NUM_VFS / 2)
+/* Maximum number of statistics counters when quad VF zone used */
+#define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \
+ (ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4)
+
+/* Maximum number of buffers, used for RX packet placement */
+#define ETH_RX_MAX_BUFF_PER_PKT 5
+
+/* num of MAC/VLAN filters */
+#define ETH_NUM_MAC_FILTERS 512
+#define ETH_NUM_VLAN_FILTERS 512
+
+/* approx. multicast constants */
+/* CRC seed for multicast bin calculation */
+#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0
+#define ETH_MULTICAST_MAC_BINS 256
+#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32)
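/* Illustrative sketch: hashing a multicast MAC into one of the
 * ETH_MULTICAST_MAC_BINS bins and marking it in the per-register bitmap.
 * The crc32c() helper is an assumption; with 256 bins the low 8 bits of the
 * hash select the bin, and each of the ETH_MULTICAST_MAC_BINS_IN_REGS
 * registers then covers 32 consecutive bins.
 */
#include <stdint.h>

extern uint32_t crc32c(uint32_t seed, const uint8_t *buf, unsigned int len); /* assumed */

static void mcast_set_bin(uint32_t bins[ETH_MULTICAST_MAC_BINS_IN_REGS],
			  const uint8_t mac[6])
{
	uint32_t bin = crc32c(ETH_MULTICAST_BIN_FROM_MAC_SEED, mac, 6) &
		       (ETH_MULTICAST_MAC_BINS - 1);	/* 0..255 */

	bins[bin / 32] |= 1u << (bin % 32);		/* one bit per bin */
}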
+
+/* ethernet vport update constants */
+#define ETH_FILTER_RULES_COUNT 10
+/* number of RSS indirection table entries, per Vport */
+#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128
+/* Length of RSS key (in regs) */
+#define ETH_RSS_KEY_SIZE_REGS 10
+/* number of available RSS engines in K2 */
+#define ETH_RSS_ENGINE_NUM_K2 207
+/* number of available RSS engines in BB */
+#define ETH_RSS_ENGINE_NUM_BB 127
+
+/* TPA constants */
+/* Maximum number of open TPA aggregations */
+#define ETH_TPA_MAX_AGGS_NUM 64
+/* Maximum number of additional buffers, reported by TPA-start CQE */
+#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT
+/* Maximum number of buffers, reported by TPA-continue CQE */
+#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
+/* Maximum number of buffers, reported by TPA-end CQE */
+#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
+
+/* Control frame check constants */
+/* Number of etherType values configured by driver for control frame check */
+#define ETH_CTL_FRAME_ETH_TYPE_NUM 4
+
+
+
+/*
+ * Destination port mode
+ */
+enum dest_port_mode {
+ DEST_PORT_PHY /* Send to physical port. */,
+ DEST_PORT_LOOPBACK /* Send to loopback port. */,
+ DEST_PORT_PHY_LOOPBACK /* Send to physical and loopback port. */,
+ DEST_PORT_DROP /* Drop the packet in PBF. */,
+ MAX_DEST_PORT_MODE
+};
+
+
+/*
+ * Ethernet address type
+ */
+enum eth_addr_type {
+ BROADCAST_ADDRESS,
+ MULTICAST_ADDRESS,
+ UNICAST_ADDRESS,
+ UNKNOWN_ADDRESS,
+ MAX_ETH_ADDR_TYPE
+};
+
+
+struct eth_tx_1st_bd_flags {
+ u8 bitfields;
+/* Set to 1 in the first BD. (for debug) */
+#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0
+/* Do not allow additional VLAN manipulations on this packet. */
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1
+/* Recalculate IP checksum. For tunneled packet - relevant to inner header. */
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2
+/* Recalculate TCP/UDP checksum.
+ * For tunneled packet - relevant to inner header.
+ */
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3
+/* If set, insert VLAN tag from vlan field to the packet.
+ * For tunneled packet - relevant to outer header.
+ */
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4
+/* If set, this is an LSO packet. Note: For Tunneled LSO packets, the offset of
+ * the inner IPV4 (and IPV6) header is limited to 253 (and 251 respectively)
+ * bytes, inclusive.
+ */
+#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5
+/* Recalculate Tunnel IP Checksum (if Tunnel IP Header is IPv4) */
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6
+/* Recalculate Tunnel UDP/GRE Checksum (Depending on Tunnel Type) */
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7
+};
+
+/*
+ * The parsing information data for the first tx bd of a given packet.
+ */
+struct eth_tx_data_1st_bd {
+/* VLAN tag to insert to packet (if enabled by vlan_insertion flag). */
+ __le16 vlan;
+/* Number of BDs in packet. Should be at least 1 in non-LSO packet and at least
+ * 3 in LSO (or Tunnel with IPv6+ext) packet.
+ */
+ u8 nbds;
+ struct eth_tx_1st_bd_flags bd_flags;
+ __le16 bitfields;
+/* Indicates a tunneled packet. Must be set for encapsulated packet. */
+#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1
+#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0
+#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1
+#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1
+/* Total packet length - must be filled for non-LSO packets. */
+#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK 0x3FFF
+#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2
+};
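/* Illustrative sketch of the MASK/SHIFT convention used throughout this file,
 * applied to the first TX BD data of a plain (non-LSO, non-tunneled) packet.
 * The SKETCH_SET_FIELD() helper is an assumption; host-to-little-endian
 * conversion of the 16-bit bitfields word is noted but not performed here.
 */
#include <stdint.h>

#define SKETCH_SET_FIELD(word, name, val) \
	((word) = ((word) & ~((name##_MASK) << (name##_SHIFT))) | \
		  (((val) & (name##_MASK)) << (name##_SHIFT)))

static void fill_tx_1st_bd_data(struct eth_tx_data_1st_bd *d,
				uint16_t pkt_len, uint16_t vlan_tci)
{
	uint16_t bf = 0;

	d->nbds = 1;				/* single BD, non-LSO */
	d->bd_flags.bitfields = 0;
	SKETCH_SET_FIELD(d->bd_flags.bitfields, ETH_TX_1ST_BD_FLAGS_START_BD, 1);
	if (vlan_tci) {
		d->vlan = vlan_tci;		/* htole16() needed on BE hosts */
		SKETCH_SET_FIELD(d->bd_flags.bitfields,
				 ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION, 1);
	}
	SKETCH_SET_FIELD(bf, ETH_TX_DATA_1ST_BD_PKT_LEN, pkt_len);
	d->bitfields = bf;			/* htole16() needed on BE hosts */
}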
+
+/*
+ * The parsing information data for the second tx bd of a given packet.
+ */
+struct eth_tx_data_2nd_bd {
+/* For tunnel with IPv6+ext - Tunnel header IP datagram length (in BYTEs) */
+ __le16 tunn_ip_size;
+ __le16 bitfields1;
+/* For Tunnel header with IPv6 ext. - Inner L2 Header Size (in 2-byte WORDs) */
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
+/* For Tunnel header with IPv6 ext. - Inner L2 Header MAC DA Type
+ * (use enum eth_addr_type)
+ */
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4
+/* Destination port mode. (use enum dest_port_mode) */
+#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3
+#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6
+/* Should be 0 in all the BDs, except the first one. (for debug) */
+#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8
+/* For Tunnel header with IPv6 ext. - Tunnel Type (use enum eth_tx_tunn_type) */
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9
+/* For LSO / Tunnel header with IPv6+ext - Set if inner header is IPv6 */
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11
+/* In tunneling mode - Set to 1 when the Inner header is IPv6 with extension.
+ * Otherwise set to 1 if the header is IPv6 with extension.
+ */
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12
+/* Set to 1 if Tunnel (outer = encapsulating) header has IPv6 ext. (Note: 3rd BD
+ * is required, hence EDPM does not support Tunnel [outer] header with Ipv6Ext)
+ */
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13
+/* Set if (inner) L4 protocol is UDP. (Required when IPv6+ext (or tunnel with
+ * inner or outer Ipv6+ext) and l4_csum is set)
+ */
+#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14
+/* The pseudo header checksum type in the L4 checksum field. Required when
+ * IPv6+ext and l4_csum is set. (use enum eth_l4_pseudo_checksum_mode)
+ */
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15
+ __le16 bitfields2;
+/* For inner/outer header IPv6+ext - (inner) L4 header offset (in 2-byte WORDs).
+ * For regular packet - offset from the beginning of the packet. For tunneled
+ * packet - offset from the beginning of the inner header
+ */
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7
+#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
+};
+
+/*
+ * Firmware data for L2-EDPM packet.
+ */
+struct eth_edpm_fw_data {
+/* Parsing information data from the 1st BD. */
+ struct eth_tx_data_1st_bd data_1st_bd;
+/* Parsing information data from the 2nd BD. */
+ struct eth_tx_data_2nd_bd data_2nd_bd;
+ __le32 reserved;
+};
+
+
+/*
+ * FW debug.
+ */
+struct eth_fast_path_cqe_fw_debug {
+ __le16 reserved2 /* FW reserved. */;
+};
+
+
+/*
+ * tunneling parsing flags
+ */
+struct eth_tunnel_parsing_flags {
+ u8 flags;
+/* 0 - no tunneling, 1 - GENEVE, 2 - GRE, 3 - VXLAN
+ * (use enum eth_rx_tunn_type)
+ */
+#define ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3
+#define ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT 0
+/* If it's not an encapsulated packet then put 0x0. If it's an encapsulated
+ * packet but the tenant-id doesn't exist then put 0x0. Else put 0x1.
+ */
+#define ETH_TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK 0x1
+#define ETH_TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT 2
+/* Type of the next header above the tunneling: 0 - unknown, 1 - L2, 2 - Ipv4,
+ * 3 - IPv6 (use enum tunnel_next_protocol)
+ */
+#define ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK 0x3
+#define ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT 3
+/* The result of comparing the DA-ip of the tunnel header. */
+#define ETH_TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK 0x1
+#define ETH_TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT 5
+#define ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK 0x1
+#define ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT 6
+#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK 0x1
+#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7
+};
+
+/*
+ * PMD flow control bits
+ */
+struct eth_pmd_flow_flags {
+ u8 flags;
+#define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1 /* CQE valid bit */
+#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0
+#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1 /* CQE ring toggle bit */
+#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT 1
+#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F
+#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2
+};
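/* Illustrative sketch of one common way such valid/toggle CQE bits are
 * consumed (an assumption about usage; this header only defines the layout):
 * the consumer keeps an expected toggle value that it flips on every ring
 * wrap, so CQEs left over from the previous lap are not treated as new.
 */
#include <stdint.h>

static int cqe_flags_are_new(const struct eth_pmd_flow_flags *f,
			     uint8_t expected_toggle)
{
	uint8_t valid = (f->flags >> ETH_PMD_FLOW_FLAGS_VALID_SHIFT) &
			ETH_PMD_FLOW_FLAGS_VALID_MASK;
	uint8_t toggle = (f->flags >> ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT) &
			 ETH_PMD_FLOW_FLAGS_TOGGLE_MASK;

	return valid && toggle == expected_toggle;
}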
+
+/*
+ * Regular ETH Rx FP CQE.
+ */
+struct eth_fast_path_rx_reg_cqe {
+ u8 type /* CQE type */;
+ u8 bitfields;
+/* Type of calculated RSS hash (use enum rss_hash_type) */
+#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7
+#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0
+/* Traffic Class */
+#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF
+#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7
+ __le16 pkt_len /* Total packet length (from the parser) */;
+/* Parsing and error flags from the parser */
+ struct parsing_and_err_flags pars_flags;
+ __le16 vlan_tag /* 802.1q VLAN tag */;
+ __le32 rss_hash /* RSS hash result */;
+ __le16 len_on_first_bd /* Number of bytes placed on first BD */;
+ u8 placement_offset /* Offset of placement from BD start */;
+/* Tunnel Parsing Flags */
+ struct eth_tunnel_parsing_flags tunnel_pars_flags;
+ u8 bd_num /* Number of BDs, used for packet */;
+ u8 reserved[9];
+ struct eth_fast_path_cqe_fw_debug fw_debug /* FW reserved. */;
+ u8 reserved1[3];
+ struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */;
+};
+
+
+/*
+ * TPA-continue ETH Rx FP CQE.
+ */
+struct eth_fast_path_rx_tpa_cont_cqe {
+ u8 type /* CQE type */;
+ u8 tpa_agg_index /* TPA aggregation index */;
+/* List of the segment sizes */
+ __le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
+ u8 reserved;
+ u8 reserved1 /* FW reserved. */;
+ __le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE] /* FW reserved. */;
+ u8 reserved3[3];
+ struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */;
+};
+
+
+/*
+ * TPA-end ETH Rx FP CQE.
+ */
+struct eth_fast_path_rx_tpa_end_cqe {
+ u8 type /* CQE type */;
+ u8 tpa_agg_index /* TPA aggregation index */;
+ __le16 total_packet_len /* Total aggregated packet length */;
+ u8 num_of_bds /* Total number of BDs comprising the packet */;
+/* Aggregation end reason. Use enum eth_tpa_end_reason */
+ u8 end_reason;
+ __le16 num_of_coalesced_segs /* Number of coalesced TCP segments */;
+ __le32 ts_delta /* TCP timestamp delta */;
+/* List of the segment sizes */
+ __le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE];
+ __le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE] /* FW reserved. */;
+ __le16 reserved1;
+ u8 reserved2 /* FW reserved. */;
+ struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */;
+};
+
+
+/*
+ * TPA-start ETH Rx FP CQE.
+ */
+struct eth_fast_path_rx_tpa_start_cqe {
+ u8 type /* CQE type */;
+ u8 bitfields;
+/* Type of calculated RSS hash (use enum rss_hash_type) */
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0
+/* Traffic Class */
+#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF
+#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7
+ __le16 seg_len /* Segment length (packetLen from the parser) */;
+/* Parsing and error flags from the parser */
+ struct parsing_and_err_flags pars_flags;
+ __le16 vlan_tag /* 802.1q VLAN tag */;
+ __le32 rss_hash /* RSS hash result */;
+ __le16 len_on_first_bd /* Number of bytes placed on first BD */;
+ u8 placement_offset /* Offset of placement from BD start */;
+/* Tunnel Parsing Flags */
+ struct eth_tunnel_parsing_flags tunnel_pars_flags;
+ u8 tpa_agg_index /* TPA aggregation index */;
+ u8 header_len /* Packet L2+L3+L4 header length */;
+/* Additional BDs length list. */
+ __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
+ struct eth_fast_path_cqe_fw_debug fw_debug /* FW reserved. */;
+ u8 reserved;
+ struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */;
+};
+
+
+/*
+ * The L4 pseudo checksum mode for Ethernet
+ */
+enum eth_l4_pseudo_checksum_mode {
+/* Pseudo Header checksum on packet is calculated with the correct packet length
+ * field.
+ */
+ ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH,
+/* Pseudo Header checksum on packet is calculated with zero length field. */
+ ETH_L4_PSEUDO_CSUM_ZERO_LENGTH,
+ MAX_ETH_L4_PSEUDO_CHECKSUM_MODE
+};
+
+
+
+struct eth_rx_bd {
+	struct regpair addr /* Single continuous buffer */;
+};
+
+
+/*
+ * regular ETH Rx SP CQE
+ */
+struct eth_slow_path_rx_cqe {
+ u8 type /* CQE type */;
+ u8 ramrod_cmd_id;
+ u8 error_flag;
+ u8 reserved[25];
+ __le16 echo;
+ u8 reserved1;
+ struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */;
+};
+
+/*
+ * union for all ETH Rx CQE types
+ */
+union eth_rx_cqe {
+/* Regular FP CQE */
+ struct eth_fast_path_rx_reg_cqe fast_path_regular;
+/* TPA-start CQE */
+ struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start;
+/* TPA-continue CQE */
+ struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont;
+/* TPA-end CQE */
+ struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end;
+ struct eth_slow_path_rx_cqe slow_path /* SP CQE */;
+};
+
+
+/*
+ * ETH Rx CQE type
+ */
+enum eth_rx_cqe_type {
+ ETH_RX_CQE_TYPE_UNUSED,
+ ETH_RX_CQE_TYPE_REGULAR /* Regular FP ETH Rx CQE */,
+ ETH_RX_CQE_TYPE_SLOW_PATH /* Slow path ETH Rx CQE */,
+ ETH_RX_CQE_TYPE_TPA_START /* TPA start ETH Rx CQE */,
+ ETH_RX_CQE_TYPE_TPA_CONT /* TPA Continue ETH Rx CQE */,
+ ETH_RX_CQE_TYPE_TPA_END /* TPA end ETH Rx CQE */,
+ MAX_ETH_RX_CQE_TYPE
+};
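/* Illustrative sketch: dispatching a received CQE on its type. Every member
 * of union eth_rx_cqe starts with the same u8 type field, so it can be read
 * through any of them; the handling in each branch is only outlined.
 */
static void rx_handle_cqe(const union eth_rx_cqe *cqe)
{
	switch (cqe->fast_path_regular.type) {
	case ETH_RX_CQE_TYPE_REGULAR:
		/* use pkt_len, bd_num and placement_offset to build the frame */
		break;
	case ETH_RX_CQE_TYPE_TPA_START:
	case ETH_RX_CQE_TYPE_TPA_CONT:
	case ETH_RX_CQE_TYPE_TPA_END:
		/* track the aggregation identified by tpa_agg_index */
		break;
	case ETH_RX_CQE_TYPE_SLOW_PATH:
		/* hand ramrod completions to the slow-path handler */
		break;
	default:
		break;
	}
}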
+
+
+/*
+ * Wrapper for PD RX CQE - used in order to cover full cache line when writing
+ * CQE
+ */
+struct eth_rx_pmd_cqe {
+ union eth_rx_cqe cqe /* CQE data itself */;
+ u8 reserved[ETH_RX_CQE_GAP];
+};
+
+
+/*
+ * Eth RX Tunnel Type
+ */
+enum eth_rx_tunn_type {
+ ETH_RX_NO_TUNN /* No Tunnel. */,
+ ETH_RX_TUNN_GENEVE /* GENEVE Tunnel. */,
+ ETH_RX_TUNN_GRE /* GRE Tunnel. */,
+ ETH_RX_TUNN_VXLAN /* VXLAN Tunnel. */,
+ MAX_ETH_RX_TUNN_TYPE
+};
+
+
+
+/*
+ * Aggregation end reason.
+ */
+enum eth_tpa_end_reason {
+ ETH_AGG_END_UNUSED,
+ ETH_AGG_END_SP_UPDATE /* SP configuration update */,
+/* Maximum aggregation length or maximum buffer number used. */
+ ETH_AGG_END_MAX_LEN,
+/* TCP PSH flag or TCP payload length below continue threshold. */
+ ETH_AGG_END_LAST_SEG,
+ ETH_AGG_END_TIMEOUT /* Timeout expiration. */,
+/* Packet headers are not consistent: different IPv4 TOS, TTL or flags, IPv6
+ * TC, Hop limit or Flow label, TCP header length or TS options. In GRO,
+ * different TS value, SMAC, DMAC, ackNum, windowSize or VLAN
+ */
+ ETH_AGG_END_NOT_CONSISTENT,
+/* Out of order or retransmission packet: sequence, ack or timestamp not
+ * consistent with previous segment.
+ */
+ ETH_AGG_END_OUT_OF_ORDER,
+/* Next segment can't be aggregated due to LLC/SNAP, IP error, IP fragment,
+ * IPv4 options, IPv6 extension, IP ECN = CE, TCP errors, TCP options, zero
+ * TCP payload length, TCP flags or unsupported tunnel header options.
+ */
+ ETH_AGG_END_NON_TPA_SEG,
+ MAX_ETH_TPA_END_REASON
+};
+
+
+
+/*
+ * The first tx bd of a given packet
+ */
+struct eth_tx_1st_bd {
+ struct regpair addr /* Single continuous buffer */;
+ __le16 nbytes /* Number of bytes in this BD. */;
+ struct eth_tx_data_1st_bd data /* Parsing information data. */;
+};
+
+
+
+/*
+ * The second tx bd of a given packet
+ */
+struct eth_tx_2nd_bd {
+ struct regpair addr /* Single continuous buffer */;
+ __le16 nbytes /* Number of bytes in this BD. */;
+ struct eth_tx_data_2nd_bd data /* Parsing information data. */;
+};
+
+
+/*
+ * The parsing information data for the third tx bd of a given packet.
+ */
+struct eth_tx_data_3rd_bd {
+ __le16 lso_mss /* For LSO packet - the MSS in bytes. */;
+ __le16 bitfields;
+/* For LSO with inner/outer IPv6+ext - TCP header length (in 4-byte WORDs) */
+#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF
+#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
+/* LSO - number of BDs which contain headers. value should be in range
+ * (1..ETH_TX_MAX_LSO_HDR_NBD).
+ */
+#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF
+#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4
+/* Should be 0 in all the BDs, except the first one. (for debug) */
+#define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1
+#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8
+#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F
+#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9
+/* For tunnel with IPv6+ext - Pointer to tunnel L4 Header (in 2-byte WORDs) */
+ u8 tunn_l4_hdr_start_offset_w;
+/* For tunnel with IPv6+ext - Total size of Tunnel Header (in 2-byte WORDs) */
+ u8 tunn_hdr_size_w;
+};
+
+/*
+ * The third tx bd of a given packet
+ */
+struct eth_tx_3rd_bd {
+ struct regpair addr /* Single continuous buffer */;
+ __le16 nbytes /* Number of bytes in this BD. */;
+ struct eth_tx_data_3rd_bd data /* Parsing information data. */;
+};
+
+
+/*
+ * Complementary information for the regular tx bd of a given packet.
+ */
+struct eth_tx_data_bd {
+ __le16 reserved0;
+ __le16 bitfields;
+#define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF
+#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0
+/* Should be 0 in all the BDs, except the first one. (for debug) */
+#define ETH_TX_DATA_BD_START_BD_MASK 0x1
+#define ETH_TX_DATA_BD_START_BD_SHIFT 8
+#define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F
+#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9
+ __le16 reserved3;
+};
+
+/*
+ * The common regular TX BD ring element
+ */
+struct eth_tx_bd {
+ struct regpair addr /* Single continuous buffer */;
+ __le16 nbytes /* Number of bytes in this BD. */;
+ struct eth_tx_data_bd data /* Complementary information. */;
+};
+
+
+union eth_tx_bd_types {
+ struct eth_tx_1st_bd first_bd /* The first tx bd of a given packet */;
+/* The second tx bd of a given packet */
+ struct eth_tx_2nd_bd second_bd;
+ struct eth_tx_3rd_bd third_bd /* The third tx bd of a given packet */;
+ struct eth_tx_bd reg_bd /* The common non-special bd */;
+};
+
+
+
+
+
+
+/*
+ * Eth Tx Tunnel Type
+ */
+enum eth_tx_tunn_type {
+ ETH_TX_TUNN_GENEVE /* GENEVE Tunnel. */,
+ ETH_TX_TUNN_TTAG /* T-Tag Tunnel. */,
+ ETH_TX_TUNN_GRE /* GRE Tunnel. */,
+ ETH_TX_TUNN_VXLAN /* VXLAN Tunnel. */,
+ MAX_ETH_TX_TUNN_TYPE
+};
+
+
+/*
+ * Xstorm Queue Zone
+ */
+struct xstorm_eth_queue_zone {
+/* Tx interrupt coalescing TimeSet */
+ struct coalescing_timeset int_coalescing_timeset;
+ u8 reserved[7];
+};
+
+
+/*
+ * ETH doorbell data
+ */
+struct eth_db_data {
+ u8 params;
+/* destination of doorbell (use enum db_dest) */
+#define ETH_DB_DATA_DEST_MASK 0x3
+#define ETH_DB_DATA_DEST_SHIFT 0
+/* aggregative command to CM (use enum db_agg_cmd_sel) */
+#define ETH_DB_DATA_AGG_CMD_MASK 0x3
+#define ETH_DB_DATA_AGG_CMD_SHIFT 2
+#define ETH_DB_DATA_BYPASS_EN_MASK 0x1 /* enable QM bypass */
+#define ETH_DB_DATA_BYPASS_EN_SHIFT 4
+#define ETH_DB_DATA_RESERVED_MASK 0x1
+#define ETH_DB_DATA_RESERVED_SHIFT 5
+/* aggregative value selection */
+#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6
+/* bit for every DQ counter flags in CM context that DQ can increment */
+ u8 agg_flags;
+ __le16 bd_prod;
+};
+
+#endif /* __ETH_COMMON__ */
diff --git a/src/seastar/dpdk/drivers/net/qede/base/mcp_public.h b/src/seastar/dpdk/drivers/net/qede/base/mcp_public.h
new file mode 100644
index 00000000..fcf98477
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/mcp_public.h
@@ -0,0 +1,1741 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+/****************************************************************************
+ *
+ * Name: mcp_public.h
+ *
+ * Description: MCP public data
+ *
+ * Created: 13/01/2013 yanivr
+ *
+ ****************************************************************************/
+
+#ifndef MCP_PUBLIC_H
+#define MCP_PUBLIC_H
+
+#define VF_MAX_STATIC 192 /* In case of AH */
+
+#define MCP_GLOB_PATH_MAX 2
+#define MCP_PORT_MAX 2 /* Global */
+#define MCP_GLOB_PORT_MAX 4 /* Global */
+#define MCP_GLOB_FUNC_MAX 16 /* Global */
+
+typedef u32 offsize_t; /* In DWORDS !!! */
+/* Offset from the beginning of the MCP scratchpad */
+#define OFFSIZE_OFFSET_SHIFT 0
+#define OFFSIZE_OFFSET_MASK 0x0000ffff
+/* Size of specific element (not the whole array if any) */
+#define OFFSIZE_SIZE_SHIFT 16
+#define OFFSIZE_SIZE_MASK 0xffff0000
+
+/* SECTION_OFFSET calculates the offset in bytes out of offsize */
+#define SECTION_OFFSET(_offsize) \
+ ((((_offsize & OFFSIZE_OFFSET_MASK) >> OFFSIZE_OFFSET_SHIFT) << 2))
+
+/* SECTION_SIZE calculates the size in bytes out of offsize */
+#define SECTION_SIZE(_offsize) \
+ (((_offsize & OFFSIZE_SIZE_MASK) >> OFFSIZE_SIZE_SHIFT) << 2)
+
+/* SECTION_ADDR returns the GRC addr of a section, given offsize and index
+ * within section
+ */
+#define SECTION_ADDR(_offsize, idx) \
+ (MCP_REG_SCRATCH + \
+ SECTION_OFFSET(_offsize) + (SECTION_SIZE(_offsize) * idx))
+
+/* SECTION_OFFSIZE_ADDR returns the GRC addr to the offsize address. Use
+ * offsetof, since the OFFSETUP collides with the firmware definition
+ */
+#define SECTION_OFFSIZE_ADDR(_pub_base, _section) \
+ (_pub_base + offsetof(struct mcp_public_data, sections[_section]))
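/* Illustrative sketch: decoding an offsize_t word with the macros above. For
 * example, 0x00200123 describes elements of 0x20 dwords (0x80 bytes) placed
 * 0x123 dwords (0x48c bytes) into the scratchpad, so
 * SECTION_ADDR(0x00200123, 2) evaluates to MCP_REG_SCRATCH + 0x48c + 2 * 0x80.
 */
#include <stdint.h>

static void decode_offsize(offsize_t offsize,
			   uint32_t *offset_bytes, uint32_t *elem_size_bytes)
{
	*offset_bytes = SECTION_OFFSET(offsize);	/* dword field scaled to bytes */
	*elem_size_bytes = SECTION_SIZE(offsize);	/* per-element size in bytes */
}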
+/* PHY configuration */
+struct eth_phy_cfg {
+/* 0 = autoneg, 1000/10000/20000/25000/40000/50000/100000 */
+ u32 speed;
+#define ETH_SPEED_AUTONEG 0
+#define ETH_SPEED_SMARTLINQ 0x8
+
+ u32 pause; /* bitmask */
+#define ETH_PAUSE_NONE 0x0
+#define ETH_PAUSE_AUTONEG 0x1
+#define ETH_PAUSE_RX 0x2
+#define ETH_PAUSE_TX 0x4
+
+ u32 adv_speed; /* Default should be the speed_cap_mask */
+ u32 loopback_mode;
+#define ETH_LOOPBACK_NONE (0)
+/* Serdes loopback. In AH, it refers to Near End */
+#define ETH_LOOPBACK_INT_PHY (1)
+#define ETH_LOOPBACK_EXT_PHY (2) /* External PHY Loopback */
+/* External Loopback (Require loopback plug) */
+#define ETH_LOOPBACK_EXT (3)
+#define ETH_LOOPBACK_MAC (4) /* MAC Loopback - not supported */
+#define ETH_LOOPBACK_CNIG_AH_ONLY_0123 (5) /* Port to itself */
+#define ETH_LOOPBACK_CNIG_AH_ONLY_2301 (6) /* Port to Port */
+#define ETH_LOOPBACK_PCS_AH_ONLY (7) /* PCS loopback (TX to RX) */
+/* Loop RX packet from PCS to TX */
+#define ETH_LOOPBACK_REVERSE_MAC_AH_ONLY (8)
+/* Remote Serdes Loopback (RX to TX) */
+#define ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY (9)
+
+ /* Used to configure the EEE Tx LPI timer, has several modes of
+ * operation, according to bits 29:28
+ * 2'b00: Timer will be configured by nvram, output will be the value
+ * from nvram.
+ * 2'b01: Timer will be configured by nvram, output will be in
+ * 16xmicroseconds.
+ * 2'b10: bits 1:0 contain an nvram value which will be used instead
+ * of the one located in the nvram. Output will be that value.
+ * 2'b11: bits 19:0 contain the idle timer in microseconds; output
+ * will be in 16xmicroseconds.
+ * Bits 31:30 should be 2'b11 in order for EEE to be enabled.
+ */
+ u32 eee_mode;
+#define EEE_MODE_TIMER_USEC_MASK (0x000fffff)
+#define EEE_MODE_TIMER_USEC_OFFSET (0)
+#define EEE_MODE_TIMER_USEC_BALANCED_TIME (0xa00)
+#define EEE_MODE_TIMER_USEC_AGGRESSIVE_TIME (0x100)
+#define EEE_MODE_TIMER_USEC_LATENCY_TIME (0x6000)
+/* Set by the driver to request that the status timer will be in microseconds
+ * and not in EEE policy definition
+ */
+#define EEE_MODE_OUTPUT_TIME (1 << 28)
+/* Set by the driver to override default nvm timer */
+#define EEE_MODE_OVERRIDE_NVRAM (1 << 29)
+#define EEE_MODE_ENABLE_LPI (1 << 30) /* Set when */
+#define EEE_MODE_ADV_LPI (1 << 31) /* Set when EEE is enabled */
+};
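/* Illustrative sketch: composing eee_mode per the bit description above for
 * the case where the driver supplies its own idle timer (bits 29:28 = 2'b11,
 * so bits 19:0 carry the timer in microseconds) and EEE is to be enabled
 * (bits 31:30 = 2'b11). Using EEE_MODE_TIMER_USEC_BALANCED_TIME as the timer
 * value below is only an example.
 */
#include <stdint.h>

static uint32_t eee_mode_with_driver_timer(uint32_t idle_timer_usec)
{
	return (idle_timer_usec & EEE_MODE_TIMER_USEC_MASK) |
	       EEE_MODE_OUTPUT_TIME | EEE_MODE_OVERRIDE_NVRAM |	/* 2'b11: driver timer */
	       EEE_MODE_ENABLE_LPI | EEE_MODE_ADV_LPI;		/* 2'b11: EEE enabled */
}
/* e.g.: cfg.eee_mode = eee_mode_with_driver_timer(EEE_MODE_TIMER_USEC_BALANCED_TIME); */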
+
+struct port_mf_cfg {
+ u32 dynamic_cfg; /* device control channel */
+#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff
+#define PORT_MF_CFG_OV_TAG_SHIFT 0
+#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK
+
+ u32 reserved[1];
+};
+
+/* DO NOT add new fields in the middle
+ * MUST be synced with struct pmm_stats_map
+ */
+struct eth_stats {
+ u64 r64; /* 0x00 (Offset 0x00 ) RX 64-byte frame counter*/
+ u64 r127; /* 0x01 (Offset 0x08 ) RX 65 to 127 byte frame counter*/
+ u64 r255; /* 0x02 (Offset 0x10 ) RX 128 to 255 byte frame counter*/
+ u64 r511; /* 0x03 (Offset 0x18 ) RX 256 to 511 byte frame counter*/
+ u64 r1023; /* 0x04 (Offset 0x20 ) RX 512 to 1023 byte frame counter*/
+/* 0x05 (Offset 0x28 ) RX 1024 to 1518 byte frame counter */
+ u64 r1518;
+/* 0x06 (Offset 0x30 ) RX 1519 to 1522 byte VLAN-tagged frame counter */
+ u64 r1522;
+ u64 r2047; /* 0x07 (Offset 0x38 ) RX 1519 to 2047 byte frame counter*/
+ u64 r4095; /* 0x08 (Offset 0x40 ) RX 2048 to 4095 byte frame counter*/
+ u64 r9216; /* 0x09 (Offset 0x48 ) RX 4096 to 9216 byte frame counter*/
+/* 0x0A (Offset 0x50 ) RX 9217 to 16383 byte frame counter */
+ u64 r16383;
+ u64 rfcs; /* 0x0F (Offset 0x58 ) RX FCS error frame counter*/
+ u64 rxcf; /* 0x10 (Offset 0x60 ) RX control frame counter*/
+ u64 rxpf; /* 0x11 (Offset 0x68 ) RX pause frame counter*/
+ u64 rxpp; /* 0x12 (Offset 0x70 ) RX PFC frame counter*/
+ u64 raln; /* 0x16 (Offset 0x78 ) RX alignment error counter*/
+ u64 rfcr; /* 0x19 (Offset 0x80 ) RX false carrier counter */
+ u64 rovr; /* 0x1A (Offset 0x88 ) RX oversized frame counter*/
+ u64 rjbr; /* 0x1B (Offset 0x90 ) RX jabber frame counter */
+ u64 rund; /* 0x34 (Offset 0x98 ) RX undersized frame counter */
+ u64 rfrg; /* 0x35 (Offset 0xa0 ) RX fragment counter */
+ u64 t64; /* 0x40 (Offset 0xa8 ) TX 64-byte frame counter */
+ u64 t127; /* 0x41 (Offset 0xb0 ) TX 65 to 127 byte frame counter */
+ u64 t255; /* 0x42 (Offset 0xb8 ) TX 128 to 255 byte frame counter*/
+ u64 t511; /* 0x43 (Offset 0xc0 ) TX 256 to 511 byte frame counter*/
+ u64 t1023; /* 0x44 (Offset 0xc8 ) TX 512 to 1023 byte frame counter*/
+/* 0x45 (Offset 0xd0 ) TX 1024 to 1518 byte frame counter */
+ u64 t1518;
+/* 0x47 (Offset 0xd8 ) TX 1519 to 2047 byte frame counter */
+ u64 t2047;
+/* 0x48 (Offset 0xe0 ) TX 2048 to 4095 byte frame counter */
+ u64 t4095;
+/* 0x49 (Offset 0xe8 ) TX 4096 to 9216 byte frame counter */
+ u64 t9216;
+/* 0x4A (Offset 0xf0 ) TX 9217 to 16383 byte frame counter */
+ u64 t16383;
+ u64 txpf; /* 0x50 (Offset 0xf8 ) TX pause frame counter */
+ u64 txpp; /* 0x51 (Offset 0x100) TX PFC frame counter */
+/* 0x6C (Offset 0x108) Transmit Logical Type LLFC message counter */
+ u64 tlpiec;
+ u64 tncl; /* 0x6E (Offset 0x110) Transmit Total Collision Counter */
+ u64 rbyte; /* 0x3d (Offset 0x118) RX byte counter */
+ u64 rxuca; /* 0x0c (Offset 0x120) RX UC frame counter */
+ u64 rxmca; /* 0x0d (Offset 0x128) RX MC frame counter */
+ u64 rxbca; /* 0x0e (Offset 0x130) RX BC frame counter */
+/* 0x22 (Offset 0x138) RX good frame (good CRC, not oversized, no ERROR) */
+ u64 rxpok;
+ u64 tbyte; /* 0x6f (Offset 0x140) TX byte counter */
+ u64 txuca; /* 0x4d (Offset 0x148) TX UC frame counter */
+ u64 txmca; /* 0x4e (Offset 0x150) TX MC frame counter */
+ u64 txbca; /* 0x4f (Offset 0x158) TX BC frame counter */
+ u64 txcf; /* 0x54 (Offset 0x160) TX control frame counter */
+/* HSI - Cannot add more stats to this struct. If needed, a new struct must
+ * be opened.
+ */
+
+};
+
+struct brb_stats {
+ u64 brb_truncate[8];
+ u64 brb_discard[8];
+};
+
+struct port_stats {
+ struct brb_stats brb;
+ struct eth_stats eth;
+};
+
+/*----+------------------------------------------------------------------------
+ * C | Number and | Ports in| Ports in|2 PHY-s |# of ports|# of engines
+ * h | rate of | team #1 | team #2 |are used|per path | (paths)
+ * i | physical | | | | | enabled
+ * p | ports | | | | |
+ *====+============+=========+=========+========+==========+===================
+ * BB | 1x100G     | This is a special mode, where there are actually 2 HW functions
+ * BB | 2x10/20Gbps| 0,1 | NA | No | 1 | 1
+ * BB | 2x40 Gbps | 0,1 | NA | Yes | 1 | 1
+ * BB | 2x50Gbps | 0,1 | NA | No | 1 | 1
+ * BB | 4x10Gbps | 0,2 | 1,3 | No | 1/2 | 1,2 (2 is optional)
+ * BB | 4x10Gbps | 0,1 | 2,3 | No | 1/2 | 1,2 (2 is optional)
+ * BB | 4x10Gbps | 0,3 | 1,2 | No | 1/2 | 1,2 (2 is optional)
+ * BB | 4x10Gbps | 0,1,2,3 | NA | No | 1 | 1
+ * AH | 2x10/20Gbps| 0,1 | NA | NA | 1 | NA
+ * AH | 4x10Gbps | 0,1 | 2,3 | NA | 2 | NA
+ * AH | 4x10Gbps | 0,2 | 1,3 | NA | 2 | NA
+ * AH | 4x10Gbps | 0,3 | 1,2 | NA | 2 | NA
+ * AH | 4x10Gbps | 0,1,2,3 | NA | NA | 1 | NA
+ *====+============+=========+=========+========+==========+===================
+ */
+
+#define CMT_TEAM0 0
+#define CMT_TEAM1 1
+#define CMT_TEAM_MAX 2
+
+struct couple_mode_teaming {
+ u8 port_cmt[MCP_GLOB_PORT_MAX];
+#define PORT_CMT_IN_TEAM (1 << 0)
+
+#define PORT_CMT_PORT_ROLE (1 << 1)
+#define PORT_CMT_PORT_INACTIVE (0 << 1)
+#define PORT_CMT_PORT_ACTIVE (1 << 1)
+
+#define PORT_CMT_TEAM_MASK (1 << 2)
+#define PORT_CMT_TEAM0 (0 << 2)
+#define PORT_CMT_TEAM1 (1 << 2)
+};
+
+/**************************************
+ * LLDP and DCBX HSI structures
+ **************************************/
+#define LLDP_CHASSIS_ID_STAT_LEN 4
+#define LLDP_PORT_ID_STAT_LEN 4
+#define DCBX_MAX_APP_PROTOCOL 32
+#define MAX_SYSTEM_LLDP_TLV_DATA 32
+
+typedef enum _lldp_agent_e {
+ LLDP_NEAREST_BRIDGE = 0,
+ LLDP_NEAREST_NON_TPMR_BRIDGE,
+ LLDP_NEAREST_CUSTOMER_BRIDGE,
+ LLDP_MAX_LLDP_AGENTS
+} lldp_agent_e;
+
+struct lldp_config_params_s {
+ u32 config;
+#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff
+#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0
+#define LLDP_CONFIG_HOLD_MASK 0x00000f00
+#define LLDP_CONFIG_HOLD_SHIFT 8
+#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000
+#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12
+#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000
+#define LLDP_CONFIG_ENABLE_RX_SHIFT 30
+#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000
+#define LLDP_CONFIG_ENABLE_TX_SHIFT 31
+ /* Holds local Chassis ID TLV header, subtype and 9B of payload.
+	 * If first byte is 0, then we will use default chassis ID
+ */
+ u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+ /* Holds local Port ID TLV header, subtype and 9B of payload.
+	 * If first byte is 0, then we will use default port ID
+ */
+ u32 local_port_id[LLDP_PORT_ID_STAT_LEN];
+};
+
+struct lldp_status_params_s {
+ u32 prefix_seq_num;
+ u32 status; /* TBD */
+ /* Holds remote Chassis ID TLV header, subtype and 9B of payload. */
+ u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+ /* Holds remote Port ID TLV header, subtype and 9B of payload. */
+ u32 peer_port_id[LLDP_PORT_ID_STAT_LEN];
+ u32 suffix_seq_num;
+};
+
+struct dcbx_ets_feature {
+ u32 flags;
+#define DCBX_ETS_ENABLED_MASK 0x00000001
+#define DCBX_ETS_ENABLED_SHIFT 0
+#define DCBX_ETS_WILLING_MASK 0x00000002
+#define DCBX_ETS_WILLING_SHIFT 1
+#define DCBX_ETS_ERROR_MASK 0x00000004
+#define DCBX_ETS_ERROR_SHIFT 2
+#define DCBX_ETS_CBS_MASK 0x00000008
+#define DCBX_ETS_CBS_SHIFT 3
+#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
+#define DCBX_ETS_MAX_TCS_SHIFT 4
+#define DCBX_OOO_TC_MASK 0x00000f00
+#define DCBX_OOO_TC_SHIFT 8
+/* Entries in the tc table are organized such that the left-most is pri 0 and
+ * the right-most is prio 7
+ */
+
+ u32 pri_tc_tbl[1];
+/* Fixed TCP OOO TC usage is deprecated and used only for driver backward
+ * compatibility
+ */
+#define DCBX_TCP_OOO_TC (4)
+#define DCBX_TCP_OOO_K2_4PORT_TC (3)
+
+#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_TCP_OOO_TC + 1)
+#define DCBX_CEE_STRICT_PRIORITY 0xf
+/* Entries in the tc table are organized such that the left-most is pri 0 and
+ * the right-most is prio 7
+ */
+
+ u32 tc_bw_tbl[2];
+/* Entries in the tc table are organized such that the left-most is pri 0 and
+ * the right-most is prio 7
+ */
+
+ u32 tc_tsa_tbl[2];
+#define DCBX_ETS_TSA_STRICT 0
+#define DCBX_ETS_TSA_CBS 1
+#define DCBX_ETS_TSA_ETS 2
+};
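/* Illustrative sketch: reading the traffic class assigned to a priority out
 * of pri_tc_tbl[0], assuming each priority occupies one 4-bit nibble with
 * priority 0 in the most-significant nibble, per the "left-most is pri 0"
 * note above.
 */
#include <stdint.h>

static uint8_t ets_prio_to_tc(const struct dcbx_ets_feature *ets, uint8_t prio)
{
	return (ets->pri_tc_tbl[0] >> ((7 - prio) * 4)) & 0xf;
}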
+
+struct dcbx_app_priority_entry {
+ u32 entry;
+#define DCBX_APP_PRI_MAP_MASK 0x000000ff
+#define DCBX_APP_PRI_MAP_SHIFT 0
+#define DCBX_APP_PRI_0 0x01
+#define DCBX_APP_PRI_1 0x02
+#define DCBX_APP_PRI_2 0x04
+#define DCBX_APP_PRI_3 0x08
+#define DCBX_APP_PRI_4 0x10
+#define DCBX_APP_PRI_5 0x20
+#define DCBX_APP_PRI_6 0x40
+#define DCBX_APP_PRI_7 0x80
+#define DCBX_APP_SF_MASK 0x00000300
+#define DCBX_APP_SF_SHIFT 8
+#define DCBX_APP_SF_ETHTYPE 0
+#define DCBX_APP_SF_PORT 1
+#define DCBX_APP_SF_IEEE_MASK 0x0000f000
+#define DCBX_APP_SF_IEEE_SHIFT 12
+#define DCBX_APP_SF_IEEE_RESERVED 0
+#define DCBX_APP_SF_IEEE_ETHTYPE 1
+#define DCBX_APP_SF_IEEE_TCP_PORT 2
+#define DCBX_APP_SF_IEEE_UDP_PORT 3
+#define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4
+
+#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
+#define DCBX_APP_PROTOCOL_ID_SHIFT 16
+};
+
+
+/* FW structure in BE */
+struct dcbx_app_priority_feature {
+ u32 flags;
+#define DCBX_APP_ENABLED_MASK 0x00000001
+#define DCBX_APP_ENABLED_SHIFT 0
+#define DCBX_APP_WILLING_MASK 0x00000002
+#define DCBX_APP_WILLING_SHIFT 1
+#define DCBX_APP_ERROR_MASK 0x00000004
+#define DCBX_APP_ERROR_SHIFT 2
+ /* Not in use
+ #define DCBX_APP_DEFAULT_PRI_MASK 0x00000f00
+ #define DCBX_APP_DEFAULT_PRI_SHIFT 8
+ */
+#define DCBX_APP_MAX_TCS_MASK 0x0000f000
+#define DCBX_APP_MAX_TCS_SHIFT 12
+#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000
+#define DCBX_APP_NUM_ENTRIES_SHIFT 16
+ struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
+};
+
+/* FW structure in BE */
+struct dcbx_features {
+ /* PG feature */
+ struct dcbx_ets_feature ets;
+ /* PFC feature */
+ u32 pfc;
+#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff
+#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80
+
+#define DCBX_PFC_FLAGS_MASK 0x0000ff00
+#define DCBX_PFC_FLAGS_SHIFT 8
+#define DCBX_PFC_CAPS_MASK 0x00000f00
+#define DCBX_PFC_CAPS_SHIFT 8
+#define DCBX_PFC_MBC_MASK 0x00004000
+#define DCBX_PFC_MBC_SHIFT 14
+#define DCBX_PFC_WILLING_MASK 0x00008000
+#define DCBX_PFC_WILLING_SHIFT 15
+#define DCBX_PFC_ENABLED_MASK 0x00010000
+#define DCBX_PFC_ENABLED_SHIFT 16
+#define DCBX_PFC_ERROR_MASK 0x00020000
+#define DCBX_PFC_ERROR_SHIFT 17
+
+ /* APP feature */
+ struct dcbx_app_priority_feature app;
+};
+
+struct dcbx_local_params {
+ u32 config;
+#define DCBX_CONFIG_VERSION_MASK 0x00000007
+#define DCBX_CONFIG_VERSION_SHIFT 0
+#define DCBX_CONFIG_VERSION_DISABLED 0
+#define DCBX_CONFIG_VERSION_IEEE 1
+#define DCBX_CONFIG_VERSION_CEE 2
+#define DCBX_CONFIG_VERSION_STATIC 4
+
+ u32 flags;
+ struct dcbx_features features;
+};
+
+struct dcbx_mib {
+ u32 prefix_seq_num;
+ u32 flags;
+ /*
+ #define DCBX_CONFIG_VERSION_MASK 0x00000007
+ #define DCBX_CONFIG_VERSION_SHIFT 0
+ #define DCBX_CONFIG_VERSION_DISABLED 0
+ #define DCBX_CONFIG_VERSION_IEEE 1
+ #define DCBX_CONFIG_VERSION_CEE 2
+ #define DCBX_CONFIG_VERSION_STATIC 4
+ */
+ struct dcbx_features features;
+ u32 suffix_seq_num;
+};
+
+struct lldp_system_tlvs_buffer_s {
+ u16 valid;
+ u16 length;
+ u32 data[MAX_SYSTEM_LLDP_TLV_DATA];
+};
+
+struct dcb_dscp_map {
+ u32 flags;
+#define DCB_DSCP_ENABLE_MASK 0x1
+#define DCB_DSCP_ENABLE_SHIFT 0
+#define DCB_DSCP_ENABLE 1
+ u32 dscp_pri_map[8];
+};
+
+/**************************************/
+/* */
+/* P U B L I C G L O B A L */
+/* */
+/**************************************/
+struct public_global {
+	u32 max_path; /* 32bit is wasteful, but this will be used often */
+/* (Global) 32bit is wasteful, but this will be used often */
+ u32 max_ports;
+#define MODE_1P 1 /* TBD - NEED TO THINK OF A BETTER NAME */
+#define MODE_2P 2
+#define MODE_3P 3
+#define MODE_4P 4
+ u32 debug_mb_offset;
+ u32 phymod_dbg_mb_offset;
+ struct couple_mode_teaming cmt;
+/* Temperature in Celsius (-255C / +255C), measured every second. */
+ s32 internal_temperature;
+ u32 mfw_ver;
+ u32 running_bundle_id;
+ s32 external_temperature;
+ u32 mdump_reason;
+#define MDUMP_REASON_INTERNAL_ERROR (1 << 0)
+#define MDUMP_REASON_EXTERNAL_TRIGGER (1 << 1)
+#define MDUMP_REASON_DUMP_AGED (1 << 2)
+ u32 ext_phy_upgrade_fw;
+#define EXT_PHY_FW_UPGRADE_STATUS_MASK (0x0000ffff)
+#define EXT_PHY_FW_UPGRADE_STATUS_SHIFT (0)
+#define EXT_PHY_FW_UPGRADE_STATUS_IN_PROGRESS (1)
+#define EXT_PHY_FW_UPGRADE_STATUS_FAILED (2)
+#define EXT_PHY_FW_UPGRADE_STATUS_SUCCESS (3)
+#define EXT_PHY_FW_UPGRADE_TYPE_MASK (0xffff0000)
+#define EXT_PHY_FW_UPGRADE_TYPE_SHIFT (16)
+};
+
+/**************************************/
+/* */
+/* P U B L I C P A T H */
+/* */
+/**************************************/
+
+/****************************************************************************
+ * Shared Memory 2 Region *
+ ****************************************************************************/
+/* The fw_flr_ack is actually built in the following way: */
+/* 8 bit: PF ack */
+/* 128 bit: VF ack */
+/* 8 bit: ios_dis_ack */
+/* In order to maintain endianness in the mailbox hsi, we want to keep using */
+/* u32. The fw must have the VF right after the PF since this is how it */
+/* accesses arrays (it always expects the VF to reside after the PF, which */
+/* makes the calculation much easier for it). */
+/* In order to satisfy both limitations, and keep the struct small, the code */
+/* will abuse the structure defined here to achieve the actual partition */
+/* above. */
+/****************************************************************************/
+struct fw_flr_mb {
+ u32 aggint;
+ u32 opgen_addr;
+ u32 accum_ack; /* 0..15:PF, 16..207:VF, 256..271:IOV_DIS */
+#define ACCUM_ACK_PF_BASE 0
+#define ACCUM_ACK_PF_SHIFT 0
+
+#define ACCUM_ACK_VF_BASE 8
+#define ACCUM_ACK_VF_SHIFT 3
+
+#define ACCUM_ACK_IOV_DIS_BASE 256
+#define ACCUM_ACK_IOV_DIS_SHIFT 8
+
+};
+
+struct public_path {
+ struct fw_flr_mb flr_mb;
+ /*
+	 * mcp_vf_disabled is set by the MCP to indicate to the driver which
+	 * VFs were disabled/FLRed
+ */
+ u32 mcp_vf_disabled[VF_MAX_STATIC / 32]; /* 0x003c */
+
+/* Reset on mcp reset, and incremented for every process kill event. */
+ u32 process_kill;
+#define PROCESS_KILL_COUNTER_MASK 0x0000ffff
+#define PROCESS_KILL_COUNTER_SHIFT 0
+#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000
+#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16
+#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) (aeu_reg_id * 32 + aeu_bit)
+};
+
+/**************************************/
+/* */
+/* P U B L I C P O R T */
+/* */
+/**************************************/
+#define FC_NPIV_WWPN_SIZE 8
+#define FC_NPIV_WWNN_SIZE 8
+struct dci_npiv_settings {
+ u8 npiv_wwpn[FC_NPIV_WWPN_SIZE];
+ u8 npiv_wwnn[FC_NPIV_WWNN_SIZE];
+};
+
+struct dci_fc_npiv_cfg {
+ /* hdr used internally by the MFW */
+ u32 hdr;
+ u32 num_of_npiv;
+};
+
+#define MAX_NUMBER_NPIV 64
+struct dci_fc_npiv_tbl {
+ struct dci_fc_npiv_cfg fc_npiv_cfg;
+ struct dci_npiv_settings settings[MAX_NUMBER_NPIV];
+};
+
+/****************************************************************************
+ * Driver <-> FW Mailbox *
+ ****************************************************************************/
+
+struct public_port {
+ u32 validity_map; /* 0x0 (4*2 = 0x8) */
+
+ /* validity bits */
+#define MCP_VALIDITY_PCI_CFG 0x00100000
+#define MCP_VALIDITY_MB 0x00200000
+#define MCP_VALIDITY_DEV_INFO 0x00400000
+#define MCP_VALIDITY_RESERVED 0x00000007
+
+ /* One licensing bit should be set */
+/* yaniv - tbd ? license */
+#define MCP_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038
+#define MCP_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008
+#define MCP_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010
+#define MCP_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020
+
+ /* Active MFW */
+#define MCP_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000
+#define MCP_VALIDITY_ACTIVE_MFW_MASK 0x000001c0
+#define MCP_VALIDITY_ACTIVE_MFW_NCSI 0x00000040
+#define MCP_VALIDITY_ACTIVE_MFW_NONE 0x000001c0
+
+ u32 link_status;
+#define LINK_STATUS_LINK_UP 0x00000001
+#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (1 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1)
+#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
+#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
+#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
+#define LINK_STATUS_PFC_ENABLED 0x00000100
+#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
+#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
+#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800
+#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000
+#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000
+#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000
+#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000
+#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000
+#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000
+#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18)
+#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1 << 18)
+#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18)
+#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18)
+#define LINK_STATUS_SFP_TX_FAULT 0x00100000
+#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000
+#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000
+#define LINK_STATUS_RX_SIGNAL_PRESENT 0x00800000
+#define LINK_STATUS_MAC_LOCAL_FAULT 0x01000000
+#define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000
+#define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000
+#define LINK_STATUS_FEC_MODE_MASK 0x38000000
+#define LINK_STATUS_FEC_MODE_NONE (0 << 27)
+#define LINK_STATUS_FEC_MODE_FIRECODE_CL74 (1 << 27)
+#define LINK_STATUS_FEC_MODE_RS_CL91 (2 << 27)
+#define LINK_STATUS_EXT_PHY_LINK_UP 0x40000000
+
+ u32 link_status1;
+ u32 ext_phy_fw_version;
+/* Points to struct eth_phy_cfg (For READ-ONLY) */
+ u32 drv_phy_cfg_addr;
+
+ u32 port_stx;
+
+ u32 stat_nig_timer;
+
+ struct port_mf_cfg port_mf_config;
+ struct port_stats stats;
+
+ u32 media_type;
+#define MEDIA_UNSPECIFIED 0x0
+#define MEDIA_SFPP_10G_FIBER 0x1 /* Use MEDIA_MODULE_FIBER instead */
+#define MEDIA_XFP_FIBER 0x2 /* Use MEDIA_MODULE_FIBER instead */
+#define MEDIA_DA_TWINAX 0x3
+#define MEDIA_BASE_T 0x4
+#define MEDIA_SFP_1G_FIBER 0x5 /* Use MEDIA_MODULE_FIBER instead */
+#define MEDIA_MODULE_FIBER 0x6
+#define MEDIA_KR 0xf0
+#define MEDIA_NOT_PRESENT 0xff
+
+ u32 lfa_status;
+#define LFA_LINK_FLAP_REASON_OFFSET 0
+#define LFA_LINK_FLAP_REASON_MASK 0x000000ff
+#define LFA_NO_REASON (0 << 0)
+#define LFA_LINK_DOWN (1 << 0)
+#define LFA_FORCE_INIT (1 << 1)
+#define LFA_LOOPBACK_MISMATCH (1 << 2)
+#define LFA_SPEED_MISMATCH (1 << 3)
+#define LFA_FLOW_CTRL_MISMATCH (1 << 4)
+#define LFA_ADV_SPEED_MISMATCH (1 << 5)
+#define LINK_FLAP_AVOIDANCE_COUNT_OFFSET 8
+#define LINK_FLAP_AVOIDANCE_COUNT_MASK 0x0000ff00
+#define LINK_FLAP_COUNT_OFFSET 16
+#define LINK_FLAP_COUNT_MASK 0x00ff0000
+
+ u32 link_change_count;
+
+ /* LLDP params */
+/* offset: 536 bytes? */
+ struct lldp_config_params_s lldp_config_params[LLDP_MAX_LLDP_AGENTS];
+ struct lldp_status_params_s lldp_status_params[LLDP_MAX_LLDP_AGENTS];
+ struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf;
+
+ /* DCBX related MIB */
+ struct dcbx_local_params local_admin_dcbx_mib;
+ struct dcbx_mib remote_dcbx_mib;
+ struct dcbx_mib operational_dcbx_mib;
+
+/* FC_NPIV table offset & size in NVRAM; a value of 0 means not present */
+
+ u32 fc_npiv_nvram_tbl_addr;
+ u32 fc_npiv_nvram_tbl_size;
+ u32 transceiver_data;
+#define ETH_TRANSCEIVER_STATE_MASK 0x000000FF
+#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000
+#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000
+#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001
+#define ETH_TRANSCEIVER_STATE_VALID 0x00000003
+#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008
+#define ETH_TRANSCEIVER_TYPE_MASK 0x0000FF00
+#define ETH_TRANSCEIVER_TYPE_SHIFT 0x00000008
+#define ETH_TRANSCEIVER_TYPE_NONE 0x00000000
+#define ETH_TRANSCEIVER_TYPE_UNKNOWN 0x000000FF
+/* 1G Passive copper cable */
+#define ETH_TRANSCEIVER_TYPE_1G_PCC 0x01
+/* 1G Active copper cable */
+#define ETH_TRANSCEIVER_TYPE_1G_ACC 0x02
+#define ETH_TRANSCEIVER_TYPE_1G_LX 0x03
+#define ETH_TRANSCEIVER_TYPE_1G_SX 0x04
+#define ETH_TRANSCEIVER_TYPE_10G_SR 0x05
+#define ETH_TRANSCEIVER_TYPE_10G_LR 0x06
+#define ETH_TRANSCEIVER_TYPE_10G_LRM 0x07
+#define ETH_TRANSCEIVER_TYPE_10G_ER 0x08
+/* 10G Passive copper cable */
+#define ETH_TRANSCEIVER_TYPE_10G_PCC 0x09
+/* 10G Active copper cable */
+#define ETH_TRANSCEIVER_TYPE_10G_ACC 0x0a
+#define ETH_TRANSCEIVER_TYPE_XLPPI 0x0b
+#define ETH_TRANSCEIVER_TYPE_40G_LR4 0x0c
+#define ETH_TRANSCEIVER_TYPE_40G_SR4 0x0d
+#define ETH_TRANSCEIVER_TYPE_40G_CR4 0x0e
+/* Active optical cable */
+#define ETH_TRANSCEIVER_TYPE_100G_AOC 0x0f
+#define ETH_TRANSCEIVER_TYPE_100G_SR4 0x10
+#define ETH_TRANSCEIVER_TYPE_100G_LR4 0x11
+#define ETH_TRANSCEIVER_TYPE_100G_ER4 0x12
+/* Active copper cable */
+#define ETH_TRANSCEIVER_TYPE_100G_ACC 0x13
+#define ETH_TRANSCEIVER_TYPE_100G_CR4 0x14
+#define ETH_TRANSCEIVER_TYPE_4x10G_SR 0x15
+/* 25G Passive copper cable - short */
+#define ETH_TRANSCEIVER_TYPE_25G_CA_N 0x16
+/* 25G Active copper cable - short */
+#define ETH_TRANSCEIVER_TYPE_25G_ACC_S 0x17
+/* 25G Passive copper cable - medium */
+#define ETH_TRANSCEIVER_TYPE_25G_CA_S 0x18
+/* 25G Active copper cable - medium */
+#define ETH_TRANSCEIVER_TYPE_25G_ACC_M 0x19
+/* 25G Passive copper cable - long */
+#define ETH_TRANSCEIVER_TYPE_25G_CA_L 0x1a
+/* 25G Active copper cable - long */
+#define ETH_TRANSCEIVER_TYPE_25G_ACC_L 0x1b
+#define ETH_TRANSCEIVER_TYPE_25G_SR 0x1c
+#define ETH_TRANSCEIVER_TYPE_25G_LR 0x1d
+#define ETH_TRANSCEIVER_TYPE_25G_AOC 0x1e
+
+#define ETH_TRANSCEIVER_TYPE_4x10G 0x1f
+#define ETH_TRANSCEIVER_TYPE_4x25G_CR 0x20
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR 0x30
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR 0x33
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR 0x34
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR 0x35
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC 0x36
+ u32 wol_info;
+ u32 wol_pkt_len;
+ u32 wol_pkt_details;
+ struct dcb_dscp_map dcb_dscp_map;
+
+ /* the status of EEE auto-negotiation
+ * bits 19:0 the configured tx-lpi entry timer value. Depends on bit 31.
+ * bits 23:20 the speeds advertised for EEE.
+ * bits 27:24 the speeds the Link partner advertised for EEE.
+ * The supported/adv. modes in bits 27:19 originate from the
+ * SHMEM_EEE_XXX_ADV definitions (where XXX is replaced by speed).
+ * bit 28 when 1'b1 EEE was requested.
+ * bit 29 when 1'b1 tx lpi was requested.
+ * bit 30 when 1'b1 EEE was negotiated. Tx lpi will be asserted if 30:29
+ * are 2'b11.
+ * bit 31 - When 1'b0 bits 15:0 contain
+ * NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_XXX define as value.
+ * When 1'b1 those bits contains a value times 16 microseconds.
+ */
+ u32 eee_status;
+#define EEE_TIMER_MASK 0x000fffff
+#define EEE_ADV_STATUS_MASK 0x00f00000
+#define EEE_1G_ADV (1 << 1)
+#define EEE_10G_ADV (1 << 2)
+#define EEE_ADV_STATUS_SHIFT 20
+#define EEE_LP_ADV_STATUS_MASK 0x0f000000
+#define EEE_LP_ADV_STATUS_SHIFT 24
+#define EEE_REQUESTED_BIT 0x10000000
+#define EEE_LPI_REQUESTED_BIT 0x20000000
+#define EEE_ACTIVE_BIT 0x40000000
+#define EEE_TIME_OUTPUT_BIT 0x80000000
+
+ u32 eee_remote; /* Used for EEE in LLDP */
+#define EEE_REMOTE_TW_TX_MASK 0x0000ffff
+#define EEE_REMOTE_TW_TX_SHIFT 0
+#define EEE_REMOTE_TW_RX_MASK 0xffff0000
+#define EEE_REMOTE_TW_RX_SHIFT 16
+};
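/* Illustrative sketch: decoding the negotiated speed out of the link_status
 * word of struct public_port, following the LINK_STATUS_SPEED_AND_DUPLEX_*
 * encoding above. Returning the speed in Mb/s is only an example convention.
 */
#include <stdint.h>

static uint32_t link_status_speed_mbps(uint32_t link_status)
{
	if (!(link_status & LINK_STATUS_LINK_UP))
		return 0;

	switch (link_status & LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		return 1000;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		return 10000;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		return 20000;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		return 25000;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		return 40000;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		return 50000;
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		return 100000;
	default:
		return 0;
	}
}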
+
+/**************************************/
+/* */
+/* P U B L I C F U N C */
+/* */
+/**************************************/
+
+struct public_func {
+ u32 iscsi_boot_signature;
+ u32 iscsi_boot_block_offset;
+
+	/* MTU size per function is needed for the OV feature */
+ u32 mtu_size;
+/* 9 entries for the C2S PCP map for each inner VLAN PCP + 1 default */
+
+ /* For PCP values 0-3 use the map lower */
+ /* 0xFF000000 - PCP 0, 0x00FF0000 - PCP 1,
+ * 0x0000FF00 - PCP 2, 0x000000FF PCP 3
+ */
+ u32 c2s_pcp_map_lower;
+ /* For PCP values 4-7 use the map upper */
+ /* 0xFF000000 - PCP 4, 0x00FF0000 - PCP 5,
+ * 0x0000FF00 - PCP 6, 0x000000FF PCP 7
+ */
+ u32 c2s_pcp_map_upper;
+
+ /* For PCP default value get the MSB byte of the map default */
+ u32 c2s_pcp_map_default;
+
+ u32 reserved[4];
+
+ /* replace old mf_cfg */
+ u32 config;
+ /* E/R/I/D */
+ /* function 0 of each port cannot be hidden */
+#define FUNC_MF_CFG_FUNC_HIDE 0x00000001
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001
+
+
+#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0
+#define FUNC_MF_CFG_PROTOCOL_SHIFT 4
+#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000
+#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010
+#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020
+#define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030
+#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030
+
+ /* MINBW, MAXBW */
+ /* value range - 0..100, increments in 1 % */
+#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00
+#define FUNC_MF_CFG_MIN_BW_SHIFT 8
+#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
+#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000
+#define FUNC_MF_CFG_MAX_BW_SHIFT 16
+#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000
+
+ u32 status;
+#define FUNC_STATUS_VLINK_DOWN 0x00000001
+
+ u32 mac_upper; /* MAC */
+#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
+#define FUNC_MF_CFG_UPPERMAC_SHIFT 0
+#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
+ u32 mac_lower;
+#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
+
+ u32 fcoe_wwn_port_name_upper;
+ u32 fcoe_wwn_port_name_lower;
+
+ u32 fcoe_wwn_node_name_upper;
+ u32 fcoe_wwn_node_name_lower;
+
+ u32 ovlan_stag; /* tags */
+#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff
+#define FUNC_MF_CFG_OV_STAG_SHIFT 0
+#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK
+
+ u32 pf_allocation; /* vf per pf */
+
+	u32 preserve_data; /* Will be used by CCM */
+
+ u32 driver_last_activity_ts;
+
+ /*
+ * drv_ack_vf_disabled is set by the PF driver to ack handled disabled
+ * VFs
+ */
+ u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32]; /* 0x0044 */
+
+ u32 drv_id;
+#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
+#define DRV_ID_PDA_COMP_VER_SHIFT 0
+
+#define LOAD_REQ_HSI_VERSION 2
+#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
+#define DRV_ID_MCP_HSI_VER_SHIFT 16
+#define DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << \
+ DRV_ID_MCP_HSI_VER_SHIFT)
+
+#define DRV_ID_DRV_TYPE_MASK 0x7f000000
+#define DRV_ID_DRV_TYPE_SHIFT 24
+#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_LINUX (1 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_WINDOWS (2 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_DIAG (3 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_PREBOOT (4 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_SOLARIS (5 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_VMWARE (6 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_FREEBSD (7 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_AIX (8 << DRV_ID_DRV_TYPE_SHIFT)
+
+#define DRV_ID_DRV_INIT_HW_MASK 0x80000000
+#define DRV_ID_DRV_INIT_HW_SHIFT 31
+#define DRV_ID_DRV_INIT_HW_FLAG (1 << DRV_ID_DRV_INIT_HW_SHIFT)
+};
+
+/**************************************/
+/* */
+/* P U B L I C M B */
+/* */
+/**************************************/
+/* This is the only section that the driver can write to. */
+/* Basically, each driver request to set feature parameters
+ * will be done using a different command, which will be linked
+ * to a specific data structure from the union below.
+ * For a huge structure, the common blank structure should be used.
+ */
+
+struct mcp_mac {
+ u32 mac_upper; /* Upper 16 bits are always zeroes */
+ u32 mac_lower;
+};
+
+struct mcp_val64 {
+ u32 lo;
+ u32 hi;
+};
+
+struct mcp_file_att {
+ u32 nvm_start_addr;
+ u32 len;
+};
+
+struct bist_nvm_image_att {
+ u32 return_code;
+ u32 image_type; /* Image type */
+ u32 nvm_start_addr; /* NVM address of the image */
+ u32 len; /* Include CRC */
+};
+
+#define MCP_DRV_VER_STR_SIZE 16
+#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32))
+#define MCP_DRV_NVM_BUF_LEN 32
+struct drv_version_stc {
+ u32 version;
+ u8 name[MCP_DRV_VER_STR_SIZE - 4];
+};
+
+/* statistics for ncsi */
+struct lan_stats_stc {
+ u64 ucast_rx_pkts;
+ u64 ucast_tx_pkts;
+ u32 fcs_err;
+ u32 rserved;
+};
+
+struct fcoe_stats_stc {
+ u64 rx_pkts;
+ u64 tx_pkts;
+ u32 fcs_err;
+ u32 login_failure;
+};
+
+struct iscsi_stats_stc {
+ u64 rx_pdus;
+ u64 tx_pdus;
+ u64 rx_bytes;
+ u64 tx_bytes;
+};
+
+struct rdma_stats_stc {
+ u64 rx_pkts;
+ u64 tx_pkts;
+ u64 rx_bytes;
+ u64 tx_bytes;
+};
+
+struct ocbb_data_stc {
+ u32 ocbb_host_addr;
+ u32 ocsd_host_addr;
+ u32 ocsd_req_update_interval;
+};
+
+#define MAX_NUM_OF_SENSORS 7
+#define MFW_SENSOR_LOCATION_INTERNAL 1
+#define MFW_SENSOR_LOCATION_EXTERNAL 2
+#define MFW_SENSOR_LOCATION_SFP 3
+
+#define SENSOR_LOCATION_SHIFT 0
+#define SENSOR_LOCATION_MASK 0x000000ff
+#define THRESHOLD_HIGH_SHIFT 8
+#define THRESHOLD_HIGH_MASK 0x0000ff00
+#define CRITICAL_TEMPERATURE_SHIFT 16
+#define CRITICAL_TEMPERATURE_MASK 0x00ff0000
+#define CURRENT_TEMP_SHIFT 24
+#define CURRENT_TEMP_MASK 0xff000000
+struct temperature_status_stc {
+ u32 num_of_sensors;
+ u32 sensor[MAX_NUM_OF_SENSORS];
+};
+
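+/* Illustrative sketch, not part of the original HSI: decoding one sensor
+ * dword of temperature_status_stc with the SENSOR_LOCATION / THRESHOLD_HIGH /
+ * CRITICAL_TEMPERATURE / CURRENT_TEMP fields defined above (u8/u32 are
+ * assumed to come from the OSAL typedefs).
+ */
+static inline void temperature_sensor_decode(u32 sensor, u8 *location,
+					     u8 *thresh_high, u8 *crit_temp,
+					     u8 *curr_temp)
+{
+	*location = (sensor & SENSOR_LOCATION_MASK) >> SENSOR_LOCATION_SHIFT;
+	*thresh_high = (sensor & THRESHOLD_HIGH_MASK) >> THRESHOLD_HIGH_SHIFT;
+	*crit_temp = (sensor & CRITICAL_TEMPERATURE_MASK) >>
+		     CRITICAL_TEMPERATURE_SHIFT;
+	*curr_temp = (sensor & CURRENT_TEMP_MASK) >> CURRENT_TEMP_SHIFT;
+}
+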
+/* crash dump configuration header */
+struct mdump_config_stc {
+ u32 version;
+ u32 config;
+ u32 epoc;
+ u32 num_of_logs;
+ u32 valid_logs;
+};
+
+enum resource_id_enum {
+ RESOURCE_NUM_SB_E = 0,
+ RESOURCE_NUM_L2_QUEUE_E = 1,
+ RESOURCE_NUM_VPORT_E = 2,
+ RESOURCE_NUM_VMQ_E = 3,
+/* Not a real resource!! it's a factor used to calculate others */
+ RESOURCE_FACTOR_NUM_RSS_PF_E = 4,
+/* Not a real resource!! it's a factor used to calculate others */
+ RESOURCE_FACTOR_RSS_PER_VF_E = 5,
+ RESOURCE_NUM_RL_E = 6,
+ RESOURCE_NUM_PQ_E = 7,
+ RESOURCE_NUM_VF_E = 8,
+ RESOURCE_VFC_FILTER_E = 9,
+ RESOURCE_ILT_E = 10,
+ RESOURCE_CQS_E = 11,
+ RESOURCE_GFT_PROFILES_E = 12,
+ RESOURCE_NUM_TC_E = 13,
+ RESOURCE_NUM_RSS_ENGINES_E = 14,
+ RESOURCE_LL2_QUEUE_E = 15,
+ RESOURCE_RDMA_STATS_QUEUE_E = 16,
+ RESOURCE_BDQ_E = 17,
+ RESOURCE_MAX_NUM,
+ RESOURCE_NUM_INVALID = 0xFFFFFFFF
+};
+
+/* Resource ID is to be filled by the driver in the MB request
+ * Size, offset & flags to be filled by the MFW in the MB response
+ */
+struct resource_info {
+ enum resource_id_enum res_id;
+ u32 size; /* number of allocated resources */
+ u32 offset; /* Offset of the 1st resource */
+ u32 vf_size;
+ u32 vf_offset;
+ u32 flags;
+#define RESOURCE_ELEMENT_STRICT (1 << 0)
+};
+
+#define DRV_ROLE_NONE 0
+#define DRV_ROLE_PREBOOT 1
+#define DRV_ROLE_OS 2
+#define DRV_ROLE_KDUMP 3
+
+struct load_req_stc {
+ u32 drv_ver_0;
+ u32 drv_ver_1;
+ u32 fw_ver;
+ u32 misc0;
+#define LOAD_REQ_ROLE_MASK 0x000000FF
+#define LOAD_REQ_ROLE_SHIFT 0
+#define LOAD_REQ_LOCK_TO_MASK 0x0000FF00
+#define LOAD_REQ_LOCK_TO_SHIFT 8
+#define LOAD_REQ_LOCK_TO_DEFAULT 0
+#define LOAD_REQ_LOCK_TO_NONE 255
+#define LOAD_REQ_FORCE_MASK 0x000F0000
+#define LOAD_REQ_FORCE_SHIFT 16
+#define LOAD_REQ_FORCE_NONE 0
+#define LOAD_REQ_FORCE_PF 1
+#define LOAD_REQ_FORCE_ALL 2
+#define LOAD_REQ_FLAGS0_MASK 0x00F00000
+#define LOAD_REQ_FLAGS0_SHIFT 20
+#define LOAD_REQ_FLAGS0_AVOID_RESET (0x1 << 0)
+};
+
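+/* Illustrative sketch, not part of the original HSI: filling the misc0 word
+ * of load_req_stc for an ordinary OS driver load with the default lock
+ * timeout and no force flags; the FLAGS0 field is left clear here. The u32
+ * typedef is assumed from the OSAL layer.
+ */
+static inline u32 load_req_misc0_compose(void)
+{
+	u32 misc0 = 0;
+
+	misc0 |= DRV_ROLE_OS << LOAD_REQ_ROLE_SHIFT;
+	misc0 |= LOAD_REQ_LOCK_TO_DEFAULT << LOAD_REQ_LOCK_TO_SHIFT;
+	misc0 |= LOAD_REQ_FORCE_NONE << LOAD_REQ_FORCE_SHIFT;
+
+	return misc0;
+}
+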
+struct load_rsp_stc {
+ u32 drv_ver_0;
+ u32 drv_ver_1;
+ u32 fw_ver;
+ u32 misc0;
+#define LOAD_RSP_ROLE_MASK 0x000000FF
+#define LOAD_RSP_ROLE_SHIFT 0
+#define LOAD_RSP_HSI_MASK 0x0000FF00
+#define LOAD_RSP_HSI_SHIFT 8
+#define LOAD_RSP_FLAGS0_MASK 0x000F0000
+#define LOAD_RSP_FLAGS0_SHIFT 16
+#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0)
+};
+
+union drv_union_data {
+ struct mcp_mac wol_mac; /* UNLOAD_DONE */
+
+/* This configuration should be set by the driver for the LINK_SET command. */
+
+ struct eth_phy_cfg drv_phy_cfg;
+
+ struct mcp_val64 val64; /* For PHY / AVS commands */
+
+ u8 raw_data[MCP_DRV_NVM_BUF_LEN];
+
+ struct mcp_file_att file_att;
+
+ u32 ack_vf_disabled[VF_MAX_STATIC / 32];
+
+ struct drv_version_stc drv_version;
+
+ struct lan_stats_stc lan_stats;
+ struct fcoe_stats_stc fcoe_stats;
+ struct iscsi_stats_stc iscsi_stats;
+ struct rdma_stats_stc rdma_stats;
+ struct ocbb_data_stc ocbb_info;
+ struct temperature_status_stc temp_info;
+ struct resource_info resource;
+ struct bist_nvm_image_att nvm_image_att;
+ struct mdump_config_stc mdump_config;
+ u32 dword;
+
+ struct load_req_stc load_req;
+ struct load_rsp_stc load_rsp;
+ /* ... */
+};
+
+struct public_drv_mb {
+ u32 drv_mb_header;
+#define DRV_MSG_CODE_MASK 0xffff0000
+#define DRV_MSG_CODE_LOAD_REQ 0x10000000
+#define DRV_MSG_CODE_LOAD_DONE 0x11000000
+#define DRV_MSG_CODE_INIT_HW 0x12000000
+#define DRV_MSG_CODE_CANCEL_LOAD_REQ 0x13000000
+#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000
+#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
+#define DRV_MSG_CODE_INIT_PHY 0x22000000
+ /* Params - FORCE - Reinitialize the link regardless of LFA */
+ /* - DONT_CARE - Don't flap the link if up */
+#define DRV_MSG_CODE_LINK_RESET 0x23000000
+
+ /* Vitaly: LLDP commands */
+#define DRV_MSG_CODE_SET_LLDP 0x24000000
+#define DRV_MSG_CODE_SET_DCBX 0x25000000
+ /* OneView feature driver HSI*/
+#define DRV_MSG_CODE_OV_UPDATE_CURR_CFG 0x26000000
+#define DRV_MSG_CODE_OV_UPDATE_BUS_NUM 0x27000000
+#define DRV_MSG_CODE_OV_UPDATE_BOOT_PROGRESS 0x28000000
+#define DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER 0x29000000
+#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE 0x31000000
+#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
+#define DRV_MSG_CODE_OV_UPDATE_MTU 0x33000000
+/* DRV_MB Param: driver version supp, FW_MB param: MFW version supp,
+ * data: struct resource_info
+ */
+#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000
+#define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000
+
+/* Deprecated - don't use */
+#define DRV_MSG_CODE_INITIATE_FLR_DEPRECATED 0x02000000
+#define DRV_MSG_CODE_INITIATE_PF_FLR 0x02010000
+#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
+#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
+/* Param is either DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW/IMAGE */
+#define DRV_MSG_CODE_NVM_PUT_FILE_BEGIN 0x00010000
+/* Param should be set to the transaction size (up to 64 bytes) */
+#define DRV_MSG_CODE_NVM_PUT_FILE_DATA 0x00020000
+/* MFW will place the file offset and len in file_att struct */
+#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000
+/* Read 32 bytes of NVRAM data. Param is [0:23] - Offset, [24:31] -
+ * Len in bytes
+ */
+#define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000
+/* Writes up to 32 bytes to NVRAM. Param is [0:23] - Offset, [24:31] -
+ * Len in bytes. In case this address is in the range of a secured file in
+ * secured mode, the operation will fail.
+ */
+#define DRV_MSG_CODE_NVM_WRITE_NVRAM 0x00060000
+/* Delete a file from nvram. Param is image_type. */
+#define DRV_MSG_CODE_NVM_DEL_FILE 0x00080000
+/* Reset MCP when no NVM operation is going on, and no drivers are loaded.
+ * In case the operation succeeds, MCP will not ack back.
+ */
+#define DRV_MSG_CODE_MCP_RESET 0x00090000
+/* Temporary command to set secure mode, where the param is 0 (Non-secure) /
+ * 1 (Secure) / 2 (Full-Secure)
+ */
+#define DRV_MSG_CODE_SET_SECURE_MODE 0x000a0000
+/* Param: [0:15] - Address, [16:18] - lane# (0/1/2/3 - for single lane,
+ * 4/5 - for dual lanes, 6 - for all lanes), [28] - PMD reg, [29] - select port,
+ * [30:31] - port
+ */
+#define DRV_MSG_CODE_PHY_RAW_READ 0x000b0000
+/* Param: [0:15] - Address, [16:18] - lane# (0/1/2/3 - for single lane,
+ * 4/5 - for dual lanes, 6 - for all lanes), [28] - PMD reg, [29] - select port,
+ * [30:31] - port
+ */
+#define DRV_MSG_CODE_PHY_RAW_WRITE 0x000c0000
+/* Param: [0:15] - Address, [30:31] - port */
+#define DRV_MSG_CODE_PHY_CORE_READ 0x000d0000
+/* Param: [0:15] - Address, [30:31] - port */
+#define DRV_MSG_CODE_PHY_CORE_WRITE 0x000e0000
+/* Param: [0:3] - version, [4:15] - name (null terminated) */
+#define DRV_MSG_CODE_SET_VERSION 0x000f0000
+/* Halts the MCP. To resume MCP, user will need to use
+ * MCP_REG_CPU_STATE/MCP_REG_CPU_MODE registers.
+ */
+#define DRV_MSG_CODE_MCP_HALT 0x00100000
+/* Set virtual mac address, params [31:6] - reserved, [5:4] - type,
+ * [3:0] - func, drv_data[7:0] - MAC/WWNN/WWPN
+ */
+#define DRV_MSG_CODE_SET_VMAC 0x00110000
+/* Get virtual mac address, params [31:6] - reserved, [5:4] - type,
+ * [3:0] - func, drv_data[7:0] - MAC/WWNN/WWPN
+ */
+#define DRV_MSG_CODE_GET_VMAC 0x00120000
+#define DRV_MSG_CODE_VMAC_TYPE_SHIFT 4
+#define DRV_MSG_CODE_VMAC_TYPE_MASK 0x30
+#define DRV_MSG_CODE_VMAC_TYPE_MAC 1
+#define DRV_MSG_CODE_VMAC_TYPE_WWNN 2
+#define DRV_MSG_CODE_VMAC_TYPE_WWPN 3
+/* Get statistics from pf, params [31:4] - reserved, [3:0] - stats type */
+#define DRV_MSG_CODE_GET_STATS 0x00130000
+#define DRV_MSG_CODE_STATS_TYPE_LAN 1
+#define DRV_MSG_CODE_STATS_TYPE_FCOE 2
+#define DRV_MSG_CODE_STATS_TYPE_ISCSI 3
+#define DRV_MSG_CODE_STATS_TYPE_RDMA 4
+/* Host shall provide buffer and size for MFW */
+#define DRV_MSG_CODE_PMD_DIAG_DUMP 0x00140000
+/* Host shall provide buffer and size for MFW */
+#define DRV_MSG_CODE_PMD_DIAG_EYE 0x00150000
+/* Param: [0:1] - Port, [2:7] - read size, [8:15] - I2C address,
+ * [16:31] - offset
+ */
+#define DRV_MSG_CODE_TRANSCEIVER_READ 0x00160000
+/* Param: [0:1] - Port, [2:7] - write size, [8:15] - I2C address,
+ * [16:31] - offset
+ */
+#define DRV_MSG_CODE_TRANSCEIVER_WRITE 0x00170000
+/* indicate OCBB related information */
+#define DRV_MSG_CODE_OCBB_DATA 0x00180000
+/* Set function BW, params[15:8] - min, params[7:0] - max */
+#define DRV_MSG_CODE_SET_BW 0x00190000
+#define BW_MAX_MASK 0x000000ff
+#define BW_MAX_SHIFT 0
+#define BW_MIN_MASK 0x0000ff00
+#define BW_MIN_SHIFT 8
+
+/* When param is set to 1, all parities will be masked (disabled). When param
+ * is set to 0, parities will be unmasked again.
+ */
+#define DRV_MSG_CODE_MASK_PARITIES 0x001a0000
+/* param[0] - Simulate fan failure, param[1] - simulate over temp. */
+#define DRV_MSG_CODE_INDUCE_FAILURE 0x001b0000
+#define DRV_MSG_FAN_FAILURE_TYPE (1 << 0)
+#define DRV_MSG_TEMPERATURE_FAILURE_TYPE (1 << 1)
+/* Param: [0:15] - gpio number */
+#define DRV_MSG_CODE_GPIO_READ 0x001c0000
+/* Param: [0:15] - gpio number, [16:31] - gpio value */
+#define DRV_MSG_CODE_GPIO_WRITE 0x001d0000
+/* Param: [0:7] - test enum, [8:15] - image index, [16:31] - reserved */
+#define DRV_MSG_CODE_BIST_TEST 0x001e0000
+#define DRV_MSG_CODE_GET_TEMPERATURE 0x001f0000
+
+/* Set LED mode. Params: 0 - operational, 1 - LED turn ON, 2 - LED turn OFF */
+#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
+/* drv_data[7:0] - EPOC in seconds, drv_data[15:8] -
+ * driver version (MAJ MIN BUILD SUB)
+ */
+#define DRV_MSG_CODE_TIMESTAMP 0x00210000
+/* This is an empty mailbox - just returns OK */
+#define DRV_MSG_CODE_EMPTY_MB 0x00220000
+
+/* Param[0:4] - resource number (0-31), Param[5:7] - opcode,
+ * param[15:8] - age
+ */
+#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000
+
+#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F
+#define RESOURCE_CMD_REQ_RESC_SHIFT 0
+#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0
+#define RESOURCE_CMD_REQ_OPCODE_SHIFT 5
+/* request resource ownership with default aging */
+#define RESOURCE_OPCODE_REQ 1
+/* request resource ownership without aging */
+#define RESOURCE_OPCODE_REQ_WO_AGING 2
+/* request resource ownership with specific aging timer (in seconds) */
+#define RESOURCE_OPCODE_REQ_W_AGING 3
+#define RESOURCE_OPCODE_RELEASE 4 /* release resource */
+/* force resource release */
+#define RESOURCE_OPCODE_FORCE_RELEASE 5
+#define RESOURCE_CMD_REQ_AGE_MASK 0x0000FF00
+#define RESOURCE_CMD_REQ_AGE_SHIFT 8
+
+#define RESOURCE_CMD_RSP_OWNER_MASK 0x000000FF
+#define RESOURCE_CMD_RSP_OWNER_SHIFT 0
+#define RESOURCE_CMD_RSP_OPCODE_MASK 0x00000700
+#define RESOURCE_CMD_RSP_OPCODE_SHIFT 8
+/* resource is free and granted to requester */
+#define RESOURCE_OPCODE_GNT 1
+/* resource is busy, param[7:0] indicates the owner as follows: 0-15 = PF0-15,
+ * 16 = MFW, 17 = diag over serial
+ */
+#define RESOURCE_OPCODE_BUSY 2
+/* indicate release request was acknowledged */
+#define RESOURCE_OPCODE_RELEASED 3
+/* indicate release request was previously received by other owner */
+#define RESOURCE_OPCODE_RELEASED_PREVIOUS 4
+/* indicate wrong owner during release */
+#define RESOURCE_OPCODE_WRONG_OWNER 5
+#define RESOURCE_OPCODE_UNKNOWN_CMD 255
+
+/* dedicate resource 0 for dump */
+#define RESOURCE_DUMP 0
+
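+/* Illustrative example, not part of the original HSI: to acquire the dump
+ * resource with default aging, the request param would be composed as
+ *   (RESOURCE_DUMP << RESOURCE_CMD_REQ_RESC_SHIFT) |
+ *   (RESOURCE_OPCODE_REQ << RESOURCE_CMD_REQ_OPCODE_SHIFT)
+ * Ownership is granted when the response carries RESOURCE_OPCODE_GNT in
+ * RESOURCE_CMD_RSP_OPCODE_MASK; otherwise RESOURCE_CMD_RSP_OWNER_MASK
+ * identifies the current owner (0-15 = PF0-15, 16 = MFW).
+ */
+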
+#define DRV_MSG_CODE_GET_MBA_VERSION 0x00240000 /* Get MBA version */
+/* Send crash dump commands with param[3:0] - opcode */
+#define DRV_MSG_CODE_MDUMP_CMD 0x00250000
+#define MDUMP_DRV_PARAM_OPCODE_MASK 0x0000000f
+/* acknowledge reception of error indication */
+#define DRV_MSG_CODE_MDUMP_ACK 0x01
+/* set epoch and personality as follows: drv_data[3:0] - epoch,
+ * drv_data[7:4] - personality
+ */
+#define DRV_MSG_CODE_MDUMP_SET_VALUES 0x02
+/* trigger crash dump procedure */
+#define DRV_MSG_CODE_MDUMP_TRIGGER 0x03
+/* Request valid logs and config words */
+#define DRV_MSG_CODE_MDUMP_GET_CONFIG 0x04
+/* Set triggers mask. drv_mb_param should indicate (bitwise) which
+ * triggers are enabled
+ */
+#define DRV_MSG_CODE_MDUMP_SET_ENABLE 0x05
+/* Clear all logs */
+#define DRV_MSG_CODE_MDUMP_CLEAR_LOGS 0x06
+#define DRV_MSG_CODE_MEM_ECC_EVENTS 0x00260000 /* Param: None */
+/* Param: [0:15] - gpio number */
+#define DRV_MSG_CODE_GPIO_INFO 0x00270000
+/* Value will be placed in union */
+#define DRV_MSG_CODE_EXT_PHY_READ 0x00280000
+/* Value should be placed in union */
+#define DRV_MSG_CODE_EXT_PHY_WRITE 0x00290000
+#define DRV_MB_PARAM_ADDR_SHIFT 0
+#define DRV_MB_PARAM_ADDR_MASK 0x0000FFFF
+#define DRV_MB_PARAM_DEVAD_SHIFT 16
+#define DRV_MB_PARAM_DEVAD_MASK 0x001F0000
+#define DRV_MB_PARAM_PORT_SHIFT 21
+#define DRV_MB_PARAM_PORT_MASK 0x00600000
+#define DRV_MSG_CODE_EXT_PHY_FW_UPGRADE 0x002a0000
+
+#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
+
+ u32 drv_mb_param;
+ /* UNLOAD_REQ params */
+#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000
+#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001
+#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002
+#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003
+
+	/* UNLOAD_DONE params */
+#define DRV_MB_PARAM_UNLOAD_NON_D3_POWER 0x00000001
+
+ /* INIT_PHY params */
+#define DRV_MB_PARAM_INIT_PHY_FORCE 0x00000001
+#define DRV_MB_PARAM_INIT_PHY_DONT_CARE 0x00000002
+
+	/* LLDP / DCBX params */
+#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001
+#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0
+#define DRV_MB_PARAM_LLDP_AGENT_MASK 0x00000006
+#define DRV_MB_PARAM_LLDP_AGENT_SHIFT 1
+#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x00000008
+#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
+
+#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_MASK 0x000000FF
+#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_SHIFT 0
+
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW 0x1
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_IMAGE 0x2
+
+#define DRV_MB_PARAM_NVM_OFFSET_SHIFT 0
+#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF
+#define DRV_MB_PARAM_NVM_LEN_SHIFT 24
+#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000
+
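+/* Illustrative example, not part of the original HSI: a
+ * DRV_MSG_CODE_NVM_READ_NVRAM param reading 32 bytes at NVM offset 0x1000
+ * would be composed as
+ *   (0x1000 << DRV_MB_PARAM_NVM_OFFSET_SHIFT) |
+ *   (32 << DRV_MB_PARAM_NVM_LEN_SHIFT)
+ * i.e. the offset in bits [23:0] and the length in bits [31:24].
+ */
+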
+#define DRV_MB_PARAM_PHY_ADDR_SHIFT 0
+#define DRV_MB_PARAM_PHY_ADDR_MASK 0x1FF0FFFF
+#define DRV_MB_PARAM_PHY_LANE_SHIFT 16
+#define DRV_MB_PARAM_PHY_LANE_MASK 0x000F0000
+#define DRV_MB_PARAM_PHY_SELECT_PORT_SHIFT 29
+#define DRV_MB_PARAM_PHY_SELECT_PORT_MASK 0x20000000
+#define DRV_MB_PARAM_PHY_PORT_SHIFT 30
+#define DRV_MB_PARAM_PHY_PORT_MASK 0xc0000000
+
+#define DRV_MB_PARAM_PHYMOD_LANE_SHIFT 0
+#define DRV_MB_PARAM_PHYMOD_LANE_MASK 0x000000FF
+#define DRV_MB_PARAM_PHYMOD_SIZE_SHIFT 8
+#define DRV_MB_PARAM_PHYMOD_SIZE_MASK 0x000FFF00
+	/* Configure VF MSIX params */
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00
+
+	/* OneView configuration parameters */
+#define DRV_MB_PARAM_OV_CURR_CFG_SHIFT 0
+#define DRV_MB_PARAM_OV_CURR_CFG_MASK 0x0000000F
+#define DRV_MB_PARAM_OV_CURR_CFG_NONE 0
+#define DRV_MB_PARAM_OV_CURR_CFG_OS 1
+#define DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC 2
+#define DRV_MB_PARAM_OV_CURR_CFG_OTHER 3
+#define DRV_MB_PARAM_OV_CURR_CFG_VC_CLP 4
+#define DRV_MB_PARAM_OV_CURR_CFG_CNU 5
+#define DRV_MB_PARAM_OV_CURR_CFG_DCI 6
+#define DRV_MB_PARAM_OV_CURR_CFG_HII 7
+
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_SHIFT 0
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_MASK 0x000000FF
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_NONE (1 << 0)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_ISCSI_IP_ACQUIRED (1 << 1)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_FCOE_FABRIC_LOGIN_SUCCESS (1 << 1)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_TRARGET_FOUND (1 << 2)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_ISCSI_CHAP_SUCCESS (1 << 3)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_FCOE_LUN_FOUND (1 << 3)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_LOGGED_INTO_TGT (1 << 4)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_IMG_DOWNLOADED (1 << 5)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_OS_HANDOFF (1 << 6)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_COMPLETED 0
+
+#define DRV_MB_PARAM_OV_PCI_BUS_NUM_SHIFT 0
+#define DRV_MB_PARAM_OV_PCI_BUS_NUM_MASK 0x000000FF
+
+#define DRV_MB_PARAM_OV_STORM_FW_VER_SHIFT 0
+#define DRV_MB_PARAM_OV_STORM_FW_VER_MASK 0xFFFFFFFF
+#define DRV_MB_PARAM_OV_STORM_FW_VER_MAJOR_MASK 0xFF000000
+#define DRV_MB_PARAM_OV_STORM_FW_VER_MINOR_MASK 0x00FF0000
+#define DRV_MB_PARAM_OV_STORM_FW_VER_BUILD_MASK 0x0000FF00
+#define DRV_MB_PARAM_OV_STORM_FW_VER_DROP_MASK 0x000000FF
+
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_SHIFT 0
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_MASK 0xF
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_UNKNOWN 0x1
+/* Not Installed*/
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED 0x2
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_LOADING 0x3
+/* installed but disabled by user/admin/OS */
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED 0x4
+/* installed and active */
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE 0x5
+
+#define DRV_MB_PARAM_OV_MTU_SIZE_SHIFT 0
+#define DRV_MB_PARAM_OV_MTU_SIZE_MASK 0xFFFFFFFF
+
+#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
+#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
+#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
+
+#define DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT 0
+#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK 0x00000003
+#define DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT 2
+#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK 0x000000FC
+#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT 8
+#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK 0x0000FF00
+#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT 16
+#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK 0xFFFF0000
+
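+/* Illustrative example, not part of the original HSI: a
+ * DRV_MSG_CODE_TRANSCEIVER_READ param for reading 16 bytes from I2C address
+ * 0xA0 at offset 0x14 on port 0 would be composed as
+ *   (0 << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
+ *   (16 << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT) |
+ *   (0xA0 << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT) |
+ *   (0x14 << DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT)
+ */
+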
+#define DRV_MB_PARAM_GPIO_NUMBER_SHIFT 0
+#define DRV_MB_PARAM_GPIO_NUMBER_MASK 0x0000FFFF
+#define DRV_MB_PARAM_GPIO_VALUE_SHIFT 16
+#define DRV_MB_PARAM_GPIO_VALUE_MASK 0xFFFF0000
+#define DRV_MB_PARAM_GPIO_DIRECTION_SHIFT 16
+#define DRV_MB_PARAM_GPIO_DIRECTION_MASK 0x00FF0000
+#define DRV_MB_PARAM_GPIO_CTRL_SHIFT 24
+#define DRV_MB_PARAM_GPIO_CTRL_MASK 0xFF000000
+
+ /* Resource Allocation params - Driver version support*/
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
+
+#define DRV_MB_PARAM_BIST_UNKNOWN_TEST 0
+#define DRV_MB_PARAM_BIST_REGISTER_TEST 1
+#define DRV_MB_PARAM_BIST_CLOCK_TEST 2
+#define DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES 3
+#define DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX 4
+
+#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0
+#define DRV_MB_PARAM_BIST_RC_PASSED 1
+#define DRV_MB_PARAM_BIST_RC_FAILED 2
+#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3
+
+#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0
+#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000FF
+#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT 8
+#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000FF00
+
+ u32 fw_mb_header;
+#define FW_MSG_CODE_MASK 0xffff0000
+#define FW_MSG_CODE_UNSUPPORTED 0x00000000
+#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000
+#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
+#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1 0x10210000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10230000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE 0x10300000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT 0x10310000
+#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
+#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000
+#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000
+#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000
+#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
+#define FW_MSG_CODE_INIT_PHY_DONE 0x21200000
+#define FW_MSG_CODE_INIT_PHY_ERR_INVALID_ARGS 0x21300000
+#define FW_MSG_CODE_LINK_RESET_DONE 0x23000000
+#define FW_MSG_CODE_SET_LLDP_DONE 0x24000000
+#define FW_MSG_CODE_SET_LLDP_UNSUPPORTED_AGENT 0x24010000
+#define FW_MSG_CODE_SET_DCBX_DONE 0x25000000
+#define FW_MSG_CODE_UPDATE_CURR_CFG_DONE 0x26000000
+#define FW_MSG_CODE_UPDATE_BUS_NUM_DONE 0x27000000
+#define FW_MSG_CODE_UPDATE_BOOT_PROGRESS_DONE 0x28000000
+#define FW_MSG_CODE_UPDATE_STORM_FW_VER_DONE 0x29000000
+#define FW_MSG_CODE_UPDATE_DRIVER_STATE_DONE 0x31000000
+#define FW_MSG_CODE_DRV_MSG_CODE_BW_UPDATE_DONE 0x32000000
+#define FW_MSG_CODE_DRV_MSG_CODE_MTU_SIZE_DONE 0x33000000
+#define FW_MSG_CODE_RESOURCE_ALLOC_OK 0x34000000
+#define FW_MSG_CODE_RESOURCE_ALLOC_UNKNOWN 0x35000000
+#define FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED 0x36000000
+#define FW_MSG_CODE_RESOURCE_ALLOC_GEN_ERR 0x37000000
+#define FW_MSG_CODE_NIG_DRAIN_DONE 0x30000000
+#define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000
+#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000
+#define FW_MSG_CODE_FLR_ACK 0x02000000
+#define FW_MSG_CODE_FLR_NACK 0x02100000
+#define FW_MSG_CODE_SET_DRIVER_DONE 0x02200000
+#define FW_MSG_CODE_SET_VMAC_SUCCESS 0x02300000
+#define FW_MSG_CODE_SET_VMAC_FAIL 0x02400000
+
+#define FW_MSG_CODE_NVM_OK 0x00010000
+#define FW_MSG_CODE_NVM_INVALID_MODE 0x00020000
+#define FW_MSG_CODE_NVM_PREV_CMD_WAS_NOT_FINISHED 0x00030000
+#define FW_MSG_CODE_NVM_FAILED_TO_ALLOCATE_PAGE 0x00040000
+#define FW_MSG_CODE_NVM_INVALID_DIR_FOUND 0x00050000
+#define FW_MSG_CODE_NVM_PAGE_NOT_FOUND 0x00060000
+#define FW_MSG_CODE_NVM_FAILED_PARSING_BNDLE_HEADER 0x00070000
+#define FW_MSG_CODE_NVM_FAILED_PARSING_IMAGE_HEADER 0x00080000
+#define FW_MSG_CODE_NVM_PARSING_OUT_OF_SYNC 0x00090000
+#define FW_MSG_CODE_NVM_FAILED_UPDATING_DIR 0x000a0000
+#define FW_MSG_CODE_NVM_FAILED_TO_FREE_PAGE 0x000b0000
+#define FW_MSG_CODE_NVM_FILE_NOT_FOUND 0x000c0000
+#define FW_MSG_CODE_NVM_OPERATION_FAILED 0x000d0000
+#define FW_MSG_CODE_NVM_FAILED_UNALIGNED 0x000e0000
+#define FW_MSG_CODE_NVM_BAD_OFFSET 0x000f0000
+#define FW_MSG_CODE_NVM_BAD_SIGNATURE 0x00100000
+#define FW_MSG_CODE_NVM_FILE_READ_ONLY 0x00200000
+#define FW_MSG_CODE_NVM_UNKNOWN_FILE 0x00300000
+#define FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK 0x00400000
+/* MFW rejects the "mcp reset" command if one of the drivers is up */
+#define FW_MSG_CODE_MCP_RESET_REJECT 0x00600000
+#define FW_MSG_CODE_NVM_FAILED_CALC_HASH 0x00310000
+#define FW_MSG_CODE_NVM_PUBLIC_KEY_MISSING 0x00320000
+#define FW_MSG_CODE_NVM_INVALID_PUBLIC_KEY 0x00330000
+
+#define FW_MSG_CODE_PHY_OK 0x00110000
+#define FW_MSG_CODE_PHY_ERROR 0x00120000
+#define FW_MSG_CODE_SET_SECURE_MODE_ERROR 0x00130000
+#define FW_MSG_CODE_SET_SECURE_MODE_OK 0x00140000
+#define FW_MSG_MODE_PHY_PRIVILEGE_ERROR 0x00150000
+#define FW_MSG_CODE_OK 0x00160000
+#define FW_MSG_CODE_LED_MODE_INVALID 0x00170000
+#define FW_MSG_CODE_PHY_DIAG_OK 0x00160000
+#define FW_MSG_CODE_PHY_DIAG_ERROR 0x00170000
+#define FW_MSG_CODE_INIT_HW_FAILED_TO_ALLOCATE_PAGE 0x00040000
+#define FW_MSG_CODE_INIT_HW_FAILED_BAD_STATE 0x00170000
+#define FW_MSG_CODE_INIT_HW_FAILED_TO_SET_WINDOW 0x000d0000
+#define FW_MSG_CODE_INIT_HW_FAILED_NO_IMAGE 0x000c0000
+#define FW_MSG_CODE_INIT_HW_FAILED_VERSION_MISMATCH 0x00100000
+#define FW_MSG_CODE_TRANSCEIVER_DIAG_OK 0x00160000
+#define FW_MSG_CODE_TRANSCEIVER_DIAG_ERROR 0x00170000
+#define FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT 0x00020000
+#define FW_MSG_CODE_TRANSCEIVER_BAD_BUFFER_SIZE 0x000f0000
+#define FW_MSG_CODE_GPIO_OK 0x00160000
+#define FW_MSG_CODE_GPIO_DIRECTION_ERR 0x00170000
+#define FW_MSG_CODE_GPIO_CTRL_ERR 0x00020000
+#define FW_MSG_CODE_GPIO_INVALID 0x000f0000
+#define FW_MSG_CODE_GPIO_INVALID_VALUE 0x00050000
+#define FW_MSG_CODE_BIST_TEST_INVALID 0x000f0000
+#define FW_MSG_CODE_EXTPHY_INVALID_IMAGE_HEADER 0x00700000
+#define FW_MSG_CODE_EXTPHY_INVALID_PHY_TYPE 0x00710000
+#define FW_MSG_CODE_EXTPHY_OPERATION_FAILED 0x00720000
+#define FW_MSG_CODE_EXTPHY_NO_PHY_DETECTED 0x00730000
+#define FW_MSG_CODE_RECOVERY_MODE 0x00740000
+
+ /* mdump related response codes */
+#define FW_MSG_CODE_MDUMP_NO_IMAGE_FOUND 0x00010000
+#define FW_MSG_CODE_MDUMP_ALLOC_FAILED 0x00020000
+#define FW_MSG_CODE_MDUMP_INVALID_CMD 0x00030000
+#define FW_MSG_CODE_MDUMP_IN_PROGRESS 0x00040000
+#define FW_MSG_CODE_MDUMP_WRITE_FAILED 0x00050000
+
+#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
+
+
+ u32 fw_mb_param;
+/* Resource Allocation params - MFW version support */
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
+
+
+ u32 drv_pulse_mb;
+#define DRV_PULSE_SEQ_MASK 0x00007fff
+#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
+ /*
+ * The system time is in the format of
+ * (year-2001)*12*32 + month*32 + day.
+ */
+#define DRV_PULSE_ALWAYS_ALIVE 0x00008000
+	/*
+	 * Indicates to the firmware not to go into OS-absent mode
+	 * when it is not getting the driver pulse.
+	 * This is used for debugging as well as for PXE (MBA).
+	 */
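+
+	/* Illustrative example, not part of the original HSI, assuming a
+	 * 1-based month and day: June 15th 2017 would encode as
+	 *   (2017 - 2001) * 12 * 32 + 6 * 32 + 15 = 6351 (0x18CF)
+	 * in the DRV_PULSE_SYSTEM_TIME_MASK field.
+	 */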
+
+ u32 mcp_pulse_mb;
+#define MCP_PULSE_SEQ_MASK 0x00007fff
+#define MCP_PULSE_ALWAYS_ALIVE 0x00008000
+ /* Indicates to the driver not to assert due to lack
+ * of MCP response
+ */
+#define MCP_EVENT_MASK 0xffff0000
+#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
+
+/* The union data is used by the driver to pass parameters to the scratchpad. */
+
+ union drv_union_data union_data;
+
+};
+
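+/* Illustrative sketch, not part of the original HSI, of the mailbox
+ * convention implied by the struct above: the command goes into the upper
+ * 16 bits of drv_mb_header and an incrementing sequence number into the
+ * lower 16 bits; the MFW echoes the same sequence number in fw_mb_header
+ * together with an FW_MSG_CODE_* value. The real register-access layer and
+ * locking used by the driver (ecore_mcp.c) are ignored here; u16/u32 are
+ * assumed from the OSAL typedefs.
+ */
+static inline u32 drv_mb_header_compose(u32 cmd, u16 seq)
+{
+	return (cmd & DRV_MSG_CODE_MASK) | (seq & DRV_MSG_SEQ_NUMBER_MASK);
+}
+
+static inline int fw_mb_is_ack(u32 fw_mb_header, u16 seq)
+{
+	/* The response belongs to our request once the sequence matches */
+	return (fw_mb_header & FW_MSG_SEQ_NUMBER_MASK) == seq;
+}
+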
+/* MFW - DRV MB */
+/**********************************************************************
+ * Description
+ * Incremental Aggregative
+ * 8-bit MFW counter per message
+ * 8-bit ack-counter per message
+ * Capabilities
+ *   Provides up to 256 aggregative messages per type
+ *   Provides 4 message types per dword
+ *   Each message type maps to a byte offset
+ *   Backward compatibility by using sizeof for the counters.
+ *   No lock required for 32-bit messages
+ * Limitations:
+ * In case of messages greater than 32 bits, a dedicated mechanism (e.g. a
+ * lock) is required to prevent data corruption.
+ **********************************************************************/
+enum MFW_DRV_MSG_TYPE {
+ MFW_DRV_MSG_LINK_CHANGE,
+ MFW_DRV_MSG_FLR_FW_ACK_FAILED,
+ MFW_DRV_MSG_VF_DISABLED,
+ MFW_DRV_MSG_LLDP_DATA_UPDATED,
+ MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED,
+ MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED,
+ MFW_DRV_MSG_ERROR_RECOVERY,
+ MFW_DRV_MSG_BW_UPDATE,
+ MFW_DRV_MSG_S_TAG_UPDATE,
+ MFW_DRV_MSG_GET_LAN_STATS,
+ MFW_DRV_MSG_GET_FCOE_STATS,
+ MFW_DRV_MSG_GET_ISCSI_STATS,
+ MFW_DRV_MSG_GET_RDMA_STATS,
+ MFW_DRV_MSG_FAILURE_DETECTED,
+ MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
+ MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED,
+ MFW_DRV_MSG_EEE_NEGOTIATION_COMPLETE,
+ MFW_DRV_MSG_MAX
+};
+
+#define MFW_DRV_MSG_MAX_DWORDS(msgs) (((msgs - 1) >> 2) + 1)
+#define MFW_DRV_MSG_DWORD(msg_id) (msg_id >> 2)
+#define MFW_DRV_MSG_OFFSET(msg_id) ((msg_id & 0x3) << 3)
+#define MFW_DRV_MSG_MASK(msg_id) (0xff << MFW_DRV_MSG_OFFSET(msg_id))
+
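+/* Illustrative sketch, not part of the original HSI: a message of type
+ * msg_id is pending when its 8-bit counter in the MFW-owned msg[] array
+ * differs from the driver-owned counter in ack[], using the DWORD/OFFSET/
+ * MASK helpers above. Endianness handling is ignored here; the driver side
+ * would advance its counter with DRV_ACK_MSG below.
+ */
+static inline int mfw_drv_msg_pending(const u32 *msg, const u32 *ack,
+				      int msg_id)
+{
+	u32 diff = (msg[MFW_DRV_MSG_DWORD(msg_id)] ^
+		    ack[MFW_DRV_MSG_DWORD(msg_id)]) &
+		   MFW_DRV_MSG_MASK(msg_id);
+
+	return diff != 0;
+}
+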
+#ifdef BIG_ENDIAN /* Like MFW */
+#define DRV_ACK_MSG(msg_p, msg_id) \
+((u8)((u8 *)msg_p)[msg_id]++)
+#else
+#define DRV_ACK_MSG(msg_p, msg_id) \
+((u8)((u8 *)msg_p)[((msg_id & ~3) | ((~msg_id) & 3))]++)
+#endif
+
+#define MFW_DRV_UPDATE(shmem_func, msg_id) \
+((u8)((u8 *)(MFW_MB_P(shmem_func)->msg))[msg_id]++)
+
+struct public_mfw_mb {
+	u32 sup_msgs; /* Assigned with MFW_DRV_MSG_MAX */
+/* Incremented by the MFW */
+ u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+/* Incremented by the driver */
+ u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+};
+
+/**************************************/
+/* */
+/* P U B L I C D A T A */
+/* */
+/**************************************/
+enum public_sections {
+ PUBLIC_DRV_MB, /* Points to the first drv_mb of path0 */
+ PUBLIC_MFW_MB, /* Points to the first mfw_mb of path0 */
+ PUBLIC_GLOBAL,
+ PUBLIC_PATH,
+ PUBLIC_PORT,
+ PUBLIC_FUNC,
+ PUBLIC_MAX_SECTIONS
+};
+
+struct drv_ver_info_stc {
+ u32 ver;
+ u8 name[32];
+};
+
+/* Runtime data needs about 1/2K. We use 2K to be on the safe side.
+ * Please make sure data does not exceed this size.
+ */
+#define NUM_RUNTIME_DWORDS 16
+struct drv_init_hw_stc {
+ u32 init_hw_bitmask[NUM_RUNTIME_DWORDS];
+ u32 init_hw_data[NUM_RUNTIME_DWORDS * 32];
+};
+
+struct mcp_public_data {
+	/* The sections field is an array */
+ u32 num_sections;
+ offsize_t sections[PUBLIC_MAX_SECTIONS];
+ struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX];
+ struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX];
+ struct public_global global;
+ struct public_path path[MCP_GLOB_PATH_MAX];
+ struct public_port port[MCP_GLOB_PORT_MAX];
+ struct public_func func[MCP_GLOB_FUNC_MAX];
+};
+
+#define I2C_TRANSCEIVER_ADDR 0xa0
+#define MAX_I2C_TRANSACTION_SIZE 16
+#define MAX_I2C_TRANSCEIVER_PAGE_SIZE 256
+
+#endif /* MCP_PUBLIC_H */
diff --git a/src/seastar/dpdk/drivers/net/qede/base/nvm_cfg.h b/src/seastar/dpdk/drivers/net/qede/base/nvm_cfg.h
new file mode 100644
index 00000000..4e588350
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/nvm_cfg.h
@@ -0,0 +1,1937 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+/****************************************************************************
+ *
+ * Name: nvm_cfg.h
+ *
+ * Description: NVM config file - Generated file from nvm cfg excel.
+ * DO NOT MODIFY !!!
+ *
+ * Created: 12/15/2016
+ *
+ ****************************************************************************/
+
+#ifndef NVM_CFG_H
+#define NVM_CFG_H
+
+#define NVM_CFG_version 0x81805
+
+#define NVM_CFG_new_option_seq 15
+
+#define NVM_CFG_removed_option_seq 0
+
+#define NVM_CFG_updated_value_seq 1
+
+struct nvm_cfg_mac_address {
+ u32 mac_addr_hi;
+ #define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000FFFF
+ #define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0
+ u32 mac_addr_lo;
+};
+
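+/* Illustrative sketch, not part of the generated file: assembling the six
+ * MAC address bytes from the hi/lo words, assuming the two valid bytes of
+ * mac_addr_hi come first (u8/u32 from the OSAL typedefs).
+ */
+static inline void nvm_cfg_mac_to_bytes(const struct nvm_cfg_mac_address *mac,
+					u8 bytes[6])
+{
+	u32 hi = mac->mac_addr_hi & NVM_CFG_MAC_ADDRESS_HI_MASK;
+
+	bytes[0] = (u8)(hi >> 8);
+	bytes[1] = (u8)hi;
+	bytes[2] = (u8)(mac->mac_addr_lo >> 24);
+	bytes[3] = (u8)(mac->mac_addr_lo >> 16);
+	bytes[4] = (u8)(mac->mac_addr_lo >> 8);
+	bytes[5] = (u8)mac->mac_addr_lo;
+}
+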
+/******************************************
+ * nvm_cfg1 structs
+ ******************************************/
+struct nvm_cfg1_glob {
+ u32 generic_cont0; /* 0x0 */
+ #define NVM_CFG1_GLOB_BOARD_SWAP_MASK 0x0000000F
+ #define NVM_CFG1_GLOB_BOARD_SWAP_OFFSET 0
+ #define NVM_CFG1_GLOB_BOARD_SWAP_NONE 0x0
+ #define NVM_CFG1_GLOB_BOARD_SWAP_PATH 0x1
+ #define NVM_CFG1_GLOB_BOARD_SWAP_PORT 0x2
+ #define NVM_CFG1_GLOB_BOARD_SWAP_BOTH 0x3
+ #define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000FF0
+ #define NVM_CFG1_GLOB_MF_MODE_OFFSET 4
+ #define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0
+ #define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1
+ #define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2
+ #define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3
+ #define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4
+ #define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5
+ #define NVM_CFG1_GLOB_MF_MODE_BD 0x6
+ #define NVM_CFG1_GLOB_MF_MODE_UFP 0x7
+ #define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_MASK 0x00001000
+ #define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_OFFSET 12
+ #define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_DISABLED 0x0
+ #define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_ENABLED 0x1
+ #define NVM_CFG1_GLOB_AVS_MARGIN_LOW_MASK 0x001FE000
+ #define NVM_CFG1_GLOB_AVS_MARGIN_LOW_OFFSET 13
+ #define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_MASK 0x1FE00000
+ #define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_OFFSET 21
+ #define NVM_CFG1_GLOB_ENABLE_SRIOV_MASK 0x20000000
+ #define NVM_CFG1_GLOB_ENABLE_SRIOV_OFFSET 29
+ #define NVM_CFG1_GLOB_ENABLE_SRIOV_DISABLED 0x0
+ #define NVM_CFG1_GLOB_ENABLE_SRIOV_ENABLED 0x1
+ #define NVM_CFG1_GLOB_ENABLE_ATC_MASK 0x40000000
+ #define NVM_CFG1_GLOB_ENABLE_ATC_OFFSET 30
+ #define NVM_CFG1_GLOB_ENABLE_ATC_DISABLED 0x0
+ #define NVM_CFG1_GLOB_ENABLE_ATC_ENABLED 0x1
+ #define NVM_CFG1_GLOB_RESERVED__M_WAS_CLOCK_SLOWDOWN_MASK \
+ 0x80000000
+ #define NVM_CFG1_GLOB_RESERVED__M_WAS_CLOCK_SLOWDOWN_OFFSET 31
+ #define NVM_CFG1_GLOB_RESERVED__M_WAS_CLOCK_SLOWDOWN_DISABLED \
+ 0x0
+ #define NVM_CFG1_GLOB_RESERVED__M_WAS_CLOCK_SLOWDOWN_ENABLED 0x1
+ u32 engineering_change[3]; /* 0x4 */
+ u32 manufacturing_id; /* 0x10 */
+ u32 serial_number[4]; /* 0x14 */
+ u32 pcie_cfg; /* 0x24 */
+ #define NVM_CFG1_GLOB_PCI_GEN_MASK 0x00000003
+ #define NVM_CFG1_GLOB_PCI_GEN_OFFSET 0
+ #define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN1 0x0
+ #define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN2 0x1
+ #define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN3 0x2
+ #define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_MASK 0x00000004
+ #define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_OFFSET 2
+ #define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_DISABLED 0x0
+ #define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_ENABLED 0x1
+ #define NVM_CFG1_GLOB_ASPM_SUPPORT_MASK 0x00000018
+ #define NVM_CFG1_GLOB_ASPM_SUPPORT_OFFSET 3
+ #define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_ENABLED 0x0
+ #define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_DISABLED 0x1
+ #define NVM_CFG1_GLOB_ASPM_SUPPORT_L1_DISABLED 0x2
+ #define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_DISABLED 0x3
+ #define NVM_CFG1_GLOB_RESERVED_MPREVENT_PCIE_L1_MENTRY_MASK \
+ 0x00000020
+ #define NVM_CFG1_GLOB_RESERVED_MPREVENT_PCIE_L1_MENTRY_OFFSET 5
+ #define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_MASK 0x000003C0
+ #define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_OFFSET 6
+ #define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_MASK 0x00001C00
+ #define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_OFFSET 10
+ #define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_HW 0x0
+ #define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_0DB 0x1
+ #define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_3_5DB 0x2
+ #define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_6_0DB 0x3
+ #define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_MASK 0x001FE000
+ #define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_OFFSET 13
+ #define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_MASK 0x1FE00000
+ #define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_OFFSET 21
+ #define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_MASK 0x60000000
+ #define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_OFFSET 29
+	/* Set the duration, in seconds, the fan failure signal is sampled */
+ #define NVM_CFG1_GLOB_RESERVED_FAN_FAILURE_DURATION_MASK \
+ 0x80000000
+ #define NVM_CFG1_GLOB_RESERVED_FAN_FAILURE_DURATION_OFFSET 31
+ u32 mgmt_traffic; /* 0x28 */
+ #define NVM_CFG1_GLOB_RESERVED60_MASK 0x00000001
+ #define NVM_CFG1_GLOB_RESERVED60_OFFSET 0
+ #define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_MASK 0x000001FE
+ #define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_OFFSET 1
+ #define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_MASK 0x0001FE00
+ #define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_OFFSET 9
+ #define NVM_CFG1_GLOB_SMBUS_ADDRESS_MASK 0x01FE0000
+ #define NVM_CFG1_GLOB_SMBUS_ADDRESS_OFFSET 17
+ #define NVM_CFG1_GLOB_SIDEBAND_MODE_MASK 0x06000000
+ #define NVM_CFG1_GLOB_SIDEBAND_MODE_OFFSET 25
+ #define NVM_CFG1_GLOB_SIDEBAND_MODE_DISABLED 0x0
+ #define NVM_CFG1_GLOB_SIDEBAND_MODE_RMII 0x1
+ #define NVM_CFG1_GLOB_SIDEBAND_MODE_SGMII 0x2
+ #define NVM_CFG1_GLOB_AUX_MODE_MASK 0x78000000
+ #define NVM_CFG1_GLOB_AUX_MODE_OFFSET 27
+ #define NVM_CFG1_GLOB_AUX_MODE_DEFAULT 0x0
+ #define NVM_CFG1_GLOB_AUX_MODE_SMBUS_ONLY 0x1
+	/* Indicates whether external thermal sensor is available */
+ #define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_MASK 0x80000000
+ #define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_OFFSET 31
+ #define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_DISABLED 0x0
+ #define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ENABLED 0x1
+ u32 core_cfg; /* 0x2C */
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G 0x0
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G 0x1
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G 0x2
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F 0x3
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E 0x4
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G 0x5
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G 0xB
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xC
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xD
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xE
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G 0xF
+ #define NVM_CFG1_GLOB_MPS10_ENFORCE_TX_FIR_CFG_MASK 0x00000100
+ #define NVM_CFG1_GLOB_MPS10_ENFORCE_TX_FIR_CFG_OFFSET 8
+ #define NVM_CFG1_GLOB_MPS10_ENFORCE_TX_FIR_CFG_DISABLED 0x0
+ #define NVM_CFG1_GLOB_MPS10_ENFORCE_TX_FIR_CFG_ENABLED 0x1
+ #define NVM_CFG1_GLOB_MPS25_ENFORCE_TX_FIR_CFG_MASK 0x00000200
+ #define NVM_CFG1_GLOB_MPS25_ENFORCE_TX_FIR_CFG_OFFSET 9
+ #define NVM_CFG1_GLOB_MPS25_ENFORCE_TX_FIR_CFG_DISABLED 0x0
+ #define NVM_CFG1_GLOB_MPS25_ENFORCE_TX_FIR_CFG_ENABLED 0x1
+ #define NVM_CFG1_GLOB_MPS10_CORE_ADDR_MASK 0x0003FC00
+ #define NVM_CFG1_GLOB_MPS10_CORE_ADDR_OFFSET 10
+ #define NVM_CFG1_GLOB_MPS25_CORE_ADDR_MASK 0x03FC0000
+ #define NVM_CFG1_GLOB_MPS25_CORE_ADDR_OFFSET 18
+ #define NVM_CFG1_GLOB_AVS_MODE_MASK 0x1C000000
+ #define NVM_CFG1_GLOB_AVS_MODE_OFFSET 26
+ #define NVM_CFG1_GLOB_AVS_MODE_CLOSE_LOOP 0x0
+ #define NVM_CFG1_GLOB_AVS_MODE_OPEN_LOOP_CFG 0x1
+ #define NVM_CFG1_GLOB_AVS_MODE_OPEN_LOOP_OTP 0x2
+ #define NVM_CFG1_GLOB_AVS_MODE_DISABLED 0x3
+ #define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_MASK 0x60000000
+ #define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_OFFSET 29
+ #define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_DISABLED 0x0
+ #define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_ENABLED 0x1
+ u32 e_lane_cfg1; /* 0x30 */
+ #define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F
+ #define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0
+ #define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0
+ #define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4
+ #define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00
+ #define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8
+ #define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000
+ #define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12
+ #define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000
+ #define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16
+ #define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000
+ #define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20
+ #define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000
+ #define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24
+ #define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000
+ #define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28
+ u32 e_lane_cfg2; /* 0x34 */
+ #define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001
+ #define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0
+ #define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002
+ #define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1
+ #define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004
+ #define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2
+ #define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008
+ #define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3
+ #define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010
+ #define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4
+ #define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020
+ #define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5
+ #define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040
+ #define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6
+ #define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080
+ #define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7
+ #define NVM_CFG1_GLOB_SMBUS_MODE_MASK 0x00000F00
+ #define NVM_CFG1_GLOB_SMBUS_MODE_OFFSET 8
+ #define NVM_CFG1_GLOB_SMBUS_MODE_DISABLED 0x0
+ #define NVM_CFG1_GLOB_SMBUS_MODE_100KHZ 0x1
+ #define NVM_CFG1_GLOB_SMBUS_MODE_400KHZ 0x2
+ #define NVM_CFG1_GLOB_NCSI_MASK 0x0000F000
+ #define NVM_CFG1_GLOB_NCSI_OFFSET 12
+ #define NVM_CFG1_GLOB_NCSI_DISABLED 0x0
+ #define NVM_CFG1_GLOB_NCSI_ENABLED 0x1
+ /* Maximum advertised pcie link width */
+ #define NVM_CFG1_GLOB_MAX_LINK_WIDTH_MASK 0x000F0000
+ #define NVM_CFG1_GLOB_MAX_LINK_WIDTH_OFFSET 16
+ #define NVM_CFG1_GLOB_MAX_LINK_WIDTH_BB_16_LANES 0x0
+ #define NVM_CFG1_GLOB_MAX_LINK_WIDTH_1_LANE 0x1
+ #define NVM_CFG1_GLOB_MAX_LINK_WIDTH_2_LANES 0x2
+ #define NVM_CFG1_GLOB_MAX_LINK_WIDTH_4_LANES 0x3
+ #define NVM_CFG1_GLOB_MAX_LINK_WIDTH_8_LANES 0x4
+ /* ASPM L1 mode */
+ #define NVM_CFG1_GLOB_ASPM_L1_MODE_MASK 0x00300000
+ #define NVM_CFG1_GLOB_ASPM_L1_MODE_OFFSET 20
+ #define NVM_CFG1_GLOB_ASPM_L1_MODE_FORCED 0x0
+ #define NVM_CFG1_GLOB_ASPM_L1_MODE_DYNAMIC_LOW_LATENCY 0x1
+ #define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_MASK 0x01C00000
+ #define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_OFFSET 22
+ #define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_DISABLED 0x0
+ #define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_INT_EXT_I2C 0x1
+ #define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_INT_ONLY 0x2
+ #define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_INT_EXT_SMBUS 0x3
+ #define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_MASK \
+ 0x06000000
+ #define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_OFFSET 25
+ #define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_DISABLE 0x0
+ #define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_INTERNAL 0x1
+ #define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_EXTERNAL 0x2
+ #define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_BOTH 0x3
+ /* Set the PLDM sensor modes */
+ #define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_MASK 0x38000000
+ #define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_OFFSET 27
+ #define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_INTERNAL 0x0
+ #define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_EXTERNAL 0x1
+ #define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_BOTH 0x2
+ /* ROL enable */
+ #define NVM_CFG1_GLOB_RESET_ON_LAN_MASK 0x80000000
+ #define NVM_CFG1_GLOB_RESET_ON_LAN_OFFSET 31
+ #define NVM_CFG1_GLOB_RESET_ON_LAN_DISABLED 0x0
+ #define NVM_CFG1_GLOB_RESET_ON_LAN_ENABLED 0x1
+ u32 f_lane_cfg1; /* 0x38 */
+ #define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F
+ #define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0
+ #define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0
+ #define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4
+ #define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00
+ #define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8
+ #define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000
+ #define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12
+ #define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000
+ #define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16
+ #define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000
+ #define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20
+ #define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000
+ #define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24
+ #define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000
+ #define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28
+ u32 f_lane_cfg2; /* 0x3C */
+ #define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001
+ #define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0
+ #define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002
+ #define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1
+ #define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004
+ #define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2
+ #define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008
+ #define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3
+ #define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010
+ #define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4
+ #define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020
+ #define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5
+ #define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040
+ #define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6
+ #define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080
+ #define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7
+ /* Control the period between two successive checks */
+ #define NVM_CFG1_GLOB_TEMPERATURE_PERIOD_BETWEEN_CHECKS_MASK \
+ 0x0000FF00
+ #define NVM_CFG1_GLOB_TEMPERATURE_PERIOD_BETWEEN_CHECKS_OFFSET 8
+ /* Set shutdown temperature */
+ #define NVM_CFG1_GLOB_SHUTDOWN_THRESHOLD_TEMPERATURE_MASK \
+ 0x00FF0000
+ #define NVM_CFG1_GLOB_SHUTDOWN_THRESHOLD_TEMPERATURE_OFFSET 16
+ /* Set max. count for over operational temperature */
+ #define NVM_CFG1_GLOB_MAX_COUNT_OPER_THRESHOLD_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_MAX_COUNT_OPER_THRESHOLD_OFFSET 24
+ u32 mps10_preemphasis; /* 0x40 */
+ #define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24
+ u32 mps10_driver_current; /* 0x44 */
+ #define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24
+ u32 mps25_preemphasis; /* 0x48 */
+ #define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24
+ u32 mps25_driver_current; /* 0x4C */
+ #define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24
+ u32 pci_id; /* 0x50 */
+ #define NVM_CFG1_GLOB_VENDOR_ID_MASK 0x0000FFFF
+ #define NVM_CFG1_GLOB_VENDOR_ID_OFFSET 0
+ /* Set caution temperature */
+ #define NVM_CFG1_GLOB_CAUTION_THRESHOLD_TEMPERATURE_MASK \
+ 0x00FF0000
+ #define NVM_CFG1_GLOB_CAUTION_THRESHOLD_TEMPERATURE_OFFSET 16
+ /* Set external thermal sensor I2C address */
+ #define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ADDRESS_MASK \
+ 0xFF000000
+ #define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ADDRESS_OFFSET 24
+ u32 pci_subsys_id; /* 0x54 */
+ #define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFF
+ #define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_OFFSET 0
+ #define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_MASK 0xFFFF0000
+ #define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_OFFSET 16
+ u32 bar; /* 0x58 */
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_MASK 0x0000000F
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_OFFSET 0
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_DISABLED 0x0
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2K 0x1
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4K 0x2
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8K 0x3
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16K 0x4
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32K 0x5
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_64K 0x6
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_128K 0x7
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_256K 0x8
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_512K 0x9
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_1M 0xA
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2M 0xB
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4M 0xC
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8M 0xD
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16M 0xE
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32M 0xF
+ /* BB VF BAR2 size */
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_MASK 0x000000F0
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_OFFSET 4
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_DISABLED 0x0
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4K 0x1
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8K 0x2
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16K 0x3
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32K 0x4
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64K 0x5
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_128K 0x6
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_256K 0x7
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_512K 0x8
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_1M 0x9
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_2M 0xA
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4M 0xB
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8M 0xC
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16M 0xD
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32M 0xE
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64M 0xF
+ /* BB BAR2 size (global) */
+ #define NVM_CFG1_GLOB_BAR2_SIZE_MASK 0x00000F00
+ #define NVM_CFG1_GLOB_BAR2_SIZE_OFFSET 8
+ #define NVM_CFG1_GLOB_BAR2_SIZE_DISABLED 0x0
+ #define NVM_CFG1_GLOB_BAR2_SIZE_64K 0x1
+ #define NVM_CFG1_GLOB_BAR2_SIZE_128K 0x2
+ #define NVM_CFG1_GLOB_BAR2_SIZE_256K 0x3
+ #define NVM_CFG1_GLOB_BAR2_SIZE_512K 0x4
+ #define NVM_CFG1_GLOB_BAR2_SIZE_1M 0x5
+ #define NVM_CFG1_GLOB_BAR2_SIZE_2M 0x6
+ #define NVM_CFG1_GLOB_BAR2_SIZE_4M 0x7
+ #define NVM_CFG1_GLOB_BAR2_SIZE_8M 0x8
+ #define NVM_CFG1_GLOB_BAR2_SIZE_16M 0x9
+ #define NVM_CFG1_GLOB_BAR2_SIZE_32M 0xA
+ #define NVM_CFG1_GLOB_BAR2_SIZE_64M 0xB
+ #define NVM_CFG1_GLOB_BAR2_SIZE_128M 0xC
+ #define NVM_CFG1_GLOB_BAR2_SIZE_256M 0xD
+ #define NVM_CFG1_GLOB_BAR2_SIZE_512M 0xE
+ #define NVM_CFG1_GLOB_BAR2_SIZE_1G 0xF
+	/* Set the duration, in seconds, the fan failure signal is sampled */
+ #define NVM_CFG1_GLOB_FAN_FAILURE_DURATION_MASK 0x0000F000
+ #define NVM_CFG1_GLOB_FAN_FAILURE_DURATION_OFFSET 12
+	/* This field defines the board's total budget for BAR2; when
+	 * disabled, the regular BAR size is used.
+	 */
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_OFFSET 16
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_DISABLED 0x0
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_64K 0x1
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_128K 0x2
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_256K 0x3
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_512K 0x4
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_1M 0x5
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_2M 0x6
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_4M 0x7
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_8M 0x8
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_16M 0x9
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_32M 0xA
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_64M 0xB
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_128M 0xC
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_256M 0xD
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_512M 0xE
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_1G 0xF
+ /* Enable/Disable Crash dump triggers */
+ #define NVM_CFG1_GLOB_CRASH_DUMP_TRIGGER_ENABLE_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_CRASH_DUMP_TRIGGER_ENABLE_OFFSET 24
+ u32 mps10_txfir_main; /* 0x5C */
+ #define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24
+ u32 mps10_txfir_post; /* 0x60 */
+ #define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24
+ u32 mps25_txfir_main; /* 0x64 */
+ #define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24
+ u32 mps25_txfir_post; /* 0x68 */
+ #define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24
+ u32 manufacture_ver; /* 0x6C */
+ #define NVM_CFG1_GLOB_MANUF0_VER_MASK 0x0000003F
+ #define NVM_CFG1_GLOB_MANUF0_VER_OFFSET 0
+ #define NVM_CFG1_GLOB_MANUF1_VER_MASK 0x00000FC0
+ #define NVM_CFG1_GLOB_MANUF1_VER_OFFSET 6
+ #define NVM_CFG1_GLOB_MANUF2_VER_MASK 0x0003F000
+ #define NVM_CFG1_GLOB_MANUF2_VER_OFFSET 12
+ #define NVM_CFG1_GLOB_MANUF3_VER_MASK 0x00FC0000
+ #define NVM_CFG1_GLOB_MANUF3_VER_OFFSET 18
+ #define NVM_CFG1_GLOB_MANUF4_VER_MASK 0x3F000000
+ #define NVM_CFG1_GLOB_MANUF4_VER_OFFSET 24
+ /* Select package id method */
+ #define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_IO_MASK 0x40000000
+ #define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_IO_OFFSET 30
+ #define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_IO_NVRAM 0x0
+ #define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_IO_IO_PINS 0x1
+ #define NVM_CFG1_GLOB_RECOVERY_MODE_MASK 0x80000000
+ #define NVM_CFG1_GLOB_RECOVERY_MODE_OFFSET 31
+ #define NVM_CFG1_GLOB_RECOVERY_MODE_DISABLED 0x0
+ #define NVM_CFG1_GLOB_RECOVERY_MODE_ENABLED 0x1
+ u32 manufacture_time; /* 0x70 */
+ #define NVM_CFG1_GLOB_MANUF0_TIME_MASK 0x0000003F
+ #define NVM_CFG1_GLOB_MANUF0_TIME_OFFSET 0
+ #define NVM_CFG1_GLOB_MANUF1_TIME_MASK 0x00000FC0
+ #define NVM_CFG1_GLOB_MANUF1_TIME_OFFSET 6
+ #define NVM_CFG1_GLOB_MANUF2_TIME_MASK 0x0003F000
+ #define NVM_CFG1_GLOB_MANUF2_TIME_OFFSET 12
+ /* Max MSIX for Ethernet in default mode */
+ #define NVM_CFG1_GLOB_MAX_MSIX_MASK 0x03FC0000
+ #define NVM_CFG1_GLOB_MAX_MSIX_OFFSET 18
+ /* PF Mapping */
+ #define NVM_CFG1_GLOB_PF_MAPPING_MASK 0x0C000000
+ #define NVM_CFG1_GLOB_PF_MAPPING_OFFSET 26
+ #define NVM_CFG1_GLOB_PF_MAPPING_CONTINUOUS 0x0
+ #define NVM_CFG1_GLOB_PF_MAPPING_FIXED 0x1
+ u32 led_global_settings; /* 0x74 */
+ #define NVM_CFG1_GLOB_LED_SWAP_0_MASK 0x0000000F
+ #define NVM_CFG1_GLOB_LED_SWAP_0_OFFSET 0
+ #define NVM_CFG1_GLOB_LED_SWAP_1_MASK 0x000000F0
+ #define NVM_CFG1_GLOB_LED_SWAP_1_OFFSET 4
+ #define NVM_CFG1_GLOB_LED_SWAP_2_MASK 0x00000F00
+ #define NVM_CFG1_GLOB_LED_SWAP_2_OFFSET 8
+ #define NVM_CFG1_GLOB_LED_SWAP_3_MASK 0x0000F000
+ #define NVM_CFG1_GLOB_LED_SWAP_3_OFFSET 12
+	/* Max. continuous operating temperature */
+ #define NVM_CFG1_GLOB_MAX_CONT_OPERATING_TEMP_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_MAX_CONT_OPERATING_TEMP_OFFSET 16
+ /* GPIO which triggers run-time port swap according to the map
+ * specified in option 205
+ */
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_OFFSET 24
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO31 0x20
+ u32 generic_cont1; /* 0x78 */
+ #define NVM_CFG1_GLOB_AVS_DAC_CODE_MASK 0x000003FF
+ #define NVM_CFG1_GLOB_AVS_DAC_CODE_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE0_SWAP_MASK 0x00000C00
+ #define NVM_CFG1_GLOB_LANE0_SWAP_OFFSET 10
+ #define NVM_CFG1_GLOB_LANE1_SWAP_MASK 0x00003000
+ #define NVM_CFG1_GLOB_LANE1_SWAP_OFFSET 12
+ #define NVM_CFG1_GLOB_LANE2_SWAP_MASK 0x0000C000
+ #define NVM_CFG1_GLOB_LANE2_SWAP_OFFSET 14
+ #define NVM_CFG1_GLOB_LANE3_SWAP_MASK 0x00030000
+ #define NVM_CFG1_GLOB_LANE3_SWAP_OFFSET 16
+ /* Enable option 195 - Overriding the PCIe Preset value */
+ #define NVM_CFG1_GLOB_OVERRIDE_PCIE_PRESET_EQUAL_MASK 0x00040000
+ #define NVM_CFG1_GLOB_OVERRIDE_PCIE_PRESET_EQUAL_OFFSET 18
+ #define NVM_CFG1_GLOB_OVERRIDE_PCIE_PRESET_EQUAL_DISABLED 0x0
+ #define NVM_CFG1_GLOB_OVERRIDE_PCIE_PRESET_EQUAL_ENABLED 0x1
+ /* PCIe Preset value - applies only if option 194 is enabled */
+ #define NVM_CFG1_GLOB_PCIE_PRESET_VALUE_MASK 0x00780000
+ #define NVM_CFG1_GLOB_PCIE_PRESET_VALUE_OFFSET 19
+ /* Port mapping to be used when the run-time GPIO for port-swap is
+ * defined and set.
+ */
+ #define NVM_CFG1_GLOB_RUNTIME_PORT0_SWAP_MAP_MASK 0x01800000
+ #define NVM_CFG1_GLOB_RUNTIME_PORT0_SWAP_MAP_OFFSET 23
+ #define NVM_CFG1_GLOB_RUNTIME_PORT1_SWAP_MAP_MASK 0x06000000
+ #define NVM_CFG1_GLOB_RUNTIME_PORT1_SWAP_MAP_OFFSET 25
+ #define NVM_CFG1_GLOB_RUNTIME_PORT2_SWAP_MAP_MASK 0x18000000
+ #define NVM_CFG1_GLOB_RUNTIME_PORT2_SWAP_MAP_OFFSET 27
+ #define NVM_CFG1_GLOB_RUNTIME_PORT3_SWAP_MAP_MASK 0x60000000
+ #define NVM_CFG1_GLOB_RUNTIME_PORT3_SWAP_MAP_OFFSET 29
+ u32 mbi_version; /* 0x7C */
+ #define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0
+ #define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8
+ #define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16
+ /* If set to other than NA, 0 - Normal operation, 1 - Thermal event
+ * occurred
+ */
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_OFFSET 24
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO31 0x20
+ u32 mbi_date; /* 0x80 */
+ u32 misc_sig; /* 0x84 */
+ /* Define the GPIO mapping to switch i2c mux */
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_OFFSET 0
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_OFFSET 8
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__NA 0x0
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO0 0x1
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO1 0x2
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO2 0x3
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO3 0x4
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO4 0x5
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO5 0x6
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO6 0x7
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO7 0x8
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO8 0x9
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO9 0xA
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO10 0xB
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO11 0xC
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO12 0xD
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO13 0xE
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO14 0xF
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO15 0x10
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO16 0x11
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO17 0x12
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO18 0x13
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO19 0x14
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO20 0x15
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO21 0x16
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO22 0x17
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO23 0x18
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO24 0x19
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO25 0x1A
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO26 0x1B
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO27 0x1C
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO28 0x1D
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO29 0x1E
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO30 0x1F
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO31 0x20
+ /* Interrupt signal used for SMBus/I2C management interface
+ * 0 = Interrupt event occurred
+ * 1 = Normal
+ */
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_OFFSET 16
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO31 0x20
+ /* Set ALOM FAN on GPIO */
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_OFFSET 24
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO31 0x20
+ u32 device_capabilities; /* 0x88 */
+ #define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1
+ #define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE 0x2
+ #define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI 0x4
+ #define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE 0x8
+ #define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP 0x10
+ u32 power_dissipated; /* 0x8C */
+ #define NVM_CFG1_GLOB_POWER_DIS_D0_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_POWER_DIS_D0_OFFSET 0
+ #define NVM_CFG1_GLOB_POWER_DIS_D1_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_POWER_DIS_D1_OFFSET 8
+ #define NVM_CFG1_GLOB_POWER_DIS_D2_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_POWER_DIS_D2_OFFSET 16
+ #define NVM_CFG1_GLOB_POWER_DIS_D3_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_POWER_DIS_D3_OFFSET 24
+ u32 power_consumed; /* 0x90 */
+ #define NVM_CFG1_GLOB_POWER_CONS_D0_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_POWER_CONS_D0_OFFSET 0
+ #define NVM_CFG1_GLOB_POWER_CONS_D1_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_POWER_CONS_D1_OFFSET 8
+ #define NVM_CFG1_GLOB_POWER_CONS_D2_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_POWER_CONS_D2_OFFSET 16
+ #define NVM_CFG1_GLOB_POWER_CONS_D3_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_POWER_CONS_D3_OFFSET 24
+ u32 efi_version; /* 0x94 */
+ u32 multi_network_modes_capability; /* 0x98 */
+ #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_4X10G 0x1
+ #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_1X25G 0x2
+ #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_2X25G 0x4
+ #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_4X25G 0x8
+ #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_1X40G 0x10
+ #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_2X40G 0x20
+ #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_2X50G 0x40
+ #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_BB_1X100G \
+ 0x80
+ #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_2X10G 0x100
+ /* @DPDK */
+ u32 reserved1[12]; /* 0x9C */
+ u32 oem1_number[8]; /* 0xCC */
+ u32 oem2_number[8]; /* 0xEC */
+ u32 mps25_active_txfir_pre; /* 0x10C */
+ #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_PRE_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_PRE_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_PRE_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_PRE_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_PRE_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_PRE_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_PRE_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_PRE_OFFSET 24
+ u32 mps25_active_txfir_main; /* 0x110 */
+ #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_MAIN_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_MAIN_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_MAIN_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_MAIN_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_MAIN_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_MAIN_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_MAIN_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_MAIN_OFFSET 24
+ u32 mps25_active_txfir_post; /* 0x114 */
+ #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_POST_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_POST_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_POST_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_POST_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_POST_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_POST_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_POST_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_POST_OFFSET 24
+ u32 features; /* 0x118 */
+ /* Temperature at which the aux fan is turned on */
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_VALUE_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_VALUE_OFFSET 0
+ /* Set NC-SI package ID */
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_OFFSET 8
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO31 0x20
+ /* PMBUS Clock GPIO */
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_OFFSET 16
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO31 0x20
+ /* PMBUS Data GPIO */
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_OFFSET 24
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO31 0x20
+ u32 tx_rx_eq_25g_hlpc; /* 0x11C */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_HLPC_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_HLPC_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_HLPC_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_HLPC_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_HLPC_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_HLPC_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_HLPC_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_HLPC_OFFSET 24
+ u32 tx_rx_eq_25g_llpc; /* 0x120 */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_LLPC_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_LLPC_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_LLPC_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_LLPC_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_LLPC_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_LLPC_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_LLPC_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_LLPC_OFFSET 24
+ u32 tx_rx_eq_25g_ac; /* 0x124 */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_AC_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_AC_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_AC_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_AC_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_AC_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_AC_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_AC_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_AC_OFFSET 24
+ u32 tx_rx_eq_10g_pc; /* 0x128 */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_PC_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_PC_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_PC_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_PC_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_PC_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_PC_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_PC_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_PC_OFFSET 24
+ u32 tx_rx_eq_10g_ac; /* 0x12C */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_AC_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_AC_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_AC_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_AC_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_AC_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_AC_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_AC_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_AC_OFFSET 24
+ u32 tx_rx_eq_1g; /* 0x130 */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_1G_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_1G_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_1G_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_1G_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_1G_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_1G_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_1G_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_1G_OFFSET 24
+ u32 tx_rx_eq_25g_bt; /* 0x134 */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_BT_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_BT_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_BT_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_BT_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_BT_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_BT_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_BT_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_BT_OFFSET 24
+ u32 tx_rx_eq_10g_bt; /* 0x138 */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_BT_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_BT_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_BT_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_BT_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_BT_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_BT_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_BT_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_BT_OFFSET 24
+ u32 generic_cont4; /* 0x13C */
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_OFFSET 0
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO31 0x20
+ u32 reserved[58]; /* 0x140 */
+};
+
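For readers unfamiliar with the *_MASK / *_OFFSET convention used throughout these structures, the sketch below shows how a packed field is typically read out of one of the config dwords. It assumes the header's u32 typedef; the helper nvm_cfg_get_field() and the accessor name are hypothetical illustrations, not part of this patch or of the qede base code.

static inline u32 nvm_cfg_get_field(u32 dword, u32 mask, u32 offset)
{
	/* Isolate the field's bits, then shift them down to bit 0 */
	return (dword & mask) >> offset;
}

/* Example: the "Max MSIX for Ethernet in default mode" field is packed
 * into bits 18..25 of nvm_cfg1_glob.manufacture_time (word 0x70 above).
 */
static inline u32 nvm_cfg1_glob_max_msix(const struct nvm_cfg1_glob *glob)
{
	return nvm_cfg_get_field(glob->manufacture_time,
				 NVM_CFG1_GLOB_MAX_MSIX_MASK,
				 NVM_CFG1_GLOB_MAX_MSIX_OFFSET);
}
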
+struct nvm_cfg1_path {
+ u32 reserved[1]; /* 0x0 */
+};
+
+struct nvm_cfg1_port {
+ u32 reserved__m_relocated_to_option_123; /* 0x0 */
+ u32 reserved__m_relocated_to_option_124; /* 0x4 */
+ u32 generic_cont0; /* 0x8 */
+ #define NVM_CFG1_PORT_LED_MODE_MASK 0x000000FF
+ #define NVM_CFG1_PORT_LED_MODE_OFFSET 0
+ #define NVM_CFG1_PORT_LED_MODE_MAC1 0x0
+ #define NVM_CFG1_PORT_LED_MODE_PHY1 0x1
+ #define NVM_CFG1_PORT_LED_MODE_PHY2 0x2
+ #define NVM_CFG1_PORT_LED_MODE_PHY3 0x3
+ #define NVM_CFG1_PORT_LED_MODE_MAC2 0x4
+ #define NVM_CFG1_PORT_LED_MODE_PHY4 0x5
+ #define NVM_CFG1_PORT_LED_MODE_PHY5 0x6
+ #define NVM_CFG1_PORT_LED_MODE_PHY6 0x7
+ #define NVM_CFG1_PORT_LED_MODE_MAC3 0x8
+ #define NVM_CFG1_PORT_LED_MODE_PHY7 0x9
+ #define NVM_CFG1_PORT_LED_MODE_PHY8 0xA
+ #define NVM_CFG1_PORT_LED_MODE_PHY9 0xB
+ #define NVM_CFG1_PORT_LED_MODE_MAC4 0xC
+ #define NVM_CFG1_PORT_LED_MODE_PHY10 0xD
+ #define NVM_CFG1_PORT_LED_MODE_PHY11 0xE
+ #define NVM_CFG1_PORT_LED_MODE_PHY12 0xF
+ #define NVM_CFG1_PORT_LED_MODE_BREAKOUT 0x10
+ #define NVM_CFG1_PORT_ROCE_PRIORITY_MASK 0x0000FF00
+ #define NVM_CFG1_PORT_ROCE_PRIORITY_OFFSET 8
+ #define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000F0000
+ #define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16
+ #define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0
+ #define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1
+ #define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2
+ #define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3
+ #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00F00000
+ #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20
+ #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1
+ #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE 0x2
+ #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI 0x4
+ /* GPIO for HW reset of the PHY. If it is the same for all ports,
+ * the same value must be set for all ports.
+ */
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_MASK 0xFF000000
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_OFFSET 24
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_NA 0x0
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO0 0x1
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO1 0x2
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO2 0x3
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO3 0x4
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO4 0x5
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO5 0x6
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO6 0x7
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO7 0x8
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO8 0x9
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO9 0xA
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO10 0xB
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO11 0xC
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO12 0xD
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO13 0xE
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO14 0xF
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO15 0x10
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO16 0x11
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO17 0x12
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO18 0x13
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO19 0x14
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO20 0x15
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO21 0x16
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO22 0x17
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO23 0x18
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO24 0x19
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO25 0x1A
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO26 0x1B
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO27 0x1C
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO28 0x1D
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO29 0x1E
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO30 0x1F
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO31 0x20
+ u32 pcie_cfg; /* 0xC */
+ #define NVM_CFG1_PORT_RESERVED15_MASK 0x00000007
+ #define NVM_CFG1_PORT_RESERVED15_OFFSET 0
+ u32 features; /* 0x10 */
+ #define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_MASK 0x00000001
+ #define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_OFFSET 0
+ #define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_DISABLED 0x0
+ #define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_ENABLED 0x1
+ #define NVM_CFG1_PORT_MAGIC_PACKET_WOL_MASK 0x00000002
+ #define NVM_CFG1_PORT_MAGIC_PACKET_WOL_OFFSET 1
+ #define NVM_CFG1_PORT_MAGIC_PACKET_WOL_DISABLED 0x0
+ #define NVM_CFG1_PORT_MAGIC_PACKET_WOL_ENABLED 0x1
+ u32 speed_cap_mask; /* 0x14 */
+ #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000FFFF
+ #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
+ #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1
+ #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8
+ #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10
+ #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20
+ #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40
+ #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_MASK 0xFFFF0000
+ #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_OFFSET 16
+ #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_1G 0x1
+ #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_25G 0x8
+ #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_40G 0x10
+ #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_50G 0x20
+ #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_BB_100G 0x40
+ u32 link_settings; /* 0x18 */
+ #define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000F
+ #define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0
+ #define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_DRV_LINK_SPEED_SMARTLINQ 0x8
+ #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070
+ #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4
+ #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1
+ #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2
+ #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4
+ #define NVM_CFG1_PORT_MFW_LINK_SPEED_MASK 0x00000780
+ #define NVM_CFG1_PORT_MFW_LINK_SPEED_OFFSET 7
+ #define NVM_CFG1_PORT_MFW_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_MFW_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_MFW_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MFW_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_MFW_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_MFW_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_MFW_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_MFW_LINK_SPEED_SMARTLINQ 0x8
+ #define NVM_CFG1_PORT_MFW_FLOW_CONTROL_MASK 0x00003800
+ #define NVM_CFG1_PORT_MFW_FLOW_CONTROL_OFFSET 11
+ #define NVM_CFG1_PORT_MFW_FLOW_CONTROL_AUTONEG 0x1
+ #define NVM_CFG1_PORT_MFW_FLOW_CONTROL_RX 0x2
+ #define NVM_CFG1_PORT_MFW_FLOW_CONTROL_TX 0x4
+ #define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_MASK \
+ 0x00004000
+ #define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_OFFSET 14
+ #define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_DISABLED \
+ 0x0
+ #define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_ENABLED \
+ 0x1
+ #define NVM_CFG1_PORT_AN_25G_50G_OUI_MASK 0x00018000
+ #define NVM_CFG1_PORT_AN_25G_50G_OUI_OFFSET 15
+ #define NVM_CFG1_PORT_AN_25G_50G_OUI_CONSORTIUM 0x0
+ #define NVM_CFG1_PORT_AN_25G_50G_OUI_BAM 0x1
+ #define NVM_CFG1_PORT_FEC_FORCE_MODE_MASK 0x000E0000
+ #define NVM_CFG1_PORT_FEC_FORCE_MODE_OFFSET 17
+ #define NVM_CFG1_PORT_FEC_FORCE_MODE_NONE 0x0
+ #define NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE 0x1
+ #define NVM_CFG1_PORT_FEC_FORCE_MODE_RS 0x2
+ #define NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO 0x7
+ #define NVM_CFG1_PORT_FEC_AN_MODE_MASK 0x00700000
+ #define NVM_CFG1_PORT_FEC_AN_MODE_OFFSET 20
+ #define NVM_CFG1_PORT_FEC_AN_MODE_NONE 0x0
+ #define NVM_CFG1_PORT_FEC_AN_MODE_10G_FIRECODE 0x1
+ #define NVM_CFG1_PORT_FEC_AN_MODE_25G_FIRECODE 0x2
+ #define NVM_CFG1_PORT_FEC_AN_MODE_10G_AND_25G_FIRECODE 0x3
+ #define NVM_CFG1_PORT_FEC_AN_MODE_25G_RS 0x4
+ #define NVM_CFG1_PORT_FEC_AN_MODE_25G_FIRECODE_AND_RS 0x5
+ #define NVM_CFG1_PORT_FEC_AN_MODE_ALL 0x6
+ u32 phy_cfg; /* 0x1C */
+ #define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_MASK 0x0000FFFF
+ #define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_OFFSET 0
+ #define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_HIGIG 0x1
+ #define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_SCRAMBLER 0x2
+ #define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_FIBER 0x4
+ #define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_CL72_AN 0x8
+ #define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_FEC_AN 0x10
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_MASK 0x00FF0000
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_OFFSET 16
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_BYPASS 0x0
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR 0x2
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR2 0x3
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR4 0x4
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XFI 0x8
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SFI 0x9
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_1000X 0xB
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SGMII 0xC
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLAUI 0x11
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLPPI 0x12
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CAUI 0x21
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CPPI 0x22
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_25GAUI 0x31
+ #define NVM_CFG1_PORT_AN_MODE_MASK 0xFF000000
+ #define NVM_CFG1_PORT_AN_MODE_OFFSET 24
+ #define NVM_CFG1_PORT_AN_MODE_NONE 0x0
+ #define NVM_CFG1_PORT_AN_MODE_CL73 0x1
+ #define NVM_CFG1_PORT_AN_MODE_CL37 0x2
+ #define NVM_CFG1_PORT_AN_MODE_CL73_BAM 0x3
+ #define NVM_CFG1_PORT_AN_MODE_BB_CL37_BAM 0x4
+ #define NVM_CFG1_PORT_AN_MODE_BB_HPAM 0x5
+ #define NVM_CFG1_PORT_AN_MODE_BB_SGMII 0x6
+ u32 mgmt_traffic; /* 0x20 */
+ #define NVM_CFG1_PORT_RESERVED61_MASK 0x0000000F
+ #define NVM_CFG1_PORT_RESERVED61_OFFSET 0
+ u32 ext_phy; /* 0x24 */
+ #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_MASK 0x000000FF
+ #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_OFFSET 0
+ #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_NONE 0x0
+ #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM8485X 0x1
+ #define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_MASK 0x0000FF00
+ #define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET 8
+ /* EEE power saving mode */
+ #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK 0x00FF0000
+ #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET 16
+ #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED 0x0
+ #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED 0x1
+ #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE 0x2
+ #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY 0x3
+ u32 mba_cfg1; /* 0x28 */
+ #define NVM_CFG1_PORT_PREBOOT_OPROM_MASK 0x00000001
+ #define NVM_CFG1_PORT_PREBOOT_OPROM_OFFSET 0
+ #define NVM_CFG1_PORT_PREBOOT_OPROM_DISABLED 0x0
+ #define NVM_CFG1_PORT_PREBOOT_OPROM_ENABLED 0x1
+ #define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_MASK 0x00000006
+ #define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_OFFSET 1
+ #define NVM_CFG1_PORT_MBA_DELAY_TIME_MASK 0x00000078
+ #define NVM_CFG1_PORT_MBA_DELAY_TIME_OFFSET 3
+ #define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_MASK 0x00000080
+ #define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_OFFSET 7
+ #define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_S 0x0
+ #define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_B 0x1
+ #define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_MASK 0x00000100
+ #define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_OFFSET 8
+ #define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_DISABLED 0x0
+ #define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_ENABLED 0x1
+ #define NVM_CFG1_PORT_RESERVED5_MASK 0x0001FE00
+ #define NVM_CFG1_PORT_RESERVED5_OFFSET 9
+ #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_MASK 0x001E0000
+ #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_OFFSET 17
+ #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_SMARTLINQ 0x8
+ #define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_MASK \
+ 0x00E00000
+ #define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_OFFSET 21
+ u32 mba_cfg2; /* 0x2C */
+ #define NVM_CFG1_PORT_RESERVED65_MASK 0x0000FFFF
+ #define NVM_CFG1_PORT_RESERVED65_OFFSET 0
+ #define NVM_CFG1_PORT_RESERVED66_MASK 0x00010000
+ #define NVM_CFG1_PORT_RESERVED66_OFFSET 16
+ u32 vf_cfg; /* 0x30 */
+ #define NVM_CFG1_PORT_RESERVED8_MASK 0x0000FFFF
+ #define NVM_CFG1_PORT_RESERVED8_OFFSET 0
+ #define NVM_CFG1_PORT_RESERVED6_MASK 0x000F0000
+ #define NVM_CFG1_PORT_RESERVED6_OFFSET 16
+ struct nvm_cfg_mac_address lldp_mac_address; /* 0x34 */
+ u32 led_port_settings; /* 0x3C */
+ #define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_MASK 0x000000FF
+ #define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_OFFSET 0
+ #define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_MASK 0x0000FF00
+ #define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_OFFSET 8
+ #define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_MASK 0x00FF0000
+ #define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_OFFSET 16
+ #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_1G 0x1
+ #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_10G 0x2
+ #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_25G 0x8
+ #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_40G 0x10
+ #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_50G 0x20
+ #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_BB_100G 0x40
+ u32 transceiver_00; /* 0x40 */
+ /* Defines the GPIO mapping of the transceiver module-absent signal */
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_MASK 0x000000FF
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_OFFSET 0
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_NA 0x0
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO0 0x1
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO1 0x2
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO2 0x3
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO3 0x4
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO4 0x5
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO5 0x6
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO6 0x7
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO7 0x8
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO8 0x9
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO9 0xA
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO10 0xB
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO11 0xC
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO12 0xD
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO13 0xE
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO14 0xF
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO15 0x10
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO16 0x11
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO17 0x12
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO18 0x13
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO19 0x14
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO20 0x15
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO21 0x16
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO22 0x17
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO23 0x18
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO24 0x19
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO25 0x1A
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO26 0x1B
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO27 0x1C
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO28 0x1D
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO29 0x1E
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO30 0x1F
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO31 0x20
+ /* Define the GPIO mux settings to switch i2c mux to this port */
+ #define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_MASK 0x00000F00
+ #define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_OFFSET 8
+ #define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_MASK 0x0000F000
+ #define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_OFFSET 12
+ u32 device_ids; /* 0x44 */
+ #define NVM_CFG1_PORT_ETH_DID_SUFFIX_MASK 0x000000FF
+ #define NVM_CFG1_PORT_ETH_DID_SUFFIX_OFFSET 0
+ #define NVM_CFG1_PORT_FCOE_DID_SUFFIX_MASK 0x0000FF00
+ #define NVM_CFG1_PORT_FCOE_DID_SUFFIX_OFFSET 8
+ #define NVM_CFG1_PORT_ISCSI_DID_SUFFIX_MASK 0x00FF0000
+ #define NVM_CFG1_PORT_ISCSI_DID_SUFFIX_OFFSET 16
+ #define NVM_CFG1_PORT_RESERVED_DID_SUFFIX_MASK 0xFF000000
+ #define NVM_CFG1_PORT_RESERVED_DID_SUFFIX_OFFSET 24
+ u32 board_cfg; /* 0x48 */
+ /* This field defines the board technology
+ * (backplane, transceiver, external PHY)
+ */
+ #define NVM_CFG1_PORT_PORT_TYPE_MASK 0x000000FF
+ #define NVM_CFG1_PORT_PORT_TYPE_OFFSET 0
+ #define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED 0x0
+ #define NVM_CFG1_PORT_PORT_TYPE_MODULE 0x1
+ #define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE 0x2
+ #define NVM_CFG1_PORT_PORT_TYPE_EXT_PHY 0x3
+ #define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE 0x4
+ /* This field defines the GPIO mapped to tx_disable signal in SFP */
+ #define NVM_CFG1_PORT_TX_DISABLE_MASK 0x0000FF00
+ #define NVM_CFG1_PORT_TX_DISABLE_OFFSET 8
+ #define NVM_CFG1_PORT_TX_DISABLE_NA 0x0
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO0 0x1
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO1 0x2
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO2 0x3
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO3 0x4
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO4 0x5
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO5 0x6
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO6 0x7
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO7 0x8
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO8 0x9
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO9 0xA
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO10 0xB
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO11 0xC
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO12 0xD
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO13 0xE
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO14 0xF
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO15 0x10
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO16 0x11
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO17 0x12
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO18 0x13
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO19 0x14
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO20 0x15
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO21 0x16
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO22 0x17
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO23 0x18
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO24 0x19
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO25 0x1A
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO26 0x1B
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO27 0x1C
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO28 0x1D
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO29 0x1E
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO30 0x1F
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO31 0x20
+ u32 mnm_10g_cap; /* 0x4C */
+ #define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_MASK \
+ 0x0000FFFF
+ #define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_1G 0x1
+ #define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_25G 0x8
+ #define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_40G 0x10
+ #define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_50G 0x20
+ #define \
+ NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40
+ #define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_MASK \
+ 0xFFFF0000
+ #define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_OFFSET \
+ 16
+ #define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_1G 0x1
+ #define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_25G 0x8
+ #define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_40G 0x10
+ #define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_50G 0x20
+ #define \
+ NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_BB_100G 0x40
+ u32 mnm_10g_ctrl; /* 0x50 */
+ #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_MASK 0x0000000F
+ #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_SMARTLINQ 0x8
+ #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_MASK 0x000000F0
+ #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_OFFSET 4
+ #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_SMARTLINQ 0x8
+ /* This field defines the board technology
+ * (backplane, transceiver, external PHY)
+ */
+ #define NVM_CFG1_PORT_MNM_10G_PORT_TYPE_MASK 0x0000FF00
+ #define NVM_CFG1_PORT_MNM_10G_PORT_TYPE_OFFSET 8
+ #define NVM_CFG1_PORT_MNM_10G_PORT_TYPE_UNDEFINED 0x0
+ #define NVM_CFG1_PORT_MNM_10G_PORT_TYPE_MODULE 0x1
+ #define NVM_CFG1_PORT_MNM_10G_PORT_TYPE_BACKPLANE 0x2
+ #define NVM_CFG1_PORT_MNM_10G_PORT_TYPE_EXT_PHY 0x3
+ #define NVM_CFG1_PORT_MNM_10G_PORT_TYPE_MODULE_SLAVE 0x4
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_MASK \
+ 0x00FF0000
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_OFFSET 16
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_BYPASS 0x0
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_KR 0x2
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_KR2 0x3
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_KR4 0x4
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_XFI 0x8
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_SFI 0x9
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_1000X 0xB
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_SGMII 0xC
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_XLAUI 0x11
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_XLPPI 0x12
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_CAUI 0x21
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_CPPI 0x22
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_25GAUI 0x31
+ #define NVM_CFG1_PORT_MNM_10G_ETH_DID_SUFFIX_MASK 0xFF000000
+ #define NVM_CFG1_PORT_MNM_10G_ETH_DID_SUFFIX_OFFSET 24
+ u32 mnm_10g_misc; /* 0x54 */
+ #define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_MASK 0x00000007
+ #define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_NONE 0x0
+ #define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_FIRECODE 0x1
+ #define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_RS 0x2
+ #define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_AUTO 0x7
+ u32 mnm_25g_cap; /* 0x58 */
+ #define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_MASK \
+ 0x0000FFFF
+ #define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_1G 0x1
+ #define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_25G 0x8
+ #define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_40G 0x10
+ #define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_50G 0x20
+ #define \
+ NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40
+ #define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_MASK \
+ 0xFFFF0000
+ #define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_OFFSET \
+ 16
+ #define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_1G 0x1
+ #define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_25G 0x8
+ #define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_40G 0x10
+ #define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_50G 0x20
+ #define \
+ NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_BB_100G 0x40
+ u32 mnm_25g_ctrl; /* 0x5C */
+ #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_MASK 0x0000000F
+ #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_SMARTLINQ 0x8
+ #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_MASK 0x000000F0
+ #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_OFFSET 4
+ #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_SMARTLINQ 0x8
+ /* This field defines the board technology
+ * (backplane, transceiver, external PHY)
+ */
+ #define NVM_CFG1_PORT_MNM_25G_PORT_TYPE_MASK 0x0000FF00
+ #define NVM_CFG1_PORT_MNM_25G_PORT_TYPE_OFFSET 8
+ #define NVM_CFG1_PORT_MNM_25G_PORT_TYPE_UNDEFINED 0x0
+ #define NVM_CFG1_PORT_MNM_25G_PORT_TYPE_MODULE 0x1
+ #define NVM_CFG1_PORT_MNM_25G_PORT_TYPE_BACKPLANE 0x2
+ #define NVM_CFG1_PORT_MNM_25G_PORT_TYPE_EXT_PHY 0x3
+ #define NVM_CFG1_PORT_MNM_25G_PORT_TYPE_MODULE_SLAVE 0x4
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_MASK \
+ 0x00FF0000
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_OFFSET 16
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_BYPASS 0x0
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_KR 0x2
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_KR2 0x3
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_KR4 0x4
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_XFI 0x8
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_SFI 0x9
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_1000X 0xB
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_SGMII 0xC
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_XLAUI 0x11
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_XLPPI 0x12
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_CAUI 0x21
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_CPPI 0x22
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_25GAUI 0x31
+ #define NVM_CFG1_PORT_MNM_25G_ETH_DID_SUFFIX_MASK 0xFF000000
+ #define NVM_CFG1_PORT_MNM_25G_ETH_DID_SUFFIX_OFFSET 24
+ u32 mnm_25g_misc; /* 0x60 */
+ #define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_MASK 0x00000007
+ #define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_NONE 0x0
+ #define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_FIRECODE 0x1
+ #define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_RS 0x2
+ #define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_AUTO 0x7
+ u32 mnm_40g_cap; /* 0x64 */
+ #define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_MASK \
+ 0x0000FFFF
+ #define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_1G 0x1
+ #define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_25G 0x8
+ #define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_40G 0x10
+ #define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_50G 0x20
+ #define \
+ NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40
+ #define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_MASK \
+ 0xFFFF0000
+ #define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_OFFSET \
+ 16
+ #define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_1G 0x1
+ #define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_25G 0x8
+ #define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_40G 0x10
+ #define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_50G 0x20
+ #define \
+ NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_BB_100G 0x40
+ u32 mnm_40g_ctrl; /* 0x68 */
+ #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_MASK 0x0000000F
+ #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_SMARTLINQ 0x8
+ #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_MASK 0x000000F0
+ #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_OFFSET 4
+ #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_SMARTLINQ 0x8
+ /* This field defines the board technology
+ * (backplane, transceiver, external PHY)
+ */
+ #define NVM_CFG1_PORT_MNM_40G_PORT_TYPE_MASK 0x0000FF00
+ #define NVM_CFG1_PORT_MNM_40G_PORT_TYPE_OFFSET 8
+ #define NVM_CFG1_PORT_MNM_40G_PORT_TYPE_UNDEFINED 0x0
+ #define NVM_CFG1_PORT_MNM_40G_PORT_TYPE_MODULE 0x1
+ #define NVM_CFG1_PORT_MNM_40G_PORT_TYPE_BACKPLANE 0x2
+ #define NVM_CFG1_PORT_MNM_40G_PORT_TYPE_EXT_PHY 0x3
+ #define NVM_CFG1_PORT_MNM_40G_PORT_TYPE_MODULE_SLAVE 0x4
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_MASK \
+ 0x00FF0000
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_OFFSET 16
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_BYPASS 0x0
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_KR 0x2
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_KR2 0x3
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_KR4 0x4
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_XFI 0x8
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_SFI 0x9
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_1000X 0xB
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_SGMII 0xC
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_XLAUI 0x11
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_XLPPI 0x12
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_CAUI 0x21
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_CPPI 0x22
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_25GAUI 0x31
+ #define NVM_CFG1_PORT_MNM_40G_ETH_DID_SUFFIX_MASK 0xFF000000
+ #define NVM_CFG1_PORT_MNM_40G_ETH_DID_SUFFIX_OFFSET 24
+ u32 mnm_40g_misc; /* 0x6C */
+ #define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_MASK 0x00000007
+ #define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_NONE 0x0
+ #define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_FIRECODE 0x1
+ #define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_RS 0x2
+ #define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_AUTO 0x7
+ u32 mnm_50g_cap; /* 0x70 */
+ #define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_MASK \
+ 0x0000FFFF
+ #define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_1G 0x1
+ #define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_25G 0x8
+ #define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_40G 0x10
+ #define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_50G 0x20
+ #define \
+ NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_BB_100G \
+ 0x40
+ #define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_MASK \
+ 0xFFFF0000
+ #define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_OFFSET \
+ 16
+ #define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_1G 0x1
+ #define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_25G 0x8
+ #define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_40G 0x10
+ #define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_50G 0x20
+ #define \
+ NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_BB_100G \
+ 0x40
+ u32 mnm_50g_ctrl; /* 0x74 */
+ #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_MASK 0x0000000F
+ #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_SMARTLINQ 0x8
+ #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_MASK 0x000000F0
+ #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_OFFSET 4
+ #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_SMARTLINQ 0x8
+ /* This field defines the board technology
+ * (backplane, transceiver, external PHY)
+ */
+ #define NVM_CFG1_PORT_MNM_50G_PORT_TYPE_MASK 0x0000FF00
+ #define NVM_CFG1_PORT_MNM_50G_PORT_TYPE_OFFSET 8
+ #define NVM_CFG1_PORT_MNM_50G_PORT_TYPE_UNDEFINED 0x0
+ #define NVM_CFG1_PORT_MNM_50G_PORT_TYPE_MODULE 0x1
+ #define NVM_CFG1_PORT_MNM_50G_PORT_TYPE_BACKPLANE 0x2
+ #define NVM_CFG1_PORT_MNM_50G_PORT_TYPE_EXT_PHY 0x3
+ #define NVM_CFG1_PORT_MNM_50G_PORT_TYPE_MODULE_SLAVE 0x4
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_MASK \
+ 0x00FF0000
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_OFFSET 16
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_BYPASS 0x0
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_KR 0x2
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_KR2 0x3
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_KR4 0x4
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_XFI 0x8
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_SFI 0x9
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_1000X 0xB
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_SGMII 0xC
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_XLAUI 0x11
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_XLPPI 0x12
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_CAUI 0x21
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_CPPI 0x22
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_25GAUI 0x31
+ #define NVM_CFG1_PORT_MNM_50G_ETH_DID_SUFFIX_MASK 0xFF000000
+ #define NVM_CFG1_PORT_MNM_50G_ETH_DID_SUFFIX_OFFSET 24
+ u32 mnm_50g_misc; /* 0x78 */
+ #define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_MASK 0x00000007
+ #define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_NONE 0x0
+ #define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_FIRECODE 0x1
+ #define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_RS 0x2
+ #define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_AUTO 0x7
+ u32 mnm_100g_cap; /* 0x7C */
+ #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_MASK \
+ 0x0000FFFF
+ #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_1G 0x1
+ #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_25G 0x8
+ #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_40G 0x10
+ #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_50G 0x20
+ #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_BB_100G 0x40
+ #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_MASK \
+ 0xFFFF0000
+ #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_OFFSET 16
+ #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_1G 0x1
+ #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_25G 0x8
+ #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_40G 0x10
+ #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_50G 0x20
+ #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_BB_100G 0x40
+ u32 mnm_100g_ctrl; /* 0x80 */
+ #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_MASK 0x0000000F
+ #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_SMARTLINQ 0x8
+ #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_MASK 0x000000F0
+ #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_OFFSET 4
+ #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_SMARTLINQ 0x8
+ /* This field defines the board technology
+ * (backplane, transceiver, external PHY)
+ */
+ #define NVM_CFG1_PORT_MNM_100G_PORT_TYPE_MASK 0x0000FF00
+ #define NVM_CFG1_PORT_MNM_100G_PORT_TYPE_OFFSET 8
+ #define NVM_CFG1_PORT_MNM_100G_PORT_TYPE_UNDEFINED 0x0
+ #define NVM_CFG1_PORT_MNM_100G_PORT_TYPE_MODULE 0x1
+ #define NVM_CFG1_PORT_MNM_100G_PORT_TYPE_BACKPLANE 0x2
+ #define NVM_CFG1_PORT_MNM_100G_PORT_TYPE_EXT_PHY 0x3
+ #define NVM_CFG1_PORT_MNM_100G_PORT_TYPE_MODULE_SLAVE 0x4
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_MASK \
+ 0x00FF0000
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_OFFSET 16
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_BYPASS 0x0
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_KR 0x2
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_KR2 0x3
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_KR4 0x4
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_XFI 0x8
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_SFI 0x9
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_1000X 0xB
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_SGMII 0xC
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_XLAUI 0x11
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_XLPPI 0x12
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_CAUI 0x21
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_CPPI 0x22
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_25GAUI 0x31
+ #define NVM_CFG1_PORT_MNM_100G_ETH_DID_SUFFIX_MASK 0xFF000000
+ #define NVM_CFG1_PORT_MNM_100G_ETH_DID_SUFFIX_OFFSET 24
+ u32 mnm_100g_misc; /* 0x84 */
+ #define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_MASK 0x00000007
+ #define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_NONE 0x0
+ #define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_FIRECODE 0x1
+ #define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_RS 0x2
+ #define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_AUTO 0x7
+ u32 reserved[116]; /* 0x88 */
+};
+
+struct nvm_cfg1_func {
+ struct nvm_cfg_mac_address mac_address; /* 0x0 */
+ u32 rsrv1; /* 0x8 */
+ #define NVM_CFG1_FUNC_RESERVED1_MASK 0x0000FFFF
+ #define NVM_CFG1_FUNC_RESERVED1_OFFSET 0
+ #define NVM_CFG1_FUNC_RESERVED2_MASK 0xFFFF0000
+ #define NVM_CFG1_FUNC_RESERVED2_OFFSET 16
+ u32 rsrv2; /* 0xC */
+ #define NVM_CFG1_FUNC_RESERVED3_MASK 0x0000FFFF
+ #define NVM_CFG1_FUNC_RESERVED3_OFFSET 0
+ #define NVM_CFG1_FUNC_RESERVED4_MASK 0xFFFF0000
+ #define NVM_CFG1_FUNC_RESERVED4_OFFSET 16
+ u32 device_id; /* 0x10 */
+ #define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK 0x0000FFFF
+ #define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET 0
+ #define NVM_CFG1_FUNC_RESERVED77_MASK 0xFFFF0000
+ #define NVM_CFG1_FUNC_RESERVED77_OFFSET 16
+ u32 cmn_cfg; /* 0x14 */
+ #define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_MASK 0x00000007
+ #define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_OFFSET 0
+ #define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_PXE 0x0
+ #define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_ISCSI_BOOT 0x3
+ #define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_FCOE_BOOT 0x4
+ #define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_NONE 0x7
+ #define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_MASK 0x0007FFF8
+ #define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_OFFSET 3
+ #define NVM_CFG1_FUNC_PERSONALITY_MASK 0x00780000
+ #define NVM_CFG1_FUNC_PERSONALITY_OFFSET 19
+ #define NVM_CFG1_FUNC_PERSONALITY_ETHERNET 0x0
+ #define NVM_CFG1_FUNC_PERSONALITY_ISCSI 0x1
+ #define NVM_CFG1_FUNC_PERSONALITY_FCOE 0x2
+ #define NVM_CFG1_FUNC_PERSONALITY_ROCE 0x3
+ #define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_MASK 0x7F800000
+ #define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_OFFSET 23
+ #define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_MASK 0x80000000
+ #define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_OFFSET 31
+ #define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_DISABLED 0x0
+ #define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_ENABLED 0x1
+ u32 pci_cfg; /* 0x18 */
+ #define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_MASK 0x0000007F
+ #define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_OFFSET 0
+ /* AH VF BAR2 size */
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_MASK 0x00003F80
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_OFFSET 7
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_DISABLED 0x0
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_4K 0x1
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_8K 0x2
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_16K 0x3
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_32K 0x4
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_64K 0x5
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_128K 0x6
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_256K 0x7
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_512K 0x8
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_1M 0x9
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_2M 0xA
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_4M 0xB
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_8M 0xC
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_16M 0xD
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_32M 0xE
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_64M 0xF
+ #define NVM_CFG1_FUNC_BAR1_SIZE_MASK 0x0003C000
+ #define NVM_CFG1_FUNC_BAR1_SIZE_OFFSET 14
+ #define NVM_CFG1_FUNC_BAR1_SIZE_DISABLED 0x0
+ #define NVM_CFG1_FUNC_BAR1_SIZE_64K 0x1
+ #define NVM_CFG1_FUNC_BAR1_SIZE_128K 0x2
+ #define NVM_CFG1_FUNC_BAR1_SIZE_256K 0x3
+ #define NVM_CFG1_FUNC_BAR1_SIZE_512K 0x4
+ #define NVM_CFG1_FUNC_BAR1_SIZE_1M 0x5
+ #define NVM_CFG1_FUNC_BAR1_SIZE_2M 0x6
+ #define NVM_CFG1_FUNC_BAR1_SIZE_4M 0x7
+ #define NVM_CFG1_FUNC_BAR1_SIZE_8M 0x8
+ #define NVM_CFG1_FUNC_BAR1_SIZE_16M 0x9
+ #define NVM_CFG1_FUNC_BAR1_SIZE_32M 0xA
+ #define NVM_CFG1_FUNC_BAR1_SIZE_64M 0xB
+ #define NVM_CFG1_FUNC_BAR1_SIZE_128M 0xC
+ #define NVM_CFG1_FUNC_BAR1_SIZE_256M 0xD
+ #define NVM_CFG1_FUNC_BAR1_SIZE_512M 0xE
+ #define NVM_CFG1_FUNC_BAR1_SIZE_1G 0xF
+ #define NVM_CFG1_FUNC_MAX_BANDWIDTH_MASK 0x03FC0000
+ #define NVM_CFG1_FUNC_MAX_BANDWIDTH_OFFSET 18
+ /* Hide function in npar mode */
+ #define NVM_CFG1_FUNC_FUNCTION_HIDE_MASK 0x04000000
+ #define NVM_CFG1_FUNC_FUNCTION_HIDE_OFFSET 26
+ #define NVM_CFG1_FUNC_FUNCTION_HIDE_DISABLED 0x0
+ #define NVM_CFG1_FUNC_FUNCTION_HIDE_ENABLED 0x1
+ /* AH BAR2 size (per function) */
+ #define NVM_CFG1_FUNC_BAR2_SIZE_MASK 0x78000000
+ #define NVM_CFG1_FUNC_BAR2_SIZE_OFFSET 27
+ #define NVM_CFG1_FUNC_BAR2_SIZE_DISABLED 0x0
+ #define NVM_CFG1_FUNC_BAR2_SIZE_1M 0x5
+ #define NVM_CFG1_FUNC_BAR2_SIZE_2M 0x6
+ #define NVM_CFG1_FUNC_BAR2_SIZE_4M 0x7
+ #define NVM_CFG1_FUNC_BAR2_SIZE_8M 0x8
+ #define NVM_CFG1_FUNC_BAR2_SIZE_16M 0x9
+ #define NVM_CFG1_FUNC_BAR2_SIZE_32M 0xA
+ #define NVM_CFG1_FUNC_BAR2_SIZE_64M 0xB
+ #define NVM_CFG1_FUNC_BAR2_SIZE_128M 0xC
+ #define NVM_CFG1_FUNC_BAR2_SIZE_256M 0xD
+ #define NVM_CFG1_FUNC_BAR2_SIZE_512M 0xE
+ #define NVM_CFG1_FUNC_BAR2_SIZE_1G 0xF
+ struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr; /* 0x1C */
+ struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr; /* 0x24 */
+ u32 preboot_generic_cfg; /* 0x2C */
+ #define NVM_CFG1_FUNC_PREBOOT_VLAN_VALUE_MASK 0x0000FFFF
+ #define NVM_CFG1_FUNC_PREBOOT_VLAN_VALUE_OFFSET 0
+ #define NVM_CFG1_FUNC_PREBOOT_VLAN_MASK 0x00010000
+ #define NVM_CFG1_FUNC_PREBOOT_VLAN_OFFSET 16
+ #define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_MASK 0x001E0000
+ #define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_OFFSET 17
+ #define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_ETHERNET 0x1
+ #define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_FCOE 0x2
+ #define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_ISCSI 0x4
+ u32 reserved[8]; /* 0x30 */
+};
+
+struct nvm_cfg1 {
+ struct nvm_cfg1_glob glob; /* 0x0 */
+ struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; /* 0x228 */
+ struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX]; /* 0x230 */
+ struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX]; /* 0xB90 */
+};
+
+/******************************************
+ * nvm_cfg structs
+ ******************************************/
+enum nvm_cfg_sections {
+ NVM_CFG_SECTION_NVM_CFG1,
+ NVM_CFG_SECTION_MAX
+};
+
+struct nvm_cfg {
+ u32 num_sections;
+ u32 sections_offset[NVM_CFG_SECTION_MAX];
+ struct nvm_cfg1 cfg1;
+};
+
+#endif /* NVM_CFG_H */
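
Every multi-bit field in the nvm_cfg1 structures above is described by a *_MASK/*_OFFSET macro pair, so reading a field is a mask-and-shift on the containing 32-bit word. A minimal sketch of that decode pattern, assuming this header is included; the helper name nvm_cfg_get_field() and the caller is_ethernet_personality() are hypothetical and not part of the patch:

#include <stdint.h>

/* Hypothetical helper: extract a field described by a *_MASK/*_OFFSET pair. */
static inline uint32_t nvm_cfg_get_field(uint32_t word, uint32_t mask,
					 uint32_t offset)
{
	return (word & mask) >> offset;
}

/* Example: decode the function personality from nvm_cfg1_func.cmn_cfg. */
static int is_ethernet_personality(uint32_t cmn_cfg)
{
	uint32_t pers = nvm_cfg_get_field(cmn_cfg,
					  NVM_CFG1_FUNC_PERSONALITY_MASK,
					  NVM_CFG1_FUNC_PERSONALITY_OFFSET);

	return pers == NVM_CFG1_FUNC_PERSONALITY_ETHERNET;
}

The same pattern applies to every field in the glob, path, port and func sections; only the macro names change.
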
diff --git a/src/seastar/dpdk/drivers/net/qede/base/reg_addr.h b/src/seastar/dpdk/drivers/net/qede/base/reg_addr.h
new file mode 100644
index 00000000..f9920f37
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/base/reg_addr.h
@@ -0,0 +1,1202 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT \
+ 0
+
+#define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE ( \
+ 0xfff << 0)
+
+#define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT \
+ 12
+
+#define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE ( \
+ 0xfff << 12)
+
+#define CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT \
+ 24
+
+#define CDU_REG_CID_ADDR_PARAMS_NCIB ( \
+ 0xffUL << 24) /* @DPDK */
+
+#define XSDM_REG_OPERATION_GEN \
+ 0xf80408UL
+#define NIG_REG_RX_BRB_OUT_EN \
+ 0x500e18UL
+#define NIG_REG_STORM_OUT_EN \
+ 0x500e08UL
+#define PSWRQ2_REG_L2P_VALIDATE_VFID \
+ 0x240c50UL
+#define PGLUE_B_REG_USE_CLIENTID_IN_TAG \
+ 0x2aae04UL
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER \
+ 0x2aa16cUL
+#define BAR0_MAP_REG_MSDM_RAM \
+ 0x1d00000UL
+#define BAR0_MAP_REG_USDM_RAM \
+ 0x1d80000UL
+#define BAR0_MAP_REG_PSDM_RAM \
+ 0x1f00000UL
+#define BAR0_MAP_REG_TSDM_RAM \
+ 0x1c80000UL
+#define NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \
+ 0x5011f4UL
+#define PRS_REG_SEARCH_TCP \
+ 0x1f0400UL
+#define PRS_REG_SEARCH_UDP \
+ 0x1f0404UL
+#define PRS_REG_SEARCH_OPENFLOW \
+ 0x1f0434UL
+#define TM_REG_PF_ENABLE_CONN \
+ 0x2c043cUL
+#define TM_REG_PF_ENABLE_TASK \
+ 0x2c0444UL
+#define TM_REG_PF_SCAN_ACTIVE_CONN \
+ 0x2c04fcUL
+#define TM_REG_PF_SCAN_ACTIVE_TASK \
+ 0x2c0500UL
+#define IGU_REG_LEADING_EDGE_LATCH \
+ 0x18082cUL
+#define IGU_REG_TRAILING_EDGE_LATCH \
+ 0x180830UL
+#define QM_REG_USG_CNT_PF_TX \
+ 0x2f2eacUL
+#define QM_REG_USG_CNT_PF_OTHER \
+ 0x2f2eb0UL
+#define DORQ_REG_PF_DB_ENABLE \
+ 0x100508UL
+#define QM_REG_PF_EN \
+ 0x2f2ea4UL
+#define TCFC_REG_STRONG_ENABLE_PF \
+ 0x2d0708UL
+#define CCFC_REG_STRONG_ENABLE_PF \
+ 0x2e0708UL
+#define PGLUE_B_REG_PGL_ADDR_88_F0 \
+ 0x2aa404UL
+#define PGLUE_B_REG_PGL_ADDR_8C_F0 \
+ 0x2aa408UL
+#define PGLUE_B_REG_PGL_ADDR_90_F0 \
+ 0x2aa40cUL
+#define PGLUE_B_REG_PGL_ADDR_94_F0 \
+ 0x2aa410UL
+#define PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR \
+ 0x2aa138UL
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ \
+ 0x2aa174UL
+#define MISC_REG_GEN_PURP_CR0 \
+ 0x008c80UL
+#define MCP_REG_SCRATCH \
+ 0xe20000UL
+#define CNIG_REG_NW_PORT_MODE_BB_B0 \
+ 0x218200UL
+#define MISCS_REG_CHIP_NUM \
+ 0x00976cUL
+#define MISCS_REG_CHIP_REV \
+ 0x009770UL
+#define MISCS_REG_CMT_ENABLED_FOR_PAIR \
+ 0x00971cUL
+#define MISCS_REG_CHIP_TEST_REG \
+ 0x009778UL
+#define MISCS_REG_CHIP_METAL \
+ 0x009774UL
+#define BRB_REG_HEADER_SIZE \
+ 0x340804UL
+#define BTB_REG_HEADER_SIZE \
+ 0xdb0804UL
+#define CAU_REG_LONG_TIMEOUT_THRESHOLD \
+ 0x1c0708UL
+#define CCFC_REG_ACTIVITY_COUNTER \
+ 0x2e8800UL
+#define CDU_REG_CID_ADDR_PARAMS \
+ 0x580900UL
+#define DBG_REG_CLIENT_ENABLE \
+ 0x010004UL
+#define DMAE_REG_INIT \
+ 0x00c000UL
+#define DORQ_REG_IFEN \
+ 0x100040UL
+#define GRC_REG_TIMEOUT_EN \
+ 0x050404UL
+#define IGU_REG_BLOCK_CONFIGURATION \
+ 0x180040UL
+#define MCM_REG_INIT \
+ 0x1200000UL
+#define MCP2_REG_DBG_DWORD_ENABLE \
+ 0x052404UL
+#define MISC_REG_PORT_MODE \
+ 0x008c00UL
+#define MISC_REG_BLOCK_256B_EN \
+ 0x008c14UL
+#define MISCS_REG_RESET_PL_HV \
+ 0x009060UL
+#define MISCS_REG_CLK_100G_MODE \
+ 0x009070UL
+#define MISCS_REG_RESET_PL_HV_2 \
+ 0x009150UL
+#define MSDM_REG_ENABLE_IN1 \
+ 0xfc0004UL
+#define MSEM_REG_ENABLE_IN \
+ 0x1800004UL
+#define NIG_REG_CM_HDR \
+ 0x500840UL
+#define NCSI_REG_CONFIG \
+ 0x040200UL
+#define PSWRQ2_REG_RBC_DONE \
+ 0x240000UL
+#define PSWRQ2_REG_CFG_DONE \
+ 0x240004UL
+#define PBF_REG_INIT \
+ 0xd80000UL
+#define PTU_REG_ATC_INIT_ARRAY \
+ 0x560000UL
+#define PCM_REG_INIT \
+ 0x1100000UL
+#define PGLUE_B_REG_ADMIN_PER_PF_REGION \
+ 0x2a9000UL
+#define PRM_REG_DISABLE_PRM \
+ 0x230000UL
+#define PRS_REG_SOFT_RST \
+ 0x1f0000UL
+#define PSDM_REG_ENABLE_IN1 \
+ 0xfa0004UL
+#define PSEM_REG_ENABLE_IN \
+ 0x1600004UL
+#define PSWRQ_REG_DBG_SELECT \
+ 0x280020UL
+#define PSWRQ2_REG_CDUT_P_SIZE \
+ 0x24000cUL
+#define PSWHST_REG_DISCARD_INTERNAL_WRITES \
+ 0x2a0040UL
+#define PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \
+ 0x29e050UL
+#define PSWRD_REG_DBG_SELECT \
+ 0x29c040UL
+#define PSWRD2_REG_CONF11 \
+ 0x29d064UL
+#define PSWWR_REG_USDM_FULL_TH \
+ 0x29a040UL
+#define PSWWR2_REG_CDU_FULL_TH2 \
+ 0x29b040UL
+#define QM_REG_MAXPQSIZE_0 \
+ 0x2f0434UL
+#define RSS_REG_RSS_INIT_EN \
+ 0x238804UL
+#define RDIF_REG_STOP_ON_ERROR \
+ 0x300040UL
+#define SRC_REG_SOFT_RST \
+ 0x23874cUL
+#define TCFC_REG_ACTIVITY_COUNTER \
+ 0x2d8800UL
+#define TCM_REG_INIT \
+ 0x1180000UL
+#define TM_REG_PXP_READ_DATA_FIFO_INIT \
+ 0x2c0014UL
+#define TSDM_REG_ENABLE_IN1 \
+ 0xfb0004UL
+#define TSEM_REG_ENABLE_IN \
+ 0x1700004UL
+#define TDIF_REG_STOP_ON_ERROR \
+ 0x310040UL
+#define UCM_REG_INIT \
+ 0x1280000UL
+#define UMAC_REG_IPG_HD_BKP_CNTL_BB_B0 \
+ 0x051004UL
+#define USDM_REG_ENABLE_IN1 \
+ 0xfd0004UL
+#define USEM_REG_ENABLE_IN \
+ 0x1900004UL
+#define XCM_REG_INIT \
+ 0x1000000UL
+#define XSDM_REG_ENABLE_IN1 \
+ 0xf80004UL
+#define XSEM_REG_ENABLE_IN \
+ 0x1400004UL
+#define YCM_REG_INIT \
+ 0x1080000UL
+#define YSDM_REG_ENABLE_IN1 \
+ 0xf90004UL
+#define YSEM_REG_ENABLE_IN \
+ 0x1500004UL
+#define XYLD_REG_SCBD_STRICT_PRIO \
+ 0x4c0000UL
+#define TMLD_REG_SCBD_STRICT_PRIO \
+ 0x4d0000UL
+#define MULD_REG_SCBD_STRICT_PRIO \
+ 0x4e0000UL
+#define YULD_REG_SCBD_STRICT_PRIO \
+ 0x4c8000UL
+#define MISC_REG_SHARED_MEM_ADDR \
+ 0x008c20UL
+#define DMAE_REG_GO_C0 \
+ 0x00c048UL
+#define DMAE_REG_GO_C1 \
+ 0x00c04cUL
+#define DMAE_REG_GO_C2 \
+ 0x00c050UL
+#define DMAE_REG_GO_C3 \
+ 0x00c054UL
+#define DMAE_REG_GO_C4 \
+ 0x00c058UL
+#define DMAE_REG_GO_C5 \
+ 0x00c05cUL
+#define DMAE_REG_GO_C6 \
+ 0x00c060UL
+#define DMAE_REG_GO_C7 \
+ 0x00c064UL
+#define DMAE_REG_GO_C8 \
+ 0x00c068UL
+#define DMAE_REG_GO_C9 \
+ 0x00c06cUL
+#define DMAE_REG_GO_C10 \
+ 0x00c070UL
+#define DMAE_REG_GO_C11 \
+ 0x00c074UL
+#define DMAE_REG_GO_C12 \
+ 0x00c078UL
+#define DMAE_REG_GO_C13 \
+ 0x00c07cUL
+#define DMAE_REG_GO_C14 \
+ 0x00c080UL
+#define DMAE_REG_GO_C15 \
+ 0x00c084UL
+#define DMAE_REG_GO_C16 \
+ 0x00c088UL
+#define DMAE_REG_GO_C17 \
+ 0x00c08cUL
+#define DMAE_REG_GO_C18 \
+ 0x00c090UL
+#define DMAE_REG_GO_C19 \
+ 0x00c094UL
+#define DMAE_REG_GO_C20 \
+ 0x00c098UL
+#define DMAE_REG_GO_C21 \
+ 0x00c09cUL
+#define DMAE_REG_GO_C22 \
+ 0x00c0a0UL
+#define DMAE_REG_GO_C23 \
+ 0x00c0a4UL
+#define DMAE_REG_GO_C24 \
+ 0x00c0a8UL
+#define DMAE_REG_GO_C25 \
+ 0x00c0acUL
+#define DMAE_REG_GO_C26 \
+ 0x00c0b0UL
+#define DMAE_REG_GO_C27 \
+ 0x00c0b4UL
+#define DMAE_REG_GO_C28 \
+ 0x00c0b8UL
+#define DMAE_REG_GO_C29 \
+ 0x00c0bcUL
+#define DMAE_REG_GO_C30 \
+ 0x00c0c0UL
+#define DMAE_REG_GO_C31 \
+ 0x00c0c4UL
+#define DMAE_REG_CMD_MEM \
+ 0x00c800UL
+#define QM_REG_MAXPQSIZETXSEL_0 \
+ 0x2f0440UL
+#define QM_REG_SDMCMDREADY \
+ 0x2f1e10UL
+#define QM_REG_SDMCMDADDR \
+ 0x2f1e04UL
+#define QM_REG_SDMCMDDATALSB \
+ 0x2f1e08UL
+#define QM_REG_SDMCMDDATAMSB \
+ 0x2f1e0cUL
+#define QM_REG_SDMCMDGO \
+ 0x2f1e14UL
+#define QM_REG_RLPFCRD \
+ 0x2f4d80UL
+#define QM_REG_RLPFINCVAL \
+ 0x2f4c80UL
+#define QM_REG_RLGLBLCRD \
+ 0x2f4400UL
+#define QM_REG_RLGLBLINCVAL \
+ 0x2f3400UL
+#define IGU_REG_ATTENTION_ENABLE \
+ 0x18083cUL
+#define IGU_REG_ATTN_MSG_ADDR_L \
+ 0x180820UL
+#define IGU_REG_ATTN_MSG_ADDR_H \
+ 0x180824UL
+#define MISC_REG_AEU_GENERAL_ATTN_0 \
+ 0x008400UL
+#define CAU_REG_SB_ADDR_MEMORY \
+ 0x1c8000UL
+#define CAU_REG_SB_VAR_MEMORY \
+ 0x1c6000UL
+#define CAU_REG_PI_MEMORY \
+ 0x1d0000UL
+#define IGU_REG_PF_CONFIGURATION \
+ 0x180800UL
+#define MISC_REG_AEU_ENABLE1_IGU_OUT_0 \
+ 0x00849cUL
+#define MISC_REG_AEU_MASK_ATTN_IGU \
+ 0x008494UL
+#define IGU_REG_CLEANUP_STATUS_0 \
+ 0x180980UL
+#define IGU_REG_CLEANUP_STATUS_1 \
+ 0x180a00UL
+#define IGU_REG_CLEANUP_STATUS_2 \
+ 0x180a80UL
+#define IGU_REG_CLEANUP_STATUS_3 \
+ 0x180b00UL
+#define IGU_REG_CLEANUP_STATUS_4 \
+ 0x180b80UL
+#define IGU_REG_COMMAND_REG_32LSB_DATA \
+ 0x180840UL
+#define IGU_REG_COMMAND_REG_CTRL \
+ 0x180848UL
+#define IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN ( \
+ 0x1 << 1)
+#define IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN ( \
+ 0x1 << 0)
+#define IGU_REG_MAPPING_MEMORY \
+ 0x184000UL
+#define MISCS_REG_GENERIC_POR_0 \
+ 0x0096d4UL
+#define MCP_REG_NVM_CFG4 \
+ 0xe0642cUL
+#define MCP_REG_NVM_CFG4_FLASH_SIZE ( \
+ 0x7 << 0)
+#define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \
+ 0
+#define CCFC_REG_STRONG_ENABLE_VF 0x2e070cUL
+#define CNIG_REG_PMEG_IF_CMD_BB_B0 0x21821cUL
+#define CNIG_REG_PMEG_IF_ADDR_BB_B0 0x218224UL
+#define CNIG_REG_PMEG_IF_WRDATA_BB_B0 0x218228UL
+#define NWM_REG_MAC0 0x800400UL
+#define NWM_REG_MAC0_SIZE 256
+#define CNIG_REG_NIG_PORT0_CONF_K2 0x218200UL
+#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_SHIFT 0
+#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_SHIFT 1
+#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_SHIFT 3
+#define ETH_MAC_REG_XIF_MODE 0x000080UL
+#define ETH_MAC_REG_XIF_MODE_XGMII_SHIFT 0
+#define ETH_MAC_REG_FRM_LENGTH 0x000014UL
+#define ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_SHIFT 0
+#define ETH_MAC_REG_TX_IPG_LENGTH 0x000044UL
+#define ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_SHIFT 0
+#define ETH_MAC_REG_RX_FIFO_SECTIONS 0x00001cUL
+#define ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_SHIFT 0
+#define ETH_MAC_REG_TX_FIFO_SECTIONS 0x000020UL
+#define ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_SHIFT 16
+#define ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_SHIFT 0
+#define ETH_MAC_REG_COMMAND_CONFIG 0x000008UL
+#define MISC_REG_RESET_PL_PDA_VAUX 0x008090UL
+#define MISC_REG_XMAC_CORE_PORT_MODE 0x008c08UL
+#define MISC_REG_XMAC_PHY_PORT_MODE 0x008c04UL
+#define XMAC_REG_MODE 0x210008UL
+#define XMAC_REG_RX_MAX_SIZE 0x210040UL
+#define XMAC_REG_TX_CTRL_LO 0x210020UL
+#define XMAC_REG_CTRL 0x210000UL
+#define XMAC_REG_RX_CTRL 0x210030UL
+#define XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE (0x1 << 12)
+#define MISC_REG_CLK_100G_MODE 0x008c10UL
+#define MISC_REG_OPTE_MODE 0x008c0cUL
+#define NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH 0x501b84UL
+#define NIG_REG_LLH_ENG_CLS_ENG_ID_TBL 0x501b90UL
+#define PRS_REG_SEARCH_TAG1 0x1f0444UL
+#define PRS_REG_SEARCH_TCP_FIRST_FRAG 0x1f0410UL
+#define MISCS_REG_PLL_MAIN_CTRL_4 0x00974cUL
+#define MISCS_REG_ECO_RESERVED 0x0097b4UL
+#define PGLUE_B_REG_PF_BAR0_SIZE 0x2aae60UL
+#define PGLUE_B_REG_PF_BAR1_SIZE 0x2aae64UL
+#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_EN 0x501a80UL
+#define NIG_REG_LLH_FUNC_FILTER_VALUE 0x501a00UL
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE 0x501b00UL
+#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_VALUE 0x501a00UL
+#define NIG_REG_LLH_FUNC_FILTER_EN 0x501a80UL
+#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_EN 0x501a80UL
+#define NIG_REG_LLH_FUNC_FILTER_VALUE 0x501a00UL
+#define NIG_REG_LLH_FUNC_FILTER_MODE 0x501ac0UL
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE 0x501b00UL
+#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_VALUE 0x501a00UL
+#define NIG_REG_LLH_FUNC_FILTER_EN 0x501a80UL
+#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_EN 0x501a80UL
+#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_VALUE 0x501a00UL
+#define XMAC_REG_CTRL_TX_EN (0x1 << 0)
+#define XMAC_REG_CTRL_RX_EN (0x1 << 1)
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE (0xffUL << 24) /* @DPDK */
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE (0xff << 16)
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT 16
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE (0xff << 16)
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE (0xffUL << 24) /* @DPDK */
+#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK (0xfff << 0)
+#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT 0
+#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK (0xfff << 0)
+#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT 0
+#define PSWRQ2_REG_ILT_MEMORY 0x260000UL
+#define QM_REG_WFQPFWEIGHT 0x2f4e80UL
+#define QM_REG_WFQVPWEIGHT 0x2fa000UL
+#define NIG_REG_LB_ARB_CREDIT_WEIGHT_0 0x50160cUL
+#define NIG_REG_TX_ARB_CREDIT_WEIGHT_0 0x501f88UL
+#define NIG_REG_LB_ARB_CREDIT_WEIGHT_1 0x501610UL
+#define NIG_REG_TX_ARB_CREDIT_WEIGHT_1 0x501f8cUL
+#define NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 0x5015e4UL
+#define NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0 0x501f58UL
+#define NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 0x5015e8UL
+#define NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 0x501f5cUL
+#define NIG_REG_LB_ARB_CLIENT_IS_STRICT 0x5015c0UL
+#define NIG_REG_TX_ARB_CLIENT_IS_STRICT 0x501f34UL
+#define NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ 0x5015c4UL
+#define NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x501f38UL
+#define NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT 1
+#define NIG_REG_TX_LB_GLBRATELIMIT_CTRL 0x501f1cUL
+#define NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD 0x501f20UL
+#define NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE 0x501f24UL
+#define NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE 0x501f28UL
+#define NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT 0
+#define NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT 1
+#define NIG_REG_LB_BRBRATELIMIT_CTRL 0x50150cUL
+#define NIG_REG_LB_BRBRATELIMIT_INC_PERIOD 0x501510UL
+#define NIG_REG_LB_BRBRATELIMIT_INC_VALUE 0x501514UL
+#define NIG_REG_LB_BRBRATELIMIT_MAX_VALUE 0x501518UL
+#define NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT 0
+#define NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT 1
+#define NIG_REG_LB_TCRATELIMIT_CTRL_0 0x501520UL
+#define NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 0x501540UL
+#define NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 0x501560UL
+#define NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 0x501580UL
+#define NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT 0
+#define NIG_REG_PRIORITY_FOR_TC_0 0x501bccUL
+#define NIG_REG_RX_TC0_PRIORITY_MASK 0x501becUL
+#define PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 0x1f0540UL
+#define PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 0x1f0534UL
+#define PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 0x1f053cUL
+#define PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 0x1f0530UL
+#define PRS_REG_ETS_ARB_CLIENT_IS_STRICT 0x1f0514UL
+#define PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ 0x1f0518UL
+#define BRB_REG_TOTAL_MAC_SIZE 0x3408c0UL
+#define BRB_REG_SHARED_HR_AREA 0x340880UL
+#define BRB_REG_TC_GUARANTIED_0 0x340900UL
+#define BRB_REG_MAIN_TC_GUARANTIED_HYST_0 0x340978UL
+#define BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 0x340c60UL
+#define BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 0x340d38UL
+#define BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 0x340ab0UL
+#define BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 0x340b88UL
+#define BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 0x340c00UL
+#define BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 0x340cd8UL
+#define BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 0x340a50UL
+#define BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 0x340b28UL
+#define PRS_REG_VXLAN_PORT 0x1f0738UL
+#define NIG_REG_VXLAN_PORT 0x50105cUL
+#define PBF_REG_VXLAN_PORT 0xd80518UL
+#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL
+#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL
+#define NIG_REG_ENC_TYPE_ENABLE 0x501058UL
+#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT 2
+#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL
+#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL
+#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL
+#define NIG_REG_ENC_TYPE_ENABLE 0x501058UL
+#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT 0
+#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT 1
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL
+#define PRS_REG_NGE_PORT 0x1f086cUL
+#define NIG_REG_NGE_PORT 0x508b38UL
+#define PBF_REG_NGE_PORT 0xd8051cUL
+#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL
+#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL
+#define NIG_REG_NGE_ETH_ENABLE 0x508b2cUL
+#define NIG_REG_NGE_IP_ENABLE 0x508b28UL
+#define NIG_REG_NGE_COMP_VER 0x508b30UL
+#define PBF_REG_NGE_COMP_VER 0xd80524UL
+#define PRS_REG_NGE_COMP_VER 0x1f0878UL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN 0x100930UL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN 0x10092cUL
+#define NIG_REG_PKT_PRIORITY_TO_TC 0x501ba4UL
+#define PGLUE_B_REG_START_INIT_PTT_GTT 0x2a8008UL
+#define PGLUE_B_REG_INIT_DONE_PTT_GTT 0x2a800cUL
+#define MISC_REG_AEU_GENERAL_ATTN_35 0x00848cUL
+#define MCP_REG_CPU_STATE 0xe05004UL
+#define MCP_REG_CPU_MODE 0xe05000UL
+#define MCP_REG_CPU_MODE_SOFT_HALT (0x1 << 10)
+#define MCP_REG_CPU_EVENT_MASK 0xe05008UL
+#define PSWHST_REG_VF_DISABLED_ERROR_VALID 0x2a0060UL
+#define PSWHST_REG_VF_DISABLED_ERROR_ADDRESS 0x2a0064UL
+#define PSWHST_REG_VF_DISABLED_ERROR_DATA 0x2a005cUL
+#define PSWHST_REG_INCORRECT_ACCESS_VALID 0x2a0070UL
+#define PSWHST_REG_INCORRECT_ACCESS_ADDRESS 0x2a0074UL
+#define PSWHST_REG_INCORRECT_ACCESS_DATA 0x2a0068UL
+#define PSWHST_REG_INCORRECT_ACCESS_LENGTH 0x2a006cUL
+#define GRC_REG_TIMEOUT_ATTN_ACCESS_VALID 0x050054UL
+#define GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0 0x05004cUL
+#define GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1 0x050050UL
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS2 0x2aa150UL
+#define PGLUE_B_REG_TX_ERR_WR_ADD_31_0 0x2aa144UL
+#define PGLUE_B_REG_TX_ERR_WR_ADD_63_32 0x2aa148UL
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS 0x2aa14cUL
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS2 0x2aa160UL
+#define PGLUE_B_REG_TX_ERR_RD_ADD_31_0 0x2aa154UL
+#define PGLUE_B_REG_TX_ERR_RD_ADD_63_32 0x2aa158UL
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS 0x2aa15cUL
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL 0x2aa164UL
+#define PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS 0x2aa54cUL
+#define PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0 0x2aa544UL
+#define PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32 0x2aa548UL
+#define PGLUE_B_REG_VF_ILT_ERR_DETAILS2 0x2aae80UL
+#define PGLUE_B_REG_VF_ILT_ERR_ADD_31_0 0x2aae74UL
+#define PGLUE_B_REG_VF_ILT_ERR_ADD_63_32 0x2aae78UL
+#define PGLUE_B_REG_VF_ILT_ERR_DETAILS 0x2aae7cUL
+#define PGLUE_B_REG_LATCHED_ERRORS_CLR 0x2aa3bcUL
+#define NIG_REG_INT_MASK_3_P0_LB_TC1_PAUSE_TOO_LONG_INT (0x1 << 10)
+#define DORQ_REG_DB_DROP_REASON 0x100a2cUL
+#define DORQ_REG_DB_DROP_DETAILS 0x100a24UL
+#define TM_REG_INT_STS_1 0x2c0190UL
+#define TM_REG_INT_STS_1_PEND_TASK_SCAN (0x1 << 6)
+#define TM_REG_INT_STS_1_PEND_CONN_SCAN (0x1 << 5)
+#define TM_REG_INT_MASK_1 0x2c0194UL
+#define TM_REG_INT_MASK_1_PEND_CONN_SCAN (0x1 << 5)
+#define TM_REG_INT_MASK_1_PEND_TASK_SCAN (0x1 << 6)
+#define MISC_REG_AEU_AFTER_INVERT_1_IGU 0x0087b4UL
+#define MISC_REG_AEU_ENABLE4_IGU_OUT_0 0x0084a8UL
+#define MISC_REG_AEU_ENABLE3_IGU_OUT_0 0x0084a4UL
+#define YSEM_REG_FAST_MEMORY 0x1540000UL
+#define NIG_REG_FLOWCTRL_MODE 0x501ba0UL
+#define TSEM_REG_FAST_MEMORY 0x1740000UL
+#define TSEM_REG_DBG_FRAME_MODE 0x1701408UL
+#define TSEM_REG_SLOW_DBG_ACTIVE 0x1701400UL
+#define TSEM_REG_SLOW_DBG_MODE 0x1701404UL
+#define TSEM_REG_DBG_MODE1_CFG 0x1701420UL
+#define TSEM_REG_SYNC_DBG_EMPTY 0x1701160UL
+#define TSEM_REG_SLOW_DBG_EMPTY 0x1701140UL
+#define TCM_REG_CTX_RBC_ACCS 0x11814c0UL
+#define TCM_REG_AGG_CON_CTX 0x11814c4UL
+#define TCM_REG_SM_CON_CTX 0x11814ccUL
+#define TCM_REG_AGG_TASK_CTX 0x11814c8UL
+#define TCM_REG_SM_TASK_CTX 0x11814d0UL
+#define MSEM_REG_FAST_MEMORY 0x1840000UL
+#define MSEM_REG_DBG_FRAME_MODE 0x1801408UL
+#define MSEM_REG_SLOW_DBG_ACTIVE 0x1801400UL
+#define MSEM_REG_SLOW_DBG_MODE 0x1801404UL
+#define MSEM_REG_DBG_MODE1_CFG 0x1801420UL
+#define MSEM_REG_SYNC_DBG_EMPTY 0x1801160UL
+#define MSEM_REG_SLOW_DBG_EMPTY 0x1801140UL
+#define MCM_REG_CTX_RBC_ACCS 0x1201800UL
+#define MCM_REG_AGG_CON_CTX 0x1201804UL
+#define MCM_REG_SM_CON_CTX 0x120180cUL
+#define MCM_REG_AGG_TASK_CTX 0x1201808UL
+#define MCM_REG_SM_TASK_CTX 0x1201810UL
+#define USEM_REG_FAST_MEMORY 0x1940000UL
+#define USEM_REG_DBG_FRAME_MODE 0x1901408UL
+#define USEM_REG_SLOW_DBG_ACTIVE 0x1901400UL
+#define USEM_REG_SLOW_DBG_MODE 0x1901404UL
+#define USEM_REG_DBG_MODE1_CFG 0x1901420UL
+#define USEM_REG_SYNC_DBG_EMPTY 0x1901160UL
+#define USEM_REG_SLOW_DBG_EMPTY 0x1901140UL
+#define UCM_REG_CTX_RBC_ACCS 0x1281700UL
+#define UCM_REG_AGG_CON_CTX 0x1281704UL
+#define UCM_REG_SM_CON_CTX 0x128170cUL
+#define UCM_REG_AGG_TASK_CTX 0x1281708UL
+#define UCM_REG_SM_TASK_CTX 0x1281710UL
+#define XSEM_REG_FAST_MEMORY 0x1440000UL
+#define XSEM_REG_DBG_FRAME_MODE 0x1401408UL
+#define XSEM_REG_SLOW_DBG_ACTIVE 0x1401400UL
+#define XSEM_REG_SLOW_DBG_MODE 0x1401404UL
+#define XSEM_REG_DBG_MODE1_CFG 0x1401420UL
+#define XSEM_REG_SYNC_DBG_EMPTY 0x1401160UL
+#define XSEM_REG_SLOW_DBG_EMPTY 0x1401140UL
+#define XCM_REG_CTX_RBC_ACCS 0x1001800UL
+#define XCM_REG_AGG_CON_CTX 0x1001804UL
+#define XCM_REG_SM_CON_CTX 0x1001808UL
+#define YSEM_REG_DBG_FRAME_MODE 0x1501408UL
+#define YSEM_REG_SLOW_DBG_ACTIVE 0x1501400UL
+#define YSEM_REG_SLOW_DBG_MODE 0x1501404UL
+#define YSEM_REG_DBG_MODE1_CFG 0x1501420UL
+#define YSEM_REG_SYNC_DBG_EMPTY 0x1501160UL
+#define YCM_REG_CTX_RBC_ACCS 0x1081800UL
+#define YCM_REG_AGG_CON_CTX 0x1081804UL
+#define YCM_REG_SM_CON_CTX 0x108180cUL
+#define YCM_REG_AGG_TASK_CTX 0x1081808UL
+#define YCM_REG_SM_TASK_CTX 0x1081810UL
+#define PSEM_REG_FAST_MEMORY 0x1640000UL
+#define PSEM_REG_DBG_FRAME_MODE 0x1601408UL
+#define PSEM_REG_SLOW_DBG_ACTIVE 0x1601400UL
+#define PSEM_REG_SLOW_DBG_MODE 0x1601404UL
+#define PSEM_REG_DBG_MODE1_CFG 0x1601420UL
+#define PSEM_REG_SYNC_DBG_EMPTY 0x1601160UL
+#define PSEM_REG_SLOW_DBG_EMPTY 0x1601140UL
+#define PCM_REG_CTX_RBC_ACCS 0x1101440UL
+#define PCM_REG_SM_CON_CTX 0x1101444UL
+#define GRC_REG_DBG_SELECT 0x0500a4UL
+#define GRC_REG_DBG_DWORD_ENABLE 0x0500a8UL
+#define GRC_REG_DBG_SHIFT 0x0500acUL
+#define GRC_REG_DBG_FORCE_VALID 0x0500b0UL
+#define GRC_REG_DBG_FORCE_FRAME 0x0500b4UL
+#define PGLUE_B_REG_DBG_SELECT 0x2a8400UL
+#define PGLUE_B_REG_DBG_DWORD_ENABLE 0x2a8404UL
+#define PGLUE_B_REG_DBG_SHIFT 0x2a8408UL
+#define PGLUE_B_REG_DBG_FORCE_VALID 0x2a840cUL
+#define PGLUE_B_REG_DBG_FORCE_FRAME 0x2a8410UL
+#define CNIG_REG_DBG_SELECT_K2 0x218254UL
+#define CNIG_REG_DBG_DWORD_ENABLE_K2 0x218258UL
+#define CNIG_REG_DBG_SHIFT_K2 0x21825cUL
+#define CNIG_REG_DBG_FORCE_VALID_K2 0x218260UL
+#define CNIG_REG_DBG_FORCE_FRAME_K2 0x218264UL
+#define NCSI_REG_DBG_SELECT 0x040474UL
+#define NCSI_REG_DBG_DWORD_ENABLE 0x040478UL
+#define NCSI_REG_DBG_SHIFT 0x04047cUL
+#define NCSI_REG_DBG_FORCE_VALID 0x040480UL
+#define NCSI_REG_DBG_FORCE_FRAME 0x040484UL
+#define BMB_REG_DBG_SELECT 0x540a7cUL
+#define BMB_REG_DBG_DWORD_ENABLE 0x540a80UL
+#define BMB_REG_DBG_SHIFT 0x540a84UL
+#define BMB_REG_DBG_FORCE_VALID 0x540a88UL
+#define BMB_REG_DBG_FORCE_FRAME 0x540a8cUL
+#define PCIE_REG_DBG_SELECT 0x0547e8UL
+#define PHY_PCIE_REG_DBG_SELECT 0x629fe8UL
+#define PCIE_REG_DBG_DWORD_ENABLE 0x0547ecUL
+#define PHY_PCIE_REG_DBG_DWORD_ENABLE 0x629fecUL
+#define PCIE_REG_DBG_SHIFT 0x0547f0UL
+#define PHY_PCIE_REG_DBG_SHIFT 0x629ff0UL
+#define PCIE_REG_DBG_FORCE_VALID 0x0547f4UL
+#define PHY_PCIE_REG_DBG_FORCE_VALID 0x629ff4UL
+#define PCIE_REG_DBG_FORCE_FRAME 0x0547f8UL
+#define PHY_PCIE_REG_DBG_FORCE_FRAME 0x629ff8UL
+#define MCP2_REG_DBG_SELECT 0x052400UL
+#define MCP2_REG_DBG_SHIFT 0x052408UL
+#define MCP2_REG_DBG_FORCE_VALID 0x052440UL
+#define MCP2_REG_DBG_FORCE_FRAME 0x052444UL
+#define PSWHST_REG_DBG_SELECT 0x2a0100UL
+#define PSWHST_REG_DBG_DWORD_ENABLE 0x2a0104UL
+#define PSWHST_REG_DBG_SHIFT 0x2a0108UL
+#define PSWHST_REG_DBG_FORCE_VALID 0x2a010cUL
+#define PSWHST_REG_DBG_FORCE_FRAME 0x2a0110UL
+#define PSWHST2_REG_DBG_SELECT 0x29e058UL
+#define PSWHST2_REG_DBG_DWORD_ENABLE 0x29e05cUL
+#define PSWHST2_REG_DBG_SHIFT 0x29e060UL
+#define PSWHST2_REG_DBG_FORCE_VALID 0x29e064UL
+#define PSWHST2_REG_DBG_FORCE_FRAME 0x29e068UL
+#define PSWRD_REG_DBG_DWORD_ENABLE 0x29c044UL
+#define PSWRD_REG_DBG_SHIFT 0x29c048UL
+#define PSWRD_REG_DBG_FORCE_VALID 0x29c04cUL
+#define PSWRD_REG_DBG_FORCE_FRAME 0x29c050UL
+#define PSWRD2_REG_DBG_SELECT 0x29d400UL
+#define PSWRD2_REG_DBG_DWORD_ENABLE 0x29d404UL
+#define PSWRD2_REG_DBG_SHIFT 0x29d408UL
+#define PSWRD2_REG_DBG_FORCE_VALID 0x29d40cUL
+#define PSWRD2_REG_DBG_FORCE_FRAME 0x29d410UL
+#define PSWWR_REG_DBG_SELECT 0x29a084UL
+#define PSWWR_REG_DBG_DWORD_ENABLE 0x29a088UL
+#define PSWWR_REG_DBG_SHIFT 0x29a08cUL
+#define PSWWR_REG_DBG_FORCE_VALID 0x29a090UL
+#define PSWWR_REG_DBG_FORCE_FRAME 0x29a094UL
+#define PSWRQ_REG_DBG_DWORD_ENABLE 0x280024UL
+#define PSWRQ_REG_DBG_SHIFT 0x280028UL
+#define PSWRQ_REG_DBG_FORCE_VALID 0x28002cUL
+#define PSWRQ_REG_DBG_FORCE_FRAME 0x280030UL
+#define PSWRQ2_REG_DBG_SELECT 0x240100UL
+#define PSWRQ2_REG_DBG_DWORD_ENABLE 0x240104UL
+#define PSWRQ2_REG_DBG_SHIFT 0x240108UL
+#define PSWRQ2_REG_DBG_FORCE_VALID 0x24010cUL
+#define PSWRQ2_REG_DBG_FORCE_FRAME 0x240110UL
+#define PGLCS_REG_DBG_SELECT 0x001d14UL
+#define PGLCS_REG_DBG_DWORD_ENABLE 0x001d18UL
+#define PGLCS_REG_DBG_SHIFT 0x001d1cUL
+#define PGLCS_REG_DBG_FORCE_VALID 0x001d20UL
+#define PGLCS_REG_DBG_FORCE_FRAME 0x001d24UL
+#define PTU_REG_DBG_SELECT 0x560100UL
+#define PTU_REG_DBG_DWORD_ENABLE 0x560104UL
+#define PTU_REG_DBG_SHIFT 0x560108UL
+#define PTU_REG_DBG_FORCE_VALID 0x56010cUL
+#define PTU_REG_DBG_FORCE_FRAME 0x560110UL
+#define DMAE_REG_DBG_SELECT 0x00c510UL
+#define DMAE_REG_DBG_DWORD_ENABLE 0x00c514UL
+#define DMAE_REG_DBG_SHIFT 0x00c518UL
+#define DMAE_REG_DBG_FORCE_VALID 0x00c51cUL
+#define DMAE_REG_DBG_FORCE_FRAME 0x00c520UL
+#define TCM_REG_DBG_SELECT 0x1180040UL
+#define TCM_REG_DBG_DWORD_ENABLE 0x1180044UL
+#define TCM_REG_DBG_SHIFT 0x1180048UL
+#define TCM_REG_DBG_FORCE_VALID 0x118004cUL
+#define TCM_REG_DBG_FORCE_FRAME 0x1180050UL
+#define MCM_REG_DBG_SELECT 0x1200040UL
+#define MCM_REG_DBG_DWORD_ENABLE 0x1200044UL
+#define MCM_REG_DBG_SHIFT 0x1200048UL
+#define MCM_REG_DBG_FORCE_VALID 0x120004cUL
+#define MCM_REG_DBG_FORCE_FRAME 0x1200050UL
+#define UCM_REG_DBG_SELECT 0x1280050UL
+#define UCM_REG_DBG_DWORD_ENABLE 0x1280054UL
+#define UCM_REG_DBG_SHIFT 0x1280058UL
+#define UCM_REG_DBG_FORCE_VALID 0x128005cUL
+#define UCM_REG_DBG_FORCE_FRAME 0x1280060UL
+#define XCM_REG_DBG_SELECT 0x1000040UL
+#define XCM_REG_DBG_DWORD_ENABLE 0x1000044UL
+#define XCM_REG_DBG_SHIFT 0x1000048UL
+#define XCM_REG_DBG_FORCE_VALID 0x100004cUL
+#define XCM_REG_DBG_FORCE_FRAME 0x1000050UL
+#define YCM_REG_DBG_SELECT 0x1080040UL
+#define YCM_REG_DBG_DWORD_ENABLE 0x1080044UL
+#define YCM_REG_DBG_SHIFT 0x1080048UL
+#define YCM_REG_DBG_FORCE_VALID 0x108004cUL
+#define YCM_REG_DBG_FORCE_FRAME 0x1080050UL
+#define PCM_REG_DBG_SELECT 0x1100040UL
+#define PCM_REG_DBG_DWORD_ENABLE 0x1100044UL
+#define PCM_REG_DBG_SHIFT 0x1100048UL
+#define PCM_REG_DBG_FORCE_VALID 0x110004cUL
+#define PCM_REG_DBG_FORCE_FRAME 0x1100050UL
+#define QM_REG_DBG_SELECT 0x2f2e74UL
+#define QM_REG_DBG_DWORD_ENABLE 0x2f2e78UL
+#define QM_REG_DBG_SHIFT 0x2f2e7cUL
+#define QM_REG_DBG_FORCE_VALID 0x2f2e80UL
+#define QM_REG_DBG_FORCE_FRAME 0x2f2e84UL
+#define TM_REG_DBG_SELECT 0x2c07a8UL
+#define TM_REG_DBG_DWORD_ENABLE 0x2c07acUL
+#define TM_REG_DBG_SHIFT 0x2c07b0UL
+#define TM_REG_DBG_FORCE_VALID 0x2c07b4UL
+#define TM_REG_DBG_FORCE_FRAME 0x2c07b8UL
+#define DORQ_REG_DBG_SELECT 0x100ad0UL
+#define DORQ_REG_DBG_DWORD_ENABLE 0x100ad4UL
+#define DORQ_REG_DBG_SHIFT 0x100ad8UL
+#define DORQ_REG_DBG_FORCE_VALID 0x100adcUL
+#define DORQ_REG_DBG_FORCE_FRAME 0x100ae0UL
+#define BRB_REG_DBG_SELECT 0x340ed0UL
+#define BRB_REG_DBG_DWORD_ENABLE 0x340ed4UL
+#define BRB_REG_DBG_SHIFT 0x340ed8UL
+#define BRB_REG_DBG_FORCE_VALID 0x340edcUL
+#define BRB_REG_DBG_FORCE_FRAME 0x340ee0UL
+#define SRC_REG_DBG_SELECT 0x238700UL
+#define SRC_REG_DBG_DWORD_ENABLE 0x238704UL
+#define SRC_REG_DBG_SHIFT 0x238708UL
+#define SRC_REG_DBG_FORCE_VALID 0x23870cUL
+#define SRC_REG_DBG_FORCE_FRAME 0x238710UL
+#define PRS_REG_DBG_SELECT 0x1f0b6cUL
+#define PRS_REG_DBG_DWORD_ENABLE 0x1f0b70UL
+#define PRS_REG_DBG_SHIFT 0x1f0b74UL
+#define PRS_REG_DBG_FORCE_VALID 0x1f0ba0UL
+#define PRS_REG_DBG_FORCE_FRAME 0x1f0ba4UL
+#define TSDM_REG_DBG_SELECT 0xfb0e28UL
+#define TSDM_REG_DBG_DWORD_ENABLE 0xfb0e2cUL
+#define TSDM_REG_DBG_SHIFT 0xfb0e30UL
+#define TSDM_REG_DBG_FORCE_VALID 0xfb0e34UL
+#define TSDM_REG_DBG_FORCE_FRAME 0xfb0e38UL
+#define MSDM_REG_DBG_SELECT 0xfc0e28UL
+#define MSDM_REG_DBG_DWORD_ENABLE 0xfc0e2cUL
+#define MSDM_REG_DBG_SHIFT 0xfc0e30UL
+#define MSDM_REG_DBG_FORCE_VALID 0xfc0e34UL
+#define MSDM_REG_DBG_FORCE_FRAME 0xfc0e38UL
+#define USDM_REG_DBG_SELECT 0xfd0e28UL
+#define USDM_REG_DBG_DWORD_ENABLE 0xfd0e2cUL
+#define USDM_REG_DBG_SHIFT 0xfd0e30UL
+#define USDM_REG_DBG_FORCE_VALID 0xfd0e34UL
+#define USDM_REG_DBG_FORCE_FRAME 0xfd0e38UL
+#define XSDM_REG_DBG_SELECT 0xf80e28UL
+#define XSDM_REG_DBG_DWORD_ENABLE 0xf80e2cUL
+#define XSDM_REG_DBG_SHIFT 0xf80e30UL
+#define XSDM_REG_DBG_FORCE_VALID 0xf80e34UL
+#define XSDM_REG_DBG_FORCE_FRAME 0xf80e38UL
+#define YSDM_REG_DBG_SELECT 0xf90e28UL
+#define YSDM_REG_DBG_DWORD_ENABLE 0xf90e2cUL
+#define YSDM_REG_DBG_SHIFT 0xf90e30UL
+#define YSDM_REG_DBG_FORCE_VALID 0xf90e34UL
+#define YSDM_REG_DBG_FORCE_FRAME 0xf90e38UL
+#define PSDM_REG_DBG_SELECT 0xfa0e28UL
+#define PSDM_REG_DBG_DWORD_ENABLE 0xfa0e2cUL
+#define PSDM_REG_DBG_SHIFT 0xfa0e30UL
+#define PSDM_REG_DBG_FORCE_VALID 0xfa0e34UL
+#define PSDM_REG_DBG_FORCE_FRAME 0xfa0e38UL
+#define TSEM_REG_DBG_SELECT 0x1701528UL
+#define TSEM_REG_DBG_DWORD_ENABLE 0x170152cUL
+#define TSEM_REG_DBG_SHIFT 0x1701530UL
+#define TSEM_REG_DBG_FORCE_VALID 0x1701534UL
+#define TSEM_REG_DBG_FORCE_FRAME 0x1701538UL
+#define MSEM_REG_DBG_SELECT 0x1801528UL
+#define MSEM_REG_DBG_DWORD_ENABLE 0x180152cUL
+#define MSEM_REG_DBG_SHIFT 0x1801530UL
+#define MSEM_REG_DBG_FORCE_VALID 0x1801534UL
+#define MSEM_REG_DBG_FORCE_FRAME 0x1801538UL
+#define USEM_REG_DBG_SELECT 0x1901528UL
+#define USEM_REG_DBG_DWORD_ENABLE 0x190152cUL
+#define USEM_REG_DBG_SHIFT 0x1901530UL
+#define USEM_REG_DBG_FORCE_VALID 0x1901534UL
+#define USEM_REG_DBG_FORCE_FRAME 0x1901538UL
+#define XSEM_REG_DBG_SELECT 0x1401528UL
+#define XSEM_REG_DBG_DWORD_ENABLE 0x140152cUL
+#define XSEM_REG_DBG_SHIFT 0x1401530UL
+#define XSEM_REG_DBG_FORCE_VALID 0x1401534UL
+#define XSEM_REG_DBG_FORCE_FRAME 0x1401538UL
+#define YSEM_REG_DBG_SELECT 0x1501528UL
+#define YSEM_REG_DBG_DWORD_ENABLE 0x150152cUL
+#define YSEM_REG_DBG_SHIFT 0x1501530UL
+#define YSEM_REG_DBG_FORCE_VALID 0x1501534UL
+#define YSEM_REG_DBG_FORCE_FRAME 0x1501538UL
+#define PSEM_REG_DBG_SELECT 0x1601528UL
+#define PSEM_REG_DBG_DWORD_ENABLE 0x160152cUL
+#define PSEM_REG_DBG_SHIFT 0x1601530UL
+#define PSEM_REG_DBG_FORCE_VALID 0x1601534UL
+#define PSEM_REG_DBG_FORCE_FRAME 0x1601538UL
+#define RSS_REG_DBG_SELECT 0x238c4cUL
+#define RSS_REG_DBG_DWORD_ENABLE 0x238c50UL
+#define RSS_REG_DBG_SHIFT 0x238c54UL
+#define RSS_REG_DBG_FORCE_VALID 0x238c58UL
+#define RSS_REG_DBG_FORCE_FRAME 0x238c5cUL
+#define TMLD_REG_DBG_SELECT 0x4d1600UL
+#define TMLD_REG_DBG_DWORD_ENABLE 0x4d1604UL
+#define TMLD_REG_DBG_SHIFT 0x4d1608UL
+#define TMLD_REG_DBG_FORCE_VALID 0x4d160cUL
+#define TMLD_REG_DBG_FORCE_FRAME 0x4d1610UL
+#define MULD_REG_DBG_SELECT 0x4e1600UL
+#define MULD_REG_DBG_DWORD_ENABLE 0x4e1604UL
+#define MULD_REG_DBG_SHIFT 0x4e1608UL
+#define MULD_REG_DBG_FORCE_VALID 0x4e160cUL
+#define MULD_REG_DBG_FORCE_FRAME 0x4e1610UL
+#define YULD_REG_DBG_SELECT 0x4c9600UL
+#define YULD_REG_DBG_DWORD_ENABLE 0x4c9604UL
+#define YULD_REG_DBG_SHIFT 0x4c9608UL
+#define YULD_REG_DBG_FORCE_VALID 0x4c960cUL
+#define YULD_REG_DBG_FORCE_FRAME 0x4c9610UL
+#define XYLD_REG_DBG_SELECT 0x4c1600UL
+#define XYLD_REG_DBG_DWORD_ENABLE 0x4c1604UL
+#define XYLD_REG_DBG_SHIFT 0x4c1608UL
+#define XYLD_REG_DBG_FORCE_VALID 0x4c160cUL
+#define XYLD_REG_DBG_FORCE_FRAME 0x4c1610UL
+#define PRM_REG_DBG_SELECT 0x2306a8UL
+#define PRM_REG_DBG_DWORD_ENABLE 0x2306acUL
+#define PRM_REG_DBG_SHIFT 0x2306b0UL
+#define PRM_REG_DBG_FORCE_VALID 0x2306b4UL
+#define PRM_REG_DBG_FORCE_FRAME 0x2306b8UL
+#define PBF_PB1_REG_DBG_SELECT 0xda0728UL
+#define PBF_PB1_REG_DBG_DWORD_ENABLE 0xda072cUL
+#define PBF_PB1_REG_DBG_SHIFT 0xda0730UL
+#define PBF_PB1_REG_DBG_FORCE_VALID 0xda0734UL
+#define PBF_PB1_REG_DBG_FORCE_FRAME 0xda0738UL
+#define PBF_PB2_REG_DBG_SELECT 0xda4728UL
+#define PBF_PB2_REG_DBG_DWORD_ENABLE 0xda472cUL
+#define PBF_PB2_REG_DBG_SHIFT 0xda4730UL
+#define PBF_PB2_REG_DBG_FORCE_VALID 0xda4734UL
+#define PBF_PB2_REG_DBG_FORCE_FRAME 0xda4738UL
+#define RPB_REG_DBG_SELECT 0x23c728UL
+#define RPB_REG_DBG_DWORD_ENABLE 0x23c72cUL
+#define RPB_REG_DBG_SHIFT 0x23c730UL
+#define RPB_REG_DBG_FORCE_VALID 0x23c734UL
+#define RPB_REG_DBG_FORCE_FRAME 0x23c738UL
+#define BTB_REG_DBG_SELECT 0xdb08c8UL
+#define BTB_REG_DBG_DWORD_ENABLE 0xdb08ccUL
+#define BTB_REG_DBG_SHIFT 0xdb08d0UL
+#define BTB_REG_DBG_FORCE_VALID 0xdb08d4UL
+#define BTB_REG_DBG_FORCE_FRAME 0xdb08d8UL
+#define PBF_REG_DBG_SELECT 0xd80060UL
+#define PBF_REG_DBG_DWORD_ENABLE 0xd80064UL
+#define PBF_REG_DBG_SHIFT 0xd80068UL
+#define PBF_REG_DBG_FORCE_VALID 0xd8006cUL
+#define PBF_REG_DBG_FORCE_FRAME 0xd80070UL
+#define RDIF_REG_DBG_SELECT 0x300500UL
+#define RDIF_REG_DBG_DWORD_ENABLE 0x300504UL
+#define RDIF_REG_DBG_SHIFT 0x300508UL
+#define RDIF_REG_DBG_FORCE_VALID 0x30050cUL
+#define RDIF_REG_DBG_FORCE_FRAME 0x300510UL
+#define TDIF_REG_DBG_SELECT 0x310500UL
+#define TDIF_REG_DBG_DWORD_ENABLE 0x310504UL
+#define TDIF_REG_DBG_SHIFT 0x310508UL
+#define TDIF_REG_DBG_FORCE_VALID 0x31050cUL
+#define TDIF_REG_DBG_FORCE_FRAME 0x310510UL
+#define CDU_REG_DBG_SELECT 0x580704UL
+#define CDU_REG_DBG_DWORD_ENABLE 0x580708UL
+#define CDU_REG_DBG_SHIFT 0x58070cUL
+#define CDU_REG_DBG_FORCE_VALID 0x580710UL
+#define CDU_REG_DBG_FORCE_FRAME 0x580714UL
+#define CCFC_REG_DBG_SELECT 0x2e0500UL
+#define CCFC_REG_DBG_DWORD_ENABLE 0x2e0504UL
+#define CCFC_REG_DBG_SHIFT 0x2e0508UL
+#define CCFC_REG_DBG_FORCE_VALID 0x2e050cUL
+#define CCFC_REG_DBG_FORCE_FRAME 0x2e0510UL
+#define TCFC_REG_DBG_SELECT 0x2d0500UL
+#define TCFC_REG_DBG_DWORD_ENABLE 0x2d0504UL
+#define TCFC_REG_DBG_SHIFT 0x2d0508UL
+#define TCFC_REG_DBG_FORCE_VALID 0x2d050cUL
+#define TCFC_REG_DBG_FORCE_FRAME 0x2d0510UL
+#define IGU_REG_DBG_SELECT 0x181578UL
+#define IGU_REG_DBG_DWORD_ENABLE 0x18157cUL
+#define IGU_REG_DBG_SHIFT 0x181580UL
+#define IGU_REG_DBG_FORCE_VALID 0x181584UL
+#define IGU_REG_DBG_FORCE_FRAME 0x181588UL
+#define CAU_REG_DBG_SELECT 0x1c0ea8UL
+#define CAU_REG_DBG_DWORD_ENABLE 0x1c0eacUL
+#define CAU_REG_DBG_SHIFT 0x1c0eb0UL
+#define CAU_REG_DBG_FORCE_VALID 0x1c0eb4UL
+#define CAU_REG_DBG_FORCE_FRAME 0x1c0eb8UL
+#define UMAC_REG_DBG_SELECT 0x051094UL
+#define UMAC_REG_DBG_DWORD_ENABLE 0x051098UL
+#define UMAC_REG_DBG_SHIFT 0x05109cUL
+#define UMAC_REG_DBG_FORCE_VALID 0x0510a0UL
+#define UMAC_REG_DBG_FORCE_FRAME 0x0510a4UL
+#define NIG_REG_DBG_SELECT 0x502140UL
+#define NIG_REG_DBG_DWORD_ENABLE 0x502144UL
+#define NIG_REG_DBG_SHIFT 0x502148UL
+#define NIG_REG_DBG_FORCE_VALID 0x50214cUL
+#define NIG_REG_DBG_FORCE_FRAME 0x502150UL
+#define WOL_REG_DBG_SELECT 0x600140UL
+#define WOL_REG_DBG_DWORD_ENABLE 0x600144UL
+#define WOL_REG_DBG_SHIFT 0x600148UL
+#define WOL_REG_DBG_FORCE_VALID 0x60014cUL
+#define WOL_REG_DBG_FORCE_FRAME 0x600150UL
+#define BMBN_REG_DBG_SELECT 0x610140UL
+#define BMBN_REG_DBG_DWORD_ENABLE 0x610144UL
+#define BMBN_REG_DBG_SHIFT 0x610148UL
+#define BMBN_REG_DBG_FORCE_VALID 0x61014cUL
+#define BMBN_REG_DBG_FORCE_FRAME 0x610150UL
+#define NWM_REG_DBG_SELECT 0x8000ecUL
+#define NWM_REG_DBG_DWORD_ENABLE 0x8000f0UL
+#define NWM_REG_DBG_SHIFT 0x8000f4UL
+#define NWM_REG_DBG_FORCE_VALID 0x8000f8UL
+#define NWM_REG_DBG_FORCE_FRAME 0x8000fcUL
+#define BRB_REG_BIG_RAM_ADDRESS 0x340800UL
+#define BRB_REG_BIG_RAM_DATA 0x341500UL
+#define BTB_REG_BIG_RAM_ADDRESS 0xdb0800UL
+#define BTB_REG_BIG_RAM_DATA 0xdb0c00UL
+#define BMB_REG_BIG_RAM_ADDRESS 0x540800UL
+#define BMB_REG_BIG_RAM_DATA 0x540f00UL
+#define MISCS_REG_RESET_PL_UA 0x009050UL
+#define MISC_REG_RESET_PL_UA 0x008050UL
+#define MISC_REG_RESET_PL_HV 0x008060UL
+#define MISC_REG_RESET_PL_PDA_VMAIN_1 0x008070UL
+#define MISC_REG_RESET_PL_PDA_VMAIN_2 0x008080UL
+#define SEM_FAST_REG_INT_RAM 0x020000UL
+#define DBG_REG_DBG_BLOCK_ON 0x010454UL
+#define DBG_REG_FRAMING_MODE 0x010058UL
+#define SEM_FAST_REG_DEBUG_MODE 0x000744UL
+#define SEM_FAST_REG_DEBUG_ACTIVE 0x000740UL
+#define SEM_FAST_REG_DBG_MODE6_SRC_DISABLE 0x000750UL
+#define SEM_FAST_REG_FILTER_CID 0x000754UL
+#define SEM_FAST_REG_EVENT_ID_RANGE_STRT 0x000760UL
+#define SEM_FAST_REG_EVENT_ID_RANGE_END 0x000764UL
+#define SEM_FAST_REG_FILTER_EVENT_ID 0x000758UL
+#define SEM_FAST_REG_EVENT_ID_MASK 0x00075cUL
+#define SEM_FAST_REG_RECORD_FILTER_ENABLE 0x000768UL
+#define SEM_FAST_REG_DBG_MODE6_SRC_DISABLE 0x000750UL
+#define SEM_FAST_REG_DEBUG_ACTIVE 0x000740UL
+#define SEM_FAST_REG_RECORD_FILTER_ENABLE 0x000768UL
+#define DBG_REG_TIMESTAMP_VALID_EN 0x010b58UL
+#define DBG_REG_FILTER_ENABLE 0x0109d0UL
+#define DBG_REG_TRIGGER_ENABLE 0x01054cUL
+#define DBG_REG_FILTER_CNSTR_OPRTN_0 0x010a28UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_OPRTN_0 0x01071cUL
+#define DBG_REG_FILTER_CNSTR_DATA_0 0x0109d8UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_0 0x01059cUL
+#define DBG_REG_FILTER_CNSTR_DATA_MASK_0 0x0109f8UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_MASK_0 0x01065cUL
+#define DBG_REG_FILTER_CNSTR_FRAME_0 0x0109e8UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_0 0x0105fcUL
+#define DBG_REG_FILTER_CNSTR_FRAME_MASK_0 0x010a08UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_MASK_0 0x0106bcUL
+#define DBG_REG_FILTER_CNSTR_OFFSET_0 0x010a18UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_OFFSET_0 0x0107dcUL
+#define DBG_REG_FILTER_CNSTR_RANGE_0 0x010a38UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_RANGE_0 0x01077cUL
+#define DBG_REG_FILTER_CNSTR_CYCLIC_0 0x010a68UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_CYCLIC_0 0x0108fcUL
+#define DBG_REG_FILTER_CNSTR_MUST_0 0x010a48UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_MUST_0 0x01083cUL
+#define DBG_REG_INTR_BUFFER 0x014000UL
+#define DBG_REG_INTR_BUFFER_WR_PTR 0x010404UL
+#define DBG_REG_WRAP_ON_INT_BUFFER 0x010418UL
+#define DBG_REG_INTR_BUFFER_RD_PTR 0x010400UL
+#define DBG_REG_EXT_BUFFER_WR_PTR 0x010410UL
+#define DBG_REG_WRAP_ON_EXT_BUFFER 0x01041cUL
+#define SEM_FAST_REG_STALL_0 0x000488UL
+#define SEM_FAST_REG_STALLED 0x000494UL
+#define SEM_FAST_REG_STORM_REG_FILE 0x008000UL
+#define SEM_FAST_REG_VFC_DATA_WR 0x000b40UL
+#define SEM_FAST_REG_VFC_ADDR 0x000b44UL
+#define SEM_FAST_REG_VFC_DATA_RD 0x000b48UL
+#define SEM_FAST_REG_VFC_DATA_WR 0x000b40UL
+#define SEM_FAST_REG_VFC_ADDR 0x000b44UL
+#define SEM_FAST_REG_VFC_DATA_RD 0x000b48UL
+#define RSS_REG_RSS_RAM_ADDR 0x238c30UL
+#define RSS_REG_RSS_RAM_DATA 0x238c20UL
+#define MISCS_REG_BLOCK_256B_EN 0x009074UL
+#define MCP_REG_CPU_REG_FILE 0xe05200UL
+#define MCP_REG_CPU_REG_FILE_SIZE 32
+#define DBG_REG_CALENDAR_OUT_DATA 0x010480UL
+#define DBG_REG_FULL_MODE 0x010060UL
+#define DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_LSB 0x010430UL
+#define DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_MSB 0x010434UL
+#define DBG_REG_TARGET_PACKET_SIZE 0x010b3cUL
+#define DBG_REG_PCI_EXT_BUFFER_SIZE 0x010438UL
+#define DBG_REG_PCI_FUNC_NUM 0x010a98UL
+#define DBG_REG_PCI_LOGIC_ADDR 0x010460UL
+#define DBG_REG_PCI_REQ_CREDIT 0x010440UL
+#define DBG_REG_DEBUG_TARGET 0x01005cUL
+#define DBG_REG_OUTPUT_ENABLE 0x01000cUL
+#define DBG_REG_OUTPUT_ENABLE 0x01000cUL
+#define DBG_REG_DEBUG_TARGET 0x01005cUL
+#define DBG_REG_OTHER_ENGINE_MODE 0x010010UL
+#define NIG_REG_DEBUG_PORT 0x5020d0UL
+#define DBG_REG_ETHERNET_HDR_WIDTH 0x010b38UL
+#define DBG_REG_ETHERNET_HDR_7 0x010b34UL
+#define DBG_REG_ETHERNET_HDR_6 0x010b30UL
+#define DBG_REG_ETHERNET_HDR_5 0x010b2cUL
+#define DBG_REG_ETHERNET_HDR_4 0x010b28UL
+#define DBG_REG_TARGET_PACKET_SIZE 0x010b3cUL
+#define DBG_REG_NIG_DATA_LIMIT_SIZE 0x01043cUL
+#define DBG_REG_TIMESTAMP_VALID_EN 0x010b58UL
+#define DBG_REG_TIMESTAMP_FRAME_EN 0x010b54UL
+#define DBG_REG_TIMESTAMP_TICK 0x010b50UL
+#define DBG_REG_FILTER_ID_NUM 0x0109d4UL
+#define DBG_REG_FILTER_MSG_LENGTH_ENABLE 0x010a78UL
+#define DBG_REG_FILTER_MSG_LENGTH 0x010a7cUL
+#define DBG_REG_RCRD_ON_WINDOW_PRE_NUM_CHUNKS 0x010a90UL
+#define DBG_REG_RCRD_ON_WINDOW_POST_NUM_CYCLES 0x010a94UL
+#define DBG_REG_RCRD_ON_WINDOW_PRE_TRGR_EVNT_MODE 0x010a88UL
+#define DBG_REG_RCRD_ON_WINDOW_POST_TRGR_EVNT_MODE 0x010a8cUL
+#define DBG_REG_TRIGGER_ENABLE 0x01054cUL
+#define DBG_REG_TRIGGER_STATE_ID_0 0x010554UL
+#define DBG_REG_TRIGGER_STATE_MSG_LENGTH_ENABLE_0 0x01095cUL
+#define DBG_REG_TRIGGER_STATE_MSG_LENGTH_0 0x010968UL
+#define DBG_REG_TRIGGER_STATE_SET_COUNT_0 0x010584UL
+#define DBG_REG_TRIGGER_STATE_SET_NXT_STATE_0 0x01056cUL
+#define DBG_REG_NO_GRANT_ON_FULL 0x010458UL
+#define DBG_REG_STORM_ID_NUM 0x010b14UL
+#define DBG_REG_CALENDAR_SLOT0 0x010014UL
+#define DBG_REG_HW_ID_NUM 0x010b10UL
+#define DBG_REG_FILTER_ENABLE 0x0109d0UL
+#define DBG_REG_TIMESTAMP 0x010b4cUL
+#define DBG_REG_CPU_TIMEOUT 0x010450UL
+#define DBG_REG_TRIGGER_STATUS_CUR_STATE 0x010b60UL
+#define GRC_REG_TRACE_FIFO_VALID_DATA 0x050064UL
+#define GRC_REG_TRACE_FIFO 0x050068UL
+#define IGU_REG_ERROR_HANDLING_DATA_VALID 0x181530UL
+#define IGU_REG_ERROR_HANDLING_MEMORY 0x181520UL
+#define GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW 0x05040cUL
+#define GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW 0x05040cUL
+#define GRC_REG_PROTECTION_OVERRIDE_WINDOW 0x050500UL
+#define TSEM_REG_VF_ERROR 0x1700408UL
+#define USEM_REG_VF_ERROR 0x1900408UL
+#define MSEM_REG_VF_ERROR 0x1800408UL
+#define XSEM_REG_VF_ERROR 0x1400408UL
+#define YSEM_REG_VF_ERROR 0x1500408UL
+#define PSEM_REG_VF_ERROR 0x1600408UL
+#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR 0x2aa118UL
+#define IGU_REG_STATISTIC_NUM_VF_MSG_SENT 0x180408UL
+#define IGU_REG_VF_CONFIGURATION 0x180804UL
+#define PSWHST_REG_ZONE_PERMISSION_TABLE 0x2a0800UL
+#define DORQ_REG_VF_USAGE_CNT 0x1009c4UL
+#define PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 0xd806ccUL
+#define PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 0xd806c8UL
+#define PRS_REG_MSG_CT_MAIN_0 0x1f0a24UL
+#define PRS_REG_MSG_CT_LB_0 0x1f0a28UL
+#define BRB_REG_PER_TC_COUNTERS 0x341a00UL
+
+/* added */
+#define DORQ_REG_PF_DPI_BIT_SHIFT 0x100450UL
+#define DORQ_REG_PF_ICID_BIT_SHIFT_NORM 0x100448UL
+#define DORQ_REG_PF_MIN_ADDR_REG1 0x100400UL
+#define MISCS_REG_FUNCTION_HIDE 0x0096f0UL
+#define PCIE_REG_PRTY_MASK 0x0547b4UL
+#define PGLUE_B_REG_VF_BAR0_SIZE 0x2aaeb4UL
+#define BAR0_MAP_REG_YSDM_RAM 0x1e80000UL
+#define SEM_FAST_REG_INT_RAM_SIZE 20480
+#define MCP_REG_SCRATCH_SIZE 57344
+
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT 24
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT 24
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT 16
+#define DORQ_REG_DB_DROP_DETAILS_ADDRESS 0x100a1cUL
+
+/* 8.10.9.0 FW */
+#define NIG_REG_VXLAN_CTRL 0x50105cUL
+#define PRS_REG_SEARCH_ROCE 0x1f040cUL
+#define PRS_REG_CM_HDR_GFT 0x1f11c8UL
+#define PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT 0
+#define PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT 8
+#define CCFC_REG_WEAK_ENABLE_VF 0x2e0704UL
+#define TCFC_REG_STRONG_ENABLE_VF 0x2d070cUL
+#define TCFC_REG_WEAK_ENABLE_VF 0x2d0704UL
+#define PRS_REG_SEARCH_GFT 0x1f11bcUL
+#define PRS_REG_LOAD_L2_FILTER 0x1f0198UL
+#define PRS_REG_GFT_CAM 0x1f1100UL
+#define PRS_REG_GFT_PROFILE_MASK_RAM 0x1f1000UL
+#define PGLUE_B_REG_MSDM_VF_SHIFT_B 0x2aa1c4UL
+#define PGLUE_B_REG_MSDM_OFFSET_MASK_B 0x2aa1c0UL
+#define PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST 0x1f0a0cUL
+#define PRS_REG_SEARCH_FCOE 0x1f0408UL
+#define PGLUE_B_REG_PGL_ADDR_E8_F0 0x2aaf98UL
+#define NIG_REG_DSCP_TO_TC_MAP_ENABLE 0x5088f8UL
+#define PGLUE_B_REG_PGL_ADDR_EC_F0 0x2aaf9cUL
+#define PGLUE_B_REG_PGL_ADDR_F0_F0 0x2aafa0UL
+#define PRS_REG_ROCE_DEST_QP_MAX_PF 0x1f0430UL
+#define PGLUE_B_REG_PGL_ADDR_F4_F0 0x2aafa4UL
+#define IGU_REG_WRITE_DONE_PENDING 0x180900UL
+#define NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR 0x50196cUL
+#define PRS_REG_MSG_INFO 0x1f0a1cUL
+#define BAR0_MAP_REG_XSDM_RAM 0x1e00000UL
+
+/* 8.18.7.0 FW */
+#define BRB_REG_INT_MASK_10 0x3401b8UL
+
+#define IGU_REG_PRODUCER_MEMORY 0x182000UL
+#define IGU_REG_CONSUMER_MEM 0x183000UL
+
+#define CDU_REG_CCFC_CTX_VALID0 0x580400UL
+#define CDU_REG_CCFC_CTX_VALID1 0x580404UL
+#define CDU_REG_TCFC_CTX_VALID0 0x580408UL
+
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5 0x10092cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5 0x100930UL
+#define MISCS_REG_RESET_PL_HV_2_K2_E5 0x009150UL
+#define CNIG_REG_NW_PORT_MODE_BB 0x218200UL
+#define CNIG_REG_PMEG_IF_CMD_BB 0x21821cUL
+#define CNIG_REG_PMEG_IF_ADDR_BB 0x218224UL
+#define CNIG_REG_PMEG_IF_WRDATA_BB 0x218228UL
+#define NWM_REG_MAC0_K2_E5 0x800400UL
+#define CNIG_REG_NIG_PORT0_CONF_K2_E5 0x218200UL
+#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_K2_E5_SHIFT 0
+#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_K2_E5_SHIFT 1
+#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_K2_E5_SHIFT 3
+#define ETH_MAC_REG_XIF_MODE_K2_E5 0x000080UL
+#define ETH_MAC_REG_XIF_MODE_XGMII_K2_E5_SHIFT 0
+#define ETH_MAC_REG_FRM_LENGTH_K2_E5 0x000014UL
+#define ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_K2_E5_SHIFT 0
+#define ETH_MAC_REG_TX_IPG_LENGTH_K2_E5 0x000044UL
+#define ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_K2_E5_SHIFT 0
+#define ETH_MAC_REG_RX_FIFO_SECTIONS_K2_E5 0x00001cUL
+#define ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_K2_E5_SHIFT 0
+#define ETH_MAC_REG_TX_FIFO_SECTIONS_K2_E5 0x000020UL
+#define ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_K2_E5_SHIFT 16
+#define ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_K2_E5_SHIFT 0
+#define ETH_MAC_REG_COMMAND_CONFIG_K2_E5 0x000008UL
+#define MISC_REG_XMAC_CORE_PORT_MODE_BB 0x008c08UL
+#define MISC_REG_XMAC_PHY_PORT_MODE_BB 0x008c04UL
+#define XMAC_REG_MODE_BB 0x210008UL
+#define XMAC_REG_RX_MAX_SIZE_BB 0x210040UL
+#define XMAC_REG_TX_CTRL_LO_BB 0x210020UL
+#define XMAC_REG_CTRL_BB 0x210000UL
+#define XMAC_REG_CTRL_TX_EN_BB (0x1 << 0)
+#define XMAC_REG_CTRL_RX_EN_BB (0x1 << 1)
+#define XMAC_REG_RX_CTRL_BB 0x210030UL
+#define XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE_BB (0x1 << 12)
+
+#define PGLUE_B_REG_PGL_ADDR_E8_F0_K2_E5 0x2aaf98UL
+#define PGLUE_B_REG_PGL_ADDR_EC_F0_K2_E5 0x2aaf9cUL
+#define PGLUE_B_REG_PGL_ADDR_F0_F0_K2_E5 0x2aafa0UL
+#define PGLUE_B_REG_PGL_ADDR_F4_F0_K2_E5 0x2aafa4UL
+#define PGLUE_B_REG_PGL_ADDR_88_F0_BB 0x2aa404UL
+#define PGLUE_B_REG_PGL_ADDR_8C_F0_BB 0x2aa408UL
+#define PGLUE_B_REG_PGL_ADDR_90_F0_BB 0x2aa40cUL
+#define PGLUE_B_REG_PGL_ADDR_94_F0_BB 0x2aa410UL
+#define MISCS_REG_FUNCTION_HIDE_BB_K2 0x0096f0UL
+#define PCIE_REG_PRTY_MASK_K2_E5 0x0547b4UL
+#define PGLUE_B_REG_VF_BAR0_SIZE_K2_E5 0x2aaeb4UL
+
+#define PRS_REG_OUTPUT_FORMAT_4_0_BB_K2 0x1f099cUL
diff --git a/src/seastar/dpdk/drivers/net/qede/qede_eth_if.c b/src/seastar/dpdk/drivers/net/qede/qede_eth_if.c
new file mode 100644
index 00000000..a3c0b137
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/qede_eth_if.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "qede_ethdev.h"
+
+static int
+qed_start_vport(struct ecore_dev *edev, struct qed_start_vport_params *p_params)
+{
+ int rc, i;
+
+ for_each_hwfn(edev, i) {
+ struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
+ u8 tx_switching = 0;
+ struct ecore_sp_vport_start_params start = { 0 };
+
+ start.tpa_mode = p_params->enable_lro ? ECORE_TPA_MODE_RSC :
+ ECORE_TPA_MODE_NONE;
+ start.remove_inner_vlan = p_params->remove_inner_vlan;
+ start.tx_switching = tx_switching;
+ start.only_untagged = false; /* do not restrict to untagged frames */
+ start.drop_ttl0 = p_params->drop_ttl0;
+ start.concrete_fid = p_hwfn->hw_info.concrete_fid;
+ start.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ start.handle_ptp_pkts = p_params->handle_ptp_pkts;
+ start.vport_id = p_params->vport_id;
+ start.mtu = p_params->mtu;
+ /* @DPDK - Disable FW placement */
+ start.zero_placement_offset = 1;
+
+ rc = ecore_sp_vport_start(p_hwfn, &start);
+ if (rc) {
+ DP_ERR(edev, "Failed to start VPORT\n");
+ return rc;
+ }
+
+ DP_VERBOSE(edev, ECORE_MSG_SPQ,
+ "Started V-PORT %d with MTU %d\n",
+ p_params->vport_id, p_params->mtu);
+ }
+
+ ecore_reset_vport_stats(edev);
+
+ return 0;
+}
+
+static int qed_stop_vport(struct ecore_dev *edev, uint8_t vport_id)
+{
+ int rc, i;
+
+ for_each_hwfn(edev, i) {
+ struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
+ rc = ecore_sp_vport_stop(p_hwfn,
+ p_hwfn->hw_info.opaque_fid, vport_id);
+
+ if (rc) {
+ DP_ERR(edev, "Failed to stop VPORT\n");
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int
+qed_update_vport(struct ecore_dev *edev, struct qed_update_vport_params *params)
+{
+ struct ecore_sp_vport_update_params sp_params;
+ struct ecore_rss_params sp_rss_params;
+ int rc, i;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ memset(&sp_rss_params, 0, sizeof(sp_rss_params));
+
+ /* Translate protocol params into sp params */
+ sp_params.vport_id = params->vport_id;
+ sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
+ sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
+ sp_params.vport_active_rx_flg = params->vport_active_flg;
+ sp_params.vport_active_tx_flg = params->vport_active_flg;
+ sp_params.update_inner_vlan_removal_flg =
+ params->update_inner_vlan_removal_flg;
+ sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;
+ sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
+ sp_params.tx_switching_flg = params->tx_switching_flg;
+ sp_params.accept_any_vlan = params->accept_any_vlan;
+ sp_params.update_accept_any_vlan_flg =
+ params->update_accept_any_vlan_flg;
+ sp_params.mtu = params->mtu;
+ sp_params.sge_tpa_params = params->sge_tpa_params;
+
+ for_each_hwfn(edev, i) {
+ struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
+
+ sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &sp_params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc) {
+ DP_ERR(edev, "Failed to update VPORT\n");
+ return rc;
+ }
+
+ DP_VERBOSE(edev, ECORE_MSG_SPQ,
+ "Updated V-PORT %d: active_flag %d [update %d]\n",
+ params->vport_id, params->vport_active_flg,
+ params->update_vport_active_flg);
+ }
+
+ return 0;
+}
+
+static int
+qed_start_rxq(struct ecore_dev *edev,
+ uint8_t rss_num,
+ struct ecore_queue_start_common_params *p_params,
+ uint16_t bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ uint16_t cqe_pbl_size,
+ struct ecore_rxq_start_ret_params *ret_params)
+{
+ struct ecore_hwfn *p_hwfn;
+ int rc, hwfn_index;
+
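+ /* For 100G (dual-hwfn) devices the queues are spread across both
+  * engines: pick the hwfn that owns this queue and convert the
+  * global queue id into a per-engine index.
+  */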
+ hwfn_index = rss_num % edev->num_hwfns;
+ p_hwfn = &edev->hwfns[hwfn_index];
+
+ p_params->queue_id = p_params->queue_id / edev->num_hwfns;
+ p_params->stats_id = p_params->vport_id;
+
+ rc = ecore_eth_rx_queue_start(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ p_params,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr,
+ cqe_pbl_size,
+ ret_params);
+
+ if (rc) {
+ DP_ERR(edev, "Failed to start RXQ#%d\n", p_params->queue_id);
+ return rc;
+ }
+
+ DP_VERBOSE(edev, ECORE_MSG_SPQ,
+ "Started RX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
+ p_params->queue_id, rss_num, p_params->vport_id,
+ p_params->sb);
+
+ return 0;
+}
+
+static int
+qed_stop_rxq(struct ecore_dev *edev, uint8_t rss_id, void *handle)
+{
+ int rc, hwfn_index;
+ struct ecore_hwfn *p_hwfn;
+
+ hwfn_index = rss_id % edev->num_hwfns;
+ p_hwfn = &edev->hwfns[hwfn_index];
+
+ rc = ecore_eth_rx_queue_stop(p_hwfn, handle, true, false);
+ if (rc) {
+ DP_ERR(edev, "Failed to stop RXQ#%02x\n", rss_id);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int
+qed_start_txq(struct ecore_dev *edev,
+ uint8_t rss_num,
+ struct ecore_queue_start_common_params *p_params,
+ dma_addr_t pbl_addr,
+ uint16_t pbl_size,
+ struct ecore_txq_start_ret_params *ret_params)
+{
+ struct ecore_hwfn *p_hwfn;
+ int rc, hwfn_index;
+
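+ /* Same engine selection and queue-id translation as in
+  * qed_start_rxq().
+  */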
+ hwfn_index = rss_num % edev->num_hwfns;
+ p_hwfn = &edev->hwfns[hwfn_index];
+
+ p_params->queue_id = p_params->queue_id / edev->num_hwfns;
+ p_params->stats_id = p_params->vport_id;
+
+ rc = ecore_eth_tx_queue_start(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ p_params, 0 /* tc */,
+ pbl_addr, pbl_size,
+ ret_params);
+
+ if (rc) {
+ DP_ERR(edev, "Failed to start TXQ#%d\n", p_params->queue_id);
+ return rc;
+ }
+
+ DP_VERBOSE(edev, ECORE_MSG_SPQ,
+ "Started TX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
+ p_params->queue_id, rss_num, p_params->vport_id,
+ p_params->sb);
+
+ return 0;
+}
+
+static int
+qed_stop_txq(struct ecore_dev *edev, uint8_t rss_id, void *handle)
+{
+ struct ecore_hwfn *p_hwfn;
+ int rc, hwfn_index;
+
+ hwfn_index = rss_id % edev->num_hwfns;
+ p_hwfn = &edev->hwfns[hwfn_index];
+
+ rc = ecore_eth_tx_queue_stop(p_hwfn, handle);
+ if (rc) {
+ DP_ERR(edev, "Failed to stop TXQ#%02x\n", rss_id);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int
+qed_fp_cqe_completion(struct ecore_dev *edev,
+ uint8_t rss_id, struct eth_slow_path_rx_cqe *cqe)
+{
+ return ecore_eth_cqe_completion(&edev->hwfns[rss_id % edev->num_hwfns],
+ cqe);
+}
+
+static int qed_fastpath_stop(struct ecore_dev *edev)
+{
+ ecore_hw_stop_fastpath(edev);
+
+ return 0;
+}
+
+static void qed_fastpath_start(struct ecore_dev *edev)
+{
+ struct ecore_hwfn *p_hwfn;
+ int i;
+
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ ecore_hw_start_fastpath(p_hwfn);
+ }
+}
+
+static void
+qed_get_vport_stats(struct ecore_dev *edev, struct ecore_eth_stats *stats)
+{
+ ecore_get_vport_stats(edev, stats);
+}
+
+int qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
+ enum qed_filter_rx_mode_type type)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_filter_accept_flags flags;
+
+ memset(&flags, 0, sizeof(flags));
+
+ flags.update_rx_mode_config = 1;
+ flags.update_tx_mode_config = 1;
+ flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
+ ECORE_ACCEPT_MCAST_MATCHED |
+ ECORE_ACCEPT_BCAST;
+
+ flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
+ ECORE_ACCEPT_MCAST_MATCHED |
+ ECORE_ACCEPT_BCAST;
+
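+ /* Promiscuous modes additionally accept frames whose unicast or
+  * multicast MAC does not match any configured filter.
+  */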
+ if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
+ flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
+ if (IS_VF(edev)) {
+ flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
+ DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
+ }
+ } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
+ flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
+ } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
+ QED_FILTER_RX_MODE_TYPE_PROMISC)) {
+ flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
+ ECORE_ACCEPT_MCAST_UNMATCHED;
+ }
+
+ return ecore_filter_accept_cmd(edev, 0, flags, false, false,
+ ECORE_SPQ_MODE_CB, NULL);
+}
+
+static const struct qed_eth_ops qed_eth_ops_pass = {
+ INIT_STRUCT_FIELD(common, &qed_common_ops_pass),
+ INIT_STRUCT_FIELD(fill_dev_info, &qed_fill_eth_dev_info),
+ INIT_STRUCT_FIELD(vport_start, &qed_start_vport),
+ INIT_STRUCT_FIELD(vport_stop, &qed_stop_vport),
+ INIT_STRUCT_FIELD(vport_update, &qed_update_vport),
+ INIT_STRUCT_FIELD(q_rx_start, &qed_start_rxq),
+ INIT_STRUCT_FIELD(q_tx_start, &qed_start_txq),
+ INIT_STRUCT_FIELD(q_rx_stop, &qed_stop_rxq),
+ INIT_STRUCT_FIELD(q_tx_stop, &qed_stop_txq),
+ INIT_STRUCT_FIELD(eth_cqe_completion, &qed_fp_cqe_completion),
+ INIT_STRUCT_FIELD(fastpath_stop, &qed_fastpath_stop),
+ INIT_STRUCT_FIELD(fastpath_start, &qed_fastpath_start),
+ INIT_STRUCT_FIELD(get_vport_stats, &qed_get_vport_stats),
+};
+
+const struct qed_eth_ops *qed_get_eth_ops(void)
+{
+ return &qed_eth_ops_pass;
+}
diff --git a/src/seastar/dpdk/drivers/net/qede/qede_eth_if.h b/src/seastar/dpdk/drivers/net/qede/qede_eth_if.h
new file mode 100644
index 00000000..097e0257
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/qede_eth_if.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef _QEDE_ETH_IF_H
+#define _QEDE_ETH_IF_H
+
+#include "qede_if.h"
+
+/* forward declaration */
+struct eth_slow_path_rx_cqe;
+
+#define INIT_STRUCT_FIELD(field, value) .field = value
+
+#define QED_ETH_INTERFACE_VERSION 609
+
+#define QEDE_MAX_MCAST_FILTERS 64
+
+enum qed_filter_rx_mode_type {
+ QED_FILTER_RX_MODE_TYPE_REGULAR,
+ QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC,
+ QED_FILTER_RX_MODE_TYPE_PROMISC,
+};
+
+enum qed_filter_type {
+ QED_FILTER_TYPE_UCAST,
+ QED_FILTER_TYPE_MCAST,
+ QED_FILTER_TYPE_RX_MODE,
+ QED_MAX_FILTER_TYPES,
+};
+
+struct qed_dev_eth_info {
+ struct qed_dev_info common;
+
+ uint8_t num_queues;
+ uint8_t num_tc;
+
+ struct ether_addr port_mac;
+ uint16_t num_vlan_filters;
+ uint32_t num_mac_filters;
+
+ /* Legacy VF - this affects the datapath */
+ bool is_legacy;
+};
+
+struct qed_update_vport_params {
+ uint8_t vport_id;
+ uint8_t update_vport_active_flg;
+ uint8_t vport_active_flg;
+ uint8_t update_inner_vlan_removal_flg;
+ uint8_t inner_vlan_removal_flg;
+ uint8_t update_tx_switching_flg;
+ uint8_t tx_switching_flg;
+ uint8_t update_accept_any_vlan_flg;
+ uint8_t accept_any_vlan;
+ uint8_t update_rss_flg;
+ uint16_t mtu;
+ struct ecore_sge_tpa_params *sge_tpa_params;
+};
+
+struct qed_start_vport_params {
+ bool remove_inner_vlan;
+ bool handle_ptp_pkts;
+ bool enable_lro;
+ bool drop_ttl0;
+ uint8_t vport_id;
+ uint16_t mtu;
+ bool clear_stats;
+};
+
+struct qed_eth_ops {
+ const struct qed_common_ops *common;
+
+ int (*fill_dev_info)(struct ecore_dev *edev,
+ struct qed_dev_eth_info *info);
+
+ int (*vport_start)(struct ecore_dev *edev,
+ struct qed_start_vport_params *params);
+
+ int (*vport_stop)(struct ecore_dev *edev, uint8_t vport_id);
+
+ int (*vport_update)(struct ecore_dev *edev,
+ struct qed_update_vport_params *params);
+
+ int (*q_rx_start)(struct ecore_dev *cdev,
+ uint8_t rss_num,
+ struct ecore_queue_start_common_params *p_params,
+ uint16_t bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ uint16_t cqe_pbl_size,
+ struct ecore_rxq_start_ret_params *ret_params);
+
+ int (*q_rx_stop)(struct ecore_dev *edev,
+ uint8_t rss_id, void *handle);
+
+ int (*q_tx_start)(struct ecore_dev *edev,
+ uint8_t rss_num,
+ struct ecore_queue_start_common_params *p_params,
+ dma_addr_t pbl_addr,
+ uint16_t pbl_size,
+ struct ecore_txq_start_ret_params *ret_params);
+
+ int (*q_tx_stop)(struct ecore_dev *edev,
+ uint8_t rss_id, void *handle);
+
+ int (*eth_cqe_completion)(struct ecore_dev *edev,
+ uint8_t rss_id,
+ struct eth_slow_path_rx_cqe *cqe);
+
+ int (*fastpath_stop)(struct ecore_dev *edev);
+
+ void (*fastpath_start)(struct ecore_dev *edev);
+
+ void (*get_vport_stats)(struct ecore_dev *edev,
+ struct ecore_eth_stats *stats);
+};
+
+/* externs */
+
+extern const struct qed_common_ops qed_common_ops_pass;
+
+const struct qed_eth_ops *qed_get_eth_ops(void);
+
+int qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
+ enum qed_filter_rx_mode_type type);
+
+#endif /* _QEDE_ETH_IF_H */
diff --git a/src/seastar/dpdk/drivers/net/qede/qede_ethdev.c b/src/seastar/dpdk/drivers/net/qede/qede_ethdev.c
new file mode 100644
index 00000000..7501eb20
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/qede_ethdev.c
@@ -0,0 +1,2470 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "qede_ethdev.h"
+#include <rte_alarm.h>
+#include <rte_version.h>
+
+/* Globals */
+static const struct qed_eth_ops *qed_ops;
+static int64_t timer_period = 1;
+
+/* VXLAN tunnel classification mapping */
+const struct _qede_vxlan_tunn_types {
+ uint16_t rte_filter_type;
+ enum ecore_filter_ucast_type qede_type;
+ enum ecore_tunn_clss qede_tunn_clss;
+ const char *string;
+} qede_tunn_types[] = {
+ {
+ ETH_TUNNEL_FILTER_OMAC,
+ ECORE_FILTER_MAC,
+ ECORE_TUNN_CLSS_MAC_VLAN,
+ "outer-mac"
+ },
+ {
+ ETH_TUNNEL_FILTER_TENID,
+ ECORE_FILTER_VNI,
+ ECORE_TUNN_CLSS_MAC_VNI,
+ "vni"
+ },
+ {
+ ETH_TUNNEL_FILTER_IMAC,
+ ECORE_FILTER_INNER_MAC,
+ ECORE_TUNN_CLSS_INNER_MAC_VLAN,
+ "inner-mac"
+ },
+ {
+ ETH_TUNNEL_FILTER_IVLAN,
+ ECORE_FILTER_INNER_VLAN,
+ ECORE_TUNN_CLSS_INNER_MAC_VLAN,
+ "inner-vlan"
+ },
+ {
+ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
+ ECORE_FILTER_MAC_VNI_PAIR,
+ ECORE_TUNN_CLSS_MAC_VNI,
+ "outer-mac and vni"
+ },
+ {
+ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "outer-mac and inner-mac"
+ },
+ {
+ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "outer-mac and inner-vlan"
+ },
+ {
+ ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
+ ECORE_FILTER_INNER_MAC_VNI_PAIR,
+ ECORE_TUNN_CLSS_INNER_MAC_VNI,
+ "vni and inner-mac",
+ },
+ {
+ ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "vni and inner-vlan",
+ },
+ {
+ ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
+ ECORE_FILTER_INNER_PAIR,
+ ECORE_TUNN_CLSS_INNER_MAC_VLAN,
+ "inner-mac and inner-vlan",
+ },
+ {
+ ETH_TUNNEL_FILTER_OIP,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "outer-IP"
+ },
+ {
+ ETH_TUNNEL_FILTER_IIP,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "inner-IP"
+ },
+ {
+ RTE_TUNNEL_FILTER_IMAC_IVLAN,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "IMAC_IVLAN"
+ },
+ {
+ RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "IMAC_IVLAN_TENID"
+ },
+ {
+ RTE_TUNNEL_FILTER_IMAC_TENID,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "IMAC_TENID"
+ },
+ {
+ RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "OMAC_TENID_IMAC"
+ },
+};
+
+struct rte_qede_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ uint64_t offset;
+};
+
+static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
+ {"rx_unicast_bytes", offsetof(struct ecore_eth_stats, rx_ucast_bytes)},
+ {"rx_multicast_bytes",
+ offsetof(struct ecore_eth_stats, rx_mcast_bytes)},
+ {"rx_broadcast_bytes",
+ offsetof(struct ecore_eth_stats, rx_bcast_bytes)},
+ {"rx_unicast_packets", offsetof(struct ecore_eth_stats, rx_ucast_pkts)},
+ {"rx_multicast_packets",
+ offsetof(struct ecore_eth_stats, rx_mcast_pkts)},
+ {"rx_broadcast_packets",
+ offsetof(struct ecore_eth_stats, rx_bcast_pkts)},
+
+ {"tx_unicast_bytes", offsetof(struct ecore_eth_stats, tx_ucast_bytes)},
+ {"tx_multicast_bytes",
+ offsetof(struct ecore_eth_stats, tx_mcast_bytes)},
+ {"tx_broadcast_bytes",
+ offsetof(struct ecore_eth_stats, tx_bcast_bytes)},
+ {"tx_unicast_packets", offsetof(struct ecore_eth_stats, tx_ucast_pkts)},
+ {"tx_multicast_packets",
+ offsetof(struct ecore_eth_stats, tx_mcast_pkts)},
+ {"tx_broadcast_packets",
+ offsetof(struct ecore_eth_stats, tx_bcast_pkts)},
+
+ {"rx_64_byte_packets",
+ offsetof(struct ecore_eth_stats, rx_64_byte_packets)},
+ {"rx_65_to_127_byte_packets",
+ offsetof(struct ecore_eth_stats, rx_65_to_127_byte_packets)},
+ {"rx_128_to_255_byte_packets",
+ offsetof(struct ecore_eth_stats, rx_128_to_255_byte_packets)},
+ {"rx_256_to_511_byte_packets",
+ offsetof(struct ecore_eth_stats, rx_256_to_511_byte_packets)},
+ {"rx_512_to_1023_byte_packets",
+ offsetof(struct ecore_eth_stats, rx_512_to_1023_byte_packets)},
+ {"rx_1024_to_1518_byte_packets",
+ offsetof(struct ecore_eth_stats, rx_1024_to_1518_byte_packets)},
+ {"rx_1519_to_1522_byte_packets",
+ offsetof(struct ecore_eth_stats, rx_1519_to_1522_byte_packets)},
+ {"rx_1519_to_2047_byte_packets",
+ offsetof(struct ecore_eth_stats, rx_1519_to_2047_byte_packets)},
+ {"rx_2048_to_4095_byte_packets",
+ offsetof(struct ecore_eth_stats, rx_2048_to_4095_byte_packets)},
+ {"rx_4096_to_9216_byte_packets",
+ offsetof(struct ecore_eth_stats, rx_4096_to_9216_byte_packets)},
+ {"rx_9217_to_16383_byte_packets",
+ offsetof(struct ecore_eth_stats,
+ rx_9217_to_16383_byte_packets)},
+ {"tx_64_byte_packets",
+ offsetof(struct ecore_eth_stats, tx_64_byte_packets)},
+ {"tx_65_to_127_byte_packets",
+ offsetof(struct ecore_eth_stats, tx_65_to_127_byte_packets)},
+ {"tx_128_to_255_byte_packets",
+ offsetof(struct ecore_eth_stats, tx_128_to_255_byte_packets)},
+ {"tx_256_to_511_byte_packets",
+ offsetof(struct ecore_eth_stats, tx_256_to_511_byte_packets)},
+ {"tx_512_to_1023_byte_packets",
+ offsetof(struct ecore_eth_stats, tx_512_to_1023_byte_packets)},
+ {"tx_1024_to_1518_byte_packets",
+ offsetof(struct ecore_eth_stats, tx_1024_to_1518_byte_packets)},
+ {"tx_1519_to_2047_byte_packets",
+ offsetof(struct ecore_eth_stats, tx_1519_to_2047_byte_packets)},
+ {"tx_2048_to_4095_byte_packets",
+ offsetof(struct ecore_eth_stats, tx_2048_to_4095_byte_packets)},
+ {"tx_4096_to_9216_byte_packets",
+ offsetof(struct ecore_eth_stats, tx_4096_to_9216_byte_packets)},
+ {"tx_9217_to_16383_byte_packets",
+ offsetof(struct ecore_eth_stats,
+ tx_9217_to_16383_byte_packets)},
+
+ {"rx_mac_control_frames",
+ offsetof(struct ecore_eth_stats, rx_mac_crtl_frames)},
+ {"tx_mac_control_frames",
+ offsetof(struct ecore_eth_stats, tx_mac_ctrl_frames)},
+ {"rx_pause_frames", offsetof(struct ecore_eth_stats, rx_pause_frames)},
+ {"tx_pause_frames", offsetof(struct ecore_eth_stats, tx_pause_frames)},
+ {"rx_priority_flow_control_frames",
+ offsetof(struct ecore_eth_stats, rx_pfc_frames)},
+ {"tx_priority_flow_control_frames",
+ offsetof(struct ecore_eth_stats, tx_pfc_frames)},
+
+ {"rx_crc_errors", offsetof(struct ecore_eth_stats, rx_crc_errors)},
+ {"rx_align_errors", offsetof(struct ecore_eth_stats, rx_align_errors)},
+ {"rx_carrier_errors",
+ offsetof(struct ecore_eth_stats, rx_carrier_errors)},
+ {"rx_oversize_packet_errors",
+ offsetof(struct ecore_eth_stats, rx_oversize_packets)},
+ {"rx_jabber_errors", offsetof(struct ecore_eth_stats, rx_jabbers)},
+ {"rx_undersize_packet_errors",
+ offsetof(struct ecore_eth_stats, rx_undersize_packets)},
+ {"rx_fragments", offsetof(struct ecore_eth_stats, rx_fragments)},
+ {"rx_host_buffer_not_available",
+ offsetof(struct ecore_eth_stats, no_buff_discards)},
+ /* Number of packets discarded because they are bigger than MTU */
+ {"rx_packet_too_big_discards",
+ offsetof(struct ecore_eth_stats, packet_too_big_discard)},
+ {"rx_ttl_zero_discards",
+ offsetof(struct ecore_eth_stats, ttl0_discard)},
+ {"rx_multi_function_tag_filter_discards",
+ offsetof(struct ecore_eth_stats, mftag_filter_discards)},
+ {"rx_mac_filter_discards",
+ offsetof(struct ecore_eth_stats, mac_filter_discards)},
+ {"rx_hw_buffer_truncates",
+ offsetof(struct ecore_eth_stats, brb_truncates)},
+ {"rx_hw_buffer_discards",
+ offsetof(struct ecore_eth_stats, brb_discards)},
+ {"tx_lpi_entry_count",
+ offsetof(struct ecore_eth_stats, tx_lpi_entry_count)},
+ {"tx_total_collisions",
+ offsetof(struct ecore_eth_stats, tx_total_collisions)},
+ {"tx_error_drop_packets",
+ offsetof(struct ecore_eth_stats, tx_err_drop_pkts)},
+
+ {"rx_mac_bytes", offsetof(struct ecore_eth_stats, rx_mac_bytes)},
+ {"rx_mac_unicast_packets",
+ offsetof(struct ecore_eth_stats, rx_mac_uc_packets)},
+ {"rx_mac_multicast_packets",
+ offsetof(struct ecore_eth_stats, rx_mac_mc_packets)},
+ {"rx_mac_broadcast_packets",
+ offsetof(struct ecore_eth_stats, rx_mac_bc_packets)},
+ {"rx_mac_frames_ok",
+ offsetof(struct ecore_eth_stats, rx_mac_frames_ok)},
+ {"tx_mac_bytes", offsetof(struct ecore_eth_stats, tx_mac_bytes)},
+ {"tx_mac_unicast_packets",
+ offsetof(struct ecore_eth_stats, tx_mac_uc_packets)},
+ {"tx_mac_multicast_packets",
+ offsetof(struct ecore_eth_stats, tx_mac_mc_packets)},
+ {"tx_mac_broadcast_packets",
+ offsetof(struct ecore_eth_stats, tx_mac_bc_packets)},
+
+ {"lro_coalesced_packets",
+ offsetof(struct ecore_eth_stats, tpa_coalesced_pkts)},
+ {"lro_coalesced_events",
+ offsetof(struct ecore_eth_stats, tpa_coalesced_events)},
+ {"lro_aborts_num",
+ offsetof(struct ecore_eth_stats, tpa_aborts_num)},
+ {"lro_not_coalesced_packets",
+ offsetof(struct ecore_eth_stats, tpa_not_coalesced_pkts)},
+ {"lro_coalesced_bytes",
+ offsetof(struct ecore_eth_stats, tpa_coalesced_bytes)},
+};
+
+static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
+ {"rx_q_segments",
+ offsetof(struct qede_rx_queue, rx_segs)},
+ {"rx_q_hw_errors",
+ offsetof(struct qede_rx_queue, rx_hw_errors)},
+ {"rx_q_allocation_errors",
+ offsetof(struct qede_rx_queue, rx_alloc_errors)}
+};
+
+static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
+{
+ ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
+}
+
+static void
+qede_interrupt_handler(void *param)
+{
+ struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+
+ qede_interrupt_action(ECORE_LEADING_HWFN(edev));
+ if (rte_intr_enable(eth_dev->intr_handle))
+ DP_ERR(edev, "rte_intr_enable failed\n");
+}
+
+static void
+qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
+{
+ rte_memcpy(&qdev->dev_info, info, sizeof(*info));
+ qdev->num_tc = qdev->dev_info.num_tc;
+ qdev->ops = qed_ops;
+}
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
+static void qede_print_adapter_info(struct qede_dev *qdev)
+{
+ struct ecore_dev *edev = &qdev->edev;
+ struct qed_dev_info *info = &qdev->dev_info.common;
+ static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
+ static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];
+
+ DP_INFO(edev, "*********************************\n");
+ DP_INFO(edev, " DPDK version:%s\n", rte_version());
+ DP_INFO(edev, " Chip details : %s%d\n",
+ ECORE_IS_BB(edev) ? "BB" : "AH",
+ CHIP_REV_IS_A0(edev) ? 0 : 1);
+ snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
+ info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
+ snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
+ ver_str, QEDE_PMD_VERSION);
+ DP_INFO(edev, " Driver version : %s\n", drv_ver);
+ DP_INFO(edev, " Firmware version : %s\n", ver_str);
+
+ snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
+ "%d.%d.%d.%d",
+ (info->mfw_rev >> 24) & 0xff,
+ (info->mfw_rev >> 16) & 0xff,
+ (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
+ DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
+ DP_INFO(edev, " Firmware file : %s\n", fw_file);
+ DP_INFO(edev, "*********************************\n");
+}
+#endif
+
+static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
+{
+ memset(ucast, 0, sizeof(struct ecore_filter_ucast));
+ ucast->is_rx_filter = true;
+ ucast->is_tx_filter = true;
+ /* ucast->assert_on_error = true; - For debug */
+}
+
+static void qede_set_cmn_tunn_param(struct ecore_tunnel_info *p_tunn,
+ uint8_t clss, bool mode, bool mask)
+{
+ memset(p_tunn, 0, sizeof(struct ecore_tunnel_info));
+ p_tunn->vxlan.b_update_mode = mode;
+ p_tunn->vxlan.b_mode_enabled = mask;
+ p_tunn->b_update_rx_cls = true;
+ p_tunn->b_update_tx_cls = true;
+ p_tunn->vxlan.tun_cls = clss;
+}
+
+static int
+qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
+ bool add)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct qede_ucast_entry *tmp = NULL;
+ struct qede_ucast_entry *u;
+ struct ether_addr *mac_addr;
+
+ mac_addr = (struct ether_addr *)ucast->mac;
+ if (add) {
+ SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
+ if ((memcmp(mac_addr, &tmp->mac,
+ ETHER_ADDR_LEN) == 0) &&
+ ucast->vlan == tmp->vlan) {
+ DP_ERR(edev, "Unicast MAC is already added"
+ " with vlan = %u, vni = %u\n",
+ ucast->vlan, ucast->vni);
+ return -EEXIST;
+ }
+ }
+ u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
+ RTE_CACHE_LINE_SIZE);
+ if (!u) {
+ DP_ERR(edev, "Did not allocate memory for ucast\n");
+ return -ENOMEM;
+ }
+ ether_addr_copy(mac_addr, &u->mac);
+ u->vlan = ucast->vlan;
+ u->vni = ucast->vni;
+ SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
+ qdev->num_uc_addr++;
+ } else {
+ SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
+ if ((memcmp(mac_addr, &tmp->mac,
+ ETHER_ADDR_LEN) == 0) &&
+ ucast->vlan == tmp->vlan &&
+ ucast->vni == tmp->vni)
+ break;
+ }
+ if (tmp == NULL) {
+ DP_INFO(edev, "Unicast MAC is not found\n");
+ return -EINVAL;
+ }
+ SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list);
+ qdev->num_uc_addr--;
+ }
+
+ return 0;
+}
+
+static int
+qede_mcast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *mcast,
+ bool add)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ether_addr *mac_addr;
+ struct qede_mcast_entry *tmp = NULL;
+ struct qede_mcast_entry *m;
+
+ mac_addr = (struct ether_addr *)mcast->mac;
+ if (add) {
+ SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
+ if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) {
+ DP_ERR(edev,
+ "Multicast MAC is already added\n");
+ return -EEXIST;
+ }
+ }
+ m = rte_malloc(NULL, sizeof(struct qede_mcast_entry),
+ RTE_CACHE_LINE_SIZE);
+ if (!m) {
+ DP_ERR(edev,
+ "Did not allocate memory for mcast\n");
+ return -ENOMEM;
+ }
+ ether_addr_copy(mac_addr, &m->mac);
+ SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
+ qdev->num_mc_addr++;
+ } else {
+ SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
+ if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0)
+ break;
+ }
+ if (tmp == NULL) {
+ DP_INFO(edev, "Multicast mac is not found\n");
+ return -EINVAL;
+ }
+ SLIST_REMOVE(&qdev->mc_list_head, tmp,
+ qede_mcast_entry, list);
+ qdev->num_mc_addr--;
+ }
+
+ return 0;
+}
+
+static enum _ecore_status_t
+qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
+ bool add)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ enum _ecore_status_t rc;
+ struct ecore_filter_mcast mcast;
+ struct qede_mcast_entry *tmp;
+ uint16_t j = 0;
+
+ /* Multicast */
+ if (is_multicast_ether_addr((struct ether_addr *)ucast->mac)) {
+ if (add) {
+ if (qdev->num_mc_addr >= ECORE_MAX_MC_ADDRS) {
+ DP_ERR(edev,
+ "Mcast filter table limit exceeded, "
+ "Please enable mcast promisc mode\n");
+ return -ECORE_INVAL;
+ }
+ }
+ rc = qede_mcast_filter(eth_dev, ucast, add);
+ if (rc == 0) {
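+ /* Reprogram the complete multicast list in a single command so
+  * the device filter table matches the driver's list.
+  */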
+ DP_INFO(edev, "num_mc_addrs = %u\n", qdev->num_mc_addr);
+ memset(&mcast, 0, sizeof(mcast));
+ mcast.num_mc_addrs = qdev->num_mc_addr;
+ mcast.opcode = ECORE_FILTER_ADD;
+ SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
+ ether_addr_copy(&tmp->mac,
+ (struct ether_addr *)&mcast.mac[j]);
+ j++;
+ }
+ rc = ecore_filter_mcast_cmd(edev, &mcast,
+ ECORE_SPQ_MODE_CB, NULL);
+ }
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to add multicast filter"
+ " rc = %d, op = %d\n", rc, add);
+ }
+ } else { /* Unicast */
+ if (add) {
+ if (qdev->num_uc_addr >=
+ qdev->dev_info.num_mac_filters) {
+ DP_ERR(edev,
+ "Ucast filter table limit exceeded,"
+ " Please enable promisc mode\n");
+ return -ECORE_INVAL;
+ }
+ }
+ rc = qede_ucast_filter(eth_dev, ucast, add);
+ if (rc == 0)
+ rc = ecore_filter_ucast_cmd(edev, ucast,
+ ECORE_SPQ_MODE_CB, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
+ rc, add);
+ }
+ }
+
+ return rc;
+}
+
+static int
+qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
+ __rte_unused uint32_t index, __rte_unused uint32_t pool)
+{
+ struct ecore_filter_ucast ucast;
+ int re;
+
+ qede_set_ucast_cmn_params(&ucast);
+ ucast.type = ECORE_FILTER_MAC;
+ ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
+ re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);
+ return re;
+}
+
+static void
+qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ struct ecore_filter_ucast ucast;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ if (index >= qdev->dev_info.num_mac_filters) {
+ DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
+ index, qdev->dev_info.num_mac_filters);
+ return;
+ }
+
+ qede_set_ucast_cmn_params(&ucast);
+ ucast.opcode = ECORE_FILTER_REMOVE;
+ ucast.type = ECORE_FILTER_MAC;
+
+ /* Use the index maintained by rte */
+ ether_addr_copy(&eth_dev->data->mac_addrs[index],
+ (struct ether_addr *)&ucast.mac);
+
+ ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
+}
+
+static void
+qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+ if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
+ mac_addr->addr_bytes)) {
+ DP_ERR(edev, "Setting MAC address is not allowed\n");
+ ether_addr_copy(&qdev->primary_mac,
+ &eth_dev->data->mac_addrs[0]);
+ return;
+ }
+
+ qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
+}
+
+static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
+{
+ struct ecore_dev *edev = &qdev->edev;
+ struct qed_update_vport_params params = {
+ .vport_id = 0,
+ .accept_any_vlan = action,
+ .update_accept_any_vlan_flg = 1,
+ };
+ int rc;
+
+ /* Proceed only if action actually needs to be performed */
+ if (qdev->accept_any_vlan == action)
+ return;
+
+ rc = qdev->ops->vport_update(edev, &params);
+ if (rc) {
+ DP_ERR(edev, "Failed to %s accept-any-vlan\n",
+ action ? "enable" : "disable");
+ } else {
+ DP_INFO(edev, "%s accept-any-vlan\n",
+ action ? "enabled" : "disabled");
+ qdev->accept_any_vlan = action;
+ }
+}
+
+static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping)
+{
+ struct qed_update_vport_params vport_update_params;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ int rc;
+
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+ vport_update_params.vport_id = 0;
+ vport_update_params.update_inner_vlan_removal_flg = 1;
+ vport_update_params.inner_vlan_removal_flg = set_stripping;
+ rc = qdev->ops->vport_update(edev, &vport_update_params);
+ if (rc) {
+ DP_ERR(edev, "Update V-PORT failed %d\n", rc);
+ return rc;
+ }
+ qdev->vlan_strip_flg = set_stripping;
+
+ return 0;
+}
+
+static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
+ uint16_t vlan_id, int on)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct qed_dev_eth_info *dev_info = &qdev->dev_info;
+ struct qede_vlan_entry *tmp = NULL;
+ struct qede_vlan_entry *vlan;
+ struct ecore_filter_ucast ucast;
+ int rc;
+
+ if (on) {
+ if (qdev->configured_vlans == dev_info->num_vlan_filters) {
+ DP_ERR(edev, "Reached max VLAN filter limit"
+ " enabling accept_any_vlan\n");
+ qede_config_accept_any_vlan(qdev, true);
+ return 0;
+ }
+
+ SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
+ if (tmp->vid == vlan_id) {
+ DP_ERR(edev, "VLAN %u already configured\n",
+ vlan_id);
+ return -EEXIST;
+ }
+ }
+
+ vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry),
+ RTE_CACHE_LINE_SIZE);
+
+ if (!vlan) {
+ DP_ERR(edev, "Did not allocate memory for VLAN\n");
+ return -ENOMEM;
+ }
+
+ qede_set_ucast_cmn_params(&ucast);
+ ucast.opcode = ECORE_FILTER_ADD;
+ ucast.type = ECORE_FILTER_VLAN;
+ ucast.vlan = vlan_id;
+ rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
+ NULL);
+ if (rc != 0) {
+ DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
+ rc);
+ rte_free(vlan);
+ } else {
+ vlan->vid = vlan_id;
+ SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list);
+ qdev->configured_vlans++;
+ DP_INFO(edev, "VLAN %u added, configured_vlans %u\n",
+ vlan_id, qdev->configured_vlans);
+ }
+ } else {
+ SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
+ if (tmp->vid == vlan_id)
+ break;
+ }
+
+ if (!tmp) {
+ if (qdev->configured_vlans == 0) {
+ DP_INFO(edev,
+ "No VLAN filters configured yet\n");
+ return 0;
+ }
+
+ DP_ERR(edev, "VLAN %u not configured\n", vlan_id);
+ return -EINVAL;
+ }
+
+ SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list);
+
+ qede_set_ucast_cmn_params(&ucast);
+ ucast.opcode = ECORE_FILTER_REMOVE;
+ ucast.type = ECORE_FILTER_VLAN;
+ ucast.vlan = vlan_id;
+ rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
+ NULL);
+ if (rc != 0) {
+ DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
+ vlan_id, rc);
+ } else {
+ qdev->configured_vlans--;
+ DP_INFO(edev, "VLAN %u removed configured_vlans %u\n",
+ vlan_id, qdev->configured_vlans);
+ }
+ }
+
+ return rc;
+}
+
+static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ if (rxmode->hw_vlan_strip)
+ (void)qede_vlan_stripping(eth_dev, 1);
+ else
+ (void)qede_vlan_stripping(eth_dev, 0);
+ }
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ /* VLAN filtering kicks in when a VLAN is added */
+ if (rxmode->hw_vlan_filter) {
+ qede_vlan_filter_set(eth_dev, 0, 1);
+ } else {
+ if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
+ DP_ERR(edev,
+ " Please remove existing VLAN filters"
+ " before disabling VLAN filtering\n");
+ /* Signal app that VLAN filtering is still
+ * enabled
+ */
+ rxmode->hw_vlan_filter = true;
+ } else {
+ qede_vlan_filter_set(eth_dev, 0, 0);
+ }
+ }
+ }
+
+ if (mask & ETH_VLAN_EXTEND_MASK)
+ DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q"
+ " and classification is based on outer tag only\n");
+
+ DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
+ mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
+}
+
+static int qede_init_vport(struct qede_dev *qdev)
+{
+ struct ecore_dev *edev = &qdev->edev;
+ struct qed_start_vport_params start = {0};
+ int rc;
+
+ start.remove_inner_vlan = 1;
+ start.enable_lro = qdev->enable_lro;
+ start.mtu = ETHER_MTU + QEDE_ETH_OVERHEAD;
+ start.vport_id = 0;
+ start.drop_ttl0 = false;
+ start.clear_stats = 1;
+ start.handle_ptp_pkts = 0;
+
+ rc = qdev->ops->vport_start(edev, &start);
+ if (rc) {
+ DP_ERR(edev, "Start V-PORT failed %d\n", rc);
+ return rc;
+ }
+
+ DP_INFO(edev,
+ "Start vport ramrod passed, vport_id = %d, MTU = %u\n",
+ start.vport_id, ETHER_MTU);
+
+ return 0;
+}
+
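+/* Fill a buffer of ECORE_RSS_KEY_SIZE 32-bit words with pseudo-random
+ * data; used as the default RSS hash key when the application does not
+ * supply one.
+ */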
+static void qede_prandom_bytes(uint32_t *buff)
+{
+ uint8_t i;
+
+ srand((unsigned int)time(NULL));
+ for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
+ buff[i] = rand();
+}
+
+int qede_config_rss(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ uint32_t def_rss_key[ECORE_RSS_KEY_SIZE];
+ struct rte_eth_rss_reta_entry64 reta_conf[2];
+ struct rte_eth_rss_conf rss_conf;
+ uint32_t i, id, pos, q;
+
+ rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
+ if (!rss_conf.rss_key) {
+ DP_INFO(edev, "Applying driver default key\n");
+ rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
+ qede_prandom_bytes(&def_rss_key[0]);
+ rss_conf.rss_key = (uint8_t *)&def_rss_key[0];
+ }
+
+ /* Configure RSS hash */
+ if (qede_rss_hash_update(eth_dev, &rss_conf))
+ return -EINVAL;
+
+ /* Configure default RETA */
+ memset(reta_conf, 0, sizeof(reta_conf));
+ for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
+ reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
+
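+ /* Spread the indirection table entries round-robin over the
+  * configured Rx queues.
+  */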
+ for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
+ id = i / RTE_RETA_GROUP_SIZE;
+ pos = i % RTE_RETA_GROUP_SIZE;
+ q = i % QEDE_RSS_COUNT(qdev);
+ reta_conf[id].reta[pos] = q;
+ }
+ if (qede_rss_reta_update(eth_dev, &reta_conf[0],
+ ECORE_RSS_IND_TABLE_SIZE))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qede_dev_configure(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
+ int rc;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ /* Check requirements for 100G mode */
+ if (edev->num_hwfns > 1) {
+ if (eth_dev->data->nb_rx_queues < 2 ||
+ eth_dev->data->nb_tx_queues < 2) {
+ DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
+ return -EINVAL;
+ }
+
+ if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
+ (eth_dev->data->nb_tx_queues % 2 != 0)) {
+ DP_ERR(edev,
+ "100G mode needs even no. of RX/TX queues\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Sanity checks and throw warnings */
+ if (rxmode->enable_scatter == 1)
+ eth_dev->data->scattered_rx = 1;
+
+ if (!rxmode->hw_strip_crc)
+ DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");
+
+ if (!rxmode->hw_ip_checksum)
+ DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
+ "in hw\n");
+
+ if (rxmode->enable_lro) {
+ qdev->enable_lro = true;
+ /* Enable scatter mode for LRO */
+ if (!rxmode->enable_scatter)
+ eth_dev->data->scattered_rx = 1;
+ }
+
+ /* Check for the port restart case */
+ if (qdev->state != QEDE_DEV_INIT) {
+ rc = qdev->ops->vport_stop(edev, 0);
+ if (rc != 0)
+ return rc;
+ qede_dealloc_fp_resc(eth_dev);
+ }
+
+ qdev->fp_num_tx = eth_dev->data->nb_tx_queues;
+ qdev->fp_num_rx = eth_dev->data->nb_rx_queues;
+ qdev->num_queues = qdev->fp_num_tx + qdev->fp_num_rx;
+
+ /* Fastpath status block should be initialized before sending
+ * VPORT-START in the case of VF. Anyway, do it for both VF/PF.
+ */
+ rc = qede_alloc_fp_resc(qdev);
+ if (rc != 0)
+ return rc;
+
+ /* Issue VPORT-START with default config values to allow
+ * other port configurations early on.
+ */
+ rc = qede_init_vport(qdev);
+ if (rc != 0)
+ return rc;
+
+ if (!(rxmode->mq_mode == ETH_MQ_RX_RSS ||
+ rxmode->mq_mode == ETH_MQ_RX_NONE)) {
+ DP_ERR(edev, "Unsupported RSS mode\n");
+ qdev->ops->vport_stop(edev, 0);
+ qede_dealloc_fp_resc(eth_dev);
+ return -EINVAL;
+ }
+
+ /* Flow director mode check */
+ rc = qede_check_fdir_support(eth_dev);
+ if (rc) {
+ qdev->ops->vport_stop(edev, 0);
+ qede_dealloc_fp_resc(eth_dev);
+ return -EINVAL;
+ }
+ SLIST_INIT(&qdev->fdir_info.fdir_list_head);
+
+ SLIST_INIT(&qdev->vlan_list_head);
+
+ /* Enable VLAN offloads by default */
+ qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
+ ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK);
+
+ qdev->state = QEDE_DEV_CONFIG;
+
+ DP_INFO(edev, "Allocated RSS=%d TSS=%d (with CoS=%d)\n",
+ (int)QEDE_RSS_COUNT(qdev), (int)QEDE_TSS_COUNT(qdev),
+ qdev->num_tc);
+
+ return 0;
+}
+
+/* Info about HW descriptor ring limitations */
+static const struct rte_eth_desc_lim qede_rx_desc_lim = {
+ .nb_max = NUM_RX_BDS_MAX,
+ .nb_min = 128,
+ .nb_align = 128 /* lowest common multiple */
+};
+
+static const struct rte_eth_desc_lim qede_tx_desc_lim = {
+ .nb_max = NUM_TX_BDS_MAX,
+ .nb_min = 256,
+ .nb_align = 256,
+ .nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
+ .nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
+};
+
+static void
+qede_dev_info_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ struct qed_link_output link;
+ uint32_t speed_cap = 0;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
+ dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
+ dev_info->rx_desc_lim = qede_rx_desc_lim;
+ dev_info->tx_desc_lim = qede_tx_desc_lim;
+
+ if (IS_PF(edev))
+ dev_info->max_rx_queues = (uint16_t)RTE_MIN(
+ QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2);
+ else
+ dev_info->max_rx_queues = (uint16_t)RTE_MIN(
+ QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
+ dev_info->max_tx_queues = dev_info->max_rx_queues;
+
+ dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
+ dev_info->max_vfs = 0;
+ dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
+ dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
+ dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .txq_flags = QEDE_TXQ_FLAGS,
+ };
+
+ dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_TCP_LRO);
+
+ dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO);
+
+ memset(&link, 0, sizeof(struct qed_link_output));
+ qdev->ops->common->get_link(edev, &link);
+ if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
+ speed_cap |= ETH_LINK_SPEED_1G;
+ if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
+ speed_cap |= ETH_LINK_SPEED_10G;
+ if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
+ speed_cap |= ETH_LINK_SPEED_25G;
+ if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+ speed_cap |= ETH_LINK_SPEED_40G;
+ if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+ speed_cap |= ETH_LINK_SPEED_50G;
+ if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
+ speed_cap |= ETH_LINK_SPEED_100G;
+ dev_info->speed_capa = speed_cap;
+}
+
+/* return 0 means link status changed, -1 means not changed */
+static int
+qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ uint16_t link_duplex;
+ struct qed_link_output link;
+ struct rte_eth_link *curr = &eth_dev->data->dev_link;
+
+ memset(&link, 0, sizeof(struct qed_link_output));
+ qdev->ops->common->get_link(edev, &link);
+
+ /* Link Speed */
+ curr->link_speed = link.speed;
+
+ /* Link Mode */
+ switch (link.duplex) {
+ case QEDE_DUPLEX_HALF:
+ link_duplex = ETH_LINK_HALF_DUPLEX;
+ break;
+ case QEDE_DUPLEX_FULL:
+ link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ case QEDE_DUPLEX_UNKNOWN:
+ default:
+ link_duplex = -1;
+ }
+ curr->link_duplex = link_duplex;
+
+ /* Link Status */
+ curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;
+
+ /* AN */
+ curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
+ ETH_LINK_AUTONEG : ETH_LINK_FIXED;
+
+ DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
+ curr->link_speed, curr->link_duplex,
+ curr->link_autoneg, curr->link_status);
+
+ /* return 0 means link status changed, -1 means not changed */
+ return ((curr->link_status == link.link_up) ? -1 : 0);
+}
+
+static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
+{
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+
+ PMD_INIT_FUNC_TRACE(edev);
+#endif
+
+ enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
+
+ if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
+ type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
+
+ qed_configure_filter_rx_mode(eth_dev, type);
+}
+
+static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
+{
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+
+ PMD_INIT_FUNC_TRACE(edev);
+#endif
+
+ if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
+ qed_configure_filter_rx_mode(eth_dev,
+ QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
+ else
+ qed_configure_filter_rx_mode(eth_dev,
+ QED_FILTER_RX_MODE_TYPE_REGULAR);
+}
+
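+/* Periodic alarm callback used for 100G (dual-hwfn) devices: service the
+ * slow-path status blocks of both engines and re-arm the alarm.
+ */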
+static void qede_poll_sp_sb_cb(void *param)
+{
+ struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ int rc;
+
+ qede_interrupt_action(ECORE_LEADING_HWFN(edev));
+ qede_interrupt_action(&edev->hwfns[1]);
+
+ rc = rte_eal_alarm_set(timer_period * US_PER_S,
+ qede_poll_sp_sb_cb,
+ (void *)eth_dev);
+ if (rc != 0) {
+ DP_ERR(edev, "Unable to start periodic"
+ " timer rc %d\n", rc);
+ assert(false && "Unable to start periodic timer");
+ }
+}
+
+static void qede_dev_close(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ int rc;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ qede_fdir_dealloc_resc(eth_dev);
+
+ /* dev_stop() shall cleanup fp resources in hw but without releasing
+ * dma memories and sw structures so that dev_start() can be called
+ * by the app without reconfiguration. However, in dev_close() we
+ * can release all the resources and device can be brought up newly
+ */
+ if (qdev->state != QEDE_DEV_STOP)
+ qede_dev_stop(eth_dev);
+ else
+ DP_INFO(edev, "Device is already stopped\n");
+
+ rc = qdev->ops->vport_stop(edev, 0);
+ if (rc != 0)
+ DP_ERR(edev, "Failed to stop VPORT\n");
+
+ qede_dealloc_fp_resc(eth_dev);
+
+ qdev->ops->common->slowpath_stop(edev);
+
+ qdev->ops->common->remove(edev);
+
+ rte_intr_disable(&pci_dev->intr_handle);
+
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
+ qede_interrupt_handler, (void *)eth_dev);
+
+ if (edev->num_hwfns > 1)
+ rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
+
+ qdev->state = QEDE_DEV_INIT; /* Go back to init state */
+}
+
+static void
+qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ struct ecore_eth_stats stats;
+ unsigned int i = 0, j = 0, qid;
+ unsigned int rxq_stat_cntrs, txq_stat_cntrs;
+ struct qede_tx_queue *txq;
+
+ qdev->ops->get_vport_stats(edev, &stats);
+
+ /* RX Stats */
+ eth_stats->ipackets = stats.rx_ucast_pkts +
+ stats.rx_mcast_pkts + stats.rx_bcast_pkts;
+
+ eth_stats->ibytes = stats.rx_ucast_bytes +
+ stats.rx_mcast_bytes + stats.rx_bcast_bytes;
+
+ eth_stats->ierrors = stats.rx_crc_errors +
+ stats.rx_align_errors +
+ stats.rx_carrier_errors +
+ stats.rx_oversize_packets +
+ stats.rx_jabbers + stats.rx_undersize_packets;
+
+ eth_stats->rx_nombuf = stats.no_buff_discards;
+
+ eth_stats->imissed = stats.mftag_filter_discards +
+ stats.mac_filter_discards +
+ stats.no_buff_discards + stats.brb_truncates + stats.brb_discards;
+
+ /* TX stats */
+ eth_stats->opackets = stats.tx_ucast_pkts +
+ stats.tx_mcast_pkts + stats.tx_bcast_pkts;
+
+ eth_stats->obytes = stats.tx_ucast_bytes +
+ stats.tx_mcast_bytes + stats.tx_bcast_bytes;
+
+ eth_stats->oerrors = stats.tx_err_drop_pkts;
+
+ /* Queue stats */
+ rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ if ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) ||
+ (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev)))
+ DP_VERBOSE(edev, ECORE_MSG_DEBUG,
+ "Not all the queue stats will be displayed. Set"
+ " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
+ " appropriately and retry.\n");
+
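+ /* Per-queue counters are read directly from the fastpath ring
+  * structures using their member offsets.
+  */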
+ for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
+ if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
+ eth_stats->q_ipackets[i] =
+ *(uint64_t *)(
+ ((char *)(qdev->fp_array[(qid)].rxq)) +
+ offsetof(struct qede_rx_queue,
+ rcv_pkts));
+ eth_stats->q_errors[i] =
+ *(uint64_t *)(
+ ((char *)(qdev->fp_array[(qid)].rxq)) +
+ offsetof(struct qede_rx_queue,
+ rx_hw_errors)) +
+ *(uint64_t *)(
+ ((char *)(qdev->fp_array[(qid)].rxq)) +
+ offsetof(struct qede_rx_queue,
+ rx_alloc_errors));
+ i++;
+ }
+ if (i == rxq_stat_cntrs)
+ break;
+ }
+
+ for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
+ if (qdev->fp_array[qid].type & QEDE_FASTPATH_TX) {
+ txq = qdev->fp_array[(qid)].txqs[0];
+ eth_stats->q_opackets[j] =
+ *((uint64_t *)(uintptr_t)
+ (((uint64_t)(uintptr_t)(txq)) +
+ offsetof(struct qede_tx_queue,
+ xmit_pkts)));
+ j++;
+ }
+ if (j == txq_stat_cntrs)
+ break;
+ }
+}
+
+static unsigned
+qede_get_xstats_count(struct qede_dev *qdev)
+{
+ return RTE_DIM(qede_xstats_strings) +
+ (RTE_DIM(qede_rxq_xstats_strings) *
+ RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS));
+}
+
+static int
+qede_get_xstats_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned int limit)
+{
+ struct qede_dev *qdev = dev->data->dev_private;
+ const unsigned int stat_cnt = qede_get_xstats_count(qdev);
+ unsigned int i, qid, stat_idx = 0;
+ unsigned int rxq_stat_cntrs;
+
+ if (xstats_names != NULL) {
+ for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
+ snprintf(xstats_names[stat_idx].name,
+ sizeof(xstats_names[stat_idx].name),
+ "%s",
+ qede_xstats_strings[i].name);
+ stat_idx++;
+ }
+
+ rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ for (qid = 0; qid < rxq_stat_cntrs; qid++) {
+ for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
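+ /* Build the per-queue name by splicing the queue id after the
+  * 4-character "rx_q" prefix of the template name.
+  */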
+ snprintf(xstats_names[stat_idx].name,
+ sizeof(xstats_names[stat_idx].name),
+ "%.4s%d%s",
+ qede_rxq_xstats_strings[i].name, qid,
+ qede_rxq_xstats_strings[i].name + 4);
+ stat_idx++;
+ }
+ }
+ }
+
+ return stat_cnt;
+}
+
+static int
+qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ struct qede_dev *qdev = dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ struct ecore_eth_stats stats;
+ const unsigned int num = qede_get_xstats_count(qdev);
+ unsigned int i, qid, stat_idx = 0;
+ unsigned int rxq_stat_cntrs;
+
+ if (n < num)
+ return num;
+
+ qdev->ops->get_vport_stats(edev, &stats);
+
+ for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
+ xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
+ qede_xstats_strings[i].offset);
+ xstats[stat_idx].id = stat_idx;
+ stat_idx++;
+ }
+
+ rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ for (qid = 0; qid < rxq_stat_cntrs; qid++) {
+ if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
+ for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
+ xstats[stat_idx].value = *(uint64_t *)(
+ ((char *)(qdev->fp_array[(qid)].rxq)) +
+ qede_rxq_xstats_strings[i].offset);
+ xstats[stat_idx].id = stat_idx;
+ stat_idx++;
+ }
+ }
+ }
+
+ return stat_idx;
+}
+
+static void
+qede_reset_xstats(struct rte_eth_dev *dev)
+{
+ struct qede_dev *qdev = dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+
+ ecore_reset_vport_stats(edev);
+}
+
+int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct qed_link_params link_params;
+ int rc;
+
+ DP_INFO(edev, "setting link state %d\n", link_up);
+ memset(&link_params, 0, sizeof(link_params));
+ link_params.link_up = link_up;
+ rc = qdev->ops->common->set_link(edev, &link_params);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(edev, "Unable to set link state %d\n", link_up);
+
+ return rc;
+}
+
+static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
+{
+ return qede_dev_set_link_state(eth_dev, true);
+}
+
+static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
+{
+ return qede_dev_set_link_state(eth_dev, false);
+}
+
+static void qede_reset_stats(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+
+ ecore_reset_vport_stats(edev);
+}
+
+static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
+{
+ enum qed_filter_rx_mode_type type =
+ QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
+
+ if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
+ type |= QED_FILTER_RX_MODE_TYPE_PROMISC;
+
+ qed_configure_filter_rx_mode(eth_dev, type);
+}
+
+static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
+{
+ if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
+ qed_configure_filter_rx_mode(eth_dev,
+ QED_FILTER_RX_MODE_TYPE_PROMISC);
+ else
+ qed_configure_filter_rx_mode(eth_dev,
+ QED_FILTER_RX_MODE_TYPE_REGULAR);
+}
+
+static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct qed_link_output current_link;
+ struct qed_link_params params;
+
+ memset(&current_link, 0, sizeof(current_link));
+ qdev->ops->common->get_link(edev, &current_link);
+
+ memset(&params, 0, sizeof(params));
+ params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
+ if (fc_conf->autoneg) {
+ if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
+ DP_ERR(edev, "Autoneg not supported\n");
+ return -EINVAL;
+ }
+ params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
+ }
+
+ /* Pause is assumed to be supported (SUPPORTED_Pause) */
+ if (fc_conf->mode == RTE_FC_FULL)
+ params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
+ QED_LINK_PAUSE_RX_ENABLE);
+ if (fc_conf->mode == RTE_FC_TX_PAUSE)
+ params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
+ if (fc_conf->mode == RTE_FC_RX_PAUSE)
+ params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
+
+ params.link_up = true;
+ (void)qdev->ops->common->set_link(edev, &params);
+
+ return 0;
+}
+
+static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct qed_link_output current_link;
+
+ memset(&current_link, 0, sizeof(current_link));
+ qdev->ops->common->get_link(edev, &current_link);
+
+ if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
+ fc_conf->autoneg = true;
+
+ if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
+ QED_LINK_PAUSE_TX_ENABLE))
+ fc_conf->mode = RTE_FC_FULL;
+ else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ else
+ fc_conf->mode = RTE_FC_NONE;
+
+ return 0;
+}
+
+static const uint32_t *
+qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (eth_dev->rx_pkt_burst == qede_recv_pkts)
+ return ptypes;
+
+ return NULL;
+}
+
+static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
+{
+ *rss_caps = 0;
+ *rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
+ *rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
+ *rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
+ *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
+ *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
+ *rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
+ *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0;
+ *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0;
+}
+
+int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params vport_update_params;
+ struct ecore_rss_params rss_params;
+ struct ecore_hwfn *p_hwfn;
+ uint32_t *key = (uint32_t *)rss_conf->rss_key;
+ uint64_t hf = rss_conf->rss_hf;
+ uint8_t len = rss_conf->rss_key_len;
+ uint8_t idx;
+ uint8_t i;
+ int rc;
+
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+ memset(&rss_params, 0, sizeof(rss_params));
+
+ DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n",
+ (unsigned long)hf, len, key);
+
+ if (hf != 0) {
+ /* Enabling RSS */
+ DP_INFO(edev, "Enabling rss\n");
+
+ /* RSS caps */
+ qede_init_rss_caps(&rss_params.rss_caps, hf);
+ rss_params.update_rss_capabilities = 1;
+
+ /* RSS hash key */
+ if (key) {
+ if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
+ DP_ERR(edev, "RSS key length exceeds limit\n");
+ return -EINVAL;
+ }
+ DP_INFO(edev, "Applying user supplied hash key\n");
+ rss_params.update_rss_key = 1;
+ memcpy(&rss_params.rss_key, key, len);
+ }
+ rss_params.rss_enable = 1;
+ }
+
+ rss_params.update_rss_config = 1;
+	/* The indirection table size (log2: 7 => 128 entries) must be set along with the capabilities */
+ rss_params.rss_table_size_log = 7;
+ vport_update_params.vport_id = 0;
+ /* pass the L2 handles instead of qids */
+	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
+ idx = qdev->rss_ind_table[i];
+ rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
+ }
+ vport_update_params.rss_params = &rss_params;
+
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc) {
+ DP_ERR(edev, "vport-update for RSS failed\n");
+ return rc;
+ }
+ }
+ qdev->rss_enable = rss_params.rss_enable;
+
+ /* Update local structure for hash query */
+ qdev->rss_conf.rss_hf = hf;
+ qdev->rss_conf.rss_key_len = len;
+ if (qdev->rss_enable) {
+ if (qdev->rss_conf.rss_key == NULL) {
+ qdev->rss_conf.rss_key = (uint8_t *)malloc(len);
+ if (qdev->rss_conf.rss_key == NULL) {
+ DP_ERR(edev, "No memory to store RSS key\n");
+ return -ENOMEM;
+ }
+ }
+ if (key && len) {
+ DP_INFO(edev, "Storing RSS key\n");
+ memcpy(qdev->rss_conf.rss_key, key, len);
+ }
+ } else if (!qdev->rss_enable && len == 0) {
+ if (qdev->rss_conf.rss_key) {
+ free(qdev->rss_conf.rss_key);
+ qdev->rss_conf.rss_key = NULL;
+ DP_INFO(edev, "Free RSS key\n");
+ }
+ }
+
+ return 0;
+}
+
+static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+
+ rss_conf->rss_hf = qdev->rss_conf.rss_hf;
+ rss_conf->rss_key_len = qdev->rss_conf.rss_key_len;
+
+ if (rss_conf->rss_key && qdev->rss_conf.rss_key)
+ memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key,
+ rss_conf->rss_key_len);
+ return 0;
+}
+
+static bool qede_update_rss_parm_cmt(struct ecore_dev *edev,
+ struct ecore_rss_params *rss)
+{
+ int i, fn;
+ bool rss_mode = 1; /* enable */
+ struct ecore_queue_cid *cid;
+ struct ecore_rss_params *t_rss;
+
+	/* In a regular scenario we would simply take the input handlers.
+	 * But in CMT we have to split the handlers according to the
+	 * engine they were configured on, and then determine whether
+	 * RSS is really required, since two queues on CMT do not
+	 * require RSS.
+	 */
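+	/* Note: the caller allocates one ecore_rss_params entry per hwfn, so
+	 * rss[0] collects engine 0 handles and rss[1] collects engine 1 handles.
+	 */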
+
+ /* CMT should be round-robin */
+ for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
+ cid = rss->rss_ind_table[i];
+
+ if (cid->p_owner == ECORE_LEADING_HWFN(edev))
+ t_rss = &rss[0];
+ else
+ t_rss = &rss[1];
+
+ t_rss->rss_ind_table[i / edev->num_hwfns] = cid;
+ }
+
+ t_rss = &rss[1];
+ t_rss->update_rss_ind_table = 1;
+ t_rss->rss_table_size_log = 7;
+ t_rss->update_rss_config = 1;
+
+ /* Make sure RSS is actually required */
+ for_each_hwfn(edev, fn) {
+ for (i = 1; i < ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns;
+ i++) {
+ if (rss[fn].rss_ind_table[i] !=
+ rss[fn].rss_ind_table[0])
+ break;
+ }
+
+ if (i == ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns) {
+ DP_INFO(edev,
+ "CMT - 1 queue per-hwfn; Disabling RSS\n");
+ rss_mode = 0;
+ goto out;
+ }
+ }
+
+out:
+ t_rss->rss_enable = rss_mode;
+
+ return rss_mode;
+}
+
+int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params vport_update_params;
+ struct ecore_rss_params *params;
+ struct ecore_hwfn *p_hwfn;
+ uint16_t i, idx, shift;
+ uint8_t entry;
+ int rc = 0;
+
+ if (reta_size > ETH_RSS_RETA_SIZE_128) {
+ DP_ERR(edev, "reta_size %d is not supported by hardware\n",
+ reta_size);
+ return -EINVAL;
+ }
+
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+	params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns,
+			     RTE_CACHE_LINE_SIZE);
+	if (params == NULL) {
+		DP_ERR(edev, "failed to allocate memory for RSS params\n");
+		return -ENOMEM;
+	}
+
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & (1ULL << shift)) {
+ entry = reta_conf[idx].reta[shift];
+ /* Pass rxq handles to ecore */
+ params->rss_ind_table[i] =
+ qdev->fp_array[entry].rxq->handle;
+ /* Update the local copy for RETA query command */
+ qdev->rss_ind_table[i] = entry;
+ }
+ }
+
+ params->update_rss_ind_table = 1;
+ params->rss_table_size_log = 7;
+ params->update_rss_config = 1;
+
+ /* Fix up RETA for CMT mode device */
+ if (edev->num_hwfns > 1)
+ qdev->rss_enable = qede_update_rss_parm_cmt(edev,
+ params);
+ vport_update_params.vport_id = 0;
+ /* Use the current value of rss_enable */
+ params->rss_enable = qdev->rss_enable;
+ vport_update_params.rss_params = params;
+
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc) {
+ DP_ERR(edev, "vport-update for RSS failed\n");
+ goto out;
+ }
+ }
+
+out:
+ rte_free(params);
+ return rc;
+}
+
+static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ uint16_t i, idx, shift;
+ uint8_t entry;
+
+ if (reta_size > ETH_RSS_RETA_SIZE_128) {
+ DP_ERR(edev, "reta_size %d is not supported\n",
+ reta_size);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & (1ULL << shift)) {
+ entry = qdev->rss_ind_table[i];
+ reta_conf[idx].reta[shift] = entry;
+ }
+ }
+
+ return 0;
+}
+
+static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_dev_info dev_info = {0};
+ struct qede_fastpath *fp;
+ uint32_t frame_size;
+ uint16_t rx_buf_size;
+ uint16_t bufsz;
+ int i;
+
+ PMD_INIT_FUNC_TRACE(edev);
+ qede_dev_info_get(dev, &dev_info);
+ frame_size = mtu + QEDE_ETH_OVERHEAD;
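+	/* QEDE_ETH_OVERHEAD adds the L2 framing overhead (Ethernet header, VLAN tags, etc.) on top of the MTU */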
+ if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
+ DP_ERR(edev, "MTU %u out of range\n", mtu);
+ return -EINVAL;
+ }
+ if (!dev->data->scattered_rx &&
+ frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
+ DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
+ dev->data->min_rx_buf_size);
+ return -EINVAL;
+ }
+	/* Temporarily replace the I/O burst functions with dummy ones. They
+	 * cannot be set to NULL because rte_eth_rx_burst() does not check
+	 * for NULL.
+	 */
+ dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
+ dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
+ qede_dev_stop(dev);
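+	/* Give any in-flight polling on the old burst functions time to drain before the queues are resized */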
+ rte_delay_ms(1000);
+ qdev->mtu = mtu;
+ /* Fix up RX buf size for all queues of the port */
+ for_each_queue(i) {
+ fp = &qdev->fp_array[i];
+ if (fp->type & QEDE_FASTPATH_RX) {
+ bufsz = (uint16_t)rte_pktmbuf_data_room_size(
+ fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
+ if (dev->data->scattered_rx)
+ rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
+ else
+ rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
+ rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
+ fp->rxq->rx_buf_size = rx_buf_size;
+ DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
+ }
+ }
+ qede_dev_start(dev);
+ if (frame_size > ETHER_MAX_LEN)
+ dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ else
+ dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ /* update max frame size */
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+ /* Reassign back */
+ dev->rx_pkt_burst = qede_recv_pkts;
+ dev->tx_pkt_burst = qede_xmit_pkts;
+
+ return 0;
+}
+
+static int
+qede_conf_udp_dst_port(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tunnel_udp,
+ bool add)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_tunnel_info tunn; /* @DPDK */
+ struct ecore_hwfn *p_hwfn;
+ int rc, i;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ memset(&tunn, 0, sizeof(tunn));
+ if (tunnel_udp->prot_type == RTE_TUNNEL_TYPE_VXLAN) {
+ tunn.vxlan_port.b_update_port = true;
+ tunn.vxlan_port.port = (add) ? tunnel_udp->udp_port :
+ QEDE_VXLAN_DEF_PORT;
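+		/* On delete, the port is restored to the default VXLAN port rather than cleared */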
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+ ECORE_SPQ_MODE_CB, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unable to config UDP port %u\n",
+ tunn.vxlan_port.port);
+ return rc;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tunnel_udp)
+{
+ return qede_conf_udp_dst_port(eth_dev, tunnel_udp, false);
+}
+
+static int
+qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tunnel_udp)
+{
+ return qede_conf_udp_dst_port(eth_dev, tunnel_udp, true);
+}
+
+static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
+ uint32_t *clss, char *str)
+{
+ uint16_t j;
+ *clss = MAX_ECORE_TUNN_CLSS;
+
+ for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
+ if (filter == qede_tunn_types[j].rte_filter_type) {
+ *type = qede_tunn_types[j].qede_type;
+ *clss = qede_tunn_types[j].qede_tunn_clss;
+ strcpy(str, qede_tunn_types[j].string);
+ return;
+ }
+ }
+}
+
+static int
+qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
+ const struct rte_eth_tunnel_filter_conf *conf,
+ uint32_t type)
+{
+	/* Init common ucast params first */
+ qede_set_ucast_cmn_params(ucast);
+
+ /* Copy out the required fields based on classification type */
+ ucast->type = type;
+
+ switch (type) {
+ case ECORE_FILTER_VNI:
+ ucast->vni = conf->tenant_id;
+ break;
+ case ECORE_FILTER_INNER_VLAN:
+ ucast->vlan = conf->inner_vlan;
+ break;
+ case ECORE_FILTER_MAC:
+ memcpy(ucast->mac, conf->outer_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ break;
+ case ECORE_FILTER_INNER_MAC:
+ memcpy(ucast->mac, conf->inner_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ break;
+ case ECORE_FILTER_MAC_VNI_PAIR:
+ memcpy(ucast->mac, conf->outer_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ ucast->vni = conf->tenant_id;
+ break;
+ case ECORE_FILTER_INNER_MAC_VNI_PAIR:
+ memcpy(ucast->mac, conf->inner_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ ucast->vni = conf->tenant_id;
+ break;
+ case ECORE_FILTER_INNER_PAIR:
+ memcpy(ucast->mac, conf->inner_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ ucast->vlan = conf->inner_vlan;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
+ enum rte_filter_op filter_op,
+ const struct rte_eth_tunnel_filter_conf *conf)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_tunnel_info tunn;
+ struct ecore_hwfn *p_hwfn;
+ enum ecore_filter_ucast_type type;
+ enum ecore_tunn_clss clss;
+ struct ecore_filter_ucast ucast;
+ char str[80];
+ uint16_t filter_type;
+ int rc, i;
+
+ filter_type = conf->filter_type | qdev->vxlan_filter_type;
+ /* First determine if the given filter classification is supported */
+ qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
+ if (clss == MAX_ECORE_TUNN_CLSS) {
+ DP_ERR(edev, "Wrong filter type\n");
+ return -EINVAL;
+ }
+ /* Init tunnel ucast params */
+ rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
+ conf->filter_type);
+ return rc;
+ }
+ DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
+ str, filter_op, ucast.type);
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ucast.opcode = ECORE_FILTER_ADD;
+
+ /* Skip MAC/VLAN if filter is based on VNI */
+ if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
+ rc = qede_mac_int_ops(eth_dev, &ucast, 1);
+ if (rc == 0) {
+ /* Enable accept anyvlan */
+ qede_config_accept_any_vlan(qdev, true);
+ }
+ } else {
+ rc = qede_ucast_filter(eth_dev, &ucast, 1);
+ if (rc == 0)
+ rc = ecore_filter_ucast_cmd(edev, &ucast,
+ ECORE_SPQ_MODE_CB, NULL);
+ }
+
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ qdev->vxlan_filter_type = filter_type;
+
+ DP_INFO(edev, "Enabling VXLAN tunneling\n");
+ qede_set_cmn_tunn_param(&tunn, clss, true, true);
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn,
+ &tunn, ECORE_SPQ_MODE_CB, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to update tunn_clss %u\n",
+ tunn.vxlan.tun_cls);
+ }
+ }
+ qdev->num_tunn_filters++; /* Filter added successfully */
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ucast.opcode = ECORE_FILTER_REMOVE;
+
+ if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
+ rc = qede_mac_int_ops(eth_dev, &ucast, 0);
+ } else {
+ rc = qede_ucast_filter(eth_dev, &ucast, 0);
+ if (rc == 0)
+ rc = ecore_filter_ucast_cmd(edev, &ucast,
+ ECORE_SPQ_MODE_CB, NULL);
+ }
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ qdev->vxlan_filter_type = filter_type;
+ qdev->num_tunn_filters--;
+
+ /* Disable VXLAN if VXLAN filters become 0 */
+ if (qdev->num_tunn_filters == 0) {
+ DP_INFO(edev, "Disabling VXLAN tunneling\n");
+
+ /* Use 0 as tunnel mode */
+ qede_set_cmn_tunn_param(&tunn, clss, false, true);
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+ ECORE_SPQ_MODE_CB, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev,
+ "Failed to update tunn_clss %u\n",
+ tunn.vxlan.tun_cls);
+ break;
+ }
+ }
+ }
+ break;
+ default:
+ DP_ERR(edev, "Unsupported operation %d\n", filter_op);
+ return -EINVAL;
+ }
+ DP_INFO(edev, "Current VXLAN filters %d\n", qdev->num_tunn_filters);
+
+ return 0;
+}
+
+int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_tunnel_filter_conf *filter_conf =
+ (struct rte_eth_tunnel_filter_conf *)arg;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_TUNNEL:
+ switch (filter_conf->tunnel_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ DP_INFO(edev,
+ "Packet steering to the specified Rx queue"
+ " is not supported with VXLAN tunneling");
+			return qede_vxlan_tunn_config(eth_dev, filter_op,
+						      filter_conf);
+		/* Placeholders for future tunneling support */
+ case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_TUNNEL_TYPE_TEREDO:
+ case RTE_TUNNEL_TYPE_NVGRE:
+ case RTE_TUNNEL_TYPE_IP_IN_GRE:
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ DP_ERR(edev, "Unsupported tunnel type %d\n",
+ filter_conf->tunnel_type);
+ return -EINVAL;
+ case RTE_TUNNEL_TYPE_NONE:
+ default:
+ return 0;
+ }
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ return qede_fdir_filter_conf(eth_dev, filter_op, arg);
+ case RTE_ETH_FILTER_NTUPLE:
+ return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
+ case RTE_ETH_FILTER_MACVLAN:
+ case RTE_ETH_FILTER_ETHERTYPE:
+ case RTE_ETH_FILTER_FLEXIBLE:
+ case RTE_ETH_FILTER_SYN:
+ case RTE_ETH_FILTER_HASH:
+ case RTE_ETH_FILTER_L2_TUNNEL:
+ case RTE_ETH_FILTER_MAX:
+ default:
+ DP_ERR(edev, "Unsupported filter type %d\n",
+ filter_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct eth_dev_ops qede_eth_dev_ops = {
+ .dev_configure = qede_dev_configure,
+ .dev_infos_get = qede_dev_info_get,
+ .rx_queue_setup = qede_rx_queue_setup,
+ .rx_queue_release = qede_rx_queue_release,
+ .tx_queue_setup = qede_tx_queue_setup,
+ .tx_queue_release = qede_tx_queue_release,
+ .dev_start = qede_dev_start,
+ .dev_set_link_up = qede_dev_set_link_up,
+ .dev_set_link_down = qede_dev_set_link_down,
+ .link_update = qede_link_update,
+ .promiscuous_enable = qede_promiscuous_enable,
+ .promiscuous_disable = qede_promiscuous_disable,
+ .allmulticast_enable = qede_allmulticast_enable,
+ .allmulticast_disable = qede_allmulticast_disable,
+ .dev_stop = qede_dev_stop,
+ .dev_close = qede_dev_close,
+ .stats_get = qede_get_stats,
+ .stats_reset = qede_reset_stats,
+ .xstats_get = qede_get_xstats,
+ .xstats_reset = qede_reset_xstats,
+ .xstats_get_names = qede_get_xstats_names,
+ .mac_addr_add = qede_mac_addr_add,
+ .mac_addr_remove = qede_mac_addr_remove,
+ .mac_addr_set = qede_mac_addr_set,
+ .vlan_offload_set = qede_vlan_offload_set,
+ .vlan_filter_set = qede_vlan_filter_set,
+ .flow_ctrl_set = qede_flow_ctrl_set,
+ .flow_ctrl_get = qede_flow_ctrl_get,
+ .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
+ .rss_hash_update = qede_rss_hash_update,
+ .rss_hash_conf_get = qede_rss_hash_conf_get,
+ .reta_update = qede_rss_reta_update,
+ .reta_query = qede_rss_reta_query,
+ .mtu_set = qede_set_mtu,
+ .filter_ctrl = qede_dev_filter_ctrl,
+ .udp_tunnel_port_add = qede_udp_dst_port_add,
+ .udp_tunnel_port_del = qede_udp_dst_port_del,
+};
+
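+/* The VF ops table below is a subset of the PF table: MAC address
+ * management, flow control, filter_ctrl and UDP tunnel port
+ * configuration are PF-only operations.
+ */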
+static const struct eth_dev_ops qede_eth_vf_dev_ops = {
+ .dev_configure = qede_dev_configure,
+ .dev_infos_get = qede_dev_info_get,
+ .rx_queue_setup = qede_rx_queue_setup,
+ .rx_queue_release = qede_rx_queue_release,
+ .tx_queue_setup = qede_tx_queue_setup,
+ .tx_queue_release = qede_tx_queue_release,
+ .dev_start = qede_dev_start,
+ .dev_set_link_up = qede_dev_set_link_up,
+ .dev_set_link_down = qede_dev_set_link_down,
+ .link_update = qede_link_update,
+ .promiscuous_enable = qede_promiscuous_enable,
+ .promiscuous_disable = qede_promiscuous_disable,
+ .allmulticast_enable = qede_allmulticast_enable,
+ .allmulticast_disable = qede_allmulticast_disable,
+ .dev_stop = qede_dev_stop,
+ .dev_close = qede_dev_close,
+ .stats_get = qede_get_stats,
+ .stats_reset = qede_reset_stats,
+ .xstats_get = qede_get_xstats,
+ .xstats_reset = qede_reset_xstats,
+ .xstats_get_names = qede_get_xstats_names,
+ .vlan_offload_set = qede_vlan_offload_set,
+ .vlan_filter_set = qede_vlan_filter_set,
+ .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
+ .rss_hash_update = qede_rss_hash_update,
+ .rss_hash_conf_get = qede_rss_hash_conf_get,
+ .reta_update = qede_rss_reta_update,
+ .reta_query = qede_rss_reta_query,
+ .mtu_set = qede_set_mtu,
+};
+
+static void qede_update_pf_params(struct ecore_dev *edev)
+{
+ struct ecore_pf_params pf_params;
+
+ memset(&pf_params, 0, sizeof(struct ecore_pf_params));
+ pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
+ pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
+ qed_ops->common->update_pf_params(edev, &pf_params);
+}
+
+static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
+{
+ struct rte_pci_device *pci_dev;
+ struct rte_pci_addr pci_addr;
+ struct qede_dev *adapter;
+ struct ecore_dev *edev;
+ struct qed_dev_eth_info dev_info;
+ struct qed_slowpath_params params;
+ static bool do_once = true;
+ uint8_t bulletin_change;
+ uint8_t vf_mac[ETHER_ADDR_LEN];
+ uint8_t is_mac_forced;
+ bool is_mac_exist;
+ /* Fix up ecore debug level */
+ uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
+ uint8_t dp_level = ECORE_LEVEL_VERBOSE;
+ int rc;
+
+ /* Extract key data structures */
+ adapter = eth_dev->data->dev_private;
+ edev = &adapter->edev;
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ pci_addr = pci_dev->addr;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
+ pci_addr.bus, pci_addr.devid, pci_addr.function,
+ eth_dev->data->port_id);
+
+ eth_dev->rx_pkt_burst = qede_recv_pkts;
+ eth_dev->tx_pkt_burst = qede_xmit_pkts;
+ eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ DP_NOTICE(edev, false,
+ "Skipping device init from secondary process\n");
+ return 0;
+ }
+
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ /* @DPDK */
+ edev->vendor_id = pci_dev->id.vendor_id;
+ edev->device_id = pci_dev->id.device_id;
+
+ qed_ops = qed_get_eth_ops();
+ if (!qed_ops) {
+ DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
+ return -EINVAL;
+ }
+
+ DP_INFO(edev, "Starting qede probe\n");
+
+ rc = qed_ops->common->probe(edev, pci_dev, QED_PROTOCOL_ETH,
+ dp_module, dp_level, is_vf);
+
+ if (rc != 0) {
+ DP_ERR(edev, "qede probe failed rc %d\n", rc);
+ return -ENODEV;
+ }
+
+ qede_update_pf_params(edev);
+
+ rte_intr_callback_register(&pci_dev->intr_handle,
+ qede_interrupt_handler, (void *)eth_dev);
+
+ if (rte_intr_enable(&pci_dev->intr_handle)) {
+ DP_ERR(edev, "rte_intr_enable() failed\n");
+ return -ENODEV;
+ }
+
+ /* Start the Slowpath-process */
+ memset(&params, 0, sizeof(struct qed_slowpath_params));
+ params.int_mode = ECORE_INT_MODE_MSIX;
+ params.drv_major = QEDE_PMD_VERSION_MAJOR;
+ params.drv_minor = QEDE_PMD_VERSION_MINOR;
+ params.drv_rev = QEDE_PMD_VERSION_REVISION;
+ params.drv_eng = QEDE_PMD_VERSION_PATCH;
+ strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
+ QEDE_PMD_DRV_VER_STR_SIZE);
+
+	/* For a CMT-mode device, poll periodically for slowpath events.
+	 * This is required since the uio device uses only one MSI-X
+	 * interrupt vector, but we need one for each engine.
+	 */
+ if (edev->num_hwfns > 1 && IS_PF(edev)) {
+ rc = rte_eal_alarm_set(timer_period * US_PER_S,
+ qede_poll_sp_sb_cb,
+ (void *)eth_dev);
+ if (rc != 0) {
+ DP_ERR(edev, "Unable to start periodic"
+ " timer rc %d\n", rc);
+ return -EINVAL;
+ }
+ }
+
+ rc = qed_ops->common->slowpath_start(edev, &params);
+ if (rc) {
+ DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
+ rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
+ (void *)eth_dev);
+ return -ENODEV;
+ }
+
+ rc = qed_ops->fill_dev_info(edev, &dev_info);
+ if (rc) {
+ DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
+ qed_ops->common->slowpath_stop(edev);
+ qed_ops->common->remove(edev);
+ rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
+ (void *)eth_dev);
+ return -ENODEV;
+ }
+
+ qede_alloc_etherdev(adapter, &dev_info);
+
+ adapter->ops->common->set_name(edev, edev->name);
+
+ if (!is_vf)
+ adapter->dev_info.num_mac_filters =
+ (uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
+ ECORE_MAC);
+ else
+ ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
+ (uint32_t *)&adapter->dev_info.num_mac_filters);
+
+ /* Allocate memory for storing MAC addr */
+ eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
+ (ETHER_ADDR_LEN *
+ adapter->dev_info.num_mac_filters),
+ RTE_CACHE_LINE_SIZE);
+
+ if (eth_dev->data->mac_addrs == NULL) {
+ DP_ERR(edev, "Failed to allocate MAC address\n");
+ qed_ops->common->slowpath_stop(edev);
+ qed_ops->common->remove(edev);
+ rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
+ (void *)eth_dev);
+ return -ENOMEM;
+ }
+
+ if (!is_vf) {
+ ether_addr_copy((struct ether_addr *)edev->hwfns[0].
+ hw_info.hw_mac_addr,
+ &eth_dev->data->mac_addrs[0]);
+ ether_addr_copy(&eth_dev->data->mac_addrs[0],
+ &adapter->primary_mac);
+ } else {
+ ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
+ &bulletin_change);
+ if (bulletin_change) {
+ is_mac_exist =
+ ecore_vf_bulletin_get_forced_mac(
+ ECORE_LEADING_HWFN(edev),
+ vf_mac,
+ &is_mac_forced);
+ if (is_mac_exist && is_mac_forced) {
+ DP_INFO(edev, "VF macaddr received from PF\n");
+ ether_addr_copy((struct ether_addr *)&vf_mac,
+ &eth_dev->data->mac_addrs[0]);
+ ether_addr_copy(&eth_dev->data->mac_addrs[0],
+ &adapter->primary_mac);
+ } else {
+ DP_NOTICE(edev, false,
+ "No VF macaddr assigned\n");
+ }
+ }
+ }
+
+ eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
+
+ if (do_once) {
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
+ qede_print_adapter_info(adapter);
+#endif
+ do_once = false;
+ }
+
+ adapter->state = QEDE_DEV_INIT;
+
+ DP_NOTICE(edev, false, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
+ adapter->primary_mac.addr_bytes[0],
+ adapter->primary_mac.addr_bytes[1],
+ adapter->primary_mac.addr_bytes[2],
+ adapter->primary_mac.addr_bytes[3],
+ adapter->primary_mac.addr_bytes[4],
+ adapter->primary_mac.addr_bytes[5]);
+
+ return rc;
+}
+
+static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
+{
+ return qede_common_dev_init(eth_dev, 1);
+}
+
+static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
+{
+ return qede_common_dev_init(eth_dev, 0);
+}
+
+static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
+{
+ /* only uninitialize in the primary process */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ /* safe to close dev here */
+ qede_dev_close(eth_dev);
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ if (eth_dev->data->mac_addrs)
+ rte_free(eth_dev->data->mac_addrs);
+
+ eth_dev->data->mac_addrs = NULL;
+
+ return 0;
+}
+
+static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ return qede_dev_common_uninit(eth_dev);
+}
+
+static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ return qede_dev_common_uninit(eth_dev);
+}
+
+static const struct rte_pci_id pci_id_qedevf_map[] = {
+#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
+ {
+ QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF)
+ },
+ {
+ QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV)
+ },
+ {
+ QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV)
+ },
+ {.vendor_id = 0,}
+};
+
+static const struct rte_pci_id pci_id_qede_map[] = {
+#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G)
+ },
+ {.vendor_id = 0,}
+};
+
+static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct qede_dev), qedevf_eth_dev_init);
+}
+
+static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit);
+}
+
+static struct rte_pci_driver rte_qedevf_pmd = {
+ .id_table = pci_id_qedevf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = qedevf_eth_dev_pci_probe,
+ .remove = qedevf_eth_dev_pci_remove,
+};
+
+static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct qede_dev), qede_eth_dev_init);
+}
+
+static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit);
+}
+
+static struct rte_pci_driver rte_qede_pmd = {
+ .id_table = pci_id_qede_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = qede_eth_dev_pci_probe,
+ .remove = qede_eth_dev_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio");
diff --git a/src/seastar/dpdk/drivers/net/qede/qede_ethdev.h b/src/seastar/dpdk/drivers/net/qede/qede_ethdev.h
new file mode 100644
index 00000000..e4323a0d
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/qede_ethdev.h
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+
+#ifndef _QEDE_ETHDEV_H_
+#define _QEDE_ETHDEV_H_
+
+#include <sys/queue.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
+#include <rte_dev.h>
+#include <rte_ip.h>
+
+/* ecore includes */
+#include "base/bcm_osal.h"
+#include "base/ecore.h"
+#include "base/ecore_dev_api.h"
+#include "base/ecore_l2_api.h"
+#include "base/ecore_vf_api.h"
+#include "base/ecore_hsi_common.h"
+#include "base/ecore_int_api.h"
+#include "base/ecore_chain.h"
+#include "base/ecore_status.h"
+#include "base/ecore_hsi_eth.h"
+#include "base/ecore_dev_api.h"
+#include "base/ecore_iov_api.h"
+#include "base/ecore_cxt.h"
+#include "base/nvm_cfg.h"
+#include "base/ecore_iov_api.h"
+#include "base/ecore_sp_commands.h"
+#include "base/ecore_l2.h"
+#include "base/ecore_dev_api.h"
+#include "base/ecore_l2.h"
+
+#include "qede_logs.h"
+#include "qede_if.h"
+#include "qede_eth_if.h"
+
+#include "qede_rxtx.h"
+
+#define qede_stringify1(x...) #x
+#define qede_stringify(x...) qede_stringify1(x)
+
+/* Driver versions */
+#define QEDE_PMD_VER_PREFIX "QEDE PMD"
+#define QEDE_PMD_VERSION_MAJOR 2
+#define QEDE_PMD_VERSION_MINOR 4
+#define QEDE_PMD_VERSION_REVISION 0
+#define QEDE_PMD_VERSION_PATCH 1
+
+#define QEDE_PMD_VERSION qede_stringify(QEDE_PMD_VERSION_MAJOR) "." \
+ qede_stringify(QEDE_PMD_VERSION_MINOR) "." \
+ qede_stringify(QEDE_PMD_VERSION_REVISION) "." \
+ qede_stringify(QEDE_PMD_VERSION_PATCH)
+
+#define QEDE_PMD_DRV_VER_STR_SIZE NAME_SIZE
+#define QEDE_PMD_VER_PREFIX "QEDE PMD"
+
+
+#define QEDE_RSS_INDIR_INITED (1 << 0)
+#define QEDE_RSS_KEY_INITED (1 << 1)
+#define QEDE_RSS_CAPS_INITED (1 << 2)
+
+#define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues)
+#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues * \
+ (edev)->dev_info.num_tc)
+
+#define QEDE_QUEUE_CNT(qdev) ((qdev)->num_queues)
+#define QEDE_RSS_COUNT(qdev) ((qdev)->num_queues - (qdev)->fp_num_tx)
+#define QEDE_TSS_COUNT(qdev) (((qdev)->num_queues - (qdev)->fp_num_rx) * \
+ (qdev)->num_tc)
+
+#define QEDE_FASTPATH_TX (1 << 0)
+#define QEDE_FASTPATH_RX (1 << 1)
+
+#define QEDE_DUPLEX_FULL 1
+#define QEDE_DUPLEX_HALF 2
+#define QEDE_DUPLEX_UNKNOWN 0xff
+
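+/* Bit positions mirror the Linux ethtool SUPPORTED_Autoneg / SUPPORTED_Pause flags */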
+#define QEDE_SUPPORTED_AUTONEG (1 << 6)
+#define QEDE_SUPPORTED_PAUSE (1 << 13)
+
+#define QEDE_INIT_QDEV(eth_dev) (eth_dev->data->dev_private)
+
+#define QEDE_INIT_EDEV(adapter) (&((struct qede_dev *)adapter)->edev)
+
+#define QEDE_INIT(eth_dev) { \
+ struct qede_dev *qdev = eth_dev->data->dev_private; \
+ struct ecore_dev *edev = &qdev->edev; \
+}
+
+/************* QLogic 10G/25G/40G/50G/100G vendor/devices ids *************/
+#define PCI_VENDOR_ID_QLOGIC 0x1077
+
+#define CHIP_NUM_57980E 0x1634
+#define CHIP_NUM_57980S 0x1629
+#define CHIP_NUM_VF 0x1630
+#define CHIP_NUM_57980S_40 0x1634
+#define CHIP_NUM_57980S_25 0x1656
+#define CHIP_NUM_57980S_IOV 0x1664
+#define CHIP_NUM_57980S_100 0x1644
+#define CHIP_NUM_57980S_50 0x1654
+#define CHIP_NUM_AH_50G 0x8070
+#define CHIP_NUM_AH_10G 0x8071
+#define CHIP_NUM_AH_40G 0x8072
+#define CHIP_NUM_AH_25G 0x8073
+#define CHIP_NUM_AH_IOV 0x8090
+
+#define PCI_DEVICE_ID_QLOGIC_NX2_57980E CHIP_NUM_57980E
+#define PCI_DEVICE_ID_QLOGIC_NX2_57980S CHIP_NUM_57980S
+#define PCI_DEVICE_ID_QLOGIC_NX2_VF CHIP_NUM_VF
+#define PCI_DEVICE_ID_QLOGIC_57980S_40 CHIP_NUM_57980S_40
+#define PCI_DEVICE_ID_QLOGIC_57980S_25 CHIP_NUM_57980S_25
+#define PCI_DEVICE_ID_QLOGIC_57980S_IOV CHIP_NUM_57980S_IOV
+#define PCI_DEVICE_ID_QLOGIC_57980S_100 CHIP_NUM_57980S_100
+#define PCI_DEVICE_ID_QLOGIC_57980S_50 CHIP_NUM_57980S_50
+#define PCI_DEVICE_ID_QLOGIC_AH_50G CHIP_NUM_AH_50G
+#define PCI_DEVICE_ID_QLOGIC_AH_10G CHIP_NUM_AH_10G
+#define PCI_DEVICE_ID_QLOGIC_AH_40G CHIP_NUM_AH_40G
+#define PCI_DEVICE_ID_QLOGIC_AH_25G CHIP_NUM_AH_25G
+#define PCI_DEVICE_ID_QLOGIC_AH_IOV CHIP_NUM_AH_IOV
+
+
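+/* Legacy Linux default VXLAN UDP port (the IANA-assigned port is 4789) */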
+#define QEDE_VXLAN_DEF_PORT 8472
+
+extern char fw_file[];
+
+/* Number of PF connections - 32 RX + 32 TX */
+#define QEDE_PF_NUM_CONNS (64)
+
+/* Maximum number of flowdir filters */
+#define QEDE_RFS_MAX_FLTR (256)
+
+/* Port/function states */
+enum qede_dev_state {
+ QEDE_DEV_INIT, /* Init the chip and Slowpath */
+ QEDE_DEV_CONFIG, /* Create Vport/Fastpath resources */
+ QEDE_DEV_START, /* Start RX/TX queues, enable traffic */
+ QEDE_DEV_STOP, /* Deactivate vport and stop traffic */
+};
+
+struct qede_vlan_entry {
+ SLIST_ENTRY(qede_vlan_entry) list;
+ uint16_t vid;
+};
+
+struct qede_mcast_entry {
+ struct ether_addr mac;
+ SLIST_ENTRY(qede_mcast_entry) list;
+};
+
+struct qede_ucast_entry {
+ struct ether_addr mac;
+ uint16_t vlan;
+ uint16_t vni;
+ SLIST_ENTRY(qede_ucast_entry) list;
+};
+
+struct qede_fdir_entry {
+ uint32_t soft_id; /* unused for now */
+ uint16_t pkt_len; /* actual packet length to match */
+ uint16_t rx_queue; /* queue to be steered to */
+ const struct rte_memzone *mz; /* mz used to hold L2 frame */
+ SLIST_ENTRY(qede_fdir_entry) list;
+};
+
+struct qede_fdir_info {
+ struct ecore_arfs_config_params arfs;
+ uint16_t filter_count;
+	SLIST_HEAD(fdir_list_head, qede_fdir_entry) fdir_list_head;
+};
+
+
+/*
+ * Structure to store private data for each port.
+ */
+struct qede_dev {
+ struct ecore_dev edev;
+ uint8_t protocol;
+ const struct qed_eth_ops *ops;
+ struct qed_dev_eth_info dev_info;
+ struct ecore_sb_info *sb_array;
+ struct qede_fastpath *fp_array;
+ uint8_t num_tc;
+ uint16_t mtu;
+ bool rss_enable;
+ struct rte_eth_rss_conf rss_conf;
+ uint16_t rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
+ uint64_t rss_hf;
+ uint8_t rss_key_len;
+ bool enable_lro;
+ uint16_t num_queues;
+ uint8_t fp_num_tx;
+ uint8_t fp_num_rx;
+ enum qede_dev_state state;
+	SLIST_HEAD(vlan_list_head, qede_vlan_entry) vlan_list_head;
+ uint16_t configured_vlans;
+ bool accept_any_vlan;
+ struct ether_addr primary_mac;
+ SLIST_HEAD(mc_list_head, qede_mcast_entry) mc_list_head;
+ uint16_t num_mc_addr;
+ SLIST_HEAD(uc_list_head, qede_ucast_entry) uc_list_head;
+ uint16_t num_uc_addr;
+ bool handle_hw_err;
+ uint16_t num_tunn_filters;
+ uint16_t vxlan_filter_type;
+ struct qede_fdir_info fdir_info;
+ bool vlan_strip_flg;
+ char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
+};
+
+/* Non-static functions */
+int qede_config_rss(struct rte_eth_dev *eth_dev);
+
+int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf);
+
+int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+
+int qed_fill_eth_dev_info(struct ecore_dev *edev,
+ struct qed_dev_eth_info *info);
+int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up);
+
+int qede_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type type,
+ enum rte_filter_op op, void *arg);
+
+int qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
+ enum rte_filter_op filter_op, void *arg);
+
+int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
+ enum rte_filter_op filter_op, void *arg);
+
+int qede_check_fdir_support(struct rte_eth_dev *eth_dev);
+
+uint16_t qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fdir_filter *fdir,
+ void *buff,
+ struct ecore_arfs_config_params *params);
+
+void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev);
+
+#endif /* _QEDE_ETHDEV_H_ */
diff --git a/src/seastar/dpdk/drivers/net/qede/qede_fdir.c b/src/seastar/dpdk/drivers/net/qede/qede_fdir.c
new file mode 100644
index 00000000..7bd5c5d6
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/qede_fdir.c
@@ -0,0 +1,469 @@
+/*
+ * Copyright (c) 2017 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include <rte_udp.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+#include <rte_errno.h>
+
+#include "qede_ethdev.h"
+
+#define IP_VERSION (0x40)
+#define IP_HDRLEN (0x5)
+#define QEDE_FDIR_IP_DEFAULT_VERSION_IHL (IP_VERSION | IP_HDRLEN)
+#define QEDE_FDIR_TCP_DEFAULT_DATAOFF (0x50)
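+/* 0x45 = IPv4 with a 5 x 32-bit word (20-byte) header; 0x50 = TCP data offset of 5 words */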
+#define QEDE_FDIR_IPV4_DEF_TTL (64)
+
+/* Sum of length of header types of L2, L3, L4.
+ * L2 : ether_hdr + vlan_hdr + vxlan_hdr
+ * L3 : ipv6_hdr
+ * L4 : tcp_hdr
+ */
+#define QEDE_MAX_FDIR_PKT_LEN (86)
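+/* 14 (ether) + 4 (vlan) + 8 (vxlan) + 40 (ipv6) + 20 (tcp) = 86 bytes */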
+
+#ifndef IPV6_ADDR_LEN
+#define IPV6_ADDR_LEN (16)
+#endif
+
+#define QEDE_VALID_FLOW(flow_type) \
+ ((flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_TCP || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_UDP || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_TCP || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_UDP)
+
+/* Note: Flowdir support is only partial.
+ * For example, drop_queue, FDIR masks and flex_conf are not supported.
+ * Parameters like pballoc/status fields are irrelevant here.
+ */
+int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;
+
+ /* check FDIR modes */
+ switch (fdir->mode) {
+ case RTE_FDIR_MODE_NONE:
+ qdev->fdir_info.arfs.arfs_enable = false;
+ DP_INFO(edev, "flowdir is disabled\n");
+ break;
+ case RTE_FDIR_MODE_PERFECT:
+ if (edev->num_hwfns > 1) {
+ DP_ERR(edev, "flowdir is not supported in 100G mode\n");
+ qdev->fdir_info.arfs.arfs_enable = false;
+ return -ENOTSUP;
+ }
+ qdev->fdir_info.arfs.arfs_enable = true;
+ DP_INFO(edev, "flowdir is enabled\n");
+ break;
+ case RTE_FDIR_MODE_PERFECT_TUNNEL:
+ case RTE_FDIR_MODE_SIGNATURE:
+ case RTE_FDIR_MODE_PERFECT_MAC_VLAN:
+ DP_ERR(edev, "Unsupported flowdir mode %d\n", fdir->mode);
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
+void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct qede_fdir_entry *tmp = NULL;
+
+ SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
+ if (tmp) {
+ if (tmp->mz)
+ rte_memzone_free(tmp->mz);
+ SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
+ qede_fdir_entry, list);
+ rte_free(tmp);
+ }
+ }
+}
+
+static int
+qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fdir_filter *fdir_filter,
+ bool add)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
+ struct qede_fdir_entry *tmp = NULL;
+ struct qede_fdir_entry *fdir = NULL;
+ const struct rte_memzone *mz;
+ struct ecore_hwfn *p_hwfn;
+ enum _ecore_status_t rc;
+ uint16_t pkt_len;
+ void *pkt;
+
+ if (add) {
+ if (qdev->fdir_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
+ DP_ERR(edev, "Reached max flowdir filter limit\n");
+ return -EINVAL;
+ }
+ fdir = rte_malloc(NULL, sizeof(struct qede_fdir_entry),
+ RTE_CACHE_LINE_SIZE);
+ if (!fdir) {
+ DP_ERR(edev, "Did not allocate memory for fdir\n");
+ return -ENOMEM;
+ }
+ }
+	/* soft_id could have been used as the memzone name, but soft_id is
+	 * not currently used, so it has no significance; a timestamp is used
+	 * instead.
+	 */
+ snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
+ (unsigned long)rte_get_timer_cycles());
+ mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN,
+ SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
+ if (!mz) {
+ DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n",
+ rte_strerror(rte_errno));
+ rc = -rte_errno;
+ goto err1;
+ }
+
+ pkt = mz->addr;
+ memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN);
+ pkt_len = qede_fdir_construct_pkt(eth_dev, fdir_filter, pkt,
+ &qdev->fdir_info.arfs);
+ if (pkt_len == 0) {
+ rc = -EINVAL;
+ goto err2;
+ }
+ DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name);
+ if (add) {
+ SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
+ if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
+ DP_ERR(edev, "flowdir filter exist\n");
+ rc = -EEXIST;
+ goto err2;
+ }
+ }
+ } else {
+ SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
+ if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0)
+ break;
+ }
+ if (!tmp) {
+ DP_ERR(edev, "flowdir filter does not exist\n");
+ rc = -EEXIST;
+ goto err2;
+ }
+ }
+ p_hwfn = ECORE_LEADING_HWFN(edev);
+ if (add) {
+ if (!qdev->fdir_info.arfs.arfs_enable) {
+ /* Force update */
+ eth_dev->data->dev_conf.fdir_conf.mode =
+ RTE_FDIR_MODE_PERFECT;
+ qdev->fdir_info.arfs.arfs_enable = true;
+ DP_INFO(edev, "Force enable flowdir in perfect mode\n");
+ }
+ /* Enable ARFS searcher with updated flow_types */
+ ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
+ &qdev->fdir_info.arfs);
+ }
+ /* configure filter with ECORE_SPQ_MODE_EBLOCK */
+ rc = ecore_configure_rfs_ntuple_filter(p_hwfn, p_hwfn->p_arfs_ptt, NULL,
+ (dma_addr_t)mz->phys_addr,
+ pkt_len,
+ fdir_filter->action.rx_queue,
+ 0, add);
+ if (rc == ECORE_SUCCESS) {
+ if (add) {
+ fdir->rx_queue = fdir_filter->action.rx_queue;
+ fdir->pkt_len = pkt_len;
+ fdir->mz = mz;
+ SLIST_INSERT_HEAD(&qdev->fdir_info.fdir_list_head,
+ fdir, list);
+ qdev->fdir_info.filter_count++;
+ DP_INFO(edev, "flowdir filter added, count = %d\n",
+ qdev->fdir_info.filter_count);
+ } else {
+ rte_memzone_free(tmp->mz);
+ SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
+ qede_fdir_entry, list);
+			rte_free(tmp); /* free the deleted filter node */
+			rte_memzone_free(mz); /* free the temporary memzone used for lookup */
+ qdev->fdir_info.filter_count--;
+ DP_INFO(edev, "Fdir filter deleted, count = %d\n",
+ qdev->fdir_info.filter_count);
+ }
+ } else {
+ DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n",
+ rc, qdev->fdir_info.filter_count);
+ }
+
+ /* Disable ARFS searcher if there are no more filters */
+ if (qdev->fdir_info.filter_count == 0) {
+ memset(&qdev->fdir_info.arfs, 0,
+ sizeof(struct ecore_arfs_config_params));
+ DP_INFO(edev, "Disabling flowdir\n");
+ qdev->fdir_info.arfs.arfs_enable = false;
+ ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
+ &qdev->fdir_info.arfs);
+ }
+ return 0;
+
+err2:
+ rte_memzone_free(mz);
+err1:
+ if (add)
+ rte_free(fdir);
+ return rc;
+}
+
+static int
+qede_fdir_filter_add(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fdir_filter *fdir,
+ bool add)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+ if (!QEDE_VALID_FLOW(fdir->input.flow_type)) {
+ DP_ERR(edev, "invalid flow_type input\n");
+ return -EINVAL;
+ }
+
+ if (fdir->action.rx_queue >= QEDE_RSS_COUNT(qdev)) {
+ DP_ERR(edev, "invalid queue number %u\n",
+ fdir->action.rx_queue);
+ return -EINVAL;
+ }
+
+ if (fdir->input.flow_ext.is_vf) {
+ DP_ERR(edev, "flowdir is not supported over VF\n");
+ return -EINVAL;
+ }
+
+ return qede_config_cmn_fdir_filter(eth_dev, fdir, add);
+}
+
+/* Fills the L3/L4 headers and returns the actual length of flowdir packet */
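+/* The caller zeroes the buffer first, so the Ethernet MAC addresses in the template remain all-zero. */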
+uint16_t
+qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fdir_filter *fdir,
+ void *buff,
+ struct ecore_arfs_config_params *params)
+
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ uint16_t *ether_type;
+ uint8_t *raw_pkt;
+ struct rte_eth_fdir_input *input;
+ static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
+ struct ipv4_hdr *ip;
+ struct ipv6_hdr *ip6;
+ struct udp_hdr *udp;
+ struct tcp_hdr *tcp;
+ uint16_t len;
+ static const uint8_t next_proto[] = {
+ [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
+ [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
+ };
+ raw_pkt = (uint8_t *)buff;
+ input = &fdir->input;
+ DP_INFO(edev, "flow_type %d\n", input->flow_type);
+
+ len = 2 * sizeof(struct ether_addr);
+ raw_pkt += 2 * sizeof(struct ether_addr);
+ if (input->flow_ext.vlan_tci) {
+ DP_INFO(edev, "adding VLAN header\n");
+ rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
+ rte_memcpy(raw_pkt + sizeof(uint16_t),
+ &input->flow_ext.vlan_tci,
+ sizeof(uint16_t));
+ raw_pkt += sizeof(vlan_frame);
+ len += sizeof(vlan_frame);
+ }
+ ether_type = (uint16_t *)raw_pkt;
+ raw_pkt += sizeof(uint16_t);
+ len += sizeof(uint16_t);
+
+ switch (input->flow_type) {
+ case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+ /* fill the common ip header */
+ ip = (struct ipv4_hdr *)raw_pkt;
+ *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
+ ip->total_length = sizeof(struct ipv4_hdr);
+ ip->next_proto_id = input->flow.ip4_flow.proto ?
+ input->flow.ip4_flow.proto :
+ next_proto[input->flow_type];
+ ip->time_to_live = input->flow.ip4_flow.ttl ?
+ input->flow.ip4_flow.ttl :
+ QEDE_FDIR_IPV4_DEF_TTL;
+ ip->type_of_service = input->flow.ip4_flow.tos;
+ ip->dst_addr = input->flow.ip4_flow.dst_ip;
+ ip->src_addr = input->flow.ip4_flow.src_ip;
+ len += sizeof(struct ipv4_hdr);
+ params->ipv4 = true;
+
+ raw_pkt = (uint8_t *)buff;
+ /* UDP */
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
+ udp = (struct udp_hdr *)(raw_pkt + len);
+ udp->dst_port = input->flow.udp4_flow.dst_port;
+ udp->src_port = input->flow.udp4_flow.src_port;
+ udp->dgram_len = sizeof(struct udp_hdr);
+ len += sizeof(struct udp_hdr);
+ /* adjust ip total_length */
+ ip->total_length += sizeof(struct udp_hdr);
+ params->udp = true;
+ } else { /* TCP */
+ tcp = (struct tcp_hdr *)(raw_pkt + len);
+ tcp->src_port = input->flow.tcp4_flow.src_port;
+ tcp->dst_port = input->flow.tcp4_flow.dst_port;
+ tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
+ len += sizeof(struct tcp_hdr);
+ /* adjust ip total_length */
+ ip->total_length += sizeof(struct tcp_hdr);
+ params->tcp = true;
+ }
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+ ip6 = (struct ipv6_hdr *)raw_pkt;
+ *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ ip6->proto = input->flow.ipv6_flow.proto ?
+ input->flow.ipv6_flow.proto :
+ next_proto[input->flow_type];
+		rte_memcpy(&ip6->src_addr, &input->flow.ipv6_flow.src_ip,
+			   IPV6_ADDR_LEN);
+		rte_memcpy(&ip6->dst_addr, &input->flow.ipv6_flow.dst_ip,
+			   IPV6_ADDR_LEN);
+ len += sizeof(struct ipv6_hdr);
+
+ raw_pkt = (uint8_t *)buff;
+ /* UDP */
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
+ udp = (struct udp_hdr *)(raw_pkt + len);
+			udp->src_port = input->flow.udp6_flow.src_port;
+			udp->dst_port = input->flow.udp6_flow.dst_port;
+ len += sizeof(struct udp_hdr);
+ params->udp = true;
+ } else { /* TCP */
+ tcp = (struct tcp_hdr *)(raw_pkt + len);
+			tcp->src_port = input->flow.tcp6_flow.src_port;
+			tcp->dst_port = input->flow.tcp6_flow.dst_port;
+ tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
+ len += sizeof(struct tcp_hdr);
+ params->tcp = true;
+ }
+ break;
+ default:
+ DP_ERR(edev, "Unsupported flow_type %u\n",
+ input->flow_type);
+ return 0;
+ }
+
+ return len;
+}
+
+int
+qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_fdir_filter *fdir;
+ int ret;
+
+ fdir = (struct rte_eth_fdir_filter *)arg;
+ switch (filter_op) {
+ case RTE_ETH_FILTER_NOP:
+ /* Typically used to query flowdir support */
+ if (edev->num_hwfns > 1) {
+ DP_ERR(edev, "flowdir is not supported in 100G mode\n");
+ return -ENOTSUP;
+ }
+ return 0; /* means supported */
+ case RTE_ETH_FILTER_ADD:
+ ret = qede_fdir_filter_add(eth_dev, fdir, 1);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = qede_fdir_filter_add(eth_dev, fdir, 0);
+ break;
+ case RTE_ETH_FILTER_FLUSH:
+ case RTE_ETH_FILTER_UPDATE:
+ case RTE_ETH_FILTER_INFO:
+ return -ENOTSUP;
+ default:
+ DP_ERR(edev, "unknown operation %u", filter_op);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_ntuple_filter *ntuple;
+ struct rte_eth_fdir_filter fdir_entry;
+ struct rte_eth_tcpv4_flow *tcpv4_flow;
+ struct rte_eth_udpv4_flow *udpv4_flow;
+ bool add = false;
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_NOP:
+ /* Typically used to query fdir support */
+ if (edev->num_hwfns > 1) {
+ DP_ERR(edev, "flowdir is not supported in 100G mode\n");
+ return -ENOTSUP;
+ }
+ return 0; /* means supported */
+ case RTE_ETH_FILTER_ADD:
+ add = true;
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ break;
+ case RTE_ETH_FILTER_INFO:
+ case RTE_ETH_FILTER_GET:
+ case RTE_ETH_FILTER_UPDATE:
+ case RTE_ETH_FILTER_FLUSH:
+ case RTE_ETH_FILTER_SET:
+ case RTE_ETH_FILTER_STATS:
+ case RTE_ETH_FILTER_OP_MAX:
+ DP_ERR(edev, "Unsupported filter_op %d\n", filter_op);
+ return -ENOTSUP;
+ }
+ ntuple = (struct rte_eth_ntuple_filter *)arg;
+ /* Internally convert ntuple to fdir entry */
+ memset(&fdir_entry, 0, sizeof(fdir_entry));
+ if (ntuple->proto == IPPROTO_TCP) {
+ fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+ tcpv4_flow = &fdir_entry.input.flow.tcp4_flow;
+ tcpv4_flow->ip.src_ip = ntuple->src_ip;
+ tcpv4_flow->ip.dst_ip = ntuple->dst_ip;
+ tcpv4_flow->ip.proto = IPPROTO_TCP;
+ tcpv4_flow->src_port = ntuple->src_port;
+ tcpv4_flow->dst_port = ntuple->dst_port;
+ } else {
+ fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+ udpv4_flow = &fdir_entry.input.flow.udp4_flow;
+ udpv4_flow->ip.src_ip = ntuple->src_ip;
+ udpv4_flow->ip.dst_ip = ntuple->dst_ip;
+		udpv4_flow->ip.proto = IPPROTO_UDP;
+ udpv4_flow->src_port = ntuple->src_port;
+ udpv4_flow->dst_port = ntuple->dst_port;
+ }
+ return qede_config_cmn_fdir_filter(eth_dev, &fdir_entry, add);
+}
diff --git a/src/seastar/dpdk/drivers/net/qede/qede_if.h b/src/seastar/dpdk/drivers/net/qede/qede_if.h
new file mode 100644
index 00000000..405c525e
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/qede_if.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef _QEDE_IF_H
+#define _QEDE_IF_H
+
+#include "qede_ethdev.h"
+
+/* forward */
+struct ecore_dev;
+struct qed_sb_info;
+struct qed_pf_params;
+enum ecore_int_mode;
+
+struct qed_dev_info {
+ uint8_t num_hwfns;
+ uint8_t hw_mac[ETHER_ADDR_LEN];
+ bool is_mf_default;
+
+ /* FW version */
+ uint16_t fw_major;
+ uint16_t fw_minor;
+ uint16_t fw_rev;
+ uint16_t fw_eng;
+
+ /* MFW version */
+ uint32_t mfw_rev;
+#define QED_MFW_VERSION_0_MASK 0x000000FF
+#define QED_MFW_VERSION_0_OFFSET 0
+#define QED_MFW_VERSION_1_MASK 0x0000FF00
+#define QED_MFW_VERSION_1_OFFSET 8
+#define QED_MFW_VERSION_2_MASK 0x00FF0000
+#define QED_MFW_VERSION_2_OFFSET 16
+#define QED_MFW_VERSION_3_MASK 0xFF000000
+#define QED_MFW_VERSION_3_OFFSET 24
+
+ uint32_t flash_size;
+ uint8_t mf_mode;
+ bool tx_switching;
+ u16 mtu;
+
+ /* Out param for qede */
+ bool vxlan_enable;
+ bool gre_enable;
+ bool geneve_enable;
+};
+
+enum qed_sb_type {
+ QED_SB_TYPE_L2_QUEUE,
+ QED_SB_TYPE_STORAGE,
+ QED_SB_TYPE_CNQ,
+};
+
+enum qed_protocol {
+ QED_PROTOCOL_ETH,
+};
+
+struct qed_link_params {
+ bool link_up;
+
+#define QED_LINK_OVERRIDE_SPEED_AUTONEG (1 << 0)
+#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS (1 << 1)
+#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED (1 << 2)
+#define QED_LINK_OVERRIDE_PAUSE_CONFIG (1 << 3)
+ uint32_t override_flags;
+ bool autoneg;
+ uint32_t adv_speeds;
+ uint32_t forced_speed;
+#define QED_LINK_PAUSE_AUTONEG_ENABLE (1 << 0)
+#define QED_LINK_PAUSE_RX_ENABLE (1 << 1)
+#define QED_LINK_PAUSE_TX_ENABLE (1 << 2)
+ uint32_t pause_config;
+};
+
+struct qed_link_output {
+ bool link_up;
+ uint32_t supported_caps; /* In SUPPORTED defs */
+ uint32_t advertised_caps; /* In ADVERTISED defs */
+ uint32_t lp_caps; /* In ADVERTISED defs */
+ uint32_t speed; /* In Mb/s */
+ uint32_t adv_speed; /* Speed mask */
+ uint8_t duplex; /* In DUPLEX defs */
+ uint8_t port; /* In PORT defs */
+ bool autoneg;
+ uint32_t pause_config;
+};
+
+struct qed_slowpath_params {
+ uint32_t int_mode;
+ uint8_t drv_major;
+ uint8_t drv_minor;
+ uint8_t drv_rev;
+ uint8_t drv_eng;
+ uint8_t name[NAME_SIZE];
+};
+
+#define ILT_PAGE_SIZE_TCFC 0x8000 /* 32KB */
+
+struct qed_eth_tlvs {
+ u16 feat_flags;
+ u8 mac[3][ETH_ALEN];
+ u16 lso_maxoff;
+ u16 lso_minseg;
+ bool prom_mode;
+ u16 num_txqs;
+ u16 num_rxqs;
+ u16 num_netqs;
+ u16 flex_vlan;
+ u32 tcp4_offloads;
+ u32 tcp6_offloads;
+ u16 tx_avg_qdepth;
+ u16 rx_avg_qdepth;
+ u8 txqs_empty;
+ u8 rxqs_empty;
+ u8 num_txqs_full;
+ u8 num_rxqs_full;
+};
+
+struct qed_tunn_update_params {
+ unsigned long tunn_mode_update_mask;
+ unsigned long tunn_mode;
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+ u8 update_rx_pf_clss;
+ u8 update_tx_pf_clss;
+ u8 update_vxlan_udp_port;
+ u8 update_geneve_udp_port;
+ u8 tunn_clss_vxlan;
+ u8 tunn_clss_l2geneve;
+ u8 tunn_clss_ipgeneve;
+ u8 tunn_clss_l2gre;
+ u8 tunn_clss_ipgre;
+};
+
+struct qed_common_cb_ops {
+ void (*link_update)(void *dev, struct qed_link_output *link);
+ void (*get_tlv_data)(void *dev, struct qed_eth_tlvs *data);
+};
+
+struct qed_selftest_ops {
+/**
+ * @brief registers - Perform register tests
+ *
+ * @param edev
+ *
+ * @return 0 on success, error otherwise.
+ */
+ int (*registers)(struct ecore_dev *edev);
+};
+
+struct qed_common_ops {
+ int (*probe)(struct ecore_dev *edev,
+ struct rte_pci_device *pci_dev,
+ enum qed_protocol protocol,
+ uint32_t dp_module, uint8_t dp_level, bool is_vf);
+ void (*set_name)(struct ecore_dev *edev, char name[]);
+ enum _ecore_status_t
+ (*chain_alloc)(struct ecore_dev *edev,
+ enum ecore_chain_use_mode
+ intended_use,
+ enum ecore_chain_mode mode,
+ enum ecore_chain_cnt_type cnt_type,
+ uint32_t num_elems,
+ osal_size_t elem_size,
+ struct ecore_chain *p_chain,
+ struct ecore_chain_ext_pbl *ext_pbl);
+
+ void (*chain_free)(struct ecore_dev *edev,
+ struct ecore_chain *p_chain);
+
+ void (*get_link)(struct ecore_dev *edev,
+ struct qed_link_output *if_link);
+ int (*set_link)(struct ecore_dev *edev,
+ struct qed_link_params *params);
+
+ int (*drain)(struct ecore_dev *edev);
+
+ void (*remove)(struct ecore_dev *edev);
+
+ int (*slowpath_stop)(struct ecore_dev *edev);
+
+ void (*update_pf_params)(struct ecore_dev *edev,
+ struct ecore_pf_params *params);
+
+ int (*slowpath_start)(struct ecore_dev *edev,
+ struct qed_slowpath_params *params);
+
+ int (*set_fp_int)(struct ecore_dev *edev, uint16_t cnt);
+
+ uint32_t (*sb_init)(struct ecore_dev *edev,
+ struct ecore_sb_info *sb_info,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr,
+ uint16_t sb_id, enum qed_sb_type type);
+
+ int (*get_sb_info)(struct ecore_dev *edev,
+ struct ecore_sb_info *sb, u16 qid,
+ struct ecore_sb_info_dbg *sb_dbg);
+
+ bool (*can_link_change)(struct ecore_dev *edev);
+
+ void (*update_msglvl)(struct ecore_dev *edev,
+ uint32_t dp_module, uint8_t dp_level);
+
+ int (*send_drv_state)(struct ecore_dev *edev, bool active);
+};
+
+#endif /* _QEDE_IF_H */
diff --git a/src/seastar/dpdk/drivers/net/qede/qede_logs.h b/src/seastar/dpdk/drivers/net/qede/qede_logs.h
new file mode 100644
index 00000000..25c14d8b
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/qede_logs.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef _QEDE_LOGS_H_
+#define _QEDE_LOGS_H_
+
+#define DP_ERR(p_dev, fmt, ...) \
+ rte_log(RTE_LOG_ERR, RTE_LOGTYPE_PMD, \
+ "[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ (p_dev)->name ? (p_dev)->name : "", \
+ ##__VA_ARGS__)
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
+#define DP_NOTICE(p_dev, is_assert, fmt, ...) \
+ rte_log(RTE_LOG_NOTICE, RTE_LOGTYPE_PMD,\
+ "[QEDE PMD: (%s)]%s:" fmt, \
+ (p_dev)->name ? (p_dev)->name : "", \
+ __func__, \
+ ##__VA_ARGS__)
+#else
+#define DP_NOTICE(p_dev, is_assert, fmt, ...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
+#define DP_INFO(p_dev, fmt, ...) \
+ rte_log(RTE_LOG_INFO, RTE_LOGTYPE_PMD, \
+ "[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ (p_dev)->name ? (p_dev)->name : "", \
+ ##__VA_ARGS__)
+#else
+#define DP_INFO(p_dev, fmt, ...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_DRIVER
+#define DP_VERBOSE(p_dev, module, fmt, ...) \
+do { \
+ if ((p_dev)->dp_module & module) \
+ rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_PMD, \
+ "[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ (p_dev)->name ? (p_dev)->name : "", \
+ ##__VA_ARGS__); \
+} while (0)
+#else
+#define DP_VERBOSE(p_dev, module, fmt, ...) do { } while (0)
+#endif
+
+#define PMD_INIT_LOG(level, edev, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, RTE_LOGTYPE_PMD, \
+ "[qede_pmd: %s] %s() " fmt "\n", \
+ (edev)->name, __func__, ##args)
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
+#define PMD_INIT_FUNC_TRACE(edev) PMD_INIT_LOG(DEBUG, edev, " >>")
+#else
+#define PMD_INIT_FUNC_TRACE(edev) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+#define PMD_TX_LOG(level, q, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): port=%u queue=%u " fmt "\n", \
+ __func__, q->port_id, q->queue_id, ## args)
+#else
+#define PMD_TX_LOG(level, q, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+#define PMD_RX_LOG(level, q, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): port=%u queue=%u " fmt "\n", \
+ __func__, q->port_id, q->queue_id, ## args)
+#else
+#define PMD_RX_LOG(level, q, fmt, args...) do { } while (0)
+#endif
+
+#endif /* _QEDE_LOGS_H_ */
diff --git a/src/seastar/dpdk/drivers/net/qede/qede_main.c b/src/seastar/dpdk/drivers/net/qede/qede_main.c
new file mode 100644
index 00000000..712c03fd
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/qede_main.c
@@ -0,0 +1,739 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include <limits.h>
+#include <time.h>
+#include <rte_alarm.h>
+
+#include "qede_ethdev.h"
+
+/* Alarm timeout. */
+#define QEDE_ALARM_TIMEOUT_US 100000
+
+/* Global variable to hold absolute path of fw file */
+char fw_file[PATH_MAX];
+
+const char *QEDE_DEFAULT_FIRMWARE =
+ "/lib/firmware/qed/qed_init_values-8.18.9.0.bin";
+
+static void
+qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params)
+{
+ int i;
+
+ for (i = 0; i < edev->num_hwfns; i++) {
+ struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
+ p_hwfn->pf_params = *params;
+ }
+}
+
+static void qed_init_pci(struct ecore_dev *edev, struct rte_pci_device *pci_dev)
+{
+ edev->regview = pci_dev->mem_resource[0].addr;
+ edev->doorbells = pci_dev->mem_resource[2].addr;
+}
+
+static int
+qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev,
+ enum qed_protocol protocol, uint32_t dp_module,
+ uint8_t dp_level, bool is_vf)
+{
+ struct ecore_hw_prepare_params hw_prepare_params;
+ struct qede_dev *qdev = (struct qede_dev *)edev;
+ int rc;
+
+ ecore_init_struct(edev);
+ edev->drv_type = DRV_ID_DRV_TYPE_LINUX;
+ qdev->protocol = protocol;
+
+ if (is_vf)
+ edev->b_is_vf = true;
+
+ ecore_init_dp(edev, dp_module, dp_level, NULL);
+ qed_init_pci(edev, pci_dev);
+
+ memset(&hw_prepare_params, 0, sizeof(hw_prepare_params));
+ hw_prepare_params.personality = ECORE_PCI_ETH;
+ hw_prepare_params.drv_resc_alloc = false;
+ hw_prepare_params.chk_reg_fifo = false;
+ hw_prepare_params.initiate_pf_flr = true;
+ hw_prepare_params.epoch = (u32)time(NULL);
+ rc = ecore_hw_prepare(edev, &hw_prepare_params);
+ if (rc) {
+ DP_ERR(edev, "hw prepare failed\n");
+ return rc;
+ }
+
+ return rc;
+}
+
+static int qed_nic_setup(struct ecore_dev *edev)
+{
+ int rc;
+
+ rc = ecore_resc_alloc(edev);
+ if (rc)
+ return rc;
+
+ DP_INFO(edev, "Allocated qed resources\n");
+ ecore_resc_setup(edev);
+
+ return rc;
+}
+
+#ifdef CONFIG_ECORE_ZIPPED_FW
+static int qed_alloc_stream_mem(struct ecore_dev *edev)
+{
+ int i;
+
+ for_each_hwfn(edev, i) {
+ struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
+
+ p_hwfn->stream = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(*p_hwfn->stream));
+ if (!p_hwfn->stream)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void qed_free_stream_mem(struct ecore_dev *edev)
+{
+ int i;
+
+ for_each_hwfn(edev, i) {
+ struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
+
+ if (!p_hwfn->stream)
+ return;
+
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->stream);
+ }
+}
+#endif
+
+#ifdef CONFIG_ECORE_BINARY_FW
+static int qed_load_firmware_data(struct ecore_dev *edev)
+{
+ int fd;
+ struct stat st;
+ const char *fw = RTE_LIBRTE_QEDE_FW;
+
+ if (strcmp(fw, "") == 0)
+ strcpy(fw_file, QEDE_DEFAULT_FIRMWARE);
+ else
+ strcpy(fw_file, fw);
+
+ fd = open(fw_file, O_RDONLY);
+ if (fd < 0) {
+ DP_NOTICE(edev, false, "Can't open firmware file\n");
+ return -ENOENT;
+ }
+
+ if (fstat(fd, &st) < 0) {
+ DP_NOTICE(edev, false, "Can't stat firmware file\n");
+ close(fd);
+ return -1;
+ }
+
+ edev->firmware = rte_zmalloc("qede_fw", st.st_size,
+ RTE_CACHE_LINE_SIZE);
+ if (!edev->firmware) {
+ DP_NOTICE(edev, false, "Can't allocate memory for firmware\n");
+ close(fd);
+ return -ENOMEM;
+ }
+
+ if (read(fd, edev->firmware, st.st_size) != st.st_size) {
+ DP_NOTICE(edev, false, "Can't read firmware data\n");
+ close(fd);
+ return -1;
+ }
+
+ edev->fw_len = st.st_size;
+ if (edev->fw_len < 104) {
+ DP_NOTICE(edev, false, "Invalid fw size: %" PRIu64 "\n",
+ edev->fw_len);
+ close(fd);
+ return -EINVAL;
+ }
+
+ close(fd);
+ return 0;
+}
+#endif
+
+static void qed_handle_bulletin_change(struct ecore_hwfn *hwfn)
+{
+ uint8_t mac[ETH_ALEN], is_mac_exist, is_mac_forced;
+
+ is_mac_exist = ecore_vf_bulletin_get_forced_mac(hwfn, mac,
+ &is_mac_forced);
+ if (is_mac_exist && is_mac_forced)
+ rte_memcpy(hwfn->hw_info.hw_mac_addr, mac, ETH_ALEN);
+
+ /* Always update link configuration according to bulletin */
+ qed_link_update(hwfn);
+}
+
+static void qede_vf_task(void *arg)
+{
+ struct ecore_hwfn *p_hwfn = arg;
+ uint8_t change = 0;
+
+ /* Read the bulletin board, and re-schedule the task */
+ ecore_vf_read_bulletin(p_hwfn, &change);
+ if (change)
+ qed_handle_bulletin_change(p_hwfn);
+
+ rte_eal_alarm_set(QEDE_ALARM_TIMEOUT_US, qede_vf_task, p_hwfn);
+}
+
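+/* For VFs, arm a periodic EAL alarm that polls the PF bulletin board;
+ * there is nothing to schedule for PFs.
+ */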
+static void qed_start_iov_task(struct ecore_dev *edev)
+{
+ struct ecore_hwfn *p_hwfn;
+ int i;
+
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ if (!IS_PF(edev))
+ rte_eal_alarm_set(QEDE_ALARM_TIMEOUT_US, qede_vf_task,
+ p_hwfn);
+ }
+}
+
+static void qed_stop_iov_task(struct ecore_dev *edev)
+{
+ struct ecore_hwfn *p_hwfn;
+ int i;
+
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ if (!IS_PF(edev))
+ rte_eal_alarm_cancel(qede_vf_task, p_hwfn);
+ }
+}
+
+static int qed_slowpath_start(struct ecore_dev *edev,
+ struct qed_slowpath_params *params)
+{
+ const uint8_t *data = NULL;
+ struct ecore_hwfn *hwfn;
+ struct ecore_mcp_drv_version drv_version;
+ struct ecore_hw_init_params hw_init_params;
+ struct ecore_ptt *p_ptt;
+ int rc;
+
+ if (IS_PF(edev)) {
+#ifdef CONFIG_ECORE_BINARY_FW
+ rc = qed_load_firmware_data(edev);
+ if (rc) {
+ DP_ERR(edev, "Failed to find fw file %s\n", fw_file);
+ goto err;
+ }
+#endif
+ hwfn = ECORE_LEADING_HWFN(edev);
+ if (edev->num_hwfns == 1) { /* skip aRFS for 100G device */
+ p_ptt = ecore_ptt_acquire(hwfn);
+ if (p_ptt) {
+ ECORE_LEADING_HWFN(edev)->p_arfs_ptt = p_ptt;
+ } else {
+ DP_ERR(edev, "Failed to acquire PTT for flowdir\n");
+ rc = -ENOMEM;
+ goto err;
+ }
+ }
+ }
+
+ rc = qed_nic_setup(edev);
+ if (rc)
+ goto err;
+
+ /* set int_coalescing_mode */
+ edev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;
+
+#ifdef CONFIG_ECORE_ZIPPED_FW
+ if (IS_PF(edev)) {
+ /* Allocate stream for unzipping */
+ rc = qed_alloc_stream_mem(edev);
+ if (rc) {
+ DP_NOTICE(edev, true,
+ "Failed to allocate stream memory\n");
+ goto err1;
+ }
+ }
+#endif
+
+ qed_start_iov_task(edev);
+
+#ifdef CONFIG_ECORE_BINARY_FW
+ if (IS_PF(edev))
+ data = (const uint8_t *)edev->firmware + sizeof(u32);
+#endif
+
+ /* Start the slowpath */
+ memset(&hw_init_params, 0, sizeof(hw_init_params));
+ hw_init_params.b_hw_start = true;
+ hw_init_params.int_mode = ECORE_INT_MODE_MSIX;
+ hw_init_params.allow_npar_tx_switch = true;
+ hw_init_params.bin_fw_data = data;
+ hw_init_params.mfw_timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT;
+ hw_init_params.avoid_eng_reset = false;
+ rc = ecore_hw_init(edev, &hw_init_params);
+ if (rc) {
+ DP_ERR(edev, "ecore_hw_init failed\n");
+ goto err2;
+ }
+
+ DP_INFO(edev, "HW inited and function started\n");
+
+ if (IS_PF(edev)) {
+ hwfn = ECORE_LEADING_HWFN(edev);
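+		/* Pack the driver version one byte per field:
+		 * major.minor.rev.eng, major in the most significant byte.
+		 */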
+ drv_version.version = (params->drv_major << 24) |
+ (params->drv_minor << 16) |
+ (params->drv_rev << 8) | (params->drv_eng);
+ /* TBD: strlcpy() */
+ strncpy((char *)drv_version.name, (const char *)params->name,
+ MCP_DRV_VER_STR_SIZE - 4);
+ rc = ecore_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
+ &drv_version);
+ if (rc) {
+ DP_NOTICE(edev, true,
+ "Failed sending drv version command\n");
+ goto err3;
+ }
+ }
+
+ ecore_reset_vport_stats(edev);
+
+ return 0;
+
+err3:
+ ecore_hw_stop(edev);
+err2:
+ qed_stop_iov_task(edev);
+#ifdef CONFIG_ECORE_ZIPPED_FW
+ qed_free_stream_mem(edev);
+err1:
+#endif
+ ecore_resc_free(edev);
+err:
+#ifdef CONFIG_ECORE_BINARY_FW
+ if (IS_PF(edev)) {
+ if (edev->firmware)
+ rte_free(edev->firmware);
+ edev->firmware = NULL;
+ }
+#endif
+ qed_stop_iov_task(edev);
+
+ return rc;
+}
+
+static int
+qed_fill_dev_info(struct ecore_dev *edev, struct qed_dev_info *dev_info)
+{
+ struct ecore_ptt *ptt = NULL;
+ struct ecore_tunnel_info *tun = &edev->tunnel;
+
+ memset(dev_info, 0, sizeof(struct qed_dev_info));
+
+ if (tun->vxlan.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN &&
+ tun->vxlan.b_mode_enabled)
+ dev_info->vxlan_enable = true;
+
+ if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
+ tun->l2_gre.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN &&
+ tun->ip_gre.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN)
+ dev_info->gre_enable = true;
+
+ if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
+ tun->l2_geneve.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN &&
+ tun->ip_geneve.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN)
+ dev_info->geneve_enable = true;
+
+ dev_info->num_hwfns = edev->num_hwfns;
+ dev_info->is_mf_default = IS_MF_DEFAULT(&edev->hwfns[0]);
+ dev_info->mtu = ECORE_LEADING_HWFN(edev)->hw_info.mtu;
+
+ rte_memcpy(&dev_info->hw_mac, &edev->hwfns[0].hw_info.hw_mac_addr,
+ ETHER_ADDR_LEN);
+
+ dev_info->fw_major = FW_MAJOR_VERSION;
+ dev_info->fw_minor = FW_MINOR_VERSION;
+ dev_info->fw_rev = FW_REVISION_VERSION;
+ dev_info->fw_eng = FW_ENGINEERING_VERSION;
+
+ if (IS_PF(edev)) {
+ dev_info->mf_mode = edev->mf_mode;
+ dev_info->tx_switching = false;
+
+ ptt = ecore_ptt_acquire(ECORE_LEADING_HWFN(edev));
+ if (ptt) {
+ ecore_mcp_get_mfw_ver(ECORE_LEADING_HWFN(edev), ptt,
+ &dev_info->mfw_rev, NULL);
+
+ ecore_mcp_get_flash_size(ECORE_LEADING_HWFN(edev), ptt,
+ &dev_info->flash_size);
+
+ /* Workaround to allow PHY-read commands for
+ * B0 bringup.
+ */
+ if (ECORE_IS_BB_B0(edev))
+ dev_info->flash_size = 0xffffffff;
+
+ ecore_ptt_release(ECORE_LEADING_HWFN(edev), ptt);
+ }
+ } else {
+ ecore_mcp_get_mfw_ver(ECORE_LEADING_HWFN(edev), ptt,
+ &dev_info->mfw_rev, NULL);
+ }
+
+ return 0;
+}
+
+int
+qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info)
+{
+ uint8_t queues = 0;
+ int i;
+
+ memset(info, 0, sizeof(*info));
+
+ info->num_tc = 1 /* @@@TBD aelior MULTI_COS */;
+
+ if (IS_PF(edev)) {
+ int max_vf_vlan_filters = 0;
+
+ info->num_queues = 0;
+ for_each_hwfn(edev, i)
+ info->num_queues +=
+ FEAT_NUM(&edev->hwfns[i], ECORE_PF_L2_QUE);
+
+ if (edev->p_iov_info)
+ max_vf_vlan_filters = edev->p_iov_info->total_vfs *
+ ECORE_ETH_VF_NUM_VLAN_FILTERS;
+ info->num_vlan_filters = RESC_NUM(&edev->hwfns[0], ECORE_VLAN) -
+ max_vf_vlan_filters;
+
+ rte_memcpy(&info->port_mac, &edev->hwfns[0].hw_info.hw_mac_addr,
+ ETHER_ADDR_LEN);
+ } else {
+ ecore_vf_get_num_rxqs(ECORE_LEADING_HWFN(edev),
+ &info->num_queues);
+ if (edev->num_hwfns > 1) {
+ ecore_vf_get_num_rxqs(&edev->hwfns[1], &queues);
+ info->num_queues += queues;
+ }
+
+ ecore_vf_get_num_vlan_filters(&edev->hwfns[0],
+ (u8 *)&info->num_vlan_filters);
+
+ ecore_vf_get_port_mac(&edev->hwfns[0],
+ (uint8_t *)&info->port_mac);
+
+ info->is_legacy = ecore_vf_get_pre_fp_hsi(&edev->hwfns[0]);
+ }
+
+ qed_fill_dev_info(edev, &info->common);
+
+ if (IS_VF(edev))
+ memset(&info->common.hw_mac, 0, ETHER_ADDR_LEN);
+
+ return 0;
+}
+
+static void qed_set_name(struct ecore_dev *edev, char name[NAME_SIZE])
+{
+ int i;
+
+ rte_memcpy(edev->name, name, NAME_SIZE);
+ for_each_hwfn(edev, i) {
+ snprintf(edev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
+ }
+}
+
+static uint32_t
+qed_sb_init(struct ecore_dev *edev, struct ecore_sb_info *sb_info,
+ void *sb_virt_addr, dma_addr_t sb_phy_addr,
+ uint16_t sb_id, enum qed_sb_type type)
+{
+ struct ecore_hwfn *p_hwfn;
+ int hwfn_index;
+ uint16_t rel_sb_id;
+ uint8_t n_hwfns;
+ uint32_t rc;
+
+ /* RoCE uses single engine and CMT uses two engines. When using both
+ * we force only a single engine. Storage uses only engine 0 too.
+ */
+ if (type == QED_SB_TYPE_L2_QUEUE)
+ n_hwfns = edev->num_hwfns;
+ else
+ n_hwfns = 1;
+
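+	/* Absolute SB ids are distributed round-robin across the engines used
+	 * by this queue type; rel_sb_id is the index within the chosen engine.
+	 */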
+ hwfn_index = sb_id % n_hwfns;
+ p_hwfn = &edev->hwfns[hwfn_index];
+ rel_sb_id = sb_id / n_hwfns;
+
+ DP_INFO(edev, "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
+ hwfn_index, rel_sb_id, sb_id);
+
+ rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
+ sb_virt_addr, sb_phy_addr, rel_sb_id);
+
+ return rc;
+}
+
+static void qed_fill_link(struct ecore_hwfn *hwfn,
+ struct qed_link_output *if_link)
+{
+ struct ecore_mcp_link_params params;
+ struct ecore_mcp_link_state link;
+ struct ecore_mcp_link_capabilities link_caps;
+ uint8_t change = 0;
+
+ memset(if_link, 0, sizeof(*if_link));
+
+ /* Prepare source inputs */
+ if (IS_PF(hwfn->p_dev)) {
+ rte_memcpy(&params, ecore_mcp_get_link_params(hwfn),
+ sizeof(params));
+ rte_memcpy(&link, ecore_mcp_get_link_state(hwfn), sizeof(link));
+ rte_memcpy(&link_caps, ecore_mcp_get_link_capabilities(hwfn),
+ sizeof(link_caps));
+ } else {
+ ecore_vf_read_bulletin(hwfn, &change);
+ ecore_vf_get_link_params(hwfn, &params);
+ ecore_vf_get_link_state(hwfn, &link);
+ ecore_vf_get_link_caps(hwfn, &link_caps);
+ }
+
+ /* Set the link parameters to pass to protocol driver */
+ if (link.link_up)
+ if_link->link_up = true;
+
+ if (link.link_up)
+ if_link->speed = link.speed;
+
+ if_link->duplex = QEDE_DUPLEX_FULL;
+
+ /* Fill up the native advertised speed cap mask */
+ if_link->adv_speed = params.speed.advertised_speeds;
+
+ if (params.speed.autoneg)
+ if_link->supported_caps |= QEDE_SUPPORTED_AUTONEG;
+
+ if (params.pause.autoneg || params.pause.forced_rx ||
+ params.pause.forced_tx)
+ if_link->supported_caps |= QEDE_SUPPORTED_PAUSE;
+
+ if (params.pause.autoneg)
+ if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
+
+ if (params.pause.forced_rx)
+ if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
+
+ if (params.pause.forced_tx)
+ if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
+}
+
+static void
+qed_get_current_link(struct ecore_dev *edev, struct qed_link_output *if_link)
+{
+ qed_fill_link(&edev->hwfns[0], if_link);
+
+#ifdef CONFIG_QED_SRIOV
+ for_each_hwfn(cdev, i)
+ qed_inform_vf_link_state(&cdev->hwfns[i]);
+#endif
+}
+
+static int qed_set_link(struct ecore_dev *edev, struct qed_link_params *params)
+{
+ struct ecore_hwfn *hwfn;
+ struct ecore_ptt *ptt;
+ struct ecore_mcp_link_params *link_params;
+ int rc;
+
+ if (IS_VF(edev))
+ return 0;
+
+ /* The link should be set only once per PF */
+ hwfn = &edev->hwfns[0];
+
+ ptt = ecore_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EBUSY;
+
+ link_params = ecore_mcp_get_link_params(hwfn);
+ if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
+ link_params->speed.autoneg = params->autoneg;
+
+ if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
+ if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
+ link_params->pause.autoneg = true;
+ else
+ link_params->pause.autoneg = false;
+ if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
+ link_params->pause.forced_rx = true;
+ else
+ link_params->pause.forced_rx = false;
+ if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
+ link_params->pause.forced_tx = true;
+ else
+ link_params->pause.forced_tx = false;
+ }
+
+ rc = ecore_mcp_set_link(hwfn, ptt, params->link_up);
+
+ ecore_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+void qed_link_update(struct ecore_hwfn *hwfn)
+{
+ struct qed_link_output if_link;
+
+ qed_fill_link(hwfn, &if_link);
+}
+
+static int qed_drain(struct ecore_dev *edev)
+{
+ struct ecore_hwfn *hwfn;
+ struct ecore_ptt *ptt;
+ int i, rc;
+
+ if (IS_VF(edev))
+ return 0;
+
+ for_each_hwfn(edev, i) {
+ hwfn = &edev->hwfns[i];
+ ptt = ecore_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_NOTICE(hwfn, true, "Failed to drain NIG; No PTT\n");
+ return -EBUSY;
+ }
+		rc = ecore_mcp_drain(hwfn, ptt);
+		ecore_ptt_release(hwfn, ptt);
+		if (rc)
+			return rc;
+ }
+
+ return 0;
+}
+
+static int qed_nic_stop(struct ecore_dev *edev)
+{
+ int i, rc;
+
+ rc = ecore_hw_stop(edev);
+ for (i = 0; i < edev->num_hwfns; i++) {
+ struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
+
+ if (p_hwfn->b_sp_dpc_enabled)
+ p_hwfn->b_sp_dpc_enabled = false;
+ }
+ return rc;
+}
+
+static int qed_slowpath_stop(struct ecore_dev *edev)
+{
+#ifdef CONFIG_QED_SRIOV
+ int i;
+#endif
+
+ if (!edev)
+ return -ENODEV;
+
+ if (IS_PF(edev)) {
+#ifdef CONFIG_ECORE_ZIPPED_FW
+ qed_free_stream_mem(edev);
+#endif
+
+#ifdef CONFIG_QED_SRIOV
+ if (IS_QED_ETH_IF(edev))
+ qed_sriov_disable(edev, true);
+#endif
+ }
+
+ qed_nic_stop(edev);
+
+ ecore_resc_free(edev);
+ qed_stop_iov_task(edev);
+
+ return 0;
+}
+
+static void qed_remove(struct ecore_dev *edev)
+{
+ if (!edev)
+ return;
+
+ ecore_hw_remove(edev);
+}
+
+static int qed_send_drv_state(struct ecore_dev *edev, bool active)
+{
+ struct ecore_hwfn *hwfn = ECORE_LEADING_HWFN(edev);
+ struct ecore_ptt *ptt;
+ int status = 0;
+
+ ptt = ecore_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ status = ecore_mcp_ov_update_driver_state(hwfn, ptt, active ?
+ ECORE_OV_DRIVER_STATE_ACTIVE :
+ ECORE_OV_DRIVER_STATE_DISABLED);
+
+ ecore_ptt_release(hwfn, ptt);
+
+ return status;
+}
+
+static int qed_get_sb_info(struct ecore_dev *edev, struct ecore_sb_info *sb,
+ u16 qid, struct ecore_sb_info_dbg *sb_dbg)
+{
+ struct ecore_hwfn *hwfn = &edev->hwfns[qid % edev->num_hwfns];
+ struct ecore_ptt *ptt;
+ int rc;
+
+ if (IS_VF(edev))
+ return -EINVAL;
+
+ ptt = ecore_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_NOTICE(hwfn, true, "Can't acquire PTT\n");
+ return -EAGAIN;
+ }
+
+ memset(sb_dbg, 0, sizeof(*sb_dbg));
+ rc = ecore_int_get_sb_dbg(hwfn, ptt, sb, sb_dbg);
+
+ ecore_ptt_release(hwfn, ptt);
+ return rc;
+}
+
+const struct qed_common_ops qed_common_ops_pass = {
+ INIT_STRUCT_FIELD(probe, &qed_probe),
+ INIT_STRUCT_FIELD(update_pf_params, &qed_update_pf_params),
+ INIT_STRUCT_FIELD(slowpath_start, &qed_slowpath_start),
+ INIT_STRUCT_FIELD(set_name, &qed_set_name),
+ INIT_STRUCT_FIELD(chain_alloc, &ecore_chain_alloc),
+ INIT_STRUCT_FIELD(chain_free, &ecore_chain_free),
+ INIT_STRUCT_FIELD(sb_init, &qed_sb_init),
+ INIT_STRUCT_FIELD(get_sb_info, &qed_get_sb_info),
+ INIT_STRUCT_FIELD(get_link, &qed_get_current_link),
+ INIT_STRUCT_FIELD(set_link, &qed_set_link),
+ INIT_STRUCT_FIELD(drain, &qed_drain),
+ INIT_STRUCT_FIELD(slowpath_stop, &qed_slowpath_stop),
+ INIT_STRUCT_FIELD(remove, &qed_remove),
+ INIT_STRUCT_FIELD(send_drv_state, &qed_send_drv_state),
+};
diff --git a/src/seastar/dpdk/drivers/net/qede/qede_rxtx.c b/src/seastar/dpdk/drivers/net/qede/qede_rxtx.c
new file mode 100644
index 00000000..baea1bb0
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/qede_rxtx.c
@@ -0,0 +1,1952 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include <rte_net.h>
+#include "qede_rxtx.h"
+
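+/* Allocate a fresh mbuf, post it as an Rx BD at the current producer and
+ * advance the software producer. The producer is free-running; NUM_RX_BDS()
+ * masks it into a ring index (the ring size is a power of two by setup).
+ */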
+static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
+{
+ struct rte_mbuf *new_mb = NULL;
+ struct eth_rx_bd *rx_bd;
+ dma_addr_t mapping;
+ uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
+
+ new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
+ if (unlikely(!new_mb)) {
+ PMD_RX_LOG(ERR, rxq,
+ "Failed to allocate rx buffer "
+ "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
+ idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
+ rte_mempool_avail_count(rxq->mb_pool),
+ rte_mempool_in_use_count(rxq->mb_pool));
+ return -ENOMEM;
+ }
+ rxq->sw_rx_ring[idx].mbuf = new_mb;
+ rxq->sw_rx_ring[idx].page_offset = 0;
+ mapping = rte_mbuf_data_dma_addr_default(new_mb);
+ /* Advance PROD and get BD pointer */
+ rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
+ rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
+ rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
+ rxq->sw_rx_prod++;
+ return 0;
+}
+
+static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
+{
+ uint16_t i;
+
+ if (rxq->sw_rx_ring != NULL) {
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (rxq->sw_rx_ring[i].mbuf != NULL) {
+ rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);
+ rxq->sw_rx_ring[i].mbuf = NULL;
+ }
+ }
+ }
+}
+
+void qede_rx_queue_release(void *rx_queue)
+{
+ struct qede_rx_queue *rxq = rx_queue;
+
+ if (rxq != NULL) {
+ qede_rx_queue_release_mbufs(rxq);
+ rte_free(rxq->sw_rx_ring);
+ rxq->sw_rx_ring = NULL;
+ rte_free(rxq);
+ rxq = NULL;
+ }
+}
+
+static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
+{
+ unsigned int i;
+
+ PMD_TX_LOG(DEBUG, txq, "releasing %u mbufs", txq->nb_tx_desc);
+
+ if (txq->sw_tx_ring) {
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ if (txq->sw_tx_ring[i].mbuf) {
+ rte_pktmbuf_free(txq->sw_tx_ring[i].mbuf);
+ txq->sw_tx_ring[i].mbuf = NULL;
+ }
+ }
+ }
+}
+
+int
+qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ struct qede_dev *qdev = dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ struct qede_rx_queue *rxq;
+ uint16_t max_rx_pkt_len;
+ uint16_t bufsz;
+ size_t size;
+ int rc;
+ int i;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ /* Note: Ring size/align is controlled by struct rte_eth_desc_lim */
+ if (!rte_is_power_of_2(nb_desc)) {
+ DP_ERR(edev, "Ring size %u is not power of 2\n",
+ nb_desc);
+ return -EINVAL;
+ }
+
+ /* Free memory prior to re-allocation if needed... */
+ if (dev->data->rx_queues[queue_idx] != NULL) {
+ qede_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ /* First allocate the rx queue data structure */
+ rxq = rte_zmalloc_socket("qede_rx_queue", sizeof(struct qede_rx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+
+ if (!rxq) {
+ DP_ERR(edev, "Unable to allocate memory for rxq on socket %u",
+ socket_id);
+ return -ENOMEM;
+ }
+
+ rxq->qdev = qdev;
+ rxq->mb_pool = mp;
+ rxq->nb_rx_desc = nb_desc;
+ rxq->queue_id = queue_idx;
+ rxq->port_id = dev->data->port_id;
+ max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
+ qdev->mtu = max_rx_pkt_len;
+
+ /* Fix up RX buffer size */
+ bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
+ if ((rxmode->enable_scatter) ||
+ (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
+ if (!dev->data->scattered_rx) {
+ DP_INFO(edev, "Forcing scatter-gather mode\n");
+ dev->data->scattered_rx = 1;
+ }
+ }
+ if (dev->data->scattered_rx)
+ rxq->rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
+ else
+ rxq->rx_buf_size = qdev->mtu + QEDE_ETH_OVERHEAD;
+ /* Align to cache-line size if needed */
+ rxq->rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rxq->rx_buf_size);
+
+ DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n",
+ qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx);
+
+ /* Allocate the parallel driver ring for Rx buffers */
+ size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;
+ rxq->sw_rx_ring = rte_zmalloc_socket("sw_rx_ring", size,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!rxq->sw_rx_ring) {
+ DP_NOTICE(edev, false,
+ "Unable to alloc memory for sw_rx_ring on socket %u\n",
+ socket_id);
+ rte_free(rxq);
+ rxq = NULL;
+ return -ENOMEM;
+ }
+
+ /* Allocate FW Rx ring */
+ rc = qdev->ops->common->chain_alloc(edev,
+ ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
+ ECORE_CHAIN_MODE_NEXT_PTR,
+ ECORE_CHAIN_CNT_TYPE_U16,
+ rxq->nb_rx_desc,
+ sizeof(struct eth_rx_bd),
+ &rxq->rx_bd_ring,
+ NULL);
+
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(edev, false,
+ "Unable to alloc memory for rxbd ring on socket %u\n",
+ socket_id);
+ rte_free(rxq->sw_rx_ring);
+ rxq->sw_rx_ring = NULL;
+ rte_free(rxq);
+ rxq = NULL;
+ return -ENOMEM;
+ }
+
+ /* Allocate FW completion ring */
+ rc = qdev->ops->common->chain_alloc(edev,
+ ECORE_CHAIN_USE_TO_CONSUME,
+ ECORE_CHAIN_MODE_PBL,
+ ECORE_CHAIN_CNT_TYPE_U16,
+ rxq->nb_rx_desc,
+ sizeof(union eth_rx_cqe),
+ &rxq->rx_comp_ring,
+ NULL);
+
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(edev, false,
+ "Unable to alloc memory for cqe ring on socket %u\n",
+ socket_id);
+ /* TBD: Freeing RX BD ring */
+ rte_free(rxq->sw_rx_ring);
+ rxq->sw_rx_ring = NULL;
+ rte_free(rxq);
+ return -ENOMEM;
+ }
+
+ /* Allocate buffers for the Rx ring */
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ rc = qede_alloc_rx_buffer(rxq);
+ if (rc) {
+ DP_NOTICE(edev, false,
+ "RX buffer allocation failed at idx=%d\n", i);
+ goto err4;
+ }
+ }
+
+ dev->data->rx_queues[queue_idx] = rxq;
+
+ DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
+		  queue_idx, nb_desc, rxq->rx_buf_size, socket_id);
+
+ return 0;
+err4:
+ qede_rx_queue_release(rxq);
+ return -ENOMEM;
+}
+
+void qede_tx_queue_release(void *tx_queue)
+{
+ struct qede_tx_queue *txq = tx_queue;
+
+ if (txq != NULL) {
+ qede_tx_queue_release_mbufs(txq);
+ if (txq->sw_tx_ring) {
+ rte_free(txq->sw_tx_ring);
+ txq->sw_tx_ring = NULL;
+ }
+ rte_free(txq);
+ }
+ txq = NULL;
+}
+
+int
+qede_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct qede_dev *qdev = dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ struct qede_tx_queue *txq;
+ int rc;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ if (!rte_is_power_of_2(nb_desc)) {
+ DP_ERR(edev, "Ring size %u is not power of 2\n",
+ nb_desc);
+ return -EINVAL;
+ }
+
+ /* Free memory prior to re-allocation if needed... */
+ if (dev->data->tx_queues[queue_idx] != NULL) {
+ qede_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ txq = rte_zmalloc_socket("qede_tx_queue", sizeof(struct qede_tx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+
+ if (txq == NULL) {
+ DP_ERR(edev,
+ "Unable to allocate memory for txq on socket %u",
+ socket_id);
+ return -ENOMEM;
+ }
+
+ txq->nb_tx_desc = nb_desc;
+ txq->qdev = qdev;
+ txq->port_id = dev->data->port_id;
+
+ rc = qdev->ops->common->chain_alloc(edev,
+ ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
+ ECORE_CHAIN_MODE_PBL,
+ ECORE_CHAIN_CNT_TYPE_U16,
+ txq->nb_tx_desc,
+ sizeof(union eth_tx_bd_types),
+ &txq->tx_pbl,
+ NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev,
+ "Unable to allocate memory for txbd ring on socket %u",
+ socket_id);
+ qede_tx_queue_release(txq);
+ return -ENOMEM;
+ }
+
+ /* Allocate software ring */
+ txq->sw_tx_ring = rte_zmalloc_socket("txq->sw_tx_ring",
+ (sizeof(struct qede_tx_entry) *
+ txq->nb_tx_desc),
+ RTE_CACHE_LINE_SIZE, socket_id);
+
+ if (!txq->sw_tx_ring) {
+ DP_ERR(edev,
+ "Unable to allocate memory for txbd ring on socket %u",
+ socket_id);
+ qede_tx_queue_release(txq);
+ return -ENOMEM;
+ }
+
+ txq->queue_id = queue_idx;
+
+ txq->nb_tx_avail = txq->nb_tx_desc;
+
+ txq->tx_free_thresh =
+ tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
+ (txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH);
+
+ dev->data->tx_queues[queue_idx] = txq;
+
+ DP_INFO(edev,
+ "txq %u num_desc %u tx_free_thresh %u socket %u\n",
+ queue_idx, nb_desc, txq->tx_free_thresh, socket_id);
+
+ return 0;
+}
+
+/* This function inits fp content and resets the SB, RXQ and TXQ arrays */
+static void qede_init_fp(struct qede_dev *qdev)
+{
+ struct qede_fastpath *fp;
+ uint8_t i;
+ int fp_rx = qdev->fp_num_rx;
+
+ memset((void *)qdev->fp_array, 0, (QEDE_QUEUE_CNT(qdev) *
+ sizeof(*qdev->fp_array)));
+ memset((void *)qdev->sb_array, 0, (QEDE_QUEUE_CNT(qdev) *
+ sizeof(*qdev->sb_array)));
+ for_each_queue(i) {
+ fp = &qdev->fp_array[i];
+ if (fp_rx) {
+ fp->type = QEDE_FASTPATH_RX;
+ fp_rx--;
+		} else {
+ fp->type = QEDE_FASTPATH_TX;
+ }
+ fp->qdev = qdev;
+ fp->id = i;
+ fp->sb_info = &qdev->sb_array[i];
+ snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", "qdev", i);
+ }
+
+}
+
+void qede_free_fp_arrays(struct qede_dev *qdev)
+{
+	/* It assumes qede_free_mem_load() has already been called */
+ if (qdev->fp_array != NULL) {
+ rte_free(qdev->fp_array);
+ qdev->fp_array = NULL;
+ }
+
+ if (qdev->sb_array != NULL) {
+ rte_free(qdev->sb_array);
+ qdev->sb_array = NULL;
+ }
+}
+
+static int qede_alloc_fp_array(struct qede_dev *qdev)
+{
+ struct ecore_dev *edev = &qdev->edev;
+
+ qdev->fp_array = rte_calloc("fp", QEDE_QUEUE_CNT(qdev),
+ sizeof(*qdev->fp_array),
+ RTE_CACHE_LINE_SIZE);
+
+ if (!qdev->fp_array) {
+ DP_ERR(edev, "fp array allocation failed\n");
+ return -ENOMEM;
+ }
+
+ qdev->sb_array = rte_calloc("sb", QEDE_QUEUE_CNT(qdev),
+ sizeof(*qdev->sb_array),
+ RTE_CACHE_LINE_SIZE);
+
+ if (!qdev->sb_array) {
+ DP_ERR(edev, "sb array allocation failed\n");
+ rte_free(qdev->fp_array);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/* This function allocates fast-path status block memory */
+static int
+qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
+ uint16_t sb_id)
+{
+ struct ecore_dev *edev = &qdev->edev;
+ struct status_block *sb_virt;
+ dma_addr_t sb_phys;
+ int rc;
+
+ sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys, sizeof(*sb_virt));
+
+ if (!sb_virt) {
+ DP_ERR(edev, "Status block allocation failed\n");
+ return -ENOMEM;
+ }
+
+ rc = qdev->ops->common->sb_init(edev, sb_info,
+ sb_virt, sb_phys, sb_id,
+ QED_SB_TYPE_L2_QUEUE);
+ if (rc) {
+ DP_ERR(edev, "Status block initialization failed\n");
+ /* TBD: No dma_free_coherent possible */
+ return rc;
+ }
+
+ return 0;
+}
+
+int qede_alloc_fp_resc(struct qede_dev *qdev)
+{
+ struct ecore_dev *edev = &qdev->edev;
+ struct qede_fastpath *fp;
+ uint32_t num_sbs;
+ uint16_t i;
+ uint16_t sb_idx;
+ int rc;
+
+ if (IS_VF(edev))
+ ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs);
+ else
+ num_sbs = ecore_cxt_get_proto_cid_count
+ (ECORE_LEADING_HWFN(edev), PROTOCOLID_ETH, NULL);
+
+ if (num_sbs == 0) {
+ DP_ERR(edev, "No status blocks available\n");
+ return -EINVAL;
+ }
+
+ if (qdev->fp_array)
+ qede_free_fp_arrays(qdev);
+
+ rc = qede_alloc_fp_array(qdev);
+ if (rc != 0)
+ return rc;
+
+ qede_init_fp(qdev);
+
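+	/* VFs may be granted fewer status blocks than queues, so wrap the SB
+	 * index for them; PFs get a dedicated SB per queue.
+	 */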
+ for (i = 0; i < QEDE_QUEUE_CNT(qdev); i++) {
+ fp = &qdev->fp_array[i];
+ if (IS_VF(edev))
+ sb_idx = i % num_sbs;
+ else
+ sb_idx = i;
+ if (qede_alloc_mem_sb(qdev, fp->sb_info, sb_idx)) {
+ qede_free_fp_arrays(qdev);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+
+ qede_free_mem_load(eth_dev);
+ qede_free_fp_arrays(qdev);
+}
+
+static inline void
+qede_update_rx_prod(__rte_unused struct qede_dev *edev,
+ struct qede_rx_queue *rxq)
+{
+ uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
+ uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
+ struct eth_rx_prod_data rx_prods = { 0 };
+
+ /* Update producers */
+ rx_prods.bd_prod = rte_cpu_to_le_16(bd_prod);
+ rx_prods.cqe_prod = rte_cpu_to_le_16(cqe_prod);
+
+ /* Make sure that the BD and SGE data is updated before updating the
+ * producers since FW might read the BD/SGE right after the producer
+ * is updated.
+ */
+ rte_wmb();
+
+ internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
+ (uint32_t *)&rx_prods);
+
+ /* mmiowb is needed to synchronize doorbell writes from more than one
+ * processor. It guarantees that the write arrives to the device before
+ * the napi lock is released and another qede_poll is called (possibly
+ * on another CPU). Without this barrier, the next doorbell can bypass
+ * this doorbell. This is applicable to IA64/Altix systems.
+ */
+ rte_wmb();
+
+ PMD_RX_LOG(DEBUG, rxq, "bd_prod %u cqe_prod %u", bd_prod, cqe_prod);
+}
+
+static void
+qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
+ uint16_t mtu, bool enable)
+{
+ /* Enable LRO in split mode */
+ sge_tpa_params->tpa_ipv4_en_flg = enable;
+ sge_tpa_params->tpa_ipv6_en_flg = enable;
+ sge_tpa_params->tpa_ipv4_tunn_en_flg = false;
+ sge_tpa_params->tpa_ipv6_tunn_en_flg = false;
+ /* set if tpa enable changes */
+ sge_tpa_params->update_tpa_en_flg = 1;
+ /* set if tpa parameters should be handled */
+ sge_tpa_params->update_tpa_param_flg = enable;
+
+ sge_tpa_params->max_buffers_per_cqe = 20;
+ /* Enable TPA in split mode. In this mode each TPA segment
+ * starts on the new BD, so there is one BD per segment.
+ */
+ sge_tpa_params->tpa_pkt_split_flg = 1;
+ sge_tpa_params->tpa_hdr_data_split_flg = 0;
+ sge_tpa_params->tpa_gro_consistent_flg = 0;
+ sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
+ sge_tpa_params->tpa_max_size = 0x7FFF;
+ sge_tpa_params->tpa_min_size_to_start = mtu / 2;
+ sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
+}
+
+static int qede_start_queues(struct rte_eth_dev *eth_dev,
+ __rte_unused bool clear_stats)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ struct ecore_queue_start_common_params q_params;
+ struct qed_dev_info *qed_info = &qdev->dev_info.common;
+ struct qed_update_vport_params vport_update_params;
+ struct ecore_sge_tpa_params tpa_params;
+ struct qede_tx_queue *txq;
+ struct qede_fastpath *fp;
+ dma_addr_t p_phys_table;
+ int txq_index;
+ uint16_t page_cnt;
+ int rc, tc, i;
+
+ for_each_queue(i) {
+ fp = &qdev->fp_array[i];
+ if (fp->type & QEDE_FASTPATH_RX) {
+ struct ecore_rxq_start_ret_params ret_params;
+
+ p_phys_table =
+ ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring);
+ page_cnt =
+ ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring);
+
+ memset(&ret_params, 0, sizeof(ret_params));
+ memset(&q_params, 0, sizeof(q_params));
+ q_params.queue_id = i;
+ q_params.vport_id = 0;
+ q_params.sb = fp->sb_info->igu_sb_id;
+ q_params.sb_idx = RX_PI;
+
+ ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
+
+ rc = qdev->ops->q_rx_start(edev, i, &q_params,
+ fp->rxq->rx_buf_size,
+ fp->rxq->rx_bd_ring.p_phys_addr,
+ p_phys_table,
+ page_cnt,
+ &ret_params);
+ if (rc) {
+ DP_ERR(edev, "Start rxq #%d failed %d\n",
+ fp->rxq->queue_id, rc);
+ return rc;
+ }
+
+ /* Use the return parameters */
+ fp->rxq->hw_rxq_prod_addr = ret_params.p_prod;
+ fp->rxq->handle = ret_params.p_handle;
+
+ fp->rxq->hw_cons_ptr =
+ &fp->sb_info->sb_virt->pi_array[RX_PI];
+
+ qede_update_rx_prod(qdev, fp->rxq);
+ }
+
+ if (!(fp->type & QEDE_FASTPATH_TX))
+ continue;
+ for (tc = 0; tc < qdev->num_tc; tc++) {
+ struct ecore_txq_start_ret_params ret_params;
+
+ txq = fp->txqs[tc];
+ txq_index = tc * QEDE_RSS_COUNT(qdev) + i;
+
+ p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
+ page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
+
+ memset(&q_params, 0, sizeof(q_params));
+ memset(&ret_params, 0, sizeof(ret_params));
+ q_params.queue_id = txq->queue_id;
+ q_params.vport_id = 0;
+ q_params.sb = fp->sb_info->igu_sb_id;
+ q_params.sb_idx = TX_PI(tc);
+
+ rc = qdev->ops->q_tx_start(edev, i, &q_params,
+ p_phys_table,
+ page_cnt, /* **pp_doorbell */
+ &ret_params);
+ if (rc) {
+ DP_ERR(edev, "Start txq %u failed %d\n",
+ txq_index, rc);
+ return rc;
+ }
+
+ txq->doorbell_addr = ret_params.p_doorbell;
+ txq->handle = ret_params.p_handle;
+
+ txq->hw_cons_ptr =
+ &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
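+			/* Pre-build the static part of the Tx doorbell data:
+			 * destination XCM, aggregated command SET, and the
+			 * Tx BD producer as the aggregated value selector.
+			 */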
+ SET_FIELD(txq->tx_db.data.params,
+ ETH_DB_DATA_DEST, DB_DEST_XCM);
+ SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
+ DB_AGG_CMD_SET);
+ SET_FIELD(txq->tx_db.data.params,
+ ETH_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_ETH_TX_BD_PROD_CMD);
+
+ txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
+ }
+ }
+
+ /* Prepare and send the vport enable */
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+ /* Update MTU via vport update */
+ vport_update_params.mtu = qdev->mtu;
+ vport_update_params.vport_id = 0;
+ vport_update_params.update_vport_active_flg = 1;
+ vport_update_params.vport_active_flg = 1;
+
+ /* @DPDK */
+ if (qed_info->mf_mode == MF_NPAR && qed_info->tx_switching) {
+ /* TBD: Check SRIOV enabled for VF */
+ vport_update_params.update_tx_switching_flg = 1;
+ vport_update_params.tx_switching_flg = 1;
+ }
+
+ /* TPA */
+ if (qdev->enable_lro) {
+ DP_INFO(edev, "Enabling LRO\n");
+ memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
+ qede_update_sge_tpa_params(&tpa_params, qdev->mtu, true);
+ vport_update_params.sge_tpa_params = &tpa_params;
+ }
+
+ rc = qdev->ops->vport_update(edev, &vport_update_params);
+ if (rc) {
+ DP_ERR(edev, "Update V-PORT failed %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static bool qede_tunn_exist(uint16_t flag)
+{
+ return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag);
+}
+
+/*
+ * qede_check_tunn_csum_l4:
+ * Returns:
+ * 1 : If L4 csum is enabled AND if the validation has failed.
+ * 0 : Otherwise
+ */
+static inline uint8_t qede_check_tunn_csum_l4(uint16_t flag)
+{
+ if ((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT) & flag)
+ return !!((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT) & flag);
+
+ return 0;
+}
+
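+/*
+ * qede_check_notunn_csum_l4:
+ * Returns:
+ * 1 : If L4 csum is enabled AND if the validation has failed.
+ * 0 : Otherwise
+ */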
+static inline uint8_t qede_check_notunn_csum_l4(uint16_t flag)
+{
+ if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag)
+ return !!((PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT) & flag);
+
+ return 0;
+}
+
+static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
+{
+ uint16_t val;
+
+ /* Lookup table */
+ static const uint32_t
+ ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
+ [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_L3_IPV6,
+ [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+ [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+ [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+ [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+ };
+
+	/* Bits (0..3) provide the L3/L4 protocol type */
+ val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
+ PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
+ (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
+ PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT)) & flags;
+
+ if (val < QEDE_PKT_TYPE_MAX)
+ return ptype_lkup_tbl[val] | RTE_PTYPE_L2_ETHER;
+ else
+ return RTE_PTYPE_UNKNOWN;
+}
+
+static inline uint8_t
+qede_check_notunn_csum_l3(struct rte_mbuf *m, uint16_t flag)
+{
+ struct ipv4_hdr *ip;
+ uint16_t pkt_csum;
+ uint16_t calc_csum;
+ uint16_t val;
+
+ val = ((PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT) & flag);
+
+ if (unlikely(val)) {
+ m->packet_type = qede_rx_cqe_to_pkt_type(flag);
+ if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
+ ip = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+ sizeof(struct ether_hdr));
+ pkt_csum = ip->hdr_checksum;
+ ip->hdr_checksum = 0;
+ calc_csum = rte_ipv4_cksum(ip);
+ ip->hdr_checksum = pkt_csum;
+ return (calc_csum != pkt_csum);
+ } else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
+{
+ ecore_chain_consume(&rxq->rx_bd_ring);
+ rxq->sw_rx_cons++;
+}
+
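+/* Re-post an already consumed Rx buffer at the current producer position,
+ * reusing its mbuf and DMA mapping instead of allocating a new one.
+ */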
+static inline void
+qede_reuse_page(__rte_unused struct qede_dev *qdev,
+ struct qede_rx_queue *rxq, struct qede_rx_entry *curr_cons)
+{
+ struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
+ uint16_t idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
+ struct qede_rx_entry *curr_prod;
+ dma_addr_t new_mapping;
+
+ curr_prod = &rxq->sw_rx_ring[idx];
+ *curr_prod = *curr_cons;
+
+ new_mapping = rte_mbuf_data_dma_addr_default(curr_prod->mbuf) +
+ curr_prod->page_offset;
+
+ rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));
+ rx_bd_prod->addr.lo = rte_cpu_to_le_32(U64_LO(new_mapping));
+
+ rxq->sw_rx_prod++;
+}
+
+static inline void
+qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
+ struct qede_dev *qdev, uint8_t count)
+{
+ struct qede_rx_entry *curr_cons;
+
+ for (; count > 0; count--) {
+ curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)];
+ qede_reuse_page(qdev, rxq, curr_cons);
+ qede_rx_bd_ring_consume(rxq);
+ }
+}
+
+static inline void
+qede_rx_process_tpa_cmn_cont_end_cqe(__rte_unused struct qede_dev *qdev,
+ struct qede_rx_queue *rxq,
+ uint8_t agg_index, uint16_t len)
+{
+ struct qede_agg_info *tpa_info;
+ struct rte_mbuf *curr_frag; /* Pointer to currently filled TPA seg */
+ uint16_t cons_idx;
+
+	/* Under certain conditions the FW may not consume an additional or new
+	 * BD, so the decision to consume the BD must be based on len_list[0].
+	 */
+ if (rte_le_to_cpu_16(len)) {
+ tpa_info = &rxq->tpa_info[agg_index];
+ cons_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
+ curr_frag = rxq->sw_rx_ring[cons_idx].mbuf;
+ assert(curr_frag);
+ curr_frag->nb_segs = 1;
+ curr_frag->pkt_len = rte_le_to_cpu_16(len);
+ curr_frag->data_len = curr_frag->pkt_len;
+ tpa_info->tpa_tail->next = curr_frag;
+ tpa_info->tpa_tail = curr_frag;
+ qede_rx_bd_ring_consume(rxq);
+ if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
+ PMD_RX_LOG(ERR, rxq, "mbuf allocation fails\n");
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+ rxq->rx_alloc_errors++;
+ }
+ }
+}
+
+static inline void
+qede_rx_process_tpa_cont_cqe(struct qede_dev *qdev,
+ struct qede_rx_queue *rxq,
+ struct eth_fast_path_rx_tpa_cont_cqe *cqe)
+{
+ PMD_RX_LOG(INFO, rxq, "TPA cont[%d] - len [%d]\n",
+ cqe->tpa_agg_index, rte_le_to_cpu_16(cqe->len_list[0]));
+ /* only len_list[0] will have value */
+ qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
+ cqe->len_list[0]);
+}
+
+static inline void
+qede_rx_process_tpa_end_cqe(struct qede_dev *qdev,
+ struct qede_rx_queue *rxq,
+ struct eth_fast_path_rx_tpa_end_cqe *cqe)
+{
+ struct rte_mbuf *rx_mb; /* Pointer to head of the chained agg */
+
+ qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
+ cqe->len_list[0]);
+ /* Update total length and frags based on end TPA */
+ rx_mb = rxq->tpa_info[cqe->tpa_agg_index].tpa_head;
+ /* TODO: Add Sanity Checks */
+ rx_mb->nb_segs = cqe->num_of_bds;
+ rx_mb->pkt_len = cqe->total_packet_len;
+
+ PMD_RX_LOG(INFO, rxq, "TPA End[%d] reason %d cqe_len %d nb_segs %d"
+ " pkt_len %d\n", cqe->tpa_agg_index, cqe->end_reason,
+ rte_le_to_cpu_16(cqe->len_list[0]), rx_mb->nb_segs,
+ rx_mb->pkt_len);
+}
+
+static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
+{
+ uint32_t val;
+
+ /* Lookup table */
+ static const uint32_t
+ ptype_tunn_lkup_tbl[QEDE_PKT_TYPE_TUNN_MAX_TYPE] __rte_cache_aligned = {
+ [QEDE_PKT_TYPE_UNKNOWN] = RTE_PTYPE_UNKNOWN,
+ [QEDE_PKT_TYPE_TUNN_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
+ [QEDE_PKT_TYPE_TUNN_GRE] = RTE_PTYPE_TUNNEL_GRE,
+ [QEDE_PKT_TYPE_TUNN_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
+ [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE] =
+ RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L2_ETHER,
+ [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE] =
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L2_ETHER,
+ [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN] =
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L2_ETHER,
+ [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE] =
+ RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L2_ETHER,
+ [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE] =
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L2_ETHER,
+ [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN] =
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L2_ETHER,
+ [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE] =
+ RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE] =
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN] =
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE] =
+ RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE] =
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN] =
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GENEVE] =
+ RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
+ [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GRE] =
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
+ [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_VXLAN] =
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
+ [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GENEVE] =
+ RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
+ [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GRE] =
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
+ [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_VXLAN] =
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
+ };
+
+ /* Cover bits[4-0] to include tunn_type and next protocol */
+ val = ((ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK <<
+ ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT) |
+ (ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK <<
+ ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT)) & flags;
+
+ if (val < QEDE_PKT_TYPE_TUNN_MAX_TYPE)
+ return ptype_tunn_lkup_tbl[val];
+ else
+ return RTE_PTYPE_UNKNOWN;
+}
+
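+/* Chain the remaining BDs of a multi-BD (jumbo) packet onto the head mbuf,
+ * consuming one software ring entry and one Rx BD per segment.
+ */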
+static inline int
+qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
+ uint8_t num_segs, uint16_t pkt_len)
+{
+ struct qede_rx_queue *rxq = p_rxq;
+ struct qede_dev *qdev = rxq->qdev;
+ register struct rte_mbuf *seg1 = NULL;
+ register struct rte_mbuf *seg2 = NULL;
+ uint16_t sw_rx_index;
+ uint16_t cur_size;
+
+ seg1 = rx_mb;
+ while (num_segs) {
+ cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
+ pkt_len;
+ if (unlikely(!cur_size)) {
+ PMD_RX_LOG(ERR, rxq, "Length is 0 while %u BDs"
+ " left for mapping jumbo", num_segs);
+ qede_recycle_rx_bd_ring(rxq, qdev, num_segs);
+ return -EINVAL;
+ }
+ sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
+ seg2 = rxq->sw_rx_ring[sw_rx_index].mbuf;
+ qede_rx_bd_ring_consume(rxq);
+ pkt_len -= cur_size;
+ seg2->data_len = cur_size;
+ seg1->next = seg2;
+ seg1 = seg1->next;
+ num_segs--;
+ rxq->rx_segs++;
+ }
+
+ return 0;
+}
+
+uint16_t
+qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct qede_rx_queue *rxq = p_rxq;
+ struct qede_dev *qdev = rxq->qdev;
+ struct ecore_dev *edev = &qdev->edev;
+ struct qede_fastpath *fp = &qdev->fp_array[rxq->queue_id];
+ uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
+ uint16_t rx_pkt = 0;
+ union eth_rx_cqe *cqe;
+ struct eth_fast_path_rx_reg_cqe *fp_cqe = NULL;
+ register struct rte_mbuf *rx_mb = NULL;
+ register struct rte_mbuf *seg1 = NULL;
+ enum eth_rx_cqe_type cqe_type;
+ uint16_t pkt_len = 0; /* Sum of all BD segments */
+ uint16_t len; /* Length of first BD */
+ uint8_t num_segs = 1;
+ uint16_t preload_idx;
+ uint16_t parse_flag;
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+ uint8_t bitfield_val;
+ enum rss_hash_type htype;
+#endif
+ uint8_t tunn_parse_flag;
+ uint8_t j;
+ struct eth_fast_path_rx_tpa_start_cqe *cqe_start_tpa;
+ uint64_t ol_flags;
+ uint32_t packet_type;
+ uint16_t vlan_tci;
+ bool tpa_start_flg;
+ uint8_t offset, tpa_agg_idx, flags;
+ struct qede_agg_info *tpa_info = NULL;
+ uint32_t rss_hash;
+
+ hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
+ sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+ rte_rmb();
+
+ if (hw_comp_cons == sw_comp_cons)
+ return 0;
+
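+	/* Process completion entries until the software consumer catches up
+	 * with the hardware consumer, building one mbuf chain (or TPA
+	 * aggregation) per fast-path CQE.
+	 */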
+ while (sw_comp_cons != hw_comp_cons) {
+ ol_flags = 0;
+ packet_type = RTE_PTYPE_UNKNOWN;
+ vlan_tci = 0;
+ tpa_start_flg = false;
+ rss_hash = 0;
+
+ /* Get the CQE from the completion ring */
+ cqe =
+ (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
+ cqe_type = cqe->fast_path_regular.type;
+ PMD_RX_LOG(INFO, rxq, "Rx CQE type %d\n", cqe_type);
+
+ switch (cqe_type) {
+ case ETH_RX_CQE_TYPE_REGULAR:
+ fp_cqe = &cqe->fast_path_regular;
+ break;
+ case ETH_RX_CQE_TYPE_TPA_START:
+ cqe_start_tpa = &cqe->fast_path_tpa_start;
+ tpa_info = &rxq->tpa_info[cqe_start_tpa->tpa_agg_index];
+ tpa_start_flg = true;
+ /* Mark it as LRO packet */
+ ol_flags |= PKT_RX_LRO;
+ /* In split mode, seg_len is same as len_on_first_bd
+ * and ext_bd_len_list will be empty since there are
+ * no additional buffers
+ */
+ PMD_RX_LOG(INFO, rxq,
+ "TPA start[%d] - len_on_first_bd %d header %d"
+ " [bd_list[0] %d], [seg_len %d]\n",
+ cqe_start_tpa->tpa_agg_index,
+ rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd),
+ cqe_start_tpa->header_len,
+ rte_le_to_cpu_16(cqe_start_tpa->ext_bd_len_list[0]),
+ rte_le_to_cpu_16(cqe_start_tpa->seg_len));
+
+ break;
+ case ETH_RX_CQE_TYPE_TPA_CONT:
+ qede_rx_process_tpa_cont_cqe(qdev, rxq,
+ &cqe->fast_path_tpa_cont);
+ goto next_cqe;
+ case ETH_RX_CQE_TYPE_TPA_END:
+ qede_rx_process_tpa_end_cqe(qdev, rxq,
+ &cqe->fast_path_tpa_end);
+ tpa_agg_idx = cqe->fast_path_tpa_end.tpa_agg_index;
+ tpa_info = &rxq->tpa_info[tpa_agg_idx];
+ rx_mb = rxq->tpa_info[tpa_agg_idx].tpa_head;
+ goto tpa_end;
+ case ETH_RX_CQE_TYPE_SLOW_PATH:
+ PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n");
+ qdev->ops->eth_cqe_completion(edev, fp->id,
+ (struct eth_slow_path_rx_cqe *)cqe);
+ /* fall-thru */
+ default:
+ goto next_cqe;
+ }
+
+ /* Get the data from the SW ring */
+ sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
+ rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
+ assert(rx_mb != NULL);
+
+ /* Handle regular CQE or TPA start CQE */
+ if (!tpa_start_flg) {
+ parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags);
+ offset = fp_cqe->placement_offset;
+ len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
+ pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
+ vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
+ rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash);
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+ bitfield_val = fp_cqe->bitfields;
+ htype = (uint8_t)GET_FIELD(bitfield_val,
+ ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
+#endif
+ } else {
+ parse_flag =
+ rte_le_to_cpu_16(cqe_start_tpa->pars_flags.flags);
+ offset = cqe_start_tpa->placement_offset;
+ /* seg_len = len_on_first_bd */
+ len = rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd);
+ vlan_tci = rte_le_to_cpu_16(cqe_start_tpa->vlan_tag);
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+ bitfield_val = cqe_start_tpa->bitfields;
+ htype = (uint8_t)GET_FIELD(bitfield_val,
+ ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE);
+#endif
+ rss_hash = rte_le_to_cpu_32(cqe_start_tpa->rss_hash);
+ }
+ if (qede_tunn_exist(parse_flag)) {
+ PMD_RX_LOG(INFO, rxq, "Rx tunneled packet\n");
+ if (unlikely(qede_check_tunn_csum_l4(parse_flag))) {
+ PMD_RX_LOG(ERR, rxq,
+ "L4 csum failed, flags = 0x%x\n",
+ parse_flag);
+ rxq->rx_hw_errors++;
+ ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ } else {
+ ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ if (tpa_start_flg)
+ flags =
+ cqe_start_tpa->tunnel_pars_flags.flags;
+ else
+ flags = fp_cqe->tunnel_pars_flags.flags;
+ tunn_parse_flag = flags;
+ packet_type =
+ qede_rx_cqe_to_tunn_pkt_type(tunn_parse_flag);
+ }
+ } else {
+ PMD_RX_LOG(INFO, rxq, "Rx non-tunneled packet\n");
+ if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
+ PMD_RX_LOG(ERR, rxq,
+ "L4 csum failed, flags = 0x%x\n",
+ parse_flag);
+ rxq->rx_hw_errors++;
+ ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ } else {
+ ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ }
+ if (unlikely(qede_check_notunn_csum_l3(rx_mb,
+ parse_flag))) {
+ PMD_RX_LOG(ERR, rxq,
+ "IP csum failed, flags = 0x%x\n",
+ parse_flag);
+ rxq->rx_hw_errors++;
+ ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ } else {
+ ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ packet_type =
+ qede_rx_cqe_to_pkt_type(parse_flag);
+ }
+ }
+
+ if (CQE_HAS_VLAN(parse_flag)) {
+ ol_flags |= PKT_RX_VLAN_PKT;
+ if (qdev->vlan_strip_flg) {
+ ol_flags |= PKT_RX_VLAN_STRIPPED;
+ rx_mb->vlan_tci = vlan_tci;
+ }
+ }
+ if (CQE_HAS_OUTER_VLAN(parse_flag)) {
+ ol_flags |= PKT_RX_QINQ_PKT;
+ if (qdev->vlan_strip_flg) {
+ rx_mb->vlan_tci = vlan_tci;
+ ol_flags |= PKT_RX_QINQ_STRIPPED;
+ }
+ rx_mb->vlan_tci_outer = 0;
+ }
+ /* RSS Hash */
+ if (qdev->rss_enable) {
+ ol_flags |= PKT_RX_RSS_HASH;
+ rx_mb->hash.rss = rss_hash;
+ }
+
+ if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
+ PMD_RX_LOG(ERR, rxq,
+ "New buffer allocation failed,"
+ "dropping incoming packet\n");
+ qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num);
+ rte_eth_devices[rxq->port_id].
+ data->rx_mbuf_alloc_failed++;
+ rxq->rx_alloc_errors++;
+ break;
+ }
+ qede_rx_bd_ring_consume(rxq);
+
+ if (!tpa_start_flg && fp_cqe->bd_num > 1) {
+ PMD_RX_LOG(DEBUG, rxq, "Jumbo-over-BD packet: %02x BDs"
+ " len on first: %04x Total Len: %04x",
+ fp_cqe->bd_num, len, pkt_len);
+ num_segs = fp_cqe->bd_num - 1;
+ seg1 = rx_mb;
+ if (qede_process_sg_pkts(p_rxq, seg1, num_segs,
+ pkt_len - len))
+ goto next_cqe;
+ for (j = 0; j < num_segs; j++) {
+ if (qede_alloc_rx_buffer(rxq)) {
+ PMD_RX_LOG(ERR, rxq,
+ "Buffer allocation failed");
+ rte_eth_devices[rxq->port_id].
+ data->rx_mbuf_alloc_failed++;
+ rxq->rx_alloc_errors++;
+ break;
+ }
+ rxq->rx_segs++;
+ }
+ }
+ rxq->rx_segs++; /* for the first segment */
+
+ /* Prefetch next mbuf while processing current one. */
+ preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
+ rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);
+
+ /* Update rest of the MBUF fields */
+ rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
+ rx_mb->port = rxq->port_id;
+ rx_mb->ol_flags = ol_flags;
+ rx_mb->data_len = len;
+ rx_mb->packet_type = packet_type;
+ PMD_RX_LOG(INFO, rxq,
+ "pkt_type 0x%04x len %u hash_type %d hash_val 0x%x"
+ " ol_flags 0x%04lx\n",
+ packet_type, len, htype, rx_mb->hash.rss,
+ (unsigned long)ol_flags);
+ if (!tpa_start_flg) {
+ rx_mb->nb_segs = fp_cqe->bd_num;
+ rx_mb->pkt_len = pkt_len;
+ } else {
+ /* store ref to the updated mbuf */
+ tpa_info->tpa_head = rx_mb;
+ tpa_info->tpa_tail = tpa_info->tpa_head;
+ }
+ rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));
+tpa_end:
+ if (!tpa_start_flg) {
+ rx_pkts[rx_pkt] = rx_mb;
+ rx_pkt++;
+ }
+next_cqe:
+ ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
+ sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
+ if (rx_pkt == nb_pkts) {
+ PMD_RX_LOG(DEBUG, rxq,
+ "Budget reached nb_pkts=%u received=%u",
+ rx_pkt, nb_pkts);
+ break;
+ }
+ }
+
+ qede_update_rx_prod(qdev, rxq);
+
+ rxq->rcv_pkts += rx_pkt;
+
+ PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d", rx_pkt, rte_lcore_id());
+
+ return rx_pkt;
+}
+
+static inline void
+qede_free_tx_pkt(struct qede_tx_queue *txq)
+{
+ struct rte_mbuf *mbuf;
+ uint16_t nb_segs;
+ uint16_t idx;
+
+ idx = TX_CONS(txq);
+ mbuf = txq->sw_tx_ring[idx].mbuf;
+ if (mbuf) {
+ nb_segs = mbuf->nb_segs;
+ PMD_TX_LOG(DEBUG, txq, "nb_segs to free %u\n", nb_segs);
+ while (nb_segs) {
+			/* Consume one Tx PBL BD per segment, mirroring Rx BD
+			 * consumption in the receive path.
+			 */
+ ecore_chain_consume(&txq->tx_pbl);
+ txq->nb_tx_avail++;
+ nb_segs--;
+ }
+ rte_pktmbuf_free(mbuf);
+ txq->sw_tx_ring[idx].mbuf = NULL;
+ txq->sw_tx_cons++;
+ PMD_TX_LOG(DEBUG, txq, "Freed tx packet\n");
+ } else {
+ ecore_chain_consume(&txq->tx_pbl);
+ txq->nb_tx_avail++;
+ }
+}
+
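+/* Free completed Tx packets by walking the PBL from the software consumer
+ * up to the hardware consumer index reported in the status block.
+ */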
+static inline void
+qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
+ struct qede_tx_queue *txq)
+{
+ uint16_t hw_bd_cons;
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+ uint16_t sw_tx_cons;
+#endif
+
+ rte_compiler_barrier();
+ hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+ sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl);
+ PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n",
+ abs(hw_bd_cons - sw_tx_cons));
+#endif
+ while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl))
+ qede_free_tx_pkt(txq);
+}
+
+/* Populate scatter-gather BD fields for the 2nd and subsequent segments:
+ * the 2nd and 3rd segments use the eth_tx_2nd_bd/eth_tx_3rd_bd layouts,
+ * any remaining segments use plain eth_tx_bd entries.
+ */
+static inline uint8_t
+qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
+ struct eth_tx_2nd_bd **bd2, struct eth_tx_3rd_bd **bd3)
+{
+ struct qede_tx_queue *txq = p_txq;
+ struct eth_tx_bd *tx_bd = NULL;
+ dma_addr_t mapping;
+ uint8_t nb_segs = 0;
+
+ /* Check for scattered buffers */
+ while (m_seg) {
+ if (nb_segs == 0) {
+ if (!*bd2) {
+ *bd2 = (struct eth_tx_2nd_bd *)
+ ecore_chain_produce(&txq->tx_pbl);
+ memset(*bd2, 0, sizeof(struct eth_tx_2nd_bd));
+ nb_segs++;
+ }
+ mapping = rte_mbuf_data_dma_addr(m_seg);
+ QEDE_BD_SET_ADDR_LEN(*bd2, mapping, m_seg->data_len);
+ PMD_TX_LOG(DEBUG, txq, "BD2 len %04x", m_seg->data_len);
+ } else if (nb_segs == 1) {
+ if (!*bd3) {
+ *bd3 = (struct eth_tx_3rd_bd *)
+ ecore_chain_produce(&txq->tx_pbl);
+ memset(*bd3, 0, sizeof(struct eth_tx_3rd_bd));
+ nb_segs++;
+ }
+ mapping = rte_mbuf_data_dma_addr(m_seg);
+ QEDE_BD_SET_ADDR_LEN(*bd3, mapping, m_seg->data_len);
+ PMD_TX_LOG(DEBUG, txq, "BD3 len %04x", m_seg->data_len);
+ } else {
+ tx_bd = (struct eth_tx_bd *)
+ ecore_chain_produce(&txq->tx_pbl);
+ memset(tx_bd, 0, sizeof(*tx_bd));
+ nb_segs++;
+ mapping = rte_mbuf_data_dma_addr(m_seg);
+ QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len);
+ PMD_TX_LOG(DEBUG, txq, "BD len %04x", m_seg->data_len);
+ }
+ m_seg = m_seg->next;
+ }
+
+ /* Return total scattered buffers */
+ return nb_segs;
+}
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+static inline void
+print_tx_bd_info(struct qede_tx_queue *txq,
+ struct eth_tx_1st_bd *bd1,
+ struct eth_tx_2nd_bd *bd2,
+ struct eth_tx_3rd_bd *bd3,
+ uint64_t tx_ol_flags)
+{
+ char ol_buf[256] = { 0 }; /* for verbose prints */
+
+ if (bd1)
+ PMD_TX_LOG(INFO, txq,
+ "BD1: nbytes=%u nbds=%u bd_flags=04%x bf=%04x",
+ rte_cpu_to_le_16(bd1->nbytes), bd1->data.nbds,
+ bd1->data.bd_flags.bitfields,
+ rte_cpu_to_le_16(bd1->data.bitfields));
+ if (bd2)
+ PMD_TX_LOG(INFO, txq,
+ "BD2: nbytes=%u bf=%04x\n",
+ rte_cpu_to_le_16(bd2->nbytes), bd2->data.bitfields1);
+ if (bd3)
+ PMD_TX_LOG(INFO, txq,
+ "BD3: nbytes=%u bf=%04x mss=%u\n",
+ rte_cpu_to_le_16(bd3->nbytes),
+ rte_cpu_to_le_16(bd3->data.bitfields),
+ rte_cpu_to_le_16(bd3->data.lso_mss));
+
+ rte_get_tx_ol_flag_list(tx_ol_flags, ol_buf, sizeof(ol_buf));
+ PMD_TX_LOG(INFO, txq, "TX offloads = %s\n", ol_buf);
+}
+#endif
+
+/* TX prepare to check that packets meet TX conditions */
+uint16_t
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct qede_tx_queue *txq = p_txq;
+#else
+qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+#endif
+ uint64_t ol_flags;
+ struct rte_mbuf *m;
+ uint16_t i;
+ int ret;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ if (m->nb_segs >= ETH_TX_MAX_BDS_PER_LSO_PACKET) {
+ rte_errno = EINVAL;
+ break;
+ }
+ /* TBD: confirm it's ~9700B for both? */
+ if (m->tso_segsz > ETH_TX_MAX_NON_LSO_PKT_LEN) {
+ rte_errno = EINVAL;
+ break;
+ }
+ } else {
+ if (m->nb_segs >= ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) {
+ rte_errno = EINVAL;
+ break;
+ }
+ }
+ if (ol_flags & QEDE_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = ENOTSUP;
+ break;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ break;
+ }
+#endif
+ /* TBD: pseudo csum calculation required iff
+ * ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE not set?
+ */
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ break;
+ }
+ }
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+ if (unlikely(i != nb_pkts))
+ PMD_TX_LOG(ERR, txq, "TX prepare failed for %u\n",
+ nb_pkts - i);
+#endif
+ return i;
+}
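+/* Typical usage (sketch, not mandated by the driver): run the prepare
+ * callback before the burst so malformed or unsupported packets are
+ * rejected with rte_errno set, e.g.:
+ *
+ *   nb_ok = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
+ *   nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_ok);
+ */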
+
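+/* Burst transmit: reclaim completions when the number of free BDs drops
+ * below the threshold, build up to three fixed BDs (plus any scatter
+ * gather BDs) per packet and ring the doorbell once for the whole burst.
+ */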
+uint16_t
+qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct qede_tx_queue *txq = p_txq;
+ struct qede_dev *qdev = txq->qdev;
+ struct ecore_dev *edev = &qdev->edev;
+ struct rte_mbuf *mbuf;
+ struct rte_mbuf *m_seg = NULL;
+ uint16_t nb_tx_pkts;
+ uint16_t bd_prod;
+ uint16_t idx;
+ uint16_t nb_frags;
+ uint16_t nb_pkt_sent = 0;
+ uint8_t nbds;
+ bool ipv6_ext_flg;
+ bool lso_flg;
+ bool tunn_flg;
+ struct eth_tx_1st_bd *bd1;
+ struct eth_tx_2nd_bd *bd2;
+ struct eth_tx_3rd_bd *bd3;
+ uint64_t tx_ol_flags;
+ uint16_t hdr_size;
+
+ if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
+ PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u",
+ nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
+ qede_process_tx_compl(edev, txq);
+ }
+
+ nb_tx_pkts = nb_pkts;
+ bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
+ while (nb_tx_pkts--) {
+ /* Init flags/values */
+ ipv6_ext_flg = false;
+ tunn_flg = false;
+ lso_flg = false;
+ nbds = 0;
+ bd1 = NULL;
+ bd2 = NULL;
+ bd3 = NULL;
+ hdr_size = 0;
+
+ mbuf = *tx_pkts++;
+ assert(mbuf);
+
+ /* Check that enough TX BDs are available for this packet */
+ if (unlikely(txq->nb_tx_avail < mbuf->nb_segs))
+ break;
+
+ tx_ol_flags = mbuf->ol_flags;
+
+#define RTE_ETH_IS_IPV6_HDR_EXT(ptype) ((ptype) & RTE_PTYPE_L3_IPV6_EXT)
+ if (RTE_ETH_IS_IPV6_HDR_EXT(mbuf->packet_type))
+ ipv6_ext_flg = true;
+
+ if (RTE_ETH_IS_TUNNEL_PKT(mbuf->packet_type))
+ tunn_flg = true;
+
+ if (tx_ol_flags & PKT_TX_TCP_SEG)
+ lso_flg = true;
+
+ if (lso_flg) {
+ if (unlikely(txq->nb_tx_avail <
+ ETH_TX_MIN_BDS_PER_LSO_PKT))
+ break;
+ } else {
+ if (unlikely(txq->nb_tx_avail <
+ ETH_TX_MIN_BDS_PER_NON_LSO_PKT))
+ break;
+ }
+
+ if (tunn_flg && ipv6_ext_flg) {
+ if (unlikely(txq->nb_tx_avail <
+ ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
+ break;
+ }
+ if (ipv6_ext_flg) {
+ if (unlikely(txq->nb_tx_avail <
+ ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT))
+ break;
+ }
+
+ /* Fill the entry in the SW ring and the BDs in the FW ring */
+ idx = TX_PROD(txq);
+ txq->sw_tx_ring[idx].mbuf = mbuf;
+
+ /* BD1 */
+ bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
+ memset(bd1, 0, sizeof(struct eth_tx_1st_bd));
+ nbds++;
+
+ bd1->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
+ /* FW 8.10.x specific change */
+ if (!lso_flg) {
+ bd1->data.bitfields |=
+ (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
+ << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+ /* Map MBUF linear data for DMA and set in the BD1 */
+ QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
+ mbuf->data_len);
+ } else {
+ /* For LSO, the packet header and payload must reside in
+ * buffers pointed to by different BDs. BD1 is used for the
+ * header and BD2 onwards for the data.
+ */
+ hdr_size = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
+ QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
+ hdr_size);
+ }
+
+ if (tunn_flg) {
+ /* First indicate it's a tunnel pkt */
+ bd1->data.bd_flags.bitfields |=
+ ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
+ ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+
+ /* Legacy FW had flipped behavior with regard to this bit,
+ * i.e. it needed to be set to prevent the FW from touching
+ * encapsulated packets when it didn't need to.
+ */
+ if (unlikely(txq->is_legacy))
+ bd1->data.bitfields ^=
+ 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+
+ /* Outer IP checksum offload */
+ if (tx_ol_flags & PKT_TX_OUTER_IP_CKSUM) {
+ bd1->data.bd_flags.bitfields |=
+ ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK <<
+ ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
+ }
+
+ /* Outer UDP checksum offload */
+ bd1->data.bd_flags.bitfields |=
+ ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
+ ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
+ }
+
+ /* Descriptor-based VLAN insertion */
+ if (tx_ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
+ bd1->data.vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
+ bd1->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
+ }
+
+ if (lso_flg)
+ bd1->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
+
+ /* Offload the IP checksum in the hardware */
+ if ((lso_flg) || (tx_ol_flags & PKT_TX_IP_CKSUM))
+ bd1->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+
+ /* L4 checksum offload (tcp or udp) */
+ if ((lso_flg) || (tx_ol_flags & (PKT_TX_TCP_CKSUM |
+ PKT_TX_UDP_CKSUM)))
+ /* PKT_TX_TCP_SEG implies PKT_TX_TCP_CKSUM */
+ bd1->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+
+ /* BD2 */
+ if (lso_flg || ipv6_ext_flg) {
+ bd2 = (struct eth_tx_2nd_bd *)ecore_chain_produce
+ (&txq->tx_pbl);
+ memset(bd2, 0, sizeof(struct eth_tx_2nd_bd));
+ nbds++;
+ QEDE_BD_SET_ADDR_LEN(bd2,
+ (hdr_size +
+ rte_mbuf_data_dma_addr(mbuf)),
+ mbuf->data_len - hdr_size);
+ /* TBD: check pseudo csum iff tx_prepare not called? */
+ if (ipv6_ext_flg) {
+ bd2->data.bitfields1 |=
+ ETH_L4_PSEUDO_CSUM_ZERO_LENGTH <<
+ ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT;
+ }
+ }
+
+ /* BD3 */
+ if (lso_flg || ipv6_ext_flg) {
+ bd3 = (struct eth_tx_3rd_bd *)ecore_chain_produce
+ (&txq->tx_pbl);
+ memset(bd3, 0, sizeof(struct eth_tx_3rd_bd));
+ nbds++;
+ if (lso_flg) {
+ bd3->data.lso_mss =
+ rte_cpu_to_le_16(mbuf->tso_segsz);
+ /* Using one header BD */
+ bd3->data.bitfields |=
+ rte_cpu_to_le_16(1 <<
+ ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
+ }
+ }
+
+ /* Handle fragmented MBUF */
+ m_seg = mbuf->next;
+ /* Encode scatter gather buffer descriptors if required */
+ nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3);
+ bd1->data.nbds = nbds + nb_frags;
+ txq->nb_tx_avail -= bd1->data.nbds;
+ txq->sw_tx_prod++;
+ rte_prefetch0(txq->sw_tx_ring[TX_PROD(txq)].mbuf);
+ bd_prod =
+ rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+ print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags);
+ PMD_TX_LOG(INFO, txq, "lso=%d tunn=%d ipv6_ext=%d\n",
+ lso_flg, tunn_flg, ipv6_ext_flg);
+#endif
+ nb_pkt_sent++;
+ txq->xmit_pkts++;
+ }
+
+ /* Write the producer index into the doorbell data and ring the doorbell */
+ txq->tx_db.data.bd_prod = bd_prod;
+ rte_wmb();
+ rte_compiler_barrier();
+ DIRECT_REG_WR_RELAXED(edev, txq->doorbell_addr, txq->tx_db.raw);
+ rte_wmb();
+
+ /* Check again for Tx completions */
+ qede_process_tx_compl(edev, txq);
+
+ PMD_TX_LOG(DEBUG, txq, "to_send=%u sent=%u bd_prod=%u core=%d",
+ nb_pkts, nb_pkt_sent, TX_PROD(txq), rte_lcore_id());
+
+ return nb_pkt_sent;
+}
+
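+/* Attach the ethdev Rx/Tx queues to the fastpath array and assign queue
+ * ids; TX fastpaths get one queue per traffic class.
+ */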
+static void qede_init_fp_queue(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct qede_fastpath *fp;
+ uint8_t i, txq_index, tc;
+ int rxq = 0, txq = 0;
+
+ for_each_queue(i) {
+ fp = &qdev->fp_array[i];
+ if (fp->type & QEDE_FASTPATH_RX) {
+ fp->rxq = eth_dev->data->rx_queues[i];
+ fp->rxq->queue_id = rxq++;
+ }
+
+ if (fp->type & QEDE_FASTPATH_TX) {
+ for (tc = 0; tc < qdev->num_tc; tc++) {
+ txq_index = tc * QEDE_TSS_COUNT(qdev) + txq;
+ fp->txqs[tc] =
+ eth_dev->data->tx_queues[txq_index];
+ fp->txqs[tc]->queue_id = txq_index;
+ if (qdev->dev_info.is_legacy)
+ fp->txqs[tc]->is_legacy = true;
+ }
+ txq++;
+ }
+ }
+}
+
+int qede_dev_start(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ int rc;
+
+ DP_INFO(edev, "Device state is %d\n", qdev->state);
+
+ if (qdev->state == QEDE_DEV_START) {
+ DP_INFO(edev, "Port is already started\n");
+ return 0;
+ }
+
+ if (qdev->state == QEDE_DEV_CONFIG)
+ qede_init_fp_queue(eth_dev);
+
+ rc = qede_start_queues(eth_dev, true);
+ if (rc) {
+ DP_ERR(edev, "Failed to start queues\n");
+ /* TBD: free */
+ return rc;
+ }
+
+ /* Newer SR-IOV PF driver expects RX/TX queues to be started before
+ * enabling RSS. Hence RSS configuration is deferred up to this point.
+ * Also, we would like to retain similar behavior in PF case, so we
+ * don't do PF/VF specific check here.
+ */
+ if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+ if (qede_config_rss(eth_dev))
+ return -1;
+
+ /* Bring up the link */
+ qede_dev_set_link_state(eth_dev, true);
+
+ /* Start/resume traffic */
+ qdev->ops->fastpath_start(edev);
+
+ qdev->state = QEDE_DEV_START;
+
+ DP_INFO(edev, "dev_state is QEDE_DEV_START\n");
+
+ return 0;
+}
+
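+/* Poll TX completions until the queue drains. If it is still stuck after
+ * ~1000 iterations, optionally request an MCP drain and retry once
+ * without that fallback.
+ */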
+static int qede_drain_txq(struct qede_dev *qdev,
+ struct qede_tx_queue *txq, bool allow_drain)
+{
+ struct ecore_dev *edev = &qdev->edev;
+ int rc, cnt = 1000;
+
+ while (txq->sw_tx_cons != txq->sw_tx_prod) {
+ qede_process_tx_compl(edev, txq);
+ if (!cnt) {
+ if (allow_drain) {
+ DP_ERR(edev, "Tx queue[%u] is stuck,"
+ "requesting MCP to drain\n",
+ txq->queue_id);
+ rc = qdev->ops->common->drain(edev);
+ if (rc)
+ return rc;
+ return qede_drain_txq(qdev, txq, false);
+ }
+ DP_ERR(edev, "Timeout waiting for tx queue[%d]:"
+ "PROD=%d, CONS=%d\n",
+ txq->queue_id, txq->sw_tx_prod,
+ txq->sw_tx_cons);
+ return -1;
+ }
+ cnt--;
+ DELAY(1000);
+ rte_compiler_barrier();
+ }
+
+ /* FW finished processing, wait for HW to transmit all tx packets */
+ DELAY(2000);
+
+ return 0;
+}
+
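+/* Deactivate the vport (disabling TPA if LRO was enabled), flush all TX
+ * queues and then stop every Tx/Rx queue in reverse order.
+ */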
+static int qede_stop_queues(struct qede_dev *qdev)
+{
+ struct qed_update_vport_params vport_update_params;
+ struct ecore_dev *edev = &qdev->edev;
+ struct ecore_sge_tpa_params tpa_params;
+ struct qede_fastpath *fp;
+ int rc, tc, i;
+
+ /* Disable the vport */
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+ vport_update_params.vport_id = 0;
+ vport_update_params.update_vport_active_flg = 1;
+ vport_update_params.vport_active_flg = 0;
+ vport_update_params.update_rss_flg = 0;
+ /* Disable TPA */
+ if (qdev->enable_lro) {
+ DP_INFO(edev, "Disabling LRO\n");
+ memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
+ qede_update_sge_tpa_params(&tpa_params, qdev->mtu, false);
+ vport_update_params.sge_tpa_params = &tpa_params;
+ }
+
+ DP_INFO(edev, "Deactivate vport\n");
+ rc = qdev->ops->vport_update(edev, &vport_update_params);
+ if (rc) {
+ DP_ERR(edev, "Failed to update vport\n");
+ return rc;
+ }
+
+ DP_INFO(edev, "Flushing tx queues\n");
+
+ /* Flush Tx queues. If needed, request drain from MCP */
+ for_each_queue(i) {
+ fp = &qdev->fp_array[i];
+
+ if (fp->type & QEDE_FASTPATH_TX) {
+ for (tc = 0; tc < qdev->num_tc; tc++) {
+ struct qede_tx_queue *txq = fp->txqs[tc];
+
+ rc = qede_drain_txq(qdev, txq, true);
+ if (rc)
+ return rc;
+ }
+ }
+ }
+
+ /* Stop all Queues in reverse order */
+ for (i = QEDE_QUEUE_CNT(qdev) - 1; i >= 0; i--) {
+ fp = &qdev->fp_array[i];
+
+ /* Stop the Tx Queue(s) */
+ if (qdev->fp_array[i].type & QEDE_FASTPATH_TX) {
+ for (tc = 0; tc < qdev->num_tc; tc++) {
+ struct qede_tx_queue *txq = fp->txqs[tc];
+ DP_INFO(edev, "Stopping tx queues\n");
+ rc = qdev->ops->q_tx_stop(edev, i, txq->handle);
+ if (rc) {
+ DP_ERR(edev, "Failed to stop TXQ #%d\n",
+ i);
+ return rc;
+ }
+ }
+ }
+
+ /* Stop the Rx Queue */
+ if (qdev->fp_array[i].type & QEDE_FASTPATH_RX) {
+ DP_INFO(edev, "Stopping rx queues\n");
+ rc = qdev->ops->q_rx_stop(edev, i, fp->rxq->handle);
+ if (rc) {
+ DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
+ return rc;
+ }
+ }
+ }
+ qede_reset_fp_rings(qdev);
+
+ return 0;
+}
+
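+/* Return all fastpath rings to a clean state: release mbufs, reset the
+ * ecore chains and SW/HW indices, and refill the RX rings with fresh
+ * buffers.
+ */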
+int qede_reset_fp_rings(struct qede_dev *qdev)
+{
+ struct qede_fastpath *fp;
+ struct qede_tx_queue *txq;
+ uint8_t tc;
+ uint16_t id, i;
+
+ for_each_queue(id) {
+ fp = &qdev->fp_array[id];
+
+ if (fp->type & QEDE_FASTPATH_RX) {
+ DP_INFO(&qdev->edev,
+ "Reset FP chain for RSS %u\n", id);
+ qede_rx_queue_release_mbufs(fp->rxq);
+ ecore_chain_reset(&fp->rxq->rx_bd_ring);
+ ecore_chain_reset(&fp->rxq->rx_comp_ring);
+ fp->rxq->sw_rx_prod = 0;
+ fp->rxq->sw_rx_cons = 0;
+ *fp->rxq->hw_cons_ptr = 0;
+ for (i = 0; i < fp->rxq->nb_rx_desc; i++) {
+ if (qede_alloc_rx_buffer(fp->rxq)) {
+ DP_ERR(&qdev->edev,
+ "RX buffer allocation failed\n");
+ return -ENOMEM;
+ }
+ }
+ }
+ if (fp->type & QEDE_FASTPATH_TX) {
+ for (tc = 0; tc < qdev->num_tc; tc++) {
+ txq = fp->txqs[tc];
+ qede_tx_queue_release_mbufs(txq);
+ ecore_chain_reset(&txq->tx_pbl);
+ txq->sw_tx_cons = 0;
+ txq->sw_tx_prod = 0;
+ *txq->hw_cons_ptr = 0;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* This function frees the Rx/Tx queue memory of all fastpath queues */
+void qede_free_mem_load(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct qede_fastpath *fp;
+ uint16_t txq_idx;
+ uint8_t id;
+ uint8_t tc;
+
+ for_each_queue(id) {
+ fp = &qdev->fp_array[id];
+ if (fp->type & QEDE_FASTPATH_RX) {
+ if (!fp->rxq)
+ continue;
+ qede_rx_queue_release(fp->rxq);
+ eth_dev->data->rx_queues[id] = NULL;
+ } else {
+ for (tc = 0; tc < qdev->num_tc; tc++) {
+ if (!fp->txqs[tc])
+ continue;
+ txq_idx = fp->txqs[tc]->queue_id;
+ qede_tx_queue_release(fp->txqs[tc]);
+ eth_dev->data->tx_queues[txq_idx] = NULL;
+ }
+ }
+ }
+}
+
+void qede_dev_stop(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+
+ DP_INFO(edev, "port %u\n", eth_dev->data->port_id);
+
+ if (qdev->state != QEDE_DEV_START) {
+ DP_INFO(edev, "Device not yet started\n");
+ return;
+ }
+
+ if (qede_stop_queues(qdev))
+ DP_ERR(edev, "Didn't succeed to close queues\n");
+
+ DP_INFO(edev, "Stopped queues\n");
+
+ qdev->ops->fastpath_stop(edev);
+
+ /* Bring the link down */
+ qede_dev_set_link_state(eth_dev, false);
+
+ qdev->state = QEDE_DEV_STOP;
+
+ DP_INFO(edev, "dev_state is QEDE_DEV_STOP\n");
+}
+
+uint16_t
+qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
+ __rte_unused struct rte_mbuf **pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
diff --git a/src/seastar/dpdk/drivers/net/qede/qede_rxtx.h b/src/seastar/dpdk/drivers/net/qede/qede_rxtx.h
new file mode 100644
index 00000000..a1bbd256
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/qede_rxtx.h
@@ -0,0 +1,270 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+
+#ifndef _QEDE_RXTX_H_
+#define _QEDE_RXTX_H_
+
+#include "qede_ethdev.h"
+
+/* Ring Descriptors */
+#define RX_RING_SIZE_POW 16 /* 64K */
+#define RX_RING_SIZE (1ULL << RX_RING_SIZE_POW)
+#define NUM_RX_BDS_MAX (RX_RING_SIZE - 1)
+#define NUM_RX_BDS_MIN 128
+#define NUM_RX_BDS_DEF NUM_RX_BDS_MAX
+#define NUM_RX_BDS(q) (q->nb_rx_desc - 1)
+
+#define TX_RING_SIZE_POW 16 /* 64K */
+#define TX_RING_SIZE (1ULL << TX_RING_SIZE_POW)
+#define NUM_TX_BDS_MAX (TX_RING_SIZE - 1)
+#define NUM_TX_BDS_MIN 128
+#define NUM_TX_BDS_DEF NUM_TX_BDS_MAX
+#define NUM_TX_BDS(q) (q->nb_tx_desc - 1)
+
+#define TX_CONS(txq) (txq->sw_tx_cons & NUM_TX_BDS(txq))
+#define TX_PROD(txq) (txq->sw_tx_prod & NUM_TX_BDS(txq))
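+/* sw_tx_cons/sw_tx_prod are free-running counters; masking with
+ * NUM_TX_BDS() turns them into ring indices, which assumes the descriptor
+ * count is a power of two (e.g. 512 descriptors give a mask of 0x1ff).
+ */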
+
+#define QEDE_DEFAULT_TX_FREE_THRESH 32
+
+#define QEDE_CSUM_ERROR (1 << 0)
+#define QEDE_CSUM_UNNECESSARY (1 << 1)
+#define QEDE_TUNN_CSUM_UNNECESSARY (1 << 2)
+
+#define QEDE_BD_SET_ADDR_LEN(bd, maddr, len) \
+ do { \
+ (bd)->addr.hi = rte_cpu_to_le_32(U64_HI(maddr)); \
+ (bd)->addr.lo = rte_cpu_to_le_32(U64_LO(maddr)); \
+ (bd)->nbytes = rte_cpu_to_le_16(len); \
+ } while (0)
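+/* QEDE_BD_SET_ADDR_LEN() splits the 64-bit DMA address into little-endian
+ * hi/lo halves and stores the buffer length in the BD.
+ */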
+
+#define CQE_HAS_VLAN(flags) \
+ ((flags) & (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK \
+ << PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT))
+
+#define CQE_HAS_OUTER_VLAN(flags) \
+ ((flags) & (PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK \
+ << PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT))
+
+#define QEDE_MIN_RX_BUFF_SIZE (1024)
+#define QEDE_VLAN_TAG_SIZE (4)
+#define QEDE_LLC_SNAP_HDR_LEN (8)
+
+/* Max supported alignment is 256 (shift of 8); the minimal alignment
+ * shift of 6 is optimal for 57xxx HW performance.
+ */
+#define QEDE_L1_CACHE_SHIFT 6
+#define QEDE_RX_ALIGN_SHIFT (RTE_MAX(6, RTE_MIN(8, QEDE_L1_CACHE_SHIFT)))
+#define QEDE_FW_RX_ALIGN_END (1UL << QEDE_RX_ALIGN_SHIFT)
+#define QEDE_CEIL_TO_CACHE_LINE_SIZE(n) (((n) + (QEDE_FW_RX_ALIGN_END - 1)) & \
+ ~(QEDE_FW_RX_ALIGN_END - 1))
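+/* QEDE_CEIL_TO_CACHE_LINE_SIZE() rounds n up to the next multiple of
+ * QEDE_FW_RX_ALIGN_END, e.g. with the default shift of 6 (64 bytes)
+ * 100 -> 128 and 128 -> 128.
+ */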
+/* Note: QEDE_LLC_SNAP_HDR_LEN is optional */
+#define QEDE_ETH_OVERHEAD ((ETHER_HDR_LEN) + ((2 * QEDE_VLAN_TAG_SIZE)) \
+ + (QEDE_LLC_SNAP_HDR_LEN))
+
+#define QEDE_RSS_OFFLOAD_ALL (ETH_RSS_IPV4 |\
+ ETH_RSS_NONFRAG_IPV4_TCP |\
+ ETH_RSS_NONFRAG_IPV4_UDP |\
+ ETH_RSS_IPV6 |\
+ ETH_RSS_NONFRAG_IPV6_TCP |\
+ ETH_RSS_NONFRAG_IPV6_UDP |\
+ ETH_RSS_VXLAN)
+
+#define QEDE_TXQ_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS)
+
+#define MAX_NUM_TC 8
+
+#define for_each_queue(i) for (i = 0; i < qdev->num_queues; i++)
+
+
+/* Macros for non-tunnel packet types lkup table */
+#define QEDE_PKT_TYPE_UNKNOWN 0x0
+#define QEDE_PKT_TYPE_MAX 0xf
+#define QEDE_PKT_TYPE_IPV4 0x1
+#define QEDE_PKT_TYPE_IPV6 0x2
+#define QEDE_PKT_TYPE_IPV4_TCP 0x5
+#define QEDE_PKT_TYPE_IPV6_TCP 0x6
+#define QEDE_PKT_TYPE_IPV4_UDP 0x9
+#define QEDE_PKT_TYPE_IPV6_UDP 0xa
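+/* The values above appear to form a bit-encoded key: bit 0 = IPv4,
+ * bit 1 = IPv6, bit 2 = TCP, bit 3 = UDP (e.g. 0x9 = IPv4 | UDP).
+ */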
+
+/* Macros for tunneled packets with next protocol lkup table */
+#define QEDE_PKT_TYPE_TUNN_GENEVE 0x1
+#define QEDE_PKT_TYPE_TUNN_GRE 0x2
+#define QEDE_PKT_TYPE_TUNN_VXLAN 0x3
+
+/* Bit 2 is a don't-care bit */
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE 0x9
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE 0xa
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN 0xb
+
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE 0xd
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE 0xe
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN 0xf
+
+
+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE 0x11
+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE 0x12
+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN 0x13
+
+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE 0x15
+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE 0x16
+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN 0x17
+
+
+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GENEVE 0x19
+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GRE 0x1a
+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_VXLAN 0x1b
+
+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GENEVE 0x1d
+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GRE 0x1e
+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_VXLAN 0x1f
+
+#define QEDE_PKT_TYPE_TUNN_MAX_TYPE 0x20 /* 2^5 */
+
+#define QEDE_TX_CSUM_OFFLOAD_MASK (PKT_TX_IP_CKSUM | \
+ PKT_TX_TCP_CKSUM | \
+ PKT_TX_UDP_CKSUM | \
+ PKT_TX_OUTER_IP_CKSUM | \
+ PKT_TX_TCP_SEG)
+
+#define QEDE_TX_OFFLOAD_MASK (QEDE_TX_CSUM_OFFLOAD_MASK | \
+ PKT_TX_QINQ_PKT | \
+ PKT_TX_VLAN_PKT)
+
+#define QEDE_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ QEDE_TX_OFFLOAD_MASK)
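+/* QEDE_TX_OFFLOAD_MASK is a subset of PKT_TX_OFFLOAD_MASK, so the XOR
+ * above yields the mbuf TX offload flags this PMD does not support;
+ * qede_xmit_prep_pkts() rejects packets carrying any of them.
+ */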
+
+/*
+ * RX BD descriptor ring
+ */
+struct qede_rx_entry {
+ struct rte_mbuf *mbuf;
+ uint32_t page_offset;
+ /* allows expansion .. */
+};
+
+/* TPA related structures */
+struct qede_agg_info {
+ struct rte_mbuf *tpa_head; /* Pointer to first TPA segment */
+ struct rte_mbuf *tpa_tail; /* Pointer to last TPA segment */
+};
+
+/*
+ * Structure associated with each RX queue.
+ */
+struct qede_rx_queue {
+ struct rte_mempool *mb_pool;
+ struct ecore_chain rx_bd_ring;
+ struct ecore_chain rx_comp_ring;
+ uint16_t *hw_cons_ptr;
+ void OSAL_IOMEM *hw_rxq_prod_addr;
+ struct qede_rx_entry *sw_rx_ring;
+ uint16_t sw_rx_cons;
+ uint16_t sw_rx_prod;
+ uint16_t nb_rx_desc;
+ uint16_t queue_id;
+ uint16_t port_id;
+ uint16_t rx_buf_size;
+ uint64_t rcv_pkts;
+ uint64_t rx_segs;
+ uint64_t rx_hw_errors;
+ uint64_t rx_alloc_errors;
+ struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
+ struct qede_dev *qdev;
+ void *handle;
+};
+
+/*
+ * TX BD descriptor ring
+ */
+struct qede_tx_entry {
+ struct rte_mbuf *mbuf;
+ uint8_t flags;
+};
+
+union db_prod {
+ struct eth_db_data data;
+ uint32_t raw;
+};
+
+struct qede_tx_queue {
+ struct ecore_chain tx_pbl;
+ struct qede_tx_entry *sw_tx_ring;
+ uint16_t nb_tx_desc;
+ uint16_t nb_tx_avail;
+ uint16_t tx_free_thresh;
+ uint16_t queue_id;
+ uint16_t *hw_cons_ptr;
+ uint16_t sw_tx_cons;
+ uint16_t sw_tx_prod;
+ void OSAL_IOMEM *doorbell_addr;
+ volatile union db_prod tx_db;
+ uint16_t port_id;
+ uint64_t xmit_pkts;
+ bool is_legacy;
+ struct qede_dev *qdev;
+ void *handle;
+};
+
+struct qede_fastpath {
+ struct qede_dev *qdev;
+ u8 type;
+ uint8_t id;
+ struct ecore_sb_info *sb_info;
+ struct qede_rx_queue *rxq;
+ struct qede_tx_queue *txqs[MAX_NUM_TC];
+ char name[80];
+};
+
+/*
+ * RX/TX function prototypes
+ */
+int qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp);
+
+int qede_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+
+void qede_rx_queue_release(void *rx_queue);
+
+void qede_tx_queue_release(void *tx_queue);
+
+int qede_dev_start(struct rte_eth_dev *eth_dev);
+
+void qede_dev_stop(struct rte_eth_dev *eth_dev);
+
+int qede_reset_fp_rings(struct qede_dev *qdev);
+
+void qede_free_fp_arrays(struct qede_dev *qdev);
+
+void qede_free_mem_load(struct rte_eth_dev *eth_dev);
+
+uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
+ __rte_unused struct rte_mbuf **pkts,
+ __rte_unused uint16_t nb_pkts);
+
+/* Fastpath resource alloc/dealloc helpers */
+int qede_alloc_fp_resc(struct qede_dev *qdev);
+
+void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev);
+
+#endif /* _QEDE_RXTX_H_ */
diff --git a/src/seastar/dpdk/drivers/net/qede/rte_pmd_qede_version.map b/src/seastar/dpdk/drivers/net/qede/rte_pmd_qede_version.map
new file mode 100644
index 00000000..349c6e1c
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/qede/rte_pmd_qede_version.map
@@ -0,0 +1,4 @@
+DPDK_16.04 {
+
+ local: *;
+};