Diffstat (limited to 'src/spdk/dpdk/drivers/common/iavf')
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/Makefile                     |   27
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/README                       |   20
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/iavf_adminq.c                |  962
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/iavf_adminq.h                |   94
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/iavf_adminq_cmd.h            |  652
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/iavf_alloc.h                 |   36
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/iavf_common.c                | 1025
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/iavf_devids.h                |   17
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/iavf_impl.c                  |   95
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/iavf_osdep.h                 |  199
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/iavf_prototype.h             |   94
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/iavf_register.h              |   93
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/iavf_status.h                |   79
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/iavf_type.h                  | 1014
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/meson.build                  |    8
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/rte_common_iavf_version.map  |   12
-rw-r--r--  src/spdk/dpdk/drivers/common/iavf/virtchnl.h                   | 1311
17 files changed, 5738 insertions(+), 0 deletions(-)
diff --git a/src/spdk/dpdk/drivers/common/iavf/Makefile b/src/spdk/dpdk/drivers/common/iavf/Makefile
new file mode 100644
index 000000000..f06dafd40
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/Makefile
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2019 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_common_iavf.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -Wno-pointer-arith
+CFLAGS += -Wno-cast-qual
+
+EXPORT_MAP := rte_common_iavf_version.map
+
+#
+# all sources are stored in SRCS-y
+#
+SRCS-y += iavf_adminq.c
+SRCS-y += iavf_common.c
+SRCS-y += iavf_impl.c
+
+LDLIBS += -lrte_eal
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/common/iavf/README b/src/spdk/dpdk/drivers/common/iavf/README
new file mode 100644
index 000000000..f301ff665
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/README
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2020 Intel Corporation
+ */
+
+Intel® IAVF driver
+==================
+
+This directory contains the source code of the FreeBSD IAVF driver,
+version cid-avf.2020.04.09.tar.gz, released by the team that develops
+the base drivers for all IAVF NICs. The base/ directory contains the
+original source package.
+
+Updating the driver
+===================
+
+NOTE: The source code in this directory should not be modified apart from
+the following file(s):
+
+ iavf_osdep.h
+ iavf_impl.c
diff --git a/src/spdk/dpdk/drivers/common/iavf/iavf_adminq.c b/src/spdk/dpdk/drivers/common/iavf/iavf_adminq.c
new file mode 100644
index 000000000..8bae51a46
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/iavf_adminq.c
@@ -0,0 +1,962 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2020 Intel Corporation
+ */
+
+#include "iavf_status.h"
+#include "iavf_type.h"
+#include "iavf_register.h"
+#include "iavf_adminq.h"
+#include "iavf_prototype.h"
+
+/**
+ * iavf_adminq_init_regs - Initialize AdminQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_asq and alloc_arq functions have already been called
+ **/
+STATIC void iavf_adminq_init_regs(struct iavf_hw *hw)
+{
+ /* set head and tail registers in our local struct */
+ hw->aq.asq.tail = IAVF_VF_ATQT1;
+ hw->aq.asq.head = IAVF_VF_ATQH1;
+ hw->aq.asq.len = IAVF_VF_ATQLEN1;
+ hw->aq.asq.bal = IAVF_VF_ATQBAL1;
+ hw->aq.asq.bah = IAVF_VF_ATQBAH1;
+ hw->aq.arq.tail = IAVF_VF_ARQT1;
+ hw->aq.arq.head = IAVF_VF_ARQH1;
+ hw->aq.arq.len = IAVF_VF_ARQLEN1;
+ hw->aq.arq.bal = IAVF_VF_ARQBAL1;
+ hw->aq.arq.bah = IAVF_VF_ARQBAH1;
+}
+
+/**
+ * iavf_alloc_adminq_asq_ring - Allocate Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ **/
+enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code;
+
+ ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
+ iavf_mem_atq_ring,
+ (hw->aq.num_asq_entries *
+ sizeof(struct iavf_aq_desc)),
+ IAVF_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ return ret_code;
+
+ ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
+ (hw->aq.num_asq_entries *
+ sizeof(struct iavf_asq_cmd_details)));
+ if (ret_code) {
+ iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+ return ret_code;
+ }
+
+ return ret_code;
+}
+
+/**
+ * iavf_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ **/
+enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code;
+
+ ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
+ iavf_mem_arq_ring,
+ (hw->aq.num_arq_entries *
+ sizeof(struct iavf_aq_desc)),
+ IAVF_ADMINQ_DESC_ALIGNMENT);
+
+ return ret_code;
+}
+
+/**
+ * iavf_free_adminq_asq - Free Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted send buffers have already been cleaned
+ * and de-allocated
+ **/
+void iavf_free_adminq_asq(struct iavf_hw *hw)
+{
+ iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+ iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+}
+
+/**
+ * iavf_free_adminq_arq - Free Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted receive buffers have already been cleaned
+ * and de-allocated
+ **/
+void iavf_free_adminq_arq(struct iavf_hw *hw)
+{
+ iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+}
+
+/**
+ * iavf_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
+ * @hw: pointer to the hardware structure
+ **/
+STATIC enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code;
+ struct iavf_aq_desc *desc;
+ struct iavf_dma_mem *bi;
+ int i;
+
+ /* We'll be allocating the buffer info memory first, then we can
+ * allocate the mapped buffers for the event processing
+ */
+
+ /* buffer_info structures do not need alignment */
+ ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
+ (hw->aq.num_arq_entries * sizeof(struct iavf_dma_mem)));
+ if (ret_code)
+ goto alloc_arq_bufs;
+ hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_arq_entries; i++) {
+ bi = &hw->aq.arq.r.arq_bi[i];
+ ret_code = iavf_allocate_dma_mem(hw, bi,
+ iavf_mem_arq_buf,
+ hw->aq.arq_buf_size,
+ IAVF_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_arq_bufs;
+
+ /* now configure the descriptors for use */
+ desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);
+
+ desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
+ desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
+ desc->opcode = 0;
+ /* This is in accordance with the Admin Queue design; there is no
+ * register for buffer size configuration
+ */
+ desc->datalen = CPU_TO_LE16((u16)bi->size);
+ desc->retval = 0;
+ desc->cookie_high = 0;
+ desc->cookie_low = 0;
+ desc->params.external.addr_high =
+ CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
+ desc->params.external.addr_low =
+ CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));
+ desc->params.external.param0 = 0;
+ desc->params.external.param1 = 0;
+ }
+
+alloc_arq_bufs:
+ return ret_code;
+
+unwind_alloc_arq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+ iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
+
+ return ret_code;
+}
+
+/**
+ * iavf_alloc_asq_bufs - Allocate empty buffer structs for the send queue
+ * @hw: pointer to the hardware structure
+ **/
+STATIC enum iavf_status iavf_alloc_asq_bufs(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code;
+ struct iavf_dma_mem *bi;
+ int i;
+
+ /* No mapped memory needed yet, just the buffer info structures */
+ ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
+ (hw->aq.num_asq_entries * sizeof(struct iavf_dma_mem)));
+ if (ret_code)
+ goto alloc_asq_bufs;
+ hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_asq_entries; i++) {
+ bi = &hw->aq.asq.r.asq_bi[i];
+ ret_code = iavf_allocate_dma_mem(hw, bi,
+ iavf_mem_asq_buf,
+ hw->aq.asq_buf_size,
+ IAVF_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_asq_bufs;
+ }
+alloc_asq_bufs:
+ return ret_code;
+
+unwind_alloc_asq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+ iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
+ return ret_code;
+}
+
+/**
+ * iavf_free_arq_bufs - Free receive queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+STATIC void iavf_free_arq_bufs(struct iavf_hw *hw)
+{
+ int i;
+
+ /* free descriptors */
+ for (i = 0; i < hw->aq.num_arq_entries; i++)
+ iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+
+ /* free the descriptor memory */
+ iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+
+ /* free the dma header */
+ iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
+}
+
+/**
+ * iavf_free_asq_bufs - Free send queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+STATIC void iavf_free_asq_bufs(struct iavf_hw *hw)
+{
+ int i;
+
+ /* only unmap if the address is non-NULL */
+ for (i = 0; i < hw->aq.num_asq_entries; i++)
+ if (hw->aq.asq.r.asq_bi[i].pa)
+ iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+
+ /* free the buffer info list */
+ iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+
+ /* free the descriptor memory */
+ iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+
+ /* free the dma header */
+ iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
+}
+
+/**
+ * iavf_config_asq_regs - configure ASQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the transmit queue
+ **/
+STATIC enum iavf_status iavf_config_asq_regs(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code = IAVF_SUCCESS;
+ u32 reg = 0;
+
+ /* Clear Head and Tail */
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+
+ /* set starting point */
+ wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ IAVF_VF_ATQLEN1_ATQENABLE_MASK));
+ wr32(hw, hw->aq.asq.bal, IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa));
+ wr32(hw, hw->aq.asq.bah, IAVF_HI_DWORD(hw->aq.asq.desc_buf.pa));
+
+ /* Check one register to verify that config was applied */
+ reg = rd32(hw, hw->aq.asq.bal);
+ if (reg != IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa))
+ ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
+}
+
+/**
+ * iavf_config_arq_regs - ARQ register configuration
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the receive queue (event queue)
+ **/
+STATIC enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code = IAVF_SUCCESS;
+ u32 reg = 0;
+
+ /* Clear Head and Tail */
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+
+ /* set starting point */
+ wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ IAVF_VF_ARQLEN1_ARQENABLE_MASK));
+ wr32(hw, hw->aq.arq.bal, IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa));
+ wr32(hw, hw->aq.arq.bah, IAVF_HI_DWORD(hw->aq.arq.desc_buf.pa));
+
+ /* Update tail in the HW to post pre-allocated buffers */
+ wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+
+ /* Check one register to verify that config was applied */
+ reg = rd32(hw, hw->aq.arq.bal);
+ if (reg != IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa))
+ ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
+}
+
+/**
+ * iavf_init_asq - main initialization routine for ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * This is the main initialization routine for the Admin Send Queue.
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.asq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ **/
+enum iavf_status iavf_init_asq(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code = IAVF_SUCCESS;
+
+ if (hw->aq.asq.count > 0) {
+ /* queue already initialized */
+ ret_code = IAVF_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_asq_entries == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = IAVF_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+
+ /* allocate the ring memory */
+ ret_code = iavf_alloc_adminq_asq_ring(hw);
+ if (ret_code != IAVF_SUCCESS)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = iavf_alloc_asq_bufs(hw);
+ if (ret_code != IAVF_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ ret_code = iavf_config_asq_regs(hw);
+ if (ret_code != IAVF_SUCCESS)
+ goto init_config_regs;
+
+ /* success! */
+ hw->aq.asq.count = hw->aq.num_asq_entries;
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ iavf_free_adminq_asq(hw);
+ return ret_code;
+
+init_config_regs:
+ iavf_free_asq_bufs(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * iavf_init_arq - initialize ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main initialization routine for the Admin Receive (Event) Queue.
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ **/
+enum iavf_status iavf_init_arq(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code = IAVF_SUCCESS;
+
+ if (hw->aq.arq.count > 0) {
+ /* queue already initialized */
+ ret_code = IAVF_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0)) {
+ ret_code = IAVF_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+
+ /* allocate the ring memory */
+ ret_code = iavf_alloc_adminq_arq_ring(hw);
+ if (ret_code != IAVF_SUCCESS)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = iavf_alloc_arq_bufs(hw);
+ if (ret_code != IAVF_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ ret_code = iavf_config_arq_regs(hw);
+ if (ret_code != IAVF_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* success! */
+ hw->aq.arq.count = hw->aq.num_arq_entries;
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ iavf_free_adminq_arq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * iavf_shutdown_asq - shutdown the ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Send Queue
+ **/
+enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code = IAVF_SUCCESS;
+
+ iavf_acquire_spinlock(&hw->aq.asq_spinlock);
+
+ if (hw->aq.asq.count == 0) {
+ ret_code = IAVF_ERR_NOT_READY;
+ goto shutdown_asq_out;
+ }
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+ wr32(hw, hw->aq.asq.len, 0);
+ wr32(hw, hw->aq.asq.bal, 0);
+ wr32(hw, hw->aq.asq.bah, 0);
+
+ hw->aq.asq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ iavf_free_asq_bufs(hw);
+
+shutdown_asq_out:
+ iavf_release_spinlock(&hw->aq.asq_spinlock);
+ return ret_code;
+}
+
+/**
+ * iavf_shutdown_arq - shutdown ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Receive Queue
+ **/
+enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code = IAVF_SUCCESS;
+
+ iavf_acquire_spinlock(&hw->aq.arq_spinlock);
+
+ if (hw->aq.arq.count == 0) {
+ ret_code = IAVF_ERR_NOT_READY;
+ goto shutdown_arq_out;
+ }
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+ wr32(hw, hw->aq.arq.len, 0);
+ wr32(hw, hw->aq.arq.bal, 0);
+ wr32(hw, hw->aq.arq.bah, 0);
+
+ hw->aq.arq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ iavf_free_arq_bufs(hw);
+
+shutdown_arq_out:
+ iavf_release_spinlock(&hw->aq.arq_spinlock);
+ return ret_code;
+}
+
+/**
+ * iavf_init_adminq - main initialization routine for Admin Queue
+ * @hw: pointer to the hardware structure
+ *
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ * - hw->aq.asq_buf_size
+ **/
+enum iavf_status iavf_init_adminq(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code;
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.num_asq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = IAVF_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+ iavf_init_spinlock(&hw->aq.asq_spinlock);
+ iavf_init_spinlock(&hw->aq.arq_spinlock);
+
+ /* Set up register offsets */
+ iavf_adminq_init_regs(hw);
+
+ /* setup ASQ command write back timeout */
+ hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT;
+
+ /* allocate the ASQ */
+ ret_code = iavf_init_asq(hw);
+ if (ret_code != IAVF_SUCCESS)
+ goto init_adminq_destroy_spinlocks;
+
+ /* allocate the ARQ */
+ ret_code = iavf_init_arq(hw);
+ if (ret_code != IAVF_SUCCESS)
+ goto init_adminq_free_asq;
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_asq:
+ iavf_shutdown_asq(hw);
+init_adminq_destroy_spinlocks:
+ iavf_destroy_spinlock(&hw->aq.asq_spinlock);
+ iavf_destroy_spinlock(&hw->aq.arq_spinlock);
+
+init_adminq_exit:
+ return ret_code;
+}
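+/* Illustrative usage sketch (not part of the upstream driver): the fields
+ * below must be set by the caller before iavf_init_adminq(); the queue
+ * depths and buffer sizes are hypothetical example values.
+ *
+ * hw->aq.num_asq_entries = 128;
+ * hw->aq.num_arq_entries = 128;
+ * hw->aq.asq_buf_size = 4096;
+ * hw->aq.arq_buf_size = 4096;
+ * if (iavf_init_adminq(hw) != IAVF_SUCCESS)
+ *         ...handle the error and do not touch the AQ...
+ */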
+
+/**
+ * iavf_shutdown_adminq - shutdown routine for the Admin Queue
+ * @hw: pointer to the hardware structure
+ **/
+enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
+{
+ enum iavf_status ret_code = IAVF_SUCCESS;
+
+ if (iavf_check_asq_alive(hw))
+ iavf_aq_queue_shutdown(hw, true);
+
+ iavf_shutdown_asq(hw);
+ iavf_shutdown_arq(hw);
+ iavf_destroy_spinlock(&hw->aq.asq_spinlock);
+ iavf_destroy_spinlock(&hw->aq.arq_spinlock);
+
+ return ret_code;
+}
+
+/**
+ * iavf_clean_asq - cleans Admin send queue
+ * @hw: pointer to the hardware structure
+ *
+ * returns the number of free desc
+ **/
+u16 iavf_clean_asq(struct iavf_hw *hw)
+{
+ struct iavf_adminq_ring *asq = &(hw->aq.asq);
+ struct iavf_asq_cmd_details *details;
+ u16 ntc = asq->next_to_clean;
+ struct iavf_aq_desc desc_cb;
+ struct iavf_aq_desc *desc;
+
+ desc = IAVF_ADMINQ_DESC(*asq, ntc);
+ details = IAVF_ADMINQ_DETAILS(*asq, ntc);
+ while (rd32(hw, hw->aq.asq.head) != ntc) {
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
+ "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
+
+ if (details->callback) {
+ IAVF_ADMINQ_CALLBACK cb_func =
+ (IAVF_ADMINQ_CALLBACK)details->callback;
+ iavf_memcpy(&desc_cb, desc, sizeof(struct iavf_aq_desc),
+ IAVF_DMA_TO_DMA);
+ cb_func(hw, &desc_cb);
+ }
+ iavf_memset(desc, 0, sizeof(*desc), IAVF_DMA_MEM);
+ iavf_memset(details, 0, sizeof(*details), IAVF_NONDMA_MEM);
+ ntc++;
+ if (ntc == asq->count)
+ ntc = 0;
+ desc = IAVF_ADMINQ_DESC(*asq, ntc);
+ details = IAVF_ADMINQ_DETAILS(*asq, ntc);
+ }
+
+ asq->next_to_clean = ntc;
+
+ return IAVF_DESC_UNUSED(asq);
+}
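+/* Illustrative usage sketch (not part of the upstream driver): a completion
+ * callback is attached via the command details at send time and invoked by
+ * iavf_clean_asq() as each descriptor is reclaimed.  The callback name is
+ * hypothetical; the signature follows the cast above.
+ *
+ * static void my_aq_done(struct iavf_hw *hw, struct iavf_aq_desc *desc);
+ *
+ * struct iavf_asq_cmd_details details = {0};
+ *
+ * details.callback = (void *)my_aq_done;
+ * details.async = true;
+ */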
+
+/**
+ * iavf_asq_done - check if FW has processed the Admin Send Queue
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if the firmware has processed all descriptors on the
+ * admin send queue. Returns false if there are still requests pending.
+ **/
+bool iavf_asq_done(struct iavf_hw *hw)
+{
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
+}
+
+/**
+ * iavf_asq_send_command - send command to Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buff: buffer to use for indirect commands
+ * @buff_size: size of buffer for indirect commands
+ * @cmd_details: pointer to command details structure
+ *
+ * This is the main send command driver routine for the Admin Queue send
+ * queue. It runs the queue, cleans the queue, etc
+ **/
+enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
+ struct iavf_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct iavf_asq_cmd_details *cmd_details)
+{
+ enum iavf_status status = IAVF_SUCCESS;
+ struct iavf_dma_mem *dma_buff = NULL;
+ struct iavf_asq_cmd_details *details;
+ struct iavf_aq_desc *desc_on_ring;
+ bool cmd_completed = false;
+ u16 retval = 0;
+ u32 val = 0;
+
+ iavf_acquire_spinlock(&hw->aq.asq_spinlock);
+
+ hw->aq.asq_last_status = IAVF_AQ_RC_OK;
+
+ if (hw->aq.asq.count == 0) {
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Admin queue not initialized.\n");
+ status = IAVF_ERR_QUEUE_EMPTY;
+ goto asq_send_command_error;
+ }
+
+ val = rd32(hw, hw->aq.asq.head);
+ if (val >= hw->aq.num_asq_entries) {
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: head overrun at %d\n", val);
+ status = IAVF_ERR_QUEUE_EMPTY;
+ goto asq_send_command_error;
+ }
+
+ details = IAVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
+ if (cmd_details) {
+ iavf_memcpy(details,
+ cmd_details,
+ sizeof(struct iavf_asq_cmd_details),
+ IAVF_NONDMA_TO_NONDMA);
+
+ /* If the cmd_details are defined copy the cookie. The
+ * CPU_TO_LE32 is not needed here because the data is ignored
+ * by the FW, only used by the driver
+ */
+ if (details->cookie) {
+ desc->cookie_high =
+ CPU_TO_LE32(IAVF_HI_DWORD(details->cookie));
+ desc->cookie_low =
+ CPU_TO_LE32(IAVF_LO_DWORD(details->cookie));
+ }
+ } else {
+ iavf_memset(details, 0,
+ sizeof(struct iavf_asq_cmd_details),
+ IAVF_NONDMA_MEM);
+ }
+
+ /* clear requested flags and then set additional flags if defined */
+ desc->flags &= ~CPU_TO_LE16(details->flags_dis);
+ desc->flags |= CPU_TO_LE16(details->flags_ena);
+
+ if (buff_size > hw->aq.asq_buf_size) {
+ iavf_debug(hw,
+ IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Invalid buffer size: %d.\n",
+ buff_size);
+ status = IAVF_ERR_INVALID_SIZE;
+ goto asq_send_command_error;
+ }
+
+ if (details->postpone && !details->async) {
+ iavf_debug(hw,
+ IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Async flag not set along with postpone flag");
+ status = IAVF_ERR_PARAM;
+ goto asq_send_command_error;
+ }
+
+ /* Call the clean function to reclaim any descriptors that the FW has
+ * processed; it returns the number of descriptors available.  In the
+ * case of asynchronous completions the clean function could also be
+ * called from a separate thread.
+ */
+ if (iavf_clean_asq(hw) == 0) {
+ iavf_debug(hw,
+ IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Error queue is full.\n");
+ status = IAVF_ERR_ADMIN_QUEUE_FULL;
+ goto asq_send_command_error;
+ }
+
+ /* initialize the temp desc pointer with the right desc */
+ desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
+
+ /* if the desc is available copy the temp desc to the right place */
+ iavf_memcpy(desc_on_ring, desc, sizeof(struct iavf_aq_desc),
+ IAVF_NONDMA_TO_DMA);
+
+ /* if buff is not NULL assume indirect command */
+ if (buff != NULL) {
+ dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
+ /* copy the user buff into the respective DMA buff */
+ iavf_memcpy(dma_buff->va, buff, buff_size,
+ IAVF_NONDMA_TO_DMA);
+ desc_on_ring->datalen = CPU_TO_LE16(buff_size);
+
+ /* Update the address values in the desc with the pa value
+ * for respective buffer
+ */
+ desc_on_ring->params.external.addr_high =
+ CPU_TO_LE32(IAVF_HI_DWORD(dma_buff->pa));
+ desc_on_ring->params.external.addr_low =
+ CPU_TO_LE32(IAVF_LO_DWORD(dma_buff->pa));
+ }
+
+ /* bump the tail */
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
+ iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
+ buff, buff_size);
+ (hw->aq.asq.next_to_use)++;
+ if (hw->aq.asq.next_to_use == hw->aq.asq.count)
+ hw->aq.asq.next_to_use = 0;
+ if (!details->postpone)
+ wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+
+ /* if cmd_details are not defined or async flag is not set,
+ * we need to wait for desc write back
+ */
+ if (!details->async && !details->postpone) {
+ u32 total_delay = 0;
+
+ do {
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ if (iavf_asq_done(hw))
+ break;
+ iavf_usec_delay(50);
+ total_delay += 50;
+ } while (total_delay < hw->aq.asq_cmd_timeout);
+ }
+
+ /* if ready, copy the desc back to temp */
+ if (iavf_asq_done(hw)) {
+ iavf_memcpy(desc, desc_on_ring, sizeof(struct iavf_aq_desc),
+ IAVF_DMA_TO_NONDMA);
+ if (buff != NULL)
+ iavf_memcpy(buff, dma_buff->va, buff_size,
+ IAVF_DMA_TO_NONDMA);
+ retval = LE16_TO_CPU(desc->retval);
+ if (retval != 0) {
+ iavf_debug(hw,
+ IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Command completed with error 0x%X.\n",
+ retval);
+
+ /* strip off FW internal code */
+ retval &= 0xff;
+ }
+ cmd_completed = true;
+ if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_OK)
+ status = IAVF_SUCCESS;
+ else if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_EBUSY)
+ status = IAVF_ERR_NOT_READY;
+ else
+ status = IAVF_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.asq_last_status = (enum iavf_admin_queue_err)retval;
+ }
+
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: desc and buffer writeback:\n");
+ iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
+
+ /* save writeback aq if requested */
+ if (details->wb_desc)
+ iavf_memcpy(details->wb_desc, desc_on_ring,
+ sizeof(struct iavf_aq_desc), IAVF_DMA_TO_NONDMA);
+
+ /* update the error if time out occurred */
+ if ((!cmd_completed) &&
+ (!details->async && !details->postpone)) {
+ if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: AQ Critical error.\n");
+ status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
+ } else {
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Writeback timeout.\n");
+ status = IAVF_ERR_ADMIN_QUEUE_TIMEOUT;
+ }
+ }
+
+asq_send_command_error:
+ iavf_release_spinlock(&hw->aq.asq_spinlock);
+ return status;
+}
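+/* Illustrative usage sketch (not part of the upstream driver): sending a
+ * direct (bufferless) command.  The opcode is a real value from
+ * iavf_adminq_cmd.h; error handling is elided.
+ *
+ * struct iavf_aq_desc desc;
+ *
+ * iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_queue_shutdown);
+ * if (iavf_asq_send_command(hw, &desc, NULL, 0, NULL) != IAVF_SUCCESS)
+ *         ...inspect hw->aq.asq_last_status...
+ */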
+
+/**
+ * iavf_fill_default_direct_cmd_desc - AQ descriptor helper function
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Fill the desc with default values
+ **/
+void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc,
+ u16 opcode)
+{
+ /* zero out the desc */
+ iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc),
+ IAVF_NONDMA_MEM);
+ desc->opcode = CPU_TO_LE16(opcode);
+ desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_SI);
+}
+
+/**
+ * iavf_clean_arq_element
+ * @hw: pointer to the hw struct
+ * @e: event info from the receive descriptor, includes any buffers
+ * @pending: number of events that could be left to process
+ *
+ * This function cleans one Admin Receive Queue element and returns
+ * the contents through e. It can also return how many events are
+ * left to process through 'pending'
+ **/
+enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
+ struct iavf_arq_event_info *e,
+ u16 *pending)
+{
+ enum iavf_status ret_code = IAVF_SUCCESS;
+ u16 ntc = hw->aq.arq.next_to_clean;
+ struct iavf_aq_desc *desc;
+ struct iavf_dma_mem *bi;
+ u16 desc_idx;
+ u16 datalen;
+ u16 flags;
+ u16 ntu;
+
+ /* pre-clean the event info */
+ iavf_memset(&e->desc, 0, sizeof(e->desc), IAVF_NONDMA_MEM);
+
+ /* take the lock before we start messing with the ring */
+ iavf_acquire_spinlock(&hw->aq.arq_spinlock);
+
+ if (hw->aq.arq.count == 0) {
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
+ "AQRX: Admin queue not initialized.\n");
+ ret_code = IAVF_ERR_QUEUE_EMPTY;
+ goto clean_arq_element_err;
+ }
+
+ /* set next_to_use to head */
+ ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK;
+ if (ntu == ntc) {
+ /* nothing to do - shouldn't need to update ring's values */
+ ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK;
+ goto clean_arq_element_out;
+ }
+
+ /* now clean the next descriptor */
+ desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc);
+ desc_idx = ntc;
+
+ hw->aq.arq_last_status =
+ (enum iavf_admin_queue_err)LE16_TO_CPU(desc->retval);
+ flags = LE16_TO_CPU(desc->flags);
+ if (flags & IAVF_AQ_FLAG_ERR) {
+ ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
+ iavf_debug(hw,
+ IAVF_DEBUG_AQ_MESSAGE,
+ "AQRX: Event received with error 0x%X.\n",
+ hw->aq.arq_last_status);
+ }
+
+ iavf_memcpy(&e->desc, desc, sizeof(struct iavf_aq_desc),
+ IAVF_DMA_TO_NONDMA);
+ datalen = LE16_TO_CPU(desc->datalen);
+ e->msg_len = min(datalen, e->buf_len);
+ if (e->msg_buf != NULL && (e->msg_len != 0))
+ iavf_memcpy(e->msg_buf,
+ hw->aq.arq.r.arq_bi[desc_idx].va,
+ e->msg_len, IAVF_DMA_TO_NONDMA);
+
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
+ iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
+ hw->aq.arq_buf_size);
+
+ /* Restore the original datalen and buffer address in the desc,
+ * FW updates datalen to indicate the event message
+ * size
+ */
+ bi = &hw->aq.arq.r.arq_bi[ntc];
+ iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc), IAVF_DMA_MEM);
+
+ desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
+ desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
+ desc->datalen = CPU_TO_LE16((u16)bi->size);
+ desc->params.external.addr_high = CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
+ desc->params.external.addr_low = CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));
+
+ /* set tail = the last cleaned desc index. */
+ wr32(hw, hw->aq.arq.tail, ntc);
+ /* ntc is updated to tail + 1 */
+ ntc++;
+ if (ntc == hw->aq.num_arq_entries)
+ ntc = 0;
+ hw->aq.arq.next_to_clean = ntc;
+ hw->aq.arq.next_to_use = ntu;
+
+clean_arq_element_out:
+ /* Set pending if needed, unlock and return */
+ if (pending != NULL)
+ *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+clean_arq_element_err:
+ iavf_release_spinlock(&hw->aq.arq_spinlock);
+
+ return ret_code;
+}
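+/* Illustrative usage sketch (not part of the upstream driver): draining all
+ * pending ARQ events.  The message buffer must be at least
+ * hw->aq.arq_buf_size bytes; allocation is left to the caller.
+ *
+ * struct iavf_arq_event_info event;
+ * u16 pending = 0;
+ *
+ * event.buf_len = hw->aq.arq_buf_size;
+ * event.msg_buf = ...caller-allocated buffer of buf_len bytes...;
+ * do {
+ *         if (iavf_clean_arq_element(hw, &event, &pending))
+ *                 break;
+ *         ...dispatch event.desc and event.msg_buf (event.msg_len bytes)...
+ * } while (pending);
+ */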
+
diff --git a/src/spdk/dpdk/drivers/common/iavf/iavf_adminq.h b/src/spdk/dpdk/drivers/common/iavf/iavf_adminq.h
new file mode 100644
index 000000000..93214162e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/iavf_adminq.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2020 Intel Corporation
+ */
+
+#ifndef _IAVF_ADMINQ_H_
+#define _IAVF_ADMINQ_H_
+
+#include "iavf_osdep.h"
+#include "iavf_status.h"
+#include "iavf_adminq_cmd.h"
+
+#define IAVF_ADMINQ_DESC(R, i) \
+ (&(((struct iavf_aq_desc *)((R).desc_buf.va))[i]))
+
+#define IAVF_ADMINQ_DESC_ALIGNMENT 4096
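+
+/* Illustrative note (not in the upstream header): IAVF_ADMINQ_DESC(R, i)
+ * returns a pointer to the i-th descriptor in ring R's DMA-backed buffer,
+ * e.g. desc = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
+ */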
+
+struct iavf_adminq_ring {
+ struct iavf_virt_mem dma_head; /* space for dma structures */
+ struct iavf_dma_mem desc_buf; /* descriptor ring memory */
+ struct iavf_virt_mem cmd_buf; /* command buffer memory */
+
+ union {
+ struct iavf_dma_mem *asq_bi;
+ struct iavf_dma_mem *arq_bi;
+ } r;
+
+ u16 count; /* Number of descriptors */
+ u16 rx_buf_len; /* Admin Receive Queue buffer length */
+
+ /* used for interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ /* used for queue tracking */
+ u32 head;
+ u32 tail;
+ u32 len;
+ u32 bah;
+ u32 bal;
+};
+
+/* ASQ transaction details */
+struct iavf_asq_cmd_details {
+ void *callback; /* cast from type IAVF_ADMINQ_CALLBACK */
+ u64 cookie;
+ u16 flags_ena;
+ u16 flags_dis;
+ bool async;
+ bool postpone;
+ struct iavf_aq_desc *wb_desc;
+};
+
+#define IAVF_ADMINQ_DETAILS(R, i) \
+ (&(((struct iavf_asq_cmd_details *)((R).cmd_buf.va))[i]))
+
+/* ARQ event information */
+struct iavf_arq_event_info {
+ struct iavf_aq_desc desc;
+ u16 msg_len;
+ u16 buf_len;
+ u8 *msg_buf;
+};
+
+/* Admin Queue information */
+struct iavf_adminq_info {
+ struct iavf_adminq_ring arq; /* receive queue */
+ struct iavf_adminq_ring asq; /* send queue */
+ u32 asq_cmd_timeout; /* send queue cmd write back timeout*/
+ u16 num_arq_entries; /* receive queue depth */
+ u16 num_asq_entries; /* send queue depth */
+ u16 arq_buf_size; /* receive queue buffer size */
+ u16 asq_buf_size; /* send queue buffer size */
+ u16 fw_maj_ver; /* firmware major version */
+ u16 fw_min_ver; /* firmware minor version */
+ u32 fw_build; /* firmware build number */
+ u16 api_maj_ver; /* api major version */
+ u16 api_min_ver; /* api minor version */
+
+ struct iavf_spinlock asq_spinlock; /* Send queue spinlock */
+ struct iavf_spinlock arq_spinlock; /* Receive queue spinlock */
+
+ /* last status values on send and receive queues */
+ enum iavf_admin_queue_err asq_last_status;
+ enum iavf_admin_queue_err arq_last_status;
+};
+
+/* general information */
+#define IAVF_AQ_LARGE_BUF 512
+#define IAVF_ASQ_CMD_TIMEOUT 250000 /* usecs */
+
+void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc,
+ u16 opcode);
+
+#endif /* _IAVF_ADMINQ_H_ */
diff --git a/src/spdk/dpdk/drivers/common/iavf/iavf_adminq_cmd.h b/src/spdk/dpdk/drivers/common/iavf/iavf_adminq_cmd.h
new file mode 100644
index 000000000..5b748426a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/iavf_adminq_cmd.h
@@ -0,0 +1,652 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2020 Intel Corporation
+ */
+
+#ifndef _IAVF_ADMINQ_CMD_H_
+#define _IAVF_ADMINQ_CMD_H_
+
+/* This header file defines the iavf Admin Queue commands and is shared between
+ * iavf Firmware and Software.
+ *
+ * This file needs to comply with the Linux Kernel coding style.
+ */
+
+#define IAVF_FW_API_VERSION_MAJOR 0x0001
+#define IAVF_FW_API_VERSION_MINOR_X722 0x0006
+#define IAVF_FW_API_VERSION_MINOR_X710 0x0007
+
+#define IAVF_FW_MINOR_VERSION(_h) ((_h)->mac.type == IAVF_MAC_XL710 ? \
+ IAVF_FW_API_VERSION_MINOR_X710 : \
+ IAVF_FW_API_VERSION_MINOR_X722)
+
+/* API version 1.7 implements additional link and PHY-specific APIs */
+#define IAVF_MINOR_VER_GET_LINK_INFO_XL710 0x0007
+/* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */
+#define IAVF_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006
+
+struct iavf_aq_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 param2;
+ __le32 param3;
+ } internal;
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+ } external;
+ u8 raw[16];
+ } params;
+};
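+
+/* Illustrative note (not in the upstream header): direct commands carry up
+ * to 16 bytes of parameters in params.internal or params.raw, while indirect
+ * commands place the buffer's DMA address in params.external.addr_high and
+ * params.external.addr_low and its length in datalen, as done in
+ * iavf_asq_send_command().
+ */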
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets*/
+#define IAVF_AQ_FLAG_DD_SHIFT 0
+#define IAVF_AQ_FLAG_CMP_SHIFT 1
+#define IAVF_AQ_FLAG_ERR_SHIFT 2
+#define IAVF_AQ_FLAG_VFE_SHIFT 3
+#define IAVF_AQ_FLAG_LB_SHIFT 9
+#define IAVF_AQ_FLAG_RD_SHIFT 10
+#define IAVF_AQ_FLAG_VFC_SHIFT 11
+#define IAVF_AQ_FLAG_BUF_SHIFT 12
+#define IAVF_AQ_FLAG_SI_SHIFT 13
+#define IAVF_AQ_FLAG_EI_SHIFT 14
+#define IAVF_AQ_FLAG_FE_SHIFT 15
+
+#define IAVF_AQ_FLAG_DD (1 << IAVF_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define IAVF_AQ_FLAG_CMP (1 << IAVF_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define IAVF_AQ_FLAG_ERR (1 << IAVF_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define IAVF_AQ_FLAG_VFE (1 << IAVF_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define IAVF_AQ_FLAG_LB (1 << IAVF_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define IAVF_AQ_FLAG_RD (1 << IAVF_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define IAVF_AQ_FLAG_VFC (1 << IAVF_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define IAVF_AQ_FLAG_BUF (1 << IAVF_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define IAVF_AQ_FLAG_SI (1 << IAVF_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define IAVF_AQ_FLAG_EI (1 << IAVF_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define IAVF_AQ_FLAG_FE (1 << IAVF_AQ_FLAG_FE_SHIFT) /* 0x8000 */
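+
+/* Illustrative note (not in the upstream header): flags are OR-ed together
+ * per command; e.g. an indirect command with a driver-supplied buffer uses
+ * (IAVF_AQ_FLAG_BUF | IAVF_AQ_FLAG_RD) = 0x1400, adding IAVF_AQ_FLAG_LB when
+ * the buffer exceeds IAVF_AQ_LARGE_BUF (512 bytes).
+ */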
+
+/* error codes */
+enum iavf_admin_queue_err {
+ IAVF_AQ_RC_OK = 0, /* success */
+ IAVF_AQ_RC_EPERM = 1, /* Operation not permitted */
+ IAVF_AQ_RC_ENOENT = 2, /* No such element */
+ IAVF_AQ_RC_ESRCH = 3, /* Bad opcode */
+ IAVF_AQ_RC_EINTR = 4, /* operation interrupted */
+ IAVF_AQ_RC_EIO = 5, /* I/O error */
+ IAVF_AQ_RC_ENXIO = 6, /* No such resource */
+ IAVF_AQ_RC_E2BIG = 7, /* Arg too long */
+ IAVF_AQ_RC_EAGAIN = 8, /* Try again */
+ IAVF_AQ_RC_ENOMEM = 9, /* Out of memory */
+ IAVF_AQ_RC_EACCES = 10, /* Permission denied */
+ IAVF_AQ_RC_EFAULT = 11, /* Bad address */
+ IAVF_AQ_RC_EBUSY = 12, /* Device or resource busy */
+ IAVF_AQ_RC_EEXIST = 13, /* object already exists */
+ IAVF_AQ_RC_EINVAL = 14, /* Invalid argument */
+ IAVF_AQ_RC_ENOTTY = 15, /* Not a typewriter */
+ IAVF_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
+ IAVF_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ IAVF_AQ_RC_ERANGE = 18, /* Parameter out of range */
+ IAVF_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
+ IAVF_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ IAVF_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ IAVF_AQ_RC_EFBIG = 22, /* File too large */
+};
+
+/* Admin Queue command opcodes */
+enum iavf_admin_queue_opc {
+ /* aq commands */
+ iavf_aqc_opc_get_version = 0x0001,
+ iavf_aqc_opc_driver_version = 0x0002,
+ iavf_aqc_opc_queue_shutdown = 0x0003,
+ iavf_aqc_opc_set_pf_context = 0x0004,
+
+ /* resource ownership */
+ iavf_aqc_opc_request_resource = 0x0008,
+ iavf_aqc_opc_release_resource = 0x0009,
+
+ iavf_aqc_opc_list_func_capabilities = 0x000A,
+ iavf_aqc_opc_list_dev_capabilities = 0x000B,
+
+ /* Proxy commands */
+ iavf_aqc_opc_set_proxy_config = 0x0104,
+ iavf_aqc_opc_set_ns_proxy_table_entry = 0x0105,
+
+ /* LAA */
+ iavf_aqc_opc_mac_address_read = 0x0107,
+ iavf_aqc_opc_mac_address_write = 0x0108,
+
+ /* PXE */
+ iavf_aqc_opc_clear_pxe_mode = 0x0110,
+
+ /* WoL commands */
+ iavf_aqc_opc_set_wol_filter = 0x0120,
+ iavf_aqc_opc_get_wake_reason = 0x0121,
+ iavf_aqc_opc_clear_all_wol_filters = 0x025E,
+
+ /* internal switch commands */
+ iavf_aqc_opc_get_switch_config = 0x0200,
+ iavf_aqc_opc_add_statistics = 0x0201,
+ iavf_aqc_opc_remove_statistics = 0x0202,
+ iavf_aqc_opc_set_port_parameters = 0x0203,
+ iavf_aqc_opc_get_switch_resource_alloc = 0x0204,
+ iavf_aqc_opc_set_switch_config = 0x0205,
+ iavf_aqc_opc_rx_ctl_reg_read = 0x0206,
+ iavf_aqc_opc_rx_ctl_reg_write = 0x0207,
+
+ iavf_aqc_opc_add_vsi = 0x0210,
+ iavf_aqc_opc_update_vsi_parameters = 0x0211,
+ iavf_aqc_opc_get_vsi_parameters = 0x0212,
+
+ iavf_aqc_opc_add_pv = 0x0220,
+ iavf_aqc_opc_update_pv_parameters = 0x0221,
+ iavf_aqc_opc_get_pv_parameters = 0x0222,
+
+ iavf_aqc_opc_add_veb = 0x0230,
+ iavf_aqc_opc_update_veb_parameters = 0x0231,
+ iavf_aqc_opc_get_veb_parameters = 0x0232,
+
+ iavf_aqc_opc_delete_element = 0x0243,
+
+ iavf_aqc_opc_add_macvlan = 0x0250,
+ iavf_aqc_opc_remove_macvlan = 0x0251,
+ iavf_aqc_opc_add_vlan = 0x0252,
+ iavf_aqc_opc_remove_vlan = 0x0253,
+ iavf_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
+ iavf_aqc_opc_add_tag = 0x0255,
+ iavf_aqc_opc_remove_tag = 0x0256,
+ iavf_aqc_opc_add_multicast_etag = 0x0257,
+ iavf_aqc_opc_remove_multicast_etag = 0x0258,
+ iavf_aqc_opc_update_tag = 0x0259,
+ iavf_aqc_opc_add_control_packet_filter = 0x025A,
+ iavf_aqc_opc_remove_control_packet_filter = 0x025B,
+ iavf_aqc_opc_add_cloud_filters = 0x025C,
+ iavf_aqc_opc_remove_cloud_filters = 0x025D,
+ iavf_aqc_opc_clear_wol_switch_filters = 0x025E,
+ iavf_aqc_opc_replace_cloud_filters = 0x025F,
+
+ iavf_aqc_opc_add_mirror_rule = 0x0260,
+ iavf_aqc_opc_delete_mirror_rule = 0x0261,
+
+ /* Dynamic Device Personalization */
+ iavf_aqc_opc_write_personalization_profile = 0x0270,
+ iavf_aqc_opc_get_personalization_profile_list = 0x0271,
+
+ /* DCB commands */
+ iavf_aqc_opc_dcb_ignore_pfc = 0x0301,
+ iavf_aqc_opc_dcb_updated = 0x0302,
+ iavf_aqc_opc_set_dcb_parameters = 0x0303,
+
+ /* TX scheduler */
+ iavf_aqc_opc_configure_vsi_bw_limit = 0x0400,
+ iavf_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
+ iavf_aqc_opc_configure_vsi_tc_bw = 0x0407,
+ iavf_aqc_opc_query_vsi_bw_config = 0x0408,
+ iavf_aqc_opc_query_vsi_ets_sla_config = 0x040A,
+ iavf_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
+
+ iavf_aqc_opc_enable_switching_comp_ets = 0x0413,
+ iavf_aqc_opc_modify_switching_comp_ets = 0x0414,
+ iavf_aqc_opc_disable_switching_comp_ets = 0x0415,
+ iavf_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
+ iavf_aqc_opc_configure_switching_comp_bw_config = 0x0417,
+ iavf_aqc_opc_query_switching_comp_ets_config = 0x0418,
+ iavf_aqc_opc_query_port_ets_config = 0x0419,
+ iavf_aqc_opc_query_switching_comp_bw_config = 0x041A,
+ iavf_aqc_opc_suspend_port_tx = 0x041B,
+ iavf_aqc_opc_resume_port_tx = 0x041C,
+ iavf_aqc_opc_configure_partition_bw = 0x041D,
+ /* hmc */
+ iavf_aqc_opc_query_hmc_resource_profile = 0x0500,
+ iavf_aqc_opc_set_hmc_resource_profile = 0x0501,
+
+ /* phy commands */
+ iavf_aqc_opc_get_phy_abilities = 0x0600,
+ iavf_aqc_opc_set_phy_config = 0x0601,
+ iavf_aqc_opc_set_mac_config = 0x0603,
+ iavf_aqc_opc_set_link_restart_an = 0x0605,
+ iavf_aqc_opc_get_link_status = 0x0607,
+ iavf_aqc_opc_set_phy_int_mask = 0x0613,
+ iavf_aqc_opc_get_local_advt_reg = 0x0614,
+ iavf_aqc_opc_set_local_advt_reg = 0x0615,
+ iavf_aqc_opc_get_partner_advt = 0x0616,
+ iavf_aqc_opc_set_lb_modes = 0x0618,
+ iavf_aqc_opc_get_phy_wol_caps = 0x0621,
+ iavf_aqc_opc_set_phy_debug = 0x0622,
+ iavf_aqc_opc_upload_ext_phy_fm = 0x0625,
+ iavf_aqc_opc_run_phy_activity = 0x0626,
+ iavf_aqc_opc_set_phy_register = 0x0628,
+ iavf_aqc_opc_get_phy_register = 0x0629,
+
+ /* NVM commands */
+ iavf_aqc_opc_nvm_read = 0x0701,
+ iavf_aqc_opc_nvm_erase = 0x0702,
+ iavf_aqc_opc_nvm_update = 0x0703,
+ iavf_aqc_opc_nvm_config_read = 0x0704,
+ iavf_aqc_opc_nvm_config_write = 0x0705,
+ iavf_aqc_opc_nvm_progress = 0x0706,
+ iavf_aqc_opc_oem_post_update = 0x0720,
+ iavf_aqc_opc_thermal_sensor = 0x0721,
+
+ /* virtualization commands */
+ iavf_aqc_opc_send_msg_to_pf = 0x0801,
+ iavf_aqc_opc_send_msg_to_vf = 0x0802,
+ iavf_aqc_opc_send_msg_to_peer = 0x0803,
+
+ /* alternate structure */
+ iavf_aqc_opc_alternate_write = 0x0900,
+ iavf_aqc_opc_alternate_write_indirect = 0x0901,
+ iavf_aqc_opc_alternate_read = 0x0902,
+ iavf_aqc_opc_alternate_read_indirect = 0x0903,
+ iavf_aqc_opc_alternate_write_done = 0x0904,
+ iavf_aqc_opc_alternate_set_mode = 0x0905,
+ iavf_aqc_opc_alternate_clear_port = 0x0906,
+
+ /* LLDP commands */
+ iavf_aqc_opc_lldp_get_mib = 0x0A00,
+ iavf_aqc_opc_lldp_update_mib = 0x0A01,
+ iavf_aqc_opc_lldp_add_tlv = 0x0A02,
+ iavf_aqc_opc_lldp_update_tlv = 0x0A03,
+ iavf_aqc_opc_lldp_delete_tlv = 0x0A04,
+ iavf_aqc_opc_lldp_stop = 0x0A05,
+ iavf_aqc_opc_lldp_start = 0x0A06,
+ iavf_aqc_opc_get_cee_dcb_cfg = 0x0A07,
+ iavf_aqc_opc_lldp_set_local_mib = 0x0A08,
+ iavf_aqc_opc_lldp_stop_start_spec_agent = 0x0A09,
+
+ /* Tunnel commands */
+ iavf_aqc_opc_add_udp_tunnel = 0x0B00,
+ iavf_aqc_opc_del_udp_tunnel = 0x0B01,
+ iavf_aqc_opc_set_rss_key = 0x0B02,
+ iavf_aqc_opc_set_rss_lut = 0x0B03,
+ iavf_aqc_opc_get_rss_key = 0x0B04,
+ iavf_aqc_opc_get_rss_lut = 0x0B05,
+
+ /* Async Events */
+ iavf_aqc_opc_event_lan_overflow = 0x1001,
+
+ /* OEM commands */
+ iavf_aqc_opc_oem_parameter_change = 0xFE00,
+ iavf_aqc_opc_oem_device_status_change = 0xFE01,
+ iavf_aqc_opc_oem_ocsd_initialize = 0xFE02,
+ iavf_aqc_opc_oem_ocbb_initialize = 0xFE03,
+
+ /* debug commands */
+ iavf_aqc_opc_debug_read_reg = 0xFF03,
+ iavf_aqc_opc_debug_write_reg = 0xFF04,
+ iavf_aqc_opc_debug_modify_reg = 0xFF07,
+ iavf_aqc_opc_debug_dump_internals = 0xFF08,
+};
+
+/* command structures and indirect data structures */
+
+/* Structure naming conventions:
+ * - no suffix for direct command descriptor structures
+ * - _data for indirect sent data
+ * - _resp for indirect return data (data which is both sent and returned uses _data)
+ * - _completion for direct return data
+ * - _element_ for repeated elements (may also be _data or _resp)
+ *
+ * Command structures are expected to overlay the params.raw member of the basic
+ * descriptor, and as such cannot exceed 16 bytes in length.
+ */
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define IAVF_CHECK_STRUCT_LEN(n, X) enum iavf_static_assert_enum_##X \
+ { iavf_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+
+/* This macro is used extensively to ensure that command structures are 16
+ * bytes in length as they have to map to the raw array of that size.
+ */
+#define IAVF_CHECK_CMD_LENGTH(X) IAVF_CHECK_STRUCT_LEN(16, X)
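+
+/* Illustrative expansion (not in the upstream header): for a 16-byte
+ * struct X, IAVF_CHECK_CMD_LENGTH(X) becomes
+ *
+ * enum iavf_static_assert_enum_X
+ *         { iavf_static_assert_X = 16/((sizeof(struct X) == 16) ? 1 : 0) };
+ *
+ * so a size mismatch turns the divisor into zero and breaks the build.
+ */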
+
+/* Queue Shutdown (direct 0x0003) */
+struct iavf_aqc_queue_shutdown {
+ __le32 driver_unloading;
+#define IAVF_AQ_DRIVER_UNLOADING 0x1
+ u8 reserved[12];
+};
+
+IAVF_CHECK_CMD_LENGTH(iavf_aqc_queue_shutdown);
+
+#define IAVF_AQC_WOL_PRESERVE_STATUS 0x200
+#define IAVF_AQC_MC_MAG_EN 0x0100
+#define IAVF_AQC_WOL_PRESERVE_ON_PFR 0x0200
+
+struct iavf_aqc_vsi_properties_data {
+ /* first 96 bytes are written by SW */
+ __le16 valid_sections;
+#define IAVF_AQ_VSI_PROP_SWITCH_VALID 0x0001
+#define IAVF_AQ_VSI_PROP_SECURITY_VALID 0x0002
+#define IAVF_AQ_VSI_PROP_VLAN_VALID 0x0004
+#define IAVF_AQ_VSI_PROP_CAS_PV_VALID 0x0008
+#define IAVF_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
+#define IAVF_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
+#define IAVF_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
+#define IAVF_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
+#define IAVF_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
+#define IAVF_AQ_VSI_PROP_SCHED_VALID 0x0200
+ /* switch section */
+ __le16 switch_id; /* 12bit id combined with flags below */
+#define IAVF_AQ_VSI_SW_ID_SHIFT 0x0000
+#define IAVF_AQ_VSI_SW_ID_MASK (0xFFF << IAVF_AQ_VSI_SW_ID_SHIFT)
+#define IAVF_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
+#define IAVF_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
+#define IAVF_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
+ u8 sw_reserved[2];
+ /* security section */
+ u8 sec_flags;
+#define IAVF_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
+#define IAVF_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
+#define IAVF_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
+ u8 sec_reserved;
+ /* VLAN section */
+ __le16 pvid; /* VLANS include priority bits */
+ __le16 fcoe_pvid;
+ u8 port_vlan_flags;
+#define IAVF_AQ_VSI_PVLAN_MODE_SHIFT 0x00
+#define IAVF_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
+ IAVF_AQ_VSI_PVLAN_MODE_SHIFT)
+#define IAVF_AQ_VSI_PVLAN_MODE_TAGGED 0x01
+#define IAVF_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
+#define IAVF_AQ_VSI_PVLAN_MODE_ALL 0x03
+#define IAVF_AQ_VSI_PVLAN_INSERT_PVID 0x04
+#define IAVF_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
+#define IAVF_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
+ IAVF_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define IAVF_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
+#define IAVF_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
+#define IAVF_AQ_VSI_PVLAN_EMOD_STR 0x10
+#define IAVF_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
+ u8 pvlan_reserved[3];
+ /* ingress egress up sections */
+ __le32 ingress_table; /* bitmap, 3 bits per up */
+#define IAVF_AQ_VSI_UP_TABLE_UP0_SHIFT 0
+#define IAVF_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP1_SHIFT 3
+#define IAVF_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP2_SHIFT 6
+#define IAVF_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP3_SHIFT 9
+#define IAVF_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP4_SHIFT 12
+#define IAVF_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP5_SHIFT 15
+#define IAVF_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP6_SHIFT 18
+#define IAVF_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP7_SHIFT 21
+#define IAVF_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP7_SHIFT)
+ __le32 egress_table; /* same defines as for ingress table */
+ /* cascaded PV section */
+ __le16 cas_pv_tag;
+ u8 cas_pv_flags;
+#define IAVF_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
+#define IAVF_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
+ IAVF_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define IAVF_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
+#define IAVF_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
+#define IAVF_AQ_VSI_CAS_PV_TAGX_COPY 0x02
+#define IAVF_AQ_VSI_CAS_PV_INSERT_TAG 0x10
+#define IAVF_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
+#define IAVF_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
+ u8 cas_pv_reserved;
+ /* queue mapping section */
+ __le16 mapping_flags;
+#define IAVF_AQ_VSI_QUE_MAP_CONTIG 0x0
+#define IAVF_AQ_VSI_QUE_MAP_NONCONTIG 0x1
+ __le16 queue_mapping[16];
+#define IAVF_AQ_VSI_QUEUE_SHIFT 0x0
+#define IAVF_AQ_VSI_QUEUE_MASK (0x7FF << IAVF_AQ_VSI_QUEUE_SHIFT)
+ __le16 tc_mapping[8];
+#define IAVF_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
+#define IAVF_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
+ IAVF_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define IAVF_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
+#define IAVF_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
+ IAVF_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+ /* queueing option section */
+ u8 queueing_opt_flags;
+#define IAVF_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
+#define IAVF_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
+#define IAVF_AQ_VSI_QUE_OPT_TCP_ENA 0x10
+#define IAVF_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+#define IAVF_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
+#define IAVF_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
+ u8 queueing_opt_reserved[3];
+ /* scheduler section */
+ u8 up_enable_bits;
+ u8 sched_reserved;
+ /* outer up section */
+ __le32 outer_up_table; /* same structure and defines as ingress tbl */
+ u8 cmd_reserved[8];
+ /* last 32 bytes are written by FW */
+ __le16 qs_handle[8];
+#define IAVF_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
+ __le16 stat_counter_idx;
+ __le16 sched_id;
+ u8 resp_reserved[12];
+};
+
+IAVF_CHECK_STRUCT_LEN(128, iavf_aqc_vsi_properties_data);
+
+/* Get VEB Parameters (direct 0x0232)
+ * uses iavf_aqc_switch_seid for the descriptor
+ */
+struct iavf_aqc_get_veb_parameters_completion {
+ __le16 seid;
+ __le16 switch_id;
+ __le16 veb_flags; /* only the first/last flags from 0x0230 are valid */
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+ u8 reserved[4];
+};
+
+IAVF_CHECK_CMD_LENGTH(iavf_aqc_get_veb_parameters_completion);
+
+#define IAVF_LINK_SPEED_2_5GB_SHIFT 0x0
+#define IAVF_LINK_SPEED_100MB_SHIFT 0x1
+#define IAVF_LINK_SPEED_1000MB_SHIFT 0x2
+#define IAVF_LINK_SPEED_10GB_SHIFT 0x3
+#define IAVF_LINK_SPEED_40GB_SHIFT 0x4
+#define IAVF_LINK_SPEED_20GB_SHIFT 0x5
+#define IAVF_LINK_SPEED_25GB_SHIFT 0x6
+#define IAVF_LINK_SPEED_5GB_SHIFT 0x7
+
+enum iavf_aq_link_speed {
+ IAVF_LINK_SPEED_UNKNOWN = 0,
+ IAVF_LINK_SPEED_100MB = (1 << IAVF_LINK_SPEED_100MB_SHIFT),
+ IAVF_LINK_SPEED_1GB = (1 << IAVF_LINK_SPEED_1000MB_SHIFT),
+ IAVF_LINK_SPEED_2_5GB = (1 << IAVF_LINK_SPEED_2_5GB_SHIFT),
+ IAVF_LINK_SPEED_5GB = (1 << IAVF_LINK_SPEED_5GB_SHIFT),
+ IAVF_LINK_SPEED_10GB = (1 << IAVF_LINK_SPEED_10GB_SHIFT),
+ IAVF_LINK_SPEED_40GB = (1 << IAVF_LINK_SPEED_40GB_SHIFT),
+ IAVF_LINK_SPEED_20GB = (1 << IAVF_LINK_SPEED_20GB_SHIFT),
+ IAVF_LINK_SPEED_25GB = (1 << IAVF_LINK_SPEED_25GB_SHIFT),
+};
+
+#define IAVF_AQ_LINK_UP_FUNCTION 0x01
+
+/* Send to PF command (indirect 0x0801); the id field is used only by the PF
+ * Send to VF command (indirect 0x0802); the id field is used only by the PF
+ * Send to Peer PF command (indirect 0x0803)
+ */
+struct iavf_aqc_pf_vf_message {
+ __le32 id;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IAVF_CHECK_CMD_LENGTH(iavf_aqc_pf_vf_message);
+
+/* Get CEE DCBX Oper Config (0x0A07)
+ * uses the generic descriptor struct
+ * returns below as indirect response
+ */
+
+#define IAVF_AQC_CEE_APP_FCOE_SHIFT 0x0
+#define IAVF_AQC_CEE_APP_FCOE_MASK (0x7 << IAVF_AQC_CEE_APP_FCOE_SHIFT)
+#define IAVF_AQC_CEE_APP_ISCSI_SHIFT 0x3
+#define IAVF_AQC_CEE_APP_ISCSI_MASK (0x7 << IAVF_AQC_CEE_APP_ISCSI_SHIFT)
+#define IAVF_AQC_CEE_APP_FIP_SHIFT 0x8
+#define IAVF_AQC_CEE_APP_FIP_MASK (0x7 << IAVF_AQC_CEE_APP_FIP_SHIFT)
+
+#define IAVF_AQC_CEE_PG_STATUS_SHIFT 0x0
+#define IAVF_AQC_CEE_PG_STATUS_MASK (0x7 << IAVF_AQC_CEE_PG_STATUS_SHIFT)
+#define IAVF_AQC_CEE_PFC_STATUS_SHIFT 0x3
+#define IAVF_AQC_CEE_PFC_STATUS_MASK (0x7 << IAVF_AQC_CEE_PFC_STATUS_SHIFT)
+#define IAVF_AQC_CEE_APP_STATUS_SHIFT 0x8
+#define IAVF_AQC_CEE_APP_STATUS_MASK (0x7 << IAVF_AQC_CEE_APP_STATUS_SHIFT)
+#define IAVF_AQC_CEE_FCOE_STATUS_SHIFT 0x8
+#define IAVF_AQC_CEE_FCOE_STATUS_MASK (0x7 << IAVF_AQC_CEE_FCOE_STATUS_SHIFT)
+#define IAVF_AQC_CEE_ISCSI_STATUS_SHIFT 0xB
+#define IAVF_AQC_CEE_ISCSI_STATUS_MASK (0x7 << IAVF_AQC_CEE_ISCSI_STATUS_SHIFT)
+#define IAVF_AQC_CEE_FIP_STATUS_SHIFT 0x10
+#define IAVF_AQC_CEE_FIP_STATUS_MASK (0x7 << IAVF_AQC_CEE_FIP_STATUS_SHIFT)
+
+/* struct iavf_aqc_get_cee_dcb_cfg_v1_resp was originally defined with
+ * word boundary layout issues, which the Linux compilers silently deal
+ * with by adding padding, making the actual struct larger than designed.
+ * However, the FW compiler for the NIC is less lenient and complains
+ * about the struct. Hence, the struct defined here has an extra byte in
+ * fields reserved3 and reserved4 to directly acknowledge that padding,
+ * and the new length is used in the length check macro.
+ */
+struct iavf_aqc_get_cee_dcb_cfg_v1_resp {
+ u8 reserved1;
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 reserved2;
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ u8 reserved3[2];
+ __le16 oper_app_prio;
+ u8 reserved4[2];
+ __le16 tlv_status;
+};
+
+IAVF_CHECK_STRUCT_LEN(0x18, iavf_aqc_get_cee_dcb_cfg_v1_resp);
+
+struct iavf_aqc_get_cee_dcb_cfg_resp {
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ __le16 oper_app_prio;
+ __le32 tlv_status;
+ u8 reserved[12];
+};
+
+IAVF_CHECK_STRUCT_LEN(0x20, iavf_aqc_get_cee_dcb_cfg_resp);
+
+/* Set Local LLDP MIB (indirect 0x0A08)
+ * Used to replace the local MIB of a given LLDP agent. e.g. DCBx
+ */
+struct iavf_aqc_lldp_set_local_mib {
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << \
+ SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK (1 << \
+ SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1
+ u8 type;
+ u8 reserved0;
+ __le16 length;
+ u8 reserved1[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+IAVF_CHECK_CMD_LENGTH(iavf_aqc_lldp_set_local_mib);
+
+struct iavf_aqc_lldp_set_local_mib_resp {
+#define SET_LOCAL_MIB_RESP_EVENT_TRIGGERED_MASK 0x01
+ u8 status;
+ u8 reserved[15];
+};
+
+IAVF_CHECK_STRUCT_LEN(0x10, iavf_aqc_lldp_set_local_mib_resp);
+
+/* Stop/Start LLDP Agent (direct 0x0A09)
+ * Used for stopping/starting specific LLDP agent. e.g. DCBx
+ */
+struct iavf_aqc_lldp_stop_start_specific_agent {
+#define IAVF_AQC_START_SPECIFIC_AGENT_SHIFT 0
+#define IAVF_AQC_START_SPECIFIC_AGENT_MASK \
+ (1 << IAVF_AQC_START_SPECIFIC_AGENT_SHIFT)
+ u8 command;
+ u8 reserved[15];
+};
+
+IAVF_CHECK_CMD_LENGTH(iavf_aqc_lldp_stop_start_specific_agent);
+
+struct iavf_aqc_get_set_rss_key {
+#define IAVF_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15)
+#define IAVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
+#define IAVF_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
+ IAVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
+ __le16 vsi_id;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IAVF_CHECK_CMD_LENGTH(iavf_aqc_get_set_rss_key);
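+
+/* Illustrative note (not in the upstream header): vsi_id packs a 10-bit VSI
+ * number with the VALID bit, e.g.
+ * CPU_TO_LE16(IAVF_AQC_SET_RSS_KEY_VSI_VALID |
+ *             (num & IAVF_AQC_SET_RSS_KEY_VSI_ID_MASK));
+ * the key itself travels in the indirect buffer at addr_high/addr_low.
+ */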
+
+struct iavf_aqc_get_set_rss_key_data {
+ u8 standard_rss_key[0x28];
+ u8 extended_hash_key[0xc];
+};
+
+IAVF_CHECK_STRUCT_LEN(0x34, iavf_aqc_get_set_rss_key_data);
+
+struct iavf_aqc_get_set_rss_lut {
+#define IAVF_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15)
+#define IAVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
+#define IAVF_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
+ IAVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
+ __le16 vsi_id;
+#define IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
+#define IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \
+ IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+
+#define IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
+#define IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
+ __le16 flags;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IAVF_CHECK_CMD_LENGTH(iavf_aqc_get_set_rss_lut);
+#endif /* _IAVF_ADMINQ_CMD_H_ */
diff --git a/src/spdk/dpdk/drivers/common/iavf/iavf_alloc.h b/src/spdk/dpdk/drivers/common/iavf/iavf_alloc.h
new file mode 100644
index 000000000..7b7a205cf
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/iavf_alloc.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2020 Intel Corporation
+ */
+
+#ifndef _IAVF_ALLOC_H_
+#define _IAVF_ALLOC_H_
+
+struct iavf_hw;
+
+/* Memory allocation types */
+enum iavf_memory_type {
+ iavf_mem_arq_buf = 0, /* ARQ indirect command buffer */
+ iavf_mem_asq_buf = 1,
+ iavf_mem_atq_buf = 2, /* ATQ indirect command buffer */
+ iavf_mem_arq_ring = 3, /* ARQ descriptor ring */
+ iavf_mem_atq_ring = 4, /* ATQ descriptor ring */
+ iavf_mem_pd = 5, /* Page Descriptor */
+ iavf_mem_bp = 6, /* Backing Page - 4KB */
+ iavf_mem_bp_jumbo = 7, /* Backing Page - > 4KB */
+ iavf_mem_reserved
+};
+
+/* prototype for functions used for dynamic memory allocation */
+enum iavf_status iavf_allocate_dma_mem(struct iavf_hw *hw,
+ struct iavf_dma_mem *mem,
+ enum iavf_memory_type type,
+ u64 size, u32 alignment);
+enum iavf_status iavf_free_dma_mem(struct iavf_hw *hw,
+ struct iavf_dma_mem *mem);
+enum iavf_status iavf_allocate_virt_mem(struct iavf_hw *hw,
+ struct iavf_virt_mem *mem,
+ u32 size);
+enum iavf_status iavf_free_virt_mem(struct iavf_hw *hw,
+ struct iavf_virt_mem *mem);
+
+#endif /* _IAVF_ALLOC_H_ */
diff --git a/src/spdk/dpdk/drivers/common/iavf/iavf_common.c b/src/spdk/dpdk/drivers/common/iavf/iavf_common.c
new file mode 100644
index 000000000..57ec1ff39
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/iavf_common.c
@@ -0,0 +1,1025 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2020 Intel Corporation
+ */
+
+#include "iavf_type.h"
+#include "iavf_adminq.h"
+#include "iavf_prototype.h"
+#include "virtchnl.h"
+
+/**
+ * iavf_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+enum iavf_status iavf_set_mac_type(struct iavf_hw *hw)
+{
+ enum iavf_status status = IAVF_SUCCESS;
+
+ DEBUGFUNC("iavf_set_mac_type\n");
+
+ if (hw->vendor_id == IAVF_INTEL_VENDOR_ID) {
+ switch (hw->device_id) {
+ case IAVF_DEV_ID_X722_VF:
+ hw->mac.type = IAVF_MAC_X722_VF;
+ break;
+ case IAVF_DEV_ID_VF:
+ case IAVF_DEV_ID_VF_HV:
+ case IAVF_DEV_ID_ADAPTIVE_VF:
+ hw->mac.type = IAVF_MAC_VF;
+ break;
+ default:
+ hw->mac.type = IAVF_MAC_GENERIC;
+ break;
+ }
+ } else {
+ status = IAVF_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ DEBUGOUT2("iavf_set_mac_type found mac: %d, returns: %d\n",
+ hw->mac.type, status);
+ return status;
+}
+
+/**
+ * iavf_aq_str - convert AQ err code to a string
+ * @hw: pointer to the HW structure
+ * @aq_err: the AQ error code to convert
+ **/
+const char *iavf_aq_str(struct iavf_hw *hw, enum iavf_admin_queue_err aq_err)
+{
+ switch (aq_err) {
+ case IAVF_AQ_RC_OK:
+ return "OK";
+ case IAVF_AQ_RC_EPERM:
+ return "IAVF_AQ_RC_EPERM";
+ case IAVF_AQ_RC_ENOENT:
+ return "IAVF_AQ_RC_ENOENT";
+ case IAVF_AQ_RC_ESRCH:
+ return "IAVF_AQ_RC_ESRCH";
+ case IAVF_AQ_RC_EINTR:
+ return "IAVF_AQ_RC_EINTR";
+ case IAVF_AQ_RC_EIO:
+ return "IAVF_AQ_RC_EIO";
+ case IAVF_AQ_RC_ENXIO:
+ return "IAVF_AQ_RC_ENXIO";
+ case IAVF_AQ_RC_E2BIG:
+ return "IAVF_AQ_RC_E2BIG";
+ case IAVF_AQ_RC_EAGAIN:
+ return "IAVF_AQ_RC_EAGAIN";
+ case IAVF_AQ_RC_ENOMEM:
+ return "IAVF_AQ_RC_ENOMEM";
+ case IAVF_AQ_RC_EACCES:
+ return "IAVF_AQ_RC_EACCES";
+ case IAVF_AQ_RC_EFAULT:
+ return "IAVF_AQ_RC_EFAULT";
+ case IAVF_AQ_RC_EBUSY:
+ return "IAVF_AQ_RC_EBUSY";
+ case IAVF_AQ_RC_EEXIST:
+ return "IAVF_AQ_RC_EEXIST";
+ case IAVF_AQ_RC_EINVAL:
+ return "IAVF_AQ_RC_EINVAL";
+ case IAVF_AQ_RC_ENOTTY:
+ return "IAVF_AQ_RC_ENOTTY";
+ case IAVF_AQ_RC_ENOSPC:
+ return "IAVF_AQ_RC_ENOSPC";
+ case IAVF_AQ_RC_ENOSYS:
+ return "IAVF_AQ_RC_ENOSYS";
+ case IAVF_AQ_RC_ERANGE:
+ return "IAVF_AQ_RC_ERANGE";
+ case IAVF_AQ_RC_EFLUSHED:
+ return "IAVF_AQ_RC_EFLUSHED";
+ case IAVF_AQ_RC_BAD_ADDR:
+ return "IAVF_AQ_RC_BAD_ADDR";
+ case IAVF_AQ_RC_EMODE:
+ return "IAVF_AQ_RC_EMODE";
+ case IAVF_AQ_RC_EFBIG:
+ return "IAVF_AQ_RC_EFBIG";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
+ return hw->err_str;
+}
+
+/**
+ * iavf_stat_str - convert status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ **/
+const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err)
+{
+ switch (stat_err) {
+ case IAVF_SUCCESS:
+ return "OK";
+ case IAVF_ERR_NVM:
+ return "IAVF_ERR_NVM";
+ case IAVF_ERR_NVM_CHECKSUM:
+ return "IAVF_ERR_NVM_CHECKSUM";
+ case IAVF_ERR_PHY:
+ return "IAVF_ERR_PHY";
+ case IAVF_ERR_CONFIG:
+ return "IAVF_ERR_CONFIG";
+ case IAVF_ERR_PARAM:
+ return "IAVF_ERR_PARAM";
+ case IAVF_ERR_MAC_TYPE:
+ return "IAVF_ERR_MAC_TYPE";
+ case IAVF_ERR_UNKNOWN_PHY:
+ return "IAVF_ERR_UNKNOWN_PHY";
+ case IAVF_ERR_LINK_SETUP:
+ return "IAVF_ERR_LINK_SETUP";
+ case IAVF_ERR_ADAPTER_STOPPED:
+ return "IAVF_ERR_ADAPTER_STOPPED";
+ case IAVF_ERR_INVALID_MAC_ADDR:
+ return "IAVF_ERR_INVALID_MAC_ADDR";
+ case IAVF_ERR_DEVICE_NOT_SUPPORTED:
+ return "IAVF_ERR_DEVICE_NOT_SUPPORTED";
+ case IAVF_ERR_MASTER_REQUESTS_PENDING:
+ return "IAVF_ERR_MASTER_REQUESTS_PENDING";
+ case IAVF_ERR_INVALID_LINK_SETTINGS:
+ return "IAVF_ERR_INVALID_LINK_SETTINGS";
+ case IAVF_ERR_AUTONEG_NOT_COMPLETE:
+ return "IAVF_ERR_AUTONEG_NOT_COMPLETE";
+ case IAVF_ERR_RESET_FAILED:
+ return "IAVF_ERR_RESET_FAILED";
+ case IAVF_ERR_SWFW_SYNC:
+ return "IAVF_ERR_SWFW_SYNC";
+ case IAVF_ERR_NO_AVAILABLE_VSI:
+ return "IAVF_ERR_NO_AVAILABLE_VSI";
+ case IAVF_ERR_NO_MEMORY:
+ return "IAVF_ERR_NO_MEMORY";
+ case IAVF_ERR_BAD_PTR:
+ return "IAVF_ERR_BAD_PTR";
+ case IAVF_ERR_RING_FULL:
+ return "IAVF_ERR_RING_FULL";
+ case IAVF_ERR_INVALID_PD_ID:
+ return "IAVF_ERR_INVALID_PD_ID";
+ case IAVF_ERR_INVALID_QP_ID:
+ return "IAVF_ERR_INVALID_QP_ID";
+ case IAVF_ERR_INVALID_CQ_ID:
+ return "IAVF_ERR_INVALID_CQ_ID";
+ case IAVF_ERR_INVALID_CEQ_ID:
+ return "IAVF_ERR_INVALID_CEQ_ID";
+ case IAVF_ERR_INVALID_AEQ_ID:
+ return "IAVF_ERR_INVALID_AEQ_ID";
+ case IAVF_ERR_INVALID_SIZE:
+ return "IAVF_ERR_INVALID_SIZE";
+ case IAVF_ERR_INVALID_ARP_INDEX:
+ return "IAVF_ERR_INVALID_ARP_INDEX";
+ case IAVF_ERR_INVALID_FPM_FUNC_ID:
+ return "IAVF_ERR_INVALID_FPM_FUNC_ID";
+ case IAVF_ERR_QP_INVALID_MSG_SIZE:
+ return "IAVF_ERR_QP_INVALID_MSG_SIZE";
+ case IAVF_ERR_QP_TOOMANY_WRS_POSTED:
+ return "IAVF_ERR_QP_TOOMANY_WRS_POSTED";
+ case IAVF_ERR_INVALID_FRAG_COUNT:
+ return "IAVF_ERR_INVALID_FRAG_COUNT";
+ case IAVF_ERR_QUEUE_EMPTY:
+ return "IAVF_ERR_QUEUE_EMPTY";
+ case IAVF_ERR_INVALID_ALIGNMENT:
+ return "IAVF_ERR_INVALID_ALIGNMENT";
+ case IAVF_ERR_FLUSHED_QUEUE:
+ return "IAVF_ERR_FLUSHED_QUEUE";
+ case IAVF_ERR_INVALID_PUSH_PAGE_INDEX:
+ return "IAVF_ERR_INVALID_PUSH_PAGE_INDEX";
+ case IAVF_ERR_INVALID_IMM_DATA_SIZE:
+ return "IAVF_ERR_INVALID_IMM_DATA_SIZE";
+ case IAVF_ERR_TIMEOUT:
+ return "IAVF_ERR_TIMEOUT";
+ case IAVF_ERR_OPCODE_MISMATCH:
+ return "IAVF_ERR_OPCODE_MISMATCH";
+ case IAVF_ERR_CQP_COMPL_ERROR:
+ return "IAVF_ERR_CQP_COMPL_ERROR";
+ case IAVF_ERR_INVALID_VF_ID:
+ return "IAVF_ERR_INVALID_VF_ID";
+ case IAVF_ERR_INVALID_HMCFN_ID:
+ return "IAVF_ERR_INVALID_HMCFN_ID";
+ case IAVF_ERR_BACKING_PAGE_ERROR:
+ return "IAVF_ERR_BACKING_PAGE_ERROR";
+ case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE:
+ return "IAVF_ERR_NO_PBLCHUNKS_AVAILABLE";
+ case IAVF_ERR_INVALID_PBLE_INDEX:
+ return "IAVF_ERR_INVALID_PBLE_INDEX";
+ case IAVF_ERR_INVALID_SD_INDEX:
+ return "IAVF_ERR_INVALID_SD_INDEX";
+ case IAVF_ERR_INVALID_PAGE_DESC_INDEX:
+ return "IAVF_ERR_INVALID_PAGE_DESC_INDEX";
+ case IAVF_ERR_INVALID_SD_TYPE:
+ return "IAVF_ERR_INVALID_SD_TYPE";
+ case IAVF_ERR_MEMCPY_FAILED:
+ return "IAVF_ERR_MEMCPY_FAILED";
+ case IAVF_ERR_INVALID_HMC_OBJ_INDEX:
+ return "IAVF_ERR_INVALID_HMC_OBJ_INDEX";
+ case IAVF_ERR_INVALID_HMC_OBJ_COUNT:
+ return "IAVF_ERR_INVALID_HMC_OBJ_COUNT";
+ case IAVF_ERR_INVALID_SRQ_ARM_LIMIT:
+ return "IAVF_ERR_INVALID_SRQ_ARM_LIMIT";
+ case IAVF_ERR_SRQ_ENABLED:
+ return "IAVF_ERR_SRQ_ENABLED";
+ case IAVF_ERR_ADMIN_QUEUE_ERROR:
+ return "IAVF_ERR_ADMIN_QUEUE_ERROR";
+ case IAVF_ERR_ADMIN_QUEUE_TIMEOUT:
+ return "IAVF_ERR_ADMIN_QUEUE_TIMEOUT";
+ case IAVF_ERR_BUF_TOO_SHORT:
+ return "IAVF_ERR_BUF_TOO_SHORT";
+ case IAVF_ERR_ADMIN_QUEUE_FULL:
+ return "IAVF_ERR_ADMIN_QUEUE_FULL";
+ case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
+ return "IAVF_ERR_ADMIN_QUEUE_NO_WORK";
+ case IAVF_ERR_BAD_IWARP_CQE:
+ return "IAVF_ERR_BAD_IWARP_CQE";
+ case IAVF_ERR_NVM_BLANK_MODE:
+ return "IAVF_ERR_NVM_BLANK_MODE";
+ case IAVF_ERR_NOT_IMPLEMENTED:
+ return "IAVF_ERR_NOT_IMPLEMENTED";
+ case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:
+ return "IAVF_ERR_PE_DOORBELL_NOT_ENABLED";
+ case IAVF_ERR_DIAG_TEST_FAILED:
+ return "IAVF_ERR_DIAG_TEST_FAILED";
+ case IAVF_ERR_NOT_READY:
+ return "IAVF_ERR_NOT_READY";
+ case IAVF_NOT_SUPPORTED:
+ return "IAVF_NOT_SUPPORTED";
+ case IAVF_ERR_FIRMWARE_API_VERSION:
+ return "IAVF_ERR_FIRMWARE_API_VERSION";
+ case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
+ return "IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+ return hw->err_str;
+}
+
+/**
+ * iavf_debug_aq
+ * @hw: pointer to the hardware structure
+ * @mask: debug mask
+ * @desc: pointer to admin queue descriptor
+ * @buffer: pointer to command buffer
+ * @buf_len: max length of buffer
+ *
+ * Dumps debug log about adminq command with descriptor contents.
+ **/
+void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask, void *desc,
+ void *buffer, u16 buf_len)
+{
+ struct iavf_aq_desc *aq_desc = (struct iavf_aq_desc *)desc;
+ u8 *buf = (u8 *)buffer;
+ u16 len;
+ u16 i = 0;
+
+ if ((!(mask & hw->debug_mask)) || (desc == NULL))
+ return;
+
+ len = LE16_TO_CPU(aq_desc->datalen);
+
+ iavf_debug(hw, mask,
+ "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
+ LE16_TO_CPU(aq_desc->opcode),
+ LE16_TO_CPU(aq_desc->flags),
+ LE16_TO_CPU(aq_desc->datalen),
+ LE16_TO_CPU(aq_desc->retval));
+ iavf_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
+ LE32_TO_CPU(aq_desc->cookie_high),
+ LE32_TO_CPU(aq_desc->cookie_low));
+ iavf_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
+ LE32_TO_CPU(aq_desc->params.internal.param0),
+ LE32_TO_CPU(aq_desc->params.internal.param1));
+ iavf_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
+ LE32_TO_CPU(aq_desc->params.external.addr_high),
+ LE32_TO_CPU(aq_desc->params.external.addr_low));
+
+ if ((buffer != NULL) && (aq_desc->datalen != 0)) {
+ iavf_debug(hw, mask, "AQ CMD Buffer:\n");
+ if (buf_len < len)
+ len = buf_len;
+ /* write the full 16-byte chunks */
+ for (i = 0; i < (len - 16); i += 16)
+ iavf_debug(hw, mask,
+ "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+ i, buf[i], buf[i+1], buf[i+2], buf[i+3],
+ buf[i+4], buf[i+5], buf[i+6], buf[i+7],
+ buf[i+8], buf[i+9], buf[i+10], buf[i+11],
+ buf[i+12], buf[i+13], buf[i+14], buf[i+15]);
+ /* the most we could have left is 16 bytes, pad with zeros */
+ if (i < len) {
+ char d_buf[16];
+ int j, i_sav;
+
+ i_sav = i;
+ memset(d_buf, 0, sizeof(d_buf));
+ for (j = 0; i < len; j++, i++)
+ d_buf[j] = buf[i];
+ iavf_debug(hw, mask,
+ "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+ i_sav, d_buf[0], d_buf[1], d_buf[2], d_buf[3],
+ d_buf[4], d_buf[5], d_buf[6], d_buf[7],
+ d_buf[8], d_buf[9], d_buf[10], d_buf[11],
+ d_buf[12], d_buf[13], d_buf[14], d_buf[15]);
+ }
+ }
+}
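+
+/* Illustrative only (not part of the base code): output above is gated on
+ * hw->debug_mask, so a caller wanting command dumps would first enable the
+ * matching bit, e.g.:
+ *
+ *	hw->debug_mask |= IAVF_DEBUG_AQ_COMMAND;
+ *	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, &desc, buf, buf_len);
+ */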
+
+/**
+ * iavf_check_asq_alive
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if the queue is enabled, else false.
+ **/
+bool iavf_check_asq_alive(struct iavf_hw *hw)
+{
+ if (hw->aq.asq.len)
+ return !!(rd32(hw, hw->aq.asq.len) &
+ IAVF_VF_ATQLEN1_ATQENABLE_MASK);
+ else
+ return false;
+}
+
+/**
+ * iavf_aq_queue_shutdown
+ * @hw: pointer to the hw struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well.
+ **/
+enum iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw,
+ bool unloading)
+{
+ struct iavf_aq_desc desc;
+ struct iavf_aqc_queue_shutdown *cmd =
+ (struct iavf_aqc_queue_shutdown *)&desc.params.raw;
+ enum iavf_status status;
+
+ iavf_fill_default_direct_cmd_desc(&desc,
+ iavf_aqc_opc_queue_shutdown);
+
+ if (unloading)
+ cmd->driver_unloading = CPU_TO_LE32(IAVF_AQ_DRIVER_UNLOADING);
+ status = iavf_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
+ * iavf_aq_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ * @set: set true to set the table, false to get the table
+ *
+ * Internal function to get or set RSS lookup table
+ **/
+STATIC enum iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw,
+ u16 vsi_id, bool pf_lut,
+ u8 *lut, u16 lut_size,
+ bool set)
+{
+ enum iavf_status status;
+ struct iavf_aq_desc desc;
+ struct iavf_aqc_get_set_rss_lut *cmd_resp =
+ (struct iavf_aqc_get_set_rss_lut *)&desc.params.raw;
+
+ if (set)
+ iavf_fill_default_direct_cmd_desc(&desc,
+ iavf_aqc_opc_set_rss_lut);
+ else
+ iavf_fill_default_direct_cmd_desc(&desc,
+ iavf_aqc_opc_get_rss_lut);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)IAVF_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)IAVF_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ CPU_TO_LE16((u16)((vsi_id <<
+ IAVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
+ IAVF_AQC_SET_RSS_LUT_VSI_ID_MASK));
+ cmd_resp->vsi_id |= CPU_TO_LE16((u16)IAVF_AQC_SET_RSS_LUT_VSI_VALID);
+
+ if (pf_lut)
+ cmd_resp->flags |= CPU_TO_LE16((u16)
+ ((IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
+ IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ else
+ cmd_resp->flags |= CPU_TO_LE16((u16)
+ ((IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
+ IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+
+ status = iavf_asq_send_command(hw, &desc, lut, lut_size, NULL);
+
+ return status;
+}
+
+/**
+ * iavf_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ **/
+enum iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return iavf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
+ false);
+}
+
+/**
+ * iavf_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ **/
+enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return iavf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
+}
+
+/**
+ * iavf_aq_get_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * @set: set true to set the key, false to get the key
+ *
+ * Internal function to get or set the RSS key per VSI
+ **/
+STATIC enum iavf_status iavf_aq_get_set_rss_key(struct iavf_hw *hw,
+ u16 vsi_id,
+ struct iavf_aqc_get_set_rss_key_data *key,
+ bool set)
+{
+ enum iavf_status status;
+ struct iavf_aq_desc desc;
+ struct iavf_aqc_get_set_rss_key *cmd_resp =
+ (struct iavf_aqc_get_set_rss_key *)&desc.params.raw;
+ u16 key_size = sizeof(struct iavf_aqc_get_set_rss_key_data);
+
+ if (set)
+ iavf_fill_default_direct_cmd_desc(&desc,
+ iavf_aqc_opc_set_rss_key);
+ else
+ iavf_fill_default_direct_cmd_desc(&desc,
+ iavf_aqc_opc_get_rss_key);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)IAVF_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)IAVF_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ CPU_TO_LE16((u16)((vsi_id <<
+ IAVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
+ IAVF_AQC_SET_RSS_KEY_VSI_ID_MASK));
+ cmd_resp->vsi_id |= CPU_TO_LE16((u16)IAVF_AQC_SET_RSS_KEY_VSI_VALID);
+
+ status = iavf_asq_send_command(hw, &desc, key, key_size, NULL);
+
+ return status;
+}
+
+/**
+ * iavf_aq_get_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * get the RSS key per VSI
+ **/
+enum iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw,
+ u16 vsi_id,
+ struct iavf_aqc_get_set_rss_key_data *key)
+{
+ return iavf_aq_get_set_rss_key(hw, vsi_id, key, false);
+}
+
+/**
+ * iavf_aq_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ **/
+enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw,
+ u16 vsi_id,
+ struct iavf_aqc_get_set_rss_key_data *key)
+{
+ return iavf_aq_get_set_rss_key(hw, vsi_id, key, true);
+}
+
+/* The iavf_ptype_lookup table is used to convert from the 8-bit ptype in the
+ * hardware to a bit-field that can be used by SW to more easily determine the
+ * packet type.
+ *
+ * Macros are used to shorten the table lines and make this table human
+ * readable.
+ *
+ * We store the PTYPE in the top byte of the bit field - this is just so that
+ * we can check that the table doesn't have a row missing, as the index into
+ * the table should be the PTYPE.
+ *
+ * Typical workflow:
+ *
+ * IF NOT iavf_ptype_lookup[ptype].known
+ * THEN
+ * Packet is unknown
+ * ELSE IF iavf_ptype_lookup[ptype].outer_ip == IAVF_RX_PTYPE_OUTER_IP
+ * Use the rest of the fields to look at the tunnels, inner protocols, etc
+ * ELSE
+ * Use the enum iavf_rx_l2_ptype to decode the packet type
+ * ENDIF
+ */
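+
+/* Illustrative only (not part of the base code): a minimal sketch of the
+ * decode flow described above. The bit-field names (known, outer_ip,
+ * inner_prot) are assumed from the IAVF_PTT initializer order below.
+ */
+#if 0
+static inline bool iavf_ptype_is_udp(u8 ptype)
+{
+	struct iavf_rx_ptype_decoded d = iavf_ptype_lookup[ptype];
+
+	if (!d.known)
+		return false;	/* IAVF_PTT_UNUSED_ENTRY row */
+	if (d.outer_ip != IAVF_RX_PTYPE_OUTER_IP)
+		return false;	/* pure L2; see enum iavf_rx_l2_ptype */
+	return d.inner_prot == IAVF_RX_PTYPE_INNER_PROT_UDP;
+}
+#endif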
+
+/* macro to make the table lines short */
+#define IAVF_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
+ { PTYPE, \
+ 1, \
+ IAVF_RX_PTYPE_OUTER_##OUTER_IP, \
+ IAVF_RX_PTYPE_OUTER_##OUTER_IP_VER, \
+ IAVF_RX_PTYPE_##OUTER_FRAG, \
+ IAVF_RX_PTYPE_TUNNEL_##T, \
+ IAVF_RX_PTYPE_TUNNEL_END_##TE, \
+ IAVF_RX_PTYPE_##TEF, \
+ IAVF_RX_PTYPE_INNER_PROT_##I, \
+ IAVF_RX_PTYPE_PAYLOAD_LAYER_##PL }
+
+#define IAVF_PTT_UNUSED_ENTRY(PTYPE) \
+ { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+
+/* shorter macros make the table fit but are terse */
+#define IAVF_RX_PTYPE_NOF IAVF_RX_PTYPE_NOT_FRAG
+#define IAVF_RX_PTYPE_FRG IAVF_RX_PTYPE_FRAG
+#define IAVF_RX_PTYPE_INNER_PROT_TS IAVF_RX_PTYPE_INNER_PROT_TIMESYNC
+
+/* Lookup table mapping the HW PTYPE to the bit field for decoding */
+struct iavf_rx_ptype_decoded iavf_ptype_lookup[] = {
+ /* L2 Packet types */
+ IAVF_PTT_UNUSED_ENTRY(0),
+ IAVF_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ IAVF_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
+ IAVF_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ IAVF_PTT_UNUSED_ENTRY(4),
+ IAVF_PTT_UNUSED_ENTRY(5),
+ IAVF_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ IAVF_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ IAVF_PTT_UNUSED_ENTRY(8),
+ IAVF_PTT_UNUSED_ENTRY(9),
+ IAVF_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ IAVF_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ IAVF_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+
+ /* Non Tunneled IPv4 */
+ IAVF_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(25),
+ IAVF_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
+ IAVF_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ IAVF_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv4 */
+ IAVF_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ IAVF_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ IAVF_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(32),
+ IAVF_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ IAVF_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ IAVF_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv6 */
+ IAVF_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ IAVF_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ IAVF_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(39),
+ IAVF_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ IAVF_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ IAVF_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT */
+ IAVF_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> IPv4 */
+ IAVF_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ IAVF_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ IAVF_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(47),
+ IAVF_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ IAVF_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ IAVF_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> IPv6 */
+ IAVF_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ IAVF_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ IAVF_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(54),
+ IAVF_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ IAVF_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ IAVF_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC */
+ IAVF_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
+ IAVF_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ IAVF_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ IAVF_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(62),
+ IAVF_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ IAVF_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ IAVF_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
+ IAVF_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ IAVF_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ IAVF_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(69),
+ IAVF_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ IAVF_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ IAVF_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC/VLAN */
+ IAVF_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
+ IAVF_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ IAVF_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ IAVF_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(77),
+ IAVF_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ IAVF_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ IAVF_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
+ IAVF_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ IAVF_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ IAVF_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(84),
+ IAVF_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ IAVF_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ IAVF_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* Non Tunneled IPv6 */
+ IAVF_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(91),
+ IAVF_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
+ IAVF_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ IAVF_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv4 */
+ IAVF_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ IAVF_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ IAVF_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(98),
+ IAVF_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ IAVF_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ IAVF_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv6 */
+ IAVF_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ IAVF_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ IAVF_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(105),
+ IAVF_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ IAVF_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ IAVF_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT */
+ IAVF_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> IPv4 */
+ IAVF_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ IAVF_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ IAVF_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(113),
+ IAVF_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ IAVF_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ IAVF_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> IPv6 */
+ IAVF_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ IAVF_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ IAVF_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(120),
+ IAVF_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ IAVF_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ IAVF_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC */
+ IAVF_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
+ IAVF_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ IAVF_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ IAVF_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(128),
+ IAVF_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ IAVF_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ IAVF_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
+ IAVF_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ IAVF_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ IAVF_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(135),
+ IAVF_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ IAVF_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ IAVF_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN */
+ IAVF_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
+ IAVF_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ IAVF_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ IAVF_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(143),
+ IAVF_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ IAVF_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ IAVF_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
+ IAVF_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ IAVF_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ IAVF_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(150),
+ IAVF_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ IAVF_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ IAVF_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* unused entries */
+ IAVF_PTT_UNUSED_ENTRY(154),
+ IAVF_PTT_UNUSED_ENTRY(155),
+ IAVF_PTT_UNUSED_ENTRY(156),
+ IAVF_PTT_UNUSED_ENTRY(157),
+ IAVF_PTT_UNUSED_ENTRY(158),
+ IAVF_PTT_UNUSED_ENTRY(159),
+
+ IAVF_PTT_UNUSED_ENTRY(160),
+ IAVF_PTT_UNUSED_ENTRY(161),
+ IAVF_PTT_UNUSED_ENTRY(162),
+ IAVF_PTT_UNUSED_ENTRY(163),
+ IAVF_PTT_UNUSED_ENTRY(164),
+ IAVF_PTT_UNUSED_ENTRY(165),
+ IAVF_PTT_UNUSED_ENTRY(166),
+ IAVF_PTT_UNUSED_ENTRY(167),
+ IAVF_PTT_UNUSED_ENTRY(168),
+ IAVF_PTT_UNUSED_ENTRY(169),
+
+ IAVF_PTT_UNUSED_ENTRY(170),
+ IAVF_PTT_UNUSED_ENTRY(171),
+ IAVF_PTT_UNUSED_ENTRY(172),
+ IAVF_PTT_UNUSED_ENTRY(173),
+ IAVF_PTT_UNUSED_ENTRY(174),
+ IAVF_PTT_UNUSED_ENTRY(175),
+ IAVF_PTT_UNUSED_ENTRY(176),
+ IAVF_PTT_UNUSED_ENTRY(177),
+ IAVF_PTT_UNUSED_ENTRY(178),
+ IAVF_PTT_UNUSED_ENTRY(179),
+
+ IAVF_PTT_UNUSED_ENTRY(180),
+ IAVF_PTT_UNUSED_ENTRY(181),
+ IAVF_PTT_UNUSED_ENTRY(182),
+ IAVF_PTT_UNUSED_ENTRY(183),
+ IAVF_PTT_UNUSED_ENTRY(184),
+ IAVF_PTT_UNUSED_ENTRY(185),
+ IAVF_PTT_UNUSED_ENTRY(186),
+ IAVF_PTT_UNUSED_ENTRY(187),
+ IAVF_PTT_UNUSED_ENTRY(188),
+ IAVF_PTT_UNUSED_ENTRY(189),
+
+ IAVF_PTT_UNUSED_ENTRY(190),
+ IAVF_PTT_UNUSED_ENTRY(191),
+ IAVF_PTT_UNUSED_ENTRY(192),
+ IAVF_PTT_UNUSED_ENTRY(193),
+ IAVF_PTT_UNUSED_ENTRY(194),
+ IAVF_PTT_UNUSED_ENTRY(195),
+ IAVF_PTT_UNUSED_ENTRY(196),
+ IAVF_PTT_UNUSED_ENTRY(197),
+ IAVF_PTT_UNUSED_ENTRY(198),
+ IAVF_PTT_UNUSED_ENTRY(199),
+
+ IAVF_PTT_UNUSED_ENTRY(200),
+ IAVF_PTT_UNUSED_ENTRY(201),
+ IAVF_PTT_UNUSED_ENTRY(202),
+ IAVF_PTT_UNUSED_ENTRY(203),
+ IAVF_PTT_UNUSED_ENTRY(204),
+ IAVF_PTT_UNUSED_ENTRY(205),
+ IAVF_PTT_UNUSED_ENTRY(206),
+ IAVF_PTT_UNUSED_ENTRY(207),
+ IAVF_PTT_UNUSED_ENTRY(208),
+ IAVF_PTT_UNUSED_ENTRY(209),
+
+ IAVF_PTT_UNUSED_ENTRY(210),
+ IAVF_PTT_UNUSED_ENTRY(211),
+ IAVF_PTT_UNUSED_ENTRY(212),
+ IAVF_PTT_UNUSED_ENTRY(213),
+ IAVF_PTT_UNUSED_ENTRY(214),
+ IAVF_PTT_UNUSED_ENTRY(215),
+ IAVF_PTT_UNUSED_ENTRY(216),
+ IAVF_PTT_UNUSED_ENTRY(217),
+ IAVF_PTT_UNUSED_ENTRY(218),
+ IAVF_PTT_UNUSED_ENTRY(219),
+
+ IAVF_PTT_UNUSED_ENTRY(220),
+ IAVF_PTT_UNUSED_ENTRY(221),
+ IAVF_PTT_UNUSED_ENTRY(222),
+ IAVF_PTT_UNUSED_ENTRY(223),
+ IAVF_PTT_UNUSED_ENTRY(224),
+ IAVF_PTT_UNUSED_ENTRY(225),
+ IAVF_PTT_UNUSED_ENTRY(226),
+ IAVF_PTT_UNUSED_ENTRY(227),
+ IAVF_PTT_UNUSED_ENTRY(228),
+ IAVF_PTT_UNUSED_ENTRY(229),
+
+ IAVF_PTT_UNUSED_ENTRY(230),
+ IAVF_PTT_UNUSED_ENTRY(231),
+ IAVF_PTT_UNUSED_ENTRY(232),
+ IAVF_PTT_UNUSED_ENTRY(233),
+ IAVF_PTT_UNUSED_ENTRY(234),
+ IAVF_PTT_UNUSED_ENTRY(235),
+ IAVF_PTT_UNUSED_ENTRY(236),
+ IAVF_PTT_UNUSED_ENTRY(237),
+ IAVF_PTT_UNUSED_ENTRY(238),
+ IAVF_PTT_UNUSED_ENTRY(239),
+
+ IAVF_PTT_UNUSED_ENTRY(240),
+ IAVF_PTT_UNUSED_ENTRY(241),
+ IAVF_PTT_UNUSED_ENTRY(242),
+ IAVF_PTT_UNUSED_ENTRY(243),
+ IAVF_PTT_UNUSED_ENTRY(244),
+ IAVF_PTT_UNUSED_ENTRY(245),
+ IAVF_PTT_UNUSED_ENTRY(246),
+ IAVF_PTT_UNUSED_ENTRY(247),
+ IAVF_PTT_UNUSED_ENTRY(248),
+ IAVF_PTT_UNUSED_ENTRY(249),
+
+ IAVF_PTT_UNUSED_ENTRY(250),
+ IAVF_PTT_UNUSED_ENTRY(251),
+ IAVF_PTT_UNUSED_ENTRY(252),
+ IAVF_PTT_UNUSED_ENTRY(253),
+ IAVF_PTT_UNUSED_ENTRY(254),
+ IAVF_PTT_UNUSED_ENTRY(255)
+};
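+
+/* The table above must contain exactly 256 rows: the 8-bit hardware PTYPE is
+ * used directly as the index, and storing the PTYPE in each row makes a
+ * missing row easy to detect.
+ */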
+
+/**
+ * iavf_validate_mac_addr - Validate unicast MAC address
+ * @mac_addr: pointer to MAC address
+ *
+ * Tests a MAC address to ensure it is a valid Individual Address
+ **/
+enum iavf_status iavf_validate_mac_addr(u8 *mac_addr)
+{
+ enum iavf_status status = IAVF_SUCCESS;
+
+ DEBUGFUNC("iavf_validate_mac_addr");
+
+ /* Broadcast addresses ARE multicast addresses
+ * Make sure it is not a multicast address
+ * Reject the zero address
+ */
+ if (IAVF_IS_MULTICAST(mac_addr) ||
+ (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
+ mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0))
+ status = IAVF_ERR_INVALID_MAC_ADDR;
+
+ return status;
+}
+
+/**
+ * iavf_aq_send_msg_to_pf
+ * @hw: pointer to the hardware structure
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cmd_details: pointer to command details
+ *
+ * Send message to PF driver using admin queue. By default, this message
+ * is sent asynchronously, i.e. iavf_asq_send_command() does not wait for
+ * completion before returning.
+ **/
+enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
+ enum virtchnl_ops v_opcode,
+ enum iavf_status v_retval,
+ u8 *msg, u16 msglen,
+ struct iavf_asq_cmd_details *cmd_details)
+{
+ struct iavf_aq_desc desc;
+ struct iavf_asq_cmd_details details;
+ enum iavf_status status;
+
+ iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_send_msg_to_pf);
+ desc.flags |= CPU_TO_LE16((u16)IAVF_AQ_FLAG_SI);
+ desc.cookie_high = CPU_TO_LE32(v_opcode);
+ desc.cookie_low = CPU_TO_LE32(v_retval);
+ if (msglen) {
+ desc.flags |= CPU_TO_LE16((u16)(IAVF_AQ_FLAG_BUF
+ | IAVF_AQ_FLAG_RD));
+ if (msglen > IAVF_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)IAVF_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(msglen);
+ }
+ if (!cmd_details) {
+ iavf_memset(&details, 0, sizeof(details), IAVF_NONDMA_MEM);
+ details.async = true;
+ cmd_details = &details;
+ }
+ status = iavf_asq_send_command(hw, (struct iavf_aq_desc *)&desc, msg,
+ msglen, cmd_details);
+ return status;
+}
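+
+/* Illustrative only (not part of the base code): a typical caller fires a
+ * virtchnl request and returns immediately; the PF's reply arrives later on
+ * the ARQ. A minimal sketch, assuming struct virtchnl_version_info from
+ * virtchnl.h:
+ *
+ *	struct virtchnl_version_info vvi = {
+ *		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
+ *	};
+ *
+ *	iavf_aq_send_msg_to_pf(hw, VIRTCHNL_OP_VERSION, IAVF_SUCCESS,
+ *			       (u8 *)&vvi, sizeof(vvi), NULL);
+ */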
+
+/**
+ * iavf_vf_parse_hw_config
+ * @hw: pointer to the hardware structure
+ * @msg: pointer to the virtual channel VF resource structure
+ *
+ * Given a VF resource message from the PF, populate the hw struct
+ * with appropriate information.
+ **/
+void iavf_vf_parse_hw_config(struct iavf_hw *hw,
+ struct virtchnl_vf_resource *msg)
+{
+ struct virtchnl_vsi_resource *vsi_res;
+ int i;
+
+ vsi_res = &msg->vsi_res[0];
+
+ hw->dev_caps.num_vsis = msg->num_vsis;
+ hw->dev_caps.num_rx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
+ hw->dev_caps.dcb = msg->vf_cap_flags &
+ VIRTCHNL_VF_OFFLOAD_L2;
+ for (i = 0; i < msg->num_vsis; i++) {
+ if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) {
+ iavf_memcpy(hw->mac.perm_addr,
+ vsi_res->default_mac_addr,
+ ETH_ALEN,
+ IAVF_NONDMA_TO_NONDMA);
+ iavf_memcpy(hw->mac.addr, vsi_res->default_mac_addr,
+ ETH_ALEN,
+ IAVF_NONDMA_TO_NONDMA);
+ }
+ vsi_res++;
+ }
+}
+
+/**
+ * iavf_vf_reset
+ * @hw: pointer to the hardware structure
+ *
+ * Send a VF_RESET message to the PF. Does not wait for response from PF
+ * as none will be forthcoming. Immediately after calling this function,
+ * the admin queue should be shut down and (optionally) reinitialized.
+ **/
+enum iavf_status iavf_vf_reset(struct iavf_hw *hw)
+{
+ return iavf_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
+ IAVF_SUCCESS, NULL, 0, NULL);
+}
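+
+/* Illustrative only (not part of the base code): a sketch of the recovery
+ * sequence implied above, assuming the VIRTCHNL_VFR_* reset states from
+ * virtchnl.h:
+ *
+ *	iavf_vf_reset(hw);
+ *	iavf_shutdown_adminq(hw);
+ *	while ((rd32(hw, IAVF_VFGEN_RSTAT) &
+ *		IAVF_VFGEN_RSTAT_VFR_STATE_MASK) != VIRTCHNL_VFR_COMPLETED)
+ *		iavf_msec_delay(10);
+ *	iavf_init_adminq(hw);
+ */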
+
+/**
+ * iavf_aq_clear_all_wol_filters
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Clear all Wake on LAN (WoL) filters
+ **/
+enum iavf_status iavf_aq_clear_all_wol_filters(struct iavf_hw *hw,
+ struct iavf_asq_cmd_details *cmd_details)
+{
+ struct iavf_aq_desc desc;
+ enum iavf_status status;
+
+ iavf_fill_default_direct_cmd_desc(&desc,
+ iavf_aqc_opc_clear_all_wol_filters);
+
+ status = iavf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
diff --git a/src/spdk/dpdk/drivers/common/iavf/iavf_devids.h b/src/spdk/dpdk/drivers/common/iavf/iavf_devids.h
new file mode 100644
index 000000000..2e63aac28
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/iavf_devids.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2020 Intel Corporation
+ */
+
+#ifndef _IAVF_DEVIDS_H_
+#define _IAVF_DEVIDS_H_
+
+/* Vendor ID */
+#define IAVF_INTEL_VENDOR_ID 0x8086
+
+/* Device IDs for the VF driver */
+#define IAVF_DEV_ID_VF 0x154C
+#define IAVF_DEV_ID_VF_HV 0x1571
+#define IAVF_DEV_ID_ADAPTIVE_VF 0x1889
+#define IAVF_DEV_ID_X722_VF 0x37CD
+
+#endif /* _IAVF_DEVIDS_H_ */
diff --git a/src/spdk/dpdk/drivers/common/iavf/iavf_impl.c b/src/spdk/dpdk/drivers/common/iavf/iavf_impl.c
new file mode 100644
index 000000000..6174a9144
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/iavf_impl.c
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2020 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <inttypes.h>
+
+#include <rte_common.h>
+#include <rte_random.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+
+#include "iavf_type.h"
+#include "iavf_prototype.h"
+
+int iavf_common_logger;
+
+enum iavf_status
+iavf_allocate_dma_mem_d(__rte_unused struct iavf_hw *hw,
+ struct iavf_dma_mem *mem,
+ u64 size,
+ u32 alignment)
+{
+ const struct rte_memzone *mz = NULL;
+ char z_name[RTE_MEMZONE_NAMESIZE];
+
+ if (!mem)
+ return IAVF_ERR_PARAM;
+
+ snprintf(z_name, sizeof(z_name), "iavf_dma_%"PRIu64, rte_rand());
+ mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
+ RTE_MEMZONE_IOVA_CONTIG, alignment,
+ RTE_PGSIZE_2M);
+ if (!mz)
+ return IAVF_ERR_NO_MEMORY;
+
+ mem->size = size;
+ mem->va = mz->addr;
+ mem->pa = mz->iova;
+ mem->zone = (const void *)mz;
+
+ return IAVF_SUCCESS;
+}
+
+enum iavf_status
+iavf_free_dma_mem_d(__rte_unused struct iavf_hw *hw,
+ struct iavf_dma_mem *mem)
+{
+ if (!mem)
+ return IAVF_ERR_PARAM;
+
+ rte_memzone_free((const struct rte_memzone *)mem->zone);
+ mem->zone = NULL;
+ mem->va = NULL;
+ mem->pa = (u64)0;
+
+ return IAVF_SUCCESS;
+}
+
+enum iavf_status
+iavf_allocate_virt_mem_d(__rte_unused struct iavf_hw *hw,
+ struct iavf_virt_mem *mem,
+ u32 size)
+{
+ if (!mem)
+ return IAVF_ERR_PARAM;
+
+ mem->size = size;
+ mem->va = rte_zmalloc("iavf", size, 0);
+
+ if (mem->va)
+ return IAVF_SUCCESS;
+ else
+ return IAVF_ERR_NO_MEMORY;
+}
+
+enum iavf_status
+iavf_free_virt_mem_d(__rte_unused struct iavf_hw *hw,
+ struct iavf_virt_mem *mem)
+{
+ if (!mem)
+ return IAVF_ERR_PARAM;
+
+ rte_free(mem->va);
+ mem->va = NULL;
+
+ return IAVF_SUCCESS;
+}
+
+RTE_INIT(iavf_common_init_log)
+{
+ iavf_common_logger = rte_log_register("pmd.common.iavf");
+ if (iavf_common_logger >= 0)
+ rte_log_set_level(iavf_common_logger, RTE_LOG_NOTICE);
+}
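+
+/* The log type registered above can be raised at runtime without a rebuild,
+ * e.g. via the EAL option --log-level=pmd.common.iavf:debug (exact syntax
+ * may vary between DPDK releases).
+ */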
diff --git a/src/spdk/dpdk/drivers/common/iavf/iavf_osdep.h b/src/spdk/dpdk/drivers/common/iavf/iavf_osdep.h
new file mode 100644
index 000000000..7cba13ff7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/iavf_osdep.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2020 Intel Corporation
+ */
+
+#ifndef _IAVF_OSDEP_H_
+#define _IAVF_OSDEP_H_
+
+#include <string.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdarg.h>
+
+#include <rte_common.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_byteorder.h>
+#include <rte_cycles.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+#include <rte_io.h>
+
+#ifndef __INTEL_NET_BASE_OSDEP__
+#define __INTEL_NET_BASE_OSDEP__
+
+#define INLINE inline
+#define STATIC static
+
+typedef uint8_t u8;
+typedef int8_t s8;
+typedef uint16_t u16;
+typedef int16_t s16;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef uint64_t u64;
+typedef int64_t s64;
+
+#ifndef __le16
+#define __le16 uint16_t
+#endif
+#ifndef __le32
+#define __le32 uint32_t
+#endif
+#ifndef __le64
+#define __le64 uint64_t
+#endif
+#ifndef __be16
+#define __be16 uint16_t
+#endif
+#ifndef __be32
+#define __be32 uint32_t
+#endif
+#ifndef __be64
+#define __be64 uint64_t
+#endif
+
+#define min(a, b) RTE_MIN(a, b)
+#define max(a, b) RTE_MAX(a, b)
+
+#define FIELD_SIZEOF(t, f) RTE_SIZEOF_FIELD(t, f)
+#define ARRAY_SIZE(arr) RTE_DIM(arr)
+
+#define CPU_TO_LE16(o) rte_cpu_to_le_16(o)
+#define CPU_TO_LE32(s) rte_cpu_to_le_32(s)
+#define CPU_TO_LE64(h) rte_cpu_to_le_64(h)
+#define LE16_TO_CPU(a) rte_le_to_cpu_16(a)
+#define LE32_TO_CPU(c) rte_le_to_cpu_32(c)
+#define LE64_TO_CPU(k) rte_le_to_cpu_64(k)
+
+#define CPU_TO_BE16(o) rte_cpu_to_be_16(o)
+#define CPU_TO_BE32(o) rte_cpu_to_be_32(o)
+#define CPU_TO_BE64(o) rte_cpu_to_be_64(o)
+
+#define NTOHS(a) rte_be_to_cpu_16(a)
+#define NTOHL(a) rte_be_to_cpu_32(a)
+#define HTONS(a) rte_cpu_to_be_16(a)
+#define HTONL(a) rte_cpu_to_be_32(a)
+
+static __rte_always_inline uint32_t
+readl(volatile void *addr)
+{
+ return rte_le_to_cpu_32(rte_read32(addr));
+}
+
+static __rte_always_inline void
+writel(uint32_t value, volatile void *addr)
+{
+ rte_write32(rte_cpu_to_le_32(value), addr);
+}
+
+static __rte_always_inline void
+writel_relaxed(uint32_t value, volatile void *addr)
+{
+ rte_write32_relaxed(rte_cpu_to_le_32(value), addr);
+}
+
+static __rte_always_inline uint64_t
+readq(volatile void *addr)
+{
+ return rte_le_to_cpu_64(rte_read64(addr));
+}
+
+static __rte_always_inline void
+writeq(uint64_t value, volatile void *addr)
+{
+ rte_write64(rte_cpu_to_le_64(value), addr);
+}
+
+#define wr32(a, reg, value) writel((value), (a)->hw_addr + (reg))
+#define rd32(a, reg) readl((a)->hw_addr + (reg))
+#define wr64(a, reg, value) writeq((value), (a)->hw_addr + (reg))
+#define rd64(a, reg) readq((a)->hw_addr + (reg))
+
+#endif /* __INTEL_NET_BASE_OSDEP__ */
+
+#define iavf_memset(a, b, c, d) memset((a), (b), (c))
+#define iavf_memcpy(a, b, c, d) rte_memcpy((a), (b), (c))
+
+#define iavf_usec_delay(x) rte_delay_us_sleep(x)
+#define iavf_msec_delay(x) iavf_usec_delay(1000 * (x))
+
+#define IAVF_PCI_REG_WRITE(reg, value) writel(value, reg)
+#define IAVF_PCI_REG_WRITE_RELAXED(reg, value) writel_relaxed(value, reg)
+
+#define IAVF_READ_REG(hw, reg) rd32(hw, reg)
+#define IAVF_WRITE_REG(hw, reg, value) wr32(hw, reg, value)
+
+#define IAVF_WRITE_FLUSH(a) IAVF_READ_REG(a, IAVF_VFGEN_RSTAT)
+
+extern int iavf_common_logger;
+
+#define DEBUGOUT(S) rte_log(RTE_LOG_DEBUG, iavf_common_logger, S)
+#define DEBUGOUT2(S, A...) rte_log(RTE_LOG_DEBUG, iavf_common_logger, S, ##A)
+#define DEBUGFUNC(F) DEBUGOUT(F "\n")
+
+#define iavf_debug(h, m, s, ...) \
+do { \
+ if (((m) & (h)->debug_mask)) \
+ rte_log(RTE_LOG_DEBUG, iavf_common_logger, \
+ "iavf %02x.%x " s, \
+ (h)->bus.device, (h)->bus.func, \
+ ##__VA_ARGS__); \
+} while (0)
+
+/* memory allocation tracking */
+struct iavf_dma_mem {
+ void *va;
+ u64 pa;
+ u32 size;
+ const void *zone;
+} __rte_packed;
+
+struct iavf_virt_mem {
+ void *va;
+ u32 size;
+} __rte_packed;
+
+/* SW spinlock */
+struct iavf_spinlock {
+ rte_spinlock_t spinlock;
+};
+
+#define iavf_allocate_dma_mem(h, m, unused, s, a) \
+ iavf_allocate_dma_mem_d(h, m, s, a)
+#define iavf_free_dma_mem(h, m) iavf_free_dma_mem_d(h, m)
+
+#define iavf_allocate_virt_mem(h, m, s) iavf_allocate_virt_mem_d(h, m, s)
+#define iavf_free_virt_mem(h, m) iavf_free_virt_mem_d(h, m)
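+
+/* Illustrative only (not part of the base code): the memory-type argument is
+ * ignored on DPDK, so allocating and releasing a descriptor ring reduces to
+ * the following sketch (do_something, the 32-byte descriptor size and the
+ * 4096 alignment are placeholders):
+ *
+ *	struct iavf_dma_mem ring;
+ *	enum iavf_status ret;
+ *
+ *	ret = iavf_allocate_dma_mem(hw, &ring, iavf_mem_arq_ring,
+ *				    num_entries * 32, 4096);
+ *	if (ret == IAVF_SUCCESS) {
+ *		do_something(ring.va, ring.pa);
+ *		iavf_free_dma_mem(hw, &ring);
+ *	}
+ */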
+
+static inline void
+iavf_init_spinlock_d(struct iavf_spinlock *sp)
+{
+ rte_spinlock_init(&sp->spinlock);
+}
+
+static inline void
+iavf_acquire_spinlock_d(struct iavf_spinlock *sp)
+{
+ rte_spinlock_lock(&sp->spinlock);
+}
+
+static inline void
+iavf_release_spinlock_d(struct iavf_spinlock *sp)
+{
+ rte_spinlock_unlock(&sp->spinlock);
+}
+
+static inline void
+iavf_destroy_spinlock_d(__rte_unused struct iavf_spinlock *sp)
+{
+}
+
+#define iavf_init_spinlock(_sp) iavf_init_spinlock_d(_sp)
+#define iavf_acquire_spinlock(_sp) iavf_acquire_spinlock_d(_sp)
+#define iavf_release_spinlock(_sp) iavf_release_spinlock_d(_sp)
+#define iavf_destroy_spinlock(_sp) iavf_destroy_spinlock_d(_sp)
+
+#endif /* _IAVF_OSDEP_H_ */
diff --git a/src/spdk/dpdk/drivers/common/iavf/iavf_prototype.h b/src/spdk/dpdk/drivers/common/iavf/iavf_prototype.h
new file mode 100644
index 000000000..31ce2af49
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/iavf_prototype.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2020 Intel Corporation
+ */
+
+#ifndef _IAVF_PROTOTYPE_H_
+#define _IAVF_PROTOTYPE_H_
+
+#include "iavf_type.h"
+#include "iavf_alloc.h"
+#include "virtchnl.h"
+
+/* Prototypes for shared code functions that are not in
+ * the standard function pointer structures. These exist
+ * mostly because they are needed even before init has
+ * happened, and they assist in the early SW and FW
+ * setup.
+ */
+
+/* adminq functions */
+enum iavf_status iavf_init_adminq(struct iavf_hw *hw);
+enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw);
+enum iavf_status iavf_init_asq(struct iavf_hw *hw);
+enum iavf_status iavf_init_arq(struct iavf_hw *hw);
+enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw);
+enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw);
+enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw);
+enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw);
+u16 iavf_clean_asq(struct iavf_hw *hw);
+void iavf_free_adminq_asq(struct iavf_hw *hw);
+void iavf_free_adminq_arq(struct iavf_hw *hw);
+enum iavf_status iavf_validate_mac_addr(u8 *mac_addr);
+void iavf_adminq_init_ring_data(struct iavf_hw *hw);
+enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
+ struct iavf_arq_event_info *e,
+ u16 *events_pending);
+enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
+ struct iavf_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct iavf_asq_cmd_details *cmd_details);
+bool iavf_asq_done(struct iavf_hw *hw);
+
+/* debug function for adminq */
+void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask,
+ void *desc, void *buffer, u16 buf_len);
+
+void iavf_idle_aq(struct iavf_hw *hw);
+bool iavf_check_asq_alive(struct iavf_hw *hw);
+enum iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading);
+
+enum iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+enum iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw,
+ u16 seid,
+ struct iavf_aqc_get_set_rss_key_data *key);
+enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw,
+ u16 seid,
+ struct iavf_aqc_get_set_rss_key_data *key);
+const char *iavf_aq_str(struct iavf_hw *hw, enum iavf_admin_queue_err aq_err);
+const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err);
+
+enum iavf_status iavf_set_mac_type(struct iavf_hw *hw);
+
+extern struct iavf_rx_ptype_decoded iavf_ptype_lookup[];
+
+STATIC INLINE struct iavf_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
+{
+ return iavf_ptype_lookup[ptype];
+}
+
+/* prototype for functions used for SW spinlocks */
+void iavf_init_spinlock(struct iavf_spinlock *sp);
+void iavf_acquire_spinlock(struct iavf_spinlock *sp);
+void iavf_release_spinlock(struct iavf_spinlock *sp);
+void iavf_destroy_spinlock(struct iavf_spinlock *sp);
+
+void iavf_vf_parse_hw_config(struct iavf_hw *hw,
+ struct virtchnl_vf_resource *msg);
+enum iavf_status iavf_vf_reset(struct iavf_hw *hw);
+enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
+ enum virtchnl_ops v_opcode,
+ enum iavf_status v_retval,
+ u8 *msg, u16 msglen,
+ struct iavf_asq_cmd_details *cmd_details);
+enum iavf_status iavf_aq_debug_dump(struct iavf_hw *hw, u8 cluster_id,
+ u8 table_id, u32 start_index, u16 buff_size,
+ void *buff, u16 *ret_buff_size,
+ u8 *ret_next_table, u32 *ret_next_index,
+ struct iavf_asq_cmd_details *cmd_details);
+enum iavf_status iavf_aq_clear_all_wol_filters(struct iavf_hw *hw,
+ struct iavf_asq_cmd_details *cmd_details);
+#endif /* _IAVF_PROTOTYPE_H_ */
diff --git a/src/spdk/dpdk/drivers/common/iavf/iavf_register.h b/src/spdk/dpdk/drivers/common/iavf/iavf_register.h
new file mode 100644
index 000000000..03d62a9da
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/iavf_register.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2020 Intel Corporation
+ */
+
+#ifndef _IAVF_REGISTER_H_
+#define _IAVF_REGISTER_H_
+
+#define IAVF_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
+#define IAVF_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */
+#define IAVF_VF_ARQH1 0x00007400 /* Reset: EMPR */
+#define IAVF_VF_ARQH1_ARQH_SHIFT 0
+#define IAVF_VF_ARQH1_ARQH_MASK IAVF_MASK(0x3FF, IAVF_VF_ARQH1_ARQH_SHIFT)
+#define IAVF_VF_ARQLEN1 0x00008000 /* Reset: EMPR */
+#define IAVF_VF_ARQLEN1_ARQVFE_SHIFT 28
+#define IAVF_VF_ARQLEN1_ARQVFE_MASK IAVF_MASK(1UL, IAVF_VF_ARQLEN1_ARQVFE_SHIFT)
+#define IAVF_VF_ARQLEN1_ARQOVFL_SHIFT 29
+#define IAVF_VF_ARQLEN1_ARQOVFL_MASK IAVF_MASK(1UL, IAVF_VF_ARQLEN1_ARQOVFL_SHIFT)
+#define IAVF_VF_ARQLEN1_ARQCRIT_SHIFT 30
+#define IAVF_VF_ARQLEN1_ARQCRIT_MASK IAVF_MASK(1UL, IAVF_VF_ARQLEN1_ARQCRIT_SHIFT)
+#define IAVF_VF_ARQLEN1_ARQENABLE_SHIFT 31
+#define IAVF_VF_ARQLEN1_ARQENABLE_MASK IAVF_MASK(1UL, IAVF_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define IAVF_VF_ARQT1 0x00007000 /* Reset: EMPR */
+#define IAVF_VF_ATQBAH1 0x00007800 /* Reset: EMPR */
+#define IAVF_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */
+#define IAVF_VF_ATQH1 0x00006400 /* Reset: EMPR */
+#define IAVF_VF_ATQLEN1 0x00006800 /* Reset: EMPR */
+#define IAVF_VF_ATQLEN1_ATQVFE_SHIFT 28
+#define IAVF_VF_ATQLEN1_ATQVFE_MASK IAVF_MASK(1UL, IAVF_VF_ATQLEN1_ATQVFE_SHIFT)
+#define IAVF_VF_ATQLEN1_ATQOVFL_SHIFT 29
+#define IAVF_VF_ATQLEN1_ATQOVFL_MASK IAVF_MASK(1UL, IAVF_VF_ATQLEN1_ATQOVFL_SHIFT)
+#define IAVF_VF_ATQLEN1_ATQCRIT_SHIFT 30
+#define IAVF_VF_ATQLEN1_ATQCRIT_MASK IAVF_MASK(1UL, IAVF_VF_ATQLEN1_ATQCRIT_SHIFT)
+#define IAVF_VF_ATQLEN1_ATQENABLE_SHIFT 31
+#define IAVF_VF_ATQLEN1_ATQENABLE_MASK IAVF_MASK(1UL, IAVF_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define IAVF_VF_ATQT1 0x00008400 /* Reset: EMPR */
+#define IAVF_VFGEN_RSTAT 0x00008800 /* Reset: VFR */
+#define IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT 0
+#define IAVF_VFGEN_RSTAT_VFR_STATE_MASK IAVF_MASK(0x3, IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT)
+#define IAVF_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */
+#define IAVF_VFINT_DYN_CTL01_INTENA_SHIFT 0
+#define IAVF_VFINT_DYN_CTL01_INTENA_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTL01_INTENA_SHIFT)
+#define IAVF_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1
+#define IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
+#define IAVF_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
+#define IAVF_VFINT_DYN_CTL01_SWINT_TRIG_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
+#define IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
+#define IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
+#define IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT 5
+#define IAVF_VFINT_DYN_CTL01_INTERVAL_MASK IAVF_MASK(0xFFF, IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT)
+#define IAVF_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
+#define IAVF_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
+#define IAVF_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
+#define IAVF_VFINT_DYN_CTL01_SW_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
+#define IAVF_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
+#define IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT 0
+#define IAVF_VFINT_DYN_CTLN1_INTENA_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT)
+#define IAVF_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
+#define IAVF_VFINT_DYN_CTLN1_CLEARPBA_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
+#define IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
+#define IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
+#define IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
+#define IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
+#define IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
+#define IAVF_VFINT_DYN_CTLN1_INTERVAL_MASK IAVF_MASK(0xFFF, IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
+#define IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
+#define IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
+#define IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
+#define IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
+#define IAVF_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */
+#define IAVF_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
+#define IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK IAVF_MASK(1UL, IAVF_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
+#define IAVF_VFINT_ICR0_ENA1_RSVD_SHIFT 31
+#define IAVF_VFINT_ICR01 0x00004800 /* Reset: CORER */
+#define IAVF_VFINT_ICR01_QUEUE_0_SHIFT 1
+#define IAVF_VFINT_ICR01_QUEUE_0_MASK IAVF_MASK(1UL, IAVF_VFINT_ICR01_QUEUE_0_SHIFT)
+#define IAVF_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
+#define IAVF_VFINT_ICR01_LINK_STAT_CHANGE_MASK IAVF_MASK(1UL, IAVF_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
+#define IAVF_VFINT_ICR01_ADMINQ_SHIFT 30
+#define IAVF_VFINT_ICR01_ADMINQ_MASK IAVF_MASK(1UL, IAVF_VFINT_ICR01_ADMINQ_SHIFT)
+#define IAVF_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */
+#define IAVF_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */
+#define IAVF_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */
+#define IAVF_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define IAVF_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */
+#define IAVF_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define IAVF_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
+#define IAVF_VFQF_HKEY_MAX_INDEX 12
+#define IAVF_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define IAVF_VFQF_HLUT_MAX_INDEX 15
+#define IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
+#define IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK IAVF_MASK(1UL, IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
+
+#endif /* _IAVF_REGISTER_H_ */
diff --git a/src/spdk/dpdk/drivers/common/iavf/iavf_status.h b/src/spdk/dpdk/drivers/common/iavf/iavf_status.h
new file mode 100644
index 000000000..f42563806
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/iavf_status.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2020 Intel Corporation
+ */
+
+#ifndef _IAVF_STATUS_H_
+#define _IAVF_STATUS_H_
+
+/* Error Codes */
+enum iavf_status {
+ IAVF_SUCCESS = 0,
+ IAVF_ERR_NVM = -1,
+ IAVF_ERR_NVM_CHECKSUM = -2,
+ IAVF_ERR_PHY = -3,
+ IAVF_ERR_CONFIG = -4,
+ IAVF_ERR_PARAM = -5,
+ IAVF_ERR_MAC_TYPE = -6,
+ IAVF_ERR_UNKNOWN_PHY = -7,
+ IAVF_ERR_LINK_SETUP = -8,
+ IAVF_ERR_ADAPTER_STOPPED = -9,
+ IAVF_ERR_INVALID_MAC_ADDR = -10,
+ IAVF_ERR_DEVICE_NOT_SUPPORTED = -11,
+ IAVF_ERR_MASTER_REQUESTS_PENDING = -12,
+ IAVF_ERR_INVALID_LINK_SETTINGS = -13,
+ IAVF_ERR_AUTONEG_NOT_COMPLETE = -14,
+ IAVF_ERR_RESET_FAILED = -15,
+ IAVF_ERR_SWFW_SYNC = -16,
+ IAVF_ERR_NO_AVAILABLE_VSI = -17,
+ IAVF_ERR_NO_MEMORY = -18,
+ IAVF_ERR_BAD_PTR = -19,
+ IAVF_ERR_RING_FULL = -20,
+ IAVF_ERR_INVALID_PD_ID = -21,
+ IAVF_ERR_INVALID_QP_ID = -22,
+ IAVF_ERR_INVALID_CQ_ID = -23,
+ IAVF_ERR_INVALID_CEQ_ID = -24,
+ IAVF_ERR_INVALID_AEQ_ID = -25,
+ IAVF_ERR_INVALID_SIZE = -26,
+ IAVF_ERR_INVALID_ARP_INDEX = -27,
+ IAVF_ERR_INVALID_FPM_FUNC_ID = -28,
+ IAVF_ERR_QP_INVALID_MSG_SIZE = -29,
+ IAVF_ERR_QP_TOOMANY_WRS_POSTED = -30,
+ IAVF_ERR_INVALID_FRAG_COUNT = -31,
+ IAVF_ERR_QUEUE_EMPTY = -32,
+ IAVF_ERR_INVALID_ALIGNMENT = -33,
+ IAVF_ERR_FLUSHED_QUEUE = -34,
+ IAVF_ERR_INVALID_PUSH_PAGE_INDEX = -35,
+ IAVF_ERR_INVALID_IMM_DATA_SIZE = -36,
+ IAVF_ERR_TIMEOUT = -37,
+ IAVF_ERR_OPCODE_MISMATCH = -38,
+ IAVF_ERR_CQP_COMPL_ERROR = -39,
+ IAVF_ERR_INVALID_VF_ID = -40,
+ IAVF_ERR_INVALID_HMCFN_ID = -41,
+ IAVF_ERR_BACKING_PAGE_ERROR = -42,
+ IAVF_ERR_NO_PBLCHUNKS_AVAILABLE = -43,
+ IAVF_ERR_INVALID_PBLE_INDEX = -44,
+ IAVF_ERR_INVALID_SD_INDEX = -45,
+ IAVF_ERR_INVALID_PAGE_DESC_INDEX = -46,
+ IAVF_ERR_INVALID_SD_TYPE = -47,
+ IAVF_ERR_MEMCPY_FAILED = -48,
+ IAVF_ERR_INVALID_HMC_OBJ_INDEX = -49,
+ IAVF_ERR_INVALID_HMC_OBJ_COUNT = -50,
+ IAVF_ERR_INVALID_SRQ_ARM_LIMIT = -51,
+ IAVF_ERR_SRQ_ENABLED = -52,
+ IAVF_ERR_ADMIN_QUEUE_ERROR = -53,
+ IAVF_ERR_ADMIN_QUEUE_TIMEOUT = -54,
+ IAVF_ERR_BUF_TOO_SHORT = -55,
+ IAVF_ERR_ADMIN_QUEUE_FULL = -56,
+ IAVF_ERR_ADMIN_QUEUE_NO_WORK = -57,
+ IAVF_ERR_BAD_IWARP_CQE = -58,
+ IAVF_ERR_NVM_BLANK_MODE = -59,
+ IAVF_ERR_NOT_IMPLEMENTED = -60,
+ IAVF_ERR_PE_DOORBELL_NOT_ENABLED = -61,
+ IAVF_ERR_DIAG_TEST_FAILED = -62,
+ IAVF_ERR_NOT_READY = -63,
+ IAVF_NOT_SUPPORTED = -64,
+ IAVF_ERR_FIRMWARE_API_VERSION = -65,
+ IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66,
+};
+
+#endif /* _IAVF_STATUS_H_ */
diff --git a/src/spdk/dpdk/drivers/common/iavf/iavf_type.h b/src/spdk/dpdk/drivers/common/iavf/iavf_type.h
new file mode 100644
index 000000000..665b070a0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/iavf_type.h
@@ -0,0 +1,1014 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2020 Intel Corporation
+ */
+
+#ifndef _IAVF_TYPE_H_
+#define _IAVF_TYPE_H_
+
+#include "iavf_status.h"
+#include "iavf_osdep.h"
+#include "iavf_register.h"
+#include "iavf_adminq.h"
+#include "iavf_devids.h"
+
+#define IAVF_RXQ_CTX_DBUFF_SHIFT 7
+
+#define UNREFERENCED_XPARAMETER
+#define UNREFERENCED_1PARAMETER(_p) (_p);
+#define UNREFERENCED_2PARAMETER(_p, _q) (_p); (_q);
+#define UNREFERENCED_3PARAMETER(_p, _q, _r) (_p); (_q); (_r);
+#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) (_p); (_q); (_r); (_s);
+#define UNREFERENCED_5PARAMETER(_p, _q, _r, _s, _t) (_p); (_q); (_r); (_s); (_t);
+
+#define BIT(a) (1UL << (a))
+#define BIT_ULL(a) (1ULL << (a))
+
+/* IAVF_MASK is a macro used on 32-bit registers */
+#define IAVF_MASK(mask, shift) ((mask) << (shift))
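+/* Illustrative sketch (editor's example, not part of the original header):
+ * IAVF_MASK is normally paired with a *_SHIFT constant to build a field
+ * mask and extract a field. The register value below is made up.
+ *
+ *	u32 reg   = 0x00000160;
+ *	u32 mask  = IAVF_MASK(0x3UL, 5);     (0x3 << 5 == 0x60)
+ *	u32 field = (reg & mask) >> 5;       (field == 0x3)
+ */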
+
+#define IAVF_MAX_PF 16
+#define IAVF_MAX_PF_VSI 64
+#define IAVF_MAX_PF_QP 128
+#define IAVF_MAX_VSI_QP 16
+#define IAVF_MAX_VF_VSI 4
+#define IAVF_MAX_CHAINED_RX_BUFFERS 5
+
+/* something less than 1 minute */
+#define IAVF_HEARTBEAT_TIMEOUT (HZ * 50)
+
+
+/* Check whether address is multicast. */
+#define IAVF_IS_MULTICAST(address) (bool)(((u8 *)(address))[0] & ((u8)0x01))
+
+/* Check whether an address is broadcast. */
+#define IAVF_IS_BROADCAST(address) \
+ ((((u8 *)(address))[0] == ((u8)0xff)) && \
+ (((u8 *)(address))[1] == ((u8)0xff)))
+
+
+/* forward declaration */
+struct iavf_hw;
+typedef void (*IAVF_ADMINQ_CALLBACK)(struct iavf_hw *, struct iavf_aq_desc *);
+
+#define ETH_ALEN 6
+/* Data type manipulation macros. */
+#define IAVF_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
+#define IAVF_LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF))
+
+#define IAVF_HI_WORD(x) ((u16)(((x) >> 16) & 0xFFFF))
+#define IAVF_LO_WORD(x) ((u16)((x) & 0xFFFF))
+
+#define IAVF_HI_BYTE(x) ((u8)(((x) >> 8) & 0xFF))
+#define IAVF_LO_BYTE(x) ((u8)((x) & 0xFF))
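+/* Worked example (illustrative, with a made-up address): splitting a
+ * 64-bit DMA address such as 0x00000001ABCD0000 into the two 32-bit
+ * halves that a register pair expects:
+ *
+ *	IAVF_HI_DWORD(0x00000001ABCD0000ULL) == 0x00000001
+ *	IAVF_LO_DWORD(0x00000001ABCD0000ULL) == 0xABCD0000
+ */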
+
+/* Number of Transmit Descriptors must be a multiple of 8. */
+#define IAVF_REQ_TX_DESCRIPTOR_MULTIPLE 8
+/* Number of Receive Descriptors must be a multiple of 32 if
+ * the number of descriptors is greater than 32.
+ */
+#define IAVF_REQ_RX_DESCRIPTOR_MULTIPLE 32
+
+#define IAVF_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
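+/* Worked example (illustrative, with made-up indexes) for a ring of
+ * count == 512: if next_to_use == 500 and next_to_clean == 10, the clean
+ * index trails the use index numerically, so count is added back in:
+ *	(512 + 10) - 500 - 1 == 21 unused descriptors
+ * Without wrap (next_to_clean == 300, next_to_use == 100):
+ *	(0 + 300) - 100 - 1 == 199 unused descriptors
+ * The trailing "- 1" keeps one descriptor unused so that a full ring can
+ * be distinguished from an empty one.
+ */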
+
+/* bitfields for Tx queue mapping in QTX_CTL */
+#define IAVF_QTX_CTL_VF_QUEUE 0x0
+#define IAVF_QTX_CTL_VM_QUEUE 0x1
+#define IAVF_QTX_CTL_PF_QUEUE 0x2
+
+/* debug masks - set these bits in hw->debug_mask to control output */
+enum iavf_debug_mask {
+ IAVF_DEBUG_INIT = 0x00000001,
+ IAVF_DEBUG_RELEASE = 0x00000002,
+
+ IAVF_DEBUG_LINK = 0x00000010,
+ IAVF_DEBUG_PHY = 0x00000020,
+ IAVF_DEBUG_HMC = 0x00000040,
+ IAVF_DEBUG_NVM = 0x00000080,
+ IAVF_DEBUG_LAN = 0x00000100,
+ IAVF_DEBUG_FLOW = 0x00000200,
+ IAVF_DEBUG_DCB = 0x00000400,
+ IAVF_DEBUG_DIAG = 0x00000800,
+ IAVF_DEBUG_FD = 0x00001000,
+ IAVF_DEBUG_PACKAGE = 0x00002000,
+
+ IAVF_DEBUG_AQ_MESSAGE = 0x01000000,
+ IAVF_DEBUG_AQ_DESCRIPTOR = 0x02000000,
+ IAVF_DEBUG_AQ_DESC_BUFFER = 0x04000000,
+ IAVF_DEBUG_AQ_COMMAND = 0x06000000,
+ IAVF_DEBUG_AQ = 0x0F000000,
+
+ IAVF_DEBUG_USER = 0xF0000000,
+
+ IAVF_DEBUG_ALL = 0xFFFFFFFF
+};
+
+/* PCI Bus Info */
+#define IAVF_PCI_LINK_STATUS 0xB2
+#define IAVF_PCI_LINK_WIDTH 0x3F0
+#define IAVF_PCI_LINK_WIDTH_1 0x10
+#define IAVF_PCI_LINK_WIDTH_2 0x20
+#define IAVF_PCI_LINK_WIDTH_4 0x40
+#define IAVF_PCI_LINK_WIDTH_8 0x80
+#define IAVF_PCI_LINK_SPEED 0xF
+#define IAVF_PCI_LINK_SPEED_2500 0x1
+#define IAVF_PCI_LINK_SPEED_5000 0x2
+#define IAVF_PCI_LINK_SPEED_8000 0x3
+
+#define IAVF_MDIO_CLAUSE22_STCODE_MASK IAVF_MASK(1, \
+ IAVF_GLGEN_MSCA_STCODE_SHIFT)
+#define IAVF_MDIO_CLAUSE22_OPCODE_WRITE_MASK IAVF_MASK(1, \
+ IAVF_GLGEN_MSCA_OPCODE_SHIFT)
+#define IAVF_MDIO_CLAUSE22_OPCODE_READ_MASK IAVF_MASK(2, \
+ IAVF_GLGEN_MSCA_OPCODE_SHIFT)
+
+#define IAVF_MDIO_CLAUSE45_STCODE_MASK IAVF_MASK(0, \
+ IAVF_GLGEN_MSCA_STCODE_SHIFT)
+#define IAVF_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK IAVF_MASK(0, \
+ IAVF_GLGEN_MSCA_OPCODE_SHIFT)
+#define IAVF_MDIO_CLAUSE45_OPCODE_WRITE_MASK IAVF_MASK(1, \
+ IAVF_GLGEN_MSCA_OPCODE_SHIFT)
+#define IAVF_MDIO_CLAUSE45_OPCODE_READ_INC_ADDR_MASK IAVF_MASK(2, \
+ IAVF_GLGEN_MSCA_OPCODE_SHIFT)
+#define IAVF_MDIO_CLAUSE45_OPCODE_READ_MASK IAVF_MASK(3, \
+ IAVF_GLGEN_MSCA_OPCODE_SHIFT)
+
+#define IAVF_PHY_COM_REG_PAGE 0x1E
+#define IAVF_PHY_LED_LINK_MODE_MASK 0xF0
+#define IAVF_PHY_LED_MANUAL_ON 0x100
+#define IAVF_PHY_LED_PROV_REG_1 0xC430
+#define IAVF_PHY_LED_MODE_MASK 0xFFFF
+#define IAVF_PHY_LED_MODE_ORIG 0x80000000
+
+/* Memory types */
+enum iavf_memset_type {
+ IAVF_NONDMA_MEM = 0,
+ IAVF_DMA_MEM
+};
+
+/* Memcpy types */
+enum iavf_memcpy_type {
+ IAVF_NONDMA_TO_NONDMA = 0,
+ IAVF_NONDMA_TO_DMA,
+ IAVF_DMA_TO_DMA,
+ IAVF_DMA_TO_NONDMA
+};
+
+/* These are structs for managing the hardware information and the operations.
+ * The structures of function pointers are filled out at init time when we
+ * know for sure exactly which hardware we're working with. This gives us the
+ * flexibility of using the same main driver code but adapting to slightly
+ * different hardware needs as new parts are developed. For this architecture,
+ * the Firmware and AdminQ are intended to insulate the driver from most of the
+ * future changes, but these structures will also do part of the job.
+ */
+enum iavf_mac_type {
+ IAVF_MAC_UNKNOWN = 0,
+ IAVF_MAC_XL710,
+ IAVF_MAC_VF,
+ IAVF_MAC_X722,
+ IAVF_MAC_X722_VF,
+ IAVF_MAC_GENERIC,
+};
+
+enum iavf_vsi_type {
+ IAVF_VSI_MAIN = 0,
+ IAVF_VSI_VMDQ1 = 1,
+ IAVF_VSI_VMDQ2 = 2,
+ IAVF_VSI_CTRL = 3,
+ IAVF_VSI_FCOE = 4,
+ IAVF_VSI_MIRROR = 5,
+ IAVF_VSI_SRIOV = 6,
+ IAVF_VSI_FDIR = 7,
+ IAVF_VSI_TYPE_UNKNOWN
+};
+
+enum iavf_queue_type {
+ IAVF_QUEUE_TYPE_RX = 0,
+ IAVF_QUEUE_TYPE_TX,
+ IAVF_QUEUE_TYPE_PE_CEQ,
+ IAVF_QUEUE_TYPE_UNKNOWN
+};
+
+#define IAVF_HW_CAP_MAX_GPIO 30
+#define IAVF_HW_CAP_MDIO_PORT_MODE_MDIO 0
+#define IAVF_HW_CAP_MDIO_PORT_MODE_I2C 1
+
+enum iavf_acpi_programming_method {
+ IAVF_ACPI_PROGRAMMING_METHOD_HW_FVL = 0,
+ IAVF_ACPI_PROGRAMMING_METHOD_AQC_FPK = 1
+};
+
+#define IAVF_WOL_SUPPORT_MASK 0x1
+#define IAVF_ACPI_PROGRAMMING_METHOD_MASK 0x2
+#define IAVF_PROXY_SUPPORT_MASK 0x4
+
+/* Capabilities of a PF or a VF or the whole device */
+struct iavf_hw_capabilities {
+ /* Cloud filter modes:
+ * Mode1: Filter on L4 port only
+ * Mode2: Filter for non-tunneled traffic
+ * Mode3: Filter for tunnel traffic
+ */
+#define IAVF_CLOUD_FILTER_MODE1 0x6
+#define IAVF_CLOUD_FILTER_MODE2 0x7
+#define IAVF_CLOUD_FILTER_MODE3 0x8
+#define IAVF_SWITCH_MODE_MASK 0xF
+
+ bool dcb;
+ bool fcoe;
+ bool iwarp;
+ u32 num_vsis;
+ u32 num_rx_qp;
+ u32 num_tx_qp;
+ u32 base_queue;
+ u32 num_msix_vectors_vf;
+ bool apm_wol_support;
+ enum iavf_acpi_programming_method acpi_prog_method;
+ bool proxy_support;
+};
+
+struct iavf_mac_info {
+ enum iavf_mac_type type;
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+ u8 san_addr[ETH_ALEN];
+ u8 port_addr[ETH_ALEN];
+ u16 max_fcoeq;
+};
+
+#define IAVF_NVM_EXEC_GET_AQ_RESULT 0x0
+#define IAVF_NVM_EXEC_FEATURES 0xe
+#define IAVF_NVM_EXEC_STATUS 0xf
+
+/* NVMUpdate features API */
+#define IAVF_NVMUPD_FEATURES_API_VER_MAJOR 0
+#define IAVF_NVMUPD_FEATURES_API_VER_MINOR 14
+#define IAVF_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN 12
+
+#define IAVF_NVMUPD_FEATURE_FLAT_NVM_SUPPORT BIT(0)
+
+struct iavf_nvmupd_features {
+ u8 major;
+ u8 minor;
+ u16 size;
+ u8 features[IAVF_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN];
+};
+
+#define IAVF_MODULE_SFF_DIAG_CAPAB 0x40
+/* PCI bus types */
+enum iavf_bus_type {
+ iavf_bus_type_unknown = 0,
+ iavf_bus_type_pci,
+ iavf_bus_type_pcix,
+ iavf_bus_type_pci_express,
+ iavf_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum iavf_bus_speed {
+ iavf_bus_speed_unknown = 0,
+ iavf_bus_speed_33 = 33,
+ iavf_bus_speed_66 = 66,
+ iavf_bus_speed_100 = 100,
+ iavf_bus_speed_120 = 120,
+ iavf_bus_speed_133 = 133,
+ iavf_bus_speed_2500 = 2500,
+ iavf_bus_speed_5000 = 5000,
+ iavf_bus_speed_8000 = 8000,
+ iavf_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum iavf_bus_width {
+ iavf_bus_width_unknown = 0,
+ iavf_bus_width_pcie_x1 = 1,
+ iavf_bus_width_pcie_x2 = 2,
+ iavf_bus_width_pcie_x4 = 4,
+ iavf_bus_width_pcie_x8 = 8,
+ iavf_bus_width_32 = 32,
+ iavf_bus_width_64 = 64,
+ iavf_bus_width_reserved
+};
+
+/* Bus parameters */
+struct iavf_bus_info {
+ enum iavf_bus_speed speed;
+ enum iavf_bus_width width;
+ enum iavf_bus_type type;
+
+ u16 func;
+ u16 device;
+ u16 lan_id;
+ u16 bus_id;
+};
+
+#define IAVF_MAX_USER_PRIORITY 8
+#define IAVF_TLV_STATUS_OPER 0x1
+#define IAVF_TLV_STATUS_SYNC 0x2
+#define IAVF_TLV_STATUS_ERR 0x4
+#define IAVF_CEE_OPER_MAX_APPS 3
+#define IAVF_APP_PROTOID_FCOE 0x8906
+#define IAVF_APP_PROTOID_ISCSI 0x0cbc
+#define IAVF_APP_PROTOID_FIP 0x8914
+#define IAVF_APP_SEL_ETHTYPE 0x1
+#define IAVF_APP_SEL_TCPIP 0x2
+#define IAVF_CEE_APP_SEL_ETHTYPE 0x0
+#define IAVF_CEE_APP_SEL_TCPIP 0x1
+
+/* Port hardware description */
+struct iavf_hw {
+ u8 *hw_addr;
+ void *back;
+
+ /* subsystem structs */
+ struct iavf_mac_info mac;
+ struct iavf_bus_info bus;
+
+ /* pci info */
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+
+ /* capabilities for entire device and PCI func */
+ struct iavf_hw_capabilities dev_caps;
+
+ /* Admin Queue info */
+ struct iavf_adminq_info aq;
+
+ /* WoL and proxy support */
+ u16 num_wol_proxy_filters;
+ u16 wol_proxy_vsi_seid;
+
+#define IAVF_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
+#define IAVF_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
+#define IAVF_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
+#define IAVF_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
+#define IAVF_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4)
+ u64 flags;
+
+ /* NVMUpdate features */
+ struct iavf_nvmupd_features nvmupd_features;
+
+ /* debug mask */
+ u32 debug_mask;
+ char err_str[16];
+};
+
+struct iavf_driver_version {
+ u8 major_version;
+ u8 minor_version;
+ u8 build_version;
+ u8 subbuild_version;
+ u8 driver_string[32];
+};
+
+/* RX Descriptors */
+union iavf_16byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fd_id; /* Flow director filter id */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ } hi_dword;
+ } qword0;
+ struct {
+ /* ext status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ } wb; /* writeback */
+};
+
+union iavf_32byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+		/* bit 0 of hdr_addr is the DD bit */
+ __le64 rsvd1;
+ __le64 rsvd2;
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ /* Flow director filter id in case of
+ * Programming status desc WB
+ */
+ __le32 fd_id;
+ } hi_dword;
+ } qword0;
+ struct {
+ /* status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ struct {
+ __le16 ext_status; /* extended status */
+ __le16 rsvd;
+ __le16 l2tag2_1;
+ __le16 l2tag2_2;
+ } qword2;
+ struct {
+ union {
+ __le32 flex_bytes_lo;
+ __le32 pe_status;
+ } lo_dword;
+ union {
+ __le32 flex_bytes_hi;
+ __le32 fd_id;
+ } hi_dword;
+ } qword3;
+ } wb; /* writeback */
+};
+
+#define IAVF_RXD_QW0_MIRROR_STATUS_SHIFT 8
+#define IAVF_RXD_QW0_MIRROR_STATUS_MASK (0x3FUL << \
+ IAVF_RXD_QW0_MIRROR_STATUS_SHIFT)
+#define IAVF_RXD_QW0_FCOEINDX_SHIFT 0
+#define IAVF_RXD_QW0_FCOEINDX_MASK (0xFFFUL << \
+ IAVF_RXD_QW0_FCOEINDX_SHIFT)
+
+enum iavf_rx_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ IAVF_RX_DESC_STATUS_DD_SHIFT = 0,
+ IAVF_RX_DESC_STATUS_EOF_SHIFT = 1,
+ IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
+ IAVF_RX_DESC_STATUS_L3L4P_SHIFT = 3,
+ IAVF_RX_DESC_STATUS_CRCP_SHIFT = 4,
+ IAVF_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
+ IAVF_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
+ IAVF_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
+
+ IAVF_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
+ IAVF_RX_DESC_STATUS_FLM_SHIFT = 11,
+ IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
+ IAVF_RX_DESC_STATUS_LPBK_SHIFT = 14,
+ IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
+ IAVF_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
+ IAVF_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
+ IAVF_RX_DESC_STATUS_LAST /* this entry must be last!!! */
+};
+
+#define IAVF_RXD_QW1_STATUS_SHIFT 0
+#define IAVF_RXD_QW1_STATUS_MASK ((BIT(IAVF_RX_DESC_STATUS_LAST) - 1) \
+ << IAVF_RXD_QW1_STATUS_SHIFT)
+
+#define IAVF_RXD_QW1_STATUS_TSYNINDX_SHIFT IAVF_RX_DESC_STATUS_TSYNINDX_SHIFT
+#define IAVF_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
+ IAVF_RXD_QW1_STATUS_TSYNINDX_SHIFT)
+
+#define IAVF_RXD_QW1_STATUS_TSYNVALID_SHIFT IAVF_RX_DESC_STATUS_TSYNVALID_SHIFT
+#define IAVF_RXD_QW1_STATUS_TSYNVALID_MASK BIT_ULL(IAVF_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+
+#define IAVF_RXD_QW1_STATUS_UMBCAST_SHIFT	IAVF_RX_DESC_STATUS_UMBCAST_SHIFT
+#define IAVF_RXD_QW1_STATUS_UMBCAST_MASK (0x3UL << \
+ IAVF_RXD_QW1_STATUS_UMBCAST_SHIFT)
+
+enum iavf_rx_desc_fltstat_values {
+ IAVF_RX_DESC_FLTSTAT_NO_DATA = 0,
+ IAVF_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */
+ IAVF_RX_DESC_FLTSTAT_RSV = 2,
+ IAVF_RX_DESC_FLTSTAT_RSS_HASH = 3,
+};
+
+#define IAVF_RXD_PACKET_TYPE_UNICAST 0
+#define IAVF_RXD_PACKET_TYPE_MULTICAST 1
+#define IAVF_RXD_PACKET_TYPE_BROADCAST 2
+#define IAVF_RXD_PACKET_TYPE_MIRRORED 3
+
+#define IAVF_RXD_QW1_ERROR_SHIFT 19
+#define IAVF_RXD_QW1_ERROR_MASK (0xFFUL << IAVF_RXD_QW1_ERROR_SHIFT)
+
+enum iavf_rx_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ IAVF_RX_DESC_ERROR_RXE_SHIFT = 0,
+ IAVF_RX_DESC_ERROR_RECIPE_SHIFT = 1,
+ IAVF_RX_DESC_ERROR_HBO_SHIFT = 2,
+ IAVF_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */
+ IAVF_RX_DESC_ERROR_IPE_SHIFT = 3,
+ IAVF_RX_DESC_ERROR_L4E_SHIFT = 4,
+ IAVF_RX_DESC_ERROR_EIPE_SHIFT = 5,
+ IAVF_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
+ IAVF_RX_DESC_ERROR_PPRS_SHIFT = 7
+};
+
+enum iavf_rx_desc_error_l3l4e_fcoe_masks {
+ IAVF_RX_DESC_ERROR_L3L4E_NONE = 0,
+ IAVF_RX_DESC_ERROR_L3L4E_PROT = 1,
+ IAVF_RX_DESC_ERROR_L3L4E_FC = 2,
+ IAVF_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3,
+ IAVF_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4
+};
+
+#define IAVF_RXD_QW1_PTYPE_SHIFT 30
+#define IAVF_RXD_QW1_PTYPE_MASK (0xFFULL << IAVF_RXD_QW1_PTYPE_SHIFT)
+
+/* Packet type non-IP values */
+enum iavf_rx_l2_ptype {
+ IAVF_RX_PTYPE_L2_RESERVED = 0,
+ IAVF_RX_PTYPE_L2_MAC_PAY2 = 1,
+ IAVF_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
+ IAVF_RX_PTYPE_L2_FIP_PAY2 = 3,
+ IAVF_RX_PTYPE_L2_OUI_PAY2 = 4,
+ IAVF_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
+ IAVF_RX_PTYPE_L2_LLDP_PAY2 = 6,
+ IAVF_RX_PTYPE_L2_ECP_PAY2 = 7,
+ IAVF_RX_PTYPE_L2_EVB_PAY2 = 8,
+ IAVF_RX_PTYPE_L2_QCN_PAY2 = 9,
+ IAVF_RX_PTYPE_L2_EAPOL_PAY2 = 10,
+ IAVF_RX_PTYPE_L2_ARP = 11,
+ IAVF_RX_PTYPE_L2_FCOE_PAY3 = 12,
+ IAVF_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
+ IAVF_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
+ IAVF_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
+ IAVF_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
+ IAVF_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
+ IAVF_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
+ IAVF_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
+ IAVF_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
+ IAVF_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
+ IAVF_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
+ IAVF_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
+ IAVF_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
+ IAVF_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153,
+ IAVF_RX_PTYPE_PARSER_ABORTED = 255
+};
+
+struct iavf_rx_ptype_decoded {
+ u32 ptype:8;
+ u32 known:1;
+ u32 outer_ip:1;
+ u32 outer_ip_ver:1;
+ u32 outer_frag:1;
+ u32 tunnel_type:3;
+ u32 tunnel_end_prot:2;
+ u32 tunnel_end_frag:1;
+ u32 inner_prot:4;
+ u32 payload_layer:3;
+};
+
+enum iavf_rx_ptype_outer_ip {
+ IAVF_RX_PTYPE_OUTER_L2 = 0,
+ IAVF_RX_PTYPE_OUTER_IP = 1
+};
+
+enum iavf_rx_ptype_outer_ip_ver {
+ IAVF_RX_PTYPE_OUTER_NONE = 0,
+ IAVF_RX_PTYPE_OUTER_IPV4 = 0,
+ IAVF_RX_PTYPE_OUTER_IPV6 = 1
+};
+
+enum iavf_rx_ptype_outer_fragmented {
+ IAVF_RX_PTYPE_NOT_FRAG = 0,
+ IAVF_RX_PTYPE_FRAG = 1
+};
+
+enum iavf_rx_ptype_tunnel_type {
+ IAVF_RX_PTYPE_TUNNEL_NONE = 0,
+ IAVF_RX_PTYPE_TUNNEL_IP_IP = 1,
+ IAVF_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
+ IAVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
+ IAVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
+};
+
+enum iavf_rx_ptype_tunnel_end_prot {
+ IAVF_RX_PTYPE_TUNNEL_END_NONE = 0,
+ IAVF_RX_PTYPE_TUNNEL_END_IPV4 = 1,
+ IAVF_RX_PTYPE_TUNNEL_END_IPV6 = 2,
+};
+
+enum iavf_rx_ptype_inner_prot {
+ IAVF_RX_PTYPE_INNER_PROT_NONE = 0,
+ IAVF_RX_PTYPE_INNER_PROT_UDP = 1,
+ IAVF_RX_PTYPE_INNER_PROT_TCP = 2,
+ IAVF_RX_PTYPE_INNER_PROT_SCTP = 3,
+ IAVF_RX_PTYPE_INNER_PROT_ICMP = 4,
+ IAVF_RX_PTYPE_INNER_PROT_TIMESYNC = 5
+};
+
+enum iavf_rx_ptype_payload_layer {
+ IAVF_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
+ IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
+ IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
+ IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
+};
+
+#define IAVF_RX_PTYPE_BIT_MASK 0x0FFFFFFF
+#define IAVF_RX_PTYPE_SHIFT 56
+
+#define IAVF_RXD_QW1_LENGTH_PBUF_SHIFT 38
+#define IAVF_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
+ IAVF_RXD_QW1_LENGTH_PBUF_SHIFT)
+
+#define IAVF_RXD_QW1_LENGTH_HBUF_SHIFT 52
+#define IAVF_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \
+ IAVF_RXD_QW1_LENGTH_HBUF_SHIFT)
+
+#define IAVF_RXD_QW1_LENGTH_SPH_SHIFT 63
+#define IAVF_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(IAVF_RXD_QW1_LENGTH_SPH_SHIFT)
+
+#define IAVF_RXD_QW1_NEXTP_SHIFT 38
+#define IAVF_RXD_QW1_NEXTP_MASK (0x1FFFULL << IAVF_RXD_QW1_NEXTP_SHIFT)
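+/* Illustrative decode (editor's sketch, not driver code): extracting the
+ * packet-buffer length, ptype and DD bit from a written-back descriptor's
+ * qword1. "rxd" is a hypothetical descriptor pointer, and the little-endian
+ * value is assumed to have already been converted to CPU byte order.
+ *
+ *	u64 qw1   = rxd->wb.qword1.status_error_len;
+ *	u32 len   = (qw1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
+ *		    IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
+ *	u8  ptype = (qw1 & IAVF_RXD_QW1_PTYPE_MASK) >>
+ *		    IAVF_RXD_QW1_PTYPE_SHIFT;
+ *	bool dd   = qw1 & BIT(IAVF_RX_DESC_STATUS_DD_SHIFT);
+ */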
+
+#define IAVF_RXD_QW2_EXT_STATUS_SHIFT 0
+#define IAVF_RXD_QW2_EXT_STATUS_MASK (0xFFFFFUL << \
+ IAVF_RXD_QW2_EXT_STATUS_SHIFT)
+
+enum iavf_rx_desc_ext_status_bits {
+ /* Note: These are predefined bit offsets */
+ IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0,
+ IAVF_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
+ IAVF_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
+ IAVF_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
+ IAVF_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
+ IAVF_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
+ IAVF_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
+};
+
+#define IAVF_RXD_QW2_L2TAG2_SHIFT 0
+#define IAVF_RXD_QW2_L2TAG2_MASK (0xFFFFUL << IAVF_RXD_QW2_L2TAG2_SHIFT)
+
+#define IAVF_RXD_QW2_L2TAG3_SHIFT 16
+#define IAVF_RXD_QW2_L2TAG3_MASK (0xFFFFUL << IAVF_RXD_QW2_L2TAG3_SHIFT)
+
+enum iavf_rx_desc_pe_status_bits {
+ /* Note: These are predefined bit offsets */
+ IAVF_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */
+ IAVF_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */
+ IAVF_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */
+ IAVF_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24,
+ IAVF_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25,
+ IAVF_RX_DESC_PE_STATUS_PORTV_SHIFT = 26,
+ IAVF_RX_DESC_PE_STATUS_URG_SHIFT = 27,
+ IAVF_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28,
+ IAVF_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29
+};
+
+#define IAVF_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38
+#define IAVF_RX_PROG_STATUS_DESC_LENGTH 0x2000000
+
+#define IAVF_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2
+#define IAVF_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \
+ IAVF_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)
+
+#define IAVF_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT 0
+#define IAVF_RX_PROG_STATUS_DESC_QW1_STATUS_MASK (0x7FFFUL << \
+ IAVF_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT)
+
+#define IAVF_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19
+#define IAVF_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \
+ IAVF_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)
+
+enum iavf_rx_prog_status_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ IAVF_RX_PROG_STATUS_DESC_DD_SHIFT = 0,
+ IAVF_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */
+};
+
+enum iavf_rx_prog_status_desc_prog_id_masks {
+ IAVF_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1,
+ IAVF_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2,
+ IAVF_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4,
+};
+
+enum iavf_rx_prog_status_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ IAVF_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
+ IAVF_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1,
+ IAVF_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
+ IAVF_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
+};
+
+#define IAVF_TWO_BIT_MASK 0x3
+#define IAVF_THREE_BIT_MASK 0x7
+#define IAVF_FOUR_BIT_MASK 0xF
+#define IAVF_EIGHTEEN_BIT_MASK 0x3FFFF
+
+/* TX Descriptor */
+struct iavf_tx_desc {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le64 cmd_type_offset_bsz;
+};
+
+#define IAVF_TXD_QW1_DTYPE_SHIFT 0
+#define IAVF_TXD_QW1_DTYPE_MASK (0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
+
+enum iavf_tx_desc_dtype_value {
+ IAVF_TX_DESC_DTYPE_DATA = 0x0,
+ IAVF_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */
+ IAVF_TX_DESC_DTYPE_CONTEXT = 0x1,
+ IAVF_TX_DESC_DTYPE_FCOE_CTX = 0x2,
+ IAVF_TX_DESC_DTYPE_FILTER_PROG = 0x8,
+ IAVF_TX_DESC_DTYPE_DDP_CTX = 0x9,
+ IAVF_TX_DESC_DTYPE_FLEX_DATA = 0xB,
+ IAVF_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC,
+ IAVF_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD,
+ IAVF_TX_DESC_DTYPE_DESC_DONE = 0xF
+};
+
+#define IAVF_TXD_QW1_CMD_SHIFT 4
+#define IAVF_TXD_QW1_CMD_MASK (0x3FFUL << IAVF_TXD_QW1_CMD_SHIFT)
+
+enum iavf_tx_desc_cmd_bits {
+ IAVF_TX_DESC_CMD_EOP = 0x0001,
+ IAVF_TX_DESC_CMD_RS = 0x0002,
+ IAVF_TX_DESC_CMD_ICRC = 0x0004,
+ IAVF_TX_DESC_CMD_IL2TAG1 = 0x0008,
+ IAVF_TX_DESC_CMD_DUMMY = 0x0010,
+ IAVF_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */
+ IAVF_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
+ IAVF_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
+ IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
+ IAVF_TX_DESC_CMD_FCOET = 0x0080,
+ IAVF_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */
+ IAVF_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
+ IAVF_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */
+ IAVF_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
+ IAVF_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */
+ IAVF_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */
+ IAVF_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */
+ IAVF_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */
+};
+
+#define IAVF_TXD_QW1_OFFSET_SHIFT 16
+#define IAVF_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \
+ IAVF_TXD_QW1_OFFSET_SHIFT)
+
+enum iavf_tx_desc_length_fields {
+ /* Note: These are predefined bit offsets */
+ IAVF_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */
+ IAVF_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */
+ IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */
+};
+
+#define IAVF_TXD_QW1_MACLEN_MASK (0x7FUL << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT)
+#define IAVF_TXD_QW1_IPLEN_MASK (0x7FUL << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define IAVF_TXD_QW1_L4LEN_MASK (0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define IAVF_TXD_QW1_FCLEN_MASK (0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+
+#define IAVF_TXD_QW1_TX_BUF_SZ_SHIFT 34
+#define IAVF_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \
+ IAVF_TXD_QW1_TX_BUF_SZ_SHIFT)
+
+#define IAVF_TXD_QW1_L2TAG1_SHIFT 48
+#define IAVF_TXD_QW1_L2TAG1_MASK (0xFFFFULL << IAVF_TXD_QW1_L2TAG1_SHIFT)
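+/* Illustrative sketch (not part of the driver): composing the
+ * cmd_type_offset_bsz qword for a simple single-buffer data descriptor;
+ * "size" is a hypothetical buffer length in bytes.
+ *
+ *	u64 qw1 = IAVF_TX_DESC_DTYPE_DATA |
+ *		  ((u64)(IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS |
+ *			 IAVF_TX_DESC_CMD_ICRC) << IAVF_TXD_QW1_CMD_SHIFT) |
+ *		  ((u64)size << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT);
+ */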
+
+/* Context descriptors */
+struct iavf_tx_context_desc {
+ __le32 tunneling_params;
+ __le16 l2tag2;
+ __le16 rsvd;
+ __le64 type_cmd_tso_mss;
+};
+
+#define IAVF_TXD_CTX_QW1_DTYPE_SHIFT 0
+#define IAVF_TXD_CTX_QW1_DTYPE_MASK (0xFUL << IAVF_TXD_CTX_QW1_DTYPE_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_CMD_SHIFT 4
+#define IAVF_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << IAVF_TXD_CTX_QW1_CMD_SHIFT)
+
+enum iavf_tx_ctx_desc_cmd_bits {
+ IAVF_TX_CTX_DESC_TSO = 0x01,
+ IAVF_TX_CTX_DESC_TSYN = 0x02,
+ IAVF_TX_CTX_DESC_IL2TAG2 = 0x04,
+ IAVF_TX_CTX_DESC_IL2TAG2_IL2H = 0x08,
+ IAVF_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
+ IAVF_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
+ IAVF_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
+ IAVF_TX_CTX_DESC_SWTCH_VSI = 0x30,
+ IAVF_TX_CTX_DESC_SWPE = 0x40
+};
+
+struct iavf_nop_desc {
+ __le64 rsvd;
+ __le64 dtype_cmd;
+};
+
+#define IAVF_TXD_NOP_QW1_DTYPE_SHIFT 0
+#define IAVF_TXD_NOP_QW1_DTYPE_MASK (0xFUL << IAVF_TXD_NOP_QW1_DTYPE_SHIFT)
+
+#define IAVF_TXD_NOP_QW1_CMD_SHIFT 4
+#define IAVF_TXD_NOP_QW1_CMD_MASK (0x7FUL << IAVF_TXD_NOP_QW1_CMD_SHIFT)
+
+enum iavf_tx_nop_desc_cmd_bits {
+ /* Note: These are predefined bit offsets */
+ IAVF_TX_NOP_DESC_EOP_SHIFT = 0,
+ IAVF_TX_NOP_DESC_RS_SHIFT = 1,
+ IAVF_TX_NOP_DESC_RSV_SHIFT = 2 /* 5 bits */
+};
+
+/* Packet Classifier Types for filters */
+enum iavf_filter_pctype {
+ /* Note: Values 0-28 are reserved for future use.
+	 * Values 29, 30, and 32 are not supported on XL710 and X710.
+ */
+ IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
+ IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
+ IAVF_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
+ IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
+ IAVF_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
+ IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
+ IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
+ IAVF_FILTER_PCTYPE_FRAG_IPV4 = 36,
+ /* Note: Values 37-38 are reserved for future use.
+	 * Values 39, 40, and 42 are not supported on XL710 and X710.
+ */
+ IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
+ IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
+ IAVF_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+ IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
+ IAVF_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
+ IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
+ IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
+ IAVF_FILTER_PCTYPE_FRAG_IPV6 = 46,
+ /* Note: Value 47 is reserved for future use */
+ IAVF_FILTER_PCTYPE_FCOE_OX = 48,
+ IAVF_FILTER_PCTYPE_FCOE_RX = 49,
+ IAVF_FILTER_PCTYPE_FCOE_OTHER = 50,
+ /* Note: Values 51-62 are reserved for future use */
+ IAVF_FILTER_PCTYPE_L2_PAYLOAD = 63,
+};
+
+#define IAVF_TXD_FLTR_QW1_DTYPE_SHIFT 0
+#define IAVF_TXD_FLTR_QW1_DTYPE_MASK (0xFUL << IAVF_TXD_FLTR_QW1_DTYPE_SHIFT)
+
+#define IAVF_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \
+ IAVF_TXD_FLTR_QW1_CMD_SHIFT)
+#define IAVF_TXD_FLTR_QW1_ATR_MASK BIT_ULL(IAVF_TXD_FLTR_QW1_ATR_SHIFT)
+
+
+#define IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT 30
+#define IAVF_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \
+ IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_MSS_SHIFT 50
+#define IAVF_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \
+ IAVF_TXD_CTX_QW1_MSS_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_VSI_SHIFT 50
+#define IAVF_TXD_CTX_QW1_VSI_MASK (0x1FFULL << IAVF_TXD_CTX_QW1_VSI_SHIFT)
+
+#define IAVF_TXD_CTX_QW0_EXT_IP_SHIFT 0
+#define IAVF_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \
+ IAVF_TXD_CTX_QW0_EXT_IP_SHIFT)
+
+enum iavf_tx_ctx_desc_eipt_offload {
+ IAVF_TX_CTX_EXT_IP_NONE = 0x0,
+ IAVF_TX_CTX_EXT_IP_IPV6 = 0x1,
+ IAVF_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
+ IAVF_TX_CTX_EXT_IP_IPV4 = 0x3
+};
+
+#define IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2
+#define IAVF_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \
+ IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW0_NATT_SHIFT 9
+#define IAVF_TXD_CTX_QW0_NATT_MASK (0x3ULL << IAVF_TXD_CTX_QW0_NATT_SHIFT)
+
+#define IAVF_TXD_CTX_UDP_TUNNELING BIT_ULL(IAVF_TXD_CTX_QW0_NATT_SHIFT)
+#define IAVF_TXD_CTX_GRE_TUNNELING (0x2ULL << IAVF_TXD_CTX_QW0_NATT_SHIFT)
+
+#define IAVF_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
+#define IAVF_TXD_CTX_QW0_EIP_NOINC_MASK \
+ BIT_ULL(IAVF_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+
+#define IAVF_TXD_CTX_EIP_NOINC_IPID_CONST IAVF_TXD_CTX_QW0_EIP_NOINC_MASK
+
+#define IAVF_TXD_CTX_QW0_NATLEN_SHIFT 12
+#define IAVF_TXD_CTX_QW0_NATLEN_MASK (0X7FULL << \
+ IAVF_TXD_CTX_QW0_NATLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW0_DECTTL_SHIFT 19
+#define IAVF_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
+ IAVF_TXD_CTX_QW0_DECTTL_SHIFT)
+
+#define IAVF_TXD_CTX_QW0_L4T_CS_SHIFT 23
+#define IAVF_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(IAVF_TXD_CTX_QW0_L4T_CS_SHIFT)
+
+/* Statistics collected by each port, VSI, VEB, and S-channel */
+struct iavf_eth_stats {
+ u64 rx_bytes; /* gorc */
+ u64 rx_unicast; /* uprc */
+ u64 rx_multicast; /* mprc */
+ u64 rx_broadcast; /* bprc */
+ u64 rx_discards; /* rdpc */
+ u64 rx_unknown_protocol; /* rupp */
+ u64 tx_bytes; /* gotc */
+ u64 tx_unicast; /* uptc */
+ u64 tx_multicast; /* mptc */
+ u64 tx_broadcast; /* bptc */
+ u64 tx_discards; /* tdpc */
+ u64 tx_errors; /* tepc */
+};
+#define IAVF_SR_PCIE_ANALOG_CONFIG_PTR 0x03
+#define IAVF_SR_PHY_ANALOG_CONFIG_PTR 0x04
+#define IAVF_SR_OPTION_ROM_PTR 0x05
+#define IAVF_SR_RO_PCIR_REGS_AUTO_LOAD_PTR 0x06
+#define IAVF_SR_AUTO_GENERATED_POINTERS_PTR 0x07
+#define IAVF_SR_PCIR_REGS_AUTO_LOAD_PTR 0x08
+#define IAVF_SR_EMP_GLOBAL_MODULE_PTR 0x09
+#define IAVF_SR_RO_PCIE_LCB_PTR 0x0A
+#define IAVF_SR_EMP_IMAGE_PTR 0x0B
+#define IAVF_SR_PE_IMAGE_PTR 0x0C
+#define IAVF_SR_CSR_PROTECTED_LIST_PTR 0x0D
+#define IAVF_SR_MNG_CONFIG_PTR 0x0E
+#define IAVF_SR_PBA_FLAGS 0x15
+#define IAVF_SR_PBA_BLOCK_PTR 0x16
+#define IAVF_SR_BOOT_CONFIG_PTR 0x17
+#define IAVF_SR_PERMANENT_SAN_MAC_ADDRESS_PTR 0x28
+#define IAVF_SR_NVM_MAP_VERSION 0x29
+#define IAVF_SR_NVM_IMAGE_VERSION 0x2A
+#define IAVF_SR_NVM_STRUCTURE_VERSION 0x2B
+#define IAVF_SR_PXE_SETUP_PTR 0x30
+#define IAVF_SR_PXE_CONFIG_CUST_OPTIONS_PTR 0x31
+#define IAVF_SR_NVM_ORIGINAL_EETRACK_LO 0x34
+#define IAVF_SR_NVM_ORIGINAL_EETRACK_HI 0x35
+#define IAVF_SR_SW_ETHERNET_MAC_ADDRESS_PTR 0x37
+#define IAVF_SR_POR_REGS_AUTO_LOAD_PTR 0x38
+#define IAVF_SR_EMPR_REGS_AUTO_LOAD_PTR 0x3A
+#define IAVF_SR_GLOBR_REGS_AUTO_LOAD_PTR 0x3B
+#define IAVF_SR_CORER_REGS_AUTO_LOAD_PTR 0x3C
+#define IAVF_SR_PHY_ACTIVITY_LIST_PTR 0x3D
+#define IAVF_SR_1ST_FREE_PROVISION_AREA_PTR 0x40
+#define IAVF_SR_4TH_FREE_PROVISION_AREA_PTR 0x42
+#define IAVF_SR_3RD_FREE_PROVISION_AREA_PTR 0x44
+#define IAVF_SR_2ND_FREE_PROVISION_AREA_PTR 0x46
+#define IAVF_SR_EMP_SR_SETTINGS_PTR 0x48
+#define IAVF_SR_FEATURE_CONFIGURATION_PTR 0x49
+#define IAVF_SR_CONFIGURATION_METADATA_PTR 0x4D
+#define IAVF_SR_IMMEDIATE_VALUES_PTR 0x4E
+#define IAVF_SR_OCP_CFG_WORD0 0x2B
+#define IAVF_SR_OCP_ENABLED BIT(15)
+#define IAVF_SR_BUF_ALIGNMENT 4096
+
+
+struct iavf_lldp_variables {
+ u16 length;
+ u16 adminstatus;
+ u16 msgfasttx;
+ u16 msgtxinterval;
+ u16 txparams;
+ u16 timers;
+ u16 crc8;
+};
+
+/* Offsets into Alternate RAM */
+#define IAVF_ALT_STRUCT_FIRST_PF_OFFSET 0 /* in dwords */
+#define IAVF_ALT_STRUCT_DWORDS_PER_PF 64 /* in dwords */
+#define IAVF_ALT_STRUCT_OUTER_VLAN_TAG_OFFSET 0xD /* in dwords */
+#define IAVF_ALT_STRUCT_USER_PRIORITY_OFFSET 0xC /* in dwords */
+#define IAVF_ALT_STRUCT_MIN_BW_OFFSET 0xE /* in dwords */
+#define IAVF_ALT_STRUCT_MAX_BW_OFFSET 0xF /* in dwords */
+
+/* Alternate RAM Bandwidth Masks */
+#define IAVF_ALT_BW_VALUE_MASK 0xFF
+#define IAVF_ALT_BW_RELATIVE_MASK 0x40000000
+#define IAVF_ALT_BW_VALID_MASK 0x80000000
+
+#define IAVF_DDP_TRACKID_RDONLY 0
+#define IAVF_DDP_TRACKID_INVALID 0xFFFFFFFF
+#define SECTION_TYPE_RB_MMIO 0x00001800
+#define SECTION_TYPE_RB_AQ 0x00001801
+#define SECTION_TYPE_PROTO 0x80000002
+#define SECTION_TYPE_PCTYPE 0x80000003
+#define SECTION_TYPE_PTYPE 0x80000004
+struct iavf_profile_tlv_section_record {
+ u8 rtype;
+ u8 type;
+ u16 len;
+ u8 data[12];
+};
+
+/* Generic AQ section in profile */
+struct iavf_profile_aq_section {
+ u16 opcode;
+ u16 flags;
+ u8 param[16];
+ u16 datalen;
+ u8 data[1];
+};
+
+#endif /* _IAVF_TYPE_H_ */
diff --git a/src/spdk/dpdk/drivers/common/iavf/meson.build b/src/spdk/dpdk/drivers/common/iavf/meson.build
new file mode 100644
index 000000000..1f4d8b898
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2019-2020 Intel Corporation
+
+sources = files('iavf_adminq.c', 'iavf_common.c', 'iavf_impl.c')
+
+if cc.has_argument('-Wno-pointer-to-int-cast')
+ cflags += '-Wno-pointer-to-int-cast'
+endif
diff --git a/src/spdk/dpdk/drivers/common/iavf/rte_common_iavf_version.map b/src/spdk/dpdk/drivers/common/iavf/rte_common_iavf_version.map
new file mode 100644
index 000000000..92ceac108
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/rte_common_iavf_version.map
@@ -0,0 +1,12 @@
+DPDK_21 {
+ global:
+
+ iavf_init_adminq;
+ iavf_shutdown_adminq;
+ iavf_aq_send_msg_to_pf;
+ iavf_clean_arq_element;
+ iavf_set_mac_type;
+ iavf_vf_parse_hw_config;
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/common/iavf/virtchnl.h b/src/spdk/dpdk/drivers/common/iavf/virtchnl.h
new file mode 100644
index 000000000..79515ee8b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/iavf/virtchnl.h
@@ -0,0 +1,1311 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2020 Intel Corporation
+ */
+
+#ifndef _VIRTCHNL_H_
+#define _VIRTCHNL_H_
+
+/* Description:
+ * This header file describes the VF-PF communication protocol used
+ * by the drivers for all devices starting from our 40G product line.
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * The Firmware copies the cookie fields when sending messages between the
+ * PF and VF, but uses all other fields internally. Due to this limitation,
+ * we must send all messages as "indirect", i.e. using an external buffer.
+ *
+ * All the VSI indexes are relative to the VF. Each VF can have a maximum
+ * of three VSIs. All the queue indexes are relative to the VSI. Each VF
+ * can have a maximum of sixteen queues for all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The return value
+ * is of status_code type, defined in the shared type.h.
+ *
+ * In general, VF driver initialization should roughly follow the order of
+ * these opcodes. The VF driver must first validate the API version of the
+ * PF driver, then request a reset, then get resources, then configure
+ * queues and interrupts. After these operations are complete, the VF
+ * driver may start its queues, optionally add MAC and VLAN filters, and
+ * process traffic.
+ */
+
+/* START GENERIC DEFINES
+ * Need to ensure the following enums and defines hold the same meaning and
+ * value in current and future projects
+ */
+
+/* Error Codes */
+enum virtchnl_status_code {
+ VIRTCHNL_STATUS_SUCCESS = 0,
+ VIRTCHNL_STATUS_ERR_PARAM = -5,
+ VIRTCHNL_STATUS_ERR_NO_MEMORY = -18,
+ VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
+ VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
+ VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
+ VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53,
+ VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64,
+};
+
+/* Backward compatibility */
+#define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM
+#define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED
+
+#define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT 0x0
+#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
+#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
+#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
+#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4
+#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5
+#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6
+#define VIRTCHNL_LINK_SPEED_5GB_SHIFT 0x7
+
+enum virtchnl_link_speed {
+ VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
+ VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
+ VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
+ VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_2_5GB = BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_5GB = BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT),
+};
+
+/* for hsplit_0 field of Rx HMC context */
+/* deprecated with IAVF 1.0 */
+enum virtchnl_rx_hsplit {
+ VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0,
+ VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1,
+ VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2,
+ VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
+ VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
+};
+
+#define VIRTCHNL_ETH_LENGTH_OF_ADDRESS 6
+/* END GENERIC DEFINES */
+
+/* Opcodes for VF-PF communication. These are placed in the v_opcode field
+ * of the virtchnl_msg structure.
+ */
+enum virtchnl_ops {
+/* The PF sends status change events to VFs using
+ * the VIRTCHNL_OP_EVENT opcode.
+ * VFs send requests to the PF using the other ops.
+ * Use of "advanced opcode" features must be negotiated as part of the
+ * capabilities exchange; such features are not considered part of the
+ * base mode feature set.
+ */
+ VIRTCHNL_OP_UNKNOWN = 0,
+ VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
+ VIRTCHNL_OP_RESET_VF = 2,
+ VIRTCHNL_OP_GET_VF_RESOURCES = 3,
+ VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
+ VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
+ VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
+ VIRTCHNL_OP_ENABLE_QUEUES = 8,
+ VIRTCHNL_OP_DISABLE_QUEUES = 9,
+ VIRTCHNL_OP_ADD_ETH_ADDR = 10,
+ VIRTCHNL_OP_DEL_ETH_ADDR = 11,
+ VIRTCHNL_OP_ADD_VLAN = 12,
+ VIRTCHNL_OP_DEL_VLAN = 13,
+ VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
+ VIRTCHNL_OP_GET_STATS = 15,
+ VIRTCHNL_OP_RSVD = 16,
+ VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+	/* opcodes 18 through 22 are reserved */
+ VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
+ VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
+ VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
+ VIRTCHNL_OP_SET_RSS_HENA = 26,
+ VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
+ VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
+ VIRTCHNL_OP_REQUEST_QUEUES = 29,
+ VIRTCHNL_OP_ENABLE_CHANNELS = 30,
+ VIRTCHNL_OP_DISABLE_CHANNELS = 31,
+ VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
+ VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
+ /* opcodes 34, 35, 36, 37 and 38 are reserved */
+ VIRTCHNL_OP_DCF_CMD_DESC = 39,
+ VIRTCHNL_OP_DCF_CMD_BUFF = 40,
+ VIRTCHNL_OP_DCF_DISABLE = 41,
+ VIRTCHNL_OP_DCF_GET_VSI_MAP = 42,
+ VIRTCHNL_OP_DCF_GET_PKG_INFO = 43,
+ VIRTCHNL_OP_GET_SUPPORTED_RXDIDS = 44,
+ VIRTCHNL_OP_ADD_RSS_CFG = 45,
+ VIRTCHNL_OP_DEL_RSS_CFG = 46,
+ VIRTCHNL_OP_ADD_FDIR_FILTER = 47,
+ VIRTCHNL_OP_DEL_FDIR_FILTER = 48,
+ VIRTCHNL_OP_QUERY_FDIR_FILTER = 49,
+};
+
+/* These macros are used to generate compilation errors if a structure/union
+ * is not exactly the correct length. It gives a divide-by-zero error if the
+ * structure/union is not of the correct size; otherwise it creates an enum
+ * that is never used.
+ */
+#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
+ { virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_assert_enum_##X \
+ { virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }
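+/* Expansion example (illustrative, with a hypothetical "struct foo"):
+ * VIRTCHNL_CHECK_STRUCT_LEN(8, foo) expands to
+ *	enum virtchnl_static_assert_enum_foo
+ *	{ virtchnl_static_assert_foo = (8)/((sizeof(struct foo) == (8)) ? 1 : 0) };
+ * If sizeof(struct foo) != 8 the divisor is zero and compilation fails;
+ * otherwise the enum is simply never used.
+ */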
+
+/* Virtual channel message descriptor. This overlays the admin queue
+ * descriptor. All other data is passed in external buffers.
+ */
+
+struct virtchnl_msg {
+ u8 pad[8]; /* AQ flags/opcode/len/retval fields */
+ enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
+ enum virtchnl_status_code v_retval; /* ditto for desc->retval */
+ u32 vfid; /* used by PF when sending to VF */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
+
+/* Message descriptions and data structures. */
+
+/* VIRTCHNL_OP_VERSION
+ * VF posts its version number to the PF. PF responds with its version number
+ * in the same format, along with a return code.
+ * Reply from PF has its major/minor versions also in param0 and param1.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * This enum element MUST always be specified as == 1, regardless of other
+ * changes in the API. The PF must always respond to this message without
+ * error regardless of version mismatch.
+ */
+#define VIRTCHNL_VERSION_MAJOR 1
+#define VIRTCHNL_VERSION_MINOR 1
+#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
+
+struct virtchnl_version_info {
+ u32 major;
+ u32 minor;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);
+
+#define VF_IS_V10(_ver) (((_ver)->major == 1) && ((_ver)->minor == 0))
+#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
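+/* Illustrative sketch ("pf_ver" is a hypothetical version reply from the
+ * PF): a VF can only operate on a major-version match, and should warn on
+ * a minor-version mismatch.
+ *
+ *	if (pf_ver.major != VIRTCHNL_VERSION_MAJOR)
+ *		return -1;	   (cannot operate)
+ *	if (pf_ver.minor != VIRTCHNL_VERSION_MINOR)
+ *		log_warning();	   (operate, but warn; hypothetical helper)
+ */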
+
+/* VIRTCHNL_OP_RESET_VF
+ * VF sends this request to PF with no parameters
+ * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
+ * until reset completion is indicated. The admin queue must be reinitialized
+ * after this operation.
+ *
+ * When reset is complete, PF must ensure that all queues in all VSIs associated
+ * with the VF are stopped, all queue configurations in the HMC are set to 0,
+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs
+ * are cleared.
+ */
+
+/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
+ * vsi_type should always be 6 for backward compatibility. Add other fields
+ * as needed.
+ */
+enum virtchnl_vsi_type {
+ VIRTCHNL_VSI_TYPE_INVALID = 0,
+ VIRTCHNL_VSI_SRIOV = 6,
+};
+
+/* VIRTCHNL_OP_GET_VF_RESOURCES
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
+ * PF responds with an indirect message containing
+ * virtchnl_vf_resource and one or more
+ * virtchnl_vsi_resource structures.
+ */
+
+struct virtchnl_vsi_resource {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ enum virtchnl_vsi_type vsi_type;
+ u16 qset_handle;
+ u8 default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
+
+/* VF capability flags
+ * The VIRTCHNL_VF_OFFLOAD_L2 flag covers the base mode L2 offloads,
+ * including TX/RX checksum offloading and TSO for non-tunnelled packets.
+ */
+#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001
+#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
+#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004
+#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
+#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
+#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
+#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040
+#define VIRTCHNL_VF_OFFLOAD_CRC 0x00000080
+#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
+#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
+#define VIRTCHNL_VF_OFFLOAD_RSS_PF		0x00080000
+#define VIRTCHNL_VF_OFFLOAD_ENCAP		0x00100000
+#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM		0x00200000
+#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM	0x00400000
+#define VIRTCHNL_VF_OFFLOAD_ADQ			0x00800000
+#define VIRTCHNL_VF_OFFLOAD_ADQ_V2		0x01000000
+#define VIRTCHNL_VF_OFFLOAD_USO			0x02000000
+#define VIRTCHNL_VF_CAP_DCF			0x40000000
+#define VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC	0x04000000
+#define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF		0x08000000
+#define VIRTCHNL_VF_OFFLOAD_FDIR_PF		0x10000000
+	/* 0x80000000 is reserved */
+
+/* The capability flags below are not offloads */
+#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED 0x00000080
+#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
+ VIRTCHNL_VF_OFFLOAD_VLAN | \
+ VIRTCHNL_VF_OFFLOAD_RSS_PF)
+
+struct virtchnl_vf_resource {
+ u16 num_vsis;
+ u16 num_queue_pairs;
+ u16 max_vectors;
+ u16 max_mtu;
+
+ u32 vf_cap_flags;
+ u32 rss_key_size;
+ u32 rss_lut_size;
+
+ struct virtchnl_vsi_resource vsi_res[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);
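+/* Illustrative check (editor's sketch; "res" is a hypothetical
+ * virtchnl_vf_resource pointer returned by the PF): a capability must be
+ * tested in vf_cap_flags before the corresponding feature is used.
+ *
+ *	if (res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+ *		(flexible Rx descriptors may be selected via rxdid)
+ */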
+
+/* VIRTCHNL_OP_CONFIG_TX_QUEUE
+ * VF sends this message to set up parameters for one TX queue.
+ * External data buffer contains one instance of virtchnl_txq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Tx queue config info */
+struct virtchnl_txq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u16 ring_len; /* number of descriptors, multiple of 8 */
+ u16 headwb_enabled; /* deprecated with AVF 1.0 */
+ u64 dma_ring_addr;
+ u64 dma_headwb_addr; /* deprecated with AVF 1.0 */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
+
+/* VIRTCHNL_OP_CONFIG_RX_QUEUE
+ * VF sends this message to set up parameters for one RX queue.
+ * External data buffer contains one instance of virtchnl_rxq_info.
+ * PF configures requested queue and returns a status code. The
+ * crc_disable flag disables CRC stripping on the VF. Setting
+ * the crc_disable flag to 1 will disable CRC stripping for each
+ * queue in the VF where the flag is set. The VIRTCHNL_VF_OFFLOAD_CRC
+ * offload must have been set prior to sending this info or the PF
+ * will ignore the request. This flag should be set the same for
+ * all of the queues for a VF.
+ */
+
+/* Rx queue config info */
+struct virtchnl_rxq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u32 ring_len; /* number of descriptors, multiple of 32 */
+ u16 hdr_size;
+ u16 splithdr_enabled; /* deprecated with AVF 1.0 */
+ u32 databuffer_size;
+ u32 max_pkt_size;
+ u8 crc_disable;
+ /* only used when VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC is supported */
+ u8 rxdid;
+ u8 pad1[2];
+ u64 dma_ring_addr;
+ enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
+ u32 pad2;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
+
+/* VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ * NOTE: The VF is not required to configure all queues in a single request.
+ * It may send multiple messages. PF drivers must correctly handle all VF
+ * requests.
+ */
+struct virtchnl_queue_pair_info {
+ /* NOTE: vsi_id and queue_id should be identical for both queues. */
+ struct virtchnl_txq_info txq;
+ struct virtchnl_rxq_info rxq;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);
+
+struct virtchnl_vsi_queue_config_info {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ u32 pad;
+ struct virtchnl_queue_pair_info qpair[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
+
+/* VIRTCHNL_OP_REQUEST_QUEUES
+ * VF sends this message to request the PF to allocate additional queues to
+ * this VF. Each VF gets a guaranteed number of queues on init, but any
+ * additional queues must be negotiated. This is a best effort request, as it
+ * is possible the PF does not have enough queues left to support the request.
+ * If the PF cannot support the number requested it will respond with the
+ * maximum number it is able to support. If the request is successful, PF will
+ * then reset the VF to institute required changes.
+ */
+
+/* VF resource request */
+struct virtchnl_vf_res_request {
+ u16 num_queue_pairs;
+};
+
+/* VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0. The VF may not request
+ * that vector 0 be used for traffic.
+ * PF configures interrupt mapping and returns status.
+ * NOTE: due to hardware requirements, all active queues (both TX and RX)
+ * should be mapped to interrupts, even if the driver intends to operate
+ * only in polling mode. In this case the interrupt may be disabled, but
+ * the ITR timer will still run to trigger writebacks.
+ */
+struct virtchnl_vector_map {
+ u16 vsi_id;
+ u16 vector_id;
+ u16 rxq_map;
+ u16 txq_map;
+ u16 rxitr_idx;
+ u16 txitr_idx;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
+
+struct virtchnl_irq_map_info {
+ u16 num_vectors;
+ struct virtchnl_vector_map vecmap[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
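+/* Illustrative sketch (made-up IDs): mapping Rx queue 0 and Tx queue 0 of
+ * VSI 3 to traffic vector 1; vector 0 is reserved for "other" causes.
+ *
+ *	struct virtchnl_vector_map map = {
+ *		.vsi_id = 3,
+ *		.vector_id = 1,
+ *		.rxq_map = BIT(0),
+ *		.txq_map = BIT(0),
+ *	};
+ */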
+
+/* VIRTCHNL_OP_ENABLE_QUEUES
+ * VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these messages to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the field
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ * NOTE: The VF is not required to enable/disable all queues in a single
+ * request. It may send multiple messages.
+ * PF drivers must correctly handle all VF requests.
+ */
+struct virtchnl_queue_select {
+ u16 vsi_id;
+ u16 pad;
+ u32 rx_queues;
+ u32 tx_queues;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
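+/* Illustrative example (hypothetical values): enabling the first four
+ * Tx/Rx queue pairs of VSI 3 with VIRTCHNL_OP_ENABLE_QUEUES.
+ *
+ *	struct virtchnl_queue_select vqs = {
+ *		.vsi_id = 3,
+ *		.rx_queues = 0xF,	(queues 0-3)
+ *		.tx_queues = 0xF,
+ *	};
+ */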
+
+/* VIRTCHNL_OP_ADD_ETH_ADDR
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* VIRTCHNL_OP_DEL_ETH_ADDR
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+struct virtchnl_ether_addr {
+ u8 addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
+ u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
+
+struct virtchnl_ether_addr_list {
+ u16 vsi_id;
+ u16 num_elements;
+ struct virtchnl_ether_addr list[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
+
+/* VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct virtchnl_vlan_filter_list {
+ u16 vsi_id;
+ u16 num_elements;
+ u16 vlan_id[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
+
+/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct virtchnl_promisc_info {
+ u16 vsi_id;
+ u16 flags;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
+
+#define FLAG_VF_UNICAST_PROMISC 0x00000001
+#define FLAG_VF_MULTICAST_PROMISC 0x00000002
+
+/* VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the virtchnl_queue_select struct to specify the VSI. The queue
+ * bitmap fields are ignored by the PF.
+ *
+ * PF replies with struct virtchnl_eth_stats in an external buffer.
+ */
+
+struct virtchnl_eth_stats {
+ u64 rx_bytes; /* received bytes */
+ u64 rx_unicast; /* received unicast pkts */
+ u64 rx_multicast; /* received multicast pkts */
+ u64 rx_broadcast; /* received broadcast pkts */
+ u64 rx_discards;
+ u64 rx_unknown_protocol;
+ u64 tx_bytes; /* transmitted bytes */
+ u64 tx_unicast; /* transmitted unicast pkts */
+ u64 tx_multicast; /* transmitted multicast pkts */
+ u64 tx_broadcast; /* transmitted broadcast pkts */
+ u64 tx_discards;
+ u64 tx_errors;
+};
+
+/* VIRTCHNL_OP_CONFIG_RSS_KEY
+ * VIRTCHNL_OP_CONFIG_RSS_LUT
+ * VF sends these messages to configure RSS. Only supported if both PF
+ * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation. If this is the case, then the RSS fields in
+ * the VF resource struct are valid.
+ * Both the key and LUT are initialized to 0 by the PF, meaning that
+ * RSS is effectively disabled until set up by the VF.
+ */
+struct virtchnl_rss_key {
+ u16 vsi_id;
+ u16 key_len;
+ u8 key[1]; /* RSS hash key, packed bytes */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
+
+struct virtchnl_rss_lut {
+ u16 vsi_id;
+ u16 lut_entries;
+ u8 lut[1]; /* RSS lookup table */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
+
+/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
+ * VIRTCHNL_OP_SET_RSS_HENA
+ * VF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the PF sets these to all possible traffic types that the
+ * hardware supports. The VF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ */
+struct virtchnl_rss_hena {
+ u64 hena;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
+
+/* Type of RSS algorithm */
+enum virtchnl_rss_algorithm {
+ VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0,
+ VIRTCHNL_RSS_ALG_XOR_ASYMMETRIC = 1,
+ VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC = 2,
+ VIRTCHNL_RSS_ALG_XOR_SYMMETRIC = 3,
+};
+
+/* These limits are used by the PF driver to enforce how many channels can
+ * be supported. When the ADQ_V2 capability is negotiated, the PF allows up
+ * to 16 channels; otherwise it allows at most 4 channels.
+ */
+#define VIRTCHNL_MAX_ADQ_CHANNELS 4
+#define VIRTCHNL_MAX_ADQ_V2_CHANNELS 16
+
+/* VIRTCHNL_OP_ENABLE_CHANNELS
+ * VIRTCHNL_OP_DISABLE_CHANNELS
+ * VF sends these messages to enable or disable channels based on
+ * the user specified queue count and queue offset for each traffic class.
+ * This struct encompasses all the information that the PF needs from
+ * the VF to create a channel.
+ */
+struct virtchnl_channel_info {
+ u16 count; /* number of queues in a channel */
+ u16 offset; /* queues in a channel start from 'offset' */
+ u32 pad;
+ u64 max_tx_rate;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);
+
+struct virtchnl_tc_info {
+ u32 num_tc;
+ u32 pad;
+ struct virtchnl_channel_info list[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info);
+
+/* VIRTCHNL_ADD_CLOUD_FILTER
+ * VIRTCHNL_DEL_CLOUD_FILTER
+ * VF sends these messages to add or delete a cloud filter based on the
+ * user specified match and action filters. These structures encompass
+ * all the information that the PF needs from the VF to add/delete a
+ * cloud filter.
+ */
+
+struct virtchnl_l4_spec {
+ u8 src_mac[ETH_ALEN];
+ u8 dst_mac[ETH_ALEN];
+	/* vlan_prio is part of this 16-bit field, even from the OS
+	 * perspective: bits 11..0 carry the actual VLAN ID and bits
+	 * 14..12 carry the VLAN priority. If VLAN priority offload is
+	 * added in the future, that information should be passed in
+	 * bits 14..12 of the "vlan_id" field.
+ */
+ __be16 vlan_id;
+ __be16 pad; /* reserved for future use */
+ __be32 src_ip[4];
+ __be32 dst_ip[4];
+ __be16 src_port;
+ __be16 dst_port;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec);
+
+union virtchnl_flow_spec {
+ struct virtchnl_l4_spec tcp_spec;
+ u8 buffer[128]; /* reserved for future use */
+};
+
+VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec);
+
+enum virtchnl_action {
+ /* action types */
+ VIRTCHNL_ACTION_DROP = 0,
+ VIRTCHNL_ACTION_TC_REDIRECT,
+ VIRTCHNL_ACTION_PASSTHRU,
+ VIRTCHNL_ACTION_QUEUE,
+ VIRTCHNL_ACTION_Q_REGION,
+ VIRTCHNL_ACTION_MARK,
+ VIRTCHNL_ACTION_COUNT,
+};
+
+enum virtchnl_flow_type {
+ /* flow types */
+ VIRTCHNL_TCP_V4_FLOW = 0,
+ VIRTCHNL_TCP_V6_FLOW,
+ VIRTCHNL_UDP_V4_FLOW,
+ VIRTCHNL_UDP_V6_FLOW,
+};
+
+struct virtchnl_filter {
+ union virtchnl_flow_spec data;
+ union virtchnl_flow_spec mask;
+ enum virtchnl_flow_type flow_type;
+ enum virtchnl_action action;
+ u32 action_meta;
+ u8 field_flags;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
+
+/* VIRTCHNL_OP_DCF_GET_VSI_MAP
+ * VF sends this message to get the VSI mapping table.
+ * PF responds with an indirect message containing the
+ * VFs' HW VSI IDs.
+ * The index into the vf_vsi array is the logical VF ID; the
+ * value in the vf_vsi array is the VF's HW VSI ID together
+ * with its valid bit.
+ */
+struct virtchnl_dcf_vsi_map {
+ u16 pf_vsi; /* PF's HW VSI ID */
+ u16 num_vfs; /* The actual number of VFs allocated */
+#define VIRTCHNL_DCF_VF_VSI_ID_S 0
+#define VIRTCHNL_DCF_VF_VSI_ID_M (0xFFF << VIRTCHNL_DCF_VF_VSI_ID_S)
+#define VIRTCHNL_DCF_VF_VSI_VALID (1 << 15)
+ u16 vf_vsi[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_dcf_vsi_map);
+
+#define PKG_NAME_SIZE 32
+#define DSN_SIZE 8
+
+struct pkg_version {
+ u8 major;
+ u8 minor;
+ u8 update;
+ u8 draft;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(4, pkg_version);
+
+struct virtchnl_pkg_info {
+ struct pkg_version pkg_ver;
+ u32 track_id;
+ char pkg_name[PKG_NAME_SIZE];
+ u8 dsn[DSN_SIZE];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(48, virtchnl_pkg_info);
+
+struct virtchnl_supported_rxdids {
+ u64 supported_rxdids;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_supported_rxdids);
+
+/* VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum virtchnl_event_codes {
+ VIRTCHNL_EVENT_UNKNOWN = 0,
+ VIRTCHNL_EVENT_LINK_CHANGE,
+ VIRTCHNL_EVENT_RESET_IMPENDING,
+ VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+ VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE,
+};
+
+#define PF_EVENT_SEVERITY_INFO 0
+#define PF_EVENT_SEVERITY_ATTENTION 1
+#define PF_EVENT_SEVERITY_ACTION_REQUIRED 2
+#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255
+
+struct virtchnl_pf_event {
+ enum virtchnl_event_codes event;
+ union {
+		/* If the PF driver does not support the new speed reporting
+		 * capabilities, use link_event; otherwise use link_event_adv
+		 * to get the speed and link information. The ability to
+		 * understand new speeds is indicated by the capability flag
+		 * VIRTCHNL_VF_CAP_ADV_LINK_SPEED in the vf_cap_flags field of
+		 * struct virtchnl_vf_resource, which can be used to determine
+		 * which link event struct to use below.
+ */
+ struct {
+ enum virtchnl_link_speed link_speed;
+ u8 link_status;
+ } link_event;
+ struct {
+ /* link_speed provided in Mbps */
+ u32 link_speed;
+ u8 link_status;
+ } link_event_adv;
+ struct {
+ u16 vf_id;
+ u16 vsi_id;
+ } vf_vsi_map;
+ } event_data;
+
+ int severity;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
+
+
+/* Since VF messages are limited to a u16 size, precalculate the maximum
+ * number of nested elements in virtchnl structures that the virtual channel
+ * can possibly handle in a single message.
+ */
+enum virtchnl_vector_limits {
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX =
+ ((u16)(~0) - sizeof(struct virtchnl_vsi_queue_config_info)) /
+ sizeof(struct virtchnl_queue_pair_info),
+
+ VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX =
+ ((u16)(~0) - sizeof(struct virtchnl_irq_map_info)) /
+ sizeof(struct virtchnl_vector_map),
+
+ VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX =
+ ((u16)(~0) - sizeof(struct virtchnl_ether_addr_list)) /
+ sizeof(struct virtchnl_ether_addr),
+
+ VIRTCHNL_OP_ADD_DEL_VLAN_MAX =
+ ((u16)(~0) - sizeof(struct virtchnl_vlan_filter_list)) /
+ sizeof(u16),
+
+ VIRTCHNL_OP_ENABLE_CHANNELS_MAX =
+ ((u16)(~0) - sizeof(struct virtchnl_tc_info)) /
+ sizeof(struct virtchnl_channel_info),
+};
+
+/* VF reset states - these are written into the RSTAT register:
+ * VFGEN_RSTAT on the VF
+ * When the PF initiates a reset, it writes 0
+ * When the reset is complete, it writes 1
+ * When the PF detects that the VF has recovered, it writes 2
+ * VF checks this register periodically to determine if a reset has
+ * occurred, then polls it to know when the reset is complete.
+ * If either the PF or VF reads the register while the hardware
+ * is in a reset state, it will return DEADBEEF, which, when masked,
+ * will result in 3.
+ */
+enum virtchnl_vfr_states {
+ VIRTCHNL_VFR_INPROGRESS = 0,
+ VIRTCHNL_VFR_COMPLETED,
+ VIRTCHNL_VFR_VFACTIVE,
+};
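+
+/* Illustrative sketch (not part of the base code): polling for reset
+ * completion. The rd32() accessor, the hw handle, the delay_usec() helper
+ * and the VFGEN_RSTAT register/mask names are hypothetical stand-ins for
+ * the driver's osdep and register definitions.
+ *
+ *	u32 rstat;
+ *
+ *	do {
+ *		delay_usec(1000);
+ *		rstat = rd32(hw, VFGEN_RSTAT) & VFGEN_RSTAT_STATE_MASK;
+ *	} while (rstat == VIRTCHNL_VFR_INPROGRESS);
+ *
+ * After the loop, rstat is VIRTCHNL_VFR_COMPLETED or VIRTCHNL_VFR_VFACTIVE,
+ * or 3 if the register was read mid-reset (DEADBEEF masked down, as noted
+ * above).
+ */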
+
+#define VIRTCHNL_MAX_NUM_PROTO_HDRS 32
+#define PROTO_HDR_SHIFT 5
+#define PROTO_HDR_FIELD_START(proto_hdr_type) \
+ (proto_hdr_type << PROTO_HDR_SHIFT)
+#define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1)
+
+/* The VF uses these macros to configure each protocol header.
+ * They specify which protocol headers and protocol header fields are
+ * selected, based on virtchnl_proto_hdr_type and virtchnl_proto_hdr_field.
+ * @param hdr: a struct of virtchnl_proto_hdr
+ * @param hdr_type: ETH/IPV4/TCP, etc
+ * @param field: SRC/DST/TEID/SPI, etc
+ */
+#define VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, field) \
+ ((hdr)->field_selector |= BIT((field) & PROTO_HDR_FIELD_MASK))
+#define VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, field) \
+ ((hdr)->field_selector &= ~BIT((field) & PROTO_HDR_FIELD_MASK))
+#define VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val) \
+ ((hdr)->field_selector & BIT((val) & PROTO_HDR_FIELD_MASK))
+#define VIRTCHNL_GET_PROTO_HDR_FIELD(hdr) ((hdr)->field_selector)
+
+#define VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
+ (VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, \
+ VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
+#define VIRTCHNL_DEL_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
+ (VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, \
+ VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
+
+#define VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, hdr_type) \
+ ((hdr)->type = VIRTCHNL_PROTO_HDR_ ## hdr_type)
+#define VIRTCHNL_GET_PROTO_HDR_TYPE(hdr) \
+ (((hdr)->type) >> PROTO_HDR_SHIFT)
+#define VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) \
+ ((hdr)->type == ((val) >> PROTO_HDR_SHIFT))
+#define VIRTCHNL_TEST_PROTO_HDR(hdr, val) \
+ (VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) && \
+ VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val))
+
+/* Protocol header type within a packet segment. A segment consists of one or
+ * more protocol headers that make up a logical group of protocol headers. Each
+ * logical group of protocol headers encapsulates or is encapsulated using/by
+ * tunneling or encapsulation protocols for network virtualization.
+ */
+enum virtchnl_proto_hdr_type {
+ VIRTCHNL_PROTO_HDR_NONE,
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_S_VLAN,
+ VIRTCHNL_PROTO_HDR_C_VLAN,
+ VIRTCHNL_PROTO_HDR_IPV4,
+ VIRTCHNL_PROTO_HDR_IPV6,
+ VIRTCHNL_PROTO_HDR_TCP,
+ VIRTCHNL_PROTO_HDR_UDP,
+ VIRTCHNL_PROTO_HDR_SCTP,
+ VIRTCHNL_PROTO_HDR_GTPU_IP,
+ VIRTCHNL_PROTO_HDR_GTPU_EH,
+ VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
+ VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
+ VIRTCHNL_PROTO_HDR_PPPOE,
+ VIRTCHNL_PROTO_HDR_L2TPV3,
+ VIRTCHNL_PROTO_HDR_ESP,
+ VIRTCHNL_PROTO_HDR_AH,
+ VIRTCHNL_PROTO_HDR_PFCP,
+};
+
+/* Protocol header field within a protocol header. */
+enum virtchnl_proto_hdr_field {
+ /* ETHER */
+ VIRTCHNL_PROTO_HDR_ETH_SRC =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ETH),
+ VIRTCHNL_PROTO_HDR_ETH_DST,
+ VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE,
+ /* S-VLAN */
+ VIRTCHNL_PROTO_HDR_S_VLAN_ID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_S_VLAN),
+ /* C-VLAN */
+ VIRTCHNL_PROTO_HDR_C_VLAN_ID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_C_VLAN),
+ /* IPV4 */
+ VIRTCHNL_PROTO_HDR_IPV4_SRC =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4),
+ VIRTCHNL_PROTO_HDR_IPV4_DST,
+ VIRTCHNL_PROTO_HDR_IPV4_DSCP,
+ VIRTCHNL_PROTO_HDR_IPV4_TTL,
+ VIRTCHNL_PROTO_HDR_IPV4_PROT,
+ /* IPV6 */
+ VIRTCHNL_PROTO_HDR_IPV6_SRC =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6),
+ VIRTCHNL_PROTO_HDR_IPV6_DST,
+ VIRTCHNL_PROTO_HDR_IPV6_TC,
+ VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT,
+ VIRTCHNL_PROTO_HDR_IPV6_PROT,
+ /* TCP */
+ VIRTCHNL_PROTO_HDR_TCP_SRC_PORT =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_TCP),
+ VIRTCHNL_PROTO_HDR_TCP_DST_PORT,
+ /* UDP */
+ VIRTCHNL_PROTO_HDR_UDP_SRC_PORT =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_UDP),
+ VIRTCHNL_PROTO_HDR_UDP_DST_PORT,
+ /* SCTP */
+ VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_SCTP),
+ VIRTCHNL_PROTO_HDR_SCTP_DST_PORT,
+ /* GTPU_IP */
+ VIRTCHNL_PROTO_HDR_GTPU_IP_TEID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_IP),
+ /* GTPU_EH */
+ VIRTCHNL_PROTO_HDR_GTPU_EH_PDU =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH),
+ VIRTCHNL_PROTO_HDR_GTPU_EH_QFI,
+ /* PPPOE */
+ VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PPPOE),
+ /* L2TPV3 */
+ VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV3),
+ /* ESP */
+ VIRTCHNL_PROTO_HDR_ESP_SPI =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ESP),
+ /* AH */
+ VIRTCHNL_PROTO_HDR_AH_SPI =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_AH),
+ /* PFCP */
+ VIRTCHNL_PROTO_HDR_PFCP_S_FIELD =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PFCP),
+ VIRTCHNL_PROTO_HDR_PFCP_SEID,
+};
+
+struct virtchnl_proto_hdr {
+ enum virtchnl_proto_hdr_type type;
+ u32 field_selector; /* a bit mask to select field for header type */
+ u8 buffer[64];
+	/**
+	 * binary buffer in network order for the specific header type.
+	 * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, an IPv4
+	 * header is expected to be copied into the buffer.
+	 */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
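+
+/* Illustrative sketch (not part of the base code): selecting the IPv4
+ * source and destination address fields of one protocol header with the
+ * macros defined above.
+ *
+ *	struct virtchnl_proto_hdr hdr = {0};
+ *
+ *	VIRTCHNL_SET_PROTO_HDR_TYPE(&hdr, IPV4);
+ *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(&hdr, IPV4, SRC);
+ *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(&hdr, IPV4, DST);
+ */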
+
+struct virtchnl_proto_hdrs {
+ u8 tunnel_level;
+	/**
+	 * specifies which layer the protocol headers start from.
+	 * 0 - from the outer layer
+	 * 1 - from the first inner layer
+	 * 2 - from the second inner layer
+	 * ....
+	 **/
+	int count;
+	/* the number of proto layers must be < VIRTCHNL_MAX_NUM_PROTO_HDRS */
+ struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
+
+struct virtchnl_rss_cfg {
+ struct virtchnl_proto_hdrs proto_hdrs; /* protocol headers */
+ enum virtchnl_rss_algorithm rss_algorithm; /* rss algorithm type */
+	u8 reserved[128]; /* reserved for future use */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
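+
+/* Illustrative sketch (not part of the base code): an RSS configuration
+ * that hashes on the outer IPv4 source/destination addresses. The
+ * VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC enumerator is assumed from the
+ * virtchnl_rss_algorithm enum defined earlier in this file.
+ *
+ *	struct virtchnl_rss_cfg cfg = {0};
+ *	struct virtchnl_proto_hdr *hdr = &cfg.proto_hdrs.proto_hdr[0];
+ *
+ *	cfg.proto_hdrs.tunnel_level = 0;	(start from the outer layer)
+ *	cfg.proto_hdrs.count = 1;
+ *	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
+ *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
+ *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
+ *	cfg.rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
+ *
+ * The cfg is then sent with VIRTCHNL_OP_ADD_RSS_CFG (see the length check
+ * in virtchnl_vc_validate_vf_msg() below).
+ */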
+
+/* action configuration for FDIR */
+struct virtchnl_filter_action {
+ enum virtchnl_action type;
+ union {
+ /* used for queue and qgroup action */
+ struct {
+ u16 index;
+ u8 region;
+ } queue;
+ /* used for count action */
+ struct {
+ /* share counter ID with other flow rules */
+ u8 shared;
+ u32 id; /* counter ID */
+ } count;
+ /* used for mark action */
+ u32 mark_id;
+ u8 reserve[32];
+ } act_conf;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_filter_action);
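+
+/* Illustrative sketch (not part of the base code): a "send to queue"
+ * action. VIRTCHNL_ACTION_QUEUE is assumed from the virtchnl_action enum
+ * defined earlier in this file.
+ *
+ *	struct virtchnl_filter_action act = {0};
+ *
+ *	act.type = VIRTCHNL_ACTION_QUEUE;
+ *	act.act_conf.queue.index = 3;	(deliver matches to RX queue 3)
+ */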
+
+#define VIRTCHNL_MAX_NUM_ACTIONS 8
+
+struct virtchnl_filter_action_set {
+	/* the number of actions must be less than VIRTCHNL_MAX_NUM_ACTIONS */
+ int count;
+ struct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(292, virtchnl_filter_action_set);
+
+/* pattern and action for FDIR rule */
+struct virtchnl_fdir_rule {
+ struct virtchnl_proto_hdrs proto_hdrs;
+ struct virtchnl_filter_action_set action_set;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);
+
+/* Query information used to retrieve FDIR rule counters.
+ * The PF fills out this structure, and resets the counters
+ * when reset_counter is set in the query request.
+ */
+struct virtchnl_fdir_query_info {
+ u32 match_packets_valid:1;
+ u32 match_bytes_valid:1;
+ u32 reserved:30; /* Reserved, must be zero. */
+ u32 pad;
+ u64 matched_packets; /* Number of packets for this rule. */
+ u64 matched_bytes; /* Number of bytes through this rule. */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_fdir_query_info);
+
+/* Status returned to VF after VF requests FDIR commands
+ * VIRTCHNL_FDIR_SUCCESS
+ * VF FDIR related request was successfully done by PF.
+ * The request can be OP_ADD/DEL/QUERY_FDIR_FILTER.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
+ * OP_ADD_FDIR_FILTER request failed because no hardware resource
+ * is available.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_EXIST
+ * OP_ADD_FDIR_FILTER request failed because the rule already exists.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT
+ * OP_ADD_FDIR_FILTER request failed because it conflicts with an
+ * existing rule.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST
+ * OP_DEL_FDIR_FILTER request failed because the rule doesn't exist.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_INVALID
+ * OP_ADD_FDIR_FILTER request failed because parameter validation failed
+ * or the hardware doesn't support the rule.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT
+ * OP_ADD/DEL_FDIR_FILTER request failed because programming timed out.
+ *
+ * VIRTCHNL_FDIR_FAILURE_QUERY_INVALID
+ * OP_QUERY_FDIR_FILTER request failed because parameter validation failed,
+ * for example, the VF queried the counter of a rule that has no counter
+ * action.
+ */
+enum virtchnl_fdir_prgm_status {
+ VIRTCHNL_FDIR_SUCCESS = 0,
+ VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE,
+ VIRTCHNL_FDIR_FAILURE_RULE_EXIST,
+ VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT,
+ VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST,
+ VIRTCHNL_FDIR_FAILURE_RULE_INVALID,
+ VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT,
+ VIRTCHNL_FDIR_FAILURE_QUERY_INVALID,
+};
+
+/* VIRTCHNL_OP_ADD_FDIR_FILTER
+ * VF sends this request to PF by filling out vsi_id,
+ * validate_only and rule_cfg. PF will return flow_id
+ * if the request succeeds, and returns the add status to VF.
+ */
+struct virtchnl_fdir_add {
+ u16 vsi_id; /* INPUT */
+	/*
+	 * 1 to validate an FDIR rule, 0 to create an FDIR rule.
+	 * Validate and create share one opcode: VIRTCHNL_OP_ADD_FDIR_FILTER.
+	 */
+ u16 validate_only; /* INPUT */
+ u32 flow_id; /* OUTPUT */
+ struct virtchnl_fdir_rule rule_cfg; /* INPUT */
+ enum virtchnl_fdir_prgm_status status; /* OUTPUT */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add);
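+
+/* Illustrative sketch (not part of the base code): validating an FDIR rule
+ * before programming it. The send_to_pf() helper is hypothetical and is
+ * assumed to copy the PF's reply back into the request buffer; both steps
+ * use the same opcode.
+ *
+ *	struct virtchnl_fdir_add req = {0};
+ *
+ *	req.vsi_id = vsi_id;
+ *	req.rule_cfg = rule;		(pattern and actions built elsewhere)
+ *	req.validate_only = 1;		(dry run first)
+ *	send_to_pf(VIRTCHNL_OP_ADD_FDIR_FILTER, &req, sizeof(req));
+ *	if (req.status == VIRTCHNL_FDIR_SUCCESS) {
+ *		req.validate_only = 0;	(now actually program the rule)
+ *		send_to_pf(VIRTCHNL_OP_ADD_FDIR_FILTER, &req, sizeof(req));
+ *	}
+ */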
+
+/* VIRTCHNL_OP_DEL_FDIR_FILTER
+ * VF sends this request to PF by filling out vsi_id
+ * and flow_id. PF will return the deletion status to VF.
+ */
+struct virtchnl_fdir_del {
+ u16 vsi_id; /* INPUT */
+ u16 pad;
+ u32 flow_id; /* INPUT */
+ enum virtchnl_fdir_prgm_status status; /* OUTPUT */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
+
+/* VIRTCHNL_OP_QUERY_FDIR_FILTER
+ * VF sends this request to PF by filling out vsi_id,
+ * flow_id and reset_counter. PF will return query_info
+ * and the query status to VF.
+ */
+struct virtchnl_fdir_query {
+ u16 vsi_id; /* INPUT */
+ u16 pad1[3];
+ u32 flow_id; /* INPUT */
+ u32 reset_counter:1; /* INPUT */
+ struct virtchnl_fdir_query_info query_info; /* OUTPUT */
+ enum virtchnl_fdir_prgm_status status; /* OUTPUT */
+ u32 pad2;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(48, virtchnl_fdir_query);
+
+/**
+ * virtchnl_vc_validate_vf_msg
+ * @ver: Virtchnl version info
+ * @v_opcode: Opcode for the message
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * validate msg format against struct for each opcode
+ */
+static inline int
+virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
+ u8 *msg, u16 msglen)
+{
+ bool err_msg_format = false;
+ u32 valid_len = 0;
+
+ /* Validate message length. */
+ switch (v_opcode) {
+ case VIRTCHNL_OP_VERSION:
+ valid_len = sizeof(struct virtchnl_version_info);
+ break;
+ case VIRTCHNL_OP_RESET_VF:
+ break;
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
+ if (VF_IS_V11(ver))
+ valid_len = sizeof(u32);
+ break;
+ case VIRTCHNL_OP_CONFIG_TX_QUEUE:
+ valid_len = sizeof(struct virtchnl_txq_info);
+ break;
+ case VIRTCHNL_OP_CONFIG_RX_QUEUE:
+ valid_len = sizeof(struct virtchnl_rxq_info);
+ break;
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_vsi_queue_config_info *vqc =
+ (struct virtchnl_vsi_queue_config_info *)msg;
+
+ if (vqc->num_queue_pairs == 0 || vqc->num_queue_pairs >
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX) {
+ err_msg_format = true;
+ break;
+ }
+
+ valid_len += (vqc->num_queue_pairs *
+ sizeof(struct
+ virtchnl_queue_pair_info));
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ valid_len = sizeof(struct virtchnl_irq_map_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_irq_map_info *vimi =
+ (struct virtchnl_irq_map_info *)msg;
+
+ if (vimi->num_vectors == 0 || vimi->num_vectors >
+ VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX) {
+ err_msg_format = true;
+ break;
+ }
+
+ valid_len += (vimi->num_vectors *
+ sizeof(struct virtchnl_vector_map));
+ }
+ break;
+ case VIRTCHNL_OP_ENABLE_QUEUES:
+ case VIRTCHNL_OP_DISABLE_QUEUES:
+ valid_len = sizeof(struct virtchnl_queue_select);
+ break;
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
+ valid_len = sizeof(struct virtchnl_ether_addr_list);
+ if (msglen >= valid_len) {
+ struct virtchnl_ether_addr_list *veal =
+ (struct virtchnl_ether_addr_list *)msg;
+
+ if (veal->num_elements == 0 || veal->num_elements >
+ VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX) {
+ err_msg_format = true;
+ break;
+ }
+
+ valid_len += veal->num_elements *
+ sizeof(struct virtchnl_ether_addr);
+ }
+ break;
+ case VIRTCHNL_OP_ADD_VLAN:
+ case VIRTCHNL_OP_DEL_VLAN:
+ valid_len = sizeof(struct virtchnl_vlan_filter_list);
+ if (msglen >= valid_len) {
+ struct virtchnl_vlan_filter_list *vfl =
+ (struct virtchnl_vlan_filter_list *)msg;
+
+ if (vfl->num_elements == 0 || vfl->num_elements >
+ VIRTCHNL_OP_ADD_DEL_VLAN_MAX) {
+ err_msg_format = true;
+ break;
+ }
+
+ valid_len += vfl->num_elements * sizeof(u16);
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ valid_len = sizeof(struct virtchnl_promisc_info);
+ break;
+ case VIRTCHNL_OP_GET_STATS:
+ valid_len = sizeof(struct virtchnl_queue_select);
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
+ valid_len = sizeof(struct virtchnl_rss_key);
+ if (msglen >= valid_len) {
+ struct virtchnl_rss_key *vrk =
+ (struct virtchnl_rss_key *)msg;
+
+ if (vrk->key_len == 0) {
+ /* zero length is allowed as input */
+ break;
+ }
+
+ valid_len += vrk->key_len - 1;
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
+ valid_len = sizeof(struct virtchnl_rss_lut);
+ if (msglen >= valid_len) {
+ struct virtchnl_rss_lut *vrl =
+ (struct virtchnl_rss_lut *)msg;
+
+ if (vrl->lut_entries == 0) {
+ /* zero entries is allowed as input */
+ break;
+ }
+
+ valid_len += vrl->lut_entries - 1;
+ }
+ break;
+ case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ break;
+ case VIRTCHNL_OP_SET_RSS_HENA:
+ valid_len = sizeof(struct virtchnl_rss_hena);
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ break;
+ case VIRTCHNL_OP_REQUEST_QUEUES:
+ valid_len = sizeof(struct virtchnl_vf_res_request);
+ break;
+ case VIRTCHNL_OP_ENABLE_CHANNELS:
+ valid_len = sizeof(struct virtchnl_tc_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_tc_info *vti =
+ (struct virtchnl_tc_info *)msg;
+
+ if (vti->num_tc == 0 || vti->num_tc >
+ VIRTCHNL_OP_ENABLE_CHANNELS_MAX) {
+ err_msg_format = true;
+ break;
+ }
+
+ valid_len += (vti->num_tc - 1) *
+ sizeof(struct virtchnl_channel_info);
+ }
+ break;
+ case VIRTCHNL_OP_DISABLE_CHANNELS:
+ break;
+ case VIRTCHNL_OP_ADD_CLOUD_FILTER:
+ case VIRTCHNL_OP_DEL_CLOUD_FILTER:
+ valid_len = sizeof(struct virtchnl_filter);
+ break;
+ case VIRTCHNL_OP_DCF_CMD_DESC:
+ case VIRTCHNL_OP_DCF_CMD_BUFF:
+		/* These two opcodes are specifically for handling AdminQ
+		 * commands, so the validation needs to be done in the PF's
+		 * context.
+		 */
+ return 0;
+ case VIRTCHNL_OP_DCF_DISABLE:
+ case VIRTCHNL_OP_DCF_GET_VSI_MAP:
+		/* These two opcodes are used by the DCF without a message
+		 * buffer, so the valid length keeps its default value 0.
+		 */
+ break;
+ case VIRTCHNL_OP_DCF_GET_PKG_INFO:
+ break;
+ case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
+ break;
+ case VIRTCHNL_OP_ADD_RSS_CFG:
+ case VIRTCHNL_OP_DEL_RSS_CFG:
+ valid_len = sizeof(struct virtchnl_rss_cfg);
+ break;
+ case VIRTCHNL_OP_ADD_FDIR_FILTER:
+ valid_len = sizeof(struct virtchnl_fdir_add);
+ break;
+ case VIRTCHNL_OP_DEL_FDIR_FILTER:
+ valid_len = sizeof(struct virtchnl_fdir_del);
+ break;
+ case VIRTCHNL_OP_QUERY_FDIR_FILTER:
+ valid_len = sizeof(struct virtchnl_fdir_query);
+ break;
+ /* These are always errors coming from the VF. */
+ case VIRTCHNL_OP_EVENT:
+ case VIRTCHNL_OP_UNKNOWN:
+ default:
+ return VIRTCHNL_STATUS_ERR_PARAM;
+ }
+ /* few more checks */
+ if (err_msg_format || valid_len != msglen)
+ return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
+
+ return 0;
+}
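+
+/* Illustrative sketch (not part of the base code): a PF-side receive path
+ * rejecting malformed VF messages before dispatching them. The vf context
+ * and the send_err_to_vf() and dispatch_vf_msg() helpers are hypothetical.
+ *
+ *	int err = virtchnl_vc_validate_vf_msg(&vf->version, v_opcode,
+ *					      msg, msglen);
+ *	if (err) {
+ *		send_err_to_vf(vf, v_opcode, err);
+ *		return err;
+ *	}
+ *	return dispatch_vf_msg(vf, v_opcode, msg, msglen);
+ */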
+#endif /* _VIRTCHNL_H_ */