path: root/src/spdk/dpdk/drivers/net/iavf
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
commit     e6918187568dbd01842d8d1d2c808ce16a894239 (patch)
tree       64f88b554b444a49f656b6c656111a145cbbaa28  /src/spdk/dpdk/drivers/net/iavf
parent     Initial commit. (diff)
download   ceph-e6918187568dbd01842d8d1d2c808ce16a894239.tar.xz
           ceph-e6918187568dbd01842d8d1d2c808ce16a894239.zip
Adding upstream version 18.2.2. (upstream/18.2.2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/dpdk/drivers/net/iavf')
-rw-r--r--  src/spdk/dpdk/drivers/net/iavf/Makefile                    54
-rw-r--r--  src/spdk/dpdk/drivers/net/iavf/iavf.h                     275
-rw-r--r--  src/spdk/dpdk/drivers/net/iavf/iavf_ethdev.c             1586
-rw-r--r--  src/spdk/dpdk/drivers/net/iavf/iavf_fdir.c                971
-rw-r--r--  src/spdk/dpdk/drivers/net/iavf/iavf_generic_flow.c       1044
-rw-r--r--  src/spdk/dpdk/drivers/net/iavf/iavf_generic_flow.h        320
-rw-r--r--  src/spdk/dpdk/drivers/net/iavf/iavf_hash.c               1236
-rw-r--r--  src/spdk/dpdk/drivers/net/iavf/iavf_log.h                  51
-rw-r--r--  src/spdk/dpdk/drivers/net/iavf/iavf_rxtx.c               2869
-rw-r--r--  src/spdk/dpdk/drivers/net/iavf/iavf_rxtx.h                534
-rw-r--r--  src/spdk/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c      1541
-rw-r--r--  src/spdk/dpdk/drivers/net/iavf/iavf_rxtx_vec_common.h     276
-rw-r--r--  src/spdk/dpdk/drivers/net/iavf/iavf_rxtx_vec_sse.c       1191
-rw-r--r--  src/spdk/dpdk/drivers/net/iavf/iavf_vchnl.c              1077
-rw-r--r--  src/spdk/dpdk/drivers/net/iavf/meson.build                 37
-rw-r--r--  src/spdk/dpdk/drivers/net/iavf/rte_pmd_iavf_version.map     3
16 files changed, 13065 insertions, 0 deletions
diff --git a/src/spdk/dpdk/drivers/net/iavf/Makefile b/src/spdk/dpdk/drivers/net/iavf/Makefile
new file mode 100644
index 000000000..792cbb7f7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/iavf/Makefile
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_iavf.a
+
+CFLAGS += -I$(RTE_SDK)/drivers/common/iavf
+CFLAGS += -O3 $(WERROR_FLAGS) -Wno-strict-aliasing
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash
+LDLIBS += -lrte_bus_pci
+LDLIBS += -lrte_common_iavf
+
+EXPORT_MAP := rte_pmd_iavf_version.map
+
+#
+# all sources are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_vchnl.c
+SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_generic_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_fdir.c
+SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_hash.c
+ifeq ($(CONFIG_RTE_ARCH_X86), y)
+SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx_vec_sse.c
+endif
+
+ifeq ($(CONFIG_RTE_LIBRTE_IAVF_PMD), y)
+ ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)
+ CC_AVX2_SUPPORT=1
+ else
+ CC_AVX2_SUPPORT=\
+ $(shell $(CC) -march=core-avx2 -dM -E - </dev/null 2>&1 | \
+ grep -q AVX2 && echo 1)
+ ifeq ($(CC_AVX2_SUPPORT), 1)
+ ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+ CFLAGS_iavf_rxtx_vec_avx2.o += -march=core-avx2
+ else
+ CFLAGS_iavf_rxtx_vec_avx2.o += -mavx2
+ endif
+ endif
+ endif
+endif
+
+ifeq ($(CC_AVX2_SUPPORT), 1)
+ SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx_vec_avx2.c
+endif
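+# For example, with a gcc toolchain that supports AVX2, the probe above boils
+# down to:
+#
+#   $(CC) -march=core-avx2 -dM -E - </dev/null | grep -q AVX2
+#
+# which matches the predefined __AVX2__ macro, so CC_AVX2_SUPPORT becomes 1
+# and iavf_rxtx_vec_avx2.c is built with -mavx2 (or -march=core-avx2 for icc).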
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/iavf/iavf.h b/src/spdk/dpdk/drivers/net/iavf/iavf.h
new file mode 100644
index 000000000..9be8a2381
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/iavf/iavf.h
@@ -0,0 +1,275 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _IAVF_ETHDEV_H_
+#define _IAVF_ETHDEV_H_
+
+#include <rte_kvargs.h>
+#include <iavf_prototype.h>
+#include <iavf_adminq_cmd.h>
+#include <iavf_type.h>
+
+#include "iavf_log.h"
+
+#define IAVF_AQ_LEN 32
+#define IAVF_AQ_BUF_SZ 4096
+#define IAVF_RESET_WAIT_CNT 50
+#define IAVF_BUF_SIZE_MIN 1024
+#define IAVF_FRAME_SIZE_MAX 9728
+#define IAVF_QUEUE_BASE_ADDR_UNIT 128
+
+#define IAVF_MAX_NUM_QUEUES 16
+
+#define IAVF_NUM_MACADDR_MAX 64
+
+#define IAVF_DEFAULT_RX_PTHRESH 8
+#define IAVF_DEFAULT_RX_HTHRESH 8
+#define IAVF_DEFAULT_RX_WTHRESH 0
+
+#define IAVF_DEFAULT_RX_FREE_THRESH 32
+
+#define IAVF_DEFAULT_TX_PTHRESH 32
+#define IAVF_DEFAULT_TX_HTHRESH 0
+#define IAVF_DEFAULT_TX_WTHRESH 0
+
+#define IAVF_DEFAULT_TX_FREE_THRESH 32
+#define IAVF_DEFAULT_TX_RS_THRESH 32
+
+#define IAVF_BASIC_OFFLOAD_CAPS ( \
+ VF_BASE_MODE_OFFLOADS | \
+ VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | \
+ VIRTCHNL_VF_OFFLOAD_RX_POLLING)
+
+#define IAVF_RSS_OFFLOAD_ALL ( \
+ ETH_RSS_FRAG_IPV4 | \
+ ETH_RSS_NONFRAG_IPV4_TCP | \
+ ETH_RSS_NONFRAG_IPV4_UDP | \
+ ETH_RSS_NONFRAG_IPV4_SCTP | \
+ ETH_RSS_NONFRAG_IPV4_OTHER)
+
+#define IAVF_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
+#define IAVF_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
+
+/* Default queue interrupt throttling time in microseconds */
+#define IAVF_ITR_INDEX_DEFAULT 0
+#define IAVF_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
+#define IAVF_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */
+
+/* The overhead from MTU to max frame size.
+ * For QinQ packets, the VLAN tag needs to be counted twice.
+ */
+#define IAVF_VLAN_TAG_SIZE 4
+#define IAVF_ETH_OVERHEAD \
+ (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + IAVF_VLAN_TAG_SIZE * 2)
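+/* For example, a 1500 byte MTU yields a maximum frame size of
+ * 1500 + 14 (Ethernet header) + 4 (CRC) + 2 * 4 (VLAN tags) = 1526 bytes.
+ */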
+
+#define IAVF_32_BIT_WIDTH (CHAR_BIT * 4)
+#define IAVF_48_BIT_WIDTH (CHAR_BIT * 6)
+#define IAVF_48_BIT_MASK RTE_LEN2MASK(IAVF_48_BIT_WIDTH, uint64_t)
+
+#define IAVF_RX_DESC_EXT_STATUS_FLEXBH_MASK 0x03
+#define IAVF_RX_DESC_EXT_STATUS_FLEXBH_FD_ID 0x01
+
+struct iavf_adapter;
+struct iavf_rx_queue;
+struct iavf_tx_queue;
+
+/* Structure that defines a VSI, associated with an adapter. */
+struct iavf_vsi {
+ struct iavf_adapter *adapter; /* Backreference to associated adapter */
+ uint16_t vsi_id;
+ uint16_t nb_qps; /* Number of queue pairs VSI can occupy */
+ uint16_t nb_used_qps; /* Number of queue pairs VSI uses */
+ uint16_t max_macaddrs; /* Maximum number of MAC addresses */
+ uint16_t base_vector;
+ uint16_t msix_intr; /* The MSIX interrupt binds to VSI */
+ struct virtchnl_eth_stats eth_stats_offset;
+};
+
+struct rte_flow;
+TAILQ_HEAD(iavf_flow_list, rte_flow);
+
+struct iavf_flow_parser_node;
+TAILQ_HEAD(iavf_parser_list, iavf_flow_parser_node);
+
+struct iavf_fdir_conf {
+ struct virtchnl_fdir_add add_fltr;
+ struct virtchnl_fdir_del del_fltr;
+ uint64_t input_set;
+ uint32_t flow_id;
+ uint32_t mark_flag;
+};
+
+struct iavf_fdir_info {
+ struct iavf_fdir_conf conf;
+};
+
+/* TODO: is it correct to assume the max number to be 16? */
+#define IAVF_MAX_MSIX_VECTORS 16
+
+/* Structure to store private data specific to a VF instance. */
+struct iavf_info {
+ uint16_t num_queue_pairs;
+ uint16_t max_pkt_len; /* Maximum packet length */
+ uint16_t mac_num; /* Number of MAC addresses */
+ bool promisc_unicast_enabled;
+ bool promisc_multicast_enabled;
+
+ struct virtchnl_version_info virtchnl_version;
+ struct virtchnl_vf_resource *vf_res; /* VF resource */
+ struct virtchnl_vsi_resource *vsi_res; /* LAN VSI */
+ uint64_t supported_rxdid;
+
+ volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
+ uint32_t cmd_retval; /* return value of the cmd response from PF */
+ uint8_t *aq_resp; /* buffer to store the adminq response from PF */
+
+ /* Event from pf */
+ bool dev_closed;
+ bool link_up;
+ uint32_t link_speed;
+
+ struct iavf_vsi vsi;
+ bool vf_reset;
+ uint64_t flags;
+
+ uint8_t *rss_lut;
+ uint8_t *rss_key;
+ uint16_t nb_msix; /* number of MSI-X interrupts on Rx */
+	uint16_t msix_base; /* msix vector base */
+ /* queue bitmask for each vector */
+ uint16_t rxq_map[IAVF_MAX_MSIX_VECTORS];
+ struct iavf_flow_list flow_list;
+ rte_spinlock_t flow_ops_lock;
+ struct iavf_parser_list rss_parser_list;
+ struct iavf_parser_list dist_parser_list;
+
+ struct iavf_fdir_info fdir; /* flow director info */
+};
+
+#define IAVF_MAX_PKT_TYPE 1024
+
+/* Structure to store private data for each VF instance. */
+struct iavf_adapter {
+ struct iavf_hw hw;
+ struct rte_eth_dev *eth_dev;
+ struct iavf_info vf;
+
+ bool rx_bulk_alloc_allowed;
+ /* For vector PMD */
+ bool rx_vec_allowed;
+ bool tx_vec_allowed;
+ const uint32_t *ptype_tbl;
+ bool stopped;
+ uint16_t fdir_ref_cnt;
+};
+
+/* IAVF_DEV_PRIVATE_TO */
+#define IAVF_DEV_PRIVATE_TO_ADAPTER(adapter) \
+ ((struct iavf_adapter *)adapter)
+#define IAVF_DEV_PRIVATE_TO_VF(adapter) \
+ (&((struct iavf_adapter *)adapter)->vf)
+#define IAVF_DEV_PRIVATE_TO_HW(adapter) \
+ (&((struct iavf_adapter *)adapter)->hw)
+
+/* IAVF_VSI_TO */
+#define IAVF_VSI_TO_HW(vsi) \
+ (&(((struct iavf_vsi *)vsi)->adapter->hw))
+#define IAVF_VSI_TO_VF(vsi) \
+ (&(((struct iavf_vsi *)vsi)->adapter->vf))
+#define IAVF_VSI_TO_ETH_DEV(vsi) \
+ (((struct iavf_vsi *)vsi)->adapter->eth_dev)
+
+static inline void
+iavf_init_adminq_parameter(struct iavf_hw *hw)
+{
+ hw->aq.num_arq_entries = IAVF_AQ_LEN;
+ hw->aq.num_asq_entries = IAVF_AQ_LEN;
+ hw->aq.arq_buf_size = IAVF_AQ_BUF_SZ;
+ hw->aq.asq_buf_size = IAVF_AQ_BUF_SZ;
+}
+
+static inline uint16_t
+iavf_calc_itr_interval(int16_t interval)
+{
+ if (interval < 0 || interval > IAVF_QUEUE_ITR_INTERVAL_MAX)
+ interval = IAVF_QUEUE_ITR_INTERVAL_DEFAULT;
+
+ /* Convert to hardware count, as writing each 1 represents 2 us */
+ return interval / 2;
+}
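+/* For example, iavf_calc_itr_interval(IAVF_QUEUE_ITR_INTERVAL_DEFAULT)
+ * returns 16, i.e. the 32 us default expressed in 2 us hardware units.
+ */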
+
+/* structure used for sending and checking response of virtchnl ops */
+struct iavf_cmd_info {
+ enum virtchnl_ops ops;
+ uint8_t *in_args; /* buffer for sending */
+ uint32_t in_args_size; /* buffer size for sending */
+ uint8_t *out_buffer; /* buffer for response */
+ uint32_t out_size; /* buffer size for response */
+};
+
+/* Notify that the current command is done. Only call this after
+ * _atomic_set_cmd() has succeeded.
+ */
+static inline void
+_notify_cmd(struct iavf_info *vf, uint32_t msg_ret)
+{
+ vf->cmd_retval = msg_ret;
+ rte_wmb();
+ vf->pend_cmd = VIRTCHNL_OP_UNKNOWN;
+}
+
+/* Clear the current command. Only call this after
+ * _atomic_set_cmd() has succeeded.
+ */
+static inline void
+_clear_cmd(struct iavf_info *vf)
+{
+ rte_wmb();
+ vf->pend_cmd = VIRTCHNL_OP_UNKNOWN;
+ vf->cmd_retval = VIRTCHNL_STATUS_SUCCESS;
+}
+
+/* Check whether a cmd is still in execution. If none, set the new command. */
+static inline int
+_atomic_set_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
+{
+ int ret = rte_atomic32_cmpset(&vf->pend_cmd, VIRTCHNL_OP_UNKNOWN, ops);
+
+ if (!ret)
+ PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
+
+ return !ret;
+}
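+/* Rough usage sketch (assuming the adminq send helper from the common iavf
+ * code): the caller claims the command slot, sends the request, and either
+ * clears the slot on a send error or lets the interrupt path complete it
+ * through _notify_cmd():
+ *
+ *	if (_atomic_set_cmd(vf, ops))
+ *		return -1;
+ *	err = iavf_aq_send_msg_to_pf(hw, ops, IAVF_SUCCESS, msg, msglen, NULL);
+ *	if (err)
+ *		_clear_cmd(vf);
+ */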
+
+int iavf_check_api_version(struct iavf_adapter *adapter);
+int iavf_get_vf_resource(struct iavf_adapter *adapter);
+void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev);
+int iavf_enable_vlan_strip(struct iavf_adapter *adapter);
+int iavf_disable_vlan_strip(struct iavf_adapter *adapter);
+int iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
+ bool rx, bool on);
+int iavf_enable_queues(struct iavf_adapter *adapter);
+int iavf_disable_queues(struct iavf_adapter *adapter);
+int iavf_configure_rss_lut(struct iavf_adapter *adapter);
+int iavf_configure_rss_key(struct iavf_adapter *adapter);
+int iavf_configure_queues(struct iavf_adapter *adapter);
+int iavf_get_supported_rxdid(struct iavf_adapter *adapter);
+int iavf_config_irq_map(struct iavf_adapter *adapter);
+void iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add);
+int iavf_dev_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete);
+int iavf_query_stats(struct iavf_adapter *adapter,
+ struct virtchnl_eth_stats **pstats);
+int iavf_config_promisc(struct iavf_adapter *adapter, bool enable_unicast,
+ bool enable_multicast);
+int iavf_add_del_eth_addr(struct iavf_adapter *adapter,
+ struct rte_ether_addr *addr, bool add);
+int iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add);
+int iavf_fdir_add(struct iavf_adapter *adapter, struct iavf_fdir_conf *filter);
+int iavf_fdir_del(struct iavf_adapter *adapter, struct iavf_fdir_conf *filter);
+int iavf_fdir_check(struct iavf_adapter *adapter,
+ struct iavf_fdir_conf *filter);
+int iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
+ struct virtchnl_rss_cfg *rss_cfg, bool add);
+#endif /* _IAVF_ETHDEV_H_ */
diff --git a/src/spdk/dpdk/drivers/net/iavf/iavf_ethdev.c b/src/spdk/dpdk/drivers/net/iavf/iavf_ethdev.c
new file mode 100644
index 000000000..e09efffd1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/iavf/iavf_ethdev.c
@@ -0,0 +1,1586 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+
+#include <rte_interrupts.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_eal.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_dev.h>
+
+#include "iavf.h"
+#include "iavf_rxtx.h"
+#include "iavf_generic_flow.h"
+
+static int iavf_dev_configure(struct rte_eth_dev *dev);
+static int iavf_dev_start(struct rte_eth_dev *dev);
+static void iavf_dev_stop(struct rte_eth_dev *dev);
+static void iavf_dev_close(struct rte_eth_dev *dev);
+static int iavf_dev_reset(struct rte_eth_dev *dev);
+static int iavf_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static const uint32_t *iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+static int iavf_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+static int iavf_dev_stats_reset(struct rte_eth_dev *dev);
+static int iavf_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static int iavf_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static int iavf_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static int iavf_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int iavf_dev_add_mac_addr(struct rte_eth_dev *dev,
+ struct rte_ether_addr *addr,
+ uint32_t index,
+ uint32_t pool);
+static void iavf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
+static int iavf_dev_vlan_filter_set(struct rte_eth_dev *dev,
+ uint16_t vlan_id, int on);
+static int iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static int iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static int iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+static int iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+static int iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+static int iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mac_addr);
+static int iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+static int iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+static int iavf_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg);
+
+
+int iavf_logtype_init;
+int iavf_logtype_driver;
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_RX
+int iavf_logtype_rx;
+#endif
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX
+int iavf_logtype_tx;
+#endif
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX_FREE
+int iavf_logtype_tx_free;
+#endif
+
+static const struct rte_pci_id pci_id_iavf_map[] = {
+ { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+static const struct eth_dev_ops iavf_eth_dev_ops = {
+ .dev_configure = iavf_dev_configure,
+ .dev_start = iavf_dev_start,
+ .dev_stop = iavf_dev_stop,
+ .dev_close = iavf_dev_close,
+ .dev_reset = iavf_dev_reset,
+ .dev_infos_get = iavf_dev_info_get,
+ .dev_supported_ptypes_get = iavf_dev_supported_ptypes_get,
+ .link_update = iavf_dev_link_update,
+ .stats_get = iavf_dev_stats_get,
+ .stats_reset = iavf_dev_stats_reset,
+ .promiscuous_enable = iavf_dev_promiscuous_enable,
+ .promiscuous_disable = iavf_dev_promiscuous_disable,
+ .allmulticast_enable = iavf_dev_allmulticast_enable,
+ .allmulticast_disable = iavf_dev_allmulticast_disable,
+ .mac_addr_add = iavf_dev_add_mac_addr,
+ .mac_addr_remove = iavf_dev_del_mac_addr,
+ .vlan_filter_set = iavf_dev_vlan_filter_set,
+ .vlan_offload_set = iavf_dev_vlan_offload_set,
+ .rx_queue_start = iavf_dev_rx_queue_start,
+ .rx_queue_stop = iavf_dev_rx_queue_stop,
+ .tx_queue_start = iavf_dev_tx_queue_start,
+ .tx_queue_stop = iavf_dev_tx_queue_stop,
+ .rx_queue_setup = iavf_dev_rx_queue_setup,
+ .rx_queue_release = iavf_dev_rx_queue_release,
+ .tx_queue_setup = iavf_dev_tx_queue_setup,
+ .tx_queue_release = iavf_dev_tx_queue_release,
+ .mac_addr_set = iavf_dev_set_default_mac_addr,
+ .reta_update = iavf_dev_rss_reta_update,
+ .reta_query = iavf_dev_rss_reta_query,
+ .rss_hash_update = iavf_dev_rss_hash_update,
+ .rss_hash_conf_get = iavf_dev_rss_hash_conf_get,
+ .rxq_info_get = iavf_dev_rxq_info_get,
+ .txq_info_get = iavf_dev_txq_info_get,
+ .rx_queue_count = iavf_dev_rxq_count,
+ .rx_descriptor_status = iavf_dev_rx_desc_status,
+ .tx_descriptor_status = iavf_dev_tx_desc_status,
+ .mtu_set = iavf_dev_mtu_set,
+ .rx_queue_intr_enable = iavf_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = iavf_dev_rx_queue_intr_disable,
+ .filter_ctrl = iavf_dev_filter_ctrl,
+};
+
+static int
+iavf_dev_configure(struct rte_eth_dev *dev)
+{
+ struct iavf_adapter *ad =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+ struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+
+ ad->rx_bulk_alloc_allowed = true;
+	/* Initialize to TRUE. If any Rx queue doesn't meet the
+	 * vector Rx/Tx preconditions, it will be reset.
+	 */
+ ad->rx_vec_allowed = true;
+ ad->tx_vec_allowed = true;
+
+ if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
+ /* Vlan stripping setting */
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) {
+ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ iavf_enable_vlan_strip(ad);
+ else
+ iavf_disable_vlan_strip(ad);
+ }
+ return 0;
+}
+
+static int
+iavf_init_rss(struct iavf_adapter *adapter)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ struct rte_eth_rss_conf *rss_conf;
+ uint8_t i, j, nb_q;
+ int ret;
+
+ rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
+ nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues,
+ IAVF_MAX_NUM_QUEUES);
+
+ if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
+ PMD_DRV_LOG(DEBUG, "RSS is not supported");
+ return -ENOTSUP;
+ }
+ if (adapter->eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+ PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
+ /* set all lut items to default queue */
+ for (i = 0; i < vf->vf_res->rss_lut_size; i++)
+ vf->rss_lut[i] = 0;
+ ret = iavf_configure_rss_lut(adapter);
+ return ret;
+ }
+
+	/* In IAVF, RSS enablement is controlled by the PF driver; it cannot
+	 * be configured based on rss_conf->rss_hf.
+	 */
+
+ /* configure RSS key */
+ if (!rss_conf->rss_key) {
+ /* Calculate the default hash key */
+		for (i = 0; i < vf->vf_res->rss_key_size; i++)
+ vf->rss_key[i] = (uint8_t)rte_rand();
+ } else
+ rte_memcpy(vf->rss_key, rss_conf->rss_key,
+ RTE_MIN(rss_conf->rss_key_len,
+ vf->vf_res->rss_key_size));
+
+ /* init RSS LUT table */
+ for (i = 0, j = 0; i < vf->vf_res->rss_lut_size; i++, j++) {
+ if (j >= nb_q)
+ j = 0;
+ vf->rss_lut[i] = j;
+ }
+	/* send virtchnl ops to configure RSS */
+ ret = iavf_configure_rss_lut(adapter);
+ if (ret)
+ return ret;
+ ret = iavf_configure_rss_key(adapter);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int
+iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
+{
+ struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_dev_data *dev_data = dev->data;
+ uint16_t buf_size, max_pkt_len, len;
+
+ buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
+
+ /* Calculate the maximum packet length allowed */
+ len = rxq->rx_buf_len * IAVF_MAX_CHAINED_RX_BUFFERS;
+ max_pkt_len = RTE_MIN(len, dev->data->dev_conf.rxmode.max_rx_pkt_len);
+
+ /* Check if the jumbo frame and maximum packet length are set
+ * correctly.
+ */
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ if (max_pkt_len <= RTE_ETHER_MAX_LEN ||
+ max_pkt_len > IAVF_FRAME_SIZE_MAX) {
+ PMD_DRV_LOG(ERR, "maximum packet length must be "
+ "larger than %u and smaller than %u, "
+ "as jumbo frame is enabled",
+ (uint32_t)RTE_ETHER_MAX_LEN,
+ (uint32_t)IAVF_FRAME_SIZE_MAX);
+ return -EINVAL;
+ }
+ } else {
+ if (max_pkt_len < RTE_ETHER_MIN_LEN ||
+ max_pkt_len > RTE_ETHER_MAX_LEN) {
+ PMD_DRV_LOG(ERR, "maximum packet length must be "
+ "larger than %u and smaller than %u, "
+ "as jumbo frame is disabled",
+ (uint32_t)RTE_ETHER_MIN_LEN,
+ (uint32_t)RTE_ETHER_MAX_LEN);
+ return -EINVAL;
+ }
+ }
+
+ rxq->max_pkt_len = max_pkt_len;
+ if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+ (rxq->max_pkt_len + 2 * IAVF_VLAN_TAG_SIZE) > buf_size) {
+ dev_data->scattered_rx = 1;
+ }
+ IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ IAVF_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static int
+iavf_init_queues(struct rte_eth_dev *dev)
+{
+ struct iavf_rx_queue **rxq =
+ (struct iavf_rx_queue **)dev->data->rx_queues;
+ int i, ret = IAVF_SUCCESS;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (!rxq[i] || !rxq[i]->q_set)
+ continue;
+ ret = iavf_init_rxq(dev, rxq[i]);
+ if (ret != IAVF_SUCCESS)
+ break;
+ }
+ /* set rx/tx function to vector/scatter/single-segment
+ * according to parameters
+ */
+ iavf_set_rx_function(dev);
+ iavf_set_tx_function(dev);
+
+ return ret;
+}
+
+static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
+ struct rte_intr_handle *intr_handle)
+{
+ struct iavf_adapter *adapter =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+ uint16_t interval, i;
+ int vec;
+
+ if (rte_intr_cap_multiple(intr_handle) &&
+ dev->data->dev_conf.intr_conf.rxq) {
+ if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
+ return -1;
+ }
+
+ if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int), 0);
+ if (!intr_handle->intr_vec) {
+ PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
+ dev->data->nb_rx_queues);
+ return -1;
+ }
+ }
+
+ if (!dev->data->dev_conf.intr_conf.rxq ||
+ !rte_intr_dp_is_en(intr_handle)) {
+		/* Rx interrupt is disabled; map the interrupt only for write-back */
+ vf->nb_msix = 1;
+ if (vf->vf_res->vf_cap_flags &
+ VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
+			/* If WB_ON_ITR is supported, enable it */
+ vf->msix_base = IAVF_RX_VEC_START;
+ IAVF_WRITE_REG(hw,
+ IAVF_VFINT_DYN_CTLN1(vf->msix_base - 1),
+ IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK |
+ IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK);
+ } else {
+			/* If the WB_ON_ITR offload flag is not set, an
+			 * interrupt is needed for descriptor write-back.
+			 */
+ vf->msix_base = IAVF_MISC_VEC_ID;
+
+ /* set ITR to max */
+ interval = iavf_calc_itr_interval(
+ IAVF_QUEUE_ITR_INTERVAL_MAX);
+ IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
+ IAVF_VFINT_DYN_CTL01_INTENA_MASK |
+ (IAVF_ITR_INDEX_DEFAULT <<
+ IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
+ (interval <<
+ IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
+ }
+ IAVF_WRITE_FLUSH(hw);
+ /* map all queues to the same interrupt */
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ vf->rxq_map[vf->msix_base] |= 1 << i;
+ } else {
+ if (!rte_intr_allow_others(intr_handle)) {
+ vf->nb_msix = 1;
+ vf->msix_base = IAVF_MISC_VEC_ID;
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ vf->rxq_map[vf->msix_base] |= 1 << i;
+ intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID;
+ }
+ PMD_DRV_LOG(DEBUG,
+				    "vector %u is mapped to all Rx queues",
+ vf->msix_base);
+ } else {
+			/* If Rx interrupts are required and multiple
+			 * interrupts can be used, vectors start from 1.
+			 */
+ vf->nb_msix = RTE_MIN(vf->vf_res->max_vectors,
+ intr_handle->nb_efd);
+ vf->msix_base = IAVF_RX_VEC_START;
+ vec = IAVF_RX_VEC_START;
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ vf->rxq_map[vec] |= 1 << i;
+ intr_handle->intr_vec[i] = vec++;
+ if (vec >= vf->nb_msix)
+ vec = IAVF_RX_VEC_START;
+ }
+ PMD_DRV_LOG(DEBUG,
+				    "%u vectors are mapped to %u Rx queues",
+ vf->nb_msix, dev->data->nb_rx_queues);
+ }
+ }
+
+ if (iavf_config_irq_map(adapter)) {
+ PMD_DRV_LOG(ERR, "config interrupt mapping failed");
+ return -1;
+ }
+ return 0;
+}
+
+static int
+iavf_start_queues(struct rte_eth_dev *dev)
+{
+ struct iavf_rx_queue *rxq;
+ struct iavf_tx_queue *txq;
+ int i;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (txq->tx_deferred_start)
+ continue;
+ if (iavf_dev_tx_queue_start(dev, i) != 0) {
+			PMD_DRV_LOG(ERR, "Failed to start Tx queue %u", i);
+ return -1;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ if (rxq->rx_deferred_start)
+ continue;
+ if (iavf_dev_rx_queue_start(dev, i) != 0) {
+			PMD_DRV_LOG(ERR, "Failed to start Rx queue %u", i);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+iavf_dev_start(struct rte_eth_dev *dev)
+{
+ struct iavf_adapter *adapter =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct rte_intr_handle *intr_handle = dev->intr_handle;
+
+ PMD_INIT_FUNC_TRACE();
+
+ adapter->stopped = 0;
+
+ vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
+ dev->data->nb_tx_queues);
+
+ if (iavf_init_queues(dev) != 0) {
+		PMD_DRV_LOG(ERR, "failed to initialize queues");
+ return -1;
+ }
+
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ if (iavf_init_rss(adapter) != 0) {
+ PMD_DRV_LOG(ERR, "configure rss failed");
+ goto err_rss;
+ }
+ }
+
+ if (iavf_configure_queues(adapter) != 0) {
+ PMD_DRV_LOG(ERR, "configure queues failed");
+ goto err_queue;
+ }
+
+ if (iavf_config_rx_queues_irqs(dev, intr_handle) != 0) {
+ PMD_DRV_LOG(ERR, "configure irq failed");
+ goto err_queue;
+ }
+	/* re-enable the interrupt, because the efd assignment may have changed */
+ if (dev->data->dev_conf.intr_conf.rxq != 0) {
+ rte_intr_disable(intr_handle);
+ rte_intr_enable(intr_handle);
+ }
+
+ /* Set all mac addrs */
+ iavf_add_del_all_mac_addr(adapter, true);
+
+ if (iavf_start_queues(dev) != 0) {
+ PMD_DRV_LOG(ERR, "enable queues failed");
+ goto err_mac;
+ }
+
+ return 0;
+
+err_mac:
+ iavf_add_del_all_mac_addr(adapter, false);
+err_queue:
+err_rss:
+ return -1;
+}
+
+static void
+iavf_dev_stop(struct rte_eth_dev *dev)
+{
+ struct iavf_adapter *adapter =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct rte_intr_handle *intr_handle = dev->intr_handle;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (adapter->stopped == 1)
+ return;
+
+ iavf_stop_queues(dev);
+
+ /* Disable the interrupt for Rx */
+ rte_intr_efd_disable(intr_handle);
+ /* Rx interrupt vector mapping free */
+ if (intr_handle->intr_vec) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+
+ /* remove all mac addrs */
+ iavf_add_del_all_mac_addr(adapter, false);
+ adapter->stopped = 1;
+}
+
+static int
+iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
+ dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
+ dev_info->min_rx_bufsize = IAVF_BUF_SIZE_MIN;
+ dev_info->max_rx_pktlen = IAVF_FRAME_SIZE_MAX;
+ dev_info->hash_key_size = vf->vf_res->rss_key_size;
+ dev_info->reta_size = vf->vf_res->rss_lut_size;
+ dev_info->flow_type_rss_offloads = IAVF_RSS_OFFLOAD_ALL;
+ dev_info->max_mac_addrs = IAVF_NUM_MACADDR_MAX;
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_QINQ_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_RSS_HASH;
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_QINQ_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO |
+ DEV_TX_OFFLOAD_IPIP_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ .offloads = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_free_thresh = IAVF_DEFAULT_TX_FREE_THRESH,
+ .tx_rs_thresh = IAVF_DEFAULT_TX_RS_THRESH,
+ .offloads = 0,
+ };
+
+ dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = IAVF_MAX_RING_DESC,
+ .nb_min = IAVF_MIN_RING_DESC,
+ .nb_align = IAVF_ALIGN_RING_DESC,
+ };
+
+ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = IAVF_MAX_RING_DESC,
+ .nb_min = IAVF_MIN_RING_DESC,
+ .nb_align = IAVF_ALIGN_RING_DESC,
+ };
+
+ return 0;
+}
+
+static const uint32_t *
+iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_ICMP,
+ RTE_PTYPE_L4_NONFRAG,
+ RTE_PTYPE_L4_SCTP,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+ return ptypes;
+}
+
+int
+iavf_dev_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete)
+{
+ struct rte_eth_link new_link;
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+	/* Only read the status info stored in the VF; the info is updated
+	 * when a LINK_CHANGE event is received from the PF via virtchnl.
+	 */
+ switch (vf->link_speed) {
+ case 10:
+ new_link.link_speed = ETH_SPEED_NUM_10M;
+ break;
+ case 100:
+ new_link.link_speed = ETH_SPEED_NUM_100M;
+ break;
+ case 1000:
+ new_link.link_speed = ETH_SPEED_NUM_1G;
+ break;
+ case 10000:
+ new_link.link_speed = ETH_SPEED_NUM_10G;
+ break;
+ case 20000:
+ new_link.link_speed = ETH_SPEED_NUM_20G;
+ break;
+ case 25000:
+ new_link.link_speed = ETH_SPEED_NUM_25G;
+ break;
+ case 40000:
+ new_link.link_speed = ETH_SPEED_NUM_40G;
+ break;
+ case 50000:
+ new_link.link_speed = ETH_SPEED_NUM_50G;
+ break;
+ case 100000:
+ new_link.link_speed = ETH_SPEED_NUM_100G;
+ break;
+ default:
+ new_link.link_speed = ETH_SPEED_NUM_NONE;
+ break;
+ }
+
+ new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ new_link.link_status = vf->link_up ? ETH_LINK_UP :
+ ETH_LINK_DOWN;
+ new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+ ETH_LINK_SPEED_FIXED);
+
+ if (rte_atomic64_cmpset((uint64_t *)&dev->data->dev_link,
+ *(uint64_t *)&dev->data->dev_link,
+ *(uint64_t *)&new_link) == 0)
+ return -1;
+
+ return 0;
+}
+
+static int
+iavf_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct iavf_adapter *adapter =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ int ret;
+
+ if (vf->promisc_unicast_enabled)
+ return 0;
+
+ ret = iavf_config_promisc(adapter, true, vf->promisc_multicast_enabled);
+ if (!ret)
+ vf->promisc_unicast_enabled = true;
+ else
+ ret = -EAGAIN;
+
+ return ret;
+}
+
+static int
+iavf_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct iavf_adapter *adapter =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ int ret;
+
+ if (!vf->promisc_unicast_enabled)
+ return 0;
+
+ ret = iavf_config_promisc(adapter, false,
+ vf->promisc_multicast_enabled);
+ if (!ret)
+ vf->promisc_unicast_enabled = false;
+ else
+ ret = -EAGAIN;
+
+ return ret;
+}
+
+static int
+iavf_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct iavf_adapter *adapter =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ int ret;
+
+ if (vf->promisc_multicast_enabled)
+ return 0;
+
+ ret = iavf_config_promisc(adapter, vf->promisc_unicast_enabled, true);
+ if (!ret)
+ vf->promisc_multicast_enabled = true;
+ else
+ ret = -EAGAIN;
+
+ return ret;
+}
+
+static int
+iavf_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct iavf_adapter *adapter =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ int ret;
+
+ if (!vf->promisc_multicast_enabled)
+ return 0;
+
+ ret = iavf_config_promisc(adapter, vf->promisc_unicast_enabled, false);
+ if (!ret)
+ vf->promisc_multicast_enabled = false;
+ else
+ ret = -EAGAIN;
+
+ return ret;
+}
+
+static int
+iavf_dev_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr,
+ __rte_unused uint32_t index,
+ __rte_unused uint32_t pool)
+{
+ struct iavf_adapter *adapter =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ int err;
+
+ if (rte_is_zero_ether_addr(addr)) {
+ PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
+ return -EINVAL;
+ }
+
+ err = iavf_add_del_eth_addr(adapter, addr, true);
+ if (err) {
+ PMD_DRV_LOG(ERR, "fail to add MAC address");
+ return -EIO;
+ }
+
+ vf->mac_num++;
+
+ return 0;
+}
+
+static void
+iavf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct iavf_adapter *adapter =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ struct rte_ether_addr *addr;
+ int err;
+
+ addr = &dev->data->mac_addrs[index];
+
+ err = iavf_add_del_eth_addr(adapter, addr, false);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to delete MAC address");
+
+ vf->mac_num--;
+}
+
+static int
+iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct iavf_adapter *adapter =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ int err;
+
+ if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
+ return -ENOTSUP;
+
+ err = iavf_add_del_vlan(adapter, vlan_id, on);
+ if (err)
+ return -EIO;
+ return 0;
+}
+
+static int
+iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ struct iavf_adapter *adapter =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ int err;
+
+ if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
+ return -ENOTSUP;
+
+ /* Vlan stripping setting */
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ /* Enable or disable VLAN stripping */
+ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ err = iavf_enable_vlan_strip(adapter);
+ else
+ err = iavf_disable_vlan_strip(adapter);
+
+ if (err)
+ return -EIO;
+ }
+ return 0;
+}
+
+static int
+iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct iavf_adapter *adapter =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ uint8_t *lut;
+ uint16_t i, idx, shift;
+ int ret;
+
+ if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
+ return -ENOTSUP;
+
+ if (reta_size != vf->vf_res->rss_lut_size) {
+		PMD_DRV_LOG(ERR, "The size of the configured hash lookup "
+			"table (%d) doesn't match the number the hardware "
+			"can support (%d)", reta_size, vf->vf_res->rss_lut_size);
+ return -EINVAL;
+ }
+
+ lut = rte_zmalloc("rss_lut", reta_size, 0);
+ if (!lut) {
+ PMD_DRV_LOG(ERR, "No memory can be allocated");
+ return -ENOMEM;
+ }
+ /* store the old lut table temporarily */
+ rte_memcpy(lut, vf->rss_lut, reta_size);
+
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & (1ULL << shift))
+ lut[i] = reta_conf[idx].reta[shift];
+ }
+
+ rte_memcpy(vf->rss_lut, lut, reta_size);
+	/* send virtchnl ops to configure RSS */
+	ret = iavf_configure_rss_lut(adapter);
+	if (ret) /* revert */
+ rte_memcpy(vf->rss_lut, lut, reta_size);
+ rte_free(lut);
+
+ return ret;
+}
+
+static int
+iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct iavf_adapter *adapter =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ uint16_t i, idx, shift;
+
+ if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
+ return -ENOTSUP;
+
+ if (reta_size != vf->vf_res->rss_lut_size) {
+		PMD_DRV_LOG(ERR, "The size of the configured hash lookup "
+			"table (%d) doesn't match the number the hardware "
+			"can support (%d)", reta_size, vf->vf_res->rss_lut_size);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & (1ULL << shift))
+ reta_conf[idx].reta[shift] = vf->rss_lut[i];
+ }
+
+ return 0;
+}
+
+static int
+iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct iavf_adapter *adapter =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+
+ if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
+ return -ENOTSUP;
+
+ /* HENA setting, it is enabled by default, no change */
+ if (!rss_conf->rss_key || rss_conf->rss_key_len == 0) {
+ PMD_DRV_LOG(DEBUG, "No key to be configured");
+ return 0;
+ } else if (rss_conf->rss_key_len != vf->vf_res->rss_key_size) {
+		PMD_DRV_LOG(ERR, "The size of the configured hash key "
+			"(%d) doesn't match the size the hardware "
+			"can support (%d)", rss_conf->rss_key_len,
+			vf->vf_res->rss_key_size);
+ return -EINVAL;
+ }
+
+ rte_memcpy(vf->rss_key, rss_conf->rss_key, rss_conf->rss_key_len);
+
+ return iavf_configure_rss_key(adapter);
+}
+
+static int
+iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct iavf_adapter *adapter =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+
+ if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
+ return -ENOTSUP;
+
+ /* Just set it to default value now. */
+ rss_conf->rss_hf = IAVF_RSS_OFFLOAD_ALL;
+
+ if (!rss_conf->rss_key)
+ return 0;
+
+ rss_conf->rss_key_len = vf->vf_res->rss_key_size;
+ rte_memcpy(rss_conf->rss_key, vf->rss_key, rss_conf->rss_key_len);
+
+ return 0;
+}
+
+static int
+iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ uint32_t frame_size = mtu + IAVF_ETH_OVERHEAD;
+ int ret = 0;
+
+ if (mtu < RTE_ETHER_MIN_MTU || frame_size > IAVF_FRAME_SIZE_MAX)
+ return -EINVAL;
+
+	/* MTU setting is forbidden while the port is started */
+ if (dev->data->dev_started) {
+ PMD_DRV_LOG(ERR, "port must be stopped before configuration");
+ return -EBUSY;
+ }
+
+ if (frame_size > RTE_ETHER_MAX_LEN)
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+ return ret;
+}
+
+static int
+iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mac_addr)
+{
+ struct iavf_adapter *adapter =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+ struct rte_ether_addr *perm_addr, *old_addr;
+ int ret;
+
+ old_addr = (struct rte_ether_addr *)hw->mac.addr;
+ perm_addr = (struct rte_ether_addr *)hw->mac.perm_addr;
+
+ if (rte_is_same_ether_addr(mac_addr, old_addr))
+ return 0;
+
+ /* If the MAC address is configured by host, skip the setting */
+ if (rte_is_valid_assigned_ether_addr(perm_addr))
+ return -EPERM;
+
+ ret = iavf_add_del_eth_addr(adapter, old_addr, false);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Fail to delete old MAC:"
+ " %02X:%02X:%02X:%02X:%02X:%02X",
+ old_addr->addr_bytes[0],
+ old_addr->addr_bytes[1],
+ old_addr->addr_bytes[2],
+ old_addr->addr_bytes[3],
+ old_addr->addr_bytes[4],
+ old_addr->addr_bytes[5]);
+
+ ret = iavf_add_del_eth_addr(adapter, mac_addr, true);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Fail to add new MAC:"
+ " %02X:%02X:%02X:%02X:%02X:%02X",
+ mac_addr->addr_bytes[0],
+ mac_addr->addr_bytes[1],
+ mac_addr->addr_bytes[2],
+ mac_addr->addr_bytes[3],
+ mac_addr->addr_bytes[4],
+ mac_addr->addr_bytes[5]);
+
+ if (ret)
+ return -EIO;
+
+ rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)hw->mac.addr);
+ return 0;
+}
+
+static void
+iavf_stat_update_48(uint64_t *offset, uint64_t *stat)
+{
+ if (*stat >= *offset)
+ *stat = *stat - *offset;
+ else
+ *stat = (uint64_t)((*stat +
+ ((uint64_t)1 << IAVF_48_BIT_WIDTH)) - *offset);
+
+ *stat &= IAVF_48_BIT_MASK;
+}
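+/* Example of the wrap-around handling above: with an offset of
+ * 0xFFFFFFFFFFF0 and a current 48-bit reading of 0x10, the reported delta is
+ * (0x10 + 2^48) - 0xFFFFFFFFFFF0 = 0x20.
+ */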
+
+static void
+iavf_stat_update_32(uint64_t *offset, uint64_t *stat)
+{
+ if (*stat >= *offset)
+ *stat = (uint64_t)(*stat - *offset);
+ else
+ *stat = (uint64_t)((*stat +
+ ((uint64_t)1 << IAVF_32_BIT_WIDTH)) - *offset);
+}
+
+static void
+iavf_update_stats(struct iavf_vsi *vsi, struct virtchnl_eth_stats *nes)
+{
+ struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset;
+
+ iavf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
+ iavf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
+ iavf_stat_update_48(&oes->rx_multicast, &nes->rx_multicast);
+ iavf_stat_update_48(&oes->rx_broadcast, &nes->rx_broadcast);
+ iavf_stat_update_32(&oes->rx_discards, &nes->rx_discards);
+ iavf_stat_update_48(&oes->tx_bytes, &nes->tx_bytes);
+ iavf_stat_update_48(&oes->tx_unicast, &nes->tx_unicast);
+ iavf_stat_update_48(&oes->tx_multicast, &nes->tx_multicast);
+ iavf_stat_update_48(&oes->tx_broadcast, &nes->tx_broadcast);
+ iavf_stat_update_32(&oes->tx_errors, &nes->tx_errors);
+ iavf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
+}
+
+static int
+iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct iavf_adapter *adapter =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct iavf_vsi *vsi = &vf->vsi;
+ struct virtchnl_eth_stats *pstats = NULL;
+ int ret;
+
+ ret = iavf_query_stats(adapter, &pstats);
+ if (ret == 0) {
+ iavf_update_stats(vsi, pstats);
+ stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
+ pstats->rx_broadcast - pstats->rx_discards;
+ stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
+ pstats->tx_unicast;
+ stats->imissed = pstats->rx_discards;
+ stats->oerrors = pstats->tx_errors + pstats->tx_discards;
+ stats->ibytes = pstats->rx_bytes;
+ stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
+ stats->obytes = pstats->tx_bytes;
+ } else {
+ PMD_DRV_LOG(ERR, "Get statistics failed");
+ }
+ return ret;
+}
+
+static int
+iavf_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ int ret;
+ struct iavf_adapter *adapter =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct iavf_vsi *vsi = &vf->vsi;
+ struct virtchnl_eth_stats *pstats = NULL;
+
+ /* read stat values to clear hardware registers */
+ ret = iavf_query_stats(adapter, &pstats);
+ if (ret != 0)
+ return ret;
+
+	/* set the stats offset based on current values */
+ vsi->eth_stats_offset = *pstats;
+
+ return 0;
+}
+
+static int
+iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct iavf_adapter *adapter =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+ uint16_t msix_intr;
+
+ msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
+ if (msix_intr == IAVF_MISC_VEC_ID) {
+ PMD_DRV_LOG(INFO, "MISC is also enabled for control");
+ IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
+ IAVF_VFINT_DYN_CTL01_INTENA_MASK |
+ IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
+ IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
+ } else {
+ IAVF_WRITE_REG(hw,
+ IAVF_VFINT_DYN_CTLN1
+ (msix_intr - IAVF_RX_VEC_START),
+ IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
+ IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
+ IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
+ }
+
+ IAVF_WRITE_FLUSH(hw);
+
+ rte_intr_ack(&pci_dev->intr_handle);
+
+ return 0;
+}
+
+static int
+iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t msix_intr;
+
+ msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
+ if (msix_intr == IAVF_MISC_VEC_ID) {
+ PMD_DRV_LOG(ERR, "MISC is used for control, cannot disable it");
+ return -EIO;
+ }
+
+ IAVF_WRITE_REG(hw,
+ IAVF_VFINT_DYN_CTLN1(msix_intr - IAVF_RX_VEC_START),
+ 0);
+
+ IAVF_WRITE_FLUSH(hw);
+ return 0;
+}
+
+static int
+iavf_check_vf_reset_done(struct iavf_hw *hw)
+{
+ int i, reset;
+
+ for (i = 0; i < IAVF_RESET_WAIT_CNT; i++) {
+ reset = IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
+ IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
+ reset = reset >> IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT;
+ if (reset == VIRTCHNL_VFR_VFACTIVE ||
+ reset == VIRTCHNL_VFR_COMPLETED)
+ break;
+ rte_delay_ms(20);
+ }
+
+ if (i >= IAVF_RESET_WAIT_CNT)
+ return -1;
+
+ return 0;
+}
+
+static int
+iavf_init_vf(struct rte_eth_dev *dev)
+{
+ int err, bufsz;
+ struct iavf_adapter *adapter =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ err = iavf_set_mac_type(hw);
+ if (err) {
+ PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
+ goto err;
+ }
+
+ err = iavf_check_vf_reset_done(hw);
+ if (err) {
+ PMD_INIT_LOG(ERR, "VF is still resetting");
+ goto err;
+ }
+
+ iavf_init_adminq_parameter(hw);
+ err = iavf_init_adminq(hw);
+ if (err) {
+ PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
+ goto err;
+ }
+
+ vf->aq_resp = rte_zmalloc("vf_aq_resp", IAVF_AQ_BUF_SZ, 0);
+ if (!vf->aq_resp) {
+ PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
+ goto err_aq;
+ }
+ if (iavf_check_api_version(adapter) != 0) {
+		PMD_INIT_LOG(ERR, "check API version failed");
+ goto err_api;
+ }
+
+ bufsz = sizeof(struct virtchnl_vf_resource) +
+ (IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
+ vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
+ if (!vf->vf_res) {
+ PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
+ goto err_api;
+ }
+ if (iavf_get_vf_resource(adapter) != 0) {
+		PMD_INIT_LOG(ERR, "iavf_get_vf_resource failed");
+ goto err_alloc;
+ }
+	/* Allocate memory for RSS info */
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ vf->rss_key = rte_zmalloc("rss_key",
+ vf->vf_res->rss_key_size, 0);
+ if (!vf->rss_key) {
+ PMD_INIT_LOG(ERR, "unable to allocate rss_key memory");
+ goto err_rss;
+ }
+ vf->rss_lut = rte_zmalloc("rss_lut",
+ vf->vf_res->rss_lut_size, 0);
+ if (!vf->rss_lut) {
+ PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory");
+ goto err_rss;
+ }
+ }
+
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
+ if (iavf_get_supported_rxdid(adapter) != 0) {
+			PMD_INIT_LOG(ERR, "failed to get supported rxdid");
+ goto err_rss;
+ }
+ }
+
+ return 0;
+err_rss:
+ rte_free(vf->rss_key);
+ rte_free(vf->rss_lut);
+err_alloc:
+ rte_free(vf->vf_res);
+ vf->vsi_res = NULL;
+err_api:
+ rte_free(vf->aq_resp);
+err_aq:
+ iavf_shutdown_adminq(hw);
+err:
+ return -1;
+}
+
+/* Enable default admin queue interrupt setting */
+static inline void
+iavf_enable_irq0(struct iavf_hw *hw)
+{
+ /* Enable admin queue interrupt trigger */
+ IAVF_WRITE_REG(hw, IAVF_VFINT_ICR0_ENA1,
+ IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);
+
+ IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
+ IAVF_VFINT_DYN_CTL01_INTENA_MASK |
+ IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
+ IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
+
+ IAVF_WRITE_FLUSH(hw);
+}
+
+static inline void
+iavf_disable_irq0(struct iavf_hw *hw)
+{
+ /* Disable all interrupt types */
+ IAVF_WRITE_REG(hw, IAVF_VFINT_ICR0_ENA1, 0);
+ IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
+ IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
+ IAVF_WRITE_FLUSH(hw);
+}
+
+static void
+iavf_dev_interrupt_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ iavf_disable_irq0(hw);
+
+ iavf_handle_virtchnl_msg(dev);
+
+ iavf_enable_irq0(hw);
+}
+
+static int
+iavf_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ int ret = 0;
+
+ if (!dev)
+ return -EINVAL;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &iavf_flow_ops;
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+
+static int
+iavf_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct iavf_adapter *adapter =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+ struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ int ret = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* assign ops func pointer */
+ eth_dev->dev_ops = &iavf_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &iavf_recv_pkts;
+ eth_dev->tx_pkt_burst = &iavf_xmit_pkts;
+ eth_dev->tx_pkt_prepare = &iavf_prep_pkts;
+
+ /* For secondary processes, we don't initialise any further as primary
+ * has already done this work. Only check if we need a different RX
+ * and TX function.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ iavf_set_rx_function(eth_dev);
+ iavf_set_tx_function(eth_dev);
+ return 0;
+ }
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ hw->vendor_id = pci_dev->id.vendor_id;
+ hw->device_id = pci_dev->id.device_id;
+ hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
+ hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
+ hw->bus.bus_id = pci_dev->addr.bus;
+ hw->bus.device = pci_dev->addr.devid;
+ hw->bus.func = pci_dev->addr.function;
+ hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+ hw->back = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+ adapter->eth_dev = eth_dev;
+ adapter->stopped = 1;
+
+ if (iavf_init_vf(eth_dev) != 0) {
+ PMD_INIT_LOG(ERR, "Init vf failed");
+ return -1;
+ }
+
+ /* set default ptype table */
+ adapter->ptype_tbl = iavf_get_default_ptype_table();
+
+ /* copy mac addr */
+ eth_dev->data->mac_addrs = rte_zmalloc(
+ "iavf_mac", RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX, 0);
+ if (!eth_dev->data->mac_addrs) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
+ " store MAC addresses",
+ RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX);
+ return -ENOMEM;
+ }
+ /* If the MAC address is not configured by host,
+ * generate a random one.
+ */
+ if (!rte_is_valid_assigned_ether_addr(
+ (struct rte_ether_addr *)hw->mac.addr))
+ rte_eth_random_addr(hw->mac.addr);
+ rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
+ &eth_dev->data->mac_addrs[0]);
+
+ /* register callback func to eal lib */
+ rte_intr_callback_register(&pci_dev->intr_handle,
+ iavf_dev_interrupt_handler,
+ (void *)eth_dev);
+
+ /* enable uio intr after callback register */
+ rte_intr_enable(&pci_dev->intr_handle);
+
+ /* configure and enable device interrupt */
+ iavf_enable_irq0(hw);
+
+ ret = iavf_flow_init(adapter);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to initialize flow");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+iavf_dev_close(struct rte_eth_dev *dev)
+{
+ struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct iavf_adapter *adapter =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ iavf_dev_stop(dev);
+ iavf_flow_flush(dev, NULL);
+ iavf_shutdown_adminq(hw);
+ /* disable uio intr before callback unregister */
+ rte_intr_disable(intr_handle);
+
+ /* unregister callback func from eal lib */
+ rte_intr_callback_unregister(intr_handle,
+ iavf_dev_interrupt_handler, dev);
+ iavf_disable_irq0(hw);
+
+ iavf_flow_uninit(adapter);
+}
+
+static int
+iavf_dev_uninit(struct rte_eth_dev *dev)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ dev->dev_ops = NULL;
+ dev->rx_pkt_burst = NULL;
+ dev->tx_pkt_burst = NULL;
+ iavf_dev_close(dev);
+
+ rte_free(vf->vf_res);
+ vf->vsi_res = NULL;
+ vf->vf_res = NULL;
+
+ rte_free(vf->aq_resp);
+ vf->aq_resp = NULL;
+
+ if (vf->rss_lut) {
+ rte_free(vf->rss_lut);
+ vf->rss_lut = NULL;
+ }
+ if (vf->rss_key) {
+ rte_free(vf->rss_key);
+ vf->rss_key = NULL;
+ }
+
+ return 0;
+}
+
+/*
+ * Reset VF device only to re-initialize resources in PMD layer
+ */
+static int
+iavf_dev_reset(struct rte_eth_dev *dev)
+{
+ int ret;
+
+ ret = iavf_dev_uninit(dev);
+ if (ret)
+ return ret;
+
+ return iavf_dev_init(dev);
+}
+
+static int
+iavf_dcf_cap_check_handler(__rte_unused const char *key,
+ const char *value, __rte_unused void *opaque)
+{
+ if (strcmp(value, "dcf"))
+ return -1;
+
+ return 0;
+}
+
+static int
+iavf_dcf_cap_selected(struct rte_devargs *devargs)
+{
+ struct rte_kvargs *kvlist;
+ const char *key = "cap";
+ int ret = 0;
+
+ if (devargs == NULL)
+ return 0;
+
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (kvlist == NULL)
+ return 0;
+
+ if (!rte_kvargs_count(kvlist, key))
+ goto exit;
+
+ /* dcf capability selected when there's a key-value pair: cap=dcf */
+ if (rte_kvargs_process(kvlist, key,
+ iavf_dcf_cap_check_handler, NULL) < 0)
+ goto exit;
+
+ ret = 1;
+
+exit:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
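+/* For example, a device probed with devargs such as "0000:18:01.0,cap=dcf"
+ * (the address is hypothetical) makes iavf_dcf_cap_selected() return 1, so
+ * the probe function below declines the device and leaves it to the DCF
+ * driver.
+ */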
+
+static int eth_iavf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ if (iavf_dcf_cap_selected(pci_dev->device.devargs))
+ return 1;
+
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct iavf_adapter), iavf_dev_init);
+}
+
+static int eth_iavf_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, iavf_dev_uninit);
+}
+
+/* Adaptive virtual function driver struct */
+static struct rte_pci_driver rte_iavf_pmd = {
+ .id_table = pci_id_iavf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = eth_iavf_pci_probe,
+ .remove = eth_iavf_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_iavf, rte_iavf_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_iavf, pci_id_iavf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_iavf, "* igb_uio | vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(net_iavf, "cap=dcf");
+RTE_INIT(iavf_init_log)
+{
+ iavf_logtype_init = rte_log_register("pmd.net.iavf.init");
+ if (iavf_logtype_init >= 0)
+ rte_log_set_level(iavf_logtype_init, RTE_LOG_NOTICE);
+ iavf_logtype_driver = rte_log_register("pmd.net.iavf.driver");
+ if (iavf_logtype_driver >= 0)
+ rte_log_set_level(iavf_logtype_driver, RTE_LOG_NOTICE);
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_RX
+ iavf_logtype_rx = rte_log_register("pmd.net.iavf.rx");
+ if (iavf_logtype_rx >= 0)
+ rte_log_set_level(iavf_logtype_rx, RTE_LOG_DEBUG);
+#endif
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX
+ iavf_logtype_tx = rte_log_register("pmd.net.iavf.tx");
+ if (iavf_logtype_tx >= 0)
+ rte_log_set_level(iavf_logtype_tx, RTE_LOG_DEBUG);
+#endif
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX_FREE
+ iavf_logtype_tx_free = rte_log_register("pmd.net.iavf.tx_free");
+ if (iavf_logtype_tx_free >= 0)
+ rte_log_set_level(iavf_logtype_tx_free, RTE_LOG_DEBUG);
+#endif
+}
diff --git a/src/spdk/dpdk/drivers/net/iavf/iavf_fdir.c b/src/spdk/dpdk/drivers/net/iavf/iavf_fdir.c
new file mode 100644
index 000000000..264c47d83
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/iavf/iavf_fdir.c
@@ -0,0 +1,971 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_tailq.h>
+
+#include "iavf.h"
+#include "iavf_generic_flow.h"
+#include "virtchnl.h"
+#include "iavf_rxtx.h"
+
+#define IAVF_FDIR_MAX_QREGION_SIZE 128
+
+#define IAVF_FDIR_IPV6_TC_OFFSET 20
+#define IAVF_IPV6_TC_MASK (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)
+
+#define IAVF_FDIR_INSET_ETH (\
+ IAVF_INSET_ETHERTYPE)
+
+#define IAVF_FDIR_INSET_ETH_IPV4 (\
+ IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+ IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
+ IAVF_INSET_IPV4_TTL)
+
+#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
+ IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+ IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
+ IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
+ IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+ IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
+ IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
+ IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+ IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
+ IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV6 (\
+ IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+ IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
+ IAVF_INSET_IPV6_HOP_LIMIT)
+
+#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
+ IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+ IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
+ IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
+ IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+ IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
+ IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
+ IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+ IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
+ IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
+
+#define IAVF_FDIR_INSET_GTPU (\
+ IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+ IAVF_INSET_GTPU_TEID)
+
+#define IAVF_FDIR_INSET_GTPU_EH (\
+ IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+ IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)
+
+#define IAVF_FDIR_INSET_L2TPV3OIP (\
+ IAVF_L2TPV3OIP_SESSION_ID)
+
+#define IAVF_FDIR_INSET_ESP (\
+ IAVF_INSET_ESP_SPI)
+
+#define IAVF_FDIR_INSET_AH (\
+ IAVF_INSET_AH_SPI)
+
+#define IAVF_FDIR_INSET_IPV4_NATT_ESP (\
+ IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+ IAVF_INSET_ESP_SPI)
+
+#define IAVF_FDIR_INSET_IPV6_NATT_ESP (\
+ IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+ IAVF_INSET_ESP_SPI)
+
+#define IAVF_FDIR_INSET_PFCP (\
+ IAVF_INSET_PFCP_S_FIELD)
+
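+/*
+ * Each entry maps a supported pattern template to its input-set mask, i.e.
+ * the set of header fields the FDIR engine can program for that pattern.
+ */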
+static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
+ {iavf_pattern_ethertype, IAVF_FDIR_INSET_ETH, IAVF_INSET_NONE},
+ {iavf_pattern_eth_ipv4, IAVF_FDIR_INSET_ETH_IPV4, IAVF_INSET_NONE},
+ {iavf_pattern_eth_ipv4_udp, IAVF_FDIR_INSET_ETH_IPV4_UDP, IAVF_INSET_NONE},
+ {iavf_pattern_eth_ipv4_tcp, IAVF_FDIR_INSET_ETH_IPV4_TCP, IAVF_INSET_NONE},
+ {iavf_pattern_eth_ipv4_sctp, IAVF_FDIR_INSET_ETH_IPV4_SCTP, IAVF_INSET_NONE},
+ {iavf_pattern_eth_ipv6, IAVF_FDIR_INSET_ETH_IPV6, IAVF_INSET_NONE},
+ {iavf_pattern_eth_ipv6_udp, IAVF_FDIR_INSET_ETH_IPV6_UDP, IAVF_INSET_NONE},
+ {iavf_pattern_eth_ipv6_tcp, IAVF_FDIR_INSET_ETH_IPV6_TCP, IAVF_INSET_NONE},
+ {iavf_pattern_eth_ipv6_sctp, IAVF_FDIR_INSET_ETH_IPV6_SCTP, IAVF_INSET_NONE},
+ {iavf_pattern_eth_ipv4_gtpu, IAVF_FDIR_INSET_GTPU, IAVF_INSET_NONE},
+ {iavf_pattern_eth_ipv4_gtpu_eh, IAVF_FDIR_INSET_GTPU_EH, IAVF_INSET_NONE},
+ {iavf_pattern_eth_ipv4_l2tpv3, IAVF_FDIR_INSET_L2TPV3OIP, IAVF_INSET_NONE},
+ {iavf_pattern_eth_ipv6_l2tpv3, IAVF_FDIR_INSET_L2TPV3OIP, IAVF_INSET_NONE},
+ {iavf_pattern_eth_ipv4_esp, IAVF_FDIR_INSET_ESP, IAVF_INSET_NONE},
+ {iavf_pattern_eth_ipv6_esp, IAVF_FDIR_INSET_ESP, IAVF_INSET_NONE},
+ {iavf_pattern_eth_ipv4_ah, IAVF_FDIR_INSET_AH, IAVF_INSET_NONE},
+ {iavf_pattern_eth_ipv6_ah, IAVF_FDIR_INSET_AH, IAVF_INSET_NONE},
+ {iavf_pattern_eth_ipv4_udp_esp, IAVF_FDIR_INSET_IPV4_NATT_ESP, IAVF_INSET_NONE},
+ {iavf_pattern_eth_ipv6_udp_esp, IAVF_FDIR_INSET_IPV6_NATT_ESP, IAVF_INSET_NONE},
+ {iavf_pattern_eth_ipv4_pfcp, IAVF_FDIR_INSET_PFCP, IAVF_INSET_NONE},
+ {iavf_pattern_eth_ipv6_pfcp, IAVF_FDIR_INSET_PFCP, IAVF_INSET_NONE},
+};
+
+static struct iavf_flow_parser iavf_fdir_parser;
+
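+/* Register the FDIR parser only when the PF advertises FDIR offload. */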
+static int
+iavf_fdir_init(struct iavf_adapter *ad)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+ struct iavf_flow_parser *parser;
+
+ if (!vf->vf_res)
+ return -EINVAL;
+
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
+ parser = &iavf_fdir_parser;
+ else
+ return -ENOTSUP;
+
+ return iavf_register_parser(parser, ad);
+}
+
+static void
+iavf_fdir_uninit(struct iavf_adapter *ad)
+{
+ iavf_unregister_parser(&iavf_fdir_parser, ad);
+}
+
+static int
+iavf_fdir_create(struct iavf_adapter *ad,
+ struct rte_flow *flow,
+ void *meta,
+ struct rte_flow_error *error)
+{
+ struct iavf_fdir_conf *filter = meta;
+ struct iavf_fdir_conf *rule;
+ int ret;
+
+ rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
+ if (!rule) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory for fdir rule");
+ return -rte_errno;
+ }
+
+ ret = iavf_fdir_add(ad, filter);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to add filter rule.");
+ goto free_entry;
+ }
+
+ if (filter->mark_flag == 1)
+ iavf_fdir_rx_proc_enable(ad, 1);
+
+ rte_memcpy(rule, filter, sizeof(*rule));
+ flow->rule = rule;
+
+ return 0;
+
+free_entry:
+ rte_free(rule);
+ return -rte_errno;
+}
+
+static int
+iavf_fdir_destroy(struct iavf_adapter *ad,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct iavf_fdir_conf *filter;
+ int ret;
+
+ filter = (struct iavf_fdir_conf *)flow->rule;
+
+ ret = iavf_fdir_del(ad, filter);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to delete filter rule.");
+ return -rte_errno;
+ }
+
+ if (filter->mark_flag == 1)
+ iavf_fdir_rx_proc_enable(ad, 0);
+
+ flow->rule = NULL;
+ rte_free(filter);
+
+ return 0;
+}
+
+static int
+iavf_fdir_validation(struct iavf_adapter *ad,
+ __rte_unused struct rte_flow *flow,
+ void *meta,
+ struct rte_flow_error *error)
+{
+ struct iavf_fdir_conf *filter = meta;
+ int ret;
+
+ ret = iavf_fdir_check(ad, filter);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to validate filter rule.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static struct iavf_flow_engine iavf_fdir_engine = {
+ .init = iavf_fdir_init,
+ .uninit = iavf_fdir_uninit,
+ .create = iavf_fdir_create,
+ .destroy = iavf_fdir_destroy,
+ .validation = iavf_fdir_validation,
+ .type = IAVF_FLOW_ENGINE_FDIR,
+};
+
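+/*
+ * Translate an RSS action into a queue region: the queues must be
+ * continuous, the region starts at rss->queue[0] and its size is encoded
+ * as log2(queue_num), hence the power-of-two check below.
+ */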
+static int
+iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
+ struct rte_flow_error *error,
+ const struct rte_flow_action *act,
+ struct virtchnl_filter_action *filter_action)
+{
+ const struct rte_flow_action_rss *rss = act->conf;
+ uint32_t i;
+
+ if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "Invalid action.");
+ return -rte_errno;
+ }
+
+ if (rss->queue_num <= 1) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "Queue region size can't be 0 or 1.");
+ return -rte_errno;
+ }
+
+	/* check that the queue indexes of the region are continuous */
+ for (i = 0; i < rss->queue_num - 1; i++) {
+ if (rss->queue[i + 1] != rss->queue[i] + 1) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "Discontinuous queue region");
+ return -rte_errno;
+ }
+ }
+
+ if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "Invalid queue region indexes.");
+ return -rte_errno;
+ }
+
+ if (!(rte_is_power_of_2(rss->queue_num) &&
+ rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+				"The region size should be any of the following values: "
+				"1, 2, 4, 8, 16, 32, 64, 128, as long as the total number "
+				"of queues does not exceed the VSI allocation.");
+ return -rte_errno;
+ }
+
+ filter_action->act_conf.queue.index = rss->queue[0];
+ filter_action->act_conf.queue.region = rte_fls_u32(rss->queue_num) - 1;
+
+ return 0;
+}
+
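+/*
+ * Translate rte_flow actions into virtchnl filter actions.  At most one
+ * destination action (passthru/drop/queue/rss) and at most one mark action
+ * are accepted per rule.
+ */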
+static int
+iavf_fdir_parse_action(struct iavf_adapter *ad,
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct iavf_fdir_conf *filter)
+{
+ const struct rte_flow_action_queue *act_q;
+ const struct rte_flow_action_mark *mark_spec = NULL;
+ uint32_t dest_num = 0;
+ uint32_t mark_num = 0;
+ int ret;
+
+ int number = 0;
+ struct virtchnl_filter_action *filter_action;
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_PASSTHRU:
+ dest_num++;
+
+ filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+
+ filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
+
+ filter->add_fltr.rule_cfg.action_set.count = ++number;
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ dest_num++;
+
+ filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+
+ filter_action->type = VIRTCHNL_ACTION_DROP;
+
+ filter->add_fltr.rule_cfg.action_set.count = ++number;
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ dest_num++;
+
+ act_q = actions->conf;
+ filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+
+ filter_action->type = VIRTCHNL_ACTION_QUEUE;
+ filter_action->act_conf.queue.index = act_q->index;
+
+ if (filter_action->act_conf.queue.index >=
+ ad->eth_dev->data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "Invalid queue for FDIR.");
+ return -rte_errno;
+ }
+
+ filter->add_fltr.rule_cfg.action_set.count = ++number;
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ dest_num++;
+
+ filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+
+ filter_action->type = VIRTCHNL_ACTION_Q_REGION;
+
+ ret = iavf_fdir_parse_action_qregion(ad,
+ error, actions, filter_action);
+ if (ret)
+ return ret;
+
+ filter->add_fltr.rule_cfg.action_set.count = ++number;
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ mark_num++;
+
+ filter->mark_flag = 1;
+ mark_spec = actions->conf;
+ filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+
+ filter_action->type = VIRTCHNL_ACTION_MARK;
+ filter_action->act_conf.mark_id = mark_spec->id;
+
+ filter->add_fltr.rule_cfg.action_set.count = ++number;
+ break;
+
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Invalid action.");
+ return -rte_errno;
+ }
+ }
+
+ if (number > VIRTCHNL_MAX_NUM_ACTIONS) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Action numbers exceed the maximum value");
+ return -rte_errno;
+ }
+
+ if (dest_num >= 2) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Unsupported action combination");
+ return -rte_errno;
+ }
+
+ if (mark_num >= 2) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Too many mark actions");
+ return -rte_errno;
+ }
+
+ if (dest_num + mark_num == 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Empty action");
+ return -rte_errno;
+ }
+
+	/* A mark-only rule is treated as mark + passthru. */
+ if (dest_num == 0) {
+ filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+ filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
+ filter->add_fltr.rule_cfg.action_set.count = ++number;
+ }
+
+ return 0;
+}
+
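+/*
+ * Translate pattern items into virtchnl protocol headers.  A field joins
+ * the input set only when its mask is fully set; unsupported mask bits are
+ * rejected, other partial masks are simply ignored.
+ */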
+static int
+iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
+ const struct rte_flow_item pattern[],
+ struct rte_flow_error *error,
+ struct iavf_fdir_conf *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ enum rte_flow_item_type item_type;
+ enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+ const struct rte_flow_item_eth *eth_spec, *eth_mask;
+ const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+ const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+ const struct rte_flow_item_udp *udp_spec, *udp_mask;
+ const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+ const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+ const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
+ const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
+ const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
+ const struct rte_flow_item_esp *esp_spec, *esp_mask;
+ const struct rte_flow_item_ah *ah_spec, *ah_mask;
+ const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
+ uint64_t input_set = IAVF_INSET_NONE;
+
+ enum rte_flow_item_type next_type;
+ uint16_t ether_type;
+
+ int layer = 0;
+ struct virtchnl_proto_hdr *hdr;
+
+ uint8_t ipv6_addr_mask[16] = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
+ };
+
+ for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM, item,
+					"Range is not supported");
+			return -rte_errno;
+		}
+
+ item_type = item->type;
+
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = item->spec;
+ eth_mask = item->mask;
+ next_type = (item + 1)->type;
+
+ hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);
+
+ if (next_type == RTE_FLOW_ITEM_TYPE_END &&
+ (!eth_spec || !eth_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "NULL eth spec/mask.");
+ return -rte_errno;
+ }
+
+ if (eth_spec && eth_mask) {
+ if (!rte_is_zero_ether_addr(&eth_mask->src) ||
+ !rte_is_zero_ether_addr(&eth_mask->dst)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Invalid MAC_addr mask.");
+ return -rte_errno;
+ }
+ }
+
+ if (eth_spec && eth_mask && eth_mask->type) {
+ if (eth_mask->type != RTE_BE16(0xffff)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid type mask.");
+ return -rte_errno;
+ }
+
+ ether_type = rte_be_to_cpu_16(eth_spec->type);
+ if (ether_type == RTE_ETHER_TYPE_IPV4 ||
+ ether_type == RTE_ETHER_TYPE_IPV6) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported ether_type.");
+ return -rte_errno;
+ }
+
+ input_set |= IAVF_INSET_ETHERTYPE;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE);
+
+ rte_memcpy(hdr->buffer,
+ eth_spec, sizeof(*eth_spec));
+ }
+
+ filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ l3 = RTE_FLOW_ITEM_TYPE_IPV4;
+ ipv4_spec = item->spec;
+ ipv4_mask = item->mask;
+
+ hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
+
+ if (ipv4_spec && ipv4_mask) {
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid IPv4 mask.");
+ return -rte_errno;
+ }
+
+ if (ipv4_mask->hdr.type_of_service ==
+ UINT8_MAX) {
+ input_set |= IAVF_INSET_IPV4_TOS;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP);
+ }
+ if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
+ input_set |= IAVF_INSET_IPV4_PROTO;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
+ }
+ if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
+ input_set |= IAVF_INSET_IPV4_TTL;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, TTL);
+ }
+ if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
+ input_set |= IAVF_INSET_IPV4_SRC;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
+ }
+ if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+ input_set |= IAVF_INSET_IPV4_DST;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
+ }
+
+ rte_memcpy(hdr->buffer,
+ &ipv4_spec->hdr,
+ sizeof(ipv4_spec->hdr));
+ }
+
+ filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ l3 = RTE_FLOW_ITEM_TYPE_IPV6;
+ ipv6_spec = item->spec;
+ ipv6_mask = item->mask;
+
+ hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
+
+ if (ipv6_spec && ipv6_mask) {
+ if (ipv6_mask->hdr.payload_len) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid IPv6 mask");
+ return -rte_errno;
+ }
+
+ if ((ipv6_mask->hdr.vtc_flow &
+ rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
+ == rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
+ input_set |= IAVF_INSET_IPV6_TC;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
+ }
+ if (ipv6_mask->hdr.proto == UINT8_MAX) {
+ input_set |= IAVF_INSET_IPV6_NEXT_HDR;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
+ }
+ if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
+ input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, HOP_LIMIT);
+ }
+ if (!memcmp(ipv6_mask->hdr.src_addr,
+ ipv6_addr_mask,
+ RTE_DIM(ipv6_mask->hdr.src_addr))) {
+ input_set |= IAVF_INSET_IPV6_SRC;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
+ }
+ if (!memcmp(ipv6_mask->hdr.dst_addr,
+ ipv6_addr_mask,
+ RTE_DIM(ipv6_mask->hdr.dst_addr))) {
+ input_set |= IAVF_INSET_IPV6_DST;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
+ }
+
+ rte_memcpy(hdr->buffer,
+ &ipv6_spec->hdr,
+ sizeof(ipv6_spec->hdr));
+ }
+
+ filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ udp_spec = item->spec;
+ udp_mask = item->mask;
+
+ hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
+
+ if (udp_spec && udp_mask) {
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ if (udp_mask->hdr.src_port == UINT16_MAX) {
+ input_set |= IAVF_INSET_UDP_SRC_PORT;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
+ }
+ if (udp_mask->hdr.dst_port == UINT16_MAX) {
+ input_set |= IAVF_INSET_UDP_DST_PORT;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
+ }
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ rte_memcpy(hdr->buffer,
+ &udp_spec->hdr,
+ sizeof(udp_spec->hdr));
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ rte_memcpy(hdr->buffer,
+ &udp_spec->hdr,
+ sizeof(udp_spec->hdr));
+ }
+
+ filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ tcp_spec = item->spec;
+ tcp_mask = item->mask;
+
+ hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
+
+ if (tcp_spec && tcp_mask) {
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Invalid TCP mask");
+ return -rte_errno;
+ }
+
+ if (tcp_mask->hdr.src_port == UINT16_MAX) {
+ input_set |= IAVF_INSET_TCP_SRC_PORT;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
+ }
+ if (tcp_mask->hdr.dst_port == UINT16_MAX) {
+ input_set |= IAVF_INSET_TCP_DST_PORT;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
+ }
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ rte_memcpy(hdr->buffer,
+ &tcp_spec->hdr,
+ sizeof(tcp_spec->hdr));
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ rte_memcpy(hdr->buffer,
+ &tcp_spec->hdr,
+ sizeof(tcp_spec->hdr));
+ }
+
+ filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_SCTP:
+ sctp_spec = item->spec;
+ sctp_mask = item->mask;
+
+ hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);
+
+ if (sctp_spec && sctp_mask) {
+ if (sctp_mask->hdr.cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+						"Invalid SCTP mask");
+ return -rte_errno;
+ }
+
+ if (sctp_mask->hdr.src_port == UINT16_MAX) {
+ input_set |= IAVF_INSET_SCTP_SRC_PORT;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
+ }
+ if (sctp_mask->hdr.dst_port == UINT16_MAX) {
+ input_set |= IAVF_INSET_SCTP_DST_PORT;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
+ }
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ rte_memcpy(hdr->buffer,
+ &sctp_spec->hdr,
+ sizeof(sctp_spec->hdr));
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ rte_memcpy(hdr->buffer,
+ &sctp_spec->hdr,
+ sizeof(sctp_spec->hdr));
+ }
+
+ filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_GTPU:
+ gtp_spec = item->spec;
+ gtp_mask = item->mask;
+
+ hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP);
+
+ if (gtp_spec && gtp_mask) {
+ if (gtp_mask->v_pt_rsv_flags ||
+ gtp_mask->msg_type ||
+ gtp_mask->msg_len) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid GTP mask");
+ return -rte_errno;
+ }
+
+ if (gtp_mask->teid == UINT32_MAX) {
+ input_set |= IAVF_INSET_GTPU_TEID;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_IP, TEID);
+ }
+
+ rte_memcpy(hdr->buffer,
+ gtp_spec, sizeof(*gtp_spec));
+ }
+
+ filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_GTP_PSC:
+ gtp_psc_spec = item->spec;
+ gtp_psc_mask = item->mask;
+
+ hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
+
+ if (gtp_psc_spec && gtp_psc_mask) {
+ if (gtp_psc_mask->qfi == UINT8_MAX) {
+ input_set |= IAVF_INSET_GTPU_QFI;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_EH, QFI);
+ }
+
+ rte_memcpy(hdr->buffer, gtp_psc_spec,
+ sizeof(*gtp_psc_spec));
+ }
+
+ filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
+ l2tpv3oip_spec = item->spec;
+ l2tpv3oip_mask = item->mask;
+
+ hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);
+
+ if (l2tpv3oip_spec && l2tpv3oip_mask) {
+ if (l2tpv3oip_mask->session_id == UINT32_MAX) {
+ input_set |= IAVF_L2TPV3OIP_SESSION_ID;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);
+ }
+
+ rte_memcpy(hdr->buffer, l2tpv3oip_spec,
+ sizeof(*l2tpv3oip_spec));
+ }
+
+ filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_ESP:
+ esp_spec = item->spec;
+ esp_mask = item->mask;
+
+ hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);
+
+ if (esp_spec && esp_mask) {
+ if (esp_mask->hdr.spi == UINT32_MAX) {
+ input_set |= IAVF_INSET_ESP_SPI;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
+ }
+
+ rte_memcpy(hdr->buffer, &esp_spec->hdr,
+ sizeof(esp_spec->hdr));
+ }
+
+ filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_AH:
+ ah_spec = item->spec;
+ ah_mask = item->mask;
+
+ hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);
+
+ if (ah_spec && ah_mask) {
+ if (ah_mask->spi == UINT32_MAX) {
+ input_set |= IAVF_INSET_AH_SPI;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
+ }
+
+ rte_memcpy(hdr->buffer, ah_spec,
+ sizeof(*ah_spec));
+ }
+
+ filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_PFCP:
+ pfcp_spec = item->spec;
+ pfcp_mask = item->mask;
+
+ hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP);
+
+ if (pfcp_spec && pfcp_mask) {
+ if (pfcp_mask->s_field == UINT8_MAX) {
+ input_set |= IAVF_INSET_PFCP_S_FIELD;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD);
+ }
+
+ rte_memcpy(hdr->buffer, pfcp_spec,
+ sizeof(*pfcp_spec));
+ }
+
+ filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ break;
+
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Invalid pattern item.");
+ return -rte_errno;
+ }
+ }
+
+ if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Protocol header layers exceed the maximum value");
+ return -rte_errno;
+ }
+
+ filter->input_set = input_set;
+
+ return 0;
+}
+
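+/*
+ * Parser entry point used by the generic flow layer: match the pattern
+ * against the template table, parse items and actions, and hand the
+ * resulting virtchnl filter back through *meta.
+ */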
+static int
+iavf_fdir_parse(struct iavf_adapter *ad,
+ struct iavf_pattern_match_item *array,
+ uint32_t array_len,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ void **meta,
+ struct rte_flow_error *error)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+ struct iavf_fdir_conf *filter = &vf->fdir.conf;
+ struct iavf_pattern_match_item *item = NULL;
+ uint64_t input_set;
+ int ret;
+
+ memset(filter, 0, sizeof(*filter));
+
+ item = iavf_search_pattern_match_item(pattern, array, array_len, error);
+ if (!item)
+ return -rte_errno;
+
+ ret = iavf_fdir_parse_pattern(ad, pattern, error, filter);
+ if (ret)
+ goto error;
+
+ input_set = filter->input_set;
+ if (!input_set || input_set & ~item->input_set_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
+ "Invalid input set");
+ ret = -rte_errno;
+ goto error;
+ }
+
+ ret = iavf_fdir_parse_action(ad, actions, error, filter);
+ if (ret)
+ goto error;
+
+ if (meta)
+ *meta = filter;
+
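+	/*
+	 * Fall through on success as well: the matched template item is
+	 * freed on both the success and the error path.
+	 */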
+error:
+ rte_free(item);
+ return ret;
+}
+
+static struct iavf_flow_parser iavf_fdir_parser = {
+ .engine = &iavf_fdir_engine,
+ .array = iavf_fdir_pattern,
+ .array_len = RTE_DIM(iavf_fdir_pattern),
+ .parse_pattern_action = iavf_fdir_parse,
+ .stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
+};
+
+RTE_INIT(iavf_fdir_engine_register)
+{
+ iavf_register_flow_engine(&iavf_fdir_engine);
+}
diff --git a/src/spdk/dpdk/drivers/net/iavf/iavf_generic_flow.c b/src/spdk/dpdk/drivers/net/iavf/iavf_generic_flow.c
new file mode 100644
index 000000000..b6c26c4fd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/iavf/iavf_generic_flow.c
@@ -0,0 +1,1044 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_tailq.h>
+
+#include "iavf.h"
+#include "iavf_generic_flow.h"
+
+static struct iavf_engine_list engine_list =
+ TAILQ_HEAD_INITIALIZER(engine_list);
+
+static int iavf_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+static struct rte_flow *iavf_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+static int iavf_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error);
+static int iavf_flow_query(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_action *actions,
+ void *data,
+ struct rte_flow_error *error);
+
+const struct rte_flow_ops iavf_flow_ops = {
+ .validate = iavf_flow_validate,
+ .create = iavf_flow_create,
+ .destroy = iavf_flow_destroy,
+ .flush = iavf_flow_flush,
+ .query = iavf_flow_query,
+};
+
+/* empty */
+enum rte_flow_item_type iavf_pattern_empty[] = {
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* L2 */
+enum rte_flow_item_type iavf_pattern_ethertype[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_ethertype_vlan[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_ethertype_qinq[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* ARP */
+enum rte_flow_item_type iavf_pattern_eth_arp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* non-tunnel IPv4 */
+enum rte_flow_item_type iavf_pattern_eth_ipv4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv4_udp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_udp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_udp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv4_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv4_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv4_icmp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_ICMP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_icmp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_ICMP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_icmp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_ICMP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* non-tunnel IPv6 */
+enum rte_flow_item_type iavf_pattern_eth_ipv6[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_udp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_udp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_udp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_icmp6[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_ICMP6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_icmp6[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_ICMP6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_icmp6[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_ICMP6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* GTPU */
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_GTPU,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_GTPU,
+ RTE_FLOW_ITEM_TYPE_GTP_PSC,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_GTPU,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_GTPU,
+ RTE_FLOW_ITEM_TYPE_GTP_PSC,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_GTPU,
+ RTE_FLOW_ITEM_TYPE_GTP_PSC,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_GTPU,
+ RTE_FLOW_ITEM_TYPE_GTP_PSC,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_icmp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_GTPU,
+ RTE_FLOW_ITEM_TYPE_GTP_PSC,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_ICMP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* ESP */
+enum rte_flow_item_type iavf_pattern_eth_ipv4_esp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_ESP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv4_udp_esp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_ESP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_esp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_ESP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_udp_esp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_ESP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* AH */
+enum rte_flow_item_type iavf_pattern_eth_ipv4_ah[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_AH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_ah[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_AH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* L2TPV3 */
+enum rte_flow_item_type iavf_pattern_eth_ipv4_l2tpv3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_l2tpv3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* PFCP */
+enum rte_flow_item_type iavf_pattern_eth_ipv4_pfcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_PFCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_pfcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_PFCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+typedef struct iavf_flow_engine * (*parse_engine_t)(struct iavf_adapter *ad,
+ struct rte_flow *flow,
+ struct iavf_parser_list *parser_list,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+
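+/*
+ * Flow engines register themselves at constructor time through RTE_INIT
+ * (see iavf_fdir_engine_register), so engine_list is already populated
+ * when iavf_flow_init() runs.
+ */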
+void
+iavf_register_flow_engine(struct iavf_flow_engine *engine)
+{
+ TAILQ_INSERT_TAIL(&engine_list, engine, node);
+}
+
+int
+iavf_flow_init(struct iavf_adapter *ad)
+{
+ int ret;
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+ void *temp;
+ struct iavf_flow_engine *engine;
+
+ TAILQ_INIT(&vf->flow_list);
+ TAILQ_INIT(&vf->rss_parser_list);
+ TAILQ_INIT(&vf->dist_parser_list);
+ rte_spinlock_init(&vf->flow_ops_lock);
+
+ TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+ if (engine->init == NULL) {
+ PMD_INIT_LOG(ERR, "Invalid engine type (%d)",
+ engine->type);
+ return -ENOTSUP;
+ }
+
+ ret = engine->init(ad);
+ if (ret && ret != -ENOTSUP) {
+ PMD_INIT_LOG(ERR, "Failed to initialize engine %d",
+ engine->type);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+void
+iavf_flow_uninit(struct iavf_adapter *ad)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+ struct iavf_flow_engine *engine;
+ struct rte_flow *p_flow;
+ struct iavf_flow_parser_node *p_parser;
+ void *temp;
+
+ TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+ if (engine->uninit)
+ engine->uninit(ad);
+ }
+
+ /* Remove all flows */
+ while ((p_flow = TAILQ_FIRST(&vf->flow_list))) {
+ TAILQ_REMOVE(&vf->flow_list, p_flow, node);
+ if (p_flow->engine->free)
+ p_flow->engine->free(p_flow);
+ rte_free(p_flow);
+ }
+
+ /* Cleanup parser list */
+ while ((p_parser = TAILQ_FIRST(&vf->rss_parser_list))) {
+ TAILQ_REMOVE(&vf->rss_parser_list, p_parser, node);
+ rte_free(p_parser);
+ }
+
+ while ((p_parser = TAILQ_FIRST(&vf->dist_parser_list))) {
+ TAILQ_REMOVE(&vf->dist_parser_list, p_parser, node);
+ rte_free(p_parser);
+ }
+}
+
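+/*
+ * Hash (RSS) parsers are appended to the RSS list while FDIR parsers are
+ * prepended to the distributor list; iavf_flow_process_filter() tries the
+ * RSS list first.
+ */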
+int
+iavf_register_parser(struct iavf_flow_parser *parser,
+ struct iavf_adapter *ad)
+{
+ struct iavf_parser_list *list = NULL;
+ struct iavf_flow_parser_node *parser_node;
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+
+ parser_node = rte_zmalloc("iavf_parser", sizeof(*parser_node), 0);
+ if (parser_node == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory.");
+ return -ENOMEM;
+ }
+ parser_node->parser = parser;
+
+ if (parser->engine->type == IAVF_FLOW_ENGINE_HASH) {
+ list = &vf->rss_parser_list;
+ TAILQ_INSERT_TAIL(list, parser_node, node);
+ } else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
+ list = &vf->dist_parser_list;
+ TAILQ_INSERT_HEAD(list, parser_node, node);
+	} else {
+		rte_free(parser_node);
+		return -EINVAL;
+	}
+
+ return 0;
+}
+
+void
+iavf_unregister_parser(struct iavf_flow_parser *parser,
+ struct iavf_adapter *ad)
+{
+ struct iavf_parser_list *list = NULL;
+ struct iavf_flow_parser_node *p_parser;
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+ void *temp;
+
+ if (parser->engine->type == IAVF_FLOW_ENGINE_HASH)
+ list = &vf->rss_parser_list;
+ else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR)
+ list = &vf->dist_parser_list;
+
+ if (list == NULL)
+ return;
+
+ TAILQ_FOREACH_SAFE(p_parser, list, node, temp) {
+ if (p_parser->parser->engine->type == parser->engine->type) {
+ TAILQ_REMOVE(list, p_parser, node);
+ rte_free(p_parser);
+ }
+ }
+}
+
+static int
+iavf_flow_valid_attr(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ /* Must be input direction */
+ if (!attr->ingress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+				attr, "Only ingress is supported.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->egress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+				attr, "Egress is not supported.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->priority) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				attr, "Priority is not supported.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->group) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+				attr, "Group is not supported.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/* Find the first VOID or non-VOID item pointer */
+static const struct rte_flow_item *
+iavf_find_first_item(const struct rte_flow_item *item, bool is_void)
+{
+ bool is_find;
+
+ while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ if (is_void)
+ is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
+ else
+ is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
+ if (is_find)
+ break;
+ item++;
+ }
+ return item;
+}
+
+/* Skip all VOID items of the pattern */
+static void
+iavf_pattern_skip_void_item(struct rte_flow_item *items,
+ const struct rte_flow_item *pattern)
+{
+ uint32_t cpy_count = 0;
+ const struct rte_flow_item *pb = pattern, *pe = pattern;
+
+ for (;;) {
+ /* Find a non-void item first */
+ pb = iavf_find_first_item(pb, false);
+ if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
+ pe = pb;
+ break;
+ }
+
+ /* Find a void item */
+ pe = iavf_find_first_item(pb + 1, true);
+
+ cpy_count = pe - pb;
+ rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
+
+ items += cpy_count;
+
+ if (pe->type == RTE_FLOW_ITEM_TYPE_END)
+ break;
+
+ pb = pe + 1;
+ }
+ /* Copy the END item. */
+ rte_memcpy(items, pe, sizeof(struct rte_flow_item));
+}
+
+/* Check if the pattern matches a supported item type array */
+static bool
+iavf_match_pattern(enum rte_flow_item_type *item_array,
+ const struct rte_flow_item *pattern)
+{
+ const struct rte_flow_item *item = pattern;
+
+ while ((*item_array == item->type) &&
+ (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
+ item_array++;
+ item++;
+ }
+
+ return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
+ item->type == RTE_FLOW_ITEM_TYPE_END);
+}
+
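+/*
+ * Strip VOID items from the pattern and look it up in the template table.
+ * The returned match item is allocated here and must be freed by the
+ * caller.
+ */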
+struct iavf_pattern_match_item *
+iavf_search_pattern_match_item(const struct rte_flow_item pattern[],
+ struct iavf_pattern_match_item *array,
+ uint32_t array_len,
+ struct rte_flow_error *error)
+{
+ uint16_t i = 0;
+ struct iavf_pattern_match_item *pattern_match_item;
+	/* must be freed by each filter that uses it */
+	struct rte_flow_item *items; /* pattern copy with VOID items stripped */
+	uint32_t item_num = 0; /* number of non-VOID items */
+
+	/* Count the non-VOID items in the pattern */
+ while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
+ if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
+ item_num++;
+ i++;
+ }
+ item_num++;
+
+ items = rte_zmalloc("iavf_pattern",
+ item_num * sizeof(struct rte_flow_item), 0);
+ if (!items) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "No memory for PMD internal items.");
+ return NULL;
+ }
+ pattern_match_item = rte_zmalloc("iavf_pattern_match_item",
+ sizeof(struct iavf_pattern_match_item), 0);
+	if (!pattern_match_item) {
+		rte_free(items);
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+				NULL, "Failed to allocate memory.");
+		return NULL;
+	}
+
+ iavf_pattern_skip_void_item(items, pattern);
+
+ for (i = 0; i < array_len; i++)
+ if (iavf_match_pattern(array[i].pattern_list,
+ items)) {
+ pattern_match_item->input_set_mask =
+ array[i].input_set_mask;
+ pattern_match_item->pattern_list =
+ array[i].pattern_list;
+ pattern_match_item->meta = array[i].meta;
+ rte_free(items);
+ return pattern_match_item;
+ }
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern, "Unsupported pattern");
+
+ rte_free(items);
+ rte_free(pattern_match_item);
+ return NULL;
+}
+
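+/*
+ * Walk the parser list: the first parser that accepts the pattern and
+ * actions programs the rule through its engine's create callback, and that
+ * engine is returned to the caller.
+ */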
+static struct iavf_flow_engine *
+iavf_parse_engine_create(struct iavf_adapter *ad,
+ struct rte_flow *flow,
+ struct iavf_parser_list *parser_list,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct iavf_flow_engine *engine = NULL;
+ struct iavf_flow_parser_node *parser_node;
+ void *temp;
+ void *meta = NULL;
+
+ TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) {
+ if (parser_node->parser->parse_pattern_action(ad,
+ parser_node->parser->array,
+ parser_node->parser->array_len,
+ pattern, actions, &meta, error) < 0)
+ continue;
+
+ engine = parser_node->parser->engine;
+
+ RTE_ASSERT(engine->create != NULL);
+ if (!(engine->create(ad, flow, meta, error)))
+ return engine;
+ }
+ return NULL;
+}
+
+static struct iavf_flow_engine *
+iavf_parse_engine_validate(struct iavf_adapter *ad,
+ struct rte_flow *flow,
+ struct iavf_parser_list *parser_list,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct iavf_flow_engine *engine = NULL;
+ struct iavf_flow_parser_node *parser_node;
+ void *temp;
+ void *meta = NULL;
+
+ TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) {
+ if (parser_node->parser->parse_pattern_action(ad,
+ parser_node->parser->array,
+ parser_node->parser->array_len,
+ pattern, actions, &meta, error) < 0)
+ continue;
+
+ engine = parser_node->parser->engine;
+ if (engine->validation == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+					NULL, "Validation not supported");
+ continue;
+ }
+
+ if (engine->validation(ad, flow, meta, error)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Validation failed");
+ break;
+ }
+ }
+ return engine;
+}
+
+
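+/*
+ * Common path for validate and create: check the attributes, then try the
+ * RSS parsers and fall back to the distributor (FDIR) parsers.
+ */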
+static int
+iavf_flow_process_filter(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct iavf_flow_engine **engine,
+ parse_engine_t iavf_parse_engine,
+ struct rte_flow_error *error)
+{
+ int ret = IAVF_ERR_CONFIG;
+ struct iavf_adapter *ad =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ ret = iavf_flow_valid_attr(attr, error);
+ if (ret)
+ return ret;
+
+ *engine = iavf_parse_engine(ad, flow, &vf->rss_parser_list, pattern,
+ actions, error);
+ if (*engine)
+ return 0;
+
+ *engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
+ actions, error);
+
+ if (!*engine) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to create parser engine.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+iavf_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct iavf_flow_engine *engine;
+
+ return iavf_flow_process_filter(dev, NULL, attr, pattern, actions,
+ &engine, iavf_parse_engine_validate, error);
+}
+
+static struct rte_flow *
+iavf_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct iavf_adapter *ad =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+ struct iavf_flow_engine *engine = NULL;
+ struct rte_flow *flow = NULL;
+ int ret;
+
+ flow = rte_zmalloc("iavf_flow", sizeof(struct rte_flow), 0);
+ if (!flow) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory");
+ return flow;
+ }
+
+	/* Take the ops lock; it is released at the free_flow label below. */
+	rte_spinlock_lock(&vf->flow_ops_lock);
+
+	ret = iavf_flow_process_filter(dev, flow, attr, pattern, actions,
+			&engine, iavf_parse_engine_create, error);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to create flow");
+ rte_free(flow);
+ flow = NULL;
+ goto free_flow;
+ }
+
+ flow->engine = engine;
+ TAILQ_INSERT_TAIL(&vf->flow_list, flow, node);
+	PMD_DRV_LOG(INFO, "Successfully created flow (engine type %d)", engine->type);
+
+free_flow:
+ rte_spinlock_unlock(&vf->flow_ops_lock);
+ return flow;
+}
+
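+/* Guard against stale handles: the flow must reference a registered engine. */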
+static bool
+iavf_flow_is_valid(struct rte_flow *flow)
+{
+ struct iavf_flow_engine *engine;
+ void *temp;
+
+ if (flow && flow->engine) {
+ TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+ if (engine == flow->engine)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int
+iavf_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct iavf_adapter *ad =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+ int ret = 0;
+
+ if (!iavf_flow_is_valid(flow) || !flow->engine->destroy) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Invalid flow destroy");
+ return -rte_errno;
+ }
+
+ rte_spinlock_lock(&vf->flow_ops_lock);
+
+ ret = flow->engine->destroy(ad, flow, error);
+
+ if (!ret) {
+ TAILQ_REMOVE(&vf->flow_list, flow, node);
+ rte_free(flow);
+ } else {
+ PMD_DRV_LOG(ERR, "Failed to destroy flow");
+ }
+
+ rte_spinlock_unlock(&vf->flow_ops_lock);
+
+ return ret;
+}
+
+int
+iavf_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ struct iavf_adapter *ad =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+ struct rte_flow *p_flow;
+ void *temp;
+ int ret = 0;
+
+ TAILQ_FOREACH_SAFE(p_flow, &vf->flow_list, node, temp) {
+ ret = iavf_flow_destroy(dev, p_flow, error);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to flush flows");
+ return -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
+static int
+iavf_flow_query(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_action *actions,
+ void *data,
+ struct rte_flow_error *error)
+{
+ int ret = -EINVAL;
+ struct iavf_adapter *ad =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct rte_flow_query_count *count = data;
+
+ if (!iavf_flow_is_valid(flow) || !flow->engine->query_count) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Invalid flow query");
+ return -rte_errno;
+ }
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ ret = flow->engine->query_count(ad, flow, count, error);
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ }
+ }
+ return ret;
+}
+
diff --git a/src/spdk/dpdk/drivers/net/iavf/iavf_generic_flow.h b/src/spdk/dpdk/drivers/net/iavf/iavf_generic_flow.h
new file mode 100644
index 000000000..978d0716b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/iavf/iavf_generic_flow.h
@@ -0,0 +1,320 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _IAVF_GENERIC_FLOW_H_
+#define _IAVF_GENERIC_FLOW_H_
+
+#include <rte_flow_driver.h>
+
+/* protocol */
+
+#define IAVF_PROT_MAC_INNER (1ULL << 1)
+#define IAVF_PROT_MAC_OUTER (1ULL << 2)
+#define IAVF_PROT_VLAN_INNER (1ULL << 3)
+#define IAVF_PROT_VLAN_OUTER (1ULL << 4)
+#define IAVF_PROT_IPV4_INNER (1ULL << 5)
+#define IAVF_PROT_IPV4_OUTER (1ULL << 6)
+#define IAVF_PROT_IPV6_INNER (1ULL << 7)
+#define IAVF_PROT_IPV6_OUTER (1ULL << 8)
+#define IAVF_PROT_TCP_INNER (1ULL << 9)
+#define IAVF_PROT_TCP_OUTER (1ULL << 10)
+#define IAVF_PROT_UDP_INNER (1ULL << 11)
+#define IAVF_PROT_UDP_OUTER (1ULL << 12)
+#define IAVF_PROT_SCTP_INNER (1ULL << 13)
+#define IAVF_PROT_SCTP_OUTER (1ULL << 14)
+#define IAVF_PROT_ICMP4_INNER (1ULL << 15)
+#define IAVF_PROT_ICMP4_OUTER (1ULL << 16)
+#define IAVF_PROT_ICMP6_INNER (1ULL << 17)
+#define IAVF_PROT_ICMP6_OUTER (1ULL << 18)
+#define IAVF_PROT_VXLAN (1ULL << 19)
+#define IAVF_PROT_NVGRE (1ULL << 20)
+#define IAVF_PROT_GTPU (1ULL << 21)
+#define IAVF_PROT_ESP (1ULL << 22)
+#define IAVF_PROT_AH (1ULL << 23)
+#define IAVF_PROT_L2TPV3OIP (1ULL << 24)
+#define IAVF_PROT_PFCP (1ULL << 25)
+
+
+/* field */
+
+#define IAVF_SMAC (1ULL << 63)
+#define IAVF_DMAC (1ULL << 62)
+#define IAVF_ETHERTYPE (1ULL << 61)
+#define IAVF_IP_SRC (1ULL << 60)
+#define IAVF_IP_DST (1ULL << 59)
+#define IAVF_IP_PROTO (1ULL << 58)
+#define IAVF_IP_TTL (1ULL << 57)
+#define IAVF_IP_TOS (1ULL << 56)
+#define IAVF_SPORT (1ULL << 55)
+#define IAVF_DPORT (1ULL << 54)
+#define IAVF_ICMP_TYPE (1ULL << 53)
+#define IAVF_ICMP_CODE (1ULL << 52)
+#define IAVF_VXLAN_VNI (1ULL << 51)
+#define IAVF_NVGRE_TNI (1ULL << 50)
+#define IAVF_GTPU_TEID (1ULL << 49)
+#define IAVF_GTPU_QFI (1ULL << 48)
+#define IAVF_ESP_SPI (1ULL << 47)
+#define IAVF_AH_SPI (1ULL << 46)
+#define IAVF_L2TPV3OIP_SESSION_ID (1ULL << 45)
+#define IAVF_PFCP_S_FIELD (1ULL << 44)
+#define IAVF_PFCP_SEID (1ULL << 43)
+
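+/*
+ * An input set is the OR of one protocol bit (allocated upward from bit 1)
+ * and one or more field bits (allocated downward from bit 63), e.g.
+ * IAVF_INSET_IPV4_SRC = IAVF_PROT_IPV4_OUTER | IAVF_IP_SRC.
+ */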
+/* input set */
+
+#define IAVF_INSET_NONE 0ULL
+
+/* non-tunnel */
+
+#define IAVF_INSET_SMAC (IAVF_PROT_MAC_OUTER | IAVF_SMAC)
+#define IAVF_INSET_DMAC (IAVF_PROT_MAC_OUTER | IAVF_DMAC)
+#define IAVF_INSET_VLAN_INNER (IAVF_PROT_VLAN_INNER)
+#define IAVF_INSET_VLAN_OUTER (IAVF_PROT_VLAN_OUTER)
+#define IAVF_INSET_ETHERTYPE (IAVF_ETHERTYPE)
+
+#define IAVF_INSET_IPV4_SRC \
+ (IAVF_PROT_IPV4_OUTER | IAVF_IP_SRC)
+#define IAVF_INSET_IPV4_DST \
+ (IAVF_PROT_IPV4_OUTER | IAVF_IP_DST)
+#define IAVF_INSET_IPV4_TOS \
+ (IAVF_PROT_IPV4_OUTER | IAVF_IP_TOS)
+#define IAVF_INSET_IPV4_PROTO \
+ (IAVF_PROT_IPV4_OUTER | IAVF_IP_PROTO)
+#define IAVF_INSET_IPV4_TTL \
+ (IAVF_PROT_IPV4_OUTER | IAVF_IP_TTL)
+#define IAVF_INSET_IPV6_SRC \
+ (IAVF_PROT_IPV6_OUTER | IAVF_IP_SRC)
+#define IAVF_INSET_IPV6_DST \
+ (IAVF_PROT_IPV6_OUTER | IAVF_IP_DST)
+#define IAVF_INSET_IPV6_NEXT_HDR \
+ (IAVF_PROT_IPV6_OUTER | IAVF_IP_PROTO)
+#define IAVF_INSET_IPV6_HOP_LIMIT \
+ (IAVF_PROT_IPV6_OUTER | IAVF_IP_TTL)
+#define IAVF_INSET_IPV6_TC \
+ (IAVF_PROT_IPV6_OUTER | IAVF_IP_TOS)
+
+#define IAVF_INSET_TCP_SRC_PORT \
+ (IAVF_PROT_TCP_OUTER | IAVF_SPORT)
+#define IAVF_INSET_TCP_DST_PORT \
+ (IAVF_PROT_TCP_OUTER | IAVF_DPORT)
+#define IAVF_INSET_UDP_SRC_PORT \
+ (IAVF_PROT_UDP_OUTER | IAVF_SPORT)
+#define IAVF_INSET_UDP_DST_PORT \
+ (IAVF_PROT_UDP_OUTER | IAVF_DPORT)
+#define IAVF_INSET_SCTP_SRC_PORT \
+ (IAVF_PROT_SCTP_OUTER | IAVF_SPORT)
+#define IAVF_INSET_SCTP_DST_PORT \
+ (IAVF_PROT_SCTP_OUTER | IAVF_DPORT)
+#define IAVF_INSET_ICMP4_SRC_PORT \
+ (IAVF_PROT_ICMP4_OUTER | IAVF_SPORT)
+#define IAVF_INSET_ICMP4_DST_PORT \
+ (IAVF_PROT_ICMP4_OUTER | IAVF_DPORT)
+#define IAVF_INSET_ICMP6_SRC_PORT \
+ (IAVF_PROT_ICMP6_OUTER | IAVF_SPORT)
+#define IAVF_INSET_ICMP6_DST_PORT \
+ (IAVF_PROT_ICMP6_OUTER | IAVF_DPORT)
+#define IAVF_INSET_ICMP4_TYPE \
+ (IAVF_PROT_ICMP4_OUTER | IAVF_ICMP_TYPE)
+#define IAVF_INSET_ICMP4_CODE \
+ (IAVF_PROT_ICMP4_OUTER | IAVF_ICMP_CODE)
+#define IAVF_INSET_ICMP6_TYPE \
+ (IAVF_PROT_ICMP6_OUTER | IAVF_ICMP_TYPE)
+#define IAVF_INSET_ICMP6_CODE \
+ (IAVF_PROT_ICMP6_OUTER | IAVF_ICMP_CODE)
+#define IAVF_INSET_GTPU_TEID \
+ (IAVF_PROT_GTPU | IAVF_GTPU_TEID)
+#define IAVF_INSET_GTPU_QFI \
+ (IAVF_PROT_GTPU | IAVF_GTPU_QFI)
+#define IAVF_INSET_ESP_SPI \
+ (IAVF_PROT_ESP | IAVF_ESP_SPI)
+#define IAVF_INSET_AH_SPI \
+ (IAVF_PROT_AH | IAVF_AH_SPI)
+#define IAVF_INSET_L2TPV3OIP_SESSION_ID \
+ (IAVF_PROT_L2TPV3OIP | IAVF_L2TPV3OIP_SESSION_ID)
+#define IAVF_INSET_PFCP_S_FIELD \
+ (IAVF_PROT_PFCP | IAVF_PFCP_S_FIELD)
+#define IAVF_INSET_PFCP_SEID \
+ (IAVF_PROT_PFCP | IAVF_PFCP_S_FIELD | IAVF_PFCP_SEID)
+
+
+/* empty pattern */
+extern enum rte_flow_item_type iavf_pattern_empty[];
+
+/* L2 */
+extern enum rte_flow_item_type iavf_pattern_ethertype[];
+extern enum rte_flow_item_type iavf_pattern_ethertype_vlan[];
+extern enum rte_flow_item_type iavf_pattern_ethertype_qinq[];
+
+/* ARP */
+extern enum rte_flow_item_type iavf_pattern_eth_arp[];
+
+/* non-tunnel IPv4 */
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4[];
+extern enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4[];
+extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_udp[];
+extern enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_udp[];
+extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_udp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_tcp[];
+extern enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_tcp[];
+extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_tcp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_sctp[];
+extern enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_sctp[];
+extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_sctp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_icmp[];
+extern enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_icmp[];
+extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_icmp[];
+
+/* non-tunnel IPv6 */
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6[];
+extern enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6[];
+extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_udp[];
+extern enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_udp[];
+extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_udp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_tcp[];
+extern enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_tcp[];
+extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_tcp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_sctp[];
+extern enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_sctp[];
+extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_sctp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_icmp6[];
+extern enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_icmp6[];
+extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_icmp6[];
+
+/* GTPU */
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv4[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_icmp[];
+
+/* ESP */
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_esp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_udp_esp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_esp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_udp_esp[];
+
+/* AH */
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_ah[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_ah[];
+
+/* L2TPV3 */
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_l2tpv3[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_l2tpv3[];
+
+/* PFCP */
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_pfcp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_pfcp[];
+
+
+extern const struct rte_flow_ops iavf_flow_ops;
+
+/* pattern structure */
+struct iavf_pattern_match_item {
+ enum rte_flow_item_type *pattern_list;
+ /* pattern_list must end with RTE_FLOW_ITEM_TYPE_END */
+ uint64_t input_set_mask;
+ void *meta;
+};
+
+typedef int (*engine_init_t)(struct iavf_adapter *ad);
+typedef void (*engine_uninit_t)(struct iavf_adapter *ad);
+typedef int (*engine_validation_t)(struct iavf_adapter *ad,
+ struct rte_flow *flow,
+ void *meta,
+ struct rte_flow_error *error);
+typedef int (*engine_create_t)(struct iavf_adapter *ad,
+ struct rte_flow *flow,
+ void *meta,
+ struct rte_flow_error *error);
+typedef int (*engine_destroy_t)(struct iavf_adapter *ad,
+ struct rte_flow *flow,
+ struct rte_flow_error *error);
+typedef int (*engine_query_t)(struct iavf_adapter *ad,
+ struct rte_flow *flow,
+ struct rte_flow_query_count *count,
+ struct rte_flow_error *error);
+typedef void (*engine_free_t) (struct rte_flow *flow);
+typedef int (*parse_pattern_action_t)(struct iavf_adapter *ad,
+ struct iavf_pattern_match_item *array,
+ uint32_t array_len,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ void **meta,
+ struct rte_flow_error *error);
+
+/* engine types. */
+enum iavf_flow_engine_type {
+ IAVF_FLOW_ENGINE_NONE = 0,
+ IAVF_FLOW_ENGINE_FDIR,
+ IAVF_FLOW_ENGINE_HASH,
+ IAVF_FLOW_ENGINE_MAX,
+};
+
+/**
+ * Classification stages.
+ * In non-pipeline mode there are two classification stages:
+ * Distributor/RSS.
+ * In pipeline mode there are three classification stages:
+ * Permission/Distributor/RSS.
+ */
+enum iavf_flow_classification_stage {
+ IAVF_FLOW_STAGE_NONE = 0,
+ IAVF_FLOW_STAGE_RSS,
+ IAVF_FLOW_STAGE_DISTRIBUTOR,
+ IAVF_FLOW_STAGE_MAX,
+};
+
+/* Struct to store engine created. */
+struct iavf_flow_engine {
+ TAILQ_ENTRY(iavf_flow_engine) node;
+ engine_init_t init;
+ engine_uninit_t uninit;
+ engine_validation_t validation;
+ engine_create_t create;
+ engine_destroy_t destroy;
+ engine_query_t query_count;
+ engine_free_t free;
+ enum iavf_flow_engine_type type;
+};
+
+TAILQ_HEAD(iavf_engine_list, iavf_flow_engine);
+
+/* Struct to store flow created. */
+struct rte_flow {
+ TAILQ_ENTRY(rte_flow) node;
+ struct iavf_flow_engine *engine;
+ void *rule;
+};
+
+struct iavf_flow_parser {
+ struct iavf_flow_engine *engine;
+ struct iavf_pattern_match_item *array;
+ uint32_t array_len;
+ parse_pattern_action_t parse_pattern_action;
+ enum iavf_flow_classification_stage stage;
+};
+
+/* Struct to store parser created. */
+struct iavf_flow_parser_node {
+ TAILQ_ENTRY(iavf_flow_parser_node) node;
+ struct iavf_flow_parser *parser;
+};
+
+void iavf_register_flow_engine(struct iavf_flow_engine *engine);
+int iavf_flow_init(struct iavf_adapter *ad);
+void iavf_flow_uninit(struct iavf_adapter *ad);
+int iavf_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error);
+int iavf_register_parser(struct iavf_flow_parser *parser,
+ struct iavf_adapter *ad);
+void iavf_unregister_parser(struct iavf_flow_parser *parser,
+ struct iavf_adapter *ad);
+struct iavf_pattern_match_item *
+iavf_search_pattern_match_item(const struct rte_flow_item pattern[],
+ struct iavf_pattern_match_item *array,
+ uint32_t array_len,
+ struct rte_flow_error *error);
+#endif
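For orientation, the sketch below (hypothetical names, callback bodies omitted) shows how a flow engine is expected to plug into the framework declared in this header: it registers itself at constructor time through iavf_register_flow_engine(), and its init callback would in turn register a parser with iavf_register_parser(). The real engines in iavf_fdir.c and iavf_hash.c follow this shape.

	#include <rte_common.h>
	#include "iavf.h"
	#include "iavf_generic_flow.h"

	/* Hypothetical engine skeleton; the callback bodies are left out. */
	static int example_init(struct iavf_adapter *ad);
	static void example_uninit(struct iavf_adapter *ad);
	static int example_create(struct iavf_adapter *ad, struct rte_flow *flow,
				  void *meta, struct rte_flow_error *error);
	static int example_destroy(struct iavf_adapter *ad, struct rte_flow *flow,
				   struct rte_flow_error *error);

	static struct iavf_flow_engine example_engine = {
		.init    = example_init,
		.uninit  = example_uninit,
		.create  = example_create,
		.destroy = example_destroy,
		.type    = IAVF_FLOW_ENGINE_FDIR, /* existing type reused for the sketch */
	};

	/* Runs as a constructor before probe; adds the engine to the global list. */
	RTE_INIT(example_engine_register)
	{
		iavf_register_flow_engine(&example_engine);
	}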
diff --git a/src/spdk/dpdk/drivers/net/iavf/iavf_hash.c b/src/spdk/dpdk/drivers/net/iavf/iavf_hash.c
new file mode 100644
index 000000000..af528863b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/iavf/iavf_hash.c
@@ -0,0 +1,1236 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+
+#include "iavf_log.h"
+#include "iavf.h"
+#include "iavf_generic_flow.h"
+
+enum iavf_pattern_hint_type {
+ IAVF_PATTERN_HINT_NONE,
+ IAVF_PATTERN_HINT_IPV4,
+ IAVF_PATTERN_HINT_IPV4_UDP,
+ IAVF_PATTERN_HINT_IPV4_TCP,
+ IAVF_PATTERN_HINT_IPV4_SCTP,
+ IAVF_PATTERN_HINT_IPV6,
+ IAVF_PATTERN_HINT_IPV6_UDP,
+ IAVF_PATTERN_HINT_IPV6_TCP,
+ IAVF_PATTERN_HINT_IPV6_SCTP,
+};
+
+struct iavf_pattern_match_type {
+ enum iavf_pattern_hint_type phint_type;
+};
+
+struct iavf_hash_match_type {
+ enum iavf_pattern_hint_type phint_type;
+ uint64_t hash_type;
+ struct virtchnl_proto_hdrs *proto_hdrs;
+};
+
+struct iavf_rss_meta {
+ struct virtchnl_proto_hdrs *proto_hdrs;
+ enum virtchnl_rss_algorithm rss_algorithm;
+};
+
+struct iavf_hash_flow_cfg {
+ struct virtchnl_rss_cfg *rss_cfg;
+ bool simple_xor;
+};
+
+static int
+iavf_hash_init(struct iavf_adapter *ad);
+static int
+iavf_hash_create(struct iavf_adapter *ad, struct rte_flow *flow, void *meta,
+ struct rte_flow_error *error);
+static int
+iavf_hash_destroy(struct iavf_adapter *ad, struct rte_flow *flow,
+ struct rte_flow_error *error);
+static void
+iavf_hash_uninit(struct iavf_adapter *ad);
+static void
+iavf_hash_free(struct rte_flow *flow);
+static int
+iavf_hash_parse_pattern_action(struct iavf_adapter *ad,
+ struct iavf_pattern_match_item *array,
+ uint32_t array_len,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ void **meta,
+ struct rte_flow_error *error);
+
+struct iavf_pattern_match_type phint_empty = {
+ IAVF_PATTERN_HINT_NONE};
+struct iavf_pattern_match_type phint_eth_ipv4 = {
+ IAVF_PATTERN_HINT_IPV4};
+struct iavf_pattern_match_type phint_eth_ipv4_udp = {
+ IAVF_PATTERN_HINT_IPV4_UDP};
+struct iavf_pattern_match_type phint_eth_ipv4_tcp = {
+ IAVF_PATTERN_HINT_IPV4_TCP};
+struct iavf_pattern_match_type phint_eth_ipv4_sctp = {
+ IAVF_PATTERN_HINT_IPV4_SCTP};
+struct iavf_pattern_match_type phint_eth_ipv4_gtpu_eh = {
+ IAVF_PATTERN_HINT_IPV4_UDP};
+struct iavf_pattern_match_type phint_eth_ipv4_esp = {
+ IAVF_PATTERN_HINT_IPV4};
+struct iavf_pattern_match_type phint_eth_ipv4_ah = {
+ IAVF_PATTERN_HINT_IPV4};
+struct iavf_pattern_match_type phint_eth_ipv4_l2tpv3 = {
+ IAVF_PATTERN_HINT_IPV4};
+struct iavf_pattern_match_type phint_eth_ipv4_pfcp = {
+ IAVF_PATTERN_HINT_IPV4_UDP};
+struct iavf_pattern_match_type phint_eth_ipv6 = {
+ IAVF_PATTERN_HINT_IPV6};
+struct iavf_pattern_match_type phint_eth_ipv6_udp = {
+ IAVF_PATTERN_HINT_IPV6_UDP};
+struct iavf_pattern_match_type phint_eth_ipv6_tcp = {
+ IAVF_PATTERN_HINT_IPV6_TCP};
+struct iavf_pattern_match_type phint_eth_ipv6_sctp = {
+ IAVF_PATTERN_HINT_IPV6_SCTP};
+struct iavf_pattern_match_type phint_eth_ipv6_esp = {
+ IAVF_PATTERN_HINT_IPV6};
+struct iavf_pattern_match_type phint_eth_ipv6_ah = {
+ IAVF_PATTERN_HINT_IPV6};
+struct iavf_pattern_match_type phint_eth_ipv6_l2tpv3 = {
+ IAVF_PATTERN_HINT_IPV6};
+struct iavf_pattern_match_type phint_eth_ipv6_pfcp = {
+ IAVF_PATTERN_HINT_IPV6_UDP};
+
+/**
+ * Supported pattern for hash.
+ * The first member is the pattern item list,
+ * the second member is the input set mask,
+ * the third member is the pattern hint used for hash.
+ */
+static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
+ {iavf_pattern_eth_ipv4, IAVF_INSET_NONE, &phint_eth_ipv4},
+ {iavf_pattern_eth_ipv4_udp, IAVF_INSET_NONE, &phint_eth_ipv4_udp},
+ {iavf_pattern_eth_ipv4_tcp, IAVF_INSET_NONE, &phint_eth_ipv4_tcp},
+ {iavf_pattern_eth_ipv4_sctp, IAVF_INSET_NONE, &phint_eth_ipv4_sctp},
+ {iavf_pattern_eth_ipv6, IAVF_INSET_NONE, &phint_eth_ipv6},
+ {iavf_pattern_eth_ipv4_gtpu_eh_ipv4, IAVF_INSET_NONE,
+ &phint_eth_ipv4_gtpu_eh},
+ {iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp, IAVF_INSET_NONE,
+ &phint_eth_ipv4_gtpu_eh},
+ {iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp, IAVF_INSET_NONE,
+ &phint_eth_ipv4_gtpu_eh},
+ {iavf_pattern_eth_ipv4_esp, IAVF_INSET_NONE, &phint_eth_ipv4_esp},
+ {iavf_pattern_eth_ipv4_ah, IAVF_INSET_NONE, &phint_eth_ipv4_ah},
+ {iavf_pattern_eth_ipv4_l2tpv3, IAVF_INSET_NONE,
+ &phint_eth_ipv4_l2tpv3},
+ {iavf_pattern_eth_ipv4_pfcp, IAVF_INSET_NONE, &phint_eth_ipv4_pfcp},
+ {iavf_pattern_eth_ipv6_udp, IAVF_INSET_NONE, &phint_eth_ipv6_udp},
+ {iavf_pattern_eth_ipv6_tcp, IAVF_INSET_NONE, &phint_eth_ipv6_tcp},
+ {iavf_pattern_eth_ipv6_sctp, IAVF_INSET_NONE, &phint_eth_ipv6_sctp},
+ {iavf_pattern_eth_ipv6_esp, IAVF_INSET_NONE, &phint_eth_ipv6_esp},
+ {iavf_pattern_eth_ipv6_ah, IAVF_INSET_NONE, &phint_eth_ipv6_ah},
+ {iavf_pattern_eth_ipv6_l2tpv3, IAVF_INSET_NONE,
+ &phint_eth_ipv6_l2tpv3},
+ {iavf_pattern_eth_ipv6_pfcp, IAVF_INSET_NONE, &phint_eth_ipv6_pfcp},
+ {iavf_pattern_empty, IAVF_INSET_NONE, &phint_empty},
+};
+
+#define GTP_EH_PDU_LINK_UP 1
+#define GTP_EH_PDU_LINK_DWN 0
+
+#define TUNNEL_LEVEL_OUTER 0
+#define TUNNEL_LEVEL_FIRST_INNER 1
+
+#define PROTO_COUNT_ONE 1
+#define PROTO_COUNT_TWO 2
+#define PROTO_COUNT_THREE 3
+
+#define BUFF_NOUSED 0
+#define FIELD_FOR_PROTO_ONLY 0
+
+#define proto_hint_eth_src { \
+ VIRTCHNL_PROTO_HDR_ETH, VIRTCHNL_PROTO_HDR_ETH_SRC, {BUFF_NOUSED } }
+
+#define proto_hint_eth_dst { \
+ VIRTCHNL_PROTO_HDR_ETH, VIRTCHNL_PROTO_HDR_ETH_DST, {BUFF_NOUSED } }
+
+#define proto_hint_eth_only { \
+ VIRTCHNL_PROTO_HDR_ETH, FIELD_FOR_PROTO_ONLY, {BUFF_NOUSED } }
+
+#define proto_hint_eth { \
+ VIRTCHNL_PROTO_HDR_ETH, \
+ VIRTCHNL_PROTO_HDR_ETH_SRC | VIRTCHNL_PROTO_HDR_ETH_DST, \
+ {BUFF_NOUSED } }
+
+#define proto_hint_svlan { \
+ VIRTCHNL_PROTO_HDR_S_VLAN, VIRTCHNL_PROTO_HDR_S_VLAN_ID, \
+ {BUFF_NOUSED } }
+
+#define proto_hint_cvlan { \
+ VIRTCHNL_PROTO_HDR_C_VLAN, VIRTCHNL_PROTO_HDR_C_VLAN_ID, \
+ {BUFF_NOUSED } }
+
+#define proto_hint_ipv4_src { \
+ VIRTCHNL_PROTO_HDR_IPV4, VIRTCHNL_PROTO_HDR_IPV4_SRC, {BUFF_NOUSED } }
+
+#define proto_hint_ipv4_dst { \
+ VIRTCHNL_PROTO_HDR_IPV4, VIRTCHNL_PROTO_HDR_IPV4_DST, {BUFF_NOUSED } }
+
+#define proto_hint_ipv4_only { \
+ VIRTCHNL_PROTO_HDR_IPV4, FIELD_FOR_PROTO_ONLY, {BUFF_NOUSED } }
+
+#define proto_hint_ipv4 { \
+ VIRTCHNL_PROTO_HDR_IPV4, \
+ VIRTCHNL_PROTO_HDR_IPV4_SRC | VIRTCHNL_PROTO_HDR_IPV4_DST, \
+ {BUFF_NOUSED } }
+
+#define proto_hint_udp_src_port { \
+ VIRTCHNL_PROTO_HDR_UDP, VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, \
+ {BUFF_NOUSED } }
+
+#define proto_hint_udp_dst_port { \
+ VIRTCHNL_PROTO_HDR_UDP, VIRTCHNL_PROTO_HDR_UDP_DST_PORT, \
+ {BUFF_NOUSED } }
+
+#define proto_hint_udp_only { \
+ VIRTCHNL_PROTO_HDR_UDP, FIELD_FOR_PROTO_ONLY, {BUFF_NOUSED } }
+
+#define proto_hint_udp { \
+ VIRTCHNL_PROTO_HDR_UDP, \
+ VIRTCHNL_PROTO_HDR_UDP_SRC_PORT | VIRTCHNL_PROTO_HDR_UDP_DST_PORT, \
+ {BUFF_NOUSED } }
+
+#define proto_hint_tcp_src_port { \
+ VIRTCHNL_PROTO_HDR_TCP, VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, \
+ {BUFF_NOUSED } }
+
+#define proto_hint_tcp_dst_port { \
+ VIRTCHNL_PROTO_HDR_TCP, VIRTCHNL_PROTO_HDR_TCP_DST_PORT, \
+ {BUFF_NOUSED } }
+
+#define proto_hint_tcp_only { \
+ VIRTCHNL_PROTO_HDR_TCP, FIELD_FOR_PROTO_ONLY, {BUFF_NOUSED } }
+
+#define proto_hint_tcp { \
+ VIRTCHNL_PROTO_HDR_TCP, \
+ VIRTCHNL_PROTO_HDR_TCP_SRC_PORT | VIRTCHNL_PROTO_HDR_TCP_DST_PORT, \
+ {BUFF_NOUSED } }
+
+#define proto_hint_sctp_src_port { \
+ VIRTCHNL_PROTO_HDR_SCTP, VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, \
+ {BUFF_NOUSED } }
+
+#define proto_hint_sctp_dst_port { \
+ VIRTCHNL_PROTO_HDR_SCTP, VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, \
+ {BUFF_NOUSED } }
+
+#define proto_hint_sctp_only { \
+ VIRTCHNL_PROTO_HDR_SCTP, FIELD_FOR_PROTO_ONLY, {BUFF_NOUSED } }
+
+#define proto_hint_sctp { \
+ VIRTCHNL_PROTO_HDR_SCTP, \
+ VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT | VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, \
+ {BUFF_NOUSED } }
+
+#define proto_hint_ipv6_src { \
+ VIRTCHNL_PROTO_HDR_IPV6, VIRTCHNL_PROTO_HDR_IPV6_SRC, {BUFF_NOUSED } }
+
+#define proto_hint_ipv6_dst { \
+ VIRTCHNL_PROTO_HDR_IPV6, VIRTCHNL_PROTO_HDR_IPV6_DST, {BUFF_NOUSED } }
+
+#define proto_hint_ipv6_only { \
+ VIRTCHNL_PROTO_HDR_IPV6, FIELD_FOR_PROTO_ONLY, {BUFF_NOUSED } }
+
+#define proto_hint_ipv6 { \
+ VIRTCHNL_PROTO_HDR_IPV6, \
+ VIRTCHNL_PROTO_HDR_IPV6_SRC | VIRTCHNL_PROTO_HDR_IPV6_DST, \
+ {BUFF_NOUSED } }
+
+#define proto_hint_gtpu_up_only { \
+ VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP, \
+ FIELD_FOR_PROTO_ONLY, {BUFF_NOUSED } }
+
+#define proto_hint_gtpu_dwn_only { \
+ VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN, \
+ FIELD_FOR_PROTO_ONLY, {BUFF_NOUSED } }
+
+#define proto_hint_esp { \
+ VIRTCHNL_PROTO_HDR_ESP, \
+ VIRTCHNL_PROTO_HDR_ESP_SPI, {BUFF_NOUSED } }
+
+#define proto_hint_ah { \
+ VIRTCHNL_PROTO_HDR_AH, \
+ VIRTCHNL_PROTO_HDR_AH_SPI, {BUFF_NOUSED } }
+
+#define proto_hint_l2tpv3 { \
+ VIRTCHNL_PROTO_HDR_L2TPV3, \
+ VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, {BUFF_NOUSED } }
+
+#define proto_hint_pfcp { \
+ VIRTCHNL_PROTO_HDR_PFCP, VIRTCHNL_PROTO_HDR_PFCP_SEID, {BUFF_NOUSED } }
+
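/*
 * Reading aid (illustrative expansion, not additional code): with the macros
 * above, an entry such as hdrs_hint_ipv4_udp further down expands roughly to
 *
 *	{ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {
 *		{ VIRTCHNL_PROTO_HDR_IPV4,
 *		  VIRTCHNL_PROTO_HDR_IPV4_SRC | VIRTCHNL_PROTO_HDR_IPV4_DST,
 *		  {0} },
 *		{ VIRTCHNL_PROTO_HDR_UDP,
 *		  VIRTCHNL_PROTO_HDR_UDP_SRC_PORT |
 *		  VIRTCHNL_PROTO_HDR_UDP_DST_PORT,
 *		  {0} } } }
 *
 * i.e. each proto_hint_* macro contributes one protocol header entry made of
 * a header type, a field-selector bitmap and an unused buffer.
 */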
+struct virtchnl_proto_hdrs hdrs_hint_eth_src = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_eth_src }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_eth_dst = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_eth_dst }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_eth = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_eth }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_svlan = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_svlan }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_cvlan = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_cvlan }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv4_src = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_ipv4_src }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv4_dst = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_ipv4_dst }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv4_src_gtpu_up = {
+ TUNNEL_LEVEL_FIRST_INNER, PROTO_COUNT_TWO, {proto_hint_gtpu_up_only,
+ proto_hint_ipv4_src }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv4_dst_gtpu_dwn = {
+ TUNNEL_LEVEL_FIRST_INNER, PROTO_COUNT_TWO, {proto_hint_gtpu_dwn_only,
+ proto_hint_ipv4_dst }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv4_esp = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_only,
+ proto_hint_esp }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv4_ah = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_only,
+ proto_hint_ah }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv4_l2tpv3 = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_only,
+ proto_hint_l2tpv3 }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv4_pfcp = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_only,
+ proto_hint_pfcp }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv4 = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_ipv4 }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv4_src_udp_src_port = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_src,
+ proto_hint_udp_src_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv4_src_udp_dst_port = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_src,
+ proto_hint_udp_dst_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv4_dst_udp_src_port = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_dst,
+ proto_hint_udp_src_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv4_dst_udp_dst_port = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_dst,
+ proto_hint_udp_dst_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv4_udp_src_port = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_only,
+ proto_hint_udp_src_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv4_udp_dst_port = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_only,
+ proto_hint_udp_dst_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv4_udp = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4,
+ proto_hint_udp }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv4_src_tcp_src_port = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_src,
+ proto_hint_tcp_src_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv4_src_tcp_dst_port = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_src,
+ proto_hint_tcp_dst_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv4_dst_tcp_src_port = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_dst,
+ proto_hint_tcp_src_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv4_dst_tcp_dst_port = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_dst,
+ proto_hint_tcp_dst_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv4_tcp_src_port = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_only,
+ proto_hint_tcp_src_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv4_tcp_dst_port = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_only,
+ proto_hint_tcp_dst_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv4_tcp = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4,
+ proto_hint_tcp }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv4_src_sctp_src_port = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_src,
+ proto_hint_sctp_src_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv4_src_sctp_dst_port = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_src,
+ proto_hint_sctp_dst_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv4_dst_sctp_src_port = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_dst,
+ proto_hint_sctp_src_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv4_dst_sctp_dst_port = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_dst,
+ proto_hint_sctp_dst_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv4_sctp_src_port = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_only,
+ proto_hint_sctp_src_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv4_sctp_dst_port = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_only,
+ proto_hint_sctp_dst_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv4_sctp = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4,
+ proto_hint_sctp }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv6_src = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_ipv6_src }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv6_dst = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_ipv6_dst }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv6_esp = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_only,
+ proto_hint_esp }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv6_ah = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_only,
+ proto_hint_ah }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv6_l2tpv3 = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_only,
+ proto_hint_l2tpv3 }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv6_pfcp = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_only,
+ proto_hint_pfcp }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv6 = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_ipv6 }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv6_src_udp_src_port = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_src,
+ proto_hint_udp_src_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv6_src_udp_dst_port = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_src,
+ proto_hint_udp_dst_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv6_dst_udp_src_port = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_dst,
+ proto_hint_udp_src_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv6_dst_udp_dst_port = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_dst,
+ proto_hint_udp_dst_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv6_udp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_only,
+	proto_hint_udp_src_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv6_udp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_only,
+	proto_hint_udp_dst_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv6_udp = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6,
+ proto_hint_udp }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv6_src_tcp_src_port = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_src,
+ proto_hint_tcp_src_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv6_src_tcp_dst_port = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_src,
+ proto_hint_tcp_dst_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv6_dst_tcp_src_port = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_dst,
+ proto_hint_tcp_src_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv6_dst_tcp_dst_port = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_dst,
+ proto_hint_tcp_dst_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv6_tcp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_only,
+	proto_hint_tcp_src_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv6_tcp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_only,
+	proto_hint_tcp_dst_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv6_tcp = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6,
+ proto_hint_tcp }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv6_src_sctp_src_port = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_src,
+ proto_hint_sctp_src_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv6_src_sctp_dst_port = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_src,
+ proto_hint_sctp_dst_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv6_dst_sctp_src_port = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_dst,
+ proto_hint_sctp_src_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv6_dst_sctp_dst_port = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_dst,
+ proto_hint_sctp_dst_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv6_sctp_src_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_only,
+	proto_hint_sctp_src_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv6_sctp_dst_port = {
+	TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_only,
+	proto_hint_sctp_dst_port }
+};
+
+struct virtchnl_proto_hdrs hdrs_hint_ipv6_sctp = {
+ TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6,
+ proto_hint_sctp }
+};
+
+/**
+ * The first member is the pattern hint type,
+ * the second member is the hash type,
+ * the third member is the virtchnl protocol hdrs.
+ */
+struct iavf_hash_match_type iavf_hash_type_list[] = {
+ /* IPV4 */
+ {IAVF_PATTERN_HINT_IPV4, ETH_RSS_L2_SRC_ONLY, &hdrs_hint_eth_src},
+ {IAVF_PATTERN_HINT_IPV4, ETH_RSS_L2_DST_ONLY, &hdrs_hint_eth_dst},
+ {IAVF_PATTERN_HINT_IPV4, ETH_RSS_ETH | ETH_RSS_L2_SRC_ONLY,
+ &hdrs_hint_eth_src},
+ {IAVF_PATTERN_HINT_IPV4, ETH_RSS_ETH | ETH_RSS_L2_DST_ONLY,
+ &hdrs_hint_eth_dst},
+ {IAVF_PATTERN_HINT_IPV4, ETH_RSS_ETH, &hdrs_hint_eth},
+ {IAVF_PATTERN_HINT_IPV4, ETH_RSS_S_VLAN, &hdrs_hint_svlan},
+ {IAVF_PATTERN_HINT_IPV4, ETH_RSS_C_VLAN, &hdrs_hint_cvlan},
+ {IAVF_PATTERN_HINT_IPV4, ETH_RSS_L3_SRC_ONLY, &hdrs_hint_ipv4_src},
+ {IAVF_PATTERN_HINT_IPV4, ETH_RSS_L3_DST_ONLY, &hdrs_hint_ipv4_dst},
+ {IAVF_PATTERN_HINT_IPV4, ETH_RSS_IPV4 | ETH_RSS_L3_SRC_ONLY,
+ &hdrs_hint_ipv4_src},
+ {IAVF_PATTERN_HINT_IPV4, ETH_RSS_IPV4 | ETH_RSS_L3_DST_ONLY,
+ &hdrs_hint_ipv4_dst},
+ {IAVF_PATTERN_HINT_IPV4, ETH_RSS_ESP, &hdrs_hint_ipv4_esp},
+ {IAVF_PATTERN_HINT_IPV4, ETH_RSS_AH, &hdrs_hint_ipv4_ah},
+ {IAVF_PATTERN_HINT_IPV4, ETH_RSS_L2TPV3, &hdrs_hint_ipv4_l2tpv3},
+ {IAVF_PATTERN_HINT_IPV4, ETH_RSS_IPV4, &hdrs_hint_ipv4},
+ /* IPV4 UDP */
+ {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY,
+ &hdrs_hint_ipv4_src_udp_src_port},
+ {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY,
+ &hdrs_hint_ipv4_src_udp_dst_port},
+ {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_L3_SRC_ONLY | ETH_RSS_GTPU,
+ &hdrs_hint_ipv4_src_gtpu_up},
+ {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_L3_SRC_ONLY,
+ &hdrs_hint_ipv4_src},
+ {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY,
+ &hdrs_hint_ipv4_dst_udp_src_port},
+ {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY,
+ &hdrs_hint_ipv4_dst_udp_dst_port},
+ {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_L3_DST_ONLY | ETH_RSS_GTPU,
+ &hdrs_hint_ipv4_dst_gtpu_dwn},
+ {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_L3_DST_ONLY,
+ &hdrs_hint_ipv4_dst},
+ {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_NONFRAG_IPV4_UDP |
+ ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY,
+ &hdrs_hint_ipv4_src_udp_src_port},
+ {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_NONFRAG_IPV4_UDP |
+ ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY,
+ &hdrs_hint_ipv4_src_udp_dst_port},
+ {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_NONFRAG_IPV4_UDP |
+ ETH_RSS_L3_SRC_ONLY | ETH_RSS_GTPU,
+ &hdrs_hint_ipv4_src_gtpu_up},
+ {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_NONFRAG_IPV4_UDP |
+ ETH_RSS_L3_SRC_ONLY, &hdrs_hint_ipv4_src},
+ {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_NONFRAG_IPV4_UDP |
+ ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY,
+ &hdrs_hint_ipv4_dst_udp_src_port},
+ {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_NONFRAG_IPV4_UDP |
+ ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY,
+ &hdrs_hint_ipv4_dst_udp_dst_port},
+ {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_NONFRAG_IPV4_UDP |
+ ETH_RSS_L3_DST_ONLY | ETH_RSS_GTPU,
+ &hdrs_hint_ipv4_dst_gtpu_dwn},
+ {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_NONFRAG_IPV4_UDP |
+ ETH_RSS_L3_DST_ONLY, &hdrs_hint_ipv4_dst},
+ {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_L4_SRC_ONLY,
+ &hdrs_hint_ipv4_udp_src_port},
+ {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_L4_DST_ONLY,
+ &hdrs_hint_ipv4_udp_dst_port},
+ {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_PFCP,
+ &hdrs_hint_ipv4_pfcp},
+ {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_NONFRAG_IPV4_UDP,
+ &hdrs_hint_ipv4_udp},
+ /* IPV4 TCP */
+ {IAVF_PATTERN_HINT_IPV4_TCP, ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY,
+ &hdrs_hint_ipv4_src_tcp_src_port},
+ {IAVF_PATTERN_HINT_IPV4_TCP, ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY,
+ &hdrs_hint_ipv4_src_tcp_dst_port},
+ {IAVF_PATTERN_HINT_IPV4_TCP, ETH_RSS_L3_SRC_ONLY,
+ &hdrs_hint_ipv4_src},
+ {IAVF_PATTERN_HINT_IPV4_TCP, ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY,
+ &hdrs_hint_ipv4_dst_tcp_src_port},
+ {IAVF_PATTERN_HINT_IPV4_TCP, ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY,
+ &hdrs_hint_ipv4_dst_tcp_dst_port},
+ {IAVF_PATTERN_HINT_IPV4_TCP, ETH_RSS_L3_DST_ONLY,
+ &hdrs_hint_ipv4_dst},
+ {IAVF_PATTERN_HINT_IPV4_TCP, ETH_RSS_NONFRAG_IPV4_TCP |
+ ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY,
+ &hdrs_hint_ipv4_src_tcp_src_port},
+ {IAVF_PATTERN_HINT_IPV4_TCP, ETH_RSS_NONFRAG_IPV4_TCP |
+ ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY,
+ &hdrs_hint_ipv4_src_tcp_dst_port},
+ {IAVF_PATTERN_HINT_IPV4_TCP, ETH_RSS_NONFRAG_IPV4_TCP |
+ ETH_RSS_L3_SRC_ONLY, &hdrs_hint_ipv4_src},
+ {IAVF_PATTERN_HINT_IPV4_TCP, ETH_RSS_NONFRAG_IPV4_TCP |
+ ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY,
+ &hdrs_hint_ipv4_dst_tcp_src_port},
+ {IAVF_PATTERN_HINT_IPV4_TCP, ETH_RSS_NONFRAG_IPV4_TCP |
+ ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY,
+ &hdrs_hint_ipv4_dst_tcp_dst_port},
+ {IAVF_PATTERN_HINT_IPV4_TCP, ETH_RSS_NONFRAG_IPV4_TCP |
+ ETH_RSS_L3_DST_ONLY, &hdrs_hint_ipv4_dst},
+ {IAVF_PATTERN_HINT_IPV4_TCP, ETH_RSS_L4_SRC_ONLY,
+ &hdrs_hint_ipv4_tcp_src_port},
+ {IAVF_PATTERN_HINT_IPV4_TCP, ETH_RSS_L4_DST_ONLY,
+ &hdrs_hint_ipv4_tcp_dst_port},
+ {IAVF_PATTERN_HINT_IPV4_TCP, ETH_RSS_NONFRAG_IPV4_TCP,
+ &hdrs_hint_ipv4_tcp},
+ /* IPV4 SCTP */
+ {IAVF_PATTERN_HINT_IPV4_SCTP, ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY,
+ &hdrs_hint_ipv4_src_sctp_src_port},
+ {IAVF_PATTERN_HINT_IPV4_SCTP, ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY,
+ &hdrs_hint_ipv4_src_sctp_dst_port},
+ {IAVF_PATTERN_HINT_IPV4_SCTP, ETH_RSS_L3_SRC_ONLY,
+ &hdrs_hint_ipv4_src},
+ {IAVF_PATTERN_HINT_IPV4_SCTP, ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY,
+ &hdrs_hint_ipv4_dst_sctp_src_port},
+ {IAVF_PATTERN_HINT_IPV4_SCTP, ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY,
+ &hdrs_hint_ipv4_dst_sctp_dst_port},
+ {IAVF_PATTERN_HINT_IPV4_SCTP, ETH_RSS_L3_DST_ONLY,
+ &hdrs_hint_ipv4_dst},
+ {IAVF_PATTERN_HINT_IPV4_SCTP, ETH_RSS_NONFRAG_IPV4_SCTP |
+ ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY,
+ &hdrs_hint_ipv4_src_sctp_src_port},
+ {IAVF_PATTERN_HINT_IPV4_SCTP, ETH_RSS_NONFRAG_IPV4_SCTP |
+ ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY,
+ &hdrs_hint_ipv4_src_sctp_dst_port},
+ {IAVF_PATTERN_HINT_IPV4_SCTP, ETH_RSS_NONFRAG_IPV4_SCTP |
+ ETH_RSS_L3_SRC_ONLY, &hdrs_hint_ipv4_src},
+ {IAVF_PATTERN_HINT_IPV4_SCTP, ETH_RSS_NONFRAG_IPV4_SCTP |
+ ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY,
+ &hdrs_hint_ipv4_dst_sctp_src_port},
+ {IAVF_PATTERN_HINT_IPV4_SCTP, ETH_RSS_NONFRAG_IPV4_SCTP |
+ ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY,
+ &hdrs_hint_ipv4_dst_sctp_dst_port},
+ {IAVF_PATTERN_HINT_IPV4_SCTP, ETH_RSS_NONFRAG_IPV4_SCTP |
+ ETH_RSS_L3_DST_ONLY, &hdrs_hint_ipv4_dst},
+ {IAVF_PATTERN_HINT_IPV4_SCTP, ETH_RSS_L4_SRC_ONLY,
+ &hdrs_hint_ipv4_sctp_src_port},
+ {IAVF_PATTERN_HINT_IPV4_SCTP, ETH_RSS_L4_DST_ONLY,
+ &hdrs_hint_ipv4_sctp_dst_port},
+ {IAVF_PATTERN_HINT_IPV4_SCTP, ETH_RSS_NONFRAG_IPV4_SCTP,
+ &hdrs_hint_ipv4_sctp},
+ /* IPV6 */
+ {IAVF_PATTERN_HINT_IPV6, ETH_RSS_L2_SRC_ONLY, &hdrs_hint_eth_src},
+ {IAVF_PATTERN_HINT_IPV6, ETH_RSS_L2_DST_ONLY, &hdrs_hint_eth_dst},
+ {IAVF_PATTERN_HINT_IPV6, ETH_RSS_IPV6 | ETH_RSS_L2_SRC_ONLY,
+ &hdrs_hint_eth_src},
+ {IAVF_PATTERN_HINT_IPV6, ETH_RSS_IPV6 | ETH_RSS_L2_DST_ONLY,
+ &hdrs_hint_eth_dst},
+ {IAVF_PATTERN_HINT_IPV6, ETH_RSS_ETH, &hdrs_hint_eth},
+ {IAVF_PATTERN_HINT_IPV6, ETH_RSS_S_VLAN, &hdrs_hint_svlan},
+	{IAVF_PATTERN_HINT_IPV6, ETH_RSS_C_VLAN, &hdrs_hint_cvlan},
+ {IAVF_PATTERN_HINT_IPV6, ETH_RSS_L3_SRC_ONLY, &hdrs_hint_ipv6_src},
+ {IAVF_PATTERN_HINT_IPV6, ETH_RSS_L3_DST_ONLY, &hdrs_hint_ipv6_dst},
+ {IAVF_PATTERN_HINT_IPV6, ETH_RSS_IPV6 | ETH_RSS_L3_SRC_ONLY,
+ &hdrs_hint_ipv6_src},
+ {IAVF_PATTERN_HINT_IPV6, ETH_RSS_IPV6 | ETH_RSS_L3_DST_ONLY,
+ &hdrs_hint_ipv6_dst},
+ {IAVF_PATTERN_HINT_IPV6, ETH_RSS_ESP, &hdrs_hint_ipv6_esp},
+ {IAVF_PATTERN_HINT_IPV6, ETH_RSS_AH, &hdrs_hint_ipv6_ah},
+ {IAVF_PATTERN_HINT_IPV6, ETH_RSS_L2TPV3, &hdrs_hint_ipv6_l2tpv3},
+ {IAVF_PATTERN_HINT_IPV6, ETH_RSS_IPV6, &hdrs_hint_ipv6},
+ /* IPV6 UDP */
+ {IAVF_PATTERN_HINT_IPV6_UDP, ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY,
+ &hdrs_hint_ipv6_src_udp_src_port},
+ {IAVF_PATTERN_HINT_IPV6_UDP, ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY,
+ &hdrs_hint_ipv6_src_udp_dst_port},
+ {IAVF_PATTERN_HINT_IPV6_UDP, ETH_RSS_L3_SRC_ONLY,
+ &hdrs_hint_ipv6_src},
+ {IAVF_PATTERN_HINT_IPV6_UDP, ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY,
+ &hdrs_hint_ipv6_dst_udp_src_port},
+ {IAVF_PATTERN_HINT_IPV6_UDP, ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY,
+ &hdrs_hint_ipv6_dst_udp_dst_port},
+ {IAVF_PATTERN_HINT_IPV6_UDP, ETH_RSS_L3_DST_ONLY,
+ &hdrs_hint_ipv6_dst},
+ {IAVF_PATTERN_HINT_IPV6_UDP, ETH_RSS_NONFRAG_IPV6_UDP |
+ ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY,
+ &hdrs_hint_ipv6_src_udp_src_port},
+ {IAVF_PATTERN_HINT_IPV6_UDP, ETH_RSS_NONFRAG_IPV6_UDP |
+ ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY,
+ &hdrs_hint_ipv6_src_udp_dst_port},
+ {IAVF_PATTERN_HINT_IPV6_UDP, ETH_RSS_NONFRAG_IPV6_UDP |
+ ETH_RSS_L3_SRC_ONLY, &hdrs_hint_ipv6_src},
+ {IAVF_PATTERN_HINT_IPV6_UDP, ETH_RSS_NONFRAG_IPV6_UDP |
+ ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY,
+ &hdrs_hint_ipv6_dst_udp_src_port},
+ {IAVF_PATTERN_HINT_IPV6_UDP, ETH_RSS_NONFRAG_IPV6_UDP |
+ ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY,
+ &hdrs_hint_ipv6_dst_udp_dst_port},
+ {IAVF_PATTERN_HINT_IPV6_UDP, ETH_RSS_NONFRAG_IPV6_UDP |
+ ETH_RSS_L3_DST_ONLY, &hdrs_hint_ipv6_dst},
+ {IAVF_PATTERN_HINT_IPV6_UDP, ETH_RSS_L4_SRC_ONLY,
+ &hdrs_hint_ipv6_udp_src_port},
+ {IAVF_PATTERN_HINT_IPV6_UDP, ETH_RSS_L4_DST_ONLY,
+ &hdrs_hint_ipv6_udp_dst_port},
+ {IAVF_PATTERN_HINT_IPV6_UDP, ETH_RSS_PFCP,
+ &hdrs_hint_ipv6_pfcp},
+ {IAVF_PATTERN_HINT_IPV6_UDP, ETH_RSS_NONFRAG_IPV6_UDP,
+ &hdrs_hint_ipv6_udp},
+ /* IPV6 TCP */
+ {IAVF_PATTERN_HINT_IPV6_TCP, ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY,
+ &hdrs_hint_ipv6_src_tcp_src_port},
+ {IAVF_PATTERN_HINT_IPV6_TCP, ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY,
+ &hdrs_hint_ipv6_src_tcp_dst_port},
+ {IAVF_PATTERN_HINT_IPV6_TCP, ETH_RSS_L3_SRC_ONLY,
+ &hdrs_hint_ipv6_src},
+ {IAVF_PATTERN_HINT_IPV6_TCP, ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY,
+ &hdrs_hint_ipv6_dst_tcp_src_port},
+ {IAVF_PATTERN_HINT_IPV6_TCP, ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY,
+ &hdrs_hint_ipv6_dst_tcp_dst_port},
+ {IAVF_PATTERN_HINT_IPV6_TCP, ETH_RSS_L3_DST_ONLY,
+ &hdrs_hint_ipv6_dst},
+ {IAVF_PATTERN_HINT_IPV6_TCP, ETH_RSS_NONFRAG_IPV6_TCP |
+ ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY,
+ &hdrs_hint_ipv6_src_tcp_src_port},
+ {IAVF_PATTERN_HINT_IPV6_TCP, ETH_RSS_NONFRAG_IPV6_TCP |
+ ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY,
+ &hdrs_hint_ipv6_src_tcp_dst_port},
+ {IAVF_PATTERN_HINT_IPV6_TCP, ETH_RSS_NONFRAG_IPV6_TCP |
+ ETH_RSS_L3_SRC_ONLY, &hdrs_hint_ipv6_src},
+ {IAVF_PATTERN_HINT_IPV6_TCP, ETH_RSS_NONFRAG_IPV6_TCP |
+ ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY,
+ &hdrs_hint_ipv6_dst_tcp_src_port},
+ {IAVF_PATTERN_HINT_IPV6_TCP, ETH_RSS_NONFRAG_IPV6_TCP |
+ ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY,
+ &hdrs_hint_ipv6_dst_tcp_dst_port},
+ {IAVF_PATTERN_HINT_IPV6_TCP, ETH_RSS_NONFRAG_IPV6_TCP |
+ ETH_RSS_L3_DST_ONLY, &hdrs_hint_ipv6_dst},
+ {IAVF_PATTERN_HINT_IPV6_TCP, ETH_RSS_L4_SRC_ONLY,
+ &hdrs_hint_ipv6_tcp_src_port},
+ {IAVF_PATTERN_HINT_IPV6_TCP, ETH_RSS_L4_DST_ONLY,
+ &hdrs_hint_ipv6_tcp_dst_port},
+ {IAVF_PATTERN_HINT_IPV6_TCP, ETH_RSS_NONFRAG_IPV6_TCP,
+ &hdrs_hint_ipv6_tcp},
+ /* IPV6 SCTP */
+ {IAVF_PATTERN_HINT_IPV6_SCTP, ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY,
+ &hdrs_hint_ipv6_src_sctp_src_port},
+ {IAVF_PATTERN_HINT_IPV6_SCTP, ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY,
+ &hdrs_hint_ipv6_src_sctp_dst_port},
+ {IAVF_PATTERN_HINT_IPV6_SCTP, ETH_RSS_L3_SRC_ONLY,
+ &hdrs_hint_ipv6_src},
+ {IAVF_PATTERN_HINT_IPV6_SCTP, ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY,
+ &hdrs_hint_ipv6_dst_sctp_src_port},
+ {IAVF_PATTERN_HINT_IPV6_SCTP, ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY,
+ &hdrs_hint_ipv6_dst_sctp_dst_port},
+ {IAVF_PATTERN_HINT_IPV6_SCTP, ETH_RSS_L3_DST_ONLY,
+ &hdrs_hint_ipv6_dst},
+ {IAVF_PATTERN_HINT_IPV6_SCTP, ETH_RSS_NONFRAG_IPV6_SCTP |
+ ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY,
+ &hdrs_hint_ipv6_src_sctp_src_port},
+ {IAVF_PATTERN_HINT_IPV6_SCTP, ETH_RSS_NONFRAG_IPV6_SCTP |
+ ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY,
+ &hdrs_hint_ipv6_src_sctp_dst_port},
+ {IAVF_PATTERN_HINT_IPV6_SCTP, ETH_RSS_NONFRAG_IPV6_SCTP |
+ ETH_RSS_L3_SRC_ONLY, &hdrs_hint_ipv6_src},
+ {IAVF_PATTERN_HINT_IPV6_SCTP, ETH_RSS_NONFRAG_IPV6_SCTP |
+ ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY,
+ &hdrs_hint_ipv6_dst_sctp_src_port},
+ {IAVF_PATTERN_HINT_IPV6_SCTP, ETH_RSS_NONFRAG_IPV6_SCTP |
+ ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY,
+ &hdrs_hint_ipv6_dst_sctp_dst_port},
+ {IAVF_PATTERN_HINT_IPV6_SCTP, ETH_RSS_NONFRAG_IPV6_SCTP |
+ ETH_RSS_L3_DST_ONLY, &hdrs_hint_ipv6_dst},
+ {IAVF_PATTERN_HINT_IPV6_SCTP, ETH_RSS_L4_SRC_ONLY,
+ &hdrs_hint_ipv6_sctp_src_port},
+ {IAVF_PATTERN_HINT_IPV6_SCTP, ETH_RSS_L4_DST_ONLY,
+ &hdrs_hint_ipv6_sctp_dst_port},
+ {IAVF_PATTERN_HINT_IPV6_SCTP, ETH_RSS_NONFRAG_IPV6_SCTP,
+ &hdrs_hint_ipv6_sctp},
+};
+
+struct virtchnl_proto_hdrs *iavf_hash_default_hdrs[] = {
+ &hdrs_hint_ipv4,
+ &hdrs_hint_ipv4_udp,
+ &hdrs_hint_ipv4_tcp,
+ &hdrs_hint_ipv4_sctp,
+ &hdrs_hint_ipv6,
+ &hdrs_hint_ipv6_udp,
+ &hdrs_hint_ipv6_tcp,
+ &hdrs_hint_ipv6_sctp,
+};
+
+static struct iavf_flow_engine iavf_hash_engine = {
+ .init = iavf_hash_init,
+ .create = iavf_hash_create,
+ .destroy = iavf_hash_destroy,
+ .uninit = iavf_hash_uninit,
+ .free = iavf_hash_free,
+ .type = IAVF_FLOW_ENGINE_HASH,
+};
+
+/* Register parser for comms package. */
+static struct iavf_flow_parser iavf_hash_parser = {
+ .engine = &iavf_hash_engine,
+ .array = iavf_hash_pattern_list,
+ .array_len = RTE_DIM(iavf_hash_pattern_list),
+ .parse_pattern_action = iavf_hash_parse_pattern_action,
+ .stage = IAVF_FLOW_STAGE_RSS,
+};
+
+static int
+iavf_hash_default_set(struct iavf_adapter *ad, bool add)
+{
+ struct virtchnl_rss_cfg *rss_cfg;
+ uint16_t i;
+ int ret;
+
+ rss_cfg = rte_zmalloc("iavf rss rule",
+ sizeof(struct virtchnl_rss_cfg), 0);
+ if (!rss_cfg)
+ return -ENOMEM;
+
+ for (i = 0; i < RTE_DIM(iavf_hash_default_hdrs); i++) {
+ rss_cfg->proto_hdrs = *iavf_hash_default_hdrs[i];
+ rss_cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
+
+ ret = iavf_add_del_rss_cfg(ad, rss_cfg, add);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "fail to %s RSS configure",
+ add ? "add" : "delete");
+ rte_free(rss_cfg);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+RTE_INIT(iavf_hash_engine_init)
+{
+ struct iavf_flow_engine *engine = &iavf_hash_engine;
+
+ iavf_register_flow_engine(engine);
+}
+
+static int
+iavf_hash_init(struct iavf_adapter *ad)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+ struct iavf_flow_parser *parser;
+ int ret;
+
+ if (!vf->vf_res)
+ return -EINVAL;
+
+ if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF))
+ return -ENOTSUP;
+
+ parser = &iavf_hash_parser;
+
+ ret = iavf_register_parser(parser, ad);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "fail to register hash parser");
+ return ret;
+ }
+
+ ret = iavf_hash_default_set(ad, true);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "fail to set default RSS");
+ iavf_unregister_parser(parser, ad);
+ }
+
+ return ret;
+}
+
+static int
+iavf_hash_check_inset(const struct rte_flow_item pattern[],
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item = pattern;
+
+ for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+					   "Range is not supported");
+ return -rte_errno;
+ }
+ }
+
+ return 0;
+}
+
+static uint64_t
+iavf_hash_refine_type(uint64_t rss_type, const struct rte_flow_item pattern[])
+{
+ const struct rte_flow_item *item;
+
+ for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->type == RTE_FLOW_ITEM_TYPE_GTP_PSC) {
+ const struct rte_flow_item_gtp_psc *psc = item->spec;
+
+ if (psc && (psc->pdu_type == GTP_EH_PDU_LINK_UP ||
+ psc->pdu_type == GTP_EH_PDU_LINK_DWN)) {
+ rss_type |= ETH_RSS_GTPU;
+ }
+ }
+ }
+
+ return rss_type;
+}
+
+static int
+iavf_hash_parse_action(struct iavf_pattern_match_item *pattern_match_item,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ void **meta, struct rte_flow_error *error)
+{
+ struct iavf_rss_meta *rss_meta = (struct iavf_rss_meta *)*meta;
+ uint32_t type_list_len = RTE_DIM(iavf_hash_type_list);
+ struct iavf_hash_match_type *type_match_item;
+ enum rte_flow_action_type action_type;
+ const struct rte_flow_action_rss *rss;
+ const struct rte_flow_action *action;
+ bool item_found = false;
+ uint64_t rss_type;
+ uint16_t i;
+
+ struct iavf_pattern_match_type *tt = (struct iavf_pattern_match_type *)
+ (pattern_match_item->meta);
+
+ /* Supported action is RSS. */
+ for (action = actions; action->type !=
+ RTE_FLOW_ACTION_TYPE_END; action++) {
+ action_type = action->type;
+ switch (action_type) {
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ rss = action->conf;
+ rss_type = rss->types;
+
+ /**
+ * Check simultaneous use of SRC_ONLY and DST_ONLY
+ * of the same level.
+ */
+ rss_type = rte_eth_rss_hf_refine(rss_type);
+
+ /**
+			 * Refine the hash type based on specific items in
+			 * the pattern, such as identifying the GTPU hash.
+ */
+ rss_type = iavf_hash_refine_type(rss_type, pattern);
+
+ /* Check if pattern is empty. */
+ if (pattern_match_item->pattern_list !=
+ iavf_pattern_empty && rss->func ==
+ RTE_ETH_HASH_FUNCTION_SIMPLE_XOR)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "Not supported flow");
+
+ if (rss->level)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "a nonzero RSS encapsulation level is not supported");
+
+ if (rss->key_len)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "a nonzero RSS key_len is not supported");
+
+ if (rss->queue_num)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "a non-NULL RSS queue is not supported");
+
+ /* Check hash function and save it to rss_meta. */
+ if (rss->func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR)
+ rss_meta->rss_algorithm =
+ VIRTCHNL_RSS_ALG_XOR_ASYMMETRIC;
+ else if (rss->func ==
+ RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ)
+ rss_meta->rss_algorithm =
+ VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
+ else
+ rss_meta->rss_algorithm =
+ VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
+
+ type_match_item =
+ rte_zmalloc("iavf_type_match_item",
+ sizeof(struct iavf_hash_match_type), 0);
+ if (!type_match_item) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "No memory for type_match_item");
+ return -ENOMEM;
+ }
+
+ /* Find matched proto hdrs according to hash type. */
+ for (i = 0; i < type_list_len; i++) {
+ struct iavf_hash_match_type *ht_map =
+ &iavf_hash_type_list[i];
+ if (rss_type == ht_map->hash_type &&
+ tt->phint_type == ht_map->phint_type) {
+ type_match_item->hash_type =
+ ht_map->hash_type;
+ type_match_item->proto_hdrs =
+ ht_map->proto_hdrs;
+ rss_meta->proto_hdrs =
+ type_match_item->proto_hdrs;
+ item_found = true;
+ }
+ }
+
+ rte_free(type_match_item);
+
+ if (!item_found)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "Not supported flow");
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_END:
+ break;
+
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "Invalid action.");
+ return -rte_errno;
+ }
+ }
+
+ return 0;
+}
+
+static int
+iavf_hash_parse_pattern_action(__rte_unused struct iavf_adapter *ad,
+ struct iavf_pattern_match_item *array,
+ uint32_t array_len,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ void **meta,
+ struct rte_flow_error *error)
+{
+ struct iavf_pattern_match_item *pattern_match_item;
+ struct iavf_rss_meta *rss_meta_ptr;
+ int ret = 0;
+
+ rss_meta_ptr = rte_zmalloc(NULL, sizeof(*rss_meta_ptr), 0);
+ if (!rss_meta_ptr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "No memory for rss_meta_ptr");
+ return -ENOMEM;
+ }
+
+ /* Check rss supported pattern and find matched pattern. */
+ pattern_match_item =
+ iavf_search_pattern_match_item(pattern, array, array_len,
+ error);
+ if (!pattern_match_item) {
+ ret = -rte_errno;
+ goto error;
+ }
+
+ ret = iavf_hash_check_inset(pattern, error);
+ if (ret)
+ goto error;
+
+ /* Check rss action. */
+ ret = iavf_hash_parse_action(pattern_match_item, pattern, actions,
+ (void **)&rss_meta_ptr, error);
+
+error:
+ if (!ret && meta)
+ *meta = rss_meta_ptr;
+ else
+ rte_free(rss_meta_ptr);
+
+ rte_free(pattern_match_item);
+
+ return ret;
+}
+
+static int
+iavf_hash_create(__rte_unused struct iavf_adapter *ad,
+ __rte_unused struct rte_flow *flow, void *meta,
+ __rte_unused struct rte_flow_error *error)
+{
+ struct iavf_rss_meta *rss_meta = (struct iavf_rss_meta *)meta;
+ struct virtchnl_rss_cfg *rss_cfg;
+ int ret = 0;
+
+ rss_cfg = rte_zmalloc("iavf rss rule",
+ sizeof(struct virtchnl_rss_cfg), 0);
+ if (!rss_cfg) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "No memory for rss rule");
+ return -ENOMEM;
+ }
+
+ rss_cfg->proto_hdrs = *rss_meta->proto_hdrs;
+ rss_cfg->rss_algorithm = rss_meta->rss_algorithm;
+
+ ret = iavf_add_del_rss_cfg(ad, rss_cfg, true);
+ if (!ret) {
+ flow->rule = rss_cfg;
+ } else {
+ PMD_DRV_LOG(ERR, "fail to add RSS configure");
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to add rss rule.");
+ rte_free(rss_cfg);
+ return -rte_errno;
+ }
+
+ rte_free(meta);
+
+ return ret;
+}
+
+static int
+iavf_hash_destroy(__rte_unused struct iavf_adapter *ad,
+ struct rte_flow *flow,
+ __rte_unused struct rte_flow_error *error)
+{
+ struct virtchnl_rss_cfg *rss_cfg;
+ int ret = 0;
+
+ rss_cfg = (struct virtchnl_rss_cfg *)flow->rule;
+
+ ret = iavf_add_del_rss_cfg(ad, rss_cfg, false);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "fail to del RSS configure");
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to delete rss rule.");
+ return -rte_errno;
+ }
+ return ret;
+}
+
+static void
+iavf_hash_uninit(struct iavf_adapter *ad)
+{
+ if (iavf_hash_default_set(ad, false))
+ PMD_DRV_LOG(ERR, "fail to delete default RSS");
+
+ iavf_unregister_parser(&iavf_hash_parser, ad);
+}
+
+static void
+iavf_hash_free(struct rte_flow *flow)
+{
+ rte_free(flow->rule);
+}
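To make the table-driven parser above concrete, the following application-side sketch (hypothetical function, port assumed already configured and started) creates one of the rules this file handles through the public rte_flow API. The pattern matches iavf_pattern_eth_ipv4_udp, so the parser resolves it to IAVF_PATTERN_HINT_IPV4_UDP, and ETH_RSS_L4_DST_ONLY then selects hdrs_hint_ipv4_udp_dst_port from iavf_hash_type_list.

	#include <stdint.h>
	#include <rte_errno.h>
	#include <rte_ethdev.h>
	#include <rte_flow.h>

	/* Pattern "eth / ipv4 / udp / end"; spec/mask left NULL on purpose. */
	static const struct rte_flow_item example_pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	static int
	example_hash_on_udp_dst_port(uint16_t port_id)
	{
		const struct rte_flow_attr attr = { .ingress = 1 };
		const struct rte_flow_action_rss rss = {
			.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
			.types = ETH_RSS_L4_DST_ONLY,
			/* level, key_len and queue_num stay 0 as required above */
		};
		const struct rte_flow_action actions[] = {
			{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
			{ .type = RTE_FLOW_ACTION_TYPE_END },
		};
		struct rte_flow_error err;

		return rte_flow_create(port_id, &attr, example_pattern,
				       actions, &err) ? 0 : -rte_errno;
	}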
diff --git a/src/spdk/dpdk/drivers/net/iavf/iavf_log.h b/src/spdk/dpdk/drivers/net/iavf/iavf_log.h
new file mode 100644
index 000000000..1088ec75f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/iavf/iavf_log.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _IAVF_LOG_H_
+#define _IAVF_LOG_H_
+
+extern int iavf_logtype_init;
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, iavf_logtype_init, "%s(): " fmt "\n", \
+ __func__, ## args)
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+
+extern int iavf_logtype_driver;
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, iavf_logtype_driver, "%s(): " fmt, \
+ __func__, ## args)
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+#define PMD_DRV_FUNC_TRACE() PMD_DRV_LOG(DEBUG, " >>")
+
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_RX
+extern int iavf_logtype_rx;
+#define PMD_RX_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, iavf_logtype_rx, \
+ "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX
+extern int iavf_logtype_tx;
+#define PMD_TX_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, iavf_logtype_tx, \
+ "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX_FREE
+extern int iavf_logtype_tx_free;
+#define PMD_TX_FREE_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, iavf_logtype_tx_free, \
+ "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#endif /* _IAVF_LOG_H_ */
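As a usage note (hypothetical helper, values illustrative): the Rx/Tx trace macros above compile to no-ops unless the matching RTE_LIBRTE_IAVF_DEBUG_* option is defined at build time, so they are safe on the data path, while PMD_DRV_LOG is always compiled in and filtered at run time by rte_log.

	#include <stdint.h>
	#include "iavf_log.h"

	static void
	example_trace_rx_burst(uint16_t port_id, uint16_t queue_id, uint16_t nb_rx)
	{
		/* Emitted only when built with RTE_LIBRTE_IAVF_DEBUG_RX. */
		PMD_RX_LOG(DEBUG, "port %u queue %u: %u packets received",
			   port_id, queue_id, nb_rx);

		/* Always compiled in; visibility depends on the rte_log level. */
		PMD_DRV_LOG(DEBUG, "port %u queue %u: burst of %u done",
			    port_id, queue_id, nb_rx);
	}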
diff --git a/src/spdk/dpdk/drivers/net/iavf/iavf_rxtx.c b/src/spdk/dpdk/drivers/net/iavf/iavf_rxtx.c
new file mode 100644
index 000000000..05a7dd898
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/iavf/iavf_rxtx.c
@@ -0,0 +1,2869 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <sys/queue.h>
+
+#include <rte_string_fns.h>
+#include <rte_memzone.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+#include <rte_udp.h>
+#include <rte_ip.h>
+#include <rte_net.h>
+
+#include "iavf.h"
+#include "iavf_rxtx.h"
+
+static inline int
+check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
+{
+ /* The following constraints must be satisfied:
+ * thresh < rxq->nb_rx_desc
+ */
+ if (thresh >= nb_desc) {
+ PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u",
+ thresh, nb_desc);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static inline int
+check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
+ uint16_t tx_free_thresh)
+{
+ /* TX descriptors will have their RS bit set after tx_rs_thresh
+ * descriptors have been used. The TX descriptor ring will be cleaned
+ * after tx_free_thresh descriptors are used or if the number of
+ * descriptors required to transmit a packet is greater than the
+ * number of free TX descriptors.
+ *
+ * The following constraints must be satisfied:
+ * - tx_rs_thresh must be less than the size of the ring minus 2.
+ * - tx_free_thresh must be less than the size of the ring minus 3.
+ * - tx_rs_thresh must be less than or equal to tx_free_thresh.
+ * - tx_rs_thresh must be a divisor of the ring size.
+ *
+ * One descriptor in the TX ring is used as a sentinel to avoid a H/W
+ * race condition, hence the maximum threshold constraints. When set
+ * to zero use default values.
+ */
+ if (tx_rs_thresh >= (nb_desc - 2)) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
+ "number of TX descriptors (%u) minus 2",
+ tx_rs_thresh, nb_desc);
+ return -EINVAL;
+ }
+ if (tx_free_thresh >= (nb_desc - 3)) {
+ PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the "
+ "number of TX descriptors (%u) minus 3.",
+ tx_free_thresh, nb_desc);
+ return -EINVAL;
+ }
+ if (tx_rs_thresh > tx_free_thresh) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
+ "equal to tx_free_thresh (%u).",
+ tx_rs_thresh, tx_free_thresh);
+ return -EINVAL;
+ }
+ if ((nb_desc % tx_rs_thresh) != 0) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
+ "number of TX descriptors (%u).",
+ tx_rs_thresh, nb_desc);
+ return -EINVAL;
+ }
+
+ return 0;
+}
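/*
 * Worked example (illustrative numbers only): with nb_desc = 512, the pair
 * tx_rs_thresh = 32 and tx_free_thresh = 64 passes every check above:
 * 32 < 510, 64 < 509, 32 <= 64 and 512 % 32 == 0.  A request such as
 * tx_rs_thresh = 96 with tx_free_thresh = 64 would be rejected by the
 * "less than or equal" check.
 */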
+
+static inline bool
+check_rx_vec_allow(struct iavf_rx_queue *rxq)
+{
+ if (rxq->rx_free_thresh >= IAVF_VPMD_RX_MAX_BURST &&
+ rxq->nb_rx_desc % rxq->rx_free_thresh == 0) {
+ PMD_INIT_LOG(DEBUG, "Vector Rx can be enabled on this rxq.");
+ return true;
+ }
+
+ PMD_INIT_LOG(DEBUG, "Vector Rx cannot be enabled on this rxq.");
+ return false;
+}
+
+static inline bool
+check_tx_vec_allow(struct iavf_tx_queue *txq)
+{
+ if (!(txq->offloads & IAVF_NO_VECTOR_FLAGS) &&
+ txq->rs_thresh >= IAVF_VPMD_TX_MAX_BURST &&
+ txq->rs_thresh <= IAVF_VPMD_TX_MAX_FREE_BUF) {
+		PMD_INIT_LOG(DEBUG, "Vector Tx can be enabled on this txq.");
+ return true;
+ }
+ PMD_INIT_LOG(DEBUG, "Vector Tx cannot be enabled on this txq.");
+ return false;
+}
+
+static inline bool
+check_rx_bulk_allow(struct iavf_rx_queue *rxq)
+{
+ int ret = true;
+
+ if (!(rxq->rx_free_thresh >= IAVF_RX_MAX_BURST)) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->rx_free_thresh=%d, "
+ "IAVF_RX_MAX_BURST=%d",
+ rxq->rx_free_thresh, IAVF_RX_MAX_BURST);
+ ret = false;
+ } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->nb_rx_desc=%d, "
+ "rxq->rx_free_thresh=%d",
+ rxq->nb_rx_desc, rxq->rx_free_thresh);
+ ret = false;
+ }
+ return ret;
+}
+
+static inline void
+reset_rx_queue(struct iavf_rx_queue *rxq)
+{
+ uint16_t len;
+ uint32_t i;
+
+ if (!rxq)
+ return;
+
+ len = rxq->nb_rx_desc + IAVF_RX_MAX_BURST;
+
+ for (i = 0; i < len * sizeof(union iavf_rx_desc); i++)
+ ((volatile char *)rxq->rx_ring)[i] = 0;
+
+ memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
+
+ for (i = 0; i < IAVF_RX_MAX_BURST; i++)
+ rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
+
+ /* for rx bulk */
+ rxq->rx_nb_avail = 0;
+ rxq->rx_next_avail = 0;
+ rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
+
+ rxq->rx_tail = 0;
+ rxq->nb_rx_hold = 0;
+ rxq->pkt_first_seg = NULL;
+ rxq->pkt_last_seg = NULL;
+}
+
+static inline void
+reset_tx_queue(struct iavf_tx_queue *txq)
+{
+ struct iavf_tx_entry *txe;
+ uint32_t i, size;
+ uint16_t prev;
+
+ if (!txq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
+ return;
+ }
+
+ txe = txq->sw_ring;
+ size = sizeof(struct iavf_tx_desc) * txq->nb_tx_desc;
+ for (i = 0; i < size; i++)
+ ((volatile char *)txq->tx_ring)[i] = 0;
+
+ prev = (uint16_t)(txq->nb_tx_desc - 1);
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ txq->tx_ring[i].cmd_type_offset_bsz =
+ rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
+ txe[i].mbuf = NULL;
+ txe[i].last_id = i;
+ txe[prev].next_id = i;
+ prev = i;
+ }
+
+ txq->tx_tail = 0;
+ txq->nb_used = 0;
+
+ txq->last_desc_cleaned = txq->nb_tx_desc - 1;
+ txq->nb_free = txq->nb_tx_desc - 1;
+
+ txq->next_dd = txq->rs_thresh - 1;
+ txq->next_rs = txq->rs_thresh - 1;
+}
+
+static int
+alloc_rxq_mbufs(struct iavf_rx_queue *rxq)
+{
+ volatile union iavf_rx_desc *rxd;
+ struct rte_mbuf *mbuf = NULL;
+ uint64_t dma_addr;
+ uint16_t i;
+
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ mbuf = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(!mbuf)) {
+ PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
+ return -ENOMEM;
+ }
+
+ rte_mbuf_refcnt_set(mbuf, 1);
+ mbuf->next = NULL;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->port = rxq->port_id;
+
+ dma_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+ rxd = &rxq->rx_ring[i];
+ rxd->read.pkt_addr = dma_addr;
+ rxd->read.hdr_addr = 0;
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+ rxd->read.rsvd1 = 0;
+ rxd->read.rsvd2 = 0;
+#endif
+
+ rxq->sw_ring[i] = mbuf;
+ }
+
+ return 0;
+}
+
+static inline void
+release_rxq_mbufs(struct iavf_rx_queue *rxq)
+{
+ uint16_t i;
+
+ if (!rxq->sw_ring)
+ return;
+
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (rxq->sw_ring[i]) {
+ rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+ rxq->sw_ring[i] = NULL;
+ }
+ }
+
+ /* for rx bulk */
+ if (rxq->rx_nb_avail == 0)
+ return;
+ for (i = 0; i < rxq->rx_nb_avail; i++) {
+ struct rte_mbuf *mbuf;
+
+ mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
+ rte_pktmbuf_free_seg(mbuf);
+ }
+ rxq->rx_nb_avail = 0;
+}
+
+static inline void
+release_txq_mbufs(struct iavf_tx_queue *txq)
+{
+ uint16_t i;
+
+ if (!txq || !txq->sw_ring) {
+		PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
+ return;
+ }
+
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ if (txq->sw_ring[i].mbuf) {
+ rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+ txq->sw_ring[i].mbuf = NULL;
+ }
+ }
+}
+
+static const struct iavf_rxq_ops def_rxq_ops = {
+ .release_mbufs = release_rxq_mbufs,
+};
+
+static const struct iavf_txq_ops def_txq_ops = {
+ .release_mbufs = release_txq_mbufs,
+};
+
+int
+iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct iavf_adapter *ad =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct iavf_info *vf =
+ IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct iavf_vsi *vsi = &vf->vsi;
+ struct iavf_rx_queue *rxq;
+ const struct rte_memzone *mz;
+ uint32_t ring_size;
+ uint16_t len;
+ uint16_t rx_free_thresh;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
+ nb_desc > IAVF_MAX_RING_DESC ||
+ nb_desc < IAVF_MIN_RING_DESC) {
+ PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
+ "invalid", nb_desc);
+ return -EINVAL;
+ }
+
+ /* Check free threshold */
+ rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
+ IAVF_DEFAULT_RX_FREE_THRESH :
+ rx_conf->rx_free_thresh;
+ if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
+ return -EINVAL;
+
+ /* Free memory if needed */
+ if (dev->data->rx_queues[queue_idx]) {
+ iavf_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ /* Allocate the rx queue data structure */
+ rxq = rte_zmalloc_socket("iavf rxq",
+ sizeof(struct iavf_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!rxq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for "
+ "rx queue data structure");
+ return -ENOMEM;
+ }
+
+ if (vf->vf_res->vf_cap_flags &
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
+ vf->supported_rxdid & BIT(IAVF_RXDID_COMMS_OVS_1)) {
+ rxq->rxdid = IAVF_RXDID_COMMS_OVS_1;
+ } else {
+ rxq->rxdid = IAVF_RXDID_LEGACY_1;
+ }
+
+ rxq->mp = mp;
+ rxq->nb_rx_desc = nb_desc;
+ rxq->rx_free_thresh = rx_free_thresh;
+ rxq->queue_id = queue_idx;
+ rxq->port_id = dev->data->port_id;
+ rxq->crc_len = 0; /* crc stripping by default */
+ rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+ rxq->rx_hdr_len = 0;
+ rxq->vsi = vsi;
+
+ len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
+ rxq->rx_buf_len = RTE_ALIGN(len, (1 << IAVF_RXQ_CTX_DBUFF_SHIFT));
+
+ /* Allocate the software ring. */
+ len = nb_desc + IAVF_RX_MAX_BURST;
+ rxq->sw_ring =
+ rte_zmalloc_socket("iavf rx sw ring",
+ sizeof(struct rte_mbuf *) * len,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!rxq->sw_ring) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
+ rte_free(rxq);
+ return -ENOMEM;
+ }
+
+ /* Allocate the maximum number of RX ring hardware descriptors with
+ * a little more to support bulk allocation.
+ */
+ len = IAVF_MAX_RING_DESC + IAVF_RX_MAX_BURST;
+ ring_size = RTE_ALIGN(len * sizeof(union iavf_rx_desc),
+ IAVF_DMA_MEM_ALIGN);
+ mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
+ ring_size, IAVF_RING_BASE_ALIGN,
+ socket_id);
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
+ rte_free(rxq->sw_ring);
+ rte_free(rxq);
+ return -ENOMEM;
+ }
+ /* Zero all the descriptors in the ring. */
+ memset(mz->addr, 0, ring_size);
+ rxq->rx_ring_phys_addr = mz->iova;
+ rxq->rx_ring = (union iavf_rx_desc *)mz->addr;
+
+ rxq->mz = mz;
+ reset_rx_queue(rxq);
+ rxq->q_set = true;
+ dev->data->rx_queues[queue_idx] = rxq;
+ rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
+ rxq->ops = &def_rxq_ops;
+
+ if (check_rx_bulk_allow(rxq) == true) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+ "satisfied. Rx Burst Bulk Alloc function will be "
+ "used on port=%d, queue=%d.",
+ rxq->port_id, rxq->queue_id);
+ } else {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+ "not satisfied, Scattered Rx is requested "
+ "on port=%d, queue=%d.",
+ rxq->port_id, rxq->queue_id);
+ ad->rx_bulk_alloc_allowed = false;
+ }
+
+ if (check_rx_vec_allow(rxq) == false)
+ ad->rx_vec_allowed = false;
+
+ return 0;
+}
+
+int
+iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct iavf_tx_queue *txq;
+ const struct rte_memzone *mz;
+ uint32_t ring_size;
+ uint16_t tx_rs_thresh, tx_free_thresh;
+ uint64_t offloads;
+
+ PMD_INIT_FUNC_TRACE();
+
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
+ if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
+ nb_desc > IAVF_MAX_RING_DESC ||
+ nb_desc < IAVF_MIN_RING_DESC) {
+ PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
+ "invalid", nb_desc);
+ return -EINVAL;
+ }
+
+ tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
+ tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
+ tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
+ tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
+ if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
+ return -EINVAL;
+
+ /* Free memory if needed. */
+ if (dev->data->tx_queues[queue_idx]) {
+ iavf_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ /* Allocate the TX queue data structure. */
+ txq = rte_zmalloc_socket("iavf txq",
+ sizeof(struct iavf_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!txq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for "
+ "tx queue structure");
+ return -ENOMEM;
+ }
+
+ txq->nb_tx_desc = nb_desc;
+ txq->rs_thresh = tx_rs_thresh;
+ txq->free_thresh = tx_free_thresh;
+ txq->queue_id = queue_idx;
+ txq->port_id = dev->data->port_id;
+ txq->offloads = offloads;
+ txq->tx_deferred_start = tx_conf->tx_deferred_start;
+
+ /* Allocate software ring */
+ txq->sw_ring =
+ rte_zmalloc_socket("iavf tx sw ring",
+ sizeof(struct iavf_tx_entry) * nb_desc,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!txq->sw_ring) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
+ rte_free(txq);
+ return -ENOMEM;
+ }
+
+ /* Allocate TX hardware ring descriptors. */
+ ring_size = sizeof(struct iavf_tx_desc) * IAVF_MAX_RING_DESC;
+ ring_size = RTE_ALIGN(ring_size, IAVF_DMA_MEM_ALIGN);
+ mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ ring_size, IAVF_RING_BASE_ALIGN,
+ socket_id);
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
+ rte_free(txq->sw_ring);
+ rte_free(txq);
+ return -ENOMEM;
+ }
+ txq->tx_ring_phys_addr = mz->iova;
+ txq->tx_ring = (struct iavf_tx_desc *)mz->addr;
+
+ txq->mz = mz;
+ reset_tx_queue(txq);
+ txq->q_set = true;
+ dev->data->tx_queues[queue_idx] = txq;
+ txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
+ txq->ops = &def_txq_ops;
+
+ if (check_tx_vec_allow(txq) == false) {
+ struct iavf_adapter *ad =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ ad->tx_vec_allowed = false;
+ }
+
+ return 0;
+}
+
+int
+iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct iavf_adapter *adapter =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct iavf_rx_queue *rxq;
+ int err = 0;
+
+ PMD_DRV_FUNC_TRACE();
+
+ if (rx_queue_id >= dev->data->nb_rx_queues)
+ return -EINVAL;
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ err = alloc_rxq_mbufs(rxq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
+ return err;
+ }
+
+ rte_wmb();
+
+ /* Init the RX tail register. */
+ IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ IAVF_WRITE_FLUSH(hw);
+
+ /* Ready to switch the queue on */
+ err = iavf_switch_queue(adapter, rx_queue_id, true, true);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
+ rx_queue_id);
+ else
+ dev->data->rx_queue_state[rx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+
+ return err;
+}
+
+int
+iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct iavf_adapter *adapter =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct iavf_tx_queue *txq;
+ int err = 0;
+
+ PMD_DRV_FUNC_TRACE();
+
+ if (tx_queue_id >= dev->data->nb_tx_queues)
+ return -EINVAL;
+
+ txq = dev->data->tx_queues[tx_queue_id];
+
+ /* Init the TX tail register. */
+ IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
+ IAVF_WRITE_FLUSH(hw);
+
+ /* Ready to switch the queue on */
+ err = iavf_switch_queue(adapter, tx_queue_id, false, true);
+
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
+ tx_queue_id);
+ else
+ dev->data->tx_queue_state[tx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+
+ return err;
+}
+
+int
+iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct iavf_adapter *adapter =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct iavf_rx_queue *rxq;
+ int err;
+
+ PMD_DRV_FUNC_TRACE();
+
+ if (rx_queue_id >= dev->data->nb_rx_queues)
+ return -EINVAL;
+
+ err = iavf_switch_queue(adapter, rx_queue_id, true, false);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+ rx_queue_id);
+ return err;
+ }
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+ rxq->ops->release_mbufs(rxq);
+ reset_rx_queue(rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+int
+iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct iavf_adapter *adapter =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct iavf_tx_queue *txq;
+ int err;
+
+ PMD_DRV_FUNC_TRACE();
+
+ if (tx_queue_id >= dev->data->nb_tx_queues)
+ return -EINVAL;
+
+ err = iavf_switch_queue(adapter, tx_queue_id, false, false);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
+ tx_queue_id);
+ return err;
+ }
+
+ txq = dev->data->tx_queues[tx_queue_id];
+ txq->ops->release_mbufs(txq);
+ reset_tx_queue(txq);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+void
+iavf_dev_rx_queue_release(void *rxq)
+{
+ struct iavf_rx_queue *q = (struct iavf_rx_queue *)rxq;
+
+ if (!q)
+ return;
+
+ q->ops->release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
+ rte_free(q);
+}
+
+void
+iavf_dev_tx_queue_release(void *txq)
+{
+ struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
+
+ if (!q)
+ return;
+
+ q->ops->release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
+ rte_free(q);
+}
+
+void
+iavf_stop_queues(struct rte_eth_dev *dev)
+{
+ struct iavf_adapter *adapter =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct iavf_rx_queue *rxq;
+ struct iavf_tx_queue *txq;
+ int ret, i;
+
+ /* Stop All queues */
+ ret = iavf_disable_queues(adapter);
+ if (ret)
+ PMD_DRV_LOG(WARNING, "Fail to stop queues");
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (!txq)
+ continue;
+ txq->ops->release_mbufs(txq);
+ reset_tx_queue(txq);
+ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+ }
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ if (!rxq)
+ continue;
+ rxq->ops->release_mbufs(rxq);
+ reset_rx_queue(rxq);
+ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+ }
+}
+
+static inline void
+iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
+{
+ if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
+ (1 << IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
+ mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ mb->vlan_tci =
+ rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
+ } else {
+ mb->vlan_tci = 0;
+ }
+}
+
+static inline void
+iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
+ volatile union iavf_rx_flex_desc *rxdp)
+{
+ if (rte_le_to_cpu_64(rxdp->wb.status_error0) &
+ (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
+ mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ mb->vlan_tci =
+ rte_le_to_cpu_16(rxdp->wb.l2tag1);
+ } else {
+ mb->vlan_tci = 0;
+ }
+}
+
+/* Translate the rx descriptor status and error fields to pkt flags */
+static inline uint64_t
+iavf_rxd_to_pkt_flags(uint64_t qword)
+{
+ uint64_t flags;
+ uint64_t error_bits = (qword >> IAVF_RXD_QW1_ERROR_SHIFT);
+
+#define IAVF_RX_ERR_BITS 0x3f
+
+ /* Check if RSS_HASH */
+ flags = (((qword >> IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT) &
+ IAVF_RX_DESC_FLTSTAT_RSS_HASH) ==
+ IAVF_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
+
+ /* Check if FDIR Match */
+ flags |= (qword & (1 << IAVF_RX_DESC_STATUS_FLM_SHIFT) ?
+ PKT_RX_FDIR : 0);
+
+ if (likely((error_bits & IAVF_RX_ERR_BITS) == 0)) {
+ flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
+ return flags;
+ }
+
+ if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_IPE_SHIFT)))
+ flags |= PKT_RX_IP_CKSUM_BAD;
+ else
+ flags |= PKT_RX_IP_CKSUM_GOOD;
+
+ if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_L4E_SHIFT)))
+ flags |= PKT_RX_L4_CKSUM_BAD;
+ else
+ flags |= PKT_RX_L4_CKSUM_GOOD;
+
+ /* TODO: Oversize error bit is not processed here */
+
+ return flags;
+}
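+
+/* Illustrative example of the translation above (descriptor values are
+ * arbitrary): a descriptor whose FLTSTAT field equals
+ * IAVF_RX_DESC_FLTSTAT_RSS_HASH and whose error bits are all clear
+ * yields PKT_RX_RSS_HASH | PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
+ * while a set IPE error bit turns PKT_RX_IP_CKSUM_GOOD into
+ * PKT_RX_IP_CKSUM_BAD.
+ */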
+
+static inline uint64_t
+iavf_rxd_build_fdir(volatile union iavf_rx_desc *rxdp, struct rte_mbuf *mb)
+{
+ uint64_t flags = 0;
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+ uint16_t flexbh;
+
+ flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
+ IAVF_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) &
+ IAVF_RX_DESC_EXT_STATUS_FLEXBH_MASK;
+
+ if (flexbh == IAVF_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
+ mb->hash.fdir.hi =
+ rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
+ flags |= PKT_RX_FDIR_ID;
+ }
+#else
+ mb->hash.fdir.hi =
+ rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
+ flags |= PKT_RX_FDIR_ID;
+#endif
+ return flags;
+}
+
+
+/* Translate the rx flex descriptor status/fields to pkt flags and mbuf fields */
+static inline void
+iavf_rxd_to_pkt_fields(struct rte_mbuf *mb,
+ volatile union iavf_rx_flex_desc *rxdp)
+{
+ volatile struct iavf_32b_rx_flex_desc_comms_ovs *desc =
+ (volatile struct iavf_32b_rx_flex_desc_comms_ovs *)rxdp;
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+ uint16_t stat_err;
+
+ stat_err = rte_le_to_cpu_16(desc->status_error0);
+ if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
+ mb->ol_flags |= PKT_RX_RSS_HASH;
+ mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
+ }
+#endif
+
+ if (desc->flow_id != 0xFFFFFFFF) {
+ mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
+ }
+}
+
+#define IAVF_RX_FLEX_ERR0_BITS \
+ ((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) | \
+ (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
+ (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
+ (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
+ (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
+ (1 << IAVF_RX_FLEX_DESC_STATUS0_RXE_S))
+
+/* Rx L3/L4 checksum */
+static inline uint64_t
+iavf_flex_rxd_error_to_pkt_flags(uint16_t stat_err0)
+{
+ uint64_t flags = 0;
+
+ /* check if HW has decoded the packet and checksum */
+ if (unlikely(!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S))))
+ return 0;
+
+ if (likely(!(stat_err0 & IAVF_RX_FLEX_ERR0_BITS))) {
+ flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
+ return flags;
+ }
+
+ if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
+ flags |= PKT_RX_IP_CKSUM_BAD;
+ else
+ flags |= PKT_RX_IP_CKSUM_GOOD;
+
+ if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
+ flags |= PKT_RX_L4_CKSUM_BAD;
+ else
+ flags |= PKT_RX_L4_CKSUM_GOOD;
+
+ if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
+ flags |= PKT_RX_EIP_CKSUM_BAD;
+
+ return flags;
+}
+
+/* If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+ * register. Update the RDT with the value of the last processed RX
+ * descriptor minus 1, to guarantee that the RDT register is never
+ * equal to the RDH register, which creates a "full" ring situation
+ * from the hardware point of view.
+ */
+static inline void
+iavf_update_rx_tail(struct iavf_rx_queue *rxq, uint16_t nb_hold, uint16_t rx_id)
+{
+ nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
+
+ if (nb_hold > rxq->rx_free_thresh) {
+ PMD_RX_LOG(DEBUG,
+ "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u",
+ rxq->port_id, rxq->queue_id, rx_id, nb_hold);
+ rx_id = (uint16_t)((rx_id == 0) ?
+ (rxq->nb_rx_desc - 1) : (rx_id - 1));
+ IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+ nb_hold = 0;
+ }
+ rxq->nb_rx_hold = nb_hold;
+}
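+
+/* Walk-through of the tail update above with arbitrary example values
+ * (nb_rx_desc = 512, rx_free_thresh = 32): the count carried over in
+ * rxq->nb_rx_hold (say 10) plus the descriptors just processed (say 25)
+ * gives 35 > 32, so the tail is written.  With rx_id = 100 the value
+ * written is 99; had rx_id wrapped to 0, the value written would be
+ * nb_rx_desc - 1 = 511, so RDT never catches up with RDH.  nb_hold is
+ * then reset and stored back in rxq->nb_rx_hold for the next call.
+ */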
+
+/* implement recv_pkts */
+uint16_t
+iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ volatile union iavf_rx_desc *rx_ring;
+ volatile union iavf_rx_desc *rxdp;
+ struct iavf_rx_queue *rxq;
+ union iavf_rx_desc rxd;
+ struct rte_mbuf *rxe;
+ struct rte_eth_dev *dev;
+ struct rte_mbuf *rxm;
+ struct rte_mbuf *nmb;
+ uint16_t nb_rx;
+ uint32_t rx_status;
+ uint64_t qword1;
+ uint16_t rx_packet_len;
+ uint16_t rx_id, nb_hold;
+ uint64_t dma_addr;
+ uint64_t pkt_flags;
+ const uint32_t *ptype_tbl;
+
+ nb_rx = 0;
+ nb_hold = 0;
+ rxq = rx_queue;
+ rx_id = rxq->rx_tail;
+ rx_ring = rxq->rx_ring;
+ ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+
+ while (nb_rx < nb_pkts) {
+ rxdp = &rx_ring[rx_id];
+ qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
+ rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
+ IAVF_RXD_QW1_STATUS_SHIFT;
+
+ /* Check the DD bit first */
+ if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
+ break;
+ IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
+
+ nmb = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(!nmb)) {
+ dev = &rte_eth_devices[rxq->port_id];
+ dev->data->rx_mbuf_alloc_failed++;
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+ "queue_id=%u", rxq->port_id, rxq->queue_id);
+ break;
+ }
+
+ rxd = *rxdp;
+ nb_hold++;
+ rxe = rxq->sw_ring[rx_id];
+ rx_id++;
+ if (unlikely(rx_id == rxq->nb_rx_desc))
+ rx_id = 0;
+
+ /* Prefetch next mbuf */
+ rte_prefetch0(rxq->sw_ring[rx_id]);
+
+ /* When next RX descriptor is on a cache line boundary,
+ * prefetch the next 4 RX descriptors and next 8 pointers
+ * to mbufs.
+ */
+ if ((rx_id & 0x3) == 0) {
+ rte_prefetch0(&rx_ring[rx_id]);
+ rte_prefetch0(rxq->sw_ring[rx_id]);
+ }
+ rxm = rxe;
+ dma_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+ rxdp->read.hdr_addr = 0;
+ rxdp->read.pkt_addr = dma_addr;
+
+ rx_packet_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
+ IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
+
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
+ rxm->nb_segs = 1;
+ rxm->next = NULL;
+ rxm->pkt_len = rx_packet_len;
+ rxm->data_len = rx_packet_len;
+ rxm->port = rxq->port_id;
+ rxm->ol_flags = 0;
+ iavf_rxd_to_vlan_tci(rxm, &rxd);
+ pkt_flags = iavf_rxd_to_pkt_flags(qword1);
+ rxm->packet_type =
+ ptype_tbl[(uint8_t)((qword1 &
+ IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
+
+ if (pkt_flags & PKT_RX_RSS_HASH)
+ rxm->hash.rss =
+ rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
+
+ if (pkt_flags & PKT_RX_FDIR)
+ pkt_flags |= iavf_rxd_build_fdir(&rxd, rxm);
+
+ rxm->ol_flags |= pkt_flags;
+
+ rx_pkts[nb_rx++] = rxm;
+ }
+ rxq->rx_tail = rx_id;
+
+ iavf_update_rx_tail(rxq, nb_hold, rx_id);
+
+ return nb_rx;
+}
+
+/* implement recv_pkts for flexible Rx descriptor */
+uint16_t
+iavf_recv_pkts_flex_rxd(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ volatile union iavf_rx_desc *rx_ring;
+ volatile union iavf_rx_flex_desc *rxdp;
+ struct iavf_rx_queue *rxq;
+ union iavf_rx_flex_desc rxd;
+ struct rte_mbuf *rxe;
+ struct rte_eth_dev *dev;
+ struct rte_mbuf *rxm;
+ struct rte_mbuf *nmb;
+ uint16_t nb_rx;
+ uint16_t rx_stat_err0;
+ uint16_t rx_packet_len;
+ uint16_t rx_id, nb_hold;
+ uint64_t dma_addr;
+ uint64_t pkt_flags;
+ const uint32_t *ptype_tbl;
+
+ nb_rx = 0;
+ nb_hold = 0;
+ rxq = rx_queue;
+ rx_id = rxq->rx_tail;
+ rx_ring = rxq->rx_ring;
+ ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+
+ while (nb_rx < nb_pkts) {
+ rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
+ rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
+
+ /* Check the DD bit first */
+ if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
+ break;
+ IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
+
+ nmb = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(!nmb)) {
+ dev = &rte_eth_devices[rxq->port_id];
+ dev->data->rx_mbuf_alloc_failed++;
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+ "queue_id=%u", rxq->port_id, rxq->queue_id);
+ break;
+ }
+
+ rxd = *rxdp;
+ nb_hold++;
+ rxe = rxq->sw_ring[rx_id];
+ rx_id++;
+ if (unlikely(rx_id == rxq->nb_rx_desc))
+ rx_id = 0;
+
+ /* Prefetch next mbuf */
+ rte_prefetch0(rxq->sw_ring[rx_id]);
+
+ /* When next RX descriptor is on a cache line boundary,
+ * prefetch the next 4 RX descriptors and next 8 pointers
+ * to mbufs.
+ */
+ if ((rx_id & 0x3) == 0) {
+ rte_prefetch0(&rx_ring[rx_id]);
+ rte_prefetch0(rxq->sw_ring[rx_id]);
+ }
+ rxm = rxe;
+ dma_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+ rxdp->read.hdr_addr = 0;
+ rxdp->read.pkt_addr = dma_addr;
+
+ rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
+ IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
+
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
+ rxm->nb_segs = 1;
+ rxm->next = NULL;
+ rxm->pkt_len = rx_packet_len;
+ rxm->data_len = rx_packet_len;
+ rxm->port = rxq->port_id;
+ rxm->ol_flags = 0;
+ rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
+ rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
+ iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
+ iavf_rxd_to_pkt_fields(rxm, &rxd);
+ pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
+ rxm->ol_flags |= pkt_flags;
+
+ rx_pkts[nb_rx++] = rxm;
+ }
+ rxq->rx_tail = rx_id;
+
+ iavf_update_rx_tail(rxq, nb_hold, rx_id);
+
+ return nb_rx;
+}
+
+/* implement recv_scattered_pkts for flexible Rx descriptor */
+uint16_t
+iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct iavf_rx_queue *rxq = rx_queue;
+ union iavf_rx_flex_desc rxd;
+ struct rte_mbuf *rxe;
+ struct rte_mbuf *first_seg = rxq->pkt_first_seg;
+ struct rte_mbuf *last_seg = rxq->pkt_last_seg;
+ struct rte_mbuf *nmb, *rxm;
+ uint16_t rx_id = rxq->rx_tail;
+ uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
+ struct rte_eth_dev *dev;
+ uint16_t rx_stat_err0;
+ uint64_t dma_addr;
+ uint64_t pkt_flags;
+
+ volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
+ volatile union iavf_rx_flex_desc *rxdp;
+ const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+
+ while (nb_rx < nb_pkts) {
+ rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
+ rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
+
+ /* Check the DD bit */
+ if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
+ break;
+ IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
+
+ nmb = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(!nmb)) {
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+ "queue_id=%u", rxq->port_id, rxq->queue_id);
+ dev = &rte_eth_devices[rxq->port_id];
+ dev->data->rx_mbuf_alloc_failed++;
+ break;
+ }
+
+ rxd = *rxdp;
+ nb_hold++;
+ rxe = rxq->sw_ring[rx_id];
+ rx_id++;
+ if (rx_id == rxq->nb_rx_desc)
+ rx_id = 0;
+
+ /* Prefetch next mbuf */
+ rte_prefetch0(rxq->sw_ring[rx_id]);
+
+ /* When next RX descriptor is on a cache line boundary,
+ * prefetch the next 4 RX descriptors and next 8 pointers
+ * to mbufs.
+ */
+ if ((rx_id & 0x3) == 0) {
+ rte_prefetch0(&rx_ring[rx_id]);
+ rte_prefetch0(rxq->sw_ring[rx_id]);
+ }
+
+ rxm = rxe;
+ dma_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+
+ /* Set data buffer address and data length of the mbuf */
+ rxdp->read.hdr_addr = 0;
+ rxdp->read.pkt_addr = dma_addr;
+ rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
+ IAVF_RX_FLX_DESC_PKT_LEN_M;
+ rxm->data_len = rx_packet_len;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+
+ /* If this is the first buffer of the received packet, set the
+ * pointer to the first mbuf of the packet and initialize its
+ * context. Otherwise, update the total length and the number
+ * of segments of the current scattered packet, and update the
+ * pointer to the last mbuf of the current packet.
+ */
+ if (!first_seg) {
+ first_seg = rxm;
+ first_seg->nb_segs = 1;
+ first_seg->pkt_len = rx_packet_len;
+ } else {
+ first_seg->pkt_len =
+ (uint16_t)(first_seg->pkt_len +
+ rx_packet_len);
+ first_seg->nb_segs++;
+ last_seg->next = rxm;
+ }
+
+ /* If this is not the last buffer of the received packet,
+ * update the pointer to the last mbuf of the current scattered
+ * packet and continue to parse the RX ring.
+ */
+ if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_EOF_S))) {
+ last_seg = rxm;
+ continue;
+ }
+
+ /* This is the last buffer of the received packet. If the CRC
+ * is not stripped by the hardware:
+ * - Subtract the CRC length from the total packet length.
+ * - If the last buffer only contains the whole CRC or a part
+ * of it, free the mbuf associated to the last buffer. If part
+ * of the CRC is also contained in the previous mbuf, subtract
+ * the length of that CRC part from the data length of the
+ * previous mbuf.
+ */
+ rxm->next = NULL;
+ if (unlikely(rxq->crc_len > 0)) {
+ first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
+ if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
+ rte_pktmbuf_free_seg(rxm);
+ first_seg->nb_segs--;
+ last_seg->data_len =
+ (uint16_t)(last_seg->data_len -
+ (RTE_ETHER_CRC_LEN - rx_packet_len));
+ last_seg->next = NULL;
+ } else {
+ rxm->data_len = (uint16_t)(rx_packet_len -
+ RTE_ETHER_CRC_LEN);
+ }
+ }
+
+ first_seg->port = rxq->port_id;
+ first_seg->ol_flags = 0;
+ first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
+ rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
+ iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
+ iavf_rxd_to_pkt_fields(first_seg, &rxd);
+ pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
+
+ first_seg->ol_flags |= pkt_flags;
+
+ /* Prefetch data of first segment, if configured to do so. */
+ rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
+ first_seg->data_off));
+ rx_pkts[nb_rx++] = first_seg;
+ first_seg = NULL;
+ }
+
+ /* Record index of the next RX descriptor to probe. */
+ rxq->rx_tail = rx_id;
+ rxq->pkt_first_seg = first_seg;
+ rxq->pkt_last_seg = last_seg;
+
+ iavf_update_rx_tail(rxq, nb_hold, rx_id);
+
+ return nb_rx;
+}
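+
+/* Example of the CRC handling above, assuming the hardware leaves the
+ * 4-byte CRC (RTE_ETHER_CRC_LEN) in the buffers: if the final segment
+ * holds only 2 bytes, pkt_len is first reduced by 4, the final mbuf is
+ * freed and nb_segs decremented, and the remaining 4 - 2 = 2 CRC bytes
+ * are trimmed from the previous segment's data_len.  If the final
+ * segment holds more than 4 bytes, only its own data_len is reduced
+ * by 4.
+ */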
+
+/* implement recv_scattered_pkts */
+uint16_t
+iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct iavf_rx_queue *rxq = rx_queue;
+ union iavf_rx_desc rxd;
+ struct rte_mbuf *rxe;
+ struct rte_mbuf *first_seg = rxq->pkt_first_seg;
+ struct rte_mbuf *last_seg = rxq->pkt_last_seg;
+ struct rte_mbuf *nmb, *rxm;
+ uint16_t rx_id = rxq->rx_tail;
+ uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
+ struct rte_eth_dev *dev;
+ uint32_t rx_status;
+ uint64_t qword1;
+ uint64_t dma_addr;
+ uint64_t pkt_flags;
+
+ volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
+ volatile union iavf_rx_desc *rxdp;
+ const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+
+ while (nb_rx < nb_pkts) {
+ rxdp = &rx_ring[rx_id];
+ qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
+ rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
+ IAVF_RXD_QW1_STATUS_SHIFT;
+
+ /* Check the DD bit */
+ if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
+ break;
+ IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
+
+ nmb = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(!nmb)) {
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+ "queue_id=%u", rxq->port_id, rxq->queue_id);
+ dev = &rte_eth_devices[rxq->port_id];
+ dev->data->rx_mbuf_alloc_failed++;
+ break;
+ }
+
+ rxd = *rxdp;
+ nb_hold++;
+ rxe = rxq->sw_ring[rx_id];
+ rx_id++;
+ if (rx_id == rxq->nb_rx_desc)
+ rx_id = 0;
+
+ /* Prefetch next mbuf */
+ rte_prefetch0(rxq->sw_ring[rx_id]);
+
+ /* When next RX descriptor is on a cache line boundary,
+ * prefetch the next 4 RX descriptors and next 8 pointers
+ * to mbufs.
+ */
+ if ((rx_id & 0x3) == 0) {
+ rte_prefetch0(&rx_ring[rx_id]);
+ rte_prefetch0(rxq->sw_ring[rx_id]);
+ }
+
+ rxm = rxe;
+ dma_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+
+ /* Set data buffer address and data length of the mbuf */
+ rxdp->read.hdr_addr = 0;
+ rxdp->read.pkt_addr = dma_addr;
+ rx_packet_len = (qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
+ IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
+ rxm->data_len = rx_packet_len;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+
+ /* If this is the first buffer of the received packet, set the
+ * pointer to the first mbuf of the packet and initialize its
+ * context. Otherwise, update the total length and the number
+ * of segments of the current scattered packet, and update the
+ * pointer to the last mbuf of the current packet.
+ */
+ if (!first_seg) {
+ first_seg = rxm;
+ first_seg->nb_segs = 1;
+ first_seg->pkt_len = rx_packet_len;
+ } else {
+ first_seg->pkt_len =
+ (uint16_t)(first_seg->pkt_len +
+ rx_packet_len);
+ first_seg->nb_segs++;
+ last_seg->next = rxm;
+ }
+
+ /* If this is not the last buffer of the received packet,
+ * update the pointer to the last mbuf of the current scattered
+ * packet and continue to parse the RX ring.
+ */
+ if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_EOF_SHIFT))) {
+ last_seg = rxm;
+ continue;
+ }
+
+ /* This is the last buffer of the received packet. If the CRC
+ * is not stripped by the hardware:
+ * - Subtract the CRC length from the total packet length.
+ * - If the last buffer only contains the whole CRC or a part
+ * of it, free the mbuf associated to the last buffer. If part
+ * of the CRC is also contained in the previous mbuf, subtract
+ * the length of that CRC part from the data length of the
+ * previous mbuf.
+ */
+ rxm->next = NULL;
+ if (unlikely(rxq->crc_len > 0)) {
+ first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
+ if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
+ rte_pktmbuf_free_seg(rxm);
+ first_seg->nb_segs--;
+ last_seg->data_len =
+ (uint16_t)(last_seg->data_len -
+ (RTE_ETHER_CRC_LEN - rx_packet_len));
+ last_seg->next = NULL;
+ } else
+ rxm->data_len = (uint16_t)(rx_packet_len -
+ RTE_ETHER_CRC_LEN);
+ }
+
+ first_seg->port = rxq->port_id;
+ first_seg->ol_flags = 0;
+ iavf_rxd_to_vlan_tci(first_seg, &rxd);
+ pkt_flags = iavf_rxd_to_pkt_flags(qword1);
+ first_seg->packet_type =
+ ptype_tbl[(uint8_t)((qword1 &
+ IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
+
+ if (pkt_flags & PKT_RX_RSS_HASH)
+ first_seg->hash.rss =
+ rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
+
+ if (pkt_flags & PKT_RX_FDIR)
+ pkt_flags |= iavf_rxd_build_fdir(&rxd, first_seg);
+
+ first_seg->ol_flags |= pkt_flags;
+
+ /* Prefetch data of first segment, if configured to do so. */
+ rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
+ first_seg->data_off));
+ rx_pkts[nb_rx++] = first_seg;
+ first_seg = NULL;
+ }
+
+ /* Record index of the next RX descriptor to probe. */
+ rxq->rx_tail = rx_id;
+ rxq->pkt_first_seg = first_seg;
+ rxq->pkt_last_seg = last_seg;
+
+ iavf_update_rx_tail(rxq, nb_hold, rx_id);
+
+ return nb_rx;
+}
+
+#define IAVF_LOOK_AHEAD 8
+static inline int
+iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
+{
+ volatile union iavf_rx_flex_desc *rxdp;
+ struct rte_mbuf **rxep;
+ struct rte_mbuf *mb;
+ uint16_t stat_err0;
+ uint16_t pkt_len;
+ int32_t s[IAVF_LOOK_AHEAD], nb_dd;
+ int32_t i, j, nb_rx = 0;
+ uint64_t pkt_flags;
+ const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+
+ rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
+ rxep = &rxq->sw_ring[rxq->rx_tail];
+
+ stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
+
+ /* Make sure there is at least 1 packet to receive */
+ if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
+ return 0;
+
+ /* Scan LOOK_AHEAD descriptors at a time to determine which
+ * descriptors reference packets that are ready to be received.
+ */
+ for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
+ rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
+ /* Read desc statuses backwards to avoid race condition */
+ for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--)
+ s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
+
+ rte_smp_rmb();
+
+ /* Compute how many status bits were set */
+ for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
+ nb_dd += s[j] & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S);
+
+ nb_rx += nb_dd;
+
+ /* Translate descriptor info to mbuf parameters */
+ for (j = 0; j < nb_dd; j++) {
+ IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
+ rxq->rx_tail +
+ i * IAVF_LOOK_AHEAD + j);
+
+ mb = rxep[j];
+ pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
+ IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
+ mb->data_len = pkt_len;
+ mb->pkt_len = pkt_len;
+ mb->ol_flags = 0;
+
+ mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
+ rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
+ iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
+ iavf_rxd_to_pkt_fields(mb, &rxdp[j]);
+ stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
+ pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
+
+ mb->ol_flags |= pkt_flags;
+ }
+
+ for (j = 0; j < IAVF_LOOK_AHEAD; j++)
+ rxq->rx_stage[i + j] = rxep[j];
+
+ if (nb_dd != IAVF_LOOK_AHEAD)
+ break;
+ }
+
+ /* Clear software ring entries */
+ for (i = 0; i < nb_rx; i++)
+ rxq->sw_ring[rxq->rx_tail + i] = NULL;
+
+ return nb_rx;
+}
+
+static inline int
+iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
+{
+ volatile union iavf_rx_desc *rxdp;
+ struct rte_mbuf **rxep;
+ struct rte_mbuf *mb;
+ uint16_t pkt_len;
+ uint64_t qword1;
+ uint32_t rx_status;
+ int32_t s[IAVF_LOOK_AHEAD], nb_dd;
+ int32_t i, j, nb_rx = 0;
+ uint64_t pkt_flags;
+ const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+
+ rxdp = &rxq->rx_ring[rxq->rx_tail];
+ rxep = &rxq->sw_ring[rxq->rx_tail];
+
+ qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
+ rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
+ IAVF_RXD_QW1_STATUS_SHIFT;
+
+ /* Make sure there is at least 1 packet to receive */
+ if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
+ return 0;
+
+ /* Scan LOOK_AHEAD descriptors at a time to determine which
+ * descriptors reference packets that are ready to be received.
+ */
+ for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
+ rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
+ /* Read desc statuses backwards to avoid race condition */
+ for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--) {
+ qword1 = rte_le_to_cpu_64(
+ rxdp[j].wb.qword1.status_error_len);
+ s[j] = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
+ IAVF_RXD_QW1_STATUS_SHIFT;
+ }
+
+ rte_smp_rmb();
+
+ /* Compute how many status bits were set */
+ for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
+ nb_dd += s[j] & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
+
+ nb_rx += nb_dd;
+
+ /* Translate descriptor info to mbuf parameters */
+ for (j = 0; j < nb_dd; j++) {
+ IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
+ rxq->rx_tail + i * IAVF_LOOK_AHEAD + j);
+
+ mb = rxep[j];
+ qword1 = rte_le_to_cpu_64
+ (rxdp[j].wb.qword1.status_error_len);
+ pkt_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
+ IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
+ mb->data_len = pkt_len;
+ mb->pkt_len = pkt_len;
+ mb->ol_flags = 0;
+ iavf_rxd_to_vlan_tci(mb, &rxdp[j]);
+ pkt_flags = iavf_rxd_to_pkt_flags(qword1);
+ mb->packet_type =
+ ptype_tbl[(uint8_t)((qword1 &
+ IAVF_RXD_QW1_PTYPE_MASK) >>
+ IAVF_RXD_QW1_PTYPE_SHIFT)];
+
+ if (pkt_flags & PKT_RX_RSS_HASH)
+ mb->hash.rss = rte_le_to_cpu_32(
+ rxdp[j].wb.qword0.hi_dword.rss);
+
+ if (pkt_flags & PKT_RX_FDIR)
+ pkt_flags |= iavf_rxd_build_fdir(&rxdp[j], mb);
+
+ mb->ol_flags |= pkt_flags;
+ }
+
+ for (j = 0; j < IAVF_LOOK_AHEAD; j++)
+ rxq->rx_stage[i + j] = rxep[j];
+
+ if (nb_dd != IAVF_LOOK_AHEAD)
+ break;
+ }
+
+ /* Clear software ring entries */
+ for (i = 0; i < nb_rx; i++)
+ rxq->sw_ring[rxq->rx_tail + i] = NULL;
+
+ return nb_rx;
+}
+
+static inline uint16_t
+iavf_rx_fill_from_stage(struct iavf_rx_queue *rxq,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t i;
+ struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
+
+ nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
+
+ for (i = 0; i < nb_pkts; i++)
+ rx_pkts[i] = stage[i];
+
+ rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
+ rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
+
+ return nb_pkts;
+}
+
+static inline int
+iavf_rx_alloc_bufs(struct iavf_rx_queue *rxq)
+{
+ volatile union iavf_rx_desc *rxdp;
+ struct rte_mbuf **rxep;
+ struct rte_mbuf *mb;
+ uint16_t alloc_idx, i;
+ uint64_t dma_addr;
+ int diag;
+
+ /* Allocate buffers in bulk */
+ alloc_idx = (uint16_t)(rxq->rx_free_trigger -
+ (rxq->rx_free_thresh - 1));
+ rxep = &rxq->sw_ring[alloc_idx];
+ diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
+ rxq->rx_free_thresh);
+ if (unlikely(diag != 0)) {
+ PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
+ return -ENOMEM;
+ }
+
+ rxdp = &rxq->rx_ring[alloc_idx];
+ for (i = 0; i < rxq->rx_free_thresh; i++) {
+ if (likely(i < (rxq->rx_free_thresh - 1)))
+ /* Prefetch next mbuf */
+ rte_prefetch0(rxep[i + 1]);
+
+ mb = rxep[i];
+ rte_mbuf_refcnt_set(mb, 1);
+ mb->next = NULL;
+ mb->data_off = RTE_PKTMBUF_HEADROOM;
+ mb->nb_segs = 1;
+ mb->port = rxq->port_id;
+ dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
+ rxdp[i].read.hdr_addr = 0;
+ rxdp[i].read.pkt_addr = dma_addr;
+ }
+
+ /* Update rx tail register */
+ rte_wmb();
+ IAVF_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
+
+ rxq->rx_free_trigger =
+ (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
+ if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
+ rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
+
+ return 0;
+}
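+
+/* Illustration of the refill bookkeeping above with arbitrary values
+ * (nb_rx_desc = 512, rx_free_thresh = 32, rx_free_trigger starting at
+ * rx_free_thresh - 1 = 31): the first refill fills sw_ring[0..31],
+ * writes tail 31 and advances the trigger to 63; subsequent refills use
+ * 32..63, 64..95 and so on until the trigger reaches 511, after which
+ * 511 + 32 >= 512 wraps it back to 31.
+ */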
+
+static inline uint16_t
+rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct iavf_rx_queue *rxq = (struct iavf_rx_queue *)rx_queue;
+ uint16_t nb_rx = 0;
+
+ if (!nb_pkts)
+ return 0;
+
+ if (rxq->rx_nb_avail)
+ return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
+
+ if (rxq->rxdid == IAVF_RXDID_COMMS_OVS_1)
+ nb_rx = (uint16_t)iavf_rx_scan_hw_ring_flex_rxd(rxq);
+ else
+ nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq);
+ rxq->rx_next_avail = 0;
+ rxq->rx_nb_avail = nb_rx;
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
+
+ if (rxq->rx_tail > rxq->rx_free_trigger) {
+ if (iavf_rx_alloc_bufs(rxq) != 0) {
+ uint16_t i, j;
+
+ /* TODO: count rx_mbuf_alloc_failed here */
+
+ rxq->rx_nb_avail = 0;
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
+ for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
+ rxq->sw_ring[j] = rxq->rx_stage[i];
+
+ return 0;
+ }
+ }
+
+ if (rxq->rx_tail >= rxq->nb_rx_desc)
+ rxq->rx_tail = 0;
+
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u, nb_rx=%u",
+ rxq->port_id, rxq->queue_id,
+ rxq->rx_tail, nb_rx);
+
+ if (rxq->rx_nb_avail)
+ return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
+
+ return 0;
+}
+
+static uint16_t
+iavf_recv_pkts_bulk_alloc(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_rx = 0, n, count;
+
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ if (likely(nb_pkts <= IAVF_RX_MAX_BURST))
+ return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
+
+ while (nb_pkts) {
+ n = RTE_MIN(nb_pkts, IAVF_RX_MAX_BURST);
+ count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
+ nb_rx = (uint16_t)(nb_rx + count);
+ nb_pkts = (uint16_t)(nb_pkts - count);
+ if (count < n)
+ break;
+ }
+
+ return nb_rx;
+}
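+
+/* Example of the burst splitting above, assuming IAVF_RX_MAX_BURST is
+ * 32: a request for nb_pkts = 100 is served as chunks of 32, 32, 32 and
+ * 4 through rx_recv_pkts(), stopping early if any chunk returns fewer
+ * packets than requested.
+ */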
+
+static inline int
+iavf_xmit_cleanup(struct iavf_tx_queue *txq)
+{
+ struct iavf_tx_entry *sw_ring = txq->sw_ring;
+ uint16_t last_desc_cleaned = txq->last_desc_cleaned;
+ uint16_t nb_tx_desc = txq->nb_tx_desc;
+ uint16_t desc_to_clean_to;
+ uint16_t nb_tx_to_clean;
+
+ volatile struct iavf_tx_desc *txd = txq->tx_ring;
+
+ desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
+ if (desc_to_clean_to >= nb_tx_desc)
+ desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
+
+ desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
+ if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
+ rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
+ rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE)) {
+ PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
+ "(port=%d queue=%d)", desc_to_clean_to,
+ txq->port_id, txq->queue_id);
+ return -1;
+ }
+
+ if (last_desc_cleaned > desc_to_clean_to)
+ nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
+ desc_to_clean_to);
+ else
+ nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
+ last_desc_cleaned);
+
+ txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
+
+ txq->last_desc_cleaned = desc_to_clean_to;
+ txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
+
+ return 0;
+}
+
+/* Check if the context descriptor is needed for TX offloading */
+static inline uint16_t
+iavf_calc_context_desc(uint64_t flags)
+{
+ static uint64_t mask = PKT_TX_TCP_SEG;
+
+ return (flags & mask) ? 1 : 0;
+}
+
+static inline void
+iavf_txd_enable_checksum(uint64_t ol_flags,
+ uint32_t *td_cmd,
+ uint32_t *td_offset,
+ union iavf_tx_offload tx_offload)
+{
+ /* Set MACLEN */
+ *td_offset |= (tx_offload.l2_len >> 1) <<
+ IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+ /* Enable L3 checksum offloads */
+ if (ol_flags & PKT_TX_IP_CKSUM) {
+ *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
+ *td_offset |= (tx_offload.l3_len >> 2) <<
+ IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+ } else if (ol_flags & PKT_TX_IPV4) {
+ *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
+ *td_offset |= (tx_offload.l3_len >> 2) <<
+ IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+ } else if (ol_flags & PKT_TX_IPV6) {
+ *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
+ *td_offset |= (tx_offload.l3_len >> 2) <<
+ IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+ }
+
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
+ *td_offset |= (tx_offload.l4_len >> 2) <<
+ IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ return;
+ }
+
+ /* Enable L4 checksum offloads */
+ switch (ol_flags & PKT_TX_L4_MASK) {
+ case PKT_TX_TCP_CKSUM:
+ *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
+ *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
+ IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ case PKT_TX_SCTP_CKSUM:
+ *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
+ *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
+ IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ case PKT_TX_UDP_CKSUM:
+ *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
+ *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
+ IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ default:
+ break;
+ }
+}
+
+/* Set the TSO context descriptor.
+ * Supports IP -> L4 and IP -> IP -> L4.
+ */
+static inline uint64_t
+iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)
+{
+ uint64_t ctx_desc = 0;
+ uint32_t cd_cmd, hdr_len, cd_tso_len;
+
+ if (!tx_offload.l4_len) {
+ PMD_TX_LOG(DEBUG, "L4 length set to 0");
+ return ctx_desc;
+ }
+
+ hdr_len = tx_offload.l2_len +
+ tx_offload.l3_len +
+ tx_offload.l4_len;
+
+ cd_cmd = IAVF_TX_CTX_DESC_TSO;
+ cd_tso_len = mbuf->pkt_len - hdr_len;
+ ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
+ ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+ ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT);
+
+ return ctx_desc;
+}
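+
+/* Worked example for the TSO context above (header and payload sizes
+ * are arbitrary): l2_len = 14, l3_len = 20, l4_len = 20 and
+ * pkt_len = 9014 give hdr_len = 54 and cd_tso_len = 9014 - 54 = 8960,
+ * so the returned qword carries IAVF_TX_CTX_DESC_TSO in the CMD field,
+ * 8960 in the TSO length field and tso_segsz (e.g. 1460) in the MSS
+ * field.
+ */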
+
+/* Construct the tx flags */
+static inline uint64_t
+iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
+ uint32_t td_tag)
+{
+ return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
+ ((uint64_t)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) |
+ ((uint64_t)td_offset <<
+ IAVF_TXD_QW1_OFFSET_SHIFT) |
+ ((uint64_t)size <<
+ IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
+ ((uint64_t)td_tag <<
+ IAVF_TXD_QW1_L2TAG1_SHIFT));
+}
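+
+/* For instance (values are arbitrary), a single 60-byte data descriptor
+ * with EOP and RS requested and no VLAN tag could be built as
+ *
+ *   iavf_build_ctob(IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS,
+ *                   0, 60, 0);
+ *
+ * which encodes DTYPE = DATA, the command bits, offset 0, buffer size 60
+ * and L2TAG1 = 0 into one little-endian qword.
+ */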
+
+/* TX function */
+uint16_t
+iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ volatile struct iavf_tx_desc *txd;
+ volatile struct iavf_tx_desc *txr;
+ struct iavf_tx_queue *txq;
+ struct iavf_tx_entry *sw_ring;
+ struct iavf_tx_entry *txe, *txn;
+ struct rte_mbuf *tx_pkt;
+ struct rte_mbuf *m_seg;
+ uint16_t tx_id;
+ uint16_t nb_tx;
+ uint32_t td_cmd;
+ uint32_t td_offset;
+ uint32_t td_tag;
+ uint64_t ol_flags;
+ uint16_t nb_used;
+ uint16_t nb_ctx;
+ uint16_t tx_last;
+ uint16_t slen;
+ uint64_t buf_dma_addr;
+ union iavf_tx_offload tx_offload = {0};
+
+ txq = tx_queue;
+ sw_ring = txq->sw_ring;
+ txr = txq->tx_ring;
+ tx_id = txq->tx_tail;
+ txe = &sw_ring[tx_id];
+
+ /* Check if the descriptor ring needs to be cleaned. */
+ if (txq->nb_free < txq->free_thresh)
+ iavf_xmit_cleanup(txq);
+
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ td_cmd = 0;
+ td_tag = 0;
+ td_offset = 0;
+
+ tx_pkt = *tx_pkts++;
+ RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
+
+ ol_flags = tx_pkt->ol_flags;
+ tx_offload.l2_len = tx_pkt->l2_len;
+ tx_offload.l3_len = tx_pkt->l3_len;
+ tx_offload.l4_len = tx_pkt->l4_len;
+ tx_offload.tso_segsz = tx_pkt->tso_segsz;
+
+ /* Calculate the number of context descriptors needed. */
+ nb_ctx = iavf_calc_context_desc(ol_flags);
+
+ /* The number of descriptors that must be allocated for
+ * a packet equals the number of segments of that packet,
+ * plus one context descriptor if needed.
+ */
+ nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
+ tx_last = (uint16_t)(tx_id + nb_used - 1);
+
+ /* Circular ring */
+ if (tx_last >= txq->nb_tx_desc)
+ tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
+
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
+ " tx_first=%u tx_last=%u",
+ txq->port_id, txq->queue_id, tx_id, tx_last);
+
+ if (nb_used > txq->nb_free) {
+ if (iavf_xmit_cleanup(txq)) {
+ if (nb_tx == 0)
+ return 0;
+ goto end_of_tx;
+ }
+ if (unlikely(nb_used > txq->rs_thresh)) {
+ while (nb_used > txq->nb_free) {
+ if (iavf_xmit_cleanup(txq)) {
+ if (nb_tx == 0)
+ return 0;
+ goto end_of_tx;
+ }
+ }
+ }
+ }
+
+ /* Descriptor based VLAN insertion */
+ if (ol_flags & PKT_TX_VLAN_PKT) {
+ td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
+ td_tag = tx_pkt->vlan_tci;
+ }
+
+ /* According to the datasheet, bit 2 is reserved and must be
+ * set to 1.
+ */
+ td_cmd |= 0x04;
+
+ /* Enable checksum offloading */
+ if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)
+ iavf_txd_enable_checksum(ol_flags, &td_cmd,
+ &td_offset, tx_offload);
+
+ if (nb_ctx) {
+ /* Setup TX context descriptor if required */
+ uint64_t cd_type_cmd_tso_mss =
+ IAVF_TX_DESC_DTYPE_CONTEXT;
+ volatile struct iavf_tx_context_desc *ctx_txd =
+ (volatile struct iavf_tx_context_desc *)
+ &txr[tx_id];
+
+ txn = &sw_ring[txe->next_id];
+ RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+ if (txe->mbuf) {
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = NULL;
+ }
+
+ /* TSO enabled */
+ if (ol_flags & PKT_TX_TCP_SEG)
+ cd_type_cmd_tso_mss |=
+ iavf_set_tso_ctx(tx_pkt, tx_offload);
+
+ ctx_txd->type_cmd_tso_mss =
+ rte_cpu_to_le_64(cd_type_cmd_tso_mss);
+
+ IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ }
+
+ m_seg = tx_pkt;
+ do {
+ txd = &txr[tx_id];
+ txn = &sw_ring[txe->next_id];
+
+ if (txe->mbuf)
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = m_seg;
+
+ /* Setup TX Descriptor */
+ slen = m_seg->data_len;
+ buf_dma_addr = rte_mbuf_data_iova(m_seg);
+ txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+ txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd,
+ td_offset,
+ slen,
+ td_tag);
+
+ IAVF_DUMP_TX_DESC(txq, txd, tx_id);
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ m_seg = m_seg->next;
+ } while (m_seg);
+
+ /* The last packet data descriptor needs End Of Packet (EOP) */
+ td_cmd |= IAVF_TX_DESC_CMD_EOP;
+ txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
+ txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
+
+ if (txq->nb_used >= txq->rs_thresh) {
+ PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
+ "%4u (port=%d queue=%d)",
+ tx_last, txq->port_id, txq->queue_id);
+
+ td_cmd |= IAVF_TX_DESC_CMD_RS;
+
+ /* Update txq RS bit counters */
+ txq->nb_used = 0;
+ }
+
+ txd->cmd_type_offset_bsz |=
+ rte_cpu_to_le_64(((uint64_t)td_cmd) <<
+ IAVF_TXD_QW1_CMD_SHIFT);
+ IAVF_DUMP_TX_DESC(txq, txd, tx_id);
+ }
+
+end_of_tx:
+ rte_wmb();
+
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
+ txq->port_id, txq->queue_id, tx_id, nb_tx);
+
+ IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
+ txq->tx_tail = tx_id;
+
+ return nb_tx;
+}
+
+/* TX prep functions */
+uint16_t
+iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int i, ret;
+ uint64_t ol_flags;
+ struct rte_mbuf *m;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+
+ /* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
+ if (!(ol_flags & PKT_TX_TCP_SEG)) {
+ if (m->nb_segs > IAVF_TX_MAX_MTU_SEG) {
+ rte_errno = EINVAL;
+ return i;
+ }
+ } else if ((m->tso_segsz < IAVF_MIN_TSO_MSS) ||
+ (m->tso_segsz > IAVF_MAX_TSO_MSS)) {
+ /* An MSS outside the supported range is considered malicious */
+ rte_errno = EINVAL;
+ return i;
+ }
+
+ if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+ }
+
+ return i;
+}
+
+/* Choose the Rx function */
+void
+iavf_set_rx_function(struct rte_eth_dev *dev)
+{
+ struct iavf_adapter *adapter =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+#ifdef RTE_ARCH_X86
+ struct iavf_rx_queue *rxq;
+ int i;
+ bool use_avx2 = false;
+
+ if (!iavf_rx_vec_dev_check(dev)) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ (void)iavf_rxq_vec_setup(rxq);
+ }
+
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
+ use_avx2 = true;
+
+ if (dev->data->scattered_rx) {
+ PMD_DRV_LOG(DEBUG,
+ "Using %sVector Scattered Rx (port %d).",
+ use_avx2 ? "avx2 " : "",
+ dev->data->port_id);
+ if (vf->vf_res->vf_cap_flags &
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+ dev->rx_pkt_burst = use_avx2 ?
+ iavf_recv_scattered_pkts_vec_avx2_flex_rxd :
+ iavf_recv_scattered_pkts_vec_flex_rxd;
+ else
+ dev->rx_pkt_burst = use_avx2 ?
+ iavf_recv_scattered_pkts_vec_avx2 :
+ iavf_recv_scattered_pkts_vec;
+ } else {
+ PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
+ use_avx2 ? "avx2 " : "",
+ dev->data->port_id);
+ if (vf->vf_res->vf_cap_flags &
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+ dev->rx_pkt_burst = use_avx2 ?
+ iavf_recv_pkts_vec_avx2_flex_rxd :
+ iavf_recv_pkts_vec_flex_rxd;
+ else
+ dev->rx_pkt_burst = use_avx2 ?
+ iavf_recv_pkts_vec_avx2 :
+ iavf_recv_pkts_vec;
+ }
+
+ return;
+ }
+#endif
+
+ if (dev->data->scattered_rx) {
+ PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
+ dev->data->port_id);
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+ dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
+ else
+ dev->rx_pkt_burst = iavf_recv_scattered_pkts;
+ } else if (adapter->rx_bulk_alloc_allowed) {
+ PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst = iavf_recv_pkts_bulk_alloc;
+ } else {
+ PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
+ dev->data->port_id);
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+ dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
+ else
+ dev->rx_pkt_burst = iavf_recv_pkts;
+ }
+}
+
+/* Choose the Tx function */
+void
+iavf_set_tx_function(struct rte_eth_dev *dev)
+{
+#ifdef RTE_ARCH_X86
+ struct iavf_tx_queue *txq;
+ int i;
+ bool use_avx2 = false;
+
+ if (!iavf_tx_vec_dev_check(dev)) {
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (!txq)
+ continue;
+ iavf_txq_vec_setup(txq);
+ }
+
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
+ use_avx2 = true;
+
+ PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
+ use_avx2 ? "avx2 " : "",
+ dev->data->port_id);
+ dev->tx_pkt_burst = use_avx2 ?
+ iavf_xmit_pkts_vec_avx2 :
+ iavf_xmit_pkts_vec;
+ dev->tx_pkt_prepare = NULL;
+
+ return;
+ }
+#endif
+
+ PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
+ dev->data->port_id);
+ dev->tx_pkt_burst = iavf_xmit_pkts;
+ dev->tx_pkt_prepare = iavf_prep_pkts;
+}
+
+void
+iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct iavf_rx_queue *rxq;
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mp;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ qinfo->nb_desc = rxq->nb_rx_desc;
+
+ qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+ qinfo->conf.rx_drop_en = true;
+ qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
+void
+iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct iavf_tx_queue *txq;
+
+ txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_tx_desc;
+
+ qinfo->conf.tx_free_thresh = txq->free_thresh;
+ qinfo->conf.tx_rs_thresh = txq->rs_thresh;
+ qinfo->conf.offloads = txq->offloads;
+ qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
+/* Get the number of used descriptors of a rx queue */
+uint32_t
+iavf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+#define IAVF_RXQ_SCAN_INTERVAL 4
+ volatile union iavf_rx_desc *rxdp;
+ struct iavf_rx_queue *rxq;
+ uint16_t desc = 0;
+
+ rxq = dev->data->rx_queues[queue_id];
+ rxdp = &rxq->rx_ring[rxq->rx_tail];
+
+ while ((desc < rxq->nb_rx_desc) &&
+ ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
+ IAVF_RXD_QW1_STATUS_MASK) >> IAVF_RXD_QW1_STATUS_SHIFT) &
+ (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)) {
+ /* Check the DD bit of one RX descriptor in every group of 4,
+ * to avoid polling the ring too frequently and degrading
+ * performance.
+ */
+ desc += IAVF_RXQ_SCAN_INTERVAL;
+ rxdp += IAVF_RXQ_SCAN_INTERVAL;
+ if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
+ rxdp = &(rxq->rx_ring[rxq->rx_tail +
+ desc - rxq->nb_rx_desc]);
+ }
+
+ return desc;
+}
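+
+/* Example of the coarse scan above, assuming (arbitrarily) that
+ * nb_rx_desc = 512, rx_tail = 500 and about 40 descriptors are done:
+ * the loop samples descriptors 500, 504, 508, wraps to 0, 4, 8, ... and
+ * stops at the first sampled descriptor whose DD bit is clear, so the
+ * returned count is a multiple of IAVF_RXQ_SCAN_INTERVAL (here 40).
+ */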
+
+int
+iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
+{
+ struct iavf_rx_queue *rxq = rx_queue;
+ volatile uint64_t *status;
+ uint64_t mask;
+ uint32_t desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return -EINVAL;
+
+ if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
+ mask = rte_le_to_cpu_64((1ULL << IAVF_RX_DESC_STATUS_DD_SHIFT)
+ << IAVF_RXD_QW1_STATUS_SHIFT);
+ if (*status & mask)
+ return RTE_ETH_RX_DESC_DONE;
+
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
+{
+ struct iavf_tx_queue *txq = tx_queue;
+ volatile uint64_t *status;
+ uint64_t mask, expect;
+ uint32_t desc;
+
+ if (unlikely(offset >= txq->nb_tx_desc))
+ return -EINVAL;
+
+ desc = txq->tx_tail + offset;
+ /* go to next desc that has the RS bit */
+ desc = ((desc + txq->rs_thresh - 1) / txq->rs_thresh) *
+ txq->rs_thresh;
+ if (desc >= txq->nb_tx_desc) {
+ desc -= txq->nb_tx_desc;
+ if (desc >= txq->nb_tx_desc)
+ desc -= txq->nb_tx_desc;
+ }
+
+ status = &txq->tx_ring[desc].cmd_type_offset_bsz;
+ mask = rte_le_to_cpu_64(IAVF_TXD_QW1_DTYPE_MASK);
+ expect = rte_cpu_to_le_64(
+ IAVF_TX_DESC_DTYPE_DESC_DONE << IAVF_TXD_QW1_DTYPE_SHIFT);
+ if ((*status & mask) == expect)
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
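+
+/* The rounding above snaps the queried offset to the next descriptor
+ * that carries an RS bit.  With arbitrary example values rs_thresh = 32,
+ * tx_tail = 10 and offset = 5, desc = 15 is rounded up to 32, and that
+ * descriptor's DTYPE field is compared against DESC_DONE to report
+ * RTE_ETH_TX_DESC_DONE or RTE_ETH_TX_DESC_FULL.
+ */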
+
+const uint32_t *
+iavf_get_default_ptype_table(void)
+{
+ static const uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE]
+ __rte_cache_aligned = {
+ /* L2 types */
+ /* [0] reserved */
+ [1] = RTE_PTYPE_L2_ETHER,
+ [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
+ /* [3] - [5] reserved */
+ [6] = RTE_PTYPE_L2_ETHER_LLDP,
+ /* [7] - [10] reserved */
+ [11] = RTE_PTYPE_L2_ETHER_ARP,
+ /* [12] - [21] reserved */
+
+ /* Non tunneled IPv4 */
+ [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ /* [25] reserved */
+ [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP,
+ [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP,
+
+ /* IPv4 --> IPv4 */
+ [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [32] reserved */
+ [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> IPv6 */
+ [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [39] reserved */
+ [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN */
+ [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
+ [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [47] reserved */
+ [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
+ [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [54] reserved */
+ [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
+ [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
+ [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [62] reserved */
+ [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
+ [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [69] reserved */
+ [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+ /* [73] - [87] reserved */
+
+ /* Non tunneled IPv6 */
+ [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ /* [91] reserved */
+ [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP,
+ [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP,
+
+ /* IPv6 --> IPv4 */
+ [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [98] reserved */
+ [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> IPv6 */
+ [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [105] reserved */
+ [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN */
+ [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
+ [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [113] reserved */
+ [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
+ [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [120] reserved */
+ [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
+ [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
+ [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [128] reserved */
+ [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
+ [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [135] reserved */
+ [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+ /* [139] - [299] reserved */
+
+ /* PPPoE */
+ [300] = RTE_PTYPE_L2_ETHER_PPPOE,
+ [301] = RTE_PTYPE_L2_ETHER_PPPOE,
+
+ /* PPPoE --> IPv4 */
+ [302] = RTE_PTYPE_L2_ETHER_PPPOE |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [303] = RTE_PTYPE_L2_ETHER_PPPOE |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [304] = RTE_PTYPE_L2_ETHER_PPPOE |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [305] = RTE_PTYPE_L2_ETHER_PPPOE |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [306] = RTE_PTYPE_L2_ETHER_PPPOE |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP,
+ [307] = RTE_PTYPE_L2_ETHER_PPPOE |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP,
+
+ /* PPPoE --> IPv6 */
+ [308] = RTE_PTYPE_L2_ETHER_PPPOE |
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [309] = RTE_PTYPE_L2_ETHER_PPPOE |
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [310] = RTE_PTYPE_L2_ETHER_PPPOE |
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [311] = RTE_PTYPE_L2_ETHER_PPPOE |
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [312] = RTE_PTYPE_L2_ETHER_PPPOE |
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP,
+ [313] = RTE_PTYPE_L2_ETHER_PPPOE |
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP,
+ /* [314] - [324] reserved */
+
+ /* IPv4/IPv6 --> GTPC/GTPU */
+ [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GTPC,
+ [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GTPC,
+ [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GTPC,
+ [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GTPC,
+ [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GTPU,
+ [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GTPU,
+
+ /* IPv4 --> GTPU --> IPv4 */
+ [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GTPU |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GTPU |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GTPU |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GTPU |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GTPU |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GTPU --> IPv4 */
+ [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GTPU |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GTPU |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GTPU |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GTPU |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GTPU |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GTPU --> IPv6 */
+ [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GTPU |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GTPU |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GTPU |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GTPU |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GTPU |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GTPU --> IPv6 */
+ [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GTPU |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GTPU |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GTPU |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GTPU |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GTPU |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+ /* All others reserved */
+ };
+
+ return ptype_tbl;
+}
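The table above translates the hardware ptype index written back in the Rx descriptor into the RTE_PTYPE_* software classification; reserved indexes stay zero, i.e. RTE_PTYPE_UNKNOWN. A minimal sketch of the lookup an Rx burst routine performs (the helper name is made up, the mbuf field is the standard one):

#include <stdint.h>
#include <rte_mbuf.h>

/* Hypothetical helper: assign mbuf->packet_type from the HW ptype index. */
static inline void
sketch_set_packet_type(struct rte_mbuf *mb, const uint32_t *ptype_tbl,
                       uint16_t hw_ptype)
{
        /* reserved indexes hold 0, which equals RTE_PTYPE_UNKNOWN */
        mb->packet_type = ptype_tbl[hw_ptype];
}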
diff --git a/src/spdk/dpdk/drivers/net/iavf/iavf_rxtx.h b/src/spdk/dpdk/drivers/net/iavf/iavf_rxtx.h
new file mode 100644
index 000000000..59625a979
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/iavf/iavf_rxtx.h
@@ -0,0 +1,534 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _IAVF_RXTX_H_
+#define _IAVF_RXTX_H_
+
+/* Ring length (QLEN) must be a whole multiple of 32 descriptors. */
+#define IAVF_ALIGN_RING_DESC 32
+#define IAVF_MIN_RING_DESC 64
+#define IAVF_MAX_RING_DESC 4096
+#define IAVF_DMA_MEM_ALIGN 4096
+/* Base address of the HW descriptor ring should be 128B aligned. */
+#define IAVF_RING_BASE_ALIGN 128
+
+/* used for Rx Bulk Allocate */
+#define IAVF_RX_MAX_BURST 32
+
+/* used for Vector PMD */
+#define IAVF_VPMD_RX_MAX_BURST 32
+#define IAVF_VPMD_TX_MAX_BURST 32
+#define IAVF_RXQ_REARM_THRESH 32
+#define IAVF_VPMD_DESCS_PER_LOOP 4
+#define IAVF_VPMD_TX_MAX_FREE_BUF 64
+
+#define IAVF_NO_VECTOR_FLAGS ( \
+ DEV_TX_OFFLOAD_MULTI_SEGS | \
+ DEV_TX_OFFLOAD_VLAN_INSERT | \
+ DEV_TX_OFFLOAD_SCTP_CKSUM | \
+ DEV_TX_OFFLOAD_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_TSO | \
+ DEV_TX_OFFLOAD_TCP_CKSUM)
+
+#define DEFAULT_TX_RS_THRESH 32
+#define DEFAULT_TX_FREE_THRESH 32
+
+#define IAVF_MIN_TSO_MSS 256
+#define IAVF_MAX_TSO_MSS 9668
+#define IAVF_TSO_MAX_SEG UINT8_MAX
+#define IAVF_TX_MAX_MTU_SEG 8
+
+#define IAVF_TX_CKSUM_OFFLOAD_MASK ( \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_TCP_SEG)
+
+#define IAVF_TX_OFFLOAD_MASK ( \
+ PKT_TX_OUTER_IPV6 | \
+ PKT_TX_OUTER_IPV4 | \
+ PKT_TX_IPV6 | \
+ PKT_TX_IPV4 | \
+ PKT_TX_VLAN_PKT | \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_TCP_SEG)
+
+#define IAVF_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
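IAVF_TX_OFFLOAD_NOTSUP_MASK is the complement of the supported set within PKT_TX_OFFLOAD_MASK, so Tx prepare code can reject any mbuf whose ol_flags intersect it. A hedged sketch of that style of check (the helper name is made up):

#include <errno.h>
#include <stdint.h>
#include <rte_mbuf.h>

/* Hypothetical check: refuse offloads this PMD does not implement. */
static inline int
sketch_check_tx_offload(const struct rte_mbuf *m, uint64_t notsup_mask)
{
        if (m->ol_flags & notsup_mask)
                return -ENOTSUP;
        return 0;
}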
+
+/* HW desc structure, both 16-byte and 32-byte types are supported */
+#ifdef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+#define iavf_rx_desc iavf_16byte_rx_desc
+#define iavf_rx_flex_desc iavf_16b_rx_flex_desc
+#else
+#define iavf_rx_desc iavf_32byte_rx_desc
+#define iavf_rx_flex_desc iavf_32b_rx_flex_desc
+#endif
+
+struct iavf_rxq_ops {
+ void (*release_mbufs)(struct iavf_rx_queue *rxq);
+};
+
+struct iavf_txq_ops {
+ void (*release_mbufs)(struct iavf_tx_queue *txq);
+};
+
+/* Structure associated with each Rx queue. */
+struct iavf_rx_queue {
+ struct rte_mempool *mp; /* mbuf pool to populate Rx ring */
+ const struct rte_memzone *mz; /* memzone for Rx ring */
+ volatile union iavf_rx_desc *rx_ring; /* Rx ring virtual address */
+ uint64_t rx_ring_phys_addr; /* Rx ring DMA address */
+ struct rte_mbuf **sw_ring; /* address of SW ring */
+ uint16_t nb_rx_desc; /* ring length */
+ uint16_t rx_tail; /* current value of tail */
+ volatile uint8_t *qrx_tail; /* register address of tail */
+ uint16_t rx_free_thresh; /* max free RX desc to hold */
+ uint16_t nb_rx_hold; /* number of held free RX desc */
+ struct rte_mbuf *pkt_first_seg; /* first segment of current packet */
+ struct rte_mbuf *pkt_last_seg; /* last segment of current packet */
+ struct rte_mbuf fake_mbuf; /* dummy mbuf */
+ uint8_t rxdid;
+
+ /* used for VPMD */
+ uint16_t rxrearm_nb; /* number of descriptors remaining to be re-armed */
+ uint16_t rxrearm_start; /* the idx we start the re-arming from */
+ uint64_t mbuf_initializer; /* value to init mbufs */
+
+ /* for rx bulk */
+ uint16_t rx_nb_avail; /* number of staged packets ready */
+ uint16_t rx_next_avail; /* index of the next staged packet */
+ uint16_t rx_free_trigger; /* triggers rx buffer allocation */
+ struct rte_mbuf *rx_stage[IAVF_RX_MAX_BURST * 2]; /* store mbuf */
+
+ uint16_t port_id; /* device port ID */
+ uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */
+ uint8_t fdir_enabled; /* 0 if FDIR disabled, 1 when enabled */
+ uint16_t queue_id; /* Rx queue index */
+ uint16_t rx_buf_len; /* The packet buffer size */
+ uint16_t rx_hdr_len; /* The header buffer size */
+ uint16_t max_pkt_len; /* Maximum packet length */
+ struct iavf_vsi *vsi; /**< the VSI this queue belongs to */
+
+ bool q_set; /* if rx queue has been configured */
+ bool rx_deferred_start; /* don't start this queue in dev start */
+ const struct iavf_rxq_ops *ops;
+};
+
+struct iavf_tx_entry {
+ struct rte_mbuf *mbuf;
+ uint16_t next_id;
+ uint16_t last_id;
+};
+
+/* Structure associated with each TX queue. */
+struct iavf_tx_queue {
+ const struct rte_memzone *mz; /* memzone for Tx ring */
+ volatile struct iavf_tx_desc *tx_ring; /* Tx ring virtual address */
+ uint64_t tx_ring_phys_addr; /* Tx ring DMA address */
+ struct iavf_tx_entry *sw_ring; /* address array of SW ring */
+ uint16_t nb_tx_desc; /* ring length */
+ uint16_t tx_tail; /* current value of tail */
+ volatile uint8_t *qtx_tail; /* register address of tail */
+ /* number of used desc since RS bit set */
+ uint16_t nb_used;
+ uint16_t nb_free;
+ uint16_t last_desc_cleaned; /* last desc that has been cleaned */
+ uint16_t free_thresh;
+ uint16_t rs_thresh;
+
+ uint16_t port_id;
+ uint16_t queue_id;
+ uint64_t offloads;
+ uint16_t next_dd; /* next desc to check the DD bit, for VPMD */
+ uint16_t next_rs; /* next desc to set the RS bit, for VPMD */
+
+ bool q_set; /* if tx queue has been configured */
+ bool tx_deferred_start; /* don't start this queue in dev start */
+ const struct iavf_txq_ops *ops;
+};
+
+/* Offload features */
+union iavf_tx_offload {
+ uint64_t data;
+ struct {
+ uint64_t l2_len:7; /* L2 (MAC) Header Length. */
+ uint64_t l3_len:9; /* L3 (IP) Header Length. */
+ uint64_t l4_len:8; /* L4 Header Length. */
+ uint64_t tso_segsz:16; /* TCP TSO segment size */
+ /* uint64_t unused : 24; */
+ };
+};
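The union above lets the Tx path treat the four header-length fields as one 64-bit word when building context descriptors. A standalone sketch of the same packing, with hypothetical header sizes and without the DPDK types:

#include <stdint.h>

union tx_offload_sketch {       /* mirrors the layout of iavf_tx_offload */
        uint64_t data;
        struct {
                uint64_t l2_len:7;
                uint64_t l3_len:9;
                uint64_t l4_len:8;
                uint64_t tso_segsz:16;
        };
};

int main(void)
{
        union tx_offload_sketch off = { .data = 0 };

        off.l2_len = 14;        /* Ethernet header */
        off.l3_len = 20;        /* IPv4 header, no options */
        off.l4_len = 20;        /* TCP header, no options */
        off.tso_segsz = 1460;   /* hypothetical TSO segment size */

        /* off.data now carries all four fields in a single 64-bit value */
        return off.data == 0;   /* exits 0 when the packing produced a word */
}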
+
+/* Rx Flex Descriptors
+ * These descriptors are used instead of the legacy version descriptors
+ */
+union iavf_16b_rx_flex_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ /* bit 0 of hdr_addr is DD bit */
+ } read;
+ struct {
+ /* Qword 0 */
+ u8 rxdid; /* descriptor builder profile ID */
+ u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
+ __le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
+ __le16 pkt_len; /* [15:14] are reserved */
+ __le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
+ /* sph=[11:11] */
+ /* ff1/ext=[15:12] */
+
+ /* Qword 1 */
+ __le16 status_error0;
+ __le16 l2tag1;
+ __le16 flex_meta0;
+ __le16 flex_meta1;
+ } wb; /* writeback */
+};
+
+union iavf_32b_rx_flex_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ /* bit 0 of hdr_addr is DD bit */
+ __le64 rsvd1;
+ __le64 rsvd2;
+ } read;
+ struct {
+ /* Qword 0 */
+ u8 rxdid; /* descriptor builder profile ID */
+ u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
+ __le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
+ __le16 pkt_len; /* [15:14] are reserved */
+ __le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
+ /* sph=[11:11] */
+ /* ff1/ext=[15:12] */
+
+ /* Qword 1 */
+ __le16 status_error0;
+ __le16 l2tag1;
+ __le16 flex_meta0;
+ __le16 flex_meta1;
+
+ /* Qword 2 */
+ __le16 status_error1;
+ u8 flex_flags2;
+ u8 time_stamp_low;
+ __le16 l2tag2_1st;
+ __le16 l2tag2_2nd;
+
+ /* Qword 3 */
+ __le16 flex_meta2;
+ __le16 flex_meta3;
+ union {
+ struct {
+ __le16 flex_meta4;
+ __le16 flex_meta5;
+ } flex;
+ __le32 ts_high;
+ } flex_ts;
+ } wb; /* writeback */
+};
+
+/* Rx Flex Descriptor
+ * RxDID Profile ID 16-21
+ * Flex-field 0: RSS hash lower 16-bits
+ * Flex-field 1: RSS hash upper 16-bits
+ * Flex-field 2: Flow ID lower 16-bits
+ * Flex-field 3: Flow ID upper 16-bits
+ * Flex-field 4: AUX0
+ * Flex-field 5: AUX1
+ */
+struct iavf_32b_rx_flex_desc_comms {
+ /* Qword 0 */
+ u8 rxdid;
+ u8 mir_id_umb_cast;
+ __le16 ptype_flexi_flags0;
+ __le16 pkt_len;
+ __le16 hdr_len_sph_flex_flags1;
+
+ /* Qword 1 */
+ __le16 status_error0;
+ __le16 l2tag1;
+ __le32 rss_hash;
+
+ /* Qword 2 */
+ __le16 status_error1;
+ u8 flexi_flags2;
+ u8 ts_low;
+ __le16 l2tag2_1st;
+ __le16 l2tag2_2nd;
+
+ /* Qword 3 */
+ __le32 flow_id;
+ union {
+ struct {
+ __le16 aux0;
+ __le16 aux1;
+ } flex;
+ __le32 ts_high;
+ } flex_ts;
+};
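With the comms layout above, the Rx path can recover the RSS hash once the RSS-valid bit in status_error0 is set. A simplified, self-contained sketch of that extraction; the structure and bit value here are stand-ins, not the driver's types:

#include <stdint.h>

struct comms_wb_sketch {        /* minimal stand-in for the comms writeback */
        uint16_t status_error0;
        uint16_t l2tag1;
        uint32_t rss_hash;
};

/* assumed to mirror 1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S */
#define SKETCH_RSS_VALID_BIT (1u << 12)

static inline int
sketch_get_rss_hash(const struct comms_wb_sketch *wb, uint32_t *hash)
{
        if (!(wb->status_error0 & SKETCH_RSS_VALID_BIT))
                return -1;              /* no hash reported for this packet */
        *hash = wb->rss_hash;           /* LE-to-CPU conversion omitted */
        return 0;
}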
+
+/* Rx Flex Descriptor
+ * RxDID Profile ID 22-23 (swap Hash and FlowID)
+ * Flex-field 0: Flow ID lower 16-bits
+ * Flex-field 1: Flow ID upper 16-bits
+ * Flex-field 2: RSS hash lower 16-bits
+ * Flex-field 3: RSS hash upper 16-bits
+ * Flex-field 4: AUX0
+ * Flex-field 5: AUX1
+ */
+struct iavf_32b_rx_flex_desc_comms_ovs {
+ /* Qword 0 */
+ u8 rxdid;
+ u8 mir_id_umb_cast;
+ __le16 ptype_flexi_flags0;
+ __le16 pkt_len;
+ __le16 hdr_len_sph_flex_flags1;
+
+ /* Qword 1 */
+ __le16 status_error0;
+ __le16 l2tag1;
+ __le32 flow_id;
+
+ /* Qword 2 */
+ __le16 status_error1;
+ u8 flexi_flags2;
+ u8 ts_low;
+ __le16 l2tag2_1st;
+ __le16 l2tag2_2nd;
+
+ /* Qword 3 */
+ __le32 rss_hash;
+ union {
+ struct {
+ __le16 aux0;
+ __le16 aux1;
+ } flex;
+ __le32 ts_high;
+ } flex_ts;
+};
+
+/* Receive Flex Descriptor profile IDs: there are a total
+ * of 64 profiles, where profile IDs 0/1 are legacy and
+ * profiles 2-63 are flex profiles that can be programmed
+ * with specific metadata (profile 7 is reserved for HW).
+ */
+enum iavf_rxdid {
+ IAVF_RXDID_LEGACY_0 = 0,
+ IAVF_RXDID_LEGACY_1 = 1,
+ IAVF_RXDID_FLEX_NIC = 2,
+ IAVF_RXDID_FLEX_NIC_2 = 6,
+ IAVF_RXDID_HW = 7,
+ IAVF_RXDID_COMMS_GENERIC = 16,
+ IAVF_RXDID_COMMS_AUX_VLAN = 17,
+ IAVF_RXDID_COMMS_AUX_IPV4 = 18,
+ IAVF_RXDID_COMMS_AUX_IPV6 = 19,
+ IAVF_RXDID_COMMS_AUX_IPV6_FLOW = 20,
+ IAVF_RXDID_COMMS_AUX_TCP = 21,
+ IAVF_RXDID_COMMS_OVS_1 = 22,
+ IAVF_RXDID_COMMS_OVS_2 = 23,
+ IAVF_RXDID_LAST = 63,
+};
+
+enum iavf_rx_flex_desc_status_error_0_bits {
+ /* Note: These are predefined bit offsets */
+ IAVF_RX_FLEX_DESC_STATUS0_DD_S = 0,
+ IAVF_RX_FLEX_DESC_STATUS0_EOF_S,
+ IAVF_RX_FLEX_DESC_STATUS0_HBO_S,
+ IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S,
+ IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S,
+ IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S,
+ IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S,
+ IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S,
+ IAVF_RX_FLEX_DESC_STATUS0_LPBK_S,
+ IAVF_RX_FLEX_DESC_STATUS0_IPV6EXADD_S,
+ IAVF_RX_FLEX_DESC_STATUS0_RXE_S,
+ IAVF_RX_FLEX_DESC_STATUS0_CRCP_S,
+ IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S,
+ IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S,
+ IAVF_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S,
+ IAVF_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S,
+ IAVF_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
+};
+
+/* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
+#define IAVF_RX_FLEX_DESC_PTYPE_M (0x3FF) /* 10-bits */
+
+/* for iavf_32b_rx_flex_desc.pkt_len member */
+#define IAVF_RX_FLX_DESC_PKT_LEN_M (0x3FFF) /* 14-bits */
+
+int iavf_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp);
+
+int iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+void iavf_dev_rx_queue_release(void *rxq);
+
+int iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+int iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+void iavf_dev_tx_queue_release(void *txq);
+void iavf_stop_queues(struct rte_eth_dev *dev);
+uint16_t iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t iavf_recv_pkts_flex_rxd(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t iavf_recv_scattered_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t iavf_recv_scattered_pkts_flex_rxd(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+uint16_t iavf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+void iavf_set_rx_function(struct rte_eth_dev *dev);
+void iavf_set_tx_function(struct rte_eth_dev *dev);
+void iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo);
+void iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo);
+uint32_t iavf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id);
+int iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset);
+int iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset);
+
+uint16_t iavf_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t iavf_recv_pkts_vec_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t iavf_recv_scattered_pkts_vec(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t iavf_recv_scattered_pkts_vec_flex_rxd(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+uint16_t iavf_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t iavf_recv_pkts_vec_avx2_flex_rxd(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t iavf_recv_scattered_pkts_vec_avx2(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t iavf_recv_scattered_pkts_vec_avx2_flex_rxd(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+uint16_t iavf_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+int iavf_rx_vec_dev_check(struct rte_eth_dev *dev);
+int iavf_tx_vec_dev_check(struct rte_eth_dev *dev);
+int iavf_rxq_vec_setup(struct iavf_rx_queue *rxq);
+int iavf_txq_vec_setup(struct iavf_tx_queue *txq);
+
+const uint32_t *iavf_get_default_ptype_table(void);
+
+static inline
+void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq,
+ const volatile void *desc,
+ uint16_t rx_id)
+{
+#ifdef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+ const volatile union iavf_16byte_rx_desc *rx_desc = desc;
+
+ printf("Queue %d Rx_desc %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
+ rxq->queue_id, rx_id, rx_desc->read.pkt_addr,
+ rx_desc->read.hdr_addr);
+#else
+ const volatile union iavf_32byte_rx_desc *rx_desc = desc;
+
+ printf("Queue %d Rx_desc %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64
+ " QW2: 0x%016"PRIx64" QW3: 0x%016"PRIx64"\n", rxq->queue_id,
+ rx_id, rx_desc->read.pkt_addr, rx_desc->read.hdr_addr,
+ rx_desc->read.rsvd1, rx_desc->read.rsvd2);
+#endif
+}
+
+/* All Tx descriptors are 16 bytes, so just use one layout
+ * to print the two qwords.
+ */
+static inline
+void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
+ const volatile void *desc, uint16_t tx_id)
+{
+ const char *name;
+ const volatile struct iavf_tx_desc *tx_desc = desc;
+ enum iavf_tx_desc_dtype_value type;
+
+ type = (enum iavf_tx_desc_dtype_value)rte_le_to_cpu_64(
+ tx_desc->cmd_type_offset_bsz &
+ rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK));
+ switch (type) {
+ case IAVF_TX_DESC_DTYPE_DATA:
+ name = "Tx_data_desc";
+ break;
+ case IAVF_TX_DESC_DTYPE_CONTEXT:
+ name = "Tx_context_desc";
+ break;
+ default:
+ name = "unknown_desc";
+ break;
+ }
+
+ printf("Queue %d %s %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
+ txq->queue_id, name, tx_id, tx_desc->buffer_addr,
+ tx_desc->cmd_type_offset_bsz);
+}
+
+#define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \
+ int i; \
+ for (i = 0; i < (ad)->eth_dev->data->nb_rx_queues; i++) { \
+ struct iavf_rx_queue *rxq = (ad)->eth_dev->data->rx_queues[i]; \
+ if (!rxq) \
+ continue; \
+ rxq->fdir_enabled = on; \
+ } \
+ PMD_DRV_LOG(DEBUG, "FDIR processing on RX set to %d", on); \
+} while (0)
+
+/* Enable/disable flow director Rx processing in data path. */
+static inline
+void iavf_fdir_rx_proc_enable(struct iavf_adapter *ad, bool on)
+{
+ if (on) {
+ /* enable flow director processing */
+ FDIR_PROC_ENABLE_PER_QUEUE(ad, on);
+ ad->fdir_ref_cnt++;
+ } else {
+ if (ad->fdir_ref_cnt >= 1) {
+ ad->fdir_ref_cnt--;
+
+ if (ad->fdir_ref_cnt == 0)
+ FDIR_PROC_ENABLE_PER_QUEUE(ad, on);
+ }
+ }
+}
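The macro and inline above implement a per-adapter reference count: Rx-path FDIR mark extraction is switched on when the first flow director rule appears and switched off only when the last one is removed. A plain-C sketch of that pairing, outside any DPDK types:

#include <stdbool.h>
#include <stdio.h>

static int fdir_ref_cnt;        /* stands in for ad->fdir_ref_cnt */
static bool rx_fdir_enabled;    /* stands in for rxq->fdir_enabled */

static void sketch_fdir_rx_proc_enable(bool on)
{
        if (on) {
                rx_fdir_enabled = true;         /* every rule add enables */
                fdir_ref_cnt++;
        } else if (fdir_ref_cnt >= 1 && --fdir_ref_cnt == 0) {
                rx_fdir_enabled = false;        /* only the last delete disables */
        }
}

int main(void)
{
        sketch_fdir_rx_proc_enable(true);   /* rule #1 added */
        sketch_fdir_rx_proc_enable(true);   /* rule #2 added */
        sketch_fdir_rx_proc_enable(false);  /* rule #2 deleted: still enabled */
        sketch_fdir_rx_proc_enable(false);  /* rule #1 deleted: now disabled */
        printf("fdir rx processing enabled: %d\n", rx_fdir_enabled);
        return 0;
}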
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_DUMP_DESC
+#define IAVF_DUMP_RX_DESC(rxq, desc, rx_id) \
+ iavf_dump_rx_descriptor(rxq, desc, rx_id)
+#define IAVF_DUMP_TX_DESC(txq, desc, tx_id) \
+ iavf_dump_tx_descriptor(txq, desc, tx_id)
+#else
+#define IAVF_DUMP_RX_DESC(rxq, desc, rx_id) do { } while (0)
+#define IAVF_DUMP_TX_DESC(txq, desc, tx_id) do { } while (0)
+#endif
+
+#endif /* _IAVF_RXTX_H_ */
diff --git a/src/spdk/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/src/spdk/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c
new file mode 100644
index 000000000..e5e0fd309
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c
@@ -0,0 +1,1541 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include "iavf_rxtx_vec_common.h"
+
+#include <x86intrin.h>
+
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+static inline void
+iavf_rxq_rearm(struct iavf_rx_queue *rxq)
+{
+ int i;
+ uint16_t rx_id;
+ volatile union iavf_rx_desc *rxdp;
+ struct rte_mbuf **rxp = &rxq->sw_ring[rxq->rxrearm_start];
+
+ rxdp = rxq->rx_ring + rxq->rxrearm_start;
+
+ /* Pull 'n' more MBUFs into the software ring */
+ if (rte_mempool_get_bulk(rxq->mp,
+ (void *)rxp,
+ IAVF_RXQ_REARM_THRESH) < 0) {
+ if (rxq->rxrearm_nb + IAVF_RXQ_REARM_THRESH >=
+ rxq->nb_rx_desc) {
+ __m128i dma_addr0;
+
+ dma_addr0 = _mm_setzero_si128();
+ for (i = 0; i < IAVF_VPMD_DESCS_PER_LOOP; i++) {
+ rxp[i] = &rxq->fake_mbuf;
+ _mm_store_si128((__m128i *)&rxdp[i].read,
+ dma_addr0);
+ }
+ }
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ IAVF_RXQ_REARM_THRESH;
+ return;
+ }
+
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+ struct rte_mbuf *mb0, *mb1;
+ __m128i dma_addr0, dma_addr1;
+ __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
+ RTE_PKTMBUF_HEADROOM);
+ /* Initialize the mbufs in vector, process 2 mbufs in one loop */
+ for (i = 0; i < IAVF_RXQ_REARM_THRESH; i += 2, rxp += 2) {
+ __m128i vaddr0, vaddr1;
+
+ mb0 = rxp[0];
+ mb1 = rxp[1];
+
+ /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
+ offsetof(struct rte_mbuf, buf_addr) + 8);
+ vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
+ vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
+
+ /* convert pa to dma_addr hdr/data */
+ dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
+ dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
+
+ /* add headroom to pa values */
+ dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
+ dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);
+
+ /* flush desc with pa dma_addr */
+ _mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
+ _mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
+ }
+#else
+ struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
+ __m256i dma_addr0_1, dma_addr2_3;
+ __m256i hdr_room = _mm256_set1_epi64x(RTE_PKTMBUF_HEADROOM);
+ /* Initialize the mbufs in vector, process 4 mbufs in one loop */
+ for (i = 0; i < IAVF_RXQ_REARM_THRESH;
+ i += 4, rxp += 4, rxdp += 4) {
+ __m128i vaddr0, vaddr1, vaddr2, vaddr3;
+ __m256i vaddr0_1, vaddr2_3;
+
+ mb0 = rxp[0];
+ mb1 = rxp[1];
+ mb2 = rxp[2];
+ mb3 = rxp[3];
+
+ /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
+ offsetof(struct rte_mbuf, buf_addr) + 8);
+ vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
+ vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
+ vaddr2 = _mm_loadu_si128((__m128i *)&mb2->buf_addr);
+ vaddr3 = _mm_loadu_si128((__m128i *)&mb3->buf_addr);
+
+ /**
+ * merge 0 & 1, by casting 0 to 256-bit and inserting 1
+ * into the high lanes. Similarly for 2 & 3
+ */
+ vaddr0_1 =
+ _mm256_inserti128_si256(_mm256_castsi128_si256(vaddr0),
+ vaddr1, 1);
+ vaddr2_3 =
+ _mm256_inserti128_si256(_mm256_castsi128_si256(vaddr2),
+ vaddr3, 1);
+
+ /* convert pa to dma_addr hdr/data */
+ dma_addr0_1 = _mm256_unpackhi_epi64(vaddr0_1, vaddr0_1);
+ dma_addr2_3 = _mm256_unpackhi_epi64(vaddr2_3, vaddr2_3);
+
+ /* add headroom to pa values */
+ dma_addr0_1 = _mm256_add_epi64(dma_addr0_1, hdr_room);
+ dma_addr2_3 = _mm256_add_epi64(dma_addr2_3, hdr_room);
+
+ /* flush desc with pa dma_addr */
+ _mm256_store_si256((__m256i *)&rxdp->read, dma_addr0_1);
+ _mm256_store_si256((__m256i *)&(rxdp + 2)->read, dma_addr2_3);
+ }
+
+#endif
+
+ rxq->rxrearm_start += IAVF_RXQ_REARM_THRESH;
+ if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+ rxq->rxrearm_start = 0;
+
+ rxq->rxrearm_nb -= IAVF_RXQ_REARM_THRESH;
+
+ rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+ (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+ /* Update the tail pointer on the NIC */
+ IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+}
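After refilling IAVF_RXQ_REARM_THRESH buffers, the function advances rxrearm_start with wraparound and programs the tail register to point at the last initialized descriptor, not the next free one. A small sketch of that tail computation with assumed ring parameters:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t nb_rx_desc = 1024;     /* assumed ring size */
        uint16_t rearm_thresh = 32;     /* mirrors IAVF_RXQ_REARM_THRESH */
        uint16_t rxrearm_start = 992;   /* assumed position in the last chunk */

        rxrearm_start += rearm_thresh;
        if (rxrearm_start >= nb_rx_desc)
                rxrearm_start = 0;

        /* tail register gets the index of the last descriptor written */
        uint16_t rx_id = (rxrearm_start == 0) ?
                         (uint16_t)(nb_rx_desc - 1) :
                         (uint16_t)(rxrearm_start - 1);
        printf("write %u to the RX tail register\n", rx_id);
        return 0;
}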
+
+#define PKTLEN_SHIFT 10
+
+static inline uint16_t
+_iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts, uint8_t *split_packet)
+{
+#define IAVF_DESCS_PER_LOOP_AVX 8
+
+ /* const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; */
+ const uint32_t *type_table = rxq->vsi->adapter->ptype_tbl;
+
+ const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
+ 0, rxq->mbuf_initializer);
+ /* struct iavf_rx_entry *sw_ring = &rxq->sw_ring[rxq->rx_tail]; */
+ struct rte_mbuf **sw_ring = &rxq->sw_ring[rxq->rx_tail];
+ volatile union iavf_rx_desc *rxdp = rxq->rx_ring + rxq->rx_tail;
+ const int avx_aligned = ((rxq->rx_tail & 1) == 0);
+
+ rte_prefetch0(rxdp);
+
+ /* nb_pkts has to be floor-aligned to IAVF_DESCS_PER_LOOP_AVX */
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_DESCS_PER_LOOP_AVX);
+
+ /* See if we need to rearm the RX queue - gives the prefetch a bit
+ * of time to act
+ */
+ if (rxq->rxrearm_nb > IAVF_RXQ_REARM_THRESH)
+ iavf_rxq_rearm(rxq);
+
+ /* Before we start moving massive data around, check to see if
+ * there is actually a packet available
+ */
+ if (!(rxdp->wb.qword1.status_error_len &
+ rte_cpu_to_le_32(1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
+ return 0;
+
+ /* constants used in processing loop */
+ const __m256i crc_adjust =
+ _mm256_set_epi16
+ (/* first descriptor */
+ 0, 0, 0, /* ignore non-length fields */
+ -rxq->crc_len, /* sub crc on data_len */
+ 0, /* ignore high-16bits of pkt_len */
+ -rxq->crc_len, /* sub crc on pkt_len */
+ 0, 0, /* ignore pkt_type field */
+ /* second descriptor */
+ 0, 0, 0, /* ignore non-length fields */
+ -rxq->crc_len, /* sub crc on data_len */
+ 0, /* ignore high-16bits of pkt_len */
+ -rxq->crc_len, /* sub crc on pkt_len */
+ 0, 0 /* ignore pkt_type field */
+ );
+
+ /* 8 packets DD mask, LSB in each 32-bit value */
+ const __m256i dd_check = _mm256_set1_epi32(1);
+
+ /* 8 packets EOP mask, second-LSB in each 32-bit value */
+ const __m256i eop_check = _mm256_slli_epi32(dd_check,
+ IAVF_RX_DESC_STATUS_EOF_SHIFT);
+
+ /* mask to shuffle from desc. to mbuf (2 descriptors)*/
+ const __m256i shuf_msk =
+ _mm256_set_epi8
+ (/* first descriptor */
+ 7, 6, 5, 4, /* octet 4~7, 32bits rss */
+ 3, 2, /* octet 2~3, low 16 bits vlan_macip */
+ 15, 14, /* octet 15~14, 16 bits data_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+ 15, 14, /* octet 15~14, low 16 bits pkt_len */
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 0xFF, 0xFF, /*pkt_type set as unknown */
+ /* second descriptor */
+ 7, 6, 5, 4, /* octet 4~7, 32bits rss */
+ 3, 2, /* octet 2~3, low 16 bits vlan_macip */
+ 15, 14, /* octet 15~14, 16 bits data_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+ 15, 14, /* octet 15~14, low 16 bits pkt_len */
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 0xFF, 0xFF /*pkt_type set as unknown */
+ );
+ /**
+ * compile-time check the above crc and shuffle layout is correct.
+ * NOTE: the first field (lowest address) is given last in set_epi
+ * calls above.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
+
+ /* Status/Error flag masks */
+ /**
+ * mask everything except the RSS, flow director and VLAN flags.
+ * bit2 is for the VLAN tag, bit11 for the flow director indication,
+ * and bit13:12 for the RSS indication. Bits 3-5 of the error
+ * field (bits 22-24) are for IP/L4 checksum errors.
+ */
+ const __m256i flags_mask =
+ _mm256_set1_epi32((1 << 2) | (1 << 11) |
+ (3 << 12) | (7 << 22));
+ /**
+ * data to be shuffled by result of flag mask. If VLAN bit is set,
+ * (bit 2), then position 4 in this array will be used in the
+ * destination
+ */
+ const __m256i vlan_flags_shuf =
+ _mm256_set_epi32(0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0,
+ 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0);
+ /**
+ * data to be shuffled by result of flag mask, shifted down 11.
+ * If RSS/FDIR bits are set, shuffle moves appropriate flags in
+ * place.
+ */
+ const __m256i rss_flags_shuf =
+ _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
+ PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH,
+ 0, 0, 0, 0, PKT_RX_FDIR, 0,/* end up 128-bits */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH,
+ 0, 0, 0, 0, PKT_RX_FDIR, 0);
+
+ /**
+ * data to be shuffled by the result of the flags mask shifted by 22
+ * bits. This gives us the l3_l4 flags.
+ */
+ const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
+ /* shift right 1 bit to make sure it does not exceed 255 */
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_BAD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
+ (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
+ PKT_RX_IP_CKSUM_BAD >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1,
+ /* second 128-bits */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_BAD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
+ (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
+ PKT_RX_IP_CKSUM_BAD >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1);
+
+ const __m256i cksum_mask =
+ _mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD);
+
+ RTE_SET_USED(avx_aligned); /* for 32B descriptors we don't use this */
+
+ uint16_t i, received;
+
+ for (i = 0, received = 0; i < nb_pkts;
+ i += IAVF_DESCS_PER_LOOP_AVX,
+ rxdp += IAVF_DESCS_PER_LOOP_AVX) {
+ /* step 1, copy over 8 mbuf pointers to rx_pkts array */
+ _mm256_storeu_si256((void *)&rx_pkts[i],
+ _mm256_loadu_si256((void *)&sw_ring[i]));
+#ifdef RTE_ARCH_X86_64
+ _mm256_storeu_si256
+ ((void *)&rx_pkts[i + 4],
+ _mm256_loadu_si256((void *)&sw_ring[i + 4]));
+#endif
+
+ __m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7;
+#ifdef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+ /* for AVX we need alignment otherwise loads are not atomic */
+ if (avx_aligned) {
+ /* load in descriptors, 2 at a time, in reverse order */
+ raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6));
+ rte_compiler_barrier();
+ raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4));
+ rte_compiler_barrier();
+ raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2));
+ rte_compiler_barrier();
+ raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0));
+ } else
+#endif
+ {
+ const __m128i raw_desc7 =
+ _mm_load_si128((void *)(rxdp + 7));
+ rte_compiler_barrier();
+ const __m128i raw_desc6 =
+ _mm_load_si128((void *)(rxdp + 6));
+ rte_compiler_barrier();
+ const __m128i raw_desc5 =
+ _mm_load_si128((void *)(rxdp + 5));
+ rte_compiler_barrier();
+ const __m128i raw_desc4 =
+ _mm_load_si128((void *)(rxdp + 4));
+ rte_compiler_barrier();
+ const __m128i raw_desc3 =
+ _mm_load_si128((void *)(rxdp + 3));
+ rte_compiler_barrier();
+ const __m128i raw_desc2 =
+ _mm_load_si128((void *)(rxdp + 2));
+ rte_compiler_barrier();
+ const __m128i raw_desc1 =
+ _mm_load_si128((void *)(rxdp + 1));
+ rte_compiler_barrier();
+ const __m128i raw_desc0 =
+ _mm_load_si128((void *)(rxdp + 0));
+
+ raw_desc6_7 =
+ _mm256_inserti128_si256
+ (_mm256_castsi128_si256(raw_desc6),
+ raw_desc7, 1);
+ raw_desc4_5 =
+ _mm256_inserti128_si256
+ (_mm256_castsi128_si256(raw_desc4),
+ raw_desc5, 1);
+ raw_desc2_3 =
+ _mm256_inserti128_si256
+ (_mm256_castsi128_si256(raw_desc2),
+ raw_desc3, 1);
+ raw_desc0_1 =
+ _mm256_inserti128_si256
+ (_mm256_castsi128_si256(raw_desc0),
+ raw_desc1, 1);
+ }
+
+ if (split_packet) {
+ int j;
+
+ for (j = 0; j < IAVF_DESCS_PER_LOOP_AVX; j++)
+ rte_mbuf_prefetch_part2(rx_pkts[i + j]);
+ }
+
+ /**
+ * convert descriptors 4-7 into mbufs, adjusting length and
+ * re-arranging fields. Then write into the mbuf
+ */
+ const __m256i len6_7 = _mm256_slli_epi32(raw_desc6_7,
+ PKTLEN_SHIFT);
+ const __m256i len4_5 = _mm256_slli_epi32(raw_desc4_5,
+ PKTLEN_SHIFT);
+ const __m256i desc6_7 = _mm256_blend_epi16(raw_desc6_7,
+ len6_7, 0x80);
+ const __m256i desc4_5 = _mm256_blend_epi16(raw_desc4_5,
+ len4_5, 0x80);
+ __m256i mb6_7 = _mm256_shuffle_epi8(desc6_7, shuf_msk);
+ __m256i mb4_5 = _mm256_shuffle_epi8(desc4_5, shuf_msk);
+
+ mb6_7 = _mm256_add_epi16(mb6_7, crc_adjust);
+ mb4_5 = _mm256_add_epi16(mb4_5, crc_adjust);
+ /**
+ * to get packet types, shift 64-bit values down 30 bits
+ * so that the ptype ends up in the lower 8 bits of each
+ */
+ const __m256i ptypes6_7 = _mm256_srli_epi64(desc6_7, 30);
+ const __m256i ptypes4_5 = _mm256_srli_epi64(desc4_5, 30);
+ const uint8_t ptype7 = _mm256_extract_epi8(ptypes6_7, 24);
+ const uint8_t ptype6 = _mm256_extract_epi8(ptypes6_7, 8);
+ const uint8_t ptype5 = _mm256_extract_epi8(ptypes4_5, 24);
+ const uint8_t ptype4 = _mm256_extract_epi8(ptypes4_5, 8);
+
+ mb6_7 = _mm256_insert_epi32(mb6_7, type_table[ptype7], 4);
+ mb6_7 = _mm256_insert_epi32(mb6_7, type_table[ptype6], 0);
+ mb4_5 = _mm256_insert_epi32(mb4_5, type_table[ptype5], 4);
+ mb4_5 = _mm256_insert_epi32(mb4_5, type_table[ptype4], 0);
+ /* merge the status bits into one register */
+ const __m256i status4_7 = _mm256_unpackhi_epi32(desc6_7,
+ desc4_5);
+
+ /**
+ * convert descriptors 0-3 into mbufs, adjusting length and
+ * re-arranging fields. Then write into the mbuf
+ */
+ const __m256i len2_3 = _mm256_slli_epi32(raw_desc2_3,
+ PKTLEN_SHIFT);
+ const __m256i len0_1 = _mm256_slli_epi32(raw_desc0_1,
+ PKTLEN_SHIFT);
+ const __m256i desc2_3 = _mm256_blend_epi16(raw_desc2_3,
+ len2_3, 0x80);
+ const __m256i desc0_1 = _mm256_blend_epi16(raw_desc0_1,
+ len0_1, 0x80);
+ __m256i mb2_3 = _mm256_shuffle_epi8(desc2_3, shuf_msk);
+ __m256i mb0_1 = _mm256_shuffle_epi8(desc0_1, shuf_msk);
+
+ mb2_3 = _mm256_add_epi16(mb2_3, crc_adjust);
+ mb0_1 = _mm256_add_epi16(mb0_1, crc_adjust);
+ /* get the packet types */
+ const __m256i ptypes2_3 = _mm256_srli_epi64(desc2_3, 30);
+ const __m256i ptypes0_1 = _mm256_srli_epi64(desc0_1, 30);
+ const uint8_t ptype3 = _mm256_extract_epi8(ptypes2_3, 24);
+ const uint8_t ptype2 = _mm256_extract_epi8(ptypes2_3, 8);
+ const uint8_t ptype1 = _mm256_extract_epi8(ptypes0_1, 24);
+ const uint8_t ptype0 = _mm256_extract_epi8(ptypes0_1, 8);
+
+ mb2_3 = _mm256_insert_epi32(mb2_3, type_table[ptype3], 4);
+ mb2_3 = _mm256_insert_epi32(mb2_3, type_table[ptype2], 0);
+ mb0_1 = _mm256_insert_epi32(mb0_1, type_table[ptype1], 4);
+ mb0_1 = _mm256_insert_epi32(mb0_1, type_table[ptype0], 0);
+ /* merge the status bits into one register */
+ const __m256i status0_3 = _mm256_unpackhi_epi32(desc2_3,
+ desc0_1);
+
+ /**
+ * take the two sets of status bits and merge to one
+ * After merge, the packets status flags are in the
+ * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
+ */
+ __m256i status0_7 = _mm256_unpacklo_epi64(status4_7,
+ status0_3);
+
+ /* now do flag manipulation */
+
+ /* get only flag/error bits we want */
+ const __m256i flag_bits =
+ _mm256_and_si256(status0_7, flags_mask);
+ /* set vlan and rss flags */
+ const __m256i vlan_flags =
+ _mm256_shuffle_epi8(vlan_flags_shuf, flag_bits);
+ const __m256i rss_flags =
+ _mm256_shuffle_epi8(rss_flags_shuf,
+ _mm256_srli_epi32(flag_bits, 11));
+ /**
+ * l3_l4_error flags, shuffle, then shift to correct adjustment
+ * of flags in flags_shuf, and finally mask out extra bits
+ */
+ __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
+ _mm256_srli_epi32(flag_bits, 22));
+ l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
+ l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
+
+ /* merge flags */
+ const __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
+ _mm256_or_si256(rss_flags, vlan_flags));
+ /**
+ * At this point, we have the 8 sets of flags in the low 16-bits
+ * of each 32-bit value of mbuf_flags.
+ * We want to extract these, and merge them with the mbuf init
+ * data so we can do a single write to the mbuf to set the flags
+ * and all the other initialization fields. Extracting the
+ * appropriate flags means that we have to do a shift and blend
+ * for each mbuf before we do the write. However, we can also
+ * add in the previously computed rx_descriptor fields to
+ * make a single 256-bit write per mbuf
+ */
+ /* check the structure matches expectations */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
+ offsetof(struct rte_mbuf, rearm_data) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
+ RTE_ALIGN(offsetof(struct rte_mbuf,
+ rearm_data),
+ 16));
+ /* build up data and do writes */
+ __m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5,
+ rearm6, rearm7;
+ rearm6 = _mm256_blend_epi32(mbuf_init,
+ _mm256_slli_si256(mbuf_flags, 8),
+ 0x04);
+ rearm4 = _mm256_blend_epi32(mbuf_init,
+ _mm256_slli_si256(mbuf_flags, 4),
+ 0x04);
+ rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04);
+ rearm0 = _mm256_blend_epi32(mbuf_init,
+ _mm256_srli_si256(mbuf_flags, 4),
+ 0x04);
+ /* permute to add in the rx_descriptor e.g. rss fields */
+ rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20);
+ rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20);
+ rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20);
+ rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20);
+ /* write to mbuf */
+ _mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data,
+ rearm6);
+ _mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data,
+ rearm4);
+ _mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data,
+ rearm2);
+ _mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data,
+ rearm0);
+
+ /* repeat for the odd mbufs */
+ const __m256i odd_flags =
+ _mm256_castsi128_si256
+ (_mm256_extracti128_si256(mbuf_flags, 1));
+ rearm7 = _mm256_blend_epi32(mbuf_init,
+ _mm256_slli_si256(odd_flags, 8),
+ 0x04);
+ rearm5 = _mm256_blend_epi32(mbuf_init,
+ _mm256_slli_si256(odd_flags, 4),
+ 0x04);
+ rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04);
+ rearm1 = _mm256_blend_epi32(mbuf_init,
+ _mm256_srli_si256(odd_flags, 4),
+ 0x04);
+ /* since odd mbufs are already in hi 128-bits use blend */
+ rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0);
+ rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0);
+ rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0);
+ rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0);
+ /* again write to mbufs */
+ _mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data,
+ rearm7);
+ _mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data,
+ rearm5);
+ _mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data,
+ rearm3);
+ _mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data,
+ rearm1);
+
+ /* extract and record EOP bit */
+ if (split_packet) {
+ const __m128i eop_mask =
+ _mm_set1_epi16(1 << IAVF_RX_DESC_STATUS_EOF_SHIFT);
+ const __m256i eop_bits256 = _mm256_and_si256(status0_7,
+ eop_check);
+ /* pack status bits into a single 128-bit register */
+ const __m128i eop_bits =
+ _mm_packus_epi32
+ (_mm256_castsi256_si128(eop_bits256),
+ _mm256_extractf128_si256(eop_bits256,
+ 1));
+ /**
+ * flip bits, and mask out the EOP bit, which is now
+ * a split-packet bit (i.e. !EOP) rather than an EOP one.
+ */
+ __m128i split_bits = _mm_andnot_si128(eop_bits,
+ eop_mask);
+ /**
+ * eop bits are out of order, so we need to shuffle them
+ * back into order again. In doing so, only use low 8
+ * bits, which acts like another pack instruction
+ * The original order is (hi->lo): 1,3,5,7,0,2,4,6
+ * [Since we use epi8, the 16-bit positions are
+ * multiplied by 2 in the eop_shuffle value.]
+ */
+ __m128i eop_shuffle =
+ _mm_set_epi8(/* zero hi 64b */
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ /* move values to lo 64b */
+ 8, 0, 10, 2,
+ 12, 4, 14, 6);
+ split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle);
+ *(uint64_t *)split_packet =
+ _mm_cvtsi128_si64(split_bits);
+ split_packet += IAVF_DESCS_PER_LOOP_AVX;
+ }
+
+ /* perform dd_check */
+ status0_7 = _mm256_and_si256(status0_7, dd_check);
+ status0_7 = _mm256_packs_epi32(status0_7,
+ _mm256_setzero_si256());
+
+ uint64_t burst = __builtin_popcountll
+ (_mm_cvtsi128_si64
+ (_mm256_extracti128_si256
+ (status0_7, 1)));
+ burst += __builtin_popcountll
+ (_mm_cvtsi128_si64
+ (_mm256_castsi256_si128(status0_7)));
+ received += burst;
+ if (burst != IAVF_DESCS_PER_LOOP_AVX)
+ break;
+ }
+
+ /* update tail pointers */
+ rxq->rx_tail += received;
+ rxq->rx_tail &= (rxq->nb_rx_desc - 1);
+ if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep avx2 aligned */
+ rxq->rx_tail--;
+ received--;
+ }
+ rxq->rxrearm_nb += received;
+ return received;
+}
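One detail of the flag handling above worth spelling out: _mm256_shuffle_epi8 can only produce 8-bit lane values, but a combination such as good IP plus good L4 checksum flags does not fit in one byte, so the l3_l4_flags_shuf table stores every entry shifted right by one and the result is shifted back left afterwards. A scalar demonstration of the same trick, using hypothetical flag positions that only mirror the PKT_RX_* bits:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* stand-ins for PKT_RX_IP_CKSUM_GOOD / PKT_RX_L4_CKSUM_GOOD */
        const uint64_t ip_good = 1ULL << 7;
        const uint64_t l4_good = 1ULL << 8;

        uint64_t combined = ip_good | l4_good;      /* 0x180: too big for a byte */
        uint8_t stored = (uint8_t)(combined >> 1);  /* 0xC0: fits in the table */
        uint64_t restored = (uint64_t)stored << 1;  /* 0x180 again after shuffle */

        printf("round trip ok: %d\n", restored == combined);
        return 0;
}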
+
+static inline __m256i
+flex_rxd_to_fdir_flags_vec_avx2(const __m256i fdir_id0_7)
+{
+#define FDID_MIS_MAGIC 0xFFFFFFFF
+ RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
+ RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
+ const __m256i pkt_fdir_bit = _mm256_set1_epi32(PKT_RX_FDIR |
+ PKT_RX_FDIR_ID);
+ /* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
+ const __m256i fdir_mis_mask = _mm256_set1_epi32(FDID_MIS_MAGIC);
+ __m256i fdir_mask = _mm256_cmpeq_epi32(fdir_id0_7,
+ fdir_mis_mask);
+ /* this XOR op inverts each bit of the fdir_mask */
+ fdir_mask = _mm256_xor_si256(fdir_mask, fdir_mis_mask);
+ const __m256i fdir_flags = _mm256_and_si256(fdir_mask, pkt_fdir_bit);
+
+ return fdir_flags;
+}
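flex_rxd_to_fdir_flags_vec_avx2() is the vector form of a simple rule: a flow_id of 0xFFFFFFFF means the flow director did not match, and anything else gets both the FDIR and FDIR_ID flags. A scalar sketch of that rule with stand-in flag values:

#include <stdint.h>

#define SKETCH_PKT_RX_FDIR     (1ULL << 2)   /* mirrors PKT_RX_FDIR */
#define SKETCH_PKT_RX_FDIR_ID  (1ULL << 13)  /* mirrors PKT_RX_FDIR_ID */

static inline uint64_t
sketch_fdir_flags(uint32_t flow_id)
{
        if (flow_id == 0xFFFFFFFF)      /* FDID_MIS_MAGIC: no FDIR match */
                return 0;
        return SKETCH_PKT_RX_FDIR | SKETCH_PKT_RX_FDIR_ID;
}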
+
+static inline uint16_t
+_iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts, uint8_t *split_packet)
+{
+#define IAVF_DESCS_PER_LOOP_AVX 8
+
+ const uint32_t *type_table = rxq->vsi->adapter->ptype_tbl;
+
+ const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
+ 0, rxq->mbuf_initializer);
+ struct rte_mbuf **sw_ring = &rxq->sw_ring[rxq->rx_tail];
+ volatile union iavf_rx_flex_desc *rxdp =
+ (union iavf_rx_flex_desc *)rxq->rx_ring + rxq->rx_tail;
+
+ rte_prefetch0(rxdp);
+
+ /* nb_pkts has to be floor-aligned to IAVF_DESCS_PER_LOOP_AVX */
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_DESCS_PER_LOOP_AVX);
+
+ /* See if we need to rearm the RX queue - gives the prefetch a bit
+ * of time to act
+ */
+ if (rxq->rxrearm_nb > IAVF_RXQ_REARM_THRESH)
+ iavf_rxq_rearm(rxq);
+
+ /* Before we start moving massive data around, check to see if
+ * there is actually a packet available
+ */
+ if (!(rxdp->wb.status_error0 &
+ rte_cpu_to_le_32(1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
+ return 0;
+
+ /* constants used in processing loop */
+ const __m256i crc_adjust =
+ _mm256_set_epi16
+ (/* first descriptor */
+ 0, 0, 0, /* ignore non-length fields */
+ -rxq->crc_len, /* sub crc on data_len */
+ 0, /* ignore high-16bits of pkt_len */
+ -rxq->crc_len, /* sub crc on pkt_len */
+ 0, 0, /* ignore pkt_type field */
+ /* second descriptor */
+ 0, 0, 0, /* ignore non-length fields */
+ -rxq->crc_len, /* sub crc on data_len */
+ 0, /* ignore high-16bits of pkt_len */
+ -rxq->crc_len, /* sub crc on pkt_len */
+ 0, 0 /* ignore pkt_type field */
+ );
+
+ /* 8 packets DD mask, LSB in each 32-bit value */
+ const __m256i dd_check = _mm256_set1_epi32(1);
+
+ /* 8 packets EOP mask, second-LSB in each 32-bit value */
+ const __m256i eop_check = _mm256_slli_epi32(dd_check,
+ IAVF_RX_FLEX_DESC_STATUS0_EOF_S);
+
+ /* mask to shuffle from desc. to mbuf (2 descriptors)*/
+ const __m256i shuf_msk =
+ _mm256_set_epi8
+ (/* first descriptor */
+ 0xFF, 0xFF,
+ 0xFF, 0xFF, /* rss hash parsed separately */
+ 11, 10, /* octet 10~11, 16 bits vlan_macip */
+ 5, 4, /* octet 4~5, 16 bits data_len */
+ 0xFF, 0xFF, /* skip hi 16 bits pkt_len, zero out */
+ 5, 4, /* octet 4~5, 16 bits pkt_len */
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ /* second descriptor */
+ 0xFF, 0xFF,
+ 0xFF, 0xFF, /* rss hash parsed separately */
+ 11, 10, /* octet 10~11, 16 bits vlan_macip */
+ 5, 4, /* octet 4~5, 16 bits data_len */
+ 0xFF, 0xFF, /* skip hi 16 bits pkt_len, zero out */
+ 5, 4, /* octet 4~5, 16 bits pkt_len */
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 0xFF, 0xFF /* pkt_type set as unknown */
+ );
+ /**
+ * compile-time check the above crc and shuffle layout is correct.
+ * NOTE: the first field (lowest address) is given last in set_epi
+ * calls above.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
+
+ /* Status/Error flag masks */
+ /**
+ * mask everything except Checksum Reports, RSS indication
+ * and VLAN indication.
+ * bit6:4 for IP/L4 checksum errors.
+ * bit12 is for RSS indication.
+ * bit13 is for VLAN indication.
+ */
+ const __m256i flags_mask =
+ _mm256_set1_epi32((7 << 4) | (1 << 12) | (1 << 13));
+ /**
+ * data to be shuffled by the result of the flags mask shifted by 4
+ * bits. This gives us the l3_l4 flags.
+ */
+ const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
+ /* shift right 1 bit to make sure it does not exceed 255 */
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
+ /* second 128-bits */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1);
+ const __m256i cksum_mask =
+ _mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD);
+ /**
+ * data to be shuffled by result of flag mask, shifted down 12.
+ * If RSS(bit12)/VLAN(bit13) are set,
+ * shuffle moves appropriate flags in place.
+ */
+ const __m256i rss_vlan_flags_shuf = _mm256_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_RSS_HASH, 0,
+ /* second 128-bits */
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_RSS_HASH, 0);
+
+ uint16_t i, received;
+
+ for (i = 0, received = 0; i < nb_pkts;
+ i += IAVF_DESCS_PER_LOOP_AVX,
+ rxdp += IAVF_DESCS_PER_LOOP_AVX) {
+ /* step 1, copy over 8 mbuf pointers to rx_pkts array */
+ _mm256_storeu_si256((void *)&rx_pkts[i],
+ _mm256_loadu_si256((void *)&sw_ring[i]));
+#ifdef RTE_ARCH_X86_64
+ _mm256_storeu_si256
+ ((void *)&rx_pkts[i + 4],
+ _mm256_loadu_si256((void *)&sw_ring[i + 4]));
+#endif
+
+ __m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7;
+
+ const __m128i raw_desc7 =
+ _mm_load_si128((void *)(rxdp + 7));
+ rte_compiler_barrier();
+ const __m128i raw_desc6 =
+ _mm_load_si128((void *)(rxdp + 6));
+ rte_compiler_barrier();
+ const __m128i raw_desc5 =
+ _mm_load_si128((void *)(rxdp + 5));
+ rte_compiler_barrier();
+ const __m128i raw_desc4 =
+ _mm_load_si128((void *)(rxdp + 4));
+ rte_compiler_barrier();
+ const __m128i raw_desc3 =
+ _mm_load_si128((void *)(rxdp + 3));
+ rte_compiler_barrier();
+ const __m128i raw_desc2 =
+ _mm_load_si128((void *)(rxdp + 2));
+ rte_compiler_barrier();
+ const __m128i raw_desc1 =
+ _mm_load_si128((void *)(rxdp + 1));
+ rte_compiler_barrier();
+ const __m128i raw_desc0 =
+ _mm_load_si128((void *)(rxdp + 0));
+
+ raw_desc6_7 =
+ _mm256_inserti128_si256
+ (_mm256_castsi128_si256(raw_desc6),
+ raw_desc7, 1);
+ raw_desc4_5 =
+ _mm256_inserti128_si256
+ (_mm256_castsi128_si256(raw_desc4),
+ raw_desc5, 1);
+ raw_desc2_3 =
+ _mm256_inserti128_si256
+ (_mm256_castsi128_si256(raw_desc2),
+ raw_desc3, 1);
+ raw_desc0_1 =
+ _mm256_inserti128_si256
+ (_mm256_castsi128_si256(raw_desc0),
+ raw_desc1, 1);
+
+ if (split_packet) {
+ int j;
+
+ for (j = 0; j < IAVF_DESCS_PER_LOOP_AVX; j++)
+ rte_mbuf_prefetch_part2(rx_pkts[i + j]);
+ }
+
+ /**
+ * convert descriptors 4-7 into mbufs, re-arrange fields.
+ * Then write into the mbuf.
+ */
+ __m256i mb6_7 = _mm256_shuffle_epi8(raw_desc6_7, shuf_msk);
+ __m256i mb4_5 = _mm256_shuffle_epi8(raw_desc4_5, shuf_msk);
+
+ mb6_7 = _mm256_add_epi16(mb6_7, crc_adjust);
+ mb4_5 = _mm256_add_epi16(mb4_5, crc_adjust);
+ /**
+ * to get packet types, ptype is located in bit16-25
+ * of each 128bits
+ */
+ const __m256i ptype_mask =
+ _mm256_set1_epi16(IAVF_RX_FLEX_DESC_PTYPE_M);
+ const __m256i ptypes6_7 =
+ _mm256_and_si256(raw_desc6_7, ptype_mask);
+ const __m256i ptypes4_5 =
+ _mm256_and_si256(raw_desc4_5, ptype_mask);
+ const uint16_t ptype7 = _mm256_extract_epi16(ptypes6_7, 9);
+ const uint16_t ptype6 = _mm256_extract_epi16(ptypes6_7, 1);
+ const uint16_t ptype5 = _mm256_extract_epi16(ptypes4_5, 9);
+ const uint16_t ptype4 = _mm256_extract_epi16(ptypes4_5, 1);
+
+ mb6_7 = _mm256_insert_epi32(mb6_7, type_table[ptype7], 4);
+ mb6_7 = _mm256_insert_epi32(mb6_7, type_table[ptype6], 0);
+ mb4_5 = _mm256_insert_epi32(mb4_5, type_table[ptype5], 4);
+ mb4_5 = _mm256_insert_epi32(mb4_5, type_table[ptype4], 0);
+ /* merge the status bits into one register */
+ const __m256i status4_7 = _mm256_unpackhi_epi32(raw_desc6_7,
+ raw_desc4_5);
+
+ /**
+ * convert descriptors 0-3 into mbufs, re-arrange fields.
+ * Then write into the mbuf.
+ */
+ __m256i mb2_3 = _mm256_shuffle_epi8(raw_desc2_3, shuf_msk);
+ __m256i mb0_1 = _mm256_shuffle_epi8(raw_desc0_1, shuf_msk);
+
+ mb2_3 = _mm256_add_epi16(mb2_3, crc_adjust);
+ mb0_1 = _mm256_add_epi16(mb0_1, crc_adjust);
+ /**
+ * to get packet types, ptype is located in bit16-25
+ * of each 128bits
+ */
+ const __m256i ptypes2_3 =
+ _mm256_and_si256(raw_desc2_3, ptype_mask);
+ const __m256i ptypes0_1 =
+ _mm256_and_si256(raw_desc0_1, ptype_mask);
+ const uint16_t ptype3 = _mm256_extract_epi16(ptypes2_3, 9);
+ const uint16_t ptype2 = _mm256_extract_epi16(ptypes2_3, 1);
+ const uint16_t ptype1 = _mm256_extract_epi16(ptypes0_1, 9);
+ const uint16_t ptype0 = _mm256_extract_epi16(ptypes0_1, 1);
+
+ mb2_3 = _mm256_insert_epi32(mb2_3, type_table[ptype3], 4);
+ mb2_3 = _mm256_insert_epi32(mb2_3, type_table[ptype2], 0);
+ mb0_1 = _mm256_insert_epi32(mb0_1, type_table[ptype1], 4);
+ mb0_1 = _mm256_insert_epi32(mb0_1, type_table[ptype0], 0);
+ /* merge the status bits into one register */
+ const __m256i status0_3 = _mm256_unpackhi_epi32(raw_desc2_3,
+ raw_desc0_1);
+
+ /**
+ * take the two sets of status bits and merge to one
+ * After merge, the packets status flags are in the
+ * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
+ */
+ __m256i status0_7 = _mm256_unpacklo_epi64(status4_7,
+ status0_3);
+
+ /* now do flag manipulation */
+
+ /* get only flag/error bits we want */
+ const __m256i flag_bits =
+ _mm256_and_si256(status0_7, flags_mask);
+ /**
+ * l3_l4_error flags, shuffle, then shift to correct adjustment
+ * of flags in flags_shuf, and finally mask out extra bits
+ */
+ __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
+ _mm256_srli_epi32(flag_bits, 4));
+ l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
+ l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
+ /* set rss and vlan flags */
+ const __m256i rss_vlan_flag_bits =
+ _mm256_srli_epi32(flag_bits, 12);
+ const __m256i rss_vlan_flags =
+ _mm256_shuffle_epi8(rss_vlan_flags_shuf,
+ rss_vlan_flag_bits);
+
+ /* merge flags */
+ __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
+ rss_vlan_flags);
+
+ if (rxq->fdir_enabled) {
+ const __m256i fdir_id4_7 =
+ _mm256_unpackhi_epi32(raw_desc6_7, raw_desc4_5);
+
+ const __m256i fdir_id0_3 =
+ _mm256_unpackhi_epi32(raw_desc2_3, raw_desc0_1);
+
+ const __m256i fdir_id0_7 =
+ _mm256_unpackhi_epi64(fdir_id4_7, fdir_id0_3);
+
+ const __m256i fdir_flags =
+ flex_rxd_to_fdir_flags_vec_avx2(fdir_id0_7);
+
+ /* merge with fdir_flags */
+ mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_flags);
+
+ /* write to mbuf: have to use scalar store here */
+ rx_pkts[i + 0]->hash.fdir.hi =
+ _mm256_extract_epi32(fdir_id0_7, 3);
+
+ rx_pkts[i + 1]->hash.fdir.hi =
+ _mm256_extract_epi32(fdir_id0_7, 7);
+
+ rx_pkts[i + 2]->hash.fdir.hi =
+ _mm256_extract_epi32(fdir_id0_7, 2);
+
+ rx_pkts[i + 3]->hash.fdir.hi =
+ _mm256_extract_epi32(fdir_id0_7, 6);
+
+ rx_pkts[i + 4]->hash.fdir.hi =
+ _mm256_extract_epi32(fdir_id0_7, 1);
+
+ rx_pkts[i + 5]->hash.fdir.hi =
+ _mm256_extract_epi32(fdir_id0_7, 5);
+
+ rx_pkts[i + 6]->hash.fdir.hi =
+ _mm256_extract_epi32(fdir_id0_7, 0);
+
+ rx_pkts[i + 7]->hash.fdir.hi =
+ _mm256_extract_epi32(fdir_id0_7, 4);
+ } /* if() on fdir_enabled */
+
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+ /**
+ * need to load the 2nd 16B of each desc for RSS hash parsing;
+ * getting into this branch causes a performance drop.
+ */
+ if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_RSS_HASH) {
+ /* load bottom half of every 32B desc */
+ const __m128i raw_desc_bh7 =
+ _mm_load_si128
+ ((void *)(&rxdp[7].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh6 =
+ _mm_load_si128
+ ((void *)(&rxdp[6].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh5 =
+ _mm_load_si128
+ ((void *)(&rxdp[5].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh4 =
+ _mm_load_si128
+ ((void *)(&rxdp[4].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh3 =
+ _mm_load_si128
+ ((void *)(&rxdp[3].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh2 =
+ _mm_load_si128
+ ((void *)(&rxdp[2].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh1 =
+ _mm_load_si128
+ ((void *)(&rxdp[1].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh0 =
+ _mm_load_si128
+ ((void *)(&rxdp[0].wb.status_error1));
+
+ __m256i raw_desc_bh6_7 =
+ _mm256_inserti128_si256
+ (_mm256_castsi128_si256(raw_desc_bh6),
+ raw_desc_bh7, 1);
+ __m256i raw_desc_bh4_5 =
+ _mm256_inserti128_si256
+ (_mm256_castsi128_si256(raw_desc_bh4),
+ raw_desc_bh5, 1);
+ __m256i raw_desc_bh2_3 =
+ _mm256_inserti128_si256
+ (_mm256_castsi128_si256(raw_desc_bh2),
+ raw_desc_bh3, 1);
+ __m256i raw_desc_bh0_1 =
+ _mm256_inserti128_si256
+ (_mm256_castsi128_si256(raw_desc_bh0),
+ raw_desc_bh1, 1);
+
+ /**
+ * to shift the 32b RSS hash value to the
+ * highest 32b of each 128b before mask
+ */
+ __m256i rss_hash6_7 =
+ _mm256_slli_epi64(raw_desc_bh6_7, 32);
+ __m256i rss_hash4_5 =
+ _mm256_slli_epi64(raw_desc_bh4_5, 32);
+ __m256i rss_hash2_3 =
+ _mm256_slli_epi64(raw_desc_bh2_3, 32);
+ __m256i rss_hash0_1 =
+ _mm256_slli_epi64(raw_desc_bh0_1, 32);
+
+ __m256i rss_hash_msk =
+ _mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
+ 0xFFFFFFFF, 0, 0, 0);
+
+ rss_hash6_7 = _mm256_and_si256
+ (rss_hash6_7, rss_hash_msk);
+ rss_hash4_5 = _mm256_and_si256
+ (rss_hash4_5, rss_hash_msk);
+ rss_hash2_3 = _mm256_and_si256
+ (rss_hash2_3, rss_hash_msk);
+ rss_hash0_1 = _mm256_and_si256
+ (rss_hash0_1, rss_hash_msk);
+
+ mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7);
+ mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5);
+ mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3);
+ mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
+ } /* if() on RSS hash parsing */
+#endif
+
+ /**
+ * At this point, we have the 8 sets of flags in the low 16-bits
+ * of each 32-bit value in vlan0.
+ * We want to extract these, and merge them with the mbuf init
+ * data so we can do a single write to the mbuf to set the flags
+ * and all the other initialization fields. Extracting the
+ * appropriate flags means that we have to do a shift and blend
+ * for each mbuf before we do the write. However, we can also
+ * add in the previously computed rx_descriptor fields to
+ * make a single 256-bit write per mbuf
+ */
+ /* check the structure matches expectations */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
+ offsetof(struct rte_mbuf, rearm_data) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
+ RTE_ALIGN(offsetof(struct rte_mbuf,
+ rearm_data),
+ 16));
+ /* build up data and do writes */
+ __m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5,
+ rearm6, rearm7;
+ rearm6 = _mm256_blend_epi32(mbuf_init,
+ _mm256_slli_si256(mbuf_flags, 8),
+ 0x04);
+ rearm4 = _mm256_blend_epi32(mbuf_init,
+ _mm256_slli_si256(mbuf_flags, 4),
+ 0x04);
+ rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04);
+ rearm0 = _mm256_blend_epi32(mbuf_init,
+ _mm256_srli_si256(mbuf_flags, 4),
+ 0x04);
+ /* permute to add in the rx_descriptor e.g. rss fields */
+ rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20);
+ rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20);
+ rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20);
+ rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20);
+ /* write to mbuf */
+ _mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data,
+ rearm6);
+ _mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data,
+ rearm4);
+ _mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data,
+ rearm2);
+ _mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data,
+ rearm0);
+
+ /* repeat for the odd mbufs */
+ const __m256i odd_flags =
+ _mm256_castsi128_si256
+ (_mm256_extracti128_si256(mbuf_flags, 1));
+ rearm7 = _mm256_blend_epi32(mbuf_init,
+ _mm256_slli_si256(odd_flags, 8),
+ 0x04);
+ rearm5 = _mm256_blend_epi32(mbuf_init,
+ _mm256_slli_si256(odd_flags, 4),
+ 0x04);
+ rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04);
+ rearm1 = _mm256_blend_epi32(mbuf_init,
+ _mm256_srli_si256(odd_flags, 4),
+ 0x04);
+ /* since odd mbufs are already in hi 128-bits use blend */
+ rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0);
+ rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0);
+ rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0);
+ rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0);
+ /* again write to mbufs */
+ _mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data,
+ rearm7);
+ _mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data,
+ rearm5);
+ _mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data,
+ rearm3);
+ _mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data,
+ rearm1);
+
+ /* extract and record EOP bit */
+ if (split_packet) {
+ const __m128i eop_mask =
+ _mm_set1_epi16(1 <<
+ IAVF_RX_FLEX_DESC_STATUS0_EOF_S);
+ const __m256i eop_bits256 = _mm256_and_si256(status0_7,
+ eop_check);
+ /* pack status bits into a single 128-bit register */
+ const __m128i eop_bits =
+ _mm_packus_epi32
+ (_mm256_castsi256_si128(eop_bits256),
+ _mm256_extractf128_si256(eop_bits256,
+ 1));
+ /**
+ * flip the bits and mask out the EOP bit, which now acts as
+ * a split-packet bit, i.e. !EOP rather than EOP.
+ */
+ __m128i split_bits = _mm_andnot_si128(eop_bits,
+ eop_mask);
+ /**
+ * eop bits are out of order, so we need to shuffle them
+ * back into order again. In doing so, only use low 8
+ * bits, which acts like another pack instruction
+ * The original order is (hi->lo): 1,3,5,7,0,2,4,6
+ * [Since we use epi8, the 16-bit positions are
+ * multiplied by 2 in the eop_shuffle value.]
+ */
+ __m128i eop_shuffle =
+ _mm_set_epi8(/* zero hi 64b */
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ /* move values to lo 64b */
+ 8, 0, 10, 2,
+ 12, 4, 14, 6);
+ split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle);
+ *(uint64_t *)split_packet =
+ _mm_cvtsi128_si64(split_bits);
+ split_packet += IAVF_DESCS_PER_LOOP_AVX;
+ }
+
+ /* perform dd_check */
+ status0_7 = _mm256_and_si256(status0_7, dd_check);
+ status0_7 = _mm256_packs_epi32(status0_7,
+ _mm256_setzero_si256());
+
+ uint64_t burst = __builtin_popcountll
+ (_mm_cvtsi128_si64
+ (_mm256_extracti128_si256
+ (status0_7, 1)));
+ burst += __builtin_popcountll
+ (_mm_cvtsi128_si64
+ (_mm256_castsi256_si128(status0_7)));
+ received += burst;
+ if (burst != IAVF_DESCS_PER_LOOP_AVX)
+ break;
+ }
+
+ /* update tail pointers */
+ rxq->rx_tail += received;
+ rxq->rx_tail &= (rxq->nb_rx_desc - 1);
+ if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep avx2 aligned */
+ rxq->rx_tail--;
+ received--;
+ }
+ rxq->rxrearm_nb += received;
+ return received;
+}
+
+/**
+ * Notice:
+ * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet
+ */
+uint16_t
+iavf_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return _iavf_recv_raw_pkts_vec_avx2(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+/**
+ * Notice:
+ * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet
+ */
+uint16_t
+iavf_recv_pkts_vec_avx2_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return _iavf_recv_raw_pkts_vec_avx2_flex_rxd(rx_queue, rx_pkts,
+ nb_pkts, NULL);
+}
+
+/**
+ * vPMD receive routine that reassembles single burst of 32 scattered packets
+ * Notice:
+ * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet
+ */
+static uint16_t
+iavf_recv_scattered_burst_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct iavf_rx_queue *rxq = rx_queue;
+ uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0};
+
+ /* get some new buffers */
+ uint16_t nb_bufs = _iavf_recv_raw_pkts_vec_avx2(rxq, rx_pkts, nb_pkts,
+ split_flags);
+ if (nb_bufs == 0)
+ return 0;
+
+ /* happy day case, full burst + no packets to be joined */
+ const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+ if (!rxq->pkt_first_seg &&
+ split_fl64[0] == 0 && split_fl64[1] == 0 &&
+ split_fl64[2] == 0 && split_fl64[3] == 0)
+ return nb_bufs;
+
+ /* reassemble any packets that need reassembly */
+ unsigned int i = 0;
+
+ if (!rxq->pkt_first_seg) {
+ /* find the first split flag, and only reassemble from there */
+ while (i < nb_bufs && !split_flags[i])
+ i++;
+ if (i == nb_bufs)
+ return nb_bufs;
+ rxq->pkt_first_seg = rx_pkts[i];
+ }
+ return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+ &split_flags[i]);
+}
+
+/**
+ * vPMD receive routine that reassembles scattered packets.
+ * Main receive routine that can handle arbitrary burst sizes
+ * Notice:
+ * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet
+ */
+uint16_t
+iavf_recv_scattered_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t retval = 0;
+
+ while (nb_pkts > IAVF_VPMD_RX_MAX_BURST) {
+ uint16_t burst = iavf_recv_scattered_burst_vec_avx2(rx_queue,
+ rx_pkts + retval, IAVF_VPMD_RX_MAX_BURST);
+ retval += burst;
+ nb_pkts -= burst;
+ if (burst < IAVF_VPMD_RX_MAX_BURST)
+ return retval;
+ }
+ return retval + iavf_recv_scattered_burst_vec_avx2(rx_queue,
+ rx_pkts + retval, nb_pkts);
+}
+
+/**
+ * vPMD receive routine that reassembles single burst of
+ * 32 scattered packets for flex RxD
+ * Notice:
+ * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet
+ */
+static uint16_t
+iavf_recv_scattered_burst_vec_avx2_flex_rxd(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct iavf_rx_queue *rxq = rx_queue;
+ uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0};
+
+ /* get some new buffers */
+ uint16_t nb_bufs = _iavf_recv_raw_pkts_vec_avx2_flex_rxd(rxq,
+ rx_pkts, nb_pkts, split_flags);
+ if (nb_bufs == 0)
+ return 0;
+
+ /* happy day case, full burst + no packets to be joined */
+ const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+ if (!rxq->pkt_first_seg &&
+ split_fl64[0] == 0 && split_fl64[1] == 0 &&
+ split_fl64[2] == 0 && split_fl64[3] == 0)
+ return nb_bufs;
+
+ /* reassemble any packets that need reassembly */
+ unsigned int i = 0;
+
+ if (!rxq->pkt_first_seg) {
+ /* find the first split flag, and only reassemble from there */
+ while (i < nb_bufs && !split_flags[i])
+ i++;
+ if (i == nb_bufs)
+ return nb_bufs;
+ rxq->pkt_first_seg = rx_pkts[i];
+ }
+ return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+ &split_flags[i]);
+}
+
+/**
+ * vPMD receive routine that reassembles scattered packets for flex RxD.
+ * Main receive routine that can handle arbitrary burst sizes
+ * Notice:
+ * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet
+ */
+uint16_t
+iavf_recv_scattered_pkts_vec_avx2_flex_rxd(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t retval = 0;
+
+ while (nb_pkts > IAVF_VPMD_RX_MAX_BURST) {
+ uint16_t burst =
+ iavf_recv_scattered_burst_vec_avx2_flex_rxd
+ (rx_queue, rx_pkts + retval, IAVF_VPMD_RX_MAX_BURST);
+ retval += burst;
+ nb_pkts -= burst;
+ if (burst < IAVF_VPMD_RX_MAX_BURST)
+ return retval;
+ }
+ return retval + iavf_recv_scattered_burst_vec_avx2_flex_rxd(rx_queue,
+ rx_pkts + retval, nb_pkts);
+}
+
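+/* Write a single TX data descriptor: the low quadword carries the buffer
+ * DMA address, the high quadword carries the descriptor type, command flags
+ * and buffer length.
+ */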
+static inline void
+iavf_vtx1(volatile struct iavf_tx_desc *txdp,
+ struct rte_mbuf *pkt, uint64_t flags)
+{
+ uint64_t high_qw =
+ (IAVF_TX_DESC_DTYPE_DATA |
+ ((uint64_t)flags << IAVF_TXD_QW1_CMD_SHIFT) |
+ ((uint64_t)pkt->data_len << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT));
+
+ __m128i descriptor = _mm_set_epi64x(high_qw,
+ pkt->buf_physaddr + pkt->data_off);
+ _mm_store_si128((__m128i *)txdp, descriptor);
+}
+
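+/* Fill TX data descriptors for a burst of mbufs: one scalar store first if
+ * txdp is not 32-byte aligned, then two descriptors per 256-bit store, and
+ * scalar stores for any remainder.
+ */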
+static inline void
+iavf_vtx(volatile struct iavf_tx_desc *txdp,
+ struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
+{
+ const uint64_t hi_qw_tmpl = (IAVF_TX_DESC_DTYPE_DATA |
+ ((uint64_t)flags << IAVF_TXD_QW1_CMD_SHIFT));
+
+ /* if unaligned on 32-byte boundary, do one to align */
+ if (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) {
+ iavf_vtx1(txdp, *pkt, flags);
+ nb_pkts--, txdp++, pkt++;
+ }
+
+ /* do 4 packets at a time while possible, two descriptors per 256-bit store */
+ for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) {
+ uint64_t hi_qw3 =
+ hi_qw_tmpl |
+ ((uint64_t)pkt[3]->data_len <<
+ IAVF_TXD_QW1_TX_BUF_SZ_SHIFT);
+ uint64_t hi_qw2 =
+ hi_qw_tmpl |
+ ((uint64_t)pkt[2]->data_len <<
+ IAVF_TXD_QW1_TX_BUF_SZ_SHIFT);
+ uint64_t hi_qw1 =
+ hi_qw_tmpl |
+ ((uint64_t)pkt[1]->data_len <<
+ IAVF_TXD_QW1_TX_BUF_SZ_SHIFT);
+ uint64_t hi_qw0 =
+ hi_qw_tmpl |
+ ((uint64_t)pkt[0]->data_len <<
+ IAVF_TXD_QW1_TX_BUF_SZ_SHIFT);
+
+ __m256i desc2_3 =
+ _mm256_set_epi64x
+ (hi_qw3,
+ pkt[3]->buf_physaddr + pkt[3]->data_off,
+ hi_qw2,
+ pkt[2]->buf_physaddr + pkt[2]->data_off);
+ __m256i desc0_1 =
+ _mm256_set_epi64x
+ (hi_qw1,
+ pkt[1]->buf_physaddr + pkt[1]->data_off,
+ hi_qw0,
+ pkt[0]->buf_physaddr + pkt[0]->data_off);
+ _mm256_store_si256((void *)(txdp + 2), desc2_3);
+ _mm256_store_si256((void *)txdp, desc0_1);
+ }
+
+ /* do any last ones */
+ while (nb_pkts) {
+ iavf_vtx1(txdp, *pkt, flags);
+ txdp++, pkt++, nb_pkts--;
+ }
+}
+
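+/* Transmit a fixed burst of at most rs_thresh packets: free completed
+ * descriptors if needed, fill the ring (handling wrap-around), set the RS
+ * bit on the threshold descriptor and bump the tail register.
+ */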
+static inline uint16_t
+iavf_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
+ volatile struct iavf_tx_desc *txdp;
+ struct iavf_tx_entry *txep;
+ uint16_t n, nb_commit, tx_id;
+ /* bit2 is reserved and must be set to 1 according to Spec */
+ uint64_t flags = IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_ICRC;
+ uint64_t rs = IAVF_TX_DESC_CMD_RS | flags;
+
+ /* crossing the rs_thresh boundary is not allowed */
+ nb_pkts = RTE_MIN(nb_pkts, txq->rs_thresh);
+
+ if (txq->nb_free < txq->free_thresh)
+ iavf_tx_free_bufs(txq);
+
+ nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_free, nb_pkts);
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ tx_id = txq->tx_tail;
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+
+ txq->nb_free = (uint16_t)(txq->nb_free - nb_pkts);
+
+ n = (uint16_t)(txq->nb_tx_desc - tx_id);
+ if (nb_commit >= n) {
+ tx_backlog_entry(txep, tx_pkts, n);
+
+ iavf_vtx(txdp, tx_pkts, n - 1, flags);
+ tx_pkts += (n - 1);
+ txdp += (n - 1);
+
+ iavf_vtx1(txdp, *tx_pkts++, rs);
+
+ nb_commit = (uint16_t)(nb_commit - n);
+
+ tx_id = 0;
+ txq->next_rs = (uint16_t)(txq->rs_thresh - 1);
+
+ /* avoid reaching the end of the ring */
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+ }
+
+ tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+ iavf_vtx(txdp, tx_pkts, nb_commit, flags);
+
+ tx_id = (uint16_t)(tx_id + nb_commit);
+ if (tx_id > txq->next_rs) {
+ txq->tx_ring[txq->next_rs].cmd_type_offset_bsz |=
+ rte_cpu_to_le_64(((uint64_t)IAVF_TX_DESC_CMD_RS) <<
+ IAVF_TXD_QW1_CMD_SHIFT);
+ txq->next_rs =
+ (uint16_t)(txq->next_rs + txq->rs_thresh);
+ }
+
+ txq->tx_tail = tx_id;
+
+ IAVF_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+
+ return nb_pkts;
+}
+
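+/* Vector TX entry point: splits the request into bursts of at most
+ * rs_thresh packets and stops early if the ring fills up.
+ */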
+uint16_t
+iavf_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx = 0;
+ struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
+
+ while (nb_pkts) {
+ uint16_t ret, num;
+
+ num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
+ ret = iavf_xmit_fixed_burst_vec_avx2(tx_queue, &tx_pkts[nb_tx],
+ num);
+ nb_tx += ret;
+ nb_pkts -= ret;
+ if (ret < num)
+ break;
+ }
+
+ return nb_tx;
+}
diff --git a/src/spdk/dpdk/drivers/net/iavf/iavf_rxtx_vec_common.h b/src/spdk/dpdk/drivers/net/iavf/iavf_rxtx_vec_common.h
new file mode 100644
index 000000000..25bb502de
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/iavf/iavf_rxtx_vec_common.h
@@ -0,0 +1,276 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _IAVF_RXTX_VEC_COMMON_H_
+#define _IAVF_RXTX_VEC_COMMON_H_
+#include <stdint.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+
+#include "iavf.h"
+#include "iavf_rxtx.h"
+
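+/* Chain buffers flagged as split packets into multi-segment mbufs, fixing up
+ * nb_segs, pkt_len and CRC accounting, and compact the completed packets to
+ * the front of rx_bufs. Any unfinished packet is kept in the queue state for
+ * the next call.
+ */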
+static inline uint16_t
+reassemble_packets(struct iavf_rx_queue *rxq, struct rte_mbuf **rx_bufs,
+ uint16_t nb_bufs, uint8_t *split_flags)
+{
+ struct rte_mbuf *pkts[IAVF_VPMD_RX_MAX_BURST];
+ struct rte_mbuf *start = rxq->pkt_first_seg;
+ struct rte_mbuf *end = rxq->pkt_last_seg;
+ unsigned int pkt_idx, buf_idx;
+
+ for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
+ if (end) {
+ /* processing a split packet */
+ end->next = rx_bufs[buf_idx];
+ rx_bufs[buf_idx]->data_len += rxq->crc_len;
+
+ start->nb_segs++;
+ start->pkt_len += rx_bufs[buf_idx]->data_len;
+ end = end->next;
+
+ if (!split_flags[buf_idx]) {
+ /* it's the last packet of the set */
+ start->hash = end->hash;
+ start->vlan_tci = end->vlan_tci;
+ start->ol_flags = end->ol_flags;
+ /* we need to strip crc for the whole packet */
+ start->pkt_len -= rxq->crc_len;
+ if (end->data_len > rxq->crc_len) {
+ end->data_len -= rxq->crc_len;
+ } else {
+ /* free up last mbuf */
+ struct rte_mbuf *secondlast = start;
+
+ start->nb_segs--;
+ while (secondlast->next != end)
+ secondlast = secondlast->next;
+ secondlast->data_len -= (rxq->crc_len -
+ end->data_len);
+ secondlast->next = NULL;
+ rte_pktmbuf_free_seg(end);
+ }
+ pkts[pkt_idx++] = start;
+ start = NULL;
+ end = NULL;
+ }
+ } else {
+ /* not processing a split packet */
+ if (!split_flags[buf_idx]) {
+ /* not a split packet, save and skip */
+ pkts[pkt_idx++] = rx_bufs[buf_idx];
+ continue;
+ }
+ end = start = rx_bufs[buf_idx];
+ rx_bufs[buf_idx]->data_len += rxq->crc_len;
+ rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
+ }
+ }
+
+ /* save the partial packet for next time */
+ rxq->pkt_first_seg = start;
+ rxq->pkt_last_seg = end;
+ memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
+ return pkt_idx;
+}
+
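+/* Free up to rs_thresh transmitted mbufs once the DD bit is set on the
+ * threshold descriptor, returning them to their mempool in bulk where
+ * possible.
+ */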
+static __rte_always_inline int
+iavf_tx_free_bufs(struct iavf_tx_queue *txq)
+{
+ struct iavf_tx_entry *txep;
+ uint32_t n;
+ uint32_t i;
+ int nb_free = 0;
+ struct rte_mbuf *m, *free[IAVF_VPMD_TX_MAX_FREE_BUF];
+
+ /* check DD bits on threshold descriptor */
+ if ((txq->tx_ring[txq->next_dd].cmd_type_offset_bsz &
+ rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
+ rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE))
+ return 0;
+
+ n = txq->rs_thresh;
+
+ /* first buffer to free from S/W ring is at index
+ * tx_next_dd - (tx_rs_thresh-1)
+ */
+ txep = &txq->sw_ring[txq->next_dd - (n - 1)];
+ m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
+ if (likely(m != NULL)) {
+ free[0] = m;
+ nb_free = 1;
+ for (i = 1; i < n; i++) {
+ m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (likely(m != NULL)) {
+ if (likely(m->pool == free[0]->pool)) {
+ free[nb_free++] = m;
+ } else {
+ rte_mempool_put_bulk(free[0]->pool,
+ (void *)free,
+ nb_free);
+ free[0] = m;
+ nb_free = 1;
+ }
+ }
+ }
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+ } else {
+ for (i = 1; i < n; i++) {
+ m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (m)
+ rte_mempool_put(m->pool, m);
+ }
+ }
+
+ /* buffers were freed, update counters */
+ txq->nb_free = (uint16_t)(txq->nb_free + txq->rs_thresh);
+ txq->next_dd = (uint16_t)(txq->next_dd + txq->rs_thresh);
+ if (txq->next_dd >= txq->nb_tx_desc)
+ txq->next_dd = (uint16_t)(txq->rs_thresh - 1);
+
+ return txq->rs_thresh;
+}
+
+static __rte_always_inline void
+tx_backlog_entry(struct iavf_tx_entry *txep,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int i;
+
+ for (i = 0; i < (int)nb_pkts; ++i)
+ txep[i].mbuf = tx_pkts[i];
+}
+
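+/* Release all mbufs still held by the RX software ring, skipping slots that
+ * are waiting to be rearmed, then clear the ring entries.
+ */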
+static inline void
+_iavf_rx_queue_release_mbufs_vec(struct iavf_rx_queue *rxq)
+{
+ const unsigned int mask = rxq->nb_rx_desc - 1;
+ unsigned int i;
+
+ if (!rxq->sw_ring || rxq->rxrearm_nb >= rxq->nb_rx_desc)
+ return;
+
+ /* free all mbufs that are valid in the ring */
+ if (rxq->rxrearm_nb == 0) {
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (rxq->sw_ring[i])
+ rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+ }
+ } else {
+ for (i = rxq->rx_tail;
+ i != rxq->rxrearm_start;
+ i = (i + 1) & mask) {
+ if (rxq->sw_ring[i])
+ rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+ }
+ }
+
+ rxq->rxrearm_nb = rxq->nb_rx_desc;
+
+ /* set all entries to NULL */
+ memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
+}
+
+static inline void
+_iavf_tx_queue_release_mbufs_vec(struct iavf_tx_queue *txq)
+{
+ unsigned i;
+ const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);
+
+ if (!txq->sw_ring || txq->nb_free == max_desc)
+ return;
+
+ i = txq->next_dd - txq->rs_thresh + 1;
+ if (txq->tx_tail < i) {
+ for (; i < txq->nb_tx_desc; i++) {
+ rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+ txq->sw_ring[i].mbuf = NULL;
+ }
+ i = 0;
+ }
+}
+
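+/* Build the 64-bit mbuf_initializer template (the rearm_data of a default
+ * mbuf) used by the vector RX paths to initialize several mbuf fields with a
+ * single store.
+ */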
+static inline int
+iavf_rxq_vec_setup_default(struct iavf_rx_queue *rxq)
+{
+ uintptr_t p;
+ struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
+
+ mb_def.nb_segs = 1;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+ mb_def.port = rxq->port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+
+ /* prevent compiler reordering: rearm_data covers previous fields */
+ rte_compiler_barrier();
+ p = (uintptr_t)&mb_def.rearm_data;
+ rxq->mbuf_initializer = *(uint64_t *)p;
+ return 0;
+}
+
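+/* An RX queue is usable by the vector path only if its descriptor count is a
+ * power of two and rx_free_thresh is large enough and divides it evenly.
+ */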
+static inline int
+iavf_rx_vec_queue_default(struct iavf_rx_queue *rxq)
+{
+ if (!rxq)
+ return -1;
+
+ if (!rte_is_power_of_2(rxq->nb_rx_desc))
+ return -1;
+
+ if (rxq->rx_free_thresh < IAVF_VPMD_RX_MAX_BURST)
+ return -1;
+
+ if (rxq->nb_rx_desc % rxq->rx_free_thresh)
+ return -1;
+
+ return 0;
+}
+
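+/* A TX queue is usable by the vector path only if it has no offloads the
+ * vector routines cannot handle and rs_thresh is within the supported range.
+ */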
+static inline int
+iavf_tx_vec_queue_default(struct iavf_tx_queue *txq)
+{
+ if (!txq)
+ return -1;
+
+ if (txq->offloads & IAVF_NO_VECTOR_FLAGS)
+ return -1;
+
+ if (txq->rs_thresh < IAVF_VPMD_TX_MAX_BURST ||
+ txq->rs_thresh > IAVF_VPMD_TX_MAX_FREE_BUF)
+ return -1;
+
+ return 0;
+}
+
+static inline int
+iavf_rx_vec_dev_check_default(struct rte_eth_dev *dev)
+{
+ int i;
+ struct iavf_rx_queue *rxq;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ if (iavf_rx_vec_queue_default(rxq))
+ return -1;
+ }
+
+ return 0;
+}
+
+static inline int
+iavf_tx_vec_dev_check_default(struct rte_eth_dev *dev)
+{
+ int i;
+ struct iavf_tx_queue *txq;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (iavf_tx_vec_queue_default(txq))
+ return -1;
+ }
+
+ return 0;
+}
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/iavf/iavf_rxtx_vec_sse.c b/src/spdk/dpdk/drivers/net/iavf/iavf_rxtx_vec_sse.c
new file mode 100644
index 000000000..85c5bd4af
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -0,0 +1,1191 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+
+#include "iavf.h"
+#include "iavf_rxtx.h"
+#include "iavf_rxtx_vec_common.h"
+
+#include <tmmintrin.h>
+
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
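+/* Refill rx_free_thresh descriptors with fresh mbufs from the mempool, two
+ * at a time, then update the hardware tail register. If allocation fails
+ * while the ring is nearly exhausted, the next few slots are pointed at
+ * fake_mbuf with zeroed descriptors.
+ */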
+static inline void
+iavf_rxq_rearm(struct iavf_rx_queue *rxq)
+{
+ int i;
+ uint16_t rx_id;
+
+ volatile union iavf_rx_desc *rxdp;
+ struct rte_mbuf **rxp = &rxq->sw_ring[rxq->rxrearm_start];
+ struct rte_mbuf *mb0, *mb1;
+ __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
+ RTE_PKTMBUF_HEADROOM);
+ __m128i dma_addr0, dma_addr1;
+
+ rxdp = rxq->rx_ring + rxq->rxrearm_start;
+
+ /* Pull 'n' more MBUFs into the software ring */
+ if (rte_mempool_get_bulk(rxq->mp, (void *)rxp,
+ rxq->rx_free_thresh) < 0) {
+ if (rxq->rxrearm_nb + rxq->rx_free_thresh >= rxq->nb_rx_desc) {
+ dma_addr0 = _mm_setzero_si128();
+ for (i = 0; i < IAVF_VPMD_DESCS_PER_LOOP; i++) {
+ rxp[i] = &rxq->fake_mbuf;
+ _mm_store_si128((__m128i *)&rxdp[i].read,
+ dma_addr0);
+ }
+ }
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ rxq->rx_free_thresh;
+ return;
+ }
+
+ /* Initialize the mbufs in vector, process 2 mbufs in one loop */
+ for (i = 0; i < rxq->rx_free_thresh; i += 2, rxp += 2) {
+ __m128i vaddr0, vaddr1;
+
+ mb0 = rxp[0];
+ mb1 = rxp[1];
+
+ /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
+ offsetof(struct rte_mbuf, buf_addr) + 8);
+ vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
+ vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
+
+ /* convert pa to dma_addr hdr/data */
+ dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
+ dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
+
+ /* add headroom to pa values */
+ dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
+ dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);
+
+ /* flush desc with pa dma_addr */
+ _mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
+ _mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
+ }
+
+ rxq->rxrearm_start += rxq->rx_free_thresh;
+ if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+ rxq->rxrearm_start = 0;
+
+ rxq->rxrearm_nb -= rxq->rx_free_thresh;
+
+ rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+ (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+ "rearm_start=%u rearm_nb=%u",
+ rxq->port_id, rxq->queue_id,
+ rx_id, rxq->rxrearm_start, rxq->rxrearm_nb);
+
+ /* Update the tail pointer on the NIC */
+ IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+}
+
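+/* Translate the status/error words of 4 legacy descriptors into mbuf
+ * ol_flags (VLAN, RSS/FDIR and checksum bits) and write them together with
+ * the rearm template in a single 16-byte store per mbuf.
+ */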
+static inline void
+desc_to_olflags_v(struct iavf_rx_queue *rxq, __m128i descs[4],
+ struct rte_mbuf **rx_pkts)
+{
+ const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
+ __m128i rearm0, rearm1, rearm2, rearm3;
+
+ __m128i vlan0, vlan1, rss, l3_l4e;
+
+ /* mask everything except RSS, flow director and VLAN flags
+ * bit2 is for VLAN tag, bit11 for flow director indication
+ * bit13:12 for RSS indication.
+ */
+ const __m128i rss_vlan_msk = _mm_set_epi32(
+ 0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804);
+
+ const __m128i cksum_mask = _mm_set_epi32(
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD);
+
+ /* map rss and vlan type to rss hash and vlan flag */
+ const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ 0, 0, 0, 0);
+
+ const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
+ 0, 0, PKT_RX_FDIR, 0);
+
+ const __m128i l3_l4e_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
+ /* shift right 1 bit to make sure it does not exceed 255 */
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_BAD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
+ (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
+ PKT_RX_IP_CKSUM_BAD >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1);
+
+ vlan0 = _mm_unpackhi_epi32(descs[0], descs[1]);
+ vlan1 = _mm_unpackhi_epi32(descs[2], descs[3]);
+ vlan0 = _mm_unpacklo_epi64(vlan0, vlan1);
+
+ vlan1 = _mm_and_si128(vlan0, rss_vlan_msk);
+ vlan0 = _mm_shuffle_epi8(vlan_flags, vlan1);
+
+ rss = _mm_srli_epi32(vlan1, 11);
+ rss = _mm_shuffle_epi8(rss_flags, rss);
+
+ l3_l4e = _mm_srli_epi32(vlan1, 22);
+ l3_l4e = _mm_shuffle_epi8(l3_l4e_flags, l3_l4e);
+ /* then we shift left 1 bit */
+ l3_l4e = _mm_slli_epi32(l3_l4e, 1);
+ /* we need to mask out the redundant bits */
+ l3_l4e = _mm_and_si128(l3_l4e, cksum_mask);
+
+ vlan0 = _mm_or_si128(vlan0, rss);
+ vlan0 = _mm_or_si128(vlan0, l3_l4e);
+
+ /* At this point, we have the 4 sets of flags in the low 16-bits
+ * of each 32-bit value in vlan0.
+ * We want to extract these, and merge them with the mbuf init data
+ * so we can do a single 16-byte write to the mbuf to set the flags
+ * and all the other initialization fields. Extracting the
+ * appropriate flags means that we have to do a shift and blend for
+ * each mbuf before we do the write.
+ */
+ rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vlan0, 8), 0x10);
+ rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vlan0, 4), 0x10);
+ rearm2 = _mm_blend_epi16(mbuf_init, vlan0, 0x10);
+ rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(vlan0, 4), 0x10);
+
+ /* write the rearm data and the olflags in one write */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
+ offsetof(struct rte_mbuf, rearm_data) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
+ RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
+ _mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
+ _mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
+ _mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
+ _mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
+}
+
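+/* Derive PKT_RX_FDIR/PKT_RX_FDIR_ID flags for 4 packets from their FDIR IDs.
+ * An ID of 0xFFFFFFFF means no flow director filter matched, so no flag is
+ * set for that packet.
+ */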
+static inline __m128i
+flex_rxd_to_fdir_flags_vec(const __m128i fdir_id0_3)
+{
+#define FDID_MIS_MAGIC 0xFFFFFFFF
+ RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
+ RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
+ const __m128i pkt_fdir_bit = _mm_set1_epi32(PKT_RX_FDIR |
+ PKT_RX_FDIR_ID);
+ /* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
+ const __m128i fdir_mis_mask = _mm_set1_epi32(FDID_MIS_MAGIC);
+ __m128i fdir_mask = _mm_cmpeq_epi32(fdir_id0_3,
+ fdir_mis_mask);
+ /* this XOR op results in bit-reversing the fdir_mask */
+ fdir_mask = _mm_xor_si128(fdir_mask, fdir_mis_mask);
+ const __m128i fdir_flags = _mm_and_si128(fdir_mask, pkt_fdir_bit);
+
+ return fdir_flags;
+}
+
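+/* Same as desc_to_olflags_v() but for the flexible descriptor format, which
+ * additionally carries the FDIR ID used to set PKT_RX_FDIR/PKT_RX_FDIR_ID
+ * and fill hash.fdir.hi.
+ */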
+static inline void
+flex_desc_to_olflags_v(struct iavf_rx_queue *rxq, __m128i descs[4],
+ struct rte_mbuf **rx_pkts)
+{
+ const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
+ __m128i rearm0, rearm1, rearm2, rearm3;
+
+ __m128i tmp_desc, flags, rss_vlan;
+
+ /* mask everything except checksum, RSS and VLAN flags.
+ * bit6:4 for checksum.
+ * bit12 for RSS indication.
+ * bit13 for VLAN indication.
+ */
+ const __m128i desc_mask = _mm_set_epi32(0x3070, 0x3070,
+ 0x3070, 0x3070);
+
+ const __m128i cksum_mask = _mm_set_epi32(PKT_RX_IP_CKSUM_MASK |
+ PKT_RX_L4_CKSUM_MASK |
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_MASK |
+ PKT_RX_L4_CKSUM_MASK |
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_MASK |
+ PKT_RX_L4_CKSUM_MASK |
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_MASK |
+ PKT_RX_L4_CKSUM_MASK |
+ PKT_RX_EIP_CKSUM_BAD);
+
+ /* map the checksum, rss and vlan fields to the checksum, rss
+ * and vlan flag
+ */
+ const __m128i cksum_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
+ /* shift right 1 bit to make sure it does not exceed 255 */
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1);
+
+ const __m128i rss_vlan_flags = _mm_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_RSS_HASH, 0);
+
+ /* merge 4 descriptors */
+ flags = _mm_unpackhi_epi32(descs[0], descs[1]);
+ tmp_desc = _mm_unpackhi_epi32(descs[2], descs[3]);
+ tmp_desc = _mm_unpacklo_epi64(flags, tmp_desc);
+ tmp_desc = _mm_and_si128(flags, desc_mask);
+
+ /* checksum flags */
+ tmp_desc = _mm_srli_epi32(tmp_desc, 4);
+ flags = _mm_shuffle_epi8(cksum_flags, tmp_desc);
+ /* then we shift left 1 bit */
+ flags = _mm_slli_epi32(flags, 1);
+ /* we need to mask out the redundant bits introduced by RSS or
+ * VLAN fields.
+ */
+ flags = _mm_and_si128(flags, cksum_mask);
+
+ /* RSS, VLAN flag */
+ tmp_desc = _mm_srli_epi32(tmp_desc, 8);
+ rss_vlan = _mm_shuffle_epi8(rss_vlan_flags, tmp_desc);
+
+ /* merge the flags */
+ flags = _mm_or_si128(flags, rss_vlan);
+
+ if (rxq->fdir_enabled) {
+ const __m128i fdir_id0_1 =
+ _mm_unpackhi_epi32(descs[0], descs[1]);
+
+ const __m128i fdir_id2_3 =
+ _mm_unpackhi_epi32(descs[2], descs[3]);
+
+ const __m128i fdir_id0_3 =
+ _mm_unpackhi_epi64(fdir_id0_1, fdir_id2_3);
+
+ const __m128i fdir_flags =
+ flex_rxd_to_fdir_flags_vec(fdir_id0_3);
+
+ /* merge with fdir_flags */
+ flags = _mm_or_si128(flags, fdir_flags);
+
+ /* write fdir_id to mbuf */
+ rx_pkts[0]->hash.fdir.hi =
+ _mm_extract_epi32(fdir_id0_3, 0);
+
+ rx_pkts[1]->hash.fdir.hi =
+ _mm_extract_epi32(fdir_id0_3, 1);
+
+ rx_pkts[2]->hash.fdir.hi =
+ _mm_extract_epi32(fdir_id0_3, 2);
+
+ rx_pkts[3]->hash.fdir.hi =
+ _mm_extract_epi32(fdir_id0_3, 3);
+ } /* if() on fdir_enabled */
+
+ /**
+ * At this point, we have the 4 sets of flags in the low 16-bits
+ * of each 32-bit value in flags.
+ * We want to extract these, and merge them with the mbuf init data
+ * so we can do a single 16-byte write to the mbuf to set the flags
+ * and all the other initialization fields. Extracting the
+ * appropriate flags means that we have to do a shift and blend for
+ * each mbuf before we do the write.
+ */
+ rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(flags, 8), 0x10);
+ rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(flags, 4), 0x10);
+ rearm2 = _mm_blend_epi16(mbuf_init, flags, 0x10);
+ rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(flags, 4), 0x10);
+
+ /* write the rearm data and the olflags in one write */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
+ offsetof(struct rte_mbuf, rearm_data) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
+ RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
+ _mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
+ _mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
+ _mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
+ _mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
+}
+
+#define PKTLEN_SHIFT 10
+
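+/* Look up the packet type of 4 legacy descriptors: the 8-bit ptype field
+ * starts at bit 30 of the second quadword and indexes the adapter's ptype
+ * table.
+ */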
+static inline void
+desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,
+ const uint32_t *type_table)
+{
+ __m128i ptype0 = _mm_unpackhi_epi64(descs[0], descs[1]);
+ __m128i ptype1 = _mm_unpackhi_epi64(descs[2], descs[3]);
+
+ ptype0 = _mm_srli_epi64(ptype0, 30);
+ ptype1 = _mm_srli_epi64(ptype1, 30);
+
+ rx_pkts[0]->packet_type = type_table[_mm_extract_epi8(ptype0, 0)];
+ rx_pkts[1]->packet_type = type_table[_mm_extract_epi8(ptype0, 8)];
+ rx_pkts[2]->packet_type = type_table[_mm_extract_epi8(ptype1, 0)];
+ rx_pkts[3]->packet_type = type_table[_mm_extract_epi8(ptype1, 8)];
+}
+
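+/* Look up the packet type of 4 flex descriptors: the 10-bit ptype field
+ * (bits 16-25 of the first dword) is masked out and indexes the adapter's
+ * ptype table.
+ */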
+static inline void
+flex_desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,
+ const uint32_t *type_table)
+{
+ const __m128i ptype_mask = _mm_set_epi16(0, IAVF_RX_FLEX_DESC_PTYPE_M,
+ 0, IAVF_RX_FLEX_DESC_PTYPE_M,
+ 0, IAVF_RX_FLEX_DESC_PTYPE_M,
+ 0, IAVF_RX_FLEX_DESC_PTYPE_M);
+ __m128i ptype_01 = _mm_unpacklo_epi32(descs[0], descs[1]);
+ __m128i ptype_23 = _mm_unpacklo_epi32(descs[2], descs[3]);
+ __m128i ptype_all = _mm_unpacklo_epi64(ptype_01, ptype_23);
+
+ ptype_all = _mm_and_si128(ptype_all, ptype_mask);
+
+ rx_pkts[0]->packet_type = type_table[_mm_extract_epi16(ptype_all, 1)];
+ rx_pkts[1]->packet_type = type_table[_mm_extract_epi16(ptype_all, 3)];
+ rx_pkts[2]->packet_type = type_table[_mm_extract_epi16(ptype_all, 5)];
+ rx_pkts[3]->packet_type = type_table[_mm_extract_epi16(ptype_all, 7)];
+}
+
+/* Notice:
+ * - nb_pkts < IAVF_VPMD_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > IAVF_VPMD_RX_MAX_BURST, only scan IAVF_VPMD_RX_MAX_BURST
+ * numbers of DD bits
+ */
+static inline uint16_t
+_recv_raw_pkts_vec(struct iavf_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts, uint8_t *split_packet)
+{
+ volatile union iavf_rx_desc *rxdp;
+ struct rte_mbuf **sw_ring;
+ uint16_t nb_pkts_recd;
+ int pos;
+ uint64_t var;
+ __m128i shuf_msk;
+ const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+
+ __m128i crc_adjust = _mm_set_epi16(
+ 0, 0, 0, /* ignore non-length fields */
+ -rxq->crc_len, /* sub crc on data_len */
+ 0, /* ignore high-16bits of pkt_len */
+ -rxq->crc_len, /* sub crc on pkt_len */
+ 0, 0 /* ignore pkt_type field */
+ );
+ /* compile-time check the above crc_adjust layout is correct.
+ * NOTE: the first field (lowest address) is given last in set_epi16
+ * call above.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+ __m128i dd_check, eop_check;
+
+ /* nb_pkts shall be less than or equal to IAVF_VPMD_RX_MAX_BURST */
+ nb_pkts = RTE_MIN(nb_pkts, IAVF_VPMD_RX_MAX_BURST);
+
+ /* nb_pkts has to be floor-aligned to IAVF_VPMD_DESCS_PER_LOOP */
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_VPMD_DESCS_PER_LOOP);
+
+ /* Just the act of getting into the function from the application is
+ * going to cost about 7 cycles
+ */
+ rxdp = rxq->rx_ring + rxq->rx_tail;
+
+ rte_prefetch0(rxdp);
+
+ /* See if we need to rearm the RX queue - gives the prefetch a bit
+ * of time to act
+ */
+ if (rxq->rxrearm_nb > rxq->rx_free_thresh)
+ iavf_rxq_rearm(rxq);
+
+ /* Before we start moving massive data around, check to see if
+ * there is actually a packet available
+ */
+ if (!(rxdp->wb.qword1.status_error_len &
+ rte_cpu_to_le_32(1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
+ return 0;
+
+ /* 4 packets DD mask */
+ dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);
+
+ /* 4 packets EOP mask */
+ eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);
+
+ /* mask to shuffle from desc. to mbuf */
+ shuf_msk = _mm_set_epi8(
+ 7, 6, 5, 4, /* octet 4~7, 32bits rss */
+ 3, 2, /* octet 2~3, low 16 bits vlan_macip */
+ 15, 14, /* octet 15~14, 16 bits data_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+ 15, 14, /* octet 15~14, low 16 bits pkt_len */
+ 0xFF, 0xFF, 0xFF, 0xFF /* pkt_type set as unknown */
+ );
+ /* Compile-time verify the shuffle mask
+ * NOTE: some field positions already verified above, but duplicated
+ * here for completeness in case of future modifications.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
+
+ /* Cache is empty -> need to scan the buffer rings, but first move
+ * the next 'n' mbufs into the cache
+ */
+ sw_ring = &rxq->sw_ring[rxq->rx_tail];
+
+ /* A. load 4 packet in one loop
+ * [A*. mask out 4 unused dirty field in desc]
+ * B. copy 4 mbuf point from swring to rx_pkts
+ * C. calc the number of DD bits among the 4 packets
+ * [C*. extract the end-of-packet bit, if requested]
+ * D. fill info. from desc to mbuf
+ */
+
+ for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
+ pos += IAVF_VPMD_DESCS_PER_LOOP,
+ rxdp += IAVF_VPMD_DESCS_PER_LOOP) {
+ __m128i descs[IAVF_VPMD_DESCS_PER_LOOP];
+ __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+ __m128i zero, staterr, sterr_tmp1, sterr_tmp2;
+ /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
+ __m128i mbp1;
+#if defined(RTE_ARCH_X86_64)
+ __m128i mbp2;
+#endif
+
+ /* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */
+ mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
+ /* Read desc statuses backwards to avoid race condition */
+ /* A.1 load 4 pkts desc */
+ descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
+ rte_compiler_barrier();
+
+ /* B.2 copy 2 64 bit or 4 32 bit mbuf point into rx_pkts */
+ _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
+
+#if defined(RTE_ARCH_X86_64)
+ /* B.1 load 2 64 bit mbuf points */
+ mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos + 2]);
+#endif
+
+ descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
+ rte_compiler_barrier();
+ /* B.1 load 2 mbuf point */
+ descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
+ rte_compiler_barrier();
+ descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
+
+#if defined(RTE_ARCH_X86_64)
+ /* B.2 copy 2 mbuf point into rx_pkts */
+ _mm_storeu_si128((__m128i *)&rx_pkts[pos + 2], mbp2);
+#endif
+
+ if (split_packet) {
+ rte_mbuf_prefetch_part2(rx_pkts[pos]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
+ }
+
+ /* avoid compiler reorder optimization */
+ rte_compiler_barrier();
+
+ /* pkt 3,4 shift the pktlen field to be 16-bit aligned */
+ const __m128i len3 = _mm_slli_epi32(descs[3], PKTLEN_SHIFT);
+ const __m128i len2 = _mm_slli_epi32(descs[2], PKTLEN_SHIFT);
+
+ /* merge the now-aligned packet length fields back in */
+ descs[3] = _mm_blend_epi16(descs[3], len3, 0x80);
+ descs[2] = _mm_blend_epi16(descs[2], len2, 0x80);
+
+ /* D.1 pkt 3,4 convert format from desc to pktmbuf */
+ pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
+ pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);
+
+ /* C.1 4=>2 status err info only */
+ sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
+ sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
+
+ desc_to_olflags_v(rxq, descs, &rx_pkts[pos]);
+
+ /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
+ pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
+ pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);
+
+ /* pkt 1,2 shift the pktlen field to be 16-bit aligned */
+ const __m128i len1 = _mm_slli_epi32(descs[1], PKTLEN_SHIFT);
+ const __m128i len0 = _mm_slli_epi32(descs[0], PKTLEN_SHIFT);
+
+ /* merge the now-aligned packet length fields back in */
+ descs[1] = _mm_blend_epi16(descs[1], len1, 0x80);
+ descs[0] = _mm_blend_epi16(descs[0], len0, 0x80);
+
+ /* D.1 pkt 1,2 convert format from desc to pktmbuf */
+ pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
+ pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);
+
+ /* C.2 get 4 pkts status err value */
+ zero = _mm_xor_si128(dd_check, dd_check);
+ staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
+
+ /* D.3 copy final 3,4 data to rx_pkts */
+ _mm_storeu_si128(
+ (void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
+ pkt_mb4);
+ _mm_storeu_si128(
+ (void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
+ pkt_mb3);
+
+ /* D.2 pkt 1,2 remove crc */
+ pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
+ pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);
+
+ /* C* extract and record EOP bit */
+ if (split_packet) {
+ __m128i eop_shuf_mask = _mm_set_epi8(
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x04, 0x0C, 0x00, 0x08
+ );
+
+ /* and with mask to extract bits, flipping 1-0 */
+ __m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
+ /* the staterr values are not in order, as the count
+ * of dd bits doesn't care. However, for end of
+ * packet tracking, we do care, so shuffle. This also
+ * compresses the 32-bit values to 8-bit
+ */
+ eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
+ /* store the resulting 32-bit value */
+ *(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
+ split_packet += IAVF_VPMD_DESCS_PER_LOOP;
+ }
+
+ /* C.3 calc available number of desc */
+ staterr = _mm_and_si128(staterr, dd_check);
+ staterr = _mm_packs_epi32(staterr, zero);
+
+ /* D.3 copy final 1,2 data to rx_pkts */
+ _mm_storeu_si128(
+ (void *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
+ pkt_mb2);
+ _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
+ pkt_mb1);
+ desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
+ /* C.4 calc available number of desc */
+ var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
+ nb_pkts_recd += var;
+ if (likely(var != IAVF_VPMD_DESCS_PER_LOOP))
+ break;
+ }
+
+ /* Update our internal tail pointer */
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
+ rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
+
+ return nb_pkts_recd;
+}
+
+/* Notice:
+ * - nb_pkts < IAVF_VPMD_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > IAVF_VPMD_RX_MAX_BURST, only scan IAVF_VPMD_RX_MAX_BURST
+ * numbers of DD bits
+ */
+static inline uint16_t
+_recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts, uint8_t *split_packet)
+{
+ volatile union iavf_rx_flex_desc *rxdp;
+ struct rte_mbuf **sw_ring;
+ uint16_t nb_pkts_recd;
+ int pos;
+ uint64_t var;
+ const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+ __m128i crc_adjust = _mm_set_epi16
+ (0, 0, 0, /* ignore non-length fields */
+ -rxq->crc_len, /* sub crc on data_len */
+ 0, /* ignore high-16bits of pkt_len */
+ -rxq->crc_len, /* sub crc on pkt_len */
+ 0, 0 /* ignore pkt_type field */
+ );
+ const __m128i zero = _mm_setzero_si128();
+ /* mask to shuffle from desc. to mbuf */
+ const __m128i shuf_msk = _mm_set_epi8
+ (0xFF, 0xFF,
+ 0xFF, 0xFF, /* rss hash parsed separately */
+ 11, 10, /* octet 10~11, 16 bits vlan_macip */
+ 5, 4, /* octet 4~5, 16 bits data_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+ 5, 4, /* octet 4~5, low 16 bits pkt_len */
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 0xFF, 0xFF /* pkt_type set as unknown */
+ );
+ const __m128i eop_shuf_mask = _mm_set_epi8(0xFF, 0xFF,
+ 0xFF, 0xFF,
+ 0xFF, 0xFF,
+ 0xFF, 0xFF,
+ 0xFF, 0xFF,
+ 0xFF, 0xFF,
+ 0x04, 0x0C,
+ 0x00, 0x08);
+
+ /**
+ * compile-time check the above crc_adjust layout is correct.
+ * NOTE: the first field (lowest address) is given last in set_epi16
+ * call above.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+
+ /* 4 packets DD mask */
+ const __m128i dd_check = _mm_set_epi64x(0x0000000100000001LL,
+ 0x0000000100000001LL);
+ /* 4 packets EOP mask */
+ const __m128i eop_check = _mm_set_epi64x(0x0000000200000002LL,
+ 0x0000000200000002LL);
+
+	/* nb_pkts has to be less than or equal to IAVF_VPMD_RX_MAX_BURST */
+ nb_pkts = RTE_MIN(nb_pkts, IAVF_VPMD_RX_MAX_BURST);
+
+ /* nb_pkts has to be floor-aligned to IAVF_VPMD_DESCS_PER_LOOP */
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_VPMD_DESCS_PER_LOOP);
+
+ /* Just the act of getting into the function from the application is
+ * going to cost about 7 cycles
+ */
+ rxdp = (union iavf_rx_flex_desc *)rxq->rx_ring + rxq->rx_tail;
+
+ rte_prefetch0(rxdp);
+
+ /* See if we need to rearm the RX queue - gives the prefetch a bit
+ * of time to act
+ */
+ if (rxq->rxrearm_nb > rxq->rx_free_thresh)
+ iavf_rxq_rearm(rxq);
+
+ /* Before we start moving massive data around, check to see if
+ * there is actually a packet available
+ */
+ if (!(rxdp->wb.status_error0 &
+ rte_cpu_to_le_32(1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
+ return 0;
+
+ /**
+ * Compile-time verify the shuffle mask
+ * NOTE: some field positions already verified above, but duplicated
+ * here for completeness in case of future modifications.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
+
+ /* Cache is empty -> need to scan the buffer rings, but first move
+ * the next 'n' mbufs into the cache
+ */
+ sw_ring = &rxq->sw_ring[rxq->rx_tail];
+
+	/* A. load 4 packets in one loop
+	 * [A*. mask out 4 unused dirty fields in desc]
+	 * B. copy 4 mbuf pointers from swring to rx_pkts
+	 * C. calc the number of DD bits among the 4 packets
+	 * [C*. extract the end-of-packet bit, if requested]
+	 * D. fill info from desc to mbuf
+	 */
+
+ for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
+ pos += IAVF_VPMD_DESCS_PER_LOOP,
+ rxdp += IAVF_VPMD_DESCS_PER_LOOP) {
+ __m128i descs[IAVF_VPMD_DESCS_PER_LOOP];
+ __m128i pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3;
+ __m128i staterr, sterr_tmp1, sterr_tmp2;
+ /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
+ __m128i mbp1;
+#if defined(RTE_ARCH_X86_64)
+ __m128i mbp2;
+#endif
+
+		/* B.1 load 2 (64 bit) or 4 (32 bit) mbuf pointers */
+ mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
+ /* Read desc statuses backwards to avoid race condition */
+ /* A.1 load 4 pkts desc */
+ descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
+ rte_compiler_barrier();
+
+		/* B.2 copy 2 64 bit or 4 32 bit mbuf pointers into rx_pkts */
+ _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
+
+#if defined(RTE_ARCH_X86_64)
+		/* B.1 load 2 64 bit mbuf pointers */
+ mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos + 2]);
+#endif
+
+ descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
+ rte_compiler_barrier();
+		/* A.1 load remaining pkt descs */
+ descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
+ rte_compiler_barrier();
+ descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
+
+#if defined(RTE_ARCH_X86_64)
+		/* B.2 copy 2 mbuf pointers into rx_pkts */
+ _mm_storeu_si128((__m128i *)&rx_pkts[pos + 2], mbp2);
+#endif
+
+ if (split_packet) {
+ rte_mbuf_prefetch_part2(rx_pkts[pos]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
+ }
+
+ /* avoid compiler reorder optimization */
+ rte_compiler_barrier();
+
+ /* D.1 pkt 3,4 convert format from desc to pktmbuf */
+ pkt_mb3 = _mm_shuffle_epi8(descs[3], shuf_msk);
+ pkt_mb2 = _mm_shuffle_epi8(descs[2], shuf_msk);
+
+ /* D.1 pkt 1,2 convert format from desc to pktmbuf */
+ pkt_mb1 = _mm_shuffle_epi8(descs[1], shuf_msk);
+ pkt_mb0 = _mm_shuffle_epi8(descs[0], shuf_msk);
+
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
+
+ flex_desc_to_olflags_v(rxq, descs, &rx_pkts[pos]);
+
+ /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
+ pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);
+ pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
+
+ /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
+ pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);
+ pkt_mb0 = _mm_add_epi16(pkt_mb0, crc_adjust);
+
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+ /**
+		 * need to load the 2nd 16B of each desc for RSS hash parsing;
+		 * getting into this branch causes a performance drop.
+ */
+ if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_RSS_HASH) {
+ /* load bottom half of every 32B desc */
+ const __m128i raw_desc_bh3 =
+ _mm_load_si128
+ ((void *)(&rxdp[3].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh2 =
+ _mm_load_si128
+ ((void *)(&rxdp[2].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh1 =
+ _mm_load_si128
+ ((void *)(&rxdp[1].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh0 =
+ _mm_load_si128
+ ((void *)(&rxdp[0].wb.status_error1));
+
+ /**
+ * to shift the 32b RSS hash value to the
+ * highest 32b of each 128b before mask
+ */
+ __m128i rss_hash3 =
+ _mm_slli_epi64(raw_desc_bh3, 32);
+ __m128i rss_hash2 =
+ _mm_slli_epi64(raw_desc_bh2, 32);
+ __m128i rss_hash1 =
+ _mm_slli_epi64(raw_desc_bh1, 32);
+ __m128i rss_hash0 =
+ _mm_slli_epi64(raw_desc_bh0, 32);
+
+ __m128i rss_hash_msk =
+ _mm_set_epi32(0xFFFFFFFF, 0, 0, 0);
+
+ rss_hash3 = _mm_and_si128
+ (rss_hash3, rss_hash_msk);
+ rss_hash2 = _mm_and_si128
+ (rss_hash2, rss_hash_msk);
+ rss_hash1 = _mm_and_si128
+ (rss_hash1, rss_hash_msk);
+ rss_hash0 = _mm_and_si128
+ (rss_hash0, rss_hash_msk);
+
+ pkt_mb3 = _mm_or_si128(pkt_mb3, rss_hash3);
+ pkt_mb2 = _mm_or_si128(pkt_mb2, rss_hash2);
+ pkt_mb1 = _mm_or_si128(pkt_mb1, rss_hash1);
+ pkt_mb0 = _mm_or_si128(pkt_mb0, rss_hash0);
+ } /* if() on RSS hash parsing */
+#endif
+
+ /* C.2 get 4 pkts staterr value */
+ staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
+
+ /* D.3 copy final 3,4 data to rx_pkts */
+ _mm_storeu_si128
+ ((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
+ pkt_mb3);
+ _mm_storeu_si128
+ ((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
+ pkt_mb2);
+
+ /* C* extract and record EOP bit */
+ if (split_packet) {
+ /* and with mask to extract bits, flipping 1-0 */
+ __m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
+			/* the staterr values are not in order, as the count
+			 * of dd bits doesn't care. However, for end of
+ * packet tracking, we do care, so shuffle. This also
+ * compresses the 32-bit values to 8-bit
+ */
+ eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
+ /* store the resulting 32-bit value */
+ *(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
+ split_packet += IAVF_VPMD_DESCS_PER_LOOP;
+ }
+
+ /* C.3 calc available number of desc */
+ staterr = _mm_and_si128(staterr, dd_check);
+ staterr = _mm_packs_epi32(staterr, zero);
+
+ /* D.3 copy final 1,2 data to rx_pkts */
+ _mm_storeu_si128
+ ((void *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
+ pkt_mb1);
+ _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
+ pkt_mb0);
+ flex_desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
+ /* C.4 calc available number of desc */
+ var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
+ nb_pkts_recd += var;
+ if (likely(var != IAVF_VPMD_DESCS_PER_LOOP))
+ break;
+ }
+
+ /* Update our internal tail pointer */
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
+ rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
+
+ return nb_pkts_recd;
+}
+
+/* Notice:
+ * - nb_pkts < IAVF_VPMD_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > IAVF_VPMD_RX_MAX_BURST, only scan IAVF_VPMD_RX_MAX_BURST
+ * numbers of DD bits
+ */
+uint16_t
+iavf_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+/* Notice:
+ * - nb_pkts < IAVF_VPMD_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > IAVF_VPMD_RX_MAX_BURST, only scan IAVF_VPMD_RX_MAX_BURST
+ * numbers of DD bits
+ */
+uint16_t
+iavf_recv_pkts_vec_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return _recv_raw_pkts_vec_flex_rxd(rx_queue, rx_pkts, nb_pkts, NULL);
+}
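+
+/* Note: applications are not expected to call these vector receive routines
+ * directly; the Rx path selection logic in iavf_rxtx.c installs them as the
+ * device rx_pkt_burst callback when iavf_rx_vec_dev_check() succeeds, so
+ * packets reach them through rte_eth_rx_burst().
+ */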
+
+/* vPMD receive routine that reassembles scattered packets
+ * Notice:
+ * - nb_pkts < IAVF_VPMD_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > IAVF_VPMD_RX_MAX_BURST, only scan IAVF_VPMD_RX_MAX_BURST
+ * numbers of DD bits
+ */
+uint16_t
+iavf_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct iavf_rx_queue *rxq = rx_queue;
+ uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0};
+ unsigned int i = 0;
+
+ /* get some new buffers */
+ uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
+ split_flags);
+ if (nb_bufs == 0)
+ return 0;
+
+ /* happy day case, full burst + no packets to be joined */
+ const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+ if (!rxq->pkt_first_seg &&
+ split_fl64[0] == 0 && split_fl64[1] == 0 &&
+ split_fl64[2] == 0 && split_fl64[3] == 0)
+ return nb_bufs;
+
+	/* reassemble any packets that need reassembly */
+	if (!rxq->pkt_first_seg) {
+		/* find the first split flag, and only reassemble from there */
+ while (i < nb_bufs && !split_flags[i])
+ i++;
+ if (i == nb_bufs)
+ return nb_bufs;
+ rxq->pkt_first_seg = rx_pkts[i];
+ }
+ return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+ &split_flags[i]);
+}
+
+/* vPMD receive routine that reassembles scattered packets for flex RxD
+ * Notice:
+ * - nb_pkts < IAVF_VPMD_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > IAVF_VPMD_RX_MAX_BURST, only scan IAVF_VPMD_RX_MAX_BURST
+ * numbers of DD bits
+ */
+uint16_t
+iavf_recv_scattered_pkts_vec_flex_rxd(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct iavf_rx_queue *rxq = rx_queue;
+ uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0};
+ unsigned int i = 0;
+
+ /* get some new buffers */
+ uint16_t nb_bufs = _recv_raw_pkts_vec_flex_rxd(rxq, rx_pkts, nb_pkts,
+ split_flags);
+ if (nb_bufs == 0)
+ return 0;
+
+ /* happy day case, full burst + no packets to be joined */
+ const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+ if (!rxq->pkt_first_seg &&
+ split_fl64[0] == 0 && split_fl64[1] == 0 &&
+ split_fl64[2] == 0 && split_fl64[3] == 0)
+ return nb_bufs;
+
+	/* reassemble any packets that need reassembly */
+	if (!rxq->pkt_first_seg) {
+		/* find the first split flag, and only reassemble from there */
+ while (i < nb_bufs && !split_flags[i])
+ i++;
+ if (i == nb_bufs)
+ return nb_bufs;
+ rxq->pkt_first_seg = rx_pkts[i];
+ }
+ return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+ &split_flags[i]);
+}
+
+static inline void
+vtx1(volatile struct iavf_tx_desc *txdp, struct rte_mbuf *pkt, uint64_t flags)
+{
+ uint64_t high_qw =
+ (IAVF_TX_DESC_DTYPE_DATA |
+ ((uint64_t)flags << IAVF_TXD_QW1_CMD_SHIFT) |
+ ((uint64_t)pkt->data_len <<
+ IAVF_TXD_QW1_TX_BUF_SZ_SHIFT));
+
+ __m128i descriptor = _mm_set_epi64x(high_qw,
+ pkt->buf_iova + pkt->data_off);
+ _mm_store_si128((__m128i *)txdp, descriptor);
+}
+
+static inline void
+iavf_vtx(volatile struct iavf_tx_desc *txdp, struct rte_mbuf **pkt,
+ uint16_t nb_pkts, uint64_t flags)
+{
+ int i;
+
+ for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+ vtx1(txdp, *pkt, flags);
+}
+
+uint16_t
+iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
+ volatile struct iavf_tx_desc *txdp;
+ struct iavf_tx_entry *txep;
+ uint16_t n, nb_commit, tx_id;
+ uint64_t flags = IAVF_TX_DESC_CMD_EOP | 0x04; /* bit 2 must be set */
+ uint64_t rs = IAVF_TX_DESC_CMD_RS | flags;
+ int i;
+
+	/* crossing the rs_thresh boundary is not allowed */
+ nb_pkts = RTE_MIN(nb_pkts, txq->rs_thresh);
+
+ if (txq->nb_free < txq->free_thresh)
+ iavf_tx_free_bufs(txq);
+
+ nb_pkts = (uint16_t)RTE_MIN(txq->nb_free, nb_pkts);
+ if (unlikely(nb_pkts == 0))
+ return 0;
+ nb_commit = nb_pkts;
+
+ tx_id = txq->tx_tail;
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+
+ txq->nb_free = (uint16_t)(txq->nb_free - nb_pkts);
+
+ n = (uint16_t)(txq->nb_tx_desc - tx_id);
+ if (nb_commit >= n) {
+ tx_backlog_entry(txep, tx_pkts, n);
+
+ for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+ vtx1(txdp, *tx_pkts, flags);
+
+ vtx1(txdp, *tx_pkts++, rs);
+
+ nb_commit = (uint16_t)(nb_commit - n);
+
+ tx_id = 0;
+ txq->next_rs = (uint16_t)(txq->rs_thresh - 1);
+
+		/* avoid reaching the end of the ring */
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+ }
+
+ tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+ iavf_vtx(txdp, tx_pkts, nb_commit, flags);
+
+ tx_id = (uint16_t)(tx_id + nb_commit);
+ if (tx_id > txq->next_rs) {
+ txq->tx_ring[txq->next_rs].cmd_type_offset_bsz |=
+ rte_cpu_to_le_64(((uint64_t)IAVF_TX_DESC_CMD_RS) <<
+ IAVF_TXD_QW1_CMD_SHIFT);
+ txq->next_rs =
+ (uint16_t)(txq->next_rs + txq->rs_thresh);
+ }
+
+ txq->tx_tail = tx_id;
+
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_pkts=%u",
+ txq->port_id, txq->queue_id, tx_id, nb_pkts);
+
+ IAVF_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+
+ return nb_pkts;
+}
+
+uint16_t
+iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx = 0;
+ struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
+
+ while (nb_pkts) {
+ uint16_t ret, num;
+
+ num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
+ ret = iavf_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx], num);
+ nb_tx += ret;
+ nb_pkts -= ret;
+ if (ret < num)
+ break;
+ }
+
+ return nb_tx;
+}
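+
+/* Note: iavf_xmit_pkts_vec() is the vector Tx burst entry point; it splits a
+ * large burst into chunks of at most rs_thresh packets and hands each chunk
+ * to iavf_xmit_fixed_burst_vec() above.
+ */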
+
+static void __rte_cold
+iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq)
+{
+ _iavf_rx_queue_release_mbufs_vec(rxq);
+}
+
+static void __rte_cold
+iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq)
+{
+ _iavf_tx_queue_release_mbufs_vec(txq);
+}
+
+static const struct iavf_rxq_ops sse_vec_rxq_ops = {
+ .release_mbufs = iavf_rx_queue_release_mbufs_sse,
+};
+
+static const struct iavf_txq_ops sse_vec_txq_ops = {
+ .release_mbufs = iavf_tx_queue_release_mbufs_sse,
+};
+
+int __rte_cold
+iavf_txq_vec_setup(struct iavf_tx_queue *txq)
+{
+ txq->ops = &sse_vec_txq_ops;
+ return 0;
+}
+
+int __rte_cold
+iavf_rxq_vec_setup(struct iavf_rx_queue *rxq)
+{
+ rxq->ops = &sse_vec_rxq_ops;
+ return iavf_rxq_vec_setup_default(rxq);
+}
+
+int __rte_cold
+iavf_rx_vec_dev_check(struct rte_eth_dev *dev)
+{
+ return iavf_rx_vec_dev_check_default(dev);
+}
+
+int __rte_cold
+iavf_tx_vec_dev_check(struct rte_eth_dev *dev)
+{
+ return iavf_tx_vec_dev_check_default(dev);
+}
diff --git a/src/spdk/dpdk/drivers/net/iavf/iavf_vchnl.c b/src/spdk/dpdk/drivers/net/iavf/iavf_vchnl.c
new file mode 100644
index 000000000..33acea54a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/iavf/iavf_vchnl.c
@@ -0,0 +1,1077 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+
+#include <rte_debug.h>
+#include <rte_atomic.h>
+#include <rte_eal.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_dev.h>
+
+#include "iavf.h"
+#include "iavf_rxtx.h"
+
+#define MAX_TRY_TIMES 200
+#define ASQ_DELAY_MS 10
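+/* worst-case wait for a PF response: MAX_TRY_TIMES * ASQ_DELAY_MS = ~2 seconds */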
+
+/* Read data in admin queue to get msg from pf driver */
+static enum iavf_status
+iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
+ uint8_t *buf)
+{
+ struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ struct iavf_arq_event_info event;
+ enum virtchnl_ops opcode;
+ int ret;
+
+ event.buf_len = buf_len;
+ event.msg_buf = buf;
+ ret = iavf_clean_arq_element(hw, &event, NULL);
+ /* Can't read any msg from adminQ */
+ if (ret) {
+ PMD_DRV_LOG(DEBUG, "Can't read msg from AQ");
+ return ret;
+ }
+
+ opcode = (enum virtchnl_ops)rte_le_to_cpu_32(event.desc.cookie_high);
+ vf->cmd_retval = (enum virtchnl_status_code)rte_le_to_cpu_32(
+ event.desc.cookie_low);
+
+ PMD_DRV_LOG(DEBUG, "AQ from pf carries opcode %u, retval %d",
+ opcode, vf->cmd_retval);
+
+ if (opcode != vf->pend_cmd) {
+ PMD_DRV_LOG(WARNING, "command mismatch, expect %u, get %u",
+ vf->pend_cmd, opcode);
+ return IAVF_ERR_OPCODE_MISMATCH;
+ }
+
+ return IAVF_SUCCESS;
+}
+
+static int
+iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
+{
+ struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ enum iavf_status ret;
+ int err = 0;
+ int i = 0;
+
+ if (_atomic_set_cmd(vf, args->ops))
+ return -1;
+
+ ret = iavf_aq_send_msg_to_pf(hw, args->ops, IAVF_SUCCESS,
+ args->in_args, args->in_args_size, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "fail to send cmd %d", args->ops);
+ _clear_cmd(vf);
+		return -1;
+ }
+
+ switch (args->ops) {
+ case VIRTCHNL_OP_RESET_VF:
+		/* no need to wait for response */
+ _clear_cmd(vf);
+ break;
+ case VIRTCHNL_OP_VERSION:
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
+ case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
+ /* for init virtchnl ops, need to poll the response */
+ do {
+ ret = iavf_read_msg_from_pf(adapter, args->out_size,
+ args->out_buffer);
+ if (ret == IAVF_SUCCESS)
+ break;
+ rte_delay_ms(ASQ_DELAY_MS);
+ } while (i++ < MAX_TRY_TIMES);
+ if (i >= MAX_TRY_TIMES ||
+ vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
+ err = -1;
+ PMD_DRV_LOG(ERR, "No response or return failure (%d)"
+ " for cmd %d", vf->cmd_retval, args->ops);
+ }
+ _clear_cmd(vf);
+ break;
+
+ default:
+		/* For other virtchnl ops at runtime,
+		 * wait for the command done flag.
+ */
+ do {
+ if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN)
+ break;
+ rte_delay_ms(ASQ_DELAY_MS);
+			/* If no msg is read, or only a sys event is read, continue */
+ } while (i++ < MAX_TRY_TIMES);
+		/* If no response is received, clear the command */
+ if (i >= MAX_TRY_TIMES ||
+ vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
+ err = -1;
+ PMD_DRV_LOG(ERR, "No response or return failure (%d)"
+ " for cmd %d", vf->cmd_retval, args->ops);
+ _clear_cmd(vf);
+ }
+ break;
+ }
+
+ return err;
+}
+
+static uint32_t
+iavf_convert_link_speed(enum virtchnl_link_speed virt_link_speed)
+{
+ uint32_t speed;
+
+ switch (virt_link_speed) {
+ case VIRTCHNL_LINK_SPEED_100MB:
+ speed = 100;
+ break;
+ case VIRTCHNL_LINK_SPEED_1GB:
+ speed = 1000;
+ break;
+ case VIRTCHNL_LINK_SPEED_10GB:
+ speed = 10000;
+ break;
+ case VIRTCHNL_LINK_SPEED_40GB:
+ speed = 40000;
+ break;
+ case VIRTCHNL_LINK_SPEED_20GB:
+ speed = 20000;
+ break;
+ case VIRTCHNL_LINK_SPEED_25GB:
+ speed = 25000;
+ break;
+ case VIRTCHNL_LINK_SPEED_2_5GB:
+ speed = 2500;
+ break;
+ case VIRTCHNL_LINK_SPEED_5GB:
+ speed = 5000;
+ break;
+ default:
+ speed = 0;
+ break;
+ }
+
+ return speed;
+}
+
+static void
+iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
+ uint16_t msglen)
+{
+ struct virtchnl_pf_event *pf_msg =
+ (struct virtchnl_pf_event *)msg;
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ if (msglen < sizeof(struct virtchnl_pf_event)) {
+ PMD_DRV_LOG(DEBUG, "Error event");
+ return;
+ }
+ switch (pf_msg->event) {
+ case VIRTCHNL_EVENT_RESET_IMPENDING:
+ PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+ NULL);
+ break;
+ case VIRTCHNL_EVENT_LINK_CHANGE:
+ PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
+ vf->link_up = pf_msg->event_data.link_event.link_status;
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
+ vf->link_speed =
+ pf_msg->event_data.link_event_adv.link_speed;
+ } else {
+ enum virtchnl_link_speed speed;
+ speed = pf_msg->event_data.link_event.link_speed;
+ vf->link_speed = iavf_convert_link_speed(speed);
+ }
+ iavf_dev_link_update(dev, 0);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+ break;
+ case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
+ PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
+ break;
+ default:
+		PMD_DRV_LOG(ERR, "unknown event received %u", pf_msg->event);
+ break;
+ }
+}
+
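+/* Drain the VF admin queue: PF events are dispatched to the event handler
+ * above, and a response to the pending virtchnl command completes that
+ * command. Expected to be called from the PMD interrupt/alarm handling
+ * context.
+ */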
+void
+iavf_handle_virtchnl_msg(struct rte_eth_dev *dev)
+{
+ struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct iavf_arq_event_info info;
+ uint16_t pending, aq_opc;
+ enum virtchnl_ops msg_opc;
+ enum iavf_status msg_ret;
+ int ret;
+
+ info.buf_len = IAVF_AQ_BUF_SZ;
+ if (!vf->aq_resp) {
+ PMD_DRV_LOG(ERR, "Buffer for adminq resp should not be NULL");
+ return;
+ }
+ info.msg_buf = vf->aq_resp;
+
+ pending = 1;
+ while (pending) {
+ ret = iavf_clean_arq_element(hw, &info, &pending);
+
+ if (ret != IAVF_SUCCESS) {
+			PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
+				    "ret: %d", ret);
+ break;
+ }
+ aq_opc = rte_le_to_cpu_16(info.desc.opcode);
+		/* For a message sent from PF to VF, the opcode is stored in
+		 * cookie_high of struct iavf_aq_desc, while the return error
+		 * code is stored in cookie_low; both are set by the PF driver.
+ */
+ msg_opc = (enum virtchnl_ops)rte_le_to_cpu_32(
+ info.desc.cookie_high);
+ msg_ret = (enum iavf_status)rte_le_to_cpu_32(
+ info.desc.cookie_low);
+ switch (aq_opc) {
+ case iavf_aqc_opc_send_msg_to_vf:
+ if (msg_opc == VIRTCHNL_OP_EVENT) {
+ iavf_handle_pf_event_msg(dev, info.msg_buf,
+ info.msg_len);
+ } else {
+				/* the message read is the expected one */
+ if (msg_opc == vf->pend_cmd)
+ _notify_cmd(vf, msg_ret);
+ else
+					PMD_DRV_LOG(ERR, "command mismatch, "
+						    "expect %u, get %u",
+ vf->pend_cmd, msg_opc);
+ PMD_DRV_LOG(DEBUG,
+ "adminq response is received,"
+ " opcode = %d", msg_opc);
+ }
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Request %u is not supported yet",
+ aq_opc);
+ break;
+ }
+ }
+}
+
+int
+iavf_enable_vlan_strip(struct iavf_adapter *adapter)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ struct iavf_cmd_info args;
+ int ret;
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
+ args.in_args = NULL;
+ args.in_args_size = 0;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = IAVF_AQ_BUF_SZ;
+ ret = iavf_execute_vf_cmd(adapter, &args);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to execute command of"
+ " OP_ENABLE_VLAN_STRIPPING");
+
+ return ret;
+}
+
+int
+iavf_disable_vlan_strip(struct iavf_adapter *adapter)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ struct iavf_cmd_info args;
+ int ret;
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
+ args.in_args = NULL;
+ args.in_args_size = 0;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = IAVF_AQ_BUF_SZ;
+ ret = iavf_execute_vf_cmd(adapter, &args);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to execute command of"
+ " OP_DISABLE_VLAN_STRIPPING");
+
+ return ret;
+}
+
+#define VIRTCHNL_VERSION_MAJOR_START 1
+#define VIRTCHNL_VERSION_MINOR_START 1
+
+/* Check the API version, waiting synchronously until the version is read
+ * from the admin queue.
+ */
+int
+iavf_check_api_version(struct iavf_adapter *adapter)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_version_info version, *pver;
+ struct iavf_cmd_info args;
+ int err;
+
+ version.major = VIRTCHNL_VERSION_MAJOR;
+ version.minor = VIRTCHNL_VERSION_MINOR;
+
+ args.ops = VIRTCHNL_OP_VERSION;
+ args.in_args = (uint8_t *)&version;
+ args.in_args_size = sizeof(version);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = IAVF_AQ_BUF_SZ;
+
+ err = iavf_execute_vf_cmd(adapter, &args);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Fail to execute command of OP_VERSION");
+ return err;
+ }
+
+ pver = (struct virtchnl_version_info *)args.out_buffer;
+ vf->virtchnl_version = *pver;
+
+ if (vf->virtchnl_version.major < VIRTCHNL_VERSION_MAJOR_START ||
+ (vf->virtchnl_version.major == VIRTCHNL_VERSION_MAJOR_START &&
+ vf->virtchnl_version.minor < VIRTCHNL_VERSION_MINOR_START)) {
+ PMD_INIT_LOG(ERR, "VIRTCHNL API version should not be lower"
+			     " than (%u.%u) to support Adaptive VF",
+			     VIRTCHNL_VERSION_MAJOR_START,
+			     VIRTCHNL_VERSION_MINOR_START);
+ return -1;
+ } else if (vf->virtchnl_version.major > VIRTCHNL_VERSION_MAJOR ||
+ (vf->virtchnl_version.major == VIRTCHNL_VERSION_MAJOR &&
+ vf->virtchnl_version.minor > VIRTCHNL_VERSION_MINOR)) {
+ PMD_INIT_LOG(ERR, "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
+ vf->virtchnl_version.major,
+ vf->virtchnl_version.minor,
+ VIRTCHNL_VERSION_MAJOR,
+ VIRTCHNL_VERSION_MINOR);
+ return -1;
+ }
+
+	PMD_DRV_LOG(DEBUG, "Peer is a supported PF host");
+ return 0;
+}
+
+int
+iavf_get_vf_resource(struct iavf_adapter *adapter)
+{
+ struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ struct iavf_cmd_info args;
+ uint32_t caps, len;
+ int err, i;
+
+ args.ops = VIRTCHNL_OP_GET_VF_RESOURCES;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = IAVF_AQ_BUF_SZ;
+
+ caps = IAVF_BASIC_OFFLOAD_CAPS | VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
+ VIRTCHNL_VF_OFFLOAD_FDIR_PF |
+ VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;
+
+ args.in_args = (uint8_t *)&caps;
+ args.in_args_size = sizeof(caps);
+
+ err = iavf_execute_vf_cmd(adapter, &args);
+
+ if (err) {
+ PMD_DRV_LOG(ERR,
+ "Failed to execute command of OP_GET_VF_RESOURCE");
+ return -1;
+ }
+
+ len = sizeof(struct virtchnl_vf_resource) +
+ IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
+
+ rte_memcpy(vf->vf_res, args.out_buffer,
+ RTE_MIN(args.out_size, len));
+	/* parse VF config message back from PF */
+ iavf_vf_parse_hw_config(hw, vf->vf_res);
+ for (i = 0; i < vf->vf_res->num_vsis; i++) {
+ if (vf->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
+ vf->vsi_res = &vf->vf_res->vsi_res[i];
+ }
+
+ if (!vf->vsi_res) {
+ PMD_INIT_LOG(ERR, "no LAN VSI found");
+ return -1;
+ }
+
+ vf->vsi.vsi_id = vf->vsi_res->vsi_id;
+ vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs;
+ vf->vsi.adapter = adapter;
+
+ return 0;
+}
+
+int
+iavf_get_supported_rxdid(struct iavf_adapter *adapter)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ struct iavf_cmd_info args;
+ int ret;
+
+ args.ops = VIRTCHNL_OP_GET_SUPPORTED_RXDIDS;
+ args.in_args = NULL;
+ args.in_args_size = 0;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = IAVF_AQ_BUF_SZ;
+
+ ret = iavf_execute_vf_cmd(adapter, &args);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "Failed to execute command of OP_GET_SUPPORTED_RXDIDS");
+ return ret;
+ }
+
+ vf->supported_rxdid =
+ ((struct virtchnl_supported_rxdids *)args.out_buffer)->supported_rxdids;
+
+ return 0;
+}
+
+int
+iavf_enable_queues(struct iavf_adapter *adapter)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_queue_select queue_select;
+ struct iavf_cmd_info args;
+ int err;
+
+ memset(&queue_select, 0, sizeof(queue_select));
+ queue_select.vsi_id = vf->vsi_res->vsi_id;
+
+ queue_select.rx_queues = BIT(adapter->eth_dev->data->nb_rx_queues) - 1;
+ queue_select.tx_queues = BIT(adapter->eth_dev->data->nb_tx_queues) - 1;
+
+ args.ops = VIRTCHNL_OP_ENABLE_QUEUES;
+ args.in_args = (u8 *)&queue_select;
+ args.in_args_size = sizeof(queue_select);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = IAVF_AQ_BUF_SZ;
+ err = iavf_execute_vf_cmd(adapter, &args);
+ if (err) {
+ PMD_DRV_LOG(ERR,
+ "Failed to execute command of OP_ENABLE_QUEUES");
+ return err;
+ }
+ return 0;
+}
+
+int
+iavf_disable_queues(struct iavf_adapter *adapter)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_queue_select queue_select;
+ struct iavf_cmd_info args;
+ int err;
+
+ memset(&queue_select, 0, sizeof(queue_select));
+ queue_select.vsi_id = vf->vsi_res->vsi_id;
+
+ queue_select.rx_queues = BIT(adapter->eth_dev->data->nb_rx_queues) - 1;
+ queue_select.tx_queues = BIT(adapter->eth_dev->data->nb_tx_queues) - 1;
+
+ args.ops = VIRTCHNL_OP_DISABLE_QUEUES;
+ args.in_args = (u8 *)&queue_select;
+ args.in_args_size = sizeof(queue_select);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = IAVF_AQ_BUF_SZ;
+ err = iavf_execute_vf_cmd(adapter, &args);
+ if (err) {
+ PMD_DRV_LOG(ERR,
+ "Failed to execute command of OP_DISABLE_QUEUES");
+ return err;
+ }
+ return 0;
+}
+
+int
+iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
+ bool rx, bool on)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_queue_select queue_select;
+ struct iavf_cmd_info args;
+ int err;
+
+ memset(&queue_select, 0, sizeof(queue_select));
+ queue_select.vsi_id = vf->vsi_res->vsi_id;
+ if (rx)
+ queue_select.rx_queues |= 1 << qid;
+ else
+ queue_select.tx_queues |= 1 << qid;
+
+ if (on)
+ args.ops = VIRTCHNL_OP_ENABLE_QUEUES;
+ else
+ args.ops = VIRTCHNL_OP_DISABLE_QUEUES;
+ args.in_args = (u8 *)&queue_select;
+ args.in_args_size = sizeof(queue_select);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = IAVF_AQ_BUF_SZ;
+ err = iavf_execute_vf_cmd(adapter, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to execute command of %s",
+ on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES");
+ return err;
+}
+
+int
+iavf_configure_rss_lut(struct iavf_adapter *adapter)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_rss_lut *rss_lut;
+ struct iavf_cmd_info args;
+ int len, err = 0;
+
+ len = sizeof(*rss_lut) + vf->vf_res->rss_lut_size - 1;
+ rss_lut = rte_zmalloc("rss_lut", len, 0);
+ if (!rss_lut)
+ return -ENOMEM;
+
+ rss_lut->vsi_id = vf->vsi_res->vsi_id;
+ rss_lut->lut_entries = vf->vf_res->rss_lut_size;
+ rte_memcpy(rss_lut->lut, vf->rss_lut, vf->vf_res->rss_lut_size);
+
+ args.ops = VIRTCHNL_OP_CONFIG_RSS_LUT;
+ args.in_args = (u8 *)rss_lut;
+ args.in_args_size = len;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = IAVF_AQ_BUF_SZ;
+
+ err = iavf_execute_vf_cmd(adapter, &args);
+ if (err)
+ PMD_DRV_LOG(ERR,
+ "Failed to execute command of OP_CONFIG_RSS_LUT");
+
+ rte_free(rss_lut);
+ return err;
+}
+
+int
+iavf_configure_rss_key(struct iavf_adapter *adapter)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_rss_key *rss_key;
+ struct iavf_cmd_info args;
+ int len, err = 0;
+
+ len = sizeof(*rss_key) + vf->vf_res->rss_key_size - 1;
+ rss_key = rte_zmalloc("rss_key", len, 0);
+ if (!rss_key)
+ return -ENOMEM;
+
+ rss_key->vsi_id = vf->vsi_res->vsi_id;
+ rss_key->key_len = vf->vf_res->rss_key_size;
+ rte_memcpy(rss_key->key, vf->rss_key, vf->vf_res->rss_key_size);
+
+ args.ops = VIRTCHNL_OP_CONFIG_RSS_KEY;
+ args.in_args = (u8 *)rss_key;
+ args.in_args_size = len;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = IAVF_AQ_BUF_SZ;
+
+ err = iavf_execute_vf_cmd(adapter, &args);
+ if (err)
+ PMD_DRV_LOG(ERR,
+ "Failed to execute command of OP_CONFIG_RSS_KEY");
+
+ rte_free(rss_key);
+ return err;
+}
+
+int
+iavf_configure_queues(struct iavf_adapter *adapter)
+{
+ struct iavf_rx_queue **rxq =
+ (struct iavf_rx_queue **)adapter->eth_dev->data->rx_queues;
+ struct iavf_tx_queue **txq =
+ (struct iavf_tx_queue **)adapter->eth_dev->data->tx_queues;
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_vsi_queue_config_info *vc_config;
+ struct virtchnl_queue_pair_info *vc_qp;
+ struct iavf_cmd_info args;
+ uint16_t i, size;
+ int err;
+
+ size = sizeof(*vc_config) +
+ sizeof(vc_config->qpair[0]) * vf->num_queue_pairs;
+ vc_config = rte_zmalloc("cfg_queue", size, 0);
+ if (!vc_config)
+ return -ENOMEM;
+
+ vc_config->vsi_id = vf->vsi_res->vsi_id;
+ vc_config->num_queue_pairs = vf->num_queue_pairs;
+
+ for (i = 0, vc_qp = vc_config->qpair;
+ i < vf->num_queue_pairs;
+ i++, vc_qp++) {
+ vc_qp->txq.vsi_id = vf->vsi_res->vsi_id;
+ vc_qp->txq.queue_id = i;
+		/* Virtchnl configures queues in pairs */
+ if (i < adapter->eth_dev->data->nb_tx_queues) {
+ vc_qp->txq.ring_len = txq[i]->nb_tx_desc;
+ vc_qp->txq.dma_ring_addr = txq[i]->tx_ring_phys_addr;
+ }
+ vc_qp->rxq.vsi_id = vf->vsi_res->vsi_id;
+ vc_qp->rxq.queue_id = i;
+ vc_qp->rxq.max_pkt_size = vf->max_pkt_len;
+		/* Virtchnl configures queues in pairs */
+ if (i < adapter->eth_dev->data->nb_rx_queues) {
+ vc_qp->rxq.ring_len = rxq[i]->nb_rx_desc;
+ vc_qp->rxq.dma_ring_addr = rxq[i]->rx_ring_phys_addr;
+ vc_qp->rxq.databuffer_size = rxq[i]->rx_buf_len;
+ }
+
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+ if (vf->vf_res->vf_cap_flags &
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
+ vf->supported_rxdid & BIT(IAVF_RXDID_COMMS_OVS_1)) {
+ vc_qp->rxq.rxdid = IAVF_RXDID_COMMS_OVS_1;
+ PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
+ "Queue[%d]", vc_qp->rxq.rxdid, i);
+ } else {
+ vc_qp->rxq.rxdid = IAVF_RXDID_LEGACY_1;
+ PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
+ "Queue[%d]", vc_qp->rxq.rxdid, i);
+ }
+#else
+ if (vf->vf_res->vf_cap_flags &
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
+ vf->supported_rxdid & BIT(IAVF_RXDID_LEGACY_0)) {
+ vc_qp->rxq.rxdid = IAVF_RXDID_LEGACY_0;
+ PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
+ "Queue[%d]", vc_qp->rxq.rxdid, i);
+ } else {
+ PMD_DRV_LOG(ERR, "RXDID == 0 is not supported");
+ return -1;
+ }
+#endif
+ }
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
+ args.in_args = (uint8_t *)vc_config;
+ args.in_args_size = size;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = IAVF_AQ_BUF_SZ;
+
+ err = iavf_execute_vf_cmd(adapter, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to execute command of"
+ " VIRTCHNL_OP_CONFIG_VSI_QUEUES");
+
+ rte_free(vc_config);
+ return err;
+}
+
+int
+iavf_config_irq_map(struct iavf_adapter *adapter)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_irq_map_info *map_info;
+ struct virtchnl_vector_map *vecmap;
+ struct iavf_cmd_info args;
+ int len, i, err;
+
+ len = sizeof(struct virtchnl_irq_map_info) +
+ sizeof(struct virtchnl_vector_map) * vf->nb_msix;
+
+ map_info = rte_zmalloc("map_info", len, 0);
+ if (!map_info)
+ return -ENOMEM;
+
+ map_info->num_vectors = vf->nb_msix;
+ for (i = 0; i < vf->nb_msix; i++) {
+ vecmap = &map_info->vecmap[i];
+ vecmap->vsi_id = vf->vsi_res->vsi_id;
+ vecmap->rxitr_idx = IAVF_ITR_INDEX_DEFAULT;
+ vecmap->vector_id = vf->msix_base + i;
+ vecmap->txq_map = 0;
+ vecmap->rxq_map = vf->rxq_map[vf->msix_base + i];
+ }
+
+ args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
+ args.in_args = (u8 *)map_info;
+ args.in_args_size = len;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = IAVF_AQ_BUF_SZ;
+ err = iavf_execute_vf_cmd(adapter, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");
+
+ rte_free(map_info);
+ return err;
+}
+
+void
+iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
+{
+ struct virtchnl_ether_addr_list *list;
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ struct rte_ether_addr *addr;
+ struct iavf_cmd_info args;
+ int len, err, i, j;
+ int next_begin = 0;
+ int begin = 0;
+
+ do {
+ j = 0;
+ len = sizeof(struct virtchnl_ether_addr_list);
+ for (i = begin; i < IAVF_NUM_MACADDR_MAX; i++, next_begin++) {
+ addr = &adapter->eth_dev->data->mac_addrs[i];
+ if (rte_is_zero_ether_addr(addr))
+ continue;
+ len += sizeof(struct virtchnl_ether_addr);
+ if (len >= IAVF_AQ_BUF_SZ) {
+ next_begin = i + 1;
+ break;
+ }
+ }
+
+ list = rte_zmalloc("iavf_del_mac_buffer", len, 0);
+ if (!list) {
+ PMD_DRV_LOG(ERR, "fail to allocate memory");
+ return;
+ }
+
+ for (i = begin; i < next_begin; i++) {
+ addr = &adapter->eth_dev->data->mac_addrs[i];
+ if (rte_is_zero_ether_addr(addr))
+ continue;
+ rte_memcpy(list->list[j].addr, addr->addr_bytes,
+ sizeof(addr->addr_bytes));
+ PMD_DRV_LOG(DEBUG, "add/rm mac:%x:%x:%x:%x:%x:%x",
+ addr->addr_bytes[0], addr->addr_bytes[1],
+ addr->addr_bytes[2], addr->addr_bytes[3],
+ addr->addr_bytes[4], addr->addr_bytes[5]);
+ j++;
+ }
+ list->vsi_id = vf->vsi_res->vsi_id;
+ list->num_elements = j;
+ args.ops = add ? VIRTCHNL_OP_ADD_ETH_ADDR :
+ VIRTCHNL_OP_DEL_ETH_ADDR;
+ args.in_args = (uint8_t *)list;
+ args.in_args_size = len;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = IAVF_AQ_BUF_SZ;
+ err = iavf_execute_vf_cmd(adapter, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command %s",
+ add ? "OP_ADD_ETHER_ADDRESS" :
+ "OP_DEL_ETHER_ADDRESS");
+ rte_free(list);
+ begin = next_begin;
+ } while (begin < IAVF_NUM_MACADDR_MAX);
+}
+
+int
+iavf_query_stats(struct iavf_adapter *adapter,
+ struct virtchnl_eth_stats **pstats)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_queue_select q_stats;
+ struct iavf_cmd_info args;
+ int err;
+
+ memset(&q_stats, 0, sizeof(q_stats));
+ q_stats.vsi_id = vf->vsi_res->vsi_id;
+ args.ops = VIRTCHNL_OP_GET_STATS;
+ args.in_args = (uint8_t *)&q_stats;
+ args.in_args_size = sizeof(q_stats);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = IAVF_AQ_BUF_SZ;
+
+ err = iavf_execute_vf_cmd(adapter, &args);
+ if (err) {
+ PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
+ *pstats = NULL;
+ return err;
+ }
+ *pstats = (struct virtchnl_eth_stats *)args.out_buffer;
+ return 0;
+}
+
+int
+iavf_config_promisc(struct iavf_adapter *adapter,
+ bool enable_unicast,
+ bool enable_multicast)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_promisc_info promisc;
+ struct iavf_cmd_info args;
+ int err;
+
+ promisc.flags = 0;
+ promisc.vsi_id = vf->vsi_res->vsi_id;
+
+ if (enable_unicast)
+ promisc.flags |= FLAG_VF_UNICAST_PROMISC;
+
+ if (enable_multicast)
+ promisc.flags |= FLAG_VF_MULTICAST_PROMISC;
+
+ args.ops = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
+ args.in_args = (uint8_t *)&promisc;
+ args.in_args_size = sizeof(promisc);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = IAVF_AQ_BUF_SZ;
+
+ err = iavf_execute_vf_cmd(adapter, &args);
+
+ if (err)
+ PMD_DRV_LOG(ERR,
+ "fail to execute command CONFIG_PROMISCUOUS_MODE");
+ return err;
+}
+
+int
+iavf_add_del_eth_addr(struct iavf_adapter *adapter, struct rte_ether_addr *addr,
+ bool add)
+{
+ struct virtchnl_ether_addr_list *list;
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) +
+ sizeof(struct virtchnl_ether_addr)];
+ struct iavf_cmd_info args;
+ int err;
+
+ list = (struct virtchnl_ether_addr_list *)cmd_buffer;
+ list->vsi_id = vf->vsi_res->vsi_id;
+ list->num_elements = 1;
+ rte_memcpy(list->list[0].addr, addr->addr_bytes,
+ sizeof(addr->addr_bytes));
+
+ args.ops = add ? VIRTCHNL_OP_ADD_ETH_ADDR : VIRTCHNL_OP_DEL_ETH_ADDR;
+ args.in_args = cmd_buffer;
+ args.in_args_size = sizeof(cmd_buffer);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = IAVF_AQ_BUF_SZ;
+ err = iavf_execute_vf_cmd(adapter, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command %s",
+ add ? "OP_ADD_ETH_ADDR" : "OP_DEL_ETH_ADDR");
+ return err;
+}
+
+int
+iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
+{
+ struct virtchnl_vlan_filter_list *vlan_list;
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ uint8_t cmd_buffer[sizeof(struct virtchnl_vlan_filter_list) +
+ sizeof(uint16_t)];
+ struct iavf_cmd_info args;
+ int err;
+
+ vlan_list = (struct virtchnl_vlan_filter_list *)cmd_buffer;
+ vlan_list->vsi_id = vf->vsi_res->vsi_id;
+ vlan_list->num_elements = 1;
+ vlan_list->vlan_id[0] = vlanid;
+
+ args.ops = add ? VIRTCHNL_OP_ADD_VLAN : VIRTCHNL_OP_DEL_VLAN;
+ args.in_args = cmd_buffer;
+ args.in_args_size = sizeof(cmd_buffer);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = IAVF_AQ_BUF_SZ;
+ err = iavf_execute_vf_cmd(adapter, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command %s",
+ add ? "OP_ADD_VLAN" : "OP_DEL_VLAN");
+
+ return err;
+}
+
+int
+iavf_fdir_add(struct iavf_adapter *adapter,
+ struct iavf_fdir_conf *filter)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_fdir_add *fdir_ret;
+
+ struct iavf_cmd_info args;
+ int err;
+
+ filter->add_fltr.vsi_id = vf->vsi_res->vsi_id;
+ filter->add_fltr.validate_only = 0;
+
+ args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
+ args.in_args = (uint8_t *)(&filter->add_fltr);
+	args.in_args_size = sizeof(filter->add_fltr);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = IAVF_AQ_BUF_SZ;
+
+ err = iavf_execute_vf_cmd(adapter, &args);
+ if (err) {
+ PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER");
+ return err;
+ }
+
+ fdir_ret = (struct virtchnl_fdir_add *)args.out_buffer;
+ filter->flow_id = fdir_ret->flow_id;
+
+ if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
+ PMD_DRV_LOG(INFO,
+			"Add rule request accepted by the PF");
+ } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE) {
+ PMD_DRV_LOG(ERR,
+ "Failed to add rule request due to no hw resource");
+ return -1;
+ } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_EXIST) {
+ PMD_DRV_LOG(ERR,
+			"Failed to add rule request: the rule already exists");
+ return -1;
+ } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT) {
+ PMD_DRV_LOG(ERR,
+			"Failed to add rule request: the rule conflicts with an existing rule");
+ return -1;
+ } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID) {
+ PMD_DRV_LOG(ERR,
+			"Failed to add rule request: not supported by the HW");
+ return -1;
+ } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT) {
+ PMD_DRV_LOG(ERR,
+			"Failed to add rule request: rule programming timed out");
+ return -1;
+ } else {
+ PMD_DRV_LOG(ERR,
+ "Failed to add rule request due to other reasons");
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+iavf_fdir_del(struct iavf_adapter *adapter,
+ struct iavf_fdir_conf *filter)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_fdir_del *fdir_ret;
+
+ struct iavf_cmd_info args;
+ int err;
+
+ filter->del_fltr.vsi_id = vf->vsi_res->vsi_id;
+ filter->del_fltr.flow_id = filter->flow_id;
+
+ args.ops = VIRTCHNL_OP_DEL_FDIR_FILTER;
+ args.in_args = (uint8_t *)(&filter->del_fltr);
+ args.in_args_size = sizeof(filter->del_fltr);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = IAVF_AQ_BUF_SZ;
+
+ err = iavf_execute_vf_cmd(adapter, &args);
+ if (err) {
+ PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER");
+ return err;
+ }
+
+ fdir_ret = (struct virtchnl_fdir_del *)args.out_buffer;
+
+ if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
+ PMD_DRV_LOG(INFO,
+			"Delete rule request accepted by the PF");
+ } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
+ PMD_DRV_LOG(ERR,
+			"Failed to delete rule request: the rule does not exist");
+ return -1;
+ } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT) {
+ PMD_DRV_LOG(ERR,
+			"Failed to delete rule request: rule programming timed out");
+ return -1;
+ } else {
+ PMD_DRV_LOG(ERR,
+ "Failed to delete rule request due to other reasons");
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+iavf_fdir_check(struct iavf_adapter *adapter,
+ struct iavf_fdir_conf *filter)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_fdir_add *fdir_ret;
+
+ struct iavf_cmd_info args;
+ int err;
+
+ filter->add_fltr.vsi_id = vf->vsi_res->vsi_id;
+ filter->add_fltr.validate_only = 1;
+
+ args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
+ args.in_args = (uint8_t *)(&filter->add_fltr);
+	args.in_args_size = sizeof(filter->add_fltr);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = IAVF_AQ_BUF_SZ;
+
+ err = iavf_execute_vf_cmd(adapter, &args);
+ if (err) {
+		PMD_DRV_LOG(ERR, "fail to check flow director rule");
+ return err;
+ }
+
+ fdir_ret = (struct virtchnl_fdir_add *)args.out_buffer;
+
+ if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
+ PMD_DRV_LOG(INFO,
+			"Check rule request accepted by the PF");
+ } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID) {
+ PMD_DRV_LOG(ERR,
+			"Failed to check rule request: parameter validation failed"
+			" or not supported by the HW");
+ return -1;
+ } else {
+ PMD_DRV_LOG(ERR,
+ "Failed to check rule request due to other reasons");
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
+ struct virtchnl_rss_cfg *rss_cfg, bool add)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ struct iavf_cmd_info args;
+ int err;
+
+ memset(&args, 0, sizeof(args));
+ args.ops = add ? VIRTCHNL_OP_ADD_RSS_CFG :
+ VIRTCHNL_OP_DEL_RSS_CFG;
+ args.in_args = (u8 *)rss_cfg;
+ args.in_args_size = sizeof(*rss_cfg);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = IAVF_AQ_BUF_SZ;
+
+ err = iavf_execute_vf_cmd(adapter, &args);
+ if (err)
+ PMD_DRV_LOG(ERR,
+ "Failed to execute command of %s",
+ add ? "OP_ADD_RSS_CFG" :
+			    "OP_DEL_RSS_CFG");
+
+ return err;
+}
diff --git a/src/spdk/dpdk/drivers/net/iavf/meson.build b/src/spdk/dpdk/drivers/net/iavf/meson.build
new file mode 100644
index 000000000..a3fad363d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/iavf/meson.build
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Luca Boccassi <bluca@debian.org>
+
+cflags += ['-Wno-strict-aliasing']
+
+includes += include_directories('../../common/iavf')
+deps += ['common_iavf']
+
+sources = files(
+ 'iavf_ethdev.c',
+ 'iavf_rxtx.c',
+ 'iavf_vchnl.c',
+ 'iavf_generic_flow.c',
+ 'iavf_fdir.c',
+ 'iavf_hash.c',
+)
+
+if arch_subdir == 'x86'
+ sources += files('iavf_rxtx_vec_sse.c')
+
+ # compile AVX2 version if either:
+	# a. we have AVX2 supported in minimum instruction set baseline
+ # b. it's not minimum instruction set, but supported by compiler
+ if dpdk_conf.has('RTE_MACHINE_CPUFLAG_AVX2')
+ cflags += ['-DCC_AVX2_SUPPORT']
+ sources += files('iavf_rxtx_vec_avx2.c')
+ elif cc.has_argument('-mavx2')
+ cflags += ['-DCC_AVX2_SUPPORT']
+ iavf_avx2_lib = static_library('iavf_avx2_lib',
+ 'iavf_rxtx_vec_avx2.c',
+ dependencies: [static_rte_ethdev,
+ static_rte_kvargs, static_rte_hash],
+ include_directories: includes,
+ c_args: [cflags, '-mavx2'])
+ objs += iavf_avx2_lib.extract_objects('iavf_rxtx_vec_avx2.c')
+ endif
+endif
diff --git a/src/spdk/dpdk/drivers/net/iavf/rte_pmd_iavf_version.map b/src/spdk/dpdk/drivers/net/iavf/rte_pmd_iavf_version.map
new file mode 100644
index 000000000..f9f17e4f6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/iavf/rte_pmd_iavf_version.map
@@ -0,0 +1,3 @@
+DPDK_20.0 {
+ local: *;
+};