Diffstat (limited to 'src/spdk/dpdk/drivers/net/hinic')
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/Makefile  67
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/base/hinic_compat.h  279
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/base/hinic_csr.h  135
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_api_cmd.c  1041
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_api_cmd.h  271
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cfg.c  244
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cfg.h  145
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cmd.h  469
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cmdq.c  855
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cmdq.h  243
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_eqs.c  490
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_eqs.h  98
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_hwdev.c  1531
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_hwdev.h  491
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_hwif.c  554
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_hwif.h  125
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_mbox.c  933
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_mbox.h  93
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_mgmt.c  804
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_mgmt.h  119
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_niccfg.c  2121
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_niccfg.h  944
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_nicio.c  907
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_nicio.h  264
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_wq.c  180
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_wq.h  137
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/base/meson.build  37
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/hinic_pmd_ethdev.c  3257
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/hinic_pmd_ethdev.h  352
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/hinic_pmd_flow.c  3272
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/hinic_pmd_rx.c  1089
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/hinic_pmd_rx.h  131
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/hinic_pmd_tx.c  1334
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/hinic_pmd_tx.h  148
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/meson.build  14
-rw-r--r--  src/spdk/dpdk/drivers/net/hinic/rte_pmd_hinic_version.map  3
36 files changed, 23177 insertions, 0 deletions
diff --git a/src/spdk/dpdk/drivers/net/hinic/Makefile b/src/spdk/dpdk/drivers/net/hinic/Makefile
new file mode 100644
index 000000000..87fd843e4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/Makefile
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Huawei Technologies Co., Ltd
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_hinic.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+ifeq ($(CONFIG_RTE_ARCH_ARM64),y)
+CFLAGS += -D__ARM64_NEON__
+else ifeq ($(CONFIG_RTE_ARCH_X86_64),y)
+CFLAGS += -D__X86_64_SSE__
+endif
+
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_hash
+LDLIBS += -lrte_bus_pci
+LDLIBS += -lpthread
+
+EXPORT_MAP := rte_pmd_hinic_version.map
+
+#
+# CFLAGS for 32-bits platforms
+#
+ifneq ($(CONFIG_RTE_ARCH_64),y)
+ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+#
+# CFLAGS for icc
+#
+CFLAGS += -diag-disable 2259
+else
+#
+# CFLAGS for gcc
+#
+CFLAGS += -Wno-int-to-pointer-cast
+CFLAGS += -Wno-pointer-to-int-cast
+endif
+endif
+
+VPATH += $(SRCDIR)/base
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_api_cmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_cfg.c
+SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_cmdq.c
+SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_eqs.c
+SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_hwdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_hwif.c
+SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_mgmt.c
+SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_niccfg.c
+SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_nicio.c
+SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_wq.c
+SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_mbox.c
+SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_flow.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_tx.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
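The SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) lines above only pull the driver sources into
the legacy make-based build when the PMD is enabled in the DPDK configuration (typically
config/common_base), i.e. when the config contains:

	CONFIG_RTE_LIBRTE_HINIC_PMD=y

The meson.build files added later in this patch cover the meson build path separately.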
diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_compat.h b/src/spdk/dpdk/drivers/net/hinic/base/hinic_compat.h
new file mode 100644
index 000000000..921b83012
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_compat.h
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_COMPAT_H_
+#define _HINIC_COMPAT_H_
+
+#include <stdint.h>
+#include <sys/time.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_memzone.h>
+#include <rte_memcpy.h>
+#include <rte_malloc.h>
+#include <rte_atomic.h>
+#include <rte_spinlock.h>
+#include <rte_cycles.h>
+#include <rte_log.h>
+
+typedef uint8_t u8;
+typedef int8_t s8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef uint64_t u64;
+
+#ifndef dma_addr_t
+typedef uint64_t dma_addr_t;
+#endif
+
+#ifndef gfp_t
+#define gfp_t unsigned
+#endif
+
+#ifndef bool
+#define bool int
+#endif
+
+#ifndef FALSE
+#define FALSE (0)
+#endif
+
+#ifndef TRUE
+#define TRUE (1)
+#endif
+
+#ifndef false
+#define false (0)
+#endif
+
+#ifndef true
+#define true (1)
+#endif
+
+#ifndef NULL
+#define NULL ((void *)0)
+#endif
+
+#define HINIC_ERROR (-1)
+#define HINIC_OK (0)
+
+#ifndef BIT
+#define BIT(n) (1 << (n))
+#endif
+
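+/* The two 16-bit shifts keep upper_32_bits() well defined even when n is only
+ * 32 bits wide (a single right shift by 32 would be undefined behaviour).
+ */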
+#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
+#define lower_32_bits(n) ((u32)(n))
+
+/* Returns X / Y, rounding up. X must be nonnegative to round correctly. */
+#define DIV_ROUND_UP(X, Y) (((X) + ((Y) - 1)) / (Y))
+
+/* Returns X rounded up to the nearest multiple of Y. */
+#define ROUND_UP(X, Y) (DIV_ROUND_UP(X, Y) * (Y))
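+/* e.g. DIV_ROUND_UP(10, 4) == 3 and ROUND_UP(10, 4) == 12 */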
+
+#undef ALIGN
+#define ALIGN(x, a) RTE_ALIGN(x, a)
+
+#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
+
+/* Reported driver name. */
+#define HINIC_DRIVER_NAME "net_hinic"
+
+extern int hinic_logtype;
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, hinic_logtype, \
+ HINIC_DRIVER_NAME": " fmt "\n", ##args)
+
+/* common definition */
+#ifndef ETH_ALEN
+#define ETH_ALEN 6
+#endif
+#define ETH_HLEN 14
+#define ETH_CRC_LEN 4
+#define VLAN_PRIO_SHIFT 13
+#define VLAN_N_VID 4096
+
+/* byte order conversion interface */
+#define cpu_to_be16(o) rte_cpu_to_be_16(o)
+#define cpu_to_be32(o) rte_cpu_to_be_32(o)
+#define cpu_to_be64(o) rte_cpu_to_be_64(o)
+#define cpu_to_le32(o) rte_cpu_to_le_32(o)
+#define be16_to_cpu(o) rte_be_to_cpu_16(o)
+#define be32_to_cpu(o) rte_be_to_cpu_32(o)
+#define be64_to_cpu(o) rte_be_to_cpu_64(o)
+#define le32_to_cpu(o) rte_le_to_cpu_32(o)
+
+/* virtual memory and DMA physical memory */
+#define __iomem
+#define GFP_KERNEL RTE_MEMZONE_IOVA_CONTIG
+#define HINIC_PAGE_SHIFT 12
+#define HINIC_PAGE_SIZE RTE_PGSIZE_4K
+#define HINIC_MEM_ALLOC_ALIGN_MIN 8
+
+#define HINIC_PAGE_SIZE_DPDK 6
+
+static inline int hinic_test_bit(int nr, volatile unsigned long *addr)
+{
+ int res;
+
+ res = ((*addr) & (1UL << nr)) != 0;
+ return res;
+}
+
+static inline void hinic_set_bit(unsigned int nr, volatile unsigned long *addr)
+{
+ __sync_fetch_and_or(addr, (1UL << nr));
+}
+
+static inline void hinic_clear_bit(int nr, volatile unsigned long *addr)
+{
+ __sync_fetch_and_and(addr, ~(1UL << nr));
+}
+
+static inline int hinic_test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = (1UL << nr);
+
+ return __sync_fetch_and_and(addr, ~mask) & mask;
+}
+
+static inline int hinic_test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = (1UL << nr);
+
+ return __sync_fetch_and_or(addr, mask) & mask;
+}
+
+void *dma_zalloc_coherent(void *dev, size_t size, dma_addr_t *dma_handle,
+ unsigned int socket_id);
+
+void *dma_zalloc_coherent_aligned(void *hwdev, size_t size,
+ dma_addr_t *dma_handle, unsigned int socket_id);
+
+void *dma_zalloc_coherent_aligned256k(void *hwdev, size_t size,
+ dma_addr_t *dma_handle, unsigned int socket_id);
+
+void dma_free_coherent(void *dev, size_t size, void *virt, dma_addr_t phys);
+
+/* dma pool alloc and free */
+#define pci_pool dma_pool
+#define pci_pool_alloc(pool, handle) dma_pool_alloc(pool, handle)
+#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
+
+struct dma_pool *dma_pool_create(const char *name, void *dev, size_t size,
+ size_t align, size_t boundary);
+void dma_pool_destroy(struct dma_pool *pool);
+void *dma_pool_alloc(struct pci_pool *pool, dma_addr_t *dma_addr);
+void dma_pool_free(struct pci_pool *pool, void *vaddr, dma_addr_t dma);
+
+#define kzalloc(size, flag) rte_zmalloc(NULL, size, HINIC_MEM_ALLOC_ALIGN_MIN)
+#define kzalloc_aligned(size, flag) rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE)
+#define kfree(ptr) rte_free(ptr)
+
+/* mmio interface */
+static inline void writel(u32 value, volatile void *addr)
+{
+ *(volatile u32 *)addr = value;
+}
+
+static inline u32 readl(const volatile void *addr)
+{
+ return *(const volatile u32 *)addr;
+}
+
+#define __raw_writel(value, reg) writel((value), (reg))
+#define __raw_readl(reg) readl((reg))
+
+/* Spinlock related interface */
+#define hinic_spinlock_t rte_spinlock_t
+
+#define spinlock_t rte_spinlock_t
+#define spin_lock_init(spinlock_ptr)	rte_spinlock_init(spinlock_ptr)
+#define spin_lock_deinit(lock)
+#define spin_lock(spinlock_ptr)	rte_spinlock_lock(spinlock_ptr)
+#define spin_unlock(spinlock_ptr)	rte_spinlock_unlock(spinlock_ptr)
+
+static inline unsigned long get_timeofday_ms(void)
+{
+ struct timeval tv;
+
+ (void)gettimeofday(&tv, NULL);
+
+ return (unsigned long)tv.tv_sec * 1000 + tv.tv_usec / 1000;
+}
+
+#define jiffies get_timeofday_ms()
+#define msecs_to_jiffies(ms) (ms)
+#define time_before(now, end) ((now) < (end))
+
+/* misc kernel utils */
+static inline u16 ilog2(u32 n)
+{
+ u16 res = 0;
+
+ while (n > 1) {
+ n >>= 1;
+ res++;
+ }
+
+ return res;
+}
+
+static inline int hinic_mutex_init(pthread_mutex_t *pthreadmutex,
+ const pthread_mutexattr_t *mattr)
+{
+ int err;
+
+ err = pthread_mutex_init(pthreadmutex, mattr);
+ if (unlikely(err))
+ PMD_DRV_LOG(ERR, "Fail to initialize mutex, error: %d", err);
+
+ return err;
+}
+
+static inline int hinic_mutex_destroy(pthread_mutex_t *pthreadmutex)
+{
+ int err;
+
+ err = pthread_mutex_destroy(pthreadmutex);
+ if (unlikely(err))
+ PMD_DRV_LOG(ERR, "Fail to destroy mutex, error: %d", err);
+
+ return err;
+}
+
+static inline int hinic_mutex_lock(pthread_mutex_t *pthreadmutex)
+{
+ int err;
+
+ err = pthread_mutex_lock(pthreadmutex);
+ if (!err) {
+ return err;
+ } else if (err == EOWNERDEAD) {
+		PMD_DRV_LOG(ERR, "Mutex lock failed. (ErrorNo=%d)", err);
+#if defined(__GLIBC__)
+#if __GLIBC_PREREQ(2, 12)
+ (void)pthread_mutex_consistent(pthreadmutex);
+#else
+ (void)pthread_mutex_consistent_np(pthreadmutex);
+#endif
+#else
+ (void)pthread_mutex_consistent(pthreadmutex);
+#endif
+ } else {
+		PMD_DRV_LOG(ERR, "Mutex lock failed. (ErrorNo=%d)", err);
+ }
+
+ return err;
+}
+
+static inline int hinic_mutex_unlock(pthread_mutex_t *pthreadmutex)
+{
+ return pthread_mutex_unlock(pthreadmutex);
+}
+
+#endif /* _HINIC_COMPAT_H_ */
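hinic_compat.h maps Linux-kernel style helpers onto DPDK primitives: jiffies becomes a
millisecond wall-clock read, msecs_to_jiffies() is the identity, and time_before() is a
plain comparison. The driver's wait loops later in this patch (wait_for_status_poll(),
api_cmd_hw_restart(), wait_for_ready_chain()) all follow the same pattern. The fragment
below is only a hedged sketch of that idiom, assuming <errno.h> in addition to the
headers hinic_compat.h already pulls in; wait_until_done() and check_done() are
hypothetical names, not symbols from the driver:

	static int wait_until_done(int (*check_done)(void *arg), void *arg,
				   unsigned long timeout_ms)
	{
		unsigned long end = jiffies + msecs_to_jiffies(timeout_ms);

		do {
			if (check_done(arg))
				return 0;	/* condition met before the deadline */
			rte_delay_us(10);	/* brief back-off between polls */
		} while (time_before(jiffies, end));

		return -ETIMEDOUT;		/* deadline expired */
	}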
diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_csr.h b/src/spdk/dpdk/drivers/net/hinic/base/hinic_csr.h
new file mode 100644
index 000000000..2626f6960
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_csr.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_CSR_H_
+#define _HINIC_CSR_H_
+
+#define HINIC_CSR_GLOBAL_BASE_ADDR 0x4000
+
+/* HW interface registers */
+#define HINIC_CSR_FUNC_ATTR0_ADDR 0x0
+#define HINIC_CSR_FUNC_ATTR1_ADDR 0x4
+#define HINIC_CSR_FUNC_ATTR2_ADDR 0x8
+#define HINIC_CSR_FUNC_ATTR4_ADDR 0x10
+#define HINIC_CSR_FUNC_ATTR5_ADDR 0x14
+
+#define HINIC_FUNC_CSR_MAILBOX_DATA_OFF 0x80
+#define HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF 0x0100
+#define HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF 0x0104
+#define HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF 0x0108
+#define HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF 0x010C
+
+#define HINIC_CSR_DMA_ATTR_TBL_BASE 0xC80
+
+#define HINIC_ELECTION_BASE 0x200
+
+#define HINIC_CSR_DMA_ATTR_TBL_STRIDE 0x4
+#define HINIC_CSR_DMA_ATTR_TBL_ADDR(idx) \
+ (HINIC_CSR_DMA_ATTR_TBL_BASE \
+ + (idx) * HINIC_CSR_DMA_ATTR_TBL_STRIDE)
+
+#define HINIC_PPF_ELECTION_STRIDE 0x4
+#define HINIC_CSR_MAX_PORTS 4
+#define HINIC_CSR_PPF_ELECTION_ADDR \
+ (HINIC_CSR_GLOBAL_BASE_ADDR + HINIC_ELECTION_BASE)
+
+/* MSI-X registers */
+#define HINIC_CSR_MSIX_CTRL_BASE 0x2000
+#define HINIC_CSR_MSIX_CNT_BASE 0x2004
+
+#define HINIC_CSR_MSIX_STRIDE 0x8
+
+#define HINIC_CSR_MSIX_CTRL_ADDR(idx) \
+ (HINIC_CSR_MSIX_CTRL_BASE + (idx) * HINIC_CSR_MSIX_STRIDE)
+
+#define HINIC_CSR_MSIX_CNT_ADDR(idx) \
+ (HINIC_CSR_MSIX_CNT_BASE + (idx) * HINIC_CSR_MSIX_STRIDE)
+
+/* EQ registers */
+#define HINIC_AEQ_MTT_OFF_BASE_ADDR 0x200
+
+#define HINIC_EQ_MTT_OFF_STRIDE 0x40
+
+#define HINIC_CSR_AEQ_MTT_OFF(id) \
+ (HINIC_AEQ_MTT_OFF_BASE_ADDR + (id) * HINIC_EQ_MTT_OFF_STRIDE)
+
+#define HINIC_CSR_EQ_PAGE_OFF_STRIDE 8
+
+#define HINIC_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \
+ (HINIC_CSR_AEQ_MTT_OFF(q_id) + \
+ (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE)
+
+#define HINIC_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \
+ (HINIC_CSR_AEQ_MTT_OFF(q_id) + \
+ (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE + 4)
+
+#define HINIC_EQ_HI_PHYS_ADDR_REG(type, q_id, pg_num) \
+ ((u32)(HINIC_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num)))
+
+#define HINIC_EQ_LO_PHYS_ADDR_REG(type, q_id, pg_num) \
+ ((u32)(HINIC_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num)))
+
+#define HINIC_AEQ_CTRL_0_ADDR_BASE 0xE00
+#define HINIC_AEQ_CTRL_1_ADDR_BASE 0xE04
+#define HINIC_AEQ_CONS_IDX_0_ADDR_BASE 0xE08
+#define HINIC_AEQ_CONS_IDX_1_ADDR_BASE 0xE0C
+
+#define HINIC_EQ_OFF_STRIDE 0x80
+
+#define HINIC_CSR_AEQ_CTRL_0_ADDR(idx) \
+ (HINIC_AEQ_CTRL_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
+
+#define HINIC_CSR_AEQ_CTRL_1_ADDR(idx) \
+ (HINIC_AEQ_CTRL_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
+
+#define HINIC_CSR_AEQ_CONS_IDX_ADDR(idx) \
+ (HINIC_AEQ_CONS_IDX_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
+
+#define HINIC_CSR_AEQ_PROD_IDX_ADDR(idx) \
+ (HINIC_AEQ_CONS_IDX_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
+
+/* API CMD registers */
+#define HINIC_CSR_API_CMD_BASE 0xF000
+
+#define HINIC_CSR_API_CMD_STRIDE 0x100
+
+#define HINIC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0x0 + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+#define HINIC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0x4 + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+#define HINIC_CSR_API_CMD_STATUS_HI_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0x8 + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+#define HINIC_CSR_API_CMD_STATUS_LO_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0xC + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+#define HINIC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0x10 + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+#define HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0x14 + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+#define HINIC_CSR_API_CMD_CHAIN_PI_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0x1C + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+#define HINIC_CSR_API_CMD_CHAIN_REQ_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0x20 + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+#define HINIC_CSR_API_CMD_STATUS_0_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0x30 + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+/* VF control registers in pf */
+#define HINIC_PF_CSR_VF_FLUSH_BASE 0x1F400
+#define HINIC_PF_CSR_VF_FLUSH_STRIDE 0x4
+
+#define HINIC_GLB_DMA_SO_RO_REPLACE_ADDR 0x488C
+
+#define HINIC_ICPL_RESERVD_ADDR 0x9204
+
+#define HINIC_PF_CSR_VF_FLUSH_OFF(idx) \
+ (HINIC_PF_CSR_VF_FLUSH_BASE + (idx) * HINIC_PF_CSR_VF_FLUSH_STRIDE)
+
+#endif /* _HINIC_CSR_H_ */
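All of the CSR macros above are base-plus-stride address computations, so a register
address can be worked out by hand; for example HINIC_CSR_MSIX_CTRL_ADDR(2) is
0x2000 + 2 * 0x8 = 0x2010, and HINIC_CSR_API_CMD_CHAIN_PI_ADDR(7) is
0xF000 + 0x1C + 7 * 0x100 = 0xF71C. A hedged sketch of how such an address is consumed
(hinic_hwif_read_reg()/hinic_hwif_write_reg() live in hinic_pmd_hwif.c, which is listed
in the diffstat but not shown in this section; hwif, msix_idx and val are placeholders):

	u32 addr = HINIC_CSR_MSIX_CTRL_ADDR(msix_idx);	/* 0x2000 + msix_idx * 0x8 */
	u32 val = hinic_hwif_read_reg(hwif, addr);	/* read-modify-write the vector ctrl */

	hinic_hwif_write_reg(hwif, addr, val);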
diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_api_cmd.c b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_api_cmd.c
new file mode 100644
index 000000000..b72edc065
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_api_cmd.c
@@ -0,0 +1,1041 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include "hinic_compat.h"
+#include "hinic_csr.h"
+#include "hinic_pmd_hwdev.h"
+#include "hinic_pmd_cmd.h"
+#include "hinic_pmd_hwif.h"
+#include "hinic_pmd_api_cmd.h"
+
+#define API_CMD_CHAIN_CELL_SIZE_SHIFT 6U
+
+#define API_CMD_CELL_DESC_SIZE 8
+#define API_CMD_CELL_DATA_ADDR_SIZE 8
+
+#define API_CHAIN_NUM_CELLS 32
+#define API_CHAIN_CELL_SIZE 128
+#define API_CHAIN_RSP_DATA_SIZE 128
+
+#define API_CHAIN_CELL_ALIGNMENT 8
+
+#define API_CMD_TIMEOUT 10000
+
+#define API_CMD_BUF_SIZE 2048UL
+
+#define API_CMD_NODE_ALIGN_SIZE 512UL
+#define API_PAYLOAD_ALIGN_SIZE 64
+
+#define API_CHAIN_RESP_ALIGNMENT 64ULL
+
+#define COMPLETION_TIMEOUT_DEFAULT 1000UL
+#define POLLING_COMPLETION_TIMEOUT_DEFAULT 1000U
+
+#define API_CMD_RESPONSE_DATA_PADDR(val) be64_to_cpu(*((u64 *)(val)))
+
+#define READ_API_CMD_PRIV_DATA(id, token) (((id) << 16) + (token))
+#define WRITE_API_CMD_PRIV_DATA(id) ((id) << 16)
+
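+/* wrap an index onto the chain; relies on num_cells being a power of two,
+ * which api_cmd_create_chain() enforces
+ */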
+#define MASKED_IDX(chain, idx) ((idx) & ((chain)->num_cells - 1))
+
+#undef SIZE_4BYTES
+#undef SIZE_8BYTES
+#define SIZE_4BYTES(size) (ALIGN((u32)(size), 4U) >> 2)
+#define SIZE_8BYTES(size) (ALIGN((u32)(size), 8U) >> 3)
+
+enum api_cmd_data_format {
+ SGL_DATA = 1,
+};
+
+enum api_cmd_type {
+ API_CMD_WRITE_TYPE = 0,
+ API_CMD_READ_TYPE = 1,
+};
+
+enum api_cmd_bypass {
+ NOT_BYPASS = 0,
+ BYPASS = 1,
+};
+
+enum api_cmd_resp_aeq {
+ NOT_TRIGGER = 0,
+ TRIGGER = 1,
+};
+
+static u8 xor_chksum_set(void *data)
+{
+ int idx;
+ u8 checksum = 0;
+ u8 *val = (u8 *)data;
+
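+	/* XOR bytes 0..6; byte 7 (bits 56..63) carries the checksum itself */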
+ for (idx = 0; idx < 7; idx++)
+ checksum ^= val[idx];
+
+ return checksum;
+}
+
+static void set_prod_idx(struct hinic_api_cmd_chain *chain)
+{
+ enum hinic_api_cmd_chain_type chain_type = chain->chain_type;
+ struct hinic_hwif *hwif = chain->hwdev->hwif;
+ u32 hw_prod_idx_addr = HINIC_CSR_API_CMD_CHAIN_PI_ADDR(chain_type);
+ u32 prod_idx = chain->prod_idx;
+
+ hinic_hwif_write_reg(hwif, hw_prod_idx_addr, prod_idx);
+}
+
+static u32 get_hw_cons_idx(struct hinic_api_cmd_chain *chain)
+{
+ u32 addr, val;
+
+ addr = HINIC_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type);
+ val = hinic_hwif_read_reg(chain->hwdev->hwif, addr);
+
+ return HINIC_API_CMD_STATUS_GET(val, CONS_IDX);
+}
+
+static void dump_api_chain_reg(struct hinic_api_cmd_chain *chain)
+{
+ u32 addr, val;
+
+ addr = HINIC_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type);
+ val = hinic_hwif_read_reg(chain->hwdev->hwif, addr);
+
+ PMD_DRV_LOG(ERR, "chain type: 0x%x", chain->chain_type);
+ PMD_DRV_LOG(ERR, "chain hw cpld error: 0x%x",
+ HINIC_API_CMD_STATUS_GET(val, CPLD_ERR));
+ PMD_DRV_LOG(ERR, "chain hw check error: 0x%x",
+ HINIC_API_CMD_STATUS_GET(val, CHKSUM_ERR));
+ PMD_DRV_LOG(ERR, "chain hw current fsm: 0x%x",
+ HINIC_API_CMD_STATUS_GET(val, FSM));
+ PMD_DRV_LOG(ERR, "chain hw current ci: 0x%x",
+ HINIC_API_CMD_STATUS_GET(val, CONS_IDX));
+
+ addr = HINIC_CSR_API_CMD_CHAIN_PI_ADDR(chain->chain_type);
+ val = hinic_hwif_read_reg(chain->hwdev->hwif, addr);
+ PMD_DRV_LOG(ERR, "Chain hw current pi: 0x%x", val);
+}
+
+/**
+ * chain_busy - check if the chain is still processing last requests
+ * @chain: chain to check
+ */
+static int chain_busy(struct hinic_api_cmd_chain *chain)
+{
+ switch (chain->chain_type) {
+ case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU:
+ case HINIC_API_CMD_PMD_WRITE_TO_MGMT:
+ chain->cons_idx = get_hw_cons_idx(chain);
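+		/* one cell is always left unused so a full chain
+		 * (cons == prod + 1) can be told apart from an empty one
+		 */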
+ if (chain->cons_idx == MASKED_IDX(chain, chain->prod_idx + 1)) {
+ PMD_DRV_LOG(ERR, "API CMD chain %d is busy, cons_idx: %d, prod_idx: %d",
+ chain->chain_type, chain->cons_idx,
+ chain->prod_idx);
+ dump_api_chain_reg(chain);
+ return -EBUSY;
+ }
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unknown Chain type");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * get_cell_data_size - get the data size of specific cell type
+ * @type: chain type
+ */
+static u16 get_cell_data_size(enum hinic_api_cmd_chain_type type,
+ __rte_unused u16 cmd_size)
+{
+ u16 cell_data_size = 0;
+
+ switch (type) {
+ case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU:
+ case HINIC_API_CMD_PMD_WRITE_TO_MGMT:
+ cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE +
+ API_CMD_CELL_DATA_ADDR_SIZE,
+ API_CHAIN_CELL_ALIGNMENT);
+ break;
+ default:
+ break;
+ }
+
+ return cell_data_size;
+}
+
+/**
+ * prepare_cell_ctrl - prepare the ctrl of the cell for the command
+ * @cell_ctrl: the control of the cell to set the control into it
+ * @cell_len: the size of the cell
+ */
+static void prepare_cell_ctrl(u64 *cell_ctrl, u16 cell_len)
+{
+ u64 ctrl;
+ u8 chksum;
+
+ /* Read Modify Write */
+ ctrl = be64_to_cpu(*cell_ctrl);
+ ctrl = HINIC_API_CMD_CELL_CTRL_CLEAR(ctrl, CELL_LEN) &
+ HINIC_API_CMD_CELL_CTRL_CLEAR(ctrl, RD_DMA_ATTR_OFF) &
+ HINIC_API_CMD_CELL_CTRL_CLEAR(ctrl, WR_DMA_ATTR_OFF) &
+ HINIC_API_CMD_CELL_CTRL_CLEAR(ctrl, XOR_CHKSUM);
+
+ ctrl |= HINIC_API_CMD_CELL_CTRL_SET(SIZE_8BYTES(cell_len), CELL_LEN) |
+ HINIC_API_CMD_CELL_CTRL_SET(0ULL, RD_DMA_ATTR_OFF) |
+ HINIC_API_CMD_CELL_CTRL_SET(0ULL, WR_DMA_ATTR_OFF);
+
+ chksum = xor_chksum_set(&ctrl);
+
+ ctrl |= HINIC_API_CMD_CELL_CTRL_SET(chksum, XOR_CHKSUM);
+
+ /* The data in the HW should be in Big Endian Format */
+ *cell_ctrl = cpu_to_be64(ctrl);
+}
+
+/**
+ * prepare_api_cmd - prepare API CMD command
+ * @chain: chain for the command
+ * @cell: the cell of the command
+ * @dest: destination node on the card that will receive the command
+ * @cmd: command data
+ * @cmd_size: the command size
+ */
+static void prepare_api_cmd(struct hinic_api_cmd_chain *chain,
+ struct hinic_api_cmd_cell *cell,
+ enum hinic_node_id dest,
+ void *cmd, u16 cmd_size)
+{
+ struct hinic_api_cmd_cell_ctxt *cell_ctxt;
+ u32 priv;
+
+ cell_ctxt = &chain->cell_ctxt[chain->prod_idx];
+
+ /* Clear all the members before changes */
+ cell->desc = HINIC_API_CMD_DESC_CLEAR(cell->desc, API_TYPE) &
+ HINIC_API_CMD_DESC_CLEAR(cell->desc, RD_WR) &
+ HINIC_API_CMD_DESC_CLEAR(cell->desc, MGMT_BYPASS) &
+ HINIC_API_CMD_DESC_CLEAR(cell->desc, RESP_AEQE_EN) &
+ HINIC_API_CMD_DESC_CLEAR(cell->desc, DEST) &
+ HINIC_API_CMD_DESC_CLEAR(cell->desc, SIZE) &
+ HINIC_API_CMD_DESC_CLEAR(cell->desc, XOR_CHKSUM);
+
+ switch (chain->chain_type) {
+ case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU:
+ case HINIC_API_CMD_PMD_WRITE_TO_MGMT:
+ priv = WRITE_API_CMD_PRIV_DATA(chain->chain_type);
+ cell->desc = HINIC_API_CMD_DESC_SET(SGL_DATA, API_TYPE) |
+ HINIC_API_CMD_DESC_SET(API_CMD_WRITE_TYPE, RD_WR) |
+ HINIC_API_CMD_DESC_SET(NOT_BYPASS, MGMT_BYPASS) |
+ HINIC_API_CMD_DESC_SET(TRIGGER, RESP_AEQE_EN) |
+ HINIC_API_CMD_DESC_SET(priv, PRIV_DATA);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unknown Chain type");
+ return;
+ }
+
+ cell->desc |= HINIC_API_CMD_DESC_SET(dest, DEST) |
+ HINIC_API_CMD_DESC_SET(SIZE_4BYTES(cmd_size), SIZE);
+ cell->desc |= HINIC_API_CMD_DESC_SET(xor_chksum_set(&cell->desc),
+ XOR_CHKSUM);
+
+ /* The data in the HW should be in Big Endian Format */
+ cell->desc = cpu_to_be64(cell->desc);
+
+ memcpy(cell_ctxt->api_cmd_vaddr, cmd, cmd_size);
+}
+
+/**
+ * prepare_cell - prepare cell ctrl and cmd in the current producer cell
+ * @chain: chain for the command
+ * @dest: destination node on the card that will receive the command
+ * @cmd: command data
+ * @cmd_size: the command size
+ */
+static void prepare_cell(struct hinic_api_cmd_chain *chain,
+ enum hinic_node_id dest,
+ void *cmd, u16 cmd_size)
+{
+ struct hinic_api_cmd_cell *curr_node;
+ u16 cell_size;
+
+ curr_node = chain->curr_node;
+
+ cell_size = get_cell_data_size(chain->chain_type, cmd_size);
+
+ prepare_cell_ctrl(&curr_node->ctrl, cell_size);
+ prepare_api_cmd(chain, curr_node, dest, cmd, cmd_size);
+}
+
+static inline void cmd_chain_prod_idx_inc(struct hinic_api_cmd_chain *chain)
+{
+ chain->prod_idx = MASKED_IDX(chain, chain->prod_idx + 1);
+}
+
+static void issue_api_cmd(struct hinic_api_cmd_chain *chain)
+{
+ set_prod_idx(chain);
+}
+
+/**
+ * api_cmd_status_update - update the status of the chain
+ * @chain: chain to update
+ */
+static void api_cmd_status_update(struct hinic_api_cmd_chain *chain)
+{
+ struct hinic_api_cmd_status *wb_status;
+ enum hinic_api_cmd_chain_type chain_type;
+ u64 status_header;
+ u32 buf_desc;
+
+ wb_status = chain->wb_status;
+
+ buf_desc = be32_to_cpu(wb_status->buf_desc);
+ if (HINIC_API_CMD_STATUS_GET(buf_desc, CHKSUM_ERR)) {
+ PMD_DRV_LOG(ERR, "API CMD status Xor check error");
+ return;
+ }
+
+ status_header = be64_to_cpu(wb_status->header);
+ chain_type = HINIC_API_CMD_STATUS_HEADER_GET(status_header, CHAIN_ID);
+ if (chain_type >= HINIC_API_CMD_MAX)
+ return;
+
+ if (chain_type != chain->chain_type)
+ return;
+
+ chain->cons_idx = HINIC_API_CMD_STATUS_GET(buf_desc, CONS_IDX);
+}
+
+/**
+ * wait_for_status_poll - wait for write to mgmt command to complete
+ * @chain: the chain of the command
+ * Return: 0 - success, negative - failure
+ */
+static int wait_for_status_poll(struct hinic_api_cmd_chain *chain)
+{
+ unsigned long end;
+ int err = -ETIMEDOUT;
+
+ end = jiffies + msecs_to_jiffies(API_CMD_TIMEOUT);
+ do {
+ api_cmd_status_update(chain);
+
+ /* SYNC API CMD cmd should start after prev cmd finished */
+ if (chain->cons_idx == chain->prod_idx) {
+ err = 0;
+ break;
+ }
+
+ rte_delay_us(10);
+ } while (time_before(jiffies, end));
+
+ return err;
+}
+
+/**
+ * wait_for_api_cmd_completion - wait for command to complete
+ * @chain: chain for the command
+ * Return: 0 - success, negative - failure
+ */
+static int wait_for_api_cmd_completion(struct hinic_api_cmd_chain *chain,
+ __rte_unused struct hinic_api_cmd_cell_ctxt *ctxt,
+ __rte_unused void *ack, __rte_unused u16 ack_size)
+{
+ int err = 0;
+
+	/* poll api cmd status for debug */
+ switch (chain->chain_type) {
+ case HINIC_API_CMD_PMD_WRITE_TO_MGMT:
+ err = wait_for_status_poll(chain);
+ if (err)
+ PMD_DRV_LOG(ERR, "API CMD poll status timeout");
+ break;
+ case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU:
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unknown API CMD chain type");
+ err = -EINVAL;
+ break;
+ }
+
+ if (err)
+ dump_api_chain_reg(chain);
+
+ return err;
+}
+
+static inline void update_api_cmd_ctxt(struct hinic_api_cmd_chain *chain,
+ struct hinic_api_cmd_cell_ctxt *ctxt)
+{
+ ctxt->status = 1;
+ ctxt->saved_prod_idx = chain->prod_idx;
+}
+
+/**
+ * api_cmd - API CMD command
+ * @chain: chain for the command
+ * @dest: destination node on the card that will receive the command
+ * @cmd: command data
+ * @cmd_size: the command size
+ * @ack: pointer to messages to response
+ * @ack_size: the size of ack message
+ * Return: 0 - success, negative - failure
+ */
+static int api_cmd(struct hinic_api_cmd_chain *chain,
+ enum hinic_node_id dest,
+ void *cmd, u16 cmd_size, void *ack, u16 ack_size)
+{
+ struct hinic_api_cmd_cell_ctxt *ctxt;
+
+ spin_lock(&chain->async_lock);
+
+ ctxt = &chain->cell_ctxt[chain->prod_idx];
+ if (chain_busy(chain)) {
+ spin_unlock(&chain->async_lock);
+ return -EBUSY;
+ }
+ update_api_cmd_ctxt(chain, ctxt);
+
+ prepare_cell(chain, dest, cmd, cmd_size);
+
+ cmd_chain_prod_idx_inc(chain);
+
+	rte_wmb(); /* issue the command */
+
+ issue_api_cmd(chain);
+
+	/* prod_idx was advanced; point curr_node at the next cell */
+ chain->curr_node = chain->cell_ctxt[chain->prod_idx].cell_vaddr;
+
+ spin_unlock(&chain->async_lock);
+
+ return wait_for_api_cmd_completion(chain, ctxt, ack, ack_size);
+}
+
+/**
+ * hinic_api_cmd_write - Write API CMD command
+ * @chain: chain for write command
+ * @dest: destination node on the card that will receive the command
+ * @cmd: command data
+ * @size: the command size
+ * Return: 0 - success, negative - failure
+ */
+int hinic_api_cmd_write(struct hinic_api_cmd_chain *chain,
+ enum hinic_node_id dest, void *cmd, u16 size)
+{
+ /* Verify the chain type */
+ return api_cmd(chain, dest, cmd, size, NULL, 0);
+}
+
+/**
+ * api_cmd_hw_restart - restart the chain in the HW
+ * @chain: the API CMD specific chain to restart
+ */
+static int api_cmd_hw_restart(struct hinic_api_cmd_chain *chain)
+{
+ struct hinic_hwif *hwif = chain->hwdev->hwif;
+ unsigned long end;
+ u32 reg_addr, val;
+ int err;
+
+ /* Read Modify Write */
+ reg_addr = HINIC_CSR_API_CMD_CHAIN_REQ_ADDR(chain->chain_type);
+ val = hinic_hwif_read_reg(hwif, reg_addr);
+
+ val = HINIC_API_CMD_CHAIN_REQ_CLEAR(val, RESTART);
+ val |= HINIC_API_CMD_CHAIN_REQ_SET(1, RESTART);
+
+ hinic_hwif_write_reg(hwif, reg_addr, val);
+
+ end = jiffies + msecs_to_jiffies(API_CMD_TIMEOUT);
+ err = -ETIMEDOUT;
+ do {
+ val = hinic_hwif_read_reg(hwif, reg_addr);
+
+ if (!HINIC_API_CMD_CHAIN_REQ_GET(val, RESTART)) {
+ err = 0;
+ break;
+ }
+
+ rte_delay_ms(1);
+ } while (time_before(jiffies, end));
+
+ return err;
+}
+
+/**
+ * api_cmd_ctrl_init - set the control register of a chain
+ * @chain: the API CMD specific chain to set control register for
+ */
+static void api_cmd_ctrl_init(struct hinic_api_cmd_chain *chain)
+{
+ struct hinic_hwif *hwif = chain->hwdev->hwif;
+ u32 reg_addr, ctrl;
+ u32 cell_size;
+
+ /* Read Modify Write */
+ reg_addr = HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type);
+
+ cell_size = (u32)ilog2(chain->cell_size >>
+ API_CMD_CHAIN_CELL_SIZE_SHIFT);
+
+ ctrl = hinic_hwif_read_reg(hwif, reg_addr);
+
+ ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) &
+ HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE);
+
+ ctrl |= HINIC_API_CMD_CHAIN_CTRL_SET(0, AEQE_EN) |
+ HINIC_API_CMD_CHAIN_CTRL_SET(cell_size, CELL_SIZE);
+
+ hinic_hwif_write_reg(hwif, reg_addr, ctrl);
+}
+
+/**
+ * api_cmd_set_status_addr - set the status address of a chain in the HW
+ * @chain: the API CMD specific chain to set status address for
+ */
+static void api_cmd_set_status_addr(struct hinic_api_cmd_chain *chain)
+{
+ struct hinic_hwif *hwif = chain->hwdev->hwif;
+ u32 addr, val;
+
+ addr = HINIC_CSR_API_CMD_STATUS_HI_ADDR(chain->chain_type);
+ val = upper_32_bits(chain->wb_status_paddr);
+ hinic_hwif_write_reg(hwif, addr, val);
+
+ addr = HINIC_CSR_API_CMD_STATUS_LO_ADDR(chain->chain_type);
+ val = lower_32_bits(chain->wb_status_paddr);
+ hinic_hwif_write_reg(hwif, addr, val);
+}
+
+/**
+ * api_cmd_set_num_cells - set the number of cells of a chain in the HW
+ * @chain: the API CMD specific chain to set the number of cells for
+ */
+static void api_cmd_set_num_cells(struct hinic_api_cmd_chain *chain)
+{
+ struct hinic_hwif *hwif = chain->hwdev->hwif;
+ u32 addr, val;
+
+ addr = HINIC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(chain->chain_type);
+ val = chain->num_cells;
+ hinic_hwif_write_reg(hwif, addr, val);
+}
+
+/**
+ * api_cmd_head_init - set the head cell of a chain in the HW
+ * @chain: the API CMD specific chain to set the head for
+ */
+static void api_cmd_head_init(struct hinic_api_cmd_chain *chain)
+{
+ struct hinic_hwif *hwif = chain->hwdev->hwif;
+ u32 addr, val;
+
+ addr = HINIC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(chain->chain_type);
+ val = upper_32_bits(chain->head_cell_paddr);
+ hinic_hwif_write_reg(hwif, addr, val);
+
+ addr = HINIC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(chain->chain_type);
+ val = lower_32_bits(chain->head_cell_paddr);
+ hinic_hwif_write_reg(hwif, addr, val);
+}
+
+/**
+ * wait_for_ready_chain - wait for the chain to be ready
+ * @chain: the API CMD specific chain to wait for
+ * Return: 0 - success, negative - failure
+ */
+static int wait_for_ready_chain(struct hinic_api_cmd_chain *chain)
+{
+ struct hinic_hwif *hwif = chain->hwdev->hwif;
+ unsigned long end;
+ u32 addr, val;
+ u32 hw_cons_idx;
+ int err;
+
+ end = jiffies + msecs_to_jiffies(API_CMD_TIMEOUT);
+
+ addr = HINIC_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type);
+ err = -ETIMEDOUT;
+ do {
+ val = hinic_hwif_read_reg(hwif, addr);
+ hw_cons_idx = HINIC_API_CMD_STATUS_GET(val, CONS_IDX);
+
+ /* Wait for HW cons idx to be updated */
+ if (hw_cons_idx == chain->cons_idx) {
+ err = 0;
+ break;
+ }
+
+ rte_delay_ms(1);
+ } while (time_before(jiffies, end));
+
+ return err;
+}
+
+/**
+ * api_cmd_chain_hw_clean - clean the HW
+ * @chain: the API CMD specific chain
+ */
+static void api_cmd_chain_hw_clean(struct hinic_api_cmd_chain *chain)
+{
+ struct hinic_hwif *hwif = chain->hwdev->hwif;
+ u32 addr, ctrl;
+
+ addr = HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type);
+
+ ctrl = hinic_hwif_read_reg(hwif, addr);
+ ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_EN) &
+ HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_ERR) &
+ HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) &
+ HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN) &
+ HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE);
+
+ hinic_hwif_write_reg(hwif, addr, ctrl);
+}
+
+/**
+ * api_cmd_chain_hw_init - initialize the chain in the HW
+ * (initialize API command csr)
+ * @chain: the API CMD specific chain to initialize in HW
+ * Return: 0 - success, negative - failure
+ */
+static int api_cmd_chain_hw_init(struct hinic_api_cmd_chain *chain)
+{
+ api_cmd_chain_hw_clean(chain);
+
+ api_cmd_set_status_addr(chain);
+
+ if (api_cmd_hw_restart(chain)) {
+ PMD_DRV_LOG(ERR, "Restart api_cmd_hw failed");
+ return -EBUSY;
+ }
+
+ api_cmd_ctrl_init(chain);
+ api_cmd_set_num_cells(chain);
+ api_cmd_head_init(chain);
+
+ return wait_for_ready_chain(chain);
+}
+
+/**
+ * free_cmd_buf - free the dma buffer of API CMD command
+ * @chain: the API CMD specific chain of the cmd
+ * @cell_idx: the cell index of the cmd
+ */
+static void free_cmd_buf(struct hinic_api_cmd_chain *chain, u32 cell_idx)
+{
+ struct hinic_api_cmd_cell_ctxt *cell_ctxt;
+ void *dev = chain->hwdev;
+
+ cell_ctxt = &chain->cell_ctxt[cell_idx];
+
+ dma_free_coherent(dev, (API_CMD_BUF_SIZE + API_PAYLOAD_ALIGN_SIZE),
+ cell_ctxt->api_cmd_vaddr_free,
+ cell_ctxt->api_cmd_paddr_free);
+}
+
+/**
+ * alloc_cmd_buf - allocate a dma buffer for API CMD command
+ * @chain: the API CMD specific chain for the cmd
+ * @cell: the cell in the HW for the cmd
+ * @cell_idx: the index of the cell
+ * Return: 0 - success, negative - failure
+ */
+static int alloc_cmd_buf(struct hinic_api_cmd_chain *chain,
+ struct hinic_api_cmd_cell *cell, u32 cell_idx)
+{
+ void *dev = chain->hwdev;
+ struct hinic_api_cmd_cell_ctxt *cell_ctxt;
+ dma_addr_t cmd_paddr = 0;
+ void *cmd_vaddr;
+ void *cmd_vaddr_alloc;
+ int err = 0;
+
+ cmd_vaddr_alloc = dma_zalloc_coherent(dev, (API_CMD_BUF_SIZE +
+ API_PAYLOAD_ALIGN_SIZE),
+ &cmd_paddr, SOCKET_ID_ANY);
+ if (!cmd_vaddr_alloc) {
+ PMD_DRV_LOG(ERR, "Allocate API CMD dma memory failed");
+ return -ENOMEM;
+ }
+
+ cell_ctxt = &chain->cell_ctxt[cell_idx];
+
+ cell_ctxt->api_cmd_paddr_free = cmd_paddr;
+ cell_ctxt->api_cmd_vaddr_free = cmd_vaddr_alloc;
+ cmd_vaddr = PTR_ALIGN(cmd_vaddr_alloc, API_PAYLOAD_ALIGN_SIZE);
+ cmd_paddr = cmd_paddr + ((u64)cmd_vaddr - (u64)cmd_vaddr_alloc);
+
+ cell_ctxt->api_cmd_vaddr = cmd_vaddr;
+ cell_ctxt->api_cmd_paddr = cmd_paddr;
+
+ /* set the cmd DMA address in the cell */
+ switch (chain->chain_type) {
+ case HINIC_API_CMD_PMD_WRITE_TO_MGMT:
+ case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU:
+ cell->write.hw_cmd_paddr = cpu_to_be64(cmd_paddr);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unknown API CMD chain type");
+ free_cmd_buf(chain, cell_idx);
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * api_cmd_create_cell - create API CMD cell of specific chain
+ * @chain: the API CMD specific chain to create its cell
+ * @cell_idx: the cell index to create
+ * @pre_node: previous cell
+ * @node_vaddr: the virt addr of the cell
+ * Return: 0 - success, negative - failure
+ */
+static int api_cmd_create_cell(struct hinic_api_cmd_chain *chain,
+ u32 cell_idx,
+ struct hinic_api_cmd_cell *pre_node,
+ struct hinic_api_cmd_cell **node_vaddr)
+{
+ void *dev = chain->hwdev;
+ struct hinic_api_cmd_cell_ctxt *cell_ctxt;
+ struct hinic_api_cmd_cell *node;
+ dma_addr_t node_paddr = 0;
+ void *node_vaddr_alloc;
+ int err = 0;
+
+ node_vaddr_alloc = dma_zalloc_coherent(dev, (chain->cell_size +
+ API_CMD_NODE_ALIGN_SIZE),
+ &node_paddr, SOCKET_ID_ANY);
+ if (!node_vaddr_alloc) {
+ PMD_DRV_LOG(ERR, "Allocate dma API CMD cell failed");
+ return -ENOMEM;
+ }
+
+ cell_ctxt = &chain->cell_ctxt[cell_idx];
+
+ cell_ctxt->cell_vaddr_free = node_vaddr_alloc;
+ cell_ctxt->cell_paddr_free = node_paddr;
+ node = (struct hinic_api_cmd_cell *)PTR_ALIGN(node_vaddr_alloc,
+ API_CMD_NODE_ALIGN_SIZE);
+ node_paddr = node_paddr + ((u64)node - (u64)node_vaddr_alloc);
+
+ node->read.hw_wb_resp_paddr = 0;
+
+ cell_ctxt->cell_vaddr = node;
+ cell_ctxt->cell_paddr = node_paddr;
+
+ if (!pre_node) {
+ chain->head_node = node;
+ chain->head_cell_paddr = node_paddr;
+ } else {
+ /* The data in the HW should be in Big Endian Format */
+ pre_node->next_cell_paddr = cpu_to_be64(node_paddr);
+ }
+
+ /* Driver software should make sure that there is an empty
+	 * API command cell at the end of the chain
+ */
+ node->next_cell_paddr = 0;
+
+ switch (chain->chain_type) {
+ case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU:
+ case HINIC_API_CMD_PMD_WRITE_TO_MGMT:
+ err = alloc_cmd_buf(chain, node, cell_idx);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Allocate cmd buffer failed");
+ goto alloc_cmd_buf_err;
+ }
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unsupported API CMD chain type");
+ err = -EINVAL;
+ goto alloc_cmd_buf_err;
+ }
+
+ *node_vaddr = node;
+
+ return 0;
+
+alloc_cmd_buf_err:
+ dma_free_coherent(dev, (chain->cell_size + API_CMD_NODE_ALIGN_SIZE),
+ node_vaddr_alloc, cell_ctxt->cell_paddr_free);
+
+ return err;
+}
+
+/**
+ * api_cmd_destroy_cell - destroy API CMD cell of specific chain
+ * @chain: the API CMD specific chain to destroy its cell
+ * @cell_idx: the cell to destroy
+ */
+static void api_cmd_destroy_cell(struct hinic_api_cmd_chain *chain,
+ u32 cell_idx)
+{
+ void *dev = chain->hwdev;
+ struct hinic_api_cmd_cell_ctxt *cell_ctxt;
+ struct hinic_api_cmd_cell *node;
+ dma_addr_t node_paddr;
+
+ cell_ctxt = &chain->cell_ctxt[cell_idx];
+
+ node = (struct hinic_api_cmd_cell *)(cell_ctxt->cell_vaddr_free);
+ node_paddr = cell_ctxt->cell_paddr_free;
+
+ if (cell_ctxt->api_cmd_vaddr) {
+ switch (chain->chain_type) {
+ case HINIC_API_CMD_PMD_WRITE_TO_MGMT:
+ case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU:
+ free_cmd_buf(chain, cell_idx);
+ break;
+ default:
+ break;
+ }
+
+ dma_free_coherent(dev, (chain->cell_size +
+ API_CMD_NODE_ALIGN_SIZE),
+ node, node_paddr);
+ }
+}
+
+/**
+ * api_cmd_destroy_cells - destroy API CMD cells of specific chain
+ * @chain: the API CMD specific chain to destroy its cells
+ * @num_cells: number of cells to destroy
+ */
+static void api_cmd_destroy_cells(struct hinic_api_cmd_chain *chain,
+ u32 num_cells)
+{
+ u32 cell_idx;
+
+ for (cell_idx = 0; cell_idx < num_cells; cell_idx++)
+ api_cmd_destroy_cell(chain, cell_idx);
+}
+
+/**
+ * api_cmd_create_cells - create API CMD cells for specific chain
+ * @chain: the API CMD specific chain
+ * Return: 0 - success, negative - failure
+ */
+static int api_cmd_create_cells(struct hinic_api_cmd_chain *chain)
+{
+ struct hinic_api_cmd_cell *node = NULL, *pre_node = NULL;
+ u32 cell_idx;
+ int err;
+
+ for (cell_idx = 0; cell_idx < chain->num_cells; cell_idx++) {
+ err = api_cmd_create_cell(chain, cell_idx, pre_node, &node);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Create API CMD cell failed");
+ goto create_cell_err;
+ }
+
+ pre_node = node;
+ }
+
+ if (!node) {
+ err = -EFAULT;
+ goto create_cell_err;
+ }
+
+	/* set the final node to point back to the start of the chain */
+ node->next_cell_paddr = cpu_to_be64(chain->head_cell_paddr);
+
+ /* set the current node to be the head */
+ chain->curr_node = chain->head_node;
+ return 0;
+
+create_cell_err:
+ api_cmd_destroy_cells(chain, cell_idx);
+ return err;
+}
+
+/**
+ * api_chain_init - initialize API CMD specific chain
+ * @chain: the API CMD specific chain to initialize
+ * @attr: attributes to set in the chain
+ * Return: 0 - success, negative - failure
+ */
+static int api_chain_init(struct hinic_api_cmd_chain *chain,
+ struct hinic_api_cmd_chain_attr *attr)
+{
+ void *dev = chain->hwdev;
+ size_t cell_ctxt_size;
+ int err;
+
+ chain->chain_type = attr->chain_type;
+ chain->num_cells = attr->num_cells;
+ chain->cell_size = attr->cell_size;
+ chain->rsp_size = attr->rsp_size;
+
+ chain->prod_idx = 0;
+ chain->cons_idx = 0;
+
+ spin_lock_init(&chain->async_lock);
+
+ cell_ctxt_size = chain->num_cells * sizeof(*chain->cell_ctxt);
+ chain->cell_ctxt = kzalloc(cell_ctxt_size, GFP_KERNEL);
+ if (!chain->cell_ctxt) {
+ PMD_DRV_LOG(ERR, "Allocate cell contexts for a chain failed");
+ err = -ENOMEM;
+ goto alloc_cell_ctxt_err;
+ }
+
+ chain->wb_status = (struct hinic_api_cmd_status *)
+ dma_zalloc_coherent(dev, sizeof(*chain->wb_status),
+ &chain->wb_status_paddr, SOCKET_ID_ANY);
+ if (!chain->wb_status) {
+ PMD_DRV_LOG(ERR, "Allocate DMA wb status failed");
+ err = -ENOMEM;
+ goto alloc_wb_status_err;
+ }
+
+ return 0;
+
+alloc_wb_status_err:
+ kfree(chain->cell_ctxt);
+
+alloc_cell_ctxt_err:
+
+ return err;
+}
+
+/**
+ * api_chain_free - free API CMD specific chain
+ * @chain: the API CMD specific chain to free
+ */
+static void api_chain_free(struct hinic_api_cmd_chain *chain)
+{
+ void *dev = chain->hwdev;
+
+ dma_free_coherent(dev, sizeof(*chain->wb_status),
+ chain->wb_status, chain->wb_status_paddr);
+ kfree(chain->cell_ctxt);
+}
+
+/**
+ * api_cmd_create_chain - create API CMD specific chain
+ * @cmd_chain: the API CMD specific chain to create
+ * @attr: attributes to set in the chain
+ * Return: 0 - success, negative - failure
+ */
+static int api_cmd_create_chain(struct hinic_api_cmd_chain **cmd_chain,
+ struct hinic_api_cmd_chain_attr *attr)
+{
+ struct hinic_hwdev *hwdev = attr->hwdev;
+ struct hinic_api_cmd_chain *chain;
+ int err;
+
+ if (attr->num_cells & (attr->num_cells - 1)) {
+ PMD_DRV_LOG(ERR, "Invalid number of cells, must be power of 2");
+ return -EINVAL;
+ }
+
+ chain = kzalloc(sizeof(*chain), GFP_KERNEL);
+ if (!chain) {
+ PMD_DRV_LOG(ERR, "Allocate memory for the chain failed");
+ return -ENOMEM;
+ }
+
+ chain->hwdev = hwdev;
+
+ err = api_chain_init(chain, attr);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Initialize chain failed");
+ goto chain_init_err;
+ }
+
+ err = api_cmd_create_cells(chain);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Create cells for API CMD chain failed");
+ goto create_cells_err;
+ }
+
+ err = api_cmd_chain_hw_init(chain);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Initialize chain hw info failed");
+ goto chain_hw_init_err;
+ }
+
+ *cmd_chain = chain;
+ return 0;
+
+chain_hw_init_err:
+ api_cmd_destroy_cells(chain, chain->num_cells);
+
+create_cells_err:
+ api_chain_free(chain);
+
+chain_init_err:
+ kfree(chain);
+ return err;
+}
+
+/**
+ * api_cmd_destroy_chain - destroy API CMD specific chain
+ * @chain: the API CMD specific chain to destroy
+ */
+static void api_cmd_destroy_chain(struct hinic_api_cmd_chain *chain)
+{
+ api_cmd_destroy_cells(chain, chain->num_cells);
+ api_chain_free(chain);
+ kfree(chain);
+}
+
+/**
+ * hinic_api_cmd_init - Initialize all the API CMD chains
+ * @hwdev: the hardware interface of a pci function device
+ * @chain: the API CMD chains that will be initialized
+ * Return: 0 - success, negative - failure
+ */
+int hinic_api_cmd_init(struct hinic_hwdev *hwdev,
+ struct hinic_api_cmd_chain **chain)
+{
+ struct hinic_api_cmd_chain_attr attr;
+ enum hinic_api_cmd_chain_type chain_type, i;
+ int err;
+
+ attr.hwdev = hwdev;
+ attr.num_cells = API_CHAIN_NUM_CELLS;
+ attr.cell_size = API_CHAIN_CELL_SIZE;
+ attr.rsp_size = API_CHAIN_RSP_DATA_SIZE;
+
+ chain_type = HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU;
+ for ( ; chain_type < HINIC_API_CMD_MAX; chain_type++) {
+ attr.chain_type = chain_type;
+ err = api_cmd_create_chain(&chain[chain_type], &attr);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Create chain %d failed",
+ chain_type);
+ goto create_chain_err;
+ }
+ }
+
+ return 0;
+
+create_chain_err:
+ i = HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU;
+ for (; i < chain_type; i++)
+ api_cmd_destroy_chain(chain[i]);
+
+ return err;
+}
+
+/**
+ * hinic_api_cmd_free - free the API CMD chains
+ * @chain: the API CMD chains that will be freed
+ */
+void hinic_api_cmd_free(struct hinic_api_cmd_chain **chain)
+{
+ enum hinic_api_cmd_chain_type chain_type;
+
+ chain_type = HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU;
+ for ( ; chain_type < HINIC_API_CMD_MAX; chain_type++)
+ api_cmd_destroy_chain(chain[chain_type]);
+}
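The chain API implemented above is consumed in three steps: create the chains once per
device, post write commands toward the management CPU, and tear the chains down on exit.
The real caller is the management-message layer (hinic_pmd_mgmt.c, later in this patch);
the fragment below is only a hedged sketch of that call sequence, with hwdev, cmd_buf
and cmd_len as placeholders:

	struct hinic_api_cmd_chain *chain[HINIC_API_CMD_MAX];
	int err;

	/* allocates the cells and DMA buffers and programs the chain CSRs */
	err = hinic_api_cmd_init(hwdev, chain);
	if (err)
		return err;

	/* synchronous write: polls the write-back status until cons_idx catches up */
	err = hinic_api_cmd_write(chain[HINIC_API_CMD_PMD_WRITE_TO_MGMT],
				  HINIC_NODE_ID_MGMT_HOST, cmd_buf, cmd_len);

	hinic_api_cmd_free(chain);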
diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_api_cmd.h b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_api_cmd.h
new file mode 100644
index 000000000..a48c831bb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_api_cmd.h
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_PMD_API_CMD_H_
+#define _HINIC_PMD_API_CMD_H_
+
+#define HINIC_API_CMD_CELL_CTRL_CELL_LEN_SHIFT 0
+#define HINIC_API_CMD_CELL_CTRL_RD_DMA_ATTR_OFF_SHIFT 16
+#define HINIC_API_CMD_CELL_CTRL_WR_DMA_ATTR_OFF_SHIFT 24
+#define HINIC_API_CMD_CELL_CTRL_XOR_CHKSUM_SHIFT 56
+
+#define HINIC_API_CMD_CELL_CTRL_CELL_LEN_MASK 0x3FU
+#define HINIC_API_CMD_CELL_CTRL_RD_DMA_ATTR_OFF_MASK 0x3FU
+#define HINIC_API_CMD_CELL_CTRL_WR_DMA_ATTR_OFF_MASK 0x3FU
+#define HINIC_API_CMD_CELL_CTRL_XOR_CHKSUM_MASK 0xFFU
+
+#define HINIC_API_CMD_CELL_CTRL_SET(val, member) \
+ ((((u64)val) & HINIC_API_CMD_CELL_CTRL_##member##_MASK) << \
+ HINIC_API_CMD_CELL_CTRL_##member##_SHIFT)
+
+#define HINIC_API_CMD_CELL_CTRL_CLEAR(val, member) \
+ ((val) & (~((u64)HINIC_API_CMD_CELL_CTRL_##member##_MASK << \
+ HINIC_API_CMD_CELL_CTRL_##member##_SHIFT)))
+
+#define HINIC_API_CMD_DESC_API_TYPE_SHIFT 0
+#define HINIC_API_CMD_DESC_RD_WR_SHIFT 1
+#define HINIC_API_CMD_DESC_MGMT_BYPASS_SHIFT 2
+#define HINIC_API_CMD_DESC_RESP_AEQE_EN_SHIFT 3
+#define HINIC_API_CMD_DESC_PRIV_DATA_SHIFT 8
+#define HINIC_API_CMD_DESC_DEST_SHIFT 32
+#define HINIC_API_CMD_DESC_SIZE_SHIFT 40
+#define HINIC_API_CMD_DESC_XOR_CHKSUM_SHIFT 56
+
+#define HINIC_API_CMD_DESC_API_TYPE_MASK 0x1U
+#define HINIC_API_CMD_DESC_RD_WR_MASK 0x1U
+#define HINIC_API_CMD_DESC_MGMT_BYPASS_MASK 0x1U
+#define HINIC_API_CMD_DESC_RESP_AEQE_EN_MASK 0x1U
+#define HINIC_API_CMD_DESC_DEST_MASK 0x1FU
+#define HINIC_API_CMD_DESC_SIZE_MASK 0x7FFU
+#define HINIC_API_CMD_DESC_XOR_CHKSUM_MASK 0xFFU
+#define HINIC_API_CMD_DESC_PRIV_DATA_MASK 0xFFFFFFU
+
+#define HINIC_API_CMD_DESC_SET(val, member) \
+ ((((u64)val) & HINIC_API_CMD_DESC_##member##_MASK) << \
+ HINIC_API_CMD_DESC_##member##_SHIFT)
+
+#define HINIC_API_CMD_DESC_CLEAR(val, member) \
+ ((val) & (~((u64)HINIC_API_CMD_DESC_##member##_MASK << \
+ HINIC_API_CMD_DESC_##member##_SHIFT)))
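+
+/* Example expansion: HINIC_API_CMD_DESC_SET(21, DEST)
+ * == (((u64)21) & 0x1FU) << 32 == 0x1500000000ULL
+ */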
+
+#define HINIC_API_CMD_STATUS_HEADER_VALID_SHIFT 0
+#define HINIC_API_CMD_STATUS_HEADER_CHAIN_ID_SHIFT 16
+
+#define HINIC_API_CMD_STATUS_HEADER_VALID_MASK 0xFFU
+#define HINIC_API_CMD_STATUS_HEADER_CHAIN_ID_MASK 0xFFU
+
+#define HINIC_API_CMD_STATUS_VALID_CODE 0xFF
+
+#define HINIC_API_CMD_STATUS_HEADER_GET(val, member) \
+ (((val) >> HINIC_API_CMD_STATUS_HEADER_##member##_SHIFT) & \
+ HINIC_API_CMD_STATUS_HEADER_##member##_MASK)
+
+#define HINIC_API_CMD_CHAIN_REQ_RESTART_SHIFT 1
+#define HINIC_API_CMD_CHAIN_REQ_WB_TRIGGER_SHIFT 2
+
+#define HINIC_API_CMD_CHAIN_REQ_RESTART_MASK 0x1U
+#define HINIC_API_CMD_CHAIN_REQ_WB_TRIGGER_MASK 0x1U
+
+#define HINIC_API_CMD_CHAIN_REQ_SET(val, member) \
+ (((val) & HINIC_API_CMD_CHAIN_REQ_##member##_MASK) << \
+ HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT)
+
+#define HINIC_API_CMD_CHAIN_REQ_GET(val, member) \
+ (((val) >> HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT) & \
+ HINIC_API_CMD_CHAIN_REQ_##member##_MASK)
+
+#define HINIC_API_CMD_CHAIN_REQ_CLEAR(val, member) \
+ ((val) & (~(HINIC_API_CMD_CHAIN_REQ_##member##_MASK << \
+ HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT)))
+
+#define HINIC_API_CMD_CHAIN_CTRL_RESTART_EN_SHIFT 1
+#define HINIC_API_CMD_CHAIN_CTRL_XOR_ERR_SHIFT 2
+#define HINIC_API_CMD_CHAIN_CTRL_AEQE_EN_SHIFT 4
+#define HINIC_API_CMD_CHAIN_CTRL_AEQ_ID_SHIFT 8
+#define HINIC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_SHIFT 28
+#define HINIC_API_CMD_CHAIN_CTRL_CELL_SIZE_SHIFT 30
+
+#define HINIC_API_CMD_CHAIN_CTRL_RESTART_EN_MASK 0x1U
+#define HINIC_API_CMD_CHAIN_CTRL_XOR_ERR_MASK 0x1U
+#define HINIC_API_CMD_CHAIN_CTRL_AEQE_EN_MASK 0x1U
+#define HINIC_API_CMD_CHAIN_CTRL_AEQ_ID_MASK 0x3U
+#define HINIC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_MASK 0x3U
+#define HINIC_API_CMD_CHAIN_CTRL_CELL_SIZE_MASK 0x3U
+
+#define HINIC_API_CMD_CHAIN_CTRL_SET(val, member) \
+ (((val) & HINIC_API_CMD_CHAIN_CTRL_##member##_MASK) << \
+ HINIC_API_CMD_CHAIN_CTRL_##member##_SHIFT)
+
+#define HINIC_API_CMD_CHAIN_CTRL_CLEAR(val, member) \
+ ((val) & (~(HINIC_API_CMD_CHAIN_CTRL_##member##_MASK << \
+ HINIC_API_CMD_CHAIN_CTRL_##member##_SHIFT)))
+
+#define HINIC_API_CMD_RESP_HEAD_VALID_MASK 0xFF
+#define HINIC_API_CMD_RESP_HEAD_VALID_CODE 0xFF
+
+#define HINIC_API_CMD_RESP_HEADER_VALID(val) \
+ (((val) & HINIC_API_CMD_RESP_HEAD_VALID_MASK) == \
+ HINIC_API_CMD_RESP_HEAD_VALID_CODE)
+
+#define HINIC_API_CMD_RESP_HEAD_STATUS_SHIFT 8
+#define HINIC_API_CMD_RESP_HEAD_STATUS_MASK 0xFFU
+
+#define HINIC_API_CMD_RESP_HEAD_ERR_CODE 0x1
+#define HINIC_API_CMD_RESP_HEAD_ERR(val) \
+ ((((val) >> HINIC_API_CMD_RESP_HEAD_STATUS_SHIFT) & \
+ HINIC_API_CMD_RESP_HEAD_STATUS_MASK) == \
+ HINIC_API_CMD_RESP_HEAD_ERR_CODE)
+
+#define HINIC_API_CMD_RESP_HEAD_CHAIN_ID_SHIFT 16
+#define HINIC_API_CMD_RESP_HEAD_CHAIN_ID_MASK 0xFF
+
+#define HINIC_API_CMD_RESP_RESERVED 3
+#define HINIC_API_CMD_RESP_HEAD_CHAIN_ID(val) \
+ (((val) >> HINIC_API_CMD_RESP_HEAD_CHAIN_ID_SHIFT) & \
+ HINIC_API_CMD_RESP_HEAD_CHAIN_ID_MASK)
+
+#define HINIC_API_CMD_RESP_HEAD_DRIVER_PRIV_SHIFT 40
+#define HINIC_API_CMD_RESP_HEAD_DRIVER_PRIV_MASK 0xFFFFFFU
+
+#define HINIC_API_CMD_RESP_HEAD_DRIVER_PRIV(val) \
+ (u16)(((val) >> HINIC_API_CMD_RESP_HEAD_DRIVER_PRIV_SHIFT) & \
+ HINIC_API_CMD_RESP_HEAD_DRIVER_PRIV_MASK)
+
+#define HINIC_API_CMD_STATUS_HEAD_VALID_MASK 0xFFU
+#define HINIC_API_CMD_STATUS_HEAD_VALID_SHIFT 0
+
+#define HINIC_API_CMD_STATUS_HEAD_CHAIN_ID_MASK 0xFFU
+#define HINIC_API_CMD_STATUS_HEAD_CHAIN_ID_VALID_SHIFT 16
+
+#define HINIC_API_CMD_STATUS_CONS_IDX_MASK 0xFFFFFFU
+#define HINIC_API_CMD_STATUS_CONS_IDX_SHIFT 0
+
+#define HINIC_API_CMD_STATUS_FSM_MASK 0xFU
+#define HINIC_API_CMD_STATUS_FSM_SHIFT 24
+
+#define HINIC_API_CMD_STATUS_CHKSUM_ERR_MASK 0x3U
+#define HINIC_API_CMD_STATUS_CHKSUM_ERR_SHIFT 28
+
+#define HINIC_API_CMD_STATUS_CPLD_ERR_MASK 0x1U
+#define HINIC_API_CMD_STATUS_CPLD_ERR_SHIFT 30
+
+#define HINIC_API_CMD_STATUS_CHAIN_ID(val) \
+ (((val) >> HINIC_API_CMD_STATUS_HEAD_CHAIN_ID_VALID_SHIFT) & \
+ HINIC_API_CMD_STATUS_HEAD_VALID_MASK)
+
+#define HINIC_API_CMD_STATUS_CONS_IDX(val) \
+ ((val) & HINIC_API_CMD_STATUS_CONS_IDX_MASK)
+
+#define HINIC_API_CMD_STATUS_CHKSUM_ERR(val) \
+ (((val) >> HINIC_API_CMD_STATUS_CHKSUM_ERR_SHIFT) & \
+ HINIC_API_CMD_STATUS_CHKSUM_ERR_MASK)
+
+#define HINIC_API_CMD_STATUS_GET(val, member) \
+ (((val) >> HINIC_API_CMD_STATUS_##member##_SHIFT) & \
+ HINIC_API_CMD_STATUS_##member##_MASK)
+
+enum hinic_api_cmd_chain_type {
+	/* async write to mgmt cpu, completion is not polled */
+ HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU = 6,
+ /* PMD business api chain */
+ HINIC_API_CMD_PMD_WRITE_TO_MGMT = 7,
+ HINIC_API_CMD_MAX
+};
+
+enum hinic_node_id {
+ HINIC_NODE_ID_MGMT_HOST = 21,
+};
+
+struct hinic_api_cmd_status {
+ u64 header;
+ u32 buf_desc;
+ u32 cell_addr_hi;
+ u32 cell_addr_lo;
+ u32 rsvd0;
+ u64 rsvd1;
+};
+
+/* HW struct */
+struct hinic_api_cmd_cell {
+ u64 ctrl;
+
+ /* address is 64 bit in HW struct */
+ u64 next_cell_paddr;
+
+ u64 desc;
+
+ /* HW struct */
+ union {
+ struct {
+ u64 hw_cmd_paddr;
+ } write;
+
+ struct {
+ u64 hw_wb_resp_paddr;
+ u64 hw_cmd_paddr;
+ } read;
+ };
+};
+
+struct hinic_api_cmd_cell_ctxt {
+ dma_addr_t cell_paddr;
+ struct hinic_api_cmd_cell *cell_vaddr;
+
+ dma_addr_t cell_paddr_free;
+ void *cell_vaddr_free;
+
+ dma_addr_t api_cmd_paddr;
+ void *api_cmd_vaddr;
+
+ dma_addr_t api_cmd_paddr_free;
+ void *api_cmd_vaddr_free;
+
+ int status;
+
+ u32 saved_prod_idx;
+};
+
+struct hinic_api_cmd_chain_attr {
+ struct hinic_hwdev *hwdev;
+ enum hinic_api_cmd_chain_type chain_type;
+
+ u32 num_cells;
+ u16 rsp_size;
+ u16 cell_size;
+};
+
+struct hinic_api_cmd_chain {
+ struct hinic_hwdev *hwdev;
+ enum hinic_api_cmd_chain_type chain_type;
+
+ u32 num_cells;
+ u16 cell_size;
+ u16 rsp_size;
+
+	/* HW members are in 24 bit format */
+ u32 prod_idx;
+ u32 cons_idx;
+
+	/* serializes async command submission on the chain */
+ spinlock_t async_lock;
+
+ dma_addr_t wb_status_paddr;
+ struct hinic_api_cmd_status *wb_status;
+
+ dma_addr_t head_cell_paddr;
+ struct hinic_api_cmd_cell *head_node;
+
+ struct hinic_api_cmd_cell_ctxt *cell_ctxt;
+ struct hinic_api_cmd_cell *curr_node;
+};
+
+int hinic_api_cmd_write(struct hinic_api_cmd_chain *chain,
+ enum hinic_node_id dest, void *cmd, u16 size);
+
+int hinic_api_cmd_init(struct hinic_hwdev *hwdev,
+ struct hinic_api_cmd_chain **chain);
+
+void hinic_api_cmd_free(struct hinic_api_cmd_chain **chain);
+
+#endif /* _HINIC_PMD_API_CMD_H_ */
diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cfg.c b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cfg.c
new file mode 100644
index 000000000..2d25dc9d5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cfg.c
@@ -0,0 +1,244 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include "hinic_compat.h"
+#include "hinic_pmd_hwdev.h"
+#include "hinic_pmd_hwif.h"
+#include "hinic_pmd_mgmt.h"
+#include "hinic_pmd_eqs.h"
+#include "hinic_pmd_cfg.h"
+#include "hinic_pmd_mbox.h"
+
+bool hinic_support_nic(struct hinic_hwdev *hwdev, struct nic_service_cap *cap)
+{
+ if (!IS_NIC_TYPE(hwdev))
+ return false;
+
+ if (cap)
+ memcpy(cap, &hwdev->cfg_mgmt->svc_cap.nic_cap, sizeof(*cap));
+
+ return true;
+}
+
+static void hinic_parse_shared_res_cap(struct service_cap *cap,
+ struct hinic_dev_cap *dev_cap,
+ __rte_unused enum func_type type)
+{
+ struct host_shared_resource_cap *shared_cap = &cap->shared_res_cap;
+
+ shared_cap->host_pctxs = dev_cap->host_pctx_num;
+
+ if (dev_cap->host_sf_en)
+ cap->sf_en = true;
+ else
+ cap->sf_en = false;
+
+ shared_cap->host_cctxs = dev_cap->host_ccxt_num;
+ shared_cap->host_scqs = dev_cap->host_scq_num;
+ shared_cap->host_srqs = dev_cap->host_srq_num;
+ shared_cap->host_mpts = dev_cap->host_mpt_num;
+
+ PMD_DRV_LOG(INFO, "Get share resource capability:");
+ PMD_DRV_LOG(INFO, "host_pctxs: 0x%x, host_cctxs: 0x%x, host_scqs: 0x%x, host_srqs: 0x%x, host_mpts: 0x%x",
+ shared_cap->host_pctxs, shared_cap->host_cctxs,
+ shared_cap->host_scqs, shared_cap->host_srqs,
+ shared_cap->host_mpts);
+}
+
+static void hinic_parse_l2nic_res_cap(struct service_cap *cap,
+ struct hinic_dev_cap *dev_cap,
+ enum func_type type)
+{
+ struct nic_service_cap *nic_cap = &cap->nic_cap;
+
+ if (type == TYPE_PF || type == TYPE_PPF) {
+ nic_cap->max_sqs = dev_cap->nic_max_sq + 1;
+ nic_cap->max_rqs = dev_cap->nic_max_rq + 1;
+ nic_cap->vf_max_sqs = dev_cap->nic_vf_max_sq + 1;
+ nic_cap->vf_max_rqs = dev_cap->nic_vf_max_rq + 1;
+ } else {
+ nic_cap->max_sqs = dev_cap->nic_max_sq;
+ nic_cap->max_rqs = dev_cap->nic_max_rq;
+ nic_cap->vf_max_sqs = 0;
+ nic_cap->vf_max_rqs = 0;
+ }
+
+ if (dev_cap->nic_lro_en)
+ nic_cap->lro_en = true;
+ else
+ nic_cap->lro_en = false;
+
+ nic_cap->lro_sz = dev_cap->nic_lro_sz;
+ nic_cap->tso_sz = dev_cap->nic_tso_sz;
+
+ PMD_DRV_LOG(INFO, "Get l2nic resource capability:");
+ PMD_DRV_LOG(INFO, "max_sqs: 0x%x, max_rqs: 0x%x, vf_max_sqs: 0x%x, vf_max_rqs: 0x%x",
+ nic_cap->max_sqs, nic_cap->max_rqs,
+ nic_cap->vf_max_sqs, nic_cap->vf_max_rqs);
+}
+
+u16 hinic_func_max_qnum(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ return dev->cfg_mgmt->svc_cap.max_sqs;
+}
+
+int init_cfg_mgmt(struct hinic_hwdev *hwdev)
+{
+ struct cfg_mgmt_info *cfg_mgmt;
+
+ cfg_mgmt = kzalloc(sizeof(*cfg_mgmt), GFP_KERNEL);
+ if (!cfg_mgmt)
+ return -ENOMEM;
+
+ hwdev->cfg_mgmt = cfg_mgmt;
+ cfg_mgmt->hwdev = hwdev;
+
+ return 0;
+}
+
+void free_cfg_mgmt(struct hinic_hwdev *hwdev)
+{
+ kfree(hwdev->cfg_mgmt);
+ hwdev->cfg_mgmt = NULL;
+}
+
+static void hinic_parse_pub_res_cap(struct service_cap *cap,
+ struct hinic_dev_cap *dev_cap,
+ enum func_type type)
+{
+ cap->host_id = dev_cap->host_id;
+ cap->ep_id = dev_cap->ep_id;
+ cap->max_cos_id = dev_cap->max_cos_id;
+ cap->er_id = dev_cap->er_id;
+ cap->port_id = dev_cap->port_id;
+
+ if (type == TYPE_PF || type == TYPE_PPF) {
+ cap->max_vf = dev_cap->max_vf;
+ cap->pf_num = dev_cap->pf_num;
+ cap->pf_id_start = dev_cap->pf_id_start;
+ cap->vf_num = dev_cap->vf_num;
+ cap->vf_id_start = dev_cap->vf_id_start;
+ cap->max_sqs = dev_cap->nic_max_sq + 1;
+ cap->max_rqs = dev_cap->nic_max_rq + 1;
+ } else {
+ cap->max_vf = 0;
+ cap->max_sqs = dev_cap->nic_max_sq;
+ cap->max_rqs = dev_cap->nic_max_rq;
+ }
+
+ cap->chip_svc_type = dev_cap->svc_cap_en;
+ cap->host_total_function = dev_cap->host_total_func;
+ cap->host_oq_id_mask_val = dev_cap->host_oq_id_mask_val;
+
+ PMD_DRV_LOG(INFO, "Get public resource capability:");
+ PMD_DRV_LOG(INFO, "host_id: 0x%x, ep_id: 0x%x, intr_type: 0x%x, max_cos_id: 0x%x, er_id: 0x%x, port_id: 0x%x",
+ cap->host_id, cap->ep_id, cap->intr_chip_en,
+ cap->max_cos_id, cap->er_id, cap->port_id);
+ PMD_DRV_LOG(INFO, "host_total_function: 0x%x, host_oq_id_mask_val: 0x%x, max_vf: 0x%x",
+ cap->host_total_function, cap->host_oq_id_mask_val,
+ cap->max_vf);
+ PMD_DRV_LOG(INFO, "chip_svc_type: 0x%x", cap->chip_svc_type);
+ PMD_DRV_LOG(INFO, "pf_num: 0x%x, pf_id_start: 0x%x, vf_num: 0x%x, vf_id_start: 0x%x",
+ cap->pf_num, cap->pf_id_start,
+ cap->vf_num, cap->vf_id_start);
+}
+
+static void parse_dev_cap(struct hinic_hwdev *dev,
+ struct hinic_dev_cap *dev_cap,
+ enum func_type type)
+{
+ struct service_cap *cap = &dev->cfg_mgmt->svc_cap;
+
+ /* Public resource */
+ hinic_parse_pub_res_cap(cap, dev_cap, type);
+
+ /* PPF managed dynamic resource */
+ if (type == TYPE_PPF)
+ hinic_parse_shared_res_cap(cap, dev_cap, type);
+
+ /* L2 NIC resource */
+ if (IS_NIC_TYPE(dev))
+ hinic_parse_l2nic_res_cap(cap, dev_cap, type);
+}
+
+static int get_cap_from_fw(struct hinic_hwdev *dev, enum func_type type)
+{
+ int err;
+ u16 in_len, out_len;
+ struct hinic_dev_cap dev_cap;
+
+ memset(&dev_cap, 0, sizeof(dev_cap));
+ in_len = sizeof(dev_cap);
+ out_len = in_len;
+ dev_cap.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ err = hinic_msg_to_mgmt_sync(dev, HINIC_MOD_CFGM, HINIC_CFG_NIC_CAP,
+ &dev_cap, in_len, &dev_cap, &out_len, 0);
+ if (err || dev_cap.mgmt_msg_head.status || !out_len) {
+ PMD_DRV_LOG(ERR, "Get capability from FW failed, err: %d, status: %d, out_len: %d",
+ err, dev_cap.mgmt_msg_head.status, out_len);
+ return -EFAULT;
+ }
+
+ parse_dev_cap(dev, &dev_cap, type);
+ return 0;
+}
+
+static int get_cap_from_pf(struct hinic_hwdev *dev, enum func_type type)
+{
+ int err;
+ u16 in_len, out_len;
+ struct hinic_dev_cap dev_cap;
+
+ memset(&dev_cap, 0, sizeof(dev_cap));
+ in_len = sizeof(dev_cap);
+ out_len = in_len;
+ err = hinic_mbox_to_pf(dev, HINIC_MOD_CFGM, HINIC_CFG_MBOX_CAP,
+ &dev_cap, in_len, &dev_cap, &out_len,
+ CFG_MAX_CMD_TIMEOUT);
+ if (err || dev_cap.mgmt_msg_head.status || !out_len) {
+ PMD_DRV_LOG(ERR, "Get capability from PF failed, err: %d, status: %d, out_len: %d",
+ err, dev_cap.mgmt_msg_head.status, out_len);
+ return -EFAULT;
+ }
+
+ parse_dev_cap(dev, &dev_cap, type);
+ return 0;
+}
+
+static int get_dev_cap(struct hinic_hwdev *dev)
+{
+ int err;
+ enum func_type type = HINIC_FUNC_TYPE(dev);
+
+ switch (type) {
+ case TYPE_PF:
+ case TYPE_PPF:
+ err = get_cap_from_fw(dev, type);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Get PF/PPF capability failed");
+ return err;
+ }
+ break;
+ case TYPE_VF:
+ err = get_cap_from_pf(dev, type);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Get VF capability failed, err: %d",
+ err);
+ return err;
+ }
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unsupported PCI function type");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_init_capability(struct hinic_hwdev *hwdev)
+{
+ return get_dev_cap(hwdev);
+}
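A hedged sketch of how the configuration helpers in this file are typically sequenced during device bring-up; the flow below is illustrative only, with error paths simplified and the surrounding hwdev setup assumed to have happened already:

	struct nic_service_cap nic_cap;
	int err;

	err = init_cfg_mgmt(hwdev);		/* allocate cfg_mgmt_info */
	if (err)
		return err;

	err = hinic_init_capability(hwdev);	/* query caps from FW (PF/PPF) or from PF (VF) */
	if (err) {
		free_cfg_mgmt(hwdev);
		return err;
	}

	if (hinic_support_nic(hwdev, &nic_cap))
		PMD_DRV_LOG(INFO, "max_sqs: %u, max_rqs: %u",
			    nic_cap.max_sqs, nic_cap.max_rqs);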
diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cfg.h b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cfg.h
new file mode 100644
index 000000000..1741ca44a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cfg.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_PMD_CFG_H_
+#define _HINIC_PMD_CFG_H_
+
+#define CFG_MAX_CMD_TIMEOUT 8000 /* ms */
+
+#define IS_NIC_TYPE(dev) \
+ ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_NIC_BIT0)
+
+struct host_shared_resource_cap {
+ u32 host_pctxs; /* Parent Context max 1M, IOE and FCoE max 8K flows */
+ u32 host_cctxs; /* Child Context: max 8K */
+ u32 host_scqs; /* shared CQ, chip interface module uses 1 SCQ
+ * TOE/IOE/FCoE each uses 1 SCQ
+ * RoCE/IWARP uses multiple SCQs
+			 * So at least 6 SCQs are needed
+ */
+ u32 host_srqs; /* SRQ number: 256K */
+ u32 host_mpts; /* MR number:1M */
+};
+
+struct nic_service_cap {
+ /* PF resources */
+ u16 max_sqs;
+ u16 max_rqs;
+
+	/* VF resources; the VF obtains them from its corresponding PF
+	 * through the mailbox mechanism
+	 */
+ u16 vf_max_sqs;
+ u16 vf_max_rqs;
+
+ bool lro_en; /* LRO feature enable bit */
+ u8 lro_sz; /* LRO context space: n*16B */
+ u8 tso_sz; /* TSO context space: n*16B */
+};
+
+/* service type related defines */
+enum cfg_svc_type_en {
+ CFG_SVC_NIC_BIT0 = (1 << 0),
+};
+
+/* device capability */
+struct service_cap {
+ enum cfg_svc_type_en chip_svc_type; /* HW supported service type */
+
+ /* Host global resources */
+ u16 host_total_function;
+ u8 host_oq_id_mask_val;
+ u8 host_id;
+ u8 ep_id;
+ u8 intr_chip_en;
+ u8 max_cos_id; /* PF/VF's max cos id */
+ u8 er_id; /* PF/VF's ER */
+ u8 port_id; /* PF/VF's physical port */
+ u8 max_vf; /* max VF number that PF supported */
+	bool sf_en;	/* stateful service enable status */
+ u16 max_sqs;
+ u16 max_rqs;
+
+ u32 pf_num;
+ u32 pf_id_start;
+ u32 vf_num;
+ u32 vf_id_start;
+
+ struct host_shared_resource_cap shared_res_cap; /* shared capability */
+ struct nic_service_cap nic_cap; /* NIC capability */
+};
+
+struct cfg_mgmt_info {
+ struct hinic_hwdev *hwdev;
+ struct service_cap svc_cap;
+};
+
+struct hinic_dev_cap {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ /* Public resource */
+ u8 sf_svc_attr;
+ u8 host_id;
+ u8 sf_en_pf;
+ u8 sf_en_vf;
+
+ u8 ep_id;
+ u8 intr_type;
+ u8 max_cos_id;
+ u8 er_id;
+ u8 port_id;
+ u8 max_vf;
+ u16 svc_cap_en;
+ u16 host_total_func;
+ u8 host_oq_id_mask_val;
+ u8 max_vf_cos_id;
+
+ u32 max_conn_num;
+ u16 max_stick2cache_num;
+ u16 max_bfilter_start_addr;
+ u16 bfilter_len;
+ u16 hash_bucket_num;
+ u8 cfg_file_ver;
+ u8 net_port_mode;
+	u8 valid_cos_bitmap;	/* each bit indicates whether the cos is valid */
+ u8 rsvd1;
+ u32 pf_num;
+ u32 pf_id_start;
+ u32 vf_num;
+ u32 vf_id_start;
+
+ /* shared resource */
+ u32 host_pctx_num;
+ u8 host_sf_en;
+ u8 rsvd2[3];
+ u32 host_ccxt_num;
+ u32 host_scq_num;
+ u32 host_srq_num;
+ u32 host_mpt_num;
+
+ /* l2nic */
+ u16 nic_max_sq;
+ u16 nic_max_rq;
+ u16 nic_vf_max_sq;
+ u16 nic_vf_max_rq;
+ u8 nic_lro_en;
+ u8 nic_lro_sz;
+ u8 nic_tso_sz;
+ u8 rsvd3;
+
+ u32 rsvd4[50];
+};
+
+/* Obtain cfg_mgmt->svc_cap.max_sqs */
+u16 hinic_func_max_qnum(void *hwdev);
+
+int init_cfg_mgmt(struct hinic_hwdev *hwdev);
+
+void free_cfg_mgmt(struct hinic_hwdev *hwdev);
+
+int hinic_init_capability(struct hinic_hwdev *hwdev);
+
+bool hinic_support_nic(struct hinic_hwdev *hwdev, struct nic_service_cap *cap);
+
+#endif /* _HINIC_PMD_CFG_H_ */
diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cmd.h b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cmd.h
new file mode 100644
index 000000000..09918a76f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cmd.h
@@ -0,0 +1,469 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_PORT_CMD_H_
+#define _HINIC_PORT_CMD_H_
+
+#define HINIC_AEQ 0
+
+enum hinic_resp_aeq_num {
+ HINIC_AEQ0 = 0,
+ HINIC_AEQ1 = 1,
+ HINIC_AEQ2 = 2,
+ HINIC_AEQ3 = 3,
+};
+
+enum hinic_mod_type {
+ HINIC_MOD_COMM = 0, /* HW communication module */
+ HINIC_MOD_L2NIC = 1, /* L2NIC module */
+ HINIC_MOD_CFGM = 7, /* Configuration module */
+ HINIC_MOD_HILINK = 14,
+ HINIC_MOD_MAX = 15
+};
+
+/* only used by the VF driver communicating with the PF driver to register
+ * or unregister; the command module type is HINIC_MOD_L2NIC
+ */
+#define HINIC_PORT_CMD_VF_REGISTER 0x0
+#define HINIC_PORT_CMD_VF_UNREGISTER 0x1
+
+/* cmd of mgmt CPU message for NIC module */
+enum hinic_port_cmd {
+ HINIC_PORT_CMD_MGMT_RESET = 0x0,
+
+ HINIC_PORT_CMD_CHANGE_MTU = 0x2,
+
+ HINIC_PORT_CMD_ADD_VLAN = 0x3,
+ HINIC_PORT_CMD_DEL_VLAN,
+
+ HINIC_PORT_CMD_SET_ETS = 0x7,
+ HINIC_PORT_CMD_GET_ETS,
+
+ HINIC_PORT_CMD_SET_MAC = 0x9,
+ HINIC_PORT_CMD_GET_MAC,
+ HINIC_PORT_CMD_DEL_MAC,
+
+ HINIC_PORT_CMD_SET_RX_MODE = 0xc,
+ HINIC_PORT_CMD_SET_ANTI_ATTACK_RATE = 0xd,
+
+ HINIC_PORT_CMD_GET_PAUSE_INFO = 0x14,
+ HINIC_PORT_CMD_SET_PAUSE_INFO,
+
+ HINIC_PORT_CMD_GET_LINK_STATE = 0x18,
+ HINIC_PORT_CMD_SET_LRO = 0x19,
+ HINIC_PORT_CMD_SET_RX_CSUM = 0x1a,
+ HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD = 0x1b,
+
+ HINIC_PORT_CMD_GET_PORT_STATISTICS = 0x1c,
+ HINIC_PORT_CMD_CLEAR_PORT_STATISTICS,
+ HINIC_PORT_CMD_GET_VPORT_STAT,
+ HINIC_PORT_CMD_CLEAN_VPORT_STAT,
+
+ HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL = 0x25,
+ HINIC_PORT_CMD_SET_RSS_TEMPLATE_INDIR_TBL,
+
+ HINIC_PORT_CMD_SET_PORT_ENABLE = 0x29,
+ HINIC_PORT_CMD_GET_PORT_ENABLE,
+
+ HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL = 0x2b,
+ HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL,
+ HINIC_PORT_CMD_SET_RSS_HASH_ENGINE,
+ HINIC_PORT_CMD_GET_RSS_HASH_ENGINE,
+ HINIC_PORT_CMD_GET_RSS_CTX_TBL,
+ HINIC_PORT_CMD_SET_RSS_CTX_TBL,
+ HINIC_PORT_CMD_RSS_TEMP_MGR,
+
+ HINIC_PORT_CMD_RSS_CFG = 0x42,
+
+ HINIC_PORT_CMD_GET_PHY_TYPE = 0x44,
+ HINIC_PORT_CMD_INIT_FUNC = 0x45,
+
+ HINIC_PORT_CMD_GET_JUMBO_FRAME_SIZE = 0x4a,
+ HINIC_PORT_CMD_SET_JUMBO_FRAME_SIZE,
+
+ HINIC_PORT_CMD_GET_MGMT_VERSION = 0x58,
+
+ HINIC_PORT_CMD_GET_PORT_TYPE = 0x5b,
+
+ HINIC_PORT_CMD_GET_VPORT_ENABLE = 0x5c,
+ HINIC_PORT_CMD_SET_VPORT_ENABLE,
+
+ HINIC_PORT_CMD_GET_PORT_ID_BY_FUNC_ID = 0x5e,
+
+ HINIC_PORT_CMD_GET_LRO = 0x63,
+
+ HINIC_PORT_CMD_GET_DMA_CS = 0x64,
+ HINIC_PORT_CMD_SET_DMA_CS,
+
+ HINIC_PORT_CMD_GET_GLOBAL_QPN = 0x66,
+
+ HINIC_PORT_CMD_SET_PFC_MISC = 0x67,
+ HINIC_PORT_CMD_GET_PFC_MISC,
+
+ HINIC_PORT_CMD_SET_VF_RATE = 0x69,
+ HINIC_PORT_CMD_SET_VF_VLAN,
+ HINIC_PORT_CMD_CLR_VF_VLAN,
+
+ HINIC_PORT_CMD_SET_RQ_IQ_MAP = 0x73,
+ HINIC_PORT_CMD_SET_PFC_THD = 0x75,
+
+ HINIC_PORT_CMD_LINK_STATUS_REPORT = 0xa0,
+
+ HINIC_PORT_CMD_SET_LOSSLESS_ETH = 0xa3,
+ HINIC_PORT_CMD_UPDATE_MAC = 0xa4,
+
+ HINIC_PORT_CMD_GET_PORT_INFO = 0xaa,
+
+ HINIC_PORT_CMD_UP_TC_ADD_FLOW = 0xaf,
+ HINIC_PORT_CMD_UP_TC_DEL_FLOW = 0xb0,
+ HINIC_PORT_CMD_UP_TC_GET_FLOW = 0xb1,
+ HINIC_PORT_CMD_UP_TC_FLUSH_TCAM = 0xb2,
+ HINIC_PORT_CMD_UP_TC_CTRL_TCAM_BLOCK = 0xb3,
+
+ HINIC_PORT_CMD_SET_IPSU_MAC = 0xcb,
+ HINIC_PORT_CMD_GET_IPSU_MAC = 0xcc,
+
+ HINIC_PORT_CMD_SET_XSFP_STATUS = 0xD4,
+
+ HINIC_PORT_CMD_GET_LINK_MODE = 0xD9,
+ HINIC_PORT_CMD_SET_SPEED = 0xDA,
+ HINIC_PORT_CMD_SET_AUTONEG = 0xDB,
+
+ HINIC_PORT_CMD_CLEAR_QP_RES = 0xDD,
+ HINIC_PORT_CMD_SET_SUPER_CQE = 0xDE,
+ HINIC_PORT_CMD_SET_VF_COS = 0xDF,
+ HINIC_PORT_CMD_GET_VF_COS = 0xE1,
+
+ HINIC_PORT_CMD_CABLE_PLUG_EVENT = 0xE5,
+ HINIC_PORT_CMD_LINK_ERR_EVENT = 0xE6,
+
+ HINIC_PORT_CMD_SET_COS_UP_MAP = 0xE8,
+
+ HINIC_PORT_CMD_RESET_LINK_CFG = 0xEB,
+
+ HINIC_PORT_CMD_FORCE_PKT_DROP = 0xF3,
+ HINIC_PORT_CMD_SET_LRO_TIMER = 0xF4,
+
+ HINIC_PORT_CMD_SET_VHD_CFG = 0xF7,
+ HINIC_PORT_CMD_SET_LINK_FOLLOW = 0xF8,
+ HINIC_PORT_CMD_Q_FILTER = 0xFC,
+ HINIC_PORT_CMD_TCAM_FILTER = 0xFE,
+ HINIC_PORT_CMD_SET_VLAN_FILTER = 0xFF
+};
+
+/* cmd of mgmt CPU message for HW module */
+enum hinic_mgmt_cmd {
+ HINIC_MGMT_CMD_RESET_MGMT = 0x0,
+ HINIC_MGMT_CMD_START_FLR = 0x1,
+ HINIC_MGMT_CMD_FLUSH_DOORBELL = 0x2,
+ HINIC_MGMT_CMD_GET_IO_STATUS = 0x3,
+ HINIC_MGMT_CMD_DMA_ATTR_SET = 0x4,
+
+ HINIC_MGMT_CMD_CMDQ_CTXT_SET = 0x10,
+ HINIC_MGMT_CMD_CMDQ_CTXT_GET,
+
+ HINIC_MGMT_CMD_VAT_SET = 0x12,
+ HINIC_MGMT_CMD_VAT_GET,
+
+ HINIC_MGMT_CMD_L2NIC_SQ_CI_ATTR_SET = 0x14,
+ HINIC_MGMT_CMD_L2NIC_SQ_CI_ATTR_GET,
+
+ HINIC_MGMT_CMD_PPF_HT_GPA_SET = 0x23,
+ HINIC_MGMT_CMD_RES_STATE_SET = 0x24,
+ HINIC_MGMT_CMD_FUNC_CACHE_OUT = 0x25,
+ HINIC_MGMT_CMD_FFM_SET = 0x26,
+
+ HINIC_MGMT_CMD_FUNC_RES_CLEAR = 0x29,
+
+ HINIC_MGMT_CMD_CEQ_CTRL_REG_WR_BY_UP = 0x33,
+ HINIC_MGMT_CMD_MSI_CTRL_REG_WR_BY_UP,
+ HINIC_MGMT_CMD_MSI_CTRL_REG_RD_BY_UP,
+
+ HINIC_MGMT_CMD_VF_RANDOM_ID_SET = 0x36,
+ HINIC_MGMT_CMD_FAULT_REPORT = 0x37,
+
+ HINIC_MGMT_CMD_VPD_SET = 0x40,
+ HINIC_MGMT_CMD_VPD_GET,
+ HINIC_MGMT_CMD_LABEL_SET,
+ HINIC_MGMT_CMD_LABEL_GET,
+ HINIC_MGMT_CMD_SATIC_MAC_SET,
+ HINIC_MGMT_CMD_SATIC_MAC_GET,
+ HINIC_MGMT_CMD_SYNC_TIME = 0x46,
+ HINIC_MGMT_CMD_SET_LED_STATUS = 0x4A,
+ HINIC_MGMT_CMD_L2NIC_RESET = 0x4b,
+ HINIC_MGMT_CMD_FAST_RECYCLE_MODE_SET = 0x4d,
+ HINIC_MGMT_CMD_BIOS_NV_DATA_MGMT = 0x4E,
+ HINIC_MGMT_CMD_ACTIVATE_FW = 0x4F,
+ HINIC_MGMT_CMD_PAGESIZE_SET = 0x50,
+ HINIC_MGMT_CMD_PAGESIZE_GET = 0x51,
+ HINIC_MGMT_CMD_GET_BOARD_INFO = 0x52,
+ HINIC_MGMT_CMD_WATCHDOG_INFO = 0x56,
+ HINIC_MGMT_CMD_FMW_ACT_NTC = 0x57,
+ HINIC_MGMT_CMD_SET_VF_RANDOM_ID = 0x61,
+ HINIC_MGMT_CMD_GET_PPF_STATE = 0x63,
+ HINIC_MGMT_CMD_PCIE_DFX_NTC = 0x65,
+ HINIC_MGMT_CMD_PCIE_DFX_GET = 0x66,
+};
+
+/* cmd of mgmt CPU message for HILINK module */
+enum hinic_hilink_cmd {
+ HINIC_HILINK_CMD_GET_LINK_INFO = 0x3,
+ HINIC_HILINK_CMD_SET_LINK_SETTINGS = 0x8,
+};
+
+/* uCode related commands */
+enum hinic_ucode_cmd {
+ HINIC_UCODE_CMD_MDY_QUEUE_CONTEXT = 0,
+ HINIC_UCODE_CMD_CLEAN_QUEUE_CONTEXT,
+ HINIC_UCODE_CMD_ARM_SQ,
+ HINIC_UCODE_CMD_ARM_RQ,
+ HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE,
+ HINIC_UCODE_CMD_SET_RSS_CONTEXT_TABLE,
+ HINIC_UCODE_CMD_GET_RSS_INDIR_TABLE,
+ HINIC_UCODE_CMD_GET_RSS_CONTEXT_TABLE,
+ HINIC_UCODE_CMD_SET_IQ_ENABLE,
+ HINIC_UCODE_CMD_SET_RQ_FLUSH = 10
+};
+
+enum cfg_sub_cmd {
+ /* PPF(PF) <-> FW */
+ HINIC_CFG_NIC_CAP = 0,
+ CFG_FW_VERSION,
+ CFG_UCODE_VERSION,
+ HINIC_CFG_MBOX_CAP = 6
+};
+
+enum hinic_ack_type {
+ HINIC_ACK_TYPE_CMDQ,
+ HINIC_ACK_TYPE_SHARE_CQN,
+ HINIC_ACK_TYPE_APP_CQN,
+
+ HINIC_MOD_ACK_MAX = 15,
+};
+
+enum sq_l4offload_type {
+ OFFLOAD_DISABLE = 0,
+ TCP_OFFLOAD_ENABLE = 1,
+ SCTP_OFFLOAD_ENABLE = 2,
+ UDP_OFFLOAD_ENABLE = 3,
+};
+
+enum sq_vlan_offload_flag {
+ VLAN_OFFLOAD_DISABLE = 0,
+ VLAN_OFFLOAD_ENABLE = 1,
+};
+
+enum sq_pkt_parsed_flag {
+ PKT_NOT_PARSED = 0,
+ PKT_PARSED = 1,
+};
+
+enum sq_l3_type {
+ UNKNOWN_L3TYPE = 0,
+ IPV6_PKT = 1,
+ IPV4_PKT_NO_CHKSUM_OFFLOAD = 2,
+ IPV4_PKT_WITH_CHKSUM_OFFLOAD = 3,
+};
+
+enum sq_md_type {
+ UNKNOWN_MD_TYPE = 0,
+};
+
+enum sq_l2type {
+ ETHERNET = 0,
+};
+
+enum sq_tunnel_l4_type {
+ NOT_TUNNEL,
+ TUNNEL_UDP_NO_CSUM,
+ TUNNEL_UDP_CSUM,
+};
+
+#define NIC_RSS_CMD_TEMP_ALLOC 0x01
+#define NIC_RSS_CMD_TEMP_FREE 0x02
+
+#define HINIC_RSS_TYPE_VALID_SHIFT 23
+#define HINIC_RSS_TYPE_TCP_IPV6_EXT_SHIFT 24
+#define HINIC_RSS_TYPE_IPV6_EXT_SHIFT 25
+#define HINIC_RSS_TYPE_TCP_IPV6_SHIFT 26
+#define HINIC_RSS_TYPE_IPV6_SHIFT 27
+#define HINIC_RSS_TYPE_TCP_IPV4_SHIFT 28
+#define HINIC_RSS_TYPE_IPV4_SHIFT 29
+#define HINIC_RSS_TYPE_UDP_IPV6_SHIFT 30
+#define HINIC_RSS_TYPE_UDP_IPV4_SHIFT 31
+
+#define HINIC_RSS_TYPE_SET(val, member) \
+ (((u32)(val) & 0x1) << HINIC_RSS_TYPE_##member##_SHIFT)
+
+#define HINIC_RSS_TYPE_GET(val, member) \
+ (((u32)(val) >> HINIC_RSS_TYPE_##member##_SHIFT) & 0x1)
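These shift/mask helpers pack the per-protocol RSS enable bits into one 32-bit word. A minimal illustration of how a caller might compose and test such a word (the local variable name is hypothetical):

	u32 rss_type = HINIC_RSS_TYPE_SET(1, VALID) |
		       HINIC_RSS_TYPE_SET(1, IPV4) |
		       HINIC_RSS_TYPE_SET(1, TCP_IPV4) |
		       HINIC_RSS_TYPE_SET(1, UDP_IPV4);

	if (HINIC_RSS_TYPE_GET(rss_type, TCP_IPV4))
		PMD_DRV_LOG(INFO, "TCP over IPv4 hashing enabled");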
+
+enum hinic_speed {
+ HINIC_SPEED_10MB_LINK = 0,
+ HINIC_SPEED_100MB_LINK,
+ HINIC_SPEED_1000MB_LINK,
+ HINIC_SPEED_10GB_LINK,
+ HINIC_SPEED_25GB_LINK,
+ HINIC_SPEED_40GB_LINK,
+ HINIC_SPEED_100GB_LINK,
+ HINIC_SPEED_UNKNOWN = 0xFF,
+};
+
+enum {
+ HINIC_IFLA_VF_LINK_STATE_AUTO, /* link state of the uplink */
+ HINIC_IFLA_VF_LINK_STATE_ENABLE, /* link always up */
+ HINIC_IFLA_VF_LINK_STATE_DISABLE, /* link always down */
+};
+
+#define HINIC_AF0_FUNC_GLOBAL_IDX_SHIFT 0
+#define HINIC_AF0_P2P_IDX_SHIFT 10
+#define HINIC_AF0_PCI_INTF_IDX_SHIFT 14
+#define HINIC_AF0_VF_IN_PF_SHIFT 16
+#define HINIC_AF0_FUNC_TYPE_SHIFT 24
+
+#define HINIC_AF0_FUNC_GLOBAL_IDX_MASK 0x3FF
+#define HINIC_AF0_P2P_IDX_MASK 0xF
+#define HINIC_AF0_PCI_INTF_IDX_MASK 0x3
+#define HINIC_AF0_VF_IN_PF_MASK 0xFF
+#define HINIC_AF0_FUNC_TYPE_MASK 0x1
+
+#define HINIC_AF0_GET(val, member) \
+ (((val) >> HINIC_AF0_##member##_SHIFT) & HINIC_AF0_##member##_MASK)
+
+#define HINIC_AF1_PPF_IDX_SHIFT 0
+#define HINIC_AF1_AEQS_PER_FUNC_SHIFT 8
+#define HINIC_AF1_CEQS_PER_FUNC_SHIFT 12
+#define HINIC_AF1_IRQS_PER_FUNC_SHIFT 20
+#define HINIC_AF1_DMA_ATTR_PER_FUNC_SHIFT 24
+#define HINIC_AF1_MGMT_INIT_STATUS_SHIFT 30
+#define HINIC_AF1_PF_INIT_STATUS_SHIFT 31
+
+#define HINIC_AF1_PPF_IDX_MASK 0x1F
+#define HINIC_AF1_AEQS_PER_FUNC_MASK 0x3
+#define HINIC_AF1_CEQS_PER_FUNC_MASK 0x7
+#define HINIC_AF1_IRQS_PER_FUNC_MASK 0xF
+#define HINIC_AF1_DMA_ATTR_PER_FUNC_MASK 0x7
+#define HINIC_AF1_MGMT_INIT_STATUS_MASK 0x1
+#define HINIC_AF1_PF_INIT_STATUS_MASK 0x1
+
+#define HINIC_AF1_GET(val, member) \
+ (((val) >> HINIC_AF1_##member##_SHIFT) & HINIC_AF1_##member##_MASK)
+
+#define HINIC_AF2_GLOBAL_VF_ID_OF_PF_SHIFT 16
+#define HINIC_AF2_GLOBAL_VF_ID_OF_PF_MASK 0x3FF
+
+#define HINIC_AF2_GET(val, member) \
+ (((val) >> HINIC_AF2_##member##_SHIFT) & HINIC_AF2_##member##_MASK)
+
+#define HINIC_AF4_OUTBOUND_CTRL_SHIFT 0
+#define HINIC_AF4_DOORBELL_CTRL_SHIFT 1
+#define HINIC_AF4_OUTBOUND_CTRL_MASK 0x1
+#define HINIC_AF4_DOORBELL_CTRL_MASK 0x1
+
+#define HINIC_AF4_GET(val, member) \
+ (((val) >> HINIC_AF4_##member##_SHIFT) & HINIC_AF4_##member##_MASK)
+
+#define HINIC_AF4_SET(val, member) \
+ (((val) & HINIC_AF4_##member##_MASK) << HINIC_AF4_##member##_SHIFT)
+
+#define HINIC_AF4_CLEAR(val, member) \
+ ((val) & (~(HINIC_AF4_##member##_MASK << \
+ HINIC_AF4_##member##_SHIFT)))
+
+#define HINIC_AF5_PF_STATUS_SHIFT 0
+#define HINIC_AF5_PF_STATUS_MASK 0xFFFF
+
+#define HINIC_AF5_SET(val, member) \
+ (((val) & HINIC_AF5_##member##_MASK) << HINIC_AF5_##member##_SHIFT)
+
+#define HINIC_AF5_GET(val, member) \
+ (((val) >> HINIC_AF5_##member##_SHIFT) & HINIC_AF5_##member##_MASK)
+
+#define HINIC_AF5_CLEAR(val, member) \
+ ((val) & (~(HINIC_AF5_##member##_MASK << \
+ HINIC_AF5_##member##_SHIFT)))
+
+#define HINIC_PPF_ELECTION_IDX_SHIFT 0
+
+#define HINIC_PPF_ELECTION_IDX_MASK 0x1F
+
+#define HINIC_PPF_ELECTION_SET(val, member) \
+ (((val) & HINIC_PPF_ELECTION_##member##_MASK) << \
+ HINIC_PPF_ELECTION_##member##_SHIFT)
+
+#define HINIC_PPF_ELECTION_GET(val, member) \
+ (((val) >> HINIC_PPF_ELECTION_##member##_SHIFT) & \
+ HINIC_PPF_ELECTION_##member##_MASK)
+
+#define HINIC_PPF_ELECTION_CLEAR(val, member) \
+ ((val) & (~(HINIC_PPF_ELECTION_##member##_MASK \
+ << HINIC_PPF_ELECTION_##member##_SHIFT)))
+
+#define DB_IDX(db, db_base) \
+ ((u32)(((unsigned long)(db) - (unsigned long)(db_base)) / \
+ HINIC_DB_PAGE_SIZE))
+
+enum hinic_pcie_nosnoop {
+ HINIC_PCIE_SNOOP = 0,
+ HINIC_PCIE_NO_SNOOP = 1,
+};
+
+enum hinic_pcie_tph {
+ HINIC_PCIE_TPH_DISABLE = 0,
+ HINIC_PCIE_TPH_ENABLE = 1,
+};
+
+enum hinic_outbound_ctrl {
+ ENABLE_OUTBOUND = 0x0,
+ DISABLE_OUTBOUND = 0x1,
+};
+
+enum hinic_doorbell_ctrl {
+ ENABLE_DOORBELL = 0x0,
+ DISABLE_DOORBELL = 0x1,
+};
+
+enum hinic_pf_status {
+ HINIC_PF_STATUS_INIT = 0X0,
+ HINIC_PF_STATUS_ACTIVE_FLAG = 0x11,
+ HINIC_PF_STATUS_FLR_START_FLAG = 0x12,
+ HINIC_PF_STATUS_FLR_FINISH_FLAG = 0x13,
+};
+
+/* total doorbell or direct wqe size is 512kB, db num: 128, dwqe: 128 */
+#define HINIC_DB_DWQE_SIZE 0x00080000
+
+/* db page size: 4K */
+#define HINIC_DB_PAGE_SIZE 0x00001000ULL
+
+#define HINIC_DB_MAX_AREAS (HINIC_DB_DWQE_SIZE / HINIC_DB_PAGE_SIZE)
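As a quick sanity check on the constants above, HINIC_DB_MAX_AREAS evaluates to 0x00080000 / 0x00001000 = 128 doorbell pages, which matches the "db num: 128" noted in the comment before HINIC_DB_DWQE_SIZE.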
+
+#define HINIC_PCI_MSIX_ENTRY_SIZE 16
+#define HINIC_PCI_MSIX_ENTRY_VECTOR_CTRL 12
+#define HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT 1
+
+struct hinic_mgmt_msg_head {
+ u8 status;
+ u8 version;
+ u8 resp_aeq_num;
+ u8 rsvd0[5];
+};
+
+struct hinic_root_ctxt {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_idx;
+ u16 rsvd1;
+ u8 set_cmdq_depth;
+ u8 cmdq_depth;
+ u8 lro_en;
+ u8 rsvd2;
+ u8 ppf_idx;
+ u8 rsvd3;
+ u16 rq_depth;
+ u16 rx_buf_sz;
+ u16 sq_depth;
+};
+
+#endif /* _HINIC_PORT_CMD_H_ */
diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cmdq.c b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cmdq.c
new file mode 100644
index 000000000..2e98b9c28
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cmdq.c
@@ -0,0 +1,855 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include "hinic_compat.h"
+#include "hinic_pmd_hwdev.h"
+#include "hinic_pmd_hwif.h"
+#include "hinic_pmd_wq.h"
+#include "hinic_pmd_mgmt.h"
+#include "hinic_pmd_mbox.h"
+#include "hinic_pmd_cmdq.h"
+
+#define CMDQ_CMD_TIMEOUT 5000 /* millisecond */
+
+#define UPPER_8_BITS(data) (((data) >> 8) & 0xFF)
+#define LOWER_8_BITS(data) ((data) & 0xFF)
+
+#define CMDQ_DB_INFO_HI_PROD_IDX_SHIFT 0
+#define CMDQ_DB_INFO_QUEUE_TYPE_SHIFT 23
+#define CMDQ_DB_INFO_CMDQ_TYPE_SHIFT 24
+#define CMDQ_DB_INFO_SRC_TYPE_SHIFT 27
+
+#define CMDQ_DB_INFO_HI_PROD_IDX_MASK 0xFFU
+#define CMDQ_DB_INFO_QUEUE_TYPE_MASK 0x1U
+#define CMDQ_DB_INFO_CMDQ_TYPE_MASK 0x7U
+#define CMDQ_DB_INFO_SRC_TYPE_MASK 0x1FU
+
+#define CMDQ_DB_INFO_SET(val, member) \
+ (((val) & CMDQ_DB_INFO_##member##_MASK) << \
+ CMDQ_DB_INFO_##member##_SHIFT)
+
+#define CMDQ_CTRL_PI_SHIFT 0
+#define CMDQ_CTRL_CMD_SHIFT 16
+#define CMDQ_CTRL_MOD_SHIFT 24
+#define CMDQ_CTRL_ACK_TYPE_SHIFT 29
+#define CMDQ_CTRL_HW_BUSY_BIT_SHIFT 31
+
+#define CMDQ_CTRL_PI_MASK 0xFFFFU
+#define CMDQ_CTRL_CMD_MASK 0xFFU
+#define CMDQ_CTRL_MOD_MASK 0x1FU
+#define CMDQ_CTRL_ACK_TYPE_MASK 0x3U
+#define CMDQ_CTRL_HW_BUSY_BIT_MASK 0x1U
+
+#define CMDQ_CTRL_SET(val, member) \
+ (((val) & CMDQ_CTRL_##member##_MASK) << CMDQ_CTRL_##member##_SHIFT)
+
+#define CMDQ_CTRL_GET(val, member) \
+ (((val) >> CMDQ_CTRL_##member##_SHIFT) & CMDQ_CTRL_##member##_MASK)
+
+#define CMDQ_WQE_HEADER_BUFDESC_LEN_SHIFT 0
+#define CMDQ_WQE_HEADER_COMPLETE_FMT_SHIFT 15
+#define CMDQ_WQE_HEADER_DATA_FMT_SHIFT 22
+#define CMDQ_WQE_HEADER_COMPLETE_REQ_SHIFT 23
+#define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_SHIFT 27
+#define CMDQ_WQE_HEADER_CTRL_LEN_SHIFT 29
+#define CMDQ_WQE_HEADER_HW_BUSY_BIT_SHIFT 31
+
+#define CMDQ_WQE_HEADER_BUFDESC_LEN_MASK 0xFFU
+#define CMDQ_WQE_HEADER_COMPLETE_FMT_MASK 0x1U
+#define CMDQ_WQE_HEADER_DATA_FMT_MASK 0x1U
+#define CMDQ_WQE_HEADER_COMPLETE_REQ_MASK 0x1U
+#define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_MASK 0x3U
+#define CMDQ_WQE_HEADER_CTRL_LEN_MASK 0x3U
+#define CMDQ_WQE_HEADER_HW_BUSY_BIT_MASK 0x1U
+
+#define CMDQ_WQE_HEADER_SET(val, member) \
+ (((val) & CMDQ_WQE_HEADER_##member##_MASK) << \
+ CMDQ_WQE_HEADER_##member##_SHIFT)
+
+#define CMDQ_WQE_HEADER_GET(val, member) \
+ (((val) >> CMDQ_WQE_HEADER_##member##_SHIFT) & \
+ CMDQ_WQE_HEADER_##member##_MASK)
+
+#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_SHIFT 0
+#define CMDQ_CTXT_EQ_ID_SHIFT 56
+#define CMDQ_CTXT_CEQ_ARM_SHIFT 61
+#define CMDQ_CTXT_CEQ_EN_SHIFT 62
+#define CMDQ_CTXT_HW_BUSY_BIT_SHIFT 63
+
+#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK 0xFFFFFFFFFFFFF
+#define CMDQ_CTXT_EQ_ID_MASK 0x1F
+#define CMDQ_CTXT_CEQ_ARM_MASK 0x1
+#define CMDQ_CTXT_CEQ_EN_MASK 0x1
+#define CMDQ_CTXT_HW_BUSY_BIT_MASK 0x1
+
+#define CMDQ_CTXT_PAGE_INFO_SET(val, member) \
+ (((u64)(val) & CMDQ_CTXT_##member##_MASK) << CMDQ_CTXT_##member##_SHIFT)
+
+#define CMDQ_CTXT_PAGE_INFO_CLEAR(val, member) \
+ ((val) & (~((u64)CMDQ_CTXT_##member##_MASK << \
+ CMDQ_CTXT_##member##_SHIFT)))
+
+#define CMDQ_CTXT_WQ_BLOCK_PFN_SHIFT 0
+#define CMDQ_CTXT_CI_SHIFT 52
+
+#define CMDQ_CTXT_WQ_BLOCK_PFN_MASK 0xFFFFFFFFFFFFF
+#define CMDQ_CTXT_CI_MASK 0xFFF
+
+#define CMDQ_CTXT_BLOCK_INFO_SET(val, member) \
+ (((u64)(val) & CMDQ_CTXT_##member##_MASK) << CMDQ_CTXT_##member##_SHIFT)
+
+#define SAVED_DATA_ARM_SHIFT 31
+
+#define SAVED_DATA_ARM_MASK 0x1U
+
+#define SAVED_DATA_SET(val, member) \
+ (((val) & SAVED_DATA_##member##_MASK) << SAVED_DATA_##member##_SHIFT)
+
+#define SAVED_DATA_CLEAR(val, member) \
+ ((val) & (~(SAVED_DATA_##member##_MASK << SAVED_DATA_##member##_SHIFT)))
+
+#define WQE_ERRCODE_VAL_SHIFT 20
+
+#define WQE_ERRCODE_VAL_MASK 0xF
+
+#define WQE_ERRCODE_GET(val, member) \
+ (((val) >> WQE_ERRCODE_##member##_SHIFT) & WQE_ERRCODE_##member##_MASK)
+
+#define WQE_COMPLETED(ctrl_info) CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT)
+
+#define WQE_HEADER(wqe) ((struct hinic_cmdq_header *)(wqe))
+
+#define CMDQ_DB_PI_OFF(pi) (((u16)LOWER_8_BITS(pi)) << 3)
+
+#define CMDQ_DB_ADDR(db_base, pi) \
+ (((u8 *)(db_base) + HINIC_DB_OFF) + CMDQ_DB_PI_OFF(pi))
+
+#define CMDQ_PFN(addr, page_size) ((addr) >> (ilog2(page_size)))
+
+#define FIRST_DATA_TO_WRITE_LAST sizeof(u64)
+
+#define WQE_LCMD_SIZE 64
+#define WQE_SCMD_SIZE 64
+
+#define COMPLETE_LEN 3
+
+#define CMDQ_WQEBB_SIZE 64
+#define CMDQ_WQEBB_SHIFT 6
+
+#define CMDQ_WQE_SIZE 64
+
+#define HINIC_CMDQ_WQ_BUF_SIZE 4096
+
+#define WQE_NUM_WQEBBS(wqe_size, wq) \
+ ((u16)(ALIGN((u32)(wqe_size), (wq)->wqebb_size) / (wq)->wqebb_size))
+
+#define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \
+ struct hinic_cmdqs, cmdq[0])
+
+#define WAIT_CMDQ_ENABLE_TIMEOUT 300
+
+
+static void cmdq_init_queue_ctxt(struct hinic_cmdq *cmdq,
+ struct hinic_cmdq_ctxt *cmdq_ctxt);
+static void hinic_cmdqs_free(struct hinic_hwdev *hwdev);
+
+bool hinic_cmdq_idle(struct hinic_cmdq *cmdq)
+{
+ struct hinic_wq *wq = cmdq->wq;
+
+ return ((wq->delta) == wq->q_depth ? true : false);
+}
+
+struct hinic_cmd_buf *hinic_alloc_cmd_buf(void *hwdev)
+{
+ struct hinic_cmdqs *cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs;
+ struct hinic_cmd_buf *cmd_buf;
+
+ cmd_buf = kzalloc(sizeof(*cmd_buf), GFP_KERNEL);
+ if (!cmd_buf) {
+ PMD_DRV_LOG(ERR, "Allocate cmd buffer failed");
+ return NULL;
+ }
+
+ cmd_buf->buf = pci_pool_alloc(cmdqs->cmd_buf_pool, &cmd_buf->dma_addr);
+ if (!cmd_buf->buf) {
+ PMD_DRV_LOG(ERR, "Allocate cmd from the pool failed");
+ goto alloc_pci_buf_err;
+ }
+
+ return cmd_buf;
+
+alloc_pci_buf_err:
+ kfree(cmd_buf);
+ return NULL;
+}
+
+void hinic_free_cmd_buf(void *hwdev, struct hinic_cmd_buf *cmd_buf)
+{
+ struct hinic_cmdqs *cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs;
+
+ pci_pool_free(cmdqs->cmd_buf_pool, cmd_buf->buf, cmd_buf->dma_addr);
+ kfree(cmd_buf);
+}
+
+static u32 cmdq_wqe_size(enum cmdq_wqe_type wqe_type)
+{
+ u32 wqe_size = 0;
+
+ switch (wqe_type) {
+ case WQE_LCMD_TYPE:
+ wqe_size = WQE_LCMD_SIZE;
+ break;
+ case WQE_SCMD_TYPE:
+ wqe_size = WQE_SCMD_SIZE;
+ break;
+ }
+
+ return wqe_size;
+}
+
+static int cmdq_get_wqe_size(enum bufdesc_len len)
+{
+ int wqe_size = 0;
+
+ switch (len) {
+ case BUFDESC_LCMD_LEN:
+ wqe_size = WQE_LCMD_SIZE;
+ break;
+ case BUFDESC_SCMD_LEN:
+ wqe_size = WQE_SCMD_SIZE;
+ break;
+ }
+
+ return wqe_size;
+}
+
+static void cmdq_set_completion(struct hinic_cmdq_completion *complete,
+ struct hinic_cmd_buf *buf_out)
+{
+ struct hinic_sge_resp *sge_resp = &complete->sge_resp;
+
+ hinic_set_sge(&sge_resp->sge, buf_out->dma_addr,
+ HINIC_CMDQ_BUF_SIZE);
+}
+
+static void cmdq_set_lcmd_bufdesc(struct hinic_cmdq_wqe_lcmd *wqe,
+ struct hinic_cmd_buf *buf_in)
+{
+ hinic_set_sge(&wqe->buf_desc.sge, buf_in->dma_addr, buf_in->size);
+}
+
+static void cmdq_fill_db(struct hinic_cmdq_db *db,
+ enum hinic_cmdq_type cmdq_type, u16 prod_idx)
+{
+ db->db_info = CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX) |
+ CMDQ_DB_INFO_SET(HINIC_DB_CMDQ_TYPE, QUEUE_TYPE) |
+ CMDQ_DB_INFO_SET(cmdq_type, CMDQ_TYPE) |
+ CMDQ_DB_INFO_SET(HINIC_DB_SRC_CMDQ_TYPE, SRC_TYPE);
+}
+
+static void cmdq_set_db(struct hinic_cmdq *cmdq,
+ enum hinic_cmdq_type cmdq_type, u16 prod_idx)
+{
+ struct hinic_cmdq_db db;
+
+ cmdq_fill_db(&db, cmdq_type, prod_idx);
+
+ /* The data that is written to HW should be in Big Endian Format */
+ db.db_info = cpu_to_be32(db.db_info);
+
+ rte_wmb(); /* write all before the doorbell */
+
+ writel(db.db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx));
+}
+
+static void cmdq_wqe_fill(void *dst, void *src)
+{
+ memcpy((u8 *)dst + FIRST_DATA_TO_WRITE_LAST,
+ (u8 *)src + FIRST_DATA_TO_WRITE_LAST,
+ CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST);
+
+ rte_wmb();/* The first 8 bytes should be written last */
+
+ *(u64 *)dst = *(u64 *)src;
+}
+
+static void cmdq_prepare_wqe_ctrl(struct hinic_cmdq_wqe *wqe, int wrapped,
+ enum hinic_ack_type ack_type,
+ enum hinic_mod_type mod, u8 cmd, u16 prod_idx,
+ enum completion_format complete_format,
+ enum data_format local_data_format,
+ enum bufdesc_len buf_len)
+{
+ struct hinic_ctrl *ctrl;
+ enum ctrl_sect_len ctrl_len;
+ struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
+ struct hinic_cmdq_wqe_scmd *wqe_scmd;
+ u32 saved_data = WQE_HEADER(wqe)->saved_data;
+
+ if (local_data_format == DATA_SGE) {
+ wqe_lcmd = &wqe->wqe_lcmd;
+
+ wqe_lcmd->status.status_info = 0;
+ ctrl = &wqe_lcmd->ctrl;
+ ctrl_len = CTRL_SECT_LEN;
+ } else {
+ wqe_scmd = &wqe->inline_wqe.wqe_scmd;
+
+ wqe_scmd->status.status_info = 0;
+ ctrl = &wqe_scmd->ctrl;
+ ctrl_len = CTRL_DIRECT_SECT_LEN;
+ }
+
+ ctrl->ctrl_info = CMDQ_CTRL_SET(prod_idx, PI) |
+ CMDQ_CTRL_SET(cmd, CMD) |
+ CMDQ_CTRL_SET(mod, MOD) |
+ CMDQ_CTRL_SET(ack_type, ACK_TYPE);
+
+ WQE_HEADER(wqe)->header_info =
+ CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN) |
+ CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT) |
+ CMDQ_WQE_HEADER_SET(local_data_format, DATA_FMT) |
+ CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ) |
+ CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) |
+ CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN) |
+ CMDQ_WQE_HEADER_SET((u32)wrapped, HW_BUSY_BIT);
+
+ if (cmd == CMDQ_SET_ARM_CMD && mod == HINIC_MOD_COMM) {
+ saved_data &= SAVED_DATA_CLEAR(saved_data, ARM);
+ WQE_HEADER(wqe)->saved_data = saved_data |
+ SAVED_DATA_SET(1, ARM);
+ } else {
+ saved_data &= SAVED_DATA_CLEAR(saved_data, ARM);
+ WQE_HEADER(wqe)->saved_data = saved_data;
+ }
+}
+
+static void cmdq_set_lcmd_wqe(struct hinic_cmdq_wqe *wqe,
+ enum cmdq_cmd_type cmd_type,
+ struct hinic_cmd_buf *buf_in,
+ struct hinic_cmd_buf *buf_out, int wrapped,
+ enum hinic_ack_type ack_type,
+ enum hinic_mod_type mod, u8 cmd, u16 prod_idx)
+{
+ struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;
+ enum completion_format complete_format = COMPLETE_DIRECT;
+
+ switch (cmd_type) {
+ case SYNC_CMD_SGE_RESP:
+ if (buf_out) {
+ complete_format = COMPLETE_SGE;
+ cmdq_set_completion(&wqe_lcmd->completion, buf_out);
+ }
+ break;
+ case SYNC_CMD_DIRECT_RESP:
+ complete_format = COMPLETE_DIRECT;
+ wqe_lcmd->completion.direct_resp = 0;
+ break;
+ case ASYNC_CMD:
+ complete_format = COMPLETE_DIRECT;
+ wqe_lcmd->completion.direct_resp = 0;
+
+ wqe_lcmd->buf_desc.saved_async_buf = (u64)(buf_in);
+ break;
+ }
+
+ cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd,
+ prod_idx, complete_format, DATA_SGE,
+ BUFDESC_LCMD_LEN);
+
+ cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in);
+}
+
+static int cmdq_params_valid(struct hinic_cmd_buf *buf_in)
+{
+ if (buf_in->size > HINIC_CMDQ_MAX_DATA_SIZE) {
+ PMD_DRV_LOG(ERR, "Invalid CMDQ buffer size");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int wait_cmdqs_enable(struct hinic_cmdqs *cmdqs)
+{
+ unsigned long end;
+
+ end = jiffies + msecs_to_jiffies(WAIT_CMDQ_ENABLE_TIMEOUT);
+ do {
+ if (cmdqs->status & HINIC_CMDQ_ENABLE)
+ return 0;
+
+ } while (time_before(jiffies, end));
+
+ return -EBUSY;
+}
+
+static void cmdq_update_errcode(struct hinic_cmdq *cmdq, u16 prod_idx,
+ int errcode)
+{
+ cmdq->errcode[prod_idx] = errcode;
+}
+
+static void clear_wqe_complete_bit(struct hinic_cmdq *cmdq,
+ struct hinic_cmdq_wqe *wqe)
+{
+ struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
+ struct hinic_cmdq_inline_wqe *inline_wqe;
+ struct hinic_cmdq_wqe_scmd *wqe_scmd;
+ struct hinic_ctrl *ctrl;
+ u32 header_info = be32_to_cpu(WQE_HEADER(wqe)->header_info);
+ int buf_len = CMDQ_WQE_HEADER_GET(header_info, BUFDESC_LEN);
+ int wqe_size = cmdq_get_wqe_size(buf_len);
+ u16 num_wqebbs;
+
+ if (wqe_size == WQE_LCMD_SIZE) {
+ wqe_lcmd = &wqe->wqe_lcmd;
+ ctrl = &wqe_lcmd->ctrl;
+ } else {
+ inline_wqe = &wqe->inline_wqe;
+ wqe_scmd = &inline_wqe->wqe_scmd;
+ ctrl = &wqe_scmd->ctrl;
+ }
+
+ /* clear HW busy bit */
+ ctrl->ctrl_info = 0;
+
+ rte_wmb(); /* verify wqe is clear */
+
+ num_wqebbs = WQE_NUM_WQEBBS(wqe_size, cmdq->wq);
+ hinic_put_wqe(cmdq->wq, num_wqebbs);
+}
+
+static int hinic_set_cmdq_ctxts(struct hinic_hwdev *hwdev)
+{
+ struct hinic_cmdqs *cmdqs = hwdev->cmdqs;
+ struct hinic_cmdq_ctxt *cmdq_ctxt;
+ enum hinic_cmdq_type cmdq_type;
+ u16 in_size;
+ int err;
+
+ cmdq_type = HINIC_CMDQ_SYNC;
+ for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
+ cmdq_ctxt = &cmdqs->cmdq[cmdq_type].cmdq_ctxt;
+ cmdq_ctxt->resp_aeq_num = HINIC_AEQ1;
+ in_size = sizeof(*cmdq_ctxt);
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_CMDQ_CTXT_SET,
+ cmdq_ctxt, in_size, NULL,
+ NULL, 0);
+ if (err) {
+ if (err == HINIC_MBOX_PF_BUSY_ACTIVE_FW ||
+ err == HINIC_DEV_BUSY_ACTIVE_FW) {
+ cmdqs->status |= HINIC_CMDQ_SET_FAIL;
+ PMD_DRV_LOG(ERR, "PF or VF fw is hot active");
+ }
+ PMD_DRV_LOG(ERR, "Set cmdq ctxt failed, err: %d", err);
+ return -EFAULT;
+ }
+ }
+
+ cmdqs->status &= ~HINIC_CMDQ_SET_FAIL;
+ cmdqs->status |= HINIC_CMDQ_ENABLE;
+
+ return 0;
+}
+
+void hinic_comm_cmdqs_free(struct hinic_hwdev *hwdev)
+{
+ hinic_cmdqs_free(hwdev);
+}
+
+int hinic_reinit_cmdq_ctxts(struct hinic_hwdev *hwdev)
+{
+ struct hinic_cmdqs *cmdqs = hwdev->cmdqs;
+ enum hinic_cmdq_type cmdq_type;
+
+ cmdq_type = HINIC_CMDQ_SYNC;
+ for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
+ cmdqs->cmdq[cmdq_type].wrapped = 1;
+ hinic_wq_wqe_pg_clear(cmdqs->cmdq[cmdq_type].wq);
+ }
+
+ return hinic_set_cmdq_ctxts(hwdev);
+}
+
+static int init_cmdq(struct hinic_cmdq *cmdq, struct hinic_hwdev *hwdev,
+ struct hinic_wq *wq, enum hinic_cmdq_type q_type)
+{
+ void __iomem *db_base;
+ int err = 0;
+ size_t errcode_size;
+ size_t cmd_infos_size;
+
+ cmdq->wq = wq;
+ cmdq->cmdq_type = q_type;
+ cmdq->wrapped = 1;
+
+ spin_lock_init(&cmdq->cmdq_lock);
+
+ errcode_size = wq->q_depth * sizeof(*cmdq->errcode);
+ cmdq->errcode = kzalloc(errcode_size, GFP_KERNEL);
+ if (!cmdq->errcode) {
+ PMD_DRV_LOG(ERR, "Allocate errcode for cmdq failed");
+ spin_lock_deinit(&cmdq->cmdq_lock);
+ return -ENOMEM;
+ }
+
+ cmd_infos_size = wq->q_depth * sizeof(*cmdq->cmd_infos);
+ cmdq->cmd_infos = kzalloc(cmd_infos_size, GFP_KERNEL);
+ if (!cmdq->cmd_infos) {
+		PMD_DRV_LOG(ERR, "Allocate cmd_infos for cmdq failed");
+ err = -ENOMEM;
+ goto cmd_infos_err;
+ }
+
+ err = hinic_alloc_db_addr(hwdev, &db_base);
+ if (err)
+ goto alloc_db_err;
+
+ cmdq->db_base = (u8 *)db_base;
+ return 0;
+
+alloc_db_err:
+ kfree(cmdq->cmd_infos);
+
+cmd_infos_err:
+ kfree(cmdq->errcode);
+ spin_lock_deinit(&cmdq->cmdq_lock);
+
+ return err;
+}
+
+static void free_cmdq(struct hinic_hwdev *hwdev, struct hinic_cmdq *cmdq)
+{
+ hinic_free_db_addr(hwdev, cmdq->db_base);
+ kfree(cmdq->cmd_infos);
+ kfree(cmdq->errcode);
+ spin_lock_deinit(&cmdq->cmdq_lock);
+}
+
+static int hinic_cmdqs_init(struct hinic_hwdev *hwdev)
+{
+ struct hinic_cmdqs *cmdqs;
+ struct hinic_cmdq_ctxt *cmdq_ctxt;
+ enum hinic_cmdq_type type, cmdq_type;
+ size_t saved_wqs_size;
+ int err;
+
+ cmdqs = kzalloc(sizeof(*cmdqs), GFP_KERNEL);
+ if (!cmdqs)
+ return -ENOMEM;
+
+ hwdev->cmdqs = cmdqs;
+ cmdqs->hwdev = hwdev;
+
+ saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq);
+ cmdqs->saved_wqs = kzalloc(saved_wqs_size, GFP_KERNEL);
+ if (!cmdqs->saved_wqs) {
+ PMD_DRV_LOG(ERR, "Allocate saved wqs failed");
+ err = -ENOMEM;
+ goto alloc_wqs_err;
+ }
+
+ cmdqs->cmd_buf_pool = dma_pool_create("hinic_cmdq", hwdev,
+ HINIC_CMDQ_BUF_SIZE,
+ HINIC_CMDQ_BUF_SIZE, 0ULL);
+ if (!cmdqs->cmd_buf_pool) {
+ PMD_DRV_LOG(ERR, "Create cmdq buffer pool failed");
+ err = -ENOMEM;
+ goto pool_create_err;
+ }
+
+ err = hinic_cmdq_alloc(cmdqs->saved_wqs, hwdev,
+ HINIC_MAX_CMDQ_TYPES, HINIC_CMDQ_WQ_BUF_SIZE,
+ CMDQ_WQEBB_SHIFT, HINIC_CMDQ_DEPTH);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Allocate cmdq failed");
+ goto cmdq_alloc_err;
+ }
+
+ cmdq_type = HINIC_CMDQ_SYNC;
+ for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
+ err = init_cmdq(&cmdqs->cmdq[cmdq_type], hwdev,
+ &cmdqs->saved_wqs[cmdq_type], cmdq_type);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Initialize cmdq failed");
+ goto init_cmdq_err;
+ }
+
+ cmdq_ctxt = &cmdqs->cmdq[cmdq_type].cmdq_ctxt;
+ cmdq_init_queue_ctxt(&cmdqs->cmdq[cmdq_type], cmdq_ctxt);
+ }
+
+ err = hinic_set_cmdq_ctxts(hwdev);
+ if (err)
+ goto init_cmdq_err;
+
+ return 0;
+
+init_cmdq_err:
+ type = HINIC_CMDQ_SYNC;
+ for ( ; type < cmdq_type; type++)
+ free_cmdq(hwdev, &cmdqs->cmdq[type]);
+
+ hinic_cmdq_free(hwdev, cmdqs->saved_wqs, HINIC_MAX_CMDQ_TYPES);
+
+cmdq_alloc_err:
+ dma_pool_destroy(cmdqs->cmd_buf_pool);
+
+pool_create_err:
+ kfree(cmdqs->saved_wqs);
+
+alloc_wqs_err:
+ kfree(cmdqs);
+
+ return err;
+}
+
+static void hinic_cmdqs_free(struct hinic_hwdev *hwdev)
+{
+ struct hinic_cmdqs *cmdqs = hwdev->cmdqs;
+ enum hinic_cmdq_type cmdq_type = HINIC_CMDQ_SYNC;
+
+ cmdqs->status &= ~HINIC_CMDQ_ENABLE;
+
+ for ( ; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++)
+ free_cmdq(cmdqs->hwdev, &cmdqs->cmdq[cmdq_type]);
+
+ hinic_cmdq_free(hwdev, cmdqs->saved_wqs,
+ HINIC_MAX_CMDQ_TYPES);
+
+ dma_pool_destroy(cmdqs->cmd_buf_pool);
+
+ kfree(cmdqs->saved_wqs);
+
+ kfree(cmdqs);
+}
+
+static int hinic_set_cmdq_depth(struct hinic_hwdev *hwdev, u16 cmdq_depth)
+{
+ struct hinic_root_ctxt root_ctxt;
+
+ memset(&root_ctxt, 0, sizeof(root_ctxt));
+ root_ctxt.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ root_ctxt.func_idx = hinic_global_func_id(hwdev);
+ root_ctxt.ppf_idx = hinic_ppf_idx(hwdev);
+ root_ctxt.set_cmdq_depth = 1;
+ root_ctxt.cmdq_depth = (u8)ilog2(cmdq_depth);
+ return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_VAT_SET,
+ &root_ctxt, sizeof(root_ctxt),
+ NULL, NULL, 0);
+}
+
+int hinic_comm_cmdqs_init(struct hinic_hwdev *hwdev)
+{
+ int err;
+
+ err = hinic_cmdqs_init(hwdev);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Init cmd queues failed");
+ return err;
+ }
+
+ err = hinic_set_cmdq_depth(hwdev, HINIC_CMDQ_DEPTH);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Set cmdq depth failed");
+ goto set_cmdq_depth_err;
+ }
+
+ return 0;
+
+set_cmdq_depth_err:
+ hinic_cmdqs_free(hwdev);
+
+ return err;
+}
+
+static void cmdq_init_queue_ctxt(struct hinic_cmdq *cmdq,
+ struct hinic_cmdq_ctxt *cmdq_ctxt)
+{
+ struct hinic_cmdqs *cmdqs = (struct hinic_cmdqs *)cmdq_to_cmdqs(cmdq);
+ struct hinic_hwdev *hwdev = cmdqs->hwdev;
+ struct hinic_wq *wq = cmdq->wq;
+ struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info;
+ u64 wq_first_page_paddr, pfn;
+
+ u16 start_ci = (u16)(wq->cons_idx);
+
+ /* The data in the HW is in Big Endian Format */
+ wq_first_page_paddr = wq->queue_buf_paddr;
+
+ pfn = CMDQ_PFN(wq_first_page_paddr, HINIC_PAGE_SIZE);
+ ctxt_info->curr_wqe_page_pfn =
+ CMDQ_CTXT_PAGE_INFO_SET(1, HW_BUSY_BIT) |
+ CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) |
+ CMDQ_CTXT_PAGE_INFO_SET(0, CEQ_ARM) |
+ CMDQ_CTXT_PAGE_INFO_SET(HINIC_CEQ_ID_CMDQ, EQ_ID) |
+ CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN);
+
+ ctxt_info->wq_block_pfn = CMDQ_CTXT_BLOCK_INFO_SET(start_ci, CI) |
+ CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN);
+ cmdq_ctxt->func_idx = HINIC_HWIF_GLOBAL_IDX(hwdev->hwif);
+ cmdq_ctxt->ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
+ cmdq_ctxt->cmdq_id = cmdq->cmdq_type;
+}
+
+static int hinic_cmdq_poll_msg(struct hinic_cmdq *cmdq, u32 timeout)
+{
+ struct hinic_cmdq_wqe *wqe;
+ struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
+ struct hinic_ctrl *ctrl;
+ struct hinic_cmdq_cmd_info *cmd_info;
+ u32 status_info, ctrl_info;
+ u16 ci;
+ int errcode;
+ unsigned long end;
+ int done = 0;
+ int rc = 0;
+
+ wqe = hinic_read_wqe(cmdq->wq, 1, &ci);
+ if (wqe == NULL) {
+ PMD_DRV_LOG(ERR, "No outstanding cmdq msg");
+ return -EINVAL;
+ }
+
+ cmd_info = &cmdq->cmd_infos[ci];
+	/* this cmd has not been filled and sent to hw, or a timeout msg ack was received */
+	if (cmd_info->cmd_type == HINIC_CMD_TYPE_NONE) {
+		PMD_DRV_LOG(ERR, "Cmdq msg has not been filled and sent to hw, or a timeout msg ack was received. cmdq ci: %u",
+ ci);
+ return -EINVAL;
+ }
+
+	/* only the arm bit uses an scmd wqe; this wqe is an lcmd */
+ wqe_lcmd = &wqe->wqe_lcmd;
+ ctrl = &wqe_lcmd->ctrl;
+ end = jiffies + msecs_to_jiffies(timeout);
+ do {
+ ctrl_info = be32_to_cpu((ctrl)->ctrl_info);
+ if (WQE_COMPLETED(ctrl_info)) {
+ done = 1;
+ break;
+ }
+
+ rte_delay_ms(1);
+ } while (time_before(jiffies, end));
+
+ if (done) {
+ status_info = be32_to_cpu(wqe_lcmd->status.status_info);
+ errcode = WQE_ERRCODE_GET(status_info, VAL);
+ cmdq_update_errcode(cmdq, ci, errcode);
+ clear_wqe_complete_bit(cmdq, wqe);
+ rc = 0;
+ } else {
+ PMD_DRV_LOG(ERR, "Poll cmdq msg time out, ci: %u", ci);
+ rc = -ETIMEDOUT;
+ }
+
+ /* set this cmd invalid */
+ cmd_info->cmd_type = HINIC_CMD_TYPE_NONE;
+
+ return rc;
+}
+
+static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,
+ enum hinic_ack_type ack_type,
+ enum hinic_mod_type mod, u8 cmd,
+ struct hinic_cmd_buf *buf_in,
+ u64 *out_param, u32 timeout)
+{
+ struct hinic_wq *wq = cmdq->wq;
+ struct hinic_cmdq_wqe *curr_wqe, wqe;
+ struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
+ u16 curr_prod_idx, next_prod_idx, num_wqebbs;
+ int wrapped;
+ u32 timeo, wqe_size;
+ int err;
+
+ wqe_size = cmdq_wqe_size(WQE_LCMD_TYPE);
+ num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq);
+
+ /* Keep wrapped and doorbell index correct. */
+ spin_lock(&cmdq->cmdq_lock);
+
+ curr_wqe = hinic_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx);
+ if (!curr_wqe) {
+ err = -EBUSY;
+ goto cmdq_unlock;
+ }
+
+ memset(&wqe, 0, sizeof(wqe));
+ wrapped = cmdq->wrapped;
+
+ next_prod_idx = curr_prod_idx + num_wqebbs;
+ if (next_prod_idx >= wq->q_depth) {
+ cmdq->wrapped = !cmdq->wrapped;
+ next_prod_idx -= wq->q_depth;
+ }
+
+ cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_DIRECT_RESP, buf_in, NULL,
+ wrapped, ack_type, mod, cmd, curr_prod_idx);
+
+ /* The data that is written to HW should be in Big Endian Format */
+ hinic_cpu_to_be32(&wqe, wqe_size);
+
+	/* The CMDQ WQE is not shadowed, so the wqe is written directly to the wq */
+ cmdq_wqe_fill(curr_wqe, &wqe);
+
+ cmdq->cmd_infos[curr_prod_idx].cmd_type = HINIC_CMD_TYPE_NORMAL;
+
+ cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);
+
+ timeo = msecs_to_jiffies(timeout ? timeout : CMDQ_CMD_TIMEOUT);
+ err = hinic_cmdq_poll_msg(cmdq, timeo);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Cmdq poll msg ack failed, prod idx: 0x%x",
+ curr_prod_idx);
+ err = -ETIMEDOUT;
+ goto cmdq_unlock;
+ }
+
+ rte_smp_rmb(); /* read error code after completion */
+
+ if (out_param) {
+ wqe_lcmd = &curr_wqe->wqe_lcmd;
+ *out_param = cpu_to_be64(wqe_lcmd->completion.direct_resp);
+ }
+
+ if (cmdq->errcode[curr_prod_idx] > 1) {
+ err = cmdq->errcode[curr_prod_idx];
+ goto cmdq_unlock;
+ }
+
+cmdq_unlock:
+ spin_unlock(&cmdq->cmdq_lock);
+
+ return err;
+}
+
+int hinic_cmdq_direct_resp(void *hwdev, enum hinic_ack_type ack_type,
+ enum hinic_mod_type mod, u8 cmd,
+ struct hinic_cmd_buf *buf_in,
+ u64 *out_param, u32 timeout)
+{
+ struct hinic_cmdqs *cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs;
+ int err = cmdq_params_valid(buf_in);
+
+ if (err) {
+ PMD_DRV_LOG(ERR, "Invalid CMDQ parameters");
+ return err;
+ }
+
+ err = wait_cmdqs_enable(cmdqs);
+ if (err) {
+		PMD_DRV_LOG(ERR, "Cmdq is disabled");
+ return err;
+ }
+
+ return cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC_CMDQ_SYNC],
+ ack_type, mod, cmd, buf_in,
+ out_param, timeout);
+}
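A hedged sketch of how a caller might issue one synchronous, direct-response cmdq command with the helpers above; the request structure `req` and the command id `cmd_id` are hypothetical placeholders, and real callers in the driver add their own endianness handling and error checks:

	struct hinic_cmd_buf *cmd_buf;
	u64 out_param = 0;
	int err;

	cmd_buf = hinic_alloc_cmd_buf(hwdev);
	if (!cmd_buf)
		return -ENOMEM;

	/* req: caller-defined payload, must fit in HINIC_CMDQ_MAX_DATA_SIZE */
	memcpy(cmd_buf->buf, &req, sizeof(req));
	cmd_buf->size = sizeof(req);

	err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ, HINIC_MOD_L2NIC,
				     cmd_id /* hypothetical ucode command */, cmd_buf,
				     &out_param, 0 /* 0 selects the default timeout */);

	hinic_free_cmd_buf(hwdev, cmd_buf);
	return err;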
diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cmdq.h b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cmdq.h
new file mode 100644
index 000000000..0d5e38012
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cmdq.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_PMD_CMDQ_H_
+#define _HINIC_PMD_CMDQ_H_
+
+#define HINIC_DB_OFF 0x00000800
+
+#define HINIC_SCMD_DATA_LEN 16
+
+/* pmd driver uses 64, kernel l2nic uses 4096 */
+#define HINIC_CMDQ_DEPTH 64
+
+#define HINIC_CMDQ_BUF_SIZE 2048U
+#define HINIC_CMDQ_BUF_HW_RSVD 8
+#define HINIC_CMDQ_MAX_DATA_SIZE (HINIC_CMDQ_BUF_SIZE \
+ - HINIC_CMDQ_BUF_HW_RSVD)
+
+#define HINIC_CEQ_ID_CMDQ 0
+
+enum cmdq_scmd_type {
+ CMDQ_SET_ARM_CMD = 2,
+};
+
+enum cmdq_wqe_type {
+ WQE_LCMD_TYPE,
+ WQE_SCMD_TYPE,
+};
+
+enum ctrl_sect_len {
+ CTRL_SECT_LEN = 1,
+ CTRL_DIRECT_SECT_LEN = 2,
+};
+
+enum bufdesc_len {
+ BUFDESC_LCMD_LEN = 2,
+ BUFDESC_SCMD_LEN = 3,
+};
+
+enum data_format {
+ DATA_SGE,
+};
+
+enum completion_format {
+ COMPLETE_DIRECT,
+ COMPLETE_SGE,
+};
+
+enum completion_request {
+ CEQ_SET = 1,
+};
+
+enum cmdq_cmd_type {
+ SYNC_CMD_DIRECT_RESP,
+ SYNC_CMD_SGE_RESP,
+ ASYNC_CMD,
+};
+
+enum hinic_cmdq_type {
+ HINIC_CMDQ_SYNC,
+ HINIC_CMDQ_ASYNC,
+ HINIC_MAX_CMDQ_TYPES,
+};
+
+enum hinic_db_src_type {
+ HINIC_DB_SRC_CMDQ_TYPE,
+ HINIC_DB_SRC_L2NIC_SQ_TYPE,
+};
+
+enum hinic_cmdq_db_type {
+ HINIC_DB_SQ_RQ_TYPE,
+ HINIC_DB_CMDQ_TYPE,
+};
+
+/* CMDQ WQE CTRLS */
+struct hinic_cmdq_header {
+ u32 header_info;
+ u32 saved_data;
+};
+
+struct hinic_scmd_bufdesc {
+ u32 buf_len;
+ u32 rsvd;
+ u8 data[HINIC_SCMD_DATA_LEN];
+};
+
+struct hinic_lcmd_bufdesc {
+ struct hinic_sge sge;
+ u32 rsvd1;
+ u64 saved_async_buf;
+ u64 rsvd3;
+};
+
+struct hinic_cmdq_db {
+ u32 db_info;
+ u32 rsvd;
+};
+
+struct hinic_status {
+ u32 status_info;
+};
+
+struct hinic_ctrl {
+ u32 ctrl_info;
+};
+
+struct hinic_sge_resp {
+ struct hinic_sge sge;
+ u32 rsvd;
+};
+
+struct hinic_cmdq_completion {
+ /* HW Format */
+ union {
+ struct hinic_sge_resp sge_resp;
+ u64 direct_resp;
+ };
+};
+
+struct hinic_cmdq_wqe_scmd {
+ struct hinic_cmdq_header header;
+ struct hinic_cmdq_db db;
+ struct hinic_status status;
+ struct hinic_ctrl ctrl;
+ struct hinic_cmdq_completion completion;
+ struct hinic_scmd_bufdesc buf_desc;
+};
+
+struct hinic_cmdq_wqe_lcmd {
+ struct hinic_cmdq_header header;
+ struct hinic_status status;
+ struct hinic_ctrl ctrl;
+ struct hinic_cmdq_completion completion;
+ struct hinic_lcmd_bufdesc buf_desc;
+};
+
+struct hinic_cmdq_inline_wqe {
+ struct hinic_cmdq_wqe_scmd wqe_scmd;
+};
+
+struct hinic_cmdq_wqe {
+ /* HW Format */
+ union{
+ struct hinic_cmdq_inline_wqe inline_wqe;
+ struct hinic_cmdq_wqe_lcmd wqe_lcmd;
+ };
+};
+
+struct hinic_cmdq_ctxt_info {
+ u64 curr_wqe_page_pfn;
+ u64 wq_block_pfn;
+};
+
+/* New interface */
+struct hinic_cmdq_ctxt {
+ u8 status;
+ u8 version;
+ u8 resp_aeq_num;
+ u8 rsvd0[5];
+
+ u16 func_idx;
+ u8 cmdq_id;
+ u8 ppf_idx;
+
+ u8 rsvd1[4];
+
+ struct hinic_cmdq_ctxt_info ctxt_info;
+};
+
+enum hinic_cmdq_status {
+ HINIC_CMDQ_ENABLE = BIT(0),
+ HINIC_CMDQ_SET_FAIL = BIT(1)
+};
+
+enum hinic_cmdq_cmd_type {
+ HINIC_CMD_TYPE_NONE,
+ HINIC_CMD_TYPE_SET_ARM,
+ HINIC_CMD_TYPE_NORMAL,
+};
+
+struct hinic_cmdq_cmd_info {
+ enum hinic_cmdq_cmd_type cmd_type;
+};
+
+struct hinic_cmdq {
+ struct hinic_wq *wq;
+
+ enum hinic_cmdq_type cmdq_type;
+ int wrapped;
+
+ hinic_spinlock_t cmdq_lock;
+
+ int *errcode;
+
+ /* doorbell area */
+ u8 __iomem *db_base;
+
+ struct hinic_cmdq_ctxt cmdq_ctxt;
+
+ struct hinic_cmdq_cmd_info *cmd_infos;
+};
+
+struct hinic_cmdqs {
+ struct hinic_hwdev *hwdev;
+
+ struct pci_pool *cmd_buf_pool;
+
+ struct hinic_wq *saved_wqs;
+
+ struct hinic_cmdq cmdq[HINIC_MAX_CMDQ_TYPES];
+
+ u32 status;
+};
+
+struct hinic_cmd_buf {
+ void *buf;
+ dma_addr_t dma_addr;
+ struct rte_mbuf *mbuf;
+ u16 size;
+};
+
+int hinic_reinit_cmdq_ctxts(struct hinic_hwdev *hwdev);
+
+bool hinic_cmdq_idle(struct hinic_cmdq *cmdq);
+
+struct hinic_cmd_buf *hinic_alloc_cmd_buf(void *hwdev);
+
+void hinic_free_cmd_buf(void *hwdev, struct hinic_cmd_buf *cmd_buf);
+
+/* PF/VF sends a cmd to the ucode via cmdq and returns whether it succeeded.
+ * timeout=0 means the default timeout is used.
+ */
+int hinic_cmdq_direct_resp(void *hwdev, enum hinic_ack_type ack_type,
+ enum hinic_mod_type mod, u8 cmd,
+ struct hinic_cmd_buf *buf_in,
+ u64 *out_param, u32 timeout);
+
+int hinic_comm_cmdqs_init(struct hinic_hwdev *hwdev);
+
+void hinic_comm_cmdqs_free(struct hinic_hwdev *hwdev);
+
+#endif /* _HINIC_PMD_CMDQ_H_ */
diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_eqs.c b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_eqs.c
new file mode 100644
index 000000000..79e1b20bc
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_eqs.c
@@ -0,0 +1,490 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include "hinic_compat.h"
+#include "hinic_csr.h"
+#include "hinic_pmd_hwdev.h"
+#include "hinic_pmd_hwif.h"
+#include "hinic_pmd_mgmt.h"
+#include "hinic_pmd_eqs.h"
+
+#define AEQ_CTRL_0_INTR_IDX_SHIFT 0
+#define AEQ_CTRL_0_DMA_ATTR_SHIFT 12
+#define AEQ_CTRL_0_PCI_INTF_IDX_SHIFT 20
+#define AEQ_CTRL_0_INTR_MODE_SHIFT 31
+
+#define AEQ_CTRL_0_INTR_IDX_MASK 0x3FFU
+#define AEQ_CTRL_0_DMA_ATTR_MASK 0x3FU
+#define AEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3U
+#define AEQ_CTRL_0_INTR_MODE_MASK 0x1U
+
+#define AEQ_CTRL_0_SET(val, member) \
+ (((val) & AEQ_CTRL_0_##member##_MASK) << \
+ AEQ_CTRL_0_##member##_SHIFT)
+
+#define AEQ_CTRL_0_CLEAR(val, member) \
+ ((val) & (~(AEQ_CTRL_0_##member##_MASK \
+ << AEQ_CTRL_0_##member##_SHIFT)))
+
+#define AEQ_CTRL_1_LEN_SHIFT 0
+#define AEQ_CTRL_1_ELEM_SIZE_SHIFT 24
+#define AEQ_CTRL_1_PAGE_SIZE_SHIFT 28
+
+#define AEQ_CTRL_1_LEN_MASK 0x1FFFFFU
+#define AEQ_CTRL_1_ELEM_SIZE_MASK 0x3U
+#define AEQ_CTRL_1_PAGE_SIZE_MASK 0xFU
+
+#define AEQ_CTRL_1_SET(val, member) \
+ (((val) & AEQ_CTRL_1_##member##_MASK) << \
+ AEQ_CTRL_1_##member##_SHIFT)
+
+#define AEQ_CTRL_1_CLEAR(val, member) \
+ ((val) & (~(AEQ_CTRL_1_##member##_MASK \
+ << AEQ_CTRL_1_##member##_SHIFT)))
+
+#define EQ_CONS_IDX_CONS_IDX_SHIFT 0
+#define EQ_CONS_IDX_XOR_CHKSUM_SHIFT 24
+#define EQ_CONS_IDX_INT_ARMED_SHIFT 31
+
+#define EQ_CONS_IDX_CONS_IDX_MASK 0x1FFFFFU
+#define EQ_CONS_IDX_XOR_CHKSUM_MASK 0xFU
+#define EQ_CONS_IDX_INT_ARMED_MASK 0x1U
+
+#define EQ_CONS_IDX_SET(val, member) \
+ (((val) & EQ_CONS_IDX_##member##_MASK) << \
+ EQ_CONS_IDX_##member##_SHIFT)
+
+#define EQ_CONS_IDX_CLEAR(val, member) \
+ ((val) & (~(EQ_CONS_IDX_##member##_MASK \
+ << EQ_CONS_IDX_##member##_SHIFT)))
+
+#define EQ_WRAPPED(eq) ((u32)(eq)->wrapped << EQ_VALID_SHIFT)
+
+#define EQ_CONS_IDX(eq) ((eq)->cons_idx | \
+ ((u32)(eq)->wrapped << EQ_WRAPPED_SHIFT))
+
+#define EQ_CONS_IDX_REG_ADDR(eq) \
+ (HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id))
+
+#define EQ_PROD_IDX_REG_ADDR(eq) \
+ (HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id))
+
+#define GET_EQ_NUM_PAGES(eq, size) \
+ ((u16)(ALIGN((eq)->eq_len * (u32)(eq)->elem_size, (size)) \
+ / (size)))
+
+#define GET_EQ_NUM_ELEMS(eq, pg_size) ((pg_size) / (u32)(eq)->elem_size)
+
+#define PAGE_IN_4K(page_size) ((page_size) >> 12)
+#define EQ_SET_HW_PAGE_SIZE_VAL(eq) ((u32)ilog2(PAGE_IN_4K((eq)->page_size)))
+
+#define ELEMENT_SIZE_IN_32B(eq) (((eq)->elem_size) >> 5)
+#define EQ_SET_HW_ELEM_SIZE_VAL(eq) ((u32)ilog2(ELEMENT_SIZE_IN_32B(eq)))
+
+#define AEQ_DMA_ATTR_DEFAULT 0
+
+#define EQ_WRAPPED_SHIFT 20
+
+#define EQ_VALID_SHIFT 31
+
+#define aeq_to_aeqs(eq) \
+ container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0])
+
+static u8 eq_cons_idx_checksum_set(u32 val)
+{
+ u8 checksum = 0;
+ u8 idx;
+
+ for (idx = 0; idx < 32; idx += 4)
+ checksum ^= ((val >> idx) & 0xF);
+
+ return (checksum & 0xF);
+}
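As a quick worked example of the 4-bit XOR checksum above (the input value is arbitrary): eq_cons_idx_checksum_set(0x12345678) XORs the eight nibbles 1^2^3^4^5^6^7^8 and returns 0x8; set_eq_cons_idx() below then folds this result into the XOR_CHKSUM field of the consumer index register.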
+
+/**
+ * set_eq_cons_idx - write the cons idx to the hw
+ * @eq: The event queue to update the cons idx for
+ * @arm_state: indicate whether to report interrupts when an eq element is generated
+ */
+static void set_eq_cons_idx(struct hinic_eq *eq, u32 arm_state)
+{
+ u32 eq_cons_idx, eq_wrap_ci, val;
+ u32 addr = EQ_CONS_IDX_REG_ADDR(eq);
+
+ eq_wrap_ci = EQ_CONS_IDX(eq);
+
+ /* Read Modify Write */
+ val = hinic_hwif_read_reg(eq->hwdev->hwif, addr);
+
+ val = EQ_CONS_IDX_CLEAR(val, CONS_IDX) &
+ EQ_CONS_IDX_CLEAR(val, INT_ARMED) &
+ EQ_CONS_IDX_CLEAR(val, XOR_CHKSUM);
+
+	/* Only aeq0 uses int_arm mode, so the pmd driver can receive
+	 * async events and mbox data
+	 */
+ if (eq->q_id == 0)
+ eq_cons_idx = EQ_CONS_IDX_SET(eq_wrap_ci, CONS_IDX) |
+ EQ_CONS_IDX_SET(arm_state, INT_ARMED);
+ else
+ eq_cons_idx = EQ_CONS_IDX_SET(eq_wrap_ci, CONS_IDX) |
+ EQ_CONS_IDX_SET(HINIC_EQ_NOT_ARMED, INT_ARMED);
+
+ val |= eq_cons_idx;
+
+ val |= EQ_CONS_IDX_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM);
+
+ hinic_hwif_write_reg(eq->hwdev->hwif, addr, val);
+}
+
+/**
+ * eq_update_ci - update the cons idx of event queue
+ * @eq: the event queue to update the cons idx for
+ */
+void eq_update_ci(struct hinic_eq *eq)
+{
+ set_eq_cons_idx(eq, HINIC_EQ_ARMED);
+}
+
+/**
+ * set_eq_ctrls - setting eq's ctrls registers
+ * @eq: the event queue for setting
+ */
+static void set_aeq_ctrls(struct hinic_eq *eq)
+{
+ struct hinic_hwif *hwif = eq->hwdev->hwif;
+ struct irq_info *eq_irq = &eq->eq_irq;
+ u32 addr, val, ctrl0, ctrl1, page_size_val, elem_size;
+ u32 pci_intf_idx = HINIC_PCI_INTF_IDX(hwif);
+
+ /* set ctrl0 */
+ addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id);
+
+ val = hinic_hwif_read_reg(hwif, addr);
+
+ val = AEQ_CTRL_0_CLEAR(val, INTR_IDX) &
+ AEQ_CTRL_0_CLEAR(val, DMA_ATTR) &
+ AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) &
+ AEQ_CTRL_0_CLEAR(val, INTR_MODE);
+
+ ctrl0 = AEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) |
+ AEQ_CTRL_0_SET(AEQ_DMA_ATTR_DEFAULT, DMA_ATTR) |
+ AEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) |
+ AEQ_CTRL_0_SET(HINIC_INTR_MODE_ARMED, INTR_MODE);
+
+ val |= ctrl0;
+
+ hinic_hwif_write_reg(hwif, addr, val);
+
+ /* set ctrl1 */
+ addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id);
+
+ page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);
+ elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq);
+
+ ctrl1 = AEQ_CTRL_1_SET(eq->eq_len, LEN) |
+ AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) |
+ AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
+
+ hinic_hwif_write_reg(hwif, addr, ctrl1);
+}
+
+/**
+ * aeq_elements_init - initialize all the elements in the aeq
+ * @eq: the event queue
+ * @init_val: value to initialize the elements with
+ */
+static void aeq_elements_init(struct hinic_eq *eq, u32 init_val)
+{
+ struct hinic_aeq_elem *aeqe;
+ u16 i;
+
+ for (i = 0; i < eq->eq_len; i++) {
+ aeqe = GET_AEQ_ELEM(eq, i);
+ aeqe->desc = cpu_to_be32(init_val);
+ }
+
+ rte_wmb(); /* Write the init values */
+}
+
+/**
+ * alloc_eq_pages - allocate the pages for the queue
+ * @eq: the event queue
+ */
+static int alloc_eq_pages(struct hinic_eq *eq)
+{
+ struct hinic_hwif *hwif = eq->hwdev->hwif;
+ u32 init_val;
+ u64 dma_addr_size, virt_addr_size;
+ u16 pg_num, i;
+ int err;
+
+ dma_addr_size = eq->num_pages * sizeof(*eq->dma_addr);
+ virt_addr_size = eq->num_pages * sizeof(*eq->virt_addr);
+
+ eq->dma_addr = kzalloc(dma_addr_size, GFP_KERNEL);
+ if (!eq->dma_addr) {
+ PMD_DRV_LOG(ERR, "Allocate dma addr array failed");
+ return -ENOMEM;
+ }
+
+ eq->virt_addr = kzalloc(virt_addr_size, GFP_KERNEL);
+ if (!eq->virt_addr) {
+ PMD_DRV_LOG(ERR, "Allocate virt addr array failed");
+ err = -ENOMEM;
+ goto virt_addr_alloc_err;
+ }
+
+ for (pg_num = 0; pg_num < eq->num_pages; pg_num++) {
+ eq->virt_addr[pg_num] =
+ (u8 *)dma_zalloc_coherent_aligned(eq->hwdev,
+ eq->page_size, &eq->dma_addr[pg_num],
+ SOCKET_ID_ANY);
+ if (!eq->virt_addr[pg_num]) {
+ err = -ENOMEM;
+ goto dma_alloc_err;
+ }
+
+ hinic_hwif_write_reg(hwif,
+ HINIC_EQ_HI_PHYS_ADDR_REG(eq->type,
+ eq->q_id, pg_num),
+ upper_32_bits(eq->dma_addr[pg_num]));
+
+ hinic_hwif_write_reg(hwif,
+ HINIC_EQ_LO_PHYS_ADDR_REG(eq->type,
+ eq->q_id, pg_num),
+ lower_32_bits(eq->dma_addr[pg_num]));
+ }
+
+ init_val = EQ_WRAPPED(eq);
+
+ aeq_elements_init(eq, init_val);
+
+ return 0;
+
+dma_alloc_err:
+ for (i = 0; i < pg_num; i++)
+ dma_free_coherent(eq->hwdev, eq->page_size,
+ eq->virt_addr[i], eq->dma_addr[i]);
+
+virt_addr_alloc_err:
+ kfree(eq->dma_addr);
+ return err;
+}
+
+/**
+ * free_eq_pages - free the pages of the queue
+ * @eq: the event queue
+ */
+static void free_eq_pages(struct hinic_eq *eq)
+{
+ struct hinic_hwdev *hwdev = eq->hwdev;
+ u16 pg_num;
+
+ for (pg_num = 0; pg_num < eq->num_pages; pg_num++)
+ dma_free_coherent(hwdev, eq->page_size,
+ eq->virt_addr[pg_num],
+ eq->dma_addr[pg_num]);
+
+ kfree(eq->virt_addr);
+ kfree(eq->dma_addr);
+}
+
+#define MSIX_ENTRY_IDX_0 (0)
+
+/**
+ * init_aeq - initialize aeq
+ * @eq: the event queue
+ * @hwdev: the pointer to the private hardware device object
+ * @q_id: Queue id number
+ * @q_len: the number of EQ elements
+ * @page_size: the page size of the event queue
+ * @entry: msix entry associated with the event queue
+ * Return: 0 - Success, Negative - failure
+ */
+static int init_aeq(struct hinic_eq *eq, struct hinic_hwdev *hwdev, u16 q_id,
+ u16 q_len, u32 page_size,
+ __rte_unused struct irq_info *entry)
+{
+ int err = 0;
+
+ eq->hwdev = hwdev;
+ eq->q_id = q_id;
+ eq->type = HINIC_AEQ;
+ eq->page_size = page_size;
+ eq->eq_len = q_len;
+
+ /* clear eq_len to force eqe drop in hardware */
+ hinic_hwif_write_reg(eq->hwdev->hwif,
+ HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);
+
+ /* Clear PI and CI, also clear the ARM bit */
+ hinic_hwif_write_reg(eq->hwdev->hwif, EQ_CONS_IDX_REG_ADDR(eq), 0);
+ hinic_hwif_write_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0);
+
+ eq->cons_idx = 0;
+ eq->wrapped = 0;
+
+ eq->elem_size = HINIC_AEQE_SIZE;
+ eq->num_pages = GET_EQ_NUM_PAGES(eq, page_size);
+ eq->num_elem_in_pg = GET_EQ_NUM_ELEMS(eq, page_size);
+
+ if (eq->num_elem_in_pg & (eq->num_elem_in_pg - 1)) {
+		PMD_DRV_LOG(ERR, "Number of elements in eq page is not a power of 2");
+ return -EINVAL;
+ }
+
+ if (eq->num_pages > HINIC_EQ_MAX_PAGES) {
+ PMD_DRV_LOG(ERR, "Too many pages for eq, num_pages: %d",
+ eq->num_pages);
+ return -EINVAL;
+ }
+
+ err = alloc_eq_pages(eq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Allocate pages for eq failed");
+ return err;
+ }
+
+ /* pmd use MSIX_ENTRY_IDX_0 */
+ eq->eq_irq.msix_entry_idx = MSIX_ENTRY_IDX_0;
+
+ set_aeq_ctrls(eq);
+ set_eq_cons_idx(eq, HINIC_EQ_ARMED);
+
+ if (eq->q_id == 0)
+ hinic_set_msix_state(hwdev, 0, HINIC_MSIX_ENABLE);
+
+ eq->poll_retry_nr = HINIC_RETRY_NUM;
+
+ return 0;
+}
+
+/**
+ * remove_aeq - remove aeq
+ * @eq: the event queue
+ */
+static void remove_aeq(struct hinic_eq *eq)
+{
+ struct irq_info *entry = &eq->eq_irq;
+
+ if (eq->q_id == 0)
+ hinic_set_msix_state(eq->hwdev, entry->msix_entry_idx,
+ HINIC_MSIX_DISABLE);
+
+ /* clear eq_len to avoid hw access host memory */
+ hinic_hwif_write_reg(eq->hwdev->hwif,
+ HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);
+
+ /* update cons_idx to avoid invalid interrupt */
+ eq->cons_idx = (u16)hinic_hwif_read_reg(eq->hwdev->hwif,
+ EQ_PROD_IDX_REG_ADDR(eq));
+ set_eq_cons_idx(eq, HINIC_EQ_NOT_ARMED);
+
+ free_eq_pages(eq);
+}
+
+/**
+ * hinic_aeqs_init - init all the aeqs
+ * @hwdev: the pointer to the private hardware device object
+ * @num_aeqs: number of aeq
+ * @msix_entries: msix entries associated with the event queues
+ * Return: 0 - Success, Negative - failure
+ */
+static int
+hinic_aeqs_init(struct hinic_hwdev *hwdev, u16 num_aeqs,
+ struct irq_info *msix_entries)
+{
+ struct hinic_aeqs *aeqs;
+ int err;
+ u16 i, q_id;
+
+ aeqs = kzalloc(sizeof(*aeqs), GFP_KERNEL);
+ if (!aeqs)
+ return -ENOMEM;
+
+ hwdev->aeqs = aeqs;
+ aeqs->hwdev = hwdev;
+ aeqs->num_aeqs = num_aeqs;
+
+ for (q_id = HINIC_AEQN_START; q_id < num_aeqs; q_id++) {
+ err = init_aeq(&aeqs->aeq[q_id], hwdev, q_id,
+ HINIC_DEFAULT_AEQ_LEN, HINIC_EQ_PAGE_SIZE,
+ &msix_entries[q_id]);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Init aeq %d failed", q_id);
+ goto init_aeq_err;
+ }
+ }
+
+ return 0;
+
+init_aeq_err:
+ for (i = 0; i < q_id; i++)
+ remove_aeq(&aeqs->aeq[i]);
+
+ kfree(aeqs);
+
+ return err;
+}
+
+/**
+ * hinic_aeqs_free - free all the aeqs
+ * @hwdev: the pointer to the private hardware device object
+ */
+static void hinic_aeqs_free(struct hinic_hwdev *hwdev)
+{
+ struct hinic_aeqs *aeqs = hwdev->aeqs;
+ u16 q_id;
+
+ /* the hinic PMD uses aeq[1~3]; aeq[0] is used only by the kernel driver */
+ for (q_id = HINIC_AEQN_START; q_id < aeqs->num_aeqs; q_id++)
+ remove_aeq(&aeqs->aeq[q_id]);
+
+ kfree(aeqs);
+}
+
+void hinic_dump_aeq_info(struct hinic_hwdev *hwdev)
+{
+ struct hinic_eq *eq;
+ u32 addr, ci, pi;
+ int q_id;
+
+ for (q_id = 0; q_id < hwdev->aeqs->num_aeqs; q_id++) {
+ eq = &hwdev->aeqs->aeq[q_id];
+ addr = EQ_CONS_IDX_REG_ADDR(eq);
+ ci = hinic_hwif_read_reg(hwdev->hwif, addr);
+ addr = EQ_PROD_IDX_REG_ADDR(eq);
+ pi = hinic_hwif_read_reg(hwdev->hwif, addr);
+ PMD_DRV_LOG(ERR, "aeq id: %d, ci: 0x%x, pi: 0x%x",
+ q_id, ci, pi);
+ }
+}
+
+int hinic_comm_aeqs_init(struct hinic_hwdev *hwdev)
+{
+ int rc;
+ u16 num_aeqs;
+ struct irq_info aeq_irqs[HINIC_MAX_AEQS];
+
+ num_aeqs = HINIC_HWIF_NUM_AEQS(hwdev->hwif);
+ if (num_aeqs < HINIC_MAX_AEQS) {
+ PMD_DRV_LOG(ERR, "Warning: PMD need %d AEQs, Chip have %d",
+ HINIC_MAX_AEQS, num_aeqs);
+ return HINIC_ERROR;
+ }
+
+ memset(aeq_irqs, 0, sizeof(aeq_irqs));
+ rc = hinic_aeqs_init(hwdev, num_aeqs, aeq_irqs);
+ if (rc != HINIC_OK)
+ PMD_DRV_LOG(ERR, "Initialize aeqs failed, rc: %d", rc);
+
+ return rc;
+}
+
+void hinic_comm_aeqs_free(struct hinic_hwdev *hwdev)
+{
+ hinic_aeqs_free(hwdev);
+}
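As a sanity check on the AEQ sizing constraints enforced in init_aeq() above, the sketch below works through the default parameters defined in hinic_pmd_eqs.h (next file). It assumes GET_EQ_NUM_PAGES() rounds q_len * elem_size up to whole pages and GET_EQ_NUM_ELEMS() is page_size / elem_size; the program is illustrative only, not part of the driver.

#include <stdio.h>

int main(void)
{
	/* defaults from hinic_pmd_eqs.h */
	unsigned int q_len = 64;         /* HINIC_DEFAULT_AEQ_LEN */
	unsigned int elem_size = 64;     /* HINIC_AEQE_SIZE */
	unsigned int page_size = 0x1000; /* HINIC_EQ_PAGE_SIZE */

	/* assumed round-up semantics of GET_EQ_NUM_PAGES() */
	unsigned int num_pages = (q_len * elem_size + page_size - 1) / page_size;
	unsigned int num_elem_in_pg = page_size / elem_size;

	/* prints: num_pages=1, num_elem_in_pg=64 -> the whole default AEQ fits
	 * in one 4 KiB page, num_elem_in_pg is a power of two and
	 * num_pages <= HINIC_EQ_MAX_PAGES (8), so both checks in init_aeq() pass.
	 */
	printf("num_pages=%u, num_elem_in_pg=%u\n", num_pages, num_elem_in_pg);
	return 0;
}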
diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_eqs.h b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_eqs.h
new file mode 100644
index 000000000..16046ecde
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_eqs.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_PMD_EQS_H_
+#define _HINIC_PMD_EQS_H_
+
+#define HINIC_EQ_PAGE_SIZE 0x00001000
+
+#define HINIC_AEQN_START 0
+#define HINIC_MAX_AEQS 4
+
+#define HINIC_EQ_MAX_PAGES 8
+
+#define HINIC_AEQE_SIZE 64
+#define HINIC_CEQE_SIZE 4
+
+#define HINIC_AEQE_DESC_SIZE 4
+#define HINIC_AEQE_DATA_SIZE \
+ (HINIC_AEQE_SIZE - HINIC_AEQE_DESC_SIZE)
+
+#define HINIC_DEFAULT_AEQ_LEN 64
+
+#define GET_EQ_ELEMENT(eq, idx) \
+ (((u8 *)(eq)->virt_addr[(idx) / (eq)->num_elem_in_pg]) + \
+ (((u32)(idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size))
+
+#define GET_AEQ_ELEM(eq, idx) \
+ ((struct hinic_aeq_elem *)GET_EQ_ELEMENT((eq), (idx)))
+
+#define GET_CEQ_ELEM(eq, idx) ((u32 *)GET_EQ_ELEMENT((eq), (idx)))
+
+enum hinic_eq_intr_mode {
+ HINIC_INTR_MODE_ARMED,
+ HINIC_INTR_MODE_ALWAYS,
+};
+
+enum hinic_eq_ci_arm_state {
+ HINIC_EQ_NOT_ARMED,
+ HINIC_EQ_ARMED,
+};
+
+enum hinic_aeq_type {
+ HINIC_HW_INTER_INT = 0,
+ HINIC_MBX_FROM_FUNC = 1,
+ HINIC_MSG_FROM_MGMT_CPU = 2,
+ HINIC_API_RSP = 3,
+ HINIC_API_CHAIN_STS = 4,
+ HINIC_MBX_SEND_RSLT = 5,
+ HINIC_MAX_AEQ_EVENTS
+};
+
+#define HINIC_RETRY_NUM (10)
+
+struct hinic_eq {
+ struct hinic_hwdev *hwdev;
+ u16 q_id;
+ u16 type;
+ u32 page_size;
+ u16 eq_len;
+
+ u16 cons_idx;
+ u16 wrapped;
+
+ u16 elem_size;
+ u16 num_pages;
+ u32 num_elem_in_pg;
+
+ struct irq_info eq_irq;
+
+ dma_addr_t *dma_addr;
+ u8 **virt_addr;
+
+ u16 poll_retry_nr;
+};
+
+struct hinic_aeq_elem {
+ u8 aeqe_data[HINIC_AEQE_DATA_SIZE];
+ u32 desc;
+};
+
+struct hinic_aeqs {
+ struct hinic_hwdev *hwdev;
+ u16 poll_retry_nr;
+
+ struct hinic_eq aeq[HINIC_MAX_AEQS];
+ u16 num_aeqs;
+};
+
+void eq_update_ci(struct hinic_eq *eq);
+
+void hinic_dump_aeq_info(struct hinic_hwdev *hwdev);
+
+int hinic_comm_aeqs_init(struct hinic_hwdev *hwdev);
+
+void hinic_comm_aeqs_free(struct hinic_hwdev *hwdev);
+
+#endif /* _HINIC_PMD_EQS_H_ */
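The GET_EQ_ELEMENT() macro above resolves an element index into a page pointer plus an in-page offset, using a mask rather than a modulo; this is why init_aeq() requires num_elem_in_pg to be a power of two. A minimal standalone sketch of the same arithmetic (eq_elem_addr is a hypothetical helper, not a driver symbol):

#include <stdint.h>

/* mirrors GET_EQ_ELEMENT(): page = idx / num_elem_in_pg,
 * offset = (idx & (num_elem_in_pg - 1)) * elem_size */
static uint8_t *eq_elem_addr(uint8_t **virt_addr, uint32_t num_elem_in_pg,
			     uint32_t elem_size, uint32_t idx)
{
	return virt_addr[idx / num_elem_in_pg] +
	       (idx & (num_elem_in_pg - 1)) * elem_size;
}

/* e.g. with the AEQ defaults (num_elem_in_pg = 64, elem_size = 64),
 * idx 70 resolves to virt_addr[1] + 6 * 64 = virt_addr[1] + 384. */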
diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_hwdev.c b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_hwdev.c
new file mode 100644
index 000000000..cc4207678
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_hwdev.c
@@ -0,0 +1,1531 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include <rte_ethdev_driver.h>
+#include <rte_bus_pci.h>
+#include <rte_hash.h>
+#include <rte_jhash.h>
+
+#include "hinic_compat.h"
+#include "hinic_csr.h"
+#include "hinic_pmd_hwdev.h"
+#include "hinic_pmd_hwif.h"
+#include "hinic_pmd_wq.h"
+#include "hinic_pmd_cmdq.h"
+#include "hinic_pmd_mgmt.h"
+#include "hinic_pmd_niccfg.h"
+#include "hinic_pmd_mbox.h"
+
+#define HINIC_DEAULT_EQ_MSIX_PENDING_LIMIT 0
+#define HINIC_DEAULT_EQ_MSIX_COALESC_TIMER_CFG 0xFF
+#define HINIC_DEAULT_EQ_MSIX_RESEND_TIMER_CFG 7
+
+#define HINIC_FLR_TIMEOUT 1000
+
+#define FFM_RECORD_NUM_MAX 32
+
+#define HINIC_DMA_ATTR_ENTRY_ST_SHIFT 0
+#define HINIC_DMA_ATTR_ENTRY_AT_SHIFT 8
+#define HINIC_DMA_ATTR_ENTRY_PH_SHIFT 10
+#define HINIC_DMA_ATTR_ENTRY_NO_SNOOPING_SHIFT 12
+#define HINIC_DMA_ATTR_ENTRY_TPH_EN_SHIFT 13
+
+#define HINIC_DMA_ATTR_ENTRY_ST_MASK 0xFF
+#define HINIC_DMA_ATTR_ENTRY_AT_MASK 0x3
+#define HINIC_DMA_ATTR_ENTRY_PH_MASK 0x3
+#define HINIC_DMA_ATTR_ENTRY_NO_SNOOPING_MASK 0x1
+#define HINIC_DMA_ATTR_ENTRY_TPH_EN_MASK 0x1
+
+#define HINIC_DMA_ATTR_ENTRY_SET(val, member) \
+ (((u32)(val) & HINIC_DMA_ATTR_ENTRY_##member##_MASK) << \
+ HINIC_DMA_ATTR_ENTRY_##member##_SHIFT)
+
+#define HINIC_DMA_ATTR_ENTRY_CLEAR(val, member) \
+ ((val) & (~(HINIC_DMA_ATTR_ENTRY_##member##_MASK \
+ << HINIC_DMA_ATTR_ENTRY_##member##_SHIFT)))
+
+#define HINIC_PCIE_ST_DISABLE 0
+#define HINIC_PCIE_AT_DISABLE 0
+#define HINIC_PCIE_PH_DISABLE 0
+#define PCIE_MSIX_ATTR_ENTRY 0
+
+#define HINIC_HASH_FUNC rte_jhash
+#define HINIC_HASH_KEY_LEN (sizeof(dma_addr_t))
+#define HINIC_HASH_FUNC_INIT_VAL 0
+
+static const char *__hw_to_char_fec[HILINK_FEC_MAX_TYPE] = {
+ "RS-FEC", "BASE-FEC", "NO-FEC"};
+
+static const char *__hw_to_char_port_type[LINK_PORT_MAX_TYPE] = {
+ "Unknown", "Fibre", "Electric", "Direct Attach Copper", "AOC",
+ "Back plane", "BaseT"
+};
+
+static const char *hinic_module_link_err[LINK_ERR_NUM] = {
+ "Unrecognized module",
+};
+
+struct hinic_vf_dma_attr_table {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_idx;
+ u8 func_dma_entry_num;
+ u8 entry_idx;
+ u8 st;
+ u8 at;
+ u8 ph;
+ u8 no_snooping;
+ u8 tph_en;
+ u8 resv1[3];
+};
+
+/**
+ * hinic_cpu_to_be32 - convert data to big endian 32 bit format
+ * @data: the data to convert
+ * @len: length of data to convert, must be a multiple of 4 bytes
+ */
+void hinic_cpu_to_be32(void *data, u32 len)
+{
+ u32 i;
+ u32 *mem = (u32 *)data;
+
+ for (i = 0; i < (len >> 2); i++) {
+ *mem = cpu_to_be32(*mem);
+ mem++;
+ }
+}
+
+/**
+ * hinic_be32_to_cpu - convert data from big endian 32 bit format
+ * @data: the data to convert
+ * @len: length of data to convert, must be a multiple of 4 bytes
+ */
+void hinic_be32_to_cpu(void *data, u32 len)
+{
+ u32 i;
+ u32 *mem = (u32 *)data;
+
+ for (i = 0; i < (len >> 2); i++) {
+ *mem = be32_to_cpu(*mem);
+ mem++;
+ }
+}
+
+static void *hinic_dma_mem_zalloc(struct hinic_hwdev *hwdev, size_t size,
+ dma_addr_t *dma_handle, unsigned int align,
+ unsigned int socket_id)
+{
+ int rc, alloc_cnt;
+ const struct rte_memzone *mz;
+ char z_name[RTE_MEMZONE_NAMESIZE];
+ hash_sig_t sig;
+ rte_iova_t iova;
+
+ if (dma_handle == NULL || 0 == size)
+ return NULL;
+
+ alloc_cnt = rte_atomic32_add_return(&hwdev->os_dep.dma_alloc_cnt, 1);
+ snprintf(z_name, sizeof(z_name), "%s_%d",
+ hwdev->pcidev_hdl->name, alloc_cnt);
+
+ mz = rte_memzone_reserve_aligned(z_name, size, socket_id,
+ RTE_MEMZONE_IOVA_CONTIG, align);
+ if (!mz) {
+ PMD_DRV_LOG(ERR, "Alloc dma able memory failed, errno: %d, ma_name: %s, size: 0x%zx",
+ rte_errno, z_name, size);
+ return NULL;
+ }
+
+ iova = mz->iova;
+
+ /* check if phys_addr already exists */
+ sig = HINIC_HASH_FUNC(&iova, HINIC_HASH_KEY_LEN,
+ HINIC_HASH_FUNC_INIT_VAL);
+ rc = rte_hash_lookup_with_hash(hwdev->os_dep.dma_addr_hash,
+ &iova, sig);
+ if (rc >= 0) {
+ PMD_DRV_LOG(ERR, "Dma addr: %p already in hash table, error: %d, mz_name: %s",
+ (void *)iova, rc, z_name);
+ goto phys_addr_hash_err;
+ }
+
+ /* record paddr in hash table */
+ rte_spinlock_lock(&hwdev->os_dep.dma_hash_lock);
+ rc = rte_hash_add_key_with_hash_data(hwdev->os_dep.dma_addr_hash,
+ &iova, sig,
+ (void *)(u64)mz);
+ rte_spinlock_unlock(&hwdev->os_dep.dma_hash_lock);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Insert dma addr: %p hash failed, error: %d, mz_name: %s",
+ (void *)iova, rc, z_name);
+ goto phys_addr_hash_err;
+ }
+ *dma_handle = iova;
+ memset(mz->addr, 0, size);
+
+ return mz->addr;
+
+phys_addr_hash_err:
+ (void)rte_memzone_free(mz);
+
+ return NULL;
+}
+
+static void
+hinic_dma_mem_free(struct hinic_hwdev *hwdev, size_t size,
+ void *virt, dma_addr_t phys)
+{
+ int rc;
+ struct rte_memzone *mz = NULL;
+ struct rte_hash *hash;
+ hash_sig_t sig;
+
+ if (virt == NULL || phys == 0)
+ return;
+
+ hash = hwdev->os_dep.dma_addr_hash;
+ sig = HINIC_HASH_FUNC(&phys, HINIC_HASH_KEY_LEN,
+ HINIC_HASH_FUNC_INIT_VAL);
+ rc = rte_hash_lookup_with_hash_data(hash, &phys, sig, (void **)&mz);
+ if (rc < 0) {
+ PMD_DRV_LOG(ERR, "Can not find phys_addr: %p, error: %d",
+ (void *)phys, rc);
+ return;
+ }
+
+ if (virt != mz->addr || size > mz->len) {
+ PMD_DRV_LOG(ERR, "Match mz_info failed: "
+ "mz.name: %s, mz.phys: %p, mz.virt: %p, mz.len: %zu, "
+ "phys: %p, virt: %p, size: %zu",
+ mz->name, (void *)mz->iova, mz->addr, mz->len,
+ (void *)phys, virt, size);
+ }
+
+ rte_spinlock_lock(&hwdev->os_dep.dma_hash_lock);
+ (void)rte_hash_del_key_with_hash(hash, &phys, sig);
+ rte_spinlock_unlock(&hwdev->os_dep.dma_hash_lock);
+
+ (void)rte_memzone_free(mz);
+}
+
+void *dma_zalloc_coherent(void *hwdev, size_t size, dma_addr_t *dma_handle,
+ unsigned int socket_id)
+{
+ return hinic_dma_mem_zalloc(hwdev, size, dma_handle,
+ RTE_CACHE_LINE_SIZE, socket_id);
+}
+
+void *dma_zalloc_coherent_aligned(void *hwdev, size_t size,
+ dma_addr_t *dma_handle, unsigned int socket_id)
+{
+ return hinic_dma_mem_zalloc(hwdev, size, dma_handle, HINIC_PAGE_SIZE,
+ socket_id);
+}
+
+void *dma_zalloc_coherent_aligned256k(void *hwdev, size_t size,
+ dma_addr_t *dma_handle,
+ unsigned int socket_id)
+{
+ return hinic_dma_mem_zalloc(hwdev, size, dma_handle,
+ HINIC_PAGE_SIZE * 64, socket_id);
+}
+
+void dma_free_coherent(void *hwdev, size_t size, void *virt, dma_addr_t phys)
+{
+ hinic_dma_mem_free(hwdev, size, virt, phys);
+}
+
+void dma_free_coherent_volatile(void *hwdev, size_t size,
+ volatile void *virt, dma_addr_t phys)
+{
+ int rc;
+ struct rte_memzone *mz = NULL;
+ struct hinic_hwdev *dev = hwdev;
+ struct rte_hash *hash;
+ hash_sig_t sig;
+
+ if (virt == NULL || phys == 0)
+ return;
+
+ hash = dev->os_dep.dma_addr_hash;
+ sig = HINIC_HASH_FUNC(&phys, HINIC_HASH_KEY_LEN,
+ HINIC_HASH_FUNC_INIT_VAL);
+ rc = rte_hash_lookup_with_hash_data(hash, &phys, sig, (void **)&mz);
+ if (rc < 0) {
+ PMD_DRV_LOG(ERR, "Can not find phys_addr: %p, error: %d",
+ (void *)phys, rc);
+ return;
+ }
+
+ if (virt != mz->addr || size > mz->len) {
+ PMD_DRV_LOG(ERR, "Match mz_info failed: "
+ "mz.name:%s, mz.phys:%p, mz.virt:%p, mz.len:%zu, "
+ "phys:%p, virt:%p, size:%zu",
+ mz->name, (void *)mz->iova, mz->addr, mz->len,
+ (void *)phys, virt, size);
+ }
+
+ rte_spinlock_lock(&dev->os_dep.dma_hash_lock);
+ (void)rte_hash_del_key_with_hash(hash, &phys, sig);
+ rte_spinlock_unlock(&dev->os_dep.dma_hash_lock);
+
+ (void)rte_memzone_free(mz);
+}
+
+struct dma_pool *dma_pool_create(const char *name, void *dev,
+ size_t size, size_t align, size_t boundary)
+{
+ struct pci_pool *pool;
+
+ pool = rte_zmalloc(NULL, sizeof(*pool), HINIC_MEM_ALLOC_ALIGN_MIN);
+ if (!pool)
+ return NULL;
+
+ rte_atomic32_set(&pool->inuse, 0);
+ pool->elem_size = size;
+ pool->align = align;
+ pool->boundary = boundary;
+ pool->hwdev = dev;
+ strncpy(pool->name, name, (sizeof(pool->name) - 1));
+
+ return pool;
+}
+
+void dma_pool_destroy(struct dma_pool *pool)
+{
+ if (!pool)
+ return;
+
+ if (rte_atomic32_read(&pool->inuse) != 0) {
+ PMD_DRV_LOG(ERR, "Leak memory, dma_pool: %s, inuse_count: %d",
+ pool->name, rte_atomic32_read(&pool->inuse));
+ }
+
+ rte_free(pool);
+}
+
+void *dma_pool_alloc(struct pci_pool *pool, dma_addr_t *dma_addr)
+{
+ void *buf;
+
+ buf = hinic_dma_mem_zalloc(pool->hwdev, pool->elem_size, dma_addr,
+ (u32)pool->align, SOCKET_ID_ANY);
+ if (buf)
+ rte_atomic32_inc(&pool->inuse);
+
+ return buf;
+}
+
+void dma_pool_free(struct pci_pool *pool, void *vaddr, dma_addr_t dma)
+{
+ rte_atomic32_dec(&pool->inuse);
+ hinic_dma_mem_free(pool->hwdev, pool->elem_size, vaddr, dma);
+}
+
+#define HINIC_MAX_DMA_ENTRIES 8192
+int hinic_osdep_init(struct hinic_hwdev *hwdev)
+{
+ struct rte_hash_parameters dh_params = { 0 };
+ struct rte_hash *paddr_hash = NULL;
+
+ rte_atomic32_set(&hwdev->os_dep.dma_alloc_cnt, 0);
+ rte_spinlock_init(&hwdev->os_dep.dma_hash_lock);
+
+ dh_params.name = hwdev->pcidev_hdl->name;
+ dh_params.entries = HINIC_MAX_DMA_ENTRIES;
+ dh_params.key_len = HINIC_HASH_KEY_LEN;
+ dh_params.hash_func = HINIC_HASH_FUNC;
+ dh_params.hash_func_init_val = HINIC_HASH_FUNC_INIT_VAL;
+ dh_params.socket_id = SOCKET_ID_ANY;
+
+ paddr_hash = rte_hash_find_existing(dh_params.name);
+ if (paddr_hash == NULL) {
+ paddr_hash = rte_hash_create(&dh_params);
+ if (paddr_hash == NULL) {
+ PMD_DRV_LOG(ERR, "Create nic_dev phys_addr hash table failed");
+ return -ENOMEM;
+ }
+ } else {
+ PMD_DRV_LOG(INFO, "Using existing dma hash table %s",
+ dh_params.name);
+ }
+ hwdev->os_dep.dma_addr_hash = paddr_hash;
+
+ return 0;
+}
+
+void hinic_osdep_deinit(struct hinic_hwdev *hwdev)
+{
+ uint32_t iter = 0;
+ dma_addr_t key_pa;
+ struct rte_memzone *data_mz = NULL;
+ struct rte_hash *paddr_hash = hwdev->os_dep.dma_addr_hash;
+
+ if (paddr_hash) {
+ /* iterate through the hash table */
+ while (rte_hash_iterate(paddr_hash, (const void **)&key_pa,
+ (void **)&data_mz, &iter) >= 0) {
+ if (data_mz) {
+ PMD_DRV_LOG(WARNING, "Free leaked dma_addr: %p, mz: %s",
+ (void *)key_pa, data_mz->name);
+ (void)rte_memzone_free(data_mz);
+ }
+ }
+
+ /* free phys_addr hash table */
+ rte_hash_free(paddr_hash);
+ }
+}
+
+/**
+ * hinic_set_ci_table - set ci attribute table
+ * @hwdev: the hardware interface of a nic device
+ * @q_id: Queue id of SQ
+ * @attr: pointer to the SQ CI attribute table
+ * @return
+ * 0 on success and ci attribute table is filled,
+ * negative error value otherwise.
+ */
+int hinic_set_ci_table(void *hwdev, u16 q_id, struct hinic_sq_attr *attr)
+{
+ struct hinic_cons_idx_attr cons_idx_attr;
+
+ memset(&cons_idx_attr, 0, sizeof(cons_idx_attr));
+ cons_idx_attr.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ cons_idx_attr.func_idx = hinic_global_func_id(hwdev);
+ cons_idx_attr.dma_attr_off = attr->dma_attr_off;
+ cons_idx_attr.pending_limit = attr->pending_limit;
+ cons_idx_attr.coalescing_time = attr->coalescing_time;
+ if (attr->intr_en) {
+ cons_idx_attr.intr_en = attr->intr_en;
+ cons_idx_attr.intr_idx = attr->intr_idx;
+ }
+
+ cons_idx_attr.l2nic_sqn = attr->l2nic_sqn;
+ cons_idx_attr.sq_id = q_id;
+ cons_idx_attr.ci_addr = attr->ci_dma_base;
+
+ return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_L2NIC_SQ_CI_ATTR_SET,
+ &cons_idx_attr, sizeof(cons_idx_attr),
+ NULL, NULL, 0);
+}
+
+/**
+ * hinic_set_pagesize - set page size to vat table
+ * @hwdev: the hardware interface of a nic device
+ * @page_size: vat page size
+ * @return
+ * 0 on success,
+ * negative error value otherwise.
+ */
+int hinic_set_pagesize(void *hwdev, u8 page_size)
+{
+ struct hinic_page_size cmd;
+
+ if (page_size > HINIC_PAGE_SIZE_MAX) {
+ PMD_DRV_LOG(ERR, "Invalid page_size %u, bigger than %u",
+ page_size, HINIC_PAGE_SIZE_MAX);
+ return -EINVAL;
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ cmd.func_idx = hinic_global_func_id(hwdev);
+ cmd.ppf_idx = hinic_ppf_idx(hwdev);
+ cmd.page_size = page_size;
+
+ return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_PAGESIZE_SET,
+ &cmd, sizeof(cmd),
+ NULL, NULL, 0);
+}
+
+static int wait_for_flr_finish(struct hinic_hwif *hwif)
+{
+ unsigned long end;
+ enum hinic_pf_status status;
+
+ end = jiffies + msecs_to_jiffies(HINIC_FLR_TIMEOUT);
+ do {
+ status = hinic_get_pf_status(hwif);
+ if (status == HINIC_PF_STATUS_FLR_FINISH_FLAG) {
+ return 0;
+ }
+
+ rte_delay_ms(10);
+ } while (time_before(jiffies, end));
+
+ return -EFAULT;
+}
+
+#define HINIC_WAIT_CMDQ_IDLE_TIMEOUT 1000
+
+static int wait_cmdq_stop(struct hinic_hwdev *hwdev)
+{
+ enum hinic_cmdq_type cmdq_type;
+ struct hinic_cmdqs *cmdqs = hwdev->cmdqs;
+ unsigned long end;
+ int err = 0;
+
+ if (!(cmdqs->status & HINIC_CMDQ_ENABLE))
+ return 0;
+
+ cmdqs->status &= ~HINIC_CMDQ_ENABLE;
+
+ end = jiffies + msecs_to_jiffies(HINIC_WAIT_CMDQ_IDLE_TIMEOUT);
+ do {
+ err = 0;
+ cmdq_type = HINIC_CMDQ_SYNC;
+ for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
+ if (!hinic_cmdq_idle(&cmdqs->cmdq[cmdq_type])) {
+ err = -EBUSY;
+ break;
+ }
+ }
+
+ if (!err)
+ return 0;
+
+ rte_delay_ms(1);
+ } while (time_before(jiffies, end));
+
+ cmdqs->status |= HINIC_CMDQ_ENABLE;
+
+ return err;
+}
+
+static int hinic_vf_rx_tx_flush(struct hinic_hwdev *hwdev)
+{
+ struct hinic_clear_resource clr_res;
+ int err;
+
+ err = wait_cmdq_stop(hwdev);
+ if (err) {
+ PMD_DRV_LOG(WARNING, "Cmdq is still working");
+ return err;
+ }
+
+ memset(&clr_res, 0, sizeof(clr_res));
+ clr_res.func_idx = HINIC_HWIF_GLOBAL_IDX(hwdev->hwif);
+ clr_res.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
+ err = hinic_mbox_to_pf_no_ack(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_START_FLR, &clr_res, sizeof(clr_res));
+ if (err)
+ PMD_DRV_LOG(WARNING, "Notice flush message failed");
+
+ /*
+ * The PF first disables the VF doorbell flush csr. After the PF has
+ * finished flushing the VF resources, it enables the VF doorbell
+ * flush csr again.
+ */
+ err = wait_until_doorbell_flush_states(hwdev->hwif, DISABLE_DOORBELL);
+ if (err)
+ PMD_DRV_LOG(WARNING, "Wait doorbell flush disable timeout");
+
+ err = wait_until_doorbell_flush_states(hwdev->hwif, ENABLE_DOORBELL);
+ if (err)
+ PMD_DRV_LOG(WARNING, "Wait doorbell flush enable timeout");
+
+ err = hinic_reinit_cmdq_ctxts(hwdev);
+ if (err)
+ PMD_DRV_LOG(WARNING, "Reinit cmdq failed when vf flush");
+
+ return err;
+}
+
+/**
+ * hinic_pf_rx_tx_flush - clean up hardware resource
+ * @hwdev: the hardware interface of a nic device
+ * @return
+ * 0 on success,
+ * negative error value otherwise.
+ */
+static int hinic_pf_rx_tx_flush(struct hinic_hwdev *hwdev)
+{
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct hinic_clear_doorbell clear_db;
+ struct hinic_clear_resource clr_res;
+ int err;
+
+ rte_delay_ms(100);
+
+ err = wait_cmdq_stop(hwdev);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Cmdq is still working");
+ return err;
+ }
+
+ hinic_disable_doorbell(hwif);
+ memset(&clear_db, 0, sizeof(clear_db));
+ clear_db.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ clear_db.func_idx = HINIC_HWIF_GLOBAL_IDX(hwif);
+ clear_db.ppf_idx = HINIC_HWIF_PPF_IDX(hwif);
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_FLUSH_DOORBELL, &clear_db,
+ sizeof(clear_db), NULL, NULL, 0);
+ if (err)
+ PMD_DRV_LOG(WARNING, "Flush doorbell failed");
+
+ hinic_set_pf_status(hwif, HINIC_PF_STATUS_FLR_START_FLAG);
+ memset(&clr_res, 0, sizeof(clr_res));
+ clr_res.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ clr_res.func_idx = HINIC_HWIF_GLOBAL_IDX(hwif);
+ clr_res.ppf_idx = HINIC_HWIF_PPF_IDX(hwif);
+
+ err = hinic_msg_to_mgmt_no_ack(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_START_FLR, &clr_res,
+ sizeof(clr_res), NULL, NULL);
+ if (err)
+ PMD_DRV_LOG(WARNING, "Notice flush message failed");
+
+ err = wait_for_flr_finish(hwif);
+ if (err)
+ PMD_DRV_LOG(WARNING, "Wait firmware FLR timeout");
+
+ hinic_enable_doorbell(hwif);
+
+ err = hinic_reinit_cmdq_ctxts(hwdev);
+ if (err)
+ PMD_DRV_LOG(WARNING, "Reinit cmdq failed when pf flush");
+
+ return 0;
+}
+
+int hinic_func_rx_tx_flush(struct hinic_hwdev *hwdev)
+{
+ if (HINIC_FUNC_TYPE(hwdev) == TYPE_VF)
+ return hinic_vf_rx_tx_flush(hwdev);
+ else
+ return hinic_pf_rx_tx_flush(hwdev);
+}
+
+/**
+ * hinic_get_interrupt_cfg - get interrupt configuration from NIC
+ * @hwdev: the hardware interface of a nic device
+ * @interrupt_info: information about interrupt aggregation
+ * Return: 0 on success, negative error value otherwise.
+ */
+static int hinic_get_interrupt_cfg(struct hinic_hwdev *hwdev,
+ struct nic_interrupt_info *interrupt_info)
+{
+ struct hinic_msix_config msix_cfg;
+ u16 out_size = sizeof(msix_cfg);
+ int err;
+
+ memset(&msix_cfg, 0, sizeof(msix_cfg));
+ msix_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ msix_cfg.func_id = hinic_global_func_id(hwdev);
+ msix_cfg.msix_index = interrupt_info->msix_index;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_MSI_CTRL_REG_RD_BY_UP,
+ &msix_cfg, sizeof(msix_cfg),
+ &msix_cfg, &out_size, 0);
+ if (err || !out_size || msix_cfg.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR, "Get interrupt config failed, ret: %d",
+ msix_cfg.mgmt_msg_head.status);
+ return -EINVAL;
+ }
+
+ interrupt_info->lli_credit_limit = msix_cfg.lli_credit_cnt;
+ interrupt_info->lli_timer_cfg = msix_cfg.lli_tmier_cnt;
+ interrupt_info->pending_limt = msix_cfg.pending_cnt;
+ interrupt_info->coalesc_timer_cfg = msix_cfg.coalesct_timer_cnt;
+ interrupt_info->resend_timer_cfg = msix_cfg.resend_timer_cnt;
+ return 0;
+}
+
+/**
+ * hinic_set_interrupt_cfg - set interrupt configuration to NIC
+ * @hwdev: the hardware interface of a nic device
+ * @interrupt_info: information about interrupt aggregation
+ * Return: 0 on success, negative error value otherwise.
+ */
+int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev,
+ struct nic_interrupt_info interrupt_info)
+{
+ struct hinic_msix_config msix_cfg;
+ struct nic_interrupt_info temp_info;
+ u16 out_size = sizeof(msix_cfg);
+ int err;
+
+ memset(&msix_cfg, 0, sizeof(msix_cfg));
+ msix_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ msix_cfg.func_id = hinic_global_func_id(hwdev);
+ msix_cfg.msix_index = (u16)interrupt_info.msix_index;
+
+ temp_info.msix_index = interrupt_info.msix_index;
+
+ err = hinic_get_interrupt_cfg(hwdev, &temp_info);
+ if (err)
+ return -EINVAL;
+
+ msix_cfg.lli_credit_cnt = temp_info.lli_credit_limit;
+ msix_cfg.lli_tmier_cnt = temp_info.lli_timer_cfg;
+ msix_cfg.pending_cnt = temp_info.pending_limt;
+ msix_cfg.coalesct_timer_cnt = temp_info.coalesc_timer_cfg;
+ msix_cfg.resend_timer_cnt = temp_info.resend_timer_cfg;
+
+ if (interrupt_info.lli_set) {
+ msix_cfg.lli_credit_cnt = interrupt_info.lli_credit_limit;
+ msix_cfg.lli_tmier_cnt = interrupt_info.lli_timer_cfg;
+ }
+
+ if (interrupt_info.interrupt_coalesc_set) {
+ msix_cfg.pending_cnt = interrupt_info.pending_limt;
+ msix_cfg.coalesct_timer_cnt = interrupt_info.coalesc_timer_cfg;
+ msix_cfg.resend_timer_cnt = interrupt_info.resend_timer_cfg;
+ }
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_MSI_CTRL_REG_WR_BY_UP,
+ &msix_cfg, sizeof(msix_cfg),
+ &msix_cfg, &out_size, 0);
+ if (err || !out_size || msix_cfg.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR, "Set interrupt config failed, ret: %d",
+ msix_cfg.mgmt_msg_head.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * init_aeqs_msix_attr - Init interrupt attributes of aeq
+ * @hwdev: the hardware interface of a nic device
+ * @return
+ * 0 on success,
+ * negative error value otherwise.
+ */
+int init_aeqs_msix_attr(void *hwdev)
+{
+ struct hinic_hwdev *nic_hwdev = hwdev;
+ struct hinic_aeqs *aeqs = nic_hwdev->aeqs;
+ struct nic_interrupt_info info = {0};
+ struct hinic_eq *eq;
+ u16 q_id;
+ int err;
+
+ info.lli_set = 0;
+ info.interrupt_coalesc_set = 1;
+ info.pending_limt = HINIC_DEAULT_EQ_MSIX_PENDING_LIMIT;
+ info.coalesc_timer_cfg = HINIC_DEAULT_EQ_MSIX_COALESC_TIMER_CFG;
+ info.resend_timer_cfg = HINIC_DEAULT_EQ_MSIX_RESEND_TIMER_CFG;
+
+ for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) {
+ eq = &aeqs->aeq[q_id];
+ info.msix_index = eq->eq_irq.msix_entry_idx;
+ err = hinic_set_interrupt_cfg(hwdev, info);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Set msix attr for aeq %d failed",
+ q_id);
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * set_pf_dma_attr_entry - set the dma attributes for entry
+ * @hwdev: the pointer to the private hardware device object
+ * @entry_idx: the entry index in the dma table
+ * @st: PCIE TLP steering tag
+ * @at: PCIE TLP AT field
+ * @ph: PCIE TLP Processing Hint field
+ * @no_snooping: PCIE TLP No snooping
+ * @tph_en: PCIE TLP Processing Hint Enable
+ */
+static void set_pf_dma_attr_entry(struct hinic_hwdev *hwdev, u32 entry_idx,
+ u8 st, u8 at, u8 ph,
+ enum hinic_pcie_nosnoop no_snooping,
+ enum hinic_pcie_tph tph_en)
+{
+ u32 addr, val, dma_attr_entry;
+
+ /* Read Modify Write */
+ addr = HINIC_CSR_DMA_ATTR_TBL_ADDR(entry_idx);
+
+ val = hinic_hwif_read_reg(hwdev->hwif, addr);
+ val = HINIC_DMA_ATTR_ENTRY_CLEAR(val, ST) &
+ HINIC_DMA_ATTR_ENTRY_CLEAR(val, AT) &
+ HINIC_DMA_ATTR_ENTRY_CLEAR(val, PH) &
+ HINIC_DMA_ATTR_ENTRY_CLEAR(val, NO_SNOOPING) &
+ HINIC_DMA_ATTR_ENTRY_CLEAR(val, TPH_EN);
+
+ dma_attr_entry = HINIC_DMA_ATTR_ENTRY_SET(st, ST) |
+ HINIC_DMA_ATTR_ENTRY_SET(at, AT) |
+ HINIC_DMA_ATTR_ENTRY_SET(ph, PH) |
+ HINIC_DMA_ATTR_ENTRY_SET(no_snooping, NO_SNOOPING) |
+ HINIC_DMA_ATTR_ENTRY_SET(tph_en, TPH_EN);
+
+ val |= dma_attr_entry;
+ hinic_hwif_write_reg(hwdev->hwif, addr, val);
+}
+
+static int set_vf_dma_attr_entry(struct hinic_hwdev *hwdev, u8 entry_idx,
+ u8 st, u8 at, u8 ph,
+ enum hinic_pcie_nosnoop no_snooping,
+ enum hinic_pcie_tph tph_en)
+{
+ struct hinic_vf_dma_attr_table attr;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.func_idx = hinic_global_func_id(hwdev);
+ attr.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ attr.func_dma_entry_num = hinic_dma_attr_entry_num(hwdev);
+ attr.entry_idx = entry_idx;
+ attr.st = st;
+ attr.at = at;
+ attr.ph = ph;
+ attr.no_snooping = no_snooping;
+ attr.tph_en = tph_en;
+
+ return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_DMA_ATTR_SET,
+ &attr, sizeof(attr), NULL, NULL, 0);
+}
+
+/**
+ * dma_attr_table_init - initialize the default dma attributes
+ * @hwdev: the pointer to the private hardware device object
+ */
+static int dma_attr_table_init(struct hinic_hwdev *hwdev)
+{
+ int err = 0;
+
+ if (HINIC_IS_VF(hwdev))
+ err = set_vf_dma_attr_entry(hwdev, PCIE_MSIX_ATTR_ENTRY,
+ HINIC_PCIE_ST_DISABLE, HINIC_PCIE_AT_DISABLE,
+ HINIC_PCIE_PH_DISABLE, HINIC_PCIE_SNOOP,
+ HINIC_PCIE_TPH_DISABLE);
+ else
+ set_pf_dma_attr_entry(hwdev, PCIE_MSIX_ATTR_ENTRY,
+ HINIC_PCIE_ST_DISABLE, HINIC_PCIE_AT_DISABLE,
+ HINIC_PCIE_PH_DISABLE, HINIC_PCIE_SNOOP,
+ HINIC_PCIE_TPH_DISABLE);
+
+ return err;
+}
+
+/**
+ * hinic_init_attr_table - init dma and aeq msix attribute table
+ * @hwdev: the pointer to the private hardware device object
+ */
+int hinic_init_attr_table(struct hinic_hwdev *hwdev)
+{
+ int err;
+
+ err = dma_attr_table_init(hwdev);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Initialize dma attribute table failed, err: %d",
+ err);
+ return err;
+ }
+
+ err = init_aeqs_msix_attr(hwdev);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Initialize aeqs msix attribute failed, err: %d",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+#define FAULT_SHOW_STR_LEN 16
+static void fault_report_show(struct hinic_hwdev *hwdev,
+ struct hinic_fault_event *event)
+{
+ char fault_type[FAULT_TYPE_MAX][FAULT_SHOW_STR_LEN + 1] = {
+ "chip", "ucode", "mem rd timeout", "mem wr timeout",
+ "reg rd timeout", "reg wr timeout"};
+ char fault_level[FAULT_LEVEL_MAX][FAULT_SHOW_STR_LEN + 1] = {
+ "fatal", "reset", "flr", "general", "suggestion"};
+ char type_str[FAULT_SHOW_STR_LEN + 1] = { 0 };
+ char level_str[FAULT_SHOW_STR_LEN + 1] = { 0 };
+ u8 err_level;
+
+ PMD_DRV_LOG(WARNING, "Fault event report received, func_id: %d",
+ hinic_global_func_id(hwdev));
+
+ if (event->type < FAULT_TYPE_MAX)
+ strncpy(type_str, fault_type[event->type], FAULT_SHOW_STR_LEN);
+ else
+ strncpy(type_str, "unknown", FAULT_SHOW_STR_LEN);
+ PMD_DRV_LOG(WARNING, "fault type: %d [%s]",
+ event->type, type_str);
+ PMD_DRV_LOG(WARNING, "fault val[0]: 0x%08x",
+ event->event.val[0]);
+ PMD_DRV_LOG(WARNING, "fault val[1]: 0x%08x",
+ event->event.val[1]);
+ PMD_DRV_LOG(WARNING, "fault val[2]: 0x%08x",
+ event->event.val[2]);
+ PMD_DRV_LOG(WARNING, "fault val[3]: 0x%08x",
+ event->event.val[3]);
+
+ switch (event->type) {
+ case FAULT_TYPE_CHIP:
+ err_level = event->event.chip.err_level;
+ if (err_level < FAULT_LEVEL_MAX)
+ strncpy(level_str, fault_level[err_level],
+ FAULT_SHOW_STR_LEN);
+ else
+ strncpy(level_str, "unknown",
+ FAULT_SHOW_STR_LEN);
+
+ PMD_DRV_LOG(WARNING, "err_level: %d [%s]",
+ err_level, level_str);
+
+ if (err_level == FAULT_LEVEL_SERIOUS_FLR) {
+ PMD_DRV_LOG(WARNING, "flr func_id: %d",
+ event->event.chip.func_id);
+ } else {
+ PMD_DRV_LOG(WARNING, "node_id: %d",
+ event->event.chip.node_id);
+ PMD_DRV_LOG(WARNING, "err_type: %d",
+ event->event.chip.err_type);
+ PMD_DRV_LOG(WARNING, "err_csr_addr: %d",
+ event->event.chip.err_csr_addr);
+ PMD_DRV_LOG(WARNING, "err_csr_value: %d",
+ event->event.chip.err_csr_value);
+ }
+ break;
+ case FAULT_TYPE_UCODE:
+ PMD_DRV_LOG(WARNING, "cause_id: %d",
+ event->event.ucode.cause_id);
+ PMD_DRV_LOG(WARNING, "core_id: %d",
+ event->event.ucode.core_id);
+ PMD_DRV_LOG(WARNING, "c_id: %d",
+ event->event.ucode.c_id);
+ PMD_DRV_LOG(WARNING, "epc: %d",
+ event->event.ucode.epc);
+ break;
+ case FAULT_TYPE_MEM_RD_TIMEOUT:
+ case FAULT_TYPE_MEM_WR_TIMEOUT:
+ PMD_DRV_LOG(WARNING, "err_csr_ctrl: %d",
+ event->event.mem_timeout.err_csr_ctrl);
+ PMD_DRV_LOG(WARNING, "err_csr_data: %d",
+ event->event.mem_timeout.err_csr_data);
+ PMD_DRV_LOG(WARNING, "ctrl_tab: %d",
+ event->event.mem_timeout.ctrl_tab);
+ PMD_DRV_LOG(WARNING, "mem_index: %d",
+ event->event.mem_timeout.mem_index);
+ break;
+ case FAULT_TYPE_REG_RD_TIMEOUT:
+ case FAULT_TYPE_REG_WR_TIMEOUT:
+ PMD_DRV_LOG(WARNING, "err_csr: %d",
+ event->event.reg_timeout.err_csr);
+ break;
+ default:
+ break;
+ }
+}
+
+static int resources_state_set(struct hinic_hwdev *hwdev,
+ enum hinic_res_state state)
+{
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct hinic_cmd_set_res_state res_state;
+
+ memset(&res_state, 0, sizeof(res_state));
+ res_state.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ res_state.func_idx = HINIC_HWIF_GLOBAL_IDX(hwif);
+ res_state.state = state;
+
+ return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_RES_STATE_SET,
+ &res_state, sizeof(res_state), NULL, NULL, 0);
+}
+
+/**
+ * hinic_activate_hwdev_state - Activate host NIC state and notify the mgmt
+ * channel that the host NIC is ready.
+ * @hwdev: the hardware interface of a nic device
+ * @return
+ * 0 on success,
+ * negative error value otherwise.
+ */
+int hinic_activate_hwdev_state(struct hinic_hwdev *hwdev)
+{
+ int rc = HINIC_OK;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ hinic_set_pf_status(hwdev->hwif, HINIC_PF_STATUS_ACTIVE_FLAG);
+
+ rc = resources_state_set(hwdev, HINIC_RES_ACTIVE);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Initialize resources state failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * hinic_deactivate_hwdev_state - Deactivate host NIC state and notify the
+ * mgmt channel that the host NIC is not ready.
+ * @hwdev: the pointer to the private hardware device object
+ */
+void hinic_deactivate_hwdev_state(struct hinic_hwdev *hwdev)
+{
+ int rc = HINIC_OK;
+
+ if (!hwdev)
+ return;
+
+ rc = resources_state_set(hwdev, HINIC_RES_CLEAN);
+ if (rc)
+ PMD_DRV_LOG(ERR, "Deinit resources state failed");
+
+ hinic_set_pf_status(hwdev->hwif, HINIC_PF_STATUS_INIT);
+}
+
+int hinic_get_board_info(void *hwdev, struct hinic_board_info *info)
+{
+ struct hinic_comm_board_info board_info;
+ u16 out_size = sizeof(board_info);
+ int err;
+
+ if (!hwdev || !info)
+ return -EINVAL;
+
+ memset(&board_info, 0, sizeof(board_info));
+ board_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_GET_BOARD_INFO,
+ &board_info, sizeof(board_info),
+ &board_info, &out_size, 0);
+ if (err || board_info.mgmt_msg_head.status || !out_size) {
+ PMD_DRV_LOG(ERR, "Failed to get board info, err: %d, status: 0x%x, out size: 0x%x",
+ err, board_info.mgmt_msg_head.status, out_size);
+ return -EFAULT;
+ }
+
+ memcpy(info, &board_info.info, sizeof(*info));
+ return 0;
+}
+
+/**
+ * hinic_l2nic_reset - Restore the initial state of NIC
+ * @hwdev: the hardware interface of a nic device
+ * @return
+ * 0 on success,
+ * negative error value otherwise.
+ */
+int hinic_l2nic_reset(struct hinic_hwdev *hwdev)
+{
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct hinic_l2nic_reset l2nic_reset;
+ int err = 0;
+
+ err = hinic_set_vport_enable(hwdev, false);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Set vport disable failed");
+ return err;
+ }
+
+ rte_delay_ms(100);
+
+ memset(&l2nic_reset, 0, sizeof(l2nic_reset));
+ l2nic_reset.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ l2nic_reset.func_id = HINIC_HWIF_GLOBAL_IDX(hwif);
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_L2NIC_RESET,
+ &l2nic_reset, sizeof(l2nic_reset),
+ NULL, NULL, 0);
+ if (err || l2nic_reset.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR, "Reset L2NIC resources failed");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void
+hinic_show_sw_watchdog_timeout_info(void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_mgmt_watchdog_info *watchdog_info;
+ u32 *dump_addr, *reg, stack_len, i, j;
+
+ if (in_size != sizeof(*watchdog_info)) {
+ PMD_DRV_LOG(ERR, "Invalid mgmt watchdog report, length: %d, should be %zu",
+ in_size, sizeof(*watchdog_info));
+ return;
+ }
+
+ watchdog_info = (struct hinic_mgmt_watchdog_info *)buf_in;
+
+ PMD_DRV_LOG(ERR, "Mgmt deadloop time: 0x%x 0x%x, task id: 0x%x, sp: 0x%x",
+ watchdog_info->curr_time_h, watchdog_info->curr_time_l,
+ watchdog_info->task_id, watchdog_info->sp);
+ PMD_DRV_LOG(ERR, "Stack current used: 0x%x, peak used: 0x%x, overflow flag: 0x%x, top: 0x%x, bottom: 0x%x",
+ watchdog_info->curr_used, watchdog_info->peak_used,
+ watchdog_info->is_overflow, watchdog_info->stack_top,
+ watchdog_info->stack_bottom);
+
+ PMD_DRV_LOG(ERR, "Mgmt pc: 0x%08x, lr: 0x%08x, cpsr: 0x%08x",
+ watchdog_info->pc, watchdog_info->lr, watchdog_info->cpsr);
+
+ PMD_DRV_LOG(ERR, "Mgmt register info");
+
+ for (i = 0; i < 3; i++) {
+ reg = watchdog_info->reg + (u64)(u32)(4 * i);
+ PMD_DRV_LOG(ERR, "0x%08x 0x%08x 0x%08x 0x%08x",
+ *(reg), *(reg + 1), *(reg + 2), *(reg + 3));
+ }
+
+ PMD_DRV_LOG(ERR, "0x%08x", watchdog_info->reg[12]);
+
+ if (watchdog_info->stack_actlen <= 1024) {
+ stack_len = watchdog_info->stack_actlen;
+ } else {
+ PMD_DRV_LOG(ERR, "Oops stack length: 0x%x is wrong",
+ watchdog_info->stack_actlen);
+ stack_len = 1024;
+ }
+
+ PMD_DRV_LOG(ERR, "Mgmt dump stack, 16Bytes per line(start from sp)");
+ for (i = 0; i < (stack_len / 16); i++) {
+ dump_addr = (u32 *)(watchdog_info->data + ((u64)(u32)(i * 16)));
+ PMD_DRV_LOG(ERR, "0x%08x 0x%08x 0x%08x 0x%08x",
+ *dump_addr, *(dump_addr + 1), *(dump_addr + 2),
+ *(dump_addr + 3));
+ }
+
+ for (j = 0; j < ((stack_len % 16) / 4); j++) {
+ dump_addr = (u32 *)(watchdog_info->data +
+ ((u64)(u32)(i * 16 + j * 4)));
+ PMD_DRV_LOG(ERR, "0x%08x", *dump_addr);
+ }
+
+ *out_size = sizeof(*watchdog_info);
+ watchdog_info = (struct hinic_mgmt_watchdog_info *)buf_out;
+ watchdog_info->mgmt_msg_head.status = 0;
+}
+
+static void hinic_show_pcie_dfx_info(struct hinic_hwdev *hwdev,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_pcie_dfx_ntc *notice_info =
+ (struct hinic_pcie_dfx_ntc *)buf_in;
+ struct hinic_pcie_dfx_info dfx_info;
+ u16 size = 0;
+ u16 cnt = 0;
+ u32 num = 0;
+ u32 i, j;
+ int err;
+ u32 *reg;
+
+ if (in_size != sizeof(*notice_info)) {
+ PMD_DRV_LOG(ERR, "Invalid pcie dfx notice info, length: %d, should be %zu.",
+ in_size, sizeof(*notice_info));
+ return;
+ }
+
+ ((struct hinic_pcie_dfx_ntc *)buf_out)->mgmt_msg_head.status = 0;
+ *out_size = sizeof(*notice_info);
+ memset(&dfx_info, 0, sizeof(dfx_info));
+ num = (u32)(notice_info->len / 1024);
+ PMD_DRV_LOG(INFO, "INFO LEN: %d", notice_info->len);
+ PMD_DRV_LOG(INFO, "PCIE DFX:");
+ dfx_info.host_id = 0;
+ dfx_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ for (i = 0; i < num; i++) {
+ dfx_info.offset = i * MAX_PCIE_DFX_BUF_SIZE;
+ if (i == (num - 1))
+ dfx_info.last = 1;
+ size = sizeof(dfx_info);
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_PCIE_DFX_GET,
+ &dfx_info, sizeof(dfx_info),
+ &dfx_info, &size, 0);
+ if (err || dfx_info.mgmt_msg_head.status || !size) {
+ PMD_DRV_LOG(ERR, "Failed to get pcie dfx info, err: %d, status: 0x%x, out size: 0x%x",
+ err, dfx_info.mgmt_msg_head.status, size);
+ return;
+ }
+
+ reg = (u32 *)dfx_info.data;
+ for (j = 0; j < 256; j = j + 8) {
+ PMD_DRV_LOG(ERR, "0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x",
+ cnt, reg[j], reg[(u32)(j + 1)],
+ reg[(u32)(j + 2)], reg[(u32)(j + 3)],
+ reg[(u32)(j + 4)], reg[(u32)(j + 5)],
+ reg[(u32)(j + 6)], reg[(u32)(j + 7)]);
+ cnt = cnt + 32;
+ }
+ memset(dfx_info.data, 0, MAX_PCIE_DFX_BUF_SIZE);
+ }
+}
+
+static void
+hinic_show_ffm_info(struct hinic_hwdev *hwdev, void *buf_in, u16 in_size)
+{
+ struct ffm_intr_info *intr;
+
+ if (in_size != sizeof(struct ffm_intr_info)) {
+ PMD_DRV_LOG(ERR, "Invalid input buffer len, length: %d, should be %zu.",
+ in_size, sizeof(struct ffm_intr_info));
+ return;
+ }
+
+ if (hwdev->ffm_num < FFM_RECORD_NUM_MAX) {
+ hwdev->ffm_num++;
+ intr = (struct ffm_intr_info *)buf_in;
+ PMD_DRV_LOG(WARNING, "node_id(%d),err_csr_addr(0x%x),err_csr_val(0x%x),err_level(0x%x),err_type(0x%x)",
+ intr->node_id,
+ intr->err_csr_addr,
+ intr->err_csr_value,
+ intr->err_level,
+ intr->err_type);
+ }
+}
+
+void hinic_comm_async_event_handle(struct hinic_hwdev *hwdev, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_cmd_fault_event *fault_event, *ret_fault_event;
+
+ if (!hwdev)
+ return;
+
+ *out_size = 0;
+
+ switch (cmd) {
+ case HINIC_MGMT_CMD_FAULT_REPORT:
+ if (in_size != sizeof(*fault_event)) {
+ PMD_DRV_LOG(ERR, "Invalid fault event report, length: %d, should be %zu",
+ in_size, sizeof(*fault_event));
+ return;
+ }
+
+ fault_event = (struct hinic_cmd_fault_event *)buf_in;
+ fault_report_show(hwdev, &fault_event->event);
+
+ if (hinic_func_type(hwdev) != TYPE_VF) {
+ ret_fault_event =
+ (struct hinic_cmd_fault_event *)buf_out;
+ ret_fault_event->mgmt_msg_head.status = 0;
+ *out_size = sizeof(*ret_fault_event);
+ }
+ break;
+
+ case HINIC_MGMT_CMD_WATCHDOG_INFO:
+ hinic_show_sw_watchdog_timeout_info(buf_in, in_size,
+ buf_out, out_size);
+ break;
+
+ case HINIC_MGMT_CMD_PCIE_DFX_NTC:
+ hinic_show_pcie_dfx_info(hwdev, buf_in, in_size,
+ buf_out, out_size);
+ break;
+
+ case HINIC_MGMT_CMD_FFM_SET:
+ hinic_show_ffm_info(hwdev, buf_in, in_size);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void
+hinic_cable_status_event(u8 cmd, void *buf_in, __rte_unused u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_cable_plug_event *plug_event;
+ struct hinic_link_err_event *link_err;
+
+ if (cmd == HINIC_PORT_CMD_CABLE_PLUG_EVENT) {
+ plug_event = (struct hinic_cable_plug_event *)buf_in;
+ PMD_DRV_LOG(INFO, "Port module event: Cable %s",
+ plug_event->plugged ? "plugged" : "unplugged");
+
+ *out_size = sizeof(*plug_event);
+ plug_event = (struct hinic_cable_plug_event *)buf_out;
+ plug_event->mgmt_msg_head.status = 0;
+ } else if (cmd == HINIC_PORT_CMD_LINK_ERR_EVENT) {
+ link_err = (struct hinic_link_err_event *)buf_in;
+ if (link_err->err_type >= LINK_ERR_NUM) {
+ PMD_DRV_LOG(ERR, "Link failed, Unknown type: 0x%x",
+ link_err->err_type);
+ } else {
+ PMD_DRV_LOG(INFO, "Link failed, type: 0x%x: %s",
+ link_err->err_type,
+ hinic_module_link_err[link_err->err_type]);
+ }
+
+ *out_size = sizeof(*link_err);
+ link_err = (struct hinic_link_err_event *)buf_out;
+ link_err->mgmt_msg_head.status = 0;
+ }
+}
+
+static int hinic_link_event_process(struct hinic_hwdev *hwdev,
+ struct rte_eth_dev *eth_dev, u8 status)
+{
+ uint32_t port_speed[LINK_SPEED_MAX] = {ETH_SPEED_NUM_10M,
+ ETH_SPEED_NUM_100M, ETH_SPEED_NUM_1G,
+ ETH_SPEED_NUM_10G, ETH_SPEED_NUM_25G,
+ ETH_SPEED_NUM_40G, ETH_SPEED_NUM_100G};
+ struct nic_port_info port_info;
+ struct rte_eth_link link;
+ int rc = HINIC_OK;
+
+ if (!status) {
+ link.link_status = ETH_LINK_DOWN;
+ link.link_speed = 0;
+ link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ link.link_autoneg = ETH_LINK_FIXED;
+ } else {
+ link.link_status = ETH_LINK_UP;
+
+ memset(&port_info, 0, sizeof(port_info));
+ rc = hinic_get_port_info(hwdev, &port_info);
+ if (rc) {
+ link.link_speed = ETH_SPEED_NUM_NONE;
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_autoneg = ETH_LINK_FIXED;
+ } else {
+ link.link_speed = port_speed[port_info.speed %
+ LINK_SPEED_MAX];
+ link.link_duplex = port_info.duplex;
+ link.link_autoneg = port_info.autoneg_state;
+ }
+ }
+ (void)rte_eth_linkstatus_set(eth_dev, &link);
+
+ return rc;
+}
+
+static void hinic_lsc_process(struct hinic_hwdev *hwdev,
+ struct rte_eth_dev *rte_dev, u8 status)
+{
+ int ret;
+
+ ret = hinic_link_event_process(hwdev, rte_dev, status);
+ /* check if link has changed, notify callback */
+ if (ret == 0)
+ _rte_eth_dev_callback_process(rte_dev,
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+}
+
+void hinic_l2nic_async_event_handle(struct hinic_hwdev *hwdev,
+ void *param, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_port_link_status *in_link;
+ struct rte_eth_dev *eth_dev;
+
+ if (!hwdev)
+ return;
+
+ *out_size = 0;
+
+ switch (cmd) {
+ case HINIC_PORT_CMD_LINK_STATUS_REPORT:
+ eth_dev = param;
+ in_link = (struct hinic_port_link_status *)buf_in;
+ PMD_DRV_LOG(INFO, "Link status event report, dev_name: %s, port_id: %d, link_status: %s",
+ eth_dev->data->name, eth_dev->data->port_id,
+ in_link->link ? "UP" : "DOWN");
+
+ hinic_lsc_process(hwdev, eth_dev, in_link->link);
+ break;
+
+ case HINIC_PORT_CMD_CABLE_PLUG_EVENT:
+ case HINIC_PORT_CMD_LINK_ERR_EVENT:
+ hinic_cable_status_event(cmd, buf_in, in_size,
+ buf_out, out_size);
+ break;
+
+ case HINIC_PORT_CMD_MGMT_RESET:
+ PMD_DRV_LOG(WARNING, "Mgmt is reset");
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "Unsupported event %d to process",
+ cmd);
+ break;
+ }
+}
+
+static void print_cable_info(struct hinic_link_info *info)
+{
+ char tmp_str[512] = {0};
+ char tmp_vendor[17] = {0};
+ const char *port_type = "Unknown port type";
+ int i;
+
+ if (info->cable_absent) {
+ PMD_DRV_LOG(INFO, "Cable unpresent");
+ return;
+ }
+
+ if (info->port_type < LINK_PORT_MAX_TYPE)
+ port_type = __hw_to_char_port_type[info->port_type];
+ else
+ PMD_DRV_LOG(INFO, "Unknown port type: %u",
+ info->port_type);
+ if (info->port_type == LINK_PORT_FIBRE) {
+ if (info->port_sub_type == FIBRE_SUBTYPE_SR)
+ port_type = "Fibre-SR";
+ else if (info->port_sub_type == FIBRE_SUBTYPE_LR)
+ port_type = "Fibre-LR";
+ }
+
+ for (i = sizeof(info->vendor_name) - 1; i >= 0; i--) {
+ if (info->vendor_name[i] == ' ')
+ info->vendor_name[i] = '\0';
+ else
+ break;
+ }
+
+ memcpy(tmp_vendor, info->vendor_name, sizeof(info->vendor_name));
+ snprintf(tmp_str, sizeof(tmp_str),
+ "Vendor: %s, %s, %s, length: %um, max_speed: %uGbps",
+ tmp_vendor, info->sfp_type ? "SFP" : "QSFP", port_type,
+ info->cable_length, info->cable_max_speed);
+ if (info->port_type != LINK_PORT_COPPER)
+ snprintf(tmp_str + strlen(tmp_str),
+ sizeof(tmp_str) - strlen(tmp_str),
+ ", Temperature: %u", info->cable_temp);
+
+ PMD_DRV_LOG(INFO, "Cable information: %s", tmp_str);
+}
+
+static void print_hi30_status(struct hinic_link_info *info)
+{
+ struct hi30_ffe_data *ffe_data;
+ struct hi30_ctle_data *ctle_data;
+
+ ffe_data = (struct hi30_ffe_data *)info->hi30_ffe;
+ ctle_data = (struct hi30_ctle_data *)info->hi30_ctle;
+
+ PMD_DRV_LOG(INFO, "TX_FFE: PRE2=%s%d; PRE1=%s%d; MAIN=%d; POST1=%s%d; POST1X=%s%d",
+ (ffe_data->PRE1 & 0x10) ? "-" : "",
+ (int)(ffe_data->PRE1 & 0xf),
+ (ffe_data->PRE2 & 0x10) ? "-" : "",
+ (int)(ffe_data->PRE2 & 0xf),
+ (int)ffe_data->MAIN,
+ (ffe_data->POST1 & 0x10) ? "-" : "",
+ (int)(ffe_data->POST1 & 0xf),
+ (ffe_data->POST2 & 0x10) ? "-" : "",
+ (int)(ffe_data->POST2 & 0xf));
+ PMD_DRV_LOG(INFO, "RX_CTLE: Gain1~3=%u %u %u; Boost1~3=%u %u %u; Zero1~3=%u %u %u; Squelch1~3=%u %u %u",
+ ctle_data->ctlebst[0], ctle_data->ctlebst[1],
+ ctle_data->ctlebst[2], ctle_data->ctlecmband[0],
+ ctle_data->ctlecmband[1], ctle_data->ctlecmband[2],
+ ctle_data->ctlermband[0], ctle_data->ctlermband[1],
+ ctle_data->ctlermband[2], ctle_data->ctleza[0],
+ ctle_data->ctleza[1], ctle_data->ctleza[2]);
+}
+
+static void print_link_info(struct hinic_link_info *info,
+ enum hilink_info_print_event type)
+{
+ const char *fec = "None";
+
+ if (info->fec < HILINK_FEC_MAX_TYPE)
+ fec = __hw_to_char_fec[info->fec];
+ else
+ PMD_DRV_LOG(INFO, "Unknown fec type: %u",
+ info->fec);
+
+ if (type == HILINK_EVENT_LINK_UP || !info->an_state) {
+ PMD_DRV_LOG(INFO, "Link information: speed %dGbps, %s, autoneg %s",
+ info->speed, fec, info->an_state ? "on" : "off");
+ } else {
+ PMD_DRV_LOG(INFO, "Link information: antoneg: %s",
+ info->an_state ? "on" : "off");
+ }
+}
+
+static const char *hilink_info_report_type[HILINK_EVENT_MAX_TYPE] = {
+ "", "link up", "link down", "cable plugged"
+};
+
+static void hinic_print_hilink_info(void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_hilink_link_info *hilink_info =
+ (struct hinic_hilink_link_info *)buf_in;
+ struct hinic_link_info *info;
+ enum hilink_info_print_event type;
+
+ if (in_size != sizeof(*hilink_info)) {
+ PMD_DRV_LOG(ERR, "Invalid hilink info message size %d, should be %zu",
+ in_size, sizeof(*hilink_info));
+ return;
+ }
+
+ ((struct hinic_hilink_link_info *)buf_out)->mgmt_msg_head.status = 0;
+ *out_size = sizeof(*hilink_info);
+
+ info = &hilink_info->info;
+ type = hilink_info->info_type;
+
+ if (type < HILINK_EVENT_LINK_UP || type >= HILINK_EVENT_MAX_TYPE) {
+ PMD_DRV_LOG(INFO, "Invalid hilink info report, type: %d",
+ type);
+ return;
+ }
+
+ PMD_DRV_LOG(INFO, "Hilink info report after %s",
+ hilink_info_report_type[type]);
+
+ print_cable_info(info);
+
+ print_link_info(info, type);
+
+ print_hi30_status(info);
+
+ if (type == HILINK_EVENT_LINK_UP)
+ return;
+
+ if (type == HILINK_EVENT_CABLE_PLUGGED) {
+ PMD_DRV_LOG(INFO, "alos: %u, rx_los: %u",
+ info->alos, info->rx_los);
+ return;
+ }
+
+ PMD_DRV_LOG(INFO, "PMA ctrl: %s, MAC tx %s, MAC rx %s, PMA debug inforeg: 0x%x, PMA signal ok reg: 0x%x, RF/LF status reg: 0x%x",
+ info->pma_status ? "on" : "off",
+ info->mac_tx_en ? "enable" : "disable",
+ info->mac_rx_en ? "enable" : "disable", info->pma_dbg_info_reg,
+ info->pma_signal_ok_reg, info->rf_lf_status_reg);
+ PMD_DRV_LOG(INFO, "alos: %u, rx_los: %u, PCS block counter reg: 0x%x,PCS link: 0x%x, MAC link: 0x%x PCS_err_cnt: 0x%x",
+ info->alos, info->rx_los, info->pcs_err_blk_cnt_reg,
+ info->pcs_link_reg, info->mac_link_reg, info->pcs_err_cnt);
+}
+
+void hinic_hilink_async_event_handle(struct hinic_hwdev *hwdev, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ if (!hwdev)
+ return;
+
+ *out_size = 0;
+
+ switch (cmd) {
+ case HINIC_HILINK_CMD_GET_LINK_INFO:
+ hinic_print_hilink_info(buf_in, in_size, buf_out,
+ out_size);
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "Unsupported event %d to process",
+ cmd);
+ break;
+ }
+}
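For context on the dma_pool helpers defined above (dma_pool_create/alloc/free/destroy), a minimal usage sketch follows. It assumes hwdev is an already-initialized struct hinic_hwdev * and that pci_pool aliases dma_pool (as in hinic_compat.h); error handling is trimmed and the function name is hypothetical, not a driver symbol.

static int example_pool_round_trip(struct hinic_hwdev *hwdev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *buf;

	/* 256-byte elements, 64-byte alignment, no boundary constraint */
	pool = dma_pool_create("example_pool", hwdev, 256, 64, 0);
	if (!pool)
		return -ENOMEM;

	buf = dma_pool_alloc(pool, &dma);	/* increments pool->inuse */
	if (buf)
		dma_pool_free(pool, buf, dma);	/* decrements pool->inuse */

	dma_pool_destroy(pool);	/* logs a leak warning if inuse != 0 */
	return 0;
}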
diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_hwdev.h b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_hwdev.h
new file mode 100644
index 000000000..d6896b3f1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_hwdev.h
@@ -0,0 +1,491 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_PMD_HWDEV_H_
+#define _HINIC_PMD_HWDEV_H_
+
+#include "hinic_pmd_cmd.h"
+
+#define HINIC_PAGE_SIZE_MAX 20
+
+#define HINIC_MGMT_CMD_UNSUPPORTED 0xFF
+#define HINIC_PF_SET_VF_ALREADY 0x4
+
+#define MAX_PCIE_DFX_BUF_SIZE 1024
+
+#define HINIC_DEV_BUSY_ACTIVE_FW 0xFE
+
+/* dma pool */
+struct dma_pool {
+ rte_atomic32_t inuse;
+ size_t elem_size;
+ size_t align;
+ size_t boundary;
+ void *hwdev;
+
+ char name[32];
+};
+
+enum hinic_res_state {
+ HINIC_RES_CLEAN = 0,
+ HINIC_RES_ACTIVE = 1,
+};
+
+enum hilink_info_print_event {
+ HILINK_EVENT_LINK_UP = 1,
+ HILINK_EVENT_LINK_DOWN,
+ HILINK_EVENT_CABLE_PLUGGED,
+ HILINK_EVENT_MAX_TYPE,
+};
+
+struct hinic_port_link_status {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 link;
+ u8 port_id;
+};
+
+enum link_err_status {
+ LINK_ERR_MODULE_UNRECOGENIZED,
+ LINK_ERR_NUM,
+};
+
+struct hinic_cable_plug_event {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 plugged; /* 0: unplugged, 1: plugged */
+ u8 port_id;
+};
+
+struct hinic_link_err_event {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 err_type;
+ u8 port_id;
+};
+
+struct hinic_cons_idx_attr {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_idx;
+ u8 dma_attr_off;
+ u8 pending_limit;
+ u8 coalescing_time;
+ u8 intr_en;
+ u16 intr_idx;
+ u32 l2nic_sqn;
+ u32 sq_id;
+ u64 ci_addr;
+};
+
+struct hinic_clear_doorbell {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_idx;
+ u8 ppf_idx;
+ u8 rsvd1;
+};
+
+struct hinic_clear_resource {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_idx;
+ u8 ppf_idx;
+ u8 rsvd1;
+};
+
+struct hinic_cmd_set_res_state {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_idx;
+ u8 state;
+ u8 rsvd1;
+ u32 rsvd2;
+};
+
+struct hinic_l2nic_reset {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+};
+
+struct hinic_page_size {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_idx;
+ u8 ppf_idx;
+ u8 page_size;
+ u32 rsvd;
+};
+
+struct hinic_msix_config {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 msix_index;
+ u8 pending_cnt;
+ u8 coalesct_timer_cnt;
+ u8 lli_tmier_cnt;
+ u8 lli_credit_cnt;
+ u8 resend_timer_cnt;
+ u8 rsvd1[3];
+};
+
+/* defined by chip */
+enum hinic_fault_type {
+ FAULT_TYPE_CHIP,
+ FAULT_TYPE_UCODE,
+ FAULT_TYPE_MEM_RD_TIMEOUT,
+ FAULT_TYPE_MEM_WR_TIMEOUT,
+ FAULT_TYPE_REG_RD_TIMEOUT,
+ FAULT_TYPE_REG_WR_TIMEOUT,
+ FAULT_TYPE_MAX,
+};
+
+/* defined by chip */
+enum hinic_fault_err_level {
+ /* default err_level=FAULT_LEVEL_FATAL if
+ * type==FAULT_TYPE_MEM_RD_TIMEOUT || FAULT_TYPE_MEM_WR_TIMEOUT ||
+ * FAULT_TYPE_REG_RD_TIMEOUT || FAULT_TYPE_REG_WR_TIMEOUT ||
+ * FAULT_TYPE_UCODE
+ * other: err_level in event.chip.err_level if type==FAULT_TYPE_CHIP
+ */
+ FAULT_LEVEL_FATAL,
+ FAULT_LEVEL_SERIOUS_RESET,
+ FAULT_LEVEL_SERIOUS_FLR,
+ FAULT_LEVEL_GENERAL,
+ FAULT_LEVEL_SUGGESTION,
+ FAULT_LEVEL_MAX
+};
+
+/* defined by chip */
+struct hinic_fault_event {
+ /* enum hinic_fault_type */
+ u8 type;
+ u8 rsvd0[3];
+ union {
+ u32 val[4];
+ /* valid only type==FAULT_TYPE_CHIP */
+ struct {
+ u8 node_id;
+ /* enum hinic_fault_err_level */
+ u8 err_level;
+ u8 err_type;
+ u8 rsvd1;
+ u32 err_csr_addr;
+ u32 err_csr_value;
+ /* func_id valid only err_level==FAULT_LEVEL_SERIOUS_FLR */
+ u16 func_id;
+ u16 rsvd2;
+ } chip;
+
+ /* valid only type==FAULT_TYPE_UCODE */
+ struct {
+ u8 cause_id;
+ u8 core_id;
+ u8 c_id;
+ u8 rsvd3;
+ u32 epc;
+ u32 rsvd4;
+ u32 rsvd5;
+ } ucode;
+
+ /* valid only type==FAULT_TYPE_MEM_RD_TIMEOUT ||
+ * FAULT_TYPE_MEM_WR_TIMEOUT
+ */
+ struct {
+ u32 err_csr_ctrl;
+ u32 err_csr_data;
+ u32 ctrl_tab;
+ u32 mem_index;
+ } mem_timeout;
+
+ /* valid only type==FAULT_TYPE_REG_RD_TIMEOUT ||
+ * FAULT_TYPE_REG_WR_TIMEOUT
+ */
+ struct {
+ u32 err_csr;
+ u32 rsvd6;
+ u32 rsvd7;
+ u32 rsvd8;
+ } reg_timeout;
+ } event;
+};
+
+struct hinic_cmd_fault_event {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ struct hinic_fault_event event;
+};
+
+struct hinic_mgmt_watchdog_info {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u32 curr_time_h;
+ u32 curr_time_l;
+ u32 task_id;
+ u32 rsv;
+
+ u32 reg[13];
+ u32 pc;
+ u32 lr;
+ u32 cpsr;
+
+ u32 stack_top;
+ u32 stack_bottom;
+ u32 sp;
+ u32 curr_used;
+ u32 peak_used;
+ u32 is_overflow;
+
+ u32 stack_actlen;
+ u8 data[1024];
+};
+
+struct hinic_pcie_dfx_ntc {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ int len;
+ u32 rsvd;
+};
+
+struct hinic_pcie_dfx_info {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u8 host_id;
+ u8 last;
+ u8 rsvd[2];
+ u32 offset;
+
+ u8 data[MAX_PCIE_DFX_BUF_SIZE];
+};
+
+struct ffm_intr_info {
+ u8 node_id;
+ /* error level of the interrupt source */
+ u8 err_level;
+ /* Classification by interrupt source properties */
+ u16 err_type;
+ u32 err_csr_addr;
+ u32 err_csr_value;
+};
+
+struct hinic_board_info {
+ u32 board_type;
+ u32 port_num;
+ u32 port_speed;
+ u32 pcie_width;
+ u32 host_num;
+ u32 pf_num;
+ u32 vf_total_num;
+ u32 tile_num;
+ u32 qcm_num;
+ u32 core_num;
+ u32 work_mode;
+ u32 service_mode;
+ u32 pcie_mode;
+ u32 cfg_addr;
+ u32 boot_sel;
+};
+
+struct hinic_comm_board_info {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ struct hinic_board_info info;
+
+ u32 rsvd1[5];
+};
+
+struct hi30_ctle_data {
+ u8 ctlebst[3];
+ u8 ctlecmband[3];
+ u8 ctlermband[3];
+ u8 ctleza[3];
+ u8 ctlesqh[3];
+ u8 ctleactgn[3];
+ u8 ctlepassgn;
+};
+
+struct hi30_ffe_data {
+ u8 PRE2;
+ u8 PRE1;
+ u8 POST1;
+ u8 POST2;
+ u8 MAIN;
+};
+
+enum hilink_fec_type {
+ HILINK_FEC_RSFEC,
+ HILINK_FEC_BASEFEC,
+ HILINK_FEC_NOFEC,
+ HILINK_FEC_MAX_TYPE,
+};
+
+enum hinic_link_port_type {
+ LINK_PORT_FIBRE = 1,
+ LINK_PORT_ELECTRIC,
+ LINK_PORT_COPPER,
+ LINK_PORT_AOC,
+ LINK_PORT_BACKPLANE,
+ LINK_PORT_BASET,
+ LINK_PORT_MAX_TYPE,
+};
+
+enum hilink_fibre_subtype {
+ FIBRE_SUBTYPE_SR = 1,
+ FIBRE_SUBTYPE_LR,
+ FIBRE_SUBTYPE_MAX,
+};
+
+struct hinic_link_info {
+ u8 vendor_name[16];
+ /* port type:
+ * 1 - fiber; 2 - electric; 3 - copper; 4 - AOC; 5 - backplane;
+ * 6 - baseT; 0xffff - unknown
+ *
+ * port subtype:
+ * Only when port_type is fiber:
+ * 1 - SR; 2 - LR
+ */
+ u32 port_type;
+ u32 port_sub_type;
+ u32 cable_length;
+ u8 cable_temp;
+ u8 cable_max_speed; /* 1(G)/10(G)/25(G)... */
+ u8 sfp_type; /* 0 - qsfp; 1 - sfp */
+ u8 rsvd0;
+ u32 power[4]; /* uW; if is sfp, only power[2] is valid */
+
+ u8 an_state; /* 0 - off; 1 - on */
+ u8 fec; /* 0 - RSFEC; 1 - BASEFEC; 2 - NOFEC */
+ u16 speed; /* 1(G)/10(G)/25(G)... */
+
+ u8 cable_absent; /* 0 - cable present; 1 - cable absent */
+ u8 alos; /* 0 - yes; 1 - no */
+ u8 rx_los; /* 0 - yes; 1 - no */
+ u8 pma_status;
+ u32 pma_dbg_info_reg; /* pma debug info: */
+ u32 pma_signal_ok_reg; /* signal ok: */
+
+ u32 pcs_err_blk_cnt_reg; /* error block counter: */
+ u32 rf_lf_status_reg; /* RF/LF status: */
+ u8 pcs_link_reg; /* pcs link: */
+ u8 mac_link_reg; /* mac link: */
+ u8 mac_tx_en;
+ u8 mac_rx_en;
+ u32 pcs_err_cnt;
+
+ u8 lane_used;
+ u8 hi30_ffe[5];
+ u8 hi30_ctle[19];
+ u8 hi30_dfe[14];
+ u8 rsvd4;
+};
+
+struct hinic_hilink_link_info {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 port_id;
+ u8 info_type; /* 1: link up; 2: link down; 3: cable plugged */
+ u8 rsvd1;
+
+ struct hinic_link_info info;
+
+ u8 rsvd2[780];
+};
+
+/* dma os dependency implementation */
+struct hinic_os_dep {
+ /* kernel dma alloc api */
+ rte_atomic32_t dma_alloc_cnt;
+ rte_spinlock_t dma_hash_lock;
+ struct rte_hash *dma_addr_hash;
+};
+
+struct nic_interrupt_info {
+ u32 lli_set;
+ u32 interrupt_coalesc_set;
+ u16 msix_index;
+ u8 lli_credit_limit;
+ u8 lli_timer_cfg;
+ u8 pending_limt;
+ u8 coalesc_timer_cfg;
+ u8 resend_timer_cfg;
+};
+
+struct hinic_sq_attr {
+ u8 dma_attr_off;
+ u8 pending_limit;
+ u8 coalescing_time;
+ u8 intr_en;
+ u16 intr_idx;
+ u32 l2nic_sqn;
+ /* bit[63:2] is addr's high 62bit, bit[0] is valid flag */
+ u64 ci_dma_base;
+};
+
+struct hinic_hwdev {
+ struct rte_pci_device *pcidev_hdl;
+ u32 ffm_num;
+
+ /* dma memory allocator */
+ struct hinic_os_dep os_dep;
+ struct hinic_hwif *hwif;
+ struct cfg_mgmt_info *cfg_mgmt;
+ struct hinic_aeqs *aeqs;
+ struct hinic_mbox_func_to_func *func_to_func;
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+ struct hinic_cmdqs *cmdqs;
+ struct hinic_nic_io *nic_io;
+};
+
+int hinic_osdep_init(struct hinic_hwdev *hwdev);
+
+void hinic_osdep_deinit(struct hinic_hwdev *hwdev);
+
+void dma_free_coherent_volatile(void *hwdev, size_t size,
+ volatile void *virt, dma_addr_t phys);
+
+int hinic_get_board_info(void *hwdev, struct hinic_board_info *info);
+
+int hinic_set_ci_table(void *hwdev, u16 q_id, struct hinic_sq_attr *attr);
+
+int hinic_func_rx_tx_flush(struct hinic_hwdev *hwdev);
+
+int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev,
+ struct nic_interrupt_info interrupt_info);
+
+int init_aeqs_msix_attr(void *hwdev);
+
+void hinic_comm_async_event_handle(struct hinic_hwdev *hwdev, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size);
+
+void hinic_l2nic_async_event_handle(struct hinic_hwdev *hwdev, void *param,
+ u8 cmd, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size);
+
+void hinic_hilink_async_event_handle(struct hinic_hwdev *hwdev, u8 cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size);
+
+int hinic_init_attr_table(struct hinic_hwdev *hwdev);
+
+int hinic_activate_hwdev_state(struct hinic_hwdev *hwdev);
+
+void hinic_deactivate_hwdev_state(struct hinic_hwdev *hwdev);
+
+int hinic_l2nic_reset(struct hinic_hwdev *hwdev);
+
+int hinic_set_pagesize(void *hwdev, u8 page_size);
+
+void hinic_cpu_to_be32(void *data, u32 len);
+
+void hinic_be32_to_cpu(void *data, u32 len);
+
+#endif /* _HINIC_PMD_HWDEV_H_ */
diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_hwif.c b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_hwif.c
new file mode 100644
index 000000000..4578b689d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_hwif.c
@@ -0,0 +1,554 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include <rte_bus_pci.h>
+
+#include "hinic_compat.h"
+#include "hinic_csr.h"
+#include "hinic_pmd_hwdev.h"
+#include "hinic_pmd_hwif.h"
+
+#define HINIC_CFG_REGS_BAR 0
+#define HINIC_INTR_MSI_BAR 2
+#define HINIC_DB_MEM_BAR 4
+
+#define PAGE_SIZE_4K 0x1000
+#define PAGE_SIZE_64K 0x10000
+
+#define HINIC_MSIX_CNT_RESEND_TIMER_SHIFT 29
+#define HINIC_MSIX_CNT_RESEND_TIMER_MASK 0x7U
+
+#define HINIC_MSIX_CNT_SET(val, member) \
+ (((val) & HINIC_MSIX_CNT_##member##_MASK) << \
+ HINIC_MSIX_CNT_##member##_SHIFT)
+
+/**
+ * hwif_ready - test if the HW initialization passed
+ * @hwdev: the pointer to the private hardware device object
+ * Return: 0 - success, negative - failure
+ */
+static int hwif_ready(struct hinic_hwdev *hwdev)
+{
+ u32 addr, attr0, attr1;
+
+ addr = HINIC_CSR_FUNC_ATTR1_ADDR;
+ attr1 = hinic_hwif_read_reg(hwdev->hwif, addr);
+ if (!HINIC_AF1_GET(attr1, MGMT_INIT_STATUS))
+ return -EBUSY;
+
+ addr = HINIC_CSR_FUNC_ATTR0_ADDR;
+ attr0 = hinic_hwif_read_reg(hwdev->hwif, addr);
+ if ((HINIC_AF0_GET(attr0, FUNC_TYPE) == TYPE_VF) &&
+ !HINIC_AF1_GET(attr1, PF_INIT_STATUS))
+ return -EBUSY;
+
+ return 0;
+}
+
+/**
+ * set_hwif_attr - set the attributes as members in hwif
+ * @hwif: the hardware interface of a pci function device
+ * @attr0: the first attribute that was read from the hw
+ * @attr1: the second attribute that was read from the hw
+ * @attr2: the third attribute that was read from the hw
+ */
+static void set_hwif_attr(struct hinic_hwif *hwif, u32 attr0, u32 attr1,
+ u32 attr2)
+{
+ hwif->attr.func_global_idx = HINIC_AF0_GET(attr0, FUNC_GLOBAL_IDX);
+ hwif->attr.port_to_port_idx = HINIC_AF0_GET(attr0, P2P_IDX);
+ hwif->attr.pci_intf_idx = HINIC_AF0_GET(attr0, PCI_INTF_IDX);
+ hwif->attr.vf_in_pf = HINIC_AF0_GET(attr0, VF_IN_PF);
+ hwif->attr.func_type = HINIC_AF0_GET(attr0, FUNC_TYPE);
+
+ hwif->attr.ppf_idx = HINIC_AF1_GET(attr1, PPF_IDX);
+
+ hwif->attr.num_aeqs = BIT(HINIC_AF1_GET(attr1, AEQS_PER_FUNC));
+ hwif->attr.num_ceqs = BIT(HINIC_AF1_GET(attr1, CEQS_PER_FUNC));
+ hwif->attr.num_irqs = BIT(HINIC_AF1_GET(attr1, IRQS_PER_FUNC));
+ hwif->attr.num_dma_attr = BIT(HINIC_AF1_GET(attr1, DMA_ATTR_PER_FUNC));
+
+ hwif->attr.global_vf_id_of_pf = HINIC_AF2_GET(attr2,
+ GLOBAL_VF_ID_OF_PF);
+}
+
+/**
+ * get_hwif_attr - read and set the attributes as members in hwif
+ * @hwif: the hardware interface of a pci function device
+ */
+static void get_hwif_attr(struct hinic_hwif *hwif)
+{
+ u32 addr, attr0, attr1, attr2;
+
+ addr = HINIC_CSR_FUNC_ATTR0_ADDR;
+ attr0 = hinic_hwif_read_reg(hwif, addr);
+
+ addr = HINIC_CSR_FUNC_ATTR1_ADDR;
+ attr1 = hinic_hwif_read_reg(hwif, addr);
+
+ addr = HINIC_CSR_FUNC_ATTR2_ADDR;
+ attr2 = hinic_hwif_read_reg(hwif, addr);
+
+ set_hwif_attr(hwif, attr0, attr1, attr2);
+}
+
+void hinic_set_pf_status(struct hinic_hwif *hwif, enum hinic_pf_status status)
+{
+ u32 attr5 = HINIC_AF5_SET(status, PF_STATUS);
+ u32 addr = HINIC_CSR_FUNC_ATTR5_ADDR;
+
+ if (hwif->attr.func_type == TYPE_VF) {
+		PMD_DRV_LOG(INFO, "VF doesn't support setting attr5");
+ return;
+ }
+
+ hinic_hwif_write_reg(hwif, addr, attr5);
+}
+
+enum hinic_pf_status hinic_get_pf_status(struct hinic_hwif *hwif)
+{
+ u32 attr5 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR5_ADDR);
+
+ return HINIC_AF5_GET(attr5, PF_STATUS);
+}
+
+static enum hinic_doorbell_ctrl
+hinic_get_doorbell_ctrl_status(struct hinic_hwif *hwif)
+{
+ u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR);
+
+ return HINIC_AF4_GET(attr4, DOORBELL_CTRL);
+}
+
+static enum hinic_outbound_ctrl
+hinic_get_outbound_ctrl_status(struct hinic_hwif *hwif)
+{
+ u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR);
+
+ return HINIC_AF4_GET(attr4, OUTBOUND_CTRL);
+}
+
+void hinic_enable_doorbell(struct hinic_hwif *hwif)
+{
+ u32 addr, attr4;
+
+ addr = HINIC_CSR_FUNC_ATTR4_ADDR;
+ attr4 = hinic_hwif_read_reg(hwif, addr);
+
+ attr4 = HINIC_AF4_CLEAR(attr4, DOORBELL_CTRL);
+ attr4 |= HINIC_AF4_SET(ENABLE_DOORBELL, DOORBELL_CTRL);
+
+ hinic_hwif_write_reg(hwif, addr, attr4);
+}
+
+void hinic_disable_doorbell(struct hinic_hwif *hwif)
+{
+ u32 addr, attr4;
+
+ addr = HINIC_CSR_FUNC_ATTR4_ADDR;
+ attr4 = hinic_hwif_read_reg(hwif, addr);
+
+ attr4 = HINIC_AF4_CLEAR(attr4, DOORBELL_CTRL);
+ attr4 |= HINIC_AF4_SET(DISABLE_DOORBELL, DOORBELL_CTRL);
+
+ hinic_hwif_write_reg(hwif, addr, attr4);
+}
+
+/**
+ * set_ppf - try to set hwif as ppf and set the type of hwif in this case
+ * @hwif: the hardware interface of a pci function device
+ */
+static void set_ppf(struct hinic_hwif *hwif)
+{
+ struct hinic_func_attr *attr = &hwif->attr;
+ u32 addr, val, ppf_election;
+
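+	/* Each function writes its own global index into the PPF election
+	 * register; the index read back identifies the elected PPF, and the
+	 * function whose index matches it becomes TYPE_PPF.
+	 */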
+ /* Read Modify Write */
+ addr = HINIC_CSR_PPF_ELECTION_ADDR;
+
+ val = hinic_hwif_read_reg(hwif, addr);
+ val = HINIC_PPF_ELECTION_CLEAR(val, IDX);
+
+ ppf_election = HINIC_PPF_ELECTION_SET(attr->func_global_idx, IDX);
+ val |= ppf_election;
+
+ hinic_hwif_write_reg(hwif, addr, val);
+
+ /* Check PPF */
+ val = hinic_hwif_read_reg(hwif, addr);
+
+ attr->ppf_idx = HINIC_PPF_ELECTION_GET(val, IDX);
+ if (attr->ppf_idx == attr->func_global_idx)
+ attr->func_type = TYPE_PPF;
+}
+
+static void init_db_area_idx(struct hinic_hwif *hwif)
+{
+ struct hinic_free_db_area *free_db_area = &hwif->free_db_area;
+ u32 db_max_areas = hwif->db_max_areas;
+ u32 i;
+
+ for (i = 0; i < db_max_areas; i++)
+ free_db_area->db_idx[i] = i;
+
+ free_db_area->alloc_pos = 0;
+ free_db_area->return_pos = 0;
+
+ free_db_area->num_free = db_max_areas;
+
+ spin_lock_init(&free_db_area->idx_lock);
+}
+
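+/* Free doorbell page indexes are held in a small ring: alloc_pos and
+ * return_pos are free-running cursors masked with (db_max_areas - 1), which
+ * assumes db_max_areas is a power of two.
+ */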
+static int get_db_idx(struct hinic_hwif *hwif, u32 *idx)
+{
+ struct hinic_free_db_area *free_db_area = &hwif->free_db_area;
+ u32 pos;
+ u32 pg_idx;
+
+ spin_lock(&free_db_area->idx_lock);
+
+ if (free_db_area->num_free == 0) {
+ spin_unlock(&free_db_area->idx_lock);
+ return -ENOMEM;
+ }
+
+ free_db_area->num_free--;
+
+ pos = free_db_area->alloc_pos++;
+ pos &= (hwif->db_max_areas - 1);
+
+ pg_idx = free_db_area->db_idx[pos];
+
+ free_db_area->db_idx[pos] = 0xFFFFFFFF;
+
+ spin_unlock(&free_db_area->idx_lock);
+
+ *idx = pg_idx;
+
+ return 0;
+}
+
+static void free_db_idx(struct hinic_hwif *hwif, u32 idx)
+{
+ struct hinic_free_db_area *free_db_area = &hwif->free_db_area;
+ u32 pos;
+
+ spin_lock(&free_db_area->idx_lock);
+
+ pos = free_db_area->return_pos++;
+ pos &= (hwif->db_max_areas - 1);
+
+ free_db_area->db_idx[pos] = idx;
+
+ free_db_area->num_free++;
+
+ spin_unlock(&free_db_area->idx_lock);
+}
+
+void hinic_free_db_addr(void *hwdev, void __iomem *db_base)
+{
+ struct hinic_hwif *hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+ u32 idx = DB_IDX(db_base, hwif->db_base);
+
+ free_db_idx(hwif, idx);
+}
+
+int hinic_alloc_db_addr(void *hwdev, void __iomem **db_base)
+{
+ struct hinic_hwif *hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+ u32 idx;
+ int err;
+
+ err = get_db_idx(hwif, &idx);
+ if (err)
+ return -EFAULT;
+
+ *db_base = hwif->db_base + idx * HINIC_DB_PAGE_SIZE;
+
+ return 0;
+}
+
+void hinic_set_msix_state(void *hwdev, u16 msix_idx, enum hinic_msix_state flag)
+{
+ struct hinic_hwdev *hw = hwdev;
+ struct hinic_hwif *hwif = hw->hwif;
+ u32 offset = msix_idx * HINIC_PCI_MSIX_ENTRY_SIZE
+ + HINIC_PCI_MSIX_ENTRY_VECTOR_CTRL;
+ u32 mask_bits;
+
+	/* vfio-pci does not mmap the msi-x vector table to user space,
+	 * so that space cannot be accessed when the kernel driver is vfio-pci
+ */
+ if (hw->pcidev_hdl->kdrv == RTE_KDRV_VFIO)
+ return;
+
+ mask_bits = readl(hwif->intr_regs_base + offset);
+ mask_bits &= ~HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT;
+ if (flag)
+ mask_bits |= HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT;
+
+ writel(mask_bits, hwif->intr_regs_base + offset);
+}
+
+static void disable_all_msix(struct hinic_hwdev *hwdev)
+{
+ u16 num_irqs = hwdev->hwif->attr.num_irqs;
+ u16 i;
+
+ for (i = 0; i < num_irqs; i++)
+ hinic_set_msix_state(hwdev, i, HINIC_MSIX_DISABLE);
+}
+
+/**
+ * Wait until the doorbell enable or disable flush has finished.
+ * @hwif: the hardware interface of a pci function device.
+ * @states: the doorbell control state (enable or disable) to wait for.
+ */
+int wait_until_doorbell_flush_states(struct hinic_hwif *hwif,
+ enum hinic_doorbell_ctrl states)
+{
+ unsigned long end;
+ enum hinic_doorbell_ctrl db_ctrl;
+
+ end = jiffies +
+ msecs_to_jiffies(HINIC_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT);
+ do {
+ db_ctrl = hinic_get_doorbell_ctrl_status(hwif);
+ if (db_ctrl == states)
+ return 0;
+
+ rte_delay_ms(1);
+ } while (time_before(jiffies, end));
+
+ return -EFAULT;
+}
+
+static int wait_until_doorbell_and_outbound_enabled(struct hinic_hwif *hwif)
+{
+ unsigned long end;
+ enum hinic_doorbell_ctrl db_ctrl;
+ enum hinic_outbound_ctrl outbound_ctrl;
+
+ end = jiffies +
+ msecs_to_jiffies(HINIC_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT);
+ do {
+ db_ctrl = hinic_get_doorbell_ctrl_status(hwif);
+ outbound_ctrl = hinic_get_outbound_ctrl_status(hwif);
+
+ if (outbound_ctrl == ENABLE_OUTBOUND &&
+ db_ctrl == ENABLE_DOORBELL)
+ return 0;
+
+ rte_delay_ms(1);
+ } while (time_before(jiffies, end));
+
+ return -EFAULT;
+}
+
+u16 hinic_global_func_id(void *hwdev)
+{
+ struct hinic_hwif *hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+ return hwif->attr.func_global_idx;
+}
+
+enum func_type hinic_func_type(void *hwdev)
+{
+ struct hinic_hwif *hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+ return hwif->attr.func_type;
+}
+
+u8 hinic_ppf_idx(void *hwdev)
+{
+ struct hinic_hwif *hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+ return hwif->attr.ppf_idx;
+}
+
+/**
+ * hinic_dma_attr_entry_num - get the number of DMA attribute table entries.
+ * @hwdev: the pointer to the private hardware device object.
+ * Return: the number of DMA attribute table entries.
+ */
+u8 hinic_dma_attr_entry_num(void *hwdev)
+{
+ struct hinic_hwif *hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+ return hwif->attr.num_dma_attr;
+}
+
+/**
+ * hinic_init_hwif - initialize the hw interface
+ * @hwdev: the pointer to the private hardware device object
+ * @cfg_reg_base: base physical address of configuration registers
+ * @intr_reg_base: base physical address of msi-x vector table
+ * @db_base_phy: base physical address of doorbell registers
+ * @db_base: base virtual address of doorbell registers
+ * @dwqe_mapping: direct wqe io mapping address
+ * Return: 0 - success, negative - failure
+ */
+static int hinic_init_hwif(struct hinic_hwdev *hwdev, void *cfg_reg_base,
+ void *intr_reg_base, u64 db_base_phy,
+ void *db_base, __rte_unused void *dwqe_mapping)
+{
+ struct hinic_hwif *hwif;
+ struct rte_pci_device *pci_dev;
+ u64 db_bar_len;
+ int err;
+
+ pci_dev = (struct rte_pci_device *)(hwdev->pcidev_hdl);
+ db_bar_len = pci_dev->mem_resource[HINIC_DB_MEM_BAR].len;
+
+ hwif = hwdev->hwif;
+
+ hwif->cfg_regs_base = (u8 __iomem *)cfg_reg_base;
+ hwif->intr_regs_base = (u8 __iomem *)intr_reg_base;
+
+ hwif->db_base_phy = db_base_phy;
+ hwif->db_base = (u8 __iomem *)db_base;
+ hwif->db_max_areas = db_bar_len / HINIC_DB_PAGE_SIZE;
+ if (hwif->db_max_areas > HINIC_DB_MAX_AREAS)
+ hwif->db_max_areas = HINIC_DB_MAX_AREAS;
+
+ init_db_area_idx(hwif);
+
+ get_hwif_attr(hwif);
+
+ err = hwif_ready(hwdev);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Hwif is not ready");
+ goto hwif_ready_err;
+ }
+
+ err = wait_until_doorbell_and_outbound_enabled(hwif);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Hw doorbell/outbound is disabled");
+ goto hwif_ready_err;
+ }
+
+ if (!HINIC_IS_VF(hwdev))
+ set_ppf(hwif);
+
+	/* disable event reporting from the mgmt cpu */
+ hinic_set_pf_status(hwdev->hwif, HINIC_PF_STATUS_INIT);
+
+ return 0;
+
+hwif_ready_err:
+ spin_lock_deinit(&hwif->free_db_area.idx_lock);
+
+ return err;
+}
+
+#define HINIC_HWIF_ATTR_REG_PRINT_NUM (6)
+#define HINIC_HWIF_APICMD_REG_PRINT_NUM (2)
+#define HINIC_HWIF_EQ_REG_PRINT_NUM (2)
+
+static void hinic_parse_hwif_attr(struct hinic_hwdev *hwdev)
+{
+ struct hinic_hwif *hwif = hwdev->hwif;
+
+ PMD_DRV_LOG(INFO, "Device %s hwif attribute:", hwdev->pcidev_hdl->name);
+ PMD_DRV_LOG(INFO, "func_idx: %u, p2p_idx: %u, pciintf_idx: %u, "
+ "vf_in_pf: %u, ppf_idx: %u, global_vf_id: %u, func_type: %u",
+ hwif->attr.func_global_idx,
+ hwif->attr.port_to_port_idx, hwif->attr.pci_intf_idx,
+ hwif->attr.vf_in_pf, hwif->attr.ppf_idx,
+ hwif->attr.global_vf_id_of_pf, hwif->attr.func_type);
+ PMD_DRV_LOG(INFO, "num_aeqs:%u, num_ceqs:%u, num_irqs:%u, dma_attr:%u",
+ hwif->attr.num_aeqs, hwif->attr.num_ceqs,
+ hwif->attr.num_irqs, hwif->attr.num_dma_attr);
+}
+
+static void hinic_get_mmio(struct hinic_hwdev *hwdev, void **cfg_regs_base,
+ void **intr_base, void **db_base)
+{
+ struct rte_pci_device *pci_dev = hwdev->pcidev_hdl;
+ uint64_t bar0_size;
+ uint64_t bar2_size;
+ uint64_t bar0_phy_addr;
+ uint64_t pagesize = sysconf(_SC_PAGESIZE);
+
+ *cfg_regs_base = pci_dev->mem_resource[HINIC_CFG_REGS_BAR].addr;
+ *intr_base = pci_dev->mem_resource[HINIC_INTR_MSI_BAR].addr;
+ *db_base = pci_dev->mem_resource[HINIC_DB_MEM_BAR].addr;
+
+ bar0_size = pci_dev->mem_resource[HINIC_CFG_REGS_BAR].len;
+ bar2_size = pci_dev->mem_resource[HINIC_INTR_MSI_BAR].len;
+
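+	/* On 64KB-page systems BAR0 may be smaller than one page and not
+	 * page aligned; in that case the config registers are reached
+	 * through the BAR2 mapping at offset bar2_size, which assumes BAR0
+	 * follows BAR2 within the same 64KB page.
+	 */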
+ if (pagesize == PAGE_SIZE_64K && (bar0_size % pagesize != 0)) {
+ bar0_phy_addr =
+ pci_dev->mem_resource[HINIC_CFG_REGS_BAR].phys_addr;
+ if (bar0_phy_addr % pagesize != 0 &&
+ (bar0_size + bar2_size <= pagesize) &&
+ bar2_size >= bar0_size) {
+ *cfg_regs_base = (void *)((uint8_t *)(*intr_base)
+ + bar2_size);
+ }
+ }
+}
+
+void hinic_hwif_res_free(struct hinic_hwdev *hwdev)
+{
+ rte_free(hwdev->hwif);
+ hwdev->hwif = NULL;
+}
+
+int hinic_hwif_res_init(struct hinic_hwdev *hwdev)
+{
+ int err = HINIC_ERROR;
+ void *cfg_regs_base, *db_base, *intr_base = NULL;
+
+ /* hinic related init */
+ hwdev->hwif = rte_zmalloc("hinic_hwif", sizeof(*hwdev->hwif),
+ RTE_CACHE_LINE_SIZE);
+ if (!hwdev->hwif) {
+ PMD_DRV_LOG(ERR, "Allocate hwif failed, dev_name: %s",
+ hwdev->pcidev_hdl->name);
+ return -ENOMEM;
+ }
+
+ hinic_get_mmio(hwdev, &cfg_regs_base, &intr_base, &db_base);
+
+ err = hinic_init_hwif(hwdev, cfg_regs_base,
+ intr_base, 0, db_base, NULL);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Initialize hwif failed, dev_name: %s",
+ hwdev->pcidev_hdl->name);
+ goto init_hwif_err;
+ }
+
+ /* disable msix interrupt in hw device */
+ disable_all_msix(hwdev);
+
+ /* print hwif attributes */
+ hinic_parse_hwif_attr(hwdev);
+
+ return HINIC_OK;
+
+init_hwif_err:
+ rte_free(hwdev->hwif);
+ hwdev->hwif = NULL;
+
+ return err;
+}
+
+/**
+ * hinic_misx_intr_clear_resend_bit - clear interrupt resend configuration
+ * @hwdev: the pointer to the private hardware device object
+ * @msix_idx: Index of msix interrupt
+ * @clear_resend_en: enable flag of clear resend configuration
+ */
+void hinic_misx_intr_clear_resend_bit(void *hwdev, u16 msix_idx,
+ u8 clear_resend_en)
+{
+ struct hinic_hwif *hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+ u32 msix_ctrl = 0, addr;
+
+ msix_ctrl = HINIC_MSIX_CNT_SET(clear_resend_en, RESEND_TIMER);
+
+ addr = HINIC_CSR_MSIX_CNT_ADDR(msix_idx);
+
+ hinic_hwif_write_reg(hwif, addr, msix_ctrl);
+}
diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_hwif.h b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_hwif.h
new file mode 100644
index 000000000..de99507ac
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_hwif.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_PMD_HWIF_H_
+#define _HINIC_PMD_HWIF_H_
+
+#define HINIC_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT 30000
+
+#define HINIC_HWIF_NUM_AEQS(hwif) ((hwif)->attr.num_aeqs)
+#define HINIC_HWIF_NUM_CEQS(hwif) ((hwif)->attr.num_ceqs)
+#define HINIC_HWIF_NUM_IRQS(hwif) ((hwif)->attr.num_irqs)
+#define HINIC_HWIF_GLOBAL_IDX(hwif) ((hwif)->attr.func_global_idx)
+#define HINIC_HWIF_GLOBAL_VF_OFFSET(hwif) ((hwif)->attr.global_vf_id_of_pf)
+#define HINIC_HWIF_PPF_IDX(hwif) ((hwif)->attr.ppf_idx)
+#define HINIC_PCI_INTF_IDX(hwif) ((hwif)->attr.pci_intf_idx)
+
+#define HINIC_FUNC_TYPE(dev) ((dev)->hwif->attr.func_type)
+#define HINIC_IS_PF(dev) (HINIC_FUNC_TYPE(dev) == TYPE_PF)
+#define HINIC_IS_VF(dev) (HINIC_FUNC_TYPE(dev) == TYPE_VF)
+#define HINIC_IS_PPF(dev) (HINIC_FUNC_TYPE(dev) == TYPE_PPF)
+
+enum func_type {
+ TYPE_PF,
+ TYPE_VF,
+ TYPE_PPF,
+};
+
+enum hinic_msix_state {
+ HINIC_MSIX_ENABLE,
+ HINIC_MSIX_DISABLE,
+};
+
+/* Defines the IRQ information structure */
+struct irq_info {
+ u16 msix_entry_idx; /* IRQ corresponding index number */
+ u32 irq_id; /* the IRQ number from OS */
+};
+
+struct hinic_free_db_area {
+ u32 db_idx[HINIC_DB_MAX_AREAS];
+
+ u32 num_free;
+
+ u32 alloc_pos;
+ u32 return_pos;
+ /* spinlock for idx */
+ spinlock_t idx_lock;
+};
+
+struct hinic_func_attr {
+ u16 func_global_idx;
+ u8 port_to_port_idx;
+ u8 pci_intf_idx;
+ u8 vf_in_pf;
+ enum func_type func_type;
+
+ u8 mpf_idx;
+
+ u8 ppf_idx;
+
+ u16 num_irqs; /* max: 2 ^ 15 */
+ u8 num_aeqs; /* max: 2 ^ 3 */
+ u8 num_ceqs; /* max: 2 ^ 7 */
+
+ u8 num_dma_attr; /* max: 2 ^ 6 */
+
+ u16 global_vf_id_of_pf;
+};
+
+struct hinic_hwif {
+ u8 __iomem *cfg_regs_base;
+ u8 __iomem *intr_regs_base;
+ u64 db_base_phy;
+ u8 __iomem *db_base;
+ u64 db_max_areas;
+ struct hinic_free_db_area free_db_area;
+ struct hinic_func_attr attr;
+};
+
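+/* Configuration register values are stored big-endian, so every access is
+ * byte-swapped with be32_to_cpu()/cpu_to_be32().
+ */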
+static inline u32 hinic_hwif_read_reg(struct hinic_hwif *hwif, u32 reg)
+{
+ return be32_to_cpu(readl(hwif->cfg_regs_base + reg));
+}
+
+static inline void
+hinic_hwif_write_reg(struct hinic_hwif *hwif, u32 reg, u32 val)
+{
+ writel(cpu_to_be32(val), hwif->cfg_regs_base + reg);
+}
+
+u16 hinic_global_func_id(void *hwdev); /* func_attr.glb_func_idx */
+
+enum func_type hinic_func_type(void *hwdev);
+
+void hinic_set_pf_status(struct hinic_hwif *hwif, enum hinic_pf_status status);
+
+enum hinic_pf_status hinic_get_pf_status(struct hinic_hwif *hwif);
+
+void hinic_enable_doorbell(struct hinic_hwif *hwif);
+
+void hinic_disable_doorbell(struct hinic_hwif *hwif);
+
+int hinic_alloc_db_addr(void *hwdev, void __iomem **db_base);
+
+void hinic_free_db_addr(void *hwdev, void __iomem *db_base);
+
+int wait_until_doorbell_flush_states(struct hinic_hwif *hwif,
+ enum hinic_doorbell_ctrl states);
+
+void hinic_set_msix_state(void *hwdev, u16 msix_idx,
+ enum hinic_msix_state flag);
+
+void hinic_misx_intr_clear_resend_bit(void *hwdev, u16 msix_idx,
+ u8 clear_resend_en);
+
+u8 hinic_ppf_idx(void *hwdev);
+
+int hinic_hwif_res_init(struct hinic_hwdev *hwdev);
+
+void hinic_hwif_res_free(struct hinic_hwdev *hwdev);
+
+u8 hinic_dma_attr_entry_num(void *hwdev);
+
+#endif /* _HINIC_PMD_HWIF_H_ */
diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_mbox.c b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_mbox.c
new file mode 100644
index 000000000..ab1106a37
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_mbox.c
@@ -0,0 +1,933 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include "hinic_compat.h"
+#include "hinic_csr.h"
+#include "hinic_pmd_hwdev.h"
+#include "hinic_pmd_hwif.h"
+#include "hinic_pmd_eqs.h"
+#include "hinic_pmd_mgmt.h"
+#include "hinic_pmd_mbox.h"
+
+#define HINIC_MBOX_INT_DST_FUNC_SHIFT 0
+#define HINIC_MBOX_INT_DST_AEQN_SHIFT 10
+#define HINIC_MBOX_INT_SRC_RESP_AEQN_SHIFT 12
+#define HINIC_MBOX_INT_STAT_DMA_SHIFT 14
+/* The size of data to be sent (in units of 4 bytes) */
+#define HINIC_MBOX_INT_TX_SIZE_SHIFT 20
+/* SO_RO(strong order, relax order) */
+#define HINIC_MBOX_INT_STAT_DMA_SO_RO_SHIFT 25
+#define HINIC_MBOX_INT_WB_EN_SHIFT 28
+
+
+#define HINIC_MBOX_INT_DST_FUNC_MASK 0x3FF
+#define HINIC_MBOX_INT_DST_AEQN_MASK 0x3
+#define HINIC_MBOX_INT_SRC_RESP_AEQN_MASK 0x3
+#define HINIC_MBOX_INT_STAT_DMA_MASK 0x3F
+#define HINIC_MBOX_INT_TX_SIZE_MASK 0x1F
+#define HINIC_MBOX_INT_STAT_DMA_SO_RO_MASK 0x3
+#define HINIC_MBOX_INT_WB_EN_MASK 0x1
+
+#define HINIC_MBOX_INT_SET(val, field) \
+ (((val) & HINIC_MBOX_INT_##field##_MASK) << \
+ HINIC_MBOX_INT_##field##_SHIFT)
+
+enum hinic_mbox_tx_status {
+ TX_DONE = 0,
+ TX_IN_PROGRESS,
+};
+
+#define HINIC_MBOX_CTRL_TRIGGER_AEQE_SHIFT 0
+/* specifies the transmit request status of the message data:
+ * 0 - Tx request is done;
+ * 1 - Tx request is in progress.
+ */
+#define HINIC_MBOX_CTRL_TX_STATUS_SHIFT 1
+
+#define HINIC_MBOX_CTRL_TRIGGER_AEQE_MASK 0x1
+#define HINIC_MBOX_CTRL_TX_STATUS_MASK 0x1
+
+#define HINIC_MBOX_CTRL_SET(val, field) \
+ (((val) & HINIC_MBOX_CTRL_##field##_MASK) << \
+ HINIC_MBOX_CTRL_##field##_SHIFT)
+
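+/* 64-bit mailbox segment header layout: msg_len bits [10:0], module [15:11],
+ * seg_len [21:16], no_ack bit 22, seqid [29:24], last bit 30, direction
+ * bit 31, cmd [39:32], msg_id [47:40], status [53:48],
+ * src_glb_func_idx [63:54].
+ */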
+#define HINIC_MBOX_HEADER_MSG_LEN_SHIFT 0
+#define HINIC_MBOX_HEADER_MODULE_SHIFT 11
+#define HINIC_MBOX_HEADER_SEG_LEN_SHIFT 16
+#define HINIC_MBOX_HEADER_NO_ACK_SHIFT 22
+#define HINIC_MBOX_HEADER_SEQID_SHIFT 24
+#define HINIC_MBOX_HEADER_LAST_SHIFT 30
+
+#define HINIC_MBOX_HEADER_DIRECTION_SHIFT 31
+#define HINIC_MBOX_HEADER_CMD_SHIFT 32
+#define HINIC_MBOX_HEADER_MSG_ID_SHIFT 40
+#define HINIC_MBOX_HEADER_STATUS_SHIFT 48
+#define HINIC_MBOX_HEADER_SRC_GLB_FUNC_IDX_SHIFT 54
+
+#define HINIC_MBOX_HEADER_MSG_LEN_MASK 0x7FF
+#define HINIC_MBOX_HEADER_MODULE_MASK 0x1F
+#define HINIC_MBOX_HEADER_SEG_LEN_MASK 0x3F
+#define HINIC_MBOX_HEADER_NO_ACK_MASK 0x1
+#define HINIC_MBOX_HEADER_SEQID_MASK 0x3F
+#define HINIC_MBOX_HEADER_LAST_MASK 0x1
+#define HINIC_MBOX_HEADER_DIRECTION_MASK 0x1
+#define HINIC_MBOX_HEADER_CMD_MASK 0xFF
+#define HINIC_MBOX_HEADER_MSG_ID_MASK 0xFF
+#define HINIC_MBOX_HEADER_STATUS_MASK 0x3F
+#define HINIC_MBOX_HEADER_SRC_GLB_FUNC_IDX_MASK 0x3FF
+
+#define HINIC_MBOX_HEADER_GET(val, field) \
+ (((val) >> HINIC_MBOX_HEADER_##field##_SHIFT) & \
+ HINIC_MBOX_HEADER_##field##_MASK)
+#define HINIC_MBOX_HEADER_SET(val, field) \
+ ((u64)((val) & HINIC_MBOX_HEADER_##field##_MASK) << \
+ HINIC_MBOX_HEADER_##field##_SHIFT)
+
+#define HINIC_MBOX_COMP_TIME_MS 8000U
+#define MBOX_MSG_POLLING_TIMEOUT_MS 5000
+
+/* The size unit is Bytes */
+#define HINIC_MBOX_DATA_SIZE 2040
+#define MBOX_MAX_BUF_SZ 2048UL
+#define MBOX_HEADER_SZ 8
+
+/* MBOX size is 64B, 8B for mbox_header, 4B reserved */
+#define MBOX_SEG_LEN 48
+#define MBOX_SEG_LEN_ALIGN 4
+#define MBOX_WB_STATUS_LEN 16UL
+#define MBOX_SIZE 64
+
+/* mbox write back status is 16B, only first 4B is used */
+#define MBOX_WB_STATUS_ERRCODE_MASK 0xFFFF
+#define MBOX_WB_STATUS_MASK 0xFF
+#define MBOX_WB_ERROR_CODE_MASK 0xFF00
+#define MBOX_WB_STATUS_FINISHED_SUCCESS 0xFF
+#define MBOX_WB_STATUS_FINISHED_WITH_ERR 0xFE
+#define MBOX_WB_STATUS_NOT_FINISHED 0x00
+
+#define MBOX_STATUS_FINISHED(wb) \
+ (((wb) & MBOX_WB_STATUS_MASK) != MBOX_WB_STATUS_NOT_FINISHED)
+#define MBOX_STATUS_SUCCESS(wb) \
+ (((wb) & MBOX_WB_STATUS_MASK) == MBOX_WB_STATUS_FINISHED_SUCCESS)
+#define MBOX_STATUS_ERRCODE(wb) \
+ ((wb) & MBOX_WB_ERROR_CODE_MASK)
+
+#define SEQ_ID_START_VAL 0
+
+#define DST_AEQ_IDX_DEFAULT_VAL 0
+#define SRC_AEQ_IDX_DEFAULT_VAL 0
+#define NO_DMA_ATTRIBUTE_VAL 0
+
+#define MBOX_MSG_NO_DATA_LEN 1
+
+#define FUNC_ID_OFF_SET_8B 8
+#define FUNC_ID_OFF_SET_10B 10
+
+#define MBOX_BODY_FROM_HDR(header) ((u8 *)(header) + MBOX_HEADER_SZ)
+#define MBOX_AREA(hwif) \
+ ((hwif)->cfg_regs_base + HINIC_FUNC_CSR_MAILBOX_DATA_OFF)
+
+#define MBOX_RESPONSE_ERROR 0x1
+#define MBOX_MSG_ID_MASK 0xFF
+#define MBOX_MSG_ID(func_to_func) ((func_to_func)->send_msg_id)
+
+enum hinic_hwif_direction_type {
+	/* driver sends msg to up, or up sends msg to driver */
+	HINIC_HWIF_DIRECT_SEND = 0,
+	/* after driver/up sends a msg, the up/driver acknowledges it */
+ HINIC_HWIF_RESPONSE,
+};
+
+enum mbox_send_mod {
+ MBOX_SEND_MSG_POLL = 1
+};
+
+enum mbox_seg_type {
+ NOT_LAST_SEG,
+ LAST_SEG,
+};
+
+enum mbox_ordering_type {
+ STRONG_ORDER,
+ RELAX_ORDER,
+};
+
+enum mbox_write_back_type {
+ NOT_WRITE_BACK = 0,
+ WRITE_BACK,
+};
+
+enum mbox_aeq_trig_type {
+ NOT_TRIGGER,
+ TRIGGER,
+};
+
+static int send_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
+ enum hinic_mod_type mod, u16 cmd, void *msg,
+ u16 msg_len, u16 dst_func,
+ enum hinic_hwif_direction_type direction,
+ enum hinic_mbox_ack_type ack_type,
+ struct mbox_msg_info *msg_info);
+
+static int recv_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox,
+ void *buf_out, u16 *out_size, void *param)
+{
+ int rc = 0;
+ *out_size = 0;
+
+ switch (recv_mbox->mod) {
+ case HINIC_MOD_COMM:
+ hinic_comm_async_event_handle(func_to_func->hwdev,
+ recv_mbox->cmd, recv_mbox->mbox,
+ recv_mbox->mbox_len,
+ buf_out, out_size);
+ break;
+ case HINIC_MOD_L2NIC:
+ hinic_l2nic_async_event_handle(func_to_func->hwdev, param,
+ recv_mbox->cmd, recv_mbox->mbox,
+ recv_mbox->mbox_len,
+ buf_out, out_size);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "No handler, mod: %d", recv_mbox->mod);
+ rc = HINIC_MBOX_VF_CMD_ERROR;
+ break;
+ }
+
+ return rc;
+}
+
+static void set_mbx_msg_status(struct mbox_msg_info *msg_info, int status)
+{
+ if (status == HINIC_DEV_BUSY_ACTIVE_FW)
+ msg_info->status = HINIC_MBOX_PF_BUSY_ACTIVE_FW;
+ else if (status == HINIC_MBOX_VF_CMD_ERROR)
+ msg_info->status = HINIC_MBOX_VF_CMD_ERROR;
+ else if (status)
+ msg_info->status = HINIC_MBOX_PF_SEND_ERR;
+}
+
+static void recv_func_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox,
+ u16 src_func_idx, void *param)
+{
+ struct hinic_hwdev *dev = func_to_func->hwdev;
+ struct mbox_msg_info msg_info = { 0 };
+ u16 out_size = MBOX_MAX_BUF_SZ;
+ void *buf_out = recv_mbox->buf_out;
+ int err = 0;
+
+ if (HINIC_IS_VF(dev)) {
+ err = recv_vf_mbox_handler(func_to_func, recv_mbox, buf_out,
+ &out_size, param);
+ } else {
+ err = -EINVAL;
+		PMD_DRV_LOG(ERR, "PMD doesn't support handling mailbox messages on non-VF functions");
+ }
+
+ if (!out_size || err)
+ out_size = MBOX_MSG_NO_DATA_LEN;
+
+ if (recv_mbox->ack_type == MBOX_ACK) {
+ msg_info.msg_id = recv_mbox->msg_info.msg_id;
+ set_mbx_msg_status(&msg_info, err);
+ send_mbox_to_func(func_to_func, recv_mbox->mod, recv_mbox->cmd,
+ buf_out, out_size, src_func_idx,
+ HINIC_HWIF_RESPONSE, MBOX_ACK, &msg_info);
+ }
+}
+
+static bool check_mbox_seq_id_and_seg_len(struct hinic_recv_mbox *recv_mbox,
+ u8 seq_id, u8 seg_len)
+{
+ if (seq_id > HINIC_SEQ_ID_MAX_VAL || seg_len > HINIC_MSG_SEG_LEN)
+ return false;
+
+ if (seq_id == 0) {
+ recv_mbox->sed_id = seq_id;
+ } else {
+ if (seq_id != recv_mbox->sed_id + 1) {
+ recv_mbox->sed_id = 0;
+ return false;
+ }
+
+ recv_mbox->sed_id = seq_id;
+ }
+
+ return true;
+}
+
+static void clear_mbox_status(struct hinic_send_mbox *mbox)
+{
+ /* clear mailbox write back status */
+ *mbox->wb_status = 0;
+ rte_wmb();
+}
+
+static void mbox_copy_header(struct hinic_send_mbox *mbox, u64 *header)
+{
+ u32 *data = (u32 *)header;
+ u32 i, idx_max = MBOX_HEADER_SZ / sizeof(u32);
+
+ for (i = 0; i < idx_max; i++)
+ __raw_writel(*(data + i), mbox->data + i * sizeof(u32));
+}
+
+static void
+mbox_copy_send_data(struct hinic_send_mbox *mbox, void *seg, u16 seg_len)
+{
+ u32 *data = (u32 *)seg;
+ u32 data_len, chk_sz = sizeof(u32);
+ u32 i, idx_max;
+ u8 mbox_max_buf[MBOX_SEG_LEN] = {0};
+
+	/* The mbox message should be aligned to 4 bytes. */
+ if (seg_len % chk_sz) {
+ memcpy(mbox_max_buf, seg, seg_len);
+ data = (u32 *)mbox_max_buf;
+ }
+
+ data_len = seg_len;
+ idx_max = ALIGN(data_len, chk_sz) / chk_sz;
+
+ for (i = 0; i < idx_max; i++)
+ __raw_writel(*(data + i),
+ mbox->data + MBOX_HEADER_SZ + i * sizeof(u32));
+}
+
+static void write_mbox_msg_attr(struct hinic_mbox_func_to_func *func_to_func,
+ u16 dst_func, u16 dst_aeqn,
+ __rte_unused u16 seg_len, int poll)
+{
+ u32 mbox_int, mbox_ctrl;
+
+ mbox_int = HINIC_MBOX_INT_SET(dst_func, DST_FUNC) |
+ HINIC_MBOX_INT_SET(dst_aeqn, DST_AEQN) |
+ HINIC_MBOX_INT_SET(HINIC_MBOX_RSP_AEQN, SRC_RESP_AEQN) |
+ HINIC_MBOX_INT_SET(NO_DMA_ATTRIBUTE_VAL, STAT_DMA) |
+ HINIC_MBOX_INT_SET(ALIGN(MBOX_SIZE, MBOX_SEG_LEN_ALIGN) >> 2,
+ TX_SIZE) |
+ HINIC_MBOX_INT_SET(STRONG_ORDER, STAT_DMA_SO_RO) |
+ HINIC_MBOX_INT_SET(WRITE_BACK, WB_EN);
+
+ hinic_hwif_write_reg(func_to_func->hwdev->hwif,
+ HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF, mbox_int);
+
+ rte_wmb();
+ mbox_ctrl = HINIC_MBOX_CTRL_SET(TX_IN_PROGRESS, TX_STATUS);
+
+ if (poll)
+ mbox_ctrl |= HINIC_MBOX_CTRL_SET(NOT_TRIGGER, TRIGGER_AEQE);
+ else
+ mbox_ctrl |= HINIC_MBOX_CTRL_SET(TRIGGER, TRIGGER_AEQE);
+
+ hinic_hwif_write_reg(func_to_func->hwdev->hwif,
+ HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF, mbox_ctrl);
+}
+
+static int init_mbox_info(struct hinic_recv_mbox *mbox_info)
+{
+ int err;
+
+ mbox_info->mbox = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
+ if (!mbox_info->mbox) {
+ PMD_DRV_LOG(ERR, "Alloc mbox buf_in mem failed\n");
+ return -ENOMEM;
+ }
+
+ mbox_info->buf_out = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
+ if (!mbox_info->buf_out) {
+ PMD_DRV_LOG(ERR, "Alloc mbox buf_out mem failed\n");
+ err = -ENOMEM;
+ goto alloc_buf_out_err;
+ }
+
+ return 0;
+
+alloc_buf_out_err:
+ kfree(mbox_info->mbox);
+
+ return err;
+}
+
+static void clean_mbox_info(struct hinic_recv_mbox *mbox_info)
+{
+ kfree(mbox_info->buf_out);
+ kfree(mbox_info->mbox);
+}
+
+static int alloc_mbox_info(struct hinic_recv_mbox *mbox_info)
+{
+ u16 func_idx, i;
+ int err;
+
+ for (func_idx = 0; func_idx < HINIC_MAX_FUNCTIONS; func_idx++) {
+ err = init_mbox_info(&mbox_info[func_idx]);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Initialize function[%d] mailbox information failed, err: %d",
+ func_idx, err);
+ goto init_mbox_info_err;
+ }
+ }
+
+ return 0;
+
+init_mbox_info_err:
+ for (i = 0; i < func_idx; i++)
+ clean_mbox_info(&mbox_info[i]);
+
+ return err;
+}
+
+static void free_mbox_info(struct hinic_recv_mbox *mbox_info)
+{
+ u16 func_idx;
+
+ for (func_idx = 0; func_idx < HINIC_MAX_FUNCTIONS; func_idx++)
+ clean_mbox_info(&mbox_info[func_idx]);
+}
+
+static void prepare_send_mbox(struct hinic_mbox_func_to_func *func_to_func)
+{
+ struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
+
+ send_mbox->data = MBOX_AREA(func_to_func->hwdev->hwif);
+}
+
+static int alloc_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func)
+{
+ struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
+ struct hinic_hwdev *hwdev = func_to_func->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ u32 addr_h, addr_l;
+
+ send_mbox->wb_vaddr = dma_zalloc_coherent(hwdev, MBOX_WB_STATUS_LEN,
+ &send_mbox->wb_paddr, SOCKET_ID_ANY);
+ if (!send_mbox->wb_vaddr) {
+ PMD_DRV_LOG(ERR, "Allocating memory for mailbox wb status failed");
+ return -ENOMEM;
+ }
+ send_mbox->wb_status = (volatile u64 *)send_mbox->wb_vaddr;
+
+ addr_h = upper_32_bits(send_mbox->wb_paddr);
+ addr_l = lower_32_bits(send_mbox->wb_paddr);
+ hinic_hwif_write_reg(hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF, addr_h);
+ hinic_hwif_write_reg(hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF, addr_l);
+
+ return 0;
+}
+
+static void free_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func)
+{
+ struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
+ struct hinic_hwdev *hwdev = func_to_func->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+
+ hinic_hwif_write_reg(hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF, 0);
+ hinic_hwif_write_reg(hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF, 0);
+
+ dma_free_coherent(hwdev, MBOX_WB_STATUS_LEN,
+ send_mbox->wb_vaddr, send_mbox->wb_paddr);
+}
+
+static int recv_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ void *header, struct hinic_recv_mbox *recv_mbox, void *param)
+{
+ u64 mbox_header = *((u64 *)header);
+ void *mbox_body = MBOX_BODY_FROM_HDR(header);
+ u16 src_func_idx;
+ enum hinic_hwif_direction_type direction;
+ u8 seq_id, seg_len;
+
+ seq_id = HINIC_MBOX_HEADER_GET(mbox_header, SEQID);
+ seg_len = HINIC_MBOX_HEADER_GET(mbox_header, SEG_LEN);
+ direction = HINIC_MBOX_HEADER_GET(mbox_header, DIRECTION);
+ src_func_idx = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);
+
+ if (!check_mbox_seq_id_and_seg_len(recv_mbox, seq_id, seg_len)) {
+ PMD_DRV_LOG(ERR,
+ "Mailbox sequence and segment check failed, src func id: 0x%x, front id: 0x%x, current id: 0x%x, seg len: 0x%x\n",
+ src_func_idx, recv_mbox->sed_id, seq_id, seg_len);
+ return HINIC_ERROR;
+ }
+
+ memcpy((u8 *)recv_mbox->mbox + seq_id * HINIC_MSG_SEG_LEN,
+ mbox_body, seg_len);
+
+ if (!HINIC_MBOX_HEADER_GET(mbox_header, LAST))
+ return HINIC_ERROR;
+
+ recv_mbox->sed_id = 0;
+ recv_mbox->cmd = HINIC_MBOX_HEADER_GET(mbox_header, CMD);
+ recv_mbox->mod = HINIC_MBOX_HEADER_GET(mbox_header, MODULE);
+ recv_mbox->mbox_len = HINIC_MBOX_HEADER_GET(mbox_header, MSG_LEN);
+ recv_mbox->ack_type = HINIC_MBOX_HEADER_GET(mbox_header, NO_ACK);
+ recv_mbox->msg_info.msg_id = HINIC_MBOX_HEADER_GET(mbox_header, MSG_ID);
+ recv_mbox->msg_info.status = HINIC_MBOX_HEADER_GET(mbox_header, STATUS);
+
+ if (direction == HINIC_HWIF_RESPONSE) {
+ if (recv_mbox->msg_info.msg_id == func_to_func->send_msg_id &&
+ func_to_func->event_flag == EVENT_START) {
+ return HINIC_OK;
+ }
+
+ PMD_DRV_LOG(ERR, "Mbox response timeout, current send msg id(0x%x), recv msg id(0x%x), status(0x%x)",
+ func_to_func->send_msg_id, recv_mbox->msg_info.msg_id,
+ recv_mbox->msg_info.status);
+ return HINIC_ERROR;
+ }
+
+ recv_func_mbox_handler(func_to_func, recv_mbox, src_func_idx, param);
+
+ return HINIC_ERROR;
+}
+
+/**
+ * hinic_mbox_func_aeqe_handler - Process a mailbox message from another
+ * function, delivered through an AEQ element.
+ *
+ * @param handle
+ * Pointer to hardware nic device.
+ * @param header
+ * Mbox header info.
+ * @param size
+ * The size of aeqe descriptor.
+ * @param param
+ * Customized parameter.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+int hinic_mbox_func_aeqe_handler(void *handle, u8 *header,
+ __rte_unused u8 size, void *param)
+{
+ struct hinic_mbox_func_to_func *func_to_func =
+ ((struct hinic_hwdev *)handle)->func_to_func;
+ struct hinic_recv_mbox *recv_mbox;
+ u64 mbox_header = *((u64 *)header);
+ u16 src = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);
+
+ if (src >= HINIC_MAX_FUNCTIONS) {
+ PMD_DRV_LOG(ERR, "Mailbox source function id: %d is invalid",
+ src);
+ return HINIC_ERROR;
+ }
+
+ recv_mbox = (HINIC_MBOX_HEADER_GET(mbox_header, DIRECTION) ==
+ HINIC_HWIF_DIRECT_SEND) ?
+ &func_to_func->mbox_send[src] :
+ &func_to_func->mbox_resp[src];
+
+ return recv_mbox_handler(func_to_func, (u64 *)header, recv_mbox, param);
+}
+
+static u16 get_mbox_status(struct hinic_send_mbox *mbox)
+{
+	/* write back is 16B, but only the first 4B is used */
+ u64 wb_val = be64_to_cpu(*mbox->wb_status);
+
+	rte_rmb(); /* ensure the status read completes before the check */
+
+ return (u16)(wb_val & MBOX_WB_STATUS_ERRCODE_MASK);
+}
+
+static void dump_mox_reg(struct hinic_hwdev *hwdev)
+{
+ u32 val;
+
+ val = hinic_hwif_read_reg(hwdev->hwif,
+ HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF);
+ PMD_DRV_LOG(WARNING, "Mailbox control reg: 0x%x", val);
+ val = hinic_hwif_read_reg(hwdev->hwif,
+ HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF);
+ PMD_DRV_LOG(WARNING, "Mailbox interrupt offset: 0x%x", val);
+}
+
+static int send_mbox_seg(struct hinic_mbox_func_to_func *func_to_func,
+ u64 header, u16 dst_func, void *seg, u16 seg_len)
+{
+ struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
+ struct hinic_hwdev *hwdev = func_to_func->hwdev;
+ u16 seq_dir = HINIC_MBOX_HEADER_GET(header, DIRECTION);
+ u16 dst_aeqn = (seq_dir == HINIC_HWIF_DIRECT_SEND) ?
+ HINIC_MBOX_RECV_AEQN : HINIC_MBOX_RSP_AEQN;
+ u16 err_code, wb_status = 0;
+ u32 cnt = 0;
+
+ clear_mbox_status(send_mbox);
+
+ mbox_copy_header(send_mbox, &header);
+
+ mbox_copy_send_data(send_mbox, seg, seg_len);
+
+ write_mbox_msg_attr(func_to_func, dst_func, dst_aeqn, seg_len,
+ MBOX_SEND_MSG_POLL);
+
+ rte_wmb();
+
+ while (cnt < MBOX_MSG_POLLING_TIMEOUT_MS) {
+ wb_status = get_mbox_status(send_mbox);
+ if (MBOX_STATUS_FINISHED(wb_status))
+ break;
+
+ rte_delay_ms(1); /* loop every ms */
+ cnt++;
+ }
+
+ if (cnt == MBOX_MSG_POLLING_TIMEOUT_MS) {
+ PMD_DRV_LOG(ERR, "Send mailbox segment timeout, wb status: 0x%x",
+ wb_status);
+ dump_mox_reg(hwdev);
+ return -ETIMEDOUT;
+ }
+
+ if (!MBOX_STATUS_SUCCESS(wb_status)) {
+ PMD_DRV_LOG(ERR, "Send mailbox segment to function %d error, wb status: 0x%x",
+ dst_func, wb_status);
+		/*
+		 * err_code: 0 means no error; for other values refer to
+		 * the FS doc.
+ */
+ err_code = MBOX_STATUS_ERRCODE(wb_status);
+ return err_code ? err_code : -EFAULT;
+ }
+
+ return 0;
+}
+
+static void set_mbox_to_func_event(struct hinic_mbox_func_to_func *func_to_func,
+ enum mbox_event_state event_flag)
+{
+ spin_lock(&func_to_func->mbox_lock);
+ func_to_func->event_flag = event_flag;
+ spin_unlock(&func_to_func->mbox_lock);
+}
+
+static int send_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
+ enum hinic_mod_type mod, u16 cmd, void *msg,
+ u16 msg_len, u16 dst_func,
+ enum hinic_hwif_direction_type direction,
+ enum hinic_mbox_ack_type ack_type,
+ struct mbox_msg_info *msg_info)
+{
+ struct hinic_hwdev *hwdev = func_to_func->hwdev;
+ int err = 0;
+ u32 seq_id = 0;
+ u16 seg_len = HINIC_MSG_SEG_LEN;
+ u16 left = msg_len;
+ u8 *msg_seg = (u8 *)msg;
+ u64 header = 0;
+
+ err = hinic_mutex_lock(&func_to_func->msg_send_mutex);
+ if (err)
+ return err;
+
+ header = HINIC_MBOX_HEADER_SET(msg_len, MSG_LEN) |
+ HINIC_MBOX_HEADER_SET(mod, MODULE) |
+ HINIC_MBOX_HEADER_SET(seg_len, SEG_LEN) |
+ HINIC_MBOX_HEADER_SET(ack_type, NO_ACK) |
+ HINIC_MBOX_HEADER_SET(SEQ_ID_START_VAL, SEQID) |
+ HINIC_MBOX_HEADER_SET(NOT_LAST_SEG, LAST) |
+ HINIC_MBOX_HEADER_SET(direction, DIRECTION) |
+ HINIC_MBOX_HEADER_SET(cmd, CMD) |
+ HINIC_MBOX_HEADER_SET(msg_info->msg_id, MSG_ID) |
+ HINIC_MBOX_HEADER_SET(msg_info->status, STATUS) |
+ HINIC_MBOX_HEADER_SET(hinic_global_func_id(hwdev),
+ SRC_GLB_FUNC_IDX);
+
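+	/* Transmit the message in HINIC_MSG_SEG_LEN-byte segments: the SEQID
+	 * field is advanced for each segment, and the final segment carries
+	 * the remaining length together with the LAST flag.
+	 */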
+ while (!(HINIC_MBOX_HEADER_GET(header, LAST))) {
+ if (left <= HINIC_MSG_SEG_LEN) {
+ header &=
+ ~(HINIC_MBOX_HEADER_SET(HINIC_MBOX_HEADER_SEG_LEN_MASK,
+ SEG_LEN));
+ header |= HINIC_MBOX_HEADER_SET(left, SEG_LEN);
+ header |= HINIC_MBOX_HEADER_SET(LAST_SEG, LAST);
+
+ seg_len = left;
+ }
+
+ err = send_mbox_seg(func_to_func, header, dst_func, msg_seg,
+ seg_len);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Fail to send mbox seg, err: %d", err);
+ goto send_err;
+ }
+
+ left -= HINIC_MSG_SEG_LEN;
+ msg_seg += HINIC_MSG_SEG_LEN;
+
+ seq_id++;
+ header &= ~(HINIC_MBOX_HEADER_SET(HINIC_MBOX_HEADER_SEQID_MASK,
+ SEQID));
+ header |= HINIC_MBOX_HEADER_SET(seq_id, SEQID);
+ }
+
+send_err:
+ (void)hinic_mutex_unlock(&func_to_func->msg_send_mutex);
+
+ return err;
+}
+
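+/* Synchronous mailbox request: send the request segments to the destination
+ * function, poll the response AEQ for the reply, then copy the response body
+ * from mbox_resp[dst_func] into the caller's output buffer.
+ */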
+static int hinic_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
+ enum hinic_mod_type mod, u16 cmd, u16 dst_func,
+ void *buf_in, u16 in_size, void *buf_out, u16 *out_size,
+ u32 timeout)
+{
+ struct hinic_recv_mbox *mbox_for_resp =
+ &func_to_func->mbox_resp[dst_func];
+ struct mbox_msg_info msg_info = {0};
+ u32 time;
+ int err;
+
+ err = hinic_mutex_lock(&func_to_func->mbox_send_mutex);
+ if (err)
+ return err;
+
+ msg_info.msg_id = (MBOX_MSG_ID(func_to_func) + 1) & MBOX_MSG_ID_MASK;
+ MBOX_MSG_ID(func_to_func) = msg_info.msg_id;
+
+ set_mbox_to_func_event(func_to_func, EVENT_START);
+
+ err = send_mbox_to_func(func_to_func, mod, cmd, buf_in, in_size,
+ dst_func, HINIC_HWIF_DIRECT_SEND,
+ MBOX_ACK, &msg_info);
+ if (err)
+ goto send_err;
+
+ time = msecs_to_jiffies(timeout ? timeout : HINIC_MBOX_COMP_TIME_MS);
+ err = hinic_aeq_poll_msg(func_to_func->rsp_aeq, time, NULL);
+ if (err) {
+ set_mbox_to_func_event(func_to_func, EVENT_TIMEOUT);
+		PMD_DRV_LOG(ERR, "Send mailbox message timed out");
+ err = -ETIMEDOUT;
+ goto send_err;
+ }
+
+ set_mbox_to_func_event(func_to_func, EVENT_END);
+
+ if (mbox_for_resp->msg_info.status) {
+ err = mbox_for_resp->msg_info.status;
+ if (err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
+ PMD_DRV_LOG(ERR, "Mailbox response error: 0x%x",
+ mbox_for_resp->msg_info.status);
+ else
+			PMD_DRV_LOG(ERR, "Chip is activating firmware, PF can't process VF messages");
+ goto send_err;
+ }
+
+ rte_rmb();
+
+ if (mbox_for_resp->mbox_len && buf_out && out_size) {
+ if (mbox_for_resp->mbox_len <= *out_size) {
+ memcpy(buf_out, mbox_for_resp->mbox,
+ mbox_for_resp->mbox_len);
+ *out_size = mbox_for_resp->mbox_len;
+ } else {
+ PMD_DRV_LOG(ERR, "Mailbox response message len[%u] overflow",
+ mbox_for_resp->mbox_len);
+ err = -ERANGE;
+ }
+ }
+
+send_err:
+ if (err && out_size)
+ *out_size = 0;
+ (void)hinic_mutex_unlock(&func_to_func->mbox_send_mutex);
+
+ return err;
+}
+
+static int
+mbox_func_params_valid(__rte_unused struct hinic_mbox_func_to_func *mbox_obj,
+ void *buf_in, u16 in_size)
+{
+ if (!buf_in || !in_size)
+ return -EINVAL;
+
+ if (in_size > HINIC_MBOX_DATA_SIZE) {
+ PMD_DRV_LOG(ERR, "Mailbox message len(%d) exceed limit(%d)",
+ in_size, HINIC_MBOX_DATA_SIZE);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static u8 hinic_pf_id_of_vf(void *hwdev)
+{
+ struct hinic_hwif *hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+ return hwif->attr.port_to_port_idx;
+}
+
+/**
+ * hinic_mbox_to_pf - Send a mailbox message to the PF and wait for its response.
+ *
+ * @param hwdev
+ * Pointer to hardware nic device.
+ * @param mod
+ * Module type of the message.
+ * @param cmd
+ * The command sent to pf.
+ * @param buf_in
+ * Input parameter.
+ * @param in_size
+ * Input parameter size.
+ * @param buf_out
+ * Output parameter.
+ * @param out_size
+ * Output parameter size.
+ * @param timeout
+ * Timeout.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+int hinic_mbox_to_pf(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+ int err;
+
+ err = mbox_func_params_valid(func_to_func, buf_in, in_size);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Mailbox parameters check failed: %d", err);
+ return err;
+ }
+
+ if (!HINIC_IS_VF(hwdev)) {
+ PMD_DRV_LOG(ERR, "Input function type error, func_type: %d",
+ hinic_func_type(hwdev));
+ return -EINVAL;
+ }
+
+ return hinic_mbox_to_func(func_to_func, mod, cmd,
+ hinic_pf_id_of_vf(hwdev), buf_in, in_size,
+ buf_out, out_size, timeout);
+}
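+/* A minimal usage sketch for hinic_mbox_to_pf() above ("req" and "resp" are
+ * hypothetical caller-defined structures starting with a
+ * struct hinic_mgmt_msg_head):
+ *
+ *	u16 out_size = sizeof(resp);
+ *	err = hinic_mbox_to_pf(hwdev, HINIC_MOD_L2NIC, cmd,
+ *			       &req, sizeof(req), &resp, &out_size, 0);
+ */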
+
+/**
+ * hinic_mbox_to_pf_no_ack - Send a mailbox message to the PF without requiring a response
+ *
+ * @param hwdev
+ * Pointer to hardware nic device.
+ * @param mod
+ * Module type of the message.
+ * @param cmd
+ * The command sent to pf.
+ * @param buf_in
+ * Input parameter.
+ * @param in_size
+ * Input parameter size.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+int hinic_mbox_to_pf_no_ack(struct hinic_hwdev *hwdev, enum hinic_mod_type mod,
+ u8 cmd, void *buf_in, u16 in_size)
+{
+ int err;
+ struct mbox_msg_info msg_info = {0};
+
+ err = hinic_mutex_lock(&hwdev->func_to_func->mbox_send_mutex);
+ if (err)
+ return err;
+
+ err = send_mbox_to_func(hwdev->func_to_func, mod, cmd, buf_in, in_size,
+ hinic_pf_id_of_vf(hwdev), HINIC_HWIF_DIRECT_SEND,
+ MBOX_NO_ACK, &msg_info);
+ if (err)
+ PMD_DRV_LOG(ERR, "Send mailbox no ack failed, err: %d", err);
+
+ (void)hinic_mutex_unlock(&hwdev->func_to_func->mbox_send_mutex);
+
+ return err;
+}
+
+static int hinic_func_to_func_init(struct hinic_hwdev *hwdev)
+{
+ struct hinic_mbox_func_to_func *func_to_func;
+ int err;
+
+ func_to_func = kzalloc(sizeof(*func_to_func), GFP_KERNEL);
+ if (!func_to_func) {
+ PMD_DRV_LOG(ERR, "Allocating memory for func_to_func object failed");
+ return -ENOMEM;
+ }
+ hwdev->func_to_func = func_to_func;
+ func_to_func->hwdev = hwdev;
+ (void)hinic_mutex_init(&func_to_func->mbox_send_mutex, NULL);
+ (void)hinic_mutex_init(&func_to_func->msg_send_mutex, NULL);
+
+ err = alloc_mbox_info(func_to_func->mbox_send);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Allocating memory for mailbox sending failed");
+ goto alloc_mbox_for_send_err;
+ }
+
+ err = alloc_mbox_info(func_to_func->mbox_resp);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Allocating memory for mailbox responding failed");
+ goto alloc_mbox_for_resp_err;
+ }
+
+ err = alloc_mbox_wb_status(func_to_func);
+ if (err)
+ goto alloc_wb_status_err;
+
+ prepare_send_mbox(func_to_func);
+
+ return 0;
+
+alloc_wb_status_err:
+ free_mbox_info(func_to_func->mbox_resp);
+
+alloc_mbox_for_resp_err:
+ free_mbox_info(func_to_func->mbox_send);
+
+alloc_mbox_for_send_err:
+ kfree(func_to_func);
+
+ return err;
+}
+
+/**
+ * hinic_comm_func_to_func_free - Uninitialize func to func resource.
+ *
+ * @param hwdev
+ * Pointer to hardware nic device.
+ */
+void hinic_comm_func_to_func_free(struct hinic_hwdev *hwdev)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ free_mbox_wb_status(func_to_func);
+ free_mbox_info(func_to_func->mbox_resp);
+ free_mbox_info(func_to_func->mbox_send);
+ (void)hinic_mutex_destroy(&func_to_func->mbox_send_mutex);
+ (void)hinic_mutex_destroy(&func_to_func->msg_send_mutex);
+ kfree(func_to_func);
+}
+
+/**
+ * hinic_comm_func_to_func_init - Initialize func to func resource.
+ *
+ * @param hwdev
+ * Pointer to hardware nic device.
+ */
+int hinic_comm_func_to_func_init(struct hinic_hwdev *hwdev)
+{
+ int rc;
+
+ rc = hinic_func_to_func_init(hwdev);
+ if (rc)
+ return rc;
+
+ hwdev->func_to_func->rsp_aeq = &hwdev->aeqs->aeq[HINIC_MBOX_RSP_AEQN];
+ hwdev->func_to_func->recv_aeq = &hwdev->aeqs->aeq[HINIC_MBOX_RECV_AEQN];
+
+ return 0;
+}
+
diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_mbox.h b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_mbox.h
new file mode 100644
index 000000000..bf7b4906d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_mbox.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_PMD_MBOX_H_
+#define _HINIC_PMD_MBOX_H_
+
+#define HINIC_MBOX_RECV_AEQN 0
+#define HINIC_MBOX_RSP_AEQN 2
+
+#define HINIC_MBOX_PF_SEND_ERR 0x1
+#define HINIC_MBOX_PF_BUSY_ACTIVE_FW 0x2
+#define HINIC_MBOX_VF_CMD_ERROR 0x3
+
+/* PFs do not support enabling the SR-IOV capability when they run the PMD,
+ * so VFs only receive mailbox messages from PFs. The max number of PFs is 16,
+ * so the max number of per-function mailbox buffers is also 16.
+ */
+#define HINIC_MAX_FUNCTIONS 16
+#define HINIC_MAX_PF_FUNCS 16
+
+#define HINIC_MGMT_CMD_UNSUPPORTED 0xFF
+
+#define HINIC_SEQ_ID_MAX_VAL 42
+#define HINIC_MSG_SEG_LEN 48
+
+enum hinic_mbox_ack_type {
+ MBOX_ACK,
+ MBOX_NO_ACK,
+};
+
+struct mbox_msg_info {
+ u8 msg_id;
+	u8 status; /* only the low 6 bits are used */
+};
+
+struct hinic_recv_mbox {
+ void *mbox;
+ u8 cmd;
+ enum hinic_mod_type mod;
+ u16 mbox_len;
+ void *buf_out;
+ enum hinic_mbox_ack_type ack_type;
+ struct mbox_msg_info msg_info;
+ u8 sed_id;
+};
+
+struct hinic_send_mbox {
+ u8 *data;
+ volatile u64 *wb_status;
+ void *wb_vaddr;
+ dma_addr_t wb_paddr;
+};
+
+enum mbox_event_state {
+ EVENT_START = 0,
+ EVENT_TIMEOUT,
+ EVENT_END,
+};
+
+struct hinic_mbox_func_to_func {
+ struct hinic_hwdev *hwdev;
+
+ pthread_mutex_t mbox_send_mutex;
+ pthread_mutex_t msg_send_mutex;
+
+ struct hinic_send_mbox send_mbox;
+
+ struct hinic_recv_mbox mbox_resp[HINIC_MAX_FUNCTIONS];
+ struct hinic_recv_mbox mbox_send[HINIC_MAX_FUNCTIONS];
+
+ struct hinic_eq *rsp_aeq;
+ struct hinic_eq *recv_aeq;
+
+ u8 send_msg_id;
+ enum mbox_event_state event_flag;
+ spinlock_t mbox_lock; /* lock for mbox event flag */
+};
+
+/*
+ * mbox function prototypes
+ */
+int hinic_comm_func_to_func_init(struct hinic_hwdev *hwdev);
+void hinic_comm_func_to_func_free(struct hinic_hwdev *hwdev);
+int hinic_mbox_func_aeqe_handler(void *handle, u8 *header,
+ u8 size, void *param);
+int hinic_mbox_to_pf(struct hinic_hwdev *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size, u32 timeout);
+int hinic_mbox_to_pf_no_ack(struct hinic_hwdev *hwdev, enum hinic_mod_type mod,
+ u8 cmd, void *buf_in, u16 in_size);
+
+#endif /* _HINIC_PMD_MBOX_H_ */
diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_mgmt.c b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_mgmt.c
new file mode 100644
index 000000000..94bc45f83
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_mgmt.c
@@ -0,0 +1,804 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include "hinic_compat.h"
+#include "hinic_csr.h"
+#include "hinic_pmd_hwdev.h"
+#include "hinic_pmd_hwif.h"
+#include "hinic_pmd_mgmt.h"
+#include "hinic_pmd_mbox.h"
+
+#define BUF_OUT_DEFAULT_SIZE 1
+
+#define MAX_PF_MGMT_BUF_SIZE 2048UL
+
+#define MGMT_MSG_SIZE_MIN 20
+#define MGMT_MSG_SIZE_STEP 16
+#define MGMT_MSG_RSVD_FOR_DEV 8
+
+#define MGMT_MSG_TIMEOUT 5000 /* millisecond */
+
+#define SYNC_MSG_ID_MASK 0x1FF
+#define ASYNC_MSG_ID_MASK 0x1FF
+#define ASYNC_MSG_FLAG 0x200
+
+#define MSG_NO_RESP 0xFFFF
+
+#define MAX_MSG_SZ 2016
+
+#define MSG_SZ_IS_VALID(in_size) ((in_size) <= MAX_MSG_SZ)
+
+#define SYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->sync_msg_id)
+
+#define SYNC_MSG_ID_INC(pf_to_mgmt) (SYNC_MSG_ID(pf_to_mgmt) = \
+ (SYNC_MSG_ID(pf_to_mgmt) + 1) & SYNC_MSG_ID_MASK)
+
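+/* Async message ids carry ASYNC_MSG_FLAG (bit 9), which keeps them disjoint
+ * from sync message ids that stay within SYNC_MSG_ID_MASK.
+ */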
+#define ASYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->async_msg_id)
+
+#define ASYNC_MSG_ID_INC(pf_to_mgmt) (ASYNC_MSG_ID(pf_to_mgmt) = \
+ ((ASYNC_MSG_ID(pf_to_mgmt) + 1) & ASYNC_MSG_ID_MASK) \
+ | ASYNC_MSG_FLAG)
+
+#define HINIC_SEQ_ID_MAX_VAL 42
+#define HINIC_MSG_SEG_LEN 48
+
+#define GET_CURR_AEQ_ELEM(eq) GET_AEQ_ELEM((eq), (eq)->cons_idx)
+
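+/* AEQ element descriptor layout: type in bits [6:0], src in bit 7, size in
+ * bits [15:8] and the wrapped flag in bit 31.
+ */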
+#define EQ_ELEM_DESC_TYPE_SHIFT 0
+#define EQ_ELEM_DESC_SRC_SHIFT 7
+#define EQ_ELEM_DESC_SIZE_SHIFT 8
+#define EQ_ELEM_DESC_WRAPPED_SHIFT 31
+
+#define EQ_ELEM_DESC_TYPE_MASK 0x7FU
+#define EQ_ELEM_DESC_SRC_MASK 0x1U
+#define EQ_ELEM_DESC_SIZE_MASK 0xFFU
+#define EQ_ELEM_DESC_WRAPPED_MASK 0x1U
+
+#define EQ_MSIX_RESEND_TIMER_CLEAR 1
+
+#define EQ_ELEM_DESC_GET(val, member) \
+ (((val) >> EQ_ELEM_DESC_##member##_SHIFT) & \
+ EQ_ELEM_DESC_##member##_MASK)
+
+#define HINIC_MGMT_CHANNEL_STATUS_SHIFT 0x0
+#define HINIC_MGMT_CHANNEL_STATUS_MASK 0x1
+
+#define HINIC_GET_MGMT_CHANNEL_STATUS(val, member) \
+ (((val) >> HINIC_##member##_SHIFT) & HINIC_##member##_MASK)
+
+#define HINIC_MSG_TO_MGMT_MAX_LEN 2016
+
+/**
+ * mgmt_msg_len - calculate the total message length
+ * @msg_data_len: the length of the message data
+ * Return: the total message length
+ */
+static u16 mgmt_msg_len(u16 msg_data_len)
+{
+ /* u64 - the size of the header */
+ u16 msg_size = (u16)(MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) +
+ msg_data_len);
+
+ if (msg_size > MGMT_MSG_SIZE_MIN)
+ msg_size = MGMT_MSG_SIZE_MIN +
+ ALIGN((msg_size - MGMT_MSG_SIZE_MIN),
+ MGMT_MSG_SIZE_STEP);
+ else
+ msg_size = MGMT_MSG_SIZE_MIN;
+
+ return msg_size;
+}
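+/* For example, a 10-byte payload takes 8 (reserved) + 8 (header) + 10 = 26
+ * bytes, which mgmt_msg_len() rounds up to 20 + ALIGN(6, 16) = 36 bytes.
+ */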
+
+/**
+ * prepare_header - prepare the header of the message
+ * @pf_to_mgmt: PF to MGMT channel
+ * @header: pointer of the header to prepare
+ * @msg_len: the length of the message
+ * @mod: module in the chip that will get the message
+ * @ack_type: the ack type of the message
+ * @direction: the direction of the original message
+ * @cmd: the command to do
+ * @msg_id: message id
+ */
+static void prepare_header(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+ u64 *header, int msg_len, enum hinic_mod_type mod,
+ enum hinic_msg_ack_type ack_type,
+ enum hinic_msg_direction_type direction,
+ u8 cmd, u32 msg_id)
+{
+ struct hinic_hwif *hwif = pf_to_mgmt->hwdev->hwif;
+
+ *header = HINIC_MSG_HEADER_SET(msg_len, MSG_LEN) |
+ HINIC_MSG_HEADER_SET(mod, MODULE) |
+ HINIC_MSG_HEADER_SET(msg_len, SEG_LEN) |
+ HINIC_MSG_HEADER_SET(ack_type, NO_ACK) |
+ HINIC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) |
+ HINIC_MSG_HEADER_SET(0, SEQID) |
+ HINIC_MSG_HEADER_SET(LAST_SEGMENT, LAST) |
+ HINIC_MSG_HEADER_SET(direction, DIRECTION) |
+ HINIC_MSG_HEADER_SET(cmd, CMD) |
+ HINIC_MSG_HEADER_SET(HINIC_PCI_INTF_IDX(hwif), PCI_INTF_IDX) |
+ HINIC_MSG_HEADER_SET(hwif->attr.port_to_port_idx, P2P_IDX) |
+ HINIC_MSG_HEADER_SET(msg_id, MSG_ID);
+}
+
+/**
+ * prepare_mgmt_cmd - prepare the mgmt command
+ * @mgmt_cmd: pointer to the command to prepare
+ * @header: pointer of the header to prepare
+ * @msg: the data of the message
+ * @msg_len: the length of the message
+ */
+static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, void *msg,
+ int msg_len)
+{
+ u32 cmd_buf_max = MAX_PF_MGMT_BUF_SIZE;
+
+ memset(mgmt_cmd, 0, MGMT_MSG_RSVD_FOR_DEV);
+
+ mgmt_cmd += MGMT_MSG_RSVD_FOR_DEV;
+ cmd_buf_max -= MGMT_MSG_RSVD_FOR_DEV;
+ memcpy(mgmt_cmd, header, sizeof(*header));
+
+ mgmt_cmd += sizeof(*header);
+ cmd_buf_max -= sizeof(*header);
+ memcpy(mgmt_cmd, msg, msg_len);
+}
+
+/**
+ * alloc_recv_msg - allocate received message memory
+ * @recv_msg: pointer that will hold the allocated data
+ * Return: 0 - success, negative - failure
+ */
+static int alloc_recv_msg(struct hinic_recv_msg *recv_msg)
+{
+ int err;
+
+ recv_msg->msg = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
+ if (!recv_msg->msg) {
+ PMD_DRV_LOG(ERR, "Allocate recv msg buf failed");
+ return -ENOMEM;
+ }
+
+ recv_msg->buf_out = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
+ if (!recv_msg->buf_out) {
+ PMD_DRV_LOG(ERR, "Allocate recv msg output buf failed");
+ err = -ENOMEM;
+ goto alloc_buf_out_err;
+ }
+
+ return 0;
+
+alloc_buf_out_err:
+ kfree(recv_msg->msg);
+ return err;
+}
+
+/**
+ * free_recv_msg - free received message memory
+ * @recv_msg: pointer that holds the allocated data
+ */
+static void free_recv_msg(struct hinic_recv_msg *recv_msg)
+{
+ kfree(recv_msg->buf_out);
+ kfree(recv_msg->msg);
+}
+
+/**
+ * alloc_msg_buf - allocate all the message buffers of PF to MGMT channel
+ * @pf_to_mgmt: PF to MGMT channel
+ * Return: 0 - success, negative - failure
+ */
+static int alloc_msg_buf(struct hinic_msg_pf_to_mgmt *pf_to_mgmt)
+{
+ int err;
+
+ err = alloc_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Allocate recv msg failed");
+ return err;
+ }
+
+ err = alloc_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Allocate resp recv msg failed");
+ goto alloc_msg_for_resp_err;
+ }
+
+ pf_to_mgmt->async_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
+ if (!pf_to_mgmt->async_msg_buf) {
+ PMD_DRV_LOG(ERR, "Allocate async msg buf failed");
+ err = -ENOMEM;
+ goto async_msg_buf_err;
+ }
+
+ pf_to_mgmt->sync_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
+ if (!pf_to_mgmt->sync_msg_buf) {
+ PMD_DRV_LOG(ERR, "Allocate sync msg buf failed");
+ err = -ENOMEM;
+ goto sync_msg_buf_err;
+ }
+
+ return 0;
+
+sync_msg_buf_err:
+ kfree(pf_to_mgmt->async_msg_buf);
+
+async_msg_buf_err:
+ free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
+
+alloc_msg_for_resp_err:
+ free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
+
+ return err;
+}
+
+/**
+ * free_msg_buf - free all the message buffers of PF to MGMT channel
+ * @pf_to_mgmt: PF to MGMT channel
+ */
+static void free_msg_buf(struct hinic_msg_pf_to_mgmt *pf_to_mgmt)
+{
+ kfree(pf_to_mgmt->sync_msg_buf);
+ kfree(pf_to_mgmt->async_msg_buf);
+
+ free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
+ free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
+}
+
+static int hinic_get_mgmt_channel_status(void *hwdev)
+{
+ struct hinic_hwif *hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+ u32 val;
+
+ if (hinic_func_type((struct hinic_hwdev *)hwdev) == TYPE_VF)
+ return false;
+
+ val = hinic_hwif_read_reg(hwif, HINIC_ICPL_RESERVD_ADDR);
+
+ return HINIC_GET_MGMT_CHANNEL_STATUS(val, MGMT_CHANNEL_STATUS);
+}
+
+/**
+ * send_msg_to_mgmt_async - send async message
+ * @pf_to_mgmt: PF to MGMT channel
+ * @mod: module in the chip that will get the message
+ * @cmd: command of the message
+ * @msg: the data of the message
+ * @msg_len: the length of the message
+ * @direction: the direction of the original message
+ * @resp_msg_id: message id of response
+ * Return: 0 - success, negative - failure
+ */
+static int send_msg_to_mgmt_async(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+ enum hinic_mod_type mod, u8 cmd,
+ void *msg, u16 msg_len,
+ enum hinic_msg_direction_type direction,
+ u16 resp_msg_id)
+{
+ void *mgmt_cmd = pf_to_mgmt->async_msg_buf;
+ struct hinic_api_cmd_chain *chain;
+ u64 header;
+ u16 cmd_size = mgmt_msg_len(msg_len);
+
+ if (direction == HINIC_MSG_RESPONSE)
+ prepare_header(pf_to_mgmt, &header, msg_len, mod, HINIC_MSG_ACK,
+ direction, cmd, resp_msg_id);
+ else
+ prepare_header(pf_to_mgmt, &header, msg_len, mod, HINIC_MSG_ACK,
+ direction, cmd, ASYNC_MSG_ID(pf_to_mgmt));
+
+ prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len);
+
+ chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU];
+
+ return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT_HOST, mgmt_cmd,
+ cmd_size);
+}
+
+/**
+ * send_msg_to_mgmt_sync - send sync message
+ * @pf_to_mgmt: PF to MGMT channel
+ * @mod: module in the chip that will get the message
+ * @cmd: command of the message
+ * @msg: the data of the message
+ * @msg_len: the length of the message
+ * @ack_type: indicate whether the mgmt command needs an ack
+ * @direction: the direction of the original message
+ * @resp_msg_id: message id of the response
+ * Return: 0 - success, negative - failure
+ */
+static int send_msg_to_mgmt_sync(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+ enum hinic_mod_type mod, u8 cmd,
+ void *msg, u16 msg_len,
+ enum hinic_msg_ack_type ack_type,
+ enum hinic_msg_direction_type direction,
+ __rte_unused u16 resp_msg_id)
+{
+ void *mgmt_cmd = pf_to_mgmt->sync_msg_buf;
+ struct hinic_api_cmd_chain *chain;
+ u64 header;
+ u16 cmd_size = mgmt_msg_len(msg_len);
+
+	/* If the firmware is being hot-activated, return busy */
+ if (hinic_get_mgmt_channel_status(pf_to_mgmt->hwdev)) {
+ if (mod == HINIC_MOD_COMM || mod == HINIC_MOD_L2NIC)
+ return HINIC_DEV_BUSY_ACTIVE_FW;
+ else
+ return -EBUSY;
+ }
+
+ if (direction == HINIC_MSG_RESPONSE)
+ prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type,
+ direction, cmd, resp_msg_id);
+ else
+ prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type,
+ direction, cmd, SYNC_MSG_ID(pf_to_mgmt));
+
+ prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len);
+
+ chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_PMD_WRITE_TO_MGMT];
+
+ return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT_HOST,
+ mgmt_cmd, cmd_size);
+}
+
+/**
+ * hinic_pf_to_mgmt_init - initialize PF to MGMT channel
+ * @hwdev: the pointer to the private hardware device object
+ * Return: 0 - success, negative - failure
+ */
+static int hinic_pf_to_mgmt_init(struct hinic_hwdev *hwdev)
+{
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+ int err;
+
+ pf_to_mgmt = kzalloc(sizeof(*pf_to_mgmt), GFP_KERNEL);
+ if (!pf_to_mgmt) {
+ PMD_DRV_LOG(ERR, "Allocate pf to mgmt mem failed");
+ return -ENOMEM;
+ }
+
+ hwdev->pf_to_mgmt = pf_to_mgmt;
+ pf_to_mgmt->hwdev = hwdev;
+
+ err = hinic_mutex_init(&pf_to_mgmt->sync_msg_mutex, NULL);
+ if (err)
+ goto mutex_init_err;
+
+ err = alloc_msg_buf(pf_to_mgmt);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Allocate msg buffers failed");
+ goto alloc_msg_buf_err;
+ }
+
+ err = hinic_api_cmd_init(hwdev, pf_to_mgmt->cmd_chain);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Init the api cmd chains failed");
+ goto api_cmd_init_err;
+ }
+
+ return 0;
+
+api_cmd_init_err:
+ free_msg_buf(pf_to_mgmt);
+
+alloc_msg_buf_err:
+ hinic_mutex_destroy(&pf_to_mgmt->sync_msg_mutex);
+
+mutex_init_err:
+ kfree(pf_to_mgmt);
+
+ return err;
+}
+
+/**
+ * hinic_pf_to_mgmt_free - free PF to MGMT channel
+ * @hwdev: the pointer to the private hardware device object
+ */
+static void hinic_pf_to_mgmt_free(struct hinic_hwdev *hwdev)
+{
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt = hwdev->pf_to_mgmt;
+
+ hinic_api_cmd_free(pf_to_mgmt->cmd_chain);
+ free_msg_buf(pf_to_mgmt);
+ hinic_mutex_destroy(&pf_to_mgmt->sync_msg_mutex);
+ kfree(pf_to_mgmt);
+}
+
+static int
+hinic_pf_to_mgmt_sync(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod, u8 cmd, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size, u32 timeout)
+{
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt = hwdev->pf_to_mgmt;
+ struct hinic_recv_msg *recv_msg;
+ u32 timeo;
+ int err, i;
+
+ err = hinic_mutex_lock(&pf_to_mgmt->sync_msg_mutex);
+ if (err)
+ return err;
+
+ SYNC_MSG_ID_INC(pf_to_mgmt);
+ recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt;
+
+ err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
+ HINIC_MSG_ACK, HINIC_MSG_DIRECT_SEND,
+ MSG_NO_RESP);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Send msg to mgmt failed");
+ goto unlock_sync_msg;
+ }
+
+ timeo = msecs_to_jiffies(timeout ? timeout : MGMT_MSG_TIMEOUT);
+ for (i = 0; i < pf_to_mgmt->rx_aeq->poll_retry_nr; i++) {
+ err = hinic_aeq_poll_msg(pf_to_mgmt->rx_aeq, timeo, NULL);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Poll mgmt rsp timeout, mod=%d cmd=%d msg_id=%u rc=%d",
+ mod, cmd, pf_to_mgmt->sync_msg_id, err);
+ err = -ETIMEDOUT;
+ hinic_dump_aeq_info(hwdev);
+ goto unlock_sync_msg;
+ } else {
+ if (mod == recv_msg->mod && cmd == recv_msg->cmd &&
+ recv_msg->msg_id == pf_to_mgmt->sync_msg_id) {
+ /* the expected response polled */
+ break;
+ }
+ PMD_DRV_LOG(ERR, "AEQ[%d] poll(mod=%d, cmd=%d, msg_id=%u) an "
+ "unexpected(mod=%d, cmd=%d, msg_id=%u) response",
+ pf_to_mgmt->rx_aeq->q_id, mod, cmd,
+ pf_to_mgmt->sync_msg_id, recv_msg->mod,
+ recv_msg->cmd, recv_msg->msg_id);
+ }
+ }
+
+ if (i == pf_to_mgmt->rx_aeq->poll_retry_nr) {
+ PMD_DRV_LOG(ERR, "Get %d unexpected mgmt rsp from AEQ[%d], poll mgmt rsp failed",
+ i, pf_to_mgmt->rx_aeq->q_id);
+ err = -EBADMSG;
+ goto unlock_sync_msg;
+ }
+
+ rte_smp_rmb();
+ if (recv_msg->msg_len && buf_out && out_size) {
+ if (recv_msg->msg_len <= *out_size) {
+ memcpy(buf_out, recv_msg->msg,
+ recv_msg->msg_len);
+ *out_size = recv_msg->msg_len;
+ } else {
+ PMD_DRV_LOG(ERR, "Mgmt rsp's msg len: %u overflow.",
+ recv_msg->msg_len);
+ err = -ERANGE;
+ }
+ }
+
+unlock_sync_msg:
+ if (err && out_size)
+ *out_size = 0;
+ (void)hinic_mutex_unlock(&pf_to_mgmt->sync_msg_mutex);
+ return err;
+}
+
+int hinic_msg_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size, u32 timeout)
+{
+ int rc = HINIC_ERROR;
+
+ if (!hwdev || in_size > HINIC_MSG_TO_MGMT_MAX_LEN)
+ return -EINVAL;
+
+ if (hinic_func_type(hwdev) == TYPE_VF) {
+ rc = hinic_mbox_to_pf(hwdev, mod, cmd, buf_in, in_size,
+ buf_out, out_size, timeout);
+ } else {
+ rc = hinic_pf_to_mgmt_sync(hwdev, mod, cmd, buf_in, in_size,
+ buf_out, out_size, timeout);
+ }
+
+ return rc;
+}
+
+int hinic_msg_to_mgmt_no_ack(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size, __rte_unused void *buf_out,
+ __rte_unused u16 *out_size)
+{
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt =
+ ((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
+ int err = -EINVAL;
+
+ if (!MSG_SZ_IS_VALID(in_size)) {
+ PMD_DRV_LOG(ERR, "Mgmt msg buffer size is invalid");
+ return err;
+ }
+
+ err = hinic_mutex_lock(&pf_to_mgmt->sync_msg_mutex);
+ if (err)
+ return err;
+
+ err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
+ HINIC_MSG_NO_ACK, HINIC_MSG_DIRECT_SEND,
+ MSG_NO_RESP);
+
+ (void)hinic_mutex_unlock(&pf_to_mgmt->sync_msg_mutex);
+
+ return err;
+}
+
+static bool check_mgmt_seq_id_and_seg_len(struct hinic_recv_msg *recv_msg,
+ u8 seq_id, u8 seg_len)
+{
+ if (seq_id > HINIC_SEQ_ID_MAX_VAL || seg_len > HINIC_MSG_SEG_LEN)
+ return false;
+
+ if (seq_id == 0) {
+ recv_msg->sed_id = seq_id;
+ } else {
+ if (seq_id != recv_msg->sed_id + 1) {
+ recv_msg->sed_id = 0;
+ return false;
+ }
+ recv_msg->sed_id = seq_id;
+ }
+
+ return true;
+}
+
+/**
+ * hinic_mgmt_recv_msg_handler - handler for message from mgmt cpu
+ * @pf_to_mgmt: PF to MGMT channel
+ * @recv_msg: received message details
+ * @param: customized parameter
+ */
+static void hinic_mgmt_recv_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+ struct hinic_recv_msg *recv_msg,
+ void *param)
+{
+ void *buf_out = recv_msg->buf_out;
+ u16 out_size = 0;
+
+ switch (recv_msg->mod) {
+ case HINIC_MOD_COMM:
+ hinic_comm_async_event_handle(pf_to_mgmt->hwdev,
+ recv_msg->cmd, recv_msg->msg,
+ recv_msg->msg_len,
+ buf_out, &out_size);
+ break;
+ case HINIC_MOD_L2NIC:
+ hinic_l2nic_async_event_handle(pf_to_mgmt->hwdev, param,
+ recv_msg->cmd, recv_msg->msg,
+ recv_msg->msg_len,
+ buf_out, &out_size);
+ break;
+ case HINIC_MOD_HILINK:
+ hinic_hilink_async_event_handle(pf_to_mgmt->hwdev,
+ recv_msg->cmd, recv_msg->msg,
+ recv_msg->msg_len,
+ buf_out, &out_size);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "No handler, mod: %d", recv_msg->mod);
+ break;
+ }
+
+ if (!recv_msg->async_mgmt_to_pf) {
+ if (!out_size)
+ out_size = BUF_OUT_DEFAULT_SIZE;
+
+ /* MGMT sent sync msg, send the response */
+ (void)send_msg_to_mgmt_async(pf_to_mgmt, recv_msg->mod,
+ recv_msg->cmd, buf_out, out_size,
+ HINIC_MSG_RESPONSE,
+ recv_msg->msg_id);
+ }
+}
+
+/**
+ * recv_mgmt_msg_handler - handle a message from the mgmt cpu
+ * @pf_to_mgmt: PF to MGMT channel
+ * @header: the header of the message
+ * @recv_msg: received message details
+ * @param: customized parameter
+ * Return: 0 when the AEQE carries a response message; otherwise HINIC_ERROR
+ * (bad sequence/segment, not the last segment, or a direct-send message
+ * that was dispatched to the recv handler)
+ */
+static int recv_mgmt_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+ u8 *header, struct hinic_recv_msg *recv_msg,
+ void *param)
+{
+ u64 msg_header = *((u64 *)header);
+ void *msg_body = header + sizeof(msg_header);
+ u8 *dest_msg;
+ u8 seq_id, seq_len;
+ u32 msg_buf_max = MAX_PF_MGMT_BUF_SIZE;
+
+ seq_id = HINIC_MSG_HEADER_GET(msg_header, SEQID);
+ seq_len = HINIC_MSG_HEADER_GET(msg_header, SEG_LEN);
+
+ if (!check_mgmt_seq_id_and_seg_len(recv_msg, seq_id, seq_len)) {
+ PMD_DRV_LOG(ERR,
+ "Mgmt msg sequence and segment check failed, "
+ "func id: 0x%x, front id: 0x%x, current id: 0x%x, seg len: 0x%x",
+ hinic_global_func_id(pf_to_mgmt->hwdev),
+ recv_msg->sed_id, seq_id, seq_len);
+ return HINIC_ERROR;
+ }
+
+ dest_msg = (u8 *)recv_msg->msg + seq_id * HINIC_MSG_SEG_LEN;
+ msg_buf_max -= seq_id * HINIC_MSG_SEG_LEN;
+ memcpy(dest_msg, msg_body, seq_len);
+
+ if (!HINIC_MSG_HEADER_GET(msg_header, LAST))
+ return HINIC_ERROR;
+
+ recv_msg->cmd = HINIC_MSG_HEADER_GET(msg_header, CMD);
+ recv_msg->mod = HINIC_MSG_HEADER_GET(msg_header, MODULE);
+ recv_msg->async_mgmt_to_pf = HINIC_MSG_HEADER_GET(msg_header,
+ ASYNC_MGMT_TO_PF);
+ recv_msg->msg_len = HINIC_MSG_HEADER_GET(msg_header, MSG_LEN);
+ recv_msg->msg_id = HINIC_MSG_HEADER_GET(msg_header, MSG_ID);
+
+ if (HINIC_MSG_HEADER_GET(msg_header, DIRECTION) == HINIC_MSG_RESPONSE)
+ return HINIC_OK;
+
+ hinic_mgmt_recv_msg_handler(pf_to_mgmt, recv_msg, param);
+
+ return HINIC_ERROR;
+}
+
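recv_mgmt_msg_handler() above reassembles a multi-segment management message by copying each segment to offset seq_id * HINIC_MSG_SEG_LEN in the receive buffer and treating the message as complete only when the LAST bit is set. A standalone sketch of that reassembly step (not part of this patch; SEG_LEN and MSG_MAX are illustrative stand-ins):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define SEG_LEN 48	/* stand-in for HINIC_MSG_SEG_LEN */
#define MSG_MAX 2048	/* stand-in for MAX_PF_MGMT_BUF_SIZE */

/* Illustrative only: place one segment at its offset and report whether
 * the whole message is complete once the last segment has arrived.
 */
static bool reassemble_segment(uint8_t *msg_buf, uint8_t seq_id,
			       const uint8_t *seg, uint8_t seg_len, bool last)
{
	if ((size_t)seq_id * SEG_LEN + seg_len > MSG_MAX)
		return false;		/* segment would overflow the buffer */

	memcpy(msg_buf + (size_t)seq_id * SEG_LEN, seg, seg_len);
	return last;			/* complete only on the last segment */
}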
+/**
+ * hinic_mgmt_msg_aeqe_handler - handler for a mgmt message event
+ * @hwdev: the pointer to the private hardware device object
+ * @header: the header of the message
+ * @size: unused
+ * @param: customized parameter
+ * Return: 0 when the AEQE carries a response message; otherwise HINIC_ERROR
+ * (wrong message or not the last segment)
+ */
+static int hinic_mgmt_msg_aeqe_handler(void *hwdev, u8 *header,
+ __rte_unused u8 size, void *param)
+{
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt =
+ ((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
+ struct hinic_recv_msg *recv_msg;
+
+ recv_msg = (HINIC_MSG_HEADER_GET(*(u64 *)header, DIRECTION) ==
+ HINIC_MSG_DIRECT_SEND) ?
+ &pf_to_mgmt->recv_msg_from_mgmt :
+ &pf_to_mgmt->recv_resp_msg_from_mgmt;
+
+ return recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg, param);
+}
+
+static int hinic_handle_aeqe(void *handle, enum hinic_aeq_type event,
+ u8 *data, u8 size, void *param)
+{
+ int rc = 0;
+
+ switch (event) {
+ case HINIC_MSG_FROM_MGMT_CPU:
+ rc = hinic_mgmt_msg_aeqe_handler(handle, data, size, param);
+ break;
+ case HINIC_MBX_FROM_FUNC:
+ rc = hinic_mbox_func_aeqe_handler(handle, data, size, param);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unknown event type: 0x%x, size: %d",
+ event, size);
+ rc = HINIC_ERROR;
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * hinic_aeq_poll_msg - poll one or more AEQEs and dispatch them to the
+ * dedicated handler
+ * @eq: aeq of the chip
+ * @timeout: 0 - poll all aeqe in eq, used in interrupt mode,
+ * > 0 - poll aeq until get aeqe with 'last' field set to 1,
+ * used in polling mode.
+ * @param: customized parameter
+ * Return: 0 - success, -EIO - poll timeout, -ENODEV - sw event not supported
+ */
+int hinic_aeq_poll_msg(struct hinic_eq *eq, u32 timeout, void *param)
+{
+ struct hinic_aeq_elem *aeqe_pos;
+ enum hinic_aeq_type event;
+ u32 aeqe_desc = 0;
+ u16 i;
+ u8 size;
+ int done = HINIC_ERROR;
+ int err = -EFAULT;
+ unsigned long end;
+
+ for (i = 0; ((timeout == 0) && (i < eq->eq_len)) ||
+ ((timeout > 0) && (done != HINIC_OK) && (i < eq->eq_len)); i++) {
+ err = -EIO;
+ end = jiffies + msecs_to_jiffies(timeout);
+ do {
+ aeqe_pos = GET_CURR_AEQ_ELEM(eq);
+ rte_rmb();
+
+ /* Data in HW is in Big endian Format */
+ aeqe_desc = be32_to_cpu(aeqe_pos->desc);
+
+ /* HW updates wrapped bit,
+ * when it adds eq element event
+ */
+ if (EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED)
+ != eq->wrapped) {
+ err = 0;
+ break;
+ }
+
+ if (timeout != 0)
+ usleep(1000);
+ } while (time_before(jiffies, end));
+
+ if (err != HINIC_OK) /*poll time out*/
+ break;
+
+ event = EQ_ELEM_DESC_GET(aeqe_desc, TYPE);
+ if (EQ_ELEM_DESC_GET(aeqe_desc, SRC)) {
+ PMD_DRV_LOG(ERR, "AEQ sw event not support %d", event);
+ return -ENODEV;
+
+ } else {
+ size = EQ_ELEM_DESC_GET(aeqe_desc, SIZE);
+ done = hinic_handle_aeqe(eq->hwdev, event,
+ aeqe_pos->aeqe_data,
+ size, param);
+ }
+
+ eq->cons_idx++;
+ if (eq->cons_idx == eq->eq_len) {
+ eq->cons_idx = 0;
+ eq->wrapped = !eq->wrapped;
+ }
+ }
+
+ eq_update_ci(eq);
+
+ return err;
+}
+
+int hinic_comm_pf_to_mgmt_init(struct hinic_hwdev *hwdev)
+{
+ int rc;
+
+ /* VF do not support send msg to mgmt directly */
+ if (hinic_func_type(hwdev) == TYPE_VF)
+ return 0;
+
+ rc = hinic_pf_to_mgmt_init(hwdev);
+ if (rc)
+ return rc;
+
+ hwdev->pf_to_mgmt->rx_aeq = &hwdev->aeqs->aeq[HINIC_MGMT_RSP_AEQN];
+
+ return 0;
+}
+
+void hinic_comm_pf_to_mgmt_free(struct hinic_hwdev *hwdev)
+{
+ /* VF do not support send msg to mgmt directly */
+ if (hinic_func_type(hwdev) == TYPE_VF)
+ return;
+
+ hinic_pf_to_mgmt_free(hwdev);
+}
+
+void hinic_dev_handle_aeq_event(struct hinic_hwdev *hwdev, void *param)
+{
+ struct hinic_eq *aeq = &hwdev->aeqs->aeq[0];
+
+ /* clear resend timer cnt register */
+ hinic_misx_intr_clear_resend_bit(hwdev, aeq->eq_irq.msix_entry_idx,
+ EQ_MSIX_RESEND_TIMER_CLEAR);
+ (void)hinic_aeq_poll_msg(aeq, 0, param);
+}
diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_mgmt.h b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_mgmt.h
new file mode 100644
index 000000000..52b319ead
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_mgmt.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_PMD_MGMT_H_
+#define _HINIC_PMD_MGMT_H_
+
+#include "hinic_pmd_api_cmd.h"
+#include "hinic_pmd_eqs.h"
+
+#define HINIC_MSG_HEADER_MSG_LEN_SHIFT 0
+#define HINIC_MSG_HEADER_MODULE_SHIFT 11
+#define HINIC_MSG_HEADER_SEG_LEN_SHIFT 16
+#define HINIC_MSG_HEADER_NO_ACK_SHIFT 22
+#define HINIC_MSG_HEADER_ASYNC_MGMT_TO_PF_SHIFT 23
+#define HINIC_MSG_HEADER_SEQID_SHIFT 24
+#define HINIC_MSG_HEADER_LAST_SHIFT 30
+#define HINIC_MSG_HEADER_DIRECTION_SHIFT 31
+#define HINIC_MSG_HEADER_CMD_SHIFT 32
+#define HINIC_MSG_HEADER_PCI_INTF_IDX_SHIFT 48
+#define HINIC_MSG_HEADER_P2P_IDX_SHIFT 50
+#define HINIC_MSG_HEADER_MSG_ID_SHIFT 54
+
+#define HINIC_MSG_HEADER_MSG_LEN_MASK 0x7FF
+#define HINIC_MSG_HEADER_MODULE_MASK 0x1F
+#define HINIC_MSG_HEADER_SEG_LEN_MASK 0x3F
+#define HINIC_MSG_HEADER_NO_ACK_MASK 0x1
+#define HINIC_MSG_HEADER_ASYNC_MGMT_TO_PF_MASK 0x1
+#define HINIC_MSG_HEADER_SEQID_MASK 0x3F
+#define HINIC_MSG_HEADER_LAST_MASK 0x1
+#define HINIC_MSG_HEADER_DIRECTION_MASK 0x1
+#define HINIC_MSG_HEADER_CMD_MASK 0xFF
+#define HINIC_MSG_HEADER_PCI_INTF_IDX_MASK 0x3
+#define HINIC_MSG_HEADER_P2P_IDX_MASK 0xF
+#define HINIC_MSG_HEADER_MSG_ID_MASK 0x3FF
+
+#define HINIC_DEV_BUSY_ACTIVE_FW 0xFE
+
+#define HINIC_MSG_HEADER_GET(val, member) \
+ (((val) >> HINIC_MSG_HEADER_##member##_SHIFT) & \
+ HINIC_MSG_HEADER_##member##_MASK)
+
+#define HINIC_MSG_HEADER_SET(val, member) \
+ ((u64)((val) & HINIC_MSG_HEADER_##member##_MASK) << \
+ HINIC_MSG_HEADER_##member##_SHIFT)
+
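To illustrate how these SHIFT/MASK pairs compose a 64-bit message header, here is a small standalone example (not part of this patch) that builds a header with MSG_LEN=16, CMD=0x5 and MSG_ID=3 and reads the fields back, mirroring HINIC_MSG_HEADER_SET()/HINIC_MSG_HEADER_GET():

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t hdr;

	/* SET: mask the value, then shift it to its field position */
	hdr = ((uint64_t)(16 & 0x7FF) << 0) |	/* MSG_LEN */
	      ((uint64_t)(0x5 & 0xFF) << 32) |	/* CMD */
	      ((uint64_t)(3 & 0x3FF) << 54);	/* MSG_ID */

	/* GET: shift the field back down, then mask it */
	assert(((hdr >> 0) & 0x7FF) == 16);
	assert(((hdr >> 32) & 0xFF) == 0x5);
	assert(((hdr >> 54) & 0x3FF) == 3);

	return 0;
}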
+#define HINIC_MGMT_RSP_AEQN (1)
+
+enum hinic_msg_direction_type {
+ HINIC_MSG_DIRECT_SEND = 0,
+ HINIC_MSG_RESPONSE = 1
+};
+
+enum hinic_msg_segment_type {
+ NOT_LAST_SEGMENT = 0,
+ LAST_SEGMENT = 1,
+};
+
+enum hinic_msg_ack_type {
+ HINIC_MSG_ACK = 0,
+ HINIC_MSG_NO_ACK = 1,
+};
+
+struct hinic_recv_msg {
+ void *msg;
+ void *buf_out;
+
+ u16 msg_len;
+ enum hinic_mod_type mod;
+ u8 cmd;
+ u16 msg_id;
+ int async_mgmt_to_pf;
+ u8 sed_id;
+};
+
+#define HINIC_COMM_SELF_CMD_MAX 8
+
+enum comm_pf_to_mgmt_event_state {
+ SEND_EVENT_START = 0,
+ SEND_EVENT_TIMEOUT,
+ SEND_EVENT_END,
+};
+
+struct hinic_msg_pf_to_mgmt {
+ struct hinic_hwdev *hwdev;
+
+ /* mutex for sync message */
+ pthread_mutex_t sync_msg_mutex;
+
+ void *async_msg_buf;
+ void *sync_msg_buf;
+
+ struct hinic_recv_msg recv_msg_from_mgmt;
+ struct hinic_recv_msg recv_resp_msg_from_mgmt;
+
+ u16 async_msg_id;
+ u16 sync_msg_id;
+
+ struct hinic_api_cmd_chain *cmd_chain[HINIC_API_CMD_MAX];
+
+ struct hinic_eq *rx_aeq;
+};
+
+int hinic_msg_to_mgmt_no_ack(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size);
+
+int hinic_comm_pf_to_mgmt_init(struct hinic_hwdev *hwdev);
+
+void hinic_comm_pf_to_mgmt_free(struct hinic_hwdev *hwdev);
+
+int hinic_aeq_poll_msg(struct hinic_eq *eq, u32 timeout, void *param);
+
+int hinic_msg_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size, u32 timeout);
+
+void hinic_dev_handle_aeq_event(struct hinic_hwdev *hwdev, void *param);
+
+#endif /* _HINIC_PMD_MGMT_H_ */
diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_niccfg.c b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_niccfg.c
new file mode 100644
index 000000000..c5663dfab
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_niccfg.c
@@ -0,0 +1,2121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include "hinic_compat.h"
+#include "hinic_pmd_hwdev.h"
+#include "hinic_pmd_hwif.h"
+#include "hinic_pmd_eqs.h"
+#include "hinic_pmd_wq.h"
+#include "hinic_pmd_mgmt.h"
+#include "hinic_pmd_cmdq.h"
+#include "hinic_pmd_niccfg.h"
+#include "hinic_pmd_mbox.h"
+
+#define l2nic_msg_to_mgmt_sync(hwdev, cmd, buf_in, \
+ in_size, buf_out, out_size) \
+ hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, cmd, \
+ buf_in, in_size, \
+ buf_out, out_size, 0)
+
+
+/**
+ * hinic_init_function_table - Initialize function table.
+ *
+ * @param hwdev
+ * The hardware interface of a nic device.
+ * @param rx_buf_sz
+ * Receive buffer size.
+ *
+ * @return
+ * 0 on success.
+ * negative error value otherwise.
+ */
+int hinic_init_function_table(void *hwdev, u16 rx_buf_sz)
+{
+ struct hinic_function_table function_table;
+ u16 out_size = sizeof(function_table);
+ int err;
+
+ if (!hwdev) {
+ PMD_DRV_LOG(ERR, "Hwdev is NULL");
+ return -EINVAL;
+ }
+
+ memset(&function_table, 0, sizeof(function_table));
+ function_table.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ function_table.func_id = hinic_global_func_id(hwdev);
+ function_table.mtu = 0x3FFF; /* default, max mtu */
+ function_table.rx_wqe_buf_size = rx_buf_sz;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_INIT_FUNC,
+ &function_table, sizeof(function_table),
+ &function_table, &out_size, 0);
+ if (err || function_table.mgmt_msg_head.status || !out_size) {
+ PMD_DRV_LOG(ERR,
+ "Failed to init func table, err: %d, status: 0x%x, out size: 0x%x",
+ err, function_table.mgmt_msg_head.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * hinic_get_base_qpn - Get global queue number.
+ *
+ * @param hwdev
+ * The hardware interface of a nic device.
+ * @param global_qpn
+ * Global queue number.
+ *
+ * @return
+ * 0 on success.
+ * negative error value otherwise.
+ */
+int hinic_get_base_qpn(void *hwdev, u16 *global_qpn)
+{
+ struct hinic_cmd_qpn cmd_qpn;
+ u16 out_size = sizeof(cmd_qpn);
+ int err;
+
+ if (!hwdev || !global_qpn) {
+ PMD_DRV_LOG(ERR, "Hwdev or global_qpn is NULL");
+ return -EINVAL;
+ }
+
+ memset(&cmd_qpn, 0, sizeof(cmd_qpn));
+ cmd_qpn.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ cmd_qpn.func_id = hinic_global_func_id(hwdev);
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_GET_GLOBAL_QPN,
+ &cmd_qpn, sizeof(cmd_qpn), &cmd_qpn,
+ &out_size, 0);
+ if (err || !out_size || cmd_qpn.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR,
+ "Failed to get base qpn, err: %d, status: 0x%x, out size: 0x%x",
+ err, cmd_qpn.mgmt_msg_head.status, out_size);
+ return -EINVAL;
+ }
+
+ *global_qpn = cmd_qpn.base_qpn;
+
+ return 0;
+}
+
+/**
+ * hinic_set_mac - Init mac_vlan table in NIC.
+ *
+ * @param hwdev
+ * The hardware interface of a nic device.
+ * @param mac_addr
+ * MAC address.
+ * @param vlan_id
+ * Set 0 for mac_vlan table initialization.
+ * @param func_id
+ * Global function id of NIC.
+ *
+ * @return
+ * 0 on success.
+ * negative error value otherwise.
+ */
+int hinic_set_mac(void *hwdev, u8 *mac_addr, u16 vlan_id, u16 func_id)
+{
+ struct hinic_port_mac_set mac_info;
+ u16 out_size = sizeof(mac_info);
+ int err;
+
+ if (!hwdev || !mac_addr) {
+ PMD_DRV_LOG(ERR, "Hwdev or mac_addr is NULL");
+ return -EINVAL;
+ }
+
+ memset(&mac_info, 0, sizeof(mac_info));
+ mac_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ mac_info.func_id = func_id;
+ mac_info.vlan_id = vlan_id;
+ memmove(mac_info.mac, mac_addr, ETH_ALEN);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_MAC, &mac_info,
+ sizeof(mac_info), &mac_info, &out_size);
+ if (err || !out_size || (mac_info.mgmt_msg_head.status &&
+ mac_info.mgmt_msg_head.status != HINIC_PF_SET_VF_ALREADY)) {
+ PMD_DRV_LOG(ERR, "Failed to set MAC, err: %d, status: 0x%x, out size: 0x%x",
+ err, mac_info.mgmt_msg_head.status, out_size);
+ return -EINVAL;
+ }
+
+ if (mac_info.mgmt_msg_head.status == HINIC_PF_SET_VF_ALREADY) {
+ PMD_DRV_LOG(WARNING, "PF has already set vf mac, Ignore set operation.");
+ return HINIC_PF_SET_VF_ALREADY;
+ }
+
+ return 0;
+}
+
+/**
+ * hinic_del_mac - Uninit mac_vlan table in NIC.
+ *
+ * @param hwdev
+ * The hardware interface of a nic device.
+ * @param mac_addr
+ * MAC address.
+ * @param vlan_id
+ * Set 0 for mac_vlan table initialization.
+ * @param func_id
+ * Global function id of NIC.
+ *
+ * @return
+ * 0 on success.
+ * negative error value otherwise.
+ */
+int hinic_del_mac(void *hwdev, u8 *mac_addr, u16 vlan_id, u16 func_id)
+{
+ struct hinic_port_mac_set mac_info;
+ u16 out_size = sizeof(mac_info);
+ int err;
+
+ if (!hwdev || !mac_addr) {
+ PMD_DRV_LOG(ERR, "Hwdev or mac_addr is NULL");
+ return -EINVAL;
+ }
+
+ if (vlan_id >= VLAN_N_VID) {
+ PMD_DRV_LOG(ERR, "Invalid VLAN number");
+ return -EINVAL;
+ }
+
+ memset(&mac_info, 0, sizeof(mac_info));
+ mac_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ mac_info.func_id = func_id;
+ mac_info.vlan_id = vlan_id;
+ memmove(mac_info.mac, mac_addr, ETH_ALEN);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_DEL_MAC, &mac_info,
+ sizeof(mac_info), &mac_info, &out_size);
+ if (err || !out_size || (mac_info.mgmt_msg_head.status &&
+ mac_info.mgmt_msg_head.status != HINIC_PF_SET_VF_ALREADY)) {
+ PMD_DRV_LOG(ERR, "Failed to delete MAC, err: %d, status: 0x%x, out size: 0x%x",
+ err, mac_info.mgmt_msg_head.status, out_size);
+ return -EINVAL;
+ }
+ if (mac_info.mgmt_msg_head.status == HINIC_PF_SET_VF_ALREADY) {
+ PMD_DRV_LOG(WARNING, "PF has already set vf mac, Ignore delete operation.");
+ return HINIC_PF_SET_VF_ALREADY;
+ }
+
+ return 0;
+}
+
+/**
+ * hinic_get_default_mac - Get default mac address from hardware.
+ *
+ * @param hwdev
+ * The hardware interface of a nic device.
+ * @param mac_addr
+ * MAC address.
+ *
+ * @return
+ * 0 on success.
+ * negative error value otherwise.
+ */
+int hinic_get_default_mac(void *hwdev, u8 *mac_addr)
+{
+ struct hinic_port_mac_set mac_info;
+ u16 out_size = sizeof(mac_info);
+ int err;
+
+ if (!hwdev || !mac_addr) {
+ PMD_DRV_LOG(ERR, "Hwdev or mac_addr is NULL");
+ return -EINVAL;
+ }
+
+ memset(&mac_info, 0, sizeof(mac_info));
+ mac_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ mac_info.func_id = hinic_global_func_id(hwdev);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_MAC,
+ &mac_info, sizeof(mac_info),
+ &mac_info, &out_size);
+ if (err || !out_size || mac_info.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR, "Failed to get mac, err: %d, status: 0x%x, out size: 0x%x",
+ err, mac_info.mgmt_msg_head.status, out_size);
+ return -EINVAL;
+ }
+
+ memmove(mac_addr, mac_info.mac, ETH_ALEN);
+
+ return 0;
+}
+
+/**
+ * hinic_update_mac - Update mac address to hardware.
+ *
+ * @param hwdev
+ * The hardware interface of a nic device.
+ * @param old_mac
+ * Old mac address.
+ * @param new_mac
+ * New mac address.
+ * @param vlan_id
+ * Set 0 for mac_vlan table initialization.
+ * @param func_id
+ * Global function id of NIC.
+ *
+ * @return
+ * 0 on success.
+ * negative error value otherwise.
+ */
+int hinic_update_mac(void *hwdev, u8 *old_mac, u8 *new_mac, u16 vlan_id,
+ u16 func_id)
+{
+ struct hinic_port_mac_update mac_info;
+ u16 out_size = sizeof(mac_info);
+ int err;
+
+ if (!hwdev || !old_mac || !new_mac) {
+ PMD_DRV_LOG(ERR, "Hwdev, old_mac or new_mac is NULL");
+ return -EINVAL;
+ }
+
+ memset(&mac_info, 0, sizeof(mac_info));
+ mac_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ mac_info.func_id = func_id;
+ mac_info.vlan_id = vlan_id;
+ memcpy(mac_info.old_mac, old_mac, ETH_ALEN);
+ memcpy(mac_info.new_mac, new_mac, ETH_ALEN);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_UPDATE_MAC,
+ &mac_info, sizeof(mac_info),
+ &mac_info, &out_size);
+ if (err || !out_size ||
+ (mac_info.mgmt_msg_head.status &&
+ mac_info.mgmt_msg_head.status != HINIC_PF_SET_VF_ALREADY)) {
+ PMD_DRV_LOG(ERR, "Failed to update MAC, err: %d, status: 0x%x, out size: 0x%x",
+ err, mac_info.mgmt_msg_head.status, out_size);
+ return -EINVAL;
+ }
+ if (mac_info.mgmt_msg_head.status == HINIC_PF_SET_VF_ALREADY) {
+ PMD_DRV_LOG(WARNING, "PF has already set vf mac, Ignore update operation");
+ return HINIC_PF_SET_VF_ALREADY;
+ }
+
+ return 0;
+}
+
+/**
+ * hinic_set_port_mtu - Set MTU to port.
+ *
+ * @param hwdev
+ * The hardware interface of a nic device.
+ * @param new_mtu
+ * MTU size.
+ *
+ * @return
+ * 0 on success.
+ * negative error value otherwise.
+ */
+int hinic_set_port_mtu(void *hwdev, u32 new_mtu)
+{
+ struct hinic_mtu mtu_info;
+ u16 out_size = sizeof(mtu_info);
+ int err;
+
+ if (!hwdev) {
+ PMD_DRV_LOG(ERR, "Hwdev is NULL");
+ return -EINVAL;
+ }
+
+ memset(&mtu_info, 0, sizeof(mtu_info));
+ mtu_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ mtu_info.func_id = hinic_global_func_id(hwdev);
+ mtu_info.mtu = new_mtu;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_CHANGE_MTU,
+ &mtu_info, sizeof(mtu_info),
+ &mtu_info, &out_size);
+ if (err || !out_size || mtu_info.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR, "Failed to set mtu, err: %d, status: 0x%x, out size: 0x%x",
+ err, mtu_info.mgmt_msg_head.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * hinic_add_remove_vlan - Add or remove vlan id to vlan elb table.
+ *
+ * @param hwdev
+ * The hardware interface of a nic device.
+ * @param vlan_id
+ * Vlan id.
+ * @param func_id
+ * Global function id of NIC.
+ * @param add
+ * Add or remove operation.
+ *
+ * @return
+ * 0 on success.
+ * negative error value otherwise.
+ */
+int hinic_add_remove_vlan(void *hwdev, u16 vlan_id, u16 func_id, bool add)
+{
+ struct hinic_vlan_config vlan_info;
+ u16 out_size = sizeof(vlan_info);
+ u8 cmd;
+ int err;
+
+ if (!hwdev) {
+ PMD_DRV_LOG(ERR, "Hwdev is NULL");
+ return -EINVAL;
+ }
+
+ cmd = add ? HINIC_PORT_CMD_ADD_VLAN : HINIC_PORT_CMD_DEL_VLAN;
+
+ memset(&vlan_info, 0, sizeof(vlan_info));
+ vlan_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ vlan_info.func_id = func_id;
+ vlan_info.vlan_id = vlan_id;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, cmd, &vlan_info,
+ sizeof(vlan_info), &vlan_info,
+ &out_size);
+ if (err || !out_size || vlan_info.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR,
+ "Failed to %s vlan, err: %d, status: 0x%x, out size: 0x%x",
+ add ? "add" : "remove", err,
+ vlan_info.mgmt_msg_head.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * hinic_config_vlan_filter - Enable or Disable vlan filter.
+ *
+ * @param hwdev
+ * The hardware interface of a nic device.
+ * @param vlan_filter_ctrl
+ * Enable or Disable.
+ *
+ * @return
+ * 0 on success.
+ * negative error value otherwise.
+ */
+int hinic_config_vlan_filter(void *hwdev, u32 vlan_filter_ctrl)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_vlan_filter vlan_filter;
+ u16 out_size = sizeof(vlan_filter);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ memset(&vlan_filter, 0, sizeof(vlan_filter));
+ vlan_filter.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ vlan_filter.func_id = hinic_global_func_id(nic_hwdev);
+ vlan_filter.vlan_filter_ctrl = vlan_filter_ctrl;
+
+ err = l2nic_msg_to_mgmt_sync(nic_hwdev, HINIC_PORT_CMD_SET_VLAN_FILTER,
+ &vlan_filter, sizeof(vlan_filter),
+ &vlan_filter, &out_size);
+ if (vlan_filter.mgmt_msg_head.status == HINIC_MGMT_CMD_UNSUPPORTED) {
+ err = HINIC_MGMT_CMD_UNSUPPORTED;
+ } else if ((err == HINIC_MBOX_VF_CMD_ERROR) &&
+ (HINIC_IS_VF(nic_hwdev))) {
+ err = HINIC_MGMT_CMD_UNSUPPORTED;
+ } else if (err || !out_size || vlan_filter.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR,
+ "Failed to config vlan filter, vlan_filter_ctrl: 0x%x, err: %d, status: 0x%x, out size: 0x%x",
+ vlan_filter_ctrl, err,
+ vlan_filter.mgmt_msg_head.status, out_size);
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+/**
+ * hinic_set_rx_vlan_offload - Enable or Disable vlan offload.
+ *
+ * @param hwdev
+ * The hardware interface of a nic device.
+ * @param en
+ * Enable or Disable.
+ *
+ * @return
+ * 0 on success.
+ * negative error value otherwise.
+ */
+int hinic_set_rx_vlan_offload(void *hwdev, u8 en)
+{
+ struct hinic_vlan_offload vlan_cfg;
+ u16 out_size = sizeof(vlan_cfg);
+ int err;
+
+ if (!hwdev) {
+ PMD_DRV_LOG(ERR, "Hwdev is NULL");
+ return -EINVAL;
+ }
+
+ memset(&vlan_cfg, 0, sizeof(vlan_cfg));
+ vlan_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ vlan_cfg.func_id = hinic_global_func_id(hwdev);
+ vlan_cfg.vlan_rx_offload = en;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD,
+ &vlan_cfg, sizeof(vlan_cfg),
+ &vlan_cfg, &out_size);
+ if (err || !out_size || vlan_cfg.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR,
+ "Failed to set rx vlan offload, err: %d, status: 0x%x, out size: 0x%x",
+ err, vlan_cfg.mgmt_msg_head.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * hinic_get_link_status - Get link status from hardware.
+ *
+ * @param hwdev
+ * The hardware interface of a nic device.
+ * @param link_state
+ * Link status.
+ *
+ * @return
+ * 0 on success.
+ * negative error value otherwise.
+ */
+int hinic_get_link_status(void *hwdev, u8 *link_state)
+{
+ struct hinic_get_link get_link;
+ u16 out_size = sizeof(get_link);
+ int err;
+
+ if (!hwdev || !link_state) {
+ PMD_DRV_LOG(ERR, "Hwdev or link_state is NULL");
+ return -EINVAL;
+ }
+
+ memset(&get_link, 0, sizeof(get_link));
+ get_link.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ get_link.func_id = hinic_global_func_id(hwdev);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_LINK_STATE,
+ &get_link, sizeof(get_link),
+ &get_link, &out_size);
+ if (err || !out_size || get_link.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR, "Failed to get link state, err: %d, status: 0x%x, out size: 0x%x",
+ err, get_link.mgmt_msg_head.status, out_size);
+ return -EINVAL;
+ }
+
+ *link_state = get_link.link_status;
+
+ return 0;
+}
+
+/**
+ * hinic_set_vport_enable - Notify firmware that driver is ready or not.
+ *
+ * @param hwdev
+ * The hardware interface of a nic device.
+ * @param enable
+ * 1: driver is ready; 0: driver is not ok.
+ *
+ * @return
+ * 0 on success.
+ * negative error value otherwise.
+ */
+int hinic_set_vport_enable(void *hwdev, bool enable)
+{
+ struct hinic_vport_state en_state;
+ u16 out_size = sizeof(en_state);
+ int err;
+
+ if (!hwdev) {
+ PMD_DRV_LOG(ERR, "Hwdev is NULL");
+ return -EINVAL;
+ }
+
+ memset(&en_state, 0, sizeof(en_state));
+ en_state.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ en_state.func_id = hinic_global_func_id(hwdev);
+ en_state.state = (enable ? 1 : 0);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_VPORT_ENABLE,
+ &en_state, sizeof(en_state),
+ &en_state, &out_size);
+ if (err || !out_size || en_state.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR, "Failed to set vport state, err: %d, status: 0x%x, out size: 0x%x",
+ err, en_state.mgmt_msg_head.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * hinic_set_port_enable - Open MAG to receive packets.
+ *
+ * @param hwdev
+ * The hardware interface of a nic device.
+ * @param enable
+ * 1: open MAG; 0: close MAG.
+ *
+ * @return
+ * 0 on success.
+ * negative error value otherwise.
+ */
+int hinic_set_port_enable(void *hwdev, bool enable)
+{
+ struct hinic_port_state en_state;
+ u16 out_size = sizeof(en_state);
+ int err;
+
+ if (!hwdev) {
+ PMD_DRV_LOG(ERR, "Hwdev is NULL");
+ return -EINVAL;
+ }
+
+ if (HINIC_IS_VF((struct hinic_hwdev *)hwdev))
+ return 0;
+
+ memset(&en_state, 0, sizeof(en_state));
+ en_state.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ en_state.state = (enable ? HINIC_PORT_ENABLE : HINIC_PORT_DISABLE);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_PORT_ENABLE,
+ &en_state, sizeof(en_state),
+ &en_state, &out_size);
+ if (err || !out_size || en_state.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR, "Failed to set phy port state, err: %d, status: 0x%x, out size: 0x%x",
+ err, en_state.mgmt_msg_head.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_get_port_info(void *hwdev, struct nic_port_info *port_info)
+{
+ struct hinic_port_info port_msg;
+ u16 out_size = sizeof(port_msg);
+ int err;
+
+ if (!hwdev || !port_info) {
+ PMD_DRV_LOG(ERR, "Hwdev or port_info is NULL");
+ return -EINVAL;
+ }
+
+ memset(&port_msg, 0, sizeof(port_msg));
+ port_msg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ port_msg.func_id = hinic_global_func_id(hwdev);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_PORT_INFO,
+ &port_msg, sizeof(port_msg),
+ &port_msg, &out_size);
+ if (err || !out_size || port_msg.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR,
+ "Failed to get port info, err: %d, status: 0x%x, out size: 0x%x",
+ err, port_msg.mgmt_msg_head.status, out_size);
+ return err;
+ }
+
+ port_info->autoneg_cap = port_msg.autoneg_cap;
+ port_info->autoneg_state = port_msg.autoneg_state;
+ port_info->duplex = port_msg.duplex;
+ port_info->port_type = port_msg.port_type;
+ port_info->speed = port_msg.speed;
+
+ return 0;
+}
+
+int hinic_set_pause_config(void *hwdev, struct nic_pause_config nic_pause)
+{
+ struct hinic_pause_config pause_info;
+ u16 out_size = sizeof(pause_info);
+ int err;
+
+ if (!hwdev) {
+ PMD_DRV_LOG(ERR, "Hwdev is NULL");
+ return -EINVAL;
+ }
+
+ memset(&pause_info, 0, sizeof(pause_info));
+ pause_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ pause_info.func_id = hinic_global_func_id(hwdev);
+ pause_info.auto_neg = nic_pause.auto_neg;
+ pause_info.rx_pause = nic_pause.rx_pause;
+ pause_info.tx_pause = nic_pause.tx_pause;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_PAUSE_INFO,
+ &pause_info, sizeof(pause_info),
+ &pause_info, &out_size);
+ if (err || !out_size || pause_info.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR, "Failed to set pause info, err: %d, status: 0x%x, out size: 0x%x",
+ err, pause_info.mgmt_msg_head.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_get_pause_info(void *hwdev, struct nic_pause_config *nic_pause)
+{
+ struct hinic_pause_config pause_info;
+ u16 out_size = sizeof(pause_info);
+ int err;
+
+ if (!hwdev || !nic_pause)
+ return -EINVAL;
+
+ memset(&pause_info, 0, sizeof(pause_info));
+ pause_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ pause_info.func_id = hinic_global_func_id(hwdev);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_PAUSE_INFO,
+ &pause_info, sizeof(pause_info),
+ &pause_info, &out_size);
+ if (err || !out_size || pause_info.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR, "Failed to get pause info, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, pause_info.mgmt_msg_head.status, out_size);
+ return -EINVAL;
+ }
+
+ nic_pause->auto_neg = pause_info.auto_neg;
+ nic_pause->rx_pause = pause_info.rx_pause;
+ nic_pause->tx_pause = pause_info.tx_pause;
+
+ return 0;
+}
+
+int hinic_dcb_set_ets(void *hwdev, u8 *up_tc, u8 *pg_bw,
+ u8 *pgid, u8 *up_bw, u8 *prio)
+{
+ struct hinic_up_ets_cfg ets;
+ u16 out_size = sizeof(ets);
+ u16 up_bw_t = 0;
+ u8 pg_bw_t = 0;
+ int i, err;
+
+ if (!hwdev || !up_tc || !pg_bw || !pgid || !up_bw || !prio) {
+ PMD_DRV_LOG(ERR, "Hwdev, up_tc, pg_bw, pgid, up_bw or prio is NULL");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < HINIC_DCB_TC_MAX; i++) {
+ up_bw_t += *(up_bw + i);
+ pg_bw_t += *(pg_bw + i);
+
+ if (*(up_tc + i) > HINIC_DCB_TC_MAX) {
+ PMD_DRV_LOG(ERR, "Invalid up %d mapping tc: %d", i,
+ *(up_tc + i));
+ return -EINVAL;
+ }
+ }
+
+ if (pg_bw_t != 100 || (up_bw_t % 100) != 0) {
+ PMD_DRV_LOG(ERR,
+ "Invalid pg_bw: %d or up_bw: %d", pg_bw_t, up_bw_t);
+ return -EINVAL;
+ }
+
+ memset(&ets, 0, sizeof(ets));
+ ets.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ ets.port_id = 0; /* reserved */
+ memcpy(ets.up_tc, up_tc, HINIC_DCB_TC_MAX);
+ memcpy(ets.pg_bw, pg_bw, HINIC_DCB_UP_MAX);
+ memcpy(ets.pgid, pgid, HINIC_DCB_UP_MAX);
+ memcpy(ets.up_bw, up_bw, HINIC_DCB_UP_MAX);
+ memcpy(ets.prio, prio, HINIC_DCB_UP_MAX);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_ETS,
+ &ets, sizeof(ets), &ets, &out_size);
+ if (err || ets.mgmt_msg_head.status || !out_size) {
+ PMD_DRV_LOG(ERR,
+ "Failed to set ets, err: %d, status: 0x%x, out size: 0x%x",
+ err, ets.mgmt_msg_head.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_get_vport_stats(void *hwdev, struct hinic_vport_stats *stats)
+{
+ struct hinic_port_stats_info vport_stats_cmd;
+ struct hinic_cmd_vport_stats vport_stats_rsp;
+ u16 out_size = sizeof(vport_stats_rsp);
+ int err;
+
+ if (!hwdev || !stats) {
+ PMD_DRV_LOG(ERR, "Hwdev or stats is NULL");
+ return -EINVAL;
+ }
+
+ memset(&vport_stats_rsp, 0, sizeof(vport_stats_rsp));
+ memset(&vport_stats_cmd, 0, sizeof(vport_stats_cmd));
+ vport_stats_cmd.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ vport_stats_cmd.stats_version = HINIC_PORT_STATS_VERSION;
+ vport_stats_cmd.func_id = hinic_global_func_id(hwdev);
+ vport_stats_cmd.stats_size = sizeof(vport_stats_rsp);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_VPORT_STAT,
+ &vport_stats_cmd, sizeof(vport_stats_cmd),
+ &vport_stats_rsp, &out_size);
+ if (err || !out_size || vport_stats_rsp.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR,
+ "Get vport stats from fw failed, err: %d, status: 0x%x, out size: 0x%x",
+ err, vport_stats_rsp.mgmt_msg_head.status, out_size);
+ return -EFAULT;
+ }
+
+ memcpy(stats, &vport_stats_rsp.stats, sizeof(*stats));
+
+ return 0;
+}
+
+int hinic_get_phy_port_stats(void *hwdev, struct hinic_phy_port_stats *stats)
+{
+ struct hinic_port_stats_info port_stats_cmd;
+ struct hinic_port_stats port_stats_rsp;
+ u16 out_size = sizeof(port_stats_rsp);
+ int err;
+
+ if (!hwdev || !stats) {
+ PMD_DRV_LOG(ERR, "Hwdev or stats is NULL");
+ return -EINVAL;
+ }
+
+ memset(&port_stats_rsp, 0, sizeof(port_stats_rsp));
+ memset(&port_stats_cmd, 0, sizeof(port_stats_cmd));
+ port_stats_cmd.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ port_stats_cmd.stats_version = HINIC_PORT_STATS_VERSION;
+ port_stats_cmd.stats_size = sizeof(port_stats_rsp);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_PORT_STATISTICS,
+ &port_stats_cmd, sizeof(port_stats_cmd),
+ &port_stats_rsp, &out_size);
+ if (err || !out_size || port_stats_rsp.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR,
+ "Failed to get port statistics, err: %d, status: 0x%x, out size: 0x%x",
+ err, port_stats_rsp.mgmt_msg_head.status, out_size);
+ return -EFAULT;
+ }
+
+ memcpy(stats, &port_stats_rsp.stats, sizeof(*stats));
+
+ return 0;
+}
+
+int hinic_set_rss_type(void *hwdev, u32 tmpl_idx, struct nic_rss_type rss_type)
+{
+ struct nic_rss_context_tbl *ctx_tbl;
+ struct hinic_cmd_buf *cmd_buf;
+ u32 ctx = 0;
+ u64 out_param;
+ int err;
+
+ if (!hwdev) {
+ PMD_DRV_LOG(ERR, "Hwdev is NULL");
+ return -EINVAL;
+ }
+
+ cmd_buf = hinic_alloc_cmd_buf(hwdev);
+ if (!cmd_buf) {
+ PMD_DRV_LOG(ERR, "Failed to allocate cmd buf");
+ return -ENOMEM;
+ }
+
+ ctx |= HINIC_RSS_TYPE_SET(1, VALID) |
+ HINIC_RSS_TYPE_SET(rss_type.ipv4, IPV4) |
+ HINIC_RSS_TYPE_SET(rss_type.ipv6, IPV6) |
+ HINIC_RSS_TYPE_SET(rss_type.ipv6_ext, IPV6_EXT) |
+ HINIC_RSS_TYPE_SET(rss_type.tcp_ipv4, TCP_IPV4) |
+ HINIC_RSS_TYPE_SET(rss_type.tcp_ipv6, TCP_IPV6) |
+ HINIC_RSS_TYPE_SET(rss_type.tcp_ipv6_ext, TCP_IPV6_EXT) |
+ HINIC_RSS_TYPE_SET(rss_type.udp_ipv4, UDP_IPV4) |
+ HINIC_RSS_TYPE_SET(rss_type.udp_ipv6, UDP_IPV6);
+
+ cmd_buf->size = sizeof(struct nic_rss_context_tbl);
+
+ ctx_tbl = (struct nic_rss_context_tbl *)cmd_buf->buf;
+ ctx_tbl->group_index = cpu_to_be32(tmpl_idx);
+ ctx_tbl->offset = 0;
+ ctx_tbl->size = sizeof(u32);
+ ctx_tbl->size = cpu_to_be32(ctx_tbl->size);
+ ctx_tbl->rsvd = 0;
+ ctx_tbl->ctx = cpu_to_be32(ctx);
+
+ /* cfg the rss context table by command queue */
+ err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
+ HINIC_MOD_L2NIC,
+ HINIC_UCODE_CMD_SET_RSS_CONTEXT_TABLE,
+ cmd_buf, &out_param, 0);
+
+ hinic_free_cmd_buf(hwdev, cmd_buf);
+
+ if (err || out_param != 0) {
+ PMD_DRV_LOG(ERR, "Failed to set rss context table");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic_get_rss_type(void *hwdev, u32 tmpl_idx, struct nic_rss_type *rss_type)
+{
+ struct hinic_rss_context_table ctx_tbl;
+ u16 out_size = sizeof(ctx_tbl);
+ int err;
+
+ if (!hwdev || !rss_type) {
+ PMD_DRV_LOG(ERR, "Hwdev or rss_type is NULL");
+ return -EINVAL;
+ }
+
+ ctx_tbl.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ ctx_tbl.func_id = hinic_global_func_id(hwdev);
+ ctx_tbl.template_id = (u8)tmpl_idx;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_RSS_CTX_TBL,
+ &ctx_tbl, sizeof(ctx_tbl),
+ &ctx_tbl, &out_size);
+ if (err || !out_size || ctx_tbl.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR,
+ "Failed to get hash type, err: %d, status: 0x%x, out size: 0x%x",
+ err, ctx_tbl.mgmt_msg_head.status, out_size);
+ return -EINVAL;
+ }
+
+ rss_type->ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV4);
+ rss_type->ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV6);
+ rss_type->ipv6_ext = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV6_EXT);
+ rss_type->tcp_ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV4);
+ rss_type->tcp_ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV6);
+ rss_type->tcp_ipv6_ext =
+ HINIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV6_EXT);
+ rss_type->udp_ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV4);
+ rss_type->udp_ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV6);
+
+ return 0;
+}
+
+int hinic_rss_set_template_tbl(void *hwdev, u32 tmpl_idx, u8 *temp)
+{
+ struct hinic_rss_template_key temp_key;
+ u16 out_size = sizeof(temp_key);
+ int err;
+
+ if (!hwdev || !temp) {
+ PMD_DRV_LOG(ERR, "Hwdev or temp is NULL");
+ return -EINVAL;
+ }
+
+ memset(&temp_key, 0, sizeof(temp_key));
+ temp_key.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ temp_key.func_id = hinic_global_func_id(hwdev);
+ temp_key.template_id = (u8)tmpl_idx;
+ memcpy(temp_key.key, temp, HINIC_RSS_KEY_SIZE);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL,
+ &temp_key, sizeof(temp_key),
+ &temp_key, &out_size);
+ if (err || !out_size || temp_key.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR,
+ "Failed to set hash key, err: %d, status: 0x%x, out size: 0x%x",
+ err, temp_key.mgmt_msg_head.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_rss_get_template_tbl(void *hwdev, u32 tmpl_idx, u8 *temp)
+{
+ struct hinic_rss_template_key temp_key;
+ u16 out_size = sizeof(temp_key);
+ int err;
+
+ if (!hwdev || !temp) {
+ PMD_DRV_LOG(ERR, "Hwdev or temp is NULL");
+ return -EINVAL;
+ }
+
+ memset(&temp_key, 0, sizeof(temp_key));
+ temp_key.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ temp_key.func_id = hinic_global_func_id(hwdev);
+ temp_key.template_id = (u8)tmpl_idx;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL,
+ &temp_key, sizeof(temp_key),
+ &temp_key, &out_size);
+ if (err || !out_size || temp_key.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR, "Failed to get hash key, err: %d, status: 0x%x, out size: 0x%x",
+ err, temp_key.mgmt_msg_head.status, out_size);
+ return -EINVAL;
+ }
+
+ memcpy(temp, temp_key.key, HINIC_RSS_KEY_SIZE);
+
+ return 0;
+}
+
+/**
+ * hinic_rss_set_hash_engine - Init rss hash function.
+ *
+ * @param hwdev
+ * The hardware interface of a nic device.
+ * @param tmpl_idx
+ * Index of rss template from NIC.
+ * @param type
+ * Hash function, such as Toeplitz or XOR.
+ *
+ * @return
+ * 0 on success.
+ * negative error value otherwise.
+ */
+int hinic_rss_set_hash_engine(void *hwdev, u8 tmpl_idx, u8 type)
+{
+ struct hinic_rss_engine_type hash_type;
+ u16 out_size = sizeof(hash_type);
+ int err;
+
+ if (!hwdev) {
+ PMD_DRV_LOG(ERR, "Hwdev is NULL");
+ return -EINVAL;
+ }
+
+ memset(&hash_type, 0, sizeof(hash_type));
+ hash_type.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ hash_type.func_id = hinic_global_func_id(hwdev);
+ hash_type.hash_engine = type;
+ hash_type.template_id = tmpl_idx;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RSS_HASH_ENGINE,
+ &hash_type, sizeof(hash_type),
+ &hash_type, &out_size);
+ if (err || !out_size || hash_type.mgmt_msg_head.status) {
+		PMD_DRV_LOG(ERR, "Failed to set hash engine, err: %d, status: 0x%x, out size: 0x%x",
+ err, hash_type.mgmt_msg_head.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_rss_set_indir_tbl(void *hwdev, u32 tmpl_idx, u32 *indir_table)
+{
+ struct nic_rss_indirect_tbl *indir_tbl;
+ struct hinic_cmd_buf *cmd_buf;
+ int i;
+ u32 *temp;
+ u32 indir_size;
+ u64 out_param;
+ int err;
+
+ if (!hwdev || !indir_table) {
+ PMD_DRV_LOG(ERR, "Hwdev or indir_table is NULL");
+ return -EINVAL;
+ }
+
+ cmd_buf = hinic_alloc_cmd_buf(hwdev);
+ if (!cmd_buf) {
+ PMD_DRV_LOG(ERR, "Failed to allocate cmd buf");
+ return -ENOMEM;
+ }
+
+ cmd_buf->size = sizeof(struct nic_rss_indirect_tbl);
+ indir_tbl = cmd_buf->buf;
+ indir_tbl->group_index = cpu_to_be32(tmpl_idx);
+
+ for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++) {
+ indir_tbl->entry[i] = (u8)(*(indir_table + i));
+
+ if (0x3 == (i & 0x3)) {
+ temp = (u32 *)&indir_tbl->entry[i - 3];
+ *temp = cpu_to_be32(*temp);
+ }
+ }
+
+ /* configure the rss indirect table by command queue */
+ indir_size = HINIC_RSS_INDIR_SIZE / 2;
+ indir_tbl->offset = 0;
+ indir_tbl->size = cpu_to_be32(indir_size);
+
+ err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
+ HINIC_MOD_L2NIC,
+ HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE,
+ cmd_buf, &out_param, 0);
+ if (err || out_param != 0) {
+ PMD_DRV_LOG(ERR, "Failed to set rss indir table");
+ err = -EFAULT;
+ goto free_buf;
+ }
+
+ indir_tbl->offset = cpu_to_be32(indir_size);
+ indir_tbl->size = cpu_to_be32(indir_size);
+ memcpy(indir_tbl->entry, &indir_tbl->entry[indir_size], indir_size);
+
+ err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
+ HINIC_MOD_L2NIC,
+ HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE,
+ cmd_buf, &out_param, 0);
+ if (err || out_param != 0) {
+ PMD_DRV_LOG(ERR, "Failed to set rss indir table");
+ err = -EFAULT;
+ }
+
+free_buf:
+ hinic_free_cmd_buf(hwdev, cmd_buf);
+
+ return err;
+}
+
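The entry-packing loop in hinic_rss_set_indir_tbl() above stores one u8 indirection entry per byte and byte-swaps each completed 32-bit word to big endian for the hardware. A standalone sketch of that packing (not part of this patch; htonl() stands in for cpu_to_be32() and INDIR_SIZE for HINIC_RSS_INDIR_SIZE):

#include <stdint.h>
#include <arpa/inet.h>	/* htonl() as a stand-in for cpu_to_be32() */

#define INDIR_SIZE 256	/* stand-in for HINIC_RSS_INDIR_SIZE */

/* Illustrative only: copy the 32-bit indirection entries into a byte
 * table and convert every completed 4-byte group to big endian.
 */
static void pack_indir_entries(uint8_t *entry, const uint32_t *indir_table)
{
	int i;
	uint32_t *word;

	for (i = 0; i < INDIR_SIZE; i++) {
		entry[i] = (uint8_t)indir_table[i];

		if ((i & 0x3) == 0x3) {		/* every 4th byte written */
			word = (uint32_t *)&entry[i - 3];
			*word = htonl(*word);
		}
	}
}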
+int hinic_rss_get_indir_tbl(void *hwdev, u32 tmpl_idx, u32 *indir_table)
+{
+ struct hinic_rss_indir_table rss_cfg;
+ u16 out_size = sizeof(rss_cfg);
+ int err = 0, i;
+
+ if (!hwdev || !indir_table) {
+ PMD_DRV_LOG(ERR, "Hwdev or indir_table is NULL");
+ return -EINVAL;
+ }
+
+ memset(&rss_cfg, 0, sizeof(rss_cfg));
+ rss_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ rss_cfg.func_id = hinic_global_func_id(hwdev);
+ rss_cfg.template_id = (u8)tmpl_idx;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev,
+ HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL,
+ &rss_cfg, sizeof(rss_cfg), &rss_cfg,
+ &out_size);
+ if (err || !out_size || rss_cfg.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR, "Failed to get indir table, err: %d, status: 0x%x, out size: 0x%x",
+ err, rss_cfg.mgmt_msg_head.status, out_size);
+ return -EINVAL;
+ }
+
+ hinic_be32_to_cpu(rss_cfg.indir, HINIC_RSS_INDIR_SIZE);
+ for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++)
+ indir_table[i] = rss_cfg.indir[i];
+
+ return 0;
+}
+
+int hinic_rss_cfg(void *hwdev, u8 rss_en, u8 tmpl_idx, u8 tc_num, u8 *prio_tc)
+{
+ struct hinic_rss_config rss_cfg;
+ u16 out_size = sizeof(rss_cfg);
+ int err;
+
+ /* micro code required: number of TC should be power of 2 */
+ if (!hwdev || !prio_tc || (tc_num & (tc_num - 1))) {
+ PMD_DRV_LOG(ERR, "Hwdev or prio_tc is NULL, or tc_num: %u Not power of 2",
+ tc_num);
+ return -EINVAL;
+ }
+
+ memset(&rss_cfg, 0, sizeof(rss_cfg));
+ rss_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ rss_cfg.func_id = hinic_global_func_id(hwdev);
+ rss_cfg.rss_en = rss_en;
+ rss_cfg.template_id = tmpl_idx;
+ rss_cfg.rq_priority_number = tc_num ? (u8)ilog2(tc_num) : 0;
+
+ memcpy(rss_cfg.prio_tc, prio_tc, HINIC_DCB_UP_MAX);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_RSS_CFG,
+ &rss_cfg, sizeof(rss_cfg), &rss_cfg,
+ &out_size);
+ if (err || !out_size || rss_cfg.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR, "Failed to set rss cfg, err: %d, status: 0x%x, out size: 0x%x",
+ err, rss_cfg.mgmt_msg_head.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * hinic_rss_template_alloc - Get rss template id from the chip,
+ * all functions share 96 templates.
+ *
+ * @param hwdev
+ * The hardware interface of a nic device.
+ * @param tmpl_idx
+ * Index of rss template from chip.
+ *
+ * @return
+ * 0 on success.
+ * negative error value otherwise.
+ */
+int hinic_rss_template_alloc(void *hwdev, u8 *tmpl_idx)
+{
+ struct hinic_rss_template_mgmt template_mgmt;
+ u16 out_size = sizeof(template_mgmt);
+ int err;
+
+ if (!hwdev || !tmpl_idx) {
+ PMD_DRV_LOG(ERR, "Hwdev or tmpl_idx is NULL");
+ return -EINVAL;
+ }
+
+ memset(&template_mgmt, 0, sizeof(template_mgmt));
+ template_mgmt.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ template_mgmt.func_id = hinic_global_func_id(hwdev);
+ template_mgmt.cmd = NIC_RSS_CMD_TEMP_ALLOC;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_RSS_TEMP_MGR,
+ &template_mgmt, sizeof(template_mgmt),
+ &template_mgmt, &out_size);
+ if (err || !out_size || template_mgmt.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR, "Failed to alloc rss template, err: %d, status: 0x%x, out size: 0x%x",
+ err, template_mgmt.mgmt_msg_head.status, out_size);
+ return -EINVAL;
+ }
+
+ *tmpl_idx = template_mgmt.template_id;
+
+ return 0;
+}
+
+/**
+ * hinic_rss_template_free - Free rss template id to the chip.
+ *
+ * @param hwdev
+ * The hardware interface of a nic device.
+ * @param tmpl_idx
+ * Index of rss template from chip.
+ *
+ * @return
+ * 0 on success.
+ * negative error value otherwise.
+ */
+int hinic_rss_template_free(void *hwdev, u8 tmpl_idx)
+{
+ struct hinic_rss_template_mgmt template_mgmt;
+ u16 out_size = sizeof(template_mgmt);
+ int err;
+
+ if (!hwdev) {
+ PMD_DRV_LOG(ERR, "Hwdev is NULL");
+ return -EINVAL;
+ }
+
+ memset(&template_mgmt, 0, sizeof(template_mgmt));
+ template_mgmt.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ template_mgmt.func_id = hinic_global_func_id(hwdev);
+ template_mgmt.template_id = tmpl_idx;
+ template_mgmt.cmd = NIC_RSS_CMD_TEMP_FREE;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_RSS_TEMP_MGR,
+ &template_mgmt, sizeof(template_mgmt),
+ &template_mgmt, &out_size);
+ if (err || !out_size || template_mgmt.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR, "Failed to free rss template, err: %d, status: 0x%x, out size: 0x%x",
+ err, template_mgmt.mgmt_msg_head.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * hinic_set_rx_vhd_mode - Change rx buffer size after initialization.
+ *
+ * @param hwdev
+ * The hardware interface of a nic device.
+ * @param vhd_mode
+ * Not needed.
+ * @param rx_buf_sz
+ * receive buffer size.
+ *
+ * @return
+ * 0 on success.
+ * negative error value otherwise.
+ */
+int hinic_set_rx_vhd_mode(void *hwdev, u16 vhd_mode, u16 rx_buf_sz)
+{
+ struct hinic_set_vhd_mode vhd_mode_cfg;
+ u16 out_size = sizeof(vhd_mode_cfg);
+ int err;
+
+ if (!hwdev) {
+ PMD_DRV_LOG(ERR, "Hwdev is NULL");
+ return -EINVAL;
+ }
+
+ memset(&vhd_mode_cfg, 0, sizeof(vhd_mode_cfg));
+
+ vhd_mode_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ vhd_mode_cfg.func_id = hinic_global_func_id(hwdev);
+ vhd_mode_cfg.vhd_type = vhd_mode;
+ vhd_mode_cfg.rx_wqe_buffer_size = rx_buf_sz;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_VHD_CFG,
+ &vhd_mode_cfg, sizeof(vhd_mode_cfg),
+ &vhd_mode_cfg, &out_size);
+ if (err || !out_size || vhd_mode_cfg.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR,
+ "Failed to set vhd mode, err: %d, status: 0x%x, out size: 0x%x",
+ err, vhd_mode_cfg.mgmt_msg_head.status, out_size);
+
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic_set_rx_mode(void *hwdev, u32 enable)
+{
+ struct hinic_rx_mode_config rx_mode_cfg;
+ u16 out_size = sizeof(rx_mode_cfg);
+ int err;
+
+ if (!hwdev) {
+ PMD_DRV_LOG(ERR, "Hwdev is NULL");
+ return -EINVAL;
+ }
+
+ memset(&rx_mode_cfg, 0, sizeof(rx_mode_cfg));
+ rx_mode_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ rx_mode_cfg.func_id = hinic_global_func_id(hwdev);
+ rx_mode_cfg.rx_mode = enable;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RX_MODE,
+ &rx_mode_cfg, sizeof(rx_mode_cfg),
+ &rx_mode_cfg, &out_size);
+ if (err || !out_size || rx_mode_cfg.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR, "Failed to set rx mode, err: %d, status: 0x%x, out size: 0x%x",
+ err, rx_mode_cfg.mgmt_msg_head.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * hinic_get_mgmt_version - Get mgmt module version from chip.
+ *
+ * @param hwdev
+ * The hardware interface of a nic device.
+ * @param fw
+ * Firmware version.
+ *
+ * @return
+ * 0 on success.
+ * negative error value otherwise.
+ */
+int hinic_get_mgmt_version(void *hwdev, char *fw)
+{
+ struct hinic_version_info fw_ver;
+ u16 out_size = sizeof(fw_ver);
+ int err;
+
+ if (!hwdev || !fw) {
+ PMD_DRV_LOG(ERR, "Hwdev or fw is NULL");
+ return -EINVAL;
+ }
+
+ memset(&fw_ver, 0, sizeof(fw_ver));
+ fw_ver.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_MGMT_VERSION,
+ &fw_ver, sizeof(fw_ver), &fw_ver,
+ &out_size);
+ if (err || !out_size || fw_ver.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR, "Failed to get mgmt version, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, fw_ver.mgmt_msg_head.status, out_size);
+ return -EINVAL;
+ }
+
+ snprintf(fw, HINIC_MGMT_VERSION_MAX_LEN, "%s", fw_ver.ver);
+
+ return 0;
+}
+
+int hinic_set_rx_csum_offload(void *hwdev, u32 en)
+{
+ struct hinic_checksum_offload rx_csum_cfg;
+ u16 out_size = sizeof(rx_csum_cfg);
+ int err;
+
+ if (!hwdev) {
+ PMD_DRV_LOG(ERR, "Hwdev is NULL");
+ return -EINVAL;
+ }
+
+ memset(&rx_csum_cfg, 0, sizeof(rx_csum_cfg));
+ rx_csum_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ rx_csum_cfg.func_id = hinic_global_func_id(hwdev);
+ rx_csum_cfg.rx_csum_offload = en;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RX_CSUM,
+ &rx_csum_cfg, sizeof(rx_csum_cfg),
+ &rx_csum_cfg, &out_size);
+ if (err || !out_size || rx_csum_cfg.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR,
+ "Failed to set rx csum offload, err: %d, status: 0x%x, out size: 0x%x",
+ err, rx_csum_cfg.mgmt_msg_head.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_set_rx_lro(void *hwdev, u8 ipv4_en, u8 ipv6_en, u8 max_wqe_num)
+{
+ struct hinic_lro_config lro_cfg;
+ u16 out_size = sizeof(lro_cfg);
+ int err;
+
+ if (!hwdev) {
+ PMD_DRV_LOG(ERR, "Hwdev is NULL");
+ return -EINVAL;
+ }
+
+ memset(&lro_cfg, 0, sizeof(lro_cfg));
+ lro_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ lro_cfg.func_id = hinic_global_func_id(hwdev);
+ lro_cfg.lro_ipv4_en = ipv4_en;
+ lro_cfg.lro_ipv6_en = ipv6_en;
+ lro_cfg.lro_max_wqe_num = max_wqe_num;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_LRO,
+ &lro_cfg, sizeof(lro_cfg), &lro_cfg,
+ &out_size);
+ if (err || !out_size || lro_cfg.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR, "Failed to set lro offload, err: %d, status: 0x%x, out size: 0x%x",
+ err, lro_cfg.mgmt_msg_head.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
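+
+/*
+ * Illustrative usage sketch (not part of the driver): enabling LRO for both
+ * IPv4 and IPv6 with a hypothetical aggregation limit of 16 WQEs might look
+ * like:
+ *
+ *	err = hinic_set_rx_lro(nic_dev->hwdev, 1, 1, 16);
+ *	if (err)
+ *		PMD_DRV_LOG(ERR, "Enable LRO failed");
+ *
+ * "nic_dev" and the WQE limit of 16 are assumptions for the example only.
+ */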
+
+int hinic_set_anti_attack(void *hwdev, bool enable)
+{
+ struct hinic_port_anti_attack_rate rate;
+ u16 out_size = sizeof(rate);
+ int err;
+
+ if (!hwdev) {
+ PMD_DRV_LOG(ERR, "Hwdev is NULL");
+ return -EINVAL;
+ }
+
+ memset(&rate, 0, sizeof(rate));
+ rate.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ rate.func_id = hinic_global_func_id(hwdev);
+ rate.enable = enable;
+ rate.cir = ANTI_ATTACK_DEFAULT_CIR;
+ rate.xir = ANTI_ATTACK_DEFAULT_XIR;
+ rate.cbs = ANTI_ATTACK_DEFAULT_CBS;
+ rate.xbs = ANTI_ATTACK_DEFAULT_XBS;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_ANTI_ATTACK_RATE,
+ &rate, sizeof(rate), &rate,
+ &out_size);
+ if (err || !out_size || rate.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR, "Can't %s port Anti-Attack rate limit, err: %d, status: 0x%x, out size: 0x%x",
+ (enable ? "enable" : "disable"), err,
+ rate.mgmt_msg_head.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Set autoneg status and restart the port link */
+int hinic_reset_port_link_cfg(void *hwdev)
+{
+ struct hinic_reset_link_cfg reset_cfg;
+ u16 out_size = sizeof(reset_cfg);
+ int err;
+
+ memset(&reset_cfg, 0, sizeof(reset_cfg));
+ reset_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ reset_cfg.func_id = hinic_global_func_id(hwdev);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_RESET_LINK_CFG,
+ &reset_cfg, sizeof(reset_cfg),
+ &reset_cfg, &out_size);
+ if (err || !out_size || reset_cfg.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR, "Reset port link configure failed, err: %d, status: 0x%x, out size: 0x%x",
+ err, reset_cfg.mgmt_msg_head.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * hinic_vf_func_init - Register VF to PF.
+ *
+ * @param hwdev
+ * The hardware interface of a nic device.
+ *
+ * @return
+ * 0 on success.
+ * negative error value otherwise.
+ */
+int hinic_vf_func_init(struct hinic_hwdev *hwdev)
+{
+ int err, state = 0;
+
+ if (!HINIC_IS_VF(hwdev))
+ return 0;
+
+ err = hinic_mbox_to_pf(hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_VF_REGISTER, &state, sizeof(state),
+ NULL, NULL, 0);
+ if (err) {
+		PMD_DRV_LOG(ERR, "Failed to register VF");
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * hinic_vf_func_free - Unregister VF from PF.
+ *
+ * @param hwdev
+ * The hardware interface of a nic device.
+ */
+void hinic_vf_func_free(struct hinic_hwdev *hwdev)
+{
+ int err;
+
+ if (hinic_func_type(hwdev) != TYPE_VF)
+ return;
+
+ err = hinic_mbox_to_pf(hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_VF_UNREGISTER, &err, sizeof(err),
+ NULL, NULL, 0);
+ if (err)
+		PMD_DRV_LOG(ERR, "Failed to unregister VF, err: %d", err);
+}
+
+int hinic_set_fast_recycle_mode(void *hwdev, u8 mode)
+{
+ struct hinic_fast_recycled_mode fast_recycled_mode;
+ u16 out_size = sizeof(fast_recycled_mode);
+ int err;
+
+ if (!hwdev) {
+ PMD_DRV_LOG(ERR, "Hwdev is NULL");
+ return -EINVAL;
+ }
+
+ memset(&fast_recycled_mode, 0, sizeof(fast_recycled_mode));
+ fast_recycled_mode.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ fast_recycled_mode.func_id = hinic_global_func_id(hwdev);
+ fast_recycled_mode.fast_recycled_mode = mode;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_FAST_RECYCLE_MODE_SET,
+ &fast_recycled_mode,
+ sizeof(fast_recycled_mode),
+ &fast_recycled_mode, &out_size, 0);
+ if (err || fast_recycled_mode.mgmt_msg_head.status || !out_size) {
+		PMD_DRV_LOG(ERR, "Failed to set fast recycle mode, status: 0x%x",
+			    fast_recycled_mode.mgmt_msg_head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic_clear_vport_stats(struct hinic_hwdev *hwdev)
+{
+ struct hinic_clear_vport_stats clear_vport_stats;
+ u16 out_size = sizeof(clear_vport_stats);
+ int err;
+
+ if (!hwdev) {
+ PMD_DRV_LOG(ERR, "Hwdev is NULL");
+ return -EINVAL;
+ }
+
+ memset(&clear_vport_stats, 0, sizeof(clear_vport_stats));
+ clear_vport_stats.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ clear_vport_stats.func_id = hinic_global_func_id(hwdev);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_CLEAN_VPORT_STAT,
+ &clear_vport_stats,
+ sizeof(clear_vport_stats),
+ &clear_vport_stats, &out_size);
+ if (err || !out_size || clear_vport_stats.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR, "Failed to clear vport statistics, err: %d, status: 0x%x, out size: 0x%x",
+ err, clear_vport_stats.mgmt_msg_head.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_clear_phy_port_stats(struct hinic_hwdev *hwdev)
+{
+ struct hinic_clear_port_stats clear_phy_port_stats;
+ u16 out_size = sizeof(clear_phy_port_stats);
+ int err;
+
+ if (!hwdev) {
+ PMD_DRV_LOG(ERR, "Hwdev is NULL");
+ return -EINVAL;
+ }
+
+ memset(&clear_phy_port_stats, 0, sizeof(clear_phy_port_stats));
+ clear_phy_port_stats.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ clear_phy_port_stats.func_id = hinic_global_func_id(hwdev);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev,
+ HINIC_PORT_CMD_CLEAR_PORT_STATISTICS,
+ &clear_phy_port_stats,
+ sizeof(clear_phy_port_stats),
+ &clear_phy_port_stats, &out_size);
+ if (err || !out_size || clear_phy_port_stats.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR, "Failed to clear phy port statistics, err: %d, status: 0x%x, out size: 0x%x",
+ err, clear_phy_port_stats.mgmt_msg_head.status,
+ out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_set_link_status_follow(void *hwdev,
+ enum hinic_link_follow_status status)
+{
+ struct hinic_set_link_follow follow;
+ u16 out_size = sizeof(follow);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ if (HINIC_IS_VF((struct hinic_hwdev *)hwdev))
+ return 0;
+
+ if (status >= HINIC_LINK_FOLLOW_STATUS_MAX) {
+ PMD_DRV_LOG(ERR, "Invalid link follow status: %d", status);
+ return -EINVAL;
+ }
+
+ memset(&follow, 0, sizeof(follow));
+ follow.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ follow.func_id = hinic_global_func_id(hwdev);
+ follow.follow_status = status;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_LINK_FOLLOW,
+ &follow, sizeof(follow),
+ &follow, &out_size);
+ if ((follow.mgmt_msg_head.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+ follow.mgmt_msg_head.status) || err || !out_size) {
+ PMD_DRV_LOG(ERR,
+ "Failed to set link status follow phy port status, err: %d, status: 0x%x, out size: 0x%x",
+ err, follow.mgmt_msg_head.status, out_size);
+ return -EFAULT;
+ }
+
+ return follow.mgmt_msg_head.status;
+}
+
+int hinic_get_link_mode(void *hwdev, u32 *supported, u32 *advertised)
+{
+ struct hinic_link_mode_cmd link_mode;
+ u16 out_size = sizeof(link_mode);
+ int err;
+
+ if (!hwdev || !supported || !advertised)
+ return -EINVAL;
+
+ memset(&link_mode, 0, sizeof(link_mode));
+ link_mode.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ link_mode.func_id = hinic_global_func_id(hwdev);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_LINK_MODE,
+ &link_mode, sizeof(link_mode),
+ &link_mode, &out_size);
+ if (err || !out_size || link_mode.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR,
+ "Failed to get link mode, err: %d, status: 0x%x, out size: 0x%x",
+ err, link_mode.mgmt_msg_head.status, out_size);
+ return -EINVAL;
+ }
+
+ *supported = link_mode.supported;
+ *advertised = link_mode.advertised;
+
+ return 0;
+}
+
+/**
+ * hinic_set_xsfp_tx_status - Enable or disable the optical module (xsfp)
+ * transmit direction when the link is set up or down.
+ *
+ * @param hwdev
+ * The hardware interface of a nic device.
+ * @param enable
+ * Enable or Disable.
+ *
+ * @return
+ * 0 on success.
+ * negative error value otherwise.
+ */
+int hinic_set_xsfp_tx_status(void *hwdev, bool enable)
+{
+ struct hinic_set_xsfp_status xsfp_status;
+ u16 out_size = sizeof(struct hinic_set_xsfp_status);
+ int err;
+
+ memset(&xsfp_status, 0, sizeof(xsfp_status));
+ xsfp_status.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ xsfp_status.port_id = hinic_global_func_id(hwdev);
+ xsfp_status.xsfp_tx_dis = ((enable == 0) ? 1 : 0);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_XSFP_STATUS,
+ &xsfp_status, sizeof(struct hinic_set_xsfp_status),
+ &xsfp_status, &out_size);
+ if (err || !out_size || xsfp_status.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR,
+			"Failed to %s port xsfp status, err: %d, status: 0x%x, out size: 0x%x",
+			enable ? "enable" : "disable", err,
+ xsfp_status.mgmt_msg_head.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * hinic_flush_qp_res - Flush tx and rx chip resources in case setting the
+ * vport fails when the device starts.
+ *
+ * @param hwdev
+ * The hardware interface of a nic device.
+ *
+ * @return
+ * 0 on success.
+ * negative error value otherwise.
+ */
+int hinic_flush_qp_res(void *hwdev)
+{
+ struct hinic_clear_qp_resource qp_res;
+ u16 out_size = sizeof(qp_res);
+ int err;
+
+ memset(&qp_res, 0, sizeof(qp_res));
+ qp_res.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ qp_res.func_id = hinic_global_func_id(hwdev);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_CLEAR_QP_RES,
+ &qp_res, sizeof(qp_res), &qp_res,
+ &out_size);
+ if (err || !out_size || qp_res.mgmt_msg_head.status) {
+		PMD_DRV_LOG(ERR, "Failed to clear qp resources, err: %d, status: 0x%x, out size: 0x%x",
+ err, qp_res.mgmt_msg_head.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * hinic_vf_get_default_cos - Get default cos of VF.
+ *
+ * @param hwdev
+ * The hardware interface of a nic device.
+ * @param cos_id
+ * Cos value.
+ *
+ * @return
+ * 0 on success.
+ * negative error value otherwise.
+ */
+int hinic_vf_get_default_cos(struct hinic_hwdev *hwdev, u8 *cos_id)
+{
+ struct hinic_vf_default_cos vf_cos;
+ u16 out_size = sizeof(vf_cos);
+ int err;
+
+ memset(&vf_cos, 0, sizeof(vf_cos));
+ vf_cos.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_GET_VF_COS, &vf_cos,
+ sizeof(vf_cos), &vf_cos,
+ &out_size, 0);
+ if (err || !out_size || vf_cos.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR, "Get VF default cos failed, err: %d, status: 0x%x, out size: 0x%x",
+ err, vf_cos.mgmt_msg_head.status, out_size);
+ return -EFAULT;
+ }
+ *cos_id = vf_cos.state.default_cos;
+
+ return 0;
+}
+
+/**
+ * hinic_set_fdir_filter - Set fdir filter for control path
+ * packet to notify firmware.
+ *
+ * @param hwdev
+ * The hardware interface of a nic device.
+ * @param filter_type
+ * Packet type to filter.
+ * @param qid
+ * Rx qid to filter.
+ * @param type_enable
+ * The status of pkt type filter.
+ * @param enable
+ * Fdir function Enable or Disable.
+ * @return
+ * 0 on success,
+ * negative error value otherwise.
+ */
+int hinic_set_fdir_filter(void *hwdev, u8 filter_type, u8 qid, u8 type_enable,
+ bool enable)
+{
+ struct hinic_port_qfilter_info port_filer_cmd;
+ u16 out_size = sizeof(port_filer_cmd);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ memset(&port_filer_cmd, 0, sizeof(port_filer_cmd));
+ port_filer_cmd.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ port_filer_cmd.func_id = hinic_global_func_id(hwdev);
+ port_filer_cmd.filter_enable = (u8)enable;
+ port_filer_cmd.filter_type = filter_type;
+ port_filer_cmd.qid = qid;
+ port_filer_cmd.filter_type_enable = type_enable;
+ port_filer_cmd.fdir_flag = 0;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_Q_FILTER,
+ &port_filer_cmd, sizeof(port_filer_cmd),
+ &port_filer_cmd, &out_size);
+ if (err || !out_size || port_filer_cmd.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR, "Set port Q filter failed, err: %d, status: 0x%x, out size: 0x%x, type: 0x%x,"
+ " enable: 0x%x, qid: 0x%x, filter_type_enable: 0x%x\n",
+ err, port_filer_cmd.mgmt_msg_head.status, out_size,
+ filter_type, enable, qid, type_enable);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * hinic_set_normal_filter - Set fdir filter for IO path packet.
+ *
+ * @param hwdev
+ * The hardware interface of a nic device.
+ * @param qid
+ * Rx qid to filter.
+ * @param normal_type_enable
+ * IO path packet function Enable or Disable
+ * @param key
+ * IO path packet filter key value, such as DIP from pkt.
+ * @param enable
+ * Fdir function Enable or Disable.
+ * @param flag
+ * Filter flag, such as dip or others.
+ * @return
+ * 0 on success,
+ * negative error value otherwise.
+ */
+int hinic_set_normal_filter(void *hwdev, u8 qid, u8 normal_type_enable,
+ u32 key, bool enable, u8 flag)
+{
+ struct hinic_port_qfilter_info port_filer_cmd;
+ u16 out_size = sizeof(port_filer_cmd);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ memset(&port_filer_cmd, 0, sizeof(port_filer_cmd));
+ port_filer_cmd.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ port_filer_cmd.func_id = hinic_global_func_id(hwdev);
+ port_filer_cmd.filter_enable = (u8)enable;
+ port_filer_cmd.qid = qid;
+ port_filer_cmd.normal_type_enable = normal_type_enable;
+ port_filer_cmd.fdir_flag = flag; /* fdir flag: support dip */
+ port_filer_cmd.key = key;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_Q_FILTER,
+ &port_filer_cmd, sizeof(port_filer_cmd),
+ &port_filer_cmd, &out_size);
+ if (err || !out_size || port_filer_cmd.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR, "Set normal filter failed, err: %d, status: 0x%x, out size: 0x%x, fdir_flag: 0x%x,"
+ " enable: 0x%x, qid: 0x%x, normal_type_enable: 0x%x, key:0x%x\n",
+ err, port_filer_cmd.mgmt_msg_head.status, out_size,
+ flag, enable, qid, normal_type_enable, key);
+ return -EFAULT;
+ }
+
+ return 0;
+}
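+
+/*
+ * Illustrative usage sketch (not part of the driver): a flow layer could
+ * steer packets whose destination IP matches "key" to rx queue 3 via the IO
+ * path filter, e.g.:
+ *
+ *	err = hinic_set_normal_filter(nic_dev->hwdev, 3, 1, dip_be32, true, 1);
+ *
+ * "nic_dev", "dip_be32" and the fdir_flag value 1 (assumed to mean "match on
+ * DIP") are assumptions for the example only.
+ */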
+
+/**
+ * hinic_set_fdir_tcam - Set fdir filter for control packet
+ * by tcam table to notify hardware.
+ *
+ * @param hwdev
+ * The hardware interface of a nic device.
+ * @param type_mask
+ * Index of TCAM.
+ * @param filter_rule
+ * TCAM rule for control packet, such as lacp or bgp.
+ * @param filter_action
+ * TCAM action for control packet, such as accept or drop.
+ * @return
+ * 0 on success,
+ * negative error value otherwise.
+ */
+int hinic_set_fdir_tcam(void *hwdev, u16 type_mask,
+ struct tag_pa_rule *filter_rule,
+ struct tag_pa_action *filter_action)
+{
+ struct hinic_fdir_tcam_info port_tcam_cmd;
+ u16 out_size = sizeof(port_tcam_cmd);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ memset(&port_tcam_cmd, 0, sizeof(port_tcam_cmd));
+ port_tcam_cmd.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ port_tcam_cmd.tcam_index = type_mask;
+ port_tcam_cmd.flag = TCAM_SET;
+ memcpy((void *)&port_tcam_cmd.filter_rule,
+ (void *)filter_rule, sizeof(struct tag_pa_rule));
+ memcpy((void *)&port_tcam_cmd.filter_action,
+ (void *)filter_action, sizeof(struct tag_pa_action));
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_TCAM_FILTER,
+ &port_tcam_cmd, sizeof(port_tcam_cmd),
+ &port_tcam_cmd, &out_size);
+ if (err || !out_size || port_tcam_cmd.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR, "Set tcam table failed, err: %d, status: 0x%x, out size: 0x%x",
+ err, port_tcam_cmd.mgmt_msg_head.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * hinic_clear_fdir_tcam - Clear fdir filter TCAM table for control packet.
+ *
+ * @param hwdev
+ * The hardware interface of a nic device.
+ * @param type_mask
+ * Index of TCAM.
+ * @return
+ * 0 on success,
+ * negative error value otherwise.
+ */
+int hinic_clear_fdir_tcam(void *hwdev, u16 type_mask)
+{
+ struct hinic_fdir_tcam_info port_tcam_cmd;
+ u16 out_size = sizeof(port_tcam_cmd);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ memset(&port_tcam_cmd, 0, sizeof(port_tcam_cmd));
+ port_tcam_cmd.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ port_tcam_cmd.tcam_index = type_mask;
+ port_tcam_cmd.flag = TCAM_CLEAR;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_TCAM_FILTER,
+ &port_tcam_cmd, sizeof(port_tcam_cmd),
+ &port_tcam_cmd, &out_size);
+ if (err || !out_size || port_tcam_cmd.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR, "Clear tcam table failed, err: %d, status: 0x%x, out size: 0x%x",
+ err, port_tcam_cmd.mgmt_msg_head.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic_add_tcam_rule(void *hwdev, struct tag_tcam_cfg_rule *tcam_rule)
+{
+ u16 out_size = sizeof(struct tag_fdir_add_rule_cmd);
+ struct tag_fdir_add_rule_cmd tcam_cmd;
+ int err;
+
+ if (!hwdev) {
+ PMD_DRV_LOG(ERR, "Hwdev is NULL");
+ return -EINVAL;
+ }
+
+ if (tcam_rule->index >= HINIC_MAX_TCAM_RULES_NUM) {
+		PMD_DRV_LOG(ERR, "Tcam rule index to add is invalid");
+ return -EFAULT;
+ }
+
+ memset(&tcam_cmd, 0, sizeof(struct tag_fdir_add_rule_cmd));
+ tcam_cmd.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ memcpy((void *)&tcam_cmd.rule, (void *)tcam_rule,
+ sizeof(struct tag_tcam_cfg_rule));
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_UP_TC_ADD_FLOW,
+ &tcam_cmd, sizeof(tcam_cmd),
+ &tcam_cmd, &out_size);
+ if (err || tcam_cmd.mgmt_msg_head.status || !out_size) {
+ PMD_DRV_LOG(ERR,
+ "Add tcam rule failed, err: %d, status: 0x%x, out size: 0x%x",
+ err, tcam_cmd.mgmt_msg_head.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic_del_tcam_rule(void *hwdev, u32 index)
+{
+ u16 out_size = sizeof(struct tag_fdir_del_rule_cmd);
+ struct tag_fdir_del_rule_cmd tcam_cmd;
+ int err;
+
+ if (!hwdev) {
+ PMD_DRV_LOG(ERR, "Hwdev is NULL");
+ return -EINVAL;
+ }
+
+ if (index >= HINIC_MAX_TCAM_RULES_NUM) {
+		PMD_DRV_LOG(ERR, "Tcam rule index to delete is invalid");
+ return -EFAULT;
+ }
+
+ memset(&tcam_cmd, 0, sizeof(struct tag_fdir_del_rule_cmd));
+ tcam_cmd.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ tcam_cmd.index_start = index;
+ tcam_cmd.index_num = 1;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_UP_TC_DEL_FLOW,
+ &tcam_cmd, sizeof(tcam_cmd),
+ &tcam_cmd, &out_size);
+ if (err || tcam_cmd.mgmt_msg_head.status || !out_size) {
+ PMD_DRV_LOG(ERR,
+ "Del tcam rule failed, err: %d, status: 0x%x, out size: 0x%x",
+ err, tcam_cmd.mgmt_msg_head.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int hinic_mgmt_tcam_block(void *hwdev, u8 alloc_en,
+ u8 block_type, u16 *index)
+{
+ struct hinic_cmd_ctrl_tcam_block tcam_block_info;
+ u16 out_size = sizeof(struct hinic_cmd_ctrl_tcam_block);
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ int err;
+
+ if (!hwdev) {
+ PMD_DRV_LOG(ERR, "Hwdev is NULL");
+ return -EINVAL;
+ }
+
+ memset(&tcam_block_info, 0, sizeof(struct hinic_cmd_ctrl_tcam_block));
+ tcam_block_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ tcam_block_info.func_id = hinic_global_func_id(hwdev);
+ tcam_block_info.alloc_en = alloc_en;
+ tcam_block_info.tcam_type = block_type;
+ tcam_block_info.tcam_block_index = *index;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev,
+ HINIC_PORT_CMD_UP_TC_CTRL_TCAM_BLOCK,
+ &tcam_block_info, sizeof(tcam_block_info),
+ &tcam_block_info, &out_size);
+ if (tcam_block_info.mgmt_msg_head.status ==
+ HINIC_MGMT_CMD_UNSUPPORTED) {
+ err = HINIC_MGMT_CMD_UNSUPPORTED;
+ PMD_DRV_LOG(INFO, "Firmware/uP doesn't support alloc or del tcam block");
+ return err;
+ } else if ((err == HINIC_MBOX_VF_CMD_ERROR) &&
+ (HINIC_IS_VF(nic_hwdev))) {
+ err = HINIC_MGMT_CMD_UNSUPPORTED;
+ PMD_DRV_LOG(INFO, "VF doesn't support alloc and del tcam block.");
+ return err;
+ } else if (err || (!out_size) || tcam_block_info.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR,
+ "Set tcam block failed, err: %d, status: 0x%x, out size: 0x%x",
+ err, tcam_block_info.mgmt_msg_head.status, out_size);
+ return -EFAULT;
+ }
+
+ if (alloc_en)
+ *index = tcam_block_info.tcam_block_index;
+
+ return 0;
+}
+
+int hinic_alloc_tcam_block(void *hwdev, u8 block_type, u16 *index)
+{
+ return hinic_mgmt_tcam_block(hwdev, HINIC_TCAM_BLOCK_ENABLE,
+ block_type, index);
+}
+
+int hinic_free_tcam_block(void *hwdev, u8 block_type, u16 *index)
+{
+ return hinic_mgmt_tcam_block(hwdev, HINIC_TCAM_BLOCK_DISABLE,
+ block_type, index);
+}
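+
+/*
+ * Illustrative usage sketch (not part of the driver): a typical TCAM flow
+ * setup allocates a block, installs a rule, and frees the block on teardown:
+ *
+ *	u16 block_idx = 0;
+ *
+ *	if (!hinic_alloc_tcam_block(hwdev, block_type, &block_idx)) {
+ *		(void)hinic_add_tcam_rule(hwdev, &tcam_rule);
+ *		...
+ *		(void)hinic_free_tcam_block(hwdev, block_type, &block_idx);
+ *	}
+ *
+ * "block_type" and "tcam_rule" are hypothetical caller-side variables used
+ * only to show the call order.
+ */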
+
+int hinic_flush_tcam_rule(void *hwdev)
+{
+ struct hinic_cmd_flush_tcam_rules tcam_flush;
+ u16 out_size = sizeof(struct hinic_cmd_flush_tcam_rules);
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ int err;
+
+ if (!hwdev) {
+ PMD_DRV_LOG(ERR, "Hwdev is NULL");
+ return -EINVAL;
+ }
+
+ memset(&tcam_flush, 0, sizeof(struct hinic_cmd_flush_tcam_rules));
+ tcam_flush.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ tcam_flush.func_id = hinic_global_func_id(hwdev);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_UP_TC_FLUSH_TCAM,
+ &tcam_flush, sizeof(struct hinic_cmd_flush_tcam_rules),
+ &tcam_flush, &out_size);
+ if (tcam_flush.mgmt_msg_head.status == HINIC_MGMT_CMD_UNSUPPORTED) {
+ err = HINIC_MGMT_CMD_UNSUPPORTED;
+ PMD_DRV_LOG(INFO, "Firmware/uP doesn't support flush tcam fdir");
+ } else if ((err == HINIC_MBOX_VF_CMD_ERROR) &&
+ (HINIC_IS_VF(nic_hwdev))) {
+ err = HINIC_MGMT_CMD_UNSUPPORTED;
+ PMD_DRV_LOG(INFO, "VF doesn't support flush tcam fdir");
+ } else if (err || (!out_size) || tcam_flush.mgmt_msg_head.status) {
+ PMD_DRV_LOG(ERR,
+ "Flush tcam fdir rules failed, err: %d, status: 0x%x, out size: 0x%x",
+ err, tcam_flush.mgmt_msg_head.status, out_size);
+ err = -EFAULT;
+ }
+
+ return err;
+}
+
diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_niccfg.h b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_niccfg.h
new file mode 100644
index 000000000..846b5973e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_niccfg.h
@@ -0,0 +1,944 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_PMD_NICCFG_H_
+#define _HINIC_PMD_NICCFG_H_
+
+#define OS_VF_ID_TO_HW(os_vf_id) ((os_vf_id) + 1)
+#define HW_VF_ID_TO_OS(hw_vf_id) ((hw_vf_id) - 1)
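+
+/*
+ * Illustrative note (not part of the driver): hardware VF ids are 1-based
+ * while OS VF ids are 0-based, so OS_VF_ID_TO_HW(0) == 1 and
+ * HW_VF_ID_TO_OS(1) == 0.
+ */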
+
+#define HINIC_VLAN_PRIORITY_SHIFT 13
+
+#define HINIC_RSS_INDIR_SIZE 256
+#define HINIC_DCB_TC_MAX 0x8
+#define HINIC_DCB_UP_MAX 0x8
+#define HINIC_DCB_PG_MAX 0x8
+#define HINIC_RSS_KEY_SIZE 40
+
+#define HINIC_MAX_NUM_RQ 64
+
+#define ANTI_ATTACK_DEFAULT_CIR 500000
+#define ANTI_ATTACK_DEFAULT_XIR 600000
+#define ANTI_ATTACK_DEFAULT_CBS 10000000
+#define ANTI_ATTACK_DEFAULT_XBS 12000000
+
+#define NIC_RSS_INDIR_SIZE 256
+#define NIC_RSS_KEY_SIZE 40
+#define NIC_RSS_CMD_TEMP_ALLOC 0x01
+#define NIC_RSS_CMD_TEMP_FREE 0x02
+#define NIC_DCB_UP_MAX 0x8
+
+enum hinic_rss_hash_type {
+ HINIC_RSS_HASH_ENGINE_TYPE_XOR = 0,
+ HINIC_RSS_HASH_ENGINE_TYPE_TOEP,
+
+ HINIC_RSS_HASH_ENGINE_TYPE_MAX,
+};
+
+struct nic_port_info {
+ u8 port_type;
+ u8 autoneg_cap;
+ u8 autoneg_state;
+ u8 duplex;
+ u8 speed;
+};
+
+enum nic_speed_level {
+ LINK_SPEED_10MB = 0,
+ LINK_SPEED_100MB,
+ LINK_SPEED_1GB,
+ LINK_SPEED_10GB,
+ LINK_SPEED_25GB,
+ LINK_SPEED_40GB,
+ LINK_SPEED_100GB,
+ LINK_SPEED_MAX
+};
+
+enum hinic_link_status {
+ HINIC_LINK_DOWN = 0,
+ HINIC_LINK_UP
+};
+
+struct hinic_up_ets_cfg {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u8 port_id;
+ u8 rsvd1[3];
+ u8 up_tc[HINIC_DCB_UP_MAX];
+ u8 pg_bw[HINIC_DCB_PG_MAX];
+ u8 pgid[HINIC_DCB_UP_MAX];
+ u8 up_bw[HINIC_DCB_UP_MAX];
+ u8 prio[HINIC_DCB_PG_MAX];
+};
+
+struct nic_pause_config {
+ u32 auto_neg;
+ u32 rx_pause;
+ u32 tx_pause;
+};
+
+struct nic_rss_type {
+ u8 tcp_ipv6_ext;
+ u8 ipv6_ext;
+ u8 tcp_ipv6;
+ u8 ipv6;
+ u8 tcp_ipv4;
+ u8 ipv4;
+ u8 udp_ipv6;
+ u8 udp_ipv4;
+};
+
+enum hinic_rx_mod {
+ HINIC_RX_MODE_UC = 1 << 0,
+ HINIC_RX_MODE_MC = 1 << 1,
+ HINIC_RX_MODE_BC = 1 << 2,
+ HINIC_RX_MODE_MC_ALL = 1 << 3,
+ HINIC_RX_MODE_PROMISC = 1 << 4,
+};
+
+enum hinic_link_mode {
+ HINIC_10GE_BASE_KR = 0,
+ HINIC_40GE_BASE_KR4 = 1,
+ HINIC_40GE_BASE_CR4 = 2,
+ HINIC_100GE_BASE_KR4 = 3,
+ HINIC_100GE_BASE_CR4 = 4,
+ HINIC_25GE_BASE_KR_S = 5,
+ HINIC_25GE_BASE_CR_S = 6,
+ HINIC_25GE_BASE_KR = 7,
+ HINIC_25GE_BASE_CR = 8,
+ HINIC_GE_BASE_KX = 9,
+ HINIC_LINK_MODE_NUMBERS,
+
+ HINIC_SUPPORTED_UNKNOWN = 0xFFFF,
+};
+
+#define HINIC_DEFAULT_RX_MODE (HINIC_RX_MODE_UC | HINIC_RX_MODE_MC | \
+ HINIC_RX_MODE_BC)
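+
+/*
+ * Illustrative sketch (not part of the driver): promiscuous mode is usually
+ * expressed by OR-ing the promisc bit into the default mask, e.g.:
+ *
+ *	u32 rx_mode = HINIC_DEFAULT_RX_MODE | HINIC_RX_MODE_PROMISC;
+ *
+ *	(void)hinic_set_rx_mode(hwdev, rx_mode);
+ */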
+
+#define HINIC_MAX_MTU_SIZE (9600)
+#define HINIC_MIN_MTU_SIZE (256)
+
+/* MIN_MTU + ETH_HLEN + CRC (256+14+4) */
+#define HINIC_MIN_FRAME_SIZE 274
+
+/* MAX_MTU + ETH_HLEN + CRC + VLAN(9600+14+4+4) */
+#define HINIC_MAX_JUMBO_FRAME_SIZE (9622)
+
+#define HINIC_PORT_DISABLE 0x0
+#define HINIC_PORT_ENABLE 0x3
+
+struct hinic_vport_stats {
+ u64 tx_unicast_pkts_vport;
+ u64 tx_unicast_bytes_vport;
+ u64 tx_multicast_pkts_vport;
+ u64 tx_multicast_bytes_vport;
+ u64 tx_broadcast_pkts_vport;
+ u64 tx_broadcast_bytes_vport;
+
+ u64 rx_unicast_pkts_vport;
+ u64 rx_unicast_bytes_vport;
+ u64 rx_multicast_pkts_vport;
+ u64 rx_multicast_bytes_vport;
+ u64 rx_broadcast_pkts_vport;
+ u64 rx_broadcast_bytes_vport;
+
+ u64 tx_discard_vport;
+ u64 rx_discard_vport;
+ u64 tx_err_vport;
+ u64 rx_err_vport; /* rx checksum err pkts in ucode */
+};
+
+struct hinic_phy_port_stats {
+ u64 mac_rx_total_pkt_num;
+ u64 mac_rx_total_oct_num;
+ u64 mac_rx_bad_pkt_num;
+ u64 mac_rx_bad_oct_num;
+ u64 mac_rx_good_pkt_num;
+ u64 mac_rx_good_oct_num;
+ u64 mac_rx_uni_pkt_num;
+ u64 mac_rx_multi_pkt_num;
+ u64 mac_rx_broad_pkt_num;
+
+ u64 mac_tx_total_pkt_num;
+ u64 mac_tx_total_oct_num;
+ u64 mac_tx_bad_pkt_num;
+ u64 mac_tx_bad_oct_num;
+ u64 mac_tx_good_pkt_num;
+ u64 mac_tx_good_oct_num;
+ u64 mac_tx_uni_pkt_num;
+ u64 mac_tx_multi_pkt_num;
+ u64 mac_tx_broad_pkt_num;
+
+ u64 mac_rx_fragment_pkt_num;
+ u64 mac_rx_undersize_pkt_num;
+ u64 mac_rx_undermin_pkt_num;
+ u64 mac_rx_64_oct_pkt_num;
+ u64 mac_rx_65_127_oct_pkt_num;
+ u64 mac_rx_128_255_oct_pkt_num;
+ u64 mac_rx_256_511_oct_pkt_num;
+ u64 mac_rx_512_1023_oct_pkt_num;
+ u64 mac_rx_1024_1518_oct_pkt_num;
+ u64 mac_rx_1519_2047_oct_pkt_num;
+ u64 mac_rx_2048_4095_oct_pkt_num;
+ u64 mac_rx_4096_8191_oct_pkt_num;
+ u64 mac_rx_8192_9216_oct_pkt_num;
+ u64 mac_rx_9217_12287_oct_pkt_num;
+ u64 mac_rx_12288_16383_oct_pkt_num;
+ u64 mac_rx_1519_max_bad_pkt_num;
+ u64 mac_rx_1519_max_good_pkt_num;
+ u64 mac_rx_oversize_pkt_num;
+ u64 mac_rx_jabber_pkt_num;
+
+ u64 mac_rx_mac_pause_num;
+ u64 mac_rx_pfc_pkt_num;
+ u64 mac_rx_pfc_pri0_pkt_num;
+ u64 mac_rx_pfc_pri1_pkt_num;
+ u64 mac_rx_pfc_pri2_pkt_num;
+ u64 mac_rx_pfc_pri3_pkt_num;
+ u64 mac_rx_pfc_pri4_pkt_num;
+ u64 mac_rx_pfc_pri5_pkt_num;
+ u64 mac_rx_pfc_pri6_pkt_num;
+ u64 mac_rx_pfc_pri7_pkt_num;
+ u64 mac_rx_mac_control_pkt_num;
+ u64 mac_rx_y1731_pkt_num;
+ u64 mac_rx_sym_err_pkt_num;
+ u64 mac_rx_fcs_err_pkt_num;
+ u64 mac_rx_send_app_good_pkt_num;
+ u64 mac_rx_send_app_bad_pkt_num;
+
+ u64 mac_tx_fragment_pkt_num;
+ u64 mac_tx_undersize_pkt_num;
+ u64 mac_tx_undermin_pkt_num;
+ u64 mac_tx_64_oct_pkt_num;
+ u64 mac_tx_65_127_oct_pkt_num;
+ u64 mac_tx_128_255_oct_pkt_num;
+ u64 mac_tx_256_511_oct_pkt_num;
+ u64 mac_tx_512_1023_oct_pkt_num;
+ u64 mac_tx_1024_1518_oct_pkt_num;
+ u64 mac_tx_1519_2047_oct_pkt_num;
+ u64 mac_tx_2048_4095_oct_pkt_num;
+ u64 mac_tx_4096_8191_oct_pkt_num;
+ u64 mac_tx_8192_9216_oct_pkt_num;
+ u64 mac_tx_9217_12287_oct_pkt_num;
+ u64 mac_tx_12288_16383_oct_pkt_num;
+ u64 mac_tx_1519_max_bad_pkt_num;
+ u64 mac_tx_1519_max_good_pkt_num;
+ u64 mac_tx_oversize_pkt_num;
+ u64 mac_trans_jabber_pkt_num;
+
+ u64 mac_tx_mac_pause_num;
+ u64 mac_tx_pfc_pkt_num;
+ u64 mac_tx_pfc_pri0_pkt_num;
+ u64 mac_tx_pfc_pri1_pkt_num;
+ u64 mac_tx_pfc_pri2_pkt_num;
+ u64 mac_tx_pfc_pri3_pkt_num;
+ u64 mac_tx_pfc_pri4_pkt_num;
+ u64 mac_tx_pfc_pri5_pkt_num;
+ u64 mac_tx_pfc_pri6_pkt_num;
+ u64 mac_tx_pfc_pri7_pkt_num;
+ u64 mac_tx_mac_control_pkt_num;
+ u64 mac_tx_y1731_pkt_num;
+ u64 mac_tx_1588_pkt_num;
+ u64 mac_tx_err_all_pkt_num;
+ u64 mac_tx_from_app_good_pkt_num;
+ u64 mac_tx_from_app_bad_pkt_num;
+
+ u64 rx_higig2_ext_pkts_port;
+ u64 rx_higig2_message_pkts_port;
+ u64 rx_higig2_error_pkts_port;
+ u64 rx_higig2_cpu_ctrl_pkts_port;
+ u64 rx_higig2_unicast_pkts_port;
+ u64 rx_higig2_broadcast_pkts_port;
+ u64 rx_higig2_l2_multicast_pkts;
+ u64 rx_higig2_l3_multicast_pkts;
+
+ u64 tx_higig2_message_pkts_port;
+ u64 tx_higig2_ext_pkts_port;
+ u64 tx_higig2_cpu_ctrl_pkts_port;
+ u64 tx_higig2_unicast_pkts_port;
+ u64 tx_higig2_broadcast_pkts_port;
+ u64 tx_higig2_l2_multicast_pkts;
+ u64 tx_higig2_l3_multicast_pkts;
+};
+
+enum hinic_link_follow_status {
+ HINIC_LINK_FOLLOW_DEFAULT,
+ HINIC_LINK_FOLLOW_PORT,
+ HINIC_LINK_FOLLOW_SEPARATE,
+ HINIC_LINK_FOLLOW_STATUS_MAX,
+};
+
+#define HINIC_PORT_STATS_VERSION 0
+struct hinic_port_stats_info {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 stats_version;
+ u32 stats_size;
+};
+
+struct hinic_port_stats {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ struct hinic_phy_port_stats stats;
+};
+
+struct hinic_cmd_vport_stats {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ struct hinic_vport_stats stats;
+};
+
+struct hinic_clear_port_stats {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd;
+ u32 stats_version;
+ u32 stats_size;
+};
+
+struct hinic_clear_vport_stats {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd;
+ u32 stats_version;
+ u32 stats_size;
+};
+
+struct hinic_fast_recycled_mode {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ /*
+ * 1: enable fast recycle, available in dpdk mode,
+ * 0: normal mode, available in kernel nic mode
+ */
+ u8 fast_recycled_mode;
+ u8 rsvd1;
+};
+
+struct hinic_function_table {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rx_wqe_buf_size;
+ u32 mtu;
+};
+
+struct hinic_cmd_qpn {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 base_qpn;
+};
+
+struct hinic_port_mac_set {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 vlan_id;
+ u16 rsvd1;
+ u8 mac[ETH_ALEN];
+};
+
+struct hinic_port_mac_update {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 vlan_id;
+ u16 rsvd1;
+ u8 old_mac[ETH_ALEN];
+ u16 rsvd2;
+ u8 new_mac[ETH_ALEN];
+};
+
+struct hinic_vport_state {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+ u8 state;
+ u8 rsvd2[3];
+};
+
+struct hinic_port_state {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u8 state;
+ u8 rsvd1[3];
+};
+
+struct hinic_mtu {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 mtu;
+};
+
+struct hinic_vlan_config {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 vlan_id;
+};
+
+struct hinic_vlan_filter {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 rsvd1[2];
+ u32 vlan_filter_ctrl;
+};
+
+struct hinic_vlan_offload {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 vlan_rx_offload;
+ u8 rsvd1[5];
+};
+
+struct hinic_get_link {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 link_status;
+ u8 rsvd1;
+};
+
+#define HINIC_DEFAUT_PAUSE_CONFIG 1
+struct hinic_pause_config {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 auto_neg;
+ u32 rx_pause;
+ u32 tx_pause;
+};
+
+struct hinic_port_info {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+ u8 port_type;
+ u8 autoneg_cap;
+ u8 autoneg_state;
+ u8 duplex;
+ u8 speed;
+ u8 resv2[3];
+};
+
+struct hinic_tso_config {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+ u8 tso_en;
+ u8 resv2[3];
+};
+
+struct hinic_lro_config {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+ u8 lro_ipv4_en;
+ u8 lro_ipv6_en;
+ u8 lro_max_wqe_num;
+ u8 resv2[13];
+};
+
+struct hinic_checksum_offload {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 rx_csum_offload;
+};
+
+struct hinic_rx_mode_config {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 rx_mode;
+};
+
+#define HINIC_MGMT_VERSION_MAX_LEN 32
+#define HINIC_COMPILE_TIME_LEN 20
+#define HINIC_FW_VERSION_NAME 16
+
+struct hinic_version_info {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u8 ver[HINIC_FW_VERSION_NAME];
+ u8 time[HINIC_COMPILE_TIME_LEN];
+};
+
+/* rss */
+struct nic_rss_indirect_tbl {
+ u32 group_index;
+ u32 offset;
+ u32 size;
+ u32 rsvd;
+ u8 entry[NIC_RSS_INDIR_SIZE];
+};
+
+struct nic_rss_context_tbl {
+ u32 group_index;
+ u32 offset;
+ u32 size;
+ u32 rsvd;
+ u32 ctx;
+};
+
+struct hinic_rss_config {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 rss_en;
+ u8 template_id;
+ u8 rq_priority_number;
+ u8 rsvd1[3];
+ u8 prio_tc[NIC_DCB_UP_MAX];
+};
+
+struct hinic_rss_template_mgmt {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 cmd;
+ u8 template_id;
+ u8 rsvd1[4];
+};
+
+struct hinic_rss_indir_table {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 template_id;
+ u8 rsvd1;
+ u8 indir[NIC_RSS_INDIR_SIZE];
+};
+
+struct hinic_rss_template_key {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 template_id;
+ u8 rsvd1;
+ u8 key[NIC_RSS_KEY_SIZE];
+};
+
+struct hinic_rss_engine_type {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 template_id;
+ u8 hash_engine;
+ u8 rsvd1[4];
+};
+
+struct hinic_rss_context_table {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 template_id;
+ u8 rsvd1;
+ u32 context;
+};
+
+struct hinic_reset_link_cfg {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+};
+
+struct hinic_set_vhd_mode {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 vhd_type;
+ u16 rx_wqe_buffer_size;
+ u16 rsvd;
+};
+
+struct hinic_set_link_follow {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd0;
+ u8 follow_status;
+ u8 rsvd1[3];
+};
+
+struct hinic_link_mode_cmd {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+	u16 supported;	/* 0xFFFF represents an invalid value */
+ u16 advertised;
+};
+
+struct hinic_set_xsfp_status {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u32 port_id;
+ u32 xsfp_tx_dis; /* 0: tx enable; 1: tx disable */
+};
+
+struct hinic_clear_qp_resource {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+};
+
+struct hinic_dcb_state {
+ u8 dcb_on;
+ u8 default_cos;
+ u8 up_cos[8];
+};
+
+struct hinic_vf_default_cos {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ struct hinic_dcb_state state;
+};
+
+/* set physical port Anti-Attack rate */
+struct hinic_port_anti_attack_rate {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 enable; /* 1: enable rate-limiting, 0: disable rate-limiting */
+ u32 cir; /* Committed Information Rate */
+ u32 xir; /* eXtended Information Rate */
+ u32 cbs; /* Committed Burst Size */
+ u32 xbs; /* eXtended Burst Size */
+};
+
+struct pa_u8_s {
+ u8 val8;
+ u8 mask8;
+};
+
+struct pa_u16_s {
+ u16 val16;
+ u16 mask16;
+};
+
+struct pa_u32_s {
+ u32 val32;
+ u32 mask32;
+};
+
+struct pa_u48_s {
+ u8 val8[6];
+ u8 mask8[6];
+};
+
+struct pa_u64_s {
+ u8 val8[8];
+ u8 mask8[8];
+};
+
+struct tag_pa_eth_ip_header {
+ struct pa_u8_s ip_ver; /* 3bit */
+ struct pa_u8_s ipv4_option_flag; /* 1bit */
+ /* 8bit ipv4 option or ipv6 next header */
+ struct pa_u8_s protocol;
+ struct pa_u8_s dscp; /* 6bit DSCP */
+};
+
+struct tag_pa_common_l2_header {
+ struct pa_u48_s dmac; /* dmac 48bit */
+ struct pa_u16_s eth_type; /* ethernet type/length 16bit */
+ struct pa_u8_s tag_flag; /* tag flag: 4bit */
+ struct pa_u8_s np2np_hdr_qindex; /* NP2NP Header Qindex 4bit */
+ struct pa_u8_s e_tag_pcp; /* 3bit */
+ struct pa_u8_s vlan_layer; /* 2bit */
+ struct pa_u8_s s_tag; /* 3bit */
+ struct pa_u8_s c_tag; /* 3bit */
+ struct pa_u16_s vlan_id; /* 12bit */
+};
+
+struct tag_pa_tcp {
+ struct pa_u16_s sport; /* 16bit */
+ struct pa_u16_s dport; /* 16bit */
+ struct pa_u16_s tcp_flag; /* 6bit */
+};
+
+struct tag_pa_udp {
+ struct pa_u16_s sport; /* 16bit */
+ struct pa_u16_s dport; /* 16bit */
+ /* 8bit :
+ * 1.udp dport=67/68 && ipv4 protocol=0x11
+ * 2.udp dport=546/547 && ipv6 next header=0x11
+ * 3. do not care
+ */
+ struct pa_u8_s dhcp_op_or_msg_type;
+};
+
+/* ICMP:
+ * ipv4 protocol = 0x1
+ * ipv6 next header = 0x3A
+ */
+struct tag_pa_icmp {
+ struct pa_u8_s type; /* 8bit */
+ struct pa_u8_s code; /* 8bit */
+};
+
+/* IGMP:
+ * ipv4 protocol = 0x2
+ */
+struct tag_pa_ipv4_igmp {
+ struct pa_u32_s dip; /* 32bit */
+ struct pa_u8_s type; /* 8bit */
+};
+
+struct tag_pa_rule {
+ struct pa_u8_s ncsi_flag; /* 1bit valid */
+ struct tag_pa_common_l2_header l2_header;
+
+ u8 eth_type;
+
+ struct pa_u64_s eth_other; /* eth_type=other 64bit */
+ struct pa_u8_s eth_roce_opcode; /* eth_type=roce 8bit opcode */
+
+ struct tag_pa_eth_ip_header ip_header; /* eth_type=ip */
+
+ u8 ip_protocol_type;
+
+ struct tag_pa_tcp eth_ip_tcp; /* eth_type=ip && ip_protocol = tcp */
+ struct tag_pa_udp eth_ip_udp; /* eth_type=ip && ip_protocol = udp */
+ struct tag_pa_icmp eth_ip_icmp; /* eth_type=ip && ip_protocol = icmp */
+
+ /* eth_type=ip && ip_protocol = ipv4_igmp */
+ struct tag_pa_ipv4_igmp eth_ipv4_igmp;
+
+ /* eth_type=ip && ip_protocol = sctp;
+ * 16bit ipv4 protocol=0x84 or ipv6 nhr=0x84
+ */
+ struct pa_u16_s eth_ip_sctp;
+};
+
+struct tag_pa_action {
+ u16 pkt_type;
+ u8 err_type;
+ u8 pri;
+ u8 fwd_action;
+ u8 push_len;
+};
+
+struct hinic_fdir_tcam_info {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 tcam_index;
+ u8 flag; /* clear or set tcam table flag */
+ u8 rsvd1;
+ struct tag_pa_rule filter_rule;
+ struct tag_pa_action filter_action;
+};
+
+#define TCAM_SET 0x1
+#define TCAM_CLEAR 0x2
+
+struct hinic_port_qfilter_info {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 normal_type_enable;
+ u8 filter_type_enable;
+ u8 filter_enable;
+ u8 filter_type;
+ u8 qid;
+ u8 fdir_flag;
+ u32 key;
+};
+
+#define HINIC_MAX_TCAM_RULES_NUM (10240)
+#define HINIC_TCAM_BLOCK_ENABLE 1
+#define HINIC_TCAM_BLOCK_DISABLE 0
+
+struct tag_tcam_result {
+ u32 qid;
+ u32 rsvd;
+};
+
+#define TCAM_FLOW_KEY_SIZE 24
+
+struct tag_tcam_key_x_y {
+ u8 x[TCAM_FLOW_KEY_SIZE];
+ u8 y[TCAM_FLOW_KEY_SIZE];
+};
+
+struct tag_tcam_cfg_rule {
+ u32 index;
+ struct tag_tcam_result data;
+ struct tag_tcam_key_x_y key;
+};
+
+struct tag_fdir_add_rule_cmd {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+ struct tag_tcam_cfg_rule rule;
+};
+
+struct tag_fdir_del_rule_cmd {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u32 index_start;
+ u32 index_num;
+};
+
+struct hinic_cmd_flush_tcam_rules {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd;
+};
+
+struct hinic_cmd_ctrl_tcam_block {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 alloc_en; /* 0: free tcam block, 1: alloc tcam block */
+ /*
+ * 0: alloc 1k size tcam block,
+ * 1: alloc 128 size tcam block, others rsvd
+ */
+ u8 tcam_type;
+ u16 tcam_block_index;
+ u16 rsvd;
+};
+
+int hinic_set_mac(void *hwdev, u8 *mac_addr, u16 vlan_id, u16 func_id);
+
+int hinic_del_mac(void *hwdev, u8 *mac_addr, u16 vlan_id, u16 func_id);
+
+int hinic_update_mac(void *hwdev, u8 *old_mac, u8 *new_mac, u16 vlan_id,
+ u16 func_id);
+
+int hinic_get_default_mac(void *hwdev, u8 *mac_addr);
+
+int hinic_set_port_mtu(void *hwdev, u32 new_mtu);
+
+int hinic_add_remove_vlan(void *hwdev, u16 vlan_id, u16 func_id, bool add);
+
+int hinic_config_vlan_filter(void *hwdev, u32 vlan_filter_ctrl);
+
+int hinic_set_rx_vlan_offload(void *hwdev, u8 en);
+
+int hinic_set_vport_enable(void *hwdev, bool enable);
+
+int hinic_set_port_enable(void *hwdev, bool enable);
+
+int hinic_get_link_status(void *hwdev, u8 *link_state);
+
+int hinic_get_port_info(void *hwdev, struct nic_port_info *port_info);
+
+int hinic_set_rx_vhd_mode(void *hwdev, u16 vhd_mode, u16 rx_buf_sz);
+
+int hinic_set_pause_config(void *hwdev, struct nic_pause_config nic_pause);
+
+int hinic_get_pause_info(void *hwdev, struct nic_pause_config *nic_pause);
+
+int hinic_reset_port_link_cfg(void *hwdev);
+
+int hinic_dcb_set_ets(void *hwdev, u8 *up_tc, u8 *pg_bw, u8 *pgid, u8 *up_bw,
+ u8 *prio);
+
+int hinic_set_anti_attack(void *hwdev, bool enable);
+
+/* offload feature */
+int hinic_set_rx_lro(void *hwdev, u8 ipv4_en, u8 ipv6_en, u8 max_wqe_num);
+
+int hinic_get_vport_stats(void *hwdev, struct hinic_vport_stats *stats);
+
+int hinic_get_phy_port_stats(void *hwdev, struct hinic_phy_port_stats *stats);
+
+/* rss */
+int hinic_set_rss_type(void *hwdev, u32 tmpl_idx,
+ struct nic_rss_type rss_type);
+
+int hinic_get_rss_type(void *hwdev, u32 tmpl_idx,
+ struct nic_rss_type *rss_type);
+
+int hinic_rss_set_template_tbl(void *hwdev, u32 tmpl_idx, u8 *temp);
+
+int hinic_rss_get_template_tbl(void *hwdev, u32 tmpl_idx, u8 *temp);
+
+int hinic_rss_set_hash_engine(void *hwdev, u8 tmpl_idx, u8 type);
+
+int hinic_rss_get_indir_tbl(void *hwdev, u32 tmpl_idx, u32 *indir_table);
+
+int hinic_rss_set_indir_tbl(void *hwdev, u32 tmpl_idx, u32 *indir_table);
+
+int hinic_rss_cfg(void *hwdev, u8 rss_en, u8 tmpl_idx, u8 tc_num, u8 *prio_tc);
+
+int hinic_rss_template_alloc(void *hwdev, u8 *tmpl_idx);
+
+int hinic_rss_template_free(void *hwdev, u8 tmpl_idx);
+
+int hinic_set_rx_mode(void *hwdev, u32 enable);
+
+int hinic_get_mgmt_version(void *hwdev, char *fw);
+
+int hinic_set_rx_csum_offload(void *hwdev, u32 en);
+
+int hinic_set_link_status_follow(void *hwdev,
+ enum hinic_link_follow_status status);
+
+int hinic_get_link_mode(void *hwdev, u32 *supported, u32 *advertised);
+
+int hinic_set_xsfp_tx_status(void *hwdev, bool enable);
+
+int hinic_flush_qp_res(void *hwdev);
+
+int hinic_init_function_table(void *hwdev, u16 rx_buf_sz);
+
+int hinic_set_fast_recycle_mode(void *hwdev, u8 mode);
+
+int hinic_get_base_qpn(void *hwdev, u16 *global_qpn);
+
+int hinic_clear_vport_stats(struct hinic_hwdev *hwdev);
+
+int hinic_clear_phy_port_stats(struct hinic_hwdev *hwdev);
+
+int hinic_vf_func_init(struct hinic_hwdev *hwdev);
+
+void hinic_vf_func_free(struct hinic_hwdev *hwdev);
+
+int hinic_vf_get_default_cos(struct hinic_hwdev *hwdev, u8 *cos_id);
+
+int hinic_set_fdir_filter(void *hwdev, u8 filter_type, u8 qid,
+ u8 type_enable, bool enable);
+
+int hinic_set_normal_filter(void *hwdev, u8 qid, u8 normal_type_enable,
+ u32 key, bool enable, u8 flag);
+
+int hinic_set_fdir_tcam(void *hwdev, u16 type_mask,
+ struct tag_pa_rule *filter_rule, struct tag_pa_action *filter_action);
+
+int hinic_clear_fdir_tcam(void *hwdev, u16 type_mask);
+
+int hinic_add_tcam_rule(void *hwdev, struct tag_tcam_cfg_rule *tcam_rule);
+
+int hinic_del_tcam_rule(void *hwdev, u32 index);
+
+int hinic_alloc_tcam_block(void *hwdev, u8 block_type, u16 *index);
+
+int hinic_free_tcam_block(void *hwdev, u8 block_type, u16 *index);
+
+int hinic_flush_tcam_rule(void *hwdev);
+
+#endif /* _HINIC_PMD_NICCFG_H_ */
diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_nicio.c b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_nicio.c
new file mode 100644
index 000000000..7f7e11dbd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_nicio.c
@@ -0,0 +1,907 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+#include <rte_bus_pci.h>
+
+#include "hinic_compat.h"
+#include "hinic_pmd_hwdev.h"
+#include "hinic_pmd_hwif.h"
+#include "hinic_pmd_wq.h"
+#include "hinic_pmd_mgmt.h"
+#include "hinic_pmd_cmdq.h"
+#include "hinic_pmd_cfg.h"
+#include "hinic_pmd_niccfg.h"
+#include "hinic_pmd_nicio.h"
+
+#define WQ_PREFETCH_MAX 6
+#define WQ_PREFETCH_MIN 1
+#define WQ_PREFETCH_THRESHOLD 256
+
+#define DEFAULT_RX_BUF_SIZE ((u16)0xB)
+
+enum {
+ RECYCLE_MODE_NIC = 0x0,
+ RECYCLE_MODE_DPDK = 0x1,
+};
+
+/* Queue buffer related define */
+enum hinic_rx_buf_size {
+ HINIC_RX_BUF_SIZE_32B = 0x20,
+ HINIC_RX_BUF_SIZE_64B = 0x40,
+ HINIC_RX_BUF_SIZE_96B = 0x60,
+ HINIC_RX_BUF_SIZE_128B = 0x80,
+ HINIC_RX_BUF_SIZE_192B = 0xC0,
+ HINIC_RX_BUF_SIZE_256B = 0x100,
+ HINIC_RX_BUF_SIZE_384B = 0x180,
+ HINIC_RX_BUF_SIZE_512B = 0x200,
+ HINIC_RX_BUF_SIZE_768B = 0x300,
+ HINIC_RX_BUF_SIZE_1K = 0x400,
+ HINIC_RX_BUF_SIZE_1_5K = 0x600,
+ HINIC_RX_BUF_SIZE_2K = 0x800,
+ HINIC_RX_BUF_SIZE_3K = 0xC00,
+ HINIC_RX_BUF_SIZE_4K = 0x1000,
+ HINIC_RX_BUF_SIZE_8K = 0x2000,
+ HINIC_RX_BUF_SIZE_16K = 0x4000,
+};
+
+const u32 hinic_hw_rx_buf_size[] = {
+ HINIC_RX_BUF_SIZE_32B,
+ HINIC_RX_BUF_SIZE_64B,
+ HINIC_RX_BUF_SIZE_96B,
+ HINIC_RX_BUF_SIZE_128B,
+ HINIC_RX_BUF_SIZE_192B,
+ HINIC_RX_BUF_SIZE_256B,
+ HINIC_RX_BUF_SIZE_384B,
+ HINIC_RX_BUF_SIZE_512B,
+ HINIC_RX_BUF_SIZE_768B,
+ HINIC_RX_BUF_SIZE_1K,
+ HINIC_RX_BUF_SIZE_1_5K,
+ HINIC_RX_BUF_SIZE_2K,
+ HINIC_RX_BUF_SIZE_3K,
+ HINIC_RX_BUF_SIZE_4K,
+ HINIC_RX_BUF_SIZE_8K,
+ HINIC_RX_BUF_SIZE_16K,
+};
+
+struct hinic_qp_ctxt_header {
+ u16 num_queues;
+ u16 queue_type;
+ u32 addr_offset;
+};
+
+struct hinic_sq_ctxt {
+ u32 ceq_attr;
+
+ u32 ci_owner;
+
+ u32 wq_pfn_hi;
+ u32 wq_pfn_lo;
+
+ u32 pref_cache;
+ u32 pref_owner;
+ u32 pref_wq_pfn_hi_ci;
+ u32 pref_wq_pfn_lo;
+
+ u32 rsvd8;
+ u32 rsvd9;
+
+ u32 wq_block_pfn_hi;
+ u32 wq_block_pfn_lo;
+};
+
+struct hinic_rq_ctxt {
+ u32 ceq_attr;
+
+ u32 pi_intr_attr;
+
+ u32 wq_pfn_hi_ci;
+ u32 wq_pfn_lo;
+
+ u32 pref_cache;
+ u32 pref_owner;
+
+ u32 pref_wq_pfn_hi_ci;
+ u32 pref_wq_pfn_lo;
+
+ u32 pi_paddr_hi;
+ u32 pi_paddr_lo;
+
+ u32 wq_block_pfn_hi;
+ u32 wq_block_pfn_lo;
+};
+
+struct hinic_sq_ctxt_block {
+ struct hinic_qp_ctxt_header cmdq_hdr;
+ struct hinic_sq_ctxt sq_ctxt[HINIC_Q_CTXT_MAX];
+};
+
+struct hinic_rq_ctxt_block {
+ struct hinic_qp_ctxt_header cmdq_hdr;
+ struct hinic_rq_ctxt rq_ctxt[HINIC_Q_CTXT_MAX];
+};
+
+struct hinic_clean_queue_ctxt {
+ struct hinic_qp_ctxt_header cmdq_hdr;
+ u32 ctxt_size;
+};
+
+
+static void
+hinic_qp_prepare_cmdq_header(struct hinic_qp_ctxt_header *qp_ctxt_hdr,
+ enum hinic_qp_ctxt_type ctxt_type,
+ u16 num_queues, u16 max_queues, u16 q_id)
+{
+ qp_ctxt_hdr->queue_type = ctxt_type;
+ qp_ctxt_hdr->num_queues = num_queues;
+
+ if (ctxt_type == HINIC_QP_CTXT_TYPE_SQ)
+ qp_ctxt_hdr->addr_offset =
+ SQ_CTXT_OFFSET(max_queues, max_queues, q_id);
+ else
+ qp_ctxt_hdr->addr_offset =
+ RQ_CTXT_OFFSET(max_queues, max_queues, q_id);
+
+ qp_ctxt_hdr->addr_offset = SIZE_16BYTES(qp_ctxt_hdr->addr_offset);
+
+ hinic_cpu_to_be32(qp_ctxt_hdr, sizeof(*qp_ctxt_hdr));
+}
+
+static void hinic_sq_prepare_ctxt(struct hinic_sq *sq, u16 global_qpn,
+ struct hinic_sq_ctxt *sq_ctxt)
+{
+ struct hinic_wq *wq = sq->wq;
+ u64 wq_page_addr;
+ u64 wq_page_pfn, wq_block_pfn;
+ u32 wq_page_pfn_hi, wq_page_pfn_lo;
+ u32 wq_block_pfn_hi, wq_block_pfn_lo;
+ u16 pi_start, ci_start;
+
+ ci_start = (u16)(wq->cons_idx);
+ pi_start = (u16)(wq->prod_idx);
+
+ /* read the first page from the HW table */
+ wq_page_addr = wq->queue_buf_paddr;
+
+ wq_page_pfn = WQ_PAGE_PFN(wq_page_addr);
+ wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
+ wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
+
+ wq_block_pfn = WQ_BLOCK_PFN(wq_page_addr);
+ wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
+ wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
+
+	/* must be configured with ceq disabled */
+ sq_ctxt->ceq_attr = SQ_CTXT_CEQ_ATTR_SET(global_qpn, GLOBAL_SQ_ID) |
+ SQ_CTXT_CEQ_ATTR_SET(0, ARM) |
+ SQ_CTXT_CEQ_ATTR_SET(0, CEQ_ID) |
+ SQ_CTXT_CEQ_ATTR_SET(0, EN);
+
+ sq_ctxt->ci_owner = SQ_CTXT_CI_SET(ci_start, IDX) |
+ SQ_CTXT_CI_SET(1, OWNER);
+
+ sq_ctxt->wq_pfn_hi =
+ SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
+ SQ_CTXT_WQ_PAGE_SET(pi_start, PI);
+
+ sq_ctxt->wq_pfn_lo = wq_page_pfn_lo;
+
+ sq_ctxt->pref_cache =
+ SQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) |
+ SQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) |
+ SQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD);
+
+ sq_ctxt->pref_owner = 1;
+
+ sq_ctxt->pref_wq_pfn_hi_ci =
+ SQ_CTXT_PREF_SET(ci_start, CI) |
+ SQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI);
+
+ sq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo;
+
+ sq_ctxt->wq_block_pfn_hi =
+ SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI);
+
+ sq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo;
+
+ hinic_cpu_to_be32(sq_ctxt, sizeof(*sq_ctxt));
+}
+
+static void hinic_rq_prepare_ctxt(struct hinic_rq *rq,
+ struct hinic_rq_ctxt *rq_ctxt)
+{
+ struct hinic_wq *wq = rq->wq;
+ u64 wq_page_addr;
+ u64 wq_page_pfn, wq_block_pfn;
+ u32 wq_page_pfn_hi, wq_page_pfn_lo;
+ u32 wq_block_pfn_hi, wq_block_pfn_lo;
+ u16 pi_start, ci_start;
+
+ ci_start = (u16)(wq->cons_idx);
+ pi_start = (u16)(wq->prod_idx);
+
+ /* read the first page from the HW table */
+ wq_page_addr = wq->queue_buf_paddr;
+
+ wq_page_pfn = WQ_PAGE_PFN(wq_page_addr);
+ wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
+ wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
+
+ wq_block_pfn = WQ_BLOCK_PFN(wq_page_addr);
+ wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
+ wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
+
+	/* must be configured with ceq enabled, but without generating ceq events */
+ rq_ctxt->ceq_attr = RQ_CTXT_CEQ_ATTR_SET(1, EN) |
+ RQ_CTXT_CEQ_ATTR_SET(1, OWNER);
+
+ rq_ctxt->pi_intr_attr = RQ_CTXT_PI_SET(pi_start, IDX) |
+ RQ_CTXT_PI_SET(rq->msix_entry_idx, INTR) |
+ RQ_CTXT_PI_SET(0, CEQ_ARM);
+
+ rq_ctxt->wq_pfn_hi_ci = RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
+ RQ_CTXT_WQ_PAGE_SET(ci_start, CI);
+
+ rq_ctxt->wq_pfn_lo = wq_page_pfn_lo;
+
+ rq_ctxt->pref_cache =
+ RQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) |
+ RQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) |
+ RQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD);
+
+ rq_ctxt->pref_owner = 1;
+
+ rq_ctxt->pref_wq_pfn_hi_ci =
+ RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI) |
+ RQ_CTXT_PREF_SET(ci_start, CI);
+
+ rq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo;
+
+ rq_ctxt->pi_paddr_hi = upper_32_bits(rq->pi_dma_addr);
+ rq_ctxt->pi_paddr_lo = lower_32_bits(rq->pi_dma_addr);
+
+ rq_ctxt->wq_block_pfn_hi =
+ RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI);
+
+ rq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo;
+
+ hinic_cpu_to_be32(rq_ctxt, sizeof(*rq_ctxt));
+}
+
+static int init_sq_ctxts(struct hinic_nic_io *nic_io)
+{
+ struct hinic_hwdev *hwdev = nic_io->hwdev;
+ struct hinic_sq_ctxt_block *sq_ctxt_block;
+ struct hinic_sq_ctxt *sq_ctxt;
+ struct hinic_cmd_buf *cmd_buf;
+ struct hinic_qp *qp;
+ u64 out_param = EIO;
+ u16 q_id, curr_id, global_qpn, max_ctxts, i;
+ int err = 0;
+
+ cmd_buf = hinic_alloc_cmd_buf(hwdev);
+ if (!cmd_buf) {
+ PMD_DRV_LOG(ERR, "Failed to allocate cmd buf");
+ return -ENOMEM;
+ }
+
+ q_id = 0;
+	/* sq and rq numbers may not be equal */
+ while (q_id < nic_io->num_sqs) {
+ sq_ctxt_block = cmd_buf->buf;
+ sq_ctxt = sq_ctxt_block->sq_ctxt;
+
+ max_ctxts = (nic_io->num_sqs - q_id) > HINIC_Q_CTXT_MAX ?
+ HINIC_Q_CTXT_MAX : (nic_io->num_sqs - q_id);
+
+ hinic_qp_prepare_cmdq_header(&sq_ctxt_block->cmdq_hdr,
+ HINIC_QP_CTXT_TYPE_SQ, max_ctxts,
+ nic_io->max_qps, q_id);
+
+ for (i = 0; i < max_ctxts; i++) {
+ curr_id = q_id + i;
+ qp = &nic_io->qps[curr_id];
+ global_qpn = nic_io->global_qpn + curr_id;
+
+ hinic_sq_prepare_ctxt(&qp->sq, global_qpn, &sq_ctxt[i]);
+ }
+
+ cmd_buf->size = SQ_CTXT_SIZE(max_ctxts);
+
+ err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
+ HINIC_MOD_L2NIC,
+ HINIC_UCODE_CMD_MDY_QUEUE_CONTEXT,
+ cmd_buf, &out_param, 0);
+ if (err || out_param != 0) {
+ PMD_DRV_LOG(ERR, "Failed to set SQ ctxts, err: %d",
+ err);
+ err = -EFAULT;
+ break;
+ }
+
+ q_id += max_ctxts;
+ }
+
+ hinic_free_cmd_buf(hwdev, cmd_buf);
+
+ return err;
+}
+
+static int init_rq_ctxts(struct hinic_nic_io *nic_io)
+{
+ struct hinic_hwdev *hwdev = nic_io->hwdev;
+ struct hinic_rq_ctxt_block *rq_ctxt_block;
+ struct hinic_rq_ctxt *rq_ctxt;
+ struct hinic_cmd_buf *cmd_buf;
+ struct hinic_qp *qp;
+ u64 out_param = 0;
+ u16 q_id, curr_id, max_ctxts, i;
+ int err = 0;
+
+ cmd_buf = hinic_alloc_cmd_buf(hwdev);
+ if (!cmd_buf) {
+ PMD_DRV_LOG(ERR, "Failed to allocate cmd buf");
+ return -ENOMEM;
+ }
+
+ q_id = 0;
+	/* sq and rq numbers may not be equal */
+ while (q_id < nic_io->num_rqs) {
+ rq_ctxt_block = cmd_buf->buf;
+ rq_ctxt = rq_ctxt_block->rq_ctxt;
+
+ max_ctxts = (nic_io->num_rqs - q_id) > HINIC_Q_CTXT_MAX ?
+ HINIC_Q_CTXT_MAX : (nic_io->num_rqs - q_id);
+
+ hinic_qp_prepare_cmdq_header(&rq_ctxt_block->cmdq_hdr,
+ HINIC_QP_CTXT_TYPE_RQ, max_ctxts,
+ nic_io->max_qps, q_id);
+
+ for (i = 0; i < max_ctxts; i++) {
+ curr_id = q_id + i;
+ qp = &nic_io->qps[curr_id];
+
+ hinic_rq_prepare_ctxt(&qp->rq, &rq_ctxt[i]);
+ }
+
+ cmd_buf->size = RQ_CTXT_SIZE(max_ctxts);
+
+ err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
+ HINIC_MOD_L2NIC,
+ HINIC_UCODE_CMD_MDY_QUEUE_CONTEXT,
+ cmd_buf, &out_param, 0);
+ if ((err) || out_param != 0) {
+ PMD_DRV_LOG(ERR, "Failed to set RQ ctxts");
+ err = -EFAULT;
+ break;
+ }
+
+ q_id += max_ctxts;
+ }
+
+ hinic_free_cmd_buf(hwdev, cmd_buf);
+
+ return err;
+}
+
+static int init_qp_ctxts(struct hinic_nic_io *nic_io)
+{
+ return (init_sq_ctxts(nic_io) || init_rq_ctxts(nic_io));
+}
+
+static int clean_queue_offload_ctxt(struct hinic_nic_io *nic_io,
+ enum hinic_qp_ctxt_type ctxt_type)
+{
+ struct hinic_hwdev *hwdev = nic_io->hwdev;
+ struct hinic_clean_queue_ctxt *ctxt_block;
+ struct hinic_cmd_buf *cmd_buf;
+ u64 out_param = 0;
+ int err;
+
+ cmd_buf = hinic_alloc_cmd_buf(hwdev);
+ if (!cmd_buf) {
+ PMD_DRV_LOG(ERR, "Failed to allocate cmd buf");
+ return -ENOMEM;
+ }
+
+ ctxt_block = cmd_buf->buf;
+ ctxt_block->cmdq_hdr.num_queues = nic_io->max_qps;
+ ctxt_block->cmdq_hdr.queue_type = ctxt_type;
+ ctxt_block->cmdq_hdr.addr_offset = 0;
+
+ /* TSO/LRO ctxt size: 0x0:0B; 0x1:160B; 0x2:200B; 0x3:240B */
+ ctxt_block->ctxt_size = 0x3;
+
+ hinic_cpu_to_be32(ctxt_block, sizeof(*ctxt_block));
+
+ cmd_buf->size = sizeof(*ctxt_block);
+
+ err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
+ HINIC_MOD_L2NIC,
+ HINIC_UCODE_CMD_CLEAN_QUEUE_CONTEXT,
+ cmd_buf, &out_param, 0);
+
+ if ((err) || (out_param)) {
+ PMD_DRV_LOG(ERR, "Failed to clean queue offload ctxts");
+ err = -EFAULT;
+ }
+
+ hinic_free_cmd_buf(hwdev, cmd_buf);
+
+ return err;
+}
+
+static int clean_qp_offload_ctxt(struct hinic_nic_io *nic_io)
+{
+ /* clean LRO/TSO context space */
+ return (clean_queue_offload_ctxt(nic_io, HINIC_QP_CTXT_TYPE_SQ) ||
+ clean_queue_offload_ctxt(nic_io, HINIC_QP_CTXT_TYPE_RQ));
+}
+
+/**
+ * get_hw_rx_buf_size - translate rx_buf_size into hw_rx_buf_size
+ * @rx_buf_sz: receive buffer size
+ * @return
+ * hw rx buffer size
+ */
+static u16 get_hw_rx_buf_size(u32 rx_buf_sz)
+{
+ u16 num_hw_types = sizeof(hinic_hw_rx_buf_size)
+ / sizeof(hinic_hw_rx_buf_size[0]);
+ u16 i;
+
+ for (i = 0; i < num_hw_types; i++) {
+ if (hinic_hw_rx_buf_size[i] == rx_buf_sz)
+ return i;
+ }
+
+ PMD_DRV_LOG(ERR, "Hw can't support rx buf size of %u", rx_buf_sz);
+
+ return DEFAULT_RX_BUF_SIZE; /* default 2K */
+}
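+
+/*
+ * Illustrative mapping (not part of the driver): the returned value is an
+ * index into hinic_hw_rx_buf_size[], so a 2K buffer maps as follows:
+ *
+ *	get_hw_rx_buf_size(0x800) == 0xB	(HINIC_RX_BUF_SIZE_2K)
+ *
+ * which matches DEFAULT_RX_BUF_SIZE used as the fallback.
+ */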
+
+/**
+ * hinic_set_root_ctxt - init root context in NIC
+ * @hwdev: the hardware interface of a nic device
+ * @rq_depth: the depth of receive queue
+ * @sq_depth: the depth of transmit queue
+ * @rx_buf_sz: receive buffer size from app
+ * Return: 0 on success, negative error value otherwise.
+ */
+static int
+hinic_set_root_ctxt(void *hwdev, u16 rq_depth, u16 sq_depth, int rx_buf_sz)
+{
+ struct hinic_root_ctxt root_ctxt;
+
+ memset(&root_ctxt, 0, sizeof(root_ctxt));
+ root_ctxt.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ root_ctxt.func_idx = hinic_global_func_id(hwdev);
+ root_ctxt.ppf_idx = hinic_ppf_idx(hwdev);
+ root_ctxt.set_cmdq_depth = 0;
+ root_ctxt.cmdq_depth = 0;
+ root_ctxt.lro_en = 1;
+ root_ctxt.rq_depth = (u16)ilog2(rq_depth);
+ root_ctxt.rx_buf_sz = get_hw_rx_buf_size(rx_buf_sz);
+ root_ctxt.sq_depth = (u16)ilog2(sq_depth);
+
+ return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_VAT_SET,
+ &root_ctxt, sizeof(root_ctxt),
+ NULL, NULL, 0);
+}
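+
+/*
+ * Illustrative note (not part of the driver): rq_depth and sq_depth are
+ * programmed as log2 values, so configuring 1024 rx and 2048 tx descriptors
+ * with a 2K rx buffer effectively sets rq_depth = 10 and sq_depth = 11:
+ *
+ *	err = hinic_set_root_ctxt(hwdev, 1024, 2048, HINIC_RX_BUF_SIZE_2K);
+ *
+ * The depth values are assumptions chosen only for the example.
+ */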
+
+/**
+ * hinic_clean_root_ctxt - clean root context table in NIC
+ * @hwdev: the hardware interface of a nic device
+ * @return
+ * 0 on success,
+ * negative error value otherwise.
+ */
+static int hinic_clean_root_ctxt(void *hwdev)
+{
+ struct hinic_root_ctxt root_ctxt;
+
+ memset(&root_ctxt, 0, sizeof(root_ctxt));
+ root_ctxt.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+ root_ctxt.func_idx = hinic_global_func_id(hwdev);
+ root_ctxt.ppf_idx = hinic_ppf_idx(hwdev);
+ root_ctxt.set_cmdq_depth = 0;
+ root_ctxt.cmdq_depth = 0;
+ root_ctxt.lro_en = 0;
+ root_ctxt.rq_depth = 0;
+ root_ctxt.rx_buf_sz = 0;
+ root_ctxt.sq_depth = 0;
+
+ return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_VAT_SET,
+ &root_ctxt, sizeof(root_ctxt),
+ NULL, NULL, 0);
+}
+
+/* Init qp contexts, set sq ci attributes, arm all sqs and set the vat page size */
+int hinic_init_qp_ctxts(struct hinic_hwdev *hwdev)
+{
+ struct hinic_nic_io *nic_io = hwdev->nic_io;
+ struct hinic_sq_attr sq_attr;
+ u16 q_id;
+ int err, rx_buf_sz;
+
+ /* set vat page size to max queue depth page_size */
+ err = hinic_set_pagesize(hwdev, HINIC_PAGE_SIZE_DPDK);
+ if (err != HINIC_OK) {
+ PMD_DRV_LOG(ERR, "Set vat page size: %d failed, rc: %d",
+ HINIC_PAGE_SIZE_DPDK, err);
+ return err;
+ }
+
+ if (hwdev->cmdqs->status & HINIC_CMDQ_SET_FAIL) {
+ err = hinic_reinit_cmdq_ctxts(hwdev);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Reinit cmdq context failed when dev start, err: %d",
+ err);
+ return err;
+ }
+ }
+
+ err = init_qp_ctxts(nic_io);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Init QP ctxts failed, rc: %d", err);
+ return err;
+ }
+
+ /* clean LRO/TSO context space */
+ err = clean_qp_offload_ctxt(nic_io);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Clean qp offload ctxts failed, rc: %d", err);
+ return err;
+ }
+
+ rx_buf_sz = nic_io->rq_buf_size;
+
+ /* update rx buf size to function table */
+ err = hinic_set_rx_vhd_mode(hwdev, 0, rx_buf_sz);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Set rx vhd mode failed, rc: %d", err);
+ return err;
+ }
+
+ err = hinic_set_root_ctxt(hwdev, nic_io->rq_depth,
+ nic_io->sq_depth, rx_buf_sz);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Set root context failed, rc: %d", err);
+ return err;
+ }
+
+ for (q_id = 0; q_id < nic_io->num_sqs; q_id++) {
+ sq_attr.ci_dma_base =
+ HINIC_CI_PADDR(nic_io->ci_dma_base, q_id) >> 2;
+ /* performance: sq ci update threshold as 8 */
+ sq_attr.pending_limit = 1;
+ sq_attr.coalescing_time = 1;
+ sq_attr.intr_en = 0;
+ sq_attr.l2nic_sqn = q_id;
+ sq_attr.dma_attr_off = 0;
+ err = hinic_set_ci_table(hwdev, q_id, &sq_attr);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Set ci table failed, rc: %d", err);
+ goto set_cons_idx_table_err;
+ }
+ }
+
+ return 0;
+
+set_cons_idx_table_err:
+ (void)hinic_clean_root_ctxt(hwdev);
+ return err;
+}
+
+void hinic_free_qp_ctxts(struct hinic_hwdev *hwdev)
+{
+ int err;
+
+ err = hinic_clean_root_ctxt(hwdev);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to clean root ctxt");
+}
+
+static int hinic_init_nic_hwdev(struct hinic_hwdev *hwdev)
+{
+ struct hinic_nic_io *nic_io = hwdev->nic_io;
+ u16 global_qpn, rx_buf_sz;
+ int err;
+
+ err = hinic_get_base_qpn(hwdev, &global_qpn);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to get base qpn");
+ goto err_init_nic_hwdev;
+ }
+
+ nic_io->global_qpn = global_qpn;
+ rx_buf_sz = HINIC_IS_VF(hwdev) ? RX_BUF_LEN_1_5K : RX_BUF_LEN_16K;
+ err = hinic_init_function_table(hwdev, rx_buf_sz);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to init function table");
+ goto err_init_nic_hwdev;
+ }
+
+ err = hinic_vf_func_init(hwdev);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to init nic mbox");
+ goto err_init_nic_hwdev;
+ }
+
+ err = hinic_set_fast_recycle_mode(hwdev, RECYCLE_MODE_DPDK);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to set fast recycle mode");
+ goto err_init_nic_hwdev;
+ }
+
+ return 0;
+
+err_init_nic_hwdev:
+ return err;
+}
+
+static void hinic_free_nic_hwdev(struct hinic_hwdev *hwdev)
+{
+ hinic_vf_func_free(hwdev);
+ hwdev->nic_io = NULL;
+}
+
+int hinic_rx_tx_flush(struct hinic_hwdev *hwdev)
+{
+ return hinic_func_rx_tx_flush(hwdev);
+}
+
+int hinic_get_sq_free_wqebbs(struct hinic_hwdev *hwdev, u16 q_id)
+{
+ struct hinic_nic_io *nic_io = hwdev->nic_io;
+ struct hinic_wq *wq = &nic_io->sq_wq[q_id];
+
+ return (wq->delta) - 1;
+}
+
+int hinic_get_rq_free_wqebbs(struct hinic_hwdev *hwdev, u16 q_id)
+{
+ struct hinic_nic_io *nic_io = hwdev->nic_io;
+ struct hinic_wq *wq = &nic_io->rq_wq[q_id];
+
+ return (wq->delta) - 1;
+}
+
+u16 hinic_get_sq_local_ci(struct hinic_hwdev *hwdev, u16 q_id)
+{
+ struct hinic_nic_io *nic_io = hwdev->nic_io;
+ struct hinic_wq *wq = &nic_io->sq_wq[q_id];
+
+ return (wq->cons_idx) & wq->mask;
+}
+
+void hinic_return_sq_wqe(struct hinic_hwdev *hwdev, u16 q_id,
+ int num_wqebbs, u16 owner)
+{
+ struct hinic_nic_io *nic_io = hwdev->nic_io;
+ struct hinic_sq *sq = &nic_io->qps[q_id].sq;
+
+ if (owner != sq->owner)
+ sq->owner = owner;
+
+ sq->wq->delta += num_wqebbs;
+ sq->wq->prod_idx -= num_wqebbs;
+}
+
+void hinic_update_sq_local_ci(struct hinic_hwdev *hwdev,
+ u16 q_id, int wqebb_cnt)
+{
+ struct hinic_nic_io *nic_io = hwdev->nic_io;
+ struct hinic_sq *sq = &nic_io->qps[q_id].sq;
+
+ hinic_put_wqe(sq->wq, wqebb_cnt);
+}
+
+void *hinic_get_rq_wqe(struct hinic_hwdev *hwdev, u16 q_id, u16 *pi)
+{
+ struct hinic_nic_io *nic_io = hwdev->nic_io;
+ struct hinic_rq *rq = &nic_io->qps[q_id].rq;
+
+ return hinic_get_wqe(rq->wq, 1, pi);
+}
+
+void hinic_return_rq_wqe(struct hinic_hwdev *hwdev, u16 q_id, int num_wqebbs)
+{
+ struct hinic_nic_io *nic_io = hwdev->nic_io;
+ struct hinic_rq *rq = &nic_io->qps[q_id].rq;
+
+ rq->wq->delta += num_wqebbs;
+ rq->wq->prod_idx -= num_wqebbs;
+}
+
+u16 hinic_get_rq_local_ci(struct hinic_hwdev *hwdev, u16 q_id)
+{
+ struct hinic_nic_io *nic_io = hwdev->nic_io;
+ struct hinic_wq *wq = &nic_io->rq_wq[q_id];
+
+ return (wq->cons_idx) & wq->mask;
+}
+
+void hinic_update_rq_local_ci(struct hinic_hwdev *hwdev, u16 q_id, int wqe_cnt)
+{
+ struct hinic_nic_io *nic_io = hwdev->nic_io;
+ struct hinic_rq *rq = &nic_io->qps[q_id].rq;
+
+ hinic_put_wqe(rq->wq, wqe_cnt);
+}
+
+static int hinic_alloc_nicio(struct hinic_hwdev *hwdev)
+{
+ struct hinic_nic_io *nic_io = hwdev->nic_io;
+ struct rte_pci_device *pdev = hwdev->pcidev_hdl;
+ u16 max_qps, num_qp;
+ int err;
+
+ max_qps = hinic_func_max_qnum(hwdev);
+ if ((max_qps & (max_qps - 1))) {
+ PMD_DRV_LOG(ERR, "Wrong number of max_qps: %d",
+ max_qps);
+ return -EINVAL;
+ }
+
+ nic_io->max_qps = max_qps;
+ nic_io->num_qps = max_qps;
+ num_qp = max_qps;
+
+ nic_io->qps = kzalloc_aligned(num_qp * sizeof(*nic_io->qps),
+ GFP_KERNEL);
+ if (!nic_io->qps) {
+ PMD_DRV_LOG(ERR, "Failed to allocate qps");
+ err = -ENOMEM;
+ goto alloc_qps_err;
+ }
+
+ nic_io->ci_vaddr_base = dma_zalloc_coherent(hwdev,
+ CI_TABLE_SIZE(num_qp, HINIC_PAGE_SIZE),
+ &nic_io->ci_dma_base,
+ pdev->device.numa_node);
+ if (!nic_io->ci_vaddr_base) {
+ PMD_DRV_LOG(ERR, "Failed to allocate ci area");
+ err = -ENOMEM;
+ goto ci_base_err;
+ }
+
+ nic_io->sq_wq = kzalloc_aligned(num_qp * sizeof(*nic_io->sq_wq),
+ GFP_KERNEL);
+ if (!nic_io->sq_wq) {
+ PMD_DRV_LOG(ERR, "Failed to allocate sq wq array");
+ err = -ENOMEM;
+ goto sq_wq_err;
+ }
+
+ nic_io->rq_wq = kzalloc_aligned(num_qp * sizeof(*nic_io->rq_wq),
+ GFP_KERNEL);
+ if (!nic_io->rq_wq) {
+ PMD_DRV_LOG(ERR, "Failed to allocate rq wq array");
+ err = -ENOMEM;
+ goto rq_wq_err;
+ }
+
+ return HINIC_OK;
+
+rq_wq_err:
+ kfree(nic_io->sq_wq);
+
+sq_wq_err:
+ dma_free_coherent(hwdev, CI_TABLE_SIZE(num_qp, HINIC_PAGE_SIZE),
+ nic_io->ci_vaddr_base, nic_io->ci_dma_base);
+
+ci_base_err:
+ kfree(nic_io->qps);
+
+alloc_qps_err:
+ return err;
+}
+
+static void hinic_free_nicio(struct hinic_hwdev *hwdev)
+{
+ struct hinic_nic_io *nic_io = hwdev->nic_io;
+
+ /* nic_io->rq_wq */
+ kfree(nic_io->rq_wq);
+
+ /* nic_io->sq_wq */
+ kfree(nic_io->sq_wq);
+
+ /* nic_io->ci_vaddr_base */
+ dma_free_coherent(hwdev,
+ CI_TABLE_SIZE(nic_io->max_qps, HINIC_PAGE_SIZE),
+ nic_io->ci_vaddr_base, nic_io->ci_dma_base);
+
+ /* nic_io->qps */
+ kfree(nic_io->qps);
+}
+
+/* allocate nic_io, root working queue set and init function table */
+int hinic_init_nicio(struct hinic_hwdev *hwdev)
+{
+ int rc;
+
+ hwdev->nic_io = rte_zmalloc("hinic_nicio", sizeof(*hwdev->nic_io),
+ RTE_CACHE_LINE_SIZE);
+ if (!hwdev->nic_io) {
+ PMD_DRV_LOG(ERR, "Allocate nic_io failed, dev_name: %s",
+ hwdev->pcidev_hdl->name);
+ return -ENOMEM;
+ }
+ hwdev->nic_io->hwdev = hwdev;
+
+ /* alloc root working queue set */
+ rc = hinic_alloc_nicio(hwdev);
+ if (rc) {
+		PMD_DRV_LOG(ERR, "Allocate root working queue set failed, dev_name: %s",
+			hwdev->pcidev_hdl->name);
+ goto allc_nicio_fail;
+ }
+
+ rc = hinic_init_nic_hwdev(hwdev);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Initialize hwdev failed, dev_name: %s",
+ hwdev->pcidev_hdl->name);
+ goto init_nic_hwdev_fail;
+ }
+
+ return 0;
+
+init_nic_hwdev_fail:
+ hinic_free_nicio(hwdev);
+
+allc_nicio_fail:
+ rte_free(hwdev->nic_io);
+ return rc;
+}
+
+void hinic_deinit_nicio(struct hinic_hwdev *hwdev)
+{
+ hinic_free_nicio(hwdev);
+
+ hinic_free_nic_hwdev(hwdev);
+
+ rte_free(hwdev->nic_io);
+ hwdev->nic_io = NULL;
+}
+
+/**
+ * hinic_convert_rx_buf_size - convert rx buffer size to hw size
+ * @rx_buf_sz: receive buffer size of mbuf
+ * @match_sz: output, the matched hardware receive buffer size
+ * @return
+ * 0 on success,
+ * negative error value otherwise.
+ */
+int hinic_convert_rx_buf_size(u32 rx_buf_sz, u32 *match_sz)
+{
+ u32 i, num_hw_types, best_match_sz;
+
+ if (unlikely(!match_sz || rx_buf_sz < HINIC_RX_BUF_SIZE_32B))
+ return -EINVAL;
+
+ if (rx_buf_sz >= HINIC_RX_BUF_SIZE_16K) {
+ best_match_sz = HINIC_RX_BUF_SIZE_16K;
+ goto size_matched;
+ }
+
+ num_hw_types = sizeof(hinic_hw_rx_buf_size) /
+ sizeof(hinic_hw_rx_buf_size[0]);
+ best_match_sz = hinic_hw_rx_buf_size[0];
+ for (i = 0; i < num_hw_types; i++) {
+ if (rx_buf_sz == hinic_hw_rx_buf_size[i]) {
+ best_match_sz = hinic_hw_rx_buf_size[i];
+ break;
+ } else if (rx_buf_sz < hinic_hw_rx_buf_size[i]) {
+ break;
+ }
+ best_match_sz = hinic_hw_rx_buf_size[i];
+ }
+
+size_matched:
+ *match_sz = best_match_sz;
+
+ return 0;
+}
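+/*
+ * Example: the loop rounds down to the largest supported size that does not
+ * exceed rx_buf_sz, so assuming 2048 and 3072 are adjacent entries of
+ * hinic_hw_rx_buf_size[], an mbuf data room of 2200 bytes matches 2048,
+ * while anything of 16K or more is capped at HINIC_RX_BUF_SIZE_16K.
+ */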
diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_nicio.h b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_nicio.h
new file mode 100644
index 000000000..9a487d024
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_nicio.h
@@ -0,0 +1,264 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_PMD_NICIO_H_
+#define _HINIC_PMD_NICIO_H_
+
+#define RX_BUF_LEN_16K 16384
+#define RX_BUF_LEN_1_5K 1536
+
+#define HINIC_Q_CTXT_MAX 42
+
+/* performance: ci addr RTE_CACHE_LINE_SIZE(64B) alignment */
+#define HINIC_CI_Q_ADDR_SIZE 64
+
+#define CI_TABLE_SIZE(num_qps, pg_sz) \
+ (ALIGN((num_qps) * HINIC_CI_Q_ADDR_SIZE, pg_sz))
+
+#define HINIC_CI_VADDR(base_addr, q_id) \
+ ((u8 *)(base_addr) + (q_id) * HINIC_CI_Q_ADDR_SIZE)
+
+#define HINIC_CI_PADDR(base_paddr, q_id) \
+ ((base_paddr) + (q_id) * HINIC_CI_Q_ADDR_SIZE)
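+/*
+ * Example: each queue owns one cache-line sized (64 byte) CI slot, so queue 3
+ * reads its consumer index at ci_vaddr_base + 3 * HINIC_CI_Q_ADDR_SIZE, and
+ * CI_TABLE_SIZE() pads the whole table up to the page size.
+ */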
+
+#define Q_CTXT_SIZE 48
+#define TSO_LRO_CTXT_SIZE 240
+
+#define SQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \
+ (((max_rqs) + (max_sqs)) * TSO_LRO_CTXT_SIZE + \
+ (q_id) * Q_CTXT_SIZE)
+
+#define RQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \
+ (((max_rqs) + (max_sqs)) * TSO_LRO_CTXT_SIZE + \
+ (max_sqs) * Q_CTXT_SIZE + (q_id) * Q_CTXT_SIZE)
+
+#define SQ_CTXT_SIZE(num_sqs) \
+ ((u16)(sizeof(struct hinic_qp_ctxt_header) + \
+ (num_sqs) * sizeof(struct hinic_sq_ctxt)))
+
+#define RQ_CTXT_SIZE(num_rqs) \
+ ((u16)(sizeof(struct hinic_qp_ctxt_header) + \
+ (num_rqs) * sizeof(struct hinic_rq_ctxt)))
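+/*
+ * Layout implied by the offset macros above: the TSO/LRO contexts of all SQs
+ * and RQs come first, followed by the SQ contexts and then the RQ contexts,
+ * which is why both offsets start with (max_rqs + max_sqs) * TSO_LRO_CTXT_SIZE.
+ */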
+
+#define SQ_CTXT_CEQ_ATTR_CEQ_ID_SHIFT 8
+#define SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_SHIFT 13
+#define SQ_CTXT_CEQ_ATTR_EN_SHIFT 23
+#define SQ_CTXT_CEQ_ATTR_ARM_SHIFT 31
+
+#define SQ_CTXT_CEQ_ATTR_CEQ_ID_MASK 0x1FU
+#define SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_MASK 0x3FFU
+#define SQ_CTXT_CEQ_ATTR_EN_MASK 0x1U
+#define SQ_CTXT_CEQ_ATTR_ARM_MASK 0x1U
+
+#define SQ_CTXT_CEQ_ATTR_SET(val, member) \
+ (((val) & SQ_CTXT_CEQ_ATTR_##member##_MASK) << \
+ SQ_CTXT_CEQ_ATTR_##member##_SHIFT)
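+/*
+ * Example: the *_SET macros place a field value at its bit offset, e.g.
+ * SQ_CTXT_CEQ_ATTR_SET(5, CEQ_ID) expands to (5 & 0x1FU) << 8; several such
+ * fields are OR'ed together to build one 32 bit context word.
+ */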
+
+#define SQ_CTXT_CI_IDX_SHIFT 11
+#define SQ_CTXT_CI_OWNER_SHIFT 23
+
+#define SQ_CTXT_CI_IDX_MASK 0xFFFU
+#define SQ_CTXT_CI_OWNER_MASK 0x1U
+
+#define SQ_CTXT_CI_SET(val, member) \
+ (((val) & SQ_CTXT_CI_##member##_MASK) << SQ_CTXT_CI_##member##_SHIFT)
+
+#define SQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0
+#define SQ_CTXT_WQ_PAGE_PI_SHIFT 20
+
+#define SQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFFU
+#define SQ_CTXT_WQ_PAGE_PI_MASK 0xFFFU
+
+#define SQ_CTXT_WQ_PAGE_SET(val, member) \
+ (((val) & SQ_CTXT_WQ_PAGE_##member##_MASK) << \
+ SQ_CTXT_WQ_PAGE_##member##_SHIFT)
+
+#define SQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0
+#define SQ_CTXT_PREF_CACHE_MAX_SHIFT 14
+#define SQ_CTXT_PREF_CACHE_MIN_SHIFT 25
+
+#define SQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFFU
+#define SQ_CTXT_PREF_CACHE_MAX_MASK 0x7FFU
+#define SQ_CTXT_PREF_CACHE_MIN_MASK 0x7FU
+
+#define SQ_CTXT_PREF_WQ_PFN_HI_SHIFT 0
+#define SQ_CTXT_PREF_CI_SHIFT 20
+
+#define SQ_CTXT_PREF_WQ_PFN_HI_MASK 0xFFFFFU
+#define SQ_CTXT_PREF_CI_MASK 0xFFFU
+
+#define SQ_CTXT_PREF_SET(val, member) \
+ (((val) & SQ_CTXT_PREF_##member##_MASK) << \
+ SQ_CTXT_PREF_##member##_SHIFT)
+
+#define SQ_CTXT_WQ_BLOCK_PFN_HI_SHIFT 0
+
+#define SQ_CTXT_WQ_BLOCK_PFN_HI_MASK 0x7FFFFFU
+
+#define SQ_CTXT_WQ_BLOCK_SET(val, member) \
+ (((val) & SQ_CTXT_WQ_BLOCK_##member##_MASK) << \
+ SQ_CTXT_WQ_BLOCK_##member##_SHIFT)
+
+#define RQ_CTXT_CEQ_ATTR_EN_SHIFT 0
+#define RQ_CTXT_CEQ_ATTR_OWNER_SHIFT 1
+
+#define RQ_CTXT_CEQ_ATTR_EN_MASK 0x1U
+#define RQ_CTXT_CEQ_ATTR_OWNER_MASK 0x1U
+
+#define RQ_CTXT_CEQ_ATTR_SET(val, member) \
+ (((val) & RQ_CTXT_CEQ_ATTR_##member##_MASK) << \
+ RQ_CTXT_CEQ_ATTR_##member##_SHIFT)
+
+#define RQ_CTXT_PI_IDX_SHIFT 0
+#define RQ_CTXT_PI_INTR_SHIFT 22
+#define RQ_CTXT_PI_CEQ_ARM_SHIFT 31
+
+#define RQ_CTXT_PI_IDX_MASK 0xFFFU
+#define RQ_CTXT_PI_INTR_MASK 0x3FFU
+#define RQ_CTXT_PI_CEQ_ARM_MASK 0x1U
+
+#define RQ_CTXT_PI_SET(val, member) \
+ (((val) & RQ_CTXT_PI_##member##_MASK) << RQ_CTXT_PI_##member##_SHIFT)
+
+#define RQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0
+#define RQ_CTXT_WQ_PAGE_CI_SHIFT 20
+
+#define RQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFFU
+#define RQ_CTXT_WQ_PAGE_CI_MASK 0xFFFU
+
+#define RQ_CTXT_WQ_PAGE_SET(val, member) \
+ (((val) & RQ_CTXT_WQ_PAGE_##member##_MASK) << \
+ RQ_CTXT_WQ_PAGE_##member##_SHIFT)
+
+#define RQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0
+#define RQ_CTXT_PREF_CACHE_MAX_SHIFT 14
+#define RQ_CTXT_PREF_CACHE_MIN_SHIFT 25
+
+#define RQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFFU
+#define RQ_CTXT_PREF_CACHE_MAX_MASK 0x7FFU
+#define RQ_CTXT_PREF_CACHE_MIN_MASK 0x7FU
+
+#define RQ_CTXT_PREF_WQ_PFN_HI_SHIFT 0
+#define RQ_CTXT_PREF_CI_SHIFT 20
+
+#define RQ_CTXT_PREF_WQ_PFN_HI_MASK 0xFFFFFU
+#define RQ_CTXT_PREF_CI_MASK 0xFFFU
+
+#define RQ_CTXT_PREF_SET(val, member) \
+ (((val) & RQ_CTXT_PREF_##member##_MASK) << \
+ RQ_CTXT_PREF_##member##_SHIFT)
+
+#define RQ_CTXT_WQ_BLOCK_PFN_HI_SHIFT 0
+
+#define RQ_CTXT_WQ_BLOCK_PFN_HI_MASK 0x7FFFFFU
+
+#define RQ_CTXT_WQ_BLOCK_SET(val, member) \
+ (((val) & RQ_CTXT_WQ_BLOCK_##member##_MASK) << \
+ RQ_CTXT_WQ_BLOCK_##member##_SHIFT)
+
+#define SIZE_16BYTES(size) (ALIGN((size), 16) >> 4)
+
+enum hinic_qp_ctxt_type {
+ HINIC_QP_CTXT_TYPE_SQ,
+ HINIC_QP_CTXT_TYPE_RQ,
+};
+
+struct hinic_sq {
+ struct hinic_wq *wq;
+ volatile u16 *cons_idx_addr;
+ void __iomem *db_addr;
+
+ u16 q_id;
+ u16 owner;
+ u16 sq_depth;
+};
+
+struct hinic_rq {
+ struct hinic_wq *wq;
+ volatile u16 *pi_virt_addr;
+ dma_addr_t pi_dma_addr;
+
+ u16 irq_id;
+ u16 msix_entry_idx;
+ u16 q_id;
+ u16 rq_depth;
+};
+
+struct hinic_qp {
+ struct hinic_sq sq;
+ struct hinic_rq rq;
+};
+
+struct hinic_event {
+ void (*tx_ack)(void *handle, u16 q_id);
+ /* status: 0 - link down; 1 - link up */
+ void (*link_change)(void *handle, int status);
+};
+
+struct hinic_nic_io {
+ struct hinic_hwdev *hwdev;
+
+ u16 global_qpn;
+
+ struct hinic_wq *sq_wq;
+ struct hinic_wq *rq_wq;
+
+ u16 max_qps;
+ u16 num_qps;
+
+ u16 num_sqs;
+ u16 num_rqs;
+
+ u16 sq_depth;
+ u16 rq_depth;
+
+ u16 rq_buf_size;
+ u16 vhd_mode;
+
+ struct hinic_qp *qps;
+ /* sq ci mem base addr of the function */
+ void *ci_vaddr_base;
+ dma_addr_t ci_dma_base;
+
+ struct hinic_event event;
+ void *event_handle;
+};
+
+struct hinic_sq_db {
+ u32 db_info;
+};
+
+int hinic_init_qp_ctxts(struct hinic_hwdev *hwdev);
+
+void hinic_free_qp_ctxts(struct hinic_hwdev *hwdev);
+
+int hinic_rx_tx_flush(struct hinic_hwdev *hwdev);
+
+int hinic_get_sq_free_wqebbs(struct hinic_hwdev *hwdev, u16 q_id);
+
+u16 hinic_get_sq_local_ci(struct hinic_hwdev *hwdev, u16 q_id);
+
+void hinic_update_sq_local_ci(struct hinic_hwdev *hwdev, u16 q_id,
+ int wqebb_cnt);
+
+void hinic_return_sq_wqe(struct hinic_hwdev *hwdev, u16 q_id,
+ int num_wqebbs, u16 owner);
+
+int hinic_get_rq_free_wqebbs(struct hinic_hwdev *hwdev, u16 q_id);
+
+void *hinic_get_rq_wqe(struct hinic_hwdev *hwdev, u16 q_id, u16 *pi);
+
+void hinic_return_rq_wqe(struct hinic_hwdev *hwdev, u16 q_id, int num_wqebbs);
+
+u16 hinic_get_rq_local_ci(struct hinic_hwdev *hwdev, u16 q_id);
+
+void hinic_update_rq_local_ci(struct hinic_hwdev *hwdev, u16 q_id, int wqe_cnt);
+
+int hinic_init_nicio(struct hinic_hwdev *hwdev);
+
+void hinic_deinit_nicio(struct hinic_hwdev *hwdev);
+
+int hinic_convert_rx_buf_size(u32 rx_buf_sz, u32 *match_sz);
+
+#endif /* _HINIC_PMD_NICIO_H_ */
diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_wq.c b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_wq.c
new file mode 100644
index 000000000..345248c3e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_wq.c
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include "hinic_compat.h"
+#include "hinic_pmd_hwdev.h"
+#include "hinic_pmd_wq.h"
+
+static void free_wq_pages(struct hinic_hwdev *hwdev, struct hinic_wq *wq)
+{
+ dma_free_coherent(hwdev, wq->wq_buf_size, (void *)wq->queue_buf_vaddr,
+ (dma_addr_t)wq->queue_buf_paddr);
+
+ wq->queue_buf_paddr = 0;
+ wq->queue_buf_vaddr = 0;
+}
+
+static int alloc_wq_pages(struct hinic_hwdev *hwdev, struct hinic_wq *wq,
+ unsigned int socket_id)
+{
+ dma_addr_t dma_addr = 0;
+
+ wq->queue_buf_vaddr = (u64)(u64 *)
+ dma_zalloc_coherent_aligned256k(hwdev, wq->wq_buf_size,
+ &dma_addr, socket_id);
+ if (!wq->queue_buf_vaddr) {
+ PMD_DRV_LOG(ERR, "Failed to allocate wq page");
+ return -ENOMEM;
+ }
+
+ if (!ADDR_256K_ALIGNED(dma_addr)) {
+		PMD_DRV_LOG(ERR, "WQE pages are not 256K aligned!");
+ dma_free_coherent(hwdev, wq->wq_buf_size,
+ (void *)wq->queue_buf_vaddr,
+ dma_addr);
+ return -ENOMEM;
+ }
+ wq->queue_buf_paddr = dma_addr;
+
+ return 0;
+}
+
+int hinic_wq_allocate(struct hinic_hwdev *hwdev, struct hinic_wq *wq,
+ u32 wqebb_shift, u16 q_depth, unsigned int socket_id)
+{
+ int err;
+
+ if (q_depth & (q_depth - 1)) {
+ PMD_DRV_LOG(ERR, "WQ q_depth isn't power of 2");
+ return -EINVAL;
+ }
+
+ wq->wqebb_size = 1 << wqebb_shift;
+ wq->wqebb_shift = wqebb_shift;
+ wq->wq_buf_size = ((u32)q_depth) << wqebb_shift;
+ wq->q_depth = q_depth;
+
+ if (wq->wq_buf_size > (HINIC_PAGE_SIZE << HINIC_PAGE_SIZE_DPDK)) {
+		PMD_DRV_LOG(ERR, "Invalid q_depth %u, wq buffer size exceeds one queue page",
+			q_depth);
+ return -EINVAL;
+ }
+
+ err = alloc_wq_pages(hwdev, wq, socket_id);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to allocate wq pages");
+ return err;
+ }
+
+ wq->cons_idx = 0;
+ wq->prod_idx = 0;
+ wq->delta = q_depth;
+ wq->mask = q_depth - 1;
+
+ return 0;
+}
+
+void hinic_wq_free(struct hinic_hwdev *hwdev, struct hinic_wq *wq)
+{
+ free_wq_pages(hwdev, wq);
+}
+
+void hinic_put_wqe(struct hinic_wq *wq, int num_wqebbs)
+{
+ wq->cons_idx += num_wqebbs;
+ wq->delta += num_wqebbs;
+}
+
+void *hinic_read_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *cons_idx)
+{
+ u16 curr_cons_idx;
+
+ if ((wq->delta + num_wqebbs) > wq->q_depth)
+ return NULL;
+
+ curr_cons_idx = (u16)(wq->cons_idx);
+
+ curr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx);
+
+ *cons_idx = curr_cons_idx;
+
+ return WQ_WQE_ADDR(wq, (u32)(*cons_idx));
+}
+
+int hinic_cmdq_alloc(struct hinic_wq *wq, struct hinic_hwdev *hwdev,
+ int cmdq_blocks, u32 wq_buf_size, u32 wqebb_shift,
+ u16 q_depth)
+{
+ int i, j, err = -ENOMEM;
+
+	/* caller must ensure q_depth is a power of 2 and wqebb_size is not 0 */
+ for (i = 0; i < cmdq_blocks; i++) {
+ wq[i].wqebb_size = 1 << wqebb_shift;
+ wq[i].wqebb_shift = wqebb_shift;
+ wq[i].wq_buf_size = wq_buf_size;
+ wq[i].q_depth = q_depth;
+
+ err = alloc_wq_pages(hwdev, &wq[i], SOCKET_ID_ANY);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to alloc CMDQ blocks");
+ goto cmdq_block_err;
+ }
+
+ wq[i].cons_idx = 0;
+ wq[i].prod_idx = 0;
+ wq[i].delta = q_depth;
+
+ wq[i].mask = q_depth - 1;
+ }
+
+ return 0;
+
+cmdq_block_err:
+ for (j = 0; j < i; j++)
+ free_wq_pages(hwdev, &wq[j]);
+
+ return err;
+}
+
+void hinic_cmdq_free(struct hinic_hwdev *hwdev, struct hinic_wq *wq,
+ int cmdq_blocks)
+{
+ int i;
+
+ for (i = 0; i < cmdq_blocks; i++)
+ free_wq_pages(hwdev, &wq[i]);
+}
+
+void hinic_wq_wqe_pg_clear(struct hinic_wq *wq)
+{
+ wq->cons_idx = 0;
+ wq->prod_idx = 0;
+
+ memset((void *)wq->queue_buf_vaddr, 0, wq->wq_buf_size);
+}
+
+void *hinic_get_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *prod_idx)
+{
+ u16 curr_prod_idx;
+
+ wq->delta -= num_wqebbs;
+ curr_prod_idx = wq->prod_idx;
+ wq->prod_idx += num_wqebbs;
+ *prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx);
+
+ return WQ_WQE_ADDR(wq, (u32)(*prod_idx));
+}
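+/*
+ * Example of the prod/cons bookkeeping: with q_depth = 8, hinic_get_wqe(wq,
+ * 2, &pi) advances prod_idx by 2 and shrinks delta by 2, and a later
+ * hinic_put_wqe(wq, 2) advances cons_idx and restores delta, so the number
+ * of free wqebbs reported by the nicio layer is delta - 1.
+ */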
+
+/**
+ * hinic_set_sge - set dma area in scatter gather entry
+ * @sge: scatter gather entry
+ * @addr: dma address
+ * @len: length of relevant data in the dma address
+ **/
+void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, u32 len)
+{
+ sge->hi_addr = upper_32_bits(addr);
+ sge->lo_addr = lower_32_bits(addr);
+ sge->len = len;
+}
diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_wq.h b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_wq.h
new file mode 100644
index 000000000..354d0338d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_wq.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_PMD_WQ_H_
+#define _HINIC_PMD_WQ_H_
+
+#define WQS_BLOCKS_PER_PAGE 4
+
+#define WQ_SIZE(wq) (u32)((u64)(wq)->q_depth * (wq)->wqebb_size)
+
+#define WQE_PAGE_NUM(wq, idx) (((idx) >> ((wq)->wqebbs_per_page_shift)) & \
+ ((wq)->num_q_pages - 1))
+
+#define WQE_PAGE_OFF(wq, idx) ((u64)((wq)->wqebb_size) * \
+ ((idx) & ((wq)->num_wqebbs_per_page - 1)))
+
+#define WQ_PAGE_ADDR_SIZE sizeof(u64)
+#define WQ_PAGE_ADDR_SIZE_SHIFT 3
+#define WQ_PAGE_ADDR(wq, idx) \
+ (u8 *)(*(u64 *)((u64)((wq)->shadow_block_vaddr) + \
+ (WQE_PAGE_NUM(wq, idx) << WQ_PAGE_ADDR_SIZE_SHIFT)))
+
+#define WQ_BLOCK_SIZE 4096UL
+#define WQS_PAGE_SIZE (WQS_BLOCKS_PER_PAGE * WQ_BLOCK_SIZE)
+#define WQ_MAX_PAGES (WQ_BLOCK_SIZE >> WQ_PAGE_ADDR_SIZE_SHIFT)
+
+#define CMDQ_BLOCKS_PER_PAGE 8
+#define CMDQ_BLOCK_SIZE 512UL
+#define CMDQ_PAGE_SIZE ALIGN((CMDQ_BLOCKS_PER_PAGE * \
+ CMDQ_BLOCK_SIZE), PAGE_SIZE)
+
+#define ADDR_4K_ALIGNED(addr) (0 == ((addr) & 0xfff))
+#define ADDR_256K_ALIGNED(addr) (0 == ((addr) & 0x3ffff))
+
+#define WQ_BASE_VADDR(wqs, wq) \
+ (u64 *)(((u64)((wqs)->page_vaddr[(wq)->page_idx])) \
+ + (wq)->block_idx * WQ_BLOCK_SIZE)
+
+#define WQ_BASE_PADDR(wqs, wq) (((wqs)->page_paddr[(wq)->page_idx]) \
+ + (u64)(wq)->block_idx * WQ_BLOCK_SIZE)
+
+#define WQ_BASE_ADDR(wqs, wq) \
+ (u64 *)(((u64)((wqs)->shadow_page_vaddr[(wq)->page_idx])) \
+ + (wq)->block_idx * WQ_BLOCK_SIZE)
+
+#define CMDQ_BASE_VADDR(cmdq_pages, wq) \
+ (u64 *)(((u64)((cmdq_pages)->cmdq_page_vaddr)) \
+ + (wq)->block_idx * CMDQ_BLOCK_SIZE)
+
+#define CMDQ_BASE_PADDR(cmdq_pages, wq) \
+ (((u64)((cmdq_pages)->cmdq_page_paddr)) \
+ + (u64)(wq)->block_idx * CMDQ_BLOCK_SIZE)
+
+#define CMDQ_BASE_ADDR(cmdq_pages, wq) \
+ (u64 *)(((u64)((cmdq_pages)->cmdq_shadow_page_vaddr)) \
+ + (wq)->block_idx * CMDQ_BLOCK_SIZE)
+
+#define MASKED_WQE_IDX(wq, idx) ((idx) & (wq)->mask)
+
+#define WQE_SHADOW_PAGE(wq, wqe) \
+ (u16)(((unsigned long)(wqe) - (unsigned long)(wq)->shadow_wqe) \
+ / (wq)->max_wqe_size)
+
+#define WQE_IN_RANGE(wqe, start, end) \
+ (((unsigned long)(wqe) >= (unsigned long)(start)) && \
+ ((unsigned long)(wqe) < (unsigned long)(end)))
+
+#define WQ_NUM_PAGES(num_wqs) \
+ (ALIGN((u32)num_wqs, WQS_BLOCKS_PER_PAGE) / WQS_BLOCKS_PER_PAGE)
+
+#define WQ_WQE_ADDR(wq, idx) ((void *)((u64)((wq)->queue_buf_vaddr) + \
+ ((idx) << (wq)->wqebb_shift)))
+
+#define WQ_PAGE_PFN_SHIFT 12
+#define WQ_BLOCK_PFN_SHIFT 9
+
+#define WQ_PAGE_PFN(page_addr) ((page_addr) >> WQ_PAGE_PFN_SHIFT)
+#define WQ_BLOCK_PFN(page_addr) ((page_addr) >> WQ_BLOCK_PFN_SHIFT)
+
+
+#define HINIC_SQ_WQEBB_SIZE 64
+#define HINIC_RQ_WQE_SIZE 32
+#define HINIC_SQ_WQEBB_SHIFT 6
+#define HINIC_RQ_WQEBB_SHIFT 5
+
+struct hinic_sge {
+ u32 hi_addr;
+ u32 lo_addr;
+ u32 len;
+};
+
+/* Working Queue */
+struct hinic_wq {
+ /* The addresses are 64 bit in the HW */
+ u64 queue_buf_vaddr;
+
+ u16 q_depth;
+ u16 mask;
+ u32 delta;
+
+ u32 cons_idx;
+ u32 prod_idx;
+
+ u64 queue_buf_paddr;
+
+ u32 wqebb_size;
+ u32 wqebb_shift;
+
+ u32 wq_buf_size;
+
+ u32 rsvd[5];
+};
+
+void hinic_wq_wqe_pg_clear(struct hinic_wq *wq);
+
+int hinic_cmdq_alloc(struct hinic_wq *wq, struct hinic_hwdev *hwdev,
+ int cmdq_blocks, u32 wq_buf_size, u32 wqebb_shift,
+ u16 q_depth);
+
+void hinic_cmdq_free(struct hinic_hwdev *hwdev, struct hinic_wq *wq,
+ int cmdq_blocks);
+
+int hinic_wq_allocate(struct hinic_hwdev *hwdev, struct hinic_wq *wq,
+ u32 wqebb_shift, u16 q_depth, unsigned int socket_id);
+
+void hinic_wq_free(struct hinic_hwdev *hwdev, struct hinic_wq *wq);
+
+void *hinic_get_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *prod_idx);
+
+void hinic_put_wqe(struct hinic_wq *wq, int num_wqebbs);
+
+void *hinic_read_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *cons_idx);
+
+void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, u32 len);
+
+#endif /* _HINIC_PMD_WQ_H_ */
diff --git a/src/spdk/dpdk/drivers/net/hinic/base/meson.build b/src/spdk/dpdk/drivers/net/hinic/base/meson.build
new file mode 100644
index 000000000..6cf947f84
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/base/meson.build
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Huawei Technologies Co., Ltd
+
+sources = [
+ 'hinic_pmd_api_cmd.c',
+ 'hinic_pmd_cfg.c',
+ 'hinic_pmd_cmdq.c',
+ 'hinic_pmd_eqs.c',
+ 'hinic_pmd_hwdev.c',
+ 'hinic_pmd_hwif.c',
+ 'hinic_pmd_mgmt.c',
+ 'hinic_pmd_niccfg.c',
+ 'hinic_pmd_nicio.c',
+ 'hinic_pmd_wq.c',
+ 'hinic_pmd_mbox.c',
+]
+
+extra_flags = []
+# The driver runs only on 64-bit architectures, remove 32-bit warnings
+if not dpdk_conf.get('RTE_ARCH_64')
+ extra_flags += ['-Wno-int-to-pointer-cast', '-Wno-pointer-to-int-cast']
+endif
+
+foreach flag: extra_flags
+ if cc.has_argument(flag)
+ cflags += flag
+ endif
+endforeach
+
+deps += ['hash']
+
+c_args = cflags
+
+base_lib = static_library('hinic_base', sources,
+ dependencies: [static_rte_eal, static_rte_ethdev, static_rte_bus_pci, static_rte_hash],
+ c_args: c_args)
+base_objs = base_lib.extract_all_objects()
diff --git a/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_ethdev.c b/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_ethdev.c
new file mode 100644
index 000000000..2f0f33a8d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_ethdev.c
@@ -0,0 +1,3257 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_ethdev_pci.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_mempool.h>
+#include <rte_errno.h>
+#include <rte_ether.h>
+
+#include "base/hinic_compat.h"
+#include "base/hinic_pmd_hwdev.h"
+#include "base/hinic_pmd_hwif.h"
+#include "base/hinic_pmd_wq.h"
+#include "base/hinic_pmd_cfg.h"
+#include "base/hinic_pmd_mgmt.h"
+#include "base/hinic_pmd_cmdq.h"
+#include "base/hinic_pmd_niccfg.h"
+#include "base/hinic_pmd_nicio.h"
+#include "base/hinic_pmd_mbox.h"
+#include "hinic_pmd_ethdev.h"
+#include "hinic_pmd_tx.h"
+#include "hinic_pmd_rx.h"
+
+/* Vendor ID used by Huawei devices */
+#define HINIC_HUAWEI_VENDOR_ID 0x19E5
+
+/* Hinic devices */
+#define HINIC_DEV_ID_PRD 0x1822
+#define HINIC_DEV_ID_VF 0x375E
+#define HINIC_DEV_ID_VF_HV 0x379E
+
+/* Mezz card for Blade Server */
+#define HINIC_DEV_ID_MEZZ_25GE 0x0210
+#define HINIC_DEV_ID_MEZZ_100GE 0x0205
+
+/* 2*25G and 2*100G card */
+#define HINIC_DEV_ID_1822_DUAL_25GE 0x0206
+#define HINIC_DEV_ID_1822_100GE 0x0200
+
+#define HINIC_SERVICE_MODE_NIC 2
+
+#define HINIC_INTR_CB_UNREG_MAX_RETRIES 10
+
+#define DEFAULT_BASE_COS 4
+#define NR_MAX_COS 8
+
+#define HINIC_MIN_RX_BUF_SIZE 1024
+#define HINIC_MAX_UC_MAC_ADDRS 128
+#define HINIC_MAX_MC_MAC_ADDRS 2048
+
+#define HINIC_DEFAULT_BURST_SIZE 32
+#define HINIC_DEFAULT_NB_QUEUES 1
+#define HINIC_DEFAULT_RING_SIZE 1024
+#define HINIC_MAX_LRO_SIZE 65536
+
+/*
+ * vlan_id is a 12 bit number.
+ * The VFTA array is actually a 4096 bit array, 128 of 32bit elements.
+ * 2^5 = 32. The lower 5 bits of vlan_id select the bit within a 32bit
+ * element, and the upper 7 bits select the VFTA array index.
+ */
+#define HINIC_VFTA_BIT(vlan_id) (1 << ((vlan_id) & 0x1F))
+#define HINIC_VFTA_IDX(vlan_id) ((vlan_id) >> 5)
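+/*
+ * Example: vlan_id 100 gives HINIC_VFTA_IDX(100) = 100 >> 5 = 3 and
+ * HINIC_VFTA_BIT(100) = 1 << (100 & 0x1F) = 1 << 4.
+ */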
+
+#define HINIC_VLAN_FILTER_EN (1U << 0)
+
+#define HINIC_MTU_TO_PKTLEN(mtu) \
+ ((mtu) + ETH_HLEN + ETH_CRC_LEN)
+
+#define HINIC_PKTLEN_TO_MTU(pktlen) \
+ ((pktlen) - (ETH_HLEN + ETH_CRC_LEN))
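+/*
+ * Example: with the usual 14 byte Ethernet header and 4 byte CRC, an MTU of
+ * 1500 maps to HINIC_MTU_TO_PKTLEN(1500) = 1518 and back via
+ * HINIC_PKTLEN_TO_MTU(1518) = 1500.
+ */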
+
+/* lro wqe number limit for one packet */
+#define HINIC_LRO_WQE_NUM_DEFAULT 8
+
+/* Driver-specific log messages type */
+int hinic_logtype;
+
+struct hinic_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ u32 offset;
+};
+
+#define HINIC_FUNC_STAT(_stat_item) { \
+ .name = #_stat_item, \
+ .offset = offsetof(struct hinic_vport_stats, _stat_item) \
+}
+
+#define HINIC_PORT_STAT(_stat_item) { \
+ .name = #_stat_item, \
+ .offset = offsetof(struct hinic_phy_port_stats, _stat_item) \
+}
+
+static const struct hinic_xstats_name_off hinic_vport_stats_strings[] = {
+ HINIC_FUNC_STAT(tx_unicast_pkts_vport),
+ HINIC_FUNC_STAT(tx_unicast_bytes_vport),
+ HINIC_FUNC_STAT(tx_multicast_pkts_vport),
+ HINIC_FUNC_STAT(tx_multicast_bytes_vport),
+ HINIC_FUNC_STAT(tx_broadcast_pkts_vport),
+ HINIC_FUNC_STAT(tx_broadcast_bytes_vport),
+
+ HINIC_FUNC_STAT(rx_unicast_pkts_vport),
+ HINIC_FUNC_STAT(rx_unicast_bytes_vport),
+ HINIC_FUNC_STAT(rx_multicast_pkts_vport),
+ HINIC_FUNC_STAT(rx_multicast_bytes_vport),
+ HINIC_FUNC_STAT(rx_broadcast_pkts_vport),
+ HINIC_FUNC_STAT(rx_broadcast_bytes_vport),
+
+ HINIC_FUNC_STAT(tx_discard_vport),
+ HINIC_FUNC_STAT(rx_discard_vport),
+ HINIC_FUNC_STAT(tx_err_vport),
+ HINIC_FUNC_STAT(rx_err_vport),
+};
+
+#define HINIC_VPORT_XSTATS_NUM (sizeof(hinic_vport_stats_strings) / \
+ sizeof(hinic_vport_stats_strings[0]))
+
+static const struct hinic_xstats_name_off hinic_phyport_stats_strings[] = {
+ HINIC_PORT_STAT(mac_rx_total_pkt_num),
+ HINIC_PORT_STAT(mac_rx_total_oct_num),
+ HINIC_PORT_STAT(mac_rx_bad_pkt_num),
+ HINIC_PORT_STAT(mac_rx_bad_oct_num),
+ HINIC_PORT_STAT(mac_rx_good_pkt_num),
+ HINIC_PORT_STAT(mac_rx_good_oct_num),
+ HINIC_PORT_STAT(mac_rx_uni_pkt_num),
+ HINIC_PORT_STAT(mac_rx_multi_pkt_num),
+ HINIC_PORT_STAT(mac_rx_broad_pkt_num),
+ HINIC_PORT_STAT(mac_tx_total_pkt_num),
+ HINIC_PORT_STAT(mac_tx_total_oct_num),
+ HINIC_PORT_STAT(mac_tx_bad_pkt_num),
+ HINIC_PORT_STAT(mac_tx_bad_oct_num),
+ HINIC_PORT_STAT(mac_tx_good_pkt_num),
+ HINIC_PORT_STAT(mac_tx_good_oct_num),
+ HINIC_PORT_STAT(mac_tx_uni_pkt_num),
+ HINIC_PORT_STAT(mac_tx_multi_pkt_num),
+ HINIC_PORT_STAT(mac_tx_broad_pkt_num),
+ HINIC_PORT_STAT(mac_rx_fragment_pkt_num),
+ HINIC_PORT_STAT(mac_rx_undersize_pkt_num),
+ HINIC_PORT_STAT(mac_rx_undermin_pkt_num),
+ HINIC_PORT_STAT(mac_rx_64_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_65_127_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_128_255_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_256_511_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_512_1023_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_1024_1518_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_1519_2047_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_2048_4095_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_4096_8191_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_8192_9216_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_9217_12287_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_12288_16383_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_1519_max_bad_pkt_num),
+ HINIC_PORT_STAT(mac_rx_1519_max_good_pkt_num),
+ HINIC_PORT_STAT(mac_rx_oversize_pkt_num),
+ HINIC_PORT_STAT(mac_rx_jabber_pkt_num),
+ HINIC_PORT_STAT(mac_rx_mac_pause_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pri0_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pri1_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pri2_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pri3_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pri4_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pri5_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pri6_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pri7_pkt_num),
+ HINIC_PORT_STAT(mac_rx_mac_control_pkt_num),
+ HINIC_PORT_STAT(mac_rx_sym_err_pkt_num),
+ HINIC_PORT_STAT(mac_rx_fcs_err_pkt_num),
+ HINIC_PORT_STAT(mac_rx_send_app_good_pkt_num),
+ HINIC_PORT_STAT(mac_rx_send_app_bad_pkt_num),
+ HINIC_PORT_STAT(mac_tx_fragment_pkt_num),
+ HINIC_PORT_STAT(mac_tx_undersize_pkt_num),
+ HINIC_PORT_STAT(mac_tx_undermin_pkt_num),
+ HINIC_PORT_STAT(mac_tx_64_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_65_127_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_128_255_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_256_511_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_512_1023_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_1024_1518_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_1519_2047_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_2048_4095_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_4096_8191_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_8192_9216_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_9217_12287_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_12288_16383_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_1519_max_bad_pkt_num),
+ HINIC_PORT_STAT(mac_tx_1519_max_good_pkt_num),
+ HINIC_PORT_STAT(mac_tx_oversize_pkt_num),
+ HINIC_PORT_STAT(mac_trans_jabber_pkt_num),
+ HINIC_PORT_STAT(mac_tx_mac_pause_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pri0_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pri1_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pri2_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pri3_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pri4_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pri5_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pri6_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pri7_pkt_num),
+ HINIC_PORT_STAT(mac_tx_mac_control_pkt_num),
+ HINIC_PORT_STAT(mac_tx_err_all_pkt_num),
+ HINIC_PORT_STAT(mac_tx_from_app_good_pkt_num),
+ HINIC_PORT_STAT(mac_tx_from_app_bad_pkt_num),
+};
+
+#define HINIC_PHYPORT_XSTATS_NUM (sizeof(hinic_phyport_stats_strings) / \
+ sizeof(hinic_phyport_stats_strings[0]))
+
+static const struct hinic_xstats_name_off hinic_rxq_stats_strings[] = {
+ {"rx_nombuf", offsetof(struct hinic_rxq_stats, rx_nombuf)},
+ {"burst_pkt", offsetof(struct hinic_rxq_stats, burst_pkts)},
+};
+
+#define HINIC_RXQ_XSTATS_NUM (sizeof(hinic_rxq_stats_strings) / \
+ sizeof(hinic_rxq_stats_strings[0]))
+
+static const struct hinic_xstats_name_off hinic_txq_stats_strings[] = {
+ {"tx_busy", offsetof(struct hinic_txq_stats, tx_busy)},
+ {"offload_errors", offsetof(struct hinic_txq_stats, off_errs)},
+ {"copy_pkts", offsetof(struct hinic_txq_stats, cpy_pkts)},
+ {"rl_drop", offsetof(struct hinic_txq_stats, rl_drop)},
+ {"burst_pkts", offsetof(struct hinic_txq_stats, burst_pkts)},
+ {"sge_len0", offsetof(struct hinic_txq_stats, sge_len0)},
+ {"mbuf_null", offsetof(struct hinic_txq_stats, mbuf_null)},
+};
+
+#define HINIC_TXQ_XSTATS_NUM (sizeof(hinic_txq_stats_strings) / \
+ sizeof(hinic_txq_stats_strings[0]))
+
+static int hinic_xstats_calc_num(struct hinic_nic_dev *nic_dev)
+{
+ if (HINIC_IS_VF(nic_dev->hwdev)) {
+ return (HINIC_VPORT_XSTATS_NUM +
+ HINIC_RXQ_XSTATS_NUM * nic_dev->num_rq +
+ HINIC_TXQ_XSTATS_NUM * nic_dev->num_sq);
+ } else {
+ return (HINIC_VPORT_XSTATS_NUM +
+ HINIC_PHYPORT_XSTATS_NUM +
+ HINIC_RXQ_XSTATS_NUM * nic_dev->num_rq +
+ HINIC_TXQ_XSTATS_NUM * nic_dev->num_sq);
+ }
+}
+
+static const struct rte_eth_desc_lim hinic_rx_desc_lim = {
+ .nb_max = HINIC_MAX_QUEUE_DEPTH,
+ .nb_min = HINIC_MIN_QUEUE_DEPTH,
+ .nb_align = HINIC_RXD_ALIGN,
+};
+
+static const struct rte_eth_desc_lim hinic_tx_desc_lim = {
+ .nb_max = HINIC_MAX_QUEUE_DEPTH,
+ .nb_min = HINIC_MIN_QUEUE_DEPTH,
+ .nb_align = HINIC_TXD_ALIGN,
+};
+
+static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+
+/**
+ * Interrupt handler triggered by NIC for handling specific events.
+ *
+ * @param param
+ *   The address of the parameter (struct rte_eth_dev *) registered before.
+ */
+static void hinic_dev_interrupt_handler(void *param)
+{
+ struct rte_eth_dev *dev = param;
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+
+ if (!hinic_test_bit(HINIC_DEV_INTR_EN, &nic_dev->dev_status)) {
+ PMD_DRV_LOG(WARNING, "Device's interrupt is disabled, ignore interrupt event, dev_name: %s, port_id: %d",
+ nic_dev->proc_dev_name, dev->data->port_id);
+ return;
+ }
+
+ /* aeq0 msg handler */
+ hinic_dev_handle_aeq_event(nic_dev->hwdev, param);
+}
+
+/**
+ * Ethernet device configuration.
+ *
+ * Prepare the driver for a given number of TX and RX queues, mtu size
+ * and configure RSS.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int hinic_dev_configure(struct rte_eth_dev *dev)
+{
+ struct hinic_nic_dev *nic_dev;
+ struct hinic_nic_io *nic_io;
+ int err;
+
+ nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ nic_io = nic_dev->hwdev->nic_io;
+
+ nic_dev->num_sq = dev->data->nb_tx_queues;
+ nic_dev->num_rq = dev->data->nb_rx_queues;
+
+ nic_io->num_sqs = dev->data->nb_tx_queues;
+ nic_io->num_rqs = dev->data->nb_rx_queues;
+
+	/* number of queue pairs is max(num_sq, num_rq) */
+ nic_dev->num_qps = (nic_dev->num_sq > nic_dev->num_rq) ?
+ nic_dev->num_sq : nic_dev->num_rq;
+ nic_io->num_qps = nic_dev->num_qps;
+
+ if (nic_dev->num_qps > nic_io->max_qps) {
+ PMD_DRV_LOG(ERR,
+ "Queue number out of range, get queue_num:%d, max_queue_num:%d",
+ nic_dev->num_qps, nic_io->max_qps);
+ return -EINVAL;
+ }
+
+ if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
+ /* mtu size is 256~9600 */
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len < HINIC_MIN_FRAME_SIZE ||
+ dev->data->dev_conf.rxmode.max_rx_pkt_len >
+ HINIC_MAX_JUMBO_FRAME_SIZE) {
+ PMD_DRV_LOG(ERR,
+ "Max rx pkt len out of range, get max_rx_pkt_len:%d, "
+ "expect between %d and %d",
+ dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ HINIC_MIN_FRAME_SIZE, HINIC_MAX_JUMBO_FRAME_SIZE);
+ return -EINVAL;
+ }
+
+ nic_dev->mtu_size =
+ HINIC_PKTLEN_TO_MTU(dev->data->dev_conf.rxmode.max_rx_pkt_len);
+
+ /* rss template */
+ err = hinic_config_mq_mode(dev, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Config multi-queue failed");
+ return err;
+ }
+
+	/* init vlan offload */
+ err = hinic_vlan_offload_set(dev,
+ ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Initialize vlan filter and strip failed");
+ (void)hinic_config_mq_mode(dev, FALSE);
+ return err;
+ }
+
+ /* clear fdir filter flag in function table */
+ hinic_free_fdir_filter(nic_dev);
+
+ return HINIC_OK;
+}
+
+/**
+ * DPDK callback to create the receive queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param queue_idx
+ * RX queue index.
+ * @param nb_desc
+ * Number of descriptors for receive queue.
+ * @param socket_id
+ * NUMA socket on which memory must be allocated.
+ * @param rx_conf
+ *   Thresholds parameters (only rx_free_thresh is used).
+ * @param mp
+ * Memory pool for buffer allocations.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int hinic_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ int rc;
+ struct hinic_nic_dev *nic_dev;
+ struct hinic_hwdev *hwdev;
+ struct hinic_rxq *rxq;
+ u16 rq_depth, rx_free_thresh;
+ u32 buf_size;
+
+ nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ hwdev = nic_dev->hwdev;
+
+	/* queue depth must be a power of 2, otherwise it will be rounded up */
+ rq_depth = (nb_desc & (nb_desc - 1)) ?
+ ((u16)(1U << (ilog2(nb_desc) + 1))) : nb_desc;
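+	/*
+	 * Example: nb_desc = 1000 is not a power of 2, so rq_depth becomes
+	 * 1U << (ilog2(1000) + 1) = 1024 (assuming ilog2() is floor(log2()));
+	 * nb_desc = 1024 is kept as is.
+	 */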
+
+ /*
+ * Validate number of receive descriptors.
+ * It must not exceed hardware maximum and minimum.
+ */
+ if (rq_depth > HINIC_MAX_QUEUE_DEPTH ||
+ rq_depth < HINIC_MIN_QUEUE_DEPTH) {
+ PMD_DRV_LOG(ERR, "RX queue depth is out of range from %d to %d, (nb_desc=%d, q_depth=%d, port=%d queue=%d)",
+ HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH,
+ (int)nb_desc, (int)rq_depth,
+ (int)dev->data->port_id, (int)queue_idx);
+ return -EINVAL;
+ }
+
+	/*
+	 * The RX descriptor ring will be cleaned after rxq->rx_free_thresh
+	 * descriptors are used or if the number of descriptors required
+	 * to receive a packet is greater than the number of free RX
+	 * descriptors.
+	 * The following constraints must be satisfied:
+	 *  rx_free_thresh must be greater than 0.
+	 *  rx_free_thresh must be less than the size of the ring minus 1.
+	 * When set to zero use default values.
+	 */
+ rx_free_thresh = (u16)((rx_conf->rx_free_thresh) ?
+ rx_conf->rx_free_thresh : HINIC_DEFAULT_RX_FREE_THRESH);
+ if (rx_free_thresh >= (rq_depth - 1)) {
+ PMD_DRV_LOG(ERR, "rx_free_thresh must be less than the number of RX descriptors minus 1. (rx_free_thresh=%u port=%d queue=%d)",
+ (unsigned int)rx_free_thresh,
+ (int)dev->data->port_id,
+ (int)queue_idx);
+ return -EINVAL;
+ }
+
+ rxq = rte_zmalloc_socket("hinic_rx_queue", sizeof(struct hinic_rxq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!rxq) {
+ PMD_DRV_LOG(ERR, "Allocate rxq[%d] failed, dev_name: %s",
+ queue_idx, dev->data->name);
+ return -ENOMEM;
+ }
+ nic_dev->rxqs[queue_idx] = rxq;
+
+	/* alloc rx rq hw wqe page */
+ rc = hinic_create_rq(hwdev, queue_idx, rq_depth, socket_id);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Create rxq[%d] failed, dev_name: %s, rq_depth: %d",
+ queue_idx, dev->data->name, rq_depth);
+		goto create_rq_fail;
+ }
+
+ /* mbuf pool must be assigned before setup rx resources */
+ rxq->mb_pool = mp;
+
+ rc =
+ hinic_convert_rx_buf_size(rte_pktmbuf_data_room_size(rxq->mb_pool) -
+ RTE_PKTMBUF_HEADROOM, &buf_size);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Adjust buf size failed, dev_name: %s",
+ dev->data->name);
+ goto adjust_bufsize_fail;
+ }
+
+ /* rx queue info, rearm control */
+ rxq->wq = &hwdev->nic_io->rq_wq[queue_idx];
+ rxq->pi_virt_addr = hwdev->nic_io->qps[queue_idx].rq.pi_virt_addr;
+ rxq->nic_dev = nic_dev;
+ rxq->q_id = queue_idx;
+ rxq->q_depth = rq_depth;
+ rxq->buf_len = (u16)buf_size;
+ rxq->rx_free_thresh = rx_free_thresh;
+ rxq->socket_id = socket_id;
+
+	/* the last region can not do mbuf rearm in bulk */
+ rxq->rxinfo_align_end = rxq->q_depth - rxq->rx_free_thresh;
+
+ /* device port identifier */
+ rxq->port_id = dev->data->port_id;
+
+ /* alloc rx_cqe and prepare rq_wqe */
+ rc = hinic_setup_rx_resources(rxq);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Setup rxq[%d] rx_resources failed, dev_name: %s",
+ queue_idx, dev->data->name);
+ goto setup_rx_res_err;
+ }
+
+ /* record nic_dev rxq in rte_eth rx_queues */
+ dev->data->rx_queues[queue_idx] = rxq;
+
+ return 0;
+
+setup_rx_res_err:
+adjust_bufsize_fail:
+ hinic_destroy_rq(hwdev, queue_idx);
+
+create_rq_fail:
+ rte_free(rxq);
+
+ return rc;
+}
+
+static void hinic_reset_rx_queue(struct rte_eth_dev *dev)
+{
+ struct hinic_rxq *rxq;
+ struct hinic_nic_dev *nic_dev;
+ int q_id = 0;
+
+ nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+
+ for (q_id = 0; q_id < nic_dev->num_rq; q_id++) {
+ rxq = dev->data->rx_queues[q_id];
+
+ rxq->wq->cons_idx = 0;
+ rxq->wq->prod_idx = 0;
+ rxq->wq->delta = rxq->q_depth;
+ rxq->wq->mask = rxq->q_depth - 1;
+
+ /* alloc mbuf to rq */
+ hinic_rx_alloc_pkts(rxq);
+ }
+}
+
+/**
+ * DPDK callback to configure the transmit queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param queue_idx
+ * Transmit queue index.
+ * @param nb_desc
+ * Number of descriptors for transmit queue.
+ * @param socket_id
+ * NUMA socket on which memory must be allocated.
+ * @param tx_conf
+ * Tx queue configuration parameters.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int hinic_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ __rte_unused const struct rte_eth_txconf *tx_conf)
+{
+ int rc;
+ struct hinic_nic_dev *nic_dev;
+ struct hinic_hwdev *hwdev;
+ struct hinic_txq *txq;
+ u16 sq_depth, tx_free_thresh;
+
+ nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ hwdev = nic_dev->hwdev;
+
+	/* queue depth must be a power of 2, otherwise it will be rounded up */
+ sq_depth = (nb_desc & (nb_desc - 1)) ?
+ ((u16)(1U << (ilog2(nb_desc) + 1))) : nb_desc;
+
+ /*
+ * Validate number of transmit descriptors.
+ * It must not exceed hardware maximum and minimum.
+ */
+ if (sq_depth > HINIC_MAX_QUEUE_DEPTH ||
+ sq_depth < HINIC_MIN_QUEUE_DEPTH) {
+ PMD_DRV_LOG(ERR, "TX queue depth is out of range from %d to %d, (nb_desc=%d, q_depth=%d, port=%d queue=%d)",
+ HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH,
+ (int)nb_desc, (int)sq_depth,
+ (int)dev->data->port_id, (int)queue_idx);
+ return -EINVAL;
+ }
+
+ /*
+ * The TX descriptor ring will be cleaned after txq->tx_free_thresh
+ * descriptors are used or if the number of descriptors required
+ * to transmit a packet is greater than the number of free TX
+ * descriptors.
+ * The following constraints must be satisfied:
+ * tx_free_thresh must be greater than 0.
+ * tx_free_thresh must be less than the size of the ring minus 1.
+ * When set to zero use default values.
+ */
+ tx_free_thresh = (u16)((tx_conf->tx_free_thresh) ?
+ tx_conf->tx_free_thresh : HINIC_DEFAULT_TX_FREE_THRESH);
+ if (tx_free_thresh >= (sq_depth - 1)) {
+ PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the number of TX descriptors minus 1. (tx_free_thresh=%u port=%d queue=%d)",
+ (unsigned int)tx_free_thresh, (int)dev->data->port_id,
+ (int)queue_idx);
+ return -EINVAL;
+ }
+
+ txq = rte_zmalloc_socket("hinic_tx_queue", sizeof(struct hinic_txq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!txq) {
+ PMD_DRV_LOG(ERR, "Allocate txq[%d] failed, dev_name: %s",
+ queue_idx, dev->data->name);
+ return -ENOMEM;
+ }
+ nic_dev->txqs[queue_idx] = txq;
+
+	/* alloc tx sq hw wqe page */
+ rc = hinic_create_sq(hwdev, queue_idx, sq_depth, socket_id);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Create txq[%d] failed, dev_name: %s, sq_depth: %d",
+ queue_idx, dev->data->name, sq_depth);
+ goto create_sq_fail;
+ }
+
+ txq->q_id = queue_idx;
+ txq->q_depth = sq_depth;
+ txq->port_id = dev->data->port_id;
+ txq->tx_free_thresh = tx_free_thresh;
+ txq->nic_dev = nic_dev;
+ txq->wq = &hwdev->nic_io->sq_wq[queue_idx];
+ txq->sq = &hwdev->nic_io->qps[queue_idx].sq;
+ txq->cons_idx_addr = hwdev->nic_io->qps[queue_idx].sq.cons_idx_addr;
+ txq->sq_head_addr = HINIC_GET_WQ_HEAD(txq);
+ txq->sq_bot_sge_addr = HINIC_GET_WQ_TAIL(txq) -
+ sizeof(struct hinic_sq_bufdesc);
+ txq->cos = nic_dev->default_cos;
+ txq->socket_id = socket_id;
+
+ /* alloc software txinfo */
+ rc = hinic_setup_tx_resources(txq);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Setup txq[%d] tx_resources failed, dev_name: %s",
+ queue_idx, dev->data->name);
+ goto setup_tx_res_fail;
+ }
+
+ /* record nic_dev txq in rte_eth tx_queues */
+ dev->data->tx_queues[queue_idx] = txq;
+
+ return HINIC_OK;
+
+setup_tx_res_fail:
+ hinic_destroy_sq(hwdev, queue_idx);
+
+create_sq_fail:
+ rte_free(txq);
+
+ return rc;
+}
+
+static void hinic_reset_tx_queue(struct rte_eth_dev *dev)
+{
+ struct hinic_nic_dev *nic_dev;
+ struct hinic_txq *txq;
+ struct hinic_nic_io *nic_io;
+ struct hinic_hwdev *hwdev;
+ volatile u32 *ci_addr;
+ int q_id = 0;
+
+ nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ hwdev = nic_dev->hwdev;
+ nic_io = hwdev->nic_io;
+
+ for (q_id = 0; q_id < nic_dev->num_sq; q_id++) {
+ txq = dev->data->tx_queues[q_id];
+
+ txq->wq->cons_idx = 0;
+ txq->wq->prod_idx = 0;
+ txq->wq->delta = txq->q_depth;
+ txq->wq->mask = txq->q_depth - 1;
+
+ /* clear hardware ci */
+ ci_addr = (volatile u32 *)HINIC_CI_VADDR(nic_io->ci_vaddr_base,
+ q_id);
+ *ci_addr = 0;
+ }
+}
+
+/**
+ * Get the supported link speed capabilities from the NIC.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param speed_capa
+ *   Pointer to the link speed capability bitmap to fill.
+ */
+static void hinic_get_speed_capa(struct rte_eth_dev *dev, uint32_t *speed_capa)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ u32 supported_link, advertised_link;
+ int err;
+
+#define HINIC_LINK_MODE_SUPPORT_1G (1U << HINIC_GE_BASE_KX)
+
+#define HINIC_LINK_MODE_SUPPORT_10G (1U << HINIC_10GE_BASE_KR)
+
+#define HINIC_LINK_MODE_SUPPORT_25G ((1U << HINIC_25GE_BASE_KR_S) | \
+ (1U << HINIC_25GE_BASE_CR_S) | \
+ (1U << HINIC_25GE_BASE_KR) | \
+ (1U << HINIC_25GE_BASE_CR))
+
+#define HINIC_LINK_MODE_SUPPORT_40G ((1U << HINIC_40GE_BASE_KR4) | \
+ (1U << HINIC_40GE_BASE_CR4))
+
+#define HINIC_LINK_MODE_SUPPORT_100G ((1U << HINIC_100GE_BASE_KR4) | \
+ (1U << HINIC_100GE_BASE_CR4))
+
+ err = hinic_get_link_mode(nic_dev->hwdev,
+ &supported_link, &advertised_link);
+ if (err || supported_link == HINIC_SUPPORTED_UNKNOWN ||
+ advertised_link == HINIC_SUPPORTED_UNKNOWN) {
+ PMD_DRV_LOG(WARNING, "Get speed capability info failed, device: %s, port_id: %u",
+ nic_dev->proc_dev_name, dev->data->port_id);
+ } else {
+ *speed_capa = 0;
+ if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_1G))
+ *speed_capa |= ETH_LINK_SPEED_1G;
+ if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_10G))
+ *speed_capa |= ETH_LINK_SPEED_10G;
+ if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_25G))
+ *speed_capa |= ETH_LINK_SPEED_25G;
+ if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_40G))
+ *speed_capa |= ETH_LINK_SPEED_40G;
+ if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_100G))
+ *speed_capa |= ETH_LINK_SPEED_100G;
+ }
+}
+
+/**
+ * DPDK callback to get information about the device.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param info
+ * Pointer to Info structure output buffer.
+ */
+static int
+hinic_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+
+ info->max_rx_queues = nic_dev->nic_cap.max_rqs;
+ info->max_tx_queues = nic_dev->nic_cap.max_sqs;
+ info->min_rx_bufsize = HINIC_MIN_RX_BUF_SIZE;
+ info->max_rx_pktlen = HINIC_MAX_JUMBO_FRAME_SIZE;
+ info->max_mac_addrs = HINIC_MAX_UC_MAC_ADDRS;
+ info->min_mtu = HINIC_MIN_MTU_SIZE;
+ info->max_mtu = HINIC_MAX_MTU_SIZE;
+ info->max_lro_pkt_size = HINIC_MAX_LRO_SIZE;
+
+ hinic_get_speed_capa(dev, &info->speed_capa);
+ info->rx_queue_offload_capa = 0;
+ info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_TCP_LRO |
+ DEV_RX_OFFLOAD_RSS_HASH;
+
+ info->tx_queue_offload_capa = 0;
+ info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS;
+
+ info->hash_key_size = HINIC_RSS_KEY_SIZE;
+ info->reta_size = HINIC_RSS_INDIR_SIZE;
+ info->flow_type_rss_offloads = HINIC_RSS_OFFLOAD_ALL;
+ info->rx_desc_lim = hinic_rx_desc_lim;
+ info->tx_desc_lim = hinic_tx_desc_lim;
+
+ /* Driver-preferred Rx/Tx parameters */
+ info->default_rxportconf.burst_size = HINIC_DEFAULT_BURST_SIZE;
+ info->default_txportconf.burst_size = HINIC_DEFAULT_BURST_SIZE;
+ info->default_rxportconf.nb_queues = HINIC_DEFAULT_NB_QUEUES;
+ info->default_txportconf.nb_queues = HINIC_DEFAULT_NB_QUEUES;
+ info->default_rxportconf.ring_size = HINIC_DEFAULT_RING_SIZE;
+ info->default_txportconf.ring_size = HINIC_DEFAULT_RING_SIZE;
+
+ return 0;
+}
+
+static int hinic_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
+ size_t fw_size)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ char fw_ver[HINIC_MGMT_VERSION_MAX_LEN] = {0};
+ int err;
+
+ err = hinic_get_mgmt_version(nic_dev->hwdev, fw_ver);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to get fw version");
+ return -EINVAL;
+ }
+
+ if (fw_size < strlen(fw_ver) + 1)
+ return (strlen(fw_ver) + 1);
+
+ snprintf(fw_version, fw_size, "%s", fw_ver);
+
+ return 0;
+}
+
+static int hinic_config_rx_mode(struct hinic_nic_dev *nic_dev, u32 rx_mode_ctrl)
+{
+ int err;
+
+ err = hinic_set_rx_mode(nic_dev->hwdev, rx_mode_ctrl);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to set rx mode");
+ return -EINVAL;
+ }
+ nic_dev->rx_mode_status = rx_mode_ctrl;
+
+ return 0;
+}
+
+static int hinic_rxtx_configure(struct rte_eth_dev *dev)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ int err;
+
+	/* rx configure: if rss is enabled, init the default rss configuration */
+ err = hinic_rx_configure(dev);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Configure rss failed");
+ return err;
+ }
+
+ /* rx mode init */
+ err = hinic_config_rx_mode(nic_dev, HINIC_DEFAULT_RX_MODE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Configure rx_mode:0x%x failed",
+ HINIC_DEFAULT_RX_MODE);
+ goto set_rx_mode_fail;
+ }
+
+ return HINIC_OK;
+
+set_rx_mode_fail:
+ hinic_rx_remove_configure(dev);
+
+ return err;
+}
+
+static void hinic_remove_rxtx_configure(struct rte_eth_dev *dev)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+
+ (void)hinic_config_rx_mode(nic_dev, 0);
+ hinic_rx_remove_configure(dev);
+}
+
+static int hinic_priv_get_dev_link_status(struct hinic_nic_dev *nic_dev,
+ struct rte_eth_link *link)
+{
+ int rc;
+ u8 port_link_status = 0;
+ struct nic_port_info port_link_info;
+ struct hinic_hwdev *nic_hwdev = nic_dev->hwdev;
+ uint32_t port_speed[LINK_SPEED_MAX] = {ETH_SPEED_NUM_10M,
+ ETH_SPEED_NUM_100M, ETH_SPEED_NUM_1G,
+ ETH_SPEED_NUM_10G, ETH_SPEED_NUM_25G,
+ ETH_SPEED_NUM_40G, ETH_SPEED_NUM_100G};
+
+ rc = hinic_get_link_status(nic_hwdev, &port_link_status);
+ if (rc)
+ return rc;
+
+ if (!port_link_status) {
+ link->link_status = ETH_LINK_DOWN;
+ link->link_speed = 0;
+ link->link_duplex = ETH_LINK_HALF_DUPLEX;
+ link->link_autoneg = ETH_LINK_FIXED;
+ return HINIC_OK;
+ }
+
+ memset(&port_link_info, 0, sizeof(port_link_info));
+ rc = hinic_get_port_info(nic_hwdev, &port_link_info);
+ if (rc)
+ return rc;
+
+ link->link_speed = port_speed[port_link_info.speed % LINK_SPEED_MAX];
+ link->link_duplex = port_link_info.duplex;
+ link->link_autoneg = port_link_info.autoneg_state;
+ link->link_status = port_link_status;
+
+ return HINIC_OK;
+}
+
+/**
+ * DPDK callback to retrieve physical link information.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param wait_to_complete
+ * Wait for request completion.
+ *
+ * @return
+ * 0 link status changed, -1 link status not changed
+ */
+static int hinic_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+#define CHECK_INTERVAL 10 /* 10ms */
+#define MAX_REPEAT_TIME 100 /* 1s (100 * 10ms) in total */
+ int rc = HINIC_OK;
+ struct rte_eth_link link;
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ unsigned int rep_cnt = MAX_REPEAT_TIME;
+
+ memset(&link, 0, sizeof(link));
+ do {
+ /* Get link status information from hardware */
+ rc = hinic_priv_get_dev_link_status(nic_dev, &link);
+ if (rc != HINIC_OK) {
+ link.link_speed = ETH_SPEED_NUM_NONE;
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ PMD_DRV_LOG(ERR, "Get link status failed");
+ goto out;
+ }
+
+ if (!wait_to_complete || link.link_status)
+ break;
+
+ rte_delay_ms(CHECK_INTERVAL);
+ } while (rep_cnt--);
+
+out:
+ rc = rte_eth_linkstatus_set(dev, &link);
+ return rc;
+}
+
+/**
+ * DPDK callback to bring the link UP.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+static int hinic_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ int ret;
+
+ ret = hinic_set_xsfp_tx_status(nic_dev->hwdev, true);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Enable port tx xsfp failed, dev_name: %s, port_id: %d",
+ nic_dev->proc_dev_name, dev->data->port_id);
+ return ret;
+ }
+
+	/* link status follows phy port status, up will open pma */
+ ret = hinic_set_port_enable(nic_dev->hwdev, true);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Set mac link up failed, dev_name: %s, port_id: %d",
+ nic_dev->proc_dev_name, dev->data->port_id);
+
+ return ret;
+}
+
+/**
+ * DPDK callback to bring the link DOWN.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+static int hinic_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ int ret;
+
+ ret = hinic_set_xsfp_tx_status(nic_dev->hwdev, false);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Disable port tx xsfp failed, dev_name: %s, port_id: %d",
+ nic_dev->proc_dev_name, dev->data->port_id);
+ return ret;
+ }
+
+	/* link status follows phy port status, down will close pma */
+ ret = hinic_set_port_enable(nic_dev->hwdev, false);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Set mac link down failed, dev_name: %s, port_id: %d",
+ nic_dev->proc_dev_name, dev->data->port_id);
+
+ return ret;
+}
+
+/**
+ * DPDK callback to start the device.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+static int hinic_dev_start(struct rte_eth_dev *dev)
+{
+ int rc;
+ char *name;
+ struct hinic_nic_dev *nic_dev;
+
+ nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ name = dev->data->name;
+
+ /* reset rx and tx queue */
+ hinic_reset_rx_queue(dev);
+ hinic_reset_tx_queue(dev);
+
+ /* get func rx buf size */
+ hinic_get_func_rx_buf_size(nic_dev);
+
+ /* init txq and rxq context */
+ rc = hinic_init_qp_ctxts(nic_dev->hwdev);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Initialize qp context failed, dev_name: %s",
+ name);
+ goto init_qp_fail;
+ }
+
+ /* rss template */
+ rc = hinic_config_mq_mode(dev, TRUE);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Configure mq mode failed, dev_name: %s",
+ name);
+ goto cfg_mq_mode_fail;
+ }
+
+ /* set default mtu */
+ rc = hinic_set_port_mtu(nic_dev->hwdev, nic_dev->mtu_size);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Set mtu_size[%d] failed, dev_name: %s",
+ nic_dev->mtu_size, name);
+ goto set_mtu_fail;
+ }
+
+ /* configure rss rx_mode and other rx or tx default feature */
+ rc = hinic_rxtx_configure(dev);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Configure tx and rx failed, dev_name: %s",
+ name);
+ goto cfg_rxtx_fail;
+ }
+
+	/* reactivate pf status so that the uP can report async events */
+ hinic_set_pf_status(nic_dev->hwdev->hwif, HINIC_PF_STATUS_ACTIVE_FLAG);
+
+ /* open virtual port and ready to start packet receiving */
+ rc = hinic_set_vport_enable(nic_dev->hwdev, true);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Enable vport failed, dev_name:%s", name);
+ goto en_vport_fail;
+ }
+
+ /* open physical port and start packet receiving */
+ rc = hinic_set_port_enable(nic_dev->hwdev, true);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Enable physical port failed, dev_name: %s",
+ name);
+ goto en_port_fail;
+ }
+
+ /* update eth_dev link status */
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
+ (void)hinic_link_update(dev, 0);
+
+ hinic_set_bit(HINIC_DEV_START, &nic_dev->dev_status);
+
+ return 0;
+
+en_port_fail:
+ (void)hinic_set_vport_enable(nic_dev->hwdev, false);
+
+en_vport_fail:
+ hinic_set_pf_status(nic_dev->hwdev->hwif, HINIC_PF_STATUS_INIT);
+
+	/* Flush tx and rx chip resources in case the vport disable did not take effect */
+ (void)hinic_flush_qp_res(nic_dev->hwdev);
+ rte_delay_ms(100);
+
+ hinic_remove_rxtx_configure(dev);
+
+cfg_rxtx_fail:
+set_mtu_fail:
+cfg_mq_mode_fail:
+ hinic_free_qp_ctxts(nic_dev->hwdev);
+
+init_qp_fail:
+ hinic_free_all_rx_mbuf(dev);
+ hinic_free_all_tx_mbuf(dev);
+
+ return rc;
+}
+
+/**
+ * DPDK callback to release the receive queue.
+ *
+ * @param queue
+ * Generic receive queue pointer.
+ */
+static void hinic_rx_queue_release(void *queue)
+{
+ struct hinic_rxq *rxq = queue;
+ struct hinic_nic_dev *nic_dev;
+
+ if (!rxq) {
+ PMD_DRV_LOG(WARNING, "Rxq is null when release");
+ return;
+ }
+ nic_dev = rxq->nic_dev;
+
+ /* free rxq_pkt mbuf */
+ hinic_free_all_rx_mbufs(rxq);
+
+ /* free rxq_cqe, rxq_info */
+ hinic_free_rx_resources(rxq);
+
+ /* free root rq wq */
+ hinic_destroy_rq(nic_dev->hwdev, rxq->q_id);
+
+ nic_dev->rxqs[rxq->q_id] = NULL;
+
+ /* free rxq */
+ rte_free(rxq);
+}
+
+/**
+ * DPDK callback to release the transmit queue.
+ *
+ * @param queue
+ * Generic transmit queue pointer.
+ */
+static void hinic_tx_queue_release(void *queue)
+{
+ struct hinic_txq *txq = queue;
+ struct hinic_nic_dev *nic_dev;
+
+ if (!txq) {
+ PMD_DRV_LOG(WARNING, "Txq is null when release");
+ return;
+ }
+ nic_dev = txq->nic_dev;
+
+ /* free txq_pkt mbuf */
+ hinic_free_all_tx_mbufs(txq);
+
+ /* free txq_info */
+ hinic_free_tx_resources(txq);
+
+ /* free root sq wq */
+ hinic_destroy_sq(nic_dev->hwdev, txq->q_id);
+ nic_dev->txqs[txq->q_id] = NULL;
+
+ /* free txq */
+ rte_free(txq);
+}
+
+static void hinic_free_all_rq(struct hinic_nic_dev *nic_dev)
+{
+ u16 q_id;
+
+ for (q_id = 0; q_id < nic_dev->num_rq; q_id++)
+ hinic_destroy_rq(nic_dev->hwdev, q_id);
+}
+
+static void hinic_free_all_sq(struct hinic_nic_dev *nic_dev)
+{
+ u16 q_id;
+
+ for (q_id = 0; q_id < nic_dev->num_sq; q_id++)
+ hinic_destroy_sq(nic_dev->hwdev, q_id);
+}
+
+/**
+ * DPDK callback to stop the device.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void hinic_dev_stop(struct rte_eth_dev *dev)
+{
+ int rc;
+ char *name;
+ uint16_t port_id;
+ struct hinic_nic_dev *nic_dev;
+ struct rte_eth_link link;
+
+ nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ name = dev->data->name;
+ port_id = dev->data->port_id;
+
+ if (!hinic_test_and_clear_bit(HINIC_DEV_START, &nic_dev->dev_status)) {
+ PMD_DRV_LOG(INFO, "Device %s already stopped", name);
+ return;
+ }
+
+ /* just stop phy port and vport */
+ rc = hinic_set_port_enable(nic_dev->hwdev, false);
+ if (rc)
+ PMD_DRV_LOG(WARNING, "Disable phy port failed, error: %d, dev_name: %s, port_id: %d",
+ rc, name, port_id);
+
+ rc = hinic_set_vport_enable(nic_dev->hwdev, false);
+ if (rc)
+ PMD_DRV_LOG(WARNING, "Disable vport failed, error: %d, dev_name: %s, port_id: %d",
+ rc, name, port_id);
+
+ /* Clear recorded link status */
+ memset(&link, 0, sizeof(link));
+ (void)rte_eth_linkstatus_set(dev, &link);
+
+ /* flush pending io request */
+ rc = hinic_rx_tx_flush(nic_dev->hwdev);
+ if (rc)
+ PMD_DRV_LOG(WARNING, "Flush pending io failed, error: %d, dev_name: %s, port_id: %d",
+ rc, name, port_id);
+
+ /* clean rss table and rx_mode */
+ hinic_remove_rxtx_configure(dev);
+
+ /* clean root context */
+ hinic_free_qp_ctxts(nic_dev->hwdev);
+
+ hinic_destroy_fdir_filter(dev);
+
+ /* free mbuf */
+ hinic_free_all_rx_mbuf(dev);
+ hinic_free_all_tx_mbuf(dev);
+}
+
+static void hinic_disable_interrupt(struct rte_eth_dev *dev)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ int ret, retries = 0;
+
+ hinic_clear_bit(HINIC_DEV_INTR_EN, &nic_dev->dev_status);
+
+ /* disable msix interrupt in hardware */
+ hinic_set_msix_state(nic_dev->hwdev, 0, HINIC_MSIX_DISABLE);
+
+ /* disable rte interrupt */
+ ret = rte_intr_disable(&pci_dev->intr_handle);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Disable intr failed: %d", ret);
+
+ do {
+ ret =
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
+ hinic_dev_interrupt_handler, dev);
+ if (ret >= 0) {
+ break;
+ } else if (ret == -EAGAIN) {
+ rte_delay_ms(100);
+ retries++;
+ } else {
+ PMD_DRV_LOG(ERR, "intr callback unregister failed: %d",
+ ret);
+ break;
+ }
+ } while (retries < HINIC_INTR_CB_UNREG_MAX_RETRIES);
+
+ if (retries == HINIC_INTR_CB_UNREG_MAX_RETRIES)
+ PMD_DRV_LOG(ERR, "Unregister intr callback failed after %d retries",
+ retries);
+}
+
+static int hinic_set_dev_promiscuous(struct hinic_nic_dev *nic_dev, bool enable)
+{
+ u32 rx_mode_ctrl = nic_dev->rx_mode_status;
+
+ if (enable)
+ rx_mode_ctrl |= HINIC_RX_MODE_PROMISC;
+ else
+ rx_mode_ctrl &= (~HINIC_RX_MODE_PROMISC);
+
+ return hinic_config_rx_mode(nic_dev, rx_mode_ctrl);
+}
+
+/**
+ * DPDK callback to get device statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param stats
+ * Stats structure output buffer.
+ *
+ * @return
+ * 0 on success and stats is filled,
+ * negative error value otherwise.
+ */
+static int
+hinic_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ int i, err, q_num;
+ u64 rx_discards_pmd = 0;
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ struct hinic_vport_stats vport_stats;
+ struct hinic_rxq *rxq = NULL;
+ struct hinic_rxq_stats rxq_stats;
+ struct hinic_txq *txq = NULL;
+ struct hinic_txq_stats txq_stats;
+
+ err = hinic_get_vport_stats(nic_dev->hwdev, &vport_stats);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Get vport stats from fw failed, nic_dev: %s",
+ nic_dev->proc_dev_name);
+ return err;
+ }
+
+ /* rx queue stats */
+ q_num = (nic_dev->num_rq < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
+ nic_dev->num_rq : RTE_ETHDEV_QUEUE_STAT_CNTRS;
+ for (i = 0; i < q_num; i++) {
+ rxq = nic_dev->rxqs[i];
+ hinic_rxq_get_stats(rxq, &rxq_stats);
+ stats->q_ipackets[i] = rxq_stats.packets;
+ stats->q_ibytes[i] = rxq_stats.bytes;
+ stats->q_errors[i] = rxq_stats.rx_discards;
+
+ stats->ierrors += rxq_stats.errors;
+ rx_discards_pmd += rxq_stats.rx_discards;
+ dev->data->rx_mbuf_alloc_failed += rxq_stats.rx_nombuf;
+ }
+
+ /* tx queue stats */
+ q_num = (nic_dev->num_sq < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
+ nic_dev->num_sq : RTE_ETHDEV_QUEUE_STAT_CNTRS;
+ for (i = 0; i < q_num; i++) {
+ txq = nic_dev->txqs[i];
+ hinic_txq_get_stats(txq, &txq_stats);
+ stats->q_opackets[i] = txq_stats.packets;
+ stats->q_obytes[i] = txq_stats.bytes;
+ stats->oerrors += (txq_stats.tx_busy + txq_stats.off_errs);
+ }
+
+ /* vport stats */
+ stats->oerrors += vport_stats.tx_discard_vport;
+
+ stats->imissed = vport_stats.rx_discard_vport + rx_discards_pmd;
+
+ stats->ipackets = (vport_stats.rx_unicast_pkts_vport +
+ vport_stats.rx_multicast_pkts_vport +
+ vport_stats.rx_broadcast_pkts_vport -
+ rx_discards_pmd);
+
+ stats->opackets = (vport_stats.tx_unicast_pkts_vport +
+ vport_stats.tx_multicast_pkts_vport +
+ vport_stats.tx_broadcast_pkts_vport);
+
+ stats->ibytes = (vport_stats.rx_unicast_bytes_vport +
+ vport_stats.rx_multicast_bytes_vport +
+ vport_stats.rx_broadcast_bytes_vport);
+
+ stats->obytes = (vport_stats.tx_unicast_bytes_vport +
+ vport_stats.tx_multicast_bytes_vport +
+ vport_stats.tx_broadcast_bytes_vport);
+ return 0;
+}
+
+/**
+ * DPDK callback to clear device statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static int hinic_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ int qid;
+ struct hinic_rxq *rxq = NULL;
+ struct hinic_txq *txq = NULL;
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ int ret;
+
+ ret = hinic_clear_vport_stats(nic_dev->hwdev);
+ if (ret != 0)
+ return ret;
+
+ for (qid = 0; qid < nic_dev->num_rq; qid++) {
+ rxq = nic_dev->rxqs[qid];
+ hinic_rxq_stats_reset(rxq);
+ }
+
+ for (qid = 0; qid < nic_dev->num_sq; qid++) {
+ txq = nic_dev->txqs[qid];
+ hinic_txq_stats_reset(txq);
+ }
+
+ return 0;
+}
+
+/**
+ * DPDK callback to clear device extended statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static int hinic_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ int ret;
+
+ ret = hinic_dev_stats_reset(dev);
+ if (ret != 0)
+ return ret;
+
+ if (hinic_func_type(nic_dev->hwdev) != TYPE_VF) {
+ ret = hinic_clear_phy_port_stats(nic_dev->hwdev);
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hinic_gen_random_mac_addr(struct rte_ether_addr *mac_addr)
+{
+ uint64_t random_value;
+
+ /* Set Organizationally Unique Identifier (OUI) prefix */
+ mac_addr->addr_bytes[0] = 0x00;
+ mac_addr->addr_bytes[1] = 0x09;
+ mac_addr->addr_bytes[2] = 0xC0;
+ /* Force indication of locally assigned MAC address. */
+ mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR;
+ /* Generate the last 3 bytes of the MAC address with a random number. */
+ random_value = rte_rand();
+ memcpy(&mac_addr->addr_bytes[3], &random_value, 3);
+}
+
+/**
+ * Init mac_vlan table in NIC.
+ *
+ * @param eth_dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int hinic_init_mac_addr(struct rte_eth_dev *eth_dev)
+{
+ struct hinic_nic_dev *nic_dev =
+ HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
+ uint8_t addr_bytes[RTE_ETHER_ADDR_LEN];
+ u16 func_id = 0;
+ int rc = 0;
+
+ rc = hinic_get_default_mac(nic_dev->hwdev, addr_bytes);
+ if (rc)
+ return rc;
+
+ rte_ether_addr_copy((struct rte_ether_addr *)addr_bytes,
+ &eth_dev->data->mac_addrs[0]);
+ if (rte_is_zero_ether_addr(&eth_dev->data->mac_addrs[0]))
+ hinic_gen_random_mac_addr(&eth_dev->data->mac_addrs[0]);
+
+ func_id = hinic_global_func_id(nic_dev->hwdev);
+ rc = hinic_set_mac(nic_dev->hwdev,
+ eth_dev->data->mac_addrs[0].addr_bytes,
+ 0, func_id);
+ if (rc && rc != HINIC_PF_SET_VF_ALREADY)
+ return rc;
+
+ rte_ether_addr_copy(&eth_dev->data->mac_addrs[0],
+ &nic_dev->default_addr);
+
+ return 0;
+}
+
+static void hinic_delete_mc_addr_list(struct hinic_nic_dev *nic_dev)
+{
+ u16 func_id;
+ u32 i;
+
+ func_id = hinic_global_func_id(nic_dev->hwdev);
+
+ for (i = 0; i < HINIC_MAX_MC_MAC_ADDRS; i++) {
+ if (rte_is_zero_ether_addr(&nic_dev->mc_list[i]))
+ break;
+
+ hinic_del_mac(nic_dev->hwdev, nic_dev->mc_list[i].addr_bytes,
+ 0, func_id);
+ memset(&nic_dev->mc_list[i], 0, sizeof(struct rte_ether_addr));
+ }
+}
+
+/**
+ * Deinit mac_vlan table in NIC.
+ *
+ * @param eth_dev
+ * Pointer to Ethernet device structure.
+ */
+static void hinic_deinit_mac_addr(struct rte_eth_dev *eth_dev)
+{
+ struct hinic_nic_dev *nic_dev =
+ HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
+ u16 func_id = 0;
+ int rc;
+ int i;
+
+ func_id = hinic_global_func_id(nic_dev->hwdev);
+
+ for (i = 0; i < HINIC_MAX_UC_MAC_ADDRS; i++) {
+ if (rte_is_zero_ether_addr(&eth_dev->data->mac_addrs[i]))
+ continue;
+
+ rc = hinic_del_mac(nic_dev->hwdev,
+ eth_dev->data->mac_addrs[i].addr_bytes,
+ 0, func_id);
+ if (rc && rc != HINIC_PF_SET_VF_ALREADY)
+ PMD_DRV_LOG(ERR, "Delete mac table failed, dev_name: %s",
+ eth_dev->data->name);
+
+ memset(&eth_dev->data->mac_addrs[i], 0,
+ sizeof(struct rte_ether_addr));
+ }
+
+ /* delete multicast mac addrs */
+ hinic_delete_mc_addr_list(nic_dev);
+}
+
+static int hinic_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ uint32_t frame_size;
+ int ret = 0;
+
+ PMD_DRV_LOG(INFO, "Set port mtu, port_id: %d, mtu: %d, max_pkt_len: %d",
+ dev->data->port_id, mtu, HINIC_MTU_TO_PKTLEN(mtu));
+
+ if (mtu < HINIC_MIN_MTU_SIZE || mtu > HINIC_MAX_MTU_SIZE) {
+ PMD_DRV_LOG(ERR, "Invalid mtu: %d, must between %d and %d",
+ mtu, HINIC_MIN_MTU_SIZE, HINIC_MAX_MTU_SIZE);
+ return -EINVAL;
+ }
+
+ ret = hinic_set_port_mtu(nic_dev->hwdev, mtu);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Set port mtu failed, ret: %d", ret);
+ return ret;
+ }
+
+ /* update max frame size */
+ frame_size = HINIC_MTU_TO_PKTLEN(mtu);
+ if (frame_size > RTE_ETHER_MAX_LEN)
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+ nic_dev->mtu_size = mtu;
+
+ return ret;
+}
+
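+/*
+ * Illustrative sketch (not part of the upstream driver and never called by
+ * it): application-side use of the mtu_set callback above via the public
+ * ethdev API. HINIC_MTU_TO_PKTLEN() is assumed to add the Ethernet header
+ * and CRC to the MTU, i.e. mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN.
+ * The hinic_example_ name is hypothetical.
+ */
+static __rte_unused int
+hinic_example_set_mtu(uint16_t port_id, uint16_t mtu)
+{
+	int ret;
+
+	/* the ethdev layer dispatches to hinic_dev_set_mtu() for hinic ports */
+	ret = rte_eth_dev_set_mtu(port_id, mtu);
+	if (ret != 0)
+		PMD_DRV_LOG(ERR, "Set mtu %u on port %u failed: %d",
+			    mtu, port_id, ret);
+
+	return ret;
+}
+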
+static void hinic_store_vlan_filter(struct hinic_nic_dev *nic_dev,
+ u16 vlan_id, bool on)
+{
+ u32 vid_idx, vid_bit;
+
+ vid_idx = HINIC_VFTA_IDX(vlan_id);
+ vid_bit = HINIC_VFTA_BIT(vlan_id);
+
+ if (on)
+ nic_dev->vfta[vid_idx] |= vid_bit;
+ else
+ nic_dev->vfta[vid_idx] &= ~vid_bit;
+}
+
+static bool hinic_find_vlan_filter(struct hinic_nic_dev *nic_dev,
+ uint16_t vlan_id)
+{
+ u32 vid_idx, vid_bit;
+
+ vid_idx = HINIC_VFTA_IDX(vlan_id);
+ vid_bit = HINIC_VFTA_BIT(vlan_id);
+
+ return (nic_dev->vfta[vid_idx] & vid_bit) ? TRUE : FALSE;
+}
+
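+/*
+ * Illustrative sketch (not part of the upstream driver and never called by
+ * it): how the software VLAN filter table (vfta) above is laid out. Each
+ * u32 word holds 32 VLAN IDs, so HINIC_VFTA_IDX() is assumed to be
+ * vlan_id / 32 and HINIC_VFTA_BIT() 1 << (vlan_id % 32); the helper below
+ * only walks the table and counts the configured IDs. The hinic_example_
+ * name is hypothetical.
+ */
+static __rte_unused u32
+hinic_example_count_vlan_filters(struct hinic_nic_dev *nic_dev)
+{
+	u32 idx, count = 0;
+
+	for (idx = 0; idx < HINIC_VFTA_SIZE; idx++)
+		count += (u32)__builtin_popcount(nic_dev->vfta[idx]);
+
+	return count;
+}
+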
+/**
+ * DPDK callback to set vlan filter.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param vlan_id
+ * VLAN ID whose filter entry is to be added or removed.
+ * @param enable
+ * Non-zero to add the VLAN filter entry, zero to remove it.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int hinic_vlan_filter_set(struct rte_eth_dev *dev,
+ uint16_t vlan_id, int enable)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ int err = 0;
+ u16 func_id;
+
+ if (vlan_id > RTE_ETHER_MAX_VLAN_ID)
+ return -EINVAL;
+
+ func_id = hinic_global_func_id(nic_dev->hwdev);
+
+ if (enable) {
+ /* If vlanid is already set, just return */
+ if (hinic_find_vlan_filter(nic_dev, vlan_id)) {
+ PMD_DRV_LOG(INFO, "Vlan %u has been added, device: %s",
+ vlan_id, nic_dev->proc_dev_name);
+ return 0;
+ }
+
+ err = hinic_add_remove_vlan(nic_dev->hwdev, vlan_id,
+ func_id, TRUE);
+ } else {
+ /* If vlanid can't be found, just return */
+ if (!hinic_find_vlan_filter(nic_dev, vlan_id)) {
+ PMD_DRV_LOG(INFO, "Vlan %u is not in the vlan filter list, device: %s",
+ vlan_id, nic_dev->proc_dev_name);
+ return 0;
+ }
+
+ err = hinic_add_remove_vlan(nic_dev->hwdev, vlan_id,
+ func_id, FALSE);
+ }
+
+ if (err) {
+ PMD_DRV_LOG(ERR, "%s vlan failed, func_id: %d, vlan_id: %d, err: %d",
+ enable ? "Add" : "Remove", func_id, vlan_id, err);
+ return err;
+ }
+
+ hinic_store_vlan_filter(nic_dev, vlan_id, enable);
+
+ PMD_DRV_LOG(INFO, "%s vlan %u succeed, device: %s",
+ enable ? "Add" : "Remove", vlan_id, nic_dev->proc_dev_name);
+ return 0;
+}
+
+/**
+ * DPDK callback to enable or disable vlan offload.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param mask
+ * Bitmask of ETH_VLAN_*_MASK flags selecting which offloads to update.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ bool on;
+ int err;
+
+ /* Enable or disable VLAN filter */
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ on = (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) ?
+ TRUE : FALSE;
+ err = hinic_config_vlan_filter(nic_dev->hwdev, on);
+ if (err == HINIC_MGMT_CMD_UNSUPPORTED) {
+ PMD_DRV_LOG(WARNING,
+ "Current matching version does not support vlan filter configuration, device: %s, port_id: %d",
+ nic_dev->proc_dev_name, dev->data->port_id);
+ } else if (err) {
+ PMD_DRV_LOG(ERR, "Failed to %s vlan filter, device: %s, port_id: %d, err: %d",
+ on ? "enable" : "disable",
+ nic_dev->proc_dev_name,
+ dev->data->port_id, err);
+ return err;
+ }
+
+ PMD_DRV_LOG(INFO, "%s vlan filter succeed, device: %s, port_id: %d",
+ on ? "Enable" : "Disable",
+ nic_dev->proc_dev_name, dev->data->port_id);
+ }
+
+ /* Enable or disable VLAN stripping */
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ on = (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) ?
+ TRUE : FALSE;
+ err = hinic_set_rx_vlan_offload(nic_dev->hwdev, on);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to %s vlan strip, device: %s, port_id: %d, err: %d",
+ on ? "enable" : "disable",
+ nic_dev->proc_dev_name,
+ dev->data->port_id, err);
+ return err;
+ }
+
+ PMD_DRV_LOG(INFO, "%s vlan strip succeed, device: %s, port_id: %d",
+ on ? "Enable" : "Disable",
+ nic_dev->proc_dev_name, dev->data->port_id);
+ }
+
+ if (mask & ETH_VLAN_EXTEND_MASK) {
+ PMD_DRV_LOG(ERR, "Don't support vlan qinq, device: %s, port_id: %d",
+ nic_dev->proc_dev_name, dev->data->port_id);
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
+static void hinic_remove_all_vlanid(struct rte_eth_dev *eth_dev)
+{
+ struct hinic_nic_dev *nic_dev =
+ HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
+ u16 func_id;
+ int i;
+
+ func_id = hinic_global_func_id(nic_dev->hwdev);
+ for (i = 0; i <= RTE_ETHER_MAX_VLAN_ID; i++) {
+		/* skip vlan ids that are not in the filter table */
+ if (!hinic_find_vlan_filter(nic_dev, i))
+ continue;
+
+ (void)hinic_add_remove_vlan(nic_dev->hwdev, i, func_id, FALSE);
+ hinic_store_vlan_filter(nic_dev, i, false);
+ }
+}
+
+static int hinic_set_dev_allmulticast(struct hinic_nic_dev *nic_dev,
+ bool enable)
+{
+ u32 rx_mode_ctrl = nic_dev->rx_mode_status;
+
+ if (enable)
+ rx_mode_ctrl |= HINIC_RX_MODE_MC_ALL;
+ else
+ rx_mode_ctrl &= (~HINIC_RX_MODE_MC_ALL);
+
+ return hinic_config_rx_mode(nic_dev, rx_mode_ctrl);
+}
+
+/**
+ * DPDK callback to enable allmulticast mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success,
+ * negative error value otherwise.
+ */
+static int hinic_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ int ret = HINIC_OK;
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+
+ ret = hinic_set_dev_allmulticast(nic_dev, true);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Enable allmulticast failed, error: %d", ret);
+ return ret;
+ }
+
+ PMD_DRV_LOG(INFO, "Enable allmulticast succeed, nic_dev: %s, port_id: %d",
+ nic_dev->proc_dev_name, dev->data->port_id);
+ return 0;
+}
+
+/**
+ * DPDK callback to disable allmulticast mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success,
+ * negative error value otherwise.
+ */
+static int hinic_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ int ret = HINIC_OK;
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+
+ ret = hinic_set_dev_allmulticast(nic_dev, false);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Disable allmulticast failed, error: %d", ret);
+ return ret;
+ }
+
+ PMD_DRV_LOG(INFO, "Disable allmulticast succeed, nic_dev: %s, port_id: %d",
+ nic_dev->proc_dev_name, dev->data->port_id);
+ return 0;
+}
+
+/**
+ * DPDK callback to enable promiscuous mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success,
+ * negative error value otherwise.
+ */
+static int hinic_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ int rc = HINIC_OK;
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+
+ PMD_DRV_LOG(INFO, "Enable promiscuous, nic_dev: %s, port_id: %d, promisc: %d",
+ nic_dev->proc_dev_name, dev->data->port_id,
+ dev->data->promiscuous);
+
+ rc = hinic_set_dev_promiscuous(nic_dev, true);
+ if (rc)
+ PMD_DRV_LOG(ERR, "Enable promiscuous failed");
+
+ return rc;
+}
+
+/**
+ * DPDK callback to disable promiscuous mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success,
+ * negative error value otherwise.
+ */
+static int hinic_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ int rc = HINIC_OK;
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+
+ PMD_DRV_LOG(INFO, "Disable promiscuous, nic_dev: %s, port_id: %d, promisc: %d",
+ nic_dev->proc_dev_name, dev->data->port_id,
+ dev->data->promiscuous);
+
+ rc = hinic_set_dev_promiscuous(nic_dev, false);
+ if (rc)
+ PMD_DRV_LOG(ERR, "Disable promiscuous failed");
+
+ return rc;
+}
+
+static int hinic_flow_ctrl_get(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ struct nic_pause_config nic_pause;
+ int err;
+
+ memset(&nic_pause, 0, sizeof(nic_pause));
+
+ err = hinic_get_pause_info(nic_dev->hwdev, &nic_pause);
+ if (err)
+ return err;
+
+ if (nic_dev->pause_set || !nic_pause.auto_neg) {
+ nic_pause.rx_pause = nic_dev->nic_pause.rx_pause;
+ nic_pause.tx_pause = nic_dev->nic_pause.tx_pause;
+ }
+
+ fc_conf->autoneg = nic_pause.auto_neg;
+
+ if (nic_pause.tx_pause && nic_pause.rx_pause)
+ fc_conf->mode = RTE_FC_FULL;
+ else if (nic_pause.tx_pause)
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ else if (nic_pause.rx_pause)
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ else
+ fc_conf->mode = RTE_FC_NONE;
+
+ return 0;
+}
+
+static int hinic_flow_ctrl_set(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ struct nic_pause_config nic_pause;
+ int err;
+
+ nic_pause.auto_neg = fc_conf->autoneg;
+
+ if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
+ (fc_conf->mode & RTE_FC_TX_PAUSE))
+ nic_pause.tx_pause = true;
+ else
+ nic_pause.tx_pause = false;
+
+ if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
+ (fc_conf->mode & RTE_FC_RX_PAUSE))
+ nic_pause.rx_pause = true;
+ else
+ nic_pause.rx_pause = false;
+
+ err = hinic_set_pause_config(nic_dev->hwdev, nic_pause);
+ if (err)
+ return err;
+
+ nic_dev->pause_set = true;
+ nic_dev->nic_pause.auto_neg = nic_pause.auto_neg;
+ nic_dev->nic_pause.rx_pause = nic_pause.rx_pause;
+ nic_dev->nic_pause.tx_pause = nic_pause.tx_pause;
+
+ PMD_DRV_LOG(INFO, "Set pause options, tx: %s, rx: %s, auto: %s\n",
+ nic_pause.tx_pause ? "on" : "off",
+ nic_pause.rx_pause ? "on" : "off",
+ nic_pause.auto_neg ? "on" : "off");
+
+ return 0;
+}
+
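+/*
+ * Illustrative sketch (not part of the upstream driver and never called by
+ * it): application-side use of the flow_ctrl_set callback above. Only the
+ * mode and autoneg fields are consumed by this PMD; the watermark fields
+ * are left at zero. The hinic_example_ name is hypothetical.
+ */
+static __rte_unused int
+hinic_example_enable_pause(uint16_t port_id)
+{
+	struct rte_eth_fc_conf fc_conf;
+
+	memset(&fc_conf, 0, sizeof(fc_conf));
+	fc_conf.mode = RTE_FC_FULL;	/* enable both rx and tx pause */
+	fc_conf.autoneg = 0;
+
+	/* the ethdev layer dispatches to hinic_flow_ctrl_set() */
+	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
+}
+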
+/**
+ * DPDK callback to update the RSS hash key and RSS hash type.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param rss_conf
+ * RSS configuration data.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int hinic_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ u8 tmpl_idx = nic_dev->rss_tmpl_idx;
+ u8 hashkey[HINIC_RSS_KEY_SIZE] = {0};
+ u8 prio_tc[HINIC_DCB_UP_MAX] = {0};
+ u64 rss_hf = rss_conf->rss_hf;
+ struct nic_rss_type rss_type = {0};
+ int err = 0;
+
+ if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
+ PMD_DRV_LOG(WARNING, "RSS is not enabled");
+ return HINIC_OK;
+ }
+
+ if (rss_conf->rss_key_len > HINIC_RSS_KEY_SIZE) {
+ PMD_DRV_LOG(ERR, "Invalid rss key, rss_key_len: %d",
+ rss_conf->rss_key_len);
+ return HINIC_ERROR;
+ }
+
+ if (rss_conf->rss_key) {
+ memcpy(hashkey, rss_conf->rss_key, rss_conf->rss_key_len);
+ err = hinic_rss_set_template_tbl(nic_dev->hwdev, tmpl_idx,
+ hashkey);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Set rss template table failed");
+ goto disable_rss;
+ }
+ }
+
+ rss_type.ipv4 = (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4)) ? 1 : 0;
+ rss_type.tcp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
+ rss_type.ipv6 = (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6)) ? 1 : 0;
+ rss_type.ipv6_ext = (rss_hf & ETH_RSS_IPV6_EX) ? 1 : 0;
+ rss_type.tcp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
+ rss_type.tcp_ipv6_ext = (rss_hf & ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
+ rss_type.udp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
+ rss_type.udp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
+
+ err = hinic_set_rss_type(nic_dev->hwdev, tmpl_idx, rss_type);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Set rss type table failed");
+ goto disable_rss;
+ }
+
+ return 0;
+
+disable_rss:
+ memset(prio_tc, 0, sizeof(prio_tc));
+ (void)hinic_rss_cfg(nic_dev->hwdev, 0, tmpl_idx, 0, prio_tc);
+ return err;
+}
+
+/**
+ * DPDK callback to get the RSS hash configuration.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param rss_conf
+ * RSS configuration data.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int hinic_rss_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ u8 tmpl_idx = nic_dev->rss_tmpl_idx;
+ u8 hashkey[HINIC_RSS_KEY_SIZE] = {0};
+ struct nic_rss_type rss_type = {0};
+ int err;
+
+ if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
+ PMD_DRV_LOG(WARNING, "RSS is not enabled");
+ return HINIC_ERROR;
+ }
+
+ err = hinic_rss_get_template_tbl(nic_dev->hwdev, tmpl_idx, hashkey);
+ if (err)
+ return err;
+
+ if (rss_conf->rss_key &&
+ rss_conf->rss_key_len >= HINIC_RSS_KEY_SIZE) {
+ memcpy(rss_conf->rss_key, hashkey, sizeof(hashkey));
+ rss_conf->rss_key_len = sizeof(hashkey);
+ }
+
+ err = hinic_get_rss_type(nic_dev->hwdev, tmpl_idx, &rss_type);
+ if (err)
+ return err;
+
+ rss_conf->rss_hf = 0;
+ rss_conf->rss_hf |= rss_type.ipv4 ?
+ (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4) : 0;
+ rss_conf->rss_hf |= rss_type.tcp_ipv4 ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
+ rss_conf->rss_hf |= rss_type.ipv6 ?
+ (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6) : 0;
+ rss_conf->rss_hf |= rss_type.ipv6_ext ? ETH_RSS_IPV6_EX : 0;
+ rss_conf->rss_hf |= rss_type.tcp_ipv6 ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
+ rss_conf->rss_hf |= rss_type.tcp_ipv6_ext ? ETH_RSS_IPV6_TCP_EX : 0;
+ rss_conf->rss_hf |= rss_type.udp_ipv4 ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
+ rss_conf->rss_hf |= rss_type.udp_ipv6 ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
+
+ return HINIC_OK;
+}
+
+/**
+ * DPDK callback to update the RSS redirection table.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param reta_conf
+ * Pointer to RSS reta configuration data.
+ * @param reta_size
+ * Size of the RETA table.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int hinic_rss_indirtbl_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ u8 tmpl_idx = nic_dev->rss_tmpl_idx;
+ u8 prio_tc[HINIC_DCB_UP_MAX] = {0};
+ u32 indirtbl[NIC_RSS_INDIR_SIZE] = {0};
+ int err = 0;
+ u16 i = 0;
+ u16 idx, shift;
+
+ if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG))
+ return HINIC_OK;
+
+ if (reta_size != NIC_RSS_INDIR_SIZE) {
+ PMD_DRV_LOG(ERR, "Invalid reta size, reta_size: %d", reta_size);
+ return HINIC_ERROR;
+ }
+
+ err = hinic_rss_get_indir_tbl(nic_dev->hwdev, tmpl_idx, indirtbl);
+ if (err)
+ return err;
+
+ /* update rss indir_tbl */
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+
+ if (reta_conf[idx].reta[shift] >= nic_dev->num_rq) {
+ PMD_DRV_LOG(ERR, "Invalid reta entry, indirtbl[%d]: %d "
+ "exceeds the maximum rxq num: %d", i,
+ reta_conf[idx].reta[shift], nic_dev->num_rq);
+ return -EINVAL;
+ }
+
+ if (reta_conf[idx].mask & (1ULL << shift))
+ indirtbl[i] = reta_conf[idx].reta[shift];
+ }
+
+ err = hinic_rss_set_indir_tbl(nic_dev->hwdev, tmpl_idx, indirtbl);
+ if (err)
+ goto disable_rss;
+
+ nic_dev->rss_indir_flag = true;
+
+ return 0;
+
+disable_rss:
+ memset(prio_tc, 0, sizeof(prio_tc));
+ (void)hinic_rss_cfg(nic_dev->hwdev, 0, tmpl_idx, 0, prio_tc);
+
+ return HINIC_ERROR;
+}
+
+/**
+ * DPDK callback to get the RSS indirection table.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param reta_conf
+ * Pointer to RSS reta configuration data.
+ * @param reta_size
+ * Size of the RETA table.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int hinic_rss_indirtbl_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ u8 tmpl_idx = nic_dev->rss_tmpl_idx;
+ int err = 0;
+ u32 indirtbl[NIC_RSS_INDIR_SIZE] = {0};
+ u16 idx, shift;
+ u16 i = 0;
+
+ if (reta_size != NIC_RSS_INDIR_SIZE) {
+ PMD_DRV_LOG(ERR, "Invalid reta size, reta_size: %d", reta_size);
+ return HINIC_ERROR;
+ }
+
+ err = hinic_rss_get_indir_tbl(nic_dev->hwdev, tmpl_idx, indirtbl);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Get rss indirect table failed, error: %d",
+ err);
+ return err;
+ }
+
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & (1ULL << shift))
+ reta_conf[idx].reta[shift] = (uint16_t)indirtbl[i];
+ }
+
+ return HINIC_OK;
+}
+
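+/*
+ * Illustrative sketch (not part of the upstream driver and never called by
+ * it): application-side construction of the redirection table consumed by
+ * the reta_update/reta_query callbacks above. NIC_RSS_INDIR_SIZE is assumed
+ * to be a multiple of RTE_RETA_GROUP_SIZE (64); entries are spread
+ * round-robin over nb_rxq queues. The hinic_example_ name is hypothetical.
+ */
+static __rte_unused int
+hinic_example_spread_reta(uint16_t port_id, uint16_t nb_rxq)
+{
+	struct rte_eth_rss_reta_entry64 reta_conf[NIC_RSS_INDIR_SIZE /
+						  RTE_RETA_GROUP_SIZE];
+	u16 i, idx, shift;
+
+	if (nb_rxq == 0)
+		return -EINVAL;
+
+	memset(reta_conf, 0, sizeof(reta_conf));
+	for (i = 0; i < NIC_RSS_INDIR_SIZE; i++) {
+		idx = i / RTE_RETA_GROUP_SIZE;
+		shift = i % RTE_RETA_GROUP_SIZE;
+		reta_conf[idx].mask |= 1ULL << shift;
+		reta_conf[idx].reta[shift] = i % nb_rxq;
+	}
+
+	/* the ethdev layer dispatches to hinic_rss_indirtbl_update() */
+	return rte_eth_dev_rss_reta_update(port_id, reta_conf,
+					   NIC_RSS_INDIR_SIZE);
+}
+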
+/**
+ * DPDK callback to get extended device statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param xstats
+ * Pointer to rte extended stats table.
+ * @param n
+ * The size of the stats table.
+ *
+ * @return
+ * Number of extended stats on success and stats is filled,
+ * negative error value otherwise.
+ */
+static int hinic_dev_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ u16 qid = 0;
+ u32 i;
+ int err, count;
+ struct hinic_nic_dev *nic_dev;
+ struct hinic_phy_port_stats port_stats;
+ struct hinic_vport_stats vport_stats;
+ struct hinic_rxq *rxq = NULL;
+ struct hinic_rxq_stats rxq_stats;
+ struct hinic_txq *txq = NULL;
+ struct hinic_txq_stats txq_stats;
+
+ nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ count = hinic_xstats_calc_num(nic_dev);
+ if ((int)n < count)
+ return count;
+
+ count = 0;
+
+ /* Get stats from hinic_rxq_stats */
+ for (qid = 0; qid < nic_dev->num_rq; qid++) {
+ rxq = nic_dev->rxqs[qid];
+ hinic_rxq_get_stats(rxq, &rxq_stats);
+
+ for (i = 0; i < HINIC_RXQ_XSTATS_NUM; i++) {
+ xstats[count].value =
+ *(uint64_t *)(((char *)&rxq_stats) +
+ hinic_rxq_stats_strings[i].offset);
+ xstats[count].id = count;
+ count++;
+ }
+ }
+
+ /* Get stats from hinic_txq_stats */
+ for (qid = 0; qid < nic_dev->num_sq; qid++) {
+ txq = nic_dev->txqs[qid];
+ hinic_txq_get_stats(txq, &txq_stats);
+
+ for (i = 0; i < HINIC_TXQ_XSTATS_NUM; i++) {
+ xstats[count].value =
+ *(uint64_t *)(((char *)&txq_stats) +
+ hinic_txq_stats_strings[i].offset);
+ xstats[count].id = count;
+ count++;
+ }
+ }
+
+ /* Get stats from hinic_vport_stats */
+ err = hinic_get_vport_stats(nic_dev->hwdev, &vport_stats);
+ if (err)
+ return err;
+
+ for (i = 0; i < HINIC_VPORT_XSTATS_NUM; i++) {
+ xstats[count].value =
+ *(uint64_t *)(((char *)&vport_stats) +
+ hinic_vport_stats_strings[i].offset);
+ xstats[count].id = count;
+ count++;
+ }
+
+ if (HINIC_IS_VF(nic_dev->hwdev))
+ return count;
+
+ /* Get stats from hinic_phy_port_stats */
+ err = hinic_get_phy_port_stats(nic_dev->hwdev, &port_stats);
+ if (err)
+ return err;
+
+ for (i = 0; i < HINIC_PHYPORT_XSTATS_NUM; i++) {
+ xstats[count].value = *(uint64_t *)(((char *)&port_stats) +
+ hinic_phyport_stats_strings[i].offset);
+ xstats[count].id = count;
+ count++;
+ }
+
+ return count;
+}
+
+static void hinic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct hinic_rxq *rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mb_pool;
+ qinfo->nb_desc = rxq->q_depth;
+}
+
+static void hinic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct hinic_txq *txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->q_depth;
+}
+
+/**
+ * DPDK callback to retrieve names of extended device statistics
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param xstats_names
+ * Buffer to insert names into.
+ *
+ * @return
+ * Number of xstats names.
+ */
+static int hinic_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned int limit)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ int count = 0;
+ u16 i = 0, q_num;
+
+ if (xstats_names == NULL)
+ return hinic_xstats_calc_num(nic_dev);
+
+ /* get pmd rxq stats */
+ for (q_num = 0; q_num < nic_dev->num_rq; q_num++) {
+ for (i = 0; i < HINIC_RXQ_XSTATS_NUM; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "rxq%d_%s_pmd",
+ q_num, hinic_rxq_stats_strings[i].name);
+ count++;
+ }
+ }
+
+ /* get pmd txq stats */
+ for (q_num = 0; q_num < nic_dev->num_sq; q_num++) {
+ for (i = 0; i < HINIC_TXQ_XSTATS_NUM; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "txq%d_%s_pmd",
+ q_num, hinic_txq_stats_strings[i].name);
+ count++;
+ }
+ }
+
+ /* get vport stats */
+ for (i = 0; i < HINIC_VPORT_XSTATS_NUM; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s", hinic_vport_stats_strings[i].name);
+ count++;
+ }
+
+ if (HINIC_IS_VF(nic_dev->hwdev))
+ return count;
+
+ /* get phy port stats */
+ for (i = 0; i < HINIC_PHYPORT_XSTATS_NUM; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s", hinic_phyport_stats_strings[i].name);
+ count++;
+ }
+
+ return count;
+}
+
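+/*
+ * Illustrative sketch (not part of the upstream driver and never called by
+ * it): how an application pairs the names reported by xstats_get_names with
+ * the values reported by xstats_get; both callbacks above fill their arrays
+ * in the same order. The hinic_example_ name is hypothetical.
+ */
+static __rte_unused void
+hinic_example_dump_xstats(uint16_t port_id)
+{
+	struct rte_eth_xstat_name *names;
+	struct rte_eth_xstat *values;
+	int nb, i;
+
+	/* a first call with a NULL buffer returns the number of counters */
+	nb = rte_eth_xstats_get_names(port_id, NULL, 0);
+	if (nb <= 0)
+		return;
+
+	names = rte_zmalloc("example_xstat_names", sizeof(*names) * nb, 0);
+	values = rte_zmalloc("example_xstat_values", sizeof(*values) * nb, 0);
+	if (names == NULL || values == NULL)
+		goto out;
+
+	if (rte_eth_xstats_get_names(port_id, names, nb) == nb &&
+	    rte_eth_xstats_get(port_id, values, nb) == nb) {
+		for (i = 0; i < nb; i++)
+			PMD_DRV_LOG(INFO, "%s: %lu", names[i].name,
+				    (unsigned long)values[i].value);
+	}
+
+out:
+	rte_free(names);
+	rte_free(values);
+}
+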
+/**
+ * DPDK callback to set the default MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param addr
+ * Pointer to the new default MAC address.
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int hinic_set_mac_addr(struct rte_eth_dev *dev,
+ struct rte_ether_addr *addr)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ u16 func_id;
+ int err;
+
+ func_id = hinic_global_func_id(nic_dev->hwdev);
+ err = hinic_update_mac(nic_dev->hwdev, nic_dev->default_addr.addr_bytes,
+ addr->addr_bytes, 0, func_id);
+ if (err)
+ return err;
+
+ rte_ether_addr_copy(addr, &nic_dev->default_addr);
+
+ PMD_DRV_LOG(INFO, "Set new mac address %02x:%02x:%02x:%02x:%02x:%02x",
+ addr->addr_bytes[0], addr->addr_bytes[1],
+ addr->addr_bytes[2], addr->addr_bytes[3],
+ addr->addr_bytes[4], addr->addr_bytes[5]);
+
+ return 0;
+}
+
+/**
+ * DPDK callback to remove a MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param index
+ * MAC address index, must be less than 128.
+ */
+static void hinic_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ u16 func_id;
+ int ret;
+
+ if (index >= HINIC_MAX_UC_MAC_ADDRS) {
+ PMD_DRV_LOG(INFO, "Remove mac index(%u) is out of range",
+ index);
+ return;
+ }
+
+ func_id = hinic_global_func_id(nic_dev->hwdev);
+ ret = hinic_del_mac(nic_dev->hwdev,
+ dev->data->mac_addrs[index].addr_bytes, 0, func_id);
+ if (ret)
+ return;
+
+ memset(&dev->data->mac_addrs[index], 0, sizeof(struct rte_ether_addr));
+}
+
+/**
+ * DPDK callback to add a MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param mac_addr
+ * Pointer to MAC address
+ * @param index
+ * MAC address index, must be less than 128.
+ * @param vmdq
+ * VMDq pool index (not used).
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int hinic_mac_addr_add(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mac_addr, uint32_t index,
+ __rte_unused uint32_t vmdq)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ unsigned int i;
+ u16 func_id;
+ int ret;
+
+ if (index >= HINIC_MAX_UC_MAC_ADDRS) {
+ PMD_DRV_LOG(INFO, "Add mac index(%u) is out of range", index);
+ return -EINVAL;
+ }
+
+ /* First, make sure this address isn't already configured. */
+ for (i = 0; (i != HINIC_MAX_UC_MAC_ADDRS); ++i) {
+ /* Skip this index, it's going to be reconfigured. */
+ if (i == index)
+ continue;
+
+ if (memcmp(&dev->data->mac_addrs[i],
+ mac_addr, sizeof(*mac_addr)))
+ continue;
+
+ PMD_DRV_LOG(INFO, "MAC address already configured");
+ return -EADDRINUSE;
+ }
+
+ func_id = hinic_global_func_id(nic_dev->hwdev);
+ ret = hinic_set_mac(nic_dev->hwdev, mac_addr->addr_bytes, 0, func_id);
+ if (ret)
+ return ret;
+
+ dev->data->mac_addrs[index] = *mac_addr;
+ return 0;
+}
+
+/**
+ * DPDK callback to set the list of multicast MAC addresses.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param mc_addr_set
+ * Array of multicast MAC addresses to install.
+ * @param nb_mc_addr
+ * Number of addresses in the array.
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int hinic_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ u16 func_id;
+ int ret;
+ u32 i;
+
+ func_id = hinic_global_func_id(nic_dev->hwdev);
+
+	/* delete the old multicast addresses first */
+ hinic_delete_mc_addr_list(nic_dev);
+
+ if (nb_mc_addr > HINIC_MAX_MC_MAC_ADDRS)
+ goto allmulti;
+
+ for (i = 0; i < nb_mc_addr; i++) {
+ ret = hinic_set_mac(nic_dev->hwdev, mc_addr_set[i].addr_bytes,
+ 0, func_id);
+		/* if adding a multicast address fails, fall back to allmulticast mode */
+ if (ret) {
+ hinic_delete_mc_addr_list(nic_dev);
+ goto allmulti;
+ }
+
+ rte_ether_addr_copy(&mc_addr_set[i], &nic_dev->mc_list[i]);
+ }
+
+ return 0;
+
+allmulti:
+ hinic_dev_allmulticast_enable(dev);
+
+ return 0;
+}
+
+/**
+ * DPDK callback to manage filter control operations
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param filter_type
+ * Filter type; only RTE_ETH_FILTER_GENERIC is supported.
+ * @param filter_op
+ * Filter operation to perform.
+ * @param arg
+ * Pointer to operation-specific structure.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int hinic_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ int func_id = hinic_global_func_id(nic_dev->hwdev);
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &hinic_flow_ops;
+ break;
+ default:
+ PMD_DRV_LOG(INFO, "Filter type (%d) not supported",
+ filter_type);
+ return -EINVAL;
+ }
+
+	PMD_DRV_LOG(INFO, "Set filter_ctrl succeed, func_id: 0x%x, filter_type: 0x%x, "
+		    "filter_op: 0x%x.", func_id, filter_type, filter_op);
+ return 0;
+}
+
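+/*
+ * Illustrative sketch (not part of the upstream driver and never called by
+ * it): the only contract implemented by the filter_ctrl callback above is
+ * RTE_ETH_FILTER_GENERIC + RTE_ETH_FILTER_GET, which is how the rte_flow
+ * layer obtains the driver's flow ops before any rte_flow_create() call.
+ * The hinic_example_ name is hypothetical.
+ */
+static __rte_unused const void *
+hinic_example_get_flow_ops(uint16_t port_id)
+{
+	const void *flow_ops = NULL;
+
+	if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
+				    RTE_ETH_FILTER_GET, &flow_ops) != 0)
+		return NULL;
+
+	return flow_ops;	/* points at hinic_flow_ops for hinic ports */
+}
+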
+static int hinic_set_default_pause_feature(struct hinic_nic_dev *nic_dev)
+{
+ struct nic_pause_config pause_config = {0};
+ int err;
+
+ pause_config.auto_neg = 0;
+ pause_config.rx_pause = HINIC_DEFAUT_PAUSE_CONFIG;
+ pause_config.tx_pause = HINIC_DEFAUT_PAUSE_CONFIG;
+
+ err = hinic_set_pause_config(nic_dev->hwdev, pause_config);
+ if (err)
+ return err;
+
+ nic_dev->pause_set = true;
+ nic_dev->nic_pause.auto_neg = pause_config.auto_neg;
+ nic_dev->nic_pause.rx_pause = pause_config.rx_pause;
+ nic_dev->nic_pause.tx_pause = pause_config.tx_pause;
+
+ return 0;
+}
+
+static int hinic_set_default_dcb_feature(struct hinic_nic_dev *nic_dev)
+{
+ u8 up_tc[HINIC_DCB_UP_MAX] = {0};
+ u8 up_pgid[HINIC_DCB_UP_MAX] = {0};
+ u8 up_bw[HINIC_DCB_UP_MAX] = {0};
+ u8 pg_bw[HINIC_DCB_UP_MAX] = {0};
+ u8 up_strict[HINIC_DCB_UP_MAX] = {0};
+ int i = 0;
+
+ pg_bw[0] = 100;
+ for (i = 0; i < HINIC_DCB_UP_MAX; i++)
+ up_bw[i] = 100;
+
+ return hinic_dcb_set_ets(nic_dev->hwdev, up_tc, pg_bw,
+ up_pgid, up_bw, up_strict);
+}
+
+static int hinic_init_default_cos(struct hinic_nic_dev *nic_dev)
+{
+ u8 cos_id = 0;
+ int err;
+
+ if (!HINIC_IS_VF(nic_dev->hwdev)) {
+ nic_dev->default_cos =
+ (hinic_global_func_id(nic_dev->hwdev) +
+ DEFAULT_BASE_COS) % NR_MAX_COS;
+ } else {
+ err = hinic_vf_get_default_cos(nic_dev->hwdev, &cos_id);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Get VF default cos failed, err: %d",
+ err);
+ return HINIC_ERROR;
+ }
+
+ nic_dev->default_cos = cos_id;
+ }
+
+ return 0;
+}
+
+static int hinic_set_default_hw_feature(struct hinic_nic_dev *nic_dev)
+{
+ int err;
+
+ err = hinic_init_default_cos(nic_dev);
+ if (err)
+ return err;
+
+ if (hinic_func_type(nic_dev->hwdev) == TYPE_VF)
+ return 0;
+
+ /* Restore DCB configure to default status */
+ err = hinic_set_default_dcb_feature(nic_dev);
+ if (err)
+ return err;
+
+ /* Set pause enable, and up will disable pfc. */
+ err = hinic_set_default_pause_feature(nic_dev);
+ if (err)
+ return err;
+
+ err = hinic_reset_port_link_cfg(nic_dev->hwdev);
+ if (err)
+ return err;
+
+ err = hinic_set_link_status_follow(nic_dev->hwdev,
+ HINIC_LINK_FOLLOW_PORT);
+ if (err == HINIC_MGMT_CMD_UNSUPPORTED)
+ PMD_DRV_LOG(WARNING, "Don't support to set link status follow phy port status");
+ else if (err)
+ return err;
+
+ return hinic_set_anti_attack(nic_dev->hwdev, true);
+}
+
+static int32_t hinic_card_workmode_check(struct hinic_nic_dev *nic_dev)
+{
+ struct hinic_board_info info = { 0 };
+ int rc;
+
+ if (hinic_func_type(nic_dev->hwdev) == TYPE_VF)
+ return 0;
+
+ rc = hinic_get_board_info(nic_dev->hwdev, &info);
+ if (rc)
+ return rc;
+
+ return (info.service_mode == HINIC_SERVICE_MODE_NIC ? HINIC_OK :
+ HINIC_ERROR);
+}
+
+static int hinic_copy_mempool_init(struct hinic_nic_dev *nic_dev)
+{
+ nic_dev->cpy_mpool = rte_mempool_lookup(nic_dev->proc_dev_name);
+ if (nic_dev->cpy_mpool == NULL) {
+ nic_dev->cpy_mpool =
+ rte_pktmbuf_pool_create(nic_dev->proc_dev_name,
+ HINIC_COPY_MEMPOOL_DEPTH,
+ 0, 0,
+ HINIC_COPY_MBUF_SIZE,
+ rte_socket_id());
+ if (!nic_dev->cpy_mpool) {
+ PMD_DRV_LOG(ERR, "Create copy mempool failed, errno: %d, dev_name: %s",
+ rte_errno, nic_dev->proc_dev_name);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static void hinic_copy_mempool_uninit(struct hinic_nic_dev *nic_dev)
+{
+ if (nic_dev->cpy_mpool != NULL)
+ rte_mempool_free(nic_dev->cpy_mpool);
+}
+
+static int hinic_init_sw_rxtxqs(struct hinic_nic_dev *nic_dev)
+{
+ u32 txq_size;
+ u32 rxq_size;
+
+ /* allocate software txq array */
+ txq_size = nic_dev->nic_cap.max_sqs * sizeof(*nic_dev->txqs);
+ nic_dev->txqs = kzalloc_aligned(txq_size, GFP_KERNEL);
+ if (!nic_dev->txqs) {
+ PMD_DRV_LOG(ERR, "Allocate txqs failed");
+ return -ENOMEM;
+ }
+
+ /* allocate software rxq array */
+ rxq_size = nic_dev->nic_cap.max_rqs * sizeof(*nic_dev->rxqs);
+ nic_dev->rxqs = kzalloc_aligned(rxq_size, GFP_KERNEL);
+ if (!nic_dev->rxqs) {
+ /* free txqs */
+ kfree(nic_dev->txqs);
+ nic_dev->txqs = NULL;
+
+ PMD_DRV_LOG(ERR, "Allocate rxqs failed");
+ return -ENOMEM;
+ }
+
+ return HINIC_OK;
+}
+
+static void hinic_deinit_sw_rxtxqs(struct hinic_nic_dev *nic_dev)
+{
+ kfree(nic_dev->txqs);
+ nic_dev->txqs = NULL;
+
+ kfree(nic_dev->rxqs);
+ nic_dev->rxqs = NULL;
+}
+
+static int hinic_nic_dev_create(struct rte_eth_dev *eth_dev)
+{
+ struct hinic_nic_dev *nic_dev =
+ HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
+ int rc;
+
+ nic_dev->hwdev = rte_zmalloc("hinic_hwdev", sizeof(*nic_dev->hwdev),
+ RTE_CACHE_LINE_SIZE);
+ if (!nic_dev->hwdev) {
+ PMD_DRV_LOG(ERR, "Allocate hinic hwdev memory failed, dev_name: %s",
+ eth_dev->data->name);
+ return -ENOMEM;
+ }
+ nic_dev->hwdev->pcidev_hdl = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+	/* init osdep */
+ rc = hinic_osdep_init(nic_dev->hwdev);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Initialize os_dep failed, dev_name: %s",
+ eth_dev->data->name);
+ goto init_osdep_fail;
+ }
+
+ /* init_hwif */
+ rc = hinic_hwif_res_init(nic_dev->hwdev);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Initialize hwif failed, dev_name: %s",
+ eth_dev->data->name);
+ goto init_hwif_fail;
+ }
+
+ /* init_cfg_mgmt */
+ rc = init_cfg_mgmt(nic_dev->hwdev);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Initialize cfg_mgmt failed, dev_name: %s",
+ eth_dev->data->name);
+ goto init_cfgmgnt_fail;
+ }
+
+ /* init_aeqs */
+ rc = hinic_comm_aeqs_init(nic_dev->hwdev);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Initialize aeqs failed, dev_name: %s",
+ eth_dev->data->name);
+ goto init_aeqs_fail;
+ }
+
+ /* init_pf_to_mgnt */
+ rc = hinic_comm_pf_to_mgmt_init(nic_dev->hwdev);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Initialize pf_to_mgmt failed, dev_name: %s",
+ eth_dev->data->name);
+ goto init_pf_to_mgmt_fail;
+ }
+
+ /* init mailbox */
+ rc = hinic_comm_func_to_func_init(nic_dev->hwdev);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Initialize func_to_func failed, dev_name: %s",
+ eth_dev->data->name);
+ goto init_func_to_func_fail;
+ }
+
+ rc = hinic_card_workmode_check(nic_dev);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Check card workmode failed, dev_name: %s",
+ eth_dev->data->name);
+ goto workmode_check_fail;
+ }
+
+	/* do l2nic reset to bring the chip to a clean state */
+ rc = hinic_l2nic_reset(nic_dev->hwdev);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Do l2nic reset failed, dev_name: %s",
+ eth_dev->data->name);
+ goto l2nic_reset_fail;
+ }
+
+ /* init dma and aeq msix attribute table */
+ (void)hinic_init_attr_table(nic_dev->hwdev);
+
+ /* init_cmdqs */
+ rc = hinic_comm_cmdqs_init(nic_dev->hwdev);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Initialize cmdq failed, dev_name: %s",
+ eth_dev->data->name);
+ goto init_cmdq_fail;
+ }
+
+ /* set hardware state active */
+ rc = hinic_activate_hwdev_state(nic_dev->hwdev);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Initialize resources state failed, dev_name: %s",
+ eth_dev->data->name);
+ goto init_resources_state_fail;
+ }
+
+ /* init_capability */
+ rc = hinic_init_capability(nic_dev->hwdev);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Initialize capability failed, dev_name: %s",
+ eth_dev->data->name);
+ goto init_cap_fail;
+ }
+
+ /* get nic capability */
+ if (!hinic_support_nic(nic_dev->hwdev, &nic_dev->nic_cap)) {
+ PMD_DRV_LOG(ERR, "Hw doesn't support nic, dev_name: %s",
+ eth_dev->data->name);
+ rc = -EINVAL;
+ goto nic_check_fail;
+ }
+
+ /* init root cla and function table */
+ rc = hinic_init_nicio(nic_dev->hwdev);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Initialize nic_io failed, dev_name: %s",
+ eth_dev->data->name);
+ goto init_nicio_fail;
+ }
+
+ /* init_software_txrxq */
+ rc = hinic_init_sw_rxtxqs(nic_dev);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Initialize sw_rxtxqs failed, dev_name: %s",
+ eth_dev->data->name);
+ goto init_sw_rxtxqs_fail;
+ }
+
+ rc = hinic_copy_mempool_init(nic_dev);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Create copy mempool failed, dev_name: %s",
+ eth_dev->data->name);
+ goto init_mpool_fail;
+ }
+
+ /* set hardware feature to default status */
+ rc = hinic_set_default_hw_feature(nic_dev);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Initialize hardware default features failed, dev_name: %s",
+ eth_dev->data->name);
+ goto set_default_hw_feature_fail;
+ }
+
+ return 0;
+
+set_default_hw_feature_fail:
+ hinic_copy_mempool_uninit(nic_dev);
+
+init_mpool_fail:
+ hinic_deinit_sw_rxtxqs(nic_dev);
+
+init_sw_rxtxqs_fail:
+ hinic_deinit_nicio(nic_dev->hwdev);
+
+nic_check_fail:
+init_nicio_fail:
+init_cap_fail:
+ hinic_deactivate_hwdev_state(nic_dev->hwdev);
+
+init_resources_state_fail:
+ hinic_comm_cmdqs_free(nic_dev->hwdev);
+
+init_cmdq_fail:
+l2nic_reset_fail:
+workmode_check_fail:
+ hinic_comm_func_to_func_free(nic_dev->hwdev);
+
+init_func_to_func_fail:
+ hinic_comm_pf_to_mgmt_free(nic_dev->hwdev);
+
+init_pf_to_mgmt_fail:
+ hinic_comm_aeqs_free(nic_dev->hwdev);
+
+init_aeqs_fail:
+ free_cfg_mgmt(nic_dev->hwdev);
+
+init_cfgmgnt_fail:
+ hinic_hwif_res_free(nic_dev->hwdev);
+
+init_hwif_fail:
+ hinic_osdep_deinit(nic_dev->hwdev);
+
+init_osdep_fail:
+ rte_free(nic_dev->hwdev);
+ nic_dev->hwdev = NULL;
+
+ return rc;
+}
+
+static void hinic_nic_dev_destroy(struct rte_eth_dev *eth_dev)
+{
+ struct hinic_nic_dev *nic_dev =
+ HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
+
+ (void)hinic_set_link_status_follow(nic_dev->hwdev,
+ HINIC_LINK_FOLLOW_DEFAULT);
+ hinic_copy_mempool_uninit(nic_dev);
+ hinic_deinit_sw_rxtxqs(nic_dev);
+ hinic_deinit_nicio(nic_dev->hwdev);
+ hinic_deactivate_hwdev_state(nic_dev->hwdev);
+ hinic_comm_cmdqs_free(nic_dev->hwdev);
+ hinic_comm_func_to_func_free(nic_dev->hwdev);
+ hinic_comm_pf_to_mgmt_free(nic_dev->hwdev);
+ hinic_comm_aeqs_free(nic_dev->hwdev);
+ free_cfg_mgmt(nic_dev->hwdev);
+ hinic_hwif_res_free(nic_dev->hwdev);
+ hinic_osdep_deinit(nic_dev->hwdev);
+ rte_free(nic_dev->hwdev);
+ nic_dev->hwdev = NULL;
+}
+
+/**
+ * DPDK callback to close the device.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void hinic_dev_close(struct rte_eth_dev *dev)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+
+ if (hinic_test_and_set_bit(HINIC_DEV_CLOSE, &nic_dev->dev_status)) {
+ PMD_DRV_LOG(WARNING, "Device %s already closed",
+ dev->data->name);
+ return;
+ }
+
+ /* stop device first */
+ hinic_dev_stop(dev);
+
+ /* rx_cqe, rx_info */
+ hinic_free_all_rx_resources(dev);
+
+ /* tx_info */
+ hinic_free_all_tx_resources(dev);
+
+ /* free wq, pi_dma_addr */
+ hinic_free_all_rq(nic_dev);
+
+ /* free wq, db_addr */
+ hinic_free_all_sq(nic_dev);
+
+ /* deinit mac vlan tbl */
+ hinic_deinit_mac_addr(dev);
+ hinic_remove_all_vlanid(dev);
+
+ /* disable hardware and uio interrupt */
+ hinic_disable_interrupt(dev);
+
+ /* deinit nic hardware device */
+ hinic_nic_dev_destroy(dev);
+}
+
+static const struct eth_dev_ops hinic_pmd_ops = {
+ .dev_configure = hinic_dev_configure,
+ .dev_infos_get = hinic_dev_infos_get,
+ .fw_version_get = hinic_fw_version_get,
+ .rx_queue_setup = hinic_rx_queue_setup,
+ .tx_queue_setup = hinic_tx_queue_setup,
+ .dev_start = hinic_dev_start,
+ .dev_set_link_up = hinic_dev_set_link_up,
+ .dev_set_link_down = hinic_dev_set_link_down,
+ .link_update = hinic_link_update,
+ .rx_queue_release = hinic_rx_queue_release,
+ .tx_queue_release = hinic_tx_queue_release,
+ .dev_stop = hinic_dev_stop,
+ .dev_close = hinic_dev_close,
+ .mtu_set = hinic_dev_set_mtu,
+ .vlan_filter_set = hinic_vlan_filter_set,
+ .vlan_offload_set = hinic_vlan_offload_set,
+ .allmulticast_enable = hinic_dev_allmulticast_enable,
+ .allmulticast_disable = hinic_dev_allmulticast_disable,
+ .promiscuous_enable = hinic_dev_promiscuous_enable,
+ .promiscuous_disable = hinic_dev_promiscuous_disable,
+ .flow_ctrl_get = hinic_flow_ctrl_get,
+ .flow_ctrl_set = hinic_flow_ctrl_set,
+ .rss_hash_update = hinic_rss_hash_update,
+ .rss_hash_conf_get = hinic_rss_conf_get,
+ .reta_update = hinic_rss_indirtbl_update,
+ .reta_query = hinic_rss_indirtbl_query,
+ .stats_get = hinic_dev_stats_get,
+ .stats_reset = hinic_dev_stats_reset,
+ .xstats_get = hinic_dev_xstats_get,
+ .xstats_reset = hinic_dev_xstats_reset,
+ .xstats_get_names = hinic_dev_xstats_get_names,
+ .rxq_info_get = hinic_rxq_info_get,
+ .txq_info_get = hinic_txq_info_get,
+ .mac_addr_set = hinic_set_mac_addr,
+ .mac_addr_remove = hinic_mac_addr_remove,
+ .mac_addr_add = hinic_mac_addr_add,
+ .set_mc_addr_list = hinic_set_mc_addr_list,
+ .filter_ctrl = hinic_dev_filter_ctrl,
+};
+
+static const struct eth_dev_ops hinic_pmd_vf_ops = {
+ .dev_configure = hinic_dev_configure,
+ .dev_infos_get = hinic_dev_infos_get,
+ .fw_version_get = hinic_fw_version_get,
+ .rx_queue_setup = hinic_rx_queue_setup,
+ .tx_queue_setup = hinic_tx_queue_setup,
+ .dev_start = hinic_dev_start,
+ .link_update = hinic_link_update,
+ .rx_queue_release = hinic_rx_queue_release,
+ .tx_queue_release = hinic_tx_queue_release,
+ .dev_stop = hinic_dev_stop,
+ .dev_close = hinic_dev_close,
+ .mtu_set = hinic_dev_set_mtu,
+ .vlan_filter_set = hinic_vlan_filter_set,
+ .vlan_offload_set = hinic_vlan_offload_set,
+ .allmulticast_enable = hinic_dev_allmulticast_enable,
+ .allmulticast_disable = hinic_dev_allmulticast_disable,
+ .rss_hash_update = hinic_rss_hash_update,
+ .rss_hash_conf_get = hinic_rss_conf_get,
+ .reta_update = hinic_rss_indirtbl_update,
+ .reta_query = hinic_rss_indirtbl_query,
+ .stats_get = hinic_dev_stats_get,
+ .stats_reset = hinic_dev_stats_reset,
+ .xstats_get = hinic_dev_xstats_get,
+ .xstats_reset = hinic_dev_xstats_reset,
+ .xstats_get_names = hinic_dev_xstats_get_names,
+ .rxq_info_get = hinic_rxq_info_get,
+ .txq_info_get = hinic_txq_info_get,
+ .mac_addr_set = hinic_set_mac_addr,
+ .mac_addr_remove = hinic_mac_addr_remove,
+ .mac_addr_add = hinic_mac_addr_add,
+ .set_mc_addr_list = hinic_set_mc_addr_list,
+ .filter_ctrl = hinic_dev_filter_ctrl,
+};
+
+static int hinic_func_init(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev;
+ struct rte_ether_addr *eth_addr;
+ struct hinic_nic_dev *nic_dev;
+ struct hinic_filter_info *filter_info;
+ struct hinic_tcam_info *tcam_info;
+ u32 mac_size;
+ int rc;
+
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+ /* EAL is SECONDARY and eth_dev is already created */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ PMD_DRV_LOG(INFO, "Initialize %s in secondary process",
+ eth_dev->data->name);
+
+ return 0;
+ }
+
+ nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
+ memset(nic_dev, 0, sizeof(*nic_dev));
+
+ snprintf(nic_dev->proc_dev_name,
+ sizeof(nic_dev->proc_dev_name),
+ "hinic-%.4x:%.2x:%.2x.%x",
+ pci_dev->addr.domain, pci_dev->addr.bus,
+ pci_dev->addr.devid, pci_dev->addr.function);
+
+ /* alloc mac_addrs */
+ mac_size = HINIC_MAX_UC_MAC_ADDRS * sizeof(struct rte_ether_addr);
+ eth_addr = rte_zmalloc("hinic_mac", mac_size, 0);
+ if (!eth_addr) {
+ PMD_DRV_LOG(ERR, "Allocate ethernet addresses' memory failed, dev_name: %s",
+ eth_dev->data->name);
+ rc = -ENOMEM;
+ goto eth_addr_fail;
+ }
+ eth_dev->data->mac_addrs = eth_addr;
+
+ mac_size = HINIC_MAX_MC_MAC_ADDRS * sizeof(struct rte_ether_addr);
+ nic_dev->mc_list = rte_zmalloc("hinic_mc", mac_size, 0);
+ if (!nic_dev->mc_list) {
+ PMD_DRV_LOG(ERR, "Allocate mcast address' memory failed, dev_name: %s",
+ eth_dev->data->name);
+ rc = -ENOMEM;
+ goto mc_addr_fail;
+ }
+
+ /*
+	 * Tell rte_eth_dev_close() that it should also release the private
+	 * port resources.
+ */
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
+
+ /* create hardware nic_device */
+ rc = hinic_nic_dev_create(eth_dev);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Create nic device failed, dev_name: %s",
+ eth_dev->data->name);
+ goto create_nic_dev_fail;
+ }
+
+ if (HINIC_IS_VF(nic_dev->hwdev))
+ eth_dev->dev_ops = &hinic_pmd_vf_ops;
+ else
+ eth_dev->dev_ops = &hinic_pmd_ops;
+
+ rc = hinic_init_mac_addr(eth_dev);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Initialize mac table failed, dev_name: %s",
+ eth_dev->data->name);
+ goto init_mac_fail;
+ }
+
+ /* register callback func to eal lib */
+ rc = rte_intr_callback_register(&pci_dev->intr_handle,
+ hinic_dev_interrupt_handler,
+ (void *)eth_dev);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Register rte interrupt callback failed, dev_name: %s",
+ eth_dev->data->name);
+ goto reg_intr_cb_fail;
+ }
+
+ /* enable uio/vfio intr/eventfd mapping */
+ rc = rte_intr_enable(&pci_dev->intr_handle);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Enable rte interrupt failed, dev_name: %s",
+ eth_dev->data->name);
+ goto enable_intr_fail;
+ }
+ hinic_set_bit(HINIC_DEV_INTR_EN, &nic_dev->dev_status);
+
+ /* initialize filter info */
+ filter_info = &nic_dev->filter;
+ tcam_info = &nic_dev->tcam;
+ memset(filter_info, 0, sizeof(struct hinic_filter_info));
+ memset(tcam_info, 0, sizeof(struct hinic_tcam_info));
+ /* initialize 5tuple filter list */
+ TAILQ_INIT(&filter_info->fivetuple_list);
+ TAILQ_INIT(&tcam_info->tcam_list);
+ TAILQ_INIT(&nic_dev->filter_ntuple_list);
+ TAILQ_INIT(&nic_dev->filter_ethertype_list);
+ TAILQ_INIT(&nic_dev->filter_fdir_rule_list);
+ TAILQ_INIT(&nic_dev->hinic_flow_list);
+
+ hinic_set_bit(HINIC_DEV_INIT, &nic_dev->dev_status);
+ PMD_DRV_LOG(INFO, "Initialize %s in primary successfully",
+ eth_dev->data->name);
+
+ return 0;
+
+enable_intr_fail:
+ (void)rte_intr_callback_unregister(&pci_dev->intr_handle,
+ hinic_dev_interrupt_handler,
+ (void *)eth_dev);
+
+reg_intr_cb_fail:
+ hinic_deinit_mac_addr(eth_dev);
+
+init_mac_fail:
+ eth_dev->dev_ops = NULL;
+ hinic_nic_dev_destroy(eth_dev);
+
+create_nic_dev_fail:
+ rte_free(nic_dev->mc_list);
+ nic_dev->mc_list = NULL;
+
+mc_addr_fail:
+ rte_free(eth_addr);
+ eth_dev->data->mac_addrs = NULL;
+
+eth_addr_fail:
+ PMD_DRV_LOG(ERR, "Initialize %s in primary failed",
+ eth_dev->data->name);
+ return rc;
+}
+
+static int hinic_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev;
+
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+ PMD_DRV_LOG(INFO, "Initializing pf hinic-%.4x:%.2x:%.2x.%x in %s process",
+ pci_dev->addr.domain, pci_dev->addr.bus,
+ pci_dev->addr.devid, pci_dev->addr.function,
+ (rte_eal_process_type() == RTE_PROC_PRIMARY) ?
+ "primary" : "secondary");
+
+ /* rte_eth_dev rx_burst and tx_burst */
+ eth_dev->rx_pkt_burst = hinic_recv_pkts;
+ eth_dev->tx_pkt_burst = hinic_xmit_pkts;
+
+ return hinic_func_init(eth_dev);
+}
+
+static int hinic_dev_uninit(struct rte_eth_dev *dev)
+{
+ struct hinic_nic_dev *nic_dev;
+
+ nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ hinic_clear_bit(HINIC_DEV_INIT, &nic_dev->dev_status);
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ hinic_dev_close(dev);
+
+ dev->dev_ops = NULL;
+ dev->rx_pkt_burst = NULL;
+ dev->tx_pkt_burst = NULL;
+
+ rte_free(nic_dev->mc_list);
+
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
+
+ return HINIC_OK;
+}
+
+static struct rte_pci_id pci_id_hinic_map[] = {
+ { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_PRD) },
+ { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_MEZZ_25GE) },
+ { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_MEZZ_100GE) },
+ { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_VF) },
+ { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_VF_HV) },
+ { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_1822_DUAL_25GE) },
+ { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_1822_100GE) },
+ {.vendor_id = 0},
+};
+
+static int hinic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct hinic_nic_dev), hinic_dev_init);
+}
+
+static int hinic_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, hinic_dev_uninit);
+}
+
+static struct rte_pci_driver rte_hinic_pmd = {
+ .id_table = pci_id_hinic_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = hinic_pci_probe,
+ .remove = hinic_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_hinic, rte_hinic_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_hinic, pci_id_hinic_map);
+
+RTE_INIT(hinic_init_log)
+{
+ hinic_logtype = rte_log_register("pmd.net.hinic");
+ if (hinic_logtype >= 0)
+ rte_log_set_level(hinic_logtype, RTE_LOG_INFO);
+}
diff --git a/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_ethdev.h b/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_ethdev.h
new file mode 100644
index 000000000..64b2c8105
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_ethdev.h
@@ -0,0 +1,352 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_PMD_ETHDEV_H_
+#define _HINIC_PMD_ETHDEV_H_
+
+#include <rte_ethdev.h>
+#include <rte_ethdev_core.h>
+
+#include "base/hinic_compat.h"
+#include "base/hinic_pmd_cfg.h"
+
+#define HINIC_DEV_NAME_LEN 32
+#define HINIC_MAX_RX_QUEUES 64
+
+/* mbuf pool for copy invalid mbuf segs */
+#define HINIC_COPY_MEMPOOL_DEPTH 128
+#define HINIC_COPY_MBUF_SIZE 4096
+
+#define SIZE_8BYTES(size) (ALIGN((u32)(size), 8) >> 3)
+
+#define HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev) \
+ ((struct hinic_nic_dev *)(dev)->data->dev_private)
+
+#define HINIC_MAX_QUEUE_DEPTH 4096
+#define HINIC_MIN_QUEUE_DEPTH 128
+#define HINIC_TXD_ALIGN 1
+#define HINIC_RXD_ALIGN 1
+
+#define HINIC_UINT32_BIT_SIZE (CHAR_BIT * sizeof(uint32_t))
+#define HINIC_VFTA_SIZE (4096 / HINIC_UINT32_BIT_SIZE)
+
+enum hinic_dev_status {
+ HINIC_DEV_INIT,
+ HINIC_DEV_CLOSE,
+ HINIC_DEV_START,
+ HINIC_DEV_INTR_EN,
+};
+
+#define HINIC_MAX_Q_FILTERS 64 /* hinic only supports 64 filter types */
+#define HINIC_PKT_TYPE_FIND_ID(pkt_type) ((pkt_type) - HINIC_MAX_Q_FILTERS)
+
+/* 5tuple filter info */
+struct hinic_5tuple_filter_info {
+ uint32_t dst_ip;
+ uint32_t src_ip;
+ uint16_t dst_port;
+ uint16_t src_port;
+ uint8_t proto; /* l4 protocol. */
+ /*
+ * seven levels (001b-111b), 111b is highest,
+ * used when more than one filter matches.
+ */
+ uint8_t priority;
+
+ /* if a mask bit is 1b, do not compare the corresponding field */
+ uint8_t dst_ip_mask:1,
+ src_ip_mask:1,
+ dst_port_mask:1,
+ src_port_mask:1,
+ proto_mask:1;
+};
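+
+/*
+ * Illustrative sketch (not part of the driver): a filter that matches only
+ * the L4 protocol and destination port (e.g. TCP, IP protocol 6, port 80)
+ * keeps dst_port_mask and proto_mask at 0 (compare) and sets the other mask
+ * bits to 1 (ignore). Field values below are assumptions for this example
+ * only.
+ *
+ *   struct hinic_5tuple_filter_info info = {
+ *           .dst_port = 80,
+ *           .proto = 6,
+ *           .priority = 7,
+ *           .dst_ip_mask = 1, .src_ip_mask = 1,
+ *           .src_port_mask = 1,
+ *           .dst_port_mask = 0, .proto_mask = 0,
+ *   };
+ */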
+
+/* 5tuple filter structure */
+struct hinic_5tuple_filter {
+ TAILQ_ENTRY(hinic_5tuple_filter) entries;
+ uint16_t index; /* the index of 5tuple filter */
+ struct hinic_5tuple_filter_info filter_info;
+ uint16_t queue; /* rx queue assigned to */
+};
+
+TAILQ_HEAD(hinic_5tuple_filter_list, hinic_5tuple_filter);
+
+/*
+ * If this filter is added by configuration,
+ * it should not be removed.
+ */
+struct hinic_pkt_filter {
+ uint16_t pkt_proto;
+ uint8_t qid;
+ bool enable;
+};
+
+/* Structure to store filters' info. */
+struct hinic_filter_info {
+ uint8_t pkt_type;
+ uint8_t qid;
+ uint64_t type_mask; /* Bit mask for every used filter */
+ struct hinic_5tuple_filter_list fivetuple_list;
+ struct hinic_pkt_filter pkt_filters[HINIC_MAX_Q_FILTERS];
+};
+
+/* Information about the fdir mode. */
+struct hinic_hw_fdir_mask {
+ uint32_t src_ipv4_mask;
+ uint32_t dst_ipv4_mask;
+ uint16_t src_port_mask;
+ uint16_t dst_port_mask;
+ uint16_t proto_mask;
+ uint16_t tunnel_flag;
+ uint16_t tunnel_inner_src_port_mask;
+ uint16_t tunnel_inner_dst_port_mask;
+ uint16_t dst_ipv6_mask;
+};
+
+/* Flow Director attribute */
+struct hinic_atr_input {
+ uint32_t dst_ip;
+ uint32_t src_ip;
+ uint16_t src_port;
+ uint16_t dst_port;
+ uint16_t proto;
+ uint16_t tunnel_flag;
+ uint16_t tunnel_inner_src_port;
+ uint16_t tunnel_inner_dst_port;
+ uint8_t dst_ipv6[16];
+};
+
+enum hinic_fdir_mode {
+ HINIC_FDIR_MODE_NORMAL = 0,
+ HINIC_FDIR_MODE_TCAM = 1,
+};
+
+#define HINIC_PF_MAX_TCAM_FILTERS 1024
+#define HINIC_VF_MAX_TCAM_FILTERS 128
+#define HINIC_SUPPORT_PF_MAX_NUM 4
+#define HINIC_TOTAL_PF_MAX_NUM 16
+#define HINIC_SUPPORT_VF_MAX_NUM 32
+#define HINIC_TCAM_BLOCK_TYPE_PF 0 /* 1024 TCAM entries per block */
+#define HINIC_TCAM_BLOCK_TYPE_VF 1 /* 128 TCAM entries per block */
+
+#define HINIC_PKT_VF_TCAM_INDEX_START(block_index) \
+ (HINIC_PF_MAX_TCAM_FILTERS * HINIC_SUPPORT_PF_MAX_NUM + \
+ HINIC_VF_MAX_TCAM_FILTERS * (block_index))
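+
+/*
+ * Worked example (illustrative only): with the sizes above, VF TCAM block 2
+ * starts at index 1024 * 4 + 128 * 2 = 4352.
+ */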
+
+TAILQ_HEAD(hinic_tcam_filter_list, hinic_tcam_filter);
+
+struct hinic_tcam_info {
+ struct hinic_tcam_filter_list tcam_list;
+ u8 tcam_index_array[HINIC_PF_MAX_TCAM_FILTERS];
+ u16 tcam_block_index;
+ u16 tcam_rule_nums;
+};
+
+struct tag_tcam_key_mem {
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN)
+
+ u32 rsvd0:16;
+ u32 function_id:16;
+
+ u32 protocol:8;
+ /*
+ * tunnel packet, mask must be 0xff, spec value is 1;
+ * normal packet, mask must be 0, spec value is 0;
+ * for a tunnel packet, the ucode uses
+ * sip/dip/protocol/src_port/dst_port from the inner packet
+ */
+ u32 tunnel_flag:8;
+ u32 sip_h:16;
+
+ u32 sip_l:16;
+ u32 dip_h:16;
+
+ u32 dip_l:16;
+ u32 src_port:16;
+
+ u32 dst_port:16;
+ /*
+ * tunnel packet and normal packet,
+ * ext_dip mask must be 0xffffffff
+ */
+ u32 ext_dip_h:16;
+ u32 ext_dip_l:16;
+ u32 rsvd2:16;
+#else
+ u32 function_id:16;
+ u32 rsvd0:16;
+
+ u32 sip_h:16;
+ u32 tunnel_flag:8;
+ u32 protocol:8;
+
+ u32 dip_h:16;
+ u32 sip_l:16;
+
+ u32 src_port:16;
+ u32 dip_l:16;
+
+ u32 ext_dip_h:16;
+ u32 dst_port:16;
+
+ u32 rsvd2:16;
+ u32 ext_dip_l:16;
+#endif
+};
+
+struct tag_tcam_key_ipv6_mem {
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN)
+ u32 rsvd0:16;
+ u32 ipv6_flag:1;
+ u32 protocol:7;
+ u32 function_id:8;
+
+ u32 dst_port:16;
+ u32 ipv6_key0:16;
+
+ u32 ipv6_key1:16;
+ u32 ipv6_key2:16;
+
+ u32 ipv6_key3:16;
+ u32 ipv6_key4:16;
+
+ u32 ipv6_key5:16;
+ u32 ipv6_key6:16;
+
+ u32 ipv6_key7:16;
+ u32 rsvd2:16;
+#else
+ u32 function_id:8;
+ u32 protocol:7;
+ u32 ipv6_flag:1;
+ u32 rsvd0:16;
+
+ u32 ipv6_key0:16;
+ u32 dst_port:16;
+
+ u32 ipv6_key2:16;
+ u32 ipv6_key1:16;
+
+ u32 ipv6_key4:16;
+ u32 ipv6_key3:16;
+
+ u32 ipv6_key6:16;
+ u32 ipv6_key5:16;
+
+ u32 rsvd2:16;
+ u32 ipv6_key7:16;
+#endif
+};
+
+struct tag_tcam_key {
+ union {
+ struct tag_tcam_key_mem key_info;
+ struct tag_tcam_key_ipv6_mem key_info_ipv6;
+ };
+
+ union {
+ struct tag_tcam_key_mem key_mask;
+ struct tag_tcam_key_ipv6_mem key_mask_ipv6;
+ };
+};
+
+struct hinic_fdir_rule {
+ struct hinic_hw_fdir_mask mask;
+ struct hinic_atr_input hinic_fdir; /* key of fdir filter */
+ uint8_t queue; /* queue assigned when matched */
+ enum hinic_fdir_mode mode; /* fdir type */
+ u16 tcam_index;
+};
+
+/* ntuple filter list structure */
+struct hinic_ntuple_filter_ele {
+ TAILQ_ENTRY(hinic_ntuple_filter_ele) entries;
+ struct rte_eth_ntuple_filter filter_info;
+};
+
+/* ethertype filter list structure */
+struct hinic_ethertype_filter_ele {
+ TAILQ_ENTRY(hinic_ethertype_filter_ele) entries;
+ struct rte_eth_ethertype_filter filter_info;
+};
+
+/* fdir filter list structure */
+struct hinic_fdir_rule_ele {
+ TAILQ_ENTRY(hinic_fdir_rule_ele) entries;
+ struct hinic_fdir_rule filter_info;
+};
+
+struct hinic_tcam_filter {
+ TAILQ_ENTRY(hinic_tcam_filter) entries;
+ uint16_t index; /* tcam index */
+ struct tag_tcam_key tcam_key;
+ uint16_t queue; /* rx queue assigned to */
+};
+
+struct rte_flow {
+ enum rte_filter_type filter_type;
+ void *rule;
+};
+
+/* hinic_flow memory list structure */
+struct hinic_flow_mem {
+ TAILQ_ENTRY(hinic_flow_mem) entries;
+ struct rte_flow *flow;
+};
+
+TAILQ_HEAD(hinic_ntuple_filter_list, hinic_ntuple_filter_ele);
+TAILQ_HEAD(hinic_ethertype_filter_list, hinic_ethertype_filter_ele);
+TAILQ_HEAD(hinic_fdir_rule_filter_list, hinic_fdir_rule_ele);
+TAILQ_HEAD(hinic_flow_mem_list, hinic_flow_mem);
+
+extern const struct rte_flow_ops hinic_flow_ops;
+
+/* hinic nic_device */
+struct hinic_nic_dev {
+ /* hardware device */
+ struct hinic_hwdev *hwdev;
+ struct hinic_txq **txqs;
+ struct hinic_rxq **rxqs;
+ struct rte_mempool *cpy_mpool;
+ u16 num_qps;
+ u16 num_sq;
+ u16 num_rq;
+ u16 mtu_size;
+ u8 rss_tmpl_idx;
+ u8 rss_indir_flag;
+ u8 num_rss;
+ u8 rx_queue_list[HINIC_MAX_RX_QUEUES];
+
+ bool pause_set;
+ struct nic_pause_config nic_pause;
+
+ u32 vfta[HINIC_VFTA_SIZE]; /* VLAN bitmap */
+
+ struct rte_ether_addr default_addr;
+ struct rte_ether_addr *mc_list;
+ /* info */
+ unsigned int flags;
+ struct nic_service_cap nic_cap;
+ u32 rx_mode_status; /* promisc or allmulticast */
+ unsigned long dev_status;
+
+ char proc_dev_name[HINIC_DEV_NAME_LEN];
+ /* PF0->COS4, PF1->COS5, PF2->COS6, PF3->COS7,
+ * vf: the same with associate pf
+ */
+ u32 default_cos;
+ u32 rx_csum_en;
+
+ struct hinic_filter_info filter;
+ struct hinic_tcam_info tcam;
+ struct hinic_ntuple_filter_list filter_ntuple_list;
+ struct hinic_ethertype_filter_list filter_ethertype_list;
+ struct hinic_fdir_rule_filter_list filter_fdir_rule_list;
+ struct hinic_flow_mem_list hinic_flow_list;
+};
+
+void hinic_free_fdir_filter(struct hinic_nic_dev *nic_dev);
+
+void hinic_destroy_fdir_filter(struct rte_eth_dev *dev);
+#endif /* _HINIC_PMD_ETHDEV_H_ */
diff --git a/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_flow.c b/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_flow.c
new file mode 100644
index 000000000..cc0744da2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_flow.c
@@ -0,0 +1,3272 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include "base/hinic_compat.h"
+#include "base/hinic_pmd_hwdev.h"
+#include "base/hinic_pmd_hwif.h"
+#include "base/hinic_pmd_wq.h"
+#include "base/hinic_pmd_cmdq.h"
+#include "base/hinic_pmd_niccfg.h"
+#include "hinic_pmd_ethdev.h"
+
+#define HINIC_MAX_RX_QUEUE_NUM 64
+
+#ifndef UINT8_MAX
+#define UINT8_MAX (u8)(~((u8)0)) /* 0xFF */
+#define UINT16_MAX (u16)(~((u16)0)) /* 0xFFFF */
+#define UINT32_MAX (u32)(~((u32)0)) /* 0xFFFFFFFF */
+#define UINT64_MAX (u64)(~((u64)0)) /* 0xFFFFFFFFFFFFFFFF */
+#define ASCII_MAX (0x7F)
+#endif
+
+/* IPSURX MACRO */
+#define PA_ETH_TYPE_ROCE 0
+#define PA_ETH_TYPE_IPV4 1
+#define PA_ETH_TYPE_IPV6 2
+#define PA_ETH_TYPE_OTHER 3
+
+#define PA_IP_PROTOCOL_TYPE_TCP 1
+#define PA_IP_PROTOCOL_TYPE_UDP 2
+#define PA_IP_PROTOCOL_TYPE_ICMP 3
+#define PA_IP_PROTOCOL_TYPE_IPV4_IGMP 4
+#define PA_IP_PROTOCOL_TYPE_SCTP 5
+#define PA_IP_PROTOCOL_TYPE_VRRP 112
+
+#define IP_HEADER_PROTOCOL_TYPE_TCP 6
+#define IP_HEADER_PROTOCOL_TYPE_UDP 17
+#define IP_HEADER_PROTOCOL_TYPE_ICMP 1
+#define IP_HEADER_PROTOCOL_TYPE_ICMPV6 58
+
+#define FDIR_TCAM_NORMAL_PACKET 0
+#define FDIR_TCAM_TUNNEL_PACKET 1
+
+#define HINIC_MIN_N_TUPLE_PRIO 1
+#define HINIC_MAX_N_TUPLE_PRIO 7
+
+/* TCAM type mask in hardware */
+#define TCAM_PKT_BGP_SPORT 1
+#define TCAM_PKT_VRRP 2
+#define TCAM_PKT_BGP_DPORT 3
+#define TCAM_PKT_LACP 4
+
+#define TCAM_DIP_IPV4_TYPE 0
+#define TCAM_DIP_IPV6_TYPE 1
+
+#define BGP_DPORT_ID 179
+#define IPPROTO_VRRP 112
+
+/* Packet type defined in hardware to perform filter */
+#define PKT_IGMP_IPV4_TYPE 64
+#define PKT_ICMP_IPV4_TYPE 65
+#define PKT_ICMP_IPV6_TYPE 66
+#define PKT_ICMP_IPV6RS_TYPE 67
+#define PKT_ICMP_IPV6RA_TYPE 68
+#define PKT_ICMP_IPV6NS_TYPE 69
+#define PKT_ICMP_IPV6NA_TYPE 70
+#define PKT_ICMP_IPV6RE_TYPE 71
+#define PKT_DHCP_IPV4_TYPE 72
+#define PKT_DHCP_IPV6_TYPE 73
+#define PKT_LACP_TYPE 74
+#define PKT_ARP_REQ_TYPE 79
+#define PKT_ARP_REP_TYPE 80
+#define PKT_ARP_TYPE 81
+#define PKT_BGPD_DPORT_TYPE 83
+#define PKT_BGPD_SPORT_TYPE 84
+#define PKT_VRRP_TYPE 85
+
+#define HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev) \
+ (&((struct hinic_nic_dev *)nic_dev)->filter)
+
+#define HINIC_DEV_PRIVATE_TO_TCAM_INFO(nic_dev) \
+ (&((struct hinic_nic_dev *)nic_dev)->tcam)
+
+enum hinic_atr_flow_type {
+ HINIC_ATR_FLOW_TYPE_IPV4_DIP = 0x1,
+ HINIC_ATR_FLOW_TYPE_IPV4_SIP = 0x2,
+ HINIC_ATR_FLOW_TYPE_DPORT = 0x3,
+ HINIC_ATR_FLOW_TYPE_SPORT = 0x4,
+};
+
+/* Structure to store fdir's info. */
+struct hinic_fdir_info {
+ uint8_t fdir_flag;
+ uint8_t qid;
+ uint32_t fdir_key;
+};
+
+/**
+ * An endless loop cannot happen under the assumptions below:
+ * 1. there is at least one non-void item (END);
+ * 2. cur is before END.
+ */
+static inline const struct rte_flow_item *
+next_no_void_pattern(const struct rte_flow_item pattern[],
+ const struct rte_flow_item *cur)
+{
+ const struct rte_flow_item *next =
+ cur ? cur + 1 : &pattern[0];
+ while (1) {
+ if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
+ return next;
+ next++;
+ }
+}
+
+static inline const struct rte_flow_action *
+next_no_void_action(const struct rte_flow_action actions[],
+ const struct rte_flow_action *cur)
+{
+ const struct rte_flow_action *next =
+ cur ? cur + 1 : &actions[0];
+ while (1) {
+ if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
+ return next;
+ next++;
+ }
+}
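+
+/*
+ * Illustrative sketch (not part of the driver): the parsers below walk a
+ * pattern with these helpers roughly as follows; the variable names are
+ * assumptions for this example only.
+ *
+ *   const struct rte_flow_item *item = next_no_void_pattern(pattern, NULL);
+ *   while (item->type != RTE_FLOW_ITEM_TYPE_END)
+ *           item = next_no_void_pattern(pattern, item);
+ */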
+
+static int hinic_check_ethertype_attr_ele(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ /* Must be input direction */
+ if (!attr->ingress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ if (attr->egress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ if (attr->priority) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ if (attr->group) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ attr, "Not support group.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int hinic_check_filter_arg(const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int hinic_check_ethertype_first_item(const struct rte_flow_item *item,
+ struct rte_flow_error *error)
+{
+ /* The first non-void item should be MAC */
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ /* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Get the MAC info. */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter");
+ return -rte_errno;
+ }
+ return 0;
+}
+
+static int
+hinic_parse_ethertype_action(const struct rte_flow_action *actions,
+ const struct rte_flow_action *act,
+ const struct rte_flow_action_queue *act_q,
+ struct rte_eth_ethertype_filter *filter,
+ struct rte_flow_error *error)
+{
+ /* Parse action */
+ act = next_no_void_action(actions, NULL);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+ } else {
+ filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
+ }
+
+ /* Check if the next non-void item is END */
+ act = next_no_void_action(actions, act);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/**
+ * Parse the rule to see if it is an ethertype rule,
+ * and get the ethertype filter info along the way.
+ * pattern:
+ * The first not void item can be ETH.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * ETH type 0x0807 0xFFFF
+ * END
+ * Other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
+static int cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_eth_ethertype_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act = NULL;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_action_queue *act_q = NULL;
+
+ if (hinic_check_filter_arg(attr, pattern, actions, error))
+ return -rte_errno;
+
+ item = next_no_void_pattern(pattern, NULL);
+ if (hinic_check_ethertype_first_item(item, error))
+ return -rte_errno;
+
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+ /*
+ * Mask bits of source MAC address must be full of 0.
+ * Mask bits of destination MAC address must be full
+ * of 1 or full of 0.
+ */
+ if (!rte_is_zero_ether_addr(&eth_mask->src) ||
+ (!rte_is_zero_ether_addr(&eth_mask->dst) &&
+ !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ether address mask");
+ return -rte_errno;
+ }
+
+ if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ethertype mask");
+ return -rte_errno;
+ }
+
+ /*
+ * If mask bits of destination MAC address
+ * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
+ */
+ if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
+ filter->mac_addr = eth_spec->dst;
+ filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
+ } else {
+ filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
+ }
+ filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
+
+ /* Check if the next non-void item is END. */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter.");
+ return -rte_errno;
+ }
+
+ if (hinic_parse_ethertype_action(actions, act, act_q, filter, error))
+ return -rte_errno;
+
+ if (hinic_check_ethertype_attr_ele(attr, error))
+ return -rte_errno;
+
+ return 0;
+}
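+
+/*
+ * Illustrative sketch (not part of the driver): an application-side rte_flow
+ * rule matching the ethertype pattern documented above, e.g. ARP frames
+ * steered to queue 0. The values and variable names are assumptions for this
+ * example only.
+ *
+ *   struct rte_flow_item_eth eth_spec = { .type = RTE_BE16(RTE_ETHER_TYPE_ARP) };
+ *   struct rte_flow_item_eth eth_mask = { .type = RTE_BE16(0xFFFF) };
+ *   struct rte_flow_item pattern[] = {
+ *           { .type = RTE_FLOW_ITEM_TYPE_ETH,
+ *             .spec = &eth_spec, .mask = &eth_mask },
+ *           { .type = RTE_FLOW_ITEM_TYPE_END },
+ *   };
+ *   struct rte_flow_action_queue queue = { .index = 0 };
+ *   struct rte_flow_action actions[] = {
+ *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
+ *           { .type = RTE_FLOW_ACTION_TYPE_END },
+ *   };
+ *   struct rte_flow_attr attr = { .ingress = 1 };
+ */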
+
+static int hinic_parse_ethertype_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ethertype_filter *filter,
+ struct rte_flow_error *error)
+{
+ if (cons_parse_ethertype_filter(attr, pattern, actions, filter, error))
+ return -rte_errno;
+
+ /* NIC doesn't support MAC address. */
+ if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ if (filter->queue >= dev->data->nb_rx_queues) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Queue index much too big");
+ return -rte_errno;
+ }
+
+ if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
+ filter->ether_type == RTE_ETHER_TYPE_IPV6) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "IPv4/IPv6 not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Drop option is unsupported");
+ return -rte_errno;
+ }
+
+ /* Hinic only supports LACP/ARP ether types */
+ if (filter->ether_type != RTE_ETHER_TYPE_SLOW &&
+ filter->ether_type != RTE_ETHER_TYPE_ARP) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "only lacp/arp type supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int hinic_check_ntuple_attr_ele(const struct rte_flow_attr *attr,
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error)
+{
+ /* Must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ if (attr->priority > 0xFFFF) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Error priority.");
+ return -rte_errno;
+ }
+
+ if (attr->priority < HINIC_MIN_N_TUPLE_PRIO ||
+ attr->priority > HINIC_MAX_N_TUPLE_PRIO)
+ filter->priority = 1;
+ else
+ filter->priority = (uint16_t)attr->priority;
+
+ return 0;
+}
+
+static int
+hinic_check_ntuple_act_ele(__rte_unused const struct rte_flow_item *item,
+ const struct rte_flow_action actions[],
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action *act;
+ /*
+ * n-tuple only supports forwarding,
+ * check if the first not void action is QUEUE.
+ */
+ act = next_no_void_action(actions, NULL);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Flow action type is not QUEUE.");
+ return -rte_errno;
+ }
+ filter->queue =
+ ((const struct rte_flow_action_queue *)act->conf)->index;
+
+ /* Check if the next not void item is END */
+ act = next_no_void_action(actions, act);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Next not void item is not END.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int hinic_ntuple_item_check_ether(const struct rte_flow_item **ipv4_item,
+ const struct rte_flow_item pattern[],
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+
+ /* The first not void item can be MAC or IPv4 */
+ item = next_no_void_pattern(pattern, NULL);
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ /* Skip Ethernet */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ /* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /* if the first item is MAC, the content should be NULL */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ /* check if the next not void item is IPv4 */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ }
+
+ *ipv4_item = item;
+ return 0;
+}
+
+static int
+hinic_ntuple_item_check_ipv4(const struct rte_flow_item **in_out_item,
+ const struct rte_flow_item pattern[],
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_ipv4 *ipv4_spec;
+ const struct rte_flow_item_ipv4 *ipv4_mask;
+ const struct rte_flow_item *item = *in_out_item;
+
+ /* Get the IPv4 info */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ntuple mask");
+ return -rte_errno;
+ }
+ /* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+ /*
+ * Only support src & dst addresses, protocol,
+ * others should be masked.
+ */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.type_of_service ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.time_to_live ||
+ ipv4_mask->hdr.hdr_checksum ||
+ !ipv4_mask->hdr.next_proto_id) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
+ filter->src_ip_mask = ipv4_mask->hdr.src_addr;
+ filter->proto_mask = ipv4_mask->hdr.next_proto_id;
+
+ ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+ filter->dst_ip = ipv4_spec->hdr.dst_addr;
+ filter->src_ip = ipv4_spec->hdr.src_addr;
+ filter->proto = ipv4_spec->hdr.next_proto_id;
+
+ /* Get next no void item */
+ *in_out_item = next_no_void_pattern(pattern, item);
+ return 0;
+}
+
+static int hinic_ntuple_item_check_l4(const struct rte_flow_item **in_out_item,
+ const struct rte_flow_item pattern[],
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_tcp *tcp_spec;
+ const struct rte_flow_item_tcp *tcp_mask;
+ const struct rte_flow_item_icmp *icmp_mask;
+ const struct rte_flow_item *item = *in_out_item;
+ u32 ntuple_filter_size = sizeof(struct rte_eth_ntuple_filter);
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_END)
+ return 0;
+
+ /* Get TCP or UDP info */
+ if (item->type != RTE_FLOW_ITEM_TYPE_END &&
+ (!item->spec || !item->mask)) {
+ memset(filter, 0, ntuple_filter_size);
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ntuple mask");
+ return -rte_errno;
+ }
+
+ /* Not supported last point for range */
+ if (item->last) {
+ memset(filter, 0, ntuple_filter_size);
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+ tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+
+ /*
+ * Only support src & dst ports, tcp flags,
+ * others should be masked.
+ */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ memset(filter, 0, ntuple_filter_size);
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_port_mask = tcp_mask->hdr.dst_port;
+ filter->src_port_mask = tcp_mask->hdr.src_port;
+ if (tcp_mask->hdr.tcp_flags == 0xFF) {
+ filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
+ } else if (!tcp_mask->hdr.tcp_flags) {
+ filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
+ } else {
+ memset(filter, 0, ntuple_filter_size);
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ filter->dst_port = tcp_spec->hdr.dst_port;
+ filter->src_port = tcp_spec->hdr.src_port;
+ filter->tcp_flags = tcp_spec->hdr.tcp_flags;
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) {
+ icmp_mask = (const struct rte_flow_item_icmp *)item->mask;
+
+ /* ICMP all should be masked. */
+ if (icmp_mask->hdr.icmp_cksum ||
+ icmp_mask->hdr.icmp_ident ||
+ icmp_mask->hdr.icmp_seq_nb ||
+ icmp_mask->hdr.icmp_type ||
+ icmp_mask->hdr.icmp_code) {
+ memset(filter, 0, ntuple_filter_size);
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get next no void item */
+ *in_out_item = next_no_void_pattern(pattern, item);
+ return 0;
+}
+
+static int hinic_ntuple_item_check_end(const struct rte_flow_item *item,
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error)
+{
+ /* Check if the next not void item is END */
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ return 0;
+}
+
+static int hinic_check_ntuple_item_ele(const struct rte_flow_item *item,
+ const struct rte_flow_item pattern[],
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error)
+{
+ if (hinic_ntuple_item_check_ether(&item, pattern, error) ||
+ hinic_ntuple_item_check_ipv4(&item, pattern, filter, error) ||
+ hinic_ntuple_item_check_l4(&item, pattern, filter, error) ||
+ hinic_ntuple_item_check_end(item, filter, error))
+ return -rte_errno;
+
+ return 0;
+}
+
+/**
+ * Parse the rule to see if it is an n-tuple rule,
+ * and get the n-tuple filter info along the way.
+ * pattern:
+ * The first not void item can be ETH or IPV4.
+ * The second not void item must be IPV4 if the first one is ETH.
+ * The third not void item must be UDP or TCP.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
+ * dst_addr 192.167.3.50 0xFFFFFFFF
+ * next_proto_id 17 0xFF
+ * UDP/TCP/ src_port 80 0xFFFF
+ * SCTP dst_port 80 0xFFFF
+ * END
+ * Other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ * Please be aware there is an assumption for all the parsers:
+ * rte_flow_item is using big endian, rte_flow_attr and
+ * rte_flow_action are using CPU order.
+ * Because the pattern is used to describe the packets,
+ * normally the packets should use network order.
+ */
+static int cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item = NULL;
+
+ if (hinic_check_filter_arg(attr, pattern, actions, error))
+ return -rte_errno;
+
+ if (hinic_check_ntuple_item_ele(item, pattern, filter, error))
+ return -rte_errno;
+
+ if (hinic_check_ntuple_act_ele(item, actions, filter, error))
+ return -rte_errno;
+
+ if (hinic_check_ntuple_attr_ele(attr, filter, error))
+ return -rte_errno;
+
+ return 0;
+}
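+
+/*
+ * Illustrative sketch (not part of the driver): an rte_flow pattern matching
+ * the n-tuple layout documented above, i.e. fully-masked IPv4 src/dst
+ * addresses plus TCP src/dst ports. Addresses and ports are assumptions for
+ * this example only; attr and actions are built as in the ethertype example.
+ *
+ *   struct rte_flow_item_ipv4 ip_spec = { .hdr = {
+ *           .src_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 20)),
+ *           .dst_addr = RTE_BE32(RTE_IPV4(192, 168, 3, 50)),
+ *           .next_proto_id = 6 } };
+ *   struct rte_flow_item_ipv4 ip_mask = { .hdr = {
+ *           .src_addr = UINT32_MAX, .dst_addr = UINT32_MAX,
+ *           .next_proto_id = UINT8_MAX } };
+ *   struct rte_flow_item_tcp tcp_spec = { .hdr = {
+ *           .src_port = RTE_BE16(80), .dst_port = RTE_BE16(80) } };
+ *   struct rte_flow_item_tcp tcp_mask = { .hdr = {
+ *           .src_port = RTE_BE16(0xFFFF), .dst_port = RTE_BE16(0xFFFF) } };
+ *   struct rte_flow_item pattern[] = {
+ *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
+ *           { .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ *             .spec = &ip_spec, .mask = &ip_mask },
+ *           { .type = RTE_FLOW_ITEM_TYPE_TCP,
+ *             .spec = &tcp_spec, .mask = &tcp_mask },
+ *           { .type = RTE_FLOW_ITEM_TYPE_END },
+ *   };
+ */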
+
+static int hinic_parse_ntuple_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error)
+{
+ int ret;
+
+ ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
+ if (ret)
+ return ret;
+
+ /* Hinic doesn't support tcp flags */
+ if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ /* Hinic only supports a limited priority range */
+ if (filter->priority < HINIC_MIN_N_TUPLE_PRIO ||
+ filter->priority > HINIC_MAX_N_TUPLE_PRIO) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Priority not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ if (filter->queue >= dev->data->nb_rx_queues)
+ return -rte_errno;
+
+ /* Fixed value for hinic */
+ filter->flags = RTE_5TUPLE_FLAGS;
+ return 0;
+}
+
+static int hinic_normal_item_check_ether(const struct rte_flow_item **ip_item,
+ const struct rte_flow_item pattern[],
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+
+ /* The first not void item can be MAC or IPv4 or TCP or UDP */
+ item = next_no_void_pattern(pattern, NULL);
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Not supported by fdir filter,support mac,ipv4,tcp,udp");
+ return -rte_errno;
+ }
+
+ /* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, item,
+ "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Skip Ethernet */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ /* All should be masked. */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter,support mac");
+ return -rte_errno;
+ }
+ /* Check if the next not void item is IPv4 */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Not supported by fdir filter,support mac,ipv4");
+ return -rte_errno;
+ }
+ }
+
+ *ip_item = item;
+ return 0;
+}
+
+static int hinic_normal_item_check_ip(const struct rte_flow_item **in_out_item,
+ const struct rte_flow_item pattern[],
+ struct hinic_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_ipv4 *ipv4_spec;
+ const struct rte_flow_item_ipv4 *ipv4_mask;
+ const struct rte_flow_item_ipv6 *ipv6_spec;
+ const struct rte_flow_item_ipv6 *ipv6_mask;
+ const struct rte_flow_item *item = *in_out_item;
+ int i;
+
+ /* Get the IPv4 info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+ /* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct hinic_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid fdir filter mask");
+ return -rte_errno;
+ }
+
+ ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+ /*
+ * Only support src & dst addresses,
+ * others should be masked.
+ */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.type_of_service ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.time_to_live ||
+ ipv4_mask->hdr.next_proto_id ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Not supported by fdir filter, support src,dst ip");
+ return -rte_errno;
+ }
+
+ rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
+ rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
+ rule->mode = HINIC_FDIR_MODE_NORMAL;
+
+ if (item->spec) {
+ ipv4_spec =
+ (const struct rte_flow_item_ipv4 *)item->spec;
+ rule->hinic_fdir.dst_ip = ipv4_spec->hdr.dst_addr;
+ rule->hinic_fdir.src_ip = ipv4_spec->hdr.src_addr;
+ }
+
+ /*
+ * Check if the next not void item is
+ * TCP or UDP or END.
+ */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_ICMP &&
+ item->type != RTE_FLOW_ITEM_TYPE_ANY &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct hinic_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Not supported by fdir filter, support tcp, udp, end");
+ return -rte_errno;
+ }
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+ /* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct hinic_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid fdir filter mask");
+ return -rte_errno;
+ }
+
+ ipv6_mask = (const struct rte_flow_item_ipv6 *)item->mask;
+
+ /* Only support dst addresses, others should be masked */
+ if (ipv6_mask->hdr.vtc_flow ||
+ ipv6_mask->hdr.payload_len ||
+ ipv6_mask->hdr.proto ||
+ ipv6_mask->hdr.hop_limits) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Not supported by fdir filter, support dst ipv6");
+ return -rte_errno;
+ }
+
+ /* check ipv6 src addr mask, ipv6 src addr is 16 bytes */
+ for (i = 0; i < 16; i++) {
+ if (ipv6_mask->hdr.src_addr[i] == UINT8_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Not supported by fdir filter, do not support src ipv6");
+ return -rte_errno;
+ }
+ }
+
+ if (!item->spec) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Not supported by fdir filter, ipv6 spec is NULL");
+ return -rte_errno;
+ }
+
+ for (i = 0; i < 16; i++) {
+ if (ipv6_mask->hdr.dst_addr[i] == UINT8_MAX)
+ rule->mask.dst_ipv6_mask |= 1 << i;
+ }
+
+ ipv6_spec = (const struct rte_flow_item_ipv6 *)item->spec;
+ rte_memcpy(rule->hinic_fdir.dst_ipv6,
+ ipv6_spec->hdr.dst_addr, 16);
+
+ /*
+ * Check if the next not void item is TCP or UDP or ICMP.
+ */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_ICMP &&
+ item->type != RTE_FLOW_ITEM_TYPE_ICMP6) {
+ memset(rule, 0, sizeof(struct hinic_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Not supported by fdir filter, support tcp, udp, icmp");
+ return -rte_errno;
+ }
+ }
+
+ *in_out_item = item;
+ return 0;
+}
+
+static int hinic_normal_item_check_l4(const struct rte_flow_item **in_out_item,
+ __rte_unused const struct rte_flow_item pattern[],
+ __rte_unused struct hinic_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item = *in_out_item;
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by normal fdir filter, not support l4");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int hinic_normal_item_check_end(const struct rte_flow_item *item,
+ struct hinic_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ /* Check if the next not void item is END */
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct hinic_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter, support end");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int hinic_check_normal_item_ele(const struct rte_flow_item *item,
+ const struct rte_flow_item pattern[],
+ struct hinic_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ if (hinic_normal_item_check_ether(&item, pattern, error) ||
+ hinic_normal_item_check_ip(&item, pattern, rule, error) ||
+ hinic_normal_item_check_l4(&item, pattern, rule, error) ||
+ hinic_normal_item_check_end(item, rule, error))
+ return -rte_errno;
+
+ return 0;
+}
+
+static int
+hinic_tcam_normal_item_check_l4(const struct rte_flow_item **in_out_item,
+ const struct rte_flow_item pattern[],
+ struct hinic_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item = *in_out_item;
+ const struct rte_flow_item_tcp *tcp_spec;
+ const struct rte_flow_item_tcp *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec;
+ const struct rte_flow_item_udp *udp_mask;
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) {
+ rule->mode = HINIC_FDIR_MODE_TCAM;
+ rule->mask.proto_mask = UINT16_MAX;
+ rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_ICMP;
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_ICMP6) {
+ rule->mode = HINIC_FDIR_MODE_TCAM;
+ rule->mask.proto_mask = UINT16_MAX;
+ rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_ICMPV6;
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_ANY) {
+ rule->mode = HINIC_FDIR_MODE_TCAM;
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+ if (!item->mask) {
+ (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter, support src, dst ports");
+ return -rte_errno;
+ }
+
+ tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+
+ /*
+ * Only support src & dst ports, tcp flags,
+ * others should be masked.
+ */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir normal tcam filter");
+ return -rte_errno;
+ }
+
+ rule->mode = HINIC_FDIR_MODE_TCAM;
+ rule->mask.proto_mask = UINT16_MAX;
+ rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
+ rule->mask.src_port_mask = tcp_mask->hdr.src_port;
+
+ rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_TCP;
+ if (item->spec) {
+ tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ rule->hinic_fdir.dst_port = tcp_spec->hdr.dst_port;
+ rule->hinic_fdir.src_port = tcp_spec->hdr.src_port;
+ }
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+ /*
+ * Only care about src & dst ports,
+ * others should be masked.
+ */
+ if (!item->mask) {
+ (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter, support src, dst ports");
+ return -rte_errno;
+ }
+
+ udp_mask = (const struct rte_flow_item_udp *)item->mask;
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter, support udp");
+ return -rte_errno;
+ }
+
+ rule->mode = HINIC_FDIR_MODE_TCAM;
+ rule->mask.proto_mask = UINT16_MAX;
+ rule->mask.src_port_mask = udp_mask->hdr.src_port;
+ rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
+
+ rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_UDP;
+ if (item->spec) {
+ udp_spec = (const struct rte_flow_item_udp *)item->spec;
+ rule->hinic_fdir.src_port = udp_spec->hdr.src_port;
+ rule->hinic_fdir.dst_port = udp_spec->hdr.dst_port;
+ }
+ } else {
+ (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter tcam normal, l4 only support icmp, tcp");
+ return -rte_errno;
+ }
+
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter tcam normal, support end");
+ return -rte_errno;
+ }
+
+ /* get next no void item */
+ *in_out_item = item;
+
+ return 0;
+}
+
+static int hinic_check_tcam_normal_item_ele(const struct rte_flow_item *item,
+ const struct rte_flow_item pattern[],
+ struct hinic_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ if (hinic_normal_item_check_ether(&item, pattern, error) ||
+ hinic_normal_item_check_ip(&item, pattern, rule, error) ||
+ hinic_tcam_normal_item_check_l4(&item, pattern, rule, error) ||
+ hinic_normal_item_check_end(item, rule, error))
+ return -rte_errno;
+
+ return 0;
+}
+
+static int hinic_tunnel_item_check_l4(const struct rte_flow_item **in_out_item,
+ const struct rte_flow_item pattern[],
+ struct hinic_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item = *in_out_item;
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
+ (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter, support vxlan");
+ return -rte_errno;
+ }
+
+ *in_out_item = item;
+ } else {
+ (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter tcam tunnel, outer l4 only support udp");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+hinic_tunnel_item_check_vxlan(const struct rte_flow_item **in_out_item,
+ const struct rte_flow_item pattern[],
+ struct hinic_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item = *in_out_item;
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_ANY) {
+ (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter, support tcp/udp");
+ return -rte_errno;
+ }
+
+ *in_out_item = item;
+ }
+
+ return 0;
+}
+
+static int
+hinic_tunnel_inner_item_check_l4(const struct rte_flow_item **in_out_item,
+ const struct rte_flow_item pattern[],
+ struct hinic_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_tcp *tcp_spec;
+ const struct rte_flow_item_tcp *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec;
+ const struct rte_flow_item_udp *udp_mask;
+ const struct rte_flow_item *item = *in_out_item;
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ /* Not supported last point for range */
+ if (item->last) {
+ memset(rule, 0, sizeof(struct hinic_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* get the TCP/UDP info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+ /*
+ * Only care about src & dst ports,
+ * others should be masked.
+ */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct hinic_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter, support src, dst ports");
+ return -rte_errno;
+ }
+
+ tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ (void)memset(rule, 0,
+ sizeof(struct hinic_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter, support tcp");
+ return -rte_errno;
+ }
+
+ rule->mode = HINIC_FDIR_MODE_TCAM;
+ rule->mask.tunnel_flag = UINT16_MAX;
+ rule->mask.tunnel_inner_src_port_mask =
+ tcp_mask->hdr.src_port;
+ rule->mask.tunnel_inner_dst_port_mask =
+ tcp_mask->hdr.dst_port;
+ rule->mask.proto_mask = UINT16_MAX;
+
+ rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_TCP;
+ if (item->spec) {
+ tcp_spec =
+ (const struct rte_flow_item_tcp *)item->spec;
+ rule->hinic_fdir.tunnel_inner_src_port =
+ tcp_spec->hdr.src_port;
+ rule->hinic_fdir.tunnel_inner_dst_port =
+ tcp_spec->hdr.dst_port;
+ }
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+ /*
+ * Only care about src & dst ports,
+ * others should be masked.
+ */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct hinic_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter, support src, dst ports");
+ return -rte_errno;
+ }
+
+ udp_mask = (const struct rte_flow_item_udp *)item->mask;
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ memset(rule, 0, sizeof(struct hinic_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter, support udp");
+ return -rte_errno;
+ }
+
+ rule->mode = HINIC_FDIR_MODE_TCAM;
+ rule->mask.tunnel_flag = UINT16_MAX;
+ rule->mask.tunnel_inner_src_port_mask =
+ udp_mask->hdr.src_port;
+ rule->mask.tunnel_inner_dst_port_mask =
+ udp_mask->hdr.dst_port;
+ rule->mask.proto_mask = UINT16_MAX;
+
+ rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_UDP;
+ if (item->spec) {
+ udp_spec =
+ (const struct rte_flow_item_udp *)item->spec;
+ rule->hinic_fdir.tunnel_inner_src_port =
+ udp_spec->hdr.src_port;
+ rule->hinic_fdir.tunnel_inner_dst_port =
+ udp_spec->hdr.dst_port;
+ }
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_ANY) {
+ rule->mode = HINIC_FDIR_MODE_TCAM;
+ rule->mask.tunnel_flag = UINT16_MAX;
+ } else {
+ memset(rule, 0, sizeof(struct hinic_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter, support tcp/udp");
+ return -rte_errno;
+ }
+
+ /* get next no void item */
+ *in_out_item = next_no_void_pattern(pattern, item);
+ }
+
+ return 0;
+}
+
+static int hinic_check_tcam_tunnel_item_ele(const struct rte_flow_item *item,
+ const struct rte_flow_item pattern[],
+ struct hinic_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ if (hinic_normal_item_check_ether(&item, pattern, error) ||
+ hinic_normal_item_check_ip(&item, pattern, rule, error) ||
+ hinic_tunnel_item_check_l4(&item, pattern, rule, error) ||
+ hinic_tunnel_item_check_vxlan(&item, pattern, rule, error) ||
+ hinic_tunnel_inner_item_check_l4(&item, pattern, rule, error) ||
+ hinic_normal_item_check_end(item, rule, error))
+ return -rte_errno;
+
+ return 0;
+}
+
+static int hinic_check_normal_attr_ele(const struct rte_flow_attr *attr,
+ struct hinic_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ /* Must be input direction */
+ if (!attr->ingress) {
+ memset(rule, 0, sizeof(struct hinic_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->egress) {
+ memset(rule, 0, sizeof(struct hinic_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->priority) {
+ memset(rule, 0, sizeof(struct hinic_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int hinic_check_normal_act_ele(const struct rte_flow_item *item,
+ const struct rte_flow_action actions[],
+ struct hinic_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action *act;
+
+ /* Check if the first not void action is QUEUE */
+ act = next_no_void_action(actions, NULL);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ memset(rule, 0, sizeof(struct hinic_fdir_rule));
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ item, "Not supported action.");
+ return -rte_errno;
+ }
+
+ rule->queue = ((const struct rte_flow_action_queue *)act->conf)->index;
+
+ /* Check if the next not void item is END */
+ act = next_no_void_action(actions, act);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(rule, 0, sizeof(struct hinic_fdir_rule));
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/**
+ * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
+ * and get the flow director filter info along the way.
+ * UDP/TCP/SCTP PATTERN:
+ * The first not void item can be ETH or IPV4 or IPV6
+ * The second not void item must be IPV4 or IPV6 if the first one is ETH.
+ * The next not void item could be UDP or TCP(optional)
+ * The next not void item must be END.
+ * ACTION:
+ * The first not void action should be QUEUE.
+ * The second not void optional action should be MARK,
+ * mark_id is a uint32_t number.
+ * The next not void action should be END.
+ * UDP/TCP pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4 src_addr 1.2.3.6 0xFFFFFFFF
+ * dst_addr 1.2.3.5 0xFFFFFFFF
+ * UDP/TCP src_port 80 0xFFFF
+ * dst_port 80 0xFFFF
+ * END
+ * Other members in mask and spec should be set to 0x00.
+ * Item->last should be NULL.
+ */
+static int
+hinic_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct hinic_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item = NULL;
+
+ if (hinic_check_filter_arg(attr, pattern, actions, error))
+ return -rte_errno;
+
+ if (hinic_check_normal_item_ele(item, pattern, rule, error))
+ return -rte_errno;
+
+ if (hinic_check_normal_attr_ele(attr, rule, error))
+ return -rte_errno;
+
+ if (hinic_check_normal_act_ele(item, actions, rule, error))
+ return -rte_errno;
+
+ return 0;
+}
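+
+/*
+ * Illustrative sketch (not part of the driver): a normal-mode fdir pattern as
+ * documented above, matching only fully-masked IPv4 src/dst addresses with a
+ * QUEUE action. Addresses and queue index are assumptions for this example
+ * only.
+ *
+ *   struct rte_flow_item_ipv4 ip_spec = { .hdr = {
+ *           .src_addr = RTE_BE32(RTE_IPV4(1, 2, 3, 6)),
+ *           .dst_addr = RTE_BE32(RTE_IPV4(1, 2, 3, 5)) } };
+ *   struct rte_flow_item_ipv4 ip_mask = { .hdr = {
+ *           .src_addr = UINT32_MAX, .dst_addr = UINT32_MAX } };
+ *   struct rte_flow_item pattern[] = {
+ *           { .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ *             .spec = &ip_spec, .mask = &ip_mask },
+ *           { .type = RTE_FLOW_ITEM_TYPE_END },
+ *   };
+ *   struct rte_flow_action_queue queue = { .index = 1 };
+ *   struct rte_flow_action actions[] = {
+ *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
+ *           { .type = RTE_FLOW_ACTION_TYPE_END },
+ *   };
+ */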
+
+/**
+ * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
+ * and get the flow director filter info along the way.
+ * UDP/TCP/SCTP PATTERN:
+ * The first not void item can be ETH or IPV4 or IPV6
+ * The second not void item must be IPV4 or IPV6 if the first one is ETH.
+ * The next not void item can be ANY/TCP/UDP
+ * ACTION:
+ * The first not void action should be QUEUE.
+ * The second not void optional action should be MARK,
+ * mark_id is a uint32_t number.
+ * The next not void action should be END.
+ * UDP/TCP pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4 src_addr 1.2.3.6 0xFFFFFFFF
+ * dst_addr 1.2.3.5 0xFFFFFFFF
+ * UDP/TCP src_port 80 0xFFFF
+ * dst_port 80 0xFFFF
+ * END
+ * Other members in mask and spec should be set to 0x00.
+ * Item->last should be NULL.
+ */
+static int
+hinic_parse_fdir_filter_tcam_normal(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct hinic_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item = NULL;
+
+ if (hinic_check_filter_arg(attr, pattern, actions, error))
+ return -rte_errno;
+
+ if (hinic_check_tcam_normal_item_ele(item, pattern, rule, error))
+ return -rte_errno;
+
+ if (hinic_check_normal_attr_ele(attr, rule, error))
+ return -rte_errno;
+
+ if (hinic_check_normal_act_ele(item, actions, rule, error))
+ return -rte_errno;
+
+ return 0;
+}
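+
+/*
+ * Illustrative sketch (not part of the driver): a TCAM normal-mode fdir
+ * pattern as documented above, IPv4 destination address plus UDP destination
+ * port. Values are assumptions for this example only; actions are built as
+ * in the normal-mode example.
+ *
+ *   struct rte_flow_item_ipv4 ip_spec = { .hdr = {
+ *           .dst_addr = RTE_BE32(RTE_IPV4(1, 2, 3, 5)) } };
+ *   struct rte_flow_item_ipv4 ip_mask = { .hdr = { .dst_addr = UINT32_MAX } };
+ *   struct rte_flow_item_udp udp_spec = { .hdr = { .dst_port = RTE_BE16(53) } };
+ *   struct rte_flow_item_udp udp_mask = { .hdr = { .dst_port = RTE_BE16(0xFFFF) } };
+ *   struct rte_flow_item pattern[] = {
+ *           { .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ *             .spec = &ip_spec, .mask = &ip_mask },
+ *           { .type = RTE_FLOW_ITEM_TYPE_UDP,
+ *             .spec = &udp_spec, .mask = &udp_mask },
+ *           { .type = RTE_FLOW_ITEM_TYPE_END },
+ *   };
+ */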
+
+/**
+ * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
+ * and get the flow director filter info along the way.
+ * UDP/TCP/SCTP PATTERN:
+ * The first not void item can be ETH or IPV4 or IPV6
+ * The second not void item must be IPV4 or IPV6 if the first one is ETH.
+ * The next not void item must be UDP
+ * The next not void item must be VXLAN(optional)
+ * The first not void item can be ETH or IPV4 or IPV6
+ * The next not void item could be ANY or UDP or TCP(optional)
+ * The next not void item must be END.
+ * ACTION:
+ * The first not void action should be QUEUE.
+ * The second not void optional action should be MARK,
+ * mark_id is a uint32_t number.
+ * The next not void action should be END.
+ * UDP/TCP pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4 src_addr 1.2.3.6 0xFFFFFFFF
+ * dst_addr 1.2.3.5 0xFFFFFFFF
+ * UDP NULL NULL
+ * VXLAN NULL NULL
+ * UDP/TCP src_port 80 0xFFFF
+ * dst_port 80 0xFFFF
+ * END
+ * Other members in mask and spec should be set to 0x00.
+ * Item->last should be NULL.
+ */
+static int
+hinic_parse_fdir_filter_tcam_tunnel(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct hinic_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item = NULL;
+
+ if (hinic_check_filter_arg(attr, pattern, actions, error))
+ return -rte_errno;
+
+ if (hinic_check_tcam_tunnel_item_ele(item, pattern, rule, error))
+ return -rte_errno;
+
+ if (hinic_check_normal_attr_ele(attr, rule, error))
+ return -rte_errno;
+
+ if (hinic_check_normal_act_ele(item, actions, rule, error))
+ return -rte_errno;
+
+ return 0;
+}
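+
+/*
+ * Illustrative sketch (not driver code): a VXLAN tunnel pattern of the shape
+ * described above, matching an inner TCP destination port. Variable names are
+ * hypothetical; the outer ETH/IPV4/UDP/VXLAN items are left empty so only the
+ * inner port is matched.
+ *
+ *   struct rte_flow_item_tcp tcp_spec = {
+ *       .hdr = { .dst_port = RTE_BE16(80) },
+ *   };
+ *   struct rte_flow_item_tcp tcp_mask = {
+ *       .hdr = { .dst_port = RTE_BE16(0xffff) },
+ *   };
+ *   struct rte_flow_item pattern[] = {
+ *       { .type = RTE_FLOW_ITEM_TYPE_ETH },
+ *       { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
+ *       { .type = RTE_FLOW_ITEM_TYPE_UDP },
+ *       { .type = RTE_FLOW_ITEM_TYPE_VXLAN },
+ *       { .type = RTE_FLOW_ITEM_TYPE_TCP,
+ *         .spec = &tcp_spec, .mask = &tcp_mask },
+ *       { .type = RTE_FLOW_ITEM_TYPE_END },
+ *   };
+ */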
+
+static int hinic_parse_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct hinic_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ int ret;
+
+ ret = hinic_parse_fdir_filter_normal(attr, pattern, actions,
+ rule, error);
+ if (!ret)
+ goto step_next;
+
+ ret = hinic_parse_fdir_filter_tcam_normal(attr, pattern, actions,
+ rule, error);
+ if (!ret)
+ goto step_next;
+
+ ret = hinic_parse_fdir_filter_tacm_tunnel(attr, pattern, actions,
+ rule, error);
+ if (ret)
+ return ret;
+
+step_next:
+ if (rule->queue >= dev->data->nb_rx_queues)
+ return -ENOTSUP;
+
+ return ret;
+}
+
+/**
+ * Check if the flow rule is supported by the NIC.
+ * It only checks the format; it does not guarantee that the rule can be
+ * programmed into the HW, because there may not be enough room for it.
+ */
+static int hinic_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_ntuple_filter ntuple_filter;
+ struct hinic_fdir_rule fdir_rule;
+ int ret;
+
+ memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ ret = hinic_parse_ntuple_filter(dev, attr, pattern,
+ actions, &ntuple_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ ret = hinic_parse_ethertype_filter(dev, attr, pattern,
+ actions, &ethertype_filter, error);
+
+ if (!ret)
+ return 0;
+
+ memset(&fdir_rule, 0, sizeof(struct hinic_fdir_rule));
+ ret = hinic_parse_fdir_filter(dev, attr, pattern,
+ actions, &fdir_rule, error);
+
+ return ret;
+}
+
+static inline int ntuple_ip_filter(struct rte_eth_ntuple_filter *filter,
+ struct hinic_5tuple_filter_info *hinic_filter_info)
+{
+ switch (filter->dst_ip_mask) {
+ case UINT32_MAX:
+ hinic_filter_info->dst_ip_mask = 0;
+ hinic_filter_info->dst_ip = filter->dst_ip;
+ break;
+ case 0:
+ hinic_filter_info->dst_ip_mask = 1;
+ hinic_filter_info->dst_ip = 0;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid dst_ip mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->src_ip_mask) {
+ case UINT32_MAX:
+ hinic_filter_info->src_ip_mask = 0;
+ hinic_filter_info->src_ip = filter->src_ip;
+ break;
+ case 0:
+ hinic_filter_info->src_ip_mask = 1;
+ hinic_filter_info->src_ip = 0;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid src_ip mask.");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static inline int ntuple_port_filter(struct rte_eth_ntuple_filter *filter,
+ struct hinic_5tuple_filter_info *hinic_filter_info)
+{
+ switch (filter->dst_port_mask) {
+ case UINT16_MAX:
+ hinic_filter_info->dst_port_mask = 0;
+ hinic_filter_info->dst_port = filter->dst_port;
+ break;
+ case 0:
+ hinic_filter_info->dst_port_mask = 1;
+ hinic_filter_info->dst_port = 0;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid dst_port mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->src_port_mask) {
+ case UINT16_MAX:
+ hinic_filter_info->src_port_mask = 0;
+ hinic_filter_info->src_port = filter->src_port;
+ break;
+ case 0:
+ hinic_filter_info->src_port_mask = 1;
+ hinic_filter_info->src_port = 0;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid src_port mask.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int ntuple_proto_filter(struct rte_eth_ntuple_filter *filter,
+ struct hinic_5tuple_filter_info *hinic_filter_info)
+{
+ switch (filter->proto_mask) {
+ case UINT8_MAX:
+ hinic_filter_info->proto_mask = 0;
+ hinic_filter_info->proto = filter->proto;
+ break;
+ case 0:
+ hinic_filter_info->proto_mask = 1;
+ hinic_filter_info->proto = 0;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid protocol mask.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
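+
+/*
+ * Note on the conversions above: the hinic filter info uses an inverted mask
+ * convention compared with rte_eth_ntuple_filter. A full rte mask (exact
+ * match, e.g. UINT32_MAX) becomes hinic mask 0, while an rte mask of 0
+ * (field ignored) becomes hinic mask 1. For example, dst_ip_mask UINT32_MAX
+ * with dst_ip 1.2.3.5 is stored as {dst_ip_mask = 0, dst_ip = 1.2.3.5}.
+ */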
+
+static inline int ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
+ struct hinic_5tuple_filter_info *filter_info)
+{
+ if (filter->queue >= HINIC_MAX_RX_QUEUE_NUM ||
+ filter->priority > HINIC_MAX_N_TUPLE_PRIO ||
+ filter->priority < HINIC_MIN_N_TUPLE_PRIO)
+ return -EINVAL;
+
+ if (ntuple_ip_filter(filter, filter_info) ||
+ ntuple_port_filter(filter, filter_info) ||
+ ntuple_proto_filter(filter, filter_info))
+ return -EINVAL;
+
+ filter_info->priority = (uint8_t)filter->priority;
+ return 0;
+}
+
+static inline struct hinic_5tuple_filter *
+hinic_5tuple_filter_lookup(struct hinic_5tuple_filter_list *filter_list,
+ struct hinic_5tuple_filter_info *key)
+{
+ struct hinic_5tuple_filter *it;
+
+ TAILQ_FOREACH(it, filter_list, entries) {
+ if (memcmp(key, &it->filter_info,
+ sizeof(struct hinic_5tuple_filter_info)) == 0) {
+ return it;
+ }
+ }
+
+ return NULL;
+}
+
+static int hinic_set_lacp_tcam(struct hinic_nic_dev *nic_dev)
+{
+ struct tag_pa_rule lacp_rule;
+ struct tag_pa_action lacp_action;
+
+ memset(&lacp_rule, 0, sizeof(lacp_rule));
+ memset(&lacp_action, 0, sizeof(lacp_action));
+ /* LACP TCAM rule */
+ lacp_rule.eth_type = PA_ETH_TYPE_OTHER;
+ lacp_rule.l2_header.eth_type.val16 = 0x8809;
+ lacp_rule.l2_header.eth_type.mask16 = 0xffff;
+
+ /* LACP TCAM action */
+ lacp_action.err_type = 0x3f; /* err from ipsu, not convert */
+ lacp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */
+ lacp_action.pkt_type = PKT_LACP_TYPE;
+ lacp_action.pri = 0x0;
+ lacp_action.push_len = 0xf; /* push_len:0xf, not convert */
+
+ return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP,
+ &lacp_rule, &lacp_action);
+}
+
+static int hinic_set_bgp_dport_tcam(struct hinic_nic_dev *nic_dev)
+{
+ struct tag_pa_rule bgp_rule;
+ struct tag_pa_action bgp_action;
+
+ memset(&bgp_rule, 0, sizeof(bgp_rule));
+ memset(&bgp_action, 0, sizeof(bgp_action));
+ /* BGP TCAM rule */
+ bgp_rule.eth_type = PA_ETH_TYPE_IPV4; /* Eth type is IPV4 */
+ bgp_rule.ip_header.protocol.val8 = IP_HEADER_PROTOCOL_TYPE_TCP;
+ bgp_rule.ip_header.protocol.mask8 = UINT8_MAX;
+ bgp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
+ bgp_rule.eth_ip_tcp.dport.val16 = BGP_DPORT_ID; /* Dport is 179 */
+ bgp_rule.eth_ip_tcp.dport.mask16 = UINT16_MAX;
+
+ /* BGP TCAM action */
+ bgp_action.err_type = 0x3f; /* err from ipsu, not convert */
+ bgp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */
+ bgp_action.pkt_type = PKT_BGPD_DPORT_TYPE; /* bgp_dport: 83 */
+ bgp_action.pri = 0xf; /* pri of BGP is 0xf, result from ipsu parse
+ * results, not need to convert
+ */
+ bgp_action.push_len = 0xf; /* push_len:0xf, not convert */
+
+ return hinic_set_fdir_tcam(nic_dev->hwdev,
+ TCAM_PKT_BGP_DPORT, &bgp_rule, &bgp_action);
+}
+
+static int hinic_set_bgp_sport_tcam(struct hinic_nic_dev *nic_dev)
+{
+ struct tag_pa_rule bgp_rule;
+ struct tag_pa_action bgp_action;
+
+ memset(&bgp_rule, 0, sizeof(bgp_rule));
+ memset(&bgp_action, 0, sizeof(bgp_action));
+ /* BGP TCAM rule */
+ bgp_rule.eth_type = PA_ETH_TYPE_IPV4;
+ bgp_rule.ip_header.protocol.val8 = IP_HEADER_PROTOCOL_TYPE_TCP;
+ bgp_rule.ip_header.protocol.mask8 = UINT8_MAX;
+ bgp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
+ bgp_rule.eth_ip_tcp.sport.val16 = BGP_DPORT_ID;
+ bgp_rule.eth_ip_tcp.sport.mask16 = UINT16_MAX;
+
+ /* BGP TCAM action */
+ bgp_action.err_type = 0x3f; /* err from ipsu, not convert */
+ bgp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */
+ bgp_action.pkt_type = PKT_BGPD_SPORT_TYPE; /* bgp:sport: 84 */
+ bgp_action.pri = 0xf; /* pri of BGP is 0xf, result from ipsu parse
+ * results, not need to convert
+ */
+ bgp_action.push_len = 0xf; /* push_len:0xf, not convert */
+
+ return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_SPORT,
+ &bgp_rule, &bgp_action);
+}
+
+static int hinic_set_vrrp_tcam(struct hinic_nic_dev *nic_dev)
+{
+ struct tag_pa_rule vrrp_rule;
+ struct tag_pa_action vrrp_action;
+
+ memset(&vrrp_rule, 0, sizeof(vrrp_rule));
+ memset(&vrrp_action, 0, sizeof(vrrp_action));
+ /* VRRP TCAM rule */
+ vrrp_rule.eth_type = PA_ETH_TYPE_IPV4;
+ vrrp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
+ vrrp_rule.ip_header.protocol.mask8 = 0xff;
+ vrrp_rule.ip_header.protocol.val8 = PA_IP_PROTOCOL_TYPE_VRRP;
+
+ /* VRRP TCAM action */
+ vrrp_action.err_type = 0x3f;
+ vrrp_action.fwd_action = 0x7;
+ vrrp_action.pkt_type = PKT_VRRP_TYPE; /* VRRP: 85 */
+ vrrp_action.pri = 0xf;
+ vrrp_action.push_len = 0xf;
+
+ return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP,
+ &vrrp_rule, &vrrp_action);
+}
+
+/**
+ * Clear all fdir configuration.
+ *
+ * @param nic_dev
+ * The hardware interface of an Ethernet device.
+ */
+void hinic_free_fdir_filter(struct hinic_nic_dev *nic_dev)
+{
+ (void)hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, false);
+
+ (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_DPORT);
+
+ (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_SPORT);
+
+ (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP);
+
+ (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP);
+
+ (void)hinic_flush_tcam_rule(nic_dev->hwdev);
+}
+
+static int hinic_filter_info_init(struct hinic_5tuple_filter *filter,
+ struct hinic_filter_info *filter_info)
+{
+ switch (filter->filter_info.proto) {
+ case IPPROTO_TCP:
+ /* Filter type is bgp type if dst_port or src_port is 179 */
+ if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID) &&
+ !(filter->filter_info.dst_port_mask)) {
+ filter_info->pkt_type = PKT_BGPD_DPORT_TYPE;
+ } else if (filter->filter_info.src_port ==
+ RTE_BE16(BGP_DPORT_ID) &&
+ !(filter->filter_info.src_port_mask)) {
+ filter_info->pkt_type = PKT_BGPD_SPORT_TYPE;
+ } else {
+ PMD_DRV_LOG(INFO, "TCP PROTOCOL:5tuple filters"
+ " just support BGP now, proto:0x%x, "
+ "dst_port:0x%x, dst_port_mask:0x%x."
+ "src_port:0x%x, src_port_mask:0x%x.",
+ filter->filter_info.proto,
+ filter->filter_info.dst_port,
+ filter->filter_info.dst_port_mask,
+ filter->filter_info.src_port,
+ filter->filter_info.src_port_mask);
+ return -EINVAL;
+ }
+ break;
+
+ case IPPROTO_VRRP:
+ filter_info->pkt_type = PKT_VRRP_TYPE;
+ break;
+
+ case IPPROTO_ICMP:
+ filter_info->pkt_type = PKT_ICMP_IPV4_TYPE;
+ break;
+
+ case IPPROTO_ICMPV6:
+ filter_info->pkt_type = PKT_ICMP_IPV6_TYPE;
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "5tuple filters just support BGP/VRRP/ICMP now, "
+ "proto: 0x%x, dst_port: 0x%x, dst_port_mask: 0x%x."
+ "src_port: 0x%x, src_port_mask: 0x%x.",
+ filter->filter_info.proto, filter->filter_info.dst_port,
+ filter->filter_info.dst_port_mask,
+ filter->filter_info.src_port,
+ filter->filter_info.src_port_mask);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hinic_lookup_new_filter(struct hinic_5tuple_filter *filter,
+ struct hinic_filter_info *filter_info, int *index)
+{
+ int type_id;
+
+ type_id = HINIC_PKT_TYPE_FIND_ID(filter_info->pkt_type);
+
+ if (type_id > HINIC_MAX_Q_FILTERS - 1) {
+ PMD_DRV_LOG(ERR, "Pkt filters only support 64 filter type.");
+ return -EINVAL;
+ }
+
+ if (!(filter_info->type_mask & (1 << type_id))) {
+ filter_info->type_mask |= 1 << type_id;
+ filter->index = type_id;
+ filter_info->pkt_filters[type_id].enable = true;
+ filter_info->pkt_filters[type_id].pkt_proto =
+ filter->filter_info.proto;
+ TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
+ filter, entries);
+ } else {
+ PMD_DRV_LOG(ERR, "Filter type: %d exists.", type_id);
+ return -EIO;
+ }
+
+ *index = type_id;
+ return 0;
+}
+
+/*
+ * Add a 5tuple filter
+ *
+ * @param dev:
+ * Pointer to struct rte_eth_dev.
+ * @param filter:
+ * Pointer to the filter that will be added.
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int hinic_add_5tuple_filter(struct rte_eth_dev *dev,
+ struct hinic_5tuple_filter *filter)
+{
+ struct hinic_filter_info *filter_info =
+ HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ int i, ret_fw;
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+
+ if (hinic_filter_info_init(filter, filter_info) ||
+ hinic_lookup_new_filter(filter, filter_info, &i))
+ return -EFAULT;
+
+ ret_fw = hinic_set_fdir_filter(nic_dev->hwdev, filter_info->pkt_type,
+ filter_info->qid,
+ filter_info->pkt_filters[i].enable,
+ true);
+ if (ret_fw) {
+ PMD_DRV_LOG(ERR, "Set fdir filter failed, type: 0x%x, qid: 0x%x, enable: 0x%x",
+ filter_info->pkt_type, filter->queue,
+ filter_info->pkt_filters[i].enable);
+ return -EFAULT;
+ }
+
+ PMD_DRV_LOG(INFO, "Add 5tuple succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
+ filter_info->pkt_type, filter_info->qid,
+ filter_info->pkt_filters[filter->index].enable);
+
+ switch (filter->filter_info.proto) {
+ case IPPROTO_TCP:
+ if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID)) {
+ ret_fw = hinic_set_bgp_dport_tcam(nic_dev);
+ if (ret_fw) {
+ PMD_DRV_LOG(ERR, "Set dport bgp failed, "
+ "type: 0x%x, qid: 0x%x, enable: 0x%x",
+ filter_info->pkt_type, filter->queue,
+ filter_info->pkt_filters[i].enable);
+ return -EFAULT;
+ }
+
+ PMD_DRV_LOG(INFO, "Set dport bgp succeed, qid: 0x%x, enable: 0x%x",
+ filter->queue,
+ filter_info->pkt_filters[i].enable);
+ } else if (filter->filter_info.src_port ==
+ RTE_BE16(BGP_DPORT_ID)) {
+ ret_fw = hinic_set_bgp_sport_tcam(nic_dev);
+ if (ret_fw) {
+ PMD_DRV_LOG(ERR, "Set sport bgp failed, "
+ "type: 0x%x, qid: 0x%x, enable: 0x%x",
+ filter_info->pkt_type, filter->queue,
+ filter_info->pkt_filters[i].enable);
+ return -EFAULT;
+ }
+
+ PMD_DRV_LOG(INFO, "Set sport bgp succeed, qid: 0x%x, enable: 0x%x",
+ filter->queue,
+ filter_info->pkt_filters[i].enable);
+ }
+
+ break;
+
+ case IPPROTO_VRRP:
+ ret_fw = hinic_set_vrrp_tcam(nic_dev);
+ if (ret_fw) {
+ PMD_DRV_LOG(ERR, "Set VRRP failed, "
+ "type: 0x%x, qid: 0x%x, enable: 0x%x",
+ filter_info->pkt_type, filter->queue,
+ filter_info->pkt_filters[i].enable);
+ return -EFAULT;
+ }
+ PMD_DRV_LOG(INFO, "Set VRRP succeed, qid: 0x%x, enable: 0x%x",
+ filter->queue,
+ filter_info->pkt_filters[i].enable);
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * Remove a 5tuple filter
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ * @param filter
+ * Pointer to the filter to be removed.
+ */
+static void hinic_remove_5tuple_filter(struct rte_eth_dev *dev,
+ struct hinic_5tuple_filter *filter)
+{
+ struct hinic_filter_info *filter_info =
+ HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+
+ switch (filter->filter_info.proto) {
+ case IPPROTO_VRRP:
+ (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP);
+ break;
+
+ case IPPROTO_TCP:
+ if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID))
+ (void)hinic_clear_fdir_tcam(nic_dev->hwdev,
+ TCAM_PKT_BGP_DPORT);
+ else if (filter->filter_info.src_port == RTE_BE16(BGP_DPORT_ID))
+ (void)hinic_clear_fdir_tcam(nic_dev->hwdev,
+ TCAM_PKT_BGP_SPORT);
+ break;
+
+ default:
+ break;
+ }
+
+ hinic_filter_info_init(filter, filter_info);
+
+ filter_info->pkt_filters[filter->index].enable = false;
+ filter_info->pkt_filters[filter->index].pkt_proto = 0;
+
+ PMD_DRV_LOG(INFO, "Del 5tuple succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
+ filter_info->pkt_type,
+ filter_info->pkt_filters[filter->index].qid,
+ filter_info->pkt_filters[filter->index].enable);
+ (void)hinic_set_fdir_filter(nic_dev->hwdev, filter_info->pkt_type,
+ filter_info->pkt_filters[filter->index].qid,
+ filter_info->pkt_filters[filter->index].enable,
+ true);
+
+ filter_info->pkt_type = 0;
+ filter_info->qid = 0;
+ filter_info->pkt_filters[filter->index].qid = 0;
+ filter_info->type_mask &= ~(1 << (filter->index));
+ TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
+
+ rte_free(filter);
+}
+
+/*
+ * Add or delete a ntuple filter
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ * @param ntuple_filter
+ * Pointer to struct rte_eth_ntuple_filter
+ * @param add
+ * If true, add filter; if false, remove filter
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int hinic_add_del_ntuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter,
+ bool add)
+{
+ struct hinic_filter_info *filter_info =
+ HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct hinic_5tuple_filter_info filter_5tuple;
+ struct hinic_5tuple_filter *filter;
+ int ret;
+
+ if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
+ PMD_DRV_LOG(ERR, "Only 5tuple is supported.");
+ return -EINVAL;
+ }
+
+ memset(&filter_5tuple, 0, sizeof(struct hinic_5tuple_filter_info));
+ ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
+ if (ret < 0)
+ return ret;
+
+ filter = hinic_5tuple_filter_lookup(&filter_info->fivetuple_list,
+ &filter_5tuple);
+ if (filter != NULL && add) {
+ PMD_DRV_LOG(ERR, "Filter exists.");
+ return -EEXIST;
+ }
+ if (filter == NULL && !add) {
+ PMD_DRV_LOG(ERR, "Filter doesn't exist.");
+ return -ENOENT;
+ }
+
+ if (add) {
+ filter = rte_zmalloc("hinic_5tuple_filter",
+ sizeof(struct hinic_5tuple_filter), 0);
+ if (filter == NULL)
+ return -ENOMEM;
+ rte_memcpy(&filter->filter_info, &filter_5tuple,
+ sizeof(struct hinic_5tuple_filter_info));
+ filter->queue = ntuple_filter->queue;
+
+ filter_info->qid = ntuple_filter->queue;
+
+ ret = hinic_add_5tuple_filter(dev, filter);
+ if (ret)
+ rte_free(filter);
+
+ return ret;
+ }
+
+ hinic_remove_5tuple_filter(dev, filter);
+
+ return 0;
+}
+
+static inline int
+hinic_check_ethertype_filter(struct rte_eth_ethertype_filter *filter)
+{
+ if (filter->queue >= HINIC_MAX_RX_QUEUE_NUM)
+ return -EINVAL;
+
+ if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
+ filter->ether_type == RTE_ETHER_TYPE_IPV6) {
+ PMD_DRV_LOG(ERR, "Unsupported ether_type(0x%04x) in"
+ " ethertype filter", filter->ether_type);
+ return -EINVAL;
+ }
+
+ if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+ PMD_DRV_LOG(ERR, "Mac compare is not supported");
+ return -EINVAL;
+ }
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+ PMD_DRV_LOG(ERR, "Drop option is not supported");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int
+hinic_ethertype_filter_lookup(struct hinic_filter_info *filter_info,
+ struct hinic_pkt_filter *ethertype_filter)
+{
+ switch (ethertype_filter->pkt_proto) {
+ case RTE_ETHER_TYPE_SLOW:
+ filter_info->pkt_type = PKT_LACP_TYPE;
+ break;
+
+ case RTE_ETHER_TYPE_ARP:
+ filter_info->pkt_type = PKT_ARP_TYPE;
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "Just support LACP/ARP for ethertype filters");
+ return -EIO;
+ }
+
+ return HINIC_PKT_TYPE_FIND_ID(filter_info->pkt_type);
+}
+
+static inline int
+hinic_ethertype_filter_insert(struct hinic_filter_info *filter_info,
+ struct hinic_pkt_filter *ethertype_filter)
+{
+ int id;
+
+ /* Find LACP or ARP type id */
+ id = hinic_ethertype_filter_lookup(filter_info, ethertype_filter);
+ if (id < 0)
+ return -EINVAL;
+
+ if (!(filter_info->type_mask & (1 << id))) {
+ filter_info->type_mask |= 1 << id;
+ filter_info->pkt_filters[id].pkt_proto =
+ ethertype_filter->pkt_proto;
+ filter_info->pkt_filters[id].enable = ethertype_filter->enable;
+ filter_info->qid = ethertype_filter->qid;
+ return id;
+ }
+
+ PMD_DRV_LOG(ERR, "Filter type: %d exists", id);
+ return -EINVAL;
+}
+
+static inline void
+hinic_ethertype_filter_remove(struct hinic_filter_info *filter_info,
+ uint8_t idx)
+{
+ if (idx >= HINIC_MAX_Q_FILTERS)
+ return;
+
+ filter_info->pkt_type = 0;
+ filter_info->type_mask &= ~(1 << idx);
+ filter_info->pkt_filters[idx].pkt_proto = (uint16_t)0;
+ filter_info->pkt_filters[idx].enable = FALSE;
+ filter_info->pkt_filters[idx].qid = 0;
+}
+
+static inline int
+hinic_add_del_ethertype_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ethertype_filter *filter,
+ bool add)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ struct hinic_filter_info *filter_info =
+ HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct hinic_pkt_filter ethertype_filter;
+ int i;
+ int ret_fw;
+
+ if (hinic_check_ethertype_filter(filter))
+ return -EINVAL;
+
+ if (add) {
+ ethertype_filter.pkt_proto = filter->ether_type;
+ ethertype_filter.enable = TRUE;
+ ethertype_filter.qid = (u8)filter->queue;
+ i = hinic_ethertype_filter_insert(filter_info,
+ &ethertype_filter);
+ if (i < 0)
+ return -ENOSPC;
+
+ ret_fw = hinic_set_fdir_filter(nic_dev->hwdev,
+ filter_info->pkt_type, filter_info->qid,
+ filter_info->pkt_filters[i].enable, true);
+ if (ret_fw) {
+ PMD_DRV_LOG(ERR, "add ethertype failed, type: 0x%x, qid: 0x%x, enable: 0x%x",
+ filter_info->pkt_type, filter->queue,
+ filter_info->pkt_filters[i].enable);
+
+ hinic_ethertype_filter_remove(filter_info, i);
+ return -ENOENT;
+ }
+ PMD_DRV_LOG(INFO, "Add ethertype succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
+ filter_info->pkt_type, filter->queue,
+ filter_info->pkt_filters[i].enable);
+
+ switch (ethertype_filter.pkt_proto) {
+ case RTE_ETHER_TYPE_SLOW:
+ ret_fw = hinic_set_lacp_tcam(nic_dev);
+ if (ret_fw) {
+ PMD_DRV_LOG(ERR, "Add lacp tcam failed");
+ hinic_ethertype_filter_remove(filter_info, i);
+ return -ENOENT;
+ }
+
+ PMD_DRV_LOG(INFO, "Add lacp tcam succeed");
+ break;
+ default:
+ break;
+ }
+ } else {
+ ethertype_filter.pkt_proto = filter->ether_type;
+ i = hinic_ethertype_filter_lookup(filter_info,
+ &ethertype_filter);
+
+ if ((filter_info->type_mask & (1 << i))) {
+ filter_info->pkt_filters[i].enable = FALSE;
+ (void)hinic_set_fdir_filter(nic_dev->hwdev,
+ filter_info->pkt_type,
+ filter_info->pkt_filters[i].qid,
+ filter_info->pkt_filters[i].enable,
+ true);
+
+ PMD_DRV_LOG(INFO, "Del ethertype succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
+ filter_info->pkt_type,
+ filter_info->pkt_filters[i].qid,
+ filter_info->pkt_filters[i].enable);
+
+ switch (ethertype_filter.pkt_proto) {
+ case RTE_ETHER_TYPE_SLOW:
+ (void)hinic_clear_fdir_tcam(nic_dev->hwdev,
+ TCAM_PKT_LACP);
+ PMD_DRV_LOG(INFO, "Del lacp tcam succeed");
+ break;
+ default:
+ break;
+ }
+
+ hinic_ethertype_filter_remove(filter_info, i);
+
+ } else {
+ PMD_DRV_LOG(ERR, "Ethertype doesn't exist, type: 0x%x, qid: 0x%x, enable: 0x%x",
+ filter_info->pkt_type, filter->queue,
+ filter_info->pkt_filters[i].enable);
+ return -ENOENT;
+ }
+ }
+
+ return 0;
+}
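+
+/*
+ * Illustrative sketch (not driver code): an application-side ethertype rule
+ * that this path can accept, e.g. steering ARP frames to a queue. Variable
+ * names are hypothetical and field names follow the rte_flow API of this
+ * DPDK version; only LACP and ARP ether types are handled by the code above.
+ *
+ *   struct rte_flow_item_eth eth_spec = {
+ *       .type = RTE_BE16(RTE_ETHER_TYPE_ARP),
+ *   };
+ *   struct rte_flow_item_eth eth_mask = { .type = RTE_BE16(0xffff) };
+ *   struct rte_flow_item pattern[] = {
+ *       { .type = RTE_FLOW_ITEM_TYPE_ETH,
+ *         .spec = &eth_spec, .mask = &eth_mask },
+ *       { .type = RTE_FLOW_ITEM_TYPE_END },
+ *   };
+ */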
+
+static int hinic_fdir_info_init(struct hinic_fdir_rule *rule,
+ struct hinic_fdir_info *fdir_info)
+{
+ switch (rule->mask.src_ipv4_mask) {
+ case UINT32_MAX:
+ fdir_info->fdir_flag = HINIC_ATR_FLOW_TYPE_IPV4_SIP;
+ fdir_info->qid = rule->queue;
+ fdir_info->fdir_key = rule->hinic_fdir.src_ip;
+ return 0;
+
+ case 0:
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "Invalid src_ip mask.");
+ return -EINVAL;
+ }
+
+ switch (rule->mask.dst_ipv4_mask) {
+ case UINT32_MAX:
+ fdir_info->fdir_flag = HINIC_ATR_FLOW_TYPE_IPV4_DIP;
+ fdir_info->qid = rule->queue;
+ fdir_info->fdir_key = rule->hinic_fdir.dst_ip;
+ return 0;
+
+ case 0:
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "Invalid dst_ip mask.");
+ return -EINVAL;
+ }
+
+ if (fdir_info->fdir_flag == 0) {
+ PMD_DRV_LOG(ERR, "All support mask is NULL.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int hinic_add_del_fdir_filter(struct rte_eth_dev *dev,
+ struct hinic_fdir_rule *rule, bool add)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ struct hinic_fdir_info fdir_info;
+ int ret;
+
+ memset(&fdir_info, 0, sizeof(struct hinic_fdir_info));
+
+ ret = hinic_fdir_info_init(rule, &fdir_info);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Init hinic fdir info failed!");
+ return ret;
+ }
+
+ if (add) {
+ ret = hinic_set_normal_filter(nic_dev->hwdev, fdir_info.qid,
+ true, fdir_info.fdir_key,
+ true, fdir_info.fdir_flag);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Add fdir filter failed, flag: 0x%x, qid: 0x%x, key: 0x%x",
+ fdir_info.fdir_flag, fdir_info.qid,
+ fdir_info.fdir_key);
+ return -ENOENT;
+ }
+ PMD_DRV_LOG(INFO, "Add fdir filter succeed, flag: 0x%x, qid: 0x%x, key: 0x%x",
+ fdir_info.fdir_flag, fdir_info.qid,
+ fdir_info.fdir_key);
+ } else {
+ ret = hinic_set_normal_filter(nic_dev->hwdev, fdir_info.qid,
+ false, fdir_info.fdir_key, true,
+ fdir_info.fdir_flag);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Del fdir filter failed, flag: 0x%x, qid: 0x%x, key: 0x%x",
+ fdir_info.fdir_flag, fdir_info.qid,
+ fdir_info.fdir_key);
+ return -ENOENT;
+ }
+ PMD_DRV_LOG(INFO, "Del fdir filter succeed, flag: 0x%x, qid: 0x%x, key: 0x%x",
+ fdir_info.fdir_flag, fdir_info.qid,
+ fdir_info.fdir_key);
+ }
+
+ return 0;
+}
+
+static void tcam_translate_key_y(u8 *key_y, u8 *src_input, u8 *mask, u8 len)
+{
+ u8 idx;
+
+ for (idx = 0; idx < len; idx++)
+ key_y[idx] = src_input[idx] & mask[idx];
+}
+
+static void tcam_translate_key_x(u8 *key_x, u8 *key_y, u8 *mask, u8 len)
+{
+ u8 idx;
+
+ for (idx = 0; idx < len; idx++)
+ key_x[idx] = key_y[idx] ^ mask[idx];
+}
+
+static void tcam_key_calculate(struct tag_tcam_key *tcam_key,
+ struct tag_tcam_cfg_rule *fdir_tcam_rule)
+{
+ tcam_translate_key_y(fdir_tcam_rule->key.y,
+ (u8 *)(&tcam_key->key_info),
+ (u8 *)(&tcam_key->key_mask),
+ TCAM_FLOW_KEY_SIZE);
+ tcam_translate_key_x(fdir_tcam_rule->key.x,
+ fdir_tcam_rule->key.y,
+ (u8 *)(&tcam_key->key_mask),
+ TCAM_FLOW_KEY_SIZE);
+}
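+
+/*
+ * Worked example of the x/y key encoding computed above (per byte):
+ * with input = 0xAB and mask = 0xF0, key_y = input & mask = 0xA0 and
+ * key_x = key_y ^ mask = 0x50. Bits where the mask is 0 end up 0 in both
+ * x and y (don't care); bits where the mask is 1 carry the value in y and
+ * its complement in x.
+ */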
+
+static int hinic_fdir_tcam_ipv4_init(struct rte_eth_dev *dev,
+ struct hinic_fdir_rule *rule,
+ struct tag_tcam_key *tcam_key)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+
+ switch (rule->mask.dst_ipv4_mask) {
+ case UINT32_MAX:
+ tcam_key->key_info.ext_dip_h =
+ (rule->hinic_fdir.dst_ip >> 16) & 0xffffU;
+ tcam_key->key_info.ext_dip_l =
+ rule->hinic_fdir.dst_ip & 0xffffU;
+ tcam_key->key_mask.ext_dip_h =
+ (rule->mask.dst_ipv4_mask >> 16) & 0xffffU;
+ tcam_key->key_mask.ext_dip_l =
+ rule->mask.dst_ipv4_mask & 0xffffU;
+ break;
+
+ case 0:
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "invalid src_ip mask.");
+ return -EINVAL;
+ }
+
+ if (rule->mask.dst_port_mask > 0) {
+ tcam_key->key_info.dst_port = rule->hinic_fdir.dst_port;
+ tcam_key->key_mask.dst_port = rule->mask.dst_port_mask;
+ }
+
+ if (rule->mask.src_port_mask > 0) {
+ tcam_key->key_info.src_port = rule->hinic_fdir.src_port;
+ tcam_key->key_mask.src_port = rule->mask.src_port_mask;
+ }
+
+ switch (rule->mask.tunnel_flag) {
+ case UINT16_MAX:
+ tcam_key->key_info.tunnel_flag = FDIR_TCAM_TUNNEL_PACKET;
+ tcam_key->key_mask.tunnel_flag = UINT8_MAX;
+ break;
+
+ case 0:
+ tcam_key->key_info.tunnel_flag = FDIR_TCAM_NORMAL_PACKET;
+ tcam_key->key_mask.tunnel_flag = 0;
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "invalid tunnel flag mask.");
+ return -EINVAL;
+ }
+
+ if (rule->mask.tunnel_inner_dst_port_mask > 0) {
+ tcam_key->key_info.dst_port =
+ rule->hinic_fdir.tunnel_inner_dst_port;
+ tcam_key->key_mask.dst_port =
+ rule->mask.tunnel_inner_dst_port_mask;
+ }
+
+ if (rule->mask.tunnel_inner_src_port_mask > 0) {
+ tcam_key->key_info.src_port =
+ rule->hinic_fdir.tunnel_inner_src_port;
+ tcam_key->key_mask.src_port =
+ rule->mask.tunnel_inner_src_port_mask;
+ }
+
+ switch (rule->mask.proto_mask) {
+ case UINT16_MAX:
+ tcam_key->key_info.protocol = rule->hinic_fdir.proto;
+ tcam_key->key_mask.protocol = UINT8_MAX;
+ break;
+
+ case 0:
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "invalid tunnel flag mask.");
+ return -EINVAL;
+ }
+
+ tcam_key->key_mask.function_id = UINT16_MAX;
+ tcam_key->key_info.function_id =
+ hinic_global_func_id(nic_dev->hwdev) & 0x7fff;
+
+ return 0;
+}
+
+static int hinic_fdir_tcam_ipv6_init(struct rte_eth_dev *dev,
+ struct hinic_fdir_rule *rule,
+ struct tag_tcam_key *tcam_key)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+
+ switch (rule->mask.dst_ipv6_mask) {
+ case UINT16_MAX:
+ tcam_key->key_info_ipv6.ipv6_key0 =
+ ((rule->hinic_fdir.dst_ipv6[0] << 8) & 0xff00) |
+ rule->hinic_fdir.dst_ipv6[1];
+ tcam_key->key_info_ipv6.ipv6_key1 =
+ ((rule->hinic_fdir.dst_ipv6[2] << 8) & 0xff00) |
+ rule->hinic_fdir.dst_ipv6[3];
+ tcam_key->key_info_ipv6.ipv6_key2 =
+ ((rule->hinic_fdir.dst_ipv6[4] << 8) & 0xff00) |
+ rule->hinic_fdir.dst_ipv6[5];
+ tcam_key->key_info_ipv6.ipv6_key3 =
+ ((rule->hinic_fdir.dst_ipv6[6] << 8) & 0xff00) |
+ rule->hinic_fdir.dst_ipv6[7];
+ tcam_key->key_info_ipv6.ipv6_key4 =
+ ((rule->hinic_fdir.dst_ipv6[8] << 8) & 0xff00) |
+ rule->hinic_fdir.dst_ipv6[9];
+ tcam_key->key_info_ipv6.ipv6_key5 =
+ ((rule->hinic_fdir.dst_ipv6[10] << 8) & 0xff00) |
+ rule->hinic_fdir.dst_ipv6[11];
+ tcam_key->key_info_ipv6.ipv6_key6 =
+ ((rule->hinic_fdir.dst_ipv6[12] << 8) & 0xff00) |
+ rule->hinic_fdir.dst_ipv6[13];
+ tcam_key->key_info_ipv6.ipv6_key7 =
+ ((rule->hinic_fdir.dst_ipv6[14] << 8) & 0xff00) |
+ rule->hinic_fdir.dst_ipv6[15];
+ tcam_key->key_mask_ipv6.ipv6_key0 = UINT16_MAX;
+ tcam_key->key_mask_ipv6.ipv6_key1 = UINT16_MAX;
+ tcam_key->key_mask_ipv6.ipv6_key2 = UINT16_MAX;
+ tcam_key->key_mask_ipv6.ipv6_key3 = UINT16_MAX;
+ tcam_key->key_mask_ipv6.ipv6_key4 = UINT16_MAX;
+ tcam_key->key_mask_ipv6.ipv6_key5 = UINT16_MAX;
+ tcam_key->key_mask_ipv6.ipv6_key6 = UINT16_MAX;
+ tcam_key->key_mask_ipv6.ipv6_key7 = UINT16_MAX;
+ break;
+
+ case 0:
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "invalid dst_ipv6 mask");
+ return -EINVAL;
+ }
+
+ if (rule->mask.dst_port_mask > 0) {
+ tcam_key->key_info_ipv6.dst_port = rule->hinic_fdir.dst_port;
+ tcam_key->key_mask_ipv6.dst_port = rule->mask.dst_port_mask;
+ }
+
+ switch (rule->mask.proto_mask) {
+ case UINT16_MAX:
+ tcam_key->key_info_ipv6.protocol =
+ (rule->hinic_fdir.proto) & 0x7F;
+ tcam_key->key_mask_ipv6.protocol = 0x7F;
+ break;
+
+ case 0:
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "invalid tunnel flag mask");
+ return -EINVAL;
+ }
+
+ tcam_key->key_info_ipv6.ipv6_flag = 1;
+ tcam_key->key_mask_ipv6.ipv6_flag = 1;
+
+ tcam_key->key_mask_ipv6.function_id = UINT8_MAX;
+ tcam_key->key_info_ipv6.function_id =
+ (u8)hinic_global_func_id(nic_dev->hwdev);
+
+ return 0;
+}
+
+static int hinic_fdir_tcam_info_init(struct rte_eth_dev *dev,
+ struct hinic_fdir_rule *rule,
+ struct tag_tcam_key *tcam_key,
+ struct tag_tcam_cfg_rule *fdir_tcam_rule)
+{
+ int ret = -1;
+
+ if (rule->mask.dst_ipv4_mask == UINT32_MAX)
+ ret = hinic_fdir_tcam_ipv4_init(dev, rule, tcam_key);
+ else if (rule->mask.dst_ipv6_mask == UINT16_MAX)
+ ret = hinic_fdir_tcam_ipv6_init(dev, rule, tcam_key);
+
+ if (ret < 0)
+ return ret;
+
+ fdir_tcam_rule->data.qid = rule->queue;
+
+ tcam_key_calculate(tcam_key, fdir_tcam_rule);
+
+ return 0;
+}
+
+static inline struct hinic_tcam_filter *
+hinic_tcam_filter_lookup(struct hinic_tcam_filter_list *filter_list,
+ struct tag_tcam_key *key)
+{
+ struct hinic_tcam_filter *it;
+
+ TAILQ_FOREACH(it, filter_list, entries) {
+ if (memcmp(key, &it->tcam_key,
+ sizeof(struct tag_tcam_key)) == 0) {
+ return it;
+ }
+ }
+
+ return NULL;
+}
+
+static int hinic_lookup_new_tcam_filter(struct rte_eth_dev *dev,
+ struct hinic_tcam_info *tcam_info,
+ struct hinic_tcam_filter *tcam_filter,
+ u16 *tcam_index)
+{
+ int index;
+ int max_index;
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+
+ if (hinic_func_type(nic_dev->hwdev) == TYPE_VF)
+ max_index = HINIC_VF_MAX_TCAM_FILTERS;
+ else
+ max_index = HINIC_PF_MAX_TCAM_FILTERS;
+
+ for (index = 0; index < max_index; index++) {
+ if (tcam_info->tcam_index_array[index] == 0)
+ break;
+ }
+
+ if (index == max_index) {
+ PMD_DRV_LOG(ERR, "function 0x%x tcam filters only support %d filter rules",
+ hinic_global_func_id(nic_dev->hwdev), max_index);
+ return -EINVAL;
+ }
+
+ tcam_filter->index = index;
+ *tcam_index = index;
+
+ return 0;
+}
+
+static int hinic_add_tcam_filter(struct rte_eth_dev *dev,
+ struct hinic_tcam_filter *tcam_filter,
+ struct tag_tcam_cfg_rule *fdir_tcam_rule)
+{
+ struct hinic_tcam_info *tcam_info =
+ HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ u16 index = 0;
+ u16 tcam_block_index = 0;
+ int rc;
+
+ if (hinic_lookup_new_tcam_filter(dev, tcam_info, tcam_filter, &index))
+ return -EINVAL;
+
+ if (tcam_info->tcam_rule_nums == 0) {
+ if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
+ rc = hinic_alloc_tcam_block(nic_dev->hwdev,
+ HINIC_TCAM_BLOCK_TYPE_VF, &tcam_block_index);
+ if (rc != 0) {
+ PMD_DRV_LOG(ERR, "VF fdir filter tcam alloc block failed!");
+ return -EFAULT;
+ }
+ } else {
+ rc = hinic_alloc_tcam_block(nic_dev->hwdev,
+ HINIC_TCAM_BLOCK_TYPE_PF, &tcam_block_index);
+ if (rc != 0) {
+ PMD_DRV_LOG(ERR, "PF fdir filter tcam alloc block failed!");
+ return -EFAULT;
+ }
+ }
+
+ tcam_info->tcam_block_index = tcam_block_index;
+ } else {
+ tcam_block_index = tcam_info->tcam_block_index;
+ }
+
+ if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
+ fdir_tcam_rule->index =
+ HINIC_PKT_VF_TCAM_INDEX_START(tcam_block_index) + index;
+ } else {
+ fdir_tcam_rule->index =
+ tcam_block_index * HINIC_PF_MAX_TCAM_FILTERS + index;
+ }
+
+ rc = hinic_add_tcam_rule(nic_dev->hwdev, fdir_tcam_rule);
+ if (rc != 0) {
+ PMD_DRV_LOG(ERR, "Fdir_tcam_rule add failed!");
+ return -EFAULT;
+ }
+
+ PMD_DRV_LOG(INFO, "Add fdir_tcam_rule function_id: 0x%x,"
+ "tcam_block_id: %d, index: %d, queue: %d, tcam_rule_nums: %d succeed",
+ hinic_global_func_id(nic_dev->hwdev), tcam_block_index,
+ fdir_tcam_rule->index, fdir_tcam_rule->data.qid,
+ tcam_info->tcam_rule_nums + 1);
+
+ if (tcam_info->tcam_rule_nums == 0) {
+ rc = hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, true);
+ if (rc < 0) {
+ (void)hinic_del_tcam_rule(nic_dev->hwdev,
+ fdir_tcam_rule->index);
+ return rc;
+ }
+ }
+
+ TAILQ_INSERT_TAIL(&tcam_info->tcam_list, tcam_filter, entries);
+
+ tcam_info->tcam_index_array[index] = 1;
+ tcam_info->tcam_rule_nums++;
+
+ return 0;
+}
+
+static int hinic_del_tcam_filter(struct rte_eth_dev *dev,
+ struct hinic_tcam_filter *tcam_filter)
+{
+ struct hinic_tcam_info *tcam_info =
+ HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ u32 index = 0;
+ u16 tcam_block_index = tcam_info->tcam_block_index;
+ int rc;
+ u8 block_type = 0;
+
+ if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
+ index = HINIC_PKT_VF_TCAM_INDEX_START(tcam_block_index) +
+ tcam_filter->index;
+ block_type = HINIC_TCAM_BLOCK_TYPE_VF;
+ } else {
+ index = tcam_block_index * HINIC_PF_MAX_TCAM_FILTERS +
+ tcam_filter->index;
+ block_type = HINIC_TCAM_BLOCK_TYPE_PF;
+ }
+
+ rc = hinic_del_tcam_rule(nic_dev->hwdev, index);
+ if (rc != 0) {
+ PMD_DRV_LOG(ERR, "fdir_tcam_rule del failed!");
+ return -EFAULT;
+ }
+
+ PMD_DRV_LOG(INFO, "Del fdir_tcam_rule function_id: 0x%x, "
+ "tcam_block_id: %d, index: %d, tcam_rule_nums: %d succeed",
+ hinic_global_func_id(nic_dev->hwdev), tcam_block_index, index,
+ tcam_info->tcam_rule_nums - 1);
+
+ TAILQ_REMOVE(&tcam_info->tcam_list, tcam_filter, entries);
+
+ tcam_info->tcam_index_array[tcam_filter->index] = 0;
+
+ rte_free(tcam_filter);
+
+ tcam_info->tcam_rule_nums--;
+
+ if (tcam_info->tcam_rule_nums == 0) {
+ (void)hinic_free_tcam_block(nic_dev->hwdev, block_type,
+ &tcam_block_index);
+ }
+
+ return 0;
+}
+
+static int hinic_add_del_tcam_fdir_filter(struct rte_eth_dev *dev,
+ struct hinic_fdir_rule *rule, bool add)
+{
+ struct hinic_tcam_info *tcam_info =
+ HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
+ struct hinic_tcam_filter *tcam_filter;
+ struct tag_tcam_cfg_rule fdir_tcam_rule;
+ struct tag_tcam_key tcam_key;
+ int ret;
+
+ memset(&fdir_tcam_rule, 0, sizeof(struct tag_tcam_cfg_rule));
+ memset((void *)&tcam_key, 0, sizeof(struct tag_tcam_key));
+
+ ret = hinic_fdir_tcam_info_init(dev, rule, &tcam_key, &fdir_tcam_rule);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Init hinic fdir info failed!");
+ return ret;
+ }
+
+ tcam_filter = hinic_tcam_filter_lookup(&tcam_info->tcam_list,
+ &tcam_key);
+ if (tcam_filter != NULL && add) {
+ PMD_DRV_LOG(ERR, "Filter exists.");
+ return -EEXIST;
+ }
+ if (tcam_filter == NULL && !add) {
+ PMD_DRV_LOG(ERR, "Filter doesn't exist.");
+ return -ENOENT;
+ }
+
+ if (add) {
+ tcam_filter = rte_zmalloc("hinic_5tuple_filter",
+ sizeof(struct hinic_tcam_filter), 0);
+ if (tcam_filter == NULL)
+ return -ENOMEM;
+ (void)rte_memcpy(&tcam_filter->tcam_key,
+ &tcam_key, sizeof(struct tag_tcam_key));
+ tcam_filter->queue = fdir_tcam_rule.data.qid;
+
+ ret = hinic_add_tcam_filter(dev, tcam_filter, &fdir_tcam_rule);
+ if (ret < 0) {
+ rte_free(tcam_filter);
+ return ret;
+ }
+
+ rule->tcam_index = fdir_tcam_rule.index;
+
+ } else {
+ PMD_DRV_LOG(INFO, "begin to hinic_del_tcam_filter");
+ ret = hinic_del_tcam_filter(dev, tcam_filter);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * Create a flow rule.
+ * Theoretically one rule can match more than one filter.
+ * We will let it use the first filter it hits.
+ * So, the sequence matters.
+ */
+static struct rte_flow *hinic_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct rte_eth_ntuple_filter ntuple_filter;
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct hinic_fdir_rule fdir_rule;
+ struct rte_flow *flow = NULL;
+ struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
+ struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
+ struct hinic_fdir_rule_ele *fdir_rule_ptr;
+ struct hinic_flow_mem *hinic_flow_mem_ptr;
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+
+ flow = rte_zmalloc("hinic_rte_flow", sizeof(struct rte_flow), 0);
+ if (!flow) {
+ PMD_DRV_LOG(ERR, "Failed to allocate flow memory");
+ return NULL;
+ }
+
+ hinic_flow_mem_ptr = rte_zmalloc("hinic_flow_mem",
+ sizeof(struct hinic_flow_mem), 0);
+ if (!hinic_flow_mem_ptr) {
+ PMD_DRV_LOG(ERR, "Failed to allocate hinic_flow_mem_ptr");
+ rte_free(flow);
+ return NULL;
+ }
+
+ hinic_flow_mem_ptr->flow = flow;
+ TAILQ_INSERT_TAIL(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr,
+ entries);
+
+ /* Add ntuple filter */
+ memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ ret = hinic_parse_ntuple_filter(dev, attr, pattern,
+ actions, &ntuple_filter, error);
+ if (!ret) {
+ ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
+ if (!ret) {
+ ntuple_filter_ptr = rte_zmalloc("hinic_ntuple_filter",
+ sizeof(struct hinic_ntuple_filter_ele), 0);
+ rte_memcpy(&ntuple_filter_ptr->filter_info,
+ &ntuple_filter,
+ sizeof(struct rte_eth_ntuple_filter));
+ TAILQ_INSERT_TAIL(&nic_dev->filter_ntuple_list,
+ ntuple_filter_ptr, entries);
+ flow->rule = ntuple_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_NTUPLE;
+
+ PMD_DRV_LOG(INFO, "Create flow ntuple succeed, func_id: 0x%x",
+ hinic_global_func_id(nic_dev->hwdev));
+ return flow;
+ }
+ goto out;
+ }
+
+ /* Add ethertype filter */
+ memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ ret = hinic_parse_ethertype_filter(dev, attr, pattern, actions,
+ &ethertype_filter, error);
+ if (!ret) {
+ ret = hinic_add_del_ethertype_filter(dev, &ethertype_filter,
+ TRUE);
+ if (!ret) {
+ ethertype_filter_ptr =
+ rte_zmalloc("hinic_ethertype_filter",
+ sizeof(struct hinic_ethertype_filter_ele), 0);
+ rte_memcpy(&ethertype_filter_ptr->filter_info,
+ &ethertype_filter,
+ sizeof(struct rte_eth_ethertype_filter));
+ TAILQ_INSERT_TAIL(&nic_dev->filter_ethertype_list,
+ ethertype_filter_ptr, entries);
+ flow->rule = ethertype_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
+
+ PMD_DRV_LOG(INFO, "Create flow ethertype succeed, func_id: 0x%x",
+ hinic_global_func_id(nic_dev->hwdev));
+ return flow;
+ }
+ goto out;
+ }
+
+ /* Add fdir filter */
+ memset(&fdir_rule, 0, sizeof(struct hinic_fdir_rule));
+ ret = hinic_parse_fdir_filter(dev, attr, pattern,
+ actions, &fdir_rule, error);
+ if (!ret) {
+ if (fdir_rule.mode == HINIC_FDIR_MODE_NORMAL) {
+ ret = hinic_add_del_fdir_filter(dev,
+ &fdir_rule, TRUE);
+ } else if (fdir_rule.mode == HINIC_FDIR_MODE_TCAM) {
+ ret = hinic_add_del_tcam_fdir_filter(dev,
+ &fdir_rule, TRUE);
+ } else {
+ PMD_DRV_LOG(INFO, "flow fdir rule create failed, rule mode wrong");
+ goto out;
+ }
+ if (!ret) {
+ fdir_rule_ptr = rte_zmalloc("hinic_fdir_rule",
+ sizeof(struct hinic_fdir_rule_ele), 0);
+ rte_memcpy(&fdir_rule_ptr->filter_info, &fdir_rule,
+ sizeof(struct hinic_fdir_rule));
+ TAILQ_INSERT_TAIL(&nic_dev->filter_fdir_rule_list,
+ fdir_rule_ptr, entries);
+ flow->rule = fdir_rule_ptr;
+ flow->filter_type = RTE_ETH_FILTER_FDIR;
+
+ PMD_DRV_LOG(INFO, "Create flow fdir rule succeed, func_id : 0x%x",
+ hinic_global_func_id(nic_dev->hwdev));
+ return flow;
+ }
+ goto out;
+ }
+
+out:
+ TAILQ_REMOVE(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr, entries);
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to create flow.");
+ rte_free(hinic_flow_mem_ptr);
+ rte_free(flow);
+ return NULL;
+}
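+
+/*
+ * Illustrative usage sketch (not driver code): the generic rte_flow entry
+ * points that dispatch into the ops table at the bottom of this file.
+ * port_id, attr, pattern and actions are application-provided; err and f are
+ * hypothetical names.
+ *
+ *   struct rte_flow_error err;
+ *   struct rte_flow *f = NULL;
+ *
+ *   if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
+ *       f = rte_flow_create(port_id, &attr, pattern, actions, &err);
+ *   if (f != NULL)
+ *       rte_flow_destroy(port_id, f, &err);
+ */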
+
+/* Destroy a flow rule on hinic. */
+static int hinic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct rte_flow *pmd_flow = flow;
+ enum rte_filter_type filter_type = pmd_flow->filter_type;
+ struct rte_eth_ntuple_filter ntuple_filter;
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct hinic_fdir_rule fdir_rule;
+ struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
+ struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
+ struct hinic_fdir_rule_ele *fdir_rule_ptr;
+ struct hinic_flow_mem *hinic_flow_mem_ptr;
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_NTUPLE:
+ ntuple_filter_ptr = (struct hinic_ntuple_filter_ele *)
+ pmd_flow->rule;
+ rte_memcpy(&ntuple_filter, &ntuple_filter_ptr->filter_info,
+ sizeof(struct rte_eth_ntuple_filter));
+ ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&nic_dev->filter_ntuple_list,
+ ntuple_filter_ptr, entries);
+ rte_free(ntuple_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ethertype_filter_ptr = (struct hinic_ethertype_filter_ele *)
+ pmd_flow->rule;
+ rte_memcpy(&ethertype_filter,
+ &ethertype_filter_ptr->filter_info,
+ sizeof(struct rte_eth_ethertype_filter));
+ ret = hinic_add_del_ethertype_filter(dev,
+ &ethertype_filter, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&nic_dev->filter_ethertype_list,
+ ethertype_filter_ptr, entries);
+ rte_free(ethertype_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ fdir_rule_ptr = (struct hinic_fdir_rule_ele *)pmd_flow->rule;
+ rte_memcpy(&fdir_rule,
+ &fdir_rule_ptr->filter_info,
+ sizeof(struct hinic_fdir_rule));
+ if (fdir_rule.mode == HINIC_FDIR_MODE_NORMAL) {
+ ret = hinic_add_del_fdir_filter(dev, &fdir_rule, FALSE);
+ } else if (fdir_rule.mode == HINIC_FDIR_MODE_TCAM) {
+ ret = hinic_add_del_tcam_fdir_filter(dev, &fdir_rule,
+ FALSE);
+ } else {
+ PMD_DRV_LOG(ERR, "FDIR Filter type is wrong!");
+ ret = -EINVAL;
+ }
+ if (!ret) {
+ TAILQ_REMOVE(&nic_dev->filter_fdir_rule_list,
+ fdir_rule_ptr, entries);
+ rte_free(fdir_rule_ptr);
+ }
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) is not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to destroy flow");
+ return ret;
+ }
+
+ TAILQ_FOREACH(hinic_flow_mem_ptr, &nic_dev->hinic_flow_list, entries) {
+ if (hinic_flow_mem_ptr->flow == pmd_flow) {
+ TAILQ_REMOVE(&nic_dev->hinic_flow_list,
+ hinic_flow_mem_ptr, entries);
+ rte_free(hinic_flow_mem_ptr);
+ break;
+ }
+ }
+ rte_free(flow);
+
+ PMD_DRV_LOG(INFO, "Destroy flow succeed, func_id: 0x%x",
+ hinic_global_func_id(nic_dev->hwdev));
+
+ return ret;
+}
+
+/* Remove all the n-tuple filters */
+static void hinic_clear_all_ntuple_filter(struct rte_eth_dev *dev)
+{
+ struct hinic_filter_info *filter_info =
+ HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct hinic_5tuple_filter *p_5tuple;
+
+ while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
+ hinic_remove_5tuple_filter(dev, p_5tuple);
+}
+
+/* Remove all the ether type filters */
+static void hinic_clear_all_ethertype_filter(struct rte_eth_dev *dev)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ struct hinic_filter_info *filter_info =
+ HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev);
+ int ret = 0;
+
+ if (filter_info->type_mask &
+ (1 << HINIC_PKT_TYPE_FIND_ID(PKT_LACP_TYPE))) {
+ hinic_ethertype_filter_remove(filter_info,
+ HINIC_PKT_TYPE_FIND_ID(PKT_LACP_TYPE));
+ ret = hinic_set_fdir_filter(nic_dev->hwdev, PKT_LACP_TYPE,
+ filter_info->qid, false, true);
+
+ (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP);
+ }
+
+ if (filter_info->type_mask &
+ (1 << HINIC_PKT_TYPE_FIND_ID(PKT_ARP_TYPE))) {
+ hinic_ethertype_filter_remove(filter_info,
+ HINIC_PKT_TYPE_FIND_ID(PKT_ARP_TYPE));
+ ret = hinic_set_fdir_filter(nic_dev->hwdev, PKT_ARP_TYPE,
+ filter_info->qid, false, true);
+ }
+
+ if (ret)
+ PMD_DRV_LOG(ERR, "Clear ethertype failed, filter type: 0x%x",
+ filter_info->pkt_type);
+}
+
+/* Remove all the flow director filters */
+static void hinic_clear_all_fdir_filter(struct rte_eth_dev *dev)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ struct hinic_tcam_info *tcam_info =
+ HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
+ struct hinic_tcam_filter *tcam_filter_ptr;
+
+ while ((tcam_filter_ptr = TAILQ_FIRST(&tcam_info->tcam_list)))
+ (void)hinic_del_tcam_filter(dev, tcam_filter_ptr);
+
+ (void)hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, false);
+
+ (void)hinic_flush_tcam_rule(nic_dev->hwdev);
+}
+
+static void hinic_filterlist_flush(struct rte_eth_dev *dev)
+{
+ struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
+ struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
+ struct hinic_fdir_rule_ele *fdir_rule_ptr;
+ struct hinic_flow_mem *hinic_flow_mem_ptr;
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+
+ while ((ntuple_filter_ptr =
+ TAILQ_FIRST(&nic_dev->filter_ntuple_list))) {
+ TAILQ_REMOVE(&nic_dev->filter_ntuple_list, ntuple_filter_ptr,
+ entries);
+ rte_free(ntuple_filter_ptr);
+ }
+
+ while ((ethertype_filter_ptr =
+ TAILQ_FIRST(&nic_dev->filter_ethertype_list))) {
+ TAILQ_REMOVE(&nic_dev->filter_ethertype_list,
+ ethertype_filter_ptr,
+ entries);
+ rte_free(ethertype_filter_ptr);
+ }
+
+ while ((fdir_rule_ptr =
+ TAILQ_FIRST(&nic_dev->filter_fdir_rule_list))) {
+ TAILQ_REMOVE(&nic_dev->filter_fdir_rule_list, fdir_rule_ptr,
+ entries);
+ rte_free(fdir_rule_ptr);
+ }
+
+ while ((hinic_flow_mem_ptr =
+ TAILQ_FIRST(&nic_dev->hinic_flow_list))) {
+ TAILQ_REMOVE(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr,
+ entries);
+ rte_free(hinic_flow_mem_ptr->flow);
+ rte_free(hinic_flow_mem_ptr);
+ }
+}
+
+/* Destroy all flow rules associated with a port on hinic. */
+static int hinic_flow_flush(struct rte_eth_dev *dev,
+ __rte_unused struct rte_flow_error *error)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+
+ hinic_clear_all_ntuple_filter(dev);
+ hinic_clear_all_ethertype_filter(dev);
+ hinic_clear_all_fdir_filter(dev);
+ hinic_filterlist_flush(dev);
+
+ PMD_DRV_LOG(INFO, "Flush flow succeed, func_id: 0x%x",
+ hinic_global_func_id(nic_dev->hwdev));
+ return 0;
+}
+
+void hinic_destroy_fdir_filter(struct rte_eth_dev *dev)
+{
+ hinic_clear_all_ntuple_filter(dev);
+ hinic_clear_all_ethertype_filter(dev);
+ hinic_clear_all_fdir_filter(dev);
+ hinic_filterlist_flush(dev);
+}
+
+const struct rte_flow_ops hinic_flow_ops = {
+ .validate = hinic_flow_validate,
+ .create = hinic_flow_create,
+ .destroy = hinic_flow_destroy,
+ .flush = hinic_flow_flush,
+};
+
diff --git a/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_rx.c b/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_rx.c
new file mode 100644
index 000000000..a49769a86
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_rx.c
@@ -0,0 +1,1089 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include <rte_ether.h>
+#include <rte_mbuf.h>
+#ifdef __ARM64_NEON__
+#include <arm_neon.h>
+#endif
+
+#include "base/hinic_compat.h"
+#include "base/hinic_pmd_hwdev.h"
+#include "base/hinic_pmd_wq.h"
+#include "base/hinic_pmd_niccfg.h"
+#include "base/hinic_pmd_nicio.h"
+#include "hinic_pmd_ethdev.h"
+#include "hinic_pmd_rx.h"
+
+/* rxq wq operations */
+#define HINIC_GET_RQ_WQE_MASK(rxq) \
+ ((rxq)->wq->mask)
+
+#define HINIC_GET_RQ_LOCAL_CI(rxq) \
+ (((rxq)->wq->cons_idx) & HINIC_GET_RQ_WQE_MASK(rxq))
+
+#define HINIC_GET_RQ_LOCAL_PI(rxq) \
+ (((rxq)->wq->prod_idx) & HINIC_GET_RQ_WQE_MASK(rxq))
+
+#define HINIC_UPDATE_RQ_LOCAL_CI(rxq, wqebb_cnt) \
+ do { \
+ (rxq)->wq->cons_idx += (wqebb_cnt); \
+ (rxq)->wq->delta += (wqebb_cnt); \
+ } while (0)
+
+#define HINIC_UPDATE_RQ_HW_PI(rxq, pi) \
+ (*((rxq)->pi_virt_addr) = \
+ cpu_to_be16((pi) & HINIC_GET_RQ_WQE_MASK(rxq)))
+
+#define HINIC_GET_RQ_FREE_WQEBBS(rxq) ((rxq)->wq->delta - 1)
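+
+/*
+ * Worked example of the ring-index macros above, assuming the usual
+ * power-of-two ring with mask = depth - 1: for a depth of 1024 the mask is
+ * 0x3ff, so a cons_idx of 1025 yields a local CI of 1 (1025 & 0x3ff). The
+ * producer/consumer indices grow monotonically and only wrap through the
+ * mask, while HINIC_GET_RQ_FREE_WQEBBS reports wq->delta - 1 free WQEBBs.
+ */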
+
+/* rxq cqe done and status bit */
+#define HINIC_GET_RX_DONE_BE(status) \
+ ((status) & 0x80U)
+
+#define HINIC_RX_CSUM_OFFLOAD_EN 0xFFF
+
+#define RQ_CQE_SGE_VLAN_SHIFT 0
+#define RQ_CQE_SGE_LEN_SHIFT 16
+
+#define RQ_CQE_SGE_VLAN_MASK 0xFFFFU
+#define RQ_CQE_SGE_LEN_MASK 0xFFFFU
+
+#define RQ_CQE_SGE_GET(val, member) \
+ (((val) >> RQ_CQE_SGE_##member##_SHIFT) & RQ_CQE_SGE_##member##_MASK)
+
+#define HINIC_GET_RX_VLAN_TAG(vlan_len) \
+ RQ_CQE_SGE_GET(vlan_len, VLAN)
+
+#define HINIC_GET_RX_PKT_LEN(vlan_len) \
+ RQ_CQE_SGE_GET(vlan_len, LEN)
+
+#define RQ_CQE_STATUS_CSUM_ERR_SHIFT 0
+#define RQ_CQE_STATUS_NUM_LRO_SHIFT 16
+#define RQ_CQE_STATUS_LRO_PUSH_SHIFT 25
+#define RQ_CQE_STATUS_LRO_ENTER_SHIFT 26
+#define RQ_CQE_STATUS_LRO_INTR_SHIFT 27
+
+#define RQ_CQE_STATUS_BP_EN_SHIFT 30
+#define RQ_CQE_STATUS_RXDONE_SHIFT 31
+#define RQ_CQE_STATUS_FLUSH_SHIFT 28
+
+#define RQ_CQE_STATUS_CSUM_ERR_MASK 0xFFFFU
+#define RQ_CQE_STATUS_NUM_LRO_MASK 0xFFU
+#define RQ_CQE_STATUS_LRO_PUSH_MASK 0X1U
+#define RQ_CQE_STATUS_LRO_ENTER_MASK 0X1U
+#define RQ_CQE_STATUS_LRO_INTR_MASK 0X1U
+#define RQ_CQE_STATUS_BP_EN_MASK 0X1U
+#define RQ_CQE_STATUS_RXDONE_MASK 0x1U
+#define RQ_CQE_STATUS_FLUSH_MASK 0x1U
+
+#define RQ_CQE_STATUS_GET(val, member) \
+ (((val) >> RQ_CQE_STATUS_##member##_SHIFT) & \
+ RQ_CQE_STATUS_##member##_MASK)
+
+#define RQ_CQE_STATUS_CLEAR(val, member) \
+ ((val) & (~(RQ_CQE_STATUS_##member##_MASK << \
+ RQ_CQE_STATUS_##member##_SHIFT)))
+
+#define HINIC_GET_RX_CSUM_ERR(status) \
+ RQ_CQE_STATUS_GET(status, CSUM_ERR)
+
+#define HINIC_GET_RX_DONE(status) \
+ RQ_CQE_STATUS_GET(status, RXDONE)
+
+#define HINIC_GET_RX_FLUSH(status) \
+ RQ_CQE_STATUS_GET(status, FLUSH)
+
+#define HINIC_GET_RX_BP_EN(status) \
+ RQ_CQE_STATUS_GET(status, BP_EN)
+
+#define HINIC_GET_RX_NUM_LRO(status) \
+ RQ_CQE_STATUS_GET(status, NUM_LRO)
+
+/* RQ_CTRL */
+#define RQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0
+#define RQ_CTRL_COMPLETE_FORMAT_SHIFT 15
+#define RQ_CTRL_COMPLETE_LEN_SHIFT 27
+#define RQ_CTRL_LEN_SHIFT 29
+
+#define RQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFFU
+#define RQ_CTRL_COMPLETE_FORMAT_MASK 0x1U
+#define RQ_CTRL_COMPLETE_LEN_MASK 0x3U
+#define RQ_CTRL_LEN_MASK 0x3U
+
+#define RQ_CTRL_SET(val, member) \
+ (((val) & RQ_CTRL_##member##_MASK) << RQ_CTRL_##member##_SHIFT)
+
+#define RQ_CTRL_GET(val, member) \
+ (((val) >> RQ_CTRL_##member##_SHIFT) & RQ_CTRL_##member##_MASK)
+
+#define RQ_CTRL_CLEAR(val, member) \
+ ((val) & (~(RQ_CTRL_##member##_MASK << RQ_CTRL_##member##_SHIFT)))
+
+#define RQ_CQE_PKT_NUM_SHIFT 1
+#define RQ_CQE_PKT_FIRST_LEN_SHIFT 19
+#define RQ_CQE_PKT_LAST_LEN_SHIFT 6
+#define RQ_CQE_SUPER_CQE_EN_SHIFT 0
+
+#define RQ_CQE_PKT_FIRST_LEN_MASK 0x1FFFU
+#define RQ_CQE_PKT_LAST_LEN_MASK 0x1FFFU
+#define RQ_CQE_PKT_NUM_MASK 0x1FU
+#define RQ_CQE_SUPER_CQE_EN_MASK 0x1
+
+#define RQ_CQE_PKT_NUM_GET(val, member) \
+ (((val) >> RQ_CQE_PKT_##member##_SHIFT) & RQ_CQE_PKT_##member##_MASK)
+
+#define HINIC_GET_RQ_CQE_PKT_NUM(pkt_info) RQ_CQE_PKT_NUM_GET(pkt_info, NUM)
+
+#define RQ_CQE_SUPER_CQE_EN_GET(val, member) \
+ (((val) >> RQ_CQE_##member##_SHIFT) & RQ_CQE_##member##_MASK)
+
+#define HINIC_GET_SUPER_CQE_EN(pkt_info) \
+ RQ_CQE_SUPER_CQE_EN_GET(pkt_info, SUPER_CQE_EN)
+
+#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_SHIFT 21
+#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_MASK 0x1U
+
+#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_SHIFT 0
+#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_MASK 0xFFFU
+
+#define RQ_CQE_OFFOLAD_TYPE_PKT_UMBCAST_SHIFT 19
+#define RQ_CQE_OFFOLAD_TYPE_PKT_UMBCAST_MASK 0x3U
+
+#define RQ_CQE_OFFOLAD_TYPE_RSS_TYPE_SHIFT 24
+#define RQ_CQE_OFFOLAD_TYPE_RSS_TYPE_MASK 0xFFU
+
+#define RQ_CQE_OFFOLAD_TYPE_GET(val, member) (((val) >> \
+ RQ_CQE_OFFOLAD_TYPE_##member##_SHIFT) & \
+ RQ_CQE_OFFOLAD_TYPE_##member##_MASK)
+
+#define HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type) \
+ RQ_CQE_OFFOLAD_TYPE_GET(offload_type, VLAN_EN)
+
+#define HINIC_GET_RSS_TYPES(offload_type) \
+ RQ_CQE_OFFOLAD_TYPE_GET(offload_type, RSS_TYPE)
+
+#define HINIC_GET_RX_PKT_TYPE(offload_type) \
+ RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_TYPE)
+
+#define HINIC_GET_RX_PKT_UMBCAST(offload_type) \
+ RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_UMBCAST)
+
+#define RQ_CQE_STATUS_CSUM_BYPASS_VAL 0x80U
+#define RQ_CQE_STATUS_CSUM_ERR_IP_MASK 0x39U
+#define RQ_CQE_STATUS_CSUM_ERR_L4_MASK 0x46U
+#define RQ_CQE_STATUS_CSUM_ERR_OTHER 0x100U
+
+#define HINIC_CSUM_ERR_BYPASSED(csum_err) \
+ ((csum_err) == RQ_CQE_STATUS_CSUM_BYPASS_VAL)
+
+#define HINIC_CSUM_ERR_IP(csum_err) \
+ ((csum_err) & RQ_CQE_STATUS_CSUM_ERR_IP_MASK)
+
+#define HINIC_CSUM_ERR_L4(csum_err) \
+ ((csum_err) & RQ_CQE_STATUS_CSUM_ERR_L4_MASK)
+
+#define HINIC_CSUM_ERR_OTHER(csum_err) \
+ ((csum_err) == RQ_CQE_STATUS_CSUM_ERR_OTHER)
+
+
+void hinic_get_func_rx_buf_size(struct hinic_nic_dev *nic_dev)
+{
+ struct hinic_rxq *rxq;
+ u16 q_id;
+ u16 buf_size = 0;
+
+ for (q_id = 0; q_id < nic_dev->num_rq; q_id++) {
+ rxq = nic_dev->rxqs[q_id];
+
+ if (rxq == NULL)
+ continue;
+
+ if (q_id == 0)
+ buf_size = rxq->buf_len;
+
+ buf_size = buf_size > rxq->buf_len ? rxq->buf_len : buf_size;
+ }
+
+ nic_dev->hwdev->nic_io->rq_buf_size = buf_size;
+}
+
+int hinic_create_rq(struct hinic_hwdev *hwdev, u16 q_id,
+ u16 rq_depth, unsigned int socket_id)
+{
+ int err;
+ struct hinic_nic_io *nic_io = hwdev->nic_io;
+ struct hinic_qp *qp = &nic_io->qps[q_id];
+ struct hinic_rq *rq = &qp->rq;
+
+ /* in case hardware still generates interrupts, do not use msix 0 */
+ rq->msix_entry_idx = 1;
+ rq->q_id = q_id;
+ rq->rq_depth = rq_depth;
+ nic_io->rq_depth = rq_depth;
+
+ err = hinic_wq_allocate(hwdev, &nic_io->rq_wq[q_id],
+ HINIC_RQ_WQEBB_SHIFT, nic_io->rq_depth, socket_id);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to allocate WQ for RQ");
+ return err;
+ }
+ rq->wq = &nic_io->rq_wq[q_id];
+
+ rq->pi_virt_addr = (volatile u16 *)dma_zalloc_coherent(hwdev,
+ HINIC_PAGE_SIZE, &rq->pi_dma_addr, socket_id);
+ if (!rq->pi_virt_addr) {
+ PMD_DRV_LOG(ERR, "Failed to allocate rq pi virt addr");
+ err = -ENOMEM;
+ goto rq_pi_alloc_err;
+ }
+
+ return HINIC_OK;
+
+rq_pi_alloc_err:
+ hinic_wq_free(hwdev, &nic_io->rq_wq[q_id]);
+
+ return err;
+}
+
+void hinic_destroy_rq(struct hinic_hwdev *hwdev, u16 q_id)
+{
+ struct hinic_nic_io *nic_io = hwdev->nic_io;
+ struct hinic_qp *qp = &nic_io->qps[q_id];
+ struct hinic_rq *rq = &qp->rq;
+
+ if (qp->rq.wq == NULL)
+ return;
+
+ dma_free_coherent_volatile(hwdev, HINIC_PAGE_SIZE,
+ (volatile void *)rq->pi_virt_addr,
+ rq->pi_dma_addr);
+ hinic_wq_free(nic_io->hwdev, qp->rq.wq);
+ qp->rq.wq = NULL;
+}
+
+static void
+hinic_prepare_rq_wqe(void *wqe, __rte_unused u16 pi, dma_addr_t buf_addr,
+ dma_addr_t cqe_dma)
+{
+ struct hinic_rq_wqe *rq_wqe = wqe;
+ struct hinic_rq_ctrl *ctrl = &rq_wqe->ctrl;
+ struct hinic_rq_cqe_sect *cqe_sect = &rq_wqe->cqe_sect;
+ struct hinic_rq_bufdesc *buf_desc = &rq_wqe->buf_desc;
+ u32 rq_ceq_len = sizeof(struct hinic_rq_cqe);
+
+ ctrl->ctrl_fmt =
+ RQ_CTRL_SET(SIZE_8BYTES(sizeof(*ctrl)), LEN) |
+ RQ_CTRL_SET(SIZE_8BYTES(sizeof(*cqe_sect)), COMPLETE_LEN) |
+ RQ_CTRL_SET(SIZE_8BYTES(sizeof(*buf_desc)), BUFDESC_SECT_LEN) |
+ RQ_CTRL_SET(RQ_COMPLETE_SGE, COMPLETE_FORMAT);
+
+ hinic_set_sge(&cqe_sect->sge, cqe_dma, rq_ceq_len);
+
+ buf_desc->addr_high = upper_32_bits(buf_addr);
+ buf_desc->addr_low = lower_32_bits(buf_addr);
+}
+
+void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
+{
+ if (!rxq || !stats)
+ return;
+
+ memcpy(stats, &rxq->rxq_stats, sizeof(rxq->rxq_stats));
+}
+
+void hinic_rxq_stats_reset(struct hinic_rxq *rxq)
+{
+ struct hinic_rxq_stats *rxq_stats;
+
+ if (rxq == NULL)
+ return;
+
+ rxq_stats = &rxq->rxq_stats;
+ memset(rxq_stats, 0, sizeof(*rxq_stats));
+}
+
+static int hinic_rx_alloc_cqe(struct hinic_rxq *rxq, unsigned int socket_id)
+{
+ size_t cqe_mem_size;
+
+ cqe_mem_size = sizeof(struct hinic_rq_cqe) * rxq->q_depth;
+ rxq->cqe_start_vaddr = dma_zalloc_coherent(rxq->nic_dev->hwdev,
+ cqe_mem_size, &rxq->cqe_start_paddr, socket_id);
+ if (!rxq->cqe_start_vaddr) {
+ PMD_DRV_LOG(ERR, "Allocate cqe dma memory failed");
+ return -ENOMEM;
+ }
+
+ rxq->rx_cqe = (struct hinic_rq_cqe *)rxq->cqe_start_vaddr;
+
+ return HINIC_OK;
+}
+
+static void hinic_rx_free_cqe(struct hinic_rxq *rxq)
+{
+ size_t cqe_mem_size;
+
+ cqe_mem_size = sizeof(struct hinic_rq_cqe) * rxq->q_depth;
+ dma_free_coherent(rxq->nic_dev->hwdev, cqe_mem_size,
+ rxq->cqe_start_vaddr, rxq->cqe_start_paddr);
+ rxq->cqe_start_vaddr = NULL;
+}
+
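+/* Pre-fill every RQ WQE with its per-entry cqe DMA address and convert it to
+ * big endian; the packet buffer addresses are filled in later when mbufs are
+ * posted to the ring.
+ */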
+static int hinic_rx_fill_wqe(struct hinic_rxq *rxq)
+{
+ struct hinic_nic_dev *nic_dev = rxq->nic_dev;
+ struct hinic_rq_wqe *rq_wqe;
+ dma_addr_t buf_dma_addr, cqe_dma_addr;
+ u16 pi = 0;
+ int i;
+
+ buf_dma_addr = 0;
+ cqe_dma_addr = rxq->cqe_start_paddr;
+ for (i = 0; i < rxq->q_depth; i++) {
+ rq_wqe = hinic_get_rq_wqe(nic_dev->hwdev, rxq->q_id, &pi);
+ if (!rq_wqe) {
+ PMD_DRV_LOG(ERR, "Get rq wqe failed");
+ break;
+ }
+
+ hinic_prepare_rq_wqe(rq_wqe, pi, buf_dma_addr, cqe_dma_addr);
+ cqe_dma_addr += sizeof(struct hinic_rq_cqe);
+
+ hinic_cpu_to_be32(rq_wqe, sizeof(struct hinic_rq_wqe));
+ }
+
+ hinic_return_rq_wqe(nic_dev->hwdev, rxq->q_id, i);
+
+ return i;
+}
+
+/* alloc cqe and prepare rqe */
+int hinic_setup_rx_resources(struct hinic_rxq *rxq)
+{
+ u64 rx_info_sz;
+ int err, pkts;
+
+ rx_info_sz = rxq->q_depth * sizeof(*rxq->rx_info);
+ rxq->rx_info = rte_zmalloc_socket("rx_info", rx_info_sz,
+ RTE_CACHE_LINE_SIZE, rxq->socket_id);
+ if (!rxq->rx_info)
+ return -ENOMEM;
+
+ err = hinic_rx_alloc_cqe(rxq, rxq->socket_id);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Allocate rx cqe failed");
+ goto rx_cqe_err;
+ }
+
+ pkts = hinic_rx_fill_wqe(rxq);
+ if (pkts != rxq->q_depth) {
+ PMD_DRV_LOG(ERR, "Fill rx wqe failed");
+ err = -ENOMEM;
+ goto rx_fill_err;
+ }
+
+ return 0;
+
+rx_fill_err:
+ hinic_rx_free_cqe(rxq);
+
+rx_cqe_err:
+ rte_free(rxq->rx_info);
+ rxq->rx_info = NULL;
+
+ return err;
+}
+
+void hinic_free_rx_resources(struct hinic_rxq *rxq)
+{
+ if (rxq->rx_info == NULL)
+ return;
+
+ hinic_rx_free_cqe(rxq);
+ rte_free(rxq->rx_info);
+ rxq->rx_info = NULL;
+}
+
+void hinic_free_all_rx_resources(struct rte_eth_dev *eth_dev)
+{
+ u16 q_id;
+ struct hinic_nic_dev *nic_dev =
+ HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
+
+ for (q_id = 0; q_id < nic_dev->num_rq; q_id++) {
+ if (eth_dev->data->rx_queues != NULL)
+ eth_dev->data->rx_queues[q_id] = NULL;
+
+ if (nic_dev->rxqs[q_id] == NULL)
+ continue;
+
+ hinic_free_all_rx_mbufs(nic_dev->rxqs[q_id]);
+ hinic_free_rx_resources(nic_dev->rxqs[q_id]);
+ kfree(nic_dev->rxqs[q_id]);
+ nic_dev->rxqs[q_id] = NULL;
+ }
+}
+
+void hinic_free_all_rx_mbuf(struct rte_eth_dev *eth_dev)
+{
+ struct hinic_nic_dev *nic_dev =
+ HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
+ u16 q_id;
+
+ for (q_id = 0; q_id < nic_dev->num_rq; q_id++)
+ hinic_free_all_rx_mbufs(nic_dev->rxqs[q_id]);
+}
+
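+/* Chain the remaining buffers of a packet that spans several rx buffers onto
+ * the head mbuf, consuming one RQ entry per buffer.
+ */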
+static void hinic_recv_jumbo_pkt(struct hinic_rxq *rxq,
+ struct rte_mbuf *head_mbuf,
+ u32 remain_pkt_len)
+{
+ struct hinic_nic_dev *nic_dev = rxq->nic_dev;
+ struct rte_mbuf *cur_mbuf, *rxm = NULL;
+ struct hinic_rx_info *rx_info;
+ u16 sw_ci, rx_buf_len = rxq->buf_len;
+ u32 pkt_len;
+
+ while (remain_pkt_len > 0) {
+ sw_ci = hinic_get_rq_local_ci(nic_dev->hwdev, rxq->q_id);
+ rx_info = &rxq->rx_info[sw_ci];
+
+ hinic_update_rq_local_ci(nic_dev->hwdev, rxq->q_id, 1);
+
+ pkt_len = remain_pkt_len > rx_buf_len ?
+ rx_buf_len : remain_pkt_len;
+ remain_pkt_len -= pkt_len;
+
+ cur_mbuf = rx_info->mbuf;
+ cur_mbuf->data_len = (u16)pkt_len;
+ cur_mbuf->next = NULL;
+
+ head_mbuf->pkt_len += cur_mbuf->data_len;
+ head_mbuf->nb_segs++;
+
+ if (!rxm)
+ head_mbuf->next = cur_mbuf;
+ else
+ rxm->next = cur_mbuf;
+
+ rxm = cur_mbuf;
+ }
+}
+
+static void hinic_rss_deinit(struct hinic_nic_dev *nic_dev)
+{
+ u8 prio_tc[HINIC_DCB_UP_MAX] = {0};
+ (void)hinic_rss_cfg(nic_dev->hwdev, 0,
+ nic_dev->rss_tmpl_idx, 0, prio_tc);
+}
+
+static int hinic_rss_key_init(struct hinic_nic_dev *nic_dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ u8 default_rss_key[HINIC_RSS_KEY_SIZE] = {
+ 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
+ 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
+ 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
+ 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
+ 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa};
+ u8 hashkey[HINIC_RSS_KEY_SIZE] = {0};
+ u8 tmpl_idx = nic_dev->rss_tmpl_idx;
+
+ if (rss_conf->rss_key == NULL)
+ memcpy(hashkey, default_rss_key, HINIC_RSS_KEY_SIZE);
+ else
+ memcpy(hashkey, rss_conf->rss_key, rss_conf->rss_key_len);
+
+ return hinic_rss_set_template_tbl(nic_dev->hwdev, tmpl_idx, hashkey);
+}
+
+static void hinic_fill_rss_type(struct nic_rss_type *rss_type,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ u64 rss_hf = rss_conf->rss_hf;
+
+ rss_type->ipv4 = (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4)) ? 1 : 0;
+ rss_type->tcp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
+ rss_type->ipv6 = (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6)) ? 1 : 0;
+ rss_type->ipv6_ext = (rss_hf & ETH_RSS_IPV6_EX) ? 1 : 0;
+ rss_type->tcp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
+ rss_type->tcp_ipv6_ext = (rss_hf & ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
+ rss_type->udp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
+ rss_type->udp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
+}
+
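+/* Fill the RSS indirection table round-robin with the enabled rx queues, or
+ * with the invalid index 0xFF when no RSS queue is configured.
+ */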
+static void hinic_fillout_indir_tbl(struct hinic_nic_dev *nic_dev, u32 *indir)
+{
+ u8 rss_queue_count = nic_dev->num_rss;
+ int i = 0, j;
+
+ if (rss_queue_count == 0) {
+ /* delete q_id from indir tbl */
+ for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++)
+ indir[i] = 0xFF; /* Invalid value in indir tbl */
+ } else {
+ while (i < HINIC_RSS_INDIR_SIZE)
+ for (j = 0; (j < rss_queue_count) &&
+ (i < HINIC_RSS_INDIR_SIZE); j++)
+ indir[i++] = nic_dev->rx_queue_list[j];
+ }
+}
+
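+/* Program the RSS configuration for one template: hash key, indirection
+ * table, hash types and hash engine, then enable RSS on the device.
+ */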
+static int hinic_rss_init(struct hinic_nic_dev *nic_dev,
+ __rte_unused u8 *rq2iq_map,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ u32 indir_tbl[HINIC_RSS_INDIR_SIZE] = {0};
+ struct nic_rss_type rss_type = {0};
+ u8 prio_tc[HINIC_DCB_UP_MAX] = {0};
+ u8 tmpl_idx = 0xFF, num_tc = 0;
+ int err;
+
+ tmpl_idx = nic_dev->rss_tmpl_idx;
+
+ err = hinic_rss_key_init(nic_dev, rss_conf);
+ if (err)
+ return err;
+
+ if (!nic_dev->rss_indir_flag) {
+ hinic_fillout_indir_tbl(nic_dev, indir_tbl);
+ err = hinic_rss_set_indir_tbl(nic_dev->hwdev, tmpl_idx,
+ indir_tbl);
+ if (err)
+ return err;
+ }
+
+ hinic_fill_rss_type(&rss_type, rss_conf);
+ err = hinic_set_rss_type(nic_dev->hwdev, tmpl_idx, rss_type);
+ if (err)
+ return err;
+
+ err = hinic_rss_set_hash_engine(nic_dev->hwdev, tmpl_idx,
+ HINIC_RSS_HASH_ENGINE_TYPE_TOEP);
+ if (err)
+ return err;
+
+ return hinic_rss_cfg(nic_dev->hwdev, 1, tmpl_idx, num_tc, prio_tc);
+}
+
+static void
+hinic_add_rq_to_rx_queue_list(struct hinic_nic_dev *nic_dev, u16 queue_id)
+{
+ u8 rss_queue_count = nic_dev->num_rss;
+
+ RTE_ASSERT(rss_queue_count <= (RTE_DIM(nic_dev->rx_queue_list) - 1));
+
+ nic_dev->rx_queue_list[rss_queue_count] = queue_id;
+ nic_dev->num_rss++;
+}
+
+/**
+ * hinic_setup_num_qps - allocate an rss template and set up the rx queue
+ * list when more than one receive queue is configured
+ * @nic_dev: pointer to the private ethernet device
+ * Return: 0 on success, error code otherwise.
+ **/
+static int hinic_setup_num_qps(struct hinic_nic_dev *nic_dev)
+{
+ int err, i;
+
+ if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
+ nic_dev->flags &= ~ETH_MQ_RX_RSS_FLAG;
+ nic_dev->num_rss = 0;
+ if (nic_dev->num_rq > 1) {
+ /* get rss template id */
+ err = hinic_rss_template_alloc(nic_dev->hwdev,
+ &nic_dev->rss_tmpl_idx);
+ if (err) {
+ PMD_DRV_LOG(WARNING, "Alloc rss template failed");
+ return err;
+ }
+ nic_dev->flags |= ETH_MQ_RX_RSS_FLAG;
+ for (i = 0; i < nic_dev->num_rq; i++)
+ hinic_add_rq_to_rx_queue_list(nic_dev, i);
+ }
+ }
+
+ return 0;
+}
+
+static void hinic_destroy_num_qps(struct hinic_nic_dev *nic_dev)
+{
+ if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
+ if (hinic_rss_template_free(nic_dev->hwdev,
+ nic_dev->rss_tmpl_idx))
+ PMD_DRV_LOG(WARNING, "Free rss template failed");
+
+ nic_dev->flags &= ~ETH_MQ_RX_RSS_FLAG;
+ }
+}
+
+static int hinic_config_mq_rx_rss(struct hinic_nic_dev *nic_dev, bool on)
+{
+ int ret = 0;
+
+ if (on) {
+ ret = hinic_setup_num_qps(nic_dev);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Setup num_qps failed");
+ } else {
+ hinic_destroy_num_qps(nic_dev);
+ }
+
+ return ret;
+}
+
+int hinic_config_mq_mode(struct rte_eth_dev *dev, bool on)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ int ret = 0;
+
+ switch (dev_conf->rxmode.mq_mode) {
+ case ETH_MQ_RX_RSS:
+ ret = hinic_config_mq_rx_rss(nic_dev, on);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+int hinic_rx_configure(struct rte_eth_dev *dev)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ struct rte_eth_rss_conf rss_conf =
+ dev->data->dev_conf.rx_adv_conf.rss_conf;
+ int err;
+ bool lro_en;
+ int max_lro_size;
+ int lro_wqe_num;
+ int buf_size;
+
+ if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
+ if (rss_conf.rss_hf == 0) {
+ rss_conf.rss_hf = HINIC_RSS_OFFLOAD_ALL;
+ } else if ((rss_conf.rss_hf & HINIC_RSS_OFFLOAD_ALL) == 0) {
+			PMD_DRV_LOG(ERR, "Requested rss_hf types are not supported");
+ goto rss_config_err;
+ }
+
+ err = hinic_rss_init(nic_dev, NULL, &rss_conf);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Init rss failed");
+ goto rss_config_err;
+ }
+ }
+
+ /* Enable both L3/L4 rx checksum offload */
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CHECKSUM)
+ nic_dev->rx_csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
+
+ err = hinic_set_rx_csum_offload(nic_dev->hwdev,
+ HINIC_RX_CSUM_OFFLOAD_EN);
+ if (err)
+ goto rx_csum_ofl_err;
+
+ /* config lro */
+ lro_en = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ?
+ true : false;
+ max_lro_size = dev->data->dev_conf.rxmode.max_lro_pkt_size;
+ buf_size = nic_dev->hwdev->nic_io->rq_buf_size;
+ lro_wqe_num = max_lro_size / buf_size ? (max_lro_size / buf_size) : 1;
+
+ err = hinic_set_rx_lro(nic_dev->hwdev, lro_en, lro_en, lro_wqe_num);
+ if (err) {
+ PMD_DRV_LOG(ERR, "%s %s lro failed, err: %d, max_lro_size: %d",
+ dev->data->name, lro_en ? "Enable" : "Disable",
+ err, max_lro_size);
+ goto set_rx_lro_err;
+ }
+
+ return 0;
+
+set_rx_lro_err:
+rx_csum_ofl_err:
+rss_config_err:
+
+ hinic_destroy_num_qps(nic_dev);
+
+ return HINIC_ERROR;
+}
+
+static void hinic_rx_remove_lro(struct hinic_nic_dev *nic_dev)
+{
+ int err;
+
+ err = hinic_set_rx_lro(nic_dev->hwdev, false, false, 0);
+ if (err)
+ PMD_DRV_LOG(ERR, "%s disable LRO failed",
+ nic_dev->proc_dev_name);
+}
+
+void hinic_rx_remove_configure(struct rte_eth_dev *dev)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+
+ if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
+ hinic_rss_deinit(nic_dev);
+ hinic_destroy_num_qps(nic_dev);
+ }
+
+ hinic_rx_remove_lro(nic_dev);
+}
+
+void hinic_free_all_rx_mbufs(struct hinic_rxq *rxq)
+{
+ struct hinic_nic_dev *nic_dev = rxq->nic_dev;
+ struct hinic_rx_info *rx_info;
+ int free_wqebbs =
+ hinic_get_rq_free_wqebbs(nic_dev->hwdev, rxq->q_id) + 1;
+ volatile struct hinic_rq_cqe *rx_cqe;
+ u16 ci;
+
+ while (free_wqebbs++ < rxq->q_depth) {
+ ci = hinic_get_rq_local_ci(nic_dev->hwdev, rxq->q_id);
+
+ rx_cqe = &rxq->rx_cqe[ci];
+
+ /* clear done bit */
+ rx_cqe->status = 0;
+
+ rx_info = &rxq->rx_info[ci];
+ rte_pktmbuf_free(rx_info->mbuf);
+ rx_info->mbuf = NULL;
+
+ hinic_update_rq_local_ci(nic_dev->hwdev, rxq->q_id, 1);
+ }
+}
+
+static inline void hinic_rq_cqe_be_to_cpu32(void *dst_le32,
+ volatile void *src_be32)
+{
+#if defined(__X86_64_SSE__)
+ volatile __m128i *wqe_be = (volatile __m128i *)src_be32;
+ __m128i *wqe_le = (__m128i *)dst_le32;
+ __m128i shuf_mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10,
+ 11, 4, 5, 6, 7, 0, 1, 2, 3);
+
+	/* l2nic uses only the first 128 bits */
+ wqe_le[0] = _mm_shuffle_epi8(wqe_be[0], shuf_mask);
+#elif defined(__ARM64_NEON__)
+ volatile uint8x16_t *wqe_be = (volatile uint8x16_t *)src_be32;
+ uint8x16_t *wqe_le = (uint8x16_t *)dst_le32;
+ const uint8x16_t shuf_mask = {3, 2, 1, 0, 7, 6, 5, 4, 11, 10,
+ 9, 8, 15, 14, 13, 12};
+
+	/* l2nic uses only the first 128 bits */
+ wqe_le[0] = vqtbl1q_u8(wqe_be[0], shuf_mask);
+#else
+ u32 i;
+ volatile u32 *wqe_be = (volatile u32 *)src_be32;
+ u32 *wqe_le = (u32 *)dst_le32;
+
+#define HINIC_L2NIC_RQ_CQE_USED 4 /* 4Bytes unit */
+
+ for (i = 0; i < HINIC_L2NIC_RQ_CQE_USED; i++) {
+ *wqe_le = rte_be_to_cpu_32(*wqe_be);
+ wqe_be++;
+ wqe_le++;
+ }
+#endif
+}
+
+static inline uint64_t hinic_rx_rss_hash(uint32_t offload_type,
+					 uint32_t cqe_hash_val,
+					 uint32_t *rss_hash)
+{
+ uint32_t rss_type;
+
+ rss_type = HINIC_GET_RSS_TYPES(offload_type);
+ if (likely(rss_type != 0)) {
+		*rss_hash = cqe_hash_val;
+ return PKT_RX_RSS_HASH;
+ }
+
+ return 0;
+}
+
+static inline uint64_t hinic_rx_csum(uint32_t status, struct hinic_rxq *rxq)
+{
+ uint32_t checksum_err;
+ uint64_t flags;
+ struct hinic_nic_dev *nic_dev = rxq->nic_dev;
+
+ if (unlikely(!(nic_dev->rx_csum_en & HINIC_RX_CSUM_OFFLOAD_EN)))
+ return PKT_RX_IP_CKSUM_UNKNOWN;
+
+	/* in most cases the checksum is ok */
+ checksum_err = HINIC_GET_RX_CSUM_ERR(status);
+ if (likely(checksum_err == 0))
+ return (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
+
+ /* If BYPASS bit set, all other status indications should be ignored */
+ if (unlikely(HINIC_CSUM_ERR_BYPASSED(checksum_err)))
+ return PKT_RX_IP_CKSUM_UNKNOWN;
+
+ flags = 0;
+
+ /* IP checksum error */
+ if (HINIC_CSUM_ERR_IP(checksum_err))
+ flags |= PKT_RX_IP_CKSUM_BAD;
+ else
+ flags |= PKT_RX_IP_CKSUM_GOOD;
+
+ /* L4 checksum error */
+ if (HINIC_CSUM_ERR_L4(checksum_err))
+ flags |= PKT_RX_L4_CKSUM_BAD;
+ else
+ flags |= PKT_RX_L4_CKSUM_GOOD;
+
+ if (unlikely(HINIC_CSUM_ERR_OTHER(checksum_err)))
+ flags = PKT_RX_L4_CKSUM_NONE;
+
+ rxq->rxq_stats.errors++;
+
+ return flags;
+}
+
+static inline uint64_t hinic_rx_vlan(uint32_t offload_type, uint32_t vlan_len,
+ uint16_t *vlan_tci)
+{
+ uint16_t vlan_tag;
+
+ vlan_tag = HINIC_GET_RX_VLAN_TAG(vlan_len);
+	if (!HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type) || vlan_tag == 0) {
+ *vlan_tci = 0;
+ return 0;
+ }
+
+ *vlan_tci = vlan_tag;
+
+ return PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+}
+
+static inline u32 hinic_rx_alloc_mbuf_bulk(struct hinic_rxq *rxq,
+ struct rte_mbuf **mbufs,
+ u32 exp_mbuf_cnt)
+{
+ int rc;
+ u32 avail_cnt;
+
+ rc = rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, exp_mbuf_cnt);
+ if (likely(rc == HINIC_OK)) {
+ avail_cnt = exp_mbuf_cnt;
+ } else {
+ avail_cnt = 0;
+ rxq->rxq_stats.rx_nombuf += exp_mbuf_cnt;
+ }
+
+ return avail_cnt;
+}
+
+static struct rte_mbuf *hinic_rx_alloc_mbuf(struct hinic_rxq *rxq,
+ dma_addr_t *dma_addr)
+{
+ struct rte_mbuf *mbuf = NULL;
+ int rc;
+
+ rc = rte_pktmbuf_alloc_bulk(rxq->mb_pool, &mbuf, 1);
+ if (unlikely(rc != HINIC_OK))
+ return NULL;
+
+ *dma_addr = rte_mbuf_data_iova_default(mbuf);
+
+ return mbuf;
+}
+
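+/* Bulk refill of RQ buffer descriptors: allocate mbufs in one burst, rewrite
+ * only the buffer addresses (the rest of each WQE was set up in
+ * hinic_rx_fill_wqe) and then advance the hardware producer index.
+ */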
+static inline void hinic_rearm_rxq_mbuf(struct hinic_rxq *rxq)
+{
+ u16 pi;
+ u32 i, free_wqebbs, rearm_wqebbs, exp_wqebbs;
+ dma_addr_t dma_addr;
+ struct hinic_rq_wqe *rq_wqe;
+ struct rte_mbuf **rearm_mbufs;
+
+	/* check free wqebbs for rearm */
+ free_wqebbs = HINIC_GET_RQ_FREE_WQEBBS(rxq);
+ if (unlikely(free_wqebbs < rxq->rx_free_thresh))
+ return;
+
+ /* get rearm mbuf array */
+ pi = HINIC_GET_RQ_LOCAL_PI(rxq);
+ rearm_mbufs = (struct rte_mbuf **)(&rxq->rx_info[pi]);
+
+	/* limit the rearm count so it does not wrap past the ring end */
+ exp_wqebbs = rxq->q_depth - pi;
+ if (free_wqebbs < exp_wqebbs)
+ exp_wqebbs = free_wqebbs;
+
+ /* alloc mbuf in bulk */
+ rearm_wqebbs = hinic_rx_alloc_mbuf_bulk(rxq, rearm_mbufs, exp_wqebbs);
+ if (unlikely(rearm_wqebbs == 0))
+ return;
+
+ /* rearm rx mbuf */
+ rq_wqe = WQ_WQE_ADDR(rxq->wq, (u32)pi);
+ for (i = 0; i < rearm_wqebbs; i++) {
+ dma_addr = rte_mbuf_data_iova_default(rearm_mbufs[i]);
+ rq_wqe->buf_desc.addr_high =
+ cpu_to_be32(upper_32_bits(dma_addr));
+ rq_wqe->buf_desc.addr_low =
+ cpu_to_be32(lower_32_bits(dma_addr));
+ rq_wqe++;
+ }
+ rxq->wq->prod_idx += rearm_wqebbs;
+ rxq->wq->delta -= rearm_wqebbs;
+
+ /* update rq hw_pi */
+ rte_wmb();
+ HINIC_UPDATE_RQ_HW_PI(rxq, pi + rearm_wqebbs);
+}
+
+void hinic_rx_alloc_pkts(struct hinic_rxq *rxq)
+{
+ struct hinic_nic_dev *nic_dev = rxq->nic_dev;
+ struct hinic_rq_wqe *rq_wqe;
+ struct hinic_rx_info *rx_info;
+ struct rte_mbuf *mb;
+ dma_addr_t dma_addr;
+ u16 pi = 0;
+ int i, free_wqebbs;
+
+ free_wqebbs = HINIC_GET_RQ_FREE_WQEBBS(rxq);
+ for (i = 0; i < free_wqebbs; i++) {
+ mb = hinic_rx_alloc_mbuf(rxq, &dma_addr);
+ if (unlikely(!mb)) {
+ rxq->rxq_stats.rx_nombuf++;
+ break;
+ }
+
+ rq_wqe = hinic_get_rq_wqe(nic_dev->hwdev, rxq->q_id, &pi);
+ if (unlikely(!rq_wqe)) {
+ rte_pktmbuf_free(mb);
+ break;
+ }
+
+ /* fill buffer address only */
+ rq_wqe->buf_desc.addr_high =
+ cpu_to_be32(upper_32_bits(dma_addr));
+ rq_wqe->buf_desc.addr_low =
+ cpu_to_be32(lower_32_bits(dma_addr));
+
+ rx_info = &rxq->rx_info[pi];
+ rx_info->mbuf = mb;
+ }
+
+ if (likely(i > 0)) {
+ rte_wmb();
+ HINIC_UPDATE_RQ_HW_PI(rxq, pi + 1);
+ }
+}
+
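+/* Receive burst: poll completion entries, convert each cqe from big endian,
+ * fill in mbuf metadata (vlan, checksum, rss hash, lro) and finally rearm
+ * the receive ring.
+ */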
+u16 hinic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, u16 nb_pkts)
+{
+ struct rte_mbuf *rxm;
+ struct hinic_rxq *rxq = rx_queue;
+ struct hinic_rx_info *rx_info;
+ volatile struct hinic_rq_cqe *rx_cqe;
+ u16 rx_buf_len, pkts = 0;
+ u16 sw_ci, ci_mask, wqebb_cnt = 0;
+ u32 pkt_len, status, vlan_len, lro_num;
+ u64 rx_bytes = 0;
+ struct hinic_rq_cqe cqe;
+ u32 offload_type, rss_hash;
+
+ rx_buf_len = rxq->buf_len;
+
+ /* 1. get polling start ci */
+ ci_mask = HINIC_GET_RQ_WQE_MASK(rxq);
+ sw_ci = HINIC_GET_RQ_LOCAL_CI(rxq);
+
+ while (pkts < nb_pkts) {
+ /* 2. current ci is done */
+ rx_cqe = &rxq->rx_cqe[sw_ci];
+ status = __atomic_load_n(&rx_cqe->status, __ATOMIC_ACQUIRE);
+ if (!HINIC_GET_RX_DONE_BE(status))
+ break;
+
+ /* convert cqe and get packet length */
+ hinic_rq_cqe_be_to_cpu32(&cqe, (volatile void *)rx_cqe);
+ vlan_len = cqe.vlan_len;
+
+ rx_info = &rxq->rx_info[sw_ci];
+ rxm = rx_info->mbuf;
+
+ /* 3. next ci point and prefetch */
+ sw_ci++;
+ sw_ci &= ci_mask;
+
+ /* prefetch next mbuf first 64B */
+ rte_prefetch0(rxq->rx_info[sw_ci].mbuf);
+
+ /* 4. jumbo frame process */
+ pkt_len = HINIC_GET_RX_PKT_LEN(vlan_len);
+ if (likely(pkt_len <= rx_buf_len)) {
+ rxm->data_len = pkt_len;
+ rxm->pkt_len = pkt_len;
+ wqebb_cnt++;
+ } else {
+ rxm->data_len = rx_buf_len;
+ rxm->pkt_len = rx_buf_len;
+
+			/* for jumbo frames the remaining ci updates are
+			 * done by the hinic_recv_jumbo_pkt function.
+			 */
+ HINIC_UPDATE_RQ_LOCAL_CI(rxq, wqebb_cnt + 1);
+ wqebb_cnt = 0;
+ hinic_recv_jumbo_pkt(rxq, rxm, pkt_len - rx_buf_len);
+ sw_ci = HINIC_GET_RQ_LOCAL_CI(rxq);
+ }
+
+ /* 5. vlan/checksum/rss/pkt_type/gro offload */
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rxm->port = rxq->port_id;
+ offload_type = cqe.offload_type;
+
+ /* vlan offload */
+ rxm->ol_flags |= hinic_rx_vlan(offload_type, vlan_len,
+ &rxm->vlan_tci);
+
+ /* checksum offload */
+ rxm->ol_flags |= hinic_rx_csum(cqe.status, rxq);
+
+ /* rss hash offload */
+ rss_hash = cqe.rss_hash;
+ rxm->ol_flags |= hinic_rx_rss_hash(offload_type, rss_hash,
+ &rxm->hash.rss);
+
+ /* lro offload */
+ lro_num = HINIC_GET_RX_NUM_LRO(cqe.status);
+ if (unlikely(lro_num != 0)) {
+ rxm->ol_flags |= PKT_RX_LRO;
+ rxm->tso_segsz = pkt_len / lro_num;
+ }
+
+ /* 6. clear done bit */
+ rx_cqe->status = 0;
+
+ rx_bytes += pkt_len;
+ rx_pkts[pkts++] = rxm;
+ }
+
+ if (pkts) {
+ /* 7. update ci */
+ HINIC_UPDATE_RQ_LOCAL_CI(rxq, wqebb_cnt);
+
+ /* do packet stats */
+ rxq->rxq_stats.packets += pkts;
+ rxq->rxq_stats.bytes += rx_bytes;
+ }
+ rxq->rxq_stats.burst_pkts = pkts;
+
+ /* 8. rearm mbuf to rxq */
+ hinic_rearm_rxq_mbuf(rxq);
+
+ return pkts;
+}
diff --git a/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_rx.h b/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_rx.h
new file mode 100644
index 000000000..49fa56517
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_rx.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_PMD_RX_H_
+#define _HINIC_PMD_RX_H_
+
+#define HINIC_DEFAULT_RX_FREE_THRESH 32
+
+#define HINIC_RSS_OFFLOAD_ALL ( \
+ ETH_RSS_IPV4 | \
+ ETH_RSS_FRAG_IPV4 |\
+ ETH_RSS_NONFRAG_IPV4_TCP | \
+ ETH_RSS_NONFRAG_IPV4_UDP | \
+ ETH_RSS_IPV6 | \
+ ETH_RSS_FRAG_IPV6 | \
+ ETH_RSS_NONFRAG_IPV6_TCP | \
+ ETH_RSS_NONFRAG_IPV6_UDP | \
+ ETH_RSS_IPV6_EX | \
+ ETH_RSS_IPV6_TCP_EX | \
+ ETH_RSS_IPV6_UDP_EX)
+
+enum rq_completion_fmt {
+ RQ_COMPLETE_SGE = 1
+};
+
+struct hinic_rq_ctrl {
+ u32 ctrl_fmt;
+};
+
+struct hinic_rq_cqe {
+ u32 status;
+ u32 vlan_len;
+ u32 offload_type;
+ u32 rss_hash;
+
+ u32 rsvd[4];
+} __rte_cache_aligned;
+
+struct hinic_rq_cqe_sect {
+ struct hinic_sge sge;
+ u32 rsvd;
+};
+
+struct hinic_rq_bufdesc {
+ u32 addr_high;
+ u32 addr_low;
+};
+
+struct hinic_rq_wqe {
+ struct hinic_rq_ctrl ctrl;
+ u32 rsvd;
+ struct hinic_rq_cqe_sect cqe_sect;
+ struct hinic_rq_bufdesc buf_desc;
+};
+
+struct hinic_rxq_stats {
+ u64 packets;
+ u64 bytes;
+ u64 rx_nombuf;
+ u64 errors;
+ u64 rx_discards;
+ u64 burst_pkts;
+};
+
+/* Attention: do not add any member to hinic_rx_info, as the rxq bulk
+ * rearm mode writes mbuf pointers directly into the rx_info array.
+ */
+struct hinic_rx_info {
+ struct rte_mbuf *mbuf;
+};
+
+struct hinic_rxq {
+ struct hinic_wq *wq;
+ volatile u16 *pi_virt_addr;
+
+ u16 port_id;
+ u16 q_id;
+ u16 q_depth;
+ u16 buf_len;
+
+ u16 rx_free_thresh;
+ u16 rxinfo_align_end;
+
+ u32 socket_id;
+
+ unsigned long status;
+ struct hinic_rxq_stats rxq_stats;
+
+ struct hinic_nic_dev *nic_dev;
+
+ struct hinic_rx_info *rx_info;
+ volatile struct hinic_rq_cqe *rx_cqe;
+
+ dma_addr_t cqe_start_paddr;
+ void *cqe_start_vaddr;
+ struct rte_mempool *mb_pool;
+};
+
+int hinic_setup_rx_resources(struct hinic_rxq *rxq);
+
+void hinic_free_all_rx_resources(struct rte_eth_dev *eth_dev);
+
+void hinic_free_all_rx_mbuf(struct rte_eth_dev *eth_dev);
+
+void hinic_free_rx_resources(struct hinic_rxq *rxq);
+
+u16 hinic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, u16 nb_pkts);
+
+void hinic_free_all_rx_mbufs(struct hinic_rxq *rxq);
+
+void hinic_rx_alloc_pkts(struct hinic_rxq *rxq);
+
+void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats);
+
+void hinic_rxq_stats_reset(struct hinic_rxq *rxq);
+
+int hinic_config_mq_mode(struct rte_eth_dev *dev, bool on);
+
+int hinic_rx_configure(struct rte_eth_dev *dev);
+
+void hinic_rx_remove_configure(struct rte_eth_dev *dev);
+
+void hinic_get_func_rx_buf_size(struct hinic_nic_dev *nic_dev);
+
+int hinic_create_rq(struct hinic_hwdev *hwdev, u16 q_id,
+ u16 rq_depth, unsigned int socket_id);
+
+void hinic_destroy_rq(struct hinic_hwdev *hwdev, u16 q_id);
+
+#endif /* _HINIC_PMD_RX_H_ */
diff --git a/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_tx.c b/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_tx.c
new file mode 100644
index 000000000..4d999678f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_tx.c
@@ -0,0 +1,1334 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include <rte_mbuf.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+#include <rte_udp.h>
+#include <rte_ip.h>
+#ifdef __ARM64_NEON__
+#include <arm_neon.h>
+#endif
+
+#include "base/hinic_compat.h"
+#include "base/hinic_pmd_hwdev.h"
+#include "base/hinic_pmd_hwif.h"
+#include "base/hinic_pmd_wq.h"
+#include "base/hinic_pmd_nicio.h"
+#include "base/hinic_pmd_niccfg.h"
+#include "hinic_pmd_ethdev.h"
+#include "hinic_pmd_tx.h"
+
+/* packet header and tx offload info */
+#define ETHER_LEN_NO_VLAN 14
+#define ETHER_LEN_WITH_VLAN 18
+#define HEADER_LEN_OFFSET 2
+#define VXLANLEN 8
+#define MAX_PLD_OFFSET 221
+#define MAX_SINGLE_SGE_SIZE 65536
+#define TSO_ENABLE 1
+#define TX_MSS_DEFAULT 0x3E00
+#define TX_MSS_MIN 0x50
+
+#define HINIC_NONTSO_PKT_MAX_SGE 17 /* non-tso max sge 17 */
+#define HINIC_NONTSO_SEG_NUM_INVALID(num) \
+ ((num) > HINIC_NONTSO_PKT_MAX_SGE)
+
+#define HINIC_TSO_PKT_MAX_SGE 127 /* tso max sge 127 */
+#define HINIC_TSO_SEG_NUM_INVALID(num) ((num) > HINIC_TSO_PKT_MAX_SGE)
+
+#define HINIC_TX_OUTER_CHECKSUM_FLAG_SET 1
+#define HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET 0
+
+/* sizeof(struct hinic_sq_bufdesc) == 16, shift 4 */
+#define HINIC_BUF_DESC_SIZE(nr_descs) (SIZE_8BYTES(((u32)nr_descs) << 4))
+
+#define MASKED_SQ_IDX(sq, idx) ((idx) & (sq)->wq->mask)
+
+/* SQ_CTRL */
+#define SQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0
+#define SQ_CTRL_TASKSECT_LEN_SHIFT 16
+#define SQ_CTRL_DATA_FORMAT_SHIFT 22
+#define SQ_CTRL_LEN_SHIFT 29
+#define SQ_CTRL_OWNER_SHIFT 31
+
+#define SQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFFU
+#define SQ_CTRL_TASKSECT_LEN_MASK 0x1FU
+#define SQ_CTRL_DATA_FORMAT_MASK 0x1U
+#define SQ_CTRL_LEN_MASK 0x3U
+#define SQ_CTRL_OWNER_MASK 0x1U
+
+#define SQ_CTRL_SET(val, member) \
+ (((val) & SQ_CTRL_##member##_MASK) << SQ_CTRL_##member##_SHIFT)
+
+#define SQ_CTRL_QUEUE_INFO_PLDOFF_SHIFT 2
+#define SQ_CTRL_QUEUE_INFO_UFO_SHIFT 10
+#define SQ_CTRL_QUEUE_INFO_TSO_SHIFT 11
+#define SQ_CTRL_QUEUE_INFO_TCPUDP_CS_SHIFT 12
+#define SQ_CTRL_QUEUE_INFO_MSS_SHIFT 13
+#define SQ_CTRL_QUEUE_INFO_SCTP_SHIFT 27
+#define SQ_CTRL_QUEUE_INFO_UC_SHIFT 28
+#define SQ_CTRL_QUEUE_INFO_PRI_SHIFT 29
+
+#define SQ_CTRL_QUEUE_INFO_PLDOFF_MASK 0xFFU
+#define SQ_CTRL_QUEUE_INFO_UFO_MASK 0x1U
+#define SQ_CTRL_QUEUE_INFO_TSO_MASK 0x1U
+#define SQ_CTRL_QUEUE_INFO_TCPUDP_CS_MASK 0x1U
+#define SQ_CTRL_QUEUE_INFO_MSS_MASK 0x3FFFU
+#define SQ_CTRL_QUEUE_INFO_SCTP_MASK 0x1U
+#define SQ_CTRL_QUEUE_INFO_UC_MASK 0x1U
+#define SQ_CTRL_QUEUE_INFO_PRI_MASK 0x7U
+
+#define SQ_CTRL_QUEUE_INFO_SET(val, member) \
+ (((u32)(val) & SQ_CTRL_QUEUE_INFO_##member##_MASK) << \
+ SQ_CTRL_QUEUE_INFO_##member##_SHIFT)
+
+#define SQ_CTRL_QUEUE_INFO_GET(val, member) \
+ (((val) >> SQ_CTRL_QUEUE_INFO_##member##_SHIFT) & \
+ SQ_CTRL_QUEUE_INFO_##member##_MASK)
+
+#define SQ_CTRL_QUEUE_INFO_CLEAR(val, member) \
+ ((val) & (~(SQ_CTRL_QUEUE_INFO_##member##_MASK << \
+ SQ_CTRL_QUEUE_INFO_##member##_SHIFT)))
+
+#define SQ_TASK_INFO0_L2HDR_LEN_SHIFT 0
+#define SQ_TASK_INFO0_L4OFFLOAD_SHIFT 8
+#define SQ_TASK_INFO0_INNER_L3TYPE_SHIFT 10
+#define SQ_TASK_INFO0_VLAN_OFFLOAD_SHIFT 12
+#define SQ_TASK_INFO0_PARSE_FLAG_SHIFT 13
+#define SQ_TASK_INFO0_UFO_AVD_SHIFT 14
+#define SQ_TASK_INFO0_TSO_UFO_SHIFT 15
+#define SQ_TASK_INFO0_VLAN_TAG_SHIFT 16
+
+#define SQ_TASK_INFO0_L2HDR_LEN_MASK 0xFFU
+#define SQ_TASK_INFO0_L4OFFLOAD_MASK 0x3U
+#define SQ_TASK_INFO0_INNER_L3TYPE_MASK 0x3U
+#define SQ_TASK_INFO0_VLAN_OFFLOAD_MASK 0x1U
+#define SQ_TASK_INFO0_PARSE_FLAG_MASK 0x1U
+#define SQ_TASK_INFO0_UFO_AVD_MASK 0x1U
+#define SQ_TASK_INFO0_TSO_UFO_MASK 0x1U
+#define SQ_TASK_INFO0_VLAN_TAG_MASK 0xFFFFU
+
+#define SQ_TASK_INFO0_SET(val, member) \
+ (((u32)(val) & SQ_TASK_INFO0_##member##_MASK) << \
+ SQ_TASK_INFO0_##member##_SHIFT)
+
+#define SQ_TASK_INFO1_MD_TYPE_SHIFT 8
+#define SQ_TASK_INFO1_INNER_L4LEN_SHIFT 16
+#define SQ_TASK_INFO1_INNER_L3LEN_SHIFT 24
+
+#define SQ_TASK_INFO1_MD_TYPE_MASK 0xFFU
+#define SQ_TASK_INFO1_INNER_L4LEN_MASK 0xFFU
+#define SQ_TASK_INFO1_INNER_L3LEN_MASK 0xFFU
+
+#define SQ_TASK_INFO1_SET(val, member) \
+ (((val) & SQ_TASK_INFO1_##member##_MASK) << \
+ SQ_TASK_INFO1_##member##_SHIFT)
+
+#define SQ_TASK_INFO2_TUNNEL_L4LEN_SHIFT 0
+#define SQ_TASK_INFO2_OUTER_L3LEN_SHIFT 8
+#define SQ_TASK_INFO2_TUNNEL_L4TYPE_SHIFT 16
+#define SQ_TASK_INFO2_OUTER_L3TYPE_SHIFT 24
+
+#define SQ_TASK_INFO2_TUNNEL_L4LEN_MASK 0xFFU
+#define SQ_TASK_INFO2_OUTER_L3LEN_MASK 0xFFU
+#define SQ_TASK_INFO2_TUNNEL_L4TYPE_MASK 0x7U
+#define SQ_TASK_INFO2_OUTER_L3TYPE_MASK 0x3U
+
+#define SQ_TASK_INFO2_SET(val, member) \
+ (((val) & SQ_TASK_INFO2_##member##_MASK) << \
+ SQ_TASK_INFO2_##member##_SHIFT)
+
+#define SQ_TASK_INFO4_L2TYPE_SHIFT 31
+
+#define SQ_TASK_INFO4_L2TYPE_MASK 0x1U
+
+#define SQ_TASK_INFO4_SET(val, member) \
+ (((u32)(val) & SQ_TASK_INFO4_##member##_MASK) << \
+ SQ_TASK_INFO4_##member##_SHIFT)
+
+/* SQ_DB */
+#define SQ_DB_OFF 0x00000800
+#define SQ_DB_INFO_HI_PI_SHIFT 0
+#define SQ_DB_INFO_QID_SHIFT 8
+#define SQ_DB_INFO_CFLAG_SHIFT 23
+#define SQ_DB_INFO_COS_SHIFT 24
+#define SQ_DB_INFO_TYPE_SHIFT 27
+
+#define SQ_DB_INFO_HI_PI_MASK 0xFFU
+#define SQ_DB_INFO_QID_MASK 0x3FFU
+#define SQ_DB_INFO_CFLAG_MASK 0x1U
+#define SQ_DB_INFO_COS_MASK 0x7U
+#define SQ_DB_INFO_TYPE_MASK 0x1FU
+#define SQ_DB_INFO_SET(val, member) \
+ (((u32)(val) & SQ_DB_INFO_##member##_MASK) << \
+ SQ_DB_INFO_##member##_SHIFT)
+
+#define SQ_DB 1
+#define SQ_CFLAG_DP 0 /* CFLAG_DATA_PATH */
+
+#define SQ_DB_PI_LOW_MASK 0xFF
+#define SQ_DB_PI_LOW(pi) ((pi) & SQ_DB_PI_LOW_MASK)
+#define SQ_DB_PI_HI_SHIFT 8
+#define SQ_DB_PI_HIGH(pi) ((pi) >> SQ_DB_PI_HI_SHIFT)
+#define SQ_DB_ADDR(sq, pi) \
+ ((u64 *)((u8 __iomem *)((sq)->db_addr) + SQ_DB_OFF) + SQ_DB_PI_LOW(pi))
+
+/* txq wq operations */
+#define HINIC_GET_SQ_WQE_MASK(txq) ((txq)->wq->mask)
+
+#define HINIC_GET_SQ_HW_CI(txq) \
+ ((be16_to_cpu(*(txq)->cons_idx_addr)) & HINIC_GET_SQ_WQE_MASK(txq))
+
+#define HINIC_GET_SQ_LOCAL_CI(txq) \
+ (((txq)->wq->cons_idx) & HINIC_GET_SQ_WQE_MASK(txq))
+
+#define HINIC_UPDATE_SQ_LOCAL_CI(txq, wqebb_cnt) \
+ do { \
+ (txq)->wq->cons_idx += wqebb_cnt; \
+ (txq)->wq->delta += wqebb_cnt; \
+ } while (0)
+
+#define HINIC_GET_SQ_FREE_WQEBBS(txq) ((txq)->wq->delta - 1)
+
+#define HINIC_IS_SQ_EMPTY(txq) (((txq)->wq->delta) == ((txq)->q_depth))
+
+#define BUF_DESC_SIZE_SHIFT 4
+
+#define HINIC_SQ_WQE_SIZE(num_sge) \
+ (sizeof(struct hinic_sq_ctrl) + sizeof(struct hinic_sq_task) + \
+ (unsigned int)((num_sge) << BUF_DESC_SIZE_SHIFT))
+
+#define HINIC_SQ_WQEBB_CNT(num_sge) \
+ (int)(ALIGN(HINIC_SQ_WQE_SIZE((u32)num_sge), \
+ HINIC_SQ_WQEBB_SIZE) >> HINIC_SQ_WQEBB_SHIFT)
+
+
+static inline void hinic_sq_wqe_cpu_to_be32(void *data, int nr_wqebb)
+{
+#if defined(__X86_64_SSE__)
+ int i;
+ __m128i *wqe_line = (__m128i *)data;
+ __m128i shuf_mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10,
+ 11, 4, 5, 6, 7, 0, 1, 2, 3);
+
+ for (i = 0; i < nr_wqebb; i++) {
+ /* convert 64B wqebb using 4 SSE instructions */
+ wqe_line[0] = _mm_shuffle_epi8(wqe_line[0], shuf_mask);
+ wqe_line[1] = _mm_shuffle_epi8(wqe_line[1], shuf_mask);
+ wqe_line[2] = _mm_shuffle_epi8(wqe_line[2], shuf_mask);
+ wqe_line[3] = _mm_shuffle_epi8(wqe_line[3], shuf_mask);
+ wqe_line += 4;
+ }
+#elif defined(__ARM64_NEON__)
+ int i;
+ uint8x16_t *wqe_line = (uint8x16_t *)data;
+ const uint8x16_t shuf_mask = {3, 2, 1, 0, 7, 6, 5, 4, 11, 10,
+ 9, 8, 15, 14, 13, 12};
+
+ for (i = 0; i < nr_wqebb; i++) {
+ wqe_line[0] = vqtbl1q_u8(wqe_line[0], shuf_mask);
+ wqe_line[1] = vqtbl1q_u8(wqe_line[1], shuf_mask);
+ wqe_line[2] = vqtbl1q_u8(wqe_line[2], shuf_mask);
+ wqe_line[3] = vqtbl1q_u8(wqe_line[3], shuf_mask);
+ wqe_line += 4;
+ }
+#else
+ hinic_cpu_to_be32(data, nr_wqebb * HINIC_SQ_WQEBB_SIZE);
+#endif
+}
+
+static inline void hinic_sge_cpu_to_be32(void *data, int nr_sge)
+{
+#if defined(__X86_64_SSE__)
+ int i;
+ __m128i *sge_line = (__m128i *)data;
+ __m128i shuf_mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10,
+ 11, 4, 5, 6, 7, 0, 1, 2, 3);
+
+ for (i = 0; i < nr_sge; i++) {
+		/* convert 16B sge using 1 SSE instruction */
+ *sge_line = _mm_shuffle_epi8(*sge_line, shuf_mask);
+ sge_line++;
+ }
+#elif defined(__ARM64_NEON__)
+ int i;
+ uint8x16_t *sge_line = (uint8x16_t *)data;
+ const uint8x16_t shuf_mask = {3, 2, 1, 0, 7, 6, 5, 4, 11, 10,
+ 9, 8, 15, 14, 13, 12};
+
+ for (i = 0; i < nr_sge; i++) {
+ *sge_line = vqtbl1q_u8(*sge_line, shuf_mask);
+ sge_line++;
+ }
+#else
+ hinic_cpu_to_be32(data, nr_sge * sizeof(struct hinic_sq_bufdesc));
+#endif
+}
+
+void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
+{
+ if (!txq || !stats) {
+ PMD_DRV_LOG(ERR, "Txq or stats is NULL");
+ return;
+ }
+
+ memcpy(stats, &txq->txq_stats, sizeof(txq->txq_stats));
+}
+
+void hinic_txq_stats_reset(struct hinic_txq *txq)
+{
+ struct hinic_txq_stats *txq_stats;
+
+ if (txq == NULL)
+ return;
+
+ txq_stats = &txq->txq_stats;
+ memset(txq_stats, 0, sizeof(*txq_stats));
+}
+
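+/* Coalesce sge_cnt chained mbuf segments into a single mbuf taken from the
+ * copy mempool; used when a packet carries more segments than one wqe can
+ * describe.
+ */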
+static inline struct rte_mbuf *hinic_copy_tx_mbuf(struct hinic_nic_dev *nic_dev,
+ struct rte_mbuf *mbuf,
+ u16 sge_cnt)
+{
+ struct rte_mbuf *dst_mbuf;
+ u32 offset = 0;
+ u16 i;
+
+ if (unlikely(!nic_dev->cpy_mpool))
+ return NULL;
+
+ dst_mbuf = rte_pktmbuf_alloc(nic_dev->cpy_mpool);
+ if (unlikely(!dst_mbuf))
+ return NULL;
+
+ dst_mbuf->data_off = 0;
+ for (i = 0; i < sge_cnt; i++) {
+ rte_memcpy((char *)dst_mbuf->buf_addr + offset,
+ (char *)mbuf->buf_addr + mbuf->data_off,
+ mbuf->data_len);
+ dst_mbuf->data_len += mbuf->data_len;
+ offset += mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+
+ dst_mbuf->pkt_len = dst_mbuf->data_len;
+
+ return dst_mbuf;
+}
+
+static inline bool hinic_mbuf_dma_map_sge(struct hinic_txq *txq,
+ struct rte_mbuf *mbuf,
+ struct hinic_sq_bufdesc *sges,
+ struct hinic_wqe_info *sqe_info)
+{
+ dma_addr_t dma_addr;
+ u16 i, around_sges;
+ u16 nb_segs = sqe_info->sge_cnt - sqe_info->cpy_mbuf_cnt;
+ u16 real_nb_segs = mbuf->nb_segs;
+ struct hinic_sq_bufdesc *sge_idx = sges;
+
+ if (unlikely(sqe_info->around)) {
+		/* part of the wqe is at the sq bottom while the rest
+		 * wraps around to the sq head
+		 */
+ i = 0;
+ for (sge_idx = sges; (u64)sge_idx <= txq->sq_bot_sge_addr;
+ sge_idx++) {
+ if (unlikely(mbuf == NULL)) {
+ txq->txq_stats.mbuf_null++;
+ return false;
+ }
+
+ dma_addr = rte_mbuf_data_iova(mbuf);
+ if (unlikely(mbuf->data_len == 0)) {
+ txq->txq_stats.sge_len0++;
+ return false;
+ }
+ hinic_set_sge((struct hinic_sge *)sge_idx, dma_addr,
+ mbuf->data_len);
+ mbuf = mbuf->next;
+ i++;
+ }
+
+ around_sges = nb_segs - i;
+ sge_idx = (struct hinic_sq_bufdesc *)
+ ((void *)txq->sq_head_addr);
+ for (; i < nb_segs; i++) {
+ if (unlikely(mbuf == NULL)) {
+ txq->txq_stats.mbuf_null++;
+ return false;
+ }
+
+ dma_addr = rte_mbuf_data_iova(mbuf);
+ if (unlikely(mbuf->data_len == 0)) {
+ txq->txq_stats.sge_len0++;
+ return false;
+ }
+ hinic_set_sge((struct hinic_sge *)sge_idx, dma_addr,
+ mbuf->data_len);
+ mbuf = mbuf->next;
+ sge_idx++;
+ }
+
+		/* convert the sges at the sq head to big endian */
+ hinic_sge_cpu_to_be32((void *)txq->sq_head_addr, around_sges);
+ } else {
+ /* wqe is in continuous space */
+ for (i = 0; i < nb_segs; i++) {
+ if (unlikely(mbuf == NULL)) {
+ txq->txq_stats.mbuf_null++;
+ return false;
+ }
+
+ dma_addr = rte_mbuf_data_iova(mbuf);
+ if (unlikely(mbuf->data_len == 0)) {
+ txq->txq_stats.sge_len0++;
+ return false;
+ }
+ hinic_set_sge((struct hinic_sge *)sge_idx, dma_addr,
+ mbuf->data_len);
+ mbuf = mbuf->next;
+ sge_idx++;
+ }
+ }
+
+	/* non-tso pkts over 17 sges: copy the trailing mbufs to one buffer */
+ if (unlikely(sqe_info->cpy_mbuf_cnt != 0)) {
+		/* copy excess mbuf segs into one buffer, costing performance */
+ txq->txq_stats.cpy_pkts += 1;
+ mbuf = hinic_copy_tx_mbuf(txq->nic_dev, mbuf,
+ real_nb_segs - nb_segs);
+ if (unlikely(!mbuf))
+ return false;
+
+ txq->tx_info[sqe_info->pi].cpy_mbuf = mbuf;
+
+ /* deal with the last mbuf */
+ dma_addr = rte_mbuf_data_iova(mbuf);
+ if (unlikely(mbuf->data_len == 0)) {
+ txq->txq_stats.sge_len0++;
+ return false;
+ }
+ hinic_set_sge((struct hinic_sge *)sge_idx, dma_addr,
+ mbuf->data_len);
+ if (unlikely(sqe_info->around))
+ hinic_sge_cpu_to_be32((void *)sge_idx, 1);
+ }
+
+ return true;
+}
+
+static inline void hinic_fill_sq_wqe_header(struct hinic_sq_ctrl *ctrl,
+ u32 queue_info, int nr_descs,
+ u8 owner)
+{
+ u32 ctrl_size, task_size, bufdesc_size;
+
+ ctrl_size = SIZE_8BYTES(sizeof(struct hinic_sq_ctrl));
+ task_size = SIZE_8BYTES(sizeof(struct hinic_sq_task));
+ bufdesc_size = HINIC_BUF_DESC_SIZE(nr_descs);
+
+ ctrl->ctrl_fmt = SQ_CTRL_SET(bufdesc_size, BUFDESC_SECT_LEN) |
+ SQ_CTRL_SET(task_size, TASKSECT_LEN) |
+ SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
+ SQ_CTRL_SET(ctrl_size, LEN) |
+ SQ_CTRL_SET(owner, OWNER);
+
+ ctrl->queue_info = queue_info;
+ ctrl->queue_info |= SQ_CTRL_QUEUE_INFO_SET(1U, UC);
+
+ if (!SQ_CTRL_QUEUE_INFO_GET(ctrl->queue_info, MSS)) {
+ ctrl->queue_info |=
+ SQ_CTRL_QUEUE_INFO_SET(TX_MSS_DEFAULT, MSS);
+ } else if (SQ_CTRL_QUEUE_INFO_GET(ctrl->queue_info, MSS) < TX_MSS_MIN) {
+ /* mss should not be less than 80 */
+ ctrl->queue_info =
+ SQ_CTRL_QUEUE_INFO_CLEAR(ctrl->queue_info, MSS);
+ ctrl->queue_info |= SQ_CTRL_QUEUE_INFO_SET(TX_MSS_MIN, MSS);
+ }
+}
+
+static inline bool hinic_is_tso_sge_valid(struct rte_mbuf *mbuf,
+ struct hinic_tx_offload_info
+ *poff_info,
+ struct hinic_wqe_info *sqe_info)
+{
+ u32 total_len, limit_len, checked_len, left_len, adjust_mss;
+ u32 i, first_mss_sges, left_sges;
+ struct rte_mbuf *mbuf_head, *mbuf_pre;
+
+ left_sges = mbuf->nb_segs;
+ mbuf_head = mbuf;
+
+ /* tso sge number validation */
+ if (unlikely(left_sges >= HINIC_NONTSO_PKT_MAX_SGE)) {
+ checked_len = 0;
+ adjust_mss = mbuf->tso_segsz >= TX_MSS_MIN ?
+ mbuf->tso_segsz : TX_MSS_MIN;
+ limit_len = adjust_mss + poff_info->payload_offset;
+ first_mss_sges = HINIC_NONTSO_PKT_MAX_SGE;
+
+		/* every 17 contiguous mbuf segs must be checked once */
+ while (left_sges >= HINIC_NONTSO_PKT_MAX_SGE) {
+			/* the total length of the first 16 mbufs must be
+			 * equal to or greater than limit_len
+			 */
+ total_len = 0;
+ for (i = 0; i < first_mss_sges; i++) {
+ total_len += mbuf->data_len;
+ mbuf_pre = mbuf;
+ mbuf = mbuf->next;
+ if (total_len >= limit_len) {
+ limit_len = adjust_mss;
+ break;
+ }
+ }
+
+ checked_len += total_len;
+
+ /* try to copy if not valid */
+ if (unlikely(first_mss_sges == i)) {
+ left_sges -= first_mss_sges;
+ checked_len -= mbuf_pre->data_len;
+
+ left_len = mbuf_head->pkt_len - checked_len;
+ if (left_len > HINIC_COPY_MBUF_SIZE)
+ return false;
+
+ sqe_info->sge_cnt = mbuf_head->nb_segs -
+ left_sges;
+ sqe_info->cpy_mbuf_cnt = 1;
+
+ return true;
+ }
+ first_mss_sges = (HINIC_NONTSO_PKT_MAX_SGE - 1);
+
+			/* continue with the next 16 mbufs */
+ left_sges -= (i + 1);
+ } /* end of while */
+ }
+
+ sqe_info->sge_cnt = mbuf_head->nb_segs;
+ return true;
+}
+
+static inline void
+hinic_set_l4_csum_info(struct hinic_sq_task *task,
+ u32 *queue_info, struct hinic_tx_offload_info *poff_info)
+{
+ u32 tcp_udp_cs, sctp = 0;
+ u16 l2hdr_len;
+
+ if (unlikely(poff_info->inner_l4_type == SCTP_OFFLOAD_ENABLE))
+ sctp = 1;
+
+ tcp_udp_cs = poff_info->inner_l4_tcp_udp;
+
+ if (poff_info->tunnel_type == TUNNEL_UDP_CSUM ||
+ poff_info->tunnel_type == TUNNEL_UDP_NO_CSUM) {
+ l2hdr_len = poff_info->outer_l2_len;
+
+ task->pkt_info2 |=
+ SQ_TASK_INFO2_SET(poff_info->outer_l3_type, OUTER_L3TYPE) |
+ SQ_TASK_INFO2_SET(poff_info->outer_l3_len, OUTER_L3LEN);
+ task->pkt_info2 |=
+ SQ_TASK_INFO2_SET(poff_info->tunnel_type, TUNNEL_L4TYPE) |
+ SQ_TASK_INFO2_SET(poff_info->tunnel_length, TUNNEL_L4LEN);
+ } else {
+ l2hdr_len = poff_info->inner_l2_len;
+ }
+
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(l2hdr_len, L2HDR_LEN);
+ task->pkt_info1 |=
+ SQ_TASK_INFO1_SET(poff_info->inner_l3_len, INNER_L3LEN);
+ task->pkt_info0 |=
+ SQ_TASK_INFO0_SET(poff_info->inner_l3_type, INNER_L3TYPE);
+ task->pkt_info1 |=
+ SQ_TASK_INFO1_SET(poff_info->inner_l4_len, INNER_L4LEN);
+ task->pkt_info0 |=
+ SQ_TASK_INFO0_SET(poff_info->inner_l4_type, L4OFFLOAD);
+ *queue_info |=
+ SQ_CTRL_QUEUE_INFO_SET(poff_info->payload_offset, PLDOFF) |
+ SQ_CTRL_QUEUE_INFO_SET(tcp_udp_cs, TCPUDP_CS) |
+ SQ_CTRL_QUEUE_INFO_SET(sctp, SCTP);
+}
+
+static inline void
+hinic_set_tso_info(struct hinic_sq_task *task,
+ u32 *queue_info, struct rte_mbuf *mbuf,
+ struct hinic_tx_offload_info *poff_info)
+{
+ hinic_set_l4_csum_info(task, queue_info, poff_info);
+
+ /* wqe for tso */
+ task->pkt_info0 |=
+ SQ_TASK_INFO0_SET(poff_info->inner_l3_type, INNER_L3TYPE);
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(TSO_ENABLE, TSO_UFO);
+ *queue_info |= SQ_CTRL_QUEUE_INFO_SET(TSO_ENABLE, TSO);
+ /* qsf was initialized in prepare_sq_wqe */
+ *queue_info = SQ_CTRL_QUEUE_INFO_CLEAR(*queue_info, MSS);
+ *queue_info |= SQ_CTRL_QUEUE_INFO_SET(mbuf->tso_segsz, MSS);
+}
+
+static inline void
+hinic_set_vlan_tx_offload(struct hinic_sq_task *task,
+ u32 *queue_info, u16 vlan_tag, u16 vlan_pri)
+{
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(vlan_tag, VLAN_TAG) |
+ SQ_TASK_INFO0_SET(1U, VLAN_OFFLOAD);
+
+ *queue_info |= SQ_CTRL_QUEUE_INFO_SET(vlan_pri, PRI);
+}
+
+static inline void
+hinic_fill_tx_offload_info(struct rte_mbuf *mbuf,
+ struct hinic_sq_task *task, u32 *queue_info,
+ struct hinic_tx_offload_info *tx_off_info)
+{
+ u16 vlan_tag;
+ uint64_t ol_flags = mbuf->ol_flags;
+
+ /* clear DW0~2 of task section for offload */
+ task->pkt_info0 = 0;
+ task->pkt_info1 = 0;
+ task->pkt_info2 = 0;
+
+ /* Base VLAN */
+ if (unlikely(ol_flags & PKT_TX_VLAN_PKT)) {
+ vlan_tag = mbuf->vlan_tci;
+ hinic_set_vlan_tx_offload(task, queue_info, vlan_tag,
+ vlan_tag >> VLAN_PRIO_SHIFT);
+ }
+
+ /* non checksum or tso */
+ if (unlikely(!(ol_flags & HINIC_TX_CKSUM_OFFLOAD_MASK)))
+ return;
+
+ if ((ol_flags & PKT_TX_TCP_SEG))
+ /* set tso info for task and qsf */
+ hinic_set_tso_info(task, queue_info, mbuf, tx_off_info);
+ else /* just support l4 checksum offload */
+ hinic_set_l4_csum_info(task, queue_info, tx_off_info);
+}
+
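+/* Reclaim completed send WQEBBs: walk from the local consumer index towards
+ * the hardware consumer index, free the transmitted mbufs (in bulk when they
+ * come from the same mempool) and advance the local consumer index.
+ */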
+static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq)
+{
+ struct hinic_tx_info *tx_info;
+ struct rte_mbuf *mbuf, *m, *mbuf_free[HINIC_MAX_TX_FREE_BULK];
+ int i, nb_free = 0;
+ u16 hw_ci, sw_ci, sq_mask;
+ int wqebb_cnt = 0;
+
+ hw_ci = HINIC_GET_SQ_HW_CI(txq);
+ sw_ci = HINIC_GET_SQ_LOCAL_CI(txq);
+ sq_mask = HINIC_GET_SQ_WQE_MASK(txq);
+
+ for (i = 0; i < txq->tx_free_thresh; ++i) {
+ tx_info = &txq->tx_info[sw_ci];
+ if (hw_ci == sw_ci ||
+ (((hw_ci - sw_ci) & sq_mask) < tx_info->wqebb_cnt))
+ break;
+
+ sw_ci = (sw_ci + tx_info->wqebb_cnt) & sq_mask;
+
+ if (unlikely(tx_info->cpy_mbuf != NULL)) {
+ rte_pktmbuf_free(tx_info->cpy_mbuf);
+ tx_info->cpy_mbuf = NULL;
+ }
+
+ wqebb_cnt += tx_info->wqebb_cnt;
+ mbuf = tx_info->mbuf;
+
+ if (likely(mbuf->nb_segs == 1)) {
+ m = rte_pktmbuf_prefree_seg(mbuf);
+ tx_info->mbuf = NULL;
+
+ if (unlikely(m == NULL))
+ continue;
+
+ mbuf_free[nb_free++] = m;
+ if (unlikely(m->pool != mbuf_free[0]->pool ||
+ nb_free >= HINIC_MAX_TX_FREE_BULK)) {
+ rte_mempool_put_bulk(mbuf_free[0]->pool,
+ (void **)mbuf_free, (nb_free - 1));
+ nb_free = 0;
+ mbuf_free[nb_free++] = m;
+ }
+ } else {
+ rte_pktmbuf_free(mbuf);
+ tx_info->mbuf = NULL;
+ }
+ }
+
+ if (nb_free > 0)
+ rte_mempool_put_bulk(mbuf_free[0]->pool, (void **)mbuf_free,
+ nb_free);
+
+ HINIC_UPDATE_SQ_LOCAL_CI(txq, wqebb_cnt);
+}
+
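+/* Reserve wqebb_cnt WQEBBs from the send queue. When the reservation reaches
+ * the end of the ring the owner bit is flipped, and when it wraps past the
+ * end wqe_info->around is set so the caller splits the wqe between the ring
+ * bottom and the ring head.
+ */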
+static inline struct hinic_sq_wqe *
+hinic_get_sq_wqe(struct hinic_txq *txq, int wqebb_cnt,
+ struct hinic_wqe_info *wqe_info)
+{
+ u32 cur_pi, end_pi;
+ u16 remain_wqebbs;
+ struct hinic_sq *sq = txq->sq;
+ struct hinic_wq *wq = txq->wq;
+
+ /* record current pi */
+ cur_pi = MASKED_WQE_IDX(wq, wq->prod_idx);
+ end_pi = cur_pi + wqebb_cnt;
+
+ /* update next pi and delta */
+ wq->prod_idx += wqebb_cnt;
+ wq->delta -= wqebb_cnt;
+
+ /* return current pi and owner */
+ wqe_info->pi = cur_pi;
+ wqe_info->owner = sq->owner;
+ wqe_info->around = 0;
+ wqe_info->seq_wqebbs = wqebb_cnt;
+
+ if (unlikely(end_pi >= txq->q_depth)) {
+ /* update owner of next prod_idx */
+ sq->owner = !sq->owner;
+
+ /* turn around to head */
+ if (unlikely(end_pi > txq->q_depth)) {
+ wqe_info->around = 1;
+ remain_wqebbs = txq->q_depth - cur_pi;
+ wqe_info->seq_wqebbs = remain_wqebbs;
+ }
+ }
+
+ return (struct hinic_sq_wqe *)WQ_WQE_ADDR(wq, cur_pi);
+}
+
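+/* Compute the IPv4 pseudo-header checksum used to seed the hardware L4
+ * checksum offload; the length field is set to zero when TSO is requested.
+ */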
+static inline uint16_t
+hinic_ipv4_phdr_cksum(const struct rte_ipv4_hdr *ipv4_hdr, uint64_t ol_flags)
+{
+ struct ipv4_psd_header {
+ uint32_t src_addr; /* IP address of source host. */
+ uint32_t dst_addr; /* IP address of destination host. */
+ uint8_t zero; /* zero. */
+ uint8_t proto; /* L4 protocol type. */
+ uint16_t len; /* L4 length. */
+ } psd_hdr;
+ uint8_t ihl;
+
+ psd_hdr.src_addr = ipv4_hdr->src_addr;
+ psd_hdr.dst_addr = ipv4_hdr->dst_addr;
+ psd_hdr.zero = 0;
+ psd_hdr.proto = ipv4_hdr->next_proto_id;
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ psd_hdr.len = 0;
+ } else {
+		/* ipv4_hdr->version_ihl is a single byte; ihl occupies the
+		 * lower 4 bits in units of 4 bytes
+		 */
+ ihl = (ipv4_hdr->version_ihl & 0xF) << 2;
+ psd_hdr.len =
+ rte_cpu_to_be_16(rte_be_to_cpu_16(ipv4_hdr->total_length) -
+ ihl);
+ }
+ return rte_raw_cksum(&psd_hdr, sizeof(psd_hdr));
+}
+
+static inline uint16_t
+hinic_ipv6_phdr_cksum(const struct rte_ipv6_hdr *ipv6_hdr, uint64_t ol_flags)
+{
+ uint32_t sum;
+ struct {
+ uint32_t len; /* L4 length. */
+ uint32_t proto; /* L4 protocol - top 3 bytes must be zero */
+ } psd_hdr;
+
+ psd_hdr.proto = (ipv6_hdr->proto << 24);
+ if (ol_flags & PKT_TX_TCP_SEG)
+ psd_hdr.len = 0;
+ else
+ psd_hdr.len = ipv6_hdr->payload_len;
+
+ sum = __rte_raw_cksum(ipv6_hdr->src_addr,
+ sizeof(ipv6_hdr->src_addr) + sizeof(ipv6_hdr->dst_addr), 0);
+ sum = __rte_raw_cksum(&psd_hdr, sizeof(psd_hdr), sum);
+ return __rte_raw_cksum_reduce(sum);
+}
+
+static inline void
+hinic_get_pld_offset(struct rte_mbuf *m, struct hinic_tx_offload_info *off_info,
+ int outer_cs_flag)
+{
+ uint64_t ol_flags = m->ol_flags;
+
+ if (outer_cs_flag == 1) {
+ if ((ol_flags & PKT_TX_UDP_CKSUM) == PKT_TX_UDP_CKSUM) {
+ off_info->payload_offset = m->outer_l2_len +
+ m->outer_l3_len + m->l2_len + m->l3_len;
+ } else if ((ol_flags & PKT_TX_TCP_CKSUM) ||
+ (ol_flags & PKT_TX_TCP_SEG)) {
+ off_info->payload_offset = m->outer_l2_len +
+ m->outer_l3_len + m->l2_len +
+ m->l3_len + m->l4_len;
+ }
+ } else {
+ if ((ol_flags & PKT_TX_UDP_CKSUM) == PKT_TX_UDP_CKSUM) {
+ off_info->payload_offset = m->l2_len + m->l3_len;
+ } else if ((ol_flags & PKT_TX_TCP_CKSUM) ||
+ (ol_flags & PKT_TX_TCP_SEG)) {
+ off_info->payload_offset = m->l2_len + m->l3_len +
+ m->l4_len;
+ }
+ }
+}
+
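+/* Parse the outer Ethernet/VLAN and L3 headers directly from the packet data
+ * to derive outer_l2_len and outer_l3_len for tunnel packets that did not
+ * request outer checksum offload.
+ */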
+static inline void
+hinic_analyze_tx_info(struct rte_mbuf *mbuf,
+ struct hinic_tx_offload_info *off_info)
+{
+ struct rte_ether_hdr *eth_hdr;
+ struct rte_vlan_hdr *vlan_hdr;
+ struct rte_ipv4_hdr *ip4h;
+ u16 pkt_type;
+ u8 *hdr;
+
+ hdr = (u8 *)rte_pktmbuf_mtod(mbuf, u8*);
+ eth_hdr = (struct rte_ether_hdr *)hdr;
+ pkt_type = rte_be_to_cpu_16(eth_hdr->ether_type);
+
+ if (pkt_type == RTE_ETHER_TYPE_VLAN) {
+ off_info->outer_l2_len = ETHER_LEN_WITH_VLAN;
+ vlan_hdr = (struct rte_vlan_hdr *)(hdr + 1);
+ pkt_type = rte_be_to_cpu_16(vlan_hdr->eth_proto);
+ } else {
+ off_info->outer_l2_len = ETHER_LEN_NO_VLAN;
+ }
+
+ if (pkt_type == RTE_ETHER_TYPE_IPV4) {
+ ip4h = (struct rte_ipv4_hdr *)(hdr + off_info->outer_l2_len);
+ off_info->outer_l3_len = (ip4h->version_ihl & 0xf) <<
+ HEADER_LEN_OFFSET;
+ } else if (pkt_type == RTE_ETHER_TYPE_IPV6) {
+		/* ipv6 extension headers are not supported */
+ off_info->outer_l3_len = sizeof(struct rte_ipv6_hdr);
+ }
+}
+
+static inline int
+hinic_tx_offload_pkt_prepare(struct rte_mbuf *m,
+ struct hinic_tx_offload_info *off_info)
+{
+ struct rte_ipv4_hdr *ipv4_hdr;
+ struct rte_ipv6_hdr *ipv6_hdr;
+ struct rte_tcp_hdr *tcp_hdr;
+ struct rte_udp_hdr *udp_hdr;
+ struct rte_ether_hdr *eth_hdr;
+ struct rte_vlan_hdr *vlan_hdr;
+ u16 eth_type = 0;
+ uint64_t inner_l3_offset;
+ uint64_t ol_flags = m->ol_flags;
+
+ /* Check if the packets set available offload flags */
+ if (!(ol_flags & HINIC_TX_CKSUM_OFFLOAD_MASK))
+ return 0;
+
+ /* Support only vxlan offload */
+ if ((ol_flags & PKT_TX_TUNNEL_MASK) &&
+ !(ol_flags & PKT_TX_TUNNEL_VXLAN))
+ return -ENOTSUP;
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ if (rte_validate_tx_offload(m) != 0)
+ return -EINVAL;
+#endif
+
+ if (ol_flags & PKT_TX_TUNNEL_VXLAN) {
+ if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
+ (ol_flags & PKT_TX_OUTER_IPV6) ||
+ (ol_flags & PKT_TX_TCP_SEG)) {
+ inner_l3_offset = m->l2_len + m->outer_l2_len +
+ m->outer_l3_len;
+ off_info->outer_l2_len = m->outer_l2_len;
+ off_info->outer_l3_len = m->outer_l3_len;
+			/* only vxlan tunnel packets are supported */
+ off_info->inner_l2_len = m->l2_len - VXLANLEN -
+ sizeof(*udp_hdr);
+ off_info->inner_l3_len = m->l3_len;
+ off_info->inner_l4_len = m->l4_len;
+ off_info->tunnel_length = m->l2_len;
+ off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
+
+ hinic_get_pld_offset(m, off_info,
+ HINIC_TX_OUTER_CHECKSUM_FLAG_SET);
+ } else {
+ inner_l3_offset = m->l2_len;
+ hinic_analyze_tx_info(m, off_info);
+			/* only vxlan tunnel packets are supported */
+ off_info->inner_l2_len = m->l2_len - VXLANLEN -
+ sizeof(*udp_hdr) - off_info->outer_l2_len -
+ off_info->outer_l3_len;
+ off_info->inner_l3_len = m->l3_len;
+ off_info->inner_l4_len = m->l4_len;
+ off_info->tunnel_length = m->l2_len -
+ off_info->outer_l2_len - off_info->outer_l3_len;
+ off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
+
+ hinic_get_pld_offset(m, off_info,
+ HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET);
+ }
+ } else {
+ inner_l3_offset = m->l2_len;
+ off_info->inner_l2_len = m->l2_len;
+ off_info->inner_l3_len = m->l3_len;
+ off_info->inner_l4_len = m->l4_len;
+ off_info->tunnel_type = NOT_TUNNEL;
+
+ hinic_get_pld_offset(m, off_info,
+ HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET);
+ }
+
+ /* invalid udp or tcp header */
+ if (unlikely(off_info->payload_offset > MAX_PLD_OFFSET))
+ return -EINVAL;
+
+	/* Process the outer udp pseudo-header checksum */
+ if ((ol_flags & PKT_TX_TUNNEL_VXLAN) && ((ol_flags & PKT_TX_TCP_SEG) ||
+ (ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
+ (ol_flags & PKT_TX_OUTER_IPV6))) {
+
+		/* inner_l4_tcp_udp must be set so that the outer udp
+		 * checksum is calculated for vxlan packets without inner
+		 * l3 and l4 headers
+		 */
+ off_info->inner_l4_tcp_udp = 1;
+
+ eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+ eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
+
+ if (eth_type == RTE_ETHER_TYPE_VLAN) {
+ vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
+ eth_type = rte_be_to_cpu_16(vlan_hdr->eth_proto);
+ }
+
+ if (eth_type == RTE_ETHER_TYPE_IPV4) {
+ ipv4_hdr =
+ rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
+ m->outer_l2_len);
+ off_info->outer_l3_type = IPV4_PKT_WITH_CHKSUM_OFFLOAD;
+ ipv4_hdr->hdr_checksum = 0;
+
+ udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
+ m->outer_l3_len);
+ udp_hdr->dgram_cksum = 0;
+ } else if (eth_type == RTE_ETHER_TYPE_IPV6) {
+ off_info->outer_l3_type = IPV6_PKT;
+ ipv6_hdr =
+ rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
+ m->outer_l2_len);
+
+ udp_hdr =
+ rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
+ (m->outer_l2_len +
+ m->outer_l3_len));
+ udp_hdr->dgram_cksum = 0;
+ }
+ } else if (ol_flags & PKT_TX_OUTER_IPV4) {
+ off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
+ off_info->inner_l4_tcp_udp = 1;
+ off_info->outer_l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
+ }
+
+ if (ol_flags & PKT_TX_IPV4)
+ off_info->inner_l3_type = (ol_flags & PKT_TX_IP_CKSUM) ?
+ IPV4_PKT_WITH_CHKSUM_OFFLOAD :
+ IPV4_PKT_NO_CHKSUM_OFFLOAD;
+ else if (ol_flags & PKT_TX_IPV6)
+ off_info->inner_l3_type = IPV6_PKT;
+
+ /* Process the pseudo-header checksum */
+ if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM) {
+ if (ol_flags & PKT_TX_IPV4) {
+ ipv4_hdr =
+ rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
+ inner_l3_offset);
+
+ if (ol_flags & PKT_TX_IP_CKSUM)
+ ipv4_hdr->hdr_checksum = 0;
+
+ udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
+ m->l3_len);
+ udp_hdr->dgram_cksum =
+ hinic_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
+ } else {
+ ipv6_hdr =
+ rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
+ inner_l3_offset);
+
+ udp_hdr =
+ rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
+ (inner_l3_offset + m->l3_len));
+ udp_hdr->dgram_cksum =
+ hinic_ipv6_phdr_cksum(ipv6_hdr, ol_flags);
+ }
+
+ off_info->inner_l4_type = UDP_OFFLOAD_ENABLE;
+ off_info->inner_l4_tcp_udp = 1;
+ } else if (((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) ||
+ (ol_flags & PKT_TX_TCP_SEG)) {
+ if (ol_flags & PKT_TX_IPV4) {
+ ipv4_hdr =
+ rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
+ inner_l3_offset);
+
+ if (ol_flags & PKT_TX_IP_CKSUM)
+ ipv4_hdr->hdr_checksum = 0;
+
+ /* non-TSO tcp */
+ tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr +
+ m->l3_len);
+ tcp_hdr->cksum =
+ hinic_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
+ } else {
+ ipv6_hdr =
+ rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
+ inner_l3_offset);
+ /* non-TSO tcp */
+ tcp_hdr =
+ rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *,
+ (inner_l3_offset + m->l3_len));
+ tcp_hdr->cksum =
+ hinic_ipv6_phdr_cksum(ipv6_hdr, ol_flags);
+ }
+
+ off_info->inner_l4_type = TCP_OFFLOAD_ENABLE;
+ off_info->inner_l4_tcp_udp = 1;
+ } else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_SCTP_CKSUM) {
+ off_info->inner_l4_type = SCTP_OFFLOAD_ENABLE;
+ off_info->inner_l4_tcp_udp = 0;
+ off_info->inner_l4_len = sizeof(struct rte_sctp_hdr);
+ }
+
+ return 0;
+}
+
+static inline bool hinic_get_sge_txoff_info(struct rte_mbuf *mbuf_pkt,
+ struct hinic_wqe_info *sqe_info,
+ struct hinic_tx_offload_info
+ *off_info)
+{
+ u16 i, total_len, sge_cnt = mbuf_pkt->nb_segs;
+ struct rte_mbuf *mbuf;
+ int ret;
+
+ memset(off_info, 0, sizeof(*off_info));
+
+ ret = hinic_tx_offload_pkt_prepare(mbuf_pkt, off_info);
+ if (unlikely(ret))
+ return false;
+
+ sqe_info->cpy_mbuf_cnt = 0;
+
+ /* non tso mbuf */
+ if (likely(!(mbuf_pkt->ol_flags & PKT_TX_TCP_SEG))) {
+ if (unlikely(mbuf_pkt->pkt_len > MAX_SINGLE_SGE_SIZE)) {
+			/* a non-tso packet length must be less than 64KB */
+ return false;
+ } else if (unlikely(HINIC_NONTSO_SEG_NUM_INVALID(sge_cnt))) {
+			/* a non-tso packet supports at most 17 buffers; when
+			 * the mbuf has more segs, the trailing segs are
+			 * copied into one buffer
+			 */
+ total_len = 0;
+ mbuf = mbuf_pkt;
+ for (i = 0; i < (HINIC_NONTSO_PKT_MAX_SGE - 1) ; i++) {
+ total_len += mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+
+			/* the remaining data must fit in the 4KB copy mbuf */
+ if ((u32)(total_len + (u16)HINIC_COPY_MBUF_SIZE) <
+ mbuf_pkt->pkt_len)
+ return false;
+
+ sqe_info->sge_cnt = HINIC_NONTSO_PKT_MAX_SGE;
+ sqe_info->cpy_mbuf_cnt = 1;
+ return true;
+ }
+
+ /* valid non tso mbuf */
+ sqe_info->sge_cnt = sge_cnt;
+ } else {
+ /* tso mbuf */
+ if (unlikely(HINIC_TSO_SEG_NUM_INVALID(sge_cnt)))
+ /* too many mbuf segs */
+ return false;
+
+ /* check tso mbuf segs are valid or not */
+ if (unlikely(!hinic_is_tso_sge_valid(mbuf_pkt,
+ off_info, sqe_info)))
+ return false;
+ }
+
+ return true;
+}
+
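+/* Ring the send queue doorbell: the low bits of the producer index select
+ * the doorbell address and the high bits are carried in the doorbell data,
+ * which is written in big endian after a write barrier.
+ */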
+static inline void hinic_sq_write_db(struct hinic_sq *sq, int cos)
+{
+ u16 prod_idx;
+ u32 hi_prod_idx;
+ struct hinic_sq_db sq_db;
+
+ prod_idx = MASKED_SQ_IDX(sq, sq->wq->prod_idx);
+ hi_prod_idx = SQ_DB_PI_HIGH(prod_idx);
+
+ sq_db.db_info = SQ_DB_INFO_SET(hi_prod_idx, HI_PI) |
+ SQ_DB_INFO_SET(SQ_DB, TYPE) |
+ SQ_DB_INFO_SET(SQ_CFLAG_DP, CFLAG) |
+ SQ_DB_INFO_SET(cos, COS) |
+ SQ_DB_INFO_SET(sq->q_id, QID);
+
+ /* Data should be written to HW in Big Endian Format */
+ sq_db.db_info = cpu_to_be32(sq_db.db_info);
+
+ /* Write all before the doorbell */
+ rte_wmb();
+ writel(sq_db.db_info, SQ_DB_ADDR(sq, prod_idx));
+}
+
+u16 hinic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, u16 nb_pkts)
+{
+ int free_wqebb_cnt, wqe_wqebb_cnt;
+ u32 queue_info, tx_bytes = 0;
+ u16 nb_tx;
+ struct hinic_wqe_info sqe_info;
+ struct hinic_tx_offload_info off_info;
+ struct rte_mbuf *mbuf_pkt;
+ struct hinic_txq *txq = tx_queue;
+ struct hinic_tx_info *tx_info;
+ struct hinic_sq_wqe *sq_wqe;
+ struct hinic_sq_task *task;
+
+ /* reclaim tx mbuf before xmit new packet */
+ if (HINIC_GET_SQ_FREE_WQEBBS(txq) < txq->tx_free_thresh)
+ hinic_xmit_mbuf_cleanup(txq);
+
+ /* tx loop routine */
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ mbuf_pkt = *tx_pkts++;
+ queue_info = 0;
+
+		/* 1. parse sge and tx offload info from the mbuf */
+ if (unlikely(!hinic_get_sge_txoff_info(mbuf_pkt,
+ &sqe_info, &off_info))) {
+ txq->txq_stats.off_errs++;
+ break;
+ }
+
+ /* 2. try to get enough wqebb */
+ wqe_wqebb_cnt = HINIC_SQ_WQEBB_CNT(sqe_info.sge_cnt);
+ free_wqebb_cnt = HINIC_GET_SQ_FREE_WQEBBS(txq);
+ if (unlikely(wqe_wqebb_cnt > free_wqebb_cnt)) {
+ /* reclaim again */
+ hinic_xmit_mbuf_cleanup(txq);
+ free_wqebb_cnt = HINIC_GET_SQ_FREE_WQEBBS(txq);
+ if (unlikely(wqe_wqebb_cnt > free_wqebb_cnt)) {
+ txq->txq_stats.tx_busy += (nb_pkts - nb_tx);
+ break;
+ }
+ }
+
+		/* 3. get the sq tail wqe address from the wqe page;
+		 * the sq has enough wqebbs for this packet
+		 */
+ sq_wqe = hinic_get_sq_wqe(txq, wqe_wqebb_cnt, &sqe_info);
+
+ /* 4. fill sq wqe sge section */
+ if (unlikely(!hinic_mbuf_dma_map_sge(txq, mbuf_pkt,
+ sq_wqe->buf_descs,
+ &sqe_info))) {
+ hinic_return_sq_wqe(txq->nic_dev->hwdev, txq->q_id,
+ wqe_wqebb_cnt, sqe_info.owner);
+ txq->txq_stats.off_errs++;
+ break;
+ }
+
+ /* 5. fill sq wqe task section and queue info */
+ task = &sq_wqe->task;
+
+ /* tx packet offload configure */
+ hinic_fill_tx_offload_info(mbuf_pkt, task, &queue_info,
+ &off_info);
+
+ /* 6. record tx info */
+ tx_info = &txq->tx_info[sqe_info.pi];
+ tx_info->mbuf = mbuf_pkt;
+ tx_info->wqebb_cnt = wqe_wqebb_cnt;
+
+ /* 7. fill sq wqe header section */
+ hinic_fill_sq_wqe_header(&sq_wqe->ctrl, queue_info,
+ sqe_info.sge_cnt, sqe_info.owner);
+
+		/* 8. convert the continuous or wrapped wqe to big endian */
+ hinic_sq_wqe_cpu_to_be32(sq_wqe, sqe_info.seq_wqebbs);
+
+ tx_bytes += mbuf_pkt->pkt_len;
+ }
+
+ /* 9. write sq doorbell in burst mode */
+ if (nb_tx) {
+ hinic_sq_write_db(txq->sq, txq->cos);
+
+ txq->txq_stats.packets += nb_tx;
+ txq->txq_stats.bytes += tx_bytes;
+ }
+ txq->txq_stats.burst_pkts = nb_tx;
+
+ return nb_tx;
+}
+
+void hinic_free_all_tx_mbufs(struct hinic_txq *txq)
+{
+ u16 ci;
+ struct hinic_nic_dev *nic_dev = txq->nic_dev;
+ struct hinic_tx_info *tx_info;
+ int free_wqebbs = hinic_get_sq_free_wqebbs(nic_dev->hwdev,
+ txq->q_id) + 1;
+
+ while (free_wqebbs < txq->q_depth) {
+ ci = hinic_get_sq_local_ci(nic_dev->hwdev, txq->q_id);
+
+ tx_info = &txq->tx_info[ci];
+
+ if (unlikely(tx_info->cpy_mbuf != NULL)) {
+ rte_pktmbuf_free(tx_info->cpy_mbuf);
+ tx_info->cpy_mbuf = NULL;
+ }
+
+ rte_pktmbuf_free(tx_info->mbuf);
+ hinic_update_sq_local_ci(nic_dev->hwdev, txq->q_id,
+ tx_info->wqebb_cnt);
+
+ free_wqebbs += tx_info->wqebb_cnt;
+ tx_info->mbuf = NULL;
+ }
+}
+
+void hinic_free_all_tx_resources(struct rte_eth_dev *eth_dev)
+{
+ u16 q_id;
+ struct hinic_nic_dev *nic_dev =
+ HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
+
+ for (q_id = 0; q_id < nic_dev->num_sq; q_id++) {
+ if (eth_dev->data->tx_queues != NULL)
+ eth_dev->data->tx_queues[q_id] = NULL;
+
+ if (nic_dev->txqs[q_id] == NULL)
+ continue;
+
+		/* stop tx queue and free queued tx mbufs */
+ hinic_free_all_tx_mbufs(nic_dev->txqs[q_id]);
+ hinic_free_tx_resources(nic_dev->txqs[q_id]);
+
+ /* free txq */
+ kfree(nic_dev->txqs[q_id]);
+ nic_dev->txqs[q_id] = NULL;
+ }
+}
+
+void hinic_free_all_tx_mbuf(struct rte_eth_dev *eth_dev)
+{
+ u16 q_id;
+ struct hinic_nic_dev *nic_dev =
+ HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
+
+ for (q_id = 0; q_id < nic_dev->num_sq; q_id++)
+		/* stop tx queue and free queued tx mbufs */
+ hinic_free_all_tx_mbufs(nic_dev->txqs[q_id]);
+}
+
+int hinic_setup_tx_resources(struct hinic_txq *txq)
+{
+ u64 tx_info_sz;
+
+ tx_info_sz = txq->q_depth * sizeof(*txq->tx_info);
+ txq->tx_info = rte_zmalloc_socket("tx_info", tx_info_sz,
+ RTE_CACHE_LINE_SIZE, txq->socket_id);
+ if (!txq->tx_info)
+ return -ENOMEM;
+
+ return HINIC_OK;
+}
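hinic_setup_tx_resources() and hinic_free_tx_resources() below form the usual NUMA-aware allocation pairing: rte_zmalloc_socket() returns zeroed, aligned memory bound to the queue's socket and rte_free() releases it. A self-contained sketch of the same pattern with a hypothetical element type:

#include <rte_common.h>
#include <rte_malloc.h>

struct example_entry {
	void *cookie;
	int count;
};

/* Zeroed, cache-line aligned table on the requested NUMA socket;
 * returns NULL on failure, release with rte_free().
 */
static struct example_entry *example_alloc_table(uint16_t depth, int socket_id)
{
	return rte_zmalloc_socket("example_table",
				  (size_t)depth * sizeof(struct example_entry),
				  RTE_CACHE_LINE_SIZE, socket_id);
}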
+
+void hinic_free_tx_resources(struct hinic_txq *txq)
+{
+ if (txq->tx_info == NULL)
+ return;
+
+ rte_free(txq->tx_info);
+ txq->tx_info = NULL;
+}
+
+int hinic_create_sq(struct hinic_hwdev *hwdev, u16 q_id,
+ u16 sq_depth, unsigned int socket_id)
+{
+ int err;
+ struct hinic_nic_io *nic_io = hwdev->nic_io;
+ struct hinic_qp *qp = &nic_io->qps[q_id];
+ struct hinic_sq *sq = &qp->sq;
+ void __iomem *db_addr;
+ volatile u32 *ci_addr;
+
+ sq->sq_depth = sq_depth;
+ nic_io->sq_depth = sq_depth;
+
+ /* alloc wq */
+ err = hinic_wq_allocate(nic_io->hwdev, &nic_io->sq_wq[q_id],
+ HINIC_SQ_WQEBB_SHIFT, nic_io->sq_depth,
+ socket_id);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to allocate WQ for SQ");
+ return err;
+ }
+
+ /* alloc sq doorbell space */
+ err = hinic_alloc_db_addr(nic_io->hwdev, &db_addr);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to init db addr");
+ goto alloc_db_err;
+ }
+
+ /* clear hardware ci */
+ ci_addr = (volatile u32 *)HINIC_CI_VADDR(nic_io->ci_vaddr_base, q_id);
+ *ci_addr = 0;
+
+ sq->q_id = q_id;
+ sq->wq = &nic_io->sq_wq[q_id];
+ sq->owner = 1;
+ sq->cons_idx_addr = (volatile u16 *)ci_addr;
+ sq->db_addr = db_addr;
+
+ return HINIC_OK;
+
+alloc_db_err:
+ hinic_wq_free(nic_io->hwdev, &nic_io->sq_wq[q_id]);
+
+ return err;
+}
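hinic_create_sq() uses the standard unwind-on-error shape: each failure path releases only what the earlier steps acquired, in reverse order, behind a labelled exit. A generic, self-contained sketch of that idiom; example_ctx and the malloc-backed resources are hypothetical stand-ins for the WQ and doorbell allocations above.

#include <errno.h>
#include <stdlib.h>

struct example_ctx {
	void *a;
	void *b;
};

static int example_create(struct example_ctx *ctx, size_t a_sz, size_t b_sz)
{
	ctx->a = malloc(a_sz);
	if (ctx->a == NULL)
		return -ENOMEM;

	ctx->b = malloc(b_sz);
	if (ctx->b == NULL)
		goto alloc_b_err;	/* undo step 1 only */

	return 0;

alloc_b_err:
	free(ctx->a);
	ctx->a = NULL;
	return -ENOMEM;
}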
+
+void hinic_destroy_sq(struct hinic_hwdev *hwdev, u16 q_id)
+{
+ struct hinic_nic_io *nic_io;
+ struct hinic_qp *qp;
+
+ nic_io = hwdev->nic_io;
+ qp = &nic_io->qps[q_id];
+
+ if (qp->sq.wq == NULL)
+ return;
+
+ hinic_free_db_addr(nic_io->hwdev, qp->sq.db_addr);
+ hinic_wq_free(nic_io->hwdev, qp->sq.wq);
+ qp->sq.wq = NULL;
+}
diff --git a/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_tx.h b/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_tx.h
new file mode 100644
index 000000000..d98abad8d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_tx.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_PMD_TX_H_
+#define _HINIC_PMD_TX_H_
+
+#define HINIC_DEFAULT_TX_FREE_THRESH 32
+#define HINIC_MAX_TX_FREE_BULK 64
+
+#define HINIC_GET_WQ_HEAD(txq) ((txq)->wq->queue_buf_vaddr)
+
+#define HINIC_GET_WQ_TAIL(txq) \
+ ((txq)->wq->queue_buf_vaddr + (txq)->wq->wq_buf_size)
+
+#define HINIC_TX_CKSUM_OFFLOAD_MASK ( \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_TCP_CKSUM | \
+ PKT_TX_UDP_CKSUM | \
+ PKT_TX_SCTP_CKSUM | \
+ PKT_TX_OUTER_IP_CKSUM | \
+ PKT_TX_TCP_SEG)
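HINIC_TX_CKSUM_OFFLOAD_MASK collects the PKT_TX_* flags the transmit path inspects; the matching application-side step is to set those flags and the header lengths on each mbuf before rte_eth_tx_burst(). A minimal sketch for an IPv4/TCP packet, assuming the port was started with the corresponding DEV_TX_OFFLOAD_* capabilities enabled:

#include <rte_mbuf.h>

/* Request IPv4 header and TCP checksum offload for one mbuf; l2_len and
 * l3_len tell the PMD where the headers begin.
 */
static void example_request_ipv4_tcp_csum(struct rte_mbuf *m,
					  uint16_t l2_len, uint16_t l3_len)
{
	m->l2_len = l2_len;
	m->l3_len = l3_len;
	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
}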
+
+enum sq_wqe_type {
+ SQ_NORMAL_WQE = 0,
+};
+
+/* tx offload info */
+struct hinic_tx_offload_info {
+ u8 outer_l2_len;
+ u8 outer_l3_type;
+ u16 outer_l3_len;
+
+ u8 inner_l2_len;
+ u8 inner_l3_type;
+ u16 inner_l3_len;
+
+ u8 tunnel_length;
+ u8 tunnel_type;
+ u8 inner_l4_type;
+ u8 inner_l4_len;
+
+ u16 payload_offset;
+ u8 inner_l4_tcp_udp;
+ u8 rsvd0;
+};
+
+/* tx sge info */
+struct hinic_wqe_info {
+ u16 pi;
+ u16 owner;
+ u16 around;
+ u16 seq_wqebbs;
+ u16 sge_cnt;
+ u16 cpy_mbuf_cnt;
+};
+
+struct hinic_sq_ctrl {
+ u32 ctrl_fmt;
+ u32 queue_info;
+};
+
+struct hinic_sq_task {
+ u32 pkt_info0;
+ u32 pkt_info1;
+ u32 pkt_info2;
+ u32 ufo_v6_identify;
+ u32 pkt_info4;
+ u32 rsvd5;
+};
+
+struct hinic_sq_bufdesc {
+ struct hinic_sge sge;
+ u32 rsvd;
+};
+
+struct hinic_sq_wqe {
+ /* sq wqe control section */
+ struct hinic_sq_ctrl ctrl;
+
+ /* sq task control section */
+ struct hinic_sq_task task;
+
+ /* sq sge section start address, 1~127 sges */
+ struct hinic_sq_bufdesc buf_descs[0];
+};
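Because buf_descs is a zero-length trailing array, a WQE's byte size follows directly from the structs above: the fixed control and task sections plus one buffer descriptor per SGE. A small illustrative helper (not a driver function):

#include <stddef.h>

static inline size_t example_sq_wqe_size(unsigned int nr_sges)
{
	/* ctrl + task sections, then one bufdesc per sge */
	return sizeof(struct hinic_sq_wqe) +
	       nr_sges * sizeof(struct hinic_sq_bufdesc);
}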
+
+struct hinic_txq_stats {
+ u64 packets;
+ u64 bytes;
+ u64 rl_drop;
+ u64 tx_busy;
+ u64 off_errs;
+ u64 cpy_pkts;
+ u64 burst_pkts;
+ u64 sge_len0;
+ u64 mbuf_null;
+};
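These per-queue counters are exported through hinic_txq_get_stats(), declared below; at the application level they usually surface in aggregate through the generic ethdev statistics API, as in this short consumer-side sketch:

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

static void example_print_tx_stats(uint16_t port_id)
{
	struct rte_eth_stats stats;

	if (rte_eth_stats_get(port_id, &stats) == 0)
		printf("port %" PRIu16 ": opackets=%" PRIu64 " obytes=%" PRIu64 "\n",
		       port_id, stats.opackets, stats.obytes);
}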
+
+struct hinic_tx_info {
+ struct rte_mbuf *mbuf;
+ int wqebb_cnt;
+ struct rte_mbuf *cpy_mbuf;
+};
+
+struct hinic_txq {
+ /* cacheline0 */
+ struct hinic_nic_dev *nic_dev;
+ struct hinic_wq *wq;
+ struct hinic_sq *sq;
+ volatile u16 *cons_idx_addr;
+ struct hinic_tx_info *tx_info;
+
+ u16 tx_free_thresh;
+ u16 port_id;
+ u16 q_id;
+ u16 q_depth;
+ u32 cos;
+ u32 socket_id;
+
+ /* cacheline1 */
+ struct hinic_txq_stats txq_stats;
+ u64 sq_head_addr;
+ u64 sq_bot_sge_addr;
+};
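The cacheline0/cacheline1 comments mark a hot/cold split: fields touched on every burst share the first cache line, while statistics and bottom-of-queue addresses sit in the next one. A generic sketch of the same layout technique; example_queue is hypothetical and uses the plain GCC attribute that DPDK's __rte_cache_aligned macro expands to.

#include <stdint.h>

#define EXAMPLE_CACHE_LINE 64

struct example_queue {
	/* cacheline0: fields read/written on every burst */
	void *ring;
	uint16_t head;
	uint16_t tail;

	/* cacheline1: colder fields, pushed onto the next cache line */
	uint64_t packets __attribute__((aligned(EXAMPLE_CACHE_LINE)));
	uint64_t bytes;
};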
+
+int hinic_setup_tx_resources(struct hinic_txq *txq);
+
+void hinic_free_all_tx_resources(struct rte_eth_dev *eth_dev);
+
+void hinic_free_all_tx_mbuf(struct rte_eth_dev *eth_dev);
+
+void hinic_free_tx_resources(struct hinic_txq *txq);
+
+u16 hinic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, u16 nb_pkts);
+
+void hinic_free_all_tx_mbufs(struct hinic_txq *txq);
+
+void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats);
+
+void hinic_txq_stats_reset(struct hinic_txq *txq);
+
+int hinic_create_sq(struct hinic_hwdev *hwdev, u16 q_id,
+ u16 sq_depth, unsigned int socket_id);
+
+void hinic_destroy_sq(struct hinic_hwdev *hwdev, u16 q_id);
+
+#endif /* _HINIC_PMD_TX_H_ */
diff --git a/src/spdk/dpdk/drivers/net/hinic/meson.build b/src/spdk/dpdk/drivers/net/hinic/meson.build
new file mode 100644
index 000000000..bc7e24639
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Huawei Technologies Co., Ltd
+
+subdir('base')
+objs = [base_objs]
+
+sources = files(
+ 'hinic_pmd_ethdev.c',
+ 'hinic_pmd_rx.c',
+ 'hinic_pmd_tx.c',
+ 'hinic_pmd_flow.c',
+ )
+
+includes += include_directories('base')
diff --git a/src/spdk/dpdk/drivers/net/hinic/rte_pmd_hinic_version.map b/src/spdk/dpdk/drivers/net/hinic/rte_pmd_hinic_version.map
new file mode 100644
index 000000000..f9f17e4f6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/rte_pmd_hinic_version.map
@@ -0,0 +1,3 @@
+DPDK_20.0 {
+ local: *;
+};