summaryrefslogtreecommitdiffstats
path: root/src/spdk/dpdk/drivers/common/octeontx2
diff options
context:
space:
mode:
Diffstat (limited to 'src/spdk/dpdk/drivers/common/octeontx2')
-rw-r--r--src/spdk/dpdk/drivers/common/octeontx2/Makefile39
-rw-r--r--src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_nix.h1391
-rw-r--r--src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_npa.h305
-rw-r--r--src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_npc.h482
-rw-r--r--src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_rvu.h212
-rw-r--r--src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_sdp.h184
-rw-r--r--src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_sso.h209
-rw-r--r--src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_ssow.h56
-rw-r--r--src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_tim.h34
-rw-r--r--src/spdk/dpdk/drivers/common/octeontx2/meson.build26
-rw-r--r--src/spdk/dpdk/drivers/common/octeontx2/otx2_common.c291
-rw-r--r--src/spdk/dpdk/drivers/common/octeontx2/otx2_common.h171
-rw-r--r--src/spdk/dpdk/drivers/common/octeontx2/otx2_dev.c1043
-rw-r--r--src/spdk/dpdk/drivers/common/octeontx2/otx2_dev.h154
-rw-r--r--src/spdk/dpdk/drivers/common/octeontx2/otx2_io_arm64.h95
-rw-r--r--src/spdk/dpdk/drivers/common/octeontx2/otx2_io_generic.h63
-rw-r--r--src/spdk/dpdk/drivers/common/octeontx2/otx2_irq.c254
-rw-r--r--src/spdk/dpdk/drivers/common/octeontx2/otx2_irq.h28
-rw-r--r--src/spdk/dpdk/drivers/common/octeontx2/otx2_mbox.c462
-rw-r--r--src/spdk/dpdk/drivers/common/octeontx2/otx2_mbox.h1773
-rw-r--r--src/spdk/dpdk/drivers/common/octeontx2/otx2_sec_idev.c183
-rw-r--r--src/spdk/dpdk/drivers/common/octeontx2/otx2_sec_idev.h43
-rw-r--r--src/spdk/dpdk/drivers/common/octeontx2/rte_common_octeontx2_version.map45
23 files changed, 7543 insertions, 0 deletions
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/Makefile b/src/spdk/dpdk/drivers/common/octeontx2/Makefile
new file mode 100644
index 000000000..260da8dd3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/Makefile
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(C) 2019 Marvell International Ltd.
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_common_octeontx2.a
+
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx2
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx2
+CFLAGS += -I$(RTE_SDK)/drivers/bus/pci
+
+ifneq ($(CONFIG_RTE_ARCH_64),y)
+CFLAGS += -Wno-int-to-pointer-cast
+CFLAGS += -Wno-pointer-to-int-cast
+ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+CFLAGS += -diag-disable 2259
+endif
+endif
+
+EXPORT_MAP := rte_common_octeontx2_version.map
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-y += otx2_dev.c
+SRCS-y += otx2_irq.c
+SRCS-y += otx2_mbox.c
+SRCS-y += otx2_common.c
+SRCS-y += otx2_sec_idev.c
+
+LDLIBS += -lrte_eal
+LDLIBS += -lrte_ethdev -lrte_kvargs
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_nix.h b/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_nix.h
new file mode 100644
index 000000000..e3b68505b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_nix.h
@@ -0,0 +1,1391 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_NIX_HW_H__
+#define __OTX2_NIX_HW_H__
+
+/* Register offsets */
+
+#define NIX_AF_CFG (0x0ull)
+#define NIX_AF_STATUS (0x10ull)
+#define NIX_AF_NDC_CFG (0x18ull)
+#define NIX_AF_CONST (0x20ull)
+#define NIX_AF_CONST1 (0x28ull)
+#define NIX_AF_CONST2 (0x30ull)
+#define NIX_AF_CONST3 (0x38ull)
+#define NIX_AF_SQ_CONST (0x40ull)
+#define NIX_AF_CQ_CONST (0x48ull)
+#define NIX_AF_RQ_CONST (0x50ull)
+#define NIX_AF_PSE_CONST (0x60ull)
+#define NIX_AF_TL1_CONST (0x70ull)
+#define NIX_AF_TL2_CONST (0x78ull)
+#define NIX_AF_TL3_CONST (0x80ull)
+#define NIX_AF_TL4_CONST (0x88ull)
+#define NIX_AF_MDQ_CONST (0x90ull)
+#define NIX_AF_MC_MIRROR_CONST (0x98ull)
+#define NIX_AF_LSO_CFG (0xa8ull)
+#define NIX_AF_BLK_RST (0xb0ull)
+#define NIX_AF_TX_TSTMP_CFG (0xc0ull)
+#define NIX_AF_RX_CFG (0xd0ull)
+#define NIX_AF_AVG_DELAY (0xe0ull)
+#define NIX_AF_CINT_DELAY (0xf0ull)
+#define NIX_AF_RX_MCAST_BASE (0x100ull)
+#define NIX_AF_RX_MCAST_CFG (0x110ull)
+#define NIX_AF_RX_MCAST_BUF_BASE (0x120ull)
+#define NIX_AF_RX_MCAST_BUF_CFG (0x130ull)
+#define NIX_AF_RX_MIRROR_BUF_BASE (0x140ull)
+#define NIX_AF_RX_MIRROR_BUF_CFG (0x148ull)
+#define NIX_AF_LF_RST (0x150ull)
+#define NIX_AF_GEN_INT (0x160ull)
+#define NIX_AF_GEN_INT_W1S (0x168ull)
+#define NIX_AF_GEN_INT_ENA_W1S (0x170ull)
+#define NIX_AF_GEN_INT_ENA_W1C (0x178ull)
+#define NIX_AF_ERR_INT (0x180ull)
+#define NIX_AF_ERR_INT_W1S (0x188ull)
+#define NIX_AF_ERR_INT_ENA_W1S (0x190ull)
+#define NIX_AF_ERR_INT_ENA_W1C (0x198ull)
+#define NIX_AF_RAS (0x1a0ull)
+#define NIX_AF_RAS_W1S (0x1a8ull)
+#define NIX_AF_RAS_ENA_W1S (0x1b0ull)
+#define NIX_AF_RAS_ENA_W1C (0x1b8ull)
+#define NIX_AF_RVU_INT (0x1c0ull)
+#define NIX_AF_RVU_INT_W1S (0x1c8ull)
+#define NIX_AF_RVU_INT_ENA_W1S (0x1d0ull)
+#define NIX_AF_RVU_INT_ENA_W1C (0x1d8ull)
+#define NIX_AF_TCP_TIMER (0x1e0ull)
+#define NIX_AF_RX_DEF_OL2 (0x200ull)
+#define NIX_AF_RX_DEF_OIP4 (0x210ull)
+#define NIX_AF_RX_DEF_IIP4 (0x220ull)
+#define NIX_AF_RX_DEF_OIP6 (0x230ull)
+#define NIX_AF_RX_DEF_IIP6 (0x240ull)
+#define NIX_AF_RX_DEF_OTCP (0x250ull)
+#define NIX_AF_RX_DEF_ITCP (0x260ull)
+#define NIX_AF_RX_DEF_OUDP (0x270ull)
+#define NIX_AF_RX_DEF_IUDP (0x280ull)
+#define NIX_AF_RX_DEF_OSCTP (0x290ull)
+#define NIX_AF_RX_DEF_ISCTP (0x2a0ull)
+#define NIX_AF_RX_DEF_IPSECX(a) (0x2b0ull | (uint64_t)(a) << 3)
+#define NIX_AF_RX_IPSEC_GEN_CFG (0x300ull)
+#define NIX_AF_RX_CPTX_INST_QSEL(a) (0x320ull | (uint64_t)(a) << 3)
+#define NIX_AF_RX_CPTX_CREDIT(a) (0x360ull | (uint64_t)(a) << 3)
+#define NIX_AF_NDC_RX_SYNC (0x3e0ull)
+#define NIX_AF_NDC_TX_SYNC (0x3f0ull)
+#define NIX_AF_AQ_CFG (0x400ull)
+#define NIX_AF_AQ_BASE (0x410ull)
+#define NIX_AF_AQ_STATUS (0x420ull)
+#define NIX_AF_AQ_DOOR (0x430ull)
+#define NIX_AF_AQ_DONE_WAIT (0x440ull)
+#define NIX_AF_AQ_DONE (0x450ull)
+#define NIX_AF_AQ_DONE_ACK (0x460ull)
+#define NIX_AF_AQ_DONE_TIMER (0x470ull)
+#define NIX_AF_AQ_DONE_ENA_W1S (0x490ull)
+#define NIX_AF_AQ_DONE_ENA_W1C (0x498ull)
+#define NIX_AF_RX_LINKX_CFG(a) (0x540ull | (uint64_t)(a) << 16)
+#define NIX_AF_RX_SW_SYNC (0x550ull)
+#define NIX_AF_RX_LINKX_WRR_CFG(a) (0x560ull | (uint64_t)(a) << 16)
+#define NIX_AF_EXPR_TX_FIFO_STATUS (0x640ull)
+#define NIX_AF_NORM_TX_FIFO_STATUS (0x648ull)
+#define NIX_AF_SDP_TX_FIFO_STATUS (0x650ull)
+#define NIX_AF_TX_NPC_CAPTURE_CONFIG (0x660ull)
+#define NIX_AF_TX_NPC_CAPTURE_INFO (0x668ull)
+#define NIX_AF_TX_NPC_CAPTURE_RESPX(a) (0x680ull | (uint64_t)(a) << 3)
+#define NIX_AF_SEB_ACTIVE_CYCLES_PCX(a) (0x6c0ull | (uint64_t)(a) << 3)
+#define NIX_AF_SMQX_CFG(a) (0x700ull | (uint64_t)(a) << 16)
+#define NIX_AF_SMQX_HEAD(a) (0x710ull | (uint64_t)(a) << 16)
+#define NIX_AF_SMQX_TAIL(a) (0x720ull | (uint64_t)(a) << 16)
+#define NIX_AF_SMQX_STATUS(a) (0x730ull | (uint64_t)(a) << 16)
+#define NIX_AF_SMQX_NXT_HEAD(a) (0x740ull | (uint64_t)(a) << 16)
+#define NIX_AF_SQM_ACTIVE_CYCLES_PC (0x770ull)
+#define NIX_AF_PSE_CHANNEL_LEVEL (0x800ull)
+#define NIX_AF_PSE_SHAPER_CFG (0x810ull)
+#define NIX_AF_PSE_ACTIVE_CYCLES_PC (0x8c0ull)
+#define NIX_AF_MARK_FORMATX_CTL(a) (0x900ull | (uint64_t)(a) << 18)
+#define NIX_AF_TX_LINKX_NORM_CREDIT(a) (0xa00ull | (uint64_t)(a) << 16)
+#define NIX_AF_TX_LINKX_EXPR_CREDIT(a) (0xa10ull | (uint64_t)(a) << 16)
+#define NIX_AF_TX_LINKX_SW_XOFF(a) (0xa20ull | (uint64_t)(a) << 16)
+#define NIX_AF_TX_LINKX_HW_XOFF(a) (0xa30ull | (uint64_t)(a) << 16)
+#define NIX_AF_SDP_LINK_CREDIT (0xa40ull)
+#define NIX_AF_SDP_SW_XOFFX(a) (0xa60ull | (uint64_t)(a) << 3)
+#define NIX_AF_SDP_HW_XOFFX(a) (0xac0ull | (uint64_t)(a) << 3)
+#define NIX_AF_TL4X_BP_STATUS(a) (0xb00ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL4X_SDP_LINK_CFG(a) (0xb10ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_SCHEDULE(a) (0xc00ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_SHAPE(a) (0xc10ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_CIR(a) (0xc20ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_SHAPE_STATE(a) (0xc50ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_SW_XOFF(a) (0xc70ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_TOPOLOGY(a) (0xc80ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG0(a) (0xcc0ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG1(a) (0xcc8ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG2(a) (0xcd0ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG3(a) (0xcd8ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_DROPPED_PACKETS(a) (0xd20ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_DROPPED_BYTES(a) (0xd30ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_RED_PACKETS(a) (0xd40ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_RED_BYTES(a) (0xd50ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_YELLOW_PACKETS(a) (0xd60ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_YELLOW_BYTES(a) (0xd70ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_GREEN_PACKETS(a) (0xd80ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL1X_GREEN_BYTES(a) (0xd90ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL2X_SCHEDULE(a) (0xe00ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL2X_SHAPE(a) (0xe10ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL2X_CIR(a) (0xe20ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL2X_PIR(a) (0xe30ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL2X_SCHED_STATE(a) (0xe40ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL2X_SHAPE_STATE(a) (0xe50ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL2X_SW_XOFF(a) (0xe70ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL2X_TOPOLOGY(a) (0xe80ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL2X_PARENT(a) (0xe88ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG0(a) (0xec0ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG1(a) (0xec8ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG2(a) (0xed0ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG3(a) (0xed8ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL3X_SCHEDULE(a) \
+ (0x1000ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL3X_SHAPE(a) \
+ (0x1010ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL3X_CIR(a) \
+ (0x1020ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL3X_PIR(a) \
+ (0x1030ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL3X_SCHED_STATE(a) \
+ (0x1040ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL3X_SHAPE_STATE(a) \
+ (0x1050ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL3X_SW_XOFF(a) \
+ (0x1070ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL3X_TOPOLOGY(a) \
+ (0x1080ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL3X_PARENT(a) \
+ (0x1088ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG0(a) \
+ (0x10c0ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG1(a) \
+ (0x10c8ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG2(a) \
+ (0x10d0ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG3(a) \
+ (0x10d8ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL4X_SCHEDULE(a) \
+ (0x1200ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL4X_SHAPE(a) \
+ (0x1210ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL4X_CIR(a) \
+ (0x1220ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL4X_PIR(a) \
+ (0x1230ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL4X_SCHED_STATE(a) \
+ (0x1240ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL4X_SHAPE_STATE(a) \
+ (0x1250ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL4X_SW_XOFF(a) \
+ (0x1270ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL4X_TOPOLOGY(a) \
+ (0x1280ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL4X_PARENT(a) \
+ (0x1288ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG0(a) \
+ (0x12c0ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG1(a) \
+ (0x12c8ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG2(a) \
+ (0x12d0ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG3(a) \
+ (0x12d8ull | (uint64_t)(a) << 16)
+#define NIX_AF_MDQX_SCHEDULE(a) \
+ (0x1400ull | (uint64_t)(a) << 16)
+#define NIX_AF_MDQX_SHAPE(a) \
+ (0x1410ull | (uint64_t)(a) << 16)
+#define NIX_AF_MDQX_CIR(a) \
+ (0x1420ull | (uint64_t)(a) << 16)
+#define NIX_AF_MDQX_PIR(a) \
+ (0x1430ull | (uint64_t)(a) << 16)
+#define NIX_AF_MDQX_SCHED_STATE(a) \
+ (0x1440ull | (uint64_t)(a) << 16)
+#define NIX_AF_MDQX_SHAPE_STATE(a) \
+ (0x1450ull | (uint64_t)(a) << 16)
+#define NIX_AF_MDQX_SW_XOFF(a) \
+ (0x1470ull | (uint64_t)(a) << 16)
+#define NIX_AF_MDQX_PARENT(a) \
+ (0x1480ull | (uint64_t)(a) << 16)
+#define NIX_AF_MDQX_MD_DEBUG(a) \
+ (0x14c0ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL3_TL2X_CFG(a) \
+ (0x1600ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL3_TL2X_BP_STATUS(a) \
+ (0x1610ull | (uint64_t)(a) << 16)
+#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) \
+ (0x1700ull | (uint64_t)(a) << 16 | (uint64_t)(b) << 3)
+#define NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(a, b) \
+ (0x1800ull | (uint64_t)(a) << 18 | (uint64_t)(b) << 3)
+#define NIX_AF_TX_MCASTX(a) \
+ (0x1900ull | (uint64_t)(a) << 15)
+#define NIX_AF_TX_VTAG_DEFX_CTL(a) \
+ (0x1a00ull | (uint64_t)(a) << 16)
+#define NIX_AF_TX_VTAG_DEFX_DATA(a) \
+ (0x1a10ull | (uint64_t)(a) << 16)
+#define NIX_AF_RX_BPIDX_STATUS(a) \
+ (0x1a20ull | (uint64_t)(a) << 17)
+#define NIX_AF_RX_CHANX_CFG(a) \
+ (0x1a30ull | (uint64_t)(a) << 15)
+#define NIX_AF_CINT_TIMERX(a) \
+ (0x1a40ull | (uint64_t)(a) << 18)
+#define NIX_AF_LSO_FORMATX_FIELDX(a, b) \
+ (0x1b00ull | (uint64_t)(a) << 16 | (uint64_t)(b) << 3)
+#define NIX_AF_LFX_CFG(a) \
+ (0x4000ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_SQS_CFG(a) \
+ (0x4020ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_TX_CFG2(a) \
+ (0x4028ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_SQS_BASE(a) \
+ (0x4030ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_RQS_CFG(a) \
+ (0x4040ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_RQS_BASE(a) \
+ (0x4050ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_CQS_CFG(a) \
+ (0x4060ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_CQS_BASE(a) \
+ (0x4070ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_TX_CFG(a) \
+ (0x4080ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_TX_PARSE_CFG(a) \
+ (0x4090ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_RX_CFG(a) \
+ (0x40a0ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_RSS_CFG(a) \
+ (0x40c0ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_RSS_BASE(a) \
+ (0x40d0ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_QINTS_CFG(a) \
+ (0x4100ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_QINTS_BASE(a) \
+ (0x4110ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_CINTS_CFG(a) \
+ (0x4120ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_CINTS_BASE(a) \
+ (0x4130ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_CFG0(a) \
+ (0x4140ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_CFG1(a) \
+ (0x4148ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_DYNO_CFG(a) \
+ (0x4150ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_DYNO_BASE(a) \
+ (0x4158ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_SA_BASE(a) \
+ (0x4170ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_TX_STATUS(a) \
+ (0x4180ull | (uint64_t)(a) << 17)
+#define NIX_AF_LFX_RX_VTAG_TYPEX(a, b) \
+ (0x4200ull | (uint64_t)(a) << 17 | (uint64_t)(b) << 3)
+#define NIX_AF_LFX_LOCKX(a, b) \
+ (0x4300ull | (uint64_t)(a) << 17 | (uint64_t)(b) << 3)
+#define NIX_AF_LFX_TX_STATX(a, b) \
+ (0x4400ull | (uint64_t)(a) << 17 | (uint64_t)(b) << 3)
+#define NIX_AF_LFX_RX_STATX(a, b) \
+ (0x4500ull | (uint64_t)(a) << 17 | (uint64_t)(b) << 3)
+#define NIX_AF_LFX_RSS_GRPX(a, b) \
+ (0x4600ull | (uint64_t)(a) << 17 | (uint64_t)(b) << 3)
+#define NIX_AF_RX_NPC_MC_RCV (0x4700ull)
+#define NIX_AF_RX_NPC_MC_DROP (0x4710ull)
+#define NIX_AF_RX_NPC_MIRROR_RCV (0x4720ull)
+#define NIX_AF_RX_NPC_MIRROR_DROP (0x4730ull)
+#define NIX_AF_RX_ACTIVE_CYCLES_PCX(a) \
+ (0x4800ull | (uint64_t)(a) << 16)
+#define NIX_PRIV_AF_INT_CFG (0x8000000ull)
+#define NIX_PRIV_LFX_CFG(a) \
+ (0x8000010ull | (uint64_t)(a) << 8)
+#define NIX_PRIV_LFX_INT_CFG(a) \
+ (0x8000020ull | (uint64_t)(a) << 8)
+#define NIX_AF_RVU_LF_CFG_DEBUG (0x8000030ull)
+
+#define NIX_LF_RX_SECRETX(a) (0x0ull | (uint64_t)(a) << 3)
+#define NIX_LF_CFG (0x100ull)
+#define NIX_LF_GINT (0x200ull)
+#define NIX_LF_GINT_W1S (0x208ull)
+#define NIX_LF_GINT_ENA_W1C (0x210ull)
+#define NIX_LF_GINT_ENA_W1S (0x218ull)
+#define NIX_LF_ERR_INT (0x220ull)
+#define NIX_LF_ERR_INT_W1S (0x228ull)
+#define NIX_LF_ERR_INT_ENA_W1C (0x230ull)
+#define NIX_LF_ERR_INT_ENA_W1S (0x238ull)
+#define NIX_LF_RAS (0x240ull)
+#define NIX_LF_RAS_W1S (0x248ull)
+#define NIX_LF_RAS_ENA_W1C (0x250ull)
+#define NIX_LF_RAS_ENA_W1S (0x258ull)
+#define NIX_LF_SQ_OP_ERR_DBG (0x260ull)
+#define NIX_LF_MNQ_ERR_DBG (0x270ull)
+#define NIX_LF_SEND_ERR_DBG (0x280ull)
+#define NIX_LF_TX_STATX(a) (0x300ull | (uint64_t)(a) << 3)
+#define NIX_LF_RX_STATX(a) (0x400ull | (uint64_t)(a) << 3)
+#define NIX_LF_OP_SENDX(a) (0x800ull | (uint64_t)(a) << 3)
+#define NIX_LF_RQ_OP_INT (0x900ull)
+#define NIX_LF_RQ_OP_OCTS (0x910ull)
+#define NIX_LF_RQ_OP_PKTS (0x920ull)
+#define NIX_LF_RQ_OP_DROP_OCTS (0x930ull)
+#define NIX_LF_RQ_OP_DROP_PKTS (0x940ull)
+#define NIX_LF_RQ_OP_RE_PKTS (0x950ull)
+#define NIX_LF_OP_IPSEC_DYNO_CNT (0x980ull)
+#define NIX_LF_SQ_OP_INT (0xa00ull)
+#define NIX_LF_SQ_OP_OCTS (0xa10ull)
+#define NIX_LF_SQ_OP_PKTS (0xa20ull)
+#define NIX_LF_SQ_OP_STATUS (0xa30ull)
+#define NIX_LF_SQ_OP_DROP_OCTS (0xa40ull)
+#define NIX_LF_SQ_OP_DROP_PKTS (0xa50ull)
+#define NIX_LF_CQ_OP_INT (0xb00ull)
+#define NIX_LF_CQ_OP_DOOR (0xb30ull)
+#define NIX_LF_CQ_OP_STATUS (0xb40ull)
+#define NIX_LF_QINTX_CNT(a) (0xc00ull | (uint64_t)(a) << 12)
+#define NIX_LF_QINTX_INT(a) (0xc10ull | (uint64_t)(a) << 12)
+#define NIX_LF_QINTX_ENA_W1S(a) (0xc20ull | (uint64_t)(a) << 12)
+#define NIX_LF_QINTX_ENA_W1C(a) (0xc30ull | (uint64_t)(a) << 12)
+#define NIX_LF_CINTX_CNT(a) (0xd00ull | (uint64_t)(a) << 12)
+#define NIX_LF_CINTX_WAIT(a) (0xd10ull | (uint64_t)(a) << 12)
+#define NIX_LF_CINTX_INT(a) (0xd20ull | (uint64_t)(a) << 12)
+#define NIX_LF_CINTX_INT_W1S(a) (0xd30ull | (uint64_t)(a) << 12)
+#define NIX_LF_CINTX_ENA_W1S(a) (0xd40ull | (uint64_t)(a) << 12)
+#define NIX_LF_CINTX_ENA_W1C(a) (0xd50ull | (uint64_t)(a) << 12)
+
+
+/* Enum offsets */
+
+#define NIX_TX_VTAGOP_NOP (0x0ull)
+#define NIX_TX_VTAGOP_INSERT (0x1ull)
+#define NIX_TX_VTAGOP_REPLACE (0x2ull)
+
+#define NIX_TX_ACTIONOP_DROP (0x0ull)
+#define NIX_TX_ACTIONOP_UCAST_DEFAULT (0x1ull)
+#define NIX_TX_ACTIONOP_UCAST_CHAN (0x2ull)
+#define NIX_TX_ACTIONOP_MCAST (0x3ull)
+#define NIX_TX_ACTIONOP_DROP_VIOL (0x5ull)
+
+#define NIX_INTF_RX (0x0ull)
+#define NIX_INTF_TX (0x1ull)
+
+#define NIX_TXLAYER_OL3 (0x0ull)
+#define NIX_TXLAYER_OL4 (0x1ull)
+#define NIX_TXLAYER_IL3 (0x2ull)
+#define NIX_TXLAYER_IL4 (0x3ull)
+
+#define NIX_SUBDC_NOP (0x0ull)
+#define NIX_SUBDC_EXT (0x1ull)
+#define NIX_SUBDC_CRC (0x2ull)
+#define NIX_SUBDC_IMM (0x3ull)
+#define NIX_SUBDC_SG (0x4ull)
+#define NIX_SUBDC_MEM (0x5ull)
+#define NIX_SUBDC_JUMP (0x6ull)
+#define NIX_SUBDC_WORK (0x7ull)
+#define NIX_SUBDC_SOD (0xfull)
+
+#define NIX_STYPE_STF (0x0ull)
+#define NIX_STYPE_STT (0x1ull)
+#define NIX_STYPE_STP (0x2ull)
+
+#define NIX_STAT_LF_TX_TX_UCAST (0x0ull)
+#define NIX_STAT_LF_TX_TX_BCAST (0x1ull)
+#define NIX_STAT_LF_TX_TX_MCAST (0x2ull)
+#define NIX_STAT_LF_TX_TX_DROP (0x3ull)
+#define NIX_STAT_LF_TX_TX_OCTS (0x4ull)
+
+#define NIX_STAT_LF_RX_RX_OCTS (0x0ull)
+#define NIX_STAT_LF_RX_RX_UCAST (0x1ull)
+#define NIX_STAT_LF_RX_RX_BCAST (0x2ull)
+#define NIX_STAT_LF_RX_RX_MCAST (0x3ull)
+#define NIX_STAT_LF_RX_RX_DROP (0x4ull)
+#define NIX_STAT_LF_RX_RX_DROP_OCTS (0x5ull)
+#define NIX_STAT_LF_RX_RX_FCS (0x6ull)
+#define NIX_STAT_LF_RX_RX_ERR (0x7ull)
+#define NIX_STAT_LF_RX_RX_DRP_BCAST (0x8ull)
+#define NIX_STAT_LF_RX_RX_DRP_MCAST (0x9ull)
+#define NIX_STAT_LF_RX_RX_DRP_L3BCAST (0xaull)
+#define NIX_STAT_LF_RX_RX_DRP_L3MCAST (0xbull)
+
+#define NIX_SQOPERR_SQ_OOR (0x0ull)
+#define NIX_SQOPERR_SQ_CTX_FAULT (0x1ull)
+#define NIX_SQOPERR_SQ_CTX_POISON (0x2ull)
+#define NIX_SQOPERR_SQ_DISABLED (0x3ull)
+#define NIX_SQOPERR_MAX_SQE_SIZE_ERR (0x4ull)
+#define NIX_SQOPERR_SQE_OFLOW (0x5ull)
+#define NIX_SQOPERR_SQB_NULL (0x6ull)
+#define NIX_SQOPERR_SQB_FAULT (0x7ull)
+
+#define NIX_XQESZ_W64 (0x0ull)
+#define NIX_XQESZ_W16 (0x1ull)
+
+#define NIX_VTAGSIZE_T4 (0x0ull)
+#define NIX_VTAGSIZE_T8 (0x1ull)
+
+#define NIX_RX_ACTIONOP_DROP (0x0ull)
+#define NIX_RX_ACTIONOP_UCAST (0x1ull)
+#define NIX_RX_ACTIONOP_UCAST_IPSEC (0x2ull)
+#define NIX_RX_ACTIONOP_MCAST (0x3ull)
+#define NIX_RX_ACTIONOP_RSS (0x4ull)
+#define NIX_RX_ACTIONOP_PF_FUNC_DROP (0x5ull)
+#define NIX_RX_ACTIONOP_MIRROR (0x6ull)
+
+#define NIX_RX_VTAGACTION_VTAG0_RELPTR (0x0ull)
+#define NIX_RX_VTAGACTION_VTAG1_RELPTR (0x4ull)
+#define NIX_RX_VTAGACTION_VTAG_VALID (0x1ull)
+#define NIX_TX_VTAGACTION_VTAG0_RELPTR \
+ (sizeof(struct nix_inst_hdr_s) + 2 * 6)
+#define NIX_TX_VTAGACTION_VTAG1_RELPTR \
+ (sizeof(struct nix_inst_hdr_s) + 2 * 6 + 4)
+#define NIX_RQINT_DROP (0x0ull)
+#define NIX_RQINT_RED (0x1ull)
+#define NIX_RQINT_R2 (0x2ull)
+#define NIX_RQINT_R3 (0x3ull)
+#define NIX_RQINT_R4 (0x4ull)
+#define NIX_RQINT_R5 (0x5ull)
+#define NIX_RQINT_R6 (0x6ull)
+#define NIX_RQINT_R7 (0x7ull)
+
+#define NIX_MAXSQESZ_W16 (0x0ull)
+#define NIX_MAXSQESZ_W8 (0x1ull)
+
+#define NIX_LSOALG_NOP (0x0ull)
+#define NIX_LSOALG_ADD_SEGNUM (0x1ull)
+#define NIX_LSOALG_ADD_PAYLEN (0x2ull)
+#define NIX_LSOALG_ADD_OFFSET (0x3ull)
+#define NIX_LSOALG_TCP_FLAGS (0x4ull)
+
+#define NIX_MNQERR_SQ_CTX_FAULT (0x0ull)
+#define NIX_MNQERR_SQ_CTX_POISON (0x1ull)
+#define NIX_MNQERR_SQB_FAULT (0x2ull)
+#define NIX_MNQERR_SQB_POISON (0x3ull)
+#define NIX_MNQERR_TOTAL_ERR (0x4ull)
+#define NIX_MNQERR_LSO_ERR (0x5ull)
+#define NIX_MNQERR_CQ_QUERY_ERR (0x6ull)
+#define NIX_MNQERR_MAX_SQE_SIZE_ERR (0x7ull)
+#define NIX_MNQERR_MAXLEN_ERR (0x8ull)
+#define NIX_MNQERR_SQE_SIZEM1_ZERO (0x9ull)
+
+#define NIX_MDTYPE_RSVD (0x0ull)
+#define NIX_MDTYPE_FLUSH (0x1ull)
+#define NIX_MDTYPE_PMD (0x2ull)
+
+#define NIX_NDC_TX_PORT_LMT (0x0ull)
+#define NIX_NDC_TX_PORT_ENQ (0x1ull)
+#define NIX_NDC_TX_PORT_MNQ (0x2ull)
+#define NIX_NDC_TX_PORT_DEQ (0x3ull)
+#define NIX_NDC_TX_PORT_DMA (0x4ull)
+#define NIX_NDC_TX_PORT_XQE (0x5ull)
+
+#define NIX_NDC_RX_PORT_AQ (0x0ull)
+#define NIX_NDC_RX_PORT_CQ (0x1ull)
+#define NIX_NDC_RX_PORT_CINT (0x2ull)
+#define NIX_NDC_RX_PORT_MC (0x3ull)
+#define NIX_NDC_RX_PORT_PKT (0x4ull)
+#define NIX_NDC_RX_PORT_RQ (0x5ull)
+
+#define NIX_RE_OPCODE_RE_NONE (0x0ull)
+#define NIX_RE_OPCODE_RE_PARTIAL (0x1ull)
+#define NIX_RE_OPCODE_RE_JABBER (0x2ull)
+#define NIX_RE_OPCODE_RE_FCS (0x7ull)
+#define NIX_RE_OPCODE_RE_FCS_RCV (0x8ull)
+#define NIX_RE_OPCODE_RE_TERMINATE (0x9ull)
+#define NIX_RE_OPCODE_RE_RX_CTL (0xbull)
+#define NIX_RE_OPCODE_RE_SKIP (0xcull)
+#define NIX_RE_OPCODE_RE_DMAPKT (0xfull)
+#define NIX_RE_OPCODE_UNDERSIZE (0x10ull)
+#define NIX_RE_OPCODE_OVERSIZE (0x11ull)
+#define NIX_RE_OPCODE_OL2_LENMISM (0x12ull)
+
+#define NIX_REDALG_STD (0x0ull)
+#define NIX_REDALG_SEND (0x1ull)
+#define NIX_REDALG_STALL (0x2ull)
+#define NIX_REDALG_DISCARD (0x3ull)
+
+#define NIX_RX_MCOP_RQ (0x0ull)
+#define NIX_RX_MCOP_RSS (0x1ull)
+
+#define NIX_RX_PERRCODE_NPC_RESULT_ERR (0x2ull)
+#define NIX_RX_PERRCODE_MCAST_FAULT (0x4ull)
+#define NIX_RX_PERRCODE_MIRROR_FAULT (0x5ull)
+#define NIX_RX_PERRCODE_MCAST_POISON (0x6ull)
+#define NIX_RX_PERRCODE_MIRROR_POISON (0x7ull)
+#define NIX_RX_PERRCODE_DATA_FAULT (0x8ull)
+#define NIX_RX_PERRCODE_MEMOUT (0x9ull)
+#define NIX_RX_PERRCODE_BUFS_OFLOW (0xaull)
+#define NIX_RX_PERRCODE_OL3_LEN (0x10ull)
+#define NIX_RX_PERRCODE_OL4_LEN (0x11ull)
+#define NIX_RX_PERRCODE_OL4_CHK (0x12ull)
+#define NIX_RX_PERRCODE_OL4_PORT (0x13ull)
+#define NIX_RX_PERRCODE_IL3_LEN (0x20ull)
+#define NIX_RX_PERRCODE_IL4_LEN (0x21ull)
+#define NIX_RX_PERRCODE_IL4_CHK (0x22ull)
+#define NIX_RX_PERRCODE_IL4_PORT (0x23ull)
+
+#define NIX_SENDCRCALG_CRC32 (0x0ull)
+#define NIX_SENDCRCALG_CRC32C (0x1ull)
+#define NIX_SENDCRCALG_ONES16 (0x2ull)
+
+#define NIX_SENDL3TYPE_NONE (0x0ull)
+#define NIX_SENDL3TYPE_IP4 (0x2ull)
+#define NIX_SENDL3TYPE_IP4_CKSUM (0x3ull)
+#define NIX_SENDL3TYPE_IP6 (0x4ull)
+
+#define NIX_SENDL4TYPE_NONE (0x0ull)
+#define NIX_SENDL4TYPE_TCP_CKSUM (0x1ull)
+#define NIX_SENDL4TYPE_SCTP_CKSUM (0x2ull)
+#define NIX_SENDL4TYPE_UDP_CKSUM (0x3ull)
+
+#define NIX_SENDLDTYPE_LDD (0x0ull)
+#define NIX_SENDLDTYPE_LDT (0x1ull)
+#define NIX_SENDLDTYPE_LDWB (0x2ull)
+
+#define NIX_SENDMEMALG_SET (0x0ull)
+#define NIX_SENDMEMALG_SETTSTMP (0x1ull)
+#define NIX_SENDMEMALG_SETRSLT (0x2ull)
+#define NIX_SENDMEMALG_ADD (0x8ull)
+#define NIX_SENDMEMALG_SUB (0x9ull)
+#define NIX_SENDMEMALG_ADDLEN (0xaull)
+#define NIX_SENDMEMALG_SUBLEN (0xbull)
+#define NIX_SENDMEMALG_ADDMBUF (0xcull)
+#define NIX_SENDMEMALG_SUBMBUF (0xdull)
+
+#define NIX_SENDMEMDSZ_B64 (0x0ull)
+#define NIX_SENDMEMDSZ_B32 (0x1ull)
+#define NIX_SENDMEMDSZ_B16 (0x2ull)
+#define NIX_SENDMEMDSZ_B8 (0x3ull)
+
+#define NIX_SEND_STATUS_GOOD (0x0ull)
+#define NIX_SEND_STATUS_SQ_CTX_FAULT (0x1ull)
+#define NIX_SEND_STATUS_SQ_CTX_POISON (0x2ull)
+#define NIX_SEND_STATUS_SQB_FAULT (0x3ull)
+#define NIX_SEND_STATUS_SQB_POISON (0x4ull)
+#define NIX_SEND_STATUS_SEND_HDR_ERR (0x5ull)
+#define NIX_SEND_STATUS_SEND_EXT_ERR (0x6ull)
+#define NIX_SEND_STATUS_JUMP_FAULT (0x7ull)
+#define NIX_SEND_STATUS_JUMP_POISON (0x8ull)
+#define NIX_SEND_STATUS_SEND_CRC_ERR (0x10ull)
+#define NIX_SEND_STATUS_SEND_IMM_ERR (0x11ull)
+#define NIX_SEND_STATUS_SEND_SG_ERR (0x12ull)
+#define NIX_SEND_STATUS_SEND_MEM_ERR (0x13ull)
+#define NIX_SEND_STATUS_INVALID_SUBDC (0x14ull)
+#define NIX_SEND_STATUS_SUBDC_ORDER_ERR (0x15ull)
+#define NIX_SEND_STATUS_DATA_FAULT (0x16ull)
+#define NIX_SEND_STATUS_DATA_POISON (0x17ull)
+#define NIX_SEND_STATUS_NPC_DROP_ACTION (0x20ull)
+#define NIX_SEND_STATUS_LOCK_VIOL (0x21ull)
+#define NIX_SEND_STATUS_NPC_UCAST_CHAN_ERR (0x22ull)
+#define NIX_SEND_STATUS_NPC_MCAST_CHAN_ERR (0x23ull)
+#define NIX_SEND_STATUS_NPC_MCAST_ABORT (0x24ull)
+#define NIX_SEND_STATUS_NPC_VTAG_PTR_ERR (0x25ull)
+#define NIX_SEND_STATUS_NPC_VTAG_SIZE_ERR (0x26ull)
+#define NIX_SEND_STATUS_SEND_MEM_FAULT (0x27ull)
+
+#define NIX_SQINT_LMT_ERR (0x0ull)
+#define NIX_SQINT_MNQ_ERR (0x1ull)
+#define NIX_SQINT_SEND_ERR (0x2ull)
+#define NIX_SQINT_SQB_ALLOC_FAIL (0x3ull)
+
+#define NIX_XQE_TYPE_INVALID (0x0ull)
+#define NIX_XQE_TYPE_RX (0x1ull)
+#define NIX_XQE_TYPE_RX_IPSECS (0x2ull)
+#define NIX_XQE_TYPE_RX_IPSECH (0x3ull)
+#define NIX_XQE_TYPE_RX_IPSECD (0x4ull)
+#define NIX_XQE_TYPE_SEND (0x8ull)
+
+#define NIX_AQ_COMP_NOTDONE (0x0ull)
+#define NIX_AQ_COMP_GOOD (0x1ull)
+#define NIX_AQ_COMP_SWERR (0x2ull)
+#define NIX_AQ_COMP_CTX_POISON (0x3ull)
+#define NIX_AQ_COMP_CTX_FAULT (0x4ull)
+#define NIX_AQ_COMP_LOCKERR (0x5ull)
+#define NIX_AQ_COMP_SQB_ALLOC_FAIL (0x6ull)
+
+#define NIX_AF_INT_VEC_RVU (0x0ull)
+#define NIX_AF_INT_VEC_GEN (0x1ull)
+#define NIX_AF_INT_VEC_AQ_DONE (0x2ull)
+#define NIX_AF_INT_VEC_AF_ERR (0x3ull)
+#define NIX_AF_INT_VEC_POISON (0x4ull)
+
+#define NIX_AQINT_GEN_RX_MCAST_DROP (0x0ull)
+#define NIX_AQINT_GEN_RX_MIRROR_DROP (0x1ull)
+#define NIX_AQINT_GEN_TL1_DRAIN (0x3ull)
+#define NIX_AQINT_GEN_SMQ_FLUSH_DONE (0x4ull)
+
+#define NIX_AQ_INSTOP_NOP (0x0ull)
+#define NIX_AQ_INSTOP_INIT (0x1ull)
+#define NIX_AQ_INSTOP_WRITE (0x2ull)
+#define NIX_AQ_INSTOP_READ (0x3ull)
+#define NIX_AQ_INSTOP_LOCK (0x4ull)
+#define NIX_AQ_INSTOP_UNLOCK (0x5ull)
+
+#define NIX_AQ_CTYPE_RQ (0x0ull)
+#define NIX_AQ_CTYPE_SQ (0x1ull)
+#define NIX_AQ_CTYPE_CQ (0x2ull)
+#define NIX_AQ_CTYPE_MCE (0x3ull)
+#define NIX_AQ_CTYPE_RSS (0x4ull)
+#define NIX_AQ_CTYPE_DYNO (0x5ull)
+
+#define NIX_COLORRESULT_GREEN (0x0ull)
+#define NIX_COLORRESULT_YELLOW (0x1ull)
+#define NIX_COLORRESULT_RED_SEND (0x2ull)
+#define NIX_COLORRESULT_RED_DROP (0x3ull)
+
+#define NIX_CHAN_LBKX_CHX(a, b) \
+ (0x000ull | ((uint64_t)(a) << 8) | (uint64_t)(b))
+#define NIX_CHAN_R4 (0x400ull)
+#define NIX_CHAN_R5 (0x500ull)
+#define NIX_CHAN_R6 (0x600ull)
+#define NIX_CHAN_SDP_CH_END (0x7ffull)
+#define NIX_CHAN_SDP_CH_START (0x700ull)
+#define NIX_CHAN_CGXX_LMACX_CHX(a, b, c) \
+ (0x800ull | ((uint64_t)(a) << 8) | ((uint64_t)(b) << 4) | \
+ (uint64_t)(c))
+
+#define NIX_INTF_SDP (0x4ull)
+#define NIX_INTF_CGX0 (0x0ull)
+#define NIX_INTF_CGX1 (0x1ull)
+#define NIX_INTF_CGX2 (0x2ull)
+#define NIX_INTF_LBK0 (0x3ull)
+
+#define NIX_CQERRINT_DOOR_ERR (0x0ull)
+#define NIX_CQERRINT_WR_FULL (0x1ull)
+#define NIX_CQERRINT_CQE_FAULT (0x2ull)
+
+#define NIX_LF_INT_VEC_GINT (0x80ull)
+#define NIX_LF_INT_VEC_ERR_INT (0x81ull)
+#define NIX_LF_INT_VEC_POISON (0x82ull)
+#define NIX_LF_INT_VEC_QINT_END (0x3full)
+#define NIX_LF_INT_VEC_QINT_START (0x0ull)
+#define NIX_LF_INT_VEC_CINT_END (0x7full)
+#define NIX_LF_INT_VEC_CINT_START (0x40ull)
+
+/* Enums definitions */
+
+/* Structures definitions */
+
+/* NIX admin queue instruction structure */
+struct nix_aq_inst_s {
+ uint64_t op : 4;
+ uint64_t ctype : 4;
+ uint64_t lf : 7;
+ uint64_t rsvd_23_15 : 9;
+ uint64_t cindex : 20;
+ uint64_t rsvd_62_44 : 19;
+ uint64_t doneint : 1;
+ uint64_t res_addr : 64; /* W1 */
+};
+
+/* NIX admin queue result structure */
+struct nix_aq_res_s {
+ uint64_t op : 4;
+ uint64_t ctype : 4;
+ uint64_t compcode : 8;
+ uint64_t doneint : 1;
+ uint64_t rsvd_63_17 : 47;
+ uint64_t rsvd_127_64 : 64; /* W1 */
+};
+
+/* NIX completion interrupt context hardware structure */
+struct nix_cint_hw_s {
+ uint64_t ecount : 32;
+ uint64_t qcount : 16;
+ uint64_t intr : 1;
+ uint64_t ena : 1;
+ uint64_t timer_idx : 8;
+ uint64_t rsvd_63_58 : 6;
+ uint64_t ecount_wait : 32;
+ uint64_t qcount_wait : 16;
+ uint64_t time_wait : 8;
+ uint64_t rsvd_127_120 : 8;
+};
+
+/* NIX completion queue entry header structure */
+struct nix_cqe_hdr_s {
+ uint64_t tag : 32;
+ uint64_t q : 20;
+ uint64_t rsvd_57_52 : 6;
+ uint64_t node : 2;
+ uint64_t cqe_type : 4;
+};
+
+/* NIX completion queue context structure */
+struct nix_cq_ctx_s {
+ uint64_t base : 64;/* W0 */
+ uint64_t rsvd_67_64 : 4;
+ uint64_t bp_ena : 1;
+ uint64_t rsvd_71_69 : 3;
+ uint64_t bpid : 9;
+ uint64_t rsvd_83_81 : 3;
+ uint64_t qint_idx : 7;
+ uint64_t cq_err : 1;
+ uint64_t cint_idx : 7;
+ uint64_t avg_con : 9;
+ uint64_t wrptr : 20;
+ uint64_t tail : 20;
+ uint64_t head : 20;
+ uint64_t avg_level : 8;
+ uint64_t update_time : 16;
+ uint64_t bp : 8;
+ uint64_t drop : 8;
+ uint64_t drop_ena : 1;
+ uint64_t ena : 1;
+ uint64_t rsvd_211_210 : 2;
+ uint64_t substream : 20;
+ uint64_t caching : 1;
+ uint64_t rsvd_235_233 : 3;
+ uint64_t qsize : 4;
+ uint64_t cq_err_int : 8;
+ uint64_t cq_err_int_ena : 8;
+};
+
+/* NIX instruction header structure */
+struct nix_inst_hdr_s {
+ uint64_t pf_func : 16;
+ uint64_t sq : 20;
+ uint64_t rsvd_63_36 : 28;
+};
+
+/* NIX i/o virtual address structure */
+struct nix_iova_s {
+ uint64_t addr : 64; /* W0 */
+};
+
+/* NIX IPsec dynamic ordering counter structure */
+struct nix_ipsec_dyno_s {
+ uint32_t count : 32; /* W0 */
+};
+
+/* NIX memory value structure */
+struct nix_mem_result_s {
+ uint64_t v : 1;
+ uint64_t color : 2;
+ uint64_t rsvd_63_3 : 61;
+};
+
+/* NIX statistics operation write data structure */
+struct nix_op_q_wdata_s {
+ uint64_t rsvd_31_0 : 32;
+ uint64_t q : 20;
+ uint64_t rsvd_63_52 : 12;
+};
+
+/* NIX queue interrupt context hardware structure */
+struct nix_qint_hw_s {
+ uint32_t count : 22;
+ uint32_t rsvd_30_22 : 9;
+ uint32_t ena : 1;
+};
+
+/* NIX receive queue (RQ) context hardware structure: 1024 bits (W0-W15).
+ * Field placement differs from struct nix_rq_ctx_s below (e.g. wqe_skip
+ * sits in W1 here) — do not change field order or widths.
+ */
+struct nix_rq_ctx_hw_s {
+ uint64_t ena : 1;
+ uint64_t sso_ena : 1;
+ uint64_t ipsech_ena : 1;
+ uint64_t ena_wqwd : 1;
+ uint64_t cq : 20;
+ uint64_t substream : 20;
+ uint64_t wqe_aura : 20;
+ uint64_t spb_aura : 20;
+ uint64_t lpb_aura : 20;
+ uint64_t sso_grp : 10;
+ uint64_t sso_tt : 2;
+ uint64_t pb_caching : 2;
+ uint64_t wqe_caching : 1;
+ uint64_t xqe_drop_ena : 1;
+ uint64_t spb_drop_ena : 1;
+ uint64_t lpb_drop_ena : 1;
+ uint64_t wqe_skip : 2;
+ uint64_t rsvd_127_124 : 4;
+ uint64_t rsvd_139_128 : 12;
+ uint64_t spb_sizem1 : 6;
+ uint64_t rsvd_150_146 : 5;
+ uint64_t spb_ena : 1;
+ uint64_t lpb_sizem1 : 12;
+ uint64_t first_skip : 7;
+ uint64_t rsvd_171 : 1;
+ uint64_t later_skip : 6;
+ uint64_t xqe_imm_size : 6;
+ uint64_t rsvd_189_184 : 6;
+ uint64_t xqe_imm_copy : 1;
+ uint64_t xqe_hdr_split : 1;
+ uint64_t xqe_drop : 8;
+ uint64_t xqe_pass : 8;
+ uint64_t wqe_pool_drop : 8;
+ uint64_t wqe_pool_pass : 8;
+ uint64_t spb_aura_drop : 8;
+ uint64_t spb_aura_pass : 8;
+ uint64_t spb_pool_drop : 8;
+ uint64_t spb_pool_pass : 8;
+ uint64_t lpb_aura_drop : 8;
+ uint64_t lpb_aura_pass : 8;
+ uint64_t lpb_pool_drop : 8;
+ uint64_t lpb_pool_pass : 8;
+ uint64_t rsvd_319_288 : 32;
+ uint64_t ltag : 24;
+ uint64_t good_utag : 8;
+ uint64_t bad_utag : 8;
+ uint64_t flow_tagw : 6;
+ uint64_t rsvd_383_366 : 18;
+ uint64_t octs : 48;
+ uint64_t rsvd_447_432 : 16;
+ uint64_t pkts : 48;
+ uint64_t rsvd_511_496 : 16;
+ uint64_t drop_octs : 48;
+ uint64_t rsvd_575_560 : 16;
+ uint64_t drop_pkts : 48;
+ uint64_t rsvd_639_624 : 16;
+ uint64_t re_pkts : 48;
+ uint64_t rsvd_702_688 : 15;
+ uint64_t ena_copy : 1;
+ uint64_t rsvd_739_704 : 36;
+ uint64_t rq_int : 8;
+ uint64_t rq_int_ena : 8;
+ uint64_t qint_idx : 7;
+ uint64_t rsvd_767_763 : 5;
+ uint64_t rsvd_831_768 : 64;/* W12 */
+ uint64_t rsvd_895_832 : 64;/* W13 */
+ uint64_t rsvd_959_896 : 64;/* W14 */
+ uint64_t rsvd_1023_960 : 64;/* W15 */
+};
+
+/* NIX receive queue (RQ) context structure: 1024 bits (W0-W15).
+ * NOTE: layout intentionally differs from struct nix_rq_ctx_hw_s above
+ * (wqe_skip lives in W2 here, stats words are placed differently).
+ */
+struct nix_rq_ctx_s {
+ uint64_t ena : 1;
+ uint64_t sso_ena : 1;
+ uint64_t ipsech_ena : 1;
+ uint64_t ena_wqwd : 1;
+ uint64_t cq : 20;
+ uint64_t substream : 20;
+ uint64_t wqe_aura : 20;
+ uint64_t spb_aura : 20;
+ uint64_t lpb_aura : 20;
+ uint64_t sso_grp : 10;
+ uint64_t sso_tt : 2;
+ uint64_t pb_caching : 2;
+ uint64_t wqe_caching : 1;
+ uint64_t xqe_drop_ena : 1;
+ uint64_t spb_drop_ena : 1;
+ uint64_t lpb_drop_ena : 1;
+ uint64_t rsvd_127_122 : 6;
+ uint64_t rsvd_139_128 : 12;
+ uint64_t spb_sizem1 : 6;
+ uint64_t wqe_skip : 2;
+ uint64_t rsvd_150_148 : 3;
+ uint64_t spb_ena : 1;
+ uint64_t lpb_sizem1 : 12;
+ uint64_t first_skip : 7;
+ uint64_t rsvd_171 : 1;
+ uint64_t later_skip : 6;
+ uint64_t xqe_imm_size : 6;
+ uint64_t rsvd_189_184 : 6;
+ uint64_t xqe_imm_copy : 1;
+ uint64_t xqe_hdr_split : 1;
+ uint64_t xqe_drop : 8;
+ uint64_t xqe_pass : 8;
+ uint64_t wqe_pool_drop : 8;
+ uint64_t wqe_pool_pass : 8;
+ uint64_t spb_aura_drop : 8;
+ uint64_t spb_aura_pass : 8;
+ uint64_t spb_pool_drop : 8;
+ uint64_t spb_pool_pass : 8;
+ uint64_t lpb_aura_drop : 8;
+ uint64_t lpb_aura_pass : 8;
+ uint64_t lpb_pool_drop : 8;
+ uint64_t lpb_pool_pass : 8;
+ uint64_t rsvd_291_288 : 4;
+ uint64_t rq_int : 8;
+ uint64_t rq_int_ena : 8;
+ uint64_t qint_idx : 7;
+ uint64_t rsvd_319_315 : 5;
+ uint64_t ltag : 24;
+ uint64_t good_utag : 8;
+ uint64_t bad_utag : 8;
+ uint64_t flow_tagw : 6;
+ uint64_t rsvd_383_366 : 18;
+ uint64_t octs : 48;
+ uint64_t rsvd_447_432 : 16;
+ uint64_t pkts : 48;
+ uint64_t rsvd_511_496 : 16;
+ uint64_t drop_octs : 48;
+ uint64_t rsvd_575_560 : 16;
+ uint64_t drop_pkts : 48;
+ uint64_t rsvd_639_624 : 16;
+ uint64_t re_pkts : 48;
+ uint64_t rsvd_703_688 : 16;
+ uint64_t rsvd_767_704 : 64;/* W11 */
+ uint64_t rsvd_831_768 : 64;/* W12 */
+ uint64_t rsvd_895_832 : 64;/* W13 */
+ uint64_t rsvd_959_896 : 64;/* W14 */
+ uint64_t rsvd_1023_960 : 64;/* W15 */
+};
+
+/* NIX receive side scaling entry structure: one 32-bit word holding the
+ * target receive queue (rq) for this RSS table slot.
+ */
+struct nix_rsse_s {
+ uint32_t rq : 20;
+ uint32_t rsvd_31_20 : 12;
+};
+
+/* NIX receive action structure (one 64-bit word). The same layout is
+ * redeclared as struct nix_rx_action in otx2_npc.h.
+ */
+struct nix_rx_action_s {
+ uint64_t op : 4;
+ uint64_t pf_func : 16;
+ uint64_t index : 20;
+ uint64_t match_id : 16;
+ uint64_t flow_key_alg : 5;
+ uint64_t rsvd_63_61 : 3;
+};
+
+/* NIX receive immediate sub descriptor structure (one 64-bit word;
+ * sub descriptor code 'subdc' occupies bits 63:60).
+ */
+struct nix_rx_imm_s {
+ uint64_t size : 16;
+ uint64_t apad : 3;
+ uint64_t rsvd_59_19 : 41;
+ uint64_t subdc : 4;
+};
+
+/* NIX receive multicast/mirror entry structure (one 64-bit word).
+ * 'next' presumably chains to the next entry and 'eol' marks the end of
+ * the list — field-name inference, confirm against the HW manual.
+ */
+struct nix_rx_mce_s {
+ uint64_t op : 2;
+ uint64_t rsvd_2 : 1;
+ uint64_t eol : 1;
+ uint64_t index : 20;
+ uint64_t rsvd_31_24 : 8;
+ uint64_t pf_func : 16;
+ uint64_t next : 16;
+};
+
+/* NIX receive parse structure: 7 x 64-bit words (W0-W6) of parser
+ * results — layer types (l[a-h]type), per-layer flags (l[a-h]flags)
+ * and pointers (l[a-h]ptr), plus VLAN tag info.
+ */
+struct nix_rx_parse_s {
+ uint64_t chan : 12;
+ uint64_t desc_sizem1 : 5;
+ uint64_t imm_copy : 1;
+ uint64_t express : 1;
+ uint64_t wqwd : 1;
+ uint64_t errlev : 4;
+ uint64_t errcode : 8;
+ uint64_t latype : 4;
+ uint64_t lbtype : 4;
+ uint64_t lctype : 4;
+ uint64_t ldtype : 4;
+ uint64_t letype : 4;
+ uint64_t lftype : 4;
+ uint64_t lgtype : 4;
+ uint64_t lhtype : 4;
+ uint64_t pkt_lenm1 : 16;
+ uint64_t l2m : 1;
+ uint64_t l2b : 1;
+ uint64_t l3m : 1;
+ uint64_t l3b : 1;
+ uint64_t vtag0_valid : 1;
+ uint64_t vtag0_gone : 1;
+ uint64_t vtag1_valid : 1;
+ uint64_t vtag1_gone : 1;
+ uint64_t pkind : 6;
+ uint64_t rsvd_95_94 : 2;
+ uint64_t vtag0_tci : 16;
+ uint64_t vtag1_tci : 16;
+ uint64_t laflags : 8;
+ uint64_t lbflags : 8;
+ uint64_t lcflags : 8;
+ uint64_t ldflags : 8;
+ uint64_t leflags : 8;
+ uint64_t lfflags : 8;
+ uint64_t lgflags : 8;
+ uint64_t lhflags : 8;
+ uint64_t eoh_ptr : 8;
+ uint64_t wqe_aura : 20;
+ uint64_t pb_aura : 20;
+ uint64_t match_id : 16;
+ uint64_t laptr : 8;
+ uint64_t lbptr : 8;
+ uint64_t lcptr : 8;
+ uint64_t ldptr : 8;
+ uint64_t leptr : 8;
+ uint64_t lfptr : 8;
+ uint64_t lgptr : 8;
+ uint64_t lhptr : 8;
+ uint64_t vtag0_ptr : 8;
+ uint64_t vtag1_ptr : 8;
+ uint64_t flow_key_alg : 5;
+ uint64_t rsvd_383_341 : 43;
+ uint64_t rsvd_447_384 : 64; /* W6 */
+};
+
+/* NIX receive scatter/gather sub descriptor structure (one 64-bit word:
+ * up to three segment sizes, segment count, and subdc in bits 63:60).
+ */
+struct nix_rx_sg_s {
+ uint64_t seg1_size : 16;
+ uint64_t seg2_size : 16;
+ uint64_t seg3_size : 16;
+ uint64_t segs : 2;
+ uint64_t rsvd_59_50 : 10;
+ uint64_t subdc : 4;
+};
+
+/* NIX receive vtag action structure: vtag0 controls in bits 31:0,
+ * vtag1 controls in bits 63:32 (mirrored layout).
+ */
+struct nix_rx_vtag_action_s {
+ uint64_t vtag0_relptr : 8;
+ uint64_t vtag0_lid : 3;
+ uint64_t rsvd_11 : 1;
+ uint64_t vtag0_type : 3;
+ uint64_t vtag0_valid : 1;
+ uint64_t rsvd_31_16 : 16;
+ uint64_t vtag1_relptr : 8;
+ uint64_t vtag1_lid : 3;
+ uint64_t rsvd_43 : 1;
+ uint64_t vtag1_type : 3;
+ uint64_t vtag1_valid : 1;
+ uint64_t rsvd_63_48 : 16;
+};
+
+/* NIX send completion structure (one 64-bit word: status + sqe_id) */
+struct nix_send_comp_s {
+ uint64_t status : 8;
+ uint64_t sqe_id : 16;
+ uint64_t rsvd_63_24 : 40;
+};
+
+/* NIX send CRC sub descriptor structure (two 64-bit words; subdc in
+ * bits 63:60 of W0, 32-bit IV in the low half of W1).
+ */
+struct nix_send_crc_s {
+ uint64_t size : 16;
+ uint64_t start : 16;
+ uint64_t insert : 16;
+ uint64_t rsvd_57_48 : 10;
+ uint64_t alg : 2;
+ uint64_t subdc : 4;
+ uint64_t iv : 32;
+ uint64_t rsvd_127_96 : 32;
+};
+
+/* NIX send extended header sub descriptor structure, word 0: LSO and
+ * shaping/marking controls. Accessible either as raw u64 'u' or as
+ * bit-fields (subdc in bits 63:60).
+ */
+RTE_STD_C11
+union nix_send_ext_w0_u {
+ uint64_t u;
+ struct {
+ uint64_t lso_mps : 14;
+ uint64_t lso : 1;
+ uint64_t tstmp : 1;
+ uint64_t lso_sb : 8;
+ uint64_t lso_format : 5;
+ uint64_t rsvd_31_29 : 3;
+ uint64_t shp_chg : 9;
+ uint64_t shp_dis : 1;
+ uint64_t shp_ra : 2;
+ uint64_t markptr : 8;
+ uint64_t markform : 7;
+ uint64_t mark_en : 1;
+ uint64_t subdc : 4;
+ };
+};
+
+/* NIX send extended header, word 1: VLAN insertion controls (two
+ * tags, each with pointer + TCI + enable bit).
+ */
+RTE_STD_C11
+union nix_send_ext_w1_u {
+ uint64_t u;
+ struct {
+ uint64_t vlan0_ins_ptr : 8;
+ uint64_t vlan0_ins_tci : 16;
+ uint64_t vlan1_ins_ptr : 8;
+ uint64_t vlan1_ins_tci : 16;
+ uint64_t vlan0_ins_ena : 1;
+ uint64_t vlan1_ins_ena : 1;
+ uint64_t rsvd_127_114 : 14;
+ };
+};
+
+/* NIX send extended header sub descriptor: the two words above (W0+W1) */
+struct nix_send_ext_s {
+ union nix_send_ext_w0_u w0;
+ union nix_send_ext_w1_u w1;
+};
+
+/* NIX send header sub descriptor structure, word 0: total length,
+ * aura, descriptor size (sizem1) and target send queue (sq).
+ */
+RTE_STD_C11
+union nix_send_hdr_w0_u {
+ uint64_t u;
+ struct {
+ uint64_t total : 18;
+ uint64_t rsvd_18 : 1;
+ uint64_t df : 1;
+ uint64_t aura : 20;
+ uint64_t sizem1 : 3;
+ uint64_t pnc : 1;
+ uint64_t sq : 20;
+ };
+};
+
+/* NIX send header, word 1: outer/inner L3/L4 checksum offload pointers
+ * and types, plus the SQE id reported on completion.
+ */
+RTE_STD_C11
+union nix_send_hdr_w1_u {
+ uint64_t u;
+ struct {
+ uint64_t ol3ptr : 8;
+ uint64_t ol4ptr : 8;
+ uint64_t il3ptr : 8;
+ uint64_t il4ptr : 8;
+ uint64_t ol3type : 4;
+ uint64_t ol4type : 4;
+ uint64_t il3type : 4;
+ uint64_t il4type : 4;
+ uint64_t sqe_id : 16;
+ };
+};
+
+/* NIX send header sub descriptor: the two words above (W0+W1) */
+struct nix_send_hdr_s {
+ union nix_send_hdr_w0_u w0;
+ union nix_send_hdr_w1_u w1;
+};
+
+/* NIX send immediate sub descriptor structure (one 64-bit word;
+ * subdc in bits 63:60).
+ */
+struct nix_send_imm_s {
+ uint64_t size : 16;
+ uint64_t apad : 3;
+ uint64_t rsvd_59_19 : 41;
+ uint64_t subdc : 4;
+};
+
+/* NIX send jump sub descriptor structure (two 64-bit words; W1 is the
+ * jump target address).
+ */
+struct nix_send_jump_s {
+ uint64_t sizem1 : 7;
+ uint64_t rsvd_13_7 : 7;
+ uint64_t ld_type : 2;
+ uint64_t aura : 20;
+ uint64_t rsvd_58_36 : 23;
+ uint64_t f : 1;
+ uint64_t subdc : 4;
+ uint64_t addr : 64; /* W1 */
+};
+
+/* NIX send memory sub descriptor structure (two 64-bit words; W1 is
+ * the memory address operated on).
+ */
+struct nix_send_mem_s {
+ uint64_t offset : 16;
+ uint64_t rsvd_52_16 : 37;
+ uint64_t wmem : 1;
+ uint64_t dsz : 2;
+ uint64_t alg : 4;
+ uint64_t subdc : 4;
+ uint64_t addr : 64; /* W1 */
+};
+
+/* NIX send scatter/gather sub descriptor structure (one 64-bit control
+ * word: up to three segment sizes, segment count, per-segment free
+ * flags i1-i3, and subdc in bits 63:60).
+ */
+RTE_STD_C11
+union nix_send_sg_s {
+ uint64_t u;
+ struct {
+ uint64_t seg1_size : 16;
+ uint64_t seg2_size : 16;
+ uint64_t seg3_size : 16;
+ uint64_t segs : 2;
+ uint64_t rsvd_54_50 : 5;
+ uint64_t i1 : 1;
+ uint64_t i2 : 1;
+ uint64_t i3 : 1;
+ uint64_t ld_type : 2;
+ uint64_t subdc : 4;
+ };
+};
+
+/* NIX send work sub descriptor structure (two 64-bit words: SSO work
+ * tag/type/group in W0, work address in W1).
+ */
+struct nix_send_work_s {
+ uint64_t tag : 32;
+ uint64_t tt : 2;
+ uint64_t grp : 10;
+ uint64_t rsvd_59_44 : 16;
+ uint64_t subdc : 4;
+ uint64_t addr : 64; /* W1 */
+};
+
+/* NIX send queue (SQ) context hardware structure (W0-W15). Layout
+ * differs from struct nix_sq_ctx_s below; note the split stats fields
+ * (drop_octs_lsw/msw, pkts_lsw/msw) straddling word boundaries.
+ */
+struct nix_sq_ctx_hw_s {
+ uint64_t ena : 1;
+ uint64_t substream : 20;
+ uint64_t max_sqe_size : 2;
+ uint64_t sqe_way_mask : 16;
+ uint64_t sqb_aura : 20;
+ uint64_t gbl_rsvd1 : 5;
+ uint64_t cq_id : 20;
+ uint64_t cq_ena : 1;
+ uint64_t qint_idx : 6;
+ uint64_t gbl_rsvd2 : 1;
+ uint64_t sq_int : 8;
+ uint64_t sq_int_ena : 8;
+ uint64_t xoff : 1;
+ uint64_t sqe_stype : 2;
+ uint64_t gbl_rsvd : 17;
+ uint64_t head_sqb : 64;/* W2 */
+ uint64_t head_offset : 6;
+ uint64_t sqb_dequeue_count : 16;
+ uint64_t default_chan : 12;
+ uint64_t sdp_mcast : 1;
+ uint64_t sso_ena : 1;
+ uint64_t dse_rsvd1 : 28;
+ uint64_t sqb_enqueue_count : 16;
+ uint64_t tail_offset : 6;
+ uint64_t lmt_dis : 1;
+ uint64_t smq_rr_quantum : 24;
+ uint64_t dnq_rsvd1 : 17;
+ uint64_t tail_sqb : 64;/* W5 */
+ uint64_t next_sqb : 64;/* W6 */
+ uint64_t mnq_dis : 1;
+ uint64_t smq : 9;
+ uint64_t smq_pend : 1;
+ uint64_t smq_next_sq : 20;
+ uint64_t smq_next_sq_vld : 1;
+ uint64_t scm1_rsvd2 : 32;
+ uint64_t smenq_sqb : 64;/* W8 */
+ uint64_t smenq_offset : 6;
+ uint64_t cq_limit : 8;
+ uint64_t smq_rr_count : 25;
+ uint64_t scm_lso_rem : 18;
+ uint64_t scm_dq_rsvd0 : 7;
+ uint64_t smq_lso_segnum : 8;
+ uint64_t vfi_lso_total : 18;
+ uint64_t vfi_lso_sizem1 : 3;
+ uint64_t vfi_lso_sb : 8;
+ uint64_t vfi_lso_mps : 14;
+ uint64_t vfi_lso_vlan0_ins_ena : 1;
+ uint64_t vfi_lso_vlan1_ins_ena : 1;
+ uint64_t vfi_lso_vld : 1;
+ uint64_t smenq_next_sqb_vld : 1;
+ uint64_t scm_dq_rsvd1 : 9;
+ uint64_t smenq_next_sqb : 64;/* W11 */
+ uint64_t seb_rsvd1 : 64;/* W12 */
+ uint64_t drop_pkts : 48;
+ uint64_t drop_octs_lsw : 16;
+ uint64_t drop_octs_msw : 32;
+ uint64_t pkts_lsw : 32;
+ uint64_t pkts_msw : 16;
+ uint64_t octs : 48;
+};
+
+/* NIX send queue (SQ) context structure: 1024 bits (W0-W15).
+ * NOTE: field placement differs from struct nix_sq_ctx_hw_s above —
+ * do not change field order or widths.
+ */
+struct nix_sq_ctx_s {
+ uint64_t ena : 1;
+ uint64_t qint_idx : 6;
+ uint64_t substream : 20;
+ uint64_t sdp_mcast : 1;
+ uint64_t cq : 20;
+ uint64_t sqe_way_mask : 16;
+ uint64_t smq : 9;
+ uint64_t cq_ena : 1;
+ uint64_t xoff : 1;
+ uint64_t sso_ena : 1;
+ uint64_t smq_rr_quantum : 24;
+ uint64_t default_chan : 12;
+ uint64_t sqb_count : 16;
+ uint64_t smq_rr_count : 25;
+ uint64_t sqb_aura : 20;
+ uint64_t sq_int : 8;
+ uint64_t sq_int_ena : 8;
+ uint64_t sqe_stype : 2;
+ uint64_t rsvd_191 : 1;
+ uint64_t max_sqe_size : 2;
+ uint64_t cq_limit : 8;
+ uint64_t lmt_dis : 1;
+ uint64_t mnq_dis : 1;
+ uint64_t smq_next_sq : 20;
+ uint64_t smq_lso_segnum : 8;
+ uint64_t tail_offset : 6;
+ uint64_t smenq_offset : 6;
+ uint64_t head_offset : 6;
+ uint64_t smenq_next_sqb_vld : 1;
+ uint64_t smq_pend : 1;
+ uint64_t smq_next_sq_vld : 1;
+ uint64_t rsvd_255_253 : 3;
+ uint64_t next_sqb : 64;/* W4 */
+ uint64_t tail_sqb : 64;/* W5 */
+ uint64_t smenq_sqb : 64;/* W6 */
+ uint64_t smenq_next_sqb : 64;/* W7 */
+ uint64_t head_sqb : 64;/* W8 */
+ uint64_t rsvd_583_576 : 8;
+ uint64_t vfi_lso_total : 18;
+ uint64_t vfi_lso_sizem1 : 3;
+ uint64_t vfi_lso_sb : 8;
+ uint64_t vfi_lso_mps : 14;
+ uint64_t vfi_lso_vlan0_ins_ena : 1;
+ uint64_t vfi_lso_vlan1_ins_ena : 1;
+ uint64_t vfi_lso_vld : 1;
+ uint64_t rsvd_639_630 : 10;
+ uint64_t scm_lso_rem : 18;
+ uint64_t rsvd_703_658 : 46;
+ uint64_t octs : 48;
+ uint64_t rsvd_767_752 : 16;
+ uint64_t pkts : 48;
+ uint64_t rsvd_831_816 : 16;
+ uint64_t rsvd_895_832 : 64;/* W13 */
+ uint64_t drop_octs : 48;
+ uint64_t rsvd_959_944 : 16;
+ uint64_t drop_pkts : 48;
+ uint64_t rsvd_1023_1008 : 16;
+};
+
+/* NIX transmit action structure (one 64-bit word) */
+struct nix_tx_action_s {
+ uint64_t op : 4;
+ uint64_t rsvd_11_4 : 8;
+ uint64_t index : 20;
+ uint64_t match_id : 16;
+ uint64_t rsvd_63_48 : 16;
+};
+
+/* NIX transmit vtag action structure: vtag0 controls in bits 31:0,
+ * vtag1 controls in bits 63:32 (mirrored layout).
+ */
+struct nix_tx_vtag_action_s {
+ uint64_t vtag0_relptr : 8;
+ uint64_t vtag0_lid : 3;
+ uint64_t rsvd_11 : 1;
+ uint64_t vtag0_op : 2;
+ uint64_t rsvd_15_14 : 2;
+ uint64_t vtag0_def : 10;
+ uint64_t rsvd_31_26 : 6;
+ uint64_t vtag1_relptr : 8;
+ uint64_t vtag1_lid : 3;
+ uint64_t rsvd_43 : 1;
+ uint64_t vtag1_op : 2;
+ uint64_t rsvd_47_46 : 2;
+ uint64_t vtag1_def : 10;
+ uint64_t rsvd_63_58 : 6;
+};
+
+/* NIX work queue entry header structure (one 64-bit word: SSO tag,
+ * tag type, group, node, queue, entry type).
+ */
+struct nix_wqe_hdr_s {
+ uint64_t tag : 32;
+ uint64_t tt : 2;
+ uint64_t grp : 10;
+ uint64_t node : 2;
+ uint64_t q : 14;
+ uint64_t wqe_type : 4;
+};
+
+/* NIX Rx flow key algorithm field structure (one 64-bit word).
+ * NOTE(review): reserved fields here use <lo>_<hi> naming
+ * (reserved_35_63), unlike the rsvd_<hi>_<lo> convention used
+ * elsewhere in this file.
+ */
+struct nix_rx_flowkey_alg {
+ uint64_t key_offset :6;
+ uint64_t ln_mask :1;
+ uint64_t fn_mask :1;
+ uint64_t hdr_offset :8;
+ uint64_t bytesm1 :5;
+ uint64_t lid :3;
+ uint64_t reserved_24_24 :1;
+ uint64_t ena :1;
+ uint64_t sel_chan :1;
+ uint64_t ltype_mask :4;
+ uint64_t ltype_match :4;
+ uint64_t reserved_35_63 :29;
+};
+
+/* NIX LSO format field structure (one 64-bit word). Bit positions must
+ * stay in sync with the NIX_LSO_FIELD_*_MASK macros below:
+ * offset bits 7:0, layer bits 9:8, sizem1 bits 13:12, alg bits 18:16.
+ */
+struct nix_lso_format {
+ uint64_t offset : 8;
+ uint64_t layer : 2;
+ uint64_t rsvd_10_11 : 2;
+ uint64_t sizem1 : 2;
+ uint64_t rsvd_14_15 : 2;
+ uint64_t alg : 3;
+ uint64_t rsvd_19_63 : 45;
+};
+
+#define NIX_LSO_FIELD_MAX (8)
+#define NIX_LSO_FIELD_ALG_MASK GENMASK(18, 16)
+#define NIX_LSO_FIELD_SZ_MASK GENMASK(13, 12)
+#define NIX_LSO_FIELD_LY_MASK GENMASK(9, 8)
+#define NIX_LSO_FIELD_OFF_MASK GENMASK(7, 0)
+
+#define NIX_LSO_FIELD_MASK \
+ (NIX_LSO_FIELD_OFF_MASK | \
+ NIX_LSO_FIELD_LY_MASK | \
+ NIX_LSO_FIELD_SZ_MASK | \
+ NIX_LSO_FIELD_ALG_MASK)
+
+#endif /* __OTX2_NIX_HW_H__ */
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_npa.h b/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_npa.h
new file mode 100644
index 000000000..2224216c9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_npa.h
@@ -0,0 +1,305 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_NPA_HW_H__
+#define __OTX2_NPA_HW_H__
+
+/* Register offsets */
+
+#define NPA_AF_BLK_RST (0x0ull)
+#define NPA_AF_CONST (0x10ull)
+#define NPA_AF_CONST1 (0x18ull)
+#define NPA_AF_LF_RST (0x20ull)
+#define NPA_AF_GEN_CFG (0x30ull)
+#define NPA_AF_NDC_CFG (0x40ull)
+#define NPA_AF_NDC_SYNC (0x50ull)
+#define NPA_AF_INP_CTL (0xd0ull)
+#define NPA_AF_ACTIVE_CYCLES_PC (0xf0ull)
+#define NPA_AF_AVG_DELAY (0x100ull)
+#define NPA_AF_GEN_INT (0x140ull)
+#define NPA_AF_GEN_INT_W1S (0x148ull)
+#define NPA_AF_GEN_INT_ENA_W1S (0x150ull)
+#define NPA_AF_GEN_INT_ENA_W1C (0x158ull)
+#define NPA_AF_RVU_INT (0x160ull)
+#define NPA_AF_RVU_INT_W1S (0x168ull)
+#define NPA_AF_RVU_INT_ENA_W1S (0x170ull)
+#define NPA_AF_RVU_INT_ENA_W1C (0x178ull)
+#define NPA_AF_ERR_INT (0x180ull)
+#define NPA_AF_ERR_INT_W1S (0x188ull)
+#define NPA_AF_ERR_INT_ENA_W1S (0x190ull)
+#define NPA_AF_ERR_INT_ENA_W1C (0x198ull)
+#define NPA_AF_RAS (0x1a0ull)
+#define NPA_AF_RAS_W1S (0x1a8ull)
+#define NPA_AF_RAS_ENA_W1S (0x1b0ull)
+#define NPA_AF_RAS_ENA_W1C (0x1b8ull)
+#define NPA_AF_AQ_CFG (0x600ull)
+#define NPA_AF_AQ_BASE (0x610ull)
+#define NPA_AF_AQ_STATUS (0x620ull)
+#define NPA_AF_AQ_DOOR (0x630ull)
+#define NPA_AF_AQ_DONE_WAIT (0x640ull)
+#define NPA_AF_AQ_DONE (0x650ull)
+#define NPA_AF_AQ_DONE_ACK (0x660ull)
+#define NPA_AF_AQ_DONE_TIMER (0x670ull)
+#define NPA_AF_AQ_DONE_INT (0x680ull)
+#define NPA_AF_AQ_DONE_ENA_W1S (0x690ull)
+#define NPA_AF_AQ_DONE_ENA_W1C (0x698ull)
+#define NPA_AF_LFX_AURAS_CFG(a) (0x4000ull | (uint64_t)(a) << 18)
+#define NPA_AF_LFX_LOC_AURAS_BASE(a) (0x4010ull | (uint64_t)(a) << 18)
+#define NPA_AF_LFX_QINTS_CFG(a) (0x4100ull | (uint64_t)(a) << 18)
+#define NPA_AF_LFX_QINTS_BASE(a) (0x4110ull | (uint64_t)(a) << 18)
+#define NPA_PRIV_AF_INT_CFG (0x10000ull)
+#define NPA_PRIV_LFX_CFG(a) (0x10010ull | (uint64_t)(a) << 8)
+#define NPA_PRIV_LFX_INT_CFG(a) (0x10020ull | (uint64_t)(a) << 8)
+#define NPA_AF_RVU_LF_CFG_DEBUG (0x10030ull)
+#define NPA_AF_DTX_FILTER_CTL (0x10040ull)
+
+#define NPA_LF_AURA_OP_ALLOCX(a) (0x10ull | (uint64_t)(a) << 3)
+#define NPA_LF_AURA_OP_FREE0 (0x20ull)
+#define NPA_LF_AURA_OP_FREE1 (0x28ull)
+#define NPA_LF_AURA_OP_CNT (0x30ull)
+#define NPA_LF_AURA_OP_LIMIT (0x50ull)
+#define NPA_LF_AURA_OP_INT (0x60ull)
+#define NPA_LF_AURA_OP_THRESH (0x70ull)
+#define NPA_LF_POOL_OP_PC (0x100ull)
+#define NPA_LF_POOL_OP_AVAILABLE (0x110ull)
+#define NPA_LF_POOL_OP_PTR_START0 (0x120ull)
+#define NPA_LF_POOL_OP_PTR_START1 (0x128ull)
+#define NPA_LF_POOL_OP_PTR_END0 (0x130ull)
+#define NPA_LF_POOL_OP_PTR_END1 (0x138ull)
+#define NPA_LF_POOL_OP_INT (0x160ull)
+#define NPA_LF_POOL_OP_THRESH (0x170ull)
+#define NPA_LF_ERR_INT (0x200ull)
+#define NPA_LF_ERR_INT_W1S (0x208ull)
+#define NPA_LF_ERR_INT_ENA_W1C (0x210ull)
+#define NPA_LF_ERR_INT_ENA_W1S (0x218ull)
+#define NPA_LF_RAS (0x220ull)
+#define NPA_LF_RAS_W1S (0x228ull)
+#define NPA_LF_RAS_ENA_W1C (0x230ull)
+#define NPA_LF_RAS_ENA_W1S (0x238ull)
+#define NPA_LF_QINTX_CNT(a) (0x300ull | (uint64_t)(a) << 12)
+#define NPA_LF_QINTX_INT(a) (0x310ull | (uint64_t)(a) << 12)
+#define NPA_LF_QINTX_ENA_W1S(a) (0x320ull | (uint64_t)(a) << 12)
+#define NPA_LF_QINTX_ENA_W1C(a) (0x330ull | (uint64_t)(a) << 12)
+
+
+/* Enum offsets */
+
+#define NPA_AQ_COMP_NOTDONE (0x0ull)
+#define NPA_AQ_COMP_GOOD (0x1ull)
+#define NPA_AQ_COMP_SWERR (0x2ull)
+#define NPA_AQ_COMP_CTX_POISON (0x3ull)
+#define NPA_AQ_COMP_CTX_FAULT (0x4ull)
+#define NPA_AQ_COMP_LOCKERR (0x5ull)
+
+#define NPA_AF_INT_VEC_RVU (0x0ull)
+#define NPA_AF_INT_VEC_GEN (0x1ull)
+#define NPA_AF_INT_VEC_AQ_DONE (0x2ull)
+#define NPA_AF_INT_VEC_AF_ERR (0x3ull)
+#define NPA_AF_INT_VEC_POISON (0x4ull)
+
+#define NPA_AQ_INSTOP_NOP (0x0ull)
+#define NPA_AQ_INSTOP_INIT (0x1ull)
+#define NPA_AQ_INSTOP_WRITE (0x2ull)
+#define NPA_AQ_INSTOP_READ (0x3ull)
+#define NPA_AQ_INSTOP_LOCK (0x4ull)
+#define NPA_AQ_INSTOP_UNLOCK (0x5ull)
+
+#define NPA_AQ_CTYPE_AURA (0x0ull)
+#define NPA_AQ_CTYPE_POOL (0x1ull)
+
+#define NPA_BPINTF_NIX0_RX (0x0ull)
+#define NPA_BPINTF_NIX1_RX (0x1ull)
+
+#define NPA_AURA_ERR_INT_AURA_FREE_UNDER (0x0ull)
+#define NPA_AURA_ERR_INT_AURA_ADD_OVER (0x1ull)
+#define NPA_AURA_ERR_INT_AURA_ADD_UNDER (0x2ull)
+#define NPA_AURA_ERR_INT_POOL_DIS (0x3ull)
+#define NPA_AURA_ERR_INT_R4 (0x4ull)
+#define NPA_AURA_ERR_INT_R5 (0x5ull)
+#define NPA_AURA_ERR_INT_R6 (0x6ull)
+#define NPA_AURA_ERR_INT_R7 (0x7ull)
+
+#define NPA_LF_INT_VEC_ERR_INT (0x40ull)
+#define NPA_LF_INT_VEC_POISON (0x41ull)
+#define NPA_LF_INT_VEC_QINT_END (0x3full)
+#define NPA_LF_INT_VEC_QINT_START (0x0ull)
+
+#define NPA_INPQ_SSO (0x4ull)
+#define NPA_INPQ_TIM (0x5ull)
+#define NPA_INPQ_DPI (0x6ull)
+#define NPA_INPQ_AURA_OP (0xeull)
+#define NPA_INPQ_INTERNAL_RSV (0xfull)
+#define NPA_INPQ_NIX0_RX (0x0ull)
+#define NPA_INPQ_NIX1_RX (0x2ull)
+#define NPA_INPQ_NIX0_TX (0x1ull)
+#define NPA_INPQ_NIX1_TX (0x3ull)
+#define NPA_INPQ_R_END (0xdull)
+#define NPA_INPQ_R_START (0x7ull)
+
+#define NPA_POOL_ERR_INT_OVFLS (0x0ull)
+#define NPA_POOL_ERR_INT_RANGE (0x1ull)
+#define NPA_POOL_ERR_INT_PERR (0x2ull)
+#define NPA_POOL_ERR_INT_R3 (0x3ull)
+#define NPA_POOL_ERR_INT_R4 (0x4ull)
+#define NPA_POOL_ERR_INT_R5 (0x5ull)
+#define NPA_POOL_ERR_INT_R6 (0x6ull)
+#define NPA_POOL_ERR_INT_R7 (0x7ull)
+
+#define NPA_NDC0_PORT_AURA0 (0x0ull)
+#define NPA_NDC0_PORT_AURA1 (0x1ull)
+#define NPA_NDC0_PORT_POOL0 (0x2ull)
+#define NPA_NDC0_PORT_POOL1 (0x3ull)
+#define NPA_NDC0_PORT_STACK0 (0x4ull)
+#define NPA_NDC0_PORT_STACK1 (0x5ull)
+
+#define NPA_LF_ERR_INT_AURA_DIS (0x0ull)
+#define NPA_LF_ERR_INT_AURA_OOR (0x1ull)
+#define NPA_LF_ERR_INT_AURA_FAULT (0xcull)
+#define NPA_LF_ERR_INT_POOL_FAULT (0xdull)
+#define NPA_LF_ERR_INT_STACK_FAULT (0xeull)
+#define NPA_LF_ERR_INT_QINT_FAULT (0xfull)
+
+/* Structures definitions */
+
+/* NPA admin queue instruction structure (two 64-bit words: control in
+ * W0, result address in W1). op is one of NPA_AQ_INSTOP_*, ctype one
+ * of NPA_AQ_CTYPE_*.
+ */
+struct npa_aq_inst_s {
+ uint64_t op : 4;
+ uint64_t ctype : 4;
+ uint64_t lf : 9;
+ uint64_t rsvd_23_17 : 7;
+ uint64_t cindex : 20;
+ uint64_t rsvd_62_44 : 19;
+ uint64_t doneint : 1;
+ uint64_t res_addr : 64; /* W1 */
+};
+
+/* NPA admin queue result structure (two 64-bit words; compcode is one
+ * of NPA_AQ_COMP_*).
+ */
+struct npa_aq_res_s {
+ uint64_t op : 4;
+ uint64_t ctype : 4;
+ uint64_t compcode : 8;
+ uint64_t doneint : 1;
+ uint64_t rsvd_63_17 : 47;
+ uint64_t rsvd_127_64 : 64; /* W1 */
+};
+
+/* NPA aura operation write data structure (one 64-bit word: aura index
+ * plus a drop flag in bit 63).
+ */
+struct npa_aura_op_wdata_s {
+ uint64_t aura : 20;
+ uint64_t rsvd_62_20 : 43;
+ uint64_t drop : 1;
+};
+
+/* NPA aura context structure: 512 bits (W0-W7). Holds the backing pool
+ * address, drop/backpressure thresholds and flow-control settings for
+ * one aura. Do not change field order or widths.
+ */
+struct npa_aura_s {
+ uint64_t pool_addr : 64;/* W0 */
+ uint64_t ena : 1;
+ uint64_t rsvd_66_65 : 2;
+ uint64_t pool_caching : 1;
+ uint64_t pool_way_mask : 16;
+ uint64_t avg_con : 9;
+ uint64_t rsvd_93 : 1;
+ uint64_t pool_drop_ena : 1;
+ uint64_t aura_drop_ena : 1;
+ uint64_t bp_ena : 2;
+ uint64_t rsvd_103_98 : 6;
+ uint64_t aura_drop : 8;
+ uint64_t shift : 6;
+ uint64_t rsvd_119_118 : 2;
+ uint64_t avg_level : 8;
+ uint64_t count : 36;
+ uint64_t rsvd_167_164 : 4;
+ uint64_t nix0_bpid : 9;
+ uint64_t rsvd_179_177 : 3;
+ uint64_t nix1_bpid : 9;
+ uint64_t rsvd_191_189 : 3;
+ uint64_t limit : 36;
+ uint64_t rsvd_231_228 : 4;
+ uint64_t bp : 8;
+ uint64_t rsvd_243_240 : 4;
+ uint64_t fc_ena : 1;
+ uint64_t fc_up_crossing : 1;
+ uint64_t fc_stype : 2;
+ uint64_t fc_hyst_bits : 4;
+ uint64_t rsvd_255_252 : 4;
+ uint64_t fc_addr : 64;/* W4 */
+ uint64_t pool_drop : 8;
+ uint64_t update_time : 16;
+ uint64_t err_int : 8;
+ uint64_t err_int_ena : 8;
+ uint64_t thresh_int : 1;
+ uint64_t thresh_int_ena : 1;
+ uint64_t thresh_up : 1;
+ uint64_t rsvd_363 : 1;
+ uint64_t thresh_qint_idx : 7;
+ uint64_t rsvd_371 : 1;
+ uint64_t err_qint_idx : 7;
+ uint64_t rsvd_383_379 : 5;
+ uint64_t thresh : 36;
+ uint64_t rsvd_447_420 : 28;
+ uint64_t rsvd_511_448 : 64;/* W7 */
+};
+
+/* NPA pool context structure: 1024 bits (W0-W15). Describes one buffer
+ * pool: stack base/pages, buffer geometry, pointer range (W6/W7) and
+ * flow-control/threshold settings. Do not change field order or widths.
+ */
+struct npa_pool_s {
+ uint64_t stack_base : 64;/* W0 */
+ uint64_t ena : 1;
+ uint64_t nat_align : 1;
+ uint64_t rsvd_67_66 : 2;
+ uint64_t stack_caching : 1;
+ uint64_t rsvd_71_69 : 3;
+ uint64_t stack_way_mask : 16;
+ uint64_t buf_offset : 12;
+ uint64_t rsvd_103_100 : 4;
+ uint64_t buf_size : 11;
+ uint64_t rsvd_127_115 : 13;
+ uint64_t stack_max_pages : 32;
+ uint64_t stack_pages : 32;
+ uint64_t op_pc : 48;
+ uint64_t rsvd_255_240 : 16;
+ uint64_t stack_offset : 4;
+ uint64_t rsvd_263_260 : 4;
+ uint64_t shift : 6;
+ uint64_t rsvd_271_270 : 2;
+ uint64_t avg_level : 8;
+ uint64_t avg_con : 9;
+ uint64_t fc_ena : 1;
+ uint64_t fc_stype : 2;
+ uint64_t fc_hyst_bits : 4;
+ uint64_t fc_up_crossing : 1;
+ uint64_t rsvd_299_297 : 3;
+ uint64_t update_time : 16;
+ uint64_t rsvd_319_316 : 4;
+ uint64_t fc_addr : 64;/* W5 */
+ uint64_t ptr_start : 64;/* W6 */
+ uint64_t ptr_end : 64;/* W7 */
+ uint64_t rsvd_535_512 : 24;
+ uint64_t err_int : 8;
+ uint64_t err_int_ena : 8;
+ uint64_t thresh_int : 1;
+ uint64_t thresh_int_ena : 1;
+ uint64_t thresh_up : 1;
+ uint64_t rsvd_555 : 1;
+ uint64_t thresh_qint_idx : 7;
+ uint64_t rsvd_563 : 1;
+ uint64_t err_qint_idx : 7;
+ uint64_t rsvd_575_571 : 5;
+ uint64_t thresh : 36;
+ uint64_t rsvd_639_612 : 28;
+ uint64_t rsvd_703_640 : 64;/* W10 */
+ uint64_t rsvd_767_704 : 64;/* W11 */
+ uint64_t rsvd_831_768 : 64;/* W12 */
+ uint64_t rsvd_895_832 : 64;/* W13 */
+ uint64_t rsvd_959_896 : 64;/* W14 */
+ uint64_t rsvd_1023_960 : 64;/* W15 */
+};
+
+/* NPA queue interrupt context hardware structure: one 32-bit word,
+ * identical layout to struct nix_qint_hw_s in otx2_nix.h.
+ */
+struct npa_qint_hw_s {
+ uint32_t count : 22;
+ uint32_t rsvd_30_22 : 9;
+ uint32_t ena : 1;
+};
+
+#endif /* __OTX2_NPA_HW_H__ */
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_npc.h b/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_npc.h
new file mode 100644
index 000000000..efde1e214
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_npc.h
@@ -0,0 +1,482 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_NPC_HW_H__
+#define __OTX2_NPC_HW_H__
+
+/* Register offsets */
+
+#define NPC_AF_CFG (0x0ull)
+#define NPC_AF_ACTIVE_PC (0x10ull)
+#define NPC_AF_CONST (0x20ull)
+#define NPC_AF_CONST1 (0x30ull)
+#define NPC_AF_BLK_RST (0x40ull)
+#define NPC_AF_MCAM_SCRUB_CTL (0xa0ull)
+#define NPC_AF_KCAM_SCRUB_CTL (0xb0ull)
+#define NPC_AF_KPUX_CFG(a) \
+ (0x500ull | (uint64_t)(a) << 3)
+#define NPC_AF_PCK_CFG (0x600ull)
+#define NPC_AF_PCK_DEF_OL2 (0x610ull)
+#define NPC_AF_PCK_DEF_OIP4 (0x620ull)
+#define NPC_AF_PCK_DEF_OIP6 (0x630ull)
+#define NPC_AF_PCK_DEF_IIP4 (0x640ull)
+#define NPC_AF_KEX_LDATAX_FLAGS_CFG(a) \
+ (0x800ull | (uint64_t)(a) << 3)
+#define NPC_AF_INTFX_KEX_CFG(a) \
+ (0x1010ull | (uint64_t)(a) << 8)
+#define NPC_AF_PKINDX_ACTION0(a) \
+ (0x80000ull | (uint64_t)(a) << 6)
+#define NPC_AF_PKINDX_ACTION1(a) \
+ (0x80008ull | (uint64_t)(a) << 6)
+#define NPC_AF_PKINDX_CPI_DEFX(a, b) \
+ (0x80020ull | (uint64_t)(a) << 6 | (uint64_t)(b) << 3)
+#define NPC_AF_CHLEN90B_PKIND (0x3bull)
+#define NPC_AF_KPUX_ENTRYX_CAMX(a, b, c) \
+ (0x100000ull | (uint64_t)(a) << 14 | (uint64_t)(b) << 6 | \
+ (uint64_t)(c) << 3)
+#define NPC_AF_KPUX_ENTRYX_ACTION0(a, b) \
+ (0x100020ull | (uint64_t)(a) << 14 | (uint64_t)(b) << 6)
+#define NPC_AF_KPUX_ENTRYX_ACTION1(a, b) \
+ (0x100028ull | (uint64_t)(a) << 14 | (uint64_t)(b) << 6)
+#define NPC_AF_KPUX_ENTRY_DISX(a, b) \
+ (0x180000ull | (uint64_t)(a) << 6 | (uint64_t)(b) << 3)
+#define NPC_AF_CPIX_CFG(a) \
+ (0x200000ull | (uint64_t)(a) << 3)
+#define NPC_AF_INTFX_LIDX_LTX_LDX_CFG(a, b, c, d) \
+ (0x900000ull | (uint64_t)(a) << 16 | (uint64_t)(b) << 12 | \
+ (uint64_t)(c) << 5 | (uint64_t)(d) << 3)
+#define NPC_AF_INTFX_LDATAX_FLAGSX_CFG(a, b, c) \
+ (0x980000ull | (uint64_t)(a) << 16 | (uint64_t)(b) << 12 | \
+ (uint64_t)(c) << 3)
+#define NPC_AF_MCAMEX_BANKX_CAMX_INTF(a, b, c) \
+ (0x1000000ull | (uint64_t)(a) << 10 | (uint64_t)(b) << 6 | \
+ (uint64_t)(c) << 3)
+#define NPC_AF_MCAMEX_BANKX_CAMX_W0(a, b, c) \
+ (0x1000010ull | (uint64_t)(a) << 10 | (uint64_t)(b) << 6 | \
+ (uint64_t)(c) << 3)
+#define NPC_AF_MCAMEX_BANKX_CAMX_W1(a, b, c) \
+ (0x1000020ull | (uint64_t)(a) << 10 | (uint64_t)(b) << 6 | \
+ (uint64_t)(c) << 3)
+#define NPC_AF_MCAMEX_BANKX_CFG(a, b) \
+ (0x1800000ull | (uint64_t)(a) << 8 | (uint64_t)(b) << 4)
+#define NPC_AF_MCAMEX_BANKX_STAT_ACT(a, b) \
+ (0x1880000ull | (uint64_t)(a) << 8 | (uint64_t)(b) << 4)
+#define NPC_AF_MATCH_STATX(a) \
+ (0x1880008ull | (uint64_t)(a) << 8)
+#define NPC_AF_INTFX_MISS_STAT_ACT(a) \
+ (0x1880040ull + (uint64_t)(a) * 0x8)
+#define NPC_AF_MCAMEX_BANKX_ACTION(a, b) \
+ (0x1900000ull | (uint64_t)(a) << 8 | (uint64_t)(b) << 4)
+#define NPC_AF_MCAMEX_BANKX_TAG_ACT(a, b) \
+ (0x1900008ull | (uint64_t)(a) << 8 | (uint64_t)(b) << 4)
+#define NPC_AF_INTFX_MISS_ACT(a) \
+ (0x1a00000ull | (uint64_t)(a) << 4)
+#define NPC_AF_INTFX_MISS_TAG_ACT(a) \
+ (0x1b00008ull | (uint64_t)(a) << 4)
+#define NPC_AF_MCAM_BANKX_HITX(a, b) \
+ (0x1c80000ull | (uint64_t)(a) << 8 | (uint64_t)(b) << 4)
+#define NPC_AF_LKUP_CTL (0x2000000ull)
+#define NPC_AF_LKUP_DATAX(a) \
+ (0x2000200ull | (uint64_t)(a) << 4)
+#define NPC_AF_LKUP_RESULTX(a) \
+ (0x2000400ull | (uint64_t)(a) << 4)
+#define NPC_AF_INTFX_STAT(a) \
+ (0x2000800ull | (uint64_t)(a) << 4)
+#define NPC_AF_DBG_CTL (0x3000000ull)
+#define NPC_AF_DBG_STATUS (0x3000010ull)
+#define NPC_AF_KPUX_DBG(a) \
+ (0x3000020ull | (uint64_t)(a) << 8)
+#define NPC_AF_IKPU_ERR_CTL (0x3000080ull)
+#define NPC_AF_KPUX_ERR_CTL(a) \
+ (0x30000a0ull | (uint64_t)(a) << 8)
+#define NPC_AF_MCAM_DBG (0x3001000ull)
+#define NPC_AF_DBG_DATAX(a) \
+ (0x3001400ull | (uint64_t)(a) << 4)
+#define NPC_AF_DBG_RESULTX(a) \
+ (0x3001800ull | (uint64_t)(a) << 4)
+
+
+/* Enum offsets */
+
+#define NPC_INTF_NIX0_RX (0x0ull)
+#define NPC_INTF_NIX0_TX (0x1ull)
+
+#define NPC_LKUPOP_PKT (0x0ull)
+#define NPC_LKUPOP_KEY (0x1ull)
+
+#define NPC_MCAM_KEY_X1 (0x0ull)
+#define NPC_MCAM_KEY_X2 (0x1ull)
+#define NPC_MCAM_KEY_X4 (0x2ull)
+
+/* NPC error levels: which stage/layer (RE, LA..LH, NIX) reported a
+ * parse error. Values are fixed by hardware — do not renumber.
+ */
+enum NPC_ERRLEV_E {
+ NPC_ERRLEV_RE = 0,
+ NPC_ERRLEV_LA = 1,
+ NPC_ERRLEV_LB = 2,
+ NPC_ERRLEV_LC = 3,
+ NPC_ERRLEV_LD = 4,
+ NPC_ERRLEV_LE = 5,
+ NPC_ERRLEV_LF = 6,
+ NPC_ERRLEV_LG = 7,
+ NPC_ERRLEV_LH = 8,
+ NPC_ERRLEV_R9 = 9,
+ NPC_ERRLEV_R10 = 10,
+ NPC_ERRLEV_R11 = 11,
+ NPC_ERRLEV_R12 = 12,
+ NPC_ERRLEV_R13 = 13,
+ NPC_ERRLEV_R14 = 14,
+ NPC_ERRLEV_NIX = 15,
+ NPC_ERRLEV_ENUM_LAST = 16,
+};
+
+/* KPU parser error codes (NPC_EC_*). NPC_EC_NOERR must remain 0 and
+ * NPC_EC_LAST must remain the final entry.
+ */
+enum npc_kpu_err_code {
+ NPC_EC_NOERR = 0, /* has to be zero */
+ NPC_EC_UNK,
+ NPC_EC_IH_LENGTH,
+ NPC_EC_EDSA_UNK,
+ NPC_EC_L2_K1,
+ NPC_EC_L2_K2,
+ NPC_EC_L2_K3,
+ NPC_EC_L2_K3_ETYPE_UNK,
+ NPC_EC_L2_K4,
+ NPC_EC_MPLS_2MANY,
+ NPC_EC_MPLS_UNK,
+ NPC_EC_NSH_UNK,
+ NPC_EC_IP_TTL_0,
+ NPC_EC_IP_FRAG_OFFSET_1,
+ NPC_EC_IP_VER,
+ NPC_EC_IP6_HOP_0,
+ NPC_EC_IP6_VER,
+ NPC_EC_TCP_FLAGS_FIN_ONLY,
+ NPC_EC_TCP_FLAGS_ZERO,
+ NPC_EC_TCP_FLAGS_RST_FIN,
+ NPC_EC_TCP_FLAGS_URG_SYN,
+ NPC_EC_TCP_FLAGS_RST_SYN,
+ NPC_EC_TCP_FLAGS_SYN_FIN,
+ NPC_EC_VXLAN,
+ NPC_EC_NVGRE,
+ NPC_EC_GRE,
+ NPC_EC_GRE_VER1,
+ NPC_EC_L4,
+ NPC_EC_OIP4_CSUM,
+ NPC_EC_IIP4_CSUM,
+ NPC_EC_LAST /* has to be the last item */
+};
+
+/* NPC layer identifiers LA..LH (outermost to innermost parse layer) */
+enum NPC_LID_E {
+ NPC_LID_LA = 0,
+ NPC_LID_LB,
+ NPC_LID_LC,
+ NPC_LID_LD,
+ NPC_LID_LE,
+ NPC_LID_LF,
+ NPC_LID_LG,
+ NPC_LID_LH,
+};
+
+#define NPC_LT_NA 0
+
+/* LA (outer link layer) layer types; 0 is NPC_LT_NA (no layer) */
+enum npc_kpu_la_ltype {
+ NPC_LT_LA_8023 = 1,
+ NPC_LT_LA_ETHER,
+ NPC_LT_LA_IH_NIX_ETHER,
+ NPC_LT_LA_IH_8_ETHER,
+ NPC_LT_LA_IH_4_ETHER,
+ NPC_LT_LA_IH_2_ETHER,
+ NPC_LT_LA_HIGIG2_ETHER,
+ NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_LT_LA_CH_LEN_90B_ETHER, /* Custom L2 header of length 90 bytes */
+};
+
+/* LB layer types: VLAN/tagging variants (CTAG, QinQ, DSA, EDSA, ...) */
+enum npc_kpu_lb_ltype {
+ NPC_LT_LB_ETAG = 1,
+ NPC_LT_LB_CTAG,
+ NPC_LT_LB_STAG_QINQ,
+ NPC_LT_LB_BTAG,
+ NPC_LT_LB_ITAG,
+ NPC_LT_LB_DSA,
+ NPC_LT_LB_DSA_VLAN,
+ NPC_LT_LB_EDSA,
+ NPC_LT_LB_EDSA_VLAN,
+ NPC_LT_LB_EXDSA,
+ NPC_LT_LB_EXDSA_VLAN,
+};
+
+/* LC layer types: outer L3 protocols (IPv4/IPv6, ARP, MPLS, ...) */
+enum npc_kpu_lc_ltype {
+ NPC_LT_LC_PTP = 1,
+ NPC_LT_LC_IP,
+ NPC_LT_LC_IP_OPT,
+ NPC_LT_LC_IP6,
+ NPC_LT_LC_IP6_EXT,
+ NPC_LT_LC_ARP,
+ NPC_LT_LC_RARP,
+ NPC_LT_LC_MPLS,
+ NPC_LT_LC_NSH,
+ NPC_LT_LC_FCOE,
+};
+
+/* LD layer types: outer L4 protocols. Don't modify Ltypes up to SCTP,
+ * otherwise it will affect flow tag calculation and thus RSS.
+ */
+enum npc_kpu_ld_ltype {
+ NPC_LT_LD_TCP = 1,
+ NPC_LT_LD_UDP,
+ NPC_LT_LD_ICMP,
+ NPC_LT_LD_SCTP,
+ NPC_LT_LD_ICMP6,
+ NPC_LT_LD_IGMP = 8,
+ NPC_LT_LD_ESP,
+ NPC_LT_LD_AH,
+ NPC_LT_LD_GRE,
+ NPC_LT_LD_NVGRE,
+ NPC_LT_LD_NSH,
+ NPC_LT_LD_TU_MPLS_IN_NSH,
+ NPC_LT_LD_TU_MPLS_IN_IP,
+};
+
+/* LE layer types: tunnel encapsulations (VXLAN, GENEVE, GTP, ...).
+ * Note the explicit gap: NPC_LT_LE_GTPU is pinned to 4.
+ */
+enum npc_kpu_le_ltype {
+ NPC_LT_LE_VXLAN = 1,
+ NPC_LT_LE_GENEVE,
+ NPC_LT_LE_GTPU = 4,
+ NPC_LT_LE_VXLANGPE,
+ NPC_LT_LE_GTPC,
+ NPC_LT_LE_NSH,
+ NPC_LT_LE_TU_MPLS_IN_GRE,
+ NPC_LT_LE_TU_NSH_IN_GRE,
+ NPC_LT_LE_TU_MPLS_IN_UDP,
+};
+
+/* LF layer types: tunneled (inner) L2 / encapsulation payloads */
+enum npc_kpu_lf_ltype {
+ NPC_LT_LF_TU_ETHER = 1,
+ NPC_LT_LF_TU_PPP,
+ NPC_LT_LF_TU_MPLS_IN_VXLANGPE,
+ NPC_LT_LF_TU_NSH_IN_VXLANGPE,
+ NPC_LT_LF_TU_MPLS_IN_NSH,
+ NPC_LT_LF_TU_3RD_NSH,
+};
+
+/* LG layer types: tunneled (inner) L3 protocols */
+enum npc_kpu_lg_ltype {
+ NPC_LT_LG_TU_IP = 1,
+ NPC_LT_LG_TU_IP6,
+ NPC_LT_LG_TU_ARP,
+ NPC_LT_LG_TU_ETHER_IN_NSH,
+};
+
+/* LH layer types: tunneled (inner) L4 protocols. Don't modify Ltypes
+ * up to SCTP, otherwise it will affect flow tag calculation and thus
+ * RSS.
+ */
+enum npc_kpu_lh_ltype {
+ NPC_LT_LH_TU_TCP = 1,
+ NPC_LT_LH_TU_UDP,
+ NPC_LT_LH_TU_ICMP,
+ NPC_LT_LH_TU_SCTP,
+ NPC_LT_LH_TU_ICMP6,
+ NPC_LT_LH_TU_IGMP = 8,
+ NPC_LT_LH_TU_ESP,
+ NPC_LT_LH_TU_AH,
+};
+
+/* Structures definitions */
+/* One KPU profile CAM entry: parser state plus three data-pattern
+ * match words, each with value and mask.
+ */
+struct npc_kpu_profile_cam {
+ uint8_t state;
+ uint8_t state_mask;
+ uint16_t dp0;
+ uint16_t dp0_mask;
+ uint16_t dp1;
+ uint16_t dp1_mask;
+ uint16_t dp2;
+ uint16_t dp2_mask;
+};
+
+/* One KPU profile action entry: what the parser does when the paired
+ * CAM entry (same index) matches. Field names correspond 1:1 to the
+ * npc_kpu_action0/npc_kpu_action1 register bitfields below.
+ */
+struct npc_kpu_profile_action {
+ uint8_t errlev; /* error level/code to report on this rule */
+ uint8_t errcode;
+ uint8_t dp0_offset; /* offsets for the next data-pair extraction */
+ uint8_t dp1_offset;
+ uint8_t dp2_offset;
+ uint8_t bypass_count;
+ uint8_t parse_done; /* non-zero: stop parsing after this rule */
+ uint8_t next_state; /* parser state to transition to */
+ uint8_t ptr_advance; /* bytes to advance the parse pointer */
+ uint8_t cap_ena; /* capture lid/ltype/flags into the result */
+ uint8_t lid;
+ uint8_t ltype;
+ uint8_t flags;
+ uint8_t offset; /* variable-length header size computation */
+ uint8_t mask;
+ uint8_t right;
+ uint8_t shift;
+};
+
+/* A complete KPU profile: parallel arrays of CAM entries and their
+ * actions. cam[i] pairs with action[i].
+ */
+struct npc_kpu_profile {
+ int cam_entries; /* number of valid entries in 'cam' */
+ int action_entries; /* number of valid entries in 'action' */
+ struct npc_kpu_profile_cam *cam;
+ struct npc_kpu_profile_action *action;
+};
+
+/* NPC KPU register formats */
+/* KPU CAM register layout (64-bit word).
+ * NOTE(review): like all register layouts in these headers, this
+ * relies on LSB-first bitfield allocation (little-endian GCC/Clang).
+ */
+struct npc_kpu_cam {
+ uint64_t dp0_data : 16;
+ uint64_t dp1_data : 16;
+ uint64_t dp2_data : 16;
+ uint64_t state : 8;
+ uint64_t rsvd_63_56 : 8;
+};
+
+/* KPU action word 0: parse-flow control (next state, pointer advance,
+ * capture enables, variable-length header computation).
+ */
+struct npc_kpu_action0 {
+ uint64_t var_len_shift : 3;
+ uint64_t var_len_right : 1;
+ uint64_t var_len_mask : 8;
+ uint64_t var_len_offset : 8;
+ uint64_t ptr_advance : 8;
+ uint64_t capture_flags : 8;
+ uint64_t capture_ltype : 4;
+ uint64_t capture_lid : 3;
+ uint64_t rsvd_43 : 1;
+ uint64_t next_state : 8;
+ uint64_t parse_done : 1;
+ uint64_t capture_ena : 1;
+ uint64_t byp_count : 3;
+ uint64_t rsvd_63_57 : 7;
+};
+
+/* KPU action word 1: data-pair extraction offsets and error report. */
+struct npc_kpu_action1 {
+ uint64_t dp0_offset : 8;
+ uint64_t dp1_offset : 8;
+ uint64_t dp2_offset : 8;
+ uint64_t errcode : 8;
+ uint64_t errlev : 4;
+ uint64_t rsvd_63_36 : 28;
+};
+
+/* Per-pkind channel parse index (CPI) definition register layout:
+ * selects how a CPI is derived (base + masked/shifted header field)
+ * when the layer/flags match conditions hit. 'ena' gates the entry.
+ */
+struct npc_kpu_pkind_cpi_def {
+ uint64_t cpi_base : 10;
+ uint64_t rsvd_11_10 : 2;
+ uint64_t add_shift : 3;
+ uint64_t rsvd_15 : 1;
+ uint64_t add_mask : 8;
+ uint64_t add_offset : 8;
+ uint64_t flags_mask : 8;
+ uint64_t flags_match : 8;
+ uint64_t ltype_mask : 4;
+ uint64_t ltype_match : 4;
+ uint64_t lid : 3;
+ uint64_t rsvd_62_59 : 4;
+ uint64_t ena : 1; /* entry enable */
+};
+
+/* NIX RX MCAM action word: what to do with a packet on MCAM hit
+ * (op selects drop/unicast/RSS/...; index is op-dependent, e.g. an
+ * RQ or RSS group index — confirm against the mbox definitions).
+ */
+struct nix_rx_action {
+ uint64_t op :4;
+ uint64_t pf_func :16;
+ uint64_t index :20;
+ uint64_t match_id :16;
+ uint64_t flow_key_alg :5;
+ uint64_t rsvd_63_61 :3;
+};
+
+/* NIX TX MCAM action word; same shape as RX minus pf_func/flow key. */
+struct nix_tx_action {
+ uint64_t op :4;
+ uint64_t rsvd_11_4 :8;
+ uint64_t index :20;
+ uint64_t match_id :16;
+ uint64_t rsvd_63_48 :16;
+};
+
+/* NPC layer parse information structure */
+struct npc_layer_info_s {
+ uint32_t lptr : 8; /* byte pointer to the layer in the packet */
+ uint32_t flags : 8; /* parser flags captured for the layer */
+ uint32_t ltype : 4; /* layer type (npc_kpu_*_ltype code) */
+ uint32_t rsvd_31_20 : 12;
+};
+
+/* NPC layer mcam search key extract structure */
+struct npc_layer_kex_s {
+ uint16_t flags : 8; /* layer flags contributed to the MCAM key */
+ uint16_t ltype : 4; /* layer type contributed to the MCAM key */
+ uint16_t rsvd_15_12 : 4;
+};
+
+/* NPC mcam search key x1 structure */
+/* 1x-width MCAM search key: interface + 112 bits of key data spread
+ * over words W0-W2 (the W1/W2 comments mark 64-bit word boundaries).
+ */
+struct npc_mcam_key_x1_s {
+ uint64_t intf : 2;
+ uint64_t rsvd_63_2 : 62;
+ uint64_t kw0 : 64; /* W1 */
+ uint64_t kw1 : 48;
+ uint64_t rsvd_191_176 : 16;
+};
+
+/* NPC mcam search key x2 structure */
+/* 2x-width MCAM search key: interface + 224 bits of key data. */
+struct npc_mcam_key_x2_s {
+ uint64_t intf : 2;
+ uint64_t rsvd_63_2 : 62;
+ uint64_t kw0 : 64; /* W1 */
+ uint64_t kw1 : 64; /* W2 */
+ uint64_t kw2 : 64; /* W3 */
+ uint64_t kw3 : 32;
+ uint64_t rsvd_319_288 : 32;
+};
+
+/* NPC mcam search key x4 structure */
+/* 4x-width MCAM search key: interface + 7 full 64-bit key words. */
+struct npc_mcam_key_x4_s {
+ uint64_t intf : 2;
+ uint64_t rsvd_63_2 : 62;
+ uint64_t kw0 : 64; /* W1 */
+ uint64_t kw1 : 64; /* W2 */
+ uint64_t kw2 : 64; /* W3 */
+ uint64_t kw3 : 64; /* W4 */
+ uint64_t kw4 : 64; /* W5 */
+ uint64_t kw5 : 64; /* W6 */
+ uint64_t kw6 : 64; /* W7 */
+};
+
+/* NPC parse key extract structure */
+/* Parse-result fields available to the key extractor: channel, error
+ * info, L2/L3 multicast-broadcast flags, and a 12-bit layer kex
+ * (npc_layer_kex_s) per layer A-H.
+ */
+struct npc_parse_kex_s {
+ uint64_t chan : 12;
+ uint64_t errlev : 4;
+ uint64_t errcode : 8;
+ uint64_t l2m : 1; /* L2 multicast */
+ uint64_t l2b : 1; /* L2 broadcast */
+ uint64_t l3m : 1; /* L3 multicast */
+ uint64_t l3b : 1; /* L3 broadcast */
+ uint64_t la : 12;
+ uint64_t lb : 12;
+ uint64_t lc : 12;
+ uint64_t ld : 12;
+ uint64_t le : 12;
+ uint64_t lf : 12;
+ uint64_t lg : 12;
+ uint64_t lh : 12;
+ uint64_t rsvd_127_124 : 4;
+};
+
+/* NPC result structure */
+/* Full NPC parse result (6 x 64-bit words): W0 carries interface,
+ * pkind, channel and error/multicast flags; W1/W2 the MCAM action and
+ * vtag action; W3-W5 one 20-bit npc_layer_info_s per layer A-H.
+ */
+struct npc_result_s {
+ uint64_t intf : 2;
+ uint64_t pkind : 6;
+ uint64_t chan : 12;
+ uint64_t errlev : 4;
+ uint64_t errcode : 8;
+ uint64_t l2m : 1;
+ uint64_t l2b : 1;
+ uint64_t l3m : 1;
+ uint64_t l3b : 1;
+ uint64_t eoh_ptr : 8;
+ uint64_t rsvd_63_44 : 20;
+ uint64_t action : 64; /* W1 */
+ uint64_t vtag_action : 64; /* W2 */
+ uint64_t la : 20;
+ uint64_t lb : 20;
+ uint64_t lc : 20;
+ uint64_t rsvd_255_252 : 4;
+ uint64_t ld : 20;
+ uint64_t le : 20;
+ uint64_t lf : 20;
+ uint64_t rsvd_319_316 : 4;
+ uint64_t lg : 20;
+ uint64_t lh : 20;
+ uint64_t rsvd_383_360 : 24;
+};
+
+#endif /* __OTX2_NPC_HW_H__ */
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_rvu.h b/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_rvu.h
new file mode 100644
index 000000000..f2037ec57
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_rvu.h
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_RVU_HW_H__
+#define __OTX2_RVU_HW_H__
+
+/* Register offsets */
+
+/* RVU_AF_*: admin-function (AF) view; RVU_PRIV_*: privileged per-PF /
+ * per-HWVF resource provisioning registers. Offsets are relative to
+ * the RVU AF BAR.
+ */
+#define RVU_AF_MSIXTR_BASE (0x10ull)
+#define RVU_AF_BLK_RST (0x30ull)
+#define RVU_AF_PF_BAR4_ADDR (0x40ull)
+#define RVU_AF_RAS (0x100ull)
+#define RVU_AF_RAS_W1S (0x108ull)
+#define RVU_AF_RAS_ENA_W1S (0x110ull)
+#define RVU_AF_RAS_ENA_W1C (0x118ull)
+#define RVU_AF_GEN_INT (0x120ull)
+#define RVU_AF_GEN_INT_W1S (0x128ull)
+#define RVU_AF_GEN_INT_ENA_W1S (0x130ull)
+#define RVU_AF_GEN_INT_ENA_W1C (0x138ull)
+#define RVU_AF_AFPFX_MBOXX(a, b) \
+ (0x2000ull | (uint64_t)(a) << 4 | (uint64_t)(b) << 3)
+#define RVU_AF_PFME_STATUS (0x2800ull)
+#define RVU_AF_PFTRPEND (0x2810ull)
+#define RVU_AF_PFTRPEND_W1S (0x2820ull)
+#define RVU_AF_PF_RST (0x2840ull)
+#define RVU_AF_HWVF_RST (0x2850ull)
+#define RVU_AF_PFAF_MBOX_INT (0x2880ull)
+#define RVU_AF_PFAF_MBOX_INT_W1S (0x2888ull)
+#define RVU_AF_PFAF_MBOX_INT_ENA_W1S (0x2890ull)
+#define RVU_AF_PFAF_MBOX_INT_ENA_W1C (0x2898ull)
+#define RVU_AF_PFFLR_INT (0x28a0ull)
+#define RVU_AF_PFFLR_INT_W1S (0x28a8ull)
+#define RVU_AF_PFFLR_INT_ENA_W1S (0x28b0ull)
+#define RVU_AF_PFFLR_INT_ENA_W1C (0x28b8ull)
+#define RVU_AF_PFME_INT (0x28c0ull)
+#define RVU_AF_PFME_INT_W1S (0x28c8ull)
+#define RVU_AF_PFME_INT_ENA_W1S (0x28d0ull)
+#define RVU_AF_PFME_INT_ENA_W1C (0x28d8ull)
+#define RVU_PRIV_CONST (0x8000000ull)
+#define RVU_PRIV_GEN_CFG (0x8000010ull)
+#define RVU_PRIV_CLK_CFG (0x8000020ull)
+#define RVU_PRIV_ACTIVE_PC (0x8000030ull)
+#define RVU_PRIV_PFX_CFG(a) (0x8000100ull | (uint64_t)(a) << 16)
+#define RVU_PRIV_PFX_MSIX_CFG(a) (0x8000110ull | (uint64_t)(a) << 16)
+#define RVU_PRIV_PFX_ID_CFG(a) (0x8000120ull | (uint64_t)(a) << 16)
+#define RVU_PRIV_PFX_INT_CFG(a) (0x8000200ull | (uint64_t)(a) << 16)
+#define RVU_PRIV_PFX_NIXX_CFG(a, b) \
+ (0x8000300ull | (uint64_t)(a) << 16 | (uint64_t)(b) << 3)
+#define RVU_PRIV_PFX_NPA_CFG(a) (0x8000310ull | (uint64_t)(a) << 16)
+#define RVU_PRIV_PFX_SSO_CFG(a) (0x8000320ull | (uint64_t)(a) << 16)
+#define RVU_PRIV_PFX_SSOW_CFG(a) (0x8000330ull | (uint64_t)(a) << 16)
+#define RVU_PRIV_PFX_TIM_CFG(a) (0x8000340ull | (uint64_t)(a) << 16)
+#define RVU_PRIV_PFX_CPTX_CFG(a, b) \
+ (0x8000350ull | (uint64_t)(a) << 16 | (uint64_t)(b) << 3)
+#define RVU_PRIV_BLOCK_TYPEX_REV(a) (0x8000400ull | (uint64_t)(a) << 3)
+#define RVU_PRIV_HWVFX_INT_CFG(a) (0x8001280ull | (uint64_t)(a) << 16)
+#define RVU_PRIV_HWVFX_NIXX_CFG(a, b) \
+ (0x8001300ull | (uint64_t)(a) << 16 | (uint64_t)(b) << 3)
+#define RVU_PRIV_HWVFX_NPA_CFG(a) (0x8001310ull | (uint64_t)(a) << 16)
+#define RVU_PRIV_HWVFX_SSO_CFG(a) (0x8001320ull | (uint64_t)(a) << 16)
+#define RVU_PRIV_HWVFX_SSOW_CFG(a) (0x8001330ull | (uint64_t)(a) << 16)
+#define RVU_PRIV_HWVFX_TIM_CFG(a) (0x8001340ull | (uint64_t)(a) << 16)
+#define RVU_PRIV_HWVFX_CPTX_CFG(a, b) \
+ (0x8001350ull | (uint64_t)(a) << 16 | (uint64_t)(b) << 3)
+
+/* RVU_PF_*: per-PF BAR2 view; RVU_VF_*: per-VF BAR2 view. */
+#define RVU_PF_VFX_PFVF_MBOXX(a, b) \
+ (0x0ull | (uint64_t)(a) << 12 | (uint64_t)(b) << 3)
+#define RVU_PF_VF_BAR4_ADDR (0x10ull)
+#define RVU_PF_BLOCK_ADDRX_DISC(a) (0x200ull | (uint64_t)(a) << 3)
+#define RVU_PF_VFME_STATUSX(a) (0x800ull | (uint64_t)(a) << 3)
+#define RVU_PF_VFTRPENDX(a) (0x820ull | (uint64_t)(a) << 3)
+#define RVU_PF_VFTRPEND_W1SX(a) (0x840ull | (uint64_t)(a) << 3)
+#define RVU_PF_VFPF_MBOX_INTX(a) (0x880ull | (uint64_t)(a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_W1SX(a) (0x8a0ull | (uint64_t)(a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_ENA_W1SX(a) (0x8c0ull | (uint64_t)(a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_ENA_W1CX(a) (0x8e0ull | (uint64_t)(a) << 3)
+#define RVU_PF_VFFLR_INTX(a) (0x900ull | (uint64_t)(a) << 3)
+#define RVU_PF_VFFLR_INT_W1SX(a) (0x920ull | (uint64_t)(a) << 3)
+#define RVU_PF_VFFLR_INT_ENA_W1SX(a) (0x940ull | (uint64_t)(a) << 3)
+#define RVU_PF_VFFLR_INT_ENA_W1CX(a) (0x960ull | (uint64_t)(a) << 3)
+#define RVU_PF_VFME_INTX(a) (0x980ull | (uint64_t)(a) << 3)
+#define RVU_PF_VFME_INT_W1SX(a) (0x9a0ull | (uint64_t)(a) << 3)
+#define RVU_PF_VFME_INT_ENA_W1SX(a) (0x9c0ull | (uint64_t)(a) << 3)
+#define RVU_PF_VFME_INT_ENA_W1CX(a) (0x9e0ull | (uint64_t)(a) << 3)
+#define RVU_PF_PFAF_MBOXX(a) (0xc00ull | (uint64_t)(a) << 3)
+#define RVU_PF_INT (0xc20ull)
+#define RVU_PF_INT_W1S (0xc28ull)
+#define RVU_PF_INT_ENA_W1S (0xc30ull)
+#define RVU_PF_INT_ENA_W1C (0xc38ull)
+#define RVU_PF_MSIX_VECX_ADDR(a) (0x80000ull | (uint64_t)(a) << 4)
+#define RVU_PF_MSIX_VECX_CTL(a) (0x80008ull | (uint64_t)(a) << 4)
+#define RVU_PF_MSIX_PBAX(a) (0xf0000ull | (uint64_t)(a) << 3)
+#define RVU_VF_VFPF_MBOXX(a) (0x0ull | (uint64_t)(a) << 3)
+#define RVU_VF_INT (0x20ull)
+#define RVU_VF_INT_W1S (0x28ull)
+#define RVU_VF_INT_ENA_W1S (0x30ull)
+#define RVU_VF_INT_ENA_W1C (0x38ull)
+#define RVU_VF_BLOCK_ADDRX_DISC(a) (0x200ull | (uint64_t)(a) << 3)
+#define RVU_VF_MSIX_VECX_ADDR(a) (0x80000ull | (uint64_t)(a) << 4)
+#define RVU_VF_MSIX_VECX_CTL(a) (0x80008ull | (uint64_t)(a) << 4)
+#define RVU_VF_MSIX_PBAX(a) (0xf0000ull | (uint64_t)(a) << 3)
+
+
+/* Enum offsets */
+
+/* Physical BAR windows and hardware enumerations (block types, block
+ * addresses, interrupt vector numbers).
+ */
+#define RVU_BAR_RVU_PF_END_BAR0 (0x84f000000000ull)
+#define RVU_BAR_RVU_PF_START_BAR0 (0x840000000000ull)
+#define RVU_BAR_RVU_PFX_FUNCX_BAR2(a, b) \
+ (0x840200000000ull | ((uint64_t)(a) << 36) | ((uint64_t)(b) << 25))
+
+#define RVU_AF_INT_VEC_POISON (0x0ull)
+#define RVU_AF_INT_VEC_PFFLR (0x1ull)
+#define RVU_AF_INT_VEC_PFME (0x2ull)
+#define RVU_AF_INT_VEC_GEN (0x3ull)
+#define RVU_AF_INT_VEC_MBOX (0x4ull)
+
+#define RVU_BLOCK_TYPE_RVUM (0x0ull)
+#define RVU_BLOCK_TYPE_LMT (0x2ull)
+#define RVU_BLOCK_TYPE_NIX (0x3ull)
+#define RVU_BLOCK_TYPE_NPA (0x4ull)
+#define RVU_BLOCK_TYPE_NPC (0x5ull)
+#define RVU_BLOCK_TYPE_SSO (0x6ull)
+#define RVU_BLOCK_TYPE_SSOW (0x7ull)
+#define RVU_BLOCK_TYPE_TIM (0x8ull)
+#define RVU_BLOCK_TYPE_CPT (0x9ull)
+#define RVU_BLOCK_TYPE_NDC (0xaull)
+#define RVU_BLOCK_TYPE_DDF (0xbull)
+#define RVU_BLOCK_TYPE_ZIP (0xcull)
+#define RVU_BLOCK_TYPE_RAD (0xdull)
+#define RVU_BLOCK_TYPE_DFA (0xeull)
+#define RVU_BLOCK_TYPE_HNA (0xfull)
+
+/* Block addresses are not in numeric order by design (hardware
+ * assignment); listed grouped by function.
+ */
+#define RVU_BLOCK_ADDR_RVUM (0x0ull)
+#define RVU_BLOCK_ADDR_LMT (0x1ull)
+#define RVU_BLOCK_ADDR_NPA (0x3ull)
+#define RVU_BLOCK_ADDR_NPC (0x6ull)
+#define RVU_BLOCK_ADDR_SSO (0x7ull)
+#define RVU_BLOCK_ADDR_SSOW (0x8ull)
+#define RVU_BLOCK_ADDR_TIM (0x9ull)
+#define RVU_BLOCK_ADDR_NIX0 (0x4ull)
+#define RVU_BLOCK_ADDR_CPT0 (0xaull)
+#define RVU_BLOCK_ADDR_NDC0 (0xcull)
+#define RVU_BLOCK_ADDR_NDC1 (0xdull)
+#define RVU_BLOCK_ADDR_NDC2 (0xeull)
+#define RVU_BLOCK_ADDR_R_END (0x1full)
+#define RVU_BLOCK_ADDR_R_START (0x14ull)
+
+#define RVU_VF_INT_VEC_MBOX (0x0ull)
+
+#define RVU_PF_INT_VEC_AFPF_MBOX (0x6ull)
+#define RVU_PF_INT_VEC_VFFLR0 (0x0ull)
+#define RVU_PF_INT_VEC_VFFLR1 (0x1ull)
+#define RVU_PF_INT_VEC_VFME0 (0x2ull)
+#define RVU_PF_INT_VEC_VFME1 (0x3ull)
+#define RVU_PF_INT_VEC_VFPF_MBOX0 (0x4ull)
+#define RVU_PF_INT_VEC_VFPF_MBOX1 (0x5ull)
+
+
+/* AF BAR2 alias windows: per-block register aliasing for LF slot 'a',
+ * register offset 'b'. All blocks share the same select/alias offsets.
+ */
+#define AF_BAR2_ALIASX_SIZE (0x100000ull)
+
+#define TIM_AF_BAR2_SEL (0x9000000ull)
+#define SSO_AF_BAR2_SEL (0x9000000ull)
+#define NIX_AF_BAR2_SEL (0x9000000ull)
+#define SSOW_AF_BAR2_SEL (0x9000000ull)
+#define NPA_AF_BAR2_SEL (0x9000000ull)
+#define CPT_AF_BAR2_SEL (0x9000000ull)
+#define RVU_AF_BAR2_SEL (0x9000000ull)
+
+/* NOTE(review): the NIX/NPA variants deliberately discard 'a' and use
+ * slot 0 — presumably because those blocks expose a single LF per
+ * function here; confirm against the AF driver before changing.
+ */
+#define AF_BAR2_ALIASX(a, b) \
+ (0x9100000ull | (uint64_t)(a) << 12 | (uint64_t)(b))
+#define TIM_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(a, b)
+#define SSO_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(a, b)
+#define NIX_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(0, b)
+#define SSOW_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(a, b)
+#define NPA_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(0, b)
+#define CPT_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(a, b)
+#define RVU_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(a, b)
+
+/* Structures definitions */
+
+/* RVU admin function register address structure */
+struct rvu_af_addr_s {
+ uint64_t addr : 28; /* register offset within the block */
+ uint64_t block : 5; /* RVU_BLOCK_ADDR_* selector */
+ uint64_t rsvd_63_33 : 31;
+};
+
+/* RVU function-unique address structure */
+struct rvu_func_addr_s {
+ uint32_t addr : 12; /* register offset within the LF */
+ uint32_t lf_slot : 8; /* local function slot */
+ uint32_t block : 5; /* RVU_BLOCK_ADDR_* selector */
+ uint32_t rsvd_31_25 : 7;
+};
+
+/* RVU msi-x vector structure */
+/* 128-bit MSI-X vector table entry (address word + data/control). */
+struct rvu_msix_vec_s {
+ uint64_t addr : 64; /* W0 */
+ uint64_t data : 32;
+ uint64_t mask : 1;
+ uint64_t pend : 1;
+ uint64_t rsvd_127_98 : 30;
+};
+
+/* RVU pf function identification structure */
+/* 16-bit pf_func identifier: VF/function number in the low 10 bits,
+ * PF number in the high 6 (matches the pf_func fields in mbox msgs).
+ */
+struct rvu_pf_func_s {
+ uint16_t func : 10;
+ uint16_t pf : 6;
+};
+
+#endif /* __OTX2_RVU_HW_H__ */
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_sdp.h b/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_sdp.h
new file mode 100644
index 000000000..1e690f8b3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_sdp.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_SDP_HW_H_
+#define __OTX2_SDP_HW_H_
+
+/* SDP VF IOQs: input/output queue-pair limits per VF. */
+#define SDP_MIN_RINGS_PER_VF (1)
+#define SDP_MAX_RINGS_PER_VF (8)
+
+/* SDP VF IQ configuration */
+#define SDP_VF_MAX_IQ_DESCRIPTORS (512)
+#define SDP_VF_MIN_IQ_DESCRIPTORS (128)
+
+#define SDP_VF_DB_MIN (1)
+#define SDP_VF_DB_TIMEOUT (1)
+#define SDP_VF_INTR_THRESHOLD (0xFFFFFFFF)
+
+/* Supported instruction sizes in bytes. */
+#define SDP_VF_64BYTE_INSTR (64)
+#define SDP_VF_32BYTE_INSTR (32)
+
+/* SDP VF OQ configuration */
+#define SDP_VF_MAX_OQ_DESCRIPTORS (512)
+#define SDP_VF_MIN_OQ_DESCRIPTORS (128)
+#define SDP_VF_OQ_BUF_SIZE (2048)
+#define SDP_VF_OQ_REFIL_THRESHOLD (16)
+
+#define SDP_VF_OQ_INFOPTR_MODE (1)
+#define SDP_VF_OQ_BUFPTR_MODE (0)
+
+#define SDP_VF_OQ_INTR_PKT (1)
+#define SDP_VF_OQ_INTR_TIME (10)
+#define SDP_VF_CFG_IO_QUEUES SDP_MAX_RINGS_PER_VF
+
+/* Wait time in milliseconds for FLR */
+#define SDP_VF_PCI_FLR_WAIT (100)
+#define SDP_VF_BUSY_LOOP_COUNT (10000)
+
+#define SDP_VF_MAX_IO_QUEUES SDP_MAX_RINGS_PER_VF
+#define SDP_VF_MIN_IO_QUEUES SDP_MIN_RINGS_PER_VF
+
+/* SDP VF IOQs per rawdev */
+#define SDP_VF_MAX_IOQS_PER_RAWDEV SDP_VF_MAX_IO_QUEUES
+#define SDP_VF_DEFAULT_IOQS_PER_RAWDEV SDP_VF_MIN_IO_QUEUES
+
+/* SDP VF Register definitions. Per-ring registers are spaced
+ * SDP_VF_RING_OFFSET (128 KiB) apart; the (ring) helper macros below
+ * compute the per-ring address from the ring-0 base.
+ */
+#define SDP_VF_RING_OFFSET (0x1ull << 17)
+
+/* SDP VF IQ Registers */
+#define SDP_VF_R_IN_CONTROL_START (0x10000)
+#define SDP_VF_R_IN_ENABLE_START (0x10010)
+#define SDP_VF_R_IN_INSTR_BADDR_START (0x10020)
+#define SDP_VF_R_IN_INSTR_RSIZE_START (0x10030)
+#define SDP_VF_R_IN_INSTR_DBELL_START (0x10040)
+#define SDP_VF_R_IN_CNTS_START (0x10050)
+#define SDP_VF_R_IN_INT_LEVELS_START (0x10060)
+#define SDP_VF_R_IN_PKT_CNT_START (0x10080)
+#define SDP_VF_R_IN_BYTE_CNT_START (0x10090)
+
+#define SDP_VF_R_IN_CONTROL(ring) \
+ (SDP_VF_R_IN_CONTROL_START + ((ring) * SDP_VF_RING_OFFSET))
+
+#define SDP_VF_R_IN_ENABLE(ring) \
+ (SDP_VF_R_IN_ENABLE_START + ((ring) * SDP_VF_RING_OFFSET))
+
+#define SDP_VF_R_IN_INSTR_BADDR(ring) \
+ (SDP_VF_R_IN_INSTR_BADDR_START + ((ring) * SDP_VF_RING_OFFSET))
+
+#define SDP_VF_R_IN_INSTR_RSIZE(ring) \
+ (SDP_VF_R_IN_INSTR_RSIZE_START + ((ring) * SDP_VF_RING_OFFSET))
+
+#define SDP_VF_R_IN_INSTR_DBELL(ring) \
+ (SDP_VF_R_IN_INSTR_DBELL_START + ((ring) * SDP_VF_RING_OFFSET))
+
+#define SDP_VF_R_IN_CNTS(ring) \
+ (SDP_VF_R_IN_CNTS_START + ((ring) * SDP_VF_RING_OFFSET))
+
+#define SDP_VF_R_IN_INT_LEVELS(ring) \
+ (SDP_VF_R_IN_INT_LEVELS_START + ((ring) * SDP_VF_RING_OFFSET))
+
+#define SDP_VF_R_IN_PKT_CNT(ring) \
+ (SDP_VF_R_IN_PKT_CNT_START + ((ring) * SDP_VF_RING_OFFSET))
+
+#define SDP_VF_R_IN_BYTE_CNT(ring) \
+ (SDP_VF_R_IN_BYTE_CNT_START + ((ring) * SDP_VF_RING_OFFSET))
+
+/* SDP VF IQ Masks */
+#define SDP_VF_R_IN_CTL_RPVF_MASK (0xF)
+#define SDP_VF_R_IN_CTL_RPVF_POS (48)
+
+#define SDP_VF_R_IN_CTL_IDLE (0x1ull << 28)
+#define SDP_VF_R_IN_CTL_RDSIZE (0x3ull << 25) /* Setting to max(4) */
+#define SDP_VF_R_IN_CTL_IS_64B (0x1ull << 24)
+#define SDP_VF_R_IN_CTL_D_NSR (0x1ull << 8)
+#define SDP_VF_R_IN_CTL_D_ESR (0x1ull << 6)
+#define SDP_VF_R_IN_CTL_D_ROR (0x1ull << 5)
+#define SDP_VF_R_IN_CTL_NSR (0x1ull << 3)
+#define SDP_VF_R_IN_CTL_ESR (0x1ull << 1)
+#define SDP_VF_R_IN_CTL_ROR (0x1ull << 0)
+
+#define SDP_VF_R_IN_CTL_MASK \
+ (SDP_VF_R_IN_CTL_RDSIZE | SDP_VF_R_IN_CTL_IS_64B)
+
+/* SDP VF OQ Registers — same per-ring spacing scheme as the IQ side. */
+#define SDP_VF_R_OUT_CNTS_START (0x10100)
+#define SDP_VF_R_OUT_INT_LEVELS_START (0x10110)
+#define SDP_VF_R_OUT_SLIST_BADDR_START (0x10120)
+#define SDP_VF_R_OUT_SLIST_RSIZE_START (0x10130)
+#define SDP_VF_R_OUT_SLIST_DBELL_START (0x10140)
+#define SDP_VF_R_OUT_CONTROL_START (0x10150)
+#define SDP_VF_R_OUT_ENABLE_START (0x10160)
+#define SDP_VF_R_OUT_PKT_CNT_START (0x10180)
+#define SDP_VF_R_OUT_BYTE_CNT_START (0x10190)
+
+#define SDP_VF_R_OUT_CONTROL(ring) \
+ (SDP_VF_R_OUT_CONTROL_START + ((ring) * SDP_VF_RING_OFFSET))
+
+#define SDP_VF_R_OUT_ENABLE(ring) \
+ (SDP_VF_R_OUT_ENABLE_START + ((ring) * SDP_VF_RING_OFFSET))
+
+#define SDP_VF_R_OUT_SLIST_BADDR(ring) \
+ (SDP_VF_R_OUT_SLIST_BADDR_START + ((ring) * SDP_VF_RING_OFFSET))
+
+#define SDP_VF_R_OUT_SLIST_RSIZE(ring) \
+ (SDP_VF_R_OUT_SLIST_RSIZE_START + ((ring) * SDP_VF_RING_OFFSET))
+
+#define SDP_VF_R_OUT_SLIST_DBELL(ring) \
+ (SDP_VF_R_OUT_SLIST_DBELL_START + ((ring) * SDP_VF_RING_OFFSET))
+
+#define SDP_VF_R_OUT_CNTS(ring) \
+ (SDP_VF_R_OUT_CNTS_START + ((ring) * SDP_VF_RING_OFFSET))
+
+#define SDP_VF_R_OUT_INT_LEVELS(ring) \
+ (SDP_VF_R_OUT_INT_LEVELS_START + ((ring) * SDP_VF_RING_OFFSET))
+
+#define SDP_VF_R_OUT_PKT_CNT(ring) \
+ (SDP_VF_R_OUT_PKT_CNT_START + ((ring) * SDP_VF_RING_OFFSET))
+
+#define SDP_VF_R_OUT_BYTE_CNT(ring) \
+ (SDP_VF_R_OUT_BYTE_CNT_START + ((ring) * SDP_VF_RING_OFFSET))
+
+/* SDP VF OQ Masks */
+#define SDP_VF_R_OUT_CTL_IDLE (1ull << 40)
+#define SDP_VF_R_OUT_CTL_ES_I (1ull << 34)
+#define SDP_VF_R_OUT_CTL_NSR_I (1ull << 33)
+#define SDP_VF_R_OUT_CTL_ROR_I (1ull << 32)
+#define SDP_VF_R_OUT_CTL_ES_D (1ull << 30)
+#define SDP_VF_R_OUT_CTL_NSR_D (1ull << 29)
+#define SDP_VF_R_OUT_CTL_ROR_D (1ull << 28)
+#define SDP_VF_R_OUT_CTL_ES_P (1ull << 26)
+#define SDP_VF_R_OUT_CTL_NSR_P (1ull << 25)
+#define SDP_VF_R_OUT_CTL_ROR_P (1ull << 24)
+#define SDP_VF_R_OUT_CTL_IMODE (1ull << 23)
+
+#define SDP_VF_R_OUT_INT_LEVELS_BMODE (1ull << 63)
+#define SDP_VF_R_OUT_INT_LEVELS_TIMET (32)
+
+/* SDP Instruction Header */
+/* 64-bit SDP instruction header prepended to each input command.
+ * Packed; field order is LSB-first within the 64-bit word.
+ */
+struct sdp_instr_ih {
+ /* Data Len */
+ uint64_t tlen:16;
+
+ /* Reserved1 */
+ uint64_t rsvd1:20;
+
+ /* PKIND for SDP */
+ uint64_t pkind:6;
+
+ /* Front Data size */
+ uint64_t fsz:6;
+
+ /* No. of entries in gather list */
+ uint64_t gsz:14;
+
+ /* Gather indicator */
+ uint64_t gather:1;
+
+ /* Reserved2 */
+ uint64_t rsvd2:1;
+} __rte_packed;
+
+#endif /* __OTX2_SDP_HW_H_ */
+
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_sso.h b/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_sso.h
new file mode 100644
index 000000000..98a8130b1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_sso.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_SSO_HW_H__
+#define __OTX2_SSO_HW_H__
+
+/* Register offsets — SSO admin-function (AF) view. Per-HWGRP
+ * registers use a 4 KiB stride ((a) << 12); per-entry tables use an
+ * 8-byte stride ((a) << 3).
+ */
+
+#define SSO_AF_CONST (0x1000ull)
+#define SSO_AF_CONST1 (0x1008ull)
+#define SSO_AF_WQ_INT_PC (0x1020ull)
+#define SSO_AF_NOS_CNT (0x1050ull)
+#define SSO_AF_AW_WE (0x1080ull)
+#define SSO_AF_WS_CFG (0x1088ull)
+#define SSO_AF_GWE_CFG (0x1098ull)
+#define SSO_AF_GWE_RANDOM (0x10b0ull)
+#define SSO_AF_LF_HWGRP_RST (0x10e0ull)
+#define SSO_AF_AW_CFG (0x10f0ull)
+#define SSO_AF_BLK_RST (0x10f8ull)
+#define SSO_AF_ACTIVE_CYCLES0 (0x1100ull)
+#define SSO_AF_ACTIVE_CYCLES1 (0x1108ull)
+#define SSO_AF_ACTIVE_CYCLES2 (0x1110ull)
+#define SSO_AF_ERR0 (0x1220ull)
+#define SSO_AF_ERR0_W1S (0x1228ull)
+#define SSO_AF_ERR0_ENA_W1C (0x1230ull)
+#define SSO_AF_ERR0_ENA_W1S (0x1238ull)
+#define SSO_AF_ERR2 (0x1260ull)
+#define SSO_AF_ERR2_W1S (0x1268ull)
+#define SSO_AF_ERR2_ENA_W1C (0x1270ull)
+#define SSO_AF_ERR2_ENA_W1S (0x1278ull)
+#define SSO_AF_UNMAP_INFO (0x12f0ull)
+#define SSO_AF_UNMAP_INFO2 (0x1300ull)
+#define SSO_AF_UNMAP_INFO3 (0x1310ull)
+#define SSO_AF_RAS (0x1420ull)
+#define SSO_AF_RAS_W1S (0x1430ull)
+#define SSO_AF_RAS_ENA_W1C (0x1460ull)
+#define SSO_AF_RAS_ENA_W1S (0x1470ull)
+#define SSO_AF_AW_INP_CTL (0x2070ull)
+#define SSO_AF_AW_ADD (0x2080ull)
+#define SSO_AF_AW_READ_ARB (0x2090ull)
+#define SSO_AF_XAQ_REQ_PC (0x20b0ull)
+#define SSO_AF_XAQ_LATENCY_PC (0x20b8ull)
+#define SSO_AF_TAQ_CNT (0x20c0ull)
+#define SSO_AF_TAQ_ADD (0x20e0ull)
+#define SSO_AF_POISONX(a) (0x2100ull | (uint64_t)(a) << 3)
+#define SSO_AF_POISONX_W1S(a) (0x2200ull | (uint64_t)(a) << 3)
+#define SSO_PRIV_AF_INT_CFG (0x3000ull)
+#define SSO_AF_RVU_LF_CFG_DEBUG (0x3800ull)
+#define SSO_PRIV_LFX_HWGRP_CFG(a) (0x10000ull | (uint64_t)(a) << 3)
+#define SSO_PRIV_LFX_HWGRP_INT_CFG(a) (0x20000ull | (uint64_t)(a) << 3)
+#define SSO_AF_IU_ACCNTX_CFG(a) (0x50000ull | (uint64_t)(a) << 3)
+#define SSO_AF_IU_ACCNTX_RST(a) (0x60000ull | (uint64_t)(a) << 3)
+#define SSO_AF_XAQX_HEAD_PTR(a) (0x80000ull | (uint64_t)(a) << 3)
+#define SSO_AF_XAQX_TAIL_PTR(a) (0x90000ull | (uint64_t)(a) << 3)
+#define SSO_AF_XAQX_HEAD_NEXT(a) (0xa0000ull | (uint64_t)(a) << 3)
+#define SSO_AF_XAQX_TAIL_NEXT(a) (0xb0000ull | (uint64_t)(a) << 3)
+#define SSO_AF_TIAQX_STATUS(a) (0xc0000ull | (uint64_t)(a) << 3)
+#define SSO_AF_TOAQX_STATUS(a) (0xd0000ull | (uint64_t)(a) << 3)
+#define SSO_AF_XAQX_GMCTL(a) (0xe0000ull | (uint64_t)(a) << 3)
+#define SSO_AF_HWGRPX_IAQ_THR(a) (0x200000ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWGRPX_TAQ_THR(a) (0x200010ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWGRPX_PRI(a) (0x200020ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWGRPX_WS_PC(a) (0x200050ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWGRPX_EXT_PC(a) (0x200060ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWGRPX_WA_PC(a) (0x200070ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWGRPX_TS_PC(a) (0x200080ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWGRPX_DS_PC(a) (0x200090ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWGRPX_DQ_PC(a) (0x2000A0ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWGRPX_PAGE_CNT(a) (0x200100ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWGRPX_AW_STATUS(a) (0x200110ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWGRPX_AW_CFG(a) (0x200120ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWGRPX_AW_TAGSPACE(a) (0x200130ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWGRPX_XAQ_AURA(a) (0x200140ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWGRPX_XAQ_LIMIT(a) (0x200220ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWGRPX_IU_ACCNT(a) (0x200230ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWSX_ARB(a) (0x400100ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWSX_INV(a) (0x400180ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWSX_GMCTL(a) (0x400200ull | (uint64_t)(a) << 12)
+#define SSO_AF_HWSX_SX_GRPMSKX(a, b, c) \
+ (0x400400ull | (uint64_t)(a) << 12 | (uint64_t)(b) << 5 | \
+ (uint64_t)(c) << 3)
+#define SSO_AF_IPL_FREEX(a) (0x800000ull | (uint64_t)(a) << 3)
+#define SSO_AF_IPL_IAQX(a) (0x840000ull | (uint64_t)(a) << 3)
+#define SSO_AF_IPL_DESCHEDX(a) (0x860000ull | (uint64_t)(a) << 3)
+#define SSO_AF_IPL_CONFX(a) (0x880000ull | (uint64_t)(a) << 3)
+#define SSO_AF_NPA_DIGESTX(a) (0x900000ull | (uint64_t)(a) << 3)
+#define SSO_AF_NPA_DIGESTX_W1S(a) (0x900100ull | (uint64_t)(a) << 3)
+#define SSO_AF_BFP_DIGESTX(a) (0x900200ull | (uint64_t)(a) << 3)
+#define SSO_AF_BFP_DIGESTX_W1S(a) (0x900300ull | (uint64_t)(a) << 3)
+#define SSO_AF_BFPN_DIGESTX(a) (0x900400ull | (uint64_t)(a) << 3)
+#define SSO_AF_BFPN_DIGESTX_W1S(a) (0x900500ull | (uint64_t)(a) << 3)
+#define SSO_AF_GRPDIS_DIGESTX(a) (0x900600ull | (uint64_t)(a) << 3)
+#define SSO_AF_GRPDIS_DIGESTX_W1S(a) (0x900700ull | (uint64_t)(a) << 3)
+#define SSO_AF_AWEMPTY_DIGESTX(a) (0x900800ull | (uint64_t)(a) << 3)
+#define SSO_AF_AWEMPTY_DIGESTX_W1S(a) (0x900900ull | (uint64_t)(a) << 3)
+#define SSO_AF_WQP0_DIGESTX(a) (0x900a00ull | (uint64_t)(a) << 3)
+#define SSO_AF_WQP0_DIGESTX_W1S(a) (0x900b00ull | (uint64_t)(a) << 3)
+#define SSO_AF_AW_DROPPED_DIGESTX(a) (0x900c00ull | (uint64_t)(a) << 3)
+#define SSO_AF_AW_DROPPED_DIGESTX_W1S(a) (0x900d00ull | (uint64_t)(a) << 3)
+#define SSO_AF_QCTLDIS_DIGESTX(a) (0x900e00ull | (uint64_t)(a) << 3)
+#define SSO_AF_QCTLDIS_DIGESTX_W1S(a) (0x900f00ull | (uint64_t)(a) << 3)
+#define SSO_AF_XAQDIS_DIGESTX(a) (0x901000ull | (uint64_t)(a) << 3)
+#define SSO_AF_XAQDIS_DIGESTX_W1S(a) (0x901100ull | (uint64_t)(a) << 3)
+#define SSO_AF_FLR_AQ_DIGESTX(a) (0x901200ull | (uint64_t)(a) << 3)
+#define SSO_AF_FLR_AQ_DIGESTX_W1S(a) (0x901300ull | (uint64_t)(a) << 3)
+#define SSO_AF_WS_GMULTI_DIGESTX(a) (0x902000ull | (uint64_t)(a) << 3)
+#define SSO_AF_WS_GMULTI_DIGESTX_W1S(a) (0x902100ull | (uint64_t)(a) << 3)
+#define SSO_AF_WS_GUNMAP_DIGESTX(a) (0x902200ull | (uint64_t)(a) << 3)
+#define SSO_AF_WS_GUNMAP_DIGESTX_W1S(a) (0x902300ull | (uint64_t)(a) << 3)
+#define SSO_AF_WS_AWE_DIGESTX(a) (0x902400ull | (uint64_t)(a) << 3)
+#define SSO_AF_WS_AWE_DIGESTX_W1S(a) (0x902500ull | (uint64_t)(a) << 3)
+#define SSO_AF_WS_GWI_DIGESTX(a) (0x902600ull | (uint64_t)(a) << 3)
+#define SSO_AF_WS_GWI_DIGESTX_W1S(a) (0x902700ull | (uint64_t)(a) << 3)
+#define SSO_AF_WS_NE_DIGESTX(a) (0x902800ull | (uint64_t)(a) << 3)
+#define SSO_AF_WS_NE_DIGESTX_W1S(a) (0x902900ull | (uint64_t)(a) << 3)
+#define SSO_AF_IENTX_TAG(a) (0xa00000ull | (uint64_t)(a) << 3)
+#define SSO_AF_IENTX_GRP(a) (0xa20000ull | (uint64_t)(a) << 3)
+#define SSO_AF_IENTX_PENDTAG(a) (0xa40000ull | (uint64_t)(a) << 3)
+#define SSO_AF_IENTX_LINKS(a) (0xa60000ull | (uint64_t)(a) << 3)
+#define SSO_AF_IENTX_QLINKS(a) (0xa80000ull | (uint64_t)(a) << 3)
+#define SSO_AF_IENTX_WQP(a) (0xaa0000ull | (uint64_t)(a) << 3)
+#define SSO_AF_TAQX_LINK(a) (0xc00000ull | (uint64_t)(a) << 3)
+#define SSO_AF_TAQX_WAEX_TAG(a, b) \
+ (0xe00000ull | (uint64_t)(a) << 8 | (uint64_t)(b) << 4)
+#define SSO_AF_TAQX_WAEX_WQP(a, b) \
+ (0xe00008ull | (uint64_t)(a) << 8 | (uint64_t)(b) << 4)
+
+/* SSO LF (GGRP) registers — per-LF BAR2 view. */
+#define SSO_LF_GGRP_OP_ADD_WORK0 (0x0ull)
+#define SSO_LF_GGRP_OP_ADD_WORK1 (0x8ull)
+#define SSO_LF_GGRP_QCTL (0x20ull)
+#define SSO_LF_GGRP_EXE_DIS (0x80ull)
+#define SSO_LF_GGRP_INT (0x100ull)
+#define SSO_LF_GGRP_INT_W1S (0x108ull)
+#define SSO_LF_GGRP_INT_ENA_W1S (0x110ull)
+#define SSO_LF_GGRP_INT_ENA_W1C (0x118ull)
+#define SSO_LF_GGRP_INT_THR (0x140ull)
+#define SSO_LF_GGRP_INT_CNT (0x180ull)
+#define SSO_LF_GGRP_XAQ_CNT (0x1b0ull)
+#define SSO_LF_GGRP_AQ_CNT (0x1c0ull)
+#define SSO_LF_GGRP_AQ_THR (0x1e0ull)
+#define SSO_LF_GGRP_MISC_CNT (0x200ull)
+
+/* Field masks/shifts for the IAQ (in-unit admission queue) registers. */
+#define SSO_AF_IAQ_FREE_CNT_MASK 0x3FFFull
+#define SSO_AF_IAQ_RSVD_FREE_MASK 0x3FFFull
+#define SSO_AF_IAQ_RSVD_FREE_SHIFT 16
+#define SSO_AF_IAQ_FREE_CNT_MAX SSO_AF_IAQ_FREE_CNT_MASK
+#define SSO_AF_AW_ADD_RSVD_FREE_MASK 0x3FFFull
+#define SSO_AF_AW_ADD_RSVD_FREE_SHIFT 16
+#define SSO_HWGRP_IAQ_MAX_THR_MASK 0x3FFFull
+#define SSO_HWGRP_IAQ_RSVD_THR_MASK 0x3FFFull
+#define SSO_HWGRP_IAQ_MAX_THR_SHIFT 32
+#define SSO_HWGRP_IAQ_RSVD_THR 0x2
+
+/* Field masks/shifts for the TAQ (transitory admission queue). */
+#define SSO_AF_TAQ_FREE_CNT_MASK 0x7FFull
+#define SSO_AF_TAQ_RSVD_FREE_MASK 0x7FFull
+#define SSO_AF_TAQ_RSVD_FREE_SHIFT 16
+#define SSO_AF_TAQ_FREE_CNT_MAX SSO_AF_TAQ_FREE_CNT_MASK
+#define SSO_AF_TAQ_ADD_RSVD_FREE_MASK 0x1FFFull
+#define SSO_AF_TAQ_ADD_RSVD_FREE_SHIFT 16
+#define SSO_HWGRP_TAQ_MAX_THR_MASK 0x7FFull
+#define SSO_HWGRP_TAQ_RSVD_THR_MASK 0x7FFull
+#define SSO_HWGRP_TAQ_MAX_THR_SHIFT 32
+#define SSO_HWGRP_TAQ_RSVD_THR 0x3
+
+/* HWGRP priority register fields (affinity, weight). */
+#define SSO_HWGRP_PRI_AFF_MASK 0xFull
+#define SSO_HWGRP_PRI_AFF_SHIFT 8
+#define SSO_HWGRP_PRI_WGT_MASK 0x3Full
+#define SSO_HWGRP_PRI_WGT_SHIFT 16
+#define SSO_HWGRP_PRI_WGT_LEFT_MASK 0x3Full
+#define SSO_HWGRP_PRI_WGT_LEFT_SHIFT 24
+
+/* HWGRP add-work configuration and status bits. */
+#define SSO_HWGRP_AW_CFG_RWEN BIT_ULL(0)
+#define SSO_HWGRP_AW_CFG_LDWB BIT_ULL(1)
+#define SSO_HWGRP_AW_CFG_LDT BIT_ULL(2)
+#define SSO_HWGRP_AW_CFG_STT BIT_ULL(3)
+#define SSO_HWGRP_AW_CFG_XAQ_BYP_DIS BIT_ULL(4)
+
+#define SSO_HWGRP_AW_STS_TPTR_VLD BIT_ULL(8)
+#define SSO_HWGRP_AW_STS_NPA_FETCH BIT_ULL(9)
+#define SSO_HWGRP_AW_STS_XAQ_BUFSC_MASK 0x7ull
+#define SSO_HWGRP_AW_STS_INIT_STS 0x18ull
+
+/* Enum offsets: interrupt vectors, work-add sources and tag types. */
+
+#define SSO_LF_INT_VEC_GRP (0x0ull)
+
+#define SSO_AF_INT_VEC_ERR0 (0x0ull)
+#define SSO_AF_INT_VEC_ERR2 (0x1ull)
+#define SSO_AF_INT_VEC_RAS (0x2ull)
+
+/* Work-add (WA) source identifiers. */
+#define SSO_WA_IOBN (0x0ull)
+#define SSO_WA_NIXRX (0x1ull)
+#define SSO_WA_CPT (0x2ull)
+#define SSO_WA_ADDWQ (0x3ull)
+#define SSO_WA_DPI (0x4ull)
+#define SSO_WA_NIXTX (0x5ull)
+#define SSO_WA_TIM (0x6ull)
+#define SSO_WA_ZIP (0x7ull)
+
+/* Scheduling tag types. */
+#define SSO_TT_ORDERED (0x0ull)
+#define SSO_TT_ATOMIC (0x1ull)
+#define SSO_TT_UNTAGGED (0x2ull)
+#define SSO_TT_EMPTY (0x3ull)
+
+
+/* Structures definitions */
+
+#endif /* __OTX2_SSO_HW_H__ */
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_ssow.h b/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_ssow.h
new file mode 100644
index 000000000..8a4457803
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_ssow.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_SSOW_HW_H__
+#define __OTX2_SSOW_HW_H__
+
+/* Register offsets */
+
+/* SSOW (SSO work-slot) AF registers. */
+#define SSOW_AF_RVU_LF_HWS_CFG_DEBUG (0x10ull)
+#define SSOW_AF_LF_HWS_RST (0x30ull)
+#define SSOW_PRIV_LFX_HWS_CFG(a) (0x1000ull | (uint64_t)(a) << 3)
+#define SSOW_PRIV_LFX_HWS_INT_CFG(a) (0x2000ull | (uint64_t)(a) << 3)
+#define SSOW_AF_SCRATCH_WS (0x100000ull)
+#define SSOW_AF_SCRATCH_GW (0x200000ull)
+#define SSOW_AF_SCRATCH_AW (0x300000ull)
+
+/* SSOW LF (GWS) registers; SSOW_LF_GWS_OP_* are write-to-trigger
+ * operation addresses, not ordinary state registers.
+ */
+#define SSOW_LF_GWS_LINKS (0x10ull)
+#define SSOW_LF_GWS_PENDWQP (0x40ull)
+#define SSOW_LF_GWS_PENDSTATE (0x50ull)
+#define SSOW_LF_GWS_NW_TIM (0x70ull)
+#define SSOW_LF_GWS_GRPMSK_CHG (0x80ull)
+#define SSOW_LF_GWS_INT (0x100ull)
+#define SSOW_LF_GWS_INT_W1S (0x108ull)
+#define SSOW_LF_GWS_INT_ENA_W1S (0x110ull)
+#define SSOW_LF_GWS_INT_ENA_W1C (0x118ull)
+#define SSOW_LF_GWS_TAG (0x200ull)
+#define SSOW_LF_GWS_WQP (0x210ull)
+#define SSOW_LF_GWS_SWTP (0x220ull)
+#define SSOW_LF_GWS_PENDTAG (0x230ull)
+#define SSOW_LF_GWS_OP_ALLOC_WE (0x400ull)
+#define SSOW_LF_GWS_OP_GET_WORK (0x600ull)
+#define SSOW_LF_GWS_OP_SWTAG_FLUSH (0x800ull)
+#define SSOW_LF_GWS_OP_SWTAG_UNTAG (0x810ull)
+#define SSOW_LF_GWS_OP_SWTP_CLR (0x820ull)
+#define SSOW_LF_GWS_OP_UPD_WQP_GRP0 (0x830ull)
+#define SSOW_LF_GWS_OP_UPD_WQP_GRP1 (0x838ull)
+#define SSOW_LF_GWS_OP_DESCHED (0x880ull)
+#define SSOW_LF_GWS_OP_DESCHED_NOSCH (0x8c0ull)
+#define SSOW_LF_GWS_OP_SWTAG_DESCHED (0x980ull)
+#define SSOW_LF_GWS_OP_SWTAG_NOSCHED (0x9c0ull)
+#define SSOW_LF_GWS_OP_CLR_NSCHED0 (0xa00ull)
+#define SSOW_LF_GWS_OP_CLR_NSCHED1 (0xa08ull)
+#define SSOW_LF_GWS_OP_SWTP_SET (0xc00ull)
+#define SSOW_LF_GWS_OP_SWTAG_NORM (0xc10ull)
+#define SSOW_LF_GWS_OP_SWTAG_FULL0 (0xc20ull)
+#define SSOW_LF_GWS_OP_SWTAG_FULL1 (0xc28ull)
+#define SSOW_LF_GWS_OP_GWC_INVAL (0xe00ull)
+
+
+/* Enum offsets */
+
+#define SSOW_LF_INT_VEC_IOP (0x0ull)
+
+
+#endif /* __OTX2_SSOW_HW_H__ */
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_tim.h b/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_tim.h
new file mode 100644
index 000000000..41442ad0a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/hw/otx2_tim.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_TIM_HW_H__
+#define __OTX2_TIM_HW_H__
+
+/* TIM */
+#define TIM_AF_CONST (0x90)
+#define TIM_PRIV_LFX_CFG(a) (0x20000 | (a) << 3)
+#define TIM_PRIV_LFX_INT_CFG(a) (0x24000 | (a) << 3)
+#define TIM_AF_RVU_LF_CFG_DEBUG (0x30000)
+#define TIM_AF_BLK_RST (0x10)
+#define TIM_AF_LF_RST (0x20)
+#define TIM_AF_RINGX_GMCTL(a) (0x2000 | (a) << 3)
+#define TIM_AF_RINGX_CTL0(a) (0x4000 | (a) << 3)
+#define TIM_AF_RINGX_CTL1(a) (0x6000 | (a) << 3)
+#define TIM_AF_RINGX_CTL2(a) (0x8000 | (a) << 3)
+#define TIM_AF_FLAGS_REG (0x80)
+#define TIM_AF_FLAGS_REG_ENA_TIM BIT_ULL(0)
+#define TIM_AF_RINGX_CTL1_ENA BIT_ULL(47)
+#define TIM_AF_RINGX_CTL1_RCF_BUSY BIT_ULL(50)
+#define TIM_AF_RINGX_CLT1_CLK_10NS (0)
+#define TIM_AF_RINGX_CLT1_CLK_GPIO (1)
+#define TIM_AF_RINGX_CLT1_CLK_GTI (2)
+#define TIM_AF_RINGX_CLT1_CLK_PTP (3)
+
+/* ENUMS */
+
+#define TIM_LF_INT_VEC_NRSPERR_INT (0x0ull)
+#define TIM_LF_INT_VEC_RAS_INT (0x1ull)
+
+#endif /* __OTX2_TIM_HW_H__ */
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/meson.build b/src/spdk/dpdk/drivers/common/octeontx2/meson.build
new file mode 100644
index 000000000..f2c04342e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/meson.build
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(C) 2019 Marvell International Ltd.
+#
+
+sources= files('otx2_dev.c',
+ 'otx2_irq.c',
+ 'otx2_mbox.c',
+ 'otx2_common.c',
+ 'otx2_sec_idev.c',
+ )
+
+extra_flags = []
+# This controller exists only on arm64; silence int/pointer cast warnings on 32-bit builds
+if not dpdk_conf.get('RTE_ARCH_64')
+ extra_flags += ['-Wno-int-to-pointer-cast', '-Wno-pointer-to-int-cast']
+endif
+
+foreach flag: extra_flags
+ if cc.has_argument(flag)
+ cflags += flag
+ endif
+endforeach
+
+deps = ['eal', 'pci', 'ethdev', 'kvargs']
+includes += include_directories('../../common/octeontx2',
+ '../../mempool/octeontx2', '../../bus/pci')
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/otx2_common.c b/src/spdk/dpdk/drivers/common/octeontx2/otx2_common.c
new file mode 100644
index 000000000..5e7272f69
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/otx2_common.c
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <rte_atomic.h>
+#include <rte_malloc.h>
+#include <rte_log.h>
+
+#include "otx2_common.h"
+#include "otx2_dev.h"
+#include "otx2_mbox.h"
+
+/**
+ * @internal
+ * Set default NPA configuration: clear NPA PF_FUNC and its reference count.
+ */
+void
+otx2_npa_set_defaults(struct otx2_idev_cfg *idev)
+{
+ idev->npa_pf_func = 0;
+ rte_atomic16_set(&idev->npa_refcnt, 0);
+}
+
+/**
+ * @internal
+ * Get the process-wide intra-device config memzone; reserved on first call.
+ */
+struct otx2_idev_cfg *
+otx2_intra_dev_get_cfg(void)
+{
+ const char name[] = "octeontx2_intra_device_conf";
+ const struct rte_memzone *mz;
+ struct otx2_idev_cfg *idev;
+
+ mz = rte_memzone_lookup(name); /* already reserved by an earlier call? */
+ if (mz != NULL)
+ return mz->addr;
+
+ /* Request for the first time */
+ mz = rte_memzone_reserve_aligned(name, sizeof(struct otx2_idev_cfg),
+ SOCKET_ID_ANY, 0, OTX2_ALIGN);
+ if (mz != NULL) {
+ idev = mz->addr;
+ idev->sso_pf_func = 0;
+ idev->npa_lf = NULL;
+ otx2_npa_set_defaults(idev);
+ return idev;
+ }
+ return NULL; /* memzone reservation failed */
+}
+
+/**
+ * @internal
+ * Get SSO PF_FUNC; returns 0 when unset or config is unavailable.
+ */
+uint16_t
+otx2_sso_pf_func_get(void)
+{
+ struct otx2_idev_cfg *idev;
+ uint16_t sso_pf_func;
+
+ sso_pf_func = 0;
+ idev = otx2_intra_dev_get_cfg();
+
+ if (idev != NULL)
+ sso_pf_func = idev->sso_pf_func;
+
+ return sso_pf_func;
+}
+
+/**
+ * @internal
+ * Set SSO PF_FUNC in the shared intra-device config.
+ */
+void
+otx2_sso_pf_func_set(uint16_t sso_pf_func)
+{
+ struct otx2_idev_cfg *idev;
+
+ idev = otx2_intra_dev_get_cfg();
+
+ if (idev != NULL) {
+ idev->sso_pf_func = sso_pf_func;
+ rte_smp_wmb(); /* order the store before subsequent writes */
+ }
+}
+
+/**
+ * @internal
+ * Get NPA PF_FUNC; returns 0 when unset or config is unavailable.
+ */
+uint16_t
+otx2_npa_pf_func_get(void)
+{
+ struct otx2_idev_cfg *idev;
+ uint16_t npa_pf_func;
+
+ npa_pf_func = 0;
+ idev = otx2_intra_dev_get_cfg();
+
+ if (idev != NULL)
+ npa_pf_func = idev->npa_pf_func;
+
+ return npa_pf_func;
+}
+
+/**
+ * @internal
+ * Get the NPA LF object, but only while a reference is held (refcnt != 0).
+ */
+struct otx2_npa_lf *
+otx2_npa_lf_obj_get(void)
+{
+ struct otx2_idev_cfg *idev;
+
+ idev = otx2_intra_dev_get_cfg();
+
+ if (idev != NULL && rte_atomic16_read(&idev->npa_refcnt))
+ return idev->npa_lf;
+
+ return NULL; /* no config, or LF not referenced */
+}
+
+/**
+ * @internal
+ * Check whether the shared NPA LF belongs to this device (same mbox) and
+ * is actively referenced; returns the refcount, or 0 if not active.
+ */
+int
+otx2_npa_lf_active(void *otx2_dev)
+{
+ struct otx2_dev *dev = otx2_dev;
+ struct otx2_idev_cfg *idev;
+
+ /* Check if npalf is actively used on this dev */
+ idev = otx2_intra_dev_get_cfg();
+ if (!idev || !idev->npa_lf || idev->npa_lf->mbox != dev->mbox)
+ return 0;
+
+ return rte_atomic16_read(&idev->npa_refcnt);
+}
+
+/*
+ * @internal
+ * Take a reference on the NPA LF object only if one already exists (CAS loop).
+ */
+int otx2_npa_lf_obj_ref(void)
+{
+ struct otx2_idev_cfg *idev;
+ uint16_t cnt;
+ int rc;
+
+ idev = otx2_intra_dev_get_cfg();
+
+ /* Check if ref not possible */
+ if (idev == NULL)
+ return -EINVAL;
+
+
+ /* Get ref only if > 0 */
+ cnt = rte_atomic16_read(&idev->npa_refcnt);
+ while (cnt != 0) {
+ rc = rte_atomic16_cmpset(&idev->npa_refcnt_u16, cnt, cnt + 1); /* non-zero on success */
+ if (rc)
+ break;
+
+ cnt = rte_atomic16_read(&idev->npa_refcnt); /* lost the race; retry */
+ }
+
+ return cnt ? 0 : -EINVAL; /* -EINVAL when refcnt was already 0 */
+}
+
+static int
+parse_npa_lock_mask(const char *key, const char *value, void *extra_args)
+{
+ RTE_SET_USED(key); /* kvargs callback signature; key unused */
+ uint64_t val;
+
+ val = strtoull(value, NULL, 16); /* hex; malformed input parses as 0 */
+
+ *(uint64_t *)extra_args = val;
+
+ return 0; /* kvargs expects 0 on success; never fails */
+}
+
+/*
+ * @internal
+ * Parse device arguments common to all octeontx2 PMDs (npa_lock_mask).
+ */
+void otx2_parse_common_devargs(struct rte_kvargs *kvlist)
+{
+
+ struct otx2_idev_cfg *idev;
+ uint64_t npa_lock_mask = 0;
+
+ idev = otx2_intra_dev_get_cfg();
+
+ if (idev == NULL)
+ return;
+
+ rte_kvargs_process(kvlist, OTX2_NPA_LOCK_MASK,
+ &parse_npa_lock_mask, &npa_lock_mask); /* return ignored; mask stays 0 on error */
+
+ idev->npa_lock_mask = npa_lock_mask;
+}
+
+/**
+ * @internal
+ */
+int otx2_logtype_base;
+/**
+ * @internal
+ */
+int otx2_logtype_mbox;
+/**
+ * @internal
+ */
+int otx2_logtype_npa;
+/**
+ * @internal
+ */
+int otx2_logtype_nix;
+/**
+ * @internal
+ */
+int otx2_logtype_npc;
+/**
+ * @internal
+ */
+int otx2_logtype_tm;
+/**
+ * @internal
+ */
+int otx2_logtype_sso;
+/**
+ * @internal
+ */
+int otx2_logtype_tim;
+/**
+ * @internal
+ */
+int otx2_logtype_dpi;
+/**
+ * @internal
+ */
+int otx2_logtype_ep;
+
+RTE_INIT(otx2_log_init); /* runs at shared-object constructor time */
+static void
+otx2_log_init(void)
+{
+ otx2_logtype_base = rte_log_register("pmd.octeontx2.base");
+ if (otx2_logtype_base >= 0)
+ rte_log_set_level(otx2_logtype_base, RTE_LOG_NOTICE);
+
+ otx2_logtype_mbox = rte_log_register("pmd.octeontx2.mbox");
+ if (otx2_logtype_mbox >= 0)
+ rte_log_set_level(otx2_logtype_mbox, RTE_LOG_NOTICE);
+
+ otx2_logtype_npa = rte_log_register("pmd.mempool.octeontx2"); /* NPA logs under the mempool namespace */
+ if (otx2_logtype_npa >= 0)
+ rte_log_set_level(otx2_logtype_npa, RTE_LOG_NOTICE);
+
+ otx2_logtype_nix = rte_log_register("pmd.net.octeontx2");
+ if (otx2_logtype_nix >= 0)
+ rte_log_set_level(otx2_logtype_nix, RTE_LOG_NOTICE);
+
+ otx2_logtype_npc = rte_log_register("pmd.net.octeontx2.flow");
+ if (otx2_logtype_npc >= 0)
+ rte_log_set_level(otx2_logtype_npc, RTE_LOG_NOTICE);
+
+ otx2_logtype_tm = rte_log_register("pmd.net.octeontx2.tm");
+ if (otx2_logtype_tm >= 0)
+ rte_log_set_level(otx2_logtype_tm, RTE_LOG_NOTICE);
+
+ otx2_logtype_sso = rte_log_register("pmd.event.octeontx2");
+ if (otx2_logtype_sso >= 0)
+ rte_log_set_level(otx2_logtype_sso, RTE_LOG_NOTICE);
+
+ otx2_logtype_tim = rte_log_register("pmd.event.octeontx2.timer");
+ if (otx2_logtype_tim >= 0)
+ rte_log_set_level(otx2_logtype_tim, RTE_LOG_NOTICE);
+
+ otx2_logtype_dpi = rte_log_register("pmd.raw.octeontx2.dpi");
+ if (otx2_logtype_dpi >= 0)
+ rte_log_set_level(otx2_logtype_dpi, RTE_LOG_NOTICE);
+
+ otx2_logtype_ep = rte_log_register("pmd.raw.octeontx2.ep");
+ if (otx2_logtype_ep >= 0)
+ rte_log_set_level(otx2_logtype_ep, RTE_LOG_NOTICE);
+
+}
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/otx2_common.h b/src/spdk/dpdk/drivers/common/octeontx2/otx2_common.h
new file mode 100644
index 000000000..2168cde4d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/otx2_common.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef _OTX2_COMMON_H_
+#define _OTX2_COMMON_H_
+
+#include <rte_atomic.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_io.h>
+
+#include "hw/otx2_rvu.h"
+#include "hw/otx2_nix.h"
+#include "hw/otx2_npc.h"
+#include "hw/otx2_npa.h"
+#include "hw/otx2_sdp.h"
+#include "hw/otx2_sso.h"
+#include "hw/otx2_ssow.h"
+#include "hw/otx2_tim.h"
+
+/* Alignment */
+#define OTX2_ALIGN 128
+
+/* Bits manipulation */
+#ifndef BIT_ULL
+#define BIT_ULL(nr) (1ULL << (nr))
+#endif
+#ifndef BIT
+#define BIT(nr) (1UL << (nr))
+#endif
+
+#ifndef BITS_PER_LONG
+#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
+#endif
+#ifndef BITS_PER_LONG_LONG
+#define BITS_PER_LONG_LONG (__SIZEOF_LONG_LONG__ * 8)
+#endif
+
+#ifndef GENMASK
+#define GENMASK(h, l) \
+ (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+#endif
+#ifndef GENMASK_ULL
+#define GENMASK_ULL(h, l) \
+ (((~0ULL) - (1ULL << (l)) + 1) & \
+ (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+#endif
+
+#define OTX2_NPA_LOCK_MASK "npa_lock_mask"
+
+/* Intra device related functions */
+struct otx2_npa_lf;
+struct otx2_idev_cfg {
+ uint16_t sso_pf_func;
+ uint16_t npa_pf_func;
+ struct otx2_npa_lf *npa_lf;
+ RTE_STD_C11
+ union {
+ rte_atomic16_t npa_refcnt; /* reference count on npa_lf */
+ uint16_t npa_refcnt_u16; /* same counter, raw u16 view for cmpset() */
+ };
+ uint64_t npa_lock_mask;
+};
+
+__rte_internal
+struct otx2_idev_cfg *otx2_intra_dev_get_cfg(void);
+__rte_internal
+void otx2_sso_pf_func_set(uint16_t sso_pf_func);
+__rte_internal
+uint16_t otx2_sso_pf_func_get(void);
+__rte_internal
+uint16_t otx2_npa_pf_func_get(void);
+__rte_internal
+struct otx2_npa_lf *otx2_npa_lf_obj_get(void);
+__rte_internal
+void otx2_npa_set_defaults(struct otx2_idev_cfg *idev);
+__rte_internal
+int otx2_npa_lf_active(void *dev);
+__rte_internal
+int otx2_npa_lf_obj_ref(void);
+__rte_internal
+void otx2_parse_common_devargs(struct rte_kvargs *kvlist);
+
+/* Log */
+extern int otx2_logtype_base;
+extern int otx2_logtype_mbox;
+extern int otx2_logtype_npa;
+extern int otx2_logtype_nix;
+extern int otx2_logtype_sso;
+extern int otx2_logtype_npc;
+extern int otx2_logtype_tm;
+extern int otx2_logtype_tim;
+extern int otx2_logtype_dpi;
+extern int otx2_logtype_ep;
+
+#define otx2_err(fmt, args...) \
+ RTE_LOG(ERR, PMD, "%s():%u " fmt "\n", \
+ __func__, __LINE__, ## args)
+
+#define otx2_info(fmt, args...) \
+ RTE_LOG(INFO, PMD, fmt"\n", ## args)
+
+#define otx2_dbg(subsystem, fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, otx2_logtype_ ## subsystem, \
+ "[%s] %s():%u " fmt "\n", \
+ #subsystem, __func__, __LINE__, ##args)
+
+#define otx2_base_dbg(fmt, ...) otx2_dbg(base, fmt, ##__VA_ARGS__)
+#define otx2_mbox_dbg(fmt, ...) otx2_dbg(mbox, fmt, ##__VA_ARGS__)
+#define otx2_npa_dbg(fmt, ...) otx2_dbg(npa, fmt, ##__VA_ARGS__)
+#define otx2_nix_dbg(fmt, ...) otx2_dbg(nix, fmt, ##__VA_ARGS__)
+#define otx2_sso_dbg(fmt, ...) otx2_dbg(sso, fmt, ##__VA_ARGS__)
+#define otx2_npc_dbg(fmt, ...) otx2_dbg(npc, fmt, ##__VA_ARGS__)
+#define otx2_tm_dbg(fmt, ...) otx2_dbg(tm, fmt, ##__VA_ARGS__)
+#define otx2_tim_dbg(fmt, ...) otx2_dbg(tim, fmt, ##__VA_ARGS__)
+#define otx2_dpi_dbg(fmt, ...) otx2_dbg(dpi, fmt, ##__VA_ARGS__)
+#define otx2_sdp_dbg(fmt, ...) otx2_dbg(ep, fmt, ##__VA_ARGS__)
+
+/* PCI IDs */
+#define PCI_VENDOR_ID_CAVIUM 0x177D
+#define PCI_DEVID_OCTEONTX2_RVU_PF 0xA063
+#define PCI_DEVID_OCTEONTX2_RVU_VF 0xA064
+#define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065
+#define PCI_DEVID_OCTEONTX2_RVU_SSO_TIM_PF 0xA0F9
+#define PCI_DEVID_OCTEONTX2_RVU_SSO_TIM_VF 0xA0FA
+#define PCI_DEVID_OCTEONTX2_RVU_NPA_PF 0xA0FB
+#define PCI_DEVID_OCTEONTX2_RVU_NPA_VF 0xA0FC
+#define PCI_DEVID_OCTEONTX2_RVU_CPT_PF 0xA0FD
+#define PCI_DEVID_OCTEONTX2_RVU_CPT_VF 0xA0FE
+#define PCI_DEVID_OCTEONTX2_RVU_AF_VF 0xA0f8
+#define PCI_DEVID_OCTEONTX2_DPI_VF 0xA081
+#define PCI_DEVID_OCTEONTX2_EP_VF 0xB203 /* OCTEON TX2 EP mode */
+#define PCI_DEVID_OCTEONTX2_RVU_SDP_PF 0xA0f6
+#define PCI_DEVID_OCTEONTX2_RVU_SDP_VF 0xA0f7
+
+/*
+ * REVID for RVU PCIe devices.
+ * Bits 0..1: minor pass
+ * Bits 3..2: major pass
+ * Bits 7..4: midr id, 0:96, 1:95, 2:loki, f:unknown
+ */
+
+#define RVU_PCI_REV_MIDR_ID(rev_id) ((rev_id) >> 4)
+#define RVU_PCI_REV_MAJOR(rev_id) (((rev_id) >> 2) & 0x3)
+#define RVU_PCI_REV_MINOR(rev_id) ((rev_id) & 0x3)
+
+#define RVU_PCI_CN96XX_MIDR_ID 0x0
+#define RVU_PCI_CNF95XX_MIDR_ID 0x1
+
+/* PCI Config offsets */
+#define RVU_PCI_REVISION_ID 0x08
+
+/* IO Access */
+#define otx2_read64(addr) rte_read64_relaxed((void *)(addr))
+#define otx2_write64(val, addr) rte_write64_relaxed((val), (void *)(addr))
+
+#if defined(RTE_ARCH_ARM64)
+#include "otx2_io_arm64.h"
+#else
+#include "otx2_io_generic.h"
+#endif
+
+/* Fastpath lookup */
+#define OTX2_NIX_FASTPATH_LOOKUP_MEM "otx2_nix_fastpath_lookup_mem"
+#define OTX2_NIX_SA_TBL_START (4096*4 + 69632*2) /* = 155648; derivation unclear - TODO confirm */
+
+#endif /* _OTX2_COMMON_H_ */
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/otx2_dev.c b/src/spdk/dpdk/drivers/common/octeontx2/otx2_dev.c
new file mode 100644
index 000000000..d61c712fa
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/otx2_dev.c
@@ -0,0 +1,1043 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <fcntl.h>
+#include <inttypes.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include <rte_alarm.h>
+#include <rte_common.h>
+#include <rte_eal.h>
+#include <rte_memcpy.h>
+
+#include "otx2_dev.h"
+#include "otx2_mbox.h"
+
+#define RVU_MAX_VF 64 /* RVU_PF_VFPF_MBOX_INT(0..1) */
+#define RVU_MAX_INT_RETRY 3
+
+/* PF/VF message handling alarm period: 20*1000 us = 20 ms (rte_eal_alarm_set takes us) */
+#define VF_PF_MBOX_TIMER_MS (20 * 1000)
+
+static void *
+mbox_mem_map(off_t off, size_t size)
+{
+ void *va = MAP_FAILED;
+ int mem_fd;
+
+ if (size == 0) /* size_t is unsigned; the old "<= 0" read as a signed check */
+ goto error;
+
+ mem_fd = open("/dev/mem", O_RDWR);
+ if (mem_fd < 0)
+ goto error;
+
+ va = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, mem_fd, off);
+ close(mem_fd);
+
+ if (va == MAP_FAILED)
+ otx2_err("Failed to mmap sz=0x%zx, fd=%d, off=%jd",
+ size, mem_fd, (intmax_t)off);
+error:
+ return va; /* MAP_FAILED on any error path */
+}
+
+static void
+mbox_mem_unmap(void *va, size_t size)
+{
+ if (va) /* NOTE(review): MAP_FAILED (-1) is also truthy; callers must pass NULL or a valid map */
+ munmap(va, size);
+}
+
+static int
+pf_af_sync_msg(struct otx2_dev *dev, struct mbox_msghdr **rsp)
+{
+ uint32_t timeout = 0, sleep = 1; struct otx2_mbox *mbox = dev->mbox;
+ struct otx2_mbox_dev *mdev = &mbox->dev[0];
+ volatile uint64_t int_status;
+ struct mbox_msghdr *msghdr;
+ uint64_t off;
+ int rc = 0;
+
+ /* We need to disable PF interrupts. We are in timer interrupt */
+ otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);
+
+ /* Send message */
+ otx2_mbox_msg_send(mbox, 0);
+
+ do {
+ rte_delay_ms(sleep);
+ timeout += sleep;
+ if (timeout >= MBOX_RSP_TIMEOUT) {
+ otx2_err("Message timeout: %dms", MBOX_RSP_TIMEOUT);
+ rc = -EIO;
+ break;
+ }
+ int_status = otx2_read64(dev->bar2 + RVU_PF_INT);
+ } while ((int_status & 0x1) != 0x1); /* poll for mbox interrupt (bit 0) */
+
+ /* Clear */
+ otx2_write64(int_status, dev->bar2 + RVU_PF_INT);
+
+ /* Enable interrupts */
+ otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);
+
+ if (rc == 0) {
+ /* Get message */
+ off = mbox->rx_start +
+ RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+ msghdr = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + off);
+ if (rsp)
+ *rsp = msghdr;
+ rc = msghdr->rc; /* propagate AF's response code */
+ }
+
+ return rc;
+}
+
+static int
+af_pf_wait_msg(struct otx2_dev *dev, uint16_t vf, int num_msg)
+{
+ uint32_t timeout = 0, sleep = 1; struct otx2_mbox *mbox = dev->mbox;
+ struct otx2_mbox_dev *mdev = &mbox->dev[0];
+ volatile uint64_t int_status;
+ struct mbox_hdr *req_hdr;
+ struct mbox_msghdr *msg;
+ struct mbox_msghdr *rsp;
+ uint64_t offset;
+ size_t size;
+ int i;
+
+ /* We need to disable PF interrupts. We are in timer interrupt */
+ otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);
+
+ /* Send message */
+ otx2_mbox_msg_send(mbox, 0);
+
+ do {
+ rte_delay_ms(sleep);
+ timeout++;
+ if (timeout >= MBOX_RSP_TIMEOUT) {
+ otx2_err("Routed messages %d timeout: %dms",
+ num_msg, MBOX_RSP_TIMEOUT);
+ break; /* NOTE(review): no early return; the mbox is still read below on timeout */
+ }
+ int_status = otx2_read64(dev->bar2 + RVU_PF_INT);
+ } while ((int_status & 0x1) != 0x1); /* poll for mbox interrupt (bit 0) */
+
+ /* Clear */
+ otx2_write64(~0ull, dev->bar2 + RVU_PF_INT);
+
+ /* Enable interrupts */
+ otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);
+
+ rte_spinlock_lock(&mdev->mbox_lock);
+
+ req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
+ if (req_hdr->num_msgs != num_msg)
+ otx2_err("Routed messages: %d received: %d", num_msg,
+ req_hdr->num_msgs);
+
+ /* Get messages from mbox */
+ offset = mbox->rx_start +
+ RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+ for (i = 0; i < req_hdr->num_msgs; i++) {
+ msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
+ size = mbox->rx_start + msg->next_msgoff - offset;
+
+ /* Reserve PF/VF mbox message */
+ size = RTE_ALIGN(size, MBOX_MSG_ALIGN);
+ rsp = otx2_mbox_alloc_msg(&dev->mbox_vfpf, vf, size); /* NOTE(review): result not NULL-checked */
+ otx2_mbox_rsp_init(msg->id, rsp);
+
+ /* Copy message from AF<->PF mbox to PF<->VF mbox */
+ otx2_mbox_memcpy((uint8_t *)rsp + sizeof(struct mbox_msghdr),
+ (uint8_t *)msg + sizeof(struct mbox_msghdr),
+ size - sizeof(struct mbox_msghdr));
+
+ /* Set status and sender pf_func data */
+ rsp->rc = msg->rc;
+ rsp->pcifunc = msg->pcifunc;
+
+ offset = mbox->rx_start + msg->next_msgoff;
+ }
+ rte_spinlock_unlock(&mdev->mbox_lock);
+
+ return req_hdr->num_msgs; /* number of responses copied toward the VF */
+}
+
+static int
+vf_pf_process_msgs(struct otx2_dev *dev, uint16_t vf)
+{
+ int offset, routed = 0; struct otx2_mbox *mbox = &dev->mbox_vfpf;
+ struct otx2_mbox_dev *mdev = &mbox->dev[vf];
+ struct mbox_hdr *req_hdr;
+ struct mbox_msghdr *msg;
+ size_t size;
+ uint16_t i;
+
+ req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
+ if (!req_hdr->num_msgs)
+ return 0; /* nothing pending from this VF */
+
+ offset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+
+ for (i = 0; i < req_hdr->num_msgs; i++) {
+
+ msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
+ size = mbox->rx_start + msg->next_msgoff - offset;
+
+ /* RVU_PF_FUNC_S */
+ msg->pcifunc = otx2_pfvf_func(dev->pf, vf);
+
+ if (msg->id == MBOX_MSG_READY) {
+ struct ready_msg_rsp *rsp;
+ uint16_t max_bits = sizeof(dev->active_vfs[0]) * 8;
+
+ /* Handle READY message in PF */
+ dev->active_vfs[vf / max_bits] |=
+ BIT_ULL(vf % max_bits);
+ rsp = (struct ready_msg_rsp *)
+ otx2_mbox_alloc_msg(mbox, vf, sizeof(*rsp)); /* NOTE(review): result not NULL-checked */
+ otx2_mbox_rsp_init(msg->id, rsp);
+
+ /* PF/VF function ID */
+ rsp->hdr.pcifunc = msg->pcifunc;
+ rsp->hdr.rc = 0;
+ } else {
+ struct mbox_msghdr *af_req;
+ /* Reserve AF/PF mbox message */
+ size = RTE_ALIGN(size, MBOX_MSG_ALIGN);
+ af_req = otx2_mbox_alloc_msg(dev->mbox, 0, size); /* NOTE(review): result not NULL-checked */
+ otx2_mbox_req_init(msg->id, af_req);
+
+ /* Copy message from VF<->PF mbox to PF<->AF mbox */
+ otx2_mbox_memcpy((uint8_t *)af_req +
+ sizeof(struct mbox_msghdr),
+ (uint8_t *)msg + sizeof(struct mbox_msghdr),
+ size - sizeof(struct mbox_msghdr));
+ af_req->pcifunc = msg->pcifunc;
+ routed++;
+ }
+ offset = mbox->rx_start + msg->next_msgoff;
+ }
+
+ if (routed > 0) {
+ otx2_base_dbg("pf:%d routed %d messages from vf:%d to AF",
+ dev->pf, routed, vf);
+ af_pf_wait_msg(dev, vf, routed);
+ otx2_mbox_reset(dev->mbox, 0);
+ }
+
+ /* Send mbox responses to VF */
+ if (mdev->num_msgs) {
+ otx2_base_dbg("pf:%d reply %d messages to vf:%d",
+ dev->pf, mdev->num_msgs, vf);
+ otx2_mbox_msg_send(mbox, vf);
+ }
+
+ return i; /* number of messages processed */
+}
+
+static int
+vf_pf_process_up_msgs(struct otx2_dev *dev, uint16_t vf)
+{
+ struct otx2_mbox *mbox = &dev->mbox_vfpf_up;
+ struct otx2_mbox_dev *mdev = &mbox->dev[vf];
+ struct mbox_hdr *req_hdr;
+ struct mbox_msghdr *msg;
+ int msgs_acked = 0;
+ int offset;
+ uint16_t i;
+
+ req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
+ if (req_hdr->num_msgs == 0)
+ return 0; /* no UP messages pending */
+
+ offset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+
+ for (i = 0; i < req_hdr->num_msgs; i++) {
+ msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
+
+ msgs_acked++;
+ /* RVU_PF_FUNC_S */
+ msg->pcifunc = otx2_pfvf_func(dev->pf, vf);
+
+ switch (msg->id) {
+ case MBOX_MSG_CGX_LINK_EVENT:
+ otx2_base_dbg("PF: Msg 0x%x (%s) fn:0x%x (pf:%d,vf:%d)",
+ msg->id, otx2_mbox_id2name(msg->id),
+ msg->pcifunc, otx2_get_pf(msg->pcifunc),
+ otx2_get_vf(msg->pcifunc));
+ break;
+ case MBOX_MSG_CGX_PTP_RX_INFO:
+ otx2_base_dbg("PF: Msg 0x%x (%s) fn:0x%x (pf:%d,vf:%d)",
+ msg->id, otx2_mbox_id2name(msg->id),
+ msg->pcifunc, otx2_get_pf(msg->pcifunc),
+ otx2_get_vf(msg->pcifunc));
+ break;
+ default:
+ otx2_err("Not handled UP msg 0x%x (%s) func:0x%x",
+ msg->id, otx2_mbox_id2name(msg->id),
+ msg->pcifunc);
+ }
+ offset = mbox->rx_start + msg->next_msgoff;
+ }
+ otx2_mbox_reset(mbox, vf);
+ mdev->msgs_acked = msgs_acked; /* publish ack count after reset */
+ rte_wmb();
+
+ return i;
+}
+
+static void
+otx2_vf_pf_mbox_handle_msg(void *param)
+{
+ uint16_t vf, max_vf, max_bits;
+ struct otx2_dev *dev = param;
+
+ max_bits = sizeof(dev->intr.bits[0]) * 8; /* bits per array element */
+ max_vf = max_bits * MAX_VFPF_DWORD_BITS;
+
+ for (vf = 0; vf < max_vf; vf++) {
+ if (dev->intr.bits[vf/max_bits] & BIT_ULL(vf%max_bits)) {
+ otx2_base_dbg("Process vf:%d request (pf:%d, vf:%d)",
+ vf, dev->pf, dev->vf);
+ vf_pf_process_msgs(dev, vf);
+ /* UP messages */
+ vf_pf_process_up_msgs(dev, vf);
+ dev->intr.bits[vf/max_bits] &= ~(BIT_ULL(vf%max_bits));
+ }
+ }
+ dev->timer_set = 0; /* allow the IRQ handler to rearm the alarm */
+}
+
+static void
+otx2_vf_pf_mbox_irq(void *param)
+{
+ struct otx2_dev *dev = param;
+ bool alarm_set = false;
+ uint64_t intr;
+ int vfpf;
+
+ for (vfpf = 0; vfpf < MAX_VFPF_DWORD_BITS; ++vfpf) {
+ intr = otx2_read64(dev->bar2 + RVU_PF_VFPF_MBOX_INTX(vfpf));
+ if (!intr)
+ continue;
+
+ otx2_base_dbg("vfpf: %d intr: 0x%" PRIx64 " (pf:%d, vf:%d)",
+ vfpf, intr, dev->pf, dev->vf);
+
+ /* Save and clear intr bits */
+ dev->intr.bits[vfpf] |= intr;
+ otx2_write64(intr, dev->bar2 + RVU_PF_VFPF_MBOX_INTX(vfpf));
+ alarm_set = true;
+ }
+
+ if (!dev->timer_set && alarm_set) { /* coalesce: at most one pending alarm */
+ dev->timer_set = 1;
+ /* Start timer to handle messages */
+ rte_eal_alarm_set(VF_PF_MBOX_TIMER_MS,
+ otx2_vf_pf_mbox_handle_msg, dev);
+ }
+}
+
+static void
+otx2_process_msgs(struct otx2_dev *dev, struct otx2_mbox *mbox)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[0];
+ struct mbox_hdr *req_hdr;
+ struct mbox_msghdr *msg;
+ int msgs_acked = 0;
+ int offset;
+ uint16_t i;
+
+ req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
+ if (req_hdr->num_msgs == 0)
+ return; /* nothing to process */
+
+ offset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+ for (i = 0; i < req_hdr->num_msgs; i++) {
+ msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
+
+ msgs_acked++;
+ otx2_base_dbg("Message 0x%x (%s) pf:%d/vf:%d",
+ msg->id, otx2_mbox_id2name(msg->id),
+ otx2_get_pf(msg->pcifunc),
+ otx2_get_vf(msg->pcifunc));
+
+ switch (msg->id) {
+ /* Add message id's that are handled here */
+ case MBOX_MSG_READY:
+ /* Get our identity */
+ dev->pf_func = msg->pcifunc;
+ break;
+
+ default:
+ if (msg->rc)
+ otx2_err("Message (%s) response has err=%d",
+ otx2_mbox_id2name(msg->id), msg->rc);
+ break;
+ }
+ offset = mbox->rx_start + msg->next_msgoff;
+ }
+
+ otx2_mbox_reset(mbox, 0); /* reset precedes publishing msgs_acked */
+ /* Update acked if someone is waiting a message */
+ mdev->msgs_acked = msgs_acked;
+ rte_wmb();
+}
+
+/* Copies the message received from AF and sends it to VF */
+static void
+pf_vf_mbox_send_up_msg(struct otx2_dev *dev, void *rec_msg)
+{
+ uint16_t max_bits = sizeof(dev->active_vfs[0]) * 8; /* bits per element */
+ struct otx2_mbox *vf_mbox = &dev->mbox_vfpf_up;
+ struct msg_req *msg = rec_msg;
+ struct mbox_msghdr *vf_msg;
+ uint16_t vf;
+ size_t size;
+
+ size = RTE_ALIGN(otx2_mbox_id2size(msg->hdr.id), MBOX_MSG_ALIGN);
+ /* Send UP message to all VF's */
+ for (vf = 0; vf < vf_mbox->ndevs; vf++) {
+ /* VF active? Bit index must be vf %% max_bits (vf alone is UB for vf >= 64) */
+ if (!(dev->active_vfs[vf / max_bits] & BIT_ULL(vf % max_bits)))
+ continue;
+
+ otx2_base_dbg("(%s) size: %zx to VF: %d",
+ otx2_mbox_id2name(msg->hdr.id), size, vf);
+
+ /* Reserve PF/VF mbox message */
+ vf_msg = otx2_mbox_alloc_msg(vf_mbox, vf, size);
+ if (!vf_msg) {
+ otx2_err("Failed to alloc VF%d UP message", vf);
+ continue;
+ }
+ otx2_mbox_req_init(msg->hdr.id, vf_msg);
+
+ /*
+ * Copy message from AF<->PF UP mbox
+ * to PF<->VF UP mbox
+ */
+ otx2_mbox_memcpy((uint8_t *)vf_msg +
+ sizeof(struct mbox_msghdr), (uint8_t *)msg
+ + sizeof(struct mbox_msghdr), size -
+ sizeof(struct mbox_msghdr));
+
+ vf_msg->rc = msg->hdr.rc;
+ /* Set PF to be a sender */
+ vf_msg->pcifunc = dev->pf_func;
+
+ /* Send to VF */
+ otx2_mbox_msg_send(vf_mbox, vf);
+ }
+}
+
+static int
+otx2_mbox_up_handler_cgx_link_event(struct otx2_dev *dev,
+ struct cgx_link_info_msg *msg,
+ struct msg_rsp *rsp)
+{
+ struct cgx_link_user_info *linfo = &msg->link_info;
+
+ otx2_base_dbg("pf:%d/vf:%d NIC Link %s --> 0x%x (%s) from: pf:%d/vf:%d",
+ otx2_get_pf(dev->pf_func), otx2_get_vf(dev->pf_func),
+ linfo->link_up ? "UP" : "DOWN", msg->hdr.id,
+ otx2_mbox_id2name(msg->hdr.id),
+ otx2_get_pf(msg->hdr.pcifunc),
+ otx2_get_vf(msg->hdr.pcifunc));
+
+ /* PF gets link notification from AF */
+ if (otx2_get_pf(msg->hdr.pcifunc) == 0) { /* sender pf 0 => message came from AF */
+ if (dev->ops && dev->ops->link_status_update)
+ dev->ops->link_status_update(dev, linfo);
+
+ /* Forward the same message as received from AF to VF */
+ pf_vf_mbox_send_up_msg(dev, msg);
+ } else {
+ /* VF gets link up notification */
+ if (dev->ops && dev->ops->link_status_update)
+ dev->ops->link_status_update(dev, linfo);
+ }
+
+ rsp->hdr.rc = 0;
+ return 0;
+}
+
+static int
+otx2_mbox_up_handler_cgx_ptp_rx_info(struct otx2_dev *dev,
+ struct cgx_ptp_rx_info_msg *msg,
+ struct msg_rsp *rsp)
+{
+ otx2_nix_dbg("pf:%d/vf:%d PTP mode %s --> 0x%x (%s) from: pf:%d/vf:%d",
+ otx2_get_pf(dev->pf_func),
+ otx2_get_vf(dev->pf_func),
+ msg->ptp_en ? "ENABLED" : "DISABLED",
+ msg->hdr.id, otx2_mbox_id2name(msg->hdr.id),
+ otx2_get_pf(msg->hdr.pcifunc),
+ otx2_get_vf(msg->hdr.pcifunc));
+
+ /* PF gets PTP notification from AF */
+ if (otx2_get_pf(msg->hdr.pcifunc) == 0) { /* sender pf 0 => message came from AF */
+ if (dev->ops && dev->ops->ptp_info_update)
+ dev->ops->ptp_info_update(dev, msg->ptp_en);
+
+ /* Forward the same message as received from AF to VF */
+ pf_vf_mbox_send_up_msg(dev, msg);
+ } else {
+ /* VF gets PTP notification */
+ if (dev->ops && dev->ops->ptp_info_update)
+ dev->ops->ptp_info_update(dev, msg->ptp_en);
+ }
+
+ rsp->hdr.rc = 0;
+ return 0;
+}
+
+static int
+mbox_process_msgs_up(struct otx2_dev *dev, struct mbox_msghdr *req)
+{
+ /* Check if valid, if not reply with a invalid msg */
+ if (req->sig != OTX2_MBOX_REQ_SIG)
+ return -EIO;
+
+ switch (req->id) {
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+ case _id: { \
+ struct _rsp_type *rsp; \
+ int err; \
+ \
+ rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \
+ &dev->mbox_up, 0, \
+ sizeof(struct _rsp_type)); \
+ if (!rsp) \
+ return -ENOMEM; \
+ \
+ rsp->hdr.id = _id; \
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \
+ rsp->hdr.pcifunc = dev->pf_func; \
+ rsp->hdr.rc = 0; \
+ \
+ err = otx2_mbox_up_handler_ ## _fn_name( \
+ dev, (struct _req_type *)req, rsp); \
+ return err; \
+ }
+MBOX_UP_CGX_MESSAGES /* expands M(...) into the case labels above */
+#undef M
+
+ default :
+ otx2_reply_invalid_msg(&dev->mbox_up, 0, 0, req->id);
+ }
+
+ return -ENODEV; /* unknown message id */
+}
+
+static void
+otx2_process_msgs_up(struct otx2_dev *dev, struct otx2_mbox *mbox)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[0];
+ struct mbox_hdr *req_hdr;
+ struct mbox_msghdr *msg;
+ int i, err, offset;
+
+ req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
+ if (req_hdr->num_msgs == 0)
+ return; /* no UP messages pending */
+
+ offset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+ for (i = 0; i < req_hdr->num_msgs; i++) {
+ msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
+
+ otx2_base_dbg("Message 0x%x (%s) pf:%d/vf:%d",
+ msg->id, otx2_mbox_id2name(msg->id),
+ otx2_get_pf(msg->pcifunc),
+ otx2_get_vf(msg->pcifunc));
+ err = mbox_process_msgs_up(dev, msg);
+ if (err)
+ otx2_err("Error %d handling 0x%x (%s)",
+ err, msg->id, otx2_mbox_id2name(msg->id));
+ offset = mbox->rx_start + msg->next_msgoff;
+ }
+ /* Send mbox responses */
+ if (mdev->num_msgs) {
+ otx2_base_dbg("Reply num_msgs:%d", mdev->num_msgs);
+ otx2_mbox_msg_send(mbox, 0); /* flush queued responses */
+ }
+}
+
+static void
+otx2_pf_vf_mbox_irq(void *param)
+{
+ struct otx2_dev *dev = param;
+ uint64_t intr;
+
+ intr = otx2_read64(dev->bar2 + RVU_VF_INT);
+ if (intr == 0) /* handlers below still run to poll the UP queue */
+ otx2_base_dbg("Proceeding to check mbox UP messages if any");
+
+ otx2_write64(intr, dev->bar2 + RVU_VF_INT);
+ otx2_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf);
+
+ /* First process all configuration messages */
+ otx2_process_msgs(dev, dev->mbox);
+
+ /* Process Uplink messages */
+ otx2_process_msgs_up(dev, &dev->mbox_up);
+}
+
+static void
+otx2_af_pf_mbox_irq(void *param)
+{
+ struct otx2_dev *dev = param;
+ uint64_t intr;
+
+ intr = otx2_read64(dev->bar2 + RVU_PF_INT);
+ if (intr == 0) /* handlers below still run to poll the UP queue */
+ otx2_base_dbg("Proceeding to check mbox UP messages if any");
+
+ otx2_write64(intr, dev->bar2 + RVU_PF_INT);
+ otx2_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf);
+
+ /* First process all configuration messages */
+ otx2_process_msgs(dev, dev->mbox);
+
+ /* Process Uplink messages */
+ otx2_process_msgs_up(dev, &dev->mbox_up);
+}
+
+static int
+mbox_register_pf_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
+{
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ int i, rc;
+
+ /* HW clear irq */
+ for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
+ otx2_write64(~0ull, dev->bar2 +
+ RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i));
+
+ otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);
+
+ dev->timer_set = 0;
+
+ /* MBOX interrupt for VF(0...63) <-> PF */
+ rc = otx2_register_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
+ RVU_PF_INT_VEC_VFPF_MBOX0);
+
+ if (rc) {
+ otx2_err("Fail to register PF(VF0-63) mbox irq");
+ return rc;
+ }
+ /* MBOX interrupt for VF(64...128) <-> PF */
+ rc = otx2_register_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
+ RVU_PF_INT_VEC_VFPF_MBOX1);
+
+ if (rc) {
+ otx2_err("Fail to register PF(VF64-128) mbox irq");
+ return rc;
+ }
+ /* MBOX interrupt AF <-> PF */
+ rc = otx2_register_irq(intr_handle, otx2_af_pf_mbox_irq,
+ dev, RVU_PF_INT_VEC_AFPF_MBOX);
+ if (rc) {
+ otx2_err("Fail to register AF<->PF mbox irq");
+ return rc;
+ }
+
+ /* HW enable intr */
+ for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
+ otx2_write64(~0ull, dev->bar2 +
+ RVU_PF_VFPF_MBOX_INT_ENA_W1SX(i));
+
+ otx2_write64(~0ull, dev->bar2 + RVU_PF_INT);
+ otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);
+
+ return rc; /* 0 on success */
+}
+
+static int
+mbox_register_vf_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
+{
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ int rc;
+
+ /* Clear irq */
+ otx2_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1C);
+
+ /* MBOX interrupt PF <-> VF */
+ rc = otx2_register_irq(intr_handle, otx2_pf_vf_mbox_irq,
+ dev, RVU_VF_INT_VEC_MBOX);
+ if (rc) {
+ otx2_err("Fail to register PF<->VF mbox irq");
+ return rc;
+ }
+
+ /* HW enable intr */
+ otx2_write64(~0ull, dev->bar2 + RVU_VF_INT);
+ otx2_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1S);
+
+ return rc; /* 0 on success */
+}
+
+static int
+mbox_register_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
+{
+ if (otx2_dev_is_vf(dev)) /* dispatch on device type */
+ return mbox_register_vf_irq(pci_dev, dev);
+ else
+ return mbox_register_pf_irq(pci_dev, dev);
+}
+
+static void
+mbox_unregister_pf_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
+{
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ int i;
+
+ /* HW clear irq */
+ for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
+ otx2_write64(~0ull, dev->bar2 +
+ RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i));
+
+ otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);
+
+ dev->timer_set = 0;
+
+ rte_eal_alarm_cancel(otx2_vf_pf_mbox_handle_msg, dev); /* stop any pending VF-message alarm */
+
+ /* Unregister the interrupt handler for each vectors */
+ /* MBOX interrupt for VF(0...63) <-> PF */
+ otx2_unregister_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
+ RVU_PF_INT_VEC_VFPF_MBOX0);
+
+ /* MBOX interrupt for VF(64...128) <-> PF */
+ otx2_unregister_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
+ RVU_PF_INT_VEC_VFPF_MBOX1);
+
+ /* MBOX interrupt AF <-> PF */
+ otx2_unregister_irq(intr_handle, otx2_af_pf_mbox_irq, dev,
+ RVU_PF_INT_VEC_AFPF_MBOX);
+
+}
+
+static void
+mbox_unregister_vf_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
+{
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+
+ /* Clear irq */
+ otx2_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1C);
+
+ /* Unregister the interrupt handler */
+ otx2_unregister_irq(intr_handle, otx2_pf_vf_mbox_irq, dev,
+ RVU_VF_INT_VEC_MBOX);
+}
+
+static void
+mbox_unregister_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
+{
+ if (otx2_dev_is_vf(dev))
+ mbox_unregister_vf_irq(pci_dev, dev);
+ else
+ mbox_unregister_pf_irq(pci_dev, dev);
+}
+
+static int
+vf_flr_send_msg(struct otx2_dev *dev, uint16_t vf)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ struct msg_req *req;
+ int rc;
+
+ req = otx2_mbox_alloc_msg_vf_flr(mbox);
+ /* Overwrite pcifunc to indicate VF */
+ req->hdr.pcifunc = otx2_pfvf_func(dev->pf, vf);
+
+ /* Sync message in interrupt context */
+ rc = pf_af_sync_msg(dev, NULL);
+ if (rc)
+ otx2_err("Failed to send VF FLR mbox msg, rc=%d", rc);
+
+ return rc;
+}
+
/* VF-FLR interrupt handler on the PF: for every VF whose FLR bit is set,
 * ack the interrupt, inform the AF, signal FLR completion to HW and
 * re-enable the per-VF interrupt.
 */
static void
otx2_pf_vf_flr_irq(void *param)
{
	struct otx2_dev *dev = (struct otx2_dev *)param;
	uint16_t max_vf = 64, vf;
	uintptr_t bar2;
	uint64_t intr;
	int i;

	/* maxvf == 0 still scans one 64-bit word below, hence the floor */
	max_vf = (dev->maxvf > 0) ? dev->maxvf : 64;
	bar2 = dev->bar2;

	otx2_base_dbg("FLR VF interrupt: max_vf: %d", max_vf);

	/* One 64-bit interrupt word per group of 64 VFs */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
		intr = otx2_read64(bar2 + RVU_PF_VFFLR_INTX(i));
		if (!intr)
			continue;

		/* NOTE(review): 'vf' is the bit index within word i; for
		 * i == 1 only max_vf - 64 bits can be meaningful — HW
		 * presumably never sets bits beyond maxvf, confirm.
		 */
		for (vf = 0; vf < max_vf; vf++) {
			if (!(intr & (1ULL << vf)))
				continue;

			otx2_base_dbg("FLR: i :%d intr: 0x%" PRIx64 ", vf-%d",
				      i, intr, (64 * i + vf));
			/* Clear interrupt */
			otx2_write64(BIT_ULL(vf), bar2 + RVU_PF_VFFLR_INTX(i));
			/* Disable the interrupt */
			otx2_write64(BIT_ULL(vf),
				     bar2 + RVU_PF_VFFLR_INT_ENA_W1CX(i));
			/* Inform AF about VF reset */
			vf_flr_send_msg(dev, vf);

			/* Signal FLR finish */
			otx2_write64(BIT_ULL(vf), bar2 + RVU_PF_VFTRPENDX(i));
			/* Enable interrupt */
			otx2_write64(~0ull,
				     bar2 + RVU_PF_VFFLR_INT_ENA_W1SX(i));
		}
	}
}
+
/* Mask both VF-FLR interrupt vectors in HW and detach their handlers.
 * Always returns 0.
 */
static int
vf_flr_unregister_irqs(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
{
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int i;

	otx2_base_dbg("Unregister VF FLR interrupts for %s", pci_dev->name);

	/* HW clear irq */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; i++)
		otx2_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INT_ENA_W1CX(i));

	/* FLR vectors for VFs 0-63 and 64-127 respectively */
	otx2_unregister_irq(intr_handle, otx2_pf_vf_flr_irq, dev,
			    RVU_PF_INT_VEC_VFFLR0);

	otx2_unregister_irq(intr_handle, otx2_pf_vf_flr_irq, dev,
			    RVU_PF_INT_VEC_VFFLR1);

	return 0;
}
+
+static int
+vf_flr_register_irqs(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
+{
+ struct rte_intr_handle *handle = &pci_dev->intr_handle;
+ int i, rc;
+
+ otx2_base_dbg("Register VF FLR interrupts for %s", pci_dev->name);
+
+ rc = otx2_register_irq(handle, otx2_pf_vf_flr_irq, dev,
+ RVU_PF_INT_VEC_VFFLR0);
+ if (rc)
+ otx2_err("Failed to init RVU_PF_INT_VEC_VFFLR0 rc=%d", rc);
+
+ rc = otx2_register_irq(handle, otx2_pf_vf_flr_irq, dev,
+ RVU_PF_INT_VEC_VFFLR1);
+ if (rc)
+ otx2_err("Failed to init RVU_PF_INT_VEC_VFFLR1 rc=%d", rc);
+
+ /* Enable HW interrupt */
+ for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
+ otx2_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INTX(i));
+ otx2_write64(~0ull, dev->bar2 + RVU_PF_VFTRPENDX(i));
+ otx2_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INT_ENA_W1SX(i));
+ }
+ return 0;
+}
+
+/**
+ * @internal
+ * Get number of active VFs for the given PF device.
+ */
+int
+otx2_dev_active_vfs(void *otx2_dev)
+{
+ struct otx2_dev *dev = otx2_dev;
+ int i, count = 0;
+
+ for (i = 0; i < MAX_VFPF_DWORD_BITS; i++)
+ count += __builtin_popcount(dev->active_vfs[i]);
+
+ return count;
+}
+
+static void
+otx2_update_vf_hwcap(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
+{
+ switch (pci_dev->id.device_id) {
+ case PCI_DEVID_OCTEONTX2_RVU_PF:
+ break;
+ case PCI_DEVID_OCTEONTX2_RVU_SSO_TIM_VF:
+ case PCI_DEVID_OCTEONTX2_RVU_NPA_VF:
+ case PCI_DEVID_OCTEONTX2_RVU_CPT_VF:
+ case PCI_DEVID_OCTEONTX2_RVU_AF_VF:
+ case PCI_DEVID_OCTEONTX2_RVU_VF:
+ case PCI_DEVID_OCTEONTX2_RVU_SDP_VF:
+ dev->hwcap |= OTX2_HWCAP_F_VF;
+ break;
+ }
+}
+
+/**
+ * @internal
+ * Initialize the otx2 device
+ */
+int
+otx2_dev_priv_init(struct rte_pci_device *pci_dev, void *otx2_dev)
+{
+ int up_direction = MBOX_DIR_PFAF_UP;
+ int rc, direction = MBOX_DIR_PFAF;
+ uint64_t intr_offset = RVU_PF_INT;
+ struct otx2_dev *dev = otx2_dev;
+ uintptr_t bar2, bar4;
+ uint64_t bar4_addr;
+ void *hwbase;
+
+ bar2 = (uintptr_t)pci_dev->mem_resource[2].addr;
+ bar4 = (uintptr_t)pci_dev->mem_resource[4].addr;
+
+ if (bar2 == 0 || bar4 == 0) {
+ otx2_err("Failed to get pci bars");
+ rc = -ENODEV;
+ goto error;
+ }
+
+ dev->node = pci_dev->device.numa_node;
+ dev->maxvf = pci_dev->max_vfs;
+ dev->bar2 = bar2;
+ dev->bar4 = bar4;
+
+ otx2_update_vf_hwcap(pci_dev, dev);
+
+ if (otx2_dev_is_vf(dev)) {
+ direction = MBOX_DIR_VFPF;
+ up_direction = MBOX_DIR_VFPF_UP;
+ intr_offset = RVU_VF_INT;
+ }
+
+ /* Initialize the local mbox */
+ rc = otx2_mbox_init(&dev->mbox_local, bar4, bar2, direction, 1,
+ intr_offset);
+ if (rc)
+ goto error;
+ dev->mbox = &dev->mbox_local;
+
+ rc = otx2_mbox_init(&dev->mbox_up, bar4, bar2, up_direction, 1,
+ intr_offset);
+ if (rc)
+ goto error;
+
+ /* Register mbox interrupts */
+ rc = mbox_register_irq(pci_dev, dev);
+ if (rc)
+ goto mbox_fini;
+
+ /* Check the readiness of PF/VF */
+ rc = otx2_send_ready_msg(dev->mbox, &dev->pf_func);
+ if (rc)
+ goto mbox_unregister;
+
+ dev->pf = otx2_get_pf(dev->pf_func);
+ dev->vf = otx2_get_vf(dev->pf_func);
+ memset(&dev->active_vfs, 0, sizeof(dev->active_vfs));
+
+ /* Found VF devices in a PF device */
+ if (pci_dev->max_vfs > 0) {
+
+ /* Remap mbox area for all vf's */
+ bar4_addr = otx2_read64(bar2 + RVU_PF_VF_BAR4_ADDR);
+ if (bar4_addr == 0) {
+ rc = -ENODEV;
+ goto mbox_fini;
+ }
+
+ hwbase = mbox_mem_map(bar4_addr, MBOX_SIZE * pci_dev->max_vfs);
+ if (hwbase == MAP_FAILED) {
+ rc = -ENOMEM;
+ goto mbox_fini;
+ }
+ /* Init mbox object */
+ rc = otx2_mbox_init(&dev->mbox_vfpf, (uintptr_t)hwbase,
+ bar2, MBOX_DIR_PFVF, pci_dev->max_vfs,
+ intr_offset);
+ if (rc)
+ goto iounmap;
+
+ /* PF -> VF UP messages */
+ rc = otx2_mbox_init(&dev->mbox_vfpf_up, (uintptr_t)hwbase,
+ bar2, MBOX_DIR_PFVF_UP, pci_dev->max_vfs,
+ intr_offset);
+ if (rc)
+ goto mbox_fini;
+ }
+
+ /* Register VF-FLR irq handlers */
+ if (otx2_dev_is_pf(dev)) {
+ rc = vf_flr_register_irqs(pci_dev, dev);
+ if (rc)
+ goto iounmap;
+ }
+ dev->mbox_active = 1;
+ return rc;
+
+iounmap:
+ mbox_mem_unmap(hwbase, MBOX_SIZE * pci_dev->max_vfs);
+mbox_unregister:
+ mbox_unregister_irq(pci_dev, dev);
+mbox_fini:
+ otx2_mbox_fini(dev->mbox);
+ otx2_mbox_fini(&dev->mbox_up);
+error:
+ return rc;
+}
+
+/**
+ * @internal
+ * Finalize the otx2 device
+ */
+void
+otx2_dev_fini(struct rte_pci_device *pci_dev, void *otx2_dev)
+{
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct otx2_dev *dev = otx2_dev;
+ struct otx2_idev_cfg *idev;
+ struct otx2_mbox *mbox;
+
+ /* Clear references to this pci dev */
+ idev = otx2_intra_dev_get_cfg();
+ if (idev->npa_lf && idev->npa_lf->pci_dev == pci_dev)
+ idev->npa_lf = NULL;
+
+ mbox_unregister_irq(pci_dev, dev);
+
+ if (otx2_dev_is_pf(dev))
+ vf_flr_unregister_irqs(pci_dev, dev);
+ /* Release PF - VF */
+ mbox = &dev->mbox_vfpf;
+ if (mbox->hwbase && mbox->dev)
+ mbox_mem_unmap((void *)mbox->hwbase,
+ MBOX_SIZE * pci_dev->max_vfs);
+ otx2_mbox_fini(mbox);
+ mbox = &dev->mbox_vfpf_up;
+ otx2_mbox_fini(mbox);
+
+ /* Release PF - AF */
+ mbox = dev->mbox;
+ otx2_mbox_fini(mbox);
+ mbox = &dev->mbox_up;
+ otx2_mbox_fini(mbox);
+ dev->mbox_active = 0;
+
+ /* Disable MSIX vectors */
+ otx2_disable_irqs(intr_handle);
+}
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/otx2_dev.h b/src/spdk/dpdk/drivers/common/octeontx2/otx2_dev.h
new file mode 100644
index 000000000..cd4fe517d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/otx2_dev.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef _OTX2_DEV_H
+#define _OTX2_DEV_H
+
+#include <rte_bus_pci.h>
+
+#include "otx2_common.h"
+#include "otx2_irq.h"
+#include "otx2_mbox.h"
+#include "otx2_mempool.h"
+
+/* Common HWCAP flags. Use from LSB bits */
+#define OTX2_HWCAP_F_VF BIT_ULL(8) /* VF device */
+#define otx2_dev_is_vf(dev) (dev->hwcap & OTX2_HWCAP_F_VF)
+#define otx2_dev_is_pf(dev) (!(dev->hwcap & OTX2_HWCAP_F_VF))
+#define otx2_dev_is_lbk(dev) ((dev->hwcap & OTX2_HWCAP_F_VF) && \
+ (dev->tx_chan_base < 0x700))
+#define otx2_dev_revid(dev) (dev->hwcap & 0xFF)
+#define otx2_dev_is_sdp(dev) (dev->sdp_link)
+
+#define otx2_dev_is_vf_or_sdp(dev) \
+ (otx2_dev_is_vf(dev) || otx2_dev_is_sdp(dev))
+
+#define otx2_dev_is_A0(dev) \
+ ((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x0) && \
+ (RVU_PCI_REV_MINOR(otx2_dev_revid(dev)) == 0x0))
+#define otx2_dev_is_Ax(dev) \
+ ((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x0))
+
+#define otx2_dev_is_95xx_A0(dev) \
+ ((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x0) && \
+ (RVU_PCI_REV_MINOR(otx2_dev_revid(dev)) == 0x0) && \
+ (RVU_PCI_REV_MIDR_ID(otx2_dev_revid(dev)) == 0x1))
+#define otx2_dev_is_95xx_Ax(dev) \
+ ((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x0) && \
+ (RVU_PCI_REV_MIDR_ID(otx2_dev_revid(dev)) == 0x1))
+
+#define otx2_dev_is_96xx_A0(dev) \
+ ((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x0) && \
+ (RVU_PCI_REV_MINOR(otx2_dev_revid(dev)) == 0x0) && \
+ (RVU_PCI_REV_MIDR_ID(otx2_dev_revid(dev)) == 0x0))
+#define otx2_dev_is_96xx_Ax(dev) \
+ ((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x0) && \
+ (RVU_PCI_REV_MIDR_ID(otx2_dev_revid(dev)) == 0x0))
+
+#define otx2_dev_is_96xx_Cx(dev) \
+ ((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x2) && \
+ (RVU_PCI_REV_MIDR_ID(otx2_dev_revid(dev)) == 0x0))
+
+#define otx2_dev_is_96xx_C0(dev) \
+ ((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x2) && \
+ (RVU_PCI_REV_MINOR(otx2_dev_revid(dev)) == 0x0) && \
+ (RVU_PCI_REV_MIDR_ID(otx2_dev_revid(dev)) == 0x0))
+
/* Forward declaration; the full layout is assembled from OTX2_DEV below */
struct otx2_dev;

/* Link status callback */
typedef void (*otx2_link_status_t)(struct otx2_dev *dev,
				   struct cgx_link_user_info *link);
/* PTP info callback */
typedef int (*otx2_ptp_info_t)(struct otx2_dev *dev, bool ptp_en);

/* Optional per-driver callbacks; presumably invoked when the AF pushes
 * link-status / PTP updates over the UP mailbox — confirm in otx2_dev.c.
 */
struct otx2_dev_ops {
	otx2_link_status_t link_status_update;
	otx2_ptp_info_t ptp_info_update;
};
+
+#define OTX2_DEV \
+ int node __rte_cache_aligned; \
+ uint16_t pf; \
+ int16_t vf; \
+ uint16_t pf_func; \
+ uint8_t mbox_active; \
+ bool drv_inited; \
+ uint64_t active_vfs[MAX_VFPF_DWORD_BITS]; \
+ uintptr_t bar2; \
+ uintptr_t bar4; \
+ struct otx2_mbox mbox_local; \
+ struct otx2_mbox mbox_up; \
+ struct otx2_mbox mbox_vfpf; \
+ struct otx2_mbox mbox_vfpf_up; \
+ otx2_intr_t intr; \
+ int timer_set; /* ~0 : no alarm handling */ \
+ uint64_t hwcap; \
+ struct otx2_npa_lf npalf; \
+ struct otx2_mbox *mbox; \
+ uint16_t maxvf; \
+ const struct otx2_dev_ops *ops
+
/* Base device object; expands to the common field set shared by all
 * otx2 drivers via the OTX2_DEV macro.
 */
struct otx2_dev {
	OTX2_DEV;
};
+
+__rte_internal
+int otx2_dev_priv_init(struct rte_pci_device *pci_dev, void *otx2_dev);
+
+/* Common dev init and fini routines */
+
+static __rte_always_inline int
+otx2_dev_init(struct rte_pci_device *pci_dev, void *otx2_dev)
+{
+ struct otx2_dev *dev = otx2_dev;
+ uint8_t rev_id;
+ int rc;
+
+ rc = rte_pci_read_config(pci_dev, &rev_id,
+ 1, RVU_PCI_REVISION_ID);
+ if (rc != 1) {
+ otx2_err("Failed to read pci revision id, rc=%d", rc);
+ return rc;
+ }
+
+ dev->hwcap = rev_id;
+ return otx2_dev_priv_init(pci_dev, otx2_dev);
+}
+
+__rte_internal
+void otx2_dev_fini(struct rte_pci_device *pci_dev, void *otx2_dev);
+__rte_internal
+int otx2_dev_active_vfs(void *otx2_dev);
+
/* pcifunc layout: | PF (6 bits) | FUNC (10 bits) |; FUNC is the VF
 * number plus one (FUNC == 0 denotes the PF itself).
 */
#define RVU_PFVF_PF_SHIFT	10
#define RVU_PFVF_PF_MASK	0x3F
#define RVU_PFVF_FUNC_SHIFT	0
#define RVU_PFVF_FUNC_MASK	0x3FF

/* Extract the zero-based VF number from a pcifunc (-1 when FUNC == 0). */
static inline int
otx2_get_vf(uint16_t pf_func)
{
	int func = (pf_func >> RVU_PFVF_FUNC_SHIFT) & RVU_PFVF_FUNC_MASK;

	return func - 1;
}

/* Extract the PF number from a pcifunc. */
static inline int
otx2_get_pf(uint16_t pf_func)
{
	return (pf_func >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}

/* Compose a pcifunc from a PF number and a zero-based VF number. */
static inline int
otx2_pfvf_func(int pf, int vf)
{
	int func = (vf << RVU_PFVF_FUNC_SHIFT) + 1;

	return (pf << RVU_PFVF_PF_SHIFT) | func;
}

/* An AF-VF carries a zero PF field: only FUNC bits may be set. */
static inline int
otx2_is_afvf(uint16_t pf_func)
{
	return !(pf_func & ~RVU_PFVF_FUNC_MASK);
}
+
+#endif /* _OTX2_DEV_H */
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/otx2_io_arm64.h b/src/spdk/dpdk/drivers/common/octeontx2/otx2_io_arm64.h
new file mode 100644
index 000000000..7e45329b3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/otx2_io_arm64.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef _OTX2_IO_ARM64_H_
+#define _OTX2_IO_ARM64_H_
+
+#define otx2_load_pair(val0, val1, addr) ({ \
+ asm volatile( \
+ "ldp %x[x0], %x[x1], [%x[p1]]" \
+ :[x0]"=r"(val0), [x1]"=r"(val1) \
+ :[p1]"r"(addr) \
+ ); })
+
+#define otx2_store_pair(val0, val1, addr) ({ \
+ asm volatile( \
+ "stp %x[x0], %x[x1], [%x[p1],#0]!" \
+ ::[x0]"r"(val0), [x1]"r"(val1), [p1]"r"(addr) \
+ ); })
+
+#define otx2_prefetch_store_keep(ptr) ({\
+ asm volatile("prfm pstl1keep, [%x0]\n" : : "r" (ptr)); })
+
/* Atomically add 'incr' to *ptr and return the PREVIOUS value.
 * Uses the arm64 LSE LDADD instruction with relaxed (no acquire/release)
 * ordering; the ".cpu generic+lse" directive enables the LSE encoding
 * regardless of the baseline -march.
 */
static __rte_always_inline uint64_t
otx2_atomic64_add_nosync(int64_t incr, int64_t *ptr)
{
	uint64_t result;

	/* Atomic add with no ordering */
	asm volatile (
		".cpu generic+lse\n"
		"ldadd %x[i], %x[r], [%[b]]"
		: [r] "=r" (result), "+m" (*ptr)
		: [i] "r" (incr), [b] "r" (ptr)
		: "memory");
	return result;
}
+
/* Atomically add 'incr' to *ptr and return the PREVIOUS value.
 * Same as the _nosync variant but uses LDADDA (acquire semantics).
 */
static __rte_always_inline uint64_t
otx2_atomic64_add_sync(int64_t incr, int64_t *ptr)
{
	uint64_t result;

	/* Atomic add with ordering */
	asm volatile (
		".cpu generic+lse\n"
		"ldadda %x[i], %x[r], [%[b]]"
		: [r] "=r" (result), "+m" (*ptr)
		: [i] "r" (incr), [b] "r" (ptr)
		: "memory");
	return result;
}
+
/* Trigger an LMTST submission by issuing LDEOR (with xzr as the data
 * source) to the device I/O address; the load result is the device's
 * completion/status word. NOTE(review): the exact status semantics come
 * from the OCTEON TX2 HRM — confirm there.
 */
static __rte_always_inline uint64_t
otx2_lmt_submit(rte_iova_t io_address)
{
	uint64_t result;

	asm volatile (
		".cpu generic+lse\n"
		"ldeor xzr,%x[rf],[%[rs]]" :
		 [rf] "=r"(result): [rs] "r"(io_address));
	return result;
}
+
/* Copy a NIX send descriptor (2 to 4 16-byte dwords) into the LMT
 * region. The volatile 128-bit accesses keep the compiler from
 * reordering or coalescing the stores.
 */
static __rte_always_inline void
otx2_lmt_mov(void *out, const void *in, const uint32_t lmtext)
{
	volatile const __uint128_t *src128 = (const __uint128_t *)in;
	volatile __uint128_t *dst128 = (__uint128_t *)out;
	/* First two dwords (header + send descriptor) always go out */
	dst128[0] = src128[0];
	dst128[1] = src128[1];
	/* lmtext receives following value:
	 * 1: NIX_SUBDC_EXT needed i.e. tx vlan case
	 * 2: NIX_SUBDC_EXT + NIX_SUBDC_MEM i.e. tstamp case
	 */
	if (lmtext) {
		dst128[2] = src128[2];
		if (lmtext > 1)
			dst128[3] = src128[3];
	}
}
+
/* Copy 'segdw' 16-byte dwords of segment descriptors into the LMT
 * region, preserving store order via volatile accesses.
 */
static __rte_always_inline void
otx2_lmt_mov_seg(void *out, const void *in, const uint16_t segdw)
{
	volatile const __uint128_t *src128 = (const __uint128_t *)in;
	volatile __uint128_t *dst128 = (__uint128_t *)out;
	uint8_t i;

	for (i = 0; i < segdw; i++)
		dst128[i] = src128[i];
}
+
+#endif /* _OTX2_IO_ARM64_H_ */
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/otx2_io_generic.h b/src/spdk/dpdk/drivers/common/octeontx2/otx2_io_generic.h
new file mode 100644
index 000000000..b1d754008
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/otx2_io_generic.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef _OTX2_IO_GENERIC_H_
+#define _OTX2_IO_GENERIC_H_
+
+#define otx2_load_pair(val0, val1, addr) \
+do { \
+ val0 = rte_read64_relaxed((void *)(addr)); \
+ val1 = rte_read64_relaxed((uint8_t *)(addr) + 8); \
+} while (0)
+
+#define otx2_store_pair(val0, val1, addr) \
+do { \
+ rte_write64_relaxed(val0, (void *)(addr)); \
+ rte_write64_relaxed(val1, (((uint8_t *)(addr)) + 8)); \
+} while (0)
+
+#define otx2_prefetch_store_keep(ptr) do {} while (0)
+
/* Generic (non-arm64) stub: HW atomic add is unavailable; returns 0. */
static inline uint64_t
otx2_atomic64_add_nosync(int64_t incr, int64_t *ptr)
{
	(void)incr;
	(void)ptr;

	return 0;
}
+
/* Generic (non-arm64) stub: HW atomic add is unavailable; returns 0. */
static inline uint64_t
otx2_atomic64_add_sync(int64_t incr, int64_t *ptr)
{
	(void)incr;
	(void)ptr;

	return 0;
}
+
/* Generic (non-arm64) stub: LMTST is unavailable; reports success (0). */
static inline int64_t
otx2_lmt_submit(uint64_t io_address)
{
	(void)io_address;

	return 0;
}
+
+static __rte_always_inline void
+otx2_lmt_mov(void *out, const void *in, const uint32_t lmtext)
+{
+ RTE_SET_USED(out);
+ RTE_SET_USED(in);
+ RTE_SET_USED(lmtext);
+}
+
+static __rte_always_inline void
+otx2_lmt_mov_seg(void *out, const void *in, const uint16_t segdw)
+{
+ RTE_SET_USED(out);
+ RTE_SET_USED(in);
+ RTE_SET_USED(segdw);
+}
+#endif /* _OTX2_IO_GENERIC_H_ */
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/otx2_irq.c b/src/spdk/dpdk/drivers/common/octeontx2/otx2_irq.c
new file mode 100644
index 000000000..fa3206af5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/otx2_irq.c
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <rte_alarm.h>
+#include <rte_common.h>
+#include <rte_eal.h>
+#include <rte_interrupts.h>
+
+#include "otx2_common.h"
+#include "otx2_irq.h"
+
+#ifdef RTE_EAL_VFIO
+
+#include <inttypes.h>
+#include <linux/vfio.h>
+#include <sys/eventfd.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+#define MAX_INTR_VEC_ID RTE_MAX_RXTX_INTR_VEC_ID
+#define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
+ sizeof(int) * (MAX_INTR_VEC_ID))
+
+static int
+irq_get_info(struct rte_intr_handle *intr_handle)
+{
+ struct vfio_irq_info irq = { .argsz = sizeof(irq) };
+ int rc;
+
+ irq.index = VFIO_PCI_MSIX_IRQ_INDEX;
+
+ rc = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_GET_IRQ_INFO, &irq);
+ if (rc < 0) {
+ otx2_err("Failed to get IRQ info rc=%d errno=%d", rc, errno);
+ return rc;
+ }
+
+ otx2_base_dbg("Flags=0x%x index=0x%x count=0x%x max_intr_vec_id=0x%x",
+ irq.flags, irq.index, irq.count, MAX_INTR_VEC_ID);
+
+ if (irq.count > MAX_INTR_VEC_ID) {
+ otx2_err("HW max=%d > MAX_INTR_VEC_ID: %d",
+ intr_handle->max_intr, MAX_INTR_VEC_ID);
+ intr_handle->max_intr = MAX_INTR_VEC_ID;
+ } else {
+ intr_handle->max_intr = irq.count;
+ }
+
+ return 0;
+}
+
/* Bind the eventfd of one MSI-X vector to VFIO so the kernel signals it
 * on interrupt. Returns 0 on success, negative on error.
 */
static int
irq_config(struct rte_intr_handle *intr_handle, unsigned int vec)
{
	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;
	int32_t *fd_ptr;
	int len, rc;

	/* NOTE(review): bound check uses '>', so vec == max_intr is
	 * accepted even though vectors are 0-based — confirm callers never
	 * pass a vector equal to the count.
	 */
	if (vec > intr_handle->max_intr) {
		otx2_err("vector=%d greater than max_intr=%d", vec,
			 intr_handle->max_intr);
		return -EINVAL;
	}

	/* One fd for a single vector */
	len = sizeof(struct vfio_irq_set) + sizeof(int32_t);

	irq_set = (struct vfio_irq_set *)irq_set_buf;
	irq_set->argsz = len;

	irq_set->start = vec;
	irq_set->count = 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
			 VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;

	/* Use vec fd to set interrupt vectors */
	fd_ptr = (int32_t *)&irq_set->data[0];
	fd_ptr[0] = intr_handle->efds[vec];

	rc = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (rc)
		otx2_err("Failed to set_irqs vector=0x%x rc=%d", vec, rc);

	return rc;
}
+
/* Switch the device into MSI-X trigger mode with all vectors initially
 * unassigned (fd == -1); individual vectors are bound later via
 * irq_config(). Returns 0 on success.
 */
static int
irq_init(struct rte_intr_handle *intr_handle)
{
	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;
	int32_t *fd_ptr;
	int len, rc;
	uint32_t i;

	if (intr_handle->max_intr > MAX_INTR_VEC_ID) {
		otx2_err("Max_intr=%d greater than MAX_INTR_VEC_ID=%d",
			 intr_handle->max_intr, MAX_INTR_VEC_ID);
		return -ERANGE;
	}

	len = sizeof(struct vfio_irq_set) +
		sizeof(int32_t) * intr_handle->max_intr;

	irq_set = (struct vfio_irq_set *)irq_set_buf;
	irq_set->argsz = len;
	irq_set->start = 0;
	irq_set->count = intr_handle->max_intr;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
			 VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;

	/* -1 leaves every vector untriggered until a real fd is bound */
	fd_ptr = (int32_t *)&irq_set->data[0];
	for (i = 0; i < irq_set->count; i++)
		fd_ptr[i] = -1;

	rc = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (rc)
		otx2_err("Failed to set irqs vector rc=%d", rc);

	return rc;
}
+
+/**
+ * @internal
+ * Disable IRQ
+ */
+int
+otx2_disable_irqs(struct rte_intr_handle *intr_handle)
+{
+ /* Clear max_intr to indicate re-init next time */
+ intr_handle->max_intr = 0;
+ return rte_intr_disable(intr_handle);
+}
+
+/**
+ * @internal
+ * Register IRQ
+ */
+int
+otx2_register_irq(struct rte_intr_handle *intr_handle,
+ rte_intr_callback_fn cb, void *data, unsigned int vec)
+{
+ struct rte_intr_handle tmp_handle;
+ int rc;
+
+ /* If no max_intr read from VFIO */
+ if (intr_handle->max_intr == 0) {
+ irq_get_info(intr_handle);
+ irq_init(intr_handle);
+ }
+
+ if (vec > intr_handle->max_intr) {
+ otx2_err("Vector=%d greater than max_intr=%d", vec,
+ intr_handle->max_intr);
+ return -EINVAL;
+ }
+
+ tmp_handle = *intr_handle;
+ /* Create new eventfd for interrupt vector */
+ tmp_handle.fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
+ if (tmp_handle.fd == -1)
+ return -ENODEV;
+
+ /* Register vector interrupt callback */
+ rc = rte_intr_callback_register(&tmp_handle, cb, data);
+ if (rc) {
+ otx2_err("Failed to register vector:0x%x irq callback.", vec);
+ return rc;
+ }
+
+ intr_handle->efds[vec] = tmp_handle.fd;
+ intr_handle->nb_efd = (vec > intr_handle->nb_efd) ?
+ vec : intr_handle->nb_efd;
+ if ((intr_handle->nb_efd + 1) > intr_handle->max_intr)
+ intr_handle->max_intr = intr_handle->nb_efd + 1;
+
+ otx2_base_dbg("Enable vector:0x%x for vfio (efds: %d, max:%d)",
+ vec, intr_handle->nb_efd, intr_handle->max_intr);
+
+ /* Enable MSIX vectors to VFIO */
+ return irq_config(intr_handle, vec);
+}
+
+/**
+ * @internal
+ * Unregister IRQ
+ */
+void
+otx2_unregister_irq(struct rte_intr_handle *intr_handle,
+ rte_intr_callback_fn cb, void *data, unsigned int vec)
+{
+ struct rte_intr_handle tmp_handle;
+
+ if (vec > intr_handle->max_intr) {
+ otx2_err("Error unregistering MSI-X interrupts vec:%d > %d",
+ vec, intr_handle->max_intr);
+ return;
+ }
+
+ tmp_handle = *intr_handle;
+ tmp_handle.fd = intr_handle->efds[vec];
+ if (tmp_handle.fd == -1)
+ return;
+
+ /* Un-register callback func from eal lib */
+ rte_intr_callback_unregister(&tmp_handle, cb, data);
+
+ otx2_base_dbg("Disable vector:0x%x for vfio (efds: %d, max:%d)",
+ vec, intr_handle->nb_efd, intr_handle->max_intr);
+
+ if (intr_handle->efds[vec] != -1)
+ close(intr_handle->efds[vec]);
+ /* Disable MSIX vectors from VFIO */
+ intr_handle->efds[vec] = -1;
+ irq_config(intr_handle, vec);
+}
+
+#else
+
+/**
+ * @internal
+ * Register IRQ
+ */
+int otx2_register_irq(__rte_unused struct rte_intr_handle *intr_handle,
+ __rte_unused rte_intr_callback_fn cb,
+ __rte_unused void *data, __rte_unused unsigned int vec)
+{
+ return -ENOTSUP;
+}
+
+
+/**
+ * @internal
+ * Unregister IRQ
+ */
+void otx2_unregister_irq(__rte_unused struct rte_intr_handle *intr_handle,
+ __rte_unused rte_intr_callback_fn cb,
+ __rte_unused void *data, __rte_unused unsigned int vec)
+{
+}
+
+/**
+ * @internal
+ * Disable IRQ
+ */
+int otx2_disable_irqs(__rte_unused struct rte_intr_handle *intr_handle)
+{
+ return -ENOTSUP;
+}
+
+#endif /* RTE_EAL_VFIO */
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/otx2_irq.h b/src/spdk/dpdk/drivers/common/octeontx2/otx2_irq.h
new file mode 100644
index 000000000..0683cf554
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/otx2_irq.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef _OTX2_IRQ_H_
+#define _OTX2_IRQ_H_
+
+#include <rte_pci.h>
+#include <rte_interrupts.h>
+
+#include "otx2_common.h"
+
+typedef struct {
+/* 128 devices translate to two 64 bits dwords */
+#define MAX_VFPF_DWORD_BITS 2
+ uint64_t bits[MAX_VFPF_DWORD_BITS];
+} otx2_intr_t;
+
+__rte_internal
+int otx2_register_irq(struct rte_intr_handle *intr_handle,
+ rte_intr_callback_fn cb, void *data, unsigned int vec);
+__rte_internal
+void otx2_unregister_irq(struct rte_intr_handle *intr_handle,
+ rte_intr_callback_fn cb, void *data, unsigned int vec);
+__rte_internal
+int otx2_disable_irqs(struct rte_intr_handle *intr_handle);
+
+#endif /* _OTX2_IRQ_H_ */
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/otx2_mbox.c b/src/spdk/dpdk/drivers/common/octeontx2/otx2_mbox.c
new file mode 100644
index 000000000..2b7810929
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/otx2_mbox.c
@@ -0,0 +1,462 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+
+#include "otx2_mbox.h"
+#include "otx2_dev.h"
+
+#define RVU_AF_AFPF_MBOX0 (0x02000)
+#define RVU_AF_AFPF_MBOX1 (0x02008)
+
+#define RVU_PF_PFAF_MBOX0 (0xC00)
+#define RVU_PF_PFAF_MBOX1 (0xC08)
+
+#define RVU_PF_VFX_PFVF_MBOX0 (0x0000)
+#define RVU_PF_VFX_PFVF_MBOX1 (0x0008)
+
+#define RVU_VF_VFPF_MBOX0 (0x0000)
+#define RVU_VF_VFPF_MBOX1 (0x0008)
+
+static inline uint16_t
+msgs_offset(void)
+{
+ return RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+}
+
+void
+otx2_mbox_fini(struct otx2_mbox *mbox)
+{
+ mbox->reg_base = 0;
+ mbox->hwbase = 0;
+ free(mbox->dev);
+ mbox->dev = NULL;
+}
+
/* Reset the TX/RX headers and pending sizes of one device's mailbox so a
 * fresh request/response exchange can start.
 */
void
otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	/* Each direction's header sits at the very start of its region */
	struct mbox_hdr *tx_hdr =
		(struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->tx_start);
	struct mbox_hdr *rx_hdr =
		(struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);

	rte_spinlock_lock(&mdev->mbox_lock);
	mdev->msg_size = 0;
	mdev->rsp_size = 0;
	tx_hdr->msg_size = 0;
	tx_hdr->num_msgs = 0;
	rx_hdr->msg_size = 0;
	rx_hdr->num_msgs = 0;
	rte_spinlock_unlock(&mdev->mbox_lock);
}
+
/* Initialize a mailbox object for 'ndevs' peer devices.
 *
 * The first switch selects the TX/RX window layout: the side that
 * initiates "down" traffic transmits in the DOWN_TX window while the
 * responding side sees the mirrored layout (its TX is the peer's RX).
 * The second switch picks the doorbell register and the per-device
 * stride (tr_shift) used to trigger the peer.
 *
 * Returns 0 on success, -ENODEV for an unknown direction or -ENOMEM.
 */
int
otx2_mbox_init(struct otx2_mbox *mbox, uintptr_t hwbase, uintptr_t reg_base,
	       int direction, int ndevs, uint64_t intr_offset)
{
	struct otx2_mbox_dev *mdev;
	int devid;

	mbox->intr_offset = intr_offset;
	mbox->reg_base = reg_base;
	mbox->hwbase = hwbase;

	switch (direction) {
	case MBOX_DIR_AFPF:
	case MBOX_DIR_PFVF:
		/* Initiating side of the down channel */
		mbox->tx_start = MBOX_DOWN_TX_START;
		mbox->rx_start = MBOX_DOWN_RX_START;
		mbox->tx_size = MBOX_DOWN_TX_SIZE;
		mbox->rx_size = MBOX_DOWN_RX_SIZE;
		break;
	case MBOX_DIR_PFAF:
	case MBOX_DIR_VFPF:
		/* Responding side: windows are mirrored */
		mbox->tx_start = MBOX_DOWN_RX_START;
		mbox->rx_start = MBOX_DOWN_TX_START;
		mbox->tx_size = MBOX_DOWN_RX_SIZE;
		mbox->rx_size = MBOX_DOWN_TX_SIZE;
		break;
	case MBOX_DIR_AFPF_UP:
	case MBOX_DIR_PFVF_UP:
		mbox->tx_start = MBOX_UP_TX_START;
		mbox->rx_start = MBOX_UP_RX_START;
		mbox->tx_size = MBOX_UP_TX_SIZE;
		mbox->rx_size = MBOX_UP_RX_SIZE;
		break;
	case MBOX_DIR_PFAF_UP:
	case MBOX_DIR_VFPF_UP:
		mbox->tx_start = MBOX_UP_RX_START;
		mbox->rx_start = MBOX_UP_TX_START;
		mbox->tx_size = MBOX_UP_RX_SIZE;
		mbox->rx_size = MBOX_UP_TX_SIZE;
		break;
	default:
		return -ENODEV;
	}

	switch (direction) {
	case MBOX_DIR_AFPF:
	case MBOX_DIR_AFPF_UP:
		/* AF side: one doorbell per PF, 16 bytes apart */
		mbox->trigger = RVU_AF_AFPF_MBOX0;
		mbox->tr_shift = 4;
		break;
	case MBOX_DIR_PFAF:
	case MBOX_DIR_PFAF_UP:
		mbox->trigger = RVU_PF_PFAF_MBOX1;
		mbox->tr_shift = 0;
		break;
	case MBOX_DIR_PFVF:
	case MBOX_DIR_PFVF_UP:
		/* PF side: one doorbell per VF, 4 KiB apart */
		mbox->trigger = RVU_PF_VFX_PFVF_MBOX0;
		mbox->tr_shift = 12;
		break;
	case MBOX_DIR_VFPF:
	case MBOX_DIR_VFPF_UP:
		mbox->trigger = RVU_VF_VFPF_MBOX1;
		mbox->tr_shift = 0;
		break;
	default:
		return -ENODEV;
	}

	mbox->dev = malloc(ndevs * sizeof(struct otx2_mbox_dev));
	if (!mbox->dev) {
		otx2_mbox_fini(mbox);
		return -ENOMEM;
	}
	mbox->ndevs = ndevs;
	for (devid = 0; devid < ndevs; devid++) {
		mdev = &mbox->dev[devid];
		/* Each peer device owns one MBOX_SIZE slice of the region */
		mdev->mbase = (void *)(mbox->hwbase + (devid * MBOX_SIZE));
		rte_spinlock_init(&mdev->mbox_lock);
		/* Init header to reset value */
		otx2_mbox_reset(mbox, devid);
	}

	return 0;
}
+
+/**
+ * @internal
+ * Allocate a message response
+ */
+struct mbox_msghdr *
+otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid, int size,
+ int size_rsp)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_msghdr *msghdr = NULL;
+
+ rte_spinlock_lock(&mdev->mbox_lock);
+ size = RTE_ALIGN(size, MBOX_MSG_ALIGN);
+ size_rsp = RTE_ALIGN(size_rsp, MBOX_MSG_ALIGN);
+ /* Check if there is space in mailbox */
+ if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset())
+ goto exit;
+ if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset())
+ goto exit;
+ if (mdev->msg_size == 0)
+ mdev->num_msgs = 0;
+ mdev->num_msgs++;
+
+ msghdr = (struct mbox_msghdr *)(((uintptr_t)mdev->mbase +
+ mbox->tx_start + msgs_offset() + mdev->msg_size));
+
+ /* Clear the whole msg region */
+ otx2_mbox_memset(msghdr, 0, sizeof(*msghdr) + size);
+ /* Init message header with reset values */
+ msghdr->ver = OTX2_MBOX_VERSION;
+ mdev->msg_size += size;
+ mdev->rsp_size += size_rsp;
+ msghdr->next_msgoff = mdev->msg_size + msgs_offset();
+exit:
+ rte_spinlock_unlock(&mdev->mbox_lock);
+
+ return msghdr;
+}
+
+/**
+ * @internal
+ * Send a mailbox message
+ */
+void
+otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_hdr *tx_hdr =
+ (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->tx_start);
+ struct mbox_hdr *rx_hdr =
+ (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
+
+ /* Reset header for next messages */
+ tx_hdr->msg_size = mdev->msg_size;
+ mdev->msg_size = 0;
+ mdev->rsp_size = 0;
+ mdev->msgs_acked = 0;
+
+ /* num_msgs != 0 signals to the peer that the buffer has a number of
+ * messages. So this should be written after copying txmem
+ */
+ tx_hdr->num_msgs = mdev->num_msgs;
+ rx_hdr->num_msgs = 0;
+
+ /* Sync mbox data into memory */
+ rte_wmb();
+
+ /* The interrupt should be fired after num_msgs is written
+ * to the shared memory
+ */
+ rte_write64(1, (volatile void *)(mbox->reg_base +
+ (mbox->trigger | (devid << mbox->tr_shift))));
+}
+
+/**
+ * @internal
+ * Wait and get mailbox response
+ */
+int
+otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid, void **msg)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_msghdr *msghdr;
+ uint64_t offset;
+ int rc;
+
+ rc = otx2_mbox_wait_for_rsp(mbox, devid);
+ if (rc != 1)
+ return -EIO;
+
+ rte_rmb();
+
+ offset = mbox->rx_start +
+ RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+ msghdr = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
+ if (msg != NULL)
+ *msg = msghdr;
+
+ return msghdr->rc;
+}
+
+/**
+ * Polling for given wait time to get mailbox response
+ */
+static int
+mbox_poll(struct otx2_mbox *mbox, uint32_t wait)
+{
+ uint32_t timeout = 0, sleep = 1;
+ uint32_t wait_us = wait * 1000;
+ uint64_t rsp_reg = 0;
+ uintptr_t reg_addr;
+
+ reg_addr = mbox->reg_base + mbox->intr_offset;
+ do {
+ rsp_reg = otx2_read64(reg_addr);
+
+ if (timeout >= wait_us)
+ return -ETIMEDOUT;
+
+ rte_delay_us(sleep);
+ timeout += sleep;
+ } while (!rsp_reg);
+
+ rte_smp_rmb();
+
+ /* Clear interrupt */
+ otx2_write64(rsp_reg, reg_addr);
+
+ /* Reset mbox */
+ otx2_mbox_reset(mbox, 0);
+
+ return 0;
+}
+
+/**
+ * @internal
+ * Wait and get mailbox response with timeout
+ */
+int
+otx2_mbox_get_rsp_tmo(struct otx2_mbox *mbox, int devid, void **msg,
+ uint32_t tmo)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_msghdr *msghdr;
+ uint64_t offset;
+ int rc;
+
+ rc = otx2_mbox_wait_for_rsp_tmo(mbox, devid, tmo);
+ if (rc != 1)
+ return -EIO;
+
+ rte_rmb();
+
+ offset = mbox->rx_start +
+ RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+ msghdr = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
+ if (msg != NULL)
+ *msg = msghdr;
+
+ return msghdr->rc;
+}
+
/* Busy-wait (1 us granularity) until the peer has acked every queued
 * message or 'rst_timo' milliseconds elapse. msgs_acked is advanced by
 * the interrupt path, hence the volatile view of mdev.
 * Returns 0 on success, -EIO on timeout.
 */
static int
mbox_wait(struct otx2_mbox *mbox, int devid, uint32_t rst_timo)
{
	volatile struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	uint32_t timeout = 0, sleep = 1;

	rst_timo  = rst_timo * 1000; /* Milli seconds to micro seconds */
	while (mdev->num_msgs > mdev->msgs_acked) {
		rte_delay_us(sleep);
		timeout += sleep;
		if (timeout >= rst_timo) {
			struct mbox_hdr *tx_hdr =
				(struct mbox_hdr *)((uintptr_t)mdev->mbase +
							mbox->tx_start);
			struct mbox_hdr *rx_hdr =
				(struct mbox_hdr *)((uintptr_t)mdev->mbase +
							mbox->rx_start);

			/* Dump both the SW and shared-memory counters to
			 * make the stall diagnosable.
			 */
			otx2_err("MBOX[devid: %d] message wait timeout %d, "
				 "num_msgs: %d, msgs_acked: %d "
				 "(tx/rx num_msgs: %d/%d), msg_size: %d, "
				 "rsp_size: %d",
				 devid, timeout, mdev->num_msgs,
				 mdev->msgs_acked, tx_hdr->num_msgs,
				 rx_hdr->num_msgs, mdev->msg_size,
				 mdev->rsp_size);

			return -EIO;
		}
		rte_rmb();
	}
	return 0;
}
+
/* Wait for a mailbox response with an explicit timeout (ms). Chooses the
 * register-polling path in interrupt context, the msgs_acked path
 * otherwise. Returns the number of received messages or a negative errno.
 */
int
otx2_mbox_wait_for_rsp_tmo(struct otx2_mbox *mbox, int devid, uint32_t tmo)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	int rc = 0;

	/* Sync with mbox region */
	rte_rmb();

	if (mbox->trigger == RVU_PF_VFX_PFVF_MBOX1 ||
		mbox->trigger == RVU_PF_VFX_PFVF_MBOX0) {
		/* In case of VF, Wait a bit more to account round trip delay */
		tmo = tmo * 2;
	}

	/* Wait message */
	if (rte_thread_is_intr())
		rc = mbox_poll(mbox, tmo);
	else
		rc = mbox_wait(mbox, devid, tmo);

	if (!rc)
		rc = mdev->num_msgs;

	return rc;
}
+
+/**
+ * @internal
+ * Wait for the mailbox response
+ */
+int
+otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
+{
+ return otx2_mbox_wait_for_rsp_tmo(mbox, devid, MBOX_RSP_TIMEOUT);
+}
+
+int
+otx2_mbox_get_availmem(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ int avail;
+
+ rte_spinlock_lock(&mdev->mbox_lock);
+ avail = mbox->tx_size - mdev->msg_size - msgs_offset();
+ rte_spinlock_unlock(&mdev->mbox_lock);
+
+ return avail;
+}
+
+int
+otx2_send_ready_msg(struct otx2_mbox *mbox, uint16_t *pcifunc)
+{
+ struct ready_msg_rsp *rsp;
+ int rc;
+
+ otx2_mbox_alloc_msg_ready(mbox);
+
+ otx2_mbox_msg_send(mbox, 0);
+ rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (rsp->hdr.ver != OTX2_MBOX_VERSION) {
+ otx2_err("Incompatible MBox versions(AF: 0x%04x DPDK: 0x%04x)",
+ rsp->hdr.ver, OTX2_MBOX_VERSION);
+ return -EPIPE;
+ }
+
+ if (pcifunc)
+ *pcifunc = rsp->hdr.pcifunc;
+
+ return 0;
+}
+
+int
+otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, uint16_t pcifunc,
+ uint16_t id)
+{
+ struct msg_rsp *rsp;
+
+ rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(mbox, devid, sizeof(*rsp));
+ if (!rsp)
+ return -ENOMEM;
+ rsp->hdr.id = id;
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+ rsp->hdr.rc = MBOX_MSG_INVALID;
+ rsp->hdr.pcifunc = pcifunc;
+
+ return 0;
+}
+
+/**
+ * @internal
+ * Convert mail box ID to name
+ */
+const char *otx2_mbox_id2name(uint16_t id)
+{
+ switch (id) {
+#define M(_name, _id, _1, _2, _3) case _id: return # _name;
+ MBOX_MESSAGES
+ MBOX_UP_CGX_MESSAGES
+#undef M
+ default :
+ return "INVALID ID";
+ }
+}
+
/* Return the request struct size for a mailbox message ID (0 when the
 * ID is unknown). Cases are generated from the X-macro message tables.
 */
int otx2_mbox_id2size(uint16_t id)
{
	switch (id) {
#define M(_1, _id, _2, _req_type, _3) case _id: return sizeof(struct _req_type);
	MBOX_MESSAGES
	MBOX_UP_CGX_MESSAGES
#undef M
	default :
		return 0;
	}
}
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/otx2_mbox.h b/src/spdk/dpdk/drivers/common/octeontx2/otx2_mbox.h
new file mode 100644
index 000000000..7fa4276e9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/otx2_mbox.h
@@ -0,0 +1,1773 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_MBOX_H__
+#define __OTX2_MBOX_H__
+
+#include <errno.h>
+#include <stdbool.h>
+
+#include <rte_ether.h>
+#include <rte_spinlock.h>
+
+#include <otx2_common.h>
+
+#define SZ_64K (64ULL * 1024ULL)
+#define SZ_1K (1ULL * 1024ULL)
+#define MBOX_SIZE SZ_64K
+
+/* AF/PF channel: PF initiated; PF/VF channel: VF initiated */
+#define MBOX_DOWN_RX_START 0
+#define MBOX_DOWN_RX_SIZE (46 * SZ_1K)
+#define MBOX_DOWN_TX_START (MBOX_DOWN_RX_START + MBOX_DOWN_RX_SIZE)
+#define MBOX_DOWN_TX_SIZE (16 * SZ_1K)
+/* AF/PF channel: AF initiated; PF/VF channel: PF initiated */
+#define MBOX_UP_RX_START (MBOX_DOWN_TX_START + MBOX_DOWN_TX_SIZE)
+#define MBOX_UP_RX_SIZE SZ_1K
+#define MBOX_UP_TX_START (MBOX_UP_RX_START + MBOX_UP_RX_SIZE)
+#define MBOX_UP_TX_SIZE SZ_1K
+
+#if MBOX_UP_TX_SIZE + MBOX_UP_TX_START != MBOX_SIZE
+# error "Incorrect mailbox area sizes"
+#endif
+
+#define INTR_MASK(pfvfs) ((pfvfs < 64) ? (BIT_ULL(pfvfs) - 1) : (~0ull))
+
+#define MBOX_RSP_TIMEOUT 3000 /* Time to wait for mbox response in ms */
+
+#define MBOX_MSG_ALIGN 16 /* Align mbox msg start to 16bytes */
+
+/* Mailbox directions */
+#define MBOX_DIR_AFPF 0 /* AF replies to PF */
+#define MBOX_DIR_PFAF 1 /* PF sends messages to AF */
+#define MBOX_DIR_PFVF 2 /* PF replies to VF */
+#define MBOX_DIR_VFPF 3 /* VF sends messages to PF */
+#define MBOX_DIR_AFPF_UP 4 /* AF sends messages to PF */
+#define MBOX_DIR_PFAF_UP 5 /* PF replies to AF */
+#define MBOX_DIR_PFVF_UP 6 /* PF sends messages to VF */
+#define MBOX_DIR_VFPF_UP 7 /* VF replies to PF */
+
+/* Device memory does not support unaligned access, instruct compiler to
+ * not optimize the memory access when working with mailbox memory.
+ */
+#define __otx2_io volatile
+
+/* Per-peer mailbox bookkeeping: one instance per device sharing the
+ * mailbox region. mbox_lock serializes message queuing against the
+ * size/count fields below.
+ */
+struct otx2_mbox_dev {
+	void *mbase; /* This dev's mbox region */
+	rte_spinlock_t mbox_lock;
+	uint16_t msg_size; /* Total msg size to be sent */
+	uint16_t rsp_size; /* Total rsp size to be sure the reply is ok */
+	uint16_t num_msgs; /* No of msgs sent or waiting for response */
+	uint16_t msgs_acked; /* No of msgs for which response is received */
+};
+
+/* One mailbox channel: describes the shared memory window, the CSR
+ * used to ring the peer, and the per-peer state array (dev[ndevs]).
+ * Rx/Tx offsets partition the single hardware region into the two
+ * directions (see MBOX_DIR_* and the MBOX_*_START/SIZE layout above).
+ */
+struct otx2_mbox {
+	uintptr_t hwbase;  /* Mbox region advertised by HW */
+	uintptr_t reg_base;/* CSR base for this dev */
+	uint64_t trigger;  /* Trigger mbox notification */
+	uint16_t tr_shift; /* Mbox trigger shift */
+	uint64_t rx_start; /* Offset of Rx region in mbox memory */
+	uint64_t tx_start; /* Offset of Tx region in mbox memory */
+	uint16_t rx_size;  /* Size of Rx region */
+	uint16_t tx_size;  /* Size of Tx region */
+	uint16_t ndevs;    /* The number of peers */
+	struct otx2_mbox_dev *dev;
+	uint64_t intr_offset; /* Offset to interrupt register */
+};
+
+/* Header which precedes all mbox messages: written once at the start
+ * of the Tx region; describes the batch of messages that follows.
+ */
+struct mbox_hdr {
+	uint64_t __otx2_io msg_size; /* Total msgs size embedded */
+	uint16_t __otx2_io num_msgs; /* No of msgs embedded */
+};
+
+/* Header which precedes every msg and is also part of it: carries the
+ * sender identity, message type, a corruption-detection signature,
+ * the protocol version, the offset chaining messages within a batch,
+ * and (in responses) the processing return code.
+ */
+struct mbox_msghdr {
+	uint16_t __otx2_io pcifunc; /* Who's sending this msg */
+	uint16_t __otx2_io id;      /* Mbox message ID */
+#define OTX2_MBOX_REQ_SIG (0xdead)
+#define OTX2_MBOX_RSP_SIG (0xbeef)
+	/* Signature, for validating corrupted msgs */
+	uint16_t __otx2_io sig;
+#define OTX2_MBOX_VERSION (0x0007)
+	/* Version of msg's structure for this ID */
+	uint16_t __otx2_io ver;
+	/* Offset of next msg within mailbox region */
+	uint16_t __otx2_io next_msgoff;
+	int __otx2_io rc; /* Msg processed response code */
+};
+
+/* Mailbox message types */
+#define MBOX_MSG_MASK 0xFFFF
+#define MBOX_MSG_INVALID 0xFFFE
+#define MBOX_MSG_MAX 0xFFFF
+
+#define MBOX_MESSAGES \
+/* Generic mbox IDs (range 0x000 - 0x1FF) */ \
+M(READY, 0x001, ready, msg_req, ready_msg_rsp) \
+M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach_req, msg_rsp)\
+M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach_req, msg_rsp)\
+M(FREE_RSRC_CNT, 0x004, free_rsrc_cnt, msg_req, free_rsrcs_rsp) \
+M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \
+M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
+M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \
+M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
+M(NDC_SYNC_OP, 0x009, ndc_sync_op, ndc_sync_op, msg_rsp) \
+/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
+M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
+M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \
+M(CGX_STATS, 0x202, cgx_stats, msg_req, cgx_stats_rsp) \
+M(CGX_MAC_ADDR_SET, 0x203, cgx_mac_addr_set, cgx_mac_addr_set_or_get,\
+ cgx_mac_addr_set_or_get) \
+M(CGX_MAC_ADDR_GET, 0x204, cgx_mac_addr_get, cgx_mac_addr_set_or_get,\
+ cgx_mac_addr_set_or_get) \
+M(CGX_PROMISC_ENABLE, 0x205, cgx_promisc_enable, msg_req, msg_rsp) \
+M(CGX_PROMISC_DISABLE, 0x206, cgx_promisc_disable, msg_req, msg_rsp) \
+M(CGX_START_LINKEVENTS, 0x207, cgx_start_linkevents, msg_req, msg_rsp) \
+M(CGX_STOP_LINKEVENTS, 0x208, cgx_stop_linkevents, msg_req, msg_rsp) \
+M(CGX_GET_LINKINFO, 0x209, cgx_get_linkinfo, msg_req, cgx_link_info_msg)\
+M(CGX_INTLBK_ENABLE, 0x20A, cgx_intlbk_enable, msg_req, msg_rsp) \
+M(CGX_INTLBK_DISABLE, 0x20B, cgx_intlbk_disable, msg_req, msg_rsp) \
+M(CGX_PTP_RX_ENABLE, 0x20C, cgx_ptp_rx_enable, msg_req, msg_rsp) \
+M(CGX_PTP_RX_DISABLE, 0x20D, cgx_ptp_rx_disable, msg_req, msg_rsp) \
+M(CGX_CFG_PAUSE_FRM, 0x20E, cgx_cfg_pause_frm, cgx_pause_frm_cfg, \
+ cgx_pause_frm_cfg) \
+M(CGX_FW_DATA_GET, 0x20F, cgx_get_aux_link_info, msg_req, cgx_fw_data) \
+M(CGX_FEC_SET, 0x210, cgx_set_fec_param, fec_mode, fec_mode) \
+M(CGX_MAC_ADDR_ADD, 0x211, cgx_mac_addr_add, cgx_mac_addr_add_req, \
+ cgx_mac_addr_add_rsp) \
+M(CGX_MAC_ADDR_DEL, 0x212, cgx_mac_addr_del, cgx_mac_addr_del_req, \
+ msg_rsp) \
+M(CGX_MAC_MAX_ENTRIES_GET, 0x213, cgx_mac_max_entries_get, msg_req, \
+ cgx_max_dmac_entries_get_rsp) \
+M(CGX_SET_LINK_STATE, 0x214, cgx_set_link_state, \
+ cgx_set_link_state_msg, msg_rsp) \
+M(CGX_GET_PHY_MOD_TYPE, 0x215, cgx_get_phy_mod_type, msg_req, \
+ cgx_phy_mod_type) \
+M(CGX_SET_PHY_MOD_TYPE, 0x216, cgx_set_phy_mod_type, cgx_phy_mod_type, \
+ msg_rsp) \
+M(CGX_FEC_STATS, 0x217, cgx_fec_stats, msg_req, cgx_fec_stats_rsp) \
+M(CGX_SET_LINK_MODE, 0x218, cgx_set_link_mode, cgx_set_link_mode_req,\
+ cgx_set_link_mode_rsp) \
+M(CGX_GET_PHY_FEC_STATS, 0x219, cgx_get_phy_fec_stats, msg_req, msg_rsp) \
+M(CGX_STATS_RST, 0x21A, cgx_stats_rst, msg_req, msg_rsp) \
+/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
+M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, npa_lf_alloc_req, \
+ npa_lf_alloc_rsp) \
+M(NPA_LF_FREE, 0x401, npa_lf_free, msg_req, msg_rsp) \
+M(NPA_AQ_ENQ, 0x402, npa_aq_enq, npa_aq_enq_req, npa_aq_enq_rsp)\
+M(NPA_HWCTX_DISABLE, 0x403, npa_hwctx_disable, hwctx_disable_req, msg_rsp)\
+/* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */ \
+M(SSO_LF_ALLOC, 0x600, sso_lf_alloc, sso_lf_alloc_req, \
+ sso_lf_alloc_rsp) \
+M(SSO_LF_FREE, 0x601, sso_lf_free, sso_lf_free_req, msg_rsp) \
+M(SSOW_LF_ALLOC, 0x602, ssow_lf_alloc, ssow_lf_alloc_req, msg_rsp)\
+M(SSOW_LF_FREE, 0x603, ssow_lf_free, ssow_lf_free_req, msg_rsp) \
+M(SSO_HW_SETCONFIG, 0x604, sso_hw_setconfig, sso_hw_setconfig, \
+ msg_rsp) \
+M(SSO_GRP_SET_PRIORITY, 0x605, sso_grp_set_priority, sso_grp_priority, \
+ msg_rsp) \
+M(SSO_GRP_GET_PRIORITY, 0x606, sso_grp_get_priority, sso_info_req, \
+ sso_grp_priority) \
+M(SSO_WS_CACHE_INV, 0x607, sso_ws_cache_inv, msg_req, msg_rsp) \
+M(SSO_GRP_QOS_CONFIG, 0x608, sso_grp_qos_config, sso_grp_qos_cfg, \
+ msg_rsp) \
+M(SSO_GRP_GET_STATS, 0x609, sso_grp_get_stats, sso_info_req, \
+ sso_grp_stats) \
+M(SSO_HWS_GET_STATS, 0x610, sso_hws_get_stats, sso_info_req, \
+ sso_hws_stats) \
+/* TIM mbox IDs (range 0x800 - 0x9FF) */ \
+M(TIM_LF_ALLOC, 0x800, tim_lf_alloc, tim_lf_alloc_req, \
+ tim_lf_alloc_rsp) \
+M(TIM_LF_FREE, 0x801, tim_lf_free, tim_ring_req, msg_rsp) \
+M(TIM_CONFIG_RING, 0x802, tim_config_ring, tim_config_req, msg_rsp)\
+M(TIM_ENABLE_RING, 0x803, tim_enable_ring, tim_ring_req, \
+ tim_enable_rsp) \
+M(TIM_DISABLE_RING, 0x804, tim_disable_ring, tim_ring_req, msg_rsp) \
+/* CPT mbox IDs (range 0xA00 - 0xBFF) */ \
+M(CPT_LF_ALLOC, 0xA00, cpt_lf_alloc, cpt_lf_alloc_req_msg, \
+ cpt_lf_alloc_rsp_msg) \
+M(CPT_LF_FREE, 0xA01, cpt_lf_free, msg_req, msg_rsp) \
+M(CPT_RD_WR_REGISTER, 0xA02, cpt_rd_wr_register, cpt_rd_wr_reg_msg, \
+ cpt_rd_wr_reg_msg) \
+M(CPT_SET_CRYPTO_GRP, 0xA03, cpt_set_crypto_grp, \
+ cpt_set_crypto_grp_req_msg, \
+ msg_rsp) \
+M(CPT_INLINE_IPSEC_CFG, 0xA04, cpt_inline_ipsec_cfg, \
+ cpt_inline_ipsec_cfg_msg, msg_rsp) \
+M(CPT_RX_INLINE_LF_CFG, 0xBFE, cpt_rx_inline_lf_cfg, \
+ cpt_rx_inline_lf_cfg_msg, msg_rsp) \
+/* NPC mbox IDs (range 0x6000 - 0x7FFF) */ \
+M(NPC_MCAM_ALLOC_ENTRY, 0x6000, npc_mcam_alloc_entry, \
+ npc_mcam_alloc_entry_req, \
+ npc_mcam_alloc_entry_rsp) \
+M(NPC_MCAM_FREE_ENTRY, 0x6001, npc_mcam_free_entry, \
+ npc_mcam_free_entry_req, msg_rsp) \
+M(NPC_MCAM_WRITE_ENTRY, 0x6002, npc_mcam_write_entry, \
+ npc_mcam_write_entry_req, msg_rsp) \
+M(NPC_MCAM_ENA_ENTRY, 0x6003, npc_mcam_ena_entry, \
+ npc_mcam_ena_dis_entry_req, msg_rsp) \
+M(NPC_MCAM_DIS_ENTRY, 0x6004, npc_mcam_dis_entry, \
+ npc_mcam_ena_dis_entry_req, msg_rsp) \
+M(NPC_MCAM_SHIFT_ENTRY, 0x6005, npc_mcam_shift_entry, \
+ npc_mcam_shift_entry_req, \
+ npc_mcam_shift_entry_rsp) \
+M(NPC_MCAM_ALLOC_COUNTER, 0x6006, npc_mcam_alloc_counter, \
+ npc_mcam_alloc_counter_req, \
+ npc_mcam_alloc_counter_rsp) \
+M(NPC_MCAM_FREE_COUNTER, 0x6007, npc_mcam_free_counter, \
+ npc_mcam_oper_counter_req, \
+ msg_rsp) \
+M(NPC_MCAM_UNMAP_COUNTER, 0x6008, npc_mcam_unmap_counter, \
+ npc_mcam_unmap_counter_req, \
+ msg_rsp) \
+M(NPC_MCAM_CLEAR_COUNTER, 0x6009, npc_mcam_clear_counter, \
+ npc_mcam_oper_counter_req, \
+ msg_rsp) \
+M(NPC_MCAM_COUNTER_STATS, 0x600a, npc_mcam_counter_stats, \
+ npc_mcam_oper_counter_req, \
+ npc_mcam_oper_counter_rsp) \
+M(NPC_MCAM_ALLOC_AND_WRITE_ENTRY, 0x600b, npc_mcam_alloc_and_write_entry,\
+ npc_mcam_alloc_and_write_entry_req, \
+ npc_mcam_alloc_and_write_entry_rsp) \
+M(NPC_GET_KEX_CFG, 0x600c, npc_get_kex_cfg, msg_req, \
+ npc_get_kex_cfg_rsp) \
+M(NPC_INSTALL_FLOW, 0x600d, npc_install_flow, \
+ npc_install_flow_req, \
+ npc_install_flow_rsp) \
+M(NPC_DELETE_FLOW, 0x600e, npc_delete_flow, \
+ npc_delete_flow_req, msg_rsp) \
+M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry, \
+ npc_mcam_read_entry_req, \
+ npc_mcam_read_entry_rsp) \
+M(NPC_SET_PKIND, 0x6010, npc_set_pkind, \
+ npc_set_pkind, \
+ msg_rsp) \
+/* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \
+M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, nix_lf_alloc_req, \
+ nix_lf_alloc_rsp) \
+M(NIX_LF_FREE, 0x8001, nix_lf_free, nix_lf_free_req, msg_rsp) \
+M(NIX_AQ_ENQ, 0x8002, nix_aq_enq, nix_aq_enq_req, \
+ nix_aq_enq_rsp) \
+M(NIX_HWCTX_DISABLE, 0x8003, nix_hwctx_disable, hwctx_disable_req, \
+ msg_rsp) \
+M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc, nix_txsch_alloc_req, \
+ nix_txsch_alloc_rsp) \
+M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free, nix_txsch_free_req, \
+ msg_rsp) \
+M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_cfg, nix_txschq_config, \
+ nix_txschq_config) \
+M(NIX_STATS_RST, 0x8007, nix_stats_rst, msg_req, msg_rsp) \
+M(NIX_VTAG_CFG, 0x8008, nix_vtag_cfg, nix_vtag_config, msg_rsp) \
+M(NIX_RSS_FLOWKEY_CFG, 0x8009, nix_rss_flowkey_cfg, \
+ nix_rss_flowkey_cfg, \
+ nix_rss_flowkey_cfg_rsp) \
+M(NIX_SET_MAC_ADDR, 0x800a, nix_set_mac_addr, nix_set_mac_addr, \
+ msg_rsp) \
+M(NIX_SET_RX_MODE, 0x800b, nix_set_rx_mode, nix_rx_mode, msg_rsp) \
+M(NIX_SET_HW_FRS, 0x800c, nix_set_hw_frs, nix_frs_cfg, msg_rsp) \
+M(NIX_LF_START_RX, 0x800d, nix_lf_start_rx, msg_req, msg_rsp) \
+M(NIX_LF_STOP_RX, 0x800e, nix_lf_stop_rx, msg_req, msg_rsp) \
+M(NIX_MARK_FORMAT_CFG, 0x800f, nix_mark_format_cfg, \
+ nix_mark_format_cfg, \
+ nix_mark_format_cfg_rsp) \
+M(NIX_SET_RX_CFG, 0x8010, nix_set_rx_cfg, nix_rx_cfg, msg_rsp) \
+M(NIX_LSO_FORMAT_CFG, 0x8011, nix_lso_format_cfg, nix_lso_format_cfg, \
+ nix_lso_format_cfg_rsp) \
+M(NIX_LF_PTP_TX_ENABLE, 0x8013, nix_lf_ptp_tx_enable, msg_req, \
+ msg_rsp) \
+M(NIX_LF_PTP_TX_DISABLE, 0x8014, nix_lf_ptp_tx_disable, msg_req, \
+ msg_rsp) \
+M(NIX_SET_VLAN_TPID, 0x8015, nix_set_vlan_tpid, nix_set_vlan_tpid, \
+ msg_rsp) \
+M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \
+ nix_bp_cfg_rsp) \
+M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp)\
+M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, \
+ nix_get_mac_addr_rsp) \
+M(NIX_INLINE_IPSEC_CFG, 0x8019, nix_inline_ipsec_cfg, \
+ nix_inline_ipsec_cfg, msg_rsp) \
+M(NIX_INLINE_IPSEC_LF_CFG, \
+ 0x801a, nix_inline_ipsec_lf_cfg, \
+ nix_inline_ipsec_lf_cfg, msg_rsp)
+
+/* Messages initiated by AF (range 0xC00 - 0xDFF) */
+#define MBOX_UP_CGX_MESSAGES \
+M(CGX_LINK_EVENT, 0xC00, cgx_link_event, cgx_link_info_msg, \
+ msg_rsp) \
+M(CGX_PTP_RX_INFO, 0xC01, cgx_ptp_rx_info, cgx_ptp_rx_info_msg, \
+ msg_rsp)
+
+enum {
+#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id,
+MBOX_MESSAGES
+MBOX_UP_CGX_MESSAGES
+#undef M
+};
+
+/* Mailbox message formats */
+
+#define RVU_DEFAULT_PF_FUNC 0xFFFF
+
+/* Generic request msg used for those mbox messages which
+ * don't send any data in the request.
+ */
+struct msg_req {
+ struct mbox_msghdr hdr;
+};
+
+/* Generic response msg used as an ack or response for those mbox
+ * messages which don't have a specific rsp msg format.
+ */
+struct msg_rsp {
+ struct mbox_msghdr hdr;
+};
+
+/* RVU mailbox error codes
+ * Range 256 - 300.
+ */
+enum rvu_af_status {
+ RVU_INVALID_VF_ID = -256,
+};
+
+/* Response to MBOX_MSG_READY: reports the AF's clock frequencies. */
+struct ready_msg_rsp {
+	struct mbox_msghdr hdr;
+	/* NOTE(review): "sclk_feq" looks like a typo for "sclk_freq", but
+	 * the name is part of the AF<->PF shared message layout naming,
+	 * so it is kept as-is — confirm against the AF-side definition.
+	 */
+	uint16_t __otx2_io sclk_feq; /* SCLK frequency */
+	uint16_t __otx2_io rclk_freq; /* RCLK frequency */
+};
+
+/* Struct to set pkind */
+struct npc_set_pkind {
+ struct mbox_msghdr hdr;
+#define OTX2_PRIV_FLAGS_DEFAULT BIT_ULL(0)
+#define OTX2_PRIV_FLAGS_EDSA BIT_ULL(1)
+#define OTX2_PRIV_FLAGS_HIGIG BIT_ULL(2)
+#define OTX2_PRIV_FLAGS_LEN_90B BIT_ULL(3)
+#define OTX2_PRIV_FLAGS_CUSTOM BIT_ULL(63)
+ uint64_t __otx2_io mode;
+#define PKIND_TX BIT_ULL(0)
+#define PKIND_RX BIT_ULL(1)
+ uint8_t __otx2_io dir;
+ uint8_t __otx2_io pkind; /* valid only in case custom flag */
+};
+
+/* Structure for requesting resource provisioning.
+ * 'modify' flag to be used when either requesting more
+ * or to detach partial of a certain resource type.
+ * Rest of the fields specify how many of what type to
+ * be attached.
+ * To request LFs from two blocks of same type this mailbox
+ * can be sent twice as below:
+ * struct rsrc_attach *attach;
+ * .. Allocate memory for message ..
+ * attach->cptlfs = 3; <3 LFs from CPT0>
+ * .. Send message ..
+ * .. Allocate memory for message ..
+ * attach->modify = 1;
+ * attach->cpt_blkaddr = BLKADDR_CPT1;
+ * attach->cptlfs = 2; <2 LFs from CPT1>
+ * .. Send message ..
+ */
+struct rsrc_attach_req {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io modify:1;
+ uint8_t __otx2_io npalf:1;
+ uint8_t __otx2_io nixlf:1;
+ uint16_t __otx2_io sso;
+ uint16_t __otx2_io ssow;
+ uint16_t __otx2_io timlfs;
+ uint16_t __otx2_io cptlfs;
+ uint16_t __otx2_io reelfs;
+ /* BLKADDR_CPT0/BLKADDR_CPT1 or 0 for BLKADDR_CPT0 */
+ int __otx2_io cpt_blkaddr;
+ /* BLKADDR_REE0/BLKADDR_REE1 or 0 for BLKADDR_REE0 */
+ int __otx2_io ree_blkaddr;
+};
+
+/* Structure for relinquishing resources.
+ * 'partial' flag to be used when relinquishing all resources
+ * but only of a certain type. If not set, all resources of all
+ * types provisioned to the RVU function will be detached.
+ */
+struct rsrc_detach_req {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io partial:1;
+ uint8_t __otx2_io npalf:1;
+ uint8_t __otx2_io nixlf:1;
+ uint8_t __otx2_io sso:1;
+ uint8_t __otx2_io ssow:1;
+ uint8_t __otx2_io timlfs:1;
+ uint8_t __otx2_io cptlfs:1;
+ uint8_t __otx2_io reelfs:1;
+};
+
+/* NIX Transmit schedulers */
+#define NIX_TXSCH_LVL_SMQ 0x0
+#define NIX_TXSCH_LVL_MDQ 0x0
+#define NIX_TXSCH_LVL_TL4 0x1
+#define NIX_TXSCH_LVL_TL3 0x2
+#define NIX_TXSCH_LVL_TL2 0x3
+#define NIX_TXSCH_LVL_TL1 0x4
+#define NIX_TXSCH_LVL_CNT 0x5
+
+/*
+ * Number of resources available to the caller.
+ * In reply to MBOX_MSG_FREE_RSRC_CNT.
+ */
+struct free_rsrcs_rsp {
+ struct mbox_msghdr hdr;
+ uint16_t __otx2_io schq[NIX_TXSCH_LVL_CNT];
+ uint16_t __otx2_io sso;
+ uint16_t __otx2_io tim;
+ uint16_t __otx2_io ssow;
+ uint16_t __otx2_io cpt;
+ uint8_t __otx2_io npa;
+ uint8_t __otx2_io nix;
+ uint16_t __otx2_io schq_nix1[NIX_TXSCH_LVL_CNT];
+ uint8_t __otx2_io nix1;
+ uint8_t __otx2_io cpt1;
+ uint8_t __otx2_io ree0;
+ uint8_t __otx2_io ree1;
+};
+
+#define MSIX_VECTOR_INVALID 0xFFFF
+#define MAX_RVU_BLKLF_CNT 256
+
+struct msix_offset_rsp {
+ struct mbox_msghdr hdr;
+ uint16_t __otx2_io npa_msixoff;
+ uint16_t __otx2_io nix_msixoff;
+ uint8_t __otx2_io sso;
+ uint8_t __otx2_io ssow;
+ uint8_t __otx2_io timlfs;
+ uint8_t __otx2_io cptlfs;
+ uint16_t __otx2_io sso_msixoff[MAX_RVU_BLKLF_CNT];
+ uint16_t __otx2_io ssow_msixoff[MAX_RVU_BLKLF_CNT];
+ uint16_t __otx2_io timlf_msixoff[MAX_RVU_BLKLF_CNT];
+ uint16_t __otx2_io cptlf_msixoff[MAX_RVU_BLKLF_CNT];
+ uint8_t __otx2_io cpt1_lfs;
+ uint8_t __otx2_io ree0_lfs;
+ uint8_t __otx2_io ree1_lfs;
+ uint16_t __otx2_io cpt1_lf_msixoff[MAX_RVU_BLKLF_CNT];
+ uint16_t __otx2_io ree0_lf_msixoff[MAX_RVU_BLKLF_CNT];
+ uint16_t __otx2_io ree1_lf_msixoff[MAX_RVU_BLKLF_CNT];
+
+};
+
+/* CGX mbox message formats */
+
+struct cgx_stats_rsp {
+ struct mbox_msghdr hdr;
+#define CGX_RX_STATS_COUNT 13
+#define CGX_TX_STATS_COUNT 18
+ uint64_t __otx2_io rx_stats[CGX_RX_STATS_COUNT];
+ uint64_t __otx2_io tx_stats[CGX_TX_STATS_COUNT];
+};
+
+struct cgx_fec_stats_rsp {
+ struct mbox_msghdr hdr;
+ uint64_t __otx2_io fec_corr_blks;
+ uint64_t __otx2_io fec_uncorr_blks;
+};
+/* Structure for requesting the operation for
+ * setting/getting mac address in the CGX interface
+ */
+struct cgx_mac_addr_set_or_get {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io mac_addr[RTE_ETHER_ADDR_LEN];
+};
+
+/* Structure for requesting the operation to
+ * add DMAC filter entry into CGX interface
+ */
+struct cgx_mac_addr_add_req {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io mac_addr[RTE_ETHER_ADDR_LEN];
+};
+
+/* Structure for response against the operation to
+ * add DMAC filter entry into CGX interface
+ */
+struct cgx_mac_addr_add_rsp {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io index;
+};
+
+/* Structure for requesting the operation to
+ * delete DMAC filter entry from CGX interface
+ */
+struct cgx_mac_addr_del_req {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io index;
+};
+
+/* Structure for response against the operation to
+ * get maximum supported DMAC filter entries
+ */
+struct cgx_max_dmac_entries_get_rsp {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io max_dmac_filters;
+};
+
+struct cgx_link_user_info {
+ uint64_t __otx2_io link_up:1;
+ uint64_t __otx2_io full_duplex:1;
+ uint64_t __otx2_io lmac_type_id:4;
+ uint64_t __otx2_io speed:20; /* speed in Mbps */
+ uint64_t __otx2_io an:1; /* AN supported or not */
+ uint64_t __otx2_io fec:2; /* FEC type if enabled else 0 */
+ uint64_t __otx2_io port:8;
+#define LMACTYPE_STR_LEN 16
+ char lmac_type[LMACTYPE_STR_LEN];
+};
+
+struct cgx_link_info_msg {
+ struct mbox_msghdr hdr;
+ struct cgx_link_user_info link_info;
+};
+
+struct cgx_ptp_rx_info_msg {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io ptp_en;
+};
+
+struct cgx_pause_frm_cfg {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io set;
+ /* set = 1 if the request is to config pause frames */
+ /* set = 0 if the request is to fetch pause frames config */
+ uint8_t __otx2_io rx_pause;
+ uint8_t __otx2_io tx_pause;
+};
+
+struct sfp_eeprom_s {
+#define SFP_EEPROM_SIZE 256
+ uint16_t __otx2_io sff_id;
+ uint8_t __otx2_io buf[SFP_EEPROM_SIZE];
+ uint64_t __otx2_io reserved;
+};
+
+enum fec_type {
+ OTX2_FEC_NONE,
+ OTX2_FEC_BASER,
+ OTX2_FEC_RS,
+};
+
+struct phy_s {
+ uint64_t __otx2_io can_change_mod_type : 1;
+ uint64_t __otx2_io mod_type : 1;
+};
+
+struct cgx_lmac_fwdata_s {
+ uint16_t __otx2_io rw_valid;
+ uint64_t __otx2_io supported_fec;
+ uint64_t __otx2_io supported_an;
+ uint64_t __otx2_io supported_link_modes;
+ /* Only applicable if AN is supported */
+ uint64_t __otx2_io advertised_fec;
+ uint64_t __otx2_io advertised_link_modes;
+ /* Only applicable if SFP/QSFP slot is present */
+ struct sfp_eeprom_s sfp_eeprom;
+ struct phy_s phy;
+#define LMAC_FWDATA_RESERVED_MEM 1023
+ uint64_t __otx2_io reserved[LMAC_FWDATA_RESERVED_MEM];
+};
+
+struct cgx_fw_data {
+ struct mbox_msghdr hdr;
+ struct cgx_lmac_fwdata_s fwdata;
+};
+
+struct fec_mode {
+ struct mbox_msghdr hdr;
+ int __otx2_io fec;
+};
+
+struct cgx_set_link_state_msg {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io enable;
+};
+
+struct cgx_phy_mod_type {
+ struct mbox_msghdr hdr;
+ int __otx2_io mod;
+};
+
+struct cgx_set_link_mode_args {
+ uint32_t __otx2_io speed;
+ uint8_t __otx2_io duplex;
+ uint8_t __otx2_io an;
+ uint8_t __otx2_io ports;
+ uint64_t __otx2_io mode;
+};
+
+struct cgx_set_link_mode_req {
+ struct mbox_msghdr hdr;
+ struct cgx_set_link_mode_args args;
+};
+
+struct cgx_set_link_mode_rsp {
+ struct mbox_msghdr hdr;
+ int __otx2_io status;
+};
+/* NPA mbox message formats */
+
+/* NPA mailbox error codes
+ * Range 301 - 400.
+ */
+enum npa_af_status {
+ NPA_AF_ERR_PARAM = -301,
+ NPA_AF_ERR_AQ_FULL = -302,
+ NPA_AF_ERR_AQ_ENQUEUE = -303,
+ NPA_AF_ERR_AF_LF_INVALID = -304,
+ NPA_AF_ERR_AF_LF_ALLOC = -305,
+ NPA_AF_ERR_LF_RESET = -306,
+};
+
+#define NPA_AURA_SZ_0 0
+#define NPA_AURA_SZ_128 1
+#define NPA_AURA_SZ_256 2
+#define NPA_AURA_SZ_512 3
+#define NPA_AURA_SZ_1K 4
+#define NPA_AURA_SZ_2K 5
+#define NPA_AURA_SZ_4K 6
+#define NPA_AURA_SZ_8K 7
+#define NPA_AURA_SZ_16K 8
+#define NPA_AURA_SZ_32K 9
+#define NPA_AURA_SZ_64K 10
+#define NPA_AURA_SZ_128K 11
+#define NPA_AURA_SZ_256K 12
+#define NPA_AURA_SZ_512K 13
+#define NPA_AURA_SZ_1M 14
+#define NPA_AURA_SZ_MAX 15
+
+/* For NPA LF context alloc and init */
+struct npa_lf_alloc_req {
+ struct mbox_msghdr hdr;
+ int __otx2_io node;
+ int __otx2_io aura_sz; /* No of auras. See NPA_AURA_SZ_* */
+ uint32_t __otx2_io nr_pools; /* No of pools */
+ uint64_t __otx2_io way_mask;
+};
+
+struct npa_lf_alloc_rsp {
+ struct mbox_msghdr hdr;
+ uint32_t __otx2_io stack_pg_ptrs; /* No of ptrs per stack page */
+ uint32_t __otx2_io stack_pg_bytes; /* Size of stack page */
+ uint16_t __otx2_io qints; /* NPA_AF_CONST::QINTS */
+};
+
+/* NPA AQ enqueue msg */
+struct npa_aq_enq_req {
+ struct mbox_msghdr hdr;
+ uint32_t __otx2_io aura_id;
+ uint8_t __otx2_io ctype;
+ uint8_t __otx2_io op;
+ union {
+ /* Valid when op == WRITE/INIT and ctype == AURA.
+ * LF fills the pool_id in aura.pool_addr. AF will translate
+ * the pool_id to pool context pointer.
+ */
+ __otx2_io struct npa_aura_s aura;
+ /* Valid when op == WRITE/INIT and ctype == POOL */
+ __otx2_io struct npa_pool_s pool;
+ };
+ /* Mask data when op == WRITE (1=write, 0=don't write) */
+ union {
+ /* Valid when op == WRITE and ctype == AURA */
+ __otx2_io struct npa_aura_s aura_mask;
+ /* Valid when op == WRITE and ctype == POOL */
+ __otx2_io struct npa_pool_s pool_mask;
+ };
+};
+
+struct npa_aq_enq_rsp {
+ struct mbox_msghdr hdr;
+ union {
+ /* Valid when op == READ and ctype == AURA */
+ __otx2_io struct npa_aura_s aura;
+ /* Valid when op == READ and ctype == POOL */
+ __otx2_io struct npa_pool_s pool;
+ };
+};
+
+/* Disable all contexts of type 'ctype' */
+struct hwctx_disable_req {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io ctype;
+};
+
+/* NIX mbox message formats */
+
+/* NIX mailbox error codes
+ * Range 401 - 500.
+ */
+enum nix_af_status {
+ NIX_AF_ERR_PARAM = -401,
+ NIX_AF_ERR_AQ_FULL = -402,
+ NIX_AF_ERR_AQ_ENQUEUE = -403,
+ NIX_AF_ERR_AF_LF_INVALID = -404,
+ NIX_AF_ERR_AF_LF_ALLOC = -405,
+ NIX_AF_ERR_TLX_ALLOC_FAIL = -406,
+ NIX_AF_ERR_TLX_INVALID = -407,
+ NIX_AF_ERR_RSS_SIZE_INVALID = -408,
+ NIX_AF_ERR_RSS_GRPS_INVALID = -409,
+ NIX_AF_ERR_FRS_INVALID = -410,
+ NIX_AF_ERR_RX_LINK_INVALID = -411,
+ NIX_AF_INVAL_TXSCHQ_CFG = -412,
+ NIX_AF_SMQ_FLUSH_FAILED = -413,
+ NIX_AF_ERR_LF_RESET = -414,
+ NIX_AF_ERR_RSS_NOSPC_FIELD = -415,
+ NIX_AF_ERR_RSS_NOSPC_ALGO = -416,
+ NIX_AF_ERR_MARK_CFG_FAIL = -417,
+ NIX_AF_ERR_LSO_CFG_FAIL = -418,
+ NIX_AF_INVAL_NPA_PF_FUNC = -419,
+ NIX_AF_INVAL_SSO_PF_FUNC = -420,
+ NIX_AF_ERR_TX_VTAG_NOSPC = -421,
+ NIX_AF_ERR_RX_VTAG_INUSE = -422,
+ NIX_AF_ERR_PTP_CONFIG_FAIL = -423,
+};
+
+/* For NIX LF context alloc and init */
+struct nix_lf_alloc_req {
+ struct mbox_msghdr hdr;
+ int __otx2_io node;
+ uint32_t __otx2_io rq_cnt; /* No of receive queues */
+ uint32_t __otx2_io sq_cnt; /* No of send queues */
+ uint32_t __otx2_io cq_cnt; /* No of completion queues */
+ uint8_t __otx2_io xqe_sz;
+ uint16_t __otx2_io rss_sz;
+ uint8_t __otx2_io rss_grps;
+ uint16_t __otx2_io npa_func;
+ /* RVU_DEFAULT_PF_FUNC == default pf_func associated with lf */
+ uint16_t __otx2_io sso_func;
+ uint64_t __otx2_io rx_cfg; /* See NIX_AF_LF(0..127)_RX_CFG */
+ uint64_t __otx2_io way_mask;
+#define NIX_LF_RSS_TAG_LSB_AS_ADDER BIT_ULL(0)
+ uint64_t flags;
+};
+
+struct nix_lf_alloc_rsp {
+ struct mbox_msghdr hdr;
+ uint16_t __otx2_io sqb_size;
+ uint16_t __otx2_io rx_chan_base;
+ uint16_t __otx2_io tx_chan_base;
+ uint8_t __otx2_io rx_chan_cnt; /* Total number of RX channels */
+ uint8_t __otx2_io tx_chan_cnt; /* Total number of TX channels */
+ uint8_t __otx2_io lso_tsov4_idx;
+ uint8_t __otx2_io lso_tsov6_idx;
+ uint8_t __otx2_io mac_addr[RTE_ETHER_ADDR_LEN];
+ uint8_t __otx2_io lf_rx_stats; /* NIX_AF_CONST1::LF_RX_STATS */
+ uint8_t __otx2_io lf_tx_stats; /* NIX_AF_CONST1::LF_TX_STATS */
+ uint16_t __otx2_io cints; /* NIX_AF_CONST2::CINTS */
+ uint16_t __otx2_io qints; /* NIX_AF_CONST2::QINTS */
+ uint8_t __otx2_io hw_rx_tstamp_en; /*set if rx timestamping enabled */
+ uint8_t __otx2_io cgx_links; /* No. of CGX links present in HW */
+ uint8_t __otx2_io lbk_links; /* No. of LBK links present in HW */
+ uint8_t __otx2_io sdp_links; /* No. of SDP links present in HW */
+};
+
+struct nix_lf_free_req {
+ struct mbox_msghdr hdr;
+#define NIX_LF_DISABLE_FLOWS BIT_ULL(0)
+#define NIX_LF_DONT_FREE_TX_VTAG BIT_ULL(1)
+ uint64_t __otx2_io flags;
+};
+
+/* NIX AQ enqueue msg */
+struct nix_aq_enq_req {
+ struct mbox_msghdr hdr;
+ uint32_t __otx2_io qidx;
+ uint8_t __otx2_io ctype;
+ uint8_t __otx2_io op;
+ union {
+ /* Valid when op == WRITE/INIT and ctype == NIX_AQ_CTYPE_RQ */
+ __otx2_io struct nix_rq_ctx_s rq;
+ /* Valid when op == WRITE/INIT and ctype == NIX_AQ_CTYPE_SQ */
+ __otx2_io struct nix_sq_ctx_s sq;
+ /* Valid when op == WRITE/INIT and ctype == NIX_AQ_CTYPE_CQ */
+ __otx2_io struct nix_cq_ctx_s cq;
+ /* Valid when op == WRITE/INIT and ctype == NIX_AQ_CTYPE_RSS */
+ __otx2_io struct nix_rsse_s rss;
+ /* Valid when op == WRITE/INIT and ctype == NIX_AQ_CTYPE_MCE */
+ __otx2_io struct nix_rx_mce_s mce;
+ };
+ /* Mask data when op == WRITE (1=write, 0=don't write) */
+ union {
+ /* Valid when op == WRITE and ctype == NIX_AQ_CTYPE_RQ */
+ __otx2_io struct nix_rq_ctx_s rq_mask;
+ /* Valid when op == WRITE and ctype == NIX_AQ_CTYPE_SQ */
+ __otx2_io struct nix_sq_ctx_s sq_mask;
+ /* Valid when op == WRITE and ctype == NIX_AQ_CTYPE_CQ */
+ __otx2_io struct nix_cq_ctx_s cq_mask;
+ /* Valid when op == WRITE and ctype == NIX_AQ_CTYPE_RSS */
+ __otx2_io struct nix_rsse_s rss_mask;
+ /* Valid when op == WRITE and ctype == NIX_AQ_CTYPE_MCE */
+ __otx2_io struct nix_rx_mce_s mce_mask;
+ };
+};
+
+struct nix_aq_enq_rsp {
+ struct mbox_msghdr hdr;
+ union {
+ __otx2_io struct nix_rq_ctx_s rq;
+ __otx2_io struct nix_sq_ctx_s sq;
+ __otx2_io struct nix_cq_ctx_s cq;
+ __otx2_io struct nix_rsse_s rss;
+ __otx2_io struct nix_rx_mce_s mce;
+ };
+};
+
+/* Tx scheduler/shaper mailbox messages */
+
+#define MAX_TXSCHQ_PER_FUNC 128
+
+struct nix_txsch_alloc_req {
+ struct mbox_msghdr hdr;
+ /* Scheduler queue count request at each level */
+ uint16_t __otx2_io schq_contig[NIX_TXSCH_LVL_CNT]; /* Contig. queues */
+ uint16_t __otx2_io schq[NIX_TXSCH_LVL_CNT]; /* Non-Contig. queues */
+};
+
+struct nix_txsch_alloc_rsp {
+ struct mbox_msghdr hdr;
+ /* Scheduler queue count allocated at each level */
+ uint16_t __otx2_io schq_contig[NIX_TXSCH_LVL_CNT]; /* Contig. queues */
+ uint16_t __otx2_io schq[NIX_TXSCH_LVL_CNT]; /* Non-Contig. queues */
+ /* Scheduler queue list allocated at each level */
+ uint16_t __otx2_io
+ schq_contig_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ uint16_t __otx2_io schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ /* Traffic aggregation scheduler level */
+ uint8_t __otx2_io aggr_level;
+ /* Aggregation lvl's RR_PRIO config */
+ uint8_t __otx2_io aggr_lvl_rr_prio;
+ /* LINKX_CFG CSRs mapped to TL3 or TL2's index ? */
+ uint8_t __otx2_io link_cfg_lvl;
+};
+
+struct nix_txsch_free_req {
+ struct mbox_msghdr hdr;
+#define TXSCHQ_FREE_ALL BIT_ULL(0)
+ uint16_t __otx2_io flags;
+ /* Scheduler queue level to be freed */
+ uint16_t __otx2_io schq_lvl;
+ /* List of scheduler queues to be freed */
+ uint16_t __otx2_io schq;
+};
+
+struct nix_txschq_config {
+ struct mbox_msghdr hdr;
+ uint8_t __otx2_io lvl; /* SMQ/MDQ/TL4/TL3/TL2/TL1 */
+ uint8_t __otx2_io read;
+#define TXSCHQ_IDX_SHIFT 16
+#define TXSCHQ_IDX_MASK (BIT_ULL(10) - 1)
+#define TXSCHQ_IDX(reg, shift) (((reg) >> (shift)) & TXSCHQ_IDX_MASK)
+ uint8_t __otx2_io num_regs;
+#define MAX_REGS_PER_MBOX_MSG 20
+ uint64_t __otx2_io reg[MAX_REGS_PER_MBOX_MSG];
+ uint64_t __otx2_io regval[MAX_REGS_PER_MBOX_MSG];
+ /* All 0's => overwrite with new value */
+ uint64_t __otx2_io regval_mask[MAX_REGS_PER_MBOX_MSG];
+};
+
+struct nix_vtag_config {
+ struct mbox_msghdr hdr;
+ /* '0' for 4 octet VTAG, '1' for 8 octet VTAG */
+ uint8_t __otx2_io vtag_size;
+ /* cfg_type is '0' for tx vlan cfg
+ * cfg_type is '1' for rx vlan cfg
+ */
+ uint8_t __otx2_io cfg_type;
+ union {
+ /* Valid when cfg_type is '0' */
+ struct {
+ uint64_t __otx2_io vtag0;
+ uint64_t __otx2_io vtag1;
+
+ /* cfg_vtag0 & cfg_vtag1 fields are valid
+ * when free_vtag0 & free_vtag1 are '0's.
+ */
+ /* cfg_vtag0 = 1 to configure vtag0 */
+ uint8_t __otx2_io cfg_vtag0 :1;
+ /* cfg_vtag1 = 1 to configure vtag1 */
+ uint8_t __otx2_io cfg_vtag1 :1;
+
+ /* vtag0_idx & vtag1_idx are only valid when
+ * both cfg_vtag0 & cfg_vtag1 are '0's,
+ * these fields are used along with free_vtag0
+ * & free_vtag1 to free the nix lf's tx_vlan
+ * configuration.
+ *
+ * Denotes the indices of tx_vtag def registers
+ * that needs to be cleared and freed.
+ */
+ int __otx2_io vtag0_idx;
+ int __otx2_io vtag1_idx;
+
+ /* Free_vtag0 & free_vtag1 fields are valid
+ * when cfg_vtag0 & cfg_vtag1 are '0's.
+ */
+ /* Free_vtag0 = 1 clears vtag0 configuration
+ * vtag0_idx denotes the index to be cleared.
+ */
+ uint8_t __otx2_io free_vtag0 :1;
+ /* Free_vtag1 = 1 clears vtag1 configuration
+ * vtag1_idx denotes the index to be cleared.
+ */
+ uint8_t __otx2_io free_vtag1 :1;
+ } tx;
+
+ /* Valid when cfg_type is '1' */
+ struct {
+ /* Rx vtag type index, valid values are in 0..7 range */
+ uint8_t __otx2_io vtag_type;
+ /* Rx vtag strip */
+ uint8_t __otx2_io strip_vtag :1;
+ /* Rx vtag capture */
+ uint8_t __otx2_io capture_vtag :1;
+ } rx;
+ };
+};
+
/* Response to TX vtag configuration: which tx_vtag definition registers
 * were allocated for vtag0/vtag1 insertion.
 */
struct nix_vtag_config_rsp {
	struct mbox_msghdr hdr;
	/* Indices of tx_vtag def registers used to configure
	 * tx vtag0 & vtag1 headers, these indices are valid
	 * when nix_vtag_config mbox requested for vtag0 and/
	 * or vtag1 configuration.
	 */
	int __otx2_io vtag0_idx;
	int __otx2_io vtag1_idx;
};

/* Select which packet fields feed the RSS hash for a given MCAM entry
 * and RSS group.
 */
struct nix_rss_flowkey_cfg {
	struct mbox_msghdr hdr;
	int __otx2_io mcam_index; /* MCAM entry index to modify */
	uint32_t __otx2_io flowkey_cfg; /* Flowkey types selected */
	/* One bit per selectable flow-key input; INNR_* variants select
	 * the inner header of a tunnelled packet.
	 */
#define FLOW_KEY_TYPE_PORT BIT(0)
#define FLOW_KEY_TYPE_IPV4 BIT(1)
#define FLOW_KEY_TYPE_IPV6 BIT(2)
#define FLOW_KEY_TYPE_TCP BIT(3)
#define FLOW_KEY_TYPE_UDP BIT(4)
#define FLOW_KEY_TYPE_SCTP BIT(5)
#define FLOW_KEY_TYPE_NVGRE BIT(6)
#define FLOW_KEY_TYPE_VXLAN BIT(7)
#define FLOW_KEY_TYPE_GENEVE BIT(8)
#define FLOW_KEY_TYPE_ETH_DMAC BIT(9)
#define FLOW_KEY_TYPE_IPV6_EXT BIT(10)
#define FLOW_KEY_TYPE_GTPU BIT(11)
#define FLOW_KEY_TYPE_INNR_IPV4 BIT(12)
#define FLOW_KEY_TYPE_INNR_IPV6 BIT(13)
#define FLOW_KEY_TYPE_INNR_TCP BIT(14)
#define FLOW_KEY_TYPE_INNR_UDP BIT(15)
#define FLOW_KEY_TYPE_INNR_SCTP BIT(16)
#define FLOW_KEY_TYPE_INNR_ETH_DMAC BIT(17)
#define FLOW_KEY_TYPE_CH_LEN_90B BIT(18)
#define FLOW_KEY_TYPE_L4_DST BIT(28)
#define FLOW_KEY_TYPE_L4_SRC BIT(29)
#define FLOW_KEY_TYPE_L3_DST BIT(30)
#define FLOW_KEY_TYPE_L3_SRC BIT(31)
	uint8_t __otx2_io group; /* RSS context or group */
};

/* Response: RSS hash algorithm index selected by the AF */
struct nix_rss_flowkey_cfg_rsp {
	struct mbox_msghdr hdr;
	uint8_t __otx2_io alg_idx; /* Selected algo index */
};

/* Set the LF's primary MAC address */
struct nix_set_mac_addr {
	struct mbox_msghdr hdr;
	uint8_t __otx2_io mac_addr[RTE_ETHER_ADDR_LEN];
};

/* Response carrying the LF's current primary MAC address */
struct nix_get_mac_addr_rsp {
	struct mbox_msghdr hdr;
	uint8_t __otx2_io mac_addr[RTE_ETHER_ADDR_LEN];
};

/* Packet mark format: byte offset plus y_*/r_* mask/value pairs
 * (presumably yellow/red policer color marking - confirm against the
 * NIX hardware reference before relying on that reading).
 */
struct nix_mark_format_cfg {
	struct mbox_msghdr hdr;
	uint8_t __otx2_io offset;
	uint8_t __otx2_io y_mask;
	uint8_t __otx2_io y_val;
	uint8_t __otx2_io r_mask;
	uint8_t __otx2_io r_val;
};

/* Response: index of the allocated mark format entry */
struct nix_mark_format_cfg_rsp {
	struct mbox_msghdr hdr;
	uint8_t __otx2_io mark_format_idx;
};

/* Describe per-segment field edits for an LSO format */
struct nix_lso_format_cfg {
	struct mbox_msghdr hdr;
	uint64_t __otx2_io field_mask;
	uint64_t __otx2_io fields[NIX_LSO_FIELD_MAX];
};

/* Response: index of the allocated LSO format entry */
struct nix_lso_format_cfg_rsp {
	struct mbox_msghdr hdr;
	uint8_t __otx2_io lso_format_idx;
};
+
/* Rx packet filter mode (unicast / promiscuous / all-multicast) */
struct nix_rx_mode {
	struct mbox_msghdr hdr;
#define NIX_RX_MODE_UCAST BIT(0)
#define NIX_RX_MODE_PROMISC BIT(1)
#define NIX_RX_MODE_ALLMULTI BIT(2)
	uint16_t __otx2_io mode;
};

/* Outer L3/L4 length and checksum verification on Rx */
struct nix_rx_cfg {
	struct mbox_msghdr hdr;
#define NIX_RX_OL3_VERIFY BIT(0)
#define NIX_RX_OL4_VERIFY BIT(1)
	uint8_t __otx2_io len_verify; /* Outer L3/L4 len check */
#define NIX_RX_CSUM_OL4_VERIFY BIT(0)
	uint8_t __otx2_io csum_verify; /* Outer L4 checksum verification */
};

/* Set maximum (and optionally minimum) frame sizes for the LF */
struct nix_frs_cfg {
	struct mbox_msghdr hdr;
	uint8_t __otx2_io update_smq; /* Update SMQ's min/max lens */
	uint8_t __otx2_io update_minlen; /* Set minlen also */
	uint8_t __otx2_io sdp_link; /* Set SDP RX link */
	uint16_t __otx2_io maxlen;
	uint16_t __otx2_io minlen;
};

/* Set the TPID used for inner or outer VLAN matching */
struct nix_set_vlan_tpid {
	struct mbox_msghdr hdr;
#define NIX_VLAN_TYPE_INNER 0
#define NIX_VLAN_TYPE_OUTER 1
	uint8_t __otx2_io vlan_type;
	uint16_t __otx2_io tpid;
};

/* Configure backpressure on a contiguous range of Rx channels */
struct nix_bp_cfg_req {
	struct mbox_msghdr hdr;
	uint16_t __otx2_io chan_base; /* Starting channel number */
	uint8_t __otx2_io chan_cnt; /* Number of channels */
	uint8_t __otx2_io bpid_per_chan;
	/* bpid_per_chan = 0 assigns single bp id for range of channels */
	/* bpid_per_chan = 1 assigns separate bp id for each channel */
};

/* PF can be mapped to either CGX or LBK interface,
 * so maximum 64 channels are possible.
 */
#define NIX_MAX_CHAN 64
/* Backpressure response: per-channel BP id assignments */
struct nix_bp_cfg_rsp {
	struct mbox_msghdr hdr;
	/* Channel and bpid mapping */
	uint16_t __otx2_io chan_bpid[NIX_MAX_CHAN];
	/* Number of channel for which bpids are assigned */
	uint8_t __otx2_io chan_cnt;
};

/* Global NIX inline IPSec configuration */
struct nix_inline_ipsec_cfg {
	struct mbox_msghdr hdr;
	uint32_t __otx2_io cpt_credit;
	struct {
		uint8_t __otx2_io egrp;
		uint8_t __otx2_io opcode;
	} gen_cfg;
	struct {
		uint16_t __otx2_io cpt_pf_func;
		uint8_t __otx2_io cpt_slot;
	} inst_qsel;
	uint8_t __otx2_io enable;
};

/* Per NIX LF inline IPSec configuration */
struct nix_inline_ipsec_lf_cfg {
	struct mbox_msghdr hdr;
	uint64_t __otx2_io sa_base_addr; /* Base of the SA table */
	struct {
		uint32_t __otx2_io tag_const;
		uint16_t __otx2_io lenm1_max;
		uint8_t __otx2_io sa_pow2_size;
		uint8_t __otx2_io tt;
	} ipsec_cfg0;
	struct {
		uint32_t __otx2_io sa_idx_max;
		uint8_t __otx2_io sa_idx_w;
	} ipsec_cfg1;
	uint8_t __otx2_io enable;
};
+
/* SSO mailbox error codes
 * Range 501 - 600.
 */
enum sso_af_status {
	SSO_AF_ERR_PARAM = -501,
	SSO_AF_ERR_LF_INVALID = -502,
	SSO_AF_ERR_AF_LF_ALLOC = -503,
	SSO_AF_ERR_GRP_EBUSY = -504,
	SSO_AF_INVAL_NPA_PF_FUNC = -505,
};

/* Allocate SSO group (HWGRP) LFs on the given node */
struct sso_lf_alloc_req {
	struct mbox_msghdr hdr;
	int __otx2_io node;
	uint16_t __otx2_io hwgrps;
};

/* HWGRP allocation response: XAQ buffer geometry for the LFs */
struct sso_lf_alloc_rsp {
	struct mbox_msghdr hdr;
	uint32_t __otx2_io xaq_buf_size;
	uint32_t __otx2_io xaq_wq_entries;
	uint32_t __otx2_io in_unit_entries;
	uint16_t __otx2_io hwgrps;
};

/* Free previously allocated HWGRP LFs */
struct sso_lf_free_req {
	struct mbox_msghdr hdr;
	int __otx2_io node;
	uint16_t __otx2_io hwgrps;
};

/* SSOW mailbox error codes
 * Range 601 - 700.
 */
enum ssow_af_status {
	SSOW_AF_ERR_PARAM = -601,
	SSOW_AF_ERR_LF_INVALID = -602,
	SSOW_AF_ERR_AF_LF_ALLOC = -603,
};

/* Allocate SSO work-slot (HWS) LFs on the given node */
struct ssow_lf_alloc_req {
	struct mbox_msghdr hdr;
	int __otx2_io node;
	uint16_t __otx2_io hws;
};

/* Free previously allocated HWS LFs */
struct ssow_lf_free_req {
	struct mbox_msghdr hdr;
	int __otx2_io node;
	uint16_t __otx2_io hws;
};

/* Attach an NPA aura to the HWGRPs (XAQ buffer pool) */
struct sso_hw_setconfig {
	struct mbox_msghdr hdr;
	uint32_t __otx2_io npa_aura_id;
	uint16_t __otx2_io npa_pf_func;
	uint16_t __otx2_io hwgrps;
};

/* Identify either a group or a work slot in SSO get/set requests */
struct sso_info_req {
	struct mbox_msghdr hdr;
	union {
		uint16_t __otx2_io grp;
		uint16_t __otx2_io hws;
	};
};

/* Scheduling priority/affinity/weight of one SSO group */
struct sso_grp_priority {
	struct mbox_msghdr hdr;
	uint16_t __otx2_io grp;
	uint8_t __otx2_io priority;
	uint8_t __otx2_io affinity;
	uint8_t __otx2_io weight;
};

/* Per-group QoS thresholds */
struct sso_grp_qos_cfg {
	struct mbox_msghdr hdr;
	uint16_t __otx2_io grp;
	uint32_t __otx2_io xaq_limit;
	uint16_t __otx2_io taq_thr;
	uint16_t __otx2_io iaq_thr;
};

/* Per-group hardware event counters */
struct sso_grp_stats {
	struct mbox_msghdr hdr;
	uint16_t __otx2_io grp;
	uint64_t __otx2_io ws_pc;
	uint64_t __otx2_io ext_pc;
	uint64_t __otx2_io wa_pc;
	uint64_t __otx2_io ts_pc;
	uint64_t __otx2_io ds_pc;
	uint64_t __otx2_io dq_pc;
	uint64_t __otx2_io aw_status;
	uint64_t __otx2_io page_cnt;
};

/* Per-work-slot arbitration counter */
struct sso_hws_stats {
	struct mbox_msghdr hdr;
	uint16_t __otx2_io hws;
	uint64_t __otx2_io arbitration;
};
+
/* CPT mailbox error codes
 * Range 901 - 1000.
 */
enum cpt_af_status {
	CPT_AF_ERR_PARAM = -901,
	CPT_AF_ERR_GRP_INVALID = -902,
	CPT_AF_ERR_LF_INVALID = -903,
	CPT_AF_ERR_ACCESS_DENIED = -904,
	CPT_AF_ERR_SSO_PF_FUNC_INVALID = -905,
	CPT_AF_ERR_NIX_PF_FUNC_INVALID = -906,
	CPT_AF_ERR_INLINE_IPSEC_INB_ENA = -907,
	CPT_AF_ERR_INLINE_IPSEC_OUT_ENA = -908
};

/* CPT mbox message formats */

/* Read or write a CPT register at reg_offset */
struct cpt_rd_wr_reg_msg {
	struct mbox_msghdr hdr;
	uint64_t __otx2_io reg_offset;
	/* NOTE(review): a pointer carried inside a mailbox message is
	 * presumably only meaningful to the requesting side - confirm
	 * against the AF driver before reusing it.
	 */
	uint64_t __otx2_io *ret_val;
	uint64_t __otx2_io val;
	uint8_t __otx2_io is_write; /* non-zero: write val, else read */
};

/* Select the crypto engine group to use */
struct cpt_set_crypto_grp_req_msg {
	struct mbox_msghdr hdr;
	uint8_t __otx2_io crypto_eng_grp;
};

/* Allocate CPT LFs; NIX/SSO PF_FUNCs wire up inline IPsec paths */
struct cpt_lf_alloc_req_msg {
	struct mbox_msghdr hdr;
	uint16_t __otx2_io nix_pf_func;
	uint16_t __otx2_io sso_pf_func;
};

/* CPT LF allocation response: engine group assigned */
struct cpt_lf_alloc_rsp_msg {
	struct mbox_msghdr hdr;
	uint8_t __otx2_io crypto_eng_grp;
};

/* Values for cpt_inline_ipsec_cfg_msg.dir */
#define CPT_INLINE_INBOUND 0
#define CPT_INLINE_OUTBOUND 1

/* Enable/disable inline IPsec processing on a CPT LF slot */
struct cpt_inline_ipsec_cfg_msg {
	struct mbox_msghdr hdr;
	uint8_t __otx2_io enable;
	uint8_t __otx2_io slot;
	uint8_t __otx2_io dir;
	uint16_t __otx2_io sso_pf_func; /* Inbound path SSO_PF_FUNC */
	uint16_t __otx2_io nix_pf_func; /* Outbound path NIX_PF_FUNC */
};

/* SSO PF_FUNC for the Rx inline-IPsec CPT LF */
struct cpt_rx_inline_lf_cfg_msg {
	struct mbox_msghdr hdr;
	uint16_t __otx2_io sso_pf_func;
};
+
/* NPC mbox message structs */

/* Sentinels for "no entry" / "no mapping" in MCAM bookkeeping */
#define NPC_MCAM_ENTRY_INVALID 0xFFFF
#define NPC_MCAM_INVALID_MAP 0xFFFF

/* NPC mailbox error codes
 * Range 701 - 800.
 */
enum npc_af_status {
	NPC_MCAM_INVALID_REQ = -701,
	NPC_MCAM_ALLOC_DENIED = -702,
	NPC_MCAM_ALLOC_FAILED = -703,
	NPC_MCAM_PERM_DENIED = -704,
	NPC_AF_ERR_HIGIG_CONFIG_FAIL = -705,
};

/* Allocate MCAM entries - contiguous or scattered - placed relative to
 * ref_entry according to priority.
 */
struct npc_mcam_alloc_entry_req {
	struct mbox_msghdr hdr;
#define NPC_MAX_NONCONTIG_ENTRIES 256
	uint8_t __otx2_io contig; /* Contiguous entries ? */
#define NPC_MCAM_ANY_PRIO 0
#define NPC_MCAM_LOWER_PRIO 1
#define NPC_MCAM_HIGHER_PRIO 2
	uint8_t __otx2_io priority; /* Lower or higher w.r.t ref_entry */
	uint16_t __otx2_io ref_entry;
	uint16_t __otx2_io count; /* Number of entries requested */
};

/* MCAM allocation response */
struct npc_mcam_alloc_entry_rsp {
	struct mbox_msghdr hdr;
	/* Entry alloc'ed or start index if contiguous.
	 * Invalid in case of non-contiguous.
	 */
	uint16_t __otx2_io entry;
	uint16_t __otx2_io count; /* Number of entries allocated */
	uint16_t __otx2_io free_count; /* Number of entries available */
	uint16_t __otx2_io entry_list[NPC_MAX_NONCONTIG_ENTRIES];
};

/* Free one MCAM entry, or all entries owned by this PF/VF */
struct npc_mcam_free_entry_req {
	struct mbox_msghdr hdr;
	uint16_t __otx2_io entry; /* Entry index to be freed */
	uint8_t __otx2_io all; /* Free all entries alloc'ed to this PFVF */
};

/* Raw MCAM entry contents: match key, key mask and actions */
struct mcam_entry {
#define NPC_MAX_KWS_IN_KEY 7 /* Number of keywords in max key width */
	uint64_t __otx2_io kw[NPC_MAX_KWS_IN_KEY];
	uint64_t __otx2_io kw_mask[NPC_MAX_KWS_IN_KEY];
	uint64_t __otx2_io action;
	uint64_t __otx2_io vtag_action;
};

/* Write a match key (and optionally a counter binding) to an entry */
struct npc_mcam_write_entry_req {
	struct mbox_msghdr hdr;
	struct mcam_entry entry_data;
	uint16_t __otx2_io entry; /* MCAM entry to write this match key */
	uint16_t __otx2_io cntr; /* Counter for this MCAM entry */
	uint8_t __otx2_io intf; /* Rx or Tx interface */
	uint8_t __otx2_io enable_entry;/* Enable this MCAM entry ? */
	uint8_t __otx2_io set_cntr; /* Set counter for this entry ? */
};

/* Enable/Disable a given entry */
struct npc_mcam_ena_dis_entry_req {
	struct mbox_msghdr hdr;
	uint16_t __otx2_io entry;
};

/* Move entries between MCAM slots (curr_entry[i] -> new_entry[i]) */
struct npc_mcam_shift_entry_req {
	struct mbox_msghdr hdr;
#define NPC_MCAM_MAX_SHIFTS 64
	uint16_t __otx2_io curr_entry[NPC_MCAM_MAX_SHIFTS];
	uint16_t __otx2_io new_entry[NPC_MCAM_MAX_SHIFTS];
	uint16_t __otx2_io shift_count; /* Number of entries to shift */
};

/* Shift response: which request slot failed, if any */
struct npc_mcam_shift_entry_rsp {
	struct mbox_msghdr hdr;
	/* Index in 'curr_entry', not entry itself */
	uint16_t __otx2_io failed_entry_idx;
};

/* Allocate MCAM match counters, contiguous or scattered */
struct npc_mcam_alloc_counter_req {
	struct mbox_msghdr hdr;
	uint8_t __otx2_io contig; /* Contiguous counters ? */
#define NPC_MAX_NONCONTIG_COUNTERS 64
	uint16_t __otx2_io count; /* Number of counters requested */
};

/* Counter allocation response */
struct npc_mcam_alloc_counter_rsp {
	struct mbox_msghdr hdr;
	/* Counter alloc'ed or start idx if contiguous.
	 * Invalid incase of non-contiguous.
	 */
	uint16_t __otx2_io cntr;
	uint16_t __otx2_io count; /* Number of counters allocated */
	uint16_t __otx2_io cntr_list[NPC_MAX_NONCONTIG_COUNTERS];
};
+
/* Free / clear / fetch-stats operations share this request */
struct npc_mcam_oper_counter_req {
	struct mbox_msghdr hdr;
	uint16_t __otx2_io cntr; /* Free a counter or clear/fetch it's stats */
};

/* Counter operation response */
struct npc_mcam_oper_counter_rsp {
	struct mbox_msghdr hdr;
	/* valid only while fetching counter's stats */
	uint64_t __otx2_io stat;
};

/* Detach a counter from one entry, or from all entries using it */
struct npc_mcam_unmap_counter_req {
	struct mbox_msghdr hdr;
	uint16_t __otx2_io cntr;
	uint16_t __otx2_io entry; /* Entry and counter to be unmapped */
	uint8_t __otx2_io all; /* Unmap all entries using this counter ? */
};

/* Allocate an entry and write its match key in a single request */
struct npc_mcam_alloc_and_write_entry_req {
	struct mbox_msghdr hdr;
	struct mcam_entry entry_data;
	uint16_t __otx2_io ref_entry;
	uint8_t __otx2_io priority; /* Lower or higher w.r.t ref_entry */
	uint8_t __otx2_io intf; /* Rx or Tx interface */
	uint8_t __otx2_io enable_entry;/* Enable this MCAM entry ? */
	uint8_t __otx2_io alloc_cntr; /* Allocate counter and map ? */
};

/* Combined alloc+write response: entry and (optional) counter indices */
struct npc_mcam_alloc_and_write_entry_rsp {
	struct mbox_msghdr hdr;
	uint16_t __otx2_io entry;
	uint16_t __otx2_io cntr;
};

/* Key-extraction (KEX) profile currently programmed into NPC */
struct npc_get_kex_cfg_rsp {
	struct mbox_msghdr hdr;
	uint64_t __otx2_io rx_keyx_cfg; /* NPC_AF_INTF(0)_KEX_CFG */
	uint64_t __otx2_io tx_keyx_cfg; /* NPC_AF_INTF(1)_KEX_CFG */
#define NPC_MAX_INTF 2
#define NPC_MAX_LID 8
#define NPC_MAX_LT 16
#define NPC_MAX_LD 2
#define NPC_MAX_LFL 16
	/* NPC_AF_KEX_LDATA(0..1)_FLAGS_CFG */
	uint64_t __otx2_io kex_ld_flags[NPC_MAX_LD];
	/* NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG */
	uint64_t __otx2_io
	intf_lid_lt_ld[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD];
	/* NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG */
	uint64_t __otx2_io
	intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL];
#define MKEX_NAME_LEN 128
	uint8_t __otx2_io mkex_pfl_name[MKEX_NAME_LEN];
};

/* Packet header fields a flow rule may match on */
enum header_fields {
	NPC_DMAC,
	NPC_SMAC,
	NPC_ETYPE,
	NPC_OUTER_VID,
	NPC_TOS,
	NPC_SIP_IPV4,
	NPC_DIP_IPV4,
	NPC_SIP_IPV6,
	NPC_DIP_IPV6,
	NPC_SPORT_TCP,
	NPC_DPORT_TCP,
	NPC_SPORT_UDP,
	NPC_DPORT_UDP,
	NPC_FDSA_VAL,
	NPC_HEADER_FIELDS_MAX,
};

/* Header field values (or masks) used by flow install requests */
struct flow_msg {
	unsigned char __otx2_io dmac[6];
	unsigned char __otx2_io smac[6];
	uint16_t __otx2_io etype;
	uint16_t __otx2_io vlan_etype;
	uint16_t __otx2_io vlan_tci;
	union {
		uint32_t __otx2_io ip4src;
		uint32_t __otx2_io ip6src[4];
	};
	union {
		uint32_t __otx2_io ip4dst;
		uint32_t __otx2_io ip6dst[4];
	};
	uint8_t __otx2_io tos;
	uint8_t __otx2_io ip_ver;
	uint8_t __otx2_io ip_proto;
	uint8_t __otx2_io tc;
	uint16_t __otx2_io sport;
	uint16_t __otx2_io dport;
};
+
/* Install a flow rule: packet/mask pair, target entry, action and
 * vtag actions.
 */
struct npc_install_flow_req {
	struct mbox_msghdr hdr;
	struct flow_msg packet; /* Values to match */
	struct flow_msg mask; /* Per-field match masks */
	uint64_t __otx2_io features; /* Bitmask of enum header_fields */
	uint16_t __otx2_io entry;
	uint16_t __otx2_io channel;
	uint8_t __otx2_io intf; /* Rx or Tx interface */
	uint8_t __otx2_io set_cntr;
	uint8_t __otx2_io default_rule;
	/* Overwrite(0) or append(1) flow to default rule? */
	uint8_t __otx2_io append;
	uint16_t __otx2_io vf;
	/* action */
	uint32_t __otx2_io index;
	uint16_t __otx2_io match_id;
	uint8_t __otx2_io flow_key_alg;
	uint8_t __otx2_io op;
	/* vtag action */
	uint8_t __otx2_io vtag0_type;
	uint8_t __otx2_io vtag0_valid;
	uint8_t __otx2_io vtag1_type;
	uint8_t __otx2_io vtag1_valid;

	/* vtag tx action */
	uint16_t __otx2_io vtag0_def;
	uint8_t __otx2_io vtag0_op;
	uint16_t __otx2_io vtag1_def;
	uint8_t __otx2_io vtag1_op;
};

/* Flow install response */
struct npc_install_flow_rsp {
	struct mbox_msghdr hdr;
	/* Negative if no counter else counter number */
	int __otx2_io counter;
};

/* Delete one entry, a range of entries, or all (PF + VFs) */
struct npc_delete_flow_req {
	struct mbox_msghdr hdr;
	uint16_t __otx2_io entry;
	uint16_t __otx2_io start;/*Disable range of entries */
	uint16_t __otx2_io end;
	uint8_t __otx2_io all; /* PF + VFs */
};

/* Read back a programmed MCAM entry */
struct npc_mcam_read_entry_req {
	struct mbox_msghdr hdr;
	/* MCAM entry to read */
	uint16_t __otx2_io entry;
};

/* Read-back response: entry contents plus interface/enable state */
struct npc_mcam_read_entry_rsp {
	struct mbox_msghdr hdr;
	struct mcam_entry entry_data;
	uint8_t __otx2_io intf;
	uint8_t __otx2_io enable;
};
+
/* TIM mailbox error codes
 * Range 801 - 900.
 */
enum tim_af_status {
	TIM_AF_NO_RINGS_LEFT = -801,
	TIM_AF_INVALID_NPA_PF_FUNC = -802,
	TIM_AF_INVALID_SSO_PF_FUNC = -803,
	TIM_AF_RING_STILL_RUNNING = -804,
	TIM_AF_LF_INVALID = -805,
	TIM_AF_CSIZE_NOT_ALIGNED = -806,
	TIM_AF_CSIZE_TOO_SMALL = -807,
	TIM_AF_CSIZE_TOO_BIG = -808,
	TIM_AF_INTERVAL_TOO_SMALL = -809,
	TIM_AF_INVALID_BIG_ENDIAN_VALUE = -810,
	TIM_AF_INVALID_CLOCK_SOURCE = -811,
	TIM_AF_GPIO_CLK_SRC_NOT_ENABLED = -812,
	TIM_AF_INVALID_BSIZE = -813,
	TIM_AF_INVALID_ENABLE_PERIODIC = -814,
	TIM_AF_INVALID_ENABLE_DONTFREE = -815,
	TIM_AF_ENA_DONTFRE_NSET_PERIODIC = -816,
	TIM_AF_RING_ALREADY_DISABLED = -817,
};

/* Clock sources selectable for a TIM ring */
enum tim_clk_srcs {
	TIM_CLK_SRCS_TENNS = 0,
	TIM_CLK_SRCS_GPIO = 1,
	TIM_CLK_SRCS_GTI = 2,
	TIM_CLK_SRCS_PTP = 3,
	TIM_CLK_SRSC_INVALID,
};

/* GPIO edge used when the GPIO clock source drives a TIM ring */
enum tim_gpio_edge {
	TIM_GPIO_NO_EDGE = 0,
	TIM_GPIO_LTOH_TRANS = 1,
	TIM_GPIO_HTOL_TRANS = 2,
	TIM_GPIO_BOTH_TRANS = 3,
	TIM_GPIO_INVALID,
};

/* Operations carried by a PTP mailbox request */
enum ptp_op {
	PTP_OP_ADJFINE = 0, /* adjfine(req.scaled_ppm); */
	PTP_OP_GET_CLOCK = 1, /* rsp.clk = get_clock() */
};

/* PTP request: op selector plus per-op arguments */
struct ptp_req {
	struct mbox_msghdr hdr;
	uint8_t __otx2_io op; /* enum ptp_op */
	int64_t __otx2_io scaled_ppm; /* used by PTP_OP_ADJFINE */
	uint8_t __otx2_io is_pmu;
};

/* PTP response: clock (and tsc) values */
struct ptp_rsp {
	struct mbox_msghdr hdr;
	uint64_t __otx2_io clk;
	uint64_t __otx2_io tsc;
};

/* Hardware capability flags reported by the AF */
struct get_hw_cap_rsp {
	struct mbox_msghdr hdr;
	/* Schq mapping fixed or flexible */
	uint8_t __otx2_io nix_fixed_txschq_mapping;
	uint8_t __otx2_io nix_shaping; /* Is shaping and coloring supported */
};

/* Request NDC cache sync for the selected LF paths */
struct ndc_sync_op {
	struct mbox_msghdr hdr;
	uint8_t __otx2_io nix_lf_tx_sync;
	uint8_t __otx2_io nix_lf_rx_sync;
	uint8_t __otx2_io npa_lf_sync;
};
+
/* Allocate a TIM ring (LF); NPA/SSO PF_FUNCs for buffer and event paths */
struct tim_lf_alloc_req {
	struct mbox_msghdr hdr;
	uint16_t __otx2_io ring;
	uint16_t __otx2_io npa_pf_func;
	uint16_t __otx2_io sso_pf_func;
};

/* Identify a TIM ring in free/enable/disable requests */
struct tim_ring_req {
	struct mbox_msghdr hdr;
	uint16_t __otx2_io ring;
};

/* Configure a TIM ring's geometry and clocking */
struct tim_config_req {
	struct mbox_msghdr hdr;
	uint16_t __otx2_io ring;
	uint8_t __otx2_io bigendian;
	uint8_t __otx2_io clocksource; /* enum tim_clk_srcs */
	uint8_t __otx2_io enableperiodic;
	uint8_t __otx2_io enabledontfreebuffer;
	uint32_t __otx2_io bucketsize;
	uint32_t __otx2_io chunksize;
	uint32_t __otx2_io interval;
};

/* Ring allocation response: 10ns clock counter value */
struct tim_lf_alloc_rsp {
	struct mbox_msghdr hdr;
	uint64_t __otx2_io tenns_clk;
};

/* Ring enable response: start timestamp and current bucket */
struct tim_enable_rsp {
	struct mbox_msghdr hdr;
	uint64_t __otx2_io timestarted;
	uint32_t __otx2_io currentbucket;
};
+
/* Mailbox API. Symbols tagged __rte_internal are exported for use by
 * other DPDK octeontx2 drivers only.
 */
__rte_internal
const char *otx2_mbox_id2name(uint16_t id);
int otx2_mbox_id2size(uint16_t id);
void otx2_mbox_reset(struct otx2_mbox *mbox, int devid);
int otx2_mbox_init(struct otx2_mbox *mbox, uintptr_t hwbase, uintptr_t reg_base,
		   int direction, int ndevsi, uint64_t intr_offset);
void otx2_mbox_fini(struct otx2_mbox *mbox);
__rte_internal
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
__rte_internal
int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
int otx2_mbox_wait_for_rsp_tmo(struct otx2_mbox *mbox, int devid, uint32_t tmo);
__rte_internal
int otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid, void **msg);
__rte_internal
int otx2_mbox_get_rsp_tmo(struct otx2_mbox *mbox, int devid, void **msg,
			  uint32_t tmo);
int otx2_mbox_get_availmem(struct otx2_mbox *mbox, int devid);
__rte_internal
struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
					    int size, int size_rsp);
+
/* Allocate a request message without reserving any response area. */
static inline struct mbox_msghdr *
otx2_mbox_alloc_msg(struct otx2_mbox *mbox, int devid, int size)
{
	struct mbox_msghdr *msg;

	msg = otx2_mbox_alloc_msg_rsp(mbox, devid, size, 0);

	return msg;
}
+
+static inline void
+otx2_mbox_req_init(uint16_t mbox_id, void *msghdr)
+{
+ struct mbox_msghdr *hdr = msghdr;
+
+ hdr->sig = OTX2_MBOX_REQ_SIG;
+ hdr->ver = OTX2_MBOX_VERSION;
+ hdr->id = mbox_id;
+ hdr->pcifunc = 0;
+}
+
+static inline void
+otx2_mbox_rsp_init(uint16_t mbox_id, void *msghdr)
+{
+ struct mbox_msghdr *hdr = msghdr;
+
+ hdr->sig = OTX2_MBOX_RSP_SIG;
+ hdr->rc = -ETIMEDOUT;
+ hdr->id = mbox_id;
+}
+
+static inline bool
+otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ bool ret;
+
+ rte_spinlock_lock(&mdev->mbox_lock);
+ ret = mdev->num_msgs != 0;
+ rte_spinlock_unlock(&mdev->mbox_lock);
+
+ return ret;
+}
+
/* Send the pending request on device 0 and block for the reply,
 * discarding the response payload.
 */
static inline int
otx2_mbox_process(struct otx2_mbox *mbox)
{
	int rc;

	otx2_mbox_msg_send(mbox, 0);
	rc = otx2_mbox_get_rsp(mbox, 0, NULL);

	return rc;
}
+
/* Send the pending request on device 0 and return the response
 * message through *msg.
 */
static inline int
otx2_mbox_process_msg(struct otx2_mbox *mbox, void **msg)
{
	int rc;

	otx2_mbox_msg_send(mbox, 0);
	rc = otx2_mbox_get_rsp(mbox, 0, msg);

	return rc;
}
+
/* Like otx2_mbox_process() but bounded by a caller-supplied timeout. */
static inline int
otx2_mbox_process_tmo(struct otx2_mbox *mbox, uint32_t tmo)
{
	int rc;

	otx2_mbox_msg_send(mbox, 0);
	rc = otx2_mbox_get_rsp_tmo(mbox, 0, NULL, tmo);

	return rc;
}
+
/* Like otx2_mbox_process_msg() but bounded by a caller-supplied
 * timeout.
 */
static inline int
otx2_mbox_process_msg_tmo(struct otx2_mbox *mbox, void **msg, uint32_t tmo)
{
	int rc;

	otx2_mbox_msg_send(mbox, 0);
	rc = otx2_mbox_get_rsp_tmo(mbox, 0, msg, tmo);

	return rc;
}
+
int otx2_send_ready_msg(struct otx2_mbox *mbox, uint16_t *pf_func /* out */);
int otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, uint16_t pf_func,
			   uint16_t id);

/* Generate one otx2_mbox_alloc_msg_<name>() helper per mailbox message:
 * each allocates the request on device 0 (reserving response space) and
 * stamps the request signature and message id.
 */
#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
static inline struct _req_type \
*otx2_mbox_alloc_msg_ ## _fn_name(struct otx2_mbox *mbox) \
{ \
	struct _req_type *req; \
	\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
		mbox, 0, sizeof(struct _req_type), \
		sizeof(struct _rsp_type)); \
	if (!req) \
		return NULL; \
	\
	req->hdr.sig = OTX2_MBOX_REQ_SIG; \
	req->hdr.id = _id; \
	otx2_mbox_dbg("id=0x%x (%s)", \
		req->hdr.id, otx2_mbox_id2name(req->hdr.id)); \
	return req; \
}

/* Expand M() for every message in the MBOX_MESSAGES table */
MBOX_MESSAGES
#undef M
+
/* Byte-wise copy for mailbox (device) memory.
 *
 * Device memory regions may be unaligned to 16B, and libc memcpy's
 * wide-access optimizations do not work on such addresses, so copy one
 * byte at a time through volatile pointers.
 *
 * Returns d, or NULL when either pointer is NULL.
 */
static inline volatile void *
otx2_mbox_memcpy(volatile void *d, const volatile void *s, size_t l)
{
	const volatile uint8_t *src;
	volatile uint8_t *dst;
	size_t off;

	if (d == NULL || s == NULL)
		return NULL;

	dst = (volatile uint8_t *)d;
	src = (const volatile uint8_t *)s;
	for (off = 0; off < l; off++)
		dst[off] = src[off];

	return d;
}
+
/* Byte-wise memset for mailbox (device) memory.
 *
 * Same rationale as otx2_mbox_memcpy(): libc memset's wide accesses do
 * not work on device addresses unaligned to 16B, so store one byte at a
 * time. A NULL destination or zero length is a no-op.
 */
static inline void
otx2_mbox_memset(volatile void *d, uint8_t val, size_t l)
{
	volatile uint8_t *dst;
	size_t off = 0;

	if (d == NULL || l == 0)
		return;

	dst = (volatile uint8_t *)d;
	while (off < l) {
		dst[off] = val;
		off++;
	}
}
+
+#endif /* __OTX2_MBOX_H__ */
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/otx2_sec_idev.c b/src/spdk/dpdk/drivers/common/octeontx2/otx2_sec_idev.c
new file mode 100644
index 000000000..6e9643c38
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/otx2_sec_idev.c
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#include <rte_atomic.h>
+#include <rte_bus_pci.h>
+#include <rte_ethdev.h>
+#include <rte_spinlock.h>
+
+#include "otx2_common.h"
+#include "otx2_sec_idev.h"
+
+static struct otx2_sec_idev_cfg sec_cfg[OTX2_MAX_INLINE_PORTS];
+
+/**
+ * @internal
+ * Check if rte_eth_dev is security offload capable otx2_eth_dev
+ */
+uint8_t
+otx2_eth_dev_is_sec_capable(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev;
+
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+ if (pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_PF ||
+ pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_VF ||
+ pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_AF_VF)
+ return 1;
+
+ return 0;
+}
+
+int
+otx2_sec_idev_cfg_init(int port_id)
+{
+ struct otx2_sec_idev_cfg *cfg;
+ int i;
+
+ cfg = &sec_cfg[port_id];
+ cfg->tx_cpt_idx = 0;
+ rte_spinlock_init(&cfg->tx_cpt_lock);
+
+ for (i = 0; i < OTX2_MAX_CPT_QP_PER_PORT; i++) {
+ cfg->tx_cpt[i].qp = NULL;
+ rte_atomic16_set(&cfg->tx_cpt[i].ref_cnt, 0);
+ }
+
+ return 0;
+}
+
+int
+otx2_sec_idev_tx_cpt_qp_add(uint16_t port_id, struct otx2_cpt_qp *qp)
+{
+ struct otx2_sec_idev_cfg *cfg;
+ int i, ret;
+
+ if (qp == NULL || port_id >= OTX2_MAX_INLINE_PORTS)
+ return -EINVAL;
+
+ cfg = &sec_cfg[port_id];
+
+ /* Find a free slot to save CPT LF */
+
+ rte_spinlock_lock(&cfg->tx_cpt_lock);
+
+ for (i = 0; i < OTX2_MAX_CPT_QP_PER_PORT; i++) {
+ if (cfg->tx_cpt[i].qp == NULL) {
+ cfg->tx_cpt[i].qp = qp;
+ ret = 0;
+ goto unlock;
+ }
+ }
+
+ ret = -EINVAL;
+
+unlock:
+ rte_spinlock_unlock(&cfg->tx_cpt_lock);
+ return ret;
+}
+
+int
+otx2_sec_idev_tx_cpt_qp_remove(struct otx2_cpt_qp *qp)
+{
+ struct otx2_sec_idev_cfg *cfg;
+ uint16_t port_id;
+ int i, ret;
+
+ if (qp == NULL)
+ return -EINVAL;
+
+ for (port_id = 0; port_id < OTX2_MAX_INLINE_PORTS; port_id++) {
+ cfg = &sec_cfg[port_id];
+
+ rte_spinlock_lock(&cfg->tx_cpt_lock);
+
+ for (i = 0; i < OTX2_MAX_CPT_QP_PER_PORT; i++) {
+ if (cfg->tx_cpt[i].qp != qp)
+ continue;
+
+ /* Don't free if the QP is in use by any sec session */
+ if (rte_atomic16_read(&cfg->tx_cpt[i].ref_cnt)) {
+ ret = -EBUSY;
+ } else {
+ cfg->tx_cpt[i].qp = NULL;
+ ret = 0;
+ }
+
+ goto unlock;
+ }
+
+ rte_spinlock_unlock(&cfg->tx_cpt_lock);
+ }
+
+ return -ENOENT;
+
+unlock:
+ rte_spinlock_unlock(&cfg->tx_cpt_lock);
+ return ret;
+}
+
+int
+otx2_sec_idev_tx_cpt_qp_get(uint16_t port_id, struct otx2_cpt_qp **qp)
+{
+ struct otx2_sec_idev_cfg *cfg;
+ uint16_t index;
+ int i, ret;
+
+ if (port_id >= OTX2_MAX_INLINE_PORTS || qp == NULL)
+ return -EINVAL;
+
+ cfg = &sec_cfg[port_id];
+
+ rte_spinlock_lock(&cfg->tx_cpt_lock);
+
+ index = cfg->tx_cpt_idx;
+
+ /* Get the next index with valid data */
+ for (i = 0; i < OTX2_MAX_CPT_QP_PER_PORT; i++) {
+ if (cfg->tx_cpt[index].qp != NULL)
+ break;
+ index = (index + 1) % OTX2_MAX_CPT_QP_PER_PORT;
+ }
+
+ if (i >= OTX2_MAX_CPT_QP_PER_PORT) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ *qp = cfg->tx_cpt[index].qp;
+ rte_atomic16_inc(&cfg->tx_cpt[index].ref_cnt);
+
+ cfg->tx_cpt_idx = (index + 1) % OTX2_MAX_CPT_QP_PER_PORT;
+
+ ret = 0;
+
+unlock:
+ rte_spinlock_unlock(&cfg->tx_cpt_lock);
+ return ret;
+}
+
+int
+otx2_sec_idev_tx_cpt_qp_put(struct otx2_cpt_qp *qp)
+{
+ struct otx2_sec_idev_cfg *cfg;
+ uint16_t port_id;
+ int i;
+
+ if (qp == NULL)
+ return -EINVAL;
+
+ for (port_id = 0; port_id < OTX2_MAX_INLINE_PORTS; port_id++) {
+ cfg = &sec_cfg[port_id];
+ for (i = 0; i < OTX2_MAX_CPT_QP_PER_PORT; i++) {
+ if (cfg->tx_cpt[i].qp == qp) {
+ rte_atomic16_dec(&cfg->tx_cpt[i].ref_cnt);
+ return 0;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/otx2_sec_idev.h b/src/spdk/dpdk/drivers/common/octeontx2/otx2_sec_idev.h
new file mode 100644
index 000000000..89cdaf66a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/otx2_sec_idev.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#ifndef _OTX2_SEC_IDEV_H_
+#define _OTX2_SEC_IDEV_H_
+
+#include <rte_ethdev.h>
+
#define OTX2_MAX_CPT_QP_PER_PORT 64
#define OTX2_MAX_INLINE_PORTS 64

struct otx2_cpt_qp;

/* Per-ethdev-port inline IPsec state: the CPT queue pairs usable for
 * Tx crypto plus a round-robin cursor over them.
 */
struct otx2_sec_idev_cfg {
	struct {
		struct otx2_cpt_qp *qp; /* NULL when the slot is free */
		rte_atomic16_t ref_cnt; /* sessions currently using this QP */
	} tx_cpt[OTX2_MAX_CPT_QP_PER_PORT];

	uint16_t tx_cpt_idx; /* next slot tried by qp_get() */
	rte_spinlock_t tx_cpt_lock; /* guards tx_cpt[] and tx_cpt_idx */
};
+
/* Check whether an ethdev is a security-capable octeontx2 device */
__rte_internal
uint8_t otx2_eth_dev_is_sec_capable(struct rte_eth_dev *eth_dev);

/* Reset the per-port CPT QP table and its lock */
__rte_internal
int otx2_sec_idev_cfg_init(int port_id);

/* Register a CPT QP for Tx inline processing on a port */
__rte_internal
int otx2_sec_idev_tx_cpt_qp_add(uint16_t port_id, struct otx2_cpt_qp *qp);

/* Unregister a CPT QP; fails with -EBUSY while sessions reference it */
__rte_internal
int otx2_sec_idev_tx_cpt_qp_remove(struct otx2_cpt_qp *qp);

/* Release a reference taken by otx2_sec_idev_tx_cpt_qp_get() */
__rte_internal
int otx2_sec_idev_tx_cpt_qp_put(struct otx2_cpt_qp *qp);

/* Get (round-robin) a referenced CPT QP for a port */
__rte_internal
int otx2_sec_idev_tx_cpt_qp_get(uint16_t port_id, struct otx2_cpt_qp **qp);
+
+#endif /* _OTX2_SEC_IDEV_H_ */
diff --git a/src/spdk/dpdk/drivers/common/octeontx2/rte_common_octeontx2_version.map b/src/spdk/dpdk/drivers/common/octeontx2/rte_common_octeontx2_version.map
new file mode 100644
index 000000000..d26bd7117
--- /dev/null
+++ b/src/spdk/dpdk/drivers/common/octeontx2/rte_common_octeontx2_version.map
@@ -0,0 +1,45 @@
+DPDK_20.0 {
+ local: *;
+};
+
+INTERNAL {
+ global:
+
+ otx2_dev_active_vfs;
+ otx2_dev_fini;
+ otx2_dev_priv_init;
+ otx2_disable_irqs;
+ otx2_eth_dev_is_sec_capable;
+ otx2_intra_dev_get_cfg;
+ otx2_logtype_base;
+ otx2_logtype_dpi;
+ otx2_logtype_ep;
+ otx2_logtype_mbox;
+ otx2_logtype_nix;
+ otx2_logtype_npa;
+ otx2_logtype_npc;
+ otx2_logtype_sso;
+ otx2_logtype_tim;
+ otx2_logtype_tm;
+ otx2_mbox_alloc_msg_rsp;
+ otx2_mbox_get_rsp;
+ otx2_mbox_get_rsp_tmo;
+ otx2_mbox_id2name;
+ otx2_mbox_msg_send;
+ otx2_mbox_wait_for_rsp;
+ otx2_npa_lf_active;
+ otx2_npa_lf_obj_get;
+ otx2_npa_lf_obj_ref;
+ otx2_npa_pf_func_get;
+ otx2_npa_set_defaults;
+ otx2_parse_common_devargs;
+ otx2_register_irq;
+ otx2_sec_idev_cfg_init;
+ otx2_sec_idev_tx_cpt_qp_add;
+ otx2_sec_idev_tx_cpt_qp_get;
+ otx2_sec_idev_tx_cpt_qp_put;
+ otx2_sec_idev_tx_cpt_qp_remove;
+ otx2_sso_pf_func_get;
+ otx2_sso_pf_func_set;
+ otx2_unregister_irq;
+};