path: root/src/spdk/dpdk/drivers/event/octeontx2
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
commit     e6918187568dbd01842d8d1d2c808ce16a894239 (patch)
tree       64f88b554b444a49f656b6c656111a145cbbaa28 /src/spdk/dpdk/drivers/event/octeontx2
parent     Initial commit. (diff)
Adding upstream version 18.2.2. (upstream/18.2.2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/dpdk/drivers/event/octeontx2')
-rw-r--r--  src/spdk/dpdk/drivers/event/octeontx2/Makefile                                47
-rw-r--r--  src/spdk/dpdk/drivers/event/octeontx2/meson.build                             29
-rw-r--r--  src/spdk/dpdk/drivers/event/octeontx2/otx2_evdev.c                          1825
-rw-r--r--  src/spdk/dpdk/drivers/event/octeontx2/otx2_evdev.h                           400
-rw-r--r--  src/spdk/dpdk/drivers/event/octeontx2/otx2_evdev_adptr.c                     492
-rw-r--r--  src/spdk/dpdk/drivers/event/octeontx2/otx2_evdev_irq.c                       272
-rw-r--r--  src/spdk/dpdk/drivers/event/octeontx2/otx2_evdev_selftest.c                 1511
-rw-r--r--  src/spdk/dpdk/drivers/event/octeontx2/otx2_evdev_stats.h                     286
-rw-r--r--  src/spdk/dpdk/drivers/event/octeontx2/otx2_tim_evdev.c                       783
-rw-r--r--  src/spdk/dpdk/drivers/event/octeontx2/otx2_tim_evdev.h                       253
-rw-r--r--  src/spdk/dpdk/drivers/event/octeontx2/otx2_tim_worker.c                      191
-rw-r--r--  src/spdk/dpdk/drivers/event/octeontx2/otx2_tim_worker.h                      583
-rw-r--r--  src/spdk/dpdk/drivers/event/octeontx2/otx2_worker.c                          371
-rw-r--r--  src/spdk/dpdk/drivers/event/octeontx2/otx2_worker.h                          310
-rw-r--r--  src/spdk/dpdk/drivers/event/octeontx2/otx2_worker_dual.c                     343
-rw-r--r--  src/spdk/dpdk/drivers/event/octeontx2/otx2_worker_dual.h                     101
-rw-r--r--  src/spdk/dpdk/drivers/event/octeontx2/rte_pmd_octeontx2_event_version.map      3
17 files changed, 7800 insertions, 0 deletions
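
The files below add the OCTEON TX2 (otx2) SSO event device PMD. Applications do not call the otx2_* symbols directly; they drive the PMD through the generic rte_eventdev API. As a rough sketch (not taken from this commit; the helper name, device id, queue/port counts and timeout are placeholders), a minimal configure/start sequence against such an eventdev could look like this:

#include <rte_eventdev.h>

int
sso_example_setup(uint8_t dev_id)
{
	struct rte_event_dev_info info;
	struct rte_event_dev_config cfg = {0};
	struct rte_event_queue_conf qconf;
	struct rte_event_port_conf pconf;
	uint8_t queue = 0, port = 0;

	rte_event_dev_info_get(dev_id, &info);

	/* One queue and one port, limits taken from the driver's info_get(). */
	cfg.nb_event_queues = 1;
	cfg.nb_event_ports = 1;
	cfg.nb_events_limit = info.max_num_events;
	cfg.nb_event_queue_flows = info.max_event_queue_flows;
	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
	if (rte_event_dev_configure(dev_id, &cfg) < 0)
		return -1;

	rte_event_queue_default_conf_get(dev_id, queue, &qconf);
	if (rte_event_queue_setup(dev_id, queue, &qconf) < 0)
		return -1;

	rte_event_port_default_conf_get(dev_id, port, &pconf);
	if (rte_event_port_setup(dev_id, port, &pconf) < 0)
		return -1;

	/* Link queue 0 to port 0 with default priority. */
	if (rte_event_port_link(dev_id, port, &queue, NULL, 1) != 1)
		return -1;

	return rte_event_dev_start(dev_id);
}
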
diff --git a/src/spdk/dpdk/drivers/event/octeontx2/Makefile b/src/spdk/dpdk/drivers/event/octeontx2/Makefile
new file mode 100644
index 000000000..9d67b00c6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/octeontx2/Makefile
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(C) 2019 Marvell International Ltd.
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_octeontx2_event.a
+
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx2
+CFLAGS += -I$(RTE_SDK)/drivers/crypto/octeontx2
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx2
+CFLAGS += -I$(RTE_SDK)/drivers/event/octeontx2
+CFLAGS += -I$(RTE_SDK)/drivers/net/octeontx2
+CFLAGS += -O3
+
+ifneq ($(CONFIG_RTE_ARCH_64),y)
+CFLAGS += -Wno-int-to-pointer-cast
+CFLAGS += -Wno-pointer-to-int-cast
+ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+CFLAGS += -diag-disable 2259
+endif
+endif
+
+EXPORT_MAP := rte_pmd_octeontx2_event_version.map
+
+#
+# all source are stored in SRCS-y
+#
+
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EVENTDEV) += otx2_worker_dual.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EVENTDEV) += otx2_worker.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EVENTDEV) += otx2_tim_worker.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EVENTDEV) += otx2_evdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EVENTDEV) += otx2_evdev_adptr.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EVENTDEV) += otx2_tim_evdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EVENTDEV) += otx2_evdev_selftest.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EVENTDEV) += otx2_evdev_irq.c
+
+LDLIBS += -lrte_eal -lrte_bus_pci -lrte_pci -lrte_kvargs
+LDLIBS += -lrte_mempool -lrte_eventdev -lrte_mbuf -lrte_ethdev
+LDLIBS += -lrte_common_octeontx2 -lrte_mempool_octeontx2
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/event/octeontx2/meson.build b/src/spdk/dpdk/drivers/event/octeontx2/meson.build
new file mode 100644
index 000000000..0ade51cec
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/octeontx2/meson.build
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(C) 2019 Marvell International Ltd.
+#
+
+sources = files('otx2_worker.c',
+ 'otx2_worker_dual.c',
+ 'otx2_evdev.c',
+ 'otx2_evdev_adptr.c',
+ 'otx2_evdev_irq.c',
+ 'otx2_evdev_selftest.c',
+ 'otx2_tim_evdev.c',
+ 'otx2_tim_worker.c'
+ )
+
+extra_flags = []
+# This integrated controller runs only on a arm64 machine, remove 32bit warnings
+if not dpdk_conf.get('RTE_ARCH_64')
+ extra_flags += ['-Wno-int-to-pointer-cast', '-Wno-pointer-to-int-cast']
+endif
+
+foreach flag: extra_flags
+ if cc.has_argument(flag)
+ cflags += flag
+ endif
+endforeach
+
+deps += ['bus_pci', 'common_octeontx2', 'mempool_octeontx2', 'pmd_octeontx2']
+
+includes += include_directories('../../crypto/octeontx2')
diff --git a/src/spdk/dpdk/drivers/event/octeontx2/otx2_evdev.c b/src/spdk/dpdk/drivers/event/octeontx2/otx2_evdev.c
new file mode 100644
index 000000000..630073de5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/octeontx2/otx2_evdev.c
@@ -0,0 +1,1825 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <inttypes.h>
+
+#include <rte_bus_pci.h>
+#include <rte_common.h>
+#include <rte_eal.h>
+#include <rte_eventdev_pmd_pci.h>
+#include <rte_kvargs.h>
+#include <rte_mbuf_pool_ops.h>
+#include <rte_pci.h>
+
+#include "otx2_evdev_stats.h"
+#include "otx2_evdev.h"
+#include "otx2_irq.h"
+#include "otx2_tim_evdev.h"
+
+static inline int
+sso_get_msix_offsets(const struct rte_eventdev *event_dev)
+{
+ struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+ uint8_t nb_ports = dev->nb_event_ports * (dev->dual_ws ? 2 : 1);
+ struct otx2_mbox *mbox = dev->mbox;
+ struct msix_offset_rsp *msix_rsp;
+ int i, rc;
+
+ /* Get SSO and SSOW MSIX vector offsets */
+ otx2_mbox_alloc_msg_msix_offset(mbox);
+ rc = otx2_mbox_process_msg(mbox, (void *)&msix_rsp);
+
+ for (i = 0; i < nb_ports; i++)
+ dev->ssow_msixoff[i] = msix_rsp->ssow_msixoff[i];
+
+ for (i = 0; i < dev->nb_event_queues; i++)
+ dev->sso_msixoff[i] = msix_rsp->sso_msixoff[i];
+
+ return rc;
+}
+
+void
+sso_fastpath_fns_set(struct rte_eventdev *event_dev)
+{
+ struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+ /* Single WS modes */
+ const event_dequeue_t ssogws_deq[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+ [f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+ };
+
+ const event_dequeue_burst_t ssogws_deq_burst[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+ [f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_burst_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+ };
+
+ const event_dequeue_t ssogws_deq_timeout[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+ [f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_timeout_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+ };
+
+ const event_dequeue_burst_t
+ ssogws_deq_timeout_burst[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+ [f6][f5][f4][f3][f2][f1][f0] = \
+ otx2_ssogws_deq_timeout_burst_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+ };
+
+ const event_dequeue_t ssogws_deq_seg[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+ [f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_seg_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+ };
+
+ const event_dequeue_burst_t
+ ssogws_deq_seg_burst[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+ [f6][f5][f4][f3][f2][f1][f0] = \
+ otx2_ssogws_deq_seg_burst_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+ };
+
+ const event_dequeue_t ssogws_deq_seg_timeout[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+ [f6][f5][f4][f3][f2][f1][f0] = \
+ otx2_ssogws_deq_seg_timeout_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+ };
+
+ const event_dequeue_burst_t
+ ssogws_deq_seg_timeout_burst[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+ [f6][f5][f4][f3][f2][f1][f0] = \
+ otx2_ssogws_deq_seg_timeout_burst_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+ };
+
+
+ /* Dual WS modes */
+ const event_dequeue_t ssogws_dual_deq[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+ [f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+ };
+
+ const event_dequeue_burst_t
+ ssogws_dual_deq_burst[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+ [f6][f5][f4][f3][f2][f1][f0] = \
+ otx2_ssogws_dual_deq_burst_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+ };
+
+ const event_dequeue_t ssogws_dual_deq_timeout[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+ [f6][f5][f4][f3][f2][f1][f0] = \
+ otx2_ssogws_dual_deq_timeout_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+ };
+
+ const event_dequeue_burst_t
+ ssogws_dual_deq_timeout_burst[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+ [f6][f5][f4][f3][f2][f1][f0] = \
+ otx2_ssogws_dual_deq_timeout_burst_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+ };
+
+ const event_dequeue_t ssogws_dual_deq_seg[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+ [f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_seg_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+ };
+
+ const event_dequeue_burst_t
+ ssogws_dual_deq_seg_burst[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+ [f6][f5][f4][f3][f2][f1][f0] = \
+ otx2_ssogws_dual_deq_seg_burst_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+ };
+
+ const event_dequeue_t
+ ssogws_dual_deq_seg_timeout[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+ [f6][f5][f4][f3][f2][f1][f0] = \
+ otx2_ssogws_dual_deq_seg_timeout_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+ };
+
+ const event_dequeue_burst_t
+ ssogws_dual_deq_seg_timeout_burst[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+ [f6][f5][f4][f3][f2][f1][f0] = \
+ otx2_ssogws_dual_deq_seg_timeout_burst_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+ };
+
+ /* Tx modes */
+ const event_tx_adapter_enqueue
+ ssogws_tx_adptr_enq[2][2][2][2][2][2][2] = {
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
+ [f6][f5][f4][f3][f2][f1][f0] = \
+ otx2_ssogws_tx_adptr_enq_ ## name,
+SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+#undef T
+ };
+
+ const event_tx_adapter_enqueue
+ ssogws_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
+ [f6][f5][f4][f3][f2][f1][f0] = \
+ otx2_ssogws_tx_adptr_enq_seg_ ## name,
+SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+#undef T
+ };
+
+ const event_tx_adapter_enqueue
+ ssogws_dual_tx_adptr_enq[2][2][2][2][2][2][2] = {
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
+ [f6][f5][f4][f3][f2][f1][f0] = \
+ otx2_ssogws_dual_tx_adptr_enq_ ## name,
+SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+#undef T
+ };
+
+ const event_tx_adapter_enqueue
+ ssogws_dual_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
+ [f6][f5][f4][f3][f2][f1][f0] = \
+ otx2_ssogws_dual_tx_adptr_enq_seg_ ## name,
+SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+#undef T
+ };
+
+ event_dev->enqueue = otx2_ssogws_enq;
+ event_dev->enqueue_burst = otx2_ssogws_enq_burst;
+ event_dev->enqueue_new_burst = otx2_ssogws_enq_new_burst;
+ event_dev->enqueue_forward_burst = otx2_ssogws_enq_fwd_burst;
+ if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
+ event_dev->dequeue = ssogws_deq_seg
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ event_dev->dequeue_burst = ssogws_deq_seg_burst
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ if (dev->is_timeout_deq) {
+ event_dev->dequeue = ssogws_deq_seg_timeout
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ event_dev->dequeue_burst =
+ ssogws_deq_seg_timeout_burst
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ }
+ } else {
+ event_dev->dequeue = ssogws_deq
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ event_dev->dequeue_burst = ssogws_deq_burst
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ if (dev->is_timeout_deq) {
+ event_dev->dequeue = ssogws_deq_timeout
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ event_dev->dequeue_burst =
+ ssogws_deq_timeout_burst
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ }
+ }
+
+ if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) {
+ /* [SEC] [TSMP] [MBUF_NOFF] [VLAN] [OL3_L4_CSUM] [L3_L4_CSUM] */
+ event_dev->txa_enqueue = ssogws_tx_adptr_enq_seg
+ [!!(dev->tx_offloads & NIX_TX_OFFLOAD_SECURITY_F)]
+ [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
+ [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
+ [!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]
+ [!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
+ [!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
+ } else {
+ event_dev->txa_enqueue = ssogws_tx_adptr_enq
+ [!!(dev->tx_offloads & NIX_TX_OFFLOAD_SECURITY_F)]
+ [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
+ [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
+ [!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]
+ [!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
+ [!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
+ }
+
+ if (dev->dual_ws) {
+ event_dev->enqueue = otx2_ssogws_dual_enq;
+ event_dev->enqueue_burst = otx2_ssogws_dual_enq_burst;
+ event_dev->enqueue_new_burst =
+ otx2_ssogws_dual_enq_new_burst;
+ event_dev->enqueue_forward_burst =
+ otx2_ssogws_dual_enq_fwd_burst;
+
+ if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
+ event_dev->dequeue = ssogws_dual_deq_seg
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_SECURITY_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ event_dev->dequeue_burst = ssogws_dual_deq_seg_burst
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_SECURITY_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ if (dev->is_timeout_deq) {
+ event_dev->dequeue =
+ ssogws_dual_deq_seg_timeout
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_SECURITY_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_RSS_F)];
+ event_dev->dequeue_burst =
+ ssogws_dual_deq_seg_timeout_burst
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_SECURITY_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_RSS_F)];
+ }
+ } else {
+ event_dev->dequeue = ssogws_dual_deq
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_SECURITY_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ event_dev->dequeue_burst = ssogws_dual_deq_burst
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_SECURITY_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ if (dev->is_timeout_deq) {
+ event_dev->dequeue =
+ ssogws_dual_deq_timeout
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_SECURITY_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_RSS_F)];
+ event_dev->dequeue_burst =
+ ssogws_dual_deq_timeout_burst
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_SECURITY_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_RSS_F)];
+ }
+ }
+
+ if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) {
+ /* [SEC] [TSMP] [MBUF_NOFF] [VLAN] [OL3_L4_CSUM] [L3_L4_CSUM] */
+ event_dev->txa_enqueue = ssogws_dual_tx_adptr_enq_seg
+ [!!(dev->tx_offloads &
+ NIX_TX_OFFLOAD_SECURITY_F)]
+ [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
+ [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->tx_offloads &
+ NIX_TX_OFFLOAD_MBUF_NOFF_F)]
+ [!!(dev->tx_offloads &
+ NIX_TX_OFFLOAD_VLAN_QINQ_F)]
+ [!!(dev->tx_offloads &
+ NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
+ [!!(dev->tx_offloads &
+ NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
+ } else {
+ event_dev->txa_enqueue = ssogws_dual_tx_adptr_enq
+ [!!(dev->tx_offloads &
+ NIX_TX_OFFLOAD_SECURITY_F)]
+ [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
+ [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->tx_offloads &
+ NIX_TX_OFFLOAD_MBUF_NOFF_F)]
+ [!!(dev->tx_offloads &
+ NIX_TX_OFFLOAD_VLAN_QINQ_F)]
+ [!!(dev->tx_offloads &
+ NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
+ [!!(dev->tx_offloads &
+ NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
+ }
+ }
+
+ event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
+ rte_mb();
+}
+
+static void
+otx2_sso_info_get(struct rte_eventdev *event_dev,
+ struct rte_event_dev_info *dev_info)
+{
+ struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+
+ dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX2_PMD);
+ dev_info->min_dequeue_timeout_ns = dev->min_dequeue_timeout_ns;
+ dev_info->max_dequeue_timeout_ns = dev->max_dequeue_timeout_ns;
+ dev_info->max_event_queues = dev->max_event_queues;
+ dev_info->max_event_queue_flows = (1ULL << 20);
+ dev_info->max_event_queue_priority_levels = 8;
+ dev_info->max_event_priority_levels = 1;
+ dev_info->max_event_ports = dev->max_event_ports;
+ dev_info->max_event_port_dequeue_depth = 1;
+ dev_info->max_event_port_enqueue_depth = 1;
+ dev_info->max_num_events = dev->max_num_events;
+ dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
+ RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
+ RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
+ RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
+ RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
+ RTE_EVENT_DEV_CAP_NONSEQ_MODE;
+}
+
+static void
+sso_port_link_modify(struct otx2_ssogws *ws, uint8_t queue, uint8_t enable)
+{
+ uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);
+ uint64_t val;
+
+ val = queue;
+ val |= 0ULL << 12; /* SET 0 */
+ val |= 0x8000800080000000; /* Dont modify rest of the masks */
+ val |= (uint64_t)enable << 14; /* Enable/Disable Membership. */
+
+ otx2_write64(val, base + SSOW_LF_GWS_GRPMSK_CHG);
+}
+
+static int
+otx2_sso_port_link(struct rte_eventdev *event_dev, void *port,
+ const uint8_t queues[], const uint8_t priorities[],
+ uint16_t nb_links)
+{
+ struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+ uint8_t port_id = 0;
+ uint16_t link;
+
+ RTE_SET_USED(priorities);
+ for (link = 0; link < nb_links; link++) {
+ if (dev->dual_ws) {
+ struct otx2_ssogws_dual *ws = port;
+
+ port_id = ws->port;
+ sso_port_link_modify((struct otx2_ssogws *)
+ &ws->ws_state[0], queues[link], true);
+ sso_port_link_modify((struct otx2_ssogws *)
+ &ws->ws_state[1], queues[link], true);
+ } else {
+ struct otx2_ssogws *ws = port;
+
+ port_id = ws->port;
+ sso_port_link_modify(ws, queues[link], true);
+ }
+ }
+ sso_func_trace("Port=%d nb_links=%d", port_id, nb_links);
+
+ return (int)nb_links;
+}
+
+static int
+otx2_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
+ uint8_t queues[], uint16_t nb_unlinks)
+{
+ struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+ uint8_t port_id = 0;
+ uint16_t unlink;
+
+ for (unlink = 0; unlink < nb_unlinks; unlink++) {
+ if (dev->dual_ws) {
+ struct otx2_ssogws_dual *ws = port;
+
+ port_id = ws->port;
+ sso_port_link_modify((struct otx2_ssogws *)
+ &ws->ws_state[0], queues[unlink],
+ false);
+ sso_port_link_modify((struct otx2_ssogws *)
+ &ws->ws_state[1], queues[unlink],
+ false);
+ } else {
+ struct otx2_ssogws *ws = port;
+
+ port_id = ws->port;
+ sso_port_link_modify(ws, queues[unlink], false);
+ }
+ }
+ sso_func_trace("Port=%d nb_unlinks=%d", port_id, nb_unlinks);
+
+ return (int)nb_unlinks;
+}
+
+static int
+sso_hw_lf_cfg(struct otx2_mbox *mbox, enum otx2_sso_lf_type type,
+ uint16_t nb_lf, uint8_t attach)
+{
+ if (attach) {
+ struct rsrc_attach_req *req;
+
+ req = otx2_mbox_alloc_msg_attach_resources(mbox);
+ switch (type) {
+ case SSO_LF_GGRP:
+ req->sso = nb_lf;
+ break;
+ case SSO_LF_GWS:
+ req->ssow = nb_lf;
+ break;
+ default:
+ return -EINVAL;
+ }
+ req->modify = true;
+ if (otx2_mbox_process(mbox) < 0)
+ return -EIO;
+ } else {
+ struct rsrc_detach_req *req;
+
+ req = otx2_mbox_alloc_msg_detach_resources(mbox);
+ switch (type) {
+ case SSO_LF_GGRP:
+ req->sso = true;
+ break;
+ case SSO_LF_GWS:
+ req->ssow = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ req->partial = true;
+ if (otx2_mbox_process(mbox) < 0)
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+sso_lf_cfg(struct otx2_sso_evdev *dev, struct otx2_mbox *mbox,
+ enum otx2_sso_lf_type type, uint16_t nb_lf, uint8_t alloc)
+{
+ void *rsp;
+ int rc;
+
+ if (alloc) {
+ switch (type) {
+ case SSO_LF_GGRP:
+ {
+ struct sso_lf_alloc_req *req_ggrp;
+ req_ggrp = otx2_mbox_alloc_msg_sso_lf_alloc(mbox);
+ req_ggrp->hwgrps = nb_lf;
+ }
+ break;
+ case SSO_LF_GWS:
+ {
+ struct ssow_lf_alloc_req *req_hws;
+ req_hws = otx2_mbox_alloc_msg_ssow_lf_alloc(mbox);
+ req_hws->hws = nb_lf;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ switch (type) {
+ case SSO_LF_GGRP:
+ {
+ struct sso_lf_free_req *req_ggrp;
+ req_ggrp = otx2_mbox_alloc_msg_sso_lf_free(mbox);
+ req_ggrp->hwgrps = nb_lf;
+ }
+ break;
+ case SSO_LF_GWS:
+ {
+ struct ssow_lf_free_req *req_hws;
+ req_hws = otx2_mbox_alloc_msg_ssow_lf_free(mbox);
+ req_hws->hws = nb_lf;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ rc = otx2_mbox_process_msg_tmo(mbox, (void **)&rsp, ~0);
+ if (rc < 0)
+ return rc;
+
+ if (alloc && type == SSO_LF_GGRP) {
+ struct sso_lf_alloc_rsp *rsp_ggrp = rsp;
+
+ dev->xaq_buf_size = rsp_ggrp->xaq_buf_size;
+ dev->xae_waes = rsp_ggrp->xaq_wq_entries;
+ dev->iue = rsp_ggrp->in_unit_entries;
+ }
+
+ return 0;
+}
+
+static void
+otx2_sso_port_release(void *port)
+{
+ rte_free(port);
+}
+
+static void
+otx2_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id)
+{
+ RTE_SET_USED(event_dev);
+ RTE_SET_USED(queue_id);
+}
+
+static void
+sso_clr_links(const struct rte_eventdev *event_dev)
+{
+ struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+ int i, j;
+
+ for (i = 0; i < dev->nb_event_ports; i++) {
+ if (dev->dual_ws) {
+ struct otx2_ssogws_dual *ws;
+
+ ws = event_dev->data->ports[i];
+ for (j = 0; j < dev->nb_event_queues; j++) {
+ sso_port_link_modify((struct otx2_ssogws *)
+ &ws->ws_state[0], j, false);
+ sso_port_link_modify((struct otx2_ssogws *)
+ &ws->ws_state[1], j, false);
+ }
+ } else {
+ struct otx2_ssogws *ws;
+
+ ws = event_dev->data->ports[i];
+ for (j = 0; j < dev->nb_event_queues; j++)
+ sso_port_link_modify(ws, j, false);
+ }
+ }
+}
+
+static void
+sso_set_port_ops(struct otx2_ssogws *ws, uintptr_t base)
+{
+ ws->tag_op = base + SSOW_LF_GWS_TAG;
+ ws->wqp_op = base + SSOW_LF_GWS_WQP;
+ ws->getwrk_op = base + SSOW_LF_GWS_OP_GET_WORK;
+ ws->swtp_op = base + SSOW_LF_GWS_SWTP;
+ ws->swtag_norm_op = base + SSOW_LF_GWS_OP_SWTAG_NORM;
+ ws->swtag_desched_op = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
+}
+
+static int
+sso_configure_dual_ports(const struct rte_eventdev *event_dev)
+{
+ struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ uint8_t vws = 0;
+ uint8_t nb_lf;
+ int i, rc;
+
+ otx2_sso_dbg("Configuring event ports %d", dev->nb_event_ports);
+
+ nb_lf = dev->nb_event_ports * 2;
+ /* Ask AF to attach required LFs. */
+ rc = sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, true);
+ if (rc < 0) {
+ otx2_err("Failed to attach SSO GWS LF");
+ return -ENODEV;
+ }
+
+ if (sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, true) < 0) {
+ sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
+ otx2_err("Failed to init SSO GWS LF");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < dev->nb_event_ports; i++) {
+ struct otx2_ssogws_dual *ws;
+ uintptr_t base;
+
+ /* Free memory prior to re-allocation if needed */
+ if (event_dev->data->ports[i] != NULL) {
+ ws = event_dev->data->ports[i];
+ rte_free(ws);
+ ws = NULL;
+ }
+
+ /* Allocate event port memory */
+ ws = rte_zmalloc_socket("otx2_sso_ws",
+ sizeof(struct otx2_ssogws_dual),
+ RTE_CACHE_LINE_SIZE,
+ event_dev->data->socket_id);
+ if (ws == NULL) {
+ otx2_err("Failed to alloc memory for port=%d", i);
+ rc = -ENOMEM;
+ break;
+ }
+
+ ws->port = i;
+ base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | vws << 12);
+ sso_set_port_ops((struct otx2_ssogws *)&ws->ws_state[0], base);
+ vws++;
+
+ base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | vws << 12);
+ sso_set_port_ops((struct otx2_ssogws *)&ws->ws_state[1], base);
+ vws++;
+
+ event_dev->data->ports[i] = ws;
+ }
+
+ if (rc < 0) {
+ sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, false);
+ sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
+ }
+
+ return rc;
+}
+
+static int
+sso_configure_ports(const struct rte_eventdev *event_dev)
+{
+ struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ uint8_t nb_lf;
+ int i, rc;
+
+ otx2_sso_dbg("Configuring event ports %d", dev->nb_event_ports);
+
+ nb_lf = dev->nb_event_ports;
+ /* Ask AF to attach required LFs. */
+ rc = sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, true);
+ if (rc < 0) {
+ otx2_err("Failed to attach SSO GWS LF");
+ return -ENODEV;
+ }
+
+ if (sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, true) < 0) {
+ sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
+ otx2_err("Failed to init SSO GWS LF");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < nb_lf; i++) {
+ struct otx2_ssogws *ws;
+ uintptr_t base;
+
+ /* Free memory prior to re-allocation if needed */
+ if (event_dev->data->ports[i] != NULL) {
+ ws = event_dev->data->ports[i];
+ rte_free(ws);
+ ws = NULL;
+ }
+
+ /* Allocate event port memory */
+ ws = rte_zmalloc_socket("otx2_sso_ws",
+ sizeof(struct otx2_ssogws),
+ RTE_CACHE_LINE_SIZE,
+ event_dev->data->socket_id);
+ if (ws == NULL) {
+ otx2_err("Failed to alloc memory for port=%d", i);
+ rc = -ENOMEM;
+ break;
+ }
+
+ ws->port = i;
+ base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | i << 12);
+ sso_set_port_ops(ws, base);
+
+ event_dev->data->ports[i] = ws;
+ }
+
+ if (rc < 0) {
+ sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, false);
+ sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
+ }
+
+ return rc;
+}
+
+static int
+sso_configure_queues(const struct rte_eventdev *event_dev)
+{
+ struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ uint8_t nb_lf;
+ int rc;
+
+ otx2_sso_dbg("Configuring event queues %d", dev->nb_event_queues);
+
+ nb_lf = dev->nb_event_queues;
+ /* Ask AF to attach required LFs. */
+ rc = sso_hw_lf_cfg(mbox, SSO_LF_GGRP, nb_lf, true);
+ if (rc < 0) {
+ otx2_err("Failed to attach SSO GGRP LF");
+ return -ENODEV;
+ }
+
+ if (sso_lf_cfg(dev, mbox, SSO_LF_GGRP, nb_lf, true) < 0) {
+ sso_hw_lf_cfg(mbox, SSO_LF_GGRP, nb_lf, false);
+ otx2_err("Failed to init SSO GGRP LF");
+ return -ENODEV;
+ }
+
+ return rc;
+}
+
+static int
+sso_xaq_allocate(struct otx2_sso_evdev *dev)
+{
+ const struct rte_memzone *mz;
+ struct npa_aura_s *aura;
+ static int reconfig_cnt;
+ char pool_name[RTE_MEMZONE_NAMESIZE];
+ uint32_t xaq_cnt;
+ int rc;
+
+ if (dev->xaq_pool)
+ rte_mempool_free(dev->xaq_pool);
+
+ /*
+ * Allocate memory for Add work backpressure.
+ */
+ mz = rte_memzone_lookup(OTX2_SSO_FC_NAME);
+ if (mz == NULL)
+ mz = rte_memzone_reserve_aligned(OTX2_SSO_FC_NAME,
+ OTX2_ALIGN +
+ sizeof(struct npa_aura_s),
+ rte_socket_id(),
+ RTE_MEMZONE_IOVA_CONTIG,
+ OTX2_ALIGN);
+ if (mz == NULL) {
+ otx2_err("Failed to allocate mem for fcmem");
+ return -ENOMEM;
+ }
+
+ dev->fc_iova = mz->iova;
+ dev->fc_mem = mz->addr;
+
+ aura = (struct npa_aura_s *)((uintptr_t)dev->fc_mem + OTX2_ALIGN);
+ memset(aura, 0, sizeof(struct npa_aura_s));
+
+ aura->fc_ena = 1;
+ aura->fc_addr = dev->fc_iova;
+ aura->fc_hyst_bits = 0; /* Store count on all updates */
+
+ /* Taken from HRM 14.3.3(4) */
+ xaq_cnt = dev->nb_event_queues * OTX2_SSO_XAQ_CACHE_CNT;
+ if (dev->xae_cnt)
+ xaq_cnt += dev->xae_cnt / dev->xae_waes;
+ else if (dev->adptr_xae_cnt)
+ xaq_cnt += (dev->adptr_xae_cnt / dev->xae_waes) +
+ (OTX2_SSO_XAQ_SLACK * dev->nb_event_queues);
+ else
+ xaq_cnt += (dev->iue / dev->xae_waes) +
+ (OTX2_SSO_XAQ_SLACK * dev->nb_event_queues);
+
+ otx2_sso_dbg("Configuring %d xaq buffers", xaq_cnt);
+ /* Setup XAQ based on number of nb queues. */
+ snprintf(pool_name, 30, "otx2_xaq_buf_pool_%d", reconfig_cnt);
+ dev->xaq_pool = (void *)rte_mempool_create_empty(pool_name,
+ xaq_cnt, dev->xaq_buf_size, 0, 0,
+ rte_socket_id(), 0);
+
+ if (dev->xaq_pool == NULL) {
+ otx2_err("Unable to create empty mempool.");
+ rte_memzone_free(mz);
+ return -ENOMEM;
+ }
+
+ rc = rte_mempool_set_ops_byname(dev->xaq_pool,
+ rte_mbuf_platform_mempool_ops(), aura);
+ if (rc != 0) {
+ otx2_err("Unable to set xaqpool ops.");
+ goto alloc_fail;
+ }
+
+ rc = rte_mempool_populate_default(dev->xaq_pool);
+ if (rc < 0) {
+ otx2_err("Unable to set populate xaqpool.");
+ goto alloc_fail;
+ }
+ reconfig_cnt++;
+ /* When SW does addwork (enqueue) check if there is space in XAQ by
+ * comparing fc_addr above against the xaq_lmt calculated below.
+ * There should be a minimum headroom (OTX2_SSO_XAQ_SLACK / 2) for SSO
+ * to request XAQ to cache them even before enqueue is called.
+ */
+ dev->xaq_lmt = xaq_cnt - (OTX2_SSO_XAQ_SLACK / 2 *
+ dev->nb_event_queues);
+ dev->nb_xaq_cfg = xaq_cnt;
+
+ return 0;
+alloc_fail:
+ rte_mempool_free(dev->xaq_pool);
+ rte_memzone_free(mz);
+ return rc;
+}
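/*
 * Illustration with made-up numbers (not values from this patch): with
 * OTX2_SSO_XAQ_SLACK = 8, xaq_cnt = 512 and dev->nb_event_queues = 2, the
 * limit above becomes xaq_lmt = 512 - (8 / 2) * 2 = 504, i.e. 4 XAQ buffers
 * per event queue are held back as the headroom described in the comment
 * inside sso_xaq_allocate().
 */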
+
+static int
+sso_ggrp_alloc_xaq(struct otx2_sso_evdev *dev)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ struct sso_hw_setconfig *req;
+
+ otx2_sso_dbg("Configuring XAQ for GGRPs");
+ req = otx2_mbox_alloc_msg_sso_hw_setconfig(mbox);
+ req->npa_pf_func = otx2_npa_pf_func_get();
+ req->npa_aura_id = npa_lf_aura_handle_to_aura(dev->xaq_pool->pool_id);
+ req->hwgrps = dev->nb_event_queues;
+
+ return otx2_mbox_process(mbox);
+}
+
+static void
+sso_lf_teardown(struct otx2_sso_evdev *dev,
+ enum otx2_sso_lf_type lf_type)
+{
+ uint8_t nb_lf;
+
+ switch (lf_type) {
+ case SSO_LF_GGRP:
+ nb_lf = dev->nb_event_queues;
+ break;
+ case SSO_LF_GWS:
+ nb_lf = dev->nb_event_ports;
+ nb_lf *= dev->dual_ws ? 2 : 1;
+ break;
+ default:
+ return;
+ }
+
+ sso_lf_cfg(dev, dev->mbox, lf_type, nb_lf, false);
+ sso_hw_lf_cfg(dev->mbox, lf_type, nb_lf, false);
+}
+
+static int
+otx2_sso_configure(const struct rte_eventdev *event_dev)
+{
+ struct rte_event_dev_config *conf = &event_dev->data->dev_conf;
+ struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+ uint32_t deq_tmo_ns;
+ int rc;
+
+ sso_func_trace();
+ deq_tmo_ns = conf->dequeue_timeout_ns;
+
+ if (deq_tmo_ns == 0)
+ deq_tmo_ns = dev->min_dequeue_timeout_ns;
+
+ if (deq_tmo_ns < dev->min_dequeue_timeout_ns ||
+ deq_tmo_ns > dev->max_dequeue_timeout_ns) {
+ otx2_err("Unsupported dequeue timeout requested");
+ return -EINVAL;
+ }
+
+ if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
+ dev->is_timeout_deq = 1;
+
+ dev->deq_tmo_ns = deq_tmo_ns;
+
+ if (conf->nb_event_ports > dev->max_event_ports ||
+ conf->nb_event_queues > dev->max_event_queues) {
+ otx2_err("Unsupported event queues/ports requested");
+ return -EINVAL;
+ }
+
+ if (conf->nb_event_port_dequeue_depth > 1) {
+ otx2_err("Unsupported event port deq depth requested");
+ return -EINVAL;
+ }
+
+ if (conf->nb_event_port_enqueue_depth > 1) {
+ otx2_err("Unsupported event port enq depth requested");
+ return -EINVAL;
+ }
+
+ if (dev->configured)
+ sso_unregister_irqs(event_dev);
+
+ if (dev->nb_event_queues) {
+ /* Finit any previous queues. */
+ sso_lf_teardown(dev, SSO_LF_GGRP);
+ }
+ if (dev->nb_event_ports) {
+ /* Finit any previous ports. */
+ sso_lf_teardown(dev, SSO_LF_GWS);
+ }
+
+ dev->nb_event_queues = conf->nb_event_queues;
+ dev->nb_event_ports = conf->nb_event_ports;
+
+ if (dev->dual_ws)
+ rc = sso_configure_dual_ports(event_dev);
+ else
+ rc = sso_configure_ports(event_dev);
+
+ if (rc < 0) {
+ otx2_err("Failed to configure event ports");
+ return -ENODEV;
+ }
+
+ if (sso_configure_queues(event_dev) < 0) {
+ otx2_err("Failed to configure event queues");
+ rc = -ENODEV;
+ goto teardown_hws;
+ }
+
+ if (sso_xaq_allocate(dev) < 0) {
+ rc = -ENOMEM;
+ goto teardown_hwggrp;
+ }
+
+ /* Clear any prior port-queue mapping. */
+ sso_clr_links(event_dev);
+ rc = sso_ggrp_alloc_xaq(dev);
+ if (rc < 0) {
+ otx2_err("Failed to alloc xaq to ggrp %d", rc);
+ goto teardown_hwggrp;
+ }
+
+ rc = sso_get_msix_offsets(event_dev);
+ if (rc < 0) {
+ otx2_err("Failed to get msix offsets %d", rc);
+ goto teardown_hwggrp;
+ }
+
+ rc = sso_register_irqs(event_dev);
+ if (rc < 0) {
+ otx2_err("Failed to register irq %d", rc);
+ goto teardown_hwggrp;
+ }
+
+ dev->configured = 1;
+ rte_mb();
+
+ return 0;
+teardown_hwggrp:
+ sso_lf_teardown(dev, SSO_LF_GGRP);
+teardown_hws:
+ sso_lf_teardown(dev, SSO_LF_GWS);
+ dev->nb_event_queues = 0;
+ dev->nb_event_ports = 0;
+ dev->configured = 0;
+ return rc;
+}
+
+static void
+otx2_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
+ struct rte_event_queue_conf *queue_conf)
+{
+ RTE_SET_USED(event_dev);
+ RTE_SET_USED(queue_id);
+
+ queue_conf->nb_atomic_flows = (1ULL << 20);
+ queue_conf->nb_atomic_order_sequences = (1ULL << 20);
+ queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
+ queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+}
+
+static int
+otx2_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
+ const struct rte_event_queue_conf *queue_conf)
+{
+ struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ struct sso_grp_priority *req;
+ int rc;
+
+ sso_func_trace("Queue=%d prio=%d", queue_id, queue_conf->priority);
+
+ req = otx2_mbox_alloc_msg_sso_grp_set_priority(dev->mbox);
+ req->grp = queue_id;
+ req->weight = 0xFF;
+ req->affinity = 0xFF;
+ /* Normalize <0-255> to <0-7> */
+ req->priority = queue_conf->priority / 32;
+
+ rc = otx2_mbox_process(mbox);
+ if (rc < 0) {
+ otx2_err("Failed to set priority queue=%d", queue_id);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void
+otx2_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
+ struct rte_event_port_conf *port_conf)
+{
+ struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+
+ RTE_SET_USED(port_id);
+ port_conf->new_event_threshold = dev->max_num_events;
+ port_conf->dequeue_depth = 1;
+ port_conf->enqueue_depth = 1;
+}
+
+static int
+otx2_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
+ const struct rte_event_port_conf *port_conf)
+{
+ struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+ uintptr_t grps_base[OTX2_SSO_MAX_VHGRP] = {0};
+ uint64_t val;
+ uint16_t q;
+
+ sso_func_trace("Port=%d", port_id);
+ RTE_SET_USED(port_conf);
+
+ if (event_dev->data->ports[port_id] == NULL) {
+ otx2_err("Invalid port Id %d", port_id);
+ return -EINVAL;
+ }
+
+ for (q = 0; q < dev->nb_event_queues; q++) {
+ grps_base[q] = dev->bar2 + (RVU_BLOCK_ADDR_SSO << 20 | q << 12);
+ if (grps_base[q] == 0) {
+ otx2_err("Failed to get grp[%d] base addr", q);
+ return -EINVAL;
+ }
+ }
+
+ /* Set get_work timeout for HWS */
+ val = NSEC2USEC(dev->deq_tmo_ns) - 1;
+
+ if (dev->dual_ws) {
+ struct otx2_ssogws_dual *ws = event_dev->data->ports[port_id];
+
+ rte_memcpy(ws->grps_base, grps_base,
+ sizeof(uintptr_t) * OTX2_SSO_MAX_VHGRP);
+ ws->fc_mem = dev->fc_mem;
+ ws->xaq_lmt = dev->xaq_lmt;
+ ws->tstamp = dev->tstamp;
+ otx2_write64(val, OTX2_SSOW_GET_BASE_ADDR(
+ ws->ws_state[0].getwrk_op) + SSOW_LF_GWS_NW_TIM);
+ otx2_write64(val, OTX2_SSOW_GET_BASE_ADDR(
+ ws->ws_state[1].getwrk_op) + SSOW_LF_GWS_NW_TIM);
+ } else {
+ struct otx2_ssogws *ws = event_dev->data->ports[port_id];
+ uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);
+
+ rte_memcpy(ws->grps_base, grps_base,
+ sizeof(uintptr_t) * OTX2_SSO_MAX_VHGRP);
+ ws->fc_mem = dev->fc_mem;
+ ws->xaq_lmt = dev->xaq_lmt;
+ ws->tstamp = dev->tstamp;
+ otx2_write64(val, base + SSOW_LF_GWS_NW_TIM);
+ }
+
+ otx2_sso_dbg("Port=%d ws=%p", port_id, event_dev->data->ports[port_id]);
+
+ return 0;
+}
+
+static int
+otx2_sso_timeout_ticks(struct rte_eventdev *event_dev, uint64_t ns,
+ uint64_t *tmo_ticks)
+{
+ RTE_SET_USED(event_dev);
+ *tmo_ticks = NSEC2TICK(ns, rte_get_timer_hz());
+
+ return 0;
+}
+
+static void
+ssogws_dump(struct otx2_ssogws *ws, FILE *f)
+{
+ uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);
+
+ fprintf(f, "SSOW_LF_GWS Base addr 0x%" PRIx64 "\n", (uint64_t)base);
+ fprintf(f, "SSOW_LF_GWS_LINKS 0x%" PRIx64 "\n",
+ otx2_read64(base + SSOW_LF_GWS_LINKS));
+ fprintf(f, "SSOW_LF_GWS_PENDWQP 0x%" PRIx64 "\n",
+ otx2_read64(base + SSOW_LF_GWS_PENDWQP));
+ fprintf(f, "SSOW_LF_GWS_PENDSTATE 0x%" PRIx64 "\n",
+ otx2_read64(base + SSOW_LF_GWS_PENDSTATE));
+ fprintf(f, "SSOW_LF_GWS_NW_TIM 0x%" PRIx64 "\n",
+ otx2_read64(base + SSOW_LF_GWS_NW_TIM));
+ fprintf(f, "SSOW_LF_GWS_TAG 0x%" PRIx64 "\n",
+ otx2_read64(base + SSOW_LF_GWS_TAG));
+ fprintf(f, "SSOW_LF_GWS_WQP 0x%" PRIx64 "\n",
+ otx2_read64(base + SSOW_LF_GWS_TAG));
+ fprintf(f, "SSOW_LF_GWS_SWTP 0x%" PRIx64 "\n",
+ otx2_read64(base + SSOW_LF_GWS_SWTP));
+ fprintf(f, "SSOW_LF_GWS_PENDTAG 0x%" PRIx64 "\n",
+ otx2_read64(base + SSOW_LF_GWS_PENDTAG));
+}
+
+static void
+ssoggrp_dump(uintptr_t base, FILE *f)
+{
+ fprintf(f, "SSO_LF_GGRP Base addr 0x%" PRIx64 "\n", (uint64_t)base);
+ fprintf(f, "SSO_LF_GGRP_QCTL 0x%" PRIx64 "\n",
+ otx2_read64(base + SSO_LF_GGRP_QCTL));
+ fprintf(f, "SSO_LF_GGRP_XAQ_CNT 0x%" PRIx64 "\n",
+ otx2_read64(base + SSO_LF_GGRP_XAQ_CNT));
+ fprintf(f, "SSO_LF_GGRP_INT_THR 0x%" PRIx64 "\n",
+ otx2_read64(base + SSO_LF_GGRP_INT_THR));
+ fprintf(f, "SSO_LF_GGRP_INT_CNT 0x%" PRIX64 "\n",
+ otx2_read64(base + SSO_LF_GGRP_INT_CNT));
+ fprintf(f, "SSO_LF_GGRP_AQ_CNT 0x%" PRIX64 "\n",
+ otx2_read64(base + SSO_LF_GGRP_AQ_CNT));
+ fprintf(f, "SSO_LF_GGRP_AQ_THR 0x%" PRIX64 "\n",
+ otx2_read64(base + SSO_LF_GGRP_AQ_THR));
+ fprintf(f, "SSO_LF_GGRP_MISC_CNT 0x%" PRIx64 "\n",
+ otx2_read64(base + SSO_LF_GGRP_MISC_CNT));
+}
+
+static void
+otx2_sso_dump(struct rte_eventdev *event_dev, FILE *f)
+{
+ struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+ uint8_t queue;
+ uint8_t port;
+
+ fprintf(f, "[%s] SSO running in [%s] mode\n", __func__, dev->dual_ws ?
+ "dual_ws" : "single_ws");
+ /* Dump SSOW registers */
+ for (port = 0; port < dev->nb_event_ports; port++) {
+ if (dev->dual_ws) {
+ struct otx2_ssogws_dual *ws =
+ event_dev->data->ports[port];
+
+ fprintf(f, "[%s] SSO dual workslot[%d] vws[%d] dump\n",
+ __func__, port, 0);
+ ssogws_dump((struct otx2_ssogws *)&ws->ws_state[0], f);
+ fprintf(f, "[%s]SSO dual workslot[%d] vws[%d] dump\n",
+ __func__, port, 1);
+ ssogws_dump((struct otx2_ssogws *)&ws->ws_state[1], f);
+ } else {
+ fprintf(f, "[%s]SSO single workslot[%d] dump\n",
+ __func__, port);
+ ssogws_dump(event_dev->data->ports[port], f);
+ }
+ }
+
+ /* Dump SSO registers */
+ for (queue = 0; queue < dev->nb_event_queues; queue++) {
+ fprintf(f, "[%s]SSO group[%d] dump\n", __func__, queue);
+ if (dev->dual_ws) {
+ struct otx2_ssogws_dual *ws = event_dev->data->ports[0];
+ ssoggrp_dump(ws->grps_base[queue], f);
+ } else {
+ struct otx2_ssogws *ws = event_dev->data->ports[0];
+ ssoggrp_dump(ws->grps_base[queue], f);
+ }
+ }
+}
+
+static void
+otx2_handle_event(void *arg, struct rte_event event)
+{
+ struct rte_eventdev *event_dev = arg;
+
+ if (event_dev->dev_ops->dev_stop_flush != NULL)
+ event_dev->dev_ops->dev_stop_flush(event_dev->data->dev_id,
+ event, event_dev->data->dev_stop_flush_arg);
+}
+
+static void
+sso_qos_cfg(struct rte_eventdev *event_dev)
+{
+ struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+ struct sso_grp_qos_cfg *req;
+ uint16_t i;
+
+ for (i = 0; i < dev->qos_queue_cnt; i++) {
+ uint8_t xaq_prcnt = dev->qos_parse_data[i].xaq_prcnt;
+ uint8_t iaq_prcnt = dev->qos_parse_data[i].iaq_prcnt;
+ uint8_t taq_prcnt = dev->qos_parse_data[i].taq_prcnt;
+
+ if (dev->qos_parse_data[i].queue >= dev->nb_event_queues)
+ continue;
+
+ req = otx2_mbox_alloc_msg_sso_grp_qos_config(dev->mbox);
+ req->xaq_limit = (dev->nb_xaq_cfg *
+ (xaq_prcnt ? xaq_prcnt : 100)) / 100;
+ req->taq_thr = (SSO_HWGRP_IAQ_MAX_THR_MASK *
+ (iaq_prcnt ? iaq_prcnt : 100)) / 100;
+ req->iaq_thr = (SSO_HWGRP_TAQ_MAX_THR_MASK *
+ (taq_prcnt ? taq_prcnt : 100)) / 100;
+ }
+
+ if (dev->qos_queue_cnt)
+ otx2_mbox_process(dev->mbox);
+}
+
+static void
+sso_cleanup(struct rte_eventdev *event_dev, uint8_t enable)
+{
+ struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+ uint16_t i;
+
+ for (i = 0; i < dev->nb_event_ports; i++) {
+ if (dev->dual_ws) {
+ struct otx2_ssogws_dual *ws;
+
+ ws = event_dev->data->ports[i];
+ ssogws_reset((struct otx2_ssogws *)&ws->ws_state[0]);
+ ssogws_reset((struct otx2_ssogws *)&ws->ws_state[1]);
+ ws->swtag_req = 0;
+ ws->vws = 0;
+ ws->ws_state[0].cur_grp = 0;
+ ws->ws_state[0].cur_tt = SSO_SYNC_EMPTY;
+ ws->ws_state[1].cur_grp = 0;
+ ws->ws_state[1].cur_tt = SSO_SYNC_EMPTY;
+ } else {
+ struct otx2_ssogws *ws;
+
+ ws = event_dev->data->ports[i];
+ ssogws_reset(ws);
+ ws->swtag_req = 0;
+ ws->cur_grp = 0;
+ ws->cur_tt = SSO_SYNC_EMPTY;
+ }
+ }
+
+ rte_mb();
+ if (dev->dual_ws) {
+ struct otx2_ssogws_dual *ws = event_dev->data->ports[0];
+ struct otx2_ssogws temp_ws;
+
+ memcpy(&temp_ws, &ws->ws_state[0],
+ sizeof(struct otx2_ssogws_state));
+ for (i = 0; i < dev->nb_event_queues; i++) {
+ /* Consume all the events through HWS0 */
+ ssogws_flush_events(&temp_ws, i, ws->grps_base[i],
+ otx2_handle_event, event_dev);
+ /* Enable/Disable SSO GGRP */
+ otx2_write64(enable, ws->grps_base[i] +
+ SSO_LF_GGRP_QCTL);
+ }
+ ws->ws_state[0].cur_grp = 0;
+ ws->ws_state[0].cur_tt = SSO_SYNC_EMPTY;
+ } else {
+ struct otx2_ssogws *ws = event_dev->data->ports[0];
+
+ for (i = 0; i < dev->nb_event_queues; i++) {
+ /* Consume all the events through HWS0 */
+ ssogws_flush_events(ws, i, ws->grps_base[i],
+ otx2_handle_event, event_dev);
+ /* Enable/Disable SSO GGRP */
+ otx2_write64(enable, ws->grps_base[i] +
+ SSO_LF_GGRP_QCTL);
+ }
+ ws->cur_grp = 0;
+ ws->cur_tt = SSO_SYNC_EMPTY;
+ }
+
+ /* reset SSO GWS cache */
+ otx2_mbox_alloc_msg_sso_ws_cache_inv(dev->mbox);
+ otx2_mbox_process(dev->mbox);
+}
+
+int
+sso_xae_reconfigure(struct rte_eventdev *event_dev)
+{
+ struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+ struct rte_mempool *prev_xaq_pool;
+ int rc = 0;
+
+ if (event_dev->data->dev_started)
+ sso_cleanup(event_dev, 0);
+
+ prev_xaq_pool = dev->xaq_pool;
+ dev->xaq_pool = NULL;
+ rc = sso_xaq_allocate(dev);
+ if (rc < 0) {
+ otx2_err("Failed to alloc xaq pool %d", rc);
+ rte_mempool_free(prev_xaq_pool);
+ return rc;
+ }
+ rc = sso_ggrp_alloc_xaq(dev);
+ if (rc < 0) {
+ otx2_err("Failed to alloc xaq to ggrp %d", rc);
+ rte_mempool_free(prev_xaq_pool);
+ return rc;
+ }
+
+ rte_mempool_free(prev_xaq_pool);
+ rte_mb();
+ if (event_dev->data->dev_started)
+ sso_cleanup(event_dev, 1);
+
+ return 0;
+}
+
+static int
+otx2_sso_start(struct rte_eventdev *event_dev)
+{
+ sso_func_trace();
+ sso_qos_cfg(event_dev);
+ sso_cleanup(event_dev, 1);
+ sso_fastpath_fns_set(event_dev);
+
+ return 0;
+}
+
+static void
+otx2_sso_stop(struct rte_eventdev *event_dev)
+{
+ sso_func_trace();
+ sso_cleanup(event_dev, 0);
+ rte_mb();
+}
+
+static int
+otx2_sso_close(struct rte_eventdev *event_dev)
+{
+ struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+ uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
+ uint16_t i;
+
+ if (!dev->configured)
+ return 0;
+
+ sso_unregister_irqs(event_dev);
+
+ for (i = 0; i < dev->nb_event_queues; i++)
+ all_queues[i] = i;
+
+ for (i = 0; i < dev->nb_event_ports; i++)
+ otx2_sso_port_unlink(event_dev, event_dev->data->ports[i],
+ all_queues, dev->nb_event_queues);
+
+ sso_lf_teardown(dev, SSO_LF_GGRP);
+ sso_lf_teardown(dev, SSO_LF_GWS);
+ dev->nb_event_ports = 0;
+ dev->nb_event_queues = 0;
+ rte_mempool_free(dev->xaq_pool);
+ rte_memzone_free(rte_memzone_lookup(OTX2_SSO_FC_NAME));
+
+ return 0;
+}
+
+/* Initialize and register event driver with DPDK Application */
+static struct rte_eventdev_ops otx2_sso_ops = {
+ .dev_infos_get = otx2_sso_info_get,
+ .dev_configure = otx2_sso_configure,
+ .queue_def_conf = otx2_sso_queue_def_conf,
+ .queue_setup = otx2_sso_queue_setup,
+ .queue_release = otx2_sso_queue_release,
+ .port_def_conf = otx2_sso_port_def_conf,
+ .port_setup = otx2_sso_port_setup,
+ .port_release = otx2_sso_port_release,
+ .port_link = otx2_sso_port_link,
+ .port_unlink = otx2_sso_port_unlink,
+ .timeout_ticks = otx2_sso_timeout_ticks,
+
+ .eth_rx_adapter_caps_get = otx2_sso_rx_adapter_caps_get,
+ .eth_rx_adapter_queue_add = otx2_sso_rx_adapter_queue_add,
+ .eth_rx_adapter_queue_del = otx2_sso_rx_adapter_queue_del,
+ .eth_rx_adapter_start = otx2_sso_rx_adapter_start,
+ .eth_rx_adapter_stop = otx2_sso_rx_adapter_stop,
+
+ .eth_tx_adapter_caps_get = otx2_sso_tx_adapter_caps_get,
+ .eth_tx_adapter_queue_add = otx2_sso_tx_adapter_queue_add,
+ .eth_tx_adapter_queue_del = otx2_sso_tx_adapter_queue_del,
+
+ .timer_adapter_caps_get = otx2_tim_caps_get,
+
+ .xstats_get = otx2_sso_xstats_get,
+ .xstats_reset = otx2_sso_xstats_reset,
+ .xstats_get_names = otx2_sso_xstats_get_names,
+
+ .dump = otx2_sso_dump,
+ .dev_start = otx2_sso_start,
+ .dev_stop = otx2_sso_stop,
+ .dev_close = otx2_sso_close,
+ .dev_selftest = otx2_sso_selftest,
+};
+
+#define OTX2_SSO_XAE_CNT "xae_cnt"
+#define OTX2_SSO_SINGLE_WS "single_ws"
+#define OTX2_SSO_GGRP_QOS "qos"
+#define OTX2_SSO_SELFTEST "selftest"
+
+static void
+parse_queue_param(char *value, void *opaque)
+{
+ struct otx2_sso_qos queue_qos = {0};
+ uint8_t *val = (uint8_t *)&queue_qos;
+ struct otx2_sso_evdev *dev = opaque;
+ char *tok = strtok(value, "-");
+ struct otx2_sso_qos *old_ptr;
+
+ if (!strlen(value))
+ return;
+
+ while (tok != NULL) {
+ *val = atoi(tok);
+ tok = strtok(NULL, "-");
+ val++;
+ }
+
+ if (val != (&queue_qos.iaq_prcnt + 1)) {
+ otx2_err("Invalid QoS parameter expected [Qx-XAQ-TAQ-IAQ]");
+ return;
+ }
+
+ dev->qos_queue_cnt++;
+ old_ptr = dev->qos_parse_data;
+ dev->qos_parse_data = rte_realloc(dev->qos_parse_data,
+ sizeof(struct otx2_sso_qos) *
+ dev->qos_queue_cnt, 0);
+ if (dev->qos_parse_data == NULL) {
+ dev->qos_parse_data = old_ptr;
+ dev->qos_queue_cnt--;
+ return;
+ }
+ dev->qos_parse_data[dev->qos_queue_cnt - 1] = queue_qos;
+}
+
+static void
+parse_qos_list(const char *value, void *opaque)
+{
+ char *s = strdup(value);
+ char *start = NULL;
+ char *end = NULL;
+ char *f = s;
+
+ while (*s) {
+ if (*s == '[')
+ start = s;
+ else if (*s == ']')
+ end = s;
+
+ if (start && start < end) {
+ *end = 0;
+ parse_queue_param(start + 1, opaque);
+ s = end;
+ start = end;
+ }
+ s++;
+ }
+
+ free(f);
+}
+
+static int
+parse_sso_kvargs_dict(const char *key, const char *value, void *opaque)
+{
+ RTE_SET_USED(key);
+
+ /* Dict format [Qx-XAQ-TAQ-IAQ][Qz-XAQ-TAQ-IAQ] use '-' cause ','
+ * isn't allowed. Everything is expressed in percentages, 0 represents
+ * default.
+ */
+ parse_qos_list(value, opaque);
+
+ return 0;
+}
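/*
 * Illustration (the string is hypothetical, not from this patch): a devargs
 * value such as qos=[1-50-40-30][2-0-0-0] is parsed here into two
 * otx2_sso_qos entries: queue 1 limited to 50% XAQ, 40% TAQ and 30% IAQ,
 * and queue 2 left at the defaults, since 0 stands for "default" and is
 * treated as 100% when the limits are programmed in sso_qos_cfg().
 */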
+
+static void
+sso_parse_devargs(struct otx2_sso_evdev *dev, struct rte_devargs *devargs)
+{
+ struct rte_kvargs *kvlist;
+ uint8_t single_ws = 0;
+
+ if (devargs == NULL)
+ return;
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (kvlist == NULL)
+ return;
+
+ rte_kvargs_process(kvlist, OTX2_SSO_SELFTEST, &parse_kvargs_flag,
+ &dev->selftest);
+ rte_kvargs_process(kvlist, OTX2_SSO_XAE_CNT, &parse_kvargs_value,
+ &dev->xae_cnt);
+ rte_kvargs_process(kvlist, OTX2_SSO_SINGLE_WS, &parse_kvargs_flag,
+ &single_ws);
+ rte_kvargs_process(kvlist, OTX2_SSO_GGRP_QOS, &parse_sso_kvargs_dict,
+ dev);
+ otx2_parse_common_devargs(kvlist);
+ dev->dual_ws = !single_ws;
+ rte_kvargs_free(kvlist);
+}
+
+static int
+otx2_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+ return rte_event_pmd_pci_probe(pci_drv, pci_dev,
+ sizeof(struct otx2_sso_evdev),
+ otx2_sso_init);
+}
+
+static int
+otx2_sso_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_event_pmd_pci_remove(pci_dev, otx2_sso_fini);
+}
+
+static const struct rte_pci_id pci_sso_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RVU_SSO_TIM_PF)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct rte_pci_driver pci_sso = {
+ .id_table = pci_sso_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
+ .probe = otx2_sso_probe,
+ .remove = otx2_sso_remove,
+};
+
+int
+otx2_sso_init(struct rte_eventdev *event_dev)
+{
+ struct free_rsrcs_rsp *rsrc_cnt;
+ struct rte_pci_device *pci_dev;
+ struct otx2_sso_evdev *dev;
+ int rc;
+
+ event_dev->dev_ops = &otx2_sso_ops;
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ sso_fastpath_fns_set(event_dev);
+ return 0;
+ }
+
+ dev = sso_pmd_priv(event_dev);
+
+ pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);
+
+ /* Initialize the base otx2_dev object */
+ rc = otx2_dev_init(pci_dev, dev);
+ if (rc < 0) {
+ otx2_err("Failed to initialize otx2_dev rc=%d", rc);
+ goto error;
+ }
+
+ /* Get SSO and SSOW MSIX rsrc cnt */
+ otx2_mbox_alloc_msg_free_rsrc_cnt(dev->mbox);
+ rc = otx2_mbox_process_msg(dev->mbox, (void *)&rsrc_cnt);
+ if (rc < 0) {
+ otx2_err("Unable to get free rsrc count");
+ goto otx2_dev_uninit;
+ }
+ otx2_sso_dbg("SSO %d SSOW %d NPA %d provisioned", rsrc_cnt->sso,
+ rsrc_cnt->ssow, rsrc_cnt->npa);
+
+ dev->max_event_ports = RTE_MIN(rsrc_cnt->ssow, OTX2_SSO_MAX_VHWS);
+ dev->max_event_queues = RTE_MIN(rsrc_cnt->sso, OTX2_SSO_MAX_VHGRP);
+ /* Grab the NPA LF if required */
+ rc = otx2_npa_lf_init(pci_dev, dev);
+ if (rc < 0) {
+ otx2_err("Unable to init NPA lf. It might not be provisioned");
+ goto otx2_dev_uninit;
+ }
+
+ dev->drv_inited = true;
+ dev->is_timeout_deq = 0;
+ dev->min_dequeue_timeout_ns = USEC2NSEC(1);
+ dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);
+ dev->max_num_events = -1;
+ dev->nb_event_queues = 0;
+ dev->nb_event_ports = 0;
+
+ if (!dev->max_event_ports || !dev->max_event_queues) {
+ otx2_err("Not enough eventdev resource queues=%d ports=%d",
+ dev->max_event_queues, dev->max_event_ports);
+ rc = -ENODEV;
+ goto otx2_npa_lf_uninit;
+ }
+
+ dev->dual_ws = 1;
+ sso_parse_devargs(dev, pci_dev->device.devargs);
+ if (dev->dual_ws) {
+ otx2_sso_dbg("Using dual workslot mode");
+ dev->max_event_ports = dev->max_event_ports / 2;
+ } else {
+ otx2_sso_dbg("Using single workslot mode");
+ }
+
+ otx2_sso_pf_func_set(dev->pf_func);
+ otx2_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
+ event_dev->data->name, dev->max_event_queues,
+ dev->max_event_ports);
+ if (dev->selftest) {
+ event_dev->dev->driver = &pci_sso.driver;
+ event_dev->dev_ops->dev_selftest();
+ }
+
+ otx2_tim_init(pci_dev, (struct otx2_dev *)dev);
+
+ return 0;
+
+otx2_npa_lf_uninit:
+ otx2_npa_lf_fini();
+otx2_dev_uninit:
+ otx2_dev_fini(pci_dev, dev);
+error:
+ return rc;
+}
+
+int
+otx2_sso_fini(struct rte_eventdev *event_dev)
+{
+ struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+ struct rte_pci_device *pci_dev;
+
+ /* For secondary processes, nothing to be done */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);
+
+ if (!dev->drv_inited)
+ goto dev_fini;
+
+ dev->drv_inited = false;
+ otx2_npa_lf_fini();
+
+dev_fini:
+ if (otx2_npa_lf_active(dev)) {
+ otx2_info("Common resource in use by other devices");
+ return -EAGAIN;
+ }
+
+ otx2_tim_fini();
+ otx2_dev_fini(pci_dev, dev);
+
+ return 0;
+}
+
+RTE_PMD_REGISTER_PCI(event_octeontx2, pci_sso);
+RTE_PMD_REGISTER_PCI_TABLE(event_octeontx2, pci_sso_map);
+RTE_PMD_REGISTER_KMOD_DEP(event_octeontx2, "vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(event_octeontx2, OTX2_SSO_XAE_CNT "=<int>"
+ OTX2_SSO_SINGLE_WS "=1"
+ OTX2_SSO_GGRP_QOS "=<string>"
+ OTX2_SSO_SELFTEST "=1"
+ OTX2_NPA_LOCK_MASK "=<1-65535>");
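
Much of otx2_evdev.c above is sso_fastpath_fns_set(), which selects a specialized dequeue/Tx-enqueue routine from arrays indexed by !!(offloads & FLAG) so that every enabled offload combination resolves, at configure time, to a dedicated branch-free fast path. Reduced to two flags, the same lookup pattern looks like the standalone sketch below (the flag and function names are invented for illustration; they are not the driver's):

#include <stdint.h>
#include <stdio.h>

#define RX_OFFLOAD_RSS_F      (1ULL << 0)
#define RX_OFFLOAD_CHECKSUM_F (1ULL << 1)

typedef void (*deq_fn_t)(void);

static void deq_plain(void)    { puts("no offloads"); }
static void deq_rss(void)      { puts("rss"); }
static void deq_csum(void)     { puts("checksum"); }
static void deq_csum_rss(void) { puts("checksum + rss"); }

/* One specialized routine per offload combination, addressed by flag bits. */
static const deq_fn_t deq_fns[2][2] = {
	/* [checksum][rss] */
	[0][0] = deq_plain,
	[0][1] = deq_rss,
	[1][0] = deq_csum,
	[1][1] = deq_csum_rss,
};

static deq_fn_t
pick_dequeue(uint64_t rx_offloads)
{
	/* !! collapses each flag test to 0/1, exactly as the driver's tables do. */
	return deq_fns[!!(rx_offloads & RX_OFFLOAD_CHECKSUM_F)]
		      [!!(rx_offloads & RX_OFFLOAD_RSS_F)];
}

int
main(void)
{
	pick_dequeue(RX_OFFLOAD_RSS_F | RX_OFFLOAD_CHECKSUM_F)(); /* prints "checksum + rss" */
	return 0;
}

The xae_cnt, single_ws, qos and selftest strings registered just above are standard DPDK devargs: they are supplied as comma-separated key=value pairs together with the SSO PF's PCI address, with qos using the [Qx-XAQ-TAQ-IAQ] dictionary format documented in parse_sso_kvargs_dict().
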
diff --git a/src/spdk/dpdk/drivers/event/octeontx2/otx2_evdev.h b/src/spdk/dpdk/drivers/event/octeontx2/otx2_evdev.h
new file mode 100644
index 000000000..3b477820f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/octeontx2/otx2_evdev.h
@@ -0,0 +1,400 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_EVDEV_H__
+#define __OTX2_EVDEV_H__
+
+#include <rte_eventdev.h>
+#include <rte_eventdev_pmd.h>
+#include <rte_event_eth_rx_adapter.h>
+#include <rte_event_eth_tx_adapter.h>
+
+#include "otx2_common.h"
+#include "otx2_dev.h"
+#include "otx2_ethdev.h"
+#include "otx2_mempool.h"
+#include "otx2_tim_evdev.h"
+
+#define EVENTDEV_NAME_OCTEONTX2_PMD event_octeontx2
+
+#define sso_func_trace otx2_sso_dbg
+
+#define OTX2_SSO_MAX_VHGRP RTE_EVENT_MAX_QUEUES_PER_DEV
+#define OTX2_SSO_MAX_VHWS (UINT8_MAX)
+#define OTX2_SSO_FC_NAME "otx2_evdev_xaq_fc"
+#define OTX2_SSO_SQB_LIMIT (0x180)
+#define OTX2_SSO_XAQ_SLACK (8)
+#define OTX2_SSO_XAQ_CACHE_CNT (0x7)
+#define OTX2_SSO_WQE_SG_PTR (9)
+
+/* SSO LF register offsets (BAR2) */
+#define SSO_LF_GGRP_OP_ADD_WORK0 (0x0ull)
+#define SSO_LF_GGRP_OP_ADD_WORK1 (0x8ull)
+
+#define SSO_LF_GGRP_QCTL (0x20ull)
+#define SSO_LF_GGRP_EXE_DIS (0x80ull)
+#define SSO_LF_GGRP_INT (0x100ull)
+#define SSO_LF_GGRP_INT_W1S (0x108ull)
+#define SSO_LF_GGRP_INT_ENA_W1S (0x110ull)
+#define SSO_LF_GGRP_INT_ENA_W1C (0x118ull)
+#define SSO_LF_GGRP_INT_THR (0x140ull)
+#define SSO_LF_GGRP_INT_CNT (0x180ull)
+#define SSO_LF_GGRP_XAQ_CNT (0x1b0ull)
+#define SSO_LF_GGRP_AQ_CNT (0x1c0ull)
+#define SSO_LF_GGRP_AQ_THR (0x1e0ull)
+#define SSO_LF_GGRP_MISC_CNT (0x200ull)
+
+/* SSOW LF register offsets (BAR2) */
+#define SSOW_LF_GWS_LINKS (0x10ull)
+#define SSOW_LF_GWS_PENDWQP (0x40ull)
+#define SSOW_LF_GWS_PENDSTATE (0x50ull)
+#define SSOW_LF_GWS_NW_TIM (0x70ull)
+#define SSOW_LF_GWS_GRPMSK_CHG (0x80ull)
+#define SSOW_LF_GWS_INT (0x100ull)
+#define SSOW_LF_GWS_INT_W1S (0x108ull)
+#define SSOW_LF_GWS_INT_ENA_W1S (0x110ull)
+#define SSOW_LF_GWS_INT_ENA_W1C (0x118ull)
+#define SSOW_LF_GWS_TAG (0x200ull)
+#define SSOW_LF_GWS_WQP (0x210ull)
+#define SSOW_LF_GWS_SWTP (0x220ull)
+#define SSOW_LF_GWS_PENDTAG (0x230ull)
+#define SSOW_LF_GWS_OP_ALLOC_WE (0x400ull)
+#define SSOW_LF_GWS_OP_GET_WORK (0x600ull)
+#define SSOW_LF_GWS_OP_SWTAG_FLUSH (0x800ull)
+#define SSOW_LF_GWS_OP_SWTAG_UNTAG (0x810ull)
+#define SSOW_LF_GWS_OP_SWTP_CLR (0x820ull)
+#define SSOW_LF_GWS_OP_UPD_WQP_GRP0 (0x830ull)
+#define SSOW_LF_GWS_OP_UPD_WQP_GRP1 (0x838ull)
+#define SSOW_LF_GWS_OP_DESCHED (0x880ull)
+#define SSOW_LF_GWS_OP_DESCHED_NOSCH (0x8c0ull)
+#define SSOW_LF_GWS_OP_SWTAG_DESCHED (0x980ull)
+#define SSOW_LF_GWS_OP_SWTAG_NOSCHED (0x9c0ull)
+#define SSOW_LF_GWS_OP_CLR_NSCHED0 (0xa00ull)
+#define SSOW_LF_GWS_OP_CLR_NSCHED1 (0xa08ull)
+#define SSOW_LF_GWS_OP_SWTP_SET (0xc00ull)
+#define SSOW_LF_GWS_OP_SWTAG_NORM (0xc10ull)
+#define SSOW_LF_GWS_OP_SWTAG_FULL0 (0xc20ull)
+#define SSOW_LF_GWS_OP_SWTAG_FULL1 (0xc28ull)
+#define SSOW_LF_GWS_OP_GWC_INVAL (0xe00ull)
+
+#define OTX2_SSOW_GET_BASE_ADDR(_GW) ((_GW) - SSOW_LF_GWS_OP_GET_WORK)
+
+#define NSEC2USEC(__ns) ((__ns) / 1E3)
+#define USEC2NSEC(__us) ((__us) * 1E3)
+#define NSEC2TICK(__ns, __freq) (((__ns) * (__freq)) / 1E9)
+#define TICK2NSEC(__tck, __freq) (((__tck) * 1E9) / (__freq))
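+
+/*
+ * Worked example (illustrative frequency): with __freq = 100 MHz,
+ * NSEC2TICK(1000, 1E8) = (1000 * 1E8) / 1E9 = 100 ticks, and
+ * TICK2NSEC(100, 1E8) = (100 * 1E9) / 1E8 = 1000 ns.
+ */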
+
+enum otx2_sso_lf_type {
+ SSO_LF_GGRP,
+ SSO_LF_GWS
+};
+
+union otx2_sso_event {
+ uint64_t get_work0;
+ struct {
+ uint32_t flow_id:20;
+ uint32_t sub_event_type:8;
+ uint32_t event_type:4;
+ uint8_t op:2;
+ uint8_t rsvd:4;
+ uint8_t sched_type:2;
+ uint8_t queue_id;
+ uint8_t priority;
+ uint8_t impl_opaque;
+ };
+} __rte_aligned(64);
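+
+/*
+ * The bit-field layout above appears to mirror the rte_event ".event" word,
+ * so the 64-bit get_work0 value can be reinterpreted directly as event
+ * metadata (flow_id, sched_type, queue_id, ...).
+ */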
+
+enum {
+ SSO_SYNC_ORDERED,
+ SSO_SYNC_ATOMIC,
+ SSO_SYNC_UNTAGGED,
+ SSO_SYNC_EMPTY
+};
+
+struct otx2_sso_qos {
+ uint8_t queue;
+ uint8_t xaq_prcnt;
+ uint8_t taq_prcnt;
+ uint8_t iaq_prcnt;
+};
+
+struct otx2_sso_evdev {
+ OTX2_DEV; /* Base class */
+ uint8_t max_event_queues;
+ uint8_t max_event_ports;
+ uint8_t is_timeout_deq;
+ uint8_t nb_event_queues;
+ uint8_t nb_event_ports;
+ uint8_t configured;
+ uint32_t deq_tmo_ns;
+ uint32_t min_dequeue_timeout_ns;
+ uint32_t max_dequeue_timeout_ns;
+ int32_t max_num_events;
+ uint64_t *fc_mem;
+ uint64_t xaq_lmt;
+ uint64_t nb_xaq_cfg;
+ rte_iova_t fc_iova;
+ struct rte_mempool *xaq_pool;
+ uint64_t rx_offloads;
+ uint64_t tx_offloads;
+ uint64_t adptr_xae_cnt;
+ uint16_t rx_adptr_pool_cnt;
+ uint64_t *rx_adptr_pools;
+ uint16_t tim_adptr_ring_cnt;
+ uint16_t *timer_adptr_rings;
+ uint64_t *timer_adptr_sz;
+ /* Dev args */
+ uint8_t dual_ws;
+ uint8_t selftest;
+ uint32_t xae_cnt;
+ uint8_t qos_queue_cnt;
+ struct otx2_sso_qos *qos_parse_data;
+ /* HW const */
+ uint32_t xae_waes;
+ uint32_t xaq_buf_size;
+ uint32_t iue;
+ /* MSIX offsets */
+ uint16_t sso_msixoff[OTX2_SSO_MAX_VHGRP];
+ uint16_t ssow_msixoff[OTX2_SSO_MAX_VHWS];
+ /* PTP timestamp */
+ struct otx2_timesync_info *tstamp;
+} __rte_cache_aligned;
+
+#define OTX2_SSOGWS_OPS \
+ /* WS ops */ \
+ uintptr_t getwrk_op; \
+ uintptr_t tag_op; \
+ uintptr_t wqp_op; \
+ uintptr_t swtp_op; \
+ uintptr_t swtag_norm_op; \
+ uintptr_t swtag_desched_op; \
+ uint8_t cur_tt; \
+ uint8_t cur_grp
+
+/* Event port aka GWS */
+struct otx2_ssogws {
+ /* Get Work Fastpath data */
+ OTX2_SSOGWS_OPS;
+ uint8_t swtag_req;
+ void *lookup_mem;
+ uint8_t port;
+ /* Add Work Fastpath data */
+ uint64_t xaq_lmt __rte_cache_aligned;
+ uint64_t *fc_mem;
+ uintptr_t grps_base[OTX2_SSO_MAX_VHGRP];
+ /* PTP timestamp */
+ struct otx2_timesync_info *tstamp;
+} __rte_cache_aligned;
+
+struct otx2_ssogws_state {
+ OTX2_SSOGWS_OPS;
+};
+
+struct otx2_ssogws_dual {
+ /* Get Work Fastpath data */
+ struct otx2_ssogws_state ws_state[2]; /* Ping and Pong */
+ uint8_t swtag_req;
+ uint8_t vws; /* Ping pong bit */
+ void *lookup_mem;
+ uint8_t port;
+ /* Add Work Fastpath data */
+ uint64_t xaq_lmt __rte_cache_aligned;
+ uint64_t *fc_mem;
+ uintptr_t grps_base[OTX2_SSO_MAX_VHGRP];
+ /* PTP timestamp */
+ struct otx2_timesync_info *tstamp;
+} __rte_cache_aligned;
+
+static inline struct otx2_sso_evdev *
+sso_pmd_priv(const struct rte_eventdev *event_dev)
+{
+ return event_dev->data->dev_private;
+}
+
+static const union mbuf_initializer mbuf_init = {
+ .fields = {
+ .data_off = RTE_PKTMBUF_HEADROOM,
+ .refcnt = 1,
+ .nb_segs = 1,
+ .port = 0
+ }
+};
+
+static __rte_always_inline void
+otx2_wqe_to_mbuf(uint64_t get_work1, const uint64_t mbuf, uint8_t port_id,
+ const uint32_t tag, const uint32_t flags,
+ const void * const lookup_mem)
+{
+ struct nix_wqe_hdr_s *wqe = (struct nix_wqe_hdr_s *)get_work1;
+ uint64_t val = mbuf_init.value | (uint64_t)port_id << 48;
+
+ if (flags & NIX_RX_OFFLOAD_TSTAMP_F)
+ val |= NIX_TIMESYNC_RX_OFFSET;
+
+ otx2_nix_cqe_to_mbuf((struct nix_cqe_hdr_s *)wqe, tag,
+ (struct rte_mbuf *)mbuf, lookup_mem,
+ val, flags);
+}
+
+static inline int
+parse_kvargs_flag(const char *key, const char *value, void *opaque)
+{
+ RTE_SET_USED(key);
+
+ *(uint8_t *)opaque = !!atoi(value);
+ return 0;
+}
+
+static inline int
+parse_kvargs_value(const char *key, const char *value, void *opaque)
+{
+ RTE_SET_USED(key);
+
+ *(uint32_t *)opaque = (uint32_t)atoi(value);
+ return 0;
+}
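+
+/*
+ * Illustrative usage sketch (kvlist and dev are placeholder names): these
+ * helpers are intended to be passed to rte_kvargs_process(), e.g.
+ *   rte_kvargs_process(kvlist, OTX2_SSO_XAE_CNT, &parse_kvargs_value,
+ *		        &dev->xae_cnt);
+ */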
+
+#define SSO_RX_ADPTR_ENQ_FASTPATH_FUNC NIX_RX_FASTPATH_MODES
+#define SSO_TX_ADPTR_ENQ_FASTPATH_FUNC NIX_TX_FASTPATH_MODES
+
+/* Single WS API's */
+uint16_t otx2_ssogws_enq(void *port, const struct rte_event *ev);
+uint16_t otx2_ssogws_enq_burst(void *port, const struct rte_event ev[],
+ uint16_t nb_events);
+uint16_t otx2_ssogws_enq_new_burst(void *port, const struct rte_event ev[],
+ uint16_t nb_events);
+uint16_t otx2_ssogws_enq_fwd_burst(void *port, const struct rte_event ev[],
+ uint16_t nb_events);
+
+/* Dual WS API's */
+uint16_t otx2_ssogws_dual_enq(void *port, const struct rte_event *ev);
+uint16_t otx2_ssogws_dual_enq_burst(void *port, const struct rte_event ev[],
+ uint16_t nb_events);
+uint16_t otx2_ssogws_dual_enq_new_burst(void *port, const struct rte_event ev[],
+ uint16_t nb_events);
+uint16_t otx2_ssogws_dual_enq_fwd_burst(void *port, const struct rte_event ev[],
+ uint16_t nb_events);
+
+/* Auto generated API's */
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+uint16_t otx2_ssogws_deq_ ##name(void *port, struct rte_event *ev, \
+ uint64_t timeout_ticks); \
+uint16_t otx2_ssogws_deq_burst_ ##name(void *port, struct rte_event ev[], \
+ uint16_t nb_events, \
+ uint64_t timeout_ticks); \
+uint16_t otx2_ssogws_deq_timeout_ ##name(void *port, \
+ struct rte_event *ev, \
+ uint64_t timeout_ticks); \
+uint16_t otx2_ssogws_deq_timeout_burst_ ##name(void *port, \
+ struct rte_event ev[], \
+ uint16_t nb_events, \
+ uint64_t timeout_ticks); \
+uint16_t otx2_ssogws_deq_seg_ ##name(void *port, struct rte_event *ev, \
+ uint64_t timeout_ticks); \
+uint16_t otx2_ssogws_deq_seg_burst_ ##name(void *port, \
+ struct rte_event ev[], \
+ uint16_t nb_events, \
+ uint64_t timeout_ticks); \
+uint16_t otx2_ssogws_deq_seg_timeout_ ##name(void *port, \
+ struct rte_event *ev, \
+ uint64_t timeout_ticks); \
+uint16_t otx2_ssogws_deq_seg_timeout_burst_ ##name(void *port, \
+ struct rte_event ev[], \
+ uint16_t nb_events, \
+ uint64_t timeout_ticks); \
+ \
+uint16_t otx2_ssogws_dual_deq_ ##name(void *port, struct rte_event *ev, \
+ uint64_t timeout_ticks); \
+uint16_t otx2_ssogws_dual_deq_burst_ ##name(void *port, \
+ struct rte_event ev[], \
+ uint16_t nb_events, \
+ uint64_t timeout_ticks); \
+uint16_t otx2_ssogws_dual_deq_timeout_ ##name(void *port, \
+ struct rte_event *ev, \
+ uint64_t timeout_ticks); \
+uint16_t otx2_ssogws_dual_deq_timeout_burst_ ##name(void *port, \
+ struct rte_event ev[], \
+ uint16_t nb_events, \
+ uint64_t timeout_ticks); \
+uint16_t otx2_ssogws_dual_deq_seg_ ##name(void *port, struct rte_event *ev, \
+ uint64_t timeout_ticks); \
+uint16_t otx2_ssogws_dual_deq_seg_burst_ ##name(void *port, \
+ struct rte_event ev[], \
+ uint16_t nb_events, \
+ uint64_t timeout_ticks); \
+uint16_t otx2_ssogws_dual_deq_seg_timeout_ ##name(void *port, \
+ struct rte_event *ev, \
+ uint64_t timeout_ticks); \
+uint16_t otx2_ssogws_dual_deq_seg_timeout_burst_ ##name(void *port, \
+ struct rte_event ev[], \
+ uint16_t nb_events, \
+ uint64_t timeout_ticks);\
+
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
+uint16_t otx2_ssogws_tx_adptr_enq_ ## name(void *port, struct rte_event ev[],\
+ uint16_t nb_events); \
+uint16_t otx2_ssogws_tx_adptr_enq_seg_ ## name(void *port, \
+ struct rte_event ev[], \
+ uint16_t nb_events); \
+uint16_t otx2_ssogws_dual_tx_adptr_enq_ ## name(void *port, \
+ struct rte_event ev[], \
+ uint16_t nb_events); \
+uint16_t otx2_ssogws_dual_tx_adptr_enq_seg_ ## name(void *port, \
+ struct rte_event ev[], \
+ uint16_t nb_events); \
+
+SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+#undef T
+
+void sso_updt_xae_cnt(struct otx2_sso_evdev *dev, void *data,
+ uint32_t event_type);
+int sso_xae_reconfigure(struct rte_eventdev *event_dev);
+void sso_fastpath_fns_set(struct rte_eventdev *event_dev);
+
+int otx2_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev,
+ uint32_t *caps);
+int otx2_sso_rx_adapter_queue_add(const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
+int otx2_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id);
+int otx2_sso_rx_adapter_start(const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev);
+int otx2_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev);
+int otx2_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ uint32_t *caps);
+int otx2_sso_tx_adapter_queue_add(uint8_t id,
+ const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t tx_queue_id);
+
+int otx2_sso_tx_adapter_queue_del(uint8_t id,
+ const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t tx_queue_id);
+
+/* Clean up API's */
+typedef void (*otx2_handle_event_t)(void *arg, struct rte_event ev);
+void ssogws_flush_events(struct otx2_ssogws *ws, uint8_t queue_id,
+ uintptr_t base, otx2_handle_event_t fn, void *arg);
+void ssogws_reset(struct otx2_ssogws *ws);
+/* Selftest */
+int otx2_sso_selftest(void);
+/* Init and Fini API's */
+int otx2_sso_init(struct rte_eventdev *event_dev);
+int otx2_sso_fini(struct rte_eventdev *event_dev);
+/* IRQ handlers */
+int sso_register_irqs(const struct rte_eventdev *event_dev);
+void sso_unregister_irqs(const struct rte_eventdev *event_dev);
+
+#endif /* __OTX2_EVDEV_H__ */
diff --git a/src/spdk/dpdk/drivers/event/octeontx2/otx2_evdev_adptr.c b/src/spdk/dpdk/drivers/event/octeontx2/otx2_evdev_adptr.c
new file mode 100644
index 000000000..8bdcfa3ea
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/octeontx2/otx2_evdev_adptr.c
@@ -0,0 +1,492 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include "otx2_evdev.h"
+
+int
+otx2_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev, uint32_t *caps)
+{
+ int rc;
+
+ RTE_SET_USED(event_dev);
+ rc = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
+ if (rc)
+ *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
+ else
+ *caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
+ RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ;
+
+ return 0;
+}
+
+static inline int
+sso_rxq_enable(struct otx2_eth_dev *dev, uint16_t qid, uint8_t tt, uint8_t ggrp,
+ uint16_t eth_port_id)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_aq_enq_req *aq;
+ int rc;
+
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ aq->qidx = qid;
+ aq->ctype = NIX_AQ_CTYPE_CQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ aq->cq.ena = 0;
+ aq->cq.caching = 0;
+
+ otx2_mbox_memset(&aq->cq_mask, 0, sizeof(struct nix_cq_ctx_s));
+ aq->cq_mask.ena = ~(aq->cq_mask.ena);
+ aq->cq_mask.caching = ~(aq->cq_mask.caching);
+
+ rc = otx2_mbox_process(mbox);
+ if (rc < 0) {
+ otx2_err("Failed to disable cq context");
+ goto fail;
+ }
+
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ aq->qidx = qid;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ aq->rq.sso_ena = 1;
+ aq->rq.sso_tt = tt;
+ aq->rq.sso_grp = ggrp;
+ aq->rq.ena_wqwd = 1;
+	/* Mbuf header generation:
+	 * > FIRST_SKIP is a superset of WQE_SKIP; don't modify FIRST_SKIP as
+	 *   it already accounts for the mbuf size, headroom and private area.
+	 * > Using WQE_SKIP we can directly assign
+	 *   mbuf = wqe - sizeof(struct mbuf);
+	 * so that the mbuf header does not hold unpredictable values, while
+	 * the headroom and private data start at the beginning of wqe_data.
+ */
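+	/*
+	 * Illustrative layout (assuming a 128-byte struct rte_mbuf and a
+	 * 128-byte WQE_SKIP unit): wqe_skip = 1 leaves exactly one mbuf-sized
+	 * hole in front of the WQE, so wqe - sizeof(struct rte_mbuf) lands on
+	 * a valid mbuf header.
+	 */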
+ aq->rq.wqe_skip = 1;
+ aq->rq.wqe_caching = 1;
+ aq->rq.spb_ena = 0;
+ aq->rq.flow_tagw = 20; /* 20-bits */
+
+ /* Flow Tag calculation :
+ *
+ * rq_tag <31:24> = good/bad_tag<8:0>;
+ * rq_tag <23:0> = [ltag]
+ *
+ * flow_tag_mask<31:0> = (1 << flow_tagw) - 1; <31:20>
+ * tag<31:0> = (~flow_tag_mask & rq_tag) | (flow_tag_mask & flow_tag);
+ *
+ * Setup :
+ * ltag<23:0> = (eth_port_id & 0xF) << 20;
+ * good/bad_tag<8:0> =
+ * ((eth_port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV << 4);
+ *
+ * TAG<31:0> on getwork = <31:28>(RTE_EVENT_TYPE_ETHDEV) |
+	 * <27:20> (eth_port_id) | <19:0> [TAG]
+ */
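+	/*
+	 * Worked example (illustrative, assuming RTE_EVENT_TYPE_ETHDEV == 0):
+	 * for eth_port_id = 0x12, ltag = 0x2 << 20 = 0x200000 and
+	 * good/bad_utag = 0x1, so the TAG seen on getwork is
+	 * 0x01200000 | flow_tag<19:0>; bits <27:20> carry port 0x12.
+	 */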
+
+ aq->rq.ltag = (eth_port_id & 0xF) << 20;
+ aq->rq.good_utag = ((eth_port_id >> 4) & 0xF) |
+ (RTE_EVENT_TYPE_ETHDEV << 4);
+ aq->rq.bad_utag = aq->rq.good_utag;
+
+ aq->rq.ena = 0; /* Don't enable RQ yet */
+ aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
+ aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
+
+ otx2_mbox_memset(&aq->rq_mask, 0, sizeof(struct nix_rq_ctx_s));
+ /* mask the bits to write. */
+ aq->rq_mask.sso_ena = ~(aq->rq_mask.sso_ena);
+ aq->rq_mask.sso_tt = ~(aq->rq_mask.sso_tt);
+ aq->rq_mask.sso_grp = ~(aq->rq_mask.sso_grp);
+ aq->rq_mask.ena_wqwd = ~(aq->rq_mask.ena_wqwd);
+ aq->rq_mask.wqe_skip = ~(aq->rq_mask.wqe_skip);
+ aq->rq_mask.wqe_caching = ~(aq->rq_mask.wqe_caching);
+ aq->rq_mask.spb_ena = ~(aq->rq_mask.spb_ena);
+ aq->rq_mask.flow_tagw = ~(aq->rq_mask.flow_tagw);
+ aq->rq_mask.ltag = ~(aq->rq_mask.ltag);
+ aq->rq_mask.good_utag = ~(aq->rq_mask.good_utag);
+ aq->rq_mask.bad_utag = ~(aq->rq_mask.bad_utag);
+ aq->rq_mask.ena = ~(aq->rq_mask.ena);
+ aq->rq_mask.pb_caching = ~(aq->rq_mask.pb_caching);
+ aq->rq_mask.xqe_imm_size = ~(aq->rq_mask.xqe_imm_size);
+
+ rc = otx2_mbox_process(mbox);
+ if (rc < 0) {
+ otx2_err("Failed to init rx adapter context");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ return rc;
+}
+
+static inline int
+sso_rxq_disable(struct otx2_eth_dev *dev, uint16_t qid)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_aq_enq_req *aq;
+ int rc;
+
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ aq->qidx = qid;
+ aq->ctype = NIX_AQ_CTYPE_CQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ aq->cq.ena = 1;
+ aq->cq.caching = 1;
+
+ otx2_mbox_memset(&aq->cq_mask, 0, sizeof(struct nix_cq_ctx_s));
+ aq->cq_mask.ena = ~(aq->cq_mask.ena);
+ aq->cq_mask.caching = ~(aq->cq_mask.caching);
+
+ rc = otx2_mbox_process(mbox);
+ if (rc < 0) {
+ otx2_err("Failed to enable cq context");
+ goto fail;
+ }
+
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ aq->qidx = qid;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ aq->rq.sso_ena = 0;
+ aq->rq.sso_tt = SSO_TT_UNTAGGED;
+ aq->rq.sso_grp = 0;
+ aq->rq.ena_wqwd = 0;
+ aq->rq.wqe_caching = 0;
+ aq->rq.wqe_skip = 0;
+ aq->rq.spb_ena = 0;
+ aq->rq.flow_tagw = 0x20;
+ aq->rq.ltag = 0;
+ aq->rq.good_utag = 0;
+ aq->rq.bad_utag = 0;
+ aq->rq.ena = 1;
+ aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
+ aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
+
+ otx2_mbox_memset(&aq->rq_mask, 0, sizeof(struct nix_rq_ctx_s));
+ /* mask the bits to write. */
+ aq->rq_mask.sso_ena = ~(aq->rq_mask.sso_ena);
+ aq->rq_mask.sso_tt = ~(aq->rq_mask.sso_tt);
+ aq->rq_mask.sso_grp = ~(aq->rq_mask.sso_grp);
+ aq->rq_mask.ena_wqwd = ~(aq->rq_mask.ena_wqwd);
+ aq->rq_mask.wqe_caching = ~(aq->rq_mask.wqe_caching);
+ aq->rq_mask.wqe_skip = ~(aq->rq_mask.wqe_skip);
+ aq->rq_mask.spb_ena = ~(aq->rq_mask.spb_ena);
+ aq->rq_mask.flow_tagw = ~(aq->rq_mask.flow_tagw);
+ aq->rq_mask.ltag = ~(aq->rq_mask.ltag);
+ aq->rq_mask.good_utag = ~(aq->rq_mask.good_utag);
+ aq->rq_mask.bad_utag = ~(aq->rq_mask.bad_utag);
+ aq->rq_mask.ena = ~(aq->rq_mask.ena);
+ aq->rq_mask.pb_caching = ~(aq->rq_mask.pb_caching);
+ aq->rq_mask.xqe_imm_size = ~(aq->rq_mask.xqe_imm_size);
+
+ rc = otx2_mbox_process(mbox);
+ if (rc < 0) {
+ otx2_err("Failed to clear rx adapter context");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ return rc;
+}
+
+void
+sso_updt_xae_cnt(struct otx2_sso_evdev *dev, void *data, uint32_t event_type)
+{
+ int i;
+
+ switch (event_type) {
+ case RTE_EVENT_TYPE_ETHDEV:
+ {
+ struct otx2_eth_rxq *rxq = data;
+ uint64_t *old_ptr;
+
+ for (i = 0; i < dev->rx_adptr_pool_cnt; i++) {
+ if ((uint64_t)rxq->pool == dev->rx_adptr_pools[i])
+ return;
+ }
+
+ dev->rx_adptr_pool_cnt++;
+ old_ptr = dev->rx_adptr_pools;
+ dev->rx_adptr_pools = rte_realloc(dev->rx_adptr_pools,
+ sizeof(uint64_t) *
+ dev->rx_adptr_pool_cnt, 0);
+ if (dev->rx_adptr_pools == NULL) {
+ dev->adptr_xae_cnt += rxq->pool->size;
+ dev->rx_adptr_pools = old_ptr;
+ dev->rx_adptr_pool_cnt--;
+ return;
+ }
+ dev->rx_adptr_pools[dev->rx_adptr_pool_cnt - 1] =
+ (uint64_t)rxq->pool;
+
+ dev->adptr_xae_cnt += rxq->pool->size;
+ break;
+ }
+ case RTE_EVENT_TYPE_TIMER:
+ {
+ struct otx2_tim_ring *timr = data;
+ uint16_t *old_ring_ptr;
+ uint64_t *old_sz_ptr;
+
+ for (i = 0; i < dev->tim_adptr_ring_cnt; i++) {
+ if (timr->ring_id != dev->timer_adptr_rings[i])
+ continue;
+ if (timr->nb_timers == dev->timer_adptr_sz[i])
+ return;
+ dev->adptr_xae_cnt -= dev->timer_adptr_sz[i];
+ dev->adptr_xae_cnt += timr->nb_timers;
+ dev->timer_adptr_sz[i] = timr->nb_timers;
+
+ return;
+ }
+
+ dev->tim_adptr_ring_cnt++;
+ old_ring_ptr = dev->timer_adptr_rings;
+ old_sz_ptr = dev->timer_adptr_sz;
+
+ dev->timer_adptr_rings = rte_realloc(dev->timer_adptr_rings,
+ sizeof(uint16_t) *
+ dev->tim_adptr_ring_cnt,
+ 0);
+ if (dev->timer_adptr_rings == NULL) {
+ dev->adptr_xae_cnt += timr->nb_timers;
+ dev->timer_adptr_rings = old_ring_ptr;
+ dev->tim_adptr_ring_cnt--;
+ return;
+ }
+
+ dev->timer_adptr_sz = rte_realloc(dev->timer_adptr_sz,
+ sizeof(uint64_t) *
+ dev->tim_adptr_ring_cnt,
+ 0);
+
+ if (dev->timer_adptr_sz == NULL) {
+ dev->adptr_xae_cnt += timr->nb_timers;
+ dev->timer_adptr_sz = old_sz_ptr;
+ dev->tim_adptr_ring_cnt--;
+ return;
+ }
+
+ dev->timer_adptr_rings[dev->tim_adptr_ring_cnt - 1] =
+ timr->ring_id;
+ dev->timer_adptr_sz[dev->tim_adptr_ring_cnt - 1] =
+ timr->nb_timers;
+
+ dev->adptr_xae_cnt += timr->nb_timers;
+ break;
+ }
+ default:
+ break;
+ }
+}
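+
+/*
+ * Note: the adptr_xae_cnt accumulated above appears to be consumed by
+ * sso_xae_reconfigure() to resize the XAQ pool whenever an adapter adds a
+ * new mempool or timer ring.
+ */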
+
+static inline void
+sso_updt_lookup_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
+{
+ struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+ int i;
+
+ for (i = 0; i < dev->nb_event_ports; i++) {
+ if (dev->dual_ws) {
+ struct otx2_ssogws_dual *ws = event_dev->data->ports[i];
+
+ ws->lookup_mem = lookup_mem;
+ } else {
+ struct otx2_ssogws *ws = event_dev->data->ports[i];
+
+ ws->lookup_mem = lookup_mem;
+ }
+ }
+}
+
+int
+otx2_sso_rx_adapter_queue_add(const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+ struct otx2_eth_dev *otx2_eth_dev = eth_dev->data->dev_private;
+ struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+ uint16_t port = eth_dev->data->port_id;
+ struct otx2_eth_rxq *rxq;
+ int i, rc;
+
+ rc = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
+ if (rc)
+ return -EINVAL;
+
+ if (rx_queue_id < 0) {
+ for (i = 0 ; i < eth_dev->data->nb_rx_queues; i++) {
+ rxq = eth_dev->data->rx_queues[i];
+ sso_updt_xae_cnt(dev, rxq, RTE_EVENT_TYPE_ETHDEV);
+ rc = sso_xae_reconfigure((struct rte_eventdev *)
+ (uintptr_t)event_dev);
+ rc |= sso_rxq_enable(otx2_eth_dev, i,
+ queue_conf->ev.sched_type,
+ queue_conf->ev.queue_id, port);
+ }
+ rxq = eth_dev->data->rx_queues[0];
+ sso_updt_lookup_mem(event_dev, rxq->lookup_mem);
+ } else {
+ rxq = eth_dev->data->rx_queues[rx_queue_id];
+ sso_updt_xae_cnt(dev, rxq, RTE_EVENT_TYPE_ETHDEV);
+ rc = sso_xae_reconfigure((struct rte_eventdev *)
+ (uintptr_t)event_dev);
+ rc |= sso_rxq_enable(otx2_eth_dev, (uint16_t)rx_queue_id,
+ queue_conf->ev.sched_type,
+ queue_conf->ev.queue_id, port);
+ sso_updt_lookup_mem(event_dev, rxq->lookup_mem);
+ }
+
+ if (rc < 0) {
+ otx2_err("Failed to configure Rx adapter port=%d, q=%d", port,
+ queue_conf->ev.queue_id);
+ return rc;
+ }
+
+ dev->rx_offloads |= otx2_eth_dev->rx_offload_flags;
+ dev->tstamp = &otx2_eth_dev->tstamp;
+ sso_fastpath_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
+
+ return 0;
+}
+
+int
+otx2_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id)
+{
+ struct otx2_eth_dev *dev = eth_dev->data->dev_private;
+ int i, rc;
+
+ RTE_SET_USED(event_dev);
+ rc = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
+ if (rc)
+ return -EINVAL;
+
+ if (rx_queue_id < 0) {
+ for (i = 0 ; i < eth_dev->data->nb_rx_queues; i++)
+ rc = sso_rxq_disable(dev, i);
+ } else {
+ rc = sso_rxq_disable(dev, (uint16_t)rx_queue_id);
+ }
+
+ if (rc < 0)
+ otx2_err("Failed to clear Rx adapter config port=%d, q=%d",
+ eth_dev->data->port_id, rx_queue_id);
+
+ return rc;
+}
+
+int
+otx2_sso_rx_adapter_start(const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev)
+{
+ RTE_SET_USED(event_dev);
+ RTE_SET_USED(eth_dev);
+
+ return 0;
+}
+
+int
+otx2_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev)
+{
+ RTE_SET_USED(event_dev);
+ RTE_SET_USED(eth_dev);
+
+ return 0;
+}
+
+int
+otx2_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev, uint32_t *caps)
+{
+ int ret;
+
+ RTE_SET_USED(dev);
+	ret = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
+ if (ret)
+ *caps = 0;
+ else
+ *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
+
+ return 0;
+}
+
+static int
+sso_sqb_aura_limit_edit(struct rte_mempool *mp, uint16_t nb_sqb_bufs)
+{
+ struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
+ struct npa_aq_enq_req *aura_req;
+
+ aura_req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+ aura_req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
+ aura_req->ctype = NPA_AQ_CTYPE_AURA;
+ aura_req->op = NPA_AQ_INSTOP_WRITE;
+
+ aura_req->aura.limit = nb_sqb_bufs;
+ aura_req->aura_mask.limit = ~(aura_req->aura_mask.limit);
+
+ return otx2_mbox_process(npa_lf->mbox);
+}
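+
+/*
+ * When a Tx queue is bound to the Tx adapter its SQB aura limit is tightened
+ * to OTX2_SSO_SQB_LIMIT, and it is restored to the queue's own nb_sqb_bufs on
+ * removal (see the queue add/del handlers below).
+ */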
+
+int
+otx2_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t tx_queue_id)
+{
+ struct otx2_eth_dev *otx2_eth_dev = eth_dev->data->dev_private;
+ struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+ struct otx2_eth_txq *txq;
+ int i;
+
+ RTE_SET_USED(id);
+ if (tx_queue_id < 0) {
+ for (i = 0 ; i < eth_dev->data->nb_tx_queues; i++) {
+ txq = eth_dev->data->tx_queues[i];
+ sso_sqb_aura_limit_edit(txq->sqb_pool,
+ OTX2_SSO_SQB_LIMIT);
+ }
+ } else {
+ txq = eth_dev->data->tx_queues[tx_queue_id];
+ sso_sqb_aura_limit_edit(txq->sqb_pool, OTX2_SSO_SQB_LIMIT);
+ }
+
+ dev->tx_offloads |= otx2_eth_dev->tx_offload_flags;
+ sso_fastpath_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
+
+ return 0;
+}
+
+int
+otx2_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t tx_queue_id)
+{
+ struct otx2_eth_txq *txq;
+ int i;
+
+ RTE_SET_USED(id);
+ RTE_SET_USED(event_dev);
+ if (tx_queue_id < 0) {
+ for (i = 0 ; i < eth_dev->data->nb_tx_queues; i++) {
+ txq = eth_dev->data->tx_queues[i];
+ sso_sqb_aura_limit_edit(txq->sqb_pool,
+ txq->nb_sqb_bufs);
+ }
+ } else {
+ txq = eth_dev->data->tx_queues[tx_queue_id];
+ sso_sqb_aura_limit_edit(txq->sqb_pool, txq->nb_sqb_bufs);
+ }
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/event/octeontx2/otx2_evdev_irq.c b/src/spdk/dpdk/drivers/event/octeontx2/otx2_evdev_irq.c
new file mode 100644
index 000000000..a2033646e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/octeontx2/otx2_evdev_irq.c
@@ -0,0 +1,272 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include "otx2_evdev.h"
+#include "otx2_tim_evdev.h"
+
+static void
+sso_lf_irq(void *param)
+{
+ uintptr_t base = (uintptr_t)param;
+ uint64_t intr;
+ uint8_t ggrp;
+
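+	/*
+	 * BAR2 LF slots appear to be laid out as (block << 20 | lf << 12),
+	 * so bits <19:12> of the base address identify the GGRP LF (see the
+	 * base computation in sso_register_irqs()).
+	 */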
+ ggrp = (base >> 12) & 0xFF;
+
+ intr = otx2_read64(base + SSO_LF_GGRP_INT);
+ if (intr == 0)
+ return;
+
+ otx2_err("GGRP %d GGRP_INT=0x%" PRIx64 "", ggrp, intr);
+
+ /* Clear interrupt */
+ otx2_write64(intr, base + SSO_LF_GGRP_INT);
+}
+
+static int
+sso_lf_register_irq(const struct rte_eventdev *event_dev, uint16_t ggrp_msixoff,
+ uintptr_t base)
+{
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(event_dev->dev);
+ struct rte_intr_handle *handle = &pci_dev->intr_handle;
+ int rc, vec;
+
+ vec = ggrp_msixoff + SSO_LF_INT_VEC_GRP;
+
+ /* Clear err interrupt */
+ otx2_write64(~0ull, base + SSO_LF_GGRP_INT_ENA_W1C);
+ /* Set used interrupt vectors */
+ rc = otx2_register_irq(handle, sso_lf_irq, (void *)base, vec);
+ /* Enable hw interrupt */
+ otx2_write64(~0ull, base + SSO_LF_GGRP_INT_ENA_W1S);
+
+ return rc;
+}
+
+static void
+ssow_lf_irq(void *param)
+{
+ uintptr_t base = (uintptr_t)param;
+ uint8_t gws = (base >> 12) & 0xFF;
+ uint64_t intr;
+
+ intr = otx2_read64(base + SSOW_LF_GWS_INT);
+ if (intr == 0)
+ return;
+
+ otx2_err("GWS %d GWS_INT=0x%" PRIx64 "", gws, intr);
+
+ /* Clear interrupt */
+ otx2_write64(intr, base + SSOW_LF_GWS_INT);
+}
+
+static int
+ssow_lf_register_irq(const struct rte_eventdev *event_dev, uint16_t gws_msixoff,
+ uintptr_t base)
+{
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(event_dev->dev);
+ struct rte_intr_handle *handle = &pci_dev->intr_handle;
+ int rc, vec;
+
+ vec = gws_msixoff + SSOW_LF_INT_VEC_IOP;
+
+ /* Clear err interrupt */
+ otx2_write64(~0ull, base + SSOW_LF_GWS_INT_ENA_W1C);
+ /* Set used interrupt vectors */
+ rc = otx2_register_irq(handle, ssow_lf_irq, (void *)base, vec);
+ /* Enable hw interrupt */
+ otx2_write64(~0ull, base + SSOW_LF_GWS_INT_ENA_W1S);
+
+ return rc;
+}
+
+static void
+sso_lf_unregister_irq(const struct rte_eventdev *event_dev,
+ uint16_t ggrp_msixoff, uintptr_t base)
+{
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(event_dev->dev);
+ struct rte_intr_handle *handle = &pci_dev->intr_handle;
+ int vec;
+
+ vec = ggrp_msixoff + SSO_LF_INT_VEC_GRP;
+
+ /* Clear err interrupt */
+ otx2_write64(~0ull, base + SSO_LF_GGRP_INT_ENA_W1C);
+ otx2_unregister_irq(handle, sso_lf_irq, (void *)base, vec);
+}
+
+static void
+ssow_lf_unregister_irq(const struct rte_eventdev *event_dev,
+ uint16_t gws_msixoff, uintptr_t base)
+{
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(event_dev->dev);
+ struct rte_intr_handle *handle = &pci_dev->intr_handle;
+ int vec;
+
+ vec = gws_msixoff + SSOW_LF_INT_VEC_IOP;
+
+ /* Clear err interrupt */
+ otx2_write64(~0ull, base + SSOW_LF_GWS_INT_ENA_W1C);
+ otx2_unregister_irq(handle, ssow_lf_irq, (void *)base, vec);
+}
+
+int
+sso_register_irqs(const struct rte_eventdev *event_dev)
+{
+ struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+ int i, rc = -EINVAL;
+ uint8_t nb_ports;
+
+ nb_ports = dev->nb_event_ports * (dev->dual_ws ? 2 : 1);
+
+ for (i = 0; i < dev->nb_event_queues; i++) {
+ if (dev->sso_msixoff[i] == MSIX_VECTOR_INVALID) {
+ otx2_err("Invalid SSOLF MSIX offset[%d] vector: 0x%x",
+ i, dev->sso_msixoff[i]);
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < nb_ports; i++) {
+ if (dev->ssow_msixoff[i] == MSIX_VECTOR_INVALID) {
+ otx2_err("Invalid SSOWLF MSIX offset[%d] vector: 0x%x",
+ i, dev->ssow_msixoff[i]);
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < dev->nb_event_queues; i++) {
+ uintptr_t base = dev->bar2 + (RVU_BLOCK_ADDR_SSO << 20 |
+ i << 12);
+ rc = sso_lf_register_irq(event_dev, dev->sso_msixoff[i], base);
+ }
+
+ for (i = 0; i < nb_ports; i++) {
+ uintptr_t base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 |
+ i << 12);
+ rc = ssow_lf_register_irq(event_dev, dev->ssow_msixoff[i],
+ base);
+ }
+
+fail:
+ return rc;
+}
+
+void
+sso_unregister_irqs(const struct rte_eventdev *event_dev)
+{
+ struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+ uint8_t nb_ports;
+ int i;
+
+ nb_ports = dev->nb_event_ports * (dev->dual_ws ? 2 : 1);
+
+ for (i = 0; i < dev->nb_event_queues; i++) {
+ uintptr_t base = dev->bar2 + (RVU_BLOCK_ADDR_SSO << 20 |
+ i << 12);
+ sso_lf_unregister_irq(event_dev, dev->sso_msixoff[i], base);
+ }
+
+ for (i = 0; i < nb_ports; i++) {
+ uintptr_t base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 |
+ i << 12);
+ ssow_lf_unregister_irq(event_dev, dev->ssow_msixoff[i], base);
+ }
+}
+
+static void
+tim_lf_irq(void *param)
+{
+ uintptr_t base = (uintptr_t)param;
+	uint64_t nrsperr_intr;
+	uint64_t ras_intr;
+ uint8_t ring;
+
+ ring = (base >> 12) & 0xFF;
+
+	nrsperr_intr = otx2_read64(base + TIM_LF_NRSPERR_INT);
+	otx2_err("TIM RING %d TIM_LF_NRSPERR_INT=0x%" PRIx64 "", ring,
+		 nrsperr_intr);
+	ras_intr = otx2_read64(base + TIM_LF_RAS_INT);
+	otx2_err("TIM RING %d TIM_LF_RAS_INT=0x%" PRIx64 "", ring, ras_intr);
+
+	/* Clear each interrupt with the value read from its own register */
+	otx2_write64(nrsperr_intr, base + TIM_LF_NRSPERR_INT);
+	otx2_write64(ras_intr, base + TIM_LF_RAS_INT);
+}
+
+static int
+tim_lf_register_irq(struct rte_pci_device *pci_dev, uint16_t tim_msixoff,
+ uintptr_t base)
+{
+ struct rte_intr_handle *handle = &pci_dev->intr_handle;
+ int rc, vec;
+
+ vec = tim_msixoff + TIM_LF_INT_VEC_NRSPERR_INT;
+
+ /* Clear err interrupt */
+ otx2_write64(~0ull, base + TIM_LF_NRSPERR_INT);
+ /* Set used interrupt vectors */
+ rc = otx2_register_irq(handle, tim_lf_irq, (void *)base, vec);
+ /* Enable hw interrupt */
+ otx2_write64(~0ull, base + TIM_LF_NRSPERR_INT_ENA_W1S);
+
+ vec = tim_msixoff + TIM_LF_INT_VEC_RAS_INT;
+
+ /* Clear err interrupt */
+ otx2_write64(~0ull, base + TIM_LF_RAS_INT);
+ /* Set used interrupt vectors */
+ rc = otx2_register_irq(handle, tim_lf_irq, (void *)base, vec);
+ /* Enable hw interrupt */
+ otx2_write64(~0ull, base + TIM_LF_RAS_INT_ENA_W1S);
+
+ return rc;
+}
+
+static void
+tim_lf_unregister_irq(struct rte_pci_device *pci_dev, uint16_t tim_msixoff,
+ uintptr_t base)
+{
+ struct rte_intr_handle *handle = &pci_dev->intr_handle;
+ int vec;
+
+ vec = tim_msixoff + TIM_LF_INT_VEC_NRSPERR_INT;
+
+ /* Clear err interrupt */
+ otx2_write64(~0ull, base + TIM_LF_NRSPERR_INT_ENA_W1C);
+ otx2_unregister_irq(handle, tim_lf_irq, (void *)base, vec);
+
+ vec = tim_msixoff + TIM_LF_INT_VEC_RAS_INT;
+
+ /* Clear err interrupt */
+ otx2_write64(~0ull, base + TIM_LF_RAS_INT_ENA_W1C);
+ otx2_unregister_irq(handle, tim_lf_irq, (void *)base, vec);
+}
+
+int
+tim_register_irq(uint16_t ring_id)
+{
+ struct otx2_tim_evdev *dev = tim_priv_get();
+ int rc = -EINVAL;
+ uintptr_t base;
+
+ if (dev->tim_msixoff[ring_id] == MSIX_VECTOR_INVALID) {
+ otx2_err("Invalid TIMLF MSIX offset[%d] vector: 0x%x",
+ ring_id, dev->tim_msixoff[ring_id]);
+ goto fail;
+ }
+
+ base = dev->bar2 + (RVU_BLOCK_ADDR_TIM << 20 | ring_id << 12);
+ rc = tim_lf_register_irq(dev->pci_dev, dev->tim_msixoff[ring_id], base);
+fail:
+ return rc;
+}
+
+void
+tim_unregister_irq(uint16_t ring_id)
+{
+ struct otx2_tim_evdev *dev = tim_priv_get();
+ uintptr_t base;
+
+ base = dev->bar2 + (RVU_BLOCK_ADDR_TIM << 20 | ring_id << 12);
+ tim_lf_unregister_irq(dev->pci_dev, dev->tim_msixoff[ring_id], base);
+}
diff --git a/src/spdk/dpdk/drivers/event/octeontx2/otx2_evdev_selftest.c b/src/spdk/dpdk/drivers/event/octeontx2/otx2_evdev_selftest.c
new file mode 100644
index 000000000..8440a50aa
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/octeontx2/otx2_evdev_selftest.c
@@ -0,0 +1,1511 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <rte_atomic.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_hexdump.h>
+#include <rte_launch.h>
+#include <rte_lcore.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_per_lcore.h>
+#include <rte_random.h>
+#include <rte_test.h>
+
+#include "otx2_evdev.h"
+
+#define NUM_PACKETS (1024)
+#define MAX_EVENTS (1024)
+
+#define OCTEONTX2_TEST_RUN(setup, teardown, test) \
+ octeontx_test_run(setup, teardown, test, #test)
+
+static int total;
+static int passed;
+static int failed;
+static int unsupported;
+
+static int evdev;
+static struct rte_mempool *eventdev_test_mempool;
+
+struct event_attr {
+ uint32_t flow_id;
+ uint8_t event_type;
+ uint8_t sub_event_type;
+ uint8_t sched_type;
+ uint8_t queue;
+ uint8_t port;
+};
+
+static uint32_t seqn_list_index;
+static int seqn_list[NUM_PACKETS];
+
+static inline void
+seqn_list_init(void)
+{
+ RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
+ memset(seqn_list, 0, sizeof(seqn_list));
+ seqn_list_index = 0;
+}
+
+static inline int
+seqn_list_update(int val)
+{
+ if (seqn_list_index >= NUM_PACKETS)
+ return -1;
+
+ seqn_list[seqn_list_index++] = val;
+ rte_smp_wmb();
+ return 0;
+}
+
+static inline int
+seqn_list_check(int limit)
+{
+ int i;
+
+ for (i = 0; i < limit; i++) {
+ if (seqn_list[i] != i) {
+ otx2_err("Seqn mismatch %d %d", seqn_list[i], i);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+struct test_core_param {
+ rte_atomic32_t *total_events;
+ uint64_t dequeue_tmo_ticks;
+ uint8_t port;
+ uint8_t sched_type;
+};
+
+static int
+testsuite_setup(void)
+{
+ const char *eventdev_name = "event_octeontx2";
+
+ evdev = rte_event_dev_get_dev_id(eventdev_name);
+ if (evdev < 0) {
+ otx2_err("%d: Eventdev %s not found", __LINE__, eventdev_name);
+ return -1;
+ }
+ return 0;
+}
+
+static void
+testsuite_teardown(void)
+{
+ rte_event_dev_close(evdev);
+}
+
+static inline void
+devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
+ struct rte_event_dev_info *info)
+{
+ memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
+ dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
+ dev_conf->nb_event_ports = info->max_event_ports;
+ dev_conf->nb_event_queues = info->max_event_queues;
+ dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
+ dev_conf->nb_event_port_dequeue_depth =
+ info->max_event_port_dequeue_depth;
+ dev_conf->nb_event_port_enqueue_depth =
+ info->max_event_port_enqueue_depth;
+ dev_conf->nb_events_limit =
+ info->max_num_events;
+}
+
+enum {
+ TEST_EVENTDEV_SETUP_DEFAULT,
+ TEST_EVENTDEV_SETUP_PRIORITY,
+ TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
+};
+
+static inline int
+_eventdev_setup(int mode)
+{
+ const char *pool_name = "evdev_octeontx_test_pool";
+ struct rte_event_dev_config dev_conf;
+ struct rte_event_dev_info info;
+ int i, ret;
+
+	/* Create and destroy the pool for each test case to make it standalone */
+ eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name, MAX_EVENTS,
+ 0, 0, 512,
+ rte_socket_id());
+ if (!eventdev_test_mempool) {
+ otx2_err("ERROR creating mempool");
+ return -1;
+ }
+
+ ret = rte_event_dev_info_get(evdev, &info);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+
+ devconf_set_default_sane_values(&dev_conf, &info);
+ if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
+ dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
+
+ ret = rte_event_dev_configure(evdev, &dev_conf);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
+
+ uint32_t queue_count;
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+ "Queue count get failed");
+
+ if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
+ if (queue_count > 8)
+ queue_count = 8;
+
+ /* Configure event queues(0 to n) with
+ * RTE_EVENT_DEV_PRIORITY_HIGHEST to
+ * RTE_EVENT_DEV_PRIORITY_LOWEST
+ */
+ uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
+ queue_count;
+ for (i = 0; i < (int)queue_count; i++) {
+ struct rte_event_queue_conf queue_conf;
+
+ ret = rte_event_queue_default_conf_get(evdev, i,
+ &queue_conf);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d",
+ i);
+ queue_conf.priority = i * step;
+ ret = rte_event_queue_setup(evdev, i, &queue_conf);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
+ i);
+ }
+
+ } else {
+ /* Configure event queues with default priority */
+ for (i = 0; i < (int)queue_count; i++) {
+ ret = rte_event_queue_setup(evdev, i, NULL);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
+ i);
+ }
+ }
+ /* Configure event ports */
+ uint32_t port_count;
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT, &port_count),
+ "Port count get failed");
+ for (i = 0; i < (int)port_count; i++) {
+ ret = rte_event_port_setup(evdev, i, NULL);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
+ ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
+ RTE_TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d",
+ i);
+ }
+
+ ret = rte_event_dev_start(evdev);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start device");
+
+ return 0;
+}
+
+static inline int
+eventdev_setup(void)
+{
+ return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
+}
+
+static inline int
+eventdev_setup_priority(void)
+{
+ return _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);
+}
+
+static inline int
+eventdev_setup_dequeue_timeout(void)
+{
+ return _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT);
+}
+
+static inline void
+eventdev_teardown(void)
+{
+ rte_event_dev_stop(evdev);
+ rte_mempool_free(eventdev_test_mempool);
+}
+
+static inline void
+update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
+ uint32_t flow_id, uint8_t event_type,
+ uint8_t sub_event_type, uint8_t sched_type,
+ uint8_t queue, uint8_t port)
+{
+ struct event_attr *attr;
+
+ /* Store the event attributes in mbuf for future reference */
+ attr = rte_pktmbuf_mtod(m, struct event_attr *);
+ attr->flow_id = flow_id;
+ attr->event_type = event_type;
+ attr->sub_event_type = sub_event_type;
+ attr->sched_type = sched_type;
+ attr->queue = queue;
+ attr->port = port;
+
+ ev->flow_id = flow_id;
+ ev->sub_event_type = sub_event_type;
+ ev->event_type = event_type;
+ /* Inject the new event */
+ ev->op = RTE_EVENT_OP_NEW;
+ ev->sched_type = sched_type;
+ ev->queue_id = queue;
+ ev->mbuf = m;
+}
+
+static inline int
+inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
+ uint8_t sched_type, uint8_t queue, uint8_t port,
+ unsigned int events)
+{
+ struct rte_mbuf *m;
+ unsigned int i;
+
+ for (i = 0; i < events; i++) {
+ struct rte_event ev = {.event = 0, .u64 = 0};
+
+ m = rte_pktmbuf_alloc(eventdev_test_mempool);
+ RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
+
+ m->seqn = i;
+ update_event_and_validation_attr(m, &ev, flow_id, event_type,
+ sub_event_type, sched_type,
+ queue, port);
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ }
+ return 0;
+}
+
+static inline int
+check_excess_events(uint8_t port)
+{
+ uint16_t valid_event;
+ struct rte_event ev;
+ int i;
+
+ /* Check for excess events, try for a few times and exit */
+ for (i = 0; i < 32; i++) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+
+ RTE_TEST_ASSERT_SUCCESS(valid_event,
+ "Unexpected valid event=%d",
+ ev.mbuf->seqn);
+ }
+ return 0;
+}
+
+static inline int
+generate_random_events(const unsigned int total_events)
+{
+ struct rte_event_dev_info info;
+ uint32_t queue_count;
+ unsigned int i;
+ int ret;
+
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+ "Queue count get failed");
+
+ ret = rte_event_dev_info_get(evdev, &info);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+ for (i = 0; i < total_events; i++) {
+ ret = inject_events(
+ rte_rand() % info.max_event_queue_flows /*flow_id */,
+ RTE_EVENT_TYPE_CPU /* event_type */,
+ rte_rand() % 256 /* sub_event_type */,
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
+ rte_rand() % queue_count /* queue */,
+ 0 /* port */,
+ 1 /* events */);
+ if (ret)
+ return -1;
+ }
+ return ret;
+}
+
+
+static inline int
+validate_event(struct rte_event *ev)
+{
+ struct event_attr *attr;
+
+ attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
+ RTE_TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
+ "flow_id mismatch enq=%d deq =%d",
+ attr->flow_id, ev->flow_id);
+ RTE_TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
+ "event_type mismatch enq=%d deq =%d",
+ attr->event_type, ev->event_type);
+ RTE_TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
+ "sub_event_type mismatch enq=%d deq =%d",
+ attr->sub_event_type, ev->sub_event_type);
+ RTE_TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
+ "sched_type mismatch enq=%d deq =%d",
+ attr->sched_type, ev->sched_type);
+ RTE_TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
+ "queue mismatch enq=%d deq =%d",
+ attr->queue, ev->queue_id);
+ return 0;
+}
+
+typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
+ struct rte_event *ev);
+
+static inline int
+consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
+{
+ uint32_t events = 0, forward_progress_cnt = 0, index = 0;
+ uint16_t valid_event;
+ struct rte_event ev;
+ int ret;
+
+ while (1) {
+ if (++forward_progress_cnt > UINT16_MAX) {
+ otx2_err("Detected deadlock");
+ return -1;
+ }
+
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+ if (!valid_event)
+ continue;
+
+ forward_progress_cnt = 0;
+ ret = validate_event(&ev);
+ if (ret)
+ return -1;
+
+ if (fn != NULL) {
+ ret = fn(index, port, &ev);
+ RTE_TEST_ASSERT_SUCCESS(ret,
+ "Failed to validate test specific event");
+ }
+
+ ++index;
+
+ rte_pktmbuf_free(ev.mbuf);
+ if (++events >= total_events)
+ break;
+ }
+
+ return check_excess_events(port);
+}
+
+static int
+validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
+{
+ RTE_SET_USED(port);
+ RTE_TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d",
+ index, ev->mbuf->seqn);
+ return 0;
+}
+
+static inline int
+test_simple_enqdeq(uint8_t sched_type)
+{
+ int ret;
+
+ ret = inject_events(0 /*flow_id */,
+ RTE_EVENT_TYPE_CPU /* event_type */,
+ 0 /* sub_event_type */,
+ sched_type,
+ 0 /* queue */,
+ 0 /* port */,
+ MAX_EVENTS);
+ if (ret)
+ return -1;
+
+ return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
+}
+
+static int
+test_simple_enqdeq_ordered(void)
+{
+ return test_simple_enqdeq(RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_simple_enqdeq_atomic(void)
+{
+ return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_simple_enqdeq_parallel(void)
+{
+ return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
+}
+
+/*
+ * Generate a prescribed number of events and spread them across available
+ * queues. On dequeue, using a single event port (port 0), verify the enqueued
+ * event attributes.
+ */
+static int
+test_multi_queue_enq_single_port_deq(void)
+{
+ int ret;
+
+ ret = generate_random_events(MAX_EVENTS);
+ if (ret)
+ return -1;
+
+ return consume_events(0 /* port */, MAX_EVENTS, NULL);
+}
+
+/*
+ * Inject MAX_EVENTS events over queues 0..queue_count-1 using a modulo
+ * operation
+ *
+ * For example, Inject 32 events over 0..7 queues
+ * enqueue events 0, 8, 16, 24 in queue 0
+ * enqueue events 1, 9, 17, 25 in queue 1
+ * ..
+ * ..
+ * enqueue events 7, 15, 23, 31 in queue 7
+ *
+ * On dequeue, validate that the events come in 0,8,16,24,1,9,17,25,..,7,15,23,31
+ * order from queue0 (highest priority) to queue7 (lowest priority)
+ */
+static int
+validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
+{
+ uint32_t queue_count;
+
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+ "Queue count get failed");
+ if (queue_count > 8)
+ queue_count = 8;
+ uint32_t range = MAX_EVENTS / queue_count;
+ uint32_t expected_val = (index % range) * queue_count;
+
+ expected_val += ev->queue_id;
+ RTE_SET_USED(port);
+ RTE_TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,
+ "seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
+ ev->mbuf->seqn, index, expected_val, range,
+ queue_count, MAX_EVENTS);
+ return 0;
+}
+
+static int
+test_multi_queue_priority(void)
+{
+ int i, max_evts_roundoff;
+	/* See validate_queue_priority() for the priority validation logic */
+ uint32_t queue_count;
+ struct rte_mbuf *m;
+ uint8_t queue;
+
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+ "Queue count get failed");
+ if (queue_count > 8)
+ queue_count = 8;
+ max_evts_roundoff = MAX_EVENTS / queue_count;
+ max_evts_roundoff *= queue_count;
+
+ for (i = 0; i < max_evts_roundoff; i++) {
+ struct rte_event ev = {.event = 0, .u64 = 0};
+
+ m = rte_pktmbuf_alloc(eventdev_test_mempool);
+ RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
+
+ m->seqn = i;
+ queue = i % queue_count;
+ update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
+ 0, RTE_SCHED_TYPE_PARALLEL,
+ queue, 0);
+ rte_event_enqueue_burst(evdev, 0, &ev, 1);
+ }
+
+ return consume_events(0, max_evts_roundoff, validate_queue_priority);
+}
+
+static int
+worker_multi_port_fn(void *arg)
+{
+ struct test_core_param *param = arg;
+ rte_atomic32_t *total_events = param->total_events;
+ uint8_t port = param->port;
+ uint16_t valid_event;
+ struct rte_event ev;
+ int ret;
+
+ while (rte_atomic32_read(total_events) > 0) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+ if (!valid_event)
+ continue;
+
+ ret = validate_event(&ev);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
+ rte_pktmbuf_free(ev.mbuf);
+ rte_atomic32_sub(total_events, 1);
+ }
+
+ return 0;
+}
+
+static inline int
+wait_workers_to_join(const rte_atomic32_t *count)
+{
+ uint64_t cycles, print_cycles;
+
+ cycles = rte_get_timer_cycles();
+ print_cycles = cycles;
+ while (rte_atomic32_read(count)) {
+ uint64_t new_cycles = rte_get_timer_cycles();
+
+ if (new_cycles - print_cycles > rte_get_timer_hz()) {
+ otx2_err("Events %d", rte_atomic32_read(count));
+ print_cycles = new_cycles;
+ }
+		if (new_cycles - cycles > rte_get_timer_hz() * 10) {
+			otx2_err("No schedules for 10 seconds, deadlock (%d)",
+ rte_atomic32_read(count));
+ rte_event_dev_dump(evdev, stdout);
+ cycles = new_cycles;
+ return -1;
+ }
+ }
+ rte_eal_mp_wait_lcore();
+
+ return 0;
+}
+
+static inline int
+launch_workers_and_wait(int (*master_worker)(void *),
+ int (*slave_workers)(void *), uint32_t total_events,
+ uint8_t nb_workers, uint8_t sched_type)
+{
+ rte_atomic32_t atomic_total_events;
+ struct test_core_param *param;
+ uint64_t dequeue_tmo_ticks;
+ uint8_t port = 0;
+ int w_lcore;
+ int ret;
+
+ if (!nb_workers)
+ return 0;
+
+ rte_atomic32_set(&atomic_total_events, total_events);
+ seqn_list_init();
+
+ param = malloc(sizeof(struct test_core_param) * nb_workers);
+ if (!param)
+ return -1;
+
+ ret = rte_event_dequeue_timeout_ticks(evdev,
+ rte_rand() % 10000000/* 10ms */,
+ &dequeue_tmo_ticks);
+ if (ret) {
+ free(param);
+ return -1;
+ }
+
+ param[0].total_events = &atomic_total_events;
+ param[0].sched_type = sched_type;
+ param[0].port = 0;
+ param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
+ rte_wmb();
+
+ w_lcore = rte_get_next_lcore(
+ /* start core */ -1,
+ /* skip master */ 1,
+ /* wrap */ 0);
+ rte_eal_remote_launch(master_worker, &param[0], w_lcore);
+
+ for (port = 1; port < nb_workers; port++) {
+ param[port].total_events = &atomic_total_events;
+ param[port].sched_type = sched_type;
+ param[port].port = port;
+ param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
+ rte_smp_wmb();
+ w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
+ rte_eal_remote_launch(slave_workers, &param[port], w_lcore);
+ }
+
+ rte_smp_wmb();
+ ret = wait_workers_to_join(&atomic_total_events);
+ free(param);
+
+ return ret;
+}
+
+/*
+ * Generate a prescribed number of events and spread them across available
+ * queues. Dequeue the events through multiple ports and verify the enqueued
+ * event attributes
+ */
+static int
+test_multi_queue_enq_multi_port_deq(void)
+{
+ const unsigned int total_events = MAX_EVENTS;
+ uint32_t nr_ports;
+ int ret;
+
+ ret = generate_random_events(total_events);
+ if (ret)
+ return -1;
+
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),
+ "Port count get failed");
+ nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
+
+ if (!nr_ports) {
+ otx2_err("Not enough ports=%d or workers=%d", nr_ports,
+ rte_lcore_count() - 1);
+ return 0;
+ }
+
+ return launch_workers_and_wait(worker_multi_port_fn,
+ worker_multi_port_fn, total_events,
+ nr_ports, 0xff /* invalid */);
+}
+
+static void
+flush(uint8_t dev_id, struct rte_event event, void *arg)
+{
+ unsigned int *count = arg;
+
+ RTE_SET_USED(dev_id);
+ if (event.event_type == RTE_EVENT_TYPE_CPU)
+ *count = *count + 1;
+}
+
+static int
+test_dev_stop_flush(void)
+{
+ unsigned int total_events = MAX_EVENTS, count = 0;
+ int ret;
+
+ ret = generate_random_events(total_events);
+ if (ret)
+ return -1;
+
+ ret = rte_event_dev_stop_flush_callback_register(evdev, flush, &count);
+ if (ret)
+ return -2;
+ rte_event_dev_stop(evdev);
+ ret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL);
+ if (ret)
+ return -3;
+ RTE_TEST_ASSERT_EQUAL(total_events, count,
+ "count mismatch total_events=%d count=%d",
+ total_events, count);
+
+ return 0;
+}
+
+static int
+validate_queue_to_port_single_link(uint32_t index, uint8_t port,
+ struct rte_event *ev)
+{
+ RTE_SET_USED(index);
+ RTE_TEST_ASSERT_EQUAL(port, ev->queue_id,
+ "queue mismatch enq=%d deq =%d",
+ port, ev->queue_id);
+
+ return 0;
+}
+
+/*
+ * Link queue x to port x and verify the link by checking that
+ * queue_id == x on dequeue from the specific port x
+ */
+static int
+test_queue_to_port_single_link(void)
+{
+ int i, nr_links, ret;
+ uint32_t queue_count;
+ uint32_t port_count;
+
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT, &port_count),
+ "Port count get failed");
+
+	/* Unlink all connections that were created in eventdev_setup */
+ for (i = 0; i < (int)port_count; i++) {
+ ret = rte_event_port_unlink(evdev, i, NULL, 0);
+ RTE_TEST_ASSERT(ret >= 0,
+ "Failed to unlink all queues port=%d", i);
+ }
+
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+ "Queue count get failed");
+
+ nr_links = RTE_MIN(port_count, queue_count);
+ const unsigned int total_events = MAX_EVENTS / nr_links;
+
+ /* Link queue x to port x and inject events to queue x through port x */
+ for (i = 0; i < nr_links; i++) {
+ uint8_t queue = (uint8_t)i;
+
+ ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
+ RTE_TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);
+
+ ret = inject_events(0x100 /*flow_id */,
+ RTE_EVENT_TYPE_CPU /* event_type */,
+ rte_rand() % 256 /* sub_event_type */,
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
+ queue /* queue */, i /* port */,
+ total_events /* events */);
+ if (ret)
+ return -1;
+ }
+
+ /* Verify the events generated from correct queue */
+ for (i = 0; i < nr_links; i++) {
+ ret = consume_events(i /* port */, total_events,
+ validate_queue_to_port_single_link);
+ if (ret)
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
+ struct rte_event *ev)
+{
+ RTE_SET_USED(index);
+ RTE_TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
+ "queue mismatch enq=%d deq =%d",
+ port, ev->queue_id);
+
+ return 0;
+}
+
+/*
+ * Link all even-numbered queues to port 0 and all odd-numbered queues to
+ * port 1, and verify the link connection on dequeue
+ */
+static int
+test_queue_to_port_multi_link(void)
+{
+ int ret, port0_events = 0, port1_events = 0;
+ uint32_t nr_queues = 0;
+ uint32_t nr_ports = 0;
+ uint8_t queue, port;
+
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &nr_queues),
+ "Queue count get failed");
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),
+ "Port count get failed");
+
+ if (nr_ports < 2) {
+ otx2_err("Not enough ports to test ports=%d", nr_ports);
+ return 0;
+ }
+
+	/* Unlink all connections that were created in eventdev_setup */
+ for (port = 0; port < nr_ports; port++) {
+ ret = rte_event_port_unlink(evdev, port, NULL, 0);
+ RTE_TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
+ port);
+ }
+
+ const unsigned int total_events = MAX_EVENTS / nr_queues;
+
+	/* Link all even-numbered queues to port 0 and odd-numbered ones to port 1 */
+ for (queue = 0; queue < nr_queues; queue++) {
+ port = queue & 0x1;
+ ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
+ RTE_TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
+ queue, port);
+
+ ret = inject_events(0x100 /*flow_id */,
+ RTE_EVENT_TYPE_CPU /* event_type */,
+ rte_rand() % 256 /* sub_event_type */,
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
+ queue /* queue */, port /* port */,
+ total_events /* events */);
+ if (ret)
+ return -1;
+
+ if (port == 0)
+ port0_events += total_events;
+ else
+ port1_events += total_events;
+ }
+
+ ret = consume_events(0 /* port */, port0_events,
+ validate_queue_to_port_multi_link);
+ if (ret)
+ return -1;
+ ret = consume_events(1 /* port */, port1_events,
+ validate_queue_to_port_multi_link);
+ if (ret)
+ return -1;
+
+ return 0;
+}
+
+static int
+worker_flow_based_pipeline(void *arg)
+{
+ struct test_core_param *param = arg;
+ uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
+ rte_atomic32_t *total_events = param->total_events;
+ uint8_t new_sched_type = param->sched_type;
+ uint8_t port = param->port;
+ uint16_t valid_event;
+ struct rte_event ev;
+
+ while (rte_atomic32_read(total_events) > 0) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
+ dequeue_tmo_ticks);
+ if (!valid_event)
+ continue;
+
+ /* Events from stage 0 */
+ if (ev.sub_event_type == 0) {
+ /* Move to atomic flow to maintain the ordering */
+ ev.flow_id = 0x2;
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.sub_event_type = 1; /* stage 1 */
+ ev.sched_type = new_sched_type;
+ ev.op = RTE_EVENT_OP_FORWARD;
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ } else if (ev.sub_event_type == 1) { /* Events from stage 1*/
+ if (seqn_list_update(ev.mbuf->seqn) == 0) {
+ rte_pktmbuf_free(ev.mbuf);
+ rte_atomic32_sub(total_events, 1);
+ } else {
+ otx2_err("Failed to update seqn_list");
+ return -1;
+ }
+ } else {
+ otx2_err("Invalid ev.sub_event_type = %d",
+ ev.sub_event_type);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int
+test_multiport_flow_sched_type_test(uint8_t in_sched_type,
+ uint8_t out_sched_type)
+{
+ const unsigned int total_events = MAX_EVENTS;
+ uint32_t nr_ports;
+ int ret;
+
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),
+ "Port count get failed");
+ nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
+
+ if (!nr_ports) {
+ otx2_err("Not enough ports=%d or workers=%d", nr_ports,
+ rte_lcore_count() - 1);
+ return 0;
+ }
+
+	/* Inject events with m->seqn ranging from 0 to total_events - 1 */
+ ret = inject_events(0x1 /*flow_id */,
+ RTE_EVENT_TYPE_CPU /* event_type */,
+ 0 /* sub_event_type (stage 0) */,
+ in_sched_type,
+ 0 /* queue */,
+ 0 /* port */,
+ total_events /* events */);
+ if (ret)
+ return -1;
+
+ rte_mb();
+ ret = launch_workers_and_wait(worker_flow_based_pipeline,
+ worker_flow_based_pipeline, total_events,
+ nr_ports, out_sched_type);
+ if (ret)
+ return -1;
+
+ if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
+ out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
+ /* Check the events order maintained or not */
+ return seqn_list_check(total_events);
+ }
+
+ return 0;
+}
+
+/* Multi port ordered to atomic transaction */
+static int
+test_multi_port_flow_ordered_to_atomic(void)
+{
+ /* Ingress event order test */
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+ RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_flow_ordered_to_ordered(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+ RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_flow_ordered_to_parallel(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+ RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+test_multi_port_flow_atomic_to_atomic(void)
+{
+ /* Ingress event order test */
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+ RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_flow_atomic_to_ordered(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+ RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_flow_atomic_to_parallel(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+ RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+test_multi_port_flow_parallel_to_atomic(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+ RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_flow_parallel_to_ordered(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+ RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_flow_parallel_to_parallel(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+ RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+worker_group_based_pipeline(void *arg)
+{
+ struct test_core_param *param = arg;
+ uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
+ rte_atomic32_t *total_events = param->total_events;
+ uint8_t new_sched_type = param->sched_type;
+ uint8_t port = param->port;
+ uint16_t valid_event;
+ struct rte_event ev;
+
+ while (rte_atomic32_read(total_events) > 0) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
+ dequeue_tmo_ticks);
+ if (!valid_event)
+ continue;
+
+		/* Events from stage 0 (group 0) */
+ if (ev.queue_id == 0) {
+ /* Move to atomic flow to maintain the ordering */
+ ev.flow_id = 0x2;
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.sched_type = new_sched_type;
+ ev.queue_id = 1; /* Stage 1*/
+ ev.op = RTE_EVENT_OP_FORWARD;
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+		} else if (ev.queue_id == 1) { /* Events from stage 1 (group 1) */
+ if (seqn_list_update(ev.mbuf->seqn) == 0) {
+ rte_pktmbuf_free(ev.mbuf);
+ rte_atomic32_sub(total_events, 1);
+ } else {
+ otx2_err("Failed to update seqn_list");
+ return -1;
+ }
+ } else {
+ otx2_err("Invalid ev.queue_id = %d", ev.queue_id);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+test_multiport_queue_sched_type_test(uint8_t in_sched_type,
+ uint8_t out_sched_type)
+{
+ const unsigned int total_events = MAX_EVENTS;
+ uint32_t queue_count;
+ uint32_t nr_ports;
+ int ret;
+
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),
+ "Port count get failed");
+
+ nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
+
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+ "Queue count get failed");
+ if (queue_count < 2 || !nr_ports) {
+ otx2_err("Not enough queues=%d ports=%d or workers=%d",
+ queue_count, nr_ports,
+ rte_lcore_count() - 1);
+ return 0;
+ }
+
+	/* Inject total_events events with m->seqn starting from 0 */
+ ret = inject_events(0x1 /*flow_id */,
+ RTE_EVENT_TYPE_CPU /* event_type */,
+ 0 /* sub_event_type (stage 0) */,
+ in_sched_type,
+ 0 /* queue */,
+ 0 /* port */,
+ total_events /* events */);
+ if (ret)
+ return -1;
+
+ ret = launch_workers_and_wait(worker_group_based_pipeline,
+ worker_group_based_pipeline, total_events,
+ nr_ports, out_sched_type);
+ if (ret)
+ return -1;
+
+ if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
+ out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
+		/* Check whether event ordering is maintained */
+ return seqn_list_check(total_events);
+ }
+
+ return 0;
+}
+
+static int
+test_multi_port_queue_ordered_to_atomic(void)
+{
+ /* Ingress event order test */
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+ RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_queue_ordered_to_ordered(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+ RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_queue_ordered_to_parallel(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+ RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+test_multi_port_queue_atomic_to_atomic(void)
+{
+ /* Ingress event order test */
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+ RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_queue_atomic_to_ordered(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+ RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_queue_atomic_to_parallel(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+ RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+test_multi_port_queue_parallel_to_atomic(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+ RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_queue_parallel_to_ordered(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+ RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_queue_parallel_to_parallel(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+ RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg)
+{
+ struct test_core_param *param = arg;
+ rte_atomic32_t *total_events = param->total_events;
+ uint8_t port = param->port;
+ uint16_t valid_event;
+ struct rte_event ev;
+
+ while (rte_atomic32_read(total_events) > 0) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+ if (!valid_event)
+ continue;
+
+ if (ev.sub_event_type == 255) { /* last stage */
+ rte_pktmbuf_free(ev.mbuf);
+ rte_atomic32_sub(total_events, 1);
+ } else {
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.sub_event_type++;
+ ev.sched_type =
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
+ ev.op = RTE_EVENT_OP_FORWARD;
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ }
+ }
+
+ return 0;
+}
+
+static int
+launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
+{
+ uint32_t nr_ports;
+ int ret;
+
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),
+ "Port count get failed");
+ nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
+
+ if (!nr_ports) {
+ otx2_err("Not enough ports=%d or workers=%d",
+ nr_ports, rte_lcore_count() - 1);
+ return 0;
+ }
+
+	/* Inject MAX_EVENTS events with m->seqn starting from 0 */
+ ret = inject_events(0x1 /*flow_id */,
+ RTE_EVENT_TYPE_CPU /* event_type */,
+ 0 /* sub_event_type (stage 0) */,
+ rte_rand() %
+ (RTE_SCHED_TYPE_PARALLEL + 1) /* sched_type */,
+ 0 /* queue */,
+ 0 /* port */,
+ MAX_EVENTS /* events */);
+ if (ret)
+ return -1;
+
+ return launch_workers_and_wait(fn, fn, MAX_EVENTS, nr_ports,
+ 0xff /* invalid */);
+}
+
+/* Flow based pipeline with maximum stages with random sched type */
+static int
+test_multi_port_flow_max_stages_random_sched_type(void)
+{
+ return launch_multi_port_max_stages_random_sched_type(
+ worker_flow_based_pipeline_max_stages_rand_sched_type);
+}
+
+static int
+worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)
+{
+ struct test_core_param *param = arg;
+ uint8_t port = param->port;
+ uint32_t queue_count;
+ uint16_t valid_event;
+ struct rte_event ev;
+
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+ "Queue count get failed");
+ uint8_t nr_queues = queue_count;
+ rte_atomic32_t *total_events = param->total_events;
+
+ while (rte_atomic32_read(total_events) > 0) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+ if (!valid_event)
+ continue;
+
+ if (ev.queue_id == nr_queues - 1) { /* last stage */
+ rte_pktmbuf_free(ev.mbuf);
+ rte_atomic32_sub(total_events, 1);
+ } else {
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.queue_id++;
+ ev.sched_type =
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
+ ev.op = RTE_EVENT_OP_FORWARD;
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ }
+ }
+
+ return 0;
+}
+
+/* Queue based pipeline with maximum stages with random sched type */
+static int
+test_multi_port_queue_max_stages_random_sched_type(void)
+{
+ return launch_multi_port_max_stages_random_sched_type(
+ worker_queue_based_pipeline_max_stages_rand_sched_type);
+}
+
+static int
+worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)
+{
+ struct test_core_param *param = arg;
+ uint8_t port = param->port;
+ uint32_t queue_count;
+ uint16_t valid_event;
+ struct rte_event ev;
+
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+ "Queue count get failed");
+ uint8_t nr_queues = queue_count;
+ rte_atomic32_t *total_events = param->total_events;
+
+ while (rte_atomic32_read(total_events) > 0) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+ if (!valid_event)
+ continue;
+
+ if (ev.queue_id == nr_queues - 1) { /* Last stage */
+ rte_pktmbuf_free(ev.mbuf);
+ rte_atomic32_sub(total_events, 1);
+ } else {
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.queue_id++;
+ ev.sub_event_type = rte_rand() % 256;
+ ev.sched_type =
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
+ ev.op = RTE_EVENT_OP_FORWARD;
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ }
+ }
+
+ return 0;
+}
+
+/* Queue and flow based pipeline with maximum stages with random sched type */
+static int
+test_multi_port_mixed_max_stages_random_sched_type(void)
+{
+ return launch_multi_port_max_stages_random_sched_type(
+ worker_mixed_pipeline_max_stages_rand_sched_type);
+}
+
+static int
+worker_ordered_flow_producer(void *arg)
+{
+ struct test_core_param *param = arg;
+ uint8_t port = param->port;
+ struct rte_mbuf *m;
+ int counter = 0;
+
+ while (counter < NUM_PACKETS) {
+ m = rte_pktmbuf_alloc(eventdev_test_mempool);
+ if (m == NULL)
+ continue;
+
+ m->seqn = counter++;
+
+ struct rte_event ev = {.event = 0, .u64 = 0};
+
+ ev.flow_id = 0x1; /* Generate a fat flow */
+ ev.sub_event_type = 0;
+ /* Inject the new event */
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.sched_type = RTE_SCHED_TYPE_ORDERED;
+ ev.queue_id = 0;
+ ev.mbuf = m;
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ }
+
+ return 0;
+}
+
+static inline int
+test_producer_consumer_ingress_order_test(int (*fn)(void *))
+{
+ uint32_t nr_ports;
+
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT, &nr_ports),
+ "Port count get failed");
+ nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
+
+ if (rte_lcore_count() < 3 || nr_ports < 2) {
+ otx2_err("### Not enough cores for test.");
+ return 0;
+ }
+
+ launch_workers_and_wait(worker_ordered_flow_producer, fn,
+ NUM_PACKETS, nr_ports, RTE_SCHED_TYPE_ATOMIC);
+	/* Check whether event ordering is maintained */
+ return seqn_list_check(NUM_PACKETS);
+}
+
+/* Flow based producer consumer ingress order test */
+static int
+test_flow_producer_consumer_ingress_order_test(void)
+{
+ return test_producer_consumer_ingress_order_test(
+ worker_flow_based_pipeline);
+}
+
+/* Queue based producer consumer ingress order test */
+static int
+test_queue_producer_consumer_ingress_order_test(void)
+{
+ return test_producer_consumer_ingress_order_test(
+ worker_group_based_pipeline);
+}
+
+static void octeontx_test_run(int (*setup)(void), void (*tdown)(void),
+ int (*test)(void), const char *name)
+{
+ if (setup() < 0) {
+		printf("Error setting up test %s\n", name);
+ unsupported++;
+ } else {
+ if (test() < 0) {
+ failed++;
+ printf("+ TestCase [%2d] : %s failed\n", total, name);
+ } else {
+ passed++;
+ printf("+ TestCase [%2d] : %s succeeded\n", total,
+ name);
+ }
+ }
+
+ total++;
+ tdown();
+}
+
+int
+otx2_sso_selftest(void)
+{
+ testsuite_setup();
+
+ OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_simple_enqdeq_ordered);
+ OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_simple_enqdeq_atomic);
+ OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_simple_enqdeq_parallel);
+ OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_queue_enq_single_port_deq);
+ OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_dev_stop_flush);
+ OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_queue_enq_multi_port_deq);
+ OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_queue_to_port_single_link);
+ OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_queue_to_port_multi_link);
+ OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_ordered_to_atomic);
+ OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_ordered_to_ordered);
+ OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_ordered_to_parallel);
+ OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_atomic_to_atomic);
+ OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_atomic_to_ordered);
+ OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_atomic_to_parallel);
+ OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_parallel_to_atomic);
+ OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_parallel_to_ordered);
+ OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_parallel_to_parallel);
+ OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_ordered_to_atomic);
+ OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_ordered_to_ordered);
+ OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_ordered_to_parallel);
+ OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_atomic_to_atomic);
+ OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_atomic_to_ordered);
+ OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_atomic_to_parallel);
+ OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_parallel_to_atomic);
+ OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_parallel_to_ordered);
+ OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_parallel_to_parallel);
+ OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_max_stages_random_sched_type);
+ OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_max_stages_random_sched_type);
+ OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_mixed_max_stages_random_sched_type);
+ OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_flow_producer_consumer_ingress_order_test);
+ OCTEONTX2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_queue_producer_consumer_ingress_order_test);
+ OCTEONTX2_TEST_RUN(eventdev_setup_priority, eventdev_teardown,
+ test_multi_queue_priority);
+ OCTEONTX2_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
+ test_multi_port_flow_ordered_to_atomic);
+ OCTEONTX2_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
+ test_multi_port_queue_ordered_to_atomic);
+ printf("Total tests : %d\n", total);
+ printf("Passed : %d\n", passed);
+ printf("Failed : %d\n", failed);
+ printf("Not supported : %d\n", unsupported);
+
+ testsuite_teardown();
+
+ if (failed)
+ return -1;
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/event/octeontx2/otx2_evdev_stats.h b/src/spdk/dpdk/drivers/event/octeontx2/otx2_evdev_stats.h
new file mode 100644
index 000000000..74fcec8a0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/octeontx2/otx2_evdev_stats.h
@@ -0,0 +1,286 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_EVDEV_STATS_H__
+#define __OTX2_EVDEV_STATS_H__
+
+#include "otx2_evdev.h"
+
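+/*
+ * Each xstat maps to a field of the SSO mailbox statistics response: the
+ * 64-bit word at 'offset' is shifted right by 'shift' and masked with
+ * 'mask' to extract the counter.  'reset_snap' keeps the per-port/per-group
+ * value captured at the last xstats reset so that reads report deltas.
+ */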
+struct otx2_sso_xstats_name {
+ const char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
+ const size_t offset;
+ const uint64_t mask;
+ const uint8_t shift;
+ uint64_t reset_snap[OTX2_SSO_MAX_VHGRP];
+};
+
+static struct otx2_sso_xstats_name sso_hws_xstats[] = {
+ {"last_grp_serviced", offsetof(struct sso_hws_stats, arbitration),
+ 0x3FF, 0, {0} },
+ {"affinity_arbitration_credits",
+ offsetof(struct sso_hws_stats, arbitration),
+ 0xF, 16, {0} },
+};
+
+static struct otx2_sso_xstats_name sso_grp_xstats[] = {
+ {"wrk_sched", offsetof(struct sso_grp_stats, ws_pc), ~0x0, 0,
+ {0} },
+ {"xaq_dram", offsetof(struct sso_grp_stats, ext_pc), ~0x0,
+ 0, {0} },
+ {"add_wrk", offsetof(struct sso_grp_stats, wa_pc), ~0x0, 0,
+ {0} },
+ {"tag_switch_req", offsetof(struct sso_grp_stats, ts_pc), ~0x0, 0,
+ {0} },
+ {"desched_req", offsetof(struct sso_grp_stats, ds_pc), ~0x0, 0,
+ {0} },
+ {"desched_wrk", offsetof(struct sso_grp_stats, dq_pc), ~0x0, 0,
+ {0} },
+ {"xaq_cached", offsetof(struct sso_grp_stats, aw_status), 0x3,
+ 0, {0} },
+ {"work_inflight", offsetof(struct sso_grp_stats, aw_status), 0x3F,
+ 16, {0} },
+ {"inuse_pages", offsetof(struct sso_grp_stats, page_cnt),
+ 0xFFFFFFFF, 0, {0} },
+};
+
+#define OTX2_SSO_NUM_HWS_XSTATS RTE_DIM(sso_hws_xstats)
+#define OTX2_SSO_NUM_GRP_XSTATS RTE_DIM(sso_grp_xstats)
+
+#define OTX2_SSO_NUM_XSTATS (OTX2_SSO_NUM_HWS_XSTATS + OTX2_SSO_NUM_GRP_XSTATS)
+
+static int
+otx2_sso_xstats_get(const struct rte_eventdev *event_dev,
+ enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+ const unsigned int ids[], uint64_t values[], unsigned int n)
+{
+ struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+ struct otx2_sso_xstats_name *xstats;
+ struct otx2_sso_xstats_name *xstat;
+ struct otx2_mbox *mbox = dev->mbox;
+ uint32_t xstats_mode_count = 0;
+ uint32_t start_offset = 0;
+ unsigned int i;
+ uint64_t value;
+ void *req_rsp;
+ int rc;
+
+ switch (mode) {
+ case RTE_EVENT_DEV_XSTATS_DEVICE:
+ return 0;
+ case RTE_EVENT_DEV_XSTATS_PORT:
+ if (queue_port_id >= (signed int)dev->nb_event_ports)
+ goto invalid_value;
+
+ xstats_mode_count = OTX2_SSO_NUM_HWS_XSTATS;
+ xstats = sso_hws_xstats;
+
+ req_rsp = otx2_mbox_alloc_msg_sso_hws_get_stats(mbox);
+ ((struct sso_info_req *)req_rsp)->hws = dev->dual_ws ?
+ 2 * queue_port_id : queue_port_id;
+ rc = otx2_mbox_process_msg(mbox, (void **)&req_rsp);
+ if (rc < 0)
+ goto invalid_value;
+
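+		/*
+		 * In dual workslot mode every event port is backed by two
+		 * hardware workslots (2 * port and 2 * port + 1).  Read the
+		 * first workslot's counters here, then fetch the second
+		 * workslot's statistics so the common loop below can
+		 * accumulate both.
+		 */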
+ if (dev->dual_ws) {
+ for (i = 0; i < n && i < xstats_mode_count; i++) {
+ xstat = &xstats[ids[i] - start_offset];
+ values[i] = *(uint64_t *)
+ ((char *)req_rsp + xstat->offset);
+ values[i] = (values[i] >> xstat->shift) &
+ xstat->mask;
+ }
+
+ req_rsp = otx2_mbox_alloc_msg_sso_hws_get_stats(mbox);
+ ((struct sso_info_req *)req_rsp)->hws =
+ (2 * queue_port_id) + 1;
+ rc = otx2_mbox_process_msg(mbox, (void **)&req_rsp);
+ if (rc < 0)
+ goto invalid_value;
+ }
+
+ break;
+ case RTE_EVENT_DEV_XSTATS_QUEUE:
+ if (queue_port_id >= (signed int)dev->nb_event_queues)
+ goto invalid_value;
+
+ xstats_mode_count = OTX2_SSO_NUM_GRP_XSTATS;
+ start_offset = OTX2_SSO_NUM_HWS_XSTATS;
+ xstats = sso_grp_xstats;
+
+ req_rsp = otx2_mbox_alloc_msg_sso_grp_get_stats(mbox);
+ ((struct sso_info_req *)req_rsp)->grp = queue_port_id;
+ rc = otx2_mbox_process_msg(mbox, (void **)&req_rsp);
+ if (rc < 0)
+ goto invalid_value;
+
+ break;
+ default:
+ otx2_err("Invalid mode received");
+ goto invalid_value;
+	}
+
+ for (i = 0; i < n && i < xstats_mode_count; i++) {
+ xstat = &xstats[ids[i] - start_offset];
+ value = *(uint64_t *)((char *)req_rsp + xstat->offset);
+ value = (value >> xstat->shift) & xstat->mask;
+
+ if ((mode == RTE_EVENT_DEV_XSTATS_PORT) && dev->dual_ws)
+ values[i] += value;
+ else
+ values[i] = value;
+
+ values[i] -= xstat->reset_snap[queue_port_id];
+ }
+
+ return i;
+invalid_value:
+ return -EINVAL;
+}
+
+static int
+otx2_sso_xstats_reset(struct rte_eventdev *event_dev,
+ enum rte_event_dev_xstats_mode mode,
+ int16_t queue_port_id, const uint32_t ids[], uint32_t n)
+{
+ struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+ struct otx2_sso_xstats_name *xstats;
+ struct otx2_sso_xstats_name *xstat;
+ struct otx2_mbox *mbox = dev->mbox;
+ uint32_t xstats_mode_count = 0;
+ uint32_t start_offset = 0;
+ unsigned int i;
+ uint64_t value;
+ void *req_rsp;
+ int rc;
+
+ switch (mode) {
+ case RTE_EVENT_DEV_XSTATS_DEVICE:
+ return 0;
+ case RTE_EVENT_DEV_XSTATS_PORT:
+ if (queue_port_id >= (signed int)dev->nb_event_ports)
+ goto invalid_value;
+
+ xstats_mode_count = OTX2_SSO_NUM_HWS_XSTATS;
+ xstats = sso_hws_xstats;
+
+ req_rsp = otx2_mbox_alloc_msg_sso_hws_get_stats(mbox);
+ ((struct sso_info_req *)req_rsp)->hws = dev->dual_ws ?
+ 2 * queue_port_id : queue_port_id;
+ rc = otx2_mbox_process_msg(mbox, (void **)&req_rsp);
+ if (rc < 0)
+ goto invalid_value;
+
+ if (dev->dual_ws) {
+ for (i = 0; i < n && i < xstats_mode_count; i++) {
+ xstat = &xstats[ids[i] - start_offset];
+ xstat->reset_snap[queue_port_id] = *(uint64_t *)
+ ((char *)req_rsp + xstat->offset);
+ xstat->reset_snap[queue_port_id] =
+ (xstat->reset_snap[queue_port_id] >>
+ xstat->shift) & xstat->mask;
+ }
+
+ req_rsp = otx2_mbox_alloc_msg_sso_hws_get_stats(mbox);
+ ((struct sso_info_req *)req_rsp)->hws =
+ (2 * queue_port_id) + 1;
+ rc = otx2_mbox_process_msg(mbox, (void **)&req_rsp);
+ if (rc < 0)
+ goto invalid_value;
+ }
+
+ break;
+ case RTE_EVENT_DEV_XSTATS_QUEUE:
+ if (queue_port_id >= (signed int)dev->nb_event_queues)
+ goto invalid_value;
+
+ xstats_mode_count = OTX2_SSO_NUM_GRP_XSTATS;
+ start_offset = OTX2_SSO_NUM_HWS_XSTATS;
+ xstats = sso_grp_xstats;
+
+ req_rsp = otx2_mbox_alloc_msg_sso_grp_get_stats(mbox);
+ ((struct sso_info_req *)req_rsp)->grp = queue_port_id;
+		rc = otx2_mbox_process_msg(mbox, (void **)&req_rsp);
+ if (rc < 0)
+ goto invalid_value;
+
+ break;
+ default:
+ otx2_err("Invalid mode received");
+ goto invalid_value;
+	}
+
+ for (i = 0; i < n && i < xstats_mode_count; i++) {
+ xstat = &xstats[ids[i] - start_offset];
+ value = *(uint64_t *)((char *)req_rsp + xstat->offset);
+ value = (value >> xstat->shift) & xstat->mask;
+
+ if ((mode == RTE_EVENT_DEV_XSTATS_PORT) && dev->dual_ws)
+ xstat->reset_snap[queue_port_id] += value;
+ else
+ xstat->reset_snap[queue_port_id] = value;
+ }
+ return i;
+invalid_value:
+ return -EINVAL;
+}
+
+static int
+otx2_sso_xstats_get_names(const struct rte_eventdev *event_dev,
+ enum rte_event_dev_xstats_mode mode,
+ uint8_t queue_port_id,
+ struct rte_event_dev_xstats_name *xstats_names,
+ unsigned int *ids, unsigned int size)
+{
+ struct rte_event_dev_xstats_name xstats_names_copy[OTX2_SSO_NUM_XSTATS];
+ struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+ uint32_t xstats_mode_count = 0;
+ uint32_t start_offset = 0;
+ unsigned int xidx = 0;
+ unsigned int i;
+
+ for (i = 0; i < OTX2_SSO_NUM_HWS_XSTATS; i++) {
+ snprintf(xstats_names_copy[i].name,
+ sizeof(xstats_names_copy[i].name), "%s",
+ sso_hws_xstats[i].name);
+ }
+
+ for (; i < OTX2_SSO_NUM_XSTATS; i++) {
+ snprintf(xstats_names_copy[i].name,
+ sizeof(xstats_names_copy[i].name), "%s",
+ sso_grp_xstats[i - OTX2_SSO_NUM_HWS_XSTATS].name);
+ }
+
+ switch (mode) {
+ case RTE_EVENT_DEV_XSTATS_DEVICE:
+ break;
+ case RTE_EVENT_DEV_XSTATS_PORT:
+ if (queue_port_id >= (signed int)dev->nb_event_ports)
+ break;
+ xstats_mode_count = OTX2_SSO_NUM_HWS_XSTATS;
+ break;
+ case RTE_EVENT_DEV_XSTATS_QUEUE:
+ if (queue_port_id >= (signed int)dev->nb_event_queues)
+ break;
+ xstats_mode_count = OTX2_SSO_NUM_GRP_XSTATS;
+ start_offset = OTX2_SSO_NUM_HWS_XSTATS;
+ break;
+ default:
+ otx2_err("Invalid mode received");
+ return -EINVAL;
+	}
+
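+	/* If the caller's arrays are too small (or NULL), only report how
+	 * many names are available for this mode, as the eventdev xstats
+	 * API expects.
+	 */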
+ if (xstats_mode_count > size || !ids || !xstats_names)
+ return xstats_mode_count;
+
+ for (i = 0; i < xstats_mode_count; i++) {
+ xidx = i + start_offset;
+ strncpy(xstats_names[i].name, xstats_names_copy[xidx].name,
+ sizeof(xstats_names[i].name));
+ ids[i] = xidx;
+ }
+
+ return i;
+}
+
+#endif
diff --git a/src/spdk/dpdk/drivers/event/octeontx2/otx2_tim_evdev.c b/src/spdk/dpdk/drivers/event/octeontx2/otx2_tim_evdev.c
new file mode 100644
index 000000000..4c24cc8a6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/octeontx2/otx2_tim_evdev.c
@@ -0,0 +1,783 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <rte_kvargs.h>
+#include <rte_malloc.h>
+#include <rte_mbuf_pool_ops.h>
+
+#include "otx2_evdev.h"
+#include "otx2_tim_evdev.h"
+
+static struct rte_event_timer_adapter_ops otx2_tim_ops;
+
+static inline int
+tim_get_msix_offsets(void)
+{
+ struct otx2_tim_evdev *dev = tim_priv_get();
+ struct otx2_mbox *mbox = dev->mbox;
+ struct msix_offset_rsp *msix_rsp;
+ int i, rc;
+
+ /* Get TIM MSIX vector offsets */
+ otx2_mbox_alloc_msg_msix_offset(mbox);
+	rc = otx2_mbox_process_msg(mbox, (void *)&msix_rsp);
+	if (rc < 0)
+		return rc;
+
+	for (i = 0; i < dev->nb_rings; i++)
+		dev->tim_msixoff[i] = msix_rsp->timlf_msixoff[i];
+
+ return rc;
+}
+
+static void
+tim_set_fp_ops(struct otx2_tim_ring *tim_ring)
+{
+ uint8_t prod_flag = !tim_ring->prod_type_sp;
+
+	/* [STATS] [MOD/AND] [DFB/FB] [SP/MP] */
+ const rte_event_timer_arm_burst_t arm_burst[2][2][2][2] = {
+#define FP(_name, _f4, _f3, _f2, _f1, flags) \
+ [_f4][_f3][_f2][_f1] = otx2_tim_arm_burst_ ## _name,
+TIM_ARM_FASTPATH_MODES
+#undef FP
+ };
+
+ const rte_event_timer_arm_tmo_tick_burst_t arm_tmo_burst[2][2][2] = {
+#define FP(_name, _f3, _f2, _f1, flags) \
+ [_f3][_f2][_f1] = otx2_tim_arm_tmo_tick_burst_ ## _name,
+TIM_ARM_TMO_FASTPATH_MODES
+#undef FP
+ };
+
+ otx2_tim_ops.arm_burst =
+ arm_burst[tim_ring->enable_stats][tim_ring->optimized]
+ [tim_ring->ena_dfb][prod_flag];
+ otx2_tim_ops.arm_tmo_tick_burst =
+ arm_tmo_burst[tim_ring->enable_stats][tim_ring->optimized]
+ [tim_ring->ena_dfb];
+ otx2_tim_ops.cancel_burst = otx2_tim_timer_cancel_burst;
+}
+
+static void
+otx2_tim_ring_info_get(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer_adapter_info *adptr_info)
+{
+ struct otx2_tim_ring *tim_ring = adptr->data->adapter_priv;
+
+ adptr_info->max_tmo_ns = tim_ring->max_tout;
+ adptr_info->min_resolution_ns = tim_ring->tck_nsec;
+ rte_memcpy(&adptr_info->conf, &adptr->data->conf,
+ sizeof(struct rte_event_timer_adapter_conf));
+}
+
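+/*
+ * Round the bucket count to a nearby power of two so the fast path can use
+ * an and-mask instead of a modulo when mapping expiries to buckets.  Both
+ * the next higher and the next lower power of two are considered; a
+ * candidate is discarded if it violates the minimum tick resolution or the
+ * maximum bucket count, and the closer of the survivors is picked.  The
+ * tick interval is then recomputed to preserve the configured max timeout.
+ */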
+static void
+tim_optimize_bkt_param(struct otx2_tim_ring *tim_ring)
+{
+ uint64_t tck_nsec;
+ uint32_t hbkts;
+ uint32_t lbkts;
+
+ hbkts = rte_align32pow2(tim_ring->nb_bkts);
+ tck_nsec = RTE_ALIGN_MUL_CEIL(tim_ring->max_tout / (hbkts - 1), 10);
+
+ if ((tck_nsec < TICK2NSEC(OTX2_TIM_MIN_TMO_TKS,
+ tim_ring->tenns_clk_freq) ||
+ hbkts > OTX2_TIM_MAX_BUCKETS))
+ hbkts = 0;
+
+ lbkts = rte_align32prevpow2(tim_ring->nb_bkts);
+ tck_nsec = RTE_ALIGN_MUL_CEIL((tim_ring->max_tout / (lbkts - 1)), 10);
+
+ if ((tck_nsec < TICK2NSEC(OTX2_TIM_MIN_TMO_TKS,
+ tim_ring->tenns_clk_freq) ||
+ lbkts > OTX2_TIM_MAX_BUCKETS))
+ lbkts = 0;
+
+ if (!hbkts && !lbkts)
+ return;
+
+ if (!hbkts) {
+ tim_ring->nb_bkts = lbkts;
+ goto end;
+ } else if (!lbkts) {
+ tim_ring->nb_bkts = hbkts;
+ goto end;
+ }
+
+ tim_ring->nb_bkts = (hbkts - tim_ring->nb_bkts) <
+ (tim_ring->nb_bkts - lbkts) ? hbkts : lbkts;
+end:
+ tim_ring->optimized = true;
+ tim_ring->tck_nsec = RTE_ALIGN_MUL_CEIL((tim_ring->max_tout /
+ (tim_ring->nb_bkts - 1)), 10);
+ otx2_tim_dbg("Optimized configured values");
+ otx2_tim_dbg("Nb_bkts : %" PRIu32 "", tim_ring->nb_bkts);
+ otx2_tim_dbg("Tck_nsec : %" PRIu64 "", tim_ring->tck_nsec);
+}
+
+static int
+tim_chnk_pool_create(struct otx2_tim_ring *tim_ring,
+ struct rte_event_timer_adapter_conf *rcfg)
+{
+ unsigned int cache_sz = (tim_ring->nb_chunks / 1.5);
+ unsigned int mp_flags = 0;
+ char pool_name[25];
+ int rc;
+
+ cache_sz /= rte_lcore_count();
+ /* Create chunk pool. */
+ if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT) {
+ mp_flags = MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET;
+ otx2_tim_dbg("Using single producer mode");
+ tim_ring->prod_type_sp = true;
+ }
+
+ snprintf(pool_name, sizeof(pool_name), "otx2_tim_chunk_pool%d",
+ tim_ring->ring_id);
+
+ if (cache_sz > RTE_MEMPOOL_CACHE_MAX_SIZE)
+ cache_sz = RTE_MEMPOOL_CACHE_MAX_SIZE;
+
+ if (!tim_ring->disable_npa) {
+ tim_ring->chunk_pool = rte_mempool_create_empty(pool_name,
+ tim_ring->nb_chunks, tim_ring->chunk_sz,
+ cache_sz, 0, rte_socket_id(), mp_flags);
+
+ if (tim_ring->chunk_pool == NULL) {
+ otx2_err("Unable to create chunkpool.");
+ return -ENOMEM;
+ }
+
+ rc = rte_mempool_set_ops_byname(tim_ring->chunk_pool,
+ rte_mbuf_platform_mempool_ops(),
+ NULL);
+ if (rc < 0) {
+ otx2_err("Unable to set chunkpool ops");
+ goto free;
+ }
+
+ rc = rte_mempool_populate_default(tim_ring->chunk_pool);
+ if (rc < 0) {
+			otx2_err("Unable to populate chunkpool.");
+ goto free;
+ }
+ tim_ring->aura = npa_lf_aura_handle_to_aura(
+ tim_ring->chunk_pool->pool_id);
+ tim_ring->ena_dfb = 0;
+ } else {
+ tim_ring->chunk_pool = rte_mempool_create(pool_name,
+ tim_ring->nb_chunks, tim_ring->chunk_sz,
+ cache_sz, 0, NULL, NULL, NULL, NULL,
+ rte_socket_id(),
+ mp_flags);
+ if (tim_ring->chunk_pool == NULL) {
+ otx2_err("Unable to create chunkpool.");
+ return -ENOMEM;
+ }
+ tim_ring->ena_dfb = 1;
+ }
+
+ return 0;
+
+free:
+ rte_mempool_free(tim_ring->chunk_pool);
+ return rc;
+}
+
+static void
+tim_err_desc(int rc)
+{
+ switch (rc) {
+ case TIM_AF_NO_RINGS_LEFT:
+		otx2_err("Unable to allocate new TIM ring.");
+ break;
+ case TIM_AF_INVALID_NPA_PF_FUNC:
+ otx2_err("Invalid NPA pf func.");
+ break;
+ case TIM_AF_INVALID_SSO_PF_FUNC:
+ otx2_err("Invalid SSO pf func.");
+ break;
+ case TIM_AF_RING_STILL_RUNNING:
+ otx2_tim_dbg("Ring busy.");
+ break;
+ case TIM_AF_LF_INVALID:
+ otx2_err("Invalid Ring id.");
+ break;
+ case TIM_AF_CSIZE_NOT_ALIGNED:
+		otx2_err("Chunk size specified needs to be a multiple of 16.");
+ break;
+ case TIM_AF_CSIZE_TOO_SMALL:
+ otx2_err("Chunk size too small.");
+ break;
+ case TIM_AF_CSIZE_TOO_BIG:
+ otx2_err("Chunk size too big.");
+ break;
+ case TIM_AF_INTERVAL_TOO_SMALL:
+ otx2_err("Bucket traversal interval too small.");
+ break;
+ case TIM_AF_INVALID_BIG_ENDIAN_VALUE:
+ otx2_err("Invalid Big endian value.");
+ break;
+ case TIM_AF_INVALID_CLOCK_SOURCE:
+ otx2_err("Invalid Clock source specified.");
+ break;
+ case TIM_AF_GPIO_CLK_SRC_NOT_ENABLED:
+ otx2_err("GPIO clock source not enabled.");
+ break;
+ case TIM_AF_INVALID_BSIZE:
+ otx2_err("Invalid bucket size.");
+ break;
+ case TIM_AF_INVALID_ENABLE_PERIODIC:
+		otx2_err("Invalid enable periodic value.");
+ break;
+ case TIM_AF_INVALID_ENABLE_DONTFREE:
+ otx2_err("Invalid Don't free value.");
+ break;
+ case TIM_AF_ENA_DONTFRE_NSET_PERIODIC:
+ otx2_err("Don't free bit not set when periodic is enabled.");
+ break;
+ case TIM_AF_RING_ALREADY_DISABLED:
+ otx2_err("Ring already stopped");
+ break;
+ default:
+ otx2_err("Unknown Error.");
+ }
+}
+
+static int
+otx2_tim_ring_create(struct rte_event_timer_adapter *adptr)
+{
+ struct rte_event_timer_adapter_conf *rcfg = &adptr->data->conf;
+ struct otx2_tim_evdev *dev = tim_priv_get();
+ struct otx2_tim_ring *tim_ring;
+ struct tim_config_req *cfg_req;
+ struct tim_ring_req *free_req;
+ struct tim_lf_alloc_req *req;
+ struct tim_lf_alloc_rsp *rsp;
+ int i, rc;
+
+ if (dev == NULL)
+ return -ENODEV;
+
+ if (adptr->data->id >= dev->nb_rings)
+ return -ENODEV;
+
+ req = otx2_mbox_alloc_msg_tim_lf_alloc(dev->mbox);
+ req->npa_pf_func = otx2_npa_pf_func_get();
+ req->sso_pf_func = otx2_sso_pf_func_get();
+ req->ring = adptr->data->id;
+
+ rc = otx2_mbox_process_msg(dev->mbox, (void **)&rsp);
+ if (rc < 0) {
+ tim_err_desc(rc);
+ return -ENODEV;
+ }
+
+ if (NSEC2TICK(RTE_ALIGN_MUL_CEIL(rcfg->timer_tick_ns, 10),
+ rsp->tenns_clk) < OTX2_TIM_MIN_TMO_TKS) {
+ if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES)
+ rcfg->timer_tick_ns = TICK2NSEC(OTX2_TIM_MIN_TMO_TKS,
+ rsp->tenns_clk);
+ else {
+ rc = -ERANGE;
+ goto rng_mem_err;
+ }
+ }
+
+ tim_ring = rte_zmalloc("otx2_tim_prv", sizeof(struct otx2_tim_ring), 0);
+ if (tim_ring == NULL) {
+ rc = -ENOMEM;
+ goto rng_mem_err;
+ }
+
+ adptr->data->adapter_priv = tim_ring;
+
+ tim_ring->tenns_clk_freq = rsp->tenns_clk;
+ tim_ring->clk_src = (int)rcfg->clk_src;
+ tim_ring->ring_id = adptr->data->id;
+ tim_ring->tck_nsec = RTE_ALIGN_MUL_CEIL(rcfg->timer_tick_ns, 10);
+ tim_ring->max_tout = rcfg->max_tmo_ns;
+ tim_ring->nb_bkts = (tim_ring->max_tout / tim_ring->tck_nsec);
+ tim_ring->chunk_sz = dev->chunk_sz;
+ tim_ring->nb_timers = rcfg->nb_timers;
+ tim_ring->disable_npa = dev->disable_npa;
+ tim_ring->enable_stats = dev->enable_stats;
+
+ for (i = 0; i < dev->ring_ctl_cnt ; i++) {
+ struct otx2_tim_ctl *ring_ctl = &dev->ring_ctl_data[i];
+
+ if (ring_ctl->ring == tim_ring->ring_id) {
+ tim_ring->chunk_sz = ring_ctl->chunk_slots ?
+ ((uint32_t)(ring_ctl->chunk_slots + 1) *
+ OTX2_TIM_CHUNK_ALIGNMENT) : tim_ring->chunk_sz;
+ tim_ring->enable_stats = ring_ctl->enable_stats;
+ tim_ring->disable_npa = ring_ctl->disable_npa;
+ }
+ }
+
+ tim_ring->nb_chunks = tim_ring->nb_timers / OTX2_TIM_NB_CHUNK_SLOTS(
+ tim_ring->chunk_sz);
+ tim_ring->nb_chunk_slots = OTX2_TIM_NB_CHUNK_SLOTS(tim_ring->chunk_sz);
+
+ /* Try to optimize the bucket parameters. */
+ if ((rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES)) {
+ if (rte_is_power_of_2(tim_ring->nb_bkts))
+ tim_ring->optimized = true;
+ else
+			tim_optimize_bkt_param(tim_ring);
+ }
+
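+	/*
+	 * Chunk provisioning: with NPA disabled the hardware does not free
+	 * chunks back after traversal (don't-free mode), so budget a full
+	 * set of chunks for every bucket; with NPA enabled a single extra
+	 * chunk per bucket is added as headroom.
+	 */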
+ if (tim_ring->disable_npa)
+ tim_ring->nb_chunks = tim_ring->nb_chunks * tim_ring->nb_bkts;
+ else
+ tim_ring->nb_chunks = tim_ring->nb_chunks + tim_ring->nb_bkts;
+
+ /* Create buckets. */
+ tim_ring->bkt = rte_zmalloc("otx2_tim_bucket", (tim_ring->nb_bkts) *
+ sizeof(struct otx2_tim_bkt),
+ RTE_CACHE_LINE_SIZE);
+	if (tim_ring->bkt == NULL) {
+		rc = -ENOMEM;
+		goto bkt_mem_err;
+	}
+
+ rc = tim_chnk_pool_create(tim_ring, rcfg);
+ if (rc < 0)
+ goto chnk_mem_err;
+
+ cfg_req = otx2_mbox_alloc_msg_tim_config_ring(dev->mbox);
+
+ cfg_req->ring = tim_ring->ring_id;
+ cfg_req->bigendian = false;
+ cfg_req->clocksource = tim_ring->clk_src;
+ cfg_req->enableperiodic = false;
+ cfg_req->enabledontfreebuffer = tim_ring->ena_dfb;
+ cfg_req->bucketsize = tim_ring->nb_bkts;
+ cfg_req->chunksize = tim_ring->chunk_sz;
+ cfg_req->interval = NSEC2TICK(tim_ring->tck_nsec,
+ tim_ring->tenns_clk_freq);
+
+ rc = otx2_mbox_process(dev->mbox);
+ if (rc < 0) {
+ tim_err_desc(rc);
+ goto chnk_mem_err;
+ }
+
+ tim_ring->base = dev->bar2 +
+ (RVU_BLOCK_ADDR_TIM << 20 | tim_ring->ring_id << 12);
+
+ rc = tim_register_irq(tim_ring->ring_id);
+ if (rc < 0)
+ goto chnk_mem_err;
+
+ otx2_write64((uint64_t)tim_ring->bkt,
+ tim_ring->base + TIM_LF_RING_BASE);
+ otx2_write64(tim_ring->aura, tim_ring->base + TIM_LF_RING_AURA);
+
+ /* Set fastpath ops. */
+ tim_set_fp_ops(tim_ring);
+
+ /* Update SSO xae count. */
+ sso_updt_xae_cnt(sso_pmd_priv(dev->event_dev), (void *)tim_ring,
+ RTE_EVENT_TYPE_TIMER);
+ sso_xae_reconfigure(dev->event_dev);
+
+ otx2_tim_dbg("Total memory used %"PRIu64"MB\n",
+ (uint64_t)(((tim_ring->nb_chunks * tim_ring->chunk_sz)
+ + (tim_ring->nb_bkts * sizeof(struct otx2_tim_bkt))) /
+ BIT_ULL(20)));
+
+ return rc;
+
+chnk_mem_err:
+ rte_free(tim_ring->bkt);
+bkt_mem_err:
+ rte_free(tim_ring);
+rng_mem_err:
+ free_req = otx2_mbox_alloc_msg_tim_lf_free(dev->mbox);
+ free_req->ring = adptr->data->id;
+ otx2_mbox_process(dev->mbox);
+ return rc;
+}
+
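+/*
+ * Calibration loop: repeatedly compare the bucket predicted from the TSC
+ * against the bucket currently reported by the hardware and pull
+ * ring_start_cyc towards the hardware view; the debug print shows the
+ * residual mispredict percentage and distance.
+ */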
+static void
+otx2_tim_calibrate_start_tsc(struct otx2_tim_ring *tim_ring)
+{
+#define OTX2_TIM_CALIB_ITER 1E6
+ uint32_t real_bkt, bucket;
+ int icount, ecount = 0;
+ uint64_t bkt_cyc;
+
+ for (icount = 0; icount < OTX2_TIM_CALIB_ITER; icount++) {
+ real_bkt = otx2_read64(tim_ring->base + TIM_LF_RING_REL) >> 44;
+ bkt_cyc = rte_rdtsc();
+ bucket = (bkt_cyc - tim_ring->ring_start_cyc) /
+ tim_ring->tck_int;
+ bucket = bucket % (tim_ring->nb_bkts);
+ tim_ring->ring_start_cyc = bkt_cyc - (real_bkt *
+ tim_ring->tck_int);
+ if (bucket != real_bkt)
+ ecount++;
+ }
+ tim_ring->last_updt_cyc = bkt_cyc;
+ otx2_tim_dbg("Bucket mispredict %3.2f distance %d\n",
+ 100 - (((double)(icount - ecount) / (double)icount) * 100),
+ bucket - real_bkt);
+}
+
+static int
+otx2_tim_ring_start(const struct rte_event_timer_adapter *adptr)
+{
+ struct otx2_tim_ring *tim_ring = adptr->data->adapter_priv;
+ struct otx2_tim_evdev *dev = tim_priv_get();
+ struct tim_enable_rsp *rsp;
+ struct tim_ring_req *req;
+ int rc;
+
+ if (dev == NULL)
+ return -ENODEV;
+
+ req = otx2_mbox_alloc_msg_tim_enable_ring(dev->mbox);
+ req->ring = tim_ring->ring_id;
+
+ rc = otx2_mbox_process_msg(dev->mbox, (void **)&rsp);
+ if (rc < 0) {
+ tim_err_desc(rc);
+ goto fail;
+ }
+#ifdef RTE_ARM_EAL_RDTSC_USE_PMU
+ uint64_t tenns_stmp, tenns_diff;
+ uint64_t pmu_stmp;
+
+ pmu_stmp = rte_rdtsc();
+ asm volatile("mrs %0, cntvct_el0" : "=r" (tenns_stmp));
+
+ tenns_diff = tenns_stmp - rsp->timestarted;
+ pmu_stmp = pmu_stmp - (NSEC2TICK(tenns_diff * 10, rte_get_timer_hz()));
+ tim_ring->ring_start_cyc = pmu_stmp;
+#else
+ tim_ring->ring_start_cyc = rsp->timestarted;
+#endif
+ tim_ring->tck_int = NSEC2TICK(tim_ring->tck_nsec, rte_get_timer_hz());
+ tim_ring->tot_int = tim_ring->tck_int * tim_ring->nb_bkts;
+ tim_ring->fast_div = rte_reciprocal_value_u64(tim_ring->tck_int);
+
+ otx2_tim_calibrate_start_tsc(tim_ring);
+
+fail:
+ return rc;
+}
+
+static int
+otx2_tim_ring_stop(const struct rte_event_timer_adapter *adptr)
+{
+ struct otx2_tim_ring *tim_ring = adptr->data->adapter_priv;
+ struct otx2_tim_evdev *dev = tim_priv_get();
+ struct tim_ring_req *req;
+ int rc;
+
+ if (dev == NULL)
+ return -ENODEV;
+
+ req = otx2_mbox_alloc_msg_tim_disable_ring(dev->mbox);
+ req->ring = tim_ring->ring_id;
+
+ rc = otx2_mbox_process(dev->mbox);
+ if (rc < 0) {
+ tim_err_desc(rc);
+ rc = -EBUSY;
+ }
+
+ return rc;
+}
+
+static int
+otx2_tim_ring_free(struct rte_event_timer_adapter *adptr)
+{
+ struct otx2_tim_ring *tim_ring = adptr->data->adapter_priv;
+ struct otx2_tim_evdev *dev = tim_priv_get();
+ struct tim_ring_req *req;
+ int rc;
+
+ if (dev == NULL)
+ return -ENODEV;
+
+ tim_unregister_irq(tim_ring->ring_id);
+
+ req = otx2_mbox_alloc_msg_tim_lf_free(dev->mbox);
+ req->ring = tim_ring->ring_id;
+
+ rc = otx2_mbox_process(dev->mbox);
+ if (rc < 0) {
+ tim_err_desc(rc);
+ return -EBUSY;
+ }
+
+ rte_free(tim_ring->bkt);
+ rte_mempool_free(tim_ring->chunk_pool);
+ rte_free(adptr->data->adapter_priv);
+
+ return 0;
+}
+
+static int
+otx2_tim_stats_get(const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer_adapter_stats *stats)
+{
+ struct otx2_tim_ring *tim_ring = adapter->data->adapter_priv;
+ uint64_t bkt_cyc = rte_rdtsc() - tim_ring->ring_start_cyc;
+
+
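+	/* The adapter tick count is derived from elapsed TSC cycles using
+	 * the precomputed reciprocal of the tick interval, avoiding a
+	 * 64-bit division on this path.
+	 */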
+ stats->evtim_exp_count = __atomic_load_n(&tim_ring->arm_cnt,
+ __ATOMIC_RELAXED);
+ stats->ev_enq_count = stats->evtim_exp_count;
+ stats->adapter_tick_count = rte_reciprocal_divide_u64(bkt_cyc,
+ &tim_ring->fast_div);
+ return 0;
+}
+
+static int
+otx2_tim_stats_reset(const struct rte_event_timer_adapter *adapter)
+{
+ struct otx2_tim_ring *tim_ring = adapter->data->adapter_priv;
+
+ __atomic_store_n(&tim_ring->arm_cnt, 0, __ATOMIC_RELAXED);
+ return 0;
+}
+
+int
+otx2_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
+ uint32_t *caps,
+ const struct rte_event_timer_adapter_ops **ops)
+{
+ struct otx2_tim_evdev *dev = tim_priv_get();
+
+ RTE_SET_USED(flags);
+
+ if (dev == NULL)
+ return -ENODEV;
+
+ otx2_tim_ops.init = otx2_tim_ring_create;
+ otx2_tim_ops.uninit = otx2_tim_ring_free;
+ otx2_tim_ops.start = otx2_tim_ring_start;
+ otx2_tim_ops.stop = otx2_tim_ring_stop;
+ otx2_tim_ops.get_info = otx2_tim_ring_info_get;
+
+ if (dev->enable_stats) {
+ otx2_tim_ops.stats_get = otx2_tim_stats_get;
+ otx2_tim_ops.stats_reset = otx2_tim_stats_reset;
+ }
+
+ /* Store evdev pointer for later use. */
+ dev->event_dev = (struct rte_eventdev *)(uintptr_t)evdev;
+ *caps = RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT;
+ *ops = &otx2_tim_ops;
+
+ return 0;
+}
+
+#define OTX2_TIM_DISABLE_NPA "tim_disable_npa"
+#define OTX2_TIM_CHNK_SLOTS "tim_chnk_slots"
+#define OTX2_TIM_STATS_ENA "tim_stats_ena"
+#define OTX2_TIM_RINGS_LMT "tim_rings_lmt"
+#define OTX2_TIM_RING_CTL "tim_ring_ctl"
+
+static void
+tim_parse_ring_param(char *value, void *opaque)
+{
+ struct otx2_tim_evdev *dev = opaque;
+ struct otx2_tim_ctl ring_ctl = {0};
+ char *tok = strtok(value, "-");
+ struct otx2_tim_ctl *old_ptr;
+ uint16_t *val;
+
+ val = (uint16_t *)&ring_ctl;
+
+ if (!strlen(value))
+ return;
+
+ while (tok != NULL) {
+ *val = atoi(tok);
+ tok = strtok(NULL, "-");
+ val++;
+ }
+
+ if (val != (&ring_ctl.enable_stats + 1)) {
+ otx2_err(
+ "Invalid ring param expected [ring-chunk_sz-disable_npa-enable_stats]");
+ return;
+ }
+
+ dev->ring_ctl_cnt++;
+ old_ptr = dev->ring_ctl_data;
+ dev->ring_ctl_data = rte_realloc(dev->ring_ctl_data,
+ sizeof(struct otx2_tim_ctl) *
+ dev->ring_ctl_cnt, 0);
+ if (dev->ring_ctl_data == NULL) {
+ dev->ring_ctl_data = old_ptr;
+ dev->ring_ctl_cnt--;
+ return;
+ }
+
+ dev->ring_ctl_data[dev->ring_ctl_cnt - 1] = ring_ctl;
+}
+
+static void
+tim_parse_ring_ctl_list(const char *value, void *opaque)
+{
+ char *s = strdup(value);
+ char *start = NULL;
+ char *end = NULL;
+ char *f = s;
+
+ while (*s) {
+ if (*s == '[')
+ start = s;
+ else if (*s == ']')
+ end = s;
+
+ if (start && start < end) {
+ *end = 0;
+ tim_parse_ring_param(start + 1, opaque);
+ start = end;
+ s = end;
+ }
+ s++;
+ }
+
+ free(f);
+}
+
+static int
+tim_parse_kvargs_dict(const char *key, const char *value, void *opaque)
+{
+ RTE_SET_USED(key);
+
+ /* Dict format [ring-chunk_sz-disable_npa-enable_stats] use '-' as ','
+ * isn't allowed. 0 represents default.
+ */
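+	/*
+	 * Example (values purely illustrative): passing
+	 * tim_ring_ctl=[2-1023-1-0] requests ring 2 with a chunk-slot value
+	 * of 1023, NPA disabled and per-ring stats disabled.
+	 */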
+ tim_parse_ring_ctl_list(value, opaque);
+
+ return 0;
+}
+
+static void
+tim_parse_devargs(struct rte_devargs *devargs, struct otx2_tim_evdev *dev)
+{
+ struct rte_kvargs *kvlist;
+
+ if (devargs == NULL)
+ return;
+
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (kvlist == NULL)
+ return;
+
+ rte_kvargs_process(kvlist, OTX2_TIM_DISABLE_NPA,
+ &parse_kvargs_flag, &dev->disable_npa);
+ rte_kvargs_process(kvlist, OTX2_TIM_CHNK_SLOTS,
+ &parse_kvargs_value, &dev->chunk_slots);
+ rte_kvargs_process(kvlist, OTX2_TIM_STATS_ENA, &parse_kvargs_flag,
+ &dev->enable_stats);
+ rte_kvargs_process(kvlist, OTX2_TIM_RINGS_LMT, &parse_kvargs_value,
+ &dev->min_ring_cnt);
+ rte_kvargs_process(kvlist, OTX2_TIM_RING_CTL,
+			   &tim_parse_kvargs_dict, dev);
+
+ rte_kvargs_free(kvlist);
+}
+
+void
+otx2_tim_init(struct rte_pci_device *pci_dev, struct otx2_dev *cmn_dev)
+{
+ struct rsrc_attach_req *atch_req;
+ struct rsrc_detach_req *dtch_req;
+ struct free_rsrcs_rsp *rsrc_cnt;
+ const struct rte_memzone *mz;
+ struct otx2_tim_evdev *dev;
+ int rc;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
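+	/*
+	 * Keep the TIM device state in a named memzone so that
+	 * tim_priv_get() can locate it later by name without relying on a
+	 * global pointer.
+	 */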
+ mz = rte_memzone_reserve(RTE_STR(OTX2_TIM_EVDEV_NAME),
+ sizeof(struct otx2_tim_evdev),
+ rte_socket_id(), 0);
+ if (mz == NULL) {
+ otx2_tim_dbg("Unable to allocate memory for TIM Event device");
+ return;
+ }
+
+ dev = mz->addr;
+ dev->pci_dev = pci_dev;
+ dev->mbox = cmn_dev->mbox;
+ dev->bar2 = cmn_dev->bar2;
+
+ tim_parse_devargs(pci_dev->device.devargs, dev);
+
+ otx2_mbox_alloc_msg_free_rsrc_cnt(dev->mbox);
+ rc = otx2_mbox_process_msg(dev->mbox, (void *)&rsrc_cnt);
+ if (rc < 0) {
+ otx2_err("Unable to get free rsrc count.");
+ goto mz_free;
+ }
+
+ dev->nb_rings = dev->min_ring_cnt ?
+ RTE_MIN(dev->min_ring_cnt, rsrc_cnt->tim) : rsrc_cnt->tim;
+
+ if (!dev->nb_rings) {
+ otx2_tim_dbg("No TIM Logical functions provisioned.");
+ goto mz_free;
+ }
+
+ atch_req = otx2_mbox_alloc_msg_attach_resources(dev->mbox);
+ atch_req->modify = true;
+ atch_req->timlfs = dev->nb_rings;
+
+ rc = otx2_mbox_process(dev->mbox);
+ if (rc < 0) {
+ otx2_err("Unable to attach TIM rings.");
+ goto mz_free;
+ }
+
+ rc = tim_get_msix_offsets();
+ if (rc < 0) {
+ otx2_err("Unable to get MSIX offsets for TIM.");
+ goto detach;
+ }
+
+ if (dev->chunk_slots &&
+ dev->chunk_slots <= OTX2_TIM_MAX_CHUNK_SLOTS &&
+ dev->chunk_slots >= OTX2_TIM_MIN_CHUNK_SLOTS) {
+ dev->chunk_sz = (dev->chunk_slots + 1) *
+ OTX2_TIM_CHUNK_ALIGNMENT;
+ } else {
+ dev->chunk_sz = OTX2_TIM_RING_DEF_CHUNK_SZ;
+ }
+
+ return;
+
+detach:
+ dtch_req = otx2_mbox_alloc_msg_detach_resources(dev->mbox);
+ dtch_req->partial = true;
+ dtch_req->timlfs = true;
+
+ otx2_mbox_process(dev->mbox);
+mz_free:
+ rte_memzone_free(mz);
+}
+
+void
+otx2_tim_fini(void)
+{
+ struct otx2_tim_evdev *dev = tim_priv_get();
+ struct rsrc_detach_req *dtch_req;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
+ dtch_req = otx2_mbox_alloc_msg_detach_resources(dev->mbox);
+ dtch_req->partial = true;
+ dtch_req->timlfs = true;
+
+ otx2_mbox_process(dev->mbox);
+ rte_memzone_free(rte_memzone_lookup(RTE_STR(OTX2_TIM_EVDEV_NAME)));
+}
diff --git a/src/spdk/dpdk/drivers/event/octeontx2/otx2_tim_evdev.h b/src/spdk/dpdk/drivers/event/octeontx2/otx2_tim_evdev.h
new file mode 100644
index 000000000..44e3c7b51
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/octeontx2/otx2_tim_evdev.h
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_TIM_EVDEV_H__
+#define __OTX2_TIM_EVDEV_H__
+
+#include <rte_event_timer_adapter.h>
+#include <rte_event_timer_adapter_pmd.h>
+#include <rte_reciprocal.h>
+
+#include "otx2_dev.h"
+
+#define OTX2_TIM_EVDEV_NAME otx2_tim_eventdev
+
+#define otx2_tim_func_trace otx2_tim_dbg
+
+#define TIM_LF_RING_AURA (0x0)
+#define TIM_LF_RING_BASE (0x130)
+#define TIM_LF_NRSPERR_INT (0x200)
+#define TIM_LF_NRSPERR_INT_W1S (0x208)
+#define TIM_LF_NRSPERR_INT_ENA_W1S (0x210)
+#define TIM_LF_NRSPERR_INT_ENA_W1C (0x218)
+#define TIM_LF_RAS_INT (0x300)
+#define TIM_LF_RAS_INT_W1S (0x308)
+#define TIM_LF_RAS_INT_ENA_W1S (0x310)
+#define TIM_LF_RAS_INT_ENA_W1C (0x318)
+#define TIM_LF_RING_REL (0x400)
+
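+/*
+ * Layout of the second word (w1) of a TIM bucket: bits [63:48] chunk
+ * remainder, [47:40] lock count, [39:35] reserved, [34] BSK, [33] HBT,
+ * [32] SBT and [31:0] number of entries.  The _S_/_M_ macros below give
+ * the shift and mask of each field.
+ */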
+#define TIM_BUCKET_W1_S_CHUNK_REMAINDER (48)
+#define TIM_BUCKET_W1_M_CHUNK_REMAINDER ((1ULL << (64 - \
+ TIM_BUCKET_W1_S_CHUNK_REMAINDER)) - 1)
+#define TIM_BUCKET_W1_S_LOCK (40)
+#define TIM_BUCKET_W1_M_LOCK ((1ULL << \
+ (TIM_BUCKET_W1_S_CHUNK_REMAINDER - \
+ TIM_BUCKET_W1_S_LOCK)) - 1)
+#define TIM_BUCKET_W1_S_RSVD (35)
+#define TIM_BUCKET_W1_S_BSK (34)
+#define TIM_BUCKET_W1_M_BSK ((1ULL << \
+ (TIM_BUCKET_W1_S_RSVD - \
+ TIM_BUCKET_W1_S_BSK)) - 1)
+#define TIM_BUCKET_W1_S_HBT (33)
+#define TIM_BUCKET_W1_M_HBT ((1ULL << \
+ (TIM_BUCKET_W1_S_BSK - \
+ TIM_BUCKET_W1_S_HBT)) - 1)
+#define TIM_BUCKET_W1_S_SBT (32)
+#define TIM_BUCKET_W1_M_SBT ((1ULL << \
+ (TIM_BUCKET_W1_S_HBT - \
+ TIM_BUCKET_W1_S_SBT)) - 1)
+#define TIM_BUCKET_W1_S_NUM_ENTRIES (0)
+#define TIM_BUCKET_W1_M_NUM_ENTRIES ((1ULL << \
+ (TIM_BUCKET_W1_S_SBT - \
+ TIM_BUCKET_W1_S_NUM_ENTRIES)) - 1)
+
+#define TIM_BUCKET_SEMA (TIM_BUCKET_CHUNK_REMAIN)
+
+#define TIM_BUCKET_CHUNK_REMAIN \
+ (TIM_BUCKET_W1_M_CHUNK_REMAINDER << TIM_BUCKET_W1_S_CHUNK_REMAINDER)
+
+#define TIM_BUCKET_LOCK \
+ (TIM_BUCKET_W1_M_LOCK << TIM_BUCKET_W1_S_LOCK)
+
+#define TIM_BUCKET_SEMA_WLOCK \
+ (TIM_BUCKET_CHUNK_REMAIN | (1ull << TIM_BUCKET_W1_S_LOCK))
+
+#define OTX2_MAX_TIM_RINGS (256)
+#define OTX2_TIM_MAX_BUCKETS (0xFFFFF)
+#define OTX2_TIM_RING_DEF_CHUNK_SZ (4096)
+#define OTX2_TIM_CHUNK_ALIGNMENT (16)
+#define OTX2_TIM_MAX_BURST (RTE_CACHE_LINE_SIZE / \
+ OTX2_TIM_CHUNK_ALIGNMENT)
+#define OTX2_TIM_NB_CHUNK_SLOTS(sz) (((sz) / OTX2_TIM_CHUNK_ALIGNMENT) - 1)
+#define OTX2_TIM_MIN_CHUNK_SLOTS (0x1)
+#define OTX2_TIM_MAX_CHUNK_SLOTS (0x1FFE)
+#define OTX2_TIM_MIN_TMO_TKS (256)
+
+#define OTX2_TIM_SP 0x1
+#define OTX2_TIM_MP 0x2
+#define OTX2_TIM_BKT_AND 0x4
+#define OTX2_TIM_BKT_MOD 0x8
+#define OTX2_TIM_ENA_FB 0x10
+#define OTX2_TIM_ENA_DFB 0x20
+#define OTX2_TIM_ENA_STATS 0x40
+
+enum otx2_tim_clk_src {
+ OTX2_TIM_CLK_SRC_10NS = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
+ OTX2_TIM_CLK_SRC_GPIO = RTE_EVENT_TIMER_ADAPTER_EXT_CLK0,
+ OTX2_TIM_CLK_SRC_GTI = RTE_EVENT_TIMER_ADAPTER_EXT_CLK1,
+ OTX2_TIM_CLK_SRC_PTP = RTE_EVENT_TIMER_ADAPTER_EXT_CLK2,
+};
+
+struct otx2_tim_bkt {
+ uint64_t first_chunk;
+ union {
+ uint64_t w1;
+ struct {
+ uint32_t nb_entry;
+ uint8_t sbt:1;
+ uint8_t hbt:1;
+ uint8_t bsk:1;
+ uint8_t rsvd:5;
+ uint8_t lock;
+ int16_t chunk_remainder;
+ };
+ };
+ uint64_t current_chunk;
+ uint64_t pad;
+} __rte_packed __rte_aligned(32);
+
+struct otx2_tim_ent {
+ uint64_t w0;
+ uint64_t wqe;
+} __rte_packed;
+
+struct otx2_tim_ctl {
+ uint16_t ring;
+ uint16_t chunk_slots;
+ uint16_t disable_npa;
+ uint16_t enable_stats;
+};
+
+struct otx2_tim_evdev {
+ struct rte_pci_device *pci_dev;
+ struct rte_eventdev *event_dev;
+ struct otx2_mbox *mbox;
+ uint16_t nb_rings;
+ uint32_t chunk_sz;
+ uintptr_t bar2;
+ /* Dev args */
+ uint8_t disable_npa;
+ uint16_t chunk_slots;
+ uint16_t min_ring_cnt;
+ uint8_t enable_stats;
+ uint16_t ring_ctl_cnt;
+ struct otx2_tim_ctl *ring_ctl_data;
+ /* HW const */
+ /* MSIX offsets */
+ uint16_t tim_msixoff[OTX2_MAX_TIM_RINGS];
+};
+
+struct otx2_tim_ring {
+ uintptr_t base;
+ uint16_t nb_chunk_slots;
+ uint32_t nb_bkts;
+ uint64_t last_updt_cyc;
+ uint64_t ring_start_cyc;
+ uint64_t tck_int;
+ uint64_t tot_int;
+ struct otx2_tim_bkt *bkt;
+ struct rte_mempool *chunk_pool;
+ struct rte_reciprocal_u64 fast_div;
+ uint64_t arm_cnt;
+ uint8_t prod_type_sp;
+ uint8_t enable_stats;
+ uint8_t disable_npa;
+ uint8_t optimized;
+ uint8_t ena_dfb;
+ uint16_t ring_id;
+ uint32_t aura;
+ uint64_t nb_timers;
+ uint64_t tck_nsec;
+ uint64_t max_tout;
+ uint64_t nb_chunks;
+ uint64_t chunk_sz;
+ uint64_t tenns_clk_freq;
+ enum otx2_tim_clk_src clk_src;
+} __rte_cache_aligned;
+
+static inline struct otx2_tim_evdev *
+tim_priv_get(void)
+{
+ const struct rte_memzone *mz;
+
+ mz = rte_memzone_lookup(RTE_STR(OTX2_TIM_EVDEV_NAME));
+ if (mz == NULL)
+ return NULL;
+
+ return mz->addr;
+}
+
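+/*
+ * Fast path specialisation table: each FP() entry names one variant of the
+ * arm routine, with the flag columns selecting stats accounting, modulo vs.
+ * and-mask bucket indexing, buffer free behaviour and single vs. multi
+ * producer.  For example, the FP(mod_sp, ...) entry is expanded by
+ * otx2_tim_worker.c into a function named otx2_tim_arm_burst_mod_sp().
+ */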
+#define TIM_ARM_FASTPATH_MODES \
+FP(mod_sp, 0, 0, 0, 0, OTX2_TIM_BKT_MOD | OTX2_TIM_ENA_DFB | OTX2_TIM_SP) \
+FP(mod_mp, 0, 0, 0, 1, OTX2_TIM_BKT_MOD | OTX2_TIM_ENA_DFB | OTX2_TIM_MP) \
+FP(mod_fb_sp, 0, 0, 1, 0, OTX2_TIM_BKT_MOD | OTX2_TIM_ENA_FB | OTX2_TIM_SP) \
+FP(mod_fb_mp, 0, 0, 1, 1, OTX2_TIM_BKT_MOD | OTX2_TIM_ENA_FB | OTX2_TIM_MP) \
+FP(and_sp, 0, 1, 0, 0, OTX2_TIM_BKT_AND | OTX2_TIM_ENA_DFB | OTX2_TIM_SP) \
+FP(and_mp, 0, 1, 0, 1, OTX2_TIM_BKT_AND | OTX2_TIM_ENA_DFB | OTX2_TIM_MP) \
+FP(and_fb_sp, 0, 1, 1, 0, OTX2_TIM_BKT_AND | OTX2_TIM_ENA_FB | OTX2_TIM_SP) \
+FP(and_fb_mp, 0, 1, 1, 1, OTX2_TIM_BKT_AND | OTX2_TIM_ENA_FB | OTX2_TIM_MP) \
+FP(stats_mod_sp, 1, 0, 0, 0, OTX2_TIM_ENA_STATS | OTX2_TIM_BKT_MOD | \
+ OTX2_TIM_ENA_DFB | OTX2_TIM_SP) \
+FP(stats_mod_mp, 1, 0, 0, 1, OTX2_TIM_ENA_STATS | OTX2_TIM_BKT_MOD | \
+ OTX2_TIM_ENA_DFB | OTX2_TIM_MP) \
+FP(stats_mod_fb_sp, 1, 0, 1, 0, OTX2_TIM_ENA_STATS | OTX2_TIM_BKT_MOD | \
+ OTX2_TIM_ENA_FB | OTX2_TIM_SP) \
+FP(stats_mod_fb_mp, 1, 0, 1, 1, OTX2_TIM_ENA_STATS | OTX2_TIM_BKT_MOD | \
+ OTX2_TIM_ENA_FB | OTX2_TIM_MP) \
+FP(stats_and_sp, 1, 1, 0, 0, OTX2_TIM_ENA_STATS | OTX2_TIM_BKT_AND | \
+ OTX2_TIM_ENA_DFB | OTX2_TIM_SP) \
+FP(stats_and_mp, 1, 1, 0, 1, OTX2_TIM_ENA_STATS | OTX2_TIM_BKT_AND | \
+ OTX2_TIM_ENA_DFB | OTX2_TIM_MP) \
+FP(stats_and_fb_sp, 1, 1, 1, 0, OTX2_TIM_ENA_STATS | OTX2_TIM_BKT_AND | \
+ OTX2_TIM_ENA_FB | OTX2_TIM_SP) \
+FP(stats_and_fb_mp, 1, 1, 1, 1, OTX2_TIM_ENA_STATS | OTX2_TIM_BKT_AND | \
+ OTX2_TIM_ENA_FB | OTX2_TIM_MP)
+
+#define TIM_ARM_TMO_FASTPATH_MODES \
+FP(mod, 0, 0, 0, OTX2_TIM_BKT_MOD | OTX2_TIM_ENA_DFB) \
+FP(mod_fb, 0, 0, 1, OTX2_TIM_BKT_MOD | OTX2_TIM_ENA_FB) \
+FP(and, 0, 1, 0, OTX2_TIM_BKT_AND | OTX2_TIM_ENA_DFB) \
+FP(and_fb, 0, 1, 1, OTX2_TIM_BKT_AND | OTX2_TIM_ENA_FB) \
+FP(stats_mod, 1, 0, 0, OTX2_TIM_ENA_STATS | OTX2_TIM_BKT_MOD | \
+ OTX2_TIM_ENA_DFB) \
+FP(stats_mod_fb, 1, 0, 1, OTX2_TIM_ENA_STATS | OTX2_TIM_BKT_MOD | \
+ OTX2_TIM_ENA_FB) \
+FP(stats_and, 1, 1, 0, OTX2_TIM_ENA_STATS | OTX2_TIM_BKT_AND | \
+ OTX2_TIM_ENA_DFB) \
+FP(stats_and_fb, 1, 1, 1, OTX2_TIM_ENA_STATS | OTX2_TIM_BKT_AND | \
+ OTX2_TIM_ENA_FB)
+
+#define FP(_name, _f4, _f3, _f2, _f1, flags) \
+uint16_t \
+otx2_tim_arm_burst_ ## _name(const struct rte_event_timer_adapter *adptr, \
+ struct rte_event_timer **tim, \
+ const uint16_t nb_timers);
+TIM_ARM_FASTPATH_MODES
+#undef FP
+
+#define FP(_name, _f3, _f2, _f1, flags) \
+uint16_t \
+otx2_tim_arm_tmo_tick_burst_ ## _name( \
+ const struct rte_event_timer_adapter *adptr, \
+ struct rte_event_timer **tim, \
+ const uint64_t timeout_tick, const uint16_t nb_timers);
+TIM_ARM_TMO_FASTPATH_MODES
+#undef FP
+
+uint16_t otx2_tim_timer_cancel_burst(
+ const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers);
+
+int otx2_tim_caps_get(const struct rte_eventdev *dev, uint64_t flags,
+ uint32_t *caps,
+ const struct rte_event_timer_adapter_ops **ops);
+
+void otx2_tim_init(struct rte_pci_device *pci_dev, struct otx2_dev *cmn_dev);
+void otx2_tim_fini(void);
+
+/* TIM IRQ */
+int tim_register_irq(uint16_t ring_id);
+void tim_unregister_irq(uint16_t ring_id);
+
+#endif /* __OTX2_TIM_EVDEV_H__ */
diff --git a/src/spdk/dpdk/drivers/event/octeontx2/otx2_tim_worker.c b/src/spdk/dpdk/drivers/event/octeontx2/otx2_tim_worker.c
new file mode 100644
index 000000000..4b5cfdc72
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/octeontx2/otx2_tim_worker.c
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include "otx2_tim_evdev.h"
+#include "otx2_tim_worker.h"
+
+static inline int
+tim_arm_checks(const struct otx2_tim_ring * const tim_ring,
+ struct rte_event_timer * const tim)
+{
+ if (unlikely(tim->state)) {
+ tim->state = RTE_EVENT_TIMER_ERROR;
+ rte_errno = EALREADY;
+ goto fail;
+ }
+
+ if (unlikely(!tim->timeout_ticks ||
+ tim->timeout_ticks >= tim_ring->nb_bkts)) {
+ tim->state = tim->timeout_ticks ? RTE_EVENT_TIMER_ERROR_TOOLATE
+ : RTE_EVENT_TIMER_ERROR_TOOEARLY;
+ rte_errno = EINVAL;
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ return -EINVAL;
+}
+
+static inline void
+tim_format_event(const struct rte_event_timer * const tim,
+ struct otx2_tim_ent * const entry)
+{
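+	/* Repack the rte_event word into the TIM entry format: w0 carries
+	 * the tag, group and scheduling type as expected by the TIM
+	 * hardware, wqe carries the caller's 64-bit event payload.
+	 */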
+ entry->w0 = (tim->ev.event & 0xFFC000000000) >> 6 |
+ (tim->ev.event & 0xFFFFFFFFF);
+ entry->wqe = tim->ev.u64;
+}
+
+static inline void
+tim_sync_start_cyc(struct otx2_tim_ring *tim_ring)
+{
+ uint64_t cur_cyc = rte_rdtsc();
+ uint32_t real_bkt;
+
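+	/* If more than one full ring rotation has passed since the last
+	 * update, re-derive ring_start_cyc from the bucket the hardware
+	 * currently reports so the software bucket math stays in sync.
+	 */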
+ if (cur_cyc - tim_ring->last_updt_cyc > tim_ring->tot_int) {
+ real_bkt = otx2_read64(tim_ring->base + TIM_LF_RING_REL) >> 44;
+ cur_cyc = rte_rdtsc();
+
+ tim_ring->ring_start_cyc = cur_cyc -
+ (real_bkt * tim_ring->tck_int);
+ tim_ring->last_updt_cyc = cur_cyc;
+ }
+
+}
+
+static __rte_always_inline uint16_t
+tim_timer_arm_burst(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim,
+ const uint16_t nb_timers,
+ const uint8_t flags)
+{
+ struct otx2_tim_ring *tim_ring = adptr->data->adapter_priv;
+ struct otx2_tim_ent entry;
+ uint16_t index;
+ int ret;
+
+ tim_sync_start_cyc(tim_ring);
+ for (index = 0; index < nb_timers; index++) {
+ if (tim_arm_checks(tim_ring, tim[index]))
+ break;
+
+ tim_format_event(tim[index], &entry);
+ if (flags & OTX2_TIM_SP)
+ ret = tim_add_entry_sp(tim_ring,
+ tim[index]->timeout_ticks,
+ tim[index], &entry, flags);
+ if (flags & OTX2_TIM_MP)
+ ret = tim_add_entry_mp(tim_ring,
+ tim[index]->timeout_ticks,
+ tim[index], &entry, flags);
+
+ if (unlikely(ret)) {
+ rte_errno = -ret;
+ break;
+ }
+ }
+
+ if (flags & OTX2_TIM_ENA_STATS)
+ __atomic_fetch_add(&tim_ring->arm_cnt, index, __ATOMIC_RELAXED);
+
+ return index;
+}
+
+static __rte_always_inline uint16_t
+tim_timer_arm_tmo_brst(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim,
+ const uint64_t timeout_tick,
+ const uint16_t nb_timers, const uint8_t flags)
+{
+ struct otx2_tim_ent entry[OTX2_TIM_MAX_BURST] __rte_cache_aligned;
+ struct otx2_tim_ring *tim_ring = adptr->data->adapter_priv;
+ uint16_t set_timers = 0;
+ uint16_t arr_idx = 0;
+ uint16_t idx;
+ int ret;
+
+ if (unlikely(!timeout_tick || timeout_tick >= tim_ring->nb_bkts)) {
+ const enum rte_event_timer_state state = timeout_tick ?
+ RTE_EVENT_TIMER_ERROR_TOOLATE :
+ RTE_EVENT_TIMER_ERROR_TOOEARLY;
+ for (idx = 0; idx < nb_timers; idx++)
+ tim[idx]->state = state;
+
+ rte_errno = EINVAL;
+ return 0;
+ }
+
+ tim_sync_start_cyc(tim_ring);
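+	/* Insert the timers in small cache-line sized batches; a short
+	 * return from tim_add_entry_brst() terminates the burst early.
+	 */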
+ while (arr_idx < nb_timers) {
+ for (idx = 0; idx < OTX2_TIM_MAX_BURST && (arr_idx < nb_timers);
+ idx++, arr_idx++) {
+ tim_format_event(tim[arr_idx], &entry[idx]);
+ }
+ ret = tim_add_entry_brst(tim_ring, timeout_tick,
+ &tim[set_timers], entry, idx, flags);
+ set_timers += ret;
+ if (ret != idx)
+ break;
+ }
+ if (flags & OTX2_TIM_ENA_STATS)
+ __atomic_fetch_add(&tim_ring->arm_cnt, set_timers,
+ __ATOMIC_RELAXED);
+
+ return set_timers;
+}
+
+#define FP(_name, _f4, _f3, _f2, _f1, _flags) \
+uint16_t __rte_noinline \
+otx2_tim_arm_burst_ ## _name(const struct rte_event_timer_adapter *adptr, \
+ struct rte_event_timer **tim, \
+ const uint16_t nb_timers) \
+{ \
+ return tim_timer_arm_burst(adptr, tim, nb_timers, _flags); \
+}
+TIM_ARM_FASTPATH_MODES
+#undef FP
+
+#define FP(_name, _f3, _f2, _f1, _flags) \
+uint16_t __rte_noinline \
+otx2_tim_arm_tmo_tick_burst_ ## _name( \
+ const struct rte_event_timer_adapter *adptr, \
+ struct rte_event_timer **tim, \
+ const uint64_t timeout_tick, \
+ const uint16_t nb_timers) \
+{ \
+ return tim_timer_arm_tmo_brst(adptr, tim, timeout_tick, \
+ nb_timers, _flags); \
+}
+TIM_ARM_TMO_FASTPATH_MODES
+#undef FP
+
+uint16_t
+otx2_tim_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim,
+ const uint16_t nb_timers)
+{
+ uint16_t index;
+ int ret;
+
+ RTE_SET_USED(adptr);
+ for (index = 0; index < nb_timers; index++) {
+ if (tim[index]->state == RTE_EVENT_TIMER_CANCELED) {
+ rte_errno = EALREADY;
+ break;
+ }
+
+ if (tim[index]->state != RTE_EVENT_TIMER_ARMED) {
+ rte_errno = EINVAL;
+ break;
+ }
+ ret = tim_rm_entry(tim[index]);
+ if (ret) {
+ rte_errno = -ret;
+ break;
+ }
+ }
+
+ return index;
+}
diff --git a/src/spdk/dpdk/drivers/event/octeontx2/otx2_tim_worker.h b/src/spdk/dpdk/drivers/event/octeontx2/otx2_tim_worker.h
new file mode 100644
index 000000000..af2f864d7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/octeontx2/otx2_tim_worker.h
@@ -0,0 +1,583 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_TIM_WORKER_H__
+#define __OTX2_TIM_WORKER_H__
+
+#include "otx2_tim_evdev.h"
+
+static inline uint8_t
+tim_bkt_fetch_lock(uint64_t w1)
+{
+ return (w1 >> TIM_BUCKET_W1_S_LOCK) &
+ TIM_BUCKET_W1_M_LOCK;
+}
+
+static inline int16_t
+tim_bkt_fetch_rem(uint64_t w1)
+{
+ return (w1 >> TIM_BUCKET_W1_S_CHUNK_REMAINDER) &
+ TIM_BUCKET_W1_M_CHUNK_REMAINDER;
+}
+
+static inline int16_t
+tim_bkt_get_rem(struct otx2_tim_bkt *bktp)
+{
+ return __atomic_load_n(&bktp->chunk_remainder, __ATOMIC_ACQUIRE);
+}
+
+static inline void
+tim_bkt_set_rem(struct otx2_tim_bkt *bktp, uint16_t v)
+{
+ __atomic_store_n(&bktp->chunk_remainder, v, __ATOMIC_RELAXED);
+}
+
+static inline void
+tim_bkt_sub_rem(struct otx2_tim_bkt *bktp, uint16_t v)
+{
+ __atomic_fetch_sub(&bktp->chunk_remainder, v, __ATOMIC_RELAXED);
+}
+
+static inline uint8_t
+tim_bkt_get_hbt(uint64_t w1)
+{
+ return (w1 >> TIM_BUCKET_W1_S_HBT) & TIM_BUCKET_W1_M_HBT;
+}
+
+static inline uint8_t
+tim_bkt_get_bsk(uint64_t w1)
+{
+ return (w1 >> TIM_BUCKET_W1_S_BSK) & TIM_BUCKET_W1_M_BSK;
+}
+
+static inline uint64_t
+tim_bkt_clr_bsk(struct otx2_tim_bkt *bktp)
+{
+ /* Clear everything except lock. */
+ const uint64_t v = TIM_BUCKET_W1_M_LOCK << TIM_BUCKET_W1_S_LOCK;
+
+ return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL);
+}
+
+static inline uint64_t
+tim_bkt_fetch_sema_lock(struct otx2_tim_bkt *bktp)
+{
+ return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA_WLOCK,
+ __ATOMIC_ACQUIRE);
+}
+
+static inline uint64_t
+tim_bkt_fetch_sema(struct otx2_tim_bkt *bktp)
+{
+ return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA, __ATOMIC_RELAXED);
+}
+
+static inline uint64_t
+tim_bkt_inc_lock(struct otx2_tim_bkt *bktp)
+{
+ const uint64_t v = 1ull << TIM_BUCKET_W1_S_LOCK;
+
+ return __atomic_fetch_add(&bktp->w1, v, __ATOMIC_ACQUIRE);
+}
+
+static inline void
+tim_bkt_dec_lock(struct otx2_tim_bkt *bktp)
+{
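+	/* The lock field is a single byte, so adding 0xff wraps around and
+	 * effectively decrements the lock count, releasing this thread's
+	 * hold on the bucket.
+	 */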
+ __atomic_add_fetch(&bktp->lock, 0xff, __ATOMIC_RELEASE);
+}
+
+static inline uint32_t
+tim_bkt_get_nent(uint64_t w1)
+{
+ return (w1 >> TIM_BUCKET_W1_S_NUM_ENTRIES) &
+ TIM_BUCKET_W1_M_NUM_ENTRIES;
+}
+
+static inline void
+tim_bkt_inc_nent(struct otx2_tim_bkt *bktp)
+{
+ __atomic_add_fetch(&bktp->nb_entry, 1, __ATOMIC_RELAXED);
+}
+
+static inline void
+tim_bkt_add_nent(struct otx2_tim_bkt *bktp, uint32_t v)
+{
+ __atomic_add_fetch(&bktp->nb_entry, v, __ATOMIC_RELAXED);
+}
+
+static inline uint64_t
+tim_bkt_clr_nent(struct otx2_tim_bkt *bktp)
+{
+ const uint64_t v = ~(TIM_BUCKET_W1_M_NUM_ENTRIES <<
+ TIM_BUCKET_W1_S_NUM_ENTRIES);
+
+ return __atomic_and_fetch(&bktp->w1, v, __ATOMIC_ACQ_REL);
+}
+
+static __rte_always_inline void
+tim_get_target_bucket(struct otx2_tim_ring * const tim_ring,
+ const uint32_t rel_bkt, struct otx2_tim_bkt **bkt,
+ struct otx2_tim_bkt **mirr_bkt, const uint8_t flag)
+{
+ const uint64_t bkt_cyc = rte_rdtsc() - tim_ring->ring_start_cyc;
+ uint32_t bucket = rte_reciprocal_divide_u64(bkt_cyc,
+ &tim_ring->fast_div) + rel_bkt;
+ uint32_t mirr_bucket = 0;
+
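+	/* Convert the elapsed cycles into an absolute bucket index using the
+	 * precomputed reciprocal divide, then wrap it with either a modulo
+	 * (arbitrary ring size) or a mask (power-of-two ring size). The
+	 * mirror bucket sits half a ring away and carries the current chunk
+	 * pointer used during insertion.
+	 */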
+ if (flag & OTX2_TIM_BKT_MOD) {
+ bucket = bucket % tim_ring->nb_bkts;
+ mirr_bucket = (bucket + (tim_ring->nb_bkts >> 1)) %
+ tim_ring->nb_bkts;
+ }
+ if (flag & OTX2_TIM_BKT_AND) {
+ bucket = bucket & (tim_ring->nb_bkts - 1);
+ mirr_bucket = (bucket + (tim_ring->nb_bkts >> 1)) &
+ (tim_ring->nb_bkts - 1);
+ }
+
+ *bkt = &tim_ring->bkt[bucket];
+ *mirr_bkt = &tim_ring->bkt[mirr_bucket];
+}
+
+static struct otx2_tim_ent *
+tim_clr_bkt(struct otx2_tim_ring * const tim_ring,
+ struct otx2_tim_bkt * const bkt)
+{
+#define TIM_MAX_OUTSTANDING_OBJ 64
+ void *pend_chunks[TIM_MAX_OUTSTANDING_OBJ];
+ struct otx2_tim_ent *chunk;
+ struct otx2_tim_ent *pnext;
+ uint8_t objs = 0;
+
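+	/* Walk the chunk list hanging off this bucket and return the chunks
+	 * to the mempool in batches of TIM_MAX_OUTSTANDING_OBJ; the first
+	 * chunk is kept and handed back to the caller for reuse.
+	 */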
+ chunk = ((struct otx2_tim_ent *)(uintptr_t)bkt->first_chunk);
+ chunk = (struct otx2_tim_ent *)(uintptr_t)(chunk +
+ tim_ring->nb_chunk_slots)->w0;
+ while (chunk) {
+ pnext = (struct otx2_tim_ent *)(uintptr_t)
+ ((chunk + tim_ring->nb_chunk_slots)->w0);
+ if (objs == TIM_MAX_OUTSTANDING_OBJ) {
+ rte_mempool_put_bulk(tim_ring->chunk_pool, pend_chunks,
+ objs);
+ objs = 0;
+ }
+ pend_chunks[objs++] = chunk;
+ chunk = pnext;
+ }
+
+ if (objs)
+ rte_mempool_put_bulk(tim_ring->chunk_pool, pend_chunks,
+ objs);
+
+ return (struct otx2_tim_ent *)(uintptr_t)bkt->first_chunk;
+}
+
+static struct otx2_tim_ent *
+tim_refill_chunk(struct otx2_tim_bkt * const bkt,
+ struct otx2_tim_bkt * const mirr_bkt,
+ struct otx2_tim_ring * const tim_ring)
+{
+ struct otx2_tim_ent *chunk;
+
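+	/* If the bucket still has live entries (or no chunk at all), pull a
+	 * fresh chunk from the mempool and link it after the current one;
+	 * otherwise the bucket only holds old chunks, which are recycled via
+	 * tim_clr_bkt() instead of allocating.
+	 */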
+ if (bkt->nb_entry || !bkt->first_chunk) {
+ if (unlikely(rte_mempool_get(tim_ring->chunk_pool,
+ (void **)&chunk)))
+ return NULL;
+ if (bkt->nb_entry) {
+ *(uint64_t *)(((struct otx2_tim_ent *)
+ mirr_bkt->current_chunk) +
+ tim_ring->nb_chunk_slots) =
+ (uintptr_t)chunk;
+ } else {
+ bkt->first_chunk = (uintptr_t)chunk;
+ }
+ } else {
+ chunk = tim_clr_bkt(tim_ring, bkt);
+ bkt->first_chunk = (uintptr_t)chunk;
+ }
+ *(uint64_t *)(chunk + tim_ring->nb_chunk_slots) = 0;
+
+ return chunk;
+}
+
+static struct otx2_tim_ent *
+tim_insert_chunk(struct otx2_tim_bkt * const bkt,
+ struct otx2_tim_bkt * const mirr_bkt,
+ struct otx2_tim_ring * const tim_ring)
+{
+ struct otx2_tim_ent *chunk;
+
+ if (unlikely(rte_mempool_get(tim_ring->chunk_pool, (void **)&chunk)))
+ return NULL;
+
+ *(uint64_t *)(chunk + tim_ring->nb_chunk_slots) = 0;
+ if (bkt->nb_entry) {
+ *(uint64_t *)(((struct otx2_tim_ent *)(uintptr_t)
+ mirr_bkt->current_chunk) +
+ tim_ring->nb_chunk_slots) = (uintptr_t)chunk;
+ } else {
+ bkt->first_chunk = (uintptr_t)chunk;
+ }
+ return chunk;
+}
+
+static __rte_always_inline int
+tim_add_entry_sp(struct otx2_tim_ring * const tim_ring,
+ const uint32_t rel_bkt,
+ struct rte_event_timer * const tim,
+ const struct otx2_tim_ent * const pent,
+ const uint8_t flags)
+{
+ struct otx2_tim_bkt *mirr_bkt;
+ struct otx2_tim_ent *chunk;
+ struct otx2_tim_bkt *bkt;
+ uint64_t lock_sema;
+ int16_t rem;
+
+__retry:
+ tim_get_target_bucket(tim_ring, rel_bkt, &bkt, &mirr_bkt, flags);
+
+	/* Get the bucket semaphore. */
+ lock_sema = tim_bkt_fetch_sema_lock(bkt);
+
+ /* Bucket related checks. */
+ if (unlikely(tim_bkt_get_hbt(lock_sema))) {
+ if (tim_bkt_get_nent(lock_sema) != 0) {
+ uint64_t hbt_state;
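+			/* Hardware is traversing this bucket; wait for bit 33
+			 * of w1 to clear (WFE based wait on arm64, plain
+			 * polling otherwise) and reselect the target bucket if
+			 * bit 34 ends up clear.
+			 */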
+#ifdef RTE_ARCH_ARM64
+ asm volatile(
+ " ldaxr %[hbt], [%[w1]] \n"
+ " tbz %[hbt], 33, dne%= \n"
+ " sevl \n"
+ "rty%=: wfe \n"
+ " ldaxr %[hbt], [%[w1]] \n"
+ " tbnz %[hbt], 33, rty%= \n"
+ "dne%=: \n"
+ : [hbt] "=&r" (hbt_state)
+ : [w1] "r" ((&bkt->w1))
+ : "memory"
+ );
+#else
+ do {
+ hbt_state = __atomic_load_n(&bkt->w1,
+ __ATOMIC_ACQUIRE);
+ } while (hbt_state & BIT_ULL(33));
+#endif
+
+ if (!(hbt_state & BIT_ULL(34))) {
+ tim_bkt_dec_lock(bkt);
+ goto __retry;
+ }
+ }
+ }
+ /* Insert the work. */
+ rem = tim_bkt_fetch_rem(lock_sema);
+
+ if (!rem) {
+ if (flags & OTX2_TIM_ENA_FB)
+ chunk = tim_refill_chunk(bkt, mirr_bkt, tim_ring);
+ if (flags & OTX2_TIM_ENA_DFB)
+ chunk = tim_insert_chunk(bkt, mirr_bkt, tim_ring);
+
+ if (unlikely(chunk == NULL)) {
+ bkt->chunk_remainder = 0;
+ tim_bkt_dec_lock(bkt);
+ tim->impl_opaque[0] = 0;
+ tim->impl_opaque[1] = 0;
+ tim->state = RTE_EVENT_TIMER_ERROR;
+ return -ENOMEM;
+ }
+ mirr_bkt->current_chunk = (uintptr_t)chunk;
+ bkt->chunk_remainder = tim_ring->nb_chunk_slots - 1;
+ } else {
+ chunk = (struct otx2_tim_ent *)mirr_bkt->current_chunk;
+ chunk += tim_ring->nb_chunk_slots - rem;
+ }
+
+ /* Copy work entry. */
+ *chunk = *pent;
+
+ tim_bkt_inc_nent(bkt);
+ tim_bkt_dec_lock(bkt);
+
+ tim->impl_opaque[0] = (uintptr_t)chunk;
+ tim->impl_opaque[1] = (uintptr_t)bkt;
+ tim->state = RTE_EVENT_TIMER_ARMED;
+
+ return 0;
+}
+
+static __rte_always_inline int
+tim_add_entry_mp(struct otx2_tim_ring * const tim_ring,
+ const uint32_t rel_bkt,
+ struct rte_event_timer * const tim,
+ const struct otx2_tim_ent * const pent,
+ const uint8_t flags)
+{
+ struct otx2_tim_bkt *mirr_bkt;
+ struct otx2_tim_ent *chunk;
+ struct otx2_tim_bkt *bkt;
+ uint64_t lock_sema;
+ int16_t rem;
+
+__retry:
+ tim_get_target_bucket(tim_ring, rel_bkt, &bkt, &mirr_bkt, flags);
+	/* Get the bucket semaphore. */
+ lock_sema = tim_bkt_fetch_sema_lock(bkt);
+
+ /* Bucket related checks. */
+ if (unlikely(tim_bkt_get_hbt(lock_sema))) {
+ if (tim_bkt_get_nent(lock_sema) != 0) {
+ uint64_t hbt_state;
+#ifdef RTE_ARCH_ARM64
+ asm volatile(
+ " ldaxr %[hbt], [%[w1]] \n"
+ " tbz %[hbt], 33, dne%= \n"
+ " sevl \n"
+ "rty%=: wfe \n"
+ " ldaxr %[hbt], [%[w1]] \n"
+ " tbnz %[hbt], 33, rty%= \n"
+ "dne%=: \n"
+ : [hbt] "=&r" (hbt_state)
+ : [w1] "r" ((&bkt->w1))
+ : "memory"
+ );
+#else
+ do {
+ hbt_state = __atomic_load_n(&bkt->w1,
+ __ATOMIC_ACQUIRE);
+ } while (hbt_state & BIT_ULL(33));
+#endif
+
+ if (!(hbt_state & BIT_ULL(34))) {
+ tim_bkt_dec_lock(bkt);
+ goto __retry;
+ }
+ }
+ }
+
+ rem = tim_bkt_fetch_rem(lock_sema);
+ if (rem < 0) {
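+		/* A negative remainder means another producer is installing a
+		 * new chunk; spin until chunk_remainder turns non-negative,
+		 * then drop our lock and retry from bucket selection.
+		 */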
+#ifdef RTE_ARCH_ARM64
+ asm volatile(
+ " ldaxrh %w[rem], [%[crem]] \n"
+ " tbz %w[rem], 15, dne%= \n"
+ " sevl \n"
+ "rty%=: wfe \n"
+ " ldaxrh %w[rem], [%[crem]] \n"
+ " tbnz %w[rem], 15, rty%= \n"
+ "dne%=: \n"
+ : [rem] "=&r" (rem)
+ : [crem] "r" (&bkt->chunk_remainder)
+ : "memory"
+ );
+#else
+ while (__atomic_load_n(&bkt->chunk_remainder,
+ __ATOMIC_ACQUIRE) < 0)
+ ;
+#endif
+		/* Go to a different bucket. */
+ tim_bkt_dec_lock(bkt);
+ goto __retry;
+ } else if (!rem) {
+		/* Only one thread can be here. */
+ if (flags & OTX2_TIM_ENA_FB)
+ chunk = tim_refill_chunk(bkt, mirr_bkt, tim_ring);
+ if (flags & OTX2_TIM_ENA_DFB)
+ chunk = tim_insert_chunk(bkt, mirr_bkt, tim_ring);
+
+ if (unlikely(chunk == NULL)) {
+ tim_bkt_set_rem(bkt, 0);
+ tim_bkt_dec_lock(bkt);
+ tim->impl_opaque[0] = 0;
+ tim->impl_opaque[1] = 0;
+ tim->state = RTE_EVENT_TIMER_ERROR;
+ return -ENOMEM;
+ }
+ *chunk = *pent;
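+		/* Wait until every other producer that took a slot in the
+		 * previous chunk has dropped its lock before publishing the
+		 * new chunk and resetting the remainder with release
+		 * semantics.
+		 */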
+ while (tim_bkt_fetch_lock(lock_sema) !=
+ (-tim_bkt_fetch_rem(lock_sema)))
+ lock_sema = __atomic_load_n(&bkt->w1, __ATOMIC_ACQUIRE);
+
+ mirr_bkt->current_chunk = (uintptr_t)chunk;
+ __atomic_store_n(&bkt->chunk_remainder,
+ tim_ring->nb_chunk_slots - 1, __ATOMIC_RELEASE);
+ } else {
+ chunk = (struct otx2_tim_ent *)mirr_bkt->current_chunk;
+ chunk += tim_ring->nb_chunk_slots - rem;
+ *chunk = *pent;
+ }
+
+	/* Account for the new entry and release the bucket lock. */
+ tim_bkt_inc_nent(bkt);
+ tim_bkt_dec_lock(bkt);
+ tim->impl_opaque[0] = (uintptr_t)chunk;
+ tim->impl_opaque[1] = (uintptr_t)bkt;
+ tim->state = RTE_EVENT_TIMER_ARMED;
+
+ return 0;
+}
+
+static inline uint16_t
+tim_cpy_wrk(uint16_t index, uint16_t cpy_lmt,
+ struct otx2_tim_ent *chunk,
+ struct rte_event_timer ** const tim,
+ const struct otx2_tim_ent * const ents,
+ const struct otx2_tim_bkt * const bkt)
+{
+ for (; index < cpy_lmt; index++) {
+ *chunk = *(ents + index);
+ tim[index]->impl_opaque[0] = (uintptr_t)chunk++;
+ tim[index]->impl_opaque[1] = (uintptr_t)bkt;
+ tim[index]->state = RTE_EVENT_TIMER_ARMED;
+ }
+
+ return index;
+}
+
+/* Burst mode functions */
+static inline int
+tim_add_entry_brst(struct otx2_tim_ring * const tim_ring,
+ const uint16_t rel_bkt,
+ struct rte_event_timer ** const tim,
+ const struct otx2_tim_ent *ents,
+ const uint16_t nb_timers, const uint8_t flags)
+{
+ struct otx2_tim_ent *chunk = NULL;
+ struct otx2_tim_bkt *mirr_bkt;
+ struct otx2_tim_bkt *bkt;
+ uint16_t chunk_remainder;
+ uint16_t index = 0;
+ uint64_t lock_sema;
+ int16_t rem, crem;
+ uint8_t lock_cnt;
+
+__retry:
+ tim_get_target_bucket(tim_ring, rel_bkt, &bkt, &mirr_bkt, flags);
+
+ /* Only one thread beyond this. */
+ lock_sema = tim_bkt_inc_lock(bkt);
+ lock_cnt = (uint8_t)
+ ((lock_sema >> TIM_BUCKET_W1_S_LOCK) & TIM_BUCKET_W1_M_LOCK);
+
+ if (lock_cnt) {
+ tim_bkt_dec_lock(bkt);
+ goto __retry;
+ }
+
+ /* Bucket related checks. */
+ if (unlikely(tim_bkt_get_hbt(lock_sema))) {
+ if (tim_bkt_get_nent(lock_sema) != 0) {
+ uint64_t hbt_state;
+#ifdef RTE_ARCH_ARM64
+ asm volatile(
+ " ldaxr %[hbt], [%[w1]] \n"
+ " tbz %[hbt], 33, dne%= \n"
+ " sevl \n"
+ "rty%=: wfe \n"
+ " ldaxr %[hbt], [%[w1]] \n"
+ " tbnz %[hbt], 33, rty%= \n"
+ "dne%=: \n"
+ : [hbt] "=&r" (hbt_state)
+ : [w1] "r" ((&bkt->w1))
+ : "memory"
+ );
+#else
+ do {
+ hbt_state = __atomic_load_n(&bkt->w1,
+ __ATOMIC_ACQUIRE);
+ } while (hbt_state & BIT_ULL(33));
+#endif
+
+ if (!(hbt_state & BIT_ULL(34))) {
+ tim_bkt_dec_lock(bkt);
+ goto __retry;
+ }
+ }
+ }
+
+ chunk_remainder = tim_bkt_fetch_rem(lock_sema);
+ rem = chunk_remainder - nb_timers;
+ if (rem < 0) {
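+		/* The burst does not fit in the current chunk: fill the
+		 * remaining slots first, then allocate (or recycle) a new
+		 * chunk for the rest of the timers.
+		 */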
+ crem = tim_ring->nb_chunk_slots - chunk_remainder;
+ if (chunk_remainder && crem) {
+ chunk = ((struct otx2_tim_ent *)
+ mirr_bkt->current_chunk) + crem;
+
+ index = tim_cpy_wrk(index, chunk_remainder, chunk, tim,
+ ents, bkt);
+ tim_bkt_sub_rem(bkt, chunk_remainder);
+ tim_bkt_add_nent(bkt, chunk_remainder);
+ }
+
+ if (flags & OTX2_TIM_ENA_FB)
+ chunk = tim_refill_chunk(bkt, mirr_bkt, tim_ring);
+ if (flags & OTX2_TIM_ENA_DFB)
+ chunk = tim_insert_chunk(bkt, mirr_bkt, tim_ring);
+
+ if (unlikely(chunk == NULL)) {
+ tim_bkt_dec_lock(bkt);
+ rte_errno = ENOMEM;
+ tim[index]->state = RTE_EVENT_TIMER_ERROR;
+ return crem;
+ }
+ *(uint64_t *)(chunk + tim_ring->nb_chunk_slots) = 0;
+ mirr_bkt->current_chunk = (uintptr_t)chunk;
+ tim_cpy_wrk(index, nb_timers, chunk, tim, ents, bkt);
+
+ rem = nb_timers - chunk_remainder;
+ tim_bkt_set_rem(bkt, tim_ring->nb_chunk_slots - rem);
+ tim_bkt_add_nent(bkt, rem);
+ } else {
+ chunk = (struct otx2_tim_ent *)mirr_bkt->current_chunk;
+ chunk += (tim_ring->nb_chunk_slots - chunk_remainder);
+
+ tim_cpy_wrk(index, nb_timers, chunk, tim, ents, bkt);
+ tim_bkt_sub_rem(bkt, nb_timers);
+ tim_bkt_add_nent(bkt, nb_timers);
+ }
+
+ tim_bkt_dec_lock(bkt);
+
+ return nb_timers;
+}
+
+static int
+tim_rm_entry(struct rte_event_timer *tim)
+{
+ struct otx2_tim_ent *entry;
+ struct otx2_tim_bkt *bkt;
+ uint64_t lock_sema;
+
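+	/* A timer can only be cancelled while its implementation handles
+	 * still point at the armed entry and the bucket has not been picked
+	 * up by hardware; otherwise report -ENOENT.
+	 */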
+ if (tim->impl_opaque[1] == 0 || tim->impl_opaque[0] == 0)
+ return -ENOENT;
+
+ entry = (struct otx2_tim_ent *)(uintptr_t)tim->impl_opaque[0];
+ if (entry->wqe != tim->ev.u64) {
+ tim->impl_opaque[0] = 0;
+ tim->impl_opaque[1] = 0;
+ return -ENOENT;
+ }
+
+ bkt = (struct otx2_tim_bkt *)(uintptr_t)tim->impl_opaque[1];
+ lock_sema = tim_bkt_inc_lock(bkt);
+ if (tim_bkt_get_hbt(lock_sema) || !tim_bkt_get_nent(lock_sema)) {
+ tim_bkt_dec_lock(bkt);
+ tim->impl_opaque[0] = 0;
+ tim->impl_opaque[1] = 0;
+ return -ENOENT;
+ }
+
+ entry->w0 = 0;
+ entry->wqe = 0;
+ tim_bkt_dec_lock(bkt);
+
+ tim->state = RTE_EVENT_TIMER_CANCELED;
+ tim->impl_opaque[0] = 0;
+ tim->impl_opaque[1] = 0;
+
+ return 0;
+}
+
+#endif /* __OTX2_TIM_WORKER_H__ */
diff --git a/src/spdk/dpdk/drivers/event/octeontx2/otx2_worker.c b/src/spdk/dpdk/drivers/event/octeontx2/otx2_worker.c
new file mode 100644
index 000000000..88bac391c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/octeontx2/otx2_worker.c
@@ -0,0 +1,371 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include "otx2_worker.h"
+
+static __rte_noinline uint8_t
+otx2_ssogws_new_event(struct otx2_ssogws *ws, const struct rte_event *ev)
+{
+ const uint32_t tag = (uint32_t)ev->event;
+ const uint8_t new_tt = ev->sched_type;
+ const uint64_t event_ptr = ev->u64;
+ const uint16_t grp = ev->queue_id;
+
+ if (ws->xaq_lmt <= *ws->fc_mem)
+ return 0;
+
+ otx2_ssogws_add_work(ws, event_ptr, tag, new_tt, grp);
+
+ return 1;
+}
+
+static __rte_always_inline void
+otx2_ssogws_fwd_swtag(struct otx2_ssogws *ws, const struct rte_event *ev)
+{
+ const uint32_t tag = (uint32_t)ev->event;
+ const uint8_t new_tt = ev->sched_type;
+ const uint8_t cur_tt = ws->cur_tt;
+
+ /* 96XX model
+ * cur_tt/new_tt SSO_SYNC_ORDERED SSO_SYNC_ATOMIC SSO_SYNC_UNTAGGED
+ *
+ * SSO_SYNC_ORDERED norm norm untag
+ * SSO_SYNC_ATOMIC norm norm untag
+ * SSO_SYNC_UNTAGGED norm norm NOOP
+ */
+
+ if (new_tt == SSO_SYNC_UNTAGGED) {
+ if (cur_tt != SSO_SYNC_UNTAGGED)
+ otx2_ssogws_swtag_untag(ws);
+ } else {
+ otx2_ssogws_swtag_norm(ws, tag, new_tt);
+ }
+
+ ws->swtag_req = 1;
+}
+
+static __rte_always_inline void
+otx2_ssogws_fwd_group(struct otx2_ssogws *ws, const struct rte_event *ev,
+ const uint16_t grp)
+{
+ const uint32_t tag = (uint32_t)ev->event;
+ const uint8_t new_tt = ev->sched_type;
+
+ otx2_write64(ev->u64, OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
+ SSOW_LF_GWS_OP_UPD_WQP_GRP1);
+ rte_smp_wmb();
+ otx2_ssogws_swtag_desched(ws, tag, new_tt, grp);
+}
+
+static __rte_always_inline void
+otx2_ssogws_forward_event(struct otx2_ssogws *ws, const struct rte_event *ev)
+{
+ const uint8_t grp = ev->queue_id;
+
+	/* Group hasn't changed; use SWTAG to forward the event. */
+ if (ws->cur_grp == grp)
+ otx2_ssogws_fwd_swtag(ws, ev);
+ else
+ /*
+		 * The group has changed for group-based work pipelining;
+		 * use the deschedule/add_work operation to transfer the
+		 * event to the new group/core.
+ */
+ otx2_ssogws_fwd_group(ws, ev, grp);
+}
+
+static __rte_always_inline void
+otx2_ssogws_release_event(struct otx2_ssogws *ws)
+{
+ otx2_ssogws_swtag_flush(ws);
+}
+
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+uint16_t __rte_hot \
+otx2_ssogws_deq_ ##name(void *port, struct rte_event *ev, \
+ uint64_t timeout_ticks) \
+{ \
+ struct otx2_ssogws *ws = port; \
+ \
+ RTE_SET_USED(timeout_ticks); \
+ \
+ if (ws->swtag_req) { \
+ ws->swtag_req = 0; \
+ otx2_ssogws_swtag_wait(ws); \
+ return 1; \
+ } \
+ \
+ return otx2_ssogws_get_work(ws, ev, flags, ws->lookup_mem); \
+} \
+ \
+uint16_t __rte_hot \
+otx2_ssogws_deq_burst_ ##name(void *port, struct rte_event ev[], \
+ uint16_t nb_events, \
+ uint64_t timeout_ticks) \
+{ \
+ RTE_SET_USED(nb_events); \
+ \
+ return otx2_ssogws_deq_ ##name(port, ev, timeout_ticks); \
+} \
+ \
+uint16_t __rte_hot \
+otx2_ssogws_deq_timeout_ ##name(void *port, struct rte_event *ev, \
+ uint64_t timeout_ticks) \
+{ \
+ struct otx2_ssogws *ws = port; \
+ uint16_t ret = 1; \
+ uint64_t iter; \
+ \
+ if (ws->swtag_req) { \
+ ws->swtag_req = 0; \
+ otx2_ssogws_swtag_wait(ws); \
+ return ret; \
+ } \
+ \
+ ret = otx2_ssogws_get_work(ws, ev, flags, ws->lookup_mem); \
+ for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) \
+ ret = otx2_ssogws_get_work(ws, ev, flags, \
+ ws->lookup_mem); \
+ \
+ return ret; \
+} \
+ \
+uint16_t __rte_hot \
+otx2_ssogws_deq_timeout_burst_ ##name(void *port, struct rte_event ev[],\
+ uint16_t nb_events, \
+ uint64_t timeout_ticks) \
+{ \
+ RTE_SET_USED(nb_events); \
+ \
+ return otx2_ssogws_deq_timeout_ ##name(port, ev, timeout_ticks);\
+} \
+ \
+uint16_t __rte_hot \
+otx2_ssogws_deq_seg_ ##name(void *port, struct rte_event *ev, \
+ uint64_t timeout_ticks) \
+{ \
+ struct otx2_ssogws *ws = port; \
+ \
+ RTE_SET_USED(timeout_ticks); \
+ \
+ if (ws->swtag_req) { \
+ ws->swtag_req = 0; \
+ otx2_ssogws_swtag_wait(ws); \
+ return 1; \
+ } \
+ \
+ return otx2_ssogws_get_work(ws, ev, flags | NIX_RX_MULTI_SEG_F, \
+ ws->lookup_mem); \
+} \
+ \
+uint16_t __rte_hot \
+otx2_ssogws_deq_seg_burst_ ##name(void *port, struct rte_event ev[], \
+ uint16_t nb_events, \
+ uint64_t timeout_ticks) \
+{ \
+ RTE_SET_USED(nb_events); \
+ \
+ return otx2_ssogws_deq_seg_ ##name(port, ev, timeout_ticks); \
+} \
+ \
+uint16_t __rte_hot \
+otx2_ssogws_deq_seg_timeout_ ##name(void *port, struct rte_event *ev, \
+ uint64_t timeout_ticks) \
+{ \
+ struct otx2_ssogws *ws = port; \
+ uint16_t ret = 1; \
+ uint64_t iter; \
+ \
+ if (ws->swtag_req) { \
+ ws->swtag_req = 0; \
+ otx2_ssogws_swtag_wait(ws); \
+ return ret; \
+ } \
+ \
+ ret = otx2_ssogws_get_work(ws, ev, flags | NIX_RX_MULTI_SEG_F, \
+ ws->lookup_mem); \
+ for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) \
+ ret = otx2_ssogws_get_work(ws, ev, \
+ flags | NIX_RX_MULTI_SEG_F, \
+ ws->lookup_mem); \
+ \
+ return ret; \
+} \
+ \
+uint16_t __rte_hot \
+otx2_ssogws_deq_seg_timeout_burst_ ##name(void *port, \
+ struct rte_event ev[], \
+ uint16_t nb_events, \
+ uint64_t timeout_ticks) \
+{ \
+ RTE_SET_USED(nb_events); \
+ \
+ return otx2_ssogws_deq_seg_timeout_ ##name(port, ev, \
+ timeout_ticks); \
+}
+
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+
+uint16_t __rte_hot
+otx2_ssogws_enq(void *port, const struct rte_event *ev)
+{
+ struct otx2_ssogws *ws = port;
+
+ switch (ev->op) {
+ case RTE_EVENT_OP_NEW:
+ rte_smp_mb();
+ return otx2_ssogws_new_event(ws, ev);
+ case RTE_EVENT_OP_FORWARD:
+ otx2_ssogws_forward_event(ws, ev);
+ break;
+ case RTE_EVENT_OP_RELEASE:
+ otx2_ssogws_release_event(ws);
+ break;
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+uint16_t __rte_hot
+otx2_ssogws_enq_burst(void *port, const struct rte_event ev[],
+ uint16_t nb_events)
+{
+ RTE_SET_USED(nb_events);
+ return otx2_ssogws_enq(port, ev);
+}
+
+uint16_t __rte_hot
+otx2_ssogws_enq_new_burst(void *port, const struct rte_event ev[],
+ uint16_t nb_events)
+{
+ struct otx2_ssogws *ws = port;
+ uint16_t i, rc = 1;
+
+ rte_smp_mb();
+ if (ws->xaq_lmt <= *ws->fc_mem)
+ return 0;
+
+ for (i = 0; i < nb_events && rc; i++)
+ rc = otx2_ssogws_new_event(ws, &ev[i]);
+
+ return nb_events;
+}
+
+uint16_t __rte_hot
+otx2_ssogws_enq_fwd_burst(void *port, const struct rte_event ev[],
+ uint16_t nb_events)
+{
+ struct otx2_ssogws *ws = port;
+
+ RTE_SET_USED(nb_events);
+ otx2_ssogws_forward_event(ws, ev);
+
+ return 1;
+}
+
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
+uint16_t __rte_hot \
+otx2_ssogws_tx_adptr_enq_ ## name(void *port, struct rte_event ev[], \
+ uint16_t nb_events) \
+{ \
+ struct otx2_ssogws *ws = port; \
+ uint64_t cmd[sz]; \
+ \
+ RTE_SET_USED(nb_events); \
+ return otx2_ssogws_event_tx(ws, ev, cmd, flags); \
+}
+SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+#undef T
+
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
+uint16_t __rte_hot \
+otx2_ssogws_tx_adptr_enq_seg_ ## name(void *port, struct rte_event ev[],\
+ uint16_t nb_events) \
+{ \
+ struct otx2_ssogws *ws = port; \
+ uint64_t cmd[(sz) + NIX_TX_MSEG_SG_DWORDS - 2]; \
+ \
+ RTE_SET_USED(nb_events); \
+ return otx2_ssogws_event_tx(ws, ev, cmd, (flags) | \
+ NIX_TX_MULTI_SEG_F); \
+}
+SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+#undef T
+
+void
+ssogws_flush_events(struct otx2_ssogws *ws, uint8_t queue_id, uintptr_t base,
+ otx2_handle_event_t fn, void *arg)
+{
+ uint64_t cq_ds_cnt = 1;
+ uint64_t aq_cnt = 1;
+ uint64_t ds_cnt = 1;
+ struct rte_event ev;
+ uint64_t enable;
+ uint64_t val;
+
+ enable = otx2_read64(base + SSO_LF_GGRP_QCTL);
+ if (!enable)
+ return;
+
+ val = queue_id; /* GGRP ID */
+ val |= BIT_ULL(18); /* Grouped */
+ val |= BIT_ULL(16); /* WAIT */
+
+ aq_cnt = otx2_read64(base + SSO_LF_GGRP_AQ_CNT);
+ ds_cnt = otx2_read64(base + SSO_LF_GGRP_MISC_CNT);
+ cq_ds_cnt = otx2_read64(base + SSO_LF_GGRP_INT_CNT);
+ cq_ds_cnt &= 0x3FFF3FFF0000;
+
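+	/* Keep issuing GET_WORK against this group until AQ_CNT, MISC_CNT and
+	 * the CQ/DS fields of INT_CNT all drain to zero, handing every
+	 * flushed event to the caller's callback when one is provided.
+	 */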
+ while (aq_cnt || cq_ds_cnt || ds_cnt) {
+ otx2_write64(val, ws->getwrk_op);
+ otx2_ssogws_get_work_empty(ws, &ev, 0);
+ if (fn != NULL && ev.u64 != 0)
+ fn(arg, ev);
+ if (ev.sched_type != SSO_TT_EMPTY)
+ otx2_ssogws_swtag_flush(ws);
+ rte_mb();
+ aq_cnt = otx2_read64(base + SSO_LF_GGRP_AQ_CNT);
+ ds_cnt = otx2_read64(base + SSO_LF_GGRP_MISC_CNT);
+ cq_ds_cnt = otx2_read64(base + SSO_LF_GGRP_INT_CNT);
+ /* Extract cq and ds count */
+ cq_ds_cnt &= 0x3FFF3FFF0000;
+ }
+
+ otx2_write64(0, OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
+ SSOW_LF_GWS_OP_GWC_INVAL);
+ rte_mb();
+}
+
+void
+ssogws_reset(struct otx2_ssogws *ws)
+{
+ uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);
+ uint64_t pend_state;
+ uint8_t pend_tt;
+ uint64_t tag;
+
+ /* Wait till getwork/swtp/waitw/desched completes. */
+ do {
+ pend_state = otx2_read64(base + SSOW_LF_GWS_PENDSTATE);
+ rte_mb();
+ } while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58)));
+
+ tag = otx2_read64(base + SSOW_LF_GWS_TAG);
+ pend_tt = (tag >> 32) & 0x3;
+ if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
+ if (pend_tt == SSO_SYNC_ATOMIC || pend_tt == SSO_SYNC_ORDERED)
+ otx2_ssogws_swtag_untag(ws);
+ otx2_ssogws_desched(ws);
+ }
+ rte_mb();
+
+ /* Wait for desched to complete. */
+ do {
+ pend_state = otx2_read64(base + SSOW_LF_GWS_PENDSTATE);
+ rte_mb();
+ } while (pend_state & BIT_ULL(58));
+}
diff --git a/src/spdk/dpdk/drivers/event/octeontx2/otx2_worker.h b/src/spdk/dpdk/drivers/event/octeontx2/otx2_worker.h
new file mode 100644
index 000000000..5f5aa8746
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/octeontx2/otx2_worker.h
@@ -0,0 +1,310 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_WORKER_H__
+#define __OTX2_WORKER_H__
+
+#include <rte_common.h>
+#include <rte_branch_prediction.h>
+
+#include <otx2_common.h>
+#include "otx2_evdev.h"
+#include "otx2_ethdev_sec_tx.h"
+
+/* SSO Operations */
+
+static __rte_always_inline uint16_t
+otx2_ssogws_get_work(struct otx2_ssogws *ws, struct rte_event *ev,
+ const uint32_t flags, const void * const lookup_mem)
+{
+ union otx2_sso_event event;
+ uint64_t tstamp_ptr;
+ uint64_t get_work1;
+ uint64_t mbuf;
+
+ otx2_write64(BIT_ULL(16) | /* wait for work. */
+ 1, /* Use Mask set 0. */
+ ws->getwrk_op);
+
+ if (flags & NIX_RX_OFFLOAD_PTYPE_F)
+ rte_prefetch_non_temporal(lookup_mem);
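+	/* Poll the tag and WQP registers until bit 63 of the tag word clears,
+	 * i.e. the GET_WORK issued above has completed (WFE based wait on
+	 * arm64, plain polling otherwise), and prefetch the returned WQE and
+	 * its mbuf.
+	 */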
+#ifdef RTE_ARCH_ARM64
+ asm volatile(
+ " ldr %[tag], [%[tag_loc]] \n"
+ " ldr %[wqp], [%[wqp_loc]] \n"
+ " tbz %[tag], 63, done%= \n"
+ " sevl \n"
+ "rty%=: wfe \n"
+ " ldr %[tag], [%[tag_loc]] \n"
+ " ldr %[wqp], [%[wqp_loc]] \n"
+ " tbnz %[tag], 63, rty%= \n"
+ "done%=: dmb ld \n"
+ " prfm pldl1keep, [%[wqp], #8] \n"
+ " sub %[mbuf], %[wqp], #0x80 \n"
+ " prfm pldl1keep, [%[mbuf]] \n"
+ : [tag] "=&r" (event.get_work0),
+ [wqp] "=&r" (get_work1),
+ [mbuf] "=&r" (mbuf)
+ : [tag_loc] "r" (ws->tag_op),
+ [wqp_loc] "r" (ws->wqp_op)
+ );
+#else
+ event.get_work0 = otx2_read64(ws->tag_op);
+ while ((BIT_ULL(63)) & event.get_work0)
+ event.get_work0 = otx2_read64(ws->tag_op);
+
+ get_work1 = otx2_read64(ws->wqp_op);
+ rte_prefetch0((const void *)get_work1);
+ mbuf = (uint64_t)((char *)get_work1 - sizeof(struct rte_mbuf));
+ rte_prefetch0((const void *)mbuf);
+#endif
+
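+	/* Rearrange the hardware tag word into the rte_event bit layout: the
+	 * scheduling fields returned by the SSO are shifted into their event
+	 * word positions while the 32-bit tag is kept in place.
+	 */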
+ event.get_work0 = (event.get_work0 & (0x3ull << 32)) << 6 |
+ (event.get_work0 & (0x3FFull << 36)) << 4 |
+ (event.get_work0 & 0xffffffff);
+ ws->cur_tt = event.sched_type;
+ ws->cur_grp = event.queue_id;
+
+ if (event.sched_type != SSO_TT_EMPTY &&
+ event.event_type == RTE_EVENT_TYPE_ETHDEV) {
+ otx2_wqe_to_mbuf(get_work1, mbuf, event.sub_event_type,
+ (uint32_t) event.get_work0, flags, lookup_mem);
+		/* Extract the timestamp if PTP is enabled. */
+ tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)get_work1)
+ + OTX2_SSO_WQE_SG_PTR);
+ otx2_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, ws->tstamp,
+ flags, (uint64_t *)tstamp_ptr);
+ get_work1 = mbuf;
+ }
+
+ ev->event = event.get_work0;
+ ev->u64 = get_work1;
+
+ return !!get_work1;
+}
+
+/* Used in cleaning up workslot. */
+static __rte_always_inline uint16_t
+otx2_ssogws_get_work_empty(struct otx2_ssogws *ws, struct rte_event *ev,
+ const uint32_t flags)
+{
+ union otx2_sso_event event;
+ uint64_t tstamp_ptr;
+ uint64_t get_work1;
+ uint64_t mbuf;
+
+#ifdef RTE_ARCH_ARM64
+ asm volatile(
+ " ldr %[tag], [%[tag_loc]] \n"
+ " ldr %[wqp], [%[wqp_loc]] \n"
+ " tbz %[tag], 63, done%= \n"
+ " sevl \n"
+ "rty%=: wfe \n"
+ " ldr %[tag], [%[tag_loc]] \n"
+ " ldr %[wqp], [%[wqp_loc]] \n"
+ " tbnz %[tag], 63, rty%= \n"
+ "done%=: dmb ld \n"
+ " prfm pldl1keep, [%[wqp], #8] \n"
+ " sub %[mbuf], %[wqp], #0x80 \n"
+ " prfm pldl1keep, [%[mbuf]] \n"
+ : [tag] "=&r" (event.get_work0),
+ [wqp] "=&r" (get_work1),
+ [mbuf] "=&r" (mbuf)
+ : [tag_loc] "r" (ws->tag_op),
+ [wqp_loc] "r" (ws->wqp_op)
+ );
+#else
+ event.get_work0 = otx2_read64(ws->tag_op);
+ while ((BIT_ULL(63)) & event.get_work0)
+ event.get_work0 = otx2_read64(ws->tag_op);
+
+ get_work1 = otx2_read64(ws->wqp_op);
+ rte_prefetch_non_temporal((const void *)get_work1);
+ mbuf = (uint64_t)((char *)get_work1 - sizeof(struct rte_mbuf));
+ rte_prefetch_non_temporal((const void *)mbuf);
+#endif
+
+ event.get_work0 = (event.get_work0 & (0x3ull << 32)) << 6 |
+ (event.get_work0 & (0x3FFull << 36)) << 4 |
+ (event.get_work0 & 0xffffffff);
+ ws->cur_tt = event.sched_type;
+ ws->cur_grp = event.queue_id;
+
+ if (event.sched_type != SSO_TT_EMPTY &&
+ event.event_type == RTE_EVENT_TYPE_ETHDEV) {
+ otx2_wqe_to_mbuf(get_work1, mbuf, event.sub_event_type,
+ (uint32_t) event.get_work0, flags, NULL);
+		/* Extract the timestamp if PTP is enabled. */
+ tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)get_work1)
+ + OTX2_SSO_WQE_SG_PTR);
+ otx2_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, ws->tstamp,
+ flags, (uint64_t *)tstamp_ptr);
+ get_work1 = mbuf;
+ }
+
+ ev->event = event.get_work0;
+ ev->u64 = get_work1;
+
+ return !!get_work1;
+}
+
+static __rte_always_inline void
+otx2_ssogws_add_work(struct otx2_ssogws *ws, const uint64_t event_ptr,
+ const uint32_t tag, const uint8_t new_tt,
+ const uint16_t grp)
+{
+ uint64_t add_work0;
+
+ add_work0 = tag | ((uint64_t)(new_tt) << 32);
+ otx2_store_pair(add_work0, event_ptr, ws->grps_base[grp]);
+}
+
+static __rte_always_inline void
+otx2_ssogws_swtag_desched(struct otx2_ssogws *ws, uint32_t tag, uint8_t new_tt,
+ uint16_t grp)
+{
+ uint64_t val;
+
+ val = tag | ((uint64_t)(new_tt & 0x3) << 32) | ((uint64_t)grp << 34);
+ otx2_write64(val, ws->swtag_desched_op);
+}
+
+static __rte_always_inline void
+otx2_ssogws_swtag_norm(struct otx2_ssogws *ws, uint32_t tag, uint8_t new_tt)
+{
+ uint64_t val;
+
+ val = tag | ((uint64_t)(new_tt & 0x3) << 32);
+ otx2_write64(val, ws->swtag_norm_op);
+}
+
+static __rte_always_inline void
+otx2_ssogws_swtag_untag(struct otx2_ssogws *ws)
+{
+ otx2_write64(0, OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
+ SSOW_LF_GWS_OP_SWTAG_UNTAG);
+ ws->cur_tt = SSO_SYNC_UNTAGGED;
+}
+
+static __rte_always_inline void
+otx2_ssogws_swtag_flush(struct otx2_ssogws *ws)
+{
+ otx2_write64(0, OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
+ SSOW_LF_GWS_OP_SWTAG_FLUSH);
+ ws->cur_tt = SSO_SYNC_EMPTY;
+}
+
+static __rte_always_inline void
+otx2_ssogws_desched(struct otx2_ssogws *ws)
+{
+ otx2_write64(0, OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
+ SSOW_LF_GWS_OP_DESCHED);
+}
+
+static __rte_always_inline void
+otx2_ssogws_swtag_wait(struct otx2_ssogws *ws)
+{
+#ifdef RTE_ARCH_ARM64
+ uint64_t swtp;
+
+ asm volatile (
+ " ldr %[swtb], [%[swtp_loc]] \n"
+ " cbz %[swtb], done%= \n"
+ " sevl \n"
+ "rty%=: wfe \n"
+ " ldr %[swtb], [%[swtp_loc]] \n"
+ " cbnz %[swtb], rty%= \n"
+ "done%=: \n"
+ : [swtb] "=&r" (swtp)
+ : [swtp_loc] "r" (ws->swtp_op)
+ );
+#else
+ /* Wait for the SWTAG/SWTAG_FULL operation */
+ while (otx2_read64(ws->swtp_op))
+ ;
+#endif
+}
+
+static __rte_always_inline void
+otx2_ssogws_head_wait(struct otx2_ssogws *ws)
+{
+#ifdef RTE_ARCH_ARM64
+ uint64_t tag;
+
+ asm volatile (
+ " ldr %[tag], [%[tag_op]] \n"
+ " tbnz %[tag], 35, done%= \n"
+ " sevl \n"
+ "rty%=: wfe \n"
+ " ldr %[tag], [%[tag_op]] \n"
+ " tbz %[tag], 35, rty%= \n"
+ "done%=: \n"
+ : [tag] "=&r" (tag)
+ : [tag_op] "r" (ws->tag_op)
+ );
+#else
+ /* Wait for the HEAD to be set */
+ while (!(otx2_read64(ws->tag_op) & BIT_ULL(35)))
+ ;
+#endif
+}
+
+static __rte_always_inline void
+otx2_ssogws_order(struct otx2_ssogws *ws, const uint8_t wait_flag)
+{
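+	/* When requested, wait until the current tag reaches the head of its
+	 * flow before issuing the I/O write barrier, so that Tx commands for
+	 * ordered events are emitted in flow order.
+	 */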
+ if (wait_flag)
+ otx2_ssogws_head_wait(ws);
+
+ rte_cio_wmb();
+}
+
+static __rte_always_inline const struct otx2_eth_txq *
+otx2_ssogws_xtract_meta(struct rte_mbuf *m)
+{
+ return rte_eth_devices[m->port].data->tx_queues[
+ rte_event_eth_tx_adapter_txq_get(m)];
+}
+
+static __rte_always_inline void
+otx2_ssogws_prepare_pkt(const struct otx2_eth_txq *txq, struct rte_mbuf *m,
+ uint64_t *cmd, const uint32_t flags)
+{
+ otx2_lmt_mov(cmd, txq->cmd, otx2_nix_tx_ext_subs(flags));
+ otx2_nix_xmit_prepare(m, cmd, flags);
+}
+
+static __rte_always_inline uint16_t
+otx2_ssogws_event_tx(struct otx2_ssogws *ws, struct rte_event ev[],
+ uint64_t *cmd, const uint32_t flags)
+{
+ struct rte_mbuf *m = ev[0].mbuf;
+ const struct otx2_eth_txq *txq = otx2_ssogws_xtract_meta(m);
+
+ rte_prefetch_non_temporal(txq);
+
+ if ((flags & NIX_TX_OFFLOAD_SECURITY_F) &&
+ (m->ol_flags & PKT_TX_SEC_OFFLOAD))
+ return otx2_sec_event_tx(ws, ev, m, txq, flags);
+
+ /* Perform header writes before barrier for TSO */
+ otx2_nix_xmit_prepare_tso(m, flags);
+ otx2_ssogws_order(ws, !ev->sched_type);
+ otx2_ssogws_prepare_pkt(txq, m, cmd, flags);
+
+ if (flags & NIX_TX_MULTI_SEG_F) {
+ const uint16_t segdw = otx2_nix_prepare_mseg(m, cmd, flags);
+ otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
+ m->ol_flags, segdw, flags);
+ otx2_nix_xmit_mseg_one(cmd, txq->lmt_addr, txq->io_addr, segdw);
+ } else {
+		/* Pass the number of segment dwords as 4: HDR + EXT + SG + SMEM. */
+ otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
+ m->ol_flags, 4, flags);
+ otx2_nix_xmit_one(cmd, txq->lmt_addr, txq->io_addr, flags);
+ }
+
+ return 1;
+}
+
+#endif
diff --git a/src/spdk/dpdk/drivers/event/octeontx2/otx2_worker_dual.c b/src/spdk/dpdk/drivers/event/octeontx2/otx2_worker_dual.c
new file mode 100644
index 000000000..3d55d921b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/octeontx2/otx2_worker_dual.c
@@ -0,0 +1,343 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include "otx2_worker_dual.h"
+#include "otx2_worker.h"
+
+static __rte_noinline uint8_t
+otx2_ssogws_dual_new_event(struct otx2_ssogws_dual *ws,
+ const struct rte_event *ev)
+{
+ const uint32_t tag = (uint32_t)ev->event;
+ const uint8_t new_tt = ev->sched_type;
+ const uint64_t event_ptr = ev->u64;
+ const uint16_t grp = ev->queue_id;
+
+ if (ws->xaq_lmt <= *ws->fc_mem)
+ return 0;
+
+ otx2_ssogws_dual_add_work(ws, event_ptr, tag, new_tt, grp);
+
+ return 1;
+}
+
+static __rte_always_inline void
+otx2_ssogws_dual_fwd_swtag(struct otx2_ssogws_state *ws,
+ const struct rte_event *ev)
+{
+ const uint32_t tag = (uint32_t)ev->event;
+ const uint8_t new_tt = ev->sched_type;
+ const uint8_t cur_tt = ws->cur_tt;
+
+ /* 96XX model
+ * cur_tt/new_tt SSO_SYNC_ORDERED SSO_SYNC_ATOMIC SSO_SYNC_UNTAGGED
+ *
+ * SSO_SYNC_ORDERED norm norm untag
+ * SSO_SYNC_ATOMIC norm norm untag
+ * SSO_SYNC_UNTAGGED norm norm NOOP
+ */
+ if (new_tt == SSO_SYNC_UNTAGGED) {
+ if (cur_tt != SSO_SYNC_UNTAGGED)
+ otx2_ssogws_swtag_untag((struct otx2_ssogws *)ws);
+ } else {
+ otx2_ssogws_swtag_norm((struct otx2_ssogws *)ws, tag, new_tt);
+ }
+}
+
+static __rte_always_inline void
+otx2_ssogws_dual_fwd_group(struct otx2_ssogws_state *ws,
+ const struct rte_event *ev, const uint16_t grp)
+{
+ const uint32_t tag = (uint32_t)ev->event;
+ const uint8_t new_tt = ev->sched_type;
+
+ otx2_write64(ev->u64, OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
+ SSOW_LF_GWS_OP_UPD_WQP_GRP1);
+ rte_smp_wmb();
+ otx2_ssogws_swtag_desched((struct otx2_ssogws *)ws, tag, new_tt, grp);
+}
+
+static __rte_always_inline void
+otx2_ssogws_dual_forward_event(struct otx2_ssogws_dual *ws,
+ struct otx2_ssogws_state *vws,
+ const struct rte_event *ev)
+{
+ const uint8_t grp = ev->queue_id;
+
+	/* Group hasn't changed; use SWTAG to forward the event. */
+ if (vws->cur_grp == grp) {
+ otx2_ssogws_dual_fwd_swtag(vws, ev);
+ ws->swtag_req = 1;
+ } else {
+ /*
+		 * The group has changed for group-based work pipelining;
+		 * use the deschedule/add_work operation to transfer the
+		 * event to the new group/core.
+ */
+ otx2_ssogws_dual_fwd_group(vws, ev, grp);
+ }
+}
+
+uint16_t __rte_hot
+otx2_ssogws_dual_enq(void *port, const struct rte_event *ev)
+{
+ struct otx2_ssogws_dual *ws = port;
+ struct otx2_ssogws_state *vws = &ws->ws_state[!ws->vws];
+
+ switch (ev->op) {
+ case RTE_EVENT_OP_NEW:
+ rte_smp_mb();
+ return otx2_ssogws_dual_new_event(ws, ev);
+ case RTE_EVENT_OP_FORWARD:
+ otx2_ssogws_dual_forward_event(ws, vws, ev);
+ break;
+ case RTE_EVENT_OP_RELEASE:
+ otx2_ssogws_swtag_flush((struct otx2_ssogws *)vws);
+ break;
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+uint16_t __rte_hot
+otx2_ssogws_dual_enq_burst(void *port, const struct rte_event ev[],
+ uint16_t nb_events)
+{
+ RTE_SET_USED(nb_events);
+ return otx2_ssogws_dual_enq(port, ev);
+}
+
+uint16_t __rte_hot
+otx2_ssogws_dual_enq_new_burst(void *port, const struct rte_event ev[],
+ uint16_t nb_events)
+{
+ struct otx2_ssogws_dual *ws = port;
+ uint16_t i, rc = 1;
+
+ rte_smp_mb();
+ if (ws->xaq_lmt <= *ws->fc_mem)
+ return 0;
+
+ for (i = 0; i < nb_events && rc; i++)
+ rc = otx2_ssogws_dual_new_event(ws, &ev[i]);
+
+ return nb_events;
+}
+
+uint16_t __rte_hot
+otx2_ssogws_dual_enq_fwd_burst(void *port, const struct rte_event ev[],
+ uint16_t nb_events)
+{
+ struct otx2_ssogws_dual *ws = port;
+ struct otx2_ssogws_state *vws = &ws->ws_state[!ws->vws];
+
+ RTE_SET_USED(nb_events);
+ otx2_ssogws_dual_forward_event(ws, vws, ev);
+
+ return 1;
+}
+
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+uint16_t __rte_hot \
+otx2_ssogws_dual_deq_ ##name(void *port, struct rte_event *ev, \
+ uint64_t timeout_ticks) \
+{ \
+ struct otx2_ssogws_dual *ws = port; \
+ uint8_t gw; \
+ \
+ rte_prefetch_non_temporal(ws); \
+ RTE_SET_USED(timeout_ticks); \
+ if (ws->swtag_req) { \
+ otx2_ssogws_swtag_wait((struct otx2_ssogws *) \
+ &ws->ws_state[!ws->vws]); \
+ ws->swtag_req = 0; \
+ return 1; \
+ } \
+ \
+ gw = otx2_ssogws_dual_get_work(&ws->ws_state[ws->vws], \
+ &ws->ws_state[!ws->vws], ev, \
+ flags, ws->lookup_mem, \
+ ws->tstamp); \
+ ws->vws = !ws->vws; \
+ \
+ return gw; \
+} \
+ \
+uint16_t __rte_hot \
+otx2_ssogws_dual_deq_burst_ ##name(void *port, struct rte_event ev[], \
+ uint16_t nb_events, \
+ uint64_t timeout_ticks) \
+{ \
+ RTE_SET_USED(nb_events); \
+ \
+ return otx2_ssogws_dual_deq_ ##name(port, ev, timeout_ticks); \
+} \
+ \
+uint16_t __rte_hot \
+otx2_ssogws_dual_deq_timeout_ ##name(void *port, struct rte_event *ev, \
+ uint64_t timeout_ticks) \
+{ \
+ struct otx2_ssogws_dual *ws = port; \
+ uint64_t iter; \
+ uint8_t gw; \
+ \
+ if (ws->swtag_req) { \
+ otx2_ssogws_swtag_wait((struct otx2_ssogws *) \
+ &ws->ws_state[!ws->vws]); \
+ ws->swtag_req = 0; \
+ return 1; \
+ } \
+ \
+ gw = otx2_ssogws_dual_get_work(&ws->ws_state[ws->vws], \
+ &ws->ws_state[!ws->vws], ev, \
+ flags, ws->lookup_mem, \
+ ws->tstamp); \
+ ws->vws = !ws->vws; \
+ for (iter = 1; iter < timeout_ticks && (gw == 0); iter++) { \
+ gw = otx2_ssogws_dual_get_work(&ws->ws_state[ws->vws], \
+ &ws->ws_state[!ws->vws], \
+ ev, flags, \
+ ws->lookup_mem, \
+ ws->tstamp); \
+ ws->vws = !ws->vws; \
+ } \
+ \
+ return gw; \
+} \
+ \
+uint16_t __rte_hot \
+otx2_ssogws_dual_deq_timeout_burst_ ##name(void *port, \
+ struct rte_event ev[], \
+ uint16_t nb_events, \
+ uint64_t timeout_ticks) \
+{ \
+ RTE_SET_USED(nb_events); \
+ \
+ return otx2_ssogws_dual_deq_timeout_ ##name(port, ev, \
+ timeout_ticks); \
+} \
+ \
+uint16_t __rte_hot \
+otx2_ssogws_dual_deq_seg_ ##name(void *port, struct rte_event *ev, \
+ uint64_t timeout_ticks) \
+{ \
+ struct otx2_ssogws_dual *ws = port; \
+ uint8_t gw; \
+ \
+ RTE_SET_USED(timeout_ticks); \
+ if (ws->swtag_req) { \
+ otx2_ssogws_swtag_wait((struct otx2_ssogws *) \
+ &ws->ws_state[!ws->vws]); \
+ ws->swtag_req = 0; \
+ return 1; \
+ } \
+ \
+ gw = otx2_ssogws_dual_get_work(&ws->ws_state[ws->vws], \
+ &ws->ws_state[!ws->vws], ev, \
+ flags | NIX_RX_MULTI_SEG_F, \
+ ws->lookup_mem, \
+ ws->tstamp); \
+ ws->vws = !ws->vws; \
+ \
+ return gw; \
+} \
+ \
+uint16_t __rte_hot \
+otx2_ssogws_dual_deq_seg_burst_ ##name(void *port, \
+ struct rte_event ev[], \
+ uint16_t nb_events, \
+ uint64_t timeout_ticks) \
+{ \
+ RTE_SET_USED(nb_events); \
+ \
+ return otx2_ssogws_dual_deq_seg_ ##name(port, ev, \
+ timeout_ticks); \
+} \
+ \
+uint16_t __rte_hot \
+otx2_ssogws_dual_deq_seg_timeout_ ##name(void *port, \
+ struct rte_event *ev, \
+ uint64_t timeout_ticks) \
+{ \
+ struct otx2_ssogws_dual *ws = port; \
+ uint64_t iter; \
+ uint8_t gw; \
+ \
+ if (ws->swtag_req) { \
+ otx2_ssogws_swtag_wait((struct otx2_ssogws *) \
+ &ws->ws_state[!ws->vws]); \
+ ws->swtag_req = 0; \
+ return 1; \
+ } \
+ \
+ gw = otx2_ssogws_dual_get_work(&ws->ws_state[ws->vws], \
+ &ws->ws_state[!ws->vws], ev, \
+ flags | NIX_RX_MULTI_SEG_F, \
+ ws->lookup_mem, \
+ ws->tstamp); \
+ ws->vws = !ws->vws; \
+ for (iter = 1; iter < timeout_ticks && (gw == 0); iter++) { \
+ gw = otx2_ssogws_dual_get_work(&ws->ws_state[ws->vws], \
+ &ws->ws_state[!ws->vws], \
+ ev, flags | \
+ NIX_RX_MULTI_SEG_F, \
+ ws->lookup_mem, \
+ ws->tstamp); \
+ ws->vws = !ws->vws; \
+ } \
+ \
+ return gw; \
+} \
+ \
+uint16_t __rte_hot \
+otx2_ssogws_dual_deq_seg_timeout_burst_ ##name(void *port, \
+ struct rte_event ev[], \
+ uint16_t nb_events, \
+ uint64_t timeout_ticks) \
+{ \
+ RTE_SET_USED(nb_events); \
+ \
+ return otx2_ssogws_dual_deq_seg_timeout_ ##name(port, ev, \
+ timeout_ticks); \
+}
+
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
+uint16_t __rte_hot \
+otx2_ssogws_dual_tx_adptr_enq_ ## name(void *port, \
+ struct rte_event ev[], \
+ uint16_t nb_events) \
+{ \
+ struct otx2_ssogws_dual *ws = port; \
+ struct otx2_ssogws *vws = \
+ (struct otx2_ssogws *)&ws->ws_state[!ws->vws]; \
+ uint64_t cmd[sz]; \
+ \
+ RTE_SET_USED(nb_events); \
+ return otx2_ssogws_event_tx(vws, ev, cmd, flags); \
+}
+SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+#undef T
+
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
+uint16_t __rte_hot \
+otx2_ssogws_dual_tx_adptr_enq_seg_ ## name(void *port, \
+ struct rte_event ev[], \
+ uint16_t nb_events) \
+{ \
+ struct otx2_ssogws_dual *ws = port; \
+ struct otx2_ssogws *vws = \
+ (struct otx2_ssogws *)&ws->ws_state[!ws->vws]; \
+ uint64_t cmd[(sz) + NIX_TX_MSEG_SG_DWORDS - 2]; \
+ \
+ RTE_SET_USED(nb_events); \
+ return otx2_ssogws_event_tx(vws, ev, cmd, (flags) | \
+ NIX_TX_MULTI_SEG_F); \
+}
+SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+#undef T
diff --git a/src/spdk/dpdk/drivers/event/octeontx2/otx2_worker_dual.h b/src/spdk/dpdk/drivers/event/octeontx2/otx2_worker_dual.h
new file mode 100644
index 000000000..c88420eb4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/octeontx2/otx2_worker_dual.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_WORKER_DUAL_H__
+#define __OTX2_WORKER_DUAL_H__
+
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+
+#include <otx2_common.h>
+#include "otx2_evdev.h"
+
+/* SSO Operations */
+static __rte_always_inline uint16_t
+otx2_ssogws_dual_get_work(struct otx2_ssogws_state *ws,
+ struct otx2_ssogws_state *ws_pair,
+ struct rte_event *ev, const uint32_t flags,
+ const void * const lookup_mem,
+ struct otx2_timesync_info * const tstamp)
+{
+ const uint64_t set_gw = BIT_ULL(16) | 1;
+ union otx2_sso_event event;
+ uint64_t tstamp_ptr;
+ uint64_t get_work1;
+ uint64_t mbuf;
+
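+	/* Dual workslot ping-pong: poll the current workslot for completed
+	 * work and, once it is ready, immediately kick a GET_WORK on the
+	 * paired workslot so the next event is already being fetched while
+	 * this one is processed.
+	 */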
+ if (flags & NIX_RX_OFFLOAD_PTYPE_F)
+ rte_prefetch_non_temporal(lookup_mem);
+#ifdef RTE_ARCH_ARM64
+ asm volatile(
+ "rty%=: \n"
+ " ldr %[tag], [%[tag_loc]] \n"
+ " ldr %[wqp], [%[wqp_loc]] \n"
+ " tbnz %[tag], 63, rty%= \n"
+ "done%=: str %[gw], [%[pong]] \n"
+ " dmb ld \n"
+ " prfm pldl1keep, [%[wqp], #8]\n"
+ " sub %[mbuf], %[wqp], #0x80 \n"
+ " prfm pldl1keep, [%[mbuf]] \n"
+ : [tag] "=&r" (event.get_work0),
+ [wqp] "=&r" (get_work1),
+ [mbuf] "=&r" (mbuf)
+ : [tag_loc] "r" (ws->tag_op),
+ [wqp_loc] "r" (ws->wqp_op),
+ [gw] "r" (set_gw),
+ [pong] "r" (ws_pair->getwrk_op)
+ );
+#else
+ event.get_work0 = otx2_read64(ws->tag_op);
+ while ((BIT_ULL(63)) & event.get_work0)
+ event.get_work0 = otx2_read64(ws->tag_op);
+ get_work1 = otx2_read64(ws->wqp_op);
+ otx2_write64(set_gw, ws_pair->getwrk_op);
+
+ rte_prefetch0((const void *)get_work1);
+ mbuf = (uint64_t)((char *)get_work1 - sizeof(struct rte_mbuf));
+ rte_prefetch0((const void *)mbuf);
+#endif
+ event.get_work0 = (event.get_work0 & (0x3ull << 32)) << 6 |
+ (event.get_work0 & (0x3FFull << 36)) << 4 |
+ (event.get_work0 & 0xffffffff);
+ ws->cur_tt = event.sched_type;
+ ws->cur_grp = event.queue_id;
+
+ if (event.sched_type != SSO_TT_EMPTY &&
+ event.event_type == RTE_EVENT_TYPE_ETHDEV) {
+ otx2_wqe_to_mbuf(get_work1, mbuf, event.sub_event_type,
+ (uint32_t) event.get_work0, flags, lookup_mem);
+		/* Extract the timestamp if PTP is enabled. CGX prepends the
+		 * timestamp to the packet data, and it can be derived from
+		 * WQE dword 9, which corresponds to the SG iova.
+		 * rte_pktmbuf_mtod_offset could be used for this purpose, but
+		 * it hurts performance because it reads mbuf->buf_addr, which
+		 * is generally not cache-hot in the fast path.
+		 */
+ tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)get_work1)
+ + OTX2_SSO_WQE_SG_PTR);
+ otx2_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp, flags,
+ (uint64_t *)tstamp_ptr);
+ get_work1 = mbuf;
+ }
+
+ ev->event = event.get_work0;
+ ev->u64 = get_work1;
+
+ return !!get_work1;
+}
+
+static __rte_always_inline void
+otx2_ssogws_dual_add_work(struct otx2_ssogws_dual *ws, const uint64_t event_ptr,
+ const uint32_t tag, const uint8_t new_tt,
+ const uint16_t grp)
+{
+ uint64_t add_work0;
+
+ add_work0 = tag | ((uint64_t)(new_tt) << 32);
+ otx2_store_pair(add_work0, event_ptr, ws->grps_base[grp]);
+}
+
+#endif
diff --git a/src/spdk/dpdk/drivers/event/octeontx2/rte_pmd_octeontx2_event_version.map b/src/spdk/dpdk/drivers/event/octeontx2/rte_pmd_octeontx2_event_version.map
new file mode 100644
index 000000000..f9f17e4f6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/octeontx2/rte_pmd_octeontx2_event_version.map
@@ -0,0 +1,3 @@
+DPDK_20.0 {
+ local: *;
+};