path: root/src/spdk/dpdk/drivers/event/dpaa2
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
commit    e6918187568dbd01842d8d1d2c808ce16a894239 (patch)
tree      64f88b554b444a49f656b6c656111a145cbbaa28 /src/spdk/dpdk/drivers/event/dpaa2
parent    Initial commit. (diff)
Adding upstream version 18.2.2. (upstream/18.2.2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/dpdk/drivers/event/dpaa2')
-rw-r--r--  src/spdk/dpdk/drivers/event/dpaa2/Makefile                           41
-rw-r--r--  src/spdk/dpdk/drivers/event/dpaa2/dpaa2_eventdev.c                 1214
-rw-r--r--  src/spdk/dpdk/drivers/event/dpaa2/dpaa2_eventdev.h                   93
-rw-r--r--  src/spdk/dpdk/drivers/event/dpaa2/dpaa2_eventdev_logs.h              44
-rw-r--r--  src/spdk/dpdk/drivers/event/dpaa2/dpaa2_eventdev_selftest.c         833
-rw-r--r--  src/spdk/dpdk/drivers/event/dpaa2/dpaa2_hw_dpcon.c                  113
-rw-r--r--  src/spdk/dpdk/drivers/event/dpaa2/meson.build                        13
-rw-r--r--  src/spdk/dpdk/drivers/event/dpaa2/rte_pmd_dpaa2_event_version.map     3
8 files changed, 2354 insertions, 0 deletions
diff --git a/src/spdk/dpdk/drivers/event/dpaa2/Makefile b/src/spdk/dpdk/drivers/event/dpaa2/Makefile
new file mode 100644
index 000000000..75cf197c5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/dpaa2/Makefile
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2017,2019 NXP
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_dpaa2_event.a
+
+CFLAGS += $(WERROR_FLAGS)
+
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/mc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/portal
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa2
+CFLAGS += -I$(RTE_SDK)/drivers/event/dpaa2
+CFLAGS += -I$(RTE_SDK)/drivers/common/dpaax
+LDLIBS += -lrte_eal -lrte_eventdev
+LDLIBS += -lrte_common_dpaax
+LDLIBS += -lrte_bus_fslmc -lrte_mempool_dpaa2 -lrte_pmd_dpaa2
+LDLIBS += -lrte_bus_vdev -lrte_mempool -lrte_mbuf -lrte_ethdev
+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2
+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/mc
+
+LDLIBS += -lrte_pmd_dpaa2_sec
+CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa2_sec
+
+# versioning export map
+EXPORT_MAP := rte_pmd_dpaa2_event_version.map
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2_hw_dpcon.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2_eventdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2_eventdev_selftest.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/event/dpaa2/dpaa2_eventdev.c b/src/spdk/dpdk/drivers/event/dpaa2/dpaa2_eventdev.c
new file mode 100644
index 000000000..a196ad4c6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -0,0 +1,1214 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017,2019 NXP
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/epoll.h>
+
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_fslmc.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_memory.h>
+#include <rte_pci.h>
+#include <rte_bus_vdev.h>
+#include <rte_ethdev_driver.h>
+#include <rte_cryptodev.h>
+#include <rte_event_eth_rx_adapter.h>
+#include <rte_event_eth_tx_adapter.h>
+
+#include <fslmc_vfio.h>
+#include <dpaa2_hw_pvt.h>
+#include <dpaa2_hw_mempool.h>
+#include <dpaa2_hw_dpio.h>
+#include <dpaa2_ethdev.h>
+#include <dpaa2_sec_event.h>
+#include "dpaa2_eventdev.h"
+#include "dpaa2_eventdev_logs.h"
+#include <portal/dpaa2_hw_pvt.h>
+#include <mc/fsl_dpci.h>
+
+/* Clarifications
+ * Eventdev = SoC instance
+ * Eventport = DPIO instance
+ * Eventqueue = DPCON instance
+ * One eventdev can have N eventqueues
+ * Soft event flow = DPCI instance
+ */
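+
+/* An illustrative sketch (not part of the driver) of how an application
+ * drives this PMD through the generic rte_eventdev API. The single queue,
+ * single port and the limit values are assumptions for the example:
+ *
+ *   int dev_id = rte_event_dev_get_dev_id("event_dpaa2");
+ *   struct rte_event_dev_config cfg = {
+ *           .nb_event_queues = 1,  .nb_event_ports = 1,
+ *           .nb_events_limit = 4096,
+ *           .nb_event_queue_flows = 1024,
+ *           .nb_event_port_dequeue_depth = 8,
+ *           .nb_event_port_enqueue_depth = 8,
+ *   };
+ *   struct rte_event evs[8];
+ *
+ *   rte_event_dev_configure(dev_id, &cfg);
+ *   rte_event_queue_setup(dev_id, 0, NULL);          // DPCON-backed queue
+ *   rte_event_port_setup(dev_id, 0, NULL);           // DPIO-backed port
+ *   rte_event_port_link(dev_id, 0, NULL, NULL, 0);   // link all queues
+ *   rte_event_dev_start(dev_id);
+ *   uint16_t n = rte_event_dequeue_burst(dev_id, 0, evs, 8, 0);
+ *   rte_event_enqueue_burst(dev_id, 0, evs, n);
+ */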
+
+/* Dynamic logging identifier for the event device */
+int dpaa2_logtype_event;
+#define DPAA2_EV_TX_RETRY_COUNT 10000
+
+static uint16_t
+dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
+ uint16_t nb_events)
+{
+
+ struct dpaa2_port *dpaa2_portal = port;
+ struct dpaa2_dpio_dev *dpio_dev;
+ uint32_t queue_id = ev[0].queue_id;
+ struct dpaa2_eventq *evq_info;
+ uint32_t fqid, retry_count;
+ struct qbman_swp *swp;
+ struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+ uint32_t loop, frames_to_send;
+ struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
+ uint16_t num_tx = 0;
+ int i, n, ret;
+ uint8_t channel_index;
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ /* Affine current thread context to a qman portal */
+ ret = dpaa2_affine_qbman_swp();
+ if (ret < 0) {
+ DPAA2_EVENTDEV_ERR(
+ "Failed to allocate IO portal, tid: %d\n",
+ rte_gettid());
+ return 0;
+ }
+ }
+ /* todo - dpaa2_portal shall have dpio_dev - no per thread variable */
+ dpio_dev = DPAA2_PER_LCORE_DPIO;
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ if (likely(dpaa2_portal->is_port_linked))
+ goto skip_linking;
+
+ /* Create mapping between portal and channel to receive packets */
+ for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
+ evq_info = &dpaa2_portal->evq_info[i];
+ if (!evq_info->event_port)
+ continue;
+
+ ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
+ CMD_PRI_LOW,
+ dpio_dev->token,
+ evq_info->dpcon->dpcon_id,
+ &channel_index);
+ if (ret < 0) {
+ DPAA2_EVENTDEV_ERR(
+ "Static dequeue config failed: err(%d)", ret);
+ goto err;
+ }
+
+ qbman_swp_push_set(swp, channel_index, 1);
+ evq_info->dpcon->channel_index = channel_index;
+ }
+ dpaa2_portal->is_port_linked = true;
+
+skip_linking:
+ evq_info = &dpaa2_portal->evq_info[queue_id];
+
+ while (nb_events) {
+ frames_to_send = (nb_events > dpaa2_eqcr_size) ?
+ dpaa2_eqcr_size : nb_events;
+
+ for (loop = 0; loop < frames_to_send; loop++) {
+ const struct rte_event *event = &ev[num_tx + loop];
+
+ if (event->sched_type != RTE_SCHED_TYPE_ATOMIC)
+ fqid = evq_info->dpci->rx_queue[
+ DPAA2_EVENT_DPCI_PARALLEL_QUEUE].fqid;
+ else
+ fqid = evq_info->dpci->rx_queue[
+ DPAA2_EVENT_DPCI_ATOMIC_QUEUE].fqid;
+
+ /* Prepare enqueue descriptor */
+ qbman_eq_desc_clear(&eqdesc[loop]);
+ qbman_eq_desc_set_fq(&eqdesc[loop], fqid);
+ qbman_eq_desc_set_no_orp(&eqdesc[loop], 0);
+ qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);
+
+ if (event->sched_type == RTE_SCHED_TYPE_ATOMIC
+ && event->mbuf->seqn) {
+ uint8_t dqrr_index = event->mbuf->seqn - 1;
+
+ qbman_eq_desc_set_dca(&eqdesc[loop], 1,
+ dqrr_index, 0);
+ DPAA2_PER_LCORE_DQRR_SIZE--;
+ DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
+ }
+
+ memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
+
+ /*
+ * todo - need to align with hw context data
+ * to avoid copy
+ */
+ struct rte_event *ev_temp = rte_malloc(NULL,
+ sizeof(struct rte_event), 0);
+
+ if (!ev_temp) {
+ if (!loop)
+ return num_tx;
+ frames_to_send = loop;
+ DPAA2_EVENTDEV_ERR(
+ "Unable to allocate event object");
+ goto send_partial;
+ }
+ rte_memcpy(ev_temp, event, sizeof(struct rte_event));
+ DPAA2_SET_FD_ADDR((&fd_arr[loop]), (size_t)ev_temp);
+ DPAA2_SET_FD_LEN((&fd_arr[loop]),
+ sizeof(struct rte_event));
+ }
+send_partial:
+ loop = 0;
+ retry_count = 0;
+ while (loop < frames_to_send) {
+ ret = qbman_swp_enqueue_multiple_desc(swp,
+ &eqdesc[loop], &fd_arr[loop],
+ frames_to_send - loop);
+ if (unlikely(ret < 0)) {
+ retry_count++;
+ if (retry_count > DPAA2_EV_TX_RETRY_COUNT) {
+ /* Give up on this burst; events already
+ * submitted are counted once in num_tx.
+ */
+ num_tx += loop;
+ nb_events -= loop;
+ return num_tx;
+ }
+ } else {
+ loop += ret;
+ retry_count = 0;
+ }
+ }
+ num_tx += loop;
+ nb_events -= loop;
+ }
+
+ return num_tx;
+err:
+ for (n = 0; n < i; n++) {
+ evq_info = &dpaa2_portal->evq_info[n];
+ if (!evq_info->event_port)
+ continue;
+ qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
+ dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
+ dpio_dev->token,
+ evq_info->dpcon->dpcon_id);
+ }
+ return 0;
+
+}
+
+static uint16_t
+dpaa2_eventdev_enqueue(void *port, const struct rte_event *ev)
+{
+ return dpaa2_eventdev_enqueue_burst(port, ev, 1);
+}
+
+static void dpaa2_eventdev_dequeue_wait(uint64_t timeout_ticks)
+{
+ struct epoll_event epoll_ev;
+
+ qbman_swp_interrupt_clear_status(DPAA2_PER_LCORE_PORTAL,
+ QBMAN_SWP_INTERRUPT_DQRI);
+
+ epoll_wait(DPAA2_PER_LCORE_DPIO->epoll_fd,
+ &epoll_ev, 1, timeout_ticks);
+}
+
+static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
+ const struct qbman_fd *fd,
+ const struct qbman_result *dq,
+ struct dpaa2_queue *rxq,
+ struct rte_event *ev)
+{
+ struct rte_event *ev_temp =
+ (struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);
+
+ RTE_SET_USED(rxq);
+
+ rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
+ rte_free(ev_temp);
+
+ qbman_swp_dqrr_consume(swp, dq);
+}
+
+static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
+ const struct qbman_fd *fd,
+ const struct qbman_result *dq,
+ struct dpaa2_queue *rxq,
+ struct rte_event *ev)
+{
+ struct rte_event *ev_temp =
+ (struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);
+ uint8_t dqrr_index = qbman_get_dqrr_idx(dq);
+
+ RTE_SET_USED(swp);
+ RTE_SET_USED(rxq);
+
+ rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
+ rte_free(ev_temp);
+ ev->mbuf->seqn = dqrr_index + 1;
+ DPAA2_PER_LCORE_DQRR_SIZE++;
+ DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
+ DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
+}
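+
+/* Note on the bookkeeping above: the DQRR entry index is stored in
+ * mbuf->seqn as (index + 1), so a seqn of 0 means "no held atomic
+ * context". The enqueue path (dpaa2_eventdev_enqueue_burst) reads it back
+ * to build a DCA (discrete consumption acknowledgement) descriptor, which
+ * releases the DQRR entry, and with it the atomic flow, only when the
+ * consumed event is enqueued again.
+ */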
+
+static uint16_t
+dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
+ uint16_t nb_events, uint64_t timeout_ticks)
+{
+ const struct qbman_result *dq;
+ struct dpaa2_dpio_dev *dpio_dev = NULL;
+ struct dpaa2_port *dpaa2_portal = port;
+ struct dpaa2_eventq *evq_info;
+ struct qbman_swp *swp;
+ const struct qbman_fd *fd;
+ struct dpaa2_queue *rxq;
+ int num_pkts = 0, ret, i = 0, n;
+ uint8_t channel_index;
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ /* Affine current thread context to a qman portal */
+ ret = dpaa2_affine_qbman_swp();
+ if (ret < 0) {
+ DPAA2_EVENTDEV_ERR(
+ "Failed to allocate IO portal, tid: %d\n",
+ rte_gettid());
+ return 0;
+ }
+ }
+
+ dpio_dev = DPAA2_PER_LCORE_DPIO;
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ if (likely(dpaa2_portal->is_port_linked))
+ goto skip_linking;
+
+ /* Create mapping between portal and channel to receive packets */
+ for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
+ evq_info = &dpaa2_portal->evq_info[i];
+ if (!evq_info->event_port)
+ continue;
+
+ ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
+ CMD_PRI_LOW,
+ dpio_dev->token,
+ evq_info->dpcon->dpcon_id,
+ &channel_index);
+ if (ret < 0) {
+ DPAA2_EVENTDEV_ERR(
+ "Static dequeue config failed: err(%d)", ret);
+ goto err;
+ }
+
+ qbman_swp_push_set(swp, channel_index, 1);
+ evq_info->dpcon->channel_index = channel_index;
+ }
+ dpaa2_portal->is_port_linked = true;
+
+skip_linking:
+ /* Check if there are atomic contexts to be released */
+ while (DPAA2_PER_LCORE_DQRR_SIZE) {
+ if (DPAA2_PER_LCORE_DQRR_HELD & (1 << i)) {
+ qbman_swp_dqrr_idx_consume(swp, i);
+ DPAA2_PER_LCORE_DQRR_SIZE--;
+ DPAA2_PER_LCORE_DQRR_MBUF(i)->seqn =
+ DPAA2_INVALID_MBUF_SEQN;
+ }
+ i++;
+ }
+ DPAA2_PER_LCORE_DQRR_HELD = 0;
+
+ do {
+ dq = qbman_swp_dqrr_next(swp);
+ if (!dq) {
+ if (!num_pkts && timeout_ticks) {
+ dpaa2_eventdev_dequeue_wait(timeout_ticks);
+ timeout_ticks = 0;
+ continue;
+ }
+ return num_pkts;
+ }
+ qbman_swp_prefetch_dqrr_next(swp);
+
+ fd = qbman_result_DQ_fd(dq);
+ rxq = (struct dpaa2_queue *)(size_t)qbman_result_DQ_fqd_ctx(dq);
+ if (rxq) {
+ rxq->cb(swp, fd, dq, rxq, &ev[num_pkts]);
+ } else {
+ qbman_swp_dqrr_consume(swp, dq);
+ DPAA2_EVENTDEV_ERR("Null Return VQ received");
+ return 0;
+ }
+
+ num_pkts++;
+ } while (num_pkts < nb_events);
+
+ return num_pkts;
+err:
+ for (n = 0; n < i; n++) {
+ evq_info = &dpaa2_portal->evq_info[n];
+ if (!evq_info->event_port)
+ continue;
+
+ qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
+ dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
+ dpio_dev->token,
+ evq_info->dpcon->dpcon_id);
+ }
+ return 0;
+}
+
+static uint16_t
+dpaa2_eventdev_dequeue(void *port, struct rte_event *ev,
+ uint64_t timeout_ticks)
+{
+ return dpaa2_eventdev_dequeue_burst(port, ev, 1, timeout_ticks);
+}
+
+static void
+dpaa2_eventdev_info_get(struct rte_eventdev *dev,
+ struct rte_event_dev_info *dev_info)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ memset(dev_info, 0, sizeof(struct rte_event_dev_info));
+ dev_info->min_dequeue_timeout_ns =
+ DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
+ dev_info->max_dequeue_timeout_ns =
+ DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT;
+ dev_info->dequeue_timeout_ns =
+ DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
+ dev_info->max_event_queues = priv->max_event_queues;
+ dev_info->max_event_queue_flows =
+ DPAA2_EVENT_MAX_QUEUE_FLOWS;
+ dev_info->max_event_queue_priority_levels =
+ DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
+ dev_info->max_event_priority_levels =
+ DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
+ dev_info->max_event_ports = rte_fslmc_get_device_count(DPAA2_IO);
+ /* we only support as many DPIO ports as there are cores */
+ if (dev_info->max_event_ports > rte_lcore_count())
+ dev_info->max_event_ports = rte_lcore_count();
+ dev_info->max_event_port_dequeue_depth =
+ DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
+ dev_info->max_event_port_enqueue_depth =
+ DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
+ dev_info->max_num_events = DPAA2_EVENT_MAX_NUM_EVENTS;
+ dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
+ RTE_EVENT_DEV_CAP_BURST_MODE|
+ RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
+ RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
+ RTE_EVENT_DEV_CAP_NONSEQ_MODE;
+
+}
+
+static int
+dpaa2_eventdev_configure(const struct rte_eventdev *dev)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ struct rte_event_dev_config *conf = &dev->data->dev_conf;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ priv->nb_event_queues = conf->nb_event_queues;
+ priv->nb_event_ports = conf->nb_event_ports;
+ priv->nb_event_queue_flows = conf->nb_event_queue_flows;
+ priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
+ priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
+ priv->event_dev_cfg = conf->event_dev_cfg;
+
+ /* Check dequeue timeout method is per dequeue or global */
+ if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
+ /*
+ * The timeout is supplied with each dequeue operation,
+ * so the device-level value is invalidated here.
+ */
+ priv->dequeue_timeout_ns = 0;
+
+ } else if (conf->dequeue_timeout_ns == 0) {
+ priv->dequeue_timeout_ns = DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
+ } else {
+ priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
+ }
+
+ DPAA2_EVENTDEV_DEBUG("Configured eventdev devid=%d",
+ dev->data->dev_id);
+ return 0;
+}
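+
+/* An illustrative sketch (not part of the driver) of the two timeout modes
+ * handled above, using only the public API:
+ *
+ *   // Global mode: every dequeue waits up to cfg.dequeue_timeout_ns.
+ *   cfg.dequeue_timeout_ns = 100;
+ *
+ *   // Per-dequeue mode: the timeout_ticks argument passed to each
+ *   // rte_event_dequeue_burst() call is used instead.
+ *   cfg.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
+ */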
+
+static int
+dpaa2_eventdev_start(struct rte_eventdev *dev)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ return 0;
+}
+
+static void
+dpaa2_eventdev_stop(struct rte_eventdev *dev)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+}
+
+static int
+dpaa2_eventdev_close(struct rte_eventdev *dev)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ return 0;
+}
+
+static void
+dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
+ struct rte_event_queue_conf *queue_conf)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queue_id);
+
+ queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
+ queue_conf->nb_atomic_order_sequences =
+ DPAA2_EVENT_QUEUE_ORDER_SEQUENCES;
+ queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
+ queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+}
+
+static int
+dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
+ const struct rte_event_queue_conf *queue_conf)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ struct dpaa2_eventq *evq_info = &priv->evq_info[queue_id];
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ switch (queue_conf->schedule_type) {
+ case RTE_SCHED_TYPE_PARALLEL:
+ case RTE_SCHED_TYPE_ATOMIC:
+ case RTE_SCHED_TYPE_ORDERED:
+ break;
+ default:
+ DPAA2_EVENTDEV_ERR("Schedule type is not supported.");
+ return -1;
+ }
+ evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
+ evq_info->event_queue_id = queue_id;
+
+ return 0;
+}
+
+static void
+dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queue_id);
+}
+
+static void
+dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
+ struct rte_event_port_conf *port_conf)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(port_id);
+
+ port_conf->new_event_threshold =
+ DPAA2_EVENT_MAX_NUM_EVENTS;
+ port_conf->dequeue_depth =
+ DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
+ port_conf->enqueue_depth =
+ DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
+ port_conf->disable_implicit_release = 0;
+}
+
+static int
+dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
+ const struct rte_event_port_conf *port_conf)
+{
+ char event_port_name[32];
+ struct dpaa2_port *portal;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(port_conf);
+
+ sprintf(event_port_name, "event-port-%d", port_id);
+ portal = rte_malloc(event_port_name, sizeof(struct dpaa2_port), 0);
+ if (!portal) {
+ DPAA2_EVENTDEV_ERR("Memory allocation failure");
+ return -ENOMEM;
+ }
+
+ memset(portal, 0, sizeof(struct dpaa2_port));
+ dev->data->ports[port_id] = portal;
+ return 0;
+}
+
+static void
+dpaa2_eventdev_port_release(void *port)
+{
+ struct dpaa2_port *portal = port;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ if (!portal)
+ return;
+
+ /* TODO: Cleanup is required when ports are in linked state. */
+ if (portal->is_port_linked)
+ DPAA2_EVENTDEV_WARN("Event port must be unlinked before release");
+
+ rte_free(portal);
+}
+
+static int
+dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
+ const uint8_t queues[], const uint8_t priorities[],
+ uint16_t nb_links)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ struct dpaa2_port *dpaa2_portal = port;
+ struct dpaa2_eventq *evq_info;
+ uint16_t i;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(priorities);
+
+ for (i = 0; i < nb_links; i++) {
+ evq_info = &priv->evq_info[queues[i]];
+ memcpy(&dpaa2_portal->evq_info[queues[i]], evq_info,
+ sizeof(struct dpaa2_eventq));
+ dpaa2_portal->evq_info[queues[i]].event_port = port;
+ dpaa2_portal->num_linked_evq++;
+ }
+
+ return (int)nb_links;
+}
+
+static int
+dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
+ uint8_t queues[], uint16_t nb_unlinks)
+{
+ struct dpaa2_port *dpaa2_portal = port;
+ int i;
+ struct dpaa2_dpio_dev *dpio_dev = NULL;
+ struct dpaa2_eventq *evq_info;
+ struct qbman_swp *swp;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queues);
+
+ for (i = 0; i < nb_unlinks; i++) {
+ evq_info = &dpaa2_portal->evq_info[queues[i]];
+
+ if (DPAA2_PER_LCORE_DPIO && evq_info->dpcon) {
+ /* todo dpaa2_portal shall have dpio_dev-no per lcore*/
+ dpio_dev = DPAA2_PER_LCORE_DPIO;
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ qbman_swp_push_set(swp,
+ evq_info->dpcon->channel_index, 0);
+ dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
+ dpio_dev->token,
+ evq_info->dpcon->dpcon_id);
+ }
+ memset(evq_info, 0, sizeof(struct dpaa2_eventq));
+ if (dpaa2_portal->num_linked_evq)
+ dpaa2_portal->num_linked_evq--;
+ }
+
+ if (!dpaa2_portal->num_linked_evq)
+ dpaa2_portal->is_port_linked = false;
+
+ return (int)nb_unlinks;
+}
+
+
+static int
+dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
+ uint64_t *timeout_ticks)
+{
+ uint32_t scale = 1000*1000;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ *timeout_ticks = ns / scale;
+
+ return 0;
+}
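+
+/* One tick therefore equals one millisecond, which matches the epoll_wait()
+ * millisecond timeout used by dpaa2_eventdev_dequeue_wait(). For example,
+ * rte_event_dequeue_timeout_ticks(dev_id, 10 * 1000 * 1000, &ticks) yields
+ * ticks == 10, i.e. a 10 ms wait.
+ */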
+
+static void
+dpaa2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(f);
+}
+
+static int
+dpaa2_eventdev_eth_caps_get(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ uint32_t *caps)
+{
+ const char *ethdev_driver = eth_dev->device->driver->name;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ if (!strcmp(ethdev_driver, "net_dpaa2"))
+ *caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA2_CAP;
+ else
+ *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
+
+ return 0;
+}
+
+static int
+dpaa2_eventdev_eth_queue_add_all(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ uint8_t ev_qid = queue_conf->ev.queue_id;
+ struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
+ int i, ret;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ ret = dpaa2_eth_eventq_attach(eth_dev, i,
+ dpcon, queue_conf);
+ if (ret) {
+ DPAA2_EVENTDEV_ERR(
+ "Event queue attach failed: err(%d)", ret);
+ goto fail;
+ }
+ }
+ return 0;
+fail:
+ for (i = (i - 1); i >= 0 ; i--)
+ dpaa2_eth_eventq_detach(eth_dev, i);
+
+ return ret;
+}
+
+static int
+dpaa2_eventdev_eth_queue_add(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ uint8_t ev_qid = queue_conf->ev.queue_id;
+ struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
+ int ret;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ if (rx_queue_id == -1)
+ return dpaa2_eventdev_eth_queue_add_all(dev,
+ eth_dev, queue_conf);
+
+ ret = dpaa2_eth_eventq_attach(eth_dev, rx_queue_id,
+ dpcon, queue_conf);
+ if (ret) {
+ DPAA2_EVENTDEV_ERR(
+ "Event queue attach failed: err(%d)", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int
+dpaa2_eventdev_eth_queue_del_all(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev)
+{
+ int i, ret;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ ret = dpaa2_eth_eventq_detach(eth_dev, i);
+ if (ret) {
+ DPAA2_EVENTDEV_ERR(
+ "Event queue detach failed: err(%d)", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int
+dpaa2_eventdev_eth_queue_del(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id)
+{
+ int ret;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ if (rx_queue_id == -1)
+ return dpaa2_eventdev_eth_queue_del_all(dev, eth_dev);
+
+ ret = dpaa2_eth_eventq_detach(eth_dev, rx_queue_id);
+ if (ret) {
+ DPAA2_EVENTDEV_ERR(
+ "Event queue detach failed: err(%d)", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+dpaa2_eventdev_eth_start(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(eth_dev);
+
+ return 0;
+}
+
+static int
+dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(eth_dev);
+
+ return 0;
+}
+
+static int
+dpaa2_eventdev_crypto_caps_get(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev,
+ uint32_t *caps)
+{
+ const char *name = cdev->data->name;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ if (!strncmp(name, "dpsec-", 6))
+ *caps = RTE_EVENT_CRYPTO_ADAPTER_DPAA2_CAP;
+ else
+ return -1;
+
+ return 0;
+}
+
+static int
+dpaa2_eventdev_crypto_queue_add_all(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cryptodev,
+ const struct rte_event *ev)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ uint8_t ev_qid = ev->queue_id;
+ struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
+ int i, ret;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ for (i = 0; i < cryptodev->data->nb_queue_pairs; i++) {
+ ret = dpaa2_sec_eventq_attach(cryptodev, i, dpcon, ev);
+ if (ret) {
+ DPAA2_EVENTDEV_ERR("dpaa2_sec_eventq_attach failed: ret %d\n",
+ ret);
+ goto fail;
+ }
+ }
+ return 0;
+fail:
+ for (i = (i - 1); i >= 0 ; i--)
+ dpaa2_sec_eventq_detach(cryptodev, i);
+
+ return ret;
+}
+
+static int
+dpaa2_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cryptodev,
+ int32_t rx_queue_id,
+ const struct rte_event *ev)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ uint8_t ev_qid = ev->queue_id;
+ struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
+ int ret;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ if (rx_queue_id == -1)
+ return dpaa2_eventdev_crypto_queue_add_all(dev,
+ cryptodev, ev);
+
+ ret = dpaa2_sec_eventq_attach(cryptodev, rx_queue_id,
+ dpcon, ev);
+ if (ret) {
+ DPAA2_EVENTDEV_ERR(
+ "dpaa2_sec_eventq_attach failed: ret: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int
+dpaa2_eventdev_crypto_queue_del_all(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev)
+{
+ int i, ret;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ for (i = 0; i < cdev->data->nb_queue_pairs; i++) {
+ ret = dpaa2_sec_eventq_detach(cdev, i);
+ if (ret) {
+ DPAA2_EVENTDEV_ERR(
+ "dpaa2_sec_eventq_detach failed:ret %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int
+dpaa2_eventdev_crypto_queue_del(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cryptodev,
+ int32_t rx_queue_id)
+{
+ int ret;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ if (rx_queue_id == -1)
+ return dpaa2_eventdev_crypto_queue_del_all(dev, cryptodev);
+
+ ret = dpaa2_sec_eventq_detach(cryptodev, rx_queue_id);
+ if (ret) {
+ DPAA2_EVENTDEV_ERR(
+ "dpaa2_sec_eventq_detach failed: ret: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+dpaa2_eventdev_crypto_start(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cryptodev)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(cryptodev);
+
+ return 0;
+}
+
+static int
+dpaa2_eventdev_crypto_stop(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cryptodev)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(cryptodev);
+
+ return 0;
+}
+
+static int
+dpaa2_eventdev_tx_adapter_create(uint8_t id,
+ const struct rte_eventdev *dev)
+{
+ RTE_SET_USED(id);
+ RTE_SET_USED(dev);
+
+ /* Nothing to do. Simply return. */
+ return 0;
+}
+
+static int
+dpaa2_eventdev_tx_adapter_caps(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ uint32_t *caps)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(eth_dev);
+
+ *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
+ return 0;
+}
+
+static uint16_t
+dpaa2_eventdev_txa_enqueue_same_dest(void *port,
+ struct rte_event ev[],
+ uint16_t nb_events)
+{
+ struct rte_mbuf *m[DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH], *m0;
+ uint8_t qid, i;
+
+ RTE_SET_USED(port);
+
+ m0 = (struct rte_mbuf *)ev[0].mbuf;
+ qid = rte_event_eth_tx_adapter_txq_get(m0);
+
+ for (i = 0; i < nb_events; i++)
+ m[i] = (struct rte_mbuf *)ev[i].mbuf;
+
+ return rte_eth_tx_burst(m0->port, qid, m, nb_events);
+}
+
+static uint16_t
+dpaa2_eventdev_txa_enqueue(void *port,
+ struct rte_event ev[],
+ uint16_t nb_events)
+{
+ struct rte_mbuf *m;
+ uint8_t qid, i;
+
+ RTE_SET_USED(port);
+
+ for (i = 0; i < nb_events; i++) {
+ m = (struct rte_mbuf *)ev[i].mbuf;
+ qid = rte_event_eth_tx_adapter_txq_get(m);
+ rte_eth_tx_burst(m->port, qid, &m, 1);
+ }
+
+ return nb_events;
+}
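+
+/* Design note: the event/eth Tx adapter calls the same_dest variant above
+ * when an entire burst targets one tx queue, allowing a single batched
+ * rte_eth_tx_burst(); the generic variant must resolve the tx queue per
+ * event and therefore transmits one mbuf at a time.
+ */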
+
+static struct rte_eventdev_ops dpaa2_eventdev_ops = {
+ .dev_infos_get = dpaa2_eventdev_info_get,
+ .dev_configure = dpaa2_eventdev_configure,
+ .dev_start = dpaa2_eventdev_start,
+ .dev_stop = dpaa2_eventdev_stop,
+ .dev_close = dpaa2_eventdev_close,
+ .queue_def_conf = dpaa2_eventdev_queue_def_conf,
+ .queue_setup = dpaa2_eventdev_queue_setup,
+ .queue_release = dpaa2_eventdev_queue_release,
+ .port_def_conf = dpaa2_eventdev_port_def_conf,
+ .port_setup = dpaa2_eventdev_port_setup,
+ .port_release = dpaa2_eventdev_port_release,
+ .port_link = dpaa2_eventdev_port_link,
+ .port_unlink = dpaa2_eventdev_port_unlink,
+ .timeout_ticks = dpaa2_eventdev_timeout_ticks,
+ .dump = dpaa2_eventdev_dump,
+ .dev_selftest = test_eventdev_dpaa2,
+ .eth_rx_adapter_caps_get = dpaa2_eventdev_eth_caps_get,
+ .eth_rx_adapter_queue_add = dpaa2_eventdev_eth_queue_add,
+ .eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
+ .eth_rx_adapter_start = dpaa2_eventdev_eth_start,
+ .eth_rx_adapter_stop = dpaa2_eventdev_eth_stop,
+ .eth_tx_adapter_caps_get = dpaa2_eventdev_tx_adapter_caps,
+ .eth_tx_adapter_create = dpaa2_eventdev_tx_adapter_create,
+ .crypto_adapter_caps_get = dpaa2_eventdev_crypto_caps_get,
+ .crypto_adapter_queue_pair_add = dpaa2_eventdev_crypto_queue_add,
+ .crypto_adapter_queue_pair_del = dpaa2_eventdev_crypto_queue_del,
+ .crypto_adapter_start = dpaa2_eventdev_crypto_start,
+ .crypto_adapter_stop = dpaa2_eventdev_crypto_stop,
+};
+
+static int
+dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
+ struct dpaa2_dpcon_dev *dpcon_dev)
+{
+ struct dpci_rx_queue_cfg rx_queue_cfg;
+ int ret, i;
+
+ /* Configure the Rx queues to deliver frames to a DPCON object */
+ rx_queue_cfg.options = DPCI_QUEUE_OPT_DEST |
+ DPCI_QUEUE_OPT_USER_CTX;
+ rx_queue_cfg.dest_cfg.dest_type = DPCI_DEST_DPCON;
+ rx_queue_cfg.dest_cfg.dest_id = dpcon_dev->dpcon_id;
+ rx_queue_cfg.dest_cfg.priority = DPAA2_EVENT_DEFAULT_DPCI_PRIO;
+
+ dpci_dev->rx_queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb =
+ dpaa2_eventdev_process_parallel;
+ dpci_dev->rx_queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
+ dpaa2_eventdev_process_atomic;
+
+ for (i = 0 ; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) {
+ rx_queue_cfg.user_ctx = (size_t)(&dpci_dev->rx_queue[i]);
+ ret = dpci_set_rx_queue(&dpci_dev->dpci,
+ CMD_PRI_LOW,
+ dpci_dev->token, i,
+ &rx_queue_cfg);
+ if (ret) {
+ DPAA2_EVENTDEV_ERR(
+ "DPCI Rx queue setup failed: err(%d)",
+ ret);
+ return ret;
+ }
+ }
+ return 0;
+}
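+
+/* Why a DPCI is needed: events injected in software through
+ * rte_event_enqueue_burst() have no hardware producer, so the enqueue path
+ * wraps each rte_event in a frame descriptor and enqueues it to one of the
+ * two DPCI Rx queues configured here (parallel or atomic, selected by
+ * ev->sched_type). The DPCI delivers the frame to the DPCON, so software
+ * events arrive at dequeue through the same channel as hardware ones.
+ */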
+
+static int
+dpaa2_eventdev_create(const char *name)
+{
+ struct rte_eventdev *eventdev;
+ struct dpaa2_eventdev *priv;
+ struct dpaa2_dpcon_dev *dpcon_dev = NULL;
+ struct dpaa2_dpci_dev *dpci_dev = NULL;
+ int ret;
+
+ eventdev = rte_event_pmd_vdev_init(name,
+ sizeof(struct dpaa2_eventdev),
+ rte_socket_id());
+ if (eventdev == NULL) {
+ DPAA2_EVENTDEV_ERR("Failed to create Event device %s", name);
+ goto fail;
+ }
+
+ eventdev->dev_ops = &dpaa2_eventdev_ops;
+ eventdev->enqueue = dpaa2_eventdev_enqueue;
+ eventdev->enqueue_burst = dpaa2_eventdev_enqueue_burst;
+ eventdev->enqueue_new_burst = dpaa2_eventdev_enqueue_burst;
+ eventdev->enqueue_forward_burst = dpaa2_eventdev_enqueue_burst;
+ eventdev->dequeue = dpaa2_eventdev_dequeue;
+ eventdev->dequeue_burst = dpaa2_eventdev_dequeue_burst;
+ eventdev->txa_enqueue = dpaa2_eventdev_txa_enqueue;
+ eventdev->txa_enqueue_same_dest = dpaa2_eventdev_txa_enqueue_same_dest;
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ priv = eventdev->data->dev_private;
+ priv->max_event_queues = 0;
+
+ do {
+ dpcon_dev = rte_dpaa2_alloc_dpcon_dev();
+ if (!dpcon_dev)
+ break;
+ priv->evq_info[priv->max_event_queues].dpcon = dpcon_dev;
+
+ dpci_dev = rte_dpaa2_alloc_dpci_dev();
+ if (!dpci_dev) {
+ rte_dpaa2_free_dpcon_dev(dpcon_dev);
+ break;
+ }
+ priv->evq_info[priv->max_event_queues].dpci = dpci_dev;
+
+ ret = dpaa2_eventdev_setup_dpci(dpci_dev, dpcon_dev);
+ if (ret) {
+ DPAA2_EVENTDEV_ERR(
+ "DPCI setup failed: err(%d)", ret);
+ return ret;
+ }
+ priv->max_event_queues++;
+ } while (dpcon_dev && dpci_dev);
+
+ RTE_LOG(INFO, PMD, "%s eventdev created\n", name);
+
+ return 0;
+fail:
+ return -EFAULT;
+}
+
+static int
+dpaa2_eventdev_destroy(const char *name)
+{
+ struct rte_eventdev *eventdev;
+ struct dpaa2_eventdev *priv;
+ int i;
+
+ eventdev = rte_event_pmd_get_named_dev(name);
+ if (eventdev == NULL) {
+ RTE_EDEV_LOG_ERR("eventdev with name %s not allocated", name);
+ return -1;
+ }
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ priv = eventdev->data->dev_private;
+ for (i = 0; i < priv->max_event_queues; i++) {
+ if (priv->evq_info[i].dpcon)
+ rte_dpaa2_free_dpcon_dev(priv->evq_info[i].dpcon);
+
+ if (priv->evq_info[i].dpci)
+ rte_dpaa2_free_dpci_dev(priv->evq_info[i].dpci);
+
+ }
+ priv->max_event_queues = 0;
+
+ RTE_LOG(INFO, PMD, "%s eventdev cleaned\n", name);
+ return 0;
+}
+
+
+static int
+dpaa2_eventdev_probe(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ DPAA2_EVENTDEV_INFO("Initializing %s", name);
+ return dpaa2_eventdev_create(name);
+}
+
+static int
+dpaa2_eventdev_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ DPAA2_EVENTDEV_INFO("Closing %s", name);
+
+ dpaa2_eventdev_destroy(name);
+
+ return rte_event_pmd_vdev_uninit(name);
+}
+
+static struct rte_vdev_driver vdev_eventdev_dpaa2_pmd = {
+ .probe = dpaa2_eventdev_probe,
+ .remove = dpaa2_eventdev_remove
+};
+
+RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd);
+
+RTE_INIT(dpaa2_eventdev_init_log)
+{
+ dpaa2_logtype_event = rte_log_register("pmd.event.dpaa2");
+ if (dpaa2_logtype_event >= 0)
+ rte_log_set_level(dpaa2_logtype_event, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/event/dpaa2/dpaa2_eventdev.h b/src/spdk/dpdk/drivers/event/dpaa2/dpaa2_eventdev.h
new file mode 100644
index 000000000..785e52032
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/dpaa2/dpaa2_eventdev.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 NXP
+ */
+
+#ifndef __DPAA2_EVENTDEV_H__
+#define __DPAA2_EVENTDEV_H__
+
+#include <rte_eventdev_pmd.h>
+#include <rte_eventdev_pmd_vdev.h>
+#include <rte_atomic.h>
+#include <mc/fsl_dpcon.h>
+#include <mc/fsl_mc_sys.h>
+
+#define EVENTDEV_NAME_DPAA2_PMD event_dpaa2
+
+#define DPAA2_EVENT_DEFAULT_DPCI_PRIO 0
+
+#define DPAA2_EVENT_MAX_QUEUES 16
+#define DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT 1
+#define DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT (UINT32_MAX - 1)
+#define DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS 100UL
+#define DPAA2_EVENT_MAX_QUEUE_FLOWS 2048
+#define DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS 8
+#define DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS 0
+#define DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH 8
+#define DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH 8
+#define DPAA2_EVENT_MAX_NUM_EVENTS (INT32_MAX - 1)
+
+#define DPAA2_EVENT_QUEUE_ATOMIC_FLOWS 2048
+#define DPAA2_EVENT_QUEUE_ORDER_SEQUENCES 2048
+
+enum {
+ DPAA2_EVENT_DPCI_PARALLEL_QUEUE,
+ DPAA2_EVENT_DPCI_ATOMIC_QUEUE,
+ DPAA2_EVENT_DPCI_MAX_QUEUES
+};
+
+/** Ethernet Rx adapter capabilities returned when packets are transferred
+ * from an ethdev to the eventdev on DPAA2 devices.
+ */
+#define RTE_EVENT_ETH_RX_ADAPTER_DPAA2_CAP \
+ (RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT | \
+ RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ | \
+ RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID | \
+ RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)
+
+/** Crypto adapter capabilities returned when packets are transferred
+ * from a cryptodev to the eventdev on DPAA2 devices.
+ */
+#define RTE_EVENT_CRYPTO_ADAPTER_DPAA2_CAP \
+ (RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW | \
+ RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND | \
+ RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA)
+
+struct dpaa2_eventq {
+ /* DPcon device */
+ struct dpaa2_dpcon_dev *dpcon;
+ /* Attached DPCI device */
+ struct dpaa2_dpci_dev *dpci;
+ /* Mapped event port */
+ struct dpaa2_io_portal_t *event_port;
+ /* Configuration provided by the user */
+ uint32_t event_queue_cfg;
+ uint32_t event_queue_id;
+};
+
+struct dpaa2_port {
+ struct dpaa2_eventq evq_info[DPAA2_EVENT_MAX_QUEUES];
+ uint8_t num_linked_evq;
+ uint8_t is_port_linked;
+ uint64_t timeout_us;
+};
+
+struct dpaa2_eventdev {
+ struct dpaa2_eventq evq_info[DPAA2_EVENT_MAX_QUEUES];
+ uint32_t dequeue_timeout_ns;
+ uint8_t max_event_queues;
+ uint8_t nb_event_queues;
+ uint8_t nb_event_ports;
+ uint8_t resvd_1;
+ uint32_t nb_event_queue_flows;
+ uint32_t nb_event_port_dequeue_depth;
+ uint32_t nb_event_port_enqueue_depth;
+ uint32_t event_dev_cfg;
+};
+
+struct dpaa2_dpcon_dev *rte_dpaa2_alloc_dpcon_dev(void);
+void rte_dpaa2_free_dpcon_dev(struct dpaa2_dpcon_dev *dpcon);
+
+int test_eventdev_dpaa2(void);
+
+#endif /* __DPAA2_EVENTDEV_H__ */
diff --git a/src/spdk/dpdk/drivers/event/dpaa2/dpaa2_eventdev_logs.h b/src/spdk/dpdk/drivers/event/dpaa2/dpaa2_eventdev_logs.h
new file mode 100644
index 000000000..5da85c60f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/dpaa2/dpaa2_eventdev_logs.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018-2019 NXP
+ */
+
+#ifndef _DPAA2_EVENTDEV_LOGS_H_
+#define _DPAA2_EVENTDEV_LOGS_H_
+
+extern int dpaa2_logtype_event;
+
+#define DPAA2_EVENTDEV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa2_logtype_event, "dpaa2_event: " \
+ fmt "\n", ##args)
+
+#define DPAA2_EVENTDEV_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa2_logtype_event, "dpaa2_event: %s(): " \
+ fmt "\n", __func__, ##args)
+
+#define EVENTDEV_INIT_FUNC_TRACE() DPAA2_EVENTDEV_DEBUG(" >>")
+
+#define DPAA2_EVENTDEV_INFO(fmt, args...) \
+ DPAA2_EVENTDEV_LOG(INFO, fmt, ## args)
+#define DPAA2_EVENTDEV_ERR(fmt, args...) \
+ DPAA2_EVENTDEV_LOG(ERR, fmt, ## args)
+#define DPAA2_EVENTDEV_WARN(fmt, args...) \
+ DPAA2_EVENTDEV_LOG(WARNING, fmt, ## args)
+
+/* Datapath logs, compiled out when the level is below the current log level */
+#define DPAA2_EVENTDEV_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt, ## args)
+
+#define DPAA2_EVENTDEV_DP_DEBUG(fmt, args...) \
+ DPAA2_EVENTDEV_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA2_EVENTDEV_DP_INFO(fmt, args...) \
+ DPAA2_EVENTDEV_DP_LOG(INFO, fmt, ## args)
+#define DPAA2_EVENTDEV_DP_WARN(fmt, args...) \
+ DPAA2_EVENTDEV_DP_LOG(WARNING, fmt, ## args)
+
+#define dpaa2_evdev_info(fmt, ...) DPAA2_EVENTDEV_LOG(INFO, fmt, ##__VA_ARGS__)
+#define dpaa2_evdev_dbg(fmt, ...) DPAA2_EVENTDEV_LOG(DEBUG, fmt, ##__VA_ARGS__)
+#define dpaa2_evdev_err(fmt, ...) DPAA2_EVENTDEV_LOG(ERR, fmt, ##__VA_ARGS__)
+#define dpaa2_evdev__func_trace dpaa2_evdev_dbg
+#define dpaa2_evdev_selftest dpaa2_evdev_info
+
+#endif /* _DPAA2_EVENTDEV_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/event/dpaa2/dpaa2_eventdev_selftest.c b/src/spdk/dpdk/drivers/event/dpaa2/dpaa2_eventdev_selftest.c
new file mode 100644
index 000000000..ba4f4bd23
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/dpaa2/dpaa2_eventdev_selftest.c
@@ -0,0 +1,833 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018-2019 NXP
+ */
+
+#include <rte_atomic.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_hexdump.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_launch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_random.h>
+#include <rte_bus_vdev.h>
+#include <rte_test.h>
+
+#include "dpaa2_eventdev.h"
+#include "dpaa2_eventdev_logs.h"
+
+#define MAX_PORTS 4
+#define NUM_PACKETS (1 << 18)
+#define MAX_EVENTS 8
+#define DPAA2_TEST_RUN(setup, teardown, test) \
+ dpaa2_test_run(setup, teardown, test, #test)
+
+static int total;
+static int passed;
+static int failed;
+static int unsupported;
+
+static int evdev;
+static struct rte_mempool *eventdev_test_mempool;
+
+struct event_attr {
+ uint32_t flow_id;
+ uint8_t event_type;
+ uint8_t sub_event_type;
+ uint8_t sched_type;
+ uint8_t queue;
+ uint8_t port;
+ uint8_t seq;
+};
+
+static uint32_t seqn_list_index;
+static int seqn_list[NUM_PACKETS];
+
+static void
+seqn_list_init(void)
+{
+ RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
+ memset(seqn_list, 0, sizeof(seqn_list));
+ seqn_list_index = 0;
+}
+
+struct test_core_param {
+ rte_atomic32_t *total_events;
+ uint64_t dequeue_tmo_ticks;
+ uint8_t port;
+ uint8_t sched_type;
+};
+
+static int
+testsuite_setup(void)
+{
+ const char *eventdev_name = "event_dpaa2";
+
+ evdev = rte_event_dev_get_dev_id(eventdev_name);
+ if (evdev < 0) {
+ dpaa2_evdev_dbg("%d: Eventdev %s not found - creating.",
+ __LINE__, eventdev_name);
+ if (rte_vdev_init(eventdev_name, NULL) < 0) {
+ dpaa2_evdev_err("Error creating eventdev %s",
+ eventdev_name);
+ return -1;
+ }
+ evdev = rte_event_dev_get_dev_id(eventdev_name);
+ if (evdev < 0) {
+ dpaa2_evdev_err("Error finding newly created eventdev");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static void
+testsuite_teardown(void)
+{
+ rte_event_dev_close(evdev);
+}
+
+static void
+devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
+ struct rte_event_dev_info *info)
+{
+ memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
+ dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
+ dev_conf->nb_event_ports = info->max_event_ports;
+ dev_conf->nb_event_queues = info->max_event_queues;
+ dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
+ dev_conf->nb_event_port_dequeue_depth =
+ info->max_event_port_dequeue_depth;
+ dev_conf->nb_event_port_enqueue_depth =
+ info->max_event_port_enqueue_depth;
+ dev_conf->nb_events_limit =
+ info->max_num_events;
+}
+
+enum {
+ TEST_EVENTDEV_SETUP_DEFAULT,
+ TEST_EVENTDEV_SETUP_PRIORITY,
+ TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
+};
+
+static int
+_eventdev_setup(int mode)
+{
+ int i, ret;
+ struct rte_event_dev_config dev_conf;
+ struct rte_event_dev_info info;
+ const char *pool_name = "evdev_dpaa2_test_pool";
+
+ /* Create and destroy the pool for each test case to make it standalone */
+ eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
+ MAX_EVENTS,
+ 0 /*MBUF_CACHE_SIZE*/,
+ 0,
+ 512, /* Use very small mbufs */
+ rte_socket_id());
+ if (!eventdev_test_mempool) {
+ dpaa2_evdev_err("ERROR creating mempool");
+ return -1;
+ }
+
+ ret = rte_event_dev_info_get(evdev, &info);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+ RTE_TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS,
+ "ERROR max_num_events=%d < max_events=%d",
+ info.max_num_events, MAX_EVENTS);
+
+ devconf_set_default_sane_values(&dev_conf, &info);
+ if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
+ dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
+
+ ret = rte_event_dev_configure(evdev, &dev_conf);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
+
+ uint32_t queue_count;
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+
+ if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
+ if (queue_count > 8) {
+ dpaa2_evdev_err(
+ "test expects the unique priority per queue");
+ return -ENOTSUP;
+ }
+
+ /* Configure event queues(0 to n) with
+ * RTE_EVENT_DEV_PRIORITY_HIGHEST to
+ * RTE_EVENT_DEV_PRIORITY_LOWEST
+ */
+ uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
+ queue_count;
+ for (i = 0; i < (int)queue_count; i++) {
+ struct rte_event_queue_conf queue_conf;
+
+ ret = rte_event_queue_default_conf_get(evdev, i,
+ &queue_conf);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d",
+ i);
+ queue_conf.priority = i * step;
+ ret = rte_event_queue_setup(evdev, i, &queue_conf);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
+ i);
+ }
+
+ } else {
+ /* Configure event queues with default priority */
+ for (i = 0; i < (int)queue_count; i++) {
+ ret = rte_event_queue_setup(evdev, i, NULL);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
+ i);
+ }
+ }
+ /* Configure event ports */
+ uint32_t port_count;
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &port_count), "Port count get failed");
+ for (i = 0; i < (int)port_count; i++) {
+ ret = rte_event_port_setup(evdev, i, NULL);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
+ ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
+ RTE_TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d",
+ i);
+ }
+
+ ret = rte_event_dev_start(evdev);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start device");
+
+ return 0;
+}
+
+static int
+eventdev_setup(void)
+{
+ return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
+}
+
+static void
+eventdev_teardown(void)
+{
+ rte_event_dev_stop(evdev);
+ rte_mempool_free(eventdev_test_mempool);
+}
+
+static void
+update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
+ uint32_t flow_id, uint8_t event_type,
+ uint8_t sub_event_type, uint8_t sched_type,
+ uint8_t queue, uint8_t port, uint8_t seq)
+{
+ struct event_attr *attr;
+
+ /* Store the event attributes in mbuf for future reference */
+ attr = rte_pktmbuf_mtod(m, struct event_attr *);
+ attr->flow_id = flow_id;
+ attr->event_type = event_type;
+ attr->sub_event_type = sub_event_type;
+ attr->sched_type = sched_type;
+ attr->queue = queue;
+ attr->port = port;
+ attr->seq = seq;
+
+ ev->flow_id = flow_id;
+ ev->sub_event_type = sub_event_type;
+ ev->event_type = event_type;
+ /* Inject the new event */
+ ev->op = RTE_EVENT_OP_NEW;
+ ev->sched_type = sched_type;
+ ev->queue_id = queue;
+ ev->mbuf = m;
+}
+
+static int
+inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
+ uint8_t sched_type, uint8_t queue, uint8_t port,
+ unsigned int events)
+{
+ struct rte_mbuf *m;
+ unsigned int i;
+
+ for (i = 0; i < events; i++) {
+ struct rte_event ev = {.event = 0, .u64 = 0};
+
+ m = rte_pktmbuf_alloc(eventdev_test_mempool);
+ RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
+
+ update_event_and_validation_attr(m, &ev, flow_id, event_type,
+ sub_event_type, sched_type, queue, port, i);
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ }
+ return 0;
+}
+
+static int
+check_excess_events(uint8_t port)
+{
+ int i;
+ uint16_t valid_event;
+ struct rte_event ev;
+
+ /* Check for excess events, try for a few times and exit */
+ for (i = 0; i < 32; i++) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+
+ RTE_TEST_ASSERT_SUCCESS(valid_event,
+ "Unexpected valid event=%d", ev.mbuf->seqn);
+ }
+ return 0;
+}
+
+static int
+generate_random_events(const unsigned int total_events)
+{
+ struct rte_event_dev_info info;
+ unsigned int i;
+ int ret;
+
+ uint32_t queue_count;
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+
+ ret = rte_event_dev_info_get(evdev, &info);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+ for (i = 0; i < total_events; i++) {
+ ret = inject_events(
+ rte_rand() % info.max_event_queue_flows /*flow_id */,
+ RTE_EVENT_TYPE_CPU /* event_type */,
+ rte_rand() % 256 /* sub_event_type */,
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
+ rte_rand() % queue_count /* queue */,
+ 0 /* port */,
+ 1 /* events */);
+ if (ret)
+ return -1;
+ }
+ return ret;
+}
+
+
+static int
+validate_event(struct rte_event *ev)
+{
+ struct event_attr *attr;
+
+ attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
+ RTE_TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
+ "flow_id mismatch enq=%d deq =%d",
+ attr->flow_id, ev->flow_id);
+ RTE_TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
+ "event_type mismatch enq=%d deq =%d",
+ attr->event_type, ev->event_type);
+ RTE_TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
+ "sub_event_type mismatch enq=%d deq =%d",
+ attr->sub_event_type, ev->sub_event_type);
+ RTE_TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
+ "sched_type mismatch enq=%d deq =%d",
+ attr->sched_type, ev->sched_type);
+ RTE_TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
+ "queue mismatch enq=%d deq =%d",
+ attr->queue, ev->queue_id);
+ return 0;
+}
+
+typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
+ struct rte_event *ev);
+
+static int
+consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
+{
+ int ret;
+ uint16_t valid_event;
+ uint32_t events = 0, forward_progress_cnt = 0, index = 0;
+ struct rte_event ev;
+
+ while (1) {
+ if (++forward_progress_cnt > UINT16_MAX) {
+ dpaa2_evdev_err("Detected deadlock");
+ return -1;
+ }
+
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+ if (!valid_event)
+ continue;
+
+ forward_progress_cnt = 0;
+ ret = validate_event(&ev);
+ if (ret)
+ return -1;
+
+ if (fn != NULL) {
+ ret = fn(index, port, &ev);
+ RTE_TEST_ASSERT_SUCCESS(ret,
+ "Failed to validate test specific event");
+ }
+
+ ++index;
+
+ rte_pktmbuf_free(ev.mbuf);
+ if (++events >= total_events)
+ break;
+ }
+
+ return check_excess_events(port);
+}
+
+static int
+validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
+{
+ struct event_attr *attr;
+
+ attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
+
+ RTE_SET_USED(port);
+ RTE_TEST_ASSERT_EQUAL(index, attr->seq,
+ "index=%d != seqn=%d", index, attr->seq);
+ return 0;
+}
+
+static int
+test_simple_enqdeq(uint8_t sched_type)
+{
+ int ret;
+
+ ret = inject_events(0 /*flow_id */,
+ RTE_EVENT_TYPE_CPU /* event_type */,
+ 0 /* sub_event_type */,
+ sched_type,
+ 0 /* queue */,
+ 0 /* port */,
+ MAX_EVENTS);
+ if (ret)
+ return -1;
+
+ return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
+}
+
+static int
+test_simple_enqdeq_atomic(void)
+{
+ return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_simple_enqdeq_parallel(void)
+{
+ return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
+}
+
+/*
+ * Generate a prescribed number of events and spread them across available
+ * queues. On dequeue through a single event port (port 0), verify the
+ * enqueued event attributes.
+ */
+static int
+test_multi_queue_enq_single_port_deq(void)
+{
+ int ret;
+
+ ret = generate_random_events(MAX_EVENTS);
+ if (ret)
+ return -1;
+
+ return consume_events(0 /* port */, MAX_EVENTS, NULL);
+}
+
+static int
+worker_multi_port_fn(void *arg)
+{
+ struct test_core_param *param = arg;
+ struct rte_event ev;
+ uint16_t valid_event;
+ uint8_t port = param->port;
+ rte_atomic32_t *total_events = param->total_events;
+ int ret;
+
+ while (rte_atomic32_read(total_events) > 0) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+ if (!valid_event)
+ continue;
+
+ ret = validate_event(&ev);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
+ rte_pktmbuf_free(ev.mbuf);
+ rte_atomic32_sub(total_events, 1);
+ }
+ return 0;
+}
+
+static int
+wait_workers_to_join(int lcore, const rte_atomic32_t *count)
+{
+ uint64_t cycles, print_cycles;
+
+ RTE_SET_USED(count);
+
+ print_cycles = cycles = rte_get_timer_cycles();
+ while (rte_eal_get_lcore_state(lcore) != FINISHED) {
+ uint64_t new_cycles = rte_get_timer_cycles();
+
+ if (new_cycles - print_cycles > rte_get_timer_hz()) {
+ dpaa2_evdev_dbg("\r%s: events %d", __func__,
+ rte_atomic32_read(count));
+ print_cycles = new_cycles;
+ }
+ if (new_cycles - cycles > rte_get_timer_hz() * 10) {
+ dpaa2_evdev_info(
+ "%s: No schedules for seconds, deadlock (%d)",
+ __func__,
+ rte_atomic32_read(count));
+ rte_event_dev_dump(evdev, stdout);
+ cycles = new_cycles;
+ return -1;
+ }
+ }
+ rte_eal_mp_wait_lcore();
+ return 0;
+}
+
+
+static int
+launch_workers_and_wait(int (*master_worker)(void *),
+ int (*slave_workers)(void *), uint32_t total_events,
+ uint8_t nb_workers, uint8_t sched_type)
+{
+ uint8_t port = 0;
+ int w_lcore;
+ int ret;
+ struct test_core_param *param;
+ rte_atomic32_t atomic_total_events;
+ uint64_t dequeue_tmo_ticks;
+
+ if (!nb_workers)
+ return 0;
+
+ rte_atomic32_set(&atomic_total_events, total_events);
+ seqn_list_init();
+
+ param = malloc(sizeof(struct test_core_param) * nb_workers);
+ if (!param)
+ return -1;
+
+ ret = rte_event_dequeue_timeout_ticks(evdev,
+ rte_rand() % 10000000/* 10ms */, &dequeue_tmo_ticks);
+ if (ret) {
+ free(param);
+ return -1;
+ }
+
+ param[0].total_events = &atomic_total_events;
+ param[0].sched_type = sched_type;
+ param[0].port = 0;
+ param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
+ rte_smp_wmb();
+
+ w_lcore = rte_get_next_lcore(
+ /* start core */ -1,
+ /* skip master */ 1,
+ /* wrap */ 0);
+ rte_eal_remote_launch(master_worker, &param[0], w_lcore);
+
+ for (port = 1; port < nb_workers; port++) {
+ param[port].total_events = &atomic_total_events;
+ param[port].sched_type = sched_type;
+ param[port].port = port;
+ param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
+ rte_smp_wmb();
+ w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
+ rte_eal_remote_launch(slave_workers, &param[port], w_lcore);
+ }
+
+ ret = wait_workers_to_join(w_lcore, &atomic_total_events);
+ free(param);
+ return ret;
+}
+
+/*
+ * Generate a prescribed number of events and spread them across available
+ * queues. Dequeue the events through multiple ports and verify the enqueued
+ * event attributes
+ */
+static int
+test_multi_queue_enq_multi_port_deq(void)
+{
+ const unsigned int total_events = MAX_EVENTS;
+ uint32_t nr_ports;
+ int ret;
+
+ ret = generate_random_events(total_events);
+ if (ret)
+ return -1;
+
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &nr_ports), "Port count get failed");
+ nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
+
+ if (!nr_ports) {
+ dpaa2_evdev_err("%s: Not enough ports=%d or workers=%d",
+ __func__, nr_ports, rte_lcore_count() - 1);
+ return 0;
+ }
+
+ return launch_workers_and_wait(worker_multi_port_fn,
+ worker_multi_port_fn, total_events,
+ nr_ports, 0xff /* invalid */);
+}
+
+static void
+flush(uint8_t dev_id, struct rte_event event, void *arg)
+{
+ unsigned int *count = arg;
+
+ RTE_SET_USED(dev_id);
+ if (event.event_type == RTE_EVENT_TYPE_CPU)
+ *count = *count + 1;
+
+}
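+
+/* rte_event_dev_stop_flush_callback_register() arranges for the callback
+ * above to be invoked once for each event still inside the device when
+ * rte_event_dev_stop() is called; the test below uses it to count the
+ * flushed events and compares that count with the number injected.
+ */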
+
+static int
+test_dev_stop_flush(void)
+{
+ unsigned int total_events = MAX_EVENTS, count = 0;
+ int ret;
+
+ ret = generate_random_events(total_events);
+ if (ret)
+ return -1;
+
+ ret = rte_event_dev_stop_flush_callback_register(evdev, flush, &count);
+ if (ret)
+ return -2;
+ rte_event_dev_stop(evdev);
+ ret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL);
+ if (ret)
+ return -3;
+ RTE_TEST_ASSERT_EQUAL(total_events, count,
+ "count mismatch total_events=%d count=%d",
+ total_events, count);
+ return 0;
+}
+
+static int
+validate_queue_to_port_single_link(uint32_t index, uint8_t port,
+ struct rte_event *ev)
+{
+ RTE_SET_USED(index);
+ RTE_TEST_ASSERT_EQUAL(port, ev->queue_id,
+ "queue mismatch enq=%d deq =%d",
+ port, ev->queue_id);
+ return 0;
+}
+
+/*
+ * Link queue x to port x and check correctness of link by checking
+ * queue_id == x on dequeue on the specific port x
+ */
+static int
+test_queue_to_port_single_link(void)
+{
+ int i, nr_links, ret;
+
+ uint32_t port_count;
+
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &port_count), "Port count get failed");
+
+ /* Unlink all connections that were created in eventdev_setup */
+ for (i = 0; i < (int)port_count; i++) {
+ ret = rte_event_port_unlink(evdev, i, NULL, 0);
+ RTE_TEST_ASSERT(ret >= 0,
+ "Failed to unlink all queues port=%d", i);
+ }
+
+ uint32_t queue_count;
+
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+
+ nr_links = RTE_MIN(port_count, queue_count);
+ const unsigned int total_events = MAX_EVENTS / nr_links;
+
+ /* Link queue x to port x and inject events to queue x through port x */
+ for (i = 0; i < nr_links; i++) {
+ uint8_t queue = (uint8_t)i;
+
+ ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
+ RTE_TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);
+
+ ret = inject_events(
+ 0x100 /*flow_id */,
+ RTE_EVENT_TYPE_CPU /* event_type */,
+ rte_rand() % 256 /* sub_event_type */,
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
+ queue /* queue */,
+ i /* port */,
+ total_events /* events */);
+ if (ret)
+ return -1;
+ }
+
+ /* Verify the events generated from correct queue */
+ for (i = 0; i < nr_links; i++) {
+ ret = consume_events(i /* port */, total_events,
+ validate_queue_to_port_single_link);
+ if (ret)
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
+ struct rte_event *ev)
+{
+ RTE_SET_USED(index);
+ RTE_TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
+ "queue mismatch enq=%d deq =%d",
+ port, ev->queue_id);
+ return 0;
+}
+
+/*
+ * Link all even number of queues to port 0 and all odd number of queues to
+ * port 1 and verify the link connection on dequeue
+ */
+static int
+test_queue_to_port_multi_link(void)
+{
+ int ret, port0_events = 0, port1_events = 0;
+ uint8_t queue, port;
+ uint32_t nr_queues = 0;
+ uint32_t nr_ports = 0;
+
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &nr_queues), "Queue count get failed");
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &nr_ports), "Port count get failed");
+
+ if (nr_ports < 2) {
+ dpaa2_evdev_err("%s: Not enough ports to test ports=%d",
+ __func__, nr_ports);
+ return 0;
+ }
+
+ /* Unlink all connections that were created in eventdev_setup */
+ for (port = 0; port < nr_ports; port++) {
+ ret = rte_event_port_unlink(evdev, port, NULL, 0);
+ RTE_TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
+ port);
+ }
+
+ const unsigned int total_events = MAX_EVENTS / nr_queues;
+
+ /* Link all even number of queues to port0 and odd numbers to port 1*/
+ for (queue = 0; queue < nr_queues; queue++) {
+ port = queue & 0x1;
+ ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
+ RTE_TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
+ queue, port);
+
+ ret = inject_events(
+ 0x100 /*flow_id */,
+ RTE_EVENT_TYPE_CPU /* event_type */,
+ rte_rand() % 256 /* sub_event_type */,
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
+ queue /* queue */,
+ port /* port */,
+ total_events /* events */);
+ if (ret)
+ return -1;
+
+ if (port == 0)
+ port0_events += total_events;
+ else
+ port1_events += total_events;
+ }
+
+ ret = consume_events(0 /* port */, port0_events,
+ validate_queue_to_port_multi_link);
+ if (ret)
+ return -1;
+ ret = consume_events(1 /* port */, port1_events,
+ validate_queue_to_port_multi_link);
+ if (ret)
+ return -1;
+
+ return 0;
+}
+
+static void dpaa2_test_run(int (*setup)(void), void (*tdown)(void),
+ int (*test)(void), const char *name)
+{
+ if (setup() < 0) {
+ RTE_LOG(INFO, PMD, "Error setting up test %s", name);
+ unsupported++;
+ } else {
+ if (test() < 0) {
+ failed++;
+ RTE_LOG(INFO, PMD, "%s Failed\n", name);
+ } else {
+ passed++;
+ RTE_LOG(INFO, PMD, "%s Passed", name);
+ }
+ }
+
+ total++;
+ tdown();
+}
+
+int
+test_eventdev_dpaa2(void)
+{
+ testsuite_setup();
+
+ DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_simple_enqdeq_atomic);
+ DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_simple_enqdeq_parallel);
+ DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_queue_enq_single_port_deq);
+ DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_dev_stop_flush);
+ DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_queue_enq_multi_port_deq);
+ DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_queue_to_port_single_link);
+ DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_queue_to_port_multi_link);
+
+ DPAA2_EVENTDEV_INFO("Total tests : %d", total);
+ DPAA2_EVENTDEV_INFO("Passed : %d", passed);
+ DPAA2_EVENTDEV_INFO("Failed : %d", failed);
+ DPAA2_EVENTDEV_INFO("Not supported : %d", unsupported);
+
+ testsuite_teardown();
+
+ if (failed)
+ return -1;
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/event/dpaa2/dpaa2_hw_dpcon.c b/src/spdk/dpdk/drivers/event/dpaa2/dpaa2_hw_dpcon.c
new file mode 100644
index 000000000..200b71640
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/dpaa2/dpaa2_hw_dpcon.c
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2017 NXP
+ *
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <string.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <errno.h>
+
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_ethdev_driver.h>
+
+#include <rte_fslmc.h>
+#include <mc/fsl_dpcon.h>
+#include <portal/dpaa2_hw_pvt.h>
+#include "dpaa2_eventdev.h"
+#include "dpaa2_eventdev_logs.h"
+
+TAILQ_HEAD(dpcon_dev_list, dpaa2_dpcon_dev);
+static struct dpcon_dev_list dpcon_dev_list
+ = TAILQ_HEAD_INITIALIZER(dpcon_dev_list); /*!< DPCON device list */
+
+static int
+rte_dpaa2_create_dpcon_device(int dev_fd __rte_unused,
+ struct vfio_device_info *obj_info __rte_unused,
+ int dpcon_id)
+{
+ struct dpaa2_dpcon_dev *dpcon_node;
+ struct dpcon_attr attr;
+ int ret;
+
+ /* Allocate DPAA2 dpcon handle */
+ dpcon_node = rte_malloc(NULL, sizeof(struct dpaa2_dpcon_dev), 0);
+ if (!dpcon_node) {
+ DPAA2_EVENTDEV_ERR(
+ "Memory allocation failed for dpcon device");
+ return -1;
+ }
+
+ /* Open the dpcon object */
+ dpcon_node->dpcon.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
+ ret = dpcon_open(&dpcon_node->dpcon,
+ CMD_PRI_LOW, dpcon_id, &dpcon_node->token);
+ if (ret) {
+ DPAA2_EVENTDEV_ERR("Unable to open dpcon device: err(%d)",
+ ret);
+ rte_free(dpcon_node);
+ return -1;
+ }
+
+ /* Get the device attributes */
+ ret = dpcon_get_attributes(&dpcon_node->dpcon,
+ CMD_PRI_LOW, dpcon_node->token, &attr);
+ if (ret != 0) {
+ DPAA2_EVENTDEV_ERR("dpcon attribute fetch failed: err(%d)",
+ ret);
+ rte_free(dpcon_node);
+ return -1;
+ }
+
+ /* Updating device specific private information*/
+ dpcon_node->qbman_ch_id = attr.qbman_ch_id;
+ dpcon_node->num_priorities = attr.num_priorities;
+ dpcon_node->dpcon_id = dpcon_id;
+ rte_atomic16_init(&dpcon_node->in_use);
+
+ TAILQ_INSERT_TAIL(&dpcon_dev_list, dpcon_node, next);
+
+ return 0;
+}
+
+struct dpaa2_dpcon_dev *rte_dpaa2_alloc_dpcon_dev(void)
+{
+ struct dpaa2_dpcon_dev *dpcon_dev = NULL;
+
+ /* Get DPCON dev handle from list using index */
+ TAILQ_FOREACH(dpcon_dev, &dpcon_dev_list, next) {
+ if (dpcon_dev && rte_atomic16_test_and_set(&dpcon_dev->in_use))
+ break;
+ }
+
+ return dpcon_dev;
+}
+
+void rte_dpaa2_free_dpcon_dev(struct dpaa2_dpcon_dev *dpcon)
+{
+ struct dpaa2_dpcon_dev *dpcon_dev = NULL;
+
+ /* Match DPCON handle and mark it free */
+ TAILQ_FOREACH(dpcon_dev, &dpcon_dev_list, next) {
+ if (dpcon_dev == dpcon) {
+ rte_atomic16_dec(&dpcon_dev->in_use);
+ return;
+ }
+ }
+}
+
+static struct rte_dpaa2_object rte_dpaa2_dpcon_obj = {
+ .dev_type = DPAA2_CON,
+ .create = rte_dpaa2_create_dpcon_device,
+};
+
+RTE_PMD_REGISTER_DPAA2_OBJECT(dpcon, rte_dpaa2_dpcon_obj);
diff --git a/src/spdk/dpdk/drivers/event/dpaa2/meson.build b/src/spdk/dpdk/drivers/event/dpaa2/meson.build
new file mode 100644
index 000000000..71c8be3d6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/dpaa2/meson.build
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if not is_linux
+ build = false
+ reason = 'only supported on linux'
+endif
+deps += ['bus_vdev', 'pmd_dpaa2', 'pmd_dpaa2_sec']
+sources = files('dpaa2_hw_dpcon.c',
+ 'dpaa2_eventdev.c',
+ 'dpaa2_eventdev_selftest.c')
+
+includes += include_directories('../../crypto/dpaa2_sec/')
diff --git a/src/spdk/dpdk/drivers/event/dpaa2/rte_pmd_dpaa2_event_version.map b/src/spdk/dpdk/drivers/event/dpaa2/rte_pmd_dpaa2_event_version.map
new file mode 100644
index 000000000..f9f17e4f6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/event/dpaa2/rte_pmd_dpaa2_event_version.map
@@ -0,0 +1,3 @@
+DPDK_20.0 {
+ local: *;
+};